code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
"""File that aims to visualize the facial images data"""
import numpy as np
import tensorflow as tf
import umap
import umap.plot
from src.data.image_data_reader import ImageDataReader, Set
def embedding_model() -> tf.keras.Model:
    """Build an EfficientNetB2 embedding model for 48x48 RGB images.

    Returns:
        A ``tf.keras.Model`` mapping a batch of raw images of shape
        ``(batch, 48, 48, 3)`` to a flattened feature vector
        (2 * 2 * 1408 = 5632 features for this input size).
    """
    inputs = tf.keras.layers.Input(
        shape=(48, 48, 3), dtype=tf.float32, name="image"
    )
    # Preprocessing is part of the graph so callers can feed raw pixels.
    preprocessed = tf.keras.applications.efficientnet.preprocess_input(inputs)
    backbone = tf.keras.applications.EfficientNetB2(
        include_top=False,
        weights="imagenet",
        input_tensor=preprocessed,
        input_shape=(48, 48, 3),
    )
    # Use the backbone's own output tensor. The previous code called the
    # built model on the already-preprocessed tensor, which applied the
    # preprocessing (and the network's input mapping) a second time, and it
    # built the final Model from an intermediate tensor rather than the
    # actual Input layer.
    outputs = tf.keras.layers.Flatten()(backbone.output)
    return tf.keras.Model(inputs, outputs)
if __name__ == "__main__":  # pragma: no cover
    # Visualize test-set facial-image embeddings with UMAP.
    dr = ImageDataReader()
    # Convert single-channel images to RGB so they match the
    # EfficientNet input expected by embedding_model().
    dataset = dr.get_seven_emotion_data(
        Set.TEST, batch_size=1000, shuffle=False
    ).map(lambda x, y: (tf.image.grayscale_to_rgb(x), y))
    model = embedding_model()
    # 5632 = flattened EfficientNetB2 feature size for 48x48 input.
    data = np.empty((0, 5632))
    for images, labels in dataset:
        embeddings = model(images).numpy()
        data = np.concatenate([data, embeddings], axis=0)
    # Re-read labels in dataset order (shuffle=False above keeps them aligned).
    labels = dr.get_labels(Set.TEST)
    # Fixed random_state makes the 2-D projection reproducible.
    reducer = umap.UMAP(random_state=42)
    reducer.fit(data)
    umap.plot.points(reducer, labels=labels)
    umap.plot.plt.show()
| [
"tensorflow.keras.applications.EfficientNetB2",
"tensorflow.image.grayscale_to_rgb",
"tensorflow.keras.applications.efficientnet.preprocess_input",
"numpy.empty",
"umap.plot.plt.show",
"tensorflow.keras.Model",
"src.data.image_data_reader.ImageDataReader",
"umap.UMAP",
"tensorflow.keras.layers.Input... | [((246, 318), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(48, 48, 3)', 'dtype': 'tf.float32', 'name': '"""image"""'}), "(shape=(48, 48, 3), dtype=tf.float32, name='image')\n", (267, 318), True, 'import tensorflow as tf\n'), ((345, 403), 'tensorflow.keras.applications.efficientnet.preprocess_input', 'tf.keras.applications.efficientnet.preprocess_input', (['input'], {}), '(input)\n', (396, 403), True, 'import tensorflow as tf\n'), ((417, 541), 'tensorflow.keras.applications.EfficientNetB2', 'tf.keras.applications.EfficientNetB2', ([], {'include_top': '(False)', 'weights': '"""imagenet"""', 'input_tensor': 'input', 'input_shape': '(48, 48, 3)'}), "(include_top=False, weights='imagenet',\n input_tensor=input, input_shape=(48, 48, 3))\n", (453, 541), True, 'import tensorflow as tf\n'), ((662, 691), 'tensorflow.keras.Model', 'tf.keras.Model', (['input', 'output'], {}), '(input, output)\n', (676, 691), True, 'import tensorflow as tf\n'), ((750, 767), 'src.data.image_data_reader.ImageDataReader', 'ImageDataReader', ([], {}), '()\n', (765, 767), False, 'from src.data.image_data_reader import ImageDataReader, Set\n'), ((957, 976), 'numpy.empty', 'np.empty', (['(0, 5632)'], {}), '((0, 5632))\n', (965, 976), True, 'import numpy as np\n'), ((1164, 1190), 'umap.UMAP', 'umap.UMAP', ([], {'random_state': '(42)'}), '(random_state=42)\n', (1173, 1190), False, 'import umap\n'), ((1217, 1257), 'umap.plot.points', 'umap.plot.points', (['reducer'], {'labels': 'labels'}), '(reducer, labels=labels)\n', (1233, 1257), False, 'import umap\n'), ((1262, 1282), 'umap.plot.plt.show', 'umap.plot.plt.show', ([], {}), '()\n', (1280, 1282), False, 'import umap\n'), ((616, 641), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (639, 641), True, 'import tensorflow as tf\n'), ((1070, 1112), 'numpy.concatenate', 'np.concatenate', (['[data, embeddings]'], {'axis': '(0)'}), '([data, embeddings], axis=0)\n', 
(1084, 1112), True, 'import numpy as np\n'), ((882, 910), 'tensorflow.image.grayscale_to_rgb', 'tf.image.grayscale_to_rgb', (['x'], {}), '(x)\n', (907, 910), True, 'import tensorflow as tf\n')] |
import unittest
import numpy as np
from pykrev import van_krevelen_plot, element_ratios, element_counts, kendrick_mass_defect_plot, multi_van_krevelen_plot, van_krevelen_histogram, missing_plot, unique_plot, double_bond_equivalent, atomic_class_plot, compound_class_plot, mass_spectrum, mass_histogram
class TestPLOTTING(unittest.TestCase):
    """Smoke tests for the pykrev plotting API.

    Each test calls a plotting function on a small list of molecular
    formula strings and passes if no exception is raised; outputs are
    not asserted against expected figures.
    """
    def setUp(self):
        pass
    def test_van_krevelen_plot_no_patch(self):
        """van_krevelen_plot with an empty patch_classes list."""
        x = ['C13H14O5','C13H14N2O4S2','C36H45ClN6O12','C9H11NO2', 'C9H11NO3', 'C11H12N2O2', 'C5H7NO3', 'C5H9NO3', 'C6H12N2O4S2','C6H11NO3S']
        x_dbe = double_bond_equivalent(x)
        van_krevelen_plot(x, y_ratio = 'NC', c = x_dbe, patch_classes = [])
    def test_van_krevelen_plot_patch(self):
        """van_krevelen_plot with compound-class patches drawn."""
        x = ['C13H14O5','C13H14N2O4S2','C36H45ClN6O12','C9H11NO2', 'C9H11NO3', 'C11H12N2O2', 'C5H7NO3', 'C5H9NO3', 'C6H12N2O4S2','C6H11NO3S']
        x_dbe = double_bond_equivalent(x)
        fig,ax = van_krevelen_plot(x, y_ratio = 'NC', c = x_dbe, patch_classes = ['lipid-like','lignin-like'], patch_alpha = 0.2, patch_text = False, patch_colors = ['#ffffbf','#ffffbf'])
    def test_multi_van_krevelen_plot_no_patch(self):
        """multi_van_krevelen_plot for two groups, no patches."""
        x = ['C13H14O5','C13H14N2O4S2','C36H45ClN6O12','C9H11NO2', 'C9H11NO3', 'C11H12N2O2', 'C5H7NO3', 'C5H9NO3', 'C6H12N2O4S2','C6H11NO3S']
        x2 = ['C14H14O5','C12H14N2O4S2','C36H45ClN6O12','C9H14NO2', 'C9H11N2O3', 'C11H12N2O2', 'C55H7NO3', 'C5H9NO3', 'C6H12N2O4S2','C6H11NO3S']
        multi_van_krevelen_plot(x, x2, group_labels = ['Group_1','Group_2'], patch_classes = [])
    def test_multi_van_krevelen_plot_patch(self):
        """multi_van_krevelen_plot for two groups with patches and text."""
        x = ['C13H14O5','C13H14N2O4S2','C36H45ClN6O12','C9H11NO2', 'C9H11NO3', 'C11H12N2O2', 'C5H7NO3', 'C5H9NO3', 'C6H12N2O4S2','C6H11NO3S']
        x2 = ['C14H14O5','C12H14N2O4S2','C36H45ClN6O12','C9H14NO2', 'C9H11N2O3', 'C11H12N2O2', 'C55H7NO3', 'C5H9NO3', 'C6H12N2O4S2','C6H11NO3S']
        multi_van_krevelen_plot(x, x2, group_labels = ['Group_1','Group_2'], patch_classes = ['tannin-like','lignin-like'], patch_alpha = 0.4, patch_text = True, patch_colors = ['#ffffbf','#ffffbf'])
    def test_missing_plot(self):
        """missing_plot: formulae absent from one of the two groups."""
        x = ['C13H14O5','C13H14N2O4S2','C36H45ClN6O12','C9H11NO2', 'C9H11NO3', 'C11H12N2O2', 'C5H7NO3', 'C5H9NO3', 'C6H12N2O4S2','C6H11NO3S']
        x2 = ['C14H14O5','C12H14N2O4S2','C36H45ClN6O12','C9H14NO2', 'C9H11N2O3', 'C11H12N2O2', 'C55H7NO3', 'C5H9NO3', 'C6H12N2O4S2','C6H11NO3S']
        missing_plot(x,x2,y_ratio = 'NC',group_labels = ['Group_1','Group_2'])
    def test_unique_plot(self):
        """unique_plot: formulae unique to each of the two groups."""
        x = ['C13H14O5','C13H14N2O4S2','C36H45ClN6O12','C9H11NO2', 'C9H11NO3', 'C11H12N2O2', 'C5H7NO3', 'C5H9NO3', 'C6H12N2O4S2','C6H11NO3S']
        x2 = ['C14H14O5','C12H14N2O4S2','C36H45ClN6O12','C9H14NO2', 'C9H11N2O3', 'C11H12N2O2', 'C55H7NO3', 'C5H9NO3', 'C6H12N2O4S2','C6H11NO3S']
        unique_plot(x,x2,y_ratio = 'SC',group_labels = ['Group_1','Group_2'])
    def test_kmd_plot(self):
        """kendrick_mass_defect_plot with each supported rounding mode."""
        z = np.array([1000,2432,3000,4201,2000,5990,1000,6520,8000,9001])
        kendrick_mass_defect_plot(z, base = ['CO'], rounding = 'even')
        kendrick_mass_defect_plot(z, base = ['CO'], rounding = 'rint')
        kendrick_mass_defect_plot(z, base = ['CO'], rounding = 'ceil')
        kendrick_mass_defect_plot(z, base = ['CO'], rounding = 'floor')
    def test_van_krevelen_histogram(self):
        """van_krevelen_histogram with default binning."""
        x = ['C13H14O5','C13H14N2O4S2','C36H45ClN6O12','C9H11NO2', 'C9H11NO3', 'C11H12N2O2', 'C5H7NO3', 'C5H9NO3', 'C6H12N2O4S2','C6H11NO3S']
        van_krevelen_histogram(x)
    def test_atomic_class_plot(self):
        """atomic_class_plot keyed on sulphur counts."""
        x = ['C13H14O5','C13H14N2O4S2','C36H45ClN6O12','C9H11NO2', 'C9H11NO3', 'C11H12N2O2', 'C5H7NO3', 'C5H9NO3', 'C6H12N2O4S2','C6H11NO3S']
        atomic_class_plot(x, element = 'S')
    def test_compound_class_plot(self):
        """compound_class_plot using the KELL classification method."""
        x = ['C13H14O5','C13H14N2O4S2','C36H45ClN6O12','C9H11NO2', 'C9H11NO3', 'C11H12N2O2', 'C5H7NO3', 'C5H9NO3', 'C6H12N2O4S2','C6H11NO3S']
        compound_class_plot(x, method = 'KELL')
    def test_mass_histogram(self):
        """mass_histogram with default and nominal mass methods."""
        x = ['C13H14O5','C13H14N2O4S2','C36H45ClN6O12','C9H11NO2', 'C9H11NO3', 'C11H12N2O2', 'C5H7NO3', 'C5H9NO3', 'C6H12N2O4S2','C6H11NO3S']
        mass_histogram(x)
        mass_histogram(x, method = 'nominal')
    def test_mass_spectrum(self):
        """mass_spectrum with explicit intensities, default and nominal."""
        x = ['C13H14O5','C13H14N2O4S2','C36H45ClN6O12','C9H11NO2', 'C9H11NO3', 'C11H12N2O2', 'C5H7NO3', 'C5H9NO3', 'C6H12N2O4S2','C6H11NO3S']
        y = np.array([3210,43,432,423,42,10,103,305,2054,1388])
        mass_spectrum(x,y)
        mass_spectrum(x,y, method = 'nominal')
if __name__ == '__main__':
unittest.main() | [
"unittest.main",
"pykrev.kendrick_mass_defect_plot",
"pykrev.atomic_class_plot",
"pykrev.mass_histogram",
"pykrev.mass_spectrum",
"pykrev.compound_class_plot",
"pykrev.missing_plot",
"pykrev.unique_plot",
"numpy.array",
"pykrev.double_bond_equivalent",
"pykrev.van_krevelen_plot",
"pykrev.van_k... | [((4514, 4529), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4527, 4529), False, 'import unittest\n'), ((581, 606), 'pykrev.double_bond_equivalent', 'double_bond_equivalent', (['x'], {}), '(x)\n', (603, 606), False, 'from pykrev import van_krevelen_plot, element_ratios, element_counts, kendrick_mass_defect_plot, multi_van_krevelen_plot, van_krevelen_histogram, missing_plot, unique_plot, double_bond_equivalent, atomic_class_plot, compound_class_plot, mass_spectrum, mass_histogram\n'), ((614, 675), 'pykrev.van_krevelen_plot', 'van_krevelen_plot', (['x'], {'y_ratio': '"""NC"""', 'c': 'x_dbe', 'patch_classes': '[]'}), "(x, y_ratio='NC', c=x_dbe, patch_classes=[])\n", (631, 675), False, 'from pykrev import van_krevelen_plot, element_ratios, element_counts, kendrick_mass_defect_plot, multi_van_krevelen_plot, van_krevelen_histogram, missing_plot, unique_plot, double_bond_equivalent, atomic_class_plot, compound_class_plot, mass_spectrum, mass_histogram\n'), ((883, 908), 'pykrev.double_bond_equivalent', 'double_bond_equivalent', (['x'], {}), '(x)\n', (905, 908), False, 'from pykrev import van_krevelen_plot, element_ratios, element_counts, kendrick_mass_defect_plot, multi_van_krevelen_plot, van_krevelen_histogram, missing_plot, unique_plot, double_bond_equivalent, atomic_class_plot, compound_class_plot, mass_spectrum, mass_histogram\n'), ((925, 1094), 'pykrev.van_krevelen_plot', 'van_krevelen_plot', (['x'], {'y_ratio': '"""NC"""', 'c': 'x_dbe', 'patch_classes': "['lipid-like', 'lignin-like']", 'patch_alpha': '(0.2)', 'patch_text': '(False)', 'patch_colors': "['#ffffbf', '#ffffbf']"}), "(x, y_ratio='NC', c=x_dbe, patch_classes=['lipid-like',\n 'lignin-like'], patch_alpha=0.2, patch_text=False, patch_colors=[\n '#ffffbf', '#ffffbf'])\n", (942, 1094), False, 'from pykrev import van_krevelen_plot, element_ratios, element_counts, kendrick_mass_defect_plot, multi_van_krevelen_plot, van_krevelen_histogram, missing_plot, unique_plot, 
double_bond_equivalent, atomic_class_plot, compound_class_plot, mass_spectrum, mass_histogram\n'), ((1442, 1531), 'pykrev.multi_van_krevelen_plot', 'multi_van_krevelen_plot', (['x', 'x2'], {'group_labels': "['Group_1', 'Group_2']", 'patch_classes': '[]'}), "(x, x2, group_labels=['Group_1', 'Group_2'],\n patch_classes=[])\n", (1465, 1531), False, 'from pykrev import van_krevelen_plot, element_ratios, element_counts, kendrick_mass_defect_plot, multi_van_krevelen_plot, van_krevelen_histogram, missing_plot, unique_plot, double_bond_equivalent, atomic_class_plot, compound_class_plot, mass_spectrum, mass_histogram\n'), ((1874, 2066), 'pykrev.multi_van_krevelen_plot', 'multi_van_krevelen_plot', (['x', 'x2'], {'group_labels': "['Group_1', 'Group_2']", 'patch_classes': "['tannin-like', 'lignin-like']", 'patch_alpha': '(0.4)', 'patch_text': '(True)', 'patch_colors': "['#ffffbf', '#ffffbf']"}), "(x, x2, group_labels=['Group_1', 'Group_2'],\n patch_classes=['tannin-like', 'lignin-like'], patch_alpha=0.4,\n patch_text=True, patch_colors=['#ffffbf', '#ffffbf'])\n", (1897, 2066), False, 'from pykrev import van_krevelen_plot, element_ratios, element_counts, kendrick_mass_defect_plot, multi_van_krevelen_plot, van_krevelen_histogram, missing_plot, unique_plot, double_bond_equivalent, atomic_class_plot, compound_class_plot, mass_spectrum, mass_histogram\n'), ((2392, 2462), 'pykrev.missing_plot', 'missing_plot', (['x', 'x2'], {'y_ratio': '"""NC"""', 'group_labels': "['Group_1', 'Group_2']"}), "(x, x2, y_ratio='NC', group_labels=['Group_1', 'Group_2'])\n", (2404, 2462), False, 'from pykrev import van_krevelen_plot, element_ratios, element_counts, kendrick_mass_defect_plot, multi_van_krevelen_plot, van_krevelen_histogram, missing_plot, unique_plot, double_bond_equivalent, atomic_class_plot, compound_class_plot, mass_spectrum, mass_histogram\n'), ((2788, 2857), 'pykrev.unique_plot', 'unique_plot', (['x', 'x2'], {'y_ratio': '"""SC"""', 'group_labels': "['Group_1', 'Group_2']"}), "(x, x2, 
y_ratio='SC', group_labels=['Group_1', 'Group_2'])\n", (2799, 2857), False, 'from pykrev import van_krevelen_plot, element_ratios, element_counts, kendrick_mass_defect_plot, multi_van_krevelen_plot, van_krevelen_histogram, missing_plot, unique_plot, double_bond_equivalent, atomic_class_plot, compound_class_plot, mass_spectrum, mass_histogram\n'), ((2899, 2969), 'numpy.array', 'np.array', (['[1000, 2432, 3000, 4201, 2000, 5990, 1000, 6520, 8000, 9001]'], {}), '([1000, 2432, 3000, 4201, 2000, 5990, 1000, 6520, 8000, 9001])\n', (2907, 2969), True, 'import numpy as np\n'), ((2968, 3026), 'pykrev.kendrick_mass_defect_plot', 'kendrick_mass_defect_plot', (['z'], {'base': "['CO']", 'rounding': '"""even"""'}), "(z, base=['CO'], rounding='even')\n", (2993, 3026), False, 'from pykrev import van_krevelen_plot, element_ratios, element_counts, kendrick_mass_defect_plot, multi_van_krevelen_plot, van_krevelen_histogram, missing_plot, unique_plot, double_bond_equivalent, atomic_class_plot, compound_class_plot, mass_spectrum, mass_histogram\n'), ((3038, 3096), 'pykrev.kendrick_mass_defect_plot', 'kendrick_mass_defect_plot', (['z'], {'base': "['CO']", 'rounding': '"""rint"""'}), "(z, base=['CO'], rounding='rint')\n", (3063, 3096), False, 'from pykrev import van_krevelen_plot, element_ratios, element_counts, kendrick_mass_defect_plot, multi_van_krevelen_plot, van_krevelen_histogram, missing_plot, unique_plot, double_bond_equivalent, atomic_class_plot, compound_class_plot, mass_spectrum, mass_histogram\n'), ((3108, 3166), 'pykrev.kendrick_mass_defect_plot', 'kendrick_mass_defect_plot', (['z'], {'base': "['CO']", 'rounding': '"""ceil"""'}), "(z, base=['CO'], rounding='ceil')\n", (3133, 3166), False, 'from pykrev import van_krevelen_plot, element_ratios, element_counts, kendrick_mass_defect_plot, multi_van_krevelen_plot, van_krevelen_histogram, missing_plot, unique_plot, double_bond_equivalent, atomic_class_plot, compound_class_plot, mass_spectrum, mass_histogram\n'), ((3178, 3237), 
'pykrev.kendrick_mass_defect_plot', 'kendrick_mass_defect_plot', (['z'], {'base': "['CO']", 'rounding': '"""floor"""'}), "(z, base=['CO'], rounding='floor')\n", (3203, 3237), False, 'from pykrev import van_krevelen_plot, element_ratios, element_counts, kendrick_mass_defect_plot, multi_van_krevelen_plot, van_krevelen_histogram, missing_plot, unique_plot, double_bond_equivalent, atomic_class_plot, compound_class_plot, mass_spectrum, mass_histogram\n'), ((3434, 3459), 'pykrev.van_krevelen_histogram', 'van_krevelen_histogram', (['x'], {}), '(x)\n', (3456, 3459), False, 'from pykrev import van_krevelen_plot, element_ratios, element_counts, kendrick_mass_defect_plot, multi_van_krevelen_plot, van_krevelen_histogram, missing_plot, unique_plot, double_bond_equivalent, atomic_class_plot, compound_class_plot, mass_spectrum, mass_histogram\n'), ((3647, 3680), 'pykrev.atomic_class_plot', 'atomic_class_plot', (['x'], {'element': '"""S"""'}), "(x, element='S')\n", (3664, 3680), False, 'from pykrev import van_krevelen_plot, element_ratios, element_counts, kendrick_mass_defect_plot, multi_van_krevelen_plot, van_krevelen_histogram, missing_plot, unique_plot, double_bond_equivalent, atomic_class_plot, compound_class_plot, mass_spectrum, mass_histogram\n'), ((3872, 3909), 'pykrev.compound_class_plot', 'compound_class_plot', (['x'], {'method': '"""KELL"""'}), "(x, method='KELL')\n", (3891, 3909), False, 'from pykrev import van_krevelen_plot, element_ratios, element_counts, kendrick_mass_defect_plot, multi_van_krevelen_plot, van_krevelen_histogram, missing_plot, unique_plot, double_bond_equivalent, atomic_class_plot, compound_class_plot, mass_spectrum, mass_histogram\n'), ((4104, 4121), 'pykrev.mass_histogram', 'mass_histogram', (['x'], {}), '(x)\n', (4118, 4121), False, 'from pykrev import van_krevelen_plot, element_ratios, element_counts, kendrick_mass_defect_plot, multi_van_krevelen_plot, van_krevelen_histogram, missing_plot, unique_plot, double_bond_equivalent, atomic_class_plot, 
compound_class_plot, mass_spectrum, mass_histogram\n'), ((4129, 4164), 'pykrev.mass_histogram', 'mass_histogram', (['x'], {'method': '"""nominal"""'}), "(x, method='nominal')\n", (4143, 4164), False, 'from pykrev import van_krevelen_plot, element_ratios, element_counts, kendrick_mass_defect_plot, multi_van_krevelen_plot, van_krevelen_histogram, missing_plot, unique_plot, double_bond_equivalent, atomic_class_plot, compound_class_plot, mass_spectrum, mass_histogram\n'), ((4358, 4418), 'numpy.array', 'np.array', (['[3210, 43, 432, 423, 42, 10, 103, 305, 2054, 1388]'], {}), '([3210, 43, 432, 423, 42, 10, 103, 305, 2054, 1388])\n', (4366, 4418), True, 'import numpy as np\n'), ((4417, 4436), 'pykrev.mass_spectrum', 'mass_spectrum', (['x', 'y'], {}), '(x, y)\n', (4430, 4436), False, 'from pykrev import van_krevelen_plot, element_ratios, element_counts, kendrick_mass_defect_plot, multi_van_krevelen_plot, van_krevelen_histogram, missing_plot, unique_plot, double_bond_equivalent, atomic_class_plot, compound_class_plot, mass_spectrum, mass_histogram\n'), ((4443, 4480), 'pykrev.mass_spectrum', 'mass_spectrum', (['x', 'y'], {'method': '"""nominal"""'}), "(x, y, method='nominal')\n", (4456, 4480), False, 'from pykrev import van_krevelen_plot, element_ratios, element_counts, kendrick_mass_defect_plot, multi_van_krevelen_plot, van_krevelen_histogram, missing_plot, unique_plot, double_bond_equivalent, atomic_class_plot, compound_class_plot, mass_spectrum, mass_histogram\n')] |
import argparse
import json
import numpy as np
from evaulate_pt import *
from evaulate_pt import Config as cg_pt
from evaulate_tf import DatasetGeneratorTF, TfModel
from evaulate_tf import Config as cg
from evaulate_sst import *
def argp():
    """Parse the command-line arguments for the inference script.

    Returns:
        A ``(text, audio, fm)`` tuple: the input text, the audio path
        (or the literal string ``'None'``), and the framework selector.
    """
    parser = argparse.ArgumentParser(description='Script to train pytorch script on text data')
    # Positional arguments, declared table-style: (name, help text).
    positionals = (
        ('text', 'Enter the text you want to check on'),
        ('audio', 'Pass audio else Enter None'),
        ('fm', 'pt or tf for inference'),
    )
    for name, help_text in positionals:
        parser.add_argument(name, help=help_text)
    parsed = parser.parse_args()
    return parsed.text, parsed.audio, parsed.fm
if __name__=="__main__":
    # CLI inference: classify `text` into action/object/location labels with
    # either the PyTorch ("pt") or TensorFlow ("tf") model, then optionally
    # transcribe `audio` with Wav2Vec2.
    text, audio, fm = argp()
    # Per-head number of output classes.
    column_list = {"action":6, "object":14, "location":4}
    # NOTE(review): vocab/load_model/DatasetGenerator/Collate/EngineTest come
    # from the star imports of evaulate_pt / evaulate_sst — verify origins.
    vocab_list = vocab()
    if fm=="pt":
        for column_name in column_list.keys():
            # Load the per-column checkpoint for CPU inference.
            model = load_model(f"{cg_pt.globals_['model_save_dir']}/{column_name}_{cg_pt.globals_['version']}.pt",vocab_list, column_list[column_name], 'cpu')
            model.to('cpu')
            # Single-sample dataset built from the input text.
            test_dataset = DatasetGenerator(transcription=[text], vocab_list=vocab_list, Is_Train=False)
            test_loader = torch.utils.data.DataLoader(
                test_dataset,
                collate_fn=Collate(vocab_list.string_to_index['<PAD>'], Is_Train=False),
                batch_size=1,
                num_workers=1,
                shuffle=False,
                pin_memory=False,
            )
            engine = EngineTest(model)
            pred = engine.fit(test_loader)
            # Map the predicted index back to its label name.
            with open(f'label_keys\{column_name}_key.json', 'r') as f:
                action = json.load(f)
            action = {k: v for k, v in enumerate(action)}
            print(f'{column_name}: {action[pred[0]]}')
    if fm=="tf":
        for column_name in column_list.keys():
            # Rebuild the TF architecture and restore the trained weights.
            model = TfModel(input=vocab_list.__len__(), n_classes=column_list[column_name]).get_model()
            model.load_weights(f"{cg.globals_['model_save_dir']}/{column_name}_{cg.globals_['version']}.ckpt")
            dataset_valid = DatasetGeneratorTF(x=[text], vocab_list=vocab_list, Is_Train=False).data()
            dataset_valid = dataset_valid.batch(1, drop_remainder=False)
            temp=model.predict(dataset_valid)
            # argmax over class logits -> predicted index.
            temp=np.argmax(temp)
            with open(f'label_keys\{column_name}_key.json', 'r') as f:
                action = json.load(f)
            action = {k: v for k, v in enumerate(action)}
            print(f'{column_name}: {action[temp]}')
    if audio:
        # Speech-to-text with a pretrained Wav2Vec2 model.
        # NOTE(review): Wav2Vec2Processor/Wav2Vec2ForCTC/Config/librosa are
        # pulled in via star imports — confirm against evaulate_sst.
        processor = Wav2Vec2Processor.from_pretrained(Config.globals_['processor'])
        model = Wav2Vec2ForCTC.from_pretrained(Config.globals_['model'])
        # Resample the clip to 8 kHz to match the processor call below.
        speech_array, sampling_rate = librosa.load(audio, sr=8000)
        inputs = processor(speech_array, sampling_rate=8000, return_tensors="pt", padding=True)
        with torch.no_grad():
            logits = model(inputs.input_values.to('cpu')).logits
            pred_ids = torch.argmax(logits, dim=-1)
        pred = processor.batch_decode(pred_ids)
        print(f'Statement: {pred[0]}')
| [
"evaulate_tf.DatasetGeneratorTF",
"json.load",
"argparse.ArgumentParser",
"numpy.argmax"
] | [((258, 345), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Script to train pytorch script on text data"""'}), "(description=\n 'Script to train pytorch script on text data')\n", (281, 345), False, 'import argparse\n'), ((2441, 2456), 'numpy.argmax', 'np.argmax', (['temp'], {}), '(temp)\n', (2450, 2456), True, 'import numpy as np\n'), ((1753, 1765), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1762, 1765), False, 'import json\n'), ((2554, 2566), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2563, 2566), False, 'import json\n'), ((2229, 2296), 'evaulate_tf.DatasetGeneratorTF', 'DatasetGeneratorTF', ([], {'x': '[text]', 'vocab_list': 'vocab_list', 'Is_Train': '(False)'}), '(x=[text], vocab_list=vocab_list, Is_Train=False)\n', (2247, 2296), False, 'from evaulate_tf import DatasetGeneratorTF, TfModel\n')] |
import warnings
import unittest
from itertools import product
import random
import torch
from torch.testing._internal.common_utils import \
(TestCase, run_tests, do_test_empty_full, TEST_NUMPY, suppress_warnings,
IS_WINDOWS, torch_to_numpy_dtype_dict, slowTest)
from torch.testing._internal.common_device_type import \
(instantiate_device_type_tests, deviceCountAtLeast, onlyOnCPUAndCUDA,
onlyCPU, skipCUDAIfNotRocm, largeCUDATensorTest, precisionOverride, dtypes,
onlyCUDA, skipCPUIf, dtypesIfCUDA)
if TEST_NUMPY:
import numpy as np
# Test suite for tensor creation ops
#
# Includes creation functions like torch.eye, random creation functions like
# torch.rand, and *like functions like torch.ones_like.
# DOES NOT INCLUDE view ops, which are tested in TestViewOps (currently in
# test_torch.py) OR numpy interop (which is also still tested in test_torch.py)
#
# See https://pytorch.org/docs/master/torch.html#creation-ops
class TestTensorCreation(TestCase):
exact_dtype = True
# TODO: this test should be updated
@onlyOnCPUAndCUDA
def test_empty_full(self, device):
torch_device = torch.device(device)
device_type = torch_device.type
if device_type == 'cpu':
do_test_empty_full(self, torch.testing.get_all_math_dtypes('cpu'), torch.strided, torch_device)
if device_type == 'cuda':
do_test_empty_full(self, torch.testing.get_all_math_dtypes('cpu'), torch.strided, None)
do_test_empty_full(self, torch.testing.get_all_math_dtypes('cpu'), torch.strided, torch_device)
    # TODO: this test should be updated
    @suppress_warnings
    @onlyOnCPUAndCUDA
    @deviceCountAtLeast(1)
    def test_tensor_device(self, devices):
        """torch.tensor/torch.ones must land on the requested device.

        Covers string device specs ('cpu', 'cpu:0', 'cuda:0', 'cuda:1'),
        integer device indices, .cuda() moves, the ambient
        torch.cuda.device context, and rejection of .cuda('cpu').
        """
        device_type = torch.device(devices[0]).type
        if device_type == 'cpu':
            self.assertEqual('cpu', torch.tensor(5).device.type)
            self.assertEqual('cpu',
                             torch.ones((2, 3), dtype=torch.float32, device='cpu').device.type)
            self.assertEqual('cpu',
                             torch.ones((2, 3), dtype=torch.float32, device='cpu:0').device.type)
            self.assertEqual('cpu',
                             torch.tensor(torch.ones((2, 3), dtype=torch.float32), device='cpu:0').device.type)
            if TEST_NUMPY:
                self.assertEqual('cpu', torch.tensor(np.random.randn(2, 3), device='cpu').device.type)
        if device_type == 'cuda':
            self.assertEqual('cuda:0', str(torch.tensor(5).cuda(0).device))
            self.assertEqual('cuda:0', str(torch.tensor(5).cuda('cuda:0').device))
            self.assertEqual('cuda:0',
                             str(torch.tensor(5, dtype=torch.int64, device=0).device))
            self.assertEqual('cuda:0',
                             str(torch.tensor(5, dtype=torch.int64, device='cuda:0').device))
            self.assertEqual('cuda:0',
                             str(torch.tensor(torch.ones((2, 3), dtype=torch.float32), device='cuda:0').device))
            if TEST_NUMPY:
                self.assertEqual('cuda:0', str(torch.tensor(np.random.randn(2, 3), device='cuda:0').device))
            # With a cuda device context active, a bare 'cuda' spec must
            # resolve to the current device.
            for device in devices:
                with torch.cuda.device(device):
                    device_string = 'cuda:' + str(torch.cuda.current_device())
                    self.assertEqual(device_string,
                                     str(torch.tensor(5, dtype=torch.int64, device='cuda').device))
            # .cuda() only accepts cuda targets.
            with self.assertRaises(RuntimeError):
                torch.tensor(5).cuda('cpu')
            with self.assertRaises(RuntimeError):
                torch.tensor(5).cuda('cpu:0')
            # Same checks against a second GPU when available.
            if len(devices) > 1:
                self.assertEqual('cuda:1', str(torch.tensor(5).cuda(1).device))
                self.assertEqual('cuda:1', str(torch.tensor(5).cuda('cuda:1').device))
                self.assertEqual('cuda:1',
                                 str(torch.tensor(5, dtype=torch.int64, device=1).device))
                self.assertEqual('cuda:1',
                                 str(torch.tensor(5, dtype=torch.int64, device='cuda:1').device))
                self.assertEqual('cuda:1',
                                 str(torch.tensor(torch.ones((2, 3), dtype=torch.float32),
                                     device='cuda:1').device))
                if TEST_NUMPY:
                    self.assertEqual('cuda:1',
                                     str(torch.tensor(np.random.randn(2, 3), device='cuda:1').device))
# TODO: this test should be updated
@onlyOnCPUAndCUDA
def test_as_strided_neg(self, device):
error = r'as_strided: Negative strides are not supported at the ' \
r'moment, got strides: \[-?[0-9]+(, -?[0-9]+)*\]'
with self.assertRaisesRegex(RuntimeError, error):
torch.as_strided(torch.ones(3, 3, device=device), (1, 1), (2, -1))
with self.assertRaisesRegex(RuntimeError, error):
torch.as_strided(torch.ones(14, device=device), (2,), (-11,))
# TODO: this test should be updated
def test_zeros(self, device):
res1 = torch.zeros(100, 100, device=device)
res2 = torch.tensor((), device=device)
torch.zeros(100, 100, device=device, out=res2)
self.assertEqual(res1, res2)
boolTensor = torch.zeros(2, 2, device=device, dtype=torch.bool)
expected = torch.tensor([[False, False], [False, False]],
device=device, dtype=torch.bool)
self.assertEqual(boolTensor, expected)
halfTensor = torch.zeros(1, 1, device=device, dtype=torch.half)
expected = torch.tensor([[0.]], device=device, dtype=torch.float16)
self.assertEqual(halfTensor, expected)
bfloat16Tensor = torch.zeros(1, 1, device=device, dtype=torch.bfloat16)
expected = torch.tensor([[0.]], device=device, dtype=torch.bfloat16)
self.assertEqual(bfloat16Tensor, expected)
complexTensor = torch.zeros(2, 2, device=device, dtype=torch.complex64)
expected = torch.tensor([[0., 0.], [0., 0.]], device=device, dtype=torch.complex64)
self.assertEqual(complexTensor, expected)
# TODO: this test should be updated
def test_zeros_out(self, device):
shape = (3, 4)
out = torch.zeros(shape, device=device)
torch.zeros(shape, device=device, out=out)
# change the dtype, layout, device
with self.assertRaises(RuntimeError):
torch.zeros(shape, device=device, dtype=torch.int64, out=out)
with self.assertRaises(RuntimeError):
torch.zeros(shape, device=device, layout=torch.sparse_coo, out=out)
# leave them the same
self.assertEqual(torch.zeros(shape, device=device),
torch.zeros(shape, device=device, dtype=out.dtype, out=out))
self.assertEqual(torch.zeros(shape, device=device),
torch.zeros(shape, device=device, layout=torch.strided, out=out))
self.assertEqual(torch.zeros(shape, device=device),
torch.zeros(shape, device=device, out=out))
# TODO: this test should be updated
def test_ones(self, device):
res1 = torch.ones(100, 100, device=device)
res2 = torch.tensor((), device=device)
torch.ones(100, 100, device=device, out=res2)
self.assertEqual(res1, res2)
# test boolean tensor
res1 = torch.ones(1, 2, device=device, dtype=torch.bool)
expected = torch.tensor([[True, True]], device=device, dtype=torch.bool)
self.assertEqual(res1, expected)
    # TODO: this test should be updated
    @onlyCPU
    def test_constructor_dtypes(self, device):
        """set_default_tensor_type / set_default_dtype round-trips.

        Mutates global default-dtype state; the original default type is
        captured first and restored at the end. Statement order matters.
        """
        default_type = torch.Tensor().type()
        self.assertIs(torch.Tensor().dtype, torch.get_default_dtype())
        self.assertIs(torch.uint8, torch.ByteTensor.dtype)
        self.assertIs(torch.float32, torch.FloatTensor.dtype)
        self.assertIs(torch.float64, torch.DoubleTensor.dtype)
        # String spec form.
        torch.set_default_tensor_type('torch.FloatTensor')
        self.assertIs(torch.float32, torch.get_default_dtype())
        self.assertIs(torch.FloatStorage, torch.Storage)
        # dtype form.
        torch.set_default_dtype(torch.float64)
        self.assertIs(torch.float64, torch.get_default_dtype())
        self.assertIs(torch.DoubleStorage, torch.Storage)
        # Tensor-type class form.
        torch.set_default_tensor_type(torch.FloatTensor)
        self.assertIs(torch.float32, torch.get_default_dtype())
        self.assertIs(torch.FloatStorage, torch.Storage)
        if torch.cuda.is_available():
            torch.set_default_tensor_type(torch.cuda.FloatTensor)
            self.assertIs(torch.float32, torch.get_default_dtype())
            self.assertIs(torch.float32, torch.cuda.FloatTensor.dtype)
            self.assertIs(torch.cuda.FloatStorage, torch.Storage)
            torch.set_default_dtype(torch.float64)
            self.assertIs(torch.float64, torch.get_default_dtype())
            self.assertIs(torch.cuda.DoubleStorage, torch.Storage)
        # don't support integral or sparse default types.
        self.assertRaises(TypeError, lambda: torch.set_default_tensor_type('torch.IntTensor'))
        self.assertRaises(TypeError, lambda: torch.set_default_dtype(torch.int64))
        # don't allow passing dtype to set_default_tensor_type
        self.assertRaises(TypeError, lambda: torch.set_default_tensor_type(torch.float32))
        # Restore the default captured at the top so later tests are unaffected.
        torch.set_default_tensor_type(default_type)
    # TODO: this test should be updated
    @onlyCPU
    def test_constructor_device_legacy(self, device):
        """Legacy typed constructors must reject a mismatched device=.

        torch.FloatTensor/torch.Tensor/x.new are CPU-typed here, so
        device='cuda' must raise; under a cuda default tensor type the
        mirror-image device='cpu' must raise. Global default-tensor-type
        state is saved and restored inside the cuda branch.
        """
        self.assertRaises(RuntimeError, lambda: torch.FloatTensor(device='cuda'))
        self.assertRaises(RuntimeError, lambda: torch.FloatTensor(torch.Size([2, 3, 4]), device='cuda'))
        self.assertRaises(RuntimeError, lambda: torch.FloatTensor((2.0, 3.0), device='cuda'))
        self.assertRaises(RuntimeError, lambda: torch.Tensor(device='cuda'))
        self.assertRaises(RuntimeError, lambda: torch.Tensor(torch.Size([2, 3, 4]), device='cuda'))
        self.assertRaises(RuntimeError, lambda: torch.Tensor((2.0, 3.0), device='cuda'))
        # x.new(...) inherits x's (cpu) type and must likewise reject cuda.
        x = torch.randn((3,), device='cpu')
        self.assertRaises(RuntimeError, lambda: x.new(device='cuda'))
        self.assertRaises(RuntimeError, lambda: x.new(torch.Size([2, 3, 4]), device='cuda'))
        self.assertRaises(RuntimeError, lambda: x.new((2.0, 3.0), device='cuda'))
        if torch.cuda.is_available():
            self.assertRaises(RuntimeError, lambda: torch.cuda.FloatTensor(device='cpu'))
            self.assertRaises(RuntimeError, lambda: torch.cuda.FloatTensor(torch.Size([2, 3, 4]), device='cpu'))
            self.assertRaises(RuntimeError, lambda: torch.cuda.FloatTensor((2.0, 3.0), device='cpu'))
            # Flip the default to cuda, re-check the cpu rejections, restore.
            default_type = torch.Tensor().type()
            torch.set_default_tensor_type(torch.cuda.FloatTensor)
            self.assertRaises(RuntimeError, lambda: torch.Tensor(device='cpu'))
            self.assertRaises(RuntimeError, lambda: torch.Tensor(torch.Size([2, 3, 4]), device='cpu'))
            self.assertRaises(RuntimeError, lambda: torch.Tensor((2.0, 3.0), device='cpu'))
            torch.set_default_tensor_type(torch.cuda.FloatTensor)
            torch.set_default_tensor_type(default_type)
            x = torch.randn((3,), device='cuda')
            self.assertRaises(RuntimeError, lambda: x.new(device='cpu'))
            self.assertRaises(RuntimeError, lambda: x.new(torch.Size([2, 3, 4]), device='cpu'))
            self.assertRaises(RuntimeError, lambda: x.new((2.0, 3.0), device='cpu'))
    # TODO: this test should be updated
    @suppress_warnings
    @onlyCPU
    def test_tensor_factory(self, device):
        # TODO: This test probably doesn't make too much sense now that
        # torch.tensor has been established for a while; it makes more
        # sense to test the legacy behavior in terms of the new behavior
        """torch.tensor: data/copy semantics and bool-conversion edge cases."""
        expected = torch.Tensor([1, 1])
        # test data
        res1 = torch.tensor([1, 1])
        self.assertEqual(res1, expected, exact_dtype=False)
        res1 = torch.tensor([1, 1], dtype=torch.int)
        self.assertEqual(res1, expected, exact_dtype=False)
        self.assertIs(torch.int, res1.dtype)
        # test copy
        res2 = torch.tensor(expected)
        self.assertEqual(res2, expected)
        # Mutating the copy must not affect the source tensor.
        res2[1] = 2
        self.assertEqual(expected, torch.ones_like(expected))
        res2 = torch.tensor(expected, dtype=torch.int)
        self.assertEqual(res1, expected, exact_dtype=False)
        self.assertIs(torch.int, res1.dtype)
        # test copy with numpy
        if TEST_NUMPY:
            for dtype in [np.float64, np.int64, np.int8, np.uint8]:
                a = np.array([5.]).astype(dtype)
                res1 = torch.tensor(a)
                self.assertEqual(5., res1[0].item())
                # numpy data is copied: mutating the array leaves the
                # tensor unchanged.
                a[0] = 7.
                self.assertEqual(5., res1[0].item())
        # test boolean tensor
        a = torch.tensor([True, True, False, True, True], dtype=torch.bool)
        b = torch.tensor([-1, -1.1, 0, 1, 1.1], dtype=torch.bool)
        self.assertEqual(a, b)
        c = torch.tensor([-0.1, -1.1, 0, 1, 0.1], dtype=torch.bool)
        self.assertEqual(a, c)
        d = torch.tensor((-.3, 0, .3, 1, 3 / 7), dtype=torch.bool)
        e = torch.tensor((True, False, True, True, True), dtype=torch.bool)
        self.assertEqual(e, d)
        f = torch.tensor((-1, 0, -1.1, 1, 1.1), dtype=torch.bool)
        self.assertEqual(e, f)
        # Extreme integer/float values must still convert to bool sanely.
        int64_max = torch.iinfo(torch.int64).max
        int64_min = torch.iinfo(torch.int64).min
        float64_max = torch.finfo(torch.float64).max
        float64_min = torch.finfo(torch.float64).min
        g_1 = torch.tensor((float('nan'), 0, int64_min, int64_max, int64_min - 1), dtype=torch.bool)
        self.assertEqual(e, g_1)
        g_2 = torch.tensor((int64_max + 1, 0, (int64_max + 1) * 2, (int64_max + 1) * 2 + 1, float64_min), dtype=torch.bool)
        self.assertEqual(e, g_2)
        g_3 = torch.tensor((float64_max, 0, float64_max + 1, float64_min - 1, float64_max + 1e291), dtype=torch.bool)
        self.assertEqual(e, g_3)
        # Subnormals/complex values: only exact zero is False.
        h = torch.tensor([True, False, False, True, False, True, True], dtype=torch.bool)
        i = torch.tensor([1e-323, 1e-324, 0j, 1e-323j, 1e-324j, 1 + 2j, -1j], dtype=torch.bool)
        self.assertEqual(h, i)
        j = torch.tensor((True, True, True, True), dtype=torch.bool)
        k = torch.tensor((1e323, -1e323, float('inf'), -float('inf')), dtype=torch.bool)
        self.assertEqual(j, k)
    # TODO: this test should be updated
    @suppress_warnings
    @onlyCPU
    def test_tensor_factory_copy_var(self, device):
        """Copy vs aliasing semantics of torch.tensor / new_tensor / as_tensor
        on an autograd variable source."""
        def check_copy(copy, is_leaf, requires_grad, data_ptr=None):
            # NOTE(review): `copy.data_ptr` here is the bound method object,
            # not its result — the comparisons below compare methods, which
            # are equal only for the same underlying storage accessor; this
            # looks like it was meant to be `copy.data_ptr()`. Confirm intent.
            if data_ptr is None:
                data_ptr = copy.data_ptr
            self.assertEqual(copy, source, exact_dtype=False)
            self.assertTrue(copy.is_leaf == is_leaf)
            self.assertTrue(copy.requires_grad == requires_grad)
            self.assertTrue(copy.data_ptr == data_ptr)
        source = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
        # test torch.tensor()
        check_copy(torch.tensor(source), True, False)
        check_copy(torch.tensor(source, requires_grad=False), True, False)
        check_copy(torch.tensor(source, requires_grad=True), True, True)
        # test tensor.new_tensor()
        copy = torch.randn(1)
        check_copy(copy.new_tensor(source), True, False)
        check_copy(copy.new_tensor(source, requires_grad=False), True, False)
        check_copy(copy.new_tensor(source, requires_grad=True), True, True)
        # test torch.as_tensor()
        check_copy(torch.as_tensor(source), source.is_leaf, source.requires_grad, source.data_ptr)  # not copy
        check_copy(torch.as_tensor(source, dtype=torch.float), False, True)  # copy and keep the graph
# TODO: this test should be updated
@onlyCPU
    def test_tensor_factory_type_inference(self, device):
        """Check torch.tensor dtype inference for python scalars, nested
        sequences, bools, complex values and (when numpy is available) numpy
        scalars and arrays.

        The whole battery runs twice, once each with float64 and float32 as
        the process-wide default dtype; the helper saves and restores the
        default, so statement order inside it is load-bearing.
        """
        def test_inference(default_dtype):
            # Temporarily install `default_dtype` as the global default.
            saved_dtype = torch.get_default_dtype()
            torch.set_default_dtype(default_dtype)
            default_complex_dtype = torch.complex64 if default_dtype == torch.float32 else torch.complex128
            self.assertIs(default_dtype, torch.tensor(()).dtype)
            self.assertIs(default_dtype, torch.tensor(5.).dtype)
            self.assertIs(torch.int64, torch.tensor(5).dtype)
            self.assertIs(torch.bool, torch.tensor(True).dtype)
            self.assertIs(torch.int32, torch.tensor(5, dtype=torch.int32).dtype)
            # A single float anywhere in a nested sequence promotes the whole
            # result to the default (floating) dtype.
            self.assertIs(default_dtype, torch.tensor(((7, 5), (9, 5.))).dtype)
            self.assertIs(default_dtype, torch.tensor(((5., 5), (3, 5))).dtype)
            self.assertIs(torch.int64, torch.tensor(((5, 3), (3, 5))).dtype)
            self.assertIs(default_complex_dtype, torch.tensor(((5, 3 + 2j), (3, 5 + 4j))).dtype)
            if TEST_NUMPY:
                # numpy float arrays always map to float64, regardless of the
                # torch default dtype.
                self.assertIs(torch.float64, torch.tensor(np.array(())).dtype)
                self.assertIs(torch.float64, torch.tensor(np.array(5.)).dtype)
                if np.array(5).dtype == np.int64:  # np long, which can be 4 bytes (e.g. on windows)
                    self.assertIs(torch.int64, torch.tensor(np.array(5)).dtype)
                else:
                    self.assertIs(torch.int32, torch.tensor(np.array(5)).dtype)
                self.assertIs(torch.uint8, torch.tensor(np.array(3, dtype=np.uint8)).dtype)
                # Mixed python/numpy nesting follows the same promotion rules.
                self.assertIs(default_dtype, torch.tensor(((7, np.array(5)), (np.array(9), 5.))).dtype)
                self.assertIs(torch.float64, torch.tensor(((7, 5), (9, np.array(5.)))).dtype)
                self.assertIs(torch.int64, torch.tensor(((5, np.array(3)), (np.array(3), 5))).dtype)
            torch.set_default_dtype(saved_dtype)
        test_inference(torch.float64)
        test_inference(torch.float32)
# TODO: this test should be updated
@suppress_warnings
@onlyCPU
def test_new_tensor(self, device):
expected = torch.autograd.Variable(torch.ByteTensor([1, 1]))
# test data
res1 = expected.new_tensor([1, 1])
self.assertEqual(res1, expected)
res1 = expected.new_tensor([1, 1], dtype=torch.int)
self.assertEqual(res1, expected, exact_dtype=False)
self.assertIs(torch.int, res1.dtype)
# test copy
res2 = expected.new_tensor(expected)
self.assertEqual(res2, expected)
res2[1] = 2
self.assertEqual(expected, torch.ones_like(expected))
res2 = expected.new_tensor(expected, dtype=torch.int)
self.assertEqual(res2, expected, exact_dtype=False)
self.assertIs(torch.int, res2.dtype)
# test copy with numpy
if TEST_NUMPY:
a = np.array([5.])
res1 = torch.tensor(a)
res1 = res1.new_tensor(a)
self.assertEqual(5., res1[0].item())
a[0] = 7.
self.assertEqual(5., res1[0].item())
if torch.cuda.device_count() >= 2:
expected = expected.cuda(1)
res1 = expected.new_tensor([1, 1])
self.assertEqual(res1.get_device(), expected.get_device())
res1 = expected.new_tensor([1, 1], dtype=torch.int)
self.assertIs(torch.int, res1.dtype)
self.assertEqual(res1.get_device(), expected.get_device())
res2 = expected.new_tensor(expected)
self.assertEqual(res2.get_device(), expected.get_device())
res2 = expected.new_tensor(expected, dtype=torch.int)
self.assertIs(torch.int, res1.dtype)
self.assertEqual(res2.get_device(), expected.get_device())
res2 = expected.new_tensor(expected, dtype=torch.int, device=0)
self.assertIs(torch.int, res1.dtype)
self.assertEqual(res2.get_device(), 0)
res1 = expected.new_tensor(1)
self.assertEqual(res1.get_device(), expected.get_device())
res1 = expected.new_tensor(1, dtype=torch.int)
self.assertIs(torch.int, res1.dtype)
self.assertEqual(res1.get_device(), expected.get_device())
# TODO: this test should be updated
@onlyCPU
def test_as_tensor(self, device):
# from python data
x = [[0, 1], [2, 3]]
self.assertEqual(torch.tensor(x), torch.as_tensor(x))
self.assertEqual(torch.tensor(x, dtype=torch.float32), torch.as_tensor(x, dtype=torch.float32))
# python data with heterogeneous types
z = [0, 'torch']
with self.assertRaisesRegex(TypeError, "invalid data type"):
torch.tensor(z)
torch.as_tensor(z)
# python data with self-referential lists
z = [0]
z += [z]
with self.assertRaisesRegex(TypeError, "self-referential lists are incompatible"):
torch.tensor(z)
torch.as_tensor(z)
z = [[1, 2], z]
with self.assertRaisesRegex(TypeError, "self-referential lists are incompatible"):
torch.tensor(z)
torch.as_tensor(z)
# from tensor (doesn't copy unless type is different)
y = torch.tensor(x)
self.assertIs(y, torch.as_tensor(y))
self.assertIsNot(y, torch.as_tensor(y, dtype=torch.float32))
if torch.cuda.is_available():
self.assertIsNot(y, torch.as_tensor(y, device='cuda'))
y_cuda = y.to('cuda')
self.assertIs(y_cuda, torch.as_tensor(y_cuda))
self.assertIs(y_cuda, torch.as_tensor(y_cuda, device='cuda'))
if TEST_NUMPY:
# doesn't copy
for dtype in [np.float64, np.int64, np.int8, np.uint8]:
n = np.random.rand(5, 6).astype(dtype)
n_astensor = torch.as_tensor(n)
self.assertEqual(torch.tensor(n), n_astensor)
n_astensor[0][0] = 25.7
self.assertEqual(torch.tensor(n), n_astensor)
# changing dtype causes copy
n = np.random.rand(5, 6).astype(np.float32)
n_astensor = torch.as_tensor(n, dtype=torch.float64)
self.assertEqual(torch.tensor(n, dtype=torch.float64), n_astensor)
n_astensor[0][1] = 250.8
self.assertNotEqual(torch.tensor(n, dtype=torch.float64), n_astensor)
# changing device causes copy
if torch.cuda.is_available():
n = np.random.randn(5, 6)
n_astensor = torch.as_tensor(n, device='cuda')
self.assertEqual(torch.tensor(n, device='cuda'), n_astensor)
n_astensor[0][2] = 250.9
self.assertNotEqual(torch.tensor(n, device='cuda'), n_astensor)
# TODO: this test should be updated
@suppress_warnings
def test_range(self, device):
res1 = torch.range(0, 1, device=device)
res2 = torch.tensor((), device=device)
torch.range(0, 1, device=device, out=res2)
self.assertEqual(res1, res2, atol=0, rtol=0)
# Check range for non-contiguous tensors.
x = torch.zeros(2, 3, device=device)
torch.range(0, 3, device=device, out=x.narrow(1, 1, 2))
res2 = torch.tensor(((0, 0, 1), (0, 2, 3)), device=device, dtype=torch.float32)
self.assertEqual(x, res2, atol=1e-16, rtol=0)
# Check negative
res1 = torch.tensor((1, 0), device=device, dtype=torch.float32)
res2 = torch.tensor((), device=device)
torch.range(1, 0, -1, device=device, out=res2)
self.assertEqual(res1, res2, atol=0, rtol=0)
# Equal bounds
res1 = torch.ones(1, device=device)
res2 = torch.tensor((), device=device)
torch.range(1, 1, -1, device=device, out=res2)
self.assertEqual(res1, res2, atol=0, rtol=0)
torch.range(1, 1, 1, device=device, out=res2)
self.assertEqual(res1, res2, atol=0, rtol=0)
# TODO: this test should be updated
def test_range_warning(self, device):
with warnings.catch_warnings(record=True) as w:
torch.range(0, 10, device=device)
self.assertEqual(len(w), 1)
# TODO: this test should be updated
@onlyCPU
    def test_arange(self, device):
        """Exercise torch.arange broadly: large (vectorized) outputs, out=
        including non-contiguous destinations, single-argument form, negative
        and equal bounds, float/double outputs, bool endpoints, exclusive
        upper bound, dtype inference near rounding boundaries, out-resize
        warnings, and error cases (inf/nan endpoints, overflowing ranges).
        """
        res = torch.tensor(range(10000))
        res1 = torch.arange(0, 10000)  # Use a larger number so vectorized code can be triggered
        res2 = torch.tensor([], dtype=torch.int64)
        torch.arange(0, 10000, out=res2)
        self.assertEqual(res, res1, atol=0, rtol=0)
        self.assertEqual(res, res2, atol=0, rtol=0)
        # Vectorization on non-contiguous tensors
        res = torch.rand(3, 3, 300000).to(torch.int64)
        res = res.permute(2, 0, 1)
        torch.arange(0, 300000 * 3 * 3, out=res)
        self.assertEqual(res.flatten(), torch.arange(0, 300000 * 3 * 3))
        # Check arange with only one argument
        res1 = torch.arange(10)
        res2 = torch.arange(0, 10)
        self.assertEqual(res1, res2, atol=0, rtol=0)
        # Check arange for non-contiguous tensors.
        x = torch.zeros(2, 3)
        torch.arange(0, 4, out=x.narrow(1, 1, 2))
        res2 = torch.Tensor(((0, 0, 1), (0, 2, 3)))
        self.assertEqual(x, res2, atol=1e-16, rtol=0)
        # Check negative
        res1 = torch.Tensor((1, 0))
        res2 = torch.Tensor()
        torch.arange(1, -1, -1, out=res2)
        self.assertEqual(res1, res2, atol=0, rtol=0)
        # Equal bounds
        res1 = torch.ones(1)
        res2 = torch.Tensor()
        torch.arange(1, 0, -1, out=res2)
        self.assertEqual(res1, res2, atol=0, rtol=0)
        torch.arange(1, 2, 1, out=res2)
        self.assertEqual(res1, res2, atol=0, rtol=0)
        # FloatTensor
        res1 = torch.arange(0.6, 0.89, 0.1, out=torch.FloatTensor())
        self.assertEqual(res1, [0.6, 0.7, 0.8])
        res1 = torch.arange(1, 10, 0.3, out=torch.FloatTensor())
        self.assertEqual(res1.size(0), 30)
        self.assertEqual(res1[0], 1)
        self.assertEqual(res1[29], 9.7)
        # DoubleTensor
        res1 = torch.arange(0.6, 0.89, 0.1, out=torch.DoubleTensor())
        self.assertEqual(res1, [0.6, 0.7, 0.8])
        res1 = torch.arange(1, 10, 0.3, out=torch.DoubleTensor())
        self.assertEqual(res1.size(0), 30)
        self.assertEqual(res1[0], 1)
        self.assertEqual(res1[29], 9.7)
        # Bool Input matching numpy semantics
        r = torch.arange(True)
        self.assertEqual(r[0], 0)
        r2 = torch.arange(False)
        self.assertEqual(len(r2), 0)
        self.assertEqual(r.dtype, torch.int64)
        self.assertEqual(r2.dtype, torch.int64)
        # Check that it's exclusive
        r = torch.arange(0, 5)
        self.assertEqual(r.min(), 0)
        self.assertEqual(r.max(), 4)
        self.assertEqual(r.numel(), 5)
        r = torch.arange(0, 5, 2)
        self.assertEqual(r.min(), 0)
        self.assertEqual(r.max(), 4)
        self.assertEqual(r.numel(), 3)
        # Perturbing the end by +/- 1e-6 changes whether the endpoint is
        # included for float outputs.
        r1 = torch.arange(0, 5 + 1e-6)
        # NB: without the dtype, we'll infer output type to be int64
        r2 = torch.arange(0, 5, dtype=torch.float32)
        r3 = torch.arange(0, 5 - 1e-6)
        self.assertEqual(r1[:-1], r2, atol=0, rtol=0)
        self.assertEqual(r2, r3, atol=0, rtol=0)
        r1 = torch.arange(10, -1 + 1e-6, -1)
        # NB: without the dtype, we'll infer output type to be int64
        r2 = torch.arange(10, -1, -1, dtype=torch.float32)
        r3 = torch.arange(10, -1 - 1e-6, -1)
        self.assertEqual(r1, r2, atol=0, rtol=0)
        self.assertEqual(r2, r3[:-1], atol=0, rtol=0)
        # Test Rounding Errors
        line = torch.zeros(size=(1, 49))
        self.assertWarnsRegex(UserWarning, 'The out tensor will be resized',
                              lambda: torch.arange(-1, 1, 2. / 49, dtype=torch.float32, out=line))
        self.assertEqual(line.shape, [50])
        x = torch.empty(1).expand(10)
        self.assertRaises(RuntimeError, lambda: torch.arange(10, out=x))
        msg = "unsupported range"
        self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(0, float('inf')))
        self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(float('inf')))
        # NOTE(review): this loop rebinds the `device` parameter; it looks
        # intentional (the error cases run on every available device type),
        # but worth confirming.
        for device in torch.testing.get_all_device_types():
            self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(-5, float('nan'), device=device))
            # check with step size
            self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(0, float('-inf'), -1, device=device))
            self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(0, float('inf'), device=device))
            self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(float('-inf'), 10, device=device))
            self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(float('nan'), 10, device=device))
            self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(float('inf'), device=device))
            self.assertRaisesRegex(RuntimeError, msg, lambda: torch.arange(float('nan'), device=device))
            self.assertRaisesRegex(
                RuntimeError, "overflow",
                lambda: torch.arange(1.175494351e-38, 3.402823466e+38, device=device))
            # check that it holds a consistent output shape on precision-cornered step sizes
            d = torch.arange(-4.0, 4.0, 0.01, dtype=torch.float32, device=device)
            self.assertEqual(d.shape[0], 800)
# TODO: this test should be updated
@onlyCPU
    def test_arange_inference(self, device):
        """Check torch.arange dtype inference: float endpoints (python floats
        or float tensors) infer the default dtype, pure-integer arguments
        infer int64.

        Temporarily sets the global default dtype to float32 and restores it
        at the end, so statement order is load-bearing.
        """
        saved_dtype = torch.get_default_dtype()
        torch.set_default_dtype(torch.float32)
        # end only
        self.assertIs(torch.float32, torch.arange(1.).dtype)
        self.assertIs(torch.float32, torch.arange(torch.tensor(1.)).dtype)
        self.assertIs(torch.float32, torch.arange(torch.tensor(1., dtype=torch.float64)).dtype)
        self.assertIs(torch.int64, torch.arange(1).dtype)
        self.assertIs(torch.int64, torch.arange(torch.tensor(1)).dtype)
        self.assertIs(torch.int64, torch.arange(torch.tensor(1, dtype=torch.int16)).dtype)
        # start, end, [step]: one floating argument anywhere promotes the
        # result to the default dtype.
        self.assertIs(torch.float32, torch.arange(1., 3).dtype)
        self.assertIs(torch.float32, torch.arange(torch.tensor(1., dtype=torch.float64), 3).dtype)
        self.assertIs(torch.float32, torch.arange(1, 3.).dtype)
        self.assertIs(torch.float32, torch.arange(torch.tensor(1, dtype=torch.int16), torch.tensor(3.)).dtype)
        self.assertIs(torch.float32, torch.arange(1, 3, 1.).dtype)
        self.assertIs(torch.float32,
                      torch.arange(torch.tensor(1),
                                   torch.tensor(3, dtype=torch.int16),
                                   torch.tensor(1., dtype=torch.float64)).dtype)
        self.assertIs(torch.int64, torch.arange(1, 3).dtype)
        self.assertIs(torch.int64, torch.arange(torch.tensor(1), 3).dtype)
        self.assertIs(torch.int64, torch.arange(torch.tensor(1), torch.tensor(3, dtype=torch.int16)).dtype)
        self.assertIs(torch.int64, torch.arange(1, 3, 1).dtype)
        self.assertIs(torch.int64,
                      torch.arange(torch.tensor(1),
                                   torch.tensor(3),
                                   torch.tensor(1, dtype=torch.int16)).dtype)
        torch.set_default_dtype(saved_dtype)
def test_empty_strided(self, device):
for shape in [(2, 3, 4), (0, 2, 0)]:
# some of these cases are pretty strange, just verifying that if as_strided
# allows them then empty_strided can as well.
for strides in [(12, 4, 1), (2, 4, 6), (0, 0, 0)]:
empty_strided = torch.empty_strided(shape, strides, device=device)
# as_strided checks the storage size is big enough to support such a strided tensor;
# instead of repeating this calculation, we just use empty_strided which does the same
# calculation when setting the storage size.
as_strided = torch.empty(empty_strided.storage().size(),
device=device).as_strided(shape, strides)
self.assertEqual(empty_strided.shape, as_strided.shape)
self.assertEqual(empty_strided.stride(), as_strided.stride())
def test_strided_mismatched_stride_shape(self, device):
for shape, strides in [((1, ), ()), ((1, 2), (1, ))]:
with self.assertRaisesRegex(RuntimeError, "mismatch in length of strides and shape"):
torch.tensor(0.42, device=device).as_strided(shape, strides)
with self.assertRaisesRegex(RuntimeError, "mismatch in length of strides and shape"):
torch.tensor(0.42, device=device).as_strided_(shape, strides)
def test_empty_tensor_props(self, device):
sizes = [(0,), (0, 3), (5, 0), (5, 0, 3, 0, 2), (0, 3, 0, 2), (0, 5, 0, 2, 0)]
for size in sizes:
x = torch.empty(tuple(size), device=device)
self.assertEqual(size, x.shape)
self.assertTrue(x.is_contiguous())
size_ones_instead_of_zeros = (x if x != 0 else 1 for x in size)
y = torch.empty(tuple(size_ones_instead_of_zeros), device=device)
self.assertEqual(x.stride(), y.stride())
def test_eye(self, device):
for dtype in torch.testing.get_all_dtypes():
if dtype == torch.bfloat16:
continue
for n, m in product([3, 5, 7], repeat=2):
# Construct identity using diagonal and fill
res1 = torch.eye(n, m, device=device, dtype=dtype)
naive_eye = torch.zeros(n, m, dtype=dtype, device=device)
naive_eye.diagonal(dim1=-2, dim2=-1).fill_(1)
self.assertEqual(naive_eye, res1)
# Check eye_out outputs
res2 = torch.empty(0, device=device, dtype=dtype)
torch.eye(n, m, out=res2)
self.assertEqual(res1, res2)
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
@precisionOverride({torch.float: 1e-8, torch.double: 1e-10})
@dtypes(*(torch.testing.get_all_fp_dtypes(include_half=False, include_bfloat16=False) +
torch.testing.get_all_complex_dtypes()))
def test_linspace_vs_numpy(self, device, dtype):
start = -0.0316082797944545745849609375 + (0.8888888888j if dtype.is_complex else 0)
end = .0315315723419189453125 + (0.444444444444j if dtype.is_complex else 0)
for steps in [1, 2, 3, 5, 11, 256, 257, 2**22]:
t = torch.linspace(start, end, steps, device=device, dtype=dtype)
a = np.linspace(start, end, steps, dtype=torch_to_numpy_dtype_dict[dtype])
t = t.cpu()
self.assertEqual(t, torch.from_numpy(a))
self.assertTrue(t[0].item() == a[0])
self.assertTrue(t[steps - 1].item() == a[steps - 1])
@unittest.skipIf(not TEST_NUMPY, "NumPy not found")
@precisionOverride({torch.float: 1e-6, torch.double: 1e-10})
@dtypes(torch.float, torch.double)
def test_logspace_vs_numpy(self, device, dtype):
start = -0.0316082797944545745849609375
end = .0315315723419189453125
for steps in [1, 2, 3, 5, 11, 256, 257, 2**22]:
t = torch.logspace(start, end, steps, device=device, dtype=dtype)
a = np.logspace(start, end, steps, dtype=torch_to_numpy_dtype_dict[dtype])
t = t.cpu()
self.assertEqual(t, torch.from_numpy(a))
self.assertEqual(t[0], a[0])
self.assertEqual(t[steps - 1], a[steps - 1])
@largeCUDATensorTest('16GB')
def test_range_factories_64bit_indexing(self, device):
bigint = 2 ** 31 + 1
t = torch.arange(bigint, dtype=torch.long, device=device)
self.assertEqual(t[-1].item(), bigint - 1)
del t
t = torch.linspace(0, 1, bigint, dtype=torch.float, device=device)
self.assertEqual(t[-1].item(), 1)
del t
t = torch.logspace(0, 1, bigint, 2, dtype=torch.float, device=device)
self.assertEqual(t[-1].item(), 2)
del t
@onlyOnCPUAndCUDA
    def test_tensor_ctor_device_inference(self, device):
        """torch.tensor / torch.as_tensor / sparse_coo_tensor must place their
        result on the device of the input tensor(s), even when the current
        CUDA device differs; an explicit dtype must not change placement.
        """
        torch_device = torch.device(device)
        values = torch.tensor((1, 2, 3), device=device)
        # Tests tensor and as_tensor
        # torch.tensor on an existing tensor warns about copy-constructing;
        # suppress those warnings for the duration of the checks.
        for op in (torch.tensor, torch.as_tensor):
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                self.assertEqual(op(values).device, torch_device)
                self.assertEqual(op(values, dtype=torch.float64).device, torch_device)
                if self.device_type == 'cuda':
                    # A CPU input must stay on CPU even while a CUDA device
                    # is the current device.
                    with torch.cuda.device(device):
                        self.assertEqual(op(values.cpu()).device, torch.device('cpu'))
        # Tests sparse ctor
        indices = torch.tensor([[0, 1, 1],
                                [2, 0, 1],
                                [2, 1, 0]], device=device)
        sparse_size = (3, 3, 3)
        sparse_default = torch.sparse_coo_tensor(indices, values, sparse_size)
        self.assertEqual(sparse_default.device, torch_device)
        sparse_with_dtype = torch.sparse_coo_tensor(indices, values, sparse_size, dtype=torch.float64)
        self.assertEqual(sparse_with_dtype.device, torch_device)
        if self.device_type == 'cuda':
            # CPU indices/values must yield a CPU sparse tensor even with a
            # CUDA current device.
            with torch.cuda.device(device):
                sparse_with_dtype = torch.sparse_coo_tensor(indices.cpu(), values.cpu(),
                                                            sparse_size, dtype=torch.float64)
                self.assertEqual(sparse_with_dtype.device, torch.device('cpu'))
    def test_tensor_factories_empty(self, device):
        """Every factory function must accept zero-element shapes and return a
        tensor of exactly that shape, for every dtype; random factories are
        additionally checked for their dtype restrictions (randint rejects
        complex, rand rejects non-floating dtypes, randn needs float/complex).
        """
        # ensure we can create empty tensors from each factory function
        shapes = [(5, 0, 1), (0,), (0, 0, 1, 0, 2, 0, 0)]
        for shape in shapes:
            for dt in torch.testing.get_all_dtypes():
                self.assertEqual(shape, torch.zeros(shape, device=device, dtype=dt).shape)
                self.assertEqual(shape, torch.zeros_like(torch.zeros(shape, device=device, dtype=dt)).shape)
                self.assertEqual(shape, torch.full(shape, 3, device=device, dtype=dt).shape)
                self.assertEqual(shape, torch.full_like(torch.zeros(shape, device=device, dtype=dt), 3).shape)
                self.assertEqual(shape, torch.ones(shape, device=device, dtype=dt).shape)
                self.assertEqual(shape, torch.ones_like(torch.zeros(shape, device=device, dtype=dt)).shape)
                self.assertEqual(shape, torch.empty(shape, device=device, dtype=dt).shape)
                self.assertEqual(shape, torch.empty_like(torch.zeros(shape, device=device, dtype=dt)).shape)
                self.assertEqual(shape, torch.empty_strided(shape, (0,) * len(shape), device=device, dtype=dt).shape)
                if dt == torch.bfloat16 and device.startswith('cuda') and IS_WINDOWS:
                    # TODO: https://github.com/pytorch/pytorch/issues/33793
                    self.assertRaises(RuntimeError, lambda: torch.randint(6, shape, device=device, dtype=dt).shape)
                elif dt == torch.bool:
                    # bool randint can only draw from {0, 1}.
                    self.assertEqual(shape, torch.randint(2, shape, device=device, dtype=dt).shape)
                    self.assertEqual(shape, torch.randint_like(torch.zeros(shape, device=device, dtype=dt), 2).shape)
                elif dt.is_complex:
                    self.assertRaises(RuntimeError, lambda: torch.randint(6, shape, device=device, dtype=dt).shape)
                else:
                    self.assertEqual(shape, torch.randint(6, shape, device=device, dtype=dt).shape)
                    self.assertEqual(shape, torch.randint_like(torch.zeros(shape, device=device, dtype=dt), 6).shape)
                if dt not in {torch.double, torch.float, torch.half, torch.bfloat16, torch.complex64, torch.complex128}:
                    self.assertRaises(RuntimeError, lambda: torch.rand(shape, device=device, dtype=dt).shape)
                if dt == torch.double or dt == torch.float or dt.is_complex:
                    self.assertEqual(shape, torch.randn(shape, device=device, dtype=dt).shape)
                    self.assertEqual(shape, torch.randn_like(torch.zeros(shape, device=device, dtype=dt)).shape)
        # Shape-only factories (ranges, eye, windows, permutations) with
        # zero-sized arguments.
        self.assertEqual((0,), torch.arange(0, device=device).shape)
        self.assertEqual((0, 0), torch.eye(0, device=device).shape)
        self.assertEqual((0, 0), torch.eye(0, 0, device=device).shape)
        self.assertEqual((5, 0), torch.eye(5, 0, device=device).shape)
        self.assertEqual((0, 5), torch.eye(0, 5, device=device).shape)
        self.assertEqual((0,), torch.linspace(1, 1, 0, device=device).shape)
        self.assertEqual((0,), torch.logspace(1, 1, 0, device=device).shape)
        self.assertEqual((0,), torch.randperm(0, device=device).shape)
        self.assertEqual((0,), torch.bartlett_window(0, device=device).shape)
        self.assertEqual((0,), torch.bartlett_window(0, periodic=False, device=device).shape)
        self.assertEqual((0,), torch.hamming_window(0, device=device).shape)
        self.assertEqual((0,), torch.hann_window(0, device=device).shape)
        self.assertEqual((1, 1, 0), torch.tensor([[[]]], device=device).shape)
        self.assertEqual((1, 1, 0), torch.as_tensor([[[]]], device=device).shape)
@onlyCUDA
    def test_tensor_factory_gpu_type_inference(self, device):
        """With a CUDA tensor type installed as the process default, scalar
        torch.tensor calls must follow the current default dtype and be
        placed on `device`.

        Mutates process-global state (default tensor type and default dtype)
        and restores the saved tensor type at the end; statement order is
        load-bearing.
        """
        saved_type = torch.Tensor().type()
        torch.set_default_tensor_type(torch.cuda.DoubleTensor)
        torch.set_default_dtype(torch.float32)
        self.assertIs(torch.float32, torch.tensor(0.).dtype)
        self.assertEqual(torch.device(device), torch.tensor(0.).device)
        torch.set_default_dtype(torch.float64)
        self.assertIs(torch.float64, torch.tensor(0.).dtype)
        self.assertEqual(torch.device(device), torch.tensor(0.).device)
        torch.set_default_tensor_type(saved_type)
@onlyCUDA
    def test_tensor_factory_gpu_type(self, device):
        """torch.zeros must follow the process-default tensor type: first
        CUDA float, then CUDA double.

        Mutates the process-global default tensor type and restores the
        saved type at the end.
        """
        saved_type = torch.Tensor().type()
        torch.set_default_tensor_type(torch.cuda.FloatTensor)
        x = torch.zeros((5, 5))
        self.assertIs(torch.float32, x.dtype)
        self.assertTrue(x.is_cuda)
        torch.set_default_tensor_type(torch.cuda.DoubleTensor)
        x = torch.zeros((5, 5))
        self.assertIs(torch.float64, x.dtype)
        self.assertTrue(x.is_cuda)
        torch.set_default_tensor_type(saved_type)
@skipCPUIf(True, 'compares device with cpu')
@dtypes(torch.int, torch.long, torch.float, torch.double)
def test_arange_device_vs_cpu(self, device, dtype):
cpu_tensor = torch.arange(0, 10, dtype=dtype, device='cpu')
device_tensor = torch.arange(0, 10, dtype=dtype, device=device)
self.assertEqual(cpu_tensor, device_tensor)
@onlyCUDA
@skipCUDAIfNotRocm
def test_arange_bfloat16(self, device):
ref_tensor = torch.tensor([0, 1, 2, 3], dtype=torch.bfloat16, device=device)
bfloat16_tensor = torch.arange(0, 4, dtype=torch.bfloat16, device=device)
self.assertEqual(ref_tensor, bfloat16_tensor)
# step=2
ref_tensor = torch.tensor([0, 2, 4], dtype=torch.bfloat16, device=device)
bfloat16_tensor = torch.arange(0, 6, step=2, dtype=torch.bfloat16, device=device)
self.assertEqual(ref_tensor, bfloat16_tensor)
@dtypes(*torch.testing.get_all_dtypes(include_bool=False, include_half=False))
@dtypesIfCUDA(*torch.testing.get_all_dtypes(include_bool=False, include_half=True))
    def test_linspace(self, device, dtype):
        """Broad torch.linspace coverage for one (device, dtype) pair: out=
        equivalence, exact integer-valued outputs, vectorized non-contiguous
        outputs, error and degenerate step counts, per-dtype value accuracy
        (with a GPU tolerance), descending ranges, complex-input rejection,
        monotonicity on a large tensor, and non-contiguous out= views.
        """
        _from = random.random()
        to = _from + random.random()
        res1 = torch.linspace(_from, to, 137, device=device, dtype=dtype)
        res2 = torch.tensor((), device=device, dtype=dtype)
        torch.linspace(_from, to, 137, dtype=dtype, out=res2)
        self.assertEqual(res1, res2, atol=0, rtol=0)
        # small tensor
        self.assertEqual(torch.linspace(10, 20, 11, device=device, dtype=dtype),
                         torch.tensor(list(range(10, 21)), device=device, dtype=dtype))
        # large tensor
        if dtype not in (torch.int8, torch.uint8):
            self.assertEqual(torch.linspace(10, 2000, 1991, device=device, dtype=dtype),
                             torch.tensor(list(range(10, 2001)), device=device, dtype=dtype))
        # Vectorization on non-contiguous tensors
        if dtype not in (torch.int8, torch.uint8):  # int8 and uint8 are too small for this test
            res = torch.rand(3, 3, 1000, device=device).to(dtype)
            res = res.permute(2, 0, 1)
            torch.linspace(0, 1000 * 3 * 3, 1000 * 3 * 3, out=res)
            self.assertEqual(res.flatten(), torch.linspace(0, 1000 * 3 * 3, 1000 * 3 * 3, device=device, dtype=dtype))
        # A negative step count is an error.
        self.assertRaises(RuntimeError, lambda: torch.linspace(0, 1, -1, device=device, dtype=dtype))
        # steps = 1
        self.assertEqual(torch.linspace(0, 1, 1, device=device, dtype=dtype),
                         torch.zeros(1, device=device, dtype=dtype), atol=0, rtol=0)
        # steps = 0
        self.assertEqual(torch.linspace(0, 1, 0, device=device, dtype=dtype).numel(), 0, atol=0, rtol=0)
        # Check linspace for generating the correct output for each dtype.
        start = 0 if dtype == torch.uint8 else -100
        expected_lin = torch.tensor([start + .5 * i for i in range(401)], device=device, dtype=torch.double)
        actual_lin = torch.linspace(start, start + 200, 401, device=device, dtype=dtype)
        # If on GPU, allow for minor error depending on dtype.
        tol = 0.
        if device != 'cpu':
            if dtype == torch.half:
                tol = 1e-1
            elif dtype == torch.float:
                tol = 1e-5
            elif dtype == torch.double:
                tol = 1e-10
        self.assertEqual(expected_lin.to(dtype), actual_lin, atol=tol, rtol=0)
        # Check linspace for generating with start > end.
        self.assertEqual(torch.linspace(2, 0, 3, device=device, dtype=dtype),
                         torch.tensor((2, 1, 0), device=device, dtype=dtype),
                         atol=0, rtol=0)
        # Create non-complex tensor from complex numbers
        if not dtype.is_complex:
            self.assertRaises(RuntimeError, lambda: torch.linspace(1j, 2j, 3, device=device, dtype=dtype))
        # Check for race condition (correctness when applied on a large tensor).
        # Values must be strictly increasing element-to-element.
        if dtype not in (torch.int8, torch.uint8, torch.int16, torch.half, torch.bfloat16):
            y = torch.linspace(0, 999999 + (999999j if dtype.is_complex else 0),
                               1000000, device=device, dtype=dtype)
            if dtype.is_complex:
                cond = torch.logical_and(y[:-1].real < y[1:].real, y[:-1].imag < y[1:].imag)
            else:
                cond = y[:-1] < y[1:]
            correct = all(cond)
            self.assertTrue(correct)
        # Check linspace for non-contiguous tensors.
        x = torch.zeros(2, 3, device=device, dtype=dtype)
        y = torch.linspace(0, 3, 4, out=x.narrow(1, 1, 2), dtype=dtype)
        self.assertEqual(x, torch.tensor(((0, 0, 1), (0, 2, 3)), device=device, dtype=dtype), atol=0, rtol=0)
def test_linspace_deduction(self, device):
# Test deduction from input parameters.
self.assertEqual(torch.linspace(1, 2, device=device).dtype, torch.float32)
self.assertEqual(torch.linspace(1., 2, device=device).dtype, torch.float32)
self.assertEqual(torch.linspace(1., -2., device=device).dtype, torch.float32)
# TODO: Need fix
with self.assertRaises(RuntimeError):
torch.linspace(1j, -2j, device=device)
# The implementation of linspace+logspace goes through a different path
# when the steps arg is equal to 0 or 1. For other values of `steps`
# they call specialized linspace (or logspace) kernels.
LINSPACE_LOGSPACE_SPECIAL_STEPS = [0, 1]
# NOTE [Linspace+Logspace precision override]
# Our Linspace and logspace torch.half CUDA kernels are not very precise.
# Since linspace/logspace are deterministic, we can compute an expected
# amount of error (by testing without a precision override), adding a tiny
# amount (EPS) to that, and using that value as the override.
LINSPACE_LOGSPACE_EXTRA_EPS = 1e-5
# Compares linspace device vs. cpu
def _test_linspace(self, device, dtype, steps):
a = torch.linspace(0, 10, steps=steps, dtype=dtype, device=device)
b = torch.linspace(0, 10, steps=steps)
self.assertEqual(a, b, exact_dtype=False)
# See NOTE [Linspace+Logspace precision override]
@skipCPUIf(True, "compares with CPU")
@precisionOverride({torch.half: 0.0039 + LINSPACE_LOGSPACE_EXTRA_EPS})
@dtypesIfCUDA(*(torch.testing.get_all_fp_dtypes() + torch.testing.get_all_complex_dtypes()))
    def test_linspace_device_vs_cpu(self, device, dtype):
        """linspace on `device` must match the CPU reference (steps=10)."""
        self._test_linspace(device, dtype, steps=10)
@skipCPUIf(True, "compares with CPU")
@dtypesIfCUDA(*(torch.testing.get_all_fp_dtypes() + torch.testing.get_all_complex_dtypes()))
    def test_linspace_special_steps(self, device, dtype):
        """Exercise linspace's specialized steps==0 / steps==1 code paths."""
        for steps in self.LINSPACE_LOGSPACE_SPECIAL_STEPS:
            self._test_linspace(device, dtype, steps=steps)
# Compares logspace device vs cpu
def _test_logspace(self, device, dtype, steps):
a = torch.logspace(1, 1.1, steps=steps, dtype=dtype, device=device)
b = torch.logspace(1, 1.1, steps=steps)
self.assertEqual(a, b, exact_dtype=False)
# Compares logspace device vs cpu
def _test_logspace_base2(self, device, dtype, steps):
a = torch.logspace(1, 1.1, steps=steps, base=2, dtype=dtype, device=device)
b = torch.logspace(1, 1.1, steps=steps, base=2)
self.assertEqual(a, b, exact_dtype=False)
# See NOTE [Linspace+Logspace precision override]
@skipCPUIf(True, "compares with CPU")
@precisionOverride({torch.half: 0.025 + LINSPACE_LOGSPACE_EXTRA_EPS})
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float, torch.double)
    def test_logspace_device_vs_cpu(self, device, dtype):
        """logspace on `device` must match the CPU reference (steps=10)."""
        self._test_logspace(device, dtype, steps=10)
# See NOTE [Linspace+Logspace precision override]
@skipCPUIf(True, "compares with CPU")
@precisionOverride({torch.half: 0.0201 + LINSPACE_LOGSPACE_EXTRA_EPS})
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float, torch.double)
    def test_logspace_base2(self, device, dtype):
        """Base-2 logspace on `device` must match the CPU reference (steps=10)."""
        self._test_logspace_base2(device, dtype, steps=10)
@skipCPUIf(True, "compares with CPU")
@dtypesIfCUDA(torch.half, torch.float, torch.double)
@dtypes(torch.float, torch.double)
    def test_logspace_special_steps(self, device, dtype):
        """Exercise the steps==0 / steps==1 paths for default and base-2 logspace."""
        for steps in self.LINSPACE_LOGSPACE_SPECIAL_STEPS:
            self._test_logspace(device, dtype, steps=steps)
            self._test_logspace_base2(device, dtype, steps=steps)
@precisionOverride({torch.half: 1e-1, torch.float: 1e-5, torch.double: 1e-10})
@dtypes(torch.uint8, torch.int8, torch.short, torch.int, torch.long, torch.float, torch.double)
@dtypesIfCUDA(torch.uint8, torch.int8, torch.short, torch.int, torch.long, torch.half, torch.float, torch.double)
    def test_logspace(self, device, dtype):
        """Broad torch.logspace coverage for one (device, dtype) pair: out=
        equivalence, error and degenerate step counts, value precision with a
        CPU-only zero tolerance, the base= argument, descending ranges, and
        non-contiguous out= views.
        """
        _from = random.random()
        to = _from + random.random()
        res1 = torch.logspace(_from, to, 137, device=device, dtype=dtype)
        res2 = torch.tensor((), device=device, dtype=dtype)
        torch.logspace(_from, to, 137, device=device, dtype=dtype, out=res2)
        self.assertEqual(res1, res2, atol=0, rtol=0)
        # A negative step count is an error; steps=1 yields base**start == 1.
        self.assertRaises(RuntimeError, lambda: torch.logspace(0, 1, -1, device=device, dtype=dtype))
        self.assertEqual(torch.logspace(0, 1, 1, device=device, dtype=dtype),
                         torch.ones(1, device=device, dtype=dtype), atol=0, rtol=0)
        # Check precision - start, stop and base are chosen to avoid overflow
        # steps is chosen so that step size is not subject to rounding error
        # a tolerance is needed for gpu tests due to differences in computation
        atol = None
        rtol = None
        if self.device_type == 'cpu':
            atol = 0
            rtol = 0
        self.assertEqual(torch.tensor([2. ** (i / 8.) for i in range(49)], device=device, dtype=dtype),
                         torch.logspace(0, 6, steps=49, base=2, device=device, dtype=dtype),
                         atol=atol, rtol=rtol)
        # Check non-default base=2
        self.assertEqual(torch.logspace(1, 1, 1, 2, device=device, dtype=dtype),
                         torch.ones(1, device=device, dtype=dtype) * 2)
        self.assertEqual(torch.logspace(0, 2, 3, 2, device=device, dtype=dtype),
                         torch.tensor((1, 2, 4), device=device, dtype=dtype))
        # Check logspace_ for generating with start > end.
        self.assertEqual(torch.logspace(1, 0, 2, device=device, dtype=dtype),
                         torch.tensor((10, 1), device=device, dtype=dtype), atol=0, rtol=0)
        # Check logspace_ for non-contiguous tensors.
        x = torch.zeros(2, 3, device=device, dtype=dtype)
        y = torch.logspace(0, 3, 4, base=2, device=device, dtype=dtype, out=x.narrow(1, 1, 2))
        self.assertEqual(x, torch.tensor(((0, 1, 2), (0, 4, 8)), device=device, dtype=dtype), atol=0, rtol=0)
@onlyOnCPUAndCUDA
@dtypes(torch.half, torch.float, torch.double)
def test_full_inference(self, device, dtype):
size = (2, 2)
prev_default = torch.get_default_dtype()
torch.set_default_dtype(dtype)
# Tests bool fill value inference
t = torch.full(size, True)
self.assertEqual(t.dtype, torch.bool)
# Tests integer fill value inference
t = torch.full(size, 1)
self.assertEqual(t.dtype, torch.long)
# Tests float fill value inference
t = torch.full(size, 1.)
self.assertEqual(t.dtype, dtype)
# Tests complex inference
t = torch.full(size, (1 + 1j))
ctype = torch.complex128 if dtype is torch.double else torch.complex64
self.assertEqual(t.dtype, ctype)
torch.set_default_dtype(prev_default)
def test_full_out(self, device):
size = (5,)
o = torch.empty(size, device=device, dtype=torch.long)
# verifies dtype/out conflict throws a RuntimeError
with self.assertRaises(RuntimeError):
torch.full(o.shape, 1., dtype=torch.float, out=o)
# verifies out dtype overrides inference
self.assertEqual(torch.full(o.shape, 1., out=o).dtype, o.dtype)
self.assertEqual(torch.full(size, 1, out=o).dtype, o.dtype)
# Class for testing random tensor creation ops, like torch.randint
class TestRandomTensorCreation(TestCase):
    """Tests for the random tensor creation ops: randint, randn, rand,
    randperm. Several tests replay the RNG via manual_seed/fork_rng, so the
    exact order of the sampling calls is significant.
    """
    exact_dtype = True

    # TODO: this test should be updated
    @onlyCPU
    def test_randint_inference(self, device):
        """randint produces int64 by default; dtype= and out= override that."""
        size = (2, 1)
        for args in [(3,), (1, 3)]:  # (low,) and (low, high)
            self.assertIs(torch.int64, torch.randint(*args, size=size).dtype)
            self.assertIs(torch.int64, torch.randint(*args, size=size, layout=torch.strided).dtype)
            self.assertIs(torch.int64, torch.randint(*args, size=size, generator=torch.default_generator).dtype)
            self.assertIs(torch.float32, torch.randint(*args, size=size, dtype=torch.float32).dtype)
            # An out= tensor fixes the result dtype as well.
            out = torch.empty(size, dtype=torch.float32)
            self.assertIs(torch.float32, torch.randint(*args, size=size, out=out).dtype)
            self.assertIs(torch.float32, torch.randint(*args, size=size, out=out, dtype=torch.float32).dtype)
            out = torch.empty(size, dtype=torch.int64)
            self.assertIs(torch.int64, torch.randint(*args, size=size, out=out).dtype)
            self.assertIs(torch.int64, torch.randint(*args, size=size, out=out, dtype=torch.int64).dtype)

    # TODO: this test should be updated
    @onlyCPU
    def test_randint(self, device):
        """All randint call forms draw identical values from a fixed seed."""
        SIZE = 100

        def seed(generator):
            # Re-seed the global RNG (generator is None) or the explicit
            # generator so each randint call below replays the same stream.
            if generator is None:
                torch.manual_seed(123456)
            else:
                generator.manual_seed(123456)
            return generator

        for generator in (None, torch.Generator()):
            generator = seed(generator)
            res1 = torch.randint(0, 6, (SIZE, SIZE), generator=generator)
            res2 = torch.empty((), dtype=torch.int64)
            generator = seed(generator)
            torch.randint(0, 6, (SIZE, SIZE), generator=generator, out=res2)
            generator = seed(generator)
            res3 = torch.randint(6, (SIZE, SIZE), generator=generator)
            res4 = torch.empty((), dtype=torch.int64)
            generator = seed(generator)
            torch.randint(6, (SIZE, SIZE), out=res4, generator=generator)
            # Every seeded variant must agree, and all values lie in [0, 6).
            self.assertEqual(res1, res2)
            self.assertEqual(res1, res3)
            self.assertEqual(res1, res4)
            self.assertEqual(res2, res3)
            self.assertEqual(res2, res4)
            self.assertEqual(res3, res4)
            self.assertTrue((res1 < 6).all().item())
            self.assertTrue((res1 >= 0).all().item())

    @dtypes(torch.half, torch.float, torch.double,
            torch.complex32, torch.complex64, torch.complex128)
    def test_randn(self, device, dtype):
        """randn and its out= form agree under the same seed (incl. size 0)."""
        SIZE = 100
        for size in [0, SIZE]:
            torch.manual_seed(123456)
            res1 = torch.randn(size, size, dtype=dtype, device=device)
            res2 = torch.tensor([], dtype=dtype, device=device)
            torch.manual_seed(123456)
            torch.randn(size, size, out=res2)
            self.assertEqual(res1, res2)

    @dtypes(torch.float, torch.double, torch.complex64, torch.complex128)
    def test_rand(self, device, dtype):
        """rand and its out= form agree under the same seed (incl. size 0)."""
        SIZE = 100
        for size in [0, SIZE]:
            torch.manual_seed(123456)
            res1 = torch.rand(size, size, dtype=dtype, device=device)
            res2 = torch.tensor([], dtype=dtype, device=device)
            torch.manual_seed(123456)
            torch.rand(size, size, out=res2)
            self.assertEqual(res1, res2)

    @slowTest
    def test_randperm(self, device):
        """randperm: out= agreement via fork_rng replay, default dtype, the
        empty permutation, float-dtype size limits, and non-contiguous out
        tensors.
        """
        if device == 'cpu':
            rng_device = None
        else:
            rng_device = [device]

        # Test core functionality. On CUDA, for small n, randperm is offloaded to CPU instead. For large n, randperm is
        # executed on GPU.
        for n in (100, 50000, 100000):
            # Ensure both integer and floating-point numbers are tested. Half follows an execution path that is
            # different from others on CUDA.
            for dtype in (torch.long, torch.half, torch.float):
                if n > 2049 and dtype == torch.half:  # Large n for torch.half will raise an exception, do not test here.
                    continue
                # fork_rng restores the RNG state afterwards, so the second
                # randperm call below replays the exact same permutation.
                with torch.random.fork_rng(devices=rng_device):
                    res1 = torch.randperm(n, dtype=dtype, device=device)
                res2 = torch.empty(0, dtype=dtype, device=device)
                torch.randperm(n, out=res2, dtype=dtype, device=device)
                self.assertEqual(res1, res2, atol=0, rtol=0)

        # Default type is long
        for n in (100, 10000):
            self.assertEqual(torch.randperm(n, device=device).dtype, torch.long)

        # randperm of 0 elements is an empty tensor
        res1 = torch.randperm(0)
        # NOTE(review): `dtype` here is the leftover value of the loop variable
        # from the loop above (torch.float); only numel is asserted, so the
        # dtype looks incidental — confirm before relying on it.
        res2 = torch.tensor(5, dtype=dtype, device=device)
        torch.randperm(0, out=res2)
        self.assertEqual(res1.numel(), 0)
        self.assertEqual(res2.numel(), 0)

        # Test exceptions when n is too large for a floating point type
        for dtype, small_n, large_n in ((torch.half, 2**11 + 1, 2**11 + 2),
                                        (torch.float, 2**24 + 1, 2**24 + 2),
                                        (torch.double, 2**25,  # 2**53 + 1 is too large to run
                                         2**53 + 2)):
            res = torch.empty(0, dtype=dtype, device=device)
            torch.randperm(small_n, out=res)  # No exception expected
            self.assertRaises(RuntimeError, lambda: torch.randperm(large_n, out=res, device=device))

        # Test non-contiguous tensors
        for n in (4, 5, 6, 10, 20):
            non_contiguous_tensor = torch.zeros((2, 3), dtype=torch.long, device=device).t()
            self.assertFalse(non_contiguous_tensor.is_contiguous())
            # Replay the same permutation into the non-contiguous out tensor.
            with torch.random.fork_rng(devices=rng_device):
                res = torch.randperm(n, dtype=torch.long, device=device)
            torch.randperm(n, out=non_contiguous_tensor)
            self.assertEqual(non_contiguous_tensor, res)
# Class for testing *like ops, like torch.ones_like
class TestLikeTensorCreation(TestCase):
    """Tests for the *_like creation ops: ones_like, empty_like, zeros_like,
    and full_like."""
    exact_dtype = True

    # TODO: this test should be updated
    def test_ones_like(self, device):
        """ones_like reproduces an all-ones tensor for float and bool inputs."""
        for prototype in (torch.ones(100, 100, device=device),
                          torch.tensor([True, True], device=device, dtype=torch.bool)):
            self.assertEqual(torch.ones_like(prototype), prototype)

    # TODO: this test should be updated
    @onlyCPU
    def test_empty_like(self, device):
        """empty_like preserves shape and type string for empty, float, and
        int inputs."""
        prototypes = (torch.autograd.Variable(torch.Tensor()),
                      torch.autograd.Variable(torch.randn(4, 4)),
                      torch.autograd.Variable(torch.IntTensor([1, 2, 3])))
        for prototype in prototypes:
            result = torch.empty_like(prototype)
            self.assertEqual(result.shape, prototype.shape)
            self.assertEqualTypeString(result, prototype)

    def test_zeros_like(self, device):
        """zeros_like reproduces an all-zeros tensor of the same shape."""
        reference = torch.zeros((100, 100,), device=device)
        self.assertEqual(torch.zeros_like(reference), reference)

    @deviceCountAtLeast(2)
    def test_zeros_like_multiple_device(self, devices):
        """zeros_like of a tensor on one device compares equal to zeros built
        on another device."""
        source = torch.randn(100, 100, device=devices[1], dtype=torch.float32)
        reference = torch.zeros(100, 100, device=devices[0])
        self.assertEqual(torch.zeros_like(source), reference)

    @deviceCountAtLeast(2)
    def test_ones_like_multiple_device(self, devices):
        """ones_like of a tensor on one device compares equal to ones built
        on another device."""
        source = torch.randn(100, 100, device=devices[1], dtype=torch.float32)
        reference = torch.ones(100, 100, device=devices[0])
        self.assertEqual(torch.ones_like(source), reference)

    # Full-like precedence is the explicit dtype then the dtype of the "like"
    # tensor.
    @onlyOnCPUAndCUDA
    def test_full_like_inference(self, device):
        """An explicit dtype= beats the prototype's dtype; without one the
        prototype's dtype is inherited."""
        like = torch.empty((5,), device=device, dtype=torch.long)
        self.assertEqual(torch.full_like(like, 1.).dtype, torch.long)
        self.assertEqual(torch.full_like(like, 1., dtype=torch.complex64).dtype,
                         torch.complex64)
# Materialize device-specific (e.g. CPU/CUDA) concrete test classes from the
# generic, device-parameterized classes above and register them in this
# module's namespace so the unittest loader can discover them.
instantiate_device_type_tests(TestTensorCreation, globals())
instantiate_device_type_tests(TestRandomTensorCreation, globals())
instantiate_device_type_tests(TestLikeTensorCreation, globals())

if __name__ == '__main__':
    run_tests()
| [
"torch.eye",
"torch.iinfo",
"torch.ByteTensor",
"numpy.logspace",
"torch.testing._internal.common_device_type.skipCPUIf",
"torch.set_default_tensor_type",
"torch.randn",
"torch.full",
"torch.empty",
"torch.cuda.device_count",
"torch.set_default_dtype",
"torch.testing.get_all_complex_dtypes",
... | [((1681, 1702), 'torch.testing._internal.common_device_type.deviceCountAtLeast', 'deviceCountAtLeast', (['(1)'], {}), '(1)\n', (1699, 1702), False, 'from torch.testing._internal.common_device_type import instantiate_device_type_tests, deviceCountAtLeast, onlyOnCPUAndCUDA, onlyCPU, skipCUDAIfNotRocm, largeCUDATensorTest, precisionOverride, dtypes, onlyCUDA, skipCPUIf, dtypesIfCUDA\n'), ((34002, 34052), 'unittest.skipIf', 'unittest.skipIf', (['(not TEST_NUMPY)', '"""NumPy not found"""'], {}), "(not TEST_NUMPY, 'NumPy not found')\n", (34017, 34052), False, 'import unittest\n'), ((34058, 34118), 'torch.testing._internal.common_device_type.precisionOverride', 'precisionOverride', (['{torch.float: 1e-08, torch.double: 1e-10}'], {}), '({torch.float: 1e-08, torch.double: 1e-10})\n', (34075, 34118), False, 'from torch.testing._internal.common_device_type import instantiate_device_type_tests, deviceCountAtLeast, onlyOnCPUAndCUDA, onlyCPU, skipCUDAIfNotRocm, largeCUDATensorTest, precisionOverride, dtypes, onlyCUDA, skipCPUIf, dtypesIfCUDA\n'), ((34915, 34965), 'unittest.skipIf', 'unittest.skipIf', (['(not TEST_NUMPY)', '"""NumPy not found"""'], {}), "(not TEST_NUMPY, 'NumPy not found')\n", (34930, 34965), False, 'import unittest\n'), ((34971, 35031), 'torch.testing._internal.common_device_type.precisionOverride', 'precisionOverride', (['{torch.float: 1e-06, torch.double: 1e-10}'], {}), '({torch.float: 1e-06, torch.double: 1e-10})\n', (34988, 35031), False, 'from torch.testing._internal.common_device_type import instantiate_device_type_tests, deviceCountAtLeast, onlyOnCPUAndCUDA, onlyCPU, skipCUDAIfNotRocm, largeCUDATensorTest, precisionOverride, dtypes, onlyCUDA, skipCPUIf, dtypesIfCUDA\n'), ((35036, 35069), 'torch.testing._internal.common_device_type.dtypes', 'dtypes', (['torch.float', 'torch.double'], {}), '(torch.float, torch.double)\n', (35042, 35069), False, 'from torch.testing._internal.common_device_type import instantiate_device_type_tests, deviceCountAtLeast, 
onlyOnCPUAndCUDA, onlyCPU, skipCUDAIfNotRocm, largeCUDATensorTest, precisionOverride, dtypes, onlyCUDA, skipCPUIf, dtypesIfCUDA\n'), ((35612, 35639), 'torch.testing._internal.common_device_type.largeCUDATensorTest', 'largeCUDATensorTest', (['"""16GB"""'], {}), "('16GB')\n", (35631, 35639), False, 'from torch.testing._internal.common_device_type import instantiate_device_type_tests, deviceCountAtLeast, onlyOnCPUAndCUDA, onlyCPU, skipCUDAIfNotRocm, largeCUDATensorTest, precisionOverride, dtypes, onlyCUDA, skipCPUIf, dtypesIfCUDA\n'), ((42554, 42597), 'torch.testing._internal.common_device_type.skipCPUIf', 'skipCPUIf', (['(True)', '"""compares device with cpu"""'], {}), "(True, 'compares device with cpu')\n", (42563, 42597), False, 'from torch.testing._internal.common_device_type import instantiate_device_type_tests, deviceCountAtLeast, onlyOnCPUAndCUDA, onlyCPU, skipCUDAIfNotRocm, largeCUDATensorTest, precisionOverride, dtypes, onlyCUDA, skipCPUIf, dtypesIfCUDA\n'), ((42603, 42659), 'torch.testing._internal.common_device_type.dtypes', 'dtypes', (['torch.int', 'torch.long', 'torch.float', 'torch.double'], {}), '(torch.int, torch.long, torch.float, torch.double)\n', (42609, 42659), False, 'from torch.testing._internal.common_device_type import instantiate_device_type_tests, deviceCountAtLeast, onlyOnCPUAndCUDA, onlyCPU, skipCUDAIfNotRocm, largeCUDATensorTest, precisionOverride, dtypes, onlyCUDA, skipCPUIf, dtypesIfCUDA\n'), ((48761, 48797), 'torch.testing._internal.common_device_type.skipCPUIf', 'skipCPUIf', (['(True)', '"""compares with CPU"""'], {}), "(True, 'compares with CPU')\n", (48770, 48797), False, 'from torch.testing._internal.common_device_type import instantiate_device_type_tests, deviceCountAtLeast, onlyOnCPUAndCUDA, onlyCPU, skipCUDAIfNotRocm, largeCUDATensorTest, precisionOverride, dtypes, onlyCUDA, skipCPUIf, dtypesIfCUDA\n'), ((48803, 48872), 'torch.testing._internal.common_device_type.precisionOverride', 'precisionOverride', (['{torch.half: 0.0039 + 
LINSPACE_LOGSPACE_EXTRA_EPS}'], {}), '({torch.half: 0.0039 + LINSPACE_LOGSPACE_EXTRA_EPS})\n', (48820, 48872), False, 'from torch.testing._internal.common_device_type import instantiate_device_type_tests, deviceCountAtLeast, onlyOnCPUAndCUDA, onlyCPU, skipCUDAIfNotRocm, largeCUDATensorTest, precisionOverride, dtypes, onlyCUDA, skipCPUIf, dtypesIfCUDA\n'), ((49087, 49123), 'torch.testing._internal.common_device_type.skipCPUIf', 'skipCPUIf', (['(True)', '"""compares with CPU"""'], {}), "(True, 'compares with CPU')\n", (49096, 49123), False, 'from torch.testing._internal.common_device_type import instantiate_device_type_tests, deviceCountAtLeast, onlyOnCPUAndCUDA, onlyCPU, skipCUDAIfNotRocm, largeCUDATensorTest, precisionOverride, dtypes, onlyCUDA, skipCPUIf, dtypesIfCUDA\n'), ((50010, 50046), 'torch.testing._internal.common_device_type.skipCPUIf', 'skipCPUIf', (['(True)', '"""compares with CPU"""'], {}), "(True, 'compares with CPU')\n", (50019, 50046), False, 'from torch.testing._internal.common_device_type import instantiate_device_type_tests, deviceCountAtLeast, onlyOnCPUAndCUDA, onlyCPU, skipCUDAIfNotRocm, largeCUDATensorTest, precisionOverride, dtypes, onlyCUDA, skipCPUIf, dtypesIfCUDA\n'), ((50052, 50120), 'torch.testing._internal.common_device_type.precisionOverride', 'precisionOverride', (['{torch.half: 0.025 + LINSPACE_LOGSPACE_EXTRA_EPS}'], {}), '({torch.half: 0.025 + LINSPACE_LOGSPACE_EXTRA_EPS})\n', (50069, 50120), False, 'from torch.testing._internal.common_device_type import instantiate_device_type_tests, deviceCountAtLeast, onlyOnCPUAndCUDA, onlyCPU, skipCUDAIfNotRocm, largeCUDATensorTest, precisionOverride, dtypes, onlyCUDA, skipCPUIf, dtypesIfCUDA\n'), ((50126, 50177), 'torch.testing._internal.common_device_type.dtypesIfCUDA', 'dtypesIfCUDA', (['torch.half', 'torch.float', 'torch.double'], {}), '(torch.half, torch.float, torch.double)\n', (50138, 50177), False, 'from torch.testing._internal.common_device_type import instantiate_device_type_tests, 
deviceCountAtLeast, onlyOnCPUAndCUDA, onlyCPU, skipCUDAIfNotRocm, largeCUDATensorTest, precisionOverride, dtypes, onlyCUDA, skipCPUIf, dtypesIfCUDA\n'), ((50183, 50216), 'torch.testing._internal.common_device_type.dtypes', 'dtypes', (['torch.float', 'torch.double'], {}), '(torch.float, torch.double)\n', (50189, 50216), False, 'from torch.testing._internal.common_device_type import instantiate_device_type_tests, deviceCountAtLeast, onlyOnCPUAndCUDA, onlyCPU, skipCUDAIfNotRocm, largeCUDATensorTest, precisionOverride, dtypes, onlyCUDA, skipCPUIf, dtypesIfCUDA\n'), ((50388, 50424), 'torch.testing._internal.common_device_type.skipCPUIf', 'skipCPUIf', (['(True)', '"""compares with CPU"""'], {}), "(True, 'compares with CPU')\n", (50397, 50424), False, 'from torch.testing._internal.common_device_type import instantiate_device_type_tests, deviceCountAtLeast, onlyOnCPUAndCUDA, onlyCPU, skipCUDAIfNotRocm, largeCUDATensorTest, precisionOverride, dtypes, onlyCUDA, skipCPUIf, dtypesIfCUDA\n'), ((50430, 50499), 'torch.testing._internal.common_device_type.precisionOverride', 'precisionOverride', (['{torch.half: 0.0201 + LINSPACE_LOGSPACE_EXTRA_EPS}'], {}), '({torch.half: 0.0201 + LINSPACE_LOGSPACE_EXTRA_EPS})\n', (50447, 50499), False, 'from torch.testing._internal.common_device_type import instantiate_device_type_tests, deviceCountAtLeast, onlyOnCPUAndCUDA, onlyCPU, skipCUDAIfNotRocm, largeCUDATensorTest, precisionOverride, dtypes, onlyCUDA, skipCPUIf, dtypesIfCUDA\n'), ((50505, 50556), 'torch.testing._internal.common_device_type.dtypesIfCUDA', 'dtypesIfCUDA', (['torch.half', 'torch.float', 'torch.double'], {}), '(torch.half, torch.float, torch.double)\n', (50517, 50556), False, 'from torch.testing._internal.common_device_type import instantiate_device_type_tests, deviceCountAtLeast, onlyOnCPUAndCUDA, onlyCPU, skipCUDAIfNotRocm, largeCUDATensorTest, precisionOverride, dtypes, onlyCUDA, skipCPUIf, dtypesIfCUDA\n'), ((50562, 50595), 
'torch.testing._internal.common_device_type.dtypes', 'dtypes', (['torch.float', 'torch.double'], {}), '(torch.float, torch.double)\n', (50568, 50595), False, 'from torch.testing._internal.common_device_type import instantiate_device_type_tests, deviceCountAtLeast, onlyOnCPUAndCUDA, onlyCPU, skipCUDAIfNotRocm, largeCUDATensorTest, precisionOverride, dtypes, onlyCUDA, skipCPUIf, dtypesIfCUDA\n'), ((50711, 50747), 'torch.testing._internal.common_device_type.skipCPUIf', 'skipCPUIf', (['(True)', '"""compares with CPU"""'], {}), "(True, 'compares with CPU')\n", (50720, 50747), False, 'from torch.testing._internal.common_device_type import instantiate_device_type_tests, deviceCountAtLeast, onlyOnCPUAndCUDA, onlyCPU, skipCUDAIfNotRocm, largeCUDATensorTest, precisionOverride, dtypes, onlyCUDA, skipCPUIf, dtypesIfCUDA\n'), ((50753, 50804), 'torch.testing._internal.common_device_type.dtypesIfCUDA', 'dtypesIfCUDA', (['torch.half', 'torch.float', 'torch.double'], {}), '(torch.half, torch.float, torch.double)\n', (50765, 50804), False, 'from torch.testing._internal.common_device_type import instantiate_device_type_tests, deviceCountAtLeast, onlyOnCPUAndCUDA, onlyCPU, skipCUDAIfNotRocm, largeCUDATensorTest, precisionOverride, dtypes, onlyCUDA, skipCPUIf, dtypesIfCUDA\n'), ((50810, 50843), 'torch.testing._internal.common_device_type.dtypes', 'dtypes', (['torch.float', 'torch.double'], {}), '(torch.float, torch.double)\n', (50816, 50843), False, 'from torch.testing._internal.common_device_type import instantiate_device_type_tests, deviceCountAtLeast, onlyOnCPUAndCUDA, onlyCPU, skipCUDAIfNotRocm, largeCUDATensorTest, precisionOverride, dtypes, onlyCUDA, skipCPUIf, dtypesIfCUDA\n'), ((51093, 51170), 'torch.testing._internal.common_device_type.precisionOverride', 'precisionOverride', (['{torch.half: 0.1, torch.float: 1e-05, torch.double: 1e-10}'], {}), '({torch.half: 0.1, torch.float: 1e-05, torch.double: 1e-10})\n', (51110, 51170), False, 'from 
torch.testing._internal.common_device_type import instantiate_device_type_tests, deviceCountAtLeast, onlyOnCPUAndCUDA, onlyCPU, skipCUDAIfNotRocm, largeCUDATensorTest, precisionOverride, dtypes, onlyCUDA, skipCPUIf, dtypesIfCUDA\n'), ((51176, 51275), 'torch.testing._internal.common_device_type.dtypes', 'dtypes', (['torch.uint8', 'torch.int8', 'torch.short', 'torch.int', 'torch.long', 'torch.float', 'torch.double'], {}), '(torch.uint8, torch.int8, torch.short, torch.int, torch.long, torch.\n float, torch.double)\n', (51182, 51275), False, 'from torch.testing._internal.common_device_type import instantiate_device_type_tests, deviceCountAtLeast, onlyOnCPUAndCUDA, onlyCPU, skipCUDAIfNotRocm, largeCUDATensorTest, precisionOverride, dtypes, onlyCUDA, skipCPUIf, dtypesIfCUDA\n'), ((51276, 51392), 'torch.testing._internal.common_device_type.dtypesIfCUDA', 'dtypesIfCUDA', (['torch.uint8', 'torch.int8', 'torch.short', 'torch.int', 'torch.long', 'torch.half', 'torch.float', 'torch.double'], {}), '(torch.uint8, torch.int8, torch.short, torch.int, torch.long,\n torch.half, torch.float, torch.double)\n', (51288, 51392), False, 'from torch.testing._internal.common_device_type import instantiate_device_type_tests, deviceCountAtLeast, onlyOnCPUAndCUDA, onlyCPU, skipCUDAIfNotRocm, largeCUDATensorTest, precisionOverride, dtypes, onlyCUDA, skipCPUIf, dtypesIfCUDA\n'), ((53554, 53599), 'torch.testing._internal.common_device_type.dtypes', 'dtypes', (['torch.half', 'torch.float', 'torch.double'], {}), '(torch.half, torch.float, torch.double)\n', (53560, 53599), False, 'from torch.testing._internal.common_device_type import instantiate_device_type_tests, deviceCountAtLeast, onlyOnCPUAndCUDA, onlyCPU, skipCUDAIfNotRocm, largeCUDATensorTest, precisionOverride, dtypes, onlyCUDA, skipCPUIf, dtypesIfCUDA\n'), ((57347, 57449), 'torch.testing._internal.common_device_type.dtypes', 'dtypes', (['torch.half', 'torch.float', 'torch.double', 'torch.complex32', 'torch.complex64', 'torch.complex128'], 
{}), '(torch.half, torch.float, torch.double, torch.complex32, torch.\n complex64, torch.complex128)\n', (57353, 57449), False, 'from torch.testing._internal.common_device_type import instantiate_device_type_tests, deviceCountAtLeast, onlyOnCPUAndCUDA, onlyCPU, skipCUDAIfNotRocm, largeCUDATensorTest, precisionOverride, dtypes, onlyCUDA, skipCPUIf, dtypesIfCUDA\n'), ((57852, 57920), 'torch.testing._internal.common_device_type.dtypes', 'dtypes', (['torch.float', 'torch.double', 'torch.complex64', 'torch.complex128'], {}), '(torch.float, torch.double, torch.complex64, torch.complex128)\n', (57858, 57920), False, 'from torch.testing._internal.common_device_type import instantiate_device_type_tests, deviceCountAtLeast, onlyOnCPUAndCUDA, onlyCPU, skipCUDAIfNotRocm, largeCUDATensorTest, precisionOverride, dtypes, onlyCUDA, skipCPUIf, dtypesIfCUDA\n'), ((61994, 62015), 'torch.testing._internal.common_device_type.deviceCountAtLeast', 'deviceCountAtLeast', (['(2)'], {}), '(2)\n', (62012, 62015), False, 'from torch.testing._internal.common_device_type import instantiate_device_type_tests, deviceCountAtLeast, onlyOnCPUAndCUDA, onlyCPU, skipCUDAIfNotRocm, largeCUDATensorTest, precisionOverride, dtypes, onlyCUDA, skipCPUIf, dtypesIfCUDA\n'), ((62292, 62313), 'torch.testing._internal.common_device_type.deviceCountAtLeast', 'deviceCountAtLeast', (['(2)'], {}), '(2)\n', (62310, 62313), False, 'from torch.testing._internal.common_device_type import instantiate_device_type_tests, deviceCountAtLeast, onlyOnCPUAndCUDA, onlyCPU, skipCUDAIfNotRocm, largeCUDATensorTest, precisionOverride, dtypes, onlyCUDA, skipCPUIf, dtypesIfCUDA\n'), ((63253, 63264), 'torch.testing._internal.common_utils.run_tests', 'run_tests', ([], {}), '()\n', (63262, 63264), False, 'from torch.testing._internal.common_utils import TestCase, run_tests, do_test_empty_full, TEST_NUMPY, suppress_warnings, IS_WINDOWS, torch_to_numpy_dtype_dict, slowTest\n'), ((1145, 1165), 'torch.device', 'torch.device', (['device'], {}), 
'(device)\n', (1157, 1165), False, 'import torch\n'), ((5150, 5186), 'torch.zeros', 'torch.zeros', (['(100)', '(100)'], {'device': 'device'}), '(100, 100, device=device)\n', (5161, 5186), False, 'import torch\n'), ((5202, 5233), 'torch.tensor', 'torch.tensor', (['()'], {'device': 'device'}), '((), device=device)\n', (5214, 5233), False, 'import torch\n'), ((5242, 5288), 'torch.zeros', 'torch.zeros', (['(100)', '(100)'], {'device': 'device', 'out': 'res2'}), '(100, 100, device=device, out=res2)\n', (5253, 5288), False, 'import torch\n'), ((5349, 5399), 'torch.zeros', 'torch.zeros', (['(2)', '(2)'], {'device': 'device', 'dtype': 'torch.bool'}), '(2, 2, device=device, dtype=torch.bool)\n', (5360, 5399), False, 'import torch\n'), ((5419, 5498), 'torch.tensor', 'torch.tensor', (['[[False, False], [False, False]]'], {'device': 'device', 'dtype': 'torch.bool'}), '([[False, False], [False, False]], device=device, dtype=torch.bool)\n', (5431, 5498), False, 'import torch\n'), ((5600, 5650), 'torch.zeros', 'torch.zeros', (['(1)', '(1)'], {'device': 'device', 'dtype': 'torch.half'}), '(1, 1, device=device, dtype=torch.half)\n', (5611, 5650), False, 'import torch\n'), ((5670, 5727), 'torch.tensor', 'torch.tensor', (['[[0.0]]'], {'device': 'device', 'dtype': 'torch.float16'}), '([[0.0]], device=device, dtype=torch.float16)\n', (5682, 5727), False, 'import torch\n'), ((5800, 5854), 'torch.zeros', 'torch.zeros', (['(1)', '(1)'], {'device': 'device', 'dtype': 'torch.bfloat16'}), '(1, 1, device=device, dtype=torch.bfloat16)\n', (5811, 5854), False, 'import torch\n'), ((5874, 5932), 'torch.tensor', 'torch.tensor', (['[[0.0]]'], {'device': 'device', 'dtype': 'torch.bfloat16'}), '([[0.0]], device=device, dtype=torch.bfloat16)\n', (5886, 5932), False, 'import torch\n'), ((6008, 6063), 'torch.zeros', 'torch.zeros', (['(2)', '(2)'], {'device': 'device', 'dtype': 'torch.complex64'}), '(2, 2, device=device, dtype=torch.complex64)\n', (6019, 6063), False, 'import torch\n'), ((6083, 6159), 
'torch.tensor', 'torch.tensor', (['[[0.0, 0.0], [0.0, 0.0]]'], {'device': 'device', 'dtype': 'torch.complex64'}), '([[0.0, 0.0], [0.0, 0.0]], device=device, dtype=torch.complex64)\n', (6095, 6159), False, 'import torch\n'), ((6322, 6355), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'device'}), '(shape, device=device)\n', (6333, 6355), False, 'import torch\n'), ((6364, 6406), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'device', 'out': 'out'}), '(shape, device=device, out=out)\n', (6375, 6406), False, 'import torch\n'), ((7243, 7278), 'torch.ones', 'torch.ones', (['(100)', '(100)'], {'device': 'device'}), '(100, 100, device=device)\n', (7253, 7278), False, 'import torch\n'), ((7294, 7325), 'torch.tensor', 'torch.tensor', (['()'], {'device': 'device'}), '((), device=device)\n', (7306, 7325), False, 'import torch\n'), ((7334, 7379), 'torch.ones', 'torch.ones', (['(100)', '(100)'], {'device': 'device', 'out': 'res2'}), '(100, 100, device=device, out=res2)\n', (7344, 7379), False, 'import torch\n'), ((7463, 7512), 'torch.ones', 'torch.ones', (['(1)', '(2)'], {'device': 'device', 'dtype': 'torch.bool'}), '(1, 2, device=device, dtype=torch.bool)\n', (7473, 7512), False, 'import torch\n'), ((7532, 7593), 'torch.tensor', 'torch.tensor', (['[[True, True]]'], {'device': 'device', 'dtype': 'torch.bool'}), '([[True, True]], device=device, dtype=torch.bool)\n', (7544, 7593), False, 'import torch\n'), ((8046, 8096), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['"""torch.FloatTensor"""'], {}), "('torch.FloatTensor')\n", (8075, 8096), False, 'import torch\n'), ((8227, 8265), 'torch.set_default_dtype', 'torch.set_default_dtype', (['torch.float64'], {}), '(torch.float64)\n', (8250, 8265), False, 'import torch\n'), ((8397, 8445), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['torch.FloatTensor'], {}), '(torch.FloatTensor)\n', (8426, 8445), False, 'import torch\n'), ((8579, 8604), 'torch.cuda.is_available', 
'torch.cuda.is_available', ([], {}), '()\n', (8602, 8604), False, 'import torch\n'), ((9465, 9508), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['default_type'], {}), '(default_type)\n', (9494, 9508), False, 'import torch\n'), ((10178, 10209), 'torch.randn', 'torch.randn', (['(3,)'], {'device': '"""cpu"""'}), "((3,), device='cpu')\n", (10189, 10209), False, 'import torch\n'), ((10467, 10492), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10490, 10492), False, 'import torch\n'), ((11971, 11991), 'torch.Tensor', 'torch.Tensor', (['[1, 1]'], {}), '([1, 1])\n', (11983, 11991), False, 'import torch\n'), ((12027, 12047), 'torch.tensor', 'torch.tensor', (['[1, 1]'], {}), '([1, 1])\n', (12039, 12047), False, 'import torch\n'), ((12124, 12161), 'torch.tensor', 'torch.tensor', (['[1, 1]'], {'dtype': 'torch.int'}), '([1, 1], dtype=torch.int)\n', (12136, 12161), False, 'import torch\n'), ((12303, 12325), 'torch.tensor', 'torch.tensor', (['expected'], {}), '(expected)\n', (12315, 12325), False, 'import torch\n'), ((12465, 12504), 'torch.tensor', 'torch.tensor', (['expected'], {'dtype': 'torch.int'}), '(expected, dtype=torch.int)\n', (12477, 12504), False, 'import torch\n'), ((12996, 13059), 'torch.tensor', 'torch.tensor', (['[True, True, False, True, True]'], {'dtype': 'torch.bool'}), '([True, True, False, True, True], dtype=torch.bool)\n', (13008, 13059), False, 'import torch\n'), ((13072, 13125), 'torch.tensor', 'torch.tensor', (['[-1, -1.1, 0, 1, 1.1]'], {'dtype': 'torch.bool'}), '([-1, -1.1, 0, 1, 1.1], dtype=torch.bool)\n', (13084, 13125), False, 'import torch\n'), ((13169, 13224), 'torch.tensor', 'torch.tensor', (['[-0.1, -1.1, 0, 1, 0.1]'], {'dtype': 'torch.bool'}), '([-0.1, -1.1, 0, 1, 0.1], dtype=torch.bool)\n', (13181, 13224), False, 'import torch\n'), ((13268, 13324), 'torch.tensor', 'torch.tensor', (['(-0.3, 0, 0.3, 1, 3 / 7)'], {'dtype': 'torch.bool'}), '((-0.3, 0, 0.3, 1, 3 / 7), dtype=torch.bool)\n', (13280, 
13324), False, 'import torch\n'), ((13335, 13398), 'torch.tensor', 'torch.tensor', (['(True, False, True, True, True)'], {'dtype': 'torch.bool'}), '((True, False, True, True, True), dtype=torch.bool)\n', (13347, 13398), False, 'import torch\n'), ((13442, 13495), 'torch.tensor', 'torch.tensor', (['(-1, 0, -1.1, 1, 1.1)'], {'dtype': 'torch.bool'}), '((-1, 0, -1.1, 1, 1.1), dtype=torch.bool)\n', (13454, 13495), False, 'import torch\n'), ((13880, 13994), 'torch.tensor', 'torch.tensor', (['(int64_max + 1, 0, (int64_max + 1) * 2, (int64_max + 1) * 2 + 1, float64_min)'], {'dtype': 'torch.bool'}), '((int64_max + 1, 0, (int64_max + 1) * 2, (int64_max + 1) * 2 + \n 1, float64_min), dtype=torch.bool)\n', (13892, 13994), False, 'import torch\n'), ((14037, 14145), 'torch.tensor', 'torch.tensor', (['(float64_max, 0, float64_max + 1, float64_min - 1, float64_max + 1e+291)'], {'dtype': 'torch.bool'}), '((float64_max, 0, float64_max + 1, float64_min - 1, float64_max +\n 1e+291), dtype=torch.bool)\n', (14049, 14145), False, 'import torch\n'), ((14187, 14264), 'torch.tensor', 'torch.tensor', (['[True, False, False, True, False, True, True]'], {'dtype': 'torch.bool'}), '([True, False, False, True, False, True, True], dtype=torch.bool)\n', (14199, 14264), False, 'import torch\n'), ((14277, 14365), 'torch.tensor', 'torch.tensor', (['[1e-323, 0.0, 0.0j, 1e-323j, 0.0j, 1 + 2.0j, -1.0j]'], {'dtype': 'torch.bool'}), '([1e-323, 0.0, 0.0j, 1e-323j, 0.0j, 1 + 2.0j, -1.0j], dtype=\n torch.bool)\n', (14289, 14365), False, 'import torch\n'), ((14404, 14460), 'torch.tensor', 'torch.tensor', (['(True, True, True, True)'], {'dtype': 'torch.bool'}), '((True, True, True, True), dtype=torch.bool)\n', (14416, 14460), False, 'import torch\n'), ((15106, 15163), 'torch.randn', 'torch.randn', (['(5)', '(5)'], {'dtype': 'torch.double', 'requires_grad': '(True)'}), '(5, 5, dtype=torch.double, requires_grad=True)\n', (15117, 15163), False, 'import torch\n'), ((15447, 15461), 'torch.randn', 'torch.randn', 
(['(1)'], {}), '(1)\n', (15458, 15461), False, 'import torch\n'), ((21194, 21209), 'torch.tensor', 'torch.tensor', (['x'], {}), '(x)\n', (21206, 21209), False, 'import torch\n'), ((21335, 21360), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (21358, 21360), False, 'import torch\n'), ((22844, 22876), 'torch.range', 'torch.range', (['(0)', '(1)'], {'device': 'device'}), '(0, 1, device=device)\n', (22855, 22876), False, 'import torch\n'), ((22892, 22923), 'torch.tensor', 'torch.tensor', (['()'], {'device': 'device'}), '((), device=device)\n', (22904, 22923), False, 'import torch\n'), ((22932, 22974), 'torch.range', 'torch.range', (['(0)', '(1)'], {'device': 'device', 'out': 'res2'}), '(0, 1, device=device, out=res2)\n', (22943, 22974), False, 'import torch\n'), ((23091, 23123), 'torch.zeros', 'torch.zeros', (['(2)', '(3)'], {'device': 'device'}), '(2, 3, device=device)\n', (23102, 23123), False, 'import torch\n'), ((23203, 23275), 'torch.tensor', 'torch.tensor', (['((0, 0, 1), (0, 2, 3))'], {'device': 'device', 'dtype': 'torch.float32'}), '(((0, 0, 1), (0, 2, 3)), device=device, dtype=torch.float32)\n', (23215, 23275), False, 'import torch\n'), ((23371, 23427), 'torch.tensor', 'torch.tensor', (['(1, 0)'], {'device': 'device', 'dtype': 'torch.float32'}), '((1, 0), device=device, dtype=torch.float32)\n', (23383, 23427), False, 'import torch\n'), ((23443, 23474), 'torch.tensor', 'torch.tensor', (['()'], {'device': 'device'}), '((), device=device)\n', (23455, 23474), False, 'import torch\n'), ((23483, 23529), 'torch.range', 'torch.range', (['(1)', '(0)', '(-1)'], {'device': 'device', 'out': 'res2'}), '(1, 0, -1, device=device, out=res2)\n', (23494, 23529), False, 'import torch\n'), ((23622, 23650), 'torch.ones', 'torch.ones', (['(1)'], {'device': 'device'}), '(1, device=device)\n', (23632, 23650), False, 'import torch\n'), ((23666, 23697), 'torch.tensor', 'torch.tensor', (['()'], {'device': 'device'}), '((), device=device)\n', (23678, 23697), 
False, 'import torch\n'), ((23706, 23752), 'torch.range', 'torch.range', (['(1)', '(1)', '(-1)'], {'device': 'device', 'out': 'res2'}), '(1, 1, -1, device=device, out=res2)\n', (23717, 23752), False, 'import torch\n'), ((23814, 23859), 'torch.range', 'torch.range', (['(1)', '(1)', '(1)'], {'device': 'device', 'out': 'res2'}), '(1, 1, 1, device=device, out=res2)\n', (23825, 23859), False, 'import torch\n'), ((24283, 24305), 'torch.arange', 'torch.arange', (['(0)', '(10000)'], {}), '(0, 10000)\n', (24295, 24305), False, 'import torch\n'), ((24380, 24415), 'torch.tensor', 'torch.tensor', (['[]'], {'dtype': 'torch.int64'}), '([], dtype=torch.int64)\n', (24392, 24415), False, 'import torch\n'), ((24424, 24456), 'torch.arange', 'torch.arange', (['(0)', '(10000)'], {'out': 'res2'}), '(0, 10000, out=res2)\n', (24436, 24456), False, 'import torch\n'), ((24710, 24750), 'torch.arange', 'torch.arange', (['(0)', '(300000 * 3 * 3)'], {'out': 'res'}), '(0, 300000 * 3 * 3, out=res)\n', (24722, 24750), False, 'import torch\n'), ((24886, 24902), 'torch.arange', 'torch.arange', (['(10)'], {}), '(10)\n', (24898, 24902), False, 'import torch\n'), ((24918, 24937), 'torch.arange', 'torch.arange', (['(0)', '(10)'], {}), '(0, 10)\n', (24930, 24937), False, 'import torch\n'), ((25055, 25072), 'torch.zeros', 'torch.zeros', (['(2)', '(3)'], {}), '(2, 3)\n', (25066, 25072), False, 'import torch\n'), ((25138, 25174), 'torch.Tensor', 'torch.Tensor', (['((0, 0, 1), (0, 2, 3))'], {}), '(((0, 0, 1), (0, 2, 3)))\n', (25150, 25174), False, 'import torch\n'), ((25270, 25290), 'torch.Tensor', 'torch.Tensor', (['(1, 0)'], {}), '((1, 0))\n', (25282, 25290), False, 'import torch\n'), ((25306, 25320), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (25318, 25320), False, 'import torch\n'), ((25329, 25362), 'torch.arange', 'torch.arange', (['(1)', '(-1)', '(-1)'], {'out': 'res2'}), '(1, -1, -1, out=res2)\n', (25341, 25362), False, 'import torch\n'), ((25455, 25468), 'torch.ones', 'torch.ones', (['(1)'], 
{}), '(1)\n', (25465, 25468), False, 'import torch\n'), ((25484, 25498), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (25496, 25498), False, 'import torch\n'), ((25507, 25539), 'torch.arange', 'torch.arange', (['(1)', '(0)', '(-1)'], {'out': 'res2'}), '(1, 0, -1, out=res2)\n', (25519, 25539), False, 'import torch\n'), ((25601, 25632), 'torch.arange', 'torch.arange', (['(1)', '(2)', '(1)'], {'out': 'res2'}), '(1, 2, 1, out=res2)\n', (25613, 25632), False, 'import torch\n'), ((26398, 26416), 'torch.arange', 'torch.arange', (['(True)'], {}), '(True)\n', (26410, 26416), False, 'import torch\n'), ((26464, 26483), 'torch.arange', 'torch.arange', (['(False)'], {}), '(False)\n', (26476, 26483), False, 'import torch\n'), ((26665, 26683), 'torch.arange', 'torch.arange', (['(0)', '(5)'], {}), '(0, 5)\n', (26677, 26683), False, 'import torch\n'), ((26810, 26831), 'torch.arange', 'torch.arange', (['(0)', '(5)', '(2)'], {}), '(0, 5, 2)\n', (26822, 26831), False, 'import torch\n'), ((26959, 26985), 'torch.arange', 'torch.arange', (['(0)', '(5 + 1e-06)'], {}), '(0, 5 + 1e-06)\n', (26971, 26985), False, 'import torch\n'), ((27067, 27106), 'torch.arange', 'torch.arange', (['(0)', '(5)'], {'dtype': 'torch.float32'}), '(0, 5, dtype=torch.float32)\n', (27079, 27106), False, 'import torch\n'), ((27120, 27146), 'torch.arange', 'torch.arange', (['(0)', '(5 - 1e-06)'], {}), '(0, 5 - 1e-06)\n', (27132, 27146), False, 'import torch\n'), ((27263, 27295), 'torch.arange', 'torch.arange', (['(10)', '(-1 + 1e-06)', '(-1)'], {}), '(10, -1 + 1e-06, -1)\n', (27275, 27295), False, 'import torch\n'), ((27377, 27422), 'torch.arange', 'torch.arange', (['(10)', '(-1)', '(-1)'], {'dtype': 'torch.float32'}), '(10, -1, -1, dtype=torch.float32)\n', (27389, 27422), False, 'import torch\n'), ((27436, 27468), 'torch.arange', 'torch.arange', (['(10)', '(-1 - 1e-06)', '(-1)'], {}), '(10, -1 - 1e-06, -1)\n', (27448, 27468), False, 'import torch\n'), ((27618, 27643), 'torch.zeros', 'torch.zeros', ([], {'size': 
'(1, 49)'}), '(size=(1, 49))\n', (27629, 27643), False, 'import torch\n'), ((28207, 28243), 'torch.testing.get_all_device_types', 'torch.testing.get_all_device_types', ([], {}), '()\n', (28241, 28243), False, 'import torch\n'), ((29548, 29573), 'torch.get_default_dtype', 'torch.get_default_dtype', ([], {}), '()\n', (29571, 29573), False, 'import torch\n'), ((29582, 29620), 'torch.set_default_dtype', 'torch.set_default_dtype', (['torch.float32'], {}), '(torch.float32)\n', (29605, 29620), False, 'import torch\n'), ((31304, 31340), 'torch.set_default_dtype', 'torch.set_default_dtype', (['saved_dtype'], {}), '(saved_dtype)\n', (31327, 31340), False, 'import torch\n'), ((33337, 33367), 'torch.testing.get_all_dtypes', 'torch.testing.get_all_dtypes', ([], {}), '()\n', (33365, 33367), False, 'import torch\n'), ((35740, 35793), 'torch.arange', 'torch.arange', (['bigint'], {'dtype': 'torch.long', 'device': 'device'}), '(bigint, dtype=torch.long, device=device)\n', (35752, 35793), False, 'import torch\n'), ((35871, 35933), 'torch.linspace', 'torch.linspace', (['(0)', '(1)', 'bigint'], {'dtype': 'torch.float', 'device': 'device'}), '(0, 1, bigint, dtype=torch.float, device=device)\n', (35885, 35933), False, 'import torch\n'), ((36002, 36067), 'torch.logspace', 'torch.logspace', (['(0)', '(1)', 'bigint', '(2)'], {'dtype': 'torch.float', 'device': 'device'}), '(0, 1, bigint, 2, dtype=torch.float, device=device)\n', (36016, 36067), False, 'import torch\n'), ((36227, 36247), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (36239, 36247), False, 'import torch\n'), ((36265, 36303), 'torch.tensor', 'torch.tensor', (['(1, 2, 3)'], {'device': 'device'}), '((1, 2, 3), device=device)\n', (36277, 36303), False, 'import torch\n'), ((36934, 36996), 'torch.tensor', 'torch.tensor', (['[[0, 1, 1], [2, 0, 1], [2, 1, 0]]'], {'device': 'device'}), '([[0, 1, 1], [2, 0, 1], [2, 1, 0]], device=device)\n', (36946, 36996), False, 'import torch\n'), ((37119, 37172), 
'torch.sparse_coo_tensor', 'torch.sparse_coo_tensor', (['indices', 'values', 'sparse_size'], {}), '(indices, values, sparse_size)\n', (37142, 37172), False, 'import torch\n'), ((37264, 37338), 'torch.sparse_coo_tensor', 'torch.sparse_coo_tensor', (['indices', 'values', 'sparse_size'], {'dtype': 'torch.float64'}), '(indices, values, sparse_size, dtype=torch.float64)\n', (37287, 37338), False, 'import torch\n'), ((41572, 41626), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['torch.cuda.DoubleTensor'], {}), '(torch.cuda.DoubleTensor)\n', (41601, 41626), False, 'import torch\n'), ((41635, 41673), 'torch.set_default_dtype', 'torch.set_default_dtype', (['torch.float32'], {}), '(torch.float32)\n', (41658, 41673), False, 'import torch\n'), ((41815, 41853), 'torch.set_default_dtype', 'torch.set_default_dtype', (['torch.float64'], {}), '(torch.float64)\n', (41838, 41853), False, 'import torch\n'), ((41995, 42036), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['saved_type'], {}), '(saved_type)\n', (42024, 42036), False, 'import torch\n'), ((42155, 42208), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['torch.cuda.FloatTensor'], {}), '(torch.cuda.FloatTensor)\n', (42184, 42208), False, 'import torch\n'), ((42221, 42240), 'torch.zeros', 'torch.zeros', (['(5, 5)'], {}), '((5, 5))\n', (42232, 42240), False, 'import torch\n'), ((42330, 42384), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['torch.cuda.DoubleTensor'], {}), '(torch.cuda.DoubleTensor)\n', (42359, 42384), False, 'import torch\n'), ((42397, 42416), 'torch.zeros', 'torch.zeros', (['(5, 5)'], {}), '((5, 5))\n', (42408, 42416), False, 'import torch\n'), ((42506, 42547), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['saved_type'], {}), '(saved_type)\n', (42535, 42547), False, 'import torch\n'), ((42737, 42783), 'torch.arange', 'torch.arange', (['(0)', '(10)'], {'dtype': 'dtype', 'device': '"""cpu"""'}), "(0, 10, 
dtype=dtype, device='cpu')\n", (42749, 42783), False, 'import torch\n'), ((42808, 42855), 'torch.arange', 'torch.arange', (['(0)', '(10)'], {'dtype': 'dtype', 'device': 'device'}), '(0, 10, dtype=dtype, device=device)\n', (42820, 42855), False, 'import torch\n'), ((43011, 43074), 'torch.tensor', 'torch.tensor', (['[0, 1, 2, 3]'], {'dtype': 'torch.bfloat16', 'device': 'device'}), '([0, 1, 2, 3], dtype=torch.bfloat16, device=device)\n', (43023, 43074), False, 'import torch\n'), ((43101, 43156), 'torch.arange', 'torch.arange', (['(0)', '(4)'], {'dtype': 'torch.bfloat16', 'device': 'device'}), '(0, 4, dtype=torch.bfloat16, device=device)\n', (43113, 43156), False, 'import torch\n'), ((43250, 43310), 'torch.tensor', 'torch.tensor', (['[0, 2, 4]'], {'dtype': 'torch.bfloat16', 'device': 'device'}), '([0, 2, 4], dtype=torch.bfloat16, device=device)\n', (43262, 43310), False, 'import torch\n'), ((43337, 43400), 'torch.arange', 'torch.arange', (['(0)', '(6)'], {'step': '(2)', 'dtype': 'torch.bfloat16', 'device': 'device'}), '(0, 6, step=2, dtype=torch.bfloat16, device=device)\n', (43349, 43400), False, 'import torch\n'), ((43687, 43702), 'random.random', 'random.random', ([], {}), '()\n', (43700, 43702), False, 'import random\n'), ((43755, 43813), 'torch.linspace', 'torch.linspace', (['_from', 'to', '(137)'], {'device': 'device', 'dtype': 'dtype'}), '(_from, to, 137, device=device, dtype=dtype)\n', (43769, 43813), False, 'import torch\n'), ((43829, 43873), 'torch.tensor', 'torch.tensor', (['()'], {'device': 'device', 'dtype': 'dtype'}), '((), device=device, dtype=dtype)\n', (43841, 43873), False, 'import torch\n'), ((43882, 43935), 'torch.linspace', 'torch.linspace', (['_from', 'to', '(137)'], {'dtype': 'dtype', 'out': 'res2'}), '(_from, to, 137, dtype=dtype, out=res2)\n', (43896, 43935), False, 'import torch\n'), ((45547, 45614), 'torch.linspace', 'torch.linspace', (['start', '(start + 200)', '(401)'], {'device': 'device', 'dtype': 'dtype'}), '(start, start + 200, 401, 
device=device, dtype=dtype)\n', (45561, 45614), False, 'import torch\n'), ((47094, 47139), 'torch.zeros', 'torch.zeros', (['(2)', '(3)'], {'device': 'device', 'dtype': 'dtype'}), '(2, 3, device=device, dtype=dtype)\n', (47105, 47139), False, 'import torch\n'), ((48541, 48603), 'torch.linspace', 'torch.linspace', (['(0)', '(10)'], {'steps': 'steps', 'dtype': 'dtype', 'device': 'device'}), '(0, 10, steps=steps, dtype=dtype, device=device)\n', (48555, 48603), False, 'import torch\n'), ((48616, 48650), 'torch.linspace', 'torch.linspace', (['(0)', '(10)'], {'steps': 'steps'}), '(0, 10, steps=steps)\n', (48630, 48650), False, 'import torch\n'), ((49501, 49564), 'torch.logspace', 'torch.logspace', (['(1)', '(1.1)'], {'steps': 'steps', 'dtype': 'dtype', 'device': 'device'}), '(1, 1.1, steps=steps, dtype=dtype, device=device)\n', (49515, 49564), False, 'import torch\n'), ((49577, 49612), 'torch.logspace', 'torch.logspace', (['(1)', '(1.1)'], {'steps': 'steps'}), '(1, 1.1, steps=steps)\n', (49591, 49612), False, 'import torch\n'), ((49772, 49843), 'torch.logspace', 'torch.logspace', (['(1)', '(1.1)'], {'steps': 'steps', 'base': '(2)', 'dtype': 'dtype', 'device': 'device'}), '(1, 1.1, steps=steps, base=2, dtype=dtype, device=device)\n', (49786, 49843), False, 'import torch\n'), ((49856, 49899), 'torch.logspace', 'torch.logspace', (['(1)', '(1.1)'], {'steps': 'steps', 'base': '(2)'}), '(1, 1.1, steps=steps, base=2)\n', (49870, 49899), False, 'import torch\n'), ((51449, 51464), 'random.random', 'random.random', ([], {}), '()\n', (51462, 51464), False, 'import random\n'), ((51517, 51575), 'torch.logspace', 'torch.logspace', (['_from', 'to', '(137)'], {'device': 'device', 'dtype': 'dtype'}), '(_from, to, 137, device=device, dtype=dtype)\n', (51531, 51575), False, 'import torch\n'), ((51591, 51635), 'torch.tensor', 'torch.tensor', (['()'], {'device': 'device', 'dtype': 'dtype'}), '((), device=device, dtype=dtype)\n', (51603, 51635), False, 'import torch\n'), ((51644, 51712), 
'torch.logspace', 'torch.logspace', (['_from', 'to', '(137)'], {'device': 'device', 'dtype': 'dtype', 'out': 'res2'}), '(_from, to, 137, device=device, dtype=dtype, out=res2)\n', (51658, 51712), False, 'import torch\n'), ((53275, 53320), 'torch.zeros', 'torch.zeros', (['(2)', '(3)'], {'device': 'device', 'dtype': 'dtype'}), '(2, 3, device=device, dtype=dtype)\n', (53286, 53320), False, 'import torch\n'), ((53696, 53721), 'torch.get_default_dtype', 'torch.get_default_dtype', ([], {}), '()\n', (53719, 53721), False, 'import torch\n'), ((53730, 53760), 'torch.set_default_dtype', 'torch.set_default_dtype', (['dtype'], {}), '(dtype)\n', (53753, 53760), False, 'import torch\n'), ((53816, 53838), 'torch.full', 'torch.full', (['size', '(True)'], {}), '(size, True)\n', (53826, 53838), False, 'import torch\n'), ((53943, 53962), 'torch.full', 'torch.full', (['size', '(1)'], {}), '(size, 1)\n', (53953, 53962), False, 'import torch\n'), ((54065, 54086), 'torch.full', 'torch.full', (['size', '(1.0)'], {}), '(size, 1.0)\n', (54075, 54086), False, 'import torch\n'), ((54174, 54200), 'torch.full', 'torch.full', (['size', '(1 + 1.0j)'], {}), '(size, 1 + 1.0j)\n', (54184, 54200), False, 'import torch\n'), ((54330, 54367), 'torch.set_default_dtype', 'torch.set_default_dtype', (['prev_default'], {}), '(prev_default)\n', (54353, 54367), False, 'import torch\n'), ((54438, 54488), 'torch.empty', 'torch.empty', (['size'], {'device': 'device', 'dtype': 'torch.long'}), '(size, device=device, dtype=torch.long)\n', (54449, 54488), False, 'import torch\n'), ((59572, 59589), 'torch.randperm', 'torch.randperm', (['(0)'], {}), '(0)\n', (59586, 59589), False, 'import torch\n'), ((59605, 59648), 'torch.tensor', 'torch.tensor', (['(5)'], {'dtype': 'dtype', 'device': 'device'}), '(5, dtype=dtype, device=device)\n', (59617, 59648), False, 'import torch\n'), ((59657, 59684), 'torch.randperm', 'torch.randperm', (['(0)'], {'out': 'res2'}), '(0, out=res2)\n', (59671, 59684), False, 'import torch\n'), 
((61074, 61109), 'torch.ones', 'torch.ones', (['(100)', '(100)'], {'device': 'device'}), '(100, 100, device=device)\n', (61084, 61109), False, 'import torch\n'), ((61126, 61151), 'torch.ones_like', 'torch.ones_like', (['expected'], {}), '(expected)\n', (61141, 61151), False, 'import torch\n'), ((61243, 61302), 'torch.tensor', 'torch.tensor', (['[True, True]'], {'device': 'device', 'dtype': 'torch.bool'}), '([True, True], device=device, dtype=torch.bool)\n', (61255, 61302), False, 'import torch\n'), ((61318, 61343), 'torch.ones_like', 'torch.ones_like', (['expected'], {}), '(expected)\n', (61333, 61343), False, 'import torch\n'), ((61864, 61902), 'torch.zeros', 'torch.zeros', (['(100, 100)'], {'device': 'device'}), '((100, 100), device=device)\n', (61875, 61902), False, 'import torch\n'), ((61920, 61946), 'torch.zeros_like', 'torch.zeros_like', (['expected'], {}), '(expected)\n', (61936, 61946), False, 'import torch\n'), ((62091, 62131), 'torch.zeros', 'torch.zeros', (['(100)', '(100)'], {'device': 'devices[0]'}), '(100, 100, device=devices[0])\n', (62102, 62131), False, 'import torch\n'), ((62144, 62205), 'torch.randn', 'torch.randn', (['(100)', '(100)'], {'device': 'devices[1]', 'dtype': 'torch.float32'}), '(100, 100, device=devices[1], dtype=torch.float32)\n', (62155, 62205), False, 'import torch\n'), ((62223, 62242), 'torch.zeros_like', 'torch.zeros_like', (['x'], {}), '(x)\n', (62239, 62242), False, 'import torch\n'), ((62388, 62427), 'torch.ones', 'torch.ones', (['(100)', '(100)'], {'device': 'devices[0]'}), '(100, 100, device=devices[0])\n', (62398, 62427), False, 'import torch\n'), ((62440, 62501), 'torch.randn', 'torch.randn', (['(100)', '(100)'], {'device': 'devices[1]', 'dtype': 'torch.float32'}), '(100, 100, device=devices[1], dtype=torch.float32)\n', (62451, 62501), False, 'import torch\n'), ((62519, 62537), 'torch.ones_like', 'torch.ones_like', (['x'], {}), '(x)\n', (62534, 62537), False, 'import torch\n'), ((62781, 62831), 'torch.empty', 
'torch.empty', (['(5,)'], {'device': 'device', 'dtype': 'torch.long'}), '((5,), device=device, dtype=torch.long)\n', (62792, 62831), False, 'import torch\n'), ((1768, 1792), 'torch.device', 'torch.device', (['devices[0]'], {}), '(devices[0])\n', (1780, 1792), False, 'import torch\n'), ((6509, 6570), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'device', 'dtype': 'torch.int64', 'out': 'out'}), '(shape, device=device, dtype=torch.int64, out=out)\n', (6520, 6570), False, 'import torch\n'), ((6629, 6696), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'device', 'layout': 'torch.sparse_coo', 'out': 'out'}), '(shape, device=device, layout=torch.sparse_coo, out=out)\n', (6640, 6696), False, 'import torch\n'), ((6753, 6786), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'device'}), '(shape, device=device)\n', (6764, 6786), False, 'import torch\n'), ((6813, 6872), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'device', 'dtype': 'out.dtype', 'out': 'out'}), '(shape, device=device, dtype=out.dtype, out=out)\n', (6824, 6872), False, 'import torch\n'), ((6899, 6932), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'device'}), '(shape, device=device)\n', (6910, 6932), False, 'import torch\n'), ((6959, 7023), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'device', 'layout': 'torch.strided', 'out': 'out'}), '(shape, device=device, layout=torch.strided, out=out)\n', (6970, 7023), False, 'import torch\n'), ((7050, 7083), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'device'}), '(shape, device=device)\n', (7061, 7083), False, 'import torch\n'), ((7110, 7152), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'device', 'out': 'out'}), '(shape, device=device, out=out)\n', (7121, 7152), False, 'import torch\n'), ((7825, 7850), 'torch.get_default_dtype', 'torch.get_default_dtype', ([], {}), '()\n', (7848, 7850), False, 'import torch\n'), ((8134, 8159), 'torch.get_default_dtype', 'torch.get_default_dtype', ([], {}), '()\n', (8157, 8159), 
False, 'import torch\n'), ((8303, 8328), 'torch.get_default_dtype', 'torch.get_default_dtype', ([], {}), '()\n', (8326, 8328), False, 'import torch\n'), ((8483, 8508), 'torch.get_default_dtype', 'torch.get_default_dtype', ([], {}), '()\n', (8506, 8508), False, 'import torch\n'), ((8618, 8671), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['torch.cuda.FloatTensor'], {}), '(torch.cuda.FloatTensor)\n', (8647, 8671), False, 'import torch\n'), ((8890, 8928), 'torch.set_default_dtype', 'torch.set_default_dtype', (['torch.float64'], {}), '(torch.float64)\n', (8913, 8928), False, 'import torch\n'), ((10861, 10914), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['torch.cuda.FloatTensor'], {}), '(torch.cuda.FloatTensor)\n', (10890, 10914), False, 'import torch\n'), ((11202, 11255), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['torch.cuda.FloatTensor'], {}), '(torch.cuda.FloatTensor)\n', (11231, 11255), False, 'import torch\n'), ((11268, 11311), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['default_type'], {}), '(default_type)\n', (11297, 11311), False, 'import torch\n'), ((11329, 11361), 'torch.randn', 'torch.randn', (['(3,)'], {'device': '"""cuda"""'}), "((3,), device='cuda')\n", (11340, 11361), False, 'import torch\n'), ((12422, 12447), 'torch.ones_like', 'torch.ones_like', (['expected'], {}), '(expected)\n', (12437, 12447), False, 'import torch\n'), ((13548, 13572), 'torch.iinfo', 'torch.iinfo', (['torch.int64'], {}), '(torch.int64)\n', (13559, 13572), False, 'import torch\n'), ((13597, 13621), 'torch.iinfo', 'torch.iinfo', (['torch.int64'], {}), '(torch.int64)\n', (13608, 13621), False, 'import torch\n'), ((13648, 13674), 'torch.finfo', 'torch.finfo', (['torch.float64'], {}), '(torch.float64)\n', (13659, 13674), False, 'import torch\n'), ((13701, 13727), 'torch.finfo', 'torch.finfo', (['torch.float64'], {}), '(torch.float64)\n', (13712, 13727), False, 'import torch\n'), ((15213, 
15233), 'torch.tensor', 'torch.tensor', (['source'], {}), '(source)\n', (15225, 15233), False, 'import torch\n'), ((15267, 15308), 'torch.tensor', 'torch.tensor', (['source'], {'requires_grad': '(False)'}), '(source, requires_grad=False)\n', (15279, 15308), False, 'import torch\n'), ((15342, 15382), 'torch.tensor', 'torch.tensor', (['source'], {'requires_grad': '(True)'}), '(source, requires_grad=True)\n', (15354, 15382), False, 'import torch\n'), ((15726, 15749), 'torch.as_tensor', 'torch.as_tensor', (['source'], {}), '(source)\n', (15741, 15749), False, 'import torch\n'), ((15837, 15879), 'torch.as_tensor', 'torch.as_tensor', (['source'], {'dtype': 'torch.float'}), '(source, dtype=torch.float)\n', (15852, 15879), False, 'import torch\n'), ((16102, 16127), 'torch.get_default_dtype', 'torch.get_default_dtype', ([], {}), '()\n', (16125, 16127), False, 'import torch\n'), ((16140, 16178), 'torch.set_default_dtype', 'torch.set_default_dtype', (['default_dtype'], {}), '(default_dtype)\n', (16163, 16178), False, 'import torch\n'), ((17830, 17866), 'torch.set_default_dtype', 'torch.set_default_dtype', (['saved_dtype'], {}), '(saved_dtype)\n', (17853, 17866), False, 'import torch\n'), ((18103, 18127), 'torch.ByteTensor', 'torch.ByteTensor', (['[1, 1]'], {}), '([1, 1])\n', (18119, 18127), False, 'import torch\n'), ((18560, 18585), 'torch.ones_like', 'torch.ones_like', (['expected'], {}), '(expected)\n', (18575, 18585), False, 'import torch\n'), ((18825, 18840), 'numpy.array', 'np.array', (['[5.0]'], {}), '([5.0])\n', (18833, 18840), True, 'import numpy as np\n'), ((18859, 18874), 'torch.tensor', 'torch.tensor', (['a'], {}), '(a)\n', (18871, 18874), False, 'import torch\n'), ((19045, 19070), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (19068, 19070), False, 'import torch\n'), ((20368, 20383), 'torch.tensor', 'torch.tensor', (['x'], {}), '(x)\n', (20380, 20383), False, 'import torch\n'), ((20385, 20403), 'torch.as_tensor', 'torch.as_tensor', 
(['x'], {}), '(x)\n', (20400, 20403), False, 'import torch\n'), ((20430, 20466), 'torch.tensor', 'torch.tensor', (['x'], {'dtype': 'torch.float32'}), '(x, dtype=torch.float32)\n', (20442, 20466), False, 'import torch\n'), ((20468, 20507), 'torch.as_tensor', 'torch.as_tensor', (['x'], {'dtype': 'torch.float32'}), '(x, dtype=torch.float32)\n', (20483, 20507), False, 'import torch\n'), ((20663, 20678), 'torch.tensor', 'torch.tensor', (['z'], {}), '(z)\n', (20675, 20678), False, 'import torch\n'), ((20691, 20709), 'torch.as_tensor', 'torch.as_tensor', (['z'], {}), '(z)\n', (20706, 20709), False, 'import torch\n'), ((20897, 20912), 'torch.tensor', 'torch.tensor', (['z'], {}), '(z)\n', (20909, 20912), False, 'import torch\n'), ((20925, 20943), 'torch.as_tensor', 'torch.as_tensor', (['z'], {}), '(z)\n', (20940, 20943), False, 'import torch\n'), ((21072, 21087), 'torch.tensor', 'torch.tensor', (['z'], {}), '(z)\n', (21084, 21087), False, 'import torch\n'), ((21100, 21118), 'torch.as_tensor', 'torch.as_tensor', (['z'], {}), '(z)\n', (21115, 21118), False, 'import torch\n'), ((21235, 21253), 'torch.as_tensor', 'torch.as_tensor', (['y'], {}), '(y)\n', (21250, 21253), False, 'import torch\n'), ((21283, 21322), 'torch.as_tensor', 'torch.as_tensor', (['y'], {'dtype': 'torch.float32'}), '(y, dtype=torch.float32)\n', (21298, 21322), False, 'import torch\n'), ((22105, 22144), 'torch.as_tensor', 'torch.as_tensor', (['n'], {'dtype': 'torch.float64'}), '(n, dtype=torch.float64)\n', (22120, 22144), False, 'import torch\n'), ((22401, 22426), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (22424, 22426), False, 'import torch\n'), ((24009, 24045), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (24032, 24045), False, 'import warnings\n'), ((24064, 24097), 'torch.range', 'torch.range', (['(0)', '(10)'], {'device': 'device'}), '(0, 10, device=device)\n', (24075, 24097), False, 'import torch\n'), ((24791, 24822), 
'torch.arange', 'torch.arange', (['(0)', '(300000 * 3 * 3)'], {}), '(0, 300000 * 3 * 3)\n', (24803, 24822), False, 'import torch\n'), ((29315, 29380), 'torch.arange', 'torch.arange', (['(-4.0)', '(4.0)', '(0.01)'], {'dtype': 'torch.float32', 'device': 'device'}), '(-4.0, 4.0, 0.01, dtype=torch.float32, device=device)\n', (29327, 29380), False, 'import torch\n'), ((33458, 33486), 'itertools.product', 'product', (['[3, 5, 7]'], {'repeat': '(2)'}), '([3, 5, 7], repeat=2)\n', (33465, 33486), False, 'from itertools import product\n'), ((34569, 34630), 'torch.linspace', 'torch.linspace', (['start', 'end', 'steps'], {'device': 'device', 'dtype': 'dtype'}), '(start, end, steps, device=device, dtype=dtype)\n', (34583, 34630), False, 'import torch\n'), ((34647, 34717), 'numpy.linspace', 'np.linspace', (['start', 'end', 'steps'], {'dtype': 'torch_to_numpy_dtype_dict[dtype]'}), '(start, end, steps, dtype=torch_to_numpy_dtype_dict[dtype])\n', (34658, 34717), True, 'import numpy as np\n'), ((35282, 35343), 'torch.logspace', 'torch.logspace', (['start', 'end', 'steps'], {'device': 'device', 'dtype': 'dtype'}), '(start, end, steps, device=device, dtype=dtype)\n', (35296, 35343), False, 'import torch\n'), ((35360, 35430), 'numpy.logspace', 'np.logspace', (['start', 'end', 'steps'], {'dtype': 'torch_to_numpy_dtype_dict[dtype]'}), '(start, end, steps, dtype=torch_to_numpy_dtype_dict[dtype])\n', (35371, 35430), True, 'import numpy as np\n'), ((37985, 38015), 'torch.testing.get_all_dtypes', 'torch.testing.get_all_dtypes', ([], {}), '()\n', (38013, 38015), False, 'import torch\n'), ((41760, 41780), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (41772, 41780), False, 'import torch\n'), ((41940, 41960), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (41952, 41960), False, 'import torch\n'), ((43724, 43739), 'random.random', 'random.random', ([], {}), '()\n', (43737, 43739), False, 'import random\n'), ((44038, 44092), 'torch.linspace', 'torch.linspace', 
(['(10)', '(20)', '(11)'], {'device': 'device', 'dtype': 'dtype'}), '(10, 20, 11, device=device, dtype=dtype)\n', (44052, 44092), False, 'import torch\n'), ((44704, 44758), 'torch.linspace', 'torch.linspace', (['(0)', '(1000 * 3 * 3)', '(1000 * 3 * 3)'], {'out': 'res'}), '(0, 1000 * 3 * 3, 1000 * 3 * 3, out=res)\n', (44718, 44758), False, 'import torch\n'), ((45026, 45077), 'torch.linspace', 'torch.linspace', (['(0)', '(1)', '(1)'], {'device': 'device', 'dtype': 'dtype'}), '(0, 1, 1, device=device, dtype=dtype)\n', (45040, 45077), False, 'import torch\n'), ((45104, 45146), 'torch.zeros', 'torch.zeros', (['(1)'], {'device': 'device', 'dtype': 'dtype'}), '(1, device=device, dtype=dtype)\n', (45115, 45146), False, 'import torch\n'), ((46084, 46135), 'torch.linspace', 'torch.linspace', (['(2)', '(0)', '(3)'], {'device': 'device', 'dtype': 'dtype'}), '(2, 0, 3, device=device, dtype=dtype)\n', (46098, 46135), False, 'import torch\n'), ((46162, 46213), 'torch.tensor', 'torch.tensor', (['(2, 1, 0)'], {'device': 'device', 'dtype': 'dtype'}), '((2, 1, 0), device=device, dtype=dtype)\n', (46174, 46213), False, 'import torch\n'), ((46644, 46751), 'torch.linspace', 'torch.linspace', (['(0)', '(999999 + (999999.0j if dtype.is_complex else 0))', '(1000000)'], {'device': 'device', 'dtype': 'dtype'}), '(0, 999999 + (999999.0j if dtype.is_complex else 0), 1000000,\n device=device, dtype=dtype)\n', (46658, 46751), False, 'import torch\n'), ((47240, 47304), 'torch.tensor', 'torch.tensor', (['((0, 0, 1), (0, 2, 3))'], {'device': 'device', 'dtype': 'dtype'}), '(((0, 0, 1), (0, 2, 3)), device=device, dtype=dtype)\n', (47252, 47304), False, 'import torch\n'), ((43469, 43537), 'torch.testing.get_all_dtypes', 'torch.testing.get_all_dtypes', ([], {'include_bool': '(False)', 'include_half': '(False)'}), '(include_bool=False, include_half=False)\n', (43497, 43537), False, 'import torch\n'), ((43558, 43625), 'torch.testing.get_all_dtypes', 'torch.testing.get_all_dtypes', ([], {'include_bool': 
'(False)', 'include_half': '(True)'}), '(include_bool=False, include_half=True)\n', (43586, 43625), False, 'import torch\n'), ((47754, 47796), 'torch.linspace', 'torch.linspace', (['(1.0j)', '(-2.0j)'], {'device': 'device'}), '(1.0j, -2.0j, device=device)\n', (47768, 47796), False, 'import torch\n'), ((51486, 51501), 'random.random', 'random.random', ([], {}), '()\n', (51499, 51501), False, 'import random\n'), ((51893, 51944), 'torch.logspace', 'torch.logspace', (['(0)', '(1)', '(1)'], {'device': 'device', 'dtype': 'dtype'}), '(0, 1, 1, device=device, dtype=dtype)\n', (51907, 51944), False, 'import torch\n'), ((51971, 52012), 'torch.ones', 'torch.ones', (['(1)'], {'device': 'device', 'dtype': 'dtype'}), '(1, device=device, dtype=dtype)\n', (51981, 52012), False, 'import torch\n'), ((52515, 52581), 'torch.logspace', 'torch.logspace', (['(0)', '(6)'], {'steps': '(49)', 'base': '(2)', 'device': 'device', 'dtype': 'dtype'}), '(0, 6, steps=49, base=2, device=device, dtype=dtype)\n', (52529, 52581), False, 'import torch\n'), ((52691, 52745), 'torch.logspace', 'torch.logspace', (['(1)', '(1)', '(1)', '(2)'], {'device': 'device', 'dtype': 'dtype'}), '(1, 1, 1, 2, device=device, dtype=dtype)\n', (52705, 52745), False, 'import torch\n'), ((52844, 52898), 'torch.logspace', 'torch.logspace', (['(0)', '(2)', '(3)', '(2)'], {'device': 'device', 'dtype': 'dtype'}), '(0, 2, 3, 2, device=device, dtype=dtype)\n', (52858, 52898), False, 'import torch\n'), ((52925, 52976), 'torch.tensor', 'torch.tensor', (['(1, 2, 4)'], {'device': 'device', 'dtype': 'dtype'}), '((1, 2, 4), device=device, dtype=dtype)\n', (52937, 52976), False, 'import torch\n'), ((53063, 53114), 'torch.logspace', 'torch.logspace', (['(1)', '(0)', '(2)'], {'device': 'device', 'dtype': 'dtype'}), '(1, 0, 2, device=device, dtype=dtype)\n', (53077, 53114), False, 'import torch\n'), ((53141, 53190), 'torch.tensor', 'torch.tensor', (['(10, 1)'], {'device': 'device', 'dtype': 'dtype'}), '((10, 1), device=device, 
dtype=dtype)\n', (53153, 53190), False, 'import torch\n'), ((53444, 53508), 'torch.tensor', 'torch.tensor', (['((0, 1, 2), (0, 4, 8))'], {'device': 'device', 'dtype': 'dtype'}), '(((0, 1, 2), (0, 4, 8)), device=device, dtype=dtype)\n', (53456, 53508), False, 'import torch\n'), ((54608, 54658), 'torch.full', 'torch.full', (['o.shape', '(1.0)'], {'dtype': 'torch.float', 'out': 'o'}), '(o.shape, 1.0, dtype=torch.float, out=o)\n', (54618, 54658), False, 'import torch\n'), ((55577, 55615), 'torch.empty', 'torch.empty', (['size'], {'dtype': 'torch.float32'}), '(size, dtype=torch.float32)\n', (55588, 55615), False, 'import torch\n'), ((55833, 55869), 'torch.empty', 'torch.empty', (['size'], {'dtype': 'torch.int64'}), '(size, dtype=torch.int64)\n', (55844, 55869), False, 'import torch\n'), ((56404, 56421), 'torch.Generator', 'torch.Generator', ([], {}), '()\n', (56419, 56421), False, 'import torch\n'), ((56483, 56537), 'torch.randint', 'torch.randint', (['(0)', '(6)', '(SIZE, SIZE)'], {'generator': 'generator'}), '(0, 6, (SIZE, SIZE), generator=generator)\n', (56496, 56537), False, 'import torch\n'), ((56557, 56591), 'torch.empty', 'torch.empty', (['()'], {'dtype': 'torch.int64'}), '((), dtype=torch.int64)\n', (56568, 56591), False, 'import torch\n'), ((56644, 56708), 'torch.randint', 'torch.randint', (['(0)', '(6)', '(SIZE, SIZE)'], {'generator': 'generator', 'out': 'res2'}), '(0, 6, (SIZE, SIZE), generator=generator, out=res2)\n', (56657, 56708), False, 'import torch\n'), ((56768, 56819), 'torch.randint', 'torch.randint', (['(6)', '(SIZE, SIZE)'], {'generator': 'generator'}), '(6, (SIZE, SIZE), generator=generator)\n', (56781, 56819), False, 'import torch\n'), ((56839, 56873), 'torch.empty', 'torch.empty', (['()'], {'dtype': 'torch.int64'}), '((), dtype=torch.int64)\n', (56850, 56873), False, 'import torch\n'), ((56926, 56987), 'torch.randint', 'torch.randint', (['(6)', '(SIZE, SIZE)'], {'out': 'res4', 'generator': 'generator'}), '(6, (SIZE, SIZE), out=res4, 
generator=generator)\n', (56939, 56987), False, 'import torch\n'), ((57560, 57585), 'torch.manual_seed', 'torch.manual_seed', (['(123456)'], {}), '(123456)\n', (57577, 57585), False, 'import torch\n'), ((57605, 57656), 'torch.randn', 'torch.randn', (['size', 'size'], {'dtype': 'dtype', 'device': 'device'}), '(size, size, dtype=dtype, device=device)\n', (57616, 57656), False, 'import torch\n'), ((57676, 57720), 'torch.tensor', 'torch.tensor', (['[]'], {'dtype': 'dtype', 'device': 'device'}), '([], dtype=dtype, device=device)\n', (57688, 57720), False, 'import torch\n'), ((57733, 57758), 'torch.manual_seed', 'torch.manual_seed', (['(123456)'], {}), '(123456)\n', (57750, 57758), False, 'import torch\n'), ((57771, 57804), 'torch.randn', 'torch.randn', (['size', 'size'], {'out': 'res2'}), '(size, size, out=res2)\n', (57782, 57804), False, 'import torch\n'), ((58023, 58048), 'torch.manual_seed', 'torch.manual_seed', (['(123456)'], {}), '(123456)\n', (58040, 58048), False, 'import torch\n'), ((58068, 58118), 'torch.rand', 'torch.rand', (['size', 'size'], {'dtype': 'dtype', 'device': 'device'}), '(size, size, dtype=dtype, device=device)\n', (58078, 58118), False, 'import torch\n'), ((58138, 58182), 'torch.tensor', 'torch.tensor', (['[]'], {'dtype': 'dtype', 'device': 'device'}), '([], dtype=dtype, device=device)\n', (58150, 58182), False, 'import torch\n'), ((58195, 58220), 'torch.manual_seed', 'torch.manual_seed', (['(123456)'], {}), '(123456)\n', (58212, 58220), False, 'import torch\n'), ((58233, 58265), 'torch.rand', 'torch.rand', (['size', 'size'], {'out': 'res2'}), '(size, size, out=res2)\n', (58243, 58265), False, 'import torch\n'), ((60162, 60204), 'torch.empty', 'torch.empty', (['(0)'], {'dtype': 'dtype', 'device': 'device'}), '(0, dtype=dtype, device=device)\n', (60173, 60204), False, 'import torch\n'), ((60217, 60249), 'torch.randperm', 'torch.randperm', (['small_n'], {'out': 'res'}), '(small_n, out=res)\n', (60231, 60249), False, 'import torch\n'), ((60757, 
60801), 'torch.randperm', 'torch.randperm', (['n'], {'out': 'non_contiguous_tensor'}), '(n, out=non_contiguous_tensor)\n', (60771, 60801), False, 'import torch\n'), ((61514, 61528), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (61526, 61528), False, 'import torch\n'), ((61566, 61583), 'torch.randn', 'torch.randn', (['(4)', '(4)'], {}), '(4, 4)\n', (61577, 61583), False, 'import torch\n'), ((61621, 61647), 'torch.IntTensor', 'torch.IntTensor', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (61636, 61647), False, 'import torch\n'), ((1277, 1317), 'torch.testing.get_all_math_dtypes', 'torch.testing.get_all_math_dtypes', (['"""cpu"""'], {}), "('cpu')\n", (1310, 1317), False, 'import torch\n'), ((1419, 1459), 'torch.testing.get_all_math_dtypes', 'torch.testing.get_all_math_dtypes', (['"""cpu"""'], {}), "('cpu')\n", (1452, 1459), False, 'import torch\n'), ((1519, 1559), 'torch.testing.get_all_math_dtypes', 'torch.testing.get_all_math_dtypes', (['"""cpu"""'], {}), "('cpu')\n", (1552, 1559), False, 'import torch\n'), ((4878, 4909), 'torch.ones', 'torch.ones', (['(3)', '(3)'], {'device': 'device'}), '(3, 3, device=device)\n', (4888, 4909), False, 'import torch\n'), ((5015, 5044), 'torch.ones', 'torch.ones', (['(14)'], {'device': 'device'}), '(14, device=device)\n', (5025, 5044), False, 'import torch\n'), ((7759, 7773), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (7771, 7773), False, 'import torch\n'), ((7803, 7817), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (7815, 7817), False, 'import torch\n'), ((8713, 8738), 'torch.get_default_dtype', 'torch.get_default_dtype', ([], {}), '()\n', (8736, 8738), False, 'import torch\n'), ((8970, 8995), 'torch.get_default_dtype', 'torch.get_default_dtype', ([], {}), '()\n', (8993, 8995), False, 'import torch\n'), ((9168, 9216), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['"""torch.IntTensor"""'], {}), "('torch.IntTensor')\n", (9197, 9216), False, 'import torch\n'), ((9263, 9299), 'torch.set_default_dtype', 
'torch.set_default_dtype', (['torch.int64'], {}), '(torch.int64)\n', (9286, 9299), False, 'import torch\n'), ((9410, 9454), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['torch.float32'], {}), '(torch.float32)\n', (9439, 9454), False, 'import torch\n'), ((9665, 9697), 'torch.FloatTensor', 'torch.FloatTensor', ([], {'device': '"""cuda"""'}), "(device='cuda')\n", (9682, 9697), False, 'import torch\n'), ((9852, 9896), 'torch.FloatTensor', 'torch.FloatTensor', (['(2.0, 3.0)'], {'device': '"""cuda"""'}), "((2.0, 3.0), device='cuda')\n", (9869, 9896), False, 'import torch\n'), ((9947, 9974), 'torch.Tensor', 'torch.Tensor', ([], {'device': '"""cuda"""'}), "(device='cuda')\n", (9959, 9974), False, 'import torch\n'), ((10124, 10163), 'torch.Tensor', 'torch.Tensor', (['(2.0, 3.0)'], {'device': '"""cuda"""'}), "((2.0, 3.0), device='cuda')\n", (10136, 10163), False, 'import torch\n'), ((12805, 12820), 'torch.tensor', 'torch.tensor', (['a'], {}), '(a)\n', (12817, 12820), False, 'import torch\n'), ((21394, 21427), 'torch.as_tensor', 'torch.as_tensor', (['y'], {'device': '"""cuda"""'}), "(y, device='cuda')\n", (21409, 21427), False, 'import torch\n'), ((21497, 21520), 'torch.as_tensor', 'torch.as_tensor', (['y_cuda'], {}), '(y_cuda)\n', (21512, 21520), False, 'import torch\n'), ((21556, 21594), 'torch.as_tensor', 'torch.as_tensor', (['y_cuda'], {'device': '"""cuda"""'}), "(y_cuda, device='cuda')\n", (21571, 21594), False, 'import torch\n'), ((21799, 21817), 'torch.as_tensor', 'torch.as_tensor', (['n'], {}), '(n)\n', (21814, 21817), False, 'import torch\n'), ((22174, 22210), 'torch.tensor', 'torch.tensor', (['n'], {'dtype': 'torch.float64'}), '(n, dtype=torch.float64)\n', (22186, 22210), False, 'import torch\n'), ((22293, 22329), 'torch.tensor', 'torch.tensor', (['n'], {'dtype': 'torch.float64'}), '(n, dtype=torch.float64)\n', (22305, 22329), False, 'import torch\n'), ((22448, 22469), 'numpy.random.randn', 'np.random.randn', (['(5)', '(6)'], {}), '(5, 6)\n', 
(22463, 22469), True, 'import numpy as np\n'), ((22499, 22532), 'torch.as_tensor', 'torch.as_tensor', (['n'], {'device': '"""cuda"""'}), "(n, device='cuda')\n", (22514, 22532), False, 'import torch\n'), ((24626, 24650), 'torch.rand', 'torch.rand', (['(3)', '(3)', '(300000)'], {}), '(3, 3, 300000)\n', (24636, 24650), False, 'import torch\n'), ((25757, 25776), 'torch.FloatTensor', 'torch.FloatTensor', ([], {}), '()\n', (25774, 25776), False, 'import torch\n'), ((25870, 25889), 'torch.FloatTensor', 'torch.FloatTensor', ([], {}), '()\n', (25887, 25889), False, 'import torch\n'), ((26083, 26103), 'torch.DoubleTensor', 'torch.DoubleTensor', ([], {}), '()\n', (26101, 26103), False, 'import torch\n'), ((26197, 26217), 'torch.DoubleTensor', 'torch.DoubleTensor', ([], {}), '()\n', (26215, 26217), False, 'import torch\n'), ((27759, 27819), 'torch.arange', 'torch.arange', (['(-1)', '(1)', '(2.0 / 49)'], {'dtype': 'torch.float32', 'out': 'line'}), '(-1, 1, 2.0 / 49, dtype=torch.float32, out=line)\n', (27771, 27819), False, 'import torch\n'), ((27876, 27890), 'torch.empty', 'torch.empty', (['(1)'], {}), '(1)\n', (27887, 27890), False, 'import torch\n'), ((27950, 27973), 'torch.arange', 'torch.arange', (['(10)'], {'out': 'x'}), '(10, out=x)\n', (27962, 27973), False, 'import torch\n'), ((29677, 29694), 'torch.arange', 'torch.arange', (['(1.0)'], {}), '(1.0)\n', (29689, 29694), False, 'import torch\n'), ((29908, 29923), 'torch.arange', 'torch.arange', (['(1)'], {}), '(1)\n', (29920, 29923), False, 'import torch\n'), ((30161, 30181), 'torch.arange', 'torch.arange', (['(1.0)', '(3)'], {}), '(1.0, 3)\n', (30173, 30181), False, 'import torch\n'), ((30324, 30344), 'torch.arange', 'torch.arange', (['(1)', '(3.0)'], {}), '(1, 3.0)\n', (30336, 30344), False, 'import torch\n'), ((30499, 30522), 'torch.arange', 'torch.arange', (['(1)', '(3)', '(1.0)'], {}), '(1, 3, 1.0)\n', (30511, 30522), False, 'import torch\n'), ((30806, 30824), 'torch.arange', 'torch.arange', (['(1)', '(3)'], {}), '(1, 
3)\n', (30818, 30824), False, 'import torch\n'), ((31050, 31071), 'torch.arange', 'torch.arange', (['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (31062, 31071), False, 'import torch\n'), ((31670, 31720), 'torch.empty_strided', 'torch.empty_strided', (['shape', 'strides'], {'device': 'device'}), '(shape, strides, device=device)\n', (31689, 31720), False, 'import torch\n'), ((33572, 33615), 'torch.eye', 'torch.eye', (['n', 'm'], {'device': 'device', 'dtype': 'dtype'}), '(n, m, device=device, dtype=dtype)\n', (33581, 33615), False, 'import torch\n'), ((33644, 33689), 'torch.zeros', 'torch.zeros', (['n', 'm'], {'dtype': 'dtype', 'device': 'device'}), '(n, m, dtype=dtype, device=device)\n', (33655, 33689), False, 'import torch\n'), ((33866, 33908), 'torch.empty', 'torch.empty', (['(0)'], {'device': 'device', 'dtype': 'dtype'}), '(0, device=device, dtype=dtype)\n', (33877, 33908), False, 'import torch\n'), ((33925, 33950), 'torch.eye', 'torch.eye', (['n', 'm'], {'out': 'res2'}), '(n, m, out=res2)\n', (33934, 33950), False, 'import torch\n'), ((34774, 34793), 'torch.from_numpy', 'torch.from_numpy', (['a'], {}), '(a)\n', (34790, 34793), False, 'import torch\n'), ((34132, 34207), 'torch.testing.get_all_fp_dtypes', 'torch.testing.get_all_fp_dtypes', ([], {'include_half': '(False)', 'include_bfloat16': '(False)'}), '(include_half=False, include_bfloat16=False)\n', (34163, 34207), False, 'import torch\n'), ((34224, 34262), 'torch.testing.get_all_complex_dtypes', 'torch.testing.get_all_complex_dtypes', ([], {}), '()\n', (34260, 34262), False, 'import torch\n'), ((35487, 35506), 'torch.from_numpy', 'torch.from_numpy', (['a'], {}), '(a)\n', (35503, 35506), False, 'import torch\n'), ((36472, 36497), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (36495, 36497), False, 'import warnings\n'), ((36515, 36546), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (36536, 36546), False, 'import warnings\n'), ((37461, 37486), 
'torch.cuda.device', 'torch.cuda.device', (['device'], {}), '(device)\n', (37478, 37486), False, 'import torch\n'), ((40416, 40446), 'torch.arange', 'torch.arange', (['(0)'], {'device': 'device'}), '(0, device=device)\n', (40428, 40446), False, 'import torch\n'), ((40487, 40514), 'torch.eye', 'torch.eye', (['(0)'], {'device': 'device'}), '(0, device=device)\n', (40496, 40514), False, 'import torch\n'), ((40555, 40585), 'torch.eye', 'torch.eye', (['(0)', '(0)'], {'device': 'device'}), '(0, 0, device=device)\n', (40564, 40585), False, 'import torch\n'), ((40626, 40656), 'torch.eye', 'torch.eye', (['(5)', '(0)'], {'device': 'device'}), '(5, 0, device=device)\n', (40635, 40656), False, 'import torch\n'), ((40697, 40727), 'torch.eye', 'torch.eye', (['(0)', '(5)'], {'device': 'device'}), '(0, 5, device=device)\n', (40706, 40727), False, 'import torch\n'), ((40766, 40804), 'torch.linspace', 'torch.linspace', (['(1)', '(1)', '(0)'], {'device': 'device'}), '(1, 1, 0, device=device)\n', (40780, 40804), False, 'import torch\n'), ((40843, 40881), 'torch.logspace', 'torch.logspace', (['(1)', '(1)', '(0)'], {'device': 'device'}), '(1, 1, 0, device=device)\n', (40857, 40881), False, 'import torch\n'), ((40920, 40952), 'torch.randperm', 'torch.randperm', (['(0)'], {'device': 'device'}), '(0, device=device)\n', (40934, 40952), False, 'import torch\n'), ((40991, 41030), 'torch.bartlett_window', 'torch.bartlett_window', (['(0)'], {'device': 'device'}), '(0, device=device)\n', (41012, 41030), False, 'import torch\n'), ((41069, 41124), 'torch.bartlett_window', 'torch.bartlett_window', (['(0)'], {'periodic': '(False)', 'device': 'device'}), '(0, periodic=False, device=device)\n', (41090, 41124), False, 'import torch\n'), ((41163, 41201), 'torch.hamming_window', 'torch.hamming_window', (['(0)'], {'device': 'device'}), '(0, device=device)\n', (41183, 41201), False, 'import torch\n'), ((41240, 41275), 'torch.hann_window', 'torch.hann_window', (['(0)'], {'device': 'device'}), '(0, 
device=device)\n', (41257, 41275), False, 'import torch\n'), ((41319, 41354), 'torch.tensor', 'torch.tensor', (['[[[]]]'], {'device': 'device'}), '([[[]]], device=device)\n', (41331, 41354), False, 'import torch\n'), ((41398, 41436), 'torch.as_tensor', 'torch.as_tensor', (['[[[]]]'], {'device': 'device'}), '([[[]]], device=device)\n', (41413, 41436), False, 'import torch\n'), ((41542, 41556), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (41554, 41556), False, 'import torch\n'), ((41711, 41728), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (41723, 41728), False, 'import torch\n'), ((41782, 41799), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (41794, 41799), False, 'import torch\n'), ((41891, 41908), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (41903, 41908), False, 'import torch\n'), ((41962, 41979), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (41974, 41979), False, 'import torch\n'), ((42125, 42139), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (42137, 42139), False, 'import torch\n'), ((44285, 44343), 'torch.linspace', 'torch.linspace', (['(10)', '(2000)', '(1991)'], {'device': 'device', 'dtype': 'dtype'}), '(10, 2000, 1991, device=device, dtype=dtype)\n', (44299, 44343), False, 'import torch\n'), ((44803, 44876), 'torch.linspace', 'torch.linspace', (['(0)', '(1000 * 3 * 3)', '(1000 * 3 * 3)'], {'device': 'device', 'dtype': 'dtype'}), '(0, 1000 * 3 * 3, 1000 * 3 * 3, device=device, dtype=dtype)\n', (44817, 44876), False, 'import torch\n'), ((44927, 44979), 'torch.linspace', 'torch.linspace', (['(0)', '(1)', '(-1)'], {'device': 'device', 'dtype': 'dtype'}), '(0, 1, -1, device=device, dtype=dtype)\n', (44941, 44979), False, 'import torch\n'), ((46833, 46902), 'torch.logical_and', 'torch.logical_and', (['(y[:-1].real < y[1:].real)', '(y[:-1].imag < y[1:].imag)'], {}), '(y[:-1].real < y[1:].real, y[:-1].imag < y[1:].imag)\n', (46850, 46902), False, 'import torch\n'), ((47443, 47478), 
'torch.linspace', 'torch.linspace', (['(1)', '(2)'], {'device': 'device'}), '(1, 2, device=device)\n', (47457, 47478), False, 'import torch\n'), ((47526, 47563), 'torch.linspace', 'torch.linspace', (['(1.0)', '(2)'], {'device': 'device'}), '(1.0, 2, device=device)\n', (47540, 47563), False, 'import torch\n'), ((47610, 47650), 'torch.linspace', 'torch.linspace', (['(1.0)', '(-2.0)'], {'device': 'device'}), '(1.0, -2.0, device=device)\n', (47624, 47650), False, 'import torch\n'), ((48893, 48926), 'torch.testing.get_all_fp_dtypes', 'torch.testing.get_all_fp_dtypes', ([], {}), '()\n', (48924, 48926), False, 'import torch\n'), ((48929, 48967), 'torch.testing.get_all_complex_dtypes', 'torch.testing.get_all_complex_dtypes', ([], {}), '()\n', (48965, 48967), False, 'import torch\n'), ((49144, 49177), 'torch.testing.get_all_fp_dtypes', 'torch.testing.get_all_fp_dtypes', ([], {}), '()\n', (49175, 49177), False, 'import torch\n'), ((49180, 49218), 'torch.testing.get_all_complex_dtypes', 'torch.testing.get_all_complex_dtypes', ([], {}), '()\n', (49216, 49218), False, 'import torch\n'), ((51814, 51866), 'torch.logspace', 'torch.logspace', (['(0)', '(1)', '(-1)'], {'device': 'device', 'dtype': 'dtype'}), '(0, 1, -1, device=device, dtype=dtype)\n', (51828, 51866), False, 'import torch\n'), ((52772, 52813), 'torch.ones', 'torch.ones', (['(1)'], {'device': 'device', 'dtype': 'dtype'}), '(1, device=device, dtype=dtype)\n', (52782, 52813), False, 'import torch\n'), ((54733, 54764), 'torch.full', 'torch.full', (['o.shape', '(1.0)'], {'out': 'o'}), '(o.shape, 1.0, out=o)\n', (54743, 54764), False, 'import torch\n'), ((54805, 54831), 'torch.full', 'torch.full', (['size', '(1)'], {'out': 'o'}), '(size, 1, out=o)\n', (54815, 54831), False, 'import torch\n'), ((56252, 56277), 'torch.manual_seed', 'torch.manual_seed', (['(123456)'], {}), '(123456)\n', (56269, 56277), False, 'import torch\n'), ((59184, 59226), 'torch.empty', 'torch.empty', (['(0)'], {'dtype': 'dtype', 'device': 'device'}), 
'(0, dtype=dtype, device=device)\n', (59195, 59226), False, 'import torch\n'), ((59243, 59298), 'torch.randperm', 'torch.randperm', (['n'], {'out': 'res2', 'dtype': 'dtype', 'device': 'device'}), '(n, out=res2, dtype=dtype, device=device)\n', (59257, 59298), False, 'import torch\n'), ((60629, 60670), 'torch.random.fork_rng', 'torch.random.fork_rng', ([], {'devices': 'rng_device'}), '(devices=rng_device)\n', (60650, 60670), False, 'import torch\n'), ((60694, 60744), 'torch.randperm', 'torch.randperm', (['n'], {'dtype': 'torch.long', 'device': 'device'}), '(n, dtype=torch.long, device=device)\n', (60708, 60744), False, 'import torch\n'), ((61781, 61800), 'torch.empty_like', 'torch.empty_like', (['a'], {}), '(a)\n', (61797, 61800), False, 'import torch\n'), ((62858, 62884), 'torch.full_like', 'torch.full_like', (['like', '(1.0)'], {}), '(like, 1.0)\n', (62873, 62884), False, 'import torch\n'), ((62928, 62977), 'torch.full_like', 'torch.full_like', (['like', '(1.0)'], {'dtype': 'torch.complex64'}), '(like, 1.0, dtype=torch.complex64)\n', (62943, 62977), False, 'import torch\n'), ((3239, 3264), 'torch.cuda.device', 'torch.cuda.device', (['device'], {}), '(device)\n', (3256, 3264), False, 'import torch\n'), ((9765, 9786), 'torch.Size', 'torch.Size', (['[2, 3, 4]'], {}), '([2, 3, 4])\n', (9775, 9786), False, 'import torch\n'), ((10037, 10058), 'torch.Size', 'torch.Size', (['[2, 3, 4]'], {}), '([2, 3, 4])\n', (10047, 10058), False, 'import torch\n'), ((10334, 10355), 'torch.Size', 'torch.Size', (['[2, 3, 4]'], {}), '([2, 3, 4])\n', (10344, 10355), False, 'import torch\n'), ((10546, 10582), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', ([], {'device': '"""cpu"""'}), "(device='cpu')\n", (10568, 10582), False, 'import torch\n'), ((10749, 10797), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['(2.0, 3.0)'], {'device': '"""cpu"""'}), "((2.0, 3.0), device='cpu')\n", (10771, 10797), False, 'import torch\n'), ((10827, 10841), 'torch.Tensor', 'torch.Tensor', ([], {}), 
'()\n', (10839, 10841), False, 'import torch\n'), ((10967, 10993), 'torch.Tensor', 'torch.Tensor', ([], {'device': '"""cpu"""'}), "(device='cpu')\n", (10979, 10993), False, 'import torch\n'), ((11150, 11188), 'torch.Tensor', 'torch.Tensor', (['(2.0, 3.0)'], {'device': '"""cpu"""'}), "((2.0, 3.0), device='cpu')\n", (11162, 11188), False, 'import torch\n'), ((16328, 16344), 'torch.tensor', 'torch.tensor', (['()'], {}), '(())\n', (16340, 16344), False, 'import torch\n'), ((16393, 16410), 'torch.tensor', 'torch.tensor', (['(5.0)'], {}), '(5.0)\n', (16405, 16410), False, 'import torch\n'), ((16456, 16471), 'torch.tensor', 'torch.tensor', (['(5)'], {}), '(5)\n', (16468, 16471), False, 'import torch\n'), ((16517, 16535), 'torch.tensor', 'torch.tensor', (['(True)'], {}), '(True)\n', (16529, 16535), False, 'import torch\n'), ((16582, 16616), 'torch.tensor', 'torch.tensor', (['(5)'], {'dtype': 'torch.int32'}), '(5, dtype=torch.int32)\n', (16594, 16616), False, 'import torch\n'), ((16665, 16697), 'torch.tensor', 'torch.tensor', (['((7, 5), (9, 5.0))'], {}), '(((7, 5), (9, 5.0)))\n', (16677, 16697), False, 'import torch\n'), ((16745, 16777), 'torch.tensor', 'torch.tensor', (['((5.0, 5), (3, 5))'], {}), '(((5.0, 5), (3, 5)))\n', (16757, 16777), False, 'import torch\n'), ((16823, 16853), 'torch.tensor', 'torch.tensor', (['((5, 3), (3, 5))'], {}), '(((5, 3), (3, 5)))\n', (16835, 16853), False, 'import torch\n'), ((16910, 16954), 'torch.tensor', 'torch.tensor', (['((5, 3 + 2.0j), (3, 5 + 4.0j))'], {}), '(((5, 3 + 2.0j), (3, 5 + 4.0j)))\n', (16922, 16954), False, 'import torch\n'), ((21851, 21866), 'torch.tensor', 'torch.tensor', (['n'], {}), '(n)\n', (21863, 21866), False, 'import torch\n'), ((21953, 21968), 'torch.tensor', 'torch.tensor', (['n'], {}), '(n)\n', (21965, 21968), False, 'import torch\n'), ((22040, 22060), 'numpy.random.rand', 'np.random.rand', (['(5)', '(6)'], {}), '(5, 6)\n', (22054, 22060), True, 'import numpy as np\n'), ((22566, 22596), 'torch.tensor', 
'torch.tensor', (['n'], {'device': '"""cuda"""'}), "(n, device='cuda')\n", (22578, 22596), False, 'import torch\n'), ((22687, 22717), 'torch.tensor', 'torch.tensor', (['n'], {'device': '"""cuda"""'}), "(n, device='cuda')\n", (22699, 22717), False, 'import torch\n'), ((29142, 29203), 'torch.arange', 'torch.arange', (['(1.175494351e-38)', '(3.402823466e+38)'], {'device': 'device'}), '(1.175494351e-38, 3.402823466e+38, device=device)\n', (29154, 29203), False, 'import torch\n'), ((29751, 29768), 'torch.tensor', 'torch.tensor', (['(1.0)'], {}), '(1.0)\n', (29763, 29768), False, 'import torch\n'), ((29826, 29864), 'torch.tensor', 'torch.tensor', (['(1.0)'], {'dtype': 'torch.float64'}), '(1.0, dtype=torch.float64)\n', (29838, 29864), False, 'import torch\n'), ((29979, 29994), 'torch.tensor', 'torch.tensor', (['(1)'], {}), '(1)\n', (29991, 29994), False, 'import torch\n'), ((30051, 30085), 'torch.tensor', 'torch.tensor', (['(1)'], {'dtype': 'torch.int16'}), '(1, dtype=torch.int16)\n', (30063, 30085), False, 'import torch\n'), ((30238, 30276), 'torch.tensor', 'torch.tensor', (['(1.0)'], {'dtype': 'torch.float64'}), '(1.0, dtype=torch.float64)\n', (30250, 30276), False, 'import torch\n'), ((30401, 30435), 'torch.tensor', 'torch.tensor', (['(1)'], {'dtype': 'torch.int16'}), '(1, dtype=torch.int16)\n', (30413, 30435), False, 'import torch\n'), ((30437, 30454), 'torch.tensor', 'torch.tensor', (['(3.0)'], {}), '(3.0)\n', (30449, 30454), False, 'import torch\n'), ((30601, 30616), 'torch.tensor', 'torch.tensor', (['(1)'], {}), '(1)\n', (30613, 30616), False, 'import torch\n'), ((30653, 30687), 'torch.tensor', 'torch.tensor', (['(3)'], {'dtype': 'torch.int16'}), '(3, dtype=torch.int16)\n', (30665, 30687), False, 'import torch\n'), ((30724, 30762), 'torch.tensor', 'torch.tensor', (['(1.0)'], {'dtype': 'torch.float64'}), '(1.0, dtype=torch.float64)\n', (30736, 30762), False, 'import torch\n'), ((30880, 30895), 'torch.tensor', 'torch.tensor', (['(1)'], {}), '(1)\n', (30892, 30895), 
False, 'import torch\n'), ((30955, 30970), 'torch.tensor', 'torch.tensor', (['(1)'], {}), '(1)\n', (30967, 30970), False, 'import torch\n'), ((30972, 31006), 'torch.tensor', 'torch.tensor', (['(3)'], {'dtype': 'torch.int16'}), '(3, dtype=torch.int16)\n', (30984, 31006), False, 'import torch\n'), ((31149, 31164), 'torch.tensor', 'torch.tensor', (['(1)'], {}), '(1)\n', (31161, 31164), False, 'import torch\n'), ((31201, 31216), 'torch.tensor', 'torch.tensor', (['(3)'], {}), '(3)\n', (31213, 31216), False, 'import torch\n'), ((31253, 31287), 'torch.tensor', 'torch.tensor', (['(1)'], {'dtype': 'torch.int16'}), '(1, dtype=torch.int16)\n', (31265, 31287), False, 'import torch\n'), ((37730, 37749), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (37742, 37749), False, 'import torch\n'), ((44605, 44642), 'torch.rand', 'torch.rand', (['(3)', '(3)', '(1000)'], {'device': 'device'}), '(3, 3, 1000, device=device)\n', (44615, 44642), False, 'import torch\n'), ((45209, 45260), 'torch.linspace', 'torch.linspace', (['(0)', '(1)', '(0)'], {'device': 'device', 'dtype': 'dtype'}), '(0, 1, 0, device=device, dtype=dtype)\n', (45223, 45260), False, 'import torch\n'), ((46399, 46456), 'torch.linspace', 'torch.linspace', (['(1.0j)', '(2.0j)', '(3)'], {'device': 'device', 'dtype': 'dtype'}), '(1.0j, 2.0j, 3, device=device, dtype=dtype)\n', (46413, 46456), False, 'import torch\n'), ((55206, 55237), 'torch.randint', 'torch.randint', (['*args'], {'size': 'size'}), '(*args, size=size)\n', (55219, 55237), False, 'import torch\n'), ((55284, 55337), 'torch.randint', 'torch.randint', (['*args'], {'size': 'size', 'layout': 'torch.strided'}), '(*args, size=size, layout=torch.strided)\n', (55297, 55337), False, 'import torch\n'), ((55384, 55450), 'torch.randint', 'torch.randint', (['*args'], {'size': 'size', 'generator': 'torch.default_generator'}), '(*args, size=size, generator=torch.default_generator)\n', (55397, 55450), False, 'import torch\n'), ((55499, 55551), 'torch.randint', 
'torch.randint', (['*args'], {'size': 'size', 'dtype': 'torch.float32'}), '(*args, size=size, dtype=torch.float32)\n', (55512, 55551), False, 'import torch\n'), ((55657, 55697), 'torch.randint', 'torch.randint', (['*args'], {'size': 'size', 'out': 'out'}), '(*args, size=size, out=out)\n', (55670, 55697), False, 'import torch\n'), ((55746, 55807), 'torch.randint', 'torch.randint', (['*args'], {'size': 'size', 'out': 'out', 'dtype': 'torch.float32'}), '(*args, size=size, out=out, dtype=torch.float32)\n', (55759, 55807), False, 'import torch\n'), ((55909, 55949), 'torch.randint', 'torch.randint', (['*args'], {'size': 'size', 'out': 'out'}), '(*args, size=size, out=out)\n', (55922, 55949), False, 'import torch\n'), ((55996, 56055), 'torch.randint', 'torch.randint', (['*args'], {'size': 'size', 'out': 'out', 'dtype': 'torch.int64'}), '(*args, size=size, out=out, dtype=torch.int64)\n', (56009, 56055), False, 'import torch\n'), ((59045, 59086), 'torch.random.fork_rng', 'torch.random.fork_rng', ([], {'devices': 'rng_device'}), '(devices=rng_device)\n', (59066, 59086), False, 'import torch\n'), ((59115, 59160), 'torch.randperm', 'torch.randperm', (['n'], {'dtype': 'dtype', 'device': 'device'}), '(n, dtype=dtype, device=device)\n', (59129, 59160), False, 'import torch\n'), ((59452, 59484), 'torch.randperm', 'torch.randperm', (['n'], {'device': 'device'}), '(n, device=device)\n', (59466, 59484), False, 'import torch\n'), ((60327, 60374), 'torch.randperm', 'torch.randperm', (['large_n'], {'out': 'res', 'device': 'device'}), '(large_n, out=res, device=device)\n', (60341, 60374), False, 'import torch\n'), ((60487, 60539), 'torch.zeros', 'torch.zeros', (['(2, 3)'], {'dtype': 'torch.long', 'device': 'device'}), '((2, 3), dtype=torch.long, device=device)\n', (60498, 60539), False, 'import torch\n'), ((61706, 61725), 'torch.empty_like', 'torch.empty_like', (['a'], {}), '(a)\n', (61722, 61725), False, 'import torch\n'), ((1867, 1882), 'torch.tensor', 'torch.tensor', (['(5)'], {}), 
'(5)\n', (1879, 1882), False, 'import torch\n'), ((1961, 2014), 'torch.ones', 'torch.ones', (['(2, 3)'], {'dtype': 'torch.float32', 'device': '"""cpu"""'}), "((2, 3), dtype=torch.float32, device='cpu')\n", (1971, 2014), False, 'import torch\n'), ((2093, 2148), 'torch.ones', 'torch.ones', (['(2, 3)'], {'dtype': 'torch.float32', 'device': '"""cpu:0"""'}), "((2, 3), dtype=torch.float32, device='cpu:0')\n", (2103, 2148), False, 'import torch\n'), ((2706, 2750), 'torch.tensor', 'torch.tensor', (['(5)'], {'dtype': 'torch.int64', 'device': '(0)'}), '(5, dtype=torch.int64, device=0)\n', (2718, 2750), False, 'import torch\n'), ((2832, 2883), 'torch.tensor', 'torch.tensor', (['(5)'], {'dtype': 'torch.int64', 'device': '"""cuda:0"""'}), "(5, dtype=torch.int64, device='cuda:0')\n", (2844, 2883), False, 'import torch\n'), ((3564, 3579), 'torch.tensor', 'torch.tensor', (['(5)'], {}), '(5)\n', (3576, 3579), False, 'import torch\n'), ((3658, 3673), 'torch.tensor', 'torch.tensor', (['(5)'], {}), '(5)\n', (3670, 3673), False, 'import torch\n'), ((10659, 10680), 'torch.Size', 'torch.Size', (['[2, 3, 4]'], {}), '([2, 3, 4])\n', (10669, 10680), False, 'import torch\n'), ((11060, 11081), 'torch.Size', 'torch.Size', (['[2, 3, 4]'], {}), '([2, 3, 4])\n', (11070, 11081), False, 'import torch\n'), ((11493, 11514), 'torch.Size', 'torch.Size', (['[2, 3, 4]'], {}), '([2, 3, 4])\n', (11503, 11514), False, 'import torch\n'), ((12753, 12768), 'numpy.array', 'np.array', (['[5.0]'], {}), '([5.0])\n', (12761, 12768), True, 'import numpy as np\n'), ((17163, 17174), 'numpy.array', 'np.array', (['(5)'], {}), '(5)\n', (17171, 17174), True, 'import numpy as np\n'), ((21735, 21755), 'numpy.random.rand', 'np.random.rand', (['(5)', '(6)'], {}), '(5, 6)\n', (21749, 21755), True, 'import numpy as np\n'), ((32529, 32562), 'torch.tensor', 'torch.tensor', (['(0.42)'], {'device': 'device'}), '(0.42, device=device)\n', (32541, 32562), False, 'import torch\n'), ((32705, 32738), 'torch.tensor', 'torch.tensor', 
(['(0.42)'], {'device': 'device'}), '(0.42, device=device)\n', (32717, 32738), False, 'import torch\n'), ((36773, 36798), 'torch.cuda.device', 'torch.cuda.device', (['device'], {}), '(device)\n', (36790, 36798), False, 'import torch\n'), ((38058, 38101), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'device', 'dtype': 'dt'}), '(shape, device=device, dtype=dt)\n', (38069, 38101), False, 'import torch\n'), ((38258, 38303), 'torch.full', 'torch.full', (['shape', '(3)'], {'device': 'device', 'dtype': 'dt'}), '(shape, 3, device=device, dtype=dt)\n', (38268, 38303), False, 'import torch\n'), ((38462, 38504), 'torch.ones', 'torch.ones', (['shape'], {'device': 'device', 'dtype': 'dt'}), '(shape, device=device, dtype=dt)\n', (38472, 38504), False, 'import torch\n'), ((38660, 38703), 'torch.empty', 'torch.empty', (['shape'], {'device': 'device', 'dtype': 'dt'}), '(shape, device=device, dtype=dt)\n', (38671, 38703), False, 'import torch\n'), ((2240, 2279), 'torch.ones', 'torch.ones', (['(2, 3)'], {'dtype': 'torch.float32'}), '((2, 3), dtype=torch.float32)\n', (2250, 2279), False, 'import torch\n'), ((2978, 3017), 'torch.ones', 'torch.ones', (['(2, 3)'], {'dtype': 'torch.float32'}), '((2, 3), dtype=torch.float32)\n', (2988, 3017), False, 'import torch\n'), ((3316, 3343), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (3341, 3343), False, 'import torch\n'), ((3969, 4013), 'torch.tensor', 'torch.tensor', (['(5)'], {'dtype': 'torch.int64', 'device': '(1)'}), '(5, dtype=torch.int64, device=1)\n', (3981, 4013), False, 'import torch\n'), ((4103, 4154), 'torch.tensor', 'torch.tensor', (['(5)'], {'dtype': 'torch.int64', 'device': '"""cuda:1"""'}), "(5, dtype=torch.int64, device='cuda:1')\n", (4115, 4154), False, 'import torch\n'), ((17044, 17056), 'numpy.array', 'np.array', (['()'], {}), '(())\n', (17052, 17056), True, 'import numpy as np\n'), ((17123, 17136), 'numpy.array', 'np.array', (['(5.0)'], {}), '(5.0)\n', (17131, 17136), True, 'import numpy 
as np\n'), ((17483, 17510), 'numpy.array', 'np.array', (['(3)'], {'dtype': 'np.uint8'}), '(3, dtype=np.uint8)\n', (17491, 17510), True, 'import numpy as np\n'), ((36866, 36885), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (36878, 36885), False, 'import torch\n'), ((38166, 38209), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'device', 'dtype': 'dt'}), '(shape, device=device, dtype=dt)\n', (38177, 38209), False, 'import torch\n'), ((38367, 38410), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'device', 'dtype': 'dt'}), '(shape, device=device, dtype=dt)\n', (38378, 38410), False, 'import torch\n'), ((38568, 38611), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'device', 'dtype': 'dt'}), '(shape, device=device, dtype=dt)\n', (38579, 38611), False, 'import torch\n'), ((38768, 38811), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'device', 'dtype': 'dt'}), '(shape, device=device, dtype=dt)\n', (38779, 38811), False, 'import torch\n'), ((40220, 40263), 'torch.randn', 'torch.randn', (['shape'], {'device': 'device', 'dtype': 'dt'}), '(shape, device=device, dtype=dt)\n', (40231, 40263), False, 'import torch\n'), ((2391, 2412), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)'], {}), '(2, 3)\n', (2406, 2412), True, 'import numpy as np\n'), ((2518, 2533), 'torch.tensor', 'torch.tensor', (['(5)'], {}), '(5)\n', (2530, 2533), False, 'import torch\n'), ((2594, 2609), 'torch.tensor', 'torch.tensor', (['(5)'], {}), '(5)\n', (2606, 2609), False, 'import torch\n'), ((3133, 3154), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)'], {}), '(2, 3)\n', (3148, 3154), True, 'import numpy as np\n'), ((3438, 3487), 'torch.tensor', 'torch.tensor', (['(5)'], {'dtype': 'torch.int64', 'device': '"""cuda"""'}), "(5, dtype=torch.int64, device='cuda')\n", (3450, 3487), False, 'import torch\n'), ((4257, 4296), 'torch.ones', 'torch.ones', (['(2, 3)'], {'dtype': 'torch.float32'}), '((2, 3), dtype=torch.float32)\n', (4267, 4296), False, 
'import torch\n'), ((17305, 17316), 'numpy.array', 'np.array', (['(5)'], {}), '(5)\n', (17313, 17316), True, 'import numpy as np\n'), ((17407, 17418), 'numpy.array', 'np.array', (['(5)'], {}), '(5)\n', (17415, 17418), True, 'import numpy as np\n'), ((39161, 39209), 'torch.randint', 'torch.randint', (['(6)', 'shape'], {'device': 'device', 'dtype': 'dt'}), '(6, shape, device=device, dtype=dt)\n', (39174, 39209), False, 'import torch\n'), ((39300, 39348), 'torch.randint', 'torch.randint', (['(2)', 'shape'], {'device': 'device', 'dtype': 'dt'}), '(2, shape, device=device, dtype=dt)\n', (39313, 39348), False, 'import torch\n'), ((40048, 40090), 'torch.rand', 'torch.rand', (['shape'], {'device': 'device', 'dtype': 'dt'}), '(shape, device=device, dtype=dt)\n', (40058, 40090), False, 'import torch\n'), ((40332, 40375), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'device', 'dtype': 'dt'}), '(shape, device=device, dtype=dt)\n', (40343, 40375), False, 'import torch\n'), ((3769, 3784), 'torch.tensor', 'torch.tensor', (['(5)'], {}), '(5)\n', (3781, 3784), False, 'import torch\n'), ((3849, 3864), 'torch.tensor', 'torch.tensor', (['(5)'], {}), '(5)\n', (3861, 3864), False, 'import torch\n'), ((4494, 4515), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)'], {}), '(2, 3)\n', (4509, 4515), True, 'import numpy as np\n'), ((17582, 17593), 'numpy.array', 'np.array', (['(5)'], {}), '(5)\n', (17590, 17593), True, 'import numpy as np\n'), ((17597, 17608), 'numpy.array', 'np.array', (['(9)'], {}), '(9)\n', (17605, 17608), True, 'import numpy as np\n'), ((17694, 17707), 'numpy.array', 'np.array', (['(5.0)'], {}), '(5.0)\n', (17702, 17707), True, 'import numpy as np\n'), ((17778, 17789), 'numpy.array', 'np.array', (['(3)'], {}), '(3)\n', (17786, 17789), True, 'import numpy as np\n'), ((17793, 17804), 'numpy.array', 'np.array', (['(3)'], {}), '(3)\n', (17801, 17804), True, 'import numpy as np\n'), ((39419, 39462), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'device', 
'dtype': 'dt'}), '(shape, device=device, dtype=dt)\n', (39430, 39462), False, 'import torch\n'), ((39692, 39740), 'torch.randint', 'torch.randint', (['(6)', 'shape'], {'device': 'device', 'dtype': 'dt'}), '(6, shape, device=device, dtype=dt)\n', (39705, 39740), False, 'import torch\n'), ((39570, 39618), 'torch.randint', 'torch.randint', (['(6)', 'shape'], {'device': 'device', 'dtype': 'dt'}), '(6, shape, device=device, dtype=dt)\n', (39583, 39618), False, 'import torch\n'), ((39811, 39854), 'torch.zeros', 'torch.zeros', (['shape'], {'device': 'device', 'dtype': 'dt'}), '(shape, device=device, dtype=dt)\n', (39822, 39854), False, 'import torch\n')] |
import numpy as np
import matplotlib.pyplot as plt

# Sample points to be fitted.
xs = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
ys = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])

# Least-squares fit of a degree-5 polynomial; with six samples and six
# coefficients this interpolates the data exactly.
coeffs = np.polyfit(xs, ys, deg=5)
# Wrap the coefficient vector in a callable polynomial object.
poly = np.poly1d(coeffs)

# Dense grid so the fitted curve plots smoothly through the samples.
grid = np.linspace(0.0, 5.0, 100)
plt.scatter(xs, ys, 50, 'steelblue')
plt.plot(grid, poly(grid), color='indianred', linewidth=2)
plt.xlabel('X')
plt.ylabel('Y')
plt.show()
| [
"numpy.poly1d",
"matplotlib.pyplot.show",
"numpy.polyfit",
"matplotlib.pyplot.scatter",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((56, 96), 'numpy.array', 'np.array', (['[0.0, 1.0, 2.0, 3.0, 4.0, 5.0]'], {}), '([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])\n', (64, 96), True, 'import numpy as np\n'), ((103, 145), 'numpy.array', 'np.array', (['[0.0, 0.8, 0.9, 0.1, -0.8, -1.0]'], {}), '([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])\n', (111, 145), True, 'import numpy as np\n'), ((153, 176), 'numpy.polyfit', 'np.polyfit', (['x', 'y'], {'deg': '(5)'}), '(x, y, deg=5)\n', (163, 176), True, 'import numpy as np\n'), ((240, 255), 'numpy.poly1d', 'np.poly1d', (['beta'], {}), '(beta)\n', (249, 255), True, 'import numpy as np\n'), ((272, 298), 'numpy.linspace', 'np.linspace', (['(0.0)', '(5.0)', '(100)'], {}), '(0.0, 5.0, 100)\n', (283, 298), True, 'import numpy as np\n'), ((299, 333), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y', '(50)', '"""steelblue"""'], {}), "(x, y, 50, 'steelblue')\n", (310, 333), True, 'import matplotlib.pyplot as plt\n'), ((385, 400), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X"""'], {}), "('X')\n", (395, 400), True, 'import matplotlib.pyplot as plt\n'), ((401, 416), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y"""'], {}), "('Y')\n", (411, 416), True, 'import matplotlib.pyplot as plt\n'), ((417, 427), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (425, 427), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
2D MAF data of Cs2O.4.72SiO2 glass
==================================
"""
# %%
# The following example illustrates an application of the statistical learning method
# applied in determining the distribution of the nuclear shielding tensor parameters
# from a 2D magic-angle flipping (MAF) spectrum. In this example, we use the 2D MAF
# spectrum [#f1]_ of :math:`\text{Cs}_2\text{O}\cdot4.72\text{SiO}_2` glass.
#
# Before getting started
# ----------------------
#
# Import all relevant packages.
import csdmpy as cp
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
from csdmpy import statistics as stats
from mrinversion.kernel.nmr import ShieldingPALineshape
from mrinversion.kernel.utils import x_y_to_zeta_eta
from mrinversion.linear_model import SmoothLasso, TSVDCompression
from mrinversion.utils import plot_3d, to_Haeberlen_grid
# sphinx_gallery_thumbnail_number = 5
# %%
# Setup for the matplotlib figures.
# function for plotting 2D dataset
def plot2D(csdm_object, **kwargs):
    """Render a 2D csdm dataset as an intensity map with both axes inverted.

    Extra keyword arguments are forwarded unchanged to ``imshow``.
    """
    plt.figure(figsize=(4.5, 3.5))
    axis = plt.subplot(projection="csdm")
    axis.imshow(csdm_object, cmap="gist_ncar_r", aspect="auto", **kwargs)
    # NMR convention: shift/frequency axes increase right-to-left and
    # top-to-bottom, so flip both.
    for flip in (axis.invert_xaxis, axis.invert_yaxis):
        flip()
    plt.tight_layout()
    plt.show()
# %%
# Dataset setup
# -------------
#
# Import the dataset
# ''''''''''''''''''
#
# Load the dataset. Here, we import the dataset as the CSDM data-object.
# The 2D MAF dataset in csdm format
filename = "https://zenodo.org/record/3964531/files/Cs2O-4_72SiO2-MAF.csdf"
data_object = cp.load(filename)
# For inversion, we only interest ourselves with the real part of the complex dataset.
data_object = data_object.real
# We will also convert the coordinates of both dimensions from Hz to ppm.
_ = [item.to("ppm", "nmr_frequency_ratio") for item in data_object.dimensions]
# %%
# Here, the variable ``data_object`` is a
# `CSDM <https://csdmpy.readthedocs.io/en/latest/api/CSDM.html>`_
# object that holds the real part of the 2D MAF dataset. The plot of the 2D MAF dataset
# is
plot2D(data_object)
# %%
# There are two dimensions in this dataset. The dimension at index 0, the horizontal
# dimension in the figure, is the pure anisotropic dimension, while the dimension at
# index 1 is the isotropic chemical shift dimension.
#
# Prepping the data for inversion
# '''''''''''''''''''''''''''''''
# **Step-1: Data Alignment**
#
# When using the csdm objects with the ``mrinversion`` package, the dimension at index
# 0 must be the dimension undergoing the linear inversion. In this example, we plan to
# invert the pure anisotropic shielding line-shape. In the ``data_object``, the
# anisotropic dimension is already at index 0 and, therefore, no further action is
# required.
#
# **Step-2: Optimization**
#
# Also notice, the signal from the 2D MAF dataset occupies a small fraction of the
# two-dimensional frequency grid. For optimum performance, truncate the dataset to the
# relevant region before proceeding. Use the appropriate array indexing/slicing to
# select the signal region.
data_object_truncated = data_object[:, 290:330]
plot2D(data_object_truncated)
# %%
# Linear Inversion setup
# ----------------------
#
# Dimension setup
# '''''''''''''''
#
# **Anisotropic-dimension:**
# The dimension of the dataset that holds the pure anisotropic frequency
# contributions. In ``mrinversion``, this must always be the dimension at index 0 of
# the data object.
anisotropic_dimension = data_object_truncated.dimensions[0]
# %%
# **x-y dimensions:**
# The two inverse dimensions corresponding to the `x` and `y`-axis of the `x`-`y` grid.
inverse_dimensions = [
cp.LinearDimension(count=25, increment="450 Hz", label="x"), # the `x`-dimension
cp.LinearDimension(count=25, increment="450 Hz", label="y"), # the `y`-dimension
]
# %%
# Generating the kernel
# '''''''''''''''''''''
#
# For MAF datasets, the line-shape kernel corresponds to the pure nuclear shielding
# anisotropy line-shapes. Use the
# :class:`~mrinversion.kernel.nmr.ShieldingPALineshape` class to generate a
# shielding line-shape kernel.
lineshape = ShieldingPALineshape(
anisotropic_dimension=anisotropic_dimension,
inverse_dimension=inverse_dimensions,
channel="29Si",
magnetic_flux_density="9.4 T",
rotor_angle="87.14°",
rotor_frequency="14 kHz",
number_of_sidebands=4,
)
# %%
# Here, ``lineshape`` is an instance of the
# :class:`~mrinversion.kernel.nmr.ShieldingPALineshape` class. The required
# arguments of this class are the `anisotropic_dimension`, `inverse_dimension`, and
# `channel`. We have already defined the first two arguments in the previous
# sub-section. The value of the `channel` argument is the nucleus observed in the MAF
# experiment. In this example, this value is '29Si'.
# The remaining arguments, such as the `magnetic_flux_density`, `rotor_angle`,
# and `rotor_frequency`, are set to match the conditions under which the 2D MAF
# spectrum was acquired. Note for this particular MAF measurement, the rotor angle was
# set to :math:`87.14^\circ` for the anisotropic dimension, not the usual
# :math:`90^\circ`. The value of the
# `number_of_sidebands` argument is the number of sidebands calculated for each
# line-shape within the kernel. Unless, you have a lot of spinning sidebands in your
# MAF dataset, four sidebands should be enough.
#
# Once the ShieldingPALineshape instance is created, use the
# :meth:`~mrinversion.kernel.nmr.ShieldingPALineshape.kernel` method of the
# instance to generate the MAF line-shape kernel.
K = lineshape.kernel(supersampling=1)
print(K.shape)
# %%
# The kernel ``K`` is a NumPy array of shape (128, 625), where the axes with 128 and
# 625 points are the anisotropic dimension and the features (x-y coordinates)
# corresponding to the :math:`25\times 25` `x`-`y` grid, respectively.
# %%
# Data Compression
# ''''''''''''''''
#
# Data compression is optional but recommended. It may reduce the size of the
# inverse problem and, thus, further computation time.
new_system = TSVDCompression(K, data_object_truncated)
compressed_K = new_system.compressed_K
compressed_s = new_system.compressed_s
print(f"truncation_index = {new_system.truncation_index}")
# %%
# Solving the inverse problem
# ---------------------------
#
# Smooth LASSO cross-validation
# '''''''''''''''''''''''''''''
#
# Solve the smooth-lasso problem. Ordinarily, one should use the statistical learning
# method to solve the inverse problem over a range of α and λ values and then determine
# the best nuclear shielding tensor parameter distribution for the given 2D MAF
# dataset. Considering the limited build time for the documentation, we skip this step
# and evaluate the distribution at pre-optimized α and λ values. The optimum values are
# :math:`\alpha = 5.62\times 10^{-7}` and :math:`\lambda = 3.16\times 10^{-6}`.
# The following commented code was used in determining the optimum α and λ values.
# %%
# from mrinversion.linear_model import SmoothLassoCV
# # setup the pre-defined range of alpha and lambda values
# lambdas = 10 ** (-4 - 3 * (np.arange(20) / 19))
# alphas = 10 ** (-4.5 - 3 * (np.arange(20) / 19))
# # setup the smooth lasso cross-validation class
# s_lasso = SmoothLassoCV(
# alphas=alphas, # A numpy array of alpha values.
# lambdas=lambdas, # A numpy array of lambda values.
# sigma=0.002, # The standard deviation of noise from the MAF data.
# folds=10, # The number of folds in n-folds cross-validation.
# inverse_dimension=inverse_dimensions, # previously defined inverse dimensions.
# verbose=1, # If non-zero, prints the progress as the computation proceeds.
# )
# # run fit using the compressed kernel and compressed data.
# s_lasso.fit(compressed_K, compressed_s)
# # the optimum hyper-parameters, alpha and lambda, from the cross-validation.
# print(s_lasso.hyperparameters)
# # the solution
# f_sol = s_lasso.f
# # the cross-validation error curve
# CV_metric = s_lasso.cross_validation_curve
# %%
# If you use the above ``SmoothLassoCV`` method, skip the following code-block. The
# following code-block evaluates the smooth-lasso solution at the pre-optimized
# hyperparameters.
# Setup the smooth lasso class
s_lasso = SmoothLasso(
alpha=8.34e-7, lambda1=6.16e-7, inverse_dimension=inverse_dimensions
)
# run the fit method on the compressed kernel and compressed data.
s_lasso.fit(K=compressed_K, s=compressed_s)
# %%
# The optimum solution
# ''''''''''''''''''''
#
# The :attr:`~mrinversion.linear_model.SmoothLasso.f` attribute of the instance holds
# the solution,
f_sol = s_lasso.f # f_sol is a CSDM object.
# %%
# where ``f_sol`` is the optimum solution.
#
# The fit residuals
# '''''''''''''''''
#
# To calculate the residuals between the data and predicted data(fit), use the
# :meth:`~mrinversion.linear_model.SmoothLasso.residuals` method, as follows,
residuals = s_lasso.residuals(K=K, s=data_object_truncated)
# residuals is a CSDM object.
# The plot of the residuals.
plot2D(residuals, vmax=data_object_truncated.max(), vmin=data_object_truncated.min())
# %%
# The standard deviation of the residuals is
residuals.std()
# %%
# Saving the solution
# '''''''''''''''''''
#
# To serialize the solution to a file, use the `save()` method of the CSDM object,
# for example,
f_sol.save("Cs2O.4.72SiO2_inverse.csdf") # save the solution
residuals.save("Cs2O.4.72SiO2_residue.csdf") # save the residuals
# %%
# Data Visualization
# ------------------
#
# At this point, we have solved the inverse problem and obtained an optimum
# distribution of the nuclear shielding tensor parameters from the 2D MAF dataset. You
# may use any data visualization and interpretation tool of choice for further
# analysis. In the following sections, we provide minimal visualization and analysis
# to complete the case study.
#
# Visualizing the 3D solution
# '''''''''''''''''''''''''''
# Normalize the solution
f_sol /= f_sol.max()
# Convert the coordinates of the solution, `f_sol`, from Hz to ppm.
[item.to("ppm", "nmr_frequency_ratio") for item in f_sol.dimensions]
# The 3D plot of the solution
plt.figure(figsize=(5, 4.4))
ax = plt.subplot(projection="3d")
plot_3d(ax, f_sol, x_lim=[0, 140], y_lim=[0, 140], z_lim=[-50, -150])
plt.tight_layout()
plt.show()
# %%
# From the 3D plot, we observe two distinct regions: one for the :math:`\text{Q}^4`
# sites and another for the :math:`\text{Q}^3` sites.
# Select the respective regions by using the appropriate array indexing,
Q4_region = f_sol[0:7, 0:7, 8:34]
Q4_region.description = "Q4 region"
Q3_region = f_sol[0:7, 10:22, 14:35]
Q3_region.description = "Q3 region"
# %%
# The plot of the respective regions is shown below.
# Calculate the normalization factor for the 2D contours and 1D projections from the
# original solution, `f_sol`. Use this normalization factor to scale the intensities
# from the sub-regions.
max_2d = [
f_sol.sum(axis=0).max().value,
f_sol.sum(axis=1).max().value,
f_sol.sum(axis=2).max().value,
]
max_1d = [
f_sol.sum(axis=(1, 2)).max().value,
f_sol.sum(axis=(0, 2)).max().value,
f_sol.sum(axis=(0, 1)).max().value,
]
plt.figure(figsize=(5, 4.4))
ax = plt.subplot(projection="3d")
# plot for the Q4 region
plot_3d(
ax,
Q4_region,
x_lim=[0, 140], # the x-limit
y_lim=[0, 140], # the y-limit
z_lim=[-50, -150], # the z-limit
max_2d=max_2d, # normalization factors for the 2D contours projections
max_1d=max_1d, # normalization factors for the 1D projections
cmap=cm.Reds_r, # colormap
box=True, # draw a box around the region
)
# plot for the Q3 region
plot_3d(
ax,
Q3_region,
x_lim=[0, 140], # the x-limit
y_lim=[0, 140], # the y-limit
z_lim=[-50, -150], # the z-limit
max_2d=max_2d, # normalization factors for the 2D contours projections
max_1d=max_1d, # normalization factors for the 1D projections
cmap=cm.Blues_r, # colormap
box=True, # draw a box around the region
)
ax.legend()
plt.tight_layout()
plt.show()
# %%
# Visualizing the isotropic projections.
# ''''''''''''''''''''''''''''''''''''''
#
# Because the :math:`\text{Q}^4` and :math:`\text{Q}^3` regions are fully resolved
# after the inversion, evaluating the contributions from these regions is trivial.
# For examples, the distribution of the isotropic chemical shifts for these regions are
# Isotropic chemical shift projection of the 2D MAF dataset.
data_iso = data_object_truncated.sum(axis=0)
data_iso /= data_iso.max() # normalize the projection
# Isotropic chemical shift projection of the tensor distribution dataset.
f_sol_iso = f_sol.sum(axis=(0, 1))
# Isotropic chemical shift projection of the tensor distribution for the Q4 region.
Q4_region_iso = Q4_region.sum(axis=(0, 1))
# Isotropic chemical shift projection of the tensor distribution for the Q3 region.
Q3_region_iso = Q3_region.sum(axis=(0, 1))
# Normalize the three projections.
f_sol_iso_max = f_sol_iso.max()
f_sol_iso /= f_sol_iso_max
Q4_region_iso /= f_sol_iso_max
Q3_region_iso /= f_sol_iso_max
# The plot of the different projections.
plt.figure(figsize=(5.5, 3.5))
ax = plt.subplot(projection="csdm")
ax.plot(f_sol_iso, "--k", label="tensor")
ax.plot(Q4_region_iso, "r", label="Q4")
ax.plot(Q3_region_iso, "b", label="Q3")
ax.plot(data_iso, "-k", label="MAF")
ax.plot(data_iso - f_sol_iso - 0.1, "gray", label="residuals")
ax.set_title("Isotropic projection")
ax.invert_xaxis()
plt.legend()
plt.tight_layout()
plt.show()
# %%
# Notice the skew in the isotropic chemical shift distribution for the
# :math:`\text{Q}^4` regions, which is expected.
#
# Analysis
# --------
#
# For the analysis, we use the
# `statistics <https://csdmpy.readthedocs.io/en/latest/api/statistics.html>`_
# module of the csdmpy package. Following is the moment analysis of the 3D volumes for
# both the :math:`\text{Q}^4` and :math:`\text{Q}^3` regions up to the second moment.
int_Q4 = stats.integral(Q4_region) # volume of the Q4 distribution
mean_Q4 = stats.mean(Q4_region) # mean of the Q4 distribution
std_Q4 = stats.std(Q4_region) # standard deviation of the Q4 distribution
int_Q3 = stats.integral(Q3_region) # volume of the Q3 distribution
mean_Q3 = stats.mean(Q3_region) # mean of the Q3 distribution
std_Q3 = stats.std(Q3_region) # standard deviation of the Q3 distribution
print("Q4 statistics")
print(f"\tpopulation = {100 * int_Q4 / (int_Q4 + int_Q3)}%")
print("\tmean\n\t\tx:\t{0}\n\t\ty:\t{1}\n\t\tiso:\t{2}".format(*mean_Q4))
print("\tstandard deviation\n\t\tx:\t{0}\n\t\ty:\t{1}\n\t\tiso:\t{2}".format(*std_Q4))
print("Q3 statistics")
print(f"\tpopulation = {100 * int_Q3 / (int_Q4 + int_Q3)}%")
print("\tmean\n\t\tx:\t{0}\n\t\ty:\t{1}\n\t\tiso:\t{2}".format(*mean_Q3))
print("\tstandard deviation\n\t\tx:\t{0}\n\t\ty:\t{1}\n\t\tiso:\t{2}".format(*std_Q3))
# %%
# The statistics shown above are according to the respective dimensions, that is, the
# `x`, `y`, and the isotropic chemical shifts. To convert the `x` and `y` statistics
# to commonly used :math:`\zeta_\sigma` and :math:`\eta_\sigma` statistics, use the
# :func:`~mrinversion.kernel.utils.x_y_to_zeta_eta` function.
mean_ζη_Q3 = x_y_to_zeta_eta(*mean_Q3[0:2])
# error propagation for calculating the standard deviation
std_ζ = (std_Q3[0] * mean_Q3[0]) ** 2 + (std_Q3[1] * mean_Q3[1]) ** 2
std_ζ /= mean_Q3[0] ** 2 + mean_Q3[1] ** 2
std_ζ = np.sqrt(std_ζ)
std_η = (std_Q3[1] * mean_Q3[0]) ** 2 + (std_Q3[0] * mean_Q3[1]) ** 2
std_η /= (mean_Q3[0] ** 2 + mean_Q3[1] ** 2) ** 2
std_η = (4 / np.pi) * np.sqrt(std_η)
print("Q3 statistics")
print(f"\tpopulation = {100 * int_Q3 / (int_Q4 + int_Q3)}%")
print("\tmean\n\t\tζ:\t{0}\n\t\tη:\t{1}\n\t\tiso:\t{2}".format(*mean_ζη_Q3, mean_Q3[2]))
print(
"\tstandard deviation\n\t\tζ:\t{0}\n\t\tη:\t{1}\n\t\tiso:\t{2}".format(
std_ζ, std_η, std_Q3[2]
)
)
# %%
# Result cross-verification
# -------------------------
#
# The reported value for the Qn-species distribution from Baltisberger `et al.` [#f1]_
# is listed below and is consistent with the above result.
#
# .. list-table::
# :widths: 7 15 28 25 25
# :header-rows: 1
#
# * - Species
# - Yield
# - Isotropic chemical shift, :math:`\delta_\text{iso}`
# - Shielding anisotropy, :math:`\zeta_\sigma`:
# - Shielding asymmetry, :math:`\eta_\sigma`:
#
# * - Q4
# - :math:`57.7 \pm 0.4` %
# - :math:`-104.7 \pm 5.2` ppm
# - 0 ppm (fixed)
# - 0 (fixed)
#
# * - Q3
# - :math:`42.3 \pm 0.4` %
# - :math:`-96.1 \pm 4.0` ppm
# - 89.0 ppm
# - 0 (fixed)
#
# %%
# Convert the 3D tensor distribution in Haeberlen parameters
# ----------------------------------------------------------
# You may re-bin the 3D tensor parameter distribution from a
# :math:`\rho(\delta_\text{iso}, x, y)` distribution to
# :math:`\rho(\delta_\text{iso}, \zeta_\sigma, \eta_\sigma)` distribution as follows.
# Create the zeta and eta dimensions, as shown below.
zeta = cp.as_dimension(np.arange(40) * 4 - 40, unit="ppm", label="zeta")
eta = cp.as_dimension(np.arange(16) / 15, label="eta")
# Use the `to_Haeberlen_grid` function to convert the tensor parameter distribution.
fsol_Hae = to_Haeberlen_grid(f_sol, zeta, eta)
# %%
# The 3D plot
# '''''''''''
plt.figure(figsize=(5, 4.4))
ax = plt.subplot(projection="3d")
plot_3d(ax, fsol_Hae, x_lim=[0, 1], y_lim=[-40, 120], z_lim=[-50, -150], alpha=0.2)
plt.tight_layout()
plt.show()
# %%
# References
# ----------
#
# .. [#f1] <NAME>., <NAME>., <NAME>., <NAME>.,
# <NAME>. Cluster formation of network-modifier cations in cesium
# silicate glasses, J. Chem. Phys. **148**, 094502, (2018).
# `doi:10.1063/1.5020986 <https://doi.org/10.1063/1.5020986>`_
| [
"matplotlib.pyplot.subplot",
"csdmpy.load",
"matplotlib.pyplot.show",
"csdmpy.statistics.std",
"matplotlib.pyplot.legend",
"mrinversion.linear_model.SmoothLasso",
"mrinversion.kernel.utils.x_y_to_zeta_eta",
"mrinversion.utils.to_Haeberlen_grid",
"csdmpy.statistics.integral",
"matplotlib.pyplot.fig... | [((1583, 1600), 'csdmpy.load', 'cp.load', (['filename'], {}), '(filename)\n', (1590, 1600), True, 'import csdmpy as cp\n'), ((4138, 4372), 'mrinversion.kernel.nmr.ShieldingPALineshape', 'ShieldingPALineshape', ([], {'anisotropic_dimension': 'anisotropic_dimension', 'inverse_dimension': 'inverse_dimensions', 'channel': '"""29Si"""', 'magnetic_flux_density': '"""9.4 T"""', 'rotor_angle': '"""87.14°"""', 'rotor_frequency': '"""14 kHz"""', 'number_of_sidebands': '(4)'}), "(anisotropic_dimension=anisotropic_dimension,\n inverse_dimension=inverse_dimensions, channel='29Si',\n magnetic_flux_density='9.4 T', rotor_angle='87.14°', rotor_frequency=\n '14 kHz', number_of_sidebands=4)\n", (4158, 4372), False, 'from mrinversion.kernel.nmr import ShieldingPALineshape\n'), ((6061, 6102), 'mrinversion.linear_model.TSVDCompression', 'TSVDCompression', (['K', 'data_object_truncated'], {}), '(K, data_object_truncated)\n', (6076, 6102), False, 'from mrinversion.linear_model import SmoothLasso, TSVDCompression\n'), ((8261, 8349), 'mrinversion.linear_model.SmoothLasso', 'SmoothLasso', ([], {'alpha': '(8.34e-07)', 'lambda1': '(6.16e-07)', 'inverse_dimension': 'inverse_dimensions'}), '(alpha=8.34e-07, lambda1=6.16e-07, inverse_dimension=\n inverse_dimensions)\n', (8272, 8349), False, 'from mrinversion.linear_model import SmoothLasso, TSVDCompression\n'), ((10148, 10176), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 4.4)'}), '(figsize=(5, 4.4))\n', (10158, 10176), True, 'import matplotlib.pyplot as plt\n'), ((10182, 10210), 'matplotlib.pyplot.subplot', 'plt.subplot', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (10193, 10210), True, 'import matplotlib.pyplot as plt\n'), ((10211, 10280), 'mrinversion.utils.plot_3d', 'plot_3d', (['ax', 'f_sol'], {'x_lim': '[0, 140]', 'y_lim': '[0, 140]', 'z_lim': '[-50, -150]'}), '(ax, f_sol, x_lim=[0, 140], y_lim=[0, 140], z_lim=[-50, -150])\n', (10218, 10280), False, 'from mrinversion.utils import 
plot_3d, to_Haeberlen_grid\n'), ((10281, 10299), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10297, 10299), True, 'import matplotlib.pyplot as plt\n'), ((10300, 10310), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10308, 10310), True, 'import matplotlib.pyplot as plt\n'), ((11179, 11207), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 4.4)'}), '(figsize=(5, 4.4))\n', (11189, 11207), True, 'import matplotlib.pyplot as plt\n'), ((11213, 11241), 'matplotlib.pyplot.subplot', 'plt.subplot', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (11224, 11241), True, 'import matplotlib.pyplot as plt\n'), ((11268, 11401), 'mrinversion.utils.plot_3d', 'plot_3d', (['ax', 'Q4_region'], {'x_lim': '[0, 140]', 'y_lim': '[0, 140]', 'z_lim': '[-50, -150]', 'max_2d': 'max_2d', 'max_1d': 'max_1d', 'cmap': 'cm.Reds_r', 'box': '(True)'}), '(ax, Q4_region, x_lim=[0, 140], y_lim=[0, 140], z_lim=[-50, -150],\n max_2d=max_2d, max_1d=max_1d, cmap=cm.Reds_r, box=True)\n', (11275, 11401), False, 'from mrinversion.utils import plot_3d, to_Haeberlen_grid\n'), ((11656, 11790), 'mrinversion.utils.plot_3d', 'plot_3d', (['ax', 'Q3_region'], {'x_lim': '[0, 140]', 'y_lim': '[0, 140]', 'z_lim': '[-50, -150]', 'max_2d': 'max_2d', 'max_1d': 'max_1d', 'cmap': 'cm.Blues_r', 'box': '(True)'}), '(ax, Q3_region, x_lim=[0, 140], y_lim=[0, 140], z_lim=[-50, -150],\n max_2d=max_2d, max_1d=max_1d, cmap=cm.Blues_r, box=True)\n', (11663, 11790), False, 'from mrinversion.utils import plot_3d, to_Haeberlen_grid\n'), ((12032, 12050), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (12048, 12050), True, 'import matplotlib.pyplot as plt\n'), ((12051, 12061), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12059, 12061), True, 'import matplotlib.pyplot as plt\n'), ((13133, 13163), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5.5, 3.5)'}), '(figsize=(5.5, 3.5))\n', (13143, 13163), True, 'import matplotlib.pyplot 
as plt\n'), ((13169, 13199), 'matplotlib.pyplot.subplot', 'plt.subplot', ([], {'projection': '"""csdm"""'}), "(projection='csdm')\n", (13180, 13199), True, 'import matplotlib.pyplot as plt\n'), ((13477, 13489), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (13487, 13489), True, 'import matplotlib.pyplot as plt\n'), ((13490, 13508), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (13506, 13508), True, 'import matplotlib.pyplot as plt\n'), ((13509, 13519), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13517, 13519), True, 'import matplotlib.pyplot as plt\n'), ((13964, 13989), 'csdmpy.statistics.integral', 'stats.integral', (['Q4_region'], {}), '(Q4_region)\n', (13978, 13989), True, 'from csdmpy import statistics as stats\n'), ((14033, 14054), 'csdmpy.statistics.mean', 'stats.mean', (['Q4_region'], {}), '(Q4_region)\n', (14043, 14054), True, 'from csdmpy import statistics as stats\n'), ((14095, 14115), 'csdmpy.statistics.std', 'stats.std', (['Q4_region'], {}), '(Q4_region)\n', (14104, 14115), True, 'from csdmpy import statistics as stats\n'), ((14171, 14196), 'csdmpy.statistics.integral', 'stats.integral', (['Q3_region'], {}), '(Q3_region)\n', (14185, 14196), True, 'from csdmpy import statistics as stats\n'), ((14240, 14261), 'csdmpy.statistics.mean', 'stats.mean', (['Q3_region'], {}), '(Q3_region)\n', (14250, 14261), True, 'from csdmpy import statistics as stats\n'), ((14302, 14322), 'csdmpy.statistics.std', 'stats.std', (['Q3_region'], {}), '(Q3_region)\n', (14311, 14322), True, 'from csdmpy import statistics as stats\n'), ((15198, 15228), 'mrinversion.kernel.utils.x_y_to_zeta_eta', 'x_y_to_zeta_eta', (['*mean_Q3[0:2]'], {}), '(*mean_Q3[0:2])\n', (15213, 15228), False, 'from mrinversion.kernel.utils import x_y_to_zeta_eta\n'), ((15409, 15423), 'numpy.sqrt', 'np.sqrt', (['std_ζ'], {}), '(std_ζ)\n', (15416, 15423), True, 'import numpy as np\n'), ((17213, 17248), 'mrinversion.utils.to_Haeberlen_grid', 
'to_Haeberlen_grid', (['f_sol', 'zeta', 'eta'], {}), '(f_sol, zeta, eta)\n', (17230, 17248), False, 'from mrinversion.utils import plot_3d, to_Haeberlen_grid\n'), ((17283, 17311), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 4.4)'}), '(figsize=(5, 4.4))\n', (17293, 17311), True, 'import matplotlib.pyplot as plt\n'), ((17317, 17345), 'matplotlib.pyplot.subplot', 'plt.subplot', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (17328, 17345), True, 'import matplotlib.pyplot as plt\n'), ((17346, 17433), 'mrinversion.utils.plot_3d', 'plot_3d', (['ax', 'fsol_Hae'], {'x_lim': '[0, 1]', 'y_lim': '[-40, 120]', 'z_lim': '[-50, -150]', 'alpha': '(0.2)'}), '(ax, fsol_Hae, x_lim=[0, 1], y_lim=[-40, 120], z_lim=[-50, -150],\n alpha=0.2)\n', (17353, 17433), False, 'from mrinversion.utils import plot_3d, to_Haeberlen_grid\n'), ((17430, 17448), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (17446, 17448), True, 'import matplotlib.pyplot as plt\n'), ((17449, 17459), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17457, 17459), True, 'import matplotlib.pyplot as plt\n'), ((1073, 1103), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4.5, 3.5)'}), '(figsize=(4.5, 3.5))\n', (1083, 1103), True, 'import matplotlib.pyplot as plt\n'), ((1113, 1143), 'matplotlib.pyplot.subplot', 'plt.subplot', ([], {'projection': '"""csdm"""'}), "(projection='csdm')\n", (1124, 1143), True, 'import matplotlib.pyplot as plt\n'), ((1264, 1282), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1280, 1282), True, 'import matplotlib.pyplot as plt\n'), ((1287, 1297), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1295, 1297), True, 'import matplotlib.pyplot as plt\n'), ((3675, 3734), 'csdmpy.LinearDimension', 'cp.LinearDimension', ([], {'count': '(25)', 'increment': '"""450 Hz"""', 'label': '"""x"""'}), "(count=25, increment='450 Hz', label='x')\n", (3693, 3734), True, 'import csdmpy as cp\n'), ((3761, 
3820), 'csdmpy.LinearDimension', 'cp.LinearDimension', ([], {'count': '(25)', 'increment': '"""450 Hz"""', 'label': '"""y"""'}), "(count=25, increment='450 Hz', label='y')\n", (3779, 3820), True, 'import csdmpy as cp\n'), ((15567, 15581), 'numpy.sqrt', 'np.sqrt', (['std_η'], {}), '(std_η)\n', (15574, 15581), True, 'import numpy as np\n'), ((17083, 17096), 'numpy.arange', 'np.arange', (['(16)'], {}), '(16)\n', (17092, 17096), True, 'import numpy as np\n'), ((17011, 17024), 'numpy.arange', 'np.arange', (['(40)'], {}), '(40)\n', (17020, 17024), True, 'import numpy as np\n')] |
import numpy as np
import dask.array as da
import xarray as xr
from xrspatial.utils import ArrayTypeFunctionMapping
def general_output_checks(input_agg: xr.DataArray,
                          output_agg: xr.DataArray,
                          expected_results: np.ndarray = None,
                          verify_attrs: bool = True):
    """Validate that ``output_agg`` is consistent with ``input_agg``.

    Checks that the output is backed by the same array type as the input
    (numpy / dask / cupy / dask-on-cupy); optionally that shape, dims,
    attrs and coordinates are preserved; and optionally that the output
    values match ``expected_results``.

    Parameters
    ----------
    input_agg : xr.DataArray
        The aggregate passed into the function under test.
    output_agg : xr.DataArray
        The aggregate returned by the function under test.
    expected_results : np.ndarray, optional
        Expected output values, compared with ``rtol=1e-06`` and NaNs
        treated as equal. Skipped when ``None``.
    verify_attrs : bool, default True
        Whether to verify shape/dims/attrs/coords carry over.
    """
    # Type of output is the same as of input.
    assert isinstance(output_agg.data, type(input_agg.data))
    if isinstance(input_agg.data, da.Array):
        # Dask case: the materialized chunks must match in type too
        # (distinguishes dask-on-numpy from dask-on-cupy).
        assert isinstance(
            output_agg.data.compute(), type(input_agg.data.compute()))

    if verify_attrs:
        # Shape and other attributes remain the same.
        assert output_agg.shape == input_agg.shape
        assert output_agg.dims == input_agg.dims
        assert output_agg.attrs == input_agg.attrs
        for coord in input_agg.coords:
            np.testing.assert_allclose(
                output_agg[coord].data, input_agg[coord].data, equal_nan=True
            )

    if expected_results is None:
        return

    # One comparison helper per backing array type.  Named functions
    # replace the previous noqa'd lambda assignments (E731); each helper
    # now uses its ``expected`` argument instead of silently closing over
    # ``expected_results``.
    def numpy_func(output, expected):
        np.testing.assert_allclose(
            output, expected, equal_nan=True, rtol=1e-06)

    def dask_func(output, expected):
        np.testing.assert_allclose(
            output.compute(), expected, equal_nan=True, rtol=1e-06)

    def cupy_func(output, expected):
        np.testing.assert_allclose(
            output.get(), expected, equal_nan=True, rtol=1e-06)

    def dask_cupy_func(output, expected):
        np.testing.assert_allclose(
            output.compute().get(), expected, equal_nan=True, rtol=1e-06)

    mapper = ArrayTypeFunctionMapping(
        numpy_func=numpy_func,
        dask_func=dask_func,
        cupy_func=cupy_func,
        dask_cupy_func=dask_cupy_func,
    )
    mapper(output_agg)(output_agg.data, expected_results)
| [
"numpy.testing.assert_allclose",
"xrspatial.utils.ArrayTypeFunctionMapping"
] | [((1828, 1952), 'xrspatial.utils.ArrayTypeFunctionMapping', 'ArrayTypeFunctionMapping', ([], {'numpy_func': 'numpy_func', 'dask_func': 'dask_func', 'cupy_func': 'cupy_func', 'dask_cupy_func': 'dask_cupy_func'}), '(numpy_func=numpy_func, dask_func=dask_func,\n cupy_func=cupy_func, dask_cupy_func=dask_cupy_func)\n', (1852, 1952), False, 'from xrspatial.utils import ArrayTypeFunctionMapping\n'), ((913, 1006), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['output_agg[coord].data', 'input_agg[coord].data'], {'equal_nan': '(True)'}), '(output_agg[coord].data, input_agg[coord].data,\n equal_nan=True)\n', (939, 1006), True, 'import numpy as np\n'), ((1122, 1207), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['output', 'expected_results'], {'equal_nan': '(True)', 'rtol': '(1e-06)'}), '(output, expected_results, equal_nan=True, rtol=1e-06\n )\n', (1148, 1207), True, 'import numpy as np\n')] |
#!/usr/bin/python3.6
"""Animate two interleaved 3-vortex star configurations on the unit disk."""
import numpy as np
from domains import UnitDisk
from dynamics import NVortexProblem
from plot import VortexPlot
from utils import star_configuration
if __name__ == '__main__':
    # N-vortex dynamics on the unit disk, integrated up to T = 16.
    domain = UnitDisk()
    problem = NVortexProblem(domain, Tmax=16)
    # Two star configurations with opposite-sign strengths (+0.1 / -0.1).
    # star_configuration is a project helper; presumably it returns flat
    # interleaved (x, y, x, y, ...) coordinates plus circulations — TODO confirm.
    x0, Gamma = star_configuration([-1/8], [.1], 3)
    x1, Gamma1 = star_configuration([-1/8], [-.1], 3)
    # Rotate each (x, y) pair of the second configuration by 60 degrees
    # (row-vector @ matrix applies the transpose of [[1/2, -√3/2], [√3/2, 1/2]],
    # i.e. a clockwise rotation — NOTE(review): confirm intended orientation).
    x1 = np.array([x1[j:j+2] @ [[1/2, -1/2*np.sqrt(3)], [1/2*np.sqrt(3), 1/2]] for j in range(0, len(x1), 2)]).flatten()
    # Merge both configurations into a single initial state.
    z0 = list(x0) + list(x1)
    Gamma = list(Gamma) + list(Gamma1)
    app = VortexPlot(problem)
    app.animate(z0, Gamma)
| [
"dynamics.NVortexProblem",
"domains.UnitDisk",
"utils.star_configuration",
"plot.VortexPlot",
"numpy.sqrt"
] | [((212, 222), 'domains.UnitDisk', 'UnitDisk', ([], {}), '()\n', (220, 222), False, 'from domains import UnitDisk\n'), ((237, 268), 'dynamics.NVortexProblem', 'NVortexProblem', (['domain'], {'Tmax': '(16)'}), '(domain, Tmax=16)\n', (251, 268), False, 'from dynamics import NVortexProblem\n'), ((285, 323), 'utils.star_configuration', 'star_configuration', (['[-1 / 8]', '[0.1]', '(3)'], {}), '([-1 / 8], [0.1], 3)\n', (303, 323), False, 'from utils import star_configuration\n'), ((338, 377), 'utils.star_configuration', 'star_configuration', (['[-1 / 8]', '[-0.1]', '(3)'], {}), '([-1 / 8], [-0.1], 3)\n', (356, 377), False, 'from utils import star_configuration\n'), ((575, 594), 'plot.VortexPlot', 'VortexPlot', (['problem'], {}), '(problem)\n', (585, 594), False, 'from plot import VortexPlot\n'), ((418, 428), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (425, 428), True, 'import numpy as np\n'), ((436, 446), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (443, 446), True, 'import numpy as np\n')] |
import time
from functools import reduce
from typing import Dict, List, Any, Optional
import numpy as np
import torch
from falkon.hopt.objectives.objectives import HyperoptObjective, FakeTorchModelMixin
from falkon.hopt.optimization.reporting import pred_reporting, EarlyStop, epoch_bookkeeping
__all__ = [
"train_complexity_reg",
"train_complexity_reg_mb",
]
def hp_grad(model: FakeTorchModelMixin, *loss_terms, accumulate_grads=True, verbose=True, losses_are_grads=False):
grads = []
hparams = model.parameters()
if not losses_are_grads:
if verbose:
for loss in loss_terms:
grads.append(torch.autograd.grad(loss, hparams, retain_graph=True, allow_unused=True))
else:
loss = reduce(torch.add, loss_terms)
grads.append(torch.autograd.grad(loss, hparams, retain_graph=False))
else:
grads = loss_terms
if accumulate_grads:
for g in grads:
for i in range(len(hparams)):
hp = hparams[i]
if hp.grad is None:
hp.grad = torch.zeros_like(hp)
if g[i] is not None:
hp.grad += g[i]
return grads
def create_optimizer(opt_type: str, model: HyperoptObjective, learning_rate: float):
center_lr_div = 1
schedule = None
named_params = model.named_parameters()
print("Creating optimizer with the following parameters:")
for k, v in named_params.items():
print(f"\t{k} : {v.shape}")
if opt_type == "adam":
if 'penalty' not in named_params:
opt_modules = [
{"params": named_params.values(), 'lr': learning_rate}
]
else:
opt_modules = []
if 'sigma' in named_params:
opt_modules.append({"params": named_params['sigma'], 'lr': learning_rate})
if 'penalty' in named_params:
opt_modules.append({"params": named_params['penalty'], 'lr': learning_rate})
if 'centers' in named_params:
opt_modules.append({
"params": named_params['centers'], 'lr': learning_rate / center_lr_div})
opt_hp = torch.optim.Adam(opt_modules)
# schedule = torch.optim.lr_scheduler.ReduceLROnPlateau(opt_hp, factor=0.5, patience=1)
# schedule = torch.optim.lr_scheduler.MultiStepLR(opt_hp, [2, 10, 40], gamma=0.5)
schedule = torch.optim.lr_scheduler.StepLR(opt_hp, 200, gamma=0.1)
elif opt_type == "sgd":
opt_hp = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)
elif opt_type == "lbfgs":
if model.losses_are_grads:
raise ValueError("L-BFGS not valid for model %s" % (model))
opt_hp = torch.optim.LBFGS(model.parameters(), lr=learning_rate,
history_size=100, )
elif opt_type == "rmsprop":
opt_hp = torch.optim.RMSprop(model.parameters(), lr=learning_rate)
else:
raise ValueError("Optimizer type %s not recognized" % (opt_type))
return opt_hp, schedule
def train_complexity_reg(
Xtr: torch.Tensor,
Ytr: torch.Tensor,
Xts: torch.Tensor,
Yts: torch.Tensor,
model: HyperoptObjective,
err_fn,
learning_rate: float,
num_epochs: int,
cuda: bool,
verbose: bool,
loss_every: int,
early_stop_epochs: int,
cgtol_decrease_epochs: Optional[int],
optimizer: str,
retrain_nkrr: bool = False,
) -> List[Dict[str, float]]:
if cuda:
Xtr, Ytr, Xts, Yts = Xtr.cuda(), Ytr.cuda(), Xts.cuda(), Yts.cuda()
opt_hp, schedule = create_optimizer(optimizer, model, learning_rate)
print(f"Starting hyperparameter optimization on model {model}.")
print(f"Will run for {num_epochs} epochs with {opt_hp} optimizer.")
logs = []
cum_time = 0
with torch.autograd.profiler.profile(enabled=False) as prof:
for epoch in range(num_epochs):
t_start = time.time()
grads: Any = None
losses: Any = None
def closure():
opt_hp.zero_grad()
nonlocal grads, losses
losses = model.hp_loss(Xtr, Ytr)
grads = hp_grad(model, *losses, accumulate_grads=True,
losses_are_grads=model.losses_are_grads, verbose=False)
loss = reduce(torch.add, losses)
return float(loss)
try:
opt_hp.step(closure)
except RuntimeError as e:
if "Cholesky" not in str(e):
raise e
print(f"Cholesky failure at epoch {epoch}. Exiting optimization!")
break
cum_time += time.time() - t_start
try:
epoch_bookkeeping(epoch=epoch, model=model, data={'Xtr': Xtr, 'Ytr': Ytr, 'Xts': Xts, 'Yts': Yts},
err_fn=err_fn, grads=grads, losses=losses, loss_every=loss_every,
early_stop_patience=early_stop_epochs, schedule=schedule, minibatch=None,
logs=logs, cum_time=cum_time, verbose=verbose,
accuracy_increase_patience=cgtol_decrease_epochs)
except EarlyStop as e:
print(e)
break
finally:
del grads, losses
torch.cuda.empty_cache()
if prof is not None:
print(prof.key_averages().table())
if retrain_nkrr:
print(f"Final retrain after {num_epochs} epochs:")
pred_dict = pred_reporting(
model=model, Xtr=Xtr, Ytr=Ytr, Xts=Xts, Yts=Yts,
err_fn=err_fn, epoch=num_epochs, cum_time=cum_time,
resolve_model=True)
logs.append(pred_dict)
return logs
def train_complexity_reg_mb(
Xtr: torch.Tensor,
Ytr: torch.Tensor,
Xts: torch.Tensor,
Yts: torch.Tensor,
model: HyperoptObjective,
err_fn,
learning_rate: float,
num_epochs: int,
cuda: bool,
verbose: bool,
loss_every: int,
early_stop_epochs: int,
cgtol_decrease_epochs: Optional[int],
optimizer: str,
minibatch: int,
retrain_nkrr: bool = False,
) -> List[Dict[str, float]]:
Xtrc, Ytrc, Xtsc, Ytsc = Xtr, Ytr, Xts, Yts
if cuda:
Xtrc, Ytrc, Xtsc, Ytsc = Xtr.cuda(), Ytr.cuda(), Xts.cuda(), Yts.cuda()
opt_hp, schedule = create_optimizer(optimizer, model, learning_rate)
print(f"Starting hyperparameter optimization on model {model}.")
print(f"Will run for {num_epochs} epochs with {opt_hp} optimizer, "
f"mini-batch size {minibatch}.")
logs = []
cum_time = 0
mb_indices = np.arange(Xtr.shape[0])
for epoch in range(num_epochs):
t_start = time.time()
np.random.shuffle(mb_indices)
for mb_start in range(0, Xtr.shape[0], minibatch):
Xtr_batch = (Xtr[mb_indices[mb_start: mb_start + minibatch], :]).contiguous()
Ytr_batch = (Ytr[mb_indices[mb_start: mb_start + minibatch], :]).contiguous()
if cuda:
Xtr_batch, Ytr_batch = Xtr_batch.cuda(), Ytr_batch.cuda()
opt_hp.zero_grad()
loss = model.hp_loss(Xtr_batch, Ytr_batch)[0] # There is only one loss!
loss.backward()
opt_hp.step()
cum_time += time.time() - t_start
try:
epoch_bookkeeping(epoch=epoch, model=model, data={'Xtr': Xtrc, 'Ytr': Ytrc, 'Xts': Xtsc, 'Yts': Ytsc},
err_fn=err_fn, grads=None, losses=None, loss_every=loss_every,
early_stop_patience=early_stop_epochs, schedule=schedule, minibatch=minibatch,
logs=logs, cum_time=cum_time, verbose=verbose,
accuracy_increase_patience=cgtol_decrease_epochs)
except EarlyStop as e:
print(e)
break
if retrain_nkrr:
print(f"Final retrain after {num_epochs} epochs:")
pred_dict = pred_reporting(
model=model, Xtr=Xtrc, Ytr=Ytrc, Xts=Xtsc, Yts=Ytsc,
err_fn=err_fn, epoch=num_epochs, cum_time=cum_time,
resolve_model=True)
logs.append(pred_dict)
return logs
| [
"torch.optim.lr_scheduler.StepLR",
"torch.zeros_like",
"torch.autograd.profiler.profile",
"falkon.hopt.optimization.reporting.epoch_bookkeeping",
"torch.autograd.grad",
"falkon.hopt.optimization.reporting.pred_reporting",
"time.time",
"torch.optim.Adam",
"numpy.arange",
"torch.cuda.empty_cache",
... | [((6821, 6844), 'numpy.arange', 'np.arange', (['Xtr.shape[0]'], {}), '(Xtr.shape[0])\n', (6830, 6844), True, 'import numpy as np\n'), ((2196, 2225), 'torch.optim.Adam', 'torch.optim.Adam', (['opt_modules'], {}), '(opt_modules)\n', (2212, 2225), False, 'import torch\n'), ((2431, 2486), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['opt_hp', '(200)'], {'gamma': '(0.1)'}), '(opt_hp, 200, gamma=0.1)\n', (2462, 2486), False, 'import torch\n'), ((3905, 3951), 'torch.autograd.profiler.profile', 'torch.autograd.profiler.profile', ([], {'enabled': '(False)'}), '(enabled=False)\n', (3936, 3951), False, 'import torch\n'), ((5457, 5481), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (5479, 5481), False, 'import torch\n'), ((5650, 5790), 'falkon.hopt.optimization.reporting.pred_reporting', 'pred_reporting', ([], {'model': 'model', 'Xtr': 'Xtr', 'Ytr': 'Ytr', 'Xts': 'Xts', 'Yts': 'Yts', 'err_fn': 'err_fn', 'epoch': 'num_epochs', 'cum_time': 'cum_time', 'resolve_model': '(True)'}), '(model=model, Xtr=Xtr, Ytr=Ytr, Xts=Xts, Yts=Yts, err_fn=\n err_fn, epoch=num_epochs, cum_time=cum_time, resolve_model=True)\n', (5664, 5790), False, 'from falkon.hopt.optimization.reporting import pred_reporting, EarlyStop, epoch_bookkeeping\n'), ((6899, 6910), 'time.time', 'time.time', ([], {}), '()\n', (6908, 6910), False, 'import time\n'), ((6919, 6948), 'numpy.random.shuffle', 'np.random.shuffle', (['mb_indices'], {}), '(mb_indices)\n', (6936, 6948), True, 'import numpy as np\n'), ((8154, 8298), 'falkon.hopt.optimization.reporting.pred_reporting', 'pred_reporting', ([], {'model': 'model', 'Xtr': 'Xtrc', 'Ytr': 'Ytrc', 'Xts': 'Xtsc', 'Yts': 'Ytsc', 'err_fn': 'err_fn', 'epoch': 'num_epochs', 'cum_time': 'cum_time', 'resolve_model': '(True)'}), '(model=model, Xtr=Xtrc, Ytr=Ytrc, Xts=Xtsc, Yts=Ytsc, err_fn=\n err_fn, epoch=num_epochs, cum_time=cum_time, resolve_model=True)\n', (8168, 8298), False, 'from falkon.hopt.optimization.reporting import 
pred_reporting, EarlyStop, epoch_bookkeeping\n'), ((757, 786), 'functools.reduce', 'reduce', (['torch.add', 'loss_terms'], {}), '(torch.add, loss_terms)\n', (763, 786), False, 'from functools import reduce\n'), ((4023, 4034), 'time.time', 'time.time', ([], {}), '()\n', (4032, 4034), False, 'import time\n'), ((7475, 7486), 'time.time', 'time.time', ([], {}), '()\n', (7484, 7486), False, 'import time\n'), ((7522, 7881), 'falkon.hopt.optimization.reporting.epoch_bookkeeping', 'epoch_bookkeeping', ([], {'epoch': 'epoch', 'model': 'model', 'data': "{'Xtr': Xtrc, 'Ytr': Ytrc, 'Xts': Xtsc, 'Yts': Ytsc}", 'err_fn': 'err_fn', 'grads': 'None', 'losses': 'None', 'loss_every': 'loss_every', 'early_stop_patience': 'early_stop_epochs', 'schedule': 'schedule', 'minibatch': 'minibatch', 'logs': 'logs', 'cum_time': 'cum_time', 'verbose': 'verbose', 'accuracy_increase_patience': 'cgtol_decrease_epochs'}), "(epoch=epoch, model=model, data={'Xtr': Xtrc, 'Ytr': Ytrc,\n 'Xts': Xtsc, 'Yts': Ytsc}, err_fn=err_fn, grads=None, losses=None,\n loss_every=loss_every, early_stop_patience=early_stop_epochs, schedule=\n schedule, minibatch=minibatch, logs=logs, cum_time=cum_time, verbose=\n verbose, accuracy_increase_patience=cgtol_decrease_epochs)\n", (7539, 7881), False, 'from falkon.hopt.optimization.reporting import pred_reporting, EarlyStop, epoch_bookkeeping\n'), ((812, 866), 'torch.autograd.grad', 'torch.autograd.grad', (['loss', 'hparams'], {'retain_graph': '(False)'}), '(loss, hparams, retain_graph=False)\n', (831, 866), False, 'import torch\n'), ((4429, 4454), 'functools.reduce', 'reduce', (['torch.add', 'losses'], {}), '(torch.add, losses)\n', (4435, 4454), False, 'from functools import reduce\n'), ((4785, 4796), 'time.time', 'time.time', ([], {}), '()\n', (4794, 4796), False, 'import time\n'), ((4840, 5192), 'falkon.hopt.optimization.reporting.epoch_bookkeeping', 'epoch_bookkeeping', ([], {'epoch': 'epoch', 'model': 'model', 'data': "{'Xtr': Xtr, 'Ytr': Ytr, 'Xts': Xts, 'Yts': Yts}", 
'err_fn': 'err_fn', 'grads': 'grads', 'losses': 'losses', 'loss_every': 'loss_every', 'early_stop_patience': 'early_stop_epochs', 'schedule': 'schedule', 'minibatch': 'None', 'logs': 'logs', 'cum_time': 'cum_time', 'verbose': 'verbose', 'accuracy_increase_patience': 'cgtol_decrease_epochs'}), "(epoch=epoch, model=model, data={'Xtr': Xtr, 'Ytr': Ytr,\n 'Xts': Xts, 'Yts': Yts}, err_fn=err_fn, grads=grads, losses=losses,\n loss_every=loss_every, early_stop_patience=early_stop_epochs, schedule=\n schedule, minibatch=None, logs=logs, cum_time=cum_time, verbose=verbose,\n accuracy_increase_patience=cgtol_decrease_epochs)\n", (4857, 5192), False, 'from falkon.hopt.optimization.reporting import pred_reporting, EarlyStop, epoch_bookkeeping\n'), ((650, 722), 'torch.autograd.grad', 'torch.autograd.grad', (['loss', 'hparams'], {'retain_graph': '(True)', 'allow_unused': '(True)'}), '(loss, hparams, retain_graph=True, allow_unused=True)\n', (669, 722), False, 'import torch\n'), ((1095, 1115), 'torch.zeros_like', 'torch.zeros_like', (['hp'], {}), '(hp)\n', (1111, 1115), False, 'import torch\n')] |
# CGD optim source code from:
# https://github.com/devzhk/Implicit-Competitive-Regularization
import random
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.autograd as autograd
from scipy.stats import gaussian_kde
import torch.nn.functional as F
import matplotlib.pyplot as plt
import torch.optim as optim
from torch.nn.utils import parameters_to_vector
import math
import os
import warnings
import time
random.seed(1234)
np.random.seed(1234)
torch.manual_seed(1234)
torch.cuda.manual_seed_all(1234)
update_rule = 'adam_lsd'
dis_iter = 1
_batch_size = 256
dim = 2000
use_cuda = True
z_dim = 64
iterations = 100
lr = 3e-4
beta = 0.55
alpha = 0.6
def _check_param_device(param, old_param_device):
if old_param_device is None:
old_param_device = param.get_device() if param.is_cuda else -1
else:
warn = False
if param.is_cuda: # Check if in same GPU
warn = (param.get_device() != old_param_device)
else: # Check if in CPU
warn = (old_param_device != -1)
if warn:
raise TypeError('Found two parameters on different devices, '
'this is currently not supported.')
return old_param_device
def parameters_grad_to_vector(parameters):
param_device = None
vec = []
for param in parameters:
param_device = _check_param_device(param, param_device)
vec.append(param.grad.view(-1))
return torch.cat(vec)
def conjugate_gradient(grad_x, grad_y,
x_params, y_params,
b, x=None, nsteps=10,
tol=1e-10, atol=1e-16,
lr_x=1.0, lr_y=1.0,
device=torch.device('cpu')):
if x is None:
x = torch.zeros(b.shape[0], device=device)
r = b.clone().detach()
else:
h1 = Hvp_vec(grad_vec=grad_x, params=y_params, vec=x, retain_graph=True).detach_().mul(lr_y)
h2 = Hvp_vec(grad_vec=grad_y, params=x_params, vec=h1, retain_graph=True).detach_().mul(lr_x)
Avx = x + h2
r = b.clone().detach() - Avx
p = r.clone().detach()
rdotr = torch.dot(r, r)
residual_tol = tol * rdotr
for i in range(nsteps):
# To compute Avp
h_1 = Hvp_vec(grad_vec=grad_x, params=y_params, vec=p, retain_graph=True).detach_().mul(lr_y)
h_2 = Hvp_vec(grad_vec=grad_y, params=x_params, vec=h_1, retain_graph=True).detach_().mul(lr_x)
Avp_ = p + h_2
alpha = rdotr / torch.dot(p, Avp_)
x.data.add_(alpha * p)
r.data.add_(- alpha * Avp_)
new_rdotr = torch.dot(r, r)
beta = new_rdotr / rdotr
p = r + beta * p
rdotr = new_rdotr
if rdotr < residual_tol or rdotr < atol:
break
if i > 99:
warnings.warn('CG iter num: %d' % (i + 1))
return x, i + 1
def Hvp_vec(grad_vec, params, vec, retain_graph=False):
'''
return Hessian vector product
'''
if torch.isnan(grad_vec).any():
raise ValueError('Gradvec nan')
if torch.isnan(vec).any():
raise ValueError('vector nan')
# zero padding for None
grad_grad = autograd.grad(grad_vec, params, grad_outputs=vec,
retain_graph=retain_graph,
allow_unused=True)
grad_list = []
for i, p in enumerate(params):
if grad_grad[i] is None:
grad_list.append(torch.zeros_like(p).view(-1))
else:
grad_list.append(grad_grad[i].contiguous().view(-1))
hvp = torch.cat(grad_list)
if torch.isnan(hvp).any():
raise ValueError('hvp Nan')
return hvp
def general_conjugate_gradient(grad_x, grad_y,
x_params, y_params, b,
lr_x, lr_y, x=None, nsteps=None,
tol=1e-12, atol=1e-20,
device=torch.device('cpu')):
lr_x = lr_x.sqrt()
if x is None:
x = torch.zeros(b.shape[0], device=device)
r = b.clone().detach()
else:
h1 = Hvp_vec(grad_vec=grad_x, params=y_params, vec=lr_x * x, retain_graph=True).mul_(lr_y)
h2 = Hvp_vec(grad_vec=grad_y, params=x_params, vec=h1, retain_graph=True).mul_(lr_x)
Avx = x + h2
r = b.clone().detach() - Avx
if nsteps is None:
nsteps = b.shape[0]
if grad_x.shape != b.shape:
raise RuntimeError('CG: hessian vector product shape mismatch')
p = r.clone().detach()
rdotr = torch.dot(r, r)
residual_tol = tol * rdotr
for i in range(nsteps):
# To compute Avp
# h_1 = Hvp_vec(grad_vec=grad_x, params=y_params, vec=lr_x * p, retain_graph=True)
h_1 = Hvp_vec(grad_vec=grad_x, params=y_params, vec=lr_x * p, retain_graph=True).mul_(lr_y)
# h_1.mul_(lr_y)
# lr_y * D_yx * b
# h_2 = Hvp_vec(grad_vec=grad_y, params=x_params, vec=lr_y * h_1, retain_graph=True)
h_2 = Hvp_vec(grad_vec=grad_y, params=x_params, vec=h_1, retain_graph=True).mul_(lr_x)
# h_2.mul_(lr_x)
# lr_x * D_xy * lr_y * D_yx * b
Avp_ = p + h_2
alpha = rdotr / torch.dot(p, Avp_)
x.data.add_(alpha * p)
r.data.add_(- alpha * Avp_)
new_rdotr = torch.dot(r, r)
beta = new_rdotr / rdotr
p = r + beta * p
rdotr = new_rdotr
if rdotr < residual_tol or rdotr < atol:
break
return x, i + 1
def zero_grad(params):
for p in params:
if p.grad is not None:
p.grad.detach()
p.grad.zero_()
class ACGD(object):
def __init__(self, max_params, min_params,
lr_max=1e-3, lr_min=1e-3,
eps=1e-5, beta=0.99,
tol=1e-12, atol=1e-20,
device=torch.device('cpu'),
solve_x=False, collect_info=True):
self.max_params = list(max_params)
self.min_params = list(min_params)
self.state = {'lr_max': lr_max, 'lr_min': lr_min,
'eps': eps, 'solve_x': solve_x,
'tol': tol, 'atol': atol,
'beta': beta, 'step': 0,
'old_max': None, 'old_min': None, # start point of CG
'sq_exp_avg_max': None, 'sq_exp_avg_min': None} # save last update
self.info = {'grad_x': None, 'grad_y': None,
'hvp_x': None, 'hvp_y': None,
'cg_x': None, 'cg_y': None,
'time': 0, 'iter_num': 0}
self.device = device
self.collect_info = collect_info
def zero_grad(self):
zero_grad(self.max_params)
zero_grad(self.min_params)
def get_info(self):
if self.info['grad_x'] is None:
print('Warning! No update information stored. Set collect_info=True before call this method')
return self.info
def state_dict(self):
return self.state
def load_state_dict(self, state_dict):
self.state.update(state_dict)
print('Load state: {}'.format(state_dict))
def set_lr(self, lr_max, lr_min):
self.state.update({'lr_max': lr_max, 'lr_min': lr_min})
print('Maximizing side learning rate: {:.4f}\n '
'Minimizing side learning rate: {:.4f}'.format(lr_max, lr_min))
def step(self, loss):
lr_max = self.state['lr_max']
lr_min = self.state['lr_min']
beta = self.state['beta']
eps = self.state['eps']
tol = self.state['tol']
atol = self.state['atol']
time_step = self.state['step'] + 1
self.state['step'] = time_step
grad_x = autograd.grad(loss, self.max_params, create_graph=True, retain_graph=True)
grad_x_vec = torch.cat([g.contiguous().view(-1) for g in grad_x])
grad_y = autograd.grad(loss, self.min_params, create_graph=True, retain_graph=True)
grad_y_vec = torch.cat([g.contiguous().view(-1) for g in grad_y])
grad_x_vec_d = grad_x_vec.clone().detach()
grad_y_vec_d = grad_y_vec.clone().detach()
sq_avg_x = self.state['sq_exp_avg_max']
sq_avg_y = self.state['sq_exp_avg_min']
sq_avg_x = torch.zeros_like(grad_x_vec_d, requires_grad=False) if sq_avg_x is None else sq_avg_x
sq_avg_y = torch.zeros_like(grad_y_vec_d, requires_grad=False) if sq_avg_y is None else sq_avg_y
sq_avg_x.mul_(beta).addcmul_(1 - beta, grad_x_vec_d, grad_x_vec_d)
sq_avg_y.mul_(beta).addcmul_(1 - beta, grad_y_vec_d, grad_y_vec_d)
bias_correction = 1 - beta ** time_step
lr_max = math.sqrt(bias_correction) * lr_max / sq_avg_x.sqrt().add(eps)
lr_min = math.sqrt(bias_correction) * lr_min / sq_avg_y.sqrt().add(eps)
scaled_grad_x = torch.mul(lr_max, grad_x_vec_d)
scaled_grad_y = torch.mul(lr_min, grad_y_vec_d)
hvp_x_vec = Hvp_vec(grad_y_vec, self.max_params, scaled_grad_y,
retain_graph=True) # h_xy * d_y
hvp_y_vec = Hvp_vec(grad_x_vec, self.min_params, scaled_grad_x,
retain_graph=True) # h_yx * d_x
p_x = torch.add(grad_x_vec_d, - hvp_x_vec)
p_y = torch.add(grad_y_vec_d, hvp_y_vec)
if self.collect_info:
norm_px = torch.norm(hvp_x_vec, p=2).item()
norm_py = torch.norm(hvp_y_vec, p=2).item()
timer = time.time()
if self.state['solve_x']:
p_y.mul_(lr_min.sqrt())
cg_y, iter_num = general_conjugate_gradient(grad_x=grad_y_vec, grad_y=grad_x_vec,
x_params=self.min_params, y_params=self.max_params,
b=p_y, x=self.state['old_min'],
tol=tol, atol=atol,
lr_x=lr_min, lr_y=lr_max, device=self.device)
old_min = cg_y.detach_()
min_update = cg_y.mul(- lr_min.sqrt())
hcg = Hvp_vec(grad_y_vec, self.max_params, min_update).detach_()
hcg.add_(grad_x_vec_d)
max_update = hcg.mul(lr_max)
old_max = hcg.mul(lr_max.sqrt())
else:
p_x.mul_(lr_max.sqrt())
cg_x, iter_num = general_conjugate_gradient(grad_x=grad_x_vec, grad_y=grad_y_vec,
x_params=self.max_params, y_params=self.min_params,
b=p_x, x=self.state['old_max'],
tol=tol, atol=atol,
lr_x=lr_max, lr_y=lr_min, device=self.device)
old_max = cg_x.detach_()
max_update = cg_x.mul(lr_max.sqrt())
hcg = Hvp_vec(grad_x_vec, self.min_params, max_update).detach_()
hcg.add_(grad_y_vec_d)
min_update = hcg.mul(- lr_min)
old_min = hcg.mul(lr_min.sqrt())
self.state.update({'old_max': old_max, 'old_min': old_min,
'sq_exp_avg_max': sq_avg_x, 'sq_exp_avg_min': sq_avg_y})
if self.collect_info:
timer = time.time() - timer
self.info.update({'time': timer, 'iter_num': iter_num,
'hvp_x': norm_px, 'hvp_y': norm_py})
index = 0
for p in self.max_params:
p.data.add_(max_update[index: index + p.numel()].reshape(p.shape))
index += p.numel()
assert index == max_update.numel(), 'Maximizer CG size mismatch'
index = 0
for p in self.min_params:
p.data.add_(min_update[index: index + p.numel()].reshape(p.shape))
index += p.numel()
assert index == min_update.numel(), 'Minimizer CG size mismatch'
if self.collect_info:
norm_gx = torch.norm(grad_x_vec, p=2).item()
norm_gy = torch.norm(grad_y_vec, p=2).item()
norm_cgx = torch.norm(max_update, p=2).item()
norm_cgy = torch.norm(min_update, p=2).item()
self.info.update({'grad_x': norm_gx, 'grad_y': norm_gy,
'cg_x': norm_cgx, 'cg_y': norm_cgy})
self.state['solve_x'] = False if self.state['solve_x'] else True
class Gen(nn.Module):
def __init__(self):
super(Gen, self).__init__()
main = nn.Sequential(
nn.Linear(z_dim, dim),
nn.ReLU(True),
nn.Linear(dim, dim),
nn.ReLU(True),
nn.Linear(dim, dim),
nn.ReLU(True),
nn.Linear(dim, 2),
)
self.main = main
def forward(self, noise):
output = self.main(noise)
return output
class Dis(nn.Module):
def __init__(self):
super(Dis, self).__init__()
main = nn.Sequential(
nn.Linear(2, dim),
nn.ReLU(True),
nn.Linear(dim, dim),
nn.ReLU(True),
nn.Linear(dim, dim),
nn.ReLU(True),
nn.Linear(dim, 1),
)
self.main = main
def forward(self, inputs):
output = self.main(inputs)
return output.view(-1)
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def get_8gaussians(batch_size):
scale = 2.
centers = [
(1, 0),
(-1, 0),
(0, 1),
(0, -1),
(1. / np.sqrt(2), 1. / np.sqrt(2)),
(1. / np.sqrt(2), -1. / np.sqrt(2)),
(-1. / np.sqrt(2), 1. / np.sqrt(2)),
(-1. / np.sqrt(2), -1. / np.sqrt(2))
]
centers = [(scale * x, scale * y) for x, y in centers]
while True:
dataset = []
for i in range(batch_size):
point = np.random.randn(2) * .05
center = random.choice(centers)
point[0] += center[0]
point[1] += center[1]
dataset.append(point)
dataset = np.array(dataset, dtype='float32')
dataset /= 1.414
out = Variable(torch.Tensor(dataset))
if use_cuda:
out = out.cuda()
yield out
def get_dens_real(batch_size):
data = get_8gaussians(batch_size).__next__()
real = np.array(data.data.cpu())
kde_real = gaussian_kde(real.T, bw_method=0.22)
x, y = np.mgrid[-2:2:(200 * 1j), -2:2:(200 * 1j)]
z_real = kde_real((x.ravel(), y.ravel())).reshape(*x.shape)
return z_real
z_real = get_dens_real(1000)
def plot(fake, epoch, name):
plt.figure(figsize=(20, 9))
fake = np.array(fake.data.cpu())
kde_fake = gaussian_kde(fake.T, bw_method=0.22)
x, y = np.mgrid[-2:2:(200 * 1j), -2:2:(200 * 1j)]
z_fake = kde_fake((x.ravel(), y.ravel())).reshape(*x.shape)
ax1 = plt.subplot(1, 2, 1)
ax1.pcolor(x, y, z_real, cmap='GnBu')
ax2 = plt.subplot(1, 2, 2)
ax2.pcolor(x, y, z_fake, cmap='GnBu')
ax1.scatter(real.data.cpu().numpy()[:, 0],
real.data.cpu().numpy()[:, 1])
ax2.scatter(fake[:, 0], fake[:, 1])
# plt.show()
if not os.path.exists('8_G_res/_' + name):
os.makedirs('8_G_res/_' + name)
plt.savefig('8_G_res/_' + name + '/' + str(epoch) + '.png')
plt.close()
dis = Dis()
gen = Gen()
dis.apply(weights_init)
gen.apply(weights_init)
if use_cuda:
dis = dis.cuda()
gen = gen.cuda()
if use_cuda:
device = torch.device('cuda')
else:
device = torch.device('cpu')
optimizer = ACGD(max_params=gen.parameters(), min_params=dis.parameters(),
device=device, solve_x=False, collect_info=False)
one = torch.FloatTensor([1])
mone = one * -1
if use_cuda:
one = one.cuda()
mone = mone.cuda()
dataset = get_8gaussians(_batch_size)
criterion = nn.BCEWithLogitsLoss()
ones = Variable(torch.ones(_batch_size))
zeros = Variable(torch.zeros(_batch_size))
if use_cuda:
criterion = criterion.cuda()
ones = ones.cuda()
zeros = zeros.cuda()
points = []
dis_params_flatten = parameters_to_vector(dis.parameters())
gen_params_flatten = parameters_to_vector(gen.parameters())
# just to fill the empty grad buffers
noise = torch.randn(_batch_size, z_dim)
if use_cuda:
noise = noise.cuda()
noise = autograd.Variable(noise)
fake = gen(noise)
pred_fake = criterion(dis(fake), zeros).sum()
(0.0 * pred_fake).backward(create_graph=True)
gen_loss = 0
pred_tot = 0
elapsed_time_list = []
for iteration in range(iterations):
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
start_event.record()
noise = torch.randn(_batch_size, z_dim)
if use_cuda:
noise = noise.cuda()
noise = autograd.Variable(noise)
real = dataset.__next__()
loss_real = criterion(dis(real), ones)
fake = gen(noise)
loss_fake = criterion(dis(fake), zeros)
gradient_penalty = 0
loss_d = loss_real + loss_fake + gradient_penalty
grad_d = torch.autograd.grad(
loss_d, inputs=(dis.parameters()), create_graph=True)
for p, g in zip(dis.parameters(), grad_d):
p.grad = g
optimizer.step(loss_d)
noise = torch.randn(_batch_size, z_dim)
ones = Variable(torch.ones(_batch_size))
zeros = Variable(torch.zeros(_batch_size))
if use_cuda:
noise = noise.cuda()
ones = ones.cuda()
zeros = zeros.cuda()
noise = autograd.Variable(noise)
fake = gen(noise)
loss_g = criterion(dis(fake), ones)
grad_g = torch.autograd.grad(
loss_g, inputs=(gen.parameters()), create_graph=True)
for p, g in zip(gen.parameters(), grad_g):
p.grad = g
optimizer.step(loss_g)
end_event.record()
torch.cuda.synchronize() # Wait for the events to be recorded!
elapsed_time_ms = start_event.elapsed_time(end_event)
if iteration > 3:
elapsed_time_list.append(elapsed_time_ms)
print(elapsed_time_ms)
print("iteration: " + str(iteration))
avg_time = np.mean(elapsed_time_list)
print('avg_time: ' + str(avg_time))
| [
"torch.cuda.synchronize",
"torch.dot",
"numpy.random.seed",
"torch.autograd.grad",
"torch.randn",
"torch.cat",
"matplotlib.pyplot.figure",
"numpy.mean",
"torch.device",
"torch.isnan",
"torch.ones",
"numpy.random.randn",
"matplotlib.pyplot.close",
"torch.FloatTensor",
"os.path.exists",
... | [((460, 477), 'random.seed', 'random.seed', (['(1234)'], {}), '(1234)\n', (471, 477), False, 'import random\n'), ((478, 498), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (492, 498), True, 'import numpy as np\n'), ((499, 522), 'torch.manual_seed', 'torch.manual_seed', (['(1234)'], {}), '(1234)\n', (516, 522), False, 'import torch\n'), ((523, 555), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['(1234)'], {}), '(1234)\n', (549, 555), False, 'import torch\n'), ((15824, 15846), 'torch.FloatTensor', 'torch.FloatTensor', (['[1]'], {}), '([1])\n', (15841, 15846), False, 'import torch\n'), ((15971, 15993), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (15991, 15993), True, 'import torch.nn as nn\n'), ((16353, 16384), 'torch.randn', 'torch.randn', (['_batch_size', 'z_dim'], {}), '(_batch_size, z_dim)\n', (16364, 16384), False, 'import torch\n'), ((16431, 16455), 'torch.autograd.Variable', 'autograd.Variable', (['noise'], {}), '(noise)\n', (16448, 16455), True, 'import torch.autograd as autograd\n'), ((18156, 18182), 'numpy.mean', 'np.mean', (['elapsed_time_list'], {}), '(elapsed_time_list)\n', (18163, 18182), True, 'import numpy as np\n'), ((1487, 1501), 'torch.cat', 'torch.cat', (['vec'], {}), '(vec)\n', (1496, 1501), False, 'import torch\n'), ((1750, 1769), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1762, 1769), False, 'import torch\n'), ((2183, 2198), 'torch.dot', 'torch.dot', (['r', 'r'], {}), '(r, r)\n', (2192, 2198), False, 'import torch\n'), ((3200, 3299), 'torch.autograd.grad', 'autograd.grad', (['grad_vec', 'params'], {'grad_outputs': 'vec', 'retain_graph': 'retain_graph', 'allow_unused': '(True)'}), '(grad_vec, params, grad_outputs=vec, retain_graph=retain_graph,\n allow_unused=True)\n', (3213, 3299), True, 'import torch.autograd as autograd\n'), ((3591, 3611), 'torch.cat', 'torch.cat', (['grad_list'], {}), '(grad_list)\n', (3600, 3611), False, 'import torch\n'), 
((3953, 3972), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3965, 3972), False, 'import torch\n'), ((4554, 4569), 'torch.dot', 'torch.dot', (['r', 'r'], {}), '(r, r)\n', (4563, 4569), False, 'import torch\n'), ((14517, 14553), 'scipy.stats.gaussian_kde', 'gaussian_kde', (['real.T'], {'bw_method': '(0.22)'}), '(real.T, bw_method=0.22)\n', (14529, 14553), False, 'from scipy.stats import gaussian_kde\n'), ((14755, 14782), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 9)'}), '(figsize=(20, 9))\n', (14765, 14782), True, 'import matplotlib.pyplot as plt\n'), ((14835, 14871), 'scipy.stats.gaussian_kde', 'gaussian_kde', (['fake.T'], {'bw_method': '(0.22)'}), '(fake.T, bw_method=0.22)\n', (14847, 14871), False, 'from scipy.stats import gaussian_kde\n'), ((15002, 15022), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (15013, 15022), True, 'import matplotlib.pyplot as plt\n'), ((15076, 15096), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (15087, 15096), True, 'import matplotlib.pyplot as plt\n'), ((15445, 15456), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (15454, 15456), True, 'import matplotlib.pyplot as plt\n'), ((15614, 15634), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (15626, 15634), False, 'import torch\n'), ((15654, 15673), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (15666, 15673), False, 'import torch\n'), ((16011, 16034), 'torch.ones', 'torch.ones', (['_batch_size'], {}), '(_batch_size)\n', (16021, 16034), False, 'import torch\n'), ((16053, 16077), 'torch.zeros', 'torch.zeros', (['_batch_size'], {}), '(_batch_size)\n', (16064, 16077), False, 'import torch\n'), ((16670, 16706), 'torch.cuda.Event', 'torch.cuda.Event', ([], {'enable_timing': '(True)'}), '(enable_timing=True)\n', (16686, 16706), False, 'import torch\n'), ((16723, 16759), 'torch.cuda.Event', 'torch.cuda.Event', ([], 
{'enable_timing': '(True)'}), '(enable_timing=True)\n', (16739, 16759), False, 'import torch\n'), ((16798, 16829), 'torch.randn', 'torch.randn', (['_batch_size', 'z_dim'], {}), '(_batch_size, z_dim)\n', (16809, 16829), False, 'import torch\n'), ((16889, 16913), 'torch.autograd.Variable', 'autograd.Variable', (['noise'], {}), '(noise)\n', (16906, 16913), True, 'import torch.autograd as autograd\n'), ((17337, 17368), 'torch.randn', 'torch.randn', (['_batch_size', 'z_dim'], {}), '(_batch_size, z_dim)\n', (17348, 17368), False, 'import torch\n'), ((17575, 17599), 'torch.autograd.Variable', 'autograd.Variable', (['noise'], {}), '(noise)\n', (17592, 17599), True, 'import torch.autograd as autograd\n'), ((17880, 17904), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (17902, 17904), False, 'import torch\n'), ((1802, 1840), 'torch.zeros', 'torch.zeros', (['b.shape[0]'], {'device': 'device'}), '(b.shape[0], device=device)\n', (1813, 1840), False, 'import torch\n'), ((2645, 2660), 'torch.dot', 'torch.dot', (['r', 'r'], {}), '(r, r)\n', (2654, 2660), False, 'import torch\n'), ((2835, 2877), 'warnings.warn', 'warnings.warn', (["('CG iter num: %d' % (i + 1))"], {}), "('CG iter num: %d' % (i + 1))\n", (2848, 2877), False, 'import warnings\n'), ((4028, 4066), 'torch.zeros', 'torch.zeros', (['b.shape[0]'], {'device': 'device'}), '(b.shape[0], device=device)\n', (4039, 4066), False, 'import torch\n'), ((5303, 5318), 'torch.dot', 'torch.dot', (['r', 'r'], {}), '(r, r)\n', (5312, 5318), False, 'import torch\n'), ((5836, 5855), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (5848, 5855), False, 'import torch\n'), ((7690, 7764), 'torch.autograd.grad', 'autograd.grad', (['loss', 'self.max_params'], {'create_graph': '(True)', 'retain_graph': '(True)'}), '(loss, self.max_params, create_graph=True, retain_graph=True)\n', (7703, 7764), True, 'import torch.autograd as autograd\n'), ((7856, 7930), 'torch.autograd.grad', 'autograd.grad', (['loss', 
'self.min_params'], {'create_graph': '(True)', 'retain_graph': '(True)'}), '(loss, self.min_params, create_graph=True, retain_graph=True)\n', (7869, 7930), True, 'import torch.autograd as autograd\n'), ((8798, 8829), 'torch.mul', 'torch.mul', (['lr_max', 'grad_x_vec_d'], {}), '(lr_max, grad_x_vec_d)\n', (8807, 8829), False, 'import torch\n'), ((8854, 8885), 'torch.mul', 'torch.mul', (['lr_min', 'grad_y_vec_d'], {}), '(lr_min, grad_y_vec_d)\n', (8863, 8885), False, 'import torch\n'), ((9166, 9201), 'torch.add', 'torch.add', (['grad_x_vec_d', '(-hvp_x_vec)'], {}), '(grad_x_vec_d, -hvp_x_vec)\n', (9175, 9201), False, 'import torch\n'), ((9217, 9251), 'torch.add', 'torch.add', (['grad_y_vec_d', 'hvp_y_vec'], {}), '(grad_y_vec_d, hvp_y_vec)\n', (9226, 9251), False, 'import torch\n'), ((14209, 14243), 'numpy.array', 'np.array', (['dataset'], {'dtype': '"""float32"""'}), "(dataset, dtype='float32')\n", (14217, 14243), True, 'import numpy as np\n'), ((15301, 15335), 'os.path.exists', 'os.path.exists', (["('8_G_res/_' + name)"], {}), "('8_G_res/_' + name)\n", (15315, 15335), False, 'import os\n'), ((15345, 15376), 'os.makedirs', 'os.makedirs', (["('8_G_res/_' + name)"], {}), "('8_G_res/_' + name)\n", (15356, 15376), False, 'import os\n'), ((17389, 17412), 'torch.ones', 'torch.ones', (['_batch_size'], {}), '(_batch_size)\n', (17399, 17412), False, 'import torch\n'), ((17435, 17459), 'torch.zeros', 'torch.zeros', (['_batch_size'], {}), '(_batch_size)\n', (17446, 17459), False, 'import torch\n'), ((2538, 2556), 'torch.dot', 'torch.dot', (['p', 'Avp_'], {}), '(p, Avp_)\n', (2547, 2556), False, 'import torch\n'), ((3013, 3034), 'torch.isnan', 'torch.isnan', (['grad_vec'], {}), '(grad_vec)\n', (3024, 3034), False, 'import torch\n'), ((3089, 3105), 'torch.isnan', 'torch.isnan', (['vec'], {}), '(vec)\n', (3100, 3105), False, 'import torch\n'), ((3619, 3635), 'torch.isnan', 'torch.isnan', (['hvp'], {}), '(hvp)\n', (3630, 3635), False, 'import torch\n'), ((5197, 5215), 'torch.dot', 
'torch.dot', (['p', 'Avp_'], {}), '(p, Avp_)\n', (5206, 5215), False, 'import torch\n'), ((8223, 8274), 'torch.zeros_like', 'torch.zeros_like', (['grad_x_vec_d'], {'requires_grad': '(False)'}), '(grad_x_vec_d, requires_grad=False)\n', (8239, 8274), False, 'import torch\n'), ((8328, 8379), 'torch.zeros_like', 'torch.zeros_like', (['grad_y_vec_d'], {'requires_grad': '(False)'}), '(grad_y_vec_d, requires_grad=False)\n', (8344, 8379), False, 'import torch\n'), ((9414, 9425), 'time.time', 'time.time', ([], {}), '()\n', (9423, 9425), False, 'import time\n'), ((12480, 12501), 'torch.nn.Linear', 'nn.Linear', (['z_dim', 'dim'], {}), '(z_dim, dim)\n', (12489, 12501), True, 'import torch.nn as nn\n'), ((12515, 12528), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (12522, 12528), True, 'import torch.nn as nn\n'), ((12542, 12561), 'torch.nn.Linear', 'nn.Linear', (['dim', 'dim'], {}), '(dim, dim)\n', (12551, 12561), True, 'import torch.nn as nn\n'), ((12575, 12588), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (12582, 12588), True, 'import torch.nn as nn\n'), ((12602, 12621), 'torch.nn.Linear', 'nn.Linear', (['dim', 'dim'], {}), '(dim, dim)\n', (12611, 12621), True, 'import torch.nn as nn\n'), ((12635, 12648), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (12642, 12648), True, 'import torch.nn as nn\n'), ((12662, 12679), 'torch.nn.Linear', 'nn.Linear', (['dim', '(2)'], {}), '(dim, 2)\n', (12671, 12679), True, 'import torch.nn as nn\n'), ((12939, 12956), 'torch.nn.Linear', 'nn.Linear', (['(2)', 'dim'], {}), '(2, dim)\n', (12948, 12956), True, 'import torch.nn as nn\n'), ((12970, 12983), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (12977, 12983), True, 'import torch.nn as nn\n'), ((12997, 13016), 'torch.nn.Linear', 'nn.Linear', (['dim', 'dim'], {}), '(dim, dim)\n', (13006, 13016), True, 'import torch.nn as nn\n'), ((13030, 13043), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (13037, 13043), True, 'import torch.nn as 
nn\n'), ((13057, 13076), 'torch.nn.Linear', 'nn.Linear', (['dim', 'dim'], {}), '(dim, dim)\n', (13066, 13076), True, 'import torch.nn as nn\n'), ((13090, 13103), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (13097, 13103), True, 'import torch.nn as nn\n'), ((13117, 13134), 'torch.nn.Linear', 'nn.Linear', (['dim', '(1)'], {}), '(dim, 1)\n', (13126, 13134), True, 'import torch.nn as nn\n'), ((14066, 14088), 'random.choice', 'random.choice', (['centers'], {}), '(centers)\n', (14079, 14088), False, 'import random\n'), ((14292, 14313), 'torch.Tensor', 'torch.Tensor', (['dataset'], {}), '(dataset)\n', (14304, 14313), False, 'import torch\n'), ((8630, 8656), 'math.sqrt', 'math.sqrt', (['bias_correction'], {}), '(bias_correction)\n', (8639, 8656), False, 'import math\n'), ((8710, 8736), 'math.sqrt', 'math.sqrt', (['bias_correction'], {}), '(bias_correction)\n', (8719, 8736), False, 'import math\n'), ((11257, 11268), 'time.time', 'time.time', ([], {}), '()\n', (11266, 11268), False, 'import time\n'), ((13697, 13707), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (13704, 13707), True, 'import numpy as np\n'), ((13714, 13724), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (13721, 13724), True, 'import numpy as np\n'), ((13741, 13751), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (13748, 13751), True, 'import numpy as np\n'), ((13759, 13769), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (13766, 13769), True, 'import numpy as np\n'), ((13787, 13797), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (13794, 13797), True, 'import numpy as np\n'), ((13804, 13814), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (13811, 13814), True, 'import numpy as np\n'), ((13832, 13842), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (13839, 13842), True, 'import numpy as np\n'), ((13850, 13860), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (13857, 13860), True, 'import numpy as np\n'), ((14020, 14038), 'numpy.random.randn', 'np.random.randn', (['(2)'], {}), 
'(2)\n', (14035, 14038), True, 'import numpy as np\n'), ((9304, 9330), 'torch.norm', 'torch.norm', (['hvp_x_vec'], {'p': '(2)'}), '(hvp_x_vec, p=2)\n', (9314, 9330), False, 'import torch\n'), ((9360, 9386), 'torch.norm', 'torch.norm', (['hvp_y_vec'], {'p': '(2)'}), '(hvp_y_vec, p=2)\n', (9370, 9386), False, 'import torch\n'), ((11936, 11963), 'torch.norm', 'torch.norm', (['grad_x_vec'], {'p': '(2)'}), '(grad_x_vec, p=2)\n', (11946, 11963), False, 'import torch\n'), ((11993, 12020), 'torch.norm', 'torch.norm', (['grad_y_vec'], {'p': '(2)'}), '(grad_y_vec, p=2)\n', (12003, 12020), False, 'import torch\n'), ((12051, 12078), 'torch.norm', 'torch.norm', (['max_update'], {'p': '(2)'}), '(max_update, p=2)\n', (12061, 12078), False, 'import torch\n'), ((12109, 12136), 'torch.norm', 'torch.norm', (['min_update'], {'p': '(2)'}), '(min_update, p=2)\n', (12119, 12136), False, 'import torch\n'), ((3472, 3491), 'torch.zeros_like', 'torch.zeros_like', (['p'], {}), '(p)\n', (3488, 3491), False, 'import torch\n')] |
#!/usr/bin/env python
#
# get uncertainty of a single DEM, using the provided reference points (e.g. ICESAT).
# last modified: <NAME>, June 6, 2019
#
#
# usage: DEMUncertainty.py [-h] [-z Z_FIELD] Refpts_file DEM_file
# positional arguments:
# Refpts_file Path to the reference points
# (can be an ESRI shapefile or a 3-column textfile in XYZ format)
# DEM_file DEM file name (Must be a Geotiff, CRS must agree with the reference points)
#
# optional arguments:
# -z Z_FIELD, --zfield Z_FIELD Field name for accessing reference values
# (MANDATORY if using a shapefile for reference points)
#
#
# example: 1) DEMUncertainty.py data/ICESAT_subset.shp ../dhdt/Demo_DEMs/HookerFJL_11AUG16WV01DEM1_EPSG32640.tif -z H_ell
# 2) DEMUncertainty.py data/ICESAT_subset2.xyz ../dhdt/Demo_DEMs/HookerFJL_11AUG16WV01DEM1_EPSG32640.tif
#
#
# output: There are 2 output files. All have the same file name from DEM file, but the file extensions are different.
# 1) A png file showing the histogram of the offset between the DEM and the reference points.
# 2) A text (.param) file showing statistics of the offset; the std of which is used to estimate DEM uncertainty.
# The output file is properly formatted so it fits the param file the main dh/dt script (dhdt.py) needs.
#
# version history: This script is modified from
# v0.1: getPointsAtIcesat.py
# v0.2: corrected_dem_var.bash, corrected_dem_var.py
# v1.0: getUncertaintyDEM.py (This one runs slowly on a large DEM or a large reference dataset)
from argparse import ArgumentParser
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.dirname(sys.argv[0])) + '/../Utilities/Python') # for all modules
from UtilRaster import SingleRaster, timeit
from UtilXYZ import ZArray
import numpy as np
parser = ArgumentParser()
parser.add_argument('Refpts_file', help='Path to the reference points')
parser.add_argument('DEM_file', help='DEM file name')
parser.add_argument('-z', '--zfield', help='(MANDATORY if using a shapefile for reference points) Field name for accessing reference values', dest='z_field')
args = parser.parse_args()
@timeit
def DEMUncertainty(pointfilepath, demfpath, zfield=None):
if pointfilepath.split('.')[-1] == 'shp':
import geopandas as gpd
shp = gpd.read_file(pointfilepath)
x = np.array([i.x for i in shp['geometry']])
y = np.array([i.y for i in shp['geometry']])
z = np.array([i for i in shp[zfield]])
else:
xyz = np.loadtxt(pointfilepath)
x = xyz[:, 0]
y = xyz[:, 1]
z = xyz[:, 2]
dem = SingleRaster(demfpath)
demz = dem.ReadGeolocPoints(x, y)
offset = demz - z
nonan_idx = ~np.isnan(offset)
offset = ZArray(offset[nonan_idx])
output_png = demfpath.replace('.tif', '.png')
# output_png = output_png.split('/')[-1]
offset.MADStats()
offset.MADHist(output_png)
return offset
offset = DEMUncertainty(args.Refpts_file, args.DEM_file, args.z_field)
paramfile = args.DEM_file.replace('.tif', '.param')
# paramfile = paramfile.split('/')[-1]
with open(paramfile, 'w') as f:
f.write('n = {}\n'.format(offset.size))
f.write('mean = {}\n'.format(offset.MAD_mean))
f.write('median = {}\n'.format(offset.MAD_median))
f.write('std = {}\n'.format(offset.MAD_std))
f.write('refpts = {}\n'.format(os.path.abspath(args.Refpts_file)))
# ==== Other possible input for testing purpose ====
# pointfile = '/data/whyj/Projects/Franz_Josef_Land/Source/Shapefile_ICESAT_classification/ICESAT_forFJL_correction/ICESAT_FJL_UTM40_BR_SP_SS_RemovedOutlier.shp'
# demfile = '/13t1/whyj/Projects/Franz_Josef_Land/DEM_Aligned/WV_3m_Rankfiltered/RankFilterSquare4Thres50_11AUG16_WV01_FJL_1020010015941D00_1020010015D7CD00_DEM1-3m_ICESAT-Aligned.tif'
# demfile = '/13t1/whyj/Projects/Franz_Josef_Land/DEM_Aligned/WV_3m_Rankfiltered/RankFilterSquare4Thres50_12MAR22_WV01_FJL_102001001A7FC400_102001001AB83F00_DEM1-3m_ICESAT-Aligned.tif' # a huge one | [
"UtilRaster.SingleRaster",
"os.path.abspath",
"argparse.ArgumentParser",
"os.path.dirname",
"UtilXYZ.ZArray",
"numpy.isnan",
"numpy.array",
"numpy.loadtxt",
"geopandas.read_file"
] | [((1972, 1988), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (1986, 1988), False, 'from argparse import ArgumentParser\n'), ((2712, 2734), 'UtilRaster.SingleRaster', 'SingleRaster', (['demfpath'], {}), '(demfpath)\n', (2724, 2734), False, 'from UtilRaster import SingleRaster, timeit\n'), ((2831, 2856), 'UtilXYZ.ZArray', 'ZArray', (['offset[nonan_idx]'], {}), '(offset[nonan_idx])\n', (2837, 2856), False, 'from UtilXYZ import ZArray\n'), ((2450, 2478), 'geopandas.read_file', 'gpd.read_file', (['pointfilepath'], {}), '(pointfilepath)\n', (2463, 2478), True, 'import geopandas as gpd\n'), ((2485, 2525), 'numpy.array', 'np.array', (["[i.x for i in shp['geometry']]"], {}), "([i.x for i in shp['geometry']])\n", (2493, 2525), True, 'import numpy as np\n'), ((2532, 2572), 'numpy.array', 'np.array', (["[i.y for i in shp['geometry']]"], {}), "([i.y for i in shp['geometry']])\n", (2540, 2572), True, 'import numpy as np\n'), ((2579, 2613), 'numpy.array', 'np.array', (['[i for i in shp[zfield]]'], {}), '([i for i in shp[zfield]])\n', (2587, 2613), True, 'import numpy as np\n'), ((2629, 2654), 'numpy.loadtxt', 'np.loadtxt', (['pointfilepath'], {}), '(pointfilepath)\n', (2639, 2654), True, 'import numpy as np\n'), ((2804, 2820), 'numpy.isnan', 'np.isnan', (['offset'], {}), '(offset)\n', (2812, 2820), True, 'import numpy as np\n'), ((1791, 1819), 'os.path.dirname', 'os.path.dirname', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (1806, 1819), False, 'import os\n'), ((3432, 3465), 'os.path.abspath', 'os.path.abspath', (['args.Refpts_file'], {}), '(args.Refpts_file)\n', (3447, 3465), False, 'import os\n')] |
#!/usr/bin/python
#learn TFA matrix values from
# symbolic TFA matrix,
# CS matrix,
# and measured log expression values
from gurobipy import *
import numpy
import random
import argparse
import time
def parseInputs():
parser = argparse.ArgumentParser(description='Infer CS and TFA values')
parser.add_argument('--expression', '-e', type=str,
dest='expressionFile', action='store', required=True,
help='path to csv file of expression data')
parser.add_argument('--crossSet', '-v', type=str,
dest='crossFile', action='store',
help='path to csv file of expression data for cross checking overfitting')
parser.add_argument('--WT', '-w', '--wt', type=int,
dest='wtIndex', action='store', default=-1,
help='column index for the WT or reference sample')
parser.add_argument('--binaryCS', '-c', type=str,
dest='csFile', action='store',
help='csv file of edges between TFs (columns) and target genes (rows)\
\n -1: repressive edge\t 0: no edge\t 1: activating edge')
parser.add_argument('--startingCS', '-s', type=str,
dest='startFile', action='store',
help='csv file of edge strengths between TFs (columns) and target genes (rows)\
\n optimization will start with these values')
parser.add_argument('--binaryTFA', '-a', type=str,
dest='tfaFile', action='store',
help='csv file of known activity behavior for TFs (rows) in samples (columns)\
\n 0: no activity\t 1: no prior expectation\t 2: greater activity than WT/reference')
#parser.add_argument('--freezeCS', '-f', type=int,
# dest='freezeFlag', action='store',
# default=0, choices = [0,1],
# help='meant for second round fitting where CS values are held constant\
# \n 0: optimize all \t1: only optimize TFA and baselines')
parser.add_argument('--constrainMOR', '-m', type=int,
dest='morFlag', action='store',
default=0, choices = [0,1],
help='constrain mode of regulation by using signs from binaryCS\
\n 0: use signs to constrain CS values\t 1: don\'t constrain CS values')
parser.add_argument('--maxIterations', '-i', type=int,
dest='maxIter', action='store', default=100,
help='maximum number of alternative iterations of optimization')
parser.add_argument('--fileTag', '-t', type=str,
dest='fileTag', action='store', default='',
help='string to help label log and output files')
args = parser.parse_args()
#check for required expression data
try:
expressionValues = numpy.genfromtxt(args.expressionFile, delimiter=',')
except IOError as e:
print("Error:\t", e)
quit()
#check for binaryCS and/or startingCS file
if args.startFile != None:
try:
startCS = numpy.genfromtxt(args.startFile, delimiter=',', dtype=float)
except IOError as e:
print("Error:\t", e)
quit()
elif args.csFile != None:
try:
binaryCS = numpy.genfromtxt(args.csFile, delimiter=',', dtype=float)
startCS = makeRandomStart(binaryCS, args.morFlag==0)
except IOError as e:
print("Error:\t", e)
quit()
else:
print("Either a binaryCS file or a startingCS file is required")
quit()
#check for binaryTFA data
if args.tfaFile != None:
try:
binaryTFA = numpy.genfromtxt(args.tfaFile, delimiter=',')
except IOError as e:
print("Error:\t", e)
quit()
else:
binaryTFA = numpy.ones((len(startCS[0])-1,len(expressionValues[0])))
if len(expressionValues) != len(startCS):
print("Error: number of genes (",len(expressionValues),"rows) in expression data does not match (",len(startCS),"rows) CS data")
quit()
if len(expressionValues[0]) != len(binaryTFA[0]):
print("Error: number of samples (",len(expressionValues[0]),"columns) in expression data does not match (",len(binaryTFA[0]),"columns) TFA data")
quit()
if len(startCS[0])-1 != len(binaryTFA):
print("Error: number of TFs ("+str(len(startCS[0])-1)+"columns) in CS data does not match ("+str(len(binaryTFA))+"rows) TFA data")
quit()
if abs(args.wtIndex) > len(expressionValues[0]):
print("Error: index of reference sample outside the bounds of given expression data")
quit()
if args.morFlag == 0:
print("will constrain the signs/mode of regulation for CS values")
return [args, expressionValues, startCS, binaryTFA]
"""
Input:
a numpy array of binaryCS values
a boolean for whether to keep cs sign constraint
Output:
a numpy array to use as a random start CS
makes a random start point for cs matrix and saves as a csv file
"""
def makeRandomStart(binaryCS, signFlag):
startingCS = numpy.hstack((numpy.copy(binaryCS), numpy.ones((len(binaryCS),1))))
for i in range(len(startingCS)):
for j in range(len(binaryCS[i])):
randVal = random.uniform(-10,10)
if signFlag and abs(binaryCS[i][j])==1:
startingCS[i,j]*=abs(randVal)
else:
startingCS[i,j]*=randVal
startingCS[i,-1] = random.uniform(-10,10)
return startingCS
"""
Input:
two data matrices
the first is assumed to be true data
the second is assumed to be predicted/learned/fitted data
Output:
list of var explained and SSE
calculates total error and variance explained between predicted and true data
"""
def calcError(data, dataLearned):
numerator = 0
denominator = 0
for i in range(len(dataLearned)):
sst = numpy.var(data[i])*len(data[i])
sse = sum((data[i]-dataLearned[i])**2)
numerator += sse
denominator += sst
return [1-(numerator/denominator), numerator]
"""
Input:
binary activity matrix A
latest control strength matrix C, including baseline
expression matrix data
freezeFlag to indicate whether doing second step fitting (or else constrain avg TFA)
fileTag for use w/logging gurobi output
Output:
False if learning failed
learned A matrix if learning succeeded
makes a least squares optimization problem for gurobi to optimize
"""
def learnTFA(A, lastC, data, freezeFlag, fileTag):
numGenes = len(data)
numSamples = len(data[0])
numTFs = len(A)
# initialize gurobi model
model = Model()
model.setParam('LogToConsole', False)
model.setParam('LogFile', 'logFiles/'+fileTag+".log")
#threads for parallelization
model.setParam('Threads', 1)
# Add tfa variables to the model
varsMatrix = [] # holds the activity matrix, with pointers to coeff where relevant
for i in range(numTFs):
constraintCounter = 0 # counts the number of coeff in a row
varsMatrix.append([]) # start a new row in the activity matrix
constraint = LinExpr() # initialize the constraint that each row's avg coeff value is 1
for j in range(numSamples):
if A[i][j]==0:
varsMatrix[i].append(0)
else:
v = model.addVar(lb=0.0001, vtype=GRB.CONTINUOUS, name='A['+str(i)+','+str(j)+']')
varsMatrix[i].append(v)
constraint += v
constraintCounter += 1
if not freezeFlag: # add the scaling constraint if not second round fitting
model.addConstr(constraint/constraintCounter, GRB.EQUAL, 1.0, "c"+str(i))
model.update()
# Add overexpression perturb constraints to the model
for i in range(numTFs):
for j in range(numSamples):
if A[i][j]==2:
model.addConstr(varsMatrix[i][j], GRB.GREATER_EQUAL, 1.1*varsMatrix[i][-1])
model.update()
# Populate objective
obj = QuadExpr()
for i in range(numGenes):
for j in range(numSamples):
geneExpr = LinExpr()
geneExpr += lastC[i][numTFs]
for k in range(numTFs):
if type(varsMatrix[k][j])==Var:
geneExpr += lastC[i][k]*varsMatrix[k][j]
geneError = data[i][j] - geneExpr
obj += geneError * geneError
model.setObjective(obj)
model.update()
# Solve
try:
model.optimize()
except:
return False
# Write model to a file
model.write('modelFiles/'+fileTag+'.lp')
# check that optimization succeeded
if model.status != GRB.Status.OPTIMAL:
return False
#convert back to matrix
Atemp = []
for i in range(numTFs):
Atemp.append([])
for j in range(numSamples):
if A[i][j] == 0:
Atemp[i].append(0)
else:
Atemp[i].append(model.getAttr('x', [varsMatrix[i][j]])[0])
return Atemp
"""
Input:
latest activity matrix A
binary cs matrix C, or latest cs matrix if validating
expression matrix data
morFlag to indicate whether to constrain mode of regulation (sign of CS values)
freezeFlag to indicate whether doing second step fitting (or else constrain avg TFA)
fileTag for use w/logging gurobi output
Output:
False if learning failed
learned CS matrix if learning succeeded
makes a least squares optimization problem for gurobi to optimize
"""
def learnCS(lastA, C, data, morFlag, fileTag):
numGenes = len(data)
numSamples = len(data[0])
numTFs = len(lastA)
# Initialize the model
model = Model()
model.setParam('LogToConsole', False)
model.setParam('LogFile', "logFiles/"+fileTag+".log")
model.setParam('Threads', 1)
# Add cs variables to the model
varsMatrix = [] # holds the cs matrix, with pointers to coeff where relevant
lassoConstraint = LinExpr() # Intialize the LASSO constraint
for i in range(numGenes):
varsMatrix.append([]) # add a row to the cs matrix
for j in range(numTFs):
#if freezeFlag==1:
# varsMatrix[i].append(C[i][j])
#else:
if C[i][j]==0: #no influence
varsMatrix[i].append(0)
elif not morFlag: #an influence to be learned, no sign to constrain
v = model.addVar(lb=-GRB.INFINITY, ub=GRB.INFINITY, vtype=GRB.CONTINUOUS, name='C['+str(i)+','+str(j)+']')
varsMatrix[i].append(v)
else:
if C[i][j] > 0: #an influence to be learned, activating
v = model.addVar(lb=0.0001, ub=GRB.INFINITY, vtype=GRB.CONTINUOUS, name='C['+str(i)+','+str(j)+']')
varsMatrix[i].append(v)
else: #an influence to be learned, repressing
v = model.addVar(lb=-GRB.INFINITY, ub=-0.0001, vtype=GRB.CONTINUOUS, name='C['+str(i)+','+str(j)+']')
varsMatrix[i].append(v)
#learning baseline expression
v = model.addVar(lb=-GRB.INFINITY, ub=GRB.INFINITY, vtype=GRB.CONTINUOUS, name='C['+str(i)+','+str(numTFs)+']')
varsMatrix[i].append(v)
model.update()
# Populate objective
obj = QuadExpr()
for i in range(numGenes):
for j in range(numSamples):
geneExpr = LinExpr()
geneExpr += varsMatrix[i][numTFs]
for k in range(numTFs):
geneExpr += varsMatrix[i][k]*lastA[k][j]
geneError = data[i][j] - geneExpr
obj += geneError * geneError
model.setObjective(obj)
model.update()
# Solve
try:
model.optimize()
except:
return False
# Write model to a file
model.write('modelFiles/'+fileTag+'.lp')
# check that optimization succeeded
if model.status != GRB.Status.OPTIMAL:
return False
#convert back to matrix
"""
if freezeFlag==1:
Ctemp = []
for i in range(numGenes):
Ctemp.append([])
for j in range(numTFs+1):
if j<numTFs:
Ctemp[i].append(C[i][j])
else:
Ctemp[i].append(model.getAttr('x', [varsMatrix[i][j]])[0])
else:
"""
Ctemp = []
for i in range(numGenes):
Ctemp.append([])
for j in range(numTFs+1):
if j<numTFs and C[i][j] == 0:
Ctemp[i].append(0)
else:
Ctemp[i].append(model.getAttr('x', [varsMatrix[i][j]])[0])
return Ctemp
def main():
start = time.time()
[args, expressionValues, startCS, binaryTFA] = parseInputs()
crossFlag = False
if args.crossFile != None:
try:
crossExpression = numpy.genfromtxt(args.crossFile, delimiter=',', dtype=float)
crossBinaryTFA = numpy.ones((len(startCS[0])-1,len(crossExpression[0])))
crossFlag = True
except IOError as e:
print("Error:\t", e)
quit()
numGenes = len(startCS)
numSamples = len(expressionValues[0])
numTFs = len(binaryTFA)
Atemp = learnTFA(binaryTFA, startCS, expressionValues, False, args.fileTag+"TFA")
if Atemp == False:
print("Could not learn the activity matrix")
return
numpy.savetxt("logFiles/"+args.fileTag+"CSiteration0.csv", startCS, delimiter=',', fmt='%1.5f')
numpy.savetxt("logFiles/"+args.fileTag+"TFAiteration0.csv", Atemp, delimiter=',', fmt='%1.5f')
predictedExpression = numpy.matmul(startCS, numpy.append(Atemp, numpy.ones((1,len(Atemp[0]))),axis=0))
[ve, sse] = calcError(expressionValues, predictedExpression)
print("Variance explained and error from random CS values: ", round(ve, 3), round(sse,3))
if crossFlag:
crossA = learnTFA(crossBinaryTFA, startCS, crossExpression, True, args.fileTag+"CrossCheck")
if crossA == False:
print("Could not cross check")
else:
predictedExpression = numpy.matmul(startCS, numpy.append(crossA, numpy.ones((1,len(crossA[0]))),axis=0))
[ve, sse] = calcError(crossExpression, predictedExpression)
print("cross check: ", round(numpy.mean(crossA[0]),2), round(ve, 3), round(sse,3))
for itr in range(1,args.maxIter):
print("\niteration ", itr, "\n")
Ctemp = learnCS(Atemp, startCS, expressionValues, args.morFlag==0, args.fileTag+"CS")
if Ctemp == False:
print("Could not learn the control strength matrix")
return
Atemp = learnTFA(binaryTFA, Ctemp, expressionValues, False, args.fileTag+"TFA")
if Atemp == False:
print("Could not learn the activity matrix")
return
numpy.savetxt("logFiles/"+args.fileTag+"CSiteration"+str(itr)+".csv", Ctemp, delimiter=',', fmt='%1.5f')
numpy.savetxt("logFiles/"+args.fileTag+"TFAiteration"+str(itr)+".csv", Atemp, delimiter=',', fmt='%1.5f')
predictedExpression = numpy.matmul(Ctemp, numpy.append(Atemp, numpy.ones((1,len(Atemp[0]))),axis=0))
[ve, sse] = calcError(expressionValues, predictedExpression)
print("Variance explained and error at iteration ", itr, ": ", round(ve, 3), round(sse,3))
if crossFlag:
crossA = learnTFA(crossBinaryTFA, Ctemp, crossExpression, True, args.fileTag+"CrossCheck")
if crossA == False:
print("Could not cross check")
else:
predictedExpression = numpy.matmul(Ctemp, numpy.append(crossA, numpy.ones((1,len(crossA[0]))),axis=0))
[ve, sse] = calcError(crossExpression, predictedExpression)
print("cross check: ", round(numpy.mean(crossA[0]),2), round(ve, 3), round(sse,3))
end = time.time()
print(round(end-start))
main()
| [
"argparse.ArgumentParser",
"numpy.copy",
"random.uniform",
"numpy.savetxt",
"numpy.genfromtxt",
"time.time",
"numpy.mean",
"numpy.var"
] | [((237, 299), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Infer CS and TFA values"""'}), "(description='Infer CS and TFA values')\n", (260, 299), False, 'import argparse\n'), ((11831, 11842), 'time.time', 'time.time', ([], {}), '()\n', (11840, 11842), False, 'import time\n'), ((12477, 12580), 'numpy.savetxt', 'numpy.savetxt', (["('logFiles/' + args.fileTag + 'CSiteration0.csv')", 'startCS'], {'delimiter': '""","""', 'fmt': '"""%1.5f"""'}), "('logFiles/' + args.fileTag + 'CSiteration0.csv', startCS,\n delimiter=',', fmt='%1.5f')\n", (12490, 12580), False, 'import numpy\n'), ((12575, 12677), 'numpy.savetxt', 'numpy.savetxt', (["('logFiles/' + args.fileTag + 'TFAiteration0.csv')", 'Atemp'], {'delimiter': '""","""', 'fmt': '"""%1.5f"""'}), "('logFiles/' + args.fileTag + 'TFAiteration0.csv', Atemp,\n delimiter=',', fmt='%1.5f')\n", (12588, 12677), False, 'import numpy\n'), ((14794, 14805), 'time.time', 'time.time', ([], {}), '()\n', (14803, 14805), False, 'import time\n'), ((2827, 2879), 'numpy.genfromtxt', 'numpy.genfromtxt', (['args.expressionFile'], {'delimiter': '""","""'}), "(args.expressionFile, delimiter=',')\n", (2843, 2879), False, 'import numpy\n'), ((5273, 5296), 'random.uniform', 'random.uniform', (['(-10)', '(10)'], {}), '(-10, 10)\n', (5287, 5296), False, 'import random\n'), ((3038, 3098), 'numpy.genfromtxt', 'numpy.genfromtxt', (['args.startFile'], {'delimiter': '""","""', 'dtype': 'float'}), "(args.startFile, delimiter=',', dtype=float)\n", (3054, 3098), False, 'import numpy\n'), ((3570, 3615), 'numpy.genfromtxt', 'numpy.genfromtxt', (['args.tfaFile'], {'delimiter': '""","""'}), "(args.tfaFile, delimiter=',')\n", (3586, 3615), False, 'import numpy\n'), ((4955, 4975), 'numpy.copy', 'numpy.copy', (['binaryCS'], {}), '(binaryCS)\n', (4965, 4975), False, 'import numpy\n'), ((5098, 5121), 'random.uniform', 'random.uniform', (['(-10)', '(10)'], {}), '(-10, 10)\n', (5112, 5121), False, 'import random\n'), ((5687, 5705), 
'numpy.var', 'numpy.var', (['data[i]'], {}), '(data[i])\n', (5696, 5705), False, 'import numpy\n'), ((11988, 12048), 'numpy.genfromtxt', 'numpy.genfromtxt', (['args.crossFile'], {'delimiter': '""","""', 'dtype': 'float'}), "(args.crossFile, delimiter=',', dtype=float)\n", (12004, 12048), False, 'import numpy\n'), ((3218, 3275), 'numpy.genfromtxt', 'numpy.genfromtxt', (['args.csFile'], {'delimiter': '""","""', 'dtype': 'float'}), "(args.csFile, delimiter=',', dtype=float)\n", (3234, 3275), False, 'import numpy\n'), ((13343, 13364), 'numpy.mean', 'numpy.mean', (['crossA[0]'], {}), '(crossA[0])\n', (13353, 13364), False, 'import numpy\n'), ((14731, 14752), 'numpy.mean', 'numpy.mean', (['crossA[0]'], {}), '(crossA[0])\n', (14741, 14752), False, 'import numpy\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 2 13:04:57 2018
@author: pscog
"""
#import sys
#import os
#sys.path.append(os.path.dirname(os.getcwd()))
from multiprocessing import Process, Pool
import PyThurstonian
from PyThurstonian import thurstonian, hdi
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from simulate_data import simulate_data
import ranking as rk
import pdb
#Set plotting preferences
sns.set_context("paper")
sns.set_style("whitegrid")
output_folder = "article/Figures/"
#Load and preprocess data
K = 3 #Number of items
J = 30 #Number of participants
L = 1 #Number of trials
C = 2 #Number of conditions
beta = np.array([[0.0, 1.0, 2.0],
[0.0, 0.0, 0.0]])
N_samples = 5000
def get_p(i):
data, sim_scale = simulate_data(K, J, L, C, beta = beta)
data['Subj'] = pd.factorize(data['Subj'])[0]+1
myThurst = thurstonian(design_formula = '~0+Condition', data = data, subject_name = "Subj")
return myThurst.sra_p_value(level_dict = {'Condition': 'C2'})[0]
if __name__ == '__main__':
with Pool(6) as p:
all_p = p.map(get_p, range(5000))
all_p = np.array(all_p)
print(all_p)
print(np.sum(all_p < 0.05) / all_p.size)
plt.hist(all_p, normed = True)
plt.show() | [
"seaborn.set_style",
"PyThurstonian.thurstonian",
"matplotlib.pyplot.show",
"numpy.sum",
"matplotlib.pyplot.hist",
"simulate_data.simulate_data",
"numpy.array",
"multiprocessing.Pool",
"pandas.factorize",
"seaborn.set_context"
] | [((470, 494), 'seaborn.set_context', 'sns.set_context', (['"""paper"""'], {}), "('paper')\n", (485, 494), True, 'import seaborn as sns\n'), ((499, 525), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (512, 525), True, 'import seaborn as sns\n'), ((705, 749), 'numpy.array', 'np.array', (['[[0.0, 1.0, 2.0], [0.0, 0.0, 0.0]]'], {}), '([[0.0, 1.0, 2.0], [0.0, 0.0, 0.0]])\n', (713, 749), True, 'import numpy as np\n'), ((836, 872), 'simulate_data.simulate_data', 'simulate_data', (['K', 'J', 'L', 'C'], {'beta': 'beta'}), '(K, J, L, C, beta=beta)\n', (849, 872), False, 'from simulate_data import simulate_data\n'), ((942, 1016), 'PyThurstonian.thurstonian', 'thurstonian', ([], {'design_formula': '"""~0+Condition"""', 'data': 'data', 'subject_name': '"""Subj"""'}), "(design_formula='~0+Condition', data=data, subject_name='Subj')\n", (953, 1016), False, 'from PyThurstonian import thurstonian, hdi\n'), ((1216, 1231), 'numpy.array', 'np.array', (['all_p'], {}), '(all_p)\n', (1224, 1231), True, 'import numpy as np\n'), ((1313, 1341), 'matplotlib.pyplot.hist', 'plt.hist', (['all_p'], {'normed': '(True)'}), '(all_p, normed=True)\n', (1321, 1341), True, 'import matplotlib.pyplot as plt\n'), ((1348, 1358), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1356, 1358), True, 'import matplotlib.pyplot as plt\n'), ((1147, 1154), 'multiprocessing.Pool', 'Pool', (['(6)'], {}), '(6)\n', (1151, 1154), False, 'from multiprocessing import Process, Pool\n'), ((895, 921), 'pandas.factorize', 'pd.factorize', (["data['Subj']"], {}), "(data['Subj'])\n", (907, 921), True, 'import pandas as pd\n'), ((1269, 1289), 'numpy.sum', 'np.sum', (['(all_p < 0.05)'], {}), '(all_p < 0.05)\n', (1275, 1289), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
import tbmodels
import numpy as np
def test_consistency(get_model_clean, models_equal, sparse):
    """Rebuild a model from its flattened hopping list and check equality."""
    reference = get_model_clean(0.1, 0.2, sparse=sparse)
    hop_list = []
    for lattice_vector, matrix in reference.hop.items():
        hop_list += tbmodels.helpers.matrix_to_hop(np.array(matrix), R=lattice_vector)
    rebuilt = tbmodels.Model.from_hop_list(
        size=2,
        hop_list=hop_list,
        contains_cc=False,
        occ=1,
        pos=((0., ) * 3, (0.5, 0.5, 0.)),
        sparse=sparse,
    )
    models_equal(reference, rebuilt)
| [
"tbmodels.Model.from_hop_list",
"numpy.array"
] | [((357, 492), 'tbmodels.Model.from_hop_list', 'tbmodels.Model.from_hop_list', ([], {'size': '(2)', 'hop_list': 'hoppings', 'contains_cc': '(False)', 'occ': '(1)', 'pos': '((0.0,) * 3, (0.5, 0.5, 0.0))', 'sparse': 'sparse'}), '(size=2, hop_list=hoppings, contains_cc=False,\n occ=1, pos=((0.0,) * 3, (0.5, 0.5, 0.0)), sparse=sparse)\n', (385, 492), False, 'import tbmodels\n'), ((324, 335), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (332, 335), True, 'import numpy as np\n')] |
# coding: utf-8
"""
This module offers a simple, matlab-style API built on top of the gr package.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import functools
import sys
import warnings
import numpy as np
import gr
import gr3
# Python 2/3 compatibility shim: Python 3 has no ``basestring`` builtin,
# so fall back to ``str`` for string type checks throughout this module.
try:
    basestring
except NameError:
    basestring = str
def _close_gks_on_error(func):
"""
Wrap an API function to make sure GKS is closed on error.
Not closing GKS after an error occurred during plotting could lead to
an unexpected GKS state, e.g. printing of more than one output page.
:param func: the mlab API function to wrap
:return: the wrapped API function
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception:
gr.emergencyclosegks()
raise
return wrapper
@_close_gks_on_error
def plot(*args, **kwargs):
    """
    Draw one or more line plots.

    Accepts any of the following argument combinations:

    - x values and y values, or
    - x values and a callable computing the y values, or
    - y values only (their indices are used as x values)

    :param args: the data to plot
    """
    global _plt
    _plt.kwargs.update(kwargs)
    parsed = _plot_args(args)
    if _plt.kwargs['ax']:
        _plt.args += parsed
    else:
        _plt.args = parsed
    _plot_data(kind='line')
@_close_gks_on_error
def oplot(*args, **kwargs):
    """
    Draw one or more line plots on top of the current plot.

    Accepts x and y values, x values and a callable computing the y
    values, or y values only (indices are used as x values).

    :param args: the data to plot
    """
    global _plt
    _plt.kwargs.update(kwargs)
    # Append to — rather than replace — the already plotted data.
    _plt.args = _plt.args + _plot_args(args)
    _plot_data(kind='line')
@_close_gks_on_error
def step(*args, **kwargs):
    """
    Draw one or more step (staircase) plots.

    Accepts x and y values, x values and a callable computing the y
    values, or y values only (indices are used as x values).

    :param args: the data to plot
    :param where: 'pre', 'mid' or 'post' — where the step between two
        y values is placed
    """
    global _plt
    # 'where' is translated to the internal 'step_where' option.
    try:
        _plt.kwargs['step_where'] = kwargs.pop('where')
    except KeyError:
        pass
    _plt.kwargs.update(kwargs)
    parsed = _plot_args(args)
    if _plt.kwargs['ax']:
        _plt.args += parsed
    else:
        _plt.args = parsed
    _plot_data(kind='step')
@_close_gks_on_error
def scatter(*args, **kwargs):
    """
    Draw one or more scatter plots.

    Accepts x and y values, x values and a callable computing the y
    values, or y values only (indices are used as x values). In
    addition, marker sizes (in percent of the regular size) and color
    values (mapped through the current colormap) may be supplied.

    :param args: the data to plot
    """
    global _plt
    _plt.kwargs.update(kwargs)
    parsed = _plot_args(args, fmt='xyac')
    _plt.args = parsed
    _plot_data(kind='scatter')
@_close_gks_on_error
def quiver(x, y, u, v, **kwargs):
    """
    Draw a quiver plot: one arrow per grid point visualizing a vector.

    :param x: the X coordinates of the grid
    :param y: the Y coordinates of the grid
    :param u: the U component for each point on the grid
    :param v: the V component for each point on the grid
    """
    global _plt
    _plt.kwargs.update(kwargs)
    parsed = _plot_args((x, y, u, v), fmt='xyuv')
    _plt.args = parsed
    _plot_data(kind='quiver')
@_close_gks_on_error
def polar(*args, **kwargs):
    """
    Draw one or more polar plots.

    Accepts angle values and radius values, or angle values and a
    callable computing the radius values.

    :param args: the data to plot
    """
    global _plt
    _plt.kwargs.update(kwargs)
    parsed = _plot_args(args)
    _plt.args = parsed
    _plot_data(kind='polar')
@_close_gks_on_error
def trisurf(*args, **kwargs):
    """
    Draw a triangular surface plot.

    Displays a series of points as a surface, using a Delaunay
    triangulation to interpolate the z values between the x and y
    values. For concave point sets this interpolation may produce
    artifacts on the plot edges, as very acute triangles can occur.

    :param x: the x coordinates to plot
    :param y: the y coordinates to plot
    :param z: the z coordinates to plot
    """
    global _plt
    _plt.kwargs.update(kwargs)
    parsed = _plot_args(args, fmt='xyzc')
    _plt.args = parsed
    _plot_data(kind='trisurf')
@_close_gks_on_error
def tricont(x, y, z, *args, **kwargs):
    """
    Draw a triangular contour plot.

    Displays a series of points as a contour plot, using a Delaunay
    triangulation to interpolate the z values between the x and y
    values. For concave point sets this interpolation may produce
    artifacts on the plot edges, as very acute triangles can occur.

    :param x: the x coordinates to plot
    :param y: the y coordinates to plot
    :param z: the z coordinates to plot
    """
    global _plt
    _plt.kwargs.update(kwargs)
    combined = [x, y, z]
    combined.extend(args)
    _plt.args = _plot_args(combined, fmt='xyzc')
    _plot_data(kind='tricont')
@_close_gks_on_error
def stem(*args, **kwargs):
    """
    Draw a stem plot.

    Accepts x and y values, x values and a callable computing the y
    values, or y values only (indices are used as x values).

    :param args: the data to plot
    """
    global _plt
    _plt.kwargs.update(kwargs)
    parsed = _plot_args(args)
    _plt.args = parsed
    _plot_data(kind='stem')
def _hist(x, nbins=0, weights=None):
x = np.array(x)
x_min = x.min()
x_max = x.max()
if nbins <= 1:
nbins = int(np.round(3.3 * np.log10(len(x)))) + 1
binned_x = np.array(np.floor((x - x_min) / (x_max - x_min) * nbins), dtype=int)
binned_x[binned_x == nbins] = nbins - 1
counts = np.bincount(binned_x, weights=weights)
edges = np.linspace(x_min, x_max, nbins + 1)
return counts, edges
@_close_gks_on_error
def histogram(x, num_bins=0, weights=None, **kwargs):
    r"""
    Draw a histogram.

    If **num_bins** is 0, the number of bins is computed as
    :math:`\text{round}(3.3\cdot\log_{10}(n))+1` with n as the number
    of elements in x; otherwise the given number of bins is used.

    :param x: the values to draw as histogram
    :param num_bins: the number of bins in the histogram
    :param weights: weights for the x values
    """
    global _plt
    _plt.kwargs.update(kwargs)
    counts, edges = _hist(x, nbins=num_bins, weights=weights)
    _plt.args = [(np.array(edges), np.array(counts), None, None, "")]
    _plot_data(kind='hist')
@_close_gks_on_error
def contour(*args, **kwargs):
    """
    Draw a contour plot.

    Displays either a series of points or a two-dimensional array as a
    contour plot using the current colormap. Accepts:

    - x values, y values and z values, or
    - N x values, M y values and z values on a MxN grid, or
    - N x values, M y values and a callable computing the z values

    A series of points is interpolated onto a grid; grid points outside
    the convex hull of the input points are assigned the value 0.

    :param args: the data to plot
    :param levels: number of contour lines
    """
    global _plt
    _plt.kwargs.update(kwargs)
    parsed = _plot_args(args, fmt='xyzc')
    _plt.args = parsed
    _plot_data(kind='contour')
@_close_gks_on_error
def contourf(*args, **kwargs):
    """
    Draw a filled contour plot.

    Displays either a series of points or a two-dimensional array as a
    filled contour plot using the current colormap. Accepts:

    - x values, y values and z values, or
    - N x values, M y values and z values on a MxN grid, or
    - N x values, M y values and a callable computing the z values

    A series of points is interpolated onto a grid; grid points outside
    the convex hull of the input points are assigned the value 0.

    :param args: the data to plot
    :param levels: number of contour lines
    """
    global _plt
    _plt.kwargs.update(kwargs)
    parsed = _plot_args(args, fmt='xyzc')
    _plt.args = parsed
    _plot_data(kind='contourf')
@_close_gks_on_error
def hexbin(*args, **kwargs):
    """
    Draw a hexagon binning plot.

    Displays a series of points using hexagonal binning and the current
    colormap. Accepts x and y values, x values and a callable computing
    the y values, or y values only (indices are used as x values).

    :param args: the data to plot
    """
    global _plt
    _plt.kwargs.update(kwargs)
    parsed = _plot_args(args)
    _plt.args = parsed
    _plot_data(kind='hexbin')
@_close_gks_on_error
def heatmap(data, **kwargs):
    """
    Draw a heatmap.

    Displays a two-dimensional array as a heatmap using the current
    colormap. The array is drawn with its first value in the upper left
    corner, so in some cases it may be necessary to flip the columns.

    By default, the row and column indices are used for the x- and
    y-axes, so setting the axis limits is recommended. The array values
    must lie within the current z-axis limits, so it may be necessary to
    adjust these limits or clip the range of array values.

    :param data: the heatmap data
    :raises ValueError: if data is not two-dimensional
    """
    global _plt
    # np.asarray avoids a copy where possible; unlike np.array(..., copy=False)
    # its meaning is unchanged under NumPy 2.0, where copy=False now raises
    # if a copy cannot be avoided.
    data = np.asarray(data)
    if data.ndim != 2:
        raise ValueError('expected 2-D array')
    _plt.kwargs.update(kwargs)
    xlim = _plt.kwargs.get('xlim', None)
    ylim = _plt.kwargs.get('ylim', None)
    _plt.args = [(xlim, ylim, data, None, "")]
    _plot_data(kind='heatmap')
@_close_gks_on_error
def polar_heatmap(data, **kwargs):
    """
    Draw a polar heatmap.

    Displays a two-dimensional array mapped to a disk using polar
    coordinates and the current colormap. The rows are interpreted as
    the angle and the columns as the radius.

    By default, an inner radius of 0 and the number of columns as outer
    radius are used and a complete circle is drawn, so setting the axis
    limits is recommended. The array values must lie within the current
    z-axis limits, so it may be necessary to adjust these limits or clip
    the range of array values.

    :param data: the heatmap data
    :raises ValueError: if data is not two-dimensional
    """
    global _plt
    # np.asarray avoids a copy where possible; unlike np.array(..., copy=False)
    # its meaning is unchanged under NumPy 2.0, where copy=False now raises
    # if a copy cannot be avoided.
    data = np.asarray(data)
    if data.ndim != 2:
        raise ValueError('expected 2-D array')
    _plt.kwargs.update(kwargs)
    rlim = _plt.kwargs.get('rlim', None)
    philim = _plt.kwargs.get('philim', None)
    _plt.args = [(rlim, philim, data, None, "")]
    _plot_data(kind='polar_heatmap')
@_close_gks_on_error
def shade(*args, **kwargs):
    """
    Draw a point- or line-based heatmap.

    Displays a series of points or polylines using the current
    colormap. For line data, NaN values act as polyline separators.

    :param args: the data to plot
    :param xform: the transformation type used for color mapping:
        0 boolean, 1 linear, 2 logarithmic, 3 double logarithmic,
        4 cubic, 5 histogram equalized
    """
    global _plt
    _plt.kwargs.update(kwargs)
    parsed = _plot_args(args, fmt='xys')
    _plt.args = parsed
    _plot_data(kind='shade')
@_close_gks_on_error
def wireframe(*args, **kwargs):
    """
    Draw a three-dimensional wireframe plot.

    Displays either a series of points or a two-dimensional array as a
    wireframe plot using the current colormap. Accepts:

    - x values, y values and z values, or
    - N x values, M y values and z values on a MxN grid, or
    - N x values, M y values and a callable computing the z values

    A series of points is interpolated onto a grid; grid points outside
    the convex hull of the input points are assigned the value 0.

    :param args: the data to plot
    """
    global _plt
    _plt.kwargs.update(kwargs)
    parsed = _plot_args(args, fmt='xyzc')
    _plt.args = parsed
    _plot_data(kind='wireframe')
@_close_gks_on_error
def surface(*args, **kwargs):
    """
    Draw a three-dimensional surface plot.

    Displays either a series of points or a two-dimensional array as a
    surface plot using the current colormap. Accepts:

    - x values, y values and z values, or
    - N x values, M y values and z values on a MxN grid, or
    - N x values, M y values and a callable computing the z values

    A series of points is interpolated onto a grid; grid points outside
    the convex hull of the input points are assigned the value 0.

    :param args: the data to plot
    """
    global _plt
    _plt.kwargs.update(kwargs)
    parsed = _plot_args(args, fmt='xyzc')
    _plt.args = parsed
    _plot_data(kind='surface')
@_close_gks_on_error
def bar(y, *args, **kwargs):
    """
    Draw a two-dimensional bar plot.

    Accepts either a list of y values (single bars) or a list of
    same-length sublists / a 2-D numpy array (multiple bars), plus an
    optional list of colors and the following keyword parameters:

    - bar_width, edge_width
    - bar_color, edge_color (an int color index or an RGB triple)
    - xnotations: a list of x-axis labels
    - ind_bar_color, ind_edge_color: one color pack or a list of packs
      for individual bars (pack: indices (int or list), RGB triple)
    - ind_edge_width: one width pack or a list of packs (pack: indices,
      float width)
    - bar_style: 'stacked' (default) or 'lined' (multi-bar only)

    All parameters except xnotations, ind_bar_color, ind_edge_color and
    ind_edge_width are kept for subsequent bar plots (pass None to
    restore defaults).

    :param y: the data to plot
    :param args: the list of colors
    :raises IndexError: if a numpy array with more than 2 dimensions is
        given, or the color list length does not match the data
    :raises TypeError: if the color argument is not a list
    :raises ValueError: if a color index is negative or an RGB component
        lies outside [0;1]
    """
    global _plt
    _plt.kwargs.update(kwargs)
    if 'bar_style' not in _plt.kwargs:
        _plt.kwargs['bar_style'] = 'stacked'
    # Reject arrays with more than two dimensions up front. (This guard
    # previously sat inside the 2-D branch, where a >2-D array could never
    # reach it, so such input silently fell through to the single-bar path.)
    if isinstance(y, np.ndarray) and len(y.shape) > 2:
        raise IndexError('Numpy array has to be of dimension 2 or lower!')
    if isinstance(y[0], list) or (isinstance(y, np.ndarray) and len(y.shape) == 2):
        _plt.kwargs['multi_bar'] = True
        if _plt.kwargs['bar_style'] == 'lined':
            # Lined bars: each group is as tall as its tallest sub-bar.
            new_arg = [max(ls) for ls in y]
        else:
            # Stacked bars: each group is as tall as the sum of its sub-bars.
            new_arg = [sum(ls) for ls in y]
        new_args = [new_arg, y]
    else:
        _plt.kwargs['multi_bar'] = False
        new_args = [y, None]
    if args:
        c = args[0]
        if not isinstance(c, list):
            raise TypeError('C has to be of type list!')
        if _plt.kwargs['multi_bar']:
            if len(c) != len(y[0]):
                raise IndexError('The length of c has to equal the length of the sublists when using a multi-bar!')
        else:
            if len(c) != len(y):
                raise IndexError('The length of c has to equal the amount of y-values!')
        for color in c:
            if isinstance(color, int):
                if color < 0:
                    raise ValueError('The values in c have to be bigger or equal to 0!')
            if isinstance(color, list):
                if len(color) != 3:
                    raise IndexError("RGB list has to contain 3 values!")
                for v in color:
                    # RGB components are fractions in [0;1].
                    if v < 0 or v > 1:
                        raise ValueError('The values of a rgb color have to be in [0;1]!')
        new_args.append(c)
    _plt.args = _plot_args(new_args, fmt='y')
    _plot_data(kind='bar')
def polar_histogram(*args, **kwargs):
"""
Draw a polar histogram plot.
This function uses certain input values to display a polar histogram.
It must receive only one of the following:
- theta: a list containing angles between 0 and 2 * pi
- bin_counts: a list containing integer values for the height of bins
It can also receive:
- normalization: type of normalization in which the histogram will be displayed
+ count: the default normalization. The height of each bar is the number of observations in each bin.
+ probability: The height of each bar is the relative number of observations,
(number of observations in bin/total number of observations).
The sum of the bar heights is 1.
+ countdensity: The height of each bar is the number of observations in bin/width of bin.
+ pdf: Probability density function estimate. The height of each bar is,
(number of observations in the bin)/(total number of observations * width of bin).
The area of each bar is the relative number of observations. The sum of the bar areas is 1
+ cumcount: The height of each bar is the cumulative number of observations in each bin and all previous bins.
The height of the last bar is numel (theta).
+ cdf: Cumulative density function estimate. The height of each bar is equal to the cumulative relative
number of observations in the bin and all previous bins. The height of the last bar is 1.
- stairs: Boolean, display the histogram outline only
- face_alpha: float value between 0 and 1 inclusive. Transparency of histogram bars. A value of 1 means fully opaque
and 0 means completely transparent (invisible). Default is 0.75
- face_color: Histogram bar color. Either an integer between 0 and 1008 or a list with three floats between 0 and 1
representing an RGB Triplet
- edge_color: Histogram bar edgecolor. Either an integer between 0 and 1008 or a list with three floats
between 0 and 1 representing an RGB Triplet
- num_bins: Number of bins specified as a positive integer. If no bins are given, the polarhistogram will automatically
calculate the number of bins
- philim: a tuple containing two angle values [min, max]. This option plots a histogram using the input values
(theta) that fall between min and max inclusive.
- rlim: a tuple containing two values between 0 and 1 (min, max). This option plots a histogram with bins starting at
min and ending at max
- bin_edges: a list of angles between 0 and 2 * pi which specify the Edges of each bin/bar.
NOT COMPATIBLE with bin_limits. When used with bin_counts: len(bin_edges) == len(bin_counts) + 1
- bin_width: Width across the top of bins, specified as a float less than 2 * pi
- colormap: A triple tuple used to create a Colormap. (x, y, size). x = index for the first Colormap. Y for the second.
size for the size of the colormap. each component must be given! At least (None, None, None)
- draw_edges: Boolean, draws the edges of each bin when using a colormap. Only works with a colormap
:param args: a list
**Usage examples:**
>>> # create a theta list
>>> theta = [0, 0.6, 1, 1.3, np.pi/2, np.pi, np.pi*2, np.pi*2]
>>> polar_histogram(theta)
>>> # create bin_counts
>>> bc = [1, 2, 6, 9]
>>> polar_histogram(theta, bin_counts=bc)
>>> #create bin_edges
>>> be = [1, 2, np.pi*1.3, np.pi]
>>> polar_histogram(theta, bin_edges=be)
>>> polar_histogram(theta, normalization='cdf')
>>> polar_histogram(theta, colormap=(0,0,1000))
"""
global _plt
if _plt.kwargs.get('ax', False) is False:
temp_ax = False
else:
temp_ax = True
_plt.kwargs['ax'] = True
_plt.args = _plot_args(args, fmt='xys')
def find_max(classes, normalization):
if normalization == 'cumcount':
sum = 0
for x in range(len(classes)):
sum += len(classes[x])
return sum
else:
max = len(classes[0])
for x in range(len(classes)):
if max < len(classes[x]):
max = len(classes[x])
return max
_plt.kwargs.update(kwargs)
classes = []
gr.setlinecolorind(1)
angles = []
# Color Map
if _plt.kwargs.get('colormap') is not None:
is_colormap = True
colormap = _plt.kwargs['colormap']
if isinstance(colormap, tuple):
colormap = _create_colormap(colormap)
else:
is_colormap = False
if _plt.kwargs.get('draw_edges') is True and not is_colormap:
raise ValueError('draw_edges only usable with a colormap')
# bin_edges
if _plt.kwargs.get('bin_edges', None) is not None:
binedges = _plt.kwargs['bin_edges']
is_binedges = True
else:
is_binedges = False
# normalization
if _plt.kwargs.get('normalization') is not None:
normalization = _plt.kwargs['normalization']
else:
normalization = 'count'
_plt.kwargs['normalization'] = 'count'
# Bincounts or theta
if _plt.kwargs.get('theta') is not None:
theta = _plt.kwargs['theta']
else:
theta = args[0]
# theta check
if len(theta) == 0:
raise ValueError('List is empty')
for obj in theta:
if isinstance(obj, float):
has_theta = True
break
elif isinstance(obj, int):
has_theta = False
if not has_theta:
if is_binedges:
if len(binedges) is not len(theta) + 1:
raise ValueError('Number bin_edges must be number of Bincounts + 1')
for x in range(len(theta)):
classes.append([])
if theta[x] == 0:
classes[x].append(None)
continue
for y in range(theta[x]):
classes[x].append(y)
num_bins = len(theta)
width = 2 * np.pi / num_bins
if is_colormap:
if not is_binedges:
angles = np.linspace(0, np.pi * 2, num_bins + 1)
# Philim for bincounts
if _plt.kwargs.get('philim', None) is not None:
binlimits = _plt.kwargs['philim']
if binlimits[0] is None:
binlimits = (0, binlimits[1])
if binlimits[1] is None:
binlimits = (binlimits[0], 2 * np.pi)
if not is_binedges:
binedges = np.linspace(binlimits[0], binlimits[1], num_bins + 1)
is_binedges = True
_plt.kwargs['temp_bin_edges'] = binedges
else:
binedges = [angle for angle in binedges if (binlimits[0] <= angle <= binlimits[1])]
if len(binedges) != num_bins + 1:
raise ValueError('The given binedges is not compatible with philim since the number of angles,'
' which are in between the philims, does not equal len(bincounts) + 1 ')
is_binedges = True
_plt.kwargs['temp_bin_edges'] = binedges
# No bin_counts
else:
# Number of bins
if _plt.kwargs.get('num_bins', None) is not None:
num_bins = _plt.kwargs['num_bins']
if num_bins < 1:
raise ValueError('Number of num_bins must be 1 or larger')
else:
# Auto generated num_bins
num_bins = min(int(len(theta) / 2 - 1), 200)
if is_binedges:
num_bins = len(binedges) - 1
start = 0
# Bin Width --> will overwrite the number of bins but it will not exceed 200
if _plt.kwargs.get('bin_width') is not None:
width = _plt.kwargs['bin_width']
if not width < np.pi * 2 or not width > 0:
raise ValueError('bin_width not correct! Must be between 0 and 2*pi')
num_bins = int(2 * np.pi / width)
if num_bins > 200:
num_bins = 200
width = 2 * np.pi / num_bins
else:
width = 2 * np.pi / num_bins
# Philim
if _plt.kwargs.get('philim', None) is not None:
is_binlimits = True
binlimits = _plt.kwargs['philim']
if binlimits[0] is None:
binlimits = (0, binlimits[1])
if binlimits[1] is None:
binlimits = (binlimits[0], 2 * np.pi)
if not is_binedges:
if _plt.kwargs.get('num_bins', None) is None:
num_bins = max(int(num_bins * (binlimits[1] - binlimits[0]) / (np.pi * 2)), 3)
binedges = np.linspace(binlimits[0], binlimits[1], num_bins + 1)
is_binedges = True
_plt.kwargs['temp_bin_edges'] = binedges
else:
binedges = [angle for angle in binedges if (binlimits[0] <= angle <= binlimits[1])]
is_binedges = True
_plt.kwargs['temp_bin_edges'] = binedges
num_bins = len(binedges) - 1
else:
is_binlimits = False
# grouping the data from given theta
if is_binedges:
for x in range(len(binedges)):
start = binedges[x]
classes.append([])
for y in range(len(theta)):
try:
if start < theta[y] <= binedges[x + 1]:
if is_binlimits:
if binlimits[0] <= theta[y] <= binlimits[1]:
classes[x].append(theta[y])
else:
classes[x].append(theta[y])
except Exception:
pass
if len(classes[x]) == 0:
classes[x].append(None)
# no Binedges
else:
# Optional Bin Limits
if is_binlimits:
theta = [angle for angle in theta if binlimits[0] <= angle <= binlimits[1]]
interval = 2 * np.pi / num_bins
for x in range(num_bins):
classes.append([])
for y in range(len(theta)):
if start <= theta[y] < (start + interval):
classes[x].append(theta[y])
if is_colormap:
# angles list for colormap
angles.append([start, start + interval])
if len(classes[x]) == 0:
classes[x].append(None)
start += interval
_plt.kwargs['classes'] = classes
# calc total
total = 0
for temp in classes:
total += len(temp) - temp.count(None)
if is_binedges:
if normalization == 'pdf':
norm_factor = total
elif normalization == 'countdensity':
norm_factor = 1
binwidths = []
classes = [temp for temp in classes if len(temp) > 0]
for i in range(len(binedges) - 1):
binwidths.append(binedges[i + 1] - binedges[i])
binwidths.append(binedges[-1] - binedges[-2])
if normalization == 'countdensity' or normalization == 'pdf':
bin_value = [len(x) / (norm_factor * binwidths[i]) for (i, x) in enumerate(classes)]
exp = 0
if normalization == 'probability' or normalization == 'pdf':
if normalization == 'probability':
maximum = find_max(classes, normalization) / total
elif normalization == 'pdf':
if is_binedges:
maximum = max(bin_value)
else:
maximum = find_max(classes, normalization) / (total * width)
exp = round(np.log10(maximum) + 0.5)
border = round((maximum * (10 ** (abs(exp) + 1))) / (10 ** (round(abs(exp) + 1))))
while border < maximum or border * 10 ** (abs(exp) + 2) % 4 != 0:
border += 10 ** (exp - 1)
elif normalization == 'cdf':
border = 1
else:
# interval for x-Axis
if is_binedges:
if normalization == 'countdensity':
maximum = round(max(bin_value) + 0.49)
else:
maximum = int(find_max(classes, normalization))
else:
if normalization == 'countdensity':
maximum = int(find_max(classes, normalization) / width)
else:
maximum = int(find_max(classes, normalization))
border = maximum
outer = True
e = 1
while True and outer:
if border <= 40:
while border % 4 != 0:
border += 1
break
else:
for x in range(9):
border += x * 10 ** (e - 1)
if border % 4 == 0:
outer = False
break
e += 1
# calc colormap image
if normalization == 'probability':
norm_factor = total
elif normalization == 'countdensity':
norm_factor = width
elif normalization == 'pdf':
norm_factor = total * width
elif normalization == 'cdf':
norm_factor = total
elif normalization == 'count':
norm_factor = 1
elif normalization == 'cumcount':
norm_factor = 1
else:
raise ValueError("Incorrect normalization Value")
_plt.kwargs['norm_factor'] = norm_factor
if is_colormap:
# r_lim
if _plt.kwargs.get('rlim', None) is not None:
if not has_theta:
pass
r_lim = _plt.kwargs['rlim']
if r_lim[0] is None:
r_lim = (0, r_lim[1])
if r_lim[1] is None:
r_lim = (r_lim[0], 1)
else:
r_lim = (0, 1)
height = 2000
width = height
center = width / 2
cumulative = []
length = 0
max_radius = center * 0.8
r_min = r_lim[0] * max_radius
r_max = r_lim[1] * max_radius
del max_radius
if normalization == 'cumcount' or normalization == 'cdf':
for temp in classes:
if temp[0] is None:
if len(cumulative) > 0:
cumulative.append(cumulative[-1])
else:
cumulative.append(0)
else:
length += len(temp) / norm_factor / border * 0.8 * center
cumulative.append(length)
factor_angle_b = len(colormap[0]) / (2 * np.pi)
Y, X = np.ogrid[:height, :width]
radiusc = np.sqrt((X - center) ** 2 + (Y - center) ** 2)
angle_c = np.arctan2(Y - center, X - center)
angle_c[angle_c < 0] += 2 * np.pi
angle_b = angle_c * factor_angle_b
factor_radiusb2 = (len(colormap) - 1) / (center * 2 ** (1 / 2))
radiusb2 = radiusc * factor_radiusb2
colormap = np.array(colormap)
lineardata = colormap[radiusb2.astype(np.int), angle_b.astype(np.int)].flatten()
if is_binedges:
if normalization == 'pdf':
norm_factor = total
if normalization == 'countdensity':
norm_factor = 1
binwidths = []
for i in range(len(classes)):
if len(classes[i]) < 1:
classes.pop(i)
for i in range(len(binedges) - 1):
binwidths.append(binedges[i + 1] - binedges[i])
binwidths.append(binedges[-1] - binedges[-2])
if normalization == 'countdensity' or normalization == 'pdf':
bin_value = np.array(
[len(x) / (norm_factor * binwidths[i]) / border * 0.8 * center if x[0] is not None else 0
for (i, x) in enumerate(classes)])
else:
bin_value = np.array([len(x) / norm_factor / border * 0.8 * center if x[0] is not None else 0
for x in classes])
# no bin_edges
else:
bin_value = np.array([len(x) / norm_factor / border * 0.8 * center if x[0] is not None else 0
for x in classes])
if not is_binedges:
if normalization == 'cdf' or normalization == 'cumcount':
boolmap = np.zeros((height, width))
for (angle1, angle2), radius in zip(angles, cumulative):
tempmap = angle1 <= angle_c
tempmap = np.logical_and(angle_c <= angle2, tempmap)
tempmap = np.logical_and(radiusc <= radius, tempmap)
tempmap = np.logical_and(radiusc <= r_max, tempmap)
tempmap = np.logical_and(r_min <= radiusc, tempmap)
boolmap = np.logical_or(tempmap, boolmap)
else:
boolmap = np.zeros((height, width))
for (angle1, angle2), radius in zip(angles, bin_value):
tempmap = angle1 <= angle_c
tempmap = np.logical_and(angle_c <= angle2, tempmap)
tempmap = np.logical_and(radiusc <= radius, tempmap)
tempmap = np.logical_and(radiusc <= r_max, tempmap)
tempmap = np.logical_and(r_min <= radiusc, tempmap)
boolmap = np.logical_or(tempmap, boolmap)
# Binedges
else:
angles = []
for i in range(len(binedges) - 1):
angles.append([])
angles[i].append(binedges[i])
angles[i].append(binedges[i + 1])
if normalization == 'cdf' or normalization == 'cumcount':
boolmap = np.zeros((height, width))
for (angle1, angle2), radius in zip(angles, cumulative):
tempmap = angle1 <= angle_c
tempmap = np.logical_and(angle_c <= angle2, tempmap)
tempmap = np.logical_and(radiusc <= radius, tempmap)
tempmap = np.logical_and(radiusc <= r_max, tempmap)
tempmap = np.logical_and(r_min <= radiusc, tempmap)
boolmap = np.logical_or(tempmap, boolmap)
else:
boolmap = np.zeros((height, width))
for (angle1, angle2), radius in zip(angles, bin_value):
tempmap = angle1 <= angle_c
tempmap = np.logical_and(angle_c <= angle2, tempmap)
tempmap = np.logical_and(radiusc <= radius, tempmap)
tempmap = np.logical_and(radiusc <= r_max, tempmap)
tempmap = np.logical_and(r_min <= radiusc, tempmap)
boolmap = np.logical_or(tempmap, boolmap)
lineardata[np.logical_not(boolmap.flatten())] = 0
_plt.kwargs['temp_colormap'] = (height, width, lineardata)
_plt.kwargs['border_exp'] = (border, exp)
if is_binedges:
if _plt.kwargs['normalization'] == 'pdf':
_plt.kwargs['norm_factor'] = total
_plot_data(kind='polar_histogram')
_plt.kwargs['ax'] = temp_ax
del temp_ax
@_close_gks_on_error
def plot3(*args, **kwargs):
    """
    Draw one or more three-dimensional line plots.

    :param x: the x coordinates to plot
    :param y: the y coordinates to plot
    :param z: the z coordinates to plot

    **Usage examples:**

    >>> x = np.linspace(0, 30, 1000)
    >>> # Plot a spiral whose radius grows with x
    >>> mlab.plot3(x, np.cos(x) * x, np.sin(x) * x)
    """
    global _plt
    # Merge caller settings into the current figure, then hand the
    # positional data to the generic argument parser and renderer.
    _plt.kwargs.update(kwargs)
    _plt.args = _plot_args(args, fmt='xyac')
    _plot_data(kind='plot3')
@_close_gks_on_error
def scatter3(x, y, z, c=None, *args, **kwargs):
    """
    Draw one or more three-dimensional scatter plots.

    In addition to x, y and z values, optional color values can be given;
    they are mapped through the current colormap.

    :param x: the x coordinates to plot
    :param y: the y coordinates to plot
    :param z: the z coordinates to plot
    :param c: the optional color values to plot

    **Usage examples:**

    >>> x = np.random.uniform(-1, 1, 100)
    >>> y = np.random.uniform(-1, 1, 100)
    >>> z = np.random.uniform(-1, 1, 100)
    >>> mlab.scatter3(x, y, z)
    >>> # With per-point colors
    >>> mlab.scatter3(x, y, z, np.random.uniform(1, 1000, 100))
    """
    global _plt
    _plt.kwargs.update(kwargs)
    # Assemble the positional argument list; color values, if present,
    # are appended after any extra positional arguments.
    combined = [x, y, z]
    combined.extend(args)
    if c is not None:
        combined.append(c)
    _plt.args = _plot_args(combined, fmt='xyac')
    _plot_data(kind='scatter3')
@_close_gks_on_error
def isosurface(v, **kwargs):
    """
    Draw an isosurface from a three-dimensional numpy array.

    Values greater than the isovalue are treated as outside the surface,
    values less than the isovalue as inside it.

    :param v: the volume data
    :param isovalue: the isovalue

    **Usage examples:**

    >>> x = np.linspace(-1, 1, 40)[:, np.newaxis, np.newaxis]
    >>> y = np.linspace(-1, 1, 40)[np.newaxis, :, np.newaxis]
    >>> z = np.linspace(-1, 1, 40)[np.newaxis, np.newaxis, :]
    >>> v = 1 - (x**2 + y**2 + z**2)**0.5
    >>> mlab.isosurface(v, isovalue=0.2)
    """
    global _plt
    _plt.kwargs.update(kwargs)
    # Only the z slot of the args tuple carries data for isosurfaces.
    _plt.args = [(None, None, v, None, '')]
    _plot_data(kind='isosurface')
@_close_gks_on_error
def volume(v, **kwargs):
    """
    Draw a three-dimensional numpy array using volume rendering.

    The volume data is projected to a two-dimensional image using an
    emission or absorption model, or a maximum intensity projection, and
    the current colormap is applied to the result.

    :param v: the volume data
    :param algorithm: the reduction algorithm: "maximum", "emission" or "absorption"
    :param dmin: the minimum data value when applying the colormap
    :param dmax: the maximum data value when applying the colormap

    **Usage examples:**

    >>> x = np.linspace(-1, 1, 40)[:, np.newaxis, np.newaxis]
    >>> y = np.linspace(-1, 1, 40)[np.newaxis, :, np.newaxis]
    >>> z = np.linspace(-1, 1, 40)[np.newaxis, np.newaxis, :]
    >>> v = 1 - (x**2 + y**2 + z**2)**0.5 - np.random.uniform(0, 0.25, (40, 40, 40))
    >>> mlab.volume(v)
    >>> mlab.volume(v, algorithm='emission', dmin=0.1, dmax=0.4)
    """
    global _plt
    _plt.kwargs.update(kwargs)
    # Axis coordinates are one longer than the respective data dimension
    # (cell edges rather than cell centers).
    depth, rows, cols = v.shape
    x_edges = np.arange(cols + 1)
    y_edges = np.arange(rows + 1)
    z_edges = np.arange(depth + 1)
    _plt.args = [(x_edges, z_edges, y_edges, v, '')]
    _plot_data(kind='volume')
@_close_gks_on_error
def imshow(image, **kwargs):
    """
    Draw an image.

    The image can be given either as a file name to read or as a
    two-dimensional array, which is rendered with the current colormap.

    :param image: an image file name or two-dimensional array

    **Usage examples:**

    >>> x = np.linspace(-2, 2, 40)
    >>> y = np.linspace(0, np.pi, 20)
    >>> z = np.sin(x[np.newaxis, :]) + np.cos(y[:, np.newaxis])
    >>> mlab.imshow(z)
    >>> mlab.imshow("example.png")
    """
    global _plt
    _plt.kwargs.update(kwargs)
    # The image occupies the z slot of the args tuple.
    _plt.args = [(None, None, image, None, "")]
    _plot_data(kind='imshow')
@_close_gks_on_error
def size(*size):
    """
    Set the size of the output window.

    Sizes may be given as pixels, as strings with a unit, or as a mix of
    both. Calling without arguments restores the default of 600 by 450
    pixels.

    :param size: a series of values or a tuple containing:

        - two numbers (width and height as pixels), or
        - two strings (width and height as another unit), or
        - two numbers and a string (width, height and their unit), or
        - a number, a string, another number and another string (width and
          height, along with their units)

    **Usage examples:**

    >>> mlab.size(600, 450)
    >>> mlab.size((600, 450))
    >>> mlab.size()                  # reset to default
    >>> mlab.size("6cm", 200)
    >>> mlab.size("120mm", "9cm")
    >>> mlab.size(4, 3, "in")
    >>> mlab.size(0.5, "ft", 0.1, "m")
    """
    # A single indexable argument is treated as the full size tuple.
    if len(size) == 1 and hasattr(size[0], '__getitem__'):
        size = size[0]
    _plot_data(size=size if len(size) != 0 else (600, 450))
@_close_gks_on_error
def title(title=""):
    """
    Set the plot title.

    The title is drawn with the extended text function :py:func:`gr.textext`,
    so a subset of LaTeX math syntax is available, but certain characters
    (e.g. parentheses) must be escaped.

    :param title: the plot title

    **Usage examples:**

    >>> mlab.title("Example Plot")
    >>> mlab.title()                 # clear the title
    """
    _plot_data(title=title)
@_close_gks_on_error
def xlabel(x_label=""):
    """
    Set the x-axis label.

    The label is drawn with the extended text function :py:func:`gr.textext`,
    so a subset of LaTeX math syntax is available, but certain characters
    (e.g. parentheses) must be escaped.

    :param x_label: the x-axis label

    **Usage examples:**

    >>> mlab.xlabel("x")
    >>> mlab.xlabel()                # clear the label
    """
    _plot_data(xlabel=x_label)
@_close_gks_on_error
def ylabel(y_label=""):
    r"""
    Set the y-axis label.

    The label is drawn with the extended text function :py:func:`gr.textext`,
    so a subset of LaTeX math syntax is available, but certain characters
    (e.g. parentheses) must be escaped.

    :param y_label: the y-axis label

    **Usage examples:**

    >>> mlab.ylabel("y\(x\)")
    >>> mlab.ylabel()                # clear the label
    """
    _plot_data(ylabel=y_label)
@_close_gks_on_error
def zlabel(z_label=""):
    r"""
    Set the z-axis label.

    The label is drawn with the extended text function :py:func:`gr.textext`,
    so a subset of LaTeX math syntax is available, but certain characters
    (e.g. parentheses) must be escaped.

    :param z_label: the z-axis label

    **Usage examples:**

    >>> mlab.zlabel("z\(x, y\)")
    >>> mlab.zlabel()                # clear the label
    """
    _plot_data(zlabel=z_label)
@_close_gks_on_error
def dlabel(d_label=""):
    r"""
    Set the volume intensity label.

    The label is drawn with the extended text function :py:func:`gr.textext`,
    so a subset of LaTeX math syntax is available, but certain characters
    (e.g. parentheses) must be escaped.

    :param d_label: the volume intensity label

    **Usage examples:**

    >>> mlab.dlabel("Intensity")
    >>> mlab.dlabel()                # clear the label
    """
    _plot_data(dlabel=d_label)
@_close_gks_on_error
def xlim(x_min=None, x_max=None, adjust=True):
    """
    Set the limits for the x-axis.

    Limits can be passed as two arguments or as one (x_min, x_max) tuple.
    A limit of **None** is determined automatically from the data, which
    is the default behavior.

    :param x_min:
        - the x-axis lower limit, or
        - **None** to use an automatic lower limit, or
        - a tuple of both x-axis limits
    :param x_max:
        - the x-axis upper limit, or
        - **None** to use an automatic upper limit, or
        - **None** if both x-axis limits were passed as first argument
    :param adjust: whether or not the limits may be adjusted

    **Usage examples:**

    >>> mlab.xlim(-1, 1)
    >>> mlab.xlim((-1, 1))
    >>> mlab.xlim()                  # fully automatic
    >>> mlab.xlim(0, None)
    >>> mlab.xlim(None, 1)
    """
    # A lone first argument may be a (min, max) pair; scalars raise
    # TypeError on unpacking and are left untouched.
    if x_min is not None and x_max is None:
        try:
            x_min, x_max = x_min
        except TypeError:
            pass
    _plot_data(xlim=(x_min, x_max), adjust_xlim=adjust)
@_close_gks_on_error
def ylim(y_min=None, y_max=None, adjust=True):
    """
    Set the limits for the y-axis.

    Limits can be passed as two arguments or as one (y_min, y_max) tuple.
    A limit of **None** is determined automatically from the data, which
    is the default behavior.

    :param y_min:
        - the y-axis lower limit, or
        - **None** to use an automatic lower limit, or
        - a tuple of both y-axis limits
    :param y_max:
        - the y-axis upper limit, or
        - **None** to use an automatic upper limit, or
        - **None** if both y-axis limits were passed as first argument
    :param adjust: whether or not the limits may be adjusted

    **Usage examples:**

    >>> mlab.ylim(-1, 1)
    >>> mlab.ylim((-1, 1))
    >>> mlab.ylim()                  # fully automatic
    >>> mlab.ylim(0, None)
    >>> mlab.ylim(None, 1)
    """
    # A lone first argument may be a (min, max) pair; scalars raise
    # TypeError on unpacking and are left untouched.
    if y_min is not None and y_max is None:
        try:
            y_min, y_max = y_min
        except TypeError:
            pass
    _plot_data(ylim=(y_min, y_max), adjust_ylim=adjust)
@_close_gks_on_error
def zlim(z_min=None, z_max=None, adjust=True):
    """
    Set the limits for the z-axis.

    Limits can be passed as two arguments or as one (z_min, z_max) tuple.
    A limit of **None** is determined automatically from the data, which
    is the default behavior.

    :param z_min:
        - the z-axis lower limit, or
        - **None** to use an automatic lower limit, or
        - a tuple of both z-axis limits
    :param z_max:
        - the z-axis upper limit, or
        - **None** to use an automatic upper limit, or
        - **None** if both z-axis limits were passed as first argument
    :param adjust: whether or not the limits may be adjusted

    **Usage examples:**

    >>> mlab.zlim(-1, 1)
    >>> mlab.zlim((-1, 1))
    >>> mlab.zlim()                  # fully automatic
    >>> mlab.zlim(0, None)
    >>> mlab.zlim(None, 1)
    """
    # A lone first argument may be a (min, max) pair; scalars raise
    # TypeError on unpacking and are left untouched.
    if z_min is not None and z_max is None:
        try:
            z_min, z_max = z_min
        except TypeError:
            pass
    _plot_data(zlim=(z_min, z_max), adjust_zlim=adjust)
@_close_gks_on_error
def rlim(r_min=None, r_max=None, adjust=True):
    """
    Set the limits for the radii in polar plots.

    The radii can be passed as two arguments or as one (r_min, r_max)
    tuple. A radius of **None** is determined automatically from the
    data, which is the default behavior. Both radii must be greater than
    or equal to 0.

    :param r_min:
        - the inner radius, or
        - **None** to use an automatic inner radius, or
        - a tuple of both radii
    :param r_max:
        - the outer radius, or
        - **None** to use an automatic outer radius, or
        - **None** if both radii were passed as first argument
    :param adjust: whether or not the radii may be adjusted

    **Usage examples:**

    >>> mlab.rlim(0, 1)
    >>> mlab.rlim((0, 1))
    >>> mlab.rlim()                  # fully automatic
    >>> mlab.rlim(0, None)
    >>> mlab.rlim(None, 1)
    """
    # A lone first argument may be a (min, max) pair; scalars raise
    # TypeError on unpacking and are left untouched.
    if r_min is not None and r_max is None:
        try:
            r_min, r_max = r_min
        except TypeError:
            pass
    _plot_data(rlim=(r_min, r_max), adjust_rlim=adjust)
@_close_gks_on_error
def philim(phi_min=None, phi_max=None, adjust=True):
    """
    Set the start and end angle for polar plots in radians.

    The angles can be passed as two arguments or as one
    (phi_min, phi_max) tuple. An angle of **None** is determined
    automatically from the data, which is the default behavior.

    :param phi_min:
        - the start angle in radians, or
        - **None** to use an automatic start angle, or
        - a tuple of both angles
    :param phi_max:
        - the end angle in radians, or
        - **None** to use an automatic end angle, or
        - **None** if both angles were passed as first argument
    :param adjust: whether or not the angles may be adjusted

    **Usage examples:**

    >>> mlab.philim(0, 2*np.pi)
    >>> mlab.philim((0, 2*np.pi))
    >>> mlab.philim()                # fully automatic
    >>> mlab.philim(0, None)
    >>> mlab.philim(None, 2*np.pi)
    """
    # A lone first argument may be a (min, max) pair; scalars raise
    # TypeError on unpacking and are left untouched.
    if phi_min is not None and phi_max is None:
        try:
            phi_min, phi_max = phi_min
        except TypeError:
            pass
    _plot_data(philim=(phi_min, phi_max), adjust_philim=adjust)
@_close_gks_on_error
def xlog(xlog=True):
    """
    Enable or disable a logarithmic scale for the x-axis.

    :param xlog: whether or not the x-axis should be logarithmic

    **Usage examples:**

    >>> mlab.xlog()                  # enable
    >>> mlab.xlog(False)             # disable
    """
    _plot_data(xlog=xlog)
@_close_gks_on_error
def ylog(ylog=True):
    """
    Enable or disable a logarithmic scale for the y-axis.

    :param ylog: whether or not the y-axis should be logarithmic

    **Usage examples:**

    >>> mlab.ylog()                  # enable
    >>> mlab.ylog(False)             # disable
    """
    _plot_data(ylog=ylog)
@_close_gks_on_error
def zlog(zlog=True):
    """
    Enable or disable a logarithmic scale for the z-axis.

    :param zlog: whether or not the z-axis should be logarithmic

    **Usage examples:**

    >>> mlab.zlog()                  # enable
    >>> mlab.zlog(False)             # disable
    """
    _plot_data(zlog=zlog)
@_close_gks_on_error
def xflip(xflip=True):
    """
    Enable or disable x-axis flipping/reversal.

    :param xflip: whether or not the x-axis should be flipped

    **Usage examples:**

    >>> mlab.xflip()                 # flip
    >>> mlab.xflip(False)            # restore
    """
    _plot_data(xflip=xflip)
@_close_gks_on_error
def yflip(yflip=True):
    """
    Enable or disable y-axis flipping/reversal.

    :param yflip: whether or not the y-axis should be flipped

    **Usage examples:**

    >>> mlab.yflip()                 # flip
    >>> mlab.yflip(False)            # restore
    """
    _plot_data(yflip=yflip)
@_close_gks_on_error
def zflip(zflip=True):
    """
    Enable or disable z-axis flipping/reversal.

    :param zflip: whether or not the z-axis should be flipped

    **Usage examples:**

    >>> mlab.zflip()                 # flip
    >>> mlab.zflip(False)            # restore
    """
    _plot_data(zflip=zflip)
@_close_gks_on_error
def rflip(rflip=True):
    """
    Enable or disable flipping/reversal of the radius.

    :param rflip: whether or not the inner and outer radius should be flipped

    **Usage examples:**

    >>> mlab.rflip()                 # flip
    >>> mlab.rflip(False)            # restore
    """
    _plot_data(rflip=rflip)
@_close_gks_on_error
def phiflip(phiflip=True):
    """
    Enable or disable flipping/reversal of the polar angles.

    :param phiflip: whether or not the start and end angle should be flipped

    **Usage examples:**

    >>> # Flips/Reverses the start and end angle
    >>> mlab.phiflip()
    >>> # Restores the angles
    >>> mlab.phiflip(False)
    """
    # FIX: the docstring previously documented the parameter as "zflip",
    # which was a copy-paste error; the parameter is named "phiflip".
    _plot_data(phiflip=phiflip)
@_close_gks_on_error
def colormap(colormap=''):
    """
    Get or set the colormap for the current plot or enable manual colormap control.

    :param colormap:
        - The name of a gr colormap
        - One of the gr colormap constants (**gr.COLORMAP_...**)
        - A list of red-green-blue tuples as colormap
        - A dict mapping a normalized position to the corresponding
          red-green-blue tuple
        - **None**, if the colormap should use the current colors set by
          :py:func:`gr.setcolorrep`
        - No parameter or an empty string (default) to get the colormap as
          a list of red-green-blue tuples

    **Usage examples:**

    >>> mlab.colormap('viridis')
    >>> mlab.colormap(gr.COLORMAP_BWR)
    >>> mlab.colormap([(0, 0, 1), (1, 1, 1), (1, 0, 0)])
    >>> mlab.colormap({0.0: (0, 0, 1), 0.25: (1, 1, 1), 1.0: (1, 0, 0)})
    >>> # Manual control via gr.setcolorrep
    >>> for i in range(256):
    ...     gr.setcolorrep(1.0-i/255.0, 1.0, i/255.0)
    ...
    >>> mlab.colormap(None)
    >>> # Read back the current colormap
    >>> colormap = mlab.colormap()
    """
    # Empty string means "query": apply current settings, then return the
    # RGB triples of the active colormap.
    if colormap == '':
        _set_colormap()
        return [tuple(entry[:3]) for entry in _colormap()]
    _plot_data(colormap=colormap)
@_close_gks_on_error
def field_of_view(field_of_view):
    """
    Set the vertical field of view of the current 3D plot.

    Depending on how the field of view is set, a different projection will be
    used for 3D plots. Setting field of view to a value between 0 and 180
    degrees (exclusively) will use a perspective projection with the given
    vertical field of view. Setting field of view to 0 or NaN will use an
    orthographic projection and setting field of view to None will use a
    custom projection with limited camera positioning.

    By default, a field of view set to None will be assumed.

    :param field_of_view: the vertical field of view or None

    **Usage examples:**

    >>> # Create example data
    >>> x = np.random.uniform(0, 1, 100)
    >>> y = np.random.uniform(0, 1, 100)
    >>> z = np.random.uniform(0, 1, 100)
    >>> # Set the field of view to 20 degrees and draw an example plot
    >>> mlab.field_of_view(20)
    >>> mlab.plot3(x, y, z)
    >>> # Select an orthographic projection instead
    >>> mlab.field_of_view(0)
    >>> mlab.plot3(x, y, z)
    >>> # Restore the default projection
    >>> mlab.field_of_view(None)
    >>> mlab.plot3(x, y, z)
    """
    # BUG FIX: this previously called _plot_data(tilt=tilt). `tilt` is not a
    # local name here, so it resolved to the module-level tilt() function —
    # the field-of-view argument was silently ignored and a function object
    # was stored as the tilt setting. Forward the actual parameter instead.
    # NOTE(review): confirm the keyword key consumed downstream is
    # 'field_of_view' and not a short alias (e.g. 'fov').
    _plot_data(field_of_view=field_of_view)
@_close_gks_on_error
def tilt(tilt):
    """
    Set the 3d axis tilt of the current plot.

    Interpretation depends on the current field_of_view setting. With
    field_of_view set to None (default projection) the tilt may be any
    value between 0 and 90 and controls the angle between the viewer and
    the X-Y-plane. With field_of_view set to a value between 0 and 180 or
    to NaN (orthographic/perspective projection) the tilt may be any value
    between 0 and 180 and controls the angle between the viewer and the
    z-axis.

    :param tilt: the 3d axis tilt

    **Usage examples:**

    >>> x = np.random.uniform(0, 1, 100)
    >>> y = np.random.uniform(0, 1, 100)
    >>> z = np.random.uniform(0, 1, 100)
    >>> mlab.tilt(45)
    >>> mlab.plot3(x, y, z)
    """
    _plot_data(tilt=tilt)
@_close_gks_on_error
def rotation(rotation):
    """
    Set the 3d axis rotation of the current plot.

    The rotation controls the angle between the viewer projected onto the
    X-Y-plane and the x-axis, setting the camera position for 3D plots in
    combination with the tilt setting. Its valid range depends on the
    current field_of_view setting: 0 to 90 degrees for the default
    projection (field_of_view None), 0 to 360 degrees for the orthographic
    or perspective projection (field_of_view between 0 and 180 or NaN).

    :param rotation: the 3d axis rotation

    **Usage examples:**

    >>> x = np.random.uniform(0, 1, 100)
    >>> y = np.random.uniform(0, 1, 100)
    >>> z = np.random.uniform(0, 1, 100)
    >>> mlab.rotation(45)
    >>> mlab.plot3(x, y, z)
    """
    _plot_data(rotation=rotation)
@_close_gks_on_error
def legend(*labels, **kwargs):
    r"""
    Set the labels and location for the legend of the current plot.

    The labels are drawn with the extended text function
    :py:func:`gr.textext`, so a subset of LaTeX math syntax is available,
    but certain characters (e.g. parentheses) must be escaped.

    :param labels: the labels for each graph in the current plot
    :param location: the location of the legend (from 1 to 10)

    **Usage examples:**

    >>> mlab.legend("f\(x\)", "g\(x\)")
    >>> mlab.legend("f\(x\)", "g\(x\)", location=4)
    >>> mlab.legend()                # reset the legend
    """
    # Reject non-string labels early with a clear error.
    if any(not isinstance(label, basestring) for label in labels):
        raise TypeError('list of strings expected')
    _plot_data(labels=labels, **kwargs)
@_close_gks_on_error
def savefig(filename):
    """
    Save the current figure to a file.

    The figure is drawn using one of GR's workstation types to create a
    file of the given name. Supported file types depend on the installed
    workstation types; GR is usually built with support for .png, .jpg,
    .pdf, .ps, .gif and various other formats.

    :param filename: the filename the figure should be saved to

    **Usage examples:**

    >>> mlab.plot(range(100), lambda x: 1/(x+1))
    >>> mlab.savefig("example.png")
    """
    # Redirect output to the file, redraw, then finish the print job.
    gr.beginprint(filename)
    _plot_data()
    gr.endprint()
@_close_gks_on_error
def figure(**kwargs):
    """
    Create a new figure with the given settings.

    Settings such as the current colormap, title or axis limits are stored
    in the current figure. This function creates a fresh figure, restoring
    all defaults, and then applies any keyword arguments to it.

    **Usage examples:**

    >>> mlab.figure()                        # restore defaults
    >>> mlab.figure(title="Example Figure")  # defaults + a title
    """
    global _plt
    # Replace the module-level figure with a brand-new one.
    _plt = _Figure()
    _plt.kwargs.update(kwargs)
    return _plt
@_close_gks_on_error
def hold(flag):
    """
    Set the hold flag for combining multiple plots.

    While the hold flag is set, axes are not redrawn and previous plots
    are not cleared, so each new plot is drawn on top of the previous one.

    :param flag: the value of the hold flag

    **Usage examples:**

    >>> x = np.linspace(0, 1, 100)
    >>> mlab.plot(x, lambda x: x**2)
    >>> mlab.hold(True)
    >>> mlab.plot(x, lambda x: x**4)
    >>> mlab.plot(x, lambda x: x**8)
    >>> mlab.hold(False)
    """
    global _plt
    # Holding keeps the axes ('ax') and suppresses clearing between plots.
    _plt.kwargs.update(ax=flag, clear=not flag)
@_close_gks_on_error
def subplot(num_rows, num_columns, subplot_indices):
    """
    Set current subplot index.

    By default the current plot covers the whole window. To display more
    than one plot, the window can be split into a grid of rows and
    columns, with the current plot covering one or more cells. Subplot
    indices are one-based, start at the upper left corner, and wrap to a
    new row after every **num_columns** subplots.

    :param num_rows: the number of subplot rows
    :param num_columns: the number of subplot columns
    :param subplot_indices:
        - the subplot index to be used by the current plot
        - a pair of subplot indices, setting which subplots should be
          covered by the current plot

    **Usage examples:**

    >>> mlab.subplot(2, 3, 2)        # second cell of a 2x3 grid
    >>> mlab.subplot(4, 2, (1, 4))   # first two rows of a 4x2 grid
    >>> mlab.subplot(1, 1, 1)        # full window
    """
    global _plt
    x_min = y_min = 1
    x_max = y_max = 0
    if isinstance(subplot_indices, int):
        subplot_indices = (subplot_indices,)
    # Grow the normalized viewport to cover every requested cell.
    for index in subplot_indices:
        row = num_rows - (index - 1.0) // num_columns
        column = (index - 1.0) % num_columns + 1
        x_min = min(x_min, (column - 1) / num_columns)
        x_max = max(x_max, column / num_columns)
        y_min = min(y_min, (row - 1) / num_rows)
        y_max = max(y_max, row / num_rows)
    _plt.kwargs['subplot'] = [x_min, x_max, y_min, y_max]
    # Clear on the first cell of the grid; flush on the last.
    _plt.kwargs['clear'] = (subplot_indices[0] == 1)
    _plt.kwargs['update'] = (subplot_indices[-1] == num_rows * num_columns)
class _Figure(object):
def __init__(self, width=600, height=450):
self.args = []
self.kwargs = {
'size': (width, height),
'ax': False,
'subplot': [0, 1, 0, 1],
'clear': True,
'update': True
}
# Module-level current figure: all mlab functions read and mutate this.
_plt = _Figure()
# Cached availability of the gr3 module; None means "not yet checked".
_gr3_available = None
def _gr3_is_available():
    """Return whether GR3 can be initialized, probing once and caching the result."""
    global _gr3_available
    if _gr3_available is not None:
        return _gr3_available
    try:
        gr3.init()
    except gr3.GR3_Exception:
        _gr3_available = False
    else:
        _gr3_available = True
    return _gr3_available
def _colormap():
    """Return the current GR colormap as a 256x4 float32 RGBA array (alpha stays 1)."""
    rgba = np.ones((256, 4), np.float32)
    for index in range(256):
        # Colors 1000..1255 hold the active colormap, packed as 0xBBGGRR.
        packed = gr.inqcolor(1000 + index)
        rgba[index, 0] = (packed & 0xff) / 255.0
        rgba[index, 1] = ((packed >> 8) & 0xff) / 255.0
        rgba[index, 2] = ((packed >> 16) & 0xff) / 255.0
    return rgba
def _set_colormap():
    """
    Apply the colormap from ``_plt.kwargs['colormap']`` to GR.

    Accepted values:
    - ``None``: keep the current colormap
    - ``int``: a GR colormap index, passed to gr.setcolormap
    - ``str``: a GR colormap name, e.g. ``'viridis'`` -> ``gr.COLORMAP_VIRIDIS``
    - ``dict``: mapping of positions to colors, passed to gr.setcolormapfromrgb
    - ``tuple``: pair containing an int colormap index in either position
    - any other sequence of colors: passed to gr.setcolormapfromrgb without
      explicit positions
    """
    global _plt
    if 'cmap' in _plt.kwargs:
        warnings.warn('The parameter "cmap" has been replaced by "colormap". The value of "cmap" will be ignored.', stacklevel=3)
    colormap = _plt.kwargs.get('colormap', gr.COLORMAP_VIRIDIS)
    if colormap is None:
        return
    if isinstance(colormap, int):
        gr.setcolormap(colormap)
        return
    if hasattr(colormap, 'upper'):
        colormap_name = 'COLORMAP_' + colormap.upper()
        colormap = getattr(gr, colormap_name)
        gr.setcolormap(colormap)
        return
    if isinstance(colormap, dict):
        # Sort by position so gr.setcolormapfromrgb receives monotonically
        # increasing positions.
        positions, colors = zip(*sorted(list(colormap.items())))
    # BUG FIX: this was a plain `if`, so the `else` branch below clobbered
    # the positions/colors unpacked from a dict colormap with the dict itself.
    elif isinstance(colormap, tuple):
        if isinstance(colormap[0], int):
            gr.setcolormap(colormap[0])
        elif isinstance(colormap[1], int):
            gr.setcolormap(colormap[1])
        else:
            gr.setcolormap(1)
        return
    else:
        positions = None
        colors = colormap
    gr.setcolormapfromrgb(colors, positions)
def _interpret_size(size, dpi, default_size=(600, 450)):
units = {
'px': (1, 0),
'in': (0, 1),
'"': (0, 1),
'ft': (0, 12),
'\'': (0, 12),
'mm': (0, 0.1 / 2.54),
'cm': (0, 1 / 2.54),
'dm': (0, 10 / 2.54),
'm': (0, 100 / 2.54)
}
def pixels_per_unit(unit, dpi):
return units[unit][0] + units[unit][1] * dpi
def interpret_length(length, dpi, default_length):
if length is None:
return default_length
if is_floatish(length):
return float(length)
if hasattr(length, 'endswith'):
for unit in units:
if length.endswith(unit) and is_floatish(length[:-len(unit)]):
length_value = float(length[:-len(unit)])
return length_value * pixels_per_unit(unit, dpi)
print("Unable to interpret length '{}', falling back to default value: {} pixels".format(length, default_length), file=sys.stderr)
return default_length
def is_floatish(number):
try:
float(number)
return True
except ValueError:
return False
except TypeError:
return False
if len(size) == 2:
width = interpret_length(size[0], dpi, default_size[0])
height = interpret_length(size[1], dpi, default_size[1])
return width, height
if len(size) == 3 and is_floatish(size[0]) and is_floatish(size[1]) and size[2] in units:
width_value = float(size[0])
height_value = float(size[1])
unit = size[2]
width = width_value * pixels_per_unit(unit, dpi)
height = height_value * pixels_per_unit(unit, dpi)
return width, height
if len(size) == 4 and is_floatish(size[0]) and size[1] in units and is_floatish(size[2]) and size[3] in units:
width_value = float(size[0])
unit = size[1]
width = width_value * pixels_per_unit(unit, dpi)
height_value = float(size[2])
unit = size[3]
height = height_value * pixels_per_unit(unit, dpi)
return width, height
print("Unable to interpret size '{}', falling back to default size: {} x {} pixels".format(repr(size), default_size[0], default_size[1]), file=sys.stderr)
return default_size
def _set_viewport(kind, subplot):
    """
    Set the workstation viewport/window and the NDC viewport for the
    current plot, and store 'viewport', 'vp' and 'ratio' in _plt.kwargs.

    :param kind: the plot kind; 3D kinds get a centered square region and
        colorbar-bearing kinds get an extra right margin
    :param subplot: normalized [x_min, x_max, y_min, y_max] of the subplot
        cell within the figure
    """
    global _plt
    metric_width, metric_height, pixel_width, pixel_height = gr.inqdspsize()
    if 'figsize' in _plt.kwargs:
        # 'figsize' is given in inches (0.0254 m per inch).
        horizontal_pixels_per_inch = pixel_width * 0.0254 / metric_width
        vertical_pixels_per_inch = pixel_height * 0.0254 / metric_height
        width = _plt.kwargs['figsize'][0] * horizontal_pixels_per_inch
        height = _plt.kwargs['figsize'][1] * vertical_pixels_per_inch
    else:
        # 'size' may carry units; _interpret_size converts it to pixels.
        dpi = pixel_width / metric_width * 0.0254
        width, height = _interpret_size(_plt.kwargs['size'], dpi)
    viewport = [0, 0, 0, 0]
    vp = subplot[:]
    if width > height:
        # Landscape: compress the vertical extent by the aspect ratio.
        aspect_ratio = height / width
        metric_size = metric_width * width / pixel_width
        gr.setwsviewport(0, metric_size, 0, metric_size * aspect_ratio)
        gr.setwswindow(0, 1, 0, aspect_ratio)
        vp[2] *= aspect_ratio
        vp[3] *= aspect_ratio
    else:
        # Portrait (or square): compress the horizontal extent instead.
        aspect_ratio = width / height
        metric_size = metric_height * height / pixel_height
        gr.setwsviewport(0, metric_size * aspect_ratio, 0, metric_size)
        gr.setwswindow(0, aspect_ratio, 0, 1)
        vp[0] *= aspect_ratio
        vp[1] *= aspect_ratio
    if kind in ('wireframe', 'surface', 'plot3', 'scatter3', 'trisurf', 'volume'):
        # 3D plots are drawn into a centered square region.
        if kind in ('surface', 'trisurf', 'volume'):
            extent = min(vp[1] - vp[0] - 0.1, vp[3] - vp[2])
        else:
            extent = min(vp[1] - vp[0], vp[3] - vp[2])
        vp0 = 0.5 * (vp[0] + vp[1] - extent)
        vp1 = 0.5 * (vp[0] + vp[1] + extent)
        vp2 = 0.5 * (vp[2] + vp[3] - extent)
        vp3 = 0.5 * (vp[2] + vp[3] + extent)
        vp = (vp0, vp1, vp2, vp3)
    # Inset the viewport to leave margins for axis labels and the title.
    viewport[0] = vp[0] + 0.125 * (vp[1] - vp[0])
    viewport[1] = vp[0] + 0.925 * (vp[1] - vp[0])
    viewport[2] = vp[2] + 0.125 * (vp[3] - vp[2])
    viewport[3] = vp[2] + 0.925 * (vp[3] - vp[2])
    if width > height:
        viewport[2] += (1 - (subplot[3] - subplot[2])**2) * 0.02
    colorbar = _plt.kwargs.get('colorbar', None)
    if colorbar or kind in ('contour', 'contourf', 'heatmap', 'polar_heatmap', 'hexbin', 'quiver'):
        # Reserve space on the right-hand side for a colorbar.
        viewport[1] -= 0.1
    gr.setviewport(*viewport)
    _plt.kwargs['viewport'] = viewport
    _plt.kwargs['vp'] = vp
    _plt.kwargs['ratio'] = aspect_ratio
    if 'backgroundcolor' in _plt.kwargs:
        # Fill the whole subplot cell with the background color.
        gr.savestate()
        gr.selntran(0)
        gr.setfillintstyle(gr.INTSTYLE_SOLID)
        gr.setfillcolorind(_plt.kwargs['backgroundcolor'])
        if width > height:
            gr.fillrect(subplot[0], subplot[1],
                        subplot[2] * aspect_ratio, subplot[3] * aspect_ratio)
        else:
            gr.fillrect(subplot[0] * aspect_ratio, subplot[1] * aspect_ratio,
                        subplot[2], subplot[3])
        gr.selntran(1)
        gr.restorestate()
    if kind == 'polar':
        # Polar plots use a square viewport centered in the available area.
        x_min, x_max, y_min, y_max = viewport
        x_center = 0.5 * (x_min + x_max)
        y_center = 0.5 * (y_min + y_max)
        r = 0.5 * min(x_max - x_min, y_max - y_min)
        gr.setviewport(x_center - r, x_center + r, y_center - r, y_center + r)
def _fix_minmax(v_min, v_max):
if v_min == v_max:
if v_min == 0:
v_min -= 0.1
v_max += 0.1
else:
v_min -= 0.1 * np.abs(v_min)
v_max += 0.1 * np.abs(v_max)
return v_min, v_max
def _minmax(kind=None):
    """
    Compute the data ranges of all current plot args and store them —
    combined with any user-supplied limits ('xlim', 'ylim', 'zlim',
    'rlim', 'philim') — in _plt.kwargs as 'xrange', 'yrange', 'zrange',
    'rrange' and 'phirange'.

    :param kind: the plot kind; 'heatmap', 'polar_heatmap', 'quiver' and
        'bar' need special range handling
    """
    global _plt
    x_min = y_min = z_min = float('infinity')
    x_max = y_max = z_max = float('-infinity')
    x_step = y_step = float('-infinity')
    for x, y, z, c, spec in _plt.args:
        if x is None and kind in ('heatmap',):
            # Heatmap cells are centered on integer coordinates.
            x_min = -0.5
            x_max = z.shape[1] - 0.5
        elif x is None and kind in ('polar_heatmap',):
            x_min = 0
            x_max = z.shape[0]
        else:
            x_min = min(np.nanmin(x), x_min)
            x_max = max(np.nanmax(x), x_max)
        if y is None and kind in ('heatmap',):
            y_min = -0.5
            y_max = z.shape[0] - 0.5
        elif y is None and kind in ('polar_heatmap',):
            y_min = 0
            y_max = z.shape[1]
        else:
            y_min = min(np.nanmin(y), y_min)
            y_max = max(np.nanmax(y), y_max)
        if z is not None and kind not in ('bar', ):
            z_min = min(np.nanmin(z), z_min)
            z_max = max(np.nanmax(z), z_max)
        if kind in ('quiver',):
            # Track the largest grid step to pad the range below.
            if len(x) > 1:
                x_step = max(np.abs(x[1:] - x[:-1]).max(), x_step)
            if len(y) > 1:
                y_step = max(np.abs(y[1:] - y[:-1]).max(), y_step)
    if kind in ('quiver',):
        if x_step is not None and x_step > 0:
            x_min -= x_step
            x_max += x_step
        if y_step is not None and y_step > 0:
            y_min -= y_step
            y_max += y_step
        # Use vector length for colormap
        x, y, u, v, spec = _plt.args[0]
        lengths_squared = u**2 + v**2
        z_min = np.sqrt(np.min(lengths_squared))
        z_max = np.sqrt(np.max(lengths_squared))
    # BUG FIX: `kind in ('bar')` tested substring membership in the string
    # 'bar' (parentheses without a comma are not a tuple).
    if kind in ('bar',):
        x_min -= 1
        x_max += 1
    else:
        x_min, x_max = _fix_minmax(x_min, x_max)
        y_min, y_max = _fix_minmax(y_min, y_max)
        z_min, z_max = _fix_minmax(z_min, z_max)
    x_range = _plt.kwargs.get('xlim', (x_min, x_max))
    y_range = _plt.kwargs.get('ylim', (y_min, y_max))
    z_range = _plt.kwargs.get('zlim', (z_min, z_max))
    r_range = _plt.kwargs.get('rlim', (0, y_max))
    phi_range = _plt.kwargs.get('philim', (None, None))
    # Replace None with values determined above
    if x_range[0] is None:
        x_range = (x_min, x_range[1])
    if x_range[1] is None:
        x_range = (x_range[0], x_max)
    if y_range[0] is None:
        y_range = (y_min, y_range[1])
    if y_range[1] is None:
        y_range = (y_range[0], y_max)
    if z_range[0] is None:
        z_range = (z_min, z_range[1])
    if z_range[1] is None:
        z_range = (z_range[0], z_max)
    if r_range[0] is None:
        r_range = (0, r_range[1])
    if r_range[1] is None:
        r_range = (r_range[0], y_max)
    # User-supplied angular limits are given in radians; convert to degrees.
    if phi_range[0] is None:
        phi_range = (0, phi_range[1])
    else:
        phi_range = (np.degrees(phi_range[0]), phi_range[1])
    if phi_range[1] is None:
        phi_range = (phi_range[0], 360)
    else:
        phi_range = (phi_range[0], np.degrees(phi_range[1]))
    _plt.kwargs['xrange'] = x_range
    _plt.kwargs['yrange'] = y_range
    _plt.kwargs['zrange'] = z_range
    _plt.kwargs['rrange'] = r_range
    _plt.kwargs['phirange'] = phi_range
def _set_window(kind):
    """
    Compute tick spacing and axis origins from the data ranges and set
    the GR window (and, for 3D kinds, the GR space) accordingly.

    Results are stored in _plt.kwargs under 'xaxis', 'yaxis', 'zaxis',
    'window' and 'scale'.

    :param kind: the plot kind; polar and 3D kinds get special windows
    """
    global _plt
    scale = 0
    if kind != 'polar':
        scale |= gr.OPTION_X_LOG if _plt.kwargs.get('xlog', False) else 0
        scale |= gr.OPTION_Y_LOG if _plt.kwargs.get('ylog', False) else 0
        scale |= gr.OPTION_Z_LOG if _plt.kwargs.get('zlog', False) else 0
        scale |= gr.OPTION_FLIP_X if _plt.kwargs.get('xflip', False) else 0
        scale |= gr.OPTION_FLIP_Y if _plt.kwargs.get('yflip', False) else 0
        scale |= gr.OPTION_FLIP_Z if _plt.kwargs.get('zflip', False) else 0
    _minmax(kind)
    if kind in ('wireframe', 'surface', 'plot3', 'scatter3', 'polar', 'trisurf', 'volume'):
        major_count = 2
    else:
        major_count = 5
    x_min, x_max = _plt.kwargs['xrange']
    if not scale & gr.OPTION_X_LOG:
        if _plt.kwargs.get('adjust_xlim', True) and kind not in ('heatmap',):
            x_min, x_max = gr.adjustlimits(x_min, x_max)
        # BUG FIX: `kind in ('bar')` tested substring membership in the
        # string 'bar'; use a real one-element tuple.
        if kind in ('bar',):
            x_tick = 1
            if 'xnotations' in _plt.kwargs:
                x_major_count = 0
            else:
                x_major_count = 1
        else:
            x_major_count = major_count
            x_tick = gr.tick(x_min, x_max) / x_major_count
    else:
        x_tick = x_major_count = 1
    if not scale & gr.OPTION_FLIP_X:
        xorg = (x_min, x_max)
    else:
        xorg = (x_max, x_min)
    _plt.kwargs['xaxis'] = x_tick, xorg, x_major_count
    y_min, y_max = _plt.kwargs['yrange']
    if kind in ('hist', 'bar') and 'ylim' not in _plt.kwargs:
        # Histograms and bar charts are anchored at the axis.
        if scale & gr.OPTION_Y_LOG:
            y_min = 1
        else:
            y_min = 0
    if not scale & gr.OPTION_Y_LOG:
        if _plt.kwargs.get('adjust_ylim', True) and kind not in ('heatmap',):
            y_min, y_max = gr.adjustlimits(y_min, y_max)
        y_major_count = major_count
        y_tick = gr.tick(y_min, y_max) / y_major_count
    else:
        y_tick = y_major_count = 1
    if not scale & gr.OPTION_FLIP_Y:
        yorg = (y_min, y_max)
    else:
        yorg = (y_max, y_min)
    _plt.kwargs['yaxis'] = y_tick, yorg, y_major_count
    _plt.kwargs['window'] = (x_min, x_max, y_min, y_max)
    if kind in ('polar', 'polar_heatmap'):
        # Compute a window that tightly contains the (possibly partial)
        # polar sector, corrected for the viewport's aspect ratio.
        phi_min, phi_max = _phase_wrapped_philim(adjust=_plt.kwargs.get('adjust_philim', True))
        r_min, r_max = _plt.kwargs['rrange']
        if _plt.kwargs.get('adjust_rlim', True):
            r_min, r_max = gr.adjustlimits(r_min, r_max)
        r_rrel_min = r_min / r_max
        angles = [a for a in (0, 90, 180, 270, 360) if phi_min < a < phi_max] + [phi_min, phi_max]
        bbox = [1, -1, 1, -1]
        for angle in angles:
            sinf = np.sin(np.radians(angle))
            cosf = np.cos(np.radians(angle))
            min_x = min(cosf * 1.12, r_rrel_min * cosf)
            max_x = max(cosf * 1.12, r_rrel_min * cosf)
            min_y = min(sinf * 1.12, r_rrel_min * sinf)
            max_y = max(sinf * 1.12, r_rrel_min * sinf)
            bbox[0] = min(bbox[0], min_x)
            bbox[1] = max(bbox[1], max_x)
            bbox[2] = min(bbox[2], min_y)
            bbox[3] = max(bbox[3], max_y)
        viewport = _plt.kwargs['viewport']
        vp = _plt.kwargs['vp']
        viewport_aspect = (viewport[1] - viewport[0]) / (viewport[3] - viewport[2])
        vp_aspect = (vp[1] - vp[0]) / (vp[3] - vp[2])
        vp_aspect = vp_aspect / viewport_aspect
        width = (bbox[1] - bbox[0]) / vp_aspect
        height = bbox[3] - bbox[2]
        aspect = width / height
        if aspect > 1:
            d = (width - height) / 2
            bbox[3] += d
            bbox[2] -= d
        else:
            d = (height - width) / 2
            bbox[1] += d
            bbox[0] -= d
        gr.setwindow(*bbox)
    else:
        gr.setwindow(x_min, x_max, y_min, y_max)
    if kind in ('wireframe', 'surface', 'plot3', 'scatter3', 'trisurf', 'volume'):
        z_min, z_max = _plt.kwargs['zrange']
        if not scale & gr.OPTION_Z_LOG:
            if _plt.kwargs.get('adjust_zlim', True):
                z_min, z_max = gr.adjustlimits(z_min, z_max)
            z_major_count = major_count
            z_tick = gr.tick(z_min, z_max) / z_major_count
        else:
            z_tick = z_major_count = 1
        if not scale & gr.OPTION_FLIP_Z:
            zorg = (z_min, z_max)
        else:
            zorg = (z_max, z_min)
        _plt.kwargs['zaxis'] = z_tick, zorg, z_major_count
        rotation = _plt.kwargs.get('rotation', 40)
        tilt = _plt.kwargs.get('tilt', 70)
        gr.setspace(z_min, z_max, rotation, tilt)
        fov = _plt.kwargs.get('field_of_view', None)
        if fov is not None:
            gr.setwindow3d(x_min, x_max, y_min, y_max, z_min, z_max)
            gr.setwindow(-1, 1, -1, 1)
            rotation %= 360
            # NOTE(review): 380 looks like a typo for 360; the subsequent
            # clamp to [0, 180] limits the damage — confirm against upstream.
            tilt %= 380
            tilt = min(max(tilt, 0), 180)
            gr.setspace3d(-rotation, tilt, fov, 0)
    _plt.kwargs['scale'] = scale
    gr.setscale(scale)
def _phase_wrapped_philim(phirange=None, adjust=False):
if phirange is None:
phi_min, phi_max = _plt.kwargs.get('phirange', (0, 360))
else:
phi_min, phi_max = phirange
if phi_min == 0 and phi_max == 360:
return phi_min, phi_max
phi_min -= np.floor(phi_min / 360.) * 360
phi_max -= np.floor(phi_max / 360.) * 360
if abs(phi_min - phi_max) < 1e-6:
phi_max += 360
if phi_min > phi_max:
phi_max, phi_min = phi_min, phi_max
if adjust:
phi_min, phi_max = gr.adjustlimits(phi_min / 3., phi_max / 3.)
phi_min *= 3
phi_max *= 3
if phi_max > 360:
phi_max = 360
if phi_min > 360:
phi_min = 360
return phi_min, phi_max
def _draw_axes(kind, pass_=1):
    """
    Draw grid and axes for the current plot.

    :param kind: the plot kind; selects between 2D and 3D axes and special
        tick handling (inward ticks for 'heatmap'/'shade', bar labels)
    :param pass_: for 3D kinds, pass 1 draws the grid (behind the data)
        and pass 2 draws the axes (in front of the data)
    """
    global _plt
    viewport = _plt.kwargs['viewport']
    vp = _plt.kwargs['vp']
    x_tick, x_org, x_major_count = _plt.kwargs['xaxis']
    y_tick, y_org, y_major_count = _plt.kwargs['yaxis']
    # enforce uniform axis labels for logarithmic labels
    # otherwise the labels will switch between decimal and exponential notation
    if _plt.kwargs['scale'] & gr.OPTION_X_LOG:
        x_tick = 10
    if _plt.kwargs['scale'] & gr.OPTION_Y_LOG:
        y_tick = 10
    gr.setlinecolorind(1)
    gr.setlinewidth(1)
    # Scale text and ticks with the viewport diagonal.
    diag = ((viewport[1] - viewport[0])**2 + (viewport[3] - viewport[2])**2)**0.5
    charheight = max(0.018 * diag, 0.012)
    gr.setcharheight(charheight)
    ticksize = 0.0075 * diag
    if kind in ('wireframe', 'surface', 'plot3', 'scatter3', 'trisurf', 'volume'):
        z_tick, z_org, z_major_count = _plt.kwargs['zaxis']
        fov = _plt.kwargs.get('field_of_view', None)
        if fov is None:
            if pass_ == 1:
                gr.grid3d(x_tick, 0, z_tick, x_org[0], y_org[1], z_org[0], 2, 0, 2)
                gr.grid3d(0, y_tick, 0, x_org[0], y_org[1], z_org[0], 0, 2, 0)
            else:
                gr.axes3d(x_tick, 0, z_tick, x_org[0], y_org[0], z_org[0], x_major_count, 0, z_major_count, -ticksize)
                gr.axes3d(0, y_tick, 0, x_org[1], y_org[0], z_org[0], 0, y_major_count, 0, ticksize)
        else:
            # With a perspective projection, pick grid/axes corners
            # depending on the camera rotation quadrant and tilt.
            rotation = _plt.kwargs.get('rotation', 40)
            tilt = _plt.kwargs.get('tilt', 70)
            rotation %= 360
            # NOTE(review): 380 looks like a typo for 360; the clamp below
            # limits the damage — confirm against upstream.
            tilt %= 380
            tilt = min(max(tilt, 0), 180)
            zi = 0 if 0 <= tilt <= 90 else 1
            if pass_ == 1:
                if 0 <= rotation < 90:
                    gr.grid3d(x_tick, 0, z_tick, x_org[0], y_org[1], z_org[zi], 2, 0, 2)
                    gr.grid3d(0, y_tick, 0, x_org[0], y_org[1], z_org[zi], 0, 2, 0)
                elif 90 <= rotation < 180:
                    gr.grid3d(x_tick, 0, z_tick, x_org[1], y_org[1], z_org[zi], 2, 0, 2)
                    gr.grid3d(0, y_tick, 0, x_org[1], y_org[1], z_org[zi], 0, 2, 0)
                elif 180 <= rotation < 270:
                    gr.grid3d(x_tick, 0, z_tick, x_org[1], y_org[0], z_org[zi], 2, 0, 2)
                    gr.grid3d(0, y_tick, 0, x_org[1], y_org[0], z_org[zi], 0, 2, 0)
                else:
                    gr.grid3d(x_tick, 0, z_tick, x_org[0], y_org[0], z_org[0], 2, 0, 2)
                    gr.grid3d(0, y_tick, 0, x_org[0], y_org[0], z_org[zi], 0, 2, 0)
            else:
                if 0 <= rotation < 90:
                    gr.axes3d(x_tick, 0, z_tick, x_org[0], y_org[0], z_org[zi], x_major_count, 0, z_major_count, -ticksize)
                    gr.axes3d(0, y_tick, 0, x_org[1], y_org[0], z_org[zi], 0, y_major_count, 0, ticksize)
                elif 90 <= rotation < 180:
                    gr.axes3d(0, 0, z_tick, x_org[0], y_org[1], z_org[zi], 0, 0, z_major_count, -ticksize)
                    gr.axes3d(x_tick, y_tick, 0, x_org[0], y_org[0], z_org[zi], x_major_count, y_major_count, 0, -ticksize)
                elif 180 <= rotation < 270:
                    gr.axes3d(x_tick, 0, z_tick, x_org[1], y_org[1], z_org[zi], x_major_count, 0, z_major_count, ticksize)
                    gr.axes3d(0, y_tick, 0, x_org[0], y_org[0], z_org[zi], 0, y_major_count, 0, -ticksize)
                else:
                    gr.axes3d(0, 0, z_tick, x_org[1], y_org[0], z_org[zi], 0, 0, z_major_count, -ticksize)
                    gr.axes3d(x_tick, y_tick, 0, x_org[1], y_org[1], z_org[zi], x_major_count, y_major_count, 0, ticksize)
    else:
        if kind in ('heatmap', 'shade'):
            # Ticks point inwards so they stay visible on top of the image.
            ticksize = -ticksize
        if kind not in ('shade',):
            gr.grid(x_tick, y_tick, 0, 0, x_major_count, y_major_count)
        gr.axes(x_tick, y_tick, x_org[0], y_org[0], x_major_count, y_major_count, ticksize)
        gr.axes(x_tick, y_tick, x_org[1], y_org[1], -x_major_count, -y_major_count, -ticksize)
    if 'title' in _plt.kwargs:
        gr.savestate()
        gr.settextalign(gr.TEXT_HALIGN_CENTER, gr.TEXT_VALIGN_TOP)
        gr.textext(0.5 * (viewport[0] + viewport[1]), vp[3], _plt.kwargs['title'])
        gr.restorestate()
    if kind in ('wireframe', 'surface', 'plot3', 'scatter3', 'trisurf', 'volume'):
        x_label = _plt.kwargs.get('xlabel', '')
        y_label = _plt.kwargs.get('ylabel', '')
        z_label = _plt.kwargs.get('zlabel', '')
        gr.titles3d(x_label, y_label, z_label)
    else:
        if 'xlabel' in _plt.kwargs:
            gr.savestate()
            gr.settextalign(gr.TEXT_HALIGN_CENTER, gr.TEXT_VALIGN_BOTTOM)
            gr.textext(0.5 * (viewport[0] + viewport[1]), vp[2] + 0.5 * charheight, _plt.kwargs['xlabel'])
            gr.restorestate()
        if 'ylabel' in _plt.kwargs:
            gr.savestate()
            gr.settextalign(gr.TEXT_HALIGN_CENTER, gr.TEXT_VALIGN_TOP)
            gr.setcharup(-1, 0)
            gr.textext(vp[0] + 0.5 * charheight, 0.5 * (viewport[2] + viewport[3]), _plt.kwargs['ylabel'])
            gr.restorestate()
    # BUG FIX: `kind in ('bar')` tested substring membership in the string
    # 'bar'; use a real one-element tuple.
    if kind in ('bar',):
        if 'xnotations' in _plt.kwargs:
            # Draw one centered label below each bar.
            x_notations = _plt.kwargs.pop('xnotations')
            yval = _plt.args[0][2] if _plt.kwargs['multi_bar'] else _plt.args[0][1]
            if len(x_notations) == len(yval):
                window = _plt.kwargs['window']
                gr.setcharheight(charheight)
                gr.settextalign(2, 1)
                for i in range(1, len(x_notations) + 1):
                    x = viewport[0] + ((viewport[1] - viewport[0]) * i) / (window[1] - window[0])
                    y = viewport[2] - 0.5 * charheight
                    gr.textext(x, y, x_notations[i - 1])
            else:
                raise IndexError('The length of xnotations has to equal the amount of y-values!')
def _draw_polar_axes():
    """
    Draw the polar coordinate grid: angle spokes with degree labels and
    concentric radius arcs with labels, honoring 'rlim'/'philim' and the
    'rflip'/'phiflip' options in _plt.kwargs.
    """
    global _plt
    viewport = _plt.kwargs['viewport']
    diag = ((viewport[1] - viewport[0])**2 + (viewport[3] - viewport[2])**2)**0.5
    charheight = max(0.018 * diag, 0.012)
    r_min, r_max = _plt.kwargs['rrange']
    if _plt.kwargs.get('adjust_rlim', True):
        r_min, r_max = gr.adjustlimits(r_min, r_max)
    phi_min, phi_max = _phase_wrapped_philim(adjust=_plt.kwargs.get('adjust_philim', True))
    gr.savestate()
    gr.setcharheight(charheight)
    gr.setlinetype(gr.LINETYPE_SOLID)
    # Angle spokes: one per tick; labels on every other spoke.
    tick = gr.tick(phi_min / 6., phi_max / 6.) * 1.5
    n = int(round((phi_max - phi_min) / tick + 0.5))
    for i in range(n + 1):
        angle_label = phi_min + i * tick
        if _plt.kwargs.get('phiflip', False):
            angle = phi_max - i * tick
        else:
            angle = angle_label
        sinf = np.sin(np.radians(angle))
        cosf = np.cos(np.radians(angle))
        # Spokes run from the inner radius to the outer circle.
        pline = np.array([r_min / r_max, 1])
        if phi_min <= angle <= phi_max:
            if i % 2 == 0 and not (i == n and phi_max % 360 == phi_min % 360):
                gr.setlinecolorind(88)
                gr.settextalign(gr.TEXT_HALIGN_CENTER, gr.TEXT_VALIGN_HALF)
                # Place the label slightly outside the outer circle.
                x, y = gr.wctondc(1.1 * cosf, 1.1 * sinf)
                gr.textext(x, y, "%g\xb0" % angle_label)
            else:
                gr.setlinecolorind(90)
        else:
            # Clamp out-of-range spokes onto the boundary of the sector.
            angle = np.clip(angle, phi_min, phi_max)
            gr.setlinecolorind(88)
            sinf = np.sin(np.radians(angle))
            cosf = np.cos(np.radians(angle))
        gr.polyline(cosf * pline, sinf * pline)
    # Radius arcs: alternating colors; labels on every other arc.
    tick = 0.5 * gr.tick(r_min, r_max)
    n = int(round((r_max - r_min) / tick + 0.5))
    for i in range(n + 1):
        r = (r_min + i * tick) / r_max
        gr.setlinecolorind(88)
        r = np.clip(r, 0, 1)
        if i % 2 == 1 and r <= 1:
            gr.setlinecolorind(90)
        gr.drawarc(-r, r, -r, r, phi_min, phi_max)
        if i % 2 == 0 and not r > 1:
            gr.settextalign(gr.TEXT_HALIGN_CENTER, gr.TEXT_VALIGN_HALF)
            # Place the label next to the arc, offset perpendicular to the
            # phi_min boundary.
            sinf = np.sin(np.radians(phi_min))
            cosf = np.cos(np.radians(phi_min))
            x, y = gr.wctondc(r * cosf + sinf * 0.05, r * sinf - cosf * 0.05)
            if _plt.kwargs.get('rflip', False):
                r_label = r_max - i * tick
            else:
                r_label = r_min + i * tick
            gr.text(x, y, "%g" % r_label)
    gr.restorestate()
def _draw_legend():
    """
    Draw a legend box for the labels in _plt.kwargs['labels'].

    The 'location' option (1-10) selects the corner/edge; one legend line
    is drawn per entry in _plt.args, reusing each curve's line spec for
    the sample line/marker.
    """
    global _plt
    viewport = _plt.kwargs['viewport']
    num_labels = len(_plt.kwargs['labels'])
    location = _plt.kwargs.get('location', 1)
    gr.savestate()
    gr.selntran(0)
    gr.setscale(0)
    # Legend width follows the widest label's text extent.
    w = 0
    for label in _plt.kwargs['labels']:
        tbx, tby = gr.inqtextext(0, 0, label)
        w = max(w, tbx[2])
    num_lines = len(_plt.args)
    h = (num_lines + 1) * 0.03
    if location in (8, 9, 10):
        # horizontally centered
        px = 0.5 * (viewport[0] + viewport[1] - w)
    elif location in (2, 3, 6):
        # left side
        px = viewport[0] + 0.11
    else:
        # right side
        px = viewport[1] - 0.05 - w
    if location in (5, 6, 7, 10):
        # vertically centered
        py = 0.5 * (viewport[2] + viewport[3] + h) - 0.03
    elif location in (3, 4, 8):
        # bottom
        py = viewport[2] + h
    else:
        # top
        py = viewport[3] - 0.06
    # Background box with a solid border.
    gr.setfillintstyle(gr.INTSTYLE_SOLID)
    gr.setfillcolorind(0)
    gr.fillrect(px - 0.08, px + w + 0.02, py + 0.03, py - 0.03 * num_lines)
    gr.setlinetype(gr.LINETYPE_SOLID)
    gr.setlinecolorind(1)
    gr.setlinewidth(1)
    gr.drawrect(px - 0.08, px + w + 0.02, py + 0.03, py - 0.03 * num_lines)
    i = 0
    gr.uselinespec(" ")
    for (x, y, z, c, spec) in _plt.args:
        gr.savestate()
        # uselinespec returns a mask describing line/marker presence.
        mask = gr.uselinespec(spec)
        if mask in (0, 1, 3, 4, 5):
            gr.polyline([px - 0.07, px - 0.01], [py, py])
        if mask & 2:
            gr.polymarker([px - 0.06, px - 0.02], [py, py])
        gr.restorestate()
        gr.settextalign(gr.TEXT_HALIGN_LEFT, gr.TEXT_VALIGN_HALF)
        if i < num_labels:
            gr.textext(px, py, _plt.kwargs['labels'][i])
        i += 1
        py -= 0.03
    gr.selntran(1)
    gr.restorestate()
def _colorbar(off=0.0, colors=256, label_name='zlabel'):
    """
    Draw a vertical colorbar to the right of the current viewport.

    :param off: additional horizontal offset in NDC
    :param colors: number of discrete color cells to draw
    :param label_name: _plt.kwargs key holding the colorbar axis label
    """
    global _plt
    gr.savestate()
    viewport = _plt.kwargs['viewport']
    zmin, zmax = _plt.kwargs['zrange']
    zlog = _plt.kwargs.get('zlog', None)
    gr.setviewport(viewport[1] + 0.02 + off, viewport[1] + 0.05 + off,
                   viewport[2], viewport[3])
    gr.setwindow(0, 1, 0, 1)
    # Colormap indices 1000..1255 span the active colormap.
    if colors == 1:
        data = [1000]
    else:
        data = [1000 + int(255 * i / (colors - 1)) for i in range(colors)]
    gr.setlinecolorind(1)
    gr.setscale(0)
    # Draw the cells top-down, or bottom-up when the z axis is flipped.
    if _plt.kwargs['scale'] & gr.OPTION_FLIP_Z:
        gr.cellarray(0, 1, 0, 1, 1, colors, data)
    else:
        gr.cellarray(0, 1, 1, 0, 1, colors, data)
    diag = ((viewport[1] - viewport[0]) ** 2 + (viewport[3] - viewport[2]) ** 2) ** 0.5
    charheight = max(0.016 * diag, 0.012)
    gr.setcharheight(charheight)
    if 0 < zmin < zmax and zlog:
        # Logarithmic scale with decade ticks.
        gr.setwindow(0, 1, zmin, zmax)
        if _plt.kwargs['scale'] & gr.OPTION_FLIP_Z:
            gr.setscale(gr.OPTION_Y_LOG | gr.OPTION_FLIP_Y)
        else:
            gr.setscale(gr.OPTION_Y_LOG)
        gr.axes(0, 2, 1, zmin, 0, 1, 0.005)
    elif zmin <= zmax and not zlog:
        # Linear scale.
        if _plt.kwargs['scale'] & gr.OPTION_FLIP_Z:
            gr.setscale(gr.OPTION_FLIP_Y)
        else:
            gr.setscale(0)
        ztick = 0.5 * gr.tick(zmin, zmax)
        gr.setwindow(0, 1, zmin, zmax)
        gr.axes(0, ztick, 1, zmin, 0, 1, 0.005)
    else:
        # Unusable range (e.g. infinite bounds): label the ends symbolically.
        if zmin == float('inf') and zmax != float('-inf') and not (zmax < 0 and zlog):
            labels = ("min", str(zmax))
        elif zmax == float('-inf') and zmin != float('inf') and not (zmin < 0 and zlog):
            labels = (str(zmin), "max")
        else:
            labels = ("min", "max")
        def axeslbl_callback(x, y, svalue, value, labels=labels):
            # Replace the numeric tick value (0 or 1) with a symbolic label.
            gr.text(x, y, labels[int(value)])
        if _plt.kwargs['scale'] & gr.OPTION_FLIP_Z:
            gr.setscale(gr.OPTION_FLIP_Y)
        else:
            gr.setscale(0)
        gr.axeslbl(0, 1, 1, 0, 0, 1, 0.005, 0, axeslbl_callback)
    label = _plt.kwargs.get(label_name, None)
    if label:
        diag = ((viewport[1] - viewport[0])**2 + (viewport[3] - viewport[2])**2)**0.5
        charheight = max(0.018 * diag, 0.012)
        gr.setcharheight(charheight)
        gr.settextalign(gr.TEXT_HALIGN_CENTER, gr.TEXT_VALIGN_BASE)
        gr.textext(viewport[1] + 0.035 + off, viewport[3] + 0.01, label)
    gr.restorestate()
def _plot_data(**kwargs):
    """
    Draw the current plot data (_plt.args) using the plot kind and options
    stored in _plt.kwargs, updated with **kwargs.

    This is the central dispatch for all plot kinds ('line', 'step',
    'scatter', 'bar', 'contour', 'heatmap', 'surface', 'volume', ...).
    """
    global _plt
    _plt.kwargs.update(kwargs)
    colorbar = _plt.kwargs.get('colorbar', None)
    if not _plt.args:
        return
    kind = _plt.kwargs.get('kind', 'line')
    if _plt.kwargs['clear']:
        gr.clearws()
    # Set up viewport, window and axes depending on the plot kind.
    if kind in ('imshow', 'isosurface'):
        _set_viewport(kind, _plt.kwargs['subplot'])
    elif not _plt.kwargs['ax']:
        _set_viewport(kind, _plt.kwargs['subplot'])
        _set_window(kind)
        if kind == 'polar':
            _draw_polar_axes()
        elif kind == 'polar_heatmap':
            pass
        else:
            _draw_axes(kind)
    elif kind in ('polar', 'polar_heatmap'):
        _set_viewport(kind, _plt.kwargs['subplot'])
        _set_window(kind)
    _set_colormap()
    # The excluded kinds draw their own colorbar later (with custom offsets
    # or level counts).
    # BUG FIX: a missing comma concatenated 'volume' 'polar_heatmap' into
    # the single string 'volumepolar_heatmap', so those kinds got a second,
    # generic colorbar here.
    if colorbar and kind not in ('quiver', 'hexbin', 'contour', 'contourf', 'surface', 'trisurf', 'heatmap', 'volume', 'polar_heatmap'):
        _colorbar()
    gr.uselinespec(" ")
    for x, y, z, c, spec in _plt.args:
        gr.savestate()
        if 'alpha' in _plt.kwargs:
            gr.settransparency(_plt.kwargs['alpha'])
        if kind == 'line':
            mask = gr.uselinespec(spec)
            if mask in (0, 1, 3, 4, 5):
                gr.polyline(x, y)
            if mask & 2:
                gr.polymarker(x, y)
        if kind == 'step':
            mask = gr.uselinespec(spec)
            if mask in (0, 1, 3, 4, 5):
                # 'step_where' controls where the vertical jumps happen.
                where = _plt.kwargs.get('step_where', 'mid')
                if where == 'pre':
                    n = len(x)
                    x_step_boundaries = np.zeros(2 * n - 1)
                    y_step_values = np.zeros(2 * n - 1)
                    x_step_boundaries[0] = x[0]
                    x_step_boundaries[1::2] = x[:-1]
                    x_step_boundaries[2::2] = x[1:]
                    y_step_values[0] = y[0]
                    y_step_values[1::2] = y[1:]
                    y_step_values[2::2] = y[1:]
                elif where == 'post':
                    n = len(x)
                    x_step_boundaries = np.zeros(2 * n - 1)
                    y_step_values = np.zeros(2 * n - 1)
                    x_step_boundaries[0::2] = x
                    x_step_boundaries[1::2] = x[1:]
                    x_step_boundaries[-1] = x[-1]
                    y_step_values[0::2] = y
                    y_step_values[1::2] = y[:-1]
                    y_step_values[-1] = y[-1]
                else:
                    n = len(x)
                    x_step_boundaries = np.zeros(2 * n)
                    x_step_boundaries[0] = x[0]
                    x_step_boundaries[1:-1][0::2] = (x[1:] + x[:-1]) / 2
                    x_step_boundaries[1:-1][1::2] = (x[1:] + x[:-1]) / 2
                    x_step_boundaries[-1] = x[-1]
                    y_step_values = np.zeros(2 * n)
                    y_step_values[0::2] = y
                    y_step_values[1::2] = y
                gr.polyline(x_step_boundaries, y_step_values)
            if mask & 2:
                gr.polymarker(x, y)
        elif kind == 'scatter':
            gr.setmarkertype(gr.MARKERTYPE_SOLID_CIRCLE)
            if z is not None or c is not None:
                if c is not None:
                    c_min = c.min()
                    # np.ptp(c) instead of c.ptp(): the ndarray method was
                    # removed in NumPy 2.0.
                    c_ptp = np.ptp(c)
                for i in range(len(x)):
                    if z is not None:
                        gr.setmarkersize(z[i] / 100.0)
                    if c is not None:
                        c_index = 1000 + int(255 * (c[i] - c_min) / c_ptp)
                        gr.setmarkercolorind(c_index)
                    gr.polymarker([x[i]], [y[i]])
            else:
                gr.polymarker(x, y)
        elif kind == 'bar':
            _plot_bar()
        elif kind == 'polar_histogram':
            _plot_polar_histogram()
        elif kind == 'quiver':
            u = z
            v = c
            gr.quiver(len(x), len(y), x, y, u, v, True)
            if colorbar or colorbar is None:
                _colorbar(0.05)
        elif kind == 'stem':
            gr.setlinecolorind(1)
            gr.polyline(_plt.kwargs['window'][:2], [0, 0])
            gr.setmarkertype(gr.MARKERTYPE_SOLID_CIRCLE)
            gr.uselinespec(spec)
            for xi, yi in zip(x, y):
                gr.polyline([xi, xi], [0, yi])
            gr.polymarker(x, y)
        elif kind == 'hist':
            y_min = _plt.kwargs['window'][2]
            for i in range(1, len(y) + 1):
                gr.setfillcolorind(989)
                gr.setfillintstyle(gr.INTSTYLE_SOLID)
                gr.fillrect(x[i - 1], x[i], y_min, y[i - 1])
                gr.setfillcolorind(1)
                gr.setfillintstyle(gr.INTSTYLE_HOLLOW)
                gr.fillrect(x[i - 1], x[i], y_min, y[i - 1])
        elif kind == 'contour':
            z_min, z_max = _plt.kwargs['zrange']
            gr.setspace(z_min, z_max, 0, 90)
            num_levels = _plt.kwargs.get('levels', 20)
            if x.shape == y.shape == z.shape:
                # Scattered data: interpolate onto a regular grid first.
                x, y, z = gr.gridit(x, y, z, 200, 200)
                z = np.array(z)
                z_min, z_max = _plt.kwargs.get('zlim', (np.min(z), np.max(z)))
            else:
                z = np.ascontiguousarray(z)
            if _plt.kwargs['scale'] & gr.OPTION_Z_LOG:
                h = [np.exp(np.log(z_min) + i / num_levels * (np.log(z_max) - np.log(z_min))) for i in range(num_levels)]
            else:
                h = [z_min + i / num_levels * (z_max - z_min) for i in range(num_levels)]
            z.shape = np.prod(z.shape)
            gr.contour(x, y, h, z, 1000)
            if colorbar or colorbar is None:
                _colorbar(colors=num_levels)
        elif kind == 'contourf':
            z_min, z_max = _plt.kwargs['zrange']
            gr.setspace(z_min, z_max, 0, 90)
            scale = _plt.kwargs['scale']
            num_levels = _plt.kwargs.get('levels', 20)
            gr.setscale(scale)
            if x.shape == y.shape == z.shape:
                # Scattered data: interpolate onto a regular grid first.
                x, y, z = gr.gridit(x, y, z, 200, 200)
                z = np.array(z)
                z_min, z_max = _plt.kwargs.get('zlim', (np.min(z), np.max(z)))
            else:
                z = np.ascontiguousarray(z)
            if _plt.kwargs['scale'] & gr.OPTION_Z_LOG:
                h = [np.exp(np.log(z_min) + i / num_levels * (np.log(z_max) - np.log(z_min))) for i in range(num_levels)]
            else:
                h = [z_min + i / num_levels * (z_max - z_min) for i in range(num_levels)]
            z.shape = np.prod(z.shape)
            if colorbar or colorbar is None:
                _colorbar(colors=num_levels)
            gr.setlinecolorind(1)
            gr.contourf(x, y, h, z, 0)
        elif kind == 'hexbin':
            nbins = _plt.kwargs.get('nbins', 40)
            cntmax = gr.hexbin(x, y, nbins)
            if cntmax > 0:
                _plt.kwargs['zrange'] = (0, cntmax)
                if colorbar or colorbar is None:
                    _colorbar()
        elif kind == 'heatmap':
            x_min, x_max, y_min, y_max = _plt.kwargs['window']
            if x is not None:
                x_min, x_max = x
            if y is not None:
                y_min, y_max = y
            height, width = z.shape
            # Pack the colormap into 32-bit RGBA pixels for gr.drawimage.
            cmap = _colormap()
            icmap = np.zeros(256, np.uint32)
            for i in range(256):
                r, g, b, a = cmap[i]
                icmap[i] = (int(r * 255) << 0) + (int(g * 255) << 8) + (int(b * 255) << 16) + (int(a * 255) << 24)
            z_min, z_max = _plt.kwargs.get('zlim', (np.min(z), np.max(z)))
            if z_max < z_min:
                z_max, z_min = z_min, z_max
            if _plt.kwargs.get('zlog', False):
                z = np.log(z)
                z_min = np.log(z_min)
                z_max = np.log(z_max)
            if z_max > z_min:
                data = (z - z_min) / (z_max - z_min) * 255
            else:
                data = np.zeros((height, width))
            rgba = np.zeros((height, width), np.uint32)
            for x in range(width):
                for y in range(height):
                    rgba[y, x] = icmap[int(data[y, x])]
            y_min, y_max = y_max, y_min
            gr.drawimage(x_min, x_max, y_min, y_max, width, height, rgba)
            if colorbar or colorbar is None:
                _colorbar()
        elif kind == 'polar_heatmap':
            height, width = z.shape
            z_min, z_max = _plt.kwargs.get('zlim', (np.min(z), np.max(z)))
            if z_max < z_min:
                z_max, z_min = z_min, z_max
            if _plt.kwargs.get('zlog', False):
                z = np.log(z)
                z_min = np.log(z_min)
                z_max = np.log(z_max)
            # BUG FIX: np.int was removed in NumPy 1.24; the builtin int is
            # what the alias referred to.
            if z_max > z_min:
                data = (1000 + (z - z_min) / (z_max - z_min) * 255).astype(int)
            else:
                data = np.zeros((height, width), dtype=int)
            if y is not None:
                phi_range = np.degrees(y)
            else:
                phi_range = None
            phi_min, phi_max = _phase_wrapped_philim(phi_range)
            if _plt.kwargs.get('phiflip', False):
                phi_min_adj, phi_max_adj = _phase_wrapped_philim(adjust=_plt.kwargs.get('adjust_philim', True))
                phi_offset = phi_max_adj + phi_min_adj - phi_max - phi_min
                phi_min, phi_max = phi_max + phi_offset, phi_min + phi_offset
            if x is not None:
                r_range = np.array(x)
            else:
                r_range = np.array(_plt.kwargs['rrange'])
            r_min, r_max = _plt.kwargs['rrange']
            relative_r_min = r_min / r_max
            r_range = ((r_range - r_min) / (r_max - r_min) * (1 - relative_r_min)) + relative_r_min
            if _plt.kwargs.get('rflip', False):
                r_range = 1 + relative_r_min - r_range
            gr.polarcellarray(0, 0, phi_min, phi_max, r_range[0], r_range[1], width, height, data)
            _draw_polar_axes()
            if colorbar or colorbar is None:
                _colorbar()
        elif kind == 'wireframe':
            if x.shape == y.shape == z.shape:
                x, y, z = gr.gridit(x, y, z, 50, 50)
                z = np.array(z)
            else:
                z = np.ascontiguousarray(z)
            z.shape = np.prod(z.shape)
            gr.setfillcolorind(0)
            gr.surface(x, y, z, gr.OPTION_FILLED_MESH)
            _draw_axes(kind, 2)
        elif kind == 'surface':
            if x.shape == y.shape == z.shape:
                x, y, z = gr.gridit(x, y, z, 200, 200)
                z = np.array(z)
            else:
                z = np.ascontiguousarray(z)
            z.shape = np.prod(z.shape)
            if _plt.kwargs.get('accelerate', True) and _gr3_is_available():
                gr3.clear()
                gr3.surface(x, y, z, gr.OPTION_COLORED_MESH)
            else:
                gr.surface(x, y, z, gr.OPTION_COLORED_MESH)
            _draw_axes(kind, 2)
            if colorbar or colorbar is None:
                _colorbar(0.05)
        elif kind == 'plot3':
            gr.polyline3d(x, y, z)
            _draw_axes(kind, 2)
        elif kind == 'scatter3':
            gr.setmarkertype(gr.MARKERTYPE_SOLID_CIRCLE)
            if c is not None:
                c_min = c.min()
                # np.ptp(c) instead of c.ptp(): the ndarray method was
                # removed in NumPy 2.0.
                c_ptp = np.ptp(c)
                for i in range(len(x)):
                    c_index = 1000 + int(255 * (c[i] - c_min) / c_ptp)
                    gr.setmarkercolorind(c_index)
                    gr.polymarker3d([x[i]], [y[i]], [z[i]])
            else:
                gr.polymarker3d(x, y, z)
            _draw_axes(kind, 2)
        elif kind == 'imshow':
            _plot_img(z)
        elif kind == 'isosurface':
            _plot_iso(z)
        elif kind == 'volume':
            algorithm = _plt.kwargs.get('algorithm', 'maximum').lower()
            if algorithm == 'emission':
                _algorithm = 0
            elif algorithm == 'absorption':
                _algorithm = 1
            elif algorithm in ('mip', 'maximum'):
                _algorithm = 2
            else:
                raise ValueError('Invalid volume algorithm. Use "emission", "absorption" or "maximum".')
            dmin = _plt.kwargs.get('dmin', -1)
            dmax = _plt.kwargs.get('dmax', -1)
            if dmin is None:
                dmin = -1
            if dmax is None:
                dmax = -1
            width, height, device_pixel_ratio = gr.inqvpsize()
            gr.setpicturesizeforvolume(int(width * device_pixel_ratio), int(height * device_pixel_ratio))
            dmin, dmax = gr.volume(c, algorithm=_algorithm, dmin=dmin, dmax=dmax)
            # Temporarily expose the data range so the colorbar is labeled
            # with the rendered density values.
            prev_zrange = _plt.kwargs.get('zrange', None)
            _plt.kwargs['zrange'] = (dmin, dmax)
            if colorbar or colorbar is None:
                _colorbar(0.05, label_name='dlabel')
            _plt.kwargs['zrange'] = prev_zrange
            _draw_axes(kind, 2)
        elif kind == 'polar':
            gr.uselinespec(spec)
            _plot_polar(x, y)
        elif kind == 'trisurf':
            gr.trisurface(x, y, z)
            _draw_axes(kind, 2)
            if colorbar or colorbar is None:
                _colorbar(0.05)
        elif kind == 'tricont':
            zmin, zmax = _plt.kwargs['zrange']
            levels = np.linspace(zmin, zmax, 20)
            gr.tricontour(x, y, z, levels)
        elif kind == 'shade':
            xform = _plt.kwargs.get('xform', 5)
            if np.any(np.isnan(x)):
                gr.shadelines(x, y, xform=xform)
            else:
                gr.shadepoints(x, y, xform=xform)
        gr.restorestate()
    if kind in ('line', 'step', 'scatter', 'stem') and 'labels' in _plt.kwargs:
        _draw_legend()
    if _plt.kwargs['update']:
        gr.updatews()
        if gr.isinline():
            return gr.show()
def _plot_img(image):
    """
    Draw an image, either read from a file path or taken from a 2-d array.

    A string *image* is loaded with ``gr.readimage`` and drawn with
    ``gr.drawimage``; any other input is converted to a 2-d array,
    mapped onto color indices 1000..1255 and drawn with ``gr.cellarray``.
    The drawing area inside the current viewport is shrunk so the image
    keeps its aspect ratio and stays centered.
    """
    global _plt
    from_file = isinstance(image, basestring)
    if from_file:
        img_width, img_height, img_data = gr.readimage(image)
        if img_width == 0 or img_height == 0:
            # readimage failed or the file is empty - nothing to draw
            return
    else:
        image = np.array(image)
        img_height, img_width = image.shape
        # Normalize values into the color-index range 1000..1255
        img_data = np.array(1000 + (1.0 * image - image.min()) / image.ptp() * 255, np.int32)
    if _plt.kwargs['clear']:
        gr.clearws()
    if not _plt.kwargs['ax']:
        _set_viewport('line', _plt.kwargs['subplot'])
    viewport = _plt.kwargs['viewport']
    vp = _plt.kwargs['vp']
    vp_width = viewport[1] - viewport[0]
    vp_height = viewport[3] - viewport[2]
    if img_width * vp_height < img_height * vp_width:
        # Image is relatively taller than the viewport: fit to the full
        # height and center horizontally.
        fitted = img_width / img_height * vp_height
        x_min = max(0.5 * (viewport[0] + viewport[1] - fitted), viewport[0])
        x_max = min(0.5 * (viewport[0] + viewport[1] + fitted), viewport[1])
        y_min = viewport[2]
        y_max = viewport[3]
    else:
        # Image is relatively wider: fit to the full width and center
        # vertically.
        fitted = img_height / img_width * vp_width
        x_min = viewport[0]
        x_max = viewport[1]
        y_min = max(0.5 * (viewport[3] + viewport[2] - fitted), viewport[2])
        y_max = min(0.5 * (viewport[3] + viewport[2] + fitted), viewport[3])
    _set_colormap()
    gr.selntran(0)
    if from_file:
        gr.drawimage(x_min, x_max, y_min, y_max, img_width, img_height, img_data)
    else:
        gr.cellarray(x_min, x_max, y_min, y_max, img_width, img_height, img_data)
    if 'title' in _plt.kwargs:
        gr.savestate()
        gr.settextalign(gr.TEXT_HALIGN_CENTER, gr.TEXT_VALIGN_TOP)
        gr.textext(0.5 * (viewport[0] + viewport[1]), vp[3], _plt.kwargs['title'])
        gr.restorestate()
    gr.selntran(1)
def _plot_iso(v):
    """
    Render an isosurface of the 3-d array *v* using GR3.

    Optional settings are read from ``_plt.kwargs``: 'isovalue',
    'rotation', 'tilt' and 'color'. Unless *v* is already uint16, its
    finite values are rescaled to the full uint16 range before meshing.

    :raises RuntimeError: if GR3 cannot be initialized
    """
    global _plt
    if not _gr3_is_available():
        raise RuntimeError("Unable to initialize GR3, please ensure that your system supports OpenGL")
    viewport = _plt.kwargs['viewport']
    # Use a centered, square drawing area inside the viewport
    if viewport[3] - viewport[2] < viewport[1] - viewport[0]:
        width = viewport[3] - viewport[2]
        center_x = 0.5 * (viewport[0] + viewport[1])
        x_min = max(center_x - 0.5 * width, viewport[0])
        x_max = min(center_x + 0.5 * width, viewport[1])
        y_min = viewport[2]
        y_max = viewport[3]
    else:
        height = viewport[1] - viewport[0]
        center_y = 0.5 * (viewport[2] + viewport[3])
        x_min = viewport[0]
        x_max = viewport[1]
        y_min = max(center_y - 0.5 * height, viewport[2])
        y_max = min(center_y + 0.5 * height, viewport[3])
    gr.selntran(0)
    # Ignore +/-inf when determining the data range; NaN handling is left
    # to min()/max() (NOTE(review): NaN input would propagate here)
    usable_vs = v[np.abs(v) != np.inf]
    if np.prod(usable_vs.shape) == 0:
        return
    v_max = usable_vs.max()
    v_min = usable_vs.min()
    if v_min == v_max:
        # Constant data has no isosurface
        return
    uint16_max = np.iinfo(np.uint16).max
    if v.dtype == np.uint16:
        # uint16 data is used as-is; the isovalue defaults to mid-range
        isovalue = int(_plt.kwargs.get('isovalue', 0.5 * uint16_max))
    else:
        # Other dtypes: isovalue is a fraction of the data range
        isovalue = _plt.kwargs.get('isovalue', 0.5)
        isovalue = int((isovalue - v_min) / (v_max - v_min) * uint16_max)
    v = (np.clip(v, v_min, v_max) - v_min) / (v_max - v_min) * uint16_max
    values = v.astype(np.uint16)
    nx, ny, nz = v.shape
    rotation = np.radians(_plt.kwargs.get('rotation', 40))
    tilt = np.radians(_plt.kwargs.get('tilt', 70))
    gr3.clear()
    # Mesh spans the cube [-1, 1]^3 regardless of the grid resolution
    mesh = gr3.createisosurfacemesh(values, (2 / (nx - 1), 2 / (ny - 1), 2 / (nz - 1)),
                                    (-1., -1., -1.), isovalue)
    color = _plt.kwargs.get('color', (0.0, 0.5, 0.8))
    gr3.setbackgroundcolor(1, 1, 1, 0)
    gr3.drawmesh(mesh, 1, (0, 0, 0), (0, 0, 1), (0, 1, 0), color, (1, 1, 1))
    r = 2.5
    # With tilt == 0 the camera sits on the y-axis, so the up vector must
    # not be parallel to the view direction
    if tilt != 0:
        up = (0, 1, 0)
    else:
        up = (0, 0, 1)
    gr3.cameralookat(
        r * np.sin(tilt) * np.sin(rotation), r * np.cos(tilt), r * np.sin(tilt) * np.cos(rotation),
        0, 0, 0,
        up[0], up[1], up[2]
    )
    gr3.drawimage(x_min, x_max, y_min, y_max, 500, 500, gr3.GR3_Drawable.GR3_DRAWABLE_GKS)
    gr3.deletemesh(mesh)
    gr.selntran(1)
def _plot_polar(phi, rho):
    """
    Draw a polar line plot of the points (phi, rho).

    Points outside ``_plt.kwargs['rrange']`` or the (wrapped) phi limits
    split the curve; each remaining run of in-range points is drawn as a
    separate polyline. rho is normalized so that r_min maps onto the
    innermost circle; the optional 'rflip' and 'phiflip' settings mirror
    the respective axes.
    """
    global _plt
    r_min, r_max = _plt.kwargs['rrange']
    if _plt.kwargs.get('adjust_rlim', True):
        r_min, r_max = gr.adjustlimits(r_min, r_max)
    phi_min, phi_max = np.radians(_phase_wrapped_philim(adjust=_plt.kwargs.get('adjust_philim', True)))
    phi = np.fmod(phi, 2 * np.pi)
    split_rho = np.logical_or(rho < r_min, rho > r_max)
    split_phi = np.logical_or(phi < phi_min, phi > phi_max)
    # np.int was removed in NumPy 1.24; np.intp is the platform index type
    split = np.where(np.logical_or(split_phi, split_rho))[0].astype(np.intp)
    relative_r_min = r_min / r_max
    rho = ((rho - r_min) / (r_max - r_min) * (1 - relative_r_min)) + relative_r_min
    if _plt.kwargs.get('rflip', False):
        rho = 1 + relative_r_min - rho
    if _plt.kwargs.get('phiflip', False):
        phi = phi_max + phi_min - phi
    for r, a in zip(np.split(rho, split), np.split(phi, split)):
        # A run of one point (or none) cannot form a line segment
        if not len(r) > 1:
            continue
        x = r * np.cos(a)
        y = r * np.sin(a)
        gr.polyline(x, y)
def _convert_to_array(obj, may_be_2d=False, xvalues=None, always_flatten=False):
global _plt
if callable(obj):
if xvalues is None:
raise TypeError('object is callable, but xvalues is None')
if len(xvalues.shape) == 1:
a = np.fromiter((obj(x) for x in xvalues), np.float64)
else:
a = np.fromiter((obj(x, y) for x, y in xvalues), np.float64)
else:
a = obj
try:
if obj.__iter__() is obj:
a = np.fromiter(obj, np.float64, len(xvalues))
except (AttributeError, TypeError):
pass
try:
a = np.array(a, copy=False)
except TypeError:
raise TypeError("expected a sequence, but got '{}'".format(type(obj)))
if always_flatten:
# Only contiguous arrays can be flattened this way
a = np.ascontiguousarray(a)
a.shape = np.prod(a.shape)
# Ensure a shape of (n,) or (n, 2) if may_be_2d is True
dimension = sum(i != 1 for i in a.shape)
if may_be_2d and dimension > 2:
raise TypeError("expected a 1d- or 2d-sequence, but got shape {}".format(a.shape))
if not may_be_2d and dimension > 1:
raise TypeError("expected a 1d-sequence, but got shape {}".format(a.shape))
if dimension == 0:
dimension = 1
a.shape = [1]
else:
a.shape = [i for i in a.shape if i != 1]
if dimension == 2:
if 2 not in a.shape:
raise TypeError("expected a sequence of pairs, but got shape {}".format(a.shape))
if a.shape[0] == 2 and a.shape[1] != 2:
a = a.T
n = len(a)
assert a.shape == (n,) or a.shape == (n, 2)
if a.dtype == complex:
if dimension != 1:
raise TypeError("expected a sequence of complex values, but got shape {}".format(a.shape))
a = np.hstack((np.real(a), np.imag(a)))
dimension = 2
elif a.dtype != np.float64:
try:
a = a.astype(np.float64)
except (TypeError, ValueError):
raise TypeError("expected a sequence of real values, but got '{}'".format(a.dtype))
return a
def _plot_bar():
    """
    Draw a bar plot (single or multi-bar) from the current plot state.

    Data is taken from ``_plt.args[0]`` and styling from ``_plt.kwargs``:
    ``multi_bar``, ``bar_style`` ('stacked' or 'lined'), ``bar_width``,
    ``bar_color``, ``edge_color``, ``edge_width`` and the per-bar
    overrides ``ind_bar_color``, ``ind_edge_color`` and
    ``ind_edge_width`` (positions in the overrides are 1-based).
    Fills are drawn first, then the edges in a second pass.

    :raises TypeError: if a styling option has the wrong type
    :raises ValueError: if a styling option is out of range
    :raises IndexError: if an individual-override specification is malformed
    """
    global _plt
    kwargs = _plt.kwargs
    args = _plt.args[0]
    multi_bar = kwargs['multi_bar']
    style = kwargs['bar_style']
    # Default
    bar_width = 1
    edgewidth = 1
    edgecolor = [0, 0, 0]
    std_colors = [989, 982, 980, 981, 996, 983, 995, 988, 986, 990, 991, 984, 992, 993, 994, 987, 985, 997, 998, 999]
    color = std_colors[0]
    # Color index used as scratch space for RGB triplets via gr.setcolorrep
    color_save_spot = 1000
    # Args
    if multi_bar:
        y_values = args[2]
        colorlist = args[3]
    else:
        y_values = args[1]
        colorlist = args[3]
    # Width
    if kwargs.get('bar_width'):
        bar_width = kwargs['bar_width']
        if not isinstance(bar_width, (int, float)):
            raise TypeError('Bar_width has to be an int or float!')
        elif bar_width > 1 or bar_width <= 0:
            raise ValueError('Bar_width has to be in (0;1]!')
    # Color
    if kwargs.get('bar_color'):
        color = kwargs['bar_color']
        if isinstance(kwargs['bar_color'], int):
            if not color >= 0 or not color < 1256:
                raise ValueError('Bar_color has to be in [0;1256)!')
        elif isinstance(kwargs['bar_color'], list) and len(kwargs['bar_color']) == 3:
            for c in color:
                if c < 0 or c > 1:
                    raise ValueError('The values of bar_color have to be in [1;0]!')
        else:
            raise TypeError('Bar_color has to be an int or list of RGB values!')
    # EdgeColor
    if kwargs.get('edge_color'):
        edgecolor = kwargs['edge_color']
        if isinstance(kwargs['edge_color'], int):
            if not edgecolor >= 0 or not edgecolor < 1256:
                raise ValueError('Edge_color has to be in [0;1256)!')
        elif isinstance(kwargs['edge_color'], list) and len(kwargs['edge_color']) == 3:
            for c in edgecolor:
                if c < 0 or c > 1:
                    raise ValueError('The values of edge_color have to be in [1;0]!')
        else:
            raise TypeError('Edge_color has to be an int or list of RGB values!')
    # EdgeWidth
    if kwargs.get('edge_width'):
        if not isinstance(kwargs['edge_width'], (float, int)):
            raise TypeError('Edge_width has to be of type int or float')
        edgewidth = kwargs['edge_width']
        if edgewidth < 0:
            raise ValueError('Edge_width has to be bigger or equal to 0!')
    # Change individual color
    # An override is either a single colorpack [positions, rgb] or a list of
    # colorpacks; positions may be a single 1-based index or a list of them.
    pos_indcolor = []
    changecolor = False
    indcolors = [None for i in range(len(y_values) + 1)]
    if 'ind_bar_color' in kwargs:
        changecolor = True
        indcolor = kwargs.pop('ind_bar_color')
        if not isinstance(indcolor, list):
            raise TypeError('Ind_bar_color has to be a list!')
        if not indcolor:
            raise IndexError('Ind_bar_color can`t be empty!')
        if isinstance(indcolor[0], list) and not indcolor[0]:
            raise IndexError('Ind_bar_color[0] can`t be empty!')
        try:
            if isinstance(indcolor[0], list) and isinstance(indcolor[0][1], int) or isinstance(indcolor[0], int):
                has_pairs = False
            elif isinstance(indcolor[0], list) and isinstance(indcolor[0][1], list):
                has_pairs = True
            else:
                raise TypeError('Ind_bar_color[0] has to be an int or list!')
        except IndexError:
            raise IndexError('Ind_bar_color[0] has to be a colorpack, a list bigger than 1 or an int!')
        if not has_pairs:
            # Normalize a single colorpack to a list of colorpacks
            indcolor = [indcolor]
        for colorpack in indcolor:
            if not isinstance(colorpack[0], (list, int)) or not isinstance(colorpack[1], list):
                raise TypeError('Colorpack[0] has to be an int or list and colorpack[1] has to be a list!')
            if not len(colorpack[1]) == 3:
                raise IndexError('Colorpack[1] has to be of length 3!')
            for c in colorpack[1]:
                if c < 0 or c > 1:
                    raise ValueError('The values in colorpack[1] have to be in [0;1]!')
            indcolor_rgb = [colorpack[1][0], colorpack[1][1], colorpack[1][2]]
            if isinstance(colorpack[0], list):
                pos_indcolor.extend(colorpack[0])
                for index in colorpack[0]:
                    indcolors[index] = indcolor_rgb
            elif isinstance(colorpack[0], int):
                pos_indcolor.append(colorpack[0])
                indcolors[colorpack[0]] = indcolor_rgb
    # Change individual edgecolor
    pos_indedgecolor = []
    changeedgecolor = False
    indedgecolors = [None for i in range(len(y_values) + 1)]
    if 'ind_edge_color' in kwargs:
        changeedgecolor = True
        indedgecolor = kwargs.pop('ind_edge_color')
        if not isinstance(indedgecolor, list):
            raise TypeError('Ind_edge_color has to be a list!')
        try:
            if isinstance(indedgecolor[0], list) and isinstance(indedgecolor[0][1], int) or isinstance(indedgecolor[0], int):
                has_pairs = False
            elif isinstance(indedgecolor[0], list) and isinstance(indedgecolor[0][1], list):
                has_pairs = True
            else:
                raise TypeError('Ind_edge_color[0] has to be an int or list!')
        except IndexError:
            raise IndexError('Ind_edge_color[0] has to be a colorpack, a list bigger than 1 or an int!')
        if not has_pairs:
            # Normalize a single colorpack to a list of colorpacks
            indedgecolor = [indedgecolor]
        for colorpack in indedgecolor:
            if not isinstance(colorpack[0], (list, int)):
                raise TypeError('Colorpack[0] has to be an int or list!')
            if not isinstance(colorpack[1], list):
                raise TypeError('Colorpack[1] has to be a list!')
            if len(colorpack[1]) != 3:
                raise IndexError('Colorpack[1] has to be of length 3!')
            for c in colorpack[1]:
                if c < 0 or c > 1:
                    raise ValueError('The values in colorpack[1] have to be in [0;1]!')
            indedgecolor_rgb = [colorpack[1][0], colorpack[1][1], colorpack[1][2]]
            if isinstance(colorpack[0], list):
                pos_indedgecolor.extend(colorpack[0])
                for index in colorpack[0]:
                    indedgecolors[index] = indedgecolor_rgb
            elif isinstance(colorpack[0], int):
                pos_indedgecolor.append(colorpack[0])
                indedgecolors[colorpack[0]] = indedgecolor_rgb
    # Change individual edge width
    pos_indedgewidth = []
    changeedgewidth = False
    indedgewidths = [None for i in range(len(y_values) + 1)]
    if 'ind_edge_width' in kwargs:
        indedgewidth = kwargs.pop('ind_edge_width')
        if not isinstance(indedgewidth, list):
            raise TypeError('Ind_edge_width has to be of type list!')
        if isinstance(indedgewidth[1], (float, int)) and isinstance(indedgewidth[0], (int, list)):
            has_pairs = False
        elif isinstance(indedgewidth[1], list):
            has_pairs = True
        else:
            raise TypeError('Ind_edge_width[0] has to be of type int or list and Ind_edge_width[1] of type int '
                            'or float!')
        if not has_pairs:
            # Normalize a single widthpack to a list of widthpacks
            indedgewidth = [indedgewidth]
        for widthpack in indedgewidth:
            if isinstance(widthpack[1], (float, int)) and isinstance(widthpack[0], (int, list)):
                if not widthpack[1] >= 0:
                    raise ValueError('Widthpack[1] has to be bigger or equal to 0!')
                changeedgewidth = True
                ind_edgewidth = widthpack[1]
                if isinstance(widthpack[0], list):
                    pos_indedgewidth.extend(widthpack[0])
                    for index in widthpack[0]:
                        indedgewidths[index] = ind_edgewidth
                elif isinstance(widthpack[0], int):
                    pos_indedgewidth.append(widthpack[0])
                    indedgewidths[widthpack[0]] = ind_edgewidth
    # Draw bars
    # wfac is the total width fraction reserved for one group of lined bars
    wfac = bar_width * 0.8
    gr.setfillintstyle(1)
    for a, y in enumerate(y_values):
        # Bars are positioned at 1-based x coordinates
        x = a + 1
        if style == 'stacked' and multi_bar:
            # c accumulates the stack height of the previous segments
            c = 0
            for i in range(0, len(y), 1):
                if changecolor and (i + 1 in pos_indcolor):
                    current_rgb = indcolors[i + 1]
                    gr.setcolorrep(color_save_spot, current_rgb[0], current_rgb[1], current_rgb[2])
                    gr.setfillcolorind(color_save_spot)
                else:
                    if colorlist:
                        if isinstance(colorlist[i], int):
                            gr.setfillcolorind(colorlist[i])
                        else:
                            current_rgb = colorlist[i]
                            gr.setcolorrep(color_save_spot, current_rgb[0], current_rgb[1], current_rgb[2])
                            gr.setfillcolorind(color_save_spot)
                    else:
                        gr.setfillcolorind(std_colors[i % len(std_colors)])
                gr.fillrect((x - 0.5 * bar_width), (x + 0.5 * bar_width),
                            c, (y[i] + c))
                c = y[i] + c
        elif style == 'lined' and multi_bar:
            # Divide the group width evenly among the bars of this group
            bar_width = wfac / (len(y))
            for i in range(0, len(y), 1):
                if changecolor and (i + 1 in pos_indcolor):
                    current_rgb = indcolors[i + 1]
                    gr.setcolorrep(color_save_spot, current_rgb[0], current_rgb[1], current_rgb[2])
                    gr.setfillcolorind(color_save_spot)
                else:
                    if colorlist:
                        if isinstance(colorlist[i], int):
                            gr.setfillcolorind(colorlist[i])
                        else:
                            current_rgb = colorlist[i]
                            gr.setcolorrep(color_save_spot, current_rgb[0], current_rgb[1], current_rgb[2])
                            gr.setfillcolorind(color_save_spot)
                    else:
                        gr.setfillcolorind(std_colors[i % len(std_colors)])
                gr.fillrect((x - 0.5 * wfac + bar_width * i),
                            (x - 0.5 * wfac + bar_width + bar_width * i), 0,
                            y[i])
        else:
            # Single-bar plot: one rectangle per y value
            if changecolor and (x in pos_indcolor):
                current_rgb = indcolors[x]
                gr.setcolorrep(color_save_spot, current_rgb[0], current_rgb[1], current_rgb[2])
                gr.setfillcolorind(color_save_spot)
            else:
                if colorlist:
                    if isinstance(colorlist[a], int):
                        gr.setfillcolorind(colorlist[a])
                    else:
                        current_rgb = colorlist[a]
                        gr.setcolorrep(color_save_spot, current_rgb[0], current_rgb[1], current_rgb[2])
                        gr.setfillcolorind(color_save_spot)
                else:
                    if isinstance(color, int):
                        gr.setfillcolorind(color)
                    else:
                        current_rgb = color
                        gr.setcolorrep(color_save_spot, current_rgb[0], current_rgb[1], current_rgb[2])
                        gr.setfillcolorind(color_save_spot)
            gr.fillrect((x - 0.5 * bar_width), (x + 0.5 * bar_width), 0, y)
    # Draw edges
    # Second pass: outline every bar on top of the fills
    gr.setfillintstyle(0)
    for a, y in enumerate(y_values):
        x = a + 1
        if style == 'stacked' and multi_bar:
            c = 0
            for i in range(0, len(y), 1):
                if changeedgecolor and (i + 1 in pos_indedgecolor):
                    current_rgb = indedgecolors[i + 1]
                    gr.setcolorrep(color_save_spot, current_rgb[0], current_rgb[1], current_rgb[2])
                    gr.setlinecolorind(color_save_spot)
                else:
                    if isinstance(edgecolor, int):
                        gr.setlinecolorind(edgecolor)
                    else:
                        current_rgb = edgecolor
                        gr.setcolorrep(color_save_spot, current_rgb[0], current_rgb[1], current_rgb[2])
                        gr.setlinecolorind(color_save_spot)
                if changeedgewidth and (i + 1 in pos_indedgewidth):
                    gr.setlinewidth(indedgewidths[i + 1])
                else:
                    gr.setlinewidth(edgewidth)
                gr.drawrect((x - 0.5 * bar_width), (x + 0.5 * bar_width),
                            c, (y[i] + c))
                c = y[i] + c
        elif style == 'lined' and multi_bar:
            bar_width = wfac / (len(y))
            for i in range(0, len(y), 1):
                if changeedgecolor and (i + 1 in pos_indedgecolor):
                    current_rgb = indedgecolors[i + 1]
                    gr.setcolorrep(color_save_spot, current_rgb[0], current_rgb[1], current_rgb[2])
                    gr.setlinecolorind(color_save_spot)
                else:
                    if isinstance(edgecolor, int):
                        gr.setlinecolorind(edgecolor)
                    else:
                        current_rgb = edgecolor
                        gr.setcolorrep(color_save_spot, current_rgb[0], current_rgb[1], current_rgb[2])
                        gr.setlinecolorind(color_save_spot)
                if changeedgewidth and (i + 1 in pos_indedgewidth):
                    gr.setlinewidth(indedgewidths[i + 1])
                else:
                    gr.setlinewidth(edgewidth)
                gr.drawrect((x - 0.5 * wfac + bar_width * i),
                            (x - 0.5 * wfac + bar_width + bar_width * i), 0,
                            y[i])
        else:
            if changeedgecolor and (x in pos_indedgecolor):
                current_rgb = indedgecolors[x]
                gr.setcolorrep(color_save_spot, current_rgb[0], current_rgb[1], current_rgb[2])
                gr.setlinecolorind(color_save_spot)
            else:
                if isinstance(edgecolor, int):
                    gr.setlinecolorind(edgecolor)
                else:
                    current_rgb = edgecolor
                    gr.setcolorrep(color_save_spot, current_rgb[0], current_rgb[1], current_rgb[2])
                    gr.setlinecolorind(color_save_spot)
            if changeedgewidth and (x in pos_indedgewidth):
                gr.setlinewidth(indedgewidths[x])
            else:
                gr.setlinewidth(edgewidth)
            gr.drawrect((x - 0.5 * bar_width), (x + 0.5 * bar_width), 0, y)
def _create_colormap(tup):
    """
    Build a size x size RGBA pixmap blending two GR colormaps.

    :param tup: a triple ``(colormap_x, colormap_y, size)``. Each colormap
        may be ``None`` (contributing zeros, or the default map when both
        are ``None``), a GR colormap index or a name from ``gr.COLORMAPS``.
        ``size`` defaults to 700 when ``None``.
    :raises ValueError: if *tup* is not a triple or a colormap parameter
        is neither ``None``, an int nor a str
    :return: a contiguous 2-d integer array of packed 0xAABBGGRR values
    """
    if len(tup) != 3:
        raise ValueError('colormap must be a triple tuple!')
    if tup[2] is None:
        size = 700
    else:
        size = tup[2]

    def _lookup(cmap):
        # Activate the given colormap (index or name) and return the
        # resulting 256x4 RGBA table scaled to 0..255.
        if isinstance(cmap, int):
            gr.setcolormap(cmap)
        elif isinstance(cmap, str):
            gr.setcolormap(gr.COLORMAPS[cmap])
        else:
            raise ValueError('Invalid colormap parameter')
        return np.array(_colormap() * 255, dtype=int)

    if tup[0] is None and tup[1] is None:
        # Neither map given: use the default colormap on the y axis only
        gr.setcolormap(0)
        COLORMAP_Y = np.array(_colormap() * 255, dtype=int)
        COLORMAP_X = np.zeros((256, 4))
        factor = 1
    elif tup[0] is not None and tup[1] is None:
        COLORMAP_X = _lookup(tup[0])
        COLORMAP_Y = np.zeros((256, 4))
        factor = 1
    elif tup[0] is None and tup[1] is not None:
        COLORMAP_X = np.zeros((256, 4))
        COLORMAP_Y = _lookup(tup[1])
        factor = 1
    else:
        # Both maps given: average their contributions
        COLORMAP_X = _lookup(tup[0])
        COLORMAP_Y = _lookup(tup[1])
        factor = 2
    size_range = range(256)
    lins_float = np.linspace(0, 255, num=size)

    def _channel(channel, shift):
        # Resample one RGBA channel of both maps onto `size` points,
        # combine them on a broadcast grid and shift into the byte slot.
        # NOTE: dtype=int replaces np.int, which was removed in NumPy 1.24.
        c1 = np.array(np.interp(lins_float, size_range, COLORMAP_X[size_range, channel])).reshape(1, -1)
        c2 = np.array(np.interp(lins_float, size_range, COLORMAP_Y[size_range, channel])).reshape(-1, 1)
        return np.left_shift(np.array((c1 + c2) / factor, dtype=int), shift)

    # Pack red | green << 8 | blue << 16 | alpha << 24
    pixmap = np.array(_channel(0, 0) | _channel(1, 8) | _channel(2, 16) | _channel(3, 24))
    return np.ascontiguousarray(pixmap)
def _plot_polar_histogram():
def moivre(r, x, n):
list1 = [1, 0]
if n != 0:
list1[0] = r ** (1 / n) * (np.cos((2 * x * np.pi) / n))
list1[1] = r ** (1 / n) * (np.sin((2 * x * np.pi) / n))
return list1
global _plt
edgecolor = 1
facecolor = 989
facealpha = 0.75
temp_face = None
temp_edge = None
vp = _plt.kwargs['subplot']
if vp[1] - vp[0] > 0.99 and vp[3] - vp[2] > 0.99:
if 'title' in _plt.kwargs:
gr.savestate()
gr.settextalign(gr.TEXT_HALIGN_CENTER, gr.TEXT_VALIGN_TOP)
gr.textext(0.5 * (vp[0] + vp[1]), 0.95, _plt.kwargs['title'])
gr.restorestate()
vp = [0.1, 0.9, 0.1, 0.9]
else:
vp[3] = vp[2] + vp[1] - vp[0]
_set_viewport('polar_histogram', vp)
vp = gr.inqviewport()
vp[3] = vp[2] + vp[1] - vp[0]
temp_add = (vp[1] - vp[0]) * 0.25
if 'title' in _plt.kwargs:
factor = 0.3
vp[0] += temp_add * 2 * factor
vp[1] -= temp_add * 2 * factor
gr.savestate()
gr.settextalign(gr.TEXT_HALIGN_CENTER, gr.TEXT_VALIGN_TOP)
gr.textext(0.5 * (vp[0] + vp[1]), vp[3] - temp_add * 4 * 0.25, _plt.kwargs['title'])
gr.restorestate()
vp[3] -= temp_add * 4 * factor
del factor
else:
vp[1] -= temp_add
vp[3] -= temp_add
del temp_add
gr.setviewport(vp[0], vp[1], vp[2], vp[3])
gr.setlinewidth(1)
gr.setwindow(0, 1, 0, 1)
convert = 180 / np.pi
norm_factor = _plt.kwargs['norm_factor']
normalization = _plt.kwargs['normalization']
classes = _plt.kwargs['classes']
del _plt.kwargs['classes']
if _plt.kwargs.get('bin_edges', None) is not None:
is_binedges = True
binedges = _plt.kwargs['bin_edges']
else:
is_binedges = False
if _plt.kwargs.get('temp_bin_edges', None) is not None:
is_binedges = True
binedges = _plt.kwargs['temp_bin_edges']
del _plt.kwargs['temp_bin_edges']
temp = _plt.kwargs['border_exp']
border = temp[0]
exp = temp[1]
del temp
del _plt.kwargs['border_exp']
gr.settransparency(0.8)
num_bins = len(classes)
if _plt.kwargs.get('rlim', None) is not None:
r_min = _plt.kwargs['rlim']
r_max = r_min[1]
r_min = r_min[0]
is_rlim = True
else:
is_rlim = False
# face_color either a number or an rgb triplet [x,y,z]
if 'face_color' in _plt.kwargs:
facecolor = _plt.kwargs['face_color']
if isinstance(facecolor, list):
if len(facecolor) == 3:
temp_face = gr.inqcolor(1004)
gr.setcolorrep(1004, facecolor[0], facecolor[1], facecolor[2])
facecolor = 1004
else:
raise ValueError('RGB Triplet not correct')
elif not isinstance(facecolor, int):
raise ValueError('face_color not correct')
# face_alpha
if 'face_alpha' in _plt.kwargs:
facealpha = _plt.kwargs['face_alpha']
if not (0 <= facealpha <= 1):
raise ValueError('face_alpha not correct')
# edge_color
if 'edge_color' in _plt.kwargs:
edgecolor = _plt.kwargs['edge_color']
if isinstance(edgecolor, list):
if len(edgecolor) == 3:
temp_edge = gr.inqcolor(1005)
gr.setcolorrep(1005, edgecolor[0], edgecolor[1], edgecolor[2])
edgecolor = 1005
elif not isinstance(edgecolor, int):
raise ValueError('Incorrect Color Value. Either Integer or rgb triplet in a list')
# drawing the circles + x-Axis numbers
gr.settextalign(1, 1)
window_width = (vp[1] - vp[0])
center_x = window_width / 2 + vp[0]
center_y = (vp[3] - vp[2]) / 2 + vp[2]
for x in range(4):
gr.drawarc((0.1 + x * 0.1), (0.9 - 0.1 * x), (0.1 + x * 0.1), (0.9 - 0.1 * x), 0, 360)
if normalization == 'count' or normalization == 'cumcount':
gr.text(center_x * 1.02, center_y + (x + 1) * window_width * 0.8 / 2 / 4, str(int((x + 1) * border / 4)))
else:
if normalization == 'probability':
gr.text(center_x * 1.02, center_y + (x + 1) * window_width * 0.8 / 2 / 4,
str(round(((x + 1) * border / 4) * 10 ** (exp + 2 + int(num_bins / 25))) / 10 ** (
exp + 2 + int(num_bins / 25))))
else:
gr.text(center_x * 1.02, center_y + (x + 1) * window_width * 0.8 / 2 / 4,
str(round(((x + 1) * border / 4) * 10 ** (exp + 3)) / 10 ** (exp + 3)))
# drawinng lines + angle
number = 0
gr.settextalign(2, 3)
for x in range(12):
liste = moivre(0.4 ** 12, x, 12)
gr.polyline([0.5, 0.5 + (1 * liste[0])], [0.5, 0.5 + (1 * liste[1])])
gr.text(center_x + (liste[0]) * window_width * 1.15, center_y + liste[1] * window_width * 1.15, str(number))
number += 30
gr.settransparency(facealpha)
length = 0
# colormap
if 'temp_colormap' in _plt.kwargs:
triple = _plt.kwargs['temp_colormap']
del _plt.kwargs['temp_colormap']
gr.drawimage(0, 1, 1, 0, triple[0], triple[1], triple[2])
# draw_edges for Colormap: if draw_edges is given, the edges will be drawn
if 'draw_edges' in _plt.kwargs:
if _plt.kwargs['draw_edges'] is True:
gr.setlinecolorind(edgecolor)
gr.setlinewidth(1.5)
if is_binedges:
if normalization == 'countdensity':
norm_factor = 1
if normalization == 'countdensity' or normalization == 'pdf':
binwidths = []
classes = [temp for temp in classes if len(temp) > 0]
for i in range(len(binedges) - 1):
binwidths.append(binedges[i + 1] - binedges[i])
binwidths.append(binedges[-1] - binedges[-2])
bin_value = [len(x) / (norm_factor * binwidths[i]) if x[0] is not None else 0
for (i, x) in enumerate(classes)]
else:
bin_value = [len(x) / (norm_factor) if x[0] is not None else 0
for (i, x) in enumerate(classes)]
else:
bin_value = [len(x) / norm_factor if x is not None else 0 for x in classes]
length = 0
mlist = []
for x in range(len(classes)):
if normalization == 'cumcount' or normalization == 'cdf':
if classes[x][0] is None:
pass
else:
length = len(classes[x]) / norm_factor + length
elif classes[x][0] is None:
continue
elif normalization == 'pdf' or normalization == 'countdensity':
length = bin_value[x]
else:
length = len(classes[x]) / norm_factor
r = (length / border * 0.4) ** (num_bins * 2)
liste = moivre(r, (2 * x), num_bins * 2)
rect = np.sqrt(liste[0] ** 2 + liste[1] ** 2)
if is_rlim:
liste2 = moivre(r, (2 * x + 2), (num_bins * 2))
mlist.append(liste)
mlist.append(liste2)
r_min_list = moivre((r_min * 0.4) ** (num_bins * 2), (x * 2), num_bins * 2)
r_min_list2 = moivre((r_min * 0.4) ** (num_bins * 2), (x * 2 + 2), num_bins * 2)
for kaman in (-1, -2):
temporary = abs(np.sqrt(mlist[kaman][0]**2 + mlist[kaman][1]**2))
if temporary > (r_max * 0.4):
factor = abs(r_max * 0.4 / temporary)
mlist[kaman][0] *= factor
mlist[kaman][1] *= factor
del temporary
gr.settransparency(1)
gr.setfillintstyle(0)
gr.setfillcolorind(edgecolor)
if is_binedges:
if is_rlim:
rect = int(rect * 10000)
rect = rect / 10000
if round(rect, 3) > r_min * 0.4:
try:
gr.drawarc(0.5 - min(rect, r_max * 0.4), 0.5 + min(rect, r_max * 0.4),
0.5 - min(rect, r_max * 0.4), 0.5 + min(rect, r_max * 0.4),
binedges[x] * convert,
binedges[x + 1] * convert)
gr.drawarc(0.5 - r_min * 0.4, 0.5 + r_min * 0.4, 0.5 - r_min * 0.4, 0.5 + r_min * 0.4,
binedges[x] * convert,
binedges[x + 1] * convert)
gr.polyline([0.5 + r_min * 0.4 * np.cos(binedges[x]), 0.5 + min(rect, r_max * 0.4) * np.cos(binedges[x])],
[0.5 + r_min * 0.4 * np.sin(binedges[x]), 0.5 + min(rect, r_max * 0.4) * np.sin(binedges[x])])
gr.polyline([0.5 + r_min * 0.4 * np.cos(binedges[x + 1]),
0.5 + min(rect, r_max * 0.4) * np.cos(binedges[x + 1])],
[0.5 + r_min * 0.4 * np.sin(binedges[x + 1]),
0.5 + min(rect, r_max * 0.4) * np.sin(binedges[x + 1])])
except Exception:
pass
# no rlim
else:
try:
gr.fillarc(0.5 - rect, 0.5 + rect, 0.5 - rect, 0.5 + rect, binedges[x] * convert,
binedges[x + 1] * convert)
except Exception:
pass
# no binedges
else:
if is_rlim:
rect = int(rect * 10000)
rect = rect / 10000
if round(rect, 3) > r_min * 0.4:
gr.drawarc(0.5 - min(rect, r_max * 0.4), 0.5 + min(rect, r_max * 0.4),
0.5 - min(rect, r_max * 0.4), 0.5 + min(rect, r_max * 0.4),
x * (360 / num_bins),
(x + 1) * (360 / num_bins))
gr.drawarc(0.5 - r_min * 0.4, 0.5 + r_min * 0.4, 0.5 - r_min * 0.4, 0.5 + r_min * 0.4,
x * (360 / num_bins), (x + 1) * (360 / num_bins))
gr.polyline([0.5 + r_min_list[0], 0.5 + mlist[2 * x][0]],
[0.5 + r_min_list[1], 0.5 + mlist[2 * x][1]])
gr.polyline([0.5 + r_min_list2[0], 0.5 + mlist[2 * x + 1][0]],
[0.5 + r_min_list2[1], 0.5 + mlist[2 * x + 1][1]])
# Normal (no rlim)
else:
gr.fillarc(0.5 - rect, 0.5 + rect, 0.5 - rect, 0.5 + rect, x * (360 / num_bins),
(x + 1) * (360 / num_bins))
del mlist
# No Colormap
else:
if is_binedges:
if normalization == 'pdf':
pass
elif normalization == 'countdensity':
norm_factor = 1
binwidths = []
classes = [bin for bin in classes if len(bin) > 0]
for i in range(len(binedges) - 1):
binwidths.append(binedges[i + 1] - binedges[i])
binwidths.append(binedges[-1] - binedges[-2])
if normalization == 'countdensity' or normalization == 'pdf':
bin_value = [len(x) / (norm_factor * binwidths[i]) for (i, x) in enumerate(classes)]
else:
bin_value = [len(x) / norm_factor if x[0] is not None else 0 for x in classes]
# No binedges
else:
if normalization == 'countdensity':
bin_value = [len(x) / norm_factor if x[0] is not None else 0 for x in classes]
elif normalization == 'pdf':
bin_value = [len(x) / (norm_factor) if x[0] is not None else 0 for x in classes]
else:
bin_value = [len(x) / norm_factor if x[0] is not None else 0 for x in classes]
# no stairs
if _plt.kwargs.get('stairs', False) is False:
mlist = []
for x in range(len(classes)):
if normalization == 'cumcount' or normalization == 'cdf':
if classes[x][0] is None:
pass
else:
length = len(classes[x]) / norm_factor + length
elif normalization == 'pdf' or normalization == 'countdensity':
length = bin_value[x]
elif classes[x][0] is None:
continue
else:
length = len(classes[x]) / norm_factor
r = (length / border * 0.4) ** (num_bins * 2)
liste = moivre(r, (2 * x), num_bins * 2)
rect = np.sqrt(liste[0] ** 2 + liste[1] ** 2)
gr.setfillcolorind(facecolor)
gr.settransparency(facealpha)
gr.setfillintstyle(1)
if is_rlim:
liste2 = moivre(r, (2 * x + 2), (num_bins * 2))
mlist.append(liste)
mlist.append(liste2)
r_min_list = moivre((r_min * 0.4) ** (num_bins * 2), (x * 2), num_bins * 2)
r_min_list2 = moivre((r_min * 0.4) ** (num_bins * 2), (x * 2 + 2), num_bins * 2)
for kaman in (-1, -2):
temporary = abs(np.sqrt(mlist[kaman][0] ** 2 + mlist[kaman][1] ** 2))
if temporary > (r_max * 0.4):
factor = abs(r_max * 0.4 / temporary)
mlist[kaman][0] *= factor
mlist[kaman][1] *= factor
del temporary
r = length / border * 0.4
if r > r_max * 0.4:
r = r_max * 0.4
if is_binedges:
if is_rlim:
try:
if r > r_min * 0.4:
start_angle = binedges[x]
end_angle = binedges[x + 1]
diff_angle = end_angle - start_angle
num_angle = int(diff_angle / (0.2 / convert)) * 1j
phi_array = np.array(
np.ogrid[start_angle: end_angle:num_angle], dtype=np.float)
arc_1_x = [r * np.cos(phi) + 0.5 for phi in phi_array]
arc_1_y = [r * np.sin(phi) + 0.5 for phi in phi_array]
arc_2_x = [r_min * 0.4 * np.cos(phi) + 0.5 for phi in phi_array]
arc_2_y = [r_min * 0.4 * np.sin(phi) + 0.5 for phi in phi_array]
line_1_x = [0.5 + r_min * 0.4 * np.cos(binedges[x]),
0.5 + min(rect, r_max * 0.4) * np.cos(binedges[x])]
line_1_y = [0.5 + r_min * 0.4 * np.sin(binedges[x]),
0.5 + min(rect, r_max * 0.4) * np.sin(binedges[x])]
line_2_x = [0.5 + r_min * 0.4 * np.cos(binedges[x + 1]),
0.5 + min(rect, r_max * 0.4) * np.cos(binedges[x + 1])]
line_2_y = [0.5 + r_min * 0.4 * np.sin(binedges[x + 1]),
0.5 + min(rect, r_max * 0.4) * np.sin(binedges[x + 1])]
gr.setfillintstyle(1)
gr.fillarea(np.hstack((
line_1_x, arc_1_x, line_2_x[::-1], arc_2_x[::-1],)),
np.hstack((
line_1_y, arc_1_y, line_2_y[::-1], arc_2_y[::-1],))
)
gr.setfillintstyle(0)
gr.setfillcolorind(edgecolor)
gr.fillarea(np.hstack((
line_1_x, arc_1_x, line_2_x[::-1], arc_2_x[::-1],)),
np.hstack((
line_1_y, arc_1_y, line_2_y[::-1], arc_2_y[::-1],))
)
except Exception:
pass
pass
else:
try:
gr.fillarc(0.5 - rect, 0.5 + rect, 0.5 - rect, 0.5 + rect, binedges[x] * convert,
binedges[x + 1] * convert)
except Exception:
pass
gr.settransparency(1)
gr.setfillintstyle(0)
gr.setfillcolorind(edgecolor)
try:
gr.fillarc(0.5 - rect, 0.5 + rect, 0.5 - rect, 0.5 + rect, binedges[x] * convert,
binedges[x + 1] * convert)
except Exception:
pass
# no binedges
else:
if is_rlim:
try:
if r > r_min * 0.4:
start_angle = x * (360 / num_bins) / convert
end_angle = (x + 1) * (360 / num_bins) / convert
diff_angle = end_angle - start_angle
num_angle = int(diff_angle / (0.2 / convert)) * 1j
phi_array = np.array(
np.ogrid[start_angle: end_angle:num_angle], dtype=np.float)
arc_1_x = [r * np.cos(phi) + 0.5 for phi in phi_array]
arc_1_y = [r * np.sin(phi) + 0.5 for phi in phi_array]
arc_2_x = [r_min * 0.4 * np.cos(phi) + 0.5 for phi in phi_array]
arc_2_y = [r_min * 0.4 * np.sin(phi) + 0.5 for phi in phi_array]
line_1_x = [0.5 + r_min_list[0], 0.5 + mlist[2 * x][0]]
line_1_y = [0.5 + r_min_list[1], 0.5 + mlist[2 * x][1]]
line_2_x = [0.5 + r_min_list2[0], 0.5 + mlist[2 * x + 1][0]]
line_2_y = [0.5 + r_min_list2[1], 0.5 + mlist[2 * x + 1][1]]
gr.setfillintstyle(1)
gr.fillarea(np.hstack((
line_1_x, arc_1_x, line_2_x[::-1], arc_2_x[::-1],)),
np.hstack((
line_1_y, arc_1_y, line_2_y[::-1], arc_2_y[::-1],))
)
gr.setfillintstyle(0)
gr.setfillcolorind(edgecolor)
gr.fillarea(np.hstack((
line_1_x, arc_1_x, line_2_x[::-1], arc_2_x[::-1],)),
np.hstack((
line_1_y, arc_1_y, line_2_y[::-1], arc_2_y[::-1],))
)
except Exception:
pass
# no rlim
else:
gr.fillarc(0.5 - rect, 0.5 + rect, 0.5 - rect, 0.5 + rect, x * (360 / num_bins),
(x + 1) * (360 / num_bins))
gr.settransparency(1)
gr.setfillintstyle(0)
gr.setfillcolorind(edgecolor)
gr.fillarc(0.5 - rect, 0.5 + rect, 0.5 - rect, 0.5 + rect, x * (360 / num_bins),
(x + 1) * (360 / num_bins))
# stairs
else:
gr.setlinewidth(2.3)
gr.setlinecolorind(edgecolor)
# With given bin_edges
if is_binedges:
mlist = []
rectlist = []
for x in range(len(classes)):
if normalization == 'cumcount' or normalization == 'cdf':
if classes[x][0] is None:
pass
else:
length = len(classes[x]) / norm_factor + length
elif normalization == 'pdf' or normalization == 'countdensity':
length = bin_value[x]
elif classes[x][0] is None:
length = 0
else:
length = len(classes[x]) / norm_factor
r = (length / border * 0.4) ** (num_bins * 2)
liste = moivre(r, (2 * x), num_bins * 2)
liste2 = moivre(r, (2 * x + 2), (num_bins * 2))
mlist.append(liste)
mlist.append(liste2)
rect = np.sqrt(liste[0] ** 2 + liste[1] ** 2)
if is_rlim:
for kaman in (-1, -2):
temporary = abs(np.sqrt(mlist[kaman][0]**2 + mlist[kaman][1]**2))
if temporary > (r_max * 0.4):
factor = abs(r_max * 0.4 / temporary)
mlist[kaman][0] *= factor
mlist[kaman][1] *= factor
del temporary
if rect < r_min * 0.4:
rectlist.append(r_min * 0.4)
elif rect > r_max * 0.4:
rectlist.append(r_max * 0.4)
else:
rectlist.append(rect)
else:
rectlist.append(rect)
if is_rlim:
rect = int(rect * 10000)
rect = rect / 10000
if round(rect, 3) > r_min * 0.4:
try:
gr.drawarc(0.5 - min(rect, r_max * 0.4), 0.5 + min(rect, r_max * 0.4),
0.5 - min(rect, r_max * 0.4), 0.5 + min(rect, r_max * 0.4),
binedges[x] * convert,
binedges[x + 1] * convert)
gr.drawarc(0.5 - r_min * 0.4, 0.5 + r_min * 0.4, 0.5 - r_min * 0.4, 0.5 + r_min * 0.4,
binedges[x] * convert,
binedges[x + 1] * convert)
except Exception:
pass
else:
try:
gr.drawarc(0.5 - rect, 0.5 + rect, 0.5 - rect, 0.5 + rect, binedges[x] * convert,
binedges[x + 1] * convert)
except Exception:
pass
if is_rlim:
startx = max(rectlist[0] * np.cos(binedges[0]), r_min * 0.4 * np.cos(binedges[0]))
starty = max(rectlist[0] * np.sin(binedges[0]), r_min * 0.4 * np.sin(binedges[0]))
for x in range(len(binedges)):
if not (binedges[0] == 0 and binedges[len(binedges) - 1] == 2 * np.pi and (
x == len(binedges) - 1 or x == 0)):
try:
gr.polyline([0.5 + startx, 0.5 + rectlist[x] * np.cos(binedges[x])],
[0.5 + starty, 0.5 + rectlist[x] * np.sin(binedges[x])])
except Exception:
pass
try:
startx = (rectlist[x] * np.cos(binedges[x + 1]))
starty = (rectlist[x] * np.sin(binedges[x + 1]))
except Exception:
pass
gr.polyline([0.5 + r_min * 0.4 * np.cos(binedges[0]),
0.5 + rectlist[0] * np.cos(binedges[0])],
[0.5 + r_min * 0.4 * np.sin(binedges[0]),
0.5 + rectlist[0] * np.sin(binedges[0])])
gr.polyline([0.5 + r_min * 0.4 * np.cos(binedges[-1]),
0.5 + rectlist[-1] * np.cos(binedges[-1])],
[0.5 + r_min * 0.4 * np.sin(binedges[-1]),
0.5 + rectlist[-1] * np.sin(binedges[-1])])
# no rlim
else:
startx = 0
starty = 0
for x in range(len(binedges)):
pass
try:
gr.polyline([0.5 + startx, 0.5 + rectlist[x] * np.cos(binedges[x])],
[0.5 + starty, 0.5 + rectlist[x] * np.sin(binedges[x])])
startx = (rectlist[x] * np.cos(binedges[x + 1]))
starty = (rectlist[x] * np.sin(binedges[x + 1]))
except Exception:
pass
if binedges[0] == 0 and binedges[-1] == 2 * np.pi:
gr.polyline([0.5 + rectlist[0] * np.cos(binedges[0]), 0.5 + startx],
[0.5 + rectlist[0] * np.sin(binedges[0]), 0.5 + starty])
else:
gr.polyline([0.5 + rectlist[-1] * np.cos(binedges[-1]), 0.5],
[0.5 + rectlist[-1] * np.sin(binedges[-1]), 0.5])
# Normal stairs (no bin_edges)
else:
mlist = []
for x in range(len(classes)):
if normalization == 'cumcount' or normalization == 'cdf':
if classes[x][0] is None:
pass
else:
length = length + len(classes[x]) / norm_factor
elif classes[x][0] is None:
length = 0
else:
length = len(classes[x]) / norm_factor
r = (length / border * 0.4) ** (num_bins * 2)
liste = moivre(r, (2 * x), num_bins * 2)
liste2 = moivre(r, (2 * x + 2), (num_bins * 2))
mlist.append(liste)
mlist.append(liste2)
rect = np.sqrt(liste[0] ** 2 + liste[1] ** 2)
gr.setfillcolorind(edgecolor)
if is_rlim:
for kaman in (-1, -2):
temporary = abs(np.sqrt(mlist[kaman][0] ** 2 + mlist[kaman][1] ** 2))
if temporary > (r_max * 0.4):
factor = abs(r_max * 0.4 / temporary)
mlist[kaman][0] *= factor
mlist[kaman][1] *= factor
del temporary
rect = int(rect * 10000)
rect = rect / 10000
if round(rect, 3) > r_min * 0.4:
gr.drawarc(0.5 - min(rect, r_max * 0.4), 0.5 + min(rect, r_max * 0.4),
0.5 - min(rect, r_max * 0.4), 0.5 + min(rect, r_max * 0.4),
x * (360 / num_bins),
(x + 1) * (360 / num_bins))
gr.drawarc(0.5 - r_min * 0.4, 0.5 + r_min * 0.4, 0.5 - r_min * 0.4, 0.5 + r_min * 0.4,
x * (360 / num_bins),
(x + 1) * (360 / num_bins))
# no rlim
else:
gr.drawarc(0.5 - rect, 0.5 + rect, 0.5 - rect, 0.5 + rect, x * (360 / num_bins),
(x + 1) * (360 / num_bins))
if is_rlim:
for x in range(len(classes) * 2):
if x > 1 and x % 2 == 0:
rect1 = np.sqrt(mlist[x][0]**2 + mlist[x][1]**2)
rect1 = round(int(rect1 * 10000) / 10000, 3)
rect2 = np.sqrt(mlist[x - 1][0]**2 + mlist[x - 1][1]**2)
rect2 = round(int(rect2 * 10000) / 10000, 3)
if rect1 < (r_min * 0.4) and rect2 < (r_min * 0.4):
continue
if rect1 < r_min * 0.4:
mlist[x][0] = r_min * 0.4 * np.cos(np.pi / len(classes) * x)
mlist[x][1] = r_min * 0.4 * np.sin(np.pi / len(classes) * x)
if rect2 < r_min * 0.4:
mlist[x - 1][0] = r_min * 0.4 * np.cos(np.pi / len(classes) * x)
mlist[x - 1][1] = r_min * 0.4 * np.sin(np.pi / len(classes) * x)
gr.polyline([0.5 + mlist[x][0], 0.5 + mlist[x - 1][0]],
[0.5 + mlist[x][1], 0.5 + mlist[x - 1][1]])
mlist[-1][0] = max(mlist[-1][0], r_min * 0.4 * np.cos(0))
mlist[-1][1] = max(mlist[-1][1], r_min * 0.4 * np.sin(0))
mlist[0][0] = max(mlist[0][0], r_min * 0.4 * np.cos(0))
mlist[0][1] = max(mlist[0][1], r_min * 0.4 * np.sin(0))
gr.polyline([0.5 + mlist[-1][0], 0.5 + mlist[0][0]],
[0.5 + mlist[-1][1], 0.5 + mlist[0][1]])
else:
for x in range(len(classes) * 2):
if x > 1 and x % 2 == 0:
gr.polyline([0.5 + mlist[x][0], 0.5 + mlist[x - 1][0]],
[0.5 + mlist[x][1], 0.5 + mlist[x - 1][1]])
gr.polyline([0.5 + mlist[-1][0], 0.5 + mlist[0][0]],
[0.5 + mlist[-1][1], 0.5 + mlist[0][1]])
if temp_edge is not None:
red = (temp_edge % 256) / 255
green = ((temp_edge >> 8) % 256) / 255
blue = ((temp_edge >> 16) % 256) / 255
gr.setcolorrep(1004, red, green, blue)
if temp_face is not None:
red = (temp_face % 256) / 255
green = ((temp_face >> 8) % 256) / 255
blue = ((temp_face >> 16) % 256) / 255
gr.setcolorrep(1004, red, green, blue)
gr.updatews()
def _plot_args(args, fmt='xys'):
global _plt
args = list(args)
parsed_args = []
while args:
# Try to read x, y, z and c
x = y = z = c = None
if fmt == 'xyuv':
if len(args) == 4:
x, y, u, v = args
x = _convert_to_array(x)
y = _convert_to_array(y)
u = _convert_to_array(u, always_flatten=True)
v = _convert_to_array(v, always_flatten=True)
if u.shape != (len(x) * len(y),):
raise TypeError('expected an array of len(y) * len(x) u values')
if v.shape != (len(x) * len(y),):
raise TypeError('expected an array of len(y) * len(x) v values')
parsed_args.append((x, y, u.reshape(len(y), len(x)), v.reshape(len(y), len(x)), ""))
break
else:
raise TypeError('expected x, y, u and v')
if fmt == 'xyzc' and len(args) == 1:
try:
a = np.array(args[0])
if max(a.shape) == 1:
a.shape = [1, 1]
else:
a.shape = [i for i in a.shape if i != 1]
if len(a.shape) == 1:
a.shape = [len(a), 1]
if a.dtype == complex:
raise TypeError()
x = np.arange(1, a.shape[1] + 1)
y = np.arange(1, a.shape[0] + 1)
z = a.astype(np.float64)
args.pop(0)
except TypeError:
x = y = z = c = None
if x is None:
a = _convert_to_array(args.pop(0), may_be_2d=True)
if len(a.shape) == 2:
x, y = a[:, 0], a[:, 1]
else:
if fmt == 'xys':
try:
x = a
y = _convert_to_array(args[0], xvalues=x)
args.pop(0)
except (TypeError, IndexError):
y = a
x = np.arange(1, len(a) + 1)
elif fmt == 'y':
y = a
x = np.arange(1, len(a) + 1)
elif fmt == 'xyac' or fmt == 'xyzc':
try:
x = a
if fmt == 'xyac' and len(args) >= 1:
y = _convert_to_array(args[0], xvalues=x)
if len(args) >= 2:
y = _convert_to_array(args[0])
xy_y, xy_x = np.meshgrid(y, x)
xy_x.shape = np.prod(xy_x.shape)
xy_y.shape = np.prod(xy_y.shape)
xy = np.stack((xy_x, xy_y), axis=1)
if fmt == 'xyzc':
z = _convert_to_array(args[1], xvalues=xy, always_flatten=True)
if z.shape != x.shape or z.shape != y.shape:
z.shape = (len(y), len(x))
else:
z = _convert_to_array(args[1], xvalues=x)
if len(args) >= 3:
if fmt == 'xyzc':
c = _convert_to_array(args[2], xvalues=xy, always_flatten=True)
c.shape = (len(y), len(x))
else:
c = _convert_to_array(args[2], xvalues=x)
except TypeError:
pass
if y is None:
raise TypeError('expected callable or sequence of real values')
for v in (y, z, c):
if v is None:
break
args.pop(0)
else:
raise TypeError("Invalid format: '{}'".format(fmt))
# Remove unused values
if z is None:
if len(x) > len(y):
x = x[:len(y)]
elif len(x) < len(y):
y = y[:len(x)]
else:
if fmt == 'xyzc':
if z.shape[0] > len(y):
z = z[:len(y), :]
elif z.shape[0] < len(y):
y = y[:z.shape[0]]
if len(z.shape) > 1 and z.shape[1] > len(x):
z = z[:, len(x)]
elif len(z.shape) > 1 and z.shape[1] < len(x):
x = x[:z.shape[1]]
if c is not None:
if c.shape[0] > len(y):
c = c[:len(y), :]
elif c.shape[0] < len(y):
y = y[:c.shape[0]]
z = z[:c.shape[0], 0]
if c.shape[1] > len(x):
c = c[:, len(x)]
elif c.shape[1] < len(x):
x = x[:c.shape[1]]
z = z[:, :c.shape[1]]
if z is not None:
z = np.ascontiguousarray(z)
if c is not None:
c = np.ascontiguousarray(c)
else:
if len(x) > len(y):
x = x[:len(y)]
else:
y = y[:len(x)]
if len(z) > len(x):
z = z[:len(x)]
else:
x = x[:len(z)]
y = y[:len(z)]
if c is not None:
if len(c) > len(z):
c = c[:len(z)]
else:
x = x[:len(c)]
y = y[:len(c)]
z = z[:len(c)]
# Try to read the spec if available
spec = ""
if fmt == 'xys' and len(args) > 0 and isinstance(args[0], basestring):
spec = args.pop(0)
if fmt == 'y' and args:
z = args.pop(0)
if args:
c = args.pop(0)
parsed_args.append((x, y, z, c, spec))
return parsed_args
| [
"gr.text",
"gr.setviewport",
"gr.setwswindow",
"gr3.clear",
"numpy.arctan2",
"gr.shadepoints",
"numpy.ones",
"gr.setwindow",
"numpy.arange",
"numpy.interp",
"gr.endprint",
"gr.contour",
"gr.uselinespec",
"numpy.degrees",
"gr.restorestate",
"numpy.max",
"gr3.deletemesh",
"gr.trisurf... | [((749, 770), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (764, 770), False, 'import functools\n'), ((9365, 9376), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (9373, 9376), True, 'import numpy as np\n'), ((9635, 9673), 'numpy.bincount', 'np.bincount', (['binned_x'], {'weights': 'weights'}), '(binned_x, weights=weights)\n', (9646, 9673), True, 'import numpy as np\n'), ((9686, 9722), 'numpy.linspace', 'np.linspace', (['x_min', 'x_max', '(nbins + 1)'], {}), '(x_min, x_max, nbins + 1)\n', (9697, 9722), True, 'import numpy as np\n'), ((15487, 15513), 'numpy.array', 'np.array', (['data'], {'copy': '(False)'}), '(data, copy=False)\n', (15495, 15513), True, 'import numpy as np\n'), ((16805, 16831), 'numpy.array', 'np.array', (['data'], {'copy': '(False)'}), '(data, copy=False)\n', (16813, 16831), True, 'import numpy as np\n'), ((30654, 30675), 'gr.setlinecolorind', 'gr.setlinecolorind', (['(1)'], {}), '(1)\n', (30672, 30675), False, 'import gr\n'), ((70826, 70849), 'gr.beginprint', 'gr.beginprint', (['filename'], {}), '(filename)\n', (70839, 70849), False, 'import gr\n'), ((70871, 70884), 'gr.endprint', 'gr.endprint', ([], {}), '()\n', (70882, 70884), False, 'import gr\n'), ((74669, 74698), 'numpy.ones', 'np.ones', (['(256, 4)', 'np.float32'], {}), '((256, 4), np.float32)\n', (74676, 74698), True, 'import numpy as np\n'), ((75966, 76006), 'gr.setcolormapfromrgb', 'gr.setcolormapfromrgb', (['colors', 'positions'], {}), '(colors, positions)\n', (75987, 76006), False, 'import gr\n'), ((78430, 78445), 'gr.inqdspsize', 'gr.inqdspsize', ([], {}), '()\n', (78443, 78445), False, 'import gr\n'), ((80474, 80499), 'gr.setviewport', 'gr.setviewport', (['*viewport'], {}), '(*viewport)\n', (80488, 80499), False, 'import gr\n'), ((89759, 89777), 'gr.setscale', 'gr.setscale', (['scale'], {}), '(scale)\n', (89770, 89777), False, 'import gr\n'), ((91014, 91035), 'gr.setlinecolorind', 'gr.setlinecolorind', (['(1)'], {}), '(1)\n', (91032, 91035), 
False, 'import gr\n'), ((91040, 91058), 'gr.setlinewidth', 'gr.setlinewidth', (['(1)'], {}), '(1)\n', (91055, 91058), False, 'import gr\n'), ((91187, 91215), 'gr.setcharheight', 'gr.setcharheight', (['charheight'], {}), '(charheight)\n', (91203, 91215), False, 'import gr\n'), ((96779, 96793), 'gr.savestate', 'gr.savestate', ([], {}), '()\n', (96791, 96793), False, 'import gr\n'), ((96798, 96826), 'gr.setcharheight', 'gr.setcharheight', (['charheight'], {}), '(charheight)\n', (96814, 96826), False, 'import gr\n'), ((96831, 96864), 'gr.setlinetype', 'gr.setlinetype', (['gr.LINETYPE_SOLID'], {}), '(gr.LINETYPE_SOLID)\n', (96845, 96864), False, 'import gr\n'), ((98757, 98774), 'gr.restorestate', 'gr.restorestate', ([], {}), '()\n', (98772, 98774), False, 'import gr\n'), ((98946, 98960), 'gr.savestate', 'gr.savestate', ([], {}), '()\n', (98958, 98960), False, 'import gr\n'), ((98965, 98979), 'gr.selntran', 'gr.selntran', (['(0)'], {}), '(0)\n', (98976, 98979), False, 'import gr\n'), ((98984, 98998), 'gr.setscale', 'gr.setscale', (['(0)'], {}), '(0)\n', (98995, 98998), False, 'import gr\n'), ((99577, 99614), 'gr.setfillintstyle', 'gr.setfillintstyle', (['gr.INTSTYLE_SOLID'], {}), '(gr.INTSTYLE_SOLID)\n', (99595, 99614), False, 'import gr\n'), ((99619, 99640), 'gr.setfillcolorind', 'gr.setfillcolorind', (['(0)'], {}), '(0)\n', (99637, 99640), False, 'import gr\n'), ((99645, 99716), 'gr.fillrect', 'gr.fillrect', (['(px - 0.08)', '(px + w + 0.02)', '(py + 0.03)', '(py - 0.03 * num_lines)'], {}), '(px - 0.08, px + w + 0.02, py + 0.03, py - 0.03 * num_lines)\n', (99656, 99716), False, 'import gr\n'), ((99721, 99754), 'gr.setlinetype', 'gr.setlinetype', (['gr.LINETYPE_SOLID'], {}), '(gr.LINETYPE_SOLID)\n', (99735, 99754), False, 'import gr\n'), ((99759, 99780), 'gr.setlinecolorind', 'gr.setlinecolorind', (['(1)'], {}), '(1)\n', (99777, 99780), False, 'import gr\n'), ((99785, 99803), 'gr.setlinewidth', 'gr.setlinewidth', (['(1)'], {}), '(1)\n', (99800, 99803), False, 'import 
gr\n'), ((99808, 99879), 'gr.drawrect', 'gr.drawrect', (['(px - 0.08)', '(px + w + 0.02)', '(py + 0.03)', '(py - 0.03 * num_lines)'], {}), '(px - 0.08, px + w + 0.02, py + 0.03, py - 0.03 * num_lines)\n', (99819, 99879), False, 'import gr\n'), ((99894, 99913), 'gr.uselinespec', 'gr.uselinespec', (['""" """'], {}), "(' ')\n", (99908, 99913), False, 'import gr\n'), ((100407, 100421), 'gr.selntran', 'gr.selntran', (['(1)'], {}), '(1)\n', (100418, 100421), False, 'import gr\n'), ((100426, 100443), 'gr.restorestate', 'gr.restorestate', ([], {}), '()\n', (100441, 100443), False, 'import gr\n'), ((100523, 100537), 'gr.savestate', 'gr.savestate', ([], {}), '()\n', (100535, 100537), False, 'import gr\n'), ((100662, 100759), 'gr.setviewport', 'gr.setviewport', (['(viewport[1] + 0.02 + off)', '(viewport[1] + 0.05 + off)', 'viewport[2]', 'viewport[3]'], {}), '(viewport[1] + 0.02 + off, viewport[1] + 0.05 + off, viewport\n [2], viewport[3])\n', (100676, 100759), False, 'import gr\n'), ((100778, 100802), 'gr.setwindow', 'gr.setwindow', (['(0)', '(1)', '(0)', '(1)'], {}), '(0, 1, 0, 1)\n', (100790, 100802), False, 'import gr\n'), ((100936, 100957), 'gr.setlinecolorind', 'gr.setlinecolorind', (['(1)'], {}), '(1)\n', (100954, 100957), False, 'import gr\n'), ((100962, 100976), 'gr.setscale', 'gr.setscale', (['(0)'], {}), '(0)\n', (100973, 100976), False, 'import gr\n'), ((101269, 101297), 'gr.setcharheight', 'gr.setcharheight', (['charheight'], {}), '(charheight)\n', (101285, 101297), False, 'import gr\n'), ((102886, 102903), 'gr.restorestate', 'gr.restorestate', ([], {}), '()\n', (102901, 102903), False, 'import gr\n'), ((103822, 103841), 'gr.uselinespec', 'gr.uselinespec', (['""" """'], {}), "(' ')\n", (103836, 103841), False, 'import gr\n'), ((117933, 117947), 'gr.selntran', 'gr.selntran', (['(0)'], {}), '(0)\n', (117944, 117947), False, 'import gr\n'), ((118371, 118385), 'gr.selntran', 'gr.selntran', (['(1)'], {}), '(1)\n', (118382, 118385), False, 'import gr\n'), ((119206, 
119220), 'gr.selntran', 'gr.selntran', (['(0)'], {}), '(0)\n', (119217, 119220), False, 'import gr\n'), ((119929, 119940), 'gr3.clear', 'gr3.clear', ([], {}), '()\n', (119938, 119940), False, 'import gr3\n'), ((119952, 120062), 'gr3.createisosurfacemesh', 'gr3.createisosurfacemesh', (['values', '(2 / (nx - 1), 2 / (ny - 1), 2 / (nz - 1))', '(-1.0, -1.0, -1.0)', 'isovalue'], {}), '(values, (2 / (nx - 1), 2 / (ny - 1), 2 / (nz - 1)),\n (-1.0, -1.0, -1.0), isovalue)\n', (119976, 120062), False, 'import gr3\n'), ((120150, 120184), 'gr3.setbackgroundcolor', 'gr3.setbackgroundcolor', (['(1)', '(1)', '(1)', '(0)'], {}), '(1, 1, 1, 0)\n', (120172, 120184), False, 'import gr3\n'), ((120189, 120261), 'gr3.drawmesh', 'gr3.drawmesh', (['mesh', '(1)', '(0, 0, 0)', '(0, 0, 1)', '(0, 1, 0)', 'color', '(1, 1, 1)'], {}), '(mesh, 1, (0, 0, 0), (0, 0, 1), (0, 1, 0), color, (1, 1, 1))\n', (120201, 120261), False, 'import gr3\n'), ((120525, 120616), 'gr3.drawimage', 'gr3.drawimage', (['x_min', 'x_max', 'y_min', 'y_max', '(500)', '(500)', 'gr3.GR3_Drawable.GR3_DRAWABLE_GKS'], {}), '(x_min, x_max, y_min, y_max, 500, 500, gr3.GR3_Drawable.\n GR3_DRAWABLE_GKS)\n', (120538, 120616), False, 'import gr3\n'), ((120616, 120636), 'gr3.deletemesh', 'gr3.deletemesh', (['mesh'], {}), '(mesh)\n', (120630, 120636), False, 'import gr3\n'), ((120641, 120655), 'gr.selntran', 'gr.selntran', (['(1)'], {}), '(1)\n', (120652, 120655), False, 'import gr\n'), ((120954, 120977), 'numpy.fmod', 'np.fmod', (['phi', '(2 * np.pi)'], {}), '(phi, 2 * np.pi)\n', (120961, 120977), True, 'import numpy as np\n'), ((120994, 121033), 'numpy.logical_or', 'np.logical_or', (['(rho < r_min)', '(rho > r_max)'], {}), '(rho < r_min, rho > r_max)\n', (121007, 121033), True, 'import numpy as np\n'), ((121050, 121093), 'numpy.logical_or', 'np.logical_or', (['(phi < phi_min)', '(phi > phi_max)'], {}), '(phi < phi_min, phi > phi_max)\n', (121063, 121093), True, 'import numpy as np\n'), ((131775, 131796), 'gr.setfillintstyle', 
'gr.setfillintstyle', (['(1)'], {}), '(1)\n', (131793, 131796), False, 'import gr\n'), ((135113, 135134), 'gr.setfillintstyle', 'gr.setfillintstyle', (['(0)'], {}), '(0)\n', (135131, 135134), False, 'import gr\n'), ((140106, 140135), 'numpy.linspace', 'np.linspace', (['(0)', '(255)'], {'num': 'size'}), '(0, 255, num=size)\n', (140117, 140135), True, 'import numpy as np\n'), ((140337, 140379), 'numpy.array', 'np.array', (['((r1 + r2) / factor)'], {'dtype': 'np.int'}), '((r1 + r2) / factor, dtype=np.int)\n', (140345, 140379), True, 'import numpy as np\n'), ((141182, 141213), 'numpy.array', 'np.array', (['(r12 | g12 | b12 | a12)'], {}), '(r12 | g12 | b12 | a12)\n', (141190, 141213), True, 'import numpy as np\n'), ((141226, 141254), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['pixmap'], {}), '(pixmap)\n', (141246, 141254), True, 'import numpy as np\n'), ((142732, 142774), 'gr.setviewport', 'gr.setviewport', (['vp[0]', 'vp[1]', 'vp[2]', 'vp[3]'], {}), '(vp[0], vp[1], vp[2], vp[3])\n', (142746, 142774), False, 'import gr\n'), ((142779, 142797), 'gr.setlinewidth', 'gr.setlinewidth', (['(1)'], {}), '(1)\n', (142794, 142797), False, 'import gr\n'), ((142802, 142826), 'gr.setwindow', 'gr.setwindow', (['(0)', '(1)', '(0)', '(1)'], {}), '(0, 1, 0, 1)\n', (142814, 142826), False, 'import gr\n'), ((143490, 143513), 'gr.settransparency', 'gr.settransparency', (['(0.8)'], {}), '(0.8)\n', (143508, 143513), False, 'import gr\n'), ((145006, 145027), 'gr.settextalign', 'gr.settextalign', (['(1)', '(1)'], {}), '(1, 1)\n', (145021, 145027), False, 'import gr\n'), ((146022, 146043), 'gr.settextalign', 'gr.settextalign', (['(2)', '(3)'], {}), '(2, 3)\n', (146037, 146043), False, 'import gr\n'), ((146330, 146359), 'gr.settransparency', 'gr.settransparency', (['facealpha'], {}), '(facealpha)\n', (146348, 146359), False, 'import gr\n'), ((173219, 173232), 'gr.updatews', 'gr.updatews', ([], {}), '()\n', (173230, 173232), False, 'import gr\n'), ((9518, 9565), 'numpy.floor', 
'np.floor', (['((x - x_min) / (x_max - x_min) * nbins)'], {}), '((x - x_min) / (x_max - x_min) * nbins)\n', (9526, 9565), True, 'import numpy as np\n'), ((40992, 41038), 'numpy.sqrt', 'np.sqrt', (['((X - center) ** 2 + (Y - center) ** 2)'], {}), '((X - center) ** 2 + (Y - center) ** 2)\n', (40999, 41038), True, 'import numpy as np\n'), ((41057, 41091), 'numpy.arctan2', 'np.arctan2', (['(Y - center)', '(X - center)'], {}), '(Y - center, X - center)\n', (41067, 41091), True, 'import numpy as np\n'), ((41313, 41331), 'numpy.array', 'np.array', (['colormap'], {}), '(colormap)\n', (41321, 41331), True, 'import numpy as np\n'), ((74750, 74781), 'gr.inqcolor', 'gr.inqcolor', (['(1000 + color_index)'], {}), '(1000 + color_index)\n', (74761, 74781), False, 'import gr\n'), ((75049, 75180), 'warnings.warn', 'warnings.warn', (['"""The parameter "cmap" has been replaced by "colormap". The value of "cmap" will be ignored."""'], {'stacklevel': '(3)'}), '(\n \'The parameter "cmap" has been replaced by "colormap". 
The value of "cmap" will be ignored.\'\n , stacklevel=3)\n', (75062, 75180), False, 'import warnings\n'), ((75317, 75341), 'gr.setcolormap', 'gr.setcolormap', (['colormap'], {}), '(colormap)\n', (75331, 75341), False, 'import gr\n'), ((75501, 75525), 'gr.setcolormap', 'gr.setcolormap', (['colormap'], {}), '(colormap)\n', (75515, 75525), False, 'import gr\n'), ((79067, 79130), 'gr.setwsviewport', 'gr.setwsviewport', (['(0)', 'metric_size', '(0)', '(metric_size * aspect_ratio)'], {}), '(0, metric_size, 0, metric_size * aspect_ratio)\n', (79083, 79130), False, 'import gr\n'), ((79139, 79176), 'gr.setwswindow', 'gr.setwswindow', (['(0)', '(1)', '(0)', 'aspect_ratio'], {}), '(0, 1, 0, aspect_ratio)\n', (79153, 79176), False, 'import gr\n'), ((79353, 79416), 'gr.setwsviewport', 'gr.setwsviewport', (['(0)', '(metric_size * aspect_ratio)', '(0)', 'metric_size'], {}), '(0, metric_size * aspect_ratio, 0, metric_size)\n', (79369, 79416), False, 'import gr\n'), ((79425, 79462), 'gr.setwswindow', 'gr.setwswindow', (['(0)', 'aspect_ratio', '(0)', '(1)'], {}), '(0, aspect_ratio, 0, 1)\n', (79439, 79462), False, 'import gr\n'), ((80656, 80670), 'gr.savestate', 'gr.savestate', ([], {}), '()\n', (80668, 80670), False, 'import gr\n'), ((80679, 80693), 'gr.selntran', 'gr.selntran', (['(0)'], {}), '(0)\n', (80690, 80693), False, 'import gr\n'), ((80702, 80739), 'gr.setfillintstyle', 'gr.setfillintstyle', (['gr.INTSTYLE_SOLID'], {}), '(gr.INTSTYLE_SOLID)\n', (80720, 80739), False, 'import gr\n'), ((80748, 80798), 'gr.setfillcolorind', 'gr.setfillcolorind', (["_plt.kwargs['backgroundcolor']"], {}), "(_plt.kwargs['backgroundcolor'])\n", (80766, 80798), False, 'import gr\n'), ((81100, 81114), 'gr.selntran', 'gr.selntran', (['(1)'], {}), '(1)\n', (81111, 81114), False, 'import gr\n'), ((81123, 81140), 'gr.restorestate', 'gr.restorestate', ([], {}), '()\n', (81138, 81140), False, 'import gr\n'), ((81354, 81424), 'gr.setviewport', 'gr.setviewport', (['(x_center - r)', '(x_center + r)', 
'(y_center - r)', '(y_center + r)'], {}), '(x_center - r, x_center + r, y_center - r, y_center + r)\n', (81368, 81424), False, 'import gr\n'), ((88547, 88566), 'gr.setwindow', 'gr.setwindow', (['*bbox'], {}), '(*bbox)\n', (88559, 88566), False, 'import gr\n'), ((88585, 88625), 'gr.setwindow', 'gr.setwindow', (['x_min', 'x_max', 'y_min', 'y_max'], {}), '(x_min, x_max, y_min, y_max)\n', (88597, 88625), False, 'import gr\n'), ((89345, 89386), 'gr.setspace', 'gr.setspace', (['z_min', 'z_max', 'rotation', 'tilt'], {}), '(z_min, z_max, rotation, tilt)\n', (89356, 89386), False, 'import gr\n'), ((90059, 90084), 'numpy.floor', 'np.floor', (['(phi_min / 360.0)'], {}), '(phi_min / 360.0)\n', (90067, 90084), True, 'import numpy as np\n'), ((90105, 90130), 'numpy.floor', 'np.floor', (['(phi_max / 360.0)'], {}), '(phi_max / 360.0)\n', (90113, 90130), True, 'import numpy as np\n'), ((90309, 90354), 'gr.adjustlimits', 'gr.adjustlimits', (['(phi_min / 3.0)', '(phi_max / 3.0)'], {}), '(phi_min / 3.0, phi_max / 3.0)\n', (90324, 90354), False, 'import gr\n'), ((94300, 94387), 'gr.axes', 'gr.axes', (['x_tick', 'y_tick', 'x_org[0]', 'y_org[0]', 'x_major_count', 'y_major_count', 'ticksize'], {}), '(x_tick, y_tick, x_org[0], y_org[0], x_major_count, y_major_count,\n ticksize)\n', (94307, 94387), False, 'import gr\n'), ((94392, 94482), 'gr.axes', 'gr.axes', (['x_tick', 'y_tick', 'x_org[1]', 'y_org[1]', '(-x_major_count)', '(-y_major_count)', '(-ticksize)'], {}), '(x_tick, y_tick, x_org[1], y_org[1], -x_major_count, -y_major_count,\n -ticksize)\n', (94399, 94482), False, 'import gr\n'), ((94519, 94533), 'gr.savestate', 'gr.savestate', ([], {}), '()\n', (94531, 94533), False, 'import gr\n'), ((94542, 94600), 'gr.settextalign', 'gr.settextalign', (['gr.TEXT_HALIGN_CENTER', 'gr.TEXT_VALIGN_TOP'], {}), '(gr.TEXT_HALIGN_CENTER, gr.TEXT_VALIGN_TOP)\n', (94557, 94600), False, 'import gr\n'), ((94609, 94683), 'gr.textext', 'gr.textext', (['(0.5 * (viewport[0] + viewport[1]))', 'vp[3]', 
"_plt.kwargs['title']"], {}), "(0.5 * (viewport[0] + viewport[1]), vp[3], _plt.kwargs['title'])\n", (94619, 94683), False, 'import gr\n'), ((94692, 94709), 'gr.restorestate', 'gr.restorestate', ([], {}), '()\n', (94707, 94709), False, 'import gr\n'), ((94946, 94984), 'gr.titles3d', 'gr.titles3d', (['x_label', 'y_label', 'z_label'], {}), '(x_label, y_label, z_label)\n', (94957, 94984), False, 'import gr\n'), ((96651, 96680), 'gr.adjustlimits', 'gr.adjustlimits', (['r_min', 'r_max'], {}), '(r_min, r_max)\n', (96666, 96680), False, 'import gr\n'), ((96876, 96913), 'gr.tick', 'gr.tick', (['(phi_min / 6.0)', '(phi_max / 6.0)'], {}), '(phi_min / 6.0, phi_max / 6.0)\n', (96883, 96913), False, 'import gr\n'), ((97268, 97296), 'numpy.array', 'np.array', (['[r_min / r_max, 1]'], {}), '([r_min / r_max, 1])\n', (97276, 97296), True, 'import numpy as np\n'), ((97903, 97942), 'gr.polyline', 'gr.polyline', (['(cosf * pline)', '(sinf * pline)'], {}), '(cosf * pline, sinf * pline)\n', (97914, 97942), False, 'import gr\n'), ((97960, 97981), 'gr.tick', 'gr.tick', (['r_min', 'r_max'], {}), '(r_min, r_max)\n', (97967, 97981), False, 'import gr\n'), ((98105, 98127), 'gr.setlinecolorind', 'gr.setlinecolorind', (['(88)'], {}), '(88)\n', (98123, 98127), False, 'import gr\n'), ((98140, 98156), 'numpy.clip', 'np.clip', (['r', '(0)', '(1)'], {}), '(r, 0, 1)\n', (98147, 98156), True, 'import numpy as np\n'), ((98234, 98276), 'gr.drawarc', 'gr.drawarc', (['(-r)', 'r', '(-r)', 'r', 'phi_min', 'phi_max'], {}), '(-r, r, -r, r, phi_min, phi_max)\n', (98244, 98276), False, 'import gr\n'), ((99068, 99094), 'gr.inqtextext', 'gr.inqtextext', (['(0)', '(0)', 'label'], {}), '(0, 0, label)\n', (99081, 99094), False, 'import gr\n'), ((99963, 99977), 'gr.savestate', 'gr.savestate', ([], {}), '()\n', (99975, 99977), False, 'import gr\n'), ((99993, 100013), 'gr.uselinespec', 'gr.uselinespec', (['spec'], {}), '(spec)\n', (100007, 100013), False, 'import gr\n'), ((100197, 100214), 'gr.restorestate', 
'gr.restorestate', ([], {}), '()\n', (100212, 100214), False, 'import gr\n'), ((100223, 100280), 'gr.settextalign', 'gr.settextalign', (['gr.TEXT_HALIGN_LEFT', 'gr.TEXT_VALIGN_HALF'], {}), '(gr.TEXT_HALIGN_LEFT, gr.TEXT_VALIGN_HALF)\n', (100238, 100280), False, 'import gr\n'), ((101033, 101074), 'gr.cellarray', 'gr.cellarray', (['(0)', '(1)', '(0)', '(1)', '(1)', 'colors', 'data'], {}), '(0, 1, 0, 1, 1, colors, data)\n', (101045, 101074), False, 'import gr\n'), ((101093, 101134), 'gr.cellarray', 'gr.cellarray', (['(0)', '(1)', '(1)', '(0)', '(1)', 'colors', 'data'], {}), '(0, 1, 1, 0, 1, colors, data)\n', (101105, 101134), False, 'import gr\n'), ((101340, 101370), 'gr.setwindow', 'gr.setwindow', (['(0)', '(1)', 'zmin', 'zmax'], {}), '(0, 1, zmin, zmax)\n', (101352, 101370), False, 'import gr\n'), ((101546, 101581), 'gr.axes', 'gr.axes', (['(0)', '(2)', '(1)', 'zmin', '(0)', '(1)', '(0.005)'], {}), '(0, 2, 1, zmin, 0, 1, 0.005)\n', (101553, 101581), False, 'import gr\n'), ((102712, 102740), 'gr.setcharheight', 'gr.setcharheight', (['charheight'], {}), '(charheight)\n', (102728, 102740), False, 'import gr\n'), ((102749, 102808), 'gr.settextalign', 'gr.settextalign', (['gr.TEXT_HALIGN_CENTER', 'gr.TEXT_VALIGN_BASE'], {}), '(gr.TEXT_HALIGN_CENTER, gr.TEXT_VALIGN_BASE)\n', (102764, 102808), False, 'import gr\n'), ((102817, 102881), 'gr.textext', 'gr.textext', (['(viewport[1] + 0.035 + off)', '(viewport[3] + 0.01)', 'label'], {}), '(viewport[1] + 0.035 + off, viewport[3] + 0.01, label)\n', (102827, 102881), False, 'import gr\n'), ((103145, 103157), 'gr.clearws', 'gr.clearws', ([], {}), '()\n', (103155, 103157), False, 'import gr\n'), ((103889, 103903), 'gr.savestate', 'gr.savestate', ([], {}), '()\n', (103901, 103903), False, 'import gr\n'), ((116514, 116531), 'gr.restorestate', 'gr.restorestate', ([], {}), '()\n', (116529, 116531), False, 'import gr\n'), ((116674, 116687), 'gr.updatews', 'gr.updatews', ([], {}), '()\n', (116685, 116687), False, 'import gr\n'), ((116699, 
116712), 'gr.isinline', 'gr.isinline', ([], {}), '()\n', (116710, 116712), False, 'import gr\n'), ((116852, 116871), 'gr.readimage', 'gr.readimage', (['image'], {}), '(image)\n', (116864, 116871), False, 'import gr\n'), ((116955, 116970), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (116963, 116970), True, 'import numpy as np\n'), ((117135, 117147), 'gr.clearws', 'gr.clearws', ([], {}), '()\n', (117145, 117147), False, 'import gr\n'), ((117994, 118055), 'gr.drawimage', 'gr.drawimage', (['x_min', 'x_max', 'y_min', 'y_max', 'width', 'height', 'data'], {}), '(x_min, x_max, y_min, y_max, width, height, data)\n', (118006, 118055), False, 'import gr\n'), ((118074, 118135), 'gr.cellarray', 'gr.cellarray', (['x_min', 'x_max', 'y_min', 'y_max', 'width', 'height', 'data'], {}), '(x_min, x_max, y_min, y_max, width, height, data)\n', (118086, 118135), False, 'import gr\n'), ((118176, 118190), 'gr.savestate', 'gr.savestate', ([], {}), '()\n', (118188, 118190), False, 'import gr\n'), ((118199, 118257), 'gr.settextalign', 'gr.settextalign', (['gr.TEXT_HALIGN_CENTER', 'gr.TEXT_VALIGN_TOP'], {}), '(gr.TEXT_HALIGN_CENTER, gr.TEXT_VALIGN_TOP)\n', (118214, 118257), False, 'import gr\n'), ((118266, 118340), 'gr.textext', 'gr.textext', (['(0.5 * (viewport[0] + viewport[1]))', 'vp[3]', "_plt.kwargs['title']"], {}), "(0.5 * (viewport[0] + viewport[1]), vp[3], _plt.kwargs['title'])\n", (118276, 118340), False, 'import gr\n'), ((118349, 118366), 'gr.restorestate', 'gr.restorestate', ([], {}), '()\n', (118364, 118366), False, 'import gr\n'), ((119267, 119291), 'numpy.prod', 'np.prod', (['usable_vs.shape'], {}), '(usable_vs.shape)\n', (119274, 119291), True, 'import numpy as np\n'), ((119424, 119443), 'numpy.iinfo', 'np.iinfo', (['np.uint16'], {}), '(np.uint16)\n', (119432, 119443), True, 'import numpy as np\n'), ((120810, 120839), 'gr.adjustlimits', 'gr.adjustlimits', (['r_min', 'r_max'], {}), '(r_min, r_max)\n', (120825, 120839), False, 'import gr\n'), ((121468, 121488), 
'numpy.split', 'np.split', (['rho', 'split'], {}), '(rho, split)\n', (121476, 121488), True, 'import numpy as np\n'), ((121490, 121510), 'numpy.split', 'np.split', (['phi', 'split'], {}), '(phi, split)\n', (121498, 121510), True, 'import numpy as np\n'), ((121621, 121638), 'gr.polyline', 'gr.polyline', (['x', 'y'], {}), '(x, y)\n', (121632, 121638), False, 'import gr\n'), ((122271, 122294), 'numpy.array', 'np.array', (['a'], {'copy': '(False)'}), '(a, copy=False)\n', (122279, 122294), True, 'import numpy as np\n'), ((122490, 122513), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['a'], {}), '(a)\n', (122510, 122513), True, 'import numpy as np\n'), ((122532, 122548), 'numpy.prod', 'np.prod', (['a.shape'], {}), '(a.shape)\n', (122539, 122548), True, 'import numpy as np\n'), ((138511, 138528), 'gr.setcolormap', 'gr.setcolormap', (['(0)'], {}), '(0)\n', (138525, 138528), False, 'import gr\n'), ((138610, 138628), 'numpy.zeros', 'np.zeros', (['(256, 4)'], {}), '((256, 4))\n', (138618, 138628), True, 'import numpy as np\n'), ((140595, 140637), 'numpy.array', 'np.array', (['((g1 + g2) / factor)'], {'dtype': 'np.int'}), '((g1 + g2) / factor, dtype=np.int)\n', (140603, 140637), True, 'import numpy as np\n'), ((140857, 140899), 'numpy.array', 'np.array', (['((b1 + b2) / factor)'], {'dtype': 'np.int'}), '((b1 + b2) / factor, dtype=np.int)\n', (140865, 140899), True, 'import numpy as np\n'), ((141120, 141162), 'numpy.array', 'np.array', (['((a1 + a2) / factor)'], {'dtype': 'np.int'}), '((a1 + a2) / factor, dtype=np.int)\n', (141128, 141162), True, 'import numpy as np\n'), ((142095, 142111), 'gr.inqviewport', 'gr.inqviewport', ([], {}), '()\n', (142109, 142111), False, 'import gr\n'), ((145178, 145256), 'gr.drawarc', 'gr.drawarc', (['(0.1 + x * 0.1)', '(0.9 - 0.1 * x)', '(0.1 + x * 0.1)', '(0.9 - 0.1 * x)', '(0)', '(360)'], {}), '(0.1 + x * 0.1, 0.9 - 0.1 * x, 0.1 + x * 0.1, 0.9 - 0.1 * x, 0, 360)\n', (145188, 145256), False, 'import gr\n'), ((146117, 146182), 
'gr.polyline', 'gr.polyline', (['[0.5, 0.5 + 1 * liste[0]]', '[0.5, 0.5 + 1 * liste[1]]'], {}), '([0.5, 0.5 + 1 * liste[0]], [0.5, 0.5 + 1 * liste[1]])\n', (146128, 146182), False, 'import gr\n'), ((146525, 146582), 'gr.drawimage', 'gr.drawimage', (['(0)', '(1)', '(1)', '(0)', 'triple[0]', 'triple[1]', 'triple[2]'], {}), '(0, 1, 1, 0, triple[0], triple[1], triple[2])\n', (146537, 146582), False, 'import gr\n'), ((172965, 173003), 'gr.setcolorrep', 'gr.setcolorrep', (['(1004)', 'red', 'green', 'blue'], {}), '(1004, red, green, blue)\n', (172979, 173003), False, 'import gr\n'), ((173175, 173213), 'gr.setcolorrep', 'gr.setcolorrep', (['(1004)', 'red', 'green', 'blue'], {}), '(1004, red, green, blue)\n', (173189, 173213), False, 'import gr\n'), ((10749, 10763), 'numpy.array', 'np.array', (['bins'], {}), '(bins)\n', (10757, 10763), True, 'import numpy as np\n'), ((10765, 10779), 'numpy.array', 'np.array', (['hist'], {}), '(hist)\n', (10773, 10779), True, 'import numpy as np\n'), ((49245, 49262), 'numpy.arange', 'np.arange', (['(nx + 1)'], {}), '(nx + 1)\n', (49254, 49262), True, 'import numpy as np\n'), ((49264, 49281), 'numpy.arange', 'np.arange', (['(nz + 1)'], {}), '(nz + 1)\n', (49273, 49281), True, 'import numpy as np\n'), ((49283, 49300), 'numpy.arange', 'np.arange', (['(ny + 1)'], {}), '(ny + 1)\n', (49292, 49300), True, 'import numpy as np\n'), ((74485, 74495), 'gr3.init', 'gr3.init', ([], {}), '()\n', (74493, 74495), False, 'import gr3\n'), ((75731, 75758), 'gr.setcolormap', 'gr.setcolormap', (['colormap[0]'], {}), '(colormap[0])\n', (75745, 75758), False, 'import gr\n'), ((80838, 80931), 'gr.fillrect', 'gr.fillrect', (['subplot[0]', 'subplot[1]', '(subplot[2] * aspect_ratio)', '(subplot[3] * aspect_ratio)'], {}), '(subplot[0], subplot[1], subplot[2] * aspect_ratio, subplot[3] *\n aspect_ratio)\n', (80849, 80931), False, 'import gr\n'), ((80978, 81072), 'gr.fillrect', 'gr.fillrect', (['(subplot[0] * aspect_ratio)', '(subplot[1] * aspect_ratio)', 'subplot[2]', 
'subplot[3]'], {}), '(subplot[0] * aspect_ratio, subplot[1] * aspect_ratio, subplot[2\n ], subplot[3])\n', (80989, 81072), False, 'import gr\n'), ((83270, 83293), 'numpy.min', 'np.min', (['lengths_squared'], {}), '(lengths_squared)\n', (83276, 83293), True, 'import numpy as np\n'), ((83319, 83342), 'numpy.max', 'np.max', (['lengths_squared'], {}), '(lengths_squared)\n', (83325, 83342), True, 'import numpy as np\n'), ((84486, 84510), 'numpy.degrees', 'np.degrees', (['phi_range[0]'], {}), '(phi_range[0])\n', (84496, 84510), True, 'import numpy as np\n'), ((84640, 84664), 'numpy.degrees', 'np.degrees', (['phi_range[1]'], {}), '(phi_range[1])\n', (84650, 84664), True, 'import numpy as np\n'), ((85732, 85761), 'gr.adjustlimits', 'gr.adjustlimits', (['x_min', 'x_max'], {}), '(x_min, x_max)\n', (85747, 85761), False, 'import gr\n'), ((86602, 86631), 'gr.adjustlimits', 'gr.adjustlimits', (['y_min', 'y_max'], {}), '(y_min, y_max)\n', (86617, 86631), False, 'import gr\n'), ((86685, 86706), 'gr.tick', 'gr.tick', (['y_min', 'y_max'], {}), '(y_min, y_max)\n', (86692, 86706), False, 'import gr\n'), ((87248, 87277), 'gr.adjustlimits', 'gr.adjustlimits', (['r_min', 'r_max'], {}), '(r_min, r_max)\n', (87263, 87277), False, 'import gr\n'), ((89480, 89536), 'gr.setwindow3d', 'gr.setwindow3d', (['x_min', 'x_max', 'y_min', 'y_max', 'z_min', 'z_max'], {}), '(x_min, x_max, y_min, y_max, z_min, z_max)\n', (89494, 89536), False, 'import gr\n'), ((89549, 89575), 'gr.setwindow', 'gr.setwindow', (['(-1)', '(1)', '(-1)', '(1)'], {}), '(-1, 1, -1, 1)\n', (89561, 89575), False, 'import gr\n'), ((89682, 89720), 'gr.setspace3d', 'gr.setspace3d', (['(-rotation)', 'tilt', 'fov', '(0)'], {}), '(-rotation, tilt, fov, 0)\n', (89695, 89720), False, 'import gr\n'), ((94232, 94291), 'gr.grid', 'gr.grid', (['x_tick', 'y_tick', '(0)', '(0)', 'x_major_count', 'y_major_count'], {}), '(x_tick, y_tick, 0, 0, x_major_count, y_major_count)\n', (94239, 94291), False, 'import gr\n'), ((95043, 95057), 
'gr.savestate', 'gr.savestate', ([], {}), '()\n', (95055, 95057), False, 'import gr\n'), ((95070, 95131), 'gr.settextalign', 'gr.settextalign', (['gr.TEXT_HALIGN_CENTER', 'gr.TEXT_VALIGN_BOTTOM'], {}), '(gr.TEXT_HALIGN_CENTER, gr.TEXT_VALIGN_BOTTOM)\n', (95085, 95131), False, 'import gr\n'), ((95144, 95242), 'gr.textext', 'gr.textext', (['(0.5 * (viewport[0] + viewport[1]))', '(vp[2] + 0.5 * charheight)', "_plt.kwargs['xlabel']"], {}), "(0.5 * (viewport[0] + viewport[1]), vp[2] + 0.5 * charheight,\n _plt.kwargs['xlabel'])\n", (95154, 95242), False, 'import gr\n'), ((95251, 95268), 'gr.restorestate', 'gr.restorestate', ([], {}), '()\n', (95266, 95268), False, 'import gr\n'), ((95317, 95331), 'gr.savestate', 'gr.savestate', ([], {}), '()\n', (95329, 95331), False, 'import gr\n'), ((95344, 95402), 'gr.settextalign', 'gr.settextalign', (['gr.TEXT_HALIGN_CENTER', 'gr.TEXT_VALIGN_TOP'], {}), '(gr.TEXT_HALIGN_CENTER, gr.TEXT_VALIGN_TOP)\n', (95359, 95402), False, 'import gr\n'), ((95415, 95434), 'gr.setcharup', 'gr.setcharup', (['(-1)', '(0)'], {}), '(-1, 0)\n', (95427, 95434), False, 'import gr\n'), ((95447, 95545), 'gr.textext', 'gr.textext', (['(vp[0] + 0.5 * charheight)', '(0.5 * (viewport[2] + viewport[3]))', "_plt.kwargs['ylabel']"], {}), "(vp[0] + 0.5 * charheight, 0.5 * (viewport[2] + viewport[3]),\n _plt.kwargs['ylabel'])\n", (95457, 95545), False, 'import gr\n'), ((95554, 95571), 'gr.restorestate', 'gr.restorestate', ([], {}), '()\n', (95569, 95571), False, 'import gr\n'), ((97192, 97209), 'numpy.radians', 'np.radians', (['angle'], {}), '(angle)\n', (97202, 97209), True, 'import numpy as np\n'), ((97233, 97250), 'numpy.radians', 'np.radians', (['angle'], {}), '(angle)\n', (97243, 97250), True, 'import numpy as np\n'), ((97737, 97769), 'numpy.clip', 'np.clip', (['angle', 'phi_min', 'phi_max'], {}), '(angle, phi_min, phi_max)\n', (97744, 97769), True, 'import numpy as np\n'), ((97782, 97804), 'gr.setlinecolorind', 'gr.setlinecolorind', (['(88)'], {}), '(88)\n', 
(97800, 97804), False, 'import gr\n'), ((98203, 98225), 'gr.setlinecolorind', 'gr.setlinecolorind', (['(90)'], {}), '(90)\n', (98221, 98225), False, 'import gr\n'), ((98326, 98385), 'gr.settextalign', 'gr.settextalign', (['gr.TEXT_HALIGN_CENTER', 'gr.TEXT_VALIGN_HALF'], {}), '(gr.TEXT_HALIGN_CENTER, gr.TEXT_VALIGN_HALF)\n', (98341, 98385), False, 'import gr\n'), ((98499, 98557), 'gr.wctondc', 'gr.wctondc', (['(r * cosf + sinf * 0.05)', '(r * sinf - cosf * 0.05)'], {}), '(r * cosf + sinf * 0.05, r * sinf - cosf * 0.05)\n', (98509, 98557), False, 'import gr\n'), ((98722, 98751), 'gr.text', 'gr.text', (['x', 'y', "('%g' % r_label)"], {}), "(x, y, '%g' % r_label)\n", (98729, 98751), False, 'import gr\n'), ((100062, 100107), 'gr.polyline', 'gr.polyline', (['[px - 0.07, px - 0.01]', '[py, py]'], {}), '([px - 0.07, px - 0.01], [py, py])\n', (100073, 100107), False, 'import gr\n'), ((100141, 100188), 'gr.polymarker', 'gr.polymarker', (['[px - 0.06, px - 0.02]', '[py, py]'], {}), '([px - 0.06, px - 0.02], [py, py])\n', (100154, 100188), False, 'import gr\n'), ((100320, 100364), 'gr.textext', 'gr.textext', (['px', 'py', "_plt.kwargs['labels'][i]"], {}), "(px, py, _plt.kwargs['labels'][i])\n", (100330, 100364), False, 'import gr\n'), ((101435, 101482), 'gr.setscale', 'gr.setscale', (['(gr.OPTION_Y_LOG | gr.OPTION_FLIP_Y)'], {}), '(gr.OPTION_Y_LOG | gr.OPTION_FLIP_Y)\n', (101446, 101482), False, 'import gr\n'), ((101509, 101537), 'gr.setscale', 'gr.setscale', (['gr.OPTION_Y_LOG'], {}), '(gr.OPTION_Y_LOG)\n', (101520, 101537), False, 'import gr\n'), ((101803, 101833), 'gr.setwindow', 'gr.setwindow', (['(0)', '(1)', 'zmin', 'zmax'], {}), '(0, 1, zmin, zmax)\n', (101815, 101833), False, 'import gr\n'), ((101842, 101881), 'gr.axes', 'gr.axes', (['(0)', 'ztick', '(1)', 'zmin', '(0)', '(1)', '(0.005)'], {}), '(0, ztick, 1, zmin, 0, 1, 0.005)\n', (101849, 101881), False, 'import gr\n'), ((102454, 102510), 'gr.axeslbl', 'gr.axeslbl', (['(0)', '(1)', '(1)', '(0)', '(0)', '(1)', 
'(0.005)', '(0)', 'axeslbl_callback'], {}), '(0, 1, 1, 0, 0, 1, 0.005, 0, axeslbl_callback)\n', (102464, 102510), False, 'import gr\n'), ((103951, 103991), 'gr.settransparency', 'gr.settransparency', (["_plt.kwargs['alpha']"], {}), "(_plt.kwargs['alpha'])\n", (103969, 103991), False, 'import gr\n'), ((104038, 104058), 'gr.uselinespec', 'gr.uselinespec', (['spec'], {}), '(spec)\n', (104052, 104058), False, 'import gr\n'), ((104240, 104260), 'gr.uselinespec', 'gr.uselinespec', (['spec'], {}), '(spec)\n', (104254, 104260), False, 'import gr\n'), ((116733, 116742), 'gr.show', 'gr.show', ([], {}), '()\n', (116740, 116742), False, 'import gr\n'), ((119239, 119248), 'numpy.abs', 'np.abs', (['v'], {}), '(v)\n', (119245, 119248), True, 'import numpy as np\n'), ((120397, 120413), 'numpy.sin', 'np.sin', (['rotation'], {}), '(rotation)\n', (120403, 120413), True, 'import numpy as np\n'), ((120419, 120431), 'numpy.cos', 'np.cos', (['tilt'], {}), '(tilt)\n', (120425, 120431), True, 'import numpy as np\n'), ((120452, 120468), 'numpy.cos', 'np.cos', (['rotation'], {}), '(rotation)\n', (120458, 120468), True, 'import numpy as np\n'), ((121577, 121586), 'numpy.cos', 'np.cos', (['a'], {}), '(a)\n', (121583, 121586), True, 'import numpy as np\n'), ((121603, 121612), 'numpy.sin', 'np.sin', (['a'], {}), '(a)\n', (121609, 121612), True, 'import numpy as np\n'), ((139010, 139028), 'numpy.zeros', 'np.zeros', (['(256, 4)'], {}), '((256, 4))\n', (139018, 139028), True, 'import numpy as np\n'), ((141763, 141777), 'gr.savestate', 'gr.savestate', ([], {}), '()\n', (141775, 141777), False, 'import gr\n'), ((141790, 141848), 'gr.settextalign', 'gr.settextalign', (['gr.TEXT_HALIGN_CENTER', 'gr.TEXT_VALIGN_TOP'], {}), '(gr.TEXT_HALIGN_CENTER, gr.TEXT_VALIGN_TOP)\n', (141805, 141848), False, 'import gr\n'), ((141861, 141922), 'gr.textext', 'gr.textext', (['(0.5 * (vp[0] + vp[1]))', '(0.95)', "_plt.kwargs['title']"], {}), "(0.5 * (vp[0] + vp[1]), 0.95, _plt.kwargs['title'])\n", (141871, 141922), 
False, 'import gr\n'), ((141935, 141952), 'gr.restorestate', 'gr.restorestate', ([], {}), '()\n', (141950, 141952), False, 'import gr\n'), ((142351, 142365), 'gr.savestate', 'gr.savestate', ([], {}), '()\n', (142363, 142365), False, 'import gr\n'), ((142378, 142436), 'gr.settextalign', 'gr.settextalign', (['gr.TEXT_HALIGN_CENTER', 'gr.TEXT_VALIGN_TOP'], {}), '(gr.TEXT_HALIGN_CENTER, gr.TEXT_VALIGN_TOP)\n', (142393, 142436), False, 'import gr\n'), ((142449, 142538), 'gr.textext', 'gr.textext', (['(0.5 * (vp[0] + vp[1]))', '(vp[3] - temp_add * 4 * 0.25)', "_plt.kwargs['title']"], {}), "(0.5 * (vp[0] + vp[1]), vp[3] - temp_add * 4 * 0.25, _plt.kwargs[\n 'title'])\n", (142459, 142538), False, 'import gr\n'), ((142546, 142563), 'gr.restorestate', 'gr.restorestate', ([], {}), '()\n', (142561, 142563), False, 'import gr\n'), ((162401, 162421), 'gr.setlinewidth', 'gr.setlinewidth', (['(2.3)'], {}), '(2.3)\n', (162416, 162421), False, 'import gr\n'), ((162434, 162463), 'gr.setlinecolorind', 'gr.setlinecolorind', (['edgecolor'], {}), '(edgecolor)\n', (162452, 162463), False, 'import gr\n'), ((897, 919), 'gr.emergencyclosegks', 'gr.emergencyclosegks', ([], {}), '()\n', (917, 919), False, 'import gr\n'), ((32452, 32491), 'numpy.linspace', 'np.linspace', (['(0)', '(np.pi * 2)', '(num_bins + 1)'], {}), '(0, np.pi * 2, num_bins + 1)\n', (32463, 32491), True, 'import numpy as np\n'), ((32860, 32913), 'numpy.linspace', 'np.linspace', (['binlimits[0]', 'binlimits[1]', '(num_bins + 1)'], {}), '(binlimits[0], binlimits[1], num_bins + 1)\n', (32871, 32913), True, 'import numpy as np\n'), ((35041, 35094), 'numpy.linspace', 'np.linspace', (['binlimits[0]', 'binlimits[1]', '(num_bins + 1)'], {}), '(binlimits[0], binlimits[1], num_bins + 1)\n', (35052, 35094), True, 'import numpy as np\n'), ((38084, 38101), 'numpy.log10', 'np.log10', (['maximum'], {}), '(maximum)\n', (38092, 38101), True, 'import numpy as np\n'), ((42699, 42724), 'numpy.zeros', 'np.zeros', (['(height, width)'], {}), 
'((height, width))\n', (42707, 42724), True, 'import numpy as np\n'), ((43243, 43268), 'numpy.zeros', 'np.zeros', (['(height, width)'], {}), '((height, width))\n', (43251, 43268), True, 'import numpy as np\n'), ((44074, 44099), 'numpy.zeros', 'np.zeros', (['(height, width)'], {}), '((height, width))\n', (44082, 44099), True, 'import numpy as np\n'), ((44619, 44644), 'numpy.zeros', 'np.zeros', (['(height, width)'], {}), '((height, width))\n', (44627, 44644), True, 'import numpy as np\n'), ((75814, 75841), 'gr.setcolormap', 'gr.setcolormap', (['colormap[1]'], {}), '(colormap[1])\n', (75828, 75841), False, 'import gr\n'), ((75868, 75885), 'gr.setcolormap', 'gr.setcolormap', (['(1)'], {}), '(1)\n', (75882, 75885), False, 'import gr\n'), ((81595, 81608), 'numpy.abs', 'np.abs', (['v_min'], {}), '(v_min)\n', (81601, 81608), True, 'import numpy as np\n'), ((81636, 81649), 'numpy.abs', 'np.abs', (['v_max'], {}), '(v_max)\n', (81642, 81649), True, 'import numpy as np\n'), ((82608, 82620), 'numpy.nanmin', 'np.nanmin', (['z'], {}), '(z)\n', (82617, 82620), True, 'import numpy as np\n'), ((82653, 82665), 'numpy.nanmax', 'np.nanmax', (['z'], {}), '(z)\n', (82662, 82665), True, 'import numpy as np\n'), ((86018, 86039), 'gr.tick', 'gr.tick', (['x_min', 'x_max'], {}), '(x_min, x_max)\n', (86025, 86039), False, 'import gr\n'), ((87497, 87514), 'numpy.radians', 'np.radians', (['angle'], {}), '(angle)\n', (87507, 87514), True, 'import numpy as np\n'), ((87542, 87559), 'numpy.radians', 'np.radians', (['angle'], {}), '(angle)\n', (87552, 87559), True, 'import numpy as np\n'), ((88879, 88908), 'gr.adjustlimits', 'gr.adjustlimits', (['z_min', 'z_max'], {}), '(z_min, z_max)\n', (88894, 88908), False, 'import gr\n'), ((88970, 88991), 'gr.tick', 'gr.tick', (['z_min', 'z_max'], {}), '(z_min, z_max)\n', (88977, 88991), False, 'import gr\n'), ((91508, 91575), 'gr.grid3d', 'gr.grid3d', (['x_tick', '(0)', 'z_tick', 'x_org[0]', 'y_org[1]', 'z_org[0]', '(2)', '(0)', '(2)'], {}), '(x_tick, 0, 
z_tick, x_org[0], y_org[1], z_org[0], 2, 0, 2)\n', (91517, 91575), False, 'import gr\n'), ((91592, 91654), 'gr.grid3d', 'gr.grid3d', (['(0)', 'y_tick', '(0)', 'x_org[0]', 'y_org[1]', 'z_org[0]', '(0)', '(2)', '(0)'], {}), '(0, y_tick, 0, x_org[0], y_org[1], z_org[0], 0, 2, 0)\n', (91601, 91654), False, 'import gr\n'), ((91689, 91795), 'gr.axes3d', 'gr.axes3d', (['x_tick', '(0)', 'z_tick', 'x_org[0]', 'y_org[0]', 'z_org[0]', 'x_major_count', '(0)', 'z_major_count', '(-ticksize)'], {}), '(x_tick, 0, z_tick, x_org[0], y_org[0], z_org[0], x_major_count, 0,\n z_major_count, -ticksize)\n', (91698, 91795), False, 'import gr\n'), ((91808, 91896), 'gr.axes3d', 'gr.axes3d', (['(0)', 'y_tick', '(0)', 'x_org[1]', 'y_org[0]', 'z_org[0]', '(0)', 'y_major_count', '(0)', 'ticksize'], {}), '(0, y_tick, 0, x_org[1], y_org[0], z_org[0], 0, y_major_count, 0,\n ticksize)\n', (91817, 91896), False, 'import gr\n'), ((95886, 95914), 'gr.setcharheight', 'gr.setcharheight', (['charheight'], {}), '(charheight)\n', (95902, 95914), False, 'import gr\n'), ((95931, 95952), 'gr.settextalign', 'gr.settextalign', (['(2)', '(1)'], {}), '(2, 1)\n', (95946, 95952), False, 'import gr\n'), ((97432, 97454), 'gr.setlinecolorind', 'gr.setlinecolorind', (['(88)'], {}), '(88)\n', (97450, 97454), False, 'import gr\n'), ((97471, 97530), 'gr.settextalign', 'gr.settextalign', (['gr.TEXT_HALIGN_CENTER', 'gr.TEXT_VALIGN_HALF'], {}), '(gr.TEXT_HALIGN_CENTER, gr.TEXT_VALIGN_HALF)\n', (97486, 97530), False, 'import gr\n'), ((97554, 97588), 'gr.wctondc', 'gr.wctondc', (['(1.1 * cosf)', '(1.1 * sinf)'], {}), '(1.1 * cosf, 1.1 * sinf)\n', (97564, 97588), False, 'import gr\n'), ((97605, 97642), 'gr.textext', 'gr.textext', (['x', 'y', "('%g°' % angle_label)"], {}), "(x, y, '%g°' % angle_label)\n", (97615, 97642), False, 'import gr\n'), ((97680, 97702), 'gr.setlinecolorind', 'gr.setlinecolorind', (['(90)'], {}), '(90)\n', (97698, 97702), False, 'import gr\n'), ((97831, 97848), 'numpy.radians', 'np.radians', (['angle'], 
{}), '(angle)\n', (97841, 97848), True, 'import numpy as np\n'), ((97876, 97893), 'numpy.radians', 'np.radians', (['angle'], {}), '(angle)\n', (97886, 97893), True, 'import numpy as np\n'), ((98412, 98431), 'numpy.radians', 'np.radians', (['phi_min'], {}), '(phi_min)\n', (98422, 98431), True, 'import numpy as np\n'), ((98459, 98478), 'numpy.radians', 'np.radians', (['phi_min'], {}), '(phi_min)\n', (98469, 98478), True, 'import numpy as np\n'), ((101682, 101711), 'gr.setscale', 'gr.setscale', (['gr.OPTION_FLIP_Y'], {}), '(gr.OPTION_FLIP_Y)\n', (101693, 101711), False, 'import gr\n'), ((101738, 101752), 'gr.setscale', 'gr.setscale', (['(0)'], {}), '(0)\n', (101749, 101752), False, 'import gr\n'), ((101775, 101794), 'gr.tick', 'gr.tick', (['zmin', 'zmax'], {}), '(zmin, zmax)\n', (101782, 101794), False, 'import gr\n'), ((102375, 102404), 'gr.setscale', 'gr.setscale', (['gr.OPTION_FLIP_Y'], {}), '(gr.OPTION_FLIP_Y)\n', (102386, 102404), False, 'import gr\n'), ((102431, 102445), 'gr.setscale', 'gr.setscale', (['(0)'], {}), '(0)\n', (102442, 102445), False, 'import gr\n'), ((104115, 104132), 'gr.polyline', 'gr.polyline', (['x', 'y'], {}), '(x, y)\n', (104126, 104132), False, 'import gr\n'), ((104174, 104193), 'gr.polymarker', 'gr.polymarker', (['x', 'y'], {}), '(x, y)\n', (104187, 104193), False, 'import gr\n'), ((105820, 105865), 'gr.polyline', 'gr.polyline', (['x_step_boundaries', 'y_step_values'], {}), '(x_step_boundaries, y_step_values)\n', (105831, 105865), False, 'import gr\n'), ((105907, 105926), 'gr.polymarker', 'gr.polymarker', (['x', 'y'], {}), '(x, y)\n', (105920, 105926), False, 'import gr\n'), ((105971, 106015), 'gr.setmarkertype', 'gr.setmarkertype', (['gr.MARKERTYPE_SOLID_CIRCLE'], {}), '(gr.MARKERTYPE_SOLID_CIRCLE)\n', (105987, 106015), False, 'import gr\n'), ((119692, 119716), 'numpy.clip', 'np.clip', (['v', 'v_min', 'v_max'], {}), '(v, v_min, v_max)\n', (119699, 119716), True, 'import numpy as np\n'), ((120382, 120394), 'numpy.sin', 'np.sin', (['tilt'], 
{}), '(tilt)\n', (120388, 120394), True, 'import numpy as np\n'), ((120437, 120449), 'numpy.sin', 'np.sin', (['tilt'], {}), '(tilt)\n', (120443, 120449), True, 'import numpy as np\n'), ((123489, 123499), 'numpy.real', 'np.real', (['a'], {}), '(a)\n', (123496, 123499), True, 'import numpy as np\n'), ((123501, 123511), 'numpy.imag', 'np.imag', (['a'], {}), '(a)\n', (123508, 123511), True, 'import numpy as np\n'), ((132774, 132840), 'gr.fillrect', 'gr.fillrect', (['(x - 0.5 * bar_width)', '(x + 0.5 * bar_width)', 'c', '(y[i] + c)'], {}), '(x - 0.5 * bar_width, x + 0.5 * bar_width, c, y[i] + c)\n', (132785, 132840), False, 'import gr\n'), ((135027, 135086), 'gr.fillrect', 'gr.fillrect', (['(x - 0.5 * bar_width)', '(x + 0.5 * bar_width)', '(0)', 'y'], {}), '(x - 0.5 * bar_width, x + 0.5 * bar_width, 0, y)\n', (135038, 135086), False, 'import gr\n'), ((136150, 136216), 'gr.drawrect', 'gr.drawrect', (['(x - 0.5 * bar_width)', '(x + 0.5 * bar_width)', 'c', '(y[i] + c)'], {}), '(x - 0.5 * bar_width, x + 0.5 * bar_width, c, y[i] + c)\n', (136161, 136216), False, 'import gr\n'), ((138208, 138267), 'gr.drawrect', 'gr.drawrect', (['(x - 0.5 * bar_width)', '(x + 0.5 * bar_width)', '(0)', 'y'], {}), '(x - 0.5 * bar_width, x + 0.5 * bar_width, 0, y)\n', (138219, 138267), False, 'import gr\n'), ((138745, 138767), 'gr.setcolormap', 'gr.setcolormap', (['tup[0]'], {}), '(tup[0])\n', (138759, 138767), False, 'import gr\n'), ((139349, 139367), 'numpy.zeros', 'np.zeros', (['(256, 4)'], {}), '((256, 4))\n', (139357, 139367), True, 'import numpy as np\n'), ((140155, 140215), 'numpy.interp', 'np.interp', (['lins_float', 'size_range', 'COLORMAP_X[size_range, 0]'], {}), '(lins_float, size_range, COLORMAP_X[size_range, 0])\n', (140164, 140215), True, 'import numpy as np\n'), ((140250, 140310), 'numpy.interp', 'np.interp', (['lins_float', 'size_range', 'COLORMAP_Y[size_range, 0]'], {}), '(lins_float, size_range, COLORMAP_Y[size_range, 0])\n', (140259, 140310), True, 'import numpy as np\n'), 
((140399, 140459), 'numpy.interp', 'np.interp', (['lins_float', 'size_range', 'COLORMAP_X[size_range, 1]'], {}), '(lins_float, size_range, COLORMAP_X[size_range, 1])\n', (140408, 140459), True, 'import numpy as np\n'), ((140494, 140554), 'numpy.interp', 'np.interp', (['lins_float', 'size_range', 'COLORMAP_Y[size_range, 1]'], {}), '(lins_float, size_range, COLORMAP_Y[size_range, 1])\n', (140503, 140554), True, 'import numpy as np\n'), ((140661, 140721), 'numpy.interp', 'np.interp', (['lins_float', 'size_range', 'COLORMAP_X[size_range, 2]'], {}), '(lins_float, size_range, COLORMAP_X[size_range, 2])\n', (140670, 140721), True, 'import numpy as np\n'), ((140756, 140816), 'numpy.interp', 'np.interp', (['lins_float', 'size_range', 'COLORMAP_Y[size_range, 2]'], {}), '(lins_float, size_range, COLORMAP_Y[size_range, 2])\n', (140765, 140816), True, 'import numpy as np\n'), ((140924, 140984), 'numpy.interp', 'np.interp', (['lins_float', 'size_range', 'COLORMAP_X[size_range, 3]'], {}), '(lins_float, size_range, COLORMAP_X[size_range, 3])\n', (140933, 140984), True, 'import numpy as np\n'), ((141019, 141079), 'numpy.interp', 'np.interp', (['lins_float', 'size_range', 'COLORMAP_Y[size_range, 3]'], {}), '(lins_float, size_range, COLORMAP_Y[size_range, 3])\n', (141028, 141079), True, 'import numpy as np\n'), ((141392, 141417), 'numpy.cos', 'np.cos', (['(2 * x * np.pi / n)'], {}), '(2 * x * np.pi / n)\n', (141398, 141417), True, 'import numpy as np\n'), ((141460, 141485), 'numpy.sin', 'np.sin', (['(2 * x * np.pi / n)'], {}), '(2 * x * np.pi / n)\n', (141466, 141485), True, 'import numpy as np\n'), ((143983, 144000), 'gr.inqcolor', 'gr.inqcolor', (['(1004)'], {}), '(1004)\n', (143994, 144000), False, 'import gr\n'), ((144017, 144079), 'gr.setcolorrep', 'gr.setcolorrep', (['(1004)', 'facecolor[0]', 'facecolor[1]', 'facecolor[2]'], {}), '(1004, facecolor[0], facecolor[1], facecolor[2])\n', (144031, 144079), False, 'import gr\n'), ((144688, 144705), 'gr.inqcolor', 'gr.inqcolor', 
(['(1005)'], {}), '(1005)\n', (144699, 144705), False, 'import gr\n'), ((144722, 144784), 'gr.setcolorrep', 'gr.setcolorrep', (['(1005)', 'edgecolor[0]', 'edgecolor[1]', 'edgecolor[2]'], {}), '(1005, edgecolor[0], edgecolor[1], edgecolor[2])\n', (144736, 144784), False, 'import gr\n'), ((146773, 146802), 'gr.setlinecolorind', 'gr.setlinecolorind', (['edgecolor'], {}), '(edgecolor)\n', (146791, 146802), False, 'import gr\n'), ((146819, 146839), 'gr.setlinewidth', 'gr.setlinewidth', (['(1.5)'], {}), '(1.5)\n', (146834, 146839), False, 'import gr\n'), ((155170, 155208), 'numpy.sqrt', 'np.sqrt', (['(liste[0] ** 2 + liste[1] ** 2)'], {}), '(liste[0] ** 2 + liste[1] ** 2)\n', (155177, 155208), True, 'import numpy as np\n'), ((155226, 155255), 'gr.setfillcolorind', 'gr.setfillcolorind', (['facecolor'], {}), '(facecolor)\n', (155244, 155255), False, 'import gr\n'), ((155272, 155301), 'gr.settransparency', 'gr.settransparency', (['facealpha'], {}), '(facealpha)\n', (155290, 155301), False, 'import gr\n'), ((155318, 155339), 'gr.setfillintstyle', 'gr.setfillintstyle', (['(1)'], {}), '(1)\n', (155336, 155339), False, 'import gr\n'), ((174256, 174273), 'numpy.array', 'np.array', (['args[0]'], {}), '(args[0])\n', (174264, 174273), True, 'import numpy as np\n'), ((174609, 174637), 'numpy.arange', 'np.arange', (['(1)', '(a.shape[1] + 1)'], {}), '(1, a.shape[1] + 1)\n', (174618, 174637), True, 'import numpy as np\n'), ((174658, 174686), 'numpy.arange', 'np.arange', (['(1)', '(a.shape[0] + 1)'], {}), '(1, a.shape[0] + 1)\n', (174667, 174686), True, 'import numpy as np\n'), ((42876, 42918), 'numpy.logical_and', 'np.logical_and', (['(angle_c <= angle2)', 'tempmap'], {}), '(angle_c <= angle2, tempmap)\n', (42890, 42918), True, 'import numpy as np\n'), ((42949, 42991), 'numpy.logical_and', 'np.logical_and', (['(radiusc <= radius)', 'tempmap'], {}), '(radiusc <= radius, tempmap)\n', (42963, 42991), True, 'import numpy as np\n'), ((43022, 43063), 'numpy.logical_and', 'np.logical_and', 
(['(radiusc <= r_max)', 'tempmap'], {}), '(radiusc <= r_max, tempmap)\n', (43036, 43063), True, 'import numpy as np\n'), ((43094, 43135), 'numpy.logical_and', 'np.logical_and', (['(r_min <= radiusc)', 'tempmap'], {}), '(r_min <= radiusc, tempmap)\n', (43108, 43135), True, 'import numpy as np\n'), ((43166, 43197), 'numpy.logical_or', 'np.logical_or', (['tempmap', 'boolmap'], {}), '(tempmap, boolmap)\n', (43179, 43197), True, 'import numpy as np\n'), ((43419, 43461), 'numpy.logical_and', 'np.logical_and', (['(angle_c <= angle2)', 'tempmap'], {}), '(angle_c <= angle2, tempmap)\n', (43433, 43461), True, 'import numpy as np\n'), ((43492, 43534), 'numpy.logical_and', 'np.logical_and', (['(radiusc <= radius)', 'tempmap'], {}), '(radiusc <= radius, tempmap)\n', (43506, 43534), True, 'import numpy as np\n'), ((43565, 43606), 'numpy.logical_and', 'np.logical_and', (['(radiusc <= r_max)', 'tempmap'], {}), '(radiusc <= r_max, tempmap)\n', (43579, 43606), True, 'import numpy as np\n'), ((43637, 43678), 'numpy.logical_and', 'np.logical_and', (['(r_min <= radiusc)', 'tempmap'], {}), '(r_min <= radiusc, tempmap)\n', (43651, 43678), True, 'import numpy as np\n'), ((43709, 43740), 'numpy.logical_or', 'np.logical_or', (['tempmap', 'boolmap'], {}), '(tempmap, boolmap)\n', (43722, 43740), True, 'import numpy as np\n'), ((44251, 44293), 'numpy.logical_and', 'np.logical_and', (['(angle_c <= angle2)', 'tempmap'], {}), '(angle_c <= angle2, tempmap)\n', (44265, 44293), True, 'import numpy as np\n'), ((44324, 44366), 'numpy.logical_and', 'np.logical_and', (['(radiusc <= radius)', 'tempmap'], {}), '(radiusc <= radius, tempmap)\n', (44338, 44366), True, 'import numpy as np\n'), ((44397, 44438), 'numpy.logical_and', 'np.logical_and', (['(radiusc <= r_max)', 'tempmap'], {}), '(radiusc <= r_max, tempmap)\n', (44411, 44438), True, 'import numpy as np\n'), ((44469, 44510), 'numpy.logical_and', 'np.logical_and', (['(r_min <= radiusc)', 'tempmap'], {}), '(r_min <= radiusc, tempmap)\n', (44483, 
44510), True, 'import numpy as np\n'), ((44541, 44572), 'numpy.logical_or', 'np.logical_or', (['tempmap', 'boolmap'], {}), '(tempmap, boolmap)\n', (44554, 44572), True, 'import numpy as np\n'), ((44795, 44837), 'numpy.logical_and', 'np.logical_and', (['(angle_c <= angle2)', 'tempmap'], {}), '(angle_c <= angle2, tempmap)\n', (44809, 44837), True, 'import numpy as np\n'), ((44868, 44910), 'numpy.logical_and', 'np.logical_and', (['(radiusc <= radius)', 'tempmap'], {}), '(radiusc <= radius, tempmap)\n', (44882, 44910), True, 'import numpy as np\n'), ((44941, 44982), 'numpy.logical_and', 'np.logical_and', (['(radiusc <= r_max)', 'tempmap'], {}), '(radiusc <= r_max, tempmap)\n', (44955, 44982), True, 'import numpy as np\n'), ((45013, 45054), 'numpy.logical_and', 'np.logical_and', (['(r_min <= radiusc)', 'tempmap'], {}), '(r_min <= radiusc, tempmap)\n', (45027, 45054), True, 'import numpy as np\n'), ((45085, 45116), 'numpy.logical_or', 'np.logical_or', (['tempmap', 'boolmap'], {}), '(tempmap, boolmap)\n', (45098, 45116), True, 'import numpy as np\n'), ((82145, 82157), 'numpy.nanmin', 'np.nanmin', (['x'], {}), '(x)\n', (82154, 82157), True, 'import numpy as np\n'), ((82190, 82202), 'numpy.nanmax', 'np.nanmax', (['x'], {}), '(x)\n', (82199, 82202), True, 'import numpy as np\n'), ((82466, 82478), 'numpy.nanmin', 'np.nanmin', (['y'], {}), '(y)\n', (82475, 82478), True, 'import numpy as np\n'), ((82511, 82523), 'numpy.nanmax', 'np.nanmax', (['y'], {}), '(y)\n', (82520, 82523), True, 'import numpy as np\n'), ((92234, 92302), 'gr.grid3d', 'gr.grid3d', (['x_tick', '(0)', 'z_tick', 'x_org[0]', 'y_org[1]', 'z_org[zi]', '(2)', '(0)', '(2)'], {}), '(x_tick, 0, z_tick, x_org[0], y_org[1], z_org[zi], 2, 0, 2)\n', (92243, 92302), False, 'import gr\n'), ((92323, 92386), 'gr.grid3d', 'gr.grid3d', (['(0)', 'y_tick', '(0)', 'x_org[0]', 'y_org[1]', 'z_org[zi]', '(0)', '(2)', '(0)'], {}), '(0, y_tick, 0, x_org[0], y_org[1], z_org[zi], 0, 2, 0)\n', (92332, 92386), False, 'import gr\n'), 
((93091, 93199), 'gr.axes3d', 'gr.axes3d', (['x_tick', '(0)', 'z_tick', 'x_org[0]', 'y_org[0]', 'z_org[zi]', 'x_major_count', '(0)', 'z_major_count', '(-ticksize)'], {}), '(x_tick, 0, z_tick, x_org[0], y_org[0], z_org[zi], x_major_count, \n 0, z_major_count, -ticksize)\n', (93100, 93199), False, 'import gr\n'), ((93215, 93304), 'gr.axes3d', 'gr.axes3d', (['(0)', 'y_tick', '(0)', 'x_org[1]', 'y_org[0]', 'z_org[zi]', '(0)', 'y_major_count', '(0)', 'ticksize'], {}), '(0, y_tick, 0, x_org[1], y_org[0], z_org[zi], 0, y_major_count, 0,\n ticksize)\n', (93224, 93304), False, 'import gr\n'), ((96183, 96219), 'gr.textext', 'gr.textext', (['x', 'y', 'x_notations[i - 1]'], {}), '(x, y, x_notations[i - 1])\n', (96193, 96219), False, 'import gr\n'), ((104468, 104487), 'numpy.zeros', 'np.zeros', (['(2 * n - 1)'], {}), '(2 * n - 1)\n', (104476, 104487), True, 'import numpy as np\n'), ((104524, 104543), 'numpy.zeros', 'np.zeros', (['(2 * n - 1)'], {}), '(2 * n - 1)\n', (104532, 104543), True, 'import numpy as np\n'), ((106553, 106572), 'gr.polymarker', 'gr.polymarker', (['x', 'y'], {}), '(x, y)\n', (106566, 106572), False, 'import gr\n'), ((121115, 121150), 'numpy.logical_or', 'np.logical_or', (['split_phi', 'split_rho'], {}), '(split_phi, split_rho)\n', (121128, 121150), True, 'import numpy as np\n'), ((132088, 132167), 'gr.setcolorrep', 'gr.setcolorrep', (['color_save_spot', 'current_rgb[0]', 'current_rgb[1]', 'current_rgb[2]'], {}), '(color_save_spot, current_rgb[0], current_rgb[1], current_rgb[2])\n', (132102, 132167), False, 'import gr\n'), ((132188, 132223), 'gr.setfillcolorind', 'gr.setfillcolorind', (['color_save_spot'], {}), '(color_save_spot)\n', (132206, 132223), False, 'import gr\n'), ((133848, 133949), 'gr.fillrect', 'gr.fillrect', (['(x - 0.5 * wfac + bar_width * i)', '(x - 0.5 * wfac + bar_width + bar_width * i)', '(0)', 'y[i]'], {}), '(x - 0.5 * wfac + bar_width * i, x - 0.5 * wfac + bar_width + \n bar_width * i, 0, y[i])\n', (133859, 133949), False, 'import 
gr\n'), ((134130, 134209), 'gr.setcolorrep', 'gr.setcolorrep', (['color_save_spot', 'current_rgb[0]', 'current_rgb[1]', 'current_rgb[2]'], {}), '(color_save_spot, current_rgb[0], current_rgb[1], current_rgb[2])\n', (134144, 134209), False, 'import gr\n'), ((134226, 134261), 'gr.setfillcolorind', 'gr.setfillcolorind', (['color_save_spot'], {}), '(color_save_spot)\n', (134244, 134261), False, 'import gr\n'), ((135438, 135517), 'gr.setcolorrep', 'gr.setcolorrep', (['color_save_spot', 'current_rgb[0]', 'current_rgb[1]', 'current_rgb[2]'], {}), '(color_save_spot, current_rgb[0], current_rgb[1], current_rgb[2])\n', (135452, 135517), False, 'import gr\n'), ((135538, 135573), 'gr.setlinecolorind', 'gr.setlinecolorind', (['color_save_spot'], {}), '(color_save_spot)\n', (135556, 135573), False, 'import gr\n'), ((136027, 136064), 'gr.setlinewidth', 'gr.setlinewidth', (['indedgewidths[i + 1]'], {}), '(indedgewidths[i + 1])\n', (136042, 136064), False, 'import gr\n'), ((136107, 136133), 'gr.setlinewidth', 'gr.setlinewidth', (['edgewidth'], {}), '(edgewidth)\n', (136122, 136133), False, 'import gr\n'), ((137262, 137363), 'gr.drawrect', 'gr.drawrect', (['(x - 0.5 * wfac + bar_width * i)', '(x - 0.5 * wfac + bar_width + bar_width * i)', '(0)', 'y[i]'], {}), '(x - 0.5 * wfac + bar_width * i, x - 0.5 * wfac + bar_width + \n bar_width * i, 0, y[i])\n', (137273, 137363), False, 'import gr\n'), ((137556, 137635), 'gr.setcolorrep', 'gr.setcolorrep', (['color_save_spot', 'current_rgb[0]', 'current_rgb[1]', 'current_rgb[2]'], {}), '(color_save_spot, current_rgb[0], current_rgb[1], current_rgb[2])\n', (137570, 137635), False, 'import gr\n'), ((137652, 137687), 'gr.setlinecolorind', 'gr.setlinecolorind', (['color_save_spot'], {}), '(color_save_spot)\n', (137670, 137687), False, 'import gr\n'), ((138101, 138134), 'gr.setlinewidth', 'gr.setlinewidth', (['indedgewidths[x]'], {}), '(indedgewidths[x])\n', (138116, 138134), False, 'import gr\n'), ((138169, 138195), 'gr.setlinewidth', 
'gr.setlinewidth', (['edgewidth'], {}), '(edgewidth)\n', (138184, 138195), False, 'import gr\n'), ((138818, 138854), 'gr.setcolormap', 'gr.setcolormap', (['gr.COLORMAPS[tup[0]]'], {}), '(gr.COLORMAPS[tup[0]])\n', (138832, 138854), False, 'import gr\n'), ((139145, 139167), 'gr.setcolormap', 'gr.setcolormap', (['tup[1]'], {}), '(tup[1])\n', (139159, 139167), False, 'import gr\n'), ((139506, 139528), 'gr.setcolormap', 'gr.setcolormap', (['tup[0]'], {}), '(tup[0])\n', (139520, 139528), False, 'import gr\n'), ((139798, 139820), 'gr.setcolormap', 'gr.setcolormap', (['tup[1]'], {}), '(tup[1])\n', (139812, 139820), False, 'import gr\n'), ((148677, 148715), 'numpy.sqrt', 'np.sqrt', (['(liste[0] ** 2 + liste[1] ** 2)'], {}), '(liste[0] ** 2 + liste[1] ** 2)\n', (148684, 148715), True, 'import numpy as np\n'), ((149561, 149582), 'gr.settransparency', 'gr.settransparency', (['(1)'], {}), '(1)\n', (149579, 149582), False, 'import gr\n'), ((149603, 149624), 'gr.setfillintstyle', 'gr.setfillintstyle', (['(0)'], {}), '(0)\n', (149621, 149624), False, 'import gr\n'), ((149645, 149674), 'gr.setfillcolorind', 'gr.setfillcolorind', (['edgecolor'], {}), '(edgecolor)\n', (149663, 149674), False, 'import gr\n'), ((163507, 163545), 'numpy.sqrt', 'np.sqrt', (['(liste[0] ** 2 + liste[1] ** 2)'], {}), '(liste[0] ** 2 + liste[1] ** 2)\n', (163514, 163545), True, 'import numpy as np\n'), ((169187, 169225), 'numpy.sqrt', 'np.sqrt', (['(liste[0] ** 2 + liste[1] ** 2)'], {}), '(liste[0] ** 2 + liste[1] ** 2)\n', (169194, 169225), True, 'import numpy as np\n'), ((169247, 169276), 'gr.setfillcolorind', 'gr.setfillcolorind', (['edgecolor'], {}), '(edgecolor)\n', (169265, 169276), False, 'import gr\n'), ((172228, 172326), 'gr.polyline', 'gr.polyline', (['[0.5 + mlist[-1][0], 0.5 + mlist[0][0]]', '[0.5 + mlist[-1][1], 0.5 + mlist[0][1]]'], {}), '([0.5 + mlist[-1][0], 0.5 + mlist[0][0]], [0.5 + mlist[-1][1], \n 0.5 + mlist[0][1]])\n', (172239, 172326), False, 'import gr\n'), ((172668, 172766), 
'gr.polyline', 'gr.polyline', (['[0.5 + mlist[-1][0], 0.5 + mlist[0][0]]', '[0.5 + mlist[-1][1], 0.5 + mlist[0][1]]'], {}), '([0.5 + mlist[-1][0], 0.5 + mlist[0][0]], [0.5 + mlist[-1][1], \n 0.5 + mlist[0][1]])\n', (172679, 172766), False, 'import gr\n'), ((178312, 178335), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['z'], {}), '(z)\n', (178332, 178335), True, 'import numpy as np\n'), ((178394, 178417), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['c'], {}), '(c)\n', (178414, 178417), True, 'import numpy as np\n'), ((92450, 92518), 'gr.grid3d', 'gr.grid3d', (['x_tick', '(0)', 'z_tick', 'x_org[1]', 'y_org[1]', 'z_org[zi]', '(2)', '(0)', '(2)'], {}), '(x_tick, 0, z_tick, x_org[1], y_org[1], z_org[zi], 2, 0, 2)\n', (92459, 92518), False, 'import gr\n'), ((92539, 92602), 'gr.grid3d', 'gr.grid3d', (['(0)', 'y_tick', '(0)', 'x_org[1]', 'y_org[1]', 'z_org[zi]', '(0)', '(2)', '(0)'], {}), '(0, y_tick, 0, x_org[1], y_org[1], z_org[zi], 0, 2, 0)\n', (92548, 92602), False, 'import gr\n'), ((93364, 93454), 'gr.axes3d', 'gr.axes3d', (['(0)', '(0)', 'z_tick', 'x_org[0]', 'y_org[1]', 'z_org[zi]', '(0)', '(0)', 'z_major_count', '(-ticksize)'], {}), '(0, 0, z_tick, x_org[0], y_org[1], z_org[zi], 0, 0, z_major_count,\n -ticksize)\n', (93373, 93454), False, 'import gr\n'), ((93471, 93578), 'gr.axes3d', 'gr.axes3d', (['x_tick', 'y_tick', '(0)', 'x_org[0]', 'y_org[0]', 'z_org[zi]', 'x_major_count', 'y_major_count', '(0)', '(-ticksize)'], {}), '(x_tick, y_tick, 0, x_org[0], y_org[0], z_org[zi], x_major_count,\n y_major_count, 0, -ticksize)\n', (93480, 93578), False, 'import gr\n'), ((104946, 104965), 'numpy.zeros', 'np.zeros', (['(2 * n - 1)'], {}), '(2 * n - 1)\n', (104954, 104965), True, 'import numpy as np\n'), ((105002, 105021), 'numpy.zeros', 'np.zeros', (['(2 * n - 1)'], {}), '(2 * n - 1)\n', (105010, 105021), True, 'import numpy as np\n'), ((105404, 105419), 'numpy.zeros', 'np.zeros', (['(2 * n)'], {}), '(2 * n)\n', (105412, 105419), True, 'import numpy as 
np\n'), ((105700, 105715), 'numpy.zeros', 'np.zeros', (['(2 * n)'], {}), '(2 * n)\n', (105708, 105715), True, 'import numpy as np\n'), ((106489, 106518), 'gr.polymarker', 'gr.polymarker', (['[x[i]]', '[y[i]]'], {}), '([x[i]], [y[i]])\n', (106502, 106518), False, 'import gr\n'), ((133162, 133241), 'gr.setcolorrep', 'gr.setcolorrep', (['color_save_spot', 'current_rgb[0]', 'current_rgb[1]', 'current_rgb[2]'], {}), '(color_save_spot, current_rgb[0], current_rgb[1], current_rgb[2])\n', (133176, 133241), False, 'import gr\n'), ((133262, 133297), 'gr.setfillcolorind', 'gr.setfillcolorind', (['color_save_spot'], {}), '(color_save_spot)\n', (133280, 133297), False, 'import gr\n'), ((135671, 135700), 'gr.setlinecolorind', 'gr.setlinecolorind', (['edgecolor'], {}), '(edgecolor)\n', (135689, 135700), False, 'import gr\n'), ((135799, 135878), 'gr.setcolorrep', 'gr.setcolorrep', (['color_save_spot', 'current_rgb[0]', 'current_rgb[1]', 'current_rgb[2]'], {}), '(color_save_spot, current_rgb[0], current_rgb[1], current_rgb[2])\n', (135813, 135878), False, 'import gr\n'), ((135903, 135938), 'gr.setlinecolorind', 'gr.setlinecolorind', (['color_save_spot'], {}), '(color_save_spot)\n', (135921, 135938), False, 'import gr\n'), ((136550, 136629), 'gr.setcolorrep', 'gr.setcolorrep', (['color_save_spot', 'current_rgb[0]', 'current_rgb[1]', 'current_rgb[2]'], {}), '(color_save_spot, current_rgb[0], current_rgb[1], current_rgb[2])\n', (136564, 136629), False, 'import gr\n'), ((136650, 136685), 'gr.setlinecolorind', 'gr.setlinecolorind', (['color_save_spot'], {}), '(color_save_spot)\n', (136668, 136685), False, 'import gr\n'), ((137139, 137176), 'gr.setlinewidth', 'gr.setlinewidth', (['indedgewidths[i + 1]'], {}), '(indedgewidths[i + 1])\n', (137154, 137176), False, 'import gr\n'), ((137219, 137245), 'gr.setlinewidth', 'gr.setlinewidth', (['edgewidth'], {}), '(edgewidth)\n', (137234, 137245), False, 'import gr\n'), ((137773, 137802), 'gr.setlinecolorind', 'gr.setlinecolorind', (['edgecolor'], 
{}), '(edgecolor)\n', (137791, 137802), False, 'import gr\n'), ((137889, 137968), 'gr.setcolorrep', 'gr.setcolorrep', (['color_save_spot', 'current_rgb[0]', 'current_rgb[1]', 'current_rgb[2]'], {}), '(color_save_spot, current_rgb[0], current_rgb[1], current_rgb[2])\n', (137903, 137968), False, 'import gr\n'), ((137989, 138024), 'gr.setlinecolorind', 'gr.setlinecolorind', (['color_save_spot'], {}), '(color_save_spot)\n', (138007, 138024), False, 'import gr\n'), ((139218, 139254), 'gr.setcolormap', 'gr.setcolormap', (['gr.COLORMAPS[tup[1]]'], {}), '(gr.COLORMAPS[tup[1]])\n', (139232, 139254), False, 'import gr\n'), ((139579, 139615), 'gr.setcolormap', 'gr.setcolormap', (['gr.COLORMAPS[tup[0]]'], {}), '(gr.COLORMAPS[tup[0]])\n', (139593, 139615), False, 'import gr\n'), ((139871, 139907), 'gr.setcolormap', 'gr.setcolormap', (['gr.COLORMAPS[tup[1]]'], {}), '(gr.COLORMAPS[tup[1]])\n', (139885, 139907), False, 'import gr\n'), ((159169, 159190), 'gr.settransparency', 'gr.settransparency', (['(1)'], {}), '(1)\n', (159187, 159190), False, 'import gr\n'), ((159215, 159236), 'gr.setfillintstyle', 'gr.setfillintstyle', (['(0)'], {}), '(0)\n', (159233, 159236), False, 'import gr\n'), ((159261, 159290), 'gr.setfillcolorind', 'gr.setfillcolorind', (['edgecolor'], {}), '(edgecolor)\n', (159279, 159290), False, 'import gr\n'), ((161897, 162009), 'gr.fillarc', 'gr.fillarc', (['(0.5 - rect)', '(0.5 + rect)', '(0.5 - rect)', '(0.5 + rect)', '(x * (360 / num_bins))', '((x + 1) * (360 / num_bins))'], {}), '(0.5 - rect, 0.5 + rect, 0.5 - rect, 0.5 + rect, x * (360 /\n num_bins), (x + 1) * (360 / num_bins))\n', (161907, 162009), False, 'import gr\n'), ((162066, 162087), 'gr.settransparency', 'gr.settransparency', (['(1)'], {}), '(1)\n', (162084, 162087), False, 'import gr\n'), ((162112, 162133), 'gr.setfillintstyle', 'gr.setfillintstyle', (['(0)'], {}), '(0)\n', (162130, 162133), False, 'import gr\n'), ((162158, 162187), 'gr.setfillcolorind', 'gr.setfillcolorind', (['edgecolor'], {}), 
'(edgecolor)\n', (162176, 162187), False, 'import gr\n'), ((162213, 162325), 'gr.fillarc', 'gr.fillarc', (['(0.5 - rect)', '(0.5 + rect)', '(0.5 - rect)', '(0.5 + rect)', '(x * (360 / num_bins))', '((x + 1) * (360 / num_bins))'], {}), '(0.5 - rect, 0.5 + rect, 0.5 - rect, 0.5 + rect, x * (360 /\n num_bins), (x + 1) * (360 / num_bins))\n', (162223, 162325), False, 'import gr\n'), ((170539, 170651), 'gr.drawarc', 'gr.drawarc', (['(0.5 - rect)', '(0.5 + rect)', '(0.5 - rect)', '(0.5 + rect)', '(x * (360 / num_bins))', '((x + 1) * (360 / num_bins))'], {}), '(0.5 - rect, 0.5 + rect, 0.5 - rect, 0.5 + rect, x * (360 /\n num_bins), (x + 1) * (360 / num_bins))\n', (170549, 170651), False, 'import gr\n'), ((82762, 82784), 'numpy.abs', 'np.abs', (['(x[1:] - x[:-1])'], {}), '(x[1:] - x[:-1])\n', (82768, 82784), True, 'import numpy as np\n'), ((82856, 82878), 'numpy.abs', 'np.abs', (['(y[1:] - y[:-1])'], {}), '(y[1:] - y[:-1])\n', (82862, 82878), True, 'import numpy as np\n'), ((92667, 92735), 'gr.grid3d', 'gr.grid3d', (['x_tick', '(0)', 'z_tick', 'x_org[1]', 'y_org[0]', 'z_org[zi]', '(2)', '(0)', '(2)'], {}), '(x_tick, 0, z_tick, x_org[1], y_org[0], z_org[zi], 2, 0, 2)\n', (92676, 92735), False, 'import gr\n'), ((92756, 92819), 'gr.grid3d', 'gr.grid3d', (['(0)', 'y_tick', '(0)', 'x_org[1]', 'y_org[0]', 'z_org[zi]', '(0)', '(2)', '(0)'], {}), '(0, y_tick, 0, x_org[1], y_org[0], z_org[zi], 0, 2, 0)\n', (92765, 92819), False, 'import gr\n'), ((92862, 92929), 'gr.grid3d', 'gr.grid3d', (['x_tick', '(0)', 'z_tick', 'x_org[0]', 'y_org[0]', 'z_org[0]', '(2)', '(0)', '(2)'], {}), '(x_tick, 0, z_tick, x_org[0], y_org[0], z_org[0], 2, 0, 2)\n', (92871, 92929), False, 'import gr\n'), ((92950, 93013), 'gr.grid3d', 'gr.grid3d', (['(0)', 'y_tick', '(0)', 'x_org[0]', 'y_org[0]', 'z_org[zi]', '(0)', '(2)', '(0)'], {}), '(0, y_tick, 0, x_org[0], y_org[0], z_org[zi], 0, 2, 0)\n', (92959, 93013), False, 'import gr\n'), ((93639, 93746), 'gr.axes3d', 'gr.axes3d', (['x_tick', '(0)', 'z_tick', 
'x_org[1]', 'y_org[1]', 'z_org[zi]', 'x_major_count', '(0)', 'z_major_count', 'ticksize'], {}), '(x_tick, 0, z_tick, x_org[1], y_org[1], z_org[zi], x_major_count, \n 0, z_major_count, ticksize)\n', (93648, 93746), False, 'import gr\n'), ((93762, 93852), 'gr.axes3d', 'gr.axes3d', (['(0)', 'y_tick', '(0)', 'x_org[0]', 'y_org[0]', 'z_org[zi]', '(0)', 'y_major_count', '(0)', '(-ticksize)'], {}), '(0, y_tick, 0, x_org[0], y_org[0], z_org[zi], 0, y_major_count, 0,\n -ticksize)\n', (93771, 93852), False, 'import gr\n'), ((93891, 93981), 'gr.axes3d', 'gr.axes3d', (['(0)', '(0)', 'z_tick', 'x_org[1]', 'y_org[0]', 'z_org[zi]', '(0)', '(0)', 'z_major_count', '(-ticksize)'], {}), '(0, 0, z_tick, x_org[1], y_org[0], z_org[zi], 0, 0, z_major_count,\n -ticksize)\n', (93900, 93981), False, 'import gr\n'), ((93998, 94104), 'gr.axes3d', 'gr.axes3d', (['x_tick', 'y_tick', '(0)', 'x_org[1]', 'y_org[1]', 'z_org[zi]', 'x_major_count', 'y_major_count', '(0)', 'ticksize'], {}), '(x_tick, y_tick, 0, x_org[1], y_org[1], z_org[zi], x_major_count,\n y_major_count, 0, ticksize)\n', (94007, 94104), False, 'import gr\n'), ((106271, 106301), 'gr.setmarkersize', 'gr.setmarkersize', (['(z[i] / 100.0)'], {}), '(z[i] / 100.0)\n', (106287, 106301), False, 'import gr\n'), ((106439, 106468), 'gr.setmarkercolorind', 'gr.setmarkercolorind', (['c_index'], {}), '(c_index)\n', (106459, 106468), False, 'import gr\n'), ((132366, 132398), 'gr.setfillcolorind', 'gr.setfillcolorind', (['colorlist[i]'], {}), '(colorlist[i])\n', (132384, 132398), False, 'import gr\n'), ((132512, 132591), 'gr.setcolorrep', 'gr.setcolorrep', (['color_save_spot', 'current_rgb[0]', 'current_rgb[1]', 'current_rgb[2]'], {}), '(color_save_spot, current_rgb[0], current_rgb[1], current_rgb[2])\n', (132526, 132591), False, 'import gr\n'), ((132620, 132655), 'gr.setfillcolorind', 'gr.setfillcolorind', (['color_save_spot'], {}), '(color_save_spot)\n', (132638, 132655), False, 'import gr\n'), ((134388, 134420), 'gr.setfillcolorind', 
'gr.setfillcolorind', (['colorlist[a]'], {}), '(colorlist[a])\n', (134406, 134420), False, 'import gr\n'), ((134522, 134601), 'gr.setcolorrep', 'gr.setcolorrep', (['color_save_spot', 'current_rgb[0]', 'current_rgb[1]', 'current_rgb[2]'], {}), '(color_save_spot, current_rgb[0], current_rgb[1], current_rgb[2])\n', (134536, 134601), False, 'import gr\n'), ((134626, 134661), 'gr.setfillcolorind', 'gr.setfillcolorind', (['color_save_spot'], {}), '(color_save_spot)\n', (134644, 134661), False, 'import gr\n'), ((134755, 134780), 'gr.setfillcolorind', 'gr.setfillcolorind', (['color'], {}), '(color)\n', (134773, 134780), False, 'import gr\n'), ((134875, 134954), 'gr.setcolorrep', 'gr.setcolorrep', (['color_save_spot', 'current_rgb[0]', 'current_rgb[1]', 'current_rgb[2]'], {}), '(color_save_spot, current_rgb[0], current_rgb[1], current_rgb[2])\n', (134889, 134954), False, 'import gr\n'), ((134979, 135014), 'gr.setfillcolorind', 'gr.setfillcolorind', (['color_save_spot'], {}), '(color_save_spot)\n', (134997, 135014), False, 'import gr\n'), ((136783, 136812), 'gr.setlinecolorind', 'gr.setlinecolorind', (['edgecolor'], {}), '(edgecolor)\n', (136801, 136812), False, 'import gr\n'), ((136911, 136990), 'gr.setcolorrep', 'gr.setcolorrep', (['color_save_spot', 'current_rgb[0]', 'current_rgb[1]', 'current_rgb[2]'], {}), '(color_save_spot, current_rgb[0], current_rgb[1], current_rgb[2])\n', (136925, 136990), False, 'import gr\n'), ((137015, 137050), 'gr.setlinecolorind', 'gr.setlinecolorind', (['color_save_spot'], {}), '(color_save_spot)\n', (137033, 137050), False, 'import gr\n'), ((153010, 153122), 'gr.fillarc', 'gr.fillarc', (['(0.5 - rect)', '(0.5 + rect)', '(0.5 - rect)', '(0.5 + rect)', '(x * (360 / num_bins))', '((x + 1) * (360 / num_bins))'], {}), '(0.5 - rect, 0.5 + rect, 0.5 - rect, 0.5 + rect, x * (360 /\n num_bins), (x + 1) * (360 / num_bins))\n', (153020, 153122), False, 'import gr\n'), ((155799, 155851), 'numpy.sqrt', 'np.sqrt', (['(mlist[kaman][0] ** 2 + mlist[kaman][1] 
** 2)'], {}), '(mlist[kaman][0] ** 2 + mlist[kaman][1] ** 2)\n', (155806, 155851), True, 'import numpy as np\n'), ((158920, 159032), 'gr.fillarc', 'gr.fillarc', (['(0.5 - rect)', '(0.5 + rect)', '(0.5 - rect)', '(0.5 + rect)', '(binedges[x] * convert)', '(binedges[x + 1] * convert)'], {}), '(0.5 - rect, 0.5 + rect, 0.5 - rect, 0.5 + rect, binedges[x] *\n convert, binedges[x + 1] * convert)\n', (158930, 159032), False, 'import gr\n'), ((159348, 159460), 'gr.fillarc', 'gr.fillarc', (['(0.5 - rect)', '(0.5 + rect)', '(0.5 - rect)', '(0.5 + rect)', '(binedges[x] * convert)', '(binedges[x + 1] * convert)'], {}), '(0.5 - rect, 0.5 + rect, 0.5 - rect, 0.5 + rect, binedges[x] *\n convert, binedges[x + 1] * convert)\n', (159358, 159460), False, 'import gr\n'), ((165349, 165461), 'gr.drawarc', 'gr.drawarc', (['(0.5 - rect)', '(0.5 + rect)', '(0.5 - rect)', '(0.5 + rect)', '(binedges[x] * convert)', '(binedges[x + 1] * convert)'], {}), '(0.5 - rect, 0.5 + rect, 0.5 - rect, 0.5 + rect, binedges[x] *\n convert, binedges[x + 1] * convert)\n', (165359, 165461), False, 'import gr\n'), ((165648, 165667), 'numpy.cos', 'np.cos', (['binedges[0]'], {}), '(binedges[0])\n', (165654, 165667), True, 'import numpy as np\n'), ((165683, 165702), 'numpy.cos', 'np.cos', (['binedges[0]'], {}), '(binedges[0])\n', (165689, 165702), True, 'import numpy as np\n'), ((165751, 165770), 'numpy.sin', 'np.sin', (['binedges[0]'], {}), '(binedges[0])\n', (165757, 165770), True, 'import numpy as np\n'), ((165786, 165805), 'numpy.sin', 'np.sin', (['binedges[0]'], {}), '(binedges[0])\n', (165792, 165805), True, 'import numpy as np\n'), ((170244, 170385), 'gr.drawarc', 'gr.drawarc', (['(0.5 - r_min * 0.4)', '(0.5 + r_min * 0.4)', '(0.5 - r_min * 0.4)', '(0.5 + r_min * 0.4)', '(x * (360 / num_bins))', '((x + 1) * (360 / num_bins))'], {}), '(0.5 - r_min * 0.4, 0.5 + r_min * 0.4, 0.5 - r_min * 0.4, 0.5 + \n r_min * 0.4, x * (360 / num_bins), (x + 1) * (360 / num_bins))\n', (170254, 170385), False, 'import gr\n'), 
((170850, 170894), 'numpy.sqrt', 'np.sqrt', (['(mlist[x][0] ** 2 + mlist[x][1] ** 2)'], {}), '(mlist[x][0] ** 2 + mlist[x][1] ** 2)\n', (170857, 170894), True, 'import numpy as np\n'), ((171000, 171052), 'numpy.sqrt', 'np.sqrt', (['(mlist[x - 1][0] ** 2 + mlist[x - 1][1] ** 2)'], {}), '(mlist[x - 1][0] ** 2 + mlist[x - 1][1] ** 2)\n', (171007, 171052), True, 'import numpy as np\n'), ((171758, 171861), 'gr.polyline', 'gr.polyline', (['[0.5 + mlist[x][0], 0.5 + mlist[x - 1][0]]', '[0.5 + mlist[x][1], 0.5 + mlist[x - 1][1]]'], {}), '([0.5 + mlist[x][0], 0.5 + mlist[x - 1][0]], [0.5 + mlist[x][1],\n 0.5 + mlist[x - 1][1]])\n', (171769, 171861), False, 'import gr\n'), ((171966, 171975), 'numpy.cos', 'np.cos', (['(0)'], {}), '(0)\n', (171972, 171975), True, 'import numpy as np\n'), ((172044, 172053), 'numpy.sin', 'np.sin', (['(0)'], {}), '(0)\n', (172050, 172053), True, 'import numpy as np\n'), ((172120, 172129), 'numpy.cos', 'np.cos', (['(0)'], {}), '(0)\n', (172126, 172129), True, 'import numpy as np\n'), ((172196, 172205), 'numpy.sin', 'np.sin', (['(0)'], {}), '(0)\n', (172202, 172205), True, 'import numpy as np\n'), ((172508, 172611), 'gr.polyline', 'gr.polyline', (['[0.5 + mlist[x][0], 0.5 + mlist[x - 1][0]]', '[0.5 + mlist[x][1], 0.5 + mlist[x - 1][1]]'], {}), '([0.5 + mlist[x][0], 0.5 + mlist[x - 1][0]], [0.5 + mlist[x][1],\n 0.5 + mlist[x - 1][1]])\n', (172519, 172611), False, 'import gr\n'), ((106942, 106963), 'gr.setlinecolorind', 'gr.setlinecolorind', (['(1)'], {}), '(1)\n', (106960, 106963), False, 'import gr\n'), ((106976, 107022), 'gr.polyline', 'gr.polyline', (["_plt.kwargs['window'][:2]", '[0, 0]'], {}), "(_plt.kwargs['window'][:2], [0, 0])\n", (106987, 107022), False, 'import gr\n'), ((107035, 107079), 'gr.setmarkertype', 'gr.setmarkertype', (['gr.MARKERTYPE_SOLID_CIRCLE'], {}), '(gr.MARKERTYPE_SOLID_CIRCLE)\n', (107051, 107079), False, 'import gr\n'), ((107092, 107112), 'gr.uselinespec', 'gr.uselinespec', (['spec'], {}), '(spec)\n', (107106, 107112), 
False, 'import gr\n'), ((107209, 107228), 'gr.polymarker', 'gr.polymarker', (['x', 'y'], {}), '(x, y)\n', (107222, 107228), False, 'import gr\n'), ((133440, 133472), 'gr.setfillcolorind', 'gr.setfillcolorind', (['colorlist[i]'], {}), '(colorlist[i])\n', (133458, 133472), False, 'import gr\n'), ((133586, 133665), 'gr.setcolorrep', 'gr.setcolorrep', (['color_save_spot', 'current_rgb[0]', 'current_rgb[1]', 'current_rgb[2]'], {}), '(color_save_spot, current_rgb[0], current_rgb[1], current_rgb[2])\n', (133600, 133665), False, 'import gr\n'), ((133694, 133729), 'gr.setfillcolorind', 'gr.setfillcolorind', (['color_save_spot'], {}), '(color_save_spot)\n', (133712, 133729), False, 'import gr\n'), ((149208, 149260), 'numpy.sqrt', 'np.sqrt', (['(mlist[kaman][0] ** 2 + mlist[kaman][1] ** 2)'], {}), '(mlist[kaman][0] ** 2 + mlist[kaman][1] ** 2)\n', (149215, 149260), True, 'import numpy as np\n'), ((151486, 151598), 'gr.fillarc', 'gr.fillarc', (['(0.5 - rect)', '(0.5 + rect)', '(0.5 - rect)', '(0.5 + rect)', '(binedges[x] * convert)', '(binedges[x + 1] * convert)'], {}), '(0.5 - rect, 0.5 + rect, 0.5 - rect, 0.5 + rect, binedges[x] *\n convert, binedges[x + 1] * convert)\n', (151496, 151598), False, 'import gr\n'), ((152356, 152497), 'gr.drawarc', 'gr.drawarc', (['(0.5 - r_min * 0.4)', '(0.5 + r_min * 0.4)', '(0.5 - r_min * 0.4)', '(0.5 + r_min * 0.4)', '(x * (360 / num_bins))', '((x + 1) * (360 / num_bins))'], {}), '(0.5 - r_min * 0.4, 0.5 + r_min * 0.4, 0.5 - r_min * 0.4, 0.5 + \n r_min * 0.4, x * (360 / num_bins), (x + 1) * (360 / num_bins))\n', (152366, 152497), False, 'import gr\n'), ((152569, 152677), 'gr.polyline', 'gr.polyline', (['[0.5 + r_min_list[0], 0.5 + mlist[2 * x][0]]', '[0.5 + r_min_list[1], 0.5 + mlist[2 * x][1]]'], {}), '([0.5 + r_min_list[0], 0.5 + mlist[2 * x][0]], [0.5 + r_min_list\n [1], 0.5 + mlist[2 * x][1]])\n', (152580, 152677), False, 'import gr\n'), ((152750, 152867), 'gr.polyline', 'gr.polyline', (['[0.5 + r_min_list2[0], 0.5 + mlist[2 * x + 
1][0]]', '[0.5 + r_min_list2[1], 0.5 + mlist[2 * x + 1][1]]'], {}), '([0.5 + r_min_list2[0], 0.5 + mlist[2 * x + 1][0]], [0.5 +\n r_min_list2[1], 0.5 + mlist[2 * x + 1][1]])\n', (152761, 152867), False, 'import gr\n'), ((156702, 156769), 'numpy.array', 'np.array', (['np.ogrid[start_angle:end_angle:num_angle]'], {'dtype': 'np.float'}), '(np.ogrid[start_angle:end_angle:num_angle], dtype=np.float)\n', (156710, 156769), True, 'import numpy as np\n'), ((157953, 157974), 'gr.setfillintstyle', 'gr.setfillintstyle', (['(1)'], {}), '(1)\n', (157971, 157974), False, 'import gr\n'), ((158327, 158348), 'gr.setfillintstyle', 'gr.setfillintstyle', (['(0)'], {}), '(0)\n', (158345, 158348), False, 'import gr\n'), ((158381, 158410), 'gr.setfillcolorind', 'gr.setfillcolorind', (['edgecolor'], {}), '(edgecolor)\n', (158399, 158410), False, 'import gr\n'), ((160090, 160157), 'numpy.array', 'np.array', (['np.ogrid[start_angle:end_angle:num_angle]'], {'dtype': 'np.float'}), '(np.ogrid[start_angle:end_angle:num_angle], dtype=np.float)\n', (160098, 160157), True, 'import numpy as np\n'), ((160963, 160984), 'gr.setfillintstyle', 'gr.setfillintstyle', (['(1)'], {}), '(1)\n', (160981, 160984), False, 'import gr\n'), ((161337, 161358), 'gr.setfillintstyle', 'gr.setfillintstyle', (['(0)'], {}), '(0)\n', (161355, 161358), False, 'import gr\n'), ((161391, 161420), 'gr.setfillcolorind', 'gr.setfillcolorind', (['edgecolor'], {}), '(edgecolor)\n', (161409, 161420), False, 'import gr\n'), ((163671, 163723), 'numpy.sqrt', 'np.sqrt', (['(mlist[kaman][0] ** 2 + mlist[kaman][1] ** 2)'], {}), '(mlist[kaman][0] ** 2 + mlist[kaman][1] ** 2)\n', (163678, 163723), True, 'import numpy as np\n'), ((164959, 165100), 'gr.drawarc', 'gr.drawarc', (['(0.5 - r_min * 0.4)', '(0.5 + r_min * 0.4)', '(0.5 - r_min * 0.4)', '(0.5 + r_min * 0.4)', '(binedges[x] * convert)', '(binedges[x + 1] * convert)'], {}), '(0.5 - r_min * 0.4, 0.5 + r_min * 0.4, 0.5 - r_min * 0.4, 0.5 + \n r_min * 0.4, binedges[x] * convert, binedges[x 
+ 1] * convert)\n', (164969, 165100), False, 'import gr\n'), ((166427, 166450), 'numpy.cos', 'np.cos', (['binedges[x + 1]'], {}), '(binedges[x + 1])\n', (166433, 166450), True, 'import numpy as np\n'), ((166504, 166527), 'numpy.sin', 'np.sin', (['binedges[x + 1]'], {}), '(binedges[x + 1])\n', (166510, 166527), True, 'import numpy as np\n'), ((167673, 167696), 'numpy.cos', 'np.cos', (['binedges[x + 1]'], {}), '(binedges[x + 1])\n', (167679, 167696), True, 'import numpy as np\n'), ((167750, 167773), 'numpy.sin', 'np.sin', (['binedges[x + 1]'], {}), '(binedges[x + 1])\n', (167756, 167773), True, 'import numpy as np\n'), ((169402, 169454), 'numpy.sqrt', 'np.sqrt', (['(mlist[kaman][0] ** 2 + mlist[kaman][1] ** 2)'], {}), '(mlist[kaman][0] ** 2 + mlist[kaman][1] ** 2)\n', (169409, 169454), True, 'import numpy as np\n'), ((107166, 107196), 'gr.polyline', 'gr.polyline', (['[xi, xi]', '[0, yi]'], {}), '([xi, xi], [0, yi])\n', (107177, 107196), False, 'import gr\n'), ((150345, 150486), 'gr.drawarc', 'gr.drawarc', (['(0.5 - r_min * 0.4)', '(0.5 + r_min * 0.4)', '(0.5 - r_min * 0.4)', '(0.5 + r_min * 0.4)', '(binedges[x] * convert)', '(binedges[x + 1] * convert)'], {}), '(0.5 - r_min * 0.4, 0.5 + r_min * 0.4, 0.5 - r_min * 0.4, 0.5 + \n r_min * 0.4, binedges[x] * convert, binedges[x + 1] * convert)\n', (150355, 150486), False, 'import gr\n'), ((158019, 158080), 'numpy.hstack', 'np.hstack', (['(line_1_x, arc_1_x, line_2_x[::-1], arc_2_x[::-1])'], {}), '((line_1_x, arc_1_x, line_2_x[::-1], arc_2_x[::-1]))\n', (158028, 158080), True, 'import numpy as np\n'), ((158156, 158217), 'numpy.hstack', 'np.hstack', (['(line_1_y, arc_1_y, line_2_y[::-1], arc_2_y[::-1])'], {}), '((line_1_y, arc_1_y, line_2_y[::-1], arc_2_y[::-1]))\n', (158165, 158217), True, 'import numpy as np\n'), ((158455, 158516), 'numpy.hstack', 'np.hstack', (['(line_1_x, arc_1_x, line_2_x[::-1], arc_2_x[::-1])'], {}), '((line_1_x, arc_1_x, line_2_x[::-1], arc_2_x[::-1]))\n', (158464, 158516), True, 'import numpy as 
np\n'), ((158592, 158653), 'numpy.hstack', 'np.hstack', (['(line_1_y, arc_1_y, line_2_y[::-1], arc_2_y[::-1])'], {}), '((line_1_y, arc_1_y, line_2_y[::-1], arc_2_y[::-1]))\n', (158601, 158653), True, 'import numpy as np\n'), ((161029, 161090), 'numpy.hstack', 'np.hstack', (['(line_1_x, arc_1_x, line_2_x[::-1], arc_2_x[::-1])'], {}), '((line_1_x, arc_1_x, line_2_x[::-1], arc_2_x[::-1]))\n', (161038, 161090), True, 'import numpy as np\n'), ((161166, 161227), 'numpy.hstack', 'np.hstack', (['(line_1_y, arc_1_y, line_2_y[::-1], arc_2_y[::-1])'], {}), '((line_1_y, arc_1_y, line_2_y[::-1], arc_2_y[::-1]))\n', (161175, 161227), True, 'import numpy as np\n'), ((161465, 161526), 'numpy.hstack', 'np.hstack', (['(line_1_x, arc_1_x, line_2_x[::-1], arc_2_x[::-1])'], {}), '((line_1_x, arc_1_x, line_2_x[::-1], arc_2_x[::-1]))\n', (161474, 161526), True, 'import numpy as np\n'), ((161602, 161663), 'numpy.hstack', 'np.hstack', (['(line_1_y, arc_1_y, line_2_y[::-1], arc_2_y[::-1])'], {}), '((line_1_y, arc_1_y, line_2_y[::-1], arc_2_y[::-1]))\n', (161611, 161663), True, 'import numpy as np\n'), ((166658, 166677), 'numpy.cos', 'np.cos', (['binedges[0]'], {}), '(binedges[0])\n', (166664, 166677), True, 'import numpy as np\n'), ((166732, 166751), 'numpy.cos', 'np.cos', (['binedges[0]'], {}), '(binedges[0])\n', (166738, 166751), True, 'import numpy as np\n'), ((166807, 166826), 'numpy.sin', 'np.sin', (['binedges[0]'], {}), '(binedges[0])\n', (166813, 166826), True, 'import numpy as np\n'), ((166881, 166900), 'numpy.sin', 'np.sin', (['binedges[0]'], {}), '(binedges[0])\n', (166887, 166900), True, 'import numpy as np\n'), ((166957, 166977), 'numpy.cos', 'np.cos', (['binedges[-1]'], {}), '(binedges[-1])\n', (166963, 166977), True, 'import numpy as np\n'), ((167033, 167053), 'numpy.cos', 'np.cos', (['binedges[-1]'], {}), '(binedges[-1])\n', (167039, 167053), True, 'import numpy as np\n'), ((167109, 167129), 'numpy.sin', 'np.sin', (['binedges[-1]'], {}), '(binedges[-1])\n', (167115, 167129), 
True, 'import numpy as np\n'), ((167185, 167205), 'numpy.sin', 'np.sin', (['binedges[-1]'], {}), '(binedges[-1])\n', (167191, 167205), True, 'import numpy as np\n'), ((175817, 175834), 'numpy.meshgrid', 'np.meshgrid', (['y', 'x'], {}), '(y, x)\n', (175828, 175834), True, 'import numpy as np\n'), ((175876, 175895), 'numpy.prod', 'np.prod', (['xy_x.shape'], {}), '(xy_x.shape)\n', (175883, 175895), True, 'import numpy as np\n'), ((175937, 175956), 'numpy.prod', 'np.prod', (['xy_y.shape'], {}), '(xy_y.shape)\n', (175944, 175956), True, 'import numpy as np\n'), ((175990, 176020), 'numpy.stack', 'np.stack', (['(xy_x, xy_y)'], {'axis': '(1)'}), '((xy_x, xy_y), axis=1)\n', (175998, 176020), True, 'import numpy as np\n'), ((107362, 107385), 'gr.setfillcolorind', 'gr.setfillcolorind', (['(989)'], {}), '(989)\n', (107380, 107385), False, 'import gr\n'), ((107402, 107439), 'gr.setfillintstyle', 'gr.setfillintstyle', (['gr.INTSTYLE_SOLID'], {}), '(gr.INTSTYLE_SOLID)\n', (107420, 107439), False, 'import gr\n'), ((107456, 107500), 'gr.fillrect', 'gr.fillrect', (['x[i - 1]', 'x[i]', 'y_min', 'y[i - 1]'], {}), '(x[i - 1], x[i], y_min, y[i - 1])\n', (107467, 107500), False, 'import gr\n'), ((107517, 107538), 'gr.setfillcolorind', 'gr.setfillcolorind', (['(1)'], {}), '(1)\n', (107535, 107538), False, 'import gr\n'), ((107555, 107593), 'gr.setfillintstyle', 'gr.setfillintstyle', (['gr.INTSTYLE_HOLLOW'], {}), '(gr.INTSTYLE_HOLLOW)\n', (107573, 107593), False, 'import gr\n'), ((107610, 107654), 'gr.fillrect', 'gr.fillrect', (['x[i - 1]', 'x[i]', 'y_min', 'y[i - 1]'], {}), '(x[i - 1], x[i], y_min, y[i - 1])\n', (107621, 107654), False, 'import gr\n'), ((107748, 107780), 'gr.setspace', 'gr.setspace', (['z_min', 'z_max', '(0)', '(90)'], {}), '(z_min, z_max, 0, 90)\n', (107759, 107780), False, 'import gr\n'), ((108417, 108433), 'numpy.prod', 'np.prod', (['z.shape'], {}), '(z.shape)\n', (108424, 108433), True, 'import numpy as np\n'), ((108446, 108474), 'gr.contour', 'gr.contour', (['x', 
'y', 'h', 'z', '(1000)'], {}), '(x, y, h, z, 1000)\n', (108456, 108474), False, 'import gr\n'), ((167979, 167998), 'numpy.cos', 'np.cos', (['binedges[0]'], {}), '(binedges[0])\n', (167985, 167998), True, 'import numpy as np\n'), ((168072, 168091), 'numpy.sin', 'np.sin', (['binedges[0]'], {}), '(binedges[0])\n', (168078, 168091), True, 'import numpy as np\n'), ((168192, 168212), 'numpy.cos', 'np.cos', (['binedges[-1]'], {}), '(binedges[-1])\n', (168198, 168212), True, 'import numpy as np\n'), ((168278, 168298), 'numpy.sin', 'np.sin', (['binedges[-1]'], {}), '(binedges[-1])\n', (168284, 168298), True, 'import numpy as np\n'), ((107908, 107936), 'gr.gridit', 'gr.gridit', (['x', 'y', 'z', '(200)', '(200)'], {}), '(x, y, z, 200, 200)\n', (107917, 107936), False, 'import gr\n'), ((107957, 107968), 'numpy.array', 'np.array', (['z'], {}), '(z)\n', (107965, 107968), True, 'import numpy as np\n'), ((108086, 108109), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['z'], {}), '(z)\n', (108106, 108109), True, 'import numpy as np\n'), ((108659, 108691), 'gr.setspace', 'gr.setspace', (['z_min', 'z_max', '(0)', '(90)'], {}), '(z_min, z_max, 0, 90)\n', (108670, 108691), False, 'import gr\n'), ((108800, 108818), 'gr.setscale', 'gr.setscale', (['scale'], {}), '(scale)\n', (108811, 108818), False, 'import gr\n'), ((109400, 109416), 'numpy.prod', 'np.prod', (['z.shape'], {}), '(z.shape)\n', (109407, 109416), True, 'import numpy as np\n'), ((109519, 109540), 'gr.setlinecolorind', 'gr.setlinecolorind', (['(1)'], {}), '(1)\n', (109537, 109540), False, 'import gr\n'), ((109553, 109579), 'gr.contourf', 'gr.contourf', (['x', 'y', 'h', 'z', '(0)'], {}), '(x, y, h, z, 0)\n', (109564, 109579), False, 'import gr\n'), ((156856, 156867), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (156862, 156867), True, 'import numpy as np\n'), ((156943, 156954), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (156949, 156954), True, 'import numpy as np\n'), ((157041, 157052), 'numpy.cos', 'np.cos', 
(['phi'], {}), '(phi)\n', (157047, 157052), True, 'import numpy as np\n'), ((157138, 157149), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (157144, 157149), True, 'import numpy as np\n'), ((157243, 157262), 'numpy.cos', 'np.cos', (['binedges[x]'], {}), '(binedges[x])\n', (157249, 157262), True, 'import numpy as np\n'), ((157339, 157358), 'numpy.cos', 'np.cos', (['binedges[x]'], {}), '(binedges[x])\n', (157345, 157358), True, 'import numpy as np\n'), ((157424, 157443), 'numpy.sin', 'np.sin', (['binedges[x]'], {}), '(binedges[x])\n', (157430, 157443), True, 'import numpy as np\n'), ((157520, 157539), 'numpy.sin', 'np.sin', (['binedges[x]'], {}), '(binedges[x])\n', (157526, 157539), True, 'import numpy as np\n'), ((157606, 157629), 'numpy.cos', 'np.cos', (['binedges[x + 1]'], {}), '(binedges[x + 1])\n', (157612, 157629), True, 'import numpy as np\n'), ((157706, 157729), 'numpy.cos', 'np.cos', (['binedges[x + 1]'], {}), '(binedges[x + 1])\n', (157712, 157729), True, 'import numpy as np\n'), ((157795, 157818), 'numpy.sin', 'np.sin', (['binedges[x + 1]'], {}), '(binedges[x + 1])\n', (157801, 157818), True, 'import numpy as np\n'), ((157895, 157918), 'numpy.sin', 'np.sin', (['binedges[x + 1]'], {}), '(binedges[x + 1])\n', (157901, 157918), True, 'import numpy as np\n'), ((160244, 160255), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (160250, 160255), True, 'import numpy as np\n'), ((160331, 160342), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (160337, 160342), True, 'import numpy as np\n'), ((160429, 160440), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (160435, 160440), True, 'import numpy as np\n'), ((160526, 160537), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (160532, 160537), True, 'import numpy as np\n'), ((167502, 167521), 'numpy.cos', 'np.cos', (['binedges[x]'], {}), '(binedges[x])\n', (167508, 167521), True, 'import numpy as np\n'), ((167599, 167618), 'numpy.sin', 'np.sin', (['binedges[x]'], {}), '(binedges[x])\n', (167605, 167618), True, 
'import numpy as np\n'), ((108891, 108919), 'gr.gridit', 'gr.gridit', (['x', 'y', 'z', '(200)', '(200)'], {}), '(x, y, z, 200, 200)\n', (108900, 108919), False, 'import gr\n'), ((108940, 108951), 'numpy.array', 'np.array', (['z'], {}), '(z)\n', (108948, 108951), True, 'import numpy as np\n'), ((109069, 109092), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['z'], {}), '(z)\n', (109089, 109092), True, 'import numpy as np\n'), ((109681, 109703), 'gr.hexbin', 'gr.hexbin', (['x', 'y', 'nbins'], {}), '(x, y, nbins)\n', (109690, 109703), False, 'import gr\n'), ((166140, 166159), 'numpy.cos', 'np.cos', (['binedges[x]'], {}), '(binedges[x])\n', (166146, 166159), True, 'import numpy as np\n'), ((166241, 166260), 'numpy.sin', 'np.sin', (['binedges[x]'], {}), '(binedges[x])\n', (166247, 166260), True, 'import numpy as np\n'), ((108025, 108034), 'numpy.min', 'np.min', (['z'], {}), '(z)\n', (108031, 108034), True, 'import numpy as np\n'), ((108036, 108045), 'numpy.max', 'np.max', (['z'], {}), '(z)\n', (108042, 108045), True, 'import numpy as np\n'), ((110172, 110196), 'numpy.zeros', 'np.zeros', (['(256)', 'np.uint32'], {}), '(256, np.uint32)\n', (110180, 110196), True, 'import numpy as np\n'), ((110859, 110895), 'numpy.zeros', 'np.zeros', (['(height, width)', 'np.uint32'], {}), '((height, width), np.uint32)\n', (110867, 110895), True, 'import numpy as np\n'), ((111079, 111140), 'gr.drawimage', 'gr.drawimage', (['x_min', 'x_max', 'y_min', 'y_max', 'width', 'height', 'rgba'], {}), '(x_min, x_max, y_min, y_max, width, height, rgba)\n', (111091, 111140), False, 'import gr\n'), ((150646, 150665), 'numpy.cos', 'np.cos', (['binedges[x]'], {}), '(binedges[x])\n', (150652, 150665), True, 'import numpy as np\n'), ((150698, 150717), 'numpy.cos', 'np.cos', (['binedges[x]'], {}), '(binedges[x])\n', (150704, 150717), True, 'import numpy as np\n'), ((150789, 150808), 'numpy.sin', 'np.sin', (['binedges[x]'], {}), '(binedges[x])\n', (150795, 150808), True, 'import numpy as np\n'), 
((150841, 150860), 'numpy.sin', 'np.sin', (['binedges[x]'], {}), '(binedges[x])\n', (150847, 150860), True, 'import numpy as np\n'), ((150933, 150956), 'numpy.cos', 'np.cos', (['binedges[x + 1]'], {}), '(binedges[x + 1])\n', (150939, 150956), True, 'import numpy as np\n'), ((151038, 151061), 'numpy.cos', 'np.cos', (['binedges[x + 1]'], {}), '(binedges[x + 1])\n', (151044, 151061), True, 'import numpy as np\n'), ((151133, 151156), 'numpy.sin', 'np.sin', (['binedges[x + 1]'], {}), '(binedges[x + 1])\n', (151139, 151156), True, 'import numpy as np\n'), ((151238, 151261), 'numpy.sin', 'np.sin', (['binedges[x + 1]'], {}), '(binedges[x + 1])\n', (151244, 151261), True, 'import numpy as np\n'), ((108193, 108206), 'numpy.log', 'np.log', (['z_min'], {}), '(z_min)\n', (108199, 108206), True, 'import numpy as np\n'), ((109008, 109017), 'numpy.min', 'np.min', (['z'], {}), '(z)\n', (109014, 109017), True, 'import numpy as np\n'), ((109019, 109028), 'numpy.max', 'np.max', (['z'], {}), '(z)\n', (109025, 109028), True, 'import numpy as np\n'), ((110598, 110607), 'numpy.log', 'np.log', (['z'], {}), '(z)\n', (110604, 110607), True, 'import numpy as np\n'), ((110632, 110645), 'numpy.log', 'np.log', (['z_min'], {}), '(z_min)\n', (110638, 110645), True, 'import numpy as np\n'), ((110670, 110683), 'numpy.log', 'np.log', (['z_max'], {}), '(z_max)\n', (110676, 110683), True, 'import numpy as np\n'), ((110814, 110839), 'numpy.zeros', 'np.zeros', (['(height, width)'], {}), '((height, width))\n', (110822, 110839), True, 'import numpy as np\n'), ((112737, 112827), 'gr.polarcellarray', 'gr.polarcellarray', (['(0)', '(0)', 'phi_min', 'phi_max', 'r_range[0]', 'r_range[1]', 'width', 'height', 'data'], {}), '(0, 0, phi_min, phi_max, r_range[0], r_range[1], width,\n height, data)\n', (112754, 112827), False, 'import gr\n'), ((109176, 109189), 'numpy.log', 'np.log', (['z_min'], {}), '(z_min)\n', (109182, 109189), True, 'import numpy as np\n'), ((110434, 110443), 'numpy.min', 'np.min', (['z'], {}), 
'(z)\n', (110440, 110443), True, 'import numpy as np\n'), ((110445, 110454), 'numpy.max', 'np.max', (['z'], {}), '(z)\n', (110451, 110454), True, 'import numpy as np\n'), ((111504, 111513), 'numpy.log', 'np.log', (['z'], {}), '(z)\n', (111510, 111513), True, 'import numpy as np\n'), ((111538, 111551), 'numpy.log', 'np.log', (['z_min'], {}), '(z_min)\n', (111544, 111551), True, 'import numpy as np\n'), ((111576, 111589), 'numpy.log', 'np.log', (['z_max'], {}), '(z_max)\n', (111582, 111589), True, 'import numpy as np\n'), ((111744, 111783), 'numpy.zeros', 'np.zeros', (['(height, width)'], {'dtype': 'np.int'}), '((height, width), dtype=np.int)\n', (111752, 111783), True, 'import numpy as np\n'), ((111842, 111855), 'numpy.degrees', 'np.degrees', (['y'], {}), '(y)\n', (111852, 111855), True, 'import numpy as np\n'), ((112342, 112353), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (112350, 112353), True, 'import numpy as np\n'), ((112398, 112429), 'numpy.array', 'np.array', (["_plt.kwargs['rrange']"], {}), "(_plt.kwargs['rrange'])\n", (112406, 112429), True, 'import numpy as np\n'), ((113177, 113193), 'numpy.prod', 'np.prod', (['z.shape'], {}), '(z.shape)\n', (113184, 113193), True, 'import numpy as np\n'), ((113206, 113227), 'gr.setfillcolorind', 'gr.setfillcolorind', (['(0)'], {}), '(0)\n', (113224, 113227), False, 'import gr\n'), ((113240, 113282), 'gr.surface', 'gr.surface', (['x', 'y', 'z', 'gr.OPTION_FILLED_MESH'], {}), '(x, y, z, gr.OPTION_FILLED_MESH)\n', (113250, 113282), False, 'import gr\n'), ((108227, 108240), 'numpy.log', 'np.log', (['z_max'], {}), '(z_max)\n', (108233, 108240), True, 'import numpy as np\n'), ((108243, 108256), 'numpy.log', 'np.log', (['z_min'], {}), '(z_min)\n', (108249, 108256), True, 'import numpy as np\n'), ((111340, 111349), 'numpy.min', 'np.min', (['z'], {}), '(z)\n', (111346, 111349), True, 'import numpy as np\n'), ((111351, 111360), 'numpy.max', 'np.max', (['z'], {}), '(z)\n', (111357, 111360), True, 'import numpy as np\n'), 
((113034, 113060), 'gr.gridit', 'gr.gridit', (['x', 'y', 'z', '(50)', '(50)'], {}), '(x, y, z, 50, 50)\n', (113043, 113060), False, 'import gr\n'), ((113081, 113092), 'numpy.array', 'np.array', (['z'], {}), '(z)\n', (113089, 113092), True, 'import numpy as np\n'), ((113131, 113154), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['z'], {}), '(z)\n', (113151, 113154), True, 'import numpy as np\n'), ((113564, 113580), 'numpy.prod', 'np.prod', (['z.shape'], {}), '(z.shape)\n', (113571, 113580), True, 'import numpy as np\n'), ((109210, 109223), 'numpy.log', 'np.log', (['z_max'], {}), '(z_max)\n', (109216, 109223), True, 'import numpy as np\n'), ((109226, 109239), 'numpy.log', 'np.log', (['z_min'], {}), '(z_min)\n', (109232, 109239), True, 'import numpy as np\n'), ((113419, 113447), 'gr.gridit', 'gr.gridit', (['x', 'y', 'z', '(200)', '(200)'], {}), '(x, y, z, 200, 200)\n', (113428, 113447), False, 'import gr\n'), ((113468, 113479), 'numpy.array', 'np.array', (['z'], {}), '(z)\n', (113476, 113479), True, 'import numpy as np\n'), ((113518, 113541), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['z'], {}), '(z)\n', (113538, 113541), True, 'import numpy as np\n'), ((113673, 113684), 'gr3.clear', 'gr3.clear', ([], {}), '()\n', (113682, 113684), False, 'import gr3\n'), ((113701, 113745), 'gr3.surface', 'gr3.surface', (['x', 'y', 'z', 'gr.OPTION_COLORED_MESH'], {}), '(x, y, z, gr.OPTION_COLORED_MESH)\n', (113712, 113745), False, 'import gr3\n'), ((113780, 113823), 'gr.surface', 'gr.surface', (['x', 'y', 'z', 'gr.OPTION_COLORED_MESH'], {}), '(x, y, z, gr.OPTION_COLORED_MESH)\n', (113790, 113823), False, 'import gr\n'), ((113975, 113997), 'gr.polyline3d', 'gr.polyline3d', (['x', 'y', 'z'], {}), '(x, y, z)\n', (113988, 113997), False, 'import gr\n'), ((114075, 114119), 'gr.setmarkertype', 'gr.setmarkertype', (['gr.MARKERTYPE_SOLID_CIRCLE'], {}), '(gr.MARKERTYPE_SOLID_CIRCLE)\n', (114091, 114119), False, 'import gr\n'), ((114469, 114493), 'gr.polymarker3d', 
'gr.polymarker3d', (['x', 'y', 'z'], {}), '(x, y, z)\n', (114484, 114493), False, 'import gr\n'), ((114345, 114374), 'gr.setmarkercolorind', 'gr.setmarkercolorind', (['c_index'], {}), '(c_index)\n', (114365, 114374), False, 'import gr\n'), ((114395, 114434), 'gr.polymarker3d', 'gr.polymarker3d', (['[x[i]]', '[y[i]]', '[z[i]]'], {}), '([x[i]], [y[i]], [z[i]])\n', (114410, 114434), False, 'import gr\n'), ((115347, 115361), 'gr.inqvpsize', 'gr.inqvpsize', ([], {}), '()\n', (115359, 115361), False, 'import gr\n'), ((115493, 115549), 'gr.volume', 'gr.volume', (['c'], {'algorithm': '_algorithm', 'dmin': 'dmin', 'dmax': 'dmax'}), '(c, algorithm=_algorithm, dmin=dmin, dmax=dmax)\n', (115502, 115549), False, 'import gr\n'), ((115877, 115897), 'gr.uselinespec', 'gr.uselinespec', (['spec'], {}), '(spec)\n', (115891, 115897), False, 'import gr\n'), ((115972, 115994), 'gr.trisurface', 'gr.trisurface', (['x', 'y', 'z'], {}), '(x, y, z)\n', (115985, 115994), False, 'import gr\n'), ((116204, 116231), 'numpy.linspace', 'np.linspace', (['zmin', 'zmax', '(20)'], {}), '(zmin, zmax, 20)\n', (116215, 116231), True, 'import numpy as np\n'), ((116244, 116274), 'gr.tricontour', 'gr.tricontour', (['x', 'y', 'z', 'levels'], {}), '(x, y, z, levels)\n', (116257, 116274), False, 'import gr\n'), ((116375, 116386), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (116383, 116386), True, 'import numpy as np\n'), ((116405, 116437), 'gr.shadelines', 'gr.shadelines', (['x', 'y'], {'xform': 'xform'}), '(x, y, xform=xform)\n', (116418, 116437), False, 'import gr\n'), ((116472, 116505), 'gr.shadepoints', 'gr.shadepoints', (['x', 'y'], {'xform': 'xform'}), '(x, y, xform=xform)\n', (116486, 116505), False, 'import gr\n')] |
import numpy as np
from copy import deepcopy
from math import ceil, modf
from scipy.integrate import odeint
# Conversion constant used by Calculate.power2phi() to relate thermal power
# to a flux-like quantity.  The derivation of 1e6 * 24 * 3600 / (1.23 / 235)
# is not documented in this file -- TODO confirm the intended units.
dWdN = 1e6 * 24 * 3600 / (1.23 / 235)
class Calculate():
    """Base class for nuclide burn-up / transmutation-chain calculations.

    Builds the initial fuel inventory from weight ratios, stores the time
    step and operating mode (constant flux or constant power), and derives
    the ancestor chain (GENEALOGY) of every initial nuclide.

    Assumed layout of one row of ``data`` (inferred from usage here --
    confirm against the data source):
        row[0] numeric id, row[1] nuclide name whose first three characters
        are the mass number, row[2] parent id (0 for a chain head),
        row[3] transformation type linking it to the parent, row[4]
        loss-mechanism label, row[5]/row[6] cross sections, row[7] decay
        constant.
    """
    def __init__(self, data, settings):
        self.DATA = data
        self.FUEL = {}
        # Weight ratio -> relative atom count: divide by the mass number
        # parsed from the first three characters of the nuclide name.
        for nuclide, weight_ratio in settings['Initial'].items():
            self.FUEL[nuclide] = weight_ratio / (int(nuclide[0:3]) * 1e-3)
        # Time step is given in days and stored in seconds.
        self.DT = settings['DT'] * 24 * 60 * 60
        self.METHOD = settings['Method']
        if self.METHOD == 'Flux':
            # Constant-flux mode.  The 1e-24 factor presumably converts
            # cm^2 to barn-compatible units -- TODO confirm.
            self.phi = settings['Flux'] * 1e-24
        else:
            # Constant-power mode: power given in MW, stored in W, with an
            # upper sanity bound checked by power2phi().
            self.POWER = settings['Power'] * 1e6
            self.POWER_MAX = self.POWER * 100
        self.GENEALOGY = {}
        def find_parents(row):
            # Recursively record the ancestor list (oldest first) of the
            # nuclide in ``row``, memoised through self.GENEALOGY.
            if row[2] == 0:
                self.GENEALOGY[row[1]] = []
            elif row[1] not in self.GENEALOGY.keys():
                parent_row = self.DATA[self.DATA[:, 0].tolist().index(row[2])]
                if parent_row[1] in self.GENEALOGY.keys():
                    self.GENEALOGY[row[1]] = self.GENEALOGY[parent_row[1]] + [parent_row[1]]
                else:
                    self.GENEALOGY[row[1]] = find_parents(parent_row) + [parent_row[1]]
            return self.GENEALOGY[row[1]]
        for nuclide in self.FUEL.keys():
            find_parents(self.DATA[self.DATA[:, 1].tolist().index(nuclide)])
    def Cal_A(self, row):
        """Diagonal (loss) rate coefficient A_ii for a nuclide.

        Returns None implicitly for loss-mechanism labels other than the
        two handled below.
        """
        if row[4] == 'Neutron Absorbtion':
            return -self.phi*(float(row[5]) + float(row[6]))
        elif row[4] == 'Neutron Absorbtion and Beta Decay':
            return -self.phi*(float(row[5]) + float(row[6])) - float(row[7])
    def Cal_A_(self, parent_row, tf_type):
        """Production rate coefficient fed from the parent nuclide,
        depending on the transformation type that links parent and child.

        Returns None implicitly for unrecognised transformation types.
        """
        if tf_type == 'Neutron Absorbtion':
            return self.phi*float(parent_row[6])
        elif tf_type == 'Beta Decay':
            return float(parent_row[7])
    def power2phi(self, fuel_current):
        """Derive the flux value that realises self.POWER for the current
        inventory.  Returns None when no contributing material remains
        (SUM_N == 0) or the derived value is not below POWER_MAX."""
        SUM_N = 0
        for nuclide in self.DATA:
            SUM_N += nuclide[5] * fuel_current[nuclide[1]]
        if SUM_N == 0:
            return None
        else:
            power = self.POWER / (SUM_N * dWdN)
            return power if power < self.POWER_MAX else None
class Analytic(Calculate):
    """Analytic (Bateman-style) solver: per DT interval it builds the
    exponential-sum coefficients A_ii and C_ij for every nuclide and
    evaluates them over a fine time grid."""
    def __init__(self, data, setting):
        super().__init__(data, setting)
    def main(self, t, precision=1e-3):
        """Run the simulation for a total time ``t`` (seconds) in steps of
        self.DT.

        Populates:
            self.numerical_result -- nuclide name -> sampled N(t) array
            self.analytic_result  -- per-step dicts of A_ii / C_ij
            self.time_sequence    -- concatenated sample times
        ``precision`` controls the number of samples per DT interval.
        """
        self.numerical_result = {}
        self.analytic_result = []
        self.t_result = None
        self.time_sequence = np.array([])
        fuel_current = deepcopy(self.FUEL)
        if self.METHOD == 'Power':
            self.numerical_result['Power'] = self.POWER * 1e-6
        steps = int(t / self.DT) + 1
        # NOTE(review): the fractional remainder below is computed but never
        # used, so a trailing partial step is not simulated.
        t = modf(t / self.DT)[0]
        time_sequence_DT = np.linspace(
            0, self.DT, num=ceil(1/precision), endpoint=True)
        for index in range(steps):
            # In power mode re-derive the flux each step; None means the
            # inventory can no longer sustain the requested power, so stop.
            self.phi = self.power2phi(fuel_current) if self.METHOD == 'Power' else self.phi
            if self.phi is None:
                break
            result_A, result_C, fuel_current = self.step(fuel_current, time_sequence_DT)
            self.analytic_result.append({
                'time': {
                    'start': index * self.DT,
                    'end': index * self.DT
                },
                'A_ii': result_A,
                'C_ij': result_C
            })
            self.time_sequence = np.hstack((self.time_sequence, time_sequence_DT + index * self.DT))
    def step(self, fuel_current, time_sequence):
        """Solve one DT interval analytically.

        Builds, for every nuclide, N(t) = sum_j C_ij * exp(A_jj * t) where
        the sum runs over the nuclide itself and its ancestors; appends the
        sampled values to self.numerical_result and returns
        (result_A, result_C, inventory at the end of the interval).
        """
        result_A = {}
        result_C = {}
        fuel_new = {}
        def Calculate_A_and_C(row):
            # Recursively fill result_A / result_C for ``row``'s nuclide,
            # resolving the parent first (memoised via the dict checks).
            if row[1] in result_A.keys() and row[1] in result_C.keys():
                pass
            elif row[2] == 0:
                result_A[row[1]] = self.Cal_A(row)
                result_C[row[1]] = {
                    row[1]: fuel_current[row[1]]
                }
            else:
                parent_row = self.DATA[self.DATA[:, 0].tolist().index(row[2])]
                Calculate_A_and_C(parent_row)
                result_A[row[1]] = self.Cal_A(row)
                A_ = self.Cal_A_(parent_row, row[3])
                result_C[row[1]] = {
                    row[1]: fuel_current[row[1]]
                }
                # Bateman-style coefficients: each ancestor contributes a
                # term, and the nuclide's own coefficient absorbs the rest
                # so that N(0) equals the current inventory.
                for nuclide in self.GENEALOGY[row[1]]:
                    result_C[row[1]][nuclide] = A_ * result_C[parent_row[1]][nuclide] / (result_A[nuclide] - result_A[row[1]])
                    result_C[row[1]][row[1]] -= result_C[row[1]][nuclide]
        for row in self.DATA:
            N_sequence = np.zeros(len(time_sequence))
            fuel_new[row[1]] = 0
            Calculate_A_and_C(row)
            for nuclide, C_ij in result_C[row[1]].items():
                N_sequence += C_ij * np.exp(result_A[nuclide] * time_sequence)
                fuel_new[row[1]] += C_ij * np.exp(result_A[nuclide] * self.DT)
            if row[1] in self.numerical_result.keys():
                self.numerical_result[row[1]] = np.hstack((self.numerical_result[row[1]], N_sequence))
            else:
                self.numerical_result[row[1]] = N_sequence
        return result_A, result_C, fuel_new
class Numerical(Calculate):
    """Numerical solver: integrates the coupled production/loss ODE system
    with scipy.integrate.odeint over each DT interval."""
    def __init__(self, data, setting):
        super().__init__(data, setting)
    def main(self, t, precision=1e-3):
        """Run the simulation for a total time ``t`` (seconds) in steps of
        self.DT; sampled N(t) arrays accumulate in self.numerical_result
        keyed by nuclide name, along the times in self.time_sequence."""
        self.numerical_result = {}
        self.analytic_result = []
        self.t_result = None
        self.time_sequence = np.array([])
        fuel_current = deepcopy(self.FUEL)
        if self.METHOD == 'Power':
            self.numerical_result['Power'] = self.POWER * 1e-6
        steps = int(t / self.DT) + 1
        # NOTE(review): the fractional remainder below is computed but never
        # used, so a trailing partial step is not simulated.
        t = modf(t / self.DT)[0]
        time_sequence_DT = np.linspace(
            0, self.DT, num=ceil(1/precision), endpoint=True)
        for index in range(steps):
            # In power mode re-derive the flux each step; None means the
            # inventory can no longer sustain the requested power, so stop.
            self.phi = self.power2phi(fuel_current) if self.METHOD == 'Power' else self.phi
            if self.phi is None:
                break
            fuel_current = self.step(fuel_current, time_sequence_DT)
            self.time_sequence = np.hstack((self.time_sequence, time_sequence_DT + index * self.DT))
    def step(self, fuel_current, time_sequence):
        """Integrate one DT interval with odeint and return the inventory
        (nuclide name -> amount) at its end."""
        fuel_new = {}
        # Flatten the inventory dict into a vector in DATA row order.
        fuel_current = [fuel_current[nuclide] for nuclide in self.DATA[:, 1]]
        def Calculate(N, t):
            # Right-hand side of the ODE system: a loss term for every
            # nuclide plus, for non-chain-heads, a production term fed by
            # the parent nuclide.
            dNdt = []
            for n_id, n in enumerate(N):
                row = self.DATA[n_id]
                if row[2] == 0:
                    dNdt.append(self.Cal_A(row) * n)
                else:
                    parent_row_id = self.DATA[:, 0].tolist().index(row[2])
                    parent_row = self.DATA[parent_row_id]
                    parent_n = N[parent_row_id]
                    dNdt.append(self.Cal_A_(parent_row, row[3]) * parent_n + self.Cal_A(row) * n)
            return dNdt
        sol = odeint(Calculate, fuel_current, time_sequence)
        for nuclide_id, nuclide in enumerate(self.DATA[:, 1]):
            N_sequence = sol[:, nuclide_id]
            if nuclide in self.numerical_result.keys():
                self.numerical_result[nuclide] = np.hstack((self.numerical_result[nuclide], N_sequence))
            else:
                self.numerical_result[nuclide] = N_sequence
            fuel_new[nuclide] = N_sequence[-1]
        return fuel_new
| [
"copy.deepcopy",
"math.ceil",
"scipy.integrate.odeint",
"math.modf",
"numpy.hstack",
"numpy.array",
"numpy.exp"
] | [((2462, 2474), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2470, 2474), True, 'import numpy as np\n'), ((2498, 2517), 'copy.deepcopy', 'deepcopy', (['self.FUEL'], {}), '(self.FUEL)\n', (2506, 2517), False, 'from copy import deepcopy\n'), ((5413, 5425), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5421, 5425), True, 'import numpy as np\n'), ((5449, 5468), 'copy.deepcopy', 'deepcopy', (['self.FUEL'], {}), '(self.FUEL)\n', (5457, 5468), False, 'from copy import deepcopy\n'), ((6832, 6878), 'scipy.integrate.odeint', 'odeint', (['Calculate', 'fuel_current', 'time_sequence'], {}), '(Calculate, fuel_current, time_sequence)\n', (6838, 6878), False, 'from scipy.integrate import odeint\n'), ((2679, 2696), 'math.modf', 'modf', (['(t / self.DT)'], {}), '(t / self.DT)\n', (2683, 2696), False, 'from math import ceil, modf\n'), ((3377, 3444), 'numpy.hstack', 'np.hstack', (['(self.time_sequence, time_sequence_DT + index * self.DT)'], {}), '((self.time_sequence, time_sequence_DT + index * self.DT))\n', (3386, 3444), True, 'import numpy as np\n'), ((5630, 5647), 'math.modf', 'modf', (['(t / self.DT)'], {}), '(t / self.DT)\n', (5634, 5647), False, 'from math import ceil, modf\n'), ((6050, 6117), 'numpy.hstack', 'np.hstack', (['(self.time_sequence, time_sequence_DT + index * self.DT)'], {}), '((self.time_sequence, time_sequence_DT + index * self.DT))\n', (6059, 6117), True, 'import numpy as np\n'), ((2768, 2787), 'math.ceil', 'ceil', (['(1 / precision)'], {}), '(1 / precision)\n', (2772, 2787), False, 'from math import ceil, modf\n'), ((4956, 5010), 'numpy.hstack', 'np.hstack', (['(self.numerical_result[row[1]], N_sequence)'], {}), '((self.numerical_result[row[1]], N_sequence))\n', (4965, 5010), True, 'import numpy as np\n'), ((5719, 5738), 'math.ceil', 'ceil', (['(1 / precision)'], {}), '(1 / precision)\n', (5723, 5738), False, 'from math import ceil, modf\n'), ((7091, 7146), 'numpy.hstack', 'np.hstack', (['(self.numerical_result[nuclide], N_sequence)'], {}), 
'((self.numerical_result[nuclide], N_sequence))\n', (7100, 7146), True, 'import numpy as np\n'), ((4732, 4773), 'numpy.exp', 'np.exp', (['(result_A[nuclide] * time_sequence)'], {}), '(result_A[nuclide] * time_sequence)\n', (4738, 4773), True, 'import numpy as np\n'), ((4817, 4852), 'numpy.exp', 'np.exp', (['(result_A[nuclide] * self.DT)'], {}), '(result_A[nuclide] * self.DT)\n', (4823, 4852), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pcap, socket, struct
import numpy as np
from cymru import *
def inspect_pcaps(pcaps, processes=1):
    '''Open a lot of pcaps at once, using a number of worker processes.

    pcaps     -- iterable of pcap file paths; each one is parsed into a
                 flow() object in a worker process
    processes -- size of the worker pool
    Returns the list of flow objects, in input order.
    '''
    from multiprocessing import Pool
    p = Pool(processes)
    try:
        return p.map(flow, pcaps)
    finally:
        # Shut the pool down explicitly: without close()/join() the worker
        # processes linger until interpreter exit (resource leak).
        p.close()
        p.join()
'''retrieves a 4-tupel identifying a tcp/ip connection'''
def _get_flow_tuple(ip_pkt):
s_ip = ip_pkt[12:16]
d_ip = ip_pkt[16:20]
ihl = ord(ip_pkt[0]) % 16
tcp = ip_pkt[ihl*4:]
s_port, d_port = struct.unpack("!HH", tcp[:4])
return (s_ip, s_port, d_ip, d_port)
def computeTrafficGraph(data, start_ts, end_ts, sampling_frequency=1):
    '''Bucket packet sizes into a bytes-per-second array for the interval
    [start_ts, end_ts] at the given sampling_frequency.

    data is a sequence of (timestamp, length, dscp) tuples.  An open-ended
    interval (end_ts == inf) is clipped at the last packet's timestamp.
    The array carries empty buckets at both ends so that later gap
    detection sees a boundary on each side.
    '''
    if end_ts == float("inf"):
        span = data[-1][0] - start_ts
    else:
        span = end_ts - start_ts
    buckets = [0] * int(span * sampling_frequency + 3)
    for ts, length, _ in data:
        slot = int((ts - start_ts) * sampling_frequency + 1)
        buckets[slot] += length * sampling_frequency
    return buckets
def computeGapCount(traffic):
    '''Count the silent periods (runs of zero buckets) in a traffic array.

    A gap is counted when the series drops to zero after having carried
    data; a leading run of zeros is not counted because the scan starts in
    the "inside a gap" state.
    '''
    gaps = 0
    in_gap = True
    for value in traffic:
        if not in_gap and value == 0:
            # transition data -> silence: one more gap
            in_gap = True
            gaps += 1
        elif in_gap and value > 0:
            # transition silence -> data
            in_gap = False
    return gaps
def loadLabels(path):
    '''Read a semicolon-separated CSV file with "filename" and "label"
    columns and return a {filename: label} mapping.'''
    import csv
    with open(path) as handle:
        rows = csv.DictReader(handle, delimiter=";", quotechar='"')
        return {entry["filename"]: entry["label"] for entry in rows}
def computeMetrics(data, start_ts, end_ts):
    '''Return standard metrics for the given halfflow data.

    data is a sequence of (timestamp, length, dscp) tuples; start_ts and
    end_ts bound the observation interval.  The result dict holds packet
    and byte counts, packet-size statistics, inter-arrival statistics,
    traffic-rate statistics and the median DSCP.  For an empty halfflow a
    neutral dict is returned (std values of 1 so later normalisation does
    not divide by zero).
    '''
    metrics = {}
    if len(data) == 0:
        metrics["packets"] = 0
        metrics["bytes"] = 0
        metrics["bytes_avg"] = 0
        metrics["bytes_std"] = 1
        metrics["iat_avg"] = 0
        metrics["iat_std"] = 1
        metrics["traffic_avg"] = 0
        metrics["traffic_std"] = 1
        metrics["dscp_median"] = 0
        return metrics
    # Comprehensions instead of zip(*data)[i]: subscripting zip() is
    # Python-2-only (zip returns an iterator on Python 3).
    timestamps = [ts for ts, _, _ in data]
    lengths = [length for _, length, _ in data]
    dscps = [dscp for _, _, dscp in data]
    traffic = computeTrafficGraph(data, start_ts, end_ts)
    metrics["packets"] = len(lengths)
    metrics["bytes"] = sum(lengths)
    metrics["bytes_avg"] = np.average(lengths)
    metrics["bytes_std"] = np.std(lengths)
    # Inter-arrival times between consecutive packets.  The reference time
    # starts at the first packet's timestamp (the old code started at 0.0,
    # which inflated the first gap for halfflows not starting at t=0,
    # e.g. the backward direction of a flow).
    interarrival = []
    last = timestamps[0]
    for ts in timestamps[1:]:
        interarrival.append(ts - last)
        last = ts
    if len(interarrival) == 0:
        metrics["iat_avg"] = end_ts - start_ts
        metrics["iat_std"] = 0
    else:
        metrics["iat_avg"] = np.average(interarrival)
        metrics["iat_std"] = np.std(interarrival)
    metrics["traffic_avg"] = np.average(traffic)
    metrics["traffic_std"] = np.std(traffic)
    metrics["dscp_median"] = int(np.median(dscps))
    return metrics
'''representation of relevant information inside a tcp-flow'''
class flow:
    '''Parsed representation of a single TCP/IP flow read from a pcap file.

    The constructor reads the pcap, splits packets into the forward
    direction (same source IP as the first packet) and the backward
    direction, and computes per-direction metrics.  NOTE: this module
    targets Python 2 (string-indexed packet bytes, raw_input in __main__).
    '''
    def __init__(self, filename):
        self._filename = filename
        # read file and save basic information
        self._readPcap(filename)
        # compute basic metrics
        self.fmetric = computeMetrics(self.forward, 0.0, self.duration)
        self.bmetric = computeMetrics(self.backward, 0.0, self.duration)
    def _readPcap(self, filename):
        """Read all packets of the pcap and fill self.forward/self.backward
        with (relative timestamp, ip length, dscp) tuples, plus the flow
        tuple, start/end times, duration and IP protocol number."""
        thePcap = pcap.pcap(filename)
        self.forward = []
        self.backward = []
        ts, pkt = thePcap.next()
        # Skip the 14-byte Ethernet header to get at the IP packet.
        ip = pkt[14:]
        # DSCP is the upper six bits of the TOS byte (integer division
        # relies on Python 2 semantics).
        dscp = ord(ip[1]) / 4
        # set flow tupel and cap start
        self.srcip, self.srcport, self.dstip, self.dstport = _get_flow_tuple(ip)
        self.start = ts
        self.end = ts
        # add syn packet
        self.forward.append((0.0, len(ip), dscp))
        for ts, pkt in thePcap:
            # skip over malformed packets
            if len(pkt) < 14+20: continue
            if ts < self.start: continue
            # set the latest packet as a potential end
            self.end = ts
            ip = pkt[14:]
            dscp = ord(ip[1]) / 4
            srcip = _get_flow_tuple(ip)[0]
            if srcip == self.srcip:
                self.forward.append((ts-self.start, len(ip), dscp))
            else:
                self.backward.append((ts-self.start, len(ip), dscp))
        del(thePcap)
        self.duration = self.end - self.start
        # Protocol field of the last packet's IP header.
        self.proto = struct.unpack("!B", ip[9:10])[0]
    # Interval boundaries (seconds) used for the autoencoder feature
    # vector: 0, 1, 2, 4, ... 2048.
    INTERVALS = [0]+[2**x for x in range(12)]
    FEATURE_ORDER = ["packets","bytes","bytes_avg","bytes_std","iat_avg","iat_std","traffic_avg","traffic_std","dscp_median"]
    '''returns metrics for all defined flow.INTERVALS'''
    def _getIntervalMetrics(self, cumulative):
        """Compute per-interval metrics for both directions.

        cumulative -- if True, each interval covers [0, INTERVALS[i+1]];
                      otherwise only [INTERVALS[i], INTERVALS[i+1]].
        Returns (forward_metrics, backward_metrics), each a list with one
        metrics dict per interval.
        """
        '''maps flow.INTERVALS to the provided data indices'''
        def __mapDataToInterval(data):
            index_list = []
            interval_index = 0
            data_index = 0
            # add all indices, that relate to an existing timesstamp
            # either the the last ts is only < float("Inf") -> done
            for data_index, (ts, _, _) in enumerate(data):
                while ts >= flow.INTERVALS[interval_index]:
                    index_list.append(data_index)
                    if interval_index < len(flow.INTERVALS)-1:
                        interval_index += 1
                    else:
                        break
            # or the last ts is eg < 16 -> added here (and padded with empty flows)
            for interval_index in range(interval_index, len(flow.INTERVALS)):
                index_list.append(data_index)
            return index_list
        '''compute the halfflow metrics for flow.INTERVALS and the provided data.'''
        def __halfflowMetrics(data):
            index_list = __mapDataToInterval(data)
            metrics = []
            for i in range(len(flow.INTERVALS)-1):
                if cumulative:
                    m = computeMetrics(data[:index_list[i+1]], 0.0, flow.INTERVALS[i+1])
                else:
                    m = computeMetrics(data[index_list[i]:index_list[i+1]], flow.INTERVALS[i], flow.INTERVALS[i+1])
                metrics.append(m)
            if len(metrics) != len(flow.INTERVALS)-1: raise Exception("At least one interval is missing in the metrics!")
            return metrics
        fmetrics = __halfflowMetrics(self.forward)
        bmetrics = __halfflowMetrics(self.backward)
        return (fmetrics, bmetrics)
    @staticmethod
    def getAutoencoderMetricsVectorHeader():
        """Column names matching getAutoencoderMetricsVector(), in order."""
        out = []
        for direction in ["f","b"]:
            for ts in flow.INTERVALS[1:]:
                out += ["{}_{}_{}".format(ts, direction, attr) for attr in flow.FEATURE_ORDER]
        return out
    @staticmethod
    def getAutoencoderMetricsVectorLength():
        """Length of the feature vector produced by getAutoencoderMetricsVector()."""
        return len(flow.getAutoencoderMetricsVectorHeader())
    '''returns a vector of the concatenated interval metrics'''
    def getAutoencoderMetricsVector(self, cumulative=True):
        """Flatten the interval metrics of both directions into one list,
        ordered forward intervals first, FEATURE_ORDER within each."""
        fmetrics, bmetrics = self._getIntervalMetrics(cumulative)
        out = []
        for metric in fmetrics+bmetrics:
            out += [metric[f] for f in flow.FEATURE_ORDER]
        return out
    @staticmethod
    def getCSVHeader(additionals=[]):
        # NOTE(review): mutable default argument -- harmless here because it
        # is only read, never mutated.
        return "duration_ms,f_bytes,b_bytes,f_packets,b_packets,proto,as_name,fb_ratio,f_port,b_port,f_bps,b_bps,filename,f_dscp,b_dscp,f_iat_avg,f_iat_std,b_iat_avg,b_iat_std,f_gaps,b_gaps".split(",") + sorted(additionals)
    def getCSVRepr(self, additionals={}):
        """One CSV row matching getCSVHeader(); ratio/bps fields degrade to
        "NaN" when a division by zero would occur.  ``additionals`` values
        are appended in key-sorted order (mutable default is read-only)."""
        asname = self.as_name if hasattr(self, 'as_name') else ""
        try: fb_ratio = float(self.fmetric["bytes"])/float(self.bmetric["bytes"])
        except ZeroDivisionError: fb_ratio = "NaN"
        try: b_bps = int(float(self.bmetric["bytes"])/float(self.duration))
        except ZeroDivisionError: b_bps = "NaN"
        try: f_bps = int(float(self.fmetric["bytes"])/float(self.duration))
        except ZeroDivisionError: f_bps = "NaN"
        f_gaps, b_gaps = self.defaultGapCounts()
        return [
            int(self.duration*1000),
            self.fmetric["bytes"],
            self.bmetric["bytes"],
            self.fmetric["packets"],
            self.bmetric["packets"],
            self.proto,
            asname,
            fb_ratio,
            self.srcport,
            self.dstport,
            f_bps,
            b_bps,
            self._filename,
            self.fmetric["dscp_median"],
            self.bmetric["dscp_median"],
            self.fmetric["iat_avg"],
            self.fmetric["iat_std"],
            self.bmetric["iat_avg"],
            self.bmetric["iat_std"],
            f_gaps,
            b_gaps
        ] + [v for k,v in sorted(additionals.items())]
    '''lookup ASN information for the flow.'''
    def lookupASN(self):
        """Query the cymru lookup for the destination IP and, on success,
        set self.asn, self.as_name and self.bgp_prefix."""
        asn = lookup(socket.inet_ntoa(self.dstip))
        if asn:
            try: self.asn = int(asn["AS"])
            except ValueError: pass
            self.as_name = asn["AS Name"]
            self.bgp_prefix = asn["BGP Prefix"]
    def __repr__(self, header=False):
        out = []
        out.append("{}:{} -> {}:{}".format(socket.inet_ntoa(self.srcip), self.srcport, socket.inet_ntoa(self.dstip), self.dstport))
        out.append("forward metrics: {}".format(str(self.fmetric)))
        out.append("backward metrics: {}".format(str(self.bmetric)))
        if hasattr(self, "asn"): out.append("asn: {} - {} [{}]".format(self.asn, self.bgp_prefix, self.as_name))
        return "flow({}\n    {})".format(out[0], "\n    ".join(out[1:]))
    '''show a plot of the traffic graphs'''
    def show(self, scale="linear", show=True):
        """Plot per-direction traffic rates (lines, left axis) and packet
        sizes (scatter, right axis); forward in red, backward in blue.
        If show is False the configured pyplot module is returned instead
        of being displayed."""
        import matplotlib.pyplot as plt
        import matplotlib.patches as mpatches
        fig = plt.figure()
        sampling_frequency = 1.0
        x_axis = np.arange(0, int(self.duration + 20), 1.0/sampling_frequency)
        forward_traffic = computeTrafficGraph(self.forward, 0.0, self.duration, sampling_frequency)
        backward_traffic = computeTrafficGraph(self.backward, 0.0, self.duration, sampling_frequency)
        ax1 = fig.add_subplot(111)
        ax1.plot(x_axis[:len(forward_traffic)], forward_traffic, lw=2, c="red")
        ax1.plot(x_axis[:len(backward_traffic)], backward_traffic, lw=2, c="blue")
        ax2 = ax1.twinx()
        ax2.scatter([t for t, s, _ in self.forward], [s for t, s, _ in self.forward], alpha=0.3, c="red")
        ax2.scatter([t for t, s, _ in self.backward], [s for t, s, _ in self.backward], alpha=0.3, c="blue")
        if show:
            plt.legend(handles = [mpatches.Patch(color='red', label='forward'), mpatches.Patch(color='blue', label='backward')])
        ax1.set_xlabel('time (s)')
        ax1.set_ylabel('traffic (bytes/s)')
        ax2.set_ylabel('packet size (byte)')
        ax1.set_ylim(bottom=0)
        ax2.set_ylim(bottom=0)
        ax1.set_xlim(left=0, right=(len(forward_traffic)-1)/sampling_frequency)
        plt.yscale(scale)
        if show:
            plt.show(block=False)
        else:
            return plt
    def defaultGapCounts(self):
        """Return (forward gap count, backward gap count) at 1 Hz sampling."""
        forward_traffic = computeTrafficGraph(self.forward, 0.0, self.duration)
        backward_traffic = computeTrafficGraph(self.backward, 0.0, self.duration)
        return (computeGapCount(forward_traffic), computeGapCount(backward_traffic))
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="load flow-pcap and write out basic information")
parser.add_argument("-q", "--quiet", action="store_true", help="don't show the bandwidth graph")
parser.add_argument("pcap", help="flow pcap file")
args = parser.parse_args()
testflow = flow(args.pcap)
testflow.lookupASN()
print(testflow)
print(testflow.getCSVRepr())
testflow.show()
raw_input("Press Enter to exit...")
| [
"matplotlib.pyplot.yscale",
"numpy.average",
"argparse.ArgumentParser",
"pcap.pcap",
"matplotlib.pyplot.show",
"numpy.std",
"csv.DictReader",
"numpy.median",
"struct.unpack",
"matplotlib.pyplot.figure",
"multiprocessing.Pool",
"matplotlib.patches.Patch",
"socket.inet_ntoa"
] | [((268, 283), 'multiprocessing.Pool', 'Pool', (['processes'], {}), '(processes)\n', (272, 283), False, 'from multiprocessing import Pool\n'), ((551, 580), 'struct.unpack', 'struct.unpack', (['"""!HH"""', 'tcp[:4]'], {}), "('!HH', tcp[:4])\n", (564, 580), False, 'import pcap, socket, struct\n'), ((2403, 2422), 'numpy.average', 'np.average', (['lengths'], {}), '(lengths)\n', (2413, 2422), True, 'import numpy as np\n'), ((2450, 2465), 'numpy.std', 'np.std', (['lengths'], {}), '(lengths)\n', (2456, 2465), True, 'import numpy as np\n'), ((2851, 2870), 'numpy.average', 'np.average', (['traffic'], {}), '(traffic)\n', (2861, 2870), True, 'import numpy as np\n'), ((2900, 2915), 'numpy.std', 'np.std', (['traffic'], {}), '(traffic)\n', (2906, 2915), True, 'import numpy as np\n'), ((11659, 11749), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""load flow-pcap and write out basic information"""'}), "(description=\n 'load flow-pcap and write out basic information')\n", (11682, 11749), False, 'import argparse\n'), ((1574, 1627), 'csv.DictReader', 'csv.DictReader', (['csvfile'], {'delimiter': '""";"""', 'quotechar': '"""\\""""'}), '(csvfile, delimiter=\';\', quotechar=\'"\')\n', (1588, 1627), False, 'import csv\n'), ((2742, 2766), 'numpy.average', 'np.average', (['interarrival'], {}), '(interarrival)\n', (2752, 2766), True, 'import numpy as np\n'), ((2796, 2816), 'numpy.std', 'np.std', (['interarrival'], {}), '(interarrival)\n', (2802, 2816), True, 'import numpy as np\n'), ((2954, 2970), 'numpy.median', 'np.median', (['dscps'], {}), '(dscps)\n', (2963, 2970), True, 'import numpy as np\n'), ((3522, 3541), 'pcap.pcap', 'pcap.pcap', (['filename'], {}), '(filename)\n', (3531, 3541), False, 'import pcap, socket, struct\n'), ((9998, 10010), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10008, 10010), True, 'import matplotlib.pyplot as plt\n'), ((11212, 11229), 'matplotlib.pyplot.yscale', 'plt.yscale', (['scale'], {}), '(scale)\n', 
(11222, 11229), True, 'import matplotlib.pyplot as plt\n'), ((4596, 4625), 'struct.unpack', 'struct.unpack', (['"""!B"""', 'ip[9:10]'], {}), "('!B', ip[9:10])\n", (4609, 4625), False, 'import pcap, socket, struct\n'), ((9064, 9092), 'socket.inet_ntoa', 'socket.inet_ntoa', (['self.dstip'], {}), '(self.dstip)\n', (9080, 9092), False, 'import pcap, socket, struct\n'), ((11259, 11280), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (11267, 11280), True, 'import matplotlib.pyplot as plt\n'), ((9386, 9414), 'socket.inet_ntoa', 'socket.inet_ntoa', (['self.srcip'], {}), '(self.srcip)\n', (9402, 9414), False, 'import pcap, socket, struct\n'), ((9430, 9458), 'socket.inet_ntoa', 'socket.inet_ntoa', (['self.dstip'], {}), '(self.dstip)\n', (9446, 9458), False, 'import pcap, socket, struct\n'), ((10843, 10887), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '"""red"""', 'label': '"""forward"""'}), "(color='red', label='forward')\n", (10857, 10887), True, 'import matplotlib.patches as mpatches\n'), ((10889, 10935), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '"""blue"""', 'label': '"""backward"""'}), "(color='blue', label='backward')\n", (10903, 10935), True, 'import matplotlib.patches as mpatches\n')] |
import numpy as np
from PIL import Image
import cv2
import sys
def output_to_image(net_input, output, name, path):
    """Convert a network's L-channel input and ab-channel output into an
    RGB image and save it as ``<path>/<name>.png``.

    net_input -- tensor whose [0][0] slice is the L channel in [-1, 1]
    output    -- tensor whose [0][0]/[0][1] slices are the a/b channels
                 in [-1, 1]
    name      -- output file name without extension
    path      -- output directory
    """
    l = net_input.cpu().detach().numpy()[0][0]  # l channel
    a = output.cpu().detach().numpy()[0][0]  # a channel
    b = output.cpu().detach().numpy()[0][1]  # b channel

    def _to_uint8(channel):
        # Map [-1, 1] -> [0, 255].  astype truncates floats exactly like
        # the original per-pixel loops feeding np.array(..., dtype=uint8).
        return ((channel + 1.0) * 255.0 / 2.0).astype(np.uint8)

    # Vectorised channel mapping replaces the original O(H*W) Python
    # loops; the manual list clean-up at the end was redundant and is gone.
    merged_channels = cv2.merge((_to_uint8(l), _to_uint8(a), _to_uint8(b)))
    final_image = cv2.cvtColor(merged_channels, cv2.COLOR_LAB2RGB)
    Image.fromarray(final_image).save(f"{path}/{name}.png")
| [
"cv2.cvtColor",
"numpy.size",
"numpy.array",
"PIL.Image.fromarray"
] | [((1223, 1271), 'cv2.cvtColor', 'cv2.cvtColor', (['merged_channels', 'cv2.COLOR_LAB2RGB'], {}), '(merged_channels, cv2.COLOR_LAB2RGB)\n', (1235, 1271), False, 'import cv2\n'), ((579, 592), 'numpy.size', 'np.size', (['l', '(1)'], {}), '(l, 1)\n', (586, 592), True, 'import numpy as np\n'), ((729, 742), 'numpy.size', 'np.size', (['a', '(1)'], {}), '(a, 1)\n', (736, 742), True, 'import numpy as np\n'), ((877, 890), 'numpy.size', 'np.size', (['b', '(1)'], {}), '(b, 1)\n', (884, 890), True, 'import numpy as np\n'), ((618, 631), 'numpy.size', 'np.size', (['l', '(0)'], {}), '(l, 0)\n', (625, 631), True, 'import numpy as np\n'), ((768, 781), 'numpy.size', 'np.size', (['a', '(0)'], {}), '(a, 0)\n', (775, 781), True, 'import numpy as np\n'), ((916, 929), 'numpy.size', 'np.size', (['b', '(0)'], {}), '(b, 0)\n', (923, 929), True, 'import numpy as np\n'), ((1039, 1081), 'numpy.array', 'np.array', (['mapped_l_channel'], {'dtype': 'np.uint8'}), '(mapped_l_channel, dtype=np.uint8)\n', (1047, 1081), True, 'import numpy as np\n'), ((1083, 1125), 'numpy.array', 'np.array', (['mapped_a_channel'], {'dtype': 'np.uint8'}), '(mapped_a_channel, dtype=np.uint8)\n', (1091, 1125), True, 'import numpy as np\n'), ((1160, 1202), 'numpy.array', 'np.array', (['mapped_b_channel'], {'dtype': 'np.uint8'}), '(mapped_b_channel, dtype=np.uint8)\n', (1168, 1202), True, 'import numpy as np\n'), ((1394, 1422), 'PIL.Image.fromarray', 'Image.fromarray', (['final_image'], {}), '(final_image)\n', (1409, 1422), False, 'from PIL import Image\n'), ((365, 378), 'numpy.size', 'np.size', (['l', '(0)'], {}), '(l, 0)\n', (372, 378), True, 'import numpy as np\n'), ((454, 467), 'numpy.size', 'np.size', (['a', '(0)'], {}), '(a, 0)\n', (461, 467), True, 'import numpy as np\n'), ((543, 556), 'numpy.size', 'np.size', (['b', '(0)'], {}), '(b, 0)\n', (550, 556), True, 'import numpy as np\n'), ((334, 347), 'numpy.size', 'np.size', (['l', '(1)'], {}), '(l, 1)\n', (341, 347), True, 'import numpy as np\n'), ((423, 436), 
'numpy.size', 'np.size', (['a', '(1)'], {}), '(a, 1)\n', (430, 436), True, 'import numpy as np\n'), ((512, 525), 'numpy.size', 'np.size', (['b', '(1)'], {}), '(b, 1)\n', (519, 525), True, 'import numpy as np\n')] |
from dataloader import colmap_loader
import os
import sys
import argparse
import numpy as np
import cv2
import math
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# --- command line ------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('seq_path', help='the path to the sequence of images.')
parser.add_argument('--input_views', help='comma-separated list of the indices in the sequence to use as inputs')
args = parser.parse_args()
input_views = []
if args.input_views is not None:
    input_views = [int(i) for i in args.input_views.split(',')]
print(args)
# Load image names and per-image camera parameters from the COLMAP
# reconstruction (each entry holds 'intrinsic' and a 4x4 'extrinsic').
img_list, list_cam_params \
    = colmap_loader.COLMAPData.extract_data_to_param(args.seq_path)
fig = plt.figure(1)
ax = fig.gca(projection='3d')
np.set_printoptions(formatter={'float_kind': lambda x: "{0:0.3f}".format(x)})
X = []
Y = []
Z = []
R = np.zeros((3, len(list_cam_params)))  # scaled Rodrigues rotation vectors
T = np.zeros((3, len(list_cam_params)))  # camera translations
for i in range(len(list_cam_params)):
    x = list_cam_params[i]['extrinsic'][0][3]
    y = list_cam_params[i]['extrinsic'][1][3]
    z = list_cam_params[i]['extrinsic'][2][3]
    X.append(x)
    Y.append(y)
    Z.append(z)
    print(img_list[i])
    print('intrinsic')
    print(list_cam_params[i]['intrinsic'])
    print('extrinsic')
    print(list_cam_params[i]['extrinsic'])
    ext_array = list_cam_params[i]['extrinsic']
    ext_array = ext_array.reshape(4, 4)
    rotation = ext_array[0:3, 0:3]
    position = ext_array[0:3, 3]
    # cv2.Rodrigues returns (rotation_vector, jacobian); using the return
    # value is the documented interface.  The previous code passed a (3,)
    # array as the output argument, which the Python bindings do not
    # reliably fill in place, leaving the vector at zero.
    vec = cv2.Rodrigues(rotation)[0].ravel()
    norm = math.sqrt(vec[0]*vec[0] + vec[1]*vec[1] + vec[2]*vec[2])
    vec = vec/norm *0.1
    R[:, i] = vec
    T[:, i] = position
print(R.transpose())
print(T.transpose())
# NOTE(review): quiver's u/v/w arguments look like absolute end points
# (T + R) rather than direction components -- confirm intended rendering.
ax.quiver(T[0,:], T[1,:], T[2,:], T[0,:]+ R[0,:], T[1,:] + R[1,:], T[2,:]+ R[2,:], length = 0.5, normalize = True)
for i, txt in enumerate(img_list):
    ax.text(X[i], Y[i], Z[i], i, 'x')
plt.show()
print(len(list_cam_params))
| [
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"dataloader.colmap_loader.COLMAPData.extract_data_to_param",
"math.sqrt",
"matplotlib.pyplot.figure",
"cv2.Rodrigues",
"numpy.array"
] | [((200, 225), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (223, 225), False, 'import argparse\n'), ((604, 665), 'dataloader.colmap_loader.COLMAPData.extract_data_to_param', 'colmap_loader.COLMAPData.extract_data_to_param', (['args.seq_path'], {}), '(args.seq_path)\n', (650, 665), False, 'from dataloader import colmap_loader\n'), ((935, 948), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (945, 948), True, 'import matplotlib.pyplot as plt\n'), ((2244, 2254), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2252, 2254), True, 'import matplotlib.pyplot as plt\n'), ((1667, 1692), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (1675, 1692), True, 'import numpy as np\n'), ((1692, 1735), 'cv2.Rodrigues', 'cv2.Rodrigues', (['rotation', 'vec'], {'jacobian': 'None'}), '(rotation, vec, jacobian=None)\n', (1705, 1735), False, 'import cv2\n'), ((1744, 1806), 'math.sqrt', 'math.sqrt', (['(vec[0] * vec[0] + vec[1] * vec[1] + vec[2] * vec[2])'], {}), '(vec[0] * vec[0] + vec[1] * vec[1] + vec[2] * vec[2])\n', (1753, 1806), False, 'import math\n')] |
"""Unit tests for radar_plotting.py."""
import unittest
import numpy
from gewittergefahr.gg_utils import radar_utils
from gewittergefahr.deep_learning import input_examples
from gewittergefahr.plotting import radar_plotting
METRES_TO_KM = 0.001
# The following constants are used to test layer_operations_to_names.
# 12 field/operation combos: MIN/MEAN/MAX over three fields and four layers.
THESE_FIELD_NAMES = (
    [radar_utils.REFL_NAME] * 3 +
    [radar_utils.SPECTRUM_WIDTH_NAME] * 3 +
    [radar_utils.VORTICITY_NAME] * 3 +
    [radar_utils.VORTICITY_NAME] * 3
)
LAYER_OPERATION_NAMES = [
    input_examples.MIN_OPERATION_NAME, input_examples.MEAN_OPERATION_NAME,
    input_examples.MAX_OPERATION_NAME
] * 4
# Layer bottoms/tops (metres above ground level), one per combo above.
MIN_HEIGHTS_M_AGL = numpy.array(
    [1000] * 3 + [1000] * 3 + [2000] * 3 + [5000] * 3,
    dtype=int
)
MAX_HEIGHTS_M_AGL = numpy.array(
    [3000] * 3 + [3000] * 3 + [4000] * 3 + [8000] * 3,
    dtype=int
)
# "+ []" creates a shallow copy, decoupled from THESE_FIELD_NAMES.
FIELD_NAMES_WITH_LAYER_OPS = THESE_FIELD_NAMES + []
NUM_LAYER_OPERATIONS = len(THESE_FIELD_NAMES)
# [{}] * N aliases one dict, but every slot is reassigned in the loop below,
# so the aliasing is harmless.
LIST_OF_LAYER_OPERATION_DICTS = [{}] * NUM_LAYER_OPERATIONS
for k in range(NUM_LAYER_OPERATIONS):
    LIST_OF_LAYER_OPERATION_DICTS[k] = {
        input_examples.RADAR_FIELD_KEY: THESE_FIELD_NAMES[k],
        input_examples.OPERATION_NAME_KEY: LAYER_OPERATION_NAMES[k],
        input_examples.MIN_HEIGHT_KEY: MIN_HEIGHTS_M_AGL[k],
        input_examples.MAX_HEIGHT_KEY: MAX_HEIGHTS_M_AGL[k]
    }
# Expected panel titles for the layer-operation combos, without units.
PANEL_NAMES_WITH_OPS_SANS_UNITS = [
    'Reflectivity\nMIN from 1-3 km AGL',
    'Reflectivity\nMEAN from 1-3 km AGL',
    'Reflectivity\nMAX from 1-3 km AGL',
    'Spectrum width\nMIN from 1-3 km AGL',
    'Spectrum width\nMEAN from 1-3 km AGL',
    'Spectrum width\nMAX from 1-3 km AGL',
    'Vorticity\nMIN from 2-4 km AGL',
    'Vorticity\nMEAN from 2-4 km AGL',
    'Vorticity\nMAX from 2-4 km AGL',
    'Vorticity\nMIN from 5-8 km AGL',
    'Vorticity\nMEAN from 5-8 km AGL',
    'Vorticity\nMAX from 5-8 km AGL'
]
# Same titles, with units appended to the field name.
PANEL_NAMES_WITH_OPS_WITH_UNITS = [
    'Reflectivity (dBZ)\nMIN from 1-3 km AGL',
    'Reflectivity (dBZ)\nMEAN from 1-3 km AGL',
    'Reflectivity (dBZ)\nMAX from 1-3 km AGL',
    'Spectrum width (m s$^{-1}$)\nMIN from 1-3 km AGL',
    'Spectrum width (m s$^{-1}$)\nMEAN from 1-3 km AGL',
    'Spectrum width (m s$^{-1}$)\nMAX from 1-3 km AGL',
    'Vorticity (ks$^{-1}$)\nMIN from 2-4 km AGL',
    'Vorticity (ks$^{-1}$)\nMEAN from 2-4 km AGL',
    'Vorticity (ks$^{-1}$)\nMAX from 2-4 km AGL',
    'Vorticity (ks$^{-1}$)\nMIN from 5-8 km AGL',
    'Vorticity (ks$^{-1}$)\nMEAN from 5-8 km AGL',
    'Vorticity (ks$^{-1}$)\nMAX from 5-8 km AGL'
]
# The following constants are used to test
# fields_and_heights_to_names (single field/height pairs, no layer ops).
FIELD_NAME_BY_PAIR = [
    radar_utils.REFL_NAME, radar_utils.SPECTRUM_WIDTH_NAME,
    radar_utils.VORTICITY_NAME, radar_utils.REFL_NAME
]
HEIGHT_BY_PAIR_M_AGL = numpy.array([1000, 3000, 2000, 10000])
PANEL_NAMES_SANS_OPS_WITH_UNITS = [
    'Reflectivity (dBZ)\nat 1 km AGL',
    'Spectrum width (m s$^{-1}$)\nat 3 km AGL',
    'Vorticity (ks$^{-1}$)\nat 2 km AGL',
    'Reflectivity (dBZ)\nat 10 km AGL'
]
PANEL_NAMES_SANS_OPS_SANS_UNITS = [
    'Reflectivity\nat 1 km AGL',
    'Spectrum width\nat 3 km AGL',
    'Vorticity\nat 2 km AGL',
    'Reflectivity\nat 10 km AGL'
]
class RadarPlottingTests(unittest.TestCase):
    """Unit tests for the panel-name helpers in radar_plotting."""

    def test_layer_operations_to_names_with_units(self):
        """layer_operations_to_names should append units when requested."""
        actual_field_names, actual_panel_names = \
            radar_plotting.layer_operations_to_names(
                list_of_layer_operation_dicts=LIST_OF_LAYER_OPERATION_DICTS,
                include_units=True)

        self.assertTrue(actual_field_names == FIELD_NAMES_WITH_LAYER_OPS)
        self.assertTrue(actual_panel_names == PANEL_NAMES_WITH_OPS_WITH_UNITS)

    def test_layer_operations_to_names_sans_units(self):
        """layer_operations_to_names should omit units when not requested."""
        actual_field_names, actual_panel_names = \
            radar_plotting.layer_operations_to_names(
                list_of_layer_operation_dicts=LIST_OF_LAYER_OPERATION_DICTS,
                include_units=False)

        self.assertTrue(actual_field_names == FIELD_NAMES_WITH_LAYER_OPS)
        self.assertTrue(actual_panel_names == PANEL_NAMES_WITH_OPS_SANS_UNITS)

    def test_fields_and_heights_to_names_with_units(self):
        """fields_and_heights_to_names should append units when requested."""
        actual_panel_names = radar_plotting.fields_and_heights_to_names(
            field_names=FIELD_NAME_BY_PAIR,
            heights_m_agl=HEIGHT_BY_PAIR_M_AGL, include_units=True)

        self.assertTrue(actual_panel_names == PANEL_NAMES_SANS_OPS_WITH_UNITS)

    def test_fields_and_heights_to_names_sans_units(self):
        """fields_and_heights_to_names should omit units when not requested."""
        actual_panel_names = radar_plotting.fields_and_heights_to_names(
            field_names=FIELD_NAME_BY_PAIR,
            heights_m_agl=HEIGHT_BY_PAIR_M_AGL, include_units=False)

        self.assertTrue(actual_panel_names == PANEL_NAMES_SANS_OPS_SANS_UNITS)


if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"gewittergefahr.plotting.radar_plotting.fields_and_heights_to_names",
"gewittergefahr.plotting.radar_plotting.layer_operations_to_names",
"numpy.array"
] | [((663, 736), 'numpy.array', 'numpy.array', (['([1000] * 3 + [1000] * 3 + [2000] * 3 + [5000] * 3)'], {'dtype': 'int'}), '([1000] * 3 + [1000] * 3 + [2000] * 3 + [5000] * 3, dtype=int)\n', (674, 736), False, 'import numpy\n'), ((768, 841), 'numpy.array', 'numpy.array', (['([3000] * 3 + [3000] * 3 + [4000] * 3 + [8000] * 3)'], {'dtype': 'int'}), '([3000] * 3 + [3000] * 3 + [4000] * 3 + [8000] * 3, dtype=int)\n', (779, 841), False, 'import numpy\n'), ((2760, 2798), 'numpy.array', 'numpy.array', (['[1000, 3000, 2000, 10000]'], {}), '([1000, 3000, 2000, 10000])\n', (2771, 2798), False, 'import numpy\n'), ((5380, 5395), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5393, 5395), False, 'import unittest\n'), ((3538, 3664), 'gewittergefahr.plotting.radar_plotting.layer_operations_to_names', 'radar_plotting.layer_operations_to_names', ([], {'list_of_layer_operation_dicts': 'LIST_OF_LAYER_OPERATION_DICTS', 'include_units': '(True)'}), '(list_of_layer_operation_dicts=\n LIST_OF_LAYER_OPERATION_DICTS, include_units=True)\n', (3578, 3664), False, 'from gewittergefahr.plotting import radar_plotting\n'), ((4113, 4240), 'gewittergefahr.plotting.radar_plotting.layer_operations_to_names', 'radar_plotting.layer_operations_to_names', ([], {'list_of_layer_operation_dicts': 'LIST_OF_LAYER_OPERATION_DICTS', 'include_units': '(False)'}), '(list_of_layer_operation_dicts=\n LIST_OF_LAYER_OPERATION_DICTS, include_units=False)\n', (4153, 4240), False, 'from gewittergefahr.plotting import radar_plotting\n'), ((4652, 4786), 'gewittergefahr.plotting.radar_plotting.fields_and_heights_to_names', 'radar_plotting.fields_and_heights_to_names', ([], {'field_names': 'FIELD_NAME_BY_PAIR', 'heights_m_agl': 'HEIGHT_BY_PAIR_M_AGL', 'include_units': '(True)'}), '(field_names=FIELD_NAME_BY_PAIR,\n heights_m_agl=HEIGHT_BY_PAIR_M_AGL, include_units=True)\n', (4694, 4786), False, 'from gewittergefahr.plotting import radar_plotting\n'), ((5111, 5246), 
'gewittergefahr.plotting.radar_plotting.fields_and_heights_to_names', 'radar_plotting.fields_and_heights_to_names', ([], {'field_names': 'FIELD_NAME_BY_PAIR', 'heights_m_agl': 'HEIGHT_BY_PAIR_M_AGL', 'include_units': '(False)'}), '(field_names=FIELD_NAME_BY_PAIR,\n heights_m_agl=HEIGHT_BY_PAIR_M_AGL, include_units=False)\n', (5153, 5246), False, 'from gewittergefahr.plotting import radar_plotting\n')] |
from sklearn.feature_selection import VarianceThreshold, SelectFromModel
from sklearn.svm import LinearSVC
import numpy as np
def remove_features(data, target, fn):
    """
    Run a feature-selection strategy and report which columns survive.

    :param data: 2-D array-like of shape (n_samples, n_features).
    :param target: label vector; only used by the 'L1' strategy.
    :param fn: selection strategy, 'variance' or 'L1'.  Any other value
        keeps no features (mirrors the original fall-through behaviour).
    :return: tuple ``(kept_cols, removed_cols)`` of column indices.
    """
    n_features = np.shape(data)[1]
    if fn == 'variance':
        # Drop features whose variance is below .1 * (1 - .8) = 0.02.
        sel = VarianceThreshold(threshold=(.1 * (1 - .8)))
        sel.fit(data)
        support = sel.get_support()
    elif fn == 'L1':
        # L1-penalised linear SVC zeroes out coefficients of weak features.
        lsvc = LinearSVC(C=0.01, penalty="l1", dual=False).fit(data, target)
        model = SelectFromModel(lsvc, prefit=True)
        support = model.get_support()
    else:
        support = np.zeros(n_features, dtype=bool)
    # Use the selector's own support mask instead of the original
    # "column not in selected matrix" test: NumPy's `in` compares
    # element-wise and silently mis-classifies duplicate columns.
    kept_cols = [i for i in range(n_features) if support[i]]
    removed_cols = [i for i in range(n_features) if not support[i]]
    return kept_cols, removed_cols
| [
"sklearn.svm.LinearSVC",
"sklearn.feature_selection.SelectFromModel",
"numpy.transpose",
"sklearn.feature_selection.VarianceThreshold"
] | [((698, 725), 'numpy.transpose', 'np.transpose', (['selected_data'], {}), '(selected_data)\n', (710, 725), True, 'import numpy as np\n'), ((739, 757), 'numpy.transpose', 'np.transpose', (['data'], {}), '(data)\n', (751, 757), True, 'import numpy as np\n'), ((392, 436), 'sklearn.feature_selection.VarianceThreshold', 'VarianceThreshold', ([], {'threshold': '(0.1 * (1 - 0.8))'}), '(threshold=0.1 * (1 - 0.8))\n', (409, 436), False, 'from sklearn.feature_selection import VarianceThreshold, SelectFromModel\n'), ((599, 633), 'sklearn.feature_selection.SelectFromModel', 'SelectFromModel', (['lsvc'], {'prefit': '(True)'}), '(lsvc, prefit=True)\n', (614, 633), False, 'from sklearn.feature_selection import VarianceThreshold, SelectFromModel\n'), ((521, 564), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'C': '(0.01)', 'penalty': '"""l1"""', 'dual': '(False)'}), "(C=0.01, penalty='l1', dual=False)\n", (530, 564), False, 'from sklearn.svm import LinearSVC\n')] |
import numpy as np
import torch
import argparse
def strToBool(value):
    """Parse a human-friendly boolean string (case-insensitive).

    Accepts true/t/1/yes/y and false/f/0/no/n; raises ValueError otherwise.
    """
    lowered = value.lower()
    if lowered in {'true', 't', '1', 'yes', 'y'}:
        return True
    if lowered in {'false', 'f', '0', 'no', 'n'}:
        return False
    raise ValueError(f'{value} is not a valid boolean value')
# ---------------------------------------------------------------------------
# Command-line configuration for RL training.  Everything below runs at
# import time: parse args, then flatten them into module-level globals that
# the rest of the project reads directly.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
env_group = parser.add_argument_group('environment')
env_group.add_argument('--env', type=str, default='close_loop_block_picking', help='close_loop_block_pulling, close_loop_household_picking, close_loop_drawer_opening, close_loop_block_stacking, close_loop_house_building_1, close_loop_block_picking_corner')
env_group.add_argument('--simulator', type=str, default='pybullet')
env_group.add_argument('--robot', type=str, default='kuka')
env_group.add_argument('--max_episode_steps', type=int, default=100)
env_group.add_argument('--action_sequence', type=str, default='pxyzr')
env_group.add_argument('--random_orientation', type=strToBool, default=True)
env_group.add_argument('--num_processes', type=int, default=5)
env_group.add_argument('--num_eval_processes', type=int, default=5)
env_group.add_argument('--render', type=strToBool, default=False)
env_group.add_argument('--workspace_size', type=float, default=0.4)
env_group.add_argument('--heightmap_size', type=int, default=128)
env_group.add_argument('--view_type', type=str, default='camera_center_xyz')
env_group.add_argument('--obs_type', type=str, default='pixel')
training_group = parser.add_argument_group('training')
training_group.add_argument('--alg', default='sac')
training_group.add_argument('--model', type=str, default='equi_both')
training_group.add_argument('--lr', type=float, default=1e-3)
training_group.add_argument('--actor_lr', type=float, default=None)
training_group.add_argument('--critic_lr', type=float, default=None)
training_group.add_argument('--gamma', type=float, default=0.99)
training_group.add_argument('--explore', type=int, default=0)
training_group.add_argument('--fixed_eps', action='store_true')
training_group.add_argument('--init_eps', type=float, default=1.0)
training_group.add_argument('--final_eps', type=float, default=0.)
training_group.add_argument('--training_iters', type=int, default=1)
training_group.add_argument('--training_offset', type=int, default=100)
training_group.add_argument('--max_train_step', type=int, default=20000)
training_group.add_argument('--device_name', type=str, default='cuda')
training_group.add_argument('--target_update_freq', type=int, default=100)
training_group.add_argument('--save_freq', type=int, default=500)
training_group.add_argument('--load_model_pre', type=str, default=None)
training_group.add_argument('--planner_episode', type=int, default=20)
training_group.add_argument('--note', type=str, default=None)
training_group.add_argument('--seed', type=int, default=None)
training_group.add_argument('--load_buffer', type=str, default=None)
training_group.add_argument('--load_n', type=int, default=1000000)
training_group.add_argument('--pre_train_step', type=int, default=0)
training_group.add_argument('--tau', type=float, default=1e-2)
training_group.add_argument('--init_temp', type=float, default=1e-2)
training_group.add_argument('--dpos', type=float, default=0.05)
training_group.add_argument('--drot_n', type=int, default=8)
training_group.add_argument('--demon_w', type=float, default=1)
training_group.add_argument('--equi_n', type=int, default=8)
training_group.add_argument('--n_hidden', type=int, default=64)
training_group.add_argument('--crop_size', type=int, default=128)
training_group.add_argument('--aug', type=strToBool, default=False)
training_group.add_argument('--buffer_aug_type', type=str, choices=['se2', 'so2', 't', 'dqn_c4', 'so2_vec', 'shift', 'crop'], default='so2')
training_group.add_argument('--aug_type', type=str, choices=['se2', 'so2', 't', 'dqn_c4', 'so2_vec', 'shift', 'crop'], default='so2')
training_group.add_argument('--buffer_aug_n', type=int, default=4)
training_group.add_argument('--expert_aug_n', type=int, default=0)
eval_group = parser.add_argument_group('eval')
eval_group.add_argument('--eval_freq', default=500, type=int)
eval_group.add_argument('--num_eval_episodes', default=100, type=int)
buffer_group = parser.add_argument_group('buffer')
buffer_group.add_argument('--buffer', default='aug', choices=['normal', 'per', 'expert', 'per_expert', 'aug', 'per_expert_aug'])
buffer_group.add_argument('--per_eps', type=float, default=1e-6, help='Epsilon parameter for PER')
buffer_group.add_argument('--per_alpha', type=float, default=0.6, help='Alpha parameter for PER')
buffer_group.add_argument('--per_beta', type=float, default=0.4, help='Initial beta parameter for PER')
buffer_group.add_argument('--per_expert_eps', type=float, default=1)
buffer_group.add_argument('--batch_size', type=int, default=64)
buffer_group.add_argument('--buffer_size', type=int, default=100000)
logging_group = parser.add_argument_group('logging')
logging_group.add_argument('--log_pre', type=str, default='outputs')
logging_group.add_argument('--log_sub', type=str, default=None)
logging_group.add_argument('--no_bar', action='store_true')
logging_group.add_argument('--time_limit', type=float, default=10000)
logging_group.add_argument('--load_sub', type=str, default=None)
args = parser.parse_args()
# env
random_orientation = args.random_orientation
env = args.env
simulator = args.simulator
max_episode_steps = args.max_episode_steps
action_sequence = args.action_sequence
num_processes = args.num_processes
num_eval_processes = args.num_eval_processes
render = args.render
robot = args.robot
workspace_size = args.workspace_size
# Cubic workspace centred at x=0.45, y=0, spanning a fixed z band.
workspace = np.asarray([[0.45-workspace_size/2, 0.45+workspace_size/2],
                        [0-workspace_size/2, 0+workspace_size/2],
                        [0.01, 0.25]])
heightmap_size = args.heightmap_size
# Metres per heightmap pixel.
heightmap_resolution = workspace_size/heightmap_size
action_space = [0, heightmap_size]
view_type = args.view_type
obs_type = args.obs_type
# Multi-object tasks need two objects; everything else uses one.
if env in ['close_loop_block_stacking', 'close_loop_house_building_1', 'close_loop_block_pulling']:
    num_objects = 2
else:
    num_objects = 1
######################################################################################
# training
alg = args.alg
model = args.model
lr = args.lr
actor_lr = args.actor_lr
critic_lr = args.critic_lr
# Per-network learning rates fall back to the shared --lr when unset.
if actor_lr is None:
    actor_lr = lr
if critic_lr is None:
    critic_lr = lr
gamma = args.gamma
explore = args.explore
fixed_eps = args.fixed_eps
init_eps = args.init_eps
final_eps = args.final_eps
training_iters = args.training_iters
training_offset = args.training_offset
max_train_step = args.max_train_step
device = torch.device(args.device_name)
target_update_freq = args.target_update_freq
save_freq = args.save_freq
planner_episode = args.planner_episode
load_model_pre = args.load_model_pre
note = args.note
seed = args.seed
tau = args.tau
init_temp = args.init_temp
demon_w = args.demon_w
equi_n = args.equi_n
n_hidden = args.n_hidden
crop_size = args.crop_size
aug = args.aug
aug_type = args.aug_type
buffer_aug_type = args.buffer_aug_type
buffer_aug_n = args.buffer_aug_n
expert_aug_n = args.expert_aug_n
# eval
eval_freq = args.eval_freq
num_eval_episodes = args.num_eval_episodes
# pre train
load_buffer = args.load_buffer
load_n = args.load_n
pre_train_step = args.pre_train_step
# buffer
buffer_type = args.buffer
per_eps = args.per_eps
per_alpha = args.per_alpha
per_beta = args.per_beta
per_expert_eps = args.per_expert_eps
batch_size = args.batch_size
buffer_size = args.buffer_size
# logging
log_pre = args.log_pre
log_sub = args.log_sub
no_bar = args.no_bar
time_limit = args.time_limit
load_sub = args.load_sub
# Allow the literal string 'None' on the command line to mean "no sub-dir".
if load_sub == 'None':
    load_sub = None
dpos = args.dpos
# Rotation step size (radians) derived from the number of rotation bins.
drot = np.pi/args.drot_n
######################################################################################
env_config = {'workspace': workspace, 'max_steps': max_episode_steps, 'obs_size': heightmap_size,
              'fast_mode': True, 'action_sequence': action_sequence, 'render': render, 'num_objects': num_objects,
              'random_orientation':random_orientation, 'robot': robot,
              'workspace_check': 'point', 'object_scale_range': (1, 1),
              'hard_reset_freq': 1000, 'physics_mode' : 'fast', 'view_type': view_type, 'obs_type': obs_type}
planner_config = {'random_orientation':random_orientation, 'dpos': dpos, 'drot': drot}
# Task-specific override: household picking uses smaller objects.
if env == 'close_loop_household_picking':
    env_config['object_scale_range'] = (0.6, 0.6)
if seed is not None:
    env_config['seed'] = seed
######################################################################################
# Echo the full hyper-parameter set (sorted) for reproducibility logs.
hyper_parameters = {}
for key in sorted(vars(args)):
    hyper_parameters[key] = vars(args)[key]
for key in hyper_parameters:
    print('{}: {}'.format(key, hyper_parameters[key]))
"numpy.asarray",
"argparse.ArgumentParser",
"torch.device"
] | [((298, 323), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (321, 323), False, 'import argparse\n'), ((5660, 5797), 'numpy.asarray', 'np.asarray', (['[[0.45 - workspace_size / 2, 0.45 + workspace_size / 2], [0 - \n workspace_size / 2, 0 + workspace_size / 2], [0.01, 0.25]]'], {}), '([[0.45 - workspace_size / 2, 0.45 + workspace_size / 2], [0 - \n workspace_size / 2, 0 + workspace_size / 2], [0.01, 0.25]])\n', (5670, 5797), True, 'import numpy as np\n'), ((6671, 6701), 'torch.device', 'torch.device', (['args.device_name'], {}), '(args.device_name)\n', (6683, 6701), False, 'import torch\n')] |
import cv2
import numpy as np
LEFT_EYE_INDICES = [36, 37, 38, 39, 40, 41]
RIGHT_EYE_INDICES = [42, 43, 44, 45, 46, 47]
class MyFaceAligner:
    """Rotate/scale a face image so the eyes land at canonical positions.

    Eye centres are derived from a 2 x 68 landmark array (row 0 = x,
    row 1 = y): columns 36-41 are the left eye, 42-47 the right eye.
    """

    def __init__(self, desiredLeftEye=(0.35, 0.35),
                 desiredFaceWidth=256, desiredFaceHeight=None):
        """Store the target eye position and output size.

        :param desiredLeftEye: left-eye location as fractions of output size.
        :param desiredFaceWidth: output width in pixels.
        :param desiredFaceHeight: output height; defaults to a square output.
        """
        self.desiredLeftEye = desiredLeftEye
        self.desiredFaceWidth = desiredFaceWidth
        # Square output by default.
        self.desiredFaceHeight = (desiredFaceWidth if desiredFaceHeight is None
                                  else desiredFaceHeight)

    def align(self, image, shape):
        """Warp ``image`` so both eyes sit at their desired output spots."""
        # Mean of each eye's six landmark points -> integer pixel centres.
        left_eye = shape[:, 36:42].mean(axis=1).astype("int")
        right_eye = shape[:, 42:48].mean(axis=1).astype("int")

        # In-plane rotation of the line connecting the eyes.
        d_y = right_eye[1] - left_eye[1]
        d_x = right_eye[0] - left_eye[0]
        angle = np.degrees(np.arctan2(d_y, d_x))

        # Scale so the inter-ocular distance matches the desired fraction
        # of the output width.
        desired_right_x = 1.0 - self.desiredLeftEye[0]
        eye_dist = np.sqrt((d_x ** 2) + (d_y ** 2))
        desired_dist = (desired_right_x - self.desiredLeftEye[0])
        desired_dist *= self.desiredFaceWidth
        scale = desired_dist / eye_dist

        # Rotate about the midpoint between the eyes.
        eyes_center = ((left_eye[0] + right_eye[0]) // 2,
                       (left_eye[1] + right_eye[1]) // 2)
        M = cv2.getRotationMatrix2D(eyes_center, angle, scale)

        # Translate so the eye midpoint lands at the desired output location.
        M[0, 2] += (self.desiredFaceWidth * 0.5 - eyes_center[0])
        M[1, 2] += (self.desiredFaceHeight * self.desiredLeftEye[1] - eyes_center[1])

        out_size = (self.desiredFaceWidth, self.desiredFaceHeight)
        return cv2.warpAffine(image, M, out_size,
                              flags=cv2.INTER_CUBIC)
class MyFaceAligner_RAF:
    """Face aligner for landmark arrays that provide eye centres directly.

    NOTE(review): near-duplicate of ``MyFaceAligner`` above; the only
    difference is how the eye centres are obtained (columns 0 and 1 of
    ``shape`` instead of averaging 68-point eye landmarks).  Presumably
    this matches the RAF-DB annotation format — confirm against the
    dataset loader.
    """
    def __init__(self, desiredLeftEye=(0.35, 0.35),
                 desiredFaceWidth=256, desiredFaceHeight=None):
        # Target left-eye location (fractions of the output size) and the
        # desired output width/height in pixels.
        self.desiredLeftEye = desiredLeftEye
        self.desiredFaceWidth = desiredFaceWidth
        self.desiredFaceHeight = desiredFaceHeight
        # If the desired face height is None, set it to the desired face
        # width (square output).
        if self.desiredFaceHeight is None:
            self.desiredFaceHeight = self.desiredFaceWidth
    def align(self, image, shape):
        """Warp ``image`` so both eyes land at the desired output spots.

        :param image: input face image.
        :param shape: 2 x N landmark array; column 0 is the left eye
            centre and column 1 the right eye centre (x in row 0, y in
            row 1).
        """
        # Eye centres come straight from the landmark array.
        leftEyeCenter = shape[:, 0].astype("int")
        rightEyeCenter = shape[:, 1].astype("int")
        # Compute the angle between the eye centroids.
        dY = rightEyeCenter[1] - leftEyeCenter[1]
        dX = rightEyeCenter[0] - leftEyeCenter[0]
        angle = np.degrees(np.arctan2(dY, dX))
        # Desired right-eye x-coordinate, mirrored from the left eye's.
        desiredRightEyeX = 1.0 - self.desiredLeftEye[0]
        # Scale = desired inter-ocular distance / current one.
        dist = np.sqrt((dX ** 2) + (dY ** 2))
        desiredDist = (desiredRightEyeX - self.desiredLeftEye[0])
        desiredDist *= self.desiredFaceWidth
        scale = desiredDist / dist
        # Midpoint between the eyes: the rotation centre.
        eyesCenter = ((leftEyeCenter[0] + rightEyeCenter[0]) // 2,
                      (leftEyeCenter[1] + rightEyeCenter[1]) // 2)
        # Rotation + scaling about the eye midpoint.
        M = cv2.getRotationMatrix2D(eyesCenter, angle, scale)
        # Update the translation component so the eye midpoint maps to the
        # desired output location.
        tX = self.desiredFaceWidth * 0.5
        tY = self.desiredFaceHeight * self.desiredLeftEye[1]
        M[0, 2] += (tX - eyesCenter[0])
        M[1, 2] += (tY - eyesCenter[1])
        # Apply the affine transformation.
        (w, h) = (self.desiredFaceWidth, self.desiredFaceHeight)
        output = cv2.warpAffine(image, M, (w, h),
                                flags=cv2.INTER_CUBIC)
        # Return the aligned face.
        return output
#
# detector = dlib.get_frontal_face_detector()
# predictor = dlib.shape_predictor("/scratch_net/biwidl213/emotion/shape_predictor_68_face_landmarks.dat")
#
# image = cv2.imread('img.jpg')
# fa = FaceAligner(predictor, desiredLeftEye=(0.3, 0.3), desiredFaceWidth=256)
#
# faceAligned = fa.align(image, shape)
# cv2.imwrite("Aligned.jpg", faceAligned) | [
"cv2.warpAffine",
"numpy.arctan2",
"cv2.getRotationMatrix2D",
"numpy.sqrt"
] | [((2099, 2125), 'numpy.sqrt', 'np.sqrt', (['(dX ** 2 + dY ** 2)'], {}), '(dX ** 2 + dY ** 2)\n', (2106, 2125), True, 'import numpy as np\n'), ((2612, 2661), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['eyesCenter', 'angle', 'scale'], {}), '(eyesCenter, angle, scale)\n', (2635, 2661), False, 'import cv2\n'), ((3027, 3082), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'M', '(w, h)'], {'flags': 'cv2.INTER_CUBIC'}), '(image, M, (w, h), flags=cv2.INTER_CUBIC)\n', (3041, 3082), False, 'import cv2\n'), ((5134, 5160), 'numpy.sqrt', 'np.sqrt', (['(dX ** 2 + dY ** 2)'], {}), '(dX ** 2 + dY ** 2)\n', (5141, 5160), True, 'import numpy as np\n'), ((5647, 5696), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['eyesCenter', 'angle', 'scale'], {}), '(eyesCenter, angle, scale)\n', (5670, 5696), False, 'import cv2\n'), ((6062, 6117), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'M', '(w, h)'], {'flags': 'cv2.INTER_CUBIC'}), '(image, M, (w, h), flags=cv2.INTER_CUBIC)\n', (6076, 6117), False, 'import cv2\n'), ((1673, 1691), 'numpy.arctan2', 'np.arctan2', (['dY', 'dX'], {}), '(dY, dX)\n', (1683, 1691), True, 'import numpy as np\n'), ((4708, 4726), 'numpy.arctan2', 'np.arctan2', (['dY', 'dX'], {}), '(dY, dX)\n', (4718, 4726), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# @Time : 2019/10/10 17:06
# @Author : <NAME>
# @FileName: DataLoader.py
# @Email: <EMAIL>
import os
from skimage import io
import torch
import pandas as pd
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from torchvision.models import resnet18
from random import randint
from skimage import io, transform
import numpy as np
class img_dataset(Dataset):
    """
    Dataset of (frame, tag) pairs stored on disk.

    Expected layout of ``root_dir`` (one pair of entries per video ``idx``):
        <root_dir>/<idx>/      directory of frames named ``<n>.png``
        <root_dir>/<idx>.txt   one comma-separated int tag line per frame
    """
    def __init__(self, root_dir, transform=None):
        """
        :param root_dir: the root path of both imgs and ground truth
        :param transform: transform of the imag and tag
        """
        self.root_dir = root_dir
        self.transform = transform
    def __len__(self):
        # Each video contributes two directory entries (frame dir + tag
        # file), so the number of videos is half the entry count.
        # Fix: the original counted <module_dir>\data instead of
        # self.root_dir, so __len__ ignored the constructor argument.
        return len(os.listdir(self.root_dir)) // 2
    def __getitem__(self, idx=0):
        """
        :param idx: the index of the video
        :return: one randomly-sampled frame/tag pair from that video
        """
        if torch.is_tensor(idx):
            idx = idx.tolist()
        # os.path.join instead of hard-coded "\\" so the dataset also
        # works outside Windows.
        video_name = os.path.join(self.root_dir, str(idx))
        tag_name = os.path.join(self.root_dir, str(idx) + ".txt")
        sample = {'video_name': video_name, 'tag_name': tag_name}
        return self.get_imgs(sample=sample)
    def read_tag(self, file):
        """Parse a tag file: one comma-separated int vector per line."""
        lines = list()
        # `with` already closes the file; the original's extra f.close()
        # inside the block was redundant.
        with open(file, 'r') as f:
            for raw in f:
                if raw.endswith("\n"):
                    raw = raw[:-1]
                lines.append(np.array(list(map(int, raw.split(",")))))
        return lines
    def get_imgs(self, sample):
        """Pick one random frame of the video and pair it with its tag."""
        video_name = sample['video_name']
        tag_name = sample['tag_name']
        lines = self.read_tag(tag_name)
        # Uniformly pick a frame index that has a tag entry.
        start_point = randint(0, len(lines) - 1)
        img_name = os.path.join(video_name, "{}.png".format(start_point))
        tag = lines[start_point]
        img = io.imread(img_name)
        pair = {"img": img, "tag": tag}
        if self.transform:
            pair = self.transform(pair)
            # ToTensor adds a leading singleton batch axis; drop it so the
            # DataLoader can add its own batch dimension.
            pair['image'] = pair['image'][0]
        return pair
class Rescale(object):
    """Resize the sample image to a target size; the tag passes through."""

    def __init__(self, output_size):
        """
        :param output_size: tuple -> exact (height, width); int -> the
            smaller image edge is scaled to this value, keeping the
            aspect ratio.
        """
        assert isinstance(output_size, (int, tuple))
        self.output_size = output_size

    def __call__(self, sample):
        image, tag = sample["img"], sample["tag"]
        h, w = image.shape[:2]
        if isinstance(self.output_size, tuple):
            new_h, new_w = self.output_size
        elif h > w:
            # Width is the smaller edge: pin it to output_size.
            new_h, new_w = self.output_size * h / w, self.output_size
        else:
            new_h, new_w = self.output_size, self.output_size * w / h
        resized = transform.resize(image, (int(new_h), int(new_w)))
        return {"img": resized, "tag": tag}
class RandomCrop(object):
    """
    Crop a random (new_h, new_w) window out of the sample image.
    """

    def __init__(self, output_size):
        """
        :param output_size: int (square crop) or (height, width) tuple.
        """
        assert isinstance(output_size, (int, tuple))
        if isinstance(output_size, int):
            self.output_size = (output_size, output_size)
        else:
            assert len(output_size) == 2
            self.output_size = output_size

    def __call__(self, sample):
        image, tag = sample['img'], sample['tag']
        h, w = image.shape[:2]
        new_h, new_w = self.output_size
        # randint's high bound is exclusive, so "+ 1" lets the crop start at
        # the last valid offset and fixes the original ValueError when the
        # crop size equals the image size (np.random.randint(0, 0) raises).
        top = np.random.randint(0, h - new_h + 1)
        left = np.random.randint(0, w - new_w + 1)
        image = image[top: top + new_h,
                left: left + new_w]
        return {"img": image, "tag": tag}
class ToTensor(object):
    """
    Convert ndarrays in a sample to float Tensors with a leading batch axis.
    """
    def __call__(self, sample):
        img, tag = sample['img'], sample['tag']
        # numpy image: H x W x C  ->  torch image: C x H x W
        chw = img.transpose((2, 0, 1))
        image_tensor = torch.from_numpy(chw).unsqueeze(0).float()
        tag_tensor = torch.from_numpy(tag).unsqueeze(0).float()
        return {'image': image_tensor, 'tag': tag_tensor}
class Normalize(object):
    """
    normalize the given data
    """
    def __call__(self, sample, mean, std):
        # TODO: not implemented -- this is a placeholder that returns None;
        # note the transforms.Normalize call is commented out in the
        # __main__ pipeline below, so this class is currently unused.
        pass
if __name__ == '__main__':
    # Build the dataset with a Rescale -> RandomCrop -> ToTensor pipeline.
    # NOTE(review): "F:\img_training\data" is a non-raw Windows path; "\i"
    # and "\d" happen to pass through unescaped, but r"..." would be safer.
    dataset = img_dataset(root_dir="F:\img_training\data", transform=transforms.Compose([
        Rescale(256),
        RandomCrop(224),
        ToTensor(),
        # transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]))
    dataloader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=4)
    # Use the first CUDA device when available, otherwise fall back to CPU.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    i = 0
    # batch size == 4, so the length of dataloader = length of dataset / 4 = 9
    for item in dataloader:
        # .to(device)
        img = item['image'].to(device)
        print(i)
        tag = item['tag'].to(device)
        i += 1
        print(img.size(), tag.size())
    # dataiter = iter(dataloader)
    # img = next(dataiter)["image"]
    # tag = next(dataiter)["tag"]
    # imgs, labels = next(dataiter)
| [
"torch.utils.data.DataLoader",
"os.path.dirname",
"numpy.random.randint",
"skimage.transform.resize",
"torch.cuda.is_available",
"numpy.array",
"torch.is_tensor",
"os.listdir",
"skimage.io.imread",
"torch.from_numpy"
] | [((5623, 5685), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(4)', 'shuffle': '(True)', 'num_workers': '(4)'}), '(dataset, batch_size=4, shuffle=True, num_workers=4)\n', (5633, 5685), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((1102, 1122), 'torch.is_tensor', 'torch.is_tensor', (['idx'], {}), '(idx)\n', (1117, 1122), False, 'import torch\n'), ((2755, 2774), 'skimage.io.imread', 'io.imread', (['img_name'], {}), '(img_name)\n', (2764, 2774), False, 'from skimage import io, transform\n'), ((3901, 3940), 'skimage.transform.resize', 'transform.resize', (['image', '(new_h, new_w)'], {}), '(image, (new_h, new_w))\n', (3917, 3940), False, 'from skimage import io, transform\n'), ((4521, 4552), 'numpy.random.randint', 'np.random.randint', (['(0)', '(h - new_h)'], {}), '(0, h - new_h)\n', (4538, 4552), True, 'import numpy as np\n'), ((4568, 4599), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w - new_w)'], {}), '(0, w - new_w)\n', (4585, 4599), True, 'import numpy as np\n'), ((825, 850), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (840, 850), False, 'import os\n'), ((872, 899), 'os.listdir', 'os.listdir', (["(path + '\\\\data')"], {}), "(path + '\\\\data')\n", (882, 899), False, 'import os\n'), ((5724, 5749), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5747, 5749), False, 'import torch\n'), ((1837, 1851), 'numpy.array', 'np.array', (['line'], {}), '(line)\n', (1845, 1851), True, 'import numpy as np\n'), ((5040, 5063), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (5056, 5063), False, 'import torch\n'), ((5103, 5124), 'torch.from_numpy', 'torch.from_numpy', (['tag'], {}), '(tag)\n', (5119, 5124), False, 'import torch\n')] |
"""
HSDS data extraction functions
"""
import dateutil
import h5pyd
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
from pyproj import Proj
from scipy.spatial import cKDTree
import seaborn as sns
# Global plotting defaults: DejaVu Sans text, Type 42 (TrueType) fonts in
# PDF output, larger tick/font sizes, and seaborn's "ticks" style.
mpl.rcParams['font.sans-serif'] = 'DejaVu Sans'
mpl.rcParams['pdf.fonttype'] = 42
mpl.rc('xtick', labelsize=14)
mpl.rc('ytick', labelsize=14)
mpl.rc('font', size=16)
sns.set_style("white")
sns.set_style("ticks")
def WTK_idx(wtk, lat_lon):
    """
    Find the nearest x/y WTK indices for a given lat/lon using the WTK's
    Lambert conformal conic projection (2 km grid spacing).

    Parameters
    ----------
    wtk : 'h5pyd.File'
        h5pyd File instance for the WTK
    lat_lon : tuple | list
        (lat, lon) coordinates of interest

    Results
    -------
    ij : 'tuple'
        x/y coordinate in the database of the closest pixel to coordinate of
        interest
    """
    dset_coords = wtk['coordinates']
    projstring = """+proj=lcc +lat_1=30 +lat_2=60
                    +lat_0=38.47240422490422 +lon_0=-96.0
                    +x_0=0 +y_0=0 +ellps=sphere
                    +units=m +no_defs """
    lcc = Proj(projstring)
    # Project the grid origin (first stored coordinate) and the query point;
    # Proj expects (lon, lat) order, hence the reversals.
    origin_x, origin_y = lcc(*reversed(dset_coords[0][0]))
    point_x, point_y = lcc(*reversed(lat_lon))
    # 2000 m grid spacing; the database is indexed (y, x).
    col = int(round((point_x - origin_x) / 2000))
    row = int(round((point_y - origin_y) / 2000))
    return (row, col)
def NSRDB_idx(nsrdb, lat_lon):
    """
    Find the NSRDB site index for a given lat/lon via a KDTree lookup.

    Parameters
    ----------
    nsrdb : 'h5pyd.File'
        h5pyd File instance for the NSRDB (must expose a 'coordinates'
        dataset of (lat, lon) rows)
    lat_lon : tuple | list
        (lat, lon) coordinates of interest

    Results
    -------
    pos : int
        Index in the database of the site closest to the coordinate of
        interest
    """
    site_coords = nsrdb['coordinates'][...]
    _, site_idx = cKDTree(site_coords).query(np.array(lat_lon))
    return site_idx
def datetimeIndex(f):
    """
    Build a DataFrame of parsed timestamps from the file's time dimension.

    Parameters
    ----------
    f : 'h5pyd.File'
        h5pyd File instance for the wtk_us.h5 (must expose a 'datetime'
        dataset of timestamp strings)

    Results
    -------
    frame : 'pd.DataFrame'
        DataFrame with a single 'datetime' column of parsed stamps,
        indexed 0..N-1
    """
    raw = f["datetime"]
    frame = pd.DataFrame({"datetime": raw[:]}, index=range(0, raw.shape[0]))
    # NOTE(review): dateutil.parser is reachable here only because pandas
    # imports it internally; an explicit "import dateutil.parser" at the
    # top of the file would be safer -- confirm before relying on it.
    frame["datetime"] = frame["datetime"].apply(dateutil.parser.parse)
    return frame
class HSDS:
    """
    HSDS Resource handler class

    Wraps an h5pyd File (WTK / NSRDB style resource) and lazily exposes its
    time index, site meta data and a KDTree over site coordinates, plus
    helpers to extract and plot time-series and snapshot data.
    """
    def __init__(self, hsds_path, preload=False):
        """
        Parameters
        ----------
        hsds_path : str
            Path / domain of the HSDS resource, opened read-only
        preload : bool
            If True, eagerly load time_index, meta and the coordinate tree
        """
        self._h5d = h5pyd.File(hsds_path, mode='r')
        if preload:
            self.preload()
        else:
            self._time_index = None
            self._meta = None
            self._tree = None

    @property
    def time_index(self):
        """
        Returns
        -------
        _time_index : pd.DatetimeIndex
            Datetime index vector for given HSDS file (lazily loaded)
        """
        if self._time_index is None:
            time_index = self._h5d['time_index'][...].astype(str)
            self._time_index = pd.to_datetime(time_index)
        return self._time_index

    @property
    def meta(self):
        """
        Returns
        -------
        _meta : pd.DataFrame
            Site meta data for given HSDS file (lazily loaded)
        """
        if self._meta is None:
            self._meta = pd.DataFrame(self._h5d['meta'][...])
        return self._meta

    @property
    def tree(self):
        """
        Returns
        -------
        _tree : cKDTree
            KDTree on site coordinates (latitude, longitude), lazily built
        """
        if self._tree is None:
            site_coords = self._h5d['coordinates'][...]
            self._tree = cKDTree(site_coords)
        return self._tree

    def preload(self):
        """
        Eagerly load time_index, meta, and the coordinate KDTree
        """
        time_index = self._h5d['time_index'][...].astype(str)
        self._time_index = pd.to_datetime(time_index)
        site_coords = self._h5d['coordinates'][...]
        self._tree = cKDTree(site_coords)
        self._meta = pd.DataFrame(self._h5d['meta'][...])

    def _nearest_site(self, coords):
        """
        Find nearest site to coordinate (lat, lon) of interest

        Parameters
        ----------
        coords : tuple
            (lat, lon) coordinates of interest

        Returns
        -------
        site_idx : int
            Site index in the datasets
        """
        lat_lon = np.array(coords)
        _, site_idx = self.tree.query(lat_lon)
        return site_idx

    def _nearest_timestep(self, timestep):
        """
        Find the nearest timestep to timestep of interest

        Parameters
        ----------
        timestep : datetime
            Datetime step of interest

        Returns
        -------
        time_idx : int
            Time index in the datasets
        """
        delta = np.abs(self.time_index - timestep)
        time_idx = delta.argmin()
        return time_idx

    def _get_region_idx(self, value, column='state'):
        """
        Find sites associated with given region

        Parameters
        ----------
        value : str
            Regional value to filter to
        column : str
            Column in the meta data to filter on

        Returns
        -------
        region_idx : ndarray
            Indices of all sites corresponding to region of interest
        """
        if column in self.meta:
            # meta columns are stored as bytes; decode before comparing
            col_data = self.meta[column].str.decode('utf-8')
            region_idx = self.meta.index[col_data == value].values
        else:
            raise ValueError('{} is not a valid column in meta'
                             .format(column))
        return region_idx

    def _get_conus_idx(self):
        """
        Find sites associated with CONUS (continental US: United States
        minus Alaska, Hawaii, and sites with state 'None')

        Returns
        -------
        conus_idx : ndarray
            Indices of all sites in CONUS
        """
        country_data = self.meta['country'].str.decode('utf-8')
        us_idx = country_data == 'United States'
        state_data = self.meta.loc[us_idx, 'state'].str.decode('utf-8')
        conus_idx = state_data.isin(['Alaska', 'Hawaii', 'AK', 'HI', 'None'])
        conus_idx = state_data.index[~conus_idx].values
        return conus_idx

    def get_timeseries(self, variable, coords, local=True):
        """
        Extract time-series data for the given variable at the given
        coordinates

        Parameters
        ----------
        variable : str
            Variable to extract time-series for
        coords : tuple
            (lat, lon) coordinates of interest
        local : bool
            Shift time-series to local time

        Returns
        -------
        ts : pd.DataFrame
            Time-series DataFrame with Datetime/Date/Month/Day/Hour columns
        """
        site_idx = self._nearest_site(coords)
        time_index = self.time_index.copy()
        if local:
            # shift UTC stamps by the site's timezone offset (hours)
            utc_dt = self.meta.iloc[site_idx]['timezone']
            utc_dt = pd.Timedelta('{}h'.format(utc_dt))
            time_index += utc_dt
        ds = self._h5d[variable]
        # datasets store scaled integers; unscale on extraction
        ts = ds[:, site_idx] / ds.attrs.get('scale_factor', 1)
        ts = pd.DataFrame({variable: ts, 'Datetime': time_index,
                           'Date': time_index.date, 'Month': time_index.month,
                           'Day': time_index.day, 'Hour': time_index.hour})
        return ts

    @staticmethod
    def create_boxplots(df, variable, dpi=100, figsize=(12, 4)):
        """
        Create monthly and diurnal box plots of `variable` from a DataFrame
        produced by get_timeseries
        """
        fig = plt.figure(figsize=figsize, dpi=dpi)
        ax1 = fig.add_subplot(121)
        ax2 = fig.add_subplot(122)
        sns.boxplot(x="Month", y=variable, data=df, ax=ax1)
        ax1.set_xlabel('Month', fontsize=16)
        ax1.set_ylabel(variable, fontsize=16)
        sns.boxplot(x="Hour", y=variable, data=df, ax=ax2)
        ax2.set_xlabel('Hour', fontsize=16)
        ax2.set_ylabel(variable, fontsize=16)
        sns.despine(offset=10, trim=False)
        fig.tight_layout()
        plt.show()

    def get_timestep(self, variable, timestep):
        """
        Extract the given variable at the nearest timestep for all CONUS
        sites

        Parameters
        ----------
        variable : str
            Variable to extract
        timestep : str
            Datetime step to extract (parsed with pd.to_datetime)

        Returns
        -------
        df : pd.DataFrame
            DataFrame with longitude, latitude and the variable's values
        """
        conus_idx = self._get_conus_idx()
        time_idx = self._nearest_timestep(pd.to_datetime(timestep))
        meta = self.meta.iloc[conus_idx]
        lon = meta['longitude'].values
        lat = meta['latitude'].values
        ds = self._h5d[variable]
        sf = ds.attrs.get('scale_factor', 1)
        data = ds[time_idx][conus_idx] / sf
        df = pd.DataFrame({'longitude': lon, 'latitude': lat, variable: data})
        return df

    @staticmethod
    def create_scatter(df, variable, cbar_label=None, title=None,
                       cmap='Rainbow', dpi=100, figsize=(8, 4)):
        """
        Create scatter plot from the longitude/latitude/variable columns of
        a DataFrame produced by get_timestep

        Parameters
        ----------
        df : pd.DataFrame
            DataFrame containing data to plot
        variable : str
            Column of df to color the scatter by
        cbar_label : str
            Colorbar label (defaults to `variable`)
        title : str
            Title to plot
        cmap : str
            Colormap to use
        dpi : int
            plot resolution
        figsize : tuple
            Figure size
        """
        fig = plt.figure(figsize=figsize, dpi=dpi)
        if title is not None:
            fig.suptitle(title, fontsize=16)
        ax = fig.add_subplot(111)
        lon = df['longitude'].values
        lat = df['latitude'].values
        data = df[variable].values
        if cbar_label is None:
            cbar_label = variable
        vmax = np.max(data)
        sc = ax.scatter(lon, lat, c=data, cmap=cmap, vmin=0, vmax=vmax)
        cbar = plt.colorbar(sc)
        cbar.ax.set_ylabel(cbar_label, rotation=90)
        ax.axis('off')
        fig.tight_layout()
        plt.show()

    def get_day(self, variable, date, local=True):
        """
        Extract a days worth of data for the given day for CONUS

        Parameters
        ----------
        variable : str
            Variable to extract time-series for
        date : str
            Date to extract a days worth of data for
        local : bool
            Shift time-series to local time (mean CONUS timezone offset)

        Returns
        -------
        day_df : pd.DataFrame
            Timestep x site DataFrame (index: time indices, columns: CONUS
            site indices)
        """
        conus_idx = self._get_conus_idx()
        time_index = self.time_index
        if local:
            utc_dt = self.meta.iloc[conus_idx]['timezone'].mean()
            utc_dt = pd.Timedelta('{}h'.format(utc_dt))
            # DatetimeIndex is immutable: += rebinds the local name and does
            # not modify self._time_index
            time_index += utc_dt
        date = pd.to_datetime(date).date()
        time_idx = np.where(time_index.date == date)[0]
        time_slice = slice(time_idx[0], time_idx[-1] + 1)
        day_df = pd.DataFrame(self._h5d[variable][time_slice][:, conus_idx],
                              index=time_idx, columns=conus_idx)
        return day_df

    @staticmethod
    def create_map(lon, lat, data, cbar_label, f_out=None, vmax=None,
                   title=None, cmap='Rainbow', dpi=100, figsize=(8, 4)):
        """
        Create scatter plot from lon, lat, and data and save to f_out

        Parameters
        ----------
        lon : ndarray
            Longitude vector
        lat : ndarray
            Latitude vector
        data : ndarray
            Values to color the scatter by
        cbar_label : str
            Colorbar label
        f_out : str
            File to save plot to (None displays the figure instead)
        vmax : float
            Max value for colormap
        title : str
            Title to plot
        cmap : str
            Colormap to use
        dpi : int
            plot resolution
        figsize : tuple
            Figure size
        """
        fig = plt.figure(figsize=figsize, dpi=dpi)
        if title is not None:
            fig.suptitle(title, fontsize=16)
        ax = fig.add_subplot(111)
        if vmax is None:
            vmax = np.max(data)
        sc = ax.scatter(lon, lat, c=data, cmap=cmap, s=10,
                        vmin=0, vmax=vmax)
        cbar = plt.colorbar(sc)
        cbar.ax.set_ylabel(cbar_label, rotation=90)
        ax.axis('off')
        fig.tight_layout()
        if f_out is not None:
            plt.savefig(f_out, dpi=dpi, transparent=True,
                        bbox_inches='tight')
        else:
            plt.show()

    @staticmethod
    def creat_gif(fig_dir, file_prefix):
        """
        Create gif from all files in

        TODO: not implemented (note the typo in the method name; kept as-is
        for backward compatibility)
        """

    def create_nsrdb_gif(self, date, variable='dni'):
        """
        Extract, plot, and save per-timestep frames for given NSRDB date and
        variable (one png per timestep in ../bin/gifs)

        Parameters
        ----------
        date : str
            Date to extract
        variable : str
            Variable to extract
        """
        day_df = self.get_day(variable, date)
        label = '{} W/m^2'.format(variable)
        vmax = np.max(day_df.values)
        meta = self.meta.iloc[day_df.columns]
        lon = meta['longitude'].values
        lat = meta['latitude'].values
        fig_dir = '../bin/gifs'
        if not os.path.exists(fig_dir):
            os.makedirs(fig_dir)
        # BUG FIX: original iterated "for i in len(day_df)" which raises
        # TypeError ('int' object is not iterable); range() is required.
        for i in range(len(day_df)):
            data = day_df.iloc[i]
            f_out = os.path.join(fig_dir, 'nsrdb_{:03d}.png'.format(i))
            self.create_map(lon, lat, data, label, f_out, vmax=vmax,
                            cmap='YlOrRd')
| [
"matplotlib.rc",
"numpy.abs",
"matplotlib.pyplot.figure",
"h5pyd.File",
"scipy.spatial.cKDTree",
"pandas.DataFrame",
"os.path.exists",
"matplotlib.pyplot.colorbar",
"numpy.max",
"seaborn.set_style",
"matplotlib.pyplot.show",
"seaborn.boxplot",
"pandas.to_datetime",
"numpy.subtract",
"os.... | [((337, 366), 'matplotlib.rc', 'mpl.rc', (['"""xtick"""'], {'labelsize': '(14)'}), "('xtick', labelsize=14)\n", (343, 366), True, 'import matplotlib as mpl\n'), ((367, 396), 'matplotlib.rc', 'mpl.rc', (['"""ytick"""'], {'labelsize': '(14)'}), "('ytick', labelsize=14)\n", (373, 396), True, 'import matplotlib as mpl\n'), ((397, 420), 'matplotlib.rc', 'mpl.rc', (['"""font"""'], {'size': '(16)'}), "('font', size=16)\n", (403, 420), True, 'import matplotlib as mpl\n'), ((421, 443), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (434, 443), True, 'import seaborn as sns\n'), ((444, 466), 'seaborn.set_style', 'sns.set_style', (['"""ticks"""'], {}), "('ticks')\n", (457, 466), True, 'import seaborn as sns\n'), ((1168, 1184), 'pyproj.Proj', 'Proj', (['projstring'], {}), '(projstring)\n', (1172, 1184), False, 'from pyproj import Proj\n'), ((1385, 1412), 'numpy.subtract', 'np.subtract', (['coords', 'origin'], {}), '(coords, origin)\n', (1396, 1412), True, 'import numpy as np\n'), ((1976, 1996), 'scipy.spatial.cKDTree', 'cKDTree', (['dset_coords'], {}), '(dset_coords)\n', (1983, 1996), False, 'from scipy.spatial import cKDTree\n'), ((2021, 2038), 'numpy.array', 'np.array', (['lat_lon'], {}), '(lat_lon)\n', (2029, 2038), True, 'import numpy as np\n'), ((2799, 2830), 'h5pyd.File', 'h5pyd.File', (['hsds_path'], {'mode': '"""r"""'}), "(hsds_path, mode='r')\n", (2809, 2830), False, 'import h5pyd\n'), ((4175, 4201), 'pandas.to_datetime', 'pd.to_datetime', (['time_index'], {}), '(time_index)\n', (4189, 4201), True, 'import pandas as pd\n'), ((4276, 4296), 'scipy.spatial.cKDTree', 'cKDTree', (['site_coords'], {}), '(site_coords)\n', (4283, 4296), False, 'from scipy.spatial import cKDTree\n'), ((4319, 4355), 'pandas.DataFrame', 'pd.DataFrame', (["self._h5d['meta'][...]"], {}), "(self._h5d['meta'][...])\n", (4331, 4355), True, 'import pandas as pd\n'), ((4703, 4719), 'numpy.array', 'np.array', (['coords'], {}), '(coords)\n', (4711, 4719), True, 
'import numpy as np\n'), ((5133, 5167), 'numpy.abs', 'np.abs', (['(self.time_index - timestep)'], {}), '(self.time_index - timestep)\n', (5139, 5167), True, 'import numpy as np\n'), ((7377, 7533), 'pandas.DataFrame', 'pd.DataFrame', (["{variable: ts, 'Datetime': time_index, 'Date': time_index.date, 'Month':\n time_index.month, 'Day': time_index.day, 'Hour': time_index.hour}"], {}), "({variable: ts, 'Datetime': time_index, 'Date': time_index.date,\n 'Month': time_index.month, 'Day': time_index.day, 'Hour': time_index.hour})\n", (7389, 7533), True, 'import pandas as pd\n'), ((7770, 7806), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize', 'dpi': 'dpi'}), '(figsize=figsize, dpi=dpi)\n', (7780, 7806), True, 'import matplotlib.pyplot as plt\n'), ((7886, 7937), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': '"""Month"""', 'y': 'variable', 'data': 'df', 'ax': 'ax1'}), "(x='Month', y=variable, data=df, ax=ax1)\n", (7897, 7937), True, 'import seaborn as sns\n'), ((8037, 8087), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': '"""Hour"""', 'y': 'variable', 'data': 'df', 'ax': 'ax2'}), "(x='Hour', y=variable, data=df, ax=ax2)\n", (8048, 8087), True, 'import seaborn as sns\n'), ((8186, 8220), 'seaborn.despine', 'sns.despine', ([], {'offset': '(10)', 'trim': '(False)'}), '(offset=10, trim=False)\n', (8197, 8220), True, 'import seaborn as sns\n'), ((8257, 8267), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8265, 8267), True, 'import matplotlib.pyplot as plt\n'), ((9082, 9147), 'pandas.DataFrame', 'pd.DataFrame', (["{'longitude': lon, 'latitude': lat, variable: data}"], {}), "({'longitude': lon, 'latitude': lat, variable: data})\n", (9094, 9147), True, 'import pandas as pd\n'), ((9775, 9811), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize', 'dpi': 'dpi'}), '(figsize=figsize, dpi=dpi)\n', (9785, 9811), True, 'import matplotlib.pyplot as plt\n'), ((10110, 10122), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (10116, 10122), True, 
'import numpy as np\n'), ((10211, 10227), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['sc'], {}), '(sc)\n', (10223, 10227), True, 'import matplotlib.pyplot as plt\n'), ((10338, 10348), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10346, 10348), True, 'import matplotlib.pyplot as plt\n'), ((11226, 11324), 'pandas.DataFrame', 'pd.DataFrame', (['self._h5d[variable][time_slice][:, conus_idx]'], {'index': 'time_idx', 'columns': 'conus_idx'}), '(self._h5d[variable][time_slice][:, conus_idx], index=time_idx,\n columns=conus_idx)\n', (11238, 11324), True, 'import pandas as pd\n'), ((12086, 12122), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize', 'dpi': 'dpi'}), '(figsize=figsize, dpi=dpi)\n', (12096, 12122), True, 'import matplotlib.pyplot as plt\n'), ((12408, 12424), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['sc'], {}), '(sc)\n', (12420, 12424), True, 'import matplotlib.pyplot as plt\n'), ((13215, 13236), 'numpy.max', 'np.max', (['day_df.values'], {}), '(day_df.values)\n', (13221, 13236), True, 'import numpy as np\n'), ((3313, 3339), 'pandas.to_datetime', 'pd.to_datetime', (['time_index'], {}), '(time_index)\n', (3327, 3339), True, 'import pandas as pd\n'), ((3595, 3631), 'pandas.DataFrame', 'pd.DataFrame', (["self._h5d['meta'][...]"], {}), "(self._h5d['meta'][...])\n", (3607, 3631), True, 'import pandas as pd\n'), ((3947, 3967), 'scipy.spatial.cKDTree', 'cKDTree', (['site_coords'], {}), '(site_coords)\n', (3954, 3967), False, 'from scipy.spatial import cKDTree\n'), ((8785, 8809), 'pandas.to_datetime', 'pd.to_datetime', (['timestep'], {}), '(timestep)\n', (8799, 8809), True, 'import pandas as pd\n'), ((11113, 11146), 'numpy.where', 'np.where', (['(time_index.date == date)'], {}), '(time_index.date == date)\n', (11121, 11146), True, 'import numpy as np\n'), ((12277, 12289), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (12283, 12289), True, 'import numpy as np\n'), ((12569, 12635), 'matplotlib.pyplot.savefig', 'plt.savefig', 
(['f_out'], {'dpi': 'dpi', 'transparent': '(True)', 'bbox_inches': '"""tight"""'}), "(f_out, dpi=dpi, transparent=True, bbox_inches='tight')\n", (12580, 12635), True, 'import matplotlib.pyplot as plt\n'), ((12686, 12696), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12694, 12696), True, 'import matplotlib.pyplot as plt\n'), ((13407, 13430), 'os.path.exists', 'os.path.exists', (['fig_dir'], {}), '(fig_dir)\n', (13421, 13430), False, 'import os\n'), ((13444, 13464), 'os.makedirs', 'os.makedirs', (['fig_dir'], {}), '(fig_dir)\n', (13455, 13464), False, 'import os\n'), ((11066, 11086), 'pandas.to_datetime', 'pd.to_datetime', (['date'], {}), '(date)\n', (11080, 11086), True, 'import pandas as pd\n')] |
# %%
# Python standard library
import math
import random
# Scientific stack
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.patches as patches
# Visualization
# ------------------------------------------------------------------------------
def rotate(x1y1, x2y2):
"Rotate a segment +90° with respect to its center"
x1, y1 = x1y1
x2, y2 = x2y2
cx, cy = 0.5 * (x1 + x2), 0.5 * (y1 + y2)
x3y3 = cx - (y1 - cy), cy + (x1 - cx)
x4y4 = cx - (y2 - cy), cy + (x2 - cx)
return x3y3, x4y4
def display_maze(graph, path=None, map=None):
    """
    Draw a maze with matplotlib: walls where no edge exists, dotted lines
    where passages exist, optionally a red path and a shaded per-vertex map.

    Parameters
    ----------
    graph : tuple
        (vertices, edges, weights) maze description
    path : list of (x, y), optional
        Vertex sequence drawn as a red polyline
    map : set or dict, optional
        Vertices to shade; a set is treated as {vertex: 1.0}.
        (The name shadows the builtin but is kept for caller compatibility.)
    """
    vertices, edges, weights = graph
    width = max(w for (w, h) in vertices) + 1
    height = max(h for (w, h) in vertices) + 1
    wh_ratio = width / height
    fig_width = 14  # inches
    fig_height = fig_width / wh_ratio
    fig, axes = plt.subplots(figsize=(fig_width, fig_height))
    axes.axis("equal")
    for x in range(width):
        for y in range(height):
            for (dx, dy) in [(-1, 0), (0, -1), (1, 0), (0, 1)]:
                xn, yn = x + dx, y + dy
                # dotted grey segment for a passage, solid black for a wall
                if ((x, y), (xn, yn)) in edges:
                    style = {"color": "grey", "linestyle": ":"}
                else:
                    style = {"color": "black", "linestyle": "-"}
                w1, w2 = rotate((x + 0.5, y + 0.5), (xn + 0.5, yn + 0.5))  # wall segment
                axes.plot([w1[0], w2[0]], [w1[1], w2[1]], **style)
    axes.axis("off")
    if path:
        xs = np.array([x for (x, y) in path])
        ys = np.array([y for (x, y) in path])
        axes.plot(xs + 0.5, ys + 0.5, "r-")
    if map:
        if isinstance(map, set):
            map = {k: 1.0 for k in map}
        d_max = max(map.values())
        # mpl.cm.get_cmap was deprecated in matplotlib 3.7 and removed in
        # 3.9; plt.get_cmap works across versions.
        cmap = plt.get_cmap("viridis")
        for v, d in map.items():
            dx, dy = 1, 1
            rect = patches.Rectangle(v, dx, dy, facecolor=cmap(d / d_max))
            axes.add_patch(rect)
# Mazes generation
# ------------------------------------------------------------------------------
def empty_maze(width, height):
    """Grid maze with every neighbouring pair of cells connected (no walls).
    Returns (vertices, edges, weights); edges hold both directions."""
    vertices = {(i, j) for i in range(width) for j in range(height)}
    offsets = [(1, 0), (0, 1), (-1, 0), (0, -1)]
    edges = set()
    for cell in vertices:
        x, y = cell
        for dx, dy in offsets:
            neighbor = (x + dx, y + dy)
            if neighbor in vertices:
                edges.add((cell, neighbor))
    weights = dict.fromkeys(edges, 1.0)
    return vertices, edges, weights
def full_maze(width, height):
    """Grid maze split in two by a solid vertical wall at the middle column
    (no passage crosses between columns mid-1 and mid)."""
    vertices, edges, _ = empty_maze(width, height)
    mid = width // 2  # boundary (vertical wall in the middle)
    blocked = set()
    for j in range(height):
        blocked.add(((mid - 1, j), (mid, j)))
        blocked.add(((mid, j), (mid - 1, j)))
    edges = edges - blocked
    weights = dict.fromkeys(edges, 1.0)
    return vertices, edges, weights
def punctured_maze(width, height):
    """full_maze with a single opening punched through the wall at row 0."""
    vertices, edges, weights = full_maze(width, height)
    mid = width // 2  # boundary (vertical wall in the middle)
    # hole at the bottom of the wall, both directions
    for hole in (((mid - 1, 0), (mid, 0)), ((mid, 0), (mid - 1, 0))):
        edges.add(hole)
        weights[hole] = 1.0
    return vertices, edges, weights
def dense_maze(width, height):
    """Randomized growing-tree maze: carves a spanning set of passages over
    the whole grid. Seeded so the layout is reproducible."""
    random.seed(0)
    cells = {(i, j) for i in range(width) for j in range(height)}
    passages = set()
    frontier = {(0, 0)}  # visited but some neighbors not tested yet,
    finished = set()     # all neighbors have been tested.
    while frontier:
        i, j = cell = random.choice(list(frontier))
        neighbors = {(i + k, j + l) for k, l in [(1, 0), (0, 1), (-1, 0), (0, -1)]}
        # neighbors in the maze and not explored yet
        candidates = (neighbors & cells) - finished - frontier
        if candidates:
            nxt = random.choice(list(candidates))
            passages.add((cell, nxt))
            passages.add((nxt, cell))  # both directions are allowed
            frontier.add(nxt)
        if len(candidates) <= 1:
            frontier.remove(cell)
            finished.add(cell)
    weights = {edge: 1.0 for edge in passages}
    return cells, passages, weights
# Visualize maze examples
# ------------------------------------------------------------------------------
# %%
width, height = 50, 25
# Render one example of each maze generator at the same grid size.
em = empty_maze(width, height)
display_maze(em)
wm = full_maze(width, height)
display_maze(wm)
pm = punctured_maze(width, height)
display_maze(pm)
dm = dense_maze(width, height)
display_maze(dm)
# Reachable Sets & Paths
# ------------------------------------------------------------------------------
# %%
def reachable_set(graph, source):
    """Return the set of vertices reachable from `source` by following
    directed edges (flood fill over the graph)."""
    vertices, edges, _ = graph
    pending = {source}
    seen = set()
    while pending:
        node = pending.pop()
        for v in vertices:
            # successor of `node` not yet finished -> schedule it
            if (node, v) in edges and v not in seen:
                pending.add(v)
        seen.add(node)
    return seen
def reachable_path(graph, source):
    """Return {vertex: path-from-source} for every vertex reachable from
    `source` (paths are vertex lists starting at `source`)."""
    vertices, edges, _ = graph
    pending = {source}
    seen = set()
    routes = {source: [source]}
    while pending:
        node = pending.pop()
        for v in vertices:
            if (node, v) in edges and v not in seen:
                pending.add(v)
                routes[v] = routes[node] + [v]
        seen.add(node)
    return routes
# Test reachability functions
# %%
source = (0, 0)
target = (width - 1, height - 1)
mazes = [em, wm, pm, dm]
# For each maze: shade the set reachable from the origin and, when the far
# corner is reachable, overlay one path to it.
for maze in mazes:
    reached = reachable_set(maze, source)
    if target in reached:
        path = reachable_path(maze, source)[target]
    else:
        path = None
    display_maze(maze, path=path, map=reached)
# Shortest Path & Distance
# ------------------------------------------------------------------------------
# %%
def shortest_path(graph, source):
    "Non-greedy version"
    # Label-correcting search: relax every outgoing edge of a popped vertex
    # and re-schedule any vertex whose distance improved.
    vertices, edges, weight = graph
    distance = {source: 0}
    path = {source: [source]}
    pending = {source}
    while pending:
        node = pending.pop()
        for v in vertices:
            if (node, v) not in edges:
                continue
            candidate = distance[node] + weight[(node, v)]
            if candidate < distance.get(v, math.inf):
                distance[v] = candidate
                path[v] = path[node] + [v]
                pending.add(v)
    return path, distance
# Visualize shortest path & distance
# ------------------------------------------------------------------------------
# %%
mazes = [em, wm, pm, dm]
# Shade each vertex by its shortest-path distance from the source and draw
# the shortest path to the target (path.get returns None when unreachable).
for maze in mazes:
    path, distance = shortest_path(maze, source)
    path = path.get(target)
    display_maze(maze, path=path, map=distance)
| [
"numpy.array",
"random.seed",
"matplotlib.pyplot.subplots",
"matplotlib.cm.get_cmap"
] | [((846, 891), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(fig_width, fig_height)'}), '(figsize=(fig_width, fig_height))\n', (858, 891), True, 'import matplotlib.pyplot as plt\n'), ((3358, 3372), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (3369, 3372), False, 'import random\n'), ((1497, 1527), 'numpy.array', 'np.array', (['[x for x, y in path]'], {}), '([x for x, y in path])\n', (1505, 1527), True, 'import numpy as np\n'), ((1543, 1573), 'numpy.array', 'np.array', (['[y for x, y in path]'], {}), '([y for x, y in path])\n', (1551, 1573), True, 'import numpy as np\n'), ((1755, 1781), 'matplotlib.cm.get_cmap', 'mpl.cm.get_cmap', (['"""viridis"""'], {}), "('viridis')\n", (1770, 1781), True, 'import matplotlib as mpl\n')] |
"""
--- Day 3: Squares With Three Sides ---
https://adventofcode.com/2016/day/3
"""
import numpy as np
from aocd import data
A = np.fromstring(data, sep=" ", dtype=int)
R = A.reshape(-1, 3).T
C = R.reshape(-1, 3).T
for A in R, C:
T = A[0] + A[1] > A[2]
T &= A[0] + A[2] > A[1]
T &= A[1] + A[2] > A[0]
print(T.sum())
| [
"numpy.fromstring"
] | [((131, 170), 'numpy.fromstring', 'np.fromstring', (['data'], {'sep': '""" """', 'dtype': 'int'}), "(data, sep=' ', dtype=int)\n", (144, 170), True, 'import numpy as np\n')] |
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from base import BaseModel, BaseModule
from model.model_helpers import Beam
from torch.autograd import Variable
import matplotlib.pyplot as plt
import math
class KWSModel(BaseModel):
    """
    Keyword-spotting model: a first-stage localiser (``Classifier_init``)
    over visual features and grapheme/phoneme sequences, optionally followed
    by a begin/end localiser head (``Classifier_BE``) from a configured
    epoch onwards.
    """
    def __init__(self, hiddenDNNV, dimRnn3, inputDimV, hiddenDimV, birnnV, d_word_emb,
                 outdpV, p_size, g_size, d_embed, d_hidden, embDO, beam_size, num_heads,rnn2, fixed_length_embedding, shortcut, loc_acc,g2p):
        super().__init__()
        # First-stage localiser (defined below in this file).
        self.classifier_init = Classifier_init(
            inputDimV=inputDimV,
            hiddenDimV=hiddenDimV,
            d_word_emb=d_word_emb,
            hiddenDNNV=hiddenDNNV,
            beam_size=beam_size,
            birnnV=birnnV,
            outdpV=outdpV,
            p_size=p_size,
            g_size=g_size,
            d_embed=d_embed,
            d_hidden=d_hidden,
            embDO=embDO,
            num_heads=num_heads,
            rnn2=rnn2,
            fixed_length_embedding=fixed_length_embedding,
            shortcut = shortcut,
            loc_acc = loc_acc,
            g2p=g2p
        )
        # Second-stage begin/end localiser head.
        self.classifier_BE = Classifier_BE(hiddenDNNV=hiddenDNNV, dimRnn3=dimRnn3)
    def forward(self, epoch, vis_feats, vis_feat_lens, p_lengths, graphemes, phonemes, use_BE_localiser, config):
        """
        Run the localiser(s) for one batch; which stage is active depends on
        `epoch` relative to config["data_loader"]["args"]["start_BEloc_epoch"].
        Returns a dict with the max logit, decoder output, argmax index,
        sigmoid keyword probability, attention mask and per-frame logits.

        NOTE(review): `use_BE_localiser` is accepted but never read -- the
        branch is driven by `epoch` alone; confirm whether that is intended.
        """
        # Freeze the first stage exactly on the BE-localiser start epoch;
        # any other epoch (before or after) leaves it trainable.
        # NOTE(review): only epoch == start_BEloc_epoch freezes -- later
        # epochs unfreeze again; confirm this schedule is intended.
        if epoch == config["data_loader"]["args"]["start_BEloc_epoch"]:
            for param in self.classifier_init.parameters():
                param.requires_grad = False
        else:
            for param in self.classifier_init.parameters():
                param.requires_grad = True
        odec, vis_feat_lens, o_init, o_rnn, plotted_mask, o_logits, indices = self.classifier_init(
            x=vis_feats,
            x_lengths=vis_feat_lens,
            phoneme=phonemes,
            grapheme=graphemes,
            p_lengths=p_lengths
        )
        # From the start epoch onwards, refine with the BE localiser head;
        # before that, use the first-stage output and a zero index.
        if epoch >= config["data_loader"]["args"]["start_BEloc_epoch"]:
            o, odec, idx_max, o_logits = self.classifier_BE(o_rnn, odec, vis_feat_lens)
        else:
            idx_max = Variable(torch.LongTensor(vis_feats.size(0)).fill_(0).cuda())
            o = o_init
        keyword_prob = torch.sigmoid(o)
        # NOTE(review): "idx_max" in the returned dict carries `indices`
        # from classifier_init, not the local `idx_max` -- confirm.
        return {"max_logit": o, "odec": odec, "idx_max": indices,
                "keyword_prob": keyword_prob, "plot": plotted_mask, "o_logits": o_logits}
class Classifier_init(nn.Module):
def __init__(self, inputDimV, hiddenDimV, birnnV, d_word_emb, outdpV, hiddenDNNV,
p_size, g_size, d_embed, d_hidden, embDO, beam_size, num_heads,rnn2, fixed_length_embedding, shortcut, loc_acc,g2p):
super().__init__()
self.rnn1 = nn.LSTM(
input_size=inputDimV,
hidden_size=hiddenDimV,
batch_first=True,
bidirectional=birnnV,
num_layers=1,
dropout=0,
).float()
self.linrnn = nn.Linear(2 * hiddenDimV, hiddenDimV).float()
self.rnn2_present = rnn2
self.rnn2 = nn.LSTM(
input_size=hiddenDimV *2,
hidden_size=hiddenDimV,
bidirectional=birnnV,
batch_first=True,
num_layers=1,
dropout=0,
)
self.d_word_emb = d_word_emb
self.inputDimV = inputDimV
self.hiddenDimV = hiddenDimV
self.inBN = nn.BatchNorm1d(inputDimV)
self.lin_logits = nn.Conv1d(
in_channels=128,
out_channels=1,
kernel_size=1,
dilation=1,
padding=0,
stride=1,
groups=1,
bias=1,
)
Nh = 2 if birnnV else 1
self.rnnBN = nn.BatchNorm1d(hiddenDimV)
self.wBN = nn.BatchNorm1d(d_word_emb)
self.outProj_L0 = nn.Linear(in_features=Nh * hiddenDimV, out_features=hiddenDNNV)
self.outDO_L0 = nn.Dropout(p=outdpV)
self.DNNnonlinear_L0 = nn.LeakyReLU().float()
self.DNNnonlinear_L1 = nn.LeakyReLU().float()
self.DNNnonlinear_L2 = nn.LeakyReLU().float()
self.outProj_L1 = nn.Linear(in_features=hiddenDNNV, out_features=hiddenDNNV // 2)
self.outDO_L1 = nn.Dropout(p=outdpV)
self.outProj_L2 = nn.Linear(in_features=hiddenDNNV // 2, out_features=1)
self.indpV = 0.20
self.g2p = g2p
if self.g2p:
self.enc_dec = G2P(
p_size=g_size,
g_size=p_size,
d_embed=d_embed,
d_hidden=d_hidden,
d_word_emb=d_word_emb,
embDO=embDO,
beam_size=beam_size,
)
else:
self.enc_dec = G2P(
p_size=p_size,
g_size=g_size,
d_embed=d_embed,
d_hidden=d_hidden,
d_word_emb=d_word_emb,
embDO=embDO,
beam_size=beam_size,
)
self.linear_attn_keys = nn.Linear(256,512)
self.linear_attn_values = nn.Linear(384,512)
self.linear_attn_queries = nn.Linear(128,512)
self.num_heads = num_heads
self.d_k = inputDimV // self.num_heads
self.linear_mask = nn.Linear(self.num_heads,512)
self.final_lin = nn.Linear(512,512)
self.shortcut = shortcut
if self.shortcut == True:
self.conv3 = nn.Conv2d(1,32, kernel_size=5, stride=(1,1), padding=(2,2)) #padding used to (2,2)
self.conv1 = nn.Conv2d(64, 128, kernel_size=5, stride=(2,2), padding=(2,2)) #padding used to (2,2)
self.conv2 = nn.Conv2d(128, 256, kernel_size=5, stride=(1,2), padding=(2,2)) #padding used to be (2,2)
self.batch1 = nn.BatchNorm2d(32)
self.max_pool = nn.MaxPool2d(2, stride=(1,2))
self.batch2 = nn.BatchNorm2d(128)
self.batch3 = nn.BatchNorm2d(256)
self.dropout = nn.Dropout(0.2)
self.fc1 = nn.Linear(256, 512)
self.fc2 = nn.Linear(512, 128)
self.fc3 = nn.Linear(128, 1)
else:
self.conv1 = nn.Conv2d(1, 32, kernel_size=5, stride=(2,2), padding=(2,2)) #padding used to (2,2)
self.conv2 = nn.Conv2d(32, 64, kernel_size=5, stride=(1,2), padding=(2,2)) #padding used to be (2,2)
self.batch1 = nn.BatchNorm2d(32)
self.max_pool = nn.MaxPool2d(2, stride=(1,2))
self.batch2 = nn.BatchNorm2d(64)
self.dropout = nn.Dropout(0.2)
self.fc1 = nn.Linear(64, 256)
self.fc2 = nn.Linear(256, 128)
self.fc3 = nn.Linear(128, 1)
self.softmax = nn.LogSoftmax(dim=1)
self.linear_conv= nn.Linear(self.num_heads,1)
self.fixed_length_embedding = fixed_length_embedding
self.project_query = nn.Linear(512,32)
self.linear_baseline = nn.Linear(384,512)
self.linear_shortcut = nn.Linear(128, self.num_heads)
self.loc_acc = loc_acc
def forward(self, x, x_lengths, p_lengths, grapheme, phoneme):
batch_size = x.data.size(0)
if len(x.data.size())==1:
import pdb; pdb.set_trace()
T = x.data.size(1)
if self.g2p:
emb, all_emb = self.enc_dec.encoder(grapheme)
odec, _, __ = self.enc_dec.decoder(
x_seq=phoneme,
emb=emb,
projectEmb=True,
)
else:
emb, all_emb = self.enc_dec.encoder(phoneme)
odec, _, __ = self.enc_dec.decoder(
x_seq=grapheme,
emb=emb,
projectEmb=True,
)
if self.fixed_length_embedding==True:
word_embedding = self.wBN(emb)
word_embedding = word_embedding.unsqueeze(1).expand(batch_size, x.data.size(1), self.d_word_emb) # nb videos x number of frames x 128
else:
word_embedding = self.wBN(all_emb.transpose(-2,-1)).transpose(-2,-1)
if self.loc_acc:
x = self.inBN(x.reshape(-1, self.inputDimV)).view(batch_size, -1, self.inputDimV)
else:
x = self.inBN(x.view(-1, self.inputDimV)).view(batch_size, -1, self.inputDimV)
if self.training:
mask = torch.FloatTensor(int(x.size(0)/2), 1, self.inputDimV)
mask = mask.fill_(1).bernoulli_(1 - self.indpV).div(1 - self.indpV)
mask = mask.expand(int(x.size(0)/2), x.size(1), x.size(2))
mask = torch.cat((mask, mask), 0)
mask = Variable(mask.cuda())
x = x.mul(mask)
pack = torch.nn.utils.rnn.pack_padded_sequence(x, x_lengths, batch_first=True)
o, _ = self.rnn1(pack)
o, _ = torch.nn.utils.rnn.pad_packed_sequence(o, batch_first=True)
o = self.linrnn(o.contiguous().view(-1, 2 * self.hiddenDimV))
o = o.view(batch_size, -1, self.hiddenDimV)
o = self.rnnBN(o.view(-1, self.hiddenDimV))
o = o.view(batch_size, -1, self.hiddenDimV)
if self.rnn2_present ==False:
key = self.linear_attn_keys(o)
value = []
query = self.linear_attn_queries(word_embedding)
batch_size, nb_videos, dimensions = key.size()
attention_mask = attention(query, key, value, batch_size, nb_videos,self.num_heads, self.d_k)
plotted_mask=attention_mask
if self.rnn2_present== True:
plotted_mask= None
o = torch.cat((o, word_embedding), 2)
attention_mask = self.linear_baseline(o)
pack = torch.nn.utils.rnn.pack_padded_sequence(attention_mask, x_lengths, batch_first=True)
o, _ = self.rnn2(pack)
o, _ = torch.nn.utils.rnn.pad_packed_sequence(o, batch_first=True)
o = o.contiguous().view(batch_size * T, -1)
o = self.outProj_L0(o) #
o = self.DNNnonlinear_L0(o)
o_rnn = o.view(batch_size, T, -1)
o = torch.sum(o_rnn, 1).div(40)
o = self.outDO_L1(o)
o = self.outProj_L1(o)
o = self.DNNnonlinear_L1(o)
o = self.outProj_L2(o).view(batch_size, 1)
logits = None
indices = 0
else:
o_rnn = None
if self.shortcut:
o = self.conv3(attention_mask.transpose(-3,-1))
o = self.batch1(o)
o = F.relu(o)
shortcut = self.project_query(query).transpose(-2,-1).unsqueeze(-2).expand(-1,-1,nb_videos,-1)
o = torch.cat((o, shortcut), 1)
o = self.conv1(o.transpose(-2,-1))
o = self.batch2(o)
o = F.relu(o)
o = self.max_pool(o)
o = self.conv2(o)
o = self.batch3(o)
o = F.relu(o)
o = self.dropout(o)
o = o.mean(-2)
bs, _, v_dim = o.shape
o_prelogits = o.permute([0,2,1]).reshape([-1, o.shape[1]])
o = F.relu(self.fc1(o_prelogits))
o = self.dropout(F.relu(self.fc2(o)))
logits = self.fc3(o)
logits = logits.reshape([bs, -1, logits.shape[-1]])
o, indices = logits.max(1)
else:
o = self.conv1(attention_mask.transpose(-1,-3).transpose(-1,-2))
o = self.batch1(o)
o = F.relu(o)
o = self.max_pool(o)
o = self.conv2(o)
o = self.batch2(o)
o = F.relu(o)
o = self.dropout(o)
o = o.mean(-2)
bs, _, v_dim = o.shape
o_prelogits = o.permute([0,2,1]).reshape([-1, o.shape[1]])
o = F.relu(self.fc1(o_prelogits))
o = self.dropout(F.relu(self.fc2(o)))
logits = self.fc3(o)
logits = logits.reshape([bs, -1, logits.shape[-1]])
o, indices = logits.max(1)
return odec, x_lengths, o, o_rnn, plotted_mask, logits, indices
def attention(query, key,value, batch_size, nb_v_frames, num_heads, d_k):
if len(value)==0:
key = key.view(batch_size,-1, num_heads, d_k)
query= query.view(batch_size,-1, num_heads, d_k)
key = key.transpose(1,2).unsqueeze(3)
query = query.transpose(1,2).unsqueeze(2)
attention_mask = key[:,:,:,None,:]*query[:, :, None, :,:]
attention_mask = attention_mask/math.sqrt(d_k)
attention_mask = attention_mask.sum(-1).squeeze(-2)
attention_mask = attention_mask.transpose(-3,-1)
else:
key = key.view(batch_size,-1,num_heads,d_k)
value = value.view(batch_size,-1, num_heads, d_k)
query= query.view(batch_size,-1,num_heads, d_k)
key = key.transpose(1,2).unsqueeze(3)
query = query.transpose(1,2).unsqueeze(2)
value = value.transpose(1,2)
attention_mask = key[:,:,:,None,:]*query[:, :, None, :,:]
attention_mask = attention_mask/math.sqrt(d_k)
attention_mask = attention_mask.squeeze().sum(-1)
attention_mask = F.softmax(attention_mask, dim=-2)
attention_mask = attention_mask.transpose(-1,-2)
b, h, len_p, f = attention_mask.size()
attention_mask = torch.matmul(attention_mask,value).view(batch_size, len_p, num_heads*d_k)
return attention_mask
class Classifier_BE(BaseModel):
def __init__(self, hiddenDNNV, dimRnn3):
super().__init__()
self.DNNnonlinear_L1 = nn.LeakyReLU().float()
self.outProj_L1 = nn.Linear(hiddenDNNV, dimRnn3).float()
self.BEBN = nn.BatchNorm1d(hiddenDNNV)
self.dimRnn3 = dimRnn3
self.rnn3 = nn.LSTM(
input_size=dimRnn3,
hidden_size=dimRnn3,
batch_first=True,
bidirectional=True,
num_layers=1,
dropout=0,
).float()
self.linRnn3 = nn.Linear(2*self.dimRnn3, 1)
self.FBN = nn.BatchNorm1d(2*self.dimRnn3)
def forward(self, o_rnn, odec, Lens):
o = o_rnn
batch_size = o.data.size(0)
T = o.data.size(1)
o = self.outProj_L1(o)
o = self.DNNnonlinear_L1(o)
o = o.contiguous().view(batch_size, T, self.dimRnn3)
pack = torch.nn.utils.rnn.pack_padded_sequence(o, Lens, batch_first=True)
o, _ = self.rnn3(pack)
o, _ = torch.nn.utils.rnn.pad_packed_sequence(o, batch_first=True)
o = o.contiguous().view(-1, 2 * self.dimRnn3)
o = self.FBN(o)
o = self.linRnn3(o)
o_logits = o.view(batch_size, T, 1)
max_logit = o_logits.max(1)
o = max_logit[0].view(batch_size, 1)
return o, odec, max_logit[1], o_logits
class G2P(BaseModel):
def __init__(self, p_size, g_size, d_embed, d_hidden, d_word_emb, embDO, beam_size):
super().__init__()
self.beam_size = beam_size
self.decoder = Decoder(g_size, d_embed, d_hidden, d_word_emb)
self.encoder = Encoder(p_size, d_embed, d_hidden, d_word_emb, embDO)
def forward(self, g_seq, p_seq=None):
emb, all_emb = self.encoder(g_seq, True)
context = None
if p_seq is not None:
return self.decoder(p_seq, emb, True, context)
else:
assert g_seq.size(1) == 1
raise NotImplementedError()
def _generate(self, emb, context):
beam = Beam(self.beam_size, cuda=self.config.cuda)
h = emb[0].expand(beam.size, emb[0].size(1))
c = emb[1].expand(beam.size, emb[1].size(1))
if context is not None:
context = context.expand(beam.size, context.size(1), context.size(2))
for _ in range(self.config.max_len):
x = beam.get_current_state()
o, h, c = self.decoder(x.unsqueeze(0), h, c, context)
if beam.advance(o.data.squeeze(0)):
break
h.data.copy_(h.data.index_select(0, beam.get_current_origin()))
c.data.copy_(c.data.index_select(0, beam.get_current_origin()))
tt = torch.cuda if self.config.cuda else torch
return tt.LongTensor(beam.get_hyp(0))
class Encoder(BaseModule):
def __init__(self, vocab_size, d_embed, d_hidden, d_word_emb, embDO):
super().__init__()
self.embedding = nn.Embedding(vocab_size, d_embed)
self.lstm = nn.LSTM(d_embed, d_hidden, batch_first=True, bidirectional=True)
self.d_hidden = d_hidden
self.JHCE = nn.Linear(d_hidden * 2, d_word_emb)
self.pdo = embDO
self.linear = nn.Linear(1000, 500)
self.linear_2 = nn.Linear(1000,500)
def forward(self, x_seq, cuda=True):
e_seq = self.embedding(x_seq).permute(1,0,2)
tt = torch.cuda if cuda else torch
if self.embedding.training and self.pdo > 0:
pdo = self.pdo
vs = e_seq.size(2)
mask = np.random.binomial(1, 1 - pdo, (vs, )).astype('float') / (1 - pdo)
xnp = x_seq.data.cpu().numpy()
mask = torch.from_numpy(mask[xnp]).float()
mask = mask.cuda() if cuda else mask
mask = Variable(mask).detach()
mask = mask.unsqueeze(2).expand(e_seq.size(0), e_seq.size(1), e_seq.size(2))
e_seq = e_seq.mul(mask)
output, (hidden_state, cell_state) = self.lstm(e_seq)
hidden_state = torch.mean(output,1)
hidden_state = self.linear(hidden_state)
cell_state = torch.mean(cell_state,0)
emb = self.JHCE(torch.cat((hidden_state,cell_state),1))
all_hidden_states = self.linear_2(output)
batch_size, nb_phonemes, dimensions = all_hidden_states.size()
all_cell_states = cell_state.unsqueeze(-1).transpose(-2,-1).repeat(1,nb_phonemes,1)
all_emb = self.JHCE(torch.cat((all_hidden_states, all_cell_states), 2))
return emb, all_emb
class Decoder(nn.Module):
def __init__(self, vocab_size, d_embed, d_hidden, d_word_emb):
super(Decoder, self).__init__()
self.embedding = nn.Embedding(vocab_size, d_embed)
self.lstm = nn.LSTMCell(d_embed, d_hidden)
self.linear = nn.Linear(d_hidden, vocab_size)
self.JHCE2h = nn.Linear(d_word_emb, d_hidden)
self.JHCE2c = nn.Linear(d_word_emb, d_hidden)
def forward(self, x_seq, emb, projectEmb):
hidden_state = []
e_seq = self.embedding(x_seq)
if projectEmb:
h_ = self.JHCE2h(emb)
c_ = self.JHCE2c(emb)
else:
h_ = emb[0]
c_ = emb[1]
for e in e_seq.chunk(e_seq.size(0), 0):
e = e.squeeze(0)
h_, c_ = self.lstm(e, (h_, c_))
hidden_state.append(h_)
hidden_state = torch.stack(hidden_state, 0)
o = self.linear(hidden_state.view(-1, h_.size(1)))
return F.log_softmax(o, dim=1).view(x_seq.size(0), -1, o.size(1)), h_, c_
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--embDO', type=float, default=0,
help="dropout prob on G2P encoder")
parser.add_argument('--birnnV', type=int, default=1, help="use bidirectional lstms")
parser.add_argument('--outdpV', default=0.2,
help="dropout prob. on penultimate linear layer of KWS model")
parser.add_argument('--beam_size', default=3, type=int)
parser.add_argument("--dimRnn3", type=int, default=16,
help="dimension of hidden state of BiLSTM KWS classifier")
parser.add_argument("--g_size", type=int, default=34, help="size of grapheme vocab")
parser.add_argument("--p_size", type=int, default=73, help="size of phoneme vocab")
parser.add_argument("--d_word_emb", type=int, default=128,
help="dimension of `uprojected` hidden state of the G2P model")
parser.add_argument("--hiddenDimV", type=int, default=256)
parser.add_argument("--d_embed", type=int, default=64,
help="dimension of hidden state of G2P embeddings")
parser.add_argument('--d_hidden', default=500, type=int,
help="dimension of hidden state of G2P LSTMs")
parser.add_argument("--inputDimV", type=int, default=512,
help="dimension of the visual features used by KWS Model")
parser.add_argument("--hiddenDNNV", type=int, default=128,
help=("determines the size of the linear layers at the end of the"
"KWS model. There are three such layers, with shape:"
"(x * B, x) -> (x, x / 2) -> (x / 2 , 2), where x is the"
"value of `hiddenDNNV` and `B = 2` if a BiLSTM was used for"
"the encoding, and `B = 1` otherwise"))
args = parser.parse_args()
Nh = 2 if args.birnnV else 1
model = KWSModel(
dimRnn3=args.dimRnn3,
hiddenDNNV=args.hiddenDNNV,
inputDimV=args.inputDimV,
hiddenDimV=args.hiddenDimV,
d_word_emb=args.d_word_emb,
beam_size=args.beam_size,
birnnV=args.birnnV,
outdpV=args.outdpV,
p_size=args.p_size,
g_size=args.g_size,
d_embed=args.d_embed,
d_hidden=args.d_hidden,
embDO=args.embDO,
Nh=Nh,
)
outs = model(
x=input_var,
x_lengths=x_lengths,
grapheme=graphemeTensor.detach(),
phoneme=phonemeTensor[:-1].detach(),
istrain=False,
BE_loc=False,
adaptInit=False,
adaptEnc=False,
)
| [
"torch.nn.Dropout",
"argparse.ArgumentParser",
"torch.nn.Embedding",
"torch.nn.LSTMCell",
"torch.cat",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.nn.Conv1d",
"torch.nn.functional.log_softmax",
"model.model_helpers.Beam",
"torch.nn.Linear",
"torc... | [((18884, 18909), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (18907, 18909), False, 'import argparse\n'), ((2216, 2232), 'torch.sigmoid', 'torch.sigmoid', (['o'], {}), '(o)\n', (2229, 2232), False, 'import torch\n'), ((3023, 3151), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': '(hiddenDimV * 2)', 'hidden_size': 'hiddenDimV', 'bidirectional': 'birnnV', 'batch_first': '(True)', 'num_layers': '(1)', 'dropout': '(0)'}), '(input_size=hiddenDimV * 2, hidden_size=hiddenDimV, bidirectional=\n birnnV, batch_first=True, num_layers=1, dropout=0)\n', (3030, 3151), True, 'import torch.nn as nn\n'), ((3358, 3383), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['inputDimV'], {}), '(inputDimV)\n', (3372, 3383), True, 'import torch.nn as nn\n'), ((3410, 3522), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': '(128)', 'out_channels': '(1)', 'kernel_size': '(1)', 'dilation': '(1)', 'padding': '(0)', 'stride': '(1)', 'groups': '(1)', 'bias': '(1)'}), '(in_channels=128, out_channels=1, kernel_size=1, dilation=1,\n padding=0, stride=1, groups=1, bias=1)\n', (3419, 3522), True, 'import torch.nn as nn\n'), ((3679, 3705), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['hiddenDimV'], {}), '(hiddenDimV)\n', (3693, 3705), True, 'import torch.nn as nn\n'), ((3725, 3751), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['d_word_emb'], {}), '(d_word_emb)\n', (3739, 3751), True, 'import torch.nn as nn\n'), ((3778, 3841), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': '(Nh * hiddenDimV)', 'out_features': 'hiddenDNNV'}), '(in_features=Nh * hiddenDimV, out_features=hiddenDNNV)\n', (3787, 3841), True, 'import torch.nn as nn\n'), ((3866, 3886), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'outdpV'}), '(p=outdpV)\n', (3876, 3886), True, 'import torch.nn as nn\n'), ((4075, 4138), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'hiddenDNNV', 'out_features': '(hiddenDNNV // 2)'}), '(in_features=hiddenDNNV, out_features=hiddenDNNV // 2)\n', (4084, 4138), True, 
'import torch.nn as nn\n'), ((4163, 4183), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'outdpV'}), '(p=outdpV)\n', (4173, 4183), True, 'import torch.nn as nn\n'), ((4210, 4264), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': '(hiddenDNNV // 2)', 'out_features': '(1)'}), '(in_features=hiddenDNNV // 2, out_features=1)\n', (4219, 4264), True, 'import torch.nn as nn\n'), ((4943, 4962), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(512)'], {}), '(256, 512)\n', (4952, 4962), True, 'import torch.nn as nn\n'), ((4996, 5015), 'torch.nn.Linear', 'nn.Linear', (['(384)', '(512)'], {}), '(384, 512)\n', (5005, 5015), True, 'import torch.nn as nn\n'), ((5050, 5069), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(512)'], {}), '(128, 512)\n', (5059, 5069), True, 'import torch.nn as nn\n'), ((5178, 5208), 'torch.nn.Linear', 'nn.Linear', (['self.num_heads', '(512)'], {}), '(self.num_heads, 512)\n', (5187, 5208), True, 'import torch.nn as nn\n'), ((5233, 5252), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(512)'], {}), '(512, 512)\n', (5242, 5252), True, 'import torch.nn as nn\n'), ((6555, 6575), 'torch.nn.LogSoftmax', 'nn.LogSoftmax', ([], {'dim': '(1)'}), '(dim=1)\n', (6568, 6575), True, 'import torch.nn as nn\n'), ((6602, 6630), 'torch.nn.Linear', 'nn.Linear', (['self.num_heads', '(1)'], {}), '(self.num_heads, 1)\n', (6611, 6630), True, 'import torch.nn as nn\n'), ((6720, 6738), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(32)'], {}), '(512, 32)\n', (6729, 6738), True, 'import torch.nn as nn\n'), ((6769, 6788), 'torch.nn.Linear', 'nn.Linear', (['(384)', '(512)'], {}), '(384, 512)\n', (6778, 6788), True, 'import torch.nn as nn\n'), ((6827, 6857), 'torch.nn.Linear', 'nn.Linear', (['(128)', 'self.num_heads'], {}), '(128, self.num_heads)\n', (6836, 6857), True, 'import torch.nn as nn\n'), ((8509, 8580), 'torch.nn.utils.rnn.pack_padded_sequence', 'torch.nn.utils.rnn.pack_padded_sequence', (['x', 'x_lengths'], {'batch_first': '(True)'}), '(x, x_lengths, batch_first=True)\n', (8548, 
8580), False, 'import torch\n'), ((8627, 8686), 'torch.nn.utils.rnn.pad_packed_sequence', 'torch.nn.utils.rnn.pad_packed_sequence', (['o'], {'batch_first': '(True)'}), '(o, batch_first=True)\n', (8665, 8686), False, 'import torch\n'), ((13037, 13070), 'torch.nn.functional.softmax', 'F.softmax', (['attention_mask'], {'dim': '(-2)'}), '(attention_mask, dim=-2)\n', (13046, 13070), True, 'import torch.nn.functional as F\n'), ((13555, 13581), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['hiddenDNNV'], {}), '(hiddenDNNV)\n', (13569, 13581), True, 'import torch.nn as nn\n'), ((13860, 13890), 'torch.nn.Linear', 'nn.Linear', (['(2 * self.dimRnn3)', '(1)'], {}), '(2 * self.dimRnn3, 1)\n', (13869, 13890), True, 'import torch.nn as nn\n'), ((13908, 13940), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(2 * self.dimRnn3)'], {}), '(2 * self.dimRnn3)\n', (13922, 13940), True, 'import torch.nn as nn\n'), ((14206, 14272), 'torch.nn.utils.rnn.pack_padded_sequence', 'torch.nn.utils.rnn.pack_padded_sequence', (['o', 'Lens'], {'batch_first': '(True)'}), '(o, Lens, batch_first=True)\n', (14245, 14272), False, 'import torch\n'), ((14319, 14378), 'torch.nn.utils.rnn.pad_packed_sequence', 'torch.nn.utils.rnn.pad_packed_sequence', (['o'], {'batch_first': '(True)'}), '(o, batch_first=True)\n', (14357, 14378), False, 'import torch\n'), ((15341, 15384), 'model.model_helpers.Beam', 'Beam', (['self.beam_size'], {'cuda': 'self.config.cuda'}), '(self.beam_size, cuda=self.config.cuda)\n', (15345, 15384), False, 'from model.model_helpers import Beam\n'), ((16237, 16270), 'torch.nn.Embedding', 'nn.Embedding', (['vocab_size', 'd_embed'], {}), '(vocab_size, d_embed)\n', (16249, 16270), True, 'import torch.nn as nn\n'), ((16291, 16355), 'torch.nn.LSTM', 'nn.LSTM', (['d_embed', 'd_hidden'], {'batch_first': '(True)', 'bidirectional': '(True)'}), '(d_embed, d_hidden, batch_first=True, bidirectional=True)\n', (16298, 16355), True, 'import torch.nn as nn\n'), ((16409, 16444), 'torch.nn.Linear', 'nn.Linear', 
(['(d_hidden * 2)', 'd_word_emb'], {}), '(d_hidden * 2, d_word_emb)\n', (16418, 16444), True, 'import torch.nn as nn\n'), ((16493, 16513), 'torch.nn.Linear', 'nn.Linear', (['(1000)', '(500)'], {}), '(1000, 500)\n', (16502, 16513), True, 'import torch.nn as nn\n'), ((16538, 16558), 'torch.nn.Linear', 'nn.Linear', (['(1000)', '(500)'], {}), '(1000, 500)\n', (16547, 16558), True, 'import torch.nn as nn\n'), ((17303, 17324), 'torch.mean', 'torch.mean', (['output', '(1)'], {}), '(output, 1)\n', (17313, 17324), False, 'import torch\n'), ((17394, 17419), 'torch.mean', 'torch.mean', (['cell_state', '(0)'], {}), '(cell_state, 0)\n', (17404, 17419), False, 'import torch\n'), ((17974, 18007), 'torch.nn.Embedding', 'nn.Embedding', (['vocab_size', 'd_embed'], {}), '(vocab_size, d_embed)\n', (17986, 18007), True, 'import torch.nn as nn\n'), ((18028, 18058), 'torch.nn.LSTMCell', 'nn.LSTMCell', (['d_embed', 'd_hidden'], {}), '(d_embed, d_hidden)\n', (18039, 18058), True, 'import torch.nn as nn\n'), ((18081, 18112), 'torch.nn.Linear', 'nn.Linear', (['d_hidden', 'vocab_size'], {}), '(d_hidden, vocab_size)\n', (18090, 18112), True, 'import torch.nn as nn\n'), ((18135, 18166), 'torch.nn.Linear', 'nn.Linear', (['d_word_emb', 'd_hidden'], {}), '(d_word_emb, d_hidden)\n', (18144, 18166), True, 'import torch.nn as nn\n'), ((18189, 18220), 'torch.nn.Linear', 'nn.Linear', (['d_word_emb', 'd_hidden'], {}), '(d_word_emb, d_hidden)\n', (18198, 18220), True, 'import torch.nn as nn\n'), ((18672, 18700), 'torch.stack', 'torch.stack', (['hidden_state', '(0)'], {}), '(hidden_state, 0)\n', (18683, 18700), False, 'import torch\n'), ((5342, 5404), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(32)'], {'kernel_size': '(5)', 'stride': '(1, 1)', 'padding': '(2, 2)'}), '(1, 32, kernel_size=5, stride=(1, 1), padding=(2, 2))\n', (5351, 5404), True, 'import torch.nn as nn\n'), ((5448, 5512), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)'], {'kernel_size': '(5)', 'stride': '(2, 2)', 'padding': '(2, 2)'}), 
'(64, 128, kernel_size=5, stride=(2, 2), padding=(2, 2))\n', (5457, 5512), True, 'import torch.nn as nn\n'), ((5557, 5622), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)'], {'kernel_size': '(5)', 'stride': '(1, 2)', 'padding': '(2, 2)'}), '(128, 256, kernel_size=5, stride=(1, 2), padding=(2, 2))\n', (5566, 5622), True, 'import torch.nn as nn\n'), ((5671, 5689), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (5685, 5689), True, 'import torch.nn as nn\n'), ((5716, 5746), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {'stride': '(1, 2)'}), '(2, stride=(1, 2))\n', (5728, 5746), True, 'import torch.nn as nn\n'), ((5770, 5789), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (5784, 5789), True, 'import torch.nn as nn\n'), ((5814, 5833), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (5828, 5833), True, 'import torch.nn as nn\n'), ((5859, 5874), 'torch.nn.Dropout', 'nn.Dropout', (['(0.2)'], {}), '(0.2)\n', (5869, 5874), True, 'import torch.nn as nn\n'), ((5896, 5915), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(512)'], {}), '(256, 512)\n', (5905, 5915), True, 'import torch.nn as nn\n'), ((5937, 5956), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(128)'], {}), '(512, 128)\n', (5946, 5956), True, 'import torch.nn as nn\n'), ((5978, 5995), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(1)'], {}), '(128, 1)\n', (5987, 5995), True, 'import torch.nn as nn\n'), ((6033, 6095), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(32)'], {'kernel_size': '(5)', 'stride': '(2, 2)', 'padding': '(2, 2)'}), '(1, 32, kernel_size=5, stride=(2, 2), padding=(2, 2))\n', (6042, 6095), True, 'import torch.nn as nn\n'), ((6140, 6203), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(64)'], {'kernel_size': '(5)', 'stride': '(1, 2)', 'padding': '(2, 2)'}), '(32, 64, kernel_size=5, stride=(1, 2), padding=(2, 2))\n', (6149, 6203), True, 'import torch.nn as nn\n'), ((6252, 6270), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), 
'(32)\n', (6266, 6270), True, 'import torch.nn as nn\n'), ((6297, 6327), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {'stride': '(1, 2)'}), '(2, stride=(1, 2))\n', (6309, 6327), True, 'import torch.nn as nn\n'), ((6351, 6369), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (6365, 6369), True, 'import torch.nn as nn\n'), ((6395, 6410), 'torch.nn.Dropout', 'nn.Dropout', (['(0.2)'], {}), '(0.2)\n', (6405, 6410), True, 'import torch.nn as nn\n'), ((6432, 6450), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(256)'], {}), '(64, 256)\n', (6441, 6450), True, 'import torch.nn as nn\n'), ((6472, 6491), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(128)'], {}), '(256, 128)\n', (6481, 6491), True, 'import torch.nn as nn\n'), ((6513, 6530), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(1)'], {}), '(128, 1)\n', (6522, 6530), True, 'import torch.nn as nn\n'), ((7062, 7077), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (7075, 7077), False, 'import pdb\n'), ((8396, 8422), 'torch.cat', 'torch.cat', (['(mask, mask)', '(0)'], {}), '((mask, mask), 0)\n', (8405, 8422), False, 'import torch\n'), ((9382, 9415), 'torch.cat', 'torch.cat', (['(o, word_embedding)', '(2)'], {}), '((o, word_embedding), 2)\n', (9391, 9415), False, 'import torch\n'), ((9489, 9577), 'torch.nn.utils.rnn.pack_padded_sequence', 'torch.nn.utils.rnn.pack_padded_sequence', (['attention_mask', 'x_lengths'], {'batch_first': '(True)'}), '(attention_mask, x_lengths,\n batch_first=True)\n', (9528, 9577), False, 'import torch\n'), ((9628, 9687), 'torch.nn.utils.rnn.pad_packed_sequence', 'torch.nn.utils.rnn.pad_packed_sequence', (['o'], {'batch_first': '(True)'}), '(o, batch_first=True)\n', (9666, 9687), False, 'import torch\n'), ((12382, 12396), 'math.sqrt', 'math.sqrt', (['d_k'], {}), '(d_k)\n', (12391, 12396), False, 'import math\n'), ((12938, 12952), 'math.sqrt', 'math.sqrt', (['d_k'], {}), '(d_k)\n', (12947, 12952), False, 'import math\n'), ((17443, 17483), 'torch.cat', 'torch.cat', 
(['(hidden_state, cell_state)', '(1)'], {}), '((hidden_state, cell_state), 1)\n', (17452, 17483), False, 'import torch\n'), ((17726, 17776), 'torch.cat', 'torch.cat', (['(all_hidden_states, all_cell_states)', '(2)'], {}), '((all_hidden_states, all_cell_states), 2)\n', (17735, 17776), False, 'import torch\n'), ((2692, 2814), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': 'inputDimV', 'hidden_size': 'hiddenDimV', 'batch_first': '(True)', 'bidirectional': 'birnnV', 'num_layers': '(1)', 'dropout': '(0)'}), '(input_size=inputDimV, hidden_size=hiddenDimV, batch_first=True,\n bidirectional=birnnV, num_layers=1, dropout=0)\n', (2699, 2814), True, 'import torch.nn as nn\n'), ((2924, 2961), 'torch.nn.Linear', 'nn.Linear', (['(2 * hiddenDimV)', 'hiddenDimV'], {}), '(2 * hiddenDimV, hiddenDimV)\n', (2933, 2961), True, 'import torch.nn as nn\n'), ((3918, 3932), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (3930, 3932), True, 'import torch.nn as nn\n'), ((3972, 3986), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (3984, 3986), True, 'import torch.nn as nn\n'), ((4026, 4040), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (4038, 4040), True, 'import torch.nn as nn\n'), ((10317, 10326), 'torch.nn.functional.relu', 'F.relu', (['o'], {}), '(o)\n', (10323, 10326), True, 'import torch.nn.functional as F\n'), ((10457, 10484), 'torch.cat', 'torch.cat', (['(o, shortcut)', '(1)'], {}), '((o, shortcut), 1)\n', (10466, 10484), False, 'import torch\n'), ((10595, 10604), 'torch.nn.functional.relu', 'F.relu', (['o'], {}), '(o)\n', (10601, 10604), True, 'import torch.nn.functional as F\n'), ((10725, 10734), 'torch.nn.functional.relu', 'F.relu', (['o'], {}), '(o)\n', (10731, 10734), True, 'import torch.nn.functional as F\n'), ((11341, 11350), 'torch.nn.functional.relu', 'F.relu', (['o'], {}), '(o)\n', (11347, 11350), True, 'import torch.nn.functional as F\n'), ((11471, 11480), 'torch.nn.functional.relu', 'F.relu', (['o'], {}), '(o)\n', (11477, 11480), True, 'import 
torch.nn.functional as F\n'), ((13201, 13236), 'torch.matmul', 'torch.matmul', (['attention_mask', 'value'], {}), '(attention_mask, value)\n', (13213, 13236), False, 'import torch\n'), ((13447, 13461), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (13459, 13461), True, 'import torch.nn as nn\n'), ((13496, 13526), 'torch.nn.Linear', 'nn.Linear', (['hiddenDNNV', 'dimRnn3'], {}), '(hiddenDNNV, dimRnn3)\n', (13505, 13526), True, 'import torch.nn as nn\n'), ((13634, 13749), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': 'dimRnn3', 'hidden_size': 'dimRnn3', 'batch_first': '(True)', 'bidirectional': '(True)', 'num_layers': '(1)', 'dropout': '(0)'}), '(input_size=dimRnn3, hidden_size=dimRnn3, batch_first=True,\n bidirectional=True, num_layers=1, dropout=0)\n', (13641, 13749), True, 'import torch.nn as nn\n'), ((9884, 9903), 'torch.sum', 'torch.sum', (['o_rnn', '(1)'], {}), '(o_rnn, 1)\n', (9893, 9903), False, 'import torch\n'), ((16965, 16992), 'torch.from_numpy', 'torch.from_numpy', (['mask[xnp]'], {}), '(mask[xnp])\n', (16981, 16992), False, 'import torch\n'), ((17069, 17083), 'torch.autograd.Variable', 'Variable', (['mask'], {}), '(mask)\n', (17077, 17083), False, 'from torch.autograd import Variable\n'), ((18775, 18798), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['o'], {'dim': '(1)'}), '(o, dim=1)\n', (18788, 18798), True, 'import torch.nn.functional as F\n'), ((16836, 16873), 'numpy.random.binomial', 'np.random.binomial', (['(1)', '(1 - pdo)', '(vs,)'], {}), '(1, 1 - pdo, (vs,))\n', (16854, 16873), True, 'import numpy as np\n')] |
"""
Find the row in the stacked_tgas file given the KIC id.
"""
import numpy as np
import pandas as pd
from astropy.table import Table
def row_ind(kepid, tgas=None):
"""
Takes a KIC id (kepid) and returns the indices of the corresponding rows
in stacked tgas.
returns:
matched_star_ind: (int)
The row index of the star with the given kepid in stacked tgas.fits
other_star_ind: (int)
The row index of the other star in the pair in stacked tgas.fits
"""
# load the kplr_tgas file.
kt = pd.read_csv("kic_tgas.csv")
# Find the source id.
m = kt.kepid.values == kepid
source_id = int(kt.tgas_source_id.values[m])
# Find the row in stackekd_tgas.
if tgas is None:
table = Table.read('stacked_tgas.fits')
else:
table = tgas
# stacked_tgas_df = table.to_pandas()
# k = stacked_tgas_df.source_id.values == source_id
# r = np.arange(len(k))[k]
return np.where(table['source_id'] == source_id)
t2 = Table.read('pairindices_cp1.fits')
pair = t2.to_pandas()
s1 = pair.star1.values == r
s2 = pair.star2.values == r
if len(pair.star1.values[s1]):
matched_star_ind = int(pair.star1.values[s1])
other_star_ind = int(pair.star2.values[s1])
elif len(pair.star2.values[s2]):
matched_star_ind = int(pair.star2.values[s2])
other_star_ind = int(pair.star1.values[s2])
return matched_star_ind, other_star_ind
if __name__ == "__main__":
print(row_ind(10454113))
| [
"pandas.read_csv",
"numpy.where",
"astropy.table.Table.read"
] | [((540, 567), 'pandas.read_csv', 'pd.read_csv', (['"""kic_tgas.csv"""'], {}), "('kic_tgas.csv')\n", (551, 567), True, 'import pandas as pd\n'), ((955, 996), 'numpy.where', 'np.where', (["(table['source_id'] == source_id)"], {}), "(table['source_id'] == source_id)\n", (963, 996), True, 'import numpy as np\n'), ((1007, 1041), 'astropy.table.Table.read', 'Table.read', (['"""pairindices_cp1.fits"""'], {}), "('pairindices_cp1.fits')\n", (1017, 1041), False, 'from astropy.table import Table\n'), ((752, 783), 'astropy.table.Table.read', 'Table.read', (['"""stacked_tgas.fits"""'], {}), "('stacked_tgas.fits')\n", (762, 783), False, 'from astropy.table import Table\n')] |
"""
top level module for pytorch
<NAME> <EMAIL>
September 16, 2019
"""
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
torch.set_printoptions(precision=10)
#data
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
import torch.optim as optim
import operators as op
import utilities
import shift
import transform
from aperture import Pupil
from propagation import SingleSlicePropagation, Defocus, MultislicePropagation
from regularizers import Regularizer
import scipy.io as sio
import numpy as np
bin_obj = utilities.BinObject.apply
complex_exp = op.ComplexExp.apply
complex_mul = op.ComplexMul.apply
complex_abs = op.ComplexAbs.apply
field_defocus = Defocus.apply
class TorchTomographySolver:
def __init__(self, **kwargs):
"""
Creating tomography solver object.
Required Args:
shape: shape of the object in [y, x, z]
voxel_size: size of voxel in [y, x, z]
wavelength: wavelength of probing wave, scalar
sigma: sigma used in calculating transmittance function (exp(1i * sigma * object)), scalar
tilt_angles: an array of sample rotation angles
defocus_list: an array of defocus values
Optional Args [default]
amplitude_measurements: measurements for reconstruction, not needed for forward evaluation of the model only [None]
numerical_aperture: numerical aperture of the system, scalar [1.0]
binning_factor: bins the number of slices together to save computation, scalar [1]
pad_size: padding reconstruction from measurements in [dy,dx], final size will be measurement.shape + 2*[dy, dx], [0, 0]
shuffle: random shuffle of measurements, boolean [True]
pupil: inital value for the pupil function [None]
maxitr: maximum number of iterations [100]
step_size: step_size for each gradient update [0.1]
momentum: [0.0 NOTIMPLEMENTED]
-- transform alignment parameters (currently only support rigid body transform alignment) --
transform_align: whether to turn on transform alignment, boolean, [False]
ta_method: "optical_flow"
ta_start_iteration: alignment process will not start until then, int, [0]
-- Shift alignment parameters --
shift_align: whether to turn on alignment, boolean, [False]
sa_method: shift alignment method, can be "gradient", "hybrid_correlation", "cross_correlation", or "phase_correlation", string, ["gradient"]
sa_step_size: step_size of shift parameters, float, [0.1]
sa_start_iteration: alignment process will not start until then, int, [0]
-- Defocus refinement parameters --
defocus_refine: whether to turn on defocus refinement for each measurement, boolean, [False]
dr_method: defocus refinement method, can be "gradient", string, ["gradient"]
dr_step_size: step_size of defocus refinement parameters, float, [0.1]
dr_start_iteration: refinement process will not start until then, int, [0]
-- regularizer parameters --
regularizer_total_variation: boolean [False]
regularizer_total_variation_gpu: boolean [False]
regularizer_total_variation_parameter: controls amount of total variation, scalar or vector of length maxitr. [scalar 1.0]
regularizer_total_variation_maxitr: number of iterations for total variation, integer [15]
regularizer_total_variation_order: differential order, scalar [1], higher order not yet implemented
regularizer_pure_real: boolean [False]
regularizer_pure_imag: boolean [False]
regularizer_pure_amplitude: boolean [False]
regularizer_pure_phase: boolean [False]
regularizer_positivity_real: boolean [False]
regularizer_positivity_imag: boolean [False]
regularizer_negativity_real: boolean [False]
regularizer_negativity_imag: boolean [False]
regularizer_dtype: torch dtype class [torch.float32]
"""
self.shape = kwargs.get("shape")
self.shuffle = kwargs.get("shuffle", True)
self.optim_max_itr = kwargs.get("maxitr", 100)
self.optim_step_size = kwargs.get("step_size", 0.1)
self.optim_momentum = kwargs.get("momentum", 0.0)
self.obj_update_iterations = kwargs.get("obj_update_iterations", np.arange(self.optim_max_itr))
#parameters for transform alignment
self.transform_align = kwargs.get("transform_align", False)
self.ta_method = kwargs.get("ta_method", "optical_flow")
self.ta_start_iteration = kwargs.get("ta_start_iteration", 0)
#parameters for shift alignment
self.shift_align = kwargs.get("shift_align", False)
self.sa_method = kwargs.get("sa_method", "gradient")
self.sa_step_size = kwargs.get("sa_step_size", 0.1)
self.sa_start_iteration = kwargs.get("sa_start_iteration", 0)
#parameters for defocus refinement
self.defocus_refine = kwargs.get("defocus_refine", False)
self.dr_method = kwargs.get("dr_method", "gradient")
self.dr_step_size = kwargs.get("dr_step_size", 0.1)
self.dr_start_iteration = kwargs.get("dr_start_iteration", 0)
if not shift.is_valid_method(self.sa_method):
raise ValueError('Shift alignment method not valid.')
if self.shift_align and shift.is_correlation_method(self.sa_method):
self.shift_obj = shift.ImageShiftCorrelationBased(kwargs["amplitude_measurements"].shape[0:2], \
upsample_factor = 10, method = self.sa_method, \
device=torch.device('cpu'))
if self.transform_align:
self.transform_obj = transform.ImageTransformOpticalFlow(kwargs["amplitude_measurements"].shape[0:2],\
method = self.ta_method)
self.dataset = AETDataset(**kwargs)
self.num_defocus = self.dataset.get_all_defocus_lists().shape[0]
self.num_rotation = len(self.dataset.tilt_angles)
self.tomography_obj = PhaseContrastScattering(**kwargs)
self.regularizer_obj = Regularizer(**kwargs)
self.rotation_obj = utilities.ImageRotation(self.shape, axis = 0)
self.cost_function = nn.MSELoss(reduction='sum')
def run(self, obj_init=None, forward_only=False, callback=None):
    """
    Run the tomography solver.

    Args:
        obj_init: initial object estimate; required when forward_only is True.
        forward_only: True -- only runs the forward model on the estimated object
                      False -- runs the iterative reconstruction
        callback: optional callable invoked once per iteration with
            (object on CPU, error history so far).

    Returns:
        If forward_only: tensor of simulated amplitudes stacked on the last
        axis (returned right after the first pass over all tilts).
        Otherwise: (reconstructed object on CPU, per-iteration error list).
    """
    if forward_only:
        # Simulation mode needs a concrete object and a deterministic tilt order.
        assert obj_init is not None
        self.shuffle = False
        amplitude_list = []
    self.dataloader = DataLoader(self.dataset, batch_size = 1, shuffle=self.shuffle)
    error = []
    #initialize object (complex representation, on GPU)
    self.obj = obj_init
    if self.obj is None:
        self.obj = op.r2c(torch.zeros(self.shape).cuda())
    else:
        if not self.obj.is_cuda:
            self.obj = self.obj.cuda()
        if len(self.obj.shape) == 3:
            # a real-valued 3D volume is promoted to the complex layout
            self.obj = op.r2c(self.obj)
    #initialize shift alignment bookkeeping (per-defocus, per-tilt yx shifts)
    self.yx_shifts = None
    if self.shift_align:
        self.sa_pixel_count = []
        self.yx_shift_all = []
        self.yx_shifts = torch.zeros((2, self.num_defocus, self.num_rotation))
    #initialize transform alignment bookkeeping
    if self.transform_align:
        self.xy_transform_all = []
        self.xy_transforms = torch.zeros((3, self.num_defocus, self.num_rotation))
    # TEMPP
    # defocus_list_grad = torch.zeros((self.num_defocus, self.num_rotation), dtype = torch.float32)
    #begin iteration
    for itr_idx in range(self.optim_max_itr):
        sys.stdout.flush()
        running_cost = 0.0
        #defocus_list_grad[:] = 0.0
        if self.shift_align and itr_idx >= self.sa_start_iteration:
            running_sa_pixel_count = 0.0
        for data_idx, data in enumerate(self.dataloader, 0):
            #parse data (reconstruction mode also carries measured amplitudes)
            if not forward_only:
                amplitudes, rotation_angle, defocus_list, rotation_idx = data
                amplitudes = torch.squeeze(amplitudes)
                if len(amplitudes.shape) < 3:
                    # keep an explicit defocus axis even for a single defocus
                    amplitudes = amplitudes.unsqueeze(-1)
            else:
                rotation_angle, defocus_list, rotation_idx = data[-3:]
            #prepare tilt specific parameters
            defocus_list = torch.flatten(defocus_list).cuda()
            rotation_angle = rotation_angle.item()
            yx_shift = None
            if self.shift_align and self.sa_method == "gradient" and itr_idx >= self.sa_start_iteration:
                # this tilt's shifts become optimizable parameters
                yx_shift = self.yx_shifts[:,:,rotation_idx]
                yx_shift = yx_shift.cuda()
                yx_shift.requires_grad_()
            if self.defocus_refine and self.dr_method == "gradient" and itr_idx >= self.dr_start_iteration:
                defocus_list.requires_grad_()
            #rotate object into this tilt's frame: incremental rotation from
            #the previous tilt, except for large jumps (> 90 deg) where the
            #object is first rotated back to 0 -- presumably to limit
            #accumulated interpolation error (TODO confirm)
            if data_idx == 0:
                self.obj = self.rotation_obj.forward(self.obj, rotation_angle)
            else:
                if abs(rotation_angle - previous_angle) > 90:
                    self.obj = self.rotation_obj.forward(self.obj, -1 * previous_angle)
                    self.obj = self.rotation_obj.forward(self.obj, rotation_angle)
                else:
                    self.obj = self.rotation_obj.forward(self.obj, rotation_angle - previous_angle)
            if not forward_only:
                #define optimizer over whatever is being refined this iteration
                optimizer_params = []
                if itr_idx in self.obj_update_iterations:
                    self.obj.requires_grad_()
                    optimizer_params.append({'params': self.obj, 'lr': self.optim_step_size})
                if self.shift_align and self.sa_method == "gradient" and itr_idx >= self.sa_start_iteration:
                    optimizer_params.append({'params': yx_shift, 'lr': self.sa_step_size})
                if self.defocus_refine and self.dr_method == "gradient" and itr_idx >= self.dr_start_iteration:
                    optimizer_params.append({'params': defocus_list, 'lr': self.dr_step_size})
                optimizer = optim.SGD(optimizer_params)
            #forward scattering through the (rotated) object
            estimated_amplitudes = self.tomography_obj(self.obj, defocus_list, yx_shift)
            #Correlation based shift estimation (skipped near the 0-deg reference tilt)
            if self.shift_align and shift.is_correlation_method(self.sa_method) and itr_idx >= self.sa_start_iteration:
                if abs(rotation_angle) - 0.0 > 1e-2:
                    amplitudes, yx_shift, _ = self.shift_obj.estimate(estimated_amplitudes, amplitudes)
                    yx_shift = yx_shift.unsqueeze(-1)
                    self.dataset.update_amplitudes(amplitudes, rotation_idx)
            #optical-flow transform alignment (also skipped at the reference tilt)
            if self.transform_align and itr_idx >= self.ta_start_iteration:
                if abs(rotation_angle) - 0.0 > 1e-2:
                    amplitudes, xy_transform = self.transform_obj.estimate(estimated_amplitudes, amplitudes)
                    xy_transform = xy_transform.unsqueeze(-1)
                    self.dataset.update_amplitudes(amplitudes, rotation_idx)
            if not forward_only:
                #compute cost
                cost = self.cost_function(estimated_amplitudes, amplitudes.cuda())
                running_cost += cost.item()
                #backpropagation
                cost.backward()
                #update parameters
                # if itr_idx >= self.dr_start_iteration:
                #     # print(torch.norm(defocus_list.grad.data))
                #     defocus_list_grad[:,data_idx] = defocus_list.grad.data * self.dr_step_size
                optimizer.step()
                optimizer.zero_grad()
                del cost
            else:
                #store simulated measurement
                amplitude_list.append(estimated_amplitudes.cpu().detach())
            del estimated_amplitudes
            self.obj.requires_grad = False
            #keep track of shift alignment for the tilt
            if self.shift_align and itr_idx >= self.sa_start_iteration:
                if yx_shift is not None:
                    yx_shift.requires_grad = False
                    if abs(rotation_angle) - 0.0 > 1e-2:
                        self.yx_shifts[:,:,rotation_idx] = yx_shift[:].cpu()
                        running_sa_pixel_count += torch.sum(torch.abs(yx_shift.cpu().flatten()))
            #keep track of transform alignment for the tilt
            if self.transform_align and itr_idx >= self.ta_start_iteration:
                if abs(rotation_angle) - 0.0 > 1e-2:
                    self.xy_transforms[...,rotation_idx] = xy_transform[:].cpu()
            #keep track of defocus refinement for the tilt
            if self.defocus_refine and itr_idx >= self.dr_start_iteration:
                defocus_list.requires_grad = False
                self.dataset.update_defocus_list(defocus_list[:].cpu().detach(), rotation_idx)
            previous_angle = rotation_angle
            #after the last tilt, rotate the object back to the 0-deg frame
            if data_idx == (self.dataset.__len__() - 1):
                previous_angle = 0.0
                self.obj = self.rotation_obj.forward(self.obj, -1.0*rotation_angle)
            print("Rotation {:03d}/{:03d}.".format(data_idx+1, self.dataset.__len__()), end="\r")
        #apply regularization on the object estimate
        amplitudes = None
        torch.cuda.empty_cache()
        if itr_idx in self.obj_update_iterations:
            self.obj = self.regularizer_obj.apply(self.obj)
        error.append(running_cost)
        #keep track of shift alignment results
        if self.shift_align and itr_idx >= self.sa_start_iteration:
            self.sa_pixel_count.append(running_sa_pixel_count)
            self.yx_shift_all.append(np.array(self.yx_shifts).copy())
        #keep track of transform alignment results
        if self.transform_align and itr_idx >= self.ta_start_iteration:
            self.xy_transform_all.append(np.array(self.xy_transforms).copy())
        if callback is not None:
            callback(self.obj.cpu().detach(), error)
            #TEMPPPPP
            # callback(defocus_list_grad, self.dataset.get_all_defocus_lists(), error)
        #in simulation mode a single pass over the tilts is enough
        if forward_only and itr_idx == 0:
            return torch.cat([torch.unsqueeze(amplitude_list[idx],-1) for idx in range(len(amplitude_list))], axis=-1)
        print("Iteration {:03d}/{:03d}. Error: {:03f}".format(itr_idx+1, self.optim_max_itr, np.log10(running_cost)))
    self.defocus_list = self.dataset.get_all_defocus_lists()
    return self.obj.cpu().detach(), error
class AETDataset(Dataset):
    """Per-tilt dataset for the tomography solver.

    Each item yields the measured amplitude stack, tilt angle, defocus column
    and index for one tilt. The solver writes refined defocus values and
    aligned amplitudes back through the ``update_*`` methods.
    """
    def __init__(self, amplitude_measurements=None, tilt_angles=[0], defocus_list=None, **kwargs):
        """
        Args:
            amplitude_measurements: measured amplitudes, indexed along the
                last axis by tilt (stored as float32).
            tilt_angles: sequence/array of tilt angles, one per measurement.
            defocus_list: 1D list/tensor of defocus values shared by every
                tilt, or a 2D (#defocus, #tilts) tensor of per-tilt values.
            **kwargs: ignored; accepted so a shared config dict can be passed.
        """
        self.amplitude_measurements = amplitude_measurements
        if self.amplitude_measurements is not None:
            self.amplitude_measurements = amplitude_measurements.astype("float32")
        if tilt_angles is not None:
            # BUG FIX: np.asarray makes the documented default (a plain
            # Python list) work -- a bare `list * 1.0` raises TypeError.
            # For ndarray inputs `* 1.0` promotes to float and copies,
            # exactly as before.
            self.tilt_angles = np.asarray(tilt_angles) * 1.0
        if defocus_list is not None:
            if not torch.is_tensor(defocus_list):
                defocus_list = torch.tensor(defocus_list)
            if len(defocus_list.shape) == 1:
                # One shared defocus column, replicated across all tilts.
                self.defocus_list = defocus_list.unsqueeze(1).repeat(1, len(self.tilt_angles)) * 1.0
            elif len(defocus_list.shape) == 2:
                assert defocus_list.shape[1] == len(tilt_angles)
                self.defocus_list = defocus_list * 1.0
            else:
                raise ValueError('Invalid defocus_list shape.')
    def __len__(self):
        return self.tilt_angles.shape[0]
    def __getitem__(self, idx):
        # amplitudes for the requested tilt are Y x X x #defocus
        if self.amplitude_measurements is not None:
            return self.amplitude_measurements[...,idx], self.tilt_angles[idx], self.defocus_list[:,idx], idx
        else:
            return self.tilt_angles[idx], self.defocus_list[:,idx], idx
    def update_defocus_list(self, defocus_list, idx):
        """Store refined defocus values for tilt *idx*."""
        self.defocus_list[:,idx] = defocus_list.unsqueeze(-1)
        return
    def update_amplitudes(self, amplitudes, idx):
        """Store (re-)aligned measured amplitudes for tilt *idx*."""
        self.amplitude_measurements[...,idx] = amplitudes
        return
    def get_all_defocus_lists(self):
        return self.defocus_list
    def get_all_measurements(self):
        return self.amplitude_measurements
class PhaseContrastScattering(nn.Module):
    """Multislice phase-contrast scattering forward model.

    Starting from a plane wave, a 3D object and a list of defocus distances
    (in Angstrom), computes the amplitude of the phase-contrast images after
    the electron wave scatters through the sample.

    Required Args:
        shape: object shape as [y, x, z]
        voxel_size: voxel size as [y, x, z]
        wavelength: probing wavelength, scalar
    Optional Args [default]:
        sigma: constant of the transmittance exp(1i * sigma * object);
            derived from wavelength and slice thickness when None [None]
        binning_factor: number of slices merged per propagation step
            (trades accuracy for speed) [1]
        pad_size: reconstruction padding [dy, dx]; the output is cropped
            back to the measurement size [0, 0]
    """
    def __init__(self, shape, voxel_size, wavelength, sigma=None, binning_factor=1, pad_size=[0,0], **kwargs):
        super(PhaseContrastScattering, self).__init__()
        # Geometry bookkeeping.
        self.shape = shape
        self.voxel_size = voxel_size
        self.wavelength = wavelength
        self.binning_factor = binning_factor
        self.pad_size = pad_size
        # Binned geometry used for propagation: fewer, proportionally
        # thicker slices along z.
        self.shape_prop = self.shape.copy()
        self.shape_prop[2] //= self.binning_factor
        self.voxel_size_prop = self.voxel_size.copy()
        self.voxel_size_prop[2] *= self.binning_factor
        self._propagation = MultislicePropagation(self.shape_prop, self.voxel_size_prop, self.wavelength, **kwargs)
        # Default interaction constant: (2*pi / lambda) * slice thickness.
        self.sigma = (2 * np.pi / self.wavelength) * self.voxel_size_prop[2] if sigma is None else sigma
        # Aperture (pupil) filter applied after propagation.
        self._pupil = Pupil(self.shape[0:2], self.voxel_size[0], self.wavelength, **kwargs)
        # Gradient-based lateral shift correction.
        self._shift = shift.ImageShiftGradientBased(self.shape[0:2], **kwargs)
    def forward(self, obj, defocus_list, yx_shift=None):
        """Simulate measured amplitudes for every defocus in *defocus_list*."""
        # Merge slices, then lift the potential to a transmittance function.
        transmittance = complex_exp(complex_mul(op._j, self.sigma * bin_obj(obj, self.binning_factor)))
        # Multislice propagation to the exit plane, band-limited by the pupil.
        wavefront = self._pupil(self._propagation(transmittance))
        # Apply each requested defocus, then the per-image lateral shift.
        wavefront = field_defocus(wavefront, self._propagation.propagate.kernel_phase, defocus_list)
        wavefront = self._shift(wavefront, yx_shift)
        # Negative padding crops the field back to the measurement size.
        wavefront = F.pad(wavefront, (0, 0, 0, 0,
                                       -1 * self.pad_size[1], -1 * self.pad_size[1],
                                       -1 * self.pad_size[0], -1 * self.pad_size[0]))
        return complex_abs(wavefront)
| [
"transform.ImageTransformOpticalFlow",
"shift.ImageShiftGradientBased",
"utilities.ImageRotation",
"shift.is_correlation_method",
"numpy.arange",
"sys.stdout.flush",
"torch.device",
"torch.nn.functional.pad",
"torch.flatten",
"torch.nn.MSELoss",
"torch.utils.data.DataLoader",
"torch.squeeze",
... | [((157, 193), 'torch.set_printoptions', 'torch.set_printoptions', ([], {'precision': '(10)'}), '(precision=10)\n', (179, 193), False, 'import torch\n'), ((5959, 5980), 'regularizers.Regularizer', 'Regularizer', ([], {}), '(**kwargs)\n', (5970, 5980), False, 'from regularizers import Regularizer\n'), ((6008, 6051), 'utilities.ImageRotation', 'utilities.ImageRotation', (['self.shape'], {'axis': '(0)'}), '(self.shape, axis=0)\n', (6031, 6051), False, 'import utilities\n'), ((6086, 6113), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (6096, 6113), True, 'import torch.nn as nn\n'), ((6455, 6515), 'torch.utils.data.DataLoader', 'DataLoader', (['self.dataset'], {'batch_size': '(1)', 'shuffle': 'self.shuffle'}), '(self.dataset, batch_size=1, shuffle=self.shuffle)\n', (6465, 6515), False, 'from torch.utils.data import DataLoader\n'), ((16115, 16207), 'propagation.MultislicePropagation', 'MultislicePropagation', (['self.shape_prop', 'self.voxel_size_prop', 'self.wavelength'], {}), '(self.shape_prop, self.voxel_size_prop, self.\n wavelength, **kwargs)\n', (16136, 16207), False, 'from propagation import SingleSlicePropagation, Defocus, MultislicePropagation\n'), ((16374, 16443), 'aperture.Pupil', 'Pupil', (['self.shape[0:2]', 'self.voxel_size[0]', 'self.wavelength'], {}), '(self.shape[0:2], self.voxel_size[0], self.wavelength, **kwargs)\n', (16379, 16443), False, 'from aperture import Pupil\n'), ((16532, 16588), 'shift.ImageShiftGradientBased', 'shift.ImageShiftGradientBased', (['self.shape[0:2]'], {}), '(self.shape[0:2], **kwargs)\n', (16561, 16588), False, 'import shift\n'), ((17143, 17265), 'torch.nn.functional.pad', 'F.pad', (['field', '(0, 0, 0, 0, -1 * self.pad_size[1], -1 * self.pad_size[1], -1 * self.\n pad_size[0], -1 * self.pad_size[0])'], {}), '(field, (0, 0, 0, 0, -1 * self.pad_size[1], -1 * self.pad_size[1], -1 *\n self.pad_size[0], -1 * self.pad_size[0]))\n', (17148, 17265), True, 'import torch.nn.functional as 
F\n'), ((4169, 4198), 'numpy.arange', 'np.arange', (['self.optim_max_itr'], {}), '(self.optim_max_itr)\n', (4178, 4198), True, 'import numpy as np\n'), ((5108, 5145), 'shift.is_valid_method', 'shift.is_valid_method', (['self.sa_method'], {}), '(self.sa_method)\n', (5129, 5145), False, 'import shift\n'), ((5230, 5273), 'shift.is_correlation_method', 'shift.is_correlation_method', (['self.sa_method'], {}), '(self.sa_method)\n', (5257, 5273), False, 'import shift\n'), ((5552, 5660), 'transform.ImageTransformOpticalFlow', 'transform.ImageTransformOpticalFlow', (["kwargs['amplitude_measurements'].shape[0:2]"], {'method': 'self.ta_method'}), "(kwargs['amplitude_measurements'].shape[\n 0:2], method=self.ta_method)\n", (5587, 5660), False, 'import transform\n'), ((6940, 6993), 'torch.zeros', 'torch.zeros', (['(2, self.num_defocus, self.num_rotation)'], {}), '((2, self.num_defocus, self.num_rotation))\n', (6951, 6993), False, 'import torch\n'), ((7076, 7129), 'torch.zeros', 'torch.zeros', (['(3, self.num_defocus, self.num_rotation)'], {}), '((3, self.num_defocus, self.num_rotation))\n', (7087, 7129), False, 'import torch\n'), ((7306, 7324), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (7322, 7324), False, 'import sys\n'), ((12041, 12065), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (12063, 12065), False, 'import torch\n'), ((6768, 6784), 'operators.r2c', 'op.r2c', (['self.obj'], {}), '(self.obj)\n', (6774, 6784), True, 'import operators as op\n'), ((13657, 13686), 'torch.is_tensor', 'torch.is_tensor', (['defocus_list'], {}), '(defocus_list)\n', (13672, 13686), False, 'import torch\n'), ((13707, 13733), 'torch.tensor', 'torch.tensor', (['defocus_list'], {}), '(defocus_list)\n', (13719, 13733), False, 'import torch\n'), ((5477, 5496), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (5489, 5496), False, 'import torch\n'), ((7660, 7685), 'torch.squeeze', 'torch.squeeze', (['amplitudes'], {}), '(amplitudes)\n', (7673, 
7685), False, 'import torch\n'), ((9371, 9398), 'torch.optim.SGD', 'optim.SGD', (['optimizer_params'], {}), '(optimizer_params)\n', (9380, 9398), True, 'import torch.optim as optim\n'), ((9578, 9621), 'shift.is_correlation_method', 'shift.is_correlation_method', (['self.sa_method'], {}), '(self.sa_method)\n', (9605, 9621), False, 'import shift\n'), ((13008, 13030), 'numpy.log10', 'np.log10', (['running_cost'], {}), '(running_cost)\n', (13016, 13030), True, 'import numpy as np\n'), ((6622, 6645), 'torch.zeros', 'torch.zeros', (['self.shape'], {}), '(self.shape)\n', (6633, 6645), False, 'import torch\n'), ((7893, 7920), 'torch.flatten', 'torch.flatten', (['defocus_list'], {}), '(defocus_list)\n', (7906, 7920), False, 'import torch\n'), ((12831, 12871), 'torch.unsqueeze', 'torch.unsqueeze', (['amplitude_list[idx]', '(-1)'], {}), '(amplitude_list[idx], -1)\n', (12846, 12871), False, 'import torch\n'), ((12385, 12409), 'numpy.array', 'np.array', (['self.yx_shifts'], {}), '(self.yx_shifts)\n', (12393, 12409), True, 'import numpy as np\n'), ((12568, 12596), 'numpy.array', 'np.array', (['self.xy_transforms'], {}), '(self.xy_transforms)\n', (12576, 12596), True, 'import numpy as np\n')] |
import random
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn import tree
from sklearn import svm
from sklearn.model_selection import cross_validate
import numpy as np
import time
import click
import feature_extraction
# Wall-clock reference used later to report the total feature-extraction time.
start_time = time.time()
# Module-level feature matrix (one row per sample) and label vector; both are
# filled in by final_features_extraction() below.
X = []
Y = []
def store_features(path, features=None, labels=None):
    """Persist extracted features, one CSV-like line per sample.

    Each line is "<f1>,<f2>,...,<label>" where the label is taken from the
    first element of the matching label row.

    Args:
        path: output file path (overwritten).
        features: rows of feature values; defaults to the module-level ``X``.
        labels: per-row labels, each a one-element sequence (``[label]``);
            defaults to the module-level ``Y``.
    """
    if features is None:
        features = X
    if labels is None:
        labels = Y
    # 'with' guarantees the file is closed even if a write fails.
    with open(path, 'w') as file:
        for i, x in enumerate(features):
            for item in x:
                file.write('{},'.format(str(item)))
            file.write(str(labels[i][0]) + '\n')
def load_features(path):
    """Load features previously written by store_features().

    Each line is "<f1>,<f2>,...,<label>": the leading fields are parsed as
    floats, the trailing field as an int label.

    Args:
        path: file to read.
    Returns:
        (X, Y): list of float feature rows and list of int labels.
    """
    X = []
    Y = []
    # 'with' closes the file deterministically; iterating the handle avoids
    # loading the whole file at once, and the line is split only once.
    with open(path, 'r') as file:
        for line in file:
            parts = line.split(',')
            X.append([float(x) for x in parts[0:-1]])
            Y.append(int(parts[-1]))
    return X, Y
def _read_lines(path):
    """Return all lines of *path*, closing the file deterministically."""
    with open(path, 'r') as file:
        return file.readlines()
def load_data(malwarepath, benignpath, benignheaderfieldspath, malwareheaderfieldspath, malwaresectionnamespath,
              benignsectionnamespath):
    """Read the pre-extracted header-field and section-name dumps.

    Args:
        malwarepath / benignpath: sample directories (passed through).
        *headerfieldspath / *sectionnamespath: text dumps produced earlier,
            one line per sample.
    Returns:
        (malwarepath, benignpath, benign_header_fields, malware_header_fields,
         benign_section_names, malware_section_names)
    """
    malware_header_fields = _read_lines(malwareheaderfieldspath)
    malware_section_names = _read_lines(malwaresectionnamespath)
    benign_header_fields = _read_lines(benignheaderfieldspath)
    benign_section_names = _read_lines(benignsectionnamespath)
    return malwarepath, benignpath, benign_header_fields, malware_header_fields, benign_section_names, malware_section_names
def log(message):
    """Write a progress *message* to stdout.

    Thin wrapper around print() so the reporting sink lives in one place.
    """
    print(str(message))
def final_features_extraction(path, header_fields, section_names, label):
    """Build the final feature rows for all samples in one class.

    Appends one feature row per sample to the module-level ``X`` and the
    matching ``[label]`` entry to the module-level ``Y``, then returns both.

    Args:
        path: directory containing the raw sample files.
        header_fields: per-sample lines of '\\t,'-separated header fields;
            the last field is the sample file name.
        section_names: per-sample comma-separated section-name lines,
            aligned by index with *header_fields*.
        label: class label applied to every sample in this batch
            (callers pass 1 for malware, 0 for benign).
    Returns:
        (X, Y): the accumulated module-level feature matrix and label list.
    """
    for i, row in enumerate(header_fields):
        final_features = []
        Y.append([label])
        row = row.split('\t,')
        # The last field of each row carries the sample's file name.
        sample_name = row[-1].strip('\n')
        # derived features: entropy statistics of the raw file
        entropies = feature_extraction.entropy(sample_name, path)
        final_features.append(entropies[0])
        final_features.append(entropies[1])
        final_features.append(entropies[2])
        # section-name features (the trailing split element is the empty
        # string after the final comma, so it is dropped)
        sectionnames = section_names[i]
        sectionnames = sectionnames.split(',')
        sectionnames.remove(sectionnames[-1])
        section_name_features = feature_extraction.section_name_checker(sectionnames)
        final_features.append(section_name_features[0])
        final_features.append(section_name_features[1])
        # consistency checks on individual header fields; the row[...] indices
        # follow the column layout of the header-fields dump -- see the
        # feature_extraction module for each check's exact semantics
        final_features.append(feature_extraction.compilation_time(row[21]))
        final_features.append(feature_extraction.extract_file_size(sample_name, path))
        final_features.append(feature_extraction.extract_file_info(sample_name, path))
        final_features.append(feature_extraction.Image_Base_checker(row[34]))
        final_features.append(feature_extraction.sectionalignment_checker(int(row[35]), int(row[36])))
        final_features.append(feature_extraction.filealignment_checker(int(row[35]), int(row[36])))
        final_features.append(feature_extraction.sizeofimage_checker(int(row[44]), int(row[35])))
        final_features.append(feature_extraction.size_of_header_checker(sample_name, path))
        # Expanded features: row[25] expanded to 16 bits with bit index 6
        # dropped; each bit becomes its own feature (as a '0'/'1' character).
        zerofill = bin(int(row[25]))[2:].zfill(16)
        characteristics = zerofill[0:6] + zerofill[7:]
        for c in characteristics:
            final_features.append(c)
        # row[48] expanded to 16 bits, keeping only the low 11.
        Dllzerofill = bin(int(row[48]))[2:].zfill(16)
        dllcharacteristics = Dllzerofill[5:]
        for d in dllcharacteristics:
            final_features.append(d)
        # raw features: selected header fields passed through unchanged
        final_features.append(row[0])
        final_features.append(row[1])
        final_features.append(row[2])
        final_features.append(row[3])
        final_features.append(row[4])
        final_features.append(row[5])
        final_features.append(row[19])
        final_features.append(row[26])
        final_features.append(row[27])
        final_features.append(row[28])
        final_features.append(row[29])
        final_features.append(row[30])
        final_features.append(row[31])
        final_features.append(row[32])
        final_features.append(row[33])
        final_features.append(row[34])
        final_features.append(row[35])
        final_features.append(row[36])
        final_features.append(row[37])
        final_features.append(row[38])
        final_features.append(row[39])
        final_features.append(row[40])
        final_features.append(row[41])
        final_features.append(row[42])
        final_features.append(row[43])
        final_features.append(row[44])
        final_features.append(row[45])
        final_features.append(row[46])
        X.append(final_features)
    return X, Y
def learning(X, Y):
    """Benchmark several classifiers on (X, Y) with 10-fold cross validation.

    For each algorithm, prints the mean accuracy / precision / recall / F1
    over the folds and the wall-clock time it took.

    Args:
        X: feature matrix (rows of numeric features).
        Y: binary labels aligned with X.
    """
    algorithms = {
        "RandomForest": RandomForestClassifier(),
        "SVM": svm.SVC(),
        "Knn": KNeighborsClassifier(n_neighbors=5),
        "DecisionTree": tree.DecisionTreeClassifier(),
    }
    # Iterate items() directly instead of keys + a second dict lookup; the
    # timer is a local (t0) so it no longer shadows the module-level
    # start_time used elsewhere.
    for algo, clf in algorithms.items():
        print('{} results'.format(algo))
        t0 = time.time()
        scores = cross_validate(clf, X, Y, cv=10, scoring=('accuracy', 'f1', 'recall', 'precision'))
        for score_name in ['test_accuracy', 'test_precision', 'test_recall', 'test_f1']:
            print('{} : {}'.format(score_name, np.mean(scores[score_name])))
        execution_time = time.time() - t0
        print('{} execution time {} \n'.format(algo, execution_time))
@click.command()
@click.option("--malwarepath", required=True, help="path of malware samples")
@click.option("--benignpath", required=True, help="path of benign samples")
@click.option("--benignheaderfieldspath", required=True, help="path of stored header fields file for benign samples")
@click.option("--malwareheaderfieldspath", required=True, help="path of stored header fields file for malware samples")
# NOTE(review): the two help strings below look copy-pasted from the
# header-fields options -- confirm the intended wording.
@click.option("--malwaresectionnamespath", required=True, help="path of stored header fields file for malware samples")
@click.option("--benignsectionnamespath", required=True, help="path of stored header fields file for malware samples")
def main(malwarepath, benignpath, benignheaderfieldspath, malwareheaderfieldspath, malwaresectionnamespath,
        benignsectionnamespath):
    """Extract features from malware and benign samples, persist them, then
    benchmark several classifiers on the result."""
    malware_path, benign_path, benign_header_fields, malware_header_fields, benign_section_names, malware_section_names = \
        load_data(malwarepath, benignpath, benignheaderfieldspath, malwareheaderfieldspath, malwaresectionnamespath,
                  benignsectionnamespath)
    log("processing malwares for extracting features")
    # NOTE(review): final_features_extraction appends into the module-level
    # X/Y globals, so the second call accumulates on top of the first; the
    # local X, Y bindings here shadow those globals.
    X, Y = final_features_extraction(malware_path, malware_header_fields, malware_section_names, 1)
    log("processing benign samples for extracting features")
    X, Y = final_features_extraction(benign_path, benign_header_fields, benign_section_names, 0)
    global start_time
    end_time = time.time()
    feature_extraction_time = end_time - start_time
    print('feature extraction time {}'.format(feature_extraction_time))
    # saving final extracted features for probabilistic future use
    store_features('final_features.txt')
    # extracted features loading
    X, Y = load_features('final_features.txt')
    # shuffle features and labels together so rows stay aligned
    start_time = time.time()
    features_label = list(zip(X, Y))
    random.shuffle(features_label)
    X, Y = zip(*features_label)
    # learning
    learning(X, Y)
if __name__ == '__main__':
    # click parses the CLI options declared on main()
    main()
| [
"sklearn.ensemble.RandomForestClassifier",
"sklearn.model_selection.cross_validate",
"random.shuffle",
"feature_extraction.section_name_checker",
"feature_extraction.compilation_time",
"click.option",
"feature_extraction.extract_file_info",
"sklearn.tree.DecisionTreeClassifier",
"click.command",
"... | [((301, 312), 'time.time', 'time.time', ([], {}), '()\n', (310, 312), False, 'import time\n'), ((5492, 5507), 'click.command', 'click.command', ([], {}), '()\n', (5505, 5507), False, 'import click\n'), ((5509, 5585), 'click.option', 'click.option', (['"""--malwarepath"""'], {'required': '(True)', 'help': '"""path of malware samples"""'}), "('--malwarepath', required=True, help='path of malware samples')\n", (5521, 5585), False, 'import click\n'), ((5587, 5661), 'click.option', 'click.option', (['"""--benignpath"""'], {'required': '(True)', 'help': '"""path of benign samples"""'}), "('--benignpath', required=True, help='path of benign samples')\n", (5599, 5661), False, 'import click\n'), ((5663, 5784), 'click.option', 'click.option', (['"""--benignheaderfieldspath"""'], {'required': '(True)', 'help': '"""path of stored header fields file for benign samples"""'}), "('--benignheaderfieldspath', required=True, help=\n 'path of stored header fields file for benign samples')\n", (5675, 5784), False, 'import click\n'), ((5781, 5904), 'click.option', 'click.option', (['"""--malwareheaderfieldspath"""'], {'required': '(True)', 'help': '"""path of stored header fields file for malware samples"""'}), "('--malwareheaderfieldspath', required=True, help=\n 'path of stored header fields file for malware samples')\n", (5793, 5904), False, 'import click\n'), ((5901, 6024), 'click.option', 'click.option', (['"""--malwaresectionnamespath"""'], {'required': '(True)', 'help': '"""path of stored header fields file for malware samples"""'}), "('--malwaresectionnamespath', required=True, help=\n 'path of stored header fields file for malware samples')\n", (5913, 6024), False, 'import click\n'), ((6021, 6143), 'click.option', 'click.option', (['"""--benignsectionnamespath"""'], {'required': '(True)', 'help': '"""path of stored header fields file for malware samples"""'}), "('--benignsectionnamespath', required=True, help=\n 'path of stored header fields file for malware 
samples')\n", (6033, 6143), False, 'import click\n'), ((6917, 6928), 'time.time', 'time.time', ([], {}), '()\n', (6926, 6928), False, 'import time\n'), ((7275, 7286), 'time.time', 'time.time', ([], {}), '()\n', (7284, 7286), False, 'import time\n'), ((7328, 7358), 'random.shuffle', 'random.shuffle', (['features_label'], {}), '(features_label)\n', (7342, 7358), False, 'import random\n'), ((1930, 1975), 'feature_extraction.entropy', 'feature_extraction.entropy', (['sample_name', 'path'], {}), '(sample_name, path)\n', (1956, 1975), False, 'import feature_extraction\n'), ((2274, 2327), 'feature_extraction.section_name_checker', 'feature_extraction.section_name_checker', (['sectionnames'], {}), '(sectionnames)\n', (2313, 2327), False, 'import feature_extraction\n'), ((4775, 4799), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (4797, 4799), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((4816, 4825), 'sklearn.svm.SVC', 'svm.SVC', ([], {}), '()\n', (4823, 4825), False, 'from sklearn import svm\n'), ((4842, 4877), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(5)'}), '(n_neighbors=5)\n', (4862, 4877), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((4903, 4932), 'sklearn.tree.DecisionTreeClassifier', 'tree.DecisionTreeClassifier', ([], {}), '()\n', (4930, 4932), False, 'from sklearn import tree\n'), ((5031, 5042), 'time.time', 'time.time', ([], {}), '()\n', (5040, 5042), False, 'import time\n'), ((5091, 5178), 'sklearn.model_selection.cross_validate', 'cross_validate', (['clf', 'X', 'Y'], {'cv': '(10)', 'scoring': "('accuracy', 'f1', 'recall', 'precision')"}), "(clf, X, Y, cv=10, scoring=('accuracy', 'f1', 'recall',\n 'precision'))\n", (5105, 5178), False, 'from sklearn.model_selection import cross_validate\n'), ((5360, 5371), 'time.time', 'time.time', ([], {}), '()\n', (5369, 5371), False, 'import time\n'), ((2471, 2515), 
'feature_extraction.compilation_time', 'feature_extraction.compilation_time', (['row[21]'], {}), '(row[21])\n', (2506, 2515), False, 'import feature_extraction\n'), ((2548, 2603), 'feature_extraction.extract_file_size', 'feature_extraction.extract_file_size', (['sample_name', 'path'], {}), '(sample_name, path)\n', (2584, 2603), False, 'import feature_extraction\n'), ((2636, 2691), 'feature_extraction.extract_file_info', 'feature_extraction.extract_file_info', (['sample_name', 'path'], {}), '(sample_name, path)\n', (2672, 2691), False, 'import feature_extraction\n'), ((2724, 2770), 'feature_extraction.Image_Base_checker', 'feature_extraction.Image_Base_checker', (['row[34]'], {}), '(row[34])\n', (2761, 2770), False, 'import feature_extraction\n'), ((3107, 3167), 'feature_extraction.size_of_header_checker', 'feature_extraction.size_of_header_checker', (['sample_name', 'path'], {}), '(sample_name, path)\n', (3148, 3167), False, 'import feature_extraction\n'), ((5311, 5338), 'numpy.mean', 'np.mean', (['scores[score_name]'], {}), '(scores[score_name])\n', (5318, 5338), True, 'import numpy as np\n')] |
"""Default network architectures for TD3."""
from typing import Dict, Sequence
from acme import specs
from acme.jax import networks as networks_lib
from acme.jax import utils
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
def apply_policy_sample(networks, eval_mode: bool):
    """Build a policy callable from the TD3 networks dict.

    In eval mode the deterministic action mean is returned directly;
    otherwise the mean is perturbed through the networks' "sample" function.
    """
    deterministic = eval_mode
    def policy_network(params, key, observation):
        mean = networks["policy"].apply(params, observation)
        return mean if deterministic else networks["sample"](mean, key)
    return policy_network
def make_networks(
    spec: specs.EnvironmentSpec,
    policy_layer_sizes: Sequence[int] = (256, 256),
    critic_layer_sizes: Sequence[int] = (256, 256),
    sigma: float = 0.1,
) -> Dict[str, hk.Transformed]:
    """Make default networks used by TD3.

    Args:
        spec: environment spec providing action/observation shapes and bounds.
        policy_layer_sizes: hidden-layer widths of the policy MLP.
        critic_layer_sizes: hidden-layer widths of each critic MLP.
        sigma: std-dev of the Gaussian exploration noise added by "sample".

    Returns:
        Dict with "policy" and "critic" FeedForwardNetworks and a "sample"
        function that adds clipped exploration noise to an action mean.
    """
    action_size = np.prod(spec.actions.shape, dtype=int)

    def _critic(h):
        # Single-head critic MLP; squeeze the trailing unit dimension.
        output = hk.nets.MLP(
            list(critic_layer_sizes) + [1],
            w_init=hk.initializers.VarianceScaling(1.0, "fan_in", "uniform"),
        )(h)
        return jnp.squeeze(output, axis=-1)

    def _double_critic(obs, a):
        # Twin critics (TD3 clipped double-Q) on the concatenated (obs, action).
        h = jnp.concatenate([obs, a], axis=-1)
        q1 = _critic(h)
        q2 = _critic(h)
        return q1, q2

    def _policy(obs):
        # Deterministic tanh-squashed policy network.
        return hk.Sequential(
            [
                hk.nets.MLP(
                    policy_layer_sizes,
                    w_init=hk.initializers.VarianceScaling(1.0, "fan_in", "uniform"),
                    activate_final=True,
                ),
                # BUG FIX: the initializer was previously passed positionally,
                # which bound it to hk.Linear's second parameter `with_bias`
                # (truthy -> True) and silently left w_init at its default.
                # Pass it by keyword so it actually initializes the weights.
                hk.Linear(
                    action_size,
                    w_init=hk.initializers.VarianceScaling(1.0, "fan_in", "uniform"),
                ),
                jnp.tanh,
            ]
        )(obs)

    def _sample_fn(action_mean, key):
        # Gaussian exploration noise, clipped back into the valid action range.
        exploration_noise = jax.random.normal(key, action_mean.shape) * sigma
        sampled_action = action_mean + exploration_noise
        sampled_action = jnp.clip(
            sampled_action, spec.actions.minimum, spec.actions.maximum
        )
        return sampled_action

    critic = hk.without_apply_rng(hk.transform(_double_critic))
    policy = hk.without_apply_rng(hk.transform(_policy))
    # Dummy batched observations/actions used only to build parameters.
    dummy_action = utils.add_batch_dim(utils.zeros_like(spec.actions))
    dummy_obs = utils.add_batch_dim(utils.zeros_like(spec.observations))
    return {
        "policy": networks_lib.FeedForwardNetwork(
            lambda key: policy.init(key, dummy_obs), policy.apply
        ),
        "critic": networks_lib.FeedForwardNetwork(
            lambda key: critic.init(key, dummy_obs, dummy_action), critic.apply
        ),
        "sample": _sample_fn,
    }
| [
"jax.numpy.squeeze",
"jax.random.normal",
"acme.jax.utils.zeros_like",
"jax.numpy.concatenate",
"haiku.initializers.VarianceScaling",
"acme.jax.utils.add_batch_dim",
"haiku.transform",
"jax.numpy.clip",
"numpy.prod"
] | [((848, 886), 'numpy.prod', 'np.prod', (['spec.actions.shape'], {'dtype': 'int'}), '(spec.actions.shape, dtype=int)\n', (855, 886), True, 'import numpy as np\n'), ((2297, 2327), 'acme.jax.utils.zeros_like', 'utils.zeros_like', (['spec.actions'], {}), '(spec.actions)\n', (2313, 2327), False, 'from acme.jax import utils\n'), ((2344, 2379), 'acme.jax.utils.zeros_like', 'utils.zeros_like', (['spec.observations'], {}), '(spec.observations)\n', (2360, 2379), False, 'from acme.jax import utils\n'), ((2399, 2432), 'acme.jax.utils.add_batch_dim', 'utils.add_batch_dim', (['dummy_action'], {}), '(dummy_action)\n', (2418, 2432), False, 'from acme.jax import utils\n'), ((2449, 2479), 'acme.jax.utils.add_batch_dim', 'utils.add_batch_dim', (['dummy_obs'], {}), '(dummy_obs)\n', (2468, 2479), False, 'from acme.jax import utils\n'), ((1088, 1116), 'jax.numpy.squeeze', 'jnp.squeeze', (['output'], {'axis': '(-1)'}), '(output, axis=-1)\n', (1099, 1116), True, 'import jax.numpy as jnp\n'), ((1162, 1196), 'jax.numpy.concatenate', 'jnp.concatenate', (['[obs, a]'], {'axis': '(-1)'}), '([obs, a], axis=-1)\n', (1177, 1196), True, 'import jax.numpy as jnp\n'), ((1961, 2029), 'jax.numpy.clip', 'jnp.clip', (['sampled_action', 'spec.actions.minimum', 'spec.actions.maximum'], {}), '(sampled_action, spec.actions.minimum, spec.actions.maximum)\n', (1969, 2029), True, 'import jax.numpy as jnp\n'), ((2117, 2145), 'haiku.transform', 'hk.transform', (['_double_critic'], {}), '(_double_critic)\n', (2129, 2145), True, 'import haiku as hk\n'), ((2181, 2202), 'haiku.transform', 'hk.transform', (['_policy'], {}), '(_policy)\n', (2193, 2202), True, 'import haiku as hk\n'), ((1829, 1870), 'jax.random.normal', 'jax.random.normal', (['key', 'action_mean.shape'], {}), '(key, action_mean.shape)\n', (1846, 1870), False, 'import jax\n'), ((1001, 1058), 'haiku.initializers.VarianceScaling', 'hk.initializers.VarianceScaling', (['(1.0)', '"""fan_in"""', '"""uniform"""'], {}), "(1.0, 'fan_in', 'uniform')\n", (1032, 
1058), True, 'import haiku as hk\n'), ((1629, 1686), 'haiku.initializers.VarianceScaling', 'hk.initializers.VarianceScaling', (['(1.0)', '"""fan_in"""', '"""uniform"""'], {}), "(1.0, 'fan_in', 'uniform')\n", (1660, 1686), True, 'import haiku as hk\n'), ((1430, 1487), 'haiku.initializers.VarianceScaling', 'hk.initializers.VarianceScaling', (['(1.0)', '"""fan_in"""', '"""uniform"""'], {}), "(1.0, 'fan_in', 'uniform')\n", (1461, 1487), True, 'import haiku as hk\n')] |
import collections
import gym
import numpy as np
import tensorflow as tf
def make_env(gym_id, seed):
    """Return a zero-arg factory that builds a seeded, stats-recording gym env."""
    def _factory():
        environment = gym.wrappers.RecordEpisodeStatistics(gym.make(gym_id))
        # Seed the env and both spaces so rollouts are reproducible.
        environment.seed(seed)
        environment.action_space.seed(seed)
        environment.observation_space.seed(seed)
        return environment
    return _factory
def normc_initializer(std=1.0):
    """Column-normalized Gaussian kernel initializer (copied from OpenAI baselines)."""
    def _initializer(shape, dtype=None, partition_info=None):
        sample = np.random.randn(*shape).astype(np.float32)
        # Scale each column so its L2 norm equals `std` (in-place keeps float32).
        column_norms = np.sqrt(np.square(sample).sum(axis=0, keepdims=True))
        sample *= std / column_norms
        return tf.constant(sample)
    return _initializer
# Container bundling the individual PPO loss terms plus training diagnostics.
PPOLossInfo = collections.namedtuple(
    "LossInfo",
    (
        "total_loss",
        "value_loss",
        "policy_loss",
        "entropy_loss",
        "approx_kl",
        "clip_fracs",
    ),
)
| [
"gym.make",
"numpy.random.randn",
"numpy.square",
"tensorflow.constant",
"collections.namedtuple",
"gym.wrappers.RecordEpisodeStatistics"
] | [((709, 835), 'collections.namedtuple', 'collections.namedtuple', (['"""LossInfo"""', "('total_loss', 'value_loss', 'policy_loss', 'entropy_loss', 'approx_kl',\n 'clip_fracs')"], {}), "('LossInfo', ('total_loss', 'value_loss',\n 'policy_loss', 'entropy_loss', 'approx_kl', 'clip_fracs'))\n", (731, 835), False, 'import collections\n'), ((135, 151), 'gym.make', 'gym.make', (['gym_id'], {}), '(gym_id)\n', (143, 151), False, 'import gym\n'), ((166, 207), 'gym.wrappers.RecordEpisodeStatistics', 'gym.wrappers.RecordEpisodeStatistics', (['env'], {}), '(env)\n', (202, 207), False, 'import gym\n'), ((651, 667), 'tensorflow.constant', 'tf.constant', (['out'], {}), '(out)\n', (662, 667), True, 'import tensorflow as tf\n'), ((521, 544), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (536, 544), True, 'import numpy as np\n'), ((593, 607), 'numpy.square', 'np.square', (['out'], {}), '(out)\n', (602, 607), True, 'import numpy as np\n')] |
#!/usr/bin/python
# (c) 2017 Treadco software.
# this defaults to python 2 on my machine
import numpy as np
import sys,os
from PIL import Image
from PIL import ImageChops
from pylab import *
import kernel
def normalize( a ):
    """Scale *a* so the element at index (0, ..., 0) becomes exactly 1."""
    # Build the all-zeros index tuple for an array of arbitrary rank.
    origin = tuple(0 for _ in range(a.ndim))
    scale = 1.0 / a[origin]
    return a.__mul__(scale)
def generate_psf( a):
    """Estimate a point-spread function from the autocorrelation of *a*."""
    # Power spectrum of the rescaled image gives the ACF via inverse FFT.
    spectrum = np.fft.fftn(rescale(a, 1.0))
    power = np.absolute(spectrum) ** 2
    acf = normalize(np.fft.ifftn(power))
    # Rescale by 1/volume and zero the origin so the PSF has no DC spike.
    acf = rescale(np.real(acf), 1.0 / a.size)
    origin = tuple(0 for _ in range(a.ndim))
    acf[origin] = 0.0
    return np.real(acf)
def jacobi_step( a, n):
    """Apply n Jacobi sharpening iterations to *a* using a PSF derived from *a*."""
    image_ft = np.fft.fftn(a)
    psf_ft = np.fft.fftn(generate_psf(a))
    result = a.__mul__(1.0)  # working copy
    for _ in range(n):
        # Blur estimate from the current iterate's spectrum times the PSF.
        blur = np.real(np.fft.ifftn(np.multiply(image_ft, psf_ft)))
        result = np.subtract(a, blur)
        image_ft = np.fft.fftn(result)
    return np.real(result)
def jacobi_step_with_kernel( a, kern, n):
    """Apply n Jacobi sharpening iterations to *a* using the supplied kernel."""
    image_ft = np.fft.fftn(a)
    sys.stdout.flush()
    kern_ft = np.fft.fftn(kern)
    sharpened = a.__mul__(1.0)  # working copy
    for _ in range(n):
        # Blur estimate from the current iterate's spectrum times the kernel.
        blur = np.real(np.fft.ifftn(np.multiply(image_ft, kern_ft)))
        sharpened = np.subtract(a, blur)
        image_ft = np.fft.fftn(sharpened)
    return np.real(sharpened)
def rescale(a, upper):
    """Linearly map the values of *a* onto the range [0, upper]."""
    lo = a.min()
    span = a.max() - lo
    return (a.__sub__(lo)).__mul__(upper / span)
def main():
    """Load the input image, Jacobi-sharpen each RGB channel, and save results."""
    try:
        image = Image.open(sys.argv[1])
    except IOError:
        print("Could not open the input \nUsage tick_jpg inputfile.")
        sys.exit()
    red, green, blue = image.split()
    red_arr = np.real(np.array(red))
    green_arr = np.real(np.array(green))
    blue_arr = np.real(np.array(blue))
    # Gaussian kernel used as the PSF for sharpening; DC term zeroed.
    kern = kernel.gaussian(red_arr, 10.0)
    kern[0, 0] = 0.0
    sharp_r = jacobi_step_with_kernel(red_arr, kern, 5)
    sharp_g = jacobi_step_with_kernel(green_arr, kern, 5)
    sharp_b = jacobi_step_with_kernel(blue_arr, kern, 5)
    chan_r = Image.fromarray(np.uint8(rescale(sharp_r, 255.0)))
    chan_g = Image.fromarray(np.uint8(rescale(sharp_g, 255.0)))
    chan_b = Image.fromarray(np.uint8(rescale(sharp_b, 255.0)))
    combined = Image.merge("RGB", (chan_r, chan_g, chan_b))
    combined.save('after.jpg')
    # Also save the difference between the sharpened and original images.
    diff = ImageChops.subtract(combined, image, 0.1)
    diff.save('difference.jpg')
main()
| [
"kernel.gaussian",
"numpy.multiply",
"numpy.subtract",
"numpy.fft.fftn",
"PIL.Image.open",
"sys.stdout.flush",
"numpy.array",
"numpy.real",
"PIL.ImageChops.subtract",
"sys.exit",
"PIL.Image.merge"
] | [((756, 768), 'numpy.real', 'np.real', (['acf'], {}), '(acf)\n', (763, 768), True, 'import numpy as np\n'), ((846, 860), 'numpy.fft.fftn', 'np.fft.fftn', (['a'], {}), '(a)\n', (857, 860), True, 'import numpy as np\n'), ((1131, 1141), 'numpy.real', 'np.real', (['b'], {}), '(b)\n', (1138, 1141), True, 'import numpy as np\n'), ((1323, 1337), 'numpy.fft.fftn', 'np.fft.fftn', (['a'], {}), '(a)\n', (1334, 1337), True, 'import numpy as np\n'), ((1341, 1359), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1357, 1359), False, 'import sys, os\n'), ((1369, 1386), 'numpy.fft.fftn', 'np.fft.fftn', (['kern'], {}), '(kern)\n', (1380, 1386), True, 'import numpy as np\n'), ((1619, 1629), 'numpy.real', 'np.real', (['b'], {}), '(b)\n', (1626, 1629), True, 'import numpy as np\n'), ((2198, 2223), 'kernel.gaussian', 'kernel.gaussian', (['rr', '(10.0)'], {}), '(rr, 10.0)\n', (2213, 2223), False, 'import kernel\n'), ((2552, 2584), 'PIL.Image.merge', 'Image.merge', (['"""RGB"""', '(rn, gn, bn)'], {}), "('RGB', (rn, gn, bn))\n", (2563, 2584), False, 'from PIL import Image\n'), ((2618, 2655), 'PIL.ImageChops.subtract', 'ImageChops.subtract', (['inew', 'image', '(0.1)'], {}), '(inew, image, 0.1)\n', (2637, 2655), False, 'from PIL import ImageChops\n'), ((694, 706), 'numpy.real', 'np.real', (['acf'], {}), '(acf)\n', (701, 706), True, 'import numpy as np\n'), ((1073, 1094), 'numpy.subtract', 'np.subtract', (['a', 'delta'], {}), '(a, delta)\n', (1084, 1094), True, 'import numpy as np\n'), ((1106, 1120), 'numpy.fft.fftn', 'np.fft.fftn', (['b'], {}), '(b)\n', (1117, 1120), True, 'import numpy as np\n'), ((1561, 1582), 'numpy.subtract', 'np.subtract', (['a', 'delta'], {}), '(a, delta)\n', (1572, 1582), True, 'import numpy as np\n'), ((1594, 1608), 'numpy.fft.fftn', 'np.fft.fftn', (['b'], {}), '(b)\n', (1605, 1608), True, 'import numpy as np\n'), ((1857, 1880), 'PIL.Image.open', 'Image.open', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (1867, 1880), False, 'from PIL import Image\n'), 
((2030, 2041), 'numpy.array', 'np.array', (['r'], {}), '(r)\n', (2038, 2041), True, 'import numpy as np\n'), ((2060, 2071), 'numpy.array', 'np.array', (['g'], {}), '(g)\n', (2068, 2071), True, 'import numpy as np\n'), ((2090, 2101), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (2098, 2101), True, 'import numpy as np\n'), ((1975, 1985), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1983, 1985), False, 'import sys, os\n'), ((995, 1016), 'numpy.multiply', 'np.multiply', (['aft', 'psf'], {}), '(aft, psf)\n', (1006, 1016), True, 'import numpy as np\n'), ((1483, 1504), 'numpy.multiply', 'np.multiply', (['aft', 'psf'], {}), '(aft, psf)\n', (1494, 1504), True, 'import numpy as np\n')] |
import numpy as np
def norm(x, axis=None):
    """Return the Euclidean (L2) norm of *x*, reduced along *axis* if given."""
    return np.linalg.norm(x, axis=axis)
#-------------------------
#----- Poincaré Disk -----
#-------------------------
# NOTE: POSSIBLE ISSUE WITH DIFFERENT WAYS TO SPECIFY MINKOWSKI DOT PRODUCT
# arbritray sign gives different signatures (+, +, +, -), (+, -, -, -)
# distance in poincare disk
def poincare_dist(u, v, eps=1e-5):
    """Hyperbolic distance between points u, v of the Poincaré disk."""
    diff_sq = norm(u - v) ** 2
    # eps keeps the denominator away from zero near the disk boundary.
    denom = (1 - norm(u) ** 2) * (1 - norm(v) ** 2) + eps
    return np.arccosh(1 + 2 * diff_sq / denom)
# compute symmetric poincare distance matrix
def poincare_distances(embedding):
    """Return the upper-triangular matrix of pairwise Poincaré distances."""
    count = embedding.shape[0]
    dists = np.zeros((count, count))
    for row in range(count):
        for col in range(row + 1, count):
            dists[row][col] = poincare_dist(embedding[row], embedding[col])
    return dists
# convert array from poincare disk to hyperboloid
def poincare_pts_to_hyperboloid(Y, eps=1e-6, metric='lorentz'):
    """
    Lift an array of Poincaré-disk points onto the hyperboloid model.

    For 'minkowski' the time-like coordinate goes first; otherwise (the
    Lorentz convention) it goes last. Output shape is (n, dim + 1).
    """
    lifted = np.zeros((Y.shape[0], Y.shape[1] + 1))
    r = norm(Y, axis=1)
    scale = 2 / (1 - r ** 2 + eps)
    time_coord = scale * (1 + r ** 2) / 2
    if metric == 'minkowski':
        lifted[:, 0] = time_coord
        for col in range(1, lifted.shape[1]):
            lifted[:, col] = scale * Y[:, col - 1]
    else:
        lifted[:, Y.shape[1]] = time_coord
        for col in range(0, Y.shape[1]):
            lifted[:, col] = scale * Y[:, col]
    return lifted
# convert single point to hyperboloid
def poincare_pt_to_hyperboloid(y, eps=1e-6, metric='lorentz'):
    """
    Lift a single Poincaré-disk point onto the hyperboloid model.

    Same conventions as poincare_pts_to_hyperboloid: 'minkowski' stores the
    time-like coordinate first, otherwise it is stored last.
    """
    lifted = np.zeros((y.shape[0] + 1,))
    r = norm(y)
    scale = 2 / (1 - r ** 2 + eps)
    time_coord = scale * (1 + r ** 2) / 2
    if metric == 'minkowski':
        lifted[0] = time_coord
        for i in range(1, lifted.shape[0]):
            lifted[i] = scale * y[i - 1]
    else:
        lifted[y.shape[0]] = time_coord
        for i in range(0, y.shape[0]):
            lifted[i] = scale * y[i]
    return lifted
#------------------------------
#----- Hyperboloid Model ------
#------------------------------
# NOTE: POSSIBLE ISSUE WITH DIFFERENT WAYS TO SPECIFY MINKOWSKI DOT PRODUCT
# arbritray sign gives different signatures (+, +, +, -), (+, -, -, -)
# define hyperboloid bilinear form
def hyperboloid_dot(u, v):
    """Lorentzian bilinear form with the time-like coordinate stored last."""
    spatial = np.dot(u[:-1], v[:-1])
    return spatial - u[-1] * v[-1]
# define alternate minkowski/hyperboloid bilinear form
def minkowski_dot(u, v):
    """Lorentzian bilinear form with the time-like coordinate stored first."""
    spatial = np.dot(u[1:], v[1:])
    return u[0] * v[0] - spatial
# hyperboloid distance function
def hyperboloid_dist(u, v, eps=1e-6, metric='lorentz'):
    """
    Geodesic distance on the hyperboloid; returns eps if the arccosh of the
    (negated) inner product comes out NaN due to numerical round-off.
    """
    if metric == 'minkowski':
        inner = minkowski_dot(u, v)
    else:
        inner = hyperboloid_dot(u, v)
    dist = np.arccosh(-1 * inner)
    return eps if np.isnan(dist) else dist
# compute symmetric hyperboloid distance matrix
def hyperboloid_distances(embedding):
    """Return the upper-triangular matrix of pairwise hyperboloid distances."""
    count = embedding.shape[0]
    dists = np.zeros((count, count))
    for row in range(count):
        for col in range(row + 1, count):
            dists[row][col] = hyperboloid_dist(embedding[row], embedding[col])
    return dists
# convert array to poincare disk
def hyperboloid_pts_to_poincare(X, eps=1e-6, metric='lorentz'):
    """
    Project an array of hyperboloid points back into the Poincaré disk.

    Parameters
    ----------
    X : ndarray, shape (n_points, dim + 1)
        Points on the hyperboloid.
    eps : float
        Numerical guard added to the denominator.
    metric : str
        'minkowski' reads the time-like coordinate from column 0; anything
        else (the Lorentz convention) reads it from the last column.

    Returns
    -------
    ndarray, shape (n_points, dim)
        The projected Poincaré-disk coordinates.
    """
    poincare_pts = np.zeros((X.shape[0], X.shape[1]-1))
    # BUG FIX: the original looped over range(X.shape[1]) and wrote one
    # column past the end of poincare_pts (IndexError for every input);
    # iterate over the output width instead.
    if metric == 'minkowski':
        for i in range(0, poincare_pts.shape[1]):
            poincare_pts[:, i] = X[:, i + 1] / ((X[:, 0] + 1) + eps)
    else:
        for i in range(0, poincare_pts.shape[1]):
            poincare_pts[:, i] = X[:, i] / ((X[:, -1] + 1) + eps)
    return poincare_pts
# project within disk
def proj(theta, eps=1e-3):
    """
    Project *theta* into the open unit disk.

    Points with norm >= 1 are rescaled to norm (1 - eps); interior points
    are returned unchanged.
    """
    # np.linalg.norm is used directly so this function does not depend on the
    # module-level `norm` helper; the value is identical.
    length = np.linalg.norm(theta)
    if length >= 1:
        # BUG FIX: the original subtracted the scalar eps componentwise from
        # the unit vector, which shifts the point and can leave it outside the
        # disk (e.g. [-1, 0] -> [-1-eps, -eps] has norm > 1). Shrinking the
        # radius by (1 - eps) keeps the direction and guarantees norm < 1.
        theta = theta / length * (1 - eps)
    return theta
# convert single point to poincare
def hyperboloid_pt_to_poincare(x, eps=1e-6, metric='lorentz'):
    """
    Project a single hyperboloid point into the Poincaré disk and clamp the
    result inside the disk via proj().
    """
    dim = x.shape[0] - 1
    disk_pt = np.zeros((dim,))
    if metric == 'minkowski':
        for i in range(dim):
            disk_pt[i] = x[i + 1] / ((x[0] + 1) + eps)
    else:
        for i in range(dim):
            disk_pt[i] = x[i] / ((x[-1] + 1) + eps)
    return proj(disk_pt)
# helper function to generate samples
def generate_data(n, radius=0.7, hyperboloid=False):
    """
    Sample n points uniformly from a disk of the given (squared-radius) size,
    optionally lifting them onto the hyperboloid model.
    """
    # Draw angle first, then radius, to preserve the RNG call order.
    angle = np.random.uniform(0, 2 * np.pi, n)
    rad = np.sqrt(np.random.uniform(0, radius, n))
    xs = (rad * np.cos(angle)).reshape(-1, 1)
    ys = (rad * np.sin(angle)).reshape(-1, 1)
    pts = np.hstack((xs, ys))
    if hyperboloid:
        return poincare_pts_to_hyperboloid(pts)
    return pts
| [
"numpy.random.uniform",
"numpy.zeros",
"numpy.isnan",
"numpy.arccosh",
"numpy.sin",
"numpy.linalg.norm",
"numpy.cos",
"numpy.dot",
"numpy.sqrt"
] | [((55, 83), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {'axis': 'axis'}), '(x, axis=axis)\n', (69, 83), True, 'import numpy as np\n'), ((468, 481), 'numpy.arccosh', 'np.arccosh', (['d'], {}), '(d)\n', (478, 481), True, 'import numpy as np\n'), ((608, 624), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (616, 624), True, 'import numpy as np\n'), ((907, 945), 'numpy.zeros', 'np.zeros', (['(Y.shape[0], Y.shape[1] + 1)'], {}), '((Y.shape[0], Y.shape[1] + 1))\n', (915, 945), True, 'import numpy as np\n'), ((1928, 1955), 'numpy.zeros', 'np.zeros', (['(y.shape[0] + 1,)'], {}), '((y.shape[0] + 1,))\n', (1936, 1955), True, 'import numpy as np\n'), ((3599, 3613), 'numpy.isnan', 'np.isnan', (['dist'], {}), '(dist)\n', (3607, 3613), True, 'import numpy as np\n'), ((3850, 3866), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (3858, 3866), True, 'import numpy as np\n'), ((4139, 4177), 'numpy.zeros', 'np.zeros', (['(X.shape[0], X.shape[1] - 1)'], {}), '((X.shape[0], X.shape[1] - 1))\n', (4147, 4177), True, 'import numpy as np\n'), ((5001, 5028), 'numpy.zeros', 'np.zeros', (['(x.shape[0] - 1,)'], {}), '((x.shape[0] - 1,))\n', (5009, 5028), True, 'import numpy as np\n'), ((5706, 5740), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2 * np.pi)', 'n'], {}), '(0, 2 * np.pi, n)\n', (5723, 5740), True, 'import numpy as np\n'), ((5747, 5778), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'radius', 'n'], {}), '(0, radius, n)\n', (5764, 5778), True, 'import numpy as np\n'), ((5787, 5797), 'numpy.sqrt', 'np.sqrt', (['u'], {}), '(u)\n', (5794, 5797), True, 'import numpy as np\n'), ((3198, 3220), 'numpy.dot', 'np.dot', (['u[:-1]', 'v[:-1]'], {}), '(u[:-1], v[:-1])\n', (3204, 3220), True, 'import numpy as np\n'), ((3339, 3359), 'numpy.dot', 'np.dot', (['u[1:]', 'v[1:]'], {}), '(u[1:], v[1:])\n', (3345, 3359), True, 'import numpy as np\n'), ((5810, 5823), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (5816, 5823), True, 'import numpy as 
np\n'), ((5836, 5849), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (5842, 5849), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 7 23:34:50 2020
@author: panpanhuang
"""
import numpy as np
from numpy.random import default_rng
import xraylib as xlib
import xraylib_np as xlib_np
import torch as tc
import torch.nn.functional as F
import os
from tqdm import tqdm
import pickle
# Note: xraylib works in keV.
# Sub-lines of the K series, in the format xraylib expects.
fl_K = np.array([
    xlib.KA1_LINE, xlib.KA2_LINE, xlib.KA3_LINE, xlib.KB1_LINE,
    xlib.KB2_LINE, xlib.KB3_LINE, xlib.KB4_LINE, xlib.KB5_LINE,
])

# Sub-lines of the L series, in the format xraylib expects.
fl_L = np.array([
    xlib.LA1_LINE, xlib.LA2_LINE, xlib.LB1_LINE, xlib.LB2_LINE,
    xlib.LB3_LINE, xlib.LB4_LINE, xlib.LB5_LINE, xlib.LB6_LINE,
    xlib.LB7_LINE, xlib.LB9_LINE, xlib.LB10_LINE, xlib.LB15_LINE,
    xlib.LB17_LINE,
])

# Sub-lines of the M series, in the format xraylib expects.
fl_M = np.array([xlib.MA1_LINE, xlib.MA2_LINE, xlib.MB_LINE])

# Names of the fluorescence line families handled in this module.
fl_line_groups = np.array(["K", "L", "M"])
def rotate(arr, theta, dev):
    """
    Rotate a 4-D grid (n_element, sample_height_n, sample_size_n, sample_size_n)
    in the plane of its last two dimensions; dim 1 is the rotational axis.

    Parameters
    ----------
    arr : torch tensor
        grid concentration to rotate
    theta : torch tensor (scalar)
        rotation angle in radians (clockwise)
    dev : string
        "cpu" or a cuda device string (e.g. "cuda:0")

    Returns
    -------
    torch tensor of the same shape, resampled with border padding.
    """
    cos_t = tc.cos(theta)
    sin_t = tc.sin(theta)
    top_row = tc.tensor([cos_t, -sin_t, 0.0], device=dev)
    bottom_row = tc.tensor([sin_t, cos_t, 0.0], device=dev)
    # One 2x3 affine matrix, replicated for every element channel.
    affine = tc.stack([top_row, bottom_row]).view(1, 2, 3)
    affine = affine.repeat([arr.shape[0], 1, 1])
    sampling_grid = F.affine_grid(affine, arr.shape)
    return F.grid_sample(arr, sampling_grid, padding_mode='border')
def attenuation_3d(src_path, theta_st, theta_end, n_theta, sample_height_n, sample_size_n,
                   sample_size_cm, this_aN_dic, probe_energy, dev):
    """
    Compute, for every projection angle, the probe attenuation accumulated
    before entering each voxel, and the fraction transmitted through the
    whole sample.

    Parameters
    ----------
    src_path : string
        path of the elemental concentration grid (numpy file)
    theta_st, theta_end : float
        initial/final sample angle
    n_theta : integer
        number of sample angles
    sample_height_n : integer
        sample height along the rotational axis (pixels)
    sample_size_n : int
        sample size in pixels along the probe propagation axis
    sample_size_cm : scalar
        sample size in cm along the probe propagation axis
    this_aN_dic : dictionary
        element symbol -> atomic number, e.g. {"C": 6, "O": 8}
    probe_energy : ndarray
        1-element array holding the incident beam energy in keV
    dev : string
        "cpu" or a cuda device string

    Returns
    -------
    attenuation_map_flat : torch tensor
        (n_theta, sample_height_n * sample_size_n * sample_size_n) attenuation
        ratio of the probe before it enters each voxel
    transmission : torch tensor
        (n_theta, sample_height_n * sample_size_n) fraction of the probe
        leaving the far side of the sample
    """
    n_element = len(this_aN_dic)
    theta_ls = - tc.linspace(theta_st, theta_end, n_theta + 1)[:-1]
    grid_concentration = tc.tensor(np.load(src_path)).float().to(dev)
    aN_arr = np.array(list(this_aN_dic.values()))
    # Total attenuation cross section of each element at the probe energy.
    probe_attCS_ls = tc.tensor(xlib_np.CS_Total(aN_arr, probe_energy).flatten()).float().to(dev)
    att_exponent_acc_map = tc.zeros((len(theta_ls), sample_height_n, sample_size_n, sample_size_n + 1), device=dev)
    for angle_idx, theta in enumerate(theta_ls):
        theta = tc.tensor(theta, device=dev)
        rotated = rotate(grid_concentration, theta, dev)
        for el in range(n_element):
            # Linear attenuation coefficient of this element, accumulated
            # along the beam direction (dim 2); the prepended zero column
            # makes the value "attenuation before entering the voxel".
            lac = rotated[el] * probe_attCS_ls[el]
            lac_acc = tc.cumsum(lac, axis=2)
            lac_acc = tc.cat((tc.zeros((sample_height_n, sample_size_n, 1), device=dev), lac_acc), dim=2)
            att_exponent_acc_map[angle_idx, :, :, :] += lac_acc * (sample_size_cm / sample_size_n)
    n_voxel_flat = sample_height_n * sample_size_n * sample_size_n
    attenuation_map_flat = tc.exp(-att_exponent_acc_map[:, :, :, :-1]).view(n_theta, n_voxel_flat).float().to(dev)
    transmission = tc.exp(-att_exponent_acc_map[:, :, :, -1]).view(n_theta, sample_height_n * sample_size_n).float().to(dev)
    return attenuation_map_flat, transmission
def create_XRT_data_3d(src_path, theta_st, theta_end, n_theta, sample_height_n, sample_size_n,
                       sample_size_cm, this_aN_dic, probe_energy, probe_cts, save_path, save_fname, theta_sep, Poisson_noise, dev):
    """
    Simulate the X-ray transmission (XRT) sinogram and write it to disk.

    Parameters
    ----------
    src_path : string
        the path of the elemental concentration grid
    theta_st, theta_end : float
        initial/final sample angle
    n_theta : integer
        number of sample angles
    sample_height_n : integer
        sample height along the rotational axis (pixels)
    sample_size_n : int
        sample size in pixels along the probe propagation axis
    sample_size_cm : scalar
        sample size in cm along the probe propagation axis
    this_aN_dic : dictionary
        element symbol -> atomic number, e.g. {"C": 6, "O": 8}
    probe_energy : ndarray
        1-element array holding the incident beam energy in keV
    probe_cts : float
        incident photon counts/s
    save_path : string
        directory in which to save the XRT data
    save_fname : string
        base file name for the saved array(s)
    theta_sep : bool
        if True, save one file per projection angle; otherwise one array
    Poisson_noise : bool
        if True, replace the noiseless counts with Poisson samples
    dev : string
        "cpu" or a cuda device string

    Returns
    -------
    XRT_data : torch tensor or ndarray
        shape (n_theta, sample_height_n * sample_size_n); an ndarray when
        Poisson noise is applied, otherwise a tensor on `dev`.
    """
    XRT_data = probe_cts * attenuation_3d(src_path, theta_st, theta_end, n_theta, sample_height_n, sample_size_n,
                                          sample_size_cm, this_aN_dic, probe_energy, dev)[1]
    if Poisson_noise == True:
        # The generator needs host-side data; .cpu() also fixes the crash
        # that occurred when XRT_data lived on a cuda device.
        random_noise_generator = default_rng()
        XRT_data = random_noise_generator.poisson(XRT_data.cpu())
    # makedirs(..., exist_ok=True) avoids the check-then-create race of the
    # original exists()/mkdir() pair.
    os.makedirs(save_path, exist_ok=True)
    # np.save needs host data; after Poisson noise XRT_data is already an
    # ndarray (the original called .cpu() on it unconditionally and crashed).
    XRT_data_host = XRT_data if isinstance(XRT_data, np.ndarray) else XRT_data.cpu()
    if theta_sep == True:
        for this_theta_idx in tqdm(range(n_theta)):
            np.save(os.path.join(save_path, save_fname + '_{}'.format(this_theta_idx)), XRT_data_host[this_theta_idx])
    else:
        np.save(os.path.join(save_path, save_fname), XRT_data_host)
    return XRT_data
def MakeFLlinesDictionary(this_aN_dic, probe_energy,
                          sample_size_n, sample_size_cm,
                          fl_line_groups = np.array(["K", "L", "M"]), fl_K = fl_K, fl_L = fl_L, fl_M = fl_M,
                          group_lines = True):
    """
    Build the dictionary describing the detectable fluorescence lines.

    Parameters
    ----------
    this_aN_dic : dictionary
        element symbol -> atomic number, e.g. {"C": 6, "O": 8}
    probe_energy : ndarray
        1-element array holding the incident beam energy in keV
    sample_size_n : int
        sample size in pixels along the probe propagation axis
    sample_size_cm : scalar
        sample size in cm along the probe propagation axis
    fl_line_groups : ndarray of string
        XRF line-group names; default np.array(["K", "L", "M"])
    fl_K, fl_L, fl_M : ndarray
        sub-lines of each series in the format xraylib expects
    group_lines : boolean
        whether to treat all K (or L, M) sub-lines as one grouped line

    Returns
    -------
    FL_all_elements_dic : dictionary with keys
        "(element_name, Line)" : ndarray of [element symbol, line group] pairs
        "fl_energy" : cross-section-weighted line energy (keV) per group
        "detected_fl_unit_concentration" : XRF yield per voxel at unit
            concentration (1 g/cm^3) for each grouped line
        "n_line_group_each_element" : number of line groups per element
        "n_lines" : total number of grouped fluorescence lines
    """
    element_ls = np.array(list(this_aN_dic.keys()))
    aN_ls = np.array(list(this_aN_dic.values()))
    n_line_group = len(fl_line_groups)
    voxel_size = sample_size_cm/sample_size_n
    # Fluorescence production cross sections per sub-line at the probe energy.
    fl_cs_K = xlib_np.CS_FluorLine_Kissel_Cascade(aN_ls, fl_K, probe_energy)
    fl_cs_L = xlib_np.CS_FluorLine_Kissel_Cascade(aN_ls, fl_L, probe_energy)
    fl_cs_M = xlib_np.CS_FluorLine_Kissel_Cascade(aN_ls, fl_M, probe_energy)
    # Remove the extra dimension with only 1 element (the probe-energy axis).
    fl_cs_K = np.reshape(fl_cs_K, (fl_cs_K.shape[:-1]))
    fl_cs_L = np.reshape(fl_cs_L, (fl_cs_L.shape[:-1]))
    fl_cs_M = np.reshape(fl_cs_M, (fl_cs_M.shape[:-1]))
    fl_energy_K = xlib_np.LineEnergy(aN_ls, fl_K)
    fl_energy_L = xlib_np.LineEnergy(aN_ls, fl_L)
    fl_energy_M = xlib_np.LineEnergy(aN_ls, fl_M)
    # NOTE: the original code built an FL_all_elements_dic with a different
    # key set a few lines earlier and immediately overwrote it here; that
    # dead assignment has been removed.
    FL_all_elements_dic = {"(element_name, Line)": [], "fl_energy": np.array([]), "detected_fl_unit_concentration": np.array([]),
                           "n_line_group_each_element": np.array([]), "n_lines": None}
    if group_lines == True:
        fl_energy_group = np.zeros((len(element_ls),n_line_group))
        fl_cs_group = np.zeros((len(element_ls),n_line_group))
        for i, element_name in enumerate(element_ls):
            # Collapse each line family to one cross-section-weighted energy
            # and the summed cross section; zero when the family is absent.
            if np.sum(fl_cs_K[i] != 0):
                fl_energy_group[i,0] = np.average(fl_energy_K[i], weights=fl_cs_K[i])
                fl_cs_group[i,0] = np.sum(fl_cs_K[i])
            else:
                fl_energy_group[i,0] = 0
                fl_cs_group[i,0] = 0
            if np.sum(fl_cs_L[i] != 0):
                fl_energy_group[i,1] = np.average(fl_energy_L[i], weights=fl_cs_L[i])
                fl_cs_group[i,1] = np.sum(fl_cs_L[i])
            else:
                fl_energy_group[i,1] = 0
                fl_cs_group[i,1] = 0
            if np.sum(fl_cs_M[i] != 0):
                fl_energy_group[i,2] = np.average(fl_energy_M[i], weights=fl_cs_M[i])
                fl_cs_group[i,2] = np.sum(fl_cs_M[i])
            else:
                fl_energy_group[i,2] = 0
                fl_cs_group[i,2] = 0
            # Keep only the line groups that actually fluoresce.
            element_Line = fl_line_groups[fl_energy_group[i]!= 0]
            element_Line = [[element_name, element_Line[j]] for j in range(len(element_Line))]
            for k in range(len(element_Line)):
                FL_all_elements_dic["(element_name, Line)"].append(element_Line[k])
            Line_energy = fl_energy_group[i][fl_energy_group[i]!=0]
            FL_all_elements_dic["fl_energy"] = np.append(FL_all_elements_dic["fl_energy"], Line_energy)
            fl_unit_con = fl_cs_group[i][fl_energy_group[i]!=0] * voxel_size
            FL_all_elements_dic["detected_fl_unit_concentration"] = np.append(FL_all_elements_dic["detected_fl_unit_concentration"], fl_unit_con)
            FL_all_elements_dic["n_line_group_each_element"] = np.append(FL_all_elements_dic["n_line_group_each_element"], len(fl_unit_con))
    FL_all_elements_dic["(element_name, Line)"] = np.array(FL_all_elements_dic["(element_name, Line)"])
    FL_all_elements_dic["n_lines"] = len(FL_all_elements_dic["(element_name, Line)"])
    return FL_all_elements_dic
def generate_fl_signal_from_each_voxel_3d(src_path, theta_st, theta_end, n_theta, sample_size_n, sample_height_n, sample_size_cm, this_aN_dic, probe_energy, dev):
    """
    Build, for every projection angle, the XRF signal each voxel would emit
    at unit incident intensity (concentration times per-line unit yield).

    Parameters mirror attenuation_3d; the fluorescence line grouping comes
    from MakeFLlinesDictionary.

    Returns
    -------
    torch tensor of shape
    (n_theta, n_lines, sample_height_n * sample_size_n * sample_size_n).
    """
    element_ls = np.array(list(this_aN_dic.keys()))
    n_element = tc.tensor(len(element_ls)).to(dev)
    theta_ls = - tc.linspace(theta_st, theta_end, n_theta + 1)[:-1].to(dev)
    grid_concentration = tc.tensor(np.load(src_path)).float().to(dev)
    fl_all_lines_dic = MakeFLlinesDictionary(this_aN_dic, probe_energy,
                                             sample_size_n.cpu().numpy(), sample_size_cm.cpu().numpy(),
                                             fl_line_groups=np.array(["K", "L", "M"]),
                                             fl_K=fl_K, fl_L=fl_L, fl_M=fl_M,
                                             group_lines=True)
    n_voxels = sample_height_n * sample_size_n * sample_size_n
    fl_map_tot = tc.zeros((n_theta, fl_all_lines_dic["n_lines"], n_voxels), device=dev)
    for angle_idx, theta in enumerate(theta_ls):
        rotated = rotate(grid_concentration, tc.tensor(theta, dtype=tc.float32), dev)
        rotated_flat = rotated.view(len(element_ls), n_voxels)
        line_idx = 0
        for el in range(n_element):
            n_lines_el = int(fl_all_lines_dic["n_line_group_each_element"][el])
            # Per-line XRF yield at unit concentration for this element.
            fl_unit = fl_all_lines_dic["detected_fl_unit_concentration"][line_idx:line_idx + n_lines_el]
            # FL signal over this element's lines for each voxel.
            per_line = [rotated_flat[el] * unit_yield for unit_yield in fl_unit]
            per_line = tc.stack(per_line).float()
            fl_map_tot[angle_idx, line_idx:line_idx + per_line.shape[0], :] = per_line
            line_idx = line_idx + len(fl_unit)
    return fl_map_tot
### The following trace_beam functions solve the intersection of a ray with planes.
### Three types of plane can be specified: x = some constant (d_x), y = some constant (d_y) and z = some constant (d_z).
### The corresponding intersection points are solved with trace_beam_x, trace_beam_y and trace_beam_z respectively.
# The ray uses a parametric form with a parameter t: R(t) = (1-t) * S + t * D, where S and D are the coordinates of the sample voxel and the detector point.
# The intersection coordinates: (x, y, z) = (Ix, Iy, Iz) at t=t'
# 4 equations are used to solve the intersection point:
# From the parametric function of the ray:
# Iz = (1-t') * z_s + t' * z_d
# Ix = (1-t') * x_s + t' * x_d
# Iy = (1-t') * y_s + t' * y_d
# From the equation of the plane:
# Ix = some constant (d_x), Iy = some constant (d_y) or Iz = some constant (d_z)
# Rearrange the equations above to solve for (Iz, Ix, Iy, t').
# Define the system of equations AX = b to solve the intersection point; A has dimension (n_batch, 4, 4) and b has dimension (n_batch, 4, 1).
# n_batch is the number of planes for which we want the intersection with the ray.
def trace_beam_z(z_s, x_s, y_s, z_d, x_d, y_d, d_z_ls):
    """
    Intersect the ray R(t) = (1-t)*S + t*D with the planes z = d_z.

    Parameters: source S = (z_s, x_s, y_s), detector point D = (z_d, x_d, y_d),
    and d_z_ls, the list of z = const plane offsets (floats).
    Returns an ndarray of shape (len(d_z_ls), 3) of (z, x, y) intersections;
    an empty (0, 3) array when no plane is given or the ray is parallel to
    every z = const plane.
    """
    if len(d_z_ls) == 0 or z_s == z_d:
        return np.stack((np.array([]), np.array([]), np.array([])), axis=-1)
    # Unknowns per plane: (Iz, Ix, Iy, t); rows 1-3 encode R(t), row 4 pins Iz = d_z.
    A = tc.tensor([[1, 0, 0, z_s - z_d],
                   [0, 1, 0, x_s - x_d],
                   [0, 0, 1, y_s - y_d],
                   [1, 0, 0, 0]])
    A = A.repeat([len(d_z_ls), 1, 1])
    b1 = tc.tensor([[[z_s], [x_s], [y_s]]]).repeat([len(d_z_ls), 1, 1])
    b2 = tc.tensor([[[d_z]] for d_z in d_z_ls])
    b = tc.cat((b1, b2), dim=1)
    # torch.solve was removed from PyTorch; torch.linalg.solve is the replacement.
    sol = tc.linalg.solve(A, b)
    # Drop the parameter t and return only the (z, x, y) coordinates.
    # (Indexing instead of .view avoids the non-contiguous-view failure.)
    return np.array(sol[:, :-1, 0])
def trace_beam_x(z_s, x_s, y_s, z_d, x_d, y_d, d_x_ls):
    """
    Intersect the ray R(t) = (1-t)*S + t*D with the planes x = d_x.

    Parameters: source S = (z_s, x_s, y_s), detector point D = (z_d, x_d, y_d),
    and d_x_ls, the list of x = const plane offsets (floats).
    Returns an ndarray of shape (len(d_x_ls), 3) of (z, x, y) intersections;
    an empty (0, 3) array when no plane is given or the ray is parallel to
    every x = const plane.
    """
    # CONSISTENCY FIX: trace_beam_z and trace_beam_y guard against a ray
    # parallel to their planes (z_s == z_d / y_s == y_d); the x_s == x_d
    # check was missing here and would feed a singular system to the solver.
    if len(d_x_ls) == 0 or x_s == x_d:
        return np.stack((np.array([]), np.array([]), np.array([])), axis=-1)
    # Unknowns per plane: (Iz, Ix, Iy, t); rows 1-3 encode R(t), row 4 pins Ix = d_x.
    A = tc.tensor([[1, 0, 0, z_s - z_d],
                   [0, 1, 0, x_s - x_d],
                   [0, 0, 1, y_s - y_d],
                   [0, 1, 0, 0]])
    A = A.repeat([len(d_x_ls), 1, 1])
    b1 = tc.tensor([[[z_s], [x_s], [y_s]]]).repeat([len(d_x_ls), 1, 1])
    b2 = tc.tensor([[[d_x]] for d_x in d_x_ls])
    b = tc.cat((b1, b2), dim=1)
    # torch.solve was removed from PyTorch; torch.linalg.solve is the replacement.
    sol = tc.linalg.solve(A, b)
    # Drop the parameter t and return only the (z, x, y) coordinates.
    return np.array(sol[:, :-1, 0])
def trace_beam_y(z_s, x_s, y_s, z_d, x_d, y_d, d_y_ls):
    """
    Intersect the ray R(t) = (1-t)*S + t*D with the planes y = d_y.

    Parameters: source S = (z_s, x_s, y_s), detector point D = (z_d, x_d, y_d),
    and d_y_ls, the list of y = const plane offsets (floats).
    Returns an ndarray of shape (len(d_y_ls), 3) of (z, x, y) intersections;
    an empty (0, 3) array when no plane is given or the ray is parallel to
    every y = const plane.
    """
    if len(d_y_ls) == 0 or y_s == y_d:
        return np.stack((np.array([]), np.array([]), np.array([])), axis=-1)
    # Unknowns per plane: (Iz, Ix, Iy, t); rows 1-3 encode R(t), row 4 pins Iy = d_y.
    A = tc.tensor([[1, 0, 0, z_s - z_d],
                   [0, 1, 0, x_s - x_d],
                   [0, 0, 1, y_s - y_d],
                   [0, 0, 1, 0]])
    A = A.repeat([len(d_y_ls), 1, 1])
    b1 = tc.tensor([[[z_s], [x_s], [y_s]]]).repeat([len(d_y_ls), 1, 1])
    b2 = tc.tensor([[[d_y]] for d_y in d_y_ls])
    b = tc.cat((b1, b2), dim=1)
    # torch.solve was removed from PyTorch; torch.linalg.solve is the replacement.
    sol = tc.linalg.solve(A, b)
    # Drop the parameter t and return only the (z, x, y) coordinates.
    return np.array(sol[:, :-1, 0])
def intersecting_length_fl_detectorlet_3d(det_size_cm, det_from_sample_cm, det_ds_spacing_cm, sample_size_n, sample_size_cm, sample_height_n, P_save_path):
    """
    Trace the XRF emission ray from every sample voxel to every detector point and
    record which voxels each ray crosses and for how long.

    Parameters
    ----------
    det_size_cm : float
        The diameter of the circle to distribute the detector points
    det_from_sample_cm : float
        The distance between the detector plane and the sample boundary plane
    det_ds_spacing_cm : float
        The spacing between detector points
    sample_size_n: int scalar (tensor)
        sample size in number of pixels on the side along the probe propagation axis
    sample_size_cm: scalar (tensor)
        sample size in cm on the side along the probe propagation axis
    sample_height_n : integer (tensor)
        The height of the sample along the rotational axis (in number of pixels)
    P_save_path : string
        The path that saves the tensor P

    Returns
    -------
    longest_int_length : integer
        The largest number of voxels any traced ray intersects (when loaded from
        cache this is the per-source slot count of the full P instead).
    n_det : integer
        The number of the detector points within the circle with the diameter, det_size_cm.
    P : numpy array
        an array with the dimension (n_det, 3, n_voxels * diagonal_length_n)
        n_voxels: the number of voxels of the sample.
        diagonal_length_n: the number of voxels along the diagonal direction of the sample
        P contains the information of intersecting voxels of the emitted XRF rays
        (along the connection between each FL emitting source voxel and each detector point).
        For each detector point (total: n_det), 3 rows of values:
            1st row, the index of the FL emitting source voxel (index of the flattened sample grid).
            2nd row, the index of the intersecting voxels.
            3rd row, the intersecting length in cm.
        For example:
        [[0, 0, 0, 0, 0, 0, ..., 0, 1, 1, 1, 1, 0, ..., 0, 2, 2, 2, 0, ..., 0, ......, 0, ...,0]
                                   |_________| \\________|
                                    \\           \\the remaining (diagonal_length_n - 4) slots are set to 0
                                     \\4 intersecting voxels from the emitting source at index 1
         [5,10,15,20,25, 0, ..., 0, 6,11,16,21, 0, ..., 0, 7,12,17, 0, ..., 0, ......, 0, ...,0]
                                   |_________| \\________|
                                    \\           \\the remaining (diagonal_length_n - 4) slots are set to 0
                                     \\4 intersecting voxels at index 6, 11, 16, 21 from the emitting source at index 1
         [0.1, 0.1, 0.1, 0.1, 0, 0, ..., 0, 0.2, 0.2, 0.2 ,0.2, 0, ..., 0, 0.3, 0.3, 0.3, 0, ..., 0, ......, 0, ...,0]]
                                           |_________________| \\________|
                                            \\                   \\the remaining (diagonal_length_n - 4) slots are set to 0
                                             \\4 intersecting lengths corresponding to the intersecting voxels in the 2nd row
        The intersecting number of voxels from each source is not always the same; the maximum
        possible number is the number of voxels along the diagonal of the sample, so
        diagonal_length_n slots are reserved per source and the unused tail is zero-filled.
    """
    if os.path.isfile(P_save_path + ".npy"):
        # Reuse the cached ray-tracing result instead of re-tracing every ray.
        P = np.load(P_save_path + ".npy")
        n_det = P.shape[0]
        # NOTE(review): this recovers dia_len_n (the per-source slot count of the
        # full P), not the true longest intersection count stored with the
        # "_short" file -- confirm callers only use it together with the full P.
        longest_int_length = P.shape[2]//(sample_height_n * sample_size_n**2)
        print(f"number of detecting points: {n_det}")
    else:
        ### Calculating voxel size in cm
        voxel_size_cm = sample_size_cm/sample_size_n
        ### Diameter of the XRF detector in number of voxels
        det_size_n = int(np.ceil(det_size_cm/voxel_size_cm))
        ### Desired spacing between detectorlets, converted to number of sample voxels
        det_ds_spacing_n = int(det_ds_spacing_cm/voxel_size_cm)
        # Position of the center of each source voxel (z_s, x_s, y_s); shifted by 0.5
        # from the voxel index so it refers to the voxel center.
        z_s, x_s, y_s = np.indices((int(sample_height_n), int(sample_size_n), int(sample_size_n))) + 0.5
        voxel_pos_ls_flat = np.stack((z_s.flatten(), x_s.flatten(), y_s.flatten()), axis=-1)
        ### The detector is parallel to the yz-plane; its x-position depends on the
        ### distance between the sample and the detector.
        det_axis_1_idx = sample_size_n + np.ceil(det_from_sample_cm/voxel_size_cm) + 0.5
        ## Center of the detector on the yz-plane
        det_center_yz = (int(sample_size_n)/2., int(sample_size_n)/2.)
        ## y and z locations (axis 2 and axis 0) of the detectorlets, confined to a
        ## circle on the yz-plane.
        end_det_axis_2_idx_ls = np.array([int((sample_size_n - det_ds_spacing_n * np.floor(det_size_n/det_ds_spacing_n))/2.),
                                          int((sample_size_n + det_ds_spacing_n * np.floor(det_size_n/det_ds_spacing_n))/2.)])
        # np.int was removed in NumPy 1.24; the builtin int is the drop-in replacement.
        det_axis_2_idx_ls = np.linspace(end_det_axis_2_idx_ls[0], end_det_axis_2_idx_ls[1], int(det_size_n/det_ds_spacing_n + 1))
        end_det_axis_0_idx_ls = np.array([int((sample_height_n - det_ds_spacing_n * np.floor(det_size_n/det_ds_spacing_n))/2.),
                                          int((sample_height_n + det_ds_spacing_n * np.floor(det_size_n/det_ds_spacing_n))/2.)])
        det_axis_0_idx_ls = np.linspace(end_det_axis_0_idx_ls[0], end_det_axis_0_idx_ls[1], int(det_size_n/det_ds_spacing_n + 1))
        ## Meshgrid of y and z coordinates; keep only the points inside the detector circle
        y_d, z_d = np.meshgrid(det_axis_2_idx_ls, det_axis_0_idx_ls)
        yz_mask = ((y_d - det_center_yz[0])**2 + (z_d - det_center_yz[1])**2 <= (det_size_n/2)**2).flatten()
        y_d_flat, z_d_flat = y_d.flatten()[yz_mask], z_d.flatten()[yz_mask]
        ## All detectorlets share the same x-position.
        x_d_flat = np.full(y_d_flat.shape, det_axis_1_idx)
        det_pos_ls_flat = np.stack((z_d_flat, x_d_flat, y_d_flat), axis=-1)
        n_det = len(det_pos_ls_flat)
        print(f"number of detecting points: {n_det}")
        ## Sample edges: sample_x_edge is the edge closer to the XRF detector;
        ## sample_y_edge/sample_z_edge hold both boundary planes of their axis.
        sample_x_edge = np.array([sample_size_n])
        sample_y_edge = np.array([0, sample_size_n])
        sample_z_edge = np.array([0, sample_height_n])
        dia_len_n = int((sample_height_n**2 + sample_size_n**2 + sample_size_n**2)**0.5)
        P = tc.zeros(n_det, 3, dia_len_n * sample_height_n * sample_size_n**2)
        longest_int_length = 0
        for i, det_pos in enumerate(det_pos_ls_flat):
            for j, v in enumerate(tqdm(voxel_pos_ls_flat)):
                # Intersection of the ray with the sample boundary facing the detector (constant-x plane)
                bdx_int = trace_beam_x(v[0], v[1], v[2], det_pos[0], det_pos[1], det_pos[2], sample_x_edge)
                # Intersections with the two boundary planes on axis-1 and axis-2; the
                # relevant one lies within the voxel->detectorlet segment, which is
                # always the candidate with the larger x coordinate.
                bdy_int = trace_beam_y(v[0], v[1], v[2], det_pos[0], det_pos[1], det_pos[2], sample_y_edge)
                if len(bdy_int) != 0:
                    bdy_int = np.array([bdy_int[np.argmax(bdy_int[:,1])]])
                else:
                    pass
                bdz_int = trace_beam_z(v[0], v[1], v[2], det_pos[0], det_pos[1], det_pos[2], sample_z_edge)
                if len(bdz_int) != 0:
                    bdz_int = np.array([bdz_int[np.argmax(bdz_int[:,1])]])
                else:
                    pass
                # The ray first leaves the sample at the candidate with the smallest x value.
                bd_int_ls = np.concatenate((bdz_int, bdx_int, bdy_int))
                bd_int = np.clip(np.abs((bd_int_ls[np.argmin(bd_int_ls[:,1])])), 0, sample_size_n)
                # x,y,z-values of every voxel boundary the ray crosses (excluding the sample edge)
                z_edge_ls = np.where(bd_int[0] > v[0], np.linspace(np.ceil(bd_int[0])-1, np.ceil(v[0]), int(np.abs(np.ceil(bd_int[0]) - np.ceil(v[0])))),
                                     np.linspace(np.ceil(v[0])-1, np.ceil(bd_int[0]), int(np.abs(np.ceil(bd_int[0]) - np.ceil(v[0])))))
                x_edge_ls = np.where(bd_int[1] > v[1], np.linspace(np.ceil(bd_int[1])-1, np.ceil(v[1]), int(np.abs(np.ceil(bd_int[1]) - np.ceil(v[1])))),
                                     np.linspace(np.ceil(v[1])-1, np.ceil(bd_int[1]), int(np.abs(np.ceil(bd_int[1]) - np.ceil(v[1])))))
                y_edge_ls = np.where(bd_int[2] > v[2], np.linspace(np.ceil(bd_int[2])-1, np.ceil(v[2]), int(np.abs(np.ceil(bd_int[2]) - np.ceil(v[2])))),
                                     np.linspace(np.ceil(v[2])-1, np.ceil(bd_int[2]), int(np.abs(np.ceil(bd_int[2]) - np.ceil(v[2])))))
                z_edge_int_ls = trace_beam_z(v[0], v[1], v[2], det_pos[0], det_pos[1], det_pos[2], z_edge_ls)
                x_edge_int_ls = trace_beam_x(v[0], v[1], v[2], det_pos[0], det_pos[1], det_pos[2], x_edge_ls)
                y_edge_int_ls = trace_beam_y(v[0], v[1], v[2], det_pos[0], det_pos[1], det_pos[2], y_edge_ls)
                # Collect all intersections and sort them by the x coordinate
                int_ls = np.concatenate((x_edge_int_ls, y_edge_int_ls, z_edge_int_ls, np.array(bd_int)[np.newaxis,:]))
                int_ls = int_ls[np.argsort(int_ls[:,1])]
                # Length of the ray inside each crossed voxel
                int_length = np.sqrt(np.diff(int_ls[:,0])**2 + np.diff(int_ls[:,1])**2 + np.diff(int_ls[:,2])**2)
                # Drop duplicated intersection points (zero-length segments)
                idx_duplicate = np.array(np.where(int_length==0)).flatten()
                int_ls = np.delete(int_ls, idx_duplicate, 0)
                int_length = np.delete(int_length, idx_duplicate)
                # Index of each crossed voxel = floor of the midpoint of two consecutive intersections
                int_ls_shift = np.zeros((int_ls.shape))
                int_ls_shift[1:] = int_ls[:-1]
                int_idx = np.floor((int_ls + int_ls_shift)/2)[1:]
                # Flatten (z, x, y) into the C-order index of the (H, S, S) sample grid.
                # Bug fix: the axis-0 stride is sample_size_n**2 (S*S), not H*S; the
                # old H*S stride only matched j's flattening when H == S.
                int_idx_flat = int_idx[:,0] * (sample_size_n.item() * sample_size_n.item()) + int_idx[:,1] * sample_size_n.item() + int_idx[:,2]
                if len(int_idx_flat) > longest_int_length:
                    longest_int_length = len(int_idx_flat)
                P[i, 0, j * dia_len_n: j * dia_len_n + len(int_idx_flat)] = j
                P[i, 1, j * dia_len_n: j * dia_len_n + len(int_idx_flat)] = tc.tensor(int_idx_flat)
                P[i, 2, j * dia_len_n: j * dia_len_n + len(int_idx_flat)] = tc.tensor(int_length * voxel_size_cm.item())
            tqdm._instances.clear()
        # Compact copy: keep only longest_int_length slots per source voxel.
        P_short = tc.zeros(n_det, 3, longest_int_length * sample_height_n * sample_size_n**2)
        for j, v in enumerate(tqdm(voxel_pos_ls_flat)):
            P_short[:,:,j * longest_int_length: (j+1) * longest_int_length] = P[:,:, j * dia_len_n: j * dia_len_n + longest_int_length]
        P = P.numpy()
        P_short = P_short.numpy()
        np.save(P_save_path + '_short.npy', P_short)
        np.save(P_save_path + ".npy", P)
    return longest_int_length, n_det, P
def self_absorption_att_ratio_single_theta_3d(src_path, n_det, P, det_size_cm, det_from_sample_cm, det_ds_spacing_cm, sample_size_n, sample_size_cm, sample_height_n,
                      this_aN_dic, probe_energy, dev, theta):
    """
    Compute the self-absorption attenuation ratio of each fluorescence line
    emitted from each source voxel, averaged over all detector points, for one
    sample rotation angle theta.

    Parameters
    ----------
    src_path : string
        Path of the .npy file holding the elemental concentration grid.
    n_det : integer
        Number of detector points (first dimension of P).
    P : torch tensor
        Ray-tracing tensor as produced by intersecting_length_fl_detectorlet_3d:
        per detector point m, P[m][0] is the source-voxel index, P[m][1] the
        intersected-voxel index and P[m][2] the intersecting length in cm.
    det_size_cm, det_from_sample_cm, det_ds_spacing_cm : float
        Detector geometry; accepted for a uniform call signature but not used
        in this function's body.
    sample_size_n, sample_size_cm, sample_height_n : scalar tensors
        Sample side length (pixels), side length (cm) and height (pixels).
    this_aN_dic : dict
        Element name -> atomic number (values are fed to xraylib as atomic numbers).
    probe_energy : array
        Incident probe energy, forwarded to MakeFLlinesDictionary.
    dev : torch device on which the computation runs.
    theta : scalar
        Rotation angle of the sample for this projection.

    Returns
    -------
    SA_att : torch tensor, dim (n_lines, n_voxel)
        Attenuation ratio of each fluorescence line emitted from each source
        voxel, averaged over the n_det detector ray paths.
    """
    # Detectable fluorescence line groups/energies for the requested elements.
    fl_all_lines_dic = MakeFLlinesDictionary(this_aN_dic, probe_energy, sample_size_n.cpu().numpy(), sample_size_cm.cpu().numpy(),
                          fl_line_groups = np.array(["K", "L", "M"]), fl_K = fl_K, fl_L = fl_L, fl_M = fl_M, group_lines = True)
    n_voxel = sample_height_n * sample_size_n * sample_size_n
    # Number of voxels along the diagonal of the sample box; P reserves this
    # many slots per source voxel.
    dia_len_n = int((sample_height_n**2 + sample_size_n**2 + sample_size_n**2)**0.5)
    n_lines = tc.as_tensor(fl_all_lines_dic["n_lines"]).to(dev)
    aN_ls = np.array(list(this_aN_dic.values()))
    grid_concentration = tc.from_numpy(np.load(src_path)).float().to(dev)
    n_element = len(this_aN_dic)
    # generate an array of total attenuation cross section with the dimension: (n_element, n_elemental_lines)
    # The component in the array represents the total attenuation cross section at some line energy in some element (with unitary concentration)
    FL_line_attCS_ls = tc.as_tensor(xlib_np.CS_Total(aN_ls, fl_all_lines_dic["fl_energy"])).float().to(dev)
    # Rotate the concentration grid to this projection angle, then flatten the voxels.
    concentration_map_rot = rotate(grid_concentration, theta, dev).float()
    concentration_map_rot_flat = concentration_map_rot.view(n_element, n_voxel).float()
    # lac: linear attenuation coefficient = concentration * attenuation_cross_section,
    # dimension: n_element, n_lines, n_voxel(FL source), n_voxel)
    lac = concentration_map_rot_flat.view(n_element, 1, 1, n_voxel) * FL_line_attCS_ls.view(n_element, n_lines, 1, 1)
    lac = lac.expand(-1, -1, n_voxel, -1).float()
    # Per detector point m: gather the lac of every intersected voxel (P[m][0]/P[m][1])
    # and weight it by the intersecting length (P[m][2]).
    att_exponent = tc.stack([lac[:,:, P[m][0].to(dtype=tc.long), P[m][1].to(dtype=tc.long)] * P[m][2].view(1, 1, -1).repeat(n_element, n_lines, 1) for m in range(n_det)])
    ## summing over the attenuation exponent contributed by all intersecting voxels, dim = (n_det, n_element, n_lines, n_voxel (FL source))
    att_exponent_voxel_sum = tc.sum(att_exponent.view(n_det, n_element, n_lines, n_voxel, dia_len_n), axis=-1)
    ## calculate the attenuation caused by all elements and get an array of dim = (n_det, n_lines, n_voxel (FL source)), and then take the average over n_det FL ray paths
    ## Final dim = (n_lines, n_voxel (FL source)) representing the attenuation ratio of each fluorescence line emitting from each source voxel.
    SA_att = tc.mean(tc.exp(-tc.sum(att_exponent_voxel_sum, axis=1)), axis=0)
    return SA_att
def create_XRF_data_single_theta_3d(n_det, P, theta_st, theta_end, n_theta, src_path, det_size_cm, det_from_sample_cm, det_ds_spacing_cm, sample_size_n,
                      sample_size_cm, sample_height_n, this_aN_dic, probe_cts, probe_energy, save_path, save_fname, Poisson_noise, dev, this_theta_idx):
    """
    Simulate the detected XRF signal for one projection angle and save it as
    a .npy file named '<save_fname>_<this_theta_idx>' under save_path.

    Returns the simulated signal with one row per fluorescence line and one
    column per probe position (voxel strip), optionally with Poisson noise.
    """
    # Recover this projection's rotation angle from the angle schedule.
    angle_schedule = - tc.linspace(theta_st, theta_end, n_theta + 1)[:-1]
    theta = angle_schedule[this_theta_idx]
    # Uniform incident probe intensity over the flattened voxel grid.
    incident_probe_flat = probe_cts * tc.ones((sample_height_n * sample_size_n * sample_size_n), device=dev)
    # Attenuation of the incident beam up to each voxel at this angle.
    beam_att_flat = attenuation_3d(src_path, theta_st, theta_end, n_theta, sample_height_n, sample_size_n, sample_size_cm, this_aN_dic, probe_energy, dev)[0][this_theta_idx]
    # Self-absorption of the emitted fluorescence on its way to the detector.
    sa_ratio = self_absorption_att_ratio_single_theta_3d(src_path, n_det, P, det_size_cm, det_from_sample_cm, det_ds_spacing_cm, sample_size_n, sample_size_cm, sample_height_n,
                          this_aN_dic, probe_energy, dev, theta)
    # Probe intensity reaching each voxel; dim (sample_height_n * sample_size_n * sample_size_n)
    attenuated_probe_flat = incident_probe_flat * beam_att_flat
    # Fluorescence yield per voxel; dim (n_elemental_line, n_voxel)
    fl_yield_per_voxel = generate_fl_signal_from_each_voxel_3d(src_path, theta_st, theta_end, n_theta, sample_size_n, sample_height_n, sample_size_cm, this_aN_dic, probe_energy, dev)[this_theta_idx]
    # Emitted fluorescence after self-absorption, per line and source voxel.
    signal = tc.unsqueeze(attenuated_probe_flat, dim=0) * fl_yield_per_voxel * sa_ratio
    signal = signal.view(-1, sample_height_n * sample_size_n, sample_size_n)
    # Sum each strip of voxels along the probe propagation direction.
    signal = tc.sum(signal, axis=-1)
    # Scale by the solid-angle fraction subtended by the detector disk.
    signal = signal * ((np.pi * (det_size_cm/2)**2) / det_from_sample_cm**2)/(4*np.pi)
    if Poisson_noise == True:
        noise_rng = default_rng()
        signal = noise_rng.poisson(signal)
    np.save(os.path.join(save_path, save_fname +'_{}'.format(this_theta_idx)), signal)
    return signal
def create_XRF_data_3d(P_save_path, theta_st, theta_end, n_theta, src_path, det_size_cm, det_from_sample_cm, det_ds_spacing_cm, sample_size_n,
                      sample_size_cm, sample_height_n, this_aN_dic, probe_cts, probe_energy, save_path, save_fname, Poisson_noise, dev):
    """
    Generate and save the simulated XRF dataset for every projection angle.

    Builds (or loads from cache at P_save_path) the detector ray-tracing tensor
    P once, then delegates each angle to create_XRF_data_single_theta_3d, which
    writes one .npy file per angle under save_path.
    """
    # Trace (or load) the voxel/detectorlet intersection geometry once for all angles.
    _longest, n_det, P = intersecting_length_fl_detectorlet_3d(det_size_cm, det_from_sample_cm, det_ds_spacing_cm, sample_size_n.cpu(), sample_size_cm.cpu(), sample_height_n.cpu(), P_save_path)
    P = tc.from_numpy(P).to(tc.float)
    angle_schedule = - tc.linspace(theta_st, theta_end, n_theta + 1)[:-1]
    for angle_idx, _angle in enumerate(tqdm(angle_schedule)):
        create_XRF_data_single_theta_3d(n_det, P, theta_st, theta_end, n_theta, src_path, det_size_cm, det_from_sample_cm, det_ds_spacing_cm, sample_size_n,
                          sample_size_cm, sample_height_n, this_aN_dic, probe_cts, probe_energy, save_path, save_fname, Poisson_noise, dev, angle_idx)
| [
"os.mkdir",
"numpy.load",
"torch.nn.functional.affine_grid",
"numpy.sum",
"xraylib_np.CS_Total",
"numpy.argmax",
"numpy.floor",
"torch.cat",
"numpy.argmin",
"xraylib_np.LineEnergy",
"numpy.random.default_rng",
"torch.cos",
"os.path.isfile",
"numpy.argsort",
"os.path.join",
"numpy.full"... | [((425, 560), 'numpy.array', 'np.array', (['[xlib.KA1_LINE, xlib.KA2_LINE, xlib.KA3_LINE, xlib.KB1_LINE, xlib.KB2_LINE,\n xlib.KB3_LINE, xlib.KB4_LINE, xlib.KB5_LINE]'], {}), '([xlib.KA1_LINE, xlib.KA2_LINE, xlib.KA3_LINE, xlib.KB1_LINE, xlib.\n KB2_LINE, xlib.KB3_LINE, xlib.KB4_LINE, xlib.KB5_LINE])\n', (433, 560), True, 'import numpy as np\n'), ((652, 869), 'numpy.array', 'np.array', (['[xlib.LA1_LINE, xlib.LA2_LINE, xlib.LB1_LINE, xlib.LB2_LINE, xlib.LB3_LINE,\n xlib.LB4_LINE, xlib.LB5_LINE, xlib.LB6_LINE, xlib.LB7_LINE, xlib.\n LB9_LINE, xlib.LB10_LINE, xlib.LB15_LINE, xlib.LB17_LINE]'], {}), '([xlib.LA1_LINE, xlib.LA2_LINE, xlib.LB1_LINE, xlib.LB2_LINE, xlib.\n LB3_LINE, xlib.LB4_LINE, xlib.LB5_LINE, xlib.LB6_LINE, xlib.LB7_LINE,\n xlib.LB9_LINE, xlib.LB10_LINE, xlib.LB15_LINE, xlib.LB17_LINE])\n', (660, 869), True, 'import numpy as np\n'), ((974, 1028), 'numpy.array', 'np.array', (['[xlib.MA1_LINE, xlib.MA2_LINE, xlib.MB_LINE]'], {}), '([xlib.MA1_LINE, xlib.MA2_LINE, xlib.MB_LINE])\n', (982, 1028), True, 'import numpy as np\n'), ((1049, 1074), 'numpy.array', 'np.array', (["['K', 'L', 'M']"], {}), "(['K', 'L', 'M'])\n", (1057, 1074), True, 'import numpy as np\n'), ((1854, 1881), 'torch.nn.functional.affine_grid', 'F.affine_grid', (['m', 'arr.shape'], {}), '(m, arr.shape)\n', (1867, 1881), True, 'import torch.nn.functional as F\n'), ((1890, 1934), 'torch.nn.functional.grid_sample', 'F.grid_sample', (['arr', 'g'], {'padding_mode': '"""border"""'}), "(arr, g, padding_mode='border')\n", (1903, 1934), True, 'import torch.nn.functional as F\n'), ((7233, 7258), 'numpy.array', 'np.array', (["['K', 'L', 'M']"], {}), "(['K', 'L', 'M'])\n", (7241, 7258), True, 'import numpy as np\n'), ((9821, 9883), 'xraylib_np.CS_FluorLine_Kissel_Cascade', 'xlib_np.CS_FluorLine_Kissel_Cascade', (['aN_ls', 'fl_K', 'probe_energy'], {}), '(aN_ls, fl_K, probe_energy)\n', (9856, 9883), True, 'import xraylib_np as xlib_np\n'), ((9898, 9960), 
'xraylib_np.CS_FluorLine_Kissel_Cascade', 'xlib_np.CS_FluorLine_Kissel_Cascade', (['aN_ls', 'fl_L', 'probe_energy'], {}), '(aN_ls, fl_L, probe_energy)\n', (9933, 9960), True, 'import xraylib_np as xlib_np\n'), ((9975, 10037), 'xraylib_np.CS_FluorLine_Kissel_Cascade', 'xlib_np.CS_FluorLine_Kissel_Cascade', (['aN_ls', 'fl_M', 'probe_energy'], {}), '(aN_ls, fl_M, probe_energy)\n', (10010, 10037), True, 'import xraylib_np as xlib_np\n'), ((10106, 10145), 'numpy.reshape', 'np.reshape', (['fl_cs_K', 'fl_cs_K.shape[:-1]'], {}), '(fl_cs_K, fl_cs_K.shape[:-1])\n', (10116, 10145), True, 'import numpy as np\n'), ((10162, 10201), 'numpy.reshape', 'np.reshape', (['fl_cs_L', 'fl_cs_L.shape[:-1]'], {}), '(fl_cs_L, fl_cs_L.shape[:-1])\n', (10172, 10201), True, 'import numpy as np\n'), ((10218, 10257), 'numpy.reshape', 'np.reshape', (['fl_cs_M', 'fl_cs_M.shape[:-1]'], {}), '(fl_cs_M, fl_cs_M.shape[:-1])\n', (10228, 10257), True, 'import numpy as np\n'), ((10279, 10310), 'xraylib_np.LineEnergy', 'xlib_np.LineEnergy', (['aN_ls', 'fl_K'], {}), '(aN_ls, fl_K)\n', (10297, 10310), True, 'import xraylib_np as xlib_np\n'), ((10329, 10360), 'xraylib_np.LineEnergy', 'xlib_np.LineEnergy', (['aN_ls', 'fl_L'], {}), '(aN_ls, fl_L)\n', (10347, 10360), True, 'import xraylib_np as xlib_np\n'), ((10379, 10410), 'xraylib_np.LineEnergy', 'xlib_np.LineEnergy', (['aN_ls', 'fl_M'], {}), '(aN_ls, fl_M)\n', (10397, 10410), True, 'import xraylib_np as xlib_np\n'), ((14756, 14869), 'torch.zeros', 'tc.zeros', (["(n_theta, fl_all_lines_dic['n_lines'], sample_height_n * sample_size_n *\n sample_size_n)"], {'device': 'dev'}), "((n_theta, fl_all_lines_dic['n_lines'], sample_height_n *\n sample_size_n * sample_size_n), device=dev)\n", (14764, 14869), True, 'import torch as tc\n'), ((22665, 22701), 'os.path.isfile', 'os.path.isfile', (["(P_save_path + '.npy')"], {}), "(P_save_path + '.npy')\n", (22679, 22701), False, 'import os\n'), ((36445, 36474), 'torch.sum', 'tc.sum', (['fl_signal_SA'], {'axis': '(-1)'}), 
'(fl_signal_SA, axis=-1)\n', (36451, 36474), True, 'import torch as tc\n'), ((3949, 3977), 'torch.tensor', 'tc.tensor', (['theta'], {'device': 'dev'}), '(theta, device=dev)\n', (3958, 3977), True, 'import torch as tc\n'), ((6594, 6607), 'numpy.random.default_rng', 'default_rng', ([], {}), '()\n', (6605, 6607), False, 'from numpy.random import default_rng\n'), ((6684, 6709), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (6698, 6709), False, 'import os\n'), ((6719, 6738), 'os.mkdir', 'os.mkdir', (['save_path'], {}), '(save_path)\n', (6727, 6738), False, 'import os\n'), ((9695, 9707), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (9703, 9707), True, 'import numpy as np\n'), ((9743, 9755), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (9751, 9755), True, 'import numpy as np\n'), ((10480, 10492), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (10488, 10492), True, 'import numpy as np\n'), ((10528, 10540), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (10536, 10540), True, 'import numpy as np\n'), ((10598, 10610), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (10606, 10610), True, 'import numpy as np\n'), ((12587, 12640), 'numpy.array', 'np.array', (["FL_all_elements_dic['(element_name, Line)']"], {}), "(FL_all_elements_dic['(element_name, Line)'])\n", (12595, 12640), True, 'import numpy as np\n'), ((17244, 17339), 'torch.tensor', 'tc.tensor', (['[[1, 0, 0, z_s - z_d], [0, 1, 0, x_s - x_d], [0, 0, 1, y_s - y_d], [1, 0, 0, 0]\n ]'], {}), '([[1, 0, 0, z_s - z_d], [0, 1, 0, x_s - x_d], [0, 0, 1, y_s - y_d],\n [1, 0, 0, 0]])\n', (17253, 17339), True, 'import torch as tc\n'), ((17465, 17503), 'torch.tensor', 'tc.tensor', (['[[[d_z]] for d_z in d_z_ls]'], {}), '([[[d_z]] for d_z in d_z_ls])\n', (17474, 17503), True, 'import torch as tc\n'), ((17516, 17539), 'torch.cat', 'tc.cat', (['(b1, b2)'], {'dim': '(1)'}), '((b1, b2), dim=1)\n', (17522, 17539), True, 'import torch as tc\n'), ((17557, 17571), 'torch.solve', 'tc.solve', 
(['b', 'A'], {}), '(b, A)\n', (17565, 17571), True, 'import torch as tc\n'), ((17847, 17942), 'torch.tensor', 'tc.tensor', (['[[1, 0, 0, z_s - z_d], [0, 1, 0, x_s - x_d], [0, 0, 1, y_s - y_d], [0, 1, 0, 0]\n ]'], {}), '([[1, 0, 0, z_s - z_d], [0, 1, 0, x_s - x_d], [0, 0, 1, y_s - y_d],\n [0, 1, 0, 0]])\n', (17856, 17942), True, 'import torch as tc\n'), ((18068, 18106), 'torch.tensor', 'tc.tensor', (['[[[d_x]] for d_x in d_x_ls]'], {}), '([[[d_x]] for d_x in d_x_ls])\n', (18077, 18106), True, 'import torch as tc\n'), ((18119, 18142), 'torch.cat', 'tc.cat', (['(b1, b2)'], {'dim': '(1)'}), '((b1, b2), dim=1)\n', (18125, 18142), True, 'import torch as tc\n'), ((18160, 18174), 'torch.solve', 'tc.solve', (['b', 'A'], {}), '(b, A)\n', (18168, 18174), True, 'import torch as tc\n'), ((18459, 18554), 'torch.tensor', 'tc.tensor', (['[[1, 0, 0, z_s - z_d], [0, 1, 0, x_s - x_d], [0, 0, 1, y_s - y_d], [0, 0, 1, 0]\n ]'], {}), '([[1, 0, 0, z_s - z_d], [0, 1, 0, x_s - x_d], [0, 0, 1, y_s - y_d],\n [0, 0, 1, 0]])\n', (18468, 18554), True, 'import torch as tc\n'), ((18680, 18718), 'torch.tensor', 'tc.tensor', (['[[[d_y]] for d_y in d_y_ls]'], {}), '([[[d_y]] for d_y in d_y_ls])\n', (18689, 18718), True, 'import torch as tc\n'), ((18731, 18754), 'torch.cat', 'tc.cat', (['(b1, b2)'], {'dim': '(1)'}), '((b1, b2), dim=1)\n', (18737, 18754), True, 'import torch as tc\n'), ((18772, 18786), 'torch.solve', 'tc.solve', (['b', 'A'], {}), '(b, A)\n', (18780, 18786), True, 'import torch as tc\n'), ((22715, 22744), 'numpy.load', 'np.load', (["(P_save_path + '.npy')"], {}), "(P_save_path + '.npy')\n", (22722, 22744), True, 'import numpy as np\n'), ((25296, 25345), 'numpy.meshgrid', 'np.meshgrid', (['det_axis_2_idx_ls', 'det_axis_0_idx_ls'], {}), '(det_axis_2_idx_ls, det_axis_0_idx_ls)\n', (25307, 25345), True, 'import numpy as np\n'), ((25702, 25741), 'numpy.full', 'np.full', (['y_d_flat.shape', 'det_axis_1_idx'], {}), '(y_d_flat.shape, det_axis_1_idx)\n', (25709, 25741), True, 'import numpy as 
np\n'), ((25782, 25831), 'numpy.stack', 'np.stack', (['(z_d_flat, x_d_flat, y_d_flat)'], {'axis': '(-1)'}), '((z_d_flat, x_d_flat, y_d_flat), axis=-1)\n', (25790, 25831), True, 'import numpy as np\n'), ((26138, 26163), 'numpy.array', 'np.array', (['[sample_size_n]'], {}), '([sample_size_n])\n', (26146, 26163), True, 'import numpy as np\n'), ((26188, 26216), 'numpy.array', 'np.array', (['[0, sample_size_n]'], {}), '([0, sample_size_n])\n', (26196, 26216), True, 'import numpy as np\n'), ((26242, 26272), 'numpy.array', 'np.array', (['[0, sample_height_n]'], {}), '([0, sample_height_n])\n', (26250, 26272), True, 'import numpy as np\n'), ((26376, 26444), 'torch.zeros', 'tc.zeros', (['n_det', '(3)', '(dia_len_n * sample_height_n * sample_size_n ** 2)'], {}), '(n_det, 3, dia_len_n * sample_height_n * sample_size_n ** 2)\n', (26384, 26444), True, 'import torch as tc\n'), ((31416, 31493), 'torch.zeros', 'tc.zeros', (['n_det', '(3)', '(longest_int_length * sample_height_n * sample_size_n ** 2)'], {}), '(n_det, 3, longest_int_length * sample_height_n * sample_size_n ** 2)\n', (31424, 31493), True, 'import torch as tc\n'), ((31775, 31819), 'numpy.save', 'np.save', (["(P_save_path + '_short.npy')", 'P_short'], {}), "(P_save_path + '_short.npy', P_short)\n", (31782, 31819), True, 'import numpy as np\n'), ((31828, 31860), 'numpy.save', 'np.save', (["(P_save_path + '.npy')", 'P'], {}), "(P_save_path + '.npy', P)\n", (31835, 31860), True, 'import numpy as np\n'), ((35028, 35096), 'torch.ones', 'tc.ones', (['(sample_height_n * sample_size_n * sample_size_n)'], {'device': 'dev'}), '(sample_height_n * sample_size_n * sample_size_n, device=dev)\n', (35035, 35096), True, 'import torch as tc\n'), ((36732, 36745), 'numpy.random.default_rng', 'default_rng', ([], {}), '()\n', (36743, 36745), False, 'from numpy.random import default_rng\n'), ((37606, 37620), 'tqdm.tqdm', 'tqdm', (['theta_ls'], {}), '(theta_ls)\n', (37610, 37620), False, 'from tqdm import tqdm\n'), ((1644, 1657), 'torch.cos', 
'tc.cos', (['theta'], {}), '(theta)\n', (1650, 1657), True, 'import torch as tc\n'), ((1713, 1726), 'torch.sin', 'tc.sin', (['theta'], {}), '(theta)\n', (1719, 1726), True, 'import torch as tc\n'), ((1728, 1741), 'torch.cos', 'tc.cos', (['theta'], {}), '(theta)\n', (1734, 1741), True, 'import torch as tc\n'), ((1769, 1787), 'torch.stack', 'tc.stack', (['[m0, m1]'], {}), '([m0, m1])\n', (1777, 1787), True, 'import torch as tc\n'), ((3506, 3551), 'torch.linspace', 'tc.linspace', (['theta_st', 'theta_end', '(n_theta + 1)'], {}), '(theta_st, theta_end, n_theta + 1)\n', (3517, 3551), True, 'import torch as tc\n'), ((4177, 4206), 'torch.cumsum', 'tc.cumsum', (['lac_single'], {'axis': '(2)'}), '(lac_single, axis=2)\n', (4186, 4206), True, 'import torch as tc\n'), ((7000, 7035), 'os.path.join', 'os.path.join', (['save_path', 'save_fname'], {}), '(save_path, save_fname)\n', (7012, 7035), False, 'import os\n'), ((10867, 10890), 'numpy.sum', 'np.sum', (['(fl_cs_K[i] != 0)'], {}), '(fl_cs_K[i] != 0)\n', (10873, 10890), True, 'import numpy as np\n'), ((11145, 11168), 'numpy.sum', 'np.sum', (['(fl_cs_L[i] != 0)'], {}), '(fl_cs_L[i] != 0)\n', (11151, 11168), True, 'import numpy as np\n'), ((11423, 11446), 'numpy.sum', 'np.sum', (['(fl_cs_M[i] != 0)'], {}), '(fl_cs_M[i] != 0)\n', (11429, 11446), True, 'import numpy as np\n'), ((12099, 12155), 'numpy.append', 'np.append', (["FL_all_elements_dic['fl_energy']", 'Line_energy'], {}), "(FL_all_elements_dic['fl_energy'], Line_energy)\n", (12108, 12155), True, 'import numpy as np\n'), ((12301, 12378), 'numpy.append', 'np.append', (["FL_all_elements_dic['detected_fl_unit_concentration']", 'fl_unit_con'], {}), "(FL_all_elements_dic['detected_fl_unit_concentration'], fl_unit_con)\n", (12310, 12378), True, 'import numpy as np\n'), ((14622, 14647), 'numpy.array', 'np.array', (["['K', 'L', 'M']"], {}), "(['K', 'L', 'M'])\n", (14630, 14647), True, 'import numpy as np\n'), ((14966, 15000), 'torch.tensor', 'tc.tensor', (['theta'], {'dtype': 
'tc.float32'}), '(theta, dtype=tc.float32)\n', (14975, 15000), True, 'import torch as tc\n'), ((23119, 23155), 'numpy.ceil', 'np.ceil', (['(det_size_cm / voxel_size_cm)'], {}), '(det_size_cm / voxel_size_cm)\n', (23126, 23155), True, 'import numpy as np\n'), ((24733, 24774), 'numpy.int', 'np.int', (['(det_size_n / det_ds_spacing_n + 1)'], {}), '(det_size_n / det_ds_spacing_n + 1)\n', (24739, 24774), True, 'import numpy as np\n'), ((25125, 25166), 'numpy.int', 'np.int', (['(det_size_n / det_ds_spacing_n + 1)'], {}), '(det_size_n / det_ds_spacing_n + 1)\n', (25131, 25166), True, 'import numpy as np\n'), ((31531, 31554), 'tqdm.tqdm', 'tqdm', (['voxel_pos_ls_flat'], {}), '(voxel_pos_ls_flat)\n', (31535, 31554), False, 'from tqdm import tqdm\n'), ((32337, 32362), 'numpy.array', 'np.array', (["['K', 'L', 'M']"], {}), "(['K', 'L', 'M'])\n", (32345, 32362), True, 'import numpy as np\n'), ((32589, 32630), 'torch.as_tensor', 'tc.as_tensor', (["fl_all_lines_dic['n_lines']"], {}), "(fl_all_lines_dic['n_lines'])\n", (32601, 32630), True, 'import torch as tc\n'), ((34892, 34937), 'torch.linspace', 'tc.linspace', (['theta_st', 'theta_end', '(n_theta + 1)'], {}), '(theta_st, theta_end, n_theta + 1)\n', (34903, 34937), True, 'import torch as tc\n'), ((36135, 36184), 'torch.unsqueeze', 'tc.unsqueeze', (['probe_after_attenuation_flat'], {'dim': '(0)'}), '(probe_after_attenuation_flat, dim=0)\n', (36147, 36184), True, 'import torch as tc\n'), ((37460, 37476), 'torch.from_numpy', 'tc.from_numpy', (['P'], {}), '(P)\n', (37473, 37476), True, 'import torch as tc\n'), ((37507, 37552), 'torch.linspace', 'tc.linspace', (['theta_st', 'theta_end', '(n_theta + 1)'], {}), '(theta_st, theta_end, n_theta + 1)\n', (37518, 37552), True, 'import torch as tc\n'), ((1660, 1673), 'torch.sin', 'tc.sin', (['theta'], {}), '(theta)\n', (1666, 1673), True, 'import torch as tc\n'), ((10931, 10977), 'numpy.average', 'np.average', (['fl_energy_K[i]'], {'weights': 'fl_cs_K[i]'}), '(fl_energy_K[i], 
weights=fl_cs_K[i])\n', (10941, 10977), True, 'import numpy as np\n'), ((11014, 11032), 'numpy.sum', 'np.sum', (['fl_cs_K[i]'], {}), '(fl_cs_K[i])\n', (11020, 11032), True, 'import numpy as np\n'), ((11209, 11255), 'numpy.average', 'np.average', (['fl_energy_L[i]'], {'weights': 'fl_cs_L[i]'}), '(fl_energy_L[i], weights=fl_cs_L[i])\n', (11219, 11255), True, 'import numpy as np\n'), ((11292, 11310), 'numpy.sum', 'np.sum', (['fl_cs_L[i]'], {}), '(fl_cs_L[i])\n', (11298, 11310), True, 'import numpy as np\n'), ((11487, 11533), 'numpy.average', 'np.average', (['fl_energy_M[i]'], {'weights': 'fl_cs_M[i]'}), '(fl_energy_M[i], weights=fl_cs_M[i])\n', (11497, 11533), True, 'import numpy as np\n'), ((11570, 11588), 'numpy.sum', 'np.sum', (['fl_cs_M[i]'], {}), '(fl_cs_M[i])\n', (11576, 11588), True, 'import numpy as np\n'), ((17170, 17182), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (17178, 17182), True, 'import numpy as np\n'), ((17184, 17196), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (17192, 17196), True, 'import numpy as np\n'), ((17198, 17210), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (17206, 17210), True, 'import numpy as np\n'), ((17389, 17423), 'torch.tensor', 'tc.tensor', (['[[[z_s], [x_s], [y_s]]]'], {}), '([[[z_s], [x_s], [y_s]]])\n', (17398, 17423), True, 'import torch as tc\n'), ((17769, 17781), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (17777, 17781), True, 'import numpy as np\n'), ((17783, 17795), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (17791, 17795), True, 'import numpy as np\n'), ((17797, 17809), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (17805, 17809), True, 'import numpy as np\n'), ((17992, 18026), 'torch.tensor', 'tc.tensor', (['[[[z_s], [x_s], [y_s]]]'], {}), '([[[z_s], [x_s], [y_s]]])\n', (18001, 18026), True, 'import torch as tc\n'), ((18385, 18397), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (18393, 18397), True, 'import numpy as np\n'), ((18399, 18411), 'numpy.array', 'np.array', (['[]'], 
{}), '([])\n', (18407, 18411), True, 'import numpy as np\n'), ((18413, 18425), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (18421, 18425), True, 'import numpy as np\n'), ((18604, 18638), 'torch.tensor', 'tc.tensor', (['[[[z_s], [x_s], [y_s]]]'], {}), '([[[z_s], [x_s], [y_s]]])\n', (18613, 18638), True, 'import torch as tc\n'), ((23981, 24024), 'numpy.ceil', 'np.ceil', (['(det_from_sample_cm / voxel_size_cm)'], {}), '(det_from_sample_cm / voxel_size_cm)\n', (23988, 24024), True, 'import numpy as np\n'), ((26572, 26595), 'tqdm.tqdm', 'tqdm', (['voxel_pos_ls_flat'], {}), '(voxel_pos_ls_flat)\n', (26576, 26595), False, 'from tqdm import tqdm\n'), ((27912, 27955), 'numpy.concatenate', 'np.concatenate', (['(bdz_int, bdx_int, bdy_int)'], {}), '((bdz_int, bdx_int, bdy_int))\n', (27926, 27955), True, 'import numpy as np\n'), ((30209, 30244), 'numpy.delete', 'np.delete', (['int_ls', 'idx_duplicate', '(0)'], {}), '(int_ls, idx_duplicate, 0)\n', (30218, 30244), True, 'import numpy as np\n'), ((30274, 30310), 'numpy.delete', 'np.delete', (['int_length', 'idx_duplicate'], {}), '(int_length, idx_duplicate)\n', (30283, 30310), True, 'import numpy as np\n'), ((30459, 30481), 'numpy.zeros', 'np.zeros', (['int_ls.shape'], {}), '(int_ls.shape)\n', (30467, 30481), True, 'import numpy as np\n'), ((31151, 31174), 'torch.tensor', 'tc.tensor', (['int_idx_flat'], {}), '(int_idx_flat)\n', (31160, 31174), True, 'import torch as tc\n'), ((31357, 31380), 'tqdm.tqdm._instances.clear', 'tqdm._instances.clear', ([], {}), '()\n', (31378, 31380), False, 'from tqdm import tqdm\n'), ((34434, 34472), 'torch.sum', 'tc.sum', (['att_exponent_voxel_sum'], {'axis': '(1)'}), '(att_exponent_voxel_sum, axis=1)\n', (34440, 34472), True, 'import torch as tc\n'), ((4237, 4294), 'torch.zeros', 'tc.zeros', (['(sample_height_n, sample_size_n, 1)'], {'device': 'dev'}), '((sample_height_n, sample_size_n, 1), device=dev)\n', (4245, 4294), True, 'import torch as tc\n'), ((14285, 14330), 'torch.linspace', 
'tc.linspace', (['theta_st', 'theta_end', '(n_theta + 1)'], {}), '(theta_st, theta_end, n_theta + 1)\n', (14296, 14330), True, 'import torch as tc\n'), ((15645, 15661), 'torch.stack', 'tc.stack', (['fl_map'], {}), '(fl_map)\n', (15653, 15661), True, 'import torch as tc\n'), ((29796, 29820), 'numpy.argsort', 'np.argsort', (['int_ls[:, 1]'], {}), '(int_ls[:, 1])\n', (29806, 29820), True, 'import numpy as np\n'), ((30557, 30594), 'numpy.floor', 'np.floor', (['((int_ls + int_ls_shift) / 2)'], {}), '((int_ls + int_ls_shift) / 2)\n', (30565, 30594), True, 'import numpy as np\n'), ((3592, 3609), 'numpy.load', 'np.load', (['src_path'], {}), '(src_path)\n', (3599, 3609), True, 'import numpy as np\n'), ((14378, 14395), 'numpy.load', 'np.load', (['src_path'], {}), '(src_path)\n', (14385, 14395), True, 'import numpy as np\n'), ((28370, 28383), 'numpy.ceil', 'np.ceil', (['v[0]'], {}), '(v[0])\n', (28377, 28383), True, 'import numpy as np\n'), ((28519, 28537), 'numpy.ceil', 'np.ceil', (['bd_int[0]'], {}), '(bd_int[0])\n', (28526, 28537), True, 'import numpy as np\n'), ((28679, 28692), 'numpy.ceil', 'np.ceil', (['v[1]'], {}), '(v[1])\n', (28686, 28692), True, 'import numpy as np\n'), ((28828, 28846), 'numpy.ceil', 'np.ceil', (['bd_int[1]'], {}), '(bd_int[1])\n', (28835, 28846), True, 'import numpy as np\n'), ((28988, 29001), 'numpy.ceil', 'np.ceil', (['v[2]'], {}), '(v[2])\n', (28995, 29001), True, 'import numpy as np\n'), ((29137, 29155), 'numpy.ceil', 'np.ceil', (['bd_int[2]'], {}), '(bd_int[2])\n', (29144, 29155), True, 'import numpy as np\n'), ((32730, 32747), 'numpy.load', 'np.load', (['src_path'], {}), '(src_path)\n', (32737, 32747), True, 'import numpy as np\n'), ((33095, 33149), 'xraylib_np.CS_Total', 'xlib_np.CS_Total', (['aN_ls', "fl_all_lines_dic['fl_energy']"], {}), "(aN_ls, fl_all_lines_dic['fl_energy'])\n", (33111, 33149), True, 'import xraylib_np as xlib_np\n'), ((4480, 4523), 'torch.exp', 'tc.exp', (['(-att_exponent_acc_map[:, :, :, :-1])'], {}), 
'(-att_exponent_acc_map[:, :, :, :-1])\n', (4486, 4523), True, 'import torch as tc\n'), ((4621, 4663), 'torch.exp', 'tc.exp', (['(-att_exponent_acc_map[:, :, :, -1])'], {}), '(-att_exponent_acc_map[:, :, :, -1])\n', (4627, 4663), True, 'import torch as tc\n'), ((28007, 28033), 'numpy.argmin', 'np.argmin', (['bd_int_ls[:, 1]'], {}), '(bd_int_ls[:, 1])\n', (28016, 28033), True, 'import numpy as np\n'), ((28348, 28366), 'numpy.ceil', 'np.ceil', (['bd_int[0]'], {}), '(bd_int[0])\n', (28355, 28366), True, 'import numpy as np\n'), ((28502, 28515), 'numpy.ceil', 'np.ceil', (['v[0]'], {}), '(v[0])\n', (28509, 28515), True, 'import numpy as np\n'), ((28657, 28675), 'numpy.ceil', 'np.ceil', (['bd_int[1]'], {}), '(bd_int[1])\n', (28664, 28675), True, 'import numpy as np\n'), ((28811, 28824), 'numpy.ceil', 'np.ceil', (['v[1]'], {}), '(v[1])\n', (28818, 28824), True, 'import numpy as np\n'), ((28966, 28984), 'numpy.ceil', 'np.ceil', (['bd_int[2]'], {}), '(bd_int[2])\n', (28973, 28984), True, 'import numpy as np\n'), ((29120, 29133), 'numpy.ceil', 'np.ceil', (['v[2]'], {}), '(v[2])\n', (29127, 29133), True, 'import numpy as np\n'), ((29726, 29742), 'numpy.array', 'np.array', (['bd_int'], {}), '(bd_int)\n', (29734, 29742), True, 'import numpy as np\n'), ((29990, 30011), 'numpy.diff', 'np.diff', (['int_ls[:, 2]'], {}), '(int_ls[:, 2])\n', (29997, 30011), True, 'import numpy as np\n'), ((30149, 30174), 'numpy.where', 'np.where', (['(int_length == 0)'], {}), '(int_length == 0)\n', (30157, 30174), True, 'import numpy as np\n'), ((3707, 3744), 'xraylib_np.CS_Total', 'xlib_np.CS_Total', (['aN_ls', 'probe_energy'], {}), '(aN_ls, probe_energy)\n', (3723, 3744), True, 'import xraylib_np as xlib_np\n'), ((24462, 24501), 'numpy.floor', 'np.floor', (['(det_size_n / det_ds_spacing_n)'], {}), '(det_size_n / det_ds_spacing_n)\n', (24470, 24501), True, 'import numpy as np\n'), ((24588, 24627), 'numpy.floor', 'np.floor', (['(det_size_n / det_ds_spacing_n)'], {}), '(det_size_n / 
det_ds_spacing_n)\n', (24596, 24627), True, 'import numpy as np\n'), ((24859, 24898), 'numpy.floor', 'np.floor', (['(det_size_n / det_ds_spacing_n)'], {}), '(det_size_n / det_ds_spacing_n)\n', (24867, 24898), True, 'import numpy as np\n'), ((24987, 25026), 'numpy.floor', 'np.floor', (['(det_size_n / det_ds_spacing_n)'], {}), '(det_size_n / det_ds_spacing_n)\n', (24995, 25026), True, 'import numpy as np\n'), ((27395, 27419), 'numpy.argmax', 'np.argmax', (['bdy_int[:, 1]'], {}), '(bdy_int[:, 1])\n', (27404, 27419), True, 'import numpy as np\n'), ((27665, 27689), 'numpy.argmax', 'np.argmax', (['bdz_int[:, 1]'], {}), '(bdz_int[:, 1])\n', (27674, 27689), True, 'import numpy as np\n'), ((29938, 29959), 'numpy.diff', 'np.diff', (['int_ls[:, 0]'], {}), '(int_ls[:, 0])\n', (29945, 29959), True, 'import numpy as np\n'), ((29964, 29985), 'numpy.diff', 'np.diff', (['int_ls[:, 1]'], {}), '(int_ls[:, 1])\n', (29971, 29985), True, 'import numpy as np\n'), ((28396, 28414), 'numpy.ceil', 'np.ceil', (['bd_int[0]'], {}), '(bd_int[0])\n', (28403, 28414), True, 'import numpy as np\n'), ((28417, 28430), 'numpy.ceil', 'np.ceil', (['v[0]'], {}), '(v[0])\n', (28424, 28430), True, 'import numpy as np\n'), ((28550, 28568), 'numpy.ceil', 'np.ceil', (['bd_int[0]'], {}), '(bd_int[0])\n', (28557, 28568), True, 'import numpy as np\n'), ((28571, 28584), 'numpy.ceil', 'np.ceil', (['v[0]'], {}), '(v[0])\n', (28578, 28584), True, 'import numpy as np\n'), ((28705, 28723), 'numpy.ceil', 'np.ceil', (['bd_int[1]'], {}), '(bd_int[1])\n', (28712, 28723), True, 'import numpy as np\n'), ((28726, 28739), 'numpy.ceil', 'np.ceil', (['v[1]'], {}), '(v[1])\n', (28733, 28739), True, 'import numpy as np\n'), ((28859, 28877), 'numpy.ceil', 'np.ceil', (['bd_int[1]'], {}), '(bd_int[1])\n', (28866, 28877), True, 'import numpy as np\n'), ((28880, 28893), 'numpy.ceil', 'np.ceil', (['v[1]'], {}), '(v[1])\n', (28887, 28893), True, 'import numpy as np\n'), ((29014, 29032), 'numpy.ceil', 'np.ceil', (['bd_int[2]'], {}), 
'(bd_int[2])\n', (29021, 29032), True, 'import numpy as np\n'), ((29035, 29048), 'numpy.ceil', 'np.ceil', (['v[2]'], {}), '(v[2])\n', (29042, 29048), True, 'import numpy as np\n'), ((29168, 29186), 'numpy.ceil', 'np.ceil', (['bd_int[2]'], {}), '(bd_int[2])\n', (29175, 29186), True, 'import numpy as np\n'), ((29189, 29202), 'numpy.ceil', 'np.ceil', (['v[2]'], {}), '(v[2])\n', (29196, 29202), True, 'import numpy as np\n')] |
# Authors: <NAME> <<EMAIL>>
# License: BSD 3 clause
from yyskmultilearn.base import MLClassifierBase
import numpy as np
import scipy.sparse as sp
from scipy.linalg import norm
from scipy.sparse.linalg import inv as inv_sparse
from scipy.linalg import inv as inv_dense
class MLTSVM(MLClassifierBase):
    """Twin multi-Label Support Vector Machines.

    Fits one "twin" hyperplane per label: the plane for label k is pulled
    towards samples carrying label k and pushed away from the remaining
    samples; the dual problem is solved by successive overrelaxation (SOR).

    Parameters
    ----------
    c_k : int
        the empirical risk penalty parameter that determines the trade-off
        between the loss terms
    sor_omega: float (default is 1.0)
        the smoothing parameter
    threshold : int (default is 1e-6)
        threshold above which a label should be assigned
    lambda_param : float (default is 1.0)
        the regularization parameter
    max_iteration : int (default is 500)
        maximum number of iterations to use in successive overrelaxation

    References
    ----------
    If you use this classifier please cite the original paper introducing
    the method:

    .. code :: bibtex

        @article{chen2016mltsvm,
          title={MLTSVM: a novel twin support vector machine to multi-label learning},
          author={<NAME> and <NAME> and <NAME>},
          journal={Pattern Recognition},
          volume={52},
          pages={61--74},
          year={2016},
          publisher={Elsevier}
        }

    Examples
    --------
    Here's a very simple example of using MLTSVM with a fixed number of
    neighbors:

    .. code :: python

        from yyskmultilearn.adapt import MLTSVM

        classifier = MLTSVM(c_k = 2**-1)

        # train
        classifier.fit(X_train, y_train)

        # predict
        predictions = classifier.predict(X_test)

    You can also use :class:`~sklearn.model_selection.GridSearchCV` to find
    an optimal set of parameters:

    .. code :: python

        from yyskmultilearn.adapt import MLTSVM
        from sklearn.model_selection import GridSearchCV

        parameters = {'c_k': [2**i for i in range(-5, 5, 2)]}
        score = 'f1-macro'

        clf = GridSearchCV(MLTSVM(), parameters, scoring=score)
        clf.fit(X, y)
        print (clf.best_params_, clf.best_score_)

        # output
        {'c_k': 0.03125} 0.347518217573
    """

    def __init__(self, c_k=0, sor_omega=1.0, threshold=1e-6, lambda_param=1.0, max_iteration=500):
        # NOTE(review): super() is invoked with MLClassifierBase (the parent
        # itself) as its first argument, which skips
        # MLClassifierBase.__init__ and calls the grandparent's instead --
        # presumably deliberate; confirm against the base class.
        super(MLClassifierBase, self).__init__()
        self.max_iteration = max_iteration
        self.threshold = threshold
        self.lambda_param = lambda_param  # TODO: possibility to add different lambda to different labels
        self.c_k = c_k
        self.sor_omega = sor_omega
        # Attribute names the base class copies via get_params/set_params.
        self.copyable_attrs = ['c_k', 'sor_omega', 'lambda_param', 'threshold', 'max_iteration']

    def fit(self, X, Y):
        """Fit one (weights, bias) row per label column of Y."""
        n_labels = Y.shape[1]
        m = X.shape[1]  # Count of features
        # wk_bk[k] stores [w_k | b_k]: feature weights plus the bias term.
        self.wk_bk = np.zeros([n_labels, m + 1], dtype=float)
        # Pick sparse- or dense-aware identity and inverse implementations.
        if sp.issparse(X):
            identity_matrix = sp.identity(m + 1)
            _inv = inv_sparse
        else:
            identity_matrix = np.identity(m + 1)
            _inv = inv_dense
        # Append a constant-1 column so the bias is learned with the weights.
        X_bias = _hstack(X, np.ones((X.shape[0], 1), dtype=X.dtype))
        self.iteration_count = []
        for label in range(0, n_labels):
            # Calculate the parameter Q for overrelaxation
            H_k = _get_x_class_instances(X_bias, Y, label)
            G_k = _get_x_noclass_instances(X_bias, Y, label)
            # Q_knoPrefixGk = (H_k^T H_k + lambda * I)^-1 G_k^T
            Q_knoPrefixGk = _inv((H_k.T).dot(H_k) + self.lambda_param * identity_matrix).dot(G_k.T)
            # NOTE(review): .A is a matrix/sparse-only attribute; a dense
            # ndarray reaching this line would raise AttributeError --
            # confirm inputs are sparse (or np.matrix) in practice.
            Q_k = G_k.dot(Q_knoPrefixGk).A
            # Symmetrise to counter numerical asymmetry.
            Q_k = (Q_k + Q_k.T) / 2.0
            # Solve the dual problem for this label with SOR.
            alpha_k = self._successive_overrelaxation(self.sor_omega, Q_k)
            if sp.issparse(X):
                self.wk_bk[label] = -Q_knoPrefixGk.dot(alpha_k).T
            else:
                self.wk_bk[label] = (-np.dot(Q_knoPrefixGk, alpha_k)).T
        # Row norms are reused by predict() to turn margins into distances.
        self.wk_norms = norm(self.wk_bk, axis=1)
        # NOTE: attribute is spelled 'treshold' (sic); predict() reads it,
        # so renaming would be an interface change.
        self.treshold = 1.0 / np.max(self.wk_norms)

    def predict(self, X):
        """Return a binary (n_samples, n_labels) prediction matrix."""
        X_with_bias = _hstack(X, np.ones((X.shape[0], 1), dtype=X.dtype))
        wk_norms_multiplicated = self.wk_norms[np.newaxis, :]  # change to form [[wk1, wk2, ..., wkk]]
        # Signed distance of every sample to every label's hyperplane.
        all_distances = (-X_with_bias.dot(self.wk_bk.T)) / wk_norms_multiplicated
        predicted_y = np.where(all_distances < self.treshold, 1, 0)
        # TODO: It's possible to add condition to: add label if no labels is in row.
        return predicted_y

    def _successive_overrelaxation(self, omegaW, Q):
        """Solve the box-constrained dual (0 <= alpha <= c_k) via SOR."""
        # Initialization
        D = np.diag(Q)  # Only one dimension vector - is enough
        D_inv = 1.0 / D  # D-1 simplify form
        small_l = Q.shape[1]
        oldnew_alpha = np.zeros([small_l, 1])  # buffer
        is_not_enough = True
        was_going_down = False
        last_alfa_norm_change = -1
        nr_iter = 0
        while is_not_enough:  # do while
            # NOTE(review): oldAlpha aliases oldnew_alpha (no copy), so
            # after the in-place j-loop below, alfa_norm_change measures
            # only the effect of the clip() step -- confirm this is the
            # intended convergence criterion.
            oldAlpha = oldnew_alpha
            for j in range(0, small_l):  # It's from last alpha to first
                oldnew_alpha[j] = oldAlpha[j] - omegaW * D_inv[j] * (Q[j, :].T.dot(oldnew_alpha) - 1)
            # Project back onto the box constraints [0, c_k].
            oldnew_alpha = oldnew_alpha.clip(0.0, self.c_k)
            alfa_norm_change = norm(oldnew_alpha - oldAlpha)

            if not was_going_down and last_alfa_norm_change > alfa_norm_change:
                was_going_down = True
            # Stop when converged, out of iteration budget, or the change
            # starts increasing again after having decreased.
            is_not_enough = alfa_norm_change > self.threshold and \
                            nr_iter < self.max_iteration \
                            and ((not was_going_down) or last_alfa_norm_change > alfa_norm_change)
            # TODO: maybe add any(oldnew_alpha != oldAlpha)
            last_alfa_norm_change = alfa_norm_change
            nr_iter += 1

        self.iteration_count.append(nr_iter)
        return oldnew_alpha
def _get_x_noclass_instances(X, Y, label_class):
if sp.issparse(Y):
indices = np.where(Y[:, 1].A == 0)[0]
else:
indices = np.where(Y[:, 1] == 0)[0]
return X[indices, :]
def _get_x_class_instances(X, Y, label_class):
if sp.issparse(Y):
indices = Y[:, label_class].nonzero()[0]
else:
indices = np.nonzero(Y[:, label_class])[0]
return X[indices, :]
def _hstack(X, Y):
if sp.issparse(X):
return sp.hstack([X, Y], format=X.format)
else:
return np.hstack([X, Y])
| [
"scipy.sparse.issparse",
"numpy.zeros",
"numpy.identity",
"numpy.ones",
"numpy.hstack",
"numpy.nonzero",
"numpy.max",
"numpy.where",
"scipy.linalg.norm",
"scipy.sparse.identity",
"numpy.dot",
"numpy.diag",
"scipy.sparse.hstack"
] | [((5807, 5821), 'scipy.sparse.issparse', 'sp.issparse', (['Y'], {}), '(Y)\n', (5818, 5821), True, 'import scipy.sparse as sp\n'), ((6004, 6018), 'scipy.sparse.issparse', 'sp.issparse', (['Y'], {}), '(Y)\n', (6015, 6018), True, 'import scipy.sparse as sp\n'), ((6183, 6197), 'scipy.sparse.issparse', 'sp.issparse', (['X'], {}), '(X)\n', (6194, 6197), True, 'import scipy.sparse as sp\n'), ((2823, 2863), 'numpy.zeros', 'np.zeros', (['[n_labels, m + 1]'], {'dtype': 'float'}), '([n_labels, m + 1], dtype=float)\n', (2831, 2863), True, 'import numpy as np\n'), ((2876, 2890), 'scipy.sparse.issparse', 'sp.issparse', (['X'], {}), '(X)\n', (2887, 2890), True, 'import scipy.sparse as sp\n'), ((3886, 3910), 'scipy.linalg.norm', 'norm', (['self.wk_bk'], {'axis': '(1)'}), '(self.wk_bk, axis=1)\n', (3890, 3910), False, 'from scipy.linalg import norm\n'), ((4271, 4316), 'numpy.where', 'np.where', (['(all_distances < self.treshold)', '(1)', '(0)'], {}), '(all_distances < self.treshold, 1, 0)\n', (4279, 4316), True, 'import numpy as np\n'), ((4520, 4530), 'numpy.diag', 'np.diag', (['Q'], {}), '(Q)\n', (4527, 4530), True, 'import numpy as np\n'), ((4669, 4691), 'numpy.zeros', 'np.zeros', (['[small_l, 1]'], {}), '([small_l, 1])\n', (4677, 4691), True, 'import numpy as np\n'), ((6214, 6248), 'scipy.sparse.hstack', 'sp.hstack', (['[X, Y]'], {'format': 'X.format'}), '([X, Y], format=X.format)\n', (6223, 6248), True, 'import scipy.sparse as sp\n'), ((6274, 6291), 'numpy.hstack', 'np.hstack', (['[X, Y]'], {}), '([X, Y])\n', (6283, 6291), True, 'import numpy as np\n'), ((2922, 2940), 'scipy.sparse.identity', 'sp.identity', (['(m + 1)'], {}), '(m + 1)\n', (2933, 2940), True, 'import scipy.sparse as sp\n'), ((3015, 3033), 'numpy.identity', 'np.identity', (['(m + 1)'], {}), '(m + 1)\n', (3026, 3033), True, 'import numpy as np\n'), ((3092, 3131), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {'dtype': 'X.dtype'}), '((X.shape[0], 1), dtype=X.dtype)\n', (3099, 3131), True, 'import numpy as 
np\n'), ((3689, 3703), 'scipy.sparse.issparse', 'sp.issparse', (['X'], {}), '(X)\n', (3700, 3703), True, 'import scipy.sparse as sp\n'), ((3941, 3962), 'numpy.max', 'np.max', (['self.wk_norms'], {}), '(self.wk_norms)\n', (3947, 3962), True, 'import numpy as np\n'), ((4023, 4062), 'numpy.ones', 'np.ones', (['(X.shape[0], 1)'], {'dtype': 'X.dtype'}), '((X.shape[0], 1), dtype=X.dtype)\n', (4030, 4062), True, 'import numpy as np\n'), ((5162, 5191), 'scipy.linalg.norm', 'norm', (['(oldnew_alpha - oldAlpha)'], {}), '(oldnew_alpha - oldAlpha)\n', (5166, 5191), False, 'from scipy.linalg import norm\n'), ((5841, 5865), 'numpy.where', 'np.where', (['(Y[:, 1].A == 0)'], {}), '(Y[:, 1].A == 0)\n', (5849, 5865), True, 'import numpy as np\n'), ((5897, 5919), 'numpy.where', 'np.where', (['(Y[:, 1] == 0)'], {}), '(Y[:, 1] == 0)\n', (5905, 5919), True, 'import numpy as np\n'), ((6097, 6126), 'numpy.nonzero', 'np.nonzero', (['Y[:, label_class]'], {}), '(Y[:, label_class])\n', (6107, 6126), True, 'import numpy as np\n'), ((3827, 3857), 'numpy.dot', 'np.dot', (['Q_knoPrefixGk', 'alpha_k'], {}), '(Q_knoPrefixGk, alpha_k)\n', (3833, 3857), True, 'import numpy as np\n')] |
import torch
import pandas as pd
import numpy as np
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import train_test_split
from deepfm import DeepFM
# Reproducibility: fix the torch RNG seed used for weight initialisation.
torch.manual_seed(2022)

# Load the pre-processed training table; reset_index so row positions can be
# used directly as dataset indices.
data = pd.read_csv('./temp_data.csv').reset_index(drop=True)

# Column roles: categorical features get one-hot field slots, dummy columns
# are identifiers excluded from the model, TARGET is the label column.
category_cols = ['CODE_GENDER', 'FLAG_OWN_CAR', 'FLAG_OWN_REALTY']
dummy_cols = ['SK_ID_CURR']
target_col = 'TARGET'
# Every remaining column is treated as a numeric feature.
# NOTE(review): set() arithmetic makes the numeric column ORDER
# non-deterministic across runs; confirm nothing downstream relies on it.
numeric_cols = list(set(data.columns) - set(category_cols + dummy_cols + [target_col]))
def data_massage(data, category_cols, numeric_cols):
    """Describe the feature layout for an FM-style embedding table.

    Parameters
    ----------
    data : pandas.DataFrame containing all feature columns
    category_cols : list of categorical column names
    numeric_cols : list of numeric column names

    Returns
    -------
    (feat_cols, start_idx, fields) where fields[i] is the number of one-hot
    slots feature i occupies (its cardinality for categoricals, 1 for
    numerics) and start_idx[i] is the first slot offset of feature i.
    """
    feat_cols = category_cols + numeric_cols
    fields = [data[col].nunique() if col in category_cols else 1
              for col in feat_cols]
    start_idx = [0] + np.cumsum(fields)[:-1].tolist()
    return feat_cols, start_idx, fields
class FMDataset(Dataset):
    """Torch dataset yielding (label, feature indices, feature values).

    Categorical features are mapped to one-hot slots (field start offset +
    category code, value 1); numeric features keep a single fixed slot with
    their raw value.
    """

    def __init__(self, data, feat_start_idx, fields_size, feat_cols, target_col):
        self.data = data
        self.label = np.asarray(self.data[target_col])
        self.feat_cols = feat_cols
        self.fields = fields_size
        self.start_idx = feat_start_idx

    def __getitem__(self, index):
        row = self.data.loc[index, self.feat_cols]
        label = self.label[index]
        idxs = []
        vals = []
        for i, raw in enumerate(row):
            if self.fields[i] == 1:
                # Numeric feature: fixed slot, raw value.
                idxs.append(self.start_idx[i])
                vals.append(raw)
            else:
                # Categorical feature: offset by the category code, value 1.
                idxs.append(int(self.start_idx[i] + raw))
                vals.append(1)
        label = torch.tensor(label, dtype=torch.float32)
        idxs = torch.tensor(idxs, dtype=torch.long)
        vals = torch.tensor(vals, dtype=torch.float32)
        return label, idxs, vals

    def __len__(self):
        return len(self.data)
# Derive the feature layout once so train and test datasets agree on
# one-hot slot offsets.
feat_cols, feat_start_idx, fields_size = data_massage(data, category_cols, numeric_cols)

# Hyper-parameters handed to the project's DeepFM implementation.
args = {
    'batch_size': 256,
    'gpuid': '0',
    'lr': 0.001,
    'l2_reg': 0.,
    'epochs': 10,
    'num_features': len(feat_cols),
    'embedding_dim': 8,
    'field_size': fields_size,
    'dense_size': [32, 32],
    # NOTE(review): values of 1. for the 1st/2nd-order dropout presumably
    # disable dropout there -- confirm against DeepFM's parameter semantics.
    '1o_dropout_p': 1.,
    '2o_dropout_p': 1.,
    'deep_dropout_p': 0.5,
    'batch_norm': True,
    'deep_layer_act': 'relu',
    'opt_name': 'adam'
}

# 80/20 split; reset indices so FMDataset's .loc[index] lookups line up
# with the DataLoader's 0-based sample positions.
train_data, test_data = train_test_split(data, test_size=0.2)
train_data, test_data = train_data.reset_index(drop=True), test_data.reset_index(drop=True)

train_dataset = FMDataset(train_data, feat_start_idx, fields_size, feat_cols, target_col)
train_loader = DataLoader(train_dataset, batch_size=args['batch_size'], shuffle=True)

# Evaluate the whole test split in a single batch.
test_dataset = FMDataset(test_data, feat_start_idx, fields_size, feat_cols, target_col)
test_loader = DataLoader(test_dataset, batch_size=len(test_dataset))

model = DeepFM(args)
model.fit(train_loader)
model.predict(test_loader)
| [
"torch.utils.data.DataLoader",
"pandas.read_csv",
"torch.manual_seed",
"sklearn.model_selection.train_test_split",
"deepfm.DeepFM",
"numpy.asarray",
"numpy.cumsum",
"torch.tensor"
] | [((182, 205), 'torch.manual_seed', 'torch.manual_seed', (['(2022)'], {}), '(2022)\n', (199, 205), False, 'import torch\n'), ((2380, 2417), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data'], {'test_size': '(0.2)'}), '(data, test_size=0.2)\n', (2396, 2417), False, 'from sklearn.model_selection import train_test_split\n'), ((2616, 2686), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': "args['batch_size']", 'shuffle': '(True)'}), "(train_dataset, batch_size=args['batch_size'], shuffle=True)\n", (2626, 2686), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((2854, 2866), 'deepfm.DeepFM', 'DeepFM', (['args'], {}), '(args)\n', (2860, 2866), False, 'from deepfm import DeepFM\n'), ((213, 243), 'pandas.read_csv', 'pd.read_csv', (['"""./temp_data.csv"""'], {}), "('./temp_data.csv')\n", (224, 243), True, 'import pandas as pd\n'), ((1007, 1040), 'numpy.asarray', 'np.asarray', (['self.data[target_col]'], {}), '(self.data[target_col])\n', (1017, 1040), True, 'import numpy as np\n'), ((1641, 1681), 'torch.tensor', 'torch.tensor', (['label'], {'dtype': 'torch.float32'}), '(label, dtype=torch.float32)\n', (1653, 1681), False, 'import torch\n'), ((1697, 1733), 'torch.tensor', 'torch.tensor', (['idxs'], {'dtype': 'torch.long'}), '(idxs, dtype=torch.long)\n', (1709, 1733), False, 'import torch\n'), ((1749, 1788), 'torch.tensor', 'torch.tensor', (['vals'], {'dtype': 'torch.float32'}), '(vals, dtype=torch.float32)\n', (1761, 1788), False, 'import torch\n'), ((779, 796), 'numpy.cumsum', 'np.cumsum', (['fields'], {}), '(fields)\n', (788, 796), True, 'import numpy as np\n')] |
import numpy, Image
# Dump 10,000 random-noise frames as JPEG files (Python 2 / legacy PIL).
for n in xrange(10000):
    # 64x64x3 uniform noise scaled into the 0-255 byte range.
    a = numpy.random.rand(64, 64, 3) * 255
    # BUG FIX: JPEG has no alpha channel, so converting to 'RGBA' makes
    # PIL's save() fail for .jpg output; convert to 'RGB' instead.
    im_out = Image.fromarray(a.astype('uint8')).convert('RGB')
    # BUG FIX: '%000d' is a zero-width format (no padding at all); use
    # '%05d' so file names are zero-padded and sort lexicographically.
    im_out.save('%05d.jpg' % n)
"numpy.random.rand"
] | [((53, 81), 'numpy.random.rand', 'numpy.random.rand', (['(64)', '(64)', '(3)'], {}), '(64, 64, 3)\n', (70, 81), False, 'import numpy, Image\n')] |
#!/usr/bin/env python
import cPickle
import cv2
import numpy as np
import h5py
from vizdoom import *
import math
import os
import os.path
import sys
import random
import scipy.misc
from constants import *
from video_writer import *
import cv2
import os
import cPickle
import numpy as np
np.random.seed(DEFAULT_RANDOM_SEED)
import keras
import random
random.seed(DEFAULT_RANDOM_SEED)
def mean(numbers):
    """Arithmetic mean of a sized sequence; defined as 0.0 when empty."""
    if not numbers:
        return 0.0
    return float(sum(numbers)) / len(numbers)
def wait_idle(game, wait_idle_tics=WAIT_IDLE_TICS):
    """Hold the agent idle for `wait_idle_tics` game tics (no-op if <= 0)."""
    if wait_idle_tics <= 0:
        return
    game.make_action(STAY_IDLE, wait_idle_tics)
def game_make_action_wrapper(game, action, repeat):
    """Execute `action` for `repeat` tics, settle with an idle pause, and
    return the reward produced by the action.

    BUG FIX: the original returned a hard-coded None, yet
    StateRecorder.record() stores this return value as the step reward, so
    every recorded reward was None. Return make_action's result instead.
    """
    reward = game.make_action(action, repeat)
    # Reward accrued during the idle pause is not accumulated here --
    # presumably intentional; TODO confirm.
    wait_idle(game)
    return reward
def save_list_of_arrays_to_hdf5(input, prefix):
    """Stack `input` (a list of equally-shaped arrays) into one array and
    persist it under the dataset name 'dataset' in `prefix + HDF5_NAME`."""
    with h5py.File(prefix + HDF5_NAME, 'w') as h5f:
        h5f.create_dataset('dataset', data=np.array(input))
def load_array_from_hdf5(prefix):
    """Read back the full 'dataset' array written by
    save_list_of_arrays_to_hdf5 for the same prefix."""
    with h5py.File(prefix + HDF5_NAME, 'r') as h5f:
        # [:] materialises the dataset into memory before the file closes.
        return h5f['dataset'][:]
class StateRecorder():
    """Accumulates per-step observations (screen buffer, game variables,
    action index, wrapper return value) while an agent plays, and can
    serialise them to disk (HDF5 for screens, pickle for the rest)."""

    def __init__(self, game):
        self.game = game
        self.game_variables = []   # per-step ViZDoom game_variables vectors
        self.actions = []          # per-step chosen action indices
        self.rewards = []          # per-step return of game_make_action_wrapper
        self.screen_buffers = []   # per-step screens, transposed via VIZDOOM_TO_TF

    def record_buffers(self, state):
        # VIZDOOM_TO_TF permutes the buffer axes into the layout the TF
        # models expect -- presumably channels-last; confirm in constants.py.
        self.screen_buffers.append(state.screen_buffer.transpose(VIZDOOM_TO_TF))

    def record(self, action_index, repeat):
        """Record the current state, then make the provided action."""
        state = self.game.get_state()
        self.record_buffers(state)
        self.game_variables.append(state.game_variables)
        r = game_make_action_wrapper(self.game, ACTIONS_LIST[action_index], repeat)
        self.actions.append(action_index)
        self.rewards.append(r)

    def save_recorded_buffers(self):
        # Screens go to HDF5 separately -- they dominate the data volume.
        save_list_of_arrays_to_hdf5(self.screen_buffers, SCREEN_BUFFERS_PATH)

    def save_recorded(self):
        """Persist screens (HDF5) and the remaining step data (pickle)."""
        self.save_recorded_buffers()
        data = (self.game_variables,
                self.actions,
                self.rewards)
        with open(NAVIGATION_RECORDING_PATH, 'wb') as output_file:
            cPickle.dump(data, output_file)
def downsample(input, factor):
    """Apply cv2.pyrDown `factor` times, halving each dimension per pass."""
    result = input
    for _ in xrange(factor):
        result = cv2.pyrDown(result)
    return result
def double_downsampling(input):
    """Quarter the resolution: two successive pyramid downsamples."""
    halved = cv2.pyrDown(input)
    return cv2.pyrDown(halved)
def double_upsampling(input):
    """Quadruple the resolution: two successive pyramid upsamples."""
    doubled = cv2.pyrUp(input)
    return cv2.pyrUp(doubled)
def color2gray(input):
    """Convert an RGB image to a single-channel grayscale image."""
    gray = cv2.cvtColor(input, cv2.COLOR_RGB2GRAY)
    return gray
def doom_navigation_setup(seed, wad):
    """Create and initialise a ViZDoom game for the given scenario.

    Parameters
    ----------
    seed : int -- ViZDoom RNG seed, for reproducible episodes.
    wad : str -- path to the scenario WAD file.

    Returns
    -------
    an initialised DoomGame instance.
    """
    game = DoomGame()
    game.load_config(DEFAULT_CONFIG)
    game.set_doom_scenario_path(wad)
    game.set_seed(seed)
    # "+sv_cheats 1" enables engine cheat commands -- presumably needed for
    # scripted agent placement elsewhere in the tooling; confirm.
    game.add_game_args("+sv_cheats 1")
    game.init()
    return game
def calculate_distance_angle(start_coordinates, current_coordinates):
    """Return (euclidean_distance, angular_difference) between two poses.

    Both arguments are (x, y, z, angle_degrees) tuples; the angular
    difference is wrapped into [0, 180] degrees.
    """
    squared = sum((s - c) ** 2
                  for s, c in zip(start_coordinates[:3], current_coordinates[:3]))
    distance = math.sqrt(squared)
    raw_angle = math.fabs(start_coordinates[3] - current_coordinates[3])
    # Take the shorter way around the circle.
    angle = min(raw_angle, 360.0 - raw_angle)
    return distance, angle
def generator(x, y, batch_size, max_action_distance):
    """Infinite Keras-style batch generator for action-prediction training.

    Each sample channel-stacks a frame x[choice] with a future frame
    x[choice + distance] (distance drawn uniformly from
    [1, max_action_distance]) and labels it with y[choice].
    """
    while True:
        number_of_samples = x.shape[0]
        x_list = []
        y_list = []
        for index in xrange(batch_size):
            # Anchor index chosen so choice + distance stays in range.
            choice = random.randint(0, number_of_samples - max_action_distance - 1)
            distance = random.randint(1, max_action_distance)
            current_x = x[choice]
            current_y = y[choice]
            future_x = x[choice + distance]
            # Stack current and future observations along the channel axis.
            x_list.append(np.concatenate((current_x, future_x), axis=2))
            y_list.append(current_y)
        yield np.array(x_list), np.array(y_list)
def vertically_stack_image_list(input_image_list):
    """Concatenate images top-to-bottom, appending a black separator strip
    (SHOW_BORDER rows high) after every image."""
    pieces = []
    for image in input_image_list:
        separator = np.zeros([SHOW_BORDER, image.shape[1], SHOW_CHANNELS],
                             dtype=np.uint8)
        pieces.extend((image, separator))
    return np.concatenate(pieces, axis=0)
def save_np_array_as_png(input, path):
    """Write a numpy image array to `path`, clamping values to [0, 255].

    NOTE(review): scipy.misc.toimage was removed in SciPy 1.2; this only
    works with the legacy SciPy (+ PIL) this project pins.
    """
    scipy.misc.toimage(input, cmin=0.0, cmax=255.0).save(path)
class NavigationVideoWriter():
    """Writes side-by-side (current | goal) navigation videos.

    In the default mode the first and last frame of each trajectory are
    stretched into pauses and blank delimiter frames separate trajectories;
    with nonstop=True every frame is written exactly once.
    """

    def __init__(self, save_path, nonstop=False):
        self.nonstop = nonstop
        # Output is two frames side by side plus a separator strip.
        self.video_writer = VideoWriter(save_path,
                                        (2 * SHOW_WIDTH + SHOW_BORDER, SHOW_HEIGHT),
                                        mode='replace',
                                        framerate=FPS)

    def side_by_side(self, first, second):
        """Horizontally join two frames with a black separator strip."""
        # Upsample 2x twice when frames are stored at reduced resolution.
        if not HIGH_RESOLUTION_VIDEO:
            first = double_upsampling(first)
            second = double_upsampling(second)
        return np.concatenate((first,
                               np.zeros([SHOW_HEIGHT, SHOW_BORDER, SHOW_CHANNELS], dtype=np.uint8),
                               second), axis=1)

    def write(self, left, right, counter, deep_net_actions):
        """Append one frame; `counter` is the step index within a trajectory
        of `deep_net_actions` steps (used to place the pauses)."""
        side_by_side_screen = self.side_by_side(left, right)
        if not self.nonstop:
            if counter == 0:
                # Hold the first frame so the viewer can see the start state.
                for _ in xrange(START_PAUSE_FRAMES):
                    self.video_writer.add_frame(side_by_side_screen)
            elif counter + 1 < deep_net_actions:
                self.video_writer.add_frame(side_by_side_screen)
            else:
                # Hold the final frame, then emit black delimiter frames.
                for _ in xrange(END_PAUSE_FRAMES):
                    self.video_writer.add_frame(side_by_side_screen)
                for _ in xrange(DELIMITER_FRAMES):
                    self.video_writer.add_frame(np.zeros_like(side_by_side_screen))
        else:
            self.video_writer.add_frame(side_by_side_screen)

    def close(self):
        self.video_writer.close()
def make_deep_action(current_screen, goal_screen, model, game, repeat, randomized):
    """Choose an action from a (current, goal) screen pair and execute it.

    Returns (action_index, action_probabilities, current_screen).
    """
    stacked = np.concatenate((current_screen, goal_screen), axis=2)
    batch = np.expand_dims(stacked, axis=0)
    action_probabilities = np.squeeze(model.predict(batch,
                                                    batch_size=1))
    if randomized:
        # Sample from the predicted policy distribution.
        action_index = np.random.choice(len(ACTIONS_LIST), p=action_probabilities)
    else:
        # Greedy: pick the most probable action.
        action_index = np.argmax(action_probabilities)
    game_make_action_wrapper(game, ACTIONS_LIST[action_index], repeat)
    return action_index, action_probabilities, current_screen
def current_make_deep_action(goal_screen, model, game, repeat, randomized):
    """make_deep_action using the game's current screen buffer."""
    current_screen = game.get_state().screen_buffer.transpose(VIZDOOM_TO_TF)
    return make_deep_action(current_screen, goal_screen, model, game, repeat, randomized)
def get_deep_prediction(current_screen, goal_screen, model):
    """Return the model's action-probability vector for (current, goal)."""
    stacked = np.concatenate((current_screen, goal_screen), axis=2)
    batch = np.expand_dims(stacked, axis=0)
    return np.squeeze(model.predict(batch, batch_size=1))
def current_get_deep_prediction(goal_screen, model, game):
    """get_deep_prediction using the game's current screen buffer."""
    current_screen = game.get_state().screen_buffer.transpose(VIZDOOM_TO_TF)
    return get_deep_prediction(current_screen, goal_screen, model)
def explore(game, number_of_actions):
    """Random exploration: turn in one randomly chosen direction for a
    random number of steps, then move forward for the remainder."""
    # Turning direction is fixed once per exploration run.
    is_left = random.random() > 0.5
    # Step index after which the agent stops turning and walks straight.
    start_moving_straight = random.randint(0, number_of_actions)
    for counter in xrange(number_of_actions):
        if counter >= start_moving_straight:
            action_index = INVERSE_ACTION_NAMES_INDEX['MOVE_FORWARD']
        else:
            if is_left:
                action_index = INVERSE_ACTION_NAMES_INDEX['TURN_LEFT']
            else:
                action_index = INVERSE_ACTION_NAMES_INDEX['TURN_RIGHT']
        game_make_action_wrapper(game, ACTIONS_LIST[action_index], TEST_REPEAT)
def get_distance(first_point, second_point):
    """Planar (x, y) Euclidean distance between two points."""
    return math.hypot(first_point[0] - second_point[0],
                      first_point[1] - second_point[1])
| [
"h5py.File",
"numpy.zeros_like",
"numpy.random.seed",
"random.randint",
"math.sqrt",
"math.fabs",
"cv2.cvtColor",
"numpy.argmax",
"numpy.zeros",
"random.random",
"cPickle.dump",
"numpy.array",
"random.seed",
"cv2.pyrDown",
"numpy.concatenate",
"cv2.pyrUp"
] | [((289, 324), 'numpy.random.seed', 'np.random.seed', (['DEFAULT_RANDOM_SEED'], {}), '(DEFAULT_RANDOM_SEED)\n', (303, 324), True, 'import numpy as np\n'), ((352, 384), 'random.seed', 'random.seed', (['DEFAULT_RANDOM_SEED'], {}), '(DEFAULT_RANDOM_SEED)\n', (363, 384), False, 'import random\n'), ((764, 779), 'numpy.array', 'np.array', (['input'], {}), '(input)\n', (772, 779), True, 'import numpy as np\n'), ((788, 822), 'h5py.File', 'h5py.File', (['(prefix + HDF5_NAME)', '"""w"""'], {}), "(prefix + HDF5_NAME, 'w')\n", (797, 822), False, 'import h5py\n'), ((926, 960), 'h5py.File', 'h5py.File', (['(prefix + HDF5_NAME)', '"""r"""'], {}), "(prefix + HDF5_NAME, 'r')\n", (935, 960), False, 'import h5py\n'), ((2310, 2349), 'cv2.cvtColor', 'cv2.cvtColor', (['input', 'cv2.COLOR_RGB2GRAY'], {}), '(input, cv2.COLOR_RGB2GRAY)\n', (2322, 2349), False, 'import cv2\n'), ((2651, 2834), 'math.sqrt', 'math.sqrt', (['((start_coordinates[0] - current_coordinates[0]) ** 2 + (start_coordinates[\n 1] - current_coordinates[1]) ** 2 + (start_coordinates[2] -\n current_coordinates[2]) ** 2)'], {}), '((start_coordinates[0] - current_coordinates[0]) ** 2 + (\n start_coordinates[1] - current_coordinates[1]) ** 2 + (\n start_coordinates[2] - current_coordinates[2]) ** 2)\n', (2660, 2834), False, 'import math\n'), ((2897, 2953), 'math.fabs', 'math.fabs', (['(start_coordinates[3] - current_coordinates[3])'], {}), '(start_coordinates[3] - current_coordinates[3])\n', (2906, 2953), False, 'import math\n'), ((3824, 3858), 'numpy.concatenate', 'np.concatenate', (['image_list'], {'axis': '(0)'}), '(image_list, axis=0)\n', (3838, 3858), True, 'import numpy as np\n'), ((6779, 6815), 'random.randint', 'random.randint', (['(0)', 'number_of_actions'], {}), '(0, number_of_actions)\n', (6793, 6815), False, 'import random\n'), ((7263, 7359), 'math.sqrt', 'math.sqrt', (['((first_point[0] - second_point[0]) ** 2 + (first_point[1] - second_point[1\n ]) ** 2)'], {}), '((first_point[0] - second_point[0]) ** 2 + 
(first_point[1] -\n second_point[1]) ** 2)\n', (7272, 7359), False, 'import math\n'), ((2101, 2119), 'cv2.pyrDown', 'cv2.pyrDown', (['input'], {}), '(input)\n', (2112, 2119), False, 'import cv2\n'), ((2189, 2207), 'cv2.pyrDown', 'cv2.pyrDown', (['input'], {}), '(input)\n', (2200, 2207), False, 'import cv2\n'), ((2259, 2275), 'cv2.pyrUp', 'cv2.pyrUp', (['input'], {}), '(input)\n', (2268, 2275), False, 'import cv2\n'), ((5449, 5502), 'numpy.concatenate', 'np.concatenate', (['(current_screen, goal_screen)'], {'axis': '(2)'}), '((current_screen, goal_screen), axis=2)\n', (5463, 5502), True, 'import numpy as np\n'), ((5812, 5843), 'numpy.argmax', 'np.argmax', (['action_probabilities'], {}), '(action_probabilities)\n', (5821, 5843), True, 'import numpy as np\n'), ((6312, 6365), 'numpy.concatenate', 'np.concatenate', (['(current_screen, goal_screen)'], {'axis': '(2)'}), '((current_screen, goal_screen), axis=2)\n', (6326, 6365), True, 'import numpy as np\n'), ((6731, 6746), 'random.random', 'random.random', ([], {}), '()\n', (6744, 6746), False, 'import random\n'), ((1998, 2029), 'cPickle.dump', 'cPickle.dump', (['data', 'output_file'], {}), '(data, output_file)\n', (2010, 2029), False, 'import cPickle\n'), ((3233, 3295), 'random.randint', 'random.randint', (['(0)', '(number_of_samples - max_action_distance - 1)'], {}), '(0, number_of_samples - max_action_distance - 1)\n', (3247, 3295), False, 'import random\n'), ((3313, 3351), 'random.randint', 'random.randint', (['(1)', 'max_action_distance'], {}), '(1, max_action_distance)\n', (3327, 3351), False, 'import random\n'), ((3743, 3813), 'numpy.zeros', 'np.zeros', (['[SHOW_BORDER, image.shape[1], SHOW_CHANNELS]'], {'dtype': 'np.uint8'}), '([SHOW_BORDER, image.shape[1], SHOW_CHANNELS], dtype=np.uint8)\n', (3751, 3813), True, 'import numpy as np\n'), ((3466, 3511), 'numpy.concatenate', 'np.concatenate', (['(current_x, future_x)'], {'axis': '(2)'}), '((current_x, future_x), axis=2)\n', (3480, 3511), True, 'import numpy as 
np\n'), ((3554, 3570), 'numpy.array', 'np.array', (['x_list'], {}), '(x_list)\n', (3562, 3570), True, 'import numpy as np\n'), ((3572, 3588), 'numpy.array', 'np.array', (['y_list'], {}), '(y_list)\n', (3580, 3588), True, 'import numpy as np\n'), ((4515, 4582), 'numpy.zeros', 'np.zeros', (['[SHOW_HEIGHT, SHOW_BORDER, SHOW_CHANNELS]'], {'dtype': 'np.uint8'}), '([SHOW_HEIGHT, SHOW_BORDER, SHOW_CHANNELS], dtype=np.uint8)\n', (4523, 4582), True, 'import numpy as np\n'), ((5192, 5226), 'numpy.zeros_like', 'np.zeros_like', (['side_by_side_screen'], {}), '(side_by_side_screen)\n', (5205, 5226), True, 'import numpy as np\n')] |
import streamlit as st
from streamlit_player import st_player
import cv2
import numpy as np
import tempfile
import time
from PIL import Image
############################################################
############################################################
import os
import collections
# comment out below line to enable tensorflow logging outputs
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import time
import tensorflow as tf
import core.yolov4
import core.utils as utils
from core.yolov4 import filter_boxes
from tensorflow.python.saved_model import tag_constants
from core.config import cfg
from PIL import Image
import cv2
import numpy as np
import matplotlib.pyplot as plt
from tensorflow._api.v2.compat.v1 import ConfigProto
from tensorflow._api.v2.compat.v1 import InteractiveSession
from deep_sort import preprocessing, nn_matching
from deep_sort.detection import Detection
from deep_sort.tracker import Tracker
from tools import generate_detections as gdet
max_cosine_distance = 0.4
nn_budget = None
nms_max_overlap = 1.0
# initialize deep sort
model_filename = 'model_data/mars-small128.pb'
encoder = gdet.create_box_encoder(model_filename, batch_size=1)
# calculate cosine distance metric
metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
# initialize tracker
tracker = Tracker(metric)
# load configuration for object detector
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
STRIDES = np.array(cfg.YOLO.STRIDES)
ANCHORS = utils.get_anchors(cfg.YOLO.ANCHORS_TINY, True)
NUM_CLASS = len(utils.read_class_names(cfg.YOLO.CLASSES))
XYSCALE = cfg.YOLO.XYSCALE
FRAMEWORK = 'tf'
input_size = 416
video_path = './data/video/fall_sample2.mp4'
saved_model_loaded = tf.saved_model.load('./checkpoints/yolov4-416', tags=[tag_constants.SERVING])
infer = saved_model_loaded.signatures['serving_default']
############################################################
############################################################
DEMO_VIDEO = 'demo_video.mp4'
st.title('Fall Detection Application Using YOLO')
st.markdown(
"""
<style>
[data-testid="stSidebar"][aria-expanded="true"] > div:first-child {
width: 300px;
}
[data-testid="stSidebar"][aria-expanded="false"] > div:first-child {
width: 300px;
margin-left: -300px;
}
</style>
""",
unsafe_allow_html=True,
)
st.sidebar.title('Menu')
# st.sidebar.subheader('Parameters')
@st.cache()
def image_resize(image, width=None, height=None, inter=cv2.INTER_AREA):
    """Resize ``image`` to a target ``width`` OR ``height``, keeping aspect ratio.

    Exactly one of ``width``/``height`` should be given; when both are given,
    ``width`` wins, and when neither is given the image is returned unchanged.

    Args:
        image: numpy image array of shape (h, w[, channels]).
        width: desired output width in pixels, or None.
        height: desired output height in pixels, or None.
        inter: OpenCV interpolation flag used for the resize.

    Returns:
        The resized image (or the original array when no size was requested).
    """
    src_h, src_w = image.shape[:2]
    # Nothing requested: hand back the input untouched.
    if width is None and height is None:
        return image
    if width is None:
        # Scale so the output height matches, width follows proportionally.
        scale = height / float(src_h)
        target = (int(src_w * scale), height)
    else:
        # Scale so the output width matches, height follows proportionally.
        scale = width / float(src_w)
        target = (width, int(src_h * scale))
    return cv2.resize(image, target, interpolation=inter)
# Top-level navigation: each branch below renders one page of the app.
app_mode = st.sidebar.selectbox('Please Select',
                             ['About', 'Sample Videos', 'Help', 'Run on Video']
                             )
# 'About' page: short project description and the team logo.
if app_mode =='About':
    st.markdown('''
    This is an application for fall detection of individuals based on the **YOLO V.4** object detection algorithm.
    The method used in this algorithm is suitable for detecting falls from a standing position or while walking. \n
    This method is based on the proposed method in **<NAME>., & <NAME>. (2018).
    An image-based fall detection system for the elderly. Applied Sciences, 8(10), 1995.**
    ''')
    # Same sidebar-width CSS as the page header (repeated per page so the
    # layout stays fixed whichever branch Streamlit re-runs).
    st.markdown(
    """
    <style>
    [data-testid="stSidebar"][aria-expanded="true"] > div:first-child {
        width: 300px;
    }
    [data-testid="stSidebar"][aria-expanded="false"] > div:first-child {
        width: 300px;
        margin-left: -300px;
    }
    </style>
    """,
    unsafe_allow_html=True,
    )
    st.image('TEAM_LOGO.jpg')
# 'Sample Videos' page: pre-rendered demo clips.
elif app_mode == 'Sample Videos':
    st.video('demo1.mp4', format='video/mp4', start_time=0)
    st.video('demo2.mp4', format='video/mp4', start_time=0)
    st.video('demo3.mp4', format='video/mp4', start_time=0)
    st.video('demo4.mp4', format='video/mp4', start_time=0)
# 'Help' page: explains the Ratio slider used by the fall heuristic.
elif app_mode == 'Help':
    st.markdown('''
    - The Ratio Factor is a factor which multiplied by the height of the bounding box of 
    the person at 1.5 seconds before each moment. If the height of the bounding box at each 
    moment is less than the multiplication value, the algorithm will detect a falling-down occurrence. 
    The suggested value is 5.5, but values between 5 and 7 are good choices. The higher values will lead to more 
    conservative results. \n
    ''')
    st.markdown(
    """
    <style>
    [data-testid="stSidebar"][aria-expanded="true"] > div:first-child {
        width: 300px;
    }
    [data-testid="stSidebar"][aria-expanded="false"] > div:first-child {
        width: 300px;
        margin-left: -300px;
    }
    </style>
    """,
    unsafe_allow_html=True,
    )
####################################################################
####################################################################
elif app_mode == 'Run on Video':
st.set_option('deprecation.showfileUploaderEncoding', False)
st.sidebar.markdown('---')
ratio = st.sidebar.slider('Ratio', min_value=1.0, max_value=8.0, value=5.5, step=0.5)
st.sidebar.markdown('---')
st.markdown(' ## Output')
st.markdown(
"""
<style>
[data-testid="stSidebar"][aria-expanded="true"] > div:first-child {
width: 300px;
}
[data-testid="stSidebar"][aria-expanded="false"] > div:first-child {
width: 300px;
margin-left: -300px;
}
</style>
""",
unsafe_allow_html=True,
)
stframe = st.empty()
video_file_buffer = st.sidebar.file_uploader("Upload a video", type=['mp4'])
tffile = tempfile.NamedTemporaryFile(delete=False)
if not video_file_buffer:
vid = cv2.VideoCapture(DEMO_VIDEO)
tffile.name = DEMO_VIDEO
else:
tffile.write(video_file_buffer.read())
vid = cv2.VideoCapture(tffile.name)
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps_input = int(vid.get(cv2.CAP_PROP_FPS))
# codec = cv2.VideoWriter_fourcc(*FLAGS.output_format)
codec = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output_res.avi', codec, fps_input, (width, height))
st.sidebar.text('Input Video')
st.sidebar.video(tffile.name)
fps = 0
i = 0
kpi1, kpi2, kpi3 = st.beta_columns(3)
with kpi1:
kpi1 = st.markdown("**Frame Rate**")
kpi1_text = st.markdown("0")
with kpi2:
st.markdown("**Tracked Individuals**")
kpi2_text = st.markdown("0")
with kpi3:
st.markdown("**Fall Detection Status**")
kpi3_text = st.markdown('')
kpi3_text.write(f"<h1 style='text-align: center; color: green;'>{'No Fall'}</h1>", unsafe_allow_html=True)
st.markdown("<hr/>", unsafe_allow_html=True)
###################################################
###################################################
frame_num = 0
# while video is running
# DEFINING A DICTIONARY FOR TRACKING
id_Locs = collections.defaultdict(list) # FOR METHOD THREE
id_ylocs = collections.defaultdict(list) # FOR METHOD ONE
yLocs = []
falls = 0
track_dict = dict()
frame_list = []
while vid.isOpened():
i += 1
ret, frame = vid.read()
if not ret:
continue
if ret:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
image = Image.fromarray(frame)
else:
print('Video has ended or failed, try a different video format!')
break
frame_num += 1
frame_size = frame.shape[:2]
image_data = cv2.resize(frame, (input_size, input_size))
image_data = image_data / 255.
image_data = image_data[np.newaxis, ...].astype(np.float32)
start_time = time.time()
batch_data = tf.constant(image_data)
pred_bbox = infer(batch_data)
for key, value in pred_bbox.items():
boxes = value[:, :, 0:4]
pred_conf = value[:, :, 4:]
boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
scores=tf.reshape(
pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
max_output_size_per_class=50,
max_total_size=50,
iou_threshold=0.3,
score_threshold=0.2
)
# convert data to numpy arrays and slice out unused elements
num_objects = valid_detections.numpy()[0]
bboxes = boxes.numpy()[0]
bboxes = bboxes[0:int(num_objects)]
scores = scores.numpy()[0]
scores = scores[0:int(num_objects)]
classes = classes.numpy()[0]
classes = classes[0:int(num_objects)]
# format bounding boxes from normalized ymin, xmin, ymax, xmax ---> xmin, ymin, width, height
original_h, original_w, _ = frame.shape
bboxes = utils.format_boxes(bboxes, original_h, original_w)
# store all predictions in one parameter for simplicity when calling functions
pred_bbox = [bboxes, scores, classes, num_objects]
# read in all class names from config
class_names = utils.read_class_names(cfg.YOLO.CLASSES)
# by default allow all classes in .names file
# allowed_classes = list(class_names.values())
# custom allowed classes (uncomment line below to customize tracker for only people)
allowed_classes = ['person']
# loop through objects and use class index to get class name, allow only classes in allowed_classes list
names = []
deleted_indx = []
for i in range(num_objects):
class_indx = int(classes[i])
class_name = class_names[class_indx]
if class_name not in allowed_classes:
deleted_indx.append(i)
else:
names.append(class_name)
names = np.array(names)
count = len(names)
# cv2.putText(frame, "Objects being tracked: {}".format(count), (5, 35), cv2.FONT_HERSHEY_COMPLEX_SMALL, 2,
# (0, 255, 0), 2)
# print("Objects being tracked: {}".format(count))
# delete detections that are not in allowed_classes
bboxes = np.delete(bboxes, deleted_indx, axis=0)
scores = np.delete(scores, deleted_indx, axis=0)
# encode yolo detections and feed to tracker
features = encoder(frame, bboxes)
detections = [Detection(bbox, score, class_name, feature) for bbox, score, class_name, feature in
zip(bboxes, scores, names, features)]
# initialize color map
cmap = plt.get_cmap('tab20b')
colors = [cmap(i)[:3] for i in np.linspace(0, 1, 20)]
# run non-maxima supression
boxs = np.array([d.tlwh for d in detections])
scores = np.array([d.confidence for d in detections])
classes = np.array([d.class_name for d in detections])
indices = preprocessing.non_max_suppression(boxs, classes, nms_max_overlap, scores)
detections = [detections[i] for i in indices]
# Call the tracker
tracker.predict()
tracker.update(detections)
# update tracks
for track in tracker.tracks:
if not track.is_confirmed() or track.time_since_update > 1:
continue
bbox = track.to_tlbr()
class_name = track.get_class()
# draw bbox on screen
color = colors[int(track.track_id) % len(colors)]
color = [i * 255 for i in color]
cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), color, 2)
cv2.rectangle(frame, (int(bbox[0]), int(bbox[1] - 30)),
(int(bbox[0]) + (len(class_name) + len(str(track.track_id))) * 17, int(bbox[1])), color, -1)
# cv2.circle(frame, (int((bbox[0] + bbox[2]) / 2), int((bbox[1] + bbox[3]) / 2)), 5, color, -1)
# cv2.circle(frame, (int((bbox[0] + bbox[2]) / 2), int((bbox[1] + bbox[3]) / 2)), 15, (0, 255, 0), -1)
cv2.putText(frame, class_name + "-" + str(track.track_id), (int(bbox[0]), int(bbox[1] - 10)), 0, 0.75,
(255, 255, 255), 2)
#################################################
## PAPER METHOD FOR FALL DETECTION #############
#################################################
frameRate = 25
id_Locs[track.track_id].append([int(bbox[3] - bbox[1]), int(bbox[2] - bbox[0])])
for key, value in id_Locs.items():
if len(value) > int(np.floor(frameRate * 1.5)): # 1.5econds after detection a person:
# if value[-1][0] < (7/8) * value[-1 * int(np.floor(frameRate * 1.5))][0]:
# if value[-1][0] < (5.5 / 8) * value[-1 * int(np.floor(frameRate * 1.5))][0]:
if value[-1][0] < (ratio / 8) * value[-1 * int(np.floor(frameRate * 1.5))][0]:
print("Fall Detected")
cv2.putText(frame, "Person " + str(key) + " Fell Down", (70, 250), cv2.FONT_HERSHEY_PLAIN, 2,
(0, 0, 255), 3)
falls += 1
########################################################
# if enable, then print details about each track
# print("Tracker ID: {}, Class: {}, BBox Coords (xmin, ymin, xmax, ymax): {}".format(str(track.track_id),
# class_name, (
# int(bbox[0]),
# int(bbox[1]),
# int(bbox[2]),
# int(bbox[3]))))
each_id_list = [frame_num, str(track.track_id), int((bbox[0] + bbox[2]) / 2), int((bbox[1] + bbox[3]) / 2)]
frame_list.append(each_id_list)
# calculate frames per second of running detections
fps = 1.0 / (time.time() - start_time)
kpi1_text.write(f"<h1 style='text-align: center; color: red;'>{round(fps, 1)}</h1>", unsafe_allow_html=True)
kpi2_text.write(f"<h1 style='text-align: center; color: red;'>{count}</h1>", unsafe_allow_html=True)
if falls > 0:
cv2.putText(frame, "Fall Detected", (50, 100), cv2.FONT_HERSHEY_PLAIN, 3,
(255, 0, 0), 5)
kpi3_text.write(f"<h1 style='text-align: center; color: red;'>{'Fall Detected'}</h1>", unsafe_allow_html=True)
frame = cv2.resize(frame, (0, 0), fx=0.8, fy=0.8)
frame = image_resize(image=frame, width=640)
stframe.image(frame, channels='RGB', use_column_width=True)
out.write(frame)
vid.release()
out.release()
| [
"streamlit.sidebar.slider",
"core.utils.get_anchors",
"tensorflow._api.v2.compat.v1.InteractiveSession",
"streamlit.image",
"cv2.VideoWriter_fourcc",
"numpy.floor",
"streamlit.title",
"collections.defaultdict",
"streamlit.sidebar.title",
"streamlit.sidebar.selectbox",
"cv2.VideoWriter",
"deep_... | [((1119, 1172), 'tools.generate_detections.create_box_encoder', 'gdet.create_box_encoder', (['model_filename'], {'batch_size': '(1)'}), '(model_filename, batch_size=1)\n', (1142, 1172), True, 'from tools import generate_detections as gdet\n'), ((1217, 1304), 'deep_sort.nn_matching.NearestNeighborDistanceMetric', 'nn_matching.NearestNeighborDistanceMetric', (['"""cosine"""', 'max_cosine_distance', 'nn_budget'], {}), "('cosine', max_cosine_distance,\n nn_budget)\n", (1258, 1304), False, 'from deep_sort import preprocessing, nn_matching\n'), ((1332, 1347), 'deep_sort.tracker.Tracker', 'Tracker', (['metric'], {}), '(metric)\n', (1339, 1347), False, 'from deep_sort.tracker import Tracker\n'), ((1398, 1411), 'tensorflow._api.v2.compat.v1.ConfigProto', 'ConfigProto', ([], {}), '()\n', (1409, 1411), False, 'from tensorflow._api.v2.compat.v1 import ConfigProto\n'), ((1461, 1494), 'tensorflow._api.v2.compat.v1.InteractiveSession', 'InteractiveSession', ([], {'config': 'config'}), '(config=config)\n', (1479, 1494), False, 'from tensorflow._api.v2.compat.v1 import InteractiveSession\n'), ((1505, 1531), 'numpy.array', 'np.array', (['cfg.YOLO.STRIDES'], {}), '(cfg.YOLO.STRIDES)\n', (1513, 1531), True, 'import numpy as np\n'), ((1542, 1588), 'core.utils.get_anchors', 'utils.get_anchors', (['cfg.YOLO.ANCHORS_TINY', '(True)'], {}), '(cfg.YOLO.ANCHORS_TINY, True)\n', (1559, 1588), True, 'import core.utils as utils\n'), ((1774, 1851), 'tensorflow.saved_model.load', 'tf.saved_model.load', (['"""./checkpoints/yolov4-416"""'], {'tags': '[tag_constants.SERVING]'}), "('./checkpoints/yolov4-416', tags=[tag_constants.SERVING])\n", (1793, 1851), True, 'import tensorflow as tf\n'), ((2063, 2112), 'streamlit.title', 'st.title', (['"""Fall Detection Application Using YOLO"""'], {}), "('Fall Detection Application Using YOLO')\n", (2071, 2112), True, 'import streamlit as st\n'), ((2114, 2427), 'streamlit.markdown', 'st.markdown', (['"""\n <style>\n 
[data-testid="stSidebar"][aria-expanded="true"] > div:first-child {\n width: 300px;\n }\n [data-testid="stSidebar"][aria-expanded="false"] > div:first-child {\n width: 300px;\n margin-left: -300px;\n }\n </style>\n """'], {'unsafe_allow_html': '(True)'}), '(\n """\n <style>\n [data-testid="stSidebar"][aria-expanded="true"] > div:first-child {\n width: 300px;\n }\n [data-testid="stSidebar"][aria-expanded="false"] > div:first-child {\n width: 300px;\n margin-left: -300px;\n }\n </style>\n """\n , unsafe_allow_html=True)\n', (2125, 2427), True, 'import streamlit as st\n'), ((2430, 2454), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""Menu"""'], {}), "('Menu')\n", (2446, 2454), True, 'import streamlit as st\n'), ((2495, 2505), 'streamlit.cache', 'st.cache', ([], {}), '()\n', (2503, 2505), True, 'import streamlit as st\n'), ((3399, 3492), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Please Select"""', "['About', 'Sample Videos', 'Help', 'Run on Video']"], {}), "('Please Select', ['About', 'Sample Videos', 'Help',\n 'Run on Video'])\n", (3419, 3492), True, 'import streamlit as st\n'), ((1605, 1645), 'core.utils.read_class_names', 'utils.read_class_names', (['cfg.YOLO.CLASSES'], {}), '(cfg.YOLO.CLASSES)\n', (1627, 1645), True, 'import core.utils as utils\n'), ((3293, 3336), 'cv2.resize', 'cv2.resize', (['image', 'dim'], {'interpolation': 'inter'}), '(image, dim, interpolation=inter)\n', (3303, 3336), False, 'import cv2\n'), ((3518, 3954), 'streamlit.markdown', 'st.markdown', (['"""\n This is an application for fall detection of individuals based on the **YOLO V.4** object detection algorithm.\n The method used in this algorithm is suitable for detecting falls from a standing position or while walking. \n\n This method is based on the proposed method in **<NAME>., & <NAME>. (2018). \n An image-based fall detection system for the elderly. 
Applied Sciences, 8(10), 1995.**\n """'], {}), '(\n """\n This is an application for fall detection of individuals based on the **YOLO V.4** object detection algorithm.\n The method used in this algorithm is suitable for detecting falls from a standing position or while walking. \n\n This method is based on the proposed method in **<NAME>., & <NAME>. (2018). \n An image-based fall detection system for the elderly. Applied Sciences, 8(10), 1995.**\n """\n )\n', (3529, 3954), True, 'import streamlit as st\n'), ((3951, 4264), 'streamlit.markdown', 'st.markdown', (['"""\n <style>\n [data-testid="stSidebar"][aria-expanded="true"] > div:first-child {\n width: 300px;\n }\n [data-testid="stSidebar"][aria-expanded="false"] > div:first-child {\n width: 300px;\n margin-left: -300px;\n }\n </style>\n """'], {'unsafe_allow_html': '(True)'}), '(\n """\n <style>\n [data-testid="stSidebar"][aria-expanded="true"] > div:first-child {\n width: 300px;\n }\n [data-testid="stSidebar"][aria-expanded="false"] > div:first-child {\n width: 300px;\n margin-left: -300px;\n }\n </style>\n """\n , unsafe_allow_html=True)\n', (3962, 4264), True, 'import streamlit as st\n'), ((4274, 4299), 'streamlit.image', 'st.image', (['"""TEAM_LOGO.jpg"""'], {}), "('TEAM_LOGO.jpg')\n", (4282, 4299), True, 'import streamlit as st\n'), ((4339, 4394), 'streamlit.video', 'st.video', (['"""demo1.mp4"""'], {'format': '"""video/mp4"""', 'start_time': '(0)'}), "('demo1.mp4', format='video/mp4', start_time=0)\n", (4347, 4394), True, 'import streamlit as st\n'), ((4399, 4454), 'streamlit.video', 'st.video', (['"""demo2.mp4"""'], {'format': '"""video/mp4"""', 'start_time': '(0)'}), "('demo2.mp4', format='video/mp4', start_time=0)\n", (4407, 4454), True, 'import streamlit as st\n'), ((4459, 4514), 'streamlit.video', 'st.video', (['"""demo3.mp4"""'], {'format': '"""video/mp4"""', 'start_time': '(0)'}), "('demo3.mp4', format='video/mp4', start_time=0)\n", (4467, 4514), True, 'import streamlit as st\n'), ((4519, 4574), 
'streamlit.video', 'st.video', (['"""demo4.mp4"""'], {'format': '"""video/mp4"""', 'start_time': '(0)'}), "('demo4.mp4', format='video/mp4', start_time=0)\n", (4527, 4574), True, 'import streamlit as st\n'), ((4605, 5092), 'streamlit.markdown', 'st.markdown', (['"""\n - The Ratio Factor is a factor which multiplied by the height of the bounding box of \n the person at 1.5 seconds before each moment. If the height of the bounding box at each \n moment is less than the multiplication value, the algorithm will detect a falling-down occurrence. \n The suggested value is 5.5, but values between 5 and 7 are good choices. The higher values will lead to more \n conservative results. \n\n """'], {}), '(\n """\n - The Ratio Factor is a factor which multiplied by the height of the bounding box of \n the person at 1.5 seconds before each moment. If the height of the bounding box at each \n moment is less than the multiplication value, the algorithm will detect a falling-down occurrence. \n The suggested value is 5.5, but values between 5 and 7 are good choices. The higher values will lead to more \n conservative results. 
\n\n """\n )\n', (4616, 5092), True, 'import streamlit as st\n'), ((5089, 5442), 'streamlit.markdown', 'st.markdown', (['"""\n <style>\n [data-testid="stSidebar"][aria-expanded="true"] > div:first-child {\n width: 300px;\n }\n [data-testid="stSidebar"][aria-expanded="false"] > div:first-child {\n width: 300px;\n margin-left: -300px;\n }\n </style>\n """'], {'unsafe_allow_html': '(True)'}), '(\n """\n <style>\n [data-testid="stSidebar"][aria-expanded="true"] > div:first-child {\n width: 300px;\n }\n [data-testid="stSidebar"][aria-expanded="false"] > div:first-child {\n width: 300px;\n margin-left: -300px;\n }\n </style>\n """\n , unsafe_allow_html=True)\n', (5100, 5442), True, 'import streamlit as st\n'), ((5635, 5695), 'streamlit.set_option', 'st.set_option', (['"""deprecation.showfileUploaderEncoding"""', '(False)'], {}), "('deprecation.showfileUploaderEncoding', False)\n", (5648, 5695), True, 'import streamlit as st\n'), ((5700, 5726), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""---"""'], {}), "('---')\n", (5719, 5726), True, 'import streamlit as st\n'), ((5739, 5816), 'streamlit.sidebar.slider', 'st.sidebar.slider', (['"""Ratio"""'], {'min_value': '(1.0)', 'max_value': '(8.0)', 'value': '(5.5)', 'step': '(0.5)'}), "('Ratio', min_value=1.0, max_value=8.0, value=5.5, step=0.5)\n", (5756, 5816), True, 'import streamlit as st\n'), ((5821, 5847), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""---"""'], {}), "('---')\n", (5840, 5847), True, 'import streamlit as st\n'), ((5852, 5877), 'streamlit.markdown', 'st.markdown', (['""" ## Output"""'], {}), "(' ## Output')\n", (5863, 5877), True, 'import streamlit as st\n'), ((5883, 6236), 'streamlit.markdown', 'st.markdown', (['"""\n <style>\n [data-testid="stSidebar"][aria-expanded="true"] > div:first-child {\n width: 300px;\n }\n [data-testid="stSidebar"][aria-expanded="false"] > div:first-child {\n width: 300px;\n margin-left: -300px;\n }\n </style>\n """'], {'unsafe_allow_html': '(True)'}), '(\n 
"""\n <style>\n [data-testid="stSidebar"][aria-expanded="true"] > div:first-child {\n width: 300px;\n }\n [data-testid="stSidebar"][aria-expanded="false"] > div:first-child {\n width: 300px;\n margin-left: -300px;\n }\n </style>\n """\n , unsafe_allow_html=True)\n', (5894, 6236), True, 'import streamlit as st\n'), ((6265, 6275), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (6273, 6275), True, 'import streamlit as st\n'), ((6300, 6356), 'streamlit.sidebar.file_uploader', 'st.sidebar.file_uploader', (['"""Upload a video"""'], {'type': "['mp4']"}), "('Upload a video', type=['mp4'])\n", (6324, 6356), True, 'import streamlit as st\n'), ((6370, 6411), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (6397, 6411), False, 'import tempfile\n'), ((6844, 6875), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (6866, 6875), False, 'import cv2\n'), ((6886, 6954), 'cv2.VideoWriter', 'cv2.VideoWriter', (['"""output_res.avi"""', 'codec', 'fps_input', '(width, height)'], {}), "('output_res.avi', codec, fps_input, (width, height))\n", (6901, 6954), False, 'import cv2\n'), ((6960, 6990), 'streamlit.sidebar.text', 'st.sidebar.text', (['"""Input Video"""'], {}), "('Input Video')\n", (6975, 6990), True, 'import streamlit as st\n'), ((6995, 7024), 'streamlit.sidebar.video', 'st.sidebar.video', (['tffile.name'], {}), '(tffile.name)\n', (7011, 7024), True, 'import streamlit as st\n'), ((7071, 7089), 'streamlit.beta_columns', 'st.beta_columns', (['(3)'], {}), '(3)\n', (7086, 7089), True, 'import streamlit as st\n'), ((7509, 7553), 'streamlit.markdown', 'st.markdown', (['"""<hr/>"""'], {'unsafe_allow_html': '(True)'}), "('<hr/>', unsafe_allow_html=True)\n", (7520, 7553), True, 'import streamlit as st\n'), ((7769, 7798), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (7792, 7798), False, 'import collections\n'), ((7834, 7863), 'collections.defaultdict', 
'collections.defaultdict', (['list'], {}), '(list)\n', (7857, 7863), False, 'import collections\n'), ((6457, 6485), 'cv2.VideoCapture', 'cv2.VideoCapture', (['DEMO_VIDEO'], {}), '(DEMO_VIDEO)\n', (6473, 6485), False, 'import cv2\n'), ((6590, 6619), 'cv2.VideoCapture', 'cv2.VideoCapture', (['tffile.name'], {}), '(tffile.name)\n', (6606, 6619), False, 'import cv2\n'), ((7121, 7150), 'streamlit.markdown', 'st.markdown', (['"""**Frame Rate**"""'], {}), "('**Frame Rate**')\n", (7132, 7150), True, 'import streamlit as st\n'), ((7171, 7187), 'streamlit.markdown', 'st.markdown', (['"""0"""'], {}), "('0')\n", (7182, 7187), True, 'import streamlit as st\n'), ((7212, 7250), 'streamlit.markdown', 'st.markdown', (['"""**Tracked Individuals**"""'], {}), "('**Tracked Individuals**')\n", (7223, 7250), True, 'import streamlit as st\n'), ((7271, 7287), 'streamlit.markdown', 'st.markdown', (['"""0"""'], {}), "('0')\n", (7282, 7287), True, 'import streamlit as st\n'), ((7312, 7352), 'streamlit.markdown', 'st.markdown', (['"""**Fall Detection Status**"""'], {}), "('**Fall Detection Status**')\n", (7323, 7352), True, 'import streamlit as st\n'), ((7373, 7388), 'streamlit.markdown', 'st.markdown', (['""""""'], {}), "('')\n", (7384, 7388), True, 'import streamlit as st\n'), ((8379, 8422), 'cv2.resize', 'cv2.resize', (['frame', '(input_size, input_size)'], {}), '(frame, (input_size, input_size))\n', (8389, 8422), False, 'import cv2\n'), ((8551, 8562), 'time.time', 'time.time', ([], {}), '()\n', (8560, 8562), False, 'import time\n'), ((8584, 8607), 'tensorflow.constant', 'tf.constant', (['image_data'], {}), '(image_data)\n', (8595, 8607), True, 'import tensorflow as tf\n'), ((9713, 9763), 'core.utils.format_boxes', 'utils.format_boxes', (['bboxes', 'original_h', 'original_w'], {}), '(bboxes, original_h, original_w)\n', (9731, 9763), True, 'import core.utils as utils\n'), ((9978, 10018), 'core.utils.read_class_names', 'utils.read_class_names', (['cfg.YOLO.CLASSES'], {}), 
'(cfg.YOLO.CLASSES)\n', (10000, 10018), True, 'import core.utils as utils\n'), ((10707, 10722), 'numpy.array', 'np.array', (['names'], {}), '(names)\n', (10715, 10722), True, 'import numpy as np\n'), ((11040, 11079), 'numpy.delete', 'np.delete', (['bboxes', 'deleted_indx'], {'axis': '(0)'}), '(bboxes, deleted_indx, axis=0)\n', (11049, 11079), True, 'import numpy as np\n'), ((11097, 11136), 'numpy.delete', 'np.delete', (['scores', 'deleted_indx'], {'axis': '(0)'}), '(scores, deleted_indx, axis=0)\n', (11106, 11136), True, 'import numpy as np\n'), ((11444, 11466), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""tab20b"""'], {}), "('tab20b')\n", (11456, 11466), True, 'import matplotlib.pyplot as plt\n'), ((11580, 11618), 'numpy.array', 'np.array', (['[d.tlwh for d in detections]'], {}), '([d.tlwh for d in detections])\n', (11588, 11618), True, 'import numpy as np\n'), ((11636, 11680), 'numpy.array', 'np.array', (['[d.confidence for d in detections]'], {}), '([d.confidence for d in detections])\n', (11644, 11680), True, 'import numpy as np\n'), ((11699, 11743), 'numpy.array', 'np.array', (['[d.class_name for d in detections]'], {}), '([d.class_name for d in detections])\n', (11707, 11743), True, 'import numpy as np\n'), ((11762, 11835), 'deep_sort.preprocessing.non_max_suppression', 'preprocessing.non_max_suppression', (['boxs', 'classes', 'nms_max_overlap', 'scores'], {}), '(boxs, classes, nms_max_overlap, scores)\n', (11795, 11835), False, 'from deep_sort import preprocessing, nn_matching\n'), ((15643, 15684), 'cv2.resize', 'cv2.resize', (['frame', '(0, 0)'], {'fx': '(0.8)', 'fy': '(0.8)'}), '(frame, (0, 0), fx=0.8, fy=0.8)\n', (15653, 15684), False, 'import cv2\n'), ((8106, 8144), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (8118, 8144), False, 'import cv2\n'), ((8165, 8187), 'PIL.Image.fromarray', 'Image.fromarray', (['frame'], {}), '(frame)\n', (8180, 8187), False, 'from PIL import Image\n'), ((11254, 
11297), 'deep_sort.detection.Detection', 'Detection', (['bbox', 'score', 'class_name', 'feature'], {}), '(bbox, score, class_name, feature)\n', (11263, 11297), False, 'from deep_sort.detection import Detection\n'), ((15389, 15483), 'cv2.putText', 'cv2.putText', (['frame', '"""Fall Detected"""', '(50, 100)', 'cv2.FONT_HERSHEY_PLAIN', '(3)', '(255, 0, 0)', '(5)'], {}), "(frame, 'Fall Detected', (50, 100), cv2.FONT_HERSHEY_PLAIN, 3, (\n 255, 0, 0), 5)\n", (15400, 15483), False, 'import cv2\n'), ((11506, 11527), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(20)'], {}), '(0, 1, 20)\n', (11517, 11527), True, 'import numpy as np\n'), ((15101, 15112), 'time.time', 'time.time', ([], {}), '()\n', (15110, 15112), False, 'import time\n'), ((13416, 13441), 'numpy.floor', 'np.floor', (['(frameRate * 1.5)'], {}), '(frameRate * 1.5)\n', (13424, 13441), True, 'import numpy as np\n'), ((8895, 8910), 'tensorflow.shape', 'tf.shape', (['boxes'], {}), '(boxes)\n', (8903, 8910), True, 'import tensorflow as tf\n'), ((8986, 9005), 'tensorflow.shape', 'tf.shape', (['pred_conf'], {}), '(pred_conf)\n', (8994, 9005), True, 'import tensorflow as tf\n'), ((9014, 9033), 'tensorflow.shape', 'tf.shape', (['pred_conf'], {}), '(pred_conf)\n', (9022, 9033), True, 'import tensorflow as tf\n'), ((13744, 13769), 'numpy.floor', 'np.floor', (['(frameRate * 1.5)'], {}), '(frameRate * 1.5)\n', (13752, 13769), True, 'import numpy as np\n')] |
import pickle
from datetime import datetime
import numpy as np
from gensim.models import KeyedVectors
from sklearn.metrics import accuracy_score, f1_score
from sklearn.metrics import confusion_matrix
from utils_d.ml_models import train_lgb
from utils_d.ml_utils import Ensemble
def annotate(
        match_df, algorithm, level, vector_method="topdown", limit=100):
    """Interactively annotate up to ``limit`` rows of ``match_df``.

    Each row is printed and the user types one of ``"1"``, ``"0"``,
    ``"0.5"`` or ``"skip"``. Answers are stored in a new ``check`` column
    and the frame is written to
    ``annotations/match_df_{level}_{algorithm}_{vector_method}_vec.csv``.

    Args:
        match_df: pandas DataFrame of candidate matches to judge.
        algorithm: tag used in the output file name.
        level: tag used in the output file name.
        vector_method: tag used in the output file name.
        limit: maximum number of rows to annotate. (BUGFIX: this parameter
            was previously shadowed by a hard-coded ``limit = 100`` inside
            the function and therefore ignored.)

    Returns:
        The same DataFrame with the new ``check`` column attached.
    """
    checking = []
    annotation_values = ["1", "0", "0.5", "skip"]
    for value_i, value in enumerate(match_df.values):
        if value_i >= limit:
            break
        # Checkpoint progress every 10 rows so a crash loses little work.
        if value_i % 10 == 0:
            match_df.to_csv(
                f"match_df_{level}_{algorithm}_{vector_method}.csv")
        print(value_i, "out of", len(match_df.values))
        print(value)
        annotation = None
        while annotation not in annotation_values:
            try:
                annotation = input(
                    "{} :".format(" or ".join(annotation_values)))
            except Exception as ex:
                # Keyboard interrupts / EOF default the answer to "0".
                print(ex)
                annotation = "0"
            if annotation not in annotation_values:
                print("wrong value")
        # BUGFIX: skipped rows previously appended nothing at all, which
        # shifted every later annotation onto the wrong row; record skips
        # as None so the column stays aligned with the frame.
        checking.append(None if annotation == "skip" else annotation)
    # Pad unannotated trailing rows so the column length matches the frame.
    if len(checking) < match_df.shape[0]:
        checking += [None] * (match_df.shape[0] - len(checking))
    match_df["check"] = checking
    match_df.to_csv(
        f"annotations/match_df_{level}_{algorithm}_{vector_method}_vec.csv")
    print("accuracy is", match_df.check.astype(float).sum())
    return match_df
def get_now():
    """Return the current local time as a filename-safe ISO-8601 string.

    Equivalent to ``datetime.now().isoformat()`` truncated to whole seconds
    with every ``:`` replaced by ``_``, e.g. ``2024-01-02T03_04_05``.
    """
    return datetime.now().strftime("%Y-%m-%dT%H_%M_%S")
def load_embeddings(lang, muse=True):
    """Load pre-trained aligned word vectors for ``lang``.

    Args:
        lang: two-letter language code used in the vector file name.
        muse: when True load the MUSE multilingual vectors, otherwise the
            fastText ``.align`` vectors.

    Returns:
        A gensim ``KeyedVectors`` instance.
    """
    vec_path = (f'muse_embeddings/wiki.multi.{lang}.vec' if muse
                else f'muse_embeddings/wiki.{lang}.align.vec')
    vectors = KeyedVectors.load_word2vec_format(vec_path)
    print("loaded")
    return vectors
def train_gbm(x_train, x_test, y_train, y_test, name, k_fold=False):
    """Train a LightGBM model (optionally a k-fold ensemble), report scores.

    A 2-D ``y_train`` is treated as multiclass (one column per class); a
    1-D target selects the binary objective.  The fitted model is pickled
    to ``{name}gbm_{score}.pcl`` and threshold sweeps of accuracy/F1 are
    printed for inspection.

    Args:
        x_train, x_test: feature matrices.
        y_train, y_test: training and held-out targets.
        name: prefix for the pickled model file.
        k_fold: if True wrap the per-fold models in an Ensemble.

    Returns:
        Tuple ``(model, thresholds, weights)``; ``thresholds`` is
        currently always None.
    """
    objective = 'multiclass'
    thresholds, weights = None, None
    # check whether target is binary
    if len(y_train.shape) > 1:
        categ_nums = np.unique(y_train).shape[0]
    else:
        categ_nums = None
        objective = "binary"
    gbm_params = {
        'objective': objective,
        'max_depth': 8,
        'num_leaves': 12,
        'subsample': 0.8,
        'learning_rate': 0.1,
        'estimators': 1000,
        'num_trees': 10000,
        'num_class': categ_nums,
        'early_stopping_rounds': 10,
        'verbose': -1,
        "silent": True}
    gbm, score, weights = train_lgb(
        x_train, x_test, y_train, y_test, k_fold=k_fold, params=gbm_params,
        n_splits=2, n_repeats=2)
    if not k_fold:
        gbm = gbm[0]
    else:
        gbm = Ensemble(gbm)
    # use a context manager so the pickle file handle is always closed
    with open(f"{name}gbm_{score}.pcl", "wb") as model_file:
        pickle.dump(gbm, model_file)
    preds = gbm.predict(x_test)
    if categ_nums:
        for threshold in (0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99):
            print(threshold)
            mask = [i for i in range(len(preds)) if any(preds[i] > threshold)]
            mask_preds = preds[mask]
            mask_preds = np.argmax(mask_preds, axis=1)
            mask_y = y_test[mask]
            print("acc", accuracy_score(mask_y, mask_preds))
            # f1_score defaults to average='binary', which raises a
            # ValueError for multiclass targets; use a macro average here.
            print("f1", f1_score(mask_y, mask_preds, average="macro"))
            print(confusion_matrix(mask_y, mask_preds))
    else:
        for threshold in range(1, 10):
            threshold = threshold / 10
            print(threshold)
            mask_preds = preds > threshold
            mask_preds = mask_preds.astype(int)
            print("acc", accuracy_score(y_test, mask_preds))
            print("f1", f1_score(y_test, mask_preds))
    return gbm, thresholds, weights
| [
"utils_d.ml_utils.Ensemble",
"sklearn.metrics.confusion_matrix",
"numpy.argmax",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.f1_score",
"gensim.models.KeyedVectors.load_word2vec_format",
"utils_d.ml_models.train_lgb",
"datetime.datetime.now",
"numpy.unique"
] | [((2062, 2101), 'gensim.models.KeyedVectors.load_word2vec_format', 'KeyedVectors.load_word2vec_format', (['path'], {}), '(path)\n', (2095, 2101), False, 'from gensim.models import KeyedVectors\n'), ((2814, 2921), 'utils_d.ml_models.train_lgb', 'train_lgb', (['x_train', 'x_test', 'y_train', 'y_test'], {'k_fold': 'k_fold', 'params': 'gbm_params', 'n_splits': '(2)', 'n_repeats': '(2)'}), '(x_train, x_test, y_train, y_test, k_fold=k_fold, params=\n gbm_params, n_splits=2, n_repeats=2)\n', (2823, 2921), False, 'from utils_d.ml_models import train_lgb\n'), ((2998, 3011), 'utils_d.ml_utils.Ensemble', 'Ensemble', (['gbm'], {}), '(gbm)\n', (3006, 3011), False, 'from utils_d.ml_utils import Ensemble\n'), ((1769, 1783), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1781, 1783), False, 'from datetime import datetime\n'), ((3362, 3391), 'numpy.argmax', 'np.argmax', (['mask_preds'], {'axis': '(1)'}), '(mask_preds, axis=1)\n', (3371, 3391), True, 'import numpy as np\n'), ((2365, 2383), 'numpy.unique', 'np.unique', (['y_train'], {}), '(y_train)\n', (2374, 2383), True, 'import numpy as np\n'), ((3451, 3485), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['mask_y', 'mask_preds'], {}), '(mask_y, mask_preds)\n', (3465, 3485), False, 'from sklearn.metrics import accuracy_score, f1_score\n'), ((3511, 3539), 'sklearn.metrics.f1_score', 'f1_score', (['mask_y', 'mask_preds'], {}), '(mask_y, mask_preds)\n', (3519, 3539), False, 'from sklearn.metrics import accuracy_score, f1_score\n'), ((3559, 3595), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['mask_y', 'mask_preds'], {}), '(mask_y, mask_preds)\n', (3575, 3595), False, 'from sklearn.metrics import confusion_matrix\n'), ((3830, 3864), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'mask_preds'], {}), '(y_test, mask_preds)\n', (3844, 3864), False, 'from sklearn.metrics import accuracy_score, f1_score\n'), ((3890, 3918), 'sklearn.metrics.f1_score', 'f1_score', (['y_test', 'mask_preds'], 
{}), '(y_test, mask_preds)\n', (3898, 3918), False, 'from sklearn.metrics import accuracy_score, f1_score\n')] |
#!/usr/bin/python
"""Plot LFE difference of a binary OP against another OP."""
import argparse
import sys
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
import pandas as pd
from origamipy import plot
def main():
    """Plot per-staple LFE differences of a binary OP against another OP.

    For each staple-state tag, reads the averaged LFE values and their
    standard deviations from CSV, takes the difference between the
    OP's 1-state and 0-state curves, and plots one temperature-coloured
    errorbar series per temperature, saving PNG and PDF figures.
    """
    args = parse_args()
    tag2 = args.tag2
    figsize = (plot.cm_to_inches(8.5), plot.cm_to_inches(7))
    # One tag per staple type: staplestates1 .. staplestatesN
    tag1s = ['staplestates{}'.format(i) for i in range(1, args.stapletypes + 1)]
    for tag1 in tag1s:
        inp_filebase = '{}/{}-{}_{}-{}-lfes'.format(args.input_dir, args.system,
                args.vari, tag1, tag2)
        out_filebase = '{}/{}-{}_{}-{}-lfes-all'.format(args.output_dir,
                args.system, args.vari, tag1, tag2)
        plot.set_default_appearance()
        f = plt.figure(figsize=figsize, dpi=300)
        # Two rows: the main axes on top, a thin horizontal colorbar below.
        gs = gridspec.GridSpec(2, 1, width_ratios=[1], height_ratios=[20, 1])
        gs_main = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=gs[0, :],
                wspace=0, hspace=0)
        gs_cb = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=gs[1, :])
        ax = f.add_subplot(gs[0])
        # NOTE(review): '\m' in this non-raw string is an invalid escape
        # (DeprecationWarning on newer Pythons); consider a raw string.
        ax.set_ylabel('$k_\mathrm{b}T$')
        # Map each temperature onto the viridis colormap.
        norm = mpl.colors.Normalize(vmin=args.temps[0], vmax=args.temps[-1])
        cmap = mpl.cm.viridis
        scalarmap = mpl.cm.ScalarMappable(norm=norm, cmap=cmap)
        # NOTE(review): the extra format() args (tag1, tag2) are ignored by
        # the single-placeholder template — presumably leftover.
        aves_filename = '{}.aves'.format(inp_filebase, tag1, tag2)
        aves = pd.read_csv(aves_filename, sep=' ')
        # Split rows by the binary OP state and index them by the other OP.
        aves_one = aves[aves[tag1] == 1].sort_values(tag2).set_index(tag2)
        aves_zero = aves[aves[tag1] == 0].sort_values(tag2).set_index(tag2)
        aves_diff = aves_one - aves_zero
        stds_filename = '{}.stds'.format(inp_filebase, tag1, tag2)
        stds = pd.read_csv(stds_filename, sep=' ')
        stds_one = stds[stds[tag1] == 1].sort_values(tag2).set_index(tag2)
        stds_zero = stds[stds[tag1] == 0].sort_values(tag2).set_index(tag2)
        # Propagate uncertainty of the difference in quadrature.
        stds_prop = np.sqrt(stds_one**2 + stds_zero**2)
        for i, temp in enumerate(args.temps):
            # Column names are temperatures formatted to 3 decimal places.
            temp_key = '{:.3f}'.format(temp)
            ax.errorbar(aves_diff.index, aves_diff[temp_key],
                    yerr=stds_prop[temp_key], marker='o',
                    color=scalarmap.to_rgba(temp))
        # Colorbar
        ax = f.add_subplot(gs_cb[0])
        cbar = mpl.colorbar.ColorbarBase(ax, orientation='horizontal', cmap=cmap,
                norm=norm)
        ax.set_xlabel('Temperature / K')
        plt.tight_layout(pad=0.5, h_pad=0, w_pad=0)
        f.savefig('{}.png'.format(out_filebase), transparent=True)
        f.savefig('{}.pdf'.format(out_filebase), transparent=True)
def parse_args():
    """Build and parse the command-line arguments for this script."""
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    # Positional arguments: (name, type, help text).
    positionals = [
        ('input_dir', str, 'Directory of inputs'),
        ('output_dir', str, 'Output directory'),
        ('system', str, 'System'),
        ('vari', str, 'Simulation variant'),
        ('stapletypes', int, 'Number of staple types'),
        ('tag2', str, 'Cumaltive OP tag to plot against'),
    ]
    for arg_name, arg_type, arg_help in positionals:
        parser.add_argument(arg_name, type=arg_type, help=arg_help)
    parser.add_argument(
        '--temps',
        nargs='+',
        type=float,
        help='Temperatures')
    return parser.parse_args()
# Script entry point: run only when executed directly, not on import.
if __name__ == '__main__':
    main()
| [
"argparse.ArgumentParser",
"matplotlib.colors.Normalize",
"pandas.read_csv",
"matplotlib.cm.ScalarMappable",
"matplotlib.pyplot.figure",
"matplotlib.use",
"matplotlib.colorbar.ColorbarBase",
"matplotlib.gridspec.GridSpec",
"matplotlib.gridspec.GridSpecFromSubplotSpec",
"matplotlib.pyplot.tight_lay... | [((134, 148), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (141, 148), True, 'import matplotlib as mpl\n'), ((2727, 2830), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), '(description=__doc__, formatter_class=argparse.\n RawDescriptionHelpFormatter)\n', (2750, 2830), False, 'import argparse\n'), ((362, 384), 'origamipy.plot.cm_to_inches', 'plot.cm_to_inches', (['(8.5)'], {}), '(8.5)\n', (379, 384), False, 'from origamipy import plot\n'), ((386, 406), 'origamipy.plot.cm_to_inches', 'plot.cm_to_inches', (['(7)'], {}), '(7)\n', (403, 406), False, 'from origamipy import plot\n'), ((767, 796), 'origamipy.plot.set_default_appearance', 'plot.set_default_appearance', ([], {}), '()\n', (794, 796), False, 'from origamipy import plot\n'), ((809, 845), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize', 'dpi': '(300)'}), '(figsize=figsize, dpi=300)\n', (819, 845), True, 'import matplotlib.pyplot as plt\n'), ((859, 923), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(2)', '(1)'], {'width_ratios': '[1]', 'height_ratios': '[20, 1]'}), '(2, 1, width_ratios=[1], height_ratios=[20, 1])\n', (876, 923), True, 'import matplotlib.gridspec as gridspec\n'), ((942, 1027), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', (['(1)', '(1)'], {'subplot_spec': 'gs[0, :]', 'wspace': '(0)', 'hspace': '(0)'}), '(1, 1, subplot_spec=gs[0, :], wspace=0,\n hspace=0)\n', (974, 1027), True, 'import matplotlib.gridspec as gridspec\n'), ((1091, 1152), 'matplotlib.gridspec.GridSpecFromSubplotSpec', 'gridspec.GridSpecFromSubplotSpec', (['(1)', '(1)'], {'subplot_spec': 'gs[1, :]'}), '(1, 1, subplot_spec=gs[1, :])\n', (1123, 1152), True, 'import matplotlib.gridspec as gridspec\n'), ((1244, 1305), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': 'args.temps[0]', 'vmax': 
'args.temps[-1]'}), '(vmin=args.temps[0], vmax=args.temps[-1])\n', (1264, 1305), True, 'import matplotlib as mpl\n'), ((1356, 1399), 'matplotlib.cm.ScalarMappable', 'mpl.cm.ScalarMappable', ([], {'norm': 'norm', 'cmap': 'cmap'}), '(norm=norm, cmap=cmap)\n', (1377, 1399), True, 'import matplotlib as mpl\n'), ((1483, 1518), 'pandas.read_csv', 'pd.read_csv', (['aves_filename'], {'sep': '""" """'}), "(aves_filename, sep=' ')\n", (1494, 1518), True, 'import pandas as pd\n'), ((1794, 1829), 'pandas.read_csv', 'pd.read_csv', (['stds_filename'], {'sep': '""" """'}), "(stds_filename, sep=' ')\n", (1805, 1829), True, 'import pandas as pd\n'), ((2001, 2040), 'numpy.sqrt', 'np.sqrt', (['(stds_one ** 2 + stds_zero ** 2)'], {}), '(stds_one ** 2 + stds_zero ** 2)\n', (2008, 2040), True, 'import numpy as np\n'), ((2372, 2449), 'matplotlib.colorbar.ColorbarBase', 'mpl.colorbar.ColorbarBase', (['ax'], {'orientation': '"""horizontal"""', 'cmap': 'cmap', 'norm': 'norm'}), "(ax, orientation='horizontal', cmap=cmap, norm=norm)\n", (2397, 2449), True, 'import matplotlib as mpl\n'), ((2516, 2559), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(0.5)', 'h_pad': '(0)', 'w_pad': '(0)'}), '(pad=0.5, h_pad=0, w_pad=0)\n', (2532, 2559), True, 'import matplotlib.pyplot as plt\n')] |
import time
import numpy as np
import torch
import sqapy
class RBM:
    """Restricted Boltzmann Machine trainable with contrastive divergence
    (CD) or simulated quantum annealing (SQA, via the sqapy sampler)."""
    def __init__(self, n_visible=784, n_hidden=2, alpha=0.01, device='cpu'):
        """Initialise weights and biases uniformly in [-1, 1].

        Args:
            n_visible: number of visible units.
            n_hidden: number of hidden units.
            alpha: learning rate for the parameter updates.
            device: torch device string hosting all tensors.
        """
        self.n_visible = n_visible
        self.n_hidden = n_hidden
        self.alpha = alpha
        self.device = device
        self.data = None
        self.weight = torch.FloatTensor(self.n_visible, self.n_hidden).uniform_(-1, 1).to(self.device)
        self.b = torch.FloatTensor(self.n_visible).uniform_(-1, 1).to(self.device)
        self.c = torch.FloatTensor(self.n_hidden).uniform_(-1, 1).to(self.device)
        self.energy_records = []  # mean energy per epoch, filled by train()
    def train(self, data, n_epochs=2, n_CD=1, sampler="cd"):
        """Train on ``data`` with the chosen sampler ("cd" or "sqa").

        Unknown sampler names are silently ignored (no training happens).
        """
        self.energy_records.clear()
        self.data = data
        if sampler == "cd":
            self.__contrastive_divergence(self.data, n_epochs, n_CD)
        elif sampler == "sqa":
            self.__sqa(self.data, n_epochs)
        else:
            pass
        print("Training finished")
    def sample(self, n_iter=5, v_init=None):
        """Run ``n_iter`` Gibbs steps from ``v_init`` (random if None).

        Returns:
            (v, h): final visible and hidden state vectors.
        """
        if v_init is None:
            v_init = torch.randint(2, size=(1, self.n_visible)).float().to(self.device)
        v_t = v_init.view(self.n_visible)
        # NOTE(review): with n_iter=0, h_t is never assigned and the return
        # raises NameError — presumably n_iter >= 1 is always intended.
        for _ in range(n_iter):
            h_t = self.__forward(v_t)
            v_t = self.__backward(h_t)
        return v_t, h_t
    def __sqa(self, data, n_epochs, batch_size=10000):
        """Train with simulated quantum annealing on batch-mean visibles.

        Each batch's mean vector seeds the model; an SQA sampler over the
        bipartite graph (b, c, weight) produces the negative-phase sample.
        Trailing batches smaller than ``batch_size`` are dropped.
        """
        train_time = []
        for e in range(n_epochs):
            self.energy_list = []
            start = time.time()
            for i in range(0, data.shape[0], batch_size):
                batch = data[i:i+batch_size]
                if len(batch) != batch_size:
                    break
                v_0 = batch.mean(axis=0)
                h0_sampled = self.__forward(v_0)
                # sqapy works on numpy arrays, so convert the parameters.
                b = torch.Tensor.numpy(self.b)
                c = torch.Tensor.numpy(self.c)
                weight = torch.Tensor.numpy(self.weight)
                model = sqapy.BipartiteGraph(b, c, weight)
                sampler = sqapy.SQASampler(model, steps=100)
                _, states = sampler.sample()
                # The sampled state concatenates visible then hidden units.
                v_sampled= torch.from_numpy(np.array(states[0][:len(self.b)])).float()
                h_sampled = torch.from_numpy(np.array(states[0][len(self.b):])).float()
                self.__update_params(v_0, v_sampled, h0_sampled, h_sampled)
                self.energy_list.append(self._energy(v_0, h_sampled).item())
            end = time.time()
            avg_energy = np.mean(self.energy_list)
            print("[epoch {}] takes {:.2f}s, average energy={}".format(
                e, end - start, avg_energy))
            self.energy_records.append(avg_energy)
            train_time.append(end - start)
        print("Average Training Time: {:.2f}".format(np.mean(train_time)))
    def __contrastive_divergence(self, data, n_epochs, n_CD):
        """Train with CD-k: k alternating Gibbs steps per sample."""
        train_time = []
        for e in range(n_epochs):
            self.energy_list = []
            start = time.time()
            for v_0 in data:
                h0_sampled = self.__forward(v_0)
                h_sampled = h0_sampled
                # k Gibbs sweeps produce the negative-phase sample.
                for _ in range(n_CD):
                    v_sampled = self.__backward(h_sampled)
                    h_sampled = self.__forward(v_sampled)
                self.__update_params(v_0, v_sampled, h0_sampled, h_sampled)
                self.energy_list.append(self._energy(v_0, h_sampled).item())
            end = time.time()
            avg_energy = np.mean(self.energy_list)
            print("[epoch {}] takes {:.2f}s, average energy={}".format(
                e, end - start, avg_energy))
            self.energy_records.append(avg_energy)
            train_time.append(end - start)
        print("Average Training Time: {:.2f}".format(np.mean(train_time)))
    def __update_params(self, v_0, v_sampled, h0, h_sampled):
        """Gradient-ascent step: positive minus negative phase statistics."""
        self.weight += self.alpha * \
            (torch.matmul(v_0.view(-1, 1), h0.view(1, -1)) -
             torch.matmul(v_sampled.view(-1, 1), h_sampled.view(1, -1)))
        self.b += self.alpha * (v_0 - v_sampled)
        self.c += self.alpha * (h0 - h_sampled)
    def __forward(self, v):
        """Sample hidden units given visible units v."""
        p_h = torch.sigmoid(
            torch.matmul(torch.t(self.weight), v) + self.c)
        return self.__sampling(p_h)
    def __backward(self, h):
        """Sample visible units given hidden units h."""
        p_v = torch.sigmoid(torch.matmul(self.weight, h) + self.b)
        return self.__sampling(p_v)
    def __sampling(self, p):
        """Draw a {0,1} vector where each unit fires with probability p."""
        dim = p.shape[0]
        true_list = torch.rand(dim).to(self.device) <= p
        sampled = torch.zeros(dim).to(self.device)
        sampled[true_list] = 1
        return sampled
    def _energy(self, v, h):
        """RBM energy: -b.v - c.h - v^T W h."""
        return - torch.dot(self.b, v) - torch.dot(self.c, h) \
            - torch.matmul(torch.matmul(torch.t(v), self.weight), h)
| [
"torch.dot",
"torch.t",
"torch.randint",
"sqapy.SQASampler",
"torch.Tensor.numpy",
"sqapy.BipartiteGraph",
"torch.FloatTensor",
"time.time",
"numpy.mean",
"torch.rand",
"torch.zeros",
"torch.matmul"
] | [((1471, 1482), 'time.time', 'time.time', ([], {}), '()\n', (1480, 1482), False, 'import time\n'), ((2411, 2422), 'time.time', 'time.time', ([], {}), '()\n', (2420, 2422), False, 'import time\n'), ((2448, 2473), 'numpy.mean', 'np.mean', (['self.energy_list'], {}), '(self.energy_list)\n', (2455, 2473), True, 'import numpy as np\n'), ((2936, 2947), 'time.time', 'time.time', ([], {}), '()\n', (2945, 2947), False, 'import time\n'), ((3393, 3404), 'time.time', 'time.time', ([], {}), '()\n', (3402, 3404), False, 'import time\n'), ((3430, 3455), 'numpy.mean', 'np.mean', (['self.energy_list'], {}), '(self.energy_list)\n', (3437, 3455), True, 'import numpy as np\n'), ((1767, 1793), 'torch.Tensor.numpy', 'torch.Tensor.numpy', (['self.b'], {}), '(self.b)\n', (1785, 1793), False, 'import torch\n'), ((1814, 1840), 'torch.Tensor.numpy', 'torch.Tensor.numpy', (['self.c'], {}), '(self.c)\n', (1832, 1840), False, 'import torch\n'), ((1866, 1897), 'torch.Tensor.numpy', 'torch.Tensor.numpy', (['self.weight'], {}), '(self.weight)\n', (1884, 1897), False, 'import torch\n'), ((1922, 1956), 'sqapy.BipartiteGraph', 'sqapy.BipartiteGraph', (['b', 'c', 'weight'], {}), '(b, c, weight)\n', (1942, 1956), False, 'import sqapy\n'), ((1983, 2017), 'sqapy.SQASampler', 'sqapy.SQASampler', (['model'], {'steps': '(100)'}), '(model, steps=100)\n', (1999, 2017), False, 'import sqapy\n'), ((2738, 2757), 'numpy.mean', 'np.mean', (['train_time'], {}), '(train_time)\n', (2745, 2757), True, 'import numpy as np\n'), ((3720, 3739), 'numpy.mean', 'np.mean', (['train_time'], {}), '(train_time)\n', (3727, 3739), True, 'import numpy as np\n'), ((4308, 4336), 'torch.matmul', 'torch.matmul', (['self.weight', 'h'], {}), '(self.weight, h)\n', (4320, 4336), False, 'import torch\n'), ((4513, 4529), 'torch.zeros', 'torch.zeros', (['dim'], {}), '(dim)\n', (4524, 4529), False, 'import torch\n'), ((4670, 4690), 'torch.dot', 'torch.dot', (['self.c', 'h'], {}), '(self.c, h)\n', (4679, 4690), False, 'import torch\n'), 
((4179, 4199), 'torch.t', 'torch.t', (['self.weight'], {}), '(self.weight)\n', (4186, 4199), False, 'import torch\n'), ((4458, 4473), 'torch.rand', 'torch.rand', (['dim'], {}), '(dim)\n', (4468, 4473), False, 'import torch\n'), ((4647, 4667), 'torch.dot', 'torch.dot', (['self.b', 'v'], {}), '(self.b, v)\n', (4656, 4667), False, 'import torch\n'), ((4736, 4746), 'torch.t', 'torch.t', (['v'], {}), '(v)\n', (4743, 4746), False, 'import torch\n'), ((326, 374), 'torch.FloatTensor', 'torch.FloatTensor', (['self.n_visible', 'self.n_hidden'], {}), '(self.n_visible, self.n_hidden)\n', (343, 374), False, 'import torch\n'), ((424, 457), 'torch.FloatTensor', 'torch.FloatTensor', (['self.n_visible'], {}), '(self.n_visible)\n', (441, 457), False, 'import torch\n'), ((507, 539), 'torch.FloatTensor', 'torch.FloatTensor', (['self.n_hidden'], {}), '(self.n_hidden)\n', (524, 539), False, 'import torch\n'), ((1060, 1102), 'torch.randint', 'torch.randint', (['(2)'], {'size': '(1, self.n_visible)'}), '(2, size=(1, self.n_visible))\n', (1073, 1102), False, 'import torch\n')] |
import numpy as np
import scipy
import metrics
FEPS = 1e-8
NTESTS = 1000
def true_svd(x):
    """Return the full singular value decomposition (u, s, vt) of x."""
    factors = np.linalg.svd(x)
    return factors[0], factors[1], factors[2]
def my_svd(x):
    # Placeholder SVD implementation: returns the input three times so the
    # harness in __main__ can exercise check_svd.  check_svd is expected to
    # reject this output for almost all inputs until a real SVD is written.
    return x, x, x
def check_svd(x, u, s, vt):
    """Validate that (u, s, vt) is a full SVD of x, raising on violation.

    Checks factor shapes, orthogonality of u and vt (to within FEPS under
    metrics.tdist), and that u @ diag(s) @ vt reconstructs x.
    """
    m, n = x.shape[0], x.shape[1]
    k = min(m, n)
    if u.shape[0] != m or u.shape[1] != m:
        raise Exception('Invalid size for u')
    if vt.shape[0] != n or vt.shape[1] != n:
        raise Exception('Invalid size for v')
    if len(s) != k:
        raise Exception('Invalid size for s')
    # Orthogonality: each product must be the identity to within FEPS.
    # (vt is v transposed, so vt.T @ vt corresponds to v @ v.T, etc.)
    orthogonality_checks = [
        (u @ u.T, m, 'u @ u.t != I => dist = {}'),
        (u.T @ u, m, 'u.t @ u != I => dist = {}'),
        (vt.T @ vt, n, 'v @ v.t != I => dist = {}'),
        (vt @ vt.T, n, 'v.t @ v != I => dist = {}'),
    ]
    for product, size, message in orthogonality_checks:
        d = metrics.tdist(product, np.eye(size))
        if d >= FEPS:
            raise Exception(message.format(d))
    # Reconstruction: embed s on the diagonal of an m-by-n matrix.
    s_mat = np.zeros((m, n))
    for i in range(k):
        s_mat[i][i] = s[i]
    d = metrics.tdist(u @ s_mat @ vt, x)
    if d >= FEPS:
        raise Exception('u @ s @ v.t != x => dist = {}'.format(d))
if __name__ == '__main__':
    # Fuzz check_svd with random rectangular matrices of sizes 1..9.
    # NOTE(review): my_svd returns (x, x, x), which check_svd rejects for
    # essentially every input — presumably a stub awaiting a real SVD;
    # the commented-out true_svd line shows the intended reference path.
    for i in range(NTESTS):
        print('Test {}/{}'.format(i+1, NTESTS))
        m = np.random.randint(1, 10)
        n = np.random.randint(1, 10)
        x = np.random.randn(m, n) * 10
        #u, s, vt = true_svd(x)
        u, s, vt = my_svd(x)
        check_svd(x, u, s, vt)
| [
"numpy.random.randn",
"numpy.zeros",
"numpy.linalg.svd",
"numpy.random.randint",
"numpy.eye",
"metrics.tdist"
] | [((110, 126), 'numpy.linalg.svd', 'np.linalg.svd', (['x'], {}), '(x)\n', (123, 126), True, 'import numpy as np\n'), ((1024, 1040), 'numpy.zeros', 'np.zeros', (['(m, n)'], {}), '((m, n))\n', (1032, 1040), True, 'import numpy as np\n'), ((1108, 1140), 'metrics.tdist', 'metrics.tdist', (['(u @ s_mat @ vt)', 'x'], {}), '(u @ s_mat @ vt, x)\n', (1121, 1140), False, 'import metrics\n'), ((545, 554), 'numpy.eye', 'np.eye', (['m'], {}), '(m)\n', (551, 554), True, 'import numpy as np\n'), ((668, 677), 'numpy.eye', 'np.eye', (['m'], {}), '(m)\n', (674, 677), True, 'import numpy as np\n'), ((794, 803), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (800, 803), True, 'import numpy as np\n'), ((919, 928), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (925, 928), True, 'import numpy as np\n'), ((1347, 1371), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (1364, 1371), True, 'import numpy as np\n'), ((1384, 1408), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (1401, 1408), True, 'import numpy as np\n'), ((1421, 1442), 'numpy.random.randn', 'np.random.randn', (['m', 'n'], {}), '(m, n)\n', (1436, 1442), True, 'import numpy as np\n')] |
import csv, os, argparse
# ---------------------------------------------------------------------------
# Compare maximum-likelihood estimates of the 2x2 rate parameters a_ij with
# the true simulation values A0, A1, B0, B1, using point estimates, mean
# estimates, and an average over the top likelihood values of a global search.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--main_dir', nargs=1, default=['./gama'],
    help='Directory containing GAMA data')
parser.add_argument('-o', '--output_dir', nargs=1, default=['./output'],
    help='Sub-directory containing output')
# NOTE(review): "Numbr" typo in the help text is a runtime string, left as-is.
parser.add_argument("-n", "--num_values", type=int, nargs=1, default=[10],
    help="Numbr of likelihood values to use in mean calculation")
args = parser.parse_args()
main_dir = args.main_dir[0]
output_dir = args.output_dir[0]
num_values = args.num_values[0]
# Input files: true parameters plus the two MLE summary CSVs.
param_file = os.path.join(main_dir, "parameter_values.txt")
point_mle_file = os.path.join(main_dir, output_dir, "param_point_estimates.csv")
mean_mle_file = os.path.join(main_dir, output_dir, "param_mean_sdev.csv")
# True simulation parameters (defaults of 0 if missing from the file).
true_a0 = 0
true_a1 = 0
true_b0 = 0
true_b1 = 0
with open(param_file, 'r') as f:
    reader = csv.reader(f, dialect="excel")
    next(reader)
    for row in reader:
        if row[0] == 'A0': true_a0 = float(row[1])
        if row[0] == 'A1': true_a1 = float(row[1])
        if row[0] == 'B0': true_b0 = float(row[1])
        if row[0] == 'B1': true_b1 = float(row[1])
#print(true_a0, true_a1, true_b0, true_b1)
# Point MLE estimates of the rate matrix entries a_ij.
point_a00 = 0
point_a01 = 0
point_a10 = 0
point_a11 = 0
with open(point_mle_file, 'r') as f:
    reader = csv.reader(f, dialect="excel")
    next(reader)
    for row in reader:
        if row[0] == 'a00': point_a00 = float(row[1])
        if row[0] == 'a01': point_a01 = float(row[1])
        if row[0] == 'a10': point_a10 = float(row[1])
        if row[0] == 'a11': point_a11 = float(row[1])
#print(point_a00, point_a01, point_a10, point_a11)
# Mean MLE estimates of the same entries.
mean_a00 = 0
mean_a01 = 0
mean_a10 = 0
mean_a11 = 0
with open(mean_mle_file, 'r') as f:
    reader = csv.reader(f, dialect="excel")
    next(reader)
    for row in reader:
        if row[0] == 'a00': mean_a00 = float(row[1])
        if row[0] == 'a01': mean_a01 = float(row[1])
        if row[0] == 'a10': mean_a10 = float(row[1])
        if row[0] == 'a11': mean_a11 = float(row[1])
#print(mean_a00, mean_a01, mean_a10, mean_a11)
# Two independent ways (R1, R2) of recovering each parameter ratio from the
# a_ij entries.  NOTE(review): any entry left at its 0 default raises
# ZeroDivisionError here — presumably the CSVs always contain all entries.
tr_a0a1 = true_a0/true_a1
tr_b0b1 = true_b0/true_b1
p1_a0a1 = point_a01/point_a11
p2_a0a1 = point_a00/point_a10
m1_a0a1 = mean_a01/mean_a11
m2_a0a1 = mean_a00/mean_a10
p1_b0b1 = point_a00/point_a01
p2_b0b1 = point_a10/point_a11
m1_b0b1 = mean_a00/mean_a01
m2_b0b1 = mean_a10/mean_a11
print("R1) a0/a1=%.3f point=%.3f mean=%.3f" % (tr_a0a1, p1_a0a1, m1_a0a1))
print("R2) a0/a1=%.3f point=%.3f mean=%.3f" % (tr_a0a1, p2_a0a1, m2_a0a1))
print("R1) b0/b1=%.3f point=%.3f mean=%.3f" % (tr_b0b1, p1_b0b1, m1_b0b1))
print("R2) b0/b1=%.3f point=%.3f mean=%.3f" % (tr_b0b1, p2_b0b1, m2_b0b1))
# Average the two recovery routes for each ratio.
pm_a0a1 = 0.5 * (p1_a0a1 + p2_a0a1)
mm_a0a1 = 0.5 * (m1_a0a1 + m2_a0a1)
pm_b0b1 = 0.5 * (p1_b0b1 + p2_b0b1)
mm_b0b1 = 0.5 * (m1_b0b1 + m2_b0b1)
print("R1+R2) a0/a1=%.3f point=%.3f mean=%.3f" % (tr_a0a1, pm_a0a1, mm_a0a1))
print("R1+R2) b0/b1=%.3f point=%.3f mean=%.3f" % (tr_b0b1, pm_b0b1, mm_b0b1))
print("============================")
# NOTE(review): imports conventionally belong at the top of the file; kept
# in place here to avoid changing execution order.
import pandas as pd
import numpy as np
# Second comparison: average the ratios over the `num_values` rows with the
# highest log-likelihood from the global MLE search.
search_file = os.path.join(main_dir, output_dir, "param_mle_global_search.csv")
df = pd.read_csv(search_file)
df = df.sort_values(['loglik'], ascending=[False]).head(num_values)
df['a0a1_1'] = df['a01'] / df['a11']
df['a0a1_2'] = df['a00'] / df['a10']
df['b0b1_1'] = df['a00'] / df['a01']
df['b0b1_2'] = df['a10'] / df['a11']
raw_a0a1 = np.concatenate((np.array(df['a0a1_1']), np.array(df['a0a1_2'])))
raw_b0b1 = np.concatenate((np.array(df['b0b1_1']), np.array(df['b0b1_2'])))
print("a0/a1=%.3f - MLE mean=%.3f std=%.3f" % (tr_a0a1, np.mean(raw_a0a1), np.std(raw_a0a1)))
print("b0/b1=%.3f - MLE mean=%.3f std=%.3f" % (tr_b0b1, np.mean(raw_b0b1), np.std(raw_b0b1)))
| [
"csv.reader",
"argparse.ArgumentParser",
"pandas.read_csv",
"numpy.std",
"numpy.mean",
"numpy.array",
"os.path.join"
] | [((35, 60), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (58, 60), False, 'import csv, os, argparse\n'), ((613, 659), 'os.path.join', 'os.path.join', (['main_dir', '"""parameter_values.txt"""'], {}), "(main_dir, 'parameter_values.txt')\n", (625, 659), False, 'import csv, os, argparse\n'), ((678, 741), 'os.path.join', 'os.path.join', (['main_dir', 'output_dir', '"""param_point_estimates.csv"""'], {}), "(main_dir, output_dir, 'param_point_estimates.csv')\n", (690, 741), False, 'import csv, os, argparse\n'), ((758, 815), 'os.path.join', 'os.path.join', (['main_dir', 'output_dir', '"""param_mean_sdev.csv"""'], {}), "(main_dir, output_dir, 'param_mean_sdev.csv')\n", (770, 815), False, 'import csv, os, argparse\n'), ((3096, 3161), 'os.path.join', 'os.path.join', (['main_dir', 'output_dir', '"""param_mle_global_search.csv"""'], {}), "(main_dir, output_dir, 'param_mle_global_search.csv')\n", (3108, 3161), False, 'import csv, os, argparse\n'), ((3167, 3191), 'pandas.read_csv', 'pd.read_csv', (['search_file'], {}), '(search_file)\n', (3178, 3191), True, 'import pandas as pd\n'), ((911, 941), 'csv.reader', 'csv.reader', (['f'], {'dialect': '"""excel"""'}), "(f, dialect='excel')\n", (921, 941), False, 'import csv, os, argparse\n'), ((1337, 1367), 'csv.reader', 'csv.reader', (['f'], {'dialect': '"""excel"""'}), "(f, dialect='excel')\n", (1347, 1367), False, 'import csv, os, argparse\n'), ((1778, 1808), 'csv.reader', 'csv.reader', (['f'], {'dialect': '"""excel"""'}), "(f, dialect='excel')\n", (1788, 1808), False, 'import csv, os, argparse\n'), ((3436, 3458), 'numpy.array', 'np.array', (["df['a0a1_1']"], {}), "(df['a0a1_1'])\n", (3444, 3458), True, 'import numpy as np\n'), ((3460, 3482), 'numpy.array', 'np.array', (["df['a0a1_2']"], {}), "(df['a0a1_2'])\n", (3468, 3482), True, 'import numpy as np\n'), ((3512, 3534), 'numpy.array', 'np.array', (["df['b0b1_1']"], {}), "(df['b0b1_1'])\n", (3520, 3534), True, 'import numpy as np\n'), ((3536, 3558), 
'numpy.array', 'np.array', (["df['b0b1_2']"], {}), "(df['b0b1_2'])\n", (3544, 3558), True, 'import numpy as np\n'), ((3618, 3635), 'numpy.mean', 'np.mean', (['raw_a0a1'], {}), '(raw_a0a1)\n', (3625, 3635), True, 'import numpy as np\n'), ((3637, 3653), 'numpy.std', 'np.std', (['raw_a0a1'], {}), '(raw_a0a1)\n', (3643, 3653), True, 'import numpy as np\n'), ((3712, 3729), 'numpy.mean', 'np.mean', (['raw_b0b1'], {}), '(raw_b0b1)\n', (3719, 3729), True, 'import numpy as np\n'), ((3731, 3747), 'numpy.std', 'np.std', (['raw_b0b1'], {}), '(raw_b0b1)\n', (3737, 3747), True, 'import numpy as np\n')] |
import os
import imp
import numpy as np
from scipy.ndimage import imread as imread0
import tifffile as tiff
def conv_labels2dto3d(labels):
    """Expand a 2-D label image into a one-hot (h, w, num_labels) stack.

    Channel j of the result is a binary mask for the j-th distinct label
    value of ``labels`` (in the sorted order returned by np.unique).

    Args:
        labels: 2-D integer array of per-pixel labels.

    Returns:
        uint8 array of shape (h, w, number of distinct label values).
    """
    lbnums = np.unique(labels)
    arr = np.zeros((labels.shape[0], labels.shape[1], len(lbnums)), np.uint8)
    # Index channels by position, not by label value: the original used the
    # label value itself as the channel index, which raises IndexError
    # whenever the labels are not the contiguous range 0..len(lbnums)-1.
    for channel, label_value in enumerate(lbnums):
        arr[:, :, channel] = labels == label_value
    return arr
def normalize(orig_img):
    """Clip an image to its [0.1, 99.9] percentile range, scale to [0, 1].

    Robust min-max normalisation: extreme outliers are clipped to the
    percentile bounds before rescaling.
    """
    percentile = 99.9
    upper = np.percentile(orig_img, percentile)
    lower = np.percentile(orig_img, 100 - percentile)
    clipped = np.clip(orig_img, lower, upper)
    return (clipped - lower) / (upper - lower)
def make_outputdir(output):
    """Create directory ``output`` (and missing parents) if needed.

    Does nothing when the directory already exists.  Unlike the previous
    bare ``except``, genuine OS errors (e.g. permission denied) now
    propagate instead of being silently swallowed.
    """
    os.makedirs(output, exist_ok=True)
def imread_check_tiff(path):
    # Read an image from disk; files with a 'tif' extension, or images that
    # scipy could only load as an object array, are re-read with tifffile,
    # which handles multi-page / high-bit-depth TIFFs.
    # NOTE(review): scipy.ndimage.imread was removed in SciPy 1.2 —
    # presumably this targets an older SciPy; consider imageio instead.
    img = imread0(path)
    if img.dtype == 'object' or path.endswith('tif'):
        img = tiff.imread(path)
    return img
def imread(path):
    """Read one image, or depth-stack several images into one array.

    Args:
        path: a single file path, or a list/tuple of paths whose images
            are stacked along the third (channel) axis.

    Returns:
        The loaded image array; a single-image stack has its trailing
        channel axis of length 1 removed.
    """
    if isinstance(path, (tuple, list)):
        stack = [imread_check_tiff(p) for p in path]
        img = np.dstack(stack)
        if img.shape[2] == 1:
            # np.squeeze returns a new array; the original discarded the
            # result, so single-image stacks kept a trailing axis of 1.
            img = np.squeeze(img, axis=2)
        return img
    else:
        return imread_check_tiff(path)
def parse_image_files(inputs):
    """Split a flat argument list on "/" separators and zip the groups.

    Without any "/" the list is returned wrapped in a one-element tuple.
    With separators, e.g. ["a", "b", "/", "c", "d"] yields the pairs
    ("a", "c") and ("b", "d") as a zip iterator.  Note that ``inputs``
    is consumed (emptied) in the separator case.
    """
    if "/" not in inputs:
        return (inputs, )
    groups = [[]]
    while inputs:
        token = inputs.pop(0)
        if token == "/":
            groups.append([])
        else:
            groups[-1].append(token)
    return zip(*groups)
def pad_image(image):
    """Zero-pad a (batch, height, width, channels) array so height and
    width are multiples of 8.

    Padding is split as evenly as possible between the two sides of each
    axis, with the extra row/column (odd totals) placed on the
    bottom/right.

    Returns:
        [padded_image, hpadding, wpadding], where the paddings are
        (before, after) tuples for the height and width axes.
    """
    def split_padding(size):
        # Amount needed to reach the next multiple of 8 along this axis.
        delta = (8 - size % 8) % 8
        return (delta // 2, delta - delta // 2)
    hpadding = split_padding(image.shape[1])
    wpadding = split_padding(image.shape[2])
    padded = np.pad(image, ((0, 0), hpadding, wpadding, (0, 0)), 'constant',
                    constant_values=0.0)
    return [padded, hpadding, wpadding]
def normalize_predictions(predictions):
    """Normalise per-pixel class scores so each pixel's channels sum to 1.

    ``predictions`` is class-major, i.e. (num_classes, height, width).
    Pixels whose scores sum to zero are left unchanged.  The input is
    copied; the original object is not modified.
    """
    predictions = np.array(predictions)
    # Per-pixel totals across the class axis; only normalise where nonzero.
    totals = predictions.sum(axis=0)
    nonzero = totals != 0
    predictions[:, nonzero] = predictions[:, nonzero] / totals[nonzero]
    return predictions
| [
"numpy.dstack",
"numpy.pad",
"numpy.minimum",
"numpy.maximum",
"os.makedirs",
"numpy.percentile",
"numpy.array",
"numpy.squeeze",
"tifffile.imread",
"scipy.ndimage.imread",
"numpy.unique"
] | [((155, 172), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (164, 172), True, 'import numpy as np\n'), ((382, 417), 'numpy.percentile', 'np.percentile', (['orig_img', 'percentile'], {}), '(orig_img, percentile)\n', (395, 417), True, 'import numpy as np\n'), ((428, 469), 'numpy.percentile', 'np.percentile', (['orig_img', '(100 - percentile)'], {}), '(orig_img, 100 - percentile)\n', (441, 469), True, 'import numpy as np\n'), ((478, 504), 'numpy.minimum', 'np.minimum', (['high', 'orig_img'], {}), '(high, orig_img)\n', (488, 504), True, 'import numpy as np\n'), ((515, 535), 'numpy.maximum', 'np.maximum', (['low', 'img'], {}), '(low, img)\n', (525, 535), True, 'import numpy as np\n'), ((721, 734), 'scipy.ndimage.imread', 'imread0', (['path'], {}), '(path)\n', (728, 734), True, 'from scipy.ndimage import imread as imread0\n'), ((2444, 2465), 'numpy.array', 'np.array', (['predictions'], {}), '(predictions)\n', (2452, 2465), True, 'import numpy as np\n'), ((635, 654), 'os.makedirs', 'os.makedirs', (['output'], {}), '(output)\n', (646, 654), False, 'import os\n'), ((803, 820), 'tifffile.imread', 'tiff.imread', (['path'], {}), '(path)\n', (814, 820), True, 'import tifffile as tiff\n'), ((1011, 1024), 'numpy.dstack', 'np.dstack', (['st'], {}), '(st)\n', (1020, 1024), True, 'import numpy as np\n'), ((2230, 2318), 'numpy.pad', 'np.pad', (['image', '((0, 0), hpadding, wpadding, (0, 0))', '"""constant"""'], {'constant_values': '(0.0)'}), "(image, ((0, 0), hpadding, wpadding, (0, 0)), 'constant',\n constant_values=0.0)\n", (2236, 2318), True, 'import numpy as np\n'), ((1067, 1090), 'numpy.squeeze', 'np.squeeze', (['img'], {'axis': '(2)'}), '(img, axis=2)\n', (1077, 1090), True, 'import numpy as np\n')] |
import numpy as np
from typing import List
import ipal_iids.settings as settings
from .preprocessor import Preprocessor
class MeanPreprocessor(Preprocessor):
    """Standard-score preprocessor: scales each enabled feature to zero
    mean and unit standard deviation."""
    _name = "mean"
    _description = "Scale by mean-standard deviation"
    # Per-feature statistics; entries stay None for features whose flag in
    # self.features is False (those are passed through unchanged).
    means: List[float]
    stds: List[float]
    def __init__(self, features):
        """``features`` is a per-column flag list selecting which columns
        to standardise."""
        super().__init__(features)
        self.means = [None] * len(self.features)
        self.stds = [None] * len(self.features)
    def fit(self, values):
        """Estimate per-feature mean/std from ``values`` (rows of columns).

        None entries in a column are ignored when computing statistics.
        """
        # NOTE(review): a critical log does not abort — fitting proceeds
        # even on a length mismatch; confirm that is intended.
        if len(values[0]) != len(self.features):
            settings.logger.critical("Feature length does not match data length!")
        for i in range(len(self.features)):
            if not self.features[i]:
                continue
            X = [v[i] for v in values if v[i] is not None]
            self.means[i] = float(np.mean(X))
            self.stds[i] = float(np.std(X))
            if self.stds[i] == 0:
                # Avoid division by zero in transform() for constant columns.
                settings.logger.info(
                    "Standard deviation is zero. Adjusting values of {} to std 1.0".format(
                        i
                    )
                )
                self.stds[i] = 1
    def transform(self, value):
        """Standardise one row in place and return it; disabled or None
        columns are left untouched."""
        if len(value) != len(self.features):
            settings.logger.critical("Feature length does not match data length!")
        for i in range(len(self.features)):
            if not self.features[i] or value[i] is None:
                continue
            value[i] = (value[i] - self.means[i]) / self.stds[i]
        return value
    def get_fitted_model(self):
        """Serialise the fitted state (feature flags and statistics)."""
        return {"features": self.features, "means": self.means, "stds": self.stds}
    @classmethod
    def from_fitted_model(cls, model):
        """Reconstruct a fitted preprocessor from get_fitted_model() output."""
        mean = MeanPreprocessor(model["features"])
        mean.means = model["means"]
        mean.stds = model["stds"]
        return mean
| [
"ipal_iids.settings.logger.critical",
"numpy.mean",
"numpy.std"
] | [((538, 608), 'ipal_iids.settings.logger.critical', 'settings.logger.critical', (['"""Feature length does not match data length!"""'], {}), "('Feature length does not match data length!')\n", (562, 608), True, 'import ipal_iids.settings as settings\n'), ((1220, 1290), 'ipal_iids.settings.logger.critical', 'settings.logger.critical', (['"""Feature length does not match data length!"""'], {}), "('Feature length does not match data length!')\n", (1244, 1290), True, 'import ipal_iids.settings as settings\n'), ((810, 820), 'numpy.mean', 'np.mean', (['X'], {}), '(X)\n', (817, 820), True, 'import numpy as np\n'), ((855, 864), 'numpy.std', 'np.std', (['X'], {}), '(X)\n', (861, 864), True, 'import numpy as np\n')] |
"""
Tests scikit-learn's binarizer converter.
"""
import unittest
import numpy
from sklearn.feature_extraction.text import TfidfVectorizer
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import StringTensorType
from test_utils import dump_data_and_model
class TestSklearnTfidfVectorizer(unittest.TestCase):
    """ONNX conversion tests for scikit-learn's TfidfVectorizer.

    Each test fits a TfidfVectorizer on a small corpus, converts the
    fitted model with skl2onnx and dumps model + data so the runtime
    comparison in dump_data_and_model can verify the conversion.

    Fix: the original file defined ``test_model_tfidf_vectorizer12``
    twice, so the first definition was shadowed and never executed; it
    is renamed ``test_model_tfidf_vectorizer12_short`` below.
    """

    # Four-document corpus shared by most of the tests below.
    _DOCS = [
        'This is the first document.',
        'This document is the second document.',
        'And this is the third one.',
        'Is this the first document?',
    ]

    def _corpus(self):
        """Return the shared corpus as a (4, 1) numpy column."""
        return numpy.array(self._DOCS).reshape((4, 1))

    def _check_vectorizer(self, corpus, ngram_range, norm, basename):
        """Fit, convert and dump one TfidfVectorizer configuration."""
        vect = TfidfVectorizer(ngram_range=ngram_range, norm=norm)
        vect.fit(corpus.ravel())
        model_onnx = convert_sklearn(vect, 'TfidfVectorizer',
                                      [('input', StringTensorType([1, 1]))])
        self.assertTrue(model_onnx is not None)
        dump_data_and_model(corpus, vect, model_onnx, basename=basename,
                            allow_failure="StrictVersion(onnxruntime.__version__) <= StrictVersion('0.1.4')")

    def test_model_tfidf_vectorizer11(self):
        self._check_vectorizer(self._corpus(), (1, 1), None,
                               "SklearnTfidfVectorizer11-OneOff-SklCol")

    def test_model_tfidf_vectorizer22(self):
        self._check_vectorizer(self._corpus(), (2, 2), None,
                               "SklearnTfidfVectorizer22-OneOff-SklCol")

    def test_model_tfidf_vectorizer12_short(self):
        # Renamed from test_model_tfidf_vectorizer12: the duplicate name
        # meant this test was shadowed and never ran.
        corpus = numpy.array([
            'AA AA',
            'AA AA BB',
        ]).reshape((2, 1))
        self._check_vectorizer(corpus, (1, 2), None,
                               "SklearnTfidfVectorizer22S-OneOff-SklCol")

    def test_model_tfidf_vectorizer12(self):
        self._check_vectorizer(self._corpus(), (1, 2), None,
                               "SklearnTfidfVectorizer22-OneOff-SklCol")

    def test_model_tfidf_vectorizer12_normL1(self):
        self._check_vectorizer(self._corpus(), (1, 2), 'l1',
                               "SklearnTfidfVectorizer22L1-OneOff-SklCol")

    def test_model_tfidf_vectorizer12_normL2(self):
        self._check_vectorizer(self._corpus(), (1, 2), 'l2',
                               "SklearnTfidfVectorizer22L2-OneOff-SklCol")

    def test_model_tfidf_vectorizer13(self):
        self._check_vectorizer(self._corpus(), (1, 3), None,
                               "SklearnTfidfVectorizer13-OneOff-SklCol")
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"sklearn.feature_extraction.text.TfidfVectorizer",
"test_utils.dump_data_and_model",
"skl2onnx.common.data_types.StringTensorType",
"numpy.array"
] | [((6195, 6210), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6208, 6210), False, 'import unittest\n'), ((654, 700), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'ngram_range': '(1, 1)', 'norm': 'None'}), '(ngram_range=(1, 1), norm=None)\n', (669, 700), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((974, 1162), 'test_utils.dump_data_and_model', 'dump_data_and_model', (['corpus', 'vect', 'model_onnx'], {'basename': '"""SklearnTfidfVectorizer11-OneOff-SklCol"""', 'allow_failure': '"""StrictVersion(onnxruntime.__version__) <= StrictVersion(\'0.1.4\')"""'}), '(corpus, vect, model_onnx, basename=\n \'SklearnTfidfVectorizer11-OneOff-SklCol\', allow_failure=\n "StrictVersion(onnxruntime.__version__) <= StrictVersion(\'0.1.4\')")\n', (993, 1162), False, 'from test_utils import dump_data_and_model\n'), ((1505, 1551), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'ngram_range': '(2, 2)', 'norm': 'None'}), '(ngram_range=(2, 2), norm=None)\n', (1520, 1551), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((1825, 2013), 'test_utils.dump_data_and_model', 'dump_data_and_model', (['corpus', 'vect', 'model_onnx'], {'basename': '"""SklearnTfidfVectorizer22-OneOff-SklCol"""', 'allow_failure': '"""StrictVersion(onnxruntime.__version__) <= StrictVersion(\'0.1.4\')"""'}), '(corpus, vect, model_onnx, basename=\n \'SklearnTfidfVectorizer22-OneOff-SklCol\', allow_failure=\n "StrictVersion(onnxruntime.__version__) <= StrictVersion(\'0.1.4\')")\n', (1844, 2013), False, 'from test_utils import dump_data_and_model\n'), ((2212, 2258), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'ngram_range': '(1, 2)', 'norm': 'None'}), '(ngram_range=(1, 2), norm=None)\n', (2227, 2258), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((2532, 2721), 'test_utils.dump_data_and_model', 'dump_data_and_model', (['corpus', 'vect', 
'model_onnx'], {'basename': '"""SklearnTfidfVectorizer22S-OneOff-SklCol"""', 'allow_failure': '"""StrictVersion(onnxruntime.__version__) <= StrictVersion(\'0.1.4\')"""'}), '(corpus, vect, model_onnx, basename=\n \'SklearnTfidfVectorizer22S-OneOff-SklCol\', allow_failure=\n "StrictVersion(onnxruntime.__version__) <= StrictVersion(\'0.1.4\')")\n', (2551, 2721), False, 'from test_utils import dump_data_and_model\n'), ((3064, 3110), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'ngram_range': '(1, 2)', 'norm': 'None'}), '(ngram_range=(1, 2), norm=None)\n', (3079, 3110), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((3384, 3572), 'test_utils.dump_data_and_model', 'dump_data_and_model', (['corpus', 'vect', 'model_onnx'], {'basename': '"""SklearnTfidfVectorizer22-OneOff-SklCol"""', 'allow_failure': '"""StrictVersion(onnxruntime.__version__) <= StrictVersion(\'0.1.4\')"""'}), '(corpus, vect, model_onnx, basename=\n \'SklearnTfidfVectorizer22-OneOff-SklCol\', allow_failure=\n "StrictVersion(onnxruntime.__version__) <= StrictVersion(\'0.1.4\')")\n', (3403, 3572), False, 'from test_utils import dump_data_and_model\n'), ((3922, 3968), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'ngram_range': '(1, 2)', 'norm': '"""l1"""'}), "(ngram_range=(1, 2), norm='l1')\n", (3937, 3968), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((4242, 4432), 'test_utils.dump_data_and_model', 'dump_data_and_model', (['corpus', 'vect', 'model_onnx'], {'basename': '"""SklearnTfidfVectorizer22L1-OneOff-SklCol"""', 'allow_failure': '"""StrictVersion(onnxruntime.__version__) <= StrictVersion(\'0.1.4\')"""'}), '(corpus, vect, model_onnx, basename=\n \'SklearnTfidfVectorizer22L1-OneOff-SklCol\', allow_failure=\n "StrictVersion(onnxruntime.__version__) <= StrictVersion(\'0.1.4\')")\n', (4261, 4432), False, 'from test_utils import dump_data_and_model\n'), ((4782, 4828), 
'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'ngram_range': '(1, 2)', 'norm': '"""l2"""'}), "(ngram_range=(1, 2), norm='l2')\n", (4797, 4828), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((5102, 5292), 'test_utils.dump_data_and_model', 'dump_data_and_model', (['corpus', 'vect', 'model_onnx'], {'basename': '"""SklearnTfidfVectorizer22L2-OneOff-SklCol"""', 'allow_failure': '"""StrictVersion(onnxruntime.__version__) <= StrictVersion(\'0.1.4\')"""'}), '(corpus, vect, model_onnx, basename=\n \'SklearnTfidfVectorizer22L2-OneOff-SklCol\', allow_failure=\n "StrictVersion(onnxruntime.__version__) <= StrictVersion(\'0.1.4\')")\n', (5121, 5292), False, 'from test_utils import dump_data_and_model\n'), ((5635, 5681), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'ngram_range': '(1, 3)', 'norm': 'None'}), '(ngram_range=(1, 3), norm=None)\n', (5650, 5681), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((5955, 6143), 'test_utils.dump_data_and_model', 'dump_data_and_model', (['corpus', 'vect', 'model_onnx'], {'basename': '"""SklearnTfidfVectorizer13-OneOff-SklCol"""', 'allow_failure': '"""StrictVersion(onnxruntime.__version__) <= StrictVersion(\'0.1.4\')"""'}), '(corpus, vect, model_onnx, basename=\n \'SklearnTfidfVectorizer13-OneOff-SklCol\', allow_failure=\n "StrictVersion(onnxruntime.__version__) <= StrictVersion(\'0.1.4\')")\n', (5974, 6143), False, 'from test_utils import dump_data_and_model\n'), ((393, 547), 'numpy.array', 'numpy.array', (["['This is the first document.', 'This document is the second document.',\n 'And this is the third one.', 'Is this the first document?']"], {}), "(['This is the first document.',\n 'This document is the second document.', 'And this is the third one.',\n 'Is this the first document?'])\n", (404, 547), False, 'import numpy\n'), ((1244, 1398), 'numpy.array', 'numpy.array', (["['This is the first document.', 'This document is the 
second document.',\n 'And this is the third one.', 'Is this the first document?']"], {}), "(['This is the first document.',\n 'This document is the second document.', 'And this is the third one.',\n 'Is this the first document?'])\n", (1255, 1398), False, 'import numpy\n'), ((2095, 2129), 'numpy.array', 'numpy.array', (["['AA AA', 'AA AA BB']"], {}), "(['AA AA', 'AA AA BB'])\n", (2106, 2129), False, 'import numpy\n'), ((2803, 2957), 'numpy.array', 'numpy.array', (["['This is the first document.', 'This document is the second document.',\n 'And this is the third one.', 'Is this the first document?']"], {}), "(['This is the first document.',\n 'This document is the second document.', 'And this is the third one.',\n 'Is this the first document?'])\n", (2814, 2957), False, 'import numpy\n'), ((3661, 3815), 'numpy.array', 'numpy.array', (["['This is the first document.', 'This document is the second document.',\n 'And this is the third one.', 'Is this the first document?']"], {}), "(['This is the first document.',\n 'This document is the second document.', 'And this is the third one.',\n 'Is this the first document?'])\n", (3672, 3815), False, 'import numpy\n'), ((4521, 4675), 'numpy.array', 'numpy.array', (["['This is the first document.', 'This document is the second document.',\n 'And this is the third one.', 'Is this the first document?']"], {}), "(['This is the first document.',\n 'This document is the second document.', 'And this is the third one.',\n 'Is this the first document?'])\n", (4532, 4675), False, 'import numpy\n'), ((5374, 5528), 'numpy.array', 'numpy.array', (["['This is the first document.', 'This document is the second document.',\n 'And this is the third one.', 'Is this the first document?']"], {}), "(['This is the first document.',\n 'This document is the second document.', 'And this is the third one.',\n 'Is this the first document?'])\n", (5385, 5528), False, 'import numpy\n'), ((890, 914), 'skl2onnx.common.data_types.StringTensorType', 
'StringTensorType', (['[1, 1]'], {}), '([1, 1])\n', (906, 914), False, 'from skl2onnx.common.data_types import StringTensorType\n'), ((1741, 1765), 'skl2onnx.common.data_types.StringTensorType', 'StringTensorType', (['[1, 1]'], {}), '([1, 1])\n', (1757, 1765), False, 'from skl2onnx.common.data_types import StringTensorType\n'), ((2448, 2472), 'skl2onnx.common.data_types.StringTensorType', 'StringTensorType', (['[1, 1]'], {}), '([1, 1])\n', (2464, 2472), False, 'from skl2onnx.common.data_types import StringTensorType\n'), ((3300, 3324), 'skl2onnx.common.data_types.StringTensorType', 'StringTensorType', (['[1, 1]'], {}), '([1, 1])\n', (3316, 3324), False, 'from skl2onnx.common.data_types import StringTensorType\n'), ((4158, 4182), 'skl2onnx.common.data_types.StringTensorType', 'StringTensorType', (['[1, 1]'], {}), '([1, 1])\n', (4174, 4182), False, 'from skl2onnx.common.data_types import StringTensorType\n'), ((5018, 5042), 'skl2onnx.common.data_types.StringTensorType', 'StringTensorType', (['[1, 1]'], {}), '([1, 1])\n', (5034, 5042), False, 'from skl2onnx.common.data_types import StringTensorType\n'), ((5871, 5895), 'skl2onnx.common.data_types.StringTensorType', 'StringTensorType', (['[1, 1]'], {}), '([1, 1])\n', (5887, 5895), False, 'from skl2onnx.common.data_types import StringTensorType\n')] |
# Copyright (c) 2020 SMHI, Swedish Meteorological and Hydrological Institute
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
"""
Created on 2021-03-11 13:52
@author: johannes
"""
# import os
# os.environ['PROJ_LIB'] = '/_PYTHON_INSTALLATION_PATH_/Library/share/proj'
import numpy as np
from datetime import datetime
from satpy import Scene, find_files_and_readers
from senplot.plotting.map import PlotSatProd
if __name__ == '__main__':
    data_dir = 'C:/Temp/Satellit/sentinel_data'

    # Locate OLCI Level 2 products within the time window:
    # datetime(YEAR, MONTH, DAY, HOUR, MINUTE)
    product_files = find_files_and_readers(
        start_time=datetime(2021, 2, 13, 7, 10),
        end_time=datetime(2021, 2, 13, 12, 50),
        base_dir=data_dir,
        reader='olci_l2',
        sensor='olci',
    )

    # Create the Scene object.
    scene = Scene(filenames=product_files)

    # Load the selected datasets.
    # Available OLCI Level 2 datasets are:
    # 'chl_nn.nc', 'chl_oc4me.nc', 'iop_nn.nc', 'iwv.nc', 'par.nc',
    # 'trsp.nc', 'tsm_nn.nc', 'w_aer.nc',
    # the 'OaXX_reflectance.nc' bands and 'wqsf.nc'.
    wanted_datasets = [
        'chl_nn',
        # 'chl_oc4me',
    ]
    scene.load(wanted_datasets)

    # Resample data to grid (projecting):
    # area_spec = 'baws'  # 1000 m resolution grid over the Baltic Sea incl. Kattegatt and Skagerrak
    # scene = scene.resample(area_spec, radius_of_influence=300)

    # Chlorophyll data are stored as logarithmic values; convert to real values.
    scene['chl_nn'] = np.power(10, scene['chl_nn'])

    # Simple plot:
    # scene['chl_nn'].plot()

    # Save as geotiff?.. and possibly drag into QGIS?
    # scene.save_dataset(
    #     'chl_nn',
    #     filename='/__PATH_TO_FOLDER__/__FILENAME__.tiff',
    #     writer='geotiff',
    #     dtype=np.float32,
    #     enhance=False,
    # )

    # Advanced map plot; see PlotSatProd.__init__(...) for more options.
    lons, lats = scene['chl_nn'].area.get_lonlats()
    PlotSatProd(
        data_mat=scene['chl_nn'].data,
        lat_mat=lats,
        lon_mat=lons,
        cbar_label='Chl µg/l',
        cmap_step=2,
        max_tick=10,
        min_tick=0,
        resolution='i',
        map_frame={'lat_min': 54., 'lat_max': 60., 'lon_min': 5., 'lon_max': 14.},
        p_color=True,
        show_fig=True,
        save_fig=True,
        fig_title='OLCI Level 2 - chl_nn - 2021-02-13',
        fig_name='chl_test_plot.png',
    )
| [
"numpy.power",
"satpy.Scene",
"senplot.plotting.map.PlotSatProd",
"datetime.datetime"
] | [((839, 865), 'satpy.Scene', 'Scene', ([], {'filenames': 'filenames'}), '(filenames=filenames)\n', (844, 865), False, 'from satpy import Scene, find_files_and_readers\n'), ((1860, 1887), 'numpy.power', 'np.power', (['(10)', "scn['chl_nn']"], {}), "(10, scn['chl_nn'])\n", (1868, 1887), True, 'import numpy as np\n'), ((2374, 2738), 'senplot.plotting.map.PlotSatProd', 'PlotSatProd', ([], {'data_mat': "scn['chl_nn'].data", 'lat_mat': 'lats', 'lon_mat': 'lons', 'cbar_label': '"""Chl µg/l"""', 'cmap_step': '(2)', 'max_tick': '(10)', 'min_tick': '(0)', 'resolution': '"""i"""', 'map_frame': "{'lat_min': 54.0, 'lat_max': 60.0, 'lon_min': 5.0, 'lon_max': 14.0}", 'p_color': '(True)', 'show_fig': '(True)', 'save_fig': '(True)', 'fig_title': '"""OLCI Level 2 - chl_nn - 2021-02-13"""', 'fig_name': '"""chl_test_plot.png"""'}), "(data_mat=scn['chl_nn'].data, lat_mat=lats, lon_mat=lons,\n cbar_label='Chl µg/l', cmap_step=2, max_tick=10, min_tick=0, resolution\n ='i', map_frame={'lat_min': 54.0, 'lat_max': 60.0, 'lon_min': 5.0,\n 'lon_max': 14.0}, p_color=True, show_fig=True, save_fig=True, fig_title\n ='OLCI Level 2 - chl_nn - 2021-02-13', fig_name='chl_test_plot.png')\n", (2385, 2738), False, 'from senplot.plotting.map import PlotSatProd\n'), ((632, 660), 'datetime.datetime', 'datetime', (['(2021)', '(2)', '(13)', '(7)', '(10)'], {}), '(2021, 2, 13, 7, 10)\n', (640, 660), False, 'from datetime import datetime\n'), ((679, 708), 'datetime.datetime', 'datetime', (['(2021)', '(2)', '(13)', '(12)', '(50)'], {}), '(2021, 2, 13, 12, 50)\n', (687, 708), False, 'from datetime import datetime\n')] |
#!/usr/bin/env python3
"""
Put a cutoff on DIAMOND prioritization
"""
__author__ = "<NAME>"
__version__ = "0.1.0"
__license__ = "MIT"
import argparse
import logging
from funcs import utils
import numpy as np
import matplotlib.pyplot as plt
def main(args):
    """Grow a disease module by putting a cutoff on a DIAMOND ranking.

    For every prefix of the ranked candidate list, a Fisher exact test
    measures the overlap with an external gene set; the prefix with the
    smallest p-value defines the cutoff. Seeds plus that prefix form the
    module, which is written to disk together with the p-value curve.

    Fix: removed the redundant ``import matplotlib.pyplot as plt`` inside
    the plot branch — the module already imports it at the top of the file.
    """
    logging.basicConfig(level=logging.INFO,
                        format='%(module)s:%(levelname)s:%(asctime)s:%(message)s',
                        handlers=[logging.FileHandler("../logs/report.log"),
                                  logging.StreamHandler()])
    logging.info(args)
    seeds = utils.read_gene_list(args.in_seed_path)
    candidates = utils.read_gene_list(args.in_candidate_path)
    net = utils.read_network(args.net_path)
    # Keep only external genes that are in the network and are not seeds.
    ext = [gene for gene in utils.read_gene_list(args.in_extgenes_path)
           if gene not in seeds and gene in net]
    # Overlap p-value for each candidate prefix against the external set.
    pvals = np.zeros(len(candidates))
    for i in range(len(candidates)):
        pvals[i] = utils.fisher_overlap_set(candidates[:i + 1], ext, list(net.nodes()))
    i_min = np.argmin(pvals)
    module = seeds + candidates[:i_min + 1]
    np.savetxt(args.out_pvals_path, pvals)
    utils.write_gene_list(args.out_module_path, module)
    if args.plot:
        plt.semilogy(pvals)
        plt.title("Cutoff: {}, size: {}".format(i_min, len(module)))
        plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('in_seed_path', type=str, help='Filepath of input seed genes file')
parser.add_argument('in_candidate_path', type=str, help='Filepath of input prioritized candidate genes file')
parser.add_argument('in_extgenes_path', type=str, help='Filepath of genes from external source')
parser.add_argument('net_path', type=str, default=None, help='Network filepath')
parser.add_argument('out_module_path', type=str, help='Filepath of output module file')
parser.add_argument('out_pvals_path', type=str, help='Filepath of output pvalues file')
parser.add_argument('--plot', action='store_true', help='Plot the result')
args = parser.parse_args()
main(args) | [
"funcs.utils.read_gene_list",
"funcs.utils.read_network",
"argparse.ArgumentParser",
"matplotlib.pyplot.show",
"logging.FileHandler",
"funcs.utils.write_gene_list",
"numpy.savetxt",
"logging.StreamHandler",
"numpy.argmin",
"logging.info",
"matplotlib.pyplot.semilogy"
] | [((496, 514), 'logging.info', 'logging.info', (['args'], {}), '(args)\n', (508, 514), False, 'import logging\n'), ((528, 567), 'funcs.utils.read_gene_list', 'utils.read_gene_list', (['args.in_seed_path'], {}), '(args.in_seed_path)\n', (548, 567), False, 'from funcs import utils\n'), ((585, 629), 'funcs.utils.read_gene_list', 'utils.read_gene_list', (['args.in_candidate_path'], {}), '(args.in_candidate_path)\n', (605, 629), False, 'from funcs import utils\n'), ((640, 673), 'funcs.utils.read_network', 'utils.read_network', (['args.net_path'], {}), '(args.net_path)\n', (658, 673), False, 'from funcs import utils\n'), ((959, 975), 'numpy.argmin', 'np.argmin', (['pvals'], {}), '(pvals)\n', (968, 975), True, 'import numpy as np\n'), ((1023, 1061), 'numpy.savetxt', 'np.savetxt', (['args.out_pvals_path', 'pvals'], {}), '(args.out_pvals_path, pvals)\n', (1033, 1061), True, 'import numpy as np\n'), ((1066, 1117), 'funcs.utils.write_gene_list', 'utils.write_gene_list', (['args.out_module_path', 'module'], {}), '(args.out_module_path, module)\n', (1087, 1117), False, 'from funcs import utils\n'), ((1333, 1358), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1356, 1358), False, 'import argparse\n'), ((1185, 1204), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['pvals'], {}), '(pvals)\n', (1197, 1204), True, 'import matplotlib.pyplot as plt\n'), ((1281, 1291), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1289, 1291), True, 'import matplotlib.pyplot as plt\n'), ((702, 745), 'funcs.utils.read_gene_list', 'utils.read_gene_list', (['args.in_extgenes_path'], {}), '(args.in_extgenes_path)\n', (722, 745), False, 'from funcs import utils\n'), ((424, 465), 'logging.FileHandler', 'logging.FileHandler', (['"""../logs/report.log"""'], {}), "('../logs/report.log')\n", (443, 465), False, 'import logging\n'), ((466, 489), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (487, 489), False, 'import logging\n')] |
import os
import numpy as np
from envs.mujoco.envs import kuka_env
from envs.mujoco.envs.assets import kuka_asset_dir
from envs.mujoco.utils.insertion import hole_insertion_samples
from envs.mujoco.utils.kinematics import forwardKinSite, forwardKinJacobianSite
from envs.mujoco.utils.projection import rotate_cost_by_matrix
from envs.mujoco.utils.quaternion import mat2Quat, subQuat
class PegInsertionEnv(kuka_env.KukaEnv):
    """
    Peg-in-hole insertion environment for a Kuka arm.

    The reward penalizes position and orientation error between the peg
    tip and the hole base; several cost shapings (quadratic, linear,
    logarithmic, sparse) can be combined via constructor flags.

    Fix: the hole data file was loaded twice (identically) in both
    branches of ``if self.random_target``; the load is now hoisted.
    """
    def __init__(self,
                 *args,
                 hole_id=99,
                 gravity=True,
                 obs_scaling=0.1,
                 sample_good_states=False,
                 use_ft_sensor=False,
                 use_rel_pos_err=False,
                 quadratic_cost=True,
                 quadratic_rot_cost=True,
                 regularize_pose=False,
                 linear_cost=False,
                 logarithmic_cost=False,
                 sparse_cost=False,
                 observe_joints=True,
                 in_peg_frame=False,
                 model_path="/single_peg_hole/full_peg_insertion_experiment_no_gravity_moving_hole_id=025.xml",
                 random_hole_file='random_reachable_holes_small_randomness.npy',
                 init_randomness=0.01,
                 **kwargs):
        """Configure observation/reward options and load the hole set.

        :param obs_scaling: divisor applied to the joint observation.
        :param sample_good_states: sometimes reset to a known good pose.
        :param use_ft_sensor: append force/torque readings to the observation.
        :param use_rel_pos_err: observe the hole pose relative to the peg tip.
        :param in_peg_frame: express positional observations in the peg frame.
        :param init_randomness: half-width of the uniform joint perturbation on reset.
        """
        # Store arguments.
        self.obs_scaling = obs_scaling
        self.sample_good_states = sample_good_states
        self.use_ft_sensor = use_ft_sensor
        self.use_rel_pos_err = use_rel_pos_err
        self.regularize_pose = regularize_pose
        self.quadratic_cost = quadratic_cost
        self.linear_cost = linear_cost
        self.logarithmic_cost = logarithmic_cost
        self.sparse_cost = sparse_cost
        self.quadratic_rot_cost = quadratic_rot_cost
        self.observe_joints = observe_joints
        self.in_peg_frame = in_peg_frame
        self.init_randomness = init_randomness
        # Resolve the models path based on the hole_id.
        # NOTE(review): gravity_string is only referenced by the
        # commented-out resolution below; kept for reference.
        gravity_string = '' if gravity else '_no_gravity'
        # if hole_id >= 0:
        #     kwargs['model_path'] = kwargs.get('model_path',
        #                                       'full_peg_insertion_experiment{}_moving_hole_id={:03d}.xml'.format(
        #                                           gravity_string, hole_id))
        # else:
        #     kwargs['model_path'] = kwargs.get('model_path',
        #                                       'full_peg_insertion_experiment_no_hole{}.xml').format(gravity_string)
        kwargs['model_path'] = model_path
        super(PegInsertionEnv, self).__init__(*args, **kwargs)
        # Diagonal weights for the position / rotation cost terms.
        self.Q_pos = np.diag([100, 100, 100])
        self.Q_rot = np.diag([30, 30, 30])
        if self.regularize_pose:
            self.Q_pose_reg = np.eye(7)
        # The same precomputed set of reachable hole poses is needed in
        # both cases; load it once instead of in each branch.
        self.reachable_holes = np.load(
            os.path.join(kuka_asset_dir(), random_hole_file), allow_pickle=True)
        if self.random_target:
            self._reset_target()
        else:
            self._reset_fix_target()
        # self.good_states = hole_insertion_samples(self.sim, range=[0., 0.06])

    def _get_reward(self, state, action):
        '''
        Compute single step reward.
        '''
        # Position and orientation error between peg tip and hole base.
        pos, rot = forwardKinSite(self.sim, ['peg_tip', 'hole_base'], recompute=False)
        pos_err = pos[0] - pos[1]
        # dist = np.sqrt(pos_err.dot(pos_err))
        peg_quat = mat2Quat(rot[0])
        hole_quat = mat2Quat(rot[1])
        rot_err = subQuat(peg_quat, hole_quat)
        pose_err = self.sim.data.qpos - self.good_states[0]
        peg_tip_id = self.model.site_name2id('peg_tip')
        # NOTE(review): peg_tip_vel is only used by the commented-out
        # velocity reward below.
        jacp, jacv = forwardKinJacobianSite(self.sim, peg_tip_id, recompute=False)
        peg_tip_vel = jacp.dot(self.data.qvel[:])
        # quadratic cost on the error and action
        # rotate the cost terms to align with the hole
        Q_pos = rotate_cost_by_matrix(self.Q_pos, rot[1].T)
        # Q_vel = rotate_cost_by_matrix(self.Q_vel,rot[1].T)
        Q_rot = self.Q_rot
        reward_info = dict()
        reward = 0.0
        # reward_info['quaternion_reward'] = -rot_err.dot(Q_rot).dot(rot_err)
        if self.quadratic_rot_cost:
            reward_info['quadratic_orientation_reward'] = -rot_err.dot(Q_rot).dot(rot_err)
            reward += reward_info['quadratic_orientation_reward']
        if self.quadratic_cost:
            reward_info['quadratic_position_reward'] = -pos_err.dot(Q_pos).dot(pos_err)
            reward += reward_info['quadratic_position_reward']
        if self.linear_cost:
            reward_info['linear_position_reward'] = -np.sqrt(pos_err.dot(Q_pos).dot(pos_err))
            reward += reward_info['linear_position_reward']
        if self.logarithmic_cost:
            # Log cost crosses zero at distance `zero_crossing` and is
            # bounded by `rew_scale` orders of magnitude.
            rew_scale = 2
            eps = 10.0 ** (-rew_scale)
            zero_crossing = 0.05
            reward_info['logarithmic_position_reward'] = -np.log10(
                np.sqrt(pos_err.dot(Q_pos).dot(pos_err)) / zero_crossing * (1 - eps) + eps)
            reward += reward_info['logarithmic_position_reward']
        if self.sparse_cost:
            # Bonus when the peg tip is within 1 cm of the hole base.
            reward_info['sparse_position_reward'] = 10.0 if np.sqrt(pos_err.dot(pos_err)) < 1e-2 else 0
            reward += reward_info['sparse_position_reward']
        if self.regularize_pose:
            reward_info['pose_regularizer_reward'] = -pose_err.dot(self.Q_pose_reg).dot(pose_err)
            reward += reward_info['pose_regularizer_reward']
        # reward_info['velocity_reward'] = -np.sqrt(peg_tip_vel.dot(Q_vel).dot(peg_tip_vel))
        # reward += reward_info['velocity_reward']
        return reward, reward_info

    def _get_info(self):
        """Return diagnostics: tip distance, success flag, state and action."""
        info = dict()
        pos, rot = forwardKinSite(self.sim, ['peg_tip', 'hole_base'], recompute=False)
        pos_err = pos[0] - pos[1]
        dist = np.sqrt(pos_err.dot(pos_err))
        info['tip_distance'] = dist
        # Success: peg tip within 1 cm of the hole base.
        info['success'] = float(dist < 1e-2)
        info['qpos'] = self.data.qpos.copy()
        info['qvel'] = self.data.qvel.copy()
        info['action'] = self.last_action
        return info

    def _get_state_obs(self):
        '''
        Compute the observation at the current state.
        '''
        if self.observe_joints:
            obs = super(PegInsertionEnv, self)._get_state_obs()
        else:
            obs = np.zeros(0)
        # Return superclass observation stacked with the ft observation.
        if not self.initialized:
            ft_obs = np.zeros(6)
        else:
            # Compute F/T sensor data
            ft_obs = self.sim.data.sensordata
            # print("ft_obs :::", ft_obs)
        obs = obs/self.obs_scaling
        if self.use_ft_sensor:
            obs = np.concatenate([obs, ft_obs])
        # End effector position
        pos, rot = forwardKinSite(self.sim, ['peg_tip', 'hole_base', 'hole_top'])
        if self.use_rel_pos_err:
            # Hole pose expressed relative to the peg tip.
            pos_obs = pos[1] - pos[0]
            quat_peg_tip = mat2Quat(rot[0])
            quat_hole_base = mat2Quat(rot[1])
            rot_obs = subQuat(quat_hole_base, quat_peg_tip).copy()
            hole_top_obs = pos[2] - pos[0]
        else:
            # TODO: we probably also want the EE position in the world
            pos_obs = pos[1].copy()
            rot_obs = mat2Quat(rot[1])
            hole_top_obs = pos[2]
        # End effector velocity
        peg_tip_id = self.model.site_name2id('peg_tip')
        jacp, jacr = forwardKinJacobianSite(self.sim, peg_tip_id, recompute=False)
        peg_tip_lin_vel = jacp.dot(self.sim.data.qvel)
        peg_tip_rot_vel = jacr.dot(self.sim.data.qvel)
        # Transform into end effector frame
        if self.in_peg_frame:
            pos_obs = rot[0].T.dot(pos_obs)
            hole_top_obs = rot[0].T.dot(hole_top_obs)
            peg_tip_lin_vel = rot[0].T.dot(peg_tip_lin_vel)
            peg_tip_rot_vel = rot[0].T.dot(peg_tip_rot_vel)
        obs = np.concatenate([obs, pos_obs, rot_obs, peg_tip_lin_vel, peg_tip_rot_vel, hole_top_obs])
        return obs

    def _get_target_obs(self):
        """Return forward-kinematics poses of peg tip, hole base and hole top."""
        # Compute relative position error
        pos, rot = forwardKinSite(self.sim, ['peg_tip', 'hole_base', 'hole_top'])
        # if self.use_rel_pos_err:
        #     pos_obs = pos[1] - pos[0]
        #     quat_peg_tip = mat2Quat(rot[0])
        #     quat_hole_base = mat2Quat(rot[1])
        #     rot_obs = subQuat(quat_hole_base, quat_peg_tip).copy()
        #     hole_top_obs = pos[2] - pos[0]
        # else:
        #     pos_obs = pos[1].copy()
        #     rot_obs = mat2Quat(rot[1])
        #     hole_top_obs = pos[2]
        #
        # if self.in_peg_frame:
        #     pos_obs = rot[0].T.dot(pos_obs)
        #     hole_top_obs = rot[0].T.dot(hole_top_obs)
        #     peg_tip_lin_vel = rot[0].T.dot(peg_tip_lin_vel)
        #     peg_tip_rot_vel = rot[0].T.dot(peg_tip_rot_vel)
        return pos, rot

    def _reset_state(self):
        '''
        Reset the robot state and return the observation.
        '''
        qvel = np.zeros(7)
        if self.sample_good_states and self.np_random.uniform() < 0.5:
            # Start directly from a known good (partially inserted) pose.
            qpos = self.np_random.choice(self.good_states)
            self.set_state(qpos, qvel)
            self.sim.forward()
        else:
            # Perturb the nominal pose; re-sample until contact free.
            qpos = self.good_states[-1] + self.np_random.uniform(-self.init_randomness, self.init_randomness, 7)
            self.set_state(qpos, qvel)
            self.sim.forward()
            while self.sim.data.ncon > 0:
                qpos = self.good_states[-1] + self.np_random.uniform(-self.init_randomness, self.init_randomness, 7)
                self.set_state(qpos, qvel)
                self.sim.forward()

    def _reset_target(self):
        '''
        Resets the hole position to a random precomputed reachable pose.
        '''
        hole_data = self.np_random.choice(self.reachable_holes)
        self.good_states = hole_data['good_poses']
        self.sim.data.set_mocap_pos('hole', hole_data['hole_pos'])
        self.sim.data.set_mocap_quat('hole', hole_data['hole_quat'])

    def _reset_fix_target(self):
        '''
        Resets the hole position to the first (fixed) precomputed pose.
        '''
        hole_data = self.reachable_holes[0]
        self.good_states = hole_data['good_poses']
        self.sim.data.set_mocap_pos('hole', hole_data['hole_pos'])
        self.sim.data.set_mocap_quat('hole', hole_data['hole_quat'])
| [
"envs.mujoco.utils.projection.rotate_cost_by_matrix",
"envs.mujoco.utils.kinematics.forwardKinSite",
"envs.mujoco.utils.quaternion.mat2Quat",
"numpy.eye",
"numpy.zeros",
"envs.mujoco.utils.quaternion.subQuat",
"envs.mujoco.envs.assets.kuka_asset_dir",
"numpy.diag",
"envs.mujoco.utils.kinematics.forw... | [((2161, 2185), 'numpy.diag', 'np.diag', (['[100, 100, 100]'], {}), '([100, 100, 100])\n', (2168, 2185), True, 'import numpy as np\n'), ((2201, 2222), 'numpy.diag', 'np.diag', (['[30, 30, 30]'], {}), '([30, 30, 30])\n', (2208, 2222), True, 'import numpy as np\n'), ((2837, 2904), 'envs.mujoco.utils.kinematics.forwardKinSite', 'forwardKinSite', (['self.sim', "['peg_tip', 'hole_base']"], {'recompute': '(False)'}), "(self.sim, ['peg_tip', 'hole_base'], recompute=False)\n", (2851, 2904), False, 'from envs.mujoco.utils.kinematics import forwardKinSite, forwardKinJacobianSite\n'), ((2987, 3003), 'envs.mujoco.utils.quaternion.mat2Quat', 'mat2Quat', (['rot[0]'], {}), '(rot[0])\n', (2995, 3003), False, 'from envs.mujoco.utils.quaternion import mat2Quat, subQuat\n'), ((3018, 3034), 'envs.mujoco.utils.quaternion.mat2Quat', 'mat2Quat', (['rot[1]'], {}), '(rot[1])\n', (3026, 3034), False, 'from envs.mujoco.utils.quaternion import mat2Quat, subQuat\n'), ((3047, 3075), 'envs.mujoco.utils.quaternion.subQuat', 'subQuat', (['peg_quat', 'hole_quat'], {}), '(peg_quat, hole_quat)\n', (3054, 3075), False, 'from envs.mujoco.utils.quaternion import mat2Quat, subQuat\n'), ((3201, 3262), 'envs.mujoco.utils.kinematics.forwardKinJacobianSite', 'forwardKinJacobianSite', (['self.sim', 'peg_tip_id'], {'recompute': '(False)'}), '(self.sim, peg_tip_id, recompute=False)\n', (3223, 3262), False, 'from envs.mujoco.utils.kinematics import forwardKinSite, forwardKinJacobianSite\n'), ((3412, 3455), 'envs.mujoco.utils.projection.rotate_cost_by_matrix', 'rotate_cost_by_matrix', (['self.Q_pos', 'rot[1].T'], {}), '(self.Q_pos, rot[1].T)\n', (3433, 3455), False, 'from envs.mujoco.utils.projection import rotate_cost_by_matrix\n'), ((5002, 5069), 'envs.mujoco.utils.kinematics.forwardKinSite', 'forwardKinSite', (['self.sim', "['peg_tip', 'hole_base']"], {'recompute': '(False)'}), "(self.sim, ['peg_tip', 'hole_base'], recompute=False)\n", (5016, 5069), False, 'from 
envs.mujoco.utils.kinematics import forwardKinSite, forwardKinJacobianSite\n'), ((5901, 5963), 'envs.mujoco.utils.kinematics.forwardKinSite', 'forwardKinSite', (['self.sim', "['peg_tip', 'hole_base', 'hole_top']"], {}), "(self.sim, ['peg_tip', 'hole_base', 'hole_top'])\n", (5915, 5963), False, 'from envs.mujoco.utils.kinematics import forwardKinSite, forwardKinJacobianSite\n'), ((6433, 6494), 'envs.mujoco.utils.kinematics.forwardKinJacobianSite', 'forwardKinJacobianSite', (['self.sim', 'peg_tip_id'], {'recompute': '(False)'}), '(self.sim, peg_tip_id, recompute=False)\n', (6455, 6494), False, 'from envs.mujoco.utils.kinematics import forwardKinSite, forwardKinJacobianSite\n'), ((6851, 6942), 'numpy.concatenate', 'np.concatenate', (['[obs, pos_obs, rot_obs, peg_tip_lin_vel, peg_tip_rot_vel, hole_top_obs]'], {}), '([obs, pos_obs, rot_obs, peg_tip_lin_vel, peg_tip_rot_vel,\n hole_top_obs])\n', (6865, 6942), True, 'import numpy as np\n'), ((7031, 7093), 'envs.mujoco.utils.kinematics.forwardKinSite', 'forwardKinSite', (['self.sim', "['peg_tip', 'hole_base', 'hole_top']"], {}), "(self.sim, ['peg_tip', 'hole_base', 'hole_top'])\n", (7045, 7093), False, 'from envs.mujoco.utils.kinematics import forwardKinSite, forwardKinJacobianSite\n'), ((7805, 7816), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (7813, 7816), True, 'import numpy as np\n'), ((2274, 2283), 'numpy.eye', 'np.eye', (['(7)'], {}), '(7)\n', (2280, 2283), True, 'import numpy as np\n'), ((5522, 5533), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (5530, 5533), True, 'import numpy as np\n'), ((5643, 5654), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (5651, 5654), True, 'import numpy as np\n'), ((5829, 5858), 'numpy.concatenate', 'np.concatenate', (['[obs, ft_obs]'], {}), '([obs, ft_obs])\n', (5843, 5858), True, 'import numpy as np\n'), ((6041, 6057), 'envs.mujoco.utils.quaternion.mat2Quat', 'mat2Quat', (['rot[0]'], {}), '(rot[0])\n', (6049, 6057), False, 'from envs.mujoco.utils.quaternion import 
mat2Quat, subQuat\n'), ((6078, 6094), 'envs.mujoco.utils.quaternion.mat2Quat', 'mat2Quat', (['rot[1]'], {}), '(rot[1])\n', (6086, 6094), False, 'from envs.mujoco.utils.quaternion import mat2Quat, subQuat\n'), ((6297, 6313), 'envs.mujoco.utils.quaternion.mat2Quat', 'mat2Quat', (['rot[1]'], {}), '(rot[1])\n', (6305, 6313), False, 'from envs.mujoco.utils.quaternion import mat2Quat, subQuat\n'), ((2407, 2423), 'envs.mujoco.envs.assets.kuka_asset_dir', 'kuka_asset_dir', ([], {}), '()\n', (2421, 2423), False, 'from envs.mujoco.envs.assets import kuka_asset_dir\n'), ((2542, 2558), 'envs.mujoco.envs.assets.kuka_asset_dir', 'kuka_asset_dir', ([], {}), '()\n', (2556, 2558), False, 'from envs.mujoco.envs.assets import kuka_asset_dir\n'), ((6108, 6145), 'envs.mujoco.utils.quaternion.subQuat', 'subQuat', (['quat_hole_base', 'quat_peg_tip'], {}), '(quat_hole_base, quat_peg_tip)\n', (6115, 6145), False, 'from envs.mujoco.utils.quaternion import mat2Quat, subQuat\n')] |
import numpy as np
import torch
from ignite.engine import Engine
def mixup_data(x, y, alpha=1.0, use_cuda=True):
    '''Returns mixed inputs, pairs of targets, and lambda'''
    # Sample the mixing coefficient from Beta(alpha, alpha); a non-positive
    # alpha disables mixing entirely (lam == 1 keeps the original batch).
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1
    batch_size = x.size()[0]
    # Random pairing of samples within the batch.
    index = torch.randperm(batch_size)
    if use_cuda:
        index = index.cuda()
    y_a = y
    y_b = y[index]
    mixed_x = lam * x + (1 - lam) * x[index, :]
    return mixed_x, y_a, y_b, lam
def mixup_criterion(criterion, pred, y_a, y_b, lam):
    """Convex combination of the criterion against both mixup targets."""
    loss_a = criterion(pred, y_a)
    loss_b = criterion(pred, y_b)
    return lam * loss_a + (1 - lam) * loss_b
def create_mixup_trainer(model, optimizer, loss_fn, alpha=1.0, device=None):
    """
    Factory function for creating a trainer for mixup augmented models
    Args:
        model (`torch.nn.Module`): the model to train
        optimizer (`torch.optim.Optimizer`): the optimizer to use
        loss_fn (torch.nn loss function): the loss function to use
        alpha (float, optional): mixup Beta-distribution parameter (default: 1.0)
        device (str, optional): device type specification (default: None).
            Applies to both model and batches.
    Returns:
        Engine: a trainer engine with supervised update function
    """
    # Import once here instead of on every batch inside _update.
    from ignite.engine import _prepare_batch

    if device:
        model.to(device)
    # Recognize "cuda", "cuda:0", torch.device("cuda"), ... — the previous
    # `device == "cuda"` check silently missed indexed devices like "cuda:0".
    use_cuda = device is not None and str(device).startswith("cuda")

    def _update(engine, batch):
        model.train()
        optimizer.zero_grad()
        inputs, targets = _prepare_batch(batch, device=device)
        inputs, targets_a, targets_b, lam = mixup_data(
            inputs, targets, alpha, use_cuda=use_cuda)
        outputs = model(inputs)
        loss = mixup_criterion(loss_fn, outputs, targets_a, targets_b, lam)
        loss.backward()
        optimizer.step()
        return loss.item()

    return Engine(_update)
def create_mixup_trainer_x2(model, optimizer, loss_fn, alpha=1.0, device=None):
    """
    Factory function for creating a trainer for mixup augmented models.
    It expects that the model outputs two outputs (like the Inception3 auxlogits);
    the mixup loss is computed for each output and summed.
    Args:
        model (`torch.nn.Module`): the model to train
        optimizer (`torch.optim.Optimizer`): the optimizer to use
        loss_fn (torch.nn loss function): the loss function to use
        alpha (float, optional): mixup Beta-distribution parameter (default: 1.0)
        device (str, optional): device type specification (default: None).
            Applies to both model and batches.
    Returns:
        Engine: a trainer engine with supervised update function
    """
    # Import once here instead of on every batch inside _update.
    from ignite.engine import _prepare_batch

    if device:
        model.to(device)
    # Recognize "cuda", "cuda:0", torch.device("cuda"), ... — the previous
    # `device == "cuda"` check silently missed indexed devices like "cuda:0".
    use_cuda = device is not None and str(device).startswith("cuda")

    def _update(engine, batch):
        model.train()
        optimizer.zero_grad()
        inputs, targets = _prepare_batch(batch, device=device)
        inputs, targets_a, targets_b, lam = mixup_data(
            inputs, targets, alpha, use_cuda=use_cuda)
        outputs1, outputs2 = model(inputs)
        loss1 = mixup_criterion(loss_fn, outputs1, targets_a, targets_b, lam)
        loss2 = mixup_criterion(loss_fn, outputs2, targets_a, targets_b, lam)
        loss = loss1 + loss2
        loss.backward()
        optimizer.step()
        return loss.item()

    return Engine(_update)
| [
"numpy.random.beta",
"ignite.engine.Engine",
"torch.randperm",
"ignite.engine._prepare_batch"
] | [((1820, 1835), 'ignite.engine.Engine', 'Engine', (['_update'], {}), '(_update)\n', (1826, 1835), False, 'from ignite.engine import Engine\n'), ((3209, 3224), 'ignite.engine.Engine', 'Engine', (['_update'], {}), '(_update)\n', (3215, 3224), False, 'from ignite.engine import Engine\n'), ((210, 238), 'numpy.random.beta', 'np.random.beta', (['alpha', 'alpha'], {}), '(alpha, alpha)\n', (224, 238), True, 'import numpy as np\n'), ((389, 415), 'torch.randperm', 'torch.randperm', (['batch_size'], {}), '(batch_size)\n', (403, 415), False, 'import torch\n'), ((1423, 1459), 'ignite.engine._prepare_batch', '_prepare_batch', (['batch'], {'device': 'device'}), '(batch, device=device)\n', (1437, 1459), False, 'from ignite.engine import _prepare_batch\n'), ((2691, 2727), 'ignite.engine._prepare_batch', '_prepare_batch', (['batch'], {'device': 'device'}), '(batch, device=device)\n', (2705, 2727), False, 'from ignite.engine import _prepare_batch\n'), ((329, 355), 'torch.randperm', 'torch.randperm', (['batch_size'], {}), '(batch_size)\n', (343, 355), False, 'import torch\n')] |
import os
import sys
from datetime import datetime
sys.path.append("../../")
import numpy as np
import pandas as pd
from bandit.env import MultiBanditEnv
class PatientEnv(MultiBanditEnv):
    """Multi-armed-bandit environment simulating patient adherence.

    For each epoch it runs the policies ('rl', 'random' and optionally
    'tailored') for `iterations` steps and records the average adherence
    rate of all patients (bandits) after each iteration. `run` averages
    these per-epoch curves over all epochs into `self.result`.
    """
    def __init__(self, iterations, epochs):
        super().__init__(iterations, epochs)
        # per-policy list of average adherence values for the current epoch
        self._average_adhs = dict()
        # running sum of per-epoch adherence DataFrames; divided by epochs at
        # the end of run() to become the mean curve
        self.result = 0
    # this is not an observable data in the real world
    @property
    def average_adhs(self):
        # Snapshot of the current epoch's per-policy adherence curves.
        return pd.DataFrame(self._average_adhs)
    @property
    def rewards(self):
        # Per-policy reward history of the last _run_epoch call.
        return pd.DataFrame(self._rewards)
    def _reset_per_epoch(self, agent):
        # Clear agent state and adherence bookkeeping between epochs.
        agent.reset()
        self._average_adhs = dict()
    def run(self, agent, tailored=True):
        """Run all epochs and store the epoch-averaged adherence in self.result."""
        for _ in range(self.epochs):
            self._run_epoch(agent, tailored)
            # DataFrame addition: self.result starts as the int 0 and then
            # accumulates the per-epoch DataFrames element-wise.
            self.result += self.average_adhs
            self.average_adhs_once = self.average_adhs
            self._reset_per_epoch(agent)
        self.result = self.result / self.epochs
        self._average_adhs = self.result
    def _run_epoch(self, agent, tailored):
        """Run one epoch for every policy, recording rewards and adherence."""
        if tailored:
            policies = ['rl', 'random', 'tailored']
        else:
            policies = ['rl', 'random']
        for policy in policies:
            self.reset_env()
            self._rewards[policy] = []
            average_adh = []
            # adherence before any action is taken (iteration 0 baseline)
            average_adh.append(np.mean([
                bandit.adherence for bandit in self.m_bandits.bandits]))
            for iteration in range(self.iterations):
                # halfway through the epoch the environment may change
                if iteration == self.iterations/2:
                    self.barrier_change()
                for _ in range(len(self.m_bandits.bandits)):
                    self.m_bandits.get_bandit()
                    agent.get_state(self.m_bandits.bandit)
                    # Only contextual bandit has method get_state
                    action = self._get_action(agent, policy, iteration)
                    agent.last_action = action
                    reward = self.m_bandits.pull(action)[0]
                    self._rewards[policy].append(reward)
                    agent.observe(reward)
                average_adh.append(np.mean(
                    [bandit.adherence for bandit in self.m_bandits.bandits]))
                # originally bnadit._adherence
            self._average_adhs[policy] = average_adh
    def barrier_change(self):
        # Hook for subclasses: mutate patient barriers mid-epoch. No-op here.
        pass
    def _get_action(self, agent, policy, iteration):
        """Pick an action according to the given policy.

        'rl' delegates to the agent; 'random' picks uniformly; 'tailored'
        forces action 2 for patients 20..39, otherwise picks a random
        active barrier. Returns None for unknown policy names.
        """
        if policy == 'rl':
            return agent.choose()
        elif policy == 'random':
            return np.random.randint(agent.k)
        elif policy == 'tailored':
            if 20 <= self.m_bandits.bandit.patient_id < 40:
                action = 2
            else:
                action = np.random.choice(
                    np.where(self.m_bandits.bandit.barriers == 1)[0])
            return action
    def plot_adherence(self, alpha):
        # NOTE(review): `plt` is never imported in this module — calling this
        # method raises NameError unless matplotlib.pyplot is imported
        # elsewhere before use; confirm intended usage.
        ax = self.result.plot(title="Average adherence rate of patients",
                              legend=True,
                              yticks=[0.5, 0.6, 0.7, 0.8, 0.9])
        ax.set(xlabel = "alpha = {}".format(alpha))
        plt.show()
    def save_result(self, alpha, epochs, filename, save_path='./'):
        """Plot the averaged adherence curves and save the figure as PNG."""
        #self.result.to_csv(
        #    os.path.join(save_path,
        #                 filename + datetime.strftime(datetime.now(),
        #                                              '%Y%m%d-%H%M.csv'))
        #)
        ax = self.result.plot(title="Average adherence rate of patients",
                              legend=True,
                              yticks=[0.5, 0.6, 0.7, 0.8, 0.9])
        ax.set(xlabel = "alpha = {}, epochs = {}".format(alpha, epochs))
        fig = ax.get_figure()
        fig.savefig(
            os.path.join(save_path,
                         filename + datetime.strftime(datetime.now(),
                                                      'plot_%Y%m%d-%H%M.png'))
        )
class PatientEnv2(PatientEnv):
    """Variant of PatientEnv where the tailored policy only forces action 1
    (for patients 20..39) during the second half of an epoch, and the
    mid-epoch barrier change migrates barrier slot 1 into slot 0."""

    def __init__(self, iterations, epochs):
        super().__init__(iterations, epochs)

    def _get_action(self, agent, policy, iteration):
        if policy == 'rl':
            return agent.choose()
        if policy == 'random':
            return np.random.randint(agent.k)
        if policy == 'tailored':
            patient = self.m_bandits.bandit
            in_second_half = iteration >= self.iterations / 2
            if in_second_half and 20 <= patient.patient_id < 40:
                return 1
            active_barriers = np.where(patient.barriers == 1)[0]
            return np.random.choice(active_barriers)

    def barrier_change(self):
        # Move every patient's active barrier from slot 1 to slot 0.
        for patient in self.m_bandits.bandits:
            if patient.barriers[1] != 1:
                continue
            patient.barriers[1] = 0
            patient.barriers[0] = 1
| [
"sys.path.append",
"pandas.DataFrame",
"numpy.mean",
"numpy.random.randint",
"numpy.where",
"datetime.datetime.now"
] | [((51, 76), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (66, 76), False, 'import sys\n'), ((457, 489), 'pandas.DataFrame', 'pd.DataFrame', (['self._average_adhs'], {}), '(self._average_adhs)\n', (469, 489), True, 'import pandas as pd\n'), ((543, 570), 'pandas.DataFrame', 'pd.DataFrame', (['self._rewards'], {}), '(self._rewards)\n', (555, 570), True, 'import pandas as pd\n'), ((1355, 1419), 'numpy.mean', 'np.mean', (['[bandit.adherence for bandit in self.m_bandits.bandits]'], {}), '([bandit.adherence for bandit in self.m_bandits.bandits])\n', (1362, 1419), True, 'import numpy as np\n'), ((2535, 2561), 'numpy.random.randint', 'np.random.randint', (['agent.k'], {}), '(agent.k)\n', (2552, 2561), True, 'import numpy as np\n'), ((4213, 4239), 'numpy.random.randint', 'np.random.randint', (['agent.k'], {}), '(agent.k)\n', (4230, 4239), True, 'import numpy as np\n'), ((2136, 2200), 'numpy.mean', 'np.mean', (['[bandit.adherence for bandit in self.m_bandits.bandits]'], {}), '([bandit.adherence for bandit in self.m_bandits.bandits])\n', (2143, 2200), True, 'import numpy as np\n'), ((3819, 3833), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3831, 3833), False, 'from datetime import datetime\n'), ((2765, 2810), 'numpy.where', 'np.where', (['(self.m_bandits.bandit.barriers == 1)'], {}), '(self.m_bandits.bandit.barriers == 1)\n', (2773, 2810), True, 'import numpy as np\n'), ((4459, 4504), 'numpy.where', 'np.where', (['(self.m_bandits.bandit.barriers == 1)'], {}), '(self.m_bandits.bandit.barriers == 1)\n', (4467, 4504), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Trains the neural network on the training data, supports resuming training
"""
import glob
import logging
import multiprocessing as mp
import os
import random
from argparse import Namespace
import numpy as np
import tensorflow as tf
from wormpose.commands import _log_parameters
from wormpose.config import default_paths
from wormpose.config.default_paths import SYNTH_TRAIN_DATASET_NAMES, REAL_EVAL_DATASET_NAMES, CONFIG_FILENAME
from wormpose.config.experiment_config import load_config, add_config_argument
from wormpose.dataset.loader import get_dataset_name
from wormpose.machine_learning import model
from wormpose.machine_learning.best_models_saver import BestModels
from wormpose.machine_learning.loss import symmetric_angle_difference
from wormpose.machine_learning.tfrecord_file import get_tfrecord_dataset
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
tf.get_logger().setLevel(logging.INFO)
def _find_tfrecord_files(experiment_dir: str):
    """Locate the train/eval tfrecord files under the experiment directory.

    Raises FileNotFoundError when either set of files is missing.
    """
    data_dir = os.path.join(experiment_dir, default_paths.TRAINING_DATA_DIR)

    def _matching(name_pattern):
        # Expand the dataset index placeholder into a glob wildcard.
        return glob.glob(os.path.join(data_dir, name_pattern.format(index="*")))

    train_files = _matching(SYNTH_TRAIN_DATASET_NAMES)
    eval_files = _matching(REAL_EVAL_DATASET_NAMES)
    if not train_files or not eval_files:
        raise FileNotFoundError("Training/Eval dataset not found.")
    return train_files, eval_files
def _parse_arguments(dataset_path: str, kwargs: dict):
    """Fill in defaults for missing (or None) options, derive the experiment
    paths, log everything and return the options as a Namespace."""
    defaults = {
        "work_dir": default_paths.WORK_DIR,
        "batch_size": 128,
        "epochs": 100,
        "network_model": model.build_model,
        "optimizer": "adam",
        "loss": symmetric_angle_difference,
        "random_seed": None,
    }
    for option, default_value in defaults.items():
        # Explicit None values are replaced too, not only missing keys.
        if kwargs.get(option) is None:
            kwargs[option] = default_value

    dataset_name = get_dataset_name(dataset_path)
    kwargs["experiment_dir"] = os.path.join(kwargs["work_dir"], dataset_name)
    if kwargs.get("config") is None:
        kwargs["config"] = os.path.join(kwargs["experiment_dir"], CONFIG_FILENAME)

    _log_parameters(logger.info, {"dataset_path": dataset_path})
    _log_parameters(logger.info, kwargs)

    return Namespace(**kwargs)
def train(dataset_path: str, **kwargs):
    """
    Train a neural network with the TFrecord files generated with the script generate_training_data
    Save the best model performing on evaluation data
    :param dataset_path: Root path of the dataset containing videos of worm
    :param kwargs: optional overrides (work_dir, batch_size, epochs,
        network_model, optimizer, loss, random_seed, config), see _parse_arguments
    """
    args = _parse_arguments(dataset_path, kwargs)
    # "spawn" start method, forced so child processes get a fresh interpreter
    mp.set_start_method("spawn", force=True)
    # Seed all RNG sources for reproducible runs when a seed is given
    if args.random_seed is not None:
        os.environ["TF_DETERMINISTIC_OPS"] = "1"
        random.seed(args.random_seed)
        np.random.seed(args.random_seed)
        tf.random.set_seed(args.random_seed)
    models_dir = os.path.join(args.experiment_dir, default_paths.MODELS_DIRS)
    if not os.path.exists(models_dir):
        os.mkdir(models_dir)
    train_tfrecord_filenames, eval_tfrecord_filenames = _find_tfrecord_files(args.experiment_dir)
    config = load_config(args.config)
    # Each dataset must contain at least one full batch
    if config.num_eval_samples < args.batch_size or config.num_train_samples < args.batch_size:
        raise ValueError("The number of samples in the train and eval datasets must be higher than the batch size.")
    train_dataset = get_tfrecord_dataset(
        filenames=train_tfrecord_filenames,
        image_shape=config.image_shape,
        batch_size=args.batch_size,
        theta_dims=config.theta_dimensions,
        is_train=True,
    )
    validation_dataset = get_tfrecord_dataset(
        filenames=eval_tfrecord_filenames,
        image_shape=config.image_shape,
        batch_size=args.batch_size,
        theta_dims=config.theta_dimensions,
        is_train=False,
    )
    tensorboard_callback = tf.keras.callbacks.TensorBoard(
        log_dir=os.path.join(args.experiment_dir, "tensorboard_log"), histogram_freq=1
    )
    # Checkpoint every epoch; BestModels decides which saved models to keep
    best_models_callback = BestModels(models_dir)
    checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
        best_models_callback.models_name_pattern,
        save_best_only=False,
        save_weights_only=False,
        monitor="val_loss",
        mode="min",
    )
    keras_model = args.network_model(input_shape=config.image_shape, out_dim=config.theta_dimensions)
    # Resume from the last checkpoint if one exists on disk
    last_model_path = best_models_callback.last_model_path
    if os.path.isfile(last_model_path):
        keras_model = tf.keras.models.load_model(last_model_path, compile=False)
    keras_model.compile(optimizer=args.optimizer, loss=args.loss)
    keras_model.fit(
        train_dataset,
        epochs=args.epochs,
        steps_per_epoch=config.num_train_samples // args.batch_size,
        shuffle=False,
        initial_epoch=best_models_callback.epoch,
        validation_data=validation_dataset,
        validation_steps=config.num_eval_samples // args.batch_size,
        callbacks=[tensorboard_callback, checkpoint_callback, best_models_callback],
    )
def main():
    """Command-line entry point: parse arguments and launch training."""
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("dataset_path", type=str)
    add_config_argument(parser)
    # Optional overrides forwarded to train() (same order as before so the
    # --help output is unchanged).
    optional_arguments = [
        ("batch_size", int, "Batch size for training"),
        ("epochs", int, "How many epochs to train the network"),
        ("work_dir", str, "Root folder for all experiments"),
        ("optimizer", str, "Which optimizer for training, 'adam' by default."),
        ("random_seed", int, "Optional random seed for deterministic results"),
    ]
    for option, option_type, help_text in optional_arguments:
        parser.add_argument("--" + option, type=option_type, help=help_text)
    args = parser.parse_args()
    train(**vars(args))
if __name__ == "__main__":
main()
| [
"argparse.Namespace",
"tensorflow.random.set_seed",
"wormpose.commands._log_parameters",
"os.mkdir",
"numpy.random.seed",
"argparse.ArgumentParser",
"wormpose.config.experiment_config.load_config",
"multiprocessing.set_start_method",
"tensorflow.keras.callbacks.ModelCheckpoint",
"os.path.isfile",
... | [((848, 869), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (867, 869), False, 'import logging\n'), ((879, 906), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (896, 906), False, 'import logging\n'), ((1050, 1111), 'os.path.join', 'os.path.join', (['experiment_dir', 'default_paths.TRAINING_DATA_DIR'], {}), '(experiment_dir, default_paths.TRAINING_DATA_DIR)\n', (1062, 1111), False, 'import os\n'), ((2207, 2237), 'wormpose.dataset.loader.get_dataset_name', 'get_dataset_name', (['dataset_path'], {}), '(dataset_path)\n', (2223, 2237), False, 'from wormpose.dataset.loader import get_dataset_name\n'), ((2269, 2315), 'os.path.join', 'os.path.join', (["kwargs['work_dir']", 'dataset_name'], {}), "(kwargs['work_dir'], dataset_name)\n", (2281, 2315), False, 'import os\n'), ((2442, 2502), 'wormpose.commands._log_parameters', '_log_parameters', (['logger.info', "{'dataset_path': dataset_path}"], {}), "(logger.info, {'dataset_path': dataset_path})\n", (2457, 2502), False, 'from wormpose.commands import _log_parameters\n'), ((2507, 2543), 'wormpose.commands._log_parameters', '_log_parameters', (['logger.info', 'kwargs'], {}), '(logger.info, kwargs)\n', (2522, 2543), False, 'from wormpose.commands import _log_parameters\n'), ((2556, 2575), 'argparse.Namespace', 'Namespace', ([], {}), '(**kwargs)\n', (2565, 2575), False, 'from argparse import Namespace\n'), ((2920, 2960), 'multiprocessing.set_start_method', 'mp.set_start_method', (['"""spawn"""'], {'force': '(True)'}), "('spawn', force=True)\n", (2939, 2960), True, 'import multiprocessing as mp\n'), ((3190, 3250), 'os.path.join', 'os.path.join', (['args.experiment_dir', 'default_paths.MODELS_DIRS'], {}), '(args.experiment_dir, default_paths.MODELS_DIRS)\n', (3202, 3250), False, 'import os\n'), ((3432, 3456), 'wormpose.config.experiment_config.load_config', 'load_config', (['args.config'], {}), '(args.config)\n', (3443, 3456), False, 'from wormpose.config.experiment_config 
import load_config, add_config_argument\n'), ((3691, 3868), 'wormpose.machine_learning.tfrecord_file.get_tfrecord_dataset', 'get_tfrecord_dataset', ([], {'filenames': 'train_tfrecord_filenames', 'image_shape': 'config.image_shape', 'batch_size': 'args.batch_size', 'theta_dims': 'config.theta_dimensions', 'is_train': '(True)'}), '(filenames=train_tfrecord_filenames, image_shape=config\n .image_shape, batch_size=args.batch_size, theta_dims=config.\n theta_dimensions, is_train=True)\n', (3711, 3868), False, 'from wormpose.machine_learning.tfrecord_file import get_tfrecord_dataset\n'), ((3931, 4108), 'wormpose.machine_learning.tfrecord_file.get_tfrecord_dataset', 'get_tfrecord_dataset', ([], {'filenames': 'eval_tfrecord_filenames', 'image_shape': 'config.image_shape', 'batch_size': 'args.batch_size', 'theta_dims': 'config.theta_dimensions', 'is_train': '(False)'}), '(filenames=eval_tfrecord_filenames, image_shape=config.\n image_shape, batch_size=args.batch_size, theta_dims=config.\n theta_dimensions, is_train=False)\n', (3951, 4108), False, 'from wormpose.machine_learning.tfrecord_file import get_tfrecord_dataset\n'), ((4326, 4348), 'wormpose.machine_learning.best_models_saver.BestModels', 'BestModels', (['models_dir'], {}), '(models_dir)\n', (4336, 4348), False, 'from wormpose.machine_learning.best_models_saver import BestModels\n'), ((4375, 4539), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', (['best_models_callback.models_name_pattern'], {'save_best_only': '(False)', 'save_weights_only': '(False)', 'monitor': '"""val_loss"""', 'mode': '"""min"""'}), "(best_models_callback.models_name_pattern,\n save_best_only=False, save_weights_only=False, monitor='val_loss', mode\n ='min')\n", (4409, 4539), True, 'import tensorflow as tf\n'), ((4747, 4778), 'os.path.isfile', 'os.path.isfile', (['last_model_path'], {}), '(last_model_path)\n', (4761, 4778), False, 'import os\n'), ((5394, 5419), 'argparse.ArgumentParser', 
'argparse.ArgumentParser', ([], {}), '()\n', (5417, 5419), False, 'import argparse\n'), ((5476, 5503), 'wormpose.config.experiment_config.add_config_argument', 'add_config_argument', (['parser'], {}), '(parser)\n', (5495, 5503), False, 'from wormpose.config.experiment_config import load_config, add_config_argument\n'), ((938, 953), 'tensorflow.get_logger', 'tf.get_logger', ([], {}), '()\n', (951, 953), True, 'import tensorflow as tf\n'), ((2381, 2436), 'os.path.join', 'os.path.join', (["kwargs['experiment_dir']", 'CONFIG_FILENAME'], {}), "(kwargs['experiment_dir'], CONFIG_FILENAME)\n", (2393, 2436), False, 'import os\n'), ((3056, 3085), 'random.seed', 'random.seed', (['args.random_seed'], {}), '(args.random_seed)\n', (3067, 3085), False, 'import random\n'), ((3094, 3126), 'numpy.random.seed', 'np.random.seed', (['args.random_seed'], {}), '(args.random_seed)\n', (3108, 3126), True, 'import numpy as np\n'), ((3135, 3171), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['args.random_seed'], {}), '(args.random_seed)\n', (3153, 3171), True, 'import tensorflow as tf\n'), ((3262, 3288), 'os.path.exists', 'os.path.exists', (['models_dir'], {}), '(models_dir)\n', (3276, 3288), False, 'import os\n'), ((3298, 3318), 'os.mkdir', 'os.mkdir', (['models_dir'], {}), '(models_dir)\n', (3306, 3318), False, 'import os\n'), ((4802, 4860), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['last_model_path'], {'compile': '(False)'}), '(last_model_path, compile=False)\n', (4828, 4860), True, 'import tensorflow as tf\n'), ((1185, 1228), 'wormpose.config.default_paths.SYNTH_TRAIN_DATASET_NAMES.format', 'SYNTH_TRAIN_DATASET_NAMES.format', ([], {'index': '"""*"""'}), "(index='*')\n", (1217, 1228), False, 'from wormpose.config.default_paths import SYNTH_TRAIN_DATASET_NAMES, REAL_EVAL_DATASET_NAMES, CONFIG_FILENAME\n'), ((1303, 1344), 'wormpose.config.default_paths.REAL_EVAL_DATASET_NAMES.format', 'REAL_EVAL_DATASET_NAMES.format', ([], {'index': '"""*"""'}), 
"(index='*')\n", (1333, 1344), False, 'from wormpose.config.default_paths import SYNTH_TRAIN_DATASET_NAMES, REAL_EVAL_DATASET_NAMES, CONFIG_FILENAME\n'), ((4222, 4274), 'os.path.join', 'os.path.join', (['args.experiment_dir', '"""tensorboard_log"""'], {}), "(args.experiment_dir, 'tensorboard_log')\n", (4234, 4274), False, 'import os\n')] |
"""
Train TC on all *good* objects in the 11,057 overlap set
(9594 objects)
"""
import numpy as np
import glob
import matplotlib.pyplot as plt
import sys
import os
import pyfits
from astropy.table import Table
from matplotlib.colors import LogNorm
from matplotlib import rc
rc('font', family='serif')
rc('text', usetex=True)
from TheCannon import lamost
from TheCannon import dataset
from TheCannon import model
def load_data():
    """Build the training set from the APOGEE label file and the 11,057-object
    overlap spectra, then save tr_id/tr_label/tr_flux/tr_ivar as .npz files.

    Raises KeyError if a training id is not present in the overlap set
    (previously an IndexError from an empty np.where result).
    """
    print("Loading all data...")
    a = pyfits.open("../data/label_file.fits")
    tbdata = a[1].data
    a.close()
    apogee_teff = tbdata['apogee_teff']
    apogee_logg = tbdata['apogee_logg']
    apogee_mh = tbdata['apogee_mh']
    apogee_alpham = tbdata['apogee_alpham']
    apogee_reddening = tbdata['AK_WISE']
    # One row per object: (teff, logg, [M/H], [alpha/Fe], reddening)
    tr_label = np.vstack(
        (apogee_teff, apogee_logg, apogee_mh, apogee_alpham, apogee_reddening)).T
    tr_id_full = tbdata['lamost_id']
    tr_id = np.array([val.strip() for val in tr_id_full])
    # Load data for all 11,057 overlap objects & select training data
    all_id = np.load("../data/all_ids.npz")['arr_0']
    all_flux = np.load("../data/test_flux.npz")['arr_0']
    all_ivar = np.load("../data/test_ivar.npz")['arr_0']
    print("Selecting training data...")
    # Build a first-occurrence index map once (O(N)) instead of scanning
    # all_id with np.where for every training id (O(N*M)).
    id_to_index = {}
    for i, name in enumerate(all_id):
        id_to_index.setdefault(name, i)
    good = np.array([id_to_index[f] for f in tr_id])
    good_flux = all_flux[good, :]
    good_ivar = all_ivar[good, :]
    np.savez("tr_id.npz", tr_id)
    np.savez("tr_label.npz", tr_label)
    np.savez("tr_flux.npz", good_flux)
    np.savez("tr_ivar.npz", good_ivar)
def train():
    """Train a quadratic Cannon model on the saved training set and write
    the model arrays plus diagnostic plots to disk."""
    # Load training set
    wl = np.load("../data/wl.npz")['arr_0']
    tr_id = np.load("tr_id.npz")['arr_0']
    tr_label = np.load("tr_label.npz")['arr_0']
    tr_flux = np.load("tr_flux.npz")['arr_0']
    tr_ivar = np.load("tr_ivar.npz")['arr_0']
    # The training objects double as the "test" set (self-validation).
    ds = dataset.Dataset(
        wl, tr_id, tr_flux, tr_ivar, tr_label, tr_id, tr_flux, tr_ivar)
    ds.set_label_names(['T_{eff}', '\log g', '[M/H]', '[\\alpha/Fe]', 'AKWISE'])
    ds.diagnostics_SNR()
    ds.diagnostics_ref_labels()
    np.savez("./tr_snr.npz", ds.tr_SNR)
    # Order-2 (quadratic) Cannon model.
    m = model.CannonModel(2)
    m.fit(ds)
    # Persist the fitted model components.
    np.savez("./coeffs.npz", m.coeffs)
    np.savez("./scatters.npz", m.scatters)
    np.savez("./chisqs.npz", m.chisqs)
    np.savez("./pivots.npz", m.pivots)
    m.diagnostics_leading_coeffs(ds)
    m.diagnostics_leading_coeffs_triangle(ds)
    m.diagnostics_plot_chisq(ds)
if __name__=="__main__":
#load_data()
train()
| [
"matplotlib.rc",
"numpy.load",
"pyfits.open",
"numpy.where",
"numpy.vstack",
"numpy.savez",
"TheCannon.model.CannonModel",
"TheCannon.dataset.Dataset"
] | [((276, 302), 'matplotlib.rc', 'rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (278, 302), False, 'from matplotlib import rc\n'), ((303, 326), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (305, 326), False, 'from matplotlib import rc\n'), ((474, 512), 'pyfits.open', 'pyfits.open', (['"""../data/label_file.fits"""'], {}), "('../data/label_file.fits')\n", (485, 512), False, 'import pyfits\n'), ((1358, 1386), 'numpy.savez', 'np.savez', (['"""tr_id.npz"""', 'tr_id'], {}), "('tr_id.npz', tr_id)\n", (1366, 1386), True, 'import numpy as np\n'), ((1391, 1425), 'numpy.savez', 'np.savez', (['"""tr_label.npz"""', 'tr_label'], {}), "('tr_label.npz', tr_label)\n", (1399, 1425), True, 'import numpy as np\n'), ((1430, 1464), 'numpy.savez', 'np.savez', (['"""tr_flux.npz"""', 'good_flux'], {}), "('tr_flux.npz', good_flux)\n", (1438, 1464), True, 'import numpy as np\n'), ((1469, 1503), 'numpy.savez', 'np.savez', (['"""tr_ivar.npz"""', 'good_ivar'], {}), "('tr_ivar.npz', good_ivar)\n", (1477, 1503), True, 'import numpy as np\n'), ((1779, 1858), 'TheCannon.dataset.Dataset', 'dataset.Dataset', (['wl', 'tr_id', 'tr_flux', 'tr_ivar', 'tr_label', 'tr_id', 'tr_flux', 'tr_ivar'], {}), '(wl, tr_id, tr_flux, tr_ivar, tr_label, tr_id, tr_flux, tr_ivar)\n', (1794, 1858), False, 'from TheCannon import dataset\n'), ((2014, 2049), 'numpy.savez', 'np.savez', (['"""./tr_snr.npz"""', 'ds.tr_SNR'], {}), "('./tr_snr.npz', ds.tr_SNR)\n", (2022, 2049), True, 'import numpy as np\n'), ((2059, 2079), 'TheCannon.model.CannonModel', 'model.CannonModel', (['(2)'], {}), '(2)\n', (2076, 2079), False, 'from TheCannon import model\n'), ((2098, 2132), 'numpy.savez', 'np.savez', (['"""./coeffs.npz"""', 'm.coeffs'], {}), "('./coeffs.npz', m.coeffs)\n", (2106, 2132), True, 'import numpy as np\n'), ((2137, 2175), 'numpy.savez', 'np.savez', (['"""./scatters.npz"""', 'm.scatters'], {}), "('./scatters.npz', m.scatters)\n", (2145, 2175), 
True, 'import numpy as np\n'), ((2180, 2214), 'numpy.savez', 'np.savez', (['"""./chisqs.npz"""', 'm.chisqs'], {}), "('./chisqs.npz', m.chisqs)\n", (2188, 2214), True, 'import numpy as np\n'), ((2219, 2253), 'numpy.savez', 'np.savez', (['"""./pivots.npz"""', 'm.pivots'], {}), "('./pivots.npz', m.pivots)\n", (2227, 2253), True, 'import numpy as np\n'), ((767, 852), 'numpy.vstack', 'np.vstack', (['(apogee_teff, apogee_logg, apogee_mh, apogee_alpham, apogee_reddening)'], {}), '((apogee_teff, apogee_logg, apogee_mh, apogee_alpham,\n apogee_reddening))\n', (776, 852), True, 'import numpy as np\n'), ((1026, 1056), 'numpy.load', 'np.load', (['"""../data/all_ids.npz"""'], {}), "('../data/all_ids.npz')\n", (1033, 1056), True, 'import numpy as np\n'), ((1081, 1113), 'numpy.load', 'np.load', (['"""../data/test_flux.npz"""'], {}), "('../data/test_flux.npz')\n", (1088, 1113), True, 'import numpy as np\n'), ((1138, 1170), 'numpy.load', 'np.load', (['"""../data/test_ivar.npz"""'], {}), "('../data/test_ivar.npz')\n", (1145, 1170), True, 'import numpy as np\n'), ((1552, 1577), 'numpy.load', 'np.load', (['"""../data/wl.npz"""'], {}), "('../data/wl.npz')\n", (1559, 1577), True, 'import numpy as np\n'), ((1599, 1619), 'numpy.load', 'np.load', (['"""tr_id.npz"""'], {}), "('tr_id.npz')\n", (1606, 1619), True, 'import numpy as np\n'), ((1644, 1667), 'numpy.load', 'np.load', (['"""tr_label.npz"""'], {}), "('tr_label.npz')\n", (1651, 1667), True, 'import numpy as np\n'), ((1691, 1713), 'numpy.load', 'np.load', (['"""tr_flux.npz"""'], {}), "('tr_flux.npz')\n", (1698, 1713), True, 'import numpy as np\n'), ((1737, 1759), 'numpy.load', 'np.load', (['"""tr_ivar.npz"""'], {}), "('tr_ivar.npz')\n", (1744, 1759), True, 'import numpy as np\n'), ((1242, 1263), 'numpy.where', 'np.where', (['(all_id == f)'], {}), '(all_id == f)\n', (1250, 1263), True, 'import numpy as np\n')] |
#from scripts.category_tree import CategoryTree
import multiprocessing
from keras.activations import softmax
import keras.backend as K
from keras import metrics
from .per_group_func import PerGroupFunc
import numpy as np
# original algorithm/code is from this URL
# https://github.com/pjreddie/darknet/blob/1e729804f61c8627eb257fba8b83f74e04945db7/src/tree.c
class CategoryTree:
    def __init__(self, tree, leaf=None, n_jobs=multiprocessing.cpu_count()):
        """Build the flat lookup tables (labels, parents, groups, encoder)
        from a nested dict *tree*.

        :param tree: nested dict of category names; a value of None or
            equal to *leaf* marks a terminal node
        :param leaf: optional sentinel value marking leaf nodes
        :param n_jobs: worker count for to_hierarchical_categorical
            (NOTE: the default is evaluated once at class-definition time)
        """
        self.tree = tree
        self.leaf = leaf
        self.n_jobs=n_jobs
        # construct parent/child links and group segments
        self.labels, self.parents, self.is_leaf = self.serialize(tree,leaf=leaf)
        self.nlabels = len(self.labels)
        # alias
        self.nclasses = self.nlabels
        self.ncats = self.nlabels
        self.group_segments, group_nums, _ = self.rlencode(self.parents)
        # append a large index to access by zip(group_segments[:-1],group_segments[1:])
        self.group_segments = np.append(self.group_segments, [self.nlabels])
        # child_group[i] = index of the sibling group whose parent is node i
        self.child_group = [None] * self.nlabels
        for idx in range(self.nlabels)[::-1]:
            p_idx = self.parents[idx]
            if p_idx <0:
                continue
            gidx = np.where([idx in range(*r) for r in self.group_iter()])[0][0]
            self.child_group[p_idx]=gidx
        # The encoder prefers the index of the child class.
        # For example, when "spinach" has only "purple spinach" below it,
        # "spinach" itself is automatically added as the complementary class
        # of "purple spinach" (see: serialize_one_depth):
        #   spinach: {purple spinach: None}
        #     ->
        #   spinach: {purple spinach: None, spinach: None}
        # Writing this as spinachA: {..., spinachB: None}, the encoder holds
        # the idx of spinachB. This is guaranteed because self.labels is
        # ordered with parent classes appearing before their children.
        self.encoder = {l:idx for idx, l in enumerate(self.labels)}
        self.depth = self.get_depth(tree, leaf=leaf)
@staticmethod
def get_depth(tree, leaf=None):
depths = [CategoryTree.get_depth(v) for k,v in tree.items() if v is not None and v != leaf]
if len(depths)==0:
return 1
else:
return max(depths)+1
def get_sorting_code(self, categorical_label, key=None):
if key is None:
key = [i for i in range(self.n_labels)]
idx = np.where(categorical_label==1)[0]
code = [key[i] for i in idx] + [-1] * (self.depth - len(idx))
assert(len(code)==self.depth)
return code
@staticmethod
def set_label(cat, encoder, lut4conv, parents):
if isinstance(cat, list):
if len(cat)==0:
return np.zeros((len(parents),))
if len(cat)==1:
# treat cat as a single value
cat = cat[0]
else:
# apply set_label for each element.
return np.array([CategoryTree.set_label(c, encoder, lut4conv, parents) for c in cat])
# if cat is not a list
n_hot_vector = np.zeros((len(parents),))
if isinstance(cat,str):
cat = encoder[cat]
if lut4conv is not None:
cat = lut4conv[cat]
n_hot_vector[cat] = 1
parent = parents[cat]
while parent >=0:
n_hot_vector[parent] = 1
parent = parents[parent]
return n_hot_vector
@staticmethod
def set_label_wrap(args):
return CategoryTree.set_label(*args)
def to_hierarchical_categorical(self, ys, lut4conv=None):
with multiprocessing.Pool(self.n_jobs) as p:
n_hot_vectors = p.map(CategoryTree.set_label_wrap, [(y, self.encoder, lut4conv, self.parents) for y in ys])
return np.array(n_hot_vectors)
    def generate_func(self, primitive_func, func_name, per_group_func=None):
        """Wrap *primitive_func* so it is applied independently per sibling
        group and return the aggregated callable (per_group_func.call).

        NOTE(review): the *func_name* parameter is shadowed by the returned
        callable and never used for naming — presumably legacy; confirm.
        """
        def _func(y_true,y_pred):
            # Per-sample weight: sum of active labels in this group slice
            # (presumably 1 if the sample has a label here, 0 otherwise —
            # consistent with the n-hot vectors from to_hierarchical_categorical)
            weights = K.sum(y_true,axis=-1,keepdims=False)
            val = primitive_func(y_true,y_pred)
            #sum_weights = K.sum(weights,axis=-1,keepdims=False)
            #return K.sum(weights * primitive_func(y_true,y_pred), axis=-1,keepdims=False)/sum_weights
            return weights * val, weights
        if per_group_func is None:
            per_group_func = PerGroupFunc()
        else:
            per_group_func = per_group_func
        # register the weighted function for every sibling-group index range
        for (s,e) in self.group_iter():
            per_group_func.register(_func,(s,e))
        func_name = per_group_func.call
        return func_name
    def generate_loss_func(self, primitive_func=K.categorical_crossentropy, func_name='hierarchical_categorical_loss', **kwargs):
        """Return a hierarchical loss: *primitive_func* applied per sibling group (see generate_func)."""
        return self.generate_func(primitive_func, func_name,**kwargs)
    def generate_acc_func(self, primitive_func=metrics.categorical_accuracy, func_name='hierarchical_categorical_acc', **kwargs):
        """Return a hierarchical accuracy metric: *primitive_func* applied per sibling group (see generate_func)."""
        return self.generate_func(primitive_func, func_name,**kwargs)
def hierarchical_softmax(self, x, axis=-1):
bufs = [softmax(x[:,s:e],axis) for (s,e) in self.group_iter()]
return K.concatenate(bufs, axis=-1)
def group_iter(self):
return zip(self.group_segments[:-1], self.group_segments[1:])
def print_debug(self):
print("labels: ",self.labels)
print("parents: ",self.parents)
print("is_leaf: ",self.is_leaf)
print("group_segments: ", self.group_segments)
print("child_group: ", self.child_group)
print("encoder: ",self.encoder)
def decode(self, cat):
return self.labels[cat]
def encode(self, label):
return self.encoder[label]
def get_hierarchy_probability(self, predictions, cat, prob=1.0):
assert(len(predictions)==self.nlabels)
parent = self.parents[cat]
prob = predictions[cat] * prob
if parent<0:
return prob
return self.get_hierarchy_probability(predictions, parent, prob)
def hierarchy_predictions(self, _predictions, only_leaves=False):
assert(len(_predictions)==self.nlabels)
predictions = _predictions.copy()
for idx, p in enumerate(predictions):
parent = self.parents[idx]
if parent<0:
continue
p_parent = predictions[parent]
predictions[idx] = p * p_parent
if only_leaves:
predictions[not self.is_leaf] = 0.0
return predictions
# trace the maximum prediction node in brothers at each depth level.
def hierarchy_top_prediction(self, predictions, thresh):
assert(len(predictions)==self.nlabels)
prob = 1.0
gidx = 0
max_idx = -1
while gidx != None:
g_range = self.group_segments[gidx:gidx+2]
g_max_idx = np.argmax(predictions[g_range[0]:g_range[1]]) +g_range[0]
if(prob*predictions[g_max_idx] < thresh):
# no children satisfies prob > thresh
if max_idx == -1:
return None, 0.0
return max_idx, prob
prob *= predictions[g_max_idx]
max_idx = g_max_idx
gidx = self.child_group[max_idx]
return max_idx, prob
@staticmethod
def rlencode(x, dropna=False):
"""
Run length encoding.
Based on http://stackoverflow.com/a/32681075, which is based on the rle
function from R.
Parameters
----------
x : 1D array_like
Input array to encode
dropna: bool, optional
Drop all runs of NaNs.
Returns
-------
start positions, run lengths, run values
"""
where = np.flatnonzero
x = np.asarray(x)
n = len(x)
if n == 0:
return (np.array([], dtype=int),
np.array([], dtype=int),
np.array([], dtype=x.dtype))
starts = np.r_[0, where(~np.isclose(x[1:], x[:-1], equal_nan=True)) + 1]
lengths = np.diff(np.r_[starts, n])
values = x[starts]
if dropna:
mask = ~np.isnan(values)
starts, lengths, values = starts[mask], lengths[mask], values[mask]
return starts, lengths, values
    @staticmethod
    def serialize_one_depth(tree, parent_label,parent_idx,leaf):
        """Serialize one depth level of `tree` into parallel arrays.

        Returns (labels, parents, is_leaf, subtrees) for the direct children
        of this subtree, sorted by label. `parent_idx` is the serialized index
        of the parent node (-1 for the root level).
        """
        labels = list(tree.keys())
        if parent_idx >=0 and len(labels)==1:
            # this key has no brother.
            # add parent as the non-'labels[0]' class)
            labels.append(parent_label)
        labels.sort()
        n_labels = len(labels)
        parents = [parent_idx] * n_labels
        # a child is a leaf when the tree stores no subtree for it
        # (the synthesized parent label is absent from tree.keys() and thus a leaf)
        is_leaf = [key not in tree.keys() or tree[key]==None for key in labels]
        subtrees = [None] * n_labels
        for idx, key in enumerate(labels):
            if is_leaf[idx]:
                continue
            subtrees[idx] = tree[key]
        return labels, parents, is_leaf, subtrees
    @staticmethod
    def serialize(tree, parent_label=None, leaf=None):
        """Breadth-first serialize the nested dict `tree` into flat arrays.

        Processes one depth level per outer iteration so that all siblings of
        a level get contiguous indices (this contiguity is what group_segments
        relies on). Returns (labels, parents, is_leaf) as numpy arrays.
        """
        # return self.labels, self.parents, self.is_leaf
        arg_stacks = [(tree, parent_label,-1)]
        labels = []
        parents = []
        is_leaf = []
        while len(arg_stacks)>0:
            # accumulators for the current depth level
            l=[]
            p=[]
            i=[]
            subtrees = []
            for args in arg_stacks:
                _l,_p,_i,_s = CategoryTree.serialize_one_depth(*args,leaf)
                l += _l
                p += _p
                i += _i
                subtrees += _s
            arg_stacks = []
            # queue the non-leaf children for the next level; their serialized
            # index is len(labels)+idx since this level is appended below
            for idx, child in enumerate(l):
                if i[idx]:
                    continue
                arg_stacks.append((subtrees[idx], child, len(labels)+idx))
            labels += l
            parents += p
            is_leaf += i
        return np.array(labels),np.array(parents),np.array(is_leaf)
def prune_rare_cat(self, frequency, pruning_threshold=0.00, background_cats =['END']):
if pruning_threshold < 1.0:
thresh = int(pruning_threshold * sum(frequency.values()) / len(frequeny) )
else:
thresh = pruning_threshold
freq_hierarchical = np.zeros(len(self.parents))
for l,f in frequency.items():
p=self.encoder[l]
while(p>=0):
freq_hierarchical[p]+=f
p = self.parents[p]
def delete_rare(subtree):
#
if not isinstance(subtree,dict):
return subtree
del_list = []
for key in subtree.keys():
f = freq_hierarchical[self.encoder[key]]
if f <= thresh and key not in background_cats:
del_list.append(key)
continue
if self.is_leaf[self.encoder[key]]:
continue
subtree[key] = delete_rare(subtree[key])
for key in del_list:
del subtree[key]
# subtreeが1つの要素しかなく,かつ,その要素がさらに木構造を持つならば,
# その階層は飛ばす
if len(subtree.keys())==1:
key,val = next(iter(subtree.items()))
if not self.is_leaf[self.encoder[key]]:
subtree = delete_rare(val)
if isinstance(subtree,dict):
if len(subtree)==0:
return None
return subtree
tr = delete_rare(self.tree)
return CategoryTree(tr, self.leaf,n_jobs=self.n_jobs)
def cats(self):
return self.encoder.keys()
| [
"keras.backend.concatenate",
"numpy.argmax",
"numpy.asarray",
"keras.backend.sum",
"numpy.isnan",
"numpy.append",
"numpy.isclose",
"numpy.diff",
"numpy.array",
"numpy.where",
"multiprocessing.Pool",
"keras.activations.softmax",
"multiprocessing.cpu_count"
] | [((429, 456), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (454, 456), False, 'import multiprocessing\n'), ((1011, 1057), 'numpy.append', 'np.append', (['self.group_segments', '[self.nlabels]'], {}), '(self.group_segments, [self.nlabels])\n', (1020, 1057), True, 'import numpy as np\n'), ((3666, 3689), 'numpy.array', 'np.array', (['n_hot_vectors'], {}), '(n_hot_vectors)\n', (3674, 3689), True, 'import numpy as np\n'), ((4993, 5021), 'keras.backend.concatenate', 'K.concatenate', (['bufs'], {'axis': '(-1)'}), '(bufs, axis=-1)\n', (5006, 5021), True, 'import keras.backend as K\n'), ((7709, 7722), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (7719, 7722), True, 'import numpy as np\n'), ((8002, 8027), 'numpy.diff', 'np.diff', (['np.r_[starts, n]'], {}), '(np.r_[starts, n])\n', (8009, 8027), True, 'import numpy as np\n'), ((2273, 2305), 'numpy.where', 'np.where', (['(categorical_label == 1)'], {}), '(categorical_label == 1)\n', (2281, 2305), True, 'import numpy as np\n'), ((3491, 3524), 'multiprocessing.Pool', 'multiprocessing.Pool', (['self.n_jobs'], {}), '(self.n_jobs)\n', (3511, 3524), False, 'import multiprocessing\n'), ((3849, 3887), 'keras.backend.sum', 'K.sum', (['y_true'], {'axis': '(-1)', 'keepdims': '(False)'}), '(y_true, axis=-1, keepdims=False)\n', (3854, 3887), True, 'import keras.backend as K\n'), ((4923, 4947), 'keras.activations.softmax', 'softmax', (['x[:, s:e]', 'axis'], {}), '(x[:, s:e], axis)\n', (4930, 4947), False, 'from keras.activations import softmax\n'), ((9882, 9898), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (9890, 9898), True, 'import numpy as np\n'), ((9899, 9916), 'numpy.array', 'np.array', (['parents'], {}), '(parents)\n', (9907, 9916), True, 'import numpy as np\n'), ((9917, 9934), 'numpy.array', 'np.array', (['is_leaf'], {}), '(is_leaf)\n', (9925, 9934), True, 'import numpy as np\n'), ((6726, 6771), 'numpy.argmax', 'np.argmax', (['predictions[g_range[0]:g_range[1]]'], {}), 
'(predictions[g_range[0]:g_range[1]])\n', (6735, 6771), True, 'import numpy as np\n'), ((7781, 7804), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (7789, 7804), True, 'import numpy as np\n'), ((7827, 7850), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (7835, 7850), True, 'import numpy as np\n'), ((7873, 7900), 'numpy.array', 'np.array', (['[]'], {'dtype': 'x.dtype'}), '([], dtype=x.dtype)\n', (7881, 7900), True, 'import numpy as np\n'), ((8095, 8111), 'numpy.isnan', 'np.isnan', (['values'], {}), '(values)\n', (8103, 8111), True, 'import numpy as np\n'), ((7936, 7977), 'numpy.isclose', 'np.isclose', (['x[1:]', 'x[:-1]'], {'equal_nan': '(True)'}), '(x[1:], x[:-1], equal_nan=True)\n', (7946, 7977), True, 'import numpy as np\n')] |
from builtins import range
import numpy as np
from random import shuffle
from past.builtins import xrange
def softmax_loss_naive(W, X, y, reg):
    """
    Softmax loss function, naive implementation (with loops).

    Inputs have dimension D, there are C classes, and we operate on minibatches
    of N examples.

    Inputs:
    - W: A numpy array of shape (D, C) containing weights.
    - X: A numpy array of shape (N, D) containing a minibatch of data.
    - y: A numpy array of shape (N,) containing training labels; y[i] = c means
      that X[i] has label c, where 0 <= c < C.
    - reg: (float) regularization strength

    Returns a tuple of:
    - loss as single float
    - gradient with respect to weights W; an array of same shape as W
    """
    loss = 0.0
    dW = np.zeros_like(W)
    num_train = X.shape[0]
    num_class = W.shape[1]
    for i in range(num_train):
        score = X[i].dot(W)
        # Shift by the row max before exponentiating: softmax is invariant to a
        # constant shift, and this prevents overflow in np.exp for large raw
        # scores (the original left this stability fix commented out).
        score -= np.max(score)
        score = np.exp(score)
        sum_score = np.sum(score)
        for j in range(num_class):
            prob = score[j] / sum_score
            if j == y[i]:
                # cross-entropy contribution comes only from the correct class
                loss += -np.log(prob)
                dW[:, j] += (prob - 1) * X[i]
            else:
                dW[:, j] += prob * X[i]
    # average over the minibatch, then add L2 regularization
    loss /= num_train
    dW /= num_train
    loss += reg * np.sum(W * W)
    dW += 2 * reg * W
    return loss, dW
def softmax_loss_vectorized(W, X, y, reg):
    """
    Softmax loss function, vectorized version.
    Inputs and outputs are the same as softmax_loss_naive.
    """
    num_train = X.shape[0]
    rows = np.arange(num_train)
    score = X.dot(W)
    # row-max shift for numeric stability (softmax is shift-invariant)
    score -= np.max(score, axis=1, keepdims=True)
    score = np.exp(score)
    prob = score / np.sum(score, axis=1, keepdims=True)
    # FIX: index with a *tuple* of index arrays. The original indexed with a
    # Python list of arrays, which was deprecated and later removed in NumPy
    # ("non-tuple sequence for multidimensional indexing").
    loss = -np.sum(np.log(prob[rows, y])) / num_train
    loss += reg * np.sum(W * W)
    prob[rows, y] -= 1
    dW = X.T.dot(prob) / num_train
    dW += 2 * reg * W
    return loss, dW
| [
"numpy.zeros_like",
"numpy.sum",
"numpy.log",
"numpy.max",
"numpy.arange",
"numpy.exp",
"numpy.reshape",
"builtins.range"
] | [((829, 845), 'numpy.zeros_like', 'np.zeros_like', (['W'], {}), '(W)\n', (842, 845), True, 'import numpy as np\n'), ((1496, 1512), 'builtins.range', 'range', (['num_train'], {}), '(num_train)\n', (1501, 1512), False, 'from builtins import range\n'), ((2313, 2329), 'numpy.zeros_like', 'np.zeros_like', (['W'], {}), '(W)\n', (2326, 2329), True, 'import numpy as np\n'), ((3139, 3152), 'numpy.exp', 'np.exp', (['score'], {}), '(score)\n', (3145, 3152), True, 'import numpy as np\n'), ((1615, 1628), 'numpy.exp', 'np.exp', (['score'], {}), '(score)\n', (1621, 1628), True, 'import numpy as np\n'), ((1647, 1660), 'numpy.sum', 'np.sum', (['score'], {}), '(score)\n', (1653, 1660), True, 'import numpy as np\n'), ((1677, 1693), 'builtins.range', 'range', (['num_class'], {}), '(num_class)\n', (1682, 1693), False, 'from builtins import range\n'), ((1949, 1962), 'numpy.sum', 'np.sum', (['(W * W)'], {}), '(W * W)\n', (1955, 1962), True, 'import numpy as np\n'), ((3024, 3053), 'numpy.reshape', 'np.reshape', (['y', '(num_train, 1)'], {}), '(y, (num_train, 1))\n', (3034, 3053), True, 'import numpy as np\n'), ((3294, 3307), 'numpy.sum', 'np.sum', (['(W * W)'], {}), '(W * W)\n', (3300, 3307), True, 'import numpy as np\n'), ((2957, 2977), 'numpy.arange', 'np.arange', (['num_train'], {}), '(num_train)\n', (2966, 2977), True, 'import numpy as np\n'), ((3090, 3111), 'numpy.max', 'np.max', (['score'], {'axis': '(1)'}), '(score, axis=1)\n', (3096, 3111), True, 'import numpy as np\n'), ((3172, 3193), 'numpy.sum', 'np.sum', (['score'], {'axis': '(1)'}), '(score, axis=1)\n', (3178, 3193), True, 'import numpy as np\n'), ((3229, 3262), 'numpy.log', 'np.log', (['prob[correct_class_index]'], {}), '(prob[correct_class_index])\n', (3235, 3262), True, 'import numpy as np\n'), ((1772, 1784), 'numpy.log', 'np.log', (['prob'], {}), '(prob)\n', (1778, 1784), True, 'import numpy as np\n')] |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing BasicTokenizer op in DE
"""
import numpy as np
import mindspore.dataset as ds
from mindspore import log as logger
import mindspore.dataset.text as nlp
BASIC_TOKENIZER_FILE = "../data/dataset/testTokenizerData/basic_tokenizer.txt"
test_paras = [
dict(
first=1,
last=6,
expected_tokens=
[['Welcome', 'to', 'Beijing', '北', '京', '欢', '迎', '您'],
['長', '風', '破', '浪', '會', '有', '時', ',', '直', '掛', '雲', '帆', '濟', '滄', '海'],
['😀', '嘿', '嘿', '😃', '哈', '哈', '😄', '大', '笑', '😁', '嘻', '嘻'],
['明', '朝', '(', '1368', '—', '1644', '年', ')', '和', '清', '朝',
'(', '1644', '—', '1911', '年', ')', ',', '是', '中', '国', '封',
'建', '王', '朝', '史', '上', '最', '后', '两', '个', '朝', '代'],
['明', '代', '(', '1368', '-', '1644', ')', 'と', '清', '代',
'(', '1644', '-', '1911', ')', 'は', '、', '中', '国', 'の', '封',
'建', '王', '朝', 'の', '歴', '史', 'における', '最', '後', 'の2つの', '王', '朝', 'でした'],
['명나라', '(', '1368', '-', '1644', ')', '와', '청나라', '(', '1644', '-', '1911', ')', '는',
'중국', '봉건', '왕조의', '역사에서', '마지막', '두', '왕조였다']]
),
dict(
first=7,
last=7,
expected_tokens=[['this', 'is', 'a', 'funky', 'string']],
lower_case=True
),
]
def check_basic_tokenizer(first, last, expected_tokens, lower_case=False, keep_whitespace=False,
                          normalization_form=nlp.utils.NormalizeForm.NONE, preserve_unused_token=False):
    """Tokenize lines [first, last] of the test file and compare each row's tokens."""
    dataset = ds.TextFileDataset(BASIC_TOKENIZER_FILE, shuffle=False)
    if first > 1:
        dataset = dataset.skip(first - 1)
    if last >= first:
        dataset = dataset.take(last - first + 1)
    tokenizer = nlp.BasicTokenizer(lower_case=lower_case,
                                  keep_whitespace=keep_whitespace,
                                  normalization_form=normalization_form,
                                  preserve_unused_token=preserve_unused_token)
    dataset = dataset.map(operations=tokenizer)
    for count, row in enumerate(dataset.create_dict_iterator()):
        text = nlp.to_str(row['text'])
        logger.info("Out:", text)
        logger.info("Exp:", expected_tokens[count])
        np.testing.assert_array_equal(text, expected_tokens[count])
def test_basic_tokenizer():
    """
    Test BasicTokenizer
    """
    for params in test_paras:
        check_basic_tokenizer(**params)
# Allow running this test module directly (outside a pytest session).
if __name__ == '__main__':
    test_basic_tokenizer()
| [
"mindspore.dataset.text.BasicTokenizer",
"numpy.testing.assert_array_equal",
"mindspore.dataset.text.to_str",
"mindspore.dataset.TextFileDataset",
"mindspore.log.info"
] | [((2167, 2222), 'mindspore.dataset.TextFileDataset', 'ds.TextFileDataset', (['BASIC_TOKENIZER_FILE'], {'shuffle': '(False)'}), '(BASIC_TOKENIZER_FILE, shuffle=False)\n', (2185, 2222), True, 'import mindspore.dataset as ds\n'), ((2377, 2544), 'mindspore.dataset.text.BasicTokenizer', 'nlp.BasicTokenizer', ([], {'lower_case': 'lower_case', 'keep_whitespace': 'keep_whitespace', 'normalization_form': 'normalization_form', 'preserve_unused_token': 'preserve_unused_token'}), '(lower_case=lower_case, keep_whitespace=keep_whitespace,\n normalization_form=normalization_form, preserve_unused_token=\n preserve_unused_token)\n', (2395, 2544), True, 'import mindspore.dataset.text as nlp\n'), ((2788, 2809), 'mindspore.dataset.text.to_str', 'nlp.to_str', (["i['text']"], {}), "(i['text'])\n", (2798, 2809), True, 'import mindspore.dataset.text as nlp\n'), ((2818, 2843), 'mindspore.log.info', 'logger.info', (['"""Out:"""', 'text'], {}), "('Out:', text)\n", (2829, 2843), True, 'from mindspore import log as logger\n'), ((2852, 2895), 'mindspore.log.info', 'logger.info', (['"""Exp:"""', 'expected_tokens[count]'], {}), "('Exp:', expected_tokens[count])\n", (2863, 2895), True, 'from mindspore import log as logger\n'), ((2904, 2963), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['text', 'expected_tokens[count]'], {}), '(text, expected_tokens[count])\n', (2933, 2963), True, 'import numpy as np\n')] |
import os
from termcolor import colored
from datetime import datetime
import socket
import numpy as np
from tqdm import tqdm
import shutil, errno
from pathlib import Path
import statistics
import torch
import logging
import argparse
def basic_logger(name='basic_logger'):
    """Return the logging.Logger registered under `name`."""
    return logging.getLogger(name)
# Training
def red(text):
    """Yellow-on-red colored text (training messages)."""
    return colored(text, 'yellow', 'on_red')

def print_red(text):
    print(red(text))

# Data
def white_blue(text):
    """White-on-blue colored text."""
    return colored(text, 'white', 'on_blue')

def white_magenta(text):
    """White-on-magenta colored text."""
    return colored(text, 'white', 'on_magenta')

def blue_text(text):
    """Blue foreground text."""
    return colored(text, 'blue')

def print_white_blue(text):
    print(white_blue(text))

def green(text):
    """Blue-on-green colored text."""
    return colored(text, 'blue', 'on_green')

def print_green(text):
    print(green(text))

def yellow(text):
    """Blue-on-yellow colored text."""
    return colored(text, 'blue', 'on_yellow')

# Model
def magenta(text):
    """White-on-magenta colored text (model messages)."""
    return colored(text, 'white', 'on_magenta')

def print_magenta(text):
    print(magenta(text))
def copy_file(origin_dest):
    """Copy one file; `origin_dest` is a (source_path, dest_dir) pair."""
    src, dest_dir = origin_dest
    os.system('cp %s %s/' % (src, dest_dir))
def copy_py_files(root_path, dest_path, exclude_paths=[]):
    """Copy every .py/.yaml file under `root_path` flat into `dest_path`,
    skipping files whose path contains any entry of `exclude_paths`.

    NOTE(review): `exclude_paths=[]` is a mutable default -- harmless here
    only because it is never mutated.
    """
    from multiprocessing import Pool
    origin_path_list = []
    for root, dirs, files in os.walk(root_path):
        for file in files:
            # keep .py and .yaml; the .pyc guard only matters for the .py branch
            if ((file.endswith(".py") or file.endswith(".yaml"))) and (not file.endswith(".pyc")):
                origin_path = os.path.join(root, file)
                # print(os.path.join(root, file))
                exclude_flag = False
                for exclude_path in exclude_paths:
                    if exclude_path != '' and exclude_path in origin_path:
                        exclude_flag = True
                        break
                # for/else: the else runs only when no exclude path matched
                # (i.e. the loop finished without `break`)
                else:
                    origin_path_list.append([origin_path, dest_path])
                    # os.system('cp %s %s/'%(origin_path, dest_path))
                    # print('Copied ' + origin_path)
    # NOTE(review): `initializer=np.random.seed(123456)` seeds the *parent*
    # RNG immediately and passes None as the worker initializer -- probably
    # not the intent, though harmless for file copying.
    with Pool(processes=12, initializer=np.random.seed(123456)) as pool:
        for _ in list(tqdm(pool.imap_unordered(copy_file, origin_path_list), total=len(origin_path_list))):
            pass
def copyanything(src, dst):
    """Copy `src` to `dst`, whether `src` is a directory tree or a single file."""
    try:
        shutil.copytree(src, dst)
    except OSError as exc:
        # ENOTDIR: src is a plain file, so fall back to a file copy
        if exc.errno != errno.ENOTDIR:
            raise
        shutil.copy(src, dst)
def str2bool(v):
    """Parse a human-friendly boolean (suitable as an argparse `type=`).

    Accepts actual bools plus common yes/no strings, case-insensitively.
    Raises argparse.ArgumentTypeError for anything else.
    """
    if isinstance(v, bool):
        return v
    # FIX: the original lists also contained 'True'/'False' with capitals,
    # which v.lower() can never produce -- dead entries removed.
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    if v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    # FIX: typo "expectedl" in the error message
    raise argparse.ArgumentTypeError('Boolean value expected; got: %s' % v)
def get_datetime():
    """Timestamp string like '20240131-235959'.

    NOTE(review): shadowed by a duplicate get_datetime defined further down
    in this module (which appends a trailing dash).
    """
    return datetime.now().strftime("%Y%m%d-%H%M%S")
def tryPort(port):
    """Return True if `port` can be bound on all interfaces, else False."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    result = False
    try:
        sock.bind(("0.0.0.0", port))
        result = True
    # FIX: narrowed the bare `except:` (which also swallowed
    # KeyboardInterrupt/SystemExit) to the actual bind failure type.
    except OSError:
        print("Port is in use")
    sock.close()
    return result
def nextPort(port):
    """Return the first bindable port number >= `port`."""
    assert isinstance(port, int), 'port number should be int! e.g. 6006'
    candidate = port
    while not tryPort(candidate):
        candidate += 1
    return candidate
def checkEqual1(iterator):
    """Check if elements in a list are all equal"""
    it = iter(iterator)
    try:
        first = next(it)
    except StopIteration:
        # empty input: vacuously equal
        return True
    for rest in it:
        if not (first == rest):
            return False
    return True
def get_key(dict_in, key_in, if_bool=False, default_bool=False, default=None):
    """Dict lookup with separate fallbacks for boolean and general values."""
    if key_in in dict_in:
        return dict_in[key_in]
    return default_bool if if_bool else default
def randomize():
    # Re-seed NumPy's global RNG from OS entropy (useful e.g. after a fork so
    # worker processes do not share identical random streams).
    np.random.seed()
def dict_get_with_key_list(x_dict, key_list):
    """Fetch values for `key_list`; a single key returns the bare value."""
    values = []
    for key in key_list:
        assert key in x_dict, '[dict_get_with_key_list] Key %s not found in dict!'%key
        values.append(x_dict[key])
    return values[0] if len(values) == 1 else values
def flatten_list(list_of_lists):
    """Flatten one level of nesting into a single list."""
    flat = []
    for sublist in list_of_lists:
        flat.extend(sublist)
    return flat
from datetime import datetime
def get_datetime():
    """Timestamp prefix like '20240131-235959-' (note the trailing dash).

    NOTE(review): this duplicate shadows the earlier get_datetime defined
    above in this module.
    """
    return datetime.now().strftime("%Y%m%d-%H%M%S-")
class AverageMeter(object):
    """Computes and stores the average and current value"""
    def __init__(self, meter_name='N/A'):
        self.meter_name = meter_name
        self.val = None
        self.avg = None
        self.median = None
        self.sum = None
        self.count = None
        self.all_list = []
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.median = 0
        self.sum = 0
        self.count = 0
        self.all_list = []

    def update(self, val, n=1):
        """Record `val` observed `n` times."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
        self.all_list.append(val)

    def get_median(self):
        """Median of the raw values seen so far (each update counted once)."""
        return statistics.median(self.all_list)
class ListMeter(object):
    """Accumulates raw values (e.g. tensors) plus a running element count."""
    def __init__(self, meter_name='N/A'):
        self.meter_name = meter_name
        self.count = None
        self.all_list = []
        self.reset()

    def reset(self):
        """Clear the accumulated values and count."""
        self.count = 0
        self.all_list = []

    def update(self, val, n=1):
        """Append `val` and bump the count by `n`."""
        self.count += n
        self.all_list.append(val)

    def concat(self):
        """Concatenate all recorded tensors along dim 0."""
        return torch.cat(self.all_list)
# def get_time_meters():
# time_meters = {}
# time_meters['data_to_gpu'] = AverageMeter()
# time_meters['forward'] = AverageMeter()
# time_meters['loss'] = AverageMeter()
# time_meters['backward'] = AverageMeter()
# time_meters['ts'] = 0
# return time_meters
def only1true(l):
    """Return True iff exactly one element of `l` is truthy."""
    count = 0
    for v in l:
        if v:
            count += 1
            if count > 1:
                # short-circuit: a second truthy value was found
                return False
    return count == 1
def nonetrue(l):
    """Return True iff no element of `l` is truthy."""
    return all(not v for v in l)
"numpy.random.seed",
"statistics.median",
"os.path.join",
"os.walk",
"socket.socket",
"os.system",
"torch.cat",
"termcolor.colored",
"shutil.copy",
"shutil.copytree",
"datetime.datetime.now",
"logging.getLogger",
"argparse.ArgumentTypeError"
] | [((286, 309), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (303, 309), False, 'import logging\n'), ((366, 399), 'termcolor.colored', 'colored', (['text', '"""yellow"""', '"""on_red"""'], {}), "(text, 'yellow', 'on_red')\n", (373, 399), False, 'from termcolor import colored\n'), ((488, 521), 'termcolor.colored', 'colored', (['text', '"""white"""', '"""on_blue"""'], {}), "(text, 'white', 'on_blue')\n", (495, 521), False, 'from termcolor import colored\n'), ((583, 619), 'termcolor.colored', 'colored', (['text', '"""white"""', '"""on_magenta"""'], {}), "(text, 'white', 'on_magenta')\n", (590, 619), False, 'from termcolor import colored\n'), ((677, 698), 'termcolor.colored', 'colored', (['text', '"""blue"""'], {}), "(text, 'blue')\n", (684, 698), False, 'from termcolor import colored\n'), ((809, 842), 'termcolor.colored', 'colored', (['text', '"""blue"""', '"""on_green"""'], {}), "(text, 'blue', 'on_green')\n", (816, 842), False, 'from termcolor import colored\n'), ((944, 978), 'termcolor.colored', 'colored', (['text', '"""blue"""', '"""on_yellow"""'], {}), "(text, 'blue', 'on_yellow')\n", (951, 978), False, 'from termcolor import colored\n'), ((1042, 1078), 'termcolor.colored', 'colored', (['text', '"""white"""', '"""on_magenta"""'], {}), "(text, 'white', 'on_magenta')\n", (1049, 1078), False, 'from termcolor import colored\n'), ((1183, 1240), 'os.system', 'os.system', (["('cp %s %s/' % (origin_dest[0], origin_dest[1]))"], {}), "('cp %s %s/' % (origin_dest[0], origin_dest[1]))\n", (1192, 1240), False, 'import os\n'), ((1391, 1409), 'os.walk', 'os.walk', (['root_path'], {}), '(root_path)\n', (1398, 1409), False, 'import os\n'), ((2873, 2887), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2885, 2887), False, 'from datetime import datetime\n'), ((2972, 3021), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (2985, 3021), False, 'import socket\n'), ((3854, 
3870), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (3868, 3870), True, 'import numpy as np\n'), ((4476, 4490), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4488, 4490), False, 'from datetime import datetime\n'), ((2339, 2364), 'shutil.copytree', 'shutil.copytree', (['src', 'dst'], {}), '(src, dst)\n', (2354, 2364), False, 'import shutil, errno\n'), ((5487, 5519), 'statistics.median', 'statistics.median', (['self.all_list'], {}), '(self.all_list)\n', (5504, 5519), False, 'import statistics\n'), ((6417, 6441), 'torch.cat', 'torch.cat', (['self.all_list'], {}), '(self.all_list)\n', (6426, 6441), False, 'import torch\n'), ((2749, 2815), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (["('Boolean value expectedl; got: %s' % v)"], {}), "('Boolean value expectedl; got: %s' % v)\n", (2775, 2815), False, 'import argparse\n'), ((1567, 1591), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (1579, 1591), False, 'import os\n'), ((2135, 2157), 'numpy.random.seed', 'np.random.seed', (['(123456)'], {}), '(123456)\n', (2149, 2157), True, 'import numpy as np\n'), ((2457, 2478), 'shutil.copy', 'shutil.copy', (['src', 'dst'], {}), '(src, dst)\n', (2468, 2478), False, 'import shutil, errno\n')] |
import numpy as np
from model.rta import ChunkCalculator
min_nperseg = 512
stride = 25
def test_first_data_is_too_small():
    """Data shorter than min_nperseg yields no chunks and records no index."""
    cc = ChunkCalculator(min_nperseg, stride)
    data = np.arange(60).reshape(3, 20).T
    assert cc.recalc('test', data) is None
    assert 'test' not in cc.last_idx
def test_first_data_is_exactly_one_chunk():
    """Exactly min_nperseg samples produce a single full-width chunk."""
    cc = ChunkCalculator(min_nperseg, stride)
    data = np.arange(min_nperseg * 3).reshape(3, min_nperseg).T
    chunks = cc.recalc('test', data)
    assert chunks is not None and len(chunks) == 1
    idx_col = chunks[0][:, 0]
    assert idx_col[0] == 0
    assert idx_col[-1] == min_nperseg - 1
    assert cc.last_idx.get('test') == min_nperseg - 1
def test_first_data_is_more_than_a_chunk():
    """A partial extra stride beyond one chunk is ignored until it fills up."""
    cc = ChunkCalculator(min_nperseg, stride)
    data = np.arange((min_nperseg + 20) * 3).reshape(3, min_nperseg + 20).T
    chunks = cc.recalc('test', data)
    assert chunks is not None and len(chunks) == 1
    idx_col = chunks[0][:, 0]
    assert idx_col[0] == 0
    assert idx_col[-1] == min_nperseg - 1
    assert cc.last_idx.get('test') == min_nperseg - 1
def test_first_data_is_many_chunks():
    """100 extra samples at stride 25 yield four more overlapping chunks."""
    cc = ChunkCalculator(min_nperseg, stride)
    data = np.arange((min_nperseg + 105) * 3).reshape(3, min_nperseg + 105).T
    chunks = cc.recalc('test', data)
    assert chunks is not None and len(chunks) == 5
    for i, chunk in enumerate(chunks):
        assert chunk[:, 0][0] == i * stride
        assert chunk[:, 0][-1] == min_nperseg - 1 + i * stride
    assert cc.last_idx.get('test') == min_nperseg - 1 + 100
def test_next_data_is_less_than_chunk():
    """Less than one stride of new data produces nothing and keeps the index."""
    cc = ChunkCalculator(min_nperseg, stride)
    cc.last_idx['test'] = min_nperseg - 1
    data = np.arange((min_nperseg + 20) * 3).reshape(3, min_nperseg + 20).T
    assert cc.recalc('test', data) is None
    assert cc.last_idx.get('test') == min_nperseg - 1
def test_next_data_fills_a_chunk():
    """Exactly one extra stride yields one new chunk shifted by stride."""
    cc = ChunkCalculator(min_nperseg, stride)
    cc.last_idx['test'] = min_nperseg - 1
    data = np.arange((min_nperseg + stride) * 3).reshape(3, min_nperseg + stride).T
    chunks = cc.recalc('test', data)
    assert chunks is not None and len(chunks) == 1
    idx_col = chunks[0][:, 0]
    assert idx_col[0] == stride
    assert idx_col[-1] == min_nperseg - 1 + stride
    assert cc.last_idx.get('test') == min_nperseg - 1 + stride
def test_next_data_is_between_chunks():
    """A stride plus a partial remainder still yields just one new chunk."""
    cc = ChunkCalculator(min_nperseg, stride)
    cc.last_idx['test'] = min_nperseg - 1
    data = np.arange((min_nperseg + stride + 10) * 3).reshape(3, min_nperseg + stride + 10).T
    chunks = cc.recalc('test', data)
    assert chunks is not None and len(chunks) == 1
    idx_col = chunks[0][:, 0]
    assert idx_col[0] == stride
    assert idx_col[-1] == min_nperseg - 1 + stride
    assert cc.last_idx.get('test') == min_nperseg - 1 + stride
def test_next_data_is_many_chunks():
    """Three strides of new data yield three consecutive overlapping chunks."""
    cc = ChunkCalculator(min_nperseg, stride)
    cc.last_idx['test'] = min_nperseg - 1
    data = np.arange((min_nperseg + stride * 3 + 10) * 3).reshape(3, min_nperseg + stride * 3 + 10).T
    chunks = cc.recalc('test', data)
    assert chunks is not None and len(chunks) == 3
    for i, chunk in enumerate(chunks, start=1):
        assert chunk[:, 0][0] == stride * i
        assert chunk[:, 0][-1] == stride * i - 1 + min_nperseg
    assert cc.last_idx.get('test') == stride * 3 - 1 + min_nperseg
| [
"model.rta.ChunkCalculator",
"numpy.arange"
] | [((135, 171), 'model.rta.ChunkCalculator', 'ChunkCalculator', (['min_nperseg', 'stride'], {}), '(min_nperseg, stride)\n', (150, 171), False, 'from model.rta import ChunkCalculator\n'), ((379, 415), 'model.rta.ChunkCalculator', 'ChunkCalculator', (['min_nperseg', 'stride'], {}), '(min_nperseg, stride)\n', (394, 415), False, 'from model.rta import ChunkCalculator\n'), ((808, 844), 'model.rta.ChunkCalculator', 'ChunkCalculator', (['min_nperseg', 'stride'], {}), '(min_nperseg, stride)\n', (823, 844), False, 'from model.rta import ChunkCalculator\n'), ((1243, 1279), 'model.rta.ChunkCalculator', 'ChunkCalculator', (['min_nperseg', 'stride'], {}), '(min_nperseg, stride)\n', (1258, 1279), False, 'from model.rta import ChunkCalculator\n'), ((1804, 1840), 'model.rta.ChunkCalculator', 'ChunkCalculator', (['min_nperseg', 'stride'], {}), '(min_nperseg, stride)\n', (1819, 1840), False, 'from model.rta import ChunkCalculator\n'), ((2162, 2198), 'model.rta.ChunkCalculator', 'ChunkCalculator', (['min_nperseg', 'stride'], {}), '(min_nperseg, stride)\n', (2177, 2198), False, 'from model.rta import ChunkCalculator\n'), ((2672, 2708), 'model.rta.ChunkCalculator', 'ChunkCalculator', (['min_nperseg', 'stride'], {}), '(min_nperseg, stride)\n', (2687, 2708), False, 'from model.rta import ChunkCalculator\n'), ((3189, 3225), 'model.rta.ChunkCalculator', 'ChunkCalculator', (['min_nperseg', 'stride'], {}), '(min_nperseg, stride)\n', (3204, 3225), False, 'from model.rta import ChunkCalculator\n'), ((183, 196), 'numpy.arange', 'np.arange', (['(60)'], {}), '(60)\n', (192, 196), True, 'import numpy as np\n'), ((427, 453), 'numpy.arange', 'np.arange', (['(min_nperseg * 3)'], {}), '(min_nperseg * 3)\n', (436, 453), True, 'import numpy as np\n'), ((856, 889), 'numpy.arange', 'np.arange', (['((min_nperseg + 20) * 3)'], {}), '((min_nperseg + 20) * 3)\n', (865, 889), True, 'import numpy as np\n'), ((1291, 1325), 'numpy.arange', 'np.arange', (['((min_nperseg + 105) * 3)'], {}), '((min_nperseg + 105) 
* 3)\n', (1300, 1325), True, 'import numpy as np\n'), ((1894, 1927), 'numpy.arange', 'np.arange', (['((min_nperseg + 20) * 3)'], {}), '((min_nperseg + 20) * 3)\n', (1903, 1927), True, 'import numpy as np\n'), ((2252, 2289), 'numpy.arange', 'np.arange', (['((min_nperseg + stride) * 3)'], {}), '((min_nperseg + stride) * 3)\n', (2261, 2289), True, 'import numpy as np\n'), ((2762, 2804), 'numpy.arange', 'np.arange', (['((min_nperseg + stride + 10) * 3)'], {}), '((min_nperseg + stride + 10) * 3)\n', (2771, 2804), True, 'import numpy as np\n'), ((3279, 3325), 'numpy.arange', 'np.arange', (['((min_nperseg + stride * 3 + 10) * 3)'], {}), '((min_nperseg + stride * 3 + 10) * 3)\n', (3288, 3325), True, 'import numpy as np\n')] |
import dataclasses
import itertools
import logging
import os.path
import numpy as np
import openmdao.api as om
import xlwings
from pywintypes import com_error
from .macro_execution import run_and_raise_macro, wrap_macros
from .timeout_utils import TimeoutComponentMixin, kill_pid
# Package-level logger; __package__ groups all modules of this package
# under one logging namespace.
logger = logging.getLogger(__package__)
def nans(shape):
    """Return a float array of the given ``shape`` filled with NaN.

    Used to initialize OpenMDAO inputs/outputs so that any value that was
    never written shows up as NaN instead of a plausible-looking default.
    """
    # np.full writes NaN directly instead of allocating a ones-array and
    # multiplying it, as ``np.ones(shape) * np.nan`` did.
    return np.full(shape, np.nan)
@dataclasses.dataclass(frozen=True)
class ExcelVar:
    """Binding between an OpenMDAO variable and an Excel range.

    Attributes:
        name: OpenMDAO variable name.
        range: Excel range reference the value is read from / written to.
        shape: Shape of the variable's value; defaults to a scalar ``(1,)``.
    """

    name: str
    range: str
    # Annotated so the dataclass treats this as a real, per-instance field.
    # The previous un-annotated ``shape = (1,)`` was silently a shared class
    # attribute, making non-scalar variables impossible to declare.
    shape: tuple = (1,)
class ExcelComponent(TimeoutComponentMixin, om.ExplicitComponent):
    """OpenMDAO explicit component that evaluates an Excel workbook.

    On each ``compute``: input values are written into configured ranges,
    optional VBA macros run (pre / main / post), the workbook is
    recalculated, and output values are read back from ranges.
    ``TimeoutComponentMixin`` provides the timeout machinery; on timeout
    the Excel process is killed (see ``handle_timeout``).
    """

    def initialize(self):
        """Declare component options and the Excel process bookkeeping."""
        # ``inputs``/``outputs`` are lists of ExcelVar (variable <-> range).
        self.options.declare("file_path", types=str)
        self.options.declare("inputs", types=list)
        self.options.declare("outputs", types=list)
        # Macro names run before inputs are set, after recalculation,
        # and after outputs are read, respectively.
        self.options.declare("pre_macros", types=list, default=[])
        self.options.declare("main_macros", types=list, default=[])
        self.options.declare("post_macros", types=list, default=[])
        # Lazily-started Excel application handle and its OS process id;
        # ``app_pid`` doubles as the "Excel is running" flag.
        self.app = None
        self.app_pid = None

    def setup(self):
        """Register OpenMDAO inputs/outputs (NaN-filled) and start Excel."""
        for var in self.options["inputs"]:
            self.add_input(name=var.name, val=nans(var.shape))

        for var in self.options["outputs"]:
            self.add_output(name=var.name, val=nans(var.shape))

        self.ensure_app()

    def ensure_app(self):
        """Start a hidden Excel instance if one is not already running."""
        if not self.app_pid:
            logger.debug("Starting Excel...")
            self.app = xlwings.App(visible=False, add_book=False)
            self.app_pid = self.app.pid
            logger.info(f"Excel started, PID {self.app_pid}.")
            # Suppress dialogs and repainting: no UI interaction is
            # possible in a headless run, and repainting is pure overhead.
            self.app.display_alerts = False
            self.app.screen_updating = False

    def open_and_run(self, inputs, outputs, discrete_inputs, discrete_outputs):
        """Open the workbook, push inputs, run macros, and pull outputs."""
        self.ensure_app()

        file_path = self.options["file_path"]
        logger.debug(f"Opening {file_path}...")
        book = self.app.books.open(file_path)
        # Avoid AutoRecover prompts/state for a workbook we never save.
        book.api.EnableAutoRecover = False

        # Wrap each distinct macro exactly once, even if it is listed in
        # several of the pre/main/post groups.
        all_macros = set(
            itertools.chain(
                self.options["pre_macros"],
                self.options["main_macros"],
                self.options["post_macros"],
            )
        )

        logger.debug("Wrapping macros...")
        if len(all_macros):
            wrap_macros(book, all_macros)

        for macro in self.options["pre_macros"]:
            run_and_raise_macro(book, macro, "pre")

        # Manual calculation while writing inputs so Excel does not
        # recalculate after every single range assignment.
        self.app.calculation = "manual"
        for var in self.options["inputs"]:
            self.app.range(var.range).options(convert=np.array).value = inputs[var.name]
            logger.debug(f"Input variable {var.name} set to range {var.range}.")

        # Restore automatic mode and trigger one full recalculation.
        self.app.calculation = "automatic"
        self.app.calculate()
        logger.debug("Workbook re-calculated.")

        for macro in self.options["main_macros"]:
            run_and_raise_macro(book, macro, "main")

        for var in self.options["outputs"]:
            outputs[var.name] = (
                self.app.range(var.range).options(convert=np.array).value
            )
            logger.debug(f"Output variable {var.name} set from range {var.range}.")

        for macro in self.options["post_macros"]:
            run_and_raise_macro(book, macro, "post")

        # Closes without saving
        book.close()
        logger.debug(f"Closed {file_path}.")

    def compute(self, inputs, outputs, discrete_inputs=None, discrete_outputs=None):
        """OpenMDAO entry point; converts a timeout into an AnalysisError."""
        try:
            self.open_and_run(
                inputs, outputs, discrete_inputs or {}, discrete_outputs or {}
            )
        except Exception as exc:
            # If the failure coincides with the timeout having fired,
            # report it as a recoverable AnalysisError so the driver can
            # back off; otherwise propagate the original error.
            if self.timeout_state.reached:
                raise om.AnalysisError("Timeout reached!")
            else:
                raise exc

    def handle_timeout(self):
        """Timeout callback: kill the Excel process and mark it gone."""
        logger.info(f"Excel component timed out. Killing PID {self.app_pid}.")
        kill_pid(self.app_pid)
        # Clearing the PID makes ensure_app() start a fresh instance next time.
        self.app_pid = None

    def cleanup(self):
        """Shut down Excel (gracefully, then forcefully) and chain cleanup."""
        if self.app_pid:
            try:
                self.app.quit()
            except com_error as exc:
                # Excel may already be gone; the kill below is the fallback.
                pass
            kill_pid(self.app_pid)
        super().cleanup()
| [
"openmdao.api.AnalysisError",
"xlwings.App",
"numpy.ones",
"logging.getLogger",
"itertools.chain",
"dataclasses.dataclass"
] | [((292, 322), 'logging.getLogger', 'logging.getLogger', (['__package__'], {}), '(__package__)\n', (309, 322), False, 'import logging\n'), ((380, 414), 'dataclasses.dataclass', 'dataclasses.dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (401, 414), False, 'import dataclasses\n'), ((353, 367), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (360, 367), True, 'import numpy as np\n'), ((1373, 1415), 'xlwings.App', 'xlwings.App', ([], {'visible': '(False)', 'add_book': '(False)'}), '(visible=False, add_book=False)\n', (1384, 1415), False, 'import xlwings\n'), ((1937, 2042), 'itertools.chain', 'itertools.chain', (["self.options['pre_macros']", "self.options['main_macros']", "self.options['post_macros']"], {}), "(self.options['pre_macros'], self.options['main_macros'],\n self.options['post_macros'])\n", (1952, 2042), False, 'import itertools\n'), ((3582, 3618), 'openmdao.api.AnalysisError', 'om.AnalysisError', (['"""Timeout reached!"""'], {}), "('Timeout reached!')\n", (3598, 3618), True, 'import openmdao.api as om\n')] |
############################################################################
# Copyright ESIEE Paris (2018) #
# #
# Contributor(s) : <NAME> #
# #
# Distributed under the terms of the CECILL-B License. #
# #
# The full license is in the file LICENSE, distributed with this software. #
############################################################################
import unittest
import scipy.sparse as sp
import numpy as np
import higra as hg
class TestAlgorithmGraphCore(unittest.TestCase):
    """Tests for higra's core graph algorithms: cuts, adjacency-matrix
    conversions, minimum spanning trees/forests, graph construction from
    point clouds, subgraphs, and line graphs.

    Two latent defects fixed in this revision:
      * ``ultrametric_open`` lacked the ``test_`` prefix, so unittest never
        discovered or ran it; renamed to ``test_ultrametric_open``.
      * ``test_subgraph_spanning`` was defined twice; the second definition
        (which actually exercises ``spanning=False``) shadowed the first and
        silently disabled it; renamed to ``test_subgraph_not_spanning``.
    """

    @staticmethod
    def graph_equal(g1, w1, g2, w2):
        """Return True iff both weighted graphs have the same set of
        (source, target) -> weight associations."""
        dg1 = {}
        for s, t, w in zip(*g1.edge_list(), w1):
            dg1[(s, t)] = w

        dg2 = {}
        for s, t, w in zip(*g2.edge_list(), w2):
            dg2[(s, t)] = w

        return dg1 == dg2

    def test_graph_cut_2_labelisation(self):
        """A binary cut on a 4-adjacency grid yields the expected labeling."""
        graph = hg.get_4_adjacency_graph((3, 3))
        edge_weights = np.asarray((1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0), dtype=np.int32)

        labels = hg.graph_cut_2_labelisation(graph, edge_weights)

        ref_labels = np.asarray((1, 2, 2, 1, 1, 3, 1, 3, 3), dtype=np.int32)
        self.assertTrue(hg.is_in_bijection(labels, ref_labels))

    def test_labelisation_2_graph_cut(self):
        """A labeling is converted back to the expected binary cut."""
        graph = hg.get_4_adjacency_graph((3, 3))
        labels = np.asarray((1, 2, 2, 1, 1, 3, 1, 3, 3), dtype=np.int32)

        edge_weights = hg.labelisation_2_graph_cut(graph, labels)

        ref_edge_weights = np.asarray((1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0), dtype=np.int32)
        self.assertTrue(hg.is_in_bijection(edge_weights, ref_edge_weights))

    def test_undirected_graph_2_adjacency_matrix(self):
        """Dense and sparse adjacency matrices from graphs and trees."""
        graph = hg.UndirectedGraph(5)
        graph.add_edge(0, 1)
        graph.add_edge(0, 2)
        graph.add_edge(0, 3)
        graph.add_edge(0, 4)
        graph.add_edge(1, 2)
        graph.add_edge(2, 3)
        graph.add_edge(2, 4)

        edge_weights = np.asarray((1, 2, 3, 4, 5, 6, 7))
        # Dense output with an explicit non-edge marker.
        adj_mat = hg.undirected_graph_2_adjacency_matrix(graph, edge_weights, non_edge_value=-1, sparse=False)

        ref_adj_mat = np.asarray(((-1, 1, 2, 3, 4),
                                  (1, -1, 5, -1, -1),
                                  (2, 5, -1, 6, 7),
                                  (3, -1, 6, -1, -1),
                                  (4, -1, 7, -1, -1)))
        self.assertTrue(np.all(ref_adj_mat == adj_mat))
        self.assertTrue(isinstance(adj_mat, np.ndarray))

        # Sparse (default) output from a tree with explicit weights.
        t = hg.Tree(np.asarray((5, 5, 6, 6, 6, 7, 7, 7)))
        edge_weights = np.asarray((1, 2, 3, 4, 5, 6, 7))
        adj_mat = hg.undirected_graph_2_adjacency_matrix(t, edge_weights)
        ref_adj_mat = np.asarray(((0, 0, 0, 0, 0, 1, 0, 0),
                                  (0, 0, 0, 0, 0, 2, 0, 0),
                                  (0, 0, 0, 0, 0, 0, 3, 0),
                                  (0, 0, 0, 0, 0, 0, 4, 0),
                                  (0, 0, 0, 0, 0, 0, 5, 0),
                                  (1, 2, 0, 0, 0, 0, 0, 6),
                                  (0, 0, 3, 4, 5, 0, 0, 7),
                                  (0, 0, 0, 0, 0, 6, 7, 0)))
        self.assertTrue(np.all(ref_adj_mat == adj_mat))
        self.assertTrue(sp.issparse(adj_mat))

        # Default weights of 1 when no edge weights are supplied.
        t = hg.Tree(np.asarray((5, 5, 6, 6, 6, 7, 7, 7)))
        adj_mat = hg.undirected_graph_2_adjacency_matrix(t)
        ref_adj_mat = np.asarray(((0, 0, 0, 0, 0, 1, 0, 0),
                                  (0, 0, 0, 0, 0, 1, 0, 0),
                                  (0, 0, 0, 0, 0, 0, 1, 0),
                                  (0, 0, 0, 0, 0, 0, 1, 0),
                                  (0, 0, 0, 0, 0, 0, 1, 0),
                                  (1, 1, 0, 0, 0, 0, 0, 1),
                                  (0, 0, 1, 1, 1, 0, 0, 1),
                                  (0, 0, 0, 0, 0, 1, 1, 0)))
        self.assertTrue(np.all(ref_adj_mat == adj_mat))
        self.assertTrue(sp.issparse(adj_mat))

        # A sparse matrix cannot represent a non-zero non-edge value.
        with self.assertRaises(Exception):
            hg.undirected_graph_2_adjacency_matrix(t, non_edge_value=-1, sparse=True)

    def test_adjacency_matrix_2_undirected_graph(self):
        """A 2x2 symmetric matrix converts to a single weighted edge."""
        ref_adj_mat = np.asarray(((0, 0.1),
                                  (0.1, 0)), dtype=np.float64)
        graph, edge_weights = hg.adjacency_matrix_2_undirected_graph(ref_adj_mat)

        ref_graph = hg.UndirectedGraph(2)
        ref_graph.add_edge(0, 1)

        ref_edge_weights = np.asarray((0.1,))

        self.assertTrue(edge_weights.dtype == np.float64)
        self.assertTrue(np.all(edge_weights == ref_edge_weights))
        self.assertTrue(graph.num_vertices() == ref_graph.num_vertices())
        self.assertTrue(graph.num_edges() == ref_graph.num_edges())
        for (e1, e2) in zip(graph.edges(), ref_graph.edges()):
            self.assertTrue(e1 == e2)

    def test_adjacency_matrix_2_undirected_graph_non_edge_values(self):
        """Entries equal to the non-edge marker (-1) are not edges."""
        ref_adj_mat = np.asarray(((-1, 1, 2, 3, 4),
                                  (1, -1, 5, -1, -1),
                                  (2, 5, -1, 6, 7),
                                  (3, -1, 6, -1, -1),
                                  (4, -1, 7, -1, -1)))
        graph, edge_weights = hg.adjacency_matrix_2_undirected_graph(ref_adj_mat, -1)

        ref_graph = hg.UndirectedGraph(5)
        ref_graph.add_edge(0, 1)
        ref_graph.add_edge(0, 2)
        ref_graph.add_edge(0, 3)
        ref_graph.add_edge(0, 4)
        ref_graph.add_edge(1, 2)
        ref_graph.add_edge(2, 3)
        ref_graph.add_edge(2, 4)

        ref_edge_weights = np.asarray((1, 2, 3, 4, 5, 6, 7))

        self.assertTrue(np.all(edge_weights == ref_edge_weights))
        self.assertTrue(graph.num_vertices() == ref_graph.num_vertices())
        self.assertTrue(graph.num_edges() == ref_graph.num_edges())
        for (e1, e2) in zip(graph.edges(), ref_graph.edges()):
            self.assertTrue(e1 == e2)

    def test_adjacency_matrix_2_undirected_graph_sparse(self):
        """A scipy CSR matrix converts to the same weighted edge set."""
        ref_adj_mat = np.asarray(((0, 1, 2, 3, 4),
                                  (1, 0, 5, 0, 0),
                                  (2, 5, 0, 6, 7),
                                  (3, 0, 6, 0, 0),
                                  (4, 0, 7, 0, 0)))
        ref_adj_mat = sp.csr_matrix(ref_adj_mat)
        graph, edge_weights = hg.adjacency_matrix_2_undirected_graph(ref_adj_mat)

        ref_graph = {}
        ref_graph[(0, 1)] = 1
        ref_graph[(0, 2)] = 2
        ref_graph[(0, 3)] = 3
        ref_graph[(0, 4)] = 4
        ref_graph[(1, 2)] = 5
        ref_graph[(2, 3)] = 6
        ref_graph[(2, 4)] = 7

        res_graph = {}
        for s, t, w in zip(*graph.edge_list(), edge_weights):
            res_graph[(s, t)] = w

        self.assertTrue(res_graph == ref_graph)

        # Sparse matrices use 0 as the implicit non-edge; any other
        # non-edge marker must be rejected.
        with self.assertRaises(ValueError):
            hg.adjacency_matrix_2_undirected_graph(ref_adj_mat, non_edge_value=-1)

    def test_ultrametric_open(self):
        """Renamed from ``ultrametric_open`` so unittest actually runs it."""
        graph = hg.get_4_adjacency_graph((3, 3))
        edge_weights = np.asarray((2, 3, 9, 5, 10, 1, 5, 8, 2, 2, 4, 3), dtype=np.int32)

        subd_ultrametric = hg.ultrametric_open(graph, edge_weights)

        ref_subd_ultrametric = np.asarray((2, 3, 9, 3, 9, 1, 4, 3, 2, 2, 4, 3), dtype=np.int32)
        self.assertTrue(hg.is_in_bijection(subd_ultrametric, ref_subd_ultrametric))

    def test_minimum_spanning_tree(self):
        """MST of a connected grid graph and its edge map."""
        graph = hg.get_4_adjacency_graph((2, 3))

        edge_weights = np.asarray((1, 0, 2, 1, 1, 1, 2))

        mst = hg.minimum_spanning_tree(graph, edge_weights)
        mst_edge_map = hg.CptMinimumSpanningTree.get_edge_map(mst)

        self.assertTrue(mst.num_vertices() == 6)
        self.assertTrue(mst.num_edges() == 5)
        ref_sources = (0, 0, 1, 2, 1)
        ref_targets = (3, 1, 4, 5, 2)
        sources, targets = mst.edge_list()
        self.assertTrue(np.all(sources == ref_sources))
        self.assertTrue(np.all(targets == ref_targets))

        self.assertTrue(np.all(mst_edge_map == (1, 0, 3, 4, 2)))

    def test_minimum_spanning_forest(self):
        """MST of a disconnected graph is a forest (n - components edges)."""
        graph = hg.UndirectedGraph(6)
        graph.add_edges((0, 0, 1, 3, 3, 4), (1, 2, 2, 4, 5, 5))

        edge_weights = np.asarray((0, 1, 2, 3, 4, 5))

        mst = hg.minimum_spanning_tree(graph, edge_weights)
        mst_edge_map = hg.CptMinimumSpanningTree.get_edge_map(mst)

        self.assertTrue(mst.num_vertices() == 6)
        self.assertTrue(mst.num_edges() == 4)
        ref_sources = (0, 0, 3, 3)
        ref_targets = (1, 2, 4, 5)
        sources, targets = mst.edge_list()
        self.assertTrue(np.all(sources == ref_sources))
        self.assertTrue(np.all(targets == ref_targets))

        self.assertTrue(np.all(mst_edge_map == (0, 1, 3, 4)))

    def test_make_graph_from_points_complete(self):
        """Complete graph on 3 points with Euclidean edge weights."""
        X = np.asarray(((0, 0), (0, 1), (1, 0)))
        sqrt2 = np.sqrt(2)

        g, ew = hg.make_graph_from_points(X, graph_type="complete")

        g_ref = hg.UndirectedGraph(3)
        g_ref.add_edges((0, 0, 1), (1, 2, 2))
        w_ref = (1, 1, sqrt2)

        self.assertTrue(TestAlgorithmGraphCore.graph_equal(g, ew, g_ref, w_ref))

    def test_make_graph_from_points_knn(self):
        """k-NN graphs with max- and min-symmetrization."""
        X = np.asarray(((0, 0), (0, 1), (1, 0), (0, 3), (0, 4), (1, 3), (2, 3)))
        sqrt2 = np.sqrt(2)

        g, ew = hg.make_graph_from_points(X, graph_type="knn", symmetrization="max", n_neighbors=2)

        g_ref = hg.UndirectedGraph(7)
        g_ref.add_edges((0, 0, 1, 3, 3, 4, 5, 3), (1, 2, 2, 5, 4, 5, 6, 6))
        w_ref = (1, 1, sqrt2, 1, 1, sqrt2, 1, 2)

        self.assertTrue(TestAlgorithmGraphCore.graph_equal(g, ew, g_ref, w_ref))

        g, ew = hg.make_graph_from_points(X, graph_type="knn", symmetrization="min", n_neighbors=2)

        g_ref = hg.UndirectedGraph(7)
        g_ref.add_edges((0, 0, 1, 3, 3, 5), (1, 2, 2, 5, 4, 6))
        w_ref = (1, 1, sqrt2, 1, 1, 1)

        self.assertTrue(TestAlgorithmGraphCore.graph_equal(g, ew, g_ref, w_ref))

    def test_make_graph_from_points_knn_and_mst(self):
        """k-NN graphs augmented with an MST to guarantee connectivity."""
        X = np.asarray(((0, 0), (0, 1), (1, 0), (0, 3), (0, 4), (1, 3), (2, 3)))
        sqrt2 = np.sqrt(2)

        g, ew = hg.make_graph_from_points(X, graph_type="knn+mst", symmetrization="max", n_neighbors=2)

        g_ref = hg.UndirectedGraph(7)
        g_ref.add_edges((0, 0, 1, 3, 3, 4, 5, 3, 1), (1, 2, 2, 5, 4, 5, 6, 6, 3))
        w_ref = (1, 1, sqrt2, 1, 1, sqrt2, 1, 2, 2)

        self.assertTrue(TestAlgorithmGraphCore.graph_equal(g, ew, g_ref, w_ref))

        g, ew = hg.make_graph_from_points(X, graph_type="knn+mst", symmetrization="min", n_neighbors=2)

        g_ref = hg.UndirectedGraph(7)
        g_ref.add_edges((0, 0, 1, 3, 3, 5, 1), (1, 2, 2, 5, 4, 6, 3))
        w_ref = (1, 1, sqrt2, 1, 1, 1, 2)

        self.assertTrue(TestAlgorithmGraphCore.graph_equal(g, ew, g_ref, w_ref))

    def test_make_graph_from_points_delaunay(self):
        """Delaunay triangulation graph of 7 points."""
        X = np.asarray(((0, 0), (0, 1), (1, 0), (0, 3), (0, 4), (1, 3), (2, 3)))
        sqrt2 = np.sqrt(2)

        g, ew = hg.make_graph_from_points(X, graph_type="delaunay", symmetrization="max", n_neighbors=2)

        g_ref = hg.UndirectedGraph(7)
        g_ref.add_edges((0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5), (2, 1, 2, 5, 3, 5, 6, 5, 4, 5, 6, 6))
        w_ref = (1, 1, sqrt2, np.sqrt(5), 2, 3, np.sqrt(10), 1, 1, sqrt2, np.sqrt(5), 1)

        self.assertTrue(TestAlgorithmGraphCore.graph_equal(g, ew, g_ref, w_ref))

    def test_subgraph_spanning(self):
        """Spanning subgraph keeps all vertices, selected edges only."""
        graph = hg.get_4_adjacency_graph((2, 2))
        edge_indices = np.asarray((3, 0))
        subgraph = hg.subgraph(graph, edge_indices, spanning=True)

        self.assertTrue(subgraph.num_vertices() == graph.num_vertices())
        self.assertTrue(subgraph.num_edges() == len(edge_indices))
        sources, targets = subgraph.edge_list()
        self.assertTrue(np.all(sources == (2, 0)))
        self.assertTrue(np.all(targets == (3, 1)))

    def test_subgraph_not_spanning(self):
        """Renamed from a duplicate ``test_subgraph_spanning`` that shadowed
        (and silently disabled) the spanning variant above."""
        graph = hg.UndirectedGraph(6)
        graph.add_edges(np.arange(5), np.arange(1, 6))
        edge_indices = np.asarray((4, 0, 3))
        subgraph, vertex_map = hg.subgraph(graph, edge_indices, spanning=False, return_vertex_map=True)

        self.assertTrue(subgraph.num_vertices() == 5)
        self.assertTrue(subgraph.num_edges() == len(edge_indices))
        sources, targets = subgraph.edge_list()
        self.assertTrue(np.all(vertex_map == (0, 1, 3, 4, 5)))
        self.assertTrue(np.all(vertex_map[sources] == (4, 0, 3)))
        self.assertTrue(np.all(vertex_map[targets] == (5, 1, 4)))

    def test_line_graph_ugraph(self):
        """Line graph of the 8-adjacency graph on a 2x2 grid."""
        graph = hg.get_8_adjacency_graph((2, 2))
        linegraph = hg.line_graph(graph)

        self.assertTrue(linegraph.num_vertices() == 6)
        self.assertTrue(linegraph.num_edges() == 12)

        ref = [
            {1, 2, 3, 4},
            {0, 2, 3, 5},
            {0, 1, 4, 5},
            {0, 1, 4, 5},
            {0, 2, 3, 5},
            {1, 2, 3, 4}
        ]

        for v in linegraph.vertices():
            res = set()
            for e in linegraph.out_edges(v):
                res.add(e[1])
            self.assertTrue(res == ref[v])

    def test_line_graph_tree(self):
        """Line graph of a tree given by its parent array."""
        graph = hg.Tree((5, 5, 6, 6, 6, 7, 8, 8, 8))
        linegraph = hg.line_graph(graph)

        self.assertTrue(linegraph.num_vertices() == 8)
        self.assertTrue(linegraph.num_edges() == 11)

        ref = [
            {1, 5},
            {0, 5},
            {3, 4, 6},
            {2, 4, 6},
            {2, 3, 6},
            {0, 1, 7},
            {2, 3, 4, 7},
            {5, 6},
        ]

        for v in linegraph.vertices():
            res = set()
            for e in linegraph.out_edges(v):
                res.add(e[1])
            self.assertTrue(res == ref[v])
# Run the test suite when this file is executed as a script.
if __name__ == '__main__':
    unittest.main()
| [
"higra.adjacency_matrix_2_undirected_graph",
"higra.UndirectedGraph",
"scipy.sparse.issparse",
"numpy.arange",
"higra.get_4_adjacency_graph",
"unittest.main",
"higra.CptMinimumSpanningTree.get_edge_map",
"higra.subgraph",
"higra.line_graph",
"higra.is_in_bijection",
"higra.undirected_graph_2_adj... | [((13996, 14011), 'unittest.main', 'unittest.main', ([], {}), '()\n', (14009, 14011), False, 'import unittest\n'), ((1150, 1182), 'higra.get_4_adjacency_graph', 'hg.get_4_adjacency_graph', (['(3, 3)'], {}), '((3, 3))\n', (1174, 1182), True, 'import higra as hg\n'), ((1206, 1270), 'numpy.asarray', 'np.asarray', (['(1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0)'], {'dtype': 'np.int32'}), '((1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0), dtype=np.int32)\n', (1216, 1270), True, 'import numpy as np\n'), ((1289, 1337), 'higra.graph_cut_2_labelisation', 'hg.graph_cut_2_labelisation', (['graph', 'edge_weights'], {}), '(graph, edge_weights)\n', (1316, 1337), True, 'import higra as hg\n'), ((1360, 1415), 'numpy.asarray', 'np.asarray', (['(1, 2, 2, 1, 1, 3, 1, 3, 3)'], {'dtype': 'np.int32'}), '((1, 2, 2, 1, 1, 3, 1, 3, 3), dtype=np.int32)\n', (1370, 1415), True, 'import numpy as np\n'), ((1542, 1574), 'higra.get_4_adjacency_graph', 'hg.get_4_adjacency_graph', (['(3, 3)'], {}), '((3, 3))\n', (1566, 1574), True, 'import higra as hg\n'), ((1592, 1647), 'numpy.asarray', 'np.asarray', (['(1, 2, 2, 1, 1, 3, 1, 3, 3)'], {'dtype': 'np.int32'}), '((1, 2, 2, 1, 1, 3, 1, 3, 3), dtype=np.int32)\n', (1602, 1647), True, 'import numpy as np\n'), ((1672, 1714), 'higra.labelisation_2_graph_cut', 'hg.labelisation_2_graph_cut', (['graph', 'labels'], {}), '(graph, labels)\n', (1699, 1714), True, 'import higra as hg\n'), ((1743, 1807), 'numpy.asarray', 'np.asarray', (['(1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0)'], {'dtype': 'np.int32'}), '((1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0), dtype=np.int32)\n', (1753, 1807), True, 'import numpy as np\n'), ((1958, 1979), 'higra.UndirectedGraph', 'hg.UndirectedGraph', (['(5)'], {}), '(5)\n', (1976, 1979), True, 'import higra as hg\n'), ((2207, 2240), 'numpy.asarray', 'np.asarray', (['(1, 2, 3, 4, 5, 6, 7)'], {}), '((1, 2, 3, 4, 5, 6, 7))\n', (2217, 2240), True, 'import numpy as np\n'), ((2259, 2356), 'higra.undirected_graph_2_adjacency_matrix', 
'hg.undirected_graph_2_adjacency_matrix', (['graph', 'edge_weights'], {'non_edge_value': '(-1)', 'sparse': '(False)'}), '(graph, edge_weights, non_edge_value=\n -1, sparse=False)\n', (2297, 2356), True, 'import higra as hg\n'), ((2375, 2487), 'numpy.asarray', 'np.asarray', (['((-1, 1, 2, 3, 4), (1, -1, 5, -1, -1), (2, 5, -1, 6, 7), (3, -1, 6, -1, -1),\n (4, -1, 7, -1, -1))'], {}), '(((-1, 1, 2, 3, 4), (1, -1, 5, -1, -1), (2, 5, -1, 6, 7), (3, -1,\n 6, -1, -1), (4, -1, 7, -1, -1)))\n', (2385, 2487), True, 'import numpy as np\n'), ((2815, 2848), 'numpy.asarray', 'np.asarray', (['(1, 2, 3, 4, 5, 6, 7)'], {}), '((1, 2, 3, 4, 5, 6, 7))\n', (2825, 2848), True, 'import numpy as np\n'), ((2867, 2922), 'higra.undirected_graph_2_adjacency_matrix', 'hg.undirected_graph_2_adjacency_matrix', (['t', 'edge_weights'], {}), '(t, edge_weights)\n', (2905, 2922), True, 'import higra as hg\n'), ((2946, 3174), 'numpy.asarray', 'np.asarray', (['((0, 0, 0, 0, 0, 1, 0, 0), (0, 0, 0, 0, 0, 2, 0, 0), (0, 0, 0, 0, 0, 0, 3, \n 0), (0, 0, 0, 0, 0, 0, 4, 0), (0, 0, 0, 0, 0, 0, 5, 0), (1, 2, 0, 0, 0,\n 0, 0, 6), (0, 0, 3, 4, 5, 0, 0, 7), (0, 0, 0, 0, 0, 6, 7, 0))'], {}), '(((0, 0, 0, 0, 0, 1, 0, 0), (0, 0, 0, 0, 0, 2, 0, 0), (0, 0, 0, 0,\n 0, 0, 3, 0), (0, 0, 0, 0, 0, 0, 4, 0), (0, 0, 0, 0, 0, 0, 5, 0), (1, 2,\n 0, 0, 0, 0, 0, 6), (0, 0, 3, 4, 5, 0, 0, 7), (0, 0, 0, 0, 0, 6, 7, 0)))\n', (2956, 3174), True, 'import numpy as np\n'), ((3584, 3625), 'higra.undirected_graph_2_adjacency_matrix', 'hg.undirected_graph_2_adjacency_matrix', (['t'], {}), '(t)\n', (3622, 3625), True, 'import higra as hg\n'), ((3649, 3877), 'numpy.asarray', 'np.asarray', (['((0, 0, 0, 0, 0, 1, 0, 0), (0, 0, 0, 0, 0, 1, 0, 0), (0, 0, 0, 0, 0, 0, 1, \n 0), (0, 0, 0, 0, 0, 0, 1, 0), (0, 0, 0, 0, 0, 0, 1, 0), (1, 1, 0, 0, 0,\n 0, 0, 1), (0, 0, 1, 1, 1, 0, 0, 1), (0, 0, 0, 0, 0, 1, 1, 0))'], {}), '(((0, 0, 0, 0, 0, 1, 0, 0), (0, 0, 0, 0, 0, 1, 0, 0), (0, 0, 0, 0,\n 0, 0, 1, 0), (0, 0, 0, 0, 0, 0, 1, 0), (0, 0, 0, 0, 0, 0, 1, 0), 
(1, 1,\n 0, 0, 0, 0, 0, 1), (0, 0, 1, 1, 1, 0, 0, 1), (0, 0, 0, 0, 0, 1, 1, 0)))\n', (3659, 3877), True, 'import numpy as np\n'), ((4419, 4469), 'numpy.asarray', 'np.asarray', (['((0, 0.1), (0.1, 0))'], {'dtype': 'np.float64'}), '(((0, 0.1), (0.1, 0)), dtype=np.float64)\n', (4429, 4469), True, 'import numpy as np\n'), ((4534, 4585), 'higra.adjacency_matrix_2_undirected_graph', 'hg.adjacency_matrix_2_undirected_graph', (['ref_adj_mat'], {}), '(ref_adj_mat)\n', (4572, 4585), True, 'import higra as hg\n'), ((4607, 4628), 'higra.UndirectedGraph', 'hg.UndirectedGraph', (['(2)'], {}), '(2)\n', (4625, 4628), True, 'import higra as hg\n'), ((4690, 4708), 'numpy.asarray', 'np.asarray', (['(0.1,)'], {}), '((0.1,))\n', (4700, 4708), True, 'import numpy as np\n'), ((5173, 5285), 'numpy.asarray', 'np.asarray', (['((-1, 1, 2, 3, 4), (1, -1, 5, -1, -1), (2, 5, -1, 6, 7), (3, -1, 6, -1, -1),\n (4, -1, 7, -1, -1))'], {}), '(((-1, 1, 2, 3, 4), (1, -1, 5, -1, -1), (2, 5, -1, 6, 7), (3, -1,\n 6, -1, -1), (4, -1, 7, -1, -1)))\n', (5183, 5285), True, 'import numpy as np\n'), ((5448, 5503), 'higra.adjacency_matrix_2_undirected_graph', 'hg.adjacency_matrix_2_undirected_graph', (['ref_adj_mat', '(-1)'], {}), '(ref_adj_mat, -1)\n', (5486, 5503), True, 'import higra as hg\n'), ((5525, 5546), 'higra.UndirectedGraph', 'hg.UndirectedGraph', (['(5)'], {}), '(5)\n', (5543, 5546), True, 'import higra as hg\n'), ((5806, 5839), 'numpy.asarray', 'np.asarray', (['(1, 2, 3, 4, 5, 6, 7)'], {}), '((1, 2, 3, 4, 5, 6, 7))\n', (5816, 5839), True, 'import numpy as np\n'), ((6237, 6338), 'numpy.asarray', 'np.asarray', (['((0, 1, 2, 3, 4), (1, 0, 5, 0, 0), (2, 5, 0, 6, 7), (3, 0, 6, 0, 0), (4, 0,\n 7, 0, 0))'], {}), '(((0, 1, 2, 3, 4), (1, 0, 5, 0, 0), (2, 5, 0, 6, 7), (3, 0, 6, 0,\n 0), (4, 0, 7, 0, 0)))\n', (6247, 6338), True, 'import numpy as np\n'), ((6493, 6519), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['ref_adj_mat'], {}), '(ref_adj_mat)\n', (6506, 6519), True, 'import scipy.sparse as sp\n'), 
((6550, 6601), 'higra.adjacency_matrix_2_undirected_graph', 'hg.adjacency_matrix_2_undirected_graph', (['ref_adj_mat'], {}), '(ref_adj_mat)\n', (6588, 6601), True, 'import higra as hg\n'), ((7182, 7214), 'higra.get_4_adjacency_graph', 'hg.get_4_adjacency_graph', (['(3, 3)'], {}), '((3, 3))\n', (7206, 7214), True, 'import higra as hg\n'), ((7238, 7303), 'numpy.asarray', 'np.asarray', (['(2, 3, 9, 5, 10, 1, 5, 8, 2, 2, 4, 3)'], {'dtype': 'np.int32'}), '((2, 3, 9, 5, 10, 1, 5, 8, 2, 2, 4, 3), dtype=np.int32)\n', (7248, 7303), True, 'import numpy as np\n'), ((7332, 7372), 'higra.ultrametric_open', 'hg.ultrametric_open', (['graph', 'edge_weights'], {}), '(graph, edge_weights)\n', (7351, 7372), True, 'import higra as hg\n'), ((7405, 7469), 'numpy.asarray', 'np.asarray', (['(2, 3, 9, 3, 9, 1, 4, 3, 2, 2, 4, 3)'], {'dtype': 'np.int32'}), '((2, 3, 9, 3, 9, 1, 4, 3, 2, 2, 4, 3), dtype=np.int32)\n', (7415, 7469), True, 'import numpy as np\n'), ((7614, 7646), 'higra.get_4_adjacency_graph', 'hg.get_4_adjacency_graph', (['(2, 3)'], {}), '((2, 3))\n', (7638, 7646), True, 'import higra as hg\n'), ((7671, 7704), 'numpy.asarray', 'np.asarray', (['(1, 0, 2, 1, 1, 1, 2)'], {}), '((1, 0, 2, 1, 1, 1, 2))\n', (7681, 7704), True, 'import numpy as np\n'), ((7720, 7765), 'higra.minimum_spanning_tree', 'hg.minimum_spanning_tree', (['graph', 'edge_weights'], {}), '(graph, edge_weights)\n', (7744, 7765), True, 'import higra as hg\n'), ((7789, 7832), 'higra.CptMinimumSpanningTree.get_edge_map', 'hg.CptMinimumSpanningTree.get_edge_map', (['mst'], {}), '(mst)\n', (7827, 7832), True, 'import higra as hg\n'), ((8289, 8310), 'higra.UndirectedGraph', 'hg.UndirectedGraph', (['(6)'], {}), '(6)\n', (8307, 8310), True, 'import higra as hg\n'), ((8399, 8429), 'numpy.asarray', 'np.asarray', (['(0, 1, 2, 3, 4, 5)'], {}), '((0, 1, 2, 3, 4, 5))\n', (8409, 8429), True, 'import numpy as np\n'), ((8445, 8490), 'higra.minimum_spanning_tree', 'hg.minimum_spanning_tree', (['graph', 'edge_weights'], {}), '(graph, 
edge_weights)\n', (8469, 8490), True, 'import higra as hg\n'), ((8514, 8557), 'higra.CptMinimumSpanningTree.get_edge_map', 'hg.CptMinimumSpanningTree.get_edge_map', (['mst'], {}), '(mst)\n', (8552, 8557), True, 'import higra as hg\n'), ((9009, 9045), 'numpy.asarray', 'np.asarray', (['((0, 0), (0, 1), (1, 0))'], {}), '(((0, 0), (0, 1), (1, 0)))\n', (9019, 9045), True, 'import numpy as np\n'), ((9062, 9072), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (9069, 9072), True, 'import numpy as np\n'), ((9089, 9140), 'higra.make_graph_from_points', 'hg.make_graph_from_points', (['X'], {'graph_type': '"""complete"""'}), "(X, graph_type='complete')\n", (9114, 9140), True, 'import higra as hg\n'), ((9158, 9179), 'higra.UndirectedGraph', 'hg.UndirectedGraph', (['(3)'], {}), '(3)\n', (9176, 9179), True, 'import higra as hg\n'), ((9398, 9466), 'numpy.asarray', 'np.asarray', (['((0, 0), (0, 1), (1, 0), (0, 3), (0, 4), (1, 3), (2, 3))'], {}), '(((0, 0), (0, 1), (1, 0), (0, 3), (0, 4), (1, 3), (2, 3)))\n', (9408, 9466), True, 'import numpy as np\n'), ((9483, 9493), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (9490, 9493), True, 'import numpy as np\n'), ((9510, 9597), 'higra.make_graph_from_points', 'hg.make_graph_from_points', (['X'], {'graph_type': '"""knn"""', 'symmetrization': '"""max"""', 'n_neighbors': '(2)'}), "(X, graph_type='knn', symmetrization='max',\n n_neighbors=2)\n", (9535, 9597), True, 'import higra as hg\n'), ((9611, 9632), 'higra.UndirectedGraph', 'hg.UndirectedGraph', (['(7)'], {}), '(7)\n', (9629, 9632), True, 'import higra as hg\n'), ((9857, 9944), 'higra.make_graph_from_points', 'hg.make_graph_from_points', (['X'], {'graph_type': '"""knn"""', 'symmetrization': '"""min"""', 'n_neighbors': '(2)'}), "(X, graph_type='knn', symmetrization='min',\n n_neighbors=2)\n", (9882, 9944), True, 'import higra as hg\n'), ((9958, 9979), 'higra.UndirectedGraph', 'hg.UndirectedGraph', (['(7)'], {}), '(7)\n', (9976, 9979), True, 'import higra as hg\n'), ((10233, 10301), 
'numpy.asarray', 'np.asarray', (['((0, 0), (0, 1), (1, 0), (0, 3), (0, 4), (1, 3), (2, 3))'], {}), '(((0, 0), (0, 1), (1, 0), (0, 3), (0, 4), (1, 3), (2, 3)))\n', (10243, 10301), True, 'import numpy as np\n'), ((10318, 10328), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (10325, 10328), True, 'import numpy as np\n'), ((10345, 10436), 'higra.make_graph_from_points', 'hg.make_graph_from_points', (['X'], {'graph_type': '"""knn+mst"""', 'symmetrization': '"""max"""', 'n_neighbors': '(2)'}), "(X, graph_type='knn+mst', symmetrization='max',\n n_neighbors=2)\n", (10370, 10436), True, 'import higra as hg\n'), ((10450, 10471), 'higra.UndirectedGraph', 'hg.UndirectedGraph', (['(7)'], {}), '(7)\n', (10468, 10471), True, 'import higra as hg\n'), ((10705, 10796), 'higra.make_graph_from_points', 'hg.make_graph_from_points', (['X'], {'graph_type': '"""knn+mst"""', 'symmetrization': '"""min"""', 'n_neighbors': '(2)'}), "(X, graph_type='knn+mst', symmetrization='min',\n n_neighbors=2)\n", (10730, 10796), True, 'import higra as hg\n'), ((10810, 10831), 'higra.UndirectedGraph', 'hg.UndirectedGraph', (['(7)'], {}), '(7)\n', (10828, 10831), True, 'import higra as hg\n'), ((11091, 11159), 'numpy.asarray', 'np.asarray', (['((0, 0), (0, 1), (1, 0), (0, 3), (0, 4), (1, 3), (2, 3))'], {}), '(((0, 0), (0, 1), (1, 0), (0, 3), (0, 4), (1, 3), (2, 3)))\n', (11101, 11159), True, 'import numpy as np\n'), ((11176, 11186), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (11183, 11186), True, 'import numpy as np\n'), ((11203, 11295), 'higra.make_graph_from_points', 'hg.make_graph_from_points', (['X'], {'graph_type': '"""delaunay"""', 'symmetrization': '"""max"""', 'n_neighbors': '(2)'}), "(X, graph_type='delaunay', symmetrization='max',\n n_neighbors=2)\n", (11228, 11295), True, 'import higra as hg\n'), ((11309, 11330), 'higra.UndirectedGraph', 'hg.UndirectedGraph', (['(7)'], {}), '(7)\n', (11327, 11330), True, 'import higra as hg\n'), ((11657, 11689), 'higra.get_4_adjacency_graph', 
'hg.get_4_adjacency_graph', (['(2, 2)'], {}), '((2, 2))\n', (11681, 11689), True, 'import higra as hg\n'), ((11713, 11731), 'numpy.asarray', 'np.asarray', (['(3, 0)'], {}), '((3, 0))\n', (11723, 11731), True, 'import numpy as np\n'), ((11751, 11798), 'higra.subgraph', 'hg.subgraph', (['graph', 'edge_indices'], {'spanning': '(True)'}), '(graph, edge_indices, spanning=True)\n', (11762, 11798), True, 'import higra as hg\n'), ((12145, 12166), 'higra.UndirectedGraph', 'hg.UndirectedGraph', (['(6)'], {}), '(6)\n', (12163, 12166), True, 'import higra as hg\n'), ((12245, 12266), 'numpy.asarray', 'np.asarray', (['(4, 0, 3)'], {}), '((4, 0, 3))\n', (12255, 12266), True, 'import numpy as np\n'), ((12298, 12370), 'higra.subgraph', 'hg.subgraph', (['graph', 'edge_indices'], {'spanning': '(False)', 'return_vertex_map': '(True)'}), '(graph, edge_indices, spanning=False, return_vertex_map=True)\n', (12309, 12370), True, 'import higra as hg\n'), ((12791, 12823), 'higra.get_8_adjacency_graph', 'hg.get_8_adjacency_graph', (['(2, 2)'], {}), '((2, 2))\n', (12815, 12823), True, 'import higra as hg\n'), ((12845, 12865), 'higra.line_graph', 'hg.line_graph', (['graph'], {}), '(graph)\n', (12858, 12865), True, 'import higra as hg\n'), ((13390, 13426), 'higra.Tree', 'hg.Tree', (['(5, 5, 6, 6, 6, 7, 8, 8, 8)'], {}), '((5, 5, 6, 6, 6, 7, 8, 8, 8))\n', (13397, 13426), True, 'import higra as hg\n'), ((13448, 13468), 'higra.line_graph', 'hg.line_graph', (['graph'], {}), '(graph)\n', (13461, 13468), True, 'import higra as hg\n'), ((1440, 1478), 'higra.is_in_bijection', 'hg.is_in_bijection', (['labels', 'ref_labels'], {}), '(labels, ref_labels)\n', (1458, 1478), True, 'import higra as hg\n'), ((1833, 1883), 'higra.is_in_bijection', 'hg.is_in_bijection', (['edge_weights', 'ref_edge_weights'], {}), '(edge_weights, ref_edge_weights)\n', (1851, 1883), True, 'import higra as hg\n'), ((2644, 2674), 'numpy.all', 'np.all', (['(ref_adj_mat == adj_mat)'], {}), '(ref_adj_mat == adj_mat)\n', (2650, 2674), 
True, 'import numpy as np\n'), ((2754, 2790), 'numpy.asarray', 'np.asarray', (['(5, 5, 6, 6, 6, 7, 7, 7)'], {}), '((5, 5, 6, 6, 6, 7, 7, 7))\n', (2764, 2790), True, 'import numpy as np\n'), ((3429, 3459), 'numpy.all', 'np.all', (['(ref_adj_mat == adj_mat)'], {}), '(ref_adj_mat == adj_mat)\n', (3435, 3459), True, 'import numpy as np\n'), ((3485, 3505), 'scipy.sparse.issparse', 'sp.issparse', (['adj_mat'], {}), '(adj_mat)\n', (3496, 3505), True, 'import scipy.sparse as sp\n'), ((3528, 3564), 'numpy.asarray', 'np.asarray', (['(5, 5, 6, 6, 6, 7, 7, 7)'], {}), '((5, 5, 6, 6, 6, 7, 7, 7))\n', (3538, 3564), True, 'import numpy as np\n'), ((4132, 4162), 'numpy.all', 'np.all', (['(ref_adj_mat == adj_mat)'], {}), '(ref_adj_mat == adj_mat)\n', (4138, 4162), True, 'import numpy as np\n'), ((4188, 4208), 'scipy.sparse.issparse', 'sp.issparse', (['adj_mat'], {}), '(adj_mat)\n', (4199, 4208), True, 'import scipy.sparse as sp\n'), ((4266, 4339), 'higra.undirected_graph_2_adjacency_matrix', 'hg.undirected_graph_2_adjacency_matrix', (['t'], {'non_edge_value': '(-1)', 'sparse': '(True)'}), '(t, non_edge_value=-1, sparse=True)\n', (4304, 4339), True, 'import higra as hg\n'), ((4792, 4832), 'numpy.all', 'np.all', (['(edge_weights == ref_edge_weights)'], {}), '(edge_weights == ref_edge_weights)\n', (4798, 4832), True, 'import numpy as np\n'), ((5865, 5905), 'numpy.all', 'np.all', (['(edge_weights == ref_edge_weights)'], {}), '(edge_weights == ref_edge_weights)\n', (5871, 5905), True, 'import numpy as np\n'), ((7062, 7132), 'higra.adjacency_matrix_2_undirected_graph', 'hg.adjacency_matrix_2_undirected_graph', (['ref_adj_mat'], {'non_edge_value': '(-1)'}), '(ref_adj_mat, non_edge_value=-1)\n', (7100, 7132), True, 'import higra as hg\n'), ((7495, 7553), 'higra.is_in_bijection', 'hg.is_in_bijection', (['subd_ultrametric', 'ref_subd_ultrametric'], {}), '(subd_ultrametric, ref_subd_ultrametric)\n', (7513, 7553), True, 'import higra as hg\n'), ((8074, 8104), 'numpy.all', 'np.all', (['(sources 
== ref_sources)'], {}), '(sources == ref_sources)\n', (8080, 8104), True, 'import numpy as np\n'), ((8130, 8160), 'numpy.all', 'np.all', (['(targets == ref_targets)'], {}), '(targets == ref_targets)\n', (8136, 8160), True, 'import numpy as np\n'), ((8187, 8226), 'numpy.all', 'np.all', (['(mst_edge_map == (1, 0, 3, 4, 2))'], {}), '(mst_edge_map == (1, 0, 3, 4, 2))\n', (8193, 8226), True, 'import numpy as np\n'), ((8793, 8823), 'numpy.all', 'np.all', (['(sources == ref_sources)'], {}), '(sources == ref_sources)\n', (8799, 8823), True, 'import numpy as np\n'), ((8849, 8879), 'numpy.all', 'np.all', (['(targets == ref_targets)'], {}), '(targets == ref_targets)\n', (8855, 8879), True, 'import numpy as np\n'), ((8906, 8942), 'numpy.all', 'np.all', (['(mst_edge_map == (0, 1, 3, 4))'], {}), '(mst_edge_map == (0, 1, 3, 4))\n', (8912, 8942), True, 'import numpy as np\n'), ((11461, 11471), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (11468, 11471), True, 'import numpy as np\n'), ((11479, 11490), 'numpy.sqrt', 'np.sqrt', (['(10)'], {}), '(10)\n', (11486, 11490), True, 'import numpy as np\n'), ((11505, 11515), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (11512, 11515), True, 'import numpy as np\n'), ((12012, 12037), 'numpy.all', 'np.all', (['(sources == (2, 0))'], {}), '(sources == (2, 0))\n', (12018, 12037), True, 'import numpy as np\n'), ((12063, 12088), 'numpy.all', 'np.all', (['(targets == (3, 1))'], {}), '(targets == (3, 1))\n', (12069, 12088), True, 'import numpy as np\n'), ((12191, 12203), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (12200, 12203), True, 'import numpy as np\n'), ((12205, 12220), 'numpy.arange', 'np.arange', (['(1)', '(6)'], {}), '(1, 6)\n', (12214, 12220), True, 'import numpy as np\n'), ((12565, 12602), 'numpy.all', 'np.all', (['(vertex_map == (0, 1, 3, 4, 5))'], {}), '(vertex_map == (0, 1, 3, 4, 5))\n', (12571, 12602), True, 'import numpy as np\n'), ((12628, 12668), 'numpy.all', 'np.all', (['(vertex_map[sources] == (4, 0, 3))'], {}), 
'(vertex_map[sources] == (4, 0, 3))\n', (12634, 12668), True, 'import numpy as np\n'), ((12694, 12734), 'numpy.all', 'np.all', (['(vertex_map[targets] == (5, 1, 4))'], {}), '(vertex_map[targets] == (5, 1, 4))\n', (12700, 12734), True, 'import numpy as np\n')] |
import cv2
import numpy as np
from argparse import ArgumentParser
from time import perf_counter
from cs229.display import open_window, show_image
from cs229.files import read_video, write_video
from cs229.image import img_to_mask, bound_point
from cs229.contour import find_contours
from cs229.patch import crop_to_contour
from cs229.train_is_fly import IsFlyPredictor
from cs229.train_id import IdPredictor
from cs229.train_orientation import PosePredictor
from cs229.load_data_wing import male_fly_patch
from cs229.train_wing import WingPredictor
from cs229.timing import Profiler
def arrow_from_point(img, point, length, angle, color):
    """Draw an arrow of the given length on ``img`` starting at ``point``
    and pointing toward ``angle`` (radians, math convention); the tip is
    clamped inside the image bounds via ``bound_point``.
    """
    tip_x = point[0] + length * np.cos(angle)
    tip_y = point[1] - length * np.sin(angle)  # minus: image y-axis grows downward
    cv2.arrowedLine(img, point, bound_point((tip_x, tip_y), img), color, 5, tipLength=0.3)
def main():
    """Track a pair of flies (male/female) through a video.

    Pipeline per frame: find fly contours, classify each as no fly / one
    fly / both flies touching, identify male vs female, estimate each
    fly's pose, estimate the male's wing angles, then optionally draw the
    annotations and/or write them to an output video.  Press 'q' in the
    display window to stop early.
    """
    # parse command-line arguments
    parser = ArgumentParser()
    parser.add_argument('--no_display', action='store_true')
    parser.add_argument('--write_video', action='store_true')
    parser.add_argument('-i', '--input', type=str, default='test4.mp4')
    parser.add_argument('-o', '--output', type=str, default='output.avi')
    args = parser.parse_args()
    # prepare video: read one frame up front to build the arena mask
    cap, props = read_video(args.input)
    _, img = cap.read()
    img = img[:, :, 0]
    mask = img_to_mask(img)
    if args.write_video:
        video_writer = write_video(args.output, props)
    # load predictors
    is_fly_predictor = IsFlyPredictor()
    id_predictor = IdPredictor()
    pose_predictor = {type: PosePredictor(type) for type in ['male', 'female']}
    wing_predictor = WingPredictor()
    # display-specific actions
    if not args.no_display:
        open_window()
    # BGR annotation color per label ('neither' contours are never drawn)
    colors = {'female': (255, 0, 0), 'male': (0, 0, 255), 'both': (255, 0, 255), 'neither': None}
    frames = 0
    tick = perf_counter()
    prof = Profiler()
    while True:
        frame_start = perf_counter()
        # read frame
        prof.tick('I/O')
        ok, img = cap.read()
        prof.tock('I/O')
        if not ok:
            break
        if not args.no_display:
            out = img.copy()  # color copy kept only for drawing
        img = img[:, :, 0]  # analysis runs on a single channel
        frames += 1
        # extract contours
        prof.tick('Find fly contours')
        contours = find_contours(img, mask=mask, type='core')
        # sort contours into bins: zero flies, one fly and two flies touching
        contours_by_label = {'neither': [], 'one': [], 'both': []}
        for contour in contours:
            label = is_fly_predictor.predict(contour)
            contours_by_label[label].append(contour)
        prof.tock('Find fly contours')
        results = {}
        if len(contours_by_label['one'])==2 and len(contours_by_label['both'])==0:
            # two separate flies: decide which contour is male, which is female
            prof.tick('ID as male/female')
            contour_1 = contours_by_label['one'][0]
            contour_2 = contours_by_label['one'][1]
            patch_1 = crop_to_contour(img, contour_1)
            patch_2 = crop_to_contour(img, contour_2)
            label = id_predictor.predict(contour_1, contour_2, patch_1, patch_2)
            if label == 'mf':
                results['male'] = dict(contour=contour_1, patch=patch_1)
                results['female'] = dict(contour=contour_2, patch=patch_2)
            elif label == 'fm':
                results['female'] = dict(contour=contour_1, patch=patch_1)
                results['male'] = dict(contour=contour_2, patch=patch_2)
            prof.tock('ID as male/female')
            prof.tick('Determine orientation')
            for type in ['male', 'female']:
                result = results[type]
                (cx, cy), angle = pose_predictor[type].predict(result['patch'])
                result.update(dict(cx=cx, cy=cy, angle=angle))
            prof.tock('Determine orientation')
            # predict wing angle (male only)
            prof.tick('Determine wing angles')
            patch_m = male_fly_patch(img, mask, (results['male']['cx'], results['male']['cy']),
                                    results['male']['angle'])
            if patch_m is not None:
                wing_angle_right, wing_angle_left = wing_predictor.predict(patch_m)
                if wing_angle_right is not None:
                    results['male']['wing_angle_right'] = wing_angle_right
                if wing_angle_left is not None:
                    results['male']['wing_angle_left'] = wing_angle_left
            prof.tock('Determine wing angles')
        elif len(contours_by_label['one'])==0 and len(contours_by_label['both'])==1:
            # flies touching: track the pair as a single 'both' blob
            results['both'] = dict(contour=contours_by_label['both'][0])
            results['both']['patch'] = crop_to_contour(img, results['both']['contour'])
            results['both']['cx'], results['both']['cy'] = results['both']['patch'].estimate_center(absolute=True)
        else:
            print('Unexpected case.')
            continue
        # when profiling, skip the drawing steps
        if args.no_display:
            continue
        # illustrate the results
        for label, result in results.items():
            # draw outline
            cv2.drawContours(out, [result['contour']], -1, colors[label], 3)
            if label not in ['male', 'female']:
                continue
            # draw center
            center = bound_point((result['cx'], result['cy']), out)
            cv2.circle(out, center, 5, colors[label], -1)
            # draw arrow in direction of orientation
            MA, ma = result['patch'].estimate_axes()
            arrow_from_point(out, center, 0.3*MA, result['angle'], colors[label])
            if label == 'male':
                # wing arrows are drawn relative to the body orientation
                if 'wing_angle_right' in result:
                    arrow_angle = result['angle'] + result['wing_angle_right'] - np.pi
                    arrow_from_point(out, center, 0.3*MA, arrow_angle, (255, 255, 0))
                if 'wing_angle_left' in result:
                    arrow_angle = result['angle'] - result['wing_angle_left'] - np.pi
                    arrow_from_point(out, center, 0.3*MA, arrow_angle, (255, 255, 0))
        # display image
        show_image(out, downsamp=2)
        if args.write_video:
            video_writer.write(out)
        # figure out how much extra time the GUI should wait before proceeding
        t_frame_ms = 1e3*(perf_counter() - frame_start)
        t_extra_ms = int(round(props.t_ms - t_frame_ms))
        # handle GUI tasks (waitKey timeout must be strictly positive)
        key = cv2.waitKey(max(t_extra_ms, 1))
        # process input keys
        if key == ord('q'):
            break
    tock = perf_counter()
    prof.stop()
    if args.write_video:
        video_writer.release()
    print('Total frames: {}'.format(frames))
    print('Elapsed time: {}'.format(tock-tick))
    print('Throughput: {:0.3f}'.format(frames/(tock-tick)))
# run only when executed as a script (not on import)
if __name__ == '__main__':
    main()
"cs229.image.img_to_mask",
"cs229.files.write_video",
"argparse.ArgumentParser",
"cs229.display.show_image",
"numpy.sin",
"cs229.files.read_video",
"cs229.load_data_wing.male_fly_patch",
"cv2.drawContours",
"cs229.image.bound_point",
"cs229.train_wing.WingPredictor",
"cv2.circle",
"cs229.train... | [((737, 763), 'cs229.image.bound_point', 'bound_point', (['(ax, ay)', 'img'], {}), '((ax, ay), img)\n', (748, 763), False, 'from cs229.image import img_to_mask, bound_point\n'), ((768, 825), 'cv2.arrowedLine', 'cv2.arrowedLine', (['img', 'point', 'tip', 'color', '(5)'], {'tipLength': '(0.3)'}), '(img, point, tip, color, 5, tipLength=0.3)\n', (783, 825), False, 'import cv2\n'), ((887, 903), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (901, 903), False, 'from argparse import ArgumentParser\n'), ((1242, 1264), 'cs229.files.read_video', 'read_video', (['args.input'], {}), '(args.input)\n', (1252, 1264), False, 'from cs229.files import read_video, write_video\n'), ((1323, 1339), 'cs229.image.img_to_mask', 'img_to_mask', (['img'], {}), '(img)\n', (1334, 1339), False, 'from cs229.image import img_to_mask, bound_point\n'), ((1467, 1483), 'cs229.train_is_fly.IsFlyPredictor', 'IsFlyPredictor', ([], {}), '()\n', (1481, 1483), False, 'from cs229.train_is_fly import IsFlyPredictor\n'), ((1503, 1516), 'cs229.train_id.IdPredictor', 'IdPredictor', ([], {}), '()\n', (1514, 1516), False, 'from cs229.train_id import IdPredictor\n'), ((1618, 1633), 'cs229.train_wing.WingPredictor', 'WingPredictor', ([], {}), '()\n', (1631, 1633), False, 'from cs229.train_wing import WingPredictor\n'), ((1845, 1859), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (1857, 1859), False, 'from time import perf_counter\n'), ((1871, 1881), 'cs229.timing.Profiler', 'Profiler', ([], {}), '()\n', (1879, 1881), False, 'from cs229.timing import Profiler\n'), ((6610, 6624), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (6622, 6624), False, 'from time import perf_counter\n'), ((1389, 1420), 'cs229.files.write_video', 'write_video', (['args.output', 'props'], {}), '(args.output, props)\n', (1400, 1420), False, 'from cs229.files import read_video, write_video\n'), ((1545, 1564), 'cs229.train_orientation.PosePredictor', 'PosePredictor', (['type'], {}), '(type)\n', 
(1558, 1564), False, 'from cs229.train_orientation import PosePredictor\n'), ((1702, 1715), 'cs229.display.open_window', 'open_window', ([], {}), '()\n', (1713, 1715), False, 'from cs229.display import open_window, show_image\n'), ((1921, 1935), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (1933, 1935), False, 'from time import perf_counter\n'), ((2272, 2314), 'cs229.contour.find_contours', 'find_contours', (['img'], {'mask': 'mask', 'type': '"""core"""'}), "(img, mask=mask, type='core')\n", (2285, 2314), False, 'from cs229.contour import find_contours\n'), ((6161, 6188), 'cs229.display.show_image', 'show_image', (['out'], {'downsamp': '(2)'}), '(out, downsamp=2)\n', (6171, 6188), False, 'from cs229.display import open_window, show_image\n'), ((670, 683), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (676, 683), True, 'import numpy as np\n'), ((713, 726), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (719, 726), True, 'import numpy as np\n'), ((2972, 3003), 'cs229.patch.crop_to_contour', 'crop_to_contour', (['img', 'contour_1'], {}), '(img, contour_1)\n', (2987, 3003), False, 'from cs229.patch import crop_to_contour\n'), ((3026, 3057), 'cs229.patch.crop_to_contour', 'crop_to_contour', (['img', 'contour_2'], {}), '(img, contour_2)\n', (3041, 3057), False, 'from cs229.patch import crop_to_contour\n'), ((3969, 4072), 'cs229.load_data_wing.male_fly_patch', 'male_fly_patch', (['img', 'mask', "(results['male']['cx'], results['male']['cy'])", "results['male']['angle']"], {}), "(img, mask, (results['male']['cx'], results['male']['cy']),\n results['male']['angle'])\n", (3983, 4072), False, 'from cs229.load_data_wing import male_fly_patch\n'), ((5172, 5236), 'cv2.drawContours', 'cv2.drawContours', (['out', "[result['contour']]", '(-1)', 'colors[label]', '(3)'], {}), "(out, [result['contour']], -1, colors[label], 3)\n", (5188, 5236), False, 'import cv2\n'), ((5359, 5405), 'cs229.image.bound_point', 'bound_point', (["(result['cx'], result['cy'])", 
'out'], {}), "((result['cx'], result['cy']), out)\n", (5370, 5405), False, 'from cs229.image import img_to_mask, bound_point\n'), ((5418, 5463), 'cv2.circle', 'cv2.circle', (['out', 'center', '(5)', 'colors[label]', '(-1)'], {}), '(out, center, 5, colors[label], -1)\n', (5428, 5463), False, 'import cv2\n'), ((4717, 4765), 'cs229.patch.crop_to_contour', 'crop_to_contour', (['img', "results['both']['contour']"], {}), "(img, results['both']['contour'])\n", (4732, 4765), False, 'from cs229.patch import crop_to_contour\n'), ((6361, 6375), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (6373, 6375), False, 'from time import perf_counter\n')] |
import base64
import cloudpickle
from copy import deepcopy
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
import numpy as np
from triad import assert_or_throw, to_uuid
from triad.utils.convert import get_full_type_path
from tune._utils import product
from tune._utils.math import (
normal_to_continuous,
normal_to_discrete,
normal_to_integers,
uniform_to_continuous,
uniform_to_discrete,
uniform_to_integers,
)
class TuningParameterExpression:
    """Base class of all tuning parameter expressions.

    Subclasses represent either exhaustive search dimensions (``Grid``)
    or random ones (``StochasticExpression`` and its children).
    ``TuningParametersTemplate`` relies on ``isinstance`` checks against
    this type to detect tunable values in nested parameter structures.
    """

    pass
class Grid(TuningParameterExpression):
    """Exhaustive grid search: every provided value will be tried.

    Please read |SpaceTutorial|.

    :param args: values for the grid search
    """

    def __init__(self, *args: Any):
        assert_or_throw(
            len(args) > 0, ValueError("Grid must take at least one element")
        )
        self._values = list(args)

    def __iter__(self) -> Iterable[Any]:
        # yield the candidate values in the order they were given
        return iter(self._values)

    def __eq__(self, other: Any):
        """Compare two ``Grid``"""
        if not isinstance(other, type(self)):
            return False
        return self._values == other._values

    def __uuid__(self) -> str:
        return to_uuid("grid", self._values)

    def __repr__(self) -> str:
        inner = ", ".join(repr(v) for v in self._values)
        return f"Grid({inner})"
class StochasticExpression(TuningParameterExpression):
    """Base class of random search expressions.

    Please read |SpaceTutorial|.
    """

    @property
    def jsondict(self) -> Dict[str, Any]:  # pragma: no cover
        """Dict representation of the expression that is json serializable"""
        raise NotImplementedError

    def __eq__(self, other: Any):
        """Compare two ``StochasticExpression``"""
        if not isinstance(other, type(self)):
            return False
        return self.jsondict == other.jsondict

    def generate(self, seed: Any = None) -> Any:  # pragma: no cover
        """Return a randomly chosen value.

        :param seed: if set, it will be used to call :func:`~np:numpy.random.seed`
            , defaults to None
        """
        raise NotImplementedError

    def generate_many(self, n: int, seed: Any = None) -> List[Any]:
        """Generate ``n`` randomly chosen values

        :param n: number of random values to generate
        :param seed: random seed, defaults to None
        :return: a list of values
        """
        # seed once up front so the n draws form one reproducible sequence
        if seed is not None:
            np.random.seed(seed)
        return [self.generate() for _ in range(n)]

    def __uuid__(self) -> str:
        """Unique id for the expression"""
        return to_uuid(self.jsondict)
class Choice(StochasticExpression):
    """A uniformly random choice among a fixed set of values.

    Please read |SpaceTutorial|.

    :param args: values to choose from
    """

    def __init__(self, *args: Any):
        assert_or_throw(
            len(args) > 0, ValueError("Choice must take at least one element")
        )
        self._values = list(args)

    @property
    def values(self) -> List[Any]:
        """values to choose from"""
        return self._values

    @property
    def jsondict(self) -> Dict[str, Any]:
        return {"_expr_": "choice", "values": self.values}

    def generate(self, seed: Any = None) -> Any:
        if seed is not None:
            np.random.seed(seed)
        picked = np.random.choice(self._values)
        # numpy scalar types are unwrapped back to native python values
        if isinstance(picked, np.generic):
            return picked.item()
        return picked  # pragma: no cover

    def __repr__(self) -> str:
        return f"Choice({', '.join(repr(v) for v in self._values)})"
class TransitionChoice(Choice):
    """Like ``Choice`` but the values are considered ordered.

    Please read |SpaceTutorial|.

    :param args: values to choose from
    """

    @property
    def jsondict(self) -> Dict[str, Any]:
        return {"_expr_": "tchoice", "values": self.values}

    def __repr__(self) -> str:
        return f"TransitionChoice({', '.join(repr(v) for v in self._values)})"
class RandBase(StochasticExpression):
    """Common state for continuous random variables.

    Please read |SpaceTutorial|.

    :param q: step between adjacent values, if set, the value will be rounded
        using ``q``, defaults to None
    :param log: whether to do uniform sampling in log space, defaults to False.
        If True, lower values get higher chance to be sampled
    """

    def __init__(self, q: Optional[float] = None, log: bool = False):
        # ``q is None`` means no rounding; otherwise the step must be positive
        if q is not None:
            assert_or_throw(q > 0, f"{q} <= 0")
        self.q = q
        self.log = log
class Rand(RandBase):
    """Continuous uniform random variables.

    Please read |SpaceTutorial|.

    :param low: range low bound (inclusive)
    :param high: range high bound (exclusive)
    :param q: step between adjacent values, if set, the value will be rounded
        using ``q``, defaults to None
    :param log: whether to do uniform sampling in log space, defaults to False.
        If True, ``low`` must be positive and lower values get higher chance to be sampled
    """

    def __init__(
        self,
        low: float,
        high: float,
        q: Optional[float] = None,
        log: bool = False,
        include_high: bool = True,
    ):
        # the bound check depends on whether ``high`` itself is a legal value
        if include_high:
            ok, err = high >= low, f"{high} < {low}"
        else:
            ok, err = high > low, f"{high} <= {low}"
        assert_or_throw(ok, ValueError(err))
        assert_or_throw(q is None or q > 0, ValueError(q))
        if log:
            assert_or_throw(
                low > 0.0,
                ValueError(f"for log sampling, low ({low}) must be greater than 0.0"),
            )
        self.low = low
        self.high = high
        self.include_high = include_high
        super().__init__(q, log)

    @property
    def jsondict(self) -> Dict[str, Any]:
        return {
            "_expr_": "rand",
            "low": self.low,
            "high": self.high,
            "q": self.q,
            "log": self.log,
            "include_high": self.include_high,
        }

    def generate(self, seed: Any = None) -> float:
        if seed is not None:
            np.random.seed(seed)
        u = np.random.uniform()
        if self.q is not None:
            # rounded to the nearest multiple of q within the range
            return float(
                uniform_to_discrete(
                    u,
                    self.low,
                    self.high,
                    q=self.q,
                    log=self.log,
                    include_high=self.include_high,
                )
            )
        return float(uniform_to_continuous(u, self.low, self.high, log=self.log))

    def __repr__(self) -> str:
        return (
            f"Rand(low={self.low}, high={self.high}, q={self.q},"
            f" log={self.log}, include_high={self.include_high})"
        )
class RandInt(RandBase):
    """Uniform distributed random integer values.

    Please read |SpaceTutorial|.

    :param low: range low bound (inclusive)
    :param high: range high bound (exclusive)
    :param q: step between adjacent values, defaults to 1
    :param log: whether to do uniform sampling in log space, defaults to False.
        If True, ``low`` must be ``>=1`` and lower values get higher chance to be sampled
    """

    def __init__(
        self,
        low: int,
        high: int,
        q: int = 1,
        log: bool = False,
        include_high: bool = True,
    ):
        # the bound check depends on whether ``high`` itself is a legal value
        if include_high:
            assert_or_throw(high >= low, ValueError(f"{high} < {low}"))
        else:
            assert_or_throw(high > low, ValueError(f"{high} <= {low}"))
        assert_or_throw(q > 0, ValueError(q))
        if log:
            assert_or_throw(
                low >= 1.0,
                ValueError(
                    f"for log sampling, low ({low}) must be greater or equal to 1.0"
                ),
            )
        self.low = low
        self.high = high
        self.include_high = include_high
        super().__init__(q, log)

    @property
    def jsondict(self) -> Dict[str, Any]:
        return dict(
            _expr_="randint",
            low=self.low,
            high=self.high,
            q=self.q,
            log=self.log,
            include_high=self.include_high,
        )

    def generate(self, seed: Any = None) -> int:
        """Return a randomly chosen integer value.

        :param seed: if set, it will be used to call :func:`~np:numpy.random.seed`
            , defaults to None
        """
        # fix: the return annotation used to say ``float`` although the method
        # always returns ``int(...)``
        if seed is not None:
            np.random.seed(seed)
        value = np.random.uniform()
        return int(
            uniform_to_integers(  # type: ignore
                value,
                self.low,
                self.high,
                q=int(self.q),  # type: ignore
                log=self.log,
                include_high=self.include_high,
            )
        )

    def __repr__(self) -> str:
        return (
            f"RandInt(low={self.low}, high={self.high}, q={self.q},"
            f" log={self.log}, include_high={self.include_high})"
        )
class NormalRand(RandBase):
    """Continuous normally distributed random variables.

    Please read |SpaceTutorial|.

    :param mu: mean of the normal distribution
    :param sigma: standard deviation of the normal distribution
    :param q: step between adjacent values, if set, the value will be rounded
        using ``q``, defaults to None
    """

    def __init__(
        self,
        mu: float,
        sigma: float,
        q: Optional[float] = None,
    ):
        assert_or_throw(sigma > 0, ValueError(sigma))
        assert_or_throw(q is None or q > 0, ValueError(q))
        self.mu = mu
        self.sigma = sigma
        super().__init__(q)

    @property
    def jsondict(self) -> Dict[str, Any]:
        d: Dict[str, Any] = {"_expr_": "randn", "mu": self.mu, "sigma": self.sigma}
        # ``q`` is serialized only when rounding is enabled
        if self.q is not None:
            d["q"] = self.q
        return d

    def generate(self, seed: Any = None) -> float:
        if seed is not None:
            np.random.seed(seed)
        sample = np.random.normal()
        if self.q is None:
            return normal_to_continuous(sample, mean=self.mu, sigma=self.sigma)
        return normal_to_discrete(sample, mean=self.mu, sigma=self.sigma, q=self.q)

    def __repr__(self) -> str:
        return f"NormalRand(mu={self.mu}, sigma={self.sigma}, q={self.q})"
class NormalRandInt(RandBase):
    """Normally distributed random integer values.

    Please read |SpaceTutorial|.

    :param mu: mean of the normal distribution
    :param sigma: standard deviation of the normal distribution
    :param q: step between adjacent integer values, defaults to 1
    """

    def __init__(
        self,
        mu: int,
        sigma: float,
        q: int = 1,
    ):
        assert_or_throw(sigma > 0, ValueError(sigma))
        assert_or_throw(q > 0, ValueError(q))
        self.mu = mu
        self.sigma = sigma
        # fix: ``self.q`` is assigned inside ``RandBase.__init__``; the extra
        # ``self.q = q`` assignment that used to precede this call was redundant
        super().__init__(q)

    @property
    def jsondict(self) -> Dict[str, Any]:
        return dict(
            _expr_="randnint",
            mu=self.mu,
            sigma=self.sigma,
            q=self.q,
        )

    def generate(self, seed: Any = None) -> int:
        """Return a randomly chosen integer value.

        :param seed: if set, it will be used to call :func:`~np:numpy.random.seed`
            , defaults to None
        """
        if seed is not None:
            np.random.seed(seed)
        value = np.random.normal()
        return int(
            normal_to_integers(  # type: ignore
                value,
                mean=self.mu,
                sigma=self.sigma,
                q=self.q,  # type: ignore
            )
        )

    def __repr__(self) -> str:
        return f"NormalRandInt(mu={self.mu}, sigma={self.sigma}, q={self.q})"
class FuncParam:
    """A deferred function call whose arguments may contain tuning
    parameters; the call happens only after those parameters are filled.

    :param func: function to generate parameter value
    :param args: list arguments
    :param kwargs: key-value arguments

    .. code-block:: python

        s = Space(a=1, b=FuncParam(lambda x, y: x + y, x=Grid(0, 1), y=Grid(3, 4)))
        assert [
            dict(a=1, b=3),
            dict(a=1, b=4),
            dict(a=1, b=4),
            dict(a=1, b=5),
        ] == list(s)
    """

    def __init__(self, func: Callable, *args: Any, **kwargs: Any):
        self._func = func
        self._args = list(args)
        self._kwargs = dict(kwargs)

    def __uuid__(self) -> str:
        """Unique id for this expression"""
        return to_uuid(get_full_type_path(self._func), self._args, self._kwargs)

    def __call__(self) -> Any:
        """Call the function to generate value"""
        return self._func(*self._args, **self._kwargs)

    def __setitem__(self, key: Any, item: Any) -> None:
        """Update argument value

        :param key: key to set, if int, then set in ``args`` else set in ``kwargs``
        :param item: value to use
        """
        # integer keys address positional args, everything else keyword args
        if isinstance(key, int):
            self._args[key] = item
        else:
            self._kwargs[key] = item

    def __getitem__(self, key: Any) -> Any:
        """Get argument value

        :param key: key to get, if int, then get in ``args`` else get in ``kwargs``
        :return: the correspondent value
        """
        if isinstance(key, int):
            return self._args[key]
        return self._kwargs[key]

    def __eq__(self, other: Any) -> bool:
        """Whether the expression equals to the other one

        :param other: another ``FuncParam``
        :return: whether they are equal
        """
        if self._func is not other._func:
            return False
        return self._args == other._args and self._kwargs == other._kwargs

    def __repr__(self) -> str:
        parts: List[str] = [self._func.__name__]
        parts.extend(repr(x) for x in self._args)
        parts.extend(f"{k}={repr(v)}" for k, v in self._kwargs.items())
        return "FuncParam(" + ", ".join(parts) + ")"
class _MapUnit:
    """Pairs one tuning expression with every key path where it occurs
    inside a template's nested structure."""

    def __init__(self, expr: TuningParameterExpression):
        self.expr = expr
        self.positions: List[List[Any]] = []

    def __eq__(self, other: Any) -> bool:
        if self.expr != other.expr:
            return False
        return self.positions == other.positions

    def __uuid__(self) -> str:
        return to_uuid(self.expr, self.positions)

    def copy(self) -> "_MapUnit":
        # shallow copy: shares the expression, duplicates the position list
        dup = _MapUnit(self.expr)
        dup.positions = list(self.positions)
        return dup
class TuningParametersTemplate:
    """Parameter template to extract tuning parameter expressions from
    nested data structure

    :param raw: the dictionary of input parameters.

    .. note::

        Please use :func:`~tune.concepts.space.parameters.to_template`
        to initialize this class.

    .. code-block:: python

        # common cases
        to_template(dict(a=1, b=1))
        to_template(dict(a=Rand(0, 1), b=1))

        # expressions may nest in dicts or arrays
        template = to_template(
            dict(a=dict(x1=Rand(0, 1), x2=Rand(3,4)), b=[Grid("a", "b")]))

        assert [Rand(0, 1), Rand(3, 4), Grid("a", "b")] == template.params
        assert dict(
            p0=Rand(0, 1), p1=Rand(3, 4), p2=Grid("a", "b")
        ) == template.params_dict

        assert dict(a=dict(x1=1, x2=3), b=["a"]) == template.fill([1, 3, "a"])
        assert dict(a=dict(x1=1, x2=3), b=["a"]) == template.fill_dict(
            dict(p2="a", p1=3, p0=1)
        )
    """

    def __init__(self, raw: Dict[str, Any]):
        # distinct tuning expressions found in ``raw``, in depth-first order
        self._units: List[_MapUnit] = []
        self._has_grid = False
        self._has_stochastic = False
        # key paths of FuncParam objects; they are called ("realized") only
        # after every tuning expression under them has been filled
        self._func_positions: List[List[Any]] = []
        # copy of ``raw`` with every tuning expression replaced by None
        self._template: Dict[str, Any] = self._copy(raw, [], {})
        self._uuid = ""  # lazily computed cache for __uuid__

    def __eq__(self, other: Any) -> bool:
        """Check if the other object represents the same template

        :param other: an object convertible to ``TuningParametersTemplate``
            by :func:`~tune.concepts.space.parameters.to_template`
        :return: whether they are the same
        """
        o = to_template(other)
        return (
            self._has_grid == o._has_grid
            and self._has_stochastic == o._has_stochastic
            and self._template == o._template
            and self._units == o._units
            and self._func_positions == o._func_positions
        )

    def __uuid__(self):
        """The unique id representing this template"""
        if self._uuid == "":
            # cache the id; assumes the template is not mutated afterwards
            self._uuid = to_uuid(self._units, self._template)
        return self._uuid

    def __repr__(self) -> str:
        # render the structure with the expressions put back in their places
        return repr(self.fill([x.expr for x in self._units]))

    @property
    def template(self) -> Dict[str, Any]:
        """The template dictionary, all tuning
        expressions will be replaced by ``None``
        """
        return self._template

    @property
    def simple_value(self) -> Dict[str, Any]:
        """If the template contains no tuning expression, it's simple
        and it will return parameters dictionary, otherwise, ``ValueError``
        will be raised
        """
        assert_or_throw(self.empty, ValueError("template contains tuning expressions"))
        if len(self._func_positions) == 0:
            return self._template
        # FuncParam objects must still be realized, on a copy so the
        # stored template stays intact
        return self._fill_funcs(deepcopy(self._template))

    @property
    def empty(self) -> bool:
        """Whether the template contains any tuning expression"""
        return len(self._units) == 0

    @property
    def has_grid(self) -> bool:
        """Whether the template contains grid expressions"""
        return self._has_grid

    @property
    def has_stochastic(self) -> bool:
        """Whether the template contains stochastic expressions"""
        return self._has_stochastic

    @property
    def params(self) -> List[TuningParameterExpression]:
        """Get all tuning parameter expressions in depth-first order"""
        return [x.expr for x in self._units]

    @property
    def params_dict(self) -> Dict[str, TuningParameterExpression]:
        """Get all tuning parameter expressions in depth-first order,
        with correspondent made-up new keys p0, p1, p2, ...
        """
        return {f"p{i}": x for i, x in enumerate(self.params)}

    def fill(self, params: List[Any]) -> Dict[str, Any]:
        """Fill the original data structure with values

        :param params: the list of values to be filled into the original
            data structure, in depth-first order
        :return: a deep copy of the original data structure filled with values
        """
        assert_or_throw(
            len(self._units) == len(params),
            ValueError("params count does not match template requirment"),
        )
        template = deepcopy(self._template)
        i = 0
        for u in self._units:
            # the same expression object may occur at several positions;
            # all of them receive the same value
            for path in u.positions:
                self._fill_path(template, path, params[i])
            i += 1
        return self._fill_funcs(template)

    def fill_dict(self, params: Dict[str, Any]) -> Dict[str, Any]:
        """Fill the original data structure with dictionary of values

        :param params: the dictionary of values to be filled into the original
            data structure, keys must be p0, p1, p2, ...
        :return: a deep copy of the original data structure filled with values
        """
        # re-order the values by their positional key (p0, p1, ...)
        temp = [params[f"p{i}"] for i in range(len(params))]
        return self.fill(temp)

    def encode(self) -> str:
        """Convert the template to a base64 string"""
        return base64.b64encode(cloudpickle.dumps(self)).decode("ascii")

    @staticmethod
    def decode(data: str) -> "TuningParametersTemplate":
        """Retrieve the template from a base64 string"""
        # NOTE(review): unpickling can execute arbitrary code; only decode
        # strings produced by ``encode`` from trusted sources
        return cloudpickle.loads(base64.b64decode(data.encode("ascii")))  # type: ignore

    def product_grid(self) -> Iterable["TuningParametersTemplate"]:
        """cross product all grid parameters

        :yield: new templates with the grid paramters filled

        .. code-block:: python

            assert [dict(a=1,b=Rand(0,1)), dict(a=2,b=Rand(0,1))] == \
                list(to_template(dict(a=Grid(1,2),b=Rand(0,1))).product_grid())
        """
        if not self.has_grid:
            yield self
        else:
            # (unit index, candidate values) for each Grid expression
            gu: List[Tuple[int, List[Any]]] = [
                (i, list(u.expr))  # type: ignore
                for i, u in enumerate(self._units)
                if isinstance(u.expr, Grid)
            ]
            yield from self._partial_fill(
                [x[0] for x in gu],
                product([data for _, data in gu], safe=True, remove_empty=True),
            )

    def sample(self, n: int, seed: Any = None) -> Iterable["TuningParametersTemplate"]:
        """sample all stochastic parameters

        :param n: number of samples, must be a positive integer
        :param seed: random seed defaulting to None.
            It will take effect if it is not None.

        :yield: new templates with the grid paramters filled

        .. code-block:: python

            assert [dict(a=1.1,b=Grid(0,1)), dict(a=1.5,b=Grid(0,1))] == \
                list(to_template(dict(a=Rand(1,2),b=Grid(0,1))).sample(2,0))
        """
        assert_or_throw(n > 0, ValueError("sample count must be positive"))
        if not self.has_stochastic:
            yield self
        else:
            if seed is not None:
                np.random.seed(seed)
            # (unit index, n pre-drawn values) for each stochastic expression
            gu: List[Tuple[int, List[Any]]] = [
                (i, u.expr.generate_many(n))
                for i, u in enumerate(self._units)
                if isinstance(u.expr, StochasticExpression)
            ]
            # zip transposes: one value per expression for each of the n samples
            yield from self._partial_fill(
                [x[0] for x in gu], zip(*[data for _, data in gu])
            )

    def concat(self, other: "TuningParametersTemplate") -> "TuningParametersTemplate":
        """Concatenate with another template and generate a new template.

        .. note::

            The other template must not have any key existed in this template, otherwise
            ``ValueError`` will be raised

        :return: the merged template
        """
        res = TuningParametersTemplate({})
        res._units = [x.copy() for x in self._units]
        res._has_grid = self._has_grid | other._has_grid
        res._has_stochastic = self._has_stochastic | other._has_stochastic
        res._template = dict(self._template)
        res._func_positions = self._func_positions + other._func_positions
        for k, v in other._template.items():
            assert_or_throw(
                k not in res._template,
                ValueError(f"{k} already exists in the original template"),
            )
            res._template[k] = v
        if not other.empty:
            # merge positions for expression objects shared by both templates
            # (identity-based, consistent with _add)
            temp_map = {id(x.expr): x for x in res._units}
            for u in other._units:
                if id(u.expr) in temp_map:
                    temp_map[id(u.expr)].positions += u.positions
                else:
                    res._units.append(u.copy())
        return res

    def _fill_funcs(self, obj: Dict[str, Any]) -> Dict[str, Any]:
        # call every FuncParam in place; their arguments were filled already
        def realize_func(path: List[Any]) -> None:
            r: Any = obj
            for p in path[:-1]:
                r = r[p]
            # replace the FuncParam at the end of the path with its result
            r[path[-1]] = r[path[-1]]()

        for path in self._func_positions:
            realize_func(path)
        return obj

    def _copy(self, src: Any, keys: List[Any], idx: Dict[int, _MapUnit]) -> Any:
        # recursively copy ``src``, replacing tuning expressions with None and
        # recording their key paths; ``idx`` dedupes expressions by object id
        if isinstance(src, dict):
            ddest: Dict[str, Any] = {}
            for k, v in src.items():
                nk = keys + [k]
                if isinstance(v, TuningParameterExpression):
                    ddest[k] = None
                    self._add(nk, v, idx)
                else:
                    ddest[k] = self._copy(v, nk, idx)
            return ddest
        elif isinstance(src, list):
            adest: List[Any] = []
            for i in range(len(src)):
                nk = keys + [i]
                if isinstance(src[i], TuningParameterExpression):
                    adest.append(None)
                    self._add(nk, src[i], idx)
                else:
                    adest.append(self._copy(src[i], nk, idx))
            return adest
        elif isinstance(src, FuncParam):
            self._func_positions.append(keys)
            # expressions inside the FuncParam arguments are collected with
            # the FuncParam's own path as base; _fill_path later reaches them
            # through FuncParam.__setitem__
            args = self._copy(src._args, keys, idx)
            kwargs = self._copy(src._kwargs, keys, idx)
            return FuncParam(src._func, *args, **kwargs)
        else:
            return src

    def _add(
        self, keys: List[Any], expr: TuningParameterExpression, idx: Dict[int, _MapUnit]
    ) -> None:
        # register one occurrence of ``expr``; identical objects (by id) map
        # to the same unit so they all receive the same filled value
        if id(expr) not in idx:
            mu = _MapUnit(expr)
            self._units.append(mu)
            idx[id(expr)] = mu
        else:
            mu = idx[id(expr)]
        mu.positions.append(keys)
        if isinstance(expr, Grid):
            self._has_grid = True
        else:
            self._has_stochastic = True

    def _fill_path(self, root: Dict[str, Any], path: List[Any], v: Any) -> None:
        # walk ``root`` along ``path`` and set the final key/index to ``v``;
        # intermediate steps may index dicts, lists or FuncParam arguments
        r = root
        for p in path[:-1]:
            r = r[p]
        r[path[-1]] = v

    def _partial_fill(
        self, idx: List[int], params_list: Iterable[List[Any]]
    ) -> Iterable["TuningParametersTemplate"]:
        # yield one new template per value combination in ``params_list``,
        # filling only the units whose indexes are listed in ``idx``
        new_units = [u for i, u in enumerate(self._units) if i not in idx]
        has_grid = any(isinstance(x.expr, Grid) for x in new_units)
        has_stochastic = any(
            isinstance(x.expr, StochasticExpression) for x in new_units
        )
        for params in params_list:
            new_template = deepcopy(self._template)
            for pi, i in enumerate(idx):
                for path in self._units[i].positions:
                    self._fill_path(new_template, path, params[pi])
            t = TuningParametersTemplate({})
            t._units = new_units
            t._template = new_template
            t._has_grid = has_grid
            t._has_stochastic = has_stochastic
            t._func_positions = self._func_positions
            if t.empty and len(t._func_positions) > 0:
                # nothing left to tune: realize the FuncParams eagerly
                t._template = t._fill_funcs(t._template)
                t._func_positions = []
            yield t
def to_template(data: Any) -> TuningParametersTemplate:
    """Convert an object to ``TuningParametersTemplate``

    :param data: data object (``dict`` or ``TuningParametersTemplate``
        or ``str`` (encoded string))
    :return: the template object
    """
    # order matters: templates pass through, dicts are wrapped, strings
    # are assumed to be base64 payloads produced by ``encode``
    converters = (
        (TuningParametersTemplate, lambda t: t),
        (dict, TuningParametersTemplate),
        (str, TuningParametersTemplate.decode),
    )
    for accepted_type, convert in converters:
        if isinstance(data, accepted_type):
            return convert(data)
    raise ValueError(f"can't convert to template: {data}")
| [
"tune._utils.product",
"numpy.random.uniform",
"copy.deepcopy",
"numpy.random.seed",
"tune._utils.math.uniform_to_discrete",
"tune._utils.math.normal_to_discrete",
"triad.assert_or_throw",
"triad.utils.convert.get_full_type_path",
"cloudpickle.dumps",
"tune._utils.math.normal_to_continuous",
"tr... | [((1189, 1218), 'triad.to_uuid', 'to_uuid', (['"""grid"""', 'self._values'], {}), "('grid', self._values)\n", (1196, 1218), False, 'from triad import assert_or_throw, to_uuid\n'), ((2543, 2565), 'triad.to_uuid', 'to_uuid', (['self.jsondict'], {}), '(self.jsondict)\n', (2550, 2565), False, 'from triad import assert_or_throw, to_uuid\n'), ((3260, 3290), 'numpy.random.choice', 'np.random.choice', (['self._values'], {}), '(self._values)\n', (3276, 3290), True, 'import numpy as np\n'), ((6081, 6100), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (6098, 6100), True, 'import numpy as np\n'), ((8252, 8271), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (8269, 8271), True, 'import numpy as np\n'), ((9798, 9816), 'numpy.random.normal', 'np.random.normal', ([], {}), '()\n', (9814, 9816), True, 'import numpy as np\n'), ((10989, 11007), 'numpy.random.normal', 'np.random.normal', ([], {}), '()\n', (11005, 11007), True, 'import numpy as np\n'), ((13895, 13929), 'triad.to_uuid', 'to_uuid', (['self.expr', 'self.positions'], {}), '(self.expr, self.positions)\n', (13902, 13929), False, 'from triad import assert_or_throw, to_uuid\n'), ((18394, 18418), 'copy.deepcopy', 'deepcopy', (['self._template'], {}), '(self._template)\n', (18402, 18418), False, 'from copy import deepcopy\n'), ((2381, 2401), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2395, 2401), True, 'import numpy as np\n'), ((3223, 3243), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3237, 3243), True, 'import numpy as np\n'), ((4396, 4431), 'triad.assert_or_throw', 'assert_or_throw', (['(q > 0)', 'f"""{q} <= 0"""'], {}), "(q > 0, f'{q} <= 0')\n", (4411, 4431), False, 'from triad import assert_or_throw, to_uuid\n'), ((6044, 6064), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (6058, 6064), True, 'import numpy as np\n'), ((8215, 8235), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (8229, 8235), 
True, 'import numpy as np\n'), ((9761, 9781), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (9775, 9781), True, 'import numpy as np\n'), ((9867, 9934), 'tune._utils.math.normal_to_discrete', 'normal_to_discrete', (['value'], {'mean': 'self.mu', 'sigma': 'self.sigma', 'q': 'self.q'}), '(value, mean=self.mu, sigma=self.sigma, q=self.q)\n', (9885, 9934), False, 'from tune._utils.math import normal_to_continuous, normal_to_discrete, normal_to_integers, uniform_to_continuous, uniform_to_discrete, uniform_to_integers\n'), ((9968, 10027), 'tune._utils.math.normal_to_continuous', 'normal_to_continuous', (['value'], {'mean': 'self.mu', 'sigma': 'self.sigma'}), '(value, mean=self.mu, sigma=self.sigma)\n', (9988, 10027), False, 'from tune._utils.math import normal_to_continuous, normal_to_discrete, normal_to_integers, uniform_to_continuous, uniform_to_discrete, uniform_to_integers\n'), ((10952, 10972), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (10966, 10972), True, 'import numpy as np\n'), ((11040, 11107), 'tune._utils.math.normal_to_integers', 'normal_to_integers', (['value'], {'mean': 'self.mu', 'sigma': 'self.sigma', 'q': 'self.q'}), '(value, mean=self.mu, sigma=self.sigma, q=self.q)\n', (11058, 11107), False, 'from tune._utils.math import normal_to_continuous, normal_to_discrete, normal_to_integers, uniform_to_continuous, uniform_to_discrete, uniform_to_integers\n'), ((12131, 12161), 'triad.utils.convert.get_full_type_path', 'get_full_type_path', (['self._func'], {}), '(self._func)\n', (12149, 12161), False, 'from triad.utils.convert import get_full_type_path\n'), ((16080, 16116), 'triad.to_uuid', 'to_uuid', (['self._units', 'self._template'], {}), '(self._units, self._template)\n', (16087, 16116), False, 'from triad import assert_or_throw, to_uuid\n'), ((16871, 16895), 'copy.deepcopy', 'deepcopy', (['self._template'], {}), '(self._template)\n', (16879, 16895), False, 'from copy import deepcopy\n'), ((25304, 25328), 
'copy.deepcopy', 'deepcopy', (['self._template'], {}), '(self._template)\n', (25312, 25328), False, 'from copy import deepcopy\n'), ((6170, 6233), 'tune._utils.math.uniform_to_continuous', 'uniform_to_continuous', (['value', 'self.low', 'self.high'], {'log': 'self.log'}), '(value, self.low, self.high, log=self.log)\n', (6191, 6233), False, 'from tune._utils.math import normal_to_continuous, normal_to_discrete, normal_to_integers, uniform_to_continuous, uniform_to_discrete, uniform_to_integers\n'), ((6304, 6411), 'tune._utils.math.uniform_to_discrete', 'uniform_to_discrete', (['value', 'self.low', 'self.high'], {'q': 'self.q', 'log': 'self.log', 'include_high': 'self.include_high'}), '(value, self.low, self.high, q=self.q, log=self.log,\n include_high=self.include_high)\n', (6323, 6411), False, 'from tune._utils.math import normal_to_continuous, normal_to_discrete, normal_to_integers, uniform_to_continuous, uniform_to_discrete, uniform_to_integers\n'), ((21111, 21131), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (21125, 21131), True, 'import numpy as np\n'), ((19271, 19294), 'cloudpickle.dumps', 'cloudpickle.dumps', (['self'], {}), '(self)\n', (19288, 19294), False, 'import cloudpickle\n'), ((20275, 20338), 'tune._utils.product', 'product', (['[data for _, data in gu]'], {'safe': '(True)', 'remove_empty': '(True)'}), '([data for _, data in gu], safe=True, remove_empty=True)\n', (20282, 20338), False, 'from tune._utils import product\n')] |
#######################################################
# #
# This file is test #
# maybe contains wrong logical operation #
# ignore this file #
# #
#######################################################
import cv2
import numpy as np
from os import path
import matplotlib.pyplot as plt
from skimage.util import random_noise
# Load the two test images relative to this script's location
# (../../images/{cameraman.tif, mandril.tiff}).
cameraman = cv2.imread(path.abspath(
    path.join(__file__, "..", "..", "images", "cameraman.tif")))
# OpenCV reads BGR; convert to RGB so matplotlib displays colors correctly.
cameraman = cv2.cvtColor(cameraman, cv2.COLOR_BGR2RGB)
babon = cv2.imread(path.abspath(
    path.join(__file__, "..", "..", "images", "mandril.tiff")))
babon = cv2.cvtColor(babon, cv2.COLOR_BGR2RGB)
# Side-by-side preview of the raw inputs (disabled).
# f, subplt = plt.subplots(1, 2, figsize=(10, 5))
# subplt[0].imshow(cameraman)
# subplt[0].set_title("Cameraman")
# subplt[1].imshow(babon)
# subplt[1].set_title("Babon")
def log_transform(image):
    """Apply a log intensity transform to an image with values in [0, 1].

    The image is first rescaled to [0, 255]; the constant ``c`` is chosen
    so that the maximum intensity 255 maps back onto 255.
    """
    scaled = 255 * image
    scale_constant = 255 / np.log(256)
    return scale_constant * np.log(1 + scaled)
def exp_transform(image):
    """Invert ``log_transform``: map a log-domain image back to [0, 255].

    The forward transform is ``s = c * log(1 + r)`` with
    ``c = 255 / log(256)``, so the exact inverse is ``r = exp(s / c) - 1``.
    The original code omitted the ``- 1``, which introduced a constant +1
    intensity bias in every round-tripped pixel (the file header itself
    flags a "wrong logical operation").
    """
    c = 255 / np.log(1 + 255)
    return np.exp(image / c) - 1
# Corrupt each image with salt & pepper and Gaussian noise (random_noise
# returns floats in [0, 1]) and move the result into the log domain.
sp_noised_cameraman = log_transform(random_noise(cameraman, mode='s&p'))
gaussian_noised_cameraman = log_transform(
    random_noise(cameraman, mode='gaussian'))
sp_noised_babon = log_transform(random_noise(babon, mode='s&p'))
gaussian_noised_babon = log_transform(random_noise(babon, mode='gaussian'))
# Mean-filter the noisy images.  NOTE(review): only the first result is
# mapped back out of the log domain via exp_transform; the other three
# stay in the log domain — confirm whether that asymmetry is intended.
sp_noised_cameraman_mean_filer_3 = exp_transform(
    cv2.blur(sp_noised_cameraman, (3, 3)))
sp_noised_cameraman_mean_filer_5 = cv2.blur(sp_noised_cameraman, (5, 5))
sp_noised_babon_mean_filer_3 = cv2.blur(sp_noised_babon, (3, 3))
sp_noised_babon_mean_filer_5 = cv2.blur(sp_noised_babon, (5, 5))
plt.imshow(sp_noised_cameraman_mean_filer_3.astype('uint8'))
plt.show()
| [
"matplotlib.pyplot.show",
"numpy.log",
"cv2.cvtColor",
"skimage.util.random_noise",
"cv2.blur",
"numpy.exp",
"os.path.join"
] | [((628, 670), 'cv2.cvtColor', 'cv2.cvtColor', (['cameraman', 'cv2.COLOR_BGR2RGB'], {}), '(cameraman, cv2.COLOR_BGR2RGB)\n', (640, 670), False, 'import cv2\n'), ((777, 815), 'cv2.cvtColor', 'cv2.cvtColor', (['babon', 'cv2.COLOR_BGR2RGB'], {}), '(babon, cv2.COLOR_BGR2RGB)\n', (789, 815), False, 'import cv2\n'), ((1628, 1665), 'cv2.blur', 'cv2.blur', (['sp_noised_cameraman', '(5, 5)'], {}), '(sp_noised_cameraman, (5, 5))\n', (1636, 1665), False, 'import cv2\n'), ((1698, 1731), 'cv2.blur', 'cv2.blur', (['sp_noised_babon', '(3, 3)'], {}), '(sp_noised_babon, (3, 3))\n', (1706, 1731), False, 'import cv2\n'), ((1763, 1796), 'cv2.blur', 'cv2.blur', (['sp_noised_babon', '(5, 5)'], {}), '(sp_noised_babon, (5, 5))\n', (1771, 1796), False, 'import cv2\n'), ((1860, 1870), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1868, 1870), True, 'import matplotlib.pyplot as plt\n'), ((1174, 1191), 'numpy.exp', 'np.exp', (['(image / c)'], {}), '(image / c)\n', (1180, 1191), True, 'import numpy as np\n'), ((1231, 1266), 'skimage.util.random_noise', 'random_noise', (['cameraman'], {'mode': '"""s&p"""'}), "(cameraman, mode='s&p')\n", (1243, 1266), False, 'from skimage.util import random_noise\n'), ((1315, 1355), 'skimage.util.random_noise', 'random_noise', (['cameraman'], {'mode': '"""gaussian"""'}), "(cameraman, mode='gaussian')\n", (1327, 1355), False, 'from skimage.util import random_noise\n'), ((1390, 1421), 'skimage.util.random_noise', 'random_noise', (['babon'], {'mode': '"""s&p"""'}), "(babon, mode='s&p')\n", (1402, 1421), False, 'from skimage.util import random_noise\n'), ((1461, 1497), 'skimage.util.random_noise', 'random_noise', (['babon'], {'mode': '"""gaussian"""'}), "(babon, mode='gaussian')\n", (1473, 1497), False, 'from skimage.util import random_noise\n'), ((1554, 1591), 'cv2.blur', 'cv2.blur', (['sp_noised_cameraman', '(3, 3)'], {}), '(sp_noised_cameraman, (3, 3))\n', (1562, 1591), False, 'import cv2\n'), ((555, 613), 'os.path.join', 'path.join', (['__file__', 
'""".."""', '""".."""', '"""images"""', '"""cameraman.tif"""'], {}), "(__file__, '..', '..', 'images', 'cameraman.tif')\n", (564, 613), False, 'from os import path\n'), ((709, 766), 'os.path.join', 'path.join', (['__file__', '""".."""', '""".."""', '"""images"""', '"""mandril.tiff"""'], {}), "(__file__, '..', '..', 'images', 'mandril.tiff')\n", (718, 766), False, 'from os import path\n'), ((1055, 1070), 'numpy.log', 'np.log', (['(1 + 255)'], {}), '(1 + 255)\n', (1061, 1070), True, 'import numpy as np\n'), ((1086, 1103), 'numpy.log', 'np.log', (['(image + 1)'], {}), '(image + 1)\n', (1092, 1103), True, 'import numpy as np\n'), ((1146, 1161), 'numpy.log', 'np.log', (['(1 + 255)'], {}), '(1 + 255)\n', (1152, 1161), True, 'import numpy as np\n')] |
'''
3D FFT tester for Python Interface on CPUs
-- heFFTe --
Univ. of Tennessee, Knoxville
'''
import sys, math
import cmath
import numpy as np
from mpi4py import MPI
import heffte
# * Allocate and initialize data
def make_data(fftsize):
    """Allocate and initialize the FFT work buffers.

    Sets the module-level globals ``work`` (real input values
    1..fftsize as float32) and ``work2`` (2*fftsize float32 zeros,
    i.e. interleaved real/imaginary output), and also returns both
    arrays so callers need not rely on global state (the original
    returned None, so this is backward compatible).

    Parameters
    ----------
    fftsize : int
        Number of real input elements of the transform.

    Returns
    -------
    tuple of numpy.ndarray
        ``(work, work2)``
    """
    global work, work2
    # Build the arrays directly in float32 instead of allocating an
    # intermediate default-dtype array and converting with astype().
    work = np.arange(1, fftsize + 1, dtype=np.float32)
    work2 = np.zeros(2 * fftsize, dtype=np.float32)
    return work, work2
# =============
# * Main program
# =============
# MPI setup: every rank runs this script; `me` is this rank's id.
mpi_comm = MPI.COMM_WORLD
me = mpi_comm.rank
nprocs = mpi_comm.size
# Define the global cube geometry.
size_fft = [2, 2, 2]  # = [nx, ny, nz]
# The world box spans indices [0, n-1] along each axis.
world = heffte.box3d([0, 0, 0], [size_fft[0]-1, size_fft[1]-1, size_fft[2]-1])
fftsize = world.count()
# Create a processor grid (if user does not have one).
proc_i = heffte.proc_setup(world, nprocs)
proc_o = heffte.proc_setup(world, nprocs)
# Distribute sub-boxes among processors.
inboxes = heffte.split_world(world, proc_i)
outboxes = heffte.split_world(world, proc_o)
# Create the FFT plan for this rank's input/output boxes.
fft = heffte.fft3d(heffte.backend.fftw, inboxes[me], outboxes[me], mpi_comm)
# NOTE: If user has a different split function to define low and high vertices, can do as follows:
# low_me = [x,x,x]
# high_me = [x,x,x]
# order_me = [x,x,x]
# fft = heffte.fft3d(heffte.backend.fftw, heffte.box3d(low_me, high_me), heffte.box3d(low_me, high_me), mpi_comm)
# fft = heffte.fft3d(heffte.backend.fftw, heffte.box3d(low_me, high_me, order_me), heffte.box3d(low_me, high_me, order_me), mpi_comm)
# Initialize data (fills the module-level globals `work` and `work2`).
make_data(fftsize)
print("Initial data:")
print(work)
mpi_comm.Barrier()
time1 = MPI.Wtime()
# Forward transform: reads `work`, writes interleaved output into `work2`.
fft.forward(work, work2, heffte.scale.none)
mpi_comm.Barrier()
print("---------------------------")
print("\nComputed FFT:")
# work2 holds interleaved real/imag float32 pairs; view them as complex64.
print(work2.view(dtype=np.complex64))
# NOTE(review): time2 is taken after the Barrier and the prints above, so
# the reported time includes print overhead — confirm intent.
time2 = MPI.Wtime()
t_exec = time2 - time1
# Flop estimate 5*N*log(N).  NOTE(review): the conventional FFT count uses
# log2(N); this uses the natural log as written — left unchanged.
Gflops = 5*fftsize*math.log(fftsize) / t_exec / 1E9
print("--------------------------")
print(f"Execution time = {t_exec:.2g}")
print(f"Gflop/s = {Gflops:.2g}")
print("---------------------------")
| [
"heffte.box3d",
"mpi4py.MPI.Wtime",
"heffte.split_world",
"numpy.zeros",
"heffte.fft3d",
"numpy.arange",
"heffte.proc_setup",
"math.log"
] | [((582, 658), 'heffte.box3d', 'heffte.box3d', (['[0, 0, 0]', '[size_fft[0] - 1, size_fft[1] - 1, size_fft[2] - 1]'], {}), '([0, 0, 0], [size_fft[0] - 1, size_fft[1] - 1, size_fft[2] - 1])\n', (594, 658), False, 'import heffte\n'), ((741, 773), 'heffte.proc_setup', 'heffte.proc_setup', (['world', 'nprocs'], {}), '(world, nprocs)\n', (758, 773), False, 'import heffte\n'), ((783, 815), 'heffte.proc_setup', 'heffte.proc_setup', (['world', 'nprocs'], {}), '(world, nprocs)\n', (800, 815), False, 'import heffte\n'), ((868, 901), 'heffte.split_world', 'heffte.split_world', (['world', 'proc_i'], {}), '(world, proc_i)\n', (886, 901), False, 'import heffte\n'), ((913, 946), 'heffte.split_world', 'heffte.split_world', (['world', 'proc_o'], {}), '(world, proc_o)\n', (931, 946), False, 'import heffte\n'), ((968, 1038), 'heffte.fft3d', 'heffte.fft3d', (['heffte.backend.fftw', 'inboxes[me]', 'outboxes[me]', 'mpi_comm'], {}), '(heffte.backend.fftw, inboxes[me], outboxes[me], mpi_comm)\n', (980, 1038), False, 'import heffte\n'), ((1564, 1575), 'mpi4py.MPI.Wtime', 'MPI.Wtime', ([], {}), '()\n', (1573, 1575), False, 'from mpi4py import MPI\n'), ((1751, 1762), 'mpi4py.MPI.Wtime', 'MPI.Wtime', ([], {}), '()\n', (1760, 1762), False, 'from mpi4py import MPI\n'), ((289, 314), 'numpy.arange', 'np.arange', (['(1)', '(fftsize + 1)'], {}), '(1, fftsize + 1)\n', (298, 314), True, 'import numpy as np\n'), ((343, 364), 'numpy.zeros', 'np.zeros', (['(2 * fftsize)'], {}), '(2 * fftsize)\n', (351, 364), True, 'import numpy as np\n'), ((1805, 1822), 'math.log', 'math.log', (['fftsize'], {}), '(fftsize)\n', (1813, 1822), False, 'import sys, math\n')] |
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Layer
from tensorflow.keras.layers import (Input, Add, BatchNormalization,
Conv2D, Conv3D, Dense, ReLU, LeakyReLU,
UpSampling2D, UpSampling3D)
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.applications import vgg19
from . import create_vgg_model_2d, create_vgg_model_3d
import numpy as np
import os
import matplotlib.pyplot as plot
import ants
class SuperResolutionGanModel(object):
    """
    Super resolution GAN model

    Super resolution generative adversarial network from the paper:

      https://arxiv.org/abs/1609.04802

    and ported from the Keras implementation:

      https://github.com/eriklindernoren/Keras-GAN/blob/master/wgan/wgan.py

    Arguments
    ---------
    low_resolution_image_size : tuple
        Size of the input image: the image dimensions followed by the number
        of channels (e.g., red, green, and blue).

    scale_factor : integer
        Upsampling factor for the output super-resolution image
        (one of 1, 2, 4, or 8).

    use_image_net_weights : boolean
        Determines whether or not one uses the image-net weights.  Only valid
        for 2-D images.

    number_of_residual_blocks : integer
        Number of residual blocks used in constructing the generator.

    number_of_filters_at_base_layer : tuple of length 2
        Number of filters at the base layer for the generator and discriminator,
        respectively.

    Returns
    -------
    Keras model
        A Keras model defining the network.
    """

    def __init__(self, low_resolution_image_size, scale_factor=2,
                 use_image_net_weights=True, number_of_residual_blocks=16,
                 number_of_filters_at_base_layer=(64, 64)):
        super(SuperResolutionGanModel, self).__init__()

        self.low_resolution_image_size = low_resolution_image_size
        # Channels-last convention: the last entry of the size tuple.
        self.number_of_channels = self.low_resolution_image_size[-1]
        self.number_of_residual_blocks = number_of_residual_blocks
        self.number_of_filters_at_base_layer = number_of_filters_at_base_layer
        self.use_image_net_weights = use_image_net_weights

        self.scale_factor = scale_factor
        if self.scale_factor not in (1, 2, 4, 8):
            raise ValueError("Scale factor must be one of 1, 2, 4, or 8.")

        # Dimensionality is inferred from the length of the size tuple
        # (spatial dimensions + one channel dimension).
        self.dimensionality = None
        if len(self.low_resolution_image_size) == 3:
            self.dimensionality = 2
        elif len(self.low_resolution_image_size) == 4:
            self.dimensionality = 3
            if self.use_image_net_weights == True:
                # Pretrained vgg19 imagenet weights only exist for 2-D inputs.
                self.use_image_net_weights = False
                print("Warning: imageNet weights are unavailable for 3D.")
        else:
            raise ValueError("Incorrect size for low_resolution_image_size.")

        optimizer = Adam(lr=0.0002, beta_1=0.5)

        # High-resolution size: spatial dimensions scaled by scale_factor,
        # channel count unchanged.
        tmp = list(self.low_resolution_image_size)
        for i in range(self.dimensionality):
            tmp[i] *= self.scale_factor
        self.high_resolution_image_size = tuple(tmp)

        # Images
        high_resolution_image = Input(shape=self.high_resolution_image_size)
        low_resolution_image = Input(shape=self.low_resolution_image_size)

        # Build generator
        self.generator = self.build_generator()
        fake_high_resolution_image = self.generator(low_resolution_image)

        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss='mse',
            optimizer=optimizer, metrics=['acc'])

        # Frozen VGG feature extractor used for the perceptual loss
        self.vgg_model = self.build_truncated_vgg_model()
        self.vgg_model.trainable = False
        self.vgg_model.compile(loss='mse', optimizer=optimizer, metrics=['acc'])

        # Patch-GAN style validity-map shape produced by the discriminator.
        if self.dimensionality == 2:
            self.discriminator_patch_size = (16, 16, 1)
        else:
            self.discriminator_patch_size = (16, 16, 16, 1)

        # Freeze the discriminator inside the combined (generator) model.
        self.discriminator.trainable = False
        validity = self.discriminator(fake_high_resolution_image)

        # Combined model: adversarial loss, plus the VGG perceptual loss
        # when imagenet weights are in play.
        if self.use_image_net_weights == True:
            fake_features = self.vgg_model(fake_high_resolution_image)
            self.combined_model = Model(inputs=[low_resolution_image, high_resolution_image],
                                        outputs=[validity, fake_features])
            self.combined_model.compile(loss=['binary_crossentropy', 'mse'],
                loss_weights=[1e-3, 1], optimizer=optimizer)
        else:
            self.combined_model = Model(inputs=[low_resolution_image, high_resolution_image],
                                        outputs=validity)
            self.combined_model.compile(loss=['binary_crossentropy'], optimizer=optimizer)

    def build_truncated_vgg_model(self):
        """Build a VGG-19 truncated after its 9th layer.

        The truncated network is used as a fixed feature extractor for the
        perceptual (content) loss of the combined model.
        """
        vgg_tmp = None
        if self.dimensionality == 2:
            if self.use_image_net_weights == True:
                # Build at the canonical imagenet input size so the
                # pretrained weights can be copied over layer-for-layer.
                vgg_tmp = create_vgg_model_2d((224, 224, 3), style=19)
                keras_vgg = vgg19(weights='imagenet')
                vgg_tmp.set_weights(keras_vgg.get_weights())
            else:
                vgg_tmp = create_vgg_model_2d(
                    self.high_resolution_image_size, style=19)
        else:
            vgg_tmp = create_vgg_model_3d(self.high_resolution_image_size, style=19)

        # Truncate: expose the activations of layer 9 as the output.
        vgg_tmp.outputs = [vgg_tmp.layers[9].output]

        high_resolution_image = Input(shape=self.high_resolution_image_size)
        high_resolution_image_features = vgg_tmp(high_resolution_image)

        vgg_model = Model(inputs=high_resolution_image,
                          outputs=high_resolution_image_features)
        return(vgg_model)

    def build_generator(self, number_of_filters=64):
        """Build the SRGAN generator.

        Architecture: a 9x9 pre-residual convolution, a chain of residual
        blocks with a long skip connection, one upsampling block per factor
        of 2, and a final 9x9 tanh convolution back to the channel count.
        """

        def build_residual_block(input, number_of_filters, kernel_size=3):
            # Conv -> ReLU -> BN -> Conv -> BN with an identity shortcut.
            shortcut = input
            if self.dimensionality == 2:
                input = Conv2D(filters=number_of_filters,
                               kernel_size=kernel_size,
                               strides=1,
                               padding='same')(input)
            else:
                input = Conv3D(filters=number_of_filters,
                               kernel_size=kernel_size,
                               strides=1,
                               padding='same')(input)
            input = ReLU()(input)
            input = BatchNormalization(momentum=0.8)(input)
            if self.dimensionality == 2:
                input = Conv2D(filters=number_of_filters,
                               kernel_size=kernel_size,
                               strides=1,
                               padding='same')(input)
            else:
                input = Conv3D(filters=number_of_filters,
                               kernel_size=kernel_size,
                               strides=1,
                               padding='same')(input)
            input = BatchNormalization(momentum=0.8)(input)
            input = Add()([input, shortcut])
            return(input)

        def build_deconvolution_layer(input, number_of_filters=256, kernel_size=3):
            # Upsample -> Conv -> ReLU.
            # Bug fix: the convolution result was previously assigned to a
            # dead variable (`input`) and discarded, so the upsampling block
            # contained no convolution at all.
            model = input
            if self.dimensionality == 2:
                model = UpSampling2D(size=2)(model)
                model = Conv2D(filters=number_of_filters,
                               kernel_size=kernel_size,
                               strides=1,
                               padding='same')(model)
            else:
                model = UpSampling3D(size=2)(model)
                model = Conv3D(filters=number_of_filters,
                               kernel_size=kernel_size,
                               strides=1,
                               padding='same')(model)
            model = ReLU()(model)
            return(model)

        image = Input(shape=self.low_resolution_image_size)

        # Pre-residual convolution (kept for the long skip connection).
        pre_residual = image
        if self.dimensionality == 2:
            pre_residual = Conv2D(filters=number_of_filters,
                                   kernel_size=9,
                                   strides=1,
                                   padding='same')(pre_residual)
        else:
            pre_residual = Conv3D(filters=number_of_filters,
                                   kernel_size=9,
                                   strides=1,
                                   padding='same')(pre_residual)

        # Chain of residual blocks.
        residuals = build_residual_block(pre_residual,
            number_of_filters=self.number_of_filters_at_base_layer[0])
        for i in range(self.number_of_residual_blocks - 1):
            residuals = build_residual_block(residuals,
                number_of_filters=self.number_of_filters_at_base_layer[0])

        # Post-residual convolution plus the long skip from pre_residual.
        post_residual = residuals
        if self.dimensionality == 2:
            post_residual = Conv2D(filters=number_of_filters,
                                    kernel_size=3,
                                    strides=1,
                                    padding='same')(post_residual)
        else:
            post_residual = Conv3D(filters=number_of_filters,
                                    kernel_size=3,
                                    strides=1,
                                    padding='same')(post_residual)
        post_residual = BatchNormalization(momentum=0.8)(post_residual)
        model = Add()([post_residual, pre_residual])

        # One upsampling block per factor of 2 in scale_factor.
        if self.scale_factor >= 2:
            model = build_deconvolution_layer(model)
        if self.scale_factor >= 4:
            model = build_deconvolution_layer(model)
        if self.scale_factor == 8:
            model = build_deconvolution_layer(model)

        # Final reconstruction back to the requested number of channels.
        if self.dimensionality == 2:
            model = Conv2D(filters=self.number_of_channels,
                           kernel_size=9,
                           strides=1,
                           padding='same',
                           activation='tanh')(model)
        else:
            model = Conv3D(filters=self.number_of_channels,
                           kernel_size=9,
                           strides=1,
                           padding='same',
                           activation='tanh')(model)

        generator = Model(inputs=image, outputs=model)
        return(generator)

    def build_discriminator(self):
        """Build the SRGAN discriminator.

        A stack of (optionally strided) convolution blocks followed by a
        dense layer and a sigmoid validity output.
        """

        def build_layer(input, number_of_filters, strides=1, kernel_size=3,
                        normalization=True):
            # Conv -> LeakyReLU (-> BN).
            layer = input
            if self.dimensionality == 2:
                layer = Conv2D(filters=number_of_filters,
                               kernel_size=kernel_size,
                               strides=strides,
                               padding='same')(layer)
            else:
                # Bug fix: the 3-D branch previously used Conv2D, which
                # cannot operate on 5-D (volumetric) tensors.
                layer = Conv3D(filters=number_of_filters,
                               kernel_size=kernel_size,
                               strides=strides,
                               padding='same')(layer)
            layer = LeakyReLU(alpha=0.2)(layer)
            if normalization == True:
                layer = BatchNormalization(momentum=0.8)(layer)
            return(layer)

        image = Input(shape=self.high_resolution_image_size)

        model = build_layer(image, self.number_of_filters_at_base_layer[1],
            normalization=False)
        model = build_layer(model, self.number_of_filters_at_base_layer[1],
            strides=2)
        model = build_layer(model, self.number_of_filters_at_base_layer[1] * 2)
        model = build_layer(model, self.number_of_filters_at_base_layer[1] * 2,
            strides=2)
        model = build_layer(model, self.number_of_filters_at_base_layer[1] * 4)
        model = build_layer(model, self.number_of_filters_at_base_layer[1] * 4,
            strides=2)
        model = build_layer(model, self.number_of_filters_at_base_layer[1] * 8)
        model = build_layer(model, self.number_of_filters_at_base_layer[1] * 8,
            strides=2)

        model = Dense(units=self.number_of_filters_at_base_layer[1] * 16)(model)
        model = LeakyReLU(alpha=0.2)(model)
        validity = Dense(units=1, activation='sigmoid')(model)

        discriminator = Model(inputs=image, outputs=validity)
        return(discriminator)

    def train(self, X_train_low_resolution, X_train_high_resolution, number_of_epochs, batch_size=128,
              sample_interval=None, sample_file_prefix='sample'):
        """Alternately train the discriminator and the combined model.

        Each "epoch" draws one random mini-batch, trains the discriminator
        on real vs. generated high-resolution images, then trains the
        combined (generator) model.

        Arguments
        ---------
        X_train_low_resolution : numpy array
            Low-resolution training images (batch first, channels last).
        X_train_high_resolution : numpy array
            Corresponding high-resolution training images.
        number_of_epochs : integer
            Number of training iterations (one random batch each).
        batch_size : integer
            Mini-batch size.
        sample_interval : integer
            If not None (2-D only), save a 2x2 sample figure of predicted
            vs. original images every `sample_interval` epochs.
        sample_file_prefix : string
            Path prefix used to name the sample figures.
        """
        # Per-patch discriminator targets: 1 = real, 0 = generated.
        valid = np.ones((batch_size, *self.discriminator_patch_size))
        fake = np.zeros((batch_size, *self.discriminator_patch_size))

        for epoch in range(number_of_epochs):
            # Draw a random mini-batch.
            indices = np.random.randint(0, X_train_low_resolution.shape[0] - 1, batch_size)

            low_resolution_images = None
            high_resolution_images = None
            if self.dimensionality == 2:
                low_resolution_images = X_train_low_resolution[indices,:,:,:]
                high_resolution_images = X_train_high_resolution[indices,:,:,:]
            else:
                low_resolution_images = X_train_low_resolution[indices,:,:,:,:]
                high_resolution_images = X_train_high_resolution[indices,:,:,:,:]

            # Train discriminator on real and generated batches.
            fake_high_resolution_images = self.generator.predict(low_resolution_images)
            d_loss_real = self.discriminator.train_on_batch(high_resolution_images, valid)
            d_loss_fake = self.discriminator.train_on_batch(fake_high_resolution_images, fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # Train generator (through the combined model).
            g_loss = None
            if self.use_image_net_weights == True:
                image_features = self.vgg_model.predict(high_resolution_images)
                g_loss = self.combined_model.train_on_batch(
                    [low_resolution_images, high_resolution_images], [valid, image_features])
                print("Epoch ", epoch, ": [Discriminator loss: ", d_loss[0],
                      "] ", "[Generator loss: ", g_loss[0], "] ")
            else:
                g_loss = self.combined_model.train_on_batch(
                    [low_resolution_images, high_resolution_images], valid)
                print("Epoch ", epoch, ": [Discriminator loss: ", d_loss[0],
                      "] ", "[Generator loss: ", g_loss, "] ")

            if self.dimensionality == 2:
                if sample_interval != None:
                    if epoch % sample_interval == 0:
                        # Do a 2x2 grid:
                        #
                        #   predicted image | original high-res image
                        #   predicted image | original high-res image
                        X = list()
                        index = np.random.randint(0, X_train_low_resolution.shape[0] - 1, 1)
                        low_resolution_image = X_train_low_resolution[index,:,:,:]
                        high_resolution_image = X_train_high_resolution[index,:,:,:]
                        X.append(self.generator.predict(low_resolution_image))
                        X.append(high_resolution_image)

                        index = np.random.randint(0, X_train_low_resolution.shape[0] - 1, 1)
                        low_resolution_image = X_train_low_resolution[index,:,:,:]
                        high_resolution_image = X_train_high_resolution[index,:,:,:]
                        X.append(self.generator.predict(low_resolution_image))
                        X.append(high_resolution_image)

                        # Rescale tanh output from [-1, 1] to [0, 1].
                        plot_images = np.concatenate(X)
                        plot_images = 0.5 * plot_images + 0.5

                        titles = ['Predicted', 'Original']
                        figure, axes = plot.subplots(2, 2)
                        count = 0
                        for i in range(2):
                            for j in range(2):
                                axes[i, j].imshow(plot_images[count])
                                axes[i, j].set_title(titles[j])
                                axes[i, j].axis('off')
                                count += 1

                        image_file_name = sample_file_prefix + "_iteration" + str(epoch) + ".jpg"
                        dir_name = os.path.dirname(sample_file_prefix)
                        # Bug fix: guard against an empty dirname (e.g. the
                        # default prefix 'sample'), where os.mkdir("") raises.
                        if dir_name and not os.path.exists(dir_name):
                            os.mkdir(dir_name)
                        figure.savefig(image_file_name)
                        plot.close()
| [
"os.mkdir",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv3D",
"numpy.ones",
"tensorflow.keras.layers.LeakyReLU",
"numpy.random.randint",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.applications.vgg19",
"matplotlib.pyplot.close",
"os.path.dirname",
"tensorflow... | [((3104, 3131), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0002)', 'beta_1': '(0.5)'}), '(lr=0.0002, beta_1=0.5)\n', (3108, 3131), False, 'from tensorflow.keras.optimizers import Adam\n'), ((3373, 3417), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'self.high_resolution_image_size'}), '(shape=self.high_resolution_image_size)\n', (3378, 3417), False, 'from tensorflow.keras.layers import Input, Add, BatchNormalization, Conv2D, Conv3D, Dense, ReLU, LeakyReLU, UpSampling2D, UpSampling3D\n'), ((3450, 3493), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'self.low_resolution_image_size'}), '(shape=self.low_resolution_image_size)\n', (3455, 3493), False, 'from tensorflow.keras.layers import Input, Add, BatchNormalization, Conv2D, Conv3D, Dense, ReLU, LeakyReLU, UpSampling2D, UpSampling3D\n'), ((5713, 5757), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'self.high_resolution_image_size'}), '(shape=self.high_resolution_image_size)\n', (5718, 5757), False, 'from tensorflow.keras.layers import Input, Add, BatchNormalization, Conv2D, Conv3D, Dense, ReLU, LeakyReLU, UpSampling2D, UpSampling3D\n'), ((5851, 5926), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'high_resolution_image', 'outputs': 'high_resolution_image_features'}), '(inputs=high_resolution_image, outputs=high_resolution_image_features)\n', (5856, 5926), False, 'from tensorflow.keras.models import Model, Sequential\n'), ((8066, 8109), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'self.low_resolution_image_size'}), '(shape=self.low_resolution_image_size)\n', (8071, 8109), False, 'from tensorflow.keras.layers import Input, Add, BatchNormalization, Conv2D, Conv3D, Dense, ReLU, LeakyReLU, UpSampling2D, UpSampling3D\n'), ((10431, 10465), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'image', 'outputs': 'model'}), '(inputs=image, outputs=model)\n', (10436, 10465), False, 'from tensorflow.keras.models import Model, 
Sequential\n'), ((11360, 11404), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'self.high_resolution_image_size'}), '(shape=self.high_resolution_image_size)\n', (11365, 11404), False, 'from tensorflow.keras.layers import Input, Add, BatchNormalization, Conv2D, Conv3D, Dense, ReLU, LeakyReLU, UpSampling2D, UpSampling3D\n'), ((12371, 12408), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'image', 'outputs': 'validity'}), '(inputs=image, outputs=validity)\n', (12376, 12408), False, 'from tensorflow.keras.models import Model, Sequential\n'), ((12625, 12678), 'numpy.ones', 'np.ones', (['(batch_size, *self.discriminator_patch_size)'], {}), '((batch_size, *self.discriminator_patch_size))\n', (12632, 12678), True, 'import numpy as np\n'), ((12694, 12748), 'numpy.zeros', 'np.zeros', (['(batch_size, *self.discriminator_patch_size)'], {}), '((batch_size, *self.discriminator_patch_size))\n', (12702, 12748), True, 'import numpy as np\n'), ((4534, 4633), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[low_resolution_image, high_resolution_image]', 'outputs': '[validity, fake_features]'}), '(inputs=[low_resolution_image, high_resolution_image], outputs=[\n validity, fake_features])\n', (4539, 4633), False, 'from tensorflow.keras.models import Model, Sequential\n'), ((4853, 4930), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[low_resolution_image, high_resolution_image]', 'outputs': 'validity'}), '(inputs=[low_resolution_image, high_resolution_image], outputs=validity)\n', (4858, 4930), False, 'from tensorflow.keras.models import Model, Sequential\n'), ((9498, 9530), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'momentum': '(0.8)'}), '(momentum=0.8)\n', (9516, 9530), False, 'from tensorflow.keras.layers import Input, Add, BatchNormalization, Conv2D, Conv3D, Dense, ReLU, LeakyReLU, UpSampling2D, UpSampling3D\n'), ((9562, 9567), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (9565, 9567), False, 
'from tensorflow.keras.layers import Input, Add, BatchNormalization, Conv2D, Conv3D, Dense, ReLU, LeakyReLU, UpSampling2D, UpSampling3D\n'), ((12172, 12229), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': '(self.number_of_filters_at_base_layer[1] * 16)'}), '(units=self.number_of_filters_at_base_layer[1] * 16)\n', (12177, 12229), False, 'from tensorflow.keras.layers import Input, Add, BatchNormalization, Conv2D, Conv3D, Dense, ReLU, LeakyReLU, UpSampling2D, UpSampling3D\n'), ((12253, 12273), 'tensorflow.keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (12262, 12273), False, 'from tensorflow.keras.layers import Input, Add, BatchNormalization, Conv2D, Conv3D, Dense, ReLU, LeakyReLU, UpSampling2D, UpSampling3D\n'), ((12300, 12336), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': '(1)', 'activation': '"""sigmoid"""'}), "(units=1, activation='sigmoid')\n", (12305, 12336), False, 'from tensorflow.keras.layers import Input, Add, BatchNormalization, Conv2D, Conv3D, Dense, ReLU, LeakyReLU, UpSampling2D, UpSampling3D\n'), ((12819, 12888), 'numpy.random.randint', 'np.random.randint', (['(0)', '(X_train_low_resolution.shape[0] - 1)', 'batch_size'], {}), '(0, X_train_low_resolution.shape[0] - 1, batch_size)\n', (12836, 12888), True, 'import numpy as np\n'), ((5314, 5339), 'tensorflow.keras.applications.vgg19', 'vgg19', ([], {'weights': '"""imagenet"""'}), "(weights='imagenet')\n", (5319, 5339), False, 'from tensorflow.keras.applications import vgg19\n'), ((6625, 6631), 'tensorflow.keras.layers.ReLU', 'ReLU', ([], {}), '()\n', (6629, 6631), False, 'from tensorflow.keras.layers import Input, Add, BatchNormalization, Conv2D, Conv3D, Dense, ReLU, LeakyReLU, UpSampling2D, UpSampling3D\n'), ((6658, 6690), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'momentum': '(0.8)'}), '(momentum=0.8)\n', (6676, 6690), False, 'from tensorflow.keras.layers import Input, Add, BatchNormalization, Conv2D, Conv3D, Dense, 
ReLU, LeakyReLU, UpSampling2D, UpSampling3D\n'), ((7186, 7218), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'momentum': '(0.8)'}), '(momentum=0.8)\n', (7204, 7218), False, 'from tensorflow.keras.layers import Input, Add, BatchNormalization, Conv2D, Conv3D, Dense, ReLU, LeakyReLU, UpSampling2D, UpSampling3D\n'), ((7245, 7250), 'tensorflow.keras.layers.Add', 'Add', ([], {}), '()\n', (7248, 7250), False, 'from tensorflow.keras.layers import Input, Add, BatchNormalization, Conv2D, Conv3D, Dense, ReLU, LeakyReLU, UpSampling2D, UpSampling3D\n'), ((8009, 8015), 'tensorflow.keras.layers.ReLU', 'ReLU', ([], {}), '()\n', (8013, 8015), False, 'from tensorflow.keras.layers import Input, Add, BatchNormalization, Conv2D, Conv3D, Dense, ReLU, LeakyReLU, UpSampling2D, UpSampling3D\n'), ((8204, 8279), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'number_of_filters', 'kernel_size': '(9)', 'strides': '(1)', 'padding': '"""same"""'}), "(filters=number_of_filters, kernel_size=9, strides=1, padding='same')\n", (8210, 8279), False, 'from tensorflow.keras.layers import Input, Add, BatchNormalization, Conv2D, Conv3D, Dense, ReLU, LeakyReLU, UpSampling2D, UpSampling3D\n'), ((8437, 8512), 'tensorflow.keras.layers.Conv3D', 'Conv3D', ([], {'filters': 'number_of_filters', 'kernel_size': '(9)', 'strides': '(1)', 'padding': '"""same"""'}), "(filters=number_of_filters, kernel_size=9, strides=1, padding='same')\n", (8443, 8512), False, 'from tensorflow.keras.layers import Input, Add, BatchNormalization, Conv2D, Conv3D, Dense, ReLU, LeakyReLU, UpSampling2D, UpSampling3D\n'), ((9043, 9118), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'number_of_filters', 'kernel_size': '(3)', 'strides': '(1)', 'padding': '"""same"""'}), "(filters=number_of_filters, kernel_size=3, strides=1, padding='same')\n", (9049, 9118), False, 'from tensorflow.keras.layers import Input, Add, BatchNormalization, Conv2D, Conv3D, Dense, ReLU, LeakyReLU, UpSampling2D, 
UpSampling3D\n'), ((9281, 9356), 'tensorflow.keras.layers.Conv3D', 'Conv3D', ([], {'filters': 'number_of_filters', 'kernel_size': '(3)', 'strides': '(1)', 'padding': '"""same"""'}), "(filters=number_of_filters, kernel_size=3, strides=1, padding='same')\n", (9287, 9356), False, 'from tensorflow.keras.layers import Input, Add, BatchNormalization, Conv2D, Conv3D, Dense, ReLU, LeakyReLU, UpSampling2D, UpSampling3D\n'), ((9944, 10049), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'self.number_of_channels', 'kernel_size': '(9)', 'strides': '(1)', 'padding': '"""same"""', 'activation': '"""tanh"""'}), "(filters=self.number_of_channels, kernel_size=9, strides=1, padding=\n 'same', activation='tanh')\n", (9950, 10049), False, 'from tensorflow.keras.layers import Input, Add, BatchNormalization, Conv2D, Conv3D, Dense, ReLU, LeakyReLU, UpSampling2D, UpSampling3D\n'), ((10194, 10299), 'tensorflow.keras.layers.Conv3D', 'Conv3D', ([], {'filters': 'self.number_of_channels', 'kernel_size': '(9)', 'strides': '(1)', 'padding': '"""same"""', 'activation': '"""tanh"""'}), "(filters=self.number_of_channels, kernel_size=9, strides=1, padding=\n 'same', activation='tanh')\n", (10200, 10299), False, 'from tensorflow.keras.layers import Input, Add, BatchNormalization, Conv2D, Conv3D, Dense, ReLU, LeakyReLU, UpSampling2D, UpSampling3D\n'), ((11187, 11207), 'tensorflow.keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), '(alpha=0.2)\n', (11196, 11207), False, 'from tensorflow.keras.layers import Input, Add, BatchNormalization, Conv2D, Conv3D, Dense, ReLU, LeakyReLU, UpSampling2D, UpSampling3D\n'), ((13690, 13722), 'numpy.add', 'np.add', (['d_loss_real', 'd_loss_fake'], {}), '(d_loss_real, d_loss_fake)\n', (13696, 13722), True, 'import numpy as np\n'), ((6200, 6289), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'number_of_filters', 'kernel_size': 'kernel_size', 'strides': '(1)', 'padding': '"""same"""'}), "(filters=number_of_filters, 
kernel_size=kernel_size, strides=1,\n padding='same')\n", (6206, 6289), False, 'from tensorflow.keras.layers import Input, Add, BatchNormalization, Conv2D, Conv3D, Dense, ReLU, LeakyReLU, UpSampling2D, UpSampling3D\n'), ((6423, 6512), 'tensorflow.keras.layers.Conv3D', 'Conv3D', ([], {'filters': 'number_of_filters', 'kernel_size': 'kernel_size', 'strides': '(1)', 'padding': '"""same"""'}), "(filters=number_of_filters, kernel_size=kernel_size, strides=1,\n padding='same')\n", (6429, 6512), False, 'from tensorflow.keras.layers import Input, Add, BatchNormalization, Conv2D, Conv3D, Dense, ReLU, LeakyReLU, UpSampling2D, UpSampling3D\n'), ((6761, 6850), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'number_of_filters', 'kernel_size': 'kernel_size', 'strides': '(1)', 'padding': '"""same"""'}), "(filters=number_of_filters, kernel_size=kernel_size, strides=1,\n padding='same')\n", (6767, 6850), False, 'from tensorflow.keras.layers import Input, Add, BatchNormalization, Conv2D, Conv3D, Dense, ReLU, LeakyReLU, UpSampling2D, UpSampling3D\n'), ((6984, 7073), 'tensorflow.keras.layers.Conv3D', 'Conv3D', ([], {'filters': 'number_of_filters', 'kernel_size': 'kernel_size', 'strides': '(1)', 'padding': '"""same"""'}), "(filters=number_of_filters, kernel_size=kernel_size, strides=1,\n padding='same')\n", (6990, 7073), False, 'from tensorflow.keras.layers import Input, Add, BatchNormalization, Conv2D, Conv3D, Dense, ReLU, LeakyReLU, UpSampling2D, UpSampling3D\n'), ((7471, 7491), 'tensorflow.keras.layers.UpSampling2D', 'UpSampling2D', ([], {'size': '(2)'}), '(size=2)\n', (7483, 7491), False, 'from tensorflow.keras.layers import Input, Add, BatchNormalization, Conv2D, Conv3D, Dense, ReLU, LeakyReLU, UpSampling2D, UpSampling3D\n'), ((7523, 7612), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'number_of_filters', 'kernel_size': 'kernel_size', 'strides': '(1)', 'padding': '"""same"""'}), "(filters=number_of_filters, kernel_size=kernel_size, strides=1,\n 
padding='same')\n", (7529, 7612), False, 'from tensorflow.keras.layers import Input, Add, BatchNormalization, Conv2D, Conv3D, Dense, ReLU, LeakyReLU, UpSampling2D, UpSampling3D\n'), ((7751, 7771), 'tensorflow.keras.layers.UpSampling3D', 'UpSampling3D', ([], {'size': '(2)'}), '(size=2)\n', (7763, 7771), False, 'from tensorflow.keras.layers import Input, Add, BatchNormalization, Conv2D, Conv3D, Dense, ReLU, LeakyReLU, UpSampling2D, UpSampling3D\n'), ((7803, 7892), 'tensorflow.keras.layers.Conv3D', 'Conv3D', ([], {'filters': 'number_of_filters', 'kernel_size': 'kernel_size', 'strides': '(1)', 'padding': '"""same"""'}), "(filters=number_of_filters, kernel_size=kernel_size, strides=1,\n padding='same')\n", (7809, 7892), False, 'from tensorflow.keras.layers import Input, Add, BatchNormalization, Conv2D, Conv3D, Dense, ReLU, LeakyReLU, UpSampling2D, UpSampling3D\n'), ((10741, 10836), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'number_of_filters', 'kernel_size': 'kernel_size', 'strides': 'strides', 'padding': '"""same"""'}), "(filters=number_of_filters, kernel_size=kernel_size, strides=strides,\n padding='same')\n", (10747, 10836), False, 'from tensorflow.keras.layers import Input, Add, BatchNormalization, Conv2D, Conv3D, Dense, ReLU, LeakyReLU, UpSampling2D, UpSampling3D\n'), ((10975, 11070), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': 'number_of_filters', 'kernel_size': 'kernel_size', 'strides': 'strides', 'padding': '"""same"""'}), "(filters=number_of_filters, kernel_size=kernel_size, strides=strides,\n padding='same')\n", (10981, 11070), False, 'from tensorflow.keras.layers import Input, Add, BatchNormalization, Conv2D, Conv3D, Dense, ReLU, LeakyReLU, UpSampling2D, UpSampling3D\n'), ((11277, 11309), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'momentum': '(0.8)'}), '(momentum=0.8)\n', (11295, 11309), False, 'from tensorflow.keras.layers import Input, Add, BatchNormalization, Conv2D, Conv3D, Dense, ReLU, 
LeakyReLU, UpSampling2D, UpSampling3D\n'), ((14938, 14998), 'numpy.random.randint', 'np.random.randint', (['(0)', '(X_train_low_resolution.shape[0] - 1)', '(1)'], {}), '(0, X_train_low_resolution.shape[0] - 1, 1)\n', (14955, 14998), True, 'import numpy as np\n'), ((15336, 15396), 'numpy.random.randint', 'np.random.randint', (['(0)', '(X_train_low_resolution.shape[0] - 1)', '(1)'], {}), '(0, X_train_low_resolution.shape[0] - 1, 1)\n', (15353, 15396), True, 'import numpy as np\n'), ((15740, 15757), 'numpy.concatenate', 'np.concatenate', (['X'], {}), '(X)\n', (15754, 15757), True, 'import numpy as np\n'), ((15919, 15938), 'matplotlib.pyplot.subplots', 'plot.subplots', (['(2)', '(2)'], {}), '(2, 2)\n', (15932, 15938), True, 'import matplotlib.pyplot as plot\n'), ((16430, 16465), 'os.path.dirname', 'os.path.dirname', (['sample_file_prefix'], {}), '(sample_file_prefix)\n', (16445, 16465), False, 'import os\n'), ((16650, 16662), 'matplotlib.pyplot.close', 'plot.close', ([], {}), '()\n', (16660, 16662), True, 'import matplotlib.pyplot as plot\n'), ((16497, 16521), 'os.path.exists', 'os.path.exists', (['dir_name'], {}), '(dir_name)\n', (16511, 16521), False, 'import os\n'), ((16551, 16569), 'os.mkdir', 'os.mkdir', (['dir_name'], {}), '(dir_name)\n', (16559, 16569), False, 'import os\n')] |
from argparse import RawDescriptionHelpFormatter
import open3d as o3d
import numpy as np
from glob import glob
from scipy.spatial.transform import Rotation as R
import os
import pandas as pd
import argparse
# lidar_ext = [-2.502, -0.004, 2.033, 3.5, -0.2, 0 ]
# Sensor extrinsics: [x, y, z, a1, a2, a3]. The last three are Euler angles
# in degrees, consumed by get_matrix_from_ext as intrinsic 'ZYX' rotations.
lidar_ext = [0, 0, -0.3, -2.5, 0, 0]
radar_ext = [0.06, -0.2, 0.7, -3.5, 2, 180]
def csv2geometry(fname):
    """Load radar points from a CSV file into an open3d point cloud.

    The header row and the first column are skipped; columns 1-3 of the
    remaining rows are interpreted as x, y, z coordinates.
    """
    table = pd.read_table(fname, sep=",", header=None).values
    cloud = o3d.geometry.PointCloud()
    cloud.points = o3d.utility.Vector3dVector(table[1:, 1:4])
    return cloud
def get_matrix_from_ext(ext):
    """Build a 4x4 homogeneous transform from a 6-element extrinsic.

    ``ext`` is [x, y, z, a1, a2, a3]; the angles are degrees, applied as
    intrinsic 'ZYX' Euler rotations.
    """
    rotation = R.from_euler('ZYX', ext[3:], degrees=True).as_matrix()
    transform = np.eye(4)
    transform[:3, :3] = rotation
    transform[:3, 3] = np.asarray(ext[:3])
    return transform
def get_rotation(arr):
    """Return a 3x3 yaw-only rotation matrix aligned with direction ``arr``.

    Parameters
    ----------
    arr : array_like
        Direction vector; only the x and y components are used.

    Returns
    -------
    numpy.ndarray of shape (3, 3)

    Notes
    -----
    Uses ``np.arctan2(y, x)`` instead of ``np.arctan(y / x)``: it recovers
    the full quadrant and handles x == 0 without dividing by zero. For a
    symmetric bounding box a yaw flipped by pi describes the same box, so
    this stays compatible with the previous behaviour for x > 0.
    """
    x, y, _ = arr
    yaw = np.arctan2(y, x)
    return R.from_euler('XYZ', [0.0, 0.0, yaw]).as_matrix()
def get_bbx_param(obj_info):
    """Build an open3d oriented bounding box from one GT annotation row.

    Fields of ``obj_info``: [2:5] centre, [5:8] extent, [8:-1] a direction
    vector from which the yaw rotation is derived.
    """
    centre = obj_info[2:5]
    size = obj_info[5:8]
    heading = get_rotation(obj_info[8:-1])
    return o3d.geometry.OrientedBoundingBox(centre.T, heading, size.T)
def vis_pcl(load_dir):
    """Animate lidar and radar point clouds together with GT bounding boxes.

    Expects ``load_dir`` to contain ``lidar/*.pcd``, ``radar/*.csv`` and
    ``gt/*.csv`` sub-directories. Opens an open3d window and steps through
    frames from index ``start`` onwards, optionally saving screenshots.
    """
    lidar_files = sorted(glob(os.path.join(load_dir, "lidar", "*.pcd")))
    gt_files = sorted(glob(os.path.join(load_dir, "gt", "*.csv")))
    radar_files = sorted(glob(os.path.join(load_dir, "radar", "*.csv")))
    img_path = load_dir + "img_vis_all/"
    save_img = False  # flip to True to dump one PNG per rendered frame
    if save_img:
        # Recreate the screenshot directory from scratch each run.
        if not os.path.exists(img_path):
            os.mkdir(img_path)
        else:
            os.system("rm -r " + img_path)
            os.mkdir(img_path)
    # NOTE(review): start=30, gt_files[4] and lidar_files[3] look like
    # hard-coded debug indices — confirm they are intentional.
    start = 30
    gt_fname = gt_files[4]
    lidar_tr = get_matrix_from_ext(lidar_ext)
    radar_tr = get_matrix_from_ext(radar_ext)
    # Seed the geometries once so the visualizer can track them by identity;
    # later frames only swap their .points in place.
    lidar_pcd = o3d.io.read_point_cloud(lidar_files[3])
    lidar_pcd.transform(lidar_tr)
    lidar_pcd.paint_uniform_color([1, 0, 0])
    radar_pcd = o3d.geometry.PointCloud()
    radar_temp_pcd = csv2geometry(radar_files[0])
    radar_pcd.points = radar_temp_pcd.points
    radar_pcd.paint_uniform_color([0, 0, 1])
    radar_pcd.transform(radar_tr)
    vis = o3d.visualization.Visualizer()
    vis.create_window(width=1920,height=1080)
    vis.add_geometry(lidar_pcd)
    vis.add_geometry(radar_pcd)
    # print(gt_fname)
    # One oriented box per GT row of the seed frame.
    # NOTE(review): np.loadtxt returns a 1-D array for a single-row file,
    # in which case this loop would iterate scalars — confirm gt files
    # always contain more than one row.
    gt_data = np.loadtxt(gt_fname)
    box_list = []
    for obj_info in gt_data:
        obj_bbx = get_bbx_param(obj_info)
        box_list += [obj_bbx]
        vis.add_geometry(obj_bbx)
    for idx in range(start, len(lidar_files)):
        # Points are replaced before each transform, so the extrinsic is
        # applied exactly once per frame.
        temp_pcd = o3d.io.read_point_cloud(lidar_files[idx])
        lidar_pcd.points = temp_pcd.points
        lidar_pcd.paint_uniform_color([0, 0, 1])
        lidar_pcd.transform(lidar_tr)
        temp_pcd = csv2geometry(radar_files[idx])
        radar_pcd.points = temp_pcd.points
        radar_pcd.paint_uniform_color([1, 0, 0])
        radar_pcd.transform(radar_tr)
        vis.update_geometry(lidar_pcd)
        vis.update_geometry(radar_pcd)
        # Boxes cannot be updated in place: drop last frame's set and
        # re-add this frame's, keeping the camera fixed.
        for box in box_list:
            vis.remove_geometry(box, reset_bounding_box=False)
        box_list = []
        gt_data = np.loadtxt(gt_files[idx])
        for obj_info in gt_data:
            obj_bbx = get_bbx_param(obj_info)
            box_list += [obj_bbx]
            vis.add_geometry(obj_bbx, reset_bounding_box=False)
        vis.poll_events()
        vis.update_renderer()
        if save_img:
            fname = os.path.join(img_path, str(idx).zfill(9) + '.png')
            vis.capture_screen_image(fname)
def main():
    """Parse the command line and launch the point-cloud visualiser."""
    parser = argparse.ArgumentParser(description='Visualise Radar, LiDAR and GT data')
    parser.add_argument('data_root', help='path to root of directory containing unprocessed data')
    vis_pcl(parser.parse_args().data_root)

if __name__ == '__main__':
    main()
| [
"os.mkdir",
"open3d.visualization.Visualizer",
"argparse.ArgumentParser",
"os.path.join",
"open3d.geometry.OrientedBoundingBox",
"open3d.geometry.PointCloud",
"open3d.io.read_point_cloud",
"os.path.exists",
"os.system",
"open3d.utility.Vector3dVector",
"numpy.array",
"numpy.loadtxt",
"pandas... | [((446, 471), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (469, 471), True, 'import open3d as o3d\n'), ((489, 520), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['pts'], {}), '(pts)\n', (515, 520), True, 'import open3d as o3d\n'), ((577, 619), 'scipy.spatial.transform.Rotation.from_euler', 'R.from_euler', (['"""ZYX"""', 'ext[3:]'], {'degrees': '(True)'}), "('ZYX', ext[3:], degrees=True)\n", (589, 619), True, 'from scipy.spatial.transform import Rotation as R\n'), ((679, 688), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (685, 688), True, 'import numpy as np\n'), ((813, 829), 'numpy.arctan', 'np.arctan', (['(y / x)'], {}), '(y / x)\n', (822, 829), True, 'import numpy as np\n'), ((840, 861), 'numpy.array', 'np.array', (['[0, 0, yaw]'], {}), '([0, 0, yaw])\n', (848, 861), True, 'import numpy as np\n'), ((870, 896), 'scipy.spatial.transform.Rotation.from_euler', 'R.from_euler', (['"""XYZ"""', 'angle'], {}), "('XYZ', angle)\n", (882, 896), True, 'from scipy.spatial.transform import Rotation as R\n'), ((1173, 1232), 'open3d.geometry.OrientedBoundingBox', 'o3d.geometry.OrientedBoundingBox', (['center.T', 'rot_m', 'extent.T'], {}), '(center.T, rot_m, extent.T)\n', (1205, 1232), True, 'import open3d as o3d\n'), ((1876, 1915), 'open3d.io.read_point_cloud', 'o3d.io.read_point_cloud', (['lidar_files[3]'], {}), '(lidar_files[3])\n', (1899, 1915), True, 'import open3d as o3d\n'), ((2012, 2037), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (2035, 2037), True, 'import open3d as o3d\n'), ((2223, 2253), 'open3d.visualization.Visualizer', 'o3d.visualization.Visualizer', ([], {}), '()\n', (2251, 2253), True, 'import open3d as o3d\n'), ((2401, 2421), 'numpy.loadtxt', 'np.loadtxt', (['gt_fname'], {}), '(gt_fname)\n', (2411, 2421), True, 'import numpy as np\n'), ((3636, 3709), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Visualise Radar, LiDAR and GT 
data"""'}), "(description='Visualise Radar, LiDAR and GT data')\n", (3659, 3709), False, 'import argparse\n'), ((727, 746), 'numpy.array', 'np.array', (['[x, y, z]'], {}), '([x, y, z])\n', (735, 746), True, 'import numpy as np\n'), ((2642, 2683), 'open3d.io.read_point_cloud', 'o3d.io.read_point_cloud', (['lidar_files[idx]'], {}), '(lidar_files[idx])\n', (2665, 2683), True, 'import open3d as o3d\n'), ((3206, 3231), 'numpy.loadtxt', 'np.loadtxt', (['gt_files[idx]'], {}), '(gt_files[idx])\n', (3216, 3231), True, 'import numpy as np\n'), ((379, 421), 'pandas.read_table', 'pd.read_table', (['fname'], {'sep': '""","""', 'header': 'None'}), "(fname, sep=',', header=None)\n", (392, 421), True, 'import pandas as pd\n'), ((1303, 1343), 'os.path.join', 'os.path.join', (['load_dir', '"""lidar"""', '"""*.pcd"""'], {}), "(load_dir, 'lidar', '*.pcd')\n", (1315, 1343), False, 'import os\n'), ((1373, 1410), 'os.path.join', 'os.path.join', (['load_dir', '"""gt"""', '"""*.csv"""'], {}), "(load_dir, 'gt', '*.csv')\n", (1385, 1410), False, 'import os\n'), ((1443, 1483), 'os.path.join', 'os.path.join', (['load_dir', '"""radar"""', '"""*.csv"""'], {}), "(load_dir, 'radar', '*.csv')\n", (1455, 1483), False, 'import os\n'), ((1580, 1604), 'os.path.exists', 'os.path.exists', (['img_path'], {}), '(img_path)\n', (1594, 1604), False, 'import os\n'), ((1618, 1636), 'os.mkdir', 'os.mkdir', (['img_path'], {}), '(img_path)\n', (1626, 1636), False, 'import os\n'), ((1663, 1693), 'os.system', 'os.system', (["('rm -r ' + img_path)"], {}), "('rm -r ' + img_path)\n", (1672, 1693), False, 'import os\n'), ((1706, 1724), 'os.mkdir', 'os.mkdir', (['img_path'], {}), '(img_path)\n', (1714, 1724), False, 'import os\n')] |
import numpy as np
from scipy.interpolate import interp1d
class Planktonic_agent:
    '''
    Planktonic lice stage of the model.

    (Original Faroese docstring: "Hetta kemur at verða tær forskelligu
    Føstu lísnar" — roughly "these will become the various attached lice".)

    Keeps a rolling buffer of egg production over the last ``save_time``
    days and converts it to a per-day infection pressure via the matrix
    ``A``.
    '''
    def __init__(self, delta_time, save_time=40):
        '''
        :params:
        delta_time          length of one model time step in days
        save_time           how many days we look back
        '''
        self.delta_time = delta_time
        # Number of stored time steps needed to cover save_time days.
        cols = int(np.ceil(save_time/delta_time))
        # the vector that holds the data for the last {save_time} steps
        self.smittu_count = np.zeros(
            (cols),
            dtype=float
        )
        # The matrix to convert from raw data to lice per day.
        # Each column i covers the time span [i*delta_time, (i+1)*delta_time);
        # the loop distributes that span over the whole-day rows it overlaps,
        # weighting each row by the fraction of the step falling in that day.
        self.A = np.zeros((save_time, cols), dtype=float)
        for i in range(cols):
            start = i*delta_time
            start_iter = start
            end = (i+1)*delta_time
            while start_iter < min(end, save_time):
                c = np.floor(start_iter) + 1  # next whole-day boundary
                if c < end:
                    self.A[int(np.floor(start_iter)), i] = (c - start_iter)/(end - start)
                    start_iter = c
                else:
                    self.A[int(np.floor(start_iter)), i] = (end - start_iter)/(end - start)
                    start_iter = end
        # Egg production as a function of temperature.
        # NOTE(review): x presumably is temperature (degrees C) and y eggs
        # produced per female — confirm units against the data source.
        self.interp_egg = interp1d(
            x = [6,12,18], # remember which is which this should be date of fish
            y = [28.9,80.9,90.8], # remember which is which this should be number of fish
            bounds_error = False,
            fill_value = 0
        )

    def update(self, farm, temp):
        # Push the newest egg production onto the buffer (newest first) and
        # return the per-day infection pressure.
        # NOTE(review): farm.get_fordeiling()[5] presumably is the count of
        # egg-producing (adult female) lice — confirm against Farm.
        #alle = (10 * farm.get_fordeiling()[4] * 0.5 / farm.fish_count) / (1 + 10 * farm.get_fordeiling()[4] * 0.5 / farm.fish_count) # her manglar at deilast við tal av fiski
        #self.smittu_count = np.vstack((self.smittu_count, [farm.get_fordeiling()[5] * self.interp_egg(temp) * (alle*0+1)*self.delta_time, 0]))
        self.smittu_count = np.hstack(([farm.get_fordeiling()[5] * self.interp_egg(temp) * self.delta_time], self.smittu_count[:-1]))
        smittu_count_out_test = np.dot(self.A, self.smittu_count)
        return smittu_count_out_test
| [
"numpy.ceil",
"numpy.floor",
"numpy.zeros",
"scipy.interpolate.interp1d",
"numpy.dot"
] | [((526, 553), 'numpy.zeros', 'np.zeros', (['cols'], {'dtype': 'float'}), '(cols, dtype=float)\n', (534, 553), True, 'import numpy as np\n'), ((671, 711), 'numpy.zeros', 'np.zeros', (['(save_time, cols)'], {'dtype': 'float'}), '((save_time, cols), dtype=float)\n', (679, 711), True, 'import numpy as np\n'), ((1270, 1349), 'scipy.interpolate.interp1d', 'interp1d', ([], {'x': '[6, 12, 18]', 'y': '[28.9, 80.9, 90.8]', 'bounds_error': '(False)', 'fill_value': '(0)'}), '(x=[6, 12, 18], y=[28.9, 80.9, 90.8], bounds_error=False, fill_value=0)\n', (1278, 1349), False, 'from scipy.interpolate import interp1d\n'), ((2046, 2079), 'numpy.dot', 'np.dot', (['self.A', 'self.smittu_count'], {}), '(self.A, self.smittu_count)\n', (2052, 2079), True, 'import numpy as np\n'), ((394, 425), 'numpy.ceil', 'np.ceil', (['(save_time / delta_time)'], {}), '(save_time / delta_time)\n', (401, 425), True, 'import numpy as np\n'), ((914, 934), 'numpy.floor', 'np.floor', (['start_iter'], {}), '(start_iter)\n', (922, 934), True, 'import numpy as np\n'), ((998, 1018), 'numpy.floor', 'np.floor', (['start_iter'], {}), '(start_iter)\n', (1006, 1018), True, 'import numpy as np\n'), ((1145, 1165), 'numpy.floor', 'np.floor', (['start_iter'], {}), '(start_iter)\n', (1153, 1165), True, 'import numpy as np\n')] |
""" Module containing custom MPI functions """
import numpy as np
import sys
from os import getpid
from time import time
#from ...base.Error import Error as Err
def print_msg_mpi(aStr='', end='\n'):
    """Write a message to stdout and flush so the output appears immediately.

    Parameters
    ----------
    aStr : str, optional
        Message to print; non-strings are converted with ``str``.
    end : str, optional
        Suffix appended after the message, default is a newline.
    """
    text = aStr if isinstance(aStr, str) else str(aStr)
    sys.stdout.write(text + end)
    sys.stdout.flush()
def print_mpi(aStr, world, end='\n', rank=None):
    """Print to the screen with a flush, optionally from a single rank.

    Parameters
    ----------
    aStr : str
        Message to print.
    world : mpi4py.MPI.Comm
        MPI parallel communicator.
    end : str, optional
        Suffix appended after the message, default is a newline.
    rank : int, optional
        When given, only this rank prints; when None, every rank prints.
    """
    if rank is None or world.rank == rank:
        print_msg_mpi(aStr, end)
def banner(world, aStr=None, end='\n', rank=0):
    """Print a string framed by '=' separator lines above and below.

    Parameters
    ----------
    world : mpi4py.MPI.Comm
        MPI parallel communicator.
    aStr : str, optional
        Message to print; nothing is printed when None.
    end : str, optional
        Line separator, default is a newline.
    rank : int, optional
        The MPI rank to print from. Default is the master rank, 0.
    """
    if (aStr is None):
        return
    # Fixes two bugs: the message used to repeat aStr twice with no
    # separator before the closing rule, and print_mpi was called with
    # (world, msg) in swapped positional order.
    sep = "=" * 78
    msg = sep + end + aStr + end + sep
    print_mpi(msg, world, end=end, rank=rank)
def ordered_print(world, values, title=None):
    """Prints a value from each rank in order of rank.

    This routine is SLOW due to lots of communication, but is useful for
    illustration or debugging. Do not use this in production code!
    The title is used in a banner.

    Parameters
    ----------
    world : mpi4py.MPI.Comm
        MPI parallel communicator.
    values : array_like
        Variable to print, must exist on every rank in the communicator.
    title : str, optional
        Creates a banner to separate output with a clear indication of
        what is being written.
    """
    # Fix: the original referenced an undefined name 'this' instead of
    # the 'values' parameter, raising NameError on every call.
    if (world.rank > 0):
        world.send(values, dest=0, tag=14)
    else:
        banner(world, title)
        print('Rank 0 {}'.format(values))
        for i in range(1, world.size):
            tmp = world.recv(source=i, tag=14)
            print("Rank {} {}".format(i, tmp))
def hello_world(world):
    """Print hello from every rank in an MPI communicator.

    Parameters
    ----------
    world : mpi4py.MPI.Comm
        MPI parallel communicator.
    """
    # Fix: the second positional argument used to land in print_msg_mpi's
    # 'end' parameter, appending "Hello From!" instead of a newline.
    print_msg_mpi('Hello from {}/{}'.format(world.rank + 1, world.size))
def parallel_seed(world, time_function):
    """Generate a basic per-process random seed from the clock and the PID.

    Parameters
    ----------
    world : mpi4py.MPI.Comm
        MPI parallel communicator (not used directly; kept for interface
        symmetry with the other helpers).
    time_function : callable
        Zero-argument function returning the current time, usually MPI.Wtime.

    Returns
    -------
    seed : numpy.int64
        The seed on this process.
    """
    pid = getpid()
    now = time_function()
    return np.int64(np.abs(((now * 181) * ((pid - 83) * 359)) % 104729))
def parallel_prng(world, time_function):
    """Create a numpy RandomState seeded uniquely per process.

    Parameters
    ----------
    world : mpi4py.MPI.Comm
        MPI parallel communicator.
    time_function : callable
        Zero-argument function returning the current time, usually MPI.Wtime.

    Returns
    -------
    prng : numpy.random.RandomState
        Random generator seeded from the clock and process id.
    """
    # Fix: the original referenced the undefined name 'time_functions'
    # (trailing 's'), raising NameError on every call.
    return np.random.RandomState(parallel_seed(world, time_function))
def loadBalance1D_shrinkingArrays(N, n_chunks):
    """Splits the length of an array into a number of chunks. Load balances the chunks in a shrinking arrays fashion.

    Given length, N, split N up into n_chunks and return the starting index and size of each chunk.
    After being split equally among the chunks, the remainder is distributed so that chunks 0:remainder
    get +1 in size. e.g. N=10, n_chunks=3 would return starts=[0,4,7] chunks=[4,3,3]

    Parameters
    ----------
    N : int
        A size to split into chunks.
    n_chunks : int
        The number of chunks to split N into. Usually the number of ranks, world.size.

    Returns
    -------
    starts : ndarray of ints
        The starting indices of each chunk.
    chunks : ndarray of ints
        The size of each chunk.

    """
    # Fix: np.int was removed in NumPy 1.24; use floor division and a
    # concrete integer dtype instead.
    chunks = np.full(n_chunks, fill_value=N // n_chunks, dtype=np.int64)
    mod = int(N % n_chunks)
    chunks[:mod] += 1  # the first 'mod' chunks absorb the remainder
    starts = np.cumsum(chunks) - chunks[0]
    if (mod > 0):
        starts[mod:] += 1
    return starts, chunks
def loadBalance2D_shrinkingArrays(shape, n_chunks):
    """Splits the shape of a 2D array into n_chunks.

    The chunks are as close in size as possible.
    The larger axes have more chunks along that dimension.

    Parameters
    ----------
    shape : ints
        2D shape to split
    n_chunks : int
        Number of chunks

    Returns
    -------
    starts : ints
        Starting indices of each chunk. Has shape (n_chunks, 2)
    chunks : ints
        Size of each chunk. Has shape (n_chunks, 2)

    """
    assert n_chunks % 2 == 0, ValueError("n_chunks must be even.")
    # Search the factorizations i*j == n_chunks for the pair whose aspect
    # ratio best matches the array's aspect ratio.
    # NOTE(review): the search starts at i=2, so 1-by-n decompositions are
    # never considered, and for n_chunks=2 no factorization is found at all
    # (best stays None and the calls below raise) — confirm intended range.
    target = shape / np.linalg.norm(shape)
    best = None
    bestFit = 1e20
    for i in range(2, np.int32(n_chunks/2)+1):
        j = int(n_chunks/(i))
        nBlocks = np.asarray([i, j])
        total = np.prod(nBlocks)
        if total == n_chunks:
            fraction = nBlocks / np.linalg.norm(nBlocks)
            fit = np.linalg.norm(fraction - target)
            if fit < bestFit:
                best = nBlocks
                bestFit = fit
    # 1D balance each axis independently, then expand into one row of
    # (start, chunk) per 2D block.
    s0, c0 = loadBalance1D_shrinkingArrays(shape[0], best[0])
    s1, c1 = loadBalance1D_shrinkingArrays(shape[1], best[1])
    a = np.repeat(s0, int(n_chunks/s0.size))
    b = np.tile(np.repeat(s1, int(n_chunks/(s0.size*s1.size))), s0.size)
    starts = np.vstack([a, b]).T
    a = np.repeat(c0, int(n_chunks/c0.size))
    b = np.tile(np.repeat(c1, int(n_chunks/(c0.size*c1.size))), c0.size)
    chunks = np.vstack([a, b]).T
    return starts, chunks
def loadBalance3D_shrinkingArrays(shape, n_chunks):
    """Splits the shape of a 3D array into n_chunks.

    The chunks are as close in size as possible.
    The larger axes have more chunks along that dimension.

    Parameters
    ----------
    shape : ints
        3D shape to split
    n_chunks : int
        Number of chunks

    Returns
    -------
    starts : ints
        Starting indices of each chunk. Has shape (n_chunks, 3)
    chunks : ints
        Size of each chunk. Has shape (n_chunks, 3)

    """
    # Find the "optimal" three product whose prod equals n_chunks
    # and whose relative amounts match as closely to shape as possible.
    assert n_chunks % 2 == 0, ValueError("n_chunks must be even.")
    target = shape / np.linalg.norm(shape)
    best = None
    bestFit = 1e20
    # Exhaustive search over factor triples i*j*k == n_chunks; unlike the
    # 2D version this one does consider factors of 1.
    for i in range(1, int(n_chunks/2)+1):
        for j in range(1, int(n_chunks/i)):
            k = int(n_chunks/(i*j))
            nBlocks = np.asarray([i, j, k])
            total = np.prod(nBlocks)
            if total == n_chunks:
                fraction = nBlocks / np.linalg.norm(nBlocks)
                fit = np.linalg.norm(fraction - target)
                if fit < bestFit:
                    best = nBlocks
                    bestFit = fit
    # 1D balance each axis independently, then expand into one row of
    # (start, chunk) per 3D block.
    s0, c0 = loadBalance1D_shrinkingArrays(shape[0], best[0])
    s1, c1 = loadBalance1D_shrinkingArrays(shape[1], best[1])
    s2, c2 = loadBalance1D_shrinkingArrays(shape[2], best[2])
    a = np.repeat(s0, int(n_chunks/s0.size))
    b = np.tile(np.repeat(s1, int(n_chunks/(s0.size*s1.size))), s0.size)
    c = np.tile(s2, int(n_chunks/s2.size))
    starts = np.vstack([a, b, c]).T
    a = np.repeat(c0, int(n_chunks/c0.size))
    b = np.tile(np.repeat(c1, int(n_chunks/(c0.size*c1.size))), c0.size)
    c = np.tile(c2, int(n_chunks/c2.size))
    chunks = np.vstack([a, b, c]).T
    return starts, chunks
def Isend(self, dest, world, dtype=None, ndim=None, shape=None):
    """Isend wrapper.

    Automatically determines data type and shape. Must be accompanied by
    Irecv on the dest rank.

    Parameters
    ----------
    self : scalar or array_like
        The value to send.
    dest : int
        Rank to send to.
    world : mpi4py.MPI.COMM_WORLD
        MPI communicator.
    dtype : dtype, optional
        Pre-determined data type if known. Faster. Defaults to None.
    ndim : int, optional
        Number of dimensions if known. Faster. Defaults to None.
    shape : ints, optional
        Shape of the value if known. Faster. Defaults to None.
    """
    # Negotiate the data type first unless the caller already knows it.
    if dtype is None:
        dtype = _isendDtype(self, dest=dest, world=world)
    assert dtype != 'list', TypeError("Cannot Send/Recv a list")
    if ndim is None:
        ndim = Isend_1int(np.ndim(self), dest=dest, world=world)
    if ndim == 0:
        # Scalars travel as a length-1 buffer.
        world.Send(np.full(1, self, dtype=dtype), dest=dest)
    elif ndim == 1:
        if shape is None:
            Isend_1int(np.size(self), dest=dest, world=world)
        world.Send(self, dest=dest)
    elif ndim > 1:
        if shape is None:
            world.Send(np.asarray(self.shape), dest=dest)
        world.Send(self, dest=dest)
def Irecv(source, world, dtype=None, ndim=None, shape=None):
    """Irecv wrapper.

    Automatically determines data type and shape. Must be accompanied by
    Isend on the source rank.

    Parameters
    ----------
    source : int
        Rank to receive from.
    world : mpi4py.MPI.COMM_WORLD
        MPI communicator.
    dtype : dtype, optional
        Pre-determined data type if known. Defaults to None.
    ndim : int, optional
        Number of dimensions if known. Defaults to None.
    shape : ints, optional
        Shape of the value if known. Defaults to None.

    Returns
    -------
    out : scalar or array_like
        Returned type depends on what was sent.
    """
    if dtype is None:
        dtype = _irecvDtype(source, world)
    assert not dtype == 'list', TypeError("Cannot Send/Recv a list")
    if ndim is None:
        ndim = Irecv_1int(source, world)
    if (ndim == 0):  # For a single number
        this = np.empty(1, dtype=dtype)
        world.Recv(this, source=source)
        return this[0]
    if (ndim == 1):  # For a 1D array
        if shape is None:
            shape = Irecv_1int(source=source, world=world)
        this = np.empty(shape, dtype=dtype)
        world.Recv(this, source=source)
        return this
    # nD array
    if shape is None:
        # Fix: np.int was removed in NumPy 1.24; receive the shape into a
        # concrete integer dtype.
        shape = np.empty(ndim, dtype=np.int64)
        world.Recv(shape, source=source)
    this = np.empty(shape, dtype=dtype)
    world.Recv(this, source=source)
    return this
def _isendDtype(value, dest, world):
"""Gets the data type of an object and sends it.
Must be used within an if statement.
if (world.rank == source): _sendDtype()
Must be accompanied by _irecvDtype on the dest rank.
Parameters
----------
value : object
For numpy arrays and numpy scalars, a numpy data type will be sent.
For arbitrary objects, the attached __class__.__name__ will be sent.
For lists, the data type will be list
dest : int
Rank to send to.
world : mpi4py.MPI.Comm
MPI parallel communicator.
Returns
-------
out : object
The data type.
"""
try:
tmp = str(value.dtype) # Try to get the dtype attribute
except:
tmp = str(value.__class__.__name__) # Otherwise use the type finder
world.send(tmp, dest=dest)
return tmp
def _irecvDtype(source, world):
"""Receives a data type.
Must be used within an if statement.
if (world.rank == dest): _recvDtype()
Must be accompanied by _isendDtype on the source rank.
Parameters
----------
self : object
For numpy arrays and numpy scalars, a numpy data type will be received.
For arbitrary objects, the attached __class__.__name__ will be received.
For lists, the data type will be list
source : int
Receive from source
world : mpi4py.MPI.Comm
MPI parallel communicator.
Returns
-------
out : object
The data type.
"""
tmp = world.recv(source=source)
if (tmp == 'list'):
return 'list'
return eval('np.{}'.format(tmp)) # Return the evaluated string
def Isend_1int(value, dest, world):
    """Send a single integer. Must be accompanied by Irecv_1int on the dest rank.

    Parameters
    ----------
    value : int
        The integer to send.
    dest : int
        Rank to receive.
    world : mpi4py.MPI.Comm
        MPI parallel communicator.

    Returns
    -------
    out : int
        The sent integer.
    """
    buffer = np.full(1, value, np.int64)
    world.Send(buffer, dest=dest)
    return buffer[0]
def Irecv_1int(source, world):
    """Recv a single integer. Must be accompanied by Isend_1int on the source rank.

    Parameters
    ----------
    source : int
        Receive from this rank.
    world : mpi4py.MPI.Comm
        MPI parallel communicator.

    Returns
    -------
    out : int
        The received integer.
    """
    buffer = np.empty(1, np.int64)
    world.Recv(buffer, source=source)
    return buffer[0]
def IsendToLeft(self, world, wrap=True):
    """ISend an array to the rank left of world.rank, wrapping around
    from rank 0 to the last rank.

    NOTE(review): ``wrap`` is currently ignored — the destination always
    wraps; confirm intended semantics before relying on wrap=False.
    """
    Isend(self, dest=(world.rank - 1) % world.size, world=world)
def IsendToRight(self, world, wrap=True):
    """ISend an array to the rank right of world.rank, wrapping around
    from the last rank to rank 0.

    NOTE(review): ``wrap`` is currently ignored — the destination always
    wraps; confirm intended semantics before relying on wrap=False.
    """
    Isend(self, dest=(world.rank + 1) % world.size, world=world)
def IrecvFromRight(world, wrap=True):
    """IRecv an array from the rank right of world.rank, wrapping around
    from the last rank to rank 0.

    NOTE(review): ``wrap`` is currently ignored — the source always
    wraps; confirm intended semantics before relying on wrap=False.
    """
    return Irecv(source=(world.rank + 1) % world.size, world=world)
def IrecvFromLeft(world, wrap=True):
    """Irecv an array from the rank left of world.rank, wrapping around
    from rank 0 to the last rank.

    NOTE(review): ``wrap`` is currently ignored — the source always
    wraps; confirm intended semantics before relying on wrap=False.
    """
    return Irecv(source=(world.rank - 1) % world.size, world=world)
def Bcast(self, world, root=0, dtype=None, ndim=None, shape=None):
    """Broadcast a string or a numpy array

    Broadcast a string or a numpy array from a root rank to all ranks in an MPI communicator.
    Must be called collectively: the variable 'self' must be instantiated on every rank
    (as None on non-root ranks is fine).

    Parameters
    ----------
    self : str or numpy.ndarray
        A string or numpy array to broadcast from root.
    world : mpi4py.MPI.Comm
        MPI parallel communicator.
    root : int, optional
        The MPI rank to broadcast from. Default is 0.
    dtype : dtype, optional
        Pre-determined data type if known. Defaults to None.
    ndim : int, optional
        Number of dimensions if known. Defaults to None.
    shape : ints, optional
        Shape of the value if known. Defaults to None.

    Returns
    -------
    out : same type as self
        The broadcast object on every rank.

    Raises
    ------
    TypeError
        If self is a list, tell the user to use the specific Bcast_list function.
        MPI pickles lists, which is slow for many/large lists; numpy arrays
        are broadcast as raw buffers.

    Examples
    --------
    >>> import numpy as np
    >>> from mpi4py import MPI
    >>> import mpi4py_utilities as mpiu
    >>> world = MPI.COMM_WORLD
    >>> if world.rank == 0:
    >>>     x = np.arange(10)
    >>> else:  # Instantiate on all other ranks before broadcasting
    >>>     x = None
    >>> y = mpiu.Bcast(x, world)
    """
    # Strings are pickled with the lowercase bcast.
    if (type(self) == str):
        this = None
        if (world.rank == root):
            this = self
        this = world.bcast(this, root=root)
        return this
    # Broadcast the data type
    if dtype is None:
        dtype = bcast_type(self, world, root=root)
    assert dtype != 'list', TypeError("Use MPI.Bcast_list for lists")
    # Broadcast the number of dimensions
    if ndim is None:
        ndim = Bcast_1int(np.ndim(self), world, root=root)
    if (ndim == 0):  # For a single number
        this = np.empty(1, dtype=dtype)  # Initialize on each worker
        if (world.rank == root):
            this[0] = self  # Assign on the master
        # Fix: root was previously dropped here, so broadcasting a scalar
        # from any root other than rank 0 sent uninitialized data.
        world.Bcast(this, root=root)
        return this[0]
    if (ndim == 1):  # For a 1D array
        if shape is None:
            shape = Bcast_1int(np.size(self), world, root=root)  # Broadcast the array size
        this = np.empty(shape, dtype=dtype)
        if (world.rank == root):  # Assign on the root
            this[:] = self
        world.Bcast(this, root=root)
        return this
    if (ndim > 1):  # nD Array
        if shape is None:
            shape = Bcast(np.asarray(self.shape), world, root=root)  # Broadcast the shape
        this = np.empty(shape, dtype=dtype)
        if (world.rank == root):  # Assign on the root
            this[:] = self
        world.Bcast(this, root=root)
        return this
def bcast_type(self, world, root=0):
    """Gets the type of an object and broadcasts it to every rank in an MPI communicator.

    Adaptively broadcasts the type of an object. Must be called collectively.

    Parameters
    ----------
    self : object
        For numpy arrays and numpy scalars, the numpy dtype is broadcast.
        For arbitrary objects, the __class__.__name__ is broadcast.
        For lists, the string 'list' is broadcast.
    world : mpi4py.MPI.Comm
        MPI parallel communicator.
    root : int, optional
        The MPI rank to broadcast from. Default is 0.

    Returns
    -------
    out : type or str
        The data type broadcast to every rank including the rank broadcast from.
    """
    tmp = None  # Initialize tmp on all workers
    if (world.rank == root):
        # Fix: narrowed the bare 'except:' — only a missing .dtype
        # attribute should fall back to the class name.
        try:
            tmp = str(self.dtype)
        except AttributeError:
            tmp = str(self.__class__.__name__)
    tmp = world.bcast(tmp, root=root)  # Bcast out to all
    if (tmp == 'list'):
        return 'list'
    return eval('np.{}'.format(tmp))  # Return the evaluated string
def Bcast_1int(self, world, root=0):
"""Broadcast a single integer
In order to broadcast scalar values using the faster numpy approach, the value must cast into a 1D ndarray.
Must be called collectively.
Parameters
----------
self : int
The integer to broadcast.
world : mpi4py.MPI.Comm
MPI parallel communicator.
root : int, optional
The MPI rank to broadcast from. Default is 0.
Returns
-------
out : int
The broadcast integer.
Examples
--------
Given an integer instantiated on the master rank 0, in order to broadcast it, I must also instantiate a variable with the same name on all other ranks.
>>> import numpy as np
>>> from mpi4py import MPI
>>> from geobipy.src.base import MPI as myMPI
>>> world = MPI.COMM_WORLD
>>> if world.rank == 0:
>>> i = 5
>>> # Instantiate on all other ranks before broadcasting
>>> else:
>>> i=None
>>> i = myMPI.Bcast(i, world)
"""
if (world.rank == root):
this = np.full(1, self, np.int64)
else:
this = np.empty(1, np.int64)
world.Bcast(this, root=root)
return this[0]
def Bcast_list(self, world, root=0):
"""Broadcast a list by pickling, sending, and unpickling.
This is slower than using numpy arrays and uppercase (Bcast) mpi4py routines.
Must be called collectively.
Parameters
----------
self : list
A list to broadcast.
world : mpi4py.MPI.Comm
MPI parallel communicator.
root : int, optional
The MPI rank to broadcast from. Default is 0.
Returns
-------
out : list
The broadcast list on every MPI rank.
"""
this = world.bcast(self, root=root)
return this
def Scatterv(self, starts, chunks, world, axis=0, root=0):
"""ScatterV an array to all ranks in an MPI communicator.
Each rank gets a chunk defined by a starting index and chunk size. Must be called collectively.
The 'starts' and 'chunks' must be available on every MPI rank. Must be called collectively.
See the example for more details.
Parameters
----------
self : numpy.ndarray
A numpy array to broadcast from root.
starts : array of ints
1D array of ints with size equal to the number of MPI ranks. Each element gives the starting index for a chunk to be sent to that core. e.g. starts[0] is the starting index for rank = 0.
chunks : array of ints
1D array of ints with size equal to the number of MPI ranks. Each element gives the size of a chunk to be sent to that core. e.g. chunks[0] is the chunk size for rank = 0.
world : mpi4py.MPI.Comm
MPI parallel communicator.
axis : int, optional
Axis along which to Scatterv to the ranks if self is a 2D numpy array. Default is 0
root : int, optional
The MPI rank to broadcast from. Default is 0.
Returns
-------
out : numpy.ndarray
A chunk of self on each MPI rank with size chunk[world.rank].
Examples
--------
>>> import numpy as np
>>> from mpi4py import MPI
>>> from geobipy.src.base import MPI as myMPI
>>> world = MPI.COMM_WORLD
>>> # Globally define a size N
>>> N = 1000
>>> # On each rank, compute the starting indices and chunk size for the given world.
>>> starts,chunks=loadBalance_shrinkingArrays(N, world.size)
>>> # Create an array on the master rank
>>> if (world.rank == 0):
>>> x = np.arange(N)
>>> else:
>>> x = None
>>> # Scatter the array x among ranks.
>>> myChunk = myMPI.Scatterv(x, starts, chunks, world, root=0)
"""
# Brodacast the type
dtype = bcast_type(self, world, root=root)
assert dtype != 'list', TypeError("Use Scatterv_list for lists!")
return Scatterv_numpy(self, starts, chunks, dtype, world, axis, root)
def Scatterv_list(self, starts, chunks, world, root=0):
"""Scatterv a list by pickling, sending, receiving, and unpickling. This is slower than using numpy arrays and uppercase (Scatterv) mpi4py routines. Must be called collectively.
Parameters
----------
self : list
A list to scatterv.
starts : array of ints
1D array of ints with size equal to the number of MPI ranks. Each element gives the starting index for a chunk to be sent to that core. e.g. starts[0] is the starting index for rank = 0.
chunks : array of ints
1D array of ints with size equal to the number of MPI ranks. Each element gives the size of a chunk to be sent to that core. e.g. chunks[0] is the chunk size for rank = 0.
world : mpi4py.MPI.Comm
MPI parallel communicator.
root : int, optional
The MPI rank to broadcast from. Default is 0.
Returns
-------
out : list
A chunk of self on each MPI rank with size chunk[world.rank].
"""
for i in range(world.size):
if (i != root):
if (world.rank == root):
this = self[starts[i]:starts[i] + chunks[i]]
world.send(this, dest=i)
if (world.rank == i):
this = world.recv(source=root)
return this
if (world.rank == root):
return self[:chunks[root]]
def Scatterv_numpy(self, starts, chunks, dtype, world, axis=0, root=0):
"""ScatterV a numpy array to all ranks in an MPI communicator.
Each rank gets a chunk defined by a starting index and chunk size. Must be called collectively. The 'starts' and 'chunks' must be available on every MPI rank. See the example for more details. Must be called collectively.
Parameters
----------
self : numpy.ndarray
A numpy array to broadcast from root.
starts : array of ints
1D array of ints with size equal to the number of MPI ranks.
Each element gives the starting index for a chunk to be sent to that core.
e.g. starts[0] is the starting index for rank = 0.
Must exist on all ranks
chunks : array of ints
1D array of ints with size equal to the number of MPI ranks.
Each element gives the size of a chunk to be sent to that core.
e.g. chunks[0] is the chunk size for rank = 0.
Must exist on all ranks
dtype : type
The type of the numpy array being scattered. Must exist on all ranks.
world : mpi4py.MPI.Comm
MPI parallel communicator.
axis : int, optional
Axis along which to Scatterv to the ranks if self is a 2D numpy array. Default is 0
root : int, optional
The MPI rank to broadcast from. Default is 0.
Returns
-------
out : numpy.ndarray
A chunk of self on each MPI rank with size chunk[world.rank].
"""
# Broadcast the number of dimensions
ndim = Bcast_1int(np.ndim(self), world, root=root)
if (ndim == 1): # For a 1D Array
this = np.empty(chunks[world.rank], dtype=dtype)
world.Scatterv([self, chunks, starts, None], this[:], root=root)
return this
# For a 2D Array
# MPI cannot send and receive arrays of more than one dimension.
# Therefore higher dimensional arrays must be unpacked to 1D, and then repacked on the other side.
if (ndim == 2):
s = Bcast_1int(np.size(self, 1 - axis), world, root=root)
tmpChunks = chunks * s
tmpStarts = starts * s
self_unpk = None
if (world.rank == root):
if (axis == 0):
self_unpk = np.reshape(self, np.size(self))
else:
self_unpk = np.reshape(self.T, np.size(self))
this_unpk = np.empty(tmpChunks[world.rank], dtype=dtype)
world.Scatterv([self_unpk, tmpChunks, tmpStarts, None], this_unpk, root=root)
this = np.reshape(this_unpk, [chunks[world.rank], s])
return this.T if axis == 1 else this
| [
"sys.stdout.write",
"numpy.full",
"numpy.size",
"os.getpid",
"numpy.abs",
"numpy.empty",
"numpy.asarray",
"numpy.ndim",
"numpy.cumsum",
"numpy.int",
"sys.stdout.flush",
"numpy.linalg.norm",
"numpy.reshape",
"numpy.int32",
"numpy.vstack",
"numpy.prod"
] | [((537, 565), 'sys.stdout.write', 'sys.stdout.write', (['(aStr + end)'], {}), '(aStr + end)\n', (553, 565), False, 'import sys\n'), ((570, 588), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (586, 588), False, 'import sys\n'), ((3198, 3206), 'os.getpid', 'getpid', ([], {}), '()\n', (3204, 3206), False, 'from os import getpid\n'), ((4478, 4534), 'numpy.full', 'np.full', (['n_chunks'], {'fill_value': '(N / n_chunks)', 'dtype': 'np.int'}), '(n_chunks, fill_value=N / n_chunks, dtype=np.int)\n', (4485, 4534), True, 'import numpy as np\n'), ((4543, 4563), 'numpy.int', 'np.int', (['(N % n_chunks)'], {}), '(N % n_chunks)\n', (4549, 4563), True, 'import numpy as np\n'), ((13671, 13698), 'numpy.full', 'np.full', (['(1)', 'value', 'np.int64'], {}), '(1, value, np.int64)\n', (13678, 13698), True, 'import numpy as np\n'), ((14098, 14119), 'numpy.empty', 'np.empty', (['(1)', 'np.int64'], {}), '(1, np.int64)\n', (14106, 14119), True, 'import numpy as np\n'), ((3251, 3294), 'numpy.abs', 'np.abs', (['(t * 181 * ((i - 83) * 359) % 104729)'], {}), '(t * 181 * ((i - 83) * 359) % 104729)\n', (3257, 3294), True, 'import numpy as np\n'), ((4599, 4616), 'numpy.cumsum', 'np.cumsum', (['chunks'], {}), '(chunks)\n', (4608, 4616), True, 'import numpy as np\n'), ((5310, 5331), 'numpy.linalg.norm', 'np.linalg.norm', (['shape'], {}), '(shape)\n', (5324, 5331), True, 'import numpy as np\n'), ((5462, 5480), 'numpy.asarray', 'np.asarray', (['[i, j]'], {}), '([i, j])\n', (5472, 5480), True, 'import numpy as np\n'), ((5497, 5513), 'numpy.prod', 'np.prod', (['nBlocks'], {}), '(nBlocks)\n', (5504, 5513), True, 'import numpy as np\n'), ((6002, 6019), 'numpy.vstack', 'np.vstack', (['[a, b]'], {}), '([a, b])\n', (6011, 6019), True, 'import numpy as np\n'), ((6154, 6171), 'numpy.vstack', 'np.vstack', (['[a, b]'], {}), '([a, b])\n', (6163, 6171), True, 'import numpy as np\n'), ((6950, 6971), 'numpy.linalg.norm', 'np.linalg.norm', (['shape'], {}), '(shape)\n', (6964, 6971), True, 'import 
numpy as np\n'), ((7827, 7847), 'numpy.vstack', 'np.vstack', (['[a, b, c]'], {}), '([a, b, c])\n', (7836, 7847), True, 'import numpy as np\n'), ((8025, 8045), 'numpy.vstack', 'np.vstack', (['[a, b, c]'], {}), '([a, b, c])\n', (8034, 8045), True, 'import numpy as np\n'), ((9053, 9082), 'numpy.full', 'np.full', (['(1)', 'self'], {'dtype': 'dtype'}), '(1, self, dtype=dtype)\n', (9060, 9082), True, 'import numpy as np\n'), ((10524, 10548), 'numpy.empty', 'np.empty', (['(1)'], {'dtype': 'dtype'}), '(1, dtype=dtype)\n', (10532, 10548), True, 'import numpy as np\n'), ((17764, 17788), 'numpy.empty', 'np.empty', (['(1)'], {'dtype': 'dtype'}), '(1, dtype=dtype)\n', (17772, 17788), True, 'import numpy as np\n'), ((18136, 18164), 'numpy.empty', 'np.empty', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (18144, 18164), True, 'import numpy as np\n'), ((18481, 18509), 'numpy.empty', 'np.empty', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (18489, 18509), True, 'import numpy as np\n'), ((20904, 20930), 'numpy.full', 'np.full', (['(1)', 'self', 'np.int64'], {}), '(1, self, np.int64)\n', (20911, 20930), True, 'import numpy as np\n'), ((20956, 20977), 'numpy.empty', 'np.empty', (['(1)', 'np.int64'], {}), '(1, np.int64)\n', (20964, 20977), True, 'import numpy as np\n'), ((26644, 26657), 'numpy.ndim', 'np.ndim', (['self'], {}), '(self)\n', (26651, 26657), True, 'import numpy as np\n'), ((26730, 26771), 'numpy.empty', 'np.empty', (['chunks[world.rank]'], {'dtype': 'dtype'}), '(chunks[world.rank], dtype=dtype)\n', (26738, 26771), True, 'import numpy as np\n'), ((27453, 27497), 'numpy.empty', 'np.empty', (['tmpChunks[world.rank]'], {'dtype': 'dtype'}), '(tmpChunks[world.rank], dtype=dtype)\n', (27461, 27497), True, 'import numpy as np\n'), ((27599, 27645), 'numpy.reshape', 'np.reshape', (['this_unpk', '[chunks[world.rank], s]'], {}), '(this_unpk, [chunks[world.rank], s])\n', (27609, 27645), True, 'import numpy as np\n'), ((5389, 5411), 'numpy.int32', 'np.int32', 
(['(n_chunks / 2)'], {}), '(n_chunks / 2)\n', (5397, 5411), True, 'import numpy as np\n'), ((5620, 5653), 'numpy.linalg.norm', 'np.linalg.norm', (['(fraction - target)'], {}), '(fraction - target)\n', (5634, 5653), True, 'import numpy as np\n'), ((7151, 7172), 'numpy.asarray', 'np.asarray', (['[i, j, k]'], {}), '([i, j, k])\n', (7161, 7172), True, 'import numpy as np\n'), ((7193, 7209), 'numpy.prod', 'np.prod', (['nBlocks'], {}), '(nBlocks)\n', (7200, 7209), True, 'import numpy as np\n'), ((8955, 8968), 'numpy.ndim', 'np.ndim', (['self'], {}), '(self)\n', (8962, 8968), True, 'import numpy as np\n'), ((10793, 10821), 'numpy.empty', 'np.empty', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (10801, 10821), True, 'import numpy as np\n'), ((17672, 17685), 'numpy.ndim', 'np.ndim', (['self'], {}), '(self)\n', (17679, 17685), True, 'import numpy as np\n'), ((27102, 27125), 'numpy.size', 'np.size', (['self', '(1 - axis)'], {}), '(self, 1 - axis)\n', (27109, 27125), True, 'import numpy as np\n'), ((5578, 5601), 'numpy.linalg.norm', 'np.linalg.norm', (['nBlocks'], {}), '(nBlocks)\n', (5592, 5601), True, 'import numpy as np\n'), ((7328, 7361), 'numpy.linalg.norm', 'np.linalg.norm', (['(fraction - target)'], {}), '(fraction - target)\n', (7342, 7361), True, 'import numpy as np\n'), ((11029, 11057), 'numpy.empty', 'np.empty', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (11037, 11057), True, 'import numpy as np\n'), ((18060, 18073), 'numpy.size', 'np.size', (['self'], {}), '(self)\n', (18067, 18073), True, 'import numpy as np\n'), ((18401, 18423), 'numpy.asarray', 'np.asarray', (['self.shape'], {}), '(self.shape)\n', (18411, 18423), True, 'import numpy as np\n'), ((7282, 7305), 'numpy.linalg.norm', 'np.linalg.norm', (['nBlocks'], {}), '(nBlocks)\n', (7296, 7305), True, 'import numpy as np\n'), ((9259, 9272), 'numpy.size', 'np.size', (['self'], {}), '(self)\n', (9266, 9272), True, 'import numpy as np\n'), ((10940, 10968), 'numpy.empty', 'np.empty', 
(['ndim'], {'dtype': 'np.int'}), '(ndim, dtype=np.int)\n', (10948, 10968), True, 'import numpy as np\n'), ((27338, 27351), 'numpy.size', 'np.size', (['self'], {}), '(self)\n', (27345, 27351), True, 'import numpy as np\n'), ((27418, 27431), 'numpy.size', 'np.size', (['self'], {}), '(self)\n', (27425, 27431), True, 'import numpy as np\n'), ((9458, 9480), 'numpy.asarray', 'np.asarray', (['self.shape'], {}), '(self.shape)\n', (9468, 9480), True, 'import numpy as np\n')] |
import numpy as np
def make_zero(x):
mat = x
operation = []
for i,j in enumerate(mat):
if i==0 : continue
operation.append(f"R{i+1} -> {j[0]}*R1 - R{i+1}")
mat[i] = j[0]*mat[0] - j
return mat,operation
print(make_zero(np.array([[1,2],[3,4],[3,4],[5,9]]))[1])
def rank(x,y=None):
# x be the matrix in any dimension and y be the shape
x = np.array(x)
if y==None: y = x.shape
matrix = np.array(x).reshape(y)
# Initial process
print("Find the rank of the given matrix:-")
print(matrix,'\n')
# step one
x = [i for i in [matrix[j] for j in range(y[0])]]
print(x)
# import numpy as np
# mat = np.array([[1,2],[3,4],[3,4],[5,9]])
# # if mat[0][0] != 1:
# # R0 = [mat[i][0] for i in range(mat.shape[0])]
# # else:
# mat
# pnt = f"R{} -> {}*R{} - R1"
# Testing....
# x = [1,4,3,-2,1,-2,-3,-1,4,3,-1,6,7,2,9,-3,3,6,6,12]
# print(rank(x,(4,5))) | [
"numpy.array"
] | [((385, 396), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (393, 396), True, 'import numpy as np\n'), ((258, 300), 'numpy.array', 'np.array', (['[[1, 2], [3, 4], [3, 4], [5, 9]]'], {}), '([[1, 2], [3, 4], [3, 4], [5, 9]])\n', (266, 300), True, 'import numpy as np\n'), ((438, 449), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (446, 449), True, 'import numpy as np\n')] |
from __future__ import print_function
import logging
import math
import sys
import numpy as np
import matplotlib.pyplot as plt
from .solution import Solution
class Animator:
def __init__(self, grid, tfinal):
self._grid = grid
self._tfinal = tfinal
plt.ion()
self._fig, (self._ax1, self._ax2, self._ax3) = plt.subplots(3, 1)
self._cid_close = self._fig.canvas.mpl_connect('close_event',
self.handle_close)
self._line_u, = self._ax1.plot(
self._grid, np.zeros_like(self._grid), '-')
self._line_lamda, = self._ax2.plot(
self._grid, np.zeros_like(self._grid), '-')
self._line_d, = self._ax3.plot([], [], '-')
self._time_history = []
self._det_speed_history = []
def animate(self, cur_time_step, cur_time, soln):
solution = Solution(soln)
self._time_history.append(cur_time)
self._det_speed_history.append(solution.d)
if cur_time_step % 10 == 0 or cur_time == self._tfinal:
fig = self._fig
ax1 = self._ax1
ax2 = self._ax2
ax3 = self._ax3
line_u = self._line_u
line_lamda = self._line_lamda
line_d = self._line_d
ax1.set_title('Cur_time = {}, time step = {}'.format(
cur_time, cur_time_step))
line_u.set_data(self._grid, solution.u)
min_u = np.min(solution.u)
if min_u > 0:
min_u = 0.99 * min_u
else:
min_u = 1.01 * min_u
max_u = np.max(solution.u)
if max_u > 0:
max_u = 1.01 * max_u
else:
max_u = 0.99 * max_u
ax1.set_ylim((min_u, max_u))
#ax1.set_xlim(-13, -11)
#ax1.set_ylim(1.3051, 1.3052)
line_lamda.set_data(self._grid, solution.lamda)
#ax2.set_xlim((-3.0, 0))
min_lamda = np.min(solution.lamda)
if min_lamda > 0:
min_lamda = 0.99 * min_lamda
else:
min_lamda = 1.01 * min_lamda
max_lamda = np.max(solution.lamda)
if max_lamda > 0:
max_lamda = 1.01 * max_lamda
else:
max_lamda = 0.99 * max_lamda
ax2.set_ylim((min_lamda, max_lamda))
# 3. Plotting time series of detonation velocity.
line_d.set_data(self._time_history, self._det_speed_history)
if self._time_history[-1] == 0.0:
d_xlim_right = 1.0
else:
d_xlim_right = math.ceil(self._time_history[-1])
ax3.set_xlim(0, d_xlim_right)
d_xlim_lower = 0.99 * np.min(self._det_speed_history)
d_xlim_upper = 1.01 * np.max(self._det_speed_history)
ax3.set_ylim(d_xlim_lower, d_xlim_upper)
fig.tight_layout()
plt.pause(0.02)
def simulation_finished(self):
"""Event handler for the event 'Simulation finished'.
We need to disconnect handler for the 'close' event in this case, so
that the program does not exit.
"""
self._fig.canvas.mpl_disconnect(self._cid_close)
def handle_close(self, event):
logger = logging.getLogger(__name__)
logger.info('Close animation window and exit program')
sys.exit(3)
| [
"numpy.zeros_like",
"math.ceil",
"logging.getLogger",
"matplotlib.pyplot.ion",
"numpy.min",
"numpy.max",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.subplots",
"sys.exit"
] | [((282, 291), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (289, 291), True, 'import matplotlib.pyplot as plt\n'), ((347, 365), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {}), '(3, 1)\n', (359, 365), True, 'import matplotlib.pyplot as plt\n'), ((3337, 3364), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (3354, 3364), False, 'import logging\n'), ((3436, 3447), 'sys.exit', 'sys.exit', (['(3)'], {}), '(3)\n', (3444, 3447), False, 'import sys\n'), ((575, 600), 'numpy.zeros_like', 'np.zeros_like', (['self._grid'], {}), '(self._grid)\n', (588, 600), True, 'import numpy as np\n'), ((675, 700), 'numpy.zeros_like', 'np.zeros_like', (['self._grid'], {}), '(self._grid)\n', (688, 700), True, 'import numpy as np\n'), ((1480, 1498), 'numpy.min', 'np.min', (['solution.u'], {}), '(solution.u)\n', (1486, 1498), True, 'import numpy as np\n'), ((1637, 1655), 'numpy.max', 'np.max', (['solution.u'], {}), '(solution.u)\n', (1643, 1655), True, 'import numpy as np\n'), ((2014, 2036), 'numpy.min', 'np.min', (['solution.lamda'], {}), '(solution.lamda)\n', (2020, 2036), True, 'import numpy as np\n'), ((2199, 2221), 'numpy.max', 'np.max', (['solution.lamda'], {}), '(solution.lamda)\n', (2205, 2221), True, 'import numpy as np\n'), ((2982, 2997), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.02)'], {}), '(0.02)\n', (2991, 2997), True, 'import matplotlib.pyplot as plt\n'), ((2676, 2709), 'math.ceil', 'math.ceil', (['self._time_history[-1]'], {}), '(self._time_history[-1])\n', (2685, 2709), False, 'import math\n'), ((2787, 2818), 'numpy.min', 'np.min', (['self._det_speed_history'], {}), '(self._det_speed_history)\n', (2793, 2818), True, 'import numpy as np\n'), ((2853, 2884), 'numpy.max', 'np.max', (['self._det_speed_history'], {}), '(self._det_speed_history)\n', (2859, 2884), True, 'import numpy as np\n')] |
import json
import os
from typing import Any, Dict, Generator, List, Optional, Set, Tuple, Union
import qcelemental as qcel
import qcportal as ptl
import tqdm
from openforcefield import topology as off
from pydantic import PositiveInt, constr, validator
from qcfractal.interface import FractalClient
from qcportal.models.common_models import (
DriverEnum,
OptimizationSpecification,
QCSpecification,
)
from typing_extensions import Literal
from ..common_structures import (
ClientHandler,
DatasetConfig,
IndexCleaner,
Metadata,
QCSpec,
QCSpecificationHandler,
TorsionIndexer,
)
from ..constraints import Constraints
from ..exceptions import (
DatasetCombinationError,
DatasetInputError,
MissingBasisCoverageError,
QCSpecificationError,
UnsupportedFiletypeError,
)
from ..procedures import GeometricProcedure
from ..results import SingleResult
from ..serializers import deserialize, serialize
from ..validators import scf_property_validator
from .entries import DatasetEntry, FilterEntry, OptimizationEntry, TorsionDriveEntry
class ComponentResult:
"""
Class to contain molecules after the execution of a workflow component this automatically applies de-duplication to
the molecules. For example if a molecule is already in the molecules list it will not be added but any conformers
will be kept and transferred.
If a molecule in the molecules list is then filtered it will be removed from the molecules list.
"""
def __init__(
self,
component_name: str,
component_description: Dict[str, str],
component_provenance: Dict[str, str],
molecules: Optional[Union[List[off.Molecule], off.Molecule]] = None,
input_file: Optional[str] = None,
input_directory: Optional[str] = None,
skip_unique_check: Optional[bool] = False,
verbose: bool = True,
):
"""Register the list of molecules to process.
Parameters
----------
component_name: str
The name of the component that produced this result.
component_description: Dict[str, str]
The dictionary representation of the component which details the function and running parameters.
component_provenance: Dict[str, str]
The dictionary of the modules used and there version number when running the component.
molecules: Optional[Union[List[off.Molecule], off.Molecule]], default=None,
The list of molecules that have been possessed by a component and returned as a result.
input_file: Optional[str], default=None
The name of the input file used to produce the result if not from a component.
input_directory: Optional[str], default=None
The name of the input directory which contains input molecule files.
verbose: bool, default=False
If the timing information and progress bar should be shown while doing deduplication.
skip_unique_check: bool. default=False
Set to True if it is sure that all molecules will be unique in this result
"""
self._molecules: Dict[str, off.Molecule] = {}
self._filtered: Dict[str, off.Molecule] = {}
self.component_name: str = component_name
self.component_description: Dict = component_description
self.component_provenance: Dict = component_provenance
self.skip_unique_check: bool = skip_unique_check
assert (
molecules is None or input_file is None
), "Provide either a list of molecules or an input file name."
# if we have an input file load it
if input_file is not None:
molecules = off.Molecule.from_file(
file_path=input_file, allow_undefined_stereo=True
)
if not isinstance(molecules, list):
molecules = [
molecules,
]
if input_directory is not None:
molecules = []
for file in os.listdir(input_directory):
# each file could have many molecules in it so combine
mols = off.Molecule.from_file(
file_path=os.path.join(input_directory, file),
allow_undefined_stereo=True,
)
try:
molecules.extend(mols)
except TypeError:
molecules.append(mols)
# now lets process the molecules and add them to the class
if molecules is not None:
for molecule in tqdm.tqdm(
molecules,
total=len(molecules),
ncols=80,
desc="{:30s}".format("Deduplication"),
disable=not verbose,
):
self.add_molecule(molecule)
@property
def molecules(self) -> List[off.Molecule]:
"""
Get the list of molecules which can be iterated over.
"""
return list(self._molecules.values())
@property
def filtered(self) -> List[off.Molecule]:
"""
Get the list of molecule that have been filtered to iterate over.
"""
return list(self._filtered.values())
@property
def n_molecules(self) -> int:
"""
Returns:
The number of molecules saved in the result.
"""
return len(self._molecules)
@property
def n_conformers(self) -> int:
"""
Returns:
The number of conformers stored in the molecules.
"""
conformers = sum(
[molecule.n_conformers for molecule in self._molecules.values()]
)
return conformers
@property
def n_filtered(self) -> int:
"""
Returns:
The number of filtered molecules.
"""
return len(self._filtered)
def add_molecule(self, molecule: off.Molecule) -> bool:
"""
Add a molecule to the molecule list after checking that it is not present already. If it is de-duplicate the
record and condense the conformers and metadata.
"""
import numpy as np
from simtk import unit
# make a unique molecule hash independent of atom order or conformers
molecule_hash = molecule.to_inchikey(fixed_hydrogens=True)
if not self.skip_unique_check and molecule_hash in self._molecules:
# we need to align the molecules and transfer the coords and properties
# get the mapping, drop some comparisons to match inchikey
isomorphic, mapping = off.Molecule.are_isomorphic(
molecule,
self._molecules[molecule_hash],
return_atom_map=True,
formal_charge_matching=False,
bond_order_matching=False,
)
assert isomorphic is True
# transfer any torsion indexes for similar fragments
if "dihedrals" in molecule.properties:
# we need to transfer the properties; get the current molecule dihedrals indexer
# if one is missing create a new one
current_indexer = self._molecules[molecule_hash].properties.get(
"dihedrals", TorsionIndexer()
)
# update it with the new molecule info
current_indexer.update(
torsion_indexer=molecule.properties["dihedrals"],
reorder_mapping=mapping,
)
# store it back
self._molecules[molecule_hash].properties["dihedrals"] = current_indexer
if molecule.n_conformers != 0:
# transfer the coordinates
for conformer in molecule.conformers:
new_conformer = np.zeros((molecule.n_atoms, 3))
for i in range(molecule.n_atoms):
new_conformer[i] = conformer[mapping[i]].value_in_unit(
unit.angstrom
)
new_conf = unit.Quantity(value=new_conformer, unit=unit.angstrom)
# check if the conformer is already on the molecule
for old_conformer in self._molecules[molecule_hash].conformers:
if old_conformer.tolist() == new_conf.tolist():
break
else:
self._molecules[molecule_hash].add_conformer(
new_conformer * unit.angstrom
)
else:
# molecule already in list and coords not present so just return
return True
else:
self._molecules[molecule_hash] = molecule
return False
def filter_molecule(self, molecule: off.Molecule):
"""
Filter out a molecule that has not passed this workflow component. If the molecule is already in the pass list
remove it and ensure it is only in the filtered list.
"""
molecule_hash = molecule.to_inchikey(fixed_hydrogens=True)
try:
del self._molecules[molecule_hash]
except KeyError:
pass
finally:
if molecule not in self._filtered:
self._filtered[molecule_hash] = molecule
def __repr__(self):
return f"ComponentResult(name={self.component_name}, molecules={self.n_molecules}, filtered={self.n_filtered})"
def __str__(self):
return f"<ComponentResult name='{self.component_name}' molecules='{self.n_molecules}' filtered='{self.n_filtered}'>"
class BasicDataset(IndexCleaner, ClientHandler, QCSpecificationHandler, DatasetConfig):
"""
The general qcfractal dataset class which contains all of the molecules and information about them prior to
submission.
The class is a simple holder of the dataset and information about it and can do simple checks on the data before
submitting it such as ensuring that the molecules have cmiles information
and a unique index to be identified by.
Note:
The molecules in this dataset are all expanded so that different conformers are unique submissions.
"""
dataset_name: str = "BasicDataset"
dataset_tagline: constr(
min_length=8, regex="[a-zA-Z]"
) = "OpenForcefield single point evaluations."
dataset_type: Literal["DataSet"] = "DataSet"
maxiter: PositiveInt = 200
driver: DriverEnum = DriverEnum.energy
scf_properties: List[str] = [
"dipole",
"quadrupole",
"wiberg_lowdin_indices",
"mayer_indices",
]
priority: str = "normal"
description: constr(
min_length=8, regex="[a-zA-Z]"
) = f"A basic dataset using the {driver} driver."
dataset_tags: List[str] = ["openff"]
compute_tag: str = "openff"
metadata: Metadata = Metadata()
provenance: Dict[str, str] = {}
dataset: Dict[str, DatasetEntry] = {}
filtered_molecules: Dict[str, FilterEntry] = {}
_file_writers = {"json": json.dump}
_entry_class = DatasetEntry
_scf_validator = validator("scf_properties", each_item=True, allow_reuse=True)(
scf_property_validator
)
def __init__(self, **kwargs):
"""
Make sure the metadata has been assigned correctly if not autofill some information.
"""
super().__init__(**kwargs)
# set the collection type here
self.metadata.collection_type = self.dataset_type
self.metadata.dataset_name = self.dataset_name
# some fields can be reused here
if self.metadata.short_description is None:
self.metadata.short_description = self.dataset_tagline
if self.metadata.long_description is None:
self.metadata.long_description = self.description
def __add__(self, other: "BasicDataset") -> "BasicDataset":
"""
Add two Basicdatasets together.
"""
import copy
# make sure the dataset types match
if self.dataset_type != other.dataset_type:
raise DatasetCombinationError(
f"The datasets must be the same type, you can not add types {self.dataset_type} and {other.dataset_type}"
)
# create a new datset
new_dataset = copy.deepcopy(self)
# update the elements in the dataset
new_dataset.metadata.elements.update(other.metadata.elements)
for index, entry in other.dataset.items():
# search for the molecule
entry_ids = new_dataset.get_molecule_entry(
entry.get_off_molecule(include_conformers=False)
)
if not entry_ids:
new_dataset.dataset[index] = entry
else:
mol_id = entry_ids[0]
current_entry = new_dataset.dataset[mol_id]
_, atom_map = off.Molecule.are_isomorphic(
entry.get_off_molecule(include_conformers=False),
current_entry.get_off_molecule(include_conformers=False),
return_atom_map=True,
)
# remap the molecule and all conformers
entry_mol = entry.get_off_molecule(include_conformers=True)
mapped_mol = entry_mol.remap(mapping_dict=atom_map, current_to_new=True)
for i in range(mapped_mol.n_conformers):
mapped_schema = mapped_mol.to_qcschema(
conformer=i, extras=current_entry.initial_molecules[0].extras
)
if mapped_schema not in current_entry.initial_molecules:
current_entry.initial_molecules.append(mapped_schema)
return new_dataset
@classmethod
def parse_file(cls, file_name: str):
"""
Add decompression to the parse file method.
"""
data = deserialize(file_name=file_name)
return cls(**data)
def get_molecule_entry(self, molecule: Union[off.Molecule, str]) -> List[str]:
"""
Search through the dataset for a molecule and return the dataset index of any exact molecule matches.
Parameters:
molecule: The smiles string for the molecule or an openforcefield.topology.Molecule that is to be searched for.
Returns:
A list of dataset indices which contain the target molecule.
"""
# if we have a smiles string convert it
if isinstance(molecule, str):
molecule = off.Molecule.from_smiles(molecule, allow_undefined_stereo=True)
# make a unique inchi key
inchi_key = molecule.to_inchikey(fixed_hydrogens=False)
hits = []
for entry in self.dataset.values():
if inchi_key == entry.attributes["inchi_key"]:
# they have same basic inchi now match the molecule
if molecule == entry.get_off_molecule(include_conformers=False):
hits.append(entry.index)
return hits
@property
def filtered(self) -> off.Molecule:
"""
A generator for the molecules that have been filtered.
Returns:
offmol: A molecule representation created from the filtered molecule lists
Note:
Modifying the molecule will have no effect on the data stored.
"""
for component, data in self.filtered_molecules.items():
for smiles in data.molecules:
offmol = off.Molecule.from_smiles(smiles, allow_undefined_stereo=True)
yield offmol
@property
def n_filtered(self) -> int:
"""
Calculate the total number of molecules filtered by the components used in a workflow to create this dataset.
Returns:
filtered: The total number of molecules filtered by components.
"""
filtered = sum(
[len(data.molecules) for data in self.filtered_molecules.values()]
)
return filtered
@property
def n_records(self) -> int:
    """
    Count the records that will be created when this dataset is submitted.

    Returns:
        The number of records that will be added to the collection.

    Note:
        * The number returned will be different depending on the dataset used.
        * The amount of unqiue molecule can be found using `n_molecules`
        * see also the [n_molecules][qcsubmit.datasets.BasicDataset.n_molecules]
    """
    return sum(
        len(entry.initial_molecules) for entry in self.dataset.values()
    )
@property
def n_molecules(self) -> int:
"""
Calculate the number of unique molecules to be submitted.
Returns:
The number of unique molecules in dataset
Notes:
* This method has been improved for better performance on large datasets and has been tested on an optimization dataset of over 10500 molecules.
* This function does not calculate the total number of entries of the dataset see `n_records`
"""
# bucket entries by the cheap basic inchi key, then disambiguate clashes
# with the more specific fixed-hydrogen inchi key
molecules = {}
for entry in self.dataset.values():
inchikey = entry.attributes["inchi_key"]
try:
# entries already seen under this basic key
like_mols = molecules[inchikey]
# the fixed-hydrogen inchi distinguishes tautomers/protomers
mol_to_add = entry.get_off_molecule(False).to_inchikey(
fixed_hydrogens=True
)
for index in like_mols:
if mol_to_add == self.dataset[index].get_off_molecule(
False
).to_inchikey(fixed_hydrogens=True):
# an equivalent molecule is already counted; stop searching
break
else:
# no stored entry matched, so this is a new unique molecule
molecules[inchikey].append(entry.index)
except KeyError:
# first entry seen under this basic inchi key
molecules[inchikey] = [
entry.index,
]
return sum([len(value) for value in molecules.values()])
@property
def molecules(self) -> Generator[off.Molecule, None, None]:
    """
    Lazily build an openforcefield.topology.Molecule for each dataset entry.

    Returns:
        The molecule instance created from the stored cmiles data, one at a
        time, including its conformers.

    Note:
        The dataset itself is immutable; editing a yielded molecule has no
        effect on the stored data.
    """
    for entry in self.dataset.values():
        yield entry.get_off_molecule(include_conformers=True)
@property
def n_components(self) -> int:
    """
    Count the workflow components that were run while generating this dataset.

    Returns:
        The number of components recorded in the filtered molecule data.
    """
    return len(self.filtered_molecules)
@property
def components(self) -> List[Dict[str, Union[str, Dict[str, str]]]]:
    """
    Collect the details of every component run during dataset creation.

    Returns:
        One dictionary of settings/provenance per component, with the
        (potentially very large) molecule lists stripped out.
    """
    return [
        component.dict(exclude={"molecules"})
        for component in self.filtered_molecules.values()
    ]
def filter_molecules(
    self,
    molecules: Union[off.Molecule, List[off.Molecule]],
    component_name: str,
    component_description: Dict[str, Any],
    component_provenance: Dict[str, str],
) -> None:
    """
    Record a molecule or list of molecules as filtered by a component.

    Parameters:
        molecules: The molecule or molecules that were removed.
        component_name: The name of the component.
        component_description: The dictionary representation of the component
            that filtered this set of molecules.
        component_provenance: The dictionary representation of the component
            provenance.
    """
    if isinstance(molecules, off.Molecule):
        molecules = [molecules]
    existing_filter = self.filtered_molecules.get(component_name)
    if existing_filter is not None:
        # this component has filtered molecules before; extend its record
        existing_filter.molecules.extend(
            molecule.to_smiles(isomeric=True, explicit_hydrogens=True)
            for molecule in molecules
        )
    else:
        # first filter hit for this component; create a fresh record
        new_filter = FilterEntry(
            off_molecules=molecules,
            component_name=component_name,
            component_provenance=component_provenance,
            component_description=component_description,
        )
        self.filtered_molecules[new_filter.component_name] = new_filter
def add_molecule(
    self,
    index: str,
    molecule: off.Molecule,
    attributes: Dict[str, Any],
    extras: Optional[Dict[str, Any]] = None,
    keywords: Optional[Dict[str, Any]] = None,
    **kwargs,
) -> None:
    """
    Store a molecule in the dataset under the supplied index.

    Parameters:
        index : str
            The molecule index that was generated by the factory.
        molecule : openforcefield.topology.Molecule
            The instance of the molecule which contains its conformer
            information.
        attributes : Dict[str, str]
            The attributes dictionary containing all of the relevant
            identifier tags and extra meta information on the calculation.
        extras : Dict[str, Any], optional, default=None
            The extras that should be supplied into the qcportal molecule.
        keywords : Dict[str, Any], optional, default=None
            Any extra keywords which are required for the calculation.

    Note:
        Each molecule in this basic dataset should have all of its conformers
        expanded out into separate entries, so the general molecule index is
        expected to have been incremented by the caller.
    """
    try:
        entry = self._entry_class(
            off_molecule=molecule,
            index=index,
            attributes=attributes,
            extras=extras or {},
            keywords=keywords or {},
            **kwargs,
        )
        self.dataset[index] = entry
        # keep the element-coverage metadata in sync with the new entry
        self.metadata.elements.update(entry.initial_molecules[0].symbols)
    except qcel.exceptions.ValidationError:
        # a valid QCSchema could not be built; record the molecule as filtered
        self.filter_molecules(
            molecules=molecule,
            component_name="QCSchemaIssues",
            component_description={
                "component_description": "The molecule was removed as a valid QCSchema could not be made",
                "component_name": "QCSchemaIssues",
            },
            component_provenance=self.provenance,
        )
def _get_missing_basis_coverage(
    self, raise_errors: bool = True
) -> Dict[str, Set[str]]:
    """
    Work out which dataset elements are not covered by the selected basis
    set for each QC specification.

    Parameters:
        raise_errors: bool, default=True
            If True raise MissingBasisCoverageError for any missing coverage,
            else only warn and return the missing element symbols.

    Returns:
        A mapping of spec name to the set of uncovered element symbols
        (only returned when ``raise_errors`` is False).

    Raises:
        MissingBasisCoverageError: If coverage is missing and raise_errors
            is True.
    """
    import re
    import warnings

    import basis_set_exchange as bse
    from simtk.openmm.app import Element

    basis_report = {}
    for spec in self.qc_specifications.values():
        program = spec.program.lower()
        if program == "torchani":
            # each ANI model covers a fixed set of elements
            ani_coverage = {
                "ani1x": {"C", "H", "N", "O"},
                "ani1ccx": {"C", "H", "N", "O"},
                "ani2x": {"C", "H", "N", "O", "S", "F", "Cl"},
            }
            covered_elements = ani_coverage[spec.method.lower()]
            # this is validated at the spec level so we should not get an error here
            difference = self.metadata.elements.difference(covered_elements)
        elif program == "psi4":
            # look the basis up on the basis set exchange, translating names
            # TODO this list should be updated with more basis transforms as we find them
            psi4_converter = {"dzvp": "dgauss-dzvp"}
            month_subs = {"jun-", "mar-", "apr-", "may-", "feb-"}
            basis = psi4_converter.get(spec.basis.lower(), spec.basis.lower())
            # apply conversions for special characters to match bse naming;
            # raw strings keep the regex escapes explicit (fixes the invalid
            # "\*" escape sequence, which warns on modern CPython)
            basis = re.sub(r"\*", "_st_", basis)
            basis = re.sub(r"/", "_sl_", basis)
            # check for heavy tags
            basis = re.sub(r"heavy-", "", basis)
            try:
                basis_meta = bse.get_metadata()[basis]
            except KeyError:
                # strip any calendar-month prefix and look the basis up again
                for month in month_subs:
                    if month in basis:
                        basis = re.sub(month, "", basis)
                basis_meta = bse.get_metadata()[basis]
            elements = basis_meta["versions"][basis_meta["latest_version"]][
                "elements"
            ]
            covered_elements = {
                Element.getByAtomicNumber(int(element)).symbol
                for element in elements
            }
            difference = self.metadata.elements.difference(covered_elements)
        elif program == "openmm":
            # smirnoff covered elements
            covered_elements = {"C", "H", "N", "O", "P", "S", "Cl", "Br", "F", "I"}
            difference = self.metadata.elements.difference(covered_elements)
        elif program == "rdkit":
            # all atoms are defined in the uff so return an empty set.
            difference = set()
        else:
            # xtb: all atoms are covered
            difference = set()
        basis_report[spec.spec_name] = difference

    for spec_name, report in basis_report.items():
        if report:
            if raise_errors:
                raise MissingBasisCoverageError(
                    f"The following elements: {report} are not covered by the selected basis : {self.qc_specifications[spec_name].basis} and method : {self.qc_specifications[spec_name].method}"
                )
            else:
                warnings.warn(
                    f"The following elements: {report} are not covered by the selected basis : {self.qc_specifications[spec_name].basis} and method : {self.qc_specifications[spec_name].method}",
                    UserWarning,
                )
    if not raise_errors:
        return basis_report
def submit(
self,
client: Union[str, ptl.FractalClient, FractalClient],
await_result: Optional[bool] = False,
ignore_errors: bool = False,
) -> SingleResult:
"""
Submit the dataset to the chosen qcarchive address and finish or wait for the results and return the
corresponding result class.
Parameters:
client : Union[str, qcportal.FractalClient]
The name of the file containing the client information or an actual client instance.
await_result : bool, optional, default=False
If the user wants to wait for the calculation to finish before returning.
ignore_errors : bool, default=False
If the user wants to submit the compute regardless of errors set this to True. Mainly to override basis coverage.
Returns:
The collection of the results which have completed.
Raises:
MissingBasisCoverageError: If the chosen basis set does not cover some of the elements in the dataset.
"""
# NOTE(review): await_result is not referenced anywhere in this body — confirm whether waiting was ever implemented.
# pre submission checks
# make sure we have some QCSpec to submit
self._check_qc_specs()
# basis set coverage check
self._get_missing_basis_coverage(raise_errors=not ignore_errors)
target_client = self._activate_client(client)
# work out if we are extending a collection
try:
collection = target_client.get_collection("Dataset", self.dataset_name)
except KeyError:
# we are making a new dataset so make sure the metadata is complete
# we hard code the default to be psi4 but each spec can submit their own program to use
self.metadata.validate_metadata(raise_errors=True)
collection = ptl.collections.Dataset(
name=self.dataset_name,
client=target_client,
default_driver=self.driver,
default_program="psi4",
tagline=self.dataset_tagline,
tags=self.dataset_tags,
description=self.description,
provenance=self.provenance,
metadata=self.metadata.dict(),
)
# here we need to add a spec for each program and set the default
for spec in self.qc_specifications.values():
# generate the keyword set
kw = self._get_spec_keywords(spec=spec)
try:
# try and add the keywords if present then continue
collection.add_keywords(
alias=spec.spec_name,
program=spec.program,
keyword=kw,
default=False,
)
collection.save()
except (KeyError, AttributeError):
# the keyword alias already exists on the collection; keep the stored one
pass
# i counts successfully added entries so we can batch the saves below
i = 0
# now add the molecules to the database, saving every 30 for speed
for index, data in self.dataset.items():
# check if the index we have been supplied has a number tag already if so start from this tag
index, tag = self._clean_index(index=index)
for j, molecule in enumerate(data.initial_molecules):
# each conformer becomes its own uniquely tagged entry
name = index + f"-{tag + j}"
try:
collection.add_entry(name=name, molecule=molecule)
i += 1
except KeyError:
# the entry already exists in the collection; skip it
continue
finally:
if i % 30 == 0:
# save the added entries
collection.save()
# save the final dataset
collection.save()
# submit the calculations for each spec
responses = {}
for spec in self.qc_specifications.values():
response = collection.compute(
method=spec.method,
basis=spec.basis,
keywords=spec.spec_name,
program=spec.program,
tag=self.compute_tag,
priority=self.priority,
protocols={"wavefunction": spec.store_wavefunction.value},
)
collection.save()
# save the response per submission
responses[spec.spec_name] = response
return responses
def _get_spec_keywords(self, spec: QCSpec) -> ptl.models.KeywordSet:
    """
    Build a keyword set specific to this QC specification, including the
    implicit solvent settings when they are requested.
    """
    # start from the shared keywords the dataset allows
    keyword_data = self.dict(include={"maxiter", "scf_properties"})
    if spec.implicit_solvent is not None:
        # turn on PCM and forward the solvent model as its input block
        keyword_data["pcm"] = True
        keyword_data["pcm__input"] = spec.implicit_solvent.to_string()
    return ptl.models.KeywordSet(values=keyword_data)
def export_dataset(self, file_name: str, compression: Optional[str] = None) -> None:
    """
    Write the dataset to file so it can be used to rebuild a dataset quickly.

    Parameters:
        file_name: The name of the file the dataset should be wrote to.
        compression: The type of compression that should be added to the export.

    Note:
        Only json output is supported. Compression is inferred from a trailing
        extension (`json.xz`, `json.gz`, `json.bz2`) when it is not supplied
        explicitly; bz2 currently seems to produce the smallest files.

    Raises:
        UnsupportedFiletypeError: If the requested file type is not supported.
    """
    # inspect the final one or two extensions; "json" must be among them.
    # Ideally the serializers should be doing this check themselves.
    extensions = file_name.split(".")
    extensions = extensions[-1:] if len(extensions) == 1 else extensions[-2:]
    if "json" not in extensions:
        raise UnsupportedFiletypeError(
            f"The dataset export file name with leading extension {extensions[-1]} is not supported, "
            "please end the file name with json."
        )
    serialize(serializable=self, file_name=file_name, compression=compression)
def coverage_report(self, forcefields: List[str]) -> Dict:
    """
    Report which force field parameters are exercised by the dataset molecules.

    Parameters:
        forcefields: The name(s) of the openforcefield force fields to include
            in the coverage report.

    Returns:
        A dictionary for each force field which breaks down the exercised
        parameter ids by parameter type.
    """
    from openforcefield.typing.engines.smirnoff import ForceField

    # the first character of a parameter id encodes its type
    param_types = {
        "a": "Angles",
        "b": "Bonds",
        "c": "Constraints",
        "t": "ProperTorsions",
        "i": "ImproperTorsions",
        "n": "vdW",
    }
    if isinstance(forcefields, str):
        forcefields = [forcefields]
    coverage = {}
    for forcefield in forcefields:
        ff = ForceField(forcefield)
        ff_result = {}
        for molecule in self.molecules:
            labels = ff.label_molecules(molecule.to_topology())[0]
            # flatten the per-force labels into the set of exercised ids
            exercised = {
                label.id
                for types in labels.values()
                for label in types.values()
            }
            for parameter in exercised:
                ff_result.setdefault(param_types[parameter[0]], set()).add(
                    parameter
                )
        # store this force field's breakdown in the main result
        coverage[forcefield] = ff_result
    return coverage
def visualize(self, file_name: str, columns: int = 4, toolkit: str = None) -> None:
    """
    Render the dataset molecules, with any torsions highlighted, to a pdf
    using either openeye or rdkit.

    Parameters:
        file_name: The name of the pdf file which will be produced.
        columns: The number of molecules per row.
        toolkit: Optionally force the backend toolkit used to produce the pdf.
    """
    from openforcefield.utils.toolkits import OPENEYE_AVAILABLE, RDKIT_AVAILABLE

    toolkits = {
        "openeye": (OPENEYE_AVAILABLE, self._create_openeye_pdf),
        "rdkit": (RDKIT_AVAILABLE, self._create_rdkit_pdf),
    }
    if toolkit:
        # an explicit backend was requested; it must be one we know about
        try:
            _, pdf_func = toolkits[toolkit.lower()]
            return pdf_func(file_name, columns)
        except KeyError:
            raise ValueError(
                f"The requested toolkit backend: {toolkit} is not supported, chose from {toolkits.keys()}"
            )
    # no preference given; fall back to the first available backend
    for available, pdf_func in toolkits.values():
        if available:
            return pdf_func(file_name, columns)
    raise ImportError(
        f"No backend toolkit was found to generate the pdf please install openeye and/or rdkit."
    )
def _create_openeye_pdf(self, file_name: str, columns: int) -> None:
"""
Make the pdf of the molecules use openeye.
"""
from openeye import oechem, oedepict
itf = oechem.OEInterface()
suppress_h = True
rows = 10
cols = columns
# set up a paginated report grid of rows x cols cells
ropts = oedepict.OEReportOptions(rows, cols)
ropts.SetHeaderHeight(25)
ropts.SetFooterHeight(25)
ropts.SetCellGap(2)
ropts.SetPageMargins(10)
report = oedepict.OEReport(ropts)
cellwidth, cellheight = report.GetCellWidth(), report.GetCellHeight()
opts = oedepict.OE2DMolDisplayOptions(
cellwidth, cellheight, oedepict.OEScale_Default * 0.5
)
opts.SetAromaticStyle(oedepict.OEAromaticStyle_Circle)
pen = oedepict.OEPen(oechem.OEBlack, oechem.OEBlack, oedepict.OEFill_On, 1.0)
opts.SetDefaultBondPen(pen)
oedepict.OESetup2DMolDisplayOptions(opts, itf)
# now we load the molecules
for data in self.dataset.values():
off_mol = data.get_off_molecule(include_conformers=False)
# drop the name so it is not rendered in the cell
off_mol.name = None
cell = report.NewCell()
mol = off_mol.to_openeye()
oedepict.OEPrepareDepiction(mol, False, suppress_h)
disp = oedepict.OE2DMolDisplay(mol, opts)
if hasattr(data, "dihedrals"):
# work out if we have a double or single torsion
if len(data.dihedrals) == 1:
dihedrals = data.dihedrals[0]
center_bonds = dihedrals[1:3]
else:
# double torsion case
dihedrals = [*data.dihedrals[0], *data.dihedrals[1]]
center_bonds = [*data.dihedrals[0][1:3], *data.dihedrals[1][1:3]]
# Highlight element of interest
# the predicate classes below close over dihedrals/center_bonds computed above
class NoAtom(oechem.OEUnaryAtomPred):
def __call__(self, atom):
return False
class AtomInTorsion(oechem.OEUnaryAtomPred):
def __call__(self, atom):
return atom.GetIdx() in dihedrals
class NoBond(oechem.OEUnaryBondPred):
def __call__(self, bond):
return False
class CentralBondInTorsion(oechem.OEUnaryBondPred):
def __call__(self, bond):
return (bond.GetBgn().GetIdx() in center_bonds) and (
bond.GetEnd().GetIdx() in center_bonds
)
# highlight the torsion atoms
atoms = mol.GetAtoms(AtomInTorsion())
bonds = mol.GetBonds(NoBond())
abset = oechem.OEAtomBondSet(atoms, bonds)
oedepict.OEAddHighlighting(
disp,
oechem.OEColor(oechem.OEYellow),
oedepict.OEHighlightStyle_BallAndStick,
abset,
)
# highlight the central bond(s) in a second colour
atoms = mol.GetAtoms(NoAtom())
bonds = mol.GetBonds(CentralBondInTorsion())
abset = oechem.OEAtomBondSet(atoms, bonds)
oedepict.OEAddHighlighting(
disp,
oechem.OEColor(oechem.OEOrange),
oedepict.OEHighlightStyle_BallAndStick,
abset,
)
oedepict.OERenderMolecule(cell, disp)
oedepict.OEWriteReport(file_name, report)
def _create_rdkit_pdf(self, file_name: str, columns: int) -> None:
    """
    Make the pdf of the molecules using rdkit.

    Parameters:
        file_name: The name of the pdf file which will be produced.
        columns: The number of molecules per row.

    Note:
        One highlight list is kept per molecule so that entries without
        dihedrals, or with more than one dihedral, no longer shift the
        highlights onto the wrong molecules (the previous ``extend`` based
        bookkeeping only stayed aligned when every entry had exactly one
        dihedral).
    """
    from rdkit.Chem import AllChem, Draw

    molecules = []
    tagged_atoms = []
    any_tags = False
    for data in self.dataset.values():
        rdkit_mol = data.get_off_molecule(include_conformers=False).to_rdkit()
        AllChem.Compute2DCoords(rdkit_mol)
        molecules.append(rdkit_mol)
        if hasattr(data, "dihedrals"):
            # flatten every dihedral of this entry into one highlight list
            atoms = [atom for dihedral in data.dihedrals for atom in dihedral]
            tagged_atoms.append(atoms)
            any_tags = True
        else:
            # keep the lists aligned; nothing to highlight for this molecule
            tagged_atoms.append([])
    if not any_tags:
        # if no atoms are to be tagged set to None
        tagged_atoms = None
    images = []
    # every 24 molecules split the page
    for i in range(0, len(molecules), 24):
        mol_chunk = molecules[i : i + 24]
        tag_chunk = None if tagged_atoms is None else tagged_atoms[i : i + 24]
        # now make the image
        image = Draw.MolsToGridImage(
            mol_chunk,
            molsPerRow=columns,
            subImgSize=(500, 500),
            highlightAtomLists=tag_chunk,
        )
        images.append(image)
    # write the pages out as a single multi-page pdf
    images[0].save(file_name, append_images=images[1:], save_all=True)
def molecules_to_file(self, file_name: str, file_type: str) -> None:
    """
    Write the molecules to the requested file type.

    Important:
        The supported file types are:

        - SMI
        - INCHI
        - INCKIKEY
    """
    file_writers = {
        "smi": self._molecules_to_smiles,
        "inchi": self._molecules_to_inchi,
        "inchikey": self._molecules_to_inchikey,
    }
    try:
        # look up the writer, collect the lines, then write them in one go
        lines = [f"{molecule}\n" for molecule in file_writers[file_type.lower()]()]
        with open(file_name, "w") as output:
            output.writelines(lines)
    except KeyError:
        raise UnsupportedFiletypeError(
            f"The requested file type {file_type} is not supported, supported types are"
            f"{file_writers.keys()}."
        )
def _molecules_to_smiles(self) -> List[str]:
    """Collect the canonical isomeric smiles for every dataset entry."""
    return [
        entry.attributes["canonical_isomeric_smiles"]
        for entry in self.dataset.values()
    ]
def _molecules_to_inchi(self) -> List[str]:
    """Collect the standard InChI string for every dataset entry."""
    return [entry.attributes["standard_inchi"] for entry in self.dataset.values()]
def _molecules_to_inchikey(self) -> List[str]:
    """Collect the standard InChIKey for every dataset entry."""
    return [entry.attributes["inchi_key"] for entry in self.dataset.values()]
class OptimizationDataset(BasicDataset):
"""
An optimisation dataset class which handles submission of settings differently from the basic dataset, and creates
optimization datasets in the public or local qcarcive instance.
"""
# the collection name and user-facing description strings
dataset_name = "OptimizationDataset"
dataset_tagline: constr(
min_length=8, regex="[a-zA-Z]"
) = "OpenForcefield optimizations."
# entries are keyed by their dataset index
dataset: Dict[str, OptimizationEntry] = {}
dataset_type: Literal["OptimizationDataset"] = "OptimizationDataset"
description: constr(
min_length=8, regex="[a-zA-Z]"
) = "An optimization dataset using geometric."
metadata: Metadata = Metadata(collection_type=dataset_type)
# optimizations always use the gradient driver (enforced by _check_driver)
driver: DriverEnum = DriverEnum.gradient
optimization_procedure: GeometricProcedure = GeometricProcedure()
# the entry model used by add_molecule on the base class
_entry_class = OptimizationEntry
@validator("driver")
def _check_driver(cls, driver):
    """Force the driver back to gradient; optimizations support nothing else."""
    if driver.value != "gradient":
        return DriverEnum.gradient
    return driver
def __add__(self, other: "OptimizationDataset") -> "OptimizationDataset":
"""
Add two Optimization datasets together, if the constraints are different then the entries are considered different.
"""
import copy
from ..utils import remap_list
# make sure the dataset types match
if self.dataset_type != other.dataset_type:
raise DatasetCombinationError(
f"The datasets must be the same type, you can not add types {self.dataset_type} and {other.dataset_type}"
)
# create a new dataset
new_dataset = copy.deepcopy(self)
# update the elements in the dataset
new_dataset.metadata.elements.update(other.metadata.elements)
for entry in other.dataset.values():
# search for the molecule
entry_ids = new_dataset.get_molecule_entry(
entry.get_off_molecule(include_conformers=False)
)
if entry_ids:
records = 0
for mol_id in entry_ids:
current_entry = new_dataset.dataset[mol_id]
# for each entry count the number of inputs incase we need a new entry
records += len(current_entry.initial_molecules)
# map the incoming entry's atom numbering onto the stored entry's
_, atom_map = off.Molecule.are_isomorphic(
entry.get_off_molecule(include_conformers=False),
current_entry.get_off_molecule(include_conformers=False),
return_atom_map=True,
)
current_constraints = current_entry.constraints
# make sure all constraints are the same
# remap the entry to compare
entry_constraints = Constraints()
for constraint in entry.constraints.freeze:
entry_constraints.add_freeze_constraint(
constraint.type, remap_list(constraint.indices, atom_map)
)
for constraint in entry.constraints.set:
entry_constraints.add_set_constraint(
constraint.type,
remap_list(constraint.indices, atom_map),
constraint.value,
)
if current_constraints == entry_constraints:
# transfer the entries
# remap and transfer
off_mol = entry.get_off_molecule(include_conformers=True)
mapped_mol = off_mol.remap(
mapping_dict=atom_map, current_to_new=True
)
# only keep conformers the stored entry does not already have
for i in range(mapped_mol.n_conformers):
mapped_schema = mapped_mol.to_qcschema(
conformer=i,
extras=current_entry.initial_molecules[0].extras,
)
if mapped_schema not in current_entry.initial_molecules:
current_entry.initial_molecules.append(mapped_schema)
break
# else:
# # if they are not the same move on to the next entry
# continue
else:
# for/else: we did not break so add the entry with a new unique index
core, tag = self._clean_index(entry.index)
entry.index = core + f"-{tag + records}"
new_dataset.dataset[entry.index] = entry
else:
# if no other molecules just add it
new_dataset.dataset[entry.index] = entry
return new_dataset
def _add_keywords(self, client: ptl.FractalClient, spec: QCSpec) -> str:
    """
    Register the keyword set for this spec with the client.

    Returns:
        kw_id: The keyword index number the client assigned to the set.
    """
    keyword_set = self._get_spec_keywords(spec=spec)
    return client.add_keywords([keyword_set])[0]
def get_qc_spec(self, spec_name: str, keyword_id: str) -> QCSpecification:
    """
    Create the QC specification for the computation.

    Parameters:
        spec_name: The name of the spec we want to convert to a QCSpecification.
        keyword_id: The string of the keyword set id number.

    Returns:
        The qcportal representation of the QC specification.
    """
    spec = self.qc_specifications[spec_name]
    return QCSpecification(
        driver=self.driver,
        method=spec.method,
        basis=spec.basis,
        keywords=keyword_id,
        program=spec.program,
        protocols={"wavefunction": spec.store_wavefunction},
    )
def add_dataset_specification(
self,
spec: QCSpec,
opt_spec: OptimizationSpecification,
collection: Union[
ptl.collections.OptimizationDataset, ptl.collections.TorsionDriveDataset
],
) -> bool:
"""
Try to add the local qc specification to the given collection, this will check if a spec under this name has already been added and if it should be overwritten.
Parameters:
spec: The QCSpec we are trying to add to the collection
opt_spec: The qcportal style optimization spec
collection: The collection we are trying to add this compute specification to
Notes:
If a specification is already stored under this name in the collection we have options:
- If a spec with the same name but different details has been added and used we must raise an error to change the name of the new spec
- If the spec has been added and has not been used then overwrite it.
Returns:
`True` if the specification is present in the collection and is exactly the same as what we are trying to add.
`False` if no specification can be found in the collection with the given name.
Raises:
QCSpecificationError: If a specification with the same name is already added to the collection but has different settings.
"""
# build the qcportal version of our spec
kw_id = self._add_keywords(client=collection.client, spec=spec)
qcportal_spec = self.get_qc_spec(spec_name=spec.spec_name, keyword_id=kw_id)
# see if the spec is in the history
if spec.spec_name.lower() in collection.data.history:
collection_spec = collection.get_specification(name=spec.spec_name)
# check they are the same
if (
collection_spec.optimization_spec == opt_spec
and qcportal_spec == collection_spec.qc_spec
):
# the spec is already there and is the same so just skip adding it
return True
else:
# same name, different settings, and already used: refuse to clobber
raise QCSpecificationError(
f"A specification with the name {spec.spec_name} is already registered with the collection but has different settings and has already been used and should not be overwriten. "
f"Please change the name of this specification to continue."
)
else:
# the spec either has not been added or has not been used so set the new default
collection.add_specification(
name=spec.spec_name,
optimization_spec=opt_spec,
qc_spec=qcportal_spec,
description=spec.spec_description,
overwrite=True,
)
return True
def submit(
self,
client: Union[str, ptl.FractalClient, FractalClient],
await_result: bool = False,
ignore_errors: bool = False,
) -> SingleResult:
"""
Submit the dataset to the chosen qcarchive address and finish or wait for the results and return the
corresponding result class.
Parameters:
await_result: If the user wants to wait for the calculation to finish before returning.
client: The name of the file containing the client information or the client instance.
ignore_errors: If the user wants to ignore basis coverage errors and submit the dataset.
Returns:
Either `None` if we are not waiting for the results or a BasicResult instance with all of the completed
calculations.
Raises:
MissingBasisCoverageError: If the chosen basis set does not cover some of the elements in the dataset.
"""
# NOTE(review): await_result is not referenced anywhere in this body — confirm whether waiting was ever implemented.
# pre submission checks
# check for qcspecs
self._check_qc_specs()
# basis set coverage check
self._get_missing_basis_coverage(raise_errors=not ignore_errors)
target_client = self._activate_client(client)
# work out if we are extending a collection
try:
collection = target_client.get_collection(
"OptimizationDataset", self.dataset_name
)
except KeyError:
# we are making a new dataset so make sure the url metadata is supplied
if self.metadata.long_description_url is None:
raise DatasetInputError(
"Please provide a long_description_url for the metadata before submitting."
)
collection = ptl.collections.OptimizationDataset(
name=self.dataset_name,
client=target_client,
tagline=self.dataset_tagline,
tags=self.dataset_tags,
description=self.description,
provenance=self.provenance,
metadata=self.metadata.dict(),
)
# create the optimization specification
opt_spec = self.optimization_procedure.get_optimzation_spec()
# create the qc specification and add them all
for spec in self.qc_specifications.values():
self.add_dataset_specification(
spec=spec, opt_spec=opt_spec, collection=collection
)
# i counts successfully added entries so we can batch the saves below
i = 0
# now add the molecules to the database, saving every 30 for speed
for index, data in self.dataset.items():
# check if the index we have been supplied has a number tag already if so start from this tag
index, tag = self._clean_index(index=index)
for j, molecule in enumerate(data.initial_molecules):
# each conformer becomes its own uniquely tagged entry
name = index + f"-{tag + j}"
try:
collection.add_entry(
name=name,
initial_molecule=molecule,
attributes=data.attributes,
additional_keywords=data.formatted_keywords,
save=False,
)
i += 1
except KeyError:
# the entry already exists in the collection; skip it
continue
finally:
if i % 30 == 0:
# save the added entries
collection.save()
# save the added entries
collection.save()
responses = {}
# submit the calculations for each spec
for spec_name in self.qc_specifications.keys():
response = collection.compute(
specification=spec_name, tag=self.compute_tag, priority=self.priority
)
responses[spec_name] = response
return responses
class TorsiondriveDataset(OptimizationDataset):
"""
An torsiondrive dataset class which handles submission of settings differently from the basic dataset, and creates
torsiondrive datasets in the public or local qcarcive instance.
Important:
The dihedral_ranges for the whole dataset can be defined here or if different scan ranges are required on a case
by case basis they can be defined for each torsion in a molecule separately in the keywords of the torsiondrive entry.
"""
# the collection name and user-facing description strings
dataset_name = "TorsionDriveDataset"
dataset_tagline: constr(
min_length=8, regex="[a-zA-Z]"
) = "OpenForcefield TorsionDrives."
# entries are keyed by their dataset index
dataset: Dict[str, TorsionDriveEntry] = {}
dataset_type: Literal["TorsiondriveDataset"] = "TorsiondriveDataset"
description: constr(
min_length=8, regex="[a-zA-Z]"
) = "A TorsionDrive dataset using geometric."
# NOTE(review): unlike OptimizationDataset this default does not set
# collection_type on the Metadata — confirm whether that is intentional.
metadata: Metadata = Metadata()
optimization_procedure: GeometricProcedure = GeometricProcedure.parse_obj(
{"enforce": 0.1, "reset": True, "qccnv": True, "epsilon": 0.0}
)
# torsiondrive scan settings shared by every entry unless overridden per entry
grid_spacing: List[int] = [15]
energy_upper_limit: float = 0.05
dihedral_ranges: Optional[List[Tuple[int, int]]] = None
energy_decrease_thresh: Optional[float] = None
# the entry model used by add_molecule on the base class
_entry_class = TorsionDriveEntry
def __add__(self, other: "TorsiondriveDataset") -> "TorsiondriveDataset":
"""
Add two TorsiondriveDatasets together, if the central bond in the dihedral is the same the entries are considered the same.
"""
import copy
# make sure the dataset types match
if self.dataset_type != other.dataset_type:
raise DatasetCombinationError(
f"The datasets must be the same type, you can not add types {self.dataset_type} and {other.dataset_type}"
)
# create a new dataset
new_dataset = copy.deepcopy(self)
# update the elements in the dataset
new_dataset.metadata.elements.update(other.metadata.elements)
for index, entry in other.dataset.items():
# search for the molecule
entry_ids = new_dataset.get_molecule_entry(
entry.get_off_molecule(include_conformers=False)
)
for mol_id in entry_ids:
current_entry = new_dataset.dataset[mol_id]
# map the incoming entry's atom numbering onto the stored entry's
_, atom_map = off.Molecule.are_isomorphic(
entry.get_off_molecule(include_conformers=False),
current_entry.get_off_molecule(include_conformers=False),
return_atom_map=True,
)
# gather the current dihedrals forward and backwards
current_dihedrals = set(
[(dihedral[1:3]) for dihedral in current_entry.dihedrals]
)
for dihedral in current_entry.dihedrals:
current_dihedrals.add((dihedral[1:3]))
current_dihedrals.add((dihedral[2:0:-1]))
# now gather the other entry dihedrals forwards and backwards
# (remapped into the stored entry's atom numbering)
other_dihedrals = set()
for dihedral in entry.dihedrals:
other_dihedrals.add(tuple(atom_map[i] for i in dihedral[1:3]))
other_dihedrals.add(tuple(atom_map[i] for i in dihedral[2:0:-1]))
# NOTE(review): the comparison relies on dihedral slices being tuples;
# confirm dihedrals are stored as tuples, not lists, in the entries.
difference = current_dihedrals - other_dihedrals
if not difference:
# the entry is already there so add new conformers and skip
off_mol = entry.get_off_molecule(include_conformers=True)
mapped_mol = off_mol.remap(
mapping_dict=atom_map, current_to_new=True
)
for i in range(mapped_mol.n_conformers):
mapped_schema = mapped_mol.to_qcschema(
conformer=i,
extras=current_entry.initial_molecules[0].extras,
)
if mapped_schema not in current_entry.initial_molecules:
current_entry.initial_molecules.append(mapped_schema)
break
else:
# for/else: none of the entries matched so add it
new_dataset.dataset[index] = entry
return new_dataset
@property
def n_records(self) -> int:
    """Number of records this dataset will submit (one per stored entry)."""
    entries = self.dataset
    return len(entries)
def submit(
    self,
    client: Union[str, ptl.FractalClient, FractalClient],
    await_result: bool = False,
    ignore_errors: bool = False,
) -> SingleResult:
    """
    Submit the dataset to the chosen qcarchive address and finish or wait for the results and return the
    corresponding result class.

    Parameters:
        await_result: If the user wants to wait for the calculation to finish before returning.
            NOTE(review): this argument is not referenced anywhere in the body below — confirm whether
            waiting was ever implemented or if the parameter is kept only for interface compatibility.
        client: The name of the file containing the client information or the client instance.
        ignore_errors: If the user wants to ignore basis coverage issues and submit the dataset.

    Returns:
        Either `None` if we are not waiting for the results or a BasicResult instance with all of the completed
        calculations.

    Raises:
        MissingBasisCoverageError: If the chosen basis set does not cover some of the elements in the dataset.
    """
    # pre submission checks
    # check for qcspecs
    self._check_qc_specs()
    # basis set coverage check (raises MissingBasisCoverageError unless ignore_errors=True)
    self._get_missing_basis_coverage(raise_errors=not ignore_errors)

    target_client = self._activate_client(client)
    # work out if we are extending a collection
    try:
        collection = target_client.get_collection(
            "TorsionDriveDataset", self.dataset_name
        )
    except KeyError:
        # we are making a new dataset so make sure the metadata is complete
        self.metadata.validate_metadata(raise_errors=True)
        collection = ptl.collections.TorsionDriveDataset(
            name=self.dataset_name,
            client=target_client,
            tagline=self.dataset_tagline,
            tags=self.dataset_tags,
            description=self.description,
            provenance=self.provenance,
            metadata=self.metadata.dict(),
        )
    # create the optimization specification
    # NOTE(review): `get_optimzation_spec` (sic) is the spelling of the project API — do not "fix" here.
    opt_spec = self.optimization_procedure.get_optimzation_spec()
    # create the qc specification for each spec
    for spec in self.qc_specifications.values():
        self.add_dataset_specification(
            spec=spec, opt_spec=opt_spec, collection=collection
        )
    # start add the molecule to the dataset, multiple conformers/molecules can be used as the starting geometry
    for i, (index, data) in enumerate(self.dataset.items()):
        try:
            # for each TD setting check if we have a unique value to replace the global setting
            collection.add_entry(
                name=index,
                initial_molecules=data.initial_molecules,
                dihedrals=data.dihedrals,
                grid_spacing=data.keywords.grid_spacing or self.grid_spacing,
                energy_upper_limit=data.keywords.energy_upper_limit
                or self.energy_upper_limit,
                attributes=data.attributes,
                energy_decrease_thresh=data.keywords.energy_decrease_thresh
                or self.energy_decrease_thresh,
                dihedral_ranges=data.keywords.dihedral_ranges
                or self.dihedral_ranges,
            )
        except KeyError:
            # entry with this name already exists in the collection; skip it
            continue
        finally:
            # checkpoint the collection to the server every 30 entries
            # (runs whether the entry was added or skipped)
            if i % 30 == 0:
                collection.save()

    collection.save()
    responses = {}
    # submit the calculations for each spec
    for spec_name in self.qc_specifications.keys():
        response = collection.compute(
            specification=spec_name, tag=self.compute_tag, priority=self.priority
        )
        responses[spec_name] = response

    # mapping of spec name -> server compute response
    return responses
| [
"openeye.oedepict.OERenderMolecule",
"openeye.oedepict.OEPen",
"os.path.join",
"basis_set_exchange.get_metadata",
"openeye.oedepict.OE2DMolDisplayOptions",
"qcportal.models.KeywordSet",
"openforcefield.topology.Molecule.from_smiles",
"openeye.oechem.OEAtomBondSet",
"openeye.oedepict.OEPrepareDepicti... | [((10344, 10382), 'pydantic.constr', 'constr', ([], {'min_length': '(8)', 'regex': '"""[a-zA-Z]"""'}), "(min_length=8, regex='[a-zA-Z]')\n", (10350, 10382), False, 'from pydantic import PositiveInt, constr, validator\n'), ((10749, 10787), 'pydantic.constr', 'constr', ([], {'min_length': '(8)', 'regex': '"""[a-zA-Z]"""'}), "(min_length=8, regex='[a-zA-Z]')\n", (10755, 10787), False, 'from pydantic import PositiveInt, constr, validator\n'), ((43694, 43732), 'pydantic.constr', 'constr', ([], {'min_length': '(8)', 'regex': '"""[a-zA-Z]"""'}), "(min_length=8, regex='[a-zA-Z]')\n", (43700, 43732), False, 'from pydantic import PositiveInt, constr, validator\n'), ((43918, 43956), 'pydantic.constr', 'constr', ([], {'min_length': '(8)', 'regex': '"""[a-zA-Z]"""'}), "(min_length=8, regex='[a-zA-Z]')\n", (43924, 43956), False, 'from pydantic import PositiveInt, constr, validator\n'), ((44238, 44257), 'pydantic.validator', 'validator', (['"""driver"""'], {}), "('driver')\n", (44247, 44257), False, 'from pydantic import PositiveInt, constr, validator\n'), ((56737, 56775), 'pydantic.constr', 'constr', ([], {'min_length': '(8)', 'regex': '"""[a-zA-Z]"""'}), "(min_length=8, regex='[a-zA-Z]')\n", (56743, 56775), False, 'from pydantic import PositiveInt, constr, validator\n'), ((56961, 56999), 'pydantic.constr', 'constr', ([], {'min_length': '(8)', 'regex': '"""[a-zA-Z]"""'}), "(min_length=8, regex='[a-zA-Z]')\n", (56967, 56999), False, 'from pydantic import PositiveInt, constr, validator\n'), ((11183, 11244), 'pydantic.validator', 'validator', (['"""scf_properties"""'], {'each_item': '(True)', 'allow_reuse': '(True)'}), "('scf_properties', each_item=True, allow_reuse=True)\n", (11192, 11244), False, 'from pydantic import PositiveInt, constr, validator\n'), ((12376, 12395), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (12389, 12395), False, 'import copy\n'), ((32249, 32283), 'qcportal.models.KeywordSet', 
'ptl.models.KeywordSet', ([], {'values': 'data'}), '(values=data)\n', (32270, 32283), True, 'import qcportal as ptl\n'), ((37030, 37050), 'openeye.oechem.OEInterface', 'oechem.OEInterface', ([], {}), '()\n', (37048, 37050), False, 'from openeye import oechem, oedepict\n'), ((37134, 37170), 'openeye.oedepict.OEReportOptions', 'oedepict.OEReportOptions', (['rows', 'cols'], {}), '(rows, cols)\n', (37158, 37170), False, 'from openeye import oechem, oedepict\n'), ((37317, 37341), 'openeye.oedepict.OEReport', 'oedepict.OEReport', (['ropts'], {}), '(ropts)\n', (37334, 37341), False, 'from openeye import oechem, oedepict\n'), ((37435, 37525), 'openeye.oedepict.OE2DMolDisplayOptions', 'oedepict.OE2DMolDisplayOptions', (['cellwidth', 'cellheight', '(oedepict.OEScale_Default * 0.5)'], {}), '(cellwidth, cellheight, oedepict.\n OEScale_Default * 0.5)\n', (37465, 37525), False, 'from openeye import oechem, oedepict\n'), ((37620, 37691), 'openeye.oedepict.OEPen', 'oedepict.OEPen', (['oechem.OEBlack', 'oechem.OEBlack', 'oedepict.OEFill_On', '(1.0)'], {}), '(oechem.OEBlack, oechem.OEBlack, oedepict.OEFill_On, 1.0)\n', (37634, 37691), False, 'from openeye import oechem, oedepict\n'), ((37736, 37782), 'openeye.oedepict.OESetup2DMolDisplayOptions', 'oedepict.OESetup2DMolDisplayOptions', (['opts', 'itf'], {}), '(opts, itf)\n', (37771, 37782), False, 'from openeye import oechem, oedepict\n'), ((40256, 40297), 'openeye.oedepict.OEWriteReport', 'oedepict.OEWriteReport', (['file_name', 'report'], {}), '(file_name, report)\n', (40278, 40297), False, 'from openeye import oechem, oedepict\n'), ((45094, 45113), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (45107, 45113), False, 'import copy\n'), ((49163, 49340), 'qcportal.models.common_models.QCSpecification', 'QCSpecification', ([], {'driver': 'self.driver', 'method': 'spec.method', 'basis': 'spec.basis', 'keywords': 'keyword_id', 'program': 'spec.program', 'protocols': "{'wavefunction': spec.store_wavefunction}"}), 
"(driver=self.driver, method=spec.method, basis=spec.basis,\n keywords=keyword_id, program=spec.program, protocols={'wavefunction':\n spec.store_wavefunction})\n", (49178, 49340), False, 'from qcportal.models.common_models import DriverEnum, OptimizationSpecification, QCSpecification\n'), ((58055, 58074), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (58068, 58074), False, 'import copy\n'), ((3725, 3798), 'openforcefield.topology.Molecule.from_file', 'off.Molecule.from_file', ([], {'file_path': 'input_file', 'allow_undefined_stereo': '(True)'}), '(file_path=input_file, allow_undefined_stereo=True)\n', (3747, 3798), True, 'from openforcefield import topology as off\n'), ((4048, 4075), 'os.listdir', 'os.listdir', (['input_directory'], {}), '(input_directory)\n', (4058, 4075), False, 'import os\n'), ((6631, 6788), 'openforcefield.topology.Molecule.are_isomorphic', 'off.Molecule.are_isomorphic', (['molecule', 'self._molecules[molecule_hash]'], {'return_atom_map': '(True)', 'formal_charge_matching': '(False)', 'bond_order_matching': '(False)'}), '(molecule, self._molecules[molecule_hash],\n return_atom_map=True, formal_charge_matching=False, bond_order_matching\n =False)\n', (6658, 6788), True, 'from openforcefield import topology as off\n'), ((14601, 14664), 'openforcefield.topology.Molecule.from_smiles', 'off.Molecule.from_smiles', (['molecule'], {'allow_undefined_stereo': '(True)'}), '(molecule, allow_undefined_stereo=True)\n', (14625, 14664), True, 'from openforcefield import topology as off\n'), ((34747, 34769), 'openforcefield.typing.engines.smirnoff.ForceField', 'ForceField', (['forcefield'], {}), '(forcefield)\n', (34757, 34769), False, 'from openforcefield.typing.engines.smirnoff import ForceField\n'), ((38052, 38103), 'openeye.oedepict.OEPrepareDepiction', 'oedepict.OEPrepareDepiction', (['mol', '(False)', 'suppress_h'], {}), '(mol, False, suppress_h)\n', (38079, 38103), False, 'from openeye import oechem, oedepict\n'), ((38123, 38157), 
'openeye.oedepict.OE2DMolDisplay', 'oedepict.OE2DMolDisplay', (['mol', 'opts'], {}), '(mol, opts)\n', (38146, 38157), False, 'from openeye import oechem, oedepict\n'), ((40209, 40246), 'openeye.oedepict.OERenderMolecule', 'oedepict.OERenderMolecule', (['cell', 'disp'], {}), '(cell, disp)\n', (40234, 40246), False, 'from openeye import oechem, oedepict\n'), ((40698, 40732), 'rdkit.Chem.AllChem.Compute2DCoords', 'AllChem.Compute2DCoords', (['rdkit_mol'], {}), '(rdkit_mol)\n', (40721, 40732), False, 'from rdkit.Chem import AllChem, Draw\n'), ((41316, 41424), 'rdkit.Chem.Draw.MolsToGridImage', 'Draw.MolsToGridImage', (['mol_chunk'], {'molsPerRow': 'columns', 'subImgSize': '(500, 500)', 'highlightAtomLists': 'tag_chunk'}), '(mol_chunk, molsPerRow=columns, subImgSize=(500, 500),\n highlightAtomLists=tag_chunk)\n', (41336, 41424), False, 'from rdkit.Chem import AllChem, Draw\n'), ((15569, 15630), 'openforcefield.topology.Molecule.from_smiles', 'off.Molecule.from_smiles', (['smiles'], {'allow_undefined_stereo': '(True)'}), '(smiles, allow_undefined_stereo=True)\n', (15593, 15630), True, 'from openforcefield import topology as off\n'), ((39537, 39571), 'openeye.oechem.OEAtomBondSet', 'oechem.OEAtomBondSet', (['atoms', 'bonds'], {}), '(atoms, bonds)\n', (39557, 39571), False, 'from openeye import oechem, oedepict\n'), ((39933, 39967), 'openeye.oechem.OEAtomBondSet', 'oechem.OEAtomBondSet', (['atoms', 'bonds'], {}), '(atoms, bonds)\n', (39953, 39967), False, 'from openeye import oechem, oedepict\n'), ((7857, 7888), 'numpy.zeros', 'np.zeros', (['(molecule.n_atoms, 3)'], {}), '((molecule.n_atoms, 3))\n', (7865, 7888), True, 'import numpy as np\n'), ((8123, 8177), 'simtk.unit.Quantity', 'unit.Quantity', ([], {'value': 'new_conformer', 'unit': 'unit.angstrom'}), '(value=new_conformer, unit=unit.angstrom)\n', (8136, 8177), False, 'from simtk import unit\n'), ((25023, 25051), 're.sub', 're.sub', (['"""\\\\*"""', '"""_st_"""', 'basis'], {}), "('\\\\*', '_st_', basis)\n", (25029, 
25051), False, 'import re\n'), ((25107, 25133), 're.sub', 're.sub', (['"""/"""', '"""_sl_"""', 'basis'], {}), "('/', '_sl_', basis)\n", (25113, 25133), False, 'import re\n'), ((25197, 25224), 're.sub', 're.sub', (['"""heavy-"""', '""""""', 'basis'], {}), "('heavy-', '', basis)\n", (25203, 25224), False, 'import re\n'), ((27126, 27337), 'warnings.warn', 'warnings.warn', (['f"""The following elements: {report} are not covered by the selected basis : {self.qc_specifications[spec_name].basis} and method : {self.qc_specifications[spec_name].method}"""', 'UserWarning'], {}), "(\n f'The following elements: {report} are not covered by the selected basis : {self.qc_specifications[spec_name].basis} and method : {self.qc_specifications[spec_name].method}'\n , UserWarning)\n", (27139, 27337), False, 'import warnings\n'), ((39662, 39693), 'openeye.oechem.OEColor', 'oechem.OEColor', (['oechem.OEYellow'], {}), '(oechem.OEYellow)\n', (39676, 39693), False, 'from openeye import oechem, oedepict\n'), ((40058, 40089), 'openeye.oechem.OEColor', 'oechem.OEColor', (['oechem.OEOrange'], {}), '(oechem.OEOrange)\n', (40072, 40089), False, 'from openeye import oechem, oedepict\n'), ((4225, 4260), 'os.path.join', 'os.path.join', (['input_directory', 'file'], {}), '(input_directory, file)\n', (4237, 4260), False, 'import os\n'), ((25279, 25297), 'basis_set_exchange.get_metadata', 'bse.get_metadata', ([], {}), '()\n', (25295, 25297), True, 'import basis_set_exchange as bse\n'), ((25622, 25640), 'basis_set_exchange.get_metadata', 'bse.get_metadata', ([], {}), '()\n', (25638, 25640), True, 'import basis_set_exchange as bse\n'), ((25510, 25534), 're.sub', 're.sub', (['month', '""""""', 'basis'], {}), "(month, '', basis)\n", (25516, 25534), False, 'import re\n')] |
# imports
import os
from os.path import join
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
from scipy.interpolate import griddata, Akima1DInterpolator
from scipy.stats import norm
from scipy.stats import pearsonr, spearmanr
from sklearn.neighbors import KernelDensity
from sklearn.utils.fixes import parse_version
from utils import bin, fit, functions, io, plotting
from utils.plotting import lighten_color
from correction import correct
import analyze
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.pyplot import cm
from matplotlib.ticker import (MultipleLocator, AutoMinorLocator)
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import collections, colors, transforms
# formatting
# Module-level matplotlib configuration shared by every plotting function below.
plt.rcParams['legend.title_fontsize'] = 'large'
plt.rcParams['legend.fontsize'] = 'medium'
fontP = FontProperties()
fontP.set_size('medium')

# 'science'/'ieee' styles come from the SciencePlots package registered with matplotlib.
plt.style.use(['science', 'ieee', 'std-colors'])
# plt.style.use(['science', 'scatter'])

# Probe the active style once for its color cycle and default figure size,
# then discard the throwaway figure.
fig, ax = plt.subplots()
sci_color_cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']
# NOTE(review): ax._get_lines is a private matplotlib attribute; may break on upgrade.
sci_color_cycler = ax._get_lines.prop_cycler
size_x_inches, size_y_inches = fig.get_size_inches()
plt.close(fig)
# -------------------------------------- SPCT STATS ANALYSIS FUNCTIONS -------------------------------------------------
def plot_spct_stats(base_dir):
    """Run the full SPCT calibration-statistics analysis for one experiment directory.

    Reads calibration coordinates from ``base_dir/coords/calib-coords``, then (per the
    boolean "modifier" flags below) fits the in-focus plane tilt, plots per-particle image
    statistics, evaluates static localization precision, and computes lateral sampling
    frequency. Figures go to ``base_dir/figs``, tabular results to ``base_dir/results``,
    and an overview spreadsheet is always written at the end.

    Parameters
    ----------
    base_dir : str
        Experiment root containing ``coords/calib-coords``, ``results`` and ``figs``.
    """
    # modifiers: toggle each analysis section on/off
    calc_plane_angle = True
    spct_image_stats = True
    position_3d_by_frame = True
    precision = True
    per_mindx_id = True
    per_percent_dx_diameter_id = True
    sampling_frequency = True

    # export results & show, save plots
    export_results = True
    save_figs = False
    show_figs = False

    # filepaths
    path_calib_coords = join(base_dir, 'coords/calib-coords')
    path_results = join(base_dir, 'results')
    path_figs = join(base_dir, 'figs')

    # --- experimental details
    method = 'spct'

    # read calibration coords
    dfc, dfcpid, dfcpop, dfcstats = io.read_calib_coords(path_calib_coords, method=method)

    # read from spct analysis
    mag_eff, zf, c1, c2 = io.read_pop_gauss_diameter_properties(dfcpop)

    # map effective magnification to the pixel pitch (microns/pixel)
    if mag_eff == 10:
        microns_per_pixel = 1.6
    elif mag_eff == 20:
        microns_per_pixel = 0.8
    elif mag_eff == 5:
        microns_per_pixel = 3.2
    else:
        raise ValueError('Effective magnification is not equal to 5, 10 or 20?')

    # spct stats
    image_dimensions = (512, 512)
    # field-of-view area in microns^2
    area = (image_dimensions[0] * microns_per_pixel) * (image_dimensions[1] * microns_per_pixel)
    particle_ids = dfcstats.id.unique()
    num_pids = len(dfcstats.id.unique())
    num_frames = len(dfcstats.frame.unique())
    measurement_depth = dfcstats.z_corr.max() - dfcstats.z_corr.min()

    # dictionaries: each section fills its result dict; None means "section skipped"
    dict_spct_pid_plane_angle = None
    dict_penetrance = None
    dict_spct_stats_bin_z = None
    dict_1d_static_precision_id = None
    dict_spct_stats_sampling_frequency = None

    # ------------------------------------------------- FIT PLANE ------------------------------------------------------
    if calc_plane_angle:
        # --------------------------------------- FIT PLANE (MICRONS) --------------------------------------------------
        # Fit a 3D plane to the boundary particles to get true zero-plane (UNITS: MICRONS)
        if dfcpid is not None:
            if 'x' in dfcpid.columns:
                dfcg = dfcpid
                param_zf = 'zf_from_peak_int'
                df_for_plane_angle = 'dfcpid_xy'
        elif dfc is not None:
            dfcg = dfc.groupby('id').mean()
            param_zf = 'z_f'
            df_for_plane_angle = 'dfc_correction_coords'
        else:
            raise ValueError('Either dfpid_xy (pid_defocus) or dfc (correction coords) must be available.')
        # NOTE(review): if dfcpid is not None but lacks an 'x' column, dfcg is never bound
        # and the np.stack below raises NameError — confirm whether that case can occur.

        # fit plane (x, y, z units: microns)
        # This is used for calculating a tilt angle
        points_microns = np.stack((dfcg.x * microns_per_pixel,
                                   dfcg.y * microns_per_pixel,
                                   dfcg[param_zf])).T
        px_microns, py_microns, pz_microns, popt_microns = fit.fit_3d_plane(points_microns)

        # tilt angle (degrees): slope of the fitted plane along each axis
        tilt_x = np.rad2deg(np.arctan((pz_microns[0, 1] - pz_microns[0, 0]) / (px_microns[0, 1] - px_microns[0, 0])))
        tilt_y = np.rad2deg(np.arctan((pz_microns[1, 0] - pz_microns[0, 0]) / (py_microns[1, 0] - py_microns[0, 0])))
        print("x-tilt = {} degrees".format(np.round(tilt_x, 3)))
        print("y-tilt = {} degrees".format(np.round(tilt_y, 3)))

        dict_spct_pid_plane_angle = {'param_zf': param_zf,
                                     'df_used_for_plane_angle': df_for_plane_angle,
                                     'x_tilt_degrees': tilt_x,
                                     'y_tilt_degrees': tilt_y,
                                     }

    # ---------------------------------------------- SPCT IMAGE STATS --------------------------------------------------
    if spct_image_stats:
        df = dfcstats.dropna()

        # optional one-off inspection branch; disabled by default
        cleaned_up = False
        if cleaned_up:
            dfg = df.groupby('frame').count()
            fig, ax = plt.subplots()
            ax.plot(dfg.index, dfg.z_corr)
            plt.show()

            # check if there is mean displacement of the entire particle group
            df = df[(df['z_corr'] > -50) & (df['z_corr'] < 50)]
            dfcounts = df.groupby('id').count().reset_index()
            df = df[df['id'].isin(dfcounts[dfcounts['id'] == dfcounts.id.max()].id.unique())]
            dfg = df.groupby('frame').mean().reset_index()

            fig, ax = plt.subplots()
            axr = ax.twinx()
            ax.plot(dfg.z_corr, dfg.x, label='x')
            axr.plot(dfg.z_corr, dfg.y, label='y')
            plt.show()

        # ----------------- SPCT (SINGLE IDs, Z)
        # 3D scatter plot of apparent lateral position as a function of axial position
        inspect_pids = np.random.choice(df.id.unique(), 10, replace=False)
        df = df[(df['z_corr'] > -50) & (df['z_corr'] < 50)]
        for pid in inspect_pids:
            dfpid = df[df['id'] == pid]

            # correct gaussian centers
            # NOTE(review): the first line reads df['gauss_xc'] while the second reads
            # dfpid['gauss_yc'] — presumably both were meant to use dfpid; confirm.
            dfpid['gauss_xc_corr'] = df['gauss_xc'] - dfpid['x'] + (dfpid['y'] - dfpid.iloc[0].y)
            dfpid['gauss_yc_corr'] = dfpid['gauss_yc'] - dfpid['y'] + (dfpid['x'] - dfpid.iloc[0].x)
            dfpid = dfpid[['id', 'z', 'z_corr', 'x', 'y',
                           'gauss_xc', 'gauss_yc', 'gauss_xc_corr', 'gauss_yc_corr']]

            fig = plt.figure(figsize=(size_x_inches / 1.5, size_y_inches * 2))
            ax = Axes3D(fig, box_aspect=(1.0, 1.0, 3.0))

            # 3D scatter
            ax.scatter(dfpid.gauss_xc_corr, dfpid.gauss_yc_corr, dfpid.z_corr, s=3, marker='o')

            # 2D scatter on bottom of plot (projection drawn just below the z-axis floor)
            ax.plot(dfpid.gauss_xc_corr, dfpid.gauss_yc_corr, np.ones_like(dfpid.gauss_xc_corr.values) * -19.95,
                    '-o', ms=0.5, color='gray')

            ax.set_xlabel('x')
            ax.set_ylabel('y')
            ax.set_zlim3d(bottom=-20, top=20)
            zticks = [-20, -10, 0, 10, 20]
            ax.set_zticks(zticks)
            ax.set_zticklabels(zticks)
            # de-emphasize x/y grids, keep z grid visible (private _axinfo API)
            ax.xaxis._axinfo["grid"].update({"linewidth": 0.0125})
            ax.yaxis._axinfo["grid"].update({"linewidth": 0.0125})
            ax.zaxis._axinfo["grid"].update({"linewidth": 0.25})
            plt.grid(visible=False)
            plt.grid(visible=True, which='major')
            # ax.view_init(elev, azim)
            plt.savefig(path_figs + '/pid{}_x{}y{}_xcg-ycg-zcorr_3d.png'.format(pid,
                                                                                np.round(dfpid.x.mean(), 0),
                                                                                np.round(dfpid.y.mean(), 0)
                                                                                )
                        )
            plt.show()
            plt.close()

        # plot single particles
        xparam = 'z_corr'
        plot_columns = ['gauss_xc_corr', 'gauss_yc_corr', 'gauss_sigma_x_y']
        # NOTE(review): this rebinds the module-wide particle_ids array to a single int;
        # the list-comprehension further below iterates particle_ids and would fail on an
        # int — verify intended behavior.
        particle_ids = 44
        plot_spct_stats_id_by_param(df, xparam, plot_columns, particle_ids, save_figs, path_figs, show_figs)

        # ----------------------------------------------- SPCT (ID, Z) -------------------------------------------------
        columns_to_bin = ['x', 'y']
        bins = [5, 11]
        plot_columns = ['gauss_xc_corr', 'gauss_yc_corr', 'gauss_sigma_x_y']
        column_to_plot_along = 'z_corr'
        round_to_decimals = [1, 1]

        # ----------------- SPCT (IDs sliced along X, Y, Z)
        for low_level_bins_to_plot in range(bins[1]):
            plot_spct_stats_compare_ids_by_along_param(df,
                                                       columns_to_bin,
                                                       bins,
                                                       low_level_bins_to_plot,
                                                       plot_columns,
                                                       column_to_plot_along,
                                                       round_to_decimals,
                                                       save_figs, path_figs, show_figs)

        # ----------------- SPCT (ID PAIRS, X, Z)
        xparam = 'z_corr'

        # filter by x-y deviation: drop particles whose mean x/y std exceeds 1.35 px
        dfg = df.groupby('id').std().reset_index()
        dfg['xy_std'] = np.mean([dfg.x, dfg.y])
        passing_ids = [p for p in particle_ids if p not in dfg[dfg['xy_std'] > 1.35].id.unique()]
        df = df[df.id.isin(passing_ids)]

        # compare particles on opposite sides of x-axis
        compare_param = 'x'
        x_left_pids = df.sort_values('x', ascending=True).id.unique()[0:5]
        x_right_pids = df.sort_values('x', ascending=False).id.unique()[0:5]

        # plot particle pairs
        particle_ids = [[i, j] for i, j in zip(x_left_pids, x_right_pids)]
        plot_spct_stats_compare_ids_by_param(df, xparam, compare_param, plot_columns, particle_ids,
                                             save_figs, path_figs, show_figs)

        # ----------------- SPCT (ID PAIRS, Y, Z)
        # compare particles on opposite sides of x-axis
        compare_param = 'y'
        y_top_pids = df.sort_values('y', ascending=True).id.unique()[0:3]
        y_bottom_pids = df.sort_values('y', ascending=False).id.unique()[0:3]

        # plot particle pairs
        particle_ids = [[i, j] for i, j in zip(y_top_pids, y_bottom_pids)]
        plot_spct_stats_compare_ids_by_param(df, xparam, compare_param, plot_columns, particle_ids,
                                             save_figs, path_figs, show_figs)

        # ------------------------------------------- SPCT PENETRANCE --------------------------------------------------
        """
        penetrance = evaluation of particles in >80% of max possible frames.
            * max possible frames = the number of frames for the maximally identified particles.
            ** This is necessary because for some datasets, there will be many images where zero particles are
            identified. Thus the penetrance here might == 0; rendering this evaluation not extremely useful.
        """
        dfcounts = df.groupby('id').count().reset_index()
        max_num_frames = dfcounts.z_corr.max()
        df_penetrance = dfcounts[dfcounts['z_corr'] > max_num_frames * 0.8]
        penetrance_num_frames = max_num_frames * 0.8
        penetrance_num_pids = len(df_penetrance.id.unique())

        dict_penetrance = {'max_idd_num_frames': max_num_frames,
                           'penetrance_num_frames': penetrance_num_frames,
                           'penetrance_num_pids': penetrance_num_pids,
                           }

        # --------------------------------------------- SPCT (Z, R) ----------------------------------------------------
        columns_to_bin = ['z_corr', 'r']
        plot_columns = ['gauss_sigma_x_y']
        column_to_count = 'id'
        bin_z = [-20, -10, 0, 10, 20]
        bin_r = 4
        min_num_bin = 10

        plot_spct_stats_bin_2d(df,
                               columns_to_bin,
                               column_to_count,
                               bins=[bin_z, bin_r],
                               round_to_decimals=[0, 0],
                               min_num_bin=min_num_bin,
                               save_figs=save_figs,
                               path_figs=path_figs,
                               show_figs=show_figs,
                               export_results=export_results,
                               path_results=path_results,
                               plot_columns=plot_columns
                               )

        # ----------------------------------------------- SPCT (Z, X) --------------------------------------------------
        columns_to_bin = ['z_corr', 'x']
        bin_x = 4

        plot_spct_stats_bin_2d(df,
                               columns_to_bin,
                               column_to_count,
                               bins=[bin_z, bin_x],
                               round_to_decimals=[0, 0],
                               min_num_bin=min_num_bin,
                               save_figs=save_figs,
                               path_figs=path_figs,
                               show_figs=show_figs,
                               export_results=export_results,
                               path_results=path_results,
                               plot_columns=plot_columns
                               )

        # ----------------------------------------------- SPCT (Z, Y) --------------------------------------------------
        columns_to_bin = ['z_corr', 'y']
        bin_y = 4

        plot_spct_stats_bin_2d(df,
                               columns_to_bin,
                               column_to_count,
                               bins=[bin_z, bin_y],
                               round_to_decimals=[0, 0],
                               min_num_bin=min_num_bin,
                               save_figs=save_figs,
                               path_figs=path_figs,
                               show_figs=show_figs,
                               export_results=export_results,
                               path_results=path_results,
                               plot_columns=plot_columns
                               )

        # ----------------------------------------------- SPCT (Z) -----------------------------------------------------
        column_to_bin = 'z_corr'
        column_to_count = 'id'
        bins = num_frames
        round_to_decimal = 4

        dict_spct_stats_bin_z = plot_spct_stats_bin_z(df,
                                                      column_to_bin,
                                                      column_to_count,
                                                      bins,
                                                      round_to_decimal,
                                                      save_figs,
                                                      path_figs,
                                                      show_figs,
                                                      export_results=export_results,
                                                      path_results=path_results,
                                                      )

        # ----------------------------------------------- SPCT (ID) ----------------------------------------------------
        column_to_count = 'frame'
        plot_spct_stats_bin_id(df, column_to_count, num_pids, save_figs, path_figs, show_figs,
                               export_results=export_results, path_results=path_results)

    # ------------------------------------------ PRECISION (Z, R, ID) --------------------------------------------------
    if position_3d_by_frame:
        df = dfcstats

        particle_ids_to_inspect = [21, 27, 29, 44, 45, 65]
        for pid in particle_ids_to_inspect:
            dfpid = df[df['id'] == pid]

            fig, [ax1, ax2] = plt.subplots(nrows=2, sharex=True, figsize=(size_x_inches, size_y_inches * 1.25))
            ax1.plot(dfpid.frame, dfpid.gauss_xc, label='Gaussian')
            ax1.plot(dfpid.frame, dfpid.x, label='Centroid')
            ax1.set_ylabel('x')
            # NOTE(review): ax2 is labelled 'y' but re-plots gauss_xc and x —
            # presumably gauss_yc / y were intended; confirm.
            ax2.plot(dfpid.frame, dfpid.gauss_xc)
            ax2.plot(dfpid.frame, dfpid.x)
            ax2.set_ylabel('y')
            ax2.set_xlabel('frame')
            plt.show()

    # ------------------------------------------ PRECISION (Z, R, ID) --------------------------------------------------
    if precision:
        df = dfcstats

        # radial Gaussian-center coordinate relative to image center
        if 'gauss_rc' not in df.columns:
            df['gauss_rc'] = np.sqrt((df['gauss_xc'] - image_dimensions[0] / 2) ** 2 +
                                     (df['gauss_yc'] - image_dimensions[1] / 2) ** 2
                                     )

        # bin sizes: ~5-micron axial bins, 4 radial bins
        bin_z, round_z = int(np.round(measurement_depth / 5, 0)), 4
        bin_r, round_r = 4, 0

        # ------------------------------------------ PRECISION (Z, R, ID) ----------------------------------------------
        columns_to_bin = ['z_corr', 'r']
        precision_columns = ['x', 'gauss_xc', 'y', 'gauss_yc', 'gauss_rc']
        bins = [bin_z // 2, bin_r]
        round_to_decimals = [round_z, round_r]
        bin_plot_spct_stats_3d_static_precision_z_r_id(df, columns_to_bin, precision_columns, bins, round_to_decimals,
                                                       export_results, path_results, save_figs, path_figs, show_figs)

        # ------------------------------------------ PRECISION (Z, ID) -------------------------------------------------
        column_to_bin = 'z_corr'
        precision_columns = ['x', 'gauss_xc', 'y', 'gauss_yc', 'gauss_rc']
        bins = bin_z
        round_to_decimal = round_z
        bin_plot_spct_stats_2d_static_precision_z_id(df, column_to_bin, precision_columns, bins, round_to_decimal,
                                                     export_results, path_results, save_figs, path_figs, show_figs)

        # ------------------------------------------ PRECISION (ID) ----------------------------------------------------
        precision_columns = ['x', 'gauss_xc', 'y', 'gauss_yc', 'gauss_rc']
        bins = particle_ids
        bin_count_threshold = num_frames / 5
        dict_1d_static_precision_id = bin_plot_spct_stats_1d_static_precision_id(df,
                                                                                 precision_columns,
                                                                                 bins,
                                                                                 export_results,
                                                                                 path_results,
                                                                                 bin_count_threshold,
                                                                                 save_figs,
                                                                                 path_figs,
                                                                                 show_figs)

    # ---------------------------------------- PRECISION (MIN DX, ID) --------------------------------------------------
    if per_mindx_id:
        df = dfcstats

        column_to_bin = 'min_dx'
        precision_columns = ['x', 'gauss_xc', 'y', 'gauss_yc']
        bin_mdx = 5
        round_mdx = 4
        bin_count_threshold = 20
        bin_plot_spct_stats_2d_static_precision_mindx_id(df, column_to_bin, precision_columns, bin_mdx, round_mdx,
                                                         bin_count_threshold,
                                                         export_results, path_results, save_figs, path_figs, show_figs)

    # ----------------------------------- PRECISION (PERCENT DIAMETER OVERLAP, ID) -------------------------------------
    if per_percent_dx_diameter_id:
        df = dfcstats

        column_to_bin = 'percent_dx_diameter'
        precision_columns = ['x', 'gauss_xc', 'y', 'gauss_yc']
        bin_pdo = 4
        round_pdo = 4
        bin_count_threshold = 20
        pdo_threshold = -3
        bin_plot_spct_stats_2d_static_precision_pdo_id(df, column_to_bin, precision_columns, bin_pdo, round_pdo,
                                                       pdo_threshold, bin_count_threshold,
                                                       export_results, path_results, save_figs, path_figs, show_figs)

    # ---------------------------------------- SAMPLING FREQUENCY (DX) -------------------------------------------------
    if sampling_frequency:
        df = dfcstats

        # ---------------------------------------- SAMPLING AVERAGE ----------------------------------------------------
        # emitter density (particles per micron^2 of field of view)
        emitter_density = num_pids / area

        # lateral sampling frequency (pixel spacings converted to microns)
        dfid = df.groupby('id').mean()
        mean_mean_points_spacing = dfid.num_dx.mean()
        mean_mean_lateral_spacing = dfid.mean_dx.mean() * microns_per_pixel
        mean_min_lateral_spacing = dfid.min_dx.mean() * microns_per_pixel
        nyquist_mean_sampling = 2 * mean_mean_lateral_spacing
        nyquist_min_sampling = 2 * mean_min_lateral_spacing

        if 'contour_diameter' in df.columns:
            # diameter near focus (|z_corr| < 1) vs at the axial extremes
            zf_contour_diameter = df[(df['z_corr'] < 1) & (
                    df['z_corr'] > -1)].contour_diameter.mean() * microns_per_pixel
            # NOTE(review): the two lines below read gauss_diameter inside the
            # contour branch — presumably contour_diameter was intended; confirm.
            zmin_contour_diameter = df[df['frame'] == df.frame.min()].gauss_diameter.mean() * microns_per_pixel
            zmax_contour_diameter = df[df['frame'] == df.frame.max()].gauss_diameter.mean() * microns_per_pixel
            nyquist_sampling_min_no_contour_overlap = 2 * zf_contour_diameter
            nyquist_sampling_max_no_contour_overlap = 2 * np.max([zmin_contour_diameter, zmax_contour_diameter])
            k_zmin = zf_contour_diameter / zmin_contour_diameter
            k_zmax = zf_contour_diameter / zmax_contour_diameter

            dict_contour = {'zf_contour_diameter_microns': zf_contour_diameter,
                            'zmin_contour_diameter_microns': zmin_contour_diameter,
                            'k_contour_zmin': k_zmin,
                            'zmax_contour_diameter_microns': zmax_contour_diameter,
                            'k_contour_zmax': k_zmax,
                            'nyquist_sampling_min_no_contour_overlap_microns': nyquist_sampling_min_no_contour_overlap,
                            'nyquist_sampling_max_no_contour_overlap_microns': nyquist_sampling_max_no_contour_overlap,
                            }
        else:
            dict_contour = None

        if 'gauss_diameter' in df.columns:
            zf_gauss_diameter = df[(df['z_corr'] < 1) & (df['z_corr'] > -1)].gauss_diameter.mean() * microns_per_pixel
            zmin_gauss_diameter = df[df['frame'] == df.frame.min()].gauss_diameter.mean() * microns_per_pixel
            zmax_gauss_diameter = df[df['frame'] == df.frame.max()].gauss_diameter.mean() * microns_per_pixel
            nyquist_sampling_min_no_overlap = 2 * zf_gauss_diameter
            nyquist_sampling_max_no_overlap = 2 * np.max([zmin_gauss_diameter, zmax_gauss_diameter])
            k_zmin = zf_gauss_diameter / zmin_gauss_diameter
            k_zmax = zf_gauss_diameter / zmax_gauss_diameter

            dict_gauss = {'zf_gauss_diameter_microns': zf_gauss_diameter,
                          'zmin_gauss_diameter_microns': zmin_gauss_diameter,
                          'k_gauss_zmin': k_zmin,
                          'zmax_gauss_diameter_microns': zmax_gauss_diameter,
                          'k_gauss_zmax': k_zmax,
                          'nyquist_sampling_min_no_overlap_microns': nyquist_sampling_min_no_overlap,
                          'nyquist_sampling_max_no_overlap_microns': nyquist_sampling_max_no_overlap,
                          }
        else:
            dict_gauss = None

        # package results to update overview dictionary
        dict_spct_stats_sampling_frequency = {'emitter_density_microns_squared': emitter_density,
                                              'num_points_spacing': mean_mean_points_spacing,
                                              'mean_mean_lateral_spacing_microns': mean_mean_lateral_spacing,
                                              'mean_min_lateral_spacing_microns': mean_min_lateral_spacing,
                                              'nyquist_sampling_mean_dx_microns': nyquist_mean_sampling,
                                              'nyquist_sampling_min_dx_microns': nyquist_min_sampling,
                                              }

        if dict_contour is not None:
            dict_spct_stats_sampling_frequency.update(dict_contour)

        if dict_gauss is not None:
            dict_spct_stats_sampling_frequency.update(dict_gauss)

        # ---------------------------------------- SAMPLING FREQUENCY (Z) ----------------------------------------------
        # NOTE(review): these rebindings override the flags set at the top of the function
        # for the remainder of this section — confirm intentional.
        export_results = True
        save_figs = True
        show_figs = False

        column_to_bin = 'z_corr'
        bins = num_frames
        bin_plot_spct_stats_sampling_frequency_z_id(df, column_to_bin, bins, area, microns_per_pixel,
                                                    export_results, path_results, save_figs, path_figs, show_figs)

    # ----------------------------------------- PACKAGE AND EXPORT RESULTS ---------------------------------------------
    spct_results = {'method': method,
                    'mag_eff': mag_eff,
                    'microns_per_pixel': microns_per_pixel,
                    'area_microns_squared': area,
                    'num_particles': num_pids,
                    'num_frames': num_frames,
                    'measurement_depth': measurement_depth,
                    'zf': zf,
                    'c1': c1,
                    'c2': c2,
                    }

    # fold in whichever per-section result dicts were produced
    if dict_spct_pid_plane_angle is not None:
        spct_results.update(dict_spct_pid_plane_angle)

    if dict_penetrance is not None:
        spct_results.update(dict_penetrance)

    if dict_spct_stats_bin_z is not None:
        spct_results.update(dict_spct_stats_bin_z)

    if dict_1d_static_precision_id is not None:
        spct_results.update(dict_1d_static_precision_id)

    if dict_spct_stats_sampling_frequency is not None:
        spct_results.update(dict_spct_stats_sampling_frequency)

    df_spct_results = pd.DataFrame.from_dict(spct_results, orient='index', columns=['value'])

    # export
    df_spct_results.to_excel(path_results + '/spct-stats-overview-results.xlsx')
# ------------------------------------------ META ANALYSIS FUNCTIONS ---------------------------------------------------
def plot_meta_assessment(base_dir, method, min_cm, min_percent_layers, microns_per_pixel, path_calib_spct_pop=None,
                         save_figs=True, show_figs=False):
    """Run the full meta assessment for a calibration/test dataset rooted at `base_dir`.

    Reads calibration and test coordinates, corrects z for the fitted tilt plane,
    then evaluates and exports: the calibration curve, binned z-RMSE, calibration
    stack self-similarity, and intrinsic aberrations (raw and tilt-corrected).
    Figures go to `base_dir`/figs and spreadsheets to `base_dir`/results.

    Fix vs. previous revision: the particle-to-particle similarity plot is now
    guarded on `dfcs` (the dataframe it actually plots) instead of `dfas`.

    :param base_dir: directory containing 'coords/', 'similarity/', 'results/', 'figs/'.
    :param method: 'idpt' or 'spct'; forwarded to the readers/correctors.
    :param min_cm: minimum correlation value for RMSE binning and aberration evaluation.
    :param min_percent_layers: minimum fraction of stack layers for self-similarity plots.
    :param microns_per_pixel: lateral scale used when inspecting the calibration surface.
    :param path_calib_spct_pop: SPCT pop file with Gaussian diameter properties (required for IDPT).
    :param save_figs: save calibration-curve and RMSE figures when True.
    :param show_figs: display those figures interactively when True.
    """
    if method == 'idpt' and path_calib_spct_pop is None:
        raise ValueError('Must specify path_calib_spct_pop for IDPT analyses.')
    # filepaths
    path_test_coords = join(base_dir, 'coords/test-coords')
    path_calib_coords = join(base_dir, 'coords/calib-coords')
    path_similarity = join(base_dir, 'similarity')
    path_results = join(base_dir, 'results')
    path_figs = join(base_dir, 'figs')
    # --- --- META ASSESSMENT
    # calibration coords
    dfc, dfcpid, dfcpop, dfcstats = io.read_calib_coords(path_calib_coords, method=method)
    if 'x' not in dfcpid.columns:
        print('Running correct.merge_calib_pid_defocus_and_correction_coords(path_calib_coords) to merge x-y.')
        dfcpid = correct.merge_calib_pid_defocus_and_correction_coords(path_calib_coords=path_calib_coords,
                                                                      method=method,
                                                                      dfs=[dfc, dfcpid])
    # inspect initial distribution of in-focus particle positions
    fig, ax = plotting.scatter_z_by_xy(df=dfcpid, z_params=['zf_from_peak_int', 'zf_from_nsv'])
    fig.savefig(path_figs + '/zf_scatter_xy_int-and-nsv.png')
    dict_fit_plane, fig_xy, fig_xyz, fig_plane = correct.inspect_calibration_surface(df=dfcpid,
                                                                                    param_zf='zf_from_nsv',
                                                                                    microns_per_pixel=microns_per_pixel)
    fig_xy.savefig(path_figs + '/zf_scatter_xy.png')
    plt.close(fig_xy)
    fig_xyz.savefig(path_figs + '/zf_scatter_xyz.png')
    plt.close(fig_xyz)
    fig_plane.savefig(path_figs + '/zf_fit-3d-plane.png')
    plt.close(fig_plane)
    # read diameter parameters (from the SPCT pop file when given, else from the calib pop dataframe)
    if path_calib_spct_pop is not None:
        mag_eff, zf, c1, c2 = io.read_pop_gauss_diameter_properties(path_calib_spct_pop)
    else:
        mag_eff, zf, c1, c2 = io.read_pop_gauss_diameter_properties(dfcpop)
    # test coords
    dft = io.read_test_coords(path_test_coords)
    # correct test coords for the fitted calibration tilt plane
    dft = correct.correct_z_by_plane_tilt(dfcpid,
                                          dft,
                                          param_zf='zf_from_nsv',
                                          param_z='z',
                                          param_z_true='z_true')
    # --- CALIBRATION CURVE
    fig, ax = plt.subplots()
    ax.scatter(dft.z_true_corr, dft.z_corr, s=0.5, alpha=0.25)
    ax.set_xlabel(r'$z_{true} \: (\mu m)$')
    ax.set_ylabel(r'$z \: (\mu m)$')
    plt.tight_layout()
    if save_figs:
        plt.savefig(path_figs + '/calibration_curve_corrected.png')
    if show_figs:
        plt.show()
    plt.close()
    # --- RMSE
    # bin: z-dependent (25 bins) and dataset-mean (1 bin)
    dfrmse = bin.bin_local_rmse_z(dft, column_to_bin='z_true_corr', bins=25, min_cm=min_cm, z_range=None,
                                  round_to_decimal=4, df_ground_truth=None, dropna=True, error_column='error')
    dfrmse_mean = bin.bin_local_rmse_z(dft, column_to_bin='z_true_corr', bins=1, min_cm=min_cm, z_range=None,
                                       round_to_decimal=4, df_ground_truth=None, dropna=True, error_column='error')
    # export
    dfrmse.to_excel(path_results + '/meta_rmse-z_binned.xlsx')
    dfrmse_mean.to_excel(path_results + '/meta_rmse-z_mean.xlsx')
    # plot
    fig, ax = plt.subplots()
    ax.plot(dfrmse.index, dfrmse.rmse_z, '-o')
    ax.set_xlabel(r'$z_{true} \: (\mu m)$')
    ax.set_ylabel(r'$\sigma_{z} \: (\mu m)$')
    plt.tight_layout()
    if save_figs:
        plt.savefig(path_figs + '/meta_rmse-z_corrected.png')
    if show_figs:
        plt.show()
    plt.close()
    # --- --- --- PARTICLE IMAGE SIMILARITY
    # --- CALIBRATION STACK SIMILARITY
    # read
    dfs, dfsf, dfsm, dfas, dfcs = io.read_similarity(path_similarity)
    # plot
    if dfsf is not None:
        fig, ax = plotting.plot_calib_stack_self_similarity(dfsf, min_percent_layers=min_percent_layers)
        ax.set_xlabel(r'$z_{calib.} \: (\mu m)$')
        ax.set_ylabel(r'$\overline{S}_{(i, i+1)}$')
        plt.tight_layout()
        plt.savefig(path_figs + '/calib_self-similarity-forward.png')
        plt.show()
    if dfsm is not None:
        fig, ax = plotting.plot_calib_stack_self_similarity(dfsm, min_percent_layers=min_percent_layers)
        ax.set_xlabel(r'$z_{calib.} \: (\mu m)$')
        ax.set_ylabel(r'$\overline{S}_{(i-1, i, i+1)}$')
        plt.tight_layout()
        plt.savefig(path_figs + '/calib_self-similarity-middle.png')
        plt.show()
    # BUGFIX: this plot consumes dfcs, so guard on dfcs (previously guarded on dfas).
    if dfcs is not None:
        fig, ax = plotting.plot_particle_to_particle_similarity(dfcs, min_particles_per_frame=10)
        ax.set_xlabel(r'$z_{calib.} \: (\mu m)$')
        ax.set_ylabel(r'$\overline{S}_{i}(p_{i}, p_{N})$')
        plt.tight_layout()
        plt.savefig(path_figs + '/calib_per-frame_particle-to-particle-similarity.png')
        plt.show()
    # --- --- INTRINSIC ABERRATIONS ASSESSMENT
    # --- RAW
    # evaluate
    dict_ia = analyze.evaluate_intrinsic_aberrations(dfs,
                                                     z_f=zf,
                                                     min_cm=min_cm,
                                                     param_z_true='z_true',
                                                     param_z_cm='z_cm')
    dict_ia = analyze.fit_intrinsic_aberrations(dict_ia)
    io.export_dict_intrinsic_aberrations(dict_ia, path_results, unique_id='raw')
    # plot
    fig, ax = plotting.plot_intrinsic_aberrations(dict_ia, cubic=True, quartic=True)
    ax.set_xlabel(r'$z_{raw} \: (\mu m)$')
    ax.set_ylabel(r'$S_{max}(z_{l}) / S_{max}(z_{r})$')
    ax.grid(alpha=0.125)
    ax.legend(['Data', 'Cubic', 'Quartic'])
    plt.tight_layout()
    plt.savefig(path_figs + '/intrinsic-aberrations_raw.png')
    plt.show()
    # --- CORRECTED
    # evaluate on the tilt-corrected similarity coordinates (z_f becomes 0 after correction)
    dfs_corr = correct.correct_z_by_plane_tilt(dfcal=dfcpid,
                                               dftest=dfs,
                                               param_zf='zf_from_nsv',
                                               param_z='z_est',
                                               param_z_true='z_true',
                                               params_correct=['z_cm'])
    dict_iac = analyze.evaluate_intrinsic_aberrations(dfs_corr,
                                                      z_f=0,
                                                      min_cm=min_cm,
                                                      param_z_true='z_true_corr',
                                                      param_z_cm='z_cm_corr')
    dict_iac = analyze.fit_intrinsic_aberrations(dict_iac)
    io.export_dict_intrinsic_aberrations(dict_iac, path_results, unique_id='corrected')
    # plot
    fig, ax = plotting.plot_intrinsic_aberrations(dict_iac, cubic=True, quartic=True)
    ax.set_xlabel(r'$z_{true} \: (\mu m)$')
    ax.set_ylabel(r'$S_{max}(z_{l}) / S_{max}(z_{r})$')
    ax.grid(alpha=0.125)
    ax.legend(['Data', 'Cubic', 'Quartic'])
    plt.tight_layout()
    plt.savefig(path_figs + '/intrinsic-aberrations_corrected.png')
    plt.show()
# ------------------------------- TEST RIGID DISPLACEMENT ANALYSIS FUNCTIONS -------------------------------------------
def plot_rigid_displacement_test(test_coords_path, spct_stats_path=None):
    """Analyze a rigid-displacement test: per-particle precision vs. position, and
    (optionally) deviations from planes fitted to calibration and test coordinates.

    :param test_coords_path: Excel file of test coordinates with at least columns
        'id', 'x', 'y', 'z' (and 'r' is derived if absent).
    :param spct_stats_path: Excel file of SPCT calibration stats; only used by the
        (currently disabled) FIT PLANE section.

    NOTE(review): the `fit_plane` and `distortions` sections are hard-coded off;
    only the precision section runs. Figures are shown, not saved.
    """
    # details
    # NOTE(review): image size 512 px and 0.8 um/px are hard-coded here; `area` and
    # `num_pids` are computed but unused in the active code path.
    microns_per_pixel = 0.8
    area = (512 * microns_per_pixel) ** 2
    # read dataframe
    df = pd.read_excel(test_coords_path)
    # get number of particles
    pids = df.id.unique()
    num_pids = len(pids)
    # compute the radial distance
    # radial distance from the image center (256, 256) in pixels
    if 'r' not in df.columns:
        df['r'] = np.sqrt((256 - df.x) ** 2 + (256 - df.y) ** 2)
    # ------------------------------------------- PRECISION (R, Z) ---------------------------------------------------
    precision = True
    if precision:
        # ----------------------------------------- PRECISION (R, ID) --------------------------------------------------
        # ------------------------------------------- PRECISION (ID) ---------------------------------------------------
        # precision @ z
        per_id = True
        if per_id:
            # per-particle precision of z, r, x, y (one bin per particle id)
            pos = ['z', 'r', 'x', 'y']
            dfp_id, dfpm = analyze.evaluate_1d_static_precision(df,
                                                                column_to_bin='id',
                                                                precision_columns=pos,
                                                                bins=pids,
                                                                round_to_decimal=0)
            # plot bin(id)
            count_column = 'counts'
            plot_columns = ['z']
            xparams = ['r_m', 'x_m', 'y_m']
            # axial localization precision: z(id, r)
            # one figure per positional dependency: z-precision vs. mean r, x, y
            for xparam in xparams:
                for pc in plot_columns:
                    dfp_id = dfp_id.sort_values(xparam)
                    # measure statistical significance of correlation
                    measurement = dfp_id[pc].to_numpy()
                    dependency = dfp_id[xparam].to_numpy()
                    pearson_r, pearson_pval = np.round(pearsonr(measurement, dependency), 3)
                    spearman_r, spearman_pval = np.round(spearmanr(measurement, dependency), 3)
                    fig, ax = plt.subplots()
                    ax.plot(dfp_id[xparam], dfp_id[pc], '-o')
                    # fit line
                    popt, pcov, ffunc = fit.fit(dfp_id[xparam], dfp_id[pc], fit_function=functions.line)
                    ax.plot(dfp_id[xparam], ffunc(dfp_id[xparam], *popt), linestyle='--', color='black',
                            label='Fit: d{}/d{} = {} \n'
                                  'Pearson(r, p): {}, {}\n'
                                  'Spearman(r, p): {}, {}'.format(pc,
                                                                  xparam,
                                                                  np.format_float_scientific(popt[0],
                                                                                             precision=2,
                                                                                             exp_digits=2),
                                                                  pearson_r, pearson_pval,
                                                                  spearman_r, spearman_pval,
                                                                  )
                            )
                    ax.set_xlabel(xparam)
                    ax.set_ylabel('{} precision'.format(pc))
                    ax.legend()
                    # right axis: number of samples per particle
                    axr = ax.twinx()
                    axr.plot(dfp_id[xparam], dfp_id[count_column], '-s', markersize=2, alpha=0.25)
                    axr.set_ylabel(r'$N_{p} \: (\#)$', color='gray')
                    axr.set_ylim([0, int(np.round(dfp_id[count_column].max() + 6, -1))])
                    plt.tight_layout()
                    plt.show()
    # ------------------------------------------------- FIT PLANE ------------------------------------------------------
    # NOTE(review): disabled branch; requires spct_stats_path when enabled.
    fit_plane = False
    if fit_plane and spct_stats_path:
        # group test dataframe by particle id
        dft = df.groupby('id').mean()
        # get mean z from test
        z_test_mean = dft.z.mean()
        # read calibration dataframe to fit plane for in-focus image
        dfc = pd.read_excel(spct_stats_path)
        # --------------------------------------- FIT PLANE TO CALIBRATION (PIXELS) ------------------------------------
        # get z in-focs
        # nearest calibration z_true to the mean test z
        zf_nearest = dfc.iloc[np.argmin(np.abs(dfc['z_true'] - z_test_mean))].z_true
        dff = dfc[dfc['z_true'] == zf_nearest]
        # fit plane on calibration in-focus (x, y, z units: pixels)
        points_pixels = np.stack((dff.x,
                                  dff.y,
                                  dff.z_corr)).T
        px_pixels, py_pixels, pz_microns, popt_calib = fit.fit_3d(points_pixels, fit_function='plane')
        # --------------------------------------- FIT PLANE TO TEST (PIXELS) ------------------------------------
        # fit plane to test (x, y, z units: pixels)
        points_pixels = np.stack((dft.x,
                                  dft.y,
                                  dft.z)).T
        px_pixels, py_pixels, pz_microns, popt_test = fit.fit_3d(points_pixels, fit_function='plane')
        # -------------------------------- CALCULATE PLANE POSITIONS FOR TEST PARTICLES --------------------------------
        # calculate z on fitted 3D plane for all particle locations
        # in-focus (from calibration)
        dft['z_plane_f'] = functions.calculate_z_of_3d_plane(dft.x, dft.y, popt=popt_calib)
        # for test
        dft['z_plane_t'] = functions.calculate_z_of_3d_plane(dft.x, dft.y, popt=popt_test)
        # calculate error
        dft['z_error_t'] = dft['z_plane_t'] - dft['z']
        # ------------------------------------- PLOT DEVIATIONS FROM FITTED PLANE --------------------------------------
        # scatter plot
        fig, ax = plt.subplots()
        sc = ax.scatter(dft.x, dft.y, c=dft.z_error_t)
        plt.colorbar(sc)
        ax.set_xlabel('x')
        ax.set_ylabel('y')
        plt.tight_layout()
        plt.show()
        # heat map 1
        # move x, y, z series to numpy arrays
        x = dft.x.to_numpy()
        y = dft.y.to_numpy()
        z = dft.z_error_t.to_numpy()
        # get spatial coordinate extents
        # NOTE(review): xspace/yspace/zspace are computed but never used below.
        xspace = np.max(x) - np.min(x)
        yspace = np.max(y) - np.min(y)
        zspace = np.max(z) - np.min(z)
        # Create grid values first.
        xi = np.linspace(np.min(x), np.max(x), 250)
        yi = np.linspace(np.min(y), np.max(y), 250)
        # cubic interpolation of plane-deviation onto the regular grid
        zi = griddata((x, y), z, (xi[None, :], yi[:, None]), method='cubic')
        X, Y = np.meshgrid(xi, yi)
        fig, ax = plt.subplots()
        sc = ax.pcolormesh(X, Y, zi, shading='auto', cmap="RdBu_r")
        ax.scatter(x, y, color='black', label=r"$p_{i}$")
        cbar = fig.colorbar(sc, ax=ax)
        cbar.ax.set_title(r'$\epsilon_{z,plane}$')
        ax.set_xlabel(r'$x \: (pix)$')
        ax.set_xlim([0, 512])
        ax.set_ylabel(r'$y \: (pix)$')
        ax.set_ylim([0, 512])
        ax.legend()
        plt.axis("equal")
        plt.tight_layout()
        plt.show()
    # ------------------------------------------- DISTORTIONS (R, Z) ---------------------------------------------------
    # NOTE(review): placeholder; no distortion analysis implemented yet.
    distortions = False
    if distortions:
        # -------------------------------------- DISTORTIONS (R, Z) ----------------------------------------------------
        # deviations from fitted plane
        pass
# ------------------------------------------ TEST ANALYSIS FUNCTIONS ---------------------------------------------------
def plot_test_coords(df, evaluate):
    """
    Planned analysis of test coordinates (placeholder — not yet implemented).

    Inputs:
        * A filtered, corrected dataframe.
          Note: This means metrics like 'percent_measured' should have already been calculated.

    Core Functions:
        A. Precision
            1. mean lateral and axial precision.
            2. z-dependent lateral and axial precision.
        B. Error
            1. mean lateral and axial r.m.s. error.
            2. z-dependent lateral and axial r.m.s. error.
        C. Sampling Frequency
            1. number density
            2. mean particle to particle spacing
            3. Nyquist sampling frequency

    Optional Functions:
        Opt.A. Precision: r-dependence
        Opt.B. Error: r-dependence

    Precision Analysis Module (planned):
        1. Mean lateral precision
            a. measure lateral precision by particle ID at each discrete z-step (requires n >= 3 samples).
            b. weighted-average (1.a) to get mean lateral precision.
        2. Z-dependent lateral precision
            b. weighted-average for each discrete z-step.
        3. Mean axial precision
            a. measure axial precision by particle ID at each discrete z-step (requires n >= 3 samples).
            b. weighted-average (3.a) to get mean axial precision.
        4. Z-dependent axial precision
            a. weighted-average (3.a) for each discrete z-step.

    :param df: filtered, corrected test-coordinates dataframe (currently unused).
    :param evaluate: analysis selector (currently unused).
    :return: None.
    """
# ------------------------------------------------ ERROR ANALYSIS ------------------------------------------------------
def plot_error_analysis(dft, path_figs, path_results):
    """Plot z-error distributions of test coordinates, binned by radial position.

    Filters out large (focal-plane-bias) errors, then produces five figures:
    (1) all errors vs. z_true with a quadratic fit; (2) per-r-bin errors vs.
    z_true with per-bin quadratic fits; (3) the fitted quadratics alone;
    (4) mean +/- std error per r-bin; (5) 2D-binned (r, z) mean errors.

    :param dft: dataframe with columns 'error', 'z_true', 'x', 'y', 'id' (and 'r' derived if absent).
    :param path_figs: directory to save figures into.
    :param path_results: directory to save exported spreadsheets into.

    NOTE(review): the figure branches share state — `popts_r`/`lbls_r` are created
    in the fig_2 branch and reused by fig_3/fig_5; `column_to_bin`/`column_to_count`/
    `round_to_decimal` from fig_2 are reused by fig_4. Disabling fig_2 while leaving
    the later flags on would raise NameError.
    """
    # setup error
    error_threshold = 7.5
    bins_r = 5
    bins_z = 33
    # plot
    xlim = [-57.5, 62.5]
    xyticks = [-50, -25, 0, 25, 50]
    yerr_lims = [-7.5, 7.5]
    yerr_ticks = [-5, 0, 5]
    # basic
    image_dimensions = (512, 512)
    # figs
    fig_1, fig_2, fig_3, fig_4, fig_5 = True, True, True, True, True
    # test-coords without (mostly) focal plane bias errors
    df_error = dft[dft['error'].abs() < error_threshold]
    df_error = df_error.sort_values('z_true')
    # --- PLOTTING
    # FIGURE 1: all errors by z_true & fit quadratic
    if fig_1:
        fig, ax = plt.subplots()
        # data
        ax.scatter(df_error.z_true, df_error.error, s=0.125, marker='.', label='Data')
        # fit quadratic
        popt, pcov = curve_fit(functions.quadratic, df_error.z_true, df_error.error)
        ax.plot(df_error.z_true, functions.quadratic(df_error.z_true, *popt), linestyle='--', color='black',
                label='Fit')
        ax.set_xlabel(r'$z_{true} \: (\mu m)$')
        ax.set_xlim(xlim)
        ax.set_xticks(ticks=xyticks, labels=xyticks)
        ax.set_ylabel(r'$\epsilon_{z} \: (\mu m)$')
        # title shows the fitted quadratic coefficients
        ax.set_title('Fit: {}'.format(np.round(popt[0], 4)) + r'$x^2$' +
                     ' + {}'.format(np.round(popt[1], 4)) + r'$x$' +
                     ' + {}'.format(np.round(popt[2], 4))
                     )
        ax.legend(loc='upper right')
        plt.tight_layout()
        plt.savefig(path_figs + '/all_normalized-z-errors_by_z_and_fit-quadratic_errlim{}.png'.format(error_threshold))
        plt.show()
    # ---
    # FIGURE 2: bin(r) - all errors by z_true & fit quadratic
    if fig_2:
        # radial distance from the image center, derived on demand
        if 'r' not in df_error.columns:
            df_error['r'] = np.sqrt(
                (df_error.x - image_dimensions[0] / 2) ** 2 +
                (df_error.y - image_dimensions[1] / 2) ** 2
            )
        # bin(r)
        column_to_bin = 'r'
        column_to_count = 'id'
        round_to_decimal = 1
        return_groupby = False
        dfb = bin.bin_generic(df_error,
                              column_to_bin,
                              column_to_count,
                              bins_r,
                              round_to_decimal,
                              return_groupby
                              )
        dfb = dfb.sort_values('bin')
        # plot: one stacked subplot per radial bin
        fig, ax = plt.subplots(nrows=bins_r, sharex=True, figsize=(size_x_inches, size_y_inches * bins_r / 2.4))
        popts_r = []
        lbls_r = []
        for i, bin_r in enumerate(dfb.bin.unique()):
            dfbr = dfb[dfb['bin'] == bin_r]
            dfbr = dfbr.sort_values('z_true')
            # formatting
            bin_id = int(np.round(bin_r, 0))
            lbl_bin = r'$r_{bin}=$' + '{}'.format(bin_id)
            lbls_r.append(bin_id)
            # data
            sc, = ax[i].plot(dfbr.z_true, dfbr.error,
                             marker='o',
                             ms=0.5,
                             linestyle='',
                             color=sci_color_cycle[i],
                             alpha=0.5,
                             label=lbl_bin)
            # fit quadratic
            popt, pcov = curve_fit(functions.quadratic, dfbr.z_true, dfbr.error)
            popts_r.append(popt)
            # NOTE(review): lbl_quadratic is built but not attached to any artist.
            lbl_quadratic = r'Fit: {}'.format(np.round(popt[0], 4)) + r'$x^2$' + \
                            r' + {}'.format(np.round(popt[1], 4)) + r'$x$' + \
                            r' + {}'.format(np.round(popt[2], 4))
            ax[i].plot(dfbr.z_true, functions.quadratic(dfbr.z_true, *popt),
                       linestyle='--',
                       color=lighten_color(sc.get_color(), amount=1.25),
                       )
            # label each figure
            ax[i].set_ylim(yerr_lims)
            ax[i].set_yticks(ticks=yerr_ticks, labels=yerr_ticks)
            ax[i].legend(loc='upper right')
        ax[bins_r - 1].set_xlabel(r'$z_{true} \: (\mu m)$')
        ax[bins_r - 1].set_xlim(xlim)
        ax[bins_r - 1].set_xticks(ticks=xyticks, labels=xyticks)
        ax[int(np.floor(bins_r / 2))].set_ylabel(r'$\epsilon_{z} \: (\mu m)$')
        plt.savefig(
            path_figs + '/bin-r_normalized-z-errors_by_z_and_fit-quadratic_errlim{}.png'.format(error_threshold))
        plt.tight_layout()
        plt.show()
        plt.close()
    # ---
    # FIGURE 3: plot only fit quadratics
    if fig_3:
        fit_z_true = np.linspace(df_error.z_true.min(), df_error.z_true.max(), 100)
        # plot (reuses popts_r / lbls_r from the fig_2 branch)
        fig, ax = plt.subplots()
        for i, popt in enumerate(popts_r):
            ax.plot(fit_z_true, functions.quadratic(fit_z_true, *popt),
                    linestyle='-',
                    color=sci_color_cycle[i],
                    label=lbls_r[i])
        ax.set_xlabel(r'$z_{true} \: (\mu m)$')
        ax.set_xlim(xlim)
        ax.set_xticks(ticks=xyticks, labels=xyticks)
        ax.set_ylabel(r'$\epsilon_{z} \: (\mu m)$')
        ax.set_ylim(yerr_lims)
        ax.set_yticks(ticks=yerr_ticks, labels=yerr_ticks)
        ax.legend(loc='upper right')
        plt.savefig(
            path_figs + '/bin-r_normalized-z-errors_by_z_fit-quadratic_errlim{}.png'.format(error_threshold))
        plt.tight_layout()
        plt.show()
        plt.close()
    # ---
    # FIGURE 4: bin(r).groupby(r).mean() - all errors by z_true & fit quadratic
    if fig_4:
        # reuses column_to_bin / column_to_count / round_to_decimal from the fig_2 branch
        return_groupby = True
        dfm, dfstd = bin.bin_generic(df_error,
                                     column_to_bin,
                                     column_to_count,
                                     bins_r,
                                     round_to_decimal,
                                     return_groupby)
        dfm = dfm.sort_values('bin')
        dfstd = dfstd.sort_values('bin')
        fig, ax = plt.subplots()
        ax.errorbar(dfm.bin, dfm.error, yerr=dfstd.error, fmt='-o', elinewidth=1, capsize=2)
        ax.set_xlabel(r'$r_{bin} \: (\mu m)$')
        ax.set_xlim([-10, 350])
        ax.set_xticks(ticks=[0, 100, 200, 300])
        ax.set_ylabel(r'$\epsilon_{z} \: (\mu m)$')
        plt.savefig(
            path_figs + '/bin-r_normalized-z-errors_by_z_mean+std_errlim{}.png'.format(error_threshold))
        plt.tight_layout()
        plt.show()
        plt.close()
        # export
        dfm.to_excel(path_results + '/bin-r_mean-z-errors_errlim{}.xlsx'.format(error_threshold))
        dfstd.to_excel(path_results + '/bin-r_std-z-errors_errlim{}.xlsx'.format(error_threshold))
    # ---
    # FIGURE 4: bin(r, z).groupby(r).mean() - all errors by z_true & fit quadratic
    # bin(r)
    plot_fit = False
    if fig_5:
        columns_to_bin = ['r', 'z_true']
        column_to_count = 'id'
        bins = [bins_r, bins_z]
        round_to_decimals = [1, 3]
        min_num_bin = 10
        return_groupby = True
        dfm, dfstd = bin.bin_generic_2d(df_error,
                                        columns_to_bin,
                                        column_to_count,
                                        bins,
                                        round_to_decimals,
                                        min_num_bin,
                                        return_groupby
                                        )
        # resolve floating point bin selecting
        dfm = dfm.round({'bin_tl': 0, 'bin_ll': 2})
        dfstd = dfstd.round({'bin_tl': 0, 'bin_ll': 2})
        dfm = dfm.sort_values(['bin_tl', 'bin_ll'])
        dfstd = dfstd.sort_values(['bin_tl', 'bin_ll'])
        # plot: one error-bar curve per top-level (r) bin
        fig, ax = plt.subplots()
        for i, bin_r in enumerate(dfm.bin_tl.unique()):
            dfbr = dfm[dfm['bin_tl'] == bin_r]
            dfbr_std = dfstd[dfstd['bin_tl'] == bin_r]
            # scatter: mean +/- std
            ax.errorbar(dfbr.bin_ll, dfbr.error, yerr=dfbr_std.error,
                        fmt='-o', ms=2, elinewidth=0.5, capsize=1, label=int(np.round(bin_r, 0)))
            # plot: fit
            if plot_fit:
                fit_bin_ll = np.linspace(dfbr.bin_ll.min(), dfbr.bin_ll.max())
                ax.plot(fit_bin_ll, functions.quadratic(fit_bin_ll, *popts_r[i]),
                        linestyle='--',
                        color=lighten_color(sci_color_cycle[i], amount=1.25),
                        )
        ax.set_xlabel(r'$z_{true} \: (\mu m)$')
        ax.set_xlim(xlim)
        ax.set_xticks(ticks=xyticks, labels=xyticks)
        ax.set_ylabel(r'$\epsilon_{z} \: (\mu m)$')
        ax.set_ylim(yerr_lims)
        ax.set_yticks(ticks=yerr_ticks, labels=yerr_ticks)
        ax.legend(loc='upper right', title=r'$r_{bin}$',
                  borderpad=0.2, handletextpad=0.6, borderaxespad=0.25, markerscale=0.75)
        plt.savefig(
            path_figs + '/bin-r-z_normalized-z-errors_by_z_and_fit-quadratic_errlim{}.png'.format(error_threshold))
        plt.tight_layout()
        plt.show()
        plt.close()
# ---------------------------------------------- HELPER FUNCTIONS ------------------------------------------------------
def plot_spct_stats_bin_z(df, column_to_bin, column_to_count, bins, round_to_decimal, save_figs, path_figs, show_figs,
                          export_results, path_results):
    """Bin SPCT stats along z, export the grouped mean/std, plot each metric vs. z,
    and return a small summary dict of SNR/peak-intensity endpoints."""
    if save_figs:
        fig_dir = path_figs + '/spct-stats_bin-z'
        if not os.path.exists(fig_dir):
            os.makedirs(fig_dir)
    # group-by-bin mean and standard deviation
    df_mean, df_std = bin.bin_generic(df,
                                      column_to_bin=column_to_bin,
                                      column_to_count=column_to_count,
                                      bins=bins,
                                      round_to_decimal=round_to_decimal,
                                      return_groupby=True)
    # export results
    if export_results:
        res_dir = path_results + '/spct-stats_bin-z'
        if not os.path.exists(res_dir):
            os.makedirs(res_dir)
        df_mean.to_excel(res_dir + '/spct-stats_bin-z_dfm.xlsx')
        df_std.to_excel(res_dir + '/spct-stats_bin-z_dfstd.xlsx')
    # one figure per per-particle statistic; right axis shows the bin counts
    counts = 'count_{}'.format(column_to_count)
    metrics = ['peak_int', 'snr', 'nsv', 'nsv_signal',
               'solidity', 'thinness_ratio', 'gauss_diameter', 'gauss_dia_x_y', 'gauss_sigma_x_y',
               'min_dx', 'mean_dxo', 'num_dxo', 'percent_dx_diameter']
    for metric in metrics:
        fig, ax = plt.subplots()
        ax.errorbar(df_mean.bin, df_mean[metric], yerr=df_std[metric], fmt='o', linewidth=2, capsize=6,
                    color='tab:blue', alpha=0.25, label=r'$\mu + \sigma$')
        ax.plot(df_mean.bin, df_mean[metric], color='tab:blue')
        ax.set_xlabel(r'$z_{corr}$')
        ax.set_ylabel(metric)
        ax.legend()
        axr = ax.twinx()
        axr.plot(df_mean.bin, df_mean[counts], '-o', markersize=2, color='gray', alpha=0.125)
        axr.set_ylabel(r'$N_{p} \: (\#)$', color='gray')
        axr.set_ylim([0, int(np.round(df_mean[counts].max() + 6, -1))])
        plt.tight_layout()
        if save_figs:
            plt.savefig(fig_dir + '/spct-stats_bin-z_{}.png'.format(metric))
        if show_figs:
            plt.show()
        plt.close()
    # summary endpoints (first/last z-bin) for the overview dictionary
    return {'zmin_snr': df_mean.iloc[0].snr,
            'zmax_snr': df_mean.iloc[-1].snr,
            'zmin_peak_int': df_mean.iloc[0].peak_int,
            'zmax_peak_int': df_mean.iloc[-1].peak_int,
            }
def plot_spct_stats_bin_id(df, column_to_count, num_pids, save_figs, path_figs, show_figs, export_results,
                           path_results):
    """Group SPCT stats by particle id, export the per-particle mean/std, and plot
    each metric against particle id with frame counts on a secondary axis."""
    if save_figs:
        fig_dir = path_figs + '/spct-stats_bin-id'
        if not os.path.exists(fig_dir):
            os.makedirs(fig_dir)
    # per-particle mean and standard deviation across frames
    grp_mean, grp_std = bin.bin_generic(df,
                                        column_to_bin='id',
                                        column_to_count=column_to_count,
                                        bins=num_pids,
                                        round_to_decimal=0,
                                        return_groupby=True)
    # export results
    if export_results:
        res_dir = path_results + '/spct-stats_bin-id'
        if not os.path.exists(res_dir):
            os.makedirs(res_dir)
        grp_mean.to_excel(res_dir + '/spct-stats_bin-id_dfm.xlsx')
        grp_std.to_excel(res_dir + '/spct-stats_bin-id_dfstd.xlsx')
    counts = 'count_{}'.format(column_to_count)
    metrics = ['peak_int', 'snr', 'nsv', 'nsv_signal',
               'solidity', 'thinness_ratio', 'gauss_diameter', 'gauss_dia_x_y', 'gauss_sigma_x_y',
               'min_dx', 'mean_dxo', 'num_dxo', 'percent_dx_diameter']
    for metric in metrics:
        fig, ax = plt.subplots()
        ax.errorbar(grp_mean.bin, grp_mean[metric], yerr=grp_std[metric], fmt='o', linewidth=2, capsize=6,
                    color='tab:blue', alpha=0.25, label=r'$\mu + \sigma$')
        ax.plot(grp_mean.bin, grp_mean[metric], color='tab:blue')
        ax.set_xlabel(r'$p_{ID}$')
        ax.set_ylabel(metric)
        ax.legend()
        axr = ax.twinx()
        axr.plot(grp_mean.bin, grp_mean[counts], '-o', markersize=2, color='gray', alpha=0.125)
        axr.set_ylabel(r'$N_{frames} \: (\#)$', color='gray')
        axr.set_ylim([0, int(np.round(grp_mean[counts].max() + 6, -1))])
        plt.tight_layout()
        if save_figs:
            plt.savefig(fig_dir + '/spct-stats_bin-id_{}.png'.format(metric))
        if show_figs:
            plt.show()
        plt.close()
def plot_spct_stats_bin_2d(df, columns_to_bin, column_to_count, bins, round_to_decimals, min_num_bin,
                           save_figs, path_figs, show_figs, export_results, path_results, plot_columns):
    """Two-level binning of SPCT stats: for each metric, plot one curve per
    top-level bin against the low-level bin, with counts in a second subplot."""
    top_level = columns_to_bin[0]
    low_level = columns_to_bin[1]
    id_string = 'spct-stats_bin-{}-{}'.format(top_level, low_level)
    if save_figs:
        fig_dir = path_figs + '/' + id_string
        if not os.path.exists(fig_dir):
            os.makedirs(fig_dir)
    grp_mean, grp_std = bin.bin_generic_2d(df,
                                           columns_to_bin,
                                           column_to_count,
                                           bins,
                                           round_to_decimals,
                                           min_num_bin,
                                           return_groupby=True,
                                           )
    # export results
    if export_results:
        res_dir = path_results + '/' + id_string
        if not os.path.exists(res_dir):
            os.makedirs(res_dir)
        grp_mean.to_excel(res_dir + '/{}_dfm.xlsx'.format(id_string))
        grp_std.to_excel(res_dir + '/{}_dfstd.xlsx'.format(id_string))
    # plot
    counts = 'count_{}'.format(column_to_count)
    top_bins = grp_mean.bin_tl.unique()
    center = np.floor(len(top_bins) / 2)
    for pc in plot_columns:
        fig, [ax, axr] = plt.subplots(nrows=2, sharex=True, figsize=(size_x_inches * 1.25, size_y_inches * 1.25))
        for i, tl in enumerate(top_bins):
            mean_tl = grp_mean[grp_mean['bin_tl'] == tl]
            std_tl = grp_std[grp_std['bin_tl'] == tl]
            # small horizontal offset per curve so overlapping error bars stay legible
            sx = np.mean(mean_tl.bin_ll.diff()) / 50 * (i - center)
            ax.errorbar(mean_tl.bin_ll + sx, mean_tl[pc], yerr=std_tl[pc],
                        fmt='-o', ms=3, linewidth=1, capsize=2, alpha=0.75, label=np.round(tl, 1))
            axr.plot(mean_tl.bin_ll + sx, mean_tl[counts], '-d', markersize=2)
        ax.set_ylabel(pc + r' $\: (\mu \pm \sigma)$')
        if pc in ['gauss_dia_x_y', 'gauss_sigma_x_y']:
            ax.set_ylim([0.7, 1.3])
        ax.legend(title=top_level, loc='upper left', bbox_to_anchor=(1, 1))
        axr.set_xlabel(low_level)
        axr.set_ylabel(r'$N_{p} \: (\#)$', color='gray')
        axr.set_ylim([0, int(np.round(grp_mean[counts].max() + 6, -1))])
        plt.tight_layout()
        if save_figs:
            plt.savefig(fig_dir + '/{}_{}.png'.format(id_string, pc))
        if show_figs:
            plt.show()
        plt.close()
def bin_plot_spct_stats_3d_static_precision_z_r_id(df, columns_to_bin, precision_columns, bins, round_to_decimals,
                                                   export_results, path_results, save_figs, path_figs, show_figs):
    """Evaluate 3D static precision binned by (z, r, id), export both levels, and
    plot the weighted-average precision per r-bin as a function of z."""
    per_particle, weighted = analyze.evaluate_3d_static_precision(df,
                                                                  columns_to_bin=columns_to_bin,
                                                                  precision_columns=precision_columns,
                                                                  bins=bins,
                                                                  round_to_decimals=round_to_decimals)
    # export results
    if export_results:
        res_dir = path_results + '/spct-stats_3d-precision_z-r-id'
        if not os.path.exists(res_dir):
            os.makedirs(res_dir)
        per_particle.to_excel(res_dir + '/spct-stats_bin-id-r-z.xlsx')
        weighted.to_excel(res_dir + '/spct-stats_bin-id-r-z_weighted-average.xlsx')
    # save and/or show plots
    if save_figs or show_figs:
        fig_dir = path_figs + '/spct-stats_3d-precision_z-r-id'
        if not os.path.exists(fig_dir):
            os.makedirs(fig_dir)
        # one curve per value of the second bin column (e.g. r), x-axis is the first (e.g. z)
        counts = 'counts'
        xparam = columns_to_bin[0]
        pparams = columns_to_bin[1]
        curve_values = np.unique(weighted[pparams].to_numpy())
        for pc in precision_columns:
            fig, ax = plt.subplots()
            axr = ax.twinx()
            for cv in curve_values:
                curve = weighted[weighted[pparams] == cv]
                ax.plot(curve[xparam], curve[pc], '-o', markersize=3, label=np.round(cv, 1))
                axr.plot(curve[xparam], curve[counts], '-s', markersize=2, alpha=0.25)
            ax.set_xlabel(xparam)
            ax.set_ylabel('{} precision'.format(pc))
            ax.legend(title=pparams)
            axr.set_ylabel(r'$N_{p} \: (\#)$', color='gray')
            axr.set_ylim([0, int(np.round(weighted[counts].max() + 6, -1))])
            plt.tight_layout()
            if save_figs:
                plt.savefig(fig_dir + '/spct-stats_bin-z-r-id_{}.png'.format(pc))
            if show_figs:
                plt.show()
            plt.close()
def bin_plot_spct_stats_2d_static_precision_z_id(df, column_to_bin, precision_columns, bins, round_to_decimal,
                                                 export_results, path_results, save_figs, path_figs, show_figs):
    """Evaluate 2D static precision binned by (z, id), export both levels, and
    plot the weighted-average precision against z."""
    per_particle, weighted = analyze.evaluate_2d_static_precision(df,
                                                                  column_to_bin=column_to_bin,
                                                                  precision_columns=precision_columns,
                                                                  bins=bins,
                                                                  round_to_decimal=round_to_decimal)
    # export results
    if export_results:
        res_dir = path_results + '/spct-stats_2d-precision_z-id'
        if not os.path.exists(res_dir):
            os.makedirs(res_dir)
        per_particle.to_excel(res_dir + '/spct-stats_bin-id-z.xlsx')
        weighted.to_excel(res_dir + '/spct-stats_bin-id-z_weighted-average.xlsx')
    # save and/or show plots
    if save_figs or show_figs:
        fig_dir = path_figs + '/spct-stats_2d-precision_z-id'
        if not os.path.exists(fig_dir):
            os.makedirs(fig_dir)
        counts = 'counts'
        for pc in precision_columns:
            fig, ax = plt.subplots()
            ax.plot(weighted[column_to_bin], weighted[pc], '-o', markersize=3)
            ax.set_xlabel(column_to_bin)
            ax.set_ylabel('{} precision'.format(pc))
            axr = ax.twinx()
            axr.plot(weighted[column_to_bin], weighted[counts], '-s', markersize=2, alpha=0.25)
            axr.set_ylabel(r'$N_{p} \: (\#)$', color='gray')
            axr.set_ylim([0, int(np.round(weighted[counts].max() + 6, -1))])
            plt.tight_layout()
            if save_figs:
                plt.savefig(fig_dir + '/spct-stats_bin-z-id_{}.png'.format(pc))
            if show_figs:
                plt.show()
            plt.close()
def bin_plot_spct_stats_2d_static_precision_mindx_id(df, column_to_bin, precision_columns, bins, round_to_decimals,
                                                     bin_count_threshold,
                                                     export_results, path_results, save_figs, path_figs, show_figs):
    """Evaluate 2D static precision binned by (min dx, id); sparse bins (count at or
    below `bin_count_threshold`) are dropped before plotting."""
    per_particle, weighted = analyze.evaluate_2d_static_precision(df,
                                                                  column_to_bin=column_to_bin,
                                                                  precision_columns=precision_columns,
                                                                  bins=bins,
                                                                  round_to_decimal=round_to_decimals)
    # export results
    if export_results:
        res_dir = path_results + '/spct-stats_2d-precision_mindx-id'
        if not os.path.exists(res_dir):
            os.makedirs(res_dir)
        per_particle.to_excel(res_dir + '/spct-stats_bin-mindx-id.xlsx')
        weighted.to_excel(res_dir + '/spct-stats_bin-mindx-id_weighted-average.xlsx')
    # save and/or show plots
    if save_figs or show_figs:
        fig_dir = path_figs + '/spct-stats_2d-precision_mindx-id'
        if not os.path.exists(fig_dir):
            os.makedirs(fig_dir)
        counts = 'counts'
        if bin_count_threshold is not None:
            weighted = weighted[weighted[counts] > bin_count_threshold]
        for pc in precision_columns:
            fig, ax = plt.subplots()
            ax.plot(weighted[column_to_bin], weighted[pc], '-o', markersize=3)
            ax.set_xlabel(column_to_bin)
            ax.set_ylabel('{} precision'.format(pc))
            axr = ax.twinx()
            axr.plot(weighted[column_to_bin], weighted[counts], '-s', markersize=2, alpha=0.25)
            axr.set_ylabel(r'$N_{p} \: (\#)$', color='gray')
            axr.set_ylim([0, int(np.round(weighted[counts].max() + 6, -1))])
            plt.tight_layout()
            if save_figs:
                plt.savefig(fig_dir + '/spct-stats_bin-mindx-id_{}.png'.format(pc))
            if show_figs:
                plt.show()
            plt.close()
def bin_plot_spct_stats_2d_static_precision_pdo_id(df, column_to_bin, precision_columns, bins, round_to_decimal,
                                                   pdo_threshold, bin_count_threshold,
                                                   export_results, path_results, save_figs, path_figs, show_figs):
    """Evaluate 2D static precision binned by (percent-diameter-overlap, id).

    If `pdo_threshold` is given, the overlap column is first clipped from below
    to that threshold so all heavily-overlapping particles share one bin.

    Fix vs. previous revision: the clipping is now applied to a copy of `df`,
    so the caller's dataframe is no longer mutated as a side effect.

    :param df: per-frame SPCT stats dataframe.
    :param column_to_bin: overlap column to bin on (e.g. 'percent_dx_diameter').
    :param precision_columns: columns whose static precision is evaluated.
    :param bins: number of bins for the overlap axis.
    :param round_to_decimal: rounding applied to bin values.
    :param pdo_threshold: lower clip value for the overlap column, or None.
    :param bin_count_threshold: drop weighted-average bins with counts at or below this, or None.
    :param export_results / path_results: export the two result levels to Excel.
    :param save_figs / path_figs / show_figs: figure saving/showing controls.
    """
    if pdo_threshold is not None:
        # work on a copy: clipping in place would silently mutate the caller's dataframe
        df = df.copy()
        df[column_to_bin] = df[column_to_bin].where(df[column_to_bin] > pdo_threshold, pdo_threshold)
    df_bin_pdxo_id, df_bin_pdxo = analyze.evaluate_2d_static_precision(df,
                                                                       column_to_bin=column_to_bin,
                                                                       precision_columns=precision_columns,
                                                                       bins=bins,
                                                                       round_to_decimal=round_to_decimal)
    # export results
    if export_results:
        path_save_results = path_results + '/spct-stats_2d-precision_pdo-id'
        if not os.path.exists(path_save_results):
            os.makedirs(path_save_results)
        df_bin_pdxo_id.to_excel(path_save_results + '/spct-stats_bin-pdo-id.xlsx')
        df_bin_pdxo.to_excel(path_save_results + '/spct-stats_bin-pdo-id_weighted-average.xlsx')
    # save and/or show plots
    if save_figs or show_figs:
        path_save_figs = path_figs + '/spct-stats_2d-precision_pdo-id'
        if not os.path.exists(path_save_figs):
            os.makedirs(path_save_figs)
        count_column = 'counts'
        if bin_count_threshold is not None:
            df_bin_pdxo = df_bin_pdxo[df_bin_pdxo[count_column] > bin_count_threshold]
        # plot bin(percent dx diameter, id)
        for pc in precision_columns:
            fig, ax = plt.subplots()
            ax.plot(df_bin_pdxo[column_to_bin], df_bin_pdxo[pc], '-o', markersize=3)
            ax.set_xlabel(column_to_bin)
            ax.set_ylabel('{} precision'.format(pc))
            axr = ax.twinx()
            axr.plot(df_bin_pdxo[column_to_bin], df_bin_pdxo[count_column], '-s', markersize=2, alpha=0.25)
            axr.set_ylabel(r'$N_{p} \: (\#)$', color='gray')
            axr.set_ylim([0, int(np.round(df_bin_pdxo[count_column].max() + 6, -1))])
            plt.tight_layout()
            if save_figs:
                plt.savefig(path_save_figs + '/spct-stats_bin-pdo-id_{}.png'.format(pc))
            if show_figs:
                plt.show()
            plt.close()
def bin_plot_spct_stats_1d_static_precision_id(df, precision_columns, bins, export_results, path_results,
                                               bin_count_threshold, save_figs, path_figs, show_figs):
    """Per-particle (1D) static precision: evaluate, export, plot, and return the
    key lateral precisions for the overview dictionary."""
    xparam = 'id'
    per_particle, weighted = analyze.evaluate_1d_static_precision(df,
                                                                  column_to_bin=xparam,
                                                                  precision_columns=precision_columns,
                                                                  bins=bins,
                                                                  round_to_decimal=0)
    # export results
    if export_results:
        res_dir = path_results + '/spct-stats_1d-precision_id'
        if not os.path.exists(res_dir):
            os.makedirs(res_dir)
        per_particle.to_excel(res_dir + '/spct-stats_bin-id.xlsx')
        weighted.to_excel(res_dir + '/spct-stats_bin-id_weighted-average.xlsx')
    # save and/or show plots
    if save_figs or show_figs:
        fig_dir = path_figs + '/spct-stats_1d-precision_id'
        if not os.path.exists(fig_dir):
            os.makedirs(fig_dir)
        counts = 'counts'
        # plotting only: drop sparsely-sampled particles (export above is unfiltered)
        if bin_count_threshold is not None:
            per_particle = per_particle[per_particle[counts] > bin_count_threshold]
        for pc in precision_columns:
            fig, ax = plt.subplots()
            ax.plot(per_particle[xparam], per_particle[pc], '-o', markersize=3)
            ax.set_xlabel(xparam)
            ax.set_ylabel('{} precision'.format(pc))
            axr = ax.twinx()
            axr.plot(per_particle[xparam], per_particle[counts], '-s', markersize=2, alpha=0.25)
            axr.set_ylabel(r'$N_{p} \: (\#)$', color='gray')
            axr.set_ylim([0, int(np.round(per_particle[counts].max() + 6, -1))])
            plt.tight_layout()
            if save_figs:
                plt.savefig(fig_dir + '/spct-stats_bin-id_{}.png'.format(pc))
            if show_figs:
                plt.show()
            plt.close()
    # package precision to update the overview dictionary
    return {'precision_x': weighted.loc['x'],
            'precision_gauss_xc': weighted.loc['gauss_xc'],
            'precision_y': weighted.loc['y'],
            'precision_gauss_yc': weighted.loc['gauss_yc'],
            'precision_gauss_rc': weighted.loc['gauss_rc'],
            }
def bin_plot_spct_stats_sampling_frequency_z_id(df, column_to_bin, bins, area, microns_per_pixel,
                                                export_results, path_results, save_figs, path_figs, show_figs):
    """Bin SPCT stats along `column_to_bin` (z) and evaluate emitter density and
    lateral Nyquist sampling, with optional export and plots.

    Parameters
    ----------
    df : pandas.DataFrame
        SPCT stats; must provide 'id', 'mean_dx', 'min_dx'. 'contour_diameter'
        and 'gauss_diameter' are optional.
    column_to_bin : str
        Column to bin along (typically axial position z).
    bins : int or array-like
        Bin specification forwarded to ``bin.bin_generic``.
    area : float
        Image area used to normalize the emitter density.
    microns_per_pixel : float
        Pixel-to-micron scale for the Nyquist sampling columns.
    export_results, save_figs, show_figs : bool
        Control Excel export and figure saving/showing.
    """
    dfzm, dfzstd = bin.bin_generic(df,
                                    column_to_bin=column_to_bin,
                                    column_to_count='id',
                                    bins=bins,
                                    round_to_decimal=4,
                                    return_groupby=True)
    # emitter density: particles per bin normalized by the image area
    dfzm['emitter_density'] = dfzm.count_id / area
    # lateral sampling frequency (2x spacing, converted to microns)
    dfzm['nyquist_mean_dx'] = 2 * dfzm.mean_dx * microns_per_pixel
    dfzm['nyquist_min_dx'] = 2 * dfzm.min_dx * microns_per_pixel
    # minimum lateral Nyquist sampling — only when diameter columns are available
    if 'contour_diameter' in dfzm.columns:
        dfzm['nyquist_min_no_contour_overlap'] = 2 * dfzm.contour_diameter * microns_per_pixel
    if 'gauss_diameter' in dfzm.columns:
        dfzm['nyquist_min_no_overlap'] = 2 * dfzm.gauss_diameter * microns_per_pixel
    # export results
    if export_results:
        path_save_results = path_results + '/spct-stats_sampling_frequency_z-id'
        if not os.path.exists(path_save_results):
            os.makedirs(path_save_results)
        dfzm.to_excel(path_save_results + '/spct-stats_sampling_frequency_z-id_mean.xlsx')
        dfzstd.to_excel(path_save_results + '/spct-stats_sampling_frequency_z-id_std.xlsx')
    # save and/or show plots
    if save_figs or show_figs:
        path_save_figs = path_figs + '/spct-stats_sampling_frequency_z-id'
        if not os.path.exists(path_save_figs):
            os.makedirs(path_save_figs)
        # plot: emitter density (z)
        fig, ax = plt.subplots()
        ax.plot(dfzm.bin, dfzm.emitter_density, '-o', markersize=3)
        ax.set_xlabel(r'$z \: (\mu m)$')
        ax.set_ylabel(r'emitter density $(\#/\mu m^2)$')
        plt.tight_layout()
        if save_figs:
            plt.savefig(path_save_figs + '/spct-stats_emitter-density.png')
        if show_figs:
            plt.show()
        plt.close()
        # plot: Nyquist sampling (z)
        fig, ax = plt.subplots()
        ax.plot(dfzm.bin, dfzm.nyquist_mean_dx, '-o', markersize=3, label=r'$\overline {\delta x}$')
        ax.plot(dfzm.bin, dfzm.nyquist_min_dx, '-o', markersize=3, label=r'$\delta x_{min}$')
        # bug fix: these two columns are optional (guarded above) — the original
        # plotted them unconditionally and raised AttributeError when the
        # gauss/contour diameter columns were absent from the input dataframe.
        if 'nyquist_min_no_overlap' in dfzm.columns:
            ax.plot(dfzm.bin, dfzm.nyquist_min_no_overlap, '-o', markersize=3,
                    label=r'$\delta x_{min, N.O.}^{Gaussian}$')
        if 'nyquist_min_no_contour_overlap' in dfzm.columns:
            ax.plot(dfzm.bin, dfzm.nyquist_min_no_contour_overlap, '-o', markersize=3,
                    label=r'$\delta x_{min, N.O.}^{Contour}$')
        ax.set_xlabel(r'$z \: (\mu m)$')
        ax.set_ylabel(r'$f_{Nyquist} \: (\mu m)$')
        ax.legend()
        plt.tight_layout()
        if save_figs:
            plt.savefig(path_save_figs + '/spct-stats_nyquist-sampling.png')
        if show_figs:
            plt.show()
        plt.close()
def plot_spct_stats_id_by_param(df, xparam, plot_columns, particle_ids, save_figs, path_figs, show_figs):
    """Plot each column in `plot_columns` against `xparam` for selected particle IDs.

    Parameters
    ----------
    df : pandas.DataFrame
        SPCT stats with 'id', 'x', 'y' and the columns named in `plot_columns`.
    xparam : str
        Column plotted on the x-axis.
    plot_columns : list
        Columns (or lists of columns, plotted together) for the y-axis.
    particle_ids : 'all', list/ndarray, float (number of random IDs), int (one ID), or None
        Which particle IDs to plot; None picks 5 at random.
    save_figs, show_figs : bool
        Control figure saving (under `path_figs`) and display.
    """
    # only resolve the save directory when saving — the original referenced
    # path_save_figs unconditionally and raised NameError when save_figs=False
    path_save_figs = None
    if save_figs:
        path_save_figs = path_figs + '/spct-stats_id_by_param'
        if not os.path.exists(path_save_figs):
            os.makedirs(path_save_figs)
    df = df.copy().dropna()
    all_pids = df.id.unique()
    # resolve particle_ids into an iterable of IDs
    if particle_ids == 'all':
        particle_ids = all_pids
    elif isinstance(particle_ids, (list, np.ndarray)):
        pass
    elif isinstance(particle_ids, float):
        particle_ids = np.random.choice(all_pids, int(particle_ids), replace=False)
    elif particle_ids is None:
        particle_ids = np.random.choice(all_pids, 5, replace=False)
    elif isinstance(particle_ids, int):
        particle_ids = [particle_ids]
    # filter out particles that translate too much
    dfg = df.groupby('id').std().reset_index()
    # bug fix: per-particle mean of the x- and y-stds. The original
    # np.mean([dfg.x, dfg.y]) (no axis) collapsed to a single scalar, making the
    # > 1.35 translation filter all-or-nothing across every particle.
    dfg['xy_std'] = np.mean([dfg.x, dfg.y], axis=0)
    particle_ids = [p for p in particle_ids if p not in dfg[dfg['xy_std'] > 1.35].id.unique()]
    for pid in particle_ids:
        dfpid = df[df['id'] == pid]
        # correct gaussian centers
        if 'gauss_xc_corr' in plot_columns:
            # bug fix: use dfpid (was df['gauss_xc']), matching the y-correction
            # below; the full-frame series misaligned indices for other particles
            dfpid['gauss_xc_corr'] = dfpid['gauss_xc'] - dfpid['x'] + (dfpid['y'] - dfpid.iloc[0].y)
        if 'gauss_yc_corr' in plot_columns:
            dfpid['gauss_yc_corr'] = dfpid['gauss_yc'] - dfpid['y'] + (dfpid['x'] - dfpid.iloc[0].x)
        if save_figs:
            # per-particle sub-directory encodes the mean (x, y) position
            path_save_figs_pid = path_save_figs + '/pid{}_x{}_y{}'.format(pid,
                                                                          int(np.round(dfpid.x.mean(), 0)),
                                                                          int(np.round(dfpid.y.mean(), 0))
                                                                          )
            if not os.path.exists(path_save_figs_pid):
                os.makedirs(path_save_figs_pid)
        for pc in plot_columns:
            fig, ax = plt.subplots()
            # a list entry plots several columns on the same axes
            if isinstance(pc, list):
                for ppc in pc:
                    ax.plot(dfpid[xparam], dfpid[ppc], '-o', markersize=3, label=ppc)
                ax.legend()
            else:
                ax.plot(dfpid[xparam], dfpid[pc], '-o', markersize=3)
            ax.set_xlabel(xparam)
            ax.set_ylabel(pc)
            ax.set_title(r'$p_{ID}$' + '{} (x={}, y={})'.format(pid,
                                                                np.round(dfpid.x.mean(), 1),
                                                                np.round(dfpid.y.mean(), 1)
                                                                )
                         )
            plt.tight_layout()
            if save_figs:
                # bug fix: format string has three fields but was given
                # (pid, pid, pc, xparam) — 'plot-{}' received pid and xparam was dropped
                plt.savefig(path_save_figs_pid + '/spct-stats_pid{}_plot-{}_by-{}.png'.format(pid, pc, xparam))
            if show_figs:
                plt.show()
            plt.close(fig)
def plot_spct_stats_compare_ids_by_param(df, xparam, compare_param, plot_columns, particle_ids,
                                         save_figs, path_figs, show_figs):
    """Overlay pairs of particle IDs on stacked axes, one axis per plot column.

    For each pair in `particle_ids`, plot every column of `plot_columns` against
    `xparam` with both particles on the same axes. A figure is vetoed from saving
    when any plotted column jumps by more than 1 between consecutive samples
    (likely a tracking/segmentation error).

    NOTE(review): assumes len(plot_columns) >= 2 — with a single column,
    plt.subplots(nrows=1) returns a bare Axes and ax[i] would fail; confirm callers.
    """
    # save and/or show plots
    if save_figs:
        path_save_figs = path_figs + '/spct-stats_compare-id_by_param/compare-{}'.format(compare_param)
        if not os.path.exists(path_save_figs):
            os.makedirs(path_save_figs)
    # get number of axes (one stacked axis per plot column)
    num_axes = len(plot_columns)
    # make axes labels: map known column names to LaTeX labels, pass others through
    axes_labels = []
    for pc in plot_columns:
        if pc == 'gauss_xc_corr':
            pc = r'$x_{c} \: (G)$'
        elif pc == 'gauss_yc_corr':
            pc = r'$y_{c} \: (G)$'
        elif pc == 'gauss_sigma_x_y':
            pc = r'$\sigma_{x} / \sigma_{y} \: (G)$'
        axes_labels.append(pc)
    # make sure particle ids is a list of lists (each inner list is one pair to compare)
    if not isinstance(particle_ids[0], list):
        particle_ids = [particle_ids]
    # copy the dataframe so per-particle corrections below don't mutate the caller's df
    df = df.copy().dropna()
    for pair_ids in particle_ids:
        save_this_fig = True
        fig, ax = plt.subplots(nrows=num_axes, sharex=True, figsize=(size_x_inches,
                                                           size_y_inches * (num_axes - 1) / 1.5))
        save_strings = []
        for pid in pair_ids:
            dfpid = df[df['id'] == pid]
            # create save string: rounded mean (x, y) position of this particle
            save_strings.append([int(np.round(dfpid.x.mean(), 0)), int(np.round(dfpid.y.mean(), 0))])
            # correct gaussian centers relative to the particle's first position
            if 'gauss_xc_corr' in plot_columns:
                # NOTE(review): df['gauss_xc'] (full frame) looks like a typo for
                # dfpid['gauss_xc'] — compare the y-correction below; pandas index
                # alignment makes the results match here, but confirm intent.
                dfpid['gauss_xc_corr'] = df['gauss_xc'] - dfpid['x'] + (dfpid['y'] - dfpid.iloc[0].y)
            if 'gauss_yc_corr' in plot_columns:
                dfpid['gauss_yc_corr'] = dfpid['gauss_yc'] - dfpid['y'] + (dfpid['x'] - dfpid.iloc[0].x)
            for i, pc in enumerate(plot_columns):
                ax[i].plot(dfpid[xparam], dfpid[pc], '-o', markersize=3)
                # veto saving when any column jumps > 1 between samples (bad plot)
                if dfpid[pc].diff().max() > 1:
                    save_this_fig = False
        # axes labels: shared x-axis label on the bottom axis only
        ax[num_axes - 1].set_xlabel(xparam)
        for i, lbl in enumerate(axes_labels):
            ax[i].set_ylabel(lbl)
        plt.tight_layout()
        if save_figs and save_this_fig:
            # filename encodes both particle IDs and their mean positions
            plt.savefig(path_save_figs + '/pids_{}-x{}y{}_{}-x{}y{}_plot_by-{}.png'.format(pair_ids[0],
                                                                                           save_strings[0][0],
                                                                                           save_strings[0][1],
                                                                                           pair_ids[1],
                                                                                           save_strings[1][0],
                                                                                           save_strings[1][1],
                                                                                           xparam,
                                                                                           )
                        )
        if show_figs:
            plt.show()
        plt.close(fig)
def plot_spct_stats_compare_ids_by_along_param(df, columns_to_bin, bins, low_level_bins_to_plot,
                                               plot_columns, column_to_plot_along, round_to_decimals,
                                               save_figs, path_figs, show_figs):
    """Plot `plot_columns` along `column_to_plot_along` for one random particle
    from each top-level bin, within a single low-level bin slice.

    The dataframe is 2D-binned by `columns_to_bin` (top-level / low-level). One
    low-level bin is selected via `low_level_bins_to_plot` (an index into the
    sorted unique low-level bins), then one randomly chosen particle per
    top-level bin is plotted on stacked axes, one axis per plot column.
    Particles whose Gaussian center jumps by more than 0.5 px between frames are
    skipped as segmentation errors.

    NOTE(review): assumes len(plot_columns) >= 2 — with a single column,
    plt.subplots(nrows=1) returns a bare Axes and ax[i] would fail.
    """
    # allow a single column name to be passed as a string
    if isinstance(plot_columns, str):
        plot_columns = [plot_columns]
    # save and/or show plots
    if save_figs:
        path_save_figs = path_figs + '/spct-stats_compare-along'
        if not os.path.exists(path_save_figs):
            os.makedirs(path_save_figs)
    # get number of axes (one stacked axis per plot column)
    num_axes = len(plot_columns)
    markersize = 1.5
    # make axes labels: map known column names to LaTeX labels, pass others through
    axes_labels = []
    for pc in plot_columns:
        if pc == 'gauss_xc':
            pc = r'$x_{c}$'
        elif pc == 'gauss_yc':
            pc = r'$y_{c}$'
        elif pc == 'gauss_sigma_x_y':
            pc = r'$\sigma_{x} / \sigma_{y}$'
        axes_labels.append(pc)
    # get particle ID's by binning (adds 'bin_tl' / 'bin_ll' columns to each row)
    df = bin.bin_generic_2d(df,
                            columns_to_bin=columns_to_bin,
                            column_to_count='id',
                            bins=bins,
                            round_to_decimals=round_to_decimals,
                            min_num_bin=1,
                            return_groupby=False,
                            )
    # # columns_to_bin, bins, bins_to_plot, plot_columns, column_to_plot_along,
    top_level_bins_to_plot = df.sort_values('bin_tl').bin_tl.unique()
    # select one low-level bin by index into the sorted unique bins, then slice to it
    low_level_bin_to_plot = df.sort_values('bin_ll').bin_ll.unique()[low_level_bins_to_plot]
    df = df.loc[df['bin_ll'] == low_level_bin_to_plot]
    # plot
    fig, ax = plt.subplots(nrows=num_axes, sharex=True, figsize=(size_x_inches * 1.2,
                                                        size_y_inches * (num_axes - 1) / 1.25)
                           )
    legend_handles = []
    for bntl in top_level_bins_to_plot:
        # get the dataframe of this bin only
        bin_pids = df[df['bin_tl'] == bntl].id.unique()
        # sometimes there won't be a particle in a bin so you have to skip
        if len(bin_pids) < 1:
            continue
        # get a random particle id in this bin
        pid = np.random.choice(bin_pids, 1)[0]
        # get the dataframe for pid only
        dfpid = df[df['id'] == pid].reset_index()
        # filter 1: remove particles with segmentation errors
        # (center jumping > 0.5 px between consecutive frames)
        if dfpid.gauss_xc.diff().abs().max() > 0.5:
            continue
        if dfpid.gauss_yc.diff().abs().max() > 0.5:
            continue
        # add a column for gaussian location displacement,
        # referenced to the frame where z_corr is closest to zero (focal plane)
        if 'gauss_dxc' in plot_columns:
            dfpid['gauss_dxc'] = dfpid['gauss_xc'] - dfpid.iloc[dfpid.z_corr.abs().idxmin()].gauss_xc
        if 'gauss_dyc' in plot_columns:
            dfpid['gauss_dyc'] = dfpid['gauss_yc'] - dfpid.iloc[dfpid.z_corr.abs().idxmin()].gauss_yc
        add_to_legend = True
        for i, pc in enumerate(plot_columns):
            ax[i].plot(dfpid[column_to_plot_along], dfpid[pc], '-o', markersize=markersize)
            # one legend entry per particle (not per axis): "pid: mean x, mean y"
            if add_to_legend is True:
                legend_handles.append("{}: {}, {}".format(pid,
                                                          np.round(dfpid.x.mean(), 1),
                                                          np.round(dfpid.y.mean(), 1)))
                add_to_legend = False
    # axes labels: shared x-axis label on the bottom axis only
    ax[num_axes - 1].set_xlabel(column_to_plot_along)
    for i, lbl in enumerate(axes_labels):
        ax[i].set_ylabel(lbl)
    # place the legend on the middle axis, outside the plot area
    ax[int(np.floor(num_axes // 2))].legend(legend_handles,
                                           loc='upper left',
                                           bbox_to_anchor=(1, 1),
                                           title=r'$p_{ID}: x, y$'
                                           )
    plt.tight_layout()
    if save_figs:
        plt.savefig(path_save_figs + '/slice-{}_along-{}={}_plot-{}.png'.format(columns_to_bin[0],
                                                                                columns_to_bin[1],
                                                                                low_level_bin_to_plot,
                                                                                column_to_plot_along,
                                                                                ))
    if show_figs:
        plt.show()
    plt.close(fig)
"utils.plotting.lighten_color",
"numpy.abs",
"utils.io.read_calib_coords",
"numpy.floor",
"correction.correct.merge_calib_pid_defocus_and_correction_coords",
"matplotlib.pyplot.style.use",
"numpy.mean",
"utils.plotting.plot_particle_to_particle_similarity",
"matplotlib.pyplot.figure",
"analyze.eva... | [((932, 948), 'matplotlib.font_manager.FontProperties', 'FontProperties', ([], {}), '()\n', (946, 948), False, 'from matplotlib.font_manager import FontProperties\n'), ((975, 1023), 'matplotlib.pyplot.style.use', 'plt.style.use', (["['science', 'ieee', 'std-colors']"], {}), "(['science', 'ieee', 'std-colors'])\n", (988, 1023), True, 'import matplotlib.pyplot as plt\n'), ((1074, 1088), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1086, 1088), True, 'import matplotlib.pyplot as plt\n'), ((1255, 1269), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (1264, 1269), True, 'import matplotlib.pyplot as plt\n'), ((1795, 1832), 'os.path.join', 'join', (['base_dir', '"""coords/calib-coords"""'], {}), "(base_dir, 'coords/calib-coords')\n", (1799, 1832), False, 'from os.path import join\n'), ((1852, 1877), 'os.path.join', 'join', (['base_dir', '"""results"""'], {}), "(base_dir, 'results')\n", (1856, 1877), False, 'from os.path import join\n'), ((1894, 1916), 'os.path.join', 'join', (['base_dir', '"""figs"""'], {}), "(base_dir, 'figs')\n", (1898, 1916), False, 'from os.path import join\n'), ((2036, 2090), 'utils.io.read_calib_coords', 'io.read_calib_coords', (['path_calib_coords'], {'method': 'method'}), '(path_calib_coords, method=method)\n', (2056, 2090), False, 'from utils import bin, fit, functions, io, plotting\n'), ((2148, 2193), 'utils.io.read_pop_gauss_diameter_properties', 'io.read_pop_gauss_diameter_properties', (['dfcpop'], {}), '(dfcpop)\n', (2185, 2193), False, 'from utils import bin, fit, functions, io, plotting\n'), ((27072, 27143), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['spct_results'], {'orient': '"""index"""', 'columns': "['value']"}), "(spct_results, orient='index', columns=['value'])\n", (27094, 27143), True, 'import pandas as pd\n'), ((27717, 27753), 'os.path.join', 'join', (['base_dir', '"""coords/test-coords"""'], {}), "(base_dir, 'coords/test-coords')\n", (27721, 27753), False, 
'from os.path import join\n'), ((27778, 27815), 'os.path.join', 'join', (['base_dir', '"""coords/calib-coords"""'], {}), "(base_dir, 'coords/calib-coords')\n", (27782, 27815), False, 'from os.path import join\n'), ((27838, 27866), 'os.path.join', 'join', (['base_dir', '"""similarity"""'], {}), "(base_dir, 'similarity')\n", (27842, 27866), False, 'from os.path import join\n'), ((27886, 27911), 'os.path.join', 'join', (['base_dir', '"""results"""'], {}), "(base_dir, 'results')\n", (27890, 27911), False, 'from os.path import join\n'), ((27928, 27950), 'os.path.join', 'join', (['base_dir', '"""figs"""'], {}), "(base_dir, 'figs')\n", (27932, 27950), False, 'from os.path import join\n'), ((28044, 28098), 'utils.io.read_calib_coords', 'io.read_calib_coords', (['path_calib_coords'], {'method': 'method'}), '(path_calib_coords, method=method)\n', (28064, 28098), False, 'from utils import bin, fit, functions, io, plotting\n'), ((28611, 28696), 'utils.plotting.scatter_z_by_xy', 'plotting.scatter_z_by_xy', ([], {'df': 'dfcpid', 'z_params': "['zf_from_peak_int', 'zf_from_nsv']"}), "(df=dfcpid, z_params=['zf_from_peak_int',\n 'zf_from_nsv'])\n", (28635, 28696), False, 'from utils import bin, fit, functions, io, plotting\n'), ((28804, 28915), 'correction.correct.inspect_calibration_surface', 'correct.inspect_calibration_surface', ([], {'df': 'dfcpid', 'param_zf': '"""zf_from_nsv"""', 'microns_per_pixel': 'microns_per_pixel'}), "(df=dfcpid, param_zf='zf_from_nsv',\n microns_per_pixel=microns_per_pixel)\n", (28839, 28915), False, 'from correction import correct\n'), ((29139, 29156), 'matplotlib.pyplot.close', 'plt.close', (['fig_xy'], {}), '(fig_xy)\n', (29148, 29156), True, 'import matplotlib.pyplot as plt\n'), ((29216, 29234), 'matplotlib.pyplot.close', 'plt.close', (['fig_xyz'], {}), '(fig_xyz)\n', (29225, 29234), True, 'import matplotlib.pyplot as plt\n'), ((29297, 29317), 'matplotlib.pyplot.close', 'plt.close', (['fig_plane'], {}), '(fig_plane)\n', (29306, 29317), True, 'import 
matplotlib.pyplot as plt\n'), ((29594, 29631), 'utils.io.read_test_coords', 'io.read_test_coords', (['path_test_coords'], {}), '(path_test_coords)\n', (29613, 29631), False, 'from utils import bin, fit, functions, io, plotting\n'), ((29669, 29777), 'correction.correct.correct_z_by_plane_tilt', 'correct.correct_z_by_plane_tilt', (['dfcpid', 'dft'], {'param_zf': '"""zf_from_nsv"""', 'param_z': '"""z"""', 'param_z_true': '"""z_true"""'}), "(dfcpid, dft, param_zf='zf_from_nsv',\n param_z='z', param_z_true='z_true')\n", (29700, 29777), False, 'from correction import correct\n'), ((29985, 29999), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (29997, 29999), True, 'import matplotlib.pyplot as plt\n'), ((30148, 30166), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (30164, 30166), True, 'import matplotlib.pyplot as plt\n'), ((30294, 30305), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (30303, 30305), True, 'import matplotlib.pyplot as plt\n'), ((30345, 30524), 'utils.bin.bin_local_rmse_z', 'bin.bin_local_rmse_z', (['dft'], {'column_to_bin': '"""z_true_corr"""', 'bins': '(25)', 'min_cm': 'min_cm', 'z_range': 'None', 'round_to_decimal': '(4)', 'df_ground_truth': 'None', 'dropna': '(True)', 'error_column': '"""error"""'}), "(dft, column_to_bin='z_true_corr', bins=25, min_cm=\n min_cm, z_range=None, round_to_decimal=4, df_ground_truth=None, dropna=\n True, error_column='error')\n", (30365, 30524), False, 'from utils import bin, fit, functions, io, plotting\n'), ((30568, 30746), 'utils.bin.bin_local_rmse_z', 'bin.bin_local_rmse_z', (['dft'], {'column_to_bin': '"""z_true_corr"""', 'bins': '(1)', 'min_cm': 'min_cm', 'z_range': 'None', 'round_to_decimal': '(4)', 'df_ground_truth': 'None', 'dropna': '(True)', 'error_column': '"""error"""'}), "(dft, column_to_bin='z_true_corr', bins=1, min_cm=\n min_cm, z_range=None, round_to_decimal=4, df_ground_truth=None, dropna=\n True, error_column='error')\n", (30588, 30746), False, 'from 
utils import bin, fit, functions, io, plotting\n'), ((30945, 30959), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (30957, 30959), True, 'import matplotlib.pyplot as plt\n'), ((31101, 31119), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (31117, 31119), True, 'import matplotlib.pyplot as plt\n'), ((31241, 31252), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (31250, 31252), True, 'import matplotlib.pyplot as plt\n'), ((31383, 31418), 'utils.io.read_similarity', 'io.read_similarity', (['path_similarity'], {}), '(path_similarity)\n', (31401, 31418), False, 'from utils import bin, fit, functions, io, plotting\n'), ((32590, 32702), 'analyze.evaluate_intrinsic_aberrations', 'analyze.evaluate_intrinsic_aberrations', (['dfs'], {'z_f': 'zf', 'min_cm': 'min_cm', 'param_z_true': '"""z_true"""', 'param_z_cm': '"""z_cm"""'}), "(dfs, z_f=zf, min_cm=min_cm,\n param_z_true='z_true', param_z_cm='z_cm')\n", (32628, 32702), False, 'import analyze\n'), ((32926, 32968), 'analyze.fit_intrinsic_aberrations', 'analyze.fit_intrinsic_aberrations', (['dict_ia'], {}), '(dict_ia)\n', (32959, 32968), False, 'import analyze\n'), ((32973, 33049), 'utils.io.export_dict_intrinsic_aberrations', 'io.export_dict_intrinsic_aberrations', (['dict_ia', 'path_results'], {'unique_id': '"""raw"""'}), "(dict_ia, path_results, unique_id='raw')\n", (33009, 33049), False, 'from utils import bin, fit, functions, io, plotting\n'), ((33076, 33146), 'utils.plotting.plot_intrinsic_aberrations', 'plotting.plot_intrinsic_aberrations', (['dict_ia'], {'cubic': '(True)', 'quartic': '(True)'}), '(dict_ia, cubic=True, quartic=True)\n', (33111, 33146), False, 'from utils import bin, fit, functions, io, plotting\n'), ((33319, 33337), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (33335, 33337), True, 'import matplotlib.pyplot as plt\n'), ((33342, 33399), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path_figs + 
'/intrinsic-aberrations_raw.png')"], {}), "(path_figs + '/intrinsic-aberrations_raw.png')\n", (33353, 33399), True, 'import matplotlib.pyplot as plt\n'), ((33404, 33414), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (33412, 33414), True, 'import matplotlib.pyplot as plt\n'), ((33466, 33622), 'correction.correct.correct_z_by_plane_tilt', 'correct.correct_z_by_plane_tilt', ([], {'dfcal': 'dfcpid', 'dftest': 'dfs', 'param_zf': '"""zf_from_nsv"""', 'param_z': '"""z_est"""', 'param_z_true': '"""z_true"""', 'params_correct': "['z_cm']"}), "(dfcal=dfcpid, dftest=dfs, param_zf=\n 'zf_from_nsv', param_z='z_est', param_z_true='z_true', params_correct=[\n 'z_cm'])\n", (33497, 33622), False, 'from correction import correct\n'), ((33864, 33990), 'analyze.evaluate_intrinsic_aberrations', 'analyze.evaluate_intrinsic_aberrations', (['dfs_corr'], {'z_f': '(0)', 'min_cm': 'min_cm', 'param_z_true': '"""z_true_corr"""', 'param_z_cm': '"""z_cm_corr"""'}), "(dfs_corr, z_f=0, min_cm=min_cm,\n param_z_true='z_true_corr', param_z_cm='z_cm_corr')\n", (33902, 33990), False, 'import analyze\n'), ((34219, 34262), 'analyze.fit_intrinsic_aberrations', 'analyze.fit_intrinsic_aberrations', (['dict_iac'], {}), '(dict_iac)\n', (34252, 34262), False, 'import analyze\n'), ((34267, 34355), 'utils.io.export_dict_intrinsic_aberrations', 'io.export_dict_intrinsic_aberrations', (['dict_iac', 'path_results'], {'unique_id': '"""corrected"""'}), "(dict_iac, path_results, unique_id=\n 'corrected')\n", (34303, 34355), False, 'from utils import bin, fit, functions, io, plotting\n'), ((34377, 34448), 'utils.plotting.plot_intrinsic_aberrations', 'plotting.plot_intrinsic_aberrations', (['dict_iac'], {'cubic': '(True)', 'quartic': '(True)'}), '(dict_iac, cubic=True, quartic=True)\n', (34412, 34448), False, 'from utils import bin, fit, functions, io, plotting\n'), ((34622, 34640), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (34638, 34640), True, 'import matplotlib.pyplot as 
plt\n'), ((34645, 34708), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path_figs + '/intrinsic-aberrations_corrected.png')"], {}), "(path_figs + '/intrinsic-aberrations_corrected.png')\n", (34656, 34708), True, 'import matplotlib.pyplot as plt\n'), ((34713, 34723), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (34721, 34723), True, 'import matplotlib.pyplot as plt\n'), ((35038, 35069), 'pandas.read_excel', 'pd.read_excel', (['test_coords_path'], {}), '(test_coords_path)\n', (35051, 35069), True, 'import pandas as pd\n'), ((53639, 53796), 'utils.bin.bin_generic', 'bin.bin_generic', (['df'], {'column_to_bin': 'column_to_bin', 'column_to_count': 'column_to_count', 'bins': 'bins', 'round_to_decimal': 'round_to_decimal', 'return_groupby': '(True)'}), '(df, column_to_bin=column_to_bin, column_to_count=\n column_to_count, bins=bins, round_to_decimal=round_to_decimal,\n return_groupby=True)\n', (53654, 53796), False, 'from utils import bin, fit, functions, io, plotting\n'), ((56116, 56248), 'utils.bin.bin_generic', 'bin.bin_generic', (['df'], {'column_to_bin': '"""id"""', 'column_to_count': 'column_to_count', 'bins': 'num_pids', 'round_to_decimal': '(0)', 'return_groupby': '(True)'}), "(df, column_to_bin='id', column_to_count=column_to_count,\n bins=num_pids, round_to_decimal=0, return_groupby=True)\n", (56131, 56248), False, 'from utils import bin, fit, functions, io, plotting\n'), ((58463, 58581), 'utils.bin.bin_generic_2d', 'bin.bin_generic_2d', (['df', 'columns_to_bin', 'column_to_count', 'bins', 'round_to_decimals', 'min_num_bin'], {'return_groupby': '(True)'}), '(df, columns_to_bin, column_to_count, bins,\n round_to_decimals, min_num_bin, return_groupby=True)\n', (58481, 58581), False, 'from utils import bin, fit, functions, io, plotting\n'), ((60801, 60966), 'analyze.evaluate_3d_static_precision', 'analyze.evaluate_3d_static_precision', (['df'], {'columns_to_bin': 'columns_to_bin', 'precision_columns': 'precision_columns', 'bins': 'bins', 
'round_to_decimals': 'round_to_decimals'}), '(df, columns_to_bin=columns_to_bin,\n precision_columns=precision_columns, bins=bins, round_to_decimals=\n round_to_decimals)\n', (60837, 60966), False, 'import analyze\n'), ((63213, 63374), 'analyze.evaluate_2d_static_precision', 'analyze.evaluate_2d_static_precision', (['df'], {'column_to_bin': 'column_to_bin', 'precision_columns': 'precision_columns', 'bins': 'bins', 'round_to_decimal': 'round_to_decimal'}), '(df, column_to_bin=column_to_bin,\n precision_columns=precision_columns, bins=bins, round_to_decimal=\n round_to_decimal)\n', (63249, 63374), False, 'import analyze\n'), ((65427, 65589), 'analyze.evaluate_2d_static_precision', 'analyze.evaluate_2d_static_precision', (['df'], {'column_to_bin': 'column_to_bin', 'precision_columns': 'precision_columns', 'bins': 'bins', 'round_to_decimal': 'round_to_decimals'}), '(df, column_to_bin=column_to_bin,\n precision_columns=precision_columns, bins=bins, round_to_decimal=\n round_to_decimals)\n', (65463, 65589), False, 'import analyze\n'), ((67927, 68088), 'analyze.evaluate_2d_static_precision', 'analyze.evaluate_2d_static_precision', (['df'], {'column_to_bin': 'column_to_bin', 'precision_columns': 'precision_columns', 'bins': 'bins', 'round_to_decimal': 'round_to_decimal'}), '(df, column_to_bin=column_to_bin,\n precision_columns=precision_columns, bins=bins, round_to_decimal=\n round_to_decimal)\n', (67963, 68088), False, 'import analyze\n'), ((70227, 70381), 'analyze.evaluate_1d_static_precision', 'analyze.evaluate_1d_static_precision', (['df'], {'column_to_bin': 'xparam', 'precision_columns': 'precision_columns', 'bins': 'bins', 'round_to_decimal': 'round_to_decimal'}), '(df, column_to_bin=xparam,\n precision_columns=precision_columns, bins=bins, round_to_decimal=\n round_to_decimal)\n', (70263, 70381), False, 'import analyze\n'), ((72777, 72904), 'utils.bin.bin_generic', 'bin.bin_generic', (['df'], {'column_to_bin': 'column_to_bin', 'column_to_count': '"""id"""', 'bins': 
'bins', 'round_to_decimal': '(4)', 'return_groupby': '(True)'}), "(df, column_to_bin=column_to_bin, column_to_count='id', bins\n =bins, round_to_decimal=4, return_groupby=True)\n", (72792, 72904), False, 'from utils import bin, fit, functions, io, plotting\n'), ((74295, 74309), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (74307, 74309), True, 'import matplotlib.pyplot as plt\n'), ((74469, 74487), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (74485, 74487), True, 'import matplotlib.pyplot as plt\n'), ((74619, 74630), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (74628, 74630), True, 'import matplotlib.pyplot as plt\n'), ((74679, 74693), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (74691, 74693), True, 'import matplotlib.pyplot as plt\n'), ((75251, 75269), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (75267, 75269), True, 'import matplotlib.pyplot as plt\n'), ((75402, 75413), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (75411, 75413), True, 'import matplotlib.pyplot as plt\n'), ((76337, 76360), 'numpy.mean', 'np.mean', (['[dfg.x, dfg.y]'], {}), '([dfg.x, dfg.y])\n', (76344, 76360), True, 'import numpy as np\n'), ((82545, 82713), 'utils.bin.bin_generic_2d', 'bin.bin_generic_2d', (['df'], {'columns_to_bin': 'columns_to_bin', 'column_to_count': '"""id"""', 'bins': 'bins', 'round_to_decimals': 'round_to_decimals', 'min_num_bin': '(1)', 'return_groupby': '(False)'}), "(df, columns_to_bin=columns_to_bin, column_to_count='id',\n bins=bins, round_to_decimals=round_to_decimals, min_num_bin=1,\n return_groupby=False)\n", (82563, 82713), False, 'from utils import bin, fit, functions, io, plotting\n'), ((83229, 83345), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'num_axes', 'sharex': '(True)', 'figsize': '(size_x_inches * 1.2, size_y_inches * (num_axes - 1) / 1.25)'}), '(nrows=num_axes, sharex=True, figsize=(size_x_inches * 1.2, \n 
size_y_inches * (num_axes - 1) / 1.25))\n', (83241, 83345), True, 'import matplotlib.pyplot as plt\n'), ((85405, 85423), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (85421, 85423), True, 'import matplotlib.pyplot as plt\n'), ((85970, 85984), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (85979, 85984), True, 'import matplotlib.pyplot as plt\n'), ((4179, 4211), 'utils.fit.fit_3d_plane', 'fit.fit_3d_plane', (['points_microns'], {}), '(points_microns)\n', (4195, 4211), False, 'from utils import bin, fit, functions, io, plotting\n'), ((28263, 28392), 'correction.correct.merge_calib_pid_defocus_and_correction_coords', 'correct.merge_calib_pid_defocus_and_correction_coords', ([], {'path_calib_coords': 'path_calib_coords', 'method': 'method', 'dfs': '[dfc, dfcpid]'}), '(path_calib_coords=\n path_calib_coords, method=method, dfs=[dfc, dfcpid])\n', (28316, 28392), False, 'from correction import correct\n'), ((29420, 29478), 'utils.io.read_pop_gauss_diameter_properties', 'io.read_pop_gauss_diameter_properties', (['path_calib_spct_pop'], {}), '(path_calib_spct_pop)\n', (29457, 29478), False, 'from utils import bin, fit, functions, io, plotting\n'), ((29519, 29564), 'utils.io.read_pop_gauss_diameter_properties', 'io.read_pop_gauss_diameter_properties', (['dfcpop'], {}), '(dfcpop)\n', (29556, 29564), False, 'from utils import bin, fit, functions, io, plotting\n'), ((30193, 30252), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path_figs + '/calibration_curve_corrected.png')"], {}), "(path_figs + '/calibration_curve_corrected.png')\n", (30204, 30252), True, 'import matplotlib.pyplot as plt\n'), ((30279, 30289), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (30287, 30289), True, 'import matplotlib.pyplot as plt\n'), ((31146, 31199), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path_figs + '/meta_rmse-z_corrected.png')"], {}), "(path_figs + '/meta_rmse-z_corrected.png')\n", (31157, 31199), True, 'import 
matplotlib.pyplot as plt\n'), ((31226, 31236), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (31234, 31236), True, 'import matplotlib.pyplot as plt\n'), ((31474, 31565), 'utils.plotting.plot_calib_stack_self_similarity', 'plotting.plot_calib_stack_self_similarity', (['dfsf'], {'min_percent_layers': 'min_percent_layers'}), '(dfsf, min_percent_layers=\n min_percent_layers)\n', (31515, 31565), False, 'from utils import bin, fit, functions, io, plotting\n'), ((31671, 31689), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (31687, 31689), True, 'import matplotlib.pyplot as plt\n'), ((31698, 31759), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path_figs + '/calib_self-similarity-forward.png')"], {}), "(path_figs + '/calib_self-similarity-forward.png')\n", (31709, 31759), True, 'import matplotlib.pyplot as plt\n'), ((31768, 31778), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (31776, 31778), True, 'import matplotlib.pyplot as plt\n'), ((31823, 31914), 'utils.plotting.plot_calib_stack_self_similarity', 'plotting.plot_calib_stack_self_similarity', (['dfsm'], {'min_percent_layers': 'min_percent_layers'}), '(dfsm, min_percent_layers=\n min_percent_layers)\n', (31864, 31914), False, 'from utils import bin, fit, functions, io, plotting\n'), ((32025, 32043), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (32041, 32043), True, 'import matplotlib.pyplot as plt\n'), ((32052, 32112), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path_figs + '/calib_self-similarity-middle.png')"], {}), "(path_figs + '/calib_self-similarity-middle.png')\n", (32063, 32112), True, 'import matplotlib.pyplot as plt\n'), ((32121, 32131), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (32129, 32131), True, 'import matplotlib.pyplot as plt\n'), ((32176, 32255), 'utils.plotting.plot_particle_to_particle_similarity', 'plotting.plot_particle_to_particle_similarity', (['dfcs'], {'min_particles_per_frame': '(10)'}), '(dfcs, 
min_particles_per_frame=10)\n', (32221, 32255), False, 'from utils import bin, fit, functions, io, plotting\n'), ((32373, 32391), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (32389, 32391), True, 'import matplotlib.pyplot as plt\n'), ((32400, 32479), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path_figs + '/calib_per-frame_particle-to-particle-similarity.png')"], {}), "(path_figs + '/calib_per-frame_particle-to-particle-similarity.png')\n", (32411, 32479), True, 'import matplotlib.pyplot as plt\n'), ((32488, 32498), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (32496, 32498), True, 'import matplotlib.pyplot as plt\n'), ((35235, 35281), 'numpy.sqrt', 'np.sqrt', (['((256 - df.x) ** 2 + (256 - df.y) ** 2)'], {}), '((256 - df.x) ** 2 + (256 - df.y) ** 2)\n', (35242, 35281), True, 'import numpy as np\n'), ((39022, 39052), 'pandas.read_excel', 'pd.read_excel', (['spct_stats_path'], {}), '(spct_stats_path)\n', (39035, 39052), True, 'import pandas as pd\n'), ((39587, 39634), 'utils.fit.fit_3d', 'fit.fit_3d', (['points_pixels'], {'fit_function': '"""plane"""'}), "(points_pixels, fit_function='plane')\n", (39597, 39634), False, 'from utils import bin, fit, functions, io, plotting\n'), ((39983, 40030), 'utils.fit.fit_3d', 'fit.fit_3d', (['points_pixels'], {'fit_function': '"""plane"""'}), "(points_pixels, fit_function='plane')\n", (39993, 40030), False, 'from utils import bin, fit, functions, io, plotting\n'), ((40288, 40352), 'utils.functions.calculate_z_of_3d_plane', 'functions.calculate_z_of_3d_plane', (['dft.x', 'dft.y'], {'popt': 'popt_calib'}), '(dft.x, dft.y, popt=popt_calib)\n', (40321, 40352), False, 'from utils import bin, fit, functions, io, plotting\n'), ((40400, 40463), 'utils.functions.calculate_z_of_3d_plane', 'functions.calculate_z_of_3d_plane', (['dft.x', 'dft.y'], {'popt': 'popt_test'}), '(dft.x, dft.y, popt=popt_test)\n', (40433, 40463), False, 'from utils import bin, fit, functions, io, plotting\n'), ((40710, 
40724), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (40722, 40724), True, 'import matplotlib.pyplot as plt\n'), ((40789, 40805), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['sc'], {}), '(sc)\n', (40801, 40805), True, 'import matplotlib.pyplot as plt\n'), ((40870, 40888), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (40886, 40888), True, 'import matplotlib.pyplot as plt\n'), ((40897, 40907), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (40905, 40907), True, 'import matplotlib.pyplot as plt\n'), ((41384, 41447), 'scipy.interpolate.griddata', 'griddata', (['(x, y)', 'z', '(xi[None, :], yi[:, None])'], {'method': '"""cubic"""'}), "((x, y), z, (xi[None, :], yi[:, None]), method='cubic')\n", (41392, 41447), False, 'from scipy.interpolate import griddata, Akima1DInterpolator\n'), ((41464, 41483), 'numpy.meshgrid', 'np.meshgrid', (['xi', 'yi'], {}), '(xi, yi)\n', (41475, 41483), True, 'import numpy as np\n'), ((41503, 41517), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (41515, 41517), True, 'import matplotlib.pyplot as plt\n'), ((41901, 41918), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (41909, 41918), True, 'import matplotlib.pyplot as plt\n'), ((41927, 41945), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (41943, 41945), True, 'import matplotlib.pyplot as plt\n'), ((41954, 41964), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (41962, 41964), True, 'import matplotlib.pyplot as plt\n'), ((44805, 44819), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (44817, 44819), True, 'import matplotlib.pyplot as plt\n'), ((44969, 45032), 'scipy.optimize.curve_fit', 'curve_fit', (['functions.quadratic', 'df_error.z_true', 'df_error.error'], {}), '(functions.quadratic, df_error.z_true, df_error.error)\n', (44978, 45032), False, 'from scipy.optimize import curve_fit\n'), ((45620, 45638), 'matplotlib.pyplot.tight_layout', 
'plt.tight_layout', ([], {}), '()\n', (45636, 45638), True, 'import matplotlib.pyplot as plt\n'), ((45767, 45777), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (45775, 45777), True, 'import matplotlib.pyplot as plt\n'), ((46231, 46334), 'utils.bin.bin_generic', 'bin.bin_generic', (['df_error', 'column_to_bin', 'column_to_count', 'bins_r', 'round_to_decimal', 'return_groupby'], {}), '(df_error, column_to_bin, column_to_count, bins_r,\n round_to_decimal, return_groupby)\n', (46246, 46334), False, 'from utils import bin, fit, functions, io, plotting\n'), ((46584, 46683), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'bins_r', 'sharex': '(True)', 'figsize': '(size_x_inches, size_y_inches * bins_r / 2.4)'}), '(nrows=bins_r, sharex=True, figsize=(size_x_inches, \n size_y_inches * bins_r / 2.4))\n', (46596, 46683), True, 'import matplotlib.pyplot as plt\n'), ((48515, 48533), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (48531, 48533), True, 'import matplotlib.pyplot as plt\n'), ((48542, 48552), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (48550, 48552), True, 'import matplotlib.pyplot as plt\n'), ((48561, 48572), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (48570, 48572), True, 'import matplotlib.pyplot as plt\n'), ((48759, 48773), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (48771, 48773), True, 'import matplotlib.pyplot as plt\n'), ((49455, 49473), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (49471, 49473), True, 'import matplotlib.pyplot as plt\n'), ((49482, 49492), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (49490, 49492), True, 'import matplotlib.pyplot as plt\n'), ((49501, 49512), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (49510, 49512), True, 'import matplotlib.pyplot as plt\n'), ((49670, 49773), 'utils.bin.bin_generic', 'bin.bin_generic', (['df_error', 'column_to_bin', 'column_to_count', 'bins_r', 
'round_to_decimal', 'return_groupby'], {}), '(df_error, column_to_bin, column_to_count, bins_r,\n round_to_decimal, return_groupby)\n', (49685, 49773), False, 'from utils import bin, fit, functions, io, plotting\n'), ((50053, 50067), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (50065, 50067), True, 'import matplotlib.pyplot as plt\n'), ((50474, 50492), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (50490, 50492), True, 'import matplotlib.pyplot as plt\n'), ((50501, 50511), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (50509, 50511), True, 'import matplotlib.pyplot as plt\n'), ((50520, 50531), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (50529, 50531), True, 'import matplotlib.pyplot as plt\n'), ((51107, 51226), 'utils.bin.bin_generic_2d', 'bin.bin_generic_2d', (['df_error', 'columns_to_bin', 'column_to_count', 'bins', 'round_to_decimals', 'min_num_bin', 'return_groupby'], {}), '(df_error, columns_to_bin, column_to_count, bins,\n round_to_decimals, min_num_bin, return_groupby)\n', (51125, 51226), False, 'from utils import bin, fit, functions, io, plotting\n'), ((51803, 51817), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (51815, 51817), True, 'import matplotlib.pyplot as plt\n'), ((53100, 53118), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (53116, 53118), True, 'import matplotlib.pyplot as plt\n'), ((53127, 53137), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (53135, 53137), True, 'import matplotlib.pyplot as plt\n'), ((53146, 53157), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (53155, 53157), True, 'import matplotlib.pyplot as plt\n'), ((54656, 54670), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (54668, 54670), True, 'import matplotlib.pyplot as plt\n'), ((55226, 55244), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (55242, 55244), True, 'import matplotlib.pyplot as plt\n'), 
((55400, 55411), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (55409, 55411), True, 'import matplotlib.pyplot as plt\n'), ((57104, 57118), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (57116, 57118), True, 'import matplotlib.pyplot as plt\n'), ((57697, 57715), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (57713, 57715), True, 'import matplotlib.pyplot as plt\n'), ((57872, 57883), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (57881, 57883), True, 'import matplotlib.pyplot as plt\n'), ((59305, 59398), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'sharex': '(True)', 'figsize': '(size_x_inches * 1.25, size_y_inches * 1.25)'}), '(nrows=2, sharex=True, figsize=(size_x_inches * 1.25, \n size_y_inches * 1.25))\n', (59317, 59398), True, 'import matplotlib.pyplot as plt\n'), ((60354, 60372), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (60370, 60372), True, 'import matplotlib.pyplot as plt\n'), ((60525, 60536), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (60534, 60536), True, 'import matplotlib.pyplot as plt\n'), ((74514, 74577), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path_save_figs + '/spct-stats_emitter-density.png')"], {}), "(path_save_figs + '/spct-stats_emitter-density.png')\n", (74525, 74577), True, 'import matplotlib.pyplot as plt\n'), ((74604, 74614), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (74612, 74614), True, 'import matplotlib.pyplot as plt\n'), ((75296, 75360), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path_save_figs + '/spct-stats_nyquist-sampling.png')"], {}), "(path_save_figs + '/spct-stats_nyquist-sampling.png')\n", (75307, 75360), True, 'import matplotlib.pyplot as plt\n'), ((75387, 75397), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (75395, 75397), True, 'import matplotlib.pyplot as plt\n'), ((79384, 79493), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'num_axes', 
'sharex': '(True)', 'figsize': '(size_x_inches, size_y_inches * (num_axes - 1) / 1.5)'}), '(nrows=num_axes, sharex=True, figsize=(size_x_inches, \n size_y_inches * (num_axes - 1) / 1.5))\n', (79396, 79493), True, 'import matplotlib.pyplot as plt\n'), ((80557, 80575), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (80573, 80575), True, 'import matplotlib.pyplot as plt\n'), ((81542, 81556), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (81551, 81556), True, 'import matplotlib.pyplot as plt\n'), ((85954, 85964), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (85962, 85964), True, 'import matplotlib.pyplot as plt\n'), ((3965, 4052), 'numpy.stack', 'np.stack', (['(dfcg.x * microns_per_pixel, dfcg.y * microns_per_pixel, dfcg[param_zf])'], {}), '((dfcg.x * microns_per_pixel, dfcg.y * microns_per_pixel, dfcg[\n param_zf]))\n', (3973, 4052), True, 'import numpy as np\n'), ((4272, 4364), 'numpy.arctan', 'np.arctan', (['((pz_microns[0, 1] - pz_microns[0, 0]) / (px_microns[0, 1] - px_microns[0, 0]))'], {}), '((pz_microns[0, 1] - pz_microns[0, 0]) / (px_microns[0, 1] -\n px_microns[0, 0]))\n', (4281, 4364), True, 'import numpy as np\n'), ((4390, 4482), 'numpy.arctan', 'np.arctan', (['((pz_microns[1, 0] - pz_microns[0, 0]) / (py_microns[1, 0] - py_microns[0, 0]))'], {}), '((pz_microns[1, 0] - pz_microns[0, 0]) / (py_microns[1, 0] -\n py_microns[0, 0]))\n', (4399, 4482), True, 'import numpy as np\n'), ((5219, 5233), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5231, 5233), True, 'import matplotlib.pyplot as plt\n'), ((5289, 5299), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5297, 5299), True, 'import matplotlib.pyplot as plt\n'), ((5682, 5696), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5694, 5696), True, 'import matplotlib.pyplot as plt\n'), ((5839, 5849), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5847, 5849), True, 'import matplotlib.pyplot as plt\n'), 
((9791, 9814), 'numpy.mean', 'np.mean', (['[dfg.x, dfg.y]'], {}), '([dfg.x, dfg.y])\n', (9798, 9814), True, 'import numpy as np\n'), ((16593, 16679), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'sharex': '(True)', 'figsize': '(size_x_inches, size_y_inches * 1.25)'}), '(nrows=2, sharex=True, figsize=(size_x_inches, size_y_inches * \n 1.25))\n', (16605, 16679), True, 'import matplotlib.pyplot as plt\n'), ((17011, 17021), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17019, 17021), True, 'import matplotlib.pyplot as plt\n'), ((17257, 17367), 'numpy.sqrt', 'np.sqrt', (["((df['gauss_xc'] - image_dimensions[0] / 2) ** 2 + (df['gauss_yc'] - \n image_dimensions[1] / 2) ** 2)"], {}), "((df['gauss_xc'] - image_dimensions[0] / 2) ** 2 + (df['gauss_yc'] -\n image_dimensions[1] / 2) ** 2)\n", (17264, 17367), True, 'import numpy as np\n'), ((35820, 35938), 'analyze.evaluate_1d_static_precision', 'analyze.evaluate_1d_static_precision', (['df'], {'column_to_bin': '"""id"""', 'precision_columns': 'pos', 'bins': 'pids', 'round_to_decimal': '(0)'}), "(df, column_to_bin='id',\n precision_columns=pos, bins=pids, round_to_decimal=0)\n", (35856, 35938), False, 'import analyze\n'), ((39425, 39461), 'numpy.stack', 'np.stack', (['(dff.x, dff.y, dff.z_corr)'], {}), '((dff.x, dff.y, dff.z_corr))\n', (39433, 39461), True, 'import numpy as np\n'), ((39827, 39858), 'numpy.stack', 'np.stack', (['(dft.x, dft.y, dft.z)'], {}), '((dft.x, dft.y, dft.z))\n', (39835, 39858), True, 'import numpy as np\n'), ((41130, 41139), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (41136, 41139), True, 'import numpy as np\n'), ((41142, 41151), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (41148, 41151), True, 'import numpy as np\n'), ((41169, 41178), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (41175, 41178), True, 'import numpy as np\n'), ((41181, 41190), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (41187, 41190), True, 'import numpy as np\n'), ((41208, 41217), 'numpy.max', 
'np.max', (['z'], {}), '(z)\n', (41214, 41217), True, 'import numpy as np\n'), ((41220, 41229), 'numpy.min', 'np.min', (['z'], {}), '(z)\n', (41226, 41229), True, 'import numpy as np\n'), ((41292, 41301), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (41298, 41301), True, 'import numpy as np\n'), ((41303, 41312), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (41309, 41312), True, 'import numpy as np\n'), ((41344, 41353), 'numpy.min', 'np.min', (['y'], {}), '(y)\n', (41350, 41353), True, 'import numpy as np\n'), ((41355, 41364), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (41361, 41364), True, 'import numpy as np\n'), ((45066, 45109), 'utils.functions.quadratic', 'functions.quadratic', (['df_error.z_true', '*popt'], {}), '(df_error.z_true, *popt)\n', (45085, 45109), False, 'from utils import bin, fit, functions, io, plotting\n'), ((45934, 46037), 'numpy.sqrt', 'np.sqrt', (['((df_error.x - image_dimensions[0] / 2) ** 2 + (df_error.y - \n image_dimensions[1] / 2) ** 2)'], {}), '((df_error.x - image_dimensions[0] / 2) ** 2 + (df_error.y - \n image_dimensions[1] / 2) ** 2)\n', (45941, 46037), True, 'import numpy as np\n'), ((47415, 47470), 'scipy.optimize.curve_fit', 'curve_fit', (['functions.quadratic', 'dfbr.z_true', 'dfbr.error'], {}), '(functions.quadratic, dfbr.z_true, dfbr.error)\n', (47424, 47470), False, 'from scipy.optimize import curve_fit\n'), ((53549, 53579), 'os.path.exists', 'os.path.exists', (['path_save_figs'], {}), '(path_save_figs)\n', (53563, 53579), False, 'import os\n'), ((53593, 53620), 'os.makedirs', 'os.makedirs', (['path_save_figs'], {}), '(path_save_figs)\n', (53604, 53620), False, 'import os\n'), ((54077, 54110), 'os.path.exists', 'os.path.exists', (['path_save_results'], {}), '(path_save_results)\n', (54091, 54110), False, 'import os\n'), ((54124, 54154), 'os.makedirs', 'os.makedirs', (['path_save_results'], {}), '(path_save_results)\n', (54135, 54154), False, 'import os\n'), ((55381, 55391), 'matplotlib.pyplot.show', 'plt.show', ([], {}), 
'()\n', (55389, 55391), True, 'import matplotlib.pyplot as plt\n'), ((56026, 56056), 'os.path.exists', 'os.path.exists', (['path_save_figs'], {}), '(path_save_figs)\n', (56040, 56056), False, 'import os\n'), ((56070, 56097), 'os.makedirs', 'os.makedirs', (['path_save_figs'], {}), '(path_save_figs)\n', (56081, 56097), False, 'import os\n'), ((56535, 56568), 'os.path.exists', 'os.path.exists', (['path_save_results'], {}), '(path_save_results)\n', (56549, 56568), False, 'import os\n'), ((56582, 56612), 'os.makedirs', 'os.makedirs', (['path_save_results'], {}), '(path_save_results)\n', (56593, 56612), False, 'import os\n'), ((57853, 57863), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (57861, 57863), True, 'import matplotlib.pyplot as plt\n'), ((58373, 58403), 'os.path.exists', 'os.path.exists', (['path_save_figs'], {}), '(path_save_figs)\n', (58387, 58403), False, 'import os\n'), ((58417, 58444), 'os.makedirs', 'os.makedirs', (['path_save_figs'], {}), '(path_save_figs)\n', (58428, 58444), False, 'import os\n'), ((58952, 58985), 'os.path.exists', 'os.path.exists', (['path_save_results'], {}), '(path_save_results)\n', (58966, 58985), False, 'import os\n'), ((58999, 59029), 'os.makedirs', 'os.makedirs', (['path_save_results'], {}), '(path_save_results)\n', (59010, 59029), False, 'import os\n'), ((60506, 60516), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (60514, 60516), True, 'import matplotlib.pyplot as plt\n'), ((61371, 61404), 'os.path.exists', 'os.path.exists', (['path_save_results'], {}), '(path_save_results)\n', (61385, 61404), False, 'import os\n'), ((61418, 61448), 'os.makedirs', 'os.makedirs', (['path_save_results'], {}), '(path_save_results)\n', (61429, 61448), False, 'import os\n'), ((61776, 61806), 'os.path.exists', 'os.path.exists', (['path_save_figs'], {}), '(path_save_figs)\n', (61790, 61806), False, 'import os\n'), ((61820, 61847), 'os.makedirs', 'os.makedirs', (['path_save_figs'], {}), '(path_save_figs)\n', (61831, 61847), False, 
'import os\n'), ((62136, 62150), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (62148, 62150), True, 'import matplotlib.pyplot as plt\n'), ((62748, 62766), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (62764, 62766), True, 'import matplotlib.pyplot as plt\n'), ((62947, 62958), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (62956, 62958), True, 'import matplotlib.pyplot as plt\n'), ((63762, 63795), 'os.path.exists', 'os.path.exists', (['path_save_results'], {}), '(path_save_results)\n', (63776, 63795), False, 'import os\n'), ((63809, 63839), 'os.makedirs', 'os.makedirs', (['path_save_results'], {}), '(path_save_results)\n', (63820, 63839), False, 'import os\n'), ((64157, 64187), 'os.path.exists', 'os.path.exists', (['path_save_figs'], {}), '(path_save_figs)\n', (64171, 64187), False, 'import os\n'), ((64201, 64228), 'os.makedirs', 'os.makedirs', (['path_save_figs'], {}), '(path_save_figs)\n', (64212, 64228), False, 'import os\n'), ((64415, 64429), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (64427, 64429), True, 'import matplotlib.pyplot as plt\n'), ((64873, 64891), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (64889, 64891), True, 'import matplotlib.pyplot as plt\n'), ((65070, 65081), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (65079, 65081), True, 'import matplotlib.pyplot as plt\n'), ((66013, 66046), 'os.path.exists', 'os.path.exists', (['path_save_results'], {}), '(path_save_results)\n', (66027, 66046), False, 'import os\n'), ((66060, 66090), 'os.makedirs', 'os.makedirs', (['path_save_results'], {}), '(path_save_results)\n', (66071, 66090), False, 'import os\n'), ((66428, 66458), 'os.path.exists', 'os.path.exists', (['path_save_figs'], {}), '(path_save_figs)\n', (66442, 66458), False, 'import os\n'), ((66472, 66499), 'os.makedirs', 'os.makedirs', (['path_save_figs'], {}), '(path_save_figs)\n', (66483, 66499), False, 'import os\n'), ((66727, 
66741), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (66739, 66741), True, 'import matplotlib.pyplot as plt\n'), ((67226, 67244), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (67242, 67244), True, 'import matplotlib.pyplot as plt\n'), ((67427, 67438), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (67436, 67438), True, 'import matplotlib.pyplot as plt\n'), ((68502, 68535), 'os.path.exists', 'os.path.exists', (['path_save_results'], {}), '(path_save_results)\n', (68516, 68535), False, 'import os\n'), ((68549, 68579), 'os.makedirs', 'os.makedirs', (['path_save_results'], {}), '(path_save_results)\n', (68560, 68579), False, 'import os\n'), ((68909, 68939), 'os.path.exists', 'os.path.exists', (['path_save_figs'], {}), '(path_save_figs)\n', (68923, 68939), False, 'import os\n'), ((68953, 68980), 'os.makedirs', 'os.makedirs', (['path_save_figs'], {}), '(path_save_figs)\n', (68964, 68980), False, 'import os\n'), ((69249, 69263), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (69261, 69263), True, 'import matplotlib.pyplot as plt\n'), ((69743, 69761), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (69759, 69761), True, 'import matplotlib.pyplot as plt\n'), ((69942, 69953), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (69951, 69953), True, 'import matplotlib.pyplot as plt\n'), ((70731, 70764), 'os.path.exists', 'os.path.exists', (['path_save_results'], {}), '(path_save_results)\n', (70745, 70764), False, 'import os\n'), ((70778, 70808), 'os.makedirs', 'os.makedirs', (['path_save_results'], {}), '(path_save_results)\n', (70789, 70808), False, 'import os\n'), ((71111, 71141), 'os.path.exists', 'os.path.exists', (['path_save_figs'], {}), '(path_save_figs)\n', (71125, 71141), False, 'import os\n'), ((71155, 71182), 'os.makedirs', 'os.makedirs', (['path_save_figs'], {}), '(path_save_figs)\n', (71166, 71182), False, 'import os\n'), ((71392, 71406), 
'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (71404, 71406), True, 'import matplotlib.pyplot as plt\n'), ((71840, 71858), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (71856, 71858), True, 'import matplotlib.pyplot as plt\n'), ((72035, 72046), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (72044, 72046), True, 'import matplotlib.pyplot as plt\n'), ((73762, 73795), 'os.path.exists', 'os.path.exists', (['path_save_results'], {}), '(path_save_results)\n', (73776, 73795), False, 'import os\n'), ((73809, 73839), 'os.makedirs', 'os.makedirs', (['path_save_results'], {}), '(path_save_results)\n', (73820, 73839), False, 'import os\n'), ((74176, 74206), 'os.path.exists', 'os.path.exists', (['path_save_figs'], {}), '(path_save_figs)\n', (74190, 74206), False, 'import os\n'), ((74220, 74247), 'os.makedirs', 'os.makedirs', (['path_save_figs'], {}), '(path_save_figs)\n', (74231, 74247), False, 'import os\n'), ((75648, 75678), 'os.path.exists', 'os.path.exists', (['path_save_figs'], {}), '(path_save_figs)\n', (75662, 75678), False, 'import os\n'), ((75692, 75719), 'os.makedirs', 'os.makedirs', (['path_save_figs'], {}), '(path_save_figs)\n', (75703, 75719), False, 'import os\n'), ((77217, 77251), 'os.path.exists', 'os.path.exists', (['path_save_figs_pid'], {}), '(path_save_figs_pid)\n', (77231, 77251), False, 'import os\n'), ((77265, 77296), 'os.makedirs', 'os.makedirs', (['path_save_figs_pid'], {}), '(path_save_figs_pid)\n', (77276, 77296), False, 'import os\n'), ((77353, 77367), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (77365, 77367), True, 'import matplotlib.pyplot as plt\n'), ((78065, 78083), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (78081, 78083), True, 'import matplotlib.pyplot as plt\n'), ((78292, 78306), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (78301, 78306), True, 'import matplotlib.pyplot as plt\n'), ((78646, 78676), 'os.path.exists', 
'os.path.exists', (['path_save_figs'], {}), '(path_save_figs)\n', (78660, 78676), False, 'import os\n'), ((78690, 78717), 'os.makedirs', 'os.makedirs', (['path_save_figs'], {}), '(path_save_figs)\n', (78701, 78717), False, 'import os\n'), ((81522, 81532), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (81530, 81532), True, 'import matplotlib.pyplot as plt\n'), ((82044, 82074), 'os.path.exists', 'os.path.exists', (['path_save_figs'], {}), '(path_save_figs)\n', (82058, 82074), False, 'import os\n'), ((82088, 82115), 'os.makedirs', 'os.makedirs', (['path_save_figs'], {}), '(path_save_figs)\n', (82099, 82115), False, 'import os\n'), ((83790, 83819), 'numpy.random.choice', 'np.random.choice', (['bin_pids', '(1)'], {}), '(bin_pids, 1)\n', (83806, 83819), True, 'import numpy as np\n'), ((4523, 4542), 'numpy.round', 'np.round', (['tilt_x', '(3)'], {}), '(tilt_x, 3)\n', (4531, 4542), True, 'import numpy as np\n'), ((4588, 4607), 'numpy.round', 'np.round', (['tilt_y', '(3)'], {}), '(tilt_y, 3)\n', (4596, 4607), True, 'import numpy as np\n'), ((6649, 6709), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(size_x_inches / 1.5, size_y_inches * 2)'}), '(figsize=(size_x_inches / 1.5, size_y_inches * 2))\n', (6659, 6709), True, 'import matplotlib.pyplot as plt\n'), ((6731, 6770), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {'box_aspect': '(1.0, 1.0, 3.0)'}), '(fig, box_aspect=(1.0, 1.0, 3.0))\n', (6737, 6770), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((7594, 7617), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'visible': '(False)'}), '(visible=False)\n', (7602, 7617), True, 'import matplotlib.pyplot as plt\n'), ((7634, 7671), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'visible': '(True)', 'which': '"""major"""'}), "(visible=True, which='major')\n", (7642, 7671), True, 'import matplotlib.pyplot as plt\n'), ((8161, 8171), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8169, 8171), True, 'import matplotlib.pyplot as plt\n'), ((8188, 
8199), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8197, 8199), True, 'import matplotlib.pyplot as plt\n'), ((17469, 17503), 'numpy.round', 'np.round', (['(measurement_depth / 5)', '(0)'], {}), '(measurement_depth / 5, 0)\n', (17477, 17503), True, 'import numpy as np\n'), ((22415, 22469), 'numpy.max', 'np.max', (['[zmin_contour_diameter, zmax_contour_diameter]'], {}), '([zmin_contour_diameter, zmax_contour_diameter])\n', (22421, 22469), True, 'import numpy as np\n'), ((23776, 23826), 'numpy.max', 'np.max', (['[zmin_gauss_diameter, zmax_gauss_diameter]'], {}), '([zmin_gauss_diameter, zmax_gauss_diameter])\n', (23782, 23826), True, 'import numpy as np\n'), ((46915, 46933), 'numpy.round', 'np.round', (['bin_r', '(0)'], {}), '(bin_r, 0)\n', (46923, 46933), True, 'import numpy as np\n'), ((47769, 47808), 'utils.functions.quadratic', 'functions.quadratic', (['dfbr.z_true', '*popt'], {}), '(dfbr.z_true, *popt)\n', (47788, 47808), False, 'from utils import bin, fit, functions, io, plotting\n'), ((48850, 48888), 'utils.functions.quadratic', 'functions.quadratic', (['fit_z_true', '*popt'], {}), '(fit_z_true, *popt)\n', (48869, 48888), False, 'from utils import bin, fit, functions, io, plotting\n'), ((62924, 62934), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (62932, 62934), True, 'import matplotlib.pyplot as plt\n'), ((65047, 65057), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (65055, 65057), True, 'import matplotlib.pyplot as plt\n'), ((67404, 67414), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (67412, 67414), True, 'import matplotlib.pyplot as plt\n'), ((69919, 69929), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (69927, 69929), True, 'import matplotlib.pyplot as plt\n'), ((72012, 72022), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (72020, 72022), True, 'import matplotlib.pyplot as plt\n'), ((78269, 78279), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (78277, 78279), True, 'import 
matplotlib.pyplot as plt\n'), ((36923, 36937), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (36935, 36937), True, 'import matplotlib.pyplot as plt\n'), ((37073, 37137), 'utils.fit.fit', 'fit.fit', (['dfp_id[xparam]', 'dfp_id[pc]'], {'fit_function': 'functions.line'}), '(dfp_id[xparam], dfp_id[pc], fit_function=functions.line)\n', (37080, 37137), False, 'from utils import bin, fit, functions, io, plotting\n'), ((38554, 38572), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (38570, 38572), True, 'import matplotlib.pyplot as plt\n'), ((38593, 38603), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (38601, 38603), True, 'import matplotlib.pyplot as plt\n'), ((39240, 39275), 'numpy.abs', 'np.abs', (["(dfc['z_true'] - z_test_mean)"], {}), "(dfc['z_true'] - z_test_mean)\n", (39246, 39275), True, 'import numpy as np\n'), ((45529, 45549), 'numpy.round', 'np.round', (['popt[2]', '(4)'], {}), '(popt[2], 4)\n', (45537, 45549), True, 'import numpy as np\n'), ((47710, 47730), 'numpy.round', 'np.round', (['popt[2]', '(4)'], {}), '(popt[2], 4)\n', (47718, 47730), True, 'import numpy as np\n'), ((52347, 52391), 'utils.functions.quadratic', 'functions.quadratic', (['fit_bin_ll', '*popts_r[i]'], {}), '(fit_bin_ll, *popts_r[i])\n', (52366, 52391), False, 'from utils import bin, fit, functions, io, plotting\n'), ((59822, 59841), 'numpy.round', 'np.round', (['bin_tl', '(1)'], {}), '(bin_tl, 1)\n', (59830, 59841), True, 'import numpy as np\n'), ((76094, 76138), 'numpy.random.choice', 'np.random.choice', (['all_pids', '(5)'], {'replace': '(False)'}), '(all_pids, 5, replace=False)\n', (76110, 76138), True, 'import numpy as np\n'), ((85108, 85131), 'numpy.floor', 'np.floor', (['(num_axes // 2)'], {}), '(num_axes // 2)\n', (85116, 85131), True, 'import numpy as np\n'), ((7015, 7055), 'numpy.ones_like', 'np.ones_like', (['dfpid.gauss_xc_corr.values'], {}), '(dfpid.gauss_xc_corr.values)\n', (7027, 7055), True, 'import numpy as np\n'), 
((36758, 36791), 'scipy.stats.pearsonr', 'pearsonr', (['measurement', 'dependency'], {}), '(measurement, dependency)\n', (36766, 36791), False, 'from scipy.stats import pearsonr, spearmanr\n'), ((36853, 36887), 'scipy.stats.spearmanr', 'spearmanr', (['measurement', 'dependency'], {}), '(measurement, dependency)\n', (36862, 36887), False, 'from scipy.stats import pearsonr, spearmanr\n'), ((48307, 48327), 'numpy.floor', 'np.floor', (['(bins_r / 2)'], {}), '(bins_r / 2)\n', (48315, 48327), True, 'import numpy as np\n'), ((52161, 52179), 'numpy.round', 'np.round', (['bin_r', '(0)'], {}), '(bin_r, 0)\n', (52169, 52179), True, 'import numpy as np\n'), ((52463, 52509), 'utils.plotting.lighten_color', 'lighten_color', (['sci_color_cycle[i]'], {'amount': '(1.25)'}), '(sci_color_cycle[i], amount=1.25)\n', (52476, 52509), False, 'from utils.plotting import lighten_color\n'), ((62353, 62369), 'numpy.round', 'np.round', (['lpt', '(1)'], {}), '(lpt, 1)\n', (62361, 62369), True, 'import numpy as np\n'), ((45460, 45480), 'numpy.round', 'np.round', (['popt[1]', '(4)'], {}), '(popt[1], 4)\n', (45468, 45480), True, 'import numpy as np\n'), ((47631, 47651), 'numpy.round', 'np.round', (['popt[1]', '(4)'], {}), '(popt[1], 4)\n', (47639, 47651), True, 'import numpy as np\n'), ((37570, 37632), 'numpy.format_float_scientific', 'np.format_float_scientific', (['popt[0]'], {'precision': '(2)', 'exp_digits': '(2)'}), '(popt[0], precision=2, exp_digits=2)\n', (37596, 37632), True, 'import numpy as np\n'), ((45389, 45409), 'numpy.round', 'np.round', (['popt[0]', '(4)'], {}), '(popt[0], 4)\n', (45397, 45409), True, 'import numpy as np\n'), ((47550, 47570), 'numpy.round', 'np.round', (['popt[0]', '(4)'], {}), '(popt[0], 4)\n', (47558, 47570), True, 'import numpy as np\n')] |
#!/usr/bin/env python
'''This script plots tables as a pdf. The type of plot is determined
based on the columns in the input dataframe.'''
from __future__ import division
#Our standard Modules
import argparse
import numpy as np
import sys
import pandas as pd
import mpathic.utils as utils
import matplotlib.pyplot as plt
import weblogolib
import matplotlib as mpl
from scipy.optimize import newton
def draw_library(df,seq_dict):
    '''Make a Zipf plot of counts vs rank order.

    Parameters
    ----------
    df : pandas.DataFrame
        Table with a 'ct' (counts) column.
    seq_dict : dict
        Unused; kept for interface consistency with the other draw_* functions.

    Returns
    -------
    matplotlib.figure.Figure
    '''
    fig, ax = plt.subplots()
    # DataFrame.sort() was removed in pandas 0.20; sort_values is the
    # supported API (available since 0.17).
    df = df.sort_values(by='ct', ascending=False)
    # Rank order starts at 1: log(0) is undefined, so a 0-based rank would
    # silently drop the first (largest) point on a log-log axis.
    ax.loglog(np.arange(1, len(df['ct']) + 1), df['ct'])
    plt.title('Zipf Frequency Plot',fontsize=15)
    plt.xlabel('Rank Order of Counts',fontsize=15)
    plt.ylabel('Counts',fontsize=15)
    return fig
def draw_enrichment_profile(df,seq_dict,inv_dict):
    '''Heatmap of the enrichment of each residue at each position (proteins).

    Expects one 'le_<residue>' column per alphabet entry plus a 'pos' column.
    Returns the matplotlib figure.
    '''
    fig, ax = plt.subplots()
    headers = ['le_' + str(inv_dict[i]) for i in range(len(seq_dict))]
    # Mask NaN entries so they render in a distinct (white) color rather
    # than being drawn by the colormap.
    values = df[headers]
    masked_values = np.ma.array(values, mask=np.isnan(values))
    cmap = mpl.cm.coolwarm
    cmap.set_bad('w', 1.)
    plt.pcolor(masked_values, cmap=cmap)
    # Center the tick labels on the heatmap cells.
    ax.set_xticks(np.arange(len(seq_dict)) + 0.5, minor=False)
    ax.set_yticks(np.arange(len(df['pos'])) + 0.5, minor=False)
    plt.colorbar()
    ax.set_xticklabels([inv_dict[z] for z in range(len(seq_dict))], minor=False)
    ax.set_yticklabels(df['pos'], minor=False)
    plt.title('Enrichment Profile',fontsize=20)
    plt.xlabel('Amino Acid',fontsize=20)
    plt.ylabel('Position',fontsize=20)
    return fig
def draw_info_profile(df):
    '''Bar chart of mutual information between position identity and batch.

    Expects 'pos' and 'info' columns; returns the matplotlib figure.
    '''
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # Shift bar left edges so each bar is (nearly) centered on its position.
    # NOTE(review): this writes back into the caller's DataFrame.
    df['pos'] = [position - .45 for position in df['pos']]
    ax.bar(df['pos'], df['info'], color='b')
    plt.xlabel('Base Position', fontsize=20)
    plt.ylabel('Mutual Information (bits)', fontsize=20)
    plt.title('Information Profile', fontsize=22)
    return fig
def draw_mutrate(df):
    '''Bar chart of the total mutation rate at each base position.

    Parameters
    ----------
    df : pandas.DataFrame
        Table with 'pos' and 'mut' columns.

    Returns
    -------
    matplotlib.figure.Figure
    '''
    # (The original had a second, stray docstring copied from
    # draw_info_profile; it was a no-op statement and has been removed.)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # Shift bar left edges so each bar is centered on its position; use a
    # local list so the caller's DataFrame is not mutated as a side effect.
    positions = [p - .45 for p in df['pos']]
    ax.bar(positions, df['mut'], color='b')
    plt.xlabel('Base Position', fontsize=20)
    plt.ylabel('Mutation Rate', fontsize=20)
    plt.title('Mutation Profile', fontsize=22)
    return fig
def draw_matrix(df,seq_dict,inv_dict):
    '''Heatmap of a linear energy matrix.

    Expects one 'val_<char>' column per alphabet entry plus a 'pos' column.
    Returns the matplotlib figure.
    '''
    headers = ['val_' + inv_dict[i] for i in range(len(seq_dict))]
    data = np.transpose(np.array(df[headers]))
    # Re-zero each column so the minimum value at every position is 0.
    data = data - data.min(axis=0)
    fig, ax1 = plt.subplots()
    ax1.imshow(data, interpolation='nearest')
    ax1.set_xlabel('Position w.r.t. transcription start site')
    ax1.set_yticks(range(len(seq_dict)))
    ax1.set_yticklabels([inv_dict[i] for i in range(len(seq_dict))])
    # Label every 5th position along the x axis.
    tick_indices = np.arange(0, len(df['pos']), 5)
    ax1.set_xticks(tick_indices)
    ax1.set_xticklabels(list(df['pos'][tick_indices]))
    return fig
def PSSM_info(x,matrix):
    '''Self-information of the PSSM derived from `matrix` at scale x, minus length.

    Designed to be passed to a root finder: its zero is the scale at which
    the PSSM carries ~1 bit of information per position (see
    draw_logo_from_matrix).
    '''
    num_positions = matrix.shape[1]
    pssm = utils.get_PSSM_from_weight_matrix(matrix, x)
    return utils.compute_PSSM_self_information(pssm) - num_positions
def draw_logo_from_matrix(df,seq_dict,inv_dict,dicttype,x0=None):
    '''Display an energy matrix as a sequence logo.

    Parameters
    ----------
    df : pandas.DataFrame
        Table with 'pos' and 'val_<char>' columns.
    seq_dict, inv_dict : dict
        Forward/inverse maps between characters and column indices.
    dicttype : str
        Sequence type forwarded to draw_logo (e.g. 'dna').
    x0 : float, optional
        PSSM scaling factor. If None, it is solved for numerically so the
        logo carries ~1 bit of information per position.

    Returns
    -------
    The image object produced by draw_logo.
    '''
    column_headers = ['val_' + inv_dict[i] for i in range(len(seq_dict))]
    matrix = np.transpose(np.array(df[column_headers]))
    matrix = utils.fix_matrix_gauge(matrix)
    rename_headers = ['freq_' + inv_dict[i] for i in range(len(seq_dict))]
    '''We need to supply a scalar to determine pssm total info,
    we want total info content of approx 1 bit per basepair,
    so we will solve info - L = 0'''
    # Identity check: the original `if not x0` wrongly treated an explicit
    # x0 == 0.0 as "not supplied" and recomputed it.
    if x0 is None:
        optimal_x = newton(PSSM_info, x0=19, args=(matrix,))
    else:
        optimal_x = x0
    pssm_df = pd.DataFrame(
        np.transpose(utils.get_PSSM_from_weight_matrix(matrix, optimal_x)),
        columns=rename_headers)
    pssm_df = pd.concat([df['pos'], pssm_df], axis=1)
    myimage = draw_logo(pssm_df, seq_dict, inv_dict, dicttype)
    return myimage
def draw_counts(df, seq_dict, inv_dict):
    '''Draw heatmap of counts of each base at each position.

    Returns the matplotlib figure. (Removed the unused local `L`.)
    '''
    column_headers = ['ct_' + inv_dict[i] for i in range(len(seq_dict))]
    counts_arr = np.transpose(np.array(df[column_headers]))
    fig, ax = plt.subplots()
    plt.pcolor(counts_arr, cmap=plt.cm.coolwarm)
    # Label every fifth position along the x axis.
    indices = np.arange(0, len(df['pos']), 5)
    xtick_labels = list(df['pos'][indices])
    ax.set_xticks(indices)
    ax.set_xticklabels(xtick_labels)
    # Center the base labels on each heatmap row.
    ax.set_yticks(np.arange(len(seq_dict)) + 0.5, minor=False)
    ax.set_yticklabels([inv_dict[i] for i in range(len(seq_dict))], minor=False)
    plt.title('Mutation Profile', fontsize=20)
    plt.xlabel('Position', fontsize=20)
    plt.ylabel('Counts', fontsize=20)
    return fig
def draw_logo(df, seq_dict, inv_dict, dicttype):
    '''Draw logo of sequences.

    Builds a WebLogo from the freq_* columns of df and returns the rendered
    PDF as raw bytes (written via outloc.write, not plt.savefig).

    Raises ValueError for an unrecognized dicttype; previously an unknown
    type fell through and crashed later with a NameError on `data`.
    '''
    # Logo layout options.
    stackwidth = 9.5  # stack width in points (not the default 10.8)
    # ratio of stack height:width; doesn't count the part going over
    # the maximum value of 1
    stackaspectratio = 4.4
    logo_options = weblogolib.LogoOptions()
    logo_options.fineprint = None
    logo_options.stack_aspect_ratio = stackaspectratio
    logo_options.show_errorbars = True
    logo_options.errorbar_fraction = .75
    logo_options.errorbar_gray = .9
    logo_options.errorbar_width_fraction = .9
    logo_options.stack_width = stackwidth
    # Uncomment to force each column to height 1:
    # logo_options.unit_name = 'probability'
    logo_options.show_yaxis = False
    # One alphabet per sequence type; the counts extraction is identical for
    # all three (previously duplicated in three branches).
    alphabets = {
        'dna': weblogolib.unambiguous_dna_alphabet,
        'protein': weblogolib.unambiguous_protein_alphabet,
        'rna': weblogolib.unambiguous_rna_alphabet,
    }
    if dicttype not in alphabets:
        raise ValueError('Unknown dicttype: {}'.format(dicttype))
    al = alphabets[dicttype]
    column_headers = ['freq_' + inv_dict[i] for i in range(len(seq_dict))]
    counts_arr = np.array(df[column_headers])
    data = weblogolib.LogoData.from_counts(al, counts_arr)
    if dicttype == 'dna':
        # Conventional base coloring.
        colormapping = {}
        colormapping['A'] = '#008000'
        colormapping['T'] = '#FF0000'
        colormapping['C'] = '#0000FF'
        colormapping['G'] = '#FFA500'
        color_scheme = weblogolib.colorscheme.ColorScheme()
        for x in [inv_dict[i] for i in range(len(seq_dict))]:
            if hasattr(color_scheme, 'rules'):
                color_scheme.rules.append(
                    weblogolib.colorscheme.SymbolColor(x, colormapping[x], "'%s'" % x))
            else:
                # this part is needed for weblogo 3.4, which uses 'groups'
                color_scheme.groups.append(
                    weblogolib.colorscheme.ColorGroup(x, colormapping[x], "'%s'" % x))
        logo_options.color_scheme = color_scheme
    # Render to PDF and return the raw bytes.
    myformat = weblogolib.LogoFormat(data, logo_options)
    myimage = weblogolib.pdf_formatter(data, myformat)
    return myimage
def draw_compare_predictiveinfo(df, title=None):
    '''Draws a heat map of predictive info between Test data sets and models.

    The values displayed will be normalized by the maximum values in each
    column. Returns the matplotlib figure. Note: mutates df in place by
    dropping the 'Training/Test' column.
    '''
    # Get row labels, then remove the labeling column.
    exprows = df['Training/Test']
    try:
        df.drop('Training/Test', axis=1, inplace=True)
    except KeyError:
        # Column already absent; previously a bare `except:` that would
        # also hide unrelated errors.
        pass
    # Normalize each column by its maximum so all cells fall in [0, 1].
    normalization = df.max(axis=0)
    normalized_df = df.div(normalization, axis=1)
    expcolumns = df.columns
    fig, ax = plt.subplots()
    heatmap = ax.pcolor(normalized_df, cmap=plt.cm.coolwarm, vmax=1, vmin=0)
    fig.set_figheight(10)
    fig.set_figwidth(15)
    plt.colorbar(heatmap)
    # Center tick labels on the heatmap cells.
    ax.set_xticks(np.arange(len(expcolumns)) + 0.5, minor=False)
    ax.set_yticks(np.arange(len(exprows)) + 0.5, minor=False)
    ax.set_xticklabels([expcolumns[z] for z in range(len(expcolumns))], minor=False)
    ax.set_yticklabels([exprows[z] for z in range(len(exprows))], minor=False)
    if title:
        plt.title('Model Comparison ' + title, fontsize=20)
    plt.xlabel('Test Data Set', fontsize=20)
    plt.ylabel('Training Data Set', fontsize=20)
    return fig
def main(df, dicttype, logo=False, title=None, x0=None):
    '''Dispatch to the appropriate draw function based on df's columns.

    Returns (myimage, output_via_write): some draw functions return a
    matplotlib figure to be saved via plt.savefig, others return raw bytes
    that must be written directly.

    Raises ValueError if no draw type matches the columns; previously this
    fell through and crashed with UnboundLocalError on `myimage`.
    '''
    seq_dict, inv_dict = utils.choose_dict(dicttype)
    matrix_headers = ['val_' + inv_dict[i] for i in range(len(seq_dict))]
    columns = df.columns
    output_via_write = False
    # Autodetect the type of draw function to use.
    if {'ct', 'seq'}.issubset(columns):
        myimage = draw_library(df, seq_dict)
    elif set(matrix_headers).issubset(columns) and not logo:
        myimage = draw_matrix(df, seq_dict, inv_dict)
    elif set(matrix_headers).issubset(columns) and logo:
        myimage = draw_logo_from_matrix(df, seq_dict, inv_dict, dicttype, x0=x0)
        output_via_write = True
    elif set(['freq_' + inv_dict[i] for i in range(len(seq_dict))]).issubset(columns):
        myimage = draw_logo(df, seq_dict, inv_dict, dicttype)
        output_via_write = True
    elif set(['ct_' + inv_dict[i] for i in range(len(seq_dict))]).issubset(columns):
        myimage = draw_counts(df, seq_dict, inv_dict)
    elif {'pos', 'info'}.issubset(columns):
        myimage = draw_info_profile(df)
    elif {'pos', 'mut'}.issubset(columns):
        myimage = draw_mutrate(df)
    else:
        raise ValueError(
            'Could not determine draw type from columns: %s' % list(columns))
    return myimage, output_via_write
# Define commandline wrapper
def wrapper(args):
    '''Command-line entry point: read a table, draw it, write the result.

    Fix: only close the output handle if we opened it ourselves; the
    original unconditionally closed sys.stdout. Also removed the unused
    local `logo`.
    '''
    dicttype = args.type
    # Read from the given file, or from stdin when -i is absent.
    if args.i:
        df = pd.io.parsers.read_csv(args.i, delim_whitespace=True)
    else:
        df = pd.io.parsers.read_csv(sys.stdin, delim_whitespace=True)
    myimage, output_via_write = main(
        df, dicttype, logo=args.logo, title=args.title, x0=args.x)
    if args.out:
        outloc = open(args.out, 'w')
    else:
        outloc = sys.stdout
    # Figures go through savefig; raw image bytes are written directly.
    if not output_via_write:
        plt.savefig(outloc, format='pdf')
    else:
        outloc.write(myimage)
    if args.out:
        outloc.close()
# Connects argparse to wrapper
def add_subparser(subparsers):
p = subparsers.add_parser('draw')
p.add_argument('-w','--wtseq',default=None,help ='Wild Type Sequence')
p.add_argument('--logo',action='store_true',help='Draw matrix as logo')
p.add_argument('-i', '--i', default=None,help='''Input file, otherwise input
through the standard input.''')
p.add_argument('--title',default=None,type=str)
p.add_argument('-t', '--type', choices=['dna','rna','protein'], default='dna')
p.add_argument('-o', '--out', default=None)
p.add_argument('-x', '--x', type=int,default=None,help='''This parameter controls the total information
content of a sequence logo. Higher values will increase the relative heights of bases. If no value
is supplied the program will attempt to calculate one automatically.''')
p.set_defaults(func=wrapper)
| [
"matplotlib.pyplot.title",
"weblogolib.colorscheme.ColorGroup",
"weblogolib.colorscheme.ColorScheme",
"numpy.isnan",
"matplotlib.pyplot.figure",
"scipy.optimize.newton",
"weblogolib.colorscheme.SymbolColor",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.subplots",
"pandas.concat",
"mpathic.ut... | [((502, 516), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (514, 516), True, 'import matplotlib.pyplot as plt\n'), ((613, 658), 'matplotlib.pyplot.title', 'plt.title', (['"""Zipf Frequency Plot"""'], {'fontsize': '(15)'}), "('Zipf Frequency Plot', fontsize=15)\n", (622, 658), True, 'import matplotlib.pyplot as plt\n'), ((662, 709), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Rank Order of Counts"""'], {'fontsize': '(15)'}), "('Rank Order of Counts', fontsize=15)\n", (672, 709), True, 'import matplotlib.pyplot as plt\n'), ((713, 746), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Counts"""'], {'fontsize': '(15)'}), "('Counts', fontsize=15)\n", (723, 746), True, 'import matplotlib.pyplot as plt\n'), ((919, 933), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (931, 933), True, 'import matplotlib.pyplot as plt\n'), ((1206, 1241), 'matplotlib.pyplot.pcolor', 'plt.pcolor', (['masked_array'], {'cmap': 'cmap'}), '(masked_array, cmap=cmap)\n', (1216, 1241), True, 'import matplotlib.pyplot as plt\n'), ((1369, 1383), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1381, 1383), True, 'import matplotlib.pyplot as plt\n'), ((1521, 1565), 'matplotlib.pyplot.title', 'plt.title', (['"""Enrichment Profile"""'], {'fontsize': '(20)'}), "('Enrichment Profile', fontsize=20)\n", (1530, 1565), True, 'import matplotlib.pyplot as plt\n'), ((1569, 1606), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Amino Acid"""'], {'fontsize': '(20)'}), "('Amino Acid', fontsize=20)\n", (1579, 1606), True, 'import matplotlib.pyplot as plt\n'), ((1610, 1645), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Position"""'], {'fontsize': '(20)'}), "('Position', fontsize=20)\n", (1620, 1645), True, 'import matplotlib.pyplot as plt\n'), ((1778, 1790), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1788, 1790), True, 'import matplotlib.pyplot as plt\n'), ((1915, 1955), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Base 
Position"""'], {'fontsize': '(20)'}), "('Base Position', fontsize=20)\n", (1925, 1955), True, 'import matplotlib.pyplot as plt\n'), ((1961, 2013), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mutual Information (bits)"""'], {'fontsize': '(20)'}), "('Mutual Information (bits)', fontsize=20)\n", (1971, 2013), True, 'import matplotlib.pyplot as plt\n'), ((2057, 2089), 'matplotlib.pyplot.title', 'plt.title', (['figtitle'], {'fontsize': '(22)'}), '(figtitle, fontsize=22)\n', (2066, 2089), True, 'import matplotlib.pyplot as plt\n'), ((2275, 2287), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2285, 2287), True, 'import matplotlib.pyplot as plt\n'), ((2411, 2451), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Base Position"""'], {'fontsize': '(20)'}), "('Base Position', fontsize=20)\n", (2421, 2451), True, 'import matplotlib.pyplot as plt\n'), ((2457, 2497), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mutation Rate"""'], {'fontsize': '(20)'}), "('Mutation Rate', fontsize=20)\n", (2467, 2497), True, 'import matplotlib.pyplot as plt\n'), ((2538, 2570), 'matplotlib.pyplot.title', 'plt.title', (['figtitle'], {'fontsize': '(22)'}), '(figtitle, fontsize=22)\n', (2547, 2570), True, 'import matplotlib.pyplot as plt\n'), ((2857, 2871), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2869, 2871), True, 'import matplotlib.pyplot as plt\n'), ((3402, 3446), 'mpathic.utils.get_PSSM_from_weight_matrix', 'utils.get_PSSM_from_weight_matrix', (['matrix', 'x'], {}), '(matrix, x)\n', (3435, 3446), True, 'import mpathic.utils as utils\n'), ((3457, 3503), 'mpathic.utils.compute_PSSM_self_information', 'utils.compute_PSSM_self_information', (['temp_pssm'], {}), '(temp_pssm)\n', (3492, 3503), True, 'import mpathic.utils as utils\n'), ((3782, 3812), 'mpathic.utils.fix_matrix_gauge', 'utils.fix_matrix_gauge', (['matrix'], {}), '(matrix)\n', (3804, 3812), True, 'import mpathic.utils as utils\n'), ((4317, 4356), 'pandas.concat', 'pd.concat', (["[df['pos'], 
pssm_df]"], {'axis': '(1)'}), "([df['pos'], pssm_df], axis=1)\n", (4326, 4356), True, 'import pandas as pd\n'), ((4722, 4736), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4734, 4736), True, 'import matplotlib.pyplot as plt\n'), ((4741, 4785), 'matplotlib.pyplot.pcolor', 'plt.pcolor', (['counts_arr'], {'cmap': 'plt.cm.coolwarm'}), '(counts_arr, cmap=plt.cm.coolwarm)\n', (4751, 4785), True, 'import matplotlib.pyplot as plt\n'), ((5083, 5125), 'matplotlib.pyplot.title', 'plt.title', (['"""Mutation Profile"""'], {'fontsize': '(20)'}), "('Mutation Profile', fontsize=20)\n", (5092, 5125), True, 'import matplotlib.pyplot as plt\n'), ((5129, 5164), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position"""'], {'fontsize': '(20)'}), "('Position', fontsize=20)\n", (5139, 5164), True, 'import matplotlib.pyplot as plt\n'), ((5168, 5201), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Counts"""'], {'fontsize': '(20)'}), "('Counts', fontsize=20)\n", (5178, 5201), True, 'import matplotlib.pyplot as plt\n'), ((5747, 5771), 'weblogolib.LogoOptions', 'weblogolib.LogoOptions', ([], {}), '()\n', (5769, 5771), False, 'import weblogolib\n'), ((7963, 8004), 'weblogolib.LogoFormat', 'weblogolib.LogoFormat', (['data', 'logo_options'], {}), '(data, logo_options)\n', (7984, 8004), False, 'import weblogolib\n'), ((8018, 8058), 'weblogolib.pdf_formatter', 'weblogolib.pdf_formatter', (['data', 'myformat'], {}), '(data, myformat)\n', (8042, 8058), False, 'import weblogolib\n'), ((8610, 8624), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (8622, 8624), True, 'import matplotlib.pyplot as plt\n'), ((8762, 8783), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['heatmap'], {}), '(heatmap)\n', (8774, 8783), True, 'import matplotlib.pyplot as plt\n'), ((9204, 9244), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Test Data Set"""'], {'fontsize': '(20)'}), "('Test Data Set', fontsize=20)\n", (9214, 9244), True, 'import matplotlib.pyplot as plt\n'), ((9248, 9292), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Training Data Set"""'], {'fontsize': '(20)'}), "('Training Data Set', fontsize=20)\n", (9258, 9292), True, 'import matplotlib.pyplot as plt\n'), ((9386, 9413), 'mpathic.utils.choose_dict', 'utils.choose_dict', (['dicttype'], {}), '(dicttype)\n', (9403, 9413), True, 'import mpathic.utils as utils\n'), ((2777, 2805), 'numpy.array', 'np.array', (['df[column_headers]'], {}), '(df[column_headers])\n', (2785, 2805), True, 'import numpy as np\n'), ((3739, 3767), 'numpy.array', 'np.array', (['df[column_headers]'], {}), '(df[column_headers])\n', (3747, 3767), True, 'import numpy as np\n'), ((4096, 4136), 'scipy.optimize.newton', 'newton', (['PSSM_info'], {'x0': '(19)', 'args': '(matrix,)'}), '(PSSM_info, x0=19, args=(matrix,))\n', (4102, 4136), False, 'from scipy.optimize import newton\n'), ((4668, 4696), 'numpy.array', 'np.array', (['df[column_headers]'], {}), '(df[column_headers])\n', (4676, 4696), True, 'import numpy as np\n'), ((6491, 6519), 'numpy.array', 'np.array', (['df[column_headers]'], {}), '(df[column_headers])\n', (6499, 6519), True, 'import numpy as np\n'), ((6535, 6582), 'weblogolib.LogoData.from_counts', 'weblogolib.LogoData.from_counts', (['al', 'counts_arr'], {}), '(al, counts_arr)\n', (6566, 6582), False, 'import weblogolib\n'), ((6814, 6850), 'weblogolib.colorscheme.ColorScheme', 'weblogolib.colorscheme.ColorScheme', ([], {}), '()\n', (6848, 6850), False, 'import weblogolib\n'), ((7541, 7569), 'numpy.array', 'np.array', (['df[column_headers]'], {}), '(df[column_headers])\n', (7549, 7569), True, 'import numpy as np\n'), ((7585, 7632), 'weblogolib.LogoData.from_counts', 'weblogolib.LogoData.from_counts', (['al', 'counts_arr'], {}), '(al, counts_arr)\n', (7616, 7632), False, 'import weblogolib\n'), ((7824, 7852), 'numpy.array', 'np.array', (['df[column_headers]'], {}), '(df[column_headers])\n', (7832, 7852), True, 'import numpy as np\n'), ((7868, 7915), 'weblogolib.LogoData.from_counts', 
'weblogolib.LogoData.from_counts', (['al', 'counts_arr'], {}), '(al, counts_arr)\n', (7899, 7915), False, 'import weblogolib\n'), ((9149, 9200), 'matplotlib.pyplot.title', 'plt.title', (["('Model Comparison ' + title)"], {'fontsize': '(20)'}), "('Model Comparison ' + title, fontsize=20)\n", (9158, 9200), True, 'import matplotlib.pyplot as plt\n'), ((10727, 10780), 'pandas.io.parsers.read_csv', 'pd.io.parsers.read_csv', (['args.i'], {'delim_whitespace': '(True)'}), '(args.i, delim_whitespace=True)\n', (10749, 10780), True, 'import pandas as pd\n'), ((10803, 10859), 'pandas.io.parsers.read_csv', 'pd.io.parsers.read_csv', (['sys.stdin'], {'delim_whitespace': '(True)'}), '(sys.stdin, delim_whitespace=True)\n', (10825, 10859), True, 'import pandas as pd\n'), ((11101, 11134), 'matplotlib.pyplot.savefig', 'plt.savefig', (['outloc'], {'format': '"""pdf"""'}), "(outloc, format='pdf')\n", (11112, 11134), True, 'import matplotlib.pyplot as plt\n'), ((1115, 1143), 'numpy.isnan', 'np.isnan', (['df[column_headers]'], {}), '(df[column_headers])\n', (1123, 1143), True, 'import numpy as np\n'), ((4217, 4269), 'mpathic.utils.get_PSSM_from_weight_matrix', 'utils.get_PSSM_from_weight_matrix', (['matrix', 'optimal_x'], {}), '(matrix, optimal_x)\n', (4250, 4269), True, 'import mpathic.utils as utils\n'), ((7010, 7076), 'weblogolib.colorscheme.SymbolColor', 'weblogolib.colorscheme.SymbolColor', (['x', 'colormapping[x]', '("\'%s\'" % x)'], {}), '(x, colormapping[x], "\'%s\'" % x)\n', (7044, 7076), False, 'import weblogolib\n'), ((7225, 7290), 'weblogolib.colorscheme.ColorGroup', 'weblogolib.colorscheme.ColorGroup', (['x', 'colormapping[x]', '("\'%s\'" % x)'], {}), '(x, colormapping[x], "\'%s\'" % x)\n', (7258, 7290), False, 'import weblogolib\n')] |
from __future__ import absolute_import
from typing import Union, Optional, List, Dict, Any, Tuple
import math
import numpy as np
import tensorflow as tf
from .factory import AbstractFactory, AbstractTensor, AbstractConstant, AbstractVariable, AbstractPlaceholder
from .shared import binarize, im2col
class PrimeTensor(AbstractTensor):
    """Tensor whose entries live in Z_p for the prime modulus of its factory.

    Arithmetic is performed with tf/numpy ops followed by an explicit
    reduction modulo `self.modulus`.
    """

    def __init__(self, value: Union[np.ndarray, tf.Tensor], factory: 'PrimeFactory') -> None:
        self._factory = factory
        # Cached so arithmetic methods need not go through the factory.
        self.modulus = factory.modulus
        self.value = value

    def to_native(self) -> Union[tf.Tensor, np.ndarray]:
        """Return the wrapped tf.Tensor / ndarray (no copy)."""
        return self.value

    def to_bits(self, factory: Optional[AbstractFactory] = None) -> 'PrimeTensor':
        """Expand each element into ceil(log2(modulus)) bits."""
        factory = factory or self.factory
        bitsize = math.ceil(math.log2(self.modulus))
        return factory.tensor(binarize(self.value % self.modulus, bitsize))

    def __getitem__(self, slice: Any) -> 'PrimeTensor':
        # Returns a PrimeTensor wrapping the sliced value (annotation fixed:
        # the original declared the raw tensor type).
        return self.factory.tensor(self.value[slice])

    def __repr__(self) -> str:
        return 'PrimeTensor(shape={}, modulus={})'.format(self.shape, self.modulus)

    @property
    def shape(self) -> Union[Tuple[int, ...], tf.TensorShape]:
        """Shape of the wrapped value."""
        return self.value.shape

    @property
    def factory(self) -> AbstractFactory:
        """The factory that produced this tensor."""
        return self._factory

    # Operator sugar delegating to the named methods below.
    def __add__(self, other) -> 'PrimeTensor':
        return self.add(other)

    def __sub__(self, other) -> 'PrimeTensor':
        return self.sub(other)

    def __mul__(self, other) -> 'PrimeTensor':
        return self.mul(other)

    def __mod__(self, k: int) -> 'PrimeTensor':
        return self.mod(k)

    def add(self, other) -> 'PrimeTensor':
        """Elementwise addition modulo the prime."""
        x, y = _lift(self, other)
        return self.factory.tensor((x.value + y.value) % self.modulus)

    def sub(self, other) -> 'PrimeTensor':
        """Elementwise subtraction modulo the prime."""
        x, y = _lift(self, other)
        return self.factory.tensor((x.value - y.value) % self.modulus)

    def mul(self, other) -> 'PrimeTensor':
        """Elementwise multiplication modulo the prime."""
        x, y = _lift(self, other)
        return self.factory.tensor((x.value * y.value) % self.modulus)

    def negative(self) -> 'PrimeTensor':
        """Additive inverse, computed as multiplication by -1."""
        return self.mul(-1)

    def matmul(self, other) -> 'PrimeTensor':
        """Matrix product modulo the prime."""
        x, y = _lift(self, other)
        return self.factory.tensor(tf.matmul(x.value, y.value) % self.modulus)

    def im2col(self, h_filter, w_filter, padding, strides) -> 'PrimeTensor':
        """Rearrange image patches into columns (delegates to shared im2col)."""
        return self.factory.tensor(im2col(self.value, h_filter, w_filter, padding, strides))

    def conv2d(self, other, strides, padding='SAME') -> 'PrimeTensor':
        raise NotImplementedError()

    def mod(self, k: int) -> 'PrimeTensor':
        """Reduce modulo k, then modulo the prime."""
        return self.factory.tensor((self.value % k) % self.modulus)

    def transpose(self, perm: Union[List[int], Tuple[int]]) -> 'PrimeTensor':
        return self.factory.tensor(tf.transpose(self.value, perm))

    def strided_slice(self, args: Any, kwargs: Any) -> 'PrimeTensor':
        # args/kwargs are forwarded verbatim to tf.strided_slice.
        return self.factory.tensor(tf.strided_slice(self.value, *args, **kwargs))

    def split(self, num_split: int, axis: int = 0) -> List['PrimeTensor']:
        """Split along `axis` into `num_split` equal PrimeTensors."""
        values = tf.split(self.value, num_split, axis=axis)
        return [self.factory.tensor(value) for value in values]

    def reshape(self, axes: Union[tf.Tensor, List[int]]) -> 'PrimeTensor':
        return self.factory.tensor(tf.reshape(self.value, axes))

    def expand_dims(self, axis: int) -> 'PrimeTensor':
        return self.factory.tensor(tf.expand_dims(self.value, axis))

    def reduce_sum(self, axis, keepdims) -> 'PrimeTensor':
        """Sum along `axis`, reduced modulo the prime."""
        return self.factory.tensor(tf.reduce_sum(self.value, axis, keepdims) % self.modulus)

    def sum(self, axis, keepdims) -> 'PrimeTensor':
        return self.reduce_sum(axis, keepdims)

    def cumsum(self, axis, exclusive, reverse) -> 'PrimeTensor':
        """Cumulative sum along `axis`, reduced modulo the prime."""
        return self.factory.tensor(
            tf.cumsum(self.value, axis=axis, exclusive=exclusive, reverse=reverse) % self.modulus
        )

    def equal_zero(self, out_dtype: Optional[AbstractFactory] = None) -> 'PrimeTensor':
        """Elementwise x == 0 indicator, cast to the given (or own) factory's dtype."""
        out_dtype = out_dtype or self.factory
        return out_dtype.tensor(tf.cast(tf.equal(self.value, 0), dtype=out_dtype.native_type))

    def cast(self, factory):
        """Re-wrap the raw value in another factory (no value conversion)."""
        return factory.tensor(self.value)
def _lift(x, y) -> Tuple[PrimeTensor, PrimeTensor]:
    """Coerce (x, y) into a pair of modulus-compatible PrimeTensors."""
    x_is_prime = isinstance(x, PrimeTensor)
    y_is_prime = isinstance(y, PrimeTensor)
    if x_is_prime and y_is_prime:
        assert x.modulus == y.modulus, "Incompatible moduli: {} and {}".format(x.modulus, y.modulus)
        return x, y
    # Promote a plain int to a one-element tensor in the other operand's factory.
    if x_is_prime and isinstance(y, int):
        return x, x.factory.tensor(np.array([y]))
    if isinstance(x, int) and y_is_prime:
        return y.factory.tensor(np.array([x])), y
    raise TypeError("Don't know how to lift {} {}".format(type(x), type(y)))
class PrimeConstant(PrimeTensor, AbstractConstant):
    """PrimeTensor whose value is baked into the graph as a tf.constant."""

    def __init__(self, value: Union[tf.Tensor, np.ndarray], factory) -> None:
        v = tf.constant(value, dtype=factory.native_type)
        super(PrimeConstant, self).__init__(v, factory)

    def __repr__(self) -> str:
        return 'PrimeConstant({})'.format(self.shape)
class PrimePlaceholder(PrimeTensor, AbstractPlaceholder):
    """PrimeTensor backed by a tf.placeholder, fed at session run time."""

    def __init__(self, shape: List[int], factory) -> None:
        placeholder = tf.placeholder(factory.native_type, shape=shape)
        super(PrimePlaceholder, self).__init__(placeholder, factory)
        # Kept so feed dicts can be keyed on the raw placeholder.
        self.placeholder = placeholder

    def __repr__(self) -> str:
        return 'PrimePlaceholder({})'.format(self.shape)

    def feed_from_native(self, value: np.ndarray) -> Dict[tf.Tensor, np.ndarray]:
        """Build a feed dict from a raw ndarray (wrapped via the factory)."""
        assert isinstance(value, np.ndarray), type(value)
        return self.feed_from_same(self.factory.tensor(value))

    def feed_from_same(self, value: PrimeTensor) -> Dict[tf.Tensor, np.ndarray]:
        """Build a feed dict from an already-wrapped PrimeTensor."""
        assert isinstance(value, PrimeTensor), type(value)
        return {
            self.placeholder: value.value
        }
class PrimeVariable(PrimeTensor, AbstractVariable):
    """Mutable PrimeTensor backed by a non-trainable tf.Variable."""

    def __init__(self, initial_value: Union[tf.Tensor, np.ndarray], factory) -> None:
        self.variable = tf.Variable(initial_value, dtype=factory.native_type, trainable=False)
        # Exposed so callers can run the initializer op themselves.
        self.initializer = self.variable.initializer
        super(PrimeVariable, self).__init__(self.variable.read_value(), factory)

    def __repr__(self) -> str:
        return 'PrimeVariable({})'.format(self.shape)

    def assign_from_native(self, value: np.ndarray) -> tf.Operation:
        """Assign from a raw ndarray; returns the tf assign op."""
        assert isinstance(value, np.ndarray), type(value)
        return self.assign_from_same(self.factory.tensor(value))

    def assign_from_same(self, value: PrimeTensor) -> tf.Operation:
        """Assign from another PrimeTensor; returns the tf assign op."""
        assert isinstance(value, (PrimeTensor,)), type(value)
        return tf.assign(self.variable, value.value).op
class PrimeFactory(AbstractFactory):
    """Factory producing tensors, constants, variables and placeholders over Z_p."""

    def __init__(self, modulus, native_type=tf.int32):
        self._modulus = modulus
        self.native_type = native_type

    @property
    def modulus(self):
        """The prime modulus shared by every tensor from this factory."""
        return self._modulus

    def sample_uniform(self, shape: Union[Tuple[int, ...], tf.TensorShape], minval: Optional[int] = 0) -> PrimeTensor:
        """Sample uniformly from [minval, modulus)."""
        sampled = tf.random_uniform(shape=shape, dtype=self.native_type, minval=minval, maxval=self.modulus)
        return PrimeTensor(sampled, self)

    def sample_bounded(self, shape: List[int], bitlength: int) -> PrimeTensor:
        """Sample uniformly from [0, 2**bitlength); requires modulus > 2**bitlength."""
        upper = 2 ** bitlength
        assert self.modulus > upper
        sampled = tf.random_uniform(shape=shape, dtype=self.native_type, minval=0, maxval=upper)
        return PrimeTensor(sampled, self)

    def stack(self, xs: List[PrimeTensor], axis: int = 0) -> PrimeTensor:
        """Stack PrimeTensors along a new axis."""
        assert all(isinstance(x, PrimeTensor) for x in xs)
        stacked = tf.stack([x.value for x in xs], axis=axis)
        return PrimeTensor(stacked, self)

    def concat(self, xs: List[PrimeTensor], axis: int = 0) -> PrimeTensor:
        """Concatenate PrimeTensors along an existing axis."""
        assert all(isinstance(x, PrimeTensor) for x in xs)
        joined = tf.concat([v.value for v in xs], axis=axis)
        return PrimeTensor(joined, self)

    def tensor(self, value) -> PrimeTensor:
        """Wrap a raw tensor/ndarray or re-wrap a compatible PrimeTensor."""
        if isinstance(value, PrimeTensor):
            assert value.modulus == self.modulus, "Incompatible modulus: {}, (expected {})".format(value.modulus, self.modulus)
            return PrimeTensor(value.value, self)
        if isinstance(value, (tf.Tensor, np.ndarray)):
            return PrimeTensor(value, self)
        raise TypeError("Don't know how to handle {}".format(type(value)))

    def constant(self, value) -> PrimeConstant:
        """Like tensor(), but produces a graph constant."""
        if isinstance(value, PrimeTensor):
            assert value.modulus == self.modulus, "Incompatible modulus: {}, (expected {})".format(value.modulus, self.modulus)
            return PrimeConstant(value.value, self)
        if isinstance(value, (tf.Tensor, np.ndarray)):
            return PrimeConstant(value, self)
        raise TypeError("Don't know how to handle {}".format(type(value)))

    def variable(self, initial_value) -> PrimeVariable:
        """Like tensor(), but produces a mutable variable."""
        if isinstance(initial_value, PrimeTensor):
            assert initial_value.modulus == self.modulus, "Incompatible modulus: {}, (expected {})".format(initial_value.modulus, self.modulus)
            return PrimeVariable(initial_value.value, self)
        if isinstance(initial_value, (tf.Tensor, np.ndarray)):
            return PrimeVariable(initial_value, self)
        raise TypeError("Don't know how to handle {}".format(type(initial_value)))

    def placeholder(self, shape: List[int]) -> PrimePlaceholder:
        """Create a placeholder-backed PrimeTensor of the given shape."""
        return PrimePlaceholder(shape, self)
| [
"tensorflow.random_uniform",
"math.log2",
"tensorflow.reduce_sum",
"tensorflow.cumsum",
"tensorflow.strided_slice",
"tensorflow.reshape",
"tensorflow.concat",
"tensorflow.constant",
"tensorflow.transpose",
"tensorflow.stack",
"tensorflow.placeholder",
"tensorflow.assign",
"tensorflow.Variabl... | [((3081, 3123), 'tensorflow.split', 'tf.split', (['self.value', 'num_split'], {'axis': 'axis'}), '(self.value, num_split, axis=axis)\n', (3089, 3123), True, 'import tensorflow as tf\n'), ((4900, 4945), 'tensorflow.constant', 'tf.constant', (['value'], {'dtype': 'factory.native_type'}), '(value, dtype=factory.native_type)\n', (4911, 4945), True, 'import tensorflow as tf\n'), ((5230, 5278), 'tensorflow.placeholder', 'tf.placeholder', (['factory.native_type'], {'shape': 'shape'}), '(factory.native_type, shape=shape)\n', (5244, 5278), True, 'import tensorflow as tf\n'), ((6055, 6125), 'tensorflow.Variable', 'tf.Variable', (['initial_value'], {'dtype': 'factory.native_type', 'trainable': '(False)'}), '(initial_value, dtype=factory.native_type, trainable=False)\n', (6066, 6125), True, 'import tensorflow as tf\n'), ((7095, 7189), 'tensorflow.random_uniform', 'tf.random_uniform', ([], {'shape': 'shape', 'dtype': 'self.native_type', 'minval': 'minval', 'maxval': 'self.modulus'}), '(shape=shape, dtype=self.native_type, minval=minval,\n maxval=self.modulus)\n', (7112, 7189), True, 'import tensorflow as tf\n'), ((7391, 7470), 'tensorflow.random_uniform', 'tf.random_uniform', ([], {'shape': 'shape', 'dtype': 'self.native_type', 'minval': '(0)', 'maxval': 'maxval'}), '(shape=shape, dtype=self.native_type, minval=0, maxval=maxval)\n', (7408, 7470), True, 'import tensorflow as tf\n'), ((7661, 7703), 'tensorflow.stack', 'tf.stack', (['[x.value for x in xs]'], {'axis': 'axis'}), '([x.value for x in xs], axis=axis)\n', (7669, 7703), True, 'import tensorflow as tf\n'), ((7895, 7938), 'tensorflow.concat', 'tf.concat', (['[v.value for v in xs]'], {'axis': 'axis'}), '([v.value for v in xs], axis=axis)\n', (7904, 7938), True, 'import tensorflow as tf\n'), ((770, 793), 'math.log2', 'math.log2', (['self.modulus'], {}), '(self.modulus)\n', (779, 793), False, 'import math\n'), ((2805, 2835), 'tensorflow.transpose', 'tf.transpose', (['self.value', 'perm'], {}), 
'(self.value, perm)\n', (2817, 2835), True, 'import tensorflow as tf\n'), ((2943, 2988), 'tensorflow.strided_slice', 'tf.strided_slice', (['self.value', '*args'], {}), '(self.value, *args, **kwargs)\n', (2959, 2988), True, 'import tensorflow as tf\n'), ((3299, 3327), 'tensorflow.reshape', 'tf.reshape', (['self.value', 'axes'], {}), '(self.value, axes)\n', (3309, 3327), True, 'import tensorflow as tf\n'), ((3420, 3452), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.value', 'axis'], {}), '(self.value, axis)\n', (3434, 3452), True, 'import tensorflow as tf\n'), ((6685, 6722), 'tensorflow.assign', 'tf.assign', (['self.variable', 'value.value'], {}), '(self.variable, value.value)\n', (6694, 6722), True, 'import tensorflow as tf\n'), ((2255, 2282), 'tensorflow.matmul', 'tf.matmul', (['x.value', 'y.value'], {}), '(x.value, y.value)\n', (2264, 2282), True, 'import tensorflow as tf\n'), ((3549, 3590), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.value', 'axis', 'keepdims'], {}), '(self.value, axis, keepdims)\n', (3562, 3590), True, 'import tensorflow as tf\n'), ((3821, 3891), 'tensorflow.cumsum', 'tf.cumsum', (['self.value'], {'axis': 'axis', 'exclusive': 'exclusive', 'reverse': 'reverse'}), '(self.value, axis=axis, exclusive=exclusive, reverse=reverse)\n', (3830, 3891), True, 'import tensorflow as tf\n'), ((4090, 4113), 'tensorflow.equal', 'tf.equal', (['self.value', '(0)'], {}), '(self.value, 0)\n', (4098, 4113), True, 'import tensorflow as tf\n'), ((4553, 4566), 'numpy.array', 'np.array', (['[y]'], {}), '([y])\n', (4561, 4566), True, 'import numpy as np\n'), ((4659, 4672), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (4667, 4672), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
# @Time : 23/10/18 9:10 PM
# @Author : <NAME>
# @FileName: fuzzy_control.py
# @Software: PyCharm
# @Github : https://github.com/hzm2016
"""
import matplotlib.pyplot as plt
import tensorflow as tf
import argparse
import numpy as np
import copy as cp
import skfuzzy.control as ctrl
from mpl_toolkits.mplot3d import Axes3D # Required for 3D plotting
class fuzzy_control(object):
# the input is six forces and moments
# the output is the hyperpapermeters [Kpz, kpx, kpy, krx, kry, krz]
def __init__(self,
low_input=np.array([-40, -40, -40, -5, -5, -5]),
high_input=np.array([40, 40, 0, 5, 5, 5]),
low_output=np.array([0., 0., 0., 0., 0., 0.]),
high_output=np.array([0.015, 0.015, 0.02, 0.015, 0.015, 0.015])):
self.low_input = low_input
self.high_input = high_input
self.low_output = low_output
self.high_output = high_output
self.num_input = 5
self.num_output = 3
self.num_mesh = 21
self.sim_kpx = self.build_fuzzy_kpx()
self.sim_kpy = self.build_fuzzy_kpy()
self.sim_kpz = self.build_fuzzy_kpz()
self.sim_krx = self.build_fuzzy_krx()
self.sim_kry = self.build_fuzzy_kry()
self.sim_krz = self.build_fuzzy_krz()
# self.sim_kpx, self.sim_kpy, self.sim_kpz, \
# self.sim_krx, self.sim_kry, self.sim_krz = self.build_fuzzy_system()
def get_output(self, force):
self.sim_kpx.input['fx'] = force[0]
self.sim_kpx.input['my'] = force[4]
self.sim_kpx.compute()
kpx = self.sim_kpx.output['kpx']
index_3 = force[1]
index_4 = force[3]
self.sim_kpy.input['fy'] = index_3
self.sim_kpy.input['mx'] = index_4
self.sim_kpy.compute()
kpy = self.sim_kpy.output['kpy']
index_5 = force[0]
index_6 = force[1]
self.sim_kpz.input['fx'] = index_5
self.sim_kpz.input['fy'] = index_6
self.sim_kpz.compute()
kpz = self.sim_kpz.output['kpz']
index_7 = force[1]
index_8 = force[3]
self.sim_krx.input['fy'] = index_7
self.sim_krx.input['mx'] = index_8
self.sim_krx.compute()
krx = self.sim_krx.output['krx']
index_9 = force[0]
index_10 = force[4]
self.sim_kry.input['fx'] = index_9
self.sim_kry.input['my'] = index_10
self.sim_kry.compute()
kry = self.sim_kry.output['kry']
index_11 = force[5]
index_12 = force[3]
self.sim_krz.input['mz'] = index_11
self.sim_krz.input['mx'] = index_12
self.sim_krz.compute()
krz = self.sim_krz.output['krz']
# index_1 = (force[0] - self.low_input[0])/(self.high_input[0] - self.low_input[0]) * self.num_mesh
# index_2 = (force[4] - self.low_input[4])/(self.high_input[4] - self.low_input[4]) * self.num_mesh
# self.sim_kpx.input['fx'] = index_1
# self.sim_kpx.input['my'] = index_2
# self.sim_kpx.compute()
# kpx = self.sim_kpx.output['kpx']
#
# index_3 = (force[1] - self.low_input[1]) / (self.high_input[1] - self.low_input[1]) * self.num_mesh
# index_4 = (force[3] - self.low_input[3]) / (self.high_input[3] - self.low_input[3]) * self.num_mesh
# self.sim_kry.input['fy'] = index_3
# self.sim_kry.input['mx'] = index_4
# self.sim_kry.compute()
# kpy = self.sim_kry.output['kpy']
#
# index_5 = (force[0] - self.low_input[0]) / (self.high_input[0] - self.low_input[0]) * self.num_mesh
# index_6 = (force[1] - self.low_input[1]) / (self.high_input[1] - self.low_input[1]) * self.num_mesh
# self.sim_kpz.input['fx'] = index_5
# self.sim_krz.input['fy'] = index_6
# self.sim_krz.compute()
# kpz = self.sim_krz.output['kpz']
#
# index_7 = (force[1] - self.low_input[1]) / (self.high_input[1] - self.low_input[1]) * self.num_mesh
# index_8 = (force[3] - self.low_input[3]) / (self.high_input[3] - self.low_input[3]) * self.num_mesh
# self.sim_krx.input['fy'] = index_7
# self.sim_krx.input['mx'] = index_8
# self.sim_krx.compute()
# krx = self.sim_krx.output['krx']
#
# index_9 = (force[0] - self.low_input[0]) / (self.high_input[0] - self.low_input[0]) * self.num_mesh
# index_10 = (force[4] - self.low_input[4]) / (self.high_input[4] - self.low_input[4]) * self.num_mesh
# self.sim_kry.input['fy'] = index_9
# self.sim_kry.input['mx'] = index_10
# self.sim_kry.compute()
# kry = self.sim_kry.output['kry']
#
# index_11 = (force[5] - self.low_input[5]) / (self.high_input[5] - self.low_input[5]) * self.num_mesh
# index_12 = (force[3] - self.low_input[3]) / (self.high_input[3] - self.low_input[3]) * self.num_mesh
# self.sim_krz.input['mz'] = index_11
# self.sim_krz.input['mx'] = index_12
# self.sim_krz.compute()
# krz = self.sim_krz.output['krx']
return [round(kpx, 5), round(kpy, 5), round(kpz, 5,), round(krx, 5), round(kry, 5), round(krz, 5)]
def plot_rules(self):
self.unsampled = []
for i in range(6):
self.unsampled.append(np.linspace(self.low_input[i], self.high_input[i], 21))
plt.figure(figsize=(15, 15), dpi=100)
plt.title('Fuzzy Rules')
plt.tight_layout(pad=3, w_pad=0.5, h_pad=1.0)
plt.subplots_adjust(left=0.065, bottom=0.1, right=0.995, top=0.9, wspace=0.2, hspace=0.2)
"""kpx"""
upsampled_x = self.unsampled[0]
upsampled_y = self.unsampled[4]
x, y = np.meshgrid(upsampled_x, upsampled_y)
z = np.zeros_like(x)
# Loop through the system 21*21 times to collect the control surface
for i in range(21):
for j in range(21):
self.sim_kpx.input['fx'] = x[i, j]
self.sim_kpx.input['my'] = y[i, j]
self.sim_kpx.compute()
z[i, j] = self.sim_kpx.output['kpx']
ax = plt.subplot(231, projection='3d')
# ax = fig.add_subplot(231, projection='3d')
surf = ax.plot_surface(x, y, z, rstride=1, cstride=1, cmap='viridis',
linewidth=0.4, antialiased=True)
ax.view_init(45, 200)
"""kpy"""
upsampled_x = self.unsampled[1]
upsampled_y = self.unsampled[3]
x, y = np.meshgrid(upsampled_x, upsampled_y)
z = np.zeros_like(x)
# Loop through the system 21*21 times to collect the control surface
for i in range(21):
for j in range(21):
self.sim_kpy.input['fy'] = x[i, j]
self.sim_kpy.input['mx'] = y[i, j]
self.sim_kpy.compute()
z[i, j] = self.sim_kpy.output['kpy']
ax = plt.subplot(232, projection='3d')
# ax = fig.add_subplot(232, projection='3d')
surf = ax.plot_surface(x, y, z, rstride=1, cstride=1, cmap='viridis',
linewidth=0.4, antialiased=True)
ax.view_init(20, 200)
"""kpz"""
upsampled_x = self.unsampled[0]
upsampled_y = self.unsampled[1]
x, y = np.meshgrid(upsampled_x, upsampled_y)
z = np.zeros_like(x)
# Loop through the system 21*21 times to collect the control surface
for i in range(21):
for j in range(21):
self.sim_kpz.input['fx'] = x[i, j]
self.sim_kpz.input['fy'] = y[i, j]
self.sim_kpz.compute()
z[i, j] = self.sim_kpz.output['kpz']
ax = plt.subplot(233, projection='3d')
# ax = fig.add_subplot(233, projection='3d')
surf = ax.plot_surface(x, y, z, rstride=1, cstride=1, cmap='viridis',
linewidth=0.4, antialiased=True)
ax.view_init(20, 200)
"""krx"""
upsampled_x = self.unsampled[1]
upsampled_y = self.unsampled[3]
x, y = np.meshgrid(upsampled_x, upsampled_y)
z = np.zeros_like(x)
# Loop through the system 21*21 times to collect the control surface
for i in range(21):
for j in range(21):
self.sim_krx.input['fy'] = x[i, j]
self.sim_krx.input['mx'] = y[i, j]
self.sim_krx.compute()
z[i, j] = self.sim_krx.output['krx']
ax = plt.subplot(234, projection='3d')
# ax = fig.add_subplot(234, projection='3d')
surf = ax.plot_surface(x, y, z, rstride=1, cstride=1, cmap='viridis',
linewidth=0.4, antialiased=True)
ax.view_init(20, 200)
"""kry"""
upsampled_x = self.unsampled[1]
upsampled_y = self.unsampled[3]
x, y = np.meshgrid(upsampled_x, upsampled_y)
z = np.zeros_like(x)
# Loop through the system 21*21 times to collect the control surface
for i in range(21):
for j in range(21):
self.sim_kry.input['fx'] = x[i, j]
self.sim_kry.input['my'] = y[i, j]
self.sim_kry.compute()
z[i, j] = self.sim_kry.output['kry']
ax = plt.subplot(235, projection='3d')
# ax = fig.add_subplot(235, projection='3d')
surf = ax.plot_surface(x, y, z, rstride=1, cstride=1, cmap='viridis',
linewidth=0.4, antialiased=True)
ax.view_init(20, 200)
"""krz"""
upsampled_x = self.unsampled[3]
upsampled_y = self.unsampled[5]
x, y = np.meshgrid(upsampled_x, upsampled_y)
z = np.zeros_like(x)
for i in range(21):
for j in range(21):
self.sim_krz.input['mx'] = x[i, j]
self.sim_krz.input['mz'] = y[i, j]
self.sim_krz.compute()
z[i, j] = self.sim_krz.output['krz']
ax = plt.subplot(236, projection='3d')
# ax = fig.add_subplot(236, projection='3d')
surf = ax.plot_surface(x, y, z, rstride=1, cstride=1, cmap='viridis',
linewidth=0.4, antialiased=True)
ax.view_init(20, 200)
plt.show()
def build_fuzzy_kpx(self):
fx_universe = np.linspace(self.low_input[0], self.high_input[0], self.num_input)
my_universe = np.linspace(self.low_input[4], self.high_input[4], self.num_input)
fx = ctrl.Antecedent(fx_universe, 'fx')
my = ctrl.Antecedent(my_universe, 'my')
input_names = ['nb', 'ns', 'ze', 'ps', 'pb']
fx.automf(names=input_names)
my.automf(names=input_names)
kpx_universe = np.linspace(self.low_output[0], self.high_output[0], self.num_output)
kpx = ctrl.Consequent(kpx_universe, 'kpx')
output_names_3 = ['nb', 'ze', 'pb']
kpx.automf(names=output_names_3)
rule_kpx_0 = ctrl.Rule(antecedent=((fx['nb'] & my['ze']) |
(fx['nb'] & my['ns']) |
(fx['pb'] & my['ze']) |
(fx['pb'] & my['ps'])),
consequent=kpx['pb'], label='rule kpx pb')
rule_kpx_1 = ctrl.Rule(antecedent=((fx['ns'] & my['ze']) |
(fx['ns'] & my['ns']) |
(fx['ns'] & my['nb']) |
(fx['nb'] & my['nb']) |
(fx['pb'] & my['pb']) |
(fx['ps'] & my['ps']) |
(fx['ps'] & my['pb']) |
(fx['ps'] & my['ze'])),
consequent=kpx['ze'], label='rule kpx ze')
rule_kpx_2 = ctrl.Rule(antecedent=((fx['ze'] & my['ze']) |
(fx['ze'] & my['ps']) |
(fx['ze'] & my['ns']) |
(fx['ze'] & my['pb']) |
(fx['ze'] & my['nb']) |
(fx['nb'] & my['ps']) |
(fx['nb'] & my['pb']) |
(fx['pb'] & my['ns']) |
(fx['pb'] & my['nb']) |
(fx['ns'] & my['ps']) |
(fx['ns'] & my['pb']) |
(fx['ps'] & my['nb']) |
(fx['ps'] & my['ns'])),
consequent=kpx['nb'], label='rule kpx nb')
system_kpx = ctrl.ControlSystem(rules=[rule_kpx_2, rule_kpx_1, rule_kpx_0])
sim_kpx = ctrl.ControlSystemSimulation(system_kpx, flush_after_run=self.num_mesh * self.num_mesh + 1)
"""kpx"""
# upsampled_x = self.unsampled[0]
# upsampled_y = self.unsampled[4]
# x, y = np.meshgrid(upsampled_x, upsampled_y)
# z = np.zeros_like(x)
#
# # Loop through the system 21*21 times to collect the control surface
# for i in range(21):
# for j in range(21):
# sim_kpx.input['fx'] = x[i, j]
# sim_kpx.input['my'] = y[i, j]
# sim_kpx.compute()
# z[i, j] = sim_kpx.output['kpx']
#
# """ Plot the result in pretty 3D with alpha blending"""
# fig = plt.figure(figsize=(8, 8))
# ax = fig.add_subplot(111, projection='3d')
#
# surf = ax.plot_surface(x, y, z, rstride=1, cstride=1, cmap='viridis',
# linewidth=0.4, antialiased=True)
# plt.show()
return sim_kpx
def build_fuzzy_kpy(self):
fy_universe = np.linspace(self.low_input[1], self.high_input[1], self.num_input)
mx_universe = np.linspace(self.low_input[3], self.high_input[3], self.num_input)
fy = ctrl.Antecedent(fy_universe, 'fy')
mx = ctrl.Antecedent(mx_universe, 'mx')
input_names = ['nb', 'ns', 'ze', 'ps', 'pb']
fy.automf(names=input_names)
mx.automf(names=input_names)
kpy_universe = np.linspace(self.low_output[1], self.high_output[1], self.num_output)
kpy = ctrl.Consequent(kpy_universe, 'kpy')
output_names_3 = ['nb', 'ze', 'pb']
kpy.automf(names=output_names_3)
rule_kpy_0 = ctrl.Rule(antecedent=((fy['nb'] & mx['ns']) |
(fy['nb'] & mx['ze']) |
(fy['pb'] & mx['ze']) |
(fy['pb'] & mx['ps'])),
consequent=kpy['pb'], label='rule_kpy_pb')
rule_kpy_1 = ctrl.Rule(antecedent=((fy['ns'] & mx['ze']) |
(fy['ns'] & mx['ns']) |
(fy['ns'] & mx['nb']) |
(fy['ps'] & mx['ps']) |
(fy['ps'] & mx['pb']) |
(fy['ps'] & mx['ze']) |
(fy['nb'] & mx['nb']) |
(fy['pb'] & mx['pb'])),
consequent=kpy['ze'], label='rule_kpy_ze')
rule_kpy_2 = ctrl.Rule(antecedent=((fy['ze']) |
(fy['nb'] & mx['ps']) |
(fy['nb'] & mx['pb']) |
(fy['pb'] & mx['ns']) |
(fy['pb'] & mx['nb']) |
(fy['ns'] & mx['ps']) |
(fy['ns'] & mx['pb']) |
(fy['ps'] & mx['nb']) |
(fy['ps'] & mx['ns'])),
consequent=kpy['nb'], label='rule_kpy_nb')
system_kpy = ctrl.ControlSystem(rules=[rule_kpy_0, rule_kpy_1, rule_kpy_2])
sim_kpy = ctrl.ControlSystemSimulation(system_kpy, flush_after_run=self.num_mesh * self.num_mesh + 1)
"""kpx"""
# upsampled_x = self.unsampled[1]
# upsampled_y = self.unsampled[3]
# x, y = np.meshgrid(upsampled_x, upsampled_y)
# z = np.zeros_like(x)
#
# # Loop through the system 21*21 times to collect the control surface
# for i in range(21):
# for j in range(21):
# sim_kpy.input['fy'] = x[i, j]
# sim_kpy.input['mx'] = y[i, j]
# sim_kpy.compute()
# z[i, j] = sim_kpy.output['kpy']
#
# """ Plot the result in pretty 3D with alpha blending"""
# fig = plt.figure(figsize=(8, 8))
# ax = fig.add_subplot(111, projection='3d')
#
# surf = ax.plot_surface(x, y, z, rstride=1, cstride=1, cmap='viridis',
# linewidth=0.4, antialiased=True)
# plt.show()
return sim_kpy
def build_fuzzy_kpz(self):
fy_universe = np.linspace(self.low_input[1], self.high_input[1], self.num_input)
fx_universe = np.linspace(self.low_input[0], self.high_input[0], self.num_input)
fy = ctrl.Antecedent(fy_universe, 'fy')
fx = ctrl.Antecedent(fx_universe, 'fx')
input_names = ['nb', 'ns', 'ze', 'ps', 'pb']
fy.automf(names=input_names)
fx.automf(names=input_names)
kpz_universe = np.linspace(self.low_output[2], self.high_output[2], self.num_output)
kpz = ctrl.Consequent(kpz_universe, 'kpz')
output_names_3 = ['nb', 'ze', 'pb']
kpz.automf(names=output_names_3)
rule_kpz_0 = ctrl.Rule(antecedent=((fx['ze'] & fy['ze']) |
(fx['ze'] & fy['ns']) |
(fx['ns'] & fy['ze']) |
(fx['ze'] & fy['ps']) |
(fx['ps'] & fy['ze'])),
consequent=kpz['pb'], label='rule_kpz_pb')
rule_kpz_1 = ctrl.Rule(antecedent=((fx['ns'] & fy['ns']) |
(fx['ps'] & fy['ps']) |
(fx['ns'] & fy['ps']) |
(fx['ps'] & fy['ns'])),
consequent=kpz['ze'], label='rule_kpz_ze')
rule_kpz_2 = ctrl.Rule(antecedent=((fx['nb']) |
(fx['pb']) |
(fy['nb']) |
(fy['pb'])),
consequent=kpz['nb'], label='rule_kpz_nb')
system_kpz = ctrl.ControlSystem(rules=[rule_kpz_0, rule_kpz_1, rule_kpz_2])
sim_kpz = ctrl.ControlSystemSimulation(system_kpz, flush_after_run=self.num_mesh * self.num_mesh + 1)
"""kpx"""
# upsampled_x = self.unsampled[0]
# upsampled_y = self.unsampled[1]
# x, y = np.meshgrid(upsampled_x, upsampled_y)
# z = np.zeros_like(x)
# Loop through the system 21*21 times to collect the control surface
# for i in range(21):
# for j in range(21):
# sim_kpz.input['fx'] = x[i, j]
# sim_kpz.input['fy'] = y[i, j]
# sim_kpz.compute()
# z[i, j] = sim_kpz.output['kpz']
#
# """ Plot the result in pretty 3D with alpha blending"""
# fig = plt.figure(figsize=(8, 8))
# ax = fig.add_subplot(111, projection='3d')
#
# surf = ax.plot_surface(x, y, z, rstride=1, cstride=1, cmap='viridis',
# linewidth=0.4, antialiased=True)
# plt.show()
return sim_kpz
def build_fuzzy_krx(self):
fy_universe = np.linspace(self.low_input[1], self.high_input[1], self.num_input)
mx_universe = np.linspace(self.low_input[3], self.high_input[3], self.num_input)
fy = ctrl.Antecedent(fy_universe, 'fy')
mx = ctrl.Antecedent(mx_universe, 'mx')
input_names = ['nb', 'ns', 'ze', 'ps', 'pb']
fy.automf(names=input_names)
mx.automf(names=input_names)
krx_universe = np.linspace(self.low_output[3], self.high_output[3], 3)
krx = ctrl.Consequent(krx_universe, 'krx')
output_names_2 = ['nb', 'ze', 'pb']
krx.automf(names=output_names_2)
rule_krx_0 = ctrl.Rule(antecedent=((mx['nb'] & fy['ze']) |
(mx['nb'] & fy['ns']) |
(mx['pb'] & fy['ze']) |
(mx['pb'] & fy['ps'])),
consequent=krx['pb'], label='rule_krx_pb')
rule_krx_1 = ctrl.Rule(antecedent=((mx['ze']) |
(mx['ns']) |
(mx['ps']) |
(mx['nb'] & fy['nb']) |
(mx['nb'] & fy['ps']) |
(mx['nb'] & fy['pb']) |
(mx['pb'] & fy['pb']) |
(mx['pb'] & fy['ns']) |
(mx['pb'] & fy['nb'])),
consequent=krx['nb'], label='rule_krx_ze')
system_krx = ctrl.ControlSystem(rules=[rule_krx_0, rule_krx_1])
sim_krx = ctrl.ControlSystemSimulation(system_krx, flush_after_run=self.num_mesh * self.num_mesh + 1)
# upsampled_x = self.unsampled[1]
# upsampled_y = self.unsampled[3]
# x, y = np.meshgrid(upsampled_x, upsampled_y)
# z = np.zeros_like(x)
#
# # Loop through the system 21*21 times to collect the control surface
# for i in range(21):
# for j in range(21):
# sim_krx.input['fy'] = x[i, j]
# sim_krx.input['mx'] = y[i, j]
# sim_krx.compute()
# z[i, j] = sim_krx.output['krx']
#
# """ Plot the result in pretty 3D with alpha blending"""
# fig = plt.figure(figsize=(8, 8))
# ax = fig.add_subplot(111, projection='3d')
#
# surf = ax.plot_surface(x, y, z, rstride=1, cstride=1, cmap='viridis',
# linewidth=0.4, antialiased=True)
# plt.show()
return sim_krx
def build_fuzzy_kry(self):
fx_universe = np.linspace(self.low_input[0], self.high_input[0], self.num_input)
my_universe = np.linspace(self.low_input[4], self.high_input[4], self.num_input)
fx = ctrl.Antecedent(fx_universe, 'fx')
my = ctrl.Antecedent(my_universe, 'my')
input_names = ['nb', 'ns', 'ze', 'ps', 'pb']
fx.automf(names=input_names)
my.automf(names=input_names)
kry_universe = np.linspace(self.low_output[4], self.high_output[4], 3)
kry = ctrl.Consequent(kry_universe, 'kry')
output_names_2 = ['nb', 'ze', 'pb']
kry.automf(names=output_names_2)
rule_kry_0 = ctrl.Rule(antecedent=((my['nb'] & fx['ze']) |
(my['nb'] & fx['ns']) |
(my['pb'] & fx['ze']) |
(my['pb'] & fx['ps'])),
consequent=kry['pb'], label='rule_kry_pb')
rule_kry_1 = ctrl.Rule(antecedent=((my['ze']) |
(my['ns']) |
(my['ps']) |
(my['nb'] & fx['nb']) |
(my['pb'] & fx['pb']) |
(my['nb'] & fx['ps']) |
(my['pb'] & fx['ns']) |
(my['nb'] & fx['pb']) |
(my['pb'] & fx['nb'])),
consequent=kry['nb'], label='rule_kry_nb')
system_kry = ctrl.ControlSystem(rules=[rule_kry_0, rule_kry_1])
sim_kry = ctrl.ControlSystemSimulation(system_kry, flush_after_run=self.num_mesh * self.num_mesh + 1)
# upsampled_x = self.unsampled[1]
# upsampled_y = self.unsampled[3]
# x, y = np.meshgrid(upsampled_x, upsampled_y)
# z = np.zeros_like(x)
#
# # Loop through the system 21*21 times to collect the control surface
# for i in range(21):
# for j in range(21):
# sim_kry.input['fx'] = x[i, j]
# sim_kry.input['my'] = y[i, j]
# sim_kry.compute()
# z[i, j] = sim_kry.output['kry']
#
# """ Plot the result in pretty 3D with alpha blending"""
# fig = plt.figure(figsize=(8, 8))
# ax = fig.add_subplot(111, projection='3d')
#
# surf = ax.plot_surface(x, y, z, rstride=1, cstride=1, cmap='viridis',
# linewidth=0.4, antialiased=True)
# plt.show()
return sim_kry
def build_fuzzy_krz(self):
mx_universe = np.linspace(self.low_input[3], self.high_input[3], self.num_input)
mz_universe = np.linspace(self.low_input[5], self.high_input[5], self.num_input)
mx = ctrl.Antecedent(mx_universe, 'mx')
mz = ctrl.Antecedent(mz_universe, 'mz')
input_names = ['nb', 'ns', 'ze', 'ps', 'pb']
mx.automf(names=input_names)
mz.automf(names=input_names)
krz_universe = np.linspace(self.low_output[5], self.high_output[5], 3)
krz = ctrl.Consequent(krz_universe, 'krz')
output_names_2 = ['nb', 'ze', 'pb']
krz.automf(names=output_names_2)
rule_krz_0 = ctrl.Rule(antecedent=((mz['nb'] & mx['ze']) |
(mz['nb'] & mx['ps']) |
(mz['nb'] & mx['ns']) |
(mz['pb'] & mx['ns']) |
(mz['pb'] & mx['ze']) |
(mz['pb'] & mx['ps'])),
consequent=krz['pb'], label='rule_krz_pb')
rule_krz_1 = ctrl.Rule(antecedent=((mz['ze']) |
(mz['ns']) |
(mz['ps']) |
(mz['nb'] & mx['nb']) |
(mz['nb'] & mx['pb']) |
(mz['pb'] & mx['pb']) |
(mz['pb'] & mx['nb'])),
consequent=krz['nb'], label='rule_krz_nb')
system_krz = ctrl.ControlSystem(rules=[rule_krz_0, rule_krz_1])
sim_krz = ctrl.ControlSystemSimulation(system_krz, flush_after_run=self.num_mesh * self.num_mesh + 1)
# upsampled_x = self.unsampled[3]
# upsampled_y = self.unsampled[5]
# x, y = np.meshgrid(upsampled_x, upsampled_y)
# z = np.zeros_like(x)
#
# for i in range(21):
# for j in range(21):
# sim_krz.input['mx'] = x[i, j]
# sim_krz.input['mz'] = y[i, j]
# sim_krz.compute()
# z[i, j] = sim_krz.output['krz']
#
# """ Plot the result in pretty 3D with alpha blending"""
# fig = plt.figure(figsize=(8, 8))
# ax = fig.add_subplot(111, projection='3d')
#
# surf = ax.plot_surface(x, y, z, rstride=1, cstride=1, cmap='viridis',
# linewidth=0.4, antialiased=True)
# plt.show()
return sim_krz
def build_fuzzy_system(self):
# Sparse universe makes calculations faster, without sacrifice accuracy.
# Only the critical points are included here; making it higher resolution is
# unnecessary.
"""============================================================="""
low_force = self.low_input
high_force = self.high_input
num_input = self.num_input
fx_universe = np.linspace(low_force[0], high_force[0], num_input)
fy_universe = np.linspace(low_force[1], high_force[1], num_input)
fz_universe = np.linspace(low_force[2], high_force[2], num_input)
mx_universe = np.linspace(low_force[3], low_force[3], num_input)
my_universe = np.linspace(low_force[4], low_force[4], num_input)
mz_universe = np.linspace(low_force[5], low_force[5], num_input)
"""Create the three fuzzy variables - two inputs, one output"""
fx = ctrl.Antecedent(fx_universe, 'fx')
fy = ctrl.Antecedent(fy_universe, 'fy')
fz = ctrl.Antecedent(fz_universe, 'fz')
mx = ctrl.Antecedent(mx_universe, 'mx')
my = ctrl.Antecedent(my_universe, 'my')
mz = ctrl.Antecedent(mz_universe, 'mz')
input_names = ['nb', 'ns', 'ze', 'ps', 'pb']
fx.automf(names=input_names)
fy.automf(names=input_names)
fz.automf(names=input_names)
mx.automf(names=input_names)
my.automf(names=input_names)
mz.automf(names=input_names)
"""============================================================="""
"""Create the outputs"""
kpx_universe = np.linspace(self.low_output[0], self.high_output[0], self.num_output)
kpy_universe = np.linspace(self.low_output[1], self.high_output[1], self.num_output)
kpz_universe = np.linspace(self.low_output[2], self.high_output[2], self.num_output)
krx_universe = np.linspace(self.low_output[3], self.high_output[3], 3)
kry_universe = np.linspace(self.low_output[4], self.high_output[4], 3)
krz_universe = np.linspace(self.low_output[5], self.high_output[5], 3)
kpx = ctrl.Consequent(kpx_universe, 'kpx')
kpy = ctrl.Consequent(kpy_universe, 'kpy')
kpz = ctrl.Consequent(kpz_universe, 'kpz')
krx = ctrl.Consequent(krx_universe, 'krx')
kry = ctrl.Consequent(kry_universe, 'kry')
krz = ctrl.Consequent(krz_universe, 'krz')
output_names_3 = ['nb', 'ze', 'pb']
# Here we use the convenience `automf` to populate the fuzzy variables with
# terms. The optional kwarg `names=` lets us specify the names of our Terms.
kpx.automf(names=output_names_3)
kpy.automf(names=output_names_3)
kpz.automf(names=output_names_3)
krx.automf(names=output_names_3)
kry.automf(names=output_names_3)
krz.automf(names=output_names_3)
# define the rules for the desired force fx and my
# ===============================================================
rule_kpx_0 = ctrl.Rule(antecedent=((fx['nb'] & my['ze']) |
(fx['nb'] & my['ns']) |
(fx['pb'] & my['ze']) |
(fx['pb'] & my['ps'])),
consequent=kpx['pb'], label='rule kpx pb')
rule_kpx_1 = ctrl.Rule(antecedent=((fx['ns'] & my['ze']) |
(fx['ns'] & my['ns']) |
(fx['ns'] & my['nb']) |
(fx['nb'] & my['nb']) |
(fx['pb'] & my['pb']) |
(fx['ps'] & my['ps']) |
(fx['ps'] & my['pb']) |
(fx['ps'] & my['ze'])),
consequent=kpx['ze'], label='rule kpx ze')
rule_kpx_2 = ctrl.Rule(antecedent=((fx['ze'] & my['ze']) |
(fx['ze'] & my['ps']) |
(fx['ze'] & my['ns']) |
(fx['ze'] & my['pb']) |
(fx['ze'] & my['nb']) |
(fx['nb'] & my['ps']) |
(fx['nb'] & my['pb']) |
(fx['pb'] & my['ns']) |
(fx['pb'] & my['nb']) |
(fx['ns'] & my['ps']) |
(fx['ns'] & my['pb']) |
(fx['ps'] & my['nb']) |
(fx['ps'] & my['ns'])),
consequent=kpx['nb'], label='rule kpx nb')
system_kpx = ctrl.ControlSystem(rules=[rule_kpx_2, rule_kpx_1, rule_kpx_0])
sim_kpx = ctrl.ControlSystemSimulation(system_kpx, flush_after_run=self.num_mesh * self.num_mesh + 1)
# define the rules for the desired force fy and mz
# ===============================================================
rule_kpy_0 = ctrl.Rule(antecedent=((fy['nb'] & mx['ns']) |
(fy['nb'] & mx['ze']) |
(fy['pb'] & mx['ze']) |
(fy['pb'] & mx['ps'])),
consequent=kpy['pb'], label='rule_kpy_pb')
rule_kpy_1 = ctrl.Rule(antecedent=((fy['ns'] & mx['ze']) |
(fy['ns'] & mx['ns']) |
(fy['ns'] & mx['nb']) |
(fy['ps'] & mx['ps']) |
(fy['ps'] & mx['pb']) |
(fy['ps'] & mx['ze']) |
(fy['nb'] & mx['nb']) |
(fy['pb'] & mx['pb'])),
consequent=kpy['ze'], label='rule_kpy_ze')
rule_kpy_2 = ctrl.Rule(antecedent=((fy['ze']) |
(fy['nb'] & mx['ps']) |
(fy['nb'] & mx['pb']) |
(fy['pb'] & mx['ns']) |
(fy['pb'] & mx['nb']) |
(fy['ns'] & mx['ps']) |
(fy['ns'] & mx['pb']) |
(fy['ps'] & mx['nb']) |
(fy['ps'] & mx['ns'])),
consequent=kpy['nb'], label='rule_kpy_nb')
system_kpy = ctrl.ControlSystem(rules=[rule_kpy_0, rule_kpy_1, rule_kpy_2])
sim_kpy = ctrl.ControlSystemSimulation(system_kpy, flush_after_run=self.num_mesh * self.num_mesh + 1)
# ===============================================================
rule_kpz_0 = ctrl.Rule(antecedent=((fx['ze'] & fy['ze']) |
(fx['ze'] & fy['ns']) |
(fx['ns'] & fy['ze']) |
(fx['ze'] & fy['ps']) |
(fx['ps'] & fy['ze'])),
consequent=kpz['pb'], label='rule_kpz_pb')
rule_kpz_1 = ctrl.Rule(antecedent=((fx['ns'] & fy['ns']) |
(fx['ps'] & fy['ps']) |
(fx['ns'] & fy['ps']) |
(fx['ps'] & fy['ns'])),
consequent=kpz['ze'], label='rule_kpz_ze')
rule_kpz_2 = ctrl.Rule(antecedent=((fx['nb']) |
(fx['pb']) |
(fy['nb']) |
(fy['pb'])),
consequent=kpz['nb'], label='rule_kpz_nb')
system_kpz = ctrl.ControlSystem(rules=[rule_kpz_0, rule_kpz_1, rule_kpz_2])
sim_kpz = ctrl.ControlSystemSimulation(system_kpz, flush_after_run=self.num_mesh * self.num_mesh + 1)
# ===============================================================
rule_krx_0 = ctrl.Rule(antecedent=((mx['nb'] & fy['ze']) |
(mx['nb'] & fy['ns']) |
(mx['pb'] & fy['ze']) |
(mx['pb'] & fy['ps'])),
consequent=krx['pb'], label='rule_krx_pb')
rule_krx_1 = ctrl.Rule(antecedent=((mx['ze']) |
(mx['ns']) |
(mx['ps']) |
(mx['nb'] & fy['nb']) |
(mx['nb'] & fy['ps']) |
(mx['nb'] & fy['pb']) |
(mx['pb'] & fy['pb']) |
(mx['pb'] & fy['ns']) |
(mx['pb'] & fy['nb'])),
consequent=krx['nb'], label='rule_krx_ze')
system_krx = ctrl.ControlSystem(rules=[rule_krx_0, rule_krx_1])
sim_krx = ctrl.ControlSystemSimulation(system_krx, flush_after_run=self.num_mesh * self.num_mesh + 1)
# ===============================================================
rule_kry_0 = ctrl.Rule(antecedent=((my['nb'] & fx['ze']) |
(my['nb'] & fx['ns']) |
(my['pb'] & fx['ze']) |
(my['pb'] & fx['ps'])),
consequent=kry['pb'], label='rule_kry_pb')
rule_kry_1 = ctrl.Rule(antecedent=((my['ze']) |
(my['ns']) |
(my['ps']) |
(my['nb'] & fx['nb']) |
(my['pb'] & fx['pb']) |
(my['nb'] & fx['ps']) |
(my['pb'] & fx['ns']) |
(my['nb'] & fx['pb']) |
(my['pb'] & fx['nb'])),
consequent=kry['nb'], label='rule_kry_nb')
system_kry = ctrl.ControlSystem(rules=[rule_kry_0, rule_kry_1])
sim_kry = ctrl.ControlSystemSimulation(system_kry, flush_after_run=self.num_mesh * self.num_mesh + 1)
# ===============================================================
rule_krz_0 = ctrl.Rule(antecedent=((mz['nb'] & mx['ze']) |
(mz['nb'] & mx['ps']) |
(mz['nb'] & mx['ps']) |
(mz['pb'] & mx['ns']) |
(mz['pb'] & mx['ze']) |
(mz['pb'] & mx['ps'])),
consequent=krz['pb'], label='rule_krz_pb')
rule_krz_1 = ctrl.Rule(antecedent=((mz['ze']) |
(mz['ns']) |
(mz['ps']) |
(mz['nb'] & mx['nb']) |
(mz['pb'] & mx['pb']) |
(mz['nb'] & mx['pb']) |
(mz['pb'] & mx['nb'])),
consequent=krz['nb'], label='rule_krz_nb')
system_krz = ctrl.ControlSystem(rules=[rule_krz_0, rule_krz_1])
sim_krz = ctrl.ControlSystemSimulation(system_krz, flush_after_run=self.num_mesh * self.num_mesh + 1)
return sim_kpx, sim_kpy, sim_kpz, sim_krx, sim_kry, sim_krz
if __name__ == "__main__":
fuzzy_system = fuzzy_control(low_output=np.array([0., 0., 0., 0., 0., 0.]), high_output=np.array([0.02, 0.02, 0.025, 0.015, 0.015, 0.015]))
fuzzy_system.plot_rules()
# kp = fuzzy_system.get_output(np.array([-20, -20, -30, 0, 0.9, 0.7]))[:3]
# fuzzy_system.build_fuzzy_kpx() | [
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"skfuzzy.control.ControlSystemSimulation",
"numpy.meshgrid",
"numpy.zeros_like",
"matplotlib.pyplot.show",
"skfuzzy.control.Rule",
"skfuzzy.control.Antecedent",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.linspace",
"matplotlib.pypl... | [((580, 617), 'numpy.array', 'np.array', (['[-40, -40, -40, -5, -5, -5]'], {}), '([-40, -40, -40, -5, -5, -5])\n', (588, 617), True, 'import numpy as np\n'), ((647, 677), 'numpy.array', 'np.array', (['[40, 40, 0, 5, 5, 5]'], {}), '([40, 40, 0, 5, 5, 5])\n', (655, 677), True, 'import numpy as np\n'), ((707, 747), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n', (715, 747), True, 'import numpy as np\n'), ((772, 823), 'numpy.array', 'np.array', (['[0.015, 0.015, 0.02, 0.015, 0.015, 0.015]'], {}), '([0.015, 0.015, 0.02, 0.015, 0.015, 0.015])\n', (780, 823), True, 'import numpy as np\n'), ((5392, 5429), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 15)', 'dpi': '(100)'}), '(figsize=(15, 15), dpi=100)\n', (5402, 5429), True, 'import matplotlib.pyplot as plt\n'), ((5438, 5462), 'matplotlib.pyplot.title', 'plt.title', (['"""Fuzzy Rules"""'], {}), "('Fuzzy Rules')\n", (5447, 5462), True, 'import matplotlib.pyplot as plt\n'), ((5471, 5516), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(3)', 'w_pad': '(0.5)', 'h_pad': '(1.0)'}), '(pad=3, w_pad=0.5, h_pad=1.0)\n', (5487, 5516), True, 'import matplotlib.pyplot as plt\n'), ((5525, 5619), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.065)', 'bottom': '(0.1)', 'right': '(0.995)', 'top': '(0.9)', 'wspace': '(0.2)', 'hspace': '(0.2)'}), '(left=0.065, bottom=0.1, right=0.995, top=0.9, wspace=\n 0.2, hspace=0.2)\n', (5544, 5619), True, 'import matplotlib.pyplot as plt\n'), ((5729, 5766), 'numpy.meshgrid', 'np.meshgrid', (['upsampled_x', 'upsampled_y'], {}), '(upsampled_x, upsampled_y)\n', (5740, 5766), True, 'import numpy as np\n'), ((5779, 5795), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (5792, 5795), True, 'import numpy as np\n'), ((6142, 6175), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(231)'], {'projection': '"""3d"""'}), "(231, projection='3d')\n", (6153, 
6175), True, 'import matplotlib.pyplot as plt\n'), ((6515, 6552), 'numpy.meshgrid', 'np.meshgrid', (['upsampled_x', 'upsampled_y'], {}), '(upsampled_x, upsampled_y)\n', (6526, 6552), True, 'import numpy as np\n'), ((6565, 6581), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (6578, 6581), True, 'import numpy as np\n'), ((6928, 6961), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(232)'], {'projection': '"""3d"""'}), "(232, projection='3d')\n", (6939, 6961), True, 'import matplotlib.pyplot as plt\n'), ((7301, 7338), 'numpy.meshgrid', 'np.meshgrid', (['upsampled_x', 'upsampled_y'], {}), '(upsampled_x, upsampled_y)\n', (7312, 7338), True, 'import numpy as np\n'), ((7351, 7367), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (7364, 7367), True, 'import numpy as np\n'), ((7714, 7747), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(233)'], {'projection': '"""3d"""'}), "(233, projection='3d')\n", (7725, 7747), True, 'import matplotlib.pyplot as plt\n'), ((8087, 8124), 'numpy.meshgrid', 'np.meshgrid', (['upsampled_x', 'upsampled_y'], {}), '(upsampled_x, upsampled_y)\n', (8098, 8124), True, 'import numpy as np\n'), ((8137, 8153), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (8150, 8153), True, 'import numpy as np\n'), ((8500, 8533), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(234)'], {'projection': '"""3d"""'}), "(234, projection='3d')\n", (8511, 8533), True, 'import matplotlib.pyplot as plt\n'), ((8873, 8910), 'numpy.meshgrid', 'np.meshgrid', (['upsampled_x', 'upsampled_y'], {}), '(upsampled_x, upsampled_y)\n', (8884, 8910), True, 'import numpy as np\n'), ((8923, 8939), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (8936, 8939), True, 'import numpy as np\n'), ((9286, 9319), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(235)'], {'projection': '"""3d"""'}), "(235, projection='3d')\n", (9297, 9319), True, 'import matplotlib.pyplot as plt\n'), ((9659, 9696), 'numpy.meshgrid', 'np.meshgrid', (['upsampled_x', 
'upsampled_y'], {}), '(upsampled_x, upsampled_y)\n', (9670, 9696), True, 'import numpy as np\n'), ((9709, 9725), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (9722, 9725), True, 'import numpy as np\n'), ((9995, 10028), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(236)'], {'projection': '"""3d"""'}), "(236, projection='3d')\n", (10006, 10028), True, 'import matplotlib.pyplot as plt\n'), ((10262, 10272), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10270, 10272), True, 'import matplotlib.pyplot as plt\n'), ((10327, 10393), 'numpy.linspace', 'np.linspace', (['self.low_input[0]', 'self.high_input[0]', 'self.num_input'], {}), '(self.low_input[0], self.high_input[0], self.num_input)\n', (10338, 10393), True, 'import numpy as np\n'), ((10416, 10482), 'numpy.linspace', 'np.linspace', (['self.low_input[4]', 'self.high_input[4]', 'self.num_input'], {}), '(self.low_input[4], self.high_input[4], self.num_input)\n', (10427, 10482), True, 'import numpy as np\n'), ((10497, 10531), 'skfuzzy.control.Antecedent', 'ctrl.Antecedent', (['fx_universe', '"""fx"""'], {}), "(fx_universe, 'fx')\n", (10512, 10531), True, 'import skfuzzy.control as ctrl\n'), ((10545, 10579), 'skfuzzy.control.Antecedent', 'ctrl.Antecedent', (['my_universe', '"""my"""'], {}), "(my_universe, 'my')\n", (10560, 10579), True, 'import skfuzzy.control as ctrl\n'), ((10732, 10801), 'numpy.linspace', 'np.linspace', (['self.low_output[0]', 'self.high_output[0]', 'self.num_output'], {}), '(self.low_output[0], self.high_output[0], self.num_output)\n', (10743, 10801), True, 'import numpy as np\n'), ((10816, 10852), 'skfuzzy.control.Consequent', 'ctrl.Consequent', (['kpx_universe', '"""kpx"""'], {}), "(kpx_universe, 'kpx')\n", (10831, 10852), True, 'import skfuzzy.control as ctrl\n'), ((10961, 11115), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "(fx['nb'] & my['ze'] | fx['nb'] & my['ns'] | fx['pb'] & my['ze'] | fx['pb'] &\n my['ps'])", 'consequent': "kpx['pb']", 'label': '"""rule 
kpx pb"""'}), "(antecedent=fx['nb'] & my['ze'] | fx['nb'] & my['ns'] | fx['pb'] &\n my['ze'] | fx['pb'] & my['ps'], consequent=kpx['pb'], label='rule kpx pb')\n", (10970, 11115), True, 'import skfuzzy.control as ctrl\n'), ((11303, 11554), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "(fx['ns'] & my['ze'] | fx['ns'] & my['ns'] | fx['ns'] & my['nb'] | fx['nb'] &\n my['nb'] | fx['pb'] & my['pb'] | fx['ps'] & my['ps'] | fx['ps'] & my[\n 'pb'] | fx['ps'] & my['ze'])", 'consequent': "kpx['ze']", 'label': '"""rule kpx ze"""'}), "(antecedent=fx['ns'] & my['ze'] | fx['ns'] & my['ns'] | fx['ns'] &\n my['nb'] | fx['nb'] & my['nb'] | fx['pb'] & my['pb'] | fx['ps'] & my[\n 'ps'] | fx['ps'] & my['pb'] | fx['ps'] & my['ze'], consequent=kpx['ze'],\n label='rule kpx ze')\n", (11312, 11554), True, 'import skfuzzy.control as ctrl\n'), ((11913, 12279), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "(fx['ze'] & my['ze'] | fx['ze'] & my['ps'] | fx['ze'] & my['ns'] | fx['ze'] &\n my['pb'] | fx['ze'] & my['nb'] | fx['nb'] & my['ps'] | fx['nb'] & my[\n 'pb'] | fx['pb'] & my['ns'] | fx['pb'] & my['nb'] | fx['ns'] & my['ps'] |\n fx['ns'] & my['pb'] | fx['ps'] & my['nb'] | fx['ps'] & my['ns'])", 'consequent': "kpx['nb']", 'label': '"""rule kpx nb"""'}), "(antecedent=fx['ze'] & my['ze'] | fx['ze'] & my['ps'] | fx['ze'] &\n my['ns'] | fx['ze'] & my['pb'] | fx['ze'] & my['nb'] | fx['nb'] & my[\n 'ps'] | fx['nb'] & my['pb'] | fx['pb'] & my['ns'] | fx['pb'] & my['nb'] |\n fx['ns'] & my['ps'] | fx['ns'] & my['pb'] | fx['ps'] & my['nb'] | fx[\n 'ps'] & my['ns'], consequent=kpx['nb'], label='rule kpx nb')\n", (11922, 12279), True, 'import skfuzzy.control as ctrl\n'), ((12858, 12920), 'skfuzzy.control.ControlSystem', 'ctrl.ControlSystem', ([], {'rules': '[rule_kpx_2, rule_kpx_1, rule_kpx_0]'}), '(rules=[rule_kpx_2, rule_kpx_1, rule_kpx_0])\n', (12876, 12920), True, 'import skfuzzy.control as ctrl\n'), ((12939, 13034), 'skfuzzy.control.ControlSystemSimulation', 
'ctrl.ControlSystemSimulation', (['system_kpx'], {'flush_after_run': '(self.num_mesh * self.num_mesh + 1)'}), '(system_kpx, flush_after_run=self.num_mesh *\n self.num_mesh + 1)\n', (12967, 13034), True, 'import skfuzzy.control as ctrl\n'), ((13981, 14047), 'numpy.linspace', 'np.linspace', (['self.low_input[1]', 'self.high_input[1]', 'self.num_input'], {}), '(self.low_input[1], self.high_input[1], self.num_input)\n', (13992, 14047), True, 'import numpy as np\n'), ((14070, 14136), 'numpy.linspace', 'np.linspace', (['self.low_input[3]', 'self.high_input[3]', 'self.num_input'], {}), '(self.low_input[3], self.high_input[3], self.num_input)\n', (14081, 14136), True, 'import numpy as np\n'), ((14151, 14185), 'skfuzzy.control.Antecedent', 'ctrl.Antecedent', (['fy_universe', '"""fy"""'], {}), "(fy_universe, 'fy')\n", (14166, 14185), True, 'import skfuzzy.control as ctrl\n'), ((14199, 14233), 'skfuzzy.control.Antecedent', 'ctrl.Antecedent', (['mx_universe', '"""mx"""'], {}), "(mx_universe, 'mx')\n", (14214, 14233), True, 'import skfuzzy.control as ctrl\n'), ((14386, 14455), 'numpy.linspace', 'np.linspace', (['self.low_output[1]', 'self.high_output[1]', 'self.num_output'], {}), '(self.low_output[1], self.high_output[1], self.num_output)\n', (14397, 14455), True, 'import numpy as np\n'), ((14470, 14506), 'skfuzzy.control.Consequent', 'ctrl.Consequent', (['kpy_universe', '"""kpy"""'], {}), "(kpy_universe, 'kpy')\n", (14485, 14506), True, 'import skfuzzy.control as ctrl\n'), ((14615, 14769), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "(fy['nb'] & mx['ns'] | fy['nb'] & mx['ze'] | fy['pb'] & mx['ze'] | fy['pb'] &\n mx['ps'])", 'consequent': "kpy['pb']", 'label': '"""rule_kpy_pb"""'}), "(antecedent=fy['nb'] & mx['ns'] | fy['nb'] & mx['ze'] | fy['pb'] &\n mx['ze'] | fy['pb'] & mx['ps'], consequent=kpy['pb'], label='rule_kpy_pb')\n", (14624, 14769), True, 'import skfuzzy.control as ctrl\n'), ((14957, 15208), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': 
"(fy['ns'] & mx['ze'] | fy['ns'] & mx['ns'] | fy['ns'] & mx['nb'] | fy['ps'] &\n mx['ps'] | fy['ps'] & mx['pb'] | fy['ps'] & mx['ze'] | fy['nb'] & mx[\n 'nb'] | fy['pb'] & mx['pb'])", 'consequent': "kpy['ze']", 'label': '"""rule_kpy_ze"""'}), "(antecedent=fy['ns'] & mx['ze'] | fy['ns'] & mx['ns'] | fy['ns'] &\n mx['nb'] | fy['ps'] & mx['ps'] | fy['ps'] & mx['pb'] | fy['ps'] & mx[\n 'ze'] | fy['nb'] & mx['nb'] | fy['pb'] & mx['pb'], consequent=kpy['ze'],\n label='rule_kpy_ze')\n", (14966, 15208), True, 'import skfuzzy.control as ctrl\n'), ((15567, 15829), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "(fy['ze'] | fy['nb'] & mx['ps'] | fy['nb'] & mx['pb'] | fy['pb'] & mx['ns'] |\n fy['pb'] & mx['nb'] | fy['ns'] & mx['ps'] | fy['ns'] & mx['pb'] | fy[\n 'ps'] & mx['nb'] | fy['ps'] & mx['ns'])", 'consequent': "kpy['nb']", 'label': '"""rule_kpy_nb"""'}), "(antecedent=fy['ze'] | fy['nb'] & mx['ps'] | fy['nb'] & mx['pb'] |\n fy['pb'] & mx['ns'] | fy['pb'] & mx['nb'] | fy['ns'] & mx['ps'] | fy[\n 'ns'] & mx['pb'] | fy['ps'] & mx['nb'] | fy['ps'] & mx['ns'],\n consequent=kpy['nb'], label='rule_kpy_nb')\n", (15576, 15829), True, 'import skfuzzy.control as ctrl\n'), ((16233, 16295), 'skfuzzy.control.ControlSystem', 'ctrl.ControlSystem', ([], {'rules': '[rule_kpy_0, rule_kpy_1, rule_kpy_2]'}), '(rules=[rule_kpy_0, rule_kpy_1, rule_kpy_2])\n', (16251, 16295), True, 'import skfuzzy.control as ctrl\n'), ((16314, 16409), 'skfuzzy.control.ControlSystemSimulation', 'ctrl.ControlSystemSimulation', (['system_kpy'], {'flush_after_run': '(self.num_mesh * self.num_mesh + 1)'}), '(system_kpy, flush_after_run=self.num_mesh *\n self.num_mesh + 1)\n', (16342, 16409), True, 'import skfuzzy.control as ctrl\n'), ((17357, 17423), 'numpy.linspace', 'np.linspace', (['self.low_input[1]', 'self.high_input[1]', 'self.num_input'], {}), '(self.low_input[1], self.high_input[1], self.num_input)\n', (17368, 17423), True, 'import numpy as np\n'), ((17446, 17512), 'numpy.linspace', 'np.linspace', 
(['self.low_input[0]', 'self.high_input[0]', 'self.num_input'], {}), '(self.low_input[0], self.high_input[0], self.num_input)\n', (17457, 17512), True, 'import numpy as np\n'), ((17527, 17561), 'skfuzzy.control.Antecedent', 'ctrl.Antecedent', (['fy_universe', '"""fy"""'], {}), "(fy_universe, 'fy')\n", (17542, 17561), True, 'import skfuzzy.control as ctrl\n'), ((17575, 17609), 'skfuzzy.control.Antecedent', 'ctrl.Antecedent', (['fx_universe', '"""fx"""'], {}), "(fx_universe, 'fx')\n", (17590, 17609), True, 'import skfuzzy.control as ctrl\n'), ((17762, 17831), 'numpy.linspace', 'np.linspace', (['self.low_output[2]', 'self.high_output[2]', 'self.num_output'], {}), '(self.low_output[2], self.high_output[2], self.num_output)\n', (17773, 17831), True, 'import numpy as np\n'), ((17846, 17882), 'skfuzzy.control.Consequent', 'ctrl.Consequent', (['kpz_universe', '"""kpz"""'], {}), "(kpz_universe, 'kpz')\n", (17861, 17882), True, 'import skfuzzy.control as ctrl\n'), ((17991, 18172), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "(fx['ze'] & fy['ze'] | fx['ze'] & fy['ns'] | fx['ns'] & fy['ze'] | fx['ze'] &\n fy['ps'] | fx['ps'] & fy['ze'])", 'consequent': "kpz['pb']", 'label': '"""rule_kpz_pb"""'}), "(antecedent=fx['ze'] & fy['ze'] | fx['ze'] & fy['ns'] | fx['ns'] &\n fy['ze'] | fx['ze'] & fy['ps'] | fx['ps'] & fy['ze'], consequent=kpz[\n 'pb'], label='rule_kpz_pb')\n", (18000, 18172), True, 'import skfuzzy.control as ctrl\n'), ((18420, 18574), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "(fx['ns'] & fy['ns'] | fx['ps'] & fy['ps'] | fx['ns'] & fy['ps'] | fx['ps'] &\n fy['ns'])", 'consequent': "kpz['ze']", 'label': '"""rule_kpz_ze"""'}), "(antecedent=fx['ns'] & fy['ns'] | fx['ps'] & fy['ps'] | fx['ns'] &\n fy['ps'] | fx['ps'] & fy['ns'], consequent=kpz['ze'], label='rule_kpz_ze')\n", (18429, 18574), True, 'import skfuzzy.control as ctrl\n'), ((18762, 18873), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "(fx['nb'] | fx['pb'] | fy['nb'] | 
fy['pb'])", 'consequent': "kpz['nb']", 'label': '"""rule_kpz_nb"""'}), "(antecedent=fx['nb'] | fx['pb'] | fy['nb'] | fy['pb'], consequent=\n kpz['nb'], label='rule_kpz_nb')\n", (18771, 18873), True, 'import skfuzzy.control as ctrl\n'), ((19060, 19122), 'skfuzzy.control.ControlSystem', 'ctrl.ControlSystem', ([], {'rules': '[rule_kpz_0, rule_kpz_1, rule_kpz_2]'}), '(rules=[rule_kpz_0, rule_kpz_1, rule_kpz_2])\n', (19078, 19122), True, 'import skfuzzy.control as ctrl\n'), ((19141, 19236), 'skfuzzy.control.ControlSystemSimulation', 'ctrl.ControlSystemSimulation', (['system_kpz'], {'flush_after_run': '(self.num_mesh * self.num_mesh + 1)'}), '(system_kpz, flush_after_run=self.num_mesh *\n self.num_mesh + 1)\n', (19169, 19236), True, 'import skfuzzy.control as ctrl\n'), ((20174, 20240), 'numpy.linspace', 'np.linspace', (['self.low_input[1]', 'self.high_input[1]', 'self.num_input'], {}), '(self.low_input[1], self.high_input[1], self.num_input)\n', (20185, 20240), True, 'import numpy as np\n'), ((20263, 20329), 'numpy.linspace', 'np.linspace', (['self.low_input[3]', 'self.high_input[3]', 'self.num_input'], {}), '(self.low_input[3], self.high_input[3], self.num_input)\n', (20274, 20329), True, 'import numpy as np\n'), ((20344, 20378), 'skfuzzy.control.Antecedent', 'ctrl.Antecedent', (['fy_universe', '"""fy"""'], {}), "(fy_universe, 'fy')\n", (20359, 20378), True, 'import skfuzzy.control as ctrl\n'), ((20392, 20426), 'skfuzzy.control.Antecedent', 'ctrl.Antecedent', (['mx_universe', '"""mx"""'], {}), "(mx_universe, 'mx')\n", (20407, 20426), True, 'import skfuzzy.control as ctrl\n'), ((20579, 20634), 'numpy.linspace', 'np.linspace', (['self.low_output[3]', 'self.high_output[3]', '(3)'], {}), '(self.low_output[3], self.high_output[3], 3)\n', (20590, 20634), True, 'import numpy as np\n'), ((20649, 20685), 'skfuzzy.control.Consequent', 'ctrl.Consequent', (['krx_universe', '"""krx"""'], {}), "(krx_universe, 'krx')\n", (20664, 20685), True, 'import skfuzzy.control as ctrl\n'), 
((20794, 20948), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "(mx['nb'] & fy['ze'] | mx['nb'] & fy['ns'] | mx['pb'] & fy['ze'] | mx['pb'] &\n fy['ps'])", 'consequent': "krx['pb']", 'label': '"""rule_krx_pb"""'}), "(antecedent=mx['nb'] & fy['ze'] | mx['nb'] & fy['ns'] | mx['pb'] &\n fy['ze'] | mx['pb'] & fy['ps'], consequent=krx['pb'], label='rule_krx_pb')\n", (20803, 20948), True, 'import skfuzzy.control as ctrl\n'), ((21136, 21377), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "(mx['ze'] | mx['ns'] | mx['ps'] | mx['nb'] & fy['nb'] | mx['nb'] & fy['ps'] |\n mx['nb'] & fy['pb'] | mx['pb'] & fy['pb'] | mx['pb'] & fy['ns'] | mx[\n 'pb'] & fy['nb'])", 'consequent': "krx['nb']", 'label': '"""rule_krx_ze"""'}), "(antecedent=mx['ze'] | mx['ns'] | mx['ps'] | mx['nb'] & fy['nb'] |\n mx['nb'] & fy['ps'] | mx['nb'] & fy['pb'] | mx['pb'] & fy['pb'] | mx[\n 'pb'] & fy['ns'] | mx['pb'] & fy['nb'], consequent=krx['nb'], label=\n 'rule_krx_ze')\n", (21145, 21377), True, 'import skfuzzy.control as ctrl\n'), ((21780, 21830), 'skfuzzy.control.ControlSystem', 'ctrl.ControlSystem', ([], {'rules': '[rule_krx_0, rule_krx_1]'}), '(rules=[rule_krx_0, rule_krx_1])\n', (21798, 21830), True, 'import skfuzzy.control as ctrl\n'), ((21849, 21944), 'skfuzzy.control.ControlSystemSimulation', 'ctrl.ControlSystemSimulation', (['system_krx'], {'flush_after_run': '(self.num_mesh * self.num_mesh + 1)'}), '(system_krx, flush_after_run=self.num_mesh *\n self.num_mesh + 1)\n', (21877, 21944), True, 'import skfuzzy.control as ctrl\n'), ((22873, 22939), 'numpy.linspace', 'np.linspace', (['self.low_input[0]', 'self.high_input[0]', 'self.num_input'], {}), '(self.low_input[0], self.high_input[0], self.num_input)\n', (22884, 22939), True, 'import numpy as np\n'), ((22962, 23028), 'numpy.linspace', 'np.linspace', (['self.low_input[4]', 'self.high_input[4]', 'self.num_input'], {}), '(self.low_input[4], self.high_input[4], self.num_input)\n', (22973, 23028), True, 'import numpy as np\n'), 
((23043, 23077), 'skfuzzy.control.Antecedent', 'ctrl.Antecedent', (['fx_universe', '"""fx"""'], {}), "(fx_universe, 'fx')\n", (23058, 23077), True, 'import skfuzzy.control as ctrl\n'), ((23091, 23125), 'skfuzzy.control.Antecedent', 'ctrl.Antecedent', (['my_universe', '"""my"""'], {}), "(my_universe, 'my')\n", (23106, 23125), True, 'import skfuzzy.control as ctrl\n'), ((23278, 23333), 'numpy.linspace', 'np.linspace', (['self.low_output[4]', 'self.high_output[4]', '(3)'], {}), '(self.low_output[4], self.high_output[4], 3)\n', (23289, 23333), True, 'import numpy as np\n'), ((23348, 23384), 'skfuzzy.control.Consequent', 'ctrl.Consequent', (['kry_universe', '"""kry"""'], {}), "(kry_universe, 'kry')\n", (23363, 23384), True, 'import skfuzzy.control as ctrl\n'), ((23493, 23647), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "(my['nb'] & fx['ze'] | my['nb'] & fx['ns'] | my['pb'] & fx['ze'] | my['pb'] &\n fx['ps'])", 'consequent': "kry['pb']", 'label': '"""rule_kry_pb"""'}), "(antecedent=my['nb'] & fx['ze'] | my['nb'] & fx['ns'] | my['pb'] &\n fx['ze'] | my['pb'] & fx['ps'], consequent=kry['pb'], label='rule_kry_pb')\n", (23502, 23647), True, 'import skfuzzy.control as ctrl\n'), ((23835, 24076), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "(my['ze'] | my['ns'] | my['ps'] | my['nb'] & fx['nb'] | my['pb'] & fx['pb'] |\n my['nb'] & fx['ps'] | my['pb'] & fx['ns'] | my['nb'] & fx['pb'] | my[\n 'pb'] & fx['nb'])", 'consequent': "kry['nb']", 'label': '"""rule_kry_nb"""'}), "(antecedent=my['ze'] | my['ns'] | my['ps'] | my['nb'] & fx['nb'] |\n my['pb'] & fx['pb'] | my['nb'] & fx['ps'] | my['pb'] & fx['ns'] | my[\n 'nb'] & fx['pb'] | my['pb'] & fx['nb'], consequent=kry['nb'], label=\n 'rule_kry_nb')\n", (23844, 24076), True, 'import skfuzzy.control as ctrl\n'), ((24479, 24529), 'skfuzzy.control.ControlSystem', 'ctrl.ControlSystem', ([], {'rules': '[rule_kry_0, rule_kry_1]'}), '(rules=[rule_kry_0, rule_kry_1])\n', (24497, 24529), True, 'import skfuzzy.control 
as ctrl\n'), ((24548, 24643), 'skfuzzy.control.ControlSystemSimulation', 'ctrl.ControlSystemSimulation', (['system_kry'], {'flush_after_run': '(self.num_mesh * self.num_mesh + 1)'}), '(system_kry, flush_after_run=self.num_mesh *\n self.num_mesh + 1)\n', (24576, 24643), True, 'import skfuzzy.control as ctrl\n'), ((25573, 25639), 'numpy.linspace', 'np.linspace', (['self.low_input[3]', 'self.high_input[3]', 'self.num_input'], {}), '(self.low_input[3], self.high_input[3], self.num_input)\n', (25584, 25639), True, 'import numpy as np\n'), ((25662, 25728), 'numpy.linspace', 'np.linspace', (['self.low_input[5]', 'self.high_input[5]', 'self.num_input'], {}), '(self.low_input[5], self.high_input[5], self.num_input)\n', (25673, 25728), True, 'import numpy as np\n'), ((25743, 25777), 'skfuzzy.control.Antecedent', 'ctrl.Antecedent', (['mx_universe', '"""mx"""'], {}), "(mx_universe, 'mx')\n", (25758, 25777), True, 'import skfuzzy.control as ctrl\n'), ((25791, 25825), 'skfuzzy.control.Antecedent', 'ctrl.Antecedent', (['mz_universe', '"""mz"""'], {}), "(mz_universe, 'mz')\n", (25806, 25825), True, 'import skfuzzy.control as ctrl\n'), ((25979, 26034), 'numpy.linspace', 'np.linspace', (['self.low_output[5]', 'self.high_output[5]', '(3)'], {}), '(self.low_output[5], self.high_output[5], 3)\n', (25990, 26034), True, 'import numpy as np\n'), ((26049, 26085), 'skfuzzy.control.Consequent', 'ctrl.Consequent', (['krz_universe', '"""krz"""'], {}), "(krz_universe, 'krz')\n", (26064, 26085), True, 'import skfuzzy.control as ctrl\n'), ((26193, 26396), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "(mz['nb'] & mx['ze'] | mz['nb'] & mx['ps'] | mz['nb'] & mx['ns'] | mz['pb'] &\n mx['ns'] | mz['pb'] & mx['ze'] | mz['pb'] & mx['ps'])", 'consequent': "krz['pb']", 'label': '"""rule_krz_pb"""'}), "(antecedent=mz['nb'] & mx['ze'] | mz['nb'] & mx['ps'] | mz['nb'] &\n mx['ns'] | mz['pb'] & mx['ns'] | mz['pb'] & mx['ze'] | mz['pb'] & mx[\n 'ps'], consequent=krz['pb'], label='rule_krz_pb')\n", 
(26202, 26396), True, 'import skfuzzy.control as ctrl\n'), ((26669, 26860), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "(mz['ze'] | mz['ns'] | mz['ps'] | mz['nb'] & mx['nb'] | mz['nb'] & mx['pb'] |\n mz['pb'] & mx['pb'] | mz['pb'] & mx['nb'])", 'consequent': "krz['nb']", 'label': '"""rule_krz_nb"""'}), "(antecedent=mz['ze'] | mz['ns'] | mz['ps'] | mz['nb'] & mx['nb'] |\n mz['nb'] & mx['pb'] | mz['pb'] & mx['pb'] | mz['pb'] & mx['nb'],\n consequent=krz['nb'], label='rule_krz_nb')\n", (26678, 26860), True, 'import skfuzzy.control as ctrl\n'), ((27179, 27229), 'skfuzzy.control.ControlSystem', 'ctrl.ControlSystem', ([], {'rules': '[rule_krz_0, rule_krz_1]'}), '(rules=[rule_krz_0, rule_krz_1])\n', (27197, 27229), True, 'import skfuzzy.control as ctrl\n'), ((27248, 27343), 'skfuzzy.control.ControlSystemSimulation', 'ctrl.ControlSystemSimulation', (['system_krz'], {'flush_after_run': '(self.num_mesh * self.num_mesh + 1)'}), '(system_krz, flush_after_run=self.num_mesh *\n self.num_mesh + 1)\n', (27276, 27343), True, 'import skfuzzy.control as ctrl\n'), ((28570, 28621), 'numpy.linspace', 'np.linspace', (['low_force[0]', 'high_force[0]', 'num_input'], {}), '(low_force[0], high_force[0], num_input)\n', (28581, 28621), True, 'import numpy as np\n'), ((28644, 28695), 'numpy.linspace', 'np.linspace', (['low_force[1]', 'high_force[1]', 'num_input'], {}), '(low_force[1], high_force[1], num_input)\n', (28655, 28695), True, 'import numpy as np\n'), ((28718, 28769), 'numpy.linspace', 'np.linspace', (['low_force[2]', 'high_force[2]', 'num_input'], {}), '(low_force[2], high_force[2], num_input)\n', (28729, 28769), True, 'import numpy as np\n'), ((28793, 28843), 'numpy.linspace', 'np.linspace', (['low_force[3]', 'low_force[3]', 'num_input'], {}), '(low_force[3], low_force[3], num_input)\n', (28804, 28843), True, 'import numpy as np\n'), ((28866, 28916), 'numpy.linspace', 'np.linspace', (['low_force[4]', 'low_force[4]', 'num_input'], {}), '(low_force[4], low_force[4], 
num_input)\n', (28877, 28916), True, 'import numpy as np\n'), ((28939, 28989), 'numpy.linspace', 'np.linspace', (['low_force[5]', 'low_force[5]', 'num_input'], {}), '(low_force[5], low_force[5], num_input)\n', (28950, 28989), True, 'import numpy as np\n'), ((29076, 29110), 'skfuzzy.control.Antecedent', 'ctrl.Antecedent', (['fx_universe', '"""fx"""'], {}), "(fx_universe, 'fx')\n", (29091, 29110), True, 'import skfuzzy.control as ctrl\n'), ((29124, 29158), 'skfuzzy.control.Antecedent', 'ctrl.Antecedent', (['fy_universe', '"""fy"""'], {}), "(fy_universe, 'fy')\n", (29139, 29158), True, 'import skfuzzy.control as ctrl\n'), ((29172, 29206), 'skfuzzy.control.Antecedent', 'ctrl.Antecedent', (['fz_universe', '"""fz"""'], {}), "(fz_universe, 'fz')\n", (29187, 29206), True, 'import skfuzzy.control as ctrl\n'), ((29221, 29255), 'skfuzzy.control.Antecedent', 'ctrl.Antecedent', (['mx_universe', '"""mx"""'], {}), "(mx_universe, 'mx')\n", (29236, 29255), True, 'import skfuzzy.control as ctrl\n'), ((29269, 29303), 'skfuzzy.control.Antecedent', 'ctrl.Antecedent', (['my_universe', '"""my"""'], {}), "(my_universe, 'my')\n", (29284, 29303), True, 'import skfuzzy.control as ctrl\n'), ((29317, 29351), 'skfuzzy.control.Antecedent', 'ctrl.Antecedent', (['mz_universe', '"""mz"""'], {}), "(mz_universe, 'mz')\n", (29332, 29351), True, 'import skfuzzy.control as ctrl\n'), ((29762, 29831), 'numpy.linspace', 'np.linspace', (['self.low_output[0]', 'self.high_output[0]', 'self.num_output'], {}), '(self.low_output[0], self.high_output[0], self.num_output)\n', (29773, 29831), True, 'import numpy as np\n'), ((29855, 29924), 'numpy.linspace', 'np.linspace', (['self.low_output[1]', 'self.high_output[1]', 'self.num_output'], {}), '(self.low_output[1], self.high_output[1], self.num_output)\n', (29866, 29924), True, 'import numpy as np\n'), ((29948, 30017), 'numpy.linspace', 'np.linspace', (['self.low_output[2]', 'self.high_output[2]', 'self.num_output'], {}), '(self.low_output[2], self.high_output[2], 
self.num_output)\n', (29959, 30017), True, 'import numpy as np\n'), ((30042, 30097), 'numpy.linspace', 'np.linspace', (['self.low_output[3]', 'self.high_output[3]', '(3)'], {}), '(self.low_output[3], self.high_output[3], 3)\n', (30053, 30097), True, 'import numpy as np\n'), ((30121, 30176), 'numpy.linspace', 'np.linspace', (['self.low_output[4]', 'self.high_output[4]', '(3)'], {}), '(self.low_output[4], self.high_output[4], 3)\n', (30132, 30176), True, 'import numpy as np\n'), ((30200, 30255), 'numpy.linspace', 'np.linspace', (['self.low_output[5]', 'self.high_output[5]', '(3)'], {}), '(self.low_output[5], self.high_output[5], 3)\n', (30211, 30255), True, 'import numpy as np\n'), ((30271, 30307), 'skfuzzy.control.Consequent', 'ctrl.Consequent', (['kpx_universe', '"""kpx"""'], {}), "(kpx_universe, 'kpx')\n", (30286, 30307), True, 'import skfuzzy.control as ctrl\n'), ((30322, 30358), 'skfuzzy.control.Consequent', 'ctrl.Consequent', (['kpy_universe', '"""kpy"""'], {}), "(kpy_universe, 'kpy')\n", (30337, 30358), True, 'import skfuzzy.control as ctrl\n'), ((30373, 30409), 'skfuzzy.control.Consequent', 'ctrl.Consequent', (['kpz_universe', '"""kpz"""'], {}), "(kpz_universe, 'kpz')\n", (30388, 30409), True, 'import skfuzzy.control as ctrl\n'), ((30425, 30461), 'skfuzzy.control.Consequent', 'ctrl.Consequent', (['krx_universe', '"""krx"""'], {}), "(krx_universe, 'krx')\n", (30440, 30461), True, 'import skfuzzy.control as ctrl\n'), ((30476, 30512), 'skfuzzy.control.Consequent', 'ctrl.Consequent', (['kry_universe', '"""kry"""'], {}), "(kry_universe, 'kry')\n", (30491, 30512), True, 'import skfuzzy.control as ctrl\n'), ((30527, 30563), 'skfuzzy.control.Consequent', 'ctrl.Consequent', (['krz_universe', '"""krz"""'], {}), "(krz_universe, 'krz')\n", (30542, 30563), True, 'import skfuzzy.control as ctrl\n'), ((31182, 31336), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "(fx['nb'] & my['ze'] | fx['nb'] & my['ns'] | fx['pb'] & my['ze'] | fx['pb'] &\n my['ps'])", 
'consequent': "kpx['pb']", 'label': '"""rule kpx pb"""'}), "(antecedent=fx['nb'] & my['ze'] | fx['nb'] & my['ns'] | fx['pb'] &\n my['ze'] | fx['pb'] & my['ps'], consequent=kpx['pb'], label='rule kpx pb')\n", (31191, 31336), True, 'import skfuzzy.control as ctrl\n'), ((31524, 31775), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "(fx['ns'] & my['ze'] | fx['ns'] & my['ns'] | fx['ns'] & my['nb'] | fx['nb'] &\n my['nb'] | fx['pb'] & my['pb'] | fx['ps'] & my['ps'] | fx['ps'] & my[\n 'pb'] | fx['ps'] & my['ze'])", 'consequent': "kpx['ze']", 'label': '"""rule kpx ze"""'}), "(antecedent=fx['ns'] & my['ze'] | fx['ns'] & my['ns'] | fx['ns'] &\n my['nb'] | fx['nb'] & my['nb'] | fx['pb'] & my['pb'] | fx['ps'] & my[\n 'ps'] | fx['ps'] & my['pb'] | fx['ps'] & my['ze'], consequent=kpx['ze'],\n label='rule kpx ze')\n", (31533, 31775), True, 'import skfuzzy.control as ctrl\n'), ((32134, 32500), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "(fx['ze'] & my['ze'] | fx['ze'] & my['ps'] | fx['ze'] & my['ns'] | fx['ze'] &\n my['pb'] | fx['ze'] & my['nb'] | fx['nb'] & my['ps'] | fx['nb'] & my[\n 'pb'] | fx['pb'] & my['ns'] | fx['pb'] & my['nb'] | fx['ns'] & my['ps'] |\n fx['ns'] & my['pb'] | fx['ps'] & my['nb'] | fx['ps'] & my['ns'])", 'consequent': "kpx['nb']", 'label': '"""rule kpx nb"""'}), "(antecedent=fx['ze'] & my['ze'] | fx['ze'] & my['ps'] | fx['ze'] &\n my['ns'] | fx['ze'] & my['pb'] | fx['ze'] & my['nb'] | fx['nb'] & my[\n 'ps'] | fx['nb'] & my['pb'] | fx['pb'] & my['ns'] | fx['pb'] & my['nb'] |\n fx['ns'] & my['ps'] | fx['ns'] & my['pb'] | fx['ps'] & my['nb'] | fx[\n 'ps'] & my['ns'], consequent=kpx['nb'], label='rule kpx nb')\n", (32143, 32500), True, 'import skfuzzy.control as ctrl\n'), ((33079, 33141), 'skfuzzy.control.ControlSystem', 'ctrl.ControlSystem', ([], {'rules': '[rule_kpx_2, rule_kpx_1, rule_kpx_0]'}), '(rules=[rule_kpx_2, rule_kpx_1, rule_kpx_0])\n', (33097, 33141), True, 'import skfuzzy.control as ctrl\n'), ((33160, 33255), 
'skfuzzy.control.ControlSystemSimulation', 'ctrl.ControlSystemSimulation', (['system_kpx'], {'flush_after_run': '(self.num_mesh * self.num_mesh + 1)'}), '(system_kpx, flush_after_run=self.num_mesh *\n self.num_mesh + 1)\n', (33188, 33255), True, 'import skfuzzy.control as ctrl\n'), ((33407, 33561), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "(fy['nb'] & mx['ns'] | fy['nb'] & mx['ze'] | fy['pb'] & mx['ze'] | fy['pb'] &\n mx['ps'])", 'consequent': "kpy['pb']", 'label': '"""rule_kpy_pb"""'}), "(antecedent=fy['nb'] & mx['ns'] | fy['nb'] & mx['ze'] | fy['pb'] &\n mx['ze'] | fy['pb'] & mx['ps'], consequent=kpy['pb'], label='rule_kpy_pb')\n", (33416, 33561), True, 'import skfuzzy.control as ctrl\n'), ((33749, 34000), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "(fy['ns'] & mx['ze'] | fy['ns'] & mx['ns'] | fy['ns'] & mx['nb'] | fy['ps'] &\n mx['ps'] | fy['ps'] & mx['pb'] | fy['ps'] & mx['ze'] | fy['nb'] & mx[\n 'nb'] | fy['pb'] & mx['pb'])", 'consequent': "kpy['ze']", 'label': '"""rule_kpy_ze"""'}), "(antecedent=fy['ns'] & mx['ze'] | fy['ns'] & mx['ns'] | fy['ns'] &\n mx['nb'] | fy['ps'] & mx['ps'] | fy['ps'] & mx['pb'] | fy['ps'] & mx[\n 'ze'] | fy['nb'] & mx['nb'] | fy['pb'] & mx['pb'], consequent=kpy['ze'],\n label='rule_kpy_ze')\n", (33758, 34000), True, 'import skfuzzy.control as ctrl\n'), ((34359, 34621), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "(fy['ze'] | fy['nb'] & mx['ps'] | fy['nb'] & mx['pb'] | fy['pb'] & mx['ns'] |\n fy['pb'] & mx['nb'] | fy['ns'] & mx['ps'] | fy['ns'] & mx['pb'] | fy[\n 'ps'] & mx['nb'] | fy['ps'] & mx['ns'])", 'consequent': "kpy['nb']", 'label': '"""rule_kpy_nb"""'}), "(antecedent=fy['ze'] | fy['nb'] & mx['ps'] | fy['nb'] & mx['pb'] |\n fy['pb'] & mx['ns'] | fy['pb'] & mx['nb'] | fy['ns'] & mx['ps'] | fy[\n 'ns'] & mx['pb'] | fy['ps'] & mx['nb'] | fy['ps'] & mx['ns'],\n consequent=kpy['nb'], label='rule_kpy_nb')\n", (34368, 34621), True, 'import skfuzzy.control as ctrl\n'), ((35025, 35087), 
'skfuzzy.control.ControlSystem', 'ctrl.ControlSystem', ([], {'rules': '[rule_kpy_0, rule_kpy_1, rule_kpy_2]'}), '(rules=[rule_kpy_0, rule_kpy_1, rule_kpy_2])\n', (35043, 35087), True, 'import skfuzzy.control as ctrl\n'), ((35106, 35201), 'skfuzzy.control.ControlSystemSimulation', 'ctrl.ControlSystemSimulation', (['system_kpy'], {'flush_after_run': '(self.num_mesh * self.num_mesh + 1)'}), '(system_kpy, flush_after_run=self.num_mesh *\n self.num_mesh + 1)\n', (35134, 35201), True, 'import skfuzzy.control as ctrl\n'), ((35294, 35475), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "(fx['ze'] & fy['ze'] | fx['ze'] & fy['ns'] | fx['ns'] & fy['ze'] | fx['ze'] &\n fy['ps'] | fx['ps'] & fy['ze'])", 'consequent': "kpz['pb']", 'label': '"""rule_kpz_pb"""'}), "(antecedent=fx['ze'] & fy['ze'] | fx['ze'] & fy['ns'] | fx['ns'] &\n fy['ze'] | fx['ze'] & fy['ps'] | fx['ps'] & fy['ze'], consequent=kpz[\n 'pb'], label='rule_kpz_pb')\n", (35303, 35475), True, 'import skfuzzy.control as ctrl\n'), ((35703, 35857), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "(fx['ns'] & fy['ns'] | fx['ps'] & fy['ps'] | fx['ns'] & fy['ps'] | fx['ps'] &\n fy['ns'])", 'consequent': "kpz['ze']", 'label': '"""rule_kpz_ze"""'}), "(antecedent=fx['ns'] & fy['ns'] | fx['ps'] & fy['ps'] | fx['ns'] &\n fy['ps'] | fx['ps'] & fy['ns'], consequent=kpz['ze'], label='rule_kpz_ze')\n", (35712, 35857), True, 'import skfuzzy.control as ctrl\n'), ((36045, 36156), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "(fx['nb'] | fx['pb'] | fy['nb'] | fy['pb'])", 'consequent': "kpz['nb']", 'label': '"""rule_kpz_nb"""'}), "(antecedent=fx['nb'] | fx['pb'] | fy['nb'] | fy['pb'], consequent=\n kpz['nb'], label='rule_kpz_nb')\n", (36054, 36156), True, 'import skfuzzy.control as ctrl\n'), ((36343, 36405), 'skfuzzy.control.ControlSystem', 'ctrl.ControlSystem', ([], {'rules': '[rule_kpz_0, rule_kpz_1, rule_kpz_2]'}), '(rules=[rule_kpz_0, rule_kpz_1, rule_kpz_2])\n', (36361, 36405), True, 'import 
skfuzzy.control as ctrl\n'), ((36424, 36519), 'skfuzzy.control.ControlSystemSimulation', 'ctrl.ControlSystemSimulation', (['system_kpz'], {'flush_after_run': '(self.num_mesh * self.num_mesh + 1)'}), '(system_kpz, flush_after_run=self.num_mesh *\n self.num_mesh + 1)\n', (36452, 36519), True, 'import skfuzzy.control as ctrl\n'), ((36612, 36766), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "(mx['nb'] & fy['ze'] | mx['nb'] & fy['ns'] | mx['pb'] & fy['ze'] | mx['pb'] &\n fy['ps'])", 'consequent': "krx['pb']", 'label': '"""rule_krx_pb"""'}), "(antecedent=mx['nb'] & fy['ze'] | mx['nb'] & fy['ns'] | mx['pb'] &\n fy['ze'] | mx['pb'] & fy['ps'], consequent=krx['pb'], label='rule_krx_pb')\n", (36621, 36766), True, 'import skfuzzy.control as ctrl\n'), ((36954, 37195), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "(mx['ze'] | mx['ns'] | mx['ps'] | mx['nb'] & fy['nb'] | mx['nb'] & fy['ps'] |\n mx['nb'] & fy['pb'] | mx['pb'] & fy['pb'] | mx['pb'] & fy['ns'] | mx[\n 'pb'] & fy['nb'])", 'consequent': "krx['nb']", 'label': '"""rule_krx_ze"""'}), "(antecedent=mx['ze'] | mx['ns'] | mx['ps'] | mx['nb'] & fy['nb'] |\n mx['nb'] & fy['ps'] | mx['nb'] & fy['pb'] | mx['pb'] & fy['pb'] | mx[\n 'pb'] & fy['ns'] | mx['pb'] & fy['nb'], consequent=krx['nb'], label=\n 'rule_krx_ze')\n", (36963, 37195), True, 'import skfuzzy.control as ctrl\n'), ((37598, 37648), 'skfuzzy.control.ControlSystem', 'ctrl.ControlSystem', ([], {'rules': '[rule_krx_0, rule_krx_1]'}), '(rules=[rule_krx_0, rule_krx_1])\n', (37616, 37648), True, 'import skfuzzy.control as ctrl\n'), ((37667, 37762), 'skfuzzy.control.ControlSystemSimulation', 'ctrl.ControlSystemSimulation', (['system_krx'], {'flush_after_run': '(self.num_mesh * self.num_mesh + 1)'}), '(system_krx, flush_after_run=self.num_mesh *\n self.num_mesh + 1)\n', (37695, 37762), True, 'import skfuzzy.control as ctrl\n'), ((37855, 38009), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "(my['nb'] & fx['ze'] | my['nb'] & fx['ns'] | 
my['pb'] & fx['ze'] | my['pb'] &\n fx['ps'])", 'consequent': "kry['pb']", 'label': '"""rule_kry_pb"""'}), "(antecedent=my['nb'] & fx['ze'] | my['nb'] & fx['ns'] | my['pb'] &\n fx['ze'] | my['pb'] & fx['ps'], consequent=kry['pb'], label='rule_kry_pb')\n", (37864, 38009), True, 'import skfuzzy.control as ctrl\n'), ((38197, 38438), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "(my['ze'] | my['ns'] | my['ps'] | my['nb'] & fx['nb'] | my['pb'] & fx['pb'] |\n my['nb'] & fx['ps'] | my['pb'] & fx['ns'] | my['nb'] & fx['pb'] | my[\n 'pb'] & fx['nb'])", 'consequent': "kry['nb']", 'label': '"""rule_kry_nb"""'}), "(antecedent=my['ze'] | my['ns'] | my['ps'] | my['nb'] & fx['nb'] |\n my['pb'] & fx['pb'] | my['nb'] & fx['ps'] | my['pb'] & fx['ns'] | my[\n 'nb'] & fx['pb'] | my['pb'] & fx['nb'], consequent=kry['nb'], label=\n 'rule_kry_nb')\n", (38206, 38438), True, 'import skfuzzy.control as ctrl\n'), ((38841, 38891), 'skfuzzy.control.ControlSystem', 'ctrl.ControlSystem', ([], {'rules': '[rule_kry_0, rule_kry_1]'}), '(rules=[rule_kry_0, rule_kry_1])\n', (38859, 38891), True, 'import skfuzzy.control as ctrl\n'), ((38910, 39005), 'skfuzzy.control.ControlSystemSimulation', 'ctrl.ControlSystemSimulation', (['system_kry'], {'flush_after_run': '(self.num_mesh * self.num_mesh + 1)'}), '(system_kry, flush_after_run=self.num_mesh *\n self.num_mesh + 1)\n', (38938, 39005), True, 'import skfuzzy.control as ctrl\n'), ((39098, 39301), 'skfuzzy.control.Rule', 'ctrl.Rule', ([], {'antecedent': "(mz['nb'] & mx['ze'] | mz['nb'] & mx['ps'] | mz['nb'] & mx['ps'] | mz['pb'] &\n mx['ns'] | mz['pb'] & mx['ze'] | mz['pb'] & mx['ps'])", 'consequent': "krz['pb']", 'label': '"""rule_krz_pb"""'}), "(antecedent=mz['nb'] & mx['ze'] | mz['nb'] & mx['ps'] | mz['nb'] &\n mx['ps'] | mz['pb'] & mx['ns'] | mz['pb'] & mx['ze'] | mz['pb'] & mx[\n 'ps'], consequent=krz['pb'], label='rule_krz_pb')\n", (39107, 39301), True, 'import skfuzzy.control as ctrl\n'), ((39574, 39765), 'skfuzzy.control.Rule', 
'ctrl.Rule', ([], {'antecedent': "(mz['ze'] | mz['ns'] | mz['ps'] | mz['nb'] & mx['nb'] | mz['pb'] & mx['pb'] |\n mz['nb'] & mx['pb'] | mz['pb'] & mx['nb'])", 'consequent': "krz['nb']", 'label': '"""rule_krz_nb"""'}), "(antecedent=mz['ze'] | mz['ns'] | mz['ps'] | mz['nb'] & mx['nb'] |\n mz['pb'] & mx['pb'] | mz['nb'] & mx['pb'] | mz['pb'] & mx['nb'],\n consequent=krz['nb'], label='rule_krz_nb')\n", (39583, 39765), True, 'import skfuzzy.control as ctrl\n'), ((40084, 40134), 'skfuzzy.control.ControlSystem', 'ctrl.ControlSystem', ([], {'rules': '[rule_krz_0, rule_krz_1]'}), '(rules=[rule_krz_0, rule_krz_1])\n', (40102, 40134), True, 'import skfuzzy.control as ctrl\n'), ((40153, 40248), 'skfuzzy.control.ControlSystemSimulation', 'ctrl.ControlSystemSimulation', (['system_krz'], {'flush_after_run': '(self.num_mesh * self.num_mesh + 1)'}), '(system_krz, flush_after_run=self.num_mesh *\n self.num_mesh + 1)\n', (40181, 40248), True, 'import skfuzzy.control as ctrl\n'), ((40387, 40427), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n', (40395, 40427), True, 'import numpy as np\n'), ((40435, 40485), 'numpy.array', 'np.array', (['[0.02, 0.02, 0.025, 0.015, 0.015, 0.015]'], {}), '([0.02, 0.02, 0.025, 0.015, 0.015, 0.015])\n', (40443, 40485), True, 'import numpy as np\n'), ((5327, 5381), 'numpy.linspace', 'np.linspace', (['self.low_input[i]', 'self.high_input[i]', '(21)'], {}), '(self.low_input[i], self.high_input[i], 21)\n', (5338, 5381), True, 'import numpy as np\n')] |
### This is inteded to be an easily modifiable VAE parent class for everyone who uses VAE-models
### Based on the pytorch lightning bolts VAE code
### neptune.AI support added (specifiy username and token in input var)
from argparse import ArgumentParser
import pytorch_lightning as pl
import torch
import torch.nn as nn
from torch.nn import functional as F
import neptune
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
from numpy.random import choice
import time
import pickle
import random
from sklearn.metrics import confusion_matrix, f1_score
import pandas as pd
import seaborn as sn
from utils_.tsne import tsne
import copy
import sys
sys.path.append('../..')
from systems.ecgresnet_uncertainty import ECGResNetUncertaintySystem
from systems.Mixture_of_gaussians.latent_probability_models import Mixture_of_Gaussians
from systems.Mixture_of_gaussians.ResNet import ResNet18Dec
from datetime import datetime
class VAE_with_Mixture_of_Gaussians(pl.LightningModule):
"""
VAE with mixture of gaussians for classification and uncertainty estimation.
"""
def __init__(self,**args ):
super(VAE_with_Mixture_of_Gaussians, self).__init__()
print("args is")
print(args)
self.lr = args['learning_rate']
self.beta = args['beta']
self.batch_size = args['batch_size']
self.latent_dim = args['latent_dim']
self.show_figures = args['visualize'] == "True"
self.losses = args['losses']
self.current_batch_idx = 1
args['include_classification'] = False
self.encoder = ECGResNetUncertaintySystem(**args)
if 'pretrained_decoder' in args:
self.decoder = pickle.load(open(args['pretrained_decoder'], 'rb'))
i = 0
for param in self.decoder.parameters():
if (i<5):
param.requires_grad = True
else:
param.requires_grad = False
i += 1
else:
self.decoder = ResNet18Dec(**args)
self.latent_probability_model = Mixture_of_Gaussians(args['latent_dim'], args['encoder_output_dim'])
#self.init_weights([self.encoder, self.decoder, self.latent_probability_model])
if self.show_figures :
self.figure = self.initialize_figure(args['data_dir'])
self.cm_all = torch.zeros(6,6)
self.cm_certain = torch.zeros(6,6)
self.cm_uncertain = torch.zeros(6,6)
self.acc = []
self.uncertain_prediction = []
self.certain_prediction = []
self.predictions_certain = []
self.true_labels_certain = []
self.predictions_all = []
self.true_labels_all = []
self.prediction_uncertainties = []
self.train_acc =0
self.best_mF1 = 0
self.cat_weights = [1,1,1,1,1,1]
self.configure_optimizers()
self.initialize_classifier(args['latent_dim'])
### init neptune for logging
project_name = "balinthompot/VAE-test"
neptune_token_path = "/workspace/ecgnet/api_key.txt"
neptune_test_name = 'ecg-test-'
neptune_token = open(neptune_token_path, "r").read()
neptune.init(project_qualified_name=project_name,
api_token=neptune_token)
neptune.create_experiment(name=neptune_test_name +
datetime.now().strftime("%d/%m/%Y %H:%M:%S"),
params=args)
print("VAE initialized")
def initialize_classifier(self, latent_dim):
self.classification_layer = nn.Sequential(nn.Linear(latent_dim , 6))
def initialize_figure(self, data_dir):
plt.ion()
fig, axs = plt.subplots(8, 4, figsize=(20,30))
if data_dir == "/raw_data/umcu_median":
input_width = 600
else:
input_width = 5000
line = []
for sub_fig in range(4):
for channel in range(0,8):
x = range(0,input_width)
y = choice(range(-500, 500), input_width)
new_line, = axs[channel,sub_fig].plot(x, y, 'r-')
axs[channel,sub_fig].set_title("Channel " + str(channel + 1))
line.append(new_line)
axs[0,0].annotate('Generated', xy=(0.5, 1), xytext=(0, 30),
xycoords='axes fraction', textcoords='offset points',
size='large', ha='center', va='baseline')
axs[0,1].annotate('Reconstructed', xy=(0.5, 1), xytext=(0, 30),
xycoords='axes fraction', textcoords='offset points',
size='large', ha='center', va='baseline')
axs[0,2].annotate('Original', xy=(0.5, 1), xytext=(0, 30),
xycoords='axes fraction', textcoords='offset points',
size='large', ha='center', va='baseline')
axs[0,3].annotate('Difference', xy=(0.5, 1), xytext=(0, 30),
xycoords='axes fraction', textcoords='offset points',
size='large', ha='center', va='baseline')
data = {
'figure':fig,
'axes' : axs,
'lines':line
}
return data
def init_weights(self, models):
for m in models:
for p in m.parameters():
if len(p.data.shape) > 1:
torch.nn.init.xavier_normal(p.data)
else:
torch.nn.init.normal(p.data)
def forward(self, x):
aux,x = self.encoder(x)
latent_params = self.latent_probability_model.get_latent_params(x)
p, q, z = self.sample(latent_params)
return self.decoder(z)
def _run_step(self, x):
aux,x = self.encoder(x)
#print(x)
#print("----")
#print(x.shape)
#print("---")
latent_params = self.latent_probability_model.get_latent_params(x)
#print("++++++")
#print(latent_params)
#print("++++++")
p, q, z = self.sample(latent_params)
#print(y_hat)
#exit()
return z, p, q
def sample(self, latent_params):
p = self.latent_probability_model.p
q = self.latent_probability_model.q(latent_params)
z = self.latent_probability_model.get_z(q)
return p, q, z
def get_expert_features(self,batch):
keys = list(batch.keys())
m = torch.stack(([batch[label] for label in keys[4:]]), 0)
y = m.T
y = y.type(torch.FloatTensor).to(self.device)
y -= exp_feat_means
y /= exp_feat_stds
return y
def get_triage_mask(self, batch):
triage_cats = [0,1,2,3,4,5]
triage_labels = batch['label']
batch_size = batch['waveform'].shape[0]
### replace cat 5 with cat 4, as in the paper
triage_mask = torch.stack([ torch.stack([torch.full([self.latent_dim], self.cat_weights[triage_cat]) if triage_labels[ind] == triage_cat else torch.full([self.latent_dim], 0) for ind in range(batch_size)] , 0) for triage_cat in triage_cats], 0)
return triage_mask.to('cuda')
def step(self, batch, batch_idx):
x = batch['waveform'].float()
#y = self.get_expert_features(batch)
self.batch = x
self.batch_full = batch
z, p, q = self._run_step(x)
triage_mask = self.get_triage_mask(batch)
kl, kl_all = self.latent_probability_model.KL(p,q,z, triage_mask)
#self.beta = (torch.max(recon_loss) / kl).item()
kl *= self.beta
## loss part to represent the expert features in the first dimension
##expert_feature_loss = F.mse_loss(z[:, :15], y) * 100
if torch.isnan(kl).any():
self.on_nan_loss(z,x,x_hat, "kl")
y = batch['label']
#kl_all = torch.reshape(kl_all, (kl_all.shape[1], kl_all.shape[0] * kl_all.shape[2]))
##y_hat = self.classification_layer(z)
##classification_loss = F.cross_entropy(y_hat , y)
##classification_loss = self.classification_loss(y_hat , y, True)
neptune.log_metric('kl_loss', kl.item())
loss = kl ##+ classification_loss
if 'reconstruction' in self.losses:
recon = self.decoder(z)
recon_loss = F.mse_loss(recon, x, reduction='sum')
neptune.log_metric('recon_loss', recon_loss.item())
self.recon = recon
loss += recon_loss
if self.show_figures and batch_idx%20 == 0:
self.visualize_data_point(self.batch[0] , 2)
if 'reconstruction' in self.losses:
self.visualize_data_point(self.recon[0] , 1)
self.visualize_data_point(self.generate_data()[0], 0)
self.visualize_data_point(self.batch[0] - self.recon[0] , 3)
logs = {
"loss": loss,
}
neptune.log_metric('total_loss', loss)
self.current_batch_idx += 1
return loss, logs
def on_nan_loss(self, z, x, x_hat, loss_type):
print(loss_type + " is nan")
print("max of z")
print(torch.max(z))
print("max of x")
print(torch.max(x))
print("max of x_hat")
print(torch.max(x_hat))
print("----")
print("min of z")
print(torch.min(z))
print("min of x")
print(torch.min(x))
print("min of x_hat")
print(torch.min(x_hat))
print("z:")
print(z)
exit()
def training_step(self, batch, batch_idx):
loss, logs = self.step(batch, batch_idx)
self.log_dict({f"train_{k}": v
for k, v in logs.items()},
on_step=True,
on_epoch=False)
##self.calc_data_probability(copy.deepcopy(batch))
return loss
def training_epoch_end(self, training_step_out):
avg_train_loss = torch.mean(
torch.Tensor([o['loss'] for o in training_step_out]))
print('average epoch loss - train: ' + str(avg_train_loss))
def validation_step(self, batch, batch_idx):
###self.calc_data_probability(batch)
x = batch['waveform'].float()
z, p, q = self._run_step(x)
self.calc_data_probability(batch)
return 0
def validation_epoch_end(self, validation_step_out):
self.ranked_evaluation()
self.visualize_confusion_matrices()
self.cm_all = torch.zeros(6,6)
self.cm_certain = torch.zeros(6,6)
self.cm_uncertain = torch.zeros(6,6)
self.acc = []
self.uncertain_prediction = []
self.certain_prediction = []
self.predictions_certain = []
self.true_labels_certain = []
self.train_acc = 0
self.current_batch_idx = 0
self.prediction_uncertainties = []
self.predictions_all = []
self.true_labels_all = []
def test_step(self, batch, batch_idx):
x = batch['waveform'].float()
z, p, q = self._run_step(x)
logs = {
"test_accuracy": 0, ###TODO
}
self.calc_data_probability(batch)
return logs
##TODO
def test_epoch_end(self, test_out):
self.ranked_evaluation()
self.visualize_confusion_matrices()
self.cm_all = torch.zeros(6,6)
self.cm_certain = torch.zeros(6,6)
self.cm_uncertain = torch.zeros(6,6)
self.acc = []
self.uncertain_prediction = []
self.certain_prediction = []
self.predictions_certain = []
self.true_labels_certain = []
self.prediction_uncertainties = []
self.predictions_all = []
self.true_labels_all = []
self.train_acc = 0
filename = './VAE-base/checkpoints/pretrained/pretrained_encoder.sav'
#pickle.dump(self.encoder, open(filename, 'wb'))
'''
avg_test_acc = torch.mean(
torch.Tensor([o['test_accuracy'] for o in test_out]))
print('average test accuracy: ' + str(avg_test_acc))
'''
def unfreeze_subnet(self,net):
for param in net.parameters():
param.requires_grad = True
def freeze_subnet(self, net):
for param in net.parameters():
param.requires_grad = False
def configure_optimizers(self):
### if this is a new net, we freeze the enc/dec for the first few iteration
##self.freeze_subnet(self.encoder)
#self.freeze_subnet(self.decoder)
return torch.optim.Adam(self.parameters(), lr=self.lr)
### TODO ############################
def generate_data(self,z_original = None, number_of_samples=1):
if z_original == None:
z = self.latent_probability_model.sample_prior(number_of_samples)
else:
mult = torch.ones(z_original[0].shape).to('cuda')
mult[4] = -1
z = z_original[0] * mult
z = torch.unsqueeze(z, 0)
return self.decoder(z)
def visualize_data_point(self, data_point, figure_axis):
data_point = data_point.cpu().detach().numpy()
for channelInd in range(data_point.shape[0]):
channel_data = data_point[channelInd]
self.figure['axes'][channelInd, figure_axis].set_ylim(np.min(channel_data), np.max(channel_data))
self.figure['lines'][channelInd + 8 *figure_axis].set_ydata(channel_data)
self.figure['figure'].canvas.draw()
self.figure['figure'].canvas.flush_events()
def calc_confusion_matrix(self, true_labels, predicted_labels):
cm = np.zeros((6,6))
for ind in range(true_labels.shape[0]):
cm[true_labels[ind]][predicted_labels[ind]] += 1
return cm
def calibration_evaluation(self):
self.prediction_uncertainties = np.asarray(self.prediction_uncertainties)
self.predictions_all = np.asarray(self.predictions_all)
self.true_labels_all = np.asarray(self.true_labels_all)
prediction_order_by_uncertainty = self.prediction_uncertainties.argsort()
ordered_predictions = self.predictions_all[prediction_order_by_uncertainty]
ordered_labels = self.true_labels_all[prediction_order_by_uncertainty]
bin_size = 1
cutoff_point = 1
accuracies = []
fraction = []
while cutoff_point < np.size(ordered_predictions):
a = ordered_predictions[:cutoff_point] == ordered_labels[:cutoff_point]
accuracies.append(np.sum(a) / len(a))
fraction.append(cutoff_point / np.size(ordered_predictions))
cutoff_point += bin_size
fig = plt.figure(5)
plt.xlabel("Fraction of included data points")
plt.ylabel("Accuracy")
plt.plot(fraction, accuracies)
plt.show()
def ranked_evaluation(self):
self.prediction_uncertainties = np.asarray(self.prediction_uncertainties)
self.predictions_all = np.asarray(self.predictions_all)
self.true_labels_all = np.asarray(self.true_labels_all)
prediction_order_by_uncertainty = self.prediction_uncertainties.argsort()
ordered_predictions = self.predictions_all[prediction_order_by_uncertainty]
ordered_labels = self.true_labels_all[prediction_order_by_uncertainty]
twentyfive_p = False
fifty_p = False
seventyfive_p = False
bin_size = 10
cutoff_point = bin_size
accuracies = []
binned_acc = []
fraction = []
while cutoff_point < np.size(ordered_predictions):
a = ordered_predictions[:cutoff_point] == ordered_labels[:cutoff_point]
a_binned= ordered_predictions[cutoff_point - bin_size:cutoff_point] == ordered_labels[cutoff_point-bin_size:cutoff_point]
ac = np.sum(a) / len(a)
ac_binned = np.sum(a_binned) / len(a_binned)
accuracies.append(ac)
binned_acc.append(ac_binned)
f = cutoff_point / np.size(ordered_predictions)
fraction.append(f)
if f >= 0.25 and not twentyfive_p:
print("acc at 75 percent removed: " + str(ac))
twentyfive_p = True
if f >= 0.5 and not fifty_p:
print("acc at 50 percent removed: " + str(ac))
fifty_p = True
if f >= 0.75 and not seventyfive_p:
print("acc at 25 percent removed: " + str(ac))
seventyfive_p = True
cutoff_point += bin_size
fig, axs = plt.subplots(1, 2, figsize=(20,30))
axs[0].plot(fraction, accuracies)
axs[ 0].set_title('Cumulative')
axs[0].set_xlabel("Fraction of included data points")
axs[0].set_ylabel("Accuracy")
axs[1].plot(fraction, binned_acc)
axs[1].set_title('Binned')
axs[1].set_xlabel("Bin")
axs[1].set_ylabel("Accuracy")
plt.show()
plt.savefig("ranked_eval.png")
def visualize_confusion_matrices(self):
triage_cat_names = ['Normal', 'Not acute', 'Subacute', 'Acute-arrythmia', 'Acute-ischaemia', 'Normal-pacemaker']
self.acc = torch.cat(self.acc)
self.uncertain_prediction = torch.cat(self.uncertain_prediction)
self.certain_prediction = torch.cat(self.certain_prediction)
print("train accuracy: ")
print(self.train_acc / self.current_batch_idx)
print("prediction accuracy: ")
print(torch.sum(self.acc) / self.acc.shape[0])
print("accuracy on certain prediction - " + str(torch.sum(self.certain_prediction)))
print(torch.sum(self.acc * self.certain_prediction) / torch.sum(self.certain_prediction))
print("accuracy on uncertain prediction - " + str(torch.sum(self.uncertain_prediction)))
print(torch.sum(self.acc * self.uncertain_prediction) / torch.sum(self.uncertain_prediction))
cm_all_df = pd.DataFrame(self.cm_all, index = triage_cat_names,
columns = triage_cat_names)
cm_all_df = cm_all_df[cm_all_df.columns].astype(float)
cm_certain_df = pd.DataFrame(self.cm_certain, index = triage_cat_names,
columns = triage_cat_names)
cm_certain_df = cm_certain_df[cm_certain_df.columns].astype(float)
cm_uncertain_df = pd.DataFrame(self.cm_uncertain, index = triage_cat_names,
columns = triage_cat_names)
cm_uncertain_df = cm_uncertain_df[cm_uncertain_df.columns].astype(float)
fig, ax = plt.subplots(1,3, figsize=(20,30))
fig.suptitle('Confusion matrices')
ax[0].set_title("All predictions (" + str(self.acc.shape[0]) + ")")
ax[1].set_title("Certain predictions (" + str(torch.sum(self.certain_prediction).item()) + ")")
ax[2].set_title("Uncertain predictions (" + str( torch.sum(self.uncertain_prediction).item()) + ")")
sn.heatmap(cm_all_df, annot=True, ax = ax[0])
sn.heatmap(cm_certain_df, annot=True, ax = ax[1])
sn.heatmap(cm_uncertain_df, annot=True, ax = ax[2])
print("Macro F1 score")
mF1 = f1_score(self.true_labels_certain, self.predictions_certain, average='macro')
print(mF1)
F1_class = f1_score(self.true_labels_certain, self.predictions_certain, average=None)
print("F1 score per class")
print(F1_class)
'''
for class_ind in range(len(F1_class)):
self.cat_weights[class_ind] = 1-F1_class[class_ind]
'''
if self.best_mF1 < mF1:
self.best_mF1 = mF1
#print("saving model")
#filename = '/workspace/uncertainty/project/systems/Mixture_of_gaussians/checkpoints/pretrained_full.sav'
#pickle.dump(self, open(filename, 'wb'))
print("Weighted F1 score")
print(f1_score(self.true_labels_certain, self.predictions_certain, average='weighted'))
print("showing")
plt.savefig("test_res.png")
plt.show()
def calc_data_probability(self, batch):
inp = batch['waveform']
###inp += ((torch.rand(inp.shape) - 0.5) * 1000).to('cuda')
aux,x = self.encoder(inp)
triage_labels = batch['label']
latent_params = self.latent_probability_model.get_latent_params(x)
p = self.latent_probability_model.p
q = self.latent_probability_model.q(latent_params)
mc_sample_num = 1024
samples = q.sample([mc_sample_num])
triage_cats = [0,1,2,3,4, 5]
#class_prior_probs = torch.Tensor([0.5, 0.5, 0.2, 0.1])
triage_cat_names = ['Normal', 'Not acute', 'Subacute', 'Acute']
probs_prior = torch.stack([p[triage_cat].log_prob(samples) for triage_cat in triage_cats],0)
latent_weights = q.log_prob(samples)
probs_weighted = probs_prior + latent_weights
probs = torch.sum(probs_prior + latent_weights ,(1,3))
full_prob_mass = torch.logsumexp((probs), 0)
full_prob_mass /= torch.min(full_prob_mass)
#probs = self.classification_layer(x)
probs_std = torch.std(probs_prior + latent_weights ,(1,3))
#probs_entropy = - torch.sum(torch.exp(probs_weighted) * probs_weighted ,(1,3))
#print(probs_entropy)
'''
cat_id = 1
idx_of_cat = triage_labels == cat_id
print("cat probs:")
print(torch.mean(probs[cat_id][idx_of_cat]))
print(torch.mean(probs[cat_id][torch.logical_not(idx_of_cat)]))
print("cat probs std:")
print(torch.mean(probs_std[cat_id][idx_of_cat]))
print(torch.mean(probs_std[cat_id][torch.logical_not(idx_of_cat)]))
#exit()
'''
'''
normal_prob_thresh = -1000000000
not_acute_prob_thresh = -90000
subacute_prob_thresh = -120000
arrythmia_thresh = -200000
isch_thresh = -200000
pacemaker_thresh = -100000
category_thresholds = [normal_prob_thresh, not_acute_prob_thresh, subacute_prob_thresh, arrythmia_thresh, isch_thresh, pacemaker_thresh]
category_order = [5,4,3,2,1,0]
max_probs = []
for index in range(probs.shape[1]):
for i in category_order:
if probs[i][index] > category_thresholds[i]:
max_probs.append(i)
break
max_probs = torch.LongTensor(max_probs).to('cuda')
'''
##probs = probs * torch.Tensor([[0.5], [0.4], [0.3], [0.2]]).to('cuda')
max_prob_vals,max_probs = torch.max(probs, 0)
##max_probs = torch.min(probs_std, 0)[1] + 1
##prob_vals = torch.max(probs, 0)[0]
##std_of_predicted = torch.stack([probs_std[triage_labels[instance] - 1][instance] for instance in range(triage_labels.shape[1])])
##std_of_predicted = torch.stack([probs_std[triage_labels[instance] - 1][instance] for instance in range(triage_labels.shape[1])])
std_of_predicted = torch.stack([probs_std[max_probs[ind]][ind] for ind in range(len(max_probs))])
std_of_predicted /= torch.max(std_of_predicted)
#print(full_prob_mass)
top2_std = torch.topk(probs_std, 2,0, largest=False)[0]
top2_diff = top2_std[1] - top2_std[0]
top2_prob = torch.topk(probs, 2,0, largest=False)[0]
top2_diff_prob = top2_prob[1] - top2_prob[0]
top2_diff_prob /= torch.max(top2_diff_prob)
top2_diff_prob = 1- top2_diff_prob
#print(top2_diff_prob)
#std_of_predicted = (full_prob_mass * std_of_predicted) / (full_prob_mass + std_of_predicted)
#print(full_prob_mass)
#print(std_of_predicted)
#print(top2_diff_prob)
#print(full_prob_mass * std_of_predicted)
std_of_predicted += full_prob_mass
#std_of_predicted *= top2_diff_prob
#print(std_of_predicted)
##std_of_predicted = torch.squeeze(std_of_predicted)
##std_of_predicted = torch.std(probs_std ,(0))
acc = max_probs == triage_labels
'''
fig, axs = plt.subplots(3, 2, figsize=(20,30))
axs[0, 0].scatter(probs[0].cpu(), probs_std[0].cpu(), c=(triage_labels == 0).cpu(), alpha=0.5)
axs[0, 0].set_title('Normal')
axs[0, 1].scatter(probs[1].cpu(), probs_std[1].cpu(), c=(triage_labels == 1).cpu(), alpha=0.5)
axs[0, 1].set_title('Not acute')
axs[1, 0].scatter(probs[2].cpu(), probs_std[2].cpu(), c=(triage_labels == 2).cpu(), alpha=0.5)
axs[1, 0].set_title('Subacute')
axs[1, 1].scatter(probs[3].cpu(), probs_std[3].cpu(), c=(triage_labels == 3).cpu(), alpha=0.5)
axs[1, 1].set_title('Arrythmia')
axs[2, 0].scatter(probs[4].cpu(), probs_std[4].cpu(), c=(triage_labels == 4).cpu(), alpha=0.5)
axs[2, 0].set_title('Isch')
axs[2, 1].scatter(probs[5].cpu(), probs_std[5].cpu(), c=(triage_labels == 5).cpu(), alpha=0.5)
axs[2, 1].set_title('Pacemaker')
plt.show()
'''
self.acc.append(acc)
std_thresh = [1.6,1.6,1.6,1.6,1.6,1.6]
#low_std = [std_of_predicted[i] < std_thresh[max_probs[i]] for i in range(max_probs.shape[0])]
#low_std = torch.BoolTensor(low_std).to('cuda')
low_std = (std_of_predicted < 1.8)
#stds_apart = top2_diff > 0.7
#entropy_of_predicted = torch.stack([probs_entropy[max_probs[ind]][ind] for ind in range(len(max_probs))])
#print(entropy_of_predicted)
#low_entropy = entropy_of_predicted < 2040
certain_prediction = low_std #* stds_apart
'''
tsne(p, samples[:,0,:], mc_sample_num, self.latent_dim )
tsne(p, samples[:,1,:], mc_sample_num, self.latent_dim )
tsne(p, samples[:,2,:], mc_sample_num, self.latent_dim )
tsne(p, samples[:,3,:], mc_sample_num, self.latent_dim )
'''
uncertain_prediction = certain_prediction == False
self.certain_prediction.append(certain_prediction)
self.uncertain_prediction.append(uncertain_prediction)
certainly_predicted_labes = max_probs[certain_prediction]
uncertainly_predicted_labes = max_probs[uncertain_prediction]
certainly_predicted_labes_true = triage_labels[certain_prediction]
uncertainly_predicted_labes_true = triage_labels[uncertain_prediction]
### store to calculate F1 scores on certain preds, and ranked eval on all
self.predictions_certain.extend(certainly_predicted_labes.cpu().detach().numpy())
self.true_labels_certain.extend(certainly_predicted_labes_true.cpu().detach().numpy())
self.predictions_all.extend(max_probs.cpu().detach().numpy())
self.true_labels_all.extend(triage_labels.cpu().detach().numpy())
self.prediction_uncertainties.extend(std_of_predicted.cpu().detach().numpy())
### confusion matrices
cm_all = self.calc_confusion_matrix(triage_labels.cpu(), max_probs.cpu())
self.cm_all += cm_all
cm_certain = self.calc_confusion_matrix(certainly_predicted_labes_true.cpu(), certainly_predicted_labes.cpu())
self.cm_certain += cm_certain
cm_uncertain = self.calc_confusion_matrix(uncertainly_predicted_labes_true.cpu(), uncertainly_predicted_labes.cpu())
self.cm_uncertain += cm_uncertain
def save_model(self):
pass
########################################
@staticmethod
def add_model_specific_args(parent_parser):
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument('--model_name', type=str, default='VAE_mixture')
parser.add_argument('--ensembling_method', type=bool, default=False)
return parser
| [
"seaborn.heatmap",
"argparse.ArgumentParser",
"systems.Mixture_of_gaussians.ResNet.ResNet18Dec",
"numpy.sum",
"torch.cat",
"torch.full",
"matplotlib.pyplot.figure",
"sklearn.metrics.f1_score",
"torch.std",
"torch.isnan",
"sys.path.append",
"neptune.init",
"pandas.DataFrame",
"torch.nn.init... | [((687, 711), 'sys.path.append', 'sys.path.append', (['"""../.."""'], {}), "('../..')\n", (702, 711), False, 'import sys\n'), ((1628, 1662), 'systems.ecgresnet_uncertainty.ECGResNetUncertaintySystem', 'ECGResNetUncertaintySystem', ([], {}), '(**args)\n', (1654, 1662), False, 'from systems.ecgresnet_uncertainty import ECGResNetUncertaintySystem\n'), ((2134, 2202), 'systems.Mixture_of_gaussians.latent_probability_models.Mixture_of_Gaussians', 'Mixture_of_Gaussians', (["args['latent_dim']", "args['encoder_output_dim']"], {}), "(args['latent_dim'], args['encoder_output_dim'])\n", (2154, 2202), False, 'from systems.Mixture_of_gaussians.latent_probability_models import Mixture_of_Gaussians\n'), ((2414, 2431), 'torch.zeros', 'torch.zeros', (['(6)', '(6)'], {}), '(6, 6)\n', (2425, 2431), False, 'import torch\n'), ((2457, 2474), 'torch.zeros', 'torch.zeros', (['(6)', '(6)'], {}), '(6, 6)\n', (2468, 2474), False, 'import torch\n'), ((2502, 2519), 'torch.zeros', 'torch.zeros', (['(6)', '(6)'], {}), '(6, 6)\n', (2513, 2519), False, 'import torch\n'), ((3253, 3327), 'neptune.init', 'neptune.init', ([], {'project_qualified_name': 'project_name', 'api_token': 'neptune_token'}), '(project_qualified_name=project_name, api_token=neptune_token)\n', (3265, 3327), False, 'import neptune\n'), ((3750, 3759), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (3757, 3759), True, 'import matplotlib.pyplot as plt\n'), ((3784, 3820), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(8)', '(4)'], {'figsize': '(20, 30)'}), '(8, 4, figsize=(20, 30))\n', (3796, 3820), True, 'import matplotlib.pyplot as plt\n'), ((6573, 6625), 'torch.stack', 'torch.stack', (['[batch[label] for label in keys[4:]]', '(0)'], {}), '([batch[label] for label in keys[4:]], 0)\n', (6584, 6625), False, 'import torch\n'), ((9076, 9114), 'neptune.log_metric', 'neptune.log_metric', (['"""total_loss"""', 'loss'], {}), "('total_loss', loss)\n", (9094, 9114), False, 'import neptune\n'), ((10723, 10740), 
'torch.zeros', 'torch.zeros', (['(6)', '(6)'], {}), '(6, 6)\n', (10734, 10740), False, 'import torch\n'), ((10767, 10784), 'torch.zeros', 'torch.zeros', (['(6)', '(6)'], {}), '(6, 6)\n', (10778, 10784), False, 'import torch\n'), ((10813, 10830), 'torch.zeros', 'torch.zeros', (['(6)', '(6)'], {}), '(6, 6)\n', (10824, 10830), False, 'import torch\n'), ((11591, 11608), 'torch.zeros', 'torch.zeros', (['(6)', '(6)'], {}), '(6, 6)\n', (11602, 11608), False, 'import torch\n'), ((11635, 11652), 'torch.zeros', 'torch.zeros', (['(6)', '(6)'], {}), '(6, 6)\n', (11646, 11652), False, 'import torch\n'), ((11681, 11698), 'torch.zeros', 'torch.zeros', (['(6)', '(6)'], {}), '(6, 6)\n', (11692, 11698), False, 'import torch\n'), ((13876, 13892), 'numpy.zeros', 'np.zeros', (['(6, 6)'], {}), '((6, 6))\n', (13884, 13892), True, 'import numpy as np\n'), ((14099, 14140), 'numpy.asarray', 'np.asarray', (['self.prediction_uncertainties'], {}), '(self.prediction_uncertainties)\n', (14109, 14140), True, 'import numpy as np\n'), ((14172, 14204), 'numpy.asarray', 'np.asarray', (['self.predictions_all'], {}), '(self.predictions_all)\n', (14182, 14204), True, 'import numpy as np\n'), ((14236, 14268), 'numpy.asarray', 'np.asarray', (['self.true_labels_all'], {}), '(self.true_labels_all)\n', (14246, 14268), True, 'import numpy as np\n'), ((14933, 14946), 'matplotlib.pyplot.figure', 'plt.figure', (['(5)'], {}), '(5)\n', (14943, 14946), True, 'import matplotlib.pyplot as plt\n'), ((14955, 15001), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Fraction of included data points"""'], {}), "('Fraction of included data points')\n", (14965, 15001), True, 'import matplotlib.pyplot as plt\n'), ((15010, 15032), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (15020, 15032), True, 'import matplotlib.pyplot as plt\n'), ((15042, 15072), 'matplotlib.pyplot.plot', 'plt.plot', (['fraction', 'accuracies'], {}), '(fraction, accuracies)\n', (15050, 15072), True, 'import 
matplotlib.pyplot as plt\n'), ((15081, 15091), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15089, 15091), True, 'import matplotlib.pyplot as plt\n'), ((15167, 15208), 'numpy.asarray', 'np.asarray', (['self.prediction_uncertainties'], {}), '(self.prediction_uncertainties)\n', (15177, 15208), True, 'import numpy as np\n'), ((15240, 15272), 'numpy.asarray', 'np.asarray', (['self.predictions_all'], {}), '(self.predictions_all)\n', (15250, 15272), True, 'import numpy as np\n'), ((15304, 15336), 'numpy.asarray', 'np.asarray', (['self.true_labels_all'], {}), '(self.true_labels_all)\n', (15314, 15336), True, 'import numpy as np\n'), ((16833, 16869), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(20, 30)'}), '(1, 2, figsize=(20, 30))\n', (16845, 16869), True, 'import matplotlib.pyplot as plt\n'), ((17214, 17224), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17222, 17224), True, 'import matplotlib.pyplot as plt\n'), ((17234, 17264), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""ranked_eval.png"""'], {}), "('ranked_eval.png')\n", (17245, 17264), True, 'import matplotlib.pyplot as plt\n'), ((17462, 17481), 'torch.cat', 'torch.cat', (['self.acc'], {}), '(self.acc)\n', (17471, 17481), False, 'import torch\n'), ((17518, 17554), 'torch.cat', 'torch.cat', (['self.uncertain_prediction'], {}), '(self.uncertain_prediction)\n', (17527, 17554), False, 'import torch\n'), ((17589, 17623), 'torch.cat', 'torch.cat', (['self.certain_prediction'], {}), '(self.certain_prediction)\n', (17598, 17623), False, 'import torch\n'), ((18227, 18302), 'pandas.DataFrame', 'pd.DataFrame', (['self.cm_all'], {'index': 'triage_cat_names', 'columns': 'triage_cat_names'}), '(self.cm_all, index=triage_cat_names, columns=triage_cat_names)\n', (18239, 18302), True, 'import pandas as pd\n'), ((18413, 18492), 'pandas.DataFrame', 'pd.DataFrame', (['self.cm_certain'], {'index': 'triage_cat_names', 'columns': 'triage_cat_names'}), '(self.cm_certain, 
index=triage_cat_names, columns=triage_cat_names)\n', (18425, 18492), True, 'import pandas as pd\n'), ((18617, 18703), 'pandas.DataFrame', 'pd.DataFrame', (['self.cm_uncertain'], {'index': 'triage_cat_names', 'columns': 'triage_cat_names'}), '(self.cm_uncertain, index=triage_cat_names, columns=\n triage_cat_names)\n', (18629, 18703), True, 'import pandas as pd\n'), ((18822, 18858), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(20, 30)'}), '(1, 3, figsize=(20, 30))\n', (18834, 18858), True, 'import matplotlib.pyplot as plt\n'), ((19201, 19244), 'seaborn.heatmap', 'sn.heatmap', (['cm_all_df'], {'annot': '(True)', 'ax': 'ax[0]'}), '(cm_all_df, annot=True, ax=ax[0])\n', (19211, 19244), True, 'import seaborn as sn\n'), ((19255, 19302), 'seaborn.heatmap', 'sn.heatmap', (['cm_certain_df'], {'annot': '(True)', 'ax': 'ax[1]'}), '(cm_certain_df, annot=True, ax=ax[1])\n', (19265, 19302), True, 'import seaborn as sn\n'), ((19313, 19362), 'seaborn.heatmap', 'sn.heatmap', (['cm_uncertain_df'], {'annot': '(True)', 'ax': 'ax[2]'}), '(cm_uncertain_df, annot=True, ax=ax[2])\n', (19323, 19362), True, 'import seaborn as sn\n'), ((19413, 19490), 'sklearn.metrics.f1_score', 'f1_score', (['self.true_labels_certain', 'self.predictions_certain'], {'average': '"""macro"""'}), "(self.true_labels_certain, self.predictions_certain, average='macro')\n", (19421, 19490), False, 'from sklearn.metrics import confusion_matrix, f1_score\n'), ((19529, 19603), 'sklearn.metrics.f1_score', 'f1_score', (['self.true_labels_certain', 'self.predictions_certain'], {'average': 'None'}), '(self.true_labels_certain, self.predictions_certain, average=None)\n', (19537, 19603), False, 'from sklearn.metrics import confusion_matrix, f1_score\n'), ((20235, 20262), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""test_res.png"""'], {}), "('test_res.png')\n", (20246, 20262), True, 'import matplotlib.pyplot as plt\n'), ((20271, 20281), 'matplotlib.pyplot.show', 'plt.show', ([], {}), 
'()\n', (20279, 20281), True, 'import matplotlib.pyplot as plt\n'), ((21182, 21229), 'torch.sum', 'torch.sum', (['(probs_prior + latent_weights)', '(1, 3)'], {}), '(probs_prior + latent_weights, (1, 3))\n', (21191, 21229), False, 'import torch\n'), ((21255, 21280), 'torch.logsumexp', 'torch.logsumexp', (['probs', '(0)'], {}), '(probs, 0)\n', (21270, 21280), False, 'import torch\n'), ((21310, 21335), 'torch.min', 'torch.min', (['full_prob_mass'], {}), '(full_prob_mass)\n', (21319, 21335), False, 'import torch\n'), ((21402, 21449), 'torch.std', 'torch.std', (['(probs_prior + latent_weights)', '(1, 3)'], {}), '(probs_prior + latent_weights, (1, 3))\n', (21411, 21449), False, 'import torch\n'), ((22846, 22865), 'torch.max', 'torch.max', (['probs', '(0)'], {}), '(probs, 0)\n', (22855, 22865), False, 'import torch\n'), ((23381, 23408), 'torch.max', 'torch.max', (['std_of_predicted'], {}), '(std_of_predicted)\n', (23390, 23408), False, 'import torch\n'), ((23690, 23715), 'torch.max', 'torch.max', (['top2_diff_prob'], {}), '(top2_diff_prob)\n', (23699, 23715), False, 'import torch\n'), ((27796, 27851), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'parents': '[parent_parser]', 'add_help': '(False)'}), '(parents=[parent_parser], add_help=False)\n', (27810, 27851), False, 'from argparse import ArgumentParser\n'), ((2074, 2093), 'systems.Mixture_of_gaussians.ResNet.ResNet18Dec', 'ResNet18Dec', ([], {}), '(**args)\n', (2085, 2093), False, 'from systems.Mixture_of_gaussians.ResNet import ResNet18Dec\n'), ((3666, 3690), 'torch.nn.Linear', 'nn.Linear', (['latent_dim', '(6)'], {}), '(latent_dim, 6)\n', (3675, 3690), True, 'import torch.nn as nn\n'), ((8469, 8506), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['recon', 'x'], {'reduction': '"""sum"""'}), "(recon, x, reduction='sum')\n", (8479, 8506), True, 'from torch.nn import functional as F\n'), ((9328, 9340), 'torch.max', 'torch.max', (['z'], {}), '(z)\n', (9337, 9340), False, 'import torch\n'), ((9390, 9402), 
'torch.max', 'torch.max', (['x'], {}), '(x)\n', (9399, 9402), False, 'import torch\n'), ((9456, 9472), 'torch.max', 'torch.max', (['x_hat'], {}), '(x_hat)\n', (9465, 9472), False, 'import torch\n'), ((9548, 9560), 'torch.min', 'torch.min', (['z'], {}), '(z)\n', (9557, 9560), False, 'import torch\n'), ((9610, 9622), 'torch.min', 'torch.min', (['x'], {}), '(x)\n', (9619, 9622), False, 'import torch\n'), ((9676, 9692), 'torch.min', 'torch.min', (['x_hat'], {}), '(x_hat)\n', (9685, 9692), False, 'import torch\n'), ((10211, 10263), 'torch.Tensor', 'torch.Tensor', (["[o['loss'] for o in training_step_out]"], {}), "([o['loss'] for o in training_step_out])\n", (10223, 10263), False, 'import torch\n'), ((13226, 13247), 'torch.unsqueeze', 'torch.unsqueeze', (['z', '(0)'], {}), '(z, 0)\n', (13241, 13247), False, 'import torch\n'), ((14645, 14673), 'numpy.size', 'np.size', (['ordered_predictions'], {}), '(ordered_predictions)\n', (14652, 14673), True, 'import numpy as np\n'), ((15822, 15850), 'numpy.size', 'np.size', (['ordered_predictions'], {}), '(ordered_predictions)\n', (15829, 15850), True, 'import numpy as np\n'), ((20119, 20204), 'sklearn.metrics.f1_score', 'f1_score', (['self.true_labels_certain', 'self.predictions_certain'], {'average': '"""weighted"""'}), "(self.true_labels_certain, self.predictions_certain, average='weighted'\n )\n", (20127, 20204), False, 'from sklearn.metrics import confusion_matrix, f1_score\n'), ((23459, 23501), 'torch.topk', 'torch.topk', (['probs_std', '(2)', '(0)'], {'largest': '(False)'}), '(probs_std, 2, 0, largest=False)\n', (23469, 23501), False, 'import torch\n'), ((23570, 23608), 'torch.topk', 'torch.topk', (['probs', '(2)', '(0)'], {'largest': '(False)'}), '(probs, 2, 0, largest=False)\n', (23580, 23608), False, 'import torch\n'), ((7899, 7914), 'torch.isnan', 'torch.isnan', (['kl'], {}), '(kl)\n', (7910, 7914), False, 'import torch\n'), ((13568, 13588), 'numpy.min', 'np.min', (['channel_data'], {}), '(channel_data)\n', (13574, 13588), 
True, 'import numpy as np\n'), ((13590, 13610), 'numpy.max', 'np.max', (['channel_data'], {}), '(channel_data)\n', (13596, 13610), True, 'import numpy as np\n'), ((16087, 16096), 'numpy.sum', 'np.sum', (['a'], {}), '(a)\n', (16093, 16096), True, 'import numpy as np\n'), ((16130, 16146), 'numpy.sum', 'np.sum', (['a_binned'], {}), '(a_binned)\n', (16136, 16146), True, 'import numpy as np\n'), ((16269, 16297), 'numpy.size', 'np.size', (['ordered_predictions'], {}), '(ordered_predictions)\n', (16276, 16297), True, 'import numpy as np\n'), ((17766, 17785), 'torch.sum', 'torch.sum', (['self.acc'], {}), '(self.acc)\n', (17775, 17785), False, 'import torch\n'), ((17914, 17959), 'torch.sum', 'torch.sum', (['(self.acc * self.certain_prediction)'], {}), '(self.acc * self.certain_prediction)\n', (17923, 17959), False, 'import torch\n'), ((17962, 17996), 'torch.sum', 'torch.sum', (['self.certain_prediction'], {}), '(self.certain_prediction)\n', (17971, 17996), False, 'import torch\n'), ((18109, 18156), 'torch.sum', 'torch.sum', (['(self.acc * self.uncertain_prediction)'], {}), '(self.acc * self.uncertain_prediction)\n', (18118, 18156), False, 'import torch\n'), ((18159, 18195), 'torch.sum', 'torch.sum', (['self.uncertain_prediction'], {}), '(self.uncertain_prediction)\n', (18168, 18195), False, 'import torch\n'), ((5526, 5561), 'torch.nn.init.xavier_normal', 'torch.nn.init.xavier_normal', (['p.data'], {}), '(p.data)\n', (5553, 5561), False, 'import torch\n'), ((5604, 5632), 'torch.nn.init.normal', 'torch.nn.init.normal', (['p.data'], {}), '(p.data)\n', (5624, 5632), False, 'import torch\n'), ((13105, 13136), 'torch.ones', 'torch.ones', (['z_original[0].shape'], {}), '(z_original[0].shape)\n', (13115, 13136), False, 'import torch\n'), ((14789, 14798), 'numpy.sum', 'np.sum', (['a'], {}), '(a)\n', (14795, 14798), True, 'import numpy as np\n'), ((14852, 14880), 'numpy.size', 'np.size', (['ordered_predictions'], {}), '(ordered_predictions)\n', (14859, 14880), True, 'import numpy as 
np\n'), ((17863, 17897), 'torch.sum', 'torch.sum', (['self.certain_prediction'], {}), '(self.certain_prediction)\n', (17872, 17897), False, 'import torch\n'), ((18056, 18092), 'torch.sum', 'torch.sum', (['self.uncertain_prediction'], {}), '(self.uncertain_prediction)\n', (18065, 18092), False, 'import torch\n'), ((3440, 3454), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3452, 3454), False, 'from datetime import datetime\n'), ((7047, 7106), 'torch.full', 'torch.full', (['[self.latent_dim]', 'self.cat_weights[triage_cat]'], {}), '([self.latent_dim], self.cat_weights[triage_cat])\n', (7057, 7106), False, 'import torch\n'), ((7148, 7180), 'torch.full', 'torch.full', (['[self.latent_dim]', '(0)'], {}), '([self.latent_dim], 0)\n', (7158, 7180), False, 'import torch\n'), ((19032, 19066), 'torch.sum', 'torch.sum', (['self.certain_prediction'], {}), '(self.certain_prediction)\n', (19041, 19066), False, 'import torch\n'), ((19139, 19175), 'torch.sum', 'torch.sum', (['self.uncertain_prediction'], {}), '(self.uncertain_prediction)\n', (19148, 19175), False, 'import torch\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2017, AGB & GC
# Full license can be found in License.md
# ----------------------------------------------------------------------------
""" Functions that specify the boundary location as a function of MLT
Functions
---------
circular(mlt, [r_add])
Return a circular boundary correction for a specified offset
elliptical(mlt, [instrument, method])
Return the ellptical boundary correction for a data set and method
harmonic(mlt, [instrument, method])
Return the harmonic boundary correction for a data set and method
References
----------
Burrell, A. G. et al.: AMPERE Polar Cap Boundaries, Ann. Geophys., 38, 481-490,
doi:10.5194/angeo-38-481-2020, 2020.
"""
import numpy as np
from ocbpy.ocb_time import hr2rad
def circular(mlt, r_add=0.0):
    """Return a circular boundary correction.

    Parameters
    ----------
    mlt : (float or array-like)
        Magnetic local time in hours.  Not used for a circle, but kept so
        every correction function shares the same signature.
    r_add : (float)
        Offset added to the default radius in degrees.  Positive values move
        the boundary equatorward, negative values move it poleward.
        (default=0.0)

    Returns
    -------
    r_corr : (float or array-like)
        Radius correction in degrees at each MLT, shaped like `mlt`
    """
    # A circle has the same offset at every local time, so simply broadcast
    # the requested offset to the shape of the MLT input.
    local_time = np.asarray(mlt)
    return np.full(shape=local_time.shape, fill_value=r_add)
def elliptical(mlt, instrument='ampere', method='median'):
    """Return the elliptical correction to the data boundary.

    Parameters
    ----------
    mlt : (float or array-like)
        Magnetic local time in hours
    instrument : (str)
        Data set's instrument name; only 'ampere' is supported
        (default='ampere')
    method : (str)
        Method used to calculate the elliptical correction, accepts
        'median' or 'gaussian'. (default='median')

    Returns
    -------
    r_corr : (float or array-like)
        Radius correction in degrees at this MLT (negative, i.e. poleward)

    Raises
    ------
    ValueError
        If the instrument or method is not supported

    References
    ----------
    Burrell, A. G. et al.: AMPERE Polar Cap Boundaries, Ann. Geophys., 38,
    481-490, doi:10.5194/angeo-38-481-2020, 2020.
    """
    if instrument.lower() != 'ampere':
        raise ValueError("no elliptical correction for {:}".format(instrument))
    method = method.lower()
    coeff = {"median": {"a": 4.01, "e": 0.55, "t": -0.92},
             "gaussian": {"a": 4.41, "e": 0.51, "t": -0.95}}
    if method not in coeff:
        raise ValueError("unknown coefficient computation method")
    ellipse = coeff[method]
    semi_major = ellipse["a"]
    ecc = ellipse["e"]
    theta = ellipse["t"]
    # Polar form of an ellipse: r = a (1 - e^2) / (1 + e cos(phi - theta))
    mlt_rad = hr2rad(mlt)
    numer = semi_major * (1.0 - ecc ** 2)
    denom = 1.0 + ecc * np.cos(mlt_rad - theta)
    # This is a poleward correction, so return the negative radius
    return -(numer / denom)
def harmonic(mlt, instrument='ampere', method='median'):
    """Return the harmonic fit correction to the data boundary.

    Parameters
    ----------
    mlt : (float or array-like)
        Magnetic local time in hours
    instrument : (str)
        Data set's instrument name; only 'ampere' is supported
        (default='ampere')
    method : (str)
        Method used to determine coefficients; accepts 'median' or
        'gaussian' (default='median')

    Returns
    -------
    r_corr : (float or array-like)
        Radius correction in degrees at this MLT (negative, i.e. poleward)

    Raises
    ------
    ValueError
        If the instrument or method is not supported

    References
    ----------
    Burrell, A. G. et al.: AMPERE Polar Cap Boundaries, Ann. Geophys., 38,
    481-490, doi:10.5194/angeo-38-481-2020, 2020.
    """
    if instrument.lower() != 'ampere':
        raise ValueError("no harmonic correction for {:}".format(instrument))
    method = method.lower()
    # fit[0] is the constant term; each harmonic order n then contributes
    # fit[b+1]*cos(n*(x + fit[b+2])) + fit[b+3]*sin(n*(x + fit[b+4]))
    # with b = 4*(n - 1).
    coeff = {'median': [3.31000535, -0.5452934, -1.24389141, 2.42619653,
                        -0.66677988, -1.03467488, -0.30763009, 0.52426756,
                        0.04359299, 0.60201848, 0.50618522, 1.04360529,
                        0.25186405],
             'gaussian': [3.80100827, 0.98555723, -3.43760943, 1.85084271,
                          -0.36730751, -0.81975654, -1.02823832, 1.30637288,
                          -0.53599218, 0.40380183, -1.22462708, -1.2733629,
                          -0.62743381]}
    if method not in coeff:
        raise ValueError("unknown coefficient computation method")
    fit = coeff[method]
    rad_mlt = hr2rad(mlt)
    r_corr = fit[0]
    for order in (1, 2, 3):
        base = 4 * (order - 1)
        r_corr = r_corr + fit[base + 1] * np.cos(order * (rad_mlt + fit[base + 2]))
        r_corr = r_corr + fit[base + 3] * np.sin(order * (rad_mlt + fit[base + 4]))
    # Because this is a poleward shift, return the negative of the correction
    return -r_corr
| [
"numpy.full",
"numpy.asarray",
"numpy.sin",
"ocbpy.ocb_time.hr2rad",
"numpy.cos"
] | [((1313, 1328), 'numpy.asarray', 'np.asarray', (['mlt'], {}), '(mlt)\n', (1323, 1328), True, 'import numpy as np\n'), ((1342, 1384), 'numpy.full', 'np.full', ([], {'shape': 'mlt.shape', 'fill_value': 'r_add'}), '(shape=mlt.shape, fill_value=r_add)\n', (1349, 1384), True, 'import numpy as np\n'), ((2510, 2521), 'ocbpy.ocb_time.hr2rad', 'hr2rad', (['mlt'], {}), '(mlt)\n', (2516, 2521), False, 'from ocbpy.ocb_time import hr2rad\n'), ((4241, 4252), 'ocbpy.ocb_time.hr2rad', 'hr2rad', (['mlt'], {}), '(mlt)\n', (4247, 4252), False, 'from ocbpy.ocb_time import hr2rad\n'), ((4660, 4703), 'numpy.sin', 'np.sin', (['(3.0 * (rad_mlt + coeff[method][12]))'], {}), '(3.0 * (rad_mlt + coeff[method][12]))\n', (4666, 4703), True, 'import numpy as np\n'), ((2627, 2663), 'numpy.cos', 'np.cos', (["(mlt_rad - coeff[method]['t'])"], {}), "(mlt_rad - coeff[method]['t'])\n", (2633, 2663), True, 'import numpy as np\n'), ((4586, 4629), 'numpy.cos', 'np.cos', (['(3.0 * (rad_mlt + coeff[method][10]))'], {}), '(3.0 * (rad_mlt + coeff[method][10]))\n', (4592, 4629), True, 'import numpy as np\n'), ((4514, 4556), 'numpy.sin', 'np.sin', (['(2.0 * (rad_mlt + coeff[method][8]))'], {}), '(2.0 * (rad_mlt + coeff[method][8]))\n', (4520, 4556), True, 'import numpy as np\n'), ((4442, 4484), 'numpy.cos', 'np.cos', (['(2.0 * (rad_mlt + coeff[method][6]))'], {}), '(2.0 * (rad_mlt + coeff[method][6]))\n', (4448, 4484), True, 'import numpy as np\n'), ((4378, 4412), 'numpy.sin', 'np.sin', (['(rad_mlt + coeff[method][4])'], {}), '(rad_mlt + coeff[method][4])\n', (4384, 4412), True, 'import numpy as np\n'), ((4314, 4348), 'numpy.cos', 'np.cos', (['(rad_mlt + coeff[method][2])'], {}), '(rad_mlt + coeff[method][2])\n', (4320, 4348), True, 'import numpy as np\n')] |
# from mmdet.apis import init_detector, inference_detector
# import mmcv
# import cv2
# import demo1
# import numpy as np
# # Specify the path to model config and checkpoint file
# config_file = 'configs/hrnet/cascade_rcnn_hrnetv2p_w32_20e_coco.py'
# #checkpoint_file = 'work_dirs/cascade_rcnn_hrnetv2p_w32_20e_coco/latest.pth'
# checkpoint_file = 'work_dirs/cascade_rcnn_hrnetv2p_w32_20e_coco/epoch_500.pth'
# # build the model from a config file and a checkpoint file
# #model = init_detector(config_file, checkpoint_file, device='cpu')
# model = init_detector(config_file, checkpoint_file, device='cuda:0')
# # test a single image and show the results
# img = 'demo/demo.jpg' # or img = mmcv.imread(img), which will only load it once
# result = inference_detector(model, img)
# # visualize the results in a new window
# # model.show_result(img, result)
# # or save the visualization results to image files
# model.show_result(img, result, out_file='res/result.jpg')
# # liang
# import glob
# import os
# file_path = "demo/images/" # 文件夹路径
# savepath = "demo/res/"
# resultpath = "demo/cut/"
# images_path = glob.glob(os.path.join(file_path + '*.png')) # 所有图片路径
# for image_path in images_path:
# img = image_path
# result = inference_detector(model, img)
# # print(result[1].size)
# # print(result)
# if result[8].size == 0:
# continue
# bbox_=[int(i) for i in result[8][0][0:-1]]
# image = cv2.imread(img)
# new_img = image[bbox_[1]:bbox_[3],bbox_[0]:bbox_[2],:]
# w = new_img.shape[1]
# h = new_img.shape[0]
# len_w = w//4
# new_img_1 = new_img[:,0:len_w,:]
# new_img_2 = new_img[:,len_w:len_w*2,:]
# new_img_3 = new_img[:,len_w*2:len_w*3,:]
# new_img_4 = new_img[:,len_w*3:,:]
# cv2.imwrite(resultpath+"new_img_1.jpg",new_img_1)
# cv2.imwrite(resultpath+"new_img_2.jpg",new_img_2)
# cv2.imwrite(resultpath+"new_img_3.jpg",new_img_3)
# cv2.imwrite(resultpath+"new_img_4.jpg",new_img_4)
# demo1.cut_liang()
# pingjie_path="demo/cut/res/"
# img_name='new_img_'
# jzg = cv2.imread(pingjie_path+img_name+'1.jpg')
# h=jzg.shape[0]
# w=jzg.shape[1]
# for i in range(3):
# #lgz = io.imread(r'C:\Users\123\Desktop\hanjie\crop\dataset\res\W0003_0006_1.jpg') # np.ndarray, [h, w, c], 值域(0, 255), RGB
# lgz = cv2.imread(pingjie_path + img_name + str(i + 2) + '.jpg')
# w1 = lgz.shape[1]
# pj1 = np.zeros((h, w+w1, 3)) # 横向拼接
# pj1[:, :w, :] = jzg.copy() # 图片jzg在左
# pj1[:, w:, :] = lgz.copy() # 图片lgz在右
# pj1 = np.array(pj1, dtype=np.uint8) # 将pj1数组元素数据类型的改为"uint8"
# jzg = pj1
# w=jzg.shape[1]
# print(image_path.split('/')[-1])
# cv2.imwrite("demo/cut/res_pingjie/"+image_path.split('/')[-1],pj1)
# model.show_result(img, result, out_file=savepath + os.path.basename(image_path))
# hh=pj1.shape[0]
# ww=pj1.shape[1]
# pj1_new = pj1[1:hh-1,1:ww-1,:]
# imageeeeee = cv2.imread("demo/res/"+image_path.split('/')[-1])
# imageeeeee[bbox_[1]+1:bbox_[1]+1+hh-2, bbox_[0]+1:bbox_[0]+1+ww-2, :]=pj1_new.copy()
# cv2.imwrite("demo/cut/res_last/"+image_path.split('/')[-1],imageeeeee)
#东锅数据检测
#两步骤检测,先检测焊缝,再检测小缺陷
# from mmdet.apis import init_detector, inference_detector
# import mmcv
# import cv2
# import demo1
# import numpy as np
# config_file = 'configs/hrnet/cascade_rcnn_hrnetv2p_w32_20e_coco.py'
# checkpoint_file = 'work_dirs/cascade_rcnn_hrnetv2p_w32_20e_coco/Kok/16bit_pth_hanfeng_hist/epoch_10.pth'
# model = init_detector(config_file, checkpoint_file, device='cuda:0')
# import glob
# import os
# file_path = "res/" # 文件夹路径
# savepath = "res/res/"
# resultpath = "res/cut/"
# images_path = glob.glob(os.path.join(file_path + '*.jpg')) # 所有图片路径
# for image_path in images_path:
# img = image_path
# result = inference_detector(model, img)
# if result[8].size == 0:
# continue
# bbox_ = [int(i) for i in result[8][0][0:-1]]
# image = cv2.imread(img)
# new_img = image[bbox_[1]:bbox_[3], bbox_[0]:bbox_[2], :]
# w = new_img.shape[1]
# h = new_img.shape[0]
# cv2.imwrite("res/cut/"+image_path.split('/')[-1],new_img)
# model.show_result(img, result, out_file="res/hanfeng/" + os.path.basename(image_path))
# demo1.cut_liang()
# cut_img = cv2.imread("res/res/"+image_path.split('/')[-1])
# hh = cut_img.shape[0]
# ww = cut_img.shape[1]
# pj1_new = cut_img[1:hh - 1, 1:ww - 1, :]
# imageeeeee = cv2.imread("res/hanfeng/" + image_path.split('/')[-1])
# imageeeeee[bbox_[1] + 1:bbox_[1] + 1 + hh - 2, bbox_[0] + 1:bbox_[0] + 1 + ww - 2, :] = pj1_new.copy()
# cv2.imwrite("res/last/" + image_path.split('/')[-1], imageeeeee)
# # 东锅数据集,切成4块检测
# from mmdet.apis import init_detector, inference_detector
# import mmcv
# import cv2
# import demo1
# import numpy as np
# from PIL import Image
# config_file = 'configs/hrnet/cascade_rcnn_hrnetv2p_w32_20e_coco.py'
# checkpoint_file = 'work_dirs/cascade_rcnn_hrnetv2p_w32_20e_coco/Kok/16bit_pth_hanfeng/epoch_100.pth'
# model = init_detector(config_file, checkpoint_file, device='cuda:0')
# # liang
# import glob
# import os
# file_path = "to_xml_test/images/" # 文件夹路径
# savepath = "demo/res/"
# resultpath = "demo/cut/"
# images_path = glob.glob(os.path.join(file_path + '*.jpg')) # 所有图片路径
# for image_path in images_path:
# img = image_path
# result = inference_detector(model, img)
# # print(result[1].size)
# print(result)
# if result[8].size == 0:
# continue
# bbox_=[int(i) for i in result[8][0][0:-1]]
# # image = cv2.imread(img,2)
# image = Image.open(img)
# new_img = image.crop((bbox_[1],bbox_[0],bbox_[3],bbox_[2]))
# new_img = np.array(new_img)
# w = new_img.shape[1]
# h = new_img.shape[0]
# len_w = w//2
# len_h = h//2
# new_img_1 = new_img[0:len_h,0:len_w]
# new_img_2 = new_img[0:len_h,len_w:]
# new_img_3 = new_img[len_h:,0:len_w]
# new_img_4 = new_img[len_h:,len_w:]
# # cv2.imwrite(resultpath+"new_img_1.jpg",new_img_1)
# # cv2.imwrite(resultpath+"new_img_2.jpg",new_img_2)
# # cv2.imwrite(resultpath+"new_img_3.jpg",new_img_3)
# # cv2.imwrite(resultpath+"new_img_4.jpg",new_img_4)
# new_img_1 = Image.fromarray(new_img_1)
# new_img_2 = Image.fromarray(new_img_2)
# new_img_3 = Image.fromarray(new_img_3)
# new_img_4 = Image.fromarray(new_img_4)
# new_img_1.save(resultpath+"new_img_1.tif")
# new_img_2.save(resultpath + "new_img_2.tif")
# new_img_3.save(resultpath + "new_img_3.tif")
# new_img_4.save(resultpath + "new_img_4.tif")
# demo1.cut_liang()
# pingjie_path="demo/cut/res/"
# img_name='new_img_'
# # jzg = cv2.imread(pingjie_path + img_name + '1.jpg')
# jzg = Image.open(pingjie_path + img_name + '1.tif')
# jzgg = np.array(jzg)
# h = jzgg.shape[0]
# w = jzgg.shape[1]
# # lgz = cv2.imread(pingjie_path + img_name + str(2) + '.jpg',2)
# lgz = Image.open(pingjie_path + img_name + '2.tif')
# pj1 = np.zeros((h, w + w, 3)) # 横向拼接
# pj1[:, :w] = jzg.copy() # 图片jzg在左
# pj1[:, w:] = lgz.copy() # 图片lgz在右
# # pj1 = np.array(pj1, dtype=np.uint8) # 将pj1数组元素数据类型的改为"uint8"
# # jzg = cv2.imread(pingjie_path + img_name + '3.jpg') # np.ndarray, [h, w, c], 值域(0, 255), RGB
# jzg = Image.open(pingjie_path + img_name + '3.tif')
# # lgz = cv2.imread(pingjie_path + img_name + str(4) + '.jpg',2)
# lgz = Image.open(pingjie_path + img_name + '4.tif')
# pj2 = np.zeros((h, w + w, 3)) # 横向拼接
# pj2[:, :w] = jzg.copy() # 图片jzg在左
# pj2[:, w:] = lgz.copy() # 图片lgz在右
# # pj2 = np.array(pj2, dtype=np.uint8) # 将pj1数组元素数据类型的改为"uint8"
# pp1 = np.zeros((h + h, w + w, 3)) # 向拼接
# pp1[:h, :] = pj1.copy() # 图片jzg在上
# pp1[h:, :] = pj2.copy() # 图片lgz在下
# # cv2.imwrite("demo/cut/res_pingjie/"+image_path.split('/')[-1],pp1)
# pp1.save("demo/cut/res_pingjie/"+image_path.split('/')[-1].split('.')[0]+'.tif')
# model.show_result(img, result, out_file=savepath + os.path.basename(image_path))
# hh=pp1.shape[0]
# ww=pp1.shape[1]
# pp1_new = pp1[1:hh-1,1:ww-1,:]
# imageeeeee = cv2.imread("demo/res/"+image_path.split('/')[-1])
# imageeeeee[bbox_[1]+1:bbox_[1]+1+hh-2, bbox_[0]+1:bbox_[0]+1+ww-2, :]=pp1_new.copy()
# cv2.imwrite("demo/cut/res_last/"+image_path.split('/')[-1],imageeeeee)
# 保存成xml
from mmdet.apis import init_detector, inference_detector
import mmcv
import cv2
import numpy as np
from PIL import Image
import glob
import os
from xml.dom.minidom import Document
# Two-stage pipeline: detect the weld seam with one model, detect small
# defects inside the seam crop with a second model, and export the merged
# detections (in full-image coordinates) as Pascal-VOC style XML.
images_path = 'to_xml_test/images/'
hanfeng_path = 'to_xml_test/hanfeng/'
config_file = 'configs/hrnet/cascade_rcnn_hrnetv2p_w32_20e_coco.py'
checkpoint_file = 'work_dirs/cascade_rcnn_hrnetv2p_w32_20e_coco/Kok/16bit_pth_hanfeng_hist/epoch_50.pth'
model = init_detector(config_file, checkpoint_file, device='cuda:0')
checkpoint_file2 = 'work_dirs/cascade_rcnn_hrnetv2p_w32_20e_coco/Kok/16bit_pth_three_crop_hist/epoch_40.pth'
model2 = init_detector(config_file, checkpoint_file2, device='cuda:0')

# Class index in the second model's result list -> label written to the XML.
# (Insertion order matters: objects are written in this order, as before.)
CLASS_NAMES = {0: "气孔", 5: "未熔合", 6: "未焊透"}


def _append_objects(doc, annotation, detections, object_name, x_off, y_off):
    """Append one <object> element per detection box, shifting the crop-local
    coordinates back into full-image coordinates by (x_off, y_off)."""
    for det in detections:
        box = [int(v) for v in det[0:-1]]  # drop the trailing confidence score
        coords = {
            'xmin': box[0] + x_off,
            'ymin': box[1] + y_off,
            'xmax': box[2] + x_off,
            'ymax': box[3] + y_off,
        }
        obj = doc.createElement("object")
        annotation.appendChild(obj)
        name = doc.createElement("name")
        obj.appendChild(name)
        name.appendChild(doc.createTextNode(object_name))
        bndbox = doc.createElement("bndbox")
        obj.appendChild(bndbox)
        for tag in ('xmin', 'ymin', 'xmax', 'ymax'):
            node = doc.createElement(tag)
            bndbox.appendChild(node)
            node.appendChild(doc.createTextNode(str(coords[tag])))


for image_path in glob.glob(os.path.join(images_path + '*.jpg')):
    # Stage 1: locate the weld seam (class index 8 in the result list).
    result_hanfeng = inference_detector(model, image_path)
    if result_hanfeng[8].size == 0:
        continue  # no seam detected in this image
    bbox_ = [int(i) for i in result_hanfeng[8][0][0:-1]]
    image = cv2.imread(image_path, 2)  # flag 2: keep the original bit depth
    print(image.shape)
    # Crop the seam region (rows ymin:ymax, cols xmin:xmax), save as 16-bit TIFF.
    new_img = image[bbox_[1]:bbox_[3], bbox_[0]:bbox_[2]]
    new_img = Image.fromarray(np.uint16(new_img))
    save_path = os.path.join(hanfeng_path + os.path.basename(image_path).split('.')[0] + '.tif')
    new_img.save(save_path)
    # Rename .tif -> .jpg so the second detector accepts the file path.
    root, ext = os.path.splitext(save_path)
    if ext == ".tif":
        newname = root + ".jpg"
        os.rename(save_path, newname)
    other_path = newname
    # Stage 2: detect small defects inside the seam crop.
    result_others = inference_detector(model2, other_path)
    # Build the VOC-style annotation document.
    doc = Document()
    annotation = doc.createElement("annotation")
    doc.appendChild(annotation)
    filename = doc.createElement("filename")
    annotation.appendChild(filename)
    filename.appendChild(doc.createTextNode(image_path))
    # One shared helper replaces the three previously duplicated blocks.
    for cls_idx, cls_name in CLASS_NAMES.items():
        if result_others[cls_idx].size != 0:
            _append_objects(doc, annotation, result_others[cls_idx],
                            cls_name, bbox_[0], bbox_[1])
    filename_save = image_path.split('.')[0] + ".xml"
    with open(filename_save, "w", encoding="utf8") as f:
        f.write(doc.toprettyxml(indent=" "))
| [
"xml.dom.minidom.Document",
"os.path.basename",
"mmdet.apis.init_detector",
"mmdet.apis.inference_detector",
"cv2.imread",
"os.path.splitext",
"numpy.uint16",
"os.path.join"
] | [((9623, 9683), 'mmdet.apis.init_detector', 'init_detector', (['config_file', 'checkpoint_file'], {'device': '"""cuda:0"""'}), "(config_file, checkpoint_file, device='cuda:0')\n", (9636, 9683), False, 'from mmdet.apis import init_detector, inference_detector\n'), ((9802, 9863), 'mmdet.apis.init_detector', 'init_detector', (['config_file', 'checkpoint_file2'], {'device': '"""cuda:0"""'}), "(config_file, checkpoint_file2, device='cuda:0')\n", (9815, 9863), False, 'from mmdet.apis import init_detector, inference_detector\n'), ((9893, 9928), 'os.path.join', 'os.path.join', (["(images_path + '*.jpg')"], {}), "(images_path + '*.jpg')\n", (9905, 9928), False, 'import os\n'), ((9952, 9989), 'mmdet.apis.inference_detector', 'inference_detector', (['model', 'image_path'], {}), '(model, image_path)\n', (9970, 9989), False, 'from mmdet.apis import init_detector, inference_detector\n'), ((10113, 10138), 'cv2.imread', 'cv2.imread', (['image_path', '(2)'], {}), '(image_path, 2)\n', (10123, 10138), False, 'import cv2\n'), ((10436, 10463), 'os.path.splitext', 'os.path.splitext', (['save_path'], {}), '(save_path)\n', (10452, 10463), False, 'import os\n'), ((10643, 10681), 'mmdet.apis.inference_detector', 'inference_detector', (['model2', 'other_path'], {}), '(model2, other_path)\n', (10661, 10681), False, 'from mmdet.apis import init_detector, inference_detector\n'), ((10697, 10707), 'xml.dom.minidom.Document', 'Document', ([], {}), '()\n', (10705, 10707), False, 'from xml.dom.minidom import Document\n'), ((10277, 10295), 'numpy.uint16', 'np.uint16', (['new_img'], {}), '(new_img)\n', (10286, 10295), True, 'import numpy as np\n'), ((10546, 10569), 'os.path.join', 'os.path.join', (['save_path'], {}), '(save_path)\n', (10558, 10569), False, 'import os\n'), ((10570, 10591), 'os.path.join', 'os.path.join', (['newname'], {}), '(newname)\n', (10582, 10591), False, 'import os\n'), ((10341, 10369), 'os.path.basename', 'os.path.basename', (['image_path'], {}), '(image_path)\n', (10357, 
10369), False, 'import os\n')] |
# encoding=utf8
# pylint: disable=mixed-indentation, trailing-whitespace, multiple-statements, attribute-defined-outside-init, logging-not-lazy, no-self-use, line-too-long, arguments-differ, bad-continuation
import logging
from numpy import apply_along_axis, zeros, argsort, concatenate, array, exp, cos, pi
from NiaPy.algorithms.algorithm import Algorithm
# Module-level logger configuration for this NiaPy algorithm module.
logging.basicConfig()
logger = logging.getLogger('NiaPy.algorithms.basic')
logger.setLevel('INFO')
# Public API of this module.
__all__ = ['MothFlameOptimizer']
class MothFlameOptimizer(Algorithm):
	r"""Implementation of the Moth Flame Optimizer (MFO).

	Algorithm:
		Moth flame optimizer

	Date:
		2018

	Author:
		<NAME> and <NAME>

	License:
		MIT

	Reference paper:
		Mirjalili, Seyedali. "Moth-flame optimization algorithm: A novel nature-inspired heuristic paradigm." Knowledge-Based Systems 89 (2015): 228-249.

	Attributes:
		Name (List[str]): List of strings representing algorithm name.

	See Also:
		* :class:`NiaPy.algorithms.algorithm.Algorithm`
	"""
	Name = ['MothFlameOptimizer', 'MFO']
	@staticmethod
	def algorithmInfo():
		r"""Get basic information of algorithm.

		Returns:
			str: Basic information (the reference paper citation).

		See Also:
			* :func:`NiaPy.algorithms.Algorithm.algorithmInfo`
		"""
		return r"""Mirjalili, Seyedali. "Moth-flame optimization algorithm: A novel nature-inspired heuristic paradigm." Knowledge-Based Systems 89 (2015): 228-249."""
	@staticmethod
	def typeParameters():
		r"""Get dictionary with functions for checking values of parameters.

		Returns:
			Dict[str, Callable]: Mapping of parameter name to validation
			callable, inherited unchanged from the base class.

		See Also:
			* :func:`NiaPy.algorithms.algorithm.Algorithm.typeParameters`
		"""
		return Algorithm.typeParameters()
	def setParameters(self, NP=25, **ukwargs):
		r"""Set the algorithm parameters.

		Arguments:
			NP (int): Number of individuals (moths) in the population.

		See Also:
			* :func:`NiaPy.algorithms.algorithm.Algorithm.setParameters`
		"""
		Algorithm.setParameters(self, NP=NP, **ukwargs)
		if ukwargs: logger.info('Unused arguments: %s' % (ukwargs))
	def initPopulation(self, task):
		r"""Initialize starting population.

		Args:
			task (Task): Optimization task

		Returns:
			Tuple[numpy.ndarray, numpy.ndarray[float], Dict[str, Any]]:
				1. Initialized population
				2. Initialized population function/fitness values
				3. Additional arguments:
					* best_flames (numpy.ndarray): Best individuals
					* best_flame_fitness (numpy.ndarray): Best individuals fitness/function values
					* previous_population (numpy.ndarray): Previous population
					* previous_fitness (numpy.ndarray[float]): Previous population fitness/function values

		See Also:
			* :func:`NiaPy.algorithms.algorithm.Algorithm.initPopulation`
		"""
		moth_pos, moth_fitness, d = Algorithm.initPopulation(self, task)
		# The initial flames are the moths sorted by fitness (best first)
		indexes = argsort(moth_fitness)
		best_flames, best_flame_fitness = moth_pos[indexes], moth_fitness[indexes]
		# Placeholders for the previous generation; filled on the first iteration
		previous_population, previous_fitness = zeros((self.NP, task.D)), zeros(self.NP)
		d.update({'best_flames': best_flames, 'best_flame_fitness': best_flame_fitness, 'previous_population': previous_population, 'previous_fitness': previous_fitness})
		return moth_pos, moth_fitness, d
	def runIteration(self, task, moth_pos, moth_fitness, xb, fxb, best_flames, best_flame_fitness, previous_population, previous_fitness, **dparams):
		r"""Core function of MothFlameOptimizer algorithm.

		Args:
			task (Task): Optimization task.
			moth_pos (numpy.ndarray): Current population.
			moth_fitness (numpy.ndarray[float]): Current population fitness/function values.
			xb (numpy.ndarray): Current population best individual.
			fxb (float): Current best individual
			best_flames (numpy.ndarray): Best found individuals
			best_flame_fitness (numpy.ndarray[float]): Best found individuals fitness/function values
			previous_population (numpy.ndarray): Previous population
			previous_fitness (numpy.ndarray[float]): Previous population fitness/function values
			**dparams (Dict[str, Any]): Additional parameters

		Returns:
			Tuple[numpy.ndarray, numpy.ndarray[float], Dict[str, Any]]:
				1. New population.
				2. New population fitness/function values.
				3. Additional arguments:
					* best_flames (numpy.ndarray): Best individuals.
					* best_flame_fitness (numpy.ndarray[float]): Best individuals fitness/function values.
					* previous_population (numpy.ndarray): Previous population.
					* previous_fitness (numpy.ndarray[float]): Previous population fitness/function values.
		"""
		# Remember the current generation before moving the moths
		previous_population, previous_fitness = moth_pos, moth_fitness
		# Flames are the current moths sorted by fitness (best first)
		indexes = argsort(moth_fitness)
		sorted_population = moth_pos[indexes]
		# flame_no decreases linearly over the iterations so the swarm converges;
		# a is the spiral shape parameter, moving from -1 towards -2
		flame_no, a = round(self.NP - task.Iters * ((self.NP - 1) / task.nGEN)), -1 + task.Iters * ((-1) / task.nGEN)
		for i in range(self.NP):
			for j in range(task.D):
				# Logarithmic spiral flight of moth i around its assigned flame:
				# moths beyond flame_no all spiral around the last flame
				distance_to_flame, b, t = abs(sorted_population[i, j] - moth_pos[i, j]), 1, (a - 1) * self.rand() + 1
				if i <= flame_no: moth_pos[i, j] = distance_to_flame * exp(b * t) * cos(2 * pi * t) + sorted_population[i, j]
				else: moth_pos[i, j] = distance_to_flame * exp(b * t) * cos(2 * pi * t) + sorted_population[flame_no, j]
		# Repair out-of-bounds positions, then re-evaluate fitness
		moth_pos = apply_along_axis(task.repair, 1, moth_pos, self.Rand)
		moth_fitness = apply_along_axis(task.eval, 1, moth_pos)
		# Merge the previous generation with the best flames and keep the NP best
		double_population, double_fitness = concatenate((previous_population, best_flames), axis=0), concatenate((previous_fitness, best_flame_fitness), axis=0)
		indexes = argsort(double_fitness)
		double_sorted_fitness, double_sorted_population = double_fitness[indexes], double_population[indexes]
		for newIdx in range(2 * self.NP): double_sorted_population[newIdx] = array(double_population[indexes[newIdx], :])
		best_flame_fitness, best_flames = double_sorted_fitness[:self.NP], double_sorted_population[:self.NP]
		return moth_pos, moth_fitness, {'best_flames': best_flames, 'best_flame_fitness': best_flame_fitness, 'previous_population': previous_population, 'previous_fitness': previous_fitness}
# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
| [
"numpy.concatenate",
"logging.basicConfig",
"NiaPy.algorithms.algorithm.Algorithm.initPopulation",
"numpy.zeros",
"numpy.argsort",
"numpy.apply_along_axis",
"numpy.array",
"numpy.exp",
"numpy.cos",
"NiaPy.algorithms.algorithm.Algorithm.typeParameters",
"logging.getLogger",
"NiaPy.algorithms.al... | [((360, 381), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (379, 381), False, 'import logging\n'), ((391, 434), 'logging.getLogger', 'logging.getLogger', (['"""NiaPy.algorithms.basic"""'], {}), "('NiaPy.algorithms.basic')\n", (408, 434), False, 'import logging\n'), ((1627, 1653), 'NiaPy.algorithms.algorithm.Algorithm.typeParameters', 'Algorithm.typeParameters', ([], {}), '()\n', (1651, 1653), False, 'from NiaPy.algorithms.algorithm import Algorithm\n'), ((1883, 1930), 'NiaPy.algorithms.algorithm.Algorithm.setParameters', 'Algorithm.setParameters', (['self'], {'NP': 'NP'}), '(self, NP=NP, **ukwargs)\n', (1906, 1930), False, 'from NiaPy.algorithms.algorithm import Algorithm\n'), ((2703, 2739), 'NiaPy.algorithms.algorithm.Algorithm.initPopulation', 'Algorithm.initPopulation', (['self', 'task'], {}), '(self, task)\n', (2727, 2739), False, 'from NiaPy.algorithms.algorithm import Algorithm\n'), ((2779, 2800), 'numpy.argsort', 'argsort', (['moth_fitness'], {}), '(moth_fitness)\n', (2786, 2800), False, 'from numpy import apply_along_axis, zeros, argsort, concatenate, array, exp, cos, pi\n'), ((4629, 4650), 'numpy.argsort', 'argsort', (['moth_fitness'], {}), '(moth_fitness)\n', (4636, 4650), False, 'from numpy import apply_along_axis, zeros, argsort, concatenate, array, exp, cos, pi\n'), ((5219, 5272), 'numpy.apply_along_axis', 'apply_along_axis', (['task.repair', '(1)', 'moth_pos', 'self.Rand'], {}), '(task.repair, 1, moth_pos, self.Rand)\n', (5235, 5272), False, 'from numpy import apply_along_axis, zeros, argsort, concatenate, array, exp, cos, pi\n'), ((5290, 5330), 'numpy.apply_along_axis', 'apply_along_axis', (['task.eval', '(1)', 'moth_pos'], {}), '(task.eval, 1, moth_pos)\n', (5306, 5330), False, 'from numpy import apply_along_axis, zeros, argsort, concatenate, array, exp, cos, pi\n'), ((5498, 5521), 'numpy.argsort', 'argsort', (['double_fitness'], {}), '(double_fitness)\n', (5505, 5521), False, 'from numpy import 
apply_along_axis, zeros, argsort, concatenate, array, exp, cos, pi\n'), ((2949, 2973), 'numpy.zeros', 'zeros', (['(self.NP, task.D)'], {}), '((self.NP, task.D))\n', (2954, 2973), False, 'from numpy import apply_along_axis, zeros, argsort, concatenate, array, exp, cos, pi\n'), ((2975, 2989), 'numpy.zeros', 'zeros', (['self.NP'], {}), '(self.NP)\n', (2980, 2989), False, 'from numpy import apply_along_axis, zeros, argsort, concatenate, array, exp, cos, pi\n'), ((5369, 5424), 'numpy.concatenate', 'concatenate', (['(previous_population, best_flames)'], {'axis': '(0)'}), '((previous_population, best_flames), axis=0)\n', (5380, 5424), False, 'from numpy import apply_along_axis, zeros, argsort, concatenate, array, exp, cos, pi\n'), ((5426, 5485), 'numpy.concatenate', 'concatenate', (['(previous_fitness, best_flame_fitness)'], {'axis': '(0)'}), '((previous_fitness, best_flame_fitness), axis=0)\n', (5437, 5485), False, 'from numpy import apply_along_axis, zeros, argsort, concatenate, array, exp, cos, pi\n'), ((5697, 5741), 'numpy.array', 'array', (['double_population[indexes[newIdx], :]'], {}), '(double_population[indexes[newIdx], :])\n', (5702, 5741), False, 'from numpy import apply_along_axis, zeros, argsort, concatenate, array, exp, cos, pi\n'), ((5055, 5070), 'numpy.cos', 'cos', (['(2 * pi * t)'], {}), '(2 * pi * t)\n', (5058, 5070), False, 'from numpy import apply_along_axis, zeros, argsort, concatenate, array, exp, cos, pi\n'), ((5157, 5172), 'numpy.cos', 'cos', (['(2 * pi * t)'], {}), '(2 * pi * t)\n', (5160, 5172), False, 'from numpy import apply_along_axis, zeros, argsort, concatenate, array, exp, cos, pi\n'), ((5042, 5052), 'numpy.exp', 'exp', (['(b * t)'], {}), '(b * t)\n', (5045, 5052), False, 'from numpy import apply_along_axis, zeros, argsort, concatenate, array, exp, cos, pi\n'), ((5144, 5154), 'numpy.exp', 'exp', (['(b * t)'], {}), '(b * t)\n', (5147, 5154), False, 'from numpy import apply_along_axis, zeros, argsort, concatenate, array, exp, cos, pi\n')] |
import os
from os.path import join
import numpy as np
from shutil import which
import pkg_resources
from .. import prepare
from .. import runstatus as rs
from .. import runinfo as ri
from .qsub import qsub
from .qsub import QUEUE_NAME
from .qstat import qstat
from ..tools import jsonlog
# Prefix used in qsub job names so running production jobs can be identified.
QSUB_OBS_PRODUCE_PREFIX = 'phs_obs_produce'
# fact-tools steering XML shipped with this package.
fact_tools_xml_path = pkg_resources.resource_filename(
    'photon_stream_production',
    os.path.join('resources', 'observations_pass4.xml')
)
# Default location of the fact-tools jar on the cluster.
_fact_tools_jar_path = join(
    '/', 'home', 'guest', 'relleums', 'fact_photon_stream', 'fact-tools',
    'target', 'fact-tools-0.18.1.jar',
)
def produce(
    only_a_fraction=1.0,
    fact_raw_dir='/fact/raw',
    fact_drs_dir='/fact/raw',
    fact_aux_dir='/fact/aux',
    phs_dir='/gpfs0/fact/processing/public/phs',
    java_path='/home/guest/relleums/java8/jdk1.8.0_111',
    fact_tools_jar_path=_fact_tools_jar_path,
    fact_tools_xml_path=fact_tools_xml_path,
    tmp_dir_base_name='phs_obs_',
    queue=QUEUE_NAME,
    max_jobs_in_qsub=256,
    runs_in_qstat=None,
):
    """Submit photon-stream production jobs for unconverted observation
    runs to qsub, keeping the number of queued jobs within a cap.

    Parameters
    ----------
    only_a_fraction : float
        Fraction of candidate runs to process (passed to job preparation).
    fact_raw_dir, fact_drs_dir, fact_aux_dir : str
        Input directories with FACT raw, drs and aux files.
    phs_dir : str
        Output directory; the runstatus table lives in phs_dir/obs.
    java_path, fact_tools_jar_path, fact_tools_xml_path : str
        Java runtime and fact-tools artifacts used by the worker.
    tmp_dir_base_name : str
        Prefix for per-job temporary directories.
    queue : str
        qsub queue name.
    max_jobs_in_qsub : int
        Upper bound on the total number of production jobs in qsub
        (already-running plus newly submitted).
    runs_in_qstat : DataFrame or None
        Currently running production jobs; queried from qstat when None.
    """
    jsonlog('Start')
    obs_dir = join(phs_dir, 'obs')
    runstatus_path = join(obs_dir, 'runstatus.csv')
    if runs_in_qstat is None:
        runs_in_qstat = qstat(is_in_JB_name=QSUB_OBS_PRODUCE_PREFIX)
    jsonlog('{:d} production-jobs are still running in qsub'.format(len(runs_in_qstat)))
    runstatus = rs.read(runstatus_path)
    # Runs without a recorded output size have not been converted yet.
    runs_to_be_converted = runstatus[np.isnan(runstatus['PhsSize'])]
    # Do not resubmit runs that are already being processed.
    runs_to_be_converted = ri.remove_from_first_when_also_in_second(
        first=runs_to_be_converted,
        second=runs_in_qstat,
    )
    jobs, tree = prepare.jobs_and_directory_tree(
        phs_dir=phs_dir,
        only_a_fraction=only_a_fraction,
        fact_raw_dir=fact_raw_dir,
        fact_drs_dir=fact_drs_dir,
        fact_aux_dir=fact_aux_dir,
        java_path=java_path,
        fact_tools_jar_path=fact_tools_jar_path,
        fact_tools_xml_path=fact_tools_xml_path,
        tmp_dir_base_name=tmp_dir_base_name,
        runstatus=runs_to_be_converted,
    )
    prepare.output_tree(tree)
    # Submit at most (max_jobs_in_qsub - currently running) new jobs.
    num_jobs_for_qsub = max_jobs_in_qsub - len(runs_in_qstat)
    num_submitted = 0
    for job in jobs:
        # '>=' keeps the total within the cap; the previous '>' comparison
        # submitted one job more than num_jobs_for_qsub allowed.
        if num_submitted >= num_jobs_for_qsub:
            break
        num_submitted += 1
        qsub(
            job=job,
            exe_path=which('phs.isdc.obs.produce.worker'),
            queue=queue,
        )
    jsonlog('{:d} production-jobs submitted to qsub'.format(num_submitted))
    jsonlog('End')
| [
"os.path.join",
"shutil.which",
"numpy.isnan"
] | [((503, 616), 'os.path.join', 'join', (['"""/"""', '"""home"""', '"""guest"""', '"""relleums"""', '"""fact_photon_stream"""', '"""fact-tools"""', '"""target"""', '"""fact-tools-0.18.1.jar"""'], {}), "('/', 'home', 'guest', 'relleums', 'fact_photon_stream', 'fact-tools',\n 'target', 'fact-tools-0.18.1.jar')\n", (507, 616), False, 'from os.path import join\n'), ((425, 476), 'os.path.join', 'os.path.join', (['"""resources"""', '"""observations_pass4.xml"""'], {}), "('resources', 'observations_pass4.xml')\n", (437, 476), False, 'import os\n'), ((1096, 1116), 'os.path.join', 'join', (['phs_dir', '"""obs"""'], {}), "(phs_dir, 'obs')\n", (1100, 1116), False, 'from os.path import join\n'), ((1138, 1168), 'os.path.join', 'join', (['obs_dir', '"""runstatus.csv"""'], {}), "(obs_dir, 'runstatus.csv')\n", (1142, 1168), False, 'from os.path import join\n'), ((1437, 1467), 'numpy.isnan', 'np.isnan', (["runstatus['PhsSize']"], {}), "(runstatus['PhsSize'])\n", (1445, 1467), True, 'import numpy as np\n'), ((2297, 2333), 'shutil.which', 'which', (['"""phs.isdc.obs.produce.worker"""'], {}), "('phs.isdc.obs.produce.worker')\n", (2302, 2333), False, 'from shutil import which\n')] |
#!/usr/bin/env python
import sys
sys.path.append('../neural_networks')
import numpy as np
import numpy.matlib
import pickle
import copy
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import matplotlib.pyplot as plt
import os
import time
import copy
from gym_collision_avoidance.envs.policies.CADRL.scripts.neural_networks import neural_network_regr_multi as nn
from gym_collision_avoidance.envs.policies.CADRL.scripts.neural_networks.multiagent_network_param import Multiagent_network_param
from gym_collision_avoidance.envs.policies.CADRL.scripts.neural_networks.nn_training_param import NN_training_param
from gym_collision_avoidance.envs.policies.CADRL.scripts.multi import pedData_processing_multi as pedData
from gym_collision_avoidance.envs.policies.CADRL.scripts.multi import global_var as gb
from gym_collision_avoidance.envs.policies.CADRL.scripts.multi import gen_rand_testcases as gen_tc
# setting up global variables (mirrored from the shared global_var module)
COLLISION_COST = gb.COLLISION_COST
DIST_2_GOAL_THRES = gb.DIST_2_GOAL_THRES
GETTING_CLOSE_PENALTY = gb.GETTING_CLOSE_PENALTY
GETTING_CLOSE_RANGE = gb.GETTING_CLOSE_RANGE
EPS = gb.EPS
# terminal states
NON_TERMINAL = gb.NON_TERMINAL
COLLIDED = gb.COLLIDED
REACHED_GOAL = gb.REACHED_GOAL
# plotting colors
plt_colors = gb.plt_colors
# RL discount factor and time-step normalization constant
GAMMA = gb.RL_gamma
DT_NORMAL = gb.RL_dt_normal
SMOOTH_COST = gb.SMOOTH_COST
# heading-change limit (rad) for 'rotate_constr' mode; scaled by the
# lookahead time (capped at 1 s) where it is used
TURNING_LIMIT = np.pi/6.0
# neural network input/output normalization ranges
NN_ranges = gb.NN_ranges
# assume no kinematic constraints
def find_action_grids():
    """Build a polar grid of candidate actions [speed, angle].

    Returns an (18*5 + 1, 2) array whose rows are [speed, heading]:
    the first row is the stop action [0, 0], followed by every
    combination of 18 headings (uniform over [0, 2*pi)) and 5 speeds
    (1.0 down to, but excluding, 0.0).
    """
    nom_speed = 1.0
    num_angles = 18
    num_speeds = 5
    angles = np.linspace(0, 2 * np.pi, num_angles, endpoint=False)
    speeds = np.linspace(nom_speed, 0.0, num_speeds, endpoint=False)
    angles, speeds = np.meshgrid(angles, speeds)
    # prepend the zero (stop) action
    angles = np.append([0.0], angles.flatten())
    speeds = np.append([0.0], speeds.flatten())
    actions = np.vstack((speeds, angles)).transpose()
    assert(actions.shape[0] == num_angles * num_speeds + 1)
    assert(actions.shape[1] == 2)
    # (unused debug-plot locals from the original were removed)
    return actions
# assume min/max acceleration over 1 seconds
def find_close_actions():
    """Discretize the action set reachable near the current heading.

    Returns a (25, 2) array of [speed, relative_angle] rows: a leading
    stop action [0, 0] plus every combination of 6 heading offsets
    spanning [-TURNING_LIMIT, TURNING_LIMIT] and 4 speed levels
    (1.0, 0.75, 0.5, 0.25 of nominal).
    """
    num_offsets = 6
    num_levels = 4
    nominal_speed = 1.0
    heading_offsets = np.linspace(-TURNING_LIMIT, TURNING_LIMIT, num_offsets, endpoint=True)
    speed_levels = np.linspace(nominal_speed, 0.0, num_levels, endpoint=False)
    offset_grid, speed_grid = np.meshgrid(heading_offsets, speed_levels)
    # prepend the zero (stop) action before flattening the grid
    all_offsets = np.append([0.0], offset_grid.flatten())
    all_speeds = np.append([0.0], speed_grid.flatten())
    return np.vstack((all_speeds, all_offsets)).transpose()
def find_angle_diff(angle_1, angle_2):
    """Return the signed difference angle_1 - angle_2 wrapped into [-pi, pi).

    Works elementwise on scalars or numpy arrays.
    """
    return (angle_1 - angle_2 + np.pi) % (2.0 * np.pi) - np.pi
def compute_multi_net_param(num_agents):
    """Assemble the layer specification for the multiagent value network.

    The input splits into one 7-dim host-agent slice plus one 8-dim slice
    per other agent; these feed two fully-connected ('conn') layers, a
    'max' pooling layer over the other agents, two more 'conn' layers, and
    a scalar output.
    Returns (layers_info, layers_type, Multiagent_network_param).
    """
    num_others = num_agents - 1
    # (layer shape, layer type); the final output layer carries no type
    spec = [
        (np.array([[1, 7], [num_others, 8]]), 'conn'),
        (np.array([[1, 50], [num_others, 50]]), 'conn'),
        (np.array([[1, 50], [num_others, 50]]), 'max'),
        (np.array([[1, 50], [1, 50]]), 'conn'),
        (np.array([[1, 50]]), 'conn'),
        (np.array([[1, 1]]), None),
    ]
    layers_info = [shape for shape, _ in spec]
    layers_type = [ltype for _, ltype in spec if ltype is not None]
    multi_net_param = Multiagent_network_param(layers_info, layers_type)
    return layers_info, layers_type, multi_net_param
def find_nn_ranges(num_agents, NN_ranges):
    """Expand per-agent normalization templates to the full input width.

    The network input is 7 host-agent features followed by 8 features for
    each of the (num_agents - 1) other agents. NN_ranges holds
    [avg templates, std templates, output avg, output std], where each
    template entry is [host(7,), other(8,)].
    Returns [input_avg, input_std, output_avg, output_std].
    """
    num_others = num_agents - 1
    host_avg = NN_ranges[0][0]
    other_avg = NN_ranges[0][1]
    host_std = NN_ranges[1][0]
    other_std = NN_ranges[1][1]
    # tile the host block once and the other-agent block num_others times;
    # astype keeps the float64 dtype the original zeros-based fill produced
    input_avg_vec = np.concatenate(
        [host_avg] + [other_avg] * num_others).astype(np.float64)
    input_std_vec = np.concatenate(
        [host_std] + [other_std] * num_others).astype(np.float64)
    return [input_avg_vec, input_std_vec, NN_ranges[2], NN_ranges[3]]
class NN_navigation_value:
    """Value-network wrapper for multiagent collision avoidance (CADRL):
    holds the regression network, the discretized action sets, and the
    rollout parameters used to score candidate actions."""

    def __init__(self, num_agents, nn_training_param, mode='no_constr', passing_side='none'):
        """Create the value network and precompute action discretizations.

        num_agents: total number of agents the value net is sized for.
        nn_training_param: NN_training_param instance for the regressor.
        mode: 'no_constr' (holonomic) or 'rotate_constr' (turn-rate limited).
        passing_side: preferred passing-side convention.
        """
        # problem size and the underlying regression network
        self.num_agents = num_agents
        self.nn_training_param = nn_training_param
        self.nn = nn.Neural_network_regr_multi(self.nn_training_param)
        self.current_value = 0
        self.old_value_net = None
        # precomputed candidate-action grids
        self.plot_actions = find_action_grids()
        self.close_actions = find_close_actions()
        # kinematic mode and passing-side preference
        self.mode = mode
        self.passing_side = passing_side
        self.training_passing_side_weight = 0.5  # 0.7 / 0.2 tried previously
        # rollout parameters
        self.dt_forward = 1.0
        self.radius_buffer = 0.0  # buffer around collision radius
        # velocity labels kept only while training, for the plot callback
        self.test_vel_data = None
# setup neural network
def initialize_nn(self, num_agents):
layers_info, layers_type, multi_net_param = compute_multi_net_param(num_agents)
self.nn.initialize_network_param(layers_info, layers_type, multiagent_net_param=multi_net_param)
def train_neural_network(self, training_data, test_data, test_vel_data=None):
if not hasattr(self.nn, 'W'):
self.initialize_nn(self.num_agents)
# for plotting
self.test_vel_data = test_vel_data
self.nn.set_plotting_func(self.plot_ped_testCase_rand, test_data[0])
# initialize neural network
ERM = 0
# self.nn.set_training_stepsize('sqrt_decay', 1.0, 0.1)
# self.nn.set_training_stepsize('sum_of_grad', 0.1, 0.1)
self.nn.set_training_stepsize('rmsprop', 0.1, 0.1)
NN_ranges_processed = find_nn_ranges(self.num_agents, NN_ranges)
self.nn.train_nn(training_data, ERM, test_data, input_output_ranges=NN_ranges_processed)
self.test_vel_data = None
def plot_ped_testCase_rand(self, X, Y_hat, title_string, figure_name=None):
# Y_hat = [value]
ind = np.random.randint(0, X.shape[0])
# ind = 5
x = X[ind,:]
y_hat = Y_hat[ind,:]
if (self.test_vel_data is None) or (ind > self.test_vel_data.shape[0]):
y = None
else:
y = self.test_vel_data[ind,:]
self.plot_ped_testCase(x, y_hat, title_string, figure_name, y)
def plot_ped_testCase(self, x, y_hat, title_string, figure_name, y=None, plt_colors_custom=None):
# print x.shape
# print y_hat.shape
# print y
# raw_input()
# new figure
if figure_name is None:
fig = plt.figure(figsize=(15, 6), frameon=False)
else:
fig = plt.figure(figure_name,figsize=(15, 6), frameon=False)
plt.clf()
if plt_colors_custom is None:
plt_colors_local = plt_colors
else:
plt_colors_local = plt_colors_custom
# convert to representation that's easier to plot
a_s, other_agent_states = pedData.agentCentricState_2_rawState_noRotate(x)
# print 'x', x
# print 'a_s', a_s
# print 'len(other_agent_states)', len(other_agent_states)
# print a_s
# print len(other_agent_states)
# print other_agent_states
# raw_input()
# subfigure 1
ax = fig.add_subplot(1, 2, 1)
# agent at (0,0)
circ1 = plt.Circle((0.0, 0.0), radius=a_s[8], fc='w', ec=plt_colors_local[0])
ax.add_patch(circ1)
# goal
plt.plot(a_s[6], a_s[7], c=plt_colors_local[0], marker='*', markersize=20)
# pref speed
plt.arrow(0.0, 0.0, a_s[5], 0.0, fc='m', ec='m', head_width=0.05, head_length=0.1)
vel_pref, = plt.plot([0.0, a_s[5]], [0.0, 0.0], 'm', linewidth=2)
# current speed
plt.arrow(0.0, 0.0, a_s[2], a_s[3], fc='k', ec='k', head_width=0.05, head_length=0.1)
vel_cur, = plt.plot([0.0, a_s[2]], [0.0, a_s[3]], 'k', linewidth=2)
# actual speed
if y != None:
x_vel = y[0]
y_vel = y[1]
plt.arrow(0.0, 0.0, x_vel, y_vel, fc=plt_colors_local[0], \
ec=plt_colors_local[0], head_width=0.05, head_length=0.1)
vel_select, = plt.plot([0.0, x_vel], [0.0, y_vel], \
c=plt_colors_local[0], linewidth=2)
# other agents
for i, o_s in enumerate(other_agent_states):
circ = plt.Circle((o_s[0], o_s[1]), radius=o_s[8], fc='w', ec=plt_colors_local[i+1])
ax.add_patch(circ)
# other agent's speed
plt.arrow(o_s[0], o_s[1], o_s[2], o_s[3], fc=plt_colors_local[i+1], \
ec=plt_colors_local[i+1], head_width=0.05, head_length=0.1)
# vel_other, = plt.plot([o_s[0], o_s[0]+o_s[2]], [o_s[1], o_s[1]+o_s[3]], \
# c=plt_colors_local[i+1], linewidth=2)
# meta data
agent_state = a_s
action = self.find_next_action(agent_state, other_agent_states)
x_tmp = action[0] * np.cos(action[1])
y_tmp = action[0] * np.sin(action[1])
plt.arrow(0.0, 0.0, x_tmp, y_tmp, fc='g',\
ec='g', head_width=0.05, head_length=0.1)
vel_nn, = plt.plot([0.0, x_tmp], [0.0, y_tmp], 'g-', linewidth=2)
value = self.find_next_states_values(agent_state, action.reshape([1,2]), other_agent_states)
# plt.title(title_string + '; pred_value: %f, true_value: %f' % \
# (y_hat[0], y[0]))
plt.xlabel('x (m)')
plt.ylabel('y (m)')
dist_2_other_agents = np.zeros((len(other_agent_states), ))
for i, other_agent_state in enumerate(other_agent_states):
dist_2_other_agents[i] = np.linalg.norm(agent_state[0:2]-other_agent_state[0:2]) - \
agent_state[8] - other_agent_state[8]
dist_2_other_agent = np.min(dist_2_other_agents)
# print 'dist_2_other_agents', dist_2_other_agents
# print 'dist_2_other_agent', dist_2_other_agent
# raw_input()
plt.title(title_string + '\n pref_speed: %.3f, min_dist_2_others: %.3f' % \
(a_s[5],dist_2_other_agent))
# if y != None:
# plt.legend([vel_pref, vel_cur,vel_other, vel_select, vel_nn], \
# ['vel_pref', 'vel_cur', 'vel_other', 'vel_select', 'vel_nn'])
# else:
# plt.legend([vel_pref, vel_cur, vel_other, vel_nn], \
# ['vel_pref', 'vel_cur', 'vel_other', 'vel_nn'])
plt.legend([vel_cur, vel_pref, vel_nn], \
['${heading}$', '$v_{pref}$','$v_{select}$'], \
loc='lower left', fontsize=30, frameon=False)
ax.axis('equal')
xlim = ax.get_xlim()
new_xlim = np.array((xlim[0], xlim[1]+0.5))
ax.set_xlim(new_xlim)
# plotting style (only show axis on bottom and left)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
# second subfigure
# ax = fig.add_subplot(1, 2, 2, projection='3d')
ax = fig.add_subplot(1, 2, 2)
# print 'before', agent_state
# agent_state, other_agent_states = \
# pedData.agentCentricState_2_rawState_noRotate(x)
# print 'after', agent_state
if self.mode == 'no_constr':
''' all possible actions '''
''' actions choice 1 '''
default_action_xy = agent_state[2:4]
speed = np.linalg.norm(default_action_xy)
angle_select = np.arctan2(default_action_xy[1], default_action_xy[0])
# default_action_theta = np.array([speed, angle_select])
# actions = self.find_actions_theta(agent_state, default_action_theta)
# circular for plotting
actions = self.plot_actions.copy()
actions[:,1] = (actions[:,1] + np.pi) % (2 * np.pi) - np.pi
actions[:,0] *= a_s[5]
# turning needs to be slower
# angle_diff_abs = abs(find_angle_diff(actions[:,1],agent_state[4]))
# actions[:,0] *= (1 - angle_diff_abs / (2*np.pi))
elif self.mode == 'rotate_constr':
''' dynamically feasible actions '''
# print 'x', x
# print 'agent_state', agent_state
# print 'other_agent_state', other_agent_state
cur_heading = agent_state[4]; desired_speed = agent_state[5]
actions = self.close_actions.copy()
actions[:,0] *= desired_speed
print('rotate_constr')
actions[:,1] = actions[:,1] + cur_heading
actions[:,1] = (actions[:,1] + np.pi) % (2 * np.pi) - np.pi
# actions = self.find_actions_theta_dynConstr(agent_state, 1.0)
# print cur_heading
else:
assert(0)
# agent_state, other_agent_state = \
# pedData.agentCentricState_2_rawState_noRotate(x)
# actions, accels_theta = self.find_actions_theta_dynConstr(agent_state, 1.0)
plot_x = actions[:,0] * np.cos(actions[:,1])
plot_y = actions[:,0] * np.sin(actions[:,1])
# assert(0)
# print 'before, agent state', agent_state
# print 'before, other_agent state',other_agent_state
# print 'after, agent state', agent_state
# print 'after, other_agent state', other_agent_state
plot_z = self.find_next_states_values(agent_state, actions, other_agent_states)
# print actions.shape, plot_x.shape
# print np.hstack((actions, plot_z.reshape(plot_z.shape[0], 1)))
value = np.amax(plot_z)
ind = np.argmax(plot_z)
x_tmp = actions[ind, 0] * np.cos(actions[ind, 1])
y_tmp = actions[ind, 0] * np.sin(actions[ind, 1])
''' plot using plot_trisurf (2D view of a 3D plot)'''
# print plot_x.shape, plot_y.shape, plot_z.shape
# ax = fig.gca(projection='3d')
# im = ax.plot_trisurf(plot_x, plot_y, plot_z, cmap=cm.jet, linewidth=0.2)
# ax.view_init(90.0, 270.0)
# plt.title('value of best action: %.3f' % value)
# fig.colorbar(im, shrink=0.5)
''' plot using tripcolor (2D plot) '''
# triang = tri.Triangulation(plot_x, plot_y)
color_min_inds = np.where(plot_z>0)[0]
if len(color_min_inds) > 0:
color_min = np.amin(plot_z[color_min_inds]) - 0.05
else:
color_min = 0.0
color_max = max(np.amax(plot_z),0.0)
# plt.tripcolor(plot_x, plot_y, plot_z, shading='flat', \
# cmap=plt.cm.rainbow, edgecolors='k',vmin=color_min, vmax=color_max)
plt.tripcolor(plot_x, plot_y, plot_z, shading='flat', \
cmap=plt.cm.rainbow, vmin=color_min, vmax=color_max)
if actions[ind, 0] > EPS:
plt.title('value of best action: %.3f \n action_x %.3f, action_y %.3f' \
% (value, x_tmp, y_tmp))
else:
plt.title('value of best action: %.3f \n action_speed %.3f, action_angle %.3f' \
% (value, actions[ind, 0], actions[ind, 1]))
plt.xlabel('v_x (m/s)')
plt.ylabel('v_y (m/s)')
cbar = plt.colorbar()
cbar.set_ticks([color_min,(color_min+color_max)/2.0,color_max])
cbar.ax.set_yticklabels(['%.3f'%color_min, \
'%.3f'%((color_min+color_max)/2.0), \
'%.3f'%color_max])
# plotting style (only show axis on bottom and left)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
plt.draw()
plt.pause(0.0001)
# raw_input()
def find_actions_theta(self, agent_state, default_action_theta):
# action = [speed theta]
num_near_actions = 10
num_rand_actions = 5
# zero action
zero_action = np.zeros((1,2))
# desired action
# cur_state = agent_state[0:2]
# goal = agent_state[6:8]
# nom_speed = agent_state[5]
# desired_act = self.computePrefVel(cur_state, goal, nom_speed)
desired_act = np.array([agent_state[5], \
np.arctan2(agent_state[7]-agent_state[1], \
agent_state[6]-agent_state[0])])
desired_actions = np.matlib.repmat(desired_act, 5, 1)
desired_actions[1,0] *= 0.80
desired_actions[2,0] *= 0.60
desired_actions[3,0] *= 0.40
desired_actions[4,0] *= 0.20
# near by actions: default action with perturbed action & reduced speed
tmp_action_theta = default_action_theta.copy()
# tmp_action_theta[0] = np.amax((0.75 * desired_act[0], default_action_theta[0]))
tmp_action_theta[0] = agent_state[5]
# tmp_action_theta[1] = desired_act[1]
near_actions = np.matlib.repmat(tmp_action_theta, num_near_actions, 1)
near_actions[:,1] += np.linspace(-np.pi/3.0, np.pi/3.0, num=num_near_actions)
near_actions_reduced = near_actions.copy()
near_actions_reduced_1 = near_actions.copy()
near_actions_reduced_2 = near_actions.copy()
near_actions_reduced[:,0] *= 0.75 #0.75
near_actions_reduced_1[:,0] *= 0.50 #0.75
near_actions_reduced_2[:,0] *= 0.25 #0.75
# near_actions = np.vstack((near_actions, near_actions_reduced))
near_actions = np.vstack((near_actions, near_actions_reduced, \
near_actions_reduced_1, near_actions_reduced_2))
# random actions: random actions with max speed at desired speed
# pref_speed = agent_state[5]
# rand_actions = np.zeros((num_rand_actions, 2))
# rand_actions[:,0] = pref_speed * np.random.rand(num_rand_actions,)
# rand_actions[:,1] = 2 * np.pi * np.random.rand(num_rand_actions,) - np.pi
# put all actions together
actions = np.vstack((default_action_theta, desired_actions, \
zero_action, near_actions)) #, rand_actions))
# avoid oscillation
# angles_diff = find_angle_diff(actions[:,1], agent_state[4])
# if agent_state[9] > EPS:
# valid_inds = np.where(angles_diff > -EPS)[0]
# actions = actions[valid_inds,:]
# elif agent_state[9] < -EPS:
# valid_inds = np.where(angles_diff < EPS)[0]
# actions = actions[valid_inds,:]
# plot_actions = self.plot_actions.copy()
# plot_actions[:,1] += default_action_theta[1]
# plot_actions[:,0] *= agent_state[5]
# actions = np.vstack((default_action_theta, desired_actions, \
# plot_actions))
# actions = np.vstack((default_action_theta, zero_action, near_actions, rand_actions))
actions[:,1] = (actions[:,1] + np.pi) % (np.pi * 2) - np.pi
# turning needs to be slower
# angle_diff_abs = abs(find_angle_diff(actions[:,1],agent_state[4]))
# actions[:,0] *= (1 - angle_diff_abs / (2*np.pi))
return actions
def find_actions_theta_dynConstr(self, agent_state, dt):
assert(dt > 0.9)
angle_lim = TURNING_LIMIT*min(dt,1.0)
# action = [speed theta]
cur_heading = agent_state[4]
desired_speed = agent_state[5]
# near by actions: default action
num_near_actions = 10
cur_speed = np.linalg.norm(agent_state[2:4])
actions = self.close_actions.copy()
# actions[:,0] *= max(0.75 * desired_speed, cur_speed)
actions[:,0] *= desired_speed
actions[:,1] = actions[:,1] + cur_heading
# desired action: add if desired_heading is within reachable range
# of cur_heading
desired_heading = np.arctan2(agent_state[7]-agent_state[1], \
agent_state[6]-agent_state[0])
angle_diff_abs = abs(find_angle_diff(desired_heading, cur_heading))
if angle_diff_abs < angle_lim:
desired_act = np.array([desired_speed, desired_heading])
desired_actions = np.matlib.repmat(desired_act, 5, 1)
desired_actions[1,0] *= 0.80
desired_actions[2,0] *= 0.60
desired_actions[3,0] *= 0.40
desired_actions[4,0] *= 0.20
# put all actions together
actions = np.vstack((desired_actions, actions))
# print '---- desired actions added'
# default action: add if default_heading is within reachable range
# of cur_heading
default_heading = np.arctan2(agent_state[3], agent_state[2])
angle_diff_abs = abs(find_angle_diff(default_heading, cur_heading))
if angle_diff_abs < angle_lim and cur_speed > 0.05:
default_act = np.array([cur_speed, default_heading])
default_actions = np.matlib.repmat(default_act, 2, 1)
default_actions[1,0] *= 0.75
# put all actions together
actions = np.vstack((default_actions, actions))
# print '---- default actions added'
# turning on spot
# min_turning_radius = 0.5
limit = TURNING_LIMIT
# print 'desired_speed, dt, limit', desired_speed, dt, limit
# print 'dt', dt
added_actions = np.array([[0.0, limit + cur_heading], \
[0.0, 0.66 * limit + cur_heading], \
[0.0, 0.33 * limit + cur_heading], \
[0.0, -0.33 * limit + cur_heading], \
[0.0, -0.66 * limit + cur_heading], \
[0.0, -limit + cur_heading]])
actions = np.vstack((actions, added_actions))
# getting unique rows
# print 'before', actions
actions = np.asarray(np.vstack([tuple(row) for row in actions]))
# raw_input()
# print 'after', actions
# raw_input()
actions[:,1] = (actions[:,1] + np.pi) % (np.pi * 2) - np.pi
# turning needs to be slower
# angle_diff_abs = abs(find_angle_diff(actions[:,1],agent_state[4]))
# actions[:,0] *= (1 - angle_diff_abs / (2*np.pi))
return actions
    # cost of action (smoothness)
    def find_state_action_cost(self, agent_state, actions_theta, dt_forward):
        """Smoothness penalty (<= 0) for each candidate action.

        Penalizes large heading and speed changes relative to the current
        velocity, scaled by the same goal-proximity factor used for the
        getting-close penalty elsewhere in this class.
        NOTE(review): dt_forward is accepted but never used here.
        """
        cur_heading = agent_state[4]
        angle_diff = find_angle_diff(actions_theta[:,1], cur_heading)
        speed_diff = (actions_theta[:,0] - np.linalg.norm(agent_state[2:4]))
        # normalized heading change in [0, 1]
        angle_diff_abs = abs(angle_diff) / np.pi
        # forgive small heading changes
        # NOTE(review): angle_diff_abs was already divided by pi, so the
        # np.pi/12.0 (~0.262) threshold corresponds to ~47 degrees of raw
        # heading change, not 15; also the first clause compares the SIGNED
        # angle (not its magnitude) to EPS — confirm both are intended
        zero_inds = np.where((actions_theta[:,1] < EPS) | (angle_diff_abs < np.pi/12.0) )[0]
        angle_diff_abs[zero_inds] = 0
        # normalized speed change; changes under half the pref speed are free
        speed_diff_abs = abs(speed_diff) / agent_state[5]
        zero_inds = np.where( (speed_diff_abs < 0.5) )[0]
        speed_diff_abs[zero_inds] = 0
        assert(SMOOTH_COST < 0)
        # each term is capped at -0.25 so the combined cost stays in [-0.5, 0]
        cost = np.clip(angle_diff_abs * SMOOTH_COST, -0.25, 0) + \
            np.clip(speed_diff_abs * SMOOTH_COST, -0.25, 0)
        # scaling factor built from distance to goal and preferred speed
        d = np.linalg.norm(agent_state[0:2] - agent_state[6:8])
        v = agent_state[5]
        getting_close_penalty = abs(GAMMA ** (d/DT_NORMAL) * (1.0 - GAMMA ** (-v/DT_NORMAL)))
        assert(np.all(cost <= 0))
        smoothness_cost = cost * getting_close_penalty
        return smoothness_cost
    # limited to this application
    # future implementation should generalize
    def find_action_rewards(self, agent_state, cur_dist, min_dists, dt_forward):
        """Per-action one-step reward based on proximity to other agents.

        agent_state: raw state vector (pos[0:2], ..., pref_speed[5],
            goal[6:8], radius[8], ...).
        cur_dist: current surface-to-surface distance to the closest agent.
        min_dists: per-action minimum separation over the forward rollout.
        Returns an array of rewards: COLLISION_COST when already in (or
        heading into) collision, graded closeness penalties for actions
        whose rollout enters GETTING_CLOSE_RANGE, 0 otherwise.
        """
        rewards = np.zeros((len(min_dists),))
        # already in collision: every action gets the full collision cost
        if cur_dist < 0:
            rewards[:] = COLLISION_COST
            return rewards
        # penalty magnitude grows as the agent nears the goal (d small)
        d = np.linalg.norm(agent_state[0:2] - agent_state[6:8])
        v = agent_state[5]
        getting_close_penalty = GAMMA ** (d/DT_NORMAL) * (1.0 - GAMMA ** (-v/DT_NORMAL))
        # actions whose rollout gets uncomfortably close (but not colliding)
        close_inds = np.where((min_dists > 0) & \
            (min_dists < GETTING_CLOSE_RANGE))[0]
        # base penalty if the CURRENT position is already too close
        if cur_dist < GETTING_CLOSE_RANGE:
            assert(GETTING_CLOSE_RANGE - cur_dist > 0)
            rewards[:] = getting_close_penalty
        # add penalty for FUTURE closeness
        rewards[close_inds] += getting_close_penalty
        # actions leading to collision override the closeness penalties
        # (close_inds and collision_inds are disjoint by construction)
        collision_inds = np.where(min_dists < 0)[0]
        rewards[collision_inds] = COLLISION_COST
        # re-weight close actions: current closeness x2, future closeness
        # x5 scaled by how deep into the close range the rollout goes
        scaling_cur = 2
        scaling_future = 5
        rewards[close_inds] = scaling_cur * rewards[close_inds] \
            + scaling_future * getting_close_penalty * (GETTING_CLOSE_RANGE - min_dists[close_inds])
        # keep closeness penalties strictly less severe than a collision
        rewards[close_inds] = np.clip(rewards[close_inds], COLLISION_COST+0.01, 0.0)
        assert(np.all(GETTING_CLOSE_RANGE - min_dists[close_inds]>0))
        return rewards
    def find_passing_side_cost(self, agent_state, actions_theta, other_agents_state, \
        other_agents_action_theta, dt_forward):
        """Penalty for actions that violate the preferred passing side.

        Rolls every candidate action forward by dt_forward, rebuilds the
        agent-centric state w.r.t. the CLOSEST other agent's propagated
        state, and penalizes the actions whose relative geometry falls
        into the 'bad' index sets returned by self.find_bad_inds
        (opposite-direction, same-direction, and tangent cases — defined
        elsewhere in this class).
        Returns one penalty per action; actions in no bad set get 0.
        """
        weight = self.training_passing_side_weight
        num_pts = len(actions_theta)
        num_states = 7 + 8 * (self.num_agents-1)
        # NOTE(review): this preallocation is dead — agent_centric_states is
        # fully overwritten by pedData.rawStates_2_agentCentricStates below
        agent_centric_states = np.zeros((num_pts, num_states))
        # roll the host agent forward under every candidate action
        agent_next_states = self.update_states(agent_state, \
            actions_theta, dt_forward)
        # only use the closest other agent (surface-to-surface distance)
        dist_2_others = [(np.linalg.norm(other_agent_state[0:2]-agent_state[0:2]) - \
            other_agent_state[8] - agent_state[8]) for other_agent_state in other_agents_state]
        agent_num = np.argmin(np.array(dist_2_others))
        # propagate that agent with its assumed action
        other_agent_next_state = self.update_state(other_agents_state[agent_num], \
            other_agents_action_theta[agent_num], dt_forward)
        other_agents_next_state = [other_agent_next_state]
        ref_prll_vec, ref_orth_vec, agent_centric_states = \
            pedData.rawStates_2_agentCentricStates(\
            agent_next_states, other_agents_next_state, self.num_agents)
        # classify each rolled-forward state into the bad passing-side sets
        bad_inds_oppo, bad_inds_same, bad_inds_tangent = \
            self.find_bad_inds(agent_centric_states)
        # scaling factor: same goal-proximity shaping as the getting-close
        # penalty elsewhere in this class
        d = np.linalg.norm(agent_state[0:2] - agent_state[6:8])
        v = agent_state[5]
        getting_close_penalty = np.ones((num_pts,)) \
            * GAMMA ** (d/DT_NORMAL) * (1.0 - GAMMA ** (-v/DT_NORMAL))
        penalty = np.zeros((num_pts,))
        penalty[bad_inds_oppo] = weight * getting_close_penalty[bad_inds_oppo]
        penalty[bad_inds_same] = 1.0 * weight * getting_close_penalty[bad_inds_same]
        penalty[bad_inds_tangent] = weight * getting_close_penalty[bad_inds_tangent]
        return penalty
# for RL
# back out information required by find_next_states_values
def find_next_state_pair_value_and_action_reward(self, agent_state, agent_next_state, \
other_agents_state, other_agents_next_state, dt_forward):
# agents
# action_xy = agent_next_state[2:4]
action_xy = (agent_next_state[0:2] - agent_state[0:2]) / dt_forward
action_speed = np.linalg.norm(action_xy)
if action_speed > EPS:
action_angle = np.arctan2(action_xy[1], action_xy[0])
else:
action_angle = agent_next_state[4]
action_theta = np.array([[action_speed, action_angle]])
# other agents (TODO: vectorize)
num_other_agents = len(other_agents_state)
other_actions_theta = []
for i, other_agent_next_state in enumerate(other_agents_next_state):
# other_action_xy = other_agent_next_state[2:4]
other_action_xy = (other_agent_next_state[0:2] - other_agents_state[i][0:2]) / dt_forward
other_action_speed = np.linalg.norm(other_action_xy)
if other_action_speed > EPS:
other_action_angle = np.arctan2(other_action_xy[1], other_action_xy[0])
else:
other_action_angle = other_agent_next_state[4]
other_actions_theta.append(np.array([other_action_speed, other_action_angle]))
state_value, action_reward = \
self.find_values_and_action_rewards(agent_state, action_theta, \
other_agents_state, other_actions_theta, dt_forward)
return state_value, action_reward
    def find_values_and_action_rewards(self, agent_state, actions_theta, \
        other_agents_state_in, other_agents_action=None, dt_forward=None):
        """Evaluate next-state values and one-step rewards for each action.

        agent_state: host agent raw state (pos[0:2], vel[2:4], heading[4],
            pref_speed[5], goal[6:8], radius[8], ...).
        actions_theta: (num_actions, 2) array of [speed, angle] candidates.
        other_agents_state_in: list of other agents' raw states (deep-copied
            before modification).
        other_agents_action: optional list of [speed, angle] per other
            agent; defaults to each agent's current velocity.
        dt_forward: rollout horizon in seconds.
        Returns (state_values, action_rewards): per-action value of the
        propagated state (0 for colliding actions, analytic near the goal,
        otherwise from the value network) and the immediate reward
        including collision/closeness and passing-side penalties.
        """
        actions_theta_copy = actions_theta.copy()  # kept for a disabled sanity check
        other_agents_state = copy.deepcopy(other_agents_state_in)
        num_actions = actions_theta.shape[0]
        # default: assume each other agent keeps its current velocity
        num_other_agents = len(other_agents_state)
        if other_agents_action is None:
            other_agents_action = []
            for tt in range(num_other_agents):
                other_agent_speed = np.linalg.norm(other_agents_state_in[tt][2:4])
                other_agent_angle = np.arctan2(other_agents_state_in[tt][3], other_agents_state_in[tt][2])
                other_agents_action.append(np.array([other_agent_speed, other_agent_angle]))
        # overwrite each other agent's velocity with its assumed action
        for tt in range(num_other_agents):
            other_agents_state[tt][2] = other_agents_action[tt][0] * np.cos(other_agents_action[tt][1])
            other_agents_state[tt][3] = other_agents_action[tt][0] * np.sin(other_agents_action[tt][1])
        # propagate the other agents forward by dt_forward
        other_agents_next_state = []
        for tt in range(num_other_agents):
            other_agents_next_state.append(self.update_state(other_agents_state[tt], \
                other_agents_action[tt], dt_forward))
        agent_desired_speed = agent_state[5]
        # compute values for each state
        state_values = np.zeros((num_actions,))
        # per-(action, other-agent) minimum separations and collision flags
        min_dists_mat = np.zeros((num_actions, num_other_agents))
        if_collide_mat = np.zeros((num_actions, num_other_agents))
        cur_dist_vec = np.zeros((num_other_agents,))
        for tt in range(num_other_agents):
            min_dists_mat[:,tt], if_collide_mat[:,tt] = self.if_actions_collide(agent_state,
                actions_theta, other_agents_state[tt], other_agents_action[tt], dt_forward)
            radius = agent_state[8] + other_agents_state[tt][8] + self.radius_buffer
            cur_dist_vec[tt] = np.linalg.norm(agent_state[0:2]-other_agents_state[tt][0:2]) - radius
        # reduce across other agents: the closest one dominates
        min_dists = np.min(min_dists_mat, axis=1)
        if_collide = np.max(if_collide_mat, axis=1)
        cur_dist = np.min(cur_dist_vec)
        action_rewards = self.find_action_rewards(agent_state, cur_dist, min_dists, dt_forward)
        non_collision_inds = np.where(if_collide==False)[0]
        # find states_values in batch; colliding actions keep value 0
        gamma = GAMMA
        dt_normal = DT_NORMAL
        if len(non_collision_inds) > 0:
            agent_next_states = self.update_states(agent_state, \
                actions_theta[non_collision_inds,:], dt_forward)
            # next states that reached the goal get the analytic value
            # gamma^(dist/dt_normal); the rest are scored by the value net
            dists_to_goal = np.linalg.norm(agent_next_states[:,0:2]-agent_next_states[:,6:8],axis=1)
            reached_goals_inds = np.where((dists_to_goal < DIST_2_GOAL_THRES) & \
                (min_dists[non_collision_inds]>GETTING_CLOSE_RANGE))[0]
            not_reached_goals_inds = np.setdiff1d(np.arange(len(non_collision_inds)), reached_goals_inds)
            non_collision_reached_goals_inds = non_collision_inds[reached_goals_inds]
            non_collision_not_reached_goals_inds = non_collision_inds[not_reached_goals_inds]
            state_values[non_collision_not_reached_goals_inds] = \
                self.find_states_values(agent_next_states[not_reached_goals_inds], other_agents_next_state)
            state_values[non_collision_reached_goals_inds] = \
                gamma ** (dists_to_goal[reached_goals_inds] / dt_normal)
        # sanity check: reward + value should stay below ~1
        try:
            assert(np.all(action_rewards + state_values < 1.0001))
        except:
            # NOTE(review): 'other_agent_state' and 'other_agent_action' are
            # not defined in this method (only the plural forms are), so this
            # debug path would itself raise NameError; left unchanged in this
            # documentation-only pass
            print('agent_state', agent_state)
            print('actions_theta', actions_theta)
            print('other_agent_state', other_agent_state)
            print('other_agent_action', other_agent_action)
            print('dt_forward', dt_forward)
            print('dists_to_goal', dists_to_goal)
            print('actions_rewerds', action_rewards)
            print('state_values', state_values)
            print('values', action_rewards + gamma ** (dt_forward * \
                agent_desired_speed / dt_normal) * state_values)
            assert(0)
        # penalize actions on the wrong passing side of the nearest agent
        passing_side_cost = self.find_passing_side_cost(agent_state, actions_theta,\
            other_agents_state, other_agents_action, dt_forward)
        return state_values, action_rewards + passing_side_cost #+ smoothness_cost
# assuming the other agent follows its current speed
# agent_state: agent's current state
# actions_theta: multiple actions considered
# other_agent_state: other agent's current state
def find_next_states_values(self, agent_state, actions_theta, \
other_agents_state, other_agents_action=None, dt_forward=None):
# making sure look ahead time is not too long (reaching goal)
# or too short (if agent's desired speed is too slow)
if dt_forward is None:
agent_speed = agent_state[5]
dt_forward_max = max(self.dt_forward, 0.5/agent_speed)
# dt_forward_max = self.dt_forward
dist_to_goal = np.linalg.norm(agent_state[6:8]- agent_state[0:2])
time_to_goal = dist_to_goal / agent_speed
dt_forward = min(dt_forward_max, time_to_goal) #1.0
state_values, action_rewards = \
self.find_values_and_action_rewards(agent_state, actions_theta, \
other_agents_state, other_agents_action, dt_forward)
gamma = GAMMA
dt_normal = DT_NORMAL
agent_desired_speed = agent_state[5]
num_states = len(actions_theta)
# method 1
# dt_forward_vec = np.ones((num_states,))
# dt_forward_vec *= dt_forward
# method 2
dt_forward_vec = 0.2 * np.ones((num_states,)) * dt_forward
dt_forward_vec += 0.8 * actions_theta[:,0] / agent_desired_speed * dt_forward
# print 'dt_forwards', dt_forward
# print 'dt_forwards_vec', dt_forward_vec
# raw_input()
return action_rewards + gamma ** (dt_forward_vec * \
agent_desired_speed / dt_normal) * state_values
	def find_feasible_actions(self, agent_state, static_constraints=None):
		"""Build the set of candidate [speed, angle] actions for the agent.

		agent_state: raw state vector ([2:4]=velocity, [4]=heading,
			[5]=pref_speed).
		static_constraints: optional array whose rows hold at least
			[max_speed, angle] (from static obstacles in the map); when given,
			candidate speeds are scaled down by a per-heading interpolated
			speed limit.
		Returns an array of [speed, angle] action rows.
		"""
		if self.mode == 'no_constr':
			''' actions choice 1 '''
			# seed the action sampler with the current speed and heading
			default_action_xy = agent_state[2:4]
			speed = np.linalg.norm(default_action_xy)
			# angle_select = np.arctan2(default_action_xy[1], default_action_xy[0])
			angle_select = agent_state[4]
			default_action_theta = np.array([speed, angle_select])
			actions_theta = self.find_actions_theta(agent_state, default_action_theta)
		elif self.mode == 'rotate_constr':
			''' actions choice 2 '''
			# need to handle acceleration better (limits on changing velocity as a function of time)
			actions_theta = self.find_actions_theta_dynConstr(agent_state, 1.0)
		else:
			assert(0)
		# prune actions if there are static constraints (from static obstacles in map)
		# static_constraints: max_speeds, angles
		if static_constraints is not None:
			# assumes constraint rows are evenly spaced in angle -- TODO confirm
			angle_incr = abs(static_constraints[2,1] - static_constraints[1,1])
			# bracket each action angle between two adjacent constraint angles
			upper_inds = np.digitize(actions_theta[:,1], static_constraints[:,1])
			lower_inds = upper_inds - 1
			try:
				assert(np.all(lower_inds<len(static_constraints)))
			except AssertionError:
				# dump the offending data before aborting
				print('lower_inds:',lower_inds)
				print('len(actions_theta):', len(actions_theta))
				bad_ind = np.where(lower_inds>=len(actions_theta))[0]
				print('actions theta:',actions_theta)
				print('bad_ind', actions_theta[bad_ind,1])
				print('static_constraints', static_constraints)
				assert(0)
			# linearly interpolate the speed limit between the two bracketing angles
			alpha = (actions_theta[:,1] - static_constraints[lower_inds,1]) / angle_incr
			max_speeds_int = alpha * static_constraints[upper_inds,0] + (1-alpha) * static_constraints[lower_inds,0]
			pref_speed = agent_state[5]
			# scale candidate speeds down by the interpolated speed limit
			actions_theta[:,0] = actions_theta[:,0] * max_speeds_int / pref_speed
			# unique rows
			# NOTE(review): building via a set makes the row order
			# nondeterministic across runs -- verify callers don't rely on order
			actions_theta = np.asarray(np.vstack({tuple(row) for row in actions_theta}))
		return actions_theta
# state raw is in global frame
# velocity output should also be in global frame
def find_next_action(self, agent_state, other_agents_state, other_agents_action=None, static_constraints=None):
# state_raw = 2x [pos.x, pos.y, vel.x, vel.y,prefVelocity.x, \
# prefVelocity.y, goals[0].x, goals[0].y]
# state_nn = [dist_to_goal, pref_speed, vx, vy, \
# other_vx, other_vx, rel_pos_x, rel_pos_y]
actions_theta = self.find_feasible_actions(agent_state, static_constraints=static_constraints)
state_values = self.find_next_states_values(agent_state, \
actions_theta, other_agents_state, other_agents_action)
# print "actions_theta:", actions_theta
# print "state_values:", state_values
best_action_ind = np.argmax(state_values)
best_action = actions_theta[best_action_ind]
# print '------'
# print 'best_action_ind', best_action_ind
# print 'best_action', best_action
# best_action_xy = np.array([best_action[0] * np.cos(best_action[1]), best_action[0] * np.sin(best_action[1])])
# select_action_xy = np.array([actions_theta[1,0] * np.cos(actions_theta[1,1]), actions_theta[1,0] * np.sin(actions_theta[1,1])])
# next_state_best_action_dist = np.linalg.norm(agent_state[6:8] - agent_state[0:2] - 1.0 * best_action_xy)
# next_state_straight_line_dist = np.linalg.norm(agent_state[6:8] - agent_state[0:2] - 1.0 * select_action_xy)
# bnd_best = 0.97 ** (next_state_best_action_dist / 0.2)
# bnd_straight = 0.97 ** (next_state_straight_line_dist / 0.2)
# actions_theta[:,1] -= np.arctan2(agent_state[3], agent_state[2])
# print 'best action: ', best_action, '; value: ', state_values[best_action_ind], 'bnd: ', bnd_best
# print 'straight line action: ', actions_theta[1,:], '; value: ', state_values[1], 'bnd: ', bnd_straight
# print 'dist_2_goal', np.linalg.norm(agent_state[6:8] - agent_state[0:2])
# print 'dist_2_other_agent', np.linalg.norm(agent_state[0:2] - other_agents_state[0][0:2]) \
# - agent_state[8] - other_agents_state[0][8]
# actions_theta[:,1] += np.arctan2(agent_state[3], agent_state[2])
# print 'cur_value: %f, best_next_value: %f, best action (r, theta) %f, %f' % \
# (self.current_value, state_values[best_action_ind], best_action[0], best_action[1])
if self.if_collide_with_other_agents(agent_state, other_agents_state):
self.current_value = COLLISION_COST
else:
self.current_value = state_values[best_action_ind]
# print 'best_action', best_action
# raw_input()
# print 'action_chosen', best_action[0] * np.cos(best_action[1]), \
# best_action[0] * np.sin(best_action[1])
return best_action
	def find_subgoal(self, agent_state,\
		other_agents_state, min_dist, other_agents_action=None, static_constraints=None):
		"""Select a navigation subgoal for the agent.

		Candidate subgoals are directions whose diffusion-map cost-to-go
		(static_constraints[:,2]) exceeds 6.5 and which roughly point toward
		the diffusion-map goal. NOTE(review): the candidate-evaluation branch
		is currently disabled via 'if False:', so this always returns the
		goal already stored in agent_state[6:8].
		Returns (best_subgoal, lower_cost_to_go_subgoals) where the second
		element is None when no candidate search was performed.
		"""
		# Find subgoals that are far away and have good diffusion costs
		diff_map_goal = agent_state[6:8]
		angle_to_diff_map_goal = np.arctan2(diff_map_goal[1]-agent_state[1],diff_map_goal[0]-agent_state[0])
		lower_cost_to_go_inds = np.where((static_constraints[:,2] > 6.5) & \
			(abs(static_constraints[:,1]-angle_to_diff_map_goal) < np.pi/8))[0]
		print('MIN_DIST:', min_dist)
		# original (disabled) gate: if len(lower_cost_to_go_inds)>0:# and min_dist < 7.0:
		if False:
			# project each candidate (range, angle) into global x/y subgoals
			lower_cost_to_go_actions = static_constraints[lower_cost_to_go_inds,:]
			x = lower_cost_to_go_actions[:,2] * np.cos(lower_cost_to_go_actions[:,1]) + agent_state[0]
			y = lower_cost_to_go_actions[:,2] * np.sin(lower_cost_to_go_actions[:,1]) + agent_state[1]
			lower_cost_to_go_subgoals = np.column_stack((x,y))
			print('lower cost2go actions:', lower_cost_to_go_actions)
			# build one hypothetical state per candidate subgoal
			agent_states = np.zeros((len(lower_cost_to_go_inds),len(agent_state)))
			for i in range(len(lower_cost_to_go_inds)):
				state = copy.deepcopy(agent_state)
				state[6:8] = lower_cost_to_go_subgoals[i,0:2]
				agent_states[i] = state
			# Query the NN about every potential subgoal and pick the best one
			state_values = self.find_states_values(agent_states, other_agents_state)
			best_subgoal_ind = np.argmax(state_values)
			best_subgoal = lower_cost_to_go_subgoals[best_subgoal_ind]
			# NOTE(review): mutates the caller's agent_state in place
			agent_state[6:8] = best_subgoal
			print('Good Subgoal Found')
		else:
			# use subgoal directly from diffusion map
			lower_cost_to_go_subgoals = None
			best_subgoal = agent_state[6:8]
			print('No good subgoal found.')
		return best_subgoal, lower_cost_to_go_subgoals
# def find_next_action_using_cost_to_go(self, agent_state,\
# other_agents_state, min_dist, other_agents_action=None, static_constraints=None):
# # Use that best subgoal to query every feasible action
# actions_theta = self.find_feasible_actions(agent_state, static_constraints=static_constraints)
# state_values = self.find_next_states_values(agent_state, \
# actions_theta, other_agents_state, other_agents_action)
# best_action_ind = np.argmax(state_values)
# best_action = actions_theta[best_action_ind]
# if state_values[best_action_ind] < 0.2:
# best_action[0] = 0.0
# best_action[1] = agent_state[4]
# print '------'
# # print 'best_action_ind', best_action_ind
# print 'best_action', best_action
# if self.if_collide_with_other_agents(agent_state, other_agents_state):
# self.current_value = COLLISION_COST
# else:
# self.current_value = state_values[best_action_ind]
# return best_action
def if_collide_with_other_agents(self, agent_state, other_agents_state):
num_other_agents = len(other_agents_state)
for other_agent_state in other_agents_state:
if self.if_pos_collide(agent_state[0:2], other_agent_state[0:2], \
agent_state[8] + other_agent_state[8]):
return True
return False
	def if_terminal_state(self, agent_state, other_agents_state):
		"""Classify the agent's state: COLLIDED, REACHED_GOAL, or NON_TERMINAL.

		REACHED_GOAL requires the agent to be within DIST_2_GOAL_THRES of its
		goal while the other agent is either clear of it (beyond combined
		radii + GETTING_CLOSE_RANGE) or at its own goal.
		NOTE(review): the loop returns REACHED_GOAL after the condition holds
		for the first other agent only -- confirm this is intended for >1
		other agents.
		"""
		# check collision
		if self.if_collide_with_other_agents(agent_state, other_agents_state):
			return COLLIDED
		# check if reached goal
		for other_agent_state in other_agents_state:
			if not ((( np.linalg.norm(agent_state[0:2] - agent_state[6:8])<DIST_2_GOAL_THRES) \
				and (np.linalg.norm(agent_state[0:2] - other_agent_state[0:2]) > \
				agent_state[8] + other_agent_state[8] + GETTING_CLOSE_RANGE or \
				np.linalg.norm(other_agent_state[0:2] - other_agent_state[6:8])<DIST_2_GOAL_THRES))):
				break
			return REACHED_GOAL
		return NON_TERMINAL
# find a random action
def find_rand_action(self, agent_state, other_agents_state, \
other_agents_action=None, isBestAction=False, static_constraints=None):
rand_float = np.random.rand()
cur_state = agent_state[0:2]
goal = agent_state[6:8]
nom_speed = agent_state[5]
# if rand_float < 0.3: # prefered speed toward goal
# action = self.computePrefVel(cur_state, goal, nom_speed)
# elif rand_float < 0.6: # zero action
# action = np.zeros((2,))
# else: # random action
# action = np.zeros((2,))
# action[0] = np.random.uniform(0, nom_speed)
# action[1] = np.random.uniform(0, 2 * np.pi) - np.pi
actions_theta = self.find_feasible_actions(agent_state, static_constraints=static_constraints)
state_values = self.find_next_states_values(agent_state, \
actions_theta, other_agents_state, other_agents_action)
if isBestAction:
max_inds = np.argpartition(state_values, -4)[-1:]
else:
max_inds = np.argpartition(state_values, -4)[-5:-2]
# ind = np.random.randint(len(actions_theta))
ind = np.random.randint(len(max_inds))
ind = max_inds[ind]
action = actions_theta[ind,:]
return action
	def update_state(self, state, action_theta, dt):
		"""Propagate one raw agent state forward by dt under a [speed, angle] action.

		state: 10-element raw state ([0:2]=pos, [2:4]=vel, [4]=heading,
			[5]=pref_speed, [9]=oscillation bookkeeping).
		action_theta: [speed, heading] command.
		Returns a deep-copied next state; the input state is not modified.
		"""
		speed = action_theta[0]
		angle_select = action_theta[1]
		next_state = copy.deepcopy(state)
		pref_speed = state[5]
		# integrate position and set velocity from the commanded action
		next_state[0] += speed * np.cos(angle_select) * dt
		next_state[1] += speed * np.sin(angle_select) * dt
		next_state[2] = speed * np.cos(angle_select)
		next_state[3] = speed * np.sin(angle_select)
		next_state[5] = pref_speed
		# turning direction relative to the previous heading
		angle_diff = find_angle_diff(action_theta[1], state[4])
		# state[9] tracks an oscillation term:
		#  - near zero: seed with the sign of the current turn (magnitude 0.11)
		#  - sign flip (turning the other way): accumulate the reversal
		#  - same direction: decay magnitude by 0.1 toward zero
		if abs(next_state[9]) < EPS:
			next_state[9] = 0.11 * np.sign(angle_diff)
		elif next_state[9] * angle_diff < 0:
			next_state[9] = max(-np.pi, min(np.pi, -next_state[9] + angle_diff))
		else:
			next_state[9] = np.sign(next_state[9]) * max(0.0, abs(next_state[9])-0.1)
		if self.mode == 'no_constr':
			# heading changes instantaneously
			next_state[4] = angle_select
		elif self.mode == 'rotate_constr':
			# limit heading change by a minimum turning radius
			min_turning_radius = 0.5
			limit = pref_speed / min_turning_radius * dt
			angle_diff = find_angle_diff(action_theta[1], state[4])
			delta_heading_lb = - limit
			delta_heading_ub = limit
			next_state[4] = state[4] + np.clip(angle_diff, \
				delta_heading_lb, delta_heading_ub)
			# wrap heading back into [-pi, pi)
			next_state[4] = (next_state[4] + np.pi) % (2 * np.pi) - np.pi
		else:
			assert(0)
		return next_state
	def update_states(self, state, actions_theta, dt):
		"""Vectorized update_state(): propagate one raw state under every
		action row of actions_theta at once.

		Returns an array of shape (num_actions, len(state)), one next state
		per candidate action.
		"""
		speeds = actions_theta[:,0]
		angles_select = actions_theta[:,1]
		num_actions = actions_theta.shape[0]
		next_states = np.matlib.repmat(state, num_actions, 1)
		pref_speed = state[5]
		# integrate positions and set velocities from the commanded actions
		next_states[:,0] += speeds * np.cos(angles_select) * dt
		next_states[:,1] += speeds * np.sin(angles_select) * dt
		next_states[:,2] = speeds * np.cos(angles_select)
		next_states[:,3] = speeds * np.sin(angles_select)
		next_states[:,5] = pref_speed
		# turning direction of each action relative to the previous heading
		angles_diff = find_angle_diff(actions_theta[:,1], state[4])
		# column 9 is the oscillation term (see update_state for the scalar
		# version): seed near-zero entries, accumulate sign flips, decay
		# same-direction entries; the three index sets are made disjoint
		zero_inds = np.where(abs(next_states[:,9]) < EPS)[0]
		oscillation_inds = np.where(next_states[:,9] * angles_diff < 0)[0]
		oscillation_inds = np.setdiff1d(oscillation_inds, zero_inds)
		same_dir_inds = np.where(next_states[:,9] * angles_diff > -EPS)[0]
		same_dir_inds = np.setdiff1d(same_dir_inds, np.union1d(zero_inds, oscillation_inds))
		next_states[zero_inds,9] = 0.11 * np.sign(angles_diff[zero_inds])
		next_states[oscillation_inds,9] = np.clip(-next_states[oscillation_inds,9] \
			+ angles_diff[oscillation_inds], -np.pi, np.pi)
		next_states[same_dir_inds,9] = np.sign(next_states[same_dir_inds,9]) * \
			np.clip(abs(next_states[same_dir_inds,9]) - 0.1, 0.0, np.pi)
		if self.mode == 'no_constr':
			next_states[:,4] = angles_select
		elif self.mode == 'rotate_constr':
			# limit heading change by a minimum turning radius
			min_turning_radius = 0.5
			limit = pref_speed / min_turning_radius * dt
			angles_diff = find_angle_diff(actions_theta[:,1], state[4])
			# NOTE(review): max_angle_diff fed only a commented-out sanity check
			max_angle_diff = np.amax(abs(angles_diff))
			delta_heading_lb = -limit
			delta_heading_ub = limit
			next_states[:,4] = state[4] + np.clip(angles_diff, \
				delta_heading_lb, delta_heading_ub)
			# wrap headings back into [-pi, pi)
			next_states[:,4] = (next_states[:,4] + np.pi) % (2 * np.pi) - np.pi
		else:
			assert(0)
		return next_states
# check collision
def if_action_collide(self, agent_state, agent_action, other_agent_state, \
other_agent_action, delta_t):
# sampling version
# t = 0.0
# incr = np.min((delta_t, 0.25))
# radius = agent_state[8] + other_agent_state[8] + self.radius_buffer
# agent_v = np.zeros((2,))
# agent_v[0] = agent_action[0] * np.cos(agent_action[1])
# agent_v[1] = agent_action[0] * np.sin(agent_action[1])
# other_agent_v = other_agent_state[2:4]
# T = np.linspace(0, delta_t, num=np.ceil((delta_t+0.0001) / incr))
# for t in T:
# agent_pos = agent_state[0:2] + agent_v * t
# other_agent_pos = other_agent_state[0:2] + other_agent_v * t
# if self.if_pos_collide(agent_pos, other_agent_pos, radius):
# return True
# return False
# analytical version
agent_v = np.zeros((2,))
agent_v[0] = agent_action[0] * np.cos(agent_action[1])
agent_v[1] = agent_action[0] * np.sin(agent_action[1])
other_agent_v = np.zeros((2,))
other_agent_v[0] = other_agent_action[0] * np.cos(other_agent_action[1])
other_agent_v[1] = other_agent_action[0] * np.sin(other_agent_action[1])
# other_agent_v[0] = other_agent_action[0] * np.cos(other_agent_action[1] + np.random.randn()*np.pi/36)
# other_agent_v[1] = other_agent_action[0] * np.sin(other_agent_action[1] + np.random.randn()*np.pi/36)
# modifying distance calculation
rel_pos_angle = np.arctan2(other_agent_state[1] - agent_state[1], \
other_agent_state[0] - agent_state[0])
agent_speed_angle = np.arctan2(agent_v[1], agent_v[0])
angle_diff = find_angle_diff(rel_pos_angle, agent_speed_angle)
# other agent in front, zero out other agent's speed in the same direction
if abs(angle_diff) < np.pi:
dot_product = np.sum(agent_v * other_agent_v)
if dot_product > EPS:
other_agent_v = other_agent_v - dot_prouct / np.linalg.norm(agent_v) * agent_v
# blind spot
# elif abs(angle_diff) > 3.0 * np.pi/4.0:
radius = agent_state[8] + other_agent_state[8] + self.radius_buffer
x1 = agent_state[0:2]
x2 = agent_state[0:2] + delta_t * agent_v
y1 = other_agent_state[0:2]
y2 = other_agent_state[0:2] + delta_t * other_agent_v
# return if_segs_collide(x1, x2, y1, y2, radius)
min_dist = ind_dist_between_segs(x1, x2, y1, y2)
# min_dist = min_dist - 0.03 * np.random.random()
cur_dist = np.linalg.norm(x1-y1)
if cur_dist < radius:
if_collide = True
else:
if_collide = min_dists < radius
# dist_future = x2 - (0.75*y1 + 0.25*y2)
# dist_future = np.linalg.norm(dist_future)
# min_dist = min(dist_future, min_dist)
min_dist = min_dist - radius
return min_dist, min_dist < 0
# check collision
	def if_actions_collide(self, agent_state, agent_actions, other_agent_state, \
		other_agent_action, delta_t):
		"""Vectorized collision check of all candidate agent actions against
		one other agent over a horizon of delta_t.

		agent_actions: array of [speed, angle] rows.
		Returns (min_dists, if_collide): per-action minimum separation minus
		the combined radius, and per-action collision booleans.
		"""
		agent_pref_speed = agent_state[5]
		other_agent_speed = other_agent_action[0]
		radius = agent_state[8] + other_agent_state[8] + self.radius_buffer
		num_actions = agent_actions.shape[0]
		if_collide = np.zeros((num_actions), dtype=bool)
		# NOTE(review): dtype=bool on this ones() looks unintended -- the
		# multiplication promotes to float anyway, so values are correct
		min_dists = (radius + GETTING_CLOSE_RANGE + EPS) * np.ones((num_actions), dtype=bool)
		# two agents are too far away for collision
		if np.linalg.norm(agent_state[0:2] - other_agent_state[0:2]) > \
			(agent_pref_speed+other_agent_speed) * delta_t + radius:
			return min_dists, if_collide # default to no collision
		# per-action agent velocities; other agent's velocity is replicated
		agent_vels = np.zeros((num_actions,2))
		agent_vels[:,0] = agent_actions[:,0] * np.cos(agent_actions[:,1])
		agent_vels[:,1] = agent_actions[:,0] * np.sin(agent_actions[:,1])
		other_agent_v = np.zeros((2,))
		other_agent_v[0] = other_agent_action[0] * np.cos(other_agent_action[1])
		other_agent_v[1] = other_agent_action[0] * np.sin(other_agent_action[1])
		other_agent_vels = np.matlib.repmat(other_agent_v, num_actions, 1)
		# angles: bearing to the other agent, per-action headings, other's heading
		p_oa_angle = np.arctan2(other_agent_state[1] - agent_state[1], \
			other_agent_state[0] - agent_state[0])
		agent_speed_angles = np.arctan2(agent_vels[:,1], agent_vels[:,0])
		other_speed_angle = np.arctan2(other_agent_v[1], other_agent_v[0])
		heading_diff = find_angle_diff(agent_speed_angles, other_speed_angle)
		agent_heading_2_other = find_angle_diff(agent_speed_angles, p_oa_angle)
		# half-angle of the cone within which the other agent counts as "in front"
		r = agent_state[8] + other_agent_state[8] + GETTING_CLOSE_RANGE
		coll_angle = abs(np.arcsin(min(0.95, r / np.linalg.norm(\
			agent_state[0:2]-other_agent_state[0:2]))))
		# other agent in front, zero out other agent's speed in the same direction
		front_inds = np.where( (abs(agent_heading_2_other)<coll_angle) & \
			(abs(heading_diff) < np.pi/2.0))[0]
		if len(front_inds)>0:
			# project the other agent's velocity off the agent's direction
			# (halved) for the in-front actions
			dot_product = np.sum(agent_vels * other_agent_vels, axis=1)
			valid_inds = np.where(agent_vels[:,0]>EPS)[0]
			dot_product[valid_inds] /= np.linalg.norm(agent_vels[valid_inds,:], axis=1)
			other_agent_vels[front_inds,:] = other_agent_vels[front_inds,:] - \
				np.matlib.repmat(dot_product[front_inds], 2, 1).transpose()\
				* agent_vels[front_inds,:] / 2.0
		# analytical closest approach between constant-velocity segments,
		# capped at a 1s horizon
		x1 = agent_state[0:2] # shape = (2,)
		x2 = x1 + min(1.0, delta_t) * agent_vels # shape = (num_actions, 2)
		y1 = other_agent_state[0:2] # shape = (2,)
		y2 = y1 + min(1.0, delta_t) * other_agent_vels # shape = (num_actions, 2)
		min_dists = gen_tc.find_dist_between_segs(x1, x2, y1, y2)
		# already overlapping now -> every action collides
		cur_dist = np.linalg.norm(x1-y1)
		if cur_dist < radius:
			if_collide[:] = True
		else:
			if_collide = min_dists < radius
		min_dists = min_dists - radius
		return min_dists, if_collide
# sampling vector version
# incr = 0.25 #np.min((0.25, delta_t))
# T = np.linspace(0, delta_t, num=np.ceil((delta_t+0.0001) / incr))
# for t in T:
# dist_vec = agent_vels * t + np.matlib.repmat(agent_state[0:2] - \
# (other_agent_state[0:2] + other_agent_v * t), num_actions, 1)
# if_collide_pt = np.linalg.norm(dist_vec, axis=1) < radius
# if_collide = np.logical_or(if_collide, if_collide_pt)
# sampling loop version
# for i in range(num_actions):
# t = 0.0
# while t <= delta_t:
# agent_pos = agent_state[0:2] + agent_vels[i,:] * t
# other_agent_pos = other_agent_state[0:2] + other_agent_v * t
# if self.if_pos_collide(agent_pos, other_agent_pos, radius):
# if_collide[i] = True
# break
# t += np.min((incr, delta_t))
# return if_collide
# check collision between 2 pairs of initial and end states
# def if_state_pairs_collide(self, agent_state, agent_next_state, \
# other_agent_state, other_agent_next_state):
# radius = agent_state[8] + other_agent_state[8] + self.radius_buffer
# num_incr = 9
# agent1_incr = (agent_next_state[0:2] - agent_state[0:2]) / float(num_incr)
# agent2_incr = (other_agent_next_state[0:2] - other_agent_state[0:2]) / float(num_incr)
# for i in range(num_incr + 1):
# agent_pos = agent_state[0:2] + agent1_incr * i
# other_agent_pos = other_agent_state[0:2] + agent2_incr * i
# if self.if_pos_collide(agent_pos, other_agent_pos, radius):
# return True
# return False
def if_pos_collide(self, agent1_pos, agent2_pos, radius):
if np.linalg.norm(agent1_pos - agent2_pos) < radius:
return True
return False
# query state values from the neural network
# don't consider terminal state
# (expect the neural network to have learned values of the terminal states)
def find_states_values(self, agent_states, other_agents_state):
# print 'agent_states in find_states_values', agent_states
# print 'other_agent_state', other_agent_state
if agent_states.ndim == 1:
ref_prll, ref_orth, state_nn = \
pedData.rawState_2_agentCentricState( \
agent_states, other_agents_state, self.num_agents)
# try upper bounding with max
value = np.squeeze(self.nn.make_prediction_raw(state_nn).clip(min=-0.25, max=1.0))
# print 'state_nn', state_nn
# print 'before, value', value
upper_bnd = GAMMA ** (state_nn[0] / DT_NORMAL)
value = min(upper_bnd, value)
# print 'after, value', value
return value
else:
num_states = agent_states.shape[0]
# states_nn = np.zeros((num_states, 14))
# for i in range(num_states):
# ref_prll, ref_orth, states_nn[i,:] = \
# pedData.rawState_2_agentCentricState( \
# agent_states[i,:], other_agent_state)
ref_prll_vec, ref_orth_vec, states_nn = \
pedData.rawStates_2_agentCentricStates(\
agent_states, other_agents_state, self.num_agents)
# print 'states_nn[0,:]', states_nn[0,:]
values = np.squeeze(self.nn.make_prediction_raw(states_nn).clip(min=-0.25, max=1.0))
# print 'states_nn', states_nn
# print 'before, values', values
upper_bnd = GAMMA ** (states_nn[:,0] / DT_NORMAL)
# print 'upper_bnd', upper_bnd
# print 'values', values
values = np.minimum(upper_bnd, values)
# print 'values[-1]', values
# raw_input()
return values
def find_agent_next_state(self, agent_state, other_agents_state, \
other_agents_action, dt):
action_theta = self.find_next_action(agent_state, \
other_agents_state, other_agents_action)
next_state = self.update_state(agent_state, action_theta, dt)
return next_state
def find_agent_next_rand_state(self, agent_state, other_agents_state, random_action_theta, dt):
# action_theta = self.find_rand_action(agent_state, other_agent_state)
next_state = self.update_state(agent_state, random_action_theta, dt)
return next_state
def testcase_2_agentStates(self, test_case, ifRandSpeed=False, ifRandHeading=False):
assert(test_case[0, 5] > 0.1 or ifRandSpeed==True)
num_agents = len(test_case)
agents_states = []
for i in range(num_agents):
agent_state = np.zeros((10,));
agent_state[0:2] = test_case[i,0:2]; agent_state[6:8] = test_case[i,2:4];
agent_state[9] = 0.0
if ifRandSpeed == True:
agent_state[5] = np.random.rand() * (1.5-0.1) + 0.1
agent_state[8] = 0.3 + np.random.uniform(0,0.2)
else:
agent_state[5] = test_case[i,4]
agent_state[8] = test_case[i,5]
# assume originally facing the goal direction
if ifRandHeading == True:
agent_state[4] = np.random.rand() * 2 * np.pi - np.pi
else:
agent_state[4] = np.arctan2(test_case[i,3]-test_case[i,1], \
test_case[i,2]-test_case[i,0])
# if agent is not stationary, assume it's travelling along its initially heading
if np.linalg.norm(agent_state[0:2] - agent_state[6:8]) > EPS:
heading_dir = np.array([np.cos(agent_state[4]), np.sin(agent_state[4])])
# agent_state[2:4] = agent_state[5] * heading_dir
try:
assert(agent_state[5] > 0.05)
except:
print(agent_state)
print(test_case)
assert(0)
agents_states.append(agent_state)
return agents_states
# generate a trajectory using the current value neural network
# test_case: [ [start_x, start_y, end_x, end_y, desired_speed, radius],
# [start_x, start_y, end_x, end_y, desired_speed, radius] ]
# rl_epsilon: explore a new action with rl_epsilon probability
# ifRandSpeed: whether generate random [desired_speed and radius] for test case
# figure_name: name of figure, "no_plot" for no figure
def generate_traj(self, test_case, rl_epsilon=0.0, \
ifRandSpeed=False, figure_name=None, stopOnCollision=True, ifRandHeading=False, ifNonCoop=False):
time_start = time.time()
num_agents = test_case.shape[0]
time_vec = []
dt_vec = []
traj_raw_multi = []
agent_num_thres = self.num_agents / 4.0
if_non_coop_vec = np.zeros((num_agents,), dtype=bool)
if ifNonCoop:
if_non_coop_vec = np.random.randint(2, size=(num_agents,))
for i in range(num_agents):
traj_raw_multi.append([])
time_to_complete = np.zeros((num_agents,))
agents_dt = np.ones((num_agents,))
if_completed_vec = np.zeros((num_agents,), dtype=bool)
time_past_one_ind = 0 # ind of time one second ago in time_vec
filtered_actions_theta = [None] * num_agents
filtered_actions_theta_close = [None] * num_agents
filtered_actions_theta_far = [None] * num_agents
dist_mat = np.zeros((num_agents, num_agents))
# initialize
agents_states = self.testcase_2_agentStates(test_case, ifRandSpeed, ifRandHeading)
next_agents_states = copy.deepcopy(agents_states)
agents_speeds = np.zeros((num_agents,))
max_t_vec = np.zeros((num_agents,))
for i in range(num_agents):
agents_speeds[i] = agents_states[i][5]
max_t_vec[i] = 3.0 * (np.linalg.norm(test_case[i, 2:4] - test_case[i, 0:2]) / agents_speeds[i])
max_t = np.amax(max_t_vec)
max_t = 0.2 ###################### delete later
dt_nominal = 0.1
t = 0
# for epsilon random action
rand_action_time_thres = 0.5
if_agents_rand_action = np.zeros((num_agents,), dtype=bool)
agents_rand_action_duration = np.zeros((num_agents, ))
agents_rand_action_theta = np.zeros((num_agents,2))
while True:
# append to stats
time_vec.append(t)
for i in range(num_agents):
traj_raw_multi[i].append(agents_states[i].copy())
# compute if completed and dt
# dt_nominal = 0.1
for i in range(num_agents):
dist_2_goal = np.linalg.norm(agents_states[i][0:2] - agents_states[i][6:8])
if_completed_vec[i] = dist_2_goal < DIST_2_GOAL_THRES
agents_dt[i] = dist_2_goal/agents_speeds[i]
if if_completed_vec[i]:
agents_states[i][2:4] = 0
agents_dt[i] = 1
dt = min(dt_nominal,np.amin(agents_dt))
t += dt
dt_vec.append(dt)
# collision (TODO)
for i in range(num_agents):
for j in range(i):
dist_mat[i,j] = np.linalg.norm(agents_states[i][0:2] - agents_states[j][0:2]) - \
agents_states[i][8] - agents_states[j][8]
dist_mat[j,i] = dist_mat[i,j]
min_sepDist = np.min(dist_mat)
if stopOnCollision and min_sepDist < 0:
break
# time too long, probably diverged
if t > max_t:
break
# completed
if np.all(if_completed_vec):
break
# idling, not reaching goal (TODO)
# filter velocities
# while time_vec[-1] - time_vec[time_past_one_ind] > 0.45:
while t - time_vec[time_past_one_ind] > 0.45:
time_past_one_ind += 1
dt_past_vec = np.asarray(dt_vec[time_past_one_ind:])
for i in range(num_agents):
if len(time_vec) != 0:
# if np.random.rand()>0.5 or filtered_actions_theta_close[i] is None:
past_vel = np.asarray(traj_raw_multi[i][time_past_one_ind:])[:,2:5]
# filtered_actions_theta[i] = nn_nav.filter_vel(dt_past_vec, past_vel)
filtered_actions_theta_close[i] = filter_vel(dt_past_vec, past_vel,ifClose=True)
filtered_actions_theta_far[i] = filter_vel(dt_past_vec, past_vel,ifClose=False)
# propagate forward in time
rl_random = np.random.rand()
for i in range(num_agents):
agent_state = agents_states[i]
if not if_completed_vec[i]:
other_agents_states = [x for j, x in enumerate(agents_states) if j!=i]
# other_agents_actions = [x for j, x in enumerate(filtered_actions_theta) if i!=j]
other_agents_actions = []
for j in range(num_agents):
if j != i:
if dist_mat[i,j] < 0.5:
other_agents_actions.append(filtered_actions_theta_close[j])
else:
other_agents_actions.append(filtered_actions_theta_far[j])
# non-cooperative (for plotting)
if i > 0 and if_non_coop_vec[i] == True:
desired_velocity = self.computePrefVel(agent_state[0:2], \
agent_state[6:8], agent_state[5])
next_agents_states[i] = agent_state.copy()
next_agents_states[i][2:4] = desired_velocity
next_agents_states[i][0:2] += dt * agent_state[2:4]
next_agents_states[i][4] = np.arctan2(desired_velocity[1], desired_velocity[0])
else:
# cooperative
if if_agents_rand_action[i] == False and rl_random < rl_epsilon \
and np.linalg.norm(agent_state[0:2] - agent_state[6:8]) > 1.0 and i < agent_num_thres: # only first agent freeze
if_agents_rand_action[i] = True
agents_rand_action_duration[i] = 0.0
if i < agent_num_thres:
agents_rand_action_theta[i] = self.find_rand_action(agent_state, other_agents_states, \
other_agents_action = other_agents_actions, isBestAction=False)
else:
agents_rand_action_theta[i] = self.find_rand_action(agent_state, other_agents_states, \
other_agents_action = other_agents_actions, isBestAction=True)
elif agents_rand_action_duration[i] > rand_action_time_thres:
if_agents_rand_action[i] = False
if if_agents_rand_action[i] == True:
next_agents_states[i] = self.find_agent_next_rand_state(\
agent_state, other_agents_states, agents_rand_action_theta[i], dt)
agents_rand_action_duration[i] += dt
else:
next_agents_states[i] = self.find_agent_next_state(\
agent_state, other_agents_states, other_agents_actions, dt)
time_to_complete[i] = t
else:
next_agents_states[i] = agent_state.copy()
agents_states = copy.deepcopy(next_agents_states)
# add time to trajectory list
traj_raw_multi = [time_vec] + traj_raw_multi
for i, vec in enumerate(traj_raw_multi):
traj_raw_multi[i] = np.asarray(vec)
# print 'traj_raw', traj_raw
if figure_name != "no_plot":
title_string = 'total time: %f' % (np.sum(time_to_complete))
print('time_to_complete', time_to_complete)
pedData.plot_traj_raw_multi(traj_raw_multi, title_string, figure_name)
print('finished generating trajectory with %d points in %fs' % \
(len(traj_raw_multi[0]), time.time()-time_start))
return traj_raw_multi, time_to_complete
# find preferred velocity vector (toward goal at desired(nominal) speed)
def computePrefVel(self, cur_state, goal, nom_speed):
pref_vec = goal - cur_state
# print 'goal, cur_state', goal, cur_state
# print 'nom_speed', nom_speed
# print 'pref_vec before normalizing', pref_vec
pref_speed = np.linalg.norm(pref_vec)
if pref_speed > 0:
pref_vec = pref_vec / pref_speed * nom_speed
else:
pref_vec = np.array([EPS, 0])
# print 'pref_vec', pref_vec
return pref_vec
# compute agent_centric_states
    def find_bad_inds(self, agent_centric_states):
        """Return indices of batch states that violate the preferred passing side.

        Per the slicing below, each row of agent_centric_states carries:
        column 0 = distance to goal, column 3 = agent heading, columns 4:6 =
        agent velocity, columns 7:9 = closest other agent's velocity and
        columns 9:11 = that agent's relative position (the closest other agent
        is assumed to come first — see the original comment).

        Returns:
            (bad_inds_oppo, bad_inds_same, bad_inds_tangent): index arrays of
            rows that pass an oncoming agent, overtake a same-direction agent,
            or cross a tangent agent on the wrong side for self.passing_side.
            Empty lists when passing_side is neither 'right' nor 'left'.
        """
        # closest other agent is the first agent
        # dist_2_others (see README.txt)
        agent_vel = agent_centric_states[:,4:6]
        agent_speed = np.linalg.norm(agent_vel, axis=1)
        # NOTE(review): agent_vx/agent_vy are computed but never used below
        agent_vx = agent_vel[:,0]
        agent_vy = agent_vel[:,1]
        agent_heading = agent_centric_states[:,3]
        dist_2_goal = agent_centric_states[:,0]
        other_pos = agent_centric_states[:,9:11]
        other_px = other_pos[:,0]
        other_py = other_pos[:,1]
        other_vel = agent_centric_states[:,7:9]
        other_vx = other_vel[:,0]
        other_vy = other_vel[:,1]
        other_speed = np.linalg.norm(other_vel, axis=1)
        other_heading = np.arctan2(other_vy, other_vx)
        rel_vel = agent_vel - other_vel
        rel_vel_angle = np.arctan2(rel_vel[:,1], rel_vel[:,0])
        # bearing from the other agent back toward the agent (at the origin)
        rel_pos_angle = np.arctan2(0-other_py, 0-other_px)
        # signed angle between relative velocity and that bearing; its sign
        # indicates which side the agent will cross the other agent on
        rot_angle = find_angle_diff(rel_vel_angle, rel_pos_angle)
        bad_inds_oppo = []
        bad_inds_same = []
        bad_inds_tangent = []
        if self.passing_side == 'right':
            # opposite direction
            # same direction
            # faster: overtaking on the left while expected to pass right
            bad_inds_same_fast = np.where( (dist_2_goal > 1) & (other_speed > EPS) & \
                (agent_speed > EPS) & \
                (agent_speed > other_speed + 0.1) & \
                (other_py > -0.5) & (other_py < 2) & \
                (other_px > 0) & (other_px < 3) & \
                (agent_heading < 0) & \
                (abs(other_heading) < np.pi/6.0))[0]
            # slower: being overtaken on the wrong side
            bad_inds_same_slow = np.where( (dist_2_goal > 1) & (other_speed > EPS) & \
                (agent_speed > EPS) & \
                (agent_speed < other_speed - 0.1) & \
                (other_py < 0) & (other_py > -2) & \
                (other_px < 0) & (other_px > -3) & \
                (agent_heading > 0) & \
                (abs(other_heading) < np.pi/6.0))[0]
            bad_inds_same = np.union1d(bad_inds_same_fast, bad_inds_same_slow)
            # opposite direction: head-on pass on the wrong side
            bad_inds_oppo = np.where( (dist_2_goal > 1) & (other_speed > EPS) & \
                (agent_speed > EPS) & \
                (other_py < 0) & (other_py > -2) & \
                (other_px > 0) & (other_px < 5) & \
                (agent_heading > EPS) & \
                (other_heading < -5.0*np.pi/6.0))[0]
            # tangent direction
            # NOTE(review): this overwrites the per-row speed vector with the
            # scalar at row 0, column 1, so every row below is compared against
            # that single value — confirm this is intentional
            agent_speed = agent_centric_states[0,1]
            other_rel_dist = np.linalg.norm(other_pos, axis=1)
            bad_inds_tangent = np.where( (dist_2_goal > 1) & (other_speed > EPS) & \
                (agent_speed > EPS) & \
                # (other_py < 3 * agent_speed) & (other_py > -3 * agent_speed) & \
                # (other_px > 0 * agent_speed) & (other_px < 3 * agent_speed) & \
                (other_px > 0) & (other_rel_dist < 3) & \
                (rot_angle < 0) & \
                (abs(other_heading) > np.pi/4.0) & \
                (agent_speed > other_speed - 0.2)) [0]
                # & (abs(other_heading) < 5.0* np.pi/6.0) )[0]
        elif self.passing_side == 'left':
            # mirror image of the 'right' case: y-bounds, heading signs and
            # rotation direction are flipped
            # opposite direction
            # same direction
            # faster
            bad_inds_same_fast = np.where( (dist_2_goal > 1) & (other_speed > EPS) & \
                (agent_speed > EPS) & \
                (agent_speed > other_speed + 0.1) & \
                (other_py > -2) & (other_py < 0.5) & \
                (other_px > 0) & (other_px < 3) & \
                (agent_heading > 0) & \
                (abs(other_heading) < np.pi/6.0))[0]
            # slower
            bad_inds_same_slow = np.where( (dist_2_goal > 1) & (other_speed > EPS) & \
                (agent_speed > EPS) & \
                (agent_speed < other_speed - 0.1) & \
                (other_py < 2) & (other_py > 0) & \
                (other_px < 0) & (other_px > -3) & \
                (agent_heading > 0) & \
                (abs(other_heading) < np.pi/6.0))[0]
            bad_inds_same = np.union1d(bad_inds_same_fast, bad_inds_same_slow)
            # opposite direction
            bad_inds_oppo = np.where( (dist_2_goal > 1) & (other_speed > EPS) & \
                (agent_speed > EPS) & \
                (other_py < 2) & (other_py > 0) & \
                (other_px > 0) & (other_px < 5) & \
                (agent_heading < EPS) & \
                (other_heading > 5.0*np.pi/6.0))[0]
            # tangent direction
            # NOTE(review): same scalar overwrite of agent_speed as above
            agent_speed = agent_centric_states[0,1]
            other_rel_dist = np.linalg.norm(other_pos, axis=1)
            bad_inds_tangent = np.where( (dist_2_goal > 1) & (other_speed > EPS) & \
                (agent_speed > EPS) & \
                # (other_py < 3* agent_speed) & (other_py > -3* agent_speed) & \
                # (other_px > 0* agent_speed) & (other_px < 3* agent_speed) & \
                (other_px > 0) & (other_rel_dist < 3) & \
                (rot_angle > 0) & \
                (abs(other_heading) > np.pi/4.0) & \
                (agent_speed > other_speed - 0.2))[0]
                # (abs(other_heading) < 5.0* np.pi/6.0) )[0]
        # return [], bad_inds_same, []
        return bad_inds_oppo, bad_inds_same, bad_inds_tangent
# speed filter
# input: past velocity in (x,y) at time_past
# output: filtered velocity in (speed, theta)
def filter_vel(dt_vec, agent_past_vel, ifClose=False):
    """Smooth an agent's recent velocity into a single (speed, angle) action.

    Computes the dt-weighted average of the last two velocity samples and
    returns it in polar form.

    Args:
        dt_vec: 1D array of time steps aligned with agent_past_vel rows
        agent_past_vel: 2D array of past states, columns [vx, vy, heading];
            only the last two rows are used
        ifClose: kept for interface compatibility; the original code disabled
            the close/far distinction with an `if True:` debug override, so
            this argument is ignored (both filters use the last two samples)

    Returns:
        np.array([speed, angle]) of the time-weighted average velocity.

    Fix: removed the dead `if True: ... else:` debug branch and the unused
    `speeds`/heading locals; behavior is unchanged.
    """
    recent_vel_xy = agent_past_vel[-2:, 0:2]
    recent_dt = dt_vec[-2:]
    total_dt = np.sum(recent_dt)
    # dt-weighted average of the recent velocity components
    average_x = np.sum(recent_dt * recent_vel_xy[:, 0]) / total_dt
    average_y = np.sum(recent_dt * recent_vel_xy[:, 1]) / total_dt
    speed = np.linalg.norm(np.array([average_x, average_y]))
    angle = np.arctan2(average_y, average_x)
    return np.array([speed, angle])
# load NN_navigation
def load_NN_navigation_value(file_dir, num_agents, mode, passing_side, filename=None, ifPrint=True):
    ''' Build an NN_navigation_value instance and load its trained weights.

    Args:
        file_dir: directory of this file; pickle paths resolve relative to it
        num_agents: number of agents the network was trained for
        mode: e.g. 'no_constr' or 'rotate_constr'
        passing_side: 'none', 'left' or 'right'
        filename: if given, load RL self-play weights from this file instead
            of the default cadrl-trained network
        ifPrint: print which RL self-play network file is being loaded

    Returns:
        an NN_navigation_value with its neural network weights loaded.

    Raises:
        AssertionError: if the network file cannot be loaded.

    Fix: the original wrapped dead code in `try: ...; assert(0)` to force the
    except path (which silently changes behavior under `python -O`), left an
    unreachable retraining block after a second `assert(0)`, and leaked the
    file handle passed to pickle.dump.  The always-executed path is now
    written directly.
    '''
    # Default training hyperparameters (the pickled copy on disk came from a
    # different directory layout and was deliberately never used).
    sgd_step_size = 10.0/20.0
    reg_lambda = 1.0/1000.0
    nb_iter = 1000
    sgd_batch_size = 500
    w_scale = 0.1
    nn_training_param = nn.NN_training_param(sgd_step_size, reg_lambda, nb_iter, sgd_batch_size, w_scale)
    with open(file_dir+"/../../pickle_files/multi/nn_training_param.p", "wb") as f:
        pickle.dump(nn_training_param, f)
    nn_navigation = NN_navigation_value(num_agents, nn_training_param, mode=mode, passing_side=passing_side)
    ''' load nn_navigation '''
    if filename is None:
        nn_filename = file_dir + "/../../pickle_files/multi/%d_agents_nn_regr_value_data.p" % num_agents
        print('loading NN trained on cadrl at ' + nn_filename)
    else:
        nn_filename = file_dir + "/../../pickle_files/multi/" + mode + "_" + passing_side + \
            "/RL_selfplay/" + filename
        if ifPrint:
            print('loading NN trained on RL self-play ' + nn_filename)
    # loading neural network weights; a failed load is fatal (the retraining
    # fallback the original placed after its assert(0) was unreachable)
    try:
        nn_navigation.nn.load_neural_network(nn_filename)
    except AssertionError:
        print('failed to load navigation value network', nn_filename)
        raise
    return nn_navigation
def load_ped_data(file_dir, num_agents):
    ''' Load the pickled training/test datasets for the value network.

    Args:
        file_dir: directory of this file; pickle paths resolve relative to it
        num_agents: number of agents encoded in the dataset filenames

    Returns:
        (dataset_ped_train, dataset_ped_test, dataset_ped_test_vel)

    Raises:
        OSError or pickle.UnpicklingError: if a dataset file is missing or
            unreadable.

    Fix: the original caught AssertionError, which open()/pickle.load never
    raise, so its "pickle file does not exist" message was unreachable and a
    missing file escaped as a bare FileNotFoundError; it also leaked the
    opened file handles.
    '''
    base_dir = file_dir + "/../../pickle_files/multi"
    try:
        with open(base_dir + "/%d_agents_dataset_value_train.p" % num_agents, "rb") as f:
            dataset_ped_train = pickle.load(f)
        with open(base_dir + "/%d_agents_dataset_value_test.p" % num_agents, "rb") as f:
            dataset_ped_test = pickle.load(f)
        num_train_pts = dataset_ped_train[0].shape[0]
        num_test_pts = dataset_ped_test[0].shape[0]
        with open(base_dir + "/%d_agents_dataset_regr_test.p" % num_agents, "rb") as f:
            dataset_ped_test_vel = pickle.load(f)
        # only the velocity targets are needed from the regression test set
        dataset_ped_test_vel = dataset_ped_test_vel[1]
        print('dataset contains %d pts, training set has %d pts, test set has %d pts' % \
            (num_train_pts+num_test_pts, num_train_pts, num_test_pts))
    except (OSError, pickle.UnpicklingError):
        print('pickle file does not exist, exiting')
        raise
    return dataset_ped_train, dataset_ped_test, dataset_ped_test_vel
if __name__ == '__main__':
    # Demo / smoke-test entry point: load a trained value network, score the
    # test dataset, and roll out a couple of example trajectories.
    print('hello world from nn_navigation_value_multi.py')
    file_dir = os.path.dirname(os.path.realpath(__file__))
    plt.rcParams.update({'font.size': 18})
    ''' load nn_navigation_value and train if pickle file doesnot exist '''
    num_agents = 4
    mode = 'no_constr'
    # mode = 'rotate_constr'
    nn_navigation = load_NN_navigation_value(file_dir, num_agents, mode, 'none')
    ''' make prediction '''
    dataset_ped_train, dataset_ped_test, dataset_ped_test_vel = load_ped_data(file_dir, num_agents)
    # raw value predictions over the agent-centric test states
    scores = nn_navigation.nn.make_prediction_raw(dataset_ped_test[0])
    # nn_navigation.plot_ped_testCase_rand(dataset_ped_test[0], scores, 'testing')
    # nn_navigation.plot_ped_testCase_rand(dataset_ped_test[0], scores, 'testing')
    # nn_navigation.plot_ped_testCase_rand(dataset_ped_test[0], scores, 'testing')
    # hand-crafted 4-agent swap scenario; each row is
    # [start_x, start_y, end_x, end_y, desired_speed, radius]
    test_case = np.array([[-3.0, 0.0, 3.0, 0.0, 1.0, 0.4],\
                [0.0, 3.0, 0.0, -3.0, 1.0, 0.4], \
                [0.0, -3.0, 0.0, 3.0, 1.0, 0.4], \
                [3.0, 0.0, -3.0, 0.0, 1.0, 0.4]])
    nn_navigation.generate_traj(test_case[0:num_agents], ifRandSpeed=False)
    # random test case (presumably: workspace scale 4.0, speed range [0.5,1.2],
    # radius range [0.3,0.5] — confirm against gen_tc)
    test_case = gen_tc.generate_rand_test_case_multi(num_agents, 4.0, np.array([0.5,1.2]), np.array([0.3, 0.5]))
    nn_navigation.generate_traj(test_case)
    plt.show()
| [
"matplotlib.pyplot.title",
"numpy.arctan2",
"numpy.sum",
"numpy.amin",
"numpy.argmax",
"matplotlib.pyplot.clf",
"numpy.ones",
"numpy.clip",
"numpy.argpartition",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.linalg.norm",
"numpy.random.randint",
"matplotlib.pyplot.arrow",
"sys.path.app... | [((33, 70), 'sys.path.append', 'sys.path.append', (['"""../neural_networks"""'], {}), "('../neural_networks')\n", (48, 70), False, 'import sys\n'), ((1584, 1637), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', 'num_angles'], {'endpoint': '(False)'}), '(0, 2 * np.pi, num_angles, endpoint=False)\n', (1595, 1637), True, 'import numpy as np\n'), ((1651, 1706), 'numpy.linspace', 'np.linspace', (['nom_speed', '(0.0)', 'num_speeds'], {'endpoint': '(False)'}), '(nom_speed, 0.0, num_speeds, endpoint=False)\n', (1662, 1706), True, 'import numpy as np\n'), ((1733, 1760), 'numpy.meshgrid', 'np.meshgrid', (['angles', 'speeds'], {}), '(angles, speeds)\n', (1744, 1760), True, 'import numpy as np\n'), ((2435, 2504), 'numpy.linspace', 'np.linspace', (['(-TURNING_LIMIT)', 'TURNING_LIMIT', 'num_angles'], {'endpoint': '(True)'}), '(-TURNING_LIMIT, TURNING_LIMIT, num_angles, endpoint=True)\n', (2446, 2504), True, 'import numpy as np\n'), ((2518, 2573), 'numpy.linspace', 'np.linspace', (['nom_speed', '(0.0)', 'num_speeds'], {'endpoint': '(False)'}), '(nom_speed, 0.0, num_speeds, endpoint=False)\n', (2529, 2573), True, 'import numpy as np\n'), ((2600, 2627), 'numpy.meshgrid', 'np.meshgrid', (['angles', 'speeds'], {}), '(angles, speeds)\n', (2611, 2627), True, 'import numpy as np\n'), ((5425, 5475), 'gym_collision_avoidance.envs.policies.CADRL.scripts.neural_networks.multiagent_network_param.Multiagent_network_param', 'Multiagent_network_param', (['layers_info', 'layers_type'], {}), '(layers_info, layers_type)\n', (5449, 5475), False, 'from gym_collision_avoidance.envs.policies.CADRL.scripts.neural_networks.multiagent_network_param import Multiagent_network_param\n'), ((5847, 5870), 'numpy.zeros', 'np.zeros', (['(num_states,)'], {}), '((num_states,))\n', (5855, 5870), True, 'import numpy as np\n'), ((5936, 5959), 'numpy.zeros', 'np.zeros', (['(num_states,)'], {}), '((num_states,))\n', (5944, 5959), True, 'import numpy as np\n'), ((94000, 94041), 
'numpy.linalg.norm', 'np.linalg.norm', (['agent_past_vel_xy'], {'axis': '(1)'}), '(agent_past_vel_xy, axis=1)\n', (94014, 94041), True, 'import numpy as np\n'), ((94254, 94286), 'numpy.arctan2', 'np.arctan2', (['average_y', 'average_x'], {}), '(average_y, average_x)\n', (94264, 94286), True, 'import numpy as np\n'), ((94299, 94323), 'numpy.array', 'np.array', (['[speed, angle]'], {}), '([speed, angle])\n', (94307, 94323), True, 'import numpy as np\n'), ((98069, 98107), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 18}"], {}), "({'font.size': 18})\n", (98088, 98107), True, 'import matplotlib.pyplot as plt\n'), ((98806, 98952), 'numpy.array', 'np.array', (['[[-3.0, 0.0, 3.0, 0.0, 1.0, 0.4], [0.0, 3.0, 0.0, -3.0, 1.0, 0.4], [0.0, -\n 3.0, 0.0, 3.0, 1.0, 0.4], [3.0, 0.0, -3.0, 0.0, 1.0, 0.4]]'], {}), '([[-3.0, 0.0, 3.0, 0.0, 1.0, 0.4], [0.0, 3.0, 0.0, -3.0, 1.0, 0.4],\n [0.0, -3.0, 0.0, 3.0, 1.0, 0.4], [3.0, 0.0, -3.0, 0.0, 1.0, 0.4]])\n', (98814, 98952), True, 'import numpy as np\n'), ((99311, 99321), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (99319, 99321), True, 'import matplotlib.pyplot as plt\n'), ((2093, 2114), 'numpy.cos', 'np.cos', (['actions[:, 1]'], {}), '(actions[:, 1])\n', (2099, 2114), True, 'import numpy as np\n'), ((2137, 2158), 'numpy.sin', 'np.sin', (['actions[:, 1]'], {}), '(actions[:, 1])\n', (2143, 2158), True, 'import numpy as np\n'), ((4001, 4036), 'numpy.array', 'np.array', (['[[1, 7], [num_others, 8]]'], {}), '([[1, 7], [num_others, 8]])\n', (4009, 4036), True, 'import numpy as np\n'), ((4090, 4127), 'numpy.array', 'np.array', (['[[1, 50], [num_others, 50]]'], {}), '([[1, 50], [num_others, 50]])\n', (4098, 4127), True, 'import numpy as np\n'), ((4180, 4217), 'numpy.array', 'np.array', (['[[1, 50], [num_others, 50]]'], {}), '([[1, 50], [num_others, 50]])\n', (4188, 4217), True, 'import numpy as np\n'), ((4270, 4298), 'numpy.array', 'np.array', (['[[1, 50], [1, 50]]'], {}), '([[1, 50], [1, 50]])\n', 
(4278, 4298), True, 'import numpy as np\n'), ((4352, 4371), 'numpy.array', 'np.array', (['[[1, 50]]'], {}), '([[1, 50]])\n', (4360, 4371), True, 'import numpy as np\n'), ((4576, 4594), 'numpy.array', 'np.array', (['[[1, 1]]'], {}), '([[1, 1]])\n', (4584, 4594), True, 'import numpy as np\n'), ((6729, 6781), 'gym_collision_avoidance.envs.policies.CADRL.scripts.neural_networks.neural_network_regr_multi.Neural_network_regr_multi', 'nn.Neural_network_regr_multi', (['self.nn_training_param'], {}), '(self.nn_training_param)\n', (6757, 6781), True, 'from gym_collision_avoidance.envs.policies.CADRL.scripts.neural_networks import neural_network_regr_multi as nn\n'), ((8340, 8372), 'numpy.random.randint', 'np.random.randint', (['(0)', 'X.shape[0]'], {}), '(0, X.shape[0])\n', (8357, 8372), True, 'import numpy as np\n'), ((9324, 9372), 'gym_collision_avoidance.envs.policies.CADRL.scripts.multi.pedData_processing_multi.agentCentricState_2_rawState_noRotate', 'pedData.agentCentricState_2_rawState_noRotate', (['x'], {}), '(x)\n', (9369, 9372), True, 'from gym_collision_avoidance.envs.policies.CADRL.scripts.multi import pedData_processing_multi as pedData\n'), ((9709, 9778), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(0.0, 0.0)'], {'radius': 'a_s[8]', 'fc': '"""w"""', 'ec': 'plt_colors_local[0]'}), "((0.0, 0.0), radius=a_s[8], fc='w', ec=plt_colors_local[0])\n", (9719, 9778), True, 'import matplotlib.pyplot as plt\n'), ((9830, 9904), 'matplotlib.pyplot.plot', 'plt.plot', (['a_s[6]', 'a_s[7]'], {'c': 'plt_colors_local[0]', 'marker': '"""*"""', 'markersize': '(20)'}), "(a_s[6], a_s[7], c=plt_colors_local[0], marker='*', markersize=20)\n", (9838, 9904), True, 'import matplotlib.pyplot as plt\n'), ((9934, 10020), 'matplotlib.pyplot.arrow', 'plt.arrow', (['(0.0)', '(0.0)', 'a_s[5]', '(0.0)'], {'fc': '"""m"""', 'ec': '"""m"""', 'head_width': '(0.05)', 'head_length': '(0.1)'}), "(0.0, 0.0, a_s[5], 0.0, fc='m', ec='m', head_width=0.05,\n head_length=0.1)\n", (9943, 10020), True, 'import 
matplotlib.pyplot as plt\n'), ((10037, 10090), 'matplotlib.pyplot.plot', 'plt.plot', (['[0.0, a_s[5]]', '[0.0, 0.0]', '"""m"""'], {'linewidth': '(2)'}), "([0.0, a_s[5]], [0.0, 0.0], 'm', linewidth=2)\n", (10045, 10090), True, 'import matplotlib.pyplot as plt\n'), ((10123, 10212), 'matplotlib.pyplot.arrow', 'plt.arrow', (['(0.0)', '(0.0)', 'a_s[2]', 'a_s[3]'], {'fc': '"""k"""', 'ec': '"""k"""', 'head_width': '(0.05)', 'head_length': '(0.1)'}), "(0.0, 0.0, a_s[2], a_s[3], fc='k', ec='k', head_width=0.05,\n head_length=0.1)\n", (10132, 10212), True, 'import matplotlib.pyplot as plt\n'), ((10228, 10284), 'matplotlib.pyplot.plot', 'plt.plot', (['[0.0, a_s[2]]', '[0.0, a_s[3]]', '"""k"""'], {'linewidth': '(2)'}), "([0.0, a_s[2]], [0.0, a_s[3]], 'k', linewidth=2)\n", (10236, 10284), True, 'import matplotlib.pyplot as plt\n'), ((11416, 11503), 'matplotlib.pyplot.arrow', 'plt.arrow', (['(0.0)', '(0.0)', 'x_tmp', 'y_tmp'], {'fc': '"""g"""', 'ec': '"""g"""', 'head_width': '(0.05)', 'head_length': '(0.1)'}), "(0.0, 0.0, x_tmp, y_tmp, fc='g', ec='g', head_width=0.05,\n head_length=0.1)\n", (11425, 11503), True, 'import matplotlib.pyplot as plt\n'), ((11531, 11586), 'matplotlib.pyplot.plot', 'plt.plot', (['[0.0, x_tmp]', '[0.0, y_tmp]', '"""g-"""'], {'linewidth': '(2)'}), "([0.0, x_tmp], [0.0, y_tmp], 'g-', linewidth=2)\n", (11539, 11586), True, 'import matplotlib.pyplot as plt\n'), ((11800, 11819), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x (m)"""'], {}), "('x (m)')\n", (11810, 11819), True, 'import matplotlib.pyplot as plt\n'), ((11828, 11847), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y (m)"""'], {}), "('y (m)')\n", (11838, 11847), True, 'import matplotlib.pyplot as plt\n'), ((12175, 12202), 'numpy.min', 'np.min', (['dist_2_other_agents'], {}), '(dist_2_other_agents)\n', (12181, 12202), True, 'import numpy as np\n'), ((12349, 12459), 'matplotlib.pyplot.title', 'plt.title', (['(title_string + """\n pref_speed: %.3f, min_dist_2_others: %.3f""" % (a_s[5],\n 
dist_2_other_agent))'], {}), '(title_string + """\n pref_speed: %.3f, min_dist_2_others: %.3f""" %\n (a_s[5], dist_2_other_agent))\n', (12358, 12459), True, 'import matplotlib.pyplot as plt\n'), ((12817, 12953), 'matplotlib.pyplot.legend', 'plt.legend', (['[vel_cur, vel_pref, vel_nn]', "['${heading}$', '$v_{pref}$', '$v_{select}$']"], {'loc': '"""lower left"""', 'fontsize': '(30)', 'frameon': '(False)'}), "([vel_cur, vel_pref, vel_nn], ['${heading}$', '$v_{pref}$',\n '$v_{select}$'], loc='lower left', fontsize=30, frameon=False)\n", (12827, 12953), True, 'import matplotlib.pyplot as plt\n'), ((13063, 13097), 'numpy.array', 'np.array', (['(xlim[0], xlim[1] + 0.5)'], {}), '((xlim[0], xlim[1] + 0.5))\n', (13071, 13097), True, 'import numpy as np\n'), ((15996, 16011), 'numpy.amax', 'np.amax', (['plot_z'], {}), '(plot_z)\n', (16003, 16011), True, 'import numpy as np\n'), ((16026, 16043), 'numpy.argmax', 'np.argmax', (['plot_z'], {}), '(plot_z)\n', (16035, 16043), True, 'import numpy as np\n'), ((17035, 17145), 'matplotlib.pyplot.tripcolor', 'plt.tripcolor', (['plot_x', 'plot_y', 'plot_z'], {'shading': '"""flat"""', 'cmap': 'plt.cm.rainbow', 'vmin': 'color_min', 'vmax': 'color_max'}), "(plot_x, plot_y, plot_z, shading='flat', cmap=plt.cm.rainbow,\n vmin=color_min, vmax=color_max)\n", (17048, 17145), True, 'import matplotlib.pyplot as plt\n'), ((17496, 17519), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""v_x (m/s)"""'], {}), "('v_x (m/s)')\n", (17506, 17519), True, 'import matplotlib.pyplot as plt\n'), ((17528, 17551), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""v_y (m/s)"""'], {}), "('v_y (m/s)')\n", (17538, 17551), True, 'import matplotlib.pyplot as plt\n'), ((17567, 17581), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (17579, 17581), True, 'import matplotlib.pyplot as plt\n'), ((18071, 18081), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (18079, 18081), True, 'import matplotlib.pyplot as plt\n'), ((18090, 18107), 
'matplotlib.pyplot.pause', 'plt.pause', (['(0.0001)'], {}), '(0.0001)\n', (18099, 18107), True, 'import matplotlib.pyplot as plt\n'), ((18337, 18353), 'numpy.zeros', 'np.zeros', (['(1, 2)'], {}), '((1, 2))\n', (18345, 18353), True, 'import numpy as np\n'), ((18770, 18805), 'numpy.matlib.repmat', 'np.matlib.repmat', (['desired_act', '(5)', '(1)'], {}), '(desired_act, 5, 1)\n', (18786, 18805), True, 'import numpy as np\n'), ((19297, 19352), 'numpy.matlib.repmat', 'np.matlib.repmat', (['tmp_action_theta', 'num_near_actions', '(1)'], {}), '(tmp_action_theta, num_near_actions, 1)\n', (19313, 19352), True, 'import numpy as np\n'), ((19382, 19442), 'numpy.linspace', 'np.linspace', (['(-np.pi / 3.0)', '(np.pi / 3.0)'], {'num': 'num_near_actions'}), '(-np.pi / 3.0, np.pi / 3.0, num=num_near_actions)\n', (19393, 19442), True, 'import numpy as np\n'), ((19843, 19942), 'numpy.vstack', 'np.vstack', (['(near_actions, near_actions_reduced, near_actions_reduced_1,\n near_actions_reduced_2)'], {}), '((near_actions, near_actions_reduced, near_actions_reduced_1,\n near_actions_reduced_2))\n', (19852, 19942), True, 'import numpy as np\n'), ((20339, 20416), 'numpy.vstack', 'np.vstack', (['(default_action_theta, desired_actions, zero_action, near_actions)'], {}), '((default_action_theta, desired_actions, zero_action, near_actions))\n', (20348, 20416), True, 'import numpy as np\n'), ((21803, 21835), 'numpy.linalg.norm', 'np.linalg.norm', (['agent_state[2:4]'], {}), '(agent_state[2:4])\n', (21817, 21835), True, 'import numpy as np\n'), ((22158, 22234), 'numpy.arctan2', 'np.arctan2', (['(agent_state[7] - agent_state[1])', '(agent_state[6] - agent_state[0])'], {}), '(agent_state[7] - agent_state[1], agent_state[6] - agent_state[0])\n', (22168, 22234), True, 'import numpy as np\n'), ((22965, 23007), 'numpy.arctan2', 'np.arctan2', (['agent_state[3]', 'agent_state[2]'], {}), '(agent_state[3], agent_state[2])\n', (22975, 23007), True, 'import numpy as np\n'), ((23676, 23895), 'numpy.array', 
'np.array', (['[[0.0, limit + cur_heading], [0.0, 0.66 * limit + cur_heading], [0.0, 0.33 *\n limit + cur_heading], [0.0, -0.33 * limit + cur_heading], [0.0, -0.66 *\n limit + cur_heading], [0.0, -limit + cur_heading]]'], {}), '([[0.0, limit + cur_heading], [0.0, 0.66 * limit + cur_heading], [\n 0.0, 0.33 * limit + cur_heading], [0.0, -0.33 * limit + cur_heading], [\n 0.0, -0.66 * limit + cur_heading], [0.0, -limit + cur_heading]])\n', (23684, 23895), True, 'import numpy as np\n'), ((23994, 24029), 'numpy.vstack', 'np.vstack', (['(actions, added_actions)'], {}), '((actions, added_actions))\n', (24003, 24029), True, 'import numpy as np\n'), ((26050, 26101), 'numpy.linalg.norm', 'np.linalg.norm', (['(agent_state[0:2] - agent_state[6:8])'], {}), '(agent_state[0:2] - agent_state[6:8])\n', (26064, 26101), True, 'import numpy as np\n'), ((26238, 26255), 'numpy.all', 'np.all', (['(cost <= 0)'], {}), '(cost <= 0)\n', (26244, 26255), True, 'import numpy as np\n'), ((26830, 26881), 'numpy.linalg.norm', 'np.linalg.norm', (['(agent_state[0:2] - agent_state[6:8])'], {}), '(agent_state[0:2] - agent_state[6:8])\n', (26844, 26881), True, 'import numpy as np\n'), ((27922, 27978), 'numpy.clip', 'np.clip', (['rewards[close_inds]', '(COLLISION_COST + 0.01)', '(0.0)'], {}), '(rewards[close_inds], COLLISION_COST + 0.01, 0.0)\n', (27929, 27978), True, 'import numpy as np\n'), ((27992, 28047), 'numpy.all', 'np.all', (['(GETTING_CLOSE_RANGE - min_dists[close_inds] > 0)'], {}), '(GETTING_CLOSE_RANGE - min_dists[close_inds] > 0)\n', (27998, 28047), True, 'import numpy as np\n'), ((28433, 28464), 'numpy.zeros', 'np.zeros', (['(num_pts, num_states)'], {}), '((num_pts, num_states))\n', (28441, 28464), True, 'import numpy as np\n'), ((29179, 29282), 'gym_collision_avoidance.envs.policies.CADRL.scripts.multi.pedData_processing_multi.rawStates_2_agentCentricStates', 'pedData.rawStates_2_agentCentricStates', (['agent_next_states', 'other_agents_next_state', 'self.num_agents'], {}), 
'(agent_next_states,\n other_agents_next_state, self.num_agents)\n', (29217, 29282), True, 'from gym_collision_avoidance.envs.policies.CADRL.scripts.multi import pedData_processing_multi as pedData\n'), ((29664, 29715), 'numpy.linalg.norm', 'np.linalg.norm', (['(agent_state[0:2] - agent_state[6:8])'], {}), '(agent_state[0:2] - agent_state[6:8])\n', (29678, 29715), True, 'import numpy as np\n'), ((29885, 29905), 'numpy.zeros', 'np.zeros', (['(num_pts,)'], {}), '((num_pts,))\n', (29893, 29905), True, 'import numpy as np\n'), ((30593, 30618), 'numpy.linalg.norm', 'np.linalg.norm', (['action_xy'], {}), '(action_xy)\n', (30607, 30618), True, 'import numpy as np\n'), ((30800, 30840), 'numpy.array', 'np.array', (['[[action_speed, action_angle]]'], {}), '([[action_speed, action_angle]])\n', (30808, 30840), True, 'import numpy as np\n'), ((32064, 32100), 'copy.deepcopy', 'copy.deepcopy', (['other_agents_state_in'], {}), '(other_agents_state_in)\n', (32077, 32100), False, 'import copy\n'), ((34053, 34077), 'numpy.zeros', 'np.zeros', (['(num_actions,)'], {}), '((num_actions,))\n', (34061, 34077), True, 'import numpy as np\n'), ((34147, 34188), 'numpy.zeros', 'np.zeros', (['(num_actions, num_other_agents)'], {}), '((num_actions, num_other_agents))\n', (34155, 34188), True, 'import numpy as np\n'), ((34214, 34255), 'numpy.zeros', 'np.zeros', (['(num_actions, num_other_agents)'], {}), '((num_actions, num_other_agents))\n', (34222, 34255), True, 'import numpy as np\n'), ((34279, 34308), 'numpy.zeros', 'np.zeros', (['(num_other_agents,)'], {}), '((num_other_agents,))\n', (34287, 34308), True, 'import numpy as np\n'), ((34762, 34791), 'numpy.min', 'np.min', (['min_dists_mat'], {'axis': '(1)'}), '(min_dists_mat, axis=1)\n', (34768, 34791), True, 'import numpy as np\n'), ((34813, 34843), 'numpy.max', 'np.max', (['if_collide_mat'], {'axis': '(1)'}), '(if_collide_mat, axis=1)\n', (34819, 34843), True, 'import numpy as np\n'), ((34863, 34883), 'numpy.min', 'np.min', (['cur_dist_vec'], 
{}), '(cur_dist_vec)\n', (34869, 34883), True, 'import numpy as np\n'), ((48961, 48984), 'numpy.argmax', 'np.argmax', (['state_values'], {}), '(state_values)\n', (48970, 48984), True, 'import numpy as np\n'), ((51296, 51381), 'numpy.arctan2', 'np.arctan2', (['(diff_map_goal[1] - agent_state[1])', '(diff_map_goal[0] - agent_state[0])'], {}), '(diff_map_goal[1] - agent_state[1], diff_map_goal[0] - agent_state[0]\n )\n', (51306, 51381), True, 'import numpy as np\n'), ((55884, 55900), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (55898, 55900), True, 'import numpy as np\n'), ((57148, 57168), 'copy.deepcopy', 'copy.deepcopy', (['state'], {}), '(state)\n', (57161, 57168), False, 'import copy\n'), ((59689, 59728), 'numpy.matlib.repmat', 'np.matlib.repmat', (['state', 'num_actions', '(1)'], {}), '(state, num_actions, 1)\n', (59705, 59728), True, 'import numpy as np\n'), ((60941, 60982), 'numpy.setdiff1d', 'np.setdiff1d', (['oscillation_inds', 'zero_inds'], {}), '(oscillation_inds, zero_inds)\n', (60953, 60982), True, 'import numpy as np\n'), ((61278, 61372), 'numpy.clip', 'np.clip', (['(-next_states[oscillation_inds, 9] + angles_diff[oscillation_inds])', '(-np.pi)', 'np.pi'], {}), '(-next_states[oscillation_inds, 9] + angles_diff[oscillation_inds], \n -np.pi, np.pi)\n', (61285, 61372), True, 'import numpy as np\n'), ((64587, 64601), 'numpy.zeros', 'np.zeros', (['(2,)'], {}), '((2,))\n', (64595, 64601), True, 'import numpy as np\n'), ((64752, 64766), 'numpy.zeros', 'np.zeros', (['(2,)'], {}), '((2,))\n', (64760, 64766), True, 'import numpy as np\n'), ((65219, 65311), 'numpy.arctan2', 'np.arctan2', (['(other_agent_state[1] - agent_state[1])', '(other_agent_state[0] - agent_state[0])'], {}), '(other_agent_state[1] - agent_state[1], other_agent_state[0] -\n agent_state[0])\n', (65229, 65311), True, 'import numpy as np\n'), ((65362, 65396), 'numpy.arctan2', 'np.arctan2', (['agent_v[1]', 'agent_v[0]'], {}), '(agent_v[1], agent_v[0])\n', (65372, 65396), True, 'import 
numpy as np\n'), ((66294, 66317), 'numpy.linalg.norm', 'np.linalg.norm', (['(x1 - y1)'], {}), '(x1 - y1)\n', (66308, 66317), True, 'import numpy as np\n'), ((67070, 67103), 'numpy.zeros', 'np.zeros', (['num_actions'], {'dtype': 'bool'}), '(num_actions, dtype=bool)\n', (67078, 67103), True, 'import numpy as np\n'), ((67529, 67555), 'numpy.zeros', 'np.zeros', (['(num_actions, 2)'], {}), '((num_actions, 2))\n', (67537, 67555), True, 'import numpy as np\n'), ((67727, 67741), 'numpy.zeros', 'np.zeros', (['(2,)'], {}), '((2,))\n', (67735, 67741), True, 'import numpy as np\n'), ((68155, 68202), 'numpy.matlib.repmat', 'np.matlib.repmat', (['other_agent_v', 'num_actions', '(1)'], {}), '(other_agent_v, num_actions, 1)\n', (68171, 68202), True, 'import numpy as np\n'), ((68266, 68358), 'numpy.arctan2', 'np.arctan2', (['(other_agent_state[1] - agent_state[1])', '(other_agent_state[0] - agent_state[0])'], {}), '(other_agent_state[1] - agent_state[1], other_agent_state[0] -\n agent_state[0])\n', (68276, 68358), True, 'import numpy as np\n'), ((68410, 68456), 'numpy.arctan2', 'np.arctan2', (['agent_vels[:, 1]', 'agent_vels[:, 0]'], {}), '(agent_vels[:, 1], agent_vels[:, 0])\n', (68420, 68456), True, 'import numpy as np\n'), ((68483, 68529), 'numpy.arctan2', 'np.arctan2', (['other_agent_v[1]', 'other_agent_v[0]'], {}), '(other_agent_v[1], other_agent_v[0])\n', (68493, 68529), True, 'import numpy as np\n'), ((71010, 71055), 'gym_collision_avoidance.envs.policies.CADRL.scripts.multi.gen_rand_testcases.find_dist_between_segs', 'gen_tc.find_dist_between_segs', (['x1', 'x2', 'y1', 'y2'], {}), '(x1, x2, y1, y2)\n', (71039, 71055), True, 'from gym_collision_avoidance.envs.policies.CADRL.scripts.multi import gen_rand_testcases as gen_tc\n'), ((71889, 71912), 'numpy.linalg.norm', 'np.linalg.norm', (['(x1 - y1)'], {}), '(x1 - y1)\n', (71903, 71912), True, 'import numpy as np\n'), ((78991, 79002), 'time.time', 'time.time', ([], {}), '()\n', (79000, 79002), False, 'import time\n'), ((79187, 
79222), 'numpy.zeros', 'np.zeros', (['(num_agents,)'], {'dtype': 'bool'}), '((num_agents,), dtype=bool)\n', (79195, 79222), True, 'import numpy as np\n'), ((79418, 79441), 'numpy.zeros', 'np.zeros', (['(num_agents,)'], {}), '((num_agents,))\n', (79426, 79441), True, 'import numpy as np\n'), ((79462, 79484), 'numpy.ones', 'np.ones', (['(num_agents,)'], {}), '((num_agents,))\n', (79469, 79484), True, 'import numpy as np\n'), ((79512, 79547), 'numpy.zeros', 'np.zeros', (['(num_agents,)'], {'dtype': 'bool'}), '((num_agents,), dtype=bool)\n', (79520, 79547), True, 'import numpy as np\n'), ((79807, 79841), 'numpy.zeros', 'np.zeros', (['(num_agents, num_agents)'], {}), '((num_agents, num_agents))\n', (79815, 79841), True, 'import numpy as np\n'), ((79984, 80012), 'copy.deepcopy', 'copy.deepcopy', (['agents_states'], {}), '(agents_states)\n', (79997, 80012), False, 'import copy\n'), ((80037, 80060), 'numpy.zeros', 'np.zeros', (['(num_agents,)'], {}), '((num_agents,))\n', (80045, 80060), True, 'import numpy as np\n'), ((80081, 80104), 'numpy.zeros', 'np.zeros', (['(num_agents,)'], {}), '((num_agents,))\n', (80089, 80104), True, 'import numpy as np\n'), ((80316, 80334), 'numpy.amax', 'np.amax', (['max_t_vec'], {}), '(max_t_vec)\n', (80323, 80334), True, 'import numpy as np\n'), ((80539, 80574), 'numpy.zeros', 'np.zeros', (['(num_agents,)'], {'dtype': 'bool'}), '((num_agents,), dtype=bool)\n', (80547, 80574), True, 'import numpy as np\n'), ((80613, 80636), 'numpy.zeros', 'np.zeros', (['(num_agents,)'], {}), '((num_agents,))\n', (80621, 80636), True, 'import numpy as np\n'), ((80673, 80698), 'numpy.zeros', 'np.zeros', (['(num_agents, 2)'], {}), '((num_agents, 2))\n', (80681, 80698), True, 'import numpy as np\n'), ((87154, 87178), 'numpy.linalg.norm', 'np.linalg.norm', (['pref_vec'], {}), '(pref_vec)\n', (87168, 87178), True, 'import numpy as np\n'), ((87628, 87661), 'numpy.linalg.norm', 'np.linalg.norm', (['agent_vel'], {'axis': '(1)'}), '(agent_vel, axis=1)\n', (87642, 
87661), True, 'import numpy as np\n'), ((88085, 88118), 'numpy.linalg.norm', 'np.linalg.norm', (['other_vel'], {'axis': '(1)'}), '(other_vel, axis=1)\n', (88099, 88118), True, 'import numpy as np\n'), ((88143, 88173), 'numpy.arctan2', 'np.arctan2', (['other_vy', 'other_vx'], {}), '(other_vy, other_vx)\n', (88153, 88173), True, 'import numpy as np\n'), ((88239, 88279), 'numpy.arctan2', 'np.arctan2', (['rel_vel[:, 1]', 'rel_vel[:, 0]'], {}), '(rel_vel[:, 1], rel_vel[:, 0])\n', (88249, 88279), True, 'import numpy as np\n'), ((88302, 88340), 'numpy.arctan2', 'np.arctan2', (['(0 - other_py)', '(0 - other_px)'], {}), '(0 - other_py, 0 - other_px)\n', (88312, 88340), True, 'import numpy as np\n'), ((93857, 93897), 'numpy.sum', 'np.sum', (['(dt_vec * agent_past_vel_xy[:, 0])'], {}), '(dt_vec * agent_past_vel_xy[:, 0])\n', (93863, 93897), True, 'import numpy as np\n'), ((93899, 93913), 'numpy.sum', 'np.sum', (['dt_vec'], {}), '(dt_vec)\n', (93905, 93913), True, 'import numpy as np\n'), ((93930, 93970), 'numpy.sum', 'np.sum', (['(dt_vec * agent_past_vel_xy[:, 1])'], {}), '(dt_vec * agent_past_vel_xy[:, 1])\n', (93936, 93970), True, 'import numpy as np\n'), ((93972, 93986), 'numpy.sum', 'np.sum', (['dt_vec'], {}), '(dt_vec)\n', (93978, 93986), True, 'import numpy as np\n'), ((94208, 94240), 'numpy.array', 'np.array', (['[average_x, average_y]'], {}), '([average_x, average_y])\n', (94216, 94240), True, 'import numpy as np\n'), ((94940, 95048), 'gym_collision_avoidance.envs.policies.CADRL.scripts.neural_networks.nn_training_param.NN_training_param', 'NN_training_param', (["d['sgd_step_size']", "d['reg_lambda']", "d['nb_iter']", "d['sgd_batch_size']", "d['w_scale']"], {}), "(d['sgd_step_size'], d['reg_lambda'], d['nb_iter'], d[\n 'sgd_batch_size'], d['w_scale'])\n", (94957, 95048), False, 'from gym_collision_avoidance.envs.policies.CADRL.scripts.neural_networks.nn_training_param import NN_training_param\n'), ((98037, 98063), 'os.path.realpath', 'os.path.realpath', (['__file__'], 
{}), '(__file__)\n', (98053, 98063), False, 'import os\n'), ((99220, 99240), 'numpy.array', 'np.array', (['[0.5, 1.2]'], {}), '([0.5, 1.2])\n', (99228, 99240), True, 'import numpy as np\n'), ((99241, 99261), 'numpy.array', 'np.array', (['[0.3, 0.5]'], {}), '([0.3, 0.5])\n', (99249, 99261), True, 'import numpy as np\n'), ((1872, 1899), 'numpy.vstack', 'np.vstack', (['(speeds, angles)'], {}), '((speeds, angles))\n', (1881, 1899), True, 'import numpy as np\n'), ((2743, 2770), 'numpy.vstack', 'np.vstack', (['(speeds, angles)'], {}), '((speeds, angles))\n', (2752, 2770), True, 'import numpy as np\n'), ((8935, 8977), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 6)', 'frameon': '(False)'}), '(figsize=(15, 6), frameon=False)\n', (8945, 8977), True, 'import matplotlib.pyplot as plt\n'), ((9010, 9065), 'matplotlib.pyplot.figure', 'plt.figure', (['figure_name'], {'figsize': '(15, 6)', 'frameon': '(False)'}), '(figure_name, figsize=(15, 6), frameon=False)\n', (9020, 9065), True, 'import matplotlib.pyplot as plt\n'), ((9077, 9086), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (9084, 9086), True, 'import matplotlib.pyplot as plt\n'), ((10397, 10517), 'matplotlib.pyplot.arrow', 'plt.arrow', (['(0.0)', '(0.0)', 'x_vel', 'y_vel'], {'fc': 'plt_colors_local[0]', 'ec': 'plt_colors_local[0]', 'head_width': '(0.05)', 'head_length': '(0.1)'}), '(0.0, 0.0, x_vel, y_vel, fc=plt_colors_local[0], ec=\n plt_colors_local[0], head_width=0.05, head_length=0.1)\n', (10406, 10517), True, 'import matplotlib.pyplot as plt\n'), ((10558, 10630), 'matplotlib.pyplot.plot', 'plt.plot', (['[0.0, x_vel]', '[0.0, y_vel]'], {'c': 'plt_colors_local[0]', 'linewidth': '(2)'}), '([0.0, x_vel], [0.0, y_vel], c=plt_colors_local[0], linewidth=2)\n', (10566, 10630), True, 'import matplotlib.pyplot as plt\n'), ((10745, 10824), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(o_s[0], o_s[1])'], {'radius': 'o_s[8]', 'fc': '"""w"""', 'ec': 'plt_colors_local[i + 1]'}), "((o_s[0], o_s[1]), 
radius=o_s[8], fc='w', ec=plt_colors_local[i + 1])\n", (10755, 10824), True, 'import matplotlib.pyplot as plt\n'), ((10900, 11036), 'matplotlib.pyplot.arrow', 'plt.arrow', (['o_s[0]', 'o_s[1]', 'o_s[2]', 'o_s[3]'], {'fc': 'plt_colors_local[i + 1]', 'ec': 'plt_colors_local[i + 1]', 'head_width': '(0.05)', 'head_length': '(0.1)'}), '(o_s[0], o_s[1], o_s[2], o_s[3], fc=plt_colors_local[i + 1], ec=\n plt_colors_local[i + 1], head_width=0.05, head_length=0.1)\n', (10909, 11036), True, 'import matplotlib.pyplot as plt\n'), ((11343, 11360), 'numpy.cos', 'np.cos', (['action[1]'], {}), '(action[1])\n', (11349, 11360), True, 'import numpy as np\n'), ((11390, 11407), 'numpy.sin', 'np.sin', (['action[1]'], {}), '(action[1])\n', (11396, 11407), True, 'import numpy as np\n'), ((13860, 13893), 'numpy.linalg.norm', 'np.linalg.norm', (['default_action_xy'], {}), '(default_action_xy)\n', (13874, 13893), True, 'import numpy as np\n'), ((13921, 13975), 'numpy.arctan2', 'np.arctan2', (['default_action_xy[1]', 'default_action_xy[0]'], {}), '(default_action_xy[1], default_action_xy[0])\n', (13931, 13975), True, 'import numpy as np\n'), ((15455, 15476), 'numpy.cos', 'np.cos', (['actions[:, 1]'], {}), '(actions[:, 1])\n', (15461, 15476), True, 'import numpy as np\n'), ((15508, 15529), 'numpy.sin', 'np.sin', (['actions[:, 1]'], {}), '(actions[:, 1])\n', (15514, 15529), True, 'import numpy as np\n'), ((16078, 16101), 'numpy.cos', 'np.cos', (['actions[ind, 1]'], {}), '(actions[ind, 1])\n', (16084, 16101), True, 'import numpy as np\n'), ((16136, 16159), 'numpy.sin', 'np.sin', (['actions[ind, 1]'], {}), '(actions[ind, 1])\n', (16142, 16159), True, 'import numpy as np\n'), ((16669, 16689), 'numpy.where', 'np.where', (['(plot_z > 0)'], {}), '(plot_z > 0)\n', (16677, 16689), True, 'import numpy as np\n'), ((16856, 16871), 'numpy.amax', 'np.amax', (['plot_z'], {}), '(plot_z)\n', (16863, 16871), True, 'import numpy as np\n'), ((17206, 17308), 'matplotlib.pyplot.title', 'plt.title', (['("""value of 
best action: %.3f \n action_x %.3f, action_y %.3f""" % (value,\n x_tmp, y_tmp))'], {}), '("""value of best action: %.3f \n action_x %.3f, action_y %.3f""" %\n (value, x_tmp, y_tmp))\n', (17215, 17308), True, 'import matplotlib.pyplot as plt\n'), ((17346, 17481), 'matplotlib.pyplot.title', 'plt.title', (['("""value of best action: %.3f \n action_speed %.3f, action_angle %.3f""" %\n (value, actions[ind, 0], actions[ind, 1]))'], {}), '(\n """value of best action: %.3f \n action_speed %.3f, action_angle %.3f""" %\n (value, actions[ind, 0], actions[ind, 1]))\n', (17355, 17481), True, 'import matplotlib.pyplot as plt\n'), ((22407, 22449), 'numpy.array', 'np.array', (['[desired_speed, desired_heading]'], {}), '([desired_speed, desired_heading])\n', (22415, 22449), True, 'import numpy as np\n'), ((22480, 22515), 'numpy.matlib.repmat', 'np.matlib.repmat', (['desired_act', '(5)', '(1)'], {}), '(desired_act, 5, 1)\n', (22496, 22515), True, 'import numpy as np\n'), ((22743, 22780), 'numpy.vstack', 'np.vstack', (['(desired_actions, actions)'], {}), '((desired_actions, actions))\n', (22752, 22780), True, 'import numpy as np\n'), ((23171, 23209), 'numpy.array', 'np.array', (['[cur_speed, default_heading]'], {}), '([cur_speed, default_heading])\n', (23179, 23209), True, 'import numpy as np\n'), ((23240, 23275), 'numpy.matlib.repmat', 'np.matlib.repmat', (['default_act', '(2)', '(1)'], {}), '(default_act, 2, 1)\n', (23256, 23275), True, 'import numpy as np\n'), ((23379, 23416), 'numpy.vstack', 'np.vstack', (['(default_actions, actions)'], {}), '((default_actions, actions))\n', (23388, 23416), True, 'import numpy as np\n'), ((24783, 24815), 'numpy.linalg.norm', 'np.linalg.norm', (['agent_state[2:4]'], {}), '(agent_state[2:4])\n', (24797, 24815), True, 'import numpy as np\n'), ((25366, 25437), 'numpy.where', 'np.where', (['((actions_theta[:, 1] < EPS) | (angle_diff_abs < np.pi / 12.0))'], {}), '((actions_theta[:, 1] < EPS) | (angle_diff_abs < np.pi / 12.0))\n', (25374, 25437), True, 
'import numpy as np\n'), ((25556, 25586), 'numpy.where', 'np.where', (['(speed_diff_abs < 0.5)'], {}), '(speed_diff_abs < 0.5)\n', (25564, 25586), True, 'import numpy as np\n'), ((25862, 25909), 'numpy.clip', 'np.clip', (['(angle_diff_abs * SMOOTH_COST)', '(-0.25)', '(0)'], {}), '(angle_diff_abs * SMOOTH_COST, -0.25, 0)\n', (25869, 25909), True, 'import numpy as np\n'), ((25926, 25973), 'numpy.clip', 'np.clip', (['(speed_diff_abs * SMOOTH_COST)', '(-0.25)', '(0)'], {}), '(speed_diff_abs * SMOOTH_COST, -0.25, 0)\n', (25933, 25973), True, 'import numpy as np\n'), ((27055, 27116), 'numpy.where', 'np.where', (['((min_dists > 0) & (min_dists < GETTING_CLOSE_RANGE))'], {}), '((min_dists > 0) & (min_dists < GETTING_CLOSE_RANGE))\n', (27063, 27116), True, 'import numpy as np\n'), ((27412, 27435), 'numpy.where', 'np.where', (['(min_dists < 0)'], {}), '(min_dists < 0)\n', (27420, 27435), True, 'import numpy as np\n'), ((28850, 28873), 'numpy.array', 'np.array', (['dist_2_others'], {}), '(dist_2_others)\n', (28858, 28873), True, 'import numpy as np\n'), ((30677, 30715), 'numpy.arctan2', 'np.arctan2', (['action_xy[1]', 'action_xy[0]'], {}), '(action_xy[1], action_xy[0])\n', (30687, 30715), True, 'import numpy as np\n'), ((31239, 31270), 'numpy.linalg.norm', 'np.linalg.norm', (['other_action_xy'], {}), '(other_action_xy)\n', (31253, 31270), True, 'import numpy as np\n'), ((35392, 35421), 'numpy.where', 'np.where', (['(if_collide == False)'], {}), '(if_collide == False)\n', (35400, 35421), True, 'import numpy as np\n'), ((35776, 35853), 'numpy.linalg.norm', 'np.linalg.norm', (['(agent_next_states[:, 0:2] - agent_next_states[:, 6:8])'], {'axis': '(1)'}), '(agent_next_states[:, 0:2] - agent_next_states[:, 6:8], axis=1)\n', (35790, 35853), True, 'import numpy as np\n'), ((37163, 37209), 'numpy.all', 'np.all', (['(action_rewards + state_values < 1.0001)'], {}), '(action_rewards + state_values < 1.0001)\n', (37169, 37209), True, 'import numpy as np\n'), ((42774, 42825), 
'numpy.linalg.norm', 'np.linalg.norm', (['(agent_state[6:8] - agent_state[0:2])'], {}), '(agent_state[6:8] - agent_state[0:2])\n', (42788, 42825), True, 'import numpy as np\n'), ((44060, 44093), 'numpy.linalg.norm', 'np.linalg.norm', (['default_action_xy'], {}), '(default_action_xy)\n', (44074, 44093), True, 'import numpy as np\n'), ((44256, 44287), 'numpy.array', 'np.array', (['[speed, angle_select]'], {}), '([speed, angle_select])\n', (44264, 44287), True, 'import numpy as np\n'), ((46573, 46631), 'numpy.digitize', 'np.digitize', (['actions_theta[:, 1]', 'static_constraints[:, 1]'], {}), '(actions_theta[:, 1], static_constraints[:, 1])\n', (46584, 46631), True, 'import numpy as np\n'), ((51985, 52008), 'numpy.column_stack', 'np.column_stack', (['(x, y)'], {}), '((x, y))\n', (52000, 52008), True, 'import numpy as np\n'), ((52948, 52971), 'numpy.argmax', 'np.argmax', (['state_values'], {}), '(state_values)\n', (52957, 52971), True, 'import numpy as np\n'), ((57756, 57776), 'numpy.cos', 'np.cos', (['angle_select'], {}), '(angle_select)\n', (57762, 57776), True, 'import numpy as np\n'), ((57809, 57829), 'numpy.sin', 'np.sin', (['angle_select'], {}), '(angle_select)\n', (57815, 57829), True, 'import numpy as np\n'), ((60369, 60390), 'numpy.cos', 'np.cos', (['angles_select'], {}), '(angles_select)\n', (60375, 60390), True, 'import numpy as np\n'), ((60427, 60448), 'numpy.sin', 'np.sin', (['angles_select'], {}), '(angles_select)\n', (60433, 60448), True, 'import numpy as np\n'), ((60866, 60911), 'numpy.where', 'np.where', (['(next_states[:, 9] * angles_diff < 0)'], {}), '(next_states[:, 9] * angles_diff < 0)\n', (60874, 60911), True, 'import numpy as np\n'), ((61008, 61056), 'numpy.where', 'np.where', (['(next_states[:, 9] * angles_diff > -EPS)'], {}), '(next_states[:, 9] * angles_diff > -EPS)\n', (61016, 61056), True, 'import numpy as np\n'), ((61111, 61150), 'numpy.union1d', 'np.union1d', (['zero_inds', 'oscillation_inds'], {}), '(zero_inds, oscillation_inds)\n', 
(61121, 61150), True, 'import numpy as np\n'), ((61204, 61235), 'numpy.sign', 'np.sign', (['angles_diff[zero_inds]'], {}), '(angles_diff[zero_inds])\n', (61211, 61235), True, 'import numpy as np\n'), ((61420, 61458), 'numpy.sign', 'np.sign', (['next_states[same_dir_inds, 9]'], {}), '(next_states[same_dir_inds, 9])\n', (61427, 61458), True, 'import numpy as np\n'), ((64641, 64664), 'numpy.cos', 'np.cos', (['agent_action[1]'], {}), '(agent_action[1])\n', (64647, 64664), True, 'import numpy as np\n'), ((64704, 64727), 'numpy.sin', 'np.sin', (['agent_action[1]'], {}), '(agent_action[1])\n', (64710, 64727), True, 'import numpy as np\n'), ((64818, 64847), 'numpy.cos', 'np.cos', (['other_agent_action[1]'], {}), '(other_agent_action[1])\n', (64824, 64847), True, 'import numpy as np\n'), ((64899, 64928), 'numpy.sin', 'np.sin', (['other_agent_action[1]'], {}), '(other_agent_action[1])\n', (64905, 64928), True, 'import numpy as np\n'), ((65613, 65644), 'numpy.sum', 'np.sum', (['(agent_v * other_agent_v)'], {}), '(agent_v * other_agent_v)\n', (65619, 65644), True, 'import numpy as np\n'), ((67165, 67197), 'numpy.ones', 'np.ones', (['num_actions'], {'dtype': 'bool'}), '(num_actions, dtype=bool)\n', (67172, 67197), True, 'import numpy as np\n'), ((67263, 67320), 'numpy.linalg.norm', 'np.linalg.norm', (['(agent_state[0:2] - other_agent_state[0:2])'], {}), '(agent_state[0:2] - other_agent_state[0:2])\n', (67277, 67320), True, 'import numpy as np\n'), ((67602, 67629), 'numpy.cos', 'np.cos', (['agent_actions[:, 1]'], {}), '(agent_actions[:, 1])\n', (67608, 67629), True, 'import numpy as np\n'), ((67676, 67703), 'numpy.sin', 'np.sin', (['agent_actions[:, 1]'], {}), '(agent_actions[:, 1])\n', (67682, 67703), True, 'import numpy as np\n'), ((67793, 67822), 'numpy.cos', 'np.cos', (['other_agent_action[1]'], {}), '(other_agent_action[1])\n', (67799, 67822), True, 'import numpy as np\n'), ((67874, 67903), 'numpy.sin', 'np.sin', (['other_agent_action[1]'], {}), '(other_agent_action[1])\n', 
(67880, 67903), True, 'import numpy as np\n'), ((69439, 69484), 'numpy.sum', 'np.sum', (['(agent_vels * other_agent_vels)'], {'axis': '(1)'}), '(agent_vels * other_agent_vels, axis=1)\n', (69445, 69484), True, 'import numpy as np\n'), ((69582, 69631), 'numpy.linalg.norm', 'np.linalg.norm', (['agent_vels[valid_inds, :]'], {'axis': '(1)'}), '(agent_vels[valid_inds, :], axis=1)\n', (69596, 69631), True, 'import numpy as np\n'), ((74102, 74141), 'numpy.linalg.norm', 'np.linalg.norm', (['(agent1_pos - agent2_pos)'], {}), '(agent1_pos - agent2_pos)\n', (74116, 74141), True, 'import numpy as np\n'), ((74650, 74742), 'gym_collision_avoidance.envs.policies.CADRL.scripts.multi.pedData_processing_multi.rawState_2_agentCentricState', 'pedData.rawState_2_agentCentricState', (['agent_states', 'other_agents_state', 'self.num_agents'], {}), '(agent_states, other_agents_state, self\n .num_agents)\n', (74686, 74742), True, 'from gym_collision_avoidance.envs.policies.CADRL.scripts.multi import pedData_processing_multi as pedData\n'), ((75543, 75636), 'gym_collision_avoidance.envs.policies.CADRL.scripts.multi.pedData_processing_multi.rawStates_2_agentCentricStates', 'pedData.rawStates_2_agentCentricStates', (['agent_states', 'other_agents_state', 'self.num_agents'], {}), '(agent_states, other_agents_state,\n self.num_agents)\n', (75581, 75636), True, 'from gym_collision_avoidance.envs.policies.CADRL.scripts.multi import pedData_processing_multi as pedData\n'), ((76053, 76082), 'numpy.minimum', 'np.minimum', (['upper_bnd', 'values'], {}), '(upper_bnd, values)\n', (76063, 76082), True, 'import numpy as np\n'), ((77058, 77073), 'numpy.zeros', 'np.zeros', (['(10,)'], {}), '((10,))\n', (77066, 77073), True, 'import numpy as np\n'), ((79275, 79315), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(num_agents,)'}), '(2, size=(num_agents,))\n', (79292, 79315), True, 'import numpy as np\n'), ((81822, 81838), 'numpy.min', 'np.min', (['dist_mat'], {}), '(dist_mat)\n', (81828, 
81838), True, 'import numpy as np\n'), ((82050, 82074), 'numpy.all', 'np.all', (['if_completed_vec'], {}), '(if_completed_vec)\n', (82056, 82074), True, 'import numpy as np\n'), ((82373, 82411), 'numpy.asarray', 'np.asarray', (['dt_vec[time_past_one_ind:]'], {}), '(dt_vec[time_past_one_ind:])\n', (82383, 82411), True, 'import numpy as np\n'), ((83027, 83043), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (83041, 83043), True, 'import numpy as np\n'), ((86113, 86146), 'copy.deepcopy', 'copy.deepcopy', (['next_agents_states'], {}), '(next_agents_states)\n', (86126, 86146), False, 'import copy\n'), ((86320, 86335), 'numpy.asarray', 'np.asarray', (['vec'], {}), '(vec)\n', (86330, 86335), True, 'import numpy as np\n'), ((86553, 86623), 'gym_collision_avoidance.envs.policies.CADRL.scripts.multi.pedData_processing_multi.plot_traj_raw_multi', 'pedData.plot_traj_raw_multi', (['traj_raw_multi', 'title_string', 'figure_name'], {}), '(traj_raw_multi, title_string, figure_name)\n', (86580, 86623), True, 'from gym_collision_avoidance.envs.policies.CADRL.scripts.multi import pedData_processing_multi as pedData\n'), ((87301, 87319), 'numpy.array', 'np.array', (['[EPS, 0]'], {}), '([EPS, 0])\n', (87309, 87319), True, 'import numpy as np\n'), ((89424, 89474), 'numpy.union1d', 'np.union1d', (['bad_inds_same_fast', 'bad_inds_same_slow'], {}), '(bad_inds_same_fast, bad_inds_same_slow)\n', (89434, 89474), True, 'import numpy as np\n'), ((89945, 89978), 'numpy.linalg.norm', 'np.linalg.norm', (['other_pos'], {'axis': '(1)'}), '(other_pos, axis=1)\n', (89959, 89978), True, 'import numpy as np\n'), ((95358, 95443), 'gym_collision_avoidance.envs.policies.CADRL.scripts.neural_networks.neural_network_regr_multi.NN_training_param', 'nn.NN_training_param', (['sgd_step_size', 'reg_lambda', 'nb_iter', 'sgd_batch_size', 'w_scale'], {}), '(sgd_step_size, reg_lambda, nb_iter, sgd_batch_size,\n w_scale)\n', (95378, 95443), True, 'from 
gym_collision_avoidance.envs.policies.CADRL.scripts.neural_networks import neural_network_regr_multi as nn\n'), ((16751, 16782), 'numpy.amin', 'np.amin', (['plot_z[color_min_inds]'], {}), '(plot_z[color_min_inds])\n', (16758, 16782), True, 'import numpy as np\n'), ((18635, 18711), 'numpy.arctan2', 'np.arctan2', (['(agent_state[7] - agent_state[1])', '(agent_state[6] - agent_state[0])'], {}), '(agent_state[7] - agent_state[1], agent_state[6] - agent_state[0])\n', (18645, 18711), True, 'import numpy as np\n'), ((29774, 29793), 'numpy.ones', 'np.ones', (['(num_pts,)'], {}), '((num_pts,))\n', (29781, 29793), True, 'import numpy as np\n'), ((31349, 31399), 'numpy.arctan2', 'np.arctan2', (['other_action_xy[1]', 'other_action_xy[0]'], {}), '(other_action_xy[1], other_action_xy[0])\n', (31359, 31399), True, 'import numpy as np\n'), ((31520, 31570), 'numpy.array', 'np.array', (['[other_action_speed, other_action_angle]'], {}), '([other_action_speed, other_action_angle])\n', (31528, 31570), True, 'import numpy as np\n'), ((32550, 32596), 'numpy.linalg.norm', 'np.linalg.norm', (['other_agents_state_in[tt][2:4]'], {}), '(other_agents_state_in[tt][2:4])\n', (32564, 32596), True, 'import numpy as np\n'), ((32633, 32703), 'numpy.arctan2', 'np.arctan2', (['other_agents_state_in[tt][3]', 'other_agents_state_in[tt][2]'], {}), '(other_agents_state_in[tt][3], other_agents_state_in[tt][2])\n', (32643, 32703), True, 'import numpy as np\n'), ((32949, 32983), 'numpy.cos', 'np.cos', (['other_agents_action[tt][1]'], {}), '(other_agents_action[tt][1])\n', (32955, 32983), True, 'import numpy as np\n'), ((33053, 33087), 'numpy.sin', 'np.sin', (['other_agents_action[tt][1]'], {}), '(other_agents_action[tt][1])\n', (33059, 33087), True, 'import numpy as np\n'), ((34671, 34733), 'numpy.linalg.norm', 'np.linalg.norm', (['(agent_state[0:2] - other_agents_state[tt][0:2])'], {}), '(agent_state[0:2] - other_agents_state[tt][0:2])\n', (34685, 34733), True, 'import numpy as np\n'), ((35883, 35989), 
'numpy.where', 'np.where', (['((dists_to_goal < DIST_2_GOAL_THRES) & (min_dists[non_collision_inds] >\n GETTING_CLOSE_RANGE))'], {}), '((dists_to_goal < DIST_2_GOAL_THRES) & (min_dists[\n non_collision_inds] > GETTING_CLOSE_RANGE))\n', (35891, 35989), True, 'import numpy as np\n'), ((43441, 43463), 'numpy.ones', 'np.ones', (['(num_states,)'], {}), '((num_states,))\n', (43448, 43463), True, 'import numpy as np\n'), ((52442, 52468), 'copy.deepcopy', 'copy.deepcopy', (['agent_state'], {}), '(agent_state)\n', (52455, 52468), False, 'import copy\n'), ((56694, 56727), 'numpy.argpartition', 'np.argpartition', (['state_values', '(-4)'], {}), '(state_values, -4)\n', (56709, 56727), True, 'import numpy as np\n'), ((56771, 56804), 'numpy.argpartition', 'np.argpartition', (['state_values', '(-4)'], {}), '(state_values, -4)\n', (56786, 56804), True, 'import numpy as np\n'), ((57473, 57493), 'numpy.cos', 'np.cos', (['angle_select'], {}), '(angle_select)\n', (57479, 57493), True, 'import numpy as np\n'), ((57532, 57552), 'numpy.sin', 'np.sin', (['angle_select'], {}), '(angle_select)\n', (57538, 57552), True, 'import numpy as np\n'), ((58156, 58175), 'numpy.sign', 'np.sign', (['angle_diff'], {}), '(angle_diff)\n', (58163, 58175), True, 'import numpy as np\n'), ((60060, 60081), 'numpy.cos', 'np.cos', (['angles_select'], {}), '(angles_select)\n', (60066, 60081), True, 'import numpy as np\n'), ((60124, 60145), 'numpy.sin', 'np.sin', (['angles_select'], {}), '(angles_select)\n', (60130, 60145), True, 'import numpy as np\n'), ((69510, 69542), 'numpy.where', 'np.where', (['(agent_vels[:, 0] > EPS)'], {}), '(agent_vels[:, 0] > EPS)\n', (69518, 69542), True, 'import numpy as np\n'), ((77695, 77780), 'numpy.arctan2', 'np.arctan2', (['(test_case[i, 3] - test_case[i, 1])', '(test_case[i, 2] - test_case[i, 0])'], {}), '(test_case[i, 3] - test_case[i, 1], test_case[i, 2] - test_case[i, 0]\n )\n', (77705, 77780), True, 'import numpy as np\n'), ((77903, 77954), 'numpy.linalg.norm', 
'np.linalg.norm', (['(agent_state[0:2] - agent_state[6:8])'], {}), '(agent_state[0:2] - agent_state[6:8])\n', (77917, 77954), True, 'import numpy as np\n'), ((81030, 81091), 'numpy.linalg.norm', 'np.linalg.norm', (['(agents_states[i][0:2] - agents_states[i][6:8])'], {}), '(agents_states[i][0:2] - agents_states[i][6:8])\n', (81044, 81091), True, 'import numpy as np\n'), ((81377, 81395), 'numpy.amin', 'np.amin', (['agents_dt'], {}), '(agents_dt)\n', (81384, 81395), True, 'import numpy as np\n'), ((86459, 86483), 'numpy.sum', 'np.sum', (['time_to_complete'], {}), '(time_to_complete)\n', (86465, 86483), True, 'import numpy as np\n'), ((89537, 89750), 'numpy.where', 'np.where', (['((dist_2_goal > 1) & (other_speed > EPS) & (agent_speed > EPS) & (other_py <\n 0) & (other_py > -2) & (other_px > 0) & (other_px < 5) & (agent_heading >\n EPS) & (other_heading < -5.0 * np.pi / 6.0))'], {}), '((dist_2_goal > 1) & (other_speed > EPS) & (agent_speed > EPS) & (\n other_py < 0) & (other_py > -2) & (other_px > 0) & (other_px < 5) & (\n agent_heading > EPS) & (other_heading < -5.0 * np.pi / 6.0))\n', (89545, 89750), True, 'import numpy as np\n'), ((91472, 91522), 'numpy.union1d', 'np.union1d', (['bad_inds_same_fast', 'bad_inds_same_slow'], {}), '(bad_inds_same_fast, bad_inds_same_slow)\n', (91482, 91522), True, 'import numpy as np\n'), ((91991, 92024), 'numpy.linalg.norm', 'np.linalg.norm', (['other_pos'], {'axis': '(1)'}), '(other_pos, axis=1)\n', (92005, 92024), True, 'import numpy as np\n'), ((12020, 12077), 'numpy.linalg.norm', 'np.linalg.norm', (['(agent_state[0:2] - other_agent_state[0:2])'], {}), '(agent_state[0:2] - other_agent_state[0:2])\n', (12034, 12077), True, 'import numpy as np\n'), ((28660, 28717), 'numpy.linalg.norm', 'np.linalg.norm', (['(other_agent_state[0:2] - agent_state[0:2])'], {}), '(other_agent_state[0:2] - agent_state[0:2])\n', (28674, 28717), True, 'import numpy as np\n'), ((32747, 32795), 'numpy.array', 'np.array', (['[other_agent_speed, 
other_agent_angle]'], {}), '([other_agent_speed, other_agent_angle])\n', (32755, 32795), True, 'import numpy as np\n'), ((51787, 51825), 'numpy.cos', 'np.cos', (['lower_cost_to_go_actions[:, 1]'], {}), '(lower_cost_to_go_actions[:, 1])\n', (51793, 51825), True, 'import numpy as np\n'), ((51890, 51928), 'numpy.sin', 'np.sin', (['lower_cost_to_go_actions[:, 1]'], {}), '(lower_cost_to_go_actions[:, 1])\n', (51896, 51928), True, 'import numpy as np\n'), ((58344, 58366), 'numpy.sign', 'np.sign', (['next_state[9]'], {}), '(next_state[9])\n', (58351, 58366), True, 'import numpy as np\n'), ((58951, 59006), 'numpy.clip', 'np.clip', (['angle_diff', 'delta_heading_lb', 'delta_heading_ub'], {}), '(angle_diff, delta_heading_lb, delta_heading_ub)\n', (58958, 59006), True, 'import numpy as np\n'), ((62693, 62749), 'numpy.clip', 'np.clip', (['angles_diff', 'delta_heading_lb', 'delta_heading_ub'], {}), '(angles_diff, delta_heading_lb, delta_heading_ub)\n', (62700, 62749), True, 'import numpy as np\n'), ((77338, 77363), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(0.2)'], {}), '(0, 0.2)\n', (77355, 77363), True, 'import numpy as np\n'), ((80226, 80279), 'numpy.linalg.norm', 'np.linalg.norm', (['(test_case[i, 2:4] - test_case[i, 0:2])'], {}), '(test_case[i, 2:4] - test_case[i, 0:2])\n', (80240, 80279), True, 'import numpy as np\n'), ((91585, 91796), 'numpy.where', 'np.where', (['((dist_2_goal > 1) & (other_speed > EPS) & (agent_speed > EPS) & (other_py <\n 2) & (other_py > 0) & (other_px > 0) & (other_px < 5) & (agent_heading <\n EPS) & (other_heading > 5.0 * np.pi / 6.0))'], {}), '((dist_2_goal > 1) & (other_speed > EPS) & (agent_speed > EPS) & (\n other_py < 2) & (other_py > 0) & (other_px > 0) & (other_px < 5) & (\n agent_heading < EPS) & (other_heading > 5.0 * np.pi / 6.0))\n', (91593, 91796), True, 'import numpy as np\n'), ((55262, 55313), 'numpy.linalg.norm', 'np.linalg.norm', (['(agent_state[0:2] - agent_state[6:8])'], {}), '(agent_state[0:2] - agent_state[6:8])\n', 
(55276, 55313), True, 'import numpy as np\n'), ((68809, 68866), 'numpy.linalg.norm', 'np.linalg.norm', (['(agent_state[0:2] - other_agent_state[0:2])'], {}), '(agent_state[0:2] - other_agent_state[0:2])\n', (68823, 68866), True, 'import numpy as np\n'), ((77264, 77280), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (77278, 77280), True, 'import numpy as np\n'), ((78002, 78024), 'numpy.cos', 'np.cos', (['agent_state[4]'], {}), '(agent_state[4])\n', (78008, 78024), True, 'import numpy as np\n'), ((78026, 78048), 'numpy.sin', 'np.sin', (['agent_state[4]'], {}), '(agent_state[4])\n', (78032, 78048), True, 'import numpy as np\n'), ((82613, 82662), 'numpy.asarray', 'np.asarray', (['traj_raw_multi[i][time_past_one_ind:]'], {}), '(traj_raw_multi[i][time_past_one_ind:])\n', (82623, 82662), True, 'import numpy as np\n'), ((84300, 84352), 'numpy.arctan2', 'np.arctan2', (['desired_velocity[1]', 'desired_velocity[0]'], {}), '(desired_velocity[1], desired_velocity[0])\n', (84310, 84352), True, 'import numpy as np\n'), ((55356, 55413), 'numpy.linalg.norm', 'np.linalg.norm', (['(agent_state[0:2] - other_agent_state[0:2])'], {}), '(agent_state[0:2] - other_agent_state[0:2])\n', (55370, 55413), True, 'import numpy as np\n'), ((55517, 55580), 'numpy.linalg.norm', 'np.linalg.norm', (['(other_agent_state[0:2] - other_agent_state[6:8])'], {}), '(other_agent_state[0:2] - other_agent_state[6:8])\n', (55531, 55580), True, 'import numpy as np\n'), ((65740, 65763), 'numpy.linalg.norm', 'np.linalg.norm', (['agent_v'], {}), '(agent_v)\n', (65754, 65763), True, 'import numpy as np\n'), ((77607, 77623), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (77621, 77623), True, 'import numpy as np\n'), ((81602, 81663), 'numpy.linalg.norm', 'np.linalg.norm', (['(agents_states[i][0:2] - agents_states[j][0:2])'], {}), '(agents_states[i][0:2] - agents_states[j][0:2])\n', (81616, 81663), True, 'import numpy as np\n'), ((86742, 86753), 'time.time', 'time.time', ([], {}), '()\n', (86751, 
86753), False, 'import time\n'), ((69728, 69775), 'numpy.matlib.repmat', 'np.matlib.repmat', (['dot_product[front_inds]', '(2)', '(1)'], {}), '(dot_product[front_inds], 2, 1)\n', (69744, 69775), True, 'import numpy as np\n'), ((84540, 84591), 'numpy.linalg.norm', 'np.linalg.norm', (['(agent_state[0:2] - agent_state[6:8])'], {}), '(agent_state[0:2] - agent_state[6:8])\n', (84554, 84591), True, 'import numpy as np\n')] |
# support/Layer.py
import numpy as np
from support.AbstractLayer import Layers
class Layer(Layers):  # fully connected weight layer
    """A dense (fully connected) layer trained with the Adam optimizer.

    Holds a weight matrix of shape (inputSize, outputSize), a bias row
    vector of shape (1, outputSize), and the Adam state (first/second
    moment estimates for both weights and bias, plus the time step).
    """
    def __init__(self, inputSize, outputSize):
        # Small random initial parameters keep early activations small.
        self.weights = np.random.randn(inputSize, outputSize) * 0.1
        self.bias = np.random.rand(1, outputSize) * 0.1
        # Adam hyper-parameters (Kingma & Ba, 2015)
        self.alpha = 0.001   # step size
        self.beta1 = 0.9     # exponential decay rate for the first moment
        self.beta2 = 0.999   # exponential decay rate for the second moment
        self.epsilon = 1e-8  # numerical-stability constant
        self.m = 0   # first moment estimate (weights)
        self.v = 0   # second moment estimate (weights)
        self.mb = 0  # first moment estimate (bias)
        self.vb = 0  # second moment estimate (bias)
        self.t = 0   # time step
    def changeSize(self, layer, inputSize, outputSize):
        """Re-initialize *layer*'s weights and bias with a new shape.

        NOTE(review): unlike __init__, the bias here is drawn from randn
        (zero-mean) rather than rand — kept as-is for compatibility.
        """
        layer.weights = np.random.randn(inputSize, outputSize) * 0.1
        layer.bias = np.random.randn(1, outputSize) * 0.1
    def jitterWeights(self):
        """Add Gaussian noise N(0, 0.2^2) to each weight with probability 0.3."""
        numrows, numcols = self.weights.shape
        prob = 0.3
        sigma = 0.2
        mu = 0
        index = np.random.rand(numrows, numcols) < prob  # weights selected for jitter
        noise = np.random.normal(mu, sigma, size=(numrows, numcols))  # noise matrix
        noise[~index] = 0.0  # zero the noise of unselected weights
        self.weights = self.weights + noise
    def pruningWeights(self):
        """Zero the smallest-magnitude nonzero weight(s); return how many were pruned.

        Intended to be called repeatedly (layer by layer) until the desired
        number of pruned weights is reached; already-zero weights are ignored
        when searching for the minimum magnitude.
        """
        absweights = abs(self.weights)
        # Smallest nonzero magnitude (zeros are masked out with the max).
        min_ = np.min(np.where(absweights == 0, absweights.max(), absweights))
        index = min_ == absweights
        self.weights[index] = 0.0
        return np.sum(index)
    def forwardPropagation(self, inputData):
        """Return inputData @ weights + bias, caching the input for backprop."""
        self.input = inputData
        self.output = np.dot(self.input, self.weights) + self.bias  # dot product
        return self.output
    def backwardPropagation(self, outputError):
        """Apply one Adam update from the output-error gradient.

        Returns the error propagated back to this layer's input.
        """
        inputError = np.dot(outputError, self.weights.T)
        weightsError = np.dot(self.input.T, outputError)
        # Adam moment updates
        self.t = self.t + 1
        self.m = self.beta1 * self.m + (1 - self.beta1) * weightsError
        self.v = self.beta2 * self.v + (1 - self.beta2) * (weightsError**2)
        self.mb = self.beta1 * self.mb + (1 - self.beta1) * outputError
        self.vb = self.beta2 * self.vb + (1 - self.beta2) * (outputError**2)
        # Bias-corrected moment estimates
        mHat = self.m / (1 - self.beta1**self.t)
        vHat = self.v / (1 - self.beta2**self.t)
        mHatb = self.mb / (1 - self.beta1**self.t)
        vHatb = self.vb / (1 - self.beta2**self.t)
        # Parameter update. BUGFIX: epsilon must be ADDED to the denominator
        # (sqrt(vHat) + eps) per the Adam paper; subtracting it can make the
        # denominator zero or negative when vHat is tiny.
        self.weights = self.weights - self.alpha * (mHat / (np.sqrt(vHat) + self.epsilon))
        self.bias = self.bias - self.alpha * (mHatb / (np.sqrt(vHatb) + self.epsilon))
        return inputError
| [
"numpy.sum",
"numpy.random.randn",
"numpy.zeros",
"numpy.random.normal",
"numpy.random.rand",
"numpy.dot",
"numpy.sqrt"
] | [((1302, 1354), 'numpy.random.normal', 'np.random.normal', (['mu', 'sigma'], {'size': '(numrows, numcols)'}), '(mu, sigma, size=(numrows, numcols))\n', (1318, 1354), True, 'import numpy as np\n'), ((1903, 1931), 'numpy.zeros', 'np.zeros', (['(numrows, numcols)'], {}), '((numrows, numcols))\n', (1911, 1931), True, 'import numpy as np\n'), ((2002, 2015), 'numpy.sum', 'np.sum', (['index'], {}), '(index)\n', (2008, 2015), True, 'import numpy as np\n'), ((2316, 2351), 'numpy.dot', 'np.dot', (['outputError', 'self.weights.T'], {}), '(outputError, self.weights.T)\n', (2322, 2351), True, 'import numpy as np\n'), ((2376, 2409), 'numpy.dot', 'np.dot', (['self.input.T', 'outputError'], {}), '(self.input.T, outputError)\n', (2382, 2409), True, 'import numpy as np\n'), ((187, 225), 'numpy.random.randn', 'np.random.randn', (['inputSize', 'outputSize'], {}), '(inputSize, outputSize)\n', (202, 225), True, 'import numpy as np\n'), ((281, 310), 'numpy.random.rand', 'np.random.rand', (['(1)', 'outputSize'], {}), '(1, outputSize)\n', (295, 310), True, 'import numpy as np\n'), ((904, 942), 'numpy.random.randn', 'np.random.randn', (['inputSize', 'outputSize'], {}), '(inputSize, outputSize)\n', (919, 942), True, 'import numpy as np\n'), ((970, 1000), 'numpy.random.randn', 'np.random.randn', (['(1)', 'outputSize'], {}), '(1, outputSize)\n', (985, 1000), True, 'import numpy as np\n'), ((1220, 1252), 'numpy.random.rand', 'np.random.rand', (['numrows', 'numcols'], {}), '(numrows, numcols)\n', (1234, 1252), True, 'import numpy as np\n'), ((2153, 2185), 'numpy.dot', 'np.dot', (['self.input', 'self.weights'], {}), '(self.input, self.weights)\n', (2159, 2185), True, 'import numpy as np\n'), ((3124, 3137), 'numpy.sqrt', 'np.sqrt', (['vHat'], {}), '(vHat)\n', (3131, 3137), True, 'import numpy as np\n'), ((3210, 3224), 'numpy.sqrt', 'np.sqrt', (['vHatb'], {}), '(vHatb)\n', (3217, 3224), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 18 12:47:56 2020
@author: Sila
"""
# Monte Carlo methods, or Monte Carlo experiments, are a broad class of computational algorithms
# that rely on repeated random sampling to obtain numerical results.
import numpy as np
import math
import random
import matplotlib.pyplot as plt
# Initialization of the Monte Carlo counters and geometry.
square_size = 1  # side length of the square (also the circle radius)
points_inside_circle = 0
points_inside_square = 0
sample_size = 1000  # number of random points to draw
arc = np.linspace(0, np.pi / 2, 100)  # angles for drawing the quarter-circle outline
# A function called generate_points which generates random points inside the square.
def generate_points(size):
    """Draw a point uniformly at random from the [0, size] x [0, size] square."""
    coords = [random.random() * size for _ in range(2)]
    return (coords[0], coords[1])
# A function called is_in_circle which will check if the point we generated falls within the circle.
def is_in_circle(point, size):
    """Return True if *point* = (x, y) lies inside or on the circle of
    radius *size* centred at the origin.

    Uses math.hypot instead of manually squaring and square-rooting,
    which is both clearer and more numerically robust (it avoids
    intermediate overflow/underflow of the squared terms).
    """
    return math.hypot(point[0], point[1]) <= size
# square size = r^2 = 1, circle size = r^2*pi / 4 = 1^2* pi / 4 = pi / 4.
# circle_size / square_size = pi / 4
# The program keeps track of how many points it's picked so far (N) and how many of those points fell inside the circle (M).
# Pi is then approximated as follows:
#
#
# pi = 4*M / N
#
#
def compute_pi(points_inside_circle, points_inside_square):
    """Estimate pi as 4 * M / N, where M points fell inside the quarter
    circle out of N total sampled points."""
    hit_ratio = points_inside_circle / points_inside_square
    return 4 * hit_ratio
# Plot the quarter circle, sample random points, and count the hits.
plt.axes().set_aspect('equal')
plt.plot(1 * np.cos(arc), 1 * np.sin(arc))  # quarter-circle boundary
for i in range(sample_size):
    point = generate_points(square_size)
    plt.plot(point[0], point[1], 'c.')
    points_inside_square += 1  # every sample lies in the square by construction
    if is_in_circle(point, square_size):
        points_inside_circle += 1
plt.show()
print("Approximate value of pi is {}".format(compute_pi(points_inside_circle, points_inside_square)))
print("Awesome")
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"math.sqrt",
"matplotlib.pyplot.axes",
"random.random",
"numpy.sin",
"numpy.linspace",
"numpy.cos"
] | [((436, 466), 'numpy.linspace', 'np.linspace', (['(0)', '(np.pi / 2)', '(100)'], {}), '(0, np.pi / 2, 100)\n', (447, 466), True, 'import numpy as np\n'), ((1570, 1580), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1578, 1580), True, 'import matplotlib.pyplot as plt\n'), ((1428, 1462), 'matplotlib.pyplot.plot', 'plt.plot', (['point[0]', 'point[1]', '"""c."""'], {}), "(point[0], point[1], 'c.')\n", (1436, 1462), True, 'import matplotlib.pyplot as plt\n'), ((589, 604), 'random.random', 'random.random', ([], {}), '()\n', (602, 604), False, 'import random\n'), ((620, 635), 'random.random', 'random.random', ([], {}), '()\n', (633, 635), False, 'import random\n'), ((806, 846), 'math.sqrt', 'math.sqrt', (['(point[0] ** 2 + point[1] ** 2)'], {}), '(point[0] ** 2 + point[1] ** 2)\n', (815, 846), False, 'import math\n'), ((1279, 1289), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (1287, 1289), True, 'import matplotlib.pyplot as plt\n'), ((1323, 1334), 'numpy.cos', 'np.cos', (['arc'], {}), '(arc)\n', (1329, 1334), True, 'import numpy as np\n'), ((1340, 1351), 'numpy.sin', 'np.sin', (['arc'], {}), '(arc)\n', (1346, 1351), True, 'import numpy as np\n')] |
import cv2
import numpy as np
# Load the source image; cv2.imread returns None if the file is missing.
img = cv2.imread("resources/example.jpg")
# 2x2 structuring element used to dilate (thicken) the detected edges.
kernal = np.ones((2, 2), np.uint8)
"""
What this function does it makes the edges of the picture thick
so that the canny function can process them in a better way, and also
numpy is used here in the kernal variable to do that main part which is
deciding the amount of thinkness needed for canny to process the lines of
the edges in a better way
"""
# Canny edge detection with hysteresis thresholds 150 (low) and 200 (high).
imgCanny = cv2.Canny(img, 150, 200)
# Thicken the detected edges with the 2x2 kernel.
imgDialation = cv2.dilate(imgCanny, kernal)
# Displaying the output
cv2.imshow("Output", imgDialation)
# Block until a key is pressed so the window stays open (0 = wait forever).
cv2.waitKey(0)
"cv2.Canny",
"cv2.dilate",
"cv2.waitKey",
"numpy.ones",
"cv2.imread",
"cv2.imshow"
] | [((37, 72), 'cv2.imread', 'cv2.imread', (['"""resources/example.jpg"""'], {}), "('resources/example.jpg')\n", (47, 72), False, 'import cv2\n'), ((82, 107), 'numpy.ones', 'np.ones', (['(2, 2)', 'np.uint8'], {}), '((2, 2), np.uint8)\n', (89, 107), True, 'import numpy as np\n'), ((441, 465), 'cv2.Canny', 'cv2.Canny', (['img', '(150)', '(200)'], {}), '(img, 150, 200)\n', (450, 465), False, 'import cv2\n'), ((481, 509), 'cv2.dilate', 'cv2.dilate', (['imgCanny', 'kernal'], {}), '(imgCanny, kernal)\n', (491, 509), False, 'import cv2\n'), ((535, 569), 'cv2.imshow', 'cv2.imshow', (['"""Output"""', 'imgDialation'], {}), "('Output', imgDialation)\n", (545, 569), False, 'import cv2\n'), ((598, 612), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (609, 612), False, 'import cv2\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 21 15:21:36 2019
@author: rakshit
"""
# Confirmed code works perfectly. Do not display.
import os
import cv2
import sys
import json
import argparse
import matplotlib
import numpy as np
import deepdish as dd
import scipy.io as scio
from matplotlib.patches import Ellipse
from skimage.draw import ellipse as drawEllipse
sys.path.append('..')
from helperfunctions import ransac, ElliFit, my_ellipse
from helperfunctions import generateEmptyStorage, getValidPoints
def mypause(interval):
    """Redraw any stale interactive figure and spin the GUI event loop
    for *interval* seconds.

    Only acts when the active matplotlib backend is interactive; a no-op
    otherwise.
    """
    if plt.rcParams['backend'] in matplotlib.rcsetup.interactive_bk:
        manager = matplotlib._pylab_helpers.Gcf.get_active()
        if manager is not None:
            canvas = manager.canvas
            if canvas.figure.stale:
                canvas.draw()
            canvas.start_event_loop(interval)
    return
# Command-line options: display toggle and dataset root.
parser = argparse.ArgumentParser()
parser.add_argument('--noDisp', help='Specify flag to display labelled images', type=int, default=1)
parser.add_argument('--path2ds',
                    help='Path to dataset',
                    type=str,
                    default='/media/rakshit/Monster/Datasets')
args = parser.parse_args()
if args.noDisp:
    noDisp = True
    print('No graphics')
else:
    noDisp = False
    print('Showing figures')
# Probe GUI backends until one loads; the first usable backend wins.
# NOTE(review): the bare `except:` silently swallows all errors, including
# KeyboardInterrupt — acceptable here only because each iteration is a probe.
gui_env = ['Qt5Agg','WXAgg','TKAgg','GTKAgg']
for gui in gui_env:
    try:
        print("testing: {}".format(gui))
        matplotlib.use(gui,warn=False, force=True)
        from matplotlib import pyplot as plt
        break
    except:
        continue
print("Using: {}".format(matplotlib.get_backend()))
plt.ion()
ds_num = 0
# Dataset layout: OpenEDS source folders, output archive folder, and key folder.
PATH_OPENEDS = os.path.join(args.path2ds, 'OpenEDS')
PATH_DIR = os.path.join(args.path2ds, 'OpenEDS', 'Semantic_Segmentation_Dataset')
PATH_DS = os.path.join(args.path2ds, 'All')
PATH_MASTER = os.path.join(args.path2ds, 'MasterKey')
print('Extracting OpenEDS')
# Don't append the test set.
listDir = os.listdir(PATH_DIR)
listDir.remove('test')
# For each split (train/validation): load every image and its label map,
# fit pupil and iris ellipses via RANSAC, build a no-skin mask, optionally
# visualize, and archive everything to HDF5 (+ a MATLAB master key).
for dirCond in listDir:
    ds_name = 'OpenEDS_{}_{}'.format(dirCond, ds_num)
    print('Opening the {} folder'.format(dirCond))
    # Read UID
    path2json = os.path.join(PATH_OPENEDS, 'OpenEDS_{}_userID_mapping_to_images.json'.format(dirCond))
    im2UID = json.load(open(path2json, 'r'))
    PATH_IMAGES = os.path.join(PATH_DIR, dirCond, 'images')
    PATH_LABELS = os.path.join(PATH_DIR, dirCond, 'labels')
    # NOTE(review): PATH_FITS and pid (below) are computed but never used here.
    PATH_FITS = os.path.join(PATH_DIR, dirCond, 'fits')
    listIm = os.listdir(PATH_IMAGES)
    Data, keydict = generateEmptyStorage(name='OpenEDS', subset=dirCond)
    i = 0
    if not noDisp:
        fig, plts = plt.subplots(1,1)
    for pData in im2UID:
        # Image number and UID for each person
        listIm = pData['semantic_segmenation_images']
        pid = int(pData['id'].replace('U', '')) - 111
        for imName_full in listIm:
            imName, _ = os.path.splitext(imName_full)
            # Do not save images without a proper ellipse and iris fit
            # Load image, label map and fits
            I = cv2.imread(os.path.join(PATH_IMAGES, imName_full), 0)
            LabelMat = np.load(os.path.join(PATH_LABELS, imName+'.npy'))
            #%% Make sure images are 640x480
            # Crop a 300-row band centred on the labelled region, then resize.
            r = np.where(LabelMat)[0]
            c = int(0.5*(np.max(r) + np.min(r)))
            top, bot = (0, c+150-(c-150)) if c-150<0 else (c-150, c+150)
            I = I[top:bot, :]
            LabelMat = LabelMat[top:bot, :]
            I = cv2.resize(I, (640, 480), interpolation=cv2.INTER_LANCZOS4)
            LabelMat = cv2.resize(LabelMat, (640, 480), interpolation=cv2.INTER_NEAREST)
            #%%
            # Fit pupil (label 3) and iris (label 2) ellipses when enough
            # labelled pixels exist; otherwise fall back to a -1 sentinel fit.
            pupilPts, irisPts = getValidPoints(LabelMat)
            if np.sum(LabelMat == 3) > 150 and type(pupilPts) is not list:
                model_pupil = ransac(pupilPts, ElliFit, 15, 40, 5e-3, 15).loop()
                pupil_fit_error = my_ellipse(model_pupil.model).verify(pupilPts)
            else:
                print('Not enough pupil points')
                model_pupil = type('model', (object, ), {})
                model_pupil.model = np.array([-1, -1, -1, -1, -1])
                pupil_fit_error = np.inf
            if np.sum(LabelMat == 2) > 200 and type(irisPts) is not list:
                model_iris = ransac(irisPts, ElliFit, 15, 40, 5e-3, 15).loop()
                iris_fit_error = my_ellipse(model_iris.model).verify(irisPts)
            else:
                print('Not enough iris points')
                model_iris = type('model', (object, ), {})
                model_iris.model = np.array([-1, -1, -1, -1, -1])
                model_iris.Phi = np.array([-1, -1, -1, -1, -1])
                iris_fit_error = np.inf
            # Reject fits whose verification error is too high.
            if pupil_fit_error >= 0.1:
                print('Not recording pupil. Unacceptable fit.')
                print('Pupil fit error: {}'.format(pupil_fit_error))
                model_pupil.model = np.array([-1, -1, -1, -1, -1])
            if iris_fit_error >= 0.1:
                print('Not recording iris. Unacceptable fit.')
                print('Iris fit error: {}'.format(iris_fit_error))
                model_iris.model = np.array([-1, -1, -1, -1, -1])
            pupil_loc = model_pupil.model[:2]
            # Draw mask no skin
            rr, cc = drawEllipse(pupil_loc[1],
                                 pupil_loc[0],
                                 model_pupil.model[3],
                                 model_pupil.model[2],
                                 rotation=-model_pupil.model[-1])
            pupMask = np.zeros_like(I)
            pupMask[rr.clip(0, I.shape[0]-1), cc.clip(0, I.shape[1]-1)] = 1
            rr, cc = drawEllipse(model_iris.model[1],
                                 model_iris.model[0],
                                 model_iris.model[3],
                                 model_iris.model[2],
                                 rotation=-model_iris.model[-1])
            iriMask = np.zeros_like(I)
            iriMask[rr.clip(0, I.shape[0]-1), cc.clip(0, I.shape[1]-1)] = 1
            if (np.any(pupMask) and np.any(iriMask)) and ((pupil_fit_error<0.1) and (iris_fit_error<0.1)):
                mask_woSkin = 2*iriMask + pupMask # Iris = 2, Pupil = 3
            else:
                # Neither fit exists, mask should be -1s.
                print('Found bad mask: {}'.format(imName))
                mask_woSkin = -np.ones(I.shape)
                continue
            # Add model information
            keydict['archive'].append(ds_name)
            keydict['resolution'].append(I.shape)
            keydict['pupil_loc'].append(pupil_loc)
            # Append images and label map
            Data['Images'].append(I)
            Data['Info'].append(imName_full) # Train or valid
            Data['Masks'].append(LabelMat)
            Data['Masks_noSkin'].append(mask_woSkin)
            Data['pupil_loc'].append(pupil_loc)
            # Append fits
            Data['Fits']['pupil'].append(model_pupil.model)
            Data['Fits']['iris'].append(model_iris.model)
            keydict['Fits']['pupil'].append(model_pupil.model)
            keydict['Fits']['iris'].append(model_iris.model)
            # Optional live visualization: create the artists once, then
            # update them in place on subsequent iterations.
            if not noDisp:
                if i == 0:
                    cE = Ellipse(tuple(pupil_loc),
                               2*model_pupil.model[2],
                               2*model_pupil.model[3],
                               angle=np.rad2deg(model_pupil.model[4]))
                    cL = Ellipse(tuple(model_iris.model[0:2]),
                               2*model_iris.model[2],
                               2*model_iris.model[3],
                               np.rad2deg(model_iris.model[4]))
                    cE.set_facecolor('None')
                    cE.set_edgecolor((1.0, 0.0, 0.0))
                    cL.set_facecolor('None')
                    cL.set_edgecolor((0.0, 1.0, 0.0))
                    cI = plts.imshow(I)
                    cM = plts.imshow(mask_woSkin, alpha=0.5)
                    plts.add_patch(cE)
                    plts.add_patch(cL)
                    plt.show()
                    plt.pause(.01)
                else:
                    cE.center = tuple(pupil_loc)
                    cE.angle = np.rad2deg(model_pupil.model[4])
                    cE.width = 2*model_pupil.model[2]
                    cE.height = 2*model_pupil.model[3]
                    cL.center = tuple(model_iris.model[0:2])
                    cL.width = 2*model_iris.model[2]
                    cL.height = 2*model_iris.model[3]
                    cL.angle = np.rad2deg(model_iris.model[-1])
                    cI.set_data(I)
                    cM.set_data(mask_woSkin)
                    mypause(0.01)
            i = i + 1
    print('{} images: {}'.format(dirCond, i))
    # Stack data
    Data['Images'] = np.stack(Data['Images'], axis=0)
    Data['Masks'] = np.stack(Data['Masks'], axis=0)
    Data['Masks_noSkin'] = np.stack(Data['Masks_noSkin'], axis=0)
    Data['pupil_loc'] = np.stack(Data['pupil_loc'], axis=0)
    Data['Fits']['pupil'] = np.stack(Data['Fits']['pupil'], axis=0)
    Data['Fits']['iris'] = np.stack(Data['Fits']['iris'], axis=0)
    keydict['resolution'] = np.stack(keydict['resolution'], axis=0)
    keydict['archive'] = np.stack(keydict['archive'], axis=0)
    keydict['pupil_loc'] = np.stack(keydict['pupil_loc'], axis=0)
    # Save data
    dd.io.save(os.path.join(PATH_DS, ds_name+'.h5'), Data)
    scio.savemat(os.path.join(PATH_MASTER, str(ds_name)+'.mat'), keydict, appendmat=True)
    ds_num=ds_num+1
"helperfunctions.generateEmptyStorage",
"numpy.sum",
"argparse.ArgumentParser",
"numpy.ones",
"helperfunctions.my_ellipse",
"helperfunctions.getValidPoints",
"matplotlib.get_backend",
"os.path.join",
"helperfunctions.ransac",
"sys.path.append",
"numpy.zeros_like",
"numpy.max",
"matplotlib.py... | [((390, 411), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (405, 411), False, 'import sys\n'), ((928, 953), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (951, 953), False, 'import argparse\n'), ((1675, 1684), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (1682, 1684), True, 'from matplotlib import pyplot as plt\n'), ((1712, 1749), 'os.path.join', 'os.path.join', (['args.path2ds', '"""OpenEDS"""'], {}), "(args.path2ds, 'OpenEDS')\n", (1724, 1749), False, 'import os\n'), ((1761, 1831), 'os.path.join', 'os.path.join', (['args.path2ds', '"""OpenEDS"""', '"""Semantic_Segmentation_Dataset"""'], {}), "(args.path2ds, 'OpenEDS', 'Semantic_Segmentation_Dataset')\n", (1773, 1831), False, 'import os\n'), ((1842, 1875), 'os.path.join', 'os.path.join', (['args.path2ds', '"""All"""'], {}), "(args.path2ds, 'All')\n", (1854, 1875), False, 'import os\n'), ((1890, 1929), 'os.path.join', 'os.path.join', (['args.path2ds', '"""MasterKey"""'], {}), "(args.path2ds, 'MasterKey')\n", (1902, 1929), False, 'import os\n'), ((1999, 2019), 'os.listdir', 'os.listdir', (['PATH_DIR'], {}), '(PATH_DIR)\n', (2009, 2019), False, 'import os\n'), ((2356, 2397), 'os.path.join', 'os.path.join', (['PATH_DIR', 'dirCond', '"""images"""'], {}), "(PATH_DIR, dirCond, 'images')\n", (2368, 2397), False, 'import os\n'), ((2416, 2457), 'os.path.join', 'os.path.join', (['PATH_DIR', 'dirCond', '"""labels"""'], {}), "(PATH_DIR, dirCond, 'labels')\n", (2428, 2457), False, 'import os\n'), ((2474, 2513), 'os.path.join', 'os.path.join', (['PATH_DIR', 'dirCond', '"""fits"""'], {}), "(PATH_DIR, dirCond, 'fits')\n", (2486, 2513), False, 'import os\n'), ((2527, 2550), 'os.listdir', 'os.listdir', (['PATH_IMAGES'], {}), '(PATH_IMAGES)\n', (2537, 2550), False, 'import os\n'), ((2572, 2624), 'helperfunctions.generateEmptyStorage', 'generateEmptyStorage', ([], {'name': '"""OpenEDS"""', 'subset': 'dirCond'}), "(name='OpenEDS', subset=dirCond)\n", (2592, 
2624), False, 'from helperfunctions import generateEmptyStorage, getValidPoints\n'), ((8903, 8935), 'numpy.stack', 'np.stack', (["Data['Images']"], {'axis': '(0)'}), "(Data['Images'], axis=0)\n", (8911, 8935), True, 'import numpy as np\n'), ((8956, 8987), 'numpy.stack', 'np.stack', (["Data['Masks']"], {'axis': '(0)'}), "(Data['Masks'], axis=0)\n", (8964, 8987), True, 'import numpy as np\n'), ((9015, 9053), 'numpy.stack', 'np.stack', (["Data['Masks_noSkin']"], {'axis': '(0)'}), "(Data['Masks_noSkin'], axis=0)\n", (9023, 9053), True, 'import numpy as np\n'), ((9078, 9113), 'numpy.stack', 'np.stack', (["Data['pupil_loc']"], {'axis': '(0)'}), "(Data['pupil_loc'], axis=0)\n", (9086, 9113), True, 'import numpy as np\n'), ((9142, 9181), 'numpy.stack', 'np.stack', (["Data['Fits']['pupil']"], {'axis': '(0)'}), "(Data['Fits']['pupil'], axis=0)\n", (9150, 9181), True, 'import numpy as np\n'), ((9209, 9247), 'numpy.stack', 'np.stack', (["Data['Fits']['iris']"], {'axis': '(0)'}), "(Data['Fits']['iris'], axis=0)\n", (9217, 9247), True, 'import numpy as np\n'), ((9277, 9316), 'numpy.stack', 'np.stack', (["keydict['resolution']"], {'axis': '(0)'}), "(keydict['resolution'], axis=0)\n", (9285, 9316), True, 'import numpy as np\n'), ((9342, 9378), 'numpy.stack', 'np.stack', (["keydict['archive']"], {'axis': '(0)'}), "(keydict['archive'], axis=0)\n", (9350, 9378), True, 'import numpy as np\n'), ((9406, 9444), 'numpy.stack', 'np.stack', (["keydict['pupil_loc']"], {'axis': '(0)'}), "(keydict['pupil_loc'], axis=0)\n", (9414, 9444), True, 'import numpy as np\n'), ((670, 712), 'matplotlib._pylab_helpers.Gcf.get_active', 'matplotlib._pylab_helpers.Gcf.get_active', ([], {}), '()\n', (710, 712), False, 'import matplotlib\n'), ((1491, 1534), 'matplotlib.use', 'matplotlib.use', (['gui'], {'warn': '(False)', 'force': '(True)'}), '(gui, warn=False, force=True)\n', (1505, 1534), False, 'import matplotlib\n'), ((1648, 1672), 'matplotlib.get_backend', 'matplotlib.get_backend', ([], {}), '()\n', 
(1670, 1672), False, 'import matplotlib\n'), ((2675, 2693), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (2687, 2693), True, 'from matplotlib import pyplot as plt\n'), ((9477, 9515), 'os.path.join', 'os.path.join', (['PATH_DS', "(ds_name + '.h5')"], {}), "(PATH_DS, ds_name + '.h5')\n", (9489, 9515), False, 'import os\n'), ((2933, 2962), 'os.path.splitext', 'os.path.splitext', (['imName_full'], {}), '(imName_full)\n', (2949, 2962), False, 'import os\n'), ((3520, 3579), 'cv2.resize', 'cv2.resize', (['I', '(640, 480)'], {'interpolation': 'cv2.INTER_LANCZOS4'}), '(I, (640, 480), interpolation=cv2.INTER_LANCZOS4)\n', (3530, 3579), False, 'import cv2\n'), ((3603, 3668), 'cv2.resize', 'cv2.resize', (['LabelMat', '(640, 480)'], {'interpolation': 'cv2.INTER_NEAREST'}), '(LabelMat, (640, 480), interpolation=cv2.INTER_NEAREST)\n', (3613, 3668), False, 'import cv2\n'), ((3718, 3742), 'helperfunctions.getValidPoints', 'getValidPoints', (['LabelMat'], {}), '(LabelMat)\n', (3732, 3742), False, 'from helperfunctions import generateEmptyStorage, getValidPoints\n'), ((5318, 5439), 'skimage.draw.ellipse', 'drawEllipse', (['pupil_loc[1]', 'pupil_loc[0]', 'model_pupil.model[3]', 'model_pupil.model[2]'], {'rotation': '(-model_pupil.model[-1])'}), '(pupil_loc[1], pupil_loc[0], model_pupil.model[3], model_pupil.\n model[2], rotation=-model_pupil.model[-1])\n', (5329, 5439), True, 'from skimage.draw import ellipse as drawEllipse\n'), ((5589, 5605), 'numpy.zeros_like', 'np.zeros_like', (['I'], {}), '(I)\n', (5602, 5605), True, 'import numpy as np\n'), ((5703, 5834), 'skimage.draw.ellipse', 'drawEllipse', (['model_iris.model[1]', 'model_iris.model[0]', 'model_iris.model[3]', 'model_iris.model[2]'], {'rotation': '(-model_iris.model[-1])'}), '(model_iris.model[1], model_iris.model[0], model_iris.model[3],\n model_iris.model[2], rotation=-model_iris.model[-1])\n', (5714, 5834), True, 'from skimage.draw import ellipse as drawEllipse\n'), ((5989, 6005), 
'numpy.zeros_like', 'np.zeros_like', (['I'], {}), '(I)\n', (6002, 6005), True, 'import numpy as np\n'), ((3107, 3145), 'os.path.join', 'os.path.join', (['PATH_IMAGES', 'imName_full'], {}), '(PATH_IMAGES, imName_full)\n', (3119, 3145), False, 'import os\n'), ((3181, 3223), 'os.path.join', 'os.path.join', (['PATH_LABELS', "(imName + '.npy')"], {}), "(PATH_LABELS, imName + '.npy')\n", (3193, 3223), False, 'import os\n'), ((3285, 3303), 'numpy.where', 'np.where', (['LabelMat'], {}), '(LabelMat)\n', (3293, 3303), True, 'import numpy as np\n'), ((4143, 4173), 'numpy.array', 'np.array', (['[-1, -1, -1, -1, -1]'], {}), '([-1, -1, -1, -1, -1])\n', (4151, 4173), True, 'import numpy as np\n'), ((4607, 4637), 'numpy.array', 'np.array', (['[-1, -1, -1, -1, -1]'], {}), '([-1, -1, -1, -1, -1])\n', (4615, 4637), True, 'import numpy as np\n'), ((4671, 4701), 'numpy.array', 'np.array', (['[-1, -1, -1, -1, -1]'], {}), '([-1, -1, -1, -1, -1])\n', (4679, 4701), True, 'import numpy as np\n'), ((4951, 4981), 'numpy.array', 'np.array', (['[-1, -1, -1, -1, -1]'], {}), '([-1, -1, -1, -1, -1])\n', (4959, 4981), True, 'import numpy as np\n'), ((5186, 5216), 'numpy.array', 'np.array', (['[-1, -1, -1, -1, -1]'], {}), '([-1, -1, -1, -1, -1])\n', (5194, 5216), True, 'import numpy as np\n'), ((3758, 3779), 'numpy.sum', 'np.sum', (['(LabelMat == 3)'], {}), '(LabelMat == 3)\n', (3764, 3779), True, 'import numpy as np\n'), ((4231, 4252), 'numpy.sum', 'np.sum', (['(LabelMat == 2)'], {}), '(LabelMat == 2)\n', (4237, 4252), True, 'import numpy as np\n'), ((6099, 6114), 'numpy.any', 'np.any', (['pupMask'], {}), '(pupMask)\n', (6105, 6114), True, 'import numpy as np\n'), ((6119, 6134), 'numpy.any', 'np.any', (['iriMask'], {}), '(iriMask)\n', (6125, 6134), True, 'import numpy as np\n'), ((6428, 6444), 'numpy.ones', 'np.ones', (['I.shape'], {}), '(I.shape)\n', (6435, 6444), True, 'import numpy as np\n'), ((8160, 8170), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8168, 8170), True, 'from 
matplotlib import pyplot as plt\n'), ((8191, 8206), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.01)'], {}), '(0.01)\n', (8200, 8206), True, 'from matplotlib import pyplot as plt\n'), ((8308, 8340), 'numpy.rad2deg', 'np.rad2deg', (['model_pupil.model[4]'], {}), '(model_pupil.model[4])\n', (8318, 8340), True, 'import numpy as np\n'), ((8649, 8681), 'numpy.rad2deg', 'np.rad2deg', (['model_iris.model[-1]'], {}), '(model_iris.model[-1])\n', (8659, 8681), True, 'import numpy as np\n'), ((3332, 3341), 'numpy.max', 'np.max', (['r'], {}), '(r)\n', (3338, 3341), True, 'import numpy as np\n'), ((3344, 3353), 'numpy.min', 'np.min', (['r'], {}), '(r)\n', (3350, 3353), True, 'import numpy as np\n'), ((3848, 3892), 'helperfunctions.ransac', 'ransac', (['pupilPts', 'ElliFit', '(15)', '(40)', '(0.005)', '(15)'], {}), '(pupilPts, ElliFit, 15, 40, 0.005, 15)\n', (3854, 3892), False, 'from helperfunctions import ransac, ElliFit, my_ellipse\n'), ((3933, 3962), 'helperfunctions.my_ellipse', 'my_ellipse', (['model_pupil.model'], {}), '(model_pupil.model)\n', (3943, 3962), False, 'from helperfunctions import ransac, ElliFit, my_ellipse\n'), ((4319, 4362), 'helperfunctions.ransac', 'ransac', (['irisPts', 'ElliFit', '(15)', '(40)', '(0.005)', '(15)'], {}), '(irisPts, ElliFit, 15, 40, 0.005, 15)\n', (4325, 4362), False, 'from helperfunctions import ransac, ElliFit, my_ellipse\n'), ((4402, 4430), 'helperfunctions.my_ellipse', 'my_ellipse', (['model_iris.model'], {}), '(model_iris.model)\n', (4412, 4430), False, 'from helperfunctions import ransac, ElliFit, my_ellipse\n'), ((7730, 7761), 'numpy.rad2deg', 'np.rad2deg', (['model_iris.model[4]'], {}), '(model_iris.model[4])\n', (7740, 7761), True, 'import numpy as np\n'), ((7470, 7502), 'numpy.rad2deg', 'np.rad2deg', (['model_pupil.model[4]'], {}), '(model_pupil.model[4])\n', (7480, 7502), True, 'import numpy as np\n')] |
import numpy as np
import math
from matplotlib import cm
from matplotlib.colors import ListedColormap
from losses import binaryNegLikelihood
from tensorflow import keras
from data_read import get_toy_paths, get_TPS_and_TIS_paths
class Const():
    def __init__(self, dataSetType):
        """Build the configuration for the given dataset type.

        dataSetType: one of "DW", "ZP" (toy data) or "MH" (TIS/TPS data);
        it selects the variable names, folder layout and path-getter used.
        """
        self._dataSetType = dataSetType
        if dataSetType == "DW" or dataSetType == "ZP":
            self._name_to_list_position = {
                "x_{1}": 0, "x_{2}": 1, "x_{3}": 2, "x_{4}": 3, "x_{5}": 4,
                "x_{6}": 5, "x_{7}": 6, "x_{8}": 7, "x_{9}": 8, "x_{10}": 9}
            self._used_variable_names = [
                "x_{1}", "x_{2}", "x_{3}", "x_{4}", "x_{5}"]
            # Name of the folder in which the toy data is found
            self._toy_folder_name = dataSetType
            self._path_getter_function = get_toy_paths
        elif dataSetType == "MH":
            self._name_to_list_position = {
                "MCG": 0, "N_{w,4}": 1, "N_{w,3}": 2, "N_{w,2}": 3,
                "N_{sw,3-4}": 4, "N_{sw,2-3}": 5, "F4": 6, "R_g": 7,
                "5^{12}6^{2}": 8, "5^{12}": 9, "CR": 10, "N_{s,2}": 11,
                "N_{s,3}": 12, "N_{c,2}": 13, "N_{c,3}": 14, "N_{s,4}": 15,
                "N_{c,4}": 16, "5^{12}6^{3}": 17, "5^{12}6^{4}": 18,
                "4^{1}5^{10}6^{2}": 19, "4^{1}5^{10}6^{3}": 20,
                "4^{1}5^{10}6^{4}": 21}
            self._used_variable_names = [
                "MCG", "N_{w,4}", "N_{w,3}", "N_{w,2}", "N_{sw,3-4}",
                "N_{sw,2-3}", "F4", "R_g", "5^{12}6^{2}", "5^{12}", "CR",
                "N_{s,2}", "N_{s,3}", "N_{c,2}", "N_{c,3}", "N_{s,4}",
                "N_{c,4}", "5^{12}6^{3}", "5^{12}6^{4}", "4^{1}5^{10}6^{2}",
                "4^{1}5^{10}6^{3}", "4^{1}5^{10}6^{4}"]
            # Name of the folder in which the TIS data is found
            self._TIS_folder_name = "RPE_org"
            self._TIS_highest_interface_name = "mcg100"
            # Name of the folder in which the TPS paths are found
            self._TPS_folder_name = "TPS"
            # MCG threshold below which a snapshot belongs to state A
            self._mcg_A = 18
            # MCG threshold above which a snapshot belongs to state B
            self._mcg_B = 120
            self._path_getter_function = get_TPS_and_TIS_paths
        # Fraction of paths used from the read files
        self._used_dataset_fraction = 1
        # Derived lookup tables for the subset of variables actually used.
        self._used_name_to_list_position = {
            self._used_variable_names[i]: i
            for i in range(len(self._used_variable_names))}
        self._used_list_positions = [
            self._name_to_list_position[name]
            for name in self._used_variable_names]
        # Labels assigned to the four types of paths
        self._AA_label = 0.0
        self._AB_label = 1.0
        self._BA_label = 0.0
        self._BB_label = 1.0
        # Precision to which data is rounded
        self._precision = 2
        # List of labels to keep
        self._keep_labels = ["AA", "AB", "BA", "BB"]
        # Ratio of training set compared to the whole dataset
        self._train_ratio = 0.6
        # Ratio of validation set compared to whole dataset
        self._val_ratio = 0.1
        # Fraction of most extreme values that are considered
        # outliers to both sides
        self._outlier_cutoff = 0.02
        # Number of bins to balance the pBs
        self._balance_bins = 10
        """System parameters"""
        # Number of cores used
        self._cores_used = 2
        """Tf-Dataset parameters"""
        # set size of batches
        self._batch_size = 64
        """Model parameters"""
        # Number of bottleneck nodes
        self._bottleneck_size = 1
        # Factor of hidden layer nodes relative to input nodes
        self._node_mult = 4
        # Number of hidden layers in the encoder
        self._encoder_hidden = 4
        # Number of hidden layers in the decoder_1
        self._decoder_1_hidden = 4
        # Number of hidden layers in the decoder_2
        self._decoder_2_hidden = 4
        # Activation function in the encoder
        self._encoder_act_func = "tanh"
        # Activation function in the decoder_1
        self._decoder_1_act_func = "sigmoid"
        # Activation function in the decoder_2
        self._decoder_2_act_func = "tanh"
        # Ratio of weights for label and reconstruction loss
        self._loss_weights = [1, 0.1]
        # Names of input and output in the model.
        self._input_name = "Input"
        self._output_name_1 = "Committor"
        self._output_name_2 = "Reconstruction"
        # List of losses determined by the model.
        self._loss_names = ["total", self._output_name_1, self._output_name_2]
        # Loss function used for the autoencoder_1
        self._loss_function_1 = binaryNegLikelihood
        # Loss function used for the autoencoder_2
        self._loss_function_2 = keras.losses.MeanAbsoluteError()
        # Number of epochs used for model training
        self._epochs = 10
        """Visualization parameters"""
        # Resolution for the calc_* and plot_* functions
        self._resolution = 25
        # Sub-figure size for the plot_* functions
        # self._subfig_size = 5
        self._subfig_size = 2
        # Lower boundary for a logarithmic colormap
        self._logvmin = 10**(-4)
        # Colormap used for the heat map plots
        self._label_cmap = make_banded_label_colormap(self._logvmin)
        # Colormap used for the density plots
        self._density_cmap = make_density_colormap()
        # List of colors for plt.plots
        self._plt_colors = [
            "c", "g", "r", "indigo", "y", "m",
            "k", "lightpink", "orange", "olive", "b", "darkviolet"]
        self._projection_steps = 20
        self._unprojection_steps = 11
    # --- Read-only accessors for the configuration values set in __init__. ---
    @property
    def dataSetType(self):
        return self._dataSetType
    @property
    def name_to_list_position(self):
        return self._name_to_list_position
    @property
    def used_variable_names(self):
        return self._used_variable_names
    @property
    def used_name_to_list_position(self):
        return self._used_name_to_list_position
    @property
    def used_list_positions(self):
        return self._used_list_positions
    @property
    def path_getter_function(self):
        return self._path_getter_function
    @property
    def toy_folder_name(self):
        # NOTE(review): only set for "DW"/"ZP" datasets; raises
        # AttributeError for "MH" configurations.
        return self._toy_folder_name
    @property
    def TIS_folder_name(self):
        return self._TIS_folder_name
    @property
    def TIS_highest_interface_name(self):
        return self._TIS_highest_interface_name
    @property
    def TPS_folder_name(self):
        return self._TPS_folder_name
    @property
    def mcg_A(self):
        return self._mcg_A
    @property
    def mcg_B(self):
        return self._mcg_B
    @property
    def used_dataset_fraction(self):
        return self._used_dataset_fraction
    @property
    def AA_label(self):
        return self._AA_label
    @property
    def AB_label(self):
        return self._AB_label
    @property
    def BA_label(self):
        return self._BA_label
    @property
    def BB_label(self):
        return self._BB_label
    @property
    def min_label(self):
        # Smallest of the four path labels (0.0 with the defaults).
        return min(
            self._AA_label, self._AB_label, self._BA_label, self._BB_label)
    @property
    def max_label(self):
        # Largest of the four path labels (1.0 with the defaults).
        return max(
            self._AA_label, self._AB_label, self._BA_label, self._BB_label)
    @property
    def precision(self):
        return self._precision
    @property
    def keep_labels(self):
        return self._keep_labels
    @property
    def train_ratio(self):
        # NOTE(review): assert-based validation is stripped when Python
        # runs with -O; consider raising ValueError instead.
        assert isinstance(self._train_ratio, float) \
            and self._train_ratio > 0.0, \
            "train_ratio needs to be a float higher than 0.0"
        return self._train_ratio
    @property
    def val_ratio(self):
        # NOTE(review): same -O caveat as train_ratio.
        assert isinstance(self._val_ratio, float) \
            and self._val_ratio > 0.0, \
            "val_ratio needs to be a float higher than 0.0"
        return self._val_ratio
    @property
    def outlier_cutoff(self):
        return self._outlier_cutoff
    @property
    def balance_bins(self):
        return self._balance_bins
    @property
    def cores_used(self):
        return self._cores_used
    @property
    def batch_size(self):
        return self._batch_size
    @property
    def bottleneck_size(self):
        return self._bottleneck_size
    @property
    def node_mult(self):
        return self._node_mult
    @property
    def encoder_hidden(self):
        return self._encoder_hidden
    @property
    def decoder_1_hidden(self):
        return self._decoder_1_hidden
    @property
    def decoder_2_hidden(self):
        return self._decoder_2_hidden
    @property
    def encoder_act_func(self):
        return self._encoder_act_func
    @property
    def decoder_1_act_func(self):
        return self._decoder_1_act_func
    @property
    def decoder_2_act_func(self):
        return self._decoder_2_act_func
    @property
    def loss_weights(self):
        return self._loss_weights
    @property
    def label_loss_weight(self):
        # First entry of loss_weights: the label-loss weight.
        return self._loss_weights[0]
@property
def reconstruction_loss_weight(self):
return self._loss_weights[1]
@property
def input_name(self):
return self._input_name
@property
def output_name_1(self):
return self._output_name_1
@property
def output_name_2(self):
return self._output_name_2
@property
def loss_names(self):
return self._loss_names
@property
def loss_type_cnt(self):
return len(self._loss_names)
@property
def loss_function_1(self):
return self._loss_function_1
@property
def loss_function_2(self):
return self._loss_function_2
@property
def epochs(self):
return self._epochs
@property
def resolution(self):
return self._resolution
@property
def subfig_size(self):
return self._subfig_size
@property
def logvmin(self):
return self._logvmin
@property
def label_cmap(self):
return self._label_cmap
@property
def density_cmap(self):
return self._density_cmap
@property
def plt_colors(self):
return self._plt_colors
@property
def projection_steps(self):
return self._projection_steps
@property
def unprojection_steps(self):
return self._unprojection_steps
@property
def data_stamp(self):
return f"kl{'_'.join(self._keep_labels)}_oc{self._outlier_cutoff}"
@property
def model_stamp(self):
return f"bn{self._bottleneck_size}_{self._node_mult}*"\
+ f"({self._encoder_hidden}{self._encoder_act_func}+"\
+ f"{self._decoder_1_hidden}{self._decoder_1_act_func}|"\
+ f"{self._decoder_2_hidden}{self._decoder_2_act_func})_"\
+ f"lw{self._loss_weights[0]}:{self._loss_weights[1]}_"\
+ f"e{self._epochs}"
# Define setter methods for all variables that can be changed.
@used_variable_names.setter
def used_variable_names(self, x):
assert isinstance(x, list), "Can only be set to type list"
self._used_variable_names = x
self._used_name_to_list_position = {
self._used_variable_names[i]: i
for i in range(len(self._used_variable_names))}
self._used_list_positions = [
self._name_to_list_position[name]
for name in self._used_variable_names]
@bottleneck_size.setter
def bottleneck_size(self, x):
assert isinstance(x, int), "Can only be set to type int"
self._bottleneck_size = x
@epochs.setter
def epochs(self, x):
assert isinstance(x, int), "Can only be set to type int"
self._epochs = x
def make_banded_label_colormap(logvmin):
resolution = 1001
bandwidth = 0.1
band_bottom_fraction = \
translate_value_to_colormap_fraction(0.5 - bandwidth / 2, logvmin)
band_bottom_index = round(band_bottom_fraction * resolution)
band_top_fraction = \
translate_value_to_colormap_fraction(0.5 + bandwidth / 2, logvmin)
band_top_index = round(band_top_fraction * resolution)
bottom_map = cm.get_cmap("summer", resolution)
cut_bottom_map = bottom_map(np.linspace(
0, 1 - band_bottom_fraction, resolution - band_bottom_index))
middle_map = cm.get_cmap("Greys", 10)
cut_middle_map = middle_map(np.linspace(
0.9, 1.0, band_bottom_index - band_top_index))
top_map = cm.get_cmap("summer", resolution)
cut_top_map = top_map(np.linspace(
1 - band_top_fraction, 1, band_top_index))
c_map = ListedColormap(np.vstack((
cut_bottom_map, cut_middle_map, cut_top_map)), "SplitSummer")
return c_map
def translate_value_to_colormap_fraction(value, logvmin):
return math.log(value, 10)/math.log(logvmin, 10)
def make_density_colormap():
resolution = 1001
cmap = cm.get_cmap("autumn", resolution)
return cmap
| [
"matplotlib.cm.get_cmap",
"tensorflow.keras.losses.MeanAbsoluteError",
"numpy.linspace",
"math.log",
"numpy.vstack"
] | [((12325, 12358), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""summer"""', 'resolution'], {}), "('summer', resolution)\n", (12336, 12358), False, 'from matplotlib import cm\n'), ((12495, 12519), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""Greys"""', '(10)'], {}), "('Greys', 10)\n", (12506, 12519), False, 'from matplotlib import cm\n'), ((12638, 12671), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""summer"""', 'resolution'], {}), "('summer', resolution)\n", (12649, 12671), False, 'from matplotlib import cm\n'), ((13069, 13102), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""autumn"""', 'resolution'], {}), "('autumn', resolution)\n", (13080, 13102), False, 'from matplotlib import cm\n'), ((4899, 4931), 'tensorflow.keras.losses.MeanAbsoluteError', 'keras.losses.MeanAbsoluteError', ([], {}), '()\n', (4929, 4931), False, 'from tensorflow import keras\n'), ((12391, 12463), 'numpy.linspace', 'np.linspace', (['(0)', '(1 - band_bottom_fraction)', '(resolution - band_bottom_index)'], {}), '(0, 1 - band_bottom_fraction, resolution - band_bottom_index)\n', (12402, 12463), True, 'import numpy as np\n'), ((12552, 12609), 'numpy.linspace', 'np.linspace', (['(0.9)', '(1.0)', '(band_bottom_index - band_top_index)'], {}), '(0.9, 1.0, band_bottom_index - band_top_index)\n', (12563, 12609), True, 'import numpy as np\n'), ((12698, 12751), 'numpy.linspace', 'np.linspace', (['(1 - band_top_fraction)', '(1)', 'band_top_index'], {}), '(1 - band_top_fraction, 1, band_top_index)\n', (12709, 12751), True, 'import numpy as np\n'), ((12793, 12849), 'numpy.vstack', 'np.vstack', (['(cut_bottom_map, cut_middle_map, cut_top_map)'], {}), '((cut_bottom_map, cut_middle_map, cut_top_map))\n', (12802, 12849), True, 'import numpy as np\n'), ((12963, 12982), 'math.log', 'math.log', (['value', '(10)'], {}), '(value, 10)\n', (12971, 12982), False, 'import math\n'), ((12983, 13004), 'math.log', 'math.log', (['logvmin', '(10)'], {}), '(logvmin, 10)\n', (12991, 13004), False, 'import math\n')] |
from distutils.core import setup, Extension
import os
import numpy
H2PACK_DIR = ".."
OPENBLAS_INSTALL_DIR = "/usr/local/opt/openblas"
#C_DIR = "/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/include"
extra_cflags = ["-I"+H2PACK_DIR+"/include"]
extra_cflags += ["-I"+OPENBLAS_INSTALL_DIR+"/include"]
extra_cflags += ["-g", "-std=gnu99", "-O3"]
extra_cflags += ["-DUSE_OPENBLAS", "-fopenmp", "-march=native"]
extra_cflags += ["-Wno-unused-result", "-Wno-unused-function"]
LIB = [H2PACK_DIR+"/lib/libH2Pack.a", OPENBLAS_INSTALL_DIR+"/lib/libopenblas.a"]
extra_lflags = LIB + ["-g", "-O3", "-fopenmp", "-lm", "-lgfortran"]
def main():
setup(name="pyh2pack",
version="1.0.0",
description="Python interface for H2Pack",
author="<NAME>, <NAME>, and <NAME>",
author_email="<EMAIL>",
ext_modules=[Extension(
name = "pyh2pack",
sources = ["pyh2pack.c"],
include_dirs=[H2PACK_DIR+"/include", numpy.get_include()],
extra_compile_args = extra_cflags,
extra_link_args= extra_lflags,
)
]
)
if __name__ == "__main__":
main()
| [
"numpy.get_include"
] | [((975, 994), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (992, 994), False, 'import numpy\n')] |
"""
"""
import numpy as np
from ..probabilistic_binning import fuzzy_digitize
__all__ = ('test1', )
def test1():
""" Enforce assigned centroid span the sensible range
"""
npts = int(1e5)
x = np.random.uniform(-1, 1, npts)
nbins = 100
centroids = np.linspace(-2, 2, nbins)
centroid_numbers = fuzzy_digitize(x, centroids)
assert centroid_numbers.shape == (npts, )
assert np.all(centroid_numbers >= 0)
assert np.all(centroid_numbers <= nbins-1)
def test2():
""" Enforce no assigned centroid has fewer than min_counts elements
"""
npts = 100
x = np.random.uniform(-1, 1, npts)
nbins = 100
centroids = np.linspace(-2, 2, nbins)
centroid_numbers = fuzzy_digitize(x, centroids, min_counts=2)
uvals, counts = np.unique(centroid_numbers, return_counts=True)
assert np.all(counts >= 2)
centroid_numbers = fuzzy_digitize(x, centroids, min_counts=0)
uvals, counts = np.unique(centroid_numbers, return_counts=True)
assert np.any(counts == 1)
def test3():
""" Enforce deterministic behavior when passing a seed
"""
npts = 100
x = np.random.uniform(-1, 1, npts)
nbins = 100
centroids = np.linspace(-2, 2, nbins)
centroid_numbers_1 = fuzzy_digitize(x, centroids, min_counts=2, seed=8)
centroid_numbers_2 = fuzzy_digitize(x, centroids, min_counts=2, seed=8)
centroid_numbers_3 = fuzzy_digitize(x, centroids, min_counts=2, seed=9)
assert np.all(centroid_numbers_1==centroid_numbers_2)
assert not np.all(centroid_numbers_1==centroid_numbers_3)
def test4():
""" Enforce centroid assignment preferentially assigns membership to bins that are nearby.
"""
npts = int(1e6)
x = np.random.uniform(-1, 1, npts)
nbins = 10
centroids = np.linspace(-1.01, 1.01, nbins)
centroid_numbers = fuzzy_digitize(x, centroids, min_counts=2, seed=43)
itest = 4
test_mask = (x >= centroids[itest]) & (x < centroids[itest+1])
assert set(centroid_numbers[test_mask]) == set((itest, itest+1))
assert np.allclose(np.mean(centroid_numbers[test_mask]), itest+0.5, rtol=0.1)
dx_bin = centroids[itest+1] - centroids[itest]
test_mask2 = test_mask & (x < centroids[itest] + dx_bin/10.)
assert np.mean(centroid_numbers[test_mask2]) < itest + 0.25
test_mask3 = test_mask & (x > centroids[itest+1] - dx_bin/10.)
assert np.mean(centroid_numbers[test_mask3]) > itest + 1 - 0.25
def test5():
""" Enforce centroid assignment is deterministic for points that are coincident with a centroid
"""
npts = int(1e6)
x = np.random.uniform(-1, 1, npts)
nbins = 10
centroids = np.linspace(-1.01, 1.01, nbins)
random_mask = np.random.randint(0, 2, npts).astype(bool)
x[random_mask] = np.random.choice(centroids[1:-1], np.count_nonzero(random_mask))
centroid_numbers = fuzzy_digitize(x, centroids, min_counts=2, seed=43)
for i, center in enumerate(centroids):
mask = x == center
assert np.all(centroid_numbers[mask] == i)
| [
"numpy.random.uniform",
"numpy.count_nonzero",
"numpy.all",
"numpy.any",
"numpy.mean",
"numpy.random.randint",
"numpy.linspace",
"numpy.unique"
] | [((211, 241), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', 'npts'], {}), '(-1, 1, npts)\n', (228, 241), True, 'import numpy as np\n'), ((274, 299), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', 'nbins'], {}), '(-2, 2, nbins)\n', (285, 299), True, 'import numpy as np\n'), ((410, 439), 'numpy.all', 'np.all', (['(centroid_numbers >= 0)'], {}), '(centroid_numbers >= 0)\n', (416, 439), True, 'import numpy as np\n'), ((451, 488), 'numpy.all', 'np.all', (['(centroid_numbers <= nbins - 1)'], {}), '(centroid_numbers <= nbins - 1)\n', (457, 488), True, 'import numpy as np\n'), ((605, 635), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', 'npts'], {}), '(-1, 1, npts)\n', (622, 635), True, 'import numpy as np\n'), ((668, 693), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', 'nbins'], {}), '(-2, 2, nbins)\n', (679, 693), True, 'import numpy as np\n'), ((781, 828), 'numpy.unique', 'np.unique', (['centroid_numbers'], {'return_counts': '(True)'}), '(centroid_numbers, return_counts=True)\n', (790, 828), True, 'import numpy as np\n'), ((840, 859), 'numpy.all', 'np.all', (['(counts >= 2)'], {}), '(counts >= 2)\n', (846, 859), True, 'import numpy as np\n'), ((947, 994), 'numpy.unique', 'np.unique', (['centroid_numbers'], {'return_counts': '(True)'}), '(centroid_numbers, return_counts=True)\n', (956, 994), True, 'import numpy as np\n'), ((1006, 1025), 'numpy.any', 'np.any', (['(counts == 1)'], {}), '(counts == 1)\n', (1012, 1025), True, 'import numpy as np\n'), ((1131, 1161), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', 'npts'], {}), '(-1, 1, npts)\n', (1148, 1161), True, 'import numpy as np\n'), ((1194, 1219), 'numpy.linspace', 'np.linspace', (['(-2)', '(2)', 'nbins'], {}), '(-2, 2, nbins)\n', (1205, 1219), True, 'import numpy as np\n'), ((1460, 1508), 'numpy.all', 'np.all', (['(centroid_numbers_1 == centroid_numbers_2)'], {}), '(centroid_numbers_1 == centroid_numbers_2)\n', (1466, 1508), True, 'import numpy as np\n'), ((1715, 
1745), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', 'npts'], {}), '(-1, 1, npts)\n', (1732, 1745), True, 'import numpy as np\n'), ((1777, 1808), 'numpy.linspace', 'np.linspace', (['(-1.01)', '(1.01)', 'nbins'], {}), '(-1.01, 1.01, nbins)\n', (1788, 1808), True, 'import numpy as np\n'), ((2585, 2615), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', 'npts'], {}), '(-1, 1, npts)\n', (2602, 2615), True, 'import numpy as np\n'), ((2647, 2678), 'numpy.linspace', 'np.linspace', (['(-1.01)', '(1.01)', 'nbins'], {}), '(-1.01, 1.01, nbins)\n', (2658, 2678), True, 'import numpy as np\n'), ((1522, 1570), 'numpy.all', 'np.all', (['(centroid_numbers_1 == centroid_numbers_3)'], {}), '(centroid_numbers_1 == centroid_numbers_3)\n', (1528, 1570), True, 'import numpy as np\n'), ((2059, 2095), 'numpy.mean', 'np.mean', (['centroid_numbers[test_mask]'], {}), '(centroid_numbers[test_mask])\n', (2066, 2095), True, 'import numpy as np\n'), ((2246, 2283), 'numpy.mean', 'np.mean', (['centroid_numbers[test_mask2]'], {}), '(centroid_numbers[test_mask2])\n', (2253, 2283), True, 'import numpy as np\n'), ((2377, 2414), 'numpy.mean', 'np.mean', (['centroid_numbers[test_mask3]'], {}), '(centroid_numbers[test_mask3])\n', (2384, 2414), True, 'import numpy as np\n'), ((2795, 2824), 'numpy.count_nonzero', 'np.count_nonzero', (['random_mask'], {}), '(random_mask)\n', (2811, 2824), True, 'import numpy as np\n'), ((2988, 3023), 'numpy.all', 'np.all', (['(centroid_numbers[mask] == i)'], {}), '(centroid_numbers[mask] == i)\n', (2994, 3023), True, 'import numpy as np\n'), ((2697, 2726), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', 'npts'], {}), '(0, 2, npts)\n', (2714, 2726), True, 'import numpy as np\n')] |
"""
@author: LXA
Date: 2020 年 5 月 31 日
"""
import os
import sys
import tensorflow as tf
import numpy as np
import matplotlib
import platform
import shutil
import time
import DNN_base
import DNN_tools
import General_Laplace
import MS_LaplaceEqs
import MS_BoltzmannEqs
import Load_data2Mat
import DNN_data
import saveData
import plotData
import DNN_Print_Log
def solve_Multiscale_PDE(R):
log_out_path = R['FolderName'] # 将路径从字典 R 中提取出来
if not os.path.exists(log_out_path): # 判断路径是否已经存在
os.mkdir(log_out_path) # 无 log_out_path 路径,创建一个 log_out_path 路径
outfile_name1 = '%s%s.txt' % ('log2', 'train')
log_fileout_NN = open(os.path.join(log_out_path, outfile_name1), 'w') # 在这个路径下创建并打开一个可写的 log_train.txt文件
DNN_Print_Log.dictionary_out2file(R, log_fileout_NN, actName2normal=R['act_name2Normal'],
actName2scale=R['act_name2Scale'])
# 一般 laplace 问题需要的设置
batchsize_it = R['batch_size2interior']
batchsize_bd = R['batch_size2boundary']
bd_penalty_init = R['init_boundary_penalty'] # Regularization parameter for boundary conditions
init_UdotU_penalty = R['init_penalty2orthogonal']
penalty2WB = R['penalty2weight_biases'] # Regularization parameter for weights and biases
lr_decay = R['learning_rate_decay']
learning_rate = R['learning_rate']
hidden2normal = R['hidden2normal']
hidden2scale = R['hidden2scale']
input_dim = R['input_dim']
out_dim = R['output_dim']
alpha = R['contrib2scale']
act2Normal = R['act_name2Normal']
act2Scale = R['act_name2Scale']
if R['contrib_scale2orthogonal'] == 'with_contrib':
using_scale2orthogonal = R['contrib2scale']
else:
using_scale2orthogonal = 1.0
if R['opt2loss_bd'] != 'unified_boundary' and R['contrib_scale2orthogonal'] == 'with_contrib':
using_scale2boundary = R['contrib2scale']
else:
using_scale2boundary = 1.0
# 问题区域,每个方向设置为一样的长度。等网格划分,对于二维是方形区域
region_lb = 0.0
region_rt = 1.0
if R['PDE_type'] == 'general_Laplace':
# -laplace u = f
region_lb = 0.0
region_rt = 1.0
f, u_true, u_left, u_right, u_bottom, u_top = General_Laplace.get_infos2Laplace_5D(
input_dim=input_dim, out_dim=out_dim, intervalL=region_lb, intervalR=region_rt, equa_name=R['equa_name'])
elif R['PDE_type'] == 'pLaplace':
# 求解如下方程, A_eps(x) 震荡的比较厉害,具有多个尺度
# d **** d ****
# - ---- | A_eps(x)* ---- u_eps(x) | =f(x), x \in R^n
# dx **** dx ****
p_index = R['order2pLaplace_operator']
mesh_number = R['mesh_number']
u_true, f, A_eps, u00, u01, u10, u11, u20, u21, u30, u31, u40, u41 = MS_LaplaceEqs.get_infos2pLaplace_5D(
input_dim=input_dim, out_dim=out_dim, mesh_number=R['mesh_number'], intervalL=0.0, intervalR=1.0,
equa_name=R['equa_name'])
elif R['PDE_type'] == 'Possion_Boltzmann':
# 求解如下方程, A_eps(x) 震荡的比较厉害,具有多个尺度
# d **** d ****
# - ---- | A_eps(x)* ---- u_eps(x) | + K *u_eps =f(x), x \in R^n
# dx **** dx ****
p_index = R['order2pLaplace_operator']
u_true, f, A_eps, kappa, u00, u01, u10, u11, u20, u21, u30, u31, u40, u41 = MS_BoltzmannEqs.get_infos2Boltzmann_5D(
intervalL=region_lb, intervalR=region_rt, equa_name=R['equa_name'])
flag2Normal = 'WB2normal'
flag2Scale = 'WB2scale'
if R['model2Normal'] == 'Fourier_DNN':
Ws_Normal, Bs_Normal = DNN_base.Xavier_init_NN_Fourier(input_dim, out_dim, hidden2normal, flag2Normal)
else:
Ws_Normal, Bs_Normal = DNN_base.Xavier_init_NN(input_dim, out_dim, hidden2normal, flag2Normal)
if R['model2Scale'] == 'Fourier_DNN':
Ws_Scale, Bs_Scale = DNN_base.Xavier_init_NN_Fourier(input_dim, out_dim, hidden2scale, flag2Scale)
else:
Ws_Scale, Bs_Scale = DNN_base.Xavier_init_NN(input_dim, out_dim, hidden2scale, flag2Scale)
global_steps = tf.Variable(0, trainable=False)
with tf.device('/gpu:%s' % (R['gpuNo'])):
with tf.variable_scope('vscope', reuse=tf.AUTO_REUSE):
XYZST_it = tf.placeholder(tf.float32, name='XYZST_it', shape=[None, input_dim])
XYZST00 = tf.placeholder(tf.float32, name='XYZST00', shape=[None, input_dim])
XYZST01 = tf.placeholder(tf.float32, name='XYZST01', shape=[None, input_dim])
XYZST10 = tf.placeholder(tf.float32, name='XYZST10', shape=[None, input_dim])
XYZST11 = tf.placeholder(tf.float32, name='XYZST11', shape=[None, input_dim])
XYZST20 = tf.placeholder(tf.float32, name='XYZST20', shape=[None, input_dim])
XYZST21 = tf.placeholder(tf.float32, name='XYZST21', shape=[None, input_dim])
XYZST30 = tf.placeholder(tf.float32, name='XYZST30', shape=[None, input_dim])
XYZST31 = tf.placeholder(tf.float32, name='XYZST31', shape=[None, input_dim])
XYZST40 = tf.placeholder(tf.float32, name='XYZST40', shape=[None, input_dim])
XYZST41 = tf.placeholder(tf.float32, name='XYZST41', shape=[None, input_dim])
bd_penalty = tf.placeholder_with_default(input=1e2, shape=[], name='bd_p')
UdotU_penalty = tf.placeholder_with_default(input=1.0, shape=[], name='p_powU')
in_learning_rate = tf.placeholder_with_default(input=1e-5, shape=[], name='lr')
if R['model2Normal'] == 'DNN':
UNN_Normal = DNN_base.DNN(XYZST_it, Ws_Normal, Bs_Normal, hidden2normal, activateIn_name=act2Normal,
activate_name=act2Normal)
U00_NN_Normal = DNN_base.DNN(XYZST00, Ws_Normal, Bs_Normal, hidden2normal, activateIn_name=act2Normal,
activate_name=act2Normal)
U01_NN_Normal = DNN_base.DNN(XYZST01, Ws_Normal, Bs_Normal, hidden2normal, activateIn_name=act2Normal,
activate_name=act2Normal)
U10_NN_Normal = DNN_base.DNN(XYZST10, Ws_Normal, Bs_Normal, hidden2normal, activateIn_name=act2Normal,
activate_name=act2Normal)
U11_NN_Normal = DNN_base.DNN(XYZST11, Ws_Normal, Bs_Normal, hidden2normal, activateIn_name=act2Normal,
activate_name=act2Normal)
U20_NN_Normal = DNN_base.DNN(XYZST20, Ws_Normal, Bs_Normal, hidden2normal, activateIn_name=act2Normal,
activate_name=act2Normal)
U21_NN_Normal = DNN_base.DNN(XYZST21, Ws_Normal, Bs_Normal, hidden2normal, activateIn_name=act2Normal,
activate_name=act2Normal)
U30_NN_Normal = DNN_base.DNN(XYZST30, Ws_Normal, Bs_Normal, hidden2normal, activateIn_name=act2Normal,
activate_name=act2Normal)
U31_NN_Normal = DNN_base.DNN(XYZST31, Ws_Normal, Bs_Normal, hidden2normal, activateIn_name=act2Normal,
activate_name=act2Normal)
U40_NN_Normal = DNN_base.DNN(XYZST40, Ws_Normal, Bs_Normal, hidden2normal, activateIn_name=act2Normal,
activate_name=act2Normal)
U41_NN_Normal = DNN_base.DNN(XYZST41, Ws_Normal, Bs_Normal, hidden2normal, activateIn_name=act2Normal,
activate_name=act2Normal)
elif R['model2Normal'] == 'Fourier_DNN':
UNN_Normal = DNN_base.DNN_FourierBase(XYZST_it, Ws_Normal, Bs_Normal, hidden2normal, R['freq2Normal'],
activate_name=act2Normal, repeat_Highfreq=False,
sFourier=R['sFourier2Normal'])
U00_NN_Normal = DNN_base.DNN_FourierBase(XYZST00, Ws_Normal, Bs_Normal, hidden2normal, R['freq2Normal'],
activate_name=act2Normal, repeat_Highfreq=False,
sFourier=R['sFourier2Normal'])
U01_NN_Normal = DNN_base.DNN_FourierBase(XYZST01, Ws_Normal, Bs_Normal, hidden2normal, R['freq2Normal'],
activate_name=act2Normal, repeat_Highfreq=False,
sFourier=R['sFourier2Normal'])
U10_NN_Normal = DNN_base.DNN_FourierBase(XYZST10, Ws_Normal, Bs_Normal, hidden2normal, R['freq2Normal'],
activate_name=act2Normal, repeat_Highfreq=False,
sFourier=R['sFourier2Normal'])
U11_NN_Normal = DNN_base.DNN_FourierBase(XYZST11, Ws_Normal, Bs_Normal, hidden2normal, R['freq2Normal'],
activate_name=act2Normal, repeat_Highfreq=False,
sFourier=R['sFourier2Normal'])
U20_NN_Normal = DNN_base.DNN_FourierBase(XYZST20, Ws_Normal, Bs_Normal, hidden2normal, R['freq2Normal'],
activate_name=act2Normal, repeat_Highfreq=False,
sFourier=R['sFourier2Normal'])
U21_NN_Normal = DNN_base.DNN_FourierBase(XYZST21, Ws_Normal, Bs_Normal, hidden2normal, R['freq2Normal'],
activate_name=act2Normal, repeat_Highfreq=False,
sFourier=R['sFourier2Normal'])
U30_NN_Normal = DNN_base.DNN_FourierBase(XYZST30, Ws_Normal, Bs_Normal, hidden2normal, R['freq2Normal'],
activate_name=act2Normal, repeat_Highfreq=False,
sFourier=R['sFourier2Normal'])
U31_NN_Normal = DNN_base.DNN_FourierBase(XYZST31, Ws_Normal, Bs_Normal, hidden2normal, R['freq2Normal'],
activate_name=act2Normal, repeat_Highfreq=False,
sFourier=R['sFourier2Normal'])
U40_NN_Normal = DNN_base.DNN_FourierBase(XYZST40, Ws_Normal, Bs_Normal, hidden2normal, R['freq2Normal'],
activate_name=act2Normal, repeat_Highfreq=False,
sFourier=R['sFourier2Normal'])
U41_NN_Normal = DNN_base.DNN_FourierBase(XYZST41, Ws_Normal, Bs_Normal, hidden2normal, R['freq2Normal'],
activate_name=act2Normal, repeat_Highfreq=False,
sFourier=R['sFourier2Normal'])
if R['model2Scale'] == 'DNN_scale':
UNN_Scale = DNN_base.DNN_scale(XYZST_it, Ws_Scale, Bs_Scale, hidden2scale, R['freq2Scale'],
activateIn_name=act2Scale, activate_name=act2Scale)
U00_NN_Scale = DNN_base.DNN_scale(XYZST00, Ws_Scale, Bs_Scale, hidden2scale, R['freq2Scale'],
activateIn_name=act2Scale, activate_name=act2Scale)
U01_NN_Scale = DNN_base.DNN_scale(XYZST01, Ws_Scale, Bs_Scale, hidden2scale, R['freq2Scale'],
activateIn_name=act2Scale, activate_name=act2Scale)
U10_NN_Scale = DNN_base.DNN_scale(XYZST10, Ws_Scale, Bs_Scale, hidden2scale, R['freq2Scale'],
activateIn_name=act2Scale, activate_name=act2Scale)
U11_NN_Scale = DNN_base.DNN_scale(XYZST11, Ws_Scale, Bs_Scale, hidden2scale, R['freq2Scale'],
activateIn_name=act2Scale, activate_name=act2Scale)
U20_NN_Scale = DNN_base.DNN_scale(XYZST20, Ws_Scale, Bs_Scale, hidden2scale, R['freq2Scale'],
activateIn_name=act2Scale, activate_name=act2Scale)
U21_NN_Scale = DNN_base.DNN_scale(XYZST21, Ws_Scale, Bs_Scale, hidden2scale, R['freq2Scale'],
activateIn_name=act2Scale, activate_name=act2Scale)
U30_NN_Scale = DNN_base.DNN_scale(XYZST30, Ws_Scale, Bs_Scale, hidden2scale, R['freq2Scale'],
activateIn_name=act2Scale, activate_name=act2Scale)
U31_NN_Scale = DNN_base.DNN_scale(XYZST31, Ws_Scale, Bs_Scale, hidden2scale, R['freq2Scale'],
activateIn_name=act2Scale, activate_name=act2Scale)
U40_NN_Scale = DNN_base.DNN_scale(XYZST40, Ws_Scale, Bs_Scale, hidden2scale, R['freq2Scale'],
activateIn_name=act2Scale, activate_name=act2Scale)
U41_NN_Scale = DNN_base.DNN_scale(XYZST41, Ws_Scale, Bs_Scale, hidden2scale, R['freq2Scale'],
activateIn_name=act2Scale, activate_name=act2Scale)
elif R['model2Scale'] == 'Adapt_scale_DNN':
UNN_Scale = DNN_base.DNN_adapt_scale(XYZST_it, Ws_Scale, Bs_Scale, hidden2scale, R['freq2Scale'],
activateIn_name=act2Scale, activate_name=act2Scale)
U00_NN_Scale = DNN_base.DNN_adapt_scale(XYZST00, Ws_Scale, Bs_Scale, hidden2scale, R['freq2Scale'],
activateIn_name=act2Scale, activate_name=act2Scale)
U01_NN_Scale = DNN_base.DNN_adapt_scale(XYZST01, Ws_Scale, Bs_Scale, hidden2scale, R['freq2Scale'],
activateIn_name=act2Scale, activate_name=act2Scale)
U10_NN_Scale = DNN_base.DNN_adapt_scale(XYZST10, Ws_Scale, Bs_Scale, hidden2scale, R['freq2Scale'],
activateIn_name=act2Scale, activate_name=act2Scale)
U11_NN_Scale = DNN_base.DNN_adapt_scale(XYZST11, Ws_Scale, Bs_Scale, hidden2scale, R['freq2Scale'],
activateIn_name=act2Scale, activate_name=act2Scale)
U20_NN_Scale = DNN_base.DNN_adapt_scale(XYZST20, Ws_Scale, Bs_Scale, hidden2scale, R['freq2Scale'],
activateIn_name=act2Scale, activate_name=act2Scale)
U21_NN_Scale = DNN_base.DNN_adapt_scale(XYZST21, Ws_Scale, Bs_Scale, hidden2scale, R['freq2Scale'],
activateIn_name=act2Scale, activate_name=act2Scale)
U30_NN_Scale = DNN_base.DNN_adapt_scale(XYZST30, Ws_Scale, Bs_Scale, hidden2scale, R['freq2Scale'],
activateIn_name=act2Scale, activate_name=act2Scale)
U31_NN_Scale = DNN_base.DNN_adapt_scale(XYZST31, Ws_Scale, Bs_Scale, hidden2scale, R['freq2Scale'],
activateIn_name=act2Scale, activate_name=act2Scale)
U40_NN_Scale = DNN_base.DNN_adapt_scale(XYZST40, Ws_Scale, Bs_Scale, hidden2scale, R['freq2Scale'],
activateIn_name=act2Scale, activate_name=act2Scale)
U41_NN_Scale = DNN_base.DNN_adapt_scale(XYZST41, Ws_Scale, Bs_Scale, hidden2scale, R['freq2Scale'],
activateIn_name=act2Scale, activate_name=act2Scale)
elif R['model2Scale'] == 'Fourier_DNN':
UNN_Scale = DNN_base.DNN_FourierBase(XYZST_it, Ws_Scale, Bs_Scale, hidden2scale, R['freq2Scale'],
activate_name=act2Scale, sFourier=R['sFourier2Scale'])
U00_NN_Scale = DNN_base.DNN_FourierBase(XYZST00, Ws_Scale, Bs_Scale, hidden2scale, R['freq2Scale'],
activate_name=act2Scale, sFourier=R['sFourier2Scale'])
U01_NN_Scale = DNN_base.DNN_FourierBase(XYZST01, Ws_Scale, Bs_Scale, hidden2scale, R['freq2Scale'],
activate_name=act2Scale, sFourier=R['sFourier2Scale'])
U10_NN_Scale = DNN_base.DNN_FourierBase(XYZST10, Ws_Scale, Bs_Scale, hidden2scale, R['freq2Scale'],
activate_name=act2Scale, sFourier=R['sFourier2Scale'])
U11_NN_Scale = DNN_base.DNN_FourierBase(XYZST11, Ws_Scale, Bs_Scale, hidden2scale, R['freq2Scale'],
activate_name=act2Scale, sFourier=R['sFourier2Scale'])
U20_NN_Scale = DNN_base.DNN_FourierBase(XYZST20, Ws_Scale, Bs_Scale, hidden2scale, R['freq2Scale'],
activate_name=act2Scale, sFourier=R['sFourier2Scale'])
U21_NN_Scale = DNN_base.DNN_FourierBase(XYZST21, Ws_Scale, Bs_Scale, hidden2scale, R['freq2Scale'],
activate_name=act2Scale, sFourier=R['sFourier2Scale'])
U30_NN_Scale = DNN_base.DNN_FourierBase(XYZST30, Ws_Scale, Bs_Scale, hidden2scale, R['freq2Scale'],
activate_name=act2Scale, sFourier=R['sFourier2Scale'])
U31_NN_Scale = DNN_base.DNN_FourierBase(XYZST31, Ws_Scale, Bs_Scale, hidden2scale, R['freq2Scale'],
activate_name=act2Scale, sFourier=R['sFourier2Scale'])
U40_NN_Scale = DNN_base.DNN_FourierBase(XYZST40, Ws_Scale, Bs_Scale, hidden2scale, R['freq2Scale'],
activate_name=act2Scale, sFourier=R['sFourier2Scale'])
U41_NN_Scale = DNN_base.DNN_FourierBase(XYZST41, Ws_Scale, Bs_Scale, hidden2scale, R['freq2Scale'],
activate_name=act2Scale, sFourier=R['sFourier2Scale'])
X_it = tf.reshape(XYZST_it[:, 0], shape=[-1, 1])
Y_it = tf.reshape(XYZST_it[:, 1], shape=[-1, 1])
Z_it = tf.reshape(XYZST_it[:, 2], shape=[-1, 1])
S_it = tf.reshape(XYZST_it[:, 3], shape=[-1, 1])
T_it = tf.reshape(XYZST_it[:, 4], shape=[-1, 1])
UNN = UNN_Normal + alpha * UNN_Scale
dUNN_Normal = tf.gradients(UNN_Normal, XYZST_it)[0] # * 行 5 列
dUNN_Scale = tf.gradients(UNN_Scale, XYZST_it)[0] # * 行 5 列
if R['loss_type'] == 'variational_loss':
dUNN = tf.add(dUNN_Normal, alpha * dUNN_Scale)
norm2dUNN = tf.reshape(tf.reduce_sum(tf.square(dUNN), axis=-1), shape=[-1, 1]) # 按行求和
if R['PDE_type'] == 'general_Laplace':
loss_it_variational2NN = (1.0 / 2) *norm2dUNN - tf.multiply(f(X_it, Y_it, Z_it, S_it, T_it), UNN)
elif R['PDE_type'] == 'pLaplace':
a_eps = A_eps(X_it, Y_it, Z_it, S_it, T_it) # * 行 1 列
AdUNN_pNorm = tf.multiply(a_eps, norm2dUNN)
if R['equa_name'] == 'multi_scale5D_4' or R['equa_name'] == 'multi_scale5D_5' or \
R['equa_name'] == 'multi_scale5D_6' or R['equa_name'] == 'multi_scale5D_7' or \
R['equa_name'] == 'multi_scale5D_8' or R['equa_name'] == 'multi_scale5D_9':
fxyzst = MS_LaplaceEqs.get_forceSide2pLaplace5D(x=X_it, y=Y_it, z=Z_it, s=S_it, t=T_it,
equa_name=R['equa_name'])
loss_it_variational2NN = (1.0 / 2) * AdUNN_pNorm - \
tf.multiply(tf.reshape(fxyzst, shape=[-1, 1]), UNN)
else:
loss_it_variational2NN = (1.0 / 2) * AdUNN_pNorm - tf.multiply(f(X_it, Y_it, Z_it, S_it, T_it), UNN)
elif R['PDE_type'] == 'Possion_Boltzmann':
a_eps = A_eps(X_it, Y_it, Z_it, S_it, T_it) # * 行 1 列
Kappa = kappa(X_it, Y_it, Z_it, S_it, T_it) # * 行 1 列
AdUNN_pNorm = tf.multiply(a_eps, norm2dUNN)
if R['equa_name'] == 'multi_scale5D_4' or R['equa_name'] == 'multi_scale5D_5' or \
R['equa_name'] == 'multi_scale5D_6' or R['equa_name'] == 'multi_scale5D_7':
fxyzst = MS_BoltzmannEqs.get_forceSide2Boltzmann_5D(x=X_it, y=Y_it, z=Z_it, s=S_it, t=T_it,
equa_name=R['equa_name'])
loss_it_variational2NN = (1.0 / 2) * (AdUNN_pNorm + Kappa * UNN * UNN) - \
tf.multiply(fxyzst, UNN)
else:
loss_it_variational2NN = (1.0 / 2) * (AdUNN_pNorm + Kappa * UNN * UNN) - \
tf.multiply(f(X_it, Y_it, Z_it, S_it, T_it), UNN)
elif R['variational_loss'] == 2:
# 0.5*|grad Uc|^p + 0.5*|grad Uf|^p - f(x)*(Uc+Uf)
# 0.5*a(x)*|grad Uc|^p + 0.5*a(x)*|grad Uf|^p - f(x)*(Uc+Uf)
norm2dUNN_Normal = tf.reshape(tf.sqrt(tf.reduce_sum(tf.square(dUNN_Normal), axis=-1)), shape=[-1, 1])
norm2dUNN_Scale = tf.reshape(tf.sqrt(tf.reduce_sum(tf.square(dUNN_Scale), axis=-1)), shape=[-1, 1])
if R['PDE_type'] == 'Possion_Boltzmann':
a_eps = A_eps(X_it, Y_it, Z_it, S_it, T_it)
Kappa = kappa(X_it, Y_it, Z_it, S_it, T_it)
ApNorm2dUNN = a_eps * tf.pow(norm2dUNN_Normal, p_index) + \
a_eps * tf.pow(alpha*norm2dUNN_Scale, p_index)
if R['equa_name'] == 'multi_scale5D_4' or R['equa_name'] == 'multi_scale5D_5' or \
R['equa_name'] == 'multi_scale5D_6' or R['equa_name'] == 'multi_scale5D_7':
fxyzst = MS_BoltzmannEqs.get_forceSide2Boltzmann_5D(x=X_it, y=Y_it, z=Z_it, s=S_it, t=T_it,
equa_name=R['equa_name'])
loss_it_variational2NN = (1.0 / p_index) * (ApNorm2dUNN + Kappa * UNN * UNN) - \
tf.multiply(fxyzst, UNN)
else:
loss_it_variational2NN = (1.0 / p_index) * (ApNorm2dUNN + Kappa * UNN * UNN) - \
tf.multiply(f(X_it, Y_it, Z_it, S_it), UNN)
else:
a_eps = A_eps(X_it, Y_it, Z_it, S_it, T_it) # * 行 1 列
ApNorm2dUNN = a_eps * tf.pow(norm2dUNN_Normal, p_index) + \
a_eps * tf.pow(alpha * norm2dUNN_Scale, p_index)
if R['equa_name'] == 'multi_scale5D_4' or R['equa_name'] == 'multi_scale5D_7' or \
R['equa_name'] == 'multi_scale5D_8' or R['equa_name'] == 'multi_scale5D_9':
fxyzst = MS_LaplaceEqs.get_forceSide2pLaplace5D(x=X_it, y=Y_it, z=Z_it, s=S_it, t=T_it)
loss_it_variational2NN = (1.0 / p_index) * ApNorm2dUNN - \
tf.multiply(tf.reshape(fxyzst, shape=[-1, 1]), UNN)
else:
loss_it_variational2NN = (1.0 / p_index) * ApNorm2dUNN - \
tf.multiply(f(X_it, Y_it, Z_it, S_it, T_it), UNN)
Loss_it2NN = tf.reduce_mean(loss_it_variational2NN)
if R['opt2loss_udotu'] == 'with_orthogonal':
if R['opt2orthogonal'] == 0: # L2 正交
point_UdU = tf.multiply(UNN_Normal, using_scale2orthogonal * UNN_Scale)
UNN_dot_UNN = tf.square(tf.reduce_mean(point_UdU))
elif R['opt2orthogonal'] == 1: # 逐点平方正交 |Uc*Uf|^2-->0 Uc 和 Uf 是两个列向量 形状为(*, 1)
norm2UdU = tf.square(tf.multiply(UNN_Normal, using_scale2orthogonal*UNN_Scale))
UNN_dot_UNN = tf.reduce_mean(norm2UdU)
elif R['opt2orthogonal'] == 2:
# |a(x)*(grad Uc)*(grad Uf)|^2-->0 a(x) 是 (*,1)的;(grad Uc)*(grad Uf)是向量相乘(*,2)·(*,2)
dU_dot_dU = tf.multiply(dUNN_Normal, using_scale2orthogonal*dUNN_Scale)
sum2dUdU = tf.reshape(tf.reduce_sum(dU_dot_dU, axis=-1), shape=[-1, 1])
norm2AdUdU = tf.square(tf.multiply(a_eps, sum2dUdU))
# norm2AdUdU = tf.square(sum2dUdU)
UNN_dot_UNN = tf.reduce_mean(norm2AdUdU)
else: # |Uc*Uf|^2-->0 + |a(x)*(grad Uc)*(grad Uf)|^2-->0
U_dot_U = tf.reduce_sum(tf.square(tf.multiply(UNN_Normal, using_scale2orthogonal*UNN_Scale)), axis=-1)
dU_dot_dU = tf.multiply(dUNN_Normal, using_scale2orthogonal*dUNN_Scale)
sum2dUdU = tf.reshape(tf.reduce_sum(dU_dot_dU, axis=-1), shape=[-1, 1])
norm2AdUdU = tf.square(tf.multiply(a_eps, sum2dUdU))
UNN_dot_UNN = tf.reduce_mean(norm2AdUdU) + tf.reduce_mean(U_dot_U)
else:
UNN_dot_UNN = tf.constant(0.0)
Loss2UNN_dot_UNN = UdotU_penalty * UNN_dot_UNN
if R['opt2loss_bd'] == 'unified_boundary':
U00_NN = U00_NN_Normal + using_scale2boundary * U00_NN_Scale
U01_NN = U01_NN_Normal + using_scale2boundary * U01_NN_Scale
U10_NN = U10_NN_Normal + using_scale2boundary * U10_NN_Scale
U11_NN = U11_NN_Normal + using_scale2boundary * U11_NN_Scale
U20_NN = U20_NN_Normal + using_scale2boundary * U20_NN_Scale
U21_NN = U21_NN_Normal + using_scale2boundary * U21_NN_Scale
U30_NN = U30_NN_Normal + using_scale2boundary * U30_NN_Scale
U31_NN = U31_NN_Normal + using_scale2boundary * U31_NN_Scale
U40_NN = U30_NN_Normal + using_scale2boundary * U30_NN_Scale
U41_NN = U31_NN_Normal + using_scale2boundary * U31_NN_Scale
loss_bd_square = tf.square(U00_NN) + tf.square(U01_NN) + tf.square(U10_NN) + \
tf.square(U11_NN) + tf.square(U20_NN) + tf.square(U21_NN) + \
tf.square(U30_NN) + tf.square(U31_NN) + tf.square(U40_NN) + \
tf.square(U41_NN)
Loss_bd2NNs = tf.reduce_mean(loss_bd_square)
else:
loss_bd_square2Normal = tf.square(U00_NN_Normal) + tf.square(U01_NN_Normal) + tf.square(U10_NN_Normal) + \
tf.square(U11_NN_Normal) + tf.square(U20_NN_Normal) + tf.square(U21_NN_Normal) + \
tf.square(U30_NN_Normal) + tf.square(U31_NN_Normal) + tf.square(U40_NN_Normal) + \
tf.square(U41_NN_Normal)
loss_bd_square2Scale = tf.square(using_scale2boundary*U00_NN_Scale) + tf.square(using_scale2boundary*U01_NN_Scale) + \
tf.square(using_scale2boundary*U10_NN_Scale) + tf.square(using_scale2boundary*U11_NN_Scale) + \
tf.square(using_scale2boundary*U20_NN_Scale) + tf.square(using_scale2boundary*U21_NN_Scale) + \
tf.square(using_scale2boundary*U30_NN_Scale) + tf.square(using_scale2boundary*U31_NN_Scale) + \
tf.square(using_scale2boundary*U40_NN_Scale) + tf.square(using_scale2boundary*U41_NN_Scale)
Loss_bd2Normal = tf.reduce_mean(loss_bd_square2Normal)
Loss_bd2Scale = tf.reduce_mean(loss_bd_square2Scale)
Loss_bd2NNs = Loss_bd2Normal + Loss_bd2Scale
if R['regular_wb_model'] == 'L1':
regularSum2WB_Normal = DNN_base.regular_weights_biases_L1(Ws_Normal, Bs_Normal) # 正则化权重和偏置 L1正则化
regularSum2WB_Scale = DNN_base.regular_weights_biases_L1(Ws_Scale, Bs_Scale) # 正则化权重和偏置 L1正则化
elif R['regular_wb_model'] == 'L2':
regularSum2WB_Normal = DNN_base.regular_weights_biases_L2(Ws_Normal, Bs_Normal) # 正则化权重和偏置 L2正则化
regularSum2WB_Scale = DNN_base.regular_weights_biases_L2(Ws_Scale, Bs_Scale) # 正则化权重和偏置 L2正则化
else:
regularSum2WB_Normal = tf.constant(0.0) # 无正则化权重参数
regularSum2WB_Scale = tf.constant(0.0)
PWB = penalty2WB * (regularSum2WB_Normal + regularSum2WB_Scale)
Loss2NN = Loss_it2NN + bd_penalty*Loss_bd2NNs + Loss2UNN_dot_UNN + PWB
my_optimizer = tf.train.AdamOptimizer(in_learning_rate)
if R['loss_type'] == 'variational_loss':
if R['train_model'] == 'training_group2':
train_op1 = my_optimizer.minimize(Loss2NN, global_step=global_steps)
train_op2 = my_optimizer.minimize(Loss_bd2NNs, global_step=global_steps)
train_Loss2NN = tf.group(train_op1, train_op2)
elif R['train_model'] == 'training_group3':
train_op1 = my_optimizer.minimize(Loss_it2NN, global_step=global_steps)
train_op2 = my_optimizer.minimize(Loss_bd2NNs, global_step=global_steps)
train_op3 = my_optimizer.minimize(Loss2NN, global_step=global_steps)
train_Loss2NN = tf.group(train_op1, train_op2, train_op3)
elif R['train_model'] == 'training_union':
train_Loss2NN = my_optimizer.minimize(Loss2NN, global_step=global_steps)
elif R['loss_type'] == 0 or R['loss_type'] == 'variational_loss2':
if R['train_model'] == 'training_group2':
train_op3 = my_optimizer.minimize(Loss2UNN_dot_UNN, global_step=global_steps)
train_op4 = my_optimizer.minimize(Loss2NN, global_step=global_steps)
train_Loss2NN = tf.group(train_op3, train_op4)
elif R['train_model'] == 'training_group3':
train_op1 = my_optimizer.minimize(Loss_it2NN, global_step=global_steps)
train_op2 = my_optimizer.minimize(Loss_bd2NNs, global_step=global_steps)
train_op3 = my_optimizer.minimize(Loss2NN, global_step=global_steps)
train_Loss2NN = tf.group(train_op1, train_op2, train_op3)
elif R['train_model'] == 'training_group4':
train_op1 = my_optimizer.minimize(Loss_it2NN, global_step=global_steps)
train_op2 = my_optimizer.minimize(Loss_bd2NNs, global_step=global_steps)
train_op3 = my_optimizer.minimize(Loss2UNN_dot_UNN, global_step=global_steps)
train_op4 = my_optimizer.minimize(Loss2NN, global_step=global_steps)
train_Loss2NN = tf.group(train_op1, train_op2, train_op3, train_op4)
elif R['train_model'] == 'training_group4_1':
train_op1 = my_optimizer.minimize(Loss_it2NN, global_step=global_steps)
train_op2 = my_optimizer.minimize(Loss_bd2NNs, global_step=global_steps)
train_op3 = my_optimizer.minimize(Loss2UNN_dot_UNN, global_step=global_steps)
train_op4 = my_optimizer.minimize(Loss2NN, global_step=global_steps)
train_Loss2NN = tf.group(train_op1, train_op2, train_op3, train_op4)
elif R['train_model'] == 'training_union':
train_Loss2NN = my_optimizer.minimize(Loss2NN, global_step=global_steps)
if R['PDE_type'] == 'general_Laplace' or R['PDE_type'] == 'pLaplace' or R['PDE_type'] == 'Possion_Boltzmann':
# 训练上的真解值和训练结果的误差
U_true = u_true(X_it, Y_it, Z_it, S_it, T_it)
train_mse2NN = tf.reduce_mean(tf.square(U_true - UNN))
train_rel2NN = train_mse2NN / tf.reduce_mean(tf.square(U_true))
else:
train_mse2NN = tf.constant(0.0)
train_rel2NN = tf.constant(0.0)
t0 = time.time()
# 空列表, 使用 append() 添加元素
lossIt_all2NN, lossBD_all2NN, loss_all2NN, train_mse_all2NN, train_rel_all2NN = [], [], [], [], []
UDU_NN = []
test_mse_all2NN, test_rel_all2NN = [], []
test_epoch = []
# 画网格解图
if R['testData_model'] == 'random_generate':
# 画网格热力解图 ---- 生成测试数据,用于测试训练后的网络
# test_bach_size = 400
# size2test = 20
# test_bach_size = 900
# size2test = 30
test_bach_size = 1600
size2test = 40
# test_bach_size = 4900
# size2test = 70
# test_bach_size = 10000
# size2test = 100
test_xyzst_bach = DNN_data.rand_it(test_bach_size, input_dim, region_lb, region_rt)
saveData.save_testData_or_solus2mat(test_xyzst_bach, dataName='testXYZST', outPath=R['FolderName'])
elif R['testData_model'] == 'loadData':
test_bach_size = 1600
size2test = 40
mat_data_path = 'dataMat_highDim'
test_xyzst_bach = Load_data2Mat.get_randomData2mat(dim=input_dim, data_path=mat_data_path)
saveData.save_testData_or_solus2mat(test_xyzst_bach, dataName='testXYZST', outPath=R['FolderName'])
# ConfigProto 加上allow_soft_placement=True就可以使用 gpu 了
config = tf.ConfigProto(allow_soft_placement=True) # 创建sess的时候对sess进行参数配置
config.gpu_options.allow_growth = True # True是让TensorFlow在运行过程中动态申请显存,避免过多的显存占用。
config.allow_soft_placement = True # 当指定的设备不存在时,允许选择一个存在的设备运行。比如gpu不存在,自动降到cpu上运行
with tf.Session(config=config) as sess:
sess.run(tf.global_variables_initializer())
tmp_lr = learning_rate
for i_epoch in range(R['max_epoch'] + 1):
xyzst_it_batch = DNN_data.rand_it(batchsize_it, input_dim, region_a=region_lb, region_b=region_rt)
xyzst00_batch, xyzst01_batch, xyzst10_batch, xyzst11_batch, xyzst20_batch, xyzst21_batch, xyzst30_batch,\
xyzst31_batch, xyzst40_batch, xyzst41_batch = DNN_data.rand_bd_5D(batchsize_bd, input_dim, region_a=region_lb, region_b=region_rt)
tmp_lr = tmp_lr * (1 - lr_decay)
if R['activate_penalty2bd_increase'] == 1:
if i_epoch < int(R['max_epoch'] / 10):
temp_penalty_bd = bd_penalty_init
elif i_epoch < int(R['max_epoch'] / 5):
temp_penalty_bd = 10 * bd_penalty_init
elif i_epoch < int(R['max_epoch'] / 4):
temp_penalty_bd = 50 * bd_penalty_init
elif i_epoch < int(R['max_epoch'] / 2):
temp_penalty_bd = 100 * bd_penalty_init
elif i_epoch < int(3 * R['max_epoch'] / 4):
temp_penalty_bd = 200 * bd_penalty_init
else:
temp_penalty_bd = 500 * bd_penalty_init
else:
temp_penalty_bd = bd_penalty_init
if R['activate_powSolus_increase'] == 1:
if i_epoch < int(R['max_epoch'] / 10):
temp_penalty_powU = init_UdotU_penalty
elif i_epoch < int(R['max_epoch'] / 5):
temp_penalty_powU = 10 * init_UdotU_penalty
elif i_epoch < int(R['max_epoch'] / 4):
temp_penalty_powU = 50 * init_UdotU_penalty
elif i_epoch < int(R['max_epoch'] / 2):
temp_penalty_powU = 100 * init_UdotU_penalty
elif i_epoch < int(3 * R['max_epoch'] / 4):
temp_penalty_powU = 200 * init_UdotU_penalty
else:
temp_penalty_powU = 500 * init_UdotU_penalty
else:
temp_penalty_powU = init_UdotU_penalty
_, loss_it_nn, loss_bd_nn, loss_nn, udu_nn, train_mse_nn, train_rel_nn, pwb = sess.run(
[train_Loss2NN, Loss_it2NN, Loss_bd2NNs, Loss2NN, UNN_dot_UNN, train_mse2NN, train_rel2NN, PWB],
feed_dict={XYZST_it: xyzst_it_batch, XYZST00: xyzst00_batch, XYZST01: xyzst01_batch,
XYZST10: xyzst10_batch, XYZST11: xyzst11_batch, XYZST20: xyzst20_batch,
XYZST21: xyzst21_batch, XYZST30: xyzst30_batch, XYZST31: xyzst31_batch,
XYZST40: xyzst40_batch, XYZST41: xyzst41_batch,
bd_penalty: temp_penalty_bd, UdotU_penalty: temp_penalty_powU})
lossIt_all2NN.append(loss_it_nn)
lossBD_all2NN.append(loss_bd_nn)
loss_all2NN.append(loss_nn)
UDU_NN.append(udu_nn)
train_mse_all2NN.append(train_mse_nn)
train_rel_all2NN.append(train_rel_nn)
if i_epoch % 1000 == 0:
run_times = time.time() - t0
DNN_tools.print_and_log_train_one_epoch(
i_epoch, run_times, tmp_lr, temp_penalty_bd, temp_penalty_powU, pwb, loss_it_nn, loss_bd_nn,
loss_nn, udu_nn, train_mse_nn, train_rel_nn, log_out=log_fileout_NN)
# --------------------------- test network ----------------------------------------------
test_epoch.append(i_epoch / 1000)
u_true2test, utest_nn, utest_normal, utest_freqs = sess.run(
[U_true, UNN, UNN_Normal, alpha*UNN_Scale], feed_dict={XYZST_it: test_xyzst_bach})
point_ERR2NN = np.square(u_true2test - utest_nn)
test_mse2nn = np.mean(point_ERR2NN)
test_mse_all2NN.append(test_mse2nn)
test_rel2nn = test_mse2nn / np.mean(np.square(u_true2test))
test_rel_all2NN.append(test_rel2nn)
DNN_tools.print_and_log_test_one_epoch(test_mse2nn, test_rel2nn, log_out=log_fileout_NN)
# ------------------- save the testing results into mat file and plot them -------------------------
saveData.save_trainLoss2mat_1actFunc(lossIt_all2NN, lossBD_all2NN, loss_all2NN, actName=act2Normal,
outPath=R['FolderName'])
saveData.save_train_MSE_REL2mat(train_mse_all2NN, train_rel_all2NN, actName=act2Normal, outPath=R['FolderName'])
plotData.plotTrain_loss_1act_func(lossIt_all2NN, lossType='loss_it', seedNo=R['seed'], outPath=R['FolderName'])
plotData.plotTrain_loss_1act_func(lossBD_all2NN, lossType='loss_bd', seedNo=R['seed'],
outPath=R['FolderName'], yaxis_scale=True)
plotData.plotTrain_loss_1act_func(loss_all2NN, lossType='loss', seedNo=R['seed'], outPath=R['FolderName'])
plotData.plotTrain_loss_1act_func(UDU_NN, lossType='udu', seedNo=R['seed'], outPath=R['FolderName'])
plotData.plotTrain_MSE_REL_1act_func(train_mse_all2NN, train_rel_all2NN, actName=act2Normal,
seedNo=R['seed'], outPath=R['FolderName'], yaxis_scale=True)
# ----------------- save test data to mat file and plot the testing results into figures -----------------------
if R['PDE_type'] == 'general_laplace' or R['PDE_type'] == 'p_laplace2multi_scale':
saveData.save_testData_or_solus2mat(u_true2test, dataName='Utrue', outPath=R['FolderName'])
saveData.save_testData_or_solus2mat(utest_nn, dataName='test', outPath=R['FolderName'])
saveData.save_testData_or_solus2mat(utest_normal, dataName='normal', outPath=R['FolderName'])
saveData.save_testData_or_solus2mat(utest_freqs, dataName='scale', outPath=R['FolderName'])
if R['hot_power'] == 1:
# ----------------------------------------------------------------------------------------------------------
# 绘制解的热力图(真解和DNN解)
# ----------------------------------------------------------------------------------------------------------
plotData.plot_Hot_solution2test(u_true2test, size_vec2mat=size2test, actName='Utrue',
seedNo=R['seed'], outPath=R['FolderName'])
plotData.plot_Hot_solution2test(utest_nn, size_vec2mat=size2test, actName=act2Normal,
seedNo=R['seed'], outPath=R['FolderName'])
saveData.save_testMSE_REL2mat(test_mse_all2NN, test_rel_all2NN, actName=act2Normal,
outPath=R['FolderName'])
plotData.plotTest_MSE_REL(test_mse_all2NN, test_rel_all2NN, test_epoch, actName=act2Normal,
seedNo=R['seed'],
outPath=R['FolderName'], yaxis_scale=True)
saveData.save_test_point_wise_err2mat(point_ERR2NN, actName=act2Normal, outPath=R['FolderName'])
plotData.plot_Hot_point_wise_err(point_ERR2NN, size_vec2mat=size2test, actName=act2Normal,
seedNo=R['seed'], outPath=R['FolderName'])
if __name__ == "__main__":
    # Build the configuration dictionary R for the multi-scale PDE solver and run it.
    R = {}

    # -------------------------------------- CPU or GPU selection -----------------------------------------------
    R['gpuNo'] = 0
    if platform.system() == 'Windows':
        # BUGFIX: the variable was misspelled "CDUA_VISIBLE_DEVICES", so the GPU
        # restriction silently had no effect on Windows.
        os.environ["CUDA_VISIBLE_DEVICES"] = "%s" % (R['gpuNo'])
    else:
        print('-------------------------------------- linux -----------------------------------------------')
        # A Linux terminal has no GUI; this call is required and must run before
        # `import matplotlib.pyplot` is executed, otherwise it has no effect.
        matplotlib.use('Agg')

        if tf.test.is_gpu_available():
            os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"  # restrict to GPUs 0,1,2,3 (device '/gpu:0', ...)
        else:
            os.environ["CUDA_VISIBLE_DEVICES"] = "1"

    # ------------------------------------- output-path setup ---------------------------------------------------
    # store_file = 'Laplace5D'
    store_file = 'pLaplace5D'
    # store_file = 'Boltzmann5D'
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    sys.path.append(BASE_DIR)
    OUT_DIR = os.path.join(BASE_DIR, store_file)
    if not os.path.exists(OUT_DIR):
        print('---------------------- OUT_DIR ---------------------:', OUT_DIR)
        os.mkdir(OUT_DIR)

    # Use a random seed as the result-folder name (int converted to str).
    # BUGFIX: pass an int to np.random.randint; the float literal 1e5 is
    # deprecated (and rejected by newer NumPy releases).
    R['seed'] = np.random.randint(100000)
    seed_str = str(R['seed'])
    FolderName = os.path.join(OUT_DIR, seed_str)
    R['FolderName'] = FolderName
    if not os.path.exists(FolderName):
        print('--------------------- FolderName -----------------:', FolderName)
        os.mkdir(FolderName)

    # ---------------------------------------- copy and save this script -----------------------------------------
    if platform.system() == 'Windows':
        tf.compat.v1.reset_default_graph()
        shutil.copy(__file__, '%s/%s' % (FolderName, os.path.basename(__file__)))
    else:
        shutil.copy(__file__, '%s/%s' % (FolderName, os.path.basename(__file__)))

    # If step_stop_flag is non-zero, an interactive step-based stop condition is activated.
    # step_stop_flag = input('please input an integer number to activate step-stop----0:no---!0:yes--:')
    # R['activate_stop'] = int(step_stop_flag)
    R['activate_stop'] = 0
    # R['max_epoch'] = 200000
    R['max_epoch'] = 100000
    if 0 != R['activate_stop']:
        epoch_stop = input('please input a stop epoch:')
        R['max_epoch'] = int(epoch_stop)

    # ---------------------------- Setup of multi-scale problem ------------------------------
    if store_file == 'Laplace5D':
        R['PDE_type'] = 'general_Laplace'
        R['equa_name'] = 'PDE1'
        # R['equa_name'] = 'PDE2'
        # R['equa_name'] = 'PDE3'
        # R['equa_name'] = 'PDE4'
        # R['equa_name'] = 'PDE5'
        # R['equa_name'] = 'PDE6'
        # R['equa_name'] = 'PDE7'
    elif store_file == 'pLaplace5D':
        R['PDE_type'] = 'pLaplace'
        # R['equa_name'] = 'multi_scale5D_1'    # general laplace
        # R['equa_name'] = 'multi_scale5D_2'    # multi-scale laplace
        # R['equa_name'] = 'multi_scale5D_3'    # multi-scale laplace
        R['equa_name'] = 'multi_scale5D_4'      # multi-scale laplace
        # R['equa_name'] = 'multi_scale5D_5'    # multi-scale laplace
        # R['equa_name'] = 'multi_scale5D_6'    # multi-scale laplace
        # R['equa_name'] = 'multi_scale5D_7'    # multi-scale laplace
        # R['equa_name'] = 'multi_scale5D_8'    # multi-scale laplace
        # R['equa_name'] = 'multi_scale5D_9'    # multi-scale laplace
    elif store_file == 'Boltzmann5D':
        R['PDE_type'] = 'Possion_Boltzmann'
        # R['equa_name'] = 'multi_scale5D_4'
        # R['equa_name'] = 'multi_scale5D_5'
        # R['equa_name'] = 'multi_scale5D_6'
        R['equa_name'] = 'multi_scale5D_7'

    # NOTE(review): both branches below currently assign identical values; kept
    # separate so the cases can diverge later without restructuring.
    if R['PDE_type'] == 'general_Laplace':
        R['mesh_number'] = 1
        R['epsilon'] = 0.1
        R['order2pLaplace_operator'] = 2
        R['batch_size2interior'] = 12500        # batch size of the interior training data
        # R['batch_size2interior'] = 10000      # batch size of the interior training data
        # R['batch_size2boundary'] = 1500
        R['batch_size2boundary'] = 2000
    elif R['PDE_type'] == 'pLaplace' or R['PDE_type'] == 'Possion_Boltzmann':
        R['mesh_number'] = 1
        R['epsilon'] = 0.1
        R['order2pLaplace_operator'] = 2
        R['batch_size2interior'] = 12500        # batch size of the interior training data
        # R['batch_size2interior'] = 10000      # batch size of the interior training data
        # R['batch_size2boundary'] = 1500
        R['batch_size2boundary'] = 2000

    R['input_dim'] = 5   # input dimension, i.e. the dimension of the problem
    R['output_dim'] = 1  # output dimension

    # ---------------------------- Setup of DNN -------------------------------
    R['testData_model'] = 'loadData'

    R['loss_type'] = 'variational_loss'      # PDE variational loss
    # R['loss_type'] = 'variational_loss2'   # PDE variational loss
    # R['loss_type'] = 'L2_loss'             # L2 loss

    R['opt2orthogonal'] = 0      # 0: L2-orthogonal(LO)  1: pointwise square orthogonal(PSO)  2:energy
    # R['opt2orthogonal'] = 1    # 0: L2-orthogonal(LO)  1: pointwise square orthogonal(PSO)  2:energy
    # R['opt2orthogonal'] = 2    # 0: L2-orthogonal(LO)  1: pointwise square orthogonal(PSO)  2:energy

    R['regular_wb_model'] = 'L0'
    # R['regular_wb_model'] = 'L1'
    # R['regular_wb_model'] = 'L2'
    R['penalty2weight_biases'] = 0.000       # Regularization parameter for weights
    # R['penalty2weight_biases'] = 0.001     # Regularization parameter for weights
    # R['penalty2weight_biases'] = 0.0025    # Regularization parameter for weights

    R['activate_penalty2bd_increase'] = 1
    R['init_boundary_penalty'] = 100         # Regularization parameter for boundary conditions

    R['activate_powSolus_increase'] = 0
    if R['activate_powSolus_increase'] == 1:
        R['init_penalty2orthogonal'] = 5.0
    elif R['activate_powSolus_increase'] == 2:
        R['init_penalty2orthogonal'] = 10000.0
    else:
        R['init_penalty2orthogonal'] = 20.0
        # R['init_penalty2orthogonal'] = 0.0

    R['learning_rate'] = 2e-4                # learning rate
    R['learning_rate_decay'] = 5e-5          # learning-rate decay
    R['optimizer_name'] = 'Adam'             # optimizer

    R['train_model'] = 'training_union'      # training mode: joint training with a single loss
    # R['train_model'] = 'training_group1'   # training mode: group training with several losses
    # R['train_model'] = 'training_group2'
    # R['train_model'] = 'training_group3'
    # R['train_model'] = 'training_group4'

    # R['model2Normal'] = 'DNN'              # network model for the normal (coarse) part
    # R['model2Normal'] = 'DNN_scale'
    # R['model2Normal'] = 'Adapt_scale_DNN'
    R['model2Normal'] = 'Fourier_DNN'

    # R['model2Scale'] = 'DNN'               # network model for the scale (fine) part
    # R['model2Scale'] = 'DNN_scale'
    # R['model2Scale'] = 'Adapt_scale_DNN'
    R['model2Scale'] = 'Fourier_DNN'

    # Plain MscaleDNN, FourierBase(250,400,400,300,300,200): 250+500*400+400*400+400*300+300*300+300*200+200=630450
    # Plain MscaleDNN, GeneralBase(500,400,400,300,300,200): 500+500*400+400*400+400*300+300*300+300*200+200=630700
    # Total parameters of FourierBase normal + FourierBase scale nets: 200730 + 422950 = 623680
    # Total parameters of GeneralBase normal + FourierBase scale nets: 200810 + 423200 = 624010
    if R['model2Normal'] == 'Fourier_DNN':
        R['hidden2normal'] = (80, 300, 200, 200, 150, 150)   # 80+160*300+300*200+200*200+200*150+150*150+150=200730
    else:
        R['hidden2normal'] = (160, 300, 200, 200, 150, 150)  # 160+160*300+300*200+200*200+200*150+150*150+150=200810
        # R['hidden2normal'] = (250, 300, 250, 200, 200, 100)  # 260350
        # R['hidden2normal'] = (200, 100, 100, 80, 80, 50)
        # R['hidden2normal'] = (300, 200, 200, 100, 100, 50)
        # R['hidden2normal'] = (500, 400, 300, 200, 100)
        # R['hidden2normal'] = (500, 400, 300, 300, 200, 100)

    if R['model2Scale'] == 'Fourier_DNN':
        # R['hidden2scale'] = (250, 400, 200, 150, 150, 100)
        # R['hidden2scale'] = (250, 400, 300, 200, 200, 100)
        # R['hidden2scale'] = (250, 400, 350, 200, 200, 150)
        R['hidden2scale'] = (250, 360, 250, 250, 200, 200)   # 250+500*360+360*250+250*250+250*200+200*200+200 =422950
        # R['hidden2scale'] = (350, 300, 300, 250, 250, 150)
        # R['hidden2scale'] = (500, 400, 300, 200, 100)
    else:
        # R['hidden2scale'] = (12, 10, 8, 8, 6)
        # R['hidden2scale'] = (100, 80, 60, 60, 40, 40, 20)
        # R['hidden2scale'] = (200, 100, 100, 80, 80, 50)
        # R['hidden2scale'] = (400, 300, 300, 250, 250, 150)
        # R['hidden2scale'] = (500, 400, 200, 150, 150, 100)
        R['hidden2scale'] = (500, 360, 250, 250, 200, 200)   # 500+500*360+360*250+250*250+250*200+200*200+200 =423200
        # R['hidden2scale'] = (500, 400, 300, 300, 200, 100)
        # R['hidden2scale'] = (500, 400, 300, 200, 200, 100)

    # Small settings for testing the network on a local laptop
    # R['batch_size2interior'] = 500
    # R['batch_size2boundary'] = 10
    # R['hidden2normal'] = (12, 10, 8, 8, 6)
    # R['hidden2scale'] = (12, 10, 8, 8, 6)

    # ----------------------------- frequency factors of the Fourier features ------------------------------
    # R['freq2Normal'] = np.arange(10, 100)
    # R['freq2Normal'] = np.concatenate(([1, 1, 1, 1, 1], np.arange(1, 26)), axis=0)
    # R['freq2Normal'] = np.concatenate(([1, 2, 3, 4, 5, 1, 2, 3, 4, 5], np.arange(1, 21)), axis=0)
    # R['freq2Normal'] = np.concatenate(([1, 2, 3, 4, 5, 1, 2, 3, 4, 5], np.arange(1, 26)), axis=0)
    # R['freq2Normal'] = np.concatenate(([1, 2, 3, 4, 5, 1, 2, 3, 4, 5], np.arange(1, 31)), axis=0)
    # R['freq2Normal'] = np.concatenate(([1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5], np.arange(5, 31)), axis=0)
    R['freq2Normal'] = np.arange(1, 41) * 0.5
    if R['model2Scale'] == 'Fourier_DNN':
        # R['freq2Scale'] = np.arange(11, 101)
        R['freq2Scale'] = np.arange(21, 121)
        # R['freq2Scale'] = np.arange(16, 101)
        # R['freq2Scale'] = np.arange(21, 101)
        # R['freq2Scale'] = np.arange(6, 105)
    else:
        # R['freq2Scale'] = np.arange(11, 101)
        R['freq2Scale'] = np.arange(21, 121)
        # R['freq2Scale'] = np.arange(6, 105)
        # R['freq2Scale'] = np.arange(1, 101)

    # Choice of activation functions
    # R['act_name2Normal'] = 'relu'
    R['act_name2Normal'] = 'tanh'
    # R['act_name2Normal'] = 'srelu'
    # R['act_name2Normal'] = 'sin'

    # R['act_name2Scale'] = 'relu'
    # R['act_name2Scale'] = 'leaky_relu'
    # R['act_name2Scale'] = 'srelu'
    R['act_name2Scale'] = 's2relu'
    # R['act_name2Scale'] = 'tanh'
    # R['act_name2Scale'] = 'elu'
    # R['act_name2Scale'] = 'phi'

    # Scaling factor used inside the Fourier feature layer; it depends on the activation.
    if R['model2Normal'] == 'Fourier_DNN' and R['act_name2Normal'] == 'tanh':
        R['sFourier2Normal'] = 1.0
    elif R['model2Normal'] == 'Fourier_DNN' and R['act_name2Normal'] == 's2relu':
        R['sFourier2Normal'] = 0.5
    elif R['model2Normal'] == 'Fourier_DNN' and R['act_name2Normal'] == 'sin':
        R['sFourier2Normal'] = 1.0

    if R['model2Scale'] == 'Fourier_DNN' and R['act_name2Scale'] == 'tanh':
        R['sFourier2Scale'] = 1.0
    elif R['model2Scale'] == 'Fourier_DNN' and R['act_name2Scale'] == 's2relu':
        R['sFourier2Scale'] = 0.5

    if R['loss_type'] == 'L2_loss':
        R['act_name2Scale'] = 'tanh'

    if R['loss_type'] == 'variational_loss' or R['loss_type'] == 'L2_loss':
        R['init_penalty2orthogonal'] = 20.0
        # R['init_penalty2orthogonal'] = 25.0
        # R['contrib2scale'] = 0.01
        R['contrib2scale'] = 0.05
        # R['contrib2scale'] = 0.1
        # R['contrib2scale'] = 0.5
        # R['contrib2scale'] = 1.0
    elif R['loss_type'] == 'variational_loss2':
        R['init_penalty2orthogonal'] = 20.0
        # R['init_penalty2orthogonal'] = 25.0
        # R['contrib2scale'] = 0.01
        R['contrib2scale'] = 0.05
        # R['contrib2scale'] = 0.1
        # R['contrib2scale'] = 0.5
        # R['contrib2scale'] = 1.0

    R['hot_power'] = 1

    R['opt2loss_udotu'] = 'with_orthogonal'
    # R['opt2loss_udotu'] = 'without_orthogonal'

    # R['opt2loss_bd'] = 'unified_boundary'
    R['opt2loss_bd'] = 'individual_boundary'

    R['contrib_scale2orthogonal'] = 'with_contrib'
    # R['contrib_scale2orthogonal'] = 'without_contrib'

    R['contrib_scale2boundary'] = 'with_contrib'
    # R['contrib_scale2boundary'] = 'without_contrib'

    solve_Multiscale_PDE(R)
| [
"os.mkdir",
"tensorflow.square",
"tensorflow.reduce_sum",
"DNN_tools.print_and_log_train_one_epoch",
"tensorflow.reshape",
"tensorflow.ConfigProto",
"tensorflow.multiply",
"tensorflow.Variable",
"numpy.random.randint",
"numpy.arange",
"numpy.mean",
"DNN_Print_Log.dictionary_out2file",
"os.pa... | [((767, 896), 'DNN_Print_Log.dictionary_out2file', 'DNN_Print_Log.dictionary_out2file', (['R', 'log_fileout_NN'], {'actName2normal': "R['act_name2Normal']", 'actName2scale': "R['act_name2Scale']"}), "(R, log_fileout_NN, actName2normal=R[\n 'act_name2Normal'], actName2scale=R['act_name2Scale'])\n", (800, 896), False, 'import DNN_Print_Log\n'), ((4206, 4237), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), '(0, trainable=False)\n', (4217, 4237), True, 'import tensorflow as tf\n'), ((33402, 33413), 'time.time', 'time.time', ([], {}), '()\n', (33411, 33413), False, 'import time\n'), ((34658, 34699), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (34672, 34699), True, 'import tensorflow as tf\n'), ((43305, 43330), 'sys.path.append', 'sys.path.append', (['BASE_DIR'], {}), '(BASE_DIR)\n', (43320, 43330), False, 'import sys\n'), ((43346, 43380), 'os.path.join', 'os.path.join', (['BASE_DIR', 'store_file'], {}), '(BASE_DIR, store_file)\n', (43358, 43380), False, 'import os\n'), ((43545, 43572), 'numpy.random.randint', 'np.random.randint', (['(100000.0)'], {}), '(100000.0)\n', (43562, 43572), True, 'import numpy as np\n'), ((43651, 43682), 'os.path.join', 'os.path.join', (['OUT_DIR', 'seed_str'], {}), '(OUT_DIR, seed_str)\n', (43663, 43682), False, 'import os\n'), ((479, 507), 'os.path.exists', 'os.path.exists', (['log_out_path'], {}), '(log_out_path)\n', (493, 507), False, 'import os\n'), ((532, 554), 'os.mkdir', 'os.mkdir', (['log_out_path'], {}), '(log_out_path)\n', (540, 554), False, 'import os\n'), ((678, 719), 'os.path.join', 'os.path.join', (['log_out_path', 'outfile_name1'], {}), '(log_out_path, outfile_name1)\n', (690, 719), False, 'import os\n'), ((2284, 2430), 'General_Laplace.get_infos2Laplace_5D', 'General_Laplace.get_infos2Laplace_5D', ([], {'input_dim': 'input_dim', 'out_dim': 'out_dim', 'intervalL': 'region_lb', 'intervalR': 'region_rt', 'equa_name': 
"R['equa_name']"}), "(input_dim=input_dim, out_dim=out_dim,\n intervalL=region_lb, intervalR=region_rt, equa_name=R['equa_name'])\n", (2320, 2430), False, 'import General_Laplace\n'), ((3727, 3806), 'DNN_base.Xavier_init_NN_Fourier', 'DNN_base.Xavier_init_NN_Fourier', (['input_dim', 'out_dim', 'hidden2normal', 'flag2Normal'], {}), '(input_dim, out_dim, hidden2normal, flag2Normal)\n', (3758, 3806), False, 'import DNN_base\n'), ((3850, 3921), 'DNN_base.Xavier_init_NN', 'DNN_base.Xavier_init_NN', (['input_dim', 'out_dim', 'hidden2normal', 'flag2Normal'], {}), '(input_dim, out_dim, hidden2normal, flag2Normal)\n', (3873, 3921), False, 'import DNN_base\n'), ((3995, 4072), 'DNN_base.Xavier_init_NN_Fourier', 'DNN_base.Xavier_init_NN_Fourier', (['input_dim', 'out_dim', 'hidden2scale', 'flag2Scale'], {}), '(input_dim, out_dim, hidden2scale, flag2Scale)\n', (4026, 4072), False, 'import DNN_base\n'), ((4114, 4183), 'DNN_base.Xavier_init_NN', 'DNN_base.Xavier_init_NN', (['input_dim', 'out_dim', 'hidden2scale', 'flag2Scale'], {}), '(input_dim, out_dim, hidden2scale, flag2Scale)\n', (4137, 4183), False, 'import DNN_base\n'), ((4248, 4281), 'tensorflow.device', 'tf.device', (["('/gpu:%s' % R['gpuNo'])"], {}), "('/gpu:%s' % R['gpuNo'])\n", (4257, 4281), True, 'import tensorflow as tf\n'), ((34057, 34122), 'DNN_data.rand_it', 'DNN_data.rand_it', (['test_bach_size', 'input_dim', 'region_lb', 'region_rt'], {}), '(test_bach_size, input_dim, region_lb, region_rt)\n', (34073, 34122), False, 'import DNN_data\n'), ((34132, 34235), 'saveData.save_testData_or_solus2mat', 'saveData.save_testData_or_solus2mat', (['test_xyzst_bach'], {'dataName': '"""testXYZST"""', 'outPath': "R['FolderName']"}), "(test_xyzst_bach, dataName='testXYZST',\n outPath=R['FolderName'])\n", (34167, 34235), False, 'import saveData\n'), ((34937, 34962), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (34947, 34962), True, 'import tensorflow as tf\n'), ((39336, 39464), 
'saveData.save_trainLoss2mat_1actFunc', 'saveData.save_trainLoss2mat_1actFunc', (['lossIt_all2NN', 'lossBD_all2NN', 'loss_all2NN'], {'actName': 'act2Normal', 'outPath': "R['FolderName']"}), "(lossIt_all2NN, lossBD_all2NN,\n loss_all2NN, actName=act2Normal, outPath=R['FolderName'])\n", (39372, 39464), False, 'import saveData\n'), ((39516, 39633), 'saveData.save_train_MSE_REL2mat', 'saveData.save_train_MSE_REL2mat', (['train_mse_all2NN', 'train_rel_all2NN'], {'actName': 'act2Normal', 'outPath': "R['FolderName']"}), "(train_mse_all2NN, train_rel_all2NN, actName\n =act2Normal, outPath=R['FolderName'])\n", (39547, 39633), False, 'import saveData\n'), ((39640, 39756), 'plotData.plotTrain_loss_1act_func', 'plotData.plotTrain_loss_1act_func', (['lossIt_all2NN'], {'lossType': '"""loss_it"""', 'seedNo': "R['seed']", 'outPath': "R['FolderName']"}), "(lossIt_all2NN, lossType='loss_it', seedNo\n =R['seed'], outPath=R['FolderName'])\n", (39673, 39756), False, 'import plotData\n'), ((39761, 39895), 'plotData.plotTrain_loss_1act_func', 'plotData.plotTrain_loss_1act_func', (['lossBD_all2NN'], {'lossType': '"""loss_bd"""', 'seedNo': "R['seed']", 'outPath': "R['FolderName']", 'yaxis_scale': '(True)'}), "(lossBD_all2NN, lossType='loss_bd', seedNo\n =R['seed'], outPath=R['FolderName'], yaxis_scale=True)\n", (39794, 39895), False, 'import plotData\n'), ((39943, 40054), 'plotData.plotTrain_loss_1act_func', 'plotData.plotTrain_loss_1act_func', (['loss_all2NN'], {'lossType': '"""loss"""', 'seedNo': "R['seed']", 'outPath': "R['FolderName']"}), "(loss_all2NN, lossType='loss', seedNo=R[\n 'seed'], outPath=R['FolderName'])\n", (39976, 40054), False, 'import plotData\n'), ((40059, 40163), 'plotData.plotTrain_loss_1act_func', 'plotData.plotTrain_loss_1act_func', (['UDU_NN'], {'lossType': '"""udu"""', 'seedNo': "R['seed']", 'outPath': "R['FolderName']"}), "(UDU_NN, lossType='udu', seedNo=R['seed'],\n outPath=R['FolderName'])\n", (40092, 40163), False, 'import plotData\n'), ((40171, 40332), 
'plotData.plotTrain_MSE_REL_1act_func', 'plotData.plotTrain_MSE_REL_1act_func', (['train_mse_all2NN', 'train_rel_all2NN'], {'actName': 'act2Normal', 'seedNo': "R['seed']", 'outPath': "R['FolderName']", 'yaxis_scale': '(True)'}), "(train_mse_all2NN, train_rel_all2NN,\n actName=act2Normal, seedNo=R['seed'], outPath=R['FolderName'],\n yaxis_scale=True)\n", (40207, 40332), False, 'import plotData\n'), ((40703, 40795), 'saveData.save_testData_or_solus2mat', 'saveData.save_testData_or_solus2mat', (['utest_nn'], {'dataName': '"""test"""', 'outPath': "R['FolderName']"}), "(utest_nn, dataName='test', outPath=R[\n 'FolderName'])\n", (40738, 40795), False, 'import saveData\n'), ((40800, 40897), 'saveData.save_testData_or_solus2mat', 'saveData.save_testData_or_solus2mat', (['utest_normal'], {'dataName': '"""normal"""', 'outPath': "R['FolderName']"}), "(utest_normal, dataName='normal',\n outPath=R['FolderName'])\n", (40835, 40897), False, 'import saveData\n'), ((40903, 40999), 'saveData.save_testData_or_solus2mat', 'saveData.save_testData_or_solus2mat', (['utest_freqs'], {'dataName': '"""scale"""', 'outPath': "R['FolderName']"}), "(utest_freqs, dataName='scale', outPath=\n R['FolderName'])\n", (40938, 40999), False, 'import saveData\n'), ((41728, 41841), 'saveData.save_testMSE_REL2mat', 'saveData.save_testMSE_REL2mat', (['test_mse_all2NN', 'test_rel_all2NN'], {'actName': 'act2Normal', 'outPath': "R['FolderName']"}), "(test_mse_all2NN, test_rel_all2NN, actName=\n act2Normal, outPath=R['FolderName'])\n", (41757, 41841), False, 'import saveData\n'), ((41885, 42045), 'plotData.plotTest_MSE_REL', 'plotData.plotTest_MSE_REL', (['test_mse_all2NN', 'test_rel_all2NN', 'test_epoch'], {'actName': 'act2Normal', 'seedNo': "R['seed']", 'outPath': "R['FolderName']", 'yaxis_scale': '(True)'}), "(test_mse_all2NN, test_rel_all2NN, test_epoch,\n actName=act2Normal, seedNo=R['seed'], outPath=R['FolderName'],\n yaxis_scale=True)\n", (41910, 42045), False, 'import plotData\n'), ((42119, 42219), 
'saveData.save_test_point_wise_err2mat', 'saveData.save_test_point_wise_err2mat', (['point_ERR2NN'], {'actName': 'act2Normal', 'outPath': "R['FolderName']"}), "(point_ERR2NN, actName=act2Normal,\n outPath=R['FolderName'])\n", (42156, 42219), False, 'import saveData\n'), ((42227, 42364), 'plotData.plot_Hot_point_wise_err', 'plotData.plot_Hot_point_wise_err', (['point_ERR2NN'], {'size_vec2mat': 'size2test', 'actName': 'act2Normal', 'seedNo': "R['seed']", 'outPath': "R['FolderName']"}), "(point_ERR2NN, size_vec2mat=size2test,\n actName=act2Normal, seedNo=R['seed'], outPath=R['FolderName'])\n", (42259, 42364), False, 'import plotData\n'), ((42581, 42598), 'platform.system', 'platform.system', ([], {}), '()\n', (42596, 42598), False, 'import platform\n'), ((42886, 42907), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (42900, 42907), False, 'import matplotlib\n'), ((42922, 42948), 'tensorflow.test.is_gpu_available', 'tf.test.is_gpu_available', ([], {}), '()\n', (42946, 42948), True, 'import tensorflow as tf\n'), ((43273, 43298), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (43288, 43298), False, 'import os\n'), ((43393, 43416), 'os.path.exists', 'os.path.exists', (['OUT_DIR'], {}), '(OUT_DIR)\n', (43407, 43416), False, 'import os\n'), ((43508, 43525), 'os.mkdir', 'os.mkdir', (['OUT_DIR'], {}), '(OUT_DIR)\n', (43516, 43525), False, 'import os\n'), ((43737, 43763), 'os.path.exists', 'os.path.exists', (['FolderName'], {}), '(FolderName)\n', (43751, 43763), False, 'import os\n'), ((43856, 43876), 'os.mkdir', 'os.mkdir', (['FolderName'], {}), '(FolderName)\n', (43864, 43876), False, 'import os\n'), ((43988, 44005), 'platform.system', 'platform.system', ([], {}), '()\n', (44003, 44005), False, 'import platform\n'), ((44029, 44063), 'tensorflow.compat.v1.reset_default_graph', 'tf.compat.v1.reset_default_graph', ([], {}), '()\n', (44061, 44063), True, 'import tensorflow as tf\n'), ((52202, 52218), 'numpy.arange', 'np.arange', 
(['(1)', '(41)'], {}), '(1, 41)\n', (52211, 52218), True, 'import numpy as np\n'), ((52345, 52363), 'numpy.arange', 'np.arange', (['(21)', '(121)'], {}), '(21, 121)\n', (52354, 52363), True, 'import numpy as np\n'), ((52593, 52611), 'numpy.arange', 'np.arange', (['(21)', '(121)'], {}), '(21, 121)\n', (52602, 52611), True, 'import numpy as np\n'), ((2863, 3031), 'MS_LaplaceEqs.get_infos2pLaplace_5D', 'MS_LaplaceEqs.get_infos2pLaplace_5D', ([], {'input_dim': 'input_dim', 'out_dim': 'out_dim', 'mesh_number': "R['mesh_number']", 'intervalL': '(0.0)', 'intervalR': '(1.0)', 'equa_name': "R['equa_name']"}), "(input_dim=input_dim, out_dim=out_dim,\n mesh_number=R['mesh_number'], intervalL=0.0, intervalR=1.0, equa_name=R\n ['equa_name'])\n", (2898, 3031), False, 'import MS_LaplaceEqs\n'), ((4299, 4347), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""vscope"""'], {'reuse': 'tf.AUTO_REUSE'}), "('vscope', reuse=tf.AUTO_REUSE)\n", (4316, 4347), True, 'import tensorflow as tf\n'), ((4373, 4441), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""XYZST_it"""', 'shape': '[None, input_dim]'}), "(tf.float32, name='XYZST_it', shape=[None, input_dim])\n", (4387, 4441), True, 'import tensorflow as tf\n'), ((4465, 4532), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""XYZST00"""', 'shape': '[None, input_dim]'}), "(tf.float32, name='XYZST00', shape=[None, input_dim])\n", (4479, 4532), True, 'import tensorflow as tf\n'), ((4556, 4623), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""XYZST01"""', 'shape': '[None, input_dim]'}), "(tf.float32, name='XYZST01', shape=[None, input_dim])\n", (4570, 4623), True, 'import tensorflow as tf\n'), ((4647, 4714), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""XYZST10"""', 'shape': '[None, input_dim]'}), "(tf.float32, name='XYZST10', shape=[None, input_dim])\n", (4661, 4714), True, 'import tensorflow as tf\n'), ((4738, 4805), 
'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""XYZST11"""', 'shape': '[None, input_dim]'}), "(tf.float32, name='XYZST11', shape=[None, input_dim])\n", (4752, 4805), True, 'import tensorflow as tf\n'), ((4829, 4896), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""XYZST20"""', 'shape': '[None, input_dim]'}), "(tf.float32, name='XYZST20', shape=[None, input_dim])\n", (4843, 4896), True, 'import tensorflow as tf\n'), ((4920, 4987), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""XYZST21"""', 'shape': '[None, input_dim]'}), "(tf.float32, name='XYZST21', shape=[None, input_dim])\n", (4934, 4987), True, 'import tensorflow as tf\n'), ((5011, 5078), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""XYZST30"""', 'shape': '[None, input_dim]'}), "(tf.float32, name='XYZST30', shape=[None, input_dim])\n", (5025, 5078), True, 'import tensorflow as tf\n'), ((5102, 5169), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""XYZST31"""', 'shape': '[None, input_dim]'}), "(tf.float32, name='XYZST31', shape=[None, input_dim])\n", (5116, 5169), True, 'import tensorflow as tf\n'), ((5193, 5260), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""XYZST40"""', 'shape': '[None, input_dim]'}), "(tf.float32, name='XYZST40', shape=[None, input_dim])\n", (5207, 5260), True, 'import tensorflow as tf\n'), ((5284, 5351), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'name': '"""XYZST41"""', 'shape': '[None, input_dim]'}), "(tf.float32, name='XYZST41', shape=[None, input_dim])\n", (5298, 5351), True, 'import tensorflow as tf\n'), ((5378, 5441), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', ([], {'input': '(100.0)', 'shape': '[]', 'name': '"""bd_p"""'}), "(input=100.0, shape=[], name='bd_p')\n", (5405, 5441), True, 'import tensorflow as tf\n'), ((5469, 5532), 'tensorflow.placeholder_with_default', 
'tf.placeholder_with_default', ([], {'input': '(1.0)', 'shape': '[]', 'name': '"""p_powU"""'}), "(input=1.0, shape=[], name='p_powU')\n", (5496, 5532), True, 'import tensorflow as tf\n'), ((5565, 5626), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', ([], {'input': '(1e-05)', 'shape': '[]', 'name': '"""lr"""'}), "(input=1e-05, shape=[], name='lr')\n", (5592, 5626), True, 'import tensorflow as tf\n'), ((18848, 18889), 'tensorflow.reshape', 'tf.reshape', (['XYZST_it[:, 0]'], {'shape': '[-1, 1]'}), '(XYZST_it[:, 0], shape=[-1, 1])\n', (18858, 18889), True, 'import tensorflow as tf\n'), ((18910, 18951), 'tensorflow.reshape', 'tf.reshape', (['XYZST_it[:, 1]'], {'shape': '[-1, 1]'}), '(XYZST_it[:, 1], shape=[-1, 1])\n', (18920, 18951), True, 'import tensorflow as tf\n'), ((18972, 19013), 'tensorflow.reshape', 'tf.reshape', (['XYZST_it[:, 2]'], {'shape': '[-1, 1]'}), '(XYZST_it[:, 2], shape=[-1, 1])\n', (18982, 19013), True, 'import tensorflow as tf\n'), ((19034, 19075), 'tensorflow.reshape', 'tf.reshape', (['XYZST_it[:, 3]'], {'shape': '[-1, 1]'}), '(XYZST_it[:, 3], shape=[-1, 1])\n', (19044, 19075), True, 'import tensorflow as tf\n'), ((19096, 19137), 'tensorflow.reshape', 'tf.reshape', (['XYZST_it[:, 4]'], {'shape': '[-1, 1]'}), '(XYZST_it[:, 4], shape=[-1, 1])\n', (19106, 19137), True, 'import tensorflow as tf\n'), ((24565, 24603), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss_it_variational2NN'], {}), '(loss_it_variational2NN)\n', (24579, 24603), True, 'import tensorflow as tf\n'), ((29895, 29935), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['in_learning_rate'], {}), '(in_learning_rate)\n', (29917, 29935), True, 'import tensorflow as tf\n'), ((34402, 34474), 'Load_data2Mat.get_randomData2mat', 'Load_data2Mat.get_randomData2mat', ([], {'dim': 'input_dim', 'data_path': 'mat_data_path'}), '(dim=input_dim, data_path=mat_data_path)\n', (34434, 34474), False, 'import Load_data2Mat\n'), ((34484, 34587), 
'saveData.save_testData_or_solus2mat', 'saveData.save_testData_or_solus2mat', (['test_xyzst_bach'], {'dataName': '"""testXYZST"""', 'outPath': "R['FolderName']"}), "(test_xyzst_bach, dataName='testXYZST',\n outPath=R['FolderName'])\n", (34519, 34587), False, 'import saveData\n'), ((34990, 35023), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (35021, 35023), True, 'import tensorflow as tf\n'), ((35138, 35224), 'DNN_data.rand_it', 'DNN_data.rand_it', (['batchsize_it', 'input_dim'], {'region_a': 'region_lb', 'region_b': 'region_rt'}), '(batchsize_it, input_dim, region_a=region_lb, region_b=\n region_rt)\n', (35154, 35224), False, 'import DNN_data\n'), ((35398, 35487), 'DNN_data.rand_bd_5D', 'DNN_data.rand_bd_5D', (['batchsize_bd', 'input_dim'], {'region_a': 'region_lb', 'region_b': 'region_rt'}), '(batchsize_bd, input_dim, region_a=region_lb, region_b=\n region_rt)\n', (35417, 35487), False, 'import DNN_data\n'), ((40600, 40696), 'saveData.save_testData_or_solus2mat', 'saveData.save_testData_or_solus2mat', (['u_true2test'], {'dataName': '"""Utrue"""', 'outPath': "R['FolderName']"}), "(u_true2test, dataName='Utrue', outPath=\n R['FolderName'])\n", (40635, 40696), False, 'import saveData\n'), ((41356, 41488), 'plotData.plot_Hot_solution2test', 'plotData.plot_Hot_solution2test', (['u_true2test'], {'size_vec2mat': 'size2test', 'actName': '"""Utrue"""', 'seedNo': "R['seed']", 'outPath': "R['FolderName']"}), "(u_true2test, size_vec2mat=size2test,\n actName='Utrue', seedNo=R['seed'], outPath=R['FolderName'])\n", (41387, 41488), False, 'import plotData\n'), ((41543, 41676), 'plotData.plot_Hot_solution2test', 'plotData.plot_Hot_solution2test', (['utest_nn'], {'size_vec2mat': 'size2test', 'actName': 'act2Normal', 'seedNo': "R['seed']", 'outPath': "R['FolderName']"}), "(utest_nn, size_vec2mat=size2test, actName=\n act2Normal, seedNo=R['seed'], outPath=R['FolderName'])\n", (41574, 41676), False, 'import plotData\n'), ((3467, 
3578), 'MS_BoltzmannEqs.get_infos2Boltzmann_5D', 'MS_BoltzmannEqs.get_infos2Boltzmann_5D', ([], {'intervalL': 'region_lb', 'intervalR': 'region_rt', 'equa_name': "R['equa_name']"}), "(intervalL=region_lb, intervalR=\n region_rt, equa_name=R['equa_name'])\n", (3505, 3578), False, 'import MS_BoltzmannEqs\n'), ((5700, 5818), 'DNN_base.DNN', 'DNN_base.DNN', (['XYZST_it', 'Ws_Normal', 'Bs_Normal', 'hidden2normal'], {'activateIn_name': 'act2Normal', 'activate_name': 'act2Normal'}), '(XYZST_it, Ws_Normal, Bs_Normal, hidden2normal, activateIn_name\n =act2Normal, activate_name=act2Normal)\n', (5712, 5818), False, 'import DNN_base\n'), ((5890, 6007), 'DNN_base.DNN', 'DNN_base.DNN', (['XYZST00', 'Ws_Normal', 'Bs_Normal', 'hidden2normal'], {'activateIn_name': 'act2Normal', 'activate_name': 'act2Normal'}), '(XYZST00, Ws_Normal, Bs_Normal, hidden2normal, activateIn_name=\n act2Normal, activate_name=act2Normal)\n', (5902, 6007), False, 'import DNN_base\n'), ((6082, 6199), 'DNN_base.DNN', 'DNN_base.DNN', (['XYZST01', 'Ws_Normal', 'Bs_Normal', 'hidden2normal'], {'activateIn_name': 'act2Normal', 'activate_name': 'act2Normal'}), '(XYZST01, Ws_Normal, Bs_Normal, hidden2normal, activateIn_name=\n act2Normal, activate_name=act2Normal)\n', (6094, 6199), False, 'import DNN_base\n'), ((6274, 6391), 'DNN_base.DNN', 'DNN_base.DNN', (['XYZST10', 'Ws_Normal', 'Bs_Normal', 'hidden2normal'], {'activateIn_name': 'act2Normal', 'activate_name': 'act2Normal'}), '(XYZST10, Ws_Normal, Bs_Normal, hidden2normal, activateIn_name=\n act2Normal, activate_name=act2Normal)\n', (6286, 6391), False, 'import DNN_base\n'), ((6466, 6583), 'DNN_base.DNN', 'DNN_base.DNN', (['XYZST11', 'Ws_Normal', 'Bs_Normal', 'hidden2normal'], {'activateIn_name': 'act2Normal', 'activate_name': 'act2Normal'}), '(XYZST11, Ws_Normal, Bs_Normal, hidden2normal, activateIn_name=\n act2Normal, activate_name=act2Normal)\n', (6478, 6583), False, 'import DNN_base\n'), ((6658, 6775), 'DNN_base.DNN', 'DNN_base.DNN', (['XYZST20', 'Ws_Normal', 
'Bs_Normal', 'hidden2normal'], {'activateIn_name': 'act2Normal', 'activate_name': 'act2Normal'}), '(XYZST20, Ws_Normal, Bs_Normal, hidden2normal, activateIn_name=\n act2Normal, activate_name=act2Normal)\n', (6670, 6775), False, 'import DNN_base\n'), ((6850, 6967), 'DNN_base.DNN', 'DNN_base.DNN', (['XYZST21', 'Ws_Normal', 'Bs_Normal', 'hidden2normal'], {'activateIn_name': 'act2Normal', 'activate_name': 'act2Normal'}), '(XYZST21, Ws_Normal, Bs_Normal, hidden2normal, activateIn_name=\n act2Normal, activate_name=act2Normal)\n', (6862, 6967), False, 'import DNN_base\n'), ((7042, 7159), 'DNN_base.DNN', 'DNN_base.DNN', (['XYZST30', 'Ws_Normal', 'Bs_Normal', 'hidden2normal'], {'activateIn_name': 'act2Normal', 'activate_name': 'act2Normal'}), '(XYZST30, Ws_Normal, Bs_Normal, hidden2normal, activateIn_name=\n act2Normal, activate_name=act2Normal)\n', (7054, 7159), False, 'import DNN_base\n'), ((7234, 7351), 'DNN_base.DNN', 'DNN_base.DNN', (['XYZST31', 'Ws_Normal', 'Bs_Normal', 'hidden2normal'], {'activateIn_name': 'act2Normal', 'activate_name': 'act2Normal'}), '(XYZST31, Ws_Normal, Bs_Normal, hidden2normal, activateIn_name=\n act2Normal, activate_name=act2Normal)\n', (7246, 7351), False, 'import DNN_base\n'), ((7426, 7543), 'DNN_base.DNN', 'DNN_base.DNN', (['XYZST40', 'Ws_Normal', 'Bs_Normal', 'hidden2normal'], {'activateIn_name': 'act2Normal', 'activate_name': 'act2Normal'}), '(XYZST40, Ws_Normal, Bs_Normal, hidden2normal, activateIn_name=\n act2Normal, activate_name=act2Normal)\n', (7438, 7543), False, 'import DNN_base\n'), ((7618, 7735), 'DNN_base.DNN', 'DNN_base.DNN', (['XYZST41', 'Ws_Normal', 'Bs_Normal', 'hidden2normal'], {'activateIn_name': 'act2Normal', 'activate_name': 'act2Normal'}), '(XYZST41, Ws_Normal, Bs_Normal, hidden2normal, activateIn_name=\n act2Normal, activate_name=act2Normal)\n', (7630, 7735), False, 'import DNN_base\n'), ((11401, 11537), 'DNN_base.DNN_scale', 'DNN_base.DNN_scale', (['XYZST_it', 'Ws_Scale', 'Bs_Scale', 'hidden2scale', "R['freq2Scale']"], 
{'activateIn_name': 'act2Scale', 'activate_name': 'act2Scale'}), "(XYZST_it, Ws_Scale, Bs_Scale, hidden2scale, R[\n 'freq2Scale'], activateIn_name=act2Scale, activate_name=act2Scale)\n", (11419, 11537), False, 'import DNN_base\n'), ((11613, 11748), 'DNN_base.DNN_scale', 'DNN_base.DNN_scale', (['XYZST00', 'Ws_Scale', 'Bs_Scale', 'hidden2scale', "R['freq2Scale']"], {'activateIn_name': 'act2Scale', 'activate_name': 'act2Scale'}), "(XYZST00, Ws_Scale, Bs_Scale, hidden2scale, R[\n 'freq2Scale'], activateIn_name=act2Scale, activate_name=act2Scale)\n", (11631, 11748), False, 'import DNN_base\n'), ((11827, 11962), 'DNN_base.DNN_scale', 'DNN_base.DNN_scale', (['XYZST01', 'Ws_Scale', 'Bs_Scale', 'hidden2scale', "R['freq2Scale']"], {'activateIn_name': 'act2Scale', 'activate_name': 'act2Scale'}), "(XYZST01, Ws_Scale, Bs_Scale, hidden2scale, R[\n 'freq2Scale'], activateIn_name=act2Scale, activate_name=act2Scale)\n", (11845, 11962), False, 'import DNN_base\n'), ((12041, 12176), 'DNN_base.DNN_scale', 'DNN_base.DNN_scale', (['XYZST10', 'Ws_Scale', 'Bs_Scale', 'hidden2scale', "R['freq2Scale']"], {'activateIn_name': 'act2Scale', 'activate_name': 'act2Scale'}), "(XYZST10, Ws_Scale, Bs_Scale, hidden2scale, R[\n 'freq2Scale'], activateIn_name=act2Scale, activate_name=act2Scale)\n", (12059, 12176), False, 'import DNN_base\n'), ((12255, 12390), 'DNN_base.DNN_scale', 'DNN_base.DNN_scale', (['XYZST11', 'Ws_Scale', 'Bs_Scale', 'hidden2scale', "R['freq2Scale']"], {'activateIn_name': 'act2Scale', 'activate_name': 'act2Scale'}), "(XYZST11, Ws_Scale, Bs_Scale, hidden2scale, R[\n 'freq2Scale'], activateIn_name=act2Scale, activate_name=act2Scale)\n", (12273, 12390), False, 'import DNN_base\n'), ((12469, 12604), 'DNN_base.DNN_scale', 'DNN_base.DNN_scale', (['XYZST20', 'Ws_Scale', 'Bs_Scale', 'hidden2scale', "R['freq2Scale']"], {'activateIn_name': 'act2Scale', 'activate_name': 'act2Scale'}), "(XYZST20, Ws_Scale, Bs_Scale, hidden2scale, R[\n 'freq2Scale'], activateIn_name=act2Scale, 
activate_name=act2Scale)\n", (12487, 12604), False, 'import DNN_base\n'), ((12683, 12818), 'DNN_base.DNN_scale', 'DNN_base.DNN_scale', (['XYZST21', 'Ws_Scale', 'Bs_Scale', 'hidden2scale', "R['freq2Scale']"], {'activateIn_name': 'act2Scale', 'activate_name': 'act2Scale'}), "(XYZST21, Ws_Scale, Bs_Scale, hidden2scale, R[\n 'freq2Scale'], activateIn_name=act2Scale, activate_name=act2Scale)\n", (12701, 12818), False, 'import DNN_base\n'), ((12897, 13032), 'DNN_base.DNN_scale', 'DNN_base.DNN_scale', (['XYZST30', 'Ws_Scale', 'Bs_Scale', 'hidden2scale', "R['freq2Scale']"], {'activateIn_name': 'act2Scale', 'activate_name': 'act2Scale'}), "(XYZST30, Ws_Scale, Bs_Scale, hidden2scale, R[\n 'freq2Scale'], activateIn_name=act2Scale, activate_name=act2Scale)\n", (12915, 13032), False, 'import DNN_base\n'), ((13111, 13246), 'DNN_base.DNN_scale', 'DNN_base.DNN_scale', (['XYZST31', 'Ws_Scale', 'Bs_Scale', 'hidden2scale', "R['freq2Scale']"], {'activateIn_name': 'act2Scale', 'activate_name': 'act2Scale'}), "(XYZST31, Ws_Scale, Bs_Scale, hidden2scale, R[\n 'freq2Scale'], activateIn_name=act2Scale, activate_name=act2Scale)\n", (13129, 13246), False, 'import DNN_base\n'), ((13325, 13460), 'DNN_base.DNN_scale', 'DNN_base.DNN_scale', (['XYZST40', 'Ws_Scale', 'Bs_Scale', 'hidden2scale', "R['freq2Scale']"], {'activateIn_name': 'act2Scale', 'activate_name': 'act2Scale'}), "(XYZST40, Ws_Scale, Bs_Scale, hidden2scale, R[\n 'freq2Scale'], activateIn_name=act2Scale, activate_name=act2Scale)\n", (13343, 13460), False, 'import DNN_base\n'), ((13539, 13674), 'DNN_base.DNN_scale', 'DNN_base.DNN_scale', (['XYZST41', 'Ws_Scale', 'Bs_Scale', 'hidden2scale', "R['freq2Scale']"], {'activateIn_name': 'act2Scale', 'activate_name': 'act2Scale'}), "(XYZST41, Ws_Scale, Bs_Scale, hidden2scale, R[\n 'freq2Scale'], activateIn_name=act2Scale, activate_name=act2Scale)\n", (13557, 13674), False, 'import DNN_base\n'), ((19219, 19253), 'tensorflow.gradients', 'tf.gradients', (['UNN_Normal', 'XYZST_it'], {}), 
'(UNN_Normal, XYZST_it)\n', (19231, 19253), True, 'import tensorflow as tf\n'), ((19295, 19328), 'tensorflow.gradients', 'tf.gradients', (['UNN_Scale', 'XYZST_it'], {}), '(UNN_Scale, XYZST_it)\n', (19307, 19328), True, 'import tensorflow as tf\n'), ((19426, 19465), 'tensorflow.add', 'tf.add', (['dUNN_Normal', '(alpha * dUNN_Scale)'], {}), '(dUNN_Normal, alpha * dUNN_Scale)\n', (19432, 19465), True, 'import tensorflow as tf\n'), ((26283, 26299), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {}), '(0.0)\n', (26294, 26299), True, 'import tensorflow as tf\n'), ((27592, 27622), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss_bd_square'], {}), '(loss_bd_square)\n', (27606, 27622), True, 'import tensorflow as tf\n'), ((28788, 28825), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss_bd_square2Normal'], {}), '(loss_bd_square2Normal)\n', (28802, 28825), True, 'import tensorflow as tf\n'), ((28859, 28895), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss_bd_square2Scale'], {}), '(loss_bd_square2Scale)\n', (28873, 28895), True, 'import tensorflow as tf\n'), ((29047, 29103), 'DNN_base.regular_weights_biases_L1', 'DNN_base.regular_weights_biases_L1', (['Ws_Normal', 'Bs_Normal'], {}), '(Ws_Normal, Bs_Normal)\n', (29081, 29103), False, 'import DNN_base\n'), ((29161, 29215), 'DNN_base.regular_weights_biases_L1', 'DNN_base.regular_weights_biases_L1', (['Ws_Scale', 'Bs_Scale'], {}), '(Ws_Scale, Bs_Scale)\n', (29195, 29215), False, 'import DNN_base\n'), ((33324, 33340), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {}), '(0.0)\n', (33335, 33340), True, 'import tensorflow as tf\n'), ((33373, 33389), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {}), '(0.0)\n', (33384, 33389), True, 'import tensorflow as tf\n'), ((38214, 38423), 'DNN_tools.print_and_log_train_one_epoch', 'DNN_tools.print_and_log_train_one_epoch', (['i_epoch', 'run_times', 'tmp_lr', 'temp_penalty_bd', 'temp_penalty_powU', 'pwb', 'loss_it_nn', 'loss_bd_nn', 'loss_nn', 'udu_nn', 'train_mse_nn', 
'train_rel_nn'], {'log_out': 'log_fileout_NN'}), '(i_epoch, run_times, tmp_lr,\n temp_penalty_bd, temp_penalty_powU, pwb, loss_it_nn, loss_bd_nn,\n loss_nn, udu_nn, train_mse_nn, train_rel_nn, log_out=log_fileout_NN)\n', (38253, 38423), False, 'import DNN_tools\n'), ((38837, 38870), 'numpy.square', 'np.square', (['(u_true2test - utest_nn)'], {}), '(u_true2test - utest_nn)\n', (38846, 38870), True, 'import numpy as np\n'), ((38902, 38923), 'numpy.mean', 'np.mean', (['point_ERR2NN'], {}), '(point_ERR2NN)\n', (38909, 38923), True, 'import numpy as np\n'), ((39126, 39219), 'DNN_tools.print_and_log_test_one_epoch', 'DNN_tools.print_and_log_test_one_epoch', (['test_mse2nn', 'test_rel2nn'], {'log_out': 'log_fileout_NN'}), '(test_mse2nn, test_rel2nn, log_out=\n log_fileout_NN)\n', (39164, 39219), False, 'import DNN_tools\n'), ((7861, 8039), 'DNN_base.DNN_FourierBase', 'DNN_base.DNN_FourierBase', (['XYZST_it', 'Ws_Normal', 'Bs_Normal', 'hidden2normal', "R['freq2Normal']"], {'activate_name': 'act2Normal', 'repeat_Highfreq': '(False)', 'sFourier': "R['sFourier2Normal']"}), "(XYZST_it, Ws_Normal, Bs_Normal, hidden2normal, R[\n 'freq2Normal'], activate_name=act2Normal, repeat_Highfreq=False,\n sFourier=R['sFourier2Normal'])\n", (7885, 8039), False, 'import DNN_base\n'), ((8174, 8351), 'DNN_base.DNN_FourierBase', 'DNN_base.DNN_FourierBase', (['XYZST00', 'Ws_Normal', 'Bs_Normal', 'hidden2normal', "R['freq2Normal']"], {'activate_name': 'act2Normal', 'repeat_Highfreq': '(False)', 'sFourier': "R['sFourier2Normal']"}), "(XYZST00, Ws_Normal, Bs_Normal, hidden2normal, R[\n 'freq2Normal'], activate_name=act2Normal, repeat_Highfreq=False,\n sFourier=R['sFourier2Normal'])\n", (8198, 8351), False, 'import DNN_base\n'), ((8492, 8669), 'DNN_base.DNN_FourierBase', 'DNN_base.DNN_FourierBase', (['XYZST01', 'Ws_Normal', 'Bs_Normal', 'hidden2normal', "R['freq2Normal']"], {'activate_name': 'act2Normal', 'repeat_Highfreq': '(False)', 'sFourier': "R['sFourier2Normal']"}), "(XYZST01, Ws_Normal, 
Bs_Normal, hidden2normal, R[\n 'freq2Normal'], activate_name=act2Normal, repeat_Highfreq=False,\n sFourier=R['sFourier2Normal'])\n", (8516, 8669), False, 'import DNN_base\n'), ((8810, 8987), 'DNN_base.DNN_FourierBase', 'DNN_base.DNN_FourierBase', (['XYZST10', 'Ws_Normal', 'Bs_Normal', 'hidden2normal', "R['freq2Normal']"], {'activate_name': 'act2Normal', 'repeat_Highfreq': '(False)', 'sFourier': "R['sFourier2Normal']"}), "(XYZST10, Ws_Normal, Bs_Normal, hidden2normal, R[\n 'freq2Normal'], activate_name=act2Normal, repeat_Highfreq=False,\n sFourier=R['sFourier2Normal'])\n", (8834, 8987), False, 'import DNN_base\n'), ((9128, 9305), 'DNN_base.DNN_FourierBase', 'DNN_base.DNN_FourierBase', (['XYZST11', 'Ws_Normal', 'Bs_Normal', 'hidden2normal', "R['freq2Normal']"], {'activate_name': 'act2Normal', 'repeat_Highfreq': '(False)', 'sFourier': "R['sFourier2Normal']"}), "(XYZST11, Ws_Normal, Bs_Normal, hidden2normal, R[\n 'freq2Normal'], activate_name=act2Normal, repeat_Highfreq=False,\n sFourier=R['sFourier2Normal'])\n", (9152, 9305), False, 'import DNN_base\n'), ((9446, 9623), 'DNN_base.DNN_FourierBase', 'DNN_base.DNN_FourierBase', (['XYZST20', 'Ws_Normal', 'Bs_Normal', 'hidden2normal', "R['freq2Normal']"], {'activate_name': 'act2Normal', 'repeat_Highfreq': '(False)', 'sFourier': "R['sFourier2Normal']"}), "(XYZST20, Ws_Normal, Bs_Normal, hidden2normal, R[\n 'freq2Normal'], activate_name=act2Normal, repeat_Highfreq=False,\n sFourier=R['sFourier2Normal'])\n", (9470, 9623), False, 'import DNN_base\n'), ((9764, 9941), 'DNN_base.DNN_FourierBase', 'DNN_base.DNN_FourierBase', (['XYZST21', 'Ws_Normal', 'Bs_Normal', 'hidden2normal', "R['freq2Normal']"], {'activate_name': 'act2Normal', 'repeat_Highfreq': '(False)', 'sFourier': "R['sFourier2Normal']"}), "(XYZST21, Ws_Normal, Bs_Normal, hidden2normal, R[\n 'freq2Normal'], activate_name=act2Normal, repeat_Highfreq=False,\n sFourier=R['sFourier2Normal'])\n", (9788, 9941), False, 'import DNN_base\n'), ((10082, 10259), 
'DNN_base.DNN_FourierBase', 'DNN_base.DNN_FourierBase', (['XYZST30', 'Ws_Normal', 'Bs_Normal', 'hidden2normal', "R['freq2Normal']"], {'activate_name': 'act2Normal', 'repeat_Highfreq': '(False)', 'sFourier': "R['sFourier2Normal']"}), "(XYZST30, Ws_Normal, Bs_Normal, hidden2normal, R[\n 'freq2Normal'], activate_name=act2Normal, repeat_Highfreq=False,\n sFourier=R['sFourier2Normal'])\n", (10106, 10259), False, 'import DNN_base\n'), ((10400, 10577), 'DNN_base.DNN_FourierBase', 'DNN_base.DNN_FourierBase', (['XYZST31', 'Ws_Normal', 'Bs_Normal', 'hidden2normal', "R['freq2Normal']"], {'activate_name': 'act2Normal', 'repeat_Highfreq': '(False)', 'sFourier': "R['sFourier2Normal']"}), "(XYZST31, Ws_Normal, Bs_Normal, hidden2normal, R[\n 'freq2Normal'], activate_name=act2Normal, repeat_Highfreq=False,\n sFourier=R['sFourier2Normal'])\n", (10424, 10577), False, 'import DNN_base\n'), ((10718, 10895), 'DNN_base.DNN_FourierBase', 'DNN_base.DNN_FourierBase', (['XYZST40', 'Ws_Normal', 'Bs_Normal', 'hidden2normal', "R['freq2Normal']"], {'activate_name': 'act2Normal', 'repeat_Highfreq': '(False)', 'sFourier': "R['sFourier2Normal']"}), "(XYZST40, Ws_Normal, Bs_Normal, hidden2normal, R[\n 'freq2Normal'], activate_name=act2Normal, repeat_Highfreq=False,\n sFourier=R['sFourier2Normal'])\n", (10742, 10895), False, 'import DNN_base\n'), ((11036, 11213), 'DNN_base.DNN_FourierBase', 'DNN_base.DNN_FourierBase', (['XYZST41', 'Ws_Normal', 'Bs_Normal', 'hidden2normal', "R['freq2Normal']"], {'activate_name': 'act2Normal', 'repeat_Highfreq': '(False)', 'sFourier': "R['sFourier2Normal']"}), "(XYZST41, Ws_Normal, Bs_Normal, hidden2normal, R[\n 'freq2Normal'], activate_name=act2Normal, repeat_Highfreq=False,\n sFourier=R['sFourier2Normal'])\n", (11060, 11213), False, 'import DNN_base\n'), ((13807, 13949), 'DNN_base.DNN_adapt_scale', 'DNN_base.DNN_adapt_scale', (['XYZST_it', 'Ws_Scale', 'Bs_Scale', 'hidden2scale', "R['freq2Scale']"], {'activateIn_name': 'act2Scale', 'activate_name': 'act2Scale'}), 
"(XYZST_it, Ws_Scale, Bs_Scale, hidden2scale, R[\n 'freq2Scale'], activateIn_name=act2Scale, activate_name=act2Scale)\n", (13831, 13949), False, 'import DNN_base\n'), ((14031, 14172), 'DNN_base.DNN_adapt_scale', 'DNN_base.DNN_adapt_scale', (['XYZST00', 'Ws_Scale', 'Bs_Scale', 'hidden2scale', "R['freq2Scale']"], {'activateIn_name': 'act2Scale', 'activate_name': 'act2Scale'}), "(XYZST00, Ws_Scale, Bs_Scale, hidden2scale, R[\n 'freq2Scale'], activateIn_name=act2Scale, activate_name=act2Scale)\n", (14055, 14172), False, 'import DNN_base\n'), ((14257, 14398), 'DNN_base.DNN_adapt_scale', 'DNN_base.DNN_adapt_scale', (['XYZST01', 'Ws_Scale', 'Bs_Scale', 'hidden2scale', "R['freq2Scale']"], {'activateIn_name': 'act2Scale', 'activate_name': 'act2Scale'}), "(XYZST01, Ws_Scale, Bs_Scale, hidden2scale, R[\n 'freq2Scale'], activateIn_name=act2Scale, activate_name=act2Scale)\n", (14281, 14398), False, 'import DNN_base\n'), ((14483, 14624), 'DNN_base.DNN_adapt_scale', 'DNN_base.DNN_adapt_scale', (['XYZST10', 'Ws_Scale', 'Bs_Scale', 'hidden2scale', "R['freq2Scale']"], {'activateIn_name': 'act2Scale', 'activate_name': 'act2Scale'}), "(XYZST10, Ws_Scale, Bs_Scale, hidden2scale, R[\n 'freq2Scale'], activateIn_name=act2Scale, activate_name=act2Scale)\n", (14507, 14624), False, 'import DNN_base\n'), ((14709, 14850), 'DNN_base.DNN_adapt_scale', 'DNN_base.DNN_adapt_scale', (['XYZST11', 'Ws_Scale', 'Bs_Scale', 'hidden2scale', "R['freq2Scale']"], {'activateIn_name': 'act2Scale', 'activate_name': 'act2Scale'}), "(XYZST11, Ws_Scale, Bs_Scale, hidden2scale, R[\n 'freq2Scale'], activateIn_name=act2Scale, activate_name=act2Scale)\n", (14733, 14850), False, 'import DNN_base\n'), ((14935, 15076), 'DNN_base.DNN_adapt_scale', 'DNN_base.DNN_adapt_scale', (['XYZST20', 'Ws_Scale', 'Bs_Scale', 'hidden2scale', "R['freq2Scale']"], {'activateIn_name': 'act2Scale', 'activate_name': 'act2Scale'}), "(XYZST20, Ws_Scale, Bs_Scale, hidden2scale, R[\n 'freq2Scale'], activateIn_name=act2Scale, 
activate_name=act2Scale)\n", (14959, 15076), False, 'import DNN_base\n'), ((15161, 15302), 'DNN_base.DNN_adapt_scale', 'DNN_base.DNN_adapt_scale', (['XYZST21', 'Ws_Scale', 'Bs_Scale', 'hidden2scale', "R['freq2Scale']"], {'activateIn_name': 'act2Scale', 'activate_name': 'act2Scale'}), "(XYZST21, Ws_Scale, Bs_Scale, hidden2scale, R[\n 'freq2Scale'], activateIn_name=act2Scale, activate_name=act2Scale)\n", (15185, 15302), False, 'import DNN_base\n'), ((15387, 15528), 'DNN_base.DNN_adapt_scale', 'DNN_base.DNN_adapt_scale', (['XYZST30', 'Ws_Scale', 'Bs_Scale', 'hidden2scale', "R['freq2Scale']"], {'activateIn_name': 'act2Scale', 'activate_name': 'act2Scale'}), "(XYZST30, Ws_Scale, Bs_Scale, hidden2scale, R[\n 'freq2Scale'], activateIn_name=act2Scale, activate_name=act2Scale)\n", (15411, 15528), False, 'import DNN_base\n'), ((15613, 15754), 'DNN_base.DNN_adapt_scale', 'DNN_base.DNN_adapt_scale', (['XYZST31', 'Ws_Scale', 'Bs_Scale', 'hidden2scale', "R['freq2Scale']"], {'activateIn_name': 'act2Scale', 'activate_name': 'act2Scale'}), "(XYZST31, Ws_Scale, Bs_Scale, hidden2scale, R[\n 'freq2Scale'], activateIn_name=act2Scale, activate_name=act2Scale)\n", (15637, 15754), False, 'import DNN_base\n'), ((15839, 15980), 'DNN_base.DNN_adapt_scale', 'DNN_base.DNN_adapt_scale', (['XYZST40', 'Ws_Scale', 'Bs_Scale', 'hidden2scale', "R['freq2Scale']"], {'activateIn_name': 'act2Scale', 'activate_name': 'act2Scale'}), "(XYZST40, Ws_Scale, Bs_Scale, hidden2scale, R[\n 'freq2Scale'], activateIn_name=act2Scale, activate_name=act2Scale)\n", (15863, 15980), False, 'import DNN_base\n'), ((16065, 16206), 'DNN_base.DNN_adapt_scale', 'DNN_base.DNN_adapt_scale', (['XYZST41', 'Ws_Scale', 'Bs_Scale', 'hidden2scale', "R['freq2Scale']"], {'activateIn_name': 'act2Scale', 'activate_name': 'act2Scale'}), "(XYZST41, Ws_Scale, Bs_Scale, hidden2scale, R[\n 'freq2Scale'], activateIn_name=act2Scale, activate_name=act2Scale)\n", (16089, 16206), False, 'import DNN_base\n'), ((24759, 24818), 'tensorflow.multiply', 
'tf.multiply', (['UNN_Normal', '(using_scale2orthogonal * UNN_Scale)'], {}), '(UNN_Normal, using_scale2orthogonal * UNN_Scale)\n', (24770, 24818), True, 'import tensorflow as tf\n'), ((27543, 27560), 'tensorflow.square', 'tf.square', (['U41_NN'], {}), '(U41_NN)\n', (27552, 27560), True, 'import tensorflow as tf\n'), ((28053, 28077), 'tensorflow.square', 'tf.square', (['U41_NN_Normal'], {}), '(U41_NN_Normal)\n', (28062, 28077), True, 'import tensorflow as tf\n'), ((28709, 28755), 'tensorflow.square', 'tf.square', (['(using_scale2boundary * U41_NN_Scale)'], {}), '(using_scale2boundary * U41_NN_Scale)\n', (28718, 28755), True, 'import tensorflow as tf\n'), ((29326, 29382), 'DNN_base.regular_weights_biases_L2', 'DNN_base.regular_weights_biases_L2', (['Ws_Normal', 'Bs_Normal'], {}), '(Ws_Normal, Bs_Normal)\n', (29360, 29382), False, 'import DNN_base\n'), ((29440, 29494), 'DNN_base.regular_weights_biases_L2', 'DNN_base.regular_weights_biases_L2', (['Ws_Scale', 'Bs_Scale'], {}), '(Ws_Scale, Bs_Scale)\n', (29474, 29494), False, 'import DNN_base\n'), ((29575, 29591), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {}), '(0.0)\n', (29586, 29591), True, 'import tensorflow as tf\n'), ((29683, 29699), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {}), '(0.0)\n', (29694, 29699), True, 'import tensorflow as tf\n'), ((30270, 30300), 'tensorflow.group', 'tf.group', (['train_op1', 'train_op2'], {}), '(train_op1, train_op2)\n', (30278, 30300), True, 'import tensorflow as tf\n'), ((33167, 33190), 'tensorflow.square', 'tf.square', (['(U_true - UNN)'], {}), '(U_true - UNN)\n', (33176, 33190), True, 'import tensorflow as tf\n'), ((38180, 38191), 'time.time', 'time.time', ([], {}), '()\n', (38189, 38191), False, 'import time\n'), ((44118, 44144), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (44134, 44144), False, 'import os\n'), ((44212, 44238), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (44228, 44238), False, 'import 
os\n'), ((16341, 16486), 'DNN_base.DNN_FourierBase', 'DNN_base.DNN_FourierBase', (['XYZST_it', 'Ws_Scale', 'Bs_Scale', 'hidden2scale', "R['freq2Scale']"], {'activate_name': 'act2Scale', 'sFourier': "R['sFourier2Scale']"}), "(XYZST_it, Ws_Scale, Bs_Scale, hidden2scale, R[\n 'freq2Scale'], activate_name=act2Scale, sFourier=R['sFourier2Scale'])\n", (16365, 16486), False, 'import DNN_base\n'), ((16568, 16712), 'DNN_base.DNN_FourierBase', 'DNN_base.DNN_FourierBase', (['XYZST00', 'Ws_Scale', 'Bs_Scale', 'hidden2scale', "R['freq2Scale']"], {'activate_name': 'act2Scale', 'sFourier': "R['sFourier2Scale']"}), "(XYZST00, Ws_Scale, Bs_Scale, hidden2scale, R[\n 'freq2Scale'], activate_name=act2Scale, sFourier=R['sFourier2Scale'])\n", (16592, 16712), False, 'import DNN_base\n'), ((16797, 16941), 'DNN_base.DNN_FourierBase', 'DNN_base.DNN_FourierBase', (['XYZST01', 'Ws_Scale', 'Bs_Scale', 'hidden2scale', "R['freq2Scale']"], {'activate_name': 'act2Scale', 'sFourier': "R['sFourier2Scale']"}), "(XYZST01, Ws_Scale, Bs_Scale, hidden2scale, R[\n 'freq2Scale'], activate_name=act2Scale, sFourier=R['sFourier2Scale'])\n", (16821, 16941), False, 'import DNN_base\n'), ((17026, 17170), 'DNN_base.DNN_FourierBase', 'DNN_base.DNN_FourierBase', (['XYZST10', 'Ws_Scale', 'Bs_Scale', 'hidden2scale', "R['freq2Scale']"], {'activate_name': 'act2Scale', 'sFourier': "R['sFourier2Scale']"}), "(XYZST10, Ws_Scale, Bs_Scale, hidden2scale, R[\n 'freq2Scale'], activate_name=act2Scale, sFourier=R['sFourier2Scale'])\n", (17050, 17170), False, 'import DNN_base\n'), ((17255, 17399), 'DNN_base.DNN_FourierBase', 'DNN_base.DNN_FourierBase', (['XYZST11', 'Ws_Scale', 'Bs_Scale', 'hidden2scale', "R['freq2Scale']"], {'activate_name': 'act2Scale', 'sFourier': "R['sFourier2Scale']"}), "(XYZST11, Ws_Scale, Bs_Scale, hidden2scale, R[\n 'freq2Scale'], activate_name=act2Scale, sFourier=R['sFourier2Scale'])\n", (17279, 17399), False, 'import DNN_base\n'), ((17484, 17628), 'DNN_base.DNN_FourierBase', 'DNN_base.DNN_FourierBase', 
(['XYZST20', 'Ws_Scale', 'Bs_Scale', 'hidden2scale', "R['freq2Scale']"], {'activate_name': 'act2Scale', 'sFourier': "R['sFourier2Scale']"}), "(XYZST20, Ws_Scale, Bs_Scale, hidden2scale, R[\n 'freq2Scale'], activate_name=act2Scale, sFourier=R['sFourier2Scale'])\n", (17508, 17628), False, 'import DNN_base\n'), ((17713, 17857), 'DNN_base.DNN_FourierBase', 'DNN_base.DNN_FourierBase', (['XYZST21', 'Ws_Scale', 'Bs_Scale', 'hidden2scale', "R['freq2Scale']"], {'activate_name': 'act2Scale', 'sFourier': "R['sFourier2Scale']"}), "(XYZST21, Ws_Scale, Bs_Scale, hidden2scale, R[\n 'freq2Scale'], activate_name=act2Scale, sFourier=R['sFourier2Scale'])\n", (17737, 17857), False, 'import DNN_base\n'), ((17942, 18086), 'DNN_base.DNN_FourierBase', 'DNN_base.DNN_FourierBase', (['XYZST30', 'Ws_Scale', 'Bs_Scale', 'hidden2scale', "R['freq2Scale']"], {'activate_name': 'act2Scale', 'sFourier': "R['sFourier2Scale']"}), "(XYZST30, Ws_Scale, Bs_Scale, hidden2scale, R[\n 'freq2Scale'], activate_name=act2Scale, sFourier=R['sFourier2Scale'])\n", (17966, 18086), False, 'import DNN_base\n'), ((18171, 18315), 'DNN_base.DNN_FourierBase', 'DNN_base.DNN_FourierBase', (['XYZST31', 'Ws_Scale', 'Bs_Scale', 'hidden2scale', "R['freq2Scale']"], {'activate_name': 'act2Scale', 'sFourier': "R['sFourier2Scale']"}), "(XYZST31, Ws_Scale, Bs_Scale, hidden2scale, R[\n 'freq2Scale'], activate_name=act2Scale, sFourier=R['sFourier2Scale'])\n", (18195, 18315), False, 'import DNN_base\n'), ((18400, 18544), 'DNN_base.DNN_FourierBase', 'DNN_base.DNN_FourierBase', (['XYZST40', 'Ws_Scale', 'Bs_Scale', 'hidden2scale', "R['freq2Scale']"], {'activate_name': 'act2Scale', 'sFourier': "R['sFourier2Scale']"}), "(XYZST40, Ws_Scale, Bs_Scale, hidden2scale, R[\n 'freq2Scale'], activate_name=act2Scale, sFourier=R['sFourier2Scale'])\n", (18424, 18544), False, 'import DNN_base\n'), ((18629, 18773), 'DNN_base.DNN_FourierBase', 'DNN_base.DNN_FourierBase', (['XYZST41', 'Ws_Scale', 'Bs_Scale', 'hidden2scale', "R['freq2Scale']"], 
{'activate_name': 'act2Scale', 'sFourier': "R['sFourier2Scale']"}), "(XYZST41, Ws_Scale, Bs_Scale, hidden2scale, R[\n 'freq2Scale'], activate_name=act2Scale, sFourier=R['sFourier2Scale'])\n", (18653, 18773), False, 'import DNN_base\n'), ((19520, 19535), 'tensorflow.square', 'tf.square', (['dUNN'], {}), '(dUNN)\n', (19529, 19535), True, 'import tensorflow as tf\n'), ((19931, 19960), 'tensorflow.multiply', 'tf.multiply', (['a_eps', 'norm2dUNN'], {}), '(a_eps, norm2dUNN)\n', (19942, 19960), True, 'import tensorflow as tf\n'), ((24864, 24889), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['point_UdU'], {}), '(point_UdU)\n', (24878, 24889), True, 'import tensorflow as tf\n'), ((25129, 25153), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['norm2UdU'], {}), '(norm2UdU)\n', (25143, 25153), True, 'import tensorflow as tf\n'), ((27480, 27497), 'tensorflow.square', 'tf.square', (['U40_NN'], {}), '(U40_NN)\n', (27489, 27497), True, 'import tensorflow as tf\n'), ((27983, 28007), 'tensorflow.square', 'tf.square', (['U40_NN_Normal'], {}), '(U40_NN_Normal)\n', (27992, 28007), True, 'import tensorflow as tf\n'), ((28662, 28708), 'tensorflow.square', 'tf.square', (['(using_scale2boundary * U40_NN_Scale)'], {}), '(using_scale2boundary * U40_NN_Scale)\n', (28671, 28708), True, 'import tensorflow as tf\n'), ((30676, 30717), 'tensorflow.group', 'tf.group', (['train_op1', 'train_op2', 'train_op3'], {}), '(train_op1, train_op2, train_op3)\n', (30684, 30717), True, 'import tensorflow as tf\n'), ((31237, 31267), 'tensorflow.group', 'tf.group', (['train_op3', 'train_op4'], {}), '(train_op3, train_op4)\n', (31245, 31267), True, 'import tensorflow as tf\n'), ((33254, 33271), 'tensorflow.square', 'tf.square', (['U_true'], {}), '(U_true)\n', (33263, 33271), True, 'import tensorflow as tf\n'), ((39030, 39052), 'numpy.square', 'np.square', (['u_true2test'], {}), '(u_true2test)\n', (39039, 39052), True, 'import numpy as np\n'), ((20313, 20422), 'MS_LaplaceEqs.get_forceSide2pLaplace5D', 
'MS_LaplaceEqs.get_forceSide2pLaplace5D', ([], {'x': 'X_it', 'y': 'Y_it', 'z': 'Z_it', 's': 'S_it', 't': 'T_it', 'equa_name': "R['equa_name']"}), "(x=X_it, y=Y_it, z=Z_it, s=S_it, t=\n T_it, equa_name=R['equa_name'])\n", (20351, 20422), False, 'import MS_LaplaceEqs\n'), ((21092, 21121), 'tensorflow.multiply', 'tf.multiply', (['a_eps', 'norm2dUNN'], {}), '(a_eps, norm2dUNN)\n', (21103, 21121), True, 'import tensorflow as tf\n'), ((22967, 23079), 'MS_BoltzmannEqs.get_forceSide2Boltzmann_5D', 'MS_BoltzmannEqs.get_forceSide2Boltzmann_5D', ([], {'x': 'X_it', 'y': 'Y_it', 'z': 'Z_it', 's': 'S_it', 't': 'T_it', 'equa_name': "R['equa_name']"}), "(x=X_it, y=Y_it, z=Z_it, s=S_it,\n t=T_it, equa_name=R['equa_name'])\n", (23009, 23079), False, 'import MS_BoltzmannEqs\n'), ((24064, 24142), 'MS_LaplaceEqs.get_forceSide2pLaplace5D', 'MS_LaplaceEqs.get_forceSide2pLaplace5D', ([], {'x': 'X_it', 'y': 'Y_it', 'z': 'Z_it', 's': 'S_it', 't': 'T_it'}), '(x=X_it, y=Y_it, z=Z_it, s=S_it, t=T_it)\n', (24102, 24142), False, 'import MS_LaplaceEqs\n'), ((25035, 25094), 'tensorflow.multiply', 'tf.multiply', (['UNN_Normal', '(using_scale2orthogonal * UNN_Scale)'], {}), '(UNN_Normal, using_scale2orthogonal * UNN_Scale)\n', (25046, 25094), True, 'import tensorflow as tf\n'), ((25341, 25402), 'tensorflow.multiply', 'tf.multiply', (['dUNN_Normal', '(using_scale2orthogonal * dUNN_Scale)'], {}), '(dUNN_Normal, using_scale2orthogonal * dUNN_Scale)\n', (25352, 25402), True, 'import tensorflow as tf\n'), ((25659, 25685), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['norm2AdUdU'], {}), '(norm2AdUdU)\n', (25673, 25685), True, 'import tensorflow as tf\n'), ((25918, 25979), 'tensorflow.multiply', 'tf.multiply', (['dUNN_Normal', '(using_scale2orthogonal * dUNN_Scale)'], {}), '(dUNN_Normal, using_scale2orthogonal * dUNN_Scale)\n', (25929, 25979), True, 'import tensorflow as tf\n'), ((27460, 27477), 'tensorflow.square', 'tf.square', (['U31_NN'], {}), '(U31_NN)\n', (27469, 27477), True, 'import tensorflow as 
tf\n'), ((27956, 27980), 'tensorflow.square', 'tf.square', (['U31_NN_Normal'], {}), '(U31_NN_Normal)\n', (27965, 27980), True, 'import tensorflow as tf\n'), ((28573, 28619), 'tensorflow.square', 'tf.square', (['(using_scale2boundary * U31_NN_Scale)'], {}), '(using_scale2boundary * U31_NN_Scale)\n', (28582, 28619), True, 'import tensorflow as tf\n'), ((31643, 31684), 'tensorflow.group', 'tf.group', (['train_op1', 'train_op2', 'train_op3'], {}), '(train_op1, train_op2, train_op3)\n', (31651, 31684), True, 'import tensorflow as tf\n'), ((21365, 21477), 'MS_BoltzmannEqs.get_forceSide2Boltzmann_5D', 'MS_BoltzmannEqs.get_forceSide2Boltzmann_5D', ([], {'x': 'X_it', 'y': 'Y_it', 'z': 'Z_it', 's': 'S_it', 't': 'T_it', 'equa_name': "R['equa_name']"}), "(x=X_it, y=Y_it, z=Z_it, s=S_it,\n t=T_it, equa_name=R['equa_name'])\n", (21407, 21477), False, 'import MS_BoltzmannEqs\n'), ((22206, 22228), 'tensorflow.square', 'tf.square', (['dUNN_Normal'], {}), '(dUNN_Normal)\n', (22215, 22228), True, 'import tensorflow as tf\n'), ((22324, 22345), 'tensorflow.square', 'tf.square', (['dUNN_Scale'], {}), '(dUNN_Scale)\n', (22333, 22345), True, 'import tensorflow as tf\n'), ((22604, 22637), 'tensorflow.pow', 'tf.pow', (['norm2dUNN_Normal', 'p_index'], {}), '(norm2dUNN_Normal, p_index)\n', (22610, 22637), True, 'import tensorflow as tf\n'), ((22685, 22725), 'tensorflow.pow', 'tf.pow', (['(alpha * norm2dUNN_Scale)', 'p_index'], {}), '(alpha * norm2dUNN_Scale, p_index)\n', (22691, 22725), True, 'import tensorflow as tf\n'), ((23309, 23333), 'tensorflow.multiply', 'tf.multiply', (['fxyzst', 'UNN'], {}), '(fxyzst, UNN)\n', (23320, 23333), True, 'import tensorflow as tf\n'), ((23699, 23732), 'tensorflow.pow', 'tf.pow', (['norm2dUNN_Normal', 'p_index'], {}), '(norm2dUNN_Normal, p_index)\n', (23705, 23732), True, 'import tensorflow as tf\n'), ((23780, 23820), 'tensorflow.pow', 'tf.pow', (['(alpha * norm2dUNN_Scale)', 'p_index'], {}), '(alpha * norm2dUNN_Scale, p_index)\n', (23786, 23820), True, 
'import tensorflow as tf\n'), ((25444, 25477), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['dU_dot_dU'], {'axis': '(-1)'}), '(dU_dot_dU, axis=-1)\n', (25457, 25477), True, 'import tensorflow as tf\n'), ((25538, 25566), 'tensorflow.multiply', 'tf.multiply', (['a_eps', 'sum2dUdU'], {}), '(a_eps, sum2dUdU)\n', (25549, 25566), True, 'import tensorflow as tf\n'), ((26021, 26054), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['dU_dot_dU'], {'axis': '(-1)'}), '(dU_dot_dU, axis=-1)\n', (26034, 26054), True, 'import tensorflow as tf\n'), ((26115, 26143), 'tensorflow.multiply', 'tf.multiply', (['a_eps', 'sum2dUdU'], {}), '(a_eps, sum2dUdU)\n', (26126, 26143), True, 'import tensorflow as tf\n'), ((26180, 26206), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['norm2AdUdU'], {}), '(norm2AdUdU)\n', (26194, 26206), True, 'import tensorflow as tf\n'), ((26209, 26232), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['U_dot_U'], {}), '(U_dot_U)\n', (26223, 26232), True, 'import tensorflow as tf\n'), ((27440, 27457), 'tensorflow.square', 'tf.square', (['U30_NN'], {}), '(U30_NN)\n', (27449, 27457), True, 'import tensorflow as tf\n'), ((27929, 27953), 'tensorflow.square', 'tf.square', (['U30_NN_Normal'], {}), '(U30_NN_Normal)\n', (27938, 27953), True, 'import tensorflow as tf\n'), ((28526, 28572), 'tensorflow.square', 'tf.square', (['(using_scale2boundary * U30_NN_Scale)'], {}), '(using_scale2boundary * U30_NN_Scale)\n', (28535, 28572), True, 'import tensorflow as tf\n'), ((32159, 32211), 'tensorflow.group', 'tf.group', (['train_op1', 'train_op2', 'train_op3', 'train_op4'], {}), '(train_op1, train_op2, train_op3, train_op4)\n', (32167, 32211), True, 'import tensorflow as tf\n'), ((20628, 20661), 'tensorflow.reshape', 'tf.reshape', (['fxyzst'], {'shape': '[-1, 1]'}), '(fxyzst, shape=[-1, 1])\n', (20638, 20661), True, 'import tensorflow as tf\n'), ((21697, 21721), 'tensorflow.multiply', 'tf.multiply', (['fxyzst', 'UNN'], {}), '(fxyzst, UNN)\n', (21708, 21721), True, 'import tensorflow as 
tf\n'), ((24286, 24319), 'tensorflow.reshape', 'tf.reshape', (['fxyzst'], {'shape': '[-1, 1]'}), '(fxyzst, shape=[-1, 1])\n', (24296, 24319), True, 'import tensorflow as tf\n'), ((25816, 25875), 'tensorflow.multiply', 'tf.multiply', (['UNN_Normal', '(using_scale2orthogonal * UNN_Scale)'], {}), '(UNN_Normal, using_scale2orthogonal * UNN_Scale)\n', (25827, 25875), True, 'import tensorflow as tf\n'), ((27377, 27394), 'tensorflow.square', 'tf.square', (['U21_NN'], {}), '(U21_NN)\n', (27386, 27394), True, 'import tensorflow as tf\n'), ((27860, 27884), 'tensorflow.square', 'tf.square', (['U21_NN_Normal'], {}), '(U21_NN_Normal)\n', (27869, 27884), True, 'import tensorflow as tf\n'), ((28437, 28483), 'tensorflow.square', 'tf.square', (['(using_scale2boundary * U21_NN_Scale)'], {}), '(using_scale2boundary * U21_NN_Scale)\n', (28446, 28483), True, 'import tensorflow as tf\n'), ((32688, 32740), 'tensorflow.group', 'tf.group', (['train_op1', 'train_op2', 'train_op3', 'train_op4'], {}), '(train_op1, train_op2, train_op3, train_op4)\n', (32696, 32740), True, 'import tensorflow as tf\n'), ((27357, 27374), 'tensorflow.square', 'tf.square', (['U20_NN'], {}), '(U20_NN)\n', (27366, 27374), True, 'import tensorflow as tf\n'), ((27833, 27857), 'tensorflow.square', 'tf.square', (['U20_NN_Normal'], {}), '(U20_NN_Normal)\n', (27842, 27857), True, 'import tensorflow as tf\n'), ((28390, 28436), 'tensorflow.square', 'tf.square', (['(using_scale2boundary * U20_NN_Scale)'], {}), '(using_scale2boundary * U20_NN_Scale)\n', (28399, 28436), True, 'import tensorflow as tf\n'), ((27337, 27354), 'tensorflow.square', 'tf.square', (['U11_NN'], {}), '(U11_NN)\n', (27346, 27354), True, 'import tensorflow as tf\n'), ((27806, 27830), 'tensorflow.square', 'tf.square', (['U11_NN_Normal'], {}), '(U11_NN_Normal)\n', (27815, 27830), True, 'import tensorflow as tf\n'), ((28301, 28347), 'tensorflow.square', 'tf.square', (['(using_scale2boundary * U11_NN_Scale)'], {}), '(using_scale2boundary * U11_NN_Scale)\n', 
(28310, 28347), True, 'import tensorflow as tf\n'), ((27274, 27291), 'tensorflow.square', 'tf.square', (['U10_NN'], {}), '(U10_NN)\n', (27283, 27291), True, 'import tensorflow as tf\n'), ((27737, 27761), 'tensorflow.square', 'tf.square', (['U10_NN_Normal'], {}), '(U10_NN_Normal)\n', (27746, 27761), True, 'import tensorflow as tf\n'), ((28254, 28300), 'tensorflow.square', 'tf.square', (['(using_scale2boundary * U10_NN_Scale)'], {}), '(using_scale2boundary * U10_NN_Scale)\n', (28263, 28300), True, 'import tensorflow as tf\n'), ((27234, 27251), 'tensorflow.square', 'tf.square', (['U00_NN'], {}), '(U00_NN)\n', (27243, 27251), True, 'import tensorflow as tf\n'), ((27254, 27271), 'tensorflow.square', 'tf.square', (['U01_NN'], {}), '(U01_NN)\n', (27263, 27271), True, 'import tensorflow as tf\n'), ((27683, 27707), 'tensorflow.square', 'tf.square', (['U00_NN_Normal'], {}), '(U00_NN_Normal)\n', (27692, 27707), True, 'import tensorflow as tf\n'), ((27710, 27734), 'tensorflow.square', 'tf.square', (['U01_NN_Normal'], {}), '(U01_NN_Normal)\n', (27719, 27734), True, 'import tensorflow as tf\n'), ((28118, 28164), 'tensorflow.square', 'tf.square', (['(using_scale2boundary * U00_NN_Scale)'], {}), '(using_scale2boundary * U00_NN_Scale)\n', (28127, 28164), True, 'import tensorflow as tf\n'), ((28165, 28211), 'tensorflow.square', 'tf.square', (['(using_scale2boundary * U01_NN_Scale)'], {}), '(using_scale2boundary * U01_NN_Scale)\n', (28174, 28211), True, 'import tensorflow as tf\n')] |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import unittest
import numpy as np
from paddle.device.cuda.graphs import wrap_cuda_graph, is_cuda_graph_supported, cuda_graph_transform
paddle.enable_static()
class SimpleModel(nn.Layer):
    """Small MLP block: linear -> dropout -> relu -> dropout -> gelu."""

    def __init__(self, in_size, out_size):
        super().__init__()
        self.linear = nn.Linear(in_size, out_size)
        self.dropout_1 = nn.Dropout(0.1)
        self.relu = nn.ReLU()
        self.dropout_2 = nn.Dropout(0.5)
        self.gelu = nn.GELU()

    def forward(self, x):
        # Apply the sub-layers in their fixed order.
        for layer in (self.linear, self.dropout_1, self.relu,
                      self.dropout_2, self.gelu):
            x = layer(x)
        return x
class TestCudaGraphAttrAll(unittest.TestCase):
    """Checks that rewriting a static program into CUDA graph sections
    leaves training results unchanged."""

    def setUp(self):
        # Release tensors eagerly so memory behaves deterministically.
        paddle.set_flags({'FLAGS_eager_delete_tensor_gb': 0.0})

    def get_model(self, use_cuda_graph=False):
        """Build a three-stage model plus an SGD step; return the loss var."""
        x = paddle.static.data(shape=[3, 10], dtype='float32', name='x')

        model_start = SimpleModel(10, 20)
        if use_cuda_graph:
            model_start = wrap_cuda_graph(model_start)

        model_inter = SimpleModel(20, 20)

        model_end = SimpleModel(20, 10)
        if use_cuda_graph:
            # The last wrapped section uses a fresh memory pool.
            model_end = wrap_cuda_graph(model_end, memory_pool='new')

        loss = paddle.mean(model_end(model_inter(model_start(x))))
        paddle.optimizer.SGD().minimize(loss)
        return loss

    def _run_steps(self, main_prog, start_prog, loss, x_data):
        """Run the startup program, then 10 training steps; return last fetch."""
        exe = paddle.static.Executor(paddle.CUDAPlace(0))
        exe.run(start_prog)
        result = None
        for _ in range(10):
            result = exe.run(main_prog, feed={'x': x_data}, fetch_list=[loss])
        return result

    def run_with_cuda_graph(self, x_data):
        # run with cuda graph
        paddle.seed(1024)

        main_prog = paddle.static.Program()
        start_prog = paddle.static.Program()
        with paddle.static.program_guard(main_prog, start_prog):
            loss = self.get_model(use_cuda_graph=True)

        section_programs = cuda_graph_transform(main_prog)
        assert len(section_programs) == 4

        # each captured section should be replaced by one run_program op
        run_program_ops = [
            op for op in main_prog.global_block().ops
            if op.type == 'run_program'
        ]
        assert len(run_program_ops) == 4

        return self._run_steps(main_prog, start_prog, loss, x_data)

    def normal_run(self, x_data):
        # run without cuda graph
        paddle.seed(1024)

        main_prog = paddle.static.Program()
        start_prog = paddle.static.Program()
        with paddle.static.program_guard(main_prog, start_prog):
            loss = self.get_model()

        return self._run_steps(main_prog, start_prog, loss, x_data)

    def test_static_mode_cuda_graph(self):
        if not is_cuda_graph_supported():
            return
        x_data = np.random.random((3, 10)).astype('float32')
        cuda_graph_rst = self.run_with_cuda_graph(x_data)
        normal_run_rst = self.normal_run(x_data)
        assert np.array_equal(cuda_graph_rst, normal_run_rst)


if __name__ == "__main__":
    unittest.main()
| [
"paddle.nn.Dropout",
"paddle.enable_static",
"paddle.static.program_guard",
"paddle.set_flags",
"paddle.device.cuda.graphs.is_cuda_graph_supported",
"paddle.device.cuda.graphs.cuda_graph_transform",
"unittest.main",
"paddle.optimizer.SGD",
"paddle.nn.GELU",
"paddle.seed",
"paddle.device.cuda.gra... | [((785, 807), 'paddle.enable_static', 'paddle.enable_static', ([], {}), '()\n', (805, 807), False, 'import paddle\n'), ((3830, 3845), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3843, 3845), False, 'import unittest\n'), ((949, 977), 'paddle.nn.Linear', 'nn.Linear', (['in_size', 'out_size'], {}), '(in_size, out_size)\n', (958, 977), True, 'import paddle.nn as nn\n'), ((1003, 1025), 'paddle.nn.Dropout', 'paddle.nn.Dropout', (['(0.1)'], {}), '(0.1)\n', (1020, 1025), False, 'import paddle\n'), ((1046, 1055), 'paddle.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1053, 1055), True, 'import paddle.nn as nn\n'), ((1081, 1103), 'paddle.nn.Dropout', 'paddle.nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (1098, 1103), False, 'import paddle\n'), ((1124, 1133), 'paddle.nn.GELU', 'nn.GELU', ([], {}), '()\n', (1131, 1133), True, 'import paddle.nn as nn\n'), ((1394, 1449), 'paddle.set_flags', 'paddle.set_flags', (["{'FLAGS_eager_delete_tensor_gb': 0.0}"], {}), "({'FLAGS_eager_delete_tensor_gb': 0.0})\n", (1410, 1449), False, 'import paddle\n'), ((1510, 1570), 'paddle.static.data', 'paddle.static.data', ([], {'shape': '[3, 10]', 'dtype': '"""float32"""', 'name': '"""x"""'}), "(shape=[3, 10], dtype='float32', name='x')\n", (1528, 1570), False, 'import paddle\n'), ((2010, 2030), 'paddle.mean', 'paddle.mean', (['end_out'], {}), '(end_out)\n', (2021, 2030), False, 'import paddle\n'), ((2046, 2068), 'paddle.optimizer.SGD', 'paddle.optimizer.SGD', ([], {}), '()\n', (2066, 2068), False, 'import paddle\n'), ((2199, 2216), 'paddle.seed', 'paddle.seed', (['(1024)'], {}), '(1024)\n', (2210, 2216), False, 'import paddle\n'), ((2238, 2261), 'paddle.static.Program', 'paddle.static.Program', ([], {}), '()\n', (2259, 2261), False, 'import paddle\n'), ((2283, 2306), 'paddle.static.Program', 'paddle.static.Program', ([], {}), '()\n', (2304, 2306), False, 'import paddle\n'), ((2456, 2487), 'paddle.device.cuda.graphs.cuda_graph_transform', 'cuda_graph_transform', 
(['main_prog'], {}), '(main_prog)\n', (2476, 2487), False, 'from paddle.device.cuda.graphs import wrap_cuda_graph, is_cuda_graph_supported, cuda_graph_transform\n'), ((3040, 3057), 'paddle.seed', 'paddle.seed', (['(1024)'], {}), '(1024)\n', (3051, 3057), False, 'import paddle\n'), ((3079, 3102), 'paddle.static.Program', 'paddle.static.Program', ([], {}), '()\n', (3100, 3102), False, 'import paddle\n'), ((3124, 3147), 'paddle.static.Program', 'paddle.static.Program', ([], {}), '()\n', (3145, 3147), False, 'import paddle\n'), ((3750, 3796), 'numpy.array_equal', 'np.array_equal', (['cuda_graph_rst', 'normal_run_rst'], {}), '(cuda_graph_rst, normal_run_rst)\n', (3764, 3796), True, 'import numpy as np\n'), ((1667, 1695), 'paddle.device.cuda.graphs.wrap_cuda_graph', 'wrap_cuda_graph', (['model_start'], {}), '(model_start)\n', (1682, 1695), False, 'from paddle.device.cuda.graphs import wrap_cuda_graph, is_cuda_graph_supported, cuda_graph_transform\n'), ((1831, 1876), 'paddle.device.cuda.graphs.wrap_cuda_graph', 'wrap_cuda_graph', (['model_end'], {'memory_pool': '"""new"""'}), "(model_end, memory_pool='new')\n", (1846, 1876), False, 'from paddle.device.cuda.graphs import wrap_cuda_graph, is_cuda_graph_supported, cuda_graph_transform\n'), ((2321, 2371), 'paddle.static.program_guard', 'paddle.static.program_guard', (['main_prog', 'start_prog'], {}), '(main_prog, start_prog)\n', (2348, 2371), False, 'import paddle\n'), ((2790, 2809), 'paddle.CUDAPlace', 'paddle.CUDAPlace', (['(0)'], {}), '(0)\n', (2806, 2809), False, 'import paddle\n'), ((3162, 3212), 'paddle.static.program_guard', 'paddle.static.program_guard', (['main_prog', 'start_prog'], {}), '(main_prog, start_prog)\n', (3189, 3212), False, 'import paddle\n'), ((3288, 3307), 'paddle.CUDAPlace', 'paddle.CUDAPlace', (['(0)'], {}), '(0)\n', (3304, 3307), False, 'import paddle\n'), ((3521, 3546), 'paddle.device.cuda.graphs.is_cuda_graph_supported', 'is_cuda_graph_supported', ([], {}), '()\n', (3544, 3546), False, 'from 
paddle.device.cuda.graphs import wrap_cuda_graph, is_cuda_graph_supported, cuda_graph_transform\n'), ((3584, 3609), 'numpy.random.random', 'np.random.random', (['(3, 10)'], {}), '((3, 10))\n', (3600, 3609), True, 'import numpy as np\n')] |
#%%
import os
import json
import pathlib
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from collections import defaultdict
def tsplot(d, x=None, ax=None, label=None, color=None, ci=True):
    """Plot the column-wise mean of ``d`` with a shaded error band.

    ``d`` is indexed (run, timestep); the band is the 95% confidence
    interval of the mean across runs when ``ci`` is True, otherwise one
    standard deviation. Plots onto ``ax`` if given, else onto ``plt``.
    """
    target = plt if ax is None else ax
    mean_curve = d.mean(0)
    spread = d.std(0)
    if ci:
        # 95% CI of the mean over the d.shape[0] runs.
        spread = 1.96 * spread / d.shape[0] ** 0.5
    xs = np.arange(len(mean_curve)) if x is None else x
    target.plot(xs, mean_curve, label=label, color=color)
    target.fill_between(xs, mean_curve + spread, mean_curve - spread,
                        alpha=0.2, color=color)
def get_info(runs_path, main_path, f=None):
    """Collect per-run metric traces and hyperparameters for an experiment.

    Each run directory under ``runs_path / main_path`` is expected to hold
    a ``results.json`` (a list of ``[value, wall_time, step]`` entries) and
    an ``hparams.json``; an optional ``time.json`` supplies a
    ``mean_poptim_time`` entry.

    Args:
        runs_path (pathlib.Path): root directory containing experiments.
        main_path (str or Path): experiment folder name under ``runs_path``.
        f (callable, optional): predicate over the hparams dict; runs for
            which ``f(hp)`` is falsy are skipped (with a message).

    Returns:
        tuple: ``(values, times, steps, hparams)`` — parallel lists with one
        entry per accepted run; times are shifted to start at 0.
    """
    values, times, steps = [], [], []
    hparams = []
    for path in (runs_path / main_path).iterdir():
        # Directories without a readable results.json are not runs: skip.
        try:
            with open(path / "results.json", "r") as dfile:
                data = json.load(dfile)
        except (OSError, json.JSONDecodeError):
            continue
        # time.json is optional; fall back to None when absent or malformed.
        try:
            with open(path / "time.json", "r") as file:
                optim_time = json.load(file)["mean_poptim_time"]
        except (OSError, json.JSONDecodeError, KeyError):
            optim_time = None
        v = np.array([d[0] for d in data])
        time = np.array([d[1] for d in data])
        time = time - time[0]  # convert wall-clock stamps to runtime
        step = np.array([d[2] for d in data])
        with open(path / "hparams.json", "r") as dfile:
            hp = json.load(dfile)
        hp["time"] = optim_time
        if f is not None and not f(hp):
            print("skipping", path)
            continue  # skip runs rejected by the filter
        values.append(v)
        times.append(time)
        steps.append(step)
        hparams.append(hp)
    print(f"{main_path} found {len(values)} runs...")
    return values, times, steps, hparams
def path2valuestimes(runs_path, main_path, f=None, hparams=False, debug=False):
    """Load all runs of an experiment and stack them into arrays.

    Thin wrapper around ``get_info`` that stacks the per-run traces into
    (n_runs, n_points) arrays; all runs must therefore have equal length.
    Returns ``(values, times, steps)``, plus the hparams list when
    ``hparams`` is True.
    """
    raw_values, raw_times, raw_steps, hp = get_info(runs_path, main_path, f)
    if debug:
        print([v.shape for v in raw_values], [h["seed"] for h in hp])
    stacked = tuple(
        np.stack(part) for part in (raw_values, raw_times, raw_steps)
    )
    if hparams:
        return stacked + (hp,)
    return stacked
| [
"numpy.stack",
"json.load",
"numpy.array"
] | [((2111, 2127), 'numpy.stack', 'np.stack', (['values'], {}), '(values)\n', (2119, 2127), True, 'import numpy as np\n'), ((2140, 2155), 'numpy.stack', 'np.stack', (['times'], {}), '(times)\n', (2148, 2155), True, 'import numpy as np\n'), ((2168, 2183), 'numpy.stack', 'np.stack', (['steps'], {}), '(steps)\n', (2176, 2183), True, 'import numpy as np\n'), ((1045, 1061), 'json.load', 'json.load', (['dfile'], {}), '(dfile)\n', (1054, 1061), False, 'import json\n'), ((1096, 1126), 'numpy.array', 'np.array', (['[d[0] for d in data]'], {}), '([d[0] for d in data])\n', (1104, 1126), True, 'import numpy as np\n'), ((1142, 1172), 'numpy.array', 'np.array', (['[d[1] for d in data]'], {}), '([d[1] for d in data])\n', (1150, 1172), True, 'import numpy as np\n'), ((1233, 1263), 'numpy.array', 'np.array', (['[d[2] for d in data]'], {}), '([d[2] for d in data])\n', (1241, 1263), True, 'import numpy as np\n'), ((1489, 1505), 'json.load', 'json.load', (['dfile'], {}), '(dfile)\n', (1498, 1505), False, 'import json\n'), ((947, 962), 'json.load', 'json.load', (['file'], {}), '(file)\n', (956, 962), False, 'import json\n')] |
"""
misc_plots.py
This script contains miscellaneous plotting functions.
Authors: <NAME>
Date: January, 2021
Note: this code uses python 3.7.
"""
import logging
import numpy as np
import pandas as pd
from util import logger_util, gen_util, plot_util
from sess_util import sess_plot_util
from plot_fcts import plot_helper_fcts, seq_plots
logger = logging.getLogger(__name__)
TAB = " "
#############################################
def plot_decoder_data(scores_df, analyspar, sesspar, permpar, figpar, 
                      title=None):
    """
    plot_decoder_data(scores_df, analyspar, sesspar, permpar, figpar)

    Plots Gabor decoding scores across sessions.

    Required args:
        - scores_dfs (pd.DataFrame):
            dataframe with logistic regression score statistics, shuffled score 
            confidence intervals, and test set p-values for each 
            line/plane/session, in addition to the basic sess_df columns
        - analyspar (AnalysPar): 
            named tuple containing analysis parameters
        - sesspar (SessPar): 
            named tuple containing session parameters
        - permpar (PermPar): 
            named tuple containing permutation parameters
        - figpar (dict): 
            dictionary containing the following figure parameter dictionaries
            ["init"] (dict): dictionary with figure initialization parameters
            ["save"] (dict): dictionary with figure saving parameters
            ["dirs"] (dict): dictionary with additional figure parameters

    Optional args:
        - title (str):
            plot title
            default: None

    Returns:
        - ax (2D array):
            array of subplots
    """

    score_col = "test_balanced_accuracy"

    # add expected columns, and convert to percentage
    # (work on a deep copy so the caller's dataframe is untouched)
    scores_df = scores_df.copy(deep=True)
    # each cell of score_col becomes [stat, err, ...]: the statistic first,
    # followed by the error value(s) (1 or 2, depending on the error type)
    stat = scores_df[f"{score_col}_stat"].to_numpy().reshape(-1, 1) * 100
    error = np.asarray(scores_df[f"{score_col}_err"].tolist()) * 100
    error = error.reshape(len(error), -1)
    scores_df[score_col] = np.concatenate([stat, error], axis=1).tolist()

    # shuffled (null) confidence intervals, also converted to percentage
    null_CI_perc = np.asarray(scores_df[f"{score_col}_null_CIs"].tolist()) * 100
    scores_df["null_CIs"] = null_CI_perc.tolist()
    scores_df["p_vals"] = scores_df[f"{score_col}_p_vals"]

    ax = seq_plots.plot_sess_data(
        scores_df, 
        analyspar=analyspar, 
        sesspar=sesspar, 
        permpar=permpar, 
        figpar=figpar, 
        title=title, 
        wide=True,
        between_sess_sig=False,
        data_col=score_col,
        decoder_data=True,
        )

    return ax
#############################################
def plot_snr_sigmeans_nrois(data_df, figpar, datatype="snrs", title="ROI SNRs"):
    """
    plot_snr_sigmeans_nrois(data_df, figpar)

    Plots SNR, signal means or number of ROIs, depending on the case.

    Required args:
        - data_df (pd.DataFrame):
            dataframe with SNR, signal mean or number of ROIs data for each 
            session, in addition to the basic sess_df columns
        - figpar (dict):
            dictionary containing the following figure parameter dictionaries
            ["init"] (dict): dictionary with figure initialization parameters
            ["save"] (dict): dictionary with figure saving parameters
            ["dirs"] (dict): dictionary with additional figure parameters

    Optional args:
        - datatype (str):
            type of data to plot, also corresponding to column name
            default: "snrs"
        - title (str):
            plot title
            default: "ROI SNRs"

    Returns:
        - ax (2D array):
            array of subplots
    """

    # full session range covered by the data
    sess_ns = np.arange(data_df.sess_ns.min(), data_df.sess_ns.max() + 1)

    figpar = sess_plot_util.fig_init_linpla(figpar, kind="reg")
    figpar["init"]["sharey"] = "row"
    figpar["init"]["subplot_hei"] = 4.4
    figpar["init"]["gs"] = {"wspace": 0.2, "hspace": 0.2}
    # nrois panels are narrower than the boxplot panels
    if datatype != "nrois":
        figpar["init"]["subplot_wid"] = 3.2
    else:
        figpar["init"]["subplot_wid"] = 2.5

    fig, ax = plot_util.init_fig(4, **figpar["init"])
    if title is not None:
        fig.suptitle(title, y=0.97, weight="bold")

    # one subplot per line/plane combination
    for (line, plane), lp_df in data_df.groupby(["lines", "planes"]):
        li, pl, col, dash = plot_helper_fcts.get_line_plane_idxs(line, plane)
        sub_ax = ax[pl, li]

        # reference line: SNR of 1 or signal mean of 0
        if datatype == "snrs":
            sub_ax.axhline(y=1, ls=plot_helper_fcts.HDASH, c="k", lw=3.0, 
                alpha=0.5)
        elif datatype == "signal_means":
            sub_ax.axhline(y=0, ls=plot_helper_fcts.HDASH, c="k", lw=3.0, 
                alpha=0.5)
        elif datatype != "nrois":
            gen_util.accepted_values_error(
                "datatype", datatype, ["snrs", "signal_means", "nrois"]
            )

        # ROI counts are plotted per mouse instead of as boxplots
        if datatype == "nrois":
            plot_nrois(sub_ax, lp_df, sess_ns=sess_ns, col=col, dash=dash)
            continue

        # gather data only for sessions that are actually present
        data = []
        use_sess_ns = []
        for sess_n in sess_ns:
            rows = lp_df.loc[lp_df["sess_ns"] == sess_n]
            if len(rows) > 0:
                use_sess_ns.append(sess_n)
                data.append(np.concatenate(rows[datatype].tolist()))

        # one notched boxplot per session, whiskers at the 5th/95th percentiles
        sub_ax.boxplot(
            data, positions=use_sess_ns, notch=True, patch_artist=True, 
            whis=[5, 95], widths=0.6,
            boxprops=dict(facecolor="white", color=col, linewidth=3.0),
            capprops=dict(color=col, linewidth=3.0),
            whiskerprops=dict(color=col, linewidth=3.0),
            flierprops=dict(color=col, markeredgecolor=col, markersize=8),
            medianprops=dict(color=col, linewidth=3.0)
        )

    sess_plot_util.format_linpla_subaxes(ax, datatype="roi", 
        ylab="", xticks=sess_ns, kind="reg", single_lab=True)

    return ax
#############################################
def plot_nrois(sub_ax, sess_df, sess_ns=None, col="k", dash=None):
    """
    plot_nrois(sub_ax, sess_df)

    Plots the number of ROIs for each mouse, across sessions, one errorbar
    line per mouse (sessions without data appear as NaN gaps).

    Required args:
        - sub_ax (plt subplot):
            subplot
        - sess_df (pd.DataFrame)
            dataframe with the basic sess_df columns

    Optional args:
        - sess_ns (array-like):
            session numbers to use as x values. Inferred if None.
            default: None
        - col (str):
            plotting color
            default: "k"
        - dash (str or tuple):
            dash style
            default: None
    """

    if sess_ns is None:
        sess_ns = np.arange(sess_df.sess_ns.min(), sess_df.sess_ns.max() + 1)

    unique_mice = sorted(sess_df["mouse_ns"].unique())
    # one color per mouse, in a range around the base color
    mouse_cols = plot_util.get_hex_color_range(
        len(unique_mice), col=col, interval=plot_helper_fcts.MOUSE_COL_INTERVAL
    )
    for mouse_n, mouse_col in zip(unique_mice, mouse_cols):
        nrois = []
        for sess_n in sess_ns:
            match = sess_df.loc[
                (sess_df["mouse_ns"] == mouse_n) &
                (sess_df["sess_ns"] == sess_n)]
            if len(match) > 1:
                raise RuntimeError("No more than one match expected.")
            # NaN when the mouse has no data for this session
            nrois.append(match["nrois"].values[0] if len(match) else np.nan)

        plot_util.plot_errorbars(
            sub_ax, nrois, x=sess_ns, color=mouse_col, alpha=0.6, ls=dash,
            lw=5, mew=5, markersize=8, xticks="auto"
        )
#############################################
def set_symlog_scale(ax, log_base=2, col_per_grp=3, n_ticks=4):
    """
    set_symlog_scale(ax)

    Converts y axis to symmetrical log scale (log scale with a linear range 
    to 0), and updates ticks.

    Required args:
        - ax (2D array):
            array of subplots

    Optional args:
        - log_base (int):
            log base for the log scale
            default: 2
        - col_per_grp (int):
            number of columns in subplot axis groups that share y axis markers
            default: 3
        - n_ticks (int):
            number of log ticks
            default: 4
    """

    for i in range(ax.shape[0]):
        for j in range(ax.shape[1]):
            sub_ax = ax[i, j]
            if i == 0 and j == 0:
                # adjust to avoid bins going to negative infinity
                base_lin = sub_ax.get_ylim()[0]
            sub_ax.set_yscale(
                "symlog", base=log_base, linthresh=base_lin, linscale=0.5
                )
            yticks = sub_ax.get_yticks()
            # thin the ticks down to roughly n_ticks
            # (bug fix: n_ticks was previously overwritten with 4 here,
            # silently ignoring the argument; also guard against n == 0)
            n = max(len(yticks) // n_ticks, 1)
            yticks = [
                ytick for y, ytick in enumerate(yticks) if not(y % n)
                ]
            sub_ax.set_yticks(yticks)

            if not (j % col_per_grp):
                # use minor ticks to create a break in the axis between the 
                # linear and log ranges
                yticks = sub_ax.get_yticks()
                low = yticks[0] + (base_lin - yticks[0]) * 0.55
                high = base_lin
                sub_ax.set_yticks([low, high], minor=True)
                sub_ax.set_yticklabels(["", ""], minor=True)
                sub_ax.tick_params(
                    axis="y", direction="inout", which="minor", length=16, 
                    width=3
                    )
            if i == 1:
                # render integer x ticks without a decimal point
                xticks = sub_ax.get_xticks()
                xticks = [int(t) if int(t) == t else t for t in xticks]
                sub_ax.set_xticks(xticks)
                sub_ax.set_xticklabels(xticks, fontweight="bold")
#############################################
def plot_roi_correlations(corr_df, figpar, title=None, log_scale=True):
    """
    plot_roi_correlations(corr_df, figpar)
    Plots correlation histograms.
    Required args:
        - corr_df (pd.DataFrame):
            dataframe with one row per session/line/plane, and the
            following columns, in addition to the basic sess_df columns:
            - bin_edges (list): first and last bin edge
            - corrs_binned (list): number of correlation values per bin
        - figpar (dict):
            dictionary containing the following figure parameter dictionaries
            ["init"] (dict): dictionary with figure initialization parameters
            ["save"] (dict): dictionary with figure saving parameters
            ["dirs"] (dict): dictionary with additional figure parameters
    Optional args:
        - title (str):
            plot title
            default: None
        - log_scale (bool):
            if True, a near logarithmic scale is used for the y axis (with a
            linear range to reach 0, and break marks to mark the transition
            from linear to log range)
            default: True
    Returns:
        - ax (2D array):
            array of subplots
    """
    # full inclusive range of session numbers present in the dataframe
    sess_ns = np.arange(corr_df.sess_ns.min(), corr_df.sess_ns.max() + 1)
    n_sess = len(sess_ns)
    figpar = sess_plot_util.fig_init_linpla(
        figpar, kind="prog", n_sub=len(sess_ns)
    )
    figpar["init"]["subplot_hei"] = 3.0
    figpar["init"]["subplot_wid"] = 2.8
    figpar["init"]["sharex"] = log_scale
    if log_scale:
        # shared y axis so the symlog break marks line up across subplots
        figpar["init"]["sharey"] = True
    fig, ax = plot_util.init_fig(4 * len(sess_ns), **figpar["init"])
    if title is not None:
        fig.suptitle(title, y=1.02, weight="bold")
    sess_plot_util.format_linpla_subaxes(ax, datatype="roi",
        ylab="Density", xlab="Correlation", sess_ns=sess_ns, kind="prog",
        single_lab=True)
    log_base = 2
    for (line, plane), lp_df in corr_df.groupby(["lines", "planes"]):
        li, pl, col, _ = plot_helper_fcts.get_line_plane_idxs(line, plane)
        for s, sess_n in enumerate(sess_ns):
            sess_rows = lp_df.loc[lp_df["sess_ns"] == sess_n]
            if len(sess_rows) == 0:
                continue
            elif len(sess_rows) > 1:
                raise RuntimeError("Expected exactly one row.")
            sess_row = sess_rows.loc[sess_rows.index[0]]
            # subplot layout: planes as rows, (line group x session) as columns
            sub_ax = ax[pl, s + li * n_sess]
            weights = np.asarray(sess_row["corrs_binned"])
            bin_edges = np.linspace(*sess_row["bin_edges"], len(weights) + 1)
            # plot pre-binned data: one weighted "sample" per bin
            sub_ax.hist(
                bin_edges[:-1], bin_edges, weights=weights, color=col,
                alpha=0.6, density=True
            )
            # vertical reference line at zero correlation
            sub_ax.axvline(
                0, ls=plot_helper_fcts.VDASH, c="k", lw=3.0, alpha=0.5
            )
            sub_ax.spines["bottom"].set_visible(True)
            sub_ax.tick_params(axis="x", which="both", bottom=True, top=False)
            if log_scale:
                sub_ax.set_yscale("log", base=log_base)
                sub_ax.set_xlim(-1, 1)
            else:
                sub_ax.autoscale(axis="x", tight=True)
                sub_ax.autoscale(axis="y", tight=True)
    if log_scale: # update x ticks
        set_symlog_scale(ax, log_base=log_base, col_per_grp=n_sess, n_ticks=4)
    else: # update x and y ticks
        for i in range(ax.shape[0]):
            for j in range(int(ax.shape[1] / n_sess)):
                sub_axes = ax[i, j * n_sess : (j + 1) * n_sess]
                plot_util.set_interm_ticks(
                    sub_axes, 4, axis="y", share=True, update_ticks=True
                )
    plot_util.set_interm_ticks(
        ax, 4, axis="x", share=log_scale, update_ticks=True, fontweight="bold"
    )
    return ax
| [
"util.plot_util.plot_errorbars",
"numpy.concatenate",
"numpy.asarray",
"plot_fcts.plot_helper_fcts.get_line_plane_idxs",
"util.gen_util.accepted_values_error",
"sess_util.sess_plot_util.fig_init_linpla",
"util.plot_util.init_fig",
"sess_util.sess_plot_util.format_linpla_subaxes",
"util.plot_util.set... | [((353, 380), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (370, 380), False, 'import logging\n'), ((2230, 2430), 'plot_fcts.seq_plots.plot_sess_data', 'seq_plots.plot_sess_data', (['scores_df'], {'analyspar': 'analyspar', 'sesspar': 'sesspar', 'permpar': 'permpar', 'figpar': 'figpar', 'title': 'title', 'wide': '(True)', 'between_sess_sig': '(False)', 'data_col': 'score_col', 'decoder_data': '(True)'}), '(scores_df, analyspar=analyspar, sesspar=sesspar,\n permpar=permpar, figpar=figpar, title=title, wide=True,\n between_sess_sig=False, data_col=score_col, decoder_data=True)\n', (2254, 2430), False, 'from plot_fcts import plot_helper_fcts, seq_plots\n'), ((3694, 3744), 'sess_util.sess_plot_util.fig_init_linpla', 'sess_plot_util.fig_init_linpla', (['figpar'], {'kind': '"""reg"""'}), "(figpar, kind='reg')\n", (3724, 3744), False, 'from sess_util import sess_plot_util\n'), ((4042, 4081), 'util.plot_util.init_fig', 'plot_util.init_fig', (['(4)'], {}), "(4, **figpar['init'])\n", (4060, 4081), False, 'from util import logger_util, gen_util, plot_util\n'), ((5661, 5776), 'sess_util.sess_plot_util.format_linpla_subaxes', 'sess_plot_util.format_linpla_subaxes', (['ax'], {'datatype': '"""roi"""', 'ylab': '""""""', 'xticks': 'sess_ns', 'kind': '"""reg"""', 'single_lab': '(True)'}), "(ax, datatype='roi', ylab='', xticks=\n sess_ns, kind='reg', single_lab=True)\n", (5697, 5776), False, 'from sess_util import sess_plot_util\n'), ((11446, 11589), 'sess_util.sess_plot_util.format_linpla_subaxes', 'sess_plot_util.format_linpla_subaxes', (['ax'], {'datatype': '"""roi"""', 'ylab': '"""Density"""', 'xlab': '"""Correlation"""', 'sess_ns': 'sess_ns', 'kind': '"""prog"""', 'single_lab': '(True)'}), "(ax, datatype='roi', ylab='Density',\n xlab='Correlation', sess_ns=sess_ns, kind='prog', single_lab=True)\n", (11482, 11589), False, 'from sess_util import sess_plot_util\n'), ((13423, 13526), 'util.plot_util.set_interm_ticks', 
'plot_util.set_interm_ticks', (['ax', '(4)'], {'axis': '"""x"""', 'share': 'log_scale', 'update_ticks': '(True)', 'fontweight': '"""bold"""'}), "(ax, 4, axis='x', share=log_scale, update_ticks=\n True, fontweight='bold')\n", (13449, 13526), False, 'from util import logger_util, gen_util, plot_util\n'), ((4258, 4307), 'plot_fcts.plot_helper_fcts.get_line_plane_idxs', 'plot_helper_fcts.get_line_plane_idxs', (['line', 'plane'], {}), '(line, plane)\n', (4294, 4307), False, 'from plot_fcts import plot_helper_fcts, seq_plots\n'), ((7303, 7437), 'util.plot_util.plot_errorbars', 'plot_util.plot_errorbars', (['sub_ax', 'nrois'], {'x': 'sess_ns', 'color': 'mouse_col', 'alpha': '(0.6)', 'ls': 'dash', 'lw': '(5)', 'mew': '(5)', 'markersize': '(8)', 'xticks': '"""auto"""'}), "(sub_ax, nrois, x=sess_ns, color=mouse_col, alpha=\n 0.6, ls=dash, lw=5, mew=5, markersize=8, xticks='auto')\n", (7327, 7437), False, 'from util import logger_util, gen_util, plot_util\n'), ((11717, 11766), 'plot_fcts.plot_helper_fcts.get_line_plane_idxs', 'plot_helper_fcts.get_line_plane_idxs', (['line', 'plane'], {}), '(line, plane)\n', (11753, 11766), False, 'from plot_fcts import plot_helper_fcts, seq_plots\n'), ((1978, 2015), 'numpy.concatenate', 'np.concatenate', (['[stat, error]'], {'axis': '(1)'}), '([stat, error], axis=1)\n', (1992, 2015), True, 'import numpy as np\n'), ((12163, 12199), 'numpy.asarray', 'np.asarray', (["sess_row['corrs_binned']"], {}), "(sess_row['corrs_binned'])\n", (12173, 12199), True, 'import numpy as np\n'), ((13299, 13384), 'util.plot_util.set_interm_ticks', 'plot_util.set_interm_ticks', (['sub_axes', '(4)'], {'axis': '"""y"""', 'share': '(True)', 'update_ticks': '(True)'}), "(sub_axes, 4, axis='y', share=True, update_ticks=True\n )\n", (13325, 13384), False, 'from util import logger_util, gen_util, plot_util\n'), ((4664, 4755), 'util.gen_util.accepted_values_error', 'gen_util.accepted_values_error', (['"""datatype"""', 'datatype', "['snrs', 'signal_means', 'nrois']"], {}), 
"('datatype', datatype, ['snrs',\n 'signal_means', 'nrois'])\n", (4694, 4755), False, 'from util import logger_util, gen_util, plot_util\n')] |
# -*- coding: utf8 -*-
# Copyright 2019 JSALT2019 Distant Supervision Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import errno
import functools
import importlib
import json
import os
import re
import six
import json
import math
import sys
import numpy as np
import torch
import torch.nn as nn
import logging
import editdistance
# Module-level cache of (msg, *other_keys) tuples already emitted by
# log(..., once=True), used to suppress repeated messages.
log_msg_cache = set()
class Reshape(nn.Module):
    """Module that reshapes its input, keeping the batch dimension and
    viewing the remaining elements as `shape`."""
    def __init__(self, *shape):
        super(Reshape, self).__init__()
        self.shape = shape
    def forward(self, x):
        batch = x.size(0)
        return x.contiguous().view(batch, *self.shape)
class Permute(nn.Module):
    """Module that permutes input dimensions to `dims` and returns a
    contiguous tensor."""
    def __init__(self, *dims):
        super(Permute, self).__init__()
        self.dims = dims
    def forward(self, x):
        permuted = x.permute(*self.dims)
        return permuted.contiguous()
def is_scalar(t):
    """Return True for Python numbers and 0-dimensional torch tensors."""
    if isinstance(t, (float, int)):
        return True
    return torch.is_tensor(t) and len(t.size()) == 0
def maybe_get_scalar(t):
    """Return (True, value) when `t` is a scalar (Python number or 0-dim
    tensor), otherwise (False, None)."""
    if isinstance(t, (float, int)):
        return True, t
    if torch.is_tensor(t) and t.dim() == 0:
        return True, t.item()
    return False, None
def safe_squeeze(t, dim):
    """Squeeze dimension `dim`, asserting the dimension really has size 1
    so the squeeze cannot silently be a no-op."""
    size_along_dim = t.size(dim)
    assert size_along_dim == 1
    return t.squeeze(dim)
class DebugStats:
    """Collects per-layer activation statistics (mean and variance) during
    forward and backward passes via module hooks, and renders them as
    scatter plots through a logger that accepts matplotlib figures."""
    def __init__(self, logger):
        self.reset()
        self.logger = logger
    def reset(self):
        # Each entry is [layer_name, mean, variance].
        self.acts = []
        self.bw_acts = []
    @staticmethod
    def make_range(values):
        # Axis limits padded by 10% of the data range on each side.
        m = min(values)
        M = max(values)
        r = M - m
        return (m - 0.1 * r, M + 0.1 * r)
    def _show(self, what):
        # Two-panel figure: layer names (index 0 of each entry) on the y
        # axis, means on the left, variances (symlog x) on the right.
        import matplotlib.pyplot as plt
        xs = [x[1] for x in what]
        fig = plt.figure()
        ax = fig.add_subplot(121)
        ax.title.set_text("mean")
        ax.scatter(x=xs, y=[x[0] for x in what])
        ax.set_xlim(DebugStats.make_range(xs))
        for item in ([ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() +
                     ax.get_yticklabels()):
            item.set_fontsize(8)
        xs = [x[2] for x in what]
        ax = fig.add_subplot(122)
        ax.title.set_text("variance")
        ax.scatter(x=xs, y=[x[0] for x in what])
        ax.get_yaxis().set_visible(False)
        ax.set_xscale("symlog")
        ax.set_xlim(DebugStats.make_range(xs))
        for item in ([ax.xaxis.label, ax.yaxis.label] + ax.get_xticklabels() +
                     ax.get_yticklabels()):
            item.set_fontsize(8)
        return fig
    def show(self):
        # Skip figure construction entirely when the logger is inactive.
        if not self.logger.is_currently_logging():
            return
        if len(self.acts) != 0:
            self.logger.log_mpl_figure("forward stats", self._show(self.acts))
        if len(self.bw_acts) != 0:
            # backward stats are reversed so layers appear in forward order
            self.logger.log_mpl_figure(
                "backward stats", self._show(list(reversed(self.bw_acts)))
            )
    def save(self, name, mod, mod_in, mod_out, bw=False):
        # Hook callback: record mean/variance of the module output.
        # For backward hooks, mod_out may be a tuple of gradients.
        if bw:
            if isinstance(mod_out, tuple):
                mod_out = mod_out[0]
            m = mod_out.mean().detach().cpu().item()
            v = mod_out.var().detach().cpu().item()
            self.bw_acts.append([name, m, v])
        else:
            m = mod_out.mean().detach().cpu().item()
            v = mod_out.var().detach().cpu().item()
            self.acts.append([name, m, v])
    # Module types whose activations are recorded by attach().
    conv = (nn.Conv1d, nn.Conv2d, nn.Conv3d)
    convt = (nn.ConvTranspose1d, nn.ConvTranspose2d, nn.ConvTranspose3d)
    convs = conv + convt
    bn = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)
    ln = (nn.LayerNorm,)
    inn = (nn.InstanceNorm1d, nn.InstanceNorm2d, nn.InstanceNorm3d)
    norms = bn + inn + ln
    layer_dbg = (nn.Linear,) + convs + norms
    @staticmethod
    def attach(model, logger):
        # Register forward/backward hooks on every conv/linear/norm layer.
        # nm=nm binds the layer name at definition time (avoids the
        # late-binding closure pitfall).
        # NOTE(review): register_backward_hook is deprecated in newer
        # torch versions in favor of register_full_backward_hook.
        dbg = DebugStats(logger)
        for nm, m in model.named_modules():
            if isinstance(m, DebugStats.layer_dbg):
                m.register_forward_hook(lambda *x, nm=nm: dbg.save(nm, *x))
                m.register_backward_hook(lambda *x, nm=nm: dbg.save(nm, *x, bw=True))
        return dbg
def log(msg, extra_msg='', other_keys=tuple(), level=logging.INFO, once=False):
    """Log `msg` followed by `extra_msg` at the given level.

    With once=True, a message keyed by (msg, *other_keys) is emitted only
    the first time, using the module-level `log_msg_cache`."""
    global log_msg_cache
    if log_msg_cache is None:
        log_msg_cache = set()
    cache_key = (msg, *other_keys)
    if once and cache_key in log_msg_cache:
        return
    logging.log(level, msg + ' ' + extra_msg)
    if once:
        log_msg_cache.add(cache_key)
def edit_distance(x, y):
    """Return the Levenshtein (edit) distance between sequences x and y,
    computed with the classic dynamic-programming table: the minimal
    number of insertions, deletions and substitutions turning x into y."""
    table = np.zeros((len(x) + 1, len(y) + 1), dtype="int64")
    table[:, 0] = np.arange(len(x) + 1)
    table[0, :] = np.arange(len(y) + 1)
    for i in range(1, len(x) + 1):
        for j in range(1, len(y) + 1):
            sub_cost = 0 if x[i - 1] == y[j - 1] else 1
            table[i][j] = min(
                table[i - 1][j] + 1,  # insertion
                table[i][j - 1] + 1,  # deletion
                table[i - 1][j - 1] + sub_cost,
            )
    return table[-1][-1]
def edit_distance_with_stats(x, y):
    """Return the edit distance between x and y along with a breakdown of
    the operations in one minimal edit script.

    Returns:
        tuple: (distance,
                {"ins": n_insertions, "del": n_deletions, "sub": n_substitutions})

    Fix: removed the leftover debug `print(i, j)` statements that spammed
    stdout during the backtracking pass.
    """
    dp = np.zeros((len(x) + 1, len(y) + 1), dtype="int64")
    op = np.zeros((len(x) + 1, len(y) + 1), dtype="int64")
    for i in range(len(x) + 1):
        dp[i][0] = i
        op[i][0] = 0
    for i in range(len(y) + 1):
        dp[0][i] = i
        op[0][i] = 1
    for i in range(1, len(x) + 1):
        for j in range(1, len(y) + 1):
            operations = (
                dp[i - 1][j] + 1,  # insertion
                dp[i][j - 1] + 1,  # deletion
                dp[i - 1][j - 1] + (0 if x[i - 1] == y[j - 1] else 1),
            )
            choosen_op = np.argmin(operations)
            op[i][j] = choosen_op
            dp[i][j] = operations[choosen_op]
    # Backtrack through the op table from the bottom-right corner,
    # counting each operation that actually increased the distance
    # (matches are not counted).
    i = len(x)
    j = len(y)
    operations = [0, 0, 0]
    while i >= 0 and j >= 0:
        old_op = op[i][j]
        ni = i if old_op == 1 else i - 1
        nj = j if old_op == 0 else j - 1
        if dp[i][j] > dp[ni][nj]:
            operations[old_op] += 1
        i = ni
        j = nj
    return (
        dp[-1][-1],
        {"ins": operations[0], "del": operations[1], "sub": operations[2]},
    )
"""
NOTE: the example decoded sequence for 'tee' should include e.g.
' 0 0 0 e 0 e 0 0' i.e. removing repetitions
does not remove 'ee' in decoded sequence.
"""
def remove_reps_blanks(preds):
    """Collapse consecutive repeated labels, then drop CTC blanks (label 0)."""
    collapsed = []
    last = -1
    for label in preds:
        if label != last:
            last = label
            if label != 0:
                collapsed.append(label)
    return collapsed


def greedy_ctc_decode(log_probs, log_prob_lens, return_raw=False):
    """Greedy (best-path) CTC decoding.

    log_probs has shape (t, bs, nc); log_prob_lens gives the valid length
    of each batch item. Returns a list of decoded label sequences, plus
    the raw per-frame argmax sequences when return_raw=True."""
    frame_preds = log_probs.argmax(-1).to("cpu").int().numpy()
    batch_size = log_prob_lens.size(0)
    raw_per_item = [frame_preds[: log_prob_lens[b], b] for b in range(batch_size)]
    decodes = [remove_reps_blanks(raw) for raw in raw_per_item]
    if return_raw:
        return decodes, raw_per_item
    return decodes
def tensorList2list(h_):
    """Convert a sequence of 0-dim tensors to a list of Python scalars."""
    return [element.item() for element in h_]
def error_rate(hyps, targets):
    """Compute the label/character error rate (in %) between hypothesis
    and target sequences: total edit distance divided by total target
    length, times 100."""
    assert len(hyps) == len(targets)
    verbose = 0
    total_edits = 0.0
    total_len = 0.0
    for idx, (hyp, tgt) in enumerate(zip(hyps, targets)):
        distance = editdistance.distance(np.array(hyp), np.array(tgt))
        if verbose > 0:
            # If necessary, get 'alphabet' as argument after which you can compare strings.
            # CHECK: Make sure no blanks/ class #0 in here
            print("error_rate() [" + str(idx) + "] hyps: " + str(tensorList2list(hyp)))
            print("error_rate() [" + str(idx) + "] targets: " + str(tensorList2list(tgt)))
            print("error_rate() [" + str(idx) + "] distance: " + str(distance))
        total_edits += distance
        total_len += len(tgt)
    # Character error rate (CER) == label error rate (LER), in percent.
    return (total_edits * 100.0) / total_len
def get_class(str_or_class, default_mod=None):
    """Resolve a dotted-path string to a class (or any module attribute),
    or pass a class object through unchanged.

    Args:
        str_or_class: a class object, or a string like "pkg.mod.Name".
        default_mod: module to import from when the string has no module
            part.

    Raises:
        ValueError: when the string has no module part and no default_mod
            is given.
    """
    # The file already requires Python 3 (it uses f-strings), so a plain
    # `str` check replaces the former `six.string_types`.
    if not isinstance(str_or_class, str):
        return str_or_class
    mod_name, _, class_name = str_or_class.rpartition(".")
    if mod_name:
        mod = importlib.import_module(mod_name)
    elif default_mod is not None:
        mod = importlib.import_module(default_mod)
    else:
        raise ValueError("Specify a module for %s" % (str_or_class,))
    return getattr(mod, class_name)


def construct_from_kwargs(
    object_or_kwargs, default_mod=None, additional_parameters=None
):
    """Instantiate an object described by a kwargs dict.

    If `object_or_kwargs` is not a dict it is returned as-is (in which
    case `additional_parameters` must be empty). Otherwise the dict must
    contain a "class_name" entry (resolved via get_class); the remaining
    entries, plus `additional_parameters`, become constructor kwargs.
    """
    if not isinstance(object_or_kwargs, dict):
        assert not additional_parameters
        return object_or_kwargs
    object_kwargs = dict(object_or_kwargs)
    klass = get_class(object_kwargs.pop("class_name"), default_mod)
    if additional_parameters:
        object_kwargs.update(additional_parameters)
    return klass(**object_kwargs)
def uniq(inlist):
    """
    Behaves like UNIX uniq command - removes repeating items.
    Returns list of (start, end) pairs such that list[start:end] has only
    one distinct element.

    Fix: the final segment now ends at len(inlist) instead of the loop
    variable `i + 1`; for a single-element list the loop body never runs,
    so `i` was unbound and the function raised NameError.
    """
    if inlist == []:
        return []
    outl = []
    current_start = 0
    current_element = inlist[0]
    for i, elem in enumerate(inlist[1:], start=1):
        if current_element != elem:
            outl.append((current_start, i))
            current_start = i
            current_element = elem
    # close the final run; equals i + 1 when the loop ran at least once
    outl.append((current_start, len(inlist)))
    return outl
def ensure_dir(path):
    """Create directory `path` (with parents) if it does not already exist.

    Uses os.makedirs(exist_ok=True): unlike the previous errno-based
    check, this still raises when `path` exists but is not a directory,
    instead of silently succeeding.
    """
    os.makedirs(path, exist_ok=True)
def conv_weights_xavier_init(m):
    """Xavier-uniform initialization for Conv*/Linear weights; biases are
    zeroed. Intended for use with `model.apply(...)`; modules of other
    types are left untouched."""
    classname = m.__class__.__name__
    if 'Conv' not in classname and 'Linear' not in classname:
        return
    if hasattr(m, 'weight') and m.weight.requires_grad:
        try:
            nn.init.xavier_uniform_(m.weight)
            if m.bias is not None:
                m.bias.data.fill_(0)
        except AttributeError:
            print("Skipping initialization of ", classname)
def get_mask1d(lengths, mask_length=None, batch_first=True):
    """Get mask that is 1 for sequences shorter than lengths and 0 otherwise.
    The mask is on the device of lengths.
    """
    if mask_length is None:
        mask_length = lengths.max()
    lengths = lengths.long()
    positions = torch.arange(mask_length, device=lengths.device)
    if batch_first:
        # (batch, time): positions broadcast against per-item lengths
        mask = positions < lengths[:, None]
    else:
        # (time, batch)
        mask = positions[:, None] < lengths
    return mask.float()


def get_mask2d(lengths, shape_as, mask_length=None, batch_first=True):
    """Broadcast the 1-D length mask to the 4-D shape of `shape_as`
    (batch, 1, 1, time), then expand."""
    mask = get_mask1d(lengths, mask_length, batch_first)
    mask = mask.view(mask.shape[0], 1, 1, mask.shape[1])
    return mask.expand_as(shape_as)
def get_mlp(num_inputs, hidden_dims, activation):
    """Build a Sequential MLP of Linear + activation pairs.

    `activation` is the name of a torch.nn activation class (e.g. "ReLU").
    Returns (net, output_dim)."""
    act_cls = getattr(torch.nn, activation)
    net = torch.nn.Sequential()
    in_dim = num_inputs
    for i, out_dim in enumerate(hidden_dims):
        net.add_module(f"fc{i}", torch.nn.Linear(in_dim, out_dim))
        net.add_module(f"act{i}", act_cls())
        in_dim = out_dim
    return net, in_dim
def extract_modify_dict(modify_config):
    """Flatten a list of [key, value, key, value, ...] lists into a dict.

    Raises ValueError for any literal "none" value (callers must use
    "null"), and Exception when the flattened list has odd length."""
    if modify_config is None:
        return {}
    # Flatten
    flat = functools.reduce(lambda acc, part: acc + part, modify_config)
    keys, values = flat[::2], flat[1::2]
    if any(v.lower() == 'none' for v in values):
        raise ValueError('Specify None values as "null"')
    if len(keys) != len(values):
        raise Exception(
            "Modify config list should have even number of elements")
    return dict(zip(keys, values))
# https://stackoverflow.com/questions/5967500/how-to-correctly-sort-a-string-with-a-number-inside
def _atoi(text):
return int(text) if text.isdigit() else text
def natural_keys(text):
"""
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
"""
return [_atoi(c) for c in re.split(r"(\d+)", text)]
def str2bool(v):
    """Parse common yes/no strings into a bool (suitable as an argparse
    `type=` callable); raises ArgumentTypeError for anything else."""
    lowered = v.lower()
    if lowered in ("yes", "true", "t", "y", "1"):
        return True
    if lowered in ("no", "false", "f", "n", "0"):
        return False
    raise argparse.ArgumentTypeError("Boolean value expected.")
def gather_nd_pt(table, k):
    # equivalent to tf.gather_nd
    # The permute(2, 0, 1) moves the last axis of `table` to the front, and
    # arange over that axis selects every "channel". k.chunk(2, 1) splits k
    # into two (N, 1) index columns, which broadcast against the (C,) arange
    # during advanced indexing.
    # NOTE(review): the result shape follows advanced-indexing broadcasting
    # of (C,) with (N, 1) -- presumably (N, C); verify against callers.
    return table.permute(2, 0, 1)[
        list((torch.arange(table.permute(2, 0, 1).size(0)), *k.chunk(2, 1)))
    ]
def compute_nmi(cluster_assignments, class_assignments):
    """Computes the Normalized Mutual Information between cluster and class assignments.
    Compare to https://nlp.stanford.edu/IR-book/html/htmledition/evaluation-of-clustering-1.html
    Args:
        cluster_assignments (list): List of cluster assignments for every point.
        class_assignments (list): List of class assignments for every point.
    Returns:
        float: The NMI value.
    """
    assert len(cluster_assignments) == len(
        class_assignments
    ), "The inputs have to be of the same length."
    clusters = np.unique(cluster_assignments)
    classes = np.unique(class_assignments)
    num_samples = len(cluster_assignments)
    num_clusters = len(clusters)
    num_classes = len(classes)
    assert num_classes > 1, "There should be more than one class."
    # Contingency counts: counts[cluster][class] = number of co-assigned points.
    counts = {clu: dict.fromkeys(classes, 0) for clu in clusters}
    for clu, cls in zip(cluster_assignments, class_assignments):
        counts[clu][cls] += 1
    cluster_sizes = {clu: sum(row.values()) for clu, row in counts.items()}
    class_sizes = {
        cls: sum(counts[clu][cls] for clu in clusters) for cls in classes
    }
    # Mutual information I(cluster; class), skipping empty cells.
    mutual_info = 0.0
    for clu in clusters:
        for cls in classes:
            joint = counts[clu][cls]
            if joint == 0:
                continue
            mutual_info += (joint / num_samples) * np.log(
                (joint * num_samples) / (cluster_sizes[clu] * class_sizes[cls])
            )
    # Marginal entropies of the two assignments.
    entropy_clusters = 0.0
    for clu in clusters:
        frac = cluster_sizes[clu] / num_samples
        entropy_clusters -= frac * np.log(frac)
    entropy_classes = 0.0
    for cls in classes:
        frac = class_sizes[cls] / num_samples
        entropy_classes -= frac * np.log(frac)
    return (2 * mutual_info) / (entropy_clusters + entropy_classes)
def compute_purity(cluster_assignments, class_assignments):
    """Computes the purity between cluster and class assignments.
    Compare to https://nlp.stanford.edu/IR-book/html/htmledition/evaluation-of-clustering-1.html
    Args:
        cluster_assignments (list): List of cluster assignments for every point.
        class_assignments (list): List of class assignments for every point.
    Returns:
        float: The purity value.
    """
    assert len(cluster_assignments) == len(class_assignments)
    num_samples = len(cluster_assignments)
    # Tally, per cluster, how many members each class contributes.
    per_cluster = {}
    for clu, cls in zip(cluster_assignments, class_assignments):
        row = per_cluster.setdefault(clu, {})
        row[cls] = row.get(cls, 0) + 1
    # Each cluster contributes the size of its dominant class.
    dominant_total = sum(max(row.values()) for row in per_cluster.values())
    return dominant_total / num_samples
def ptvsd(host="0.0.0.0", port=5678):
    """Enable remote debugging: wait for a ptvsd (VS Code) debugger to
    attach at host:port, then drop into a breakpoint.

    Side effects only; blocks until a debugger attaches. Requires the
    third-party `ptvsd` package.
    """
    import ptvsd
    ptvsd.enable_attach(address=(host, port), redirect_output=True)
    print(f"PTVSD waiting for attach at {host}:{port}")
    ptvsd.wait_for_attach()
    breakpoint()
def sample_truncated_normal(starts, ends, stddev):
    """Sample integer positions around the midpoint of each [start, end]
    interval, clamped back into the interval.

    NOTE(review): `stddev` goes through math.sqrt before being used as the
    normal scale, so it is effectively treated as a variance -- confirm
    with callers.
    """
    noise_dist = torch.distributions.Normal(
        torch.tensor([0.0]), torch.tensor([math.sqrt(stddev)])
    )
    batch_size = len(starts)
    noise = noise_dist.sample((batch_size,)).squeeze()
    midpoints = starts.float() + (ends - starts).float() / 2.0
    positions = (torch.round(midpoints + noise)).long()
    # clamp into [starts, ends] element-wise
    return torch.min(torch.max(starts, positions), ends)
def reverse_sequences(mini_batch, seq_lengths):
    """Reverse each (batch, time, feat) sequence along time, touching only
    the first seq_lengths[b] steps; padding positions stay zero."""
    flipped = torch.zeros_like(mini_batch)
    for b in range(mini_batch.size(0)):
        T = seq_lengths[b]
        reversed_idx = torch.arange(T - 1, -1, -1, device=mini_batch.device)
        flipped[b, 0:T, :] = torch.index_select(mini_batch[b, :, :], 0, reversed_idx)
    return flipped


def pad_and_reverse(rnn_output, seq_lengths):
    """Unpack a PackedSequence (batch-first) and reverse each sequence."""
    padded, _ = nn.utils.rnn.pad_packed_sequence(rnn_output, batch_first=True)
    return reverse_sequences(padded, seq_lengths)
def get_mini_batch_mask(mini_batch, seq_lengths):
    """Binary (batch, time) mask: 1 inside each sequence, 0 on padding."""
    batch_size, max_len = mini_batch.shape[0:2]
    mask = torch.zeros(batch_size, max_len)
    for b in range(batch_size):
        mask[b, 0 : seq_lengths[b]] = torch.ones(seq_lengths[b])
    return mask
def calc_au(means, VAR_THRESHOLD=1e-2):
    """Count active latent units: dimensions whose variance across the
    concatenated batch of means exceeds VAR_THRESHOLD.

    Returns (number of active units, tensor of their indices)."""
    stacked = torch.cat(means, dim=0)
    per_dim_var = torch.std(stacked, dim=0).pow(2)
    unit_ids = torch.arange(0, stacked.size(1))
    active_units = unit_ids[per_dim_var > VAR_THRESHOLD].long()
    return len(active_units), active_units
def rleEncode(x):
    """Run-length encode a 1D torch tensor.

    For input like [0 0 0 0 0 1 1 3 3 3 3] returns (spans, values) where
    spans is an IntTensor of inclusive [start, end] index pairs, e.g.
    [[0, 4], [5, 6], [7, 10]], and values holds the value of each run.
    """
    assert len(x.shape) == 1
    n = len(x)
    # indices where the value changes relative to the previous element
    change_points = np.flatnonzero(~np.isclose(x[1:], x[:-1], equal_nan=True)) + 1
    starts = np.r_[0, change_points]
    lengths = np.diff(np.r_[starts, n])
    values = x[starts]
    spans = [[start, start + length - 1] for start, length in zip(starts, lengths)]
    result = torch.IntTensor(spans)
    return result, values
def _make_liftering(N, Q):
return 1 + 0.5*Q*np.sin(np.pi*np.arange(N)/Q).astype(np.float32)
def _make_dct(input_dim, output_dim, inv, normalize):
from scipy.fftpack import dct, idct
if normalize:
norm = 'ortho'
else:
norm = None
if inv:
C = idct(np.eye(input_dim), type=2, norm=norm, overwrite_x=True)
else:
C = dct(np.eye(input_dim), type=2, norm=norm, overwrite_x=True)
return C[:,:output_dim].astype(np.float32)
class FBANK2MFCC(nn.Module):
    """Converts log-mel filterbank features to MFCC-style features via a
    DCT-II followed by sinusoidal liftering; the first (energy)
    coefficient is excluded from the transform."""
    def __init__(self, input_dim, keep_energy=True):
        super(FBANK2MFCC, self).__init__()
        # The first feature is treated as the energy term; only the
        # remaining input_dim - 1 coefficients go through DCT/liftering.
        input_dim = input_dim - 1
        self.lift = nn.Parameter(torch.from_numpy(_make_liftering(input_dim, input_dim-1)), requires_grad=False)
        self.dct = nn.Parameter(torch.from_numpy(_make_dct(input_dim, input_dim, inv=False, normalize=True)), requires_grad=False)
        self.keep_energy = keep_energy
    def forward(self, x):
        # assumes the feature axis of x sits at dim 2 before this permute
        # -- TODO confirm expected layout with callers
        x = x.permute(0, 1, 3, 2)
        # DCT + liftering applied to all but the energy coefficient,
        # written back in place.
        x[:, :, :, 1:] = nn.functional.linear(x[:, :, :, 1:], self.dct) * self.lift
        if not self.keep_energy:
            x = x[:, :, :, 1:]
        # NOTE(review): only index 0 along dim 2 is kept here; verify that
        # this dimension is the intended singleton/channel axis.
        return x[:, :, :1, :].permute(0, 1, 3, 2)
# https://github.com/allenai/allennlp/blob/30c4271f7f04babb1cb546ab017a104bda011e7c/allennlp/nn/util.py#L376
def masked_flip(padded_sequence, sequence_lengths):
    """Reverse each sequence of a (batch, time, ...) padded tensor along
    the time dimension without mixing padded entries into the data.

    Parameters
    ----------
    padded_sequence : ``torch.Tensor``
        Tensor of shape (batch size, num timesteps, ...).
    sequence_lengths : list-like
        Length of each unpadded sequence in the batch.

    Returns
    -------
    A ``torch.Tensor``, re-padded to the longest given length.
    """
    assert padded_sequence.size(0) == len(sequence_lengths), \
        f'sequence_lengths length ${len(sequence_lengths)} does not match batch size ${padded_sequence.size(0)}'
    num_timesteps = padded_sequence.size(1)
    # flip everything, then trim the leading (formerly padded) entries
    flipped = torch.flip(padded_sequence, [1])
    trimmed = [
        flipped[b, num_timesteps - length:]
        for b, length in enumerate(sequence_lengths)
    ]
    return torch.nn.utils.rnn.pad_sequence(trimmed, batch_first=True)
def corrupt(x, p):
    """Randomly zero out entries of x, each independently with probability p."""
    keep_prob = torch.tensor([1.0 - p], device=x.device)
    bernoulli = torch.distributions.bernoulli.Bernoulli(keep_prob)
    # sample has a trailing singleton dim from the (1,)-shaped probs tensor
    mask = safe_squeeze(bernoulli.sample(sample_shape=x.size()), -1)
    return x * mask
| [
"ptvsd.enable_attach",
"torch.cat",
"numpy.argmin",
"matplotlib.pyplot.figure",
"torch.std",
"numpy.isclose",
"torch.arange",
"torch.nn.utils.rnn.pad_packed_sequence",
"numpy.arange",
"numpy.unique",
"argparse.ArgumentTypeError",
"torch.ones",
"logging.log",
"torch.nn.Linear",
"torch.zer... | [((4872, 4913), 'logging.log', 'logging.log', (['level', "(msg + ' ' + extra_msg)"], {}), "(level, msg + ' ' + extra_msg)\n", (4883, 4913), False, 'import logging\n'), ((11144, 11192), 'torch.arange', 'torch.arange', (['mask_length'], {'device': 'lengths.device'}), '(mask_length, device=lengths.device)\n', (11156, 11192), False, 'import torch\n'), ((11596, 11617), 'torch.nn.Sequential', 'torch.nn.Sequential', ([], {}), '()\n', (11615, 11617), False, 'import torch\n'), ((11999, 12050), 'functools.reduce', 'functools.reduce', (['(lambda x, y: x + y)', 'modify_config'], {}), '(lambda x, y: x + y, modify_config)\n', (12015, 12050), False, 'import functools\n'), ((13859, 13889), 'numpy.unique', 'np.unique', (['cluster_assignments'], {}), '(cluster_assignments)\n', (13868, 13889), True, 'import numpy as np\n'), ((13904, 13932), 'numpy.unique', 'np.unique', (['class_assignments'], {}), '(class_assignments)\n', (13913, 13932), True, 'import numpy as np\n'), ((16819, 16882), 'ptvsd.enable_attach', 'ptvsd.enable_attach', ([], {'address': '(host, port)', 'redirect_output': '(True)'}), '(address=(host, port), redirect_output=True)\n', (16838, 16882), False, 'import ptvsd\n'), ((16943, 16966), 'ptvsd.wait_for_attach', 'ptvsd.wait_for_attach', ([], {}), '()\n', (16964, 16966), False, 'import ptvsd\n'), ((17511, 17539), 'torch.zeros_like', 'torch.zeros_like', (['mini_batch'], {}), '(mini_batch)\n', (17527, 17539), False, 'import torch\n'), ((17923, 17985), 'torch.nn.utils.rnn.pad_packed_sequence', 'nn.utils.rnn.pad_packed_sequence', (['rnn_output'], {'batch_first': '(True)'}), '(rnn_output, batch_first=True)\n', (17955, 17985), True, 'import torch.nn as nn\n'), ((18141, 18175), 'torch.zeros', 'torch.zeros', (['mini_batch.shape[0:2]'], {}), '(mini_batch.shape[0:2])\n', (18152, 18175), False, 'import torch\n'), ((18387, 18410), 'torch.cat', 'torch.cat', (['means'], {'dim': '(0)'}), '(means, dim=0)\n', (18396, 18410), False, 'import torch\n'), ((19075, 19100), 
'numpy.diff', 'np.diff', (['np.r_[starts, n]'], {}), '(np.r_[starts, n])\n', (19082, 19100), True, 'import numpy as np\n'), ((21402, 21434), 'torch.flip', 'torch.flip', (['padded_sequence', '[1]'], {}), '(padded_sequence, [1])\n', (21412, 21434), False, 'import torch\n'), ((21561, 21621), 'torch.nn.utils.rnn.pad_sequence', 'torch.nn.utils.rnn.pad_sequence', (['sequences'], {'batch_first': '(True)'}), '(sequences, batch_first=True)\n', (21592, 21621), False, 'import torch\n'), ((1621, 1639), 'torch.is_tensor', 'torch.is_tensor', (['t'], {}), '(t)\n', (1636, 1639), False, 'import torch\n'), ((2244, 2256), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2254, 2256), True, 'import matplotlib.pyplot as plt\n'), ((10295, 10312), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (10306, 10312), False, 'import os\n'), ((16148, 16178), 'numpy.unique', 'np.unique', (['cluster_assignments'], {}), '(cluster_assignments)\n', (16157, 16178), True, 'import numpy as np\n'), ((16202, 16230), 'numpy.unique', 'np.unique', (['class_assignments'], {}), '(class_assignments)\n', (16211, 16230), True, 'import numpy as np\n'), ((17084, 17103), 'torch.tensor', 'torch.tensor', (['[0.0]'], {}), '([0.0])\n', (17096, 17103), False, 'import torch\n'), ((17379, 17427), 'torch.max', 'torch.max', (['starts', 'untruncated_sampled_positions'], {}), '(starts, untruncated_sampled_positions)\n', (17388, 17427), False, 'import torch\n'), ((17628, 17681), 'torch.arange', 'torch.arange', (['(T - 1)', '(-1)', '(-1)'], {'device': 'mini_batch.device'}), '(T - 1, -1, -1, device=mini_batch.device)\n', (17640, 17681), False, 'import torch\n'), ((17710, 17764), 'torch.index_select', 'torch.index_select', (['mini_batch[b, :, :]', '(0)', 'time_slice'], {}), '(mini_batch[b, :, :], 0, time_slice)\n', (17728, 17764), False, 'import torch\n'), ((18255, 18281), 'torch.ones', 'torch.ones', (['seq_lengths[b]'], {}), '(seq_lengths[b])\n', (18265, 18281), False, 'import torch\n'), ((21745, 21785), 
'torch.tensor', 'torch.tensor', (['[1.0 - p]'], {'device': 'x.device'}), '([1.0 - p], device=x.device)\n', (21757, 21785), False, 'import torch\n'), ((1485, 1503), 'torch.is_tensor', 'torch.is_tensor', (['t'], {}), '(t)\n', (1500, 1503), False, 'import torch\n'), ((6258, 6279), 'numpy.argmin', 'np.argmin', (['operations'], {}), '(operations)\n', (6267, 6279), True, 'import numpy as np\n'), ((7994, 8005), 'numpy.array', 'np.array', (['h'], {}), '(h)\n', (8002, 8005), True, 'import numpy as np\n'), ((8007, 8018), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (8015, 8018), True, 'import numpy as np\n'), ((8916, 8949), 'importlib.import_module', 'importlib.import_module', (['mod_name'], {}), '(mod_name)\n', (8939, 8949), False, 'import importlib\n'), ((10649, 10682), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['m.weight'], {}), '(m.weight)\n', (10672, 10682), True, 'import torch.nn as nn\n'), ((11740, 11772), 'torch.nn.Linear', 'torch.nn.Linear', (['num_inputs', 'dim'], {}), '(num_inputs, dim)\n', (11755, 11772), False, 'import torch\n'), ((12806, 12830), 're.split', 're.split', (['"""(\\\\d+)"""', 'text'], {}), "('(\\\\d+)', text)\n", (12814, 12830), False, 'import re\n'), ((13022, 13075), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""Boolean value expected."""'], {}), "('Boolean value expected.')\n", (13048, 13075), False, 'import argparse\n'), ((15281, 15326), 'numpy.log', 'np.log', (['(cluster_sizes[cluster_] / num_samples)'], {}), '(cluster_sizes[cluster_] / num_samples)\n', (15287, 15326), True, 'import numpy as np\n'), ((15434, 15475), 'numpy.log', 'np.log', (['(class_sizes[class_] / num_samples)'], {}), '(class_sizes[class_] / num_samples)\n', (15440, 15475), True, 'import numpy as np\n'), ((16359, 16389), 'numpy.unique', 'np.unique', (['cluster_assignments'], {}), '(cluster_assignments)\n', (16368, 16389), True, 'import numpy as np\n'), ((17321, 17349), 'torch.round', 'torch.round', (['(middle + sample)'], {}), '(middle 
+ sample)\n', (17332, 17349), False, 'import torch\n'), ((18423, 18448), 'torch.std', 'torch.std', (['z_means'], {'dim': '(0)'}), '(z_means, dim=0)\n', (18432, 18448), False, 'import torch\n'), ((19558, 19575), 'numpy.eye', 'np.eye', (['input_dim'], {}), '(input_dim)\n', (19564, 19575), True, 'import numpy as np\n'), ((19640, 19657), 'numpy.eye', 'np.eye', (['input_dim'], {}), '(input_dim)\n', (19646, 19657), True, 'import numpy as np\n'), ((20273, 20319), 'torch.nn.functional.linear', 'nn.functional.linear', (['x[:, :, :, 1:]', 'self.dct'], {}), '(x[:, :, :, 1:], self.dct)\n', (20293, 20319), True, 'import torch.nn as nn\n'), ((9006, 9042), 'importlib.import_module', 'importlib.import_module', (['default_mod'], {}), '(default_mod)\n', (9029, 9042), False, 'import importlib\n'), ((16305, 16333), 'numpy.unique', 'np.unique', (['class_assignments'], {}), '(class_assignments)\n', (16314, 16333), True, 'import numpy as np\n'), ((17119, 17136), 'math.sqrt', 'math.sqrt', (['stddev'], {}), '(stddev)\n', (17128, 17136), False, 'import math\n'), ((14986, 15101), 'numpy.log', 'np.log', (['(cluster_class_counts[cluster_][class_] * num_samples / (cluster_sizes[\n cluster_] * class_sizes[class_]))'], {}), '(cluster_class_counts[cluster_][class_] * num_samples / (\n cluster_sizes[cluster_] * class_sizes[class_]))\n', (14992, 15101), True, 'import numpy as np\n'), ((19013, 19054), 'numpy.isclose', 'np.isclose', (['x[1:]', 'x[:-1]'], {'equal_nan': '(True)'}), '(x[1:], x[:-1], equal_nan=True)\n', (19023, 19054), True, 'import numpy as np\n'), ((19327, 19339), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (19336, 19339), True, 'import numpy as np\n')] |
import numpy as np
from utils import mean_err
def variance(X):
    """Return the average per-feature variance of a sample matrix.

    Args:
        X: 2-D array of shape (num_samples, num_features); rows are samples.

    Returns:
        float: the trace of the sample covariance matrix divided by its
        dimension, i.e. the mean of the per-feature variances (ddof=1).
    """
    cov = np.cov(X.T)
    # trace/dim == mean of the diagonal == mean per-feature variance
    var = np.trace(cov) / cov.shape[0]
    return var
def rdiv(X_train, X_gen):
    """Relative diversity: variance of generated samples over training ones.

    Both inputs are flattened to shape (num_samples, -1) before the average
    per-feature variance is computed via ``variance``.

    Args:
        X_train: training samples; leading axis indexes samples.
        X_gen: generated samples; leading axis indexes samples.

    Returns:
        float: variance(X_gen) / variance(X_train). A value near 1 means the
        generator matches the training set's spread.
    """
    X_train = np.squeeze(X_train)
    X_train = X_train.reshape((X_train.shape[0], -1))
    train_div = variance(X_train)
    X_gen = X_gen.reshape((X_gen.shape[0], -1))
    gen_div = variance(X_gen)
    rdiv = gen_div / train_div
    return rdiv
def ci_rdiv(n, X_train, gen_func, d=None, k=None, bounds=None):
    """Estimate the mean and error of the relative diversity over n rounds.

    Args:
        n: number of generation rounds to average over.
        X_train: training samples; ``X_train.shape[0]`` sets the batch size.
        gen_func: generator. Called with a sample count when no latent sweep
            is requested, otherwise with a latent batch of shape
            (X_train.shape[0], d).
        d: latent dimensionality; enables the latent-sweep mode together
            with ``k`` and ``bounds``.
        k: index of the single latent dimension to vary.
        bounds: (low, high) uniform sampling bounds for the latents.

    Returns:
        tuple: (mean, err) over the per-round rdiv values, as computed by
        ``mean_err``.
    """
    rdivs = np.zeros(n)
    for i in range(n):
        if d is None or k is None or bounds is None:
            X_gen = gen_func(X_train.shape[0])
        else:
            # Fix every latent dim to one random constant, then sweep only
            # dimension k across the batch.
            latent = np.random.uniform(bounds[0], bounds[1]) * np.ones(
                (X_train.shape[0], d)
            )
            latent[:, k] = np.random.uniform(
                bounds[0], bounds[1], size=X_train.shape[0]
            )
            X_gen = gen_func(latent)
        rdivs[i] = rdiv(X_train, X_gen)
    mean, err = mean_err(rdivs)
    return mean, err
| [
"numpy.random.uniform",
"numpy.trace",
"utils.mean_err",
"numpy.zeros",
"numpy.ones",
"numpy.squeeze",
"numpy.cov"
] | [((75, 86), 'numpy.cov', 'np.cov', (['X.T'], {}), '(X.T)\n', (81, 86), True, 'import numpy as np\n'), ((307, 326), 'numpy.squeeze', 'np.squeeze', (['X_train'], {}), '(X_train)\n', (317, 326), True, 'import numpy as np\n'), ((1029, 1040), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1037, 1040), True, 'import numpy as np\n'), ((1602, 1617), 'utils.mean_err', 'mean_err', (['rdivs'], {}), '(rdivs)\n', (1610, 1617), False, 'from utils import mean_err\n'), ((97, 110), 'numpy.trace', 'np.trace', (['cov'], {}), '(cov)\n', (105, 110), True, 'import numpy as np\n'), ((1297, 1359), 'numpy.random.uniform', 'np.random.uniform', (['bounds[0]', 'bounds[1]'], {'size': 'X_train.shape[0]'}), '(bounds[0], bounds[1], size=X_train.shape[0])\n', (1314, 1359), True, 'import numpy as np\n'), ((1199, 1238), 'numpy.random.uniform', 'np.random.uniform', (['bounds[0]', 'bounds[1]'], {}), '(bounds[0], bounds[1])\n', (1216, 1238), True, 'import numpy as np\n'), ((1239, 1269), 'numpy.ones', 'np.ones', (['(X_train.shape[0], d)'], {}), '((X_train.shape[0], d))\n', (1246, 1269), True, 'import numpy as np\n')] |
import cv2
import numpy as np
from preprocess_inputs import pose_estimation, text_detection, car_meta
# Test images; cv2.imread returns None when a file is missing, which will
# surface later as a comparison failure rather than an exception here.
POSE_IMAGE = cv2.imread("images/sitting-on-car.jpg")
TEXT_IMAGE = cv2.imread("images/sign.jpg")
CAR_IMAGE = cv2.imread("images/blue-car.jpg")

# Test names
test_names = ["Pose Estimation", "Text Detection", "Car Meta"]

# Mapping from test name to solution function, populated by
# set_solution_functions(). A `global` statement at module scope is a
# no-op, so initialize the name directly instead.
solution_funcs = {}
def test_pose():
    """Check the student's pose-estimation preprocessing against the solution."""
    return test(pose_estimation, test_names[0], POSE_IMAGE)
def test_text():
    """Check the student's text-detection preprocessing against the solution."""
    return test(text_detection, test_names[1], TEXT_IMAGE)
def test_car():
    """Check the student's car-meta preprocessing against the solution."""
    return test(car_meta, test_names[2], CAR_IMAGE)
def test(test_func, test_name, test_image):
    """Run a student's preprocessing function and compare with the solution.

    Args:
        test_func: student function taking an image array and returning the
            preprocessed image.
        test_name: key into ``solution_funcs`` (one of ``test_names``).
        test_image: BGR image array to preprocess.

    Returns:
        bool: True if the student's output matches the solution, False if it
        differs or the student's code raised an exception.
    """
    # Try the student's code first; report failure instead of crashing.
    # Catch Exception (not bare except) so Ctrl-C/SystemExit still propagate.
    try:
        student_processed = test_func(test_image)
    except Exception:
        print_exception(test_name)
        # Was an implicit `return None`, which made the result sum in
        # main() raise a TypeError; False keeps the count correct.
        return False
    # Run the solution code and compare to student example
    solution = solution_funcs[test_name](test_image)
    comparison = np.array_equal(student_processed, solution)
    print_test_result(test_name, comparison)
    return comparison
def print_exception(test_name):
    """Tell the student that their code crashed while running a test."""
    print(f"Failed to run test on {test_name}.")
    print("The code should be valid Python and return the preprocessed image.")
def print_test_result(test_name, result):
    """Print whether the named test passed or failed."""
    if not result:
        print(
            f"Failed {test_name} test, did not obtain expected preprocessed image."
        )
        return
    print(f"Passed {test_name} test.")
def feedback(tests_passed):
    """Summarize how many of the three tests passed."""
    print("You passed {} of 3 tests.".format(int(tests_passed)))
    if tests_passed == 3:
        closing = "Congratulations!"
    else:
        closing = "See above for additional feedback."
    print(closing)
def set_solution_functions():
    """Populate the module-level mapping from test name to solution function."""
    global solution_funcs
    solution_funcs = dict(
        zip(test_names, (pose_solution, text_solution, car_solution))
    )
def preprocessing(input_image, height, width):
    """Resize an image and reorder it to NCHW layout for model input.

    Args:
        input_image: image array of shape (H, W, 3), BGR channel order.
        height: target height in pixels.
        width: target width in pixels.

    Returns:
        Array of shape (1, 3, height, width).
    """
    resized = cv2.resize(input_image, (width, height))
    channels_first = resized.transpose((2, 0, 1))
    return channels_first.reshape(1, 3, height, width)
def pose_solution(input_image):
    """Solution preprocessing for the pose estimation model (256x456)."""
    return preprocessing(input_image, height=256, width=456)
def text_solution(input_image):
    """Solution preprocessing for the text detection model (768x1280)."""
    return preprocessing(input_image, height=768, width=1280)
def car_solution(input_image):
    """Solution preprocessing for the car meta model (72x72)."""
    return preprocessing(input_image, height=72, width=72)
def main():
    """Run all three preprocessing tests and print summary feedback."""
    set_solution_functions()
    results = (test_pose(), test_text(), test_car())
    # bool() guards against a test returning None (e.g. when the student's
    # code raises), which would otherwise make the sum raise a TypeError.
    feedback(sum(bool(r) for r in results))
# Run the test suite only when this file is executed directly, not on import.
if __name__ == "__main__":
    main()
| [
"numpy.array_equal",
"cv2.imread",
"cv2.resize"
] | [((135, 174), 'cv2.imread', 'cv2.imread', (['"""images/sitting-on-car.jpg"""'], {}), "('images/sitting-on-car.jpg')\n", (145, 174), False, 'import cv2\n'), ((188, 217), 'cv2.imread', 'cv2.imread', (['"""images/sign.jpg"""'], {}), "('images/sign.jpg')\n", (198, 217), False, 'import cv2\n'), ((230, 263), 'cv2.imread', 'cv2.imread', (['"""images/blue-car.jpg"""'], {}), "('images/blue-car.jpg')\n", (240, 263), False, 'import cv2\n'), ((1031, 1074), 'numpy.array_equal', 'np.array_equal', (['student_processed', 'solution'], {}), '(student_processed, solution)\n', (1045, 1074), True, 'import numpy as np\n'), ((2039, 2079), 'cv2.resize', 'cv2.resize', (['input_image', '(width, height)'], {}), '(input_image, (width, height))\n', (2049, 2079), False, 'import cv2\n')] |
import cv2
import imutils
import numpy as np
from frigate.config import MotionConfig
class MotionDetector:
    """Detects regions of motion between consecutive video frames.

    Each frame is downscaled to ``config.frame_height`` rows, diffed against a
    running average frame, and contours of the thresholded delta larger than
    ``config.contour_area`` are reported as bounding boxes mapped back to the
    full frame's coordinate space.
    """

    def __init__(self, frame_shape, config: MotionConfig):
        self.config = config
        self.frame_shape = frame_shape
        # Scale factor mapping motion-frame coordinates back to the full frame.
        self.resize_factor = frame_shape[0] / config.frame_height
        self.motion_frame_size = (
            config.frame_height,
            config.frame_height * frame_shape[1] // frame_shape[0],
        )
        # np.float was an alias for builtin float (float64) and was removed
        # in NumPy 1.24; use np.float64 explicitly.
        self.avg_frame = np.zeros(self.motion_frame_size, np.float64)
        self.avg_delta = np.zeros(self.motion_frame_size, np.float64)
        self.motion_frame_count = 0
        self.frame_counter = 0
        resized_mask = cv2.resize(
            config.mask,
            dsize=(self.motion_frame_size[1], self.motion_frame_size[0]),
            interpolation=cv2.INTER_LINEAR,
        )
        # Indices of masked-out pixels; forced to white before diffing so
        # they never register as motion.
        self.mask = np.where(resized_mask == [0])
        # Set True to dump debug images of each processing stage to motion/.
        self.save_images = False

    def detect(self, frame):
        """Return a list of (x1, y1, x2, y2) motion boxes in full-frame coords.

        Args:
            frame: raw frame buffer; only the luminance plane
                (rows 0..frame_shape[0]) is used.
        """
        motion_boxes = []

        gray = frame[0 : self.frame_shape[0], 0 : self.frame_shape[1]]

        # resize frame
        resized_frame = cv2.resize(
            gray,
            dsize=(self.motion_frame_size[1], self.motion_frame_size[0]),
            interpolation=cv2.INTER_LINEAR,
        )

        # Improve contrast by stretching the 4th..96th percentile to 0..255.
        if self.config.improve_contrast:
            minval = np.percentile(resized_frame, 4)
            maxval = np.percentile(resized_frame, 96)
            # don't adjust if the image is a single color
            if minval < maxval:
                resized_frame = np.clip(resized_frame, minval, maxval)
                resized_frame = (
                    ((resized_frame - minval) / (maxval - minval)) * 255
                ).astype(np.uint8)

        # mask frame
        resized_frame[self.mask] = [255]

        # It takes ~30 frames to establish a baseline average,
        # so don't bother looking for motion until then.
        if self.frame_counter < 30:
            self.frame_counter += 1
        else:
            if self.save_images:
                self.frame_counter += 1
            # compare to average
            frameDelta = cv2.absdiff(resized_frame, cv2.convertScaleAbs(self.avg_frame))

            # Compute the average delta over the past few frames. Higher alpha
            # values mean the current frame impacts the delta a lot, and a
            # single raindrop may register as motion; too low and a fast
            # moving person won't be detected as motion.
            cv2.accumulateWeighted(frameDelta, self.avg_delta, self.config.delta_alpha)

            # compute the threshold image for the current frame
            current_thresh = cv2.threshold(
                frameDelta, self.config.threshold, 255, cv2.THRESH_BINARY
            )[1]

            # Black out everything in the avg_delta where there isn't motion
            # in the current frame.
            avg_delta_image = cv2.convertScaleAbs(self.avg_delta)
            avg_delta_image = cv2.bitwise_and(avg_delta_image, current_thresh)

            # Then look for deltas above the threshold, but only in areas
            # where there is a delta in the current frame. This prevents
            # deltas from previous frames from being included.
            thresh = cv2.threshold(
                avg_delta_image, self.config.threshold, 255, cv2.THRESH_BINARY
            )[1]

            # Dilate the thresholded image to fill in holes, then find
            # contours on the thresholded image.
            thresh_dilated = cv2.dilate(thresh, None, iterations=2)
            cnts = cv2.findContours(
                thresh_dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
            )
            cnts = imutils.grab_contours(cnts)

            # loop over the contours
            for c in cnts:
                # if the contour is big enough, count it as motion
                contour_area = cv2.contourArea(c)
                if contour_area > self.config.contour_area:
                    x, y, w, h = cv2.boundingRect(c)
                    # map the box back to full-frame coordinates
                    motion_boxes.append(
                        (
                            int(x * self.resize_factor),
                            int(y * self.resize_factor),
                            int((x + w) * self.resize_factor),
                            int((y + h) * self.resize_factor),
                        )
                    )

            if self.save_images:
                # Debug output: draw the qualifying contours and save a 2x2
                # grid of the intermediate images.
                thresh_dilated = cv2.cvtColor(thresh_dilated, cv2.COLOR_GRAY2BGR)
                for c in cnts:
                    contour_area = cv2.contourArea(c)
                    if contour_area > self.config.contour_area:
                        x, y, w, h = cv2.boundingRect(c)
                        cv2.rectangle(
                            thresh_dilated,
                            (x, y),
                            (x + w, y + h),
                            (0, 0, 255),
                            2,
                        )
                image_row_1 = cv2.hconcat(
                    [
                        cv2.cvtColor(frameDelta, cv2.COLOR_GRAY2BGR),
                        cv2.cvtColor(avg_delta_image, cv2.COLOR_GRAY2BGR),
                    ]
                )
                image_row_2 = cv2.hconcat(
                    [cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR), thresh_dilated]
                )
                combined_image = cv2.vconcat([image_row_1, image_row_2])
                cv2.imwrite(f"motion/motion-{self.frame_counter}.jpg", combined_image)

        if len(motion_boxes) > 0:
            self.motion_frame_count += 1
            if self.motion_frame_count >= 10:
                # only average in the current frame if the difference persists for a bit
                cv2.accumulateWeighted(
                    resized_frame, self.avg_frame, self.config.frame_alpha
                )
        else:
            # when no motion, just keep averaging the frames together
            cv2.accumulateWeighted(
                resized_frame, self.avg_frame, self.config.frame_alpha
            )
            self.motion_frame_count = 0

        return motion_boxes
| [
"cv2.findContours",
"cv2.contourArea",
"cv2.bitwise_and",
"cv2.dilate",
"cv2.cvtColor",
"cv2.accumulateWeighted",
"cv2.threshold",
"cv2.vconcat",
"numpy.zeros",
"cv2.imwrite",
"numpy.clip",
"numpy.percentile",
"cv2.rectangle",
"numpy.where",
"cv2.convertScaleAbs",
"imutils.grab_contour... | [((473, 515), 'numpy.zeros', 'np.zeros', (['self.motion_frame_size', 'np.float'], {}), '(self.motion_frame_size, np.float)\n', (481, 515), True, 'import numpy as np\n'), ((541, 583), 'numpy.zeros', 'np.zeros', (['self.motion_frame_size', 'np.float'], {}), '(self.motion_frame_size, np.float)\n', (549, 583), True, 'import numpy as np\n'), ((674, 796), 'cv2.resize', 'cv2.resize', (['config.mask'], {'dsize': '(self.motion_frame_size[1], self.motion_frame_size[0])', 'interpolation': 'cv2.INTER_LINEAR'}), '(config.mask, dsize=(self.motion_frame_size[1], self.\n motion_frame_size[0]), interpolation=cv2.INTER_LINEAR)\n', (684, 796), False, 'import cv2\n'), ((859, 888), 'numpy.where', 'np.where', (['(resized_mask == [0])'], {}), '(resized_mask == [0])\n', (867, 888), True, 'import numpy as np\n'), ((1098, 1213), 'cv2.resize', 'cv2.resize', (['gray'], {'dsize': '(self.motion_frame_size[1], self.motion_frame_size[0])', 'interpolation': 'cv2.INTER_LINEAR'}), '(gray, dsize=(self.motion_frame_size[1], self.motion_frame_size[0\n ]), interpolation=cv2.INTER_LINEAR)\n', (1108, 1213), False, 'import cv2\n'), ((1346, 1377), 'numpy.percentile', 'np.percentile', (['resized_frame', '(4)'], {}), '(resized_frame, 4)\n', (1359, 1377), True, 'import numpy as np\n'), ((1399, 1431), 'numpy.percentile', 'np.percentile', (['resized_frame', '(96)'], {}), '(resized_frame, 96)\n', (1412, 1431), True, 'import numpy as np\n'), ((2449, 2524), 'cv2.accumulateWeighted', 'cv2.accumulateWeighted', (['frameDelta', 'self.avg_delta', 'self.config.delta_alpha'], {}), '(frameDelta, self.avg_delta, self.config.delta_alpha)\n', (2471, 2524), False, 'import cv2\n'), ((2853, 2888), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['self.avg_delta'], {}), '(self.avg_delta)\n', (2872, 2888), False, 'import cv2\n'), ((2919, 2967), 'cv2.bitwise_and', 'cv2.bitwise_and', (['avg_delta_image', 'current_thresh'], {}), '(avg_delta_image, current_thresh)\n', (2934, 2967), False, 'import cv2\n'), 
((3441, 3479), 'cv2.dilate', 'cv2.dilate', (['thresh', 'None'], {'iterations': '(2)'}), '(thresh, None, iterations=2)\n', (3451, 3479), False, 'import cv2\n'), ((3499, 3575), 'cv2.findContours', 'cv2.findContours', (['thresh_dilated', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(thresh_dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (3515, 3575), False, 'import cv2\n'), ((3625, 3652), 'imutils.grab_contours', 'imutils.grab_contours', (['cnts'], {}), '(cnts)\n', (3646, 3652), False, 'import imutils\n'), ((6034, 6112), 'cv2.accumulateWeighted', 'cv2.accumulateWeighted', (['resized_frame', 'self.avg_frame', 'self.config.frame_alpha'], {}), '(resized_frame, self.avg_frame, self.config.frame_alpha)\n', (6056, 6112), False, 'import cv2\n'), ((1554, 1592), 'numpy.clip', 'np.clip', (['resized_frame', 'minval', 'maxval'], {}), '(resized_frame, minval, maxval)\n', (1561, 1592), True, 'import numpy as np\n'), ((2138, 2173), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['self.avg_frame'], {}), '(self.avg_frame)\n', (2157, 2173), False, 'import cv2\n'), ((2619, 2691), 'cv2.threshold', 'cv2.threshold', (['frameDelta', 'self.config.threshold', '(255)', 'cv2.THRESH_BINARY'], {}), '(frameDelta, self.config.threshold, 255, cv2.THRESH_BINARY)\n', (2632, 2691), False, 'import cv2\n'), ((3185, 3262), 'cv2.threshold', 'cv2.threshold', (['avg_delta_image', 'self.config.threshold', '(255)', 'cv2.THRESH_BINARY'], {}), '(avg_delta_image, self.config.threshold, 255, cv2.THRESH_BINARY)\n', (3198, 3262), False, 'import cv2\n'), ((3816, 3834), 'cv2.contourArea', 'cv2.contourArea', (['c'], {}), '(c)\n', (3831, 3834), False, 'import cv2\n'), ((4370, 4418), 'cv2.cvtColor', 'cv2.cvtColor', (['thresh_dilated', 'cv2.COLOR_GRAY2BGR'], {}), '(thresh_dilated, cv2.COLOR_GRAY2BGR)\n', (4382, 4418), False, 'import cv2\n'), ((5467, 5506), 'cv2.vconcat', 'cv2.vconcat', (['[image_row_1, image_row_2]'], {}), '([image_row_1, image_row_2])\n', (5478, 5506), False, 'import cv2\n'), ((5523, 
5593), 'cv2.imwrite', 'cv2.imwrite', (['f"""motion/motion-{self.frame_counter}.jpg"""', 'combined_image'], {}), "(f'motion/motion-{self.frame_counter}.jpg', combined_image)\n", (5534, 5593), False, 'import cv2\n'), ((5821, 5899), 'cv2.accumulateWeighted', 'cv2.accumulateWeighted', (['resized_frame', 'self.avg_frame', 'self.config.frame_alpha'], {}), '(resized_frame, self.avg_frame, self.config.frame_alpha)\n', (5843, 5899), False, 'import cv2\n'), ((3928, 3947), 'cv2.boundingRect', 'cv2.boundingRect', (['c'], {}), '(c)\n', (3944, 3947), False, 'import cv2\n'), ((4565, 4583), 'cv2.contourArea', 'cv2.contourArea', (['c'], {}), '(c)\n', (4580, 4583), False, 'import cv2\n'), ((4727, 4746), 'cv2.boundingRect', 'cv2.boundingRect', (['c'], {}), '(c)\n', (4743, 4746), False, 'import cv2\n'), ((4771, 4840), 'cv2.rectangle', 'cv2.rectangle', (['thresh_dilated', '(x, y)', '(x + w, y + h)', '(0, 0, 255)', '(2)'], {}), '(thresh_dilated, (x, y), (x + w, y + h), (0, 0, 255), 2)\n', (4784, 4840), False, 'import cv2\n'), ((5133, 5177), 'cv2.cvtColor', 'cv2.cvtColor', (['frameDelta', 'cv2.COLOR_GRAY2BGR'], {}), '(frameDelta, cv2.COLOR_GRAY2BGR)\n', (5145, 5177), False, 'import cv2\n'), ((5203, 5252), 'cv2.cvtColor', 'cv2.cvtColor', (['avg_delta_image', 'cv2.COLOR_GRAY2BGR'], {}), '(avg_delta_image, cv2.COLOR_GRAY2BGR)\n', (5215, 5252), False, 'import cv2\n'), ((5358, 5398), 'cv2.cvtColor', 'cv2.cvtColor', (['thresh', 'cv2.COLOR_GRAY2BGR'], {}), '(thresh, cv2.COLOR_GRAY2BGR)\n', (5370, 5398), False, 'import cv2\n')] |
#!/usr/bin/env python
# coding=utf-8
try:
import mock
except ImportError:
import unittest.mock as mock
import numpy as np
from marvin_image_classification_engine.training import MetricsEvaluator
@mock.patch('marvin_image_classification_engine.training.metrics_evaluator.sk_metrics.accuracy_score')
@mock.patch('marvin_image_classification_engine.training.metrics_evaluator.cv2.imread')
def test_execute(mocked_imread, mocked_score):
    """MetricsEvaluator.execute reads images via cv2 and scores via sklearn.

    The two mock.patch decorators inject mocks bottom-up: cv2.imread first,
    then accuracy_score. The original signature declared a third parameter
    (mocked_params) with no matching decorator, which pytest would try to
    resolve as a nonexistent fixture; it is a plain local dict instead.
    """
    test_data = {
        'train': ['t0'],
        'val': ['t1']
    }
    params = {
        'TEST_STEPS': 20
    }
    # 3x3x3 array stands in for a decoded image
    mocked_imread.return_value = np.array([[[0, 1, 2], [1,2, 3], [2,3, 4]], [[0, 1, 2], [1,2, 3], [2,3, 4]], [[0, 1, 2], [1,2, 3], [2,3, 4]]])
    mocked_model = mock.MagicMock()
    ac = MetricsEvaluator(model=mocked_model, dataset=test_data)
    ac.execute(params=params)
    mocked_imread.assert_called_once()
    mocked_score.assert_called_once()
| [
"unittest.mock.patch",
"numpy.array",
"unittest.mock.MagicMock",
"marvin_image_classification_engine.training.MetricsEvaluator"
] | [((209, 320), 'unittest.mock.patch', 'mock.patch', (['"""marvin_image_classification_engine.training.metrics_evaluator.sk_metrics.accuracy_score"""'], {}), "(\n 'marvin_image_classification_engine.training.metrics_evaluator.sk_metrics.accuracy_score'\n )\n", (219, 320), True, 'import unittest.mock as mock\n'), ((312, 403), 'unittest.mock.patch', 'mock.patch', (['"""marvin_image_classification_engine.training.metrics_evaluator.cv2.imread"""'], {}), "(\n 'marvin_image_classification_engine.training.metrics_evaluator.cv2.imread')\n", (322, 403), True, 'import unittest.mock as mock\n'), ((621, 741), 'numpy.array', 'np.array', (['[[[0, 1, 2], [1, 2, 3], [2, 3, 4]], [[0, 1, 2], [1, 2, 3], [2, 3, 4]], [[0,\n 1, 2], [1, 2, 3], [2, 3, 4]]]'], {}), '([[[0, 1, 2], [1, 2, 3], [2, 3, 4]], [[0, 1, 2], [1, 2, 3], [2, 3, \n 4]], [[0, 1, 2], [1, 2, 3], [2, 3, 4]]])\n', (629, 741), True, 'import numpy as np\n'), ((751, 767), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (765, 767), True, 'import unittest.mock as mock\n'), ((778, 833), 'marvin_image_classification_engine.training.MetricsEvaluator', 'MetricsEvaluator', ([], {'model': 'mocked_model', 'dataset': 'test_data'}), '(model=mocked_model, dataset=test_data)\n', (794, 833), False, 'from marvin_image_classification_engine.training import MetricsEvaluator\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.