code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
# See: https://spdx.org/licenses/
import unittest
import numpy as np
from lava.lib.optimization.problems.constraints import (
DiscreteConstraints,
EqualityConstraints,
InequalityConstraints,
ArithmeticConstraints,
Constraints,
)
from lava.lib.optimization.problems.coefficients import (
CoefficientTensorsMixin,
)
class TestDiscreteConstraint(unittest.TestCase):
    """Unit tests for DiscreteConstraints: construction, input validation,
    and the derived ``var_subsets``/``relations`` views."""

    def setUp(self) -> None:
        # Each constraint is a tuple (var_a, var_b, relation_matrix) over a
        # pair of variables; the matrix marks allowed value combinations.
        constraints = [(0, 1, np.logical_not(np.eye(5))), (1, 2, np.eye(5, 4))]
        self.relations_2d = [np.logical_not(np.eye(5)), np.eye(5, 4)]
        self.dconstraint = DiscreteConstraints(constraints)

    def test_create_obj(self):
        # The fixture constructs successfully.
        self.assertIsInstance(self.dconstraint, DiscreteConstraints)

    def test_var_subset(self):
        # var_subsets exposes the variable pairs in construction order.
        self.assertEqual(self.dconstraint.var_subsets, [(0, 1), (1, 2)])

    def test_var_subset_is_required(self):
        # The constraints argument has no default.
        with self.assertRaises(TypeError):
            DiscreteConstraints()

    def test_relation(self):
        # relations exposes the relation matrices in construction order.
        for n, relation in enumerate(self.relations_2d):
            with self.subTest(msg=f"Test id {n}"):
                self.assertTrue(
                    (self.dconstraint.relations[n] == relation).all()
                )

    def test__input_validation_relation_matches_var_subset_dimension(self):
        # A 2-D relation matrix paired with 3 variables must be rejected.
        constraints = [
            (0, 1, 2, np.logical_not(np.eye(5))),
            (1, 2, np.eye(5, 4)),
        ]
        with self.assertRaises(ValueError):
            DiscreteConstraints(constraints)

    def test_set_constraints(self):
        # The constraints setter stores the given list by reference.
        new_constraints = [
            (1, 2, np.logical_not(np.eye(5, 4))),
            (0, 1, np.eye(5)),
        ]
        self.dconstraint.constraints = new_constraints
        self.assertIs(self.dconstraint.constraints, new_constraints)

    def test_setted_relation(self):
        # Setting constraints refreshes the derived relations.
        new_constraints = [
            (1, 2, np.logical_not(np.eye(5, 4))),
            (0, 1, np.eye(5)),
        ]
        self.dconstraint.constraints = new_constraints
        new_relations = [np.logical_not(np.eye(5, 4)), np.eye(5)]
        for n, relation in enumerate(new_relations):
            with self.subTest(msg=f"Test id {n}"):
                self.assertTrue(
                    (self.dconstraint.relations[n] == relation).all()
                )

    def test_setted_var_subset(self):
        # Setting constraints refreshes the derived var_subsets.
        new_constraints = [
            (1, 2, np.logical_not(np.eye(5, 4))),
            (0, 1, np.eye(5)),
        ]
        self.dconstraint.constraints = new_constraints
        self.assertEqual(self.dconstraint.var_subsets, [(1, 2), (0, 1)])

    def test_var_subsets_from_function_set_relations_var_subsets(self):
        # set_relations_var_subsets updates the private _var_subset field.
        new_constraints = [
            (1, 2, np.logical_not(np.eye(5, 4))),
            (0, 1, np.eye(5)),
        ]
        self.dconstraint.set_relations_var_subsets(new_constraints)
        self.assertEqual(self.dconstraint._var_subset, [(1, 2), (0, 1)])

    def test__relations_from_function_set_relations_var_subsets(self):
        # set_relations_var_subsets updates the private _relations field.
        new_constraints = [
            (1, 2, np.logical_not(np.eye(5, 4))),
            (0, 1, np.eye(5)),
        ]
        self.dconstraint.set_relations_var_subsets(new_constraints)
        for n, relation in enumerate(
            [np.logical_not(np.eye(5, 4)), np.eye(5)]
        ):
            with self.subTest(msg=f"Relation index {n}"):
                self.assertTrue(
                    (self.dconstraint._relations[n] == relation).all()
                )
class TestEqualityConstraint(unittest.TestCase):
    """Tests that EqualityConstraints builds from coefficient tensors and
    inherits the coefficient-handling mixin."""

    def setUp(self) -> None:
        # Coefficient tensors of ranks 0 through 3.
        tensors = (np.asarray(1), np.ones(2), np.ones((2, 2)), np.ones((2, 2, 2)))
        self.constraint = EqualityConstraints(*tensors)

    def test_create_obj(self):
        """The fixture is an EqualityConstraints instance."""
        self.assertIsInstance(self.constraint, EqualityConstraints)

    def test_created_obj_includes_mixin(self):
        """EqualityConstraints mixes in CoefficientTensorsMixin."""
        self.assertIsInstance(self.constraint, CoefficientTensorsMixin)
class TestInequalityConstraint(unittest.TestCase):
    """Tests that InequalityConstraints builds from coefficient tensors and
    inherits the coefficient-handling mixin."""

    def setUp(self) -> None:
        # Coefficient tensors of ranks 0 through 3.
        tensors = (np.asarray(1), np.ones(2), np.ones((2, 2)), np.ones((2, 2, 2)))
        self.constraint = InequalityConstraints(*tensors)

    def test_create_obj(self):
        """The fixture is an InequalityConstraints instance."""
        self.assertIsInstance(self.constraint, InequalityConstraints)

    def test_created_obj_includes_mixin(self):
        """InequalityConstraints mixes in CoefficientTensorsMixin."""
        self.assertIsInstance(self.constraint, CoefficientTensorsMixin)
class TestArithmeticConstraint(unittest.TestCase):
    """Tests for assigning equality/inequality coefficient tensors on an
    ArithmeticConstraints instance."""

    def setUp(self) -> None:
        self.constraint = ArithmeticConstraints()

    def test_create_obj(self):
        """The fixture is an ArithmeticConstraints instance."""
        self.assertIsInstance(self.constraint, ArithmeticConstraints)

    def test_set_arithmetic_constraints_equality(self):
        """Tensors assigned to ``equality`` are stored as coefficients."""
        tensors = (np.asarray(1), np.ones(2), np.ones((2, 2)))
        self.constraint.equality = tensors
        stored = self.constraint.equality.coefficients
        for idx, tensor in enumerate(tensors):
            with self.subTest(msg=f"{idx}"):
                self.assertTrue((tensor == stored[idx]).all())

    def test_set_arithmetic_constraints_inequality(self):
        """Tensors assigned to ``inequality`` are stored as coefficients."""
        tensors = (np.asarray(1), np.ones(2), np.ones((2, 2)))
        self.constraint.inequality = tensors
        stored = self.constraint.inequality.coefficients
        for idx, tensor in enumerate(tensors):
            with self.subTest(msg=f"{idx}"):
                self.assertTrue((tensor == stored[idx]).all())
class TestConstraints(unittest.TestCase):
    """Tests for the Constraints container holding the discrete and
    arithmetic constraint groups."""

    def setUp(self) -> None:
        self.constraints = Constraints()

    def test_create_obj(self):
        """The fixture is a Constraints instance."""
        self.assertIsInstance(self.constraints, Constraints)

    def test_discrete_defaults_to_none(self):
        """No discrete constraints are present on a fresh instance."""
        self.assertIsNone(self.constraints.discrete)

    def test_arithmetic_defaults_to_none(self):
        """No arithmetic constraints are present on a fresh instance."""
        self.assertIsNone(self.constraints.arithmetic)

    def test_set_discrete_constraints(self):
        """The discrete setter stores the given constraint list by reference."""
        new_constraints = [(0, 1, np.eye(5))]
        self.constraints.discrete = DiscreteConstraints(new_constraints)
        self.assertIs(self.constraints.discrete._constraints, new_constraints)

    def test_class_of_setted_discrete_constraints(self):
        """The discrete attribute holds a DiscreteConstraints instance."""
        new_constraints = [(0, 1, np.eye(5))]
        self.constraints.discrete = DiscreteConstraints(new_constraints)
        self.assertIsInstance(self.constraints.discrete, DiscreteConstraints)

    def test_set_arithmetic_constraint(self):
        """The arithmetic setter stores the given object by reference.

        BUG FIX: this method was named ``teest_set_arithmetic_constraint``,
        so unittest discovery (which matches the ``test`` prefix) never ran it.
        """
        new_constraint = ArithmeticConstraints()
        self.constraints.arithmetic = new_constraint
        self.assertIs(self.constraints.arithmetic, new_constraint)

    def test_class_of_setted_arithmetic_constraints(self):
        """The arithmetic attribute holds an ArithmeticConstraints instance."""
        new_constraint = ArithmeticConstraints()
        self.constraints.arithmetic = new_constraint
        self.assertIsInstance(
            self.constraints.arithmetic, ArithmeticConstraints
        )
if __name__ == "__main__":
unittest.main()
| [
"numpy.eye",
"numpy.ones",
"lava.lib.optimization.problems.constraints.ArithmeticConstraints",
"lava.lib.optimization.problems.constraints.Constraints",
"numpy.asarray",
"lava.lib.optimization.problems.constraints.EqualityConstraints",
"lava.lib.optimization.problems.constraints.DiscreteConstraints",
... | [((7287, 7302), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7300, 7302), False, 'import unittest\n'), ((677, 709), 'lava.lib.optimization.problems.constraints.DiscreteConstraints', 'DiscreteConstraints', (['constraints'], {}), '(constraints)\n', (696, 709), False, 'from lava.lib.optimization.problems.constraints import DiscreteConstraints, EqualityConstraints, InequalityConstraints, ArithmeticConstraints, Constraints\n'), ((3779, 3816), 'lava.lib.optimization.problems.constraints.EqualityConstraints', 'EqualityConstraints', (['*coefficients_np'], {}), '(*coefficients_np)\n', (3798, 3816), False, 'from lava.lib.optimization.problems.constraints import DiscreteConstraints, EqualityConstraints, InequalityConstraints, ArithmeticConstraints, Constraints\n'), ((4295, 4334), 'lava.lib.optimization.problems.constraints.InequalityConstraints', 'InequalityConstraints', (['*coefficients_np'], {}), '(*coefficients_np)\n', (4316, 4334), False, 'from lava.lib.optimization.problems.constraints import DiscreteConstraints, EqualityConstraints, InequalityConstraints, ArithmeticConstraints, Constraints\n'), ((4665, 4688), 'lava.lib.optimization.problems.constraints.ArithmeticConstraints', 'ArithmeticConstraints', ([], {}), '()\n', (4686, 4688), False, 'from lava.lib.optimization.problems.constraints import DiscreteConstraints, EqualityConstraints, InequalityConstraints, ArithmeticConstraints, Constraints\n'), ((5961, 5974), 'lava.lib.optimization.problems.constraints.Constraints', 'Constraints', ([], {}), '()\n', (5972, 5974), False, 'from lava.lib.optimization.problems.constraints import DiscreteConstraints, EqualityConstraints, InequalityConstraints, ArithmeticConstraints, Constraints\n'), ((6400, 6436), 'lava.lib.optimization.problems.constraints.DiscreteConstraints', 'DiscreteConstraints', (['new_constraints'], {}), '(new_constraints)\n', (6419, 6436), False, 'from lava.lib.optimization.problems.constraints import DiscreteConstraints, EqualityConstraints, 
InequalityConstraints, ArithmeticConstraints, Constraints\n'), ((6656, 6692), 'lava.lib.optimization.problems.constraints.DiscreteConstraints', 'DiscreteConstraints', (['new_constraints'], {}), '(new_constraints)\n', (6675, 6692), False, 'from lava.lib.optimization.problems.constraints import DiscreteConstraints, EqualityConstraints, InequalityConstraints, ArithmeticConstraints, Constraints\n'), ((6844, 6867), 'lava.lib.optimization.problems.constraints.ArithmeticConstraints', 'ArithmeticConstraints', ([], {}), '()\n', (6865, 6867), False, 'from lava.lib.optimization.problems.constraints import DiscreteConstraints, EqualityConstraints, InequalityConstraints, ArithmeticConstraints, Constraints\n'), ((7073, 7096), 'lava.lib.optimization.problems.constraints.ArithmeticConstraints', 'ArithmeticConstraints', ([], {}), '()\n', (7094, 7096), False, 'from lava.lib.optimization.problems.constraints import DiscreteConstraints, EqualityConstraints, InequalityConstraints, ArithmeticConstraints, Constraints\n'), ((636, 648), 'numpy.eye', 'np.eye', (['(5)', '(4)'], {}), '(5, 4)\n', (642, 648), True, 'import numpy as np\n'), ((1015, 1036), 'lava.lib.optimization.problems.constraints.DiscreteConstraints', 'DiscreteConstraints', ([], {}), '()\n', (1034, 1036), False, 'from lava.lib.optimization.problems.constraints import DiscreteConstraints, EqualityConstraints, InequalityConstraints, ArithmeticConstraints, Constraints\n'), ((1547, 1579), 'lava.lib.optimization.problems.constraints.DiscreteConstraints', 'DiscreteConstraints', (['constraints'], {}), '(constraints)\n', (1566, 1579), False, 'from lava.lib.optimization.problems.constraints import DiscreteConstraints, EqualityConstraints, InequalityConstraints, ArithmeticConstraints, Constraints\n'), ((2126, 2135), 'numpy.eye', 'np.eye', (['(5)'], {}), '(5)\n', (2132, 2135), True, 'import numpy as np\n'), ((3643, 3656), 'numpy.asarray', 'np.asarray', (['(1)'], {}), '(1)\n', (3653, 3656), True, 'import numpy as np\n'), ((3670, 3680), 
'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (3677, 3680), True, 'import numpy as np\n'), ((3694, 3709), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (3701, 3709), True, 'import numpy as np\n'), ((3723, 3741), 'numpy.ones', 'np.ones', (['(2, 2, 2)'], {}), '((2, 2, 2))\n', (3730, 3741), True, 'import numpy as np\n'), ((4159, 4172), 'numpy.asarray', 'np.asarray', (['(1)'], {}), '(1)\n', (4169, 4172), True, 'import numpy as np\n'), ((4186, 4196), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (4193, 4196), True, 'import numpy as np\n'), ((4210, 4225), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (4217, 4225), True, 'import numpy as np\n'), ((4239, 4257), 'numpy.ones', 'np.ones', (['(2, 2, 2)'], {}), '((2, 2, 2))\n', (4246, 4257), True, 'import numpy as np\n'), ((4891, 4904), 'numpy.asarray', 'np.asarray', (['(1)'], {}), '(1)\n', (4901, 4904), True, 'import numpy as np\n'), ((4918, 4928), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (4925, 4928), True, 'import numpy as np\n'), ((4942, 4957), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (4949, 4957), True, 'import numpy as np\n'), ((5412, 5425), 'numpy.asarray', 'np.asarray', (['(1)'], {}), '(1)\n', (5422, 5425), True, 'import numpy as np\n'), ((5439, 5449), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (5446, 5449), True, 'import numpy as np\n'), ((5463, 5478), 'numpy.ones', 'np.ones', (['(2, 2)'], {}), '((2, 2))\n', (5470, 5478), True, 'import numpy as np\n'), ((565, 577), 'numpy.eye', 'np.eye', (['(5)', '(4)'], {}), '(5, 4)\n', (571, 577), True, 'import numpy as np\n'), ((624, 633), 'numpy.eye', 'np.eye', (['(5)'], {}), '(5)\n', (630, 633), True, 'import numpy as np\n'), ((1466, 1478), 'numpy.eye', 'np.eye', (['(5)', '(4)'], {}), '(5, 4)\n', (1472, 1478), True, 'import numpy as np\n'), ((1714, 1723), 'numpy.eye', 'np.eye', (['(5)'], {}), '(5)\n', (1720, 1723), True, 'import numpy as np\n'), ((1994, 2003), 'numpy.eye', 'np.eye', (['(5)'], {}), '(5)\n', (2000, 2003), 
True, 'import numpy as np\n'), ((2111, 2123), 'numpy.eye', 'np.eye', (['(5)', '(4)'], {}), '(5, 4)\n', (2117, 2123), True, 'import numpy as np\n'), ((2498, 2507), 'numpy.eye', 'np.eye', (['(5)'], {}), '(5)\n', (2504, 2507), True, 'import numpy as np\n'), ((2818, 2827), 'numpy.eye', 'np.eye', (['(5)'], {}), '(5)\n', (2824, 2827), True, 'import numpy as np\n'), ((3150, 3159), 'numpy.eye', 'np.eye', (['(5)'], {}), '(5)\n', (3156, 3159), True, 'import numpy as np\n'), ((3321, 3330), 'numpy.eye', 'np.eye', (['(5)'], {}), '(5)\n', (3327, 3330), True, 'import numpy as np\n'), ((6352, 6361), 'numpy.eye', 'np.eye', (['(5)'], {}), '(5)\n', (6358, 6361), True, 'import numpy as np\n'), ((6608, 6617), 'numpy.eye', 'np.eye', (['(5)'], {}), '(5)\n', (6614, 6617), True, 'import numpy as np\n'), ((545, 554), 'numpy.eye', 'np.eye', (['(5)'], {}), '(5)\n', (551, 554), True, 'import numpy as np\n'), ((1434, 1443), 'numpy.eye', 'np.eye', (['(5)'], {}), '(5)\n', (1440, 1443), True, 'import numpy as np\n'), ((1679, 1691), 'numpy.eye', 'np.eye', (['(5)', '(4)'], {}), '(5, 4)\n', (1685, 1691), True, 'import numpy as np\n'), ((1959, 1971), 'numpy.eye', 'np.eye', (['(5)', '(4)'], {}), '(5, 4)\n', (1965, 1971), True, 'import numpy as np\n'), ((2463, 2475), 'numpy.eye', 'np.eye', (['(5)', '(4)'], {}), '(5, 4)\n', (2469, 2475), True, 'import numpy as np\n'), ((2783, 2795), 'numpy.eye', 'np.eye', (['(5)', '(4)'], {}), '(5, 4)\n', (2789, 2795), True, 'import numpy as np\n'), ((3115, 3127), 'numpy.eye', 'np.eye', (['(5)', '(4)'], {}), '(5, 4)\n', (3121, 3127), True, 'import numpy as np\n'), ((3306, 3318), 'numpy.eye', 'np.eye', (['(5)', '(4)'], {}), '(5, 4)\n', (3312, 3318), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 1 11:59:48 2020
@author: elizabeth_mckenzie
"""
import nibabel as nib
import numpy as np
import os
import glob
def main(predicted_imgs, cropped_imgs, uncropped_imgs, masks):
    """Convert the .npz dataset images in the four directories to .nii.gz.

    Parameters
    ----------
    predicted_imgs, cropped_imgs, uncropped_imgs, masks : str
        Directories containing .npz volumes; files are matched across the
        four directories by sorted order.
    """
    # Sort so the four directories stay aligned file-by-file.
    pred_files = sorted(glob.glob(os.path.join(predicted_imgs, '*.npz')))
    cropped_files = sorted(glob.glob(os.path.join(cropped_imgs, '*.npz')))
    uncropped_files = sorted(glob.glob(os.path.join(uncropped_imgs, '*.npz')))
    mask_files = sorted(glob.glob(os.path.join(masks, '*.npz')))
    for file, file_original, file_target, file_mask in zip(pred_files, cropped_files, uncropped_files, mask_files):
        # BUG FIX: os.path.basename is portable; splitting on '/' breaks on
        # Windows-style paths.
        load_and_transform(predicted_imgs, os.path.basename(file))
        load_and_transform(cropped_imgs, os.path.basename(file_original))
        load_and_transform(uncropped_imgs, os.path.basename(file_target))
        load_and_transform(masks, os.path.basename(file_mask))
        print('saved %s as nifti file' % os.path.basename(file))
        print('saved %s as nifti file' % os.path.basename(file_original))
def load_and_transform(path, file):
    """Load the 'vol_data' array from an .npz file and save it as .nii.gz.

    Parameters
    ----------
    path : str
        Directory containing the file.
    file : str
        Filename ending in '.npz'; the output keeps the stem with a
        '.nii.gz' suffix.
    """
    this_file = os.path.join(path, file)
    # BUG FIX: np.load on an .npz returns an open NpzFile; use a context
    # manager so the underlying file handle is closed.
    with np.load(this_file) as npz:
        npvol = npz['vol_data'].astype('float32')
    # Identity affine: the .npz archive carries no spatial metadata.
    img_nii = nib.Nifti1Image(npvol, np.eye(4))
    nib.save(img_nii, os.path.join(path, file.replace('.npz', '.nii.gz')))
if __name__ == '__main__':
main() | [
"numpy.eye",
"numpy.load",
"os.path.join"
] | [((1194, 1218), 'os.path.join', 'os.path.join', (['path', 'file'], {}), '(path, file)\n', (1206, 1218), False, 'import os\n'), ((1317, 1326), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1323, 1326), True, 'import numpy as np\n'), ((336, 373), 'os.path.join', 'os.path.join', (['predicted_imgs', '"""*.npz"""'], {}), "(predicted_imgs, '*.npz')\n", (348, 373), False, 'import os\n'), ((414, 449), 'os.path.join', 'os.path.join', (['cropped_imgs', '"""*.npz"""'], {}), "(cropped_imgs, '*.npz')\n", (426, 449), False, 'import os\n'), ((492, 529), 'os.path.join', 'os.path.join', (['uncropped_imgs', '"""*.npz"""'], {}), "(uncropped_imgs, '*.npz')\n", (504, 529), False, 'import os\n'), ((567, 595), 'os.path.join', 'os.path.join', (['masks', '"""*.npz"""'], {}), "(masks, '*.npz')\n", (579, 595), False, 'import os\n'), ((1231, 1249), 'numpy.load', 'np.load', (['this_file'], {}), '(this_file)\n', (1238, 1249), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
visualization.py: visualizing the results of Random Forest image classification
@author: <NAME>, <NAME>
"""
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.patches import Patch
import matplotlib as mpl
import numpy as np
import pandas as pd
def get_meta(number):
    """
    Retrieve the level 3 classname and official RGB value for each clc class.

    Parameters
    ----------
    number: int
        CLC_CODE of the class; 0 is mapped to white / 'other'.

    Examples
    --------
    >>> get_meta(111)

    Returns
    -------
    Official RGB colors as (r,g,b), scaled to [0, 1]
    Level 3 name of the class
    """
    if number != 0:
        legend = pd.read_csv(r'../data/clc_legend.txt', header=None)
        # First matching row: columns 1-3 hold R,G,B (0-255), column 5 the name.
        row = legend.loc[legend[0] == number].iloc[0]
        rgb = (row[1] / 255, row[2] / 255, row[3] / 255)
        name = row[5]
    else:
        rgb = (1, 1, 1)
        name = 'other'
    return rgb, name
def plotResult(prediction, imagedim=(127, 455), show=True, fp=None):
    """
    Plot a predicted label image using official Corine Land Cover colors.

    Parameters
    ----------
    prediction: array
        array with predicted labels
    imagedim: sequence of int
        height and width of the resulting image (default (127, 455));
        an immutable tuple avoids the mutable-default-argument pitfall
    show: boolean (True)
        If True, plot will be shown
    fp: str (optional)
        filepath to save the plot on disk

    Examples
    --------
    >>> plotResult(base_prediction, [127, 455])

    Returns
    -------
    Nothing
    """
    grid = prediction.reshape((imagedim[0], imagedim[1]))
    values = np.unique(prediction.ravel())
    img = np.empty((grid.shape[0], grid.shape[1], 3))
    legend_elements = []
    for i in values:
        # get the official Corine Land Cover RGB values and level 3 class name
        rgb, name = get_meta(i)
        img[np.where(grid == i)] = rgb
        legend_elements.append(
            Patch(facecolor=rgb, edgecolor=None,
                  label=name)
        )
    plt.figure(figsize=(10, 10))
    plt.imshow(img, interpolation='none')
    plt.legend(handles=legend_elements, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    plt.tight_layout()
    # FIX: compare against None with `is not` (PEP 8), not `!=`.
    if fp is not None:
        plt.savefig(fp)
    if show:
        plt.show()
| [
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"numpy.where",
"matplotlib.pyplot.figure",
"numpy.empty",
"matplotlib.patches.Patch",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((1596, 1639), 'numpy.empty', 'np.empty', (['(grid.shape[0], grid.shape[1], 3)'], {}), '((grid.shape[0], grid.shape[1], 3))\n', (1604, 1639), True, 'import numpy as np\n'), ((1959, 1987), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (1969, 1987), True, 'import matplotlib.pyplot as plt\n'), ((1991, 2028), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {'interpolation': '"""none"""'}), "(img, interpolation='none')\n", (2001, 2028), True, 'import matplotlib.pyplot as plt\n'), ((2033, 2124), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'handles': 'legend_elements', 'bbox_to_anchor': '(1.05, 1)', 'loc': '(2)', 'borderaxespad': '(0.0)'}), '(handles=legend_elements, bbox_to_anchor=(1.05, 1), loc=2,\n borderaxespad=0.0)\n', (2043, 2124), True, 'import matplotlib.pyplot as plt\n'), ((2125, 2143), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2141, 2143), True, 'import matplotlib.pyplot as plt\n'), ((665, 715), 'pandas.read_csv', 'pd.read_csv', (['"""../data/clc_legend.txt"""'], {'header': 'None'}), "('../data/clc_legend.txt', header=None)\n", (676, 715), True, 'import pandas as pd\n'), ((2171, 2186), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fp'], {}), '(fp)\n', (2182, 2186), True, 'import matplotlib.pyplot as plt\n'), ((2208, 2218), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2216, 2218), True, 'import matplotlib.pyplot as plt\n'), ((1804, 1823), 'numpy.where', 'np.where', (['(grid == i)'], {}), '(grid == i)\n', (1812, 1823), True, 'import numpy as np\n'), ((1871, 1919), 'matplotlib.patches.Patch', 'Patch', ([], {'facecolor': 'rgb', 'edgecolor': 'None', 'label': 'name'}), '(facecolor=rgb, edgecolor=None, label=name)\n', (1876, 1919), False, 'from matplotlib.patches import Patch\n')] |
import numpy as np
import torch
import torch.nn as nn
import hyper_parameters as parameters
import parameter_calculator as calculator
from torch.utils.data import DataLoader, Dataset
# The path to save the trained generator model.
GEN_PATH = 'saved_models/generator_net.pt'
# The element compositions of the 278 existing CCAs.
# NOTE(review): cca_compositions is not referenced anywhere below in this
# file — confirm it is used by an importer before removing.
cca_compositions = np.genfromtxt('data/train_composition.csv', delimiter=',')
# The empirical parameters of the 278 existing CCAs.
cca_parameters = np.loadtxt('data/train_parameter.csv', delimiter=',')
# Per-parameter mean/std, used below to z-score-normalize empirical parameters.
param_mean = cca_parameters.mean(axis=0)
param_std = cca_parameters.std(axis=0)
# —————————————————————————————————— Customize the training set ————————————————————————————————————————
class TrainingSet(Dataset):
    """
    A customized Dataset used to train the cardiGAN model. It holds the
    element compositions and empirical parameters of the 278 existing CCAs.
    Each item is a single float tensor: composition columns, a trailing
    zero column, then the z-score-normalized selected parameters.
    """
    def __init__(self):
        # Load the element compositions of the 278 existing CCAs.
        compositions = np.loadtxt('data/train_composition.csv', delimiter=',')
        # Append a column of zeros (ones - 1) to each composition row.
        compositions = np.concatenate(
            (compositions, np.ones((compositions.shape[0], 1)) - 1), axis=1)
        # Load the empirical parameters and normalize them with the
        # module-level mean/std (z-score normalization).
        cca_params = np.loadtxt('data/train_parameter.csv', delimiter=',')
        cca_params = (cca_params - param_mean) / param_std
        # Build the training set by concatenating the composition and the
        # selected (GAN_param_selection) parameter columns.
        self.data = torch.from_numpy(
            np.concatenate((compositions, cca_params[:, parameters.GAN_param_selection]), axis=1)).float()
        self.len = compositions.shape[0]  # The length of the training set.
    def __getitem__(self, index):
        # Return one sample (composition + normalized parameters) by index.
        return self.data[index]
    def __len__(self):
        return self.len
# ————————————————————————————————— Define the neural networks ————————————————————————————————————
class Generator(nn.Module):
    """
    Generator network of the cardiGAN model. Maps latent Gaussian noise
    vectors (dimension ``parameters.num_latent``) to non-negative element
    composition vectors (dimension ``parameters.num_elements``). One hidden
    layer with LeakyReLU; the ReLU output keeps compositions sparse and
    non-negative.
    """

    def __init__(self):
        super().__init__()
        layers = [
            nn.Linear(parameters.num_latent, parameters.num_elements),
            nn.LeakyReLU(),
            nn.Linear(parameters.num_elements, parameters.num_elements),
            nn.ReLU(),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, noise_input):
        """Return generated CCA candidate compositions for the given noise."""
        return self.model(noise_input)
class Discriminator(nn.Module):
    """
    Critic network of the cardiGAN model. Trained (with weight clipping in
    the training loop) to approximate a k-Lipschitz function whose output
    difference between real and generated inputs estimates the Wasserstein
    distance. Input width is composition + parameters + 1 slack column;
    output is a scalar score. Two hidden LeakyReLU layers.
    """

    def __init__(self):
        super().__init__()
        # All hidden layers share the input width.
        width = parameters.num_elements + parameters.num_params + 1
        self.model = nn.Sequential(
            nn.Linear(width, width),
            nn.LeakyReLU(),
            nn.Linear(width, width),
            nn.LeakyReLU(),
            nn.Linear(width, 1),
        )

    def forward(self, x):
        """Return the critic score for each input row."""
        return self.model(x)
class Classifier(nn.Module):
    """
    Phase classifier of the cardiGAN model. Maps 12 empirical parameters to
    3 phase-class logits (single solid-solution, mixed solid-solution,
    solid-solution with secondary phases). Two hidden LeakyReLU layers; the
    weights used during GAN training are loaded from a pre-trained file.
    """

    def __init__(self):
        super().__init__()
        stages = [
            nn.Linear(12, 12),
            nn.LeakyReLU(),
            nn.Linear(12, 12),
            nn.LeakyReLU(),
            nn.Linear(12, 3),
        ]
        self.model = nn.Sequential(*stages)

    def forward(self, x):
        """Return unnormalized phase logits for each input row."""
        return self.model(x)
# ————————————————————————————————— Set up the neural networks ————————————————————————————————————
generator = Generator()
discriminator = Discriminator()
# Load the trained phase classifier model.
classifier_path = 'saved_models/classifier_net.pt'
classifier = Classifier()
classifier.load_state_dict(torch.load(classifier_path))
# Inference mode only: the classifier is frozen during GAN training.
classifier.eval()
# As recommended in 'Wasserstein GAN' (https://arxiv.org/abs/1701.07875), both networks apply RMSprop optimization.
optimizer_G = torch.optim.RMSprop(generator.parameters(), lr=parameters.lr_generator, )
optimizer_D = torch.optim.RMSprop(discriminator.parameters(), lr=parameters.lr_discriminator, )
# ————————————————————————————————— Set up train functions ————————————————————————————————————————
def generate_novel_input(size):
    """
    This function applies the generator and phase classifier to generate novel CCA candidates and calculate their
    empirical parameters.
    :param size: The number of generated candidates.
    :return: Tuple of (discriminator input: normalized compositions + slack
        column + GAN-selected normalized parameters, classifier input:
        ANN-selected normalized parameters).
    """
    # Use a Gaussian distributed noise to generate novel CCA compositions.
    noise = torch.tensor(np.random.randn(size, parameters.num_latent)).float()
    # 1e-9 keeps compositions strictly positive before normalization.
    novel_alloy = generator(noise) + 1e-9
    # Normalize each row to sum to ~1; 1e-6 guards against division by zero.
    novel_alloy_norm = novel_alloy / (torch.sum(novel_alloy, axis=1).view(noise.shape[0], -1) + 1e-6)
    # Compute the empirical parameters of the candidates, z-scored with the
    # same mean/std used for the training set.
    novel_param = (calculator.calculate_parameters(novel_alloy_norm) - torch.tensor(
        param_mean[parameters.GAN_param_selection]).float()) / torch.tensor(
        param_std[parameters.GAN_param_selection]).float()
    phase_param = (calculator.calculate_phase_parameters(novel_alloy_norm) - torch.tensor(
        param_mean[parameters.ANN_param_selection]).float()) / torch.tensor(
        param_std[parameters.ANN_param_selection]).float()
    # Concatenate composition, a slack column (row sum - 1, measuring how
    # far the raw generator output was from summing to 1), and parameters.
    novel_alloy = torch.cat(
        [novel_alloy_norm, torch.sum(novel_alloy, axis=1).view((-1, 1)) - 1], dim=1)
    novel_input = torch.cat([novel_alloy, novel_param], dim=1)
    return novel_input, phase_param
def train_generator(real_input):
    """
    This function trains the generator network for one step.
    :param real_input: The element compositions and empirical parameters of existing CCAs
        (only its batch size is used here).
    """
    size = real_input.shape[0]
    optimizer_G.zero_grad()
    # Use the generator to generate CCA candidates and calculate their empirical parameters.
    cca_candidates, candidate_param = generate_novel_input(size)
    # Squared softmax probabilities: their mean is larger the more peaked
    # (confident) the classifier's phase prediction is.
    phase_matrix = torch.softmax(classifier(candidate_param), dim=1).view(cca_candidates.shape[0], -1)
    phase_matrix = torch.mul(phase_matrix, phase_matrix)
    phase_loss = torch.mean(phase_matrix)
    # Generator loss: negative critic score (Wasserstein term) minus the
    # phase term scaled by |g_loss| (detached via .item(), so only the
    # phase_loss path contributes gradient from that product).
    g_loss = -torch.mean(discriminator(cca_candidates))
    g_loss = g_loss - abs(g_loss.item()) * phase_loss
    g_loss.backward()
    optimizer_G.step()
def train_discriminator(real_input):
    """
    This function trains the discriminator (critic) network for one step.
    :param real_input: The element compositions and empirical parameters of existing CCAs.
    :return: The value of discriminator loss, the negative of the critic's
        estimate of the Wasserstein distance between real and generated data.
    """
    size = real_input.shape[0]
    optimizer_D.zero_grad()
    # Use the generator to generate CCA candidates and calculate their empirical parameters.
    cca_candidates, candidate_param = generate_novel_input(size)
    # WGAN critic loss: -E[D(real)] + E[D(fake)]. The candidates are
    # detached so no gradient flows into the generator here.
    d_loss = -torch.mean(discriminator(real_input)) + torch.mean(discriminator(cca_candidates.detach()))
    d_loss.backward()
    optimizer_D.step()
    return d_loss.item()
if __name__ == "__main__":
    # ————————————————————————————————— Load the training set ————————————————————————————————————————
    training_set = TrainingSet()
    loader = DataLoader(dataset=training_set, batch_size=parameters.size_batch, shuffle=True, )
    # ————————————————————————————————— Start GAN training ————————————————————————————————————————————
    for epoch in range(parameters.num_epoch):
        sum_d_loss = 0  # The sum of (negated) discriminator losses of the current epoch.
        for i, real_cca in enumerate(loader):
            # Train the generator network once per batch.
            train_generator(real_cca)
            # WGAN weight clipping keeps the critic approximately k-Lipschitz.
            # NOTE(review): clipping happens before (not after) the 5 critic
            # updates, so the critic's weights are only clipped at the start
            # of the next batch — confirm this is intended.
            for p in discriminator.parameters():
                p.data.clamp_(-parameters.clip_range, parameters.clip_range)
            for j in range(5):
                # Train the critic 5 times per generator step and accumulate
                # the (negated) loss as a Wasserstein-distance estimate.
                sum_d_loss += -train_discriminator(real_cca)
        print('Epoch:', epoch, "discriminator loss:", sum_d_loss)
        # Save the model for every 100 epochs.
        if epoch % 100 == 99:
            torch.save(generator.state_dict(), GEN_PATH)
    # Final save after the last epoch.
    torch.save(generator.state_dict(), GEN_PATH)
"torch.mul",
"torch.nn.ReLU",
"parameter_calculator.calculate_parameters",
"torch.nn.LeakyReLU",
"numpy.ones",
"torch.mean",
"torch.load",
"torch.tensor",
"numpy.random.randn",
"torch.sum",
"torch.nn.Linear",
"torch.utils.data.DataLoader",
"numpy.concatenate",
"numpy.loadtxt",
"numpy.gen... | [((347, 405), 'numpy.genfromtxt', 'np.genfromtxt', (['"""data/train_composition.csv"""'], {'delimiter': '""","""'}), "('data/train_composition.csv', delimiter=',')\n", (360, 405), True, 'import numpy as np\n'), ((476, 529), 'numpy.loadtxt', 'np.loadtxt', (['"""data/train_parameter.csv"""'], {'delimiter': '""","""'}), "('data/train_parameter.csv', delimiter=',')\n", (486, 529), True, 'import numpy as np\n'), ((4915, 4942), 'torch.load', 'torch.load', (['classifier_path'], {}), '(classifier_path)\n', (4925, 4942), False, 'import torch\n'), ((6793, 6837), 'torch.cat', 'torch.cat', (['[novel_alloy, novel_param]'], {'dim': '(1)'}), '([novel_alloy, novel_param], dim=1)\n', (6802, 6837), False, 'import torch\n'), ((7517, 7554), 'torch.mul', 'torch.mul', (['phase_matrix', 'phase_matrix'], {}), '(phase_matrix, phase_matrix)\n', (7526, 7554), False, 'import torch\n'), ((7572, 7596), 'torch.mean', 'torch.mean', (['phase_matrix'], {}), '(phase_matrix)\n', (7582, 7596), False, 'import torch\n'), ((8794, 8879), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'training_set', 'batch_size': 'parameters.size_batch', 'shuffle': '(True)'}), '(dataset=training_set, batch_size=parameters.size_batch, shuffle=True\n )\n', (8804, 8879), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((1026, 1081), 'numpy.loadtxt', 'np.loadtxt', (['"""data/train_composition.csv"""'], {'delimiter': '""","""'}), "('data/train_composition.csv', delimiter=',')\n", (1036, 1081), True, 'import numpy as np\n'), ((1307, 1360), 'numpy.loadtxt', 'np.loadtxt', (['"""data/train_parameter.csv"""'], {'delimiter': '""","""'}), "('data/train_parameter.csv', delimiter=',')\n", (1317, 1360), True, 'import numpy as np\n'), ((2476, 2533), 'torch.nn.Linear', 'nn.Linear', (['parameters.num_latent', 'parameters.num_elements'], {}), '(parameters.num_latent, parameters.num_elements)\n', (2485, 2533), True, 'import torch.nn as nn\n'), ((2547, 2561), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', 
([], {}), '()\n', (2559, 2561), True, 'import torch.nn as nn\n'), ((2575, 2634), 'torch.nn.Linear', 'nn.Linear', (['parameters.num_elements', 'parameters.num_elements'], {}), '(parameters.num_elements, parameters.num_elements)\n', (2584, 2634), True, 'import torch.nn as nn\n'), ((2648, 2657), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2655, 2657), True, 'import torch.nn as nn\n'), ((3274, 3394), 'torch.nn.Linear', 'nn.Linear', (['(parameters.num_elements + parameters.num_params + 1)', '(parameters.num_elements + parameters.num_params + 1)'], {}), '(parameters.num_elements + parameters.num_params + 1, parameters.\n num_elements + parameters.num_params + 1)\n', (3283, 3394), True, 'import torch.nn as nn\n'), ((3425, 3439), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (3437, 3439), True, 'import torch.nn as nn\n'), ((3453, 3573), 'torch.nn.Linear', 'nn.Linear', (['(parameters.num_elements + parameters.num_params + 1)', '(parameters.num_elements + parameters.num_params + 1)'], {}), '(parameters.num_elements + parameters.num_params + 1, parameters.\n num_elements + parameters.num_params + 1)\n', (3462, 3573), True, 'import torch.nn as nn\n'), ((3604, 3618), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (3616, 3618), True, 'import torch.nn as nn\n'), ((3632, 3697), 'torch.nn.Linear', 'nn.Linear', (['(parameters.num_elements + parameters.num_params + 1)', '(1)'], {}), '(parameters.num_elements + parameters.num_params + 1, 1)\n', (3641, 3697), True, 'import torch.nn as nn\n'), ((4365, 4382), 'torch.nn.Linear', 'nn.Linear', (['(12)', '(12)'], {}), '(12, 12)\n', (4374, 4382), True, 'import torch.nn as nn\n'), ((4396, 4410), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (4408, 4410), True, 'import torch.nn as nn\n'), ((4424, 4441), 'torch.nn.Linear', 'nn.Linear', (['(12)', '(12)'], {}), '(12, 12)\n', (4433, 4441), True, 'import torch.nn as nn\n'), ((4455, 4469), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (4467, 4469), True, 
'import torch.nn as nn\n'), ((4483, 4499), 'torch.nn.Linear', 'nn.Linear', (['(12)', '(3)'], {}), '(12, 3)\n', (4492, 4499), True, 'import torch.nn as nn\n'), ((6111, 6160), 'parameter_calculator.calculate_parameters', 'calculator.calculate_parameters', (['novel_alloy_norm'], {}), '(novel_alloy_norm)\n', (6142, 6160), True, 'import parameter_calculator as calculator\n'), ((6332, 6387), 'parameter_calculator.calculate_phase_parameters', 'calculator.calculate_phase_parameters', (['novel_alloy_norm'], {}), '(novel_alloy_norm)\n', (6369, 6387), True, 'import parameter_calculator as calculator\n'), ((5800, 5844), 'numpy.random.randn', 'np.random.randn', (['size', 'parameters.num_latent'], {}), '(size, parameters.num_latent)\n', (5815, 5844), True, 'import numpy as np\n'), ((6240, 6295), 'torch.tensor', 'torch.tensor', (['param_std[parameters.GAN_param_selection]'], {}), '(param_std[parameters.GAN_param_selection])\n', (6252, 6295), False, 'import torch\n'), ((6467, 6522), 'torch.tensor', 'torch.tensor', (['param_std[parameters.ANN_param_selection]'], {}), '(param_std[parameters.ANN_param_selection])\n', (6479, 6522), False, 'import torch\n'), ((1148, 1183), 'numpy.ones', 'np.ones', (['(compositions.shape[0], 1)'], {}), '((compositions.shape[0], 1))\n', (1155, 1183), True, 'import numpy as np\n'), ((1561, 1651), 'numpy.concatenate', 'np.concatenate', (['(compositions, cca_params[:, parameters.GAN_param_selection])'], {'axis': '(1)'}), '((compositions, cca_params[:, parameters.GAN_param_selection]\n ), axis=1)\n', (1575, 1651), True, 'import numpy as np\n'), ((5934, 5964), 'torch.sum', 'torch.sum', (['novel_alloy'], {'axis': '(1)'}), '(novel_alloy, axis=1)\n', (5943, 5964), False, 'import torch\n'), ((6163, 6219), 'torch.tensor', 'torch.tensor', (['param_mean[parameters.GAN_param_selection]'], {}), '(param_mean[parameters.GAN_param_selection])\n', (6175, 6219), False, 'import torch\n'), ((6390, 6446), 'torch.tensor', 'torch.tensor', 
(['param_mean[parameters.ANN_param_selection]'], {}), '(param_mean[parameters.ANN_param_selection])\n', (6402, 6446), False, 'import torch\n'), ((6717, 6747), 'torch.sum', 'torch.sum', (['novel_alloy'], {'axis': '(1)'}), '(novel_alloy, axis=1)\n', (6726, 6747), False, 'import torch\n')] |
import neurokit2 as nk
import numpy as np
from matplotlib import pyplot as plt
from scipy import signal as scisig
# 4th-order Butterworth high-pass filter (normalised cutoff 0.0003) used to
# remove slow baseline drift.  The coefficients do not depend on the data,
# so compute them once instead of once per recording.
b, a = scisig.butter(4, 0.0003, btype='highpass')
LAD_recordings = np.load('../data/test_sets0.npy')
for recording in LAD_recordings:
    # iterate 12 leads
    for lead in recording:
        filtered = scisig.filtfilt(b, a, lead)
        # BUG FIX: the original passed the raw `lead` to ecg_process and threw
        # the filtered signal away; process the filtered signal instead.
        # 257 is presumably the sampling rate in Hz — TODO confirm.
        signals, _ = nk.ecg_process(filtered, 257)
        nk.ecg_plot(signals)
        plt.show()
        break  # demo: only the first lead
    break  # demo: only the first recording
| [
"scipy.signal.filtfilt",
"scipy.signal.butter",
"neurokit2.ecg_plot",
"neurokit2.ecg_process",
"numpy.load",
"matplotlib.pyplot.show"
] | [((132, 165), 'numpy.load', 'np.load', (['"""../data/test_sets0.npy"""'], {}), "('../data/test_sets0.npy')\n", (139, 165), True, 'import numpy as np\n'), ((234, 276), 'scipy.signal.butter', 'scisig.butter', (['(4)', '(0.0003)'], {'btype': '"""highpass"""'}), "(4, 0.0003, btype='highpass')\n", (247, 276), True, 'from scipy import signal as scisig\n'), ((325, 352), 'scipy.signal.filtfilt', 'scisig.filtfilt', (['b', 'a', 'lead'], {}), '(b, a, lead)\n', (340, 352), True, 'from scipy import signal as scisig\n'), ((373, 398), 'neurokit2.ecg_process', 'nk.ecg_process', (['lead', '(257)'], {}), '(lead, 257)\n', (387, 398), True, 'import neurokit2 as nk\n'), ((407, 424), 'neurokit2.ecg_plot', 'nk.ecg_plot', (['lead'], {}), '(lead)\n', (418, 424), True, 'import neurokit2 as nk\n'), ((433, 443), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (441, 443), True, 'from matplotlib import pyplot as plt\n')] |
import yfinance as yf
import numpy as np
import pandas as pd
from datetime import date,timedelta
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from sklearn.model_selection import train_test_split, GridSearchCV
import datetime as dt
from sklearn.preprocessing import MinMaxScaler
from dash.dependencies import Input, Output, State
# from tensorflow.keras.models import Sequential
# from tensorflow.keras.layers import Dense, Dropout, LSTM
def forecast_indicator(start_date, end_date, input1, input2):
    """Forecast the next `input1` days of closing prices for ticker `input2`.

    Downloads the close-price history between `start_date` and `end_date`
    with yfinance, builds a supervised data set by shifting the close price
    `input1` rows into the future, grid-searches RBF-SVR hyper-parameters and
    predicts the last `input1` rows.

    Parameters:
        start_date, end_date: history window passed to ``yf.download``.
        input1 (int): forecast horizon in trading days.
        input2 (str): ticker symbol.

    Returns:
        numpy.ndarray of predicted close prices, length ``input1``.
    """
    # Cleanup: removed the large commented-out LSTM pipeline, the unused
    # train/test split, and an unused second yf.download (wasted network call).
    df = yf.download(input2, start_date, end_date)
    df = df[['Close']]
    forecast_out = input1
    # Target column: the close price `forecast_out` days ahead.
    df['Prediction'] = df[['Close']].shift(-forecast_out)
    # Drop the (NaN) tail rows that have no future target.
    X = np.array(df.drop(columns=['Prediction']))
    X = X[:-forecast_out]
    y = np.array(df['Prediction'])
    y = y[:-forecast_out]
    # Exhaustive hyper-parameter search with 5-fold CV on the full history.
    gsc = GridSearchCV(
        estimator=SVR(kernel='rbf'),
        param_grid={
            'C': [0.1, 1, 100, 1000],
            'epsilon': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1, 5, 10],
            'gamma': [0.0001, 0.001, 0.005, 0.1, 1, 3, 5]
        },
        cv=5, scoring='neg_mean_squared_error', verbose=0, n_jobs=-1)
    grid_result = gsc.fit(X, y)
    best_params = grid_result.best_params_
    # Refit a fresh SVR with the best hyper-parameters on all data.
    best_svr = SVR(kernel='rbf', C=best_params["C"], epsilon=best_params["epsilon"], gamma=best_params["gamma"],
                   coef0=0.1, shrinking=True,
                   tol=0.001, cache_size=200, verbose=False, max_iter=-1)
    best_result = best_svr.fit(X, y)
    # Predict from the most recent `forecast_out` feature rows.
    x_forecast = np.array(df.drop(columns=['Prediction']))[-forecast_out:]
    grid_prediction = best_result.predict(x_forecast)
    return grid_prediction
| [
"sklearn.model_selection.train_test_split",
"pandas.tseries.offsets.DateOffset",
"yfinance.download",
"numpy.array",
"sklearn.svm.SVR",
"pandas.to_datetime"
] | [((2489, 2530), 'yfinance.download', 'yf.download', (['input2', 'start_date', 'end_date'], {}), '(input2, start_date, end_date)\n', (2500, 2530), True, 'import yfinance as yf\n'), ((2758, 2784), 'numpy.array', 'np.array', (["df['Prediction']"], {}), "(df['Prediction'])\n", (2766, 2784), True, 'import numpy as np\n'), ((2856, 2893), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.1)'}), '(X, y, test_size=0.1)\n', (2872, 2893), False, 'from sklearn.model_selection import train_test_split, GridSearchCV\n'), ((3024, 3065), 'yfinance.download', 'yf.download', (['input2', 'test_start', 'test_end'], {}), '(input2, test_start, test_end)\n', (3035, 3065), True, 'import yfinance as yf\n'), ((3519, 3708), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""rbf"""', 'C': "best_params['C']", 'epsilon': "best_params['epsilon']", 'gamma': "best_params['gamma']", 'coef0': '(0.1)', 'shrinking': '(True)', 'tol': '(0.001)', 'cache_size': '(200)', 'verbose': '(False)', 'max_iter': '(-1)'}), "(kernel='rbf', C=best_params['C'], epsilon=best_params['epsilon'], gamma\n =best_params['gamma'], coef0=0.1, shrinking=True, tol=0.001, cache_size\n =200, verbose=False, max_iter=-1)\n", (3522, 3708), False, 'from sklearn.svm import SVR\n'), ((2943, 2967), 'pandas.to_datetime', 'pd.to_datetime', (['end_date'], {}), '(end_date)\n', (2957, 2967), True, 'import pandas as pd\n'), ((2970, 3012), 'pandas.tseries.offsets.DateOffset', 'pd.tseries.offsets.DateOffset', ([], {'days': 'input1'}), '(days=input1)\n', (2999, 3012), True, 'import pandas as pd\n'), ((3110, 3127), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""rbf"""'}), "(kernel='rbf')\n", (3113, 3127), False, 'from sklearn.svm import SVR\n')] |
import numpy as np
import random
class Game():
    """A 4x4 bingo board: toggleable cells, win detection and label shuffling."""

    def __init__(self):
        """Set up the fixed label grid and an all-zero board state."""
        self.labels = [["FREE",
                        "'I played that so well'",
                        "Unintelligible moaning",
                        "Has incorrect runes"],
                       ["Initiates an FF vote",
                        "Types in all chat asking if there are any single ladies",
                        "Picks an assassin champion",
                        "Praises himself after making a play"],
                       ["Switches to ARAM after losing a normal game",
                        "Picks Nasus",
                        "Picks Evelynn",
                        "Leaves voice chat without saying goodbye"],
                       ["Goes to eat dinner then comes back immediately and asks you to stream the game",
                        "'I think we lost this one' (Must be said in champ select)",
                        "Gets camped by jungler during laning phase",
                        "Gets solo killed within the first 15 minutes"]]
        # 0 = unmarked, 1 = marked
        self.state = [[0] * 4 for _ in range(4)]

    def checkWin(self):
        """Return True when any row, column or diagonal is fully marked."""
        size = len(self.state)
        # Any complete row?
        if any(all(cell == 1 for cell in row) for row in self.state):
            return True
        # Any complete column?
        if any(all(self.state[r][c] == 1 for r in range(size)) for c in range(size)):
            return True
        # Main diagonal (top-left to bottom-right)?
        if all(self.state[i][i] == 1 for i in range(size)):
            return True
        # Anti-diagonal (bottom-left to top-right)?
        if all(self.state[i][size - 1 - i] == 1 for i in range(size)):
            return True
        return False

    def updateState(self, x, y):
        """Toggle the cell at row *x*, column *y* between 0 and 1."""
        self.state[x][y] = 1 - self.state[x][y]

    def resetState(self):
        """Clear every cell back to 0, mutating the board in place."""
        for row in self.state:
            for col in range(len(row)):
                row[col] = 0

    def randomiseLabels(self):
        """Shuffle all 16 labels uniformly across the grid."""
        flat = np.array(self.labels).ravel()
        random.shuffle(flat)
        self.labels = flat.reshape(4, 4).tolist()

    def changeLabels(self, newLabels):
        """Replace the label grid with *newLabels*."""
        self.labels = newLabels
| [
"numpy.array",
"numpy.transpose",
"random.shuffle"
] | [((1351, 1371), 'numpy.array', 'np.array', (['self.state'], {}), '(self.state)\n', (1359, 1371), True, 'import numpy as np\n'), ((1398, 1417), 'numpy.transpose', 'np.transpose', (['state'], {}), '(state)\n', (1410, 1417), True, 'import numpy as np\n'), ((2735, 2756), 'numpy.array', 'np.array', (['self.labels'], {}), '(self.labels)\n', (2743, 2756), True, 'import numpy as np\n'), ((2801, 2825), 'random.shuffle', 'random.shuffle', (['npLabels'], {}), '(npLabels)\n', (2815, 2825), False, 'import random\n')] |
import numpy as np
import matplotlib.pyplot as plt
import gpflow
def dbtime(X):
    """Synthetic 10-parameter benchmark objective.

    Computes 10*sin(p1) + 5*sin(p2) + sin(p3) + ... + sin(p10) + 39 for each
    row of *X* (columns 0..9 are the parameters p1..p10).
    """
    sines = [np.sin(X[:, col]) for col in range(10)]
    # Keep the original left-to-right addition order for bit-identical floats.
    total = sines[0] * 10 + sines[1] * 5
    for s in sines[2:]:
        total = total + s
    return total + 3 * 8 + 10 + 5
class Optimize():
    """Simple Bayesian-optimisation driver built on GPflow GP regression.

    Draws random starting points in [0, 10] per parameter, fits a Gaussian
    process to the observations (X, Y) and proposes new points by maximising
    the acquisition function -mean + var on a regular grid.
    """
    def __init__(self, func, start_point, nb_param ):
        """Sample `start_point` random points for each of `nb_param`
        parameters in [0, 10] and evaluate `func` on them.

        `func` must accept an (n, nb_param) array and return n values.
        """
        self.func = func
        self.nb_param= nb_param
        self.start_point = start_point
        # Column-by-column sampling preserved so the RNG call sequence (and
        # thus results under a fixed seed) matches the original behaviour.
        self.X = np.random.rand(start_point,1)*10
        print("Random first parameter:")
        print(self.X)
        for i in range(self.nb_param-1):
            print("Random next parameter:")
            xp = np.random.rand(start_point,1)*10
            self.X = np.concatenate( (self.X, xp), axis=1 )
        self.Y = self.func(self.X)
        self.Y = self.Y.reshape(len(self.Y), 1)
        self.run = 0
        self.plt = plt
    def buildModelGaussien(self):
        '''
        Fit a Gaussian-process regression model with GPflow.
        '''
        self.k = None
        # Kernel definition: additive sum of one Matern52 per input dimension,
        # with the lengthscale frozen at 1.0.
        for i in range(self.nb_param):
            k = gpflow.kernels.Matern52(1, active_dims=[i], lengthscales=0.3)
            k.lengthscales.trainable = False
            k.lengthscales = 1.0
            # BUG FIX: identity test instead of `== None` (PEP 8 E711) —
            # kernel objects may overload `==`.
            if self.k is None:
                self.k = k
            else:
                self.k += k
        # Model definition
        self.m = gpflow.models.GPR(self.X, self.Y, kern=self.k)
        # Compile the model, then optimise its hyper-parameters.
        self.m.compile()
        gpflow.train.ScipyOptimizer().minimize(self.m)
    def getNextPoint(self):
        '''
        Choose the next point to explore by maximising the acquisition
        function fct = -mean + var over a 100x100 grid on [0, 10]^2.
        '''
        # NOTE(review): the grid is hard-coded to 2 dimensions even though the
        # class is otherwise nb_param-generic — confirm before using nb_param != 2.
        # PERF FIX: build the grid in one shot; the previous version grew the
        # array with np.concatenate inside a double loop, which is O(n^2).
        xx = np.array([[xx1, xx2] for xx1 in range(0, 100) for xx2 in range(0, 100)])
        xx = xx / 10
        # Posterior mean and variance at every grid point.
        mean, var = self.m.predict_y(xx)
        # Acquisition values; one entry per grid point after flattening.
        acqu = (-mean+var)
        acquflatten = acqu.flatten()
        # Index of the first occurrence of the maximum acquisition value.
        maxvalue = max(acquflatten)
        whereisit = np.where(acquflatten ==maxvalue )[0][0]
        next_abs = xx[whereisit]
        # Append the new observation to the training data.
        self.X = np.concatenate( (self.X, [next_abs]))
        result = self.func(next_abs.reshape((1,2)))
        self.Y = np.concatenate( (self.Y, result.reshape((1,1)) ) )
        self.run += 1
        return next_abs
    def print(self):
        """Dump the current observations (note: shadows the builtin name)."""
        print("Content of X:")
        print(self.X)
        print("Content of Y:")
        print(self.Y)
# Demo driver: optimise the synthetic dbtime benchmark for 20 iterations,
# refitting the Gaussian-process model after every newly sampled point.
print('Starting...')
optimizer = Optimize(dbtime, 10, 2)
optimizer.print()
optimizer.buildModelGaussien()
next_abs = optimizer.getNextPoint()
print('Searching for min...')
for _ in range(20):
    next_abs = optimizer.getNextPoint()
    probe = dbtime(next_abs.reshape((1, 2)))
    print("Next point to explore: %s and dbtime(x)=%s" % (next_abs, probe))
    optimizer.buildModelGaussien()
| [
"gpflow.kernels.Matern52",
"numpy.random.rand",
"numpy.where",
"gpflow.train.ScipyOptimizer",
"gpflow.models.GPR",
"numpy.array",
"numpy.concatenate",
"numpy.sin"
] | [((1568, 1614), 'gpflow.models.GPR', 'gpflow.models.GPR', (['self.X', 'self.Y'], {'kern': 'self.k'}), '(self.X, self.Y, kern=self.k)\n', (1585, 1614), False, 'import gpflow\n'), ((3184, 3220), 'numpy.concatenate', 'np.concatenate', (['(self.X, [next_abs])'], {}), '((self.X, [next_abs]))\n', (3198, 3220), True, 'import numpy as np\n'), ((599, 629), 'numpy.random.rand', 'np.random.rand', (['start_point', '(1)'], {}), '(start_point, 1)\n', (613, 629), True, 'import numpy as np\n'), ((857, 893), 'numpy.concatenate', 'np.concatenate', (['(self.X, xp)'], {'axis': '(1)'}), '((self.X, xp), axis=1)\n', (871, 893), True, 'import numpy as np\n'), ((1261, 1322), 'gpflow.kernels.Matern52', 'gpflow.kernels.Matern52', (['(1)'], {'active_dims': '[i]', 'lengthscales': '(0.3)'}), '(1, active_dims=[i], lengthscales=0.3)\n', (1284, 1322), False, 'import gpflow\n'), ((802, 832), 'numpy.random.rand', 'np.random.rand', (['start_point', '(1)'], {}), '(start_point, 1)\n', (816, 832), True, 'import numpy as np\n'), ((1744, 1773), 'gpflow.train.ScipyOptimizer', 'gpflow.train.ScipyOptimizer', ([], {}), '()\n', (1771, 1773), False, 'import gpflow\n'), ((3011, 3044), 'numpy.where', 'np.where', (['(acquflatten == maxvalue)'], {}), '(acquflatten == maxvalue)\n', (3019, 3044), True, 'import numpy as np\n'), ((383, 394), 'numpy.sin', 'np.sin', (['p10'], {}), '(p10)\n', (389, 394), True, 'import numpy as np\n'), ((2213, 2235), 'numpy.array', 'np.array', (['[[xx1, xx2]]'], {}), '([[xx1, xx2]])\n', (2221, 2235), True, 'import numpy as np\n'), ((2284, 2318), 'numpy.concatenate', 'np.concatenate', (['(xx, [[xx1, xx2]])'], {}), '((xx, [[xx1, xx2]]))\n', (2298, 2318), True, 'import numpy as np\n'), ((372, 382), 'numpy.sin', 'np.sin', (['p9'], {}), '(p9)\n', (378, 382), True, 'import numpy as np\n'), ((361, 371), 'numpy.sin', 'np.sin', (['p8'], {}), '(p8)\n', (367, 371), True, 'import numpy as np\n'), ((350, 360), 'numpy.sin', 'np.sin', (['p7'], {}), '(p7)\n', (356, 360), True, 'import numpy as np\n'), 
((339, 349), 'numpy.sin', 'np.sin', (['p6'], {}), '(p6)\n', (345, 349), True, 'import numpy as np\n'), ((328, 338), 'numpy.sin', 'np.sin', (['p5'], {}), '(p5)\n', (334, 338), True, 'import numpy as np\n'), ((317, 327), 'numpy.sin', 'np.sin', (['p4'], {}), '(p4)\n', (323, 327), True, 'import numpy as np\n'), ((306, 316), 'numpy.sin', 'np.sin', (['p3'], {}), '(p3)\n', (312, 316), True, 'import numpy as np\n'), ((279, 289), 'numpy.sin', 'np.sin', (['p1'], {}), '(p1)\n', (285, 289), True, 'import numpy as np\n'), ((293, 303), 'numpy.sin', 'np.sin', (['p2'], {}), '(p2)\n', (299, 303), True, 'import numpy as np\n')] |
if __name__ == '__main__':
    # Build a 1x4 grid of radar (spider) charts comparing CARL benchmark
    # environments: state/action space sizes and context-feature counts,
    # normalised per column, one panel per environment family.
    import numpy as np
    import matplotlib.pyplot as plt
    import pandas as pd
    import seaborn as sns
    from carl.utils.doc_building.plotting import radar_factory
    # Context features exposed by each environment; their list lengths give
    # the per-environment n_context_features used below.
    env_context_feature_names = {
        'CARLMountainCarEnv': ['force',
                               'goal_position',
                               'goal_velocity',
                               'gravity',
                               'max_position',
                               'max_speed',
                               'min_position',
                               'start_position',
                               'start_position_std',
                               'start_velocity',
                               'start_velocity_std'],
        'CARLPendulumEnv': ['dt', 'g', 'l', 'm', 'max_speed'],
        'CARLAcrobotEnv': ['link_com_1',
                           'link_com_2',
                           'link_length_1',
                           'link_length_2',
                           'link_mass_1',
                           'link_mass_2',
                           'link_moi',
                           'max_velocity_1',
                           'max_velocity_2'],
        'CARLCartPoleEnv': ['force_magnifier',
                            'gravity',
                            'masscart',
                            'masspole',
                            'pole_length',
                            'update_interval'],
        'CARLMountainCarContinuousEnv': ['goal_position',
                                         'goal_velocity',
                                         'max_position',
                                         'max_position_start',
                                         'max_speed',
                                         'max_velocity_start',
                                         'min_position',
                                         'min_position_start',
                                         'min_velocity_start',
                                         'power'],
        'CARLLunarLanderEnv': ['FPS',
                               'GRAVITY_X',
                               'GRAVITY_Y',
                               'INITIAL_RANDOM',
                               'LEG_AWAY',
                               'LEG_DOWN',
                               'LEG_H',
                               'LEG_SPRING_TORQUE',
                               'LEG_W',
                               'MAIN_ENGINE_POWER',
                               'SCALE',
                               'SIDE_ENGINE_AWAY',
                               'SIDE_ENGINE_HEIGHT',
                               'SIDE_ENGINE_POWER',
                               'VIEWPORT_H',
                               'VIEWPORT_W'],
        'CARLVehicleRacingEnv': ['VEHICLE'],
        'CARLBipedalWalkerEnv': ['FPS',
                                 'FRICTION',
                                 'GRAVITY_X',
                                 'GRAVITY_Y',
                                 'INITIAL_RANDOM',
                                 'LEG_DOWN',
                                 'LEG_H',
                                 'LEG_W',
                                 'LIDAR_RANGE',
                                 'MOTORS_TORQUE',
                                 'SCALE',
                                 'SPEED_HIP',
                                 'SPEED_KNEE',
                                 'TERRAIN_GRASS',
                                 'TERRAIN_HEIGHT',
                                 'TERRAIN_LENGTH',
                                 'TERRAIN_STARTPAD',
                                 'TERRAIN_STEP',
                                 'VIEWPORT_H',
                                 'VIEWPORT_W'],
        'CARLAnt': ['actuator_strength',
                    'angular_damping',
                    'friction',
                    'gravity',
                    'joint_angular_damping',
                    'joint_stiffness',
                    'torso_mass'],
        'CARLHalfcheetah': ['angular_damping',
                            'friction',
                            'gravity',
                            'joint_angular_damping',
                            'joint_stiffness',
                            'torso_mass'],
        'CARLHumanoid': ['angular_damping',
                         'friction',
                         'gravity',
                         'joint_angular_damping',
                         'torso_mass'],
        'CARLFetch': ['actuator_strength',
                      'angular_damping',
                      'friction',
                      'gravity',
                      'joint_angular_damping',
                      'joint_stiffness',
                      'target_distance',
                      'target_radius',
                      'torso_mass'],
        'CARLGrasp': ['actuator_strength',
                      'angular_damping',
                      'friction',
                      'gravity',
                      'joint_angular_damping',
                      'joint_stiffness',
                      'target_distance',
                      'target_height',
                      'target_radius'],
        'CARLUr5e': ['actuator_strength',
                     'angular_damping',
                     'friction',
                     'gravity',
                     'joint_angular_damping',
                     'joint_stiffness',
                     'target_distance',
                     'target_radius',
                     'torso_mass'],
        'CARLRnaDesignEnv': ['mutation_threshold',
                             'reward_exponent',
                             'state_radius',
                             'dataset',
                             'target_structure_ids'],
        'CARLMarioEnv': ['level_index', 'noise', 'mario_state']
    }
    # Per-environment action/state space shapes, aligned with env_names below.
    action_space_sizes = [(3,),
                          (1,),
                          (3,),
                          (2,),
                          (1,),
                          (4,),
                          (3,),
                          (4,),
                          (8,),
                          (6,),
                          (17,),
                          (10,),
                          (19,),
                          (6,),
                          (8,),
                          (10,)]
    state_space_sizes = [(2,),
                         (3,),
                         (6,),
                         (4,),
                         (2,),
                         (8,),
                         (96, 96, 3),
                         (24,),
                         (87,),
                         (23,),
                         (299,),
                         (101,),
                         (132,),
                         (66,),
                         (11,),
                         (64, 64, 3)]
    # NOTE(review): n_context_features is never read below; the counts are
    # recomputed from env_context_feature_names instead.
    n_context_features = [11, 5, 9, 6, 10, 16, 1, 20, 7, 6, 5, 9, 9, 9, 5, 3]
    env_names = ['CARLMountainCarEnv',
                 'CARLPendulumEnv',
                 'CARLAcrobotEnv',
                 'CARLCartPoleEnv',
                 'CARLMountainCarContinuousEnv',
                 'CARLLunarLanderEnv',
                 'CARLVehicleRacingEnv',
                 'CARLBipedalWalkerEnv',
                 'CARLAnt',
                 'CARLHalfcheetah',
                 'CARLHumanoid',
                 'CARLFetch',
                 'CARLGrasp',
                 'CARLUr5e',
                 'CARLRnaDesignEnv',
                 'CARLMarioEnv']
    # Counts of dynamics-changing / reward-changing context features per env.
    n_cfs_d = [11, 5, 8, 6, 10, 16, 1, 20, 7, 6, 5, 9, 9, 9, 4, 3]
    n_cfs_r = [0, 0, 0, 0, 0, 4, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0]
    # Aggregate totals (hard-coded; presumably derived offline — TODO confirm
    # they stay in sync with the lists above).
    n_cfs = 131
    n_dynami_changing = 129
    n_reward_changing = 7
    n_float_cfs = 114
    percentage_float_cfs = n_float_cfs / n_cfs
    # Grouping of environments into families: one radar panel per family.
    env_types = {
        "classic_control": ["CARLAcrobotEnv", "CARLCartPoleEnv", "CARLMountainCarEnv", "CARLMountainCarContinuousEnv", "CARLPendulumEnv"],
        "box2d": ["CARLBipedalWalkerEnv", "CARLLunarLanderEnv", "CARLVehicleRacingEnv"],
        "brax": ["CARLAnt", "CARLFetch", "CARLGrasp", "CARLHumanoid", "CARLUr5e"],
        "misc": ["CARLMarioEnv", "CARLRnaDesignEnv"]
    }
    # Assemble one DataFrame per family, then concatenate them.
    data = []
    for env_type in env_types:
        envs = env_types[env_type]
        title = env_type
        ids = [env_names.index(e) for e in envs]
        # ss_sizes = [state_space_sizes[i][0] for i in ids]
        # as_sizes = [action_space_sizes[i][0] for i in ids]
        # Flatten multi-dimensional observation shapes to a scalar size.
        ss_sizes = [np.prod(state_space_sizes[i]) for i in ids]
        as_sizes = [np.prod(action_space_sizes[i]) for i in ids]
        reward_changing = [n_cfs_r[i] for i in ids]
        dynamics_changing = [n_cfs_d[i] for i in ids]
        cf_numbers = [len(env_context_feature_names[env_names[i]]) for i in ids]
        # print(ss_sizes, as_sizes, cf_numbers)
        data.append(pd.DataFrame({
            "env_type": [env_type] * len(ids),
            "env_name": envs,
            "state_space_size": ss_sizes,
            "action_space_size": as_sizes,
            "n_context_features": cf_numbers,
            "n_cf_reward": reward_changing,
            "n_cf_dyna": dynamics_changing,
        }))
    data = pd.concat(data)
    # normalize values
    # State-space sizes are log-scaled first (they span orders of magnitude),
    # then every numeric column is scaled to [0, 1] by its maximum.
    cols = [c for c in data.columns if c not in ["env_type", "env_name"]]
    max_values_per_col = []
    for col in cols:
        if col == "state_space_size":
            data[col] = np.log(data[col])
        max_val = data[col].max()
        max_values_per_col.append(max_val)
        data[col] /= max_val
    cols_plot = ["state_space_size", "action_space_size", "n_cf_reward", "n_cf_dyna", "n_context_features", ]
    xticklabels = ["state space size", "action\nspace \nsize", "$n_{cf, reward}$", "$n_{cf,dynamics}$", "$n_{cf}$",]
    figtitle = "Environments"
    # One radar spoke per column in cols_plot.
    N = len(cols_plot)
    theta = radar_factory(N, frame='polygon')
    figsize = (10, 2.5)
    dpi = 250
    fig, axs = plt.subplots(figsize=figsize, nrows=1, ncols=4,
                            subplot_kw=dict(projection='radar'), dpi=dpi)
    # fig.subplots_adjust(wspace=0.25, hspace=0.20, top=0.99, bottom=0.01)
    # Plot the four cases from the example data on separate axes
    for ax, env_type in zip(axs.flat, env_types):
        D = data[data["env_type"] == env_type]
        labels = D["env_name"].to_list()
        color_palette_name = "colorblind"
        n = len(D)
        colors = sns.color_palette(color_palette_name, n)
        plot_data = D[cols_plot].to_numpy()
        ax.set_rgrids([0.2, 0.4, 0.6, 0.8])
        title = env_type.replace("_", " ")
        if title == "misc":
            title = "RNA + Mario"
        ax.set_title(title, weight='normal', size='medium', # position=(0.5, 0.25), transform=ax.transAxes,
                     horizontalalignment='center', verticalalignment='center', pad=15, fontsize=12)
        # One filled polygon per environment in this family.
        for i, (d, color) in enumerate(zip(plot_data, colors)):
            ax.plot(theta, d, color=color, label=labels[i])
            ax.fill(theta, d, facecolor=color, alpha=0.25)
        ax.set_varlabels(xticklabels, horizontalalignment='center', verticalalignment='center')
        # ax.legend(loc=(0.25, -.5), labelspacing=0.1, fontsize='small')
        rticks = np.linspace(0, 1, 5)
        ax.set_rticks(rticks)
        plt.setp(ax.get_yticklabels(), visible=False)
    # add legend relative to top-left plot
    # labels = ('Factor 1', 'Factor 2', 'Factor 3', 'Factor 4', 'Factor 5')
    # legend = axs[0, 0].legend(labels, loc=(0.9, .95),
    #                           labelspacing=0.1, fontsize='small')
    # fig.text(0.5, 0.965, figtitle,
    #          horizontalalignment='center', color='black', weight='bold',
    #          size='large')
    fig.set_tight_layout(True)
    figfname = "utils/radar_env_space.png"
    fig.savefig(figfname, bbox_inches="tight")
    plt.show()
| [
"numpy.prod",
"seaborn.color_palette",
"numpy.log",
"numpy.linspace",
"pandas.concat",
"carl.utils.doc_building.plotting.radar_factory",
"matplotlib.pyplot.show"
] | [((6401, 6416), 'pandas.concat', 'pd.concat', (['data'], {}), '(data)\n', (6410, 6416), True, 'import pandas as pd\n'), ((7047, 7080), 'carl.utils.doc_building.plotting.radar_factory', 'radar_factory', (['N'], {'frame': '"""polygon"""'}), "(N, frame='polygon')\n", (7060, 7080), False, 'from carl.utils.doc_building.plotting import radar_factory\n'), ((9049, 9059), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9057, 9059), True, 'import matplotlib.pyplot as plt\n'), ((7614, 7654), 'seaborn.color_palette', 'sns.color_palette', (['color_palette_name', 'n'], {}), '(color_palette_name, n)\n', (7631, 7654), True, 'import seaborn as sns\n'), ((8432, 8452), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(5)'], {}), '(0, 1, 5)\n', (8443, 8452), True, 'import numpy as np\n'), ((5703, 5732), 'numpy.prod', 'np.prod', (['state_space_sizes[i]'], {}), '(state_space_sizes[i])\n', (5710, 5732), True, 'import numpy as np\n'), ((5767, 5797), 'numpy.prod', 'np.prod', (['action_space_sizes[i]'], {}), '(action_space_sizes[i])\n', (5774, 5797), True, 'import numpy as np\n'), ((6626, 6643), 'numpy.log', 'np.log', (['data[col]'], {}), '(data[col])\n', (6632, 6643), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import os
import subprocess
import numpy as np
from PIL import Image, ImageChops
from PIL import ImageFont
from PIL import ImageDraw
from meld_classifier.dataset import load_combined_hemisphere_data
from meld_classifier.meld_cohort import MeldCohort, MeldSubject
import matplotlib_surface_plotting.matplotlib_surface_plotting as msp
import meld_classifier.paths as paths
import nibabel as nb
def trim(im):
    """Crop away the uniform border whose colour matches the top-left pixel.

    Returns the cropped image, or *im* unchanged when no content bounding
    box is found (i.e. the whole image is background-coloured).
    """
    background = Image.new(im.mode, im.size, im.getpixel((0, 0)))
    # Amplify the difference so near-background pixels are suppressed.
    delta = ImageChops.difference(im, background)
    delta = ImageChops.add(delta, delta, 2.0, -100)
    content_box = delta.getbbox()
    if not content_box:
        return im
    return im.crop(content_box)
def rotate90(im):
    """Return *im* rotated 90 degrees clockwise (PIL's ROTATE_270 transpose)."""
    rotated = im.transpose(method=Image.ROTATE_270)
    return rotated
def plot_single_subject(data_to_plots, lesion, feature_names=None, out_filename="tmp.png"):
    """create a grid of flatmap plots for a single subject

    Renders one flat-map surface plot per entry of ``data_to_plots``,
    annotates each tile with its feature name and assembles the tiles into
    a roughly square grid image saved to ``out_filename``.

    Args:
        data_to_plots: iterable of per-vertex value arrays, one per feature.
        lesion: per-vertex lesion labels, passed to ``msp.plot_surf`` as the
            ``parcel`` outline.
        feature_names: optional list of names, one per entry of
            ``data_to_plots``; entries after the first are sliced with
            ``[35:-9]`` — presumably stripping a fixed prefix/suffix of the
            stored feature names (TODO confirm).
        out_filename: path both the intermediate tiles and the final grid
            image are written to.

    NOTE(review): ``base`` is only initialised in the ``k == 0`` branch when
    ``feature_names`` is not None, so calling this with ``feature_names=None``
    raises a NameError at the first ``base.shape`` access — confirm callers
    always pass feature names.
    """
    # load in meshes
    flat = nb.load(os.path.join(paths.BASE_PATH, "fsaverage_sym", "surf", "lh.full.patch.flat.gii"))
    vertices, faces = flat.darrays[0].data, flat.darrays[1].data
    # Boolean mask of cortical vertices; non-cortex is masked out of the plot.
    cortex = MeldCohort().cortex_label
    cortex_bin = np.zeros(len(vertices)).astype(bool)
    cortex_bin[cortex] = 1
    # round up to get the square grid size
    gridsize = np.ceil(np.sqrt(len(data_to_plots))).astype(int)
    ims = np.zeros((gridsize, gridsize), dtype=object)
    # NOTE(review): `random` was only used by the commented-out `convert`
    # calls below and is now unused.
    random = np.random.choice(100000)
    for k, data_to_plot in enumerate(data_to_plots):
        # Render one flat map to out_filename; the colour range is clipped to
        # the 1st-99th percentile of cortical values.
        msp.plot_surf(
            vertices,
            faces,
            data_to_plot,
            flat_map=True,
            base_size=10,
            mask=~cortex_bin,
            pvals=np.ones_like(cortex_bin),
            parcel=lesion,
            vmin=np.percentile(data_to_plot[cortex_bin], 1),
            vmax=np.percentile(data_to_plot[cortex_bin], 99),
            cmap="viridis",
            colorbar=False,
            filename=out_filename,
        )
        plt.close()
        # subprocess.call(f"convert {out_filename} -trim ./tmp{random}1.png", shell=True)
        # subprocess.call(f"convert ./tmp{random}1.png -rotate 90 {out_filename}", shell=True)
        # os.remove(f"./tmp{random}1.png")
        im = Image.open(out_filename)
        im = trim(im)
        im = rotate90(im)
        im = im.convert("RGBA")
        #fnt = ImageFont.truetype("Pillow/Tests/fonts/FreeSansBold.ttf", 25)
        fnt = ImageFont.load_default()
        f_name = ""
        if feature_names is not None:
            if k == 0:
                # The first tile keeps its full name and becomes the paste target.
                f_name = feature_names[k]
                base = np.array(im.convert("RGBA"))
            else:
                f_name = feature_names[k][35:-9]
        # Draw the feature name in red onto the current tile.
        draw = ImageDraw.Draw(im)
        draw.text((100, 0), f_name, (255, 0, 0), font=fnt)
        arr_im = np.array(im.convert("RGBA"))
        # Paste the (possibly smaller) tile into the top-left corner of `base`.
        s0 = np.min([base.shape[0], arr_im.shape[0]])
        s1 = np.min([base.shape[1], arr_im.shape[1]])
        base[:s0, :s1, :3] = arr_im[:s0, :s1, :3]
        # make transparent white
        # cropped[cropped[:,:,3]==0]=255
        base = base[:, :, :3]
        ims[k // gridsize, k % gridsize] = base.copy()
        # (Re)assemble the rows processed so far; missing cells in a row make
        # np.hstack raise ValueError, which is patched with white filler tiles.
        rows = np.zeros(1 + k // gridsize, dtype=object)
        for j in np.arange(1 + k // gridsize):
            try:
                rows[j] = np.hstack(ims[j])
            except ValueError:
                ims[j, k % gridsize + 1] = np.ones_like(base) * 255
                ims[j, k % gridsize + 2] = np.ones_like(base) * 255
                rows[j] = np.hstack(ims[j])
    grid_ims = np.vstack(rows)
    im = Image.fromarray(grid_ims)
    im.save(out_filename)
| [
"PIL.ImageChops.difference",
"PIL.Image.fromarray",
"PIL.Image.open",
"numpy.ones_like",
"PIL.ImageFont.load_default",
"numpy.hstack",
"numpy.random.choice",
"os.path.join",
"PIL.ImageChops.add",
"matplotlib.pyplot.close",
"numpy.zeros",
"PIL.ImageDraw.Draw",
"numpy.vstack",
"numpy.min",
... | [((507, 536), 'PIL.ImageChops.difference', 'ImageChops.difference', (['im', 'bg'], {}), '(im, bg)\n', (528, 536), False, 'from PIL import Image, ImageChops\n'), ((548, 585), 'PIL.ImageChops.add', 'ImageChops.add', (['diff', 'diff', '(2.0)', '(-100)'], {}), '(diff, diff, 2.0, -100)\n', (562, 585), False, 'from PIL import Image, ImageChops\n'), ((1334, 1378), 'numpy.zeros', 'np.zeros', (['(gridsize, gridsize)'], {'dtype': 'object'}), '((gridsize, gridsize), dtype=object)\n', (1342, 1378), True, 'import numpy as np\n'), ((1392, 1416), 'numpy.random.choice', 'np.random.choice', (['(100000)'], {}), '(100000)\n', (1408, 1416), True, 'import numpy as np\n'), ((3128, 3169), 'numpy.zeros', 'np.zeros', (['(1 + k // gridsize)'], {'dtype': 'object'}), '(1 + k // gridsize, dtype=object)\n', (3136, 3169), True, 'import numpy as np\n'), ((3183, 3211), 'numpy.arange', 'np.arange', (['(1 + k // gridsize)'], {}), '(1 + k // gridsize)\n', (3192, 3211), True, 'import numpy as np\n'), ((3476, 3491), 'numpy.vstack', 'np.vstack', (['rows'], {}), '(rows)\n', (3485, 3491), True, 'import numpy as np\n'), ((3501, 3526), 'PIL.Image.fromarray', 'Image.fromarray', (['grid_ims'], {}), '(grid_ims)\n', (3516, 3526), False, 'from PIL import Image, ImageChops\n'), ((949, 1034), 'os.path.join', 'os.path.join', (['paths.BASE_PATH', '"""fsaverage_sym"""', '"""surf"""', '"""lh.full.patch.flat.gii"""'], {}), "(paths.BASE_PATH, 'fsaverage_sym', 'surf', 'lh.full.patch.flat.gii'\n )\n", (961, 1034), False, 'import os\n'), ((1110, 1122), 'meld_classifier.meld_cohort.MeldCohort', 'MeldCohort', ([], {}), '()\n', (1120, 1122), False, 'from meld_classifier.meld_cohort import MeldCohort, MeldSubject\n'), ((1946, 1957), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1955, 1957), True, 'import matplotlib.pyplot as plt\n'), ((2196, 2220), 'PIL.Image.open', 'Image.open', (['out_filename'], {}), '(out_filename)\n', (2206, 2220), False, 'from PIL import Image, ImageChops\n'), ((2392, 2416), 
'PIL.ImageFont.load_default', 'ImageFont.load_default', ([], {}), '()\n', (2414, 2416), False, 'from PIL import ImageFont\n'), ((2674, 2692), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['im'], {}), '(im)\n', (2688, 2692), False, 'from PIL import ImageDraw\n'), ((2811, 2851), 'numpy.min', 'np.min', (['[base.shape[0], arr_im.shape[0]]'], {}), '([base.shape[0], arr_im.shape[0]])\n', (2817, 2851), True, 'import numpy as np\n'), ((2865, 2905), 'numpy.min', 'np.min', (['[base.shape[1], arr_im.shape[1]]'], {}), '([base.shape[1], arr_im.shape[1]])\n', (2871, 2905), True, 'import numpy as np\n'), ((3248, 3265), 'numpy.hstack', 'np.hstack', (['ims[j]'], {}), '(ims[j])\n', (3257, 3265), True, 'import numpy as np\n'), ((1661, 1685), 'numpy.ones_like', 'np.ones_like', (['cortex_bin'], {}), '(cortex_bin)\n', (1673, 1685), True, 'import numpy as np\n'), ((1731, 1773), 'numpy.percentile', 'np.percentile', (['data_to_plot[cortex_bin]', '(1)'], {}), '(data_to_plot[cortex_bin], 1)\n', (1744, 1773), True, 'import numpy as np\n'), ((1792, 1835), 'numpy.percentile', 'np.percentile', (['data_to_plot[cortex_bin]', '(99)'], {}), '(data_to_plot[cortex_bin], 99)\n', (1805, 1835), True, 'import numpy as np\n'), ((3443, 3460), 'numpy.hstack', 'np.hstack', (['ims[j]'], {}), '(ims[j])\n', (3452, 3460), True, 'import numpy as np\n'), ((3332, 3350), 'numpy.ones_like', 'np.ones_like', (['base'], {}), '(base)\n', (3344, 3350), True, 'import numpy as np\n'), ((3396, 3414), 'numpy.ones_like', 'np.ones_like', (['base'], {}), '(base)\n', (3408, 3414), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: <NAME>
# email: <EMAIL>
"""
Group astronomical images by fields and epochs.
Example of usage:
python stacking.py --path_data pathtoyourdata/ --radius 10 --deltaT 1
will stack all images in pathtoyourdata/ whose referenced RA and Dec
(CRVAL1 and CRVAL2) are separated by 10 arcmin maximum and taken
within time interval of 1 hour.
SWARP is required to perform the stacking.
On linux machines it can be installed with:
sudo apt install swarp
"""
import errno
import glob
import os
import subprocess
import shutil
import tempfile
import time as time1
from astropy.io import fits
from astropy.table import Table
import argparse
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy import time, wcs
import numpy as np
from gmadet.utils import list_files
def rm_p(src):
    """Remove the file *src*, ignoring OS-level errors (e.g. missing file).

    Only OSError is suppressed; the previous bare ``except BaseException``
    also swallowed KeyboardInterrupt/SystemExit.
    """
    try:
        os.remove(src)
    except OSError:
        pass
def mv_p(src, dest):
    """Move *src* to *dest*, silently ignoring OS-level failures.

    Best-effort move: a missing source or unwritable destination is ignored.
    Only OSError (which includes shutil.Error) is suppressed; the previous
    bare ``except BaseException`` also swallowed KeyboardInterrupt/SystemExit.
    """
    try:
        shutil.move(src, dest)
    except OSError:
        pass
def mkdir_p(path):
    """Create directory *path* (and parents), like ``mkdir -p``.

    Succeeds silently if *path* already exists as a directory; any other
    OSError (e.g. *path* exists but is a file, or permissions) propagates —
    identical to the old errno.EEXIST/isdir dance, in one call.
    """
    os.makedirs(path, exist_ok=True)
def table_obs(path_data, radius, deltaT, exclude=None):
    """Create an astropy Table grouping images by field of view and epoch.

    Parameters
    ----------
    path_data : str
        Directory containing the FITS images to scan.
    radius : float
        Field-matching radius in degrees (``makelists`` converts the
        user-supplied arcmin value before calling this function).
    deltaT : float
        Maximum time interval in hours for images to share an epoch.
    exclude : optional
        Pattern(s) forwarded to ``list_files`` to skip some files.

    Returns
    -------
    astropy.table.Table
        One row per image, with ``fieldID`` / ``epochID`` group columns
        filled in. Note: the "JD" column actually stores time in *hours*
        (julian day * 24), see the conversion below.
    """
    # List of all raw files
    filenames = list_files(path_data,
                           exclude=exclude,
                           get_subdirs=False)
    names = []
    RA = []
    Dec = []
    Time = []
    telescopes = []
    instruments = []
    filters = []
    for ima in filenames:
        # print("processing " + ima + " ...\x1b[2K", end='\r', flush=True),
        hdr = fits.open(ima, memmap=False)[0].header
        # Get time of observation in hours
        try:
            date = time.Time(hdr["DATE-OBS"], format="fits")
            # convert Julian day in hours
            hr = date.jd * 24
        except BaseException:
            try:
                hr = float(hdr["JD"]) * 24.0
            except BaseException:
                # NOTE(review): if neither keyword exists, `hr` stays
                # unbound and the Time.append(hr) below raises NameError
                # (or silently reuses the previous image's time).
                print(
                    "No keyword is found for the date of observation.\n"
                    "Expected: `DATE-OBS` or `JD`"
                )
        w = wcs.WCS(hdr)
        names.append(ima)
        # Reference coordinates (CRVAL1/CRVAL2) define the field position.
        RA.append(w.wcs.crval[0])
        Dec.append(w.wcs.crval[1])
        Time.append(hr)
        telescopes.append(hdr["TELESCOP"])
        instruments.append(hdr["INSTRUME"])
        filters.append(hdr["FILTER"])
    # Add unique index identifier per image
    idx = np.arange(len(names))
    # id to identify same field of view within given radius
    field_id = np.zeros(len(names), dtype=int)
    # id to identify epoch of same field within given time
    epoch_id = np.zeros(len(names), dtype=int)
    # Column to indicate the name of the stacked image
    stack_name = [None] * len(names)
    # RA and Dec took as reference for one field
    ra_ref = [None] * len(names)
    dec_ref = [None] * len(names)
    obs_table = Table(
        [
            idx,
            names,
            telescopes,
            instruments,
            filters,
            RA,
            Dec,
            Time,
            field_id,
            epoch_id,
            ra_ref,
            dec_ref,
            stack_name,
        ],
        names=[
            "idx",
            "filename",
            "Telescope",
            "Instrument",
            "Filter",
            "RA",
            "Dec",
            "JD",
            "fieldID",
            "epochID",
            "RA_ref",
            "Dec_ref",
            "stack_filename",
        ],
    )
    # Sort by obs-time
    obs_table.sort("JD")
    field_id = 0
    # First pass: assign a fieldID to every image, independently for each
    # (telescope, instrument, filter) combination.
    for tel, inst, filt in obs_table.group_by(
        ["Telescope", "Instrument", "Filter"]
    ).groups.keys:
        mask = (
            (obs_table["Telescope"] == tel)
            & (obs_table["Instrument"] == inst)
            & (obs_table["Filter"] == filt)
        )
        # Group by field of view
        # initialise with first image data
        ccrval_ref = SkyCoord(
            obs_table[mask]["RA"][0],
            obs_table[mask]["Dec"][0],
            unit=(u.deg, u.deg),
            frame="icrs",
        )
        field_id = 1
        mask_idx = obs_table["idx"] == obs_table[mask]["idx"][0]
        obs_table["fieldID"][mask_idx] = field_id
        obs_table["RA_ref"][mask_idx] = obs_table[mask]["RA"][0]
        obs_table["Dec_ref"][mask_idx] = obs_table[mask]["Dec"][0]
        for data in obs_table[mask]:
            if data["fieldID"] == 0:
                # If image has not been associated to a field yet
                # Check for the closest field
                # otherwise create new field ID
                ccrval = SkyCoord(
                    data["RA"], data["Dec"],
                    unit=(u.deg, u.deg),
                    frame="icrs"
                )
                mask2 = (obs_table["fieldID"] != 0) & mask
                sep_min = 100  # in degrees
                field_ref = -1
                for j, key in enumerate(
                    obs_table[mask2].group_by("fieldID").groups.keys
                ):
                    # Assume that ra and dec of one field is defined by first
                    # image for that field
                    mask3 = (obs_table["fieldID"] == key[0]) & mask2
                    ra_ref = np.atleast_1d(obs_table[mask3]["RA"])[0]
                    dec_ref = np.atleast_1d(obs_table[mask3]["Dec"])[0]
                    ccrval_ref = SkyCoord(
                        ra_ref, dec_ref, unit=(u.deg, u.deg), frame="icrs"
                    )
                    sep = ccrval.separation(ccrval_ref).degree
                    # Keep the closest field that is within `radius`.
                    if (sep < radius) & (sep < sep_min):
                        sep_min = sep
                        field_ref = key[0]
                if field_ref != -1:
                    # Attach this image to the closest existing field.
                    mask_idx = obs_table["idx"] == data["idx"]
                    obs_table["fieldID"][mask_idx] = field_ref
                    obs_table["RA_ref"][mask_idx] = ra_ref
                    obs_table["Dec_ref"][mask_idx] = dec_ref
                else:
                    # No field within `radius`: open a new one at this image.
                    field_id += 1
                    mask_idx = obs_table["idx"] == data["idx"]
                    obs_table["fieldID"][mask_idx] = field_id
                    obs_table["RA_ref"][mask_idx] = data["RA"]
                    obs_table["Dec_ref"][mask_idx] = data["Dec"]
    # Group fields by epochs
    for tel, inst, filt in obs_table.group_by(
        ["Telescope", "Instrument", "Filter"]
    ).groups.keys:
        mask = (
            (obs_table["Telescope"] == tel)
            & (obs_table["Instrument"] == inst)
            & (obs_table["Filter"] == filt)
        )
        for field_id in obs_table[mask].group_by("fieldID").groups.keys:
            mask_field = (obs_table["fieldID"] == field_id[0]) & mask
            JD_ref = obs_table[mask_field]["JD"][0]
            epoch_id = 1
            for data in obs_table[mask_field]:
                # Images within deltaT hours of the epoch start share the
                # epoch; otherwise a new epoch starts at this image.
                if data["JD"] <= JD_ref + deltaT:
                    mask_idx = obs_table["idx"] == data["idx"]
                    obs_table["epochID"][mask_idx] = epoch_id
                else:
                    epoch_id += 1
                    JD_ref = data["JD"]
                    mask_idx = obs_table["idx"] == data["idx"]
                    obs_table["epochID"][mask_idx] = epoch_id
    # obs_table.show_in_browser()
    return obs_table
def makelists(path_data, path_lists, radius, deltaT, exclude=None):
    """
    Group images by fields and epochs and write the SWARP input lists.

    Parameters
    ----------
    path_data : str
        Directory scanned for FITS images.
    path_lists : str
        Output directory for the generated list files.
    radius : float
        Radius in arcmin used to group fields based on CRVAL values.
    deltaT : float
        Maximum time interval in hours for one epoch, i.e. from one image
        taken at time t, all images of the same field taken before
        t + deltaT are stacked.
    exclude : optional
        Pattern(s) forwarded to ``table_obs`` to skip some files.

    Returns
    -------
    None. One ``<prefix>_field_XXX_YYY.list`` file per (field, epoch) and
    a ``fields.slist`` index file are created inside ``path_lists``.
    """
    # Create the output folder if needed.
    if not os.path.isdir(path_lists):
        mkdir_p(path_lists)
    # Convert radius from arcmin to degrees: table_obs compares angular
    # separations in degrees.
    radius = radius / 60
    # Observation table with images grouped by field and epoch.
    fields = table_obs(path_data, radius, deltaT, exclude=exclude)
    # Write the ascii files consumed by SWARP. `with` guarantees the file
    # handles are closed even if an error occurs mid-way (the original
    # used bare open()/close() pairs).
    with open(os.path.join(path_lists, "fields.slist"), "w") as fields_list:
        for tel, inst, filt in fields.group_by(
            ["Telescope", "Instrument", "Filter"]
        ).groups.keys:
            mask = (
                (fields["Telescope"] == tel)
                & (fields["Instrument"] == inst)
                & (fields["Filter"] == filt)
            )
            for field_id, epoch_id in (
                fields[mask].group_by(["fieldID", "epochID"]).groups.keys
            ):
                mask_field = (
                    (fields["fieldID"] == field_id) & (
                        fields["epochID"] == epoch_id) & mask
                )
                # Build a filesystem-safe prefix from telescope, band and
                # the field reference coordinates (spaces and dots removed).
                tel = str(fields["Telescope"][mask_field][0]).replace(" ", "")
                band = str(fields["Filter"][mask_field][0]).replace(" ", "")
                ra = str(
                    np.round(fields["RA_ref"][mask_field][0], 3)
                ).replace(".", "")
                dec = str(
                    np.round(fields["Dec_ref"][mask_field][0], 3)
                ).replace(".", "")
                filename = (
                    tel
                    + "_"
                    + band
                    + "_"
                    + ra
                    + "_"
                    + dec
                    + "_field_%03d_%03d" % (field_id, epoch_id)
                )
                with open(os.path.join(path_lists, filename + ".list"),
                          "w") as f:
                    for data in fields[mask_field]:
                        f.write(data["filename"] + "\n")
                        # Remember which stack each image belongs to.
                        mask_idx = fields["idx"] == data["idx"]
                        fields["stack_filename"][mask_idx] = filename
                fields_list.write(filename + " ")
def stacking(path_data, radius, deltaT, useweight=False,
             subBack=True, path_results="gmadet_stacking", gain=1, keep=False):
    """Stack images per field and epoch with SWARP.

    Parameters
    ----------
    path_data : str
        Directory containing the images to stack.
    radius : float
        Field-grouping radius in arcmin (forwarded to ``makelists``).
    deltaT : float
        Epoch length in hours (forwarded to ``makelists``).
    useweight : bool
        If truthy, also produce a ``.weight.fits`` map per stack.
    subBack : bool
        Whether SWARP subtracts the background (mapped to "Y"/"N").
    path_results : str
        Output directory for the stacked images.
    gain : float
        Detector gain passed to SWARP's ``-GAIN_DEFAULT``.
    keep : bool
        If True, rename a pre-existing output directory with a timestamp
        suffix instead of deleting it.
    """
    # Add '/' at the end of the paths if they are missing
    if path_data[-1] != "/":
        path_data = path_data + "/"
    path_stacks = path_results  # path_data + "gmadet_stacking/"
    # Rename folder if already existing
    if os.path.exists(path_stacks):
        if keep:
            mv_p(path_stacks,
                 path_stacks + '_' + time1.strftime("%Y%m%d-%H%M%S"))
        else:
            shutil.rmtree(path_stacks)
    mkdir_p(path_stacks)
    path_lists = tempfile.mkdtemp()  # Temporary dir for fieldlists
    useweight = bool(useweight)
    # Whether to substrack background
    if subBack:
        subBack = "Y"
    else:
        subBack = "N"
    # Make list of images to stack
    makelists(path_data, path_lists, radius, deltaT)
    # Get all the prefixes corresponding to one field
    filenames = glob.glob(os.path.join(path_lists, "*.list"))
    prefixes = []
    for filename in filenames:
        # Prefix = list filename minus the trailing "NNN" epoch counter.
        splitfilename = os.path.splitext(
            os.path.split(filename)[-1])[0].split("_")
        prefi = ""
        for i in range(len(splitfilename) - 1):
            prefi += splitfilename[i] + "_"
        prefixes.append(prefi)
    # Discard duplicates
    prefixes = np.unique(prefixes)
    # Loop over fields
    for pref in prefixes:
        imalists = []
        epochs = []
        # Loop over epochs
        for imalist in glob.glob(os.path.join(path_lists, pref + "???.list")):
            # Check that there are at least 2 images to stack
            # Otherwise skip it
            file = np.genfromtxt(imalist, dtype=str)
            if len(np.atleast_1d(file)) < 2:
                continue
            epochs += [os.path.join(
                path_stacks,
                os.path.splitext(os.path.split(imalist)[-1])[0])
            ]
            imalists += ["@" + imalist]
        # NOTE(review): plain string concatenation — if `path_results`
        # does not end with '/', the .head file lands outside the stacks
        # directory (e.g. "gmadet_stackingTEL_...head"). Consider
        # os.path.join(path_stacks, pref); confirm intended behaviour.
        point = path_stacks + pref
        # First SWARP pass: only produce a common output header for all
        # epochs of this field.
        subprocess.call(
            [
                "swarp",
                "-HEADER_ONLY", "Y",
                "-IMAGEOUT_NAME", point + ".head",
                "-GAIN_DEFAULT", str(gain),
            ]
            + imalists
        )
        # Neutralise keywords that would otherwise be propagated to every
        # stack (they are per-exposure quantities).
        subprocess.call(
            [
                "sed",
                "-i",
                "s/MJD-OBS/COMMENT/; s/EXPTIME/COMMENT/; s/GAIN /COMMENT/; s/SATURATE/COMMENT /",
                point + ".head",
            ]
        )
        for i, imalist in enumerate(imalists):
            epoch = epochs[i]
            # SWARP picks up <output>.head automatically as output header.
            shutil.copy(point + ".head", epoch + ".head")
            if useweight:
                subprocess.call(
                    [
                        "swarp",
                        "-IMAGEOUT_NAME", epoch + ".fits",
                        "-SUBTRACT_BACK", subBack,
                        "-BACK_SIZE", "128",
                        "-BACK_FILTERSIZE", "3",
                        "-WEIGHTOUT_NAME", epoch + ".weight.fits",
                        "-RESAMPLING_TYPE", "LANCZOS3",
                        "-OVERSAMPLING", "0",
                        "-COMBINE_TYPE", "MEDIAN",
                        "-GAIN_DEFAULT", str(gain),
                        "-COPY_KEYWORDS", "FILTER",
                    ]
                    + [imalist]
                )
            else:
                subprocess.call(
                    [
                        "swarp",
                        "-IMAGEOUT_NAME", epoch + ".fits",
                        "-GAIN_DEFAULT", str(gain),
                        "-SUBTRACT_BACK", subBack,
                        "-BACK_SIZE", "128",
                        "-BACK_FILTERSIZE", "3",
                        "-RESAMPLING_TYPE", "LANCZOS3",
                        "-OVERSAMPLING", "0",
                        "-COMBINE_TYPE", "MEDIAN",
                        "-COPY_KEYWORDS", "FILTER",
                    ]
                    + [imalist]
                )
            rm_p(epoch + ".head")
        rm_p(point + ".head")
        rm_p('swarp.xml')
        rm_p('coadd.weight.fits')
    # Do we really need to keep them?..
    # Yes! :)
    # Might be useful to know how the code grouped the files (in time
    # and RADEC) to stack when you have hundreds of them.
    mv_p(path_lists, os.path.join(path_stacks, 'fieldlists'))
| [
"astropy.table.Table",
"astropy.io.fits.open",
"numpy.genfromtxt",
"os.remove",
"os.path.exists",
"shutil.move",
"os.path.split",
"os.path.isdir",
"subprocess.call",
"numpy.round",
"tempfile.mkdtemp",
"shutil.copy",
"numpy.atleast_1d",
"gmadet.utils.list_files",
"numpy.unique",
"os.mak... | [((1463, 1520), 'gmadet.utils.list_files', 'list_files', (['path_data'], {'exclude': 'exclude', 'get_subdirs': '(False)'}), '(path_data, exclude=exclude, get_subdirs=False)\n', (1473, 1520), False, 'from gmadet.utils import list_files\n'), ((3114, 3387), 'astropy.table.Table', 'Table', (['[idx, names, telescopes, instruments, filters, RA, Dec, Time, field_id,\n epoch_id, ra_ref, dec_ref, stack_name]'], {'names': "['idx', 'filename', 'Telescope', 'Instrument', 'Filter', 'RA', 'Dec', 'JD',\n 'fieldID', 'epochID', 'RA_ref', 'Dec_ref', 'stack_filename']"}), "([idx, names, telescopes, instruments, filters, RA, Dec, Time,\n field_id, epoch_id, ra_ref, dec_ref, stack_name], names=['idx',\n 'filename', 'Telescope', 'Instrument', 'Filter', 'RA', 'Dec', 'JD',\n 'fieldID', 'epochID', 'RA_ref', 'Dec_ref', 'stack_filename'])\n", (3119, 3387), False, 'from astropy.table import Table\n'), ((11004, 11031), 'os.path.exists', 'os.path.exists', (['path_stacks'], {}), '(path_stacks)\n', (11018, 11031), False, 'import os\n'), ((11247, 11265), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (11263, 11265), False, 'import tempfile\n'), ((11975, 11994), 'numpy.unique', 'np.unique', (['prefixes'], {}), '(prefixes)\n', (11984, 11994), True, 'import numpy as np\n'), ((935, 949), 'os.remove', 'os.remove', (['src'], {}), '(src)\n', (944, 949), False, 'import os\n'), ((1029, 1051), 'shutil.move', 'shutil.move', (['src', 'dest'], {}), '(src, dest)\n', (1040, 1051), False, 'import shutil\n'), ((1129, 1146), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (1140, 1146), False, 'import os\n'), ((2337, 2349), 'astropy.wcs.WCS', 'wcs.WCS', (['hdr'], {}), '(hdr)\n', (2344, 2349), False, 'from astropy import time, wcs\n'), ((4175, 4275), 'astropy.coordinates.SkyCoord', 'SkyCoord', (["obs_table[mask]['RA'][0]", "obs_table[mask]['Dec'][0]"], {'unit': '(u.deg, u.deg)', 'frame': '"""icrs"""'}), "(obs_table[mask]['RA'][0], obs_table[mask]['Dec'][0], unit=(u.deg,\n u.deg), 
frame='icrs')\n", (4183, 4275), False, 'from astropy.coordinates import SkyCoord\n'), ((8402, 8427), 'os.path.isdir', 'os.path.isdir', (['path_lists'], {}), '(path_lists)\n', (8415, 8427), False, 'import os\n'), ((8797, 8837), 'os.path.join', 'os.path.join', (['path_lists', '"""fields.slist"""'], {}), "(path_lists, 'fields.slist')\n", (8809, 8837), False, 'import os\n'), ((11611, 11645), 'os.path.join', 'os.path.join', (['path_lists', '"""*.list"""'], {}), "(path_lists, '*.list')\n", (11623, 11645), False, 'import os\n'), ((12887, 13027), 'subprocess.call', 'subprocess.call', (["['sed', '-i',\n 's/MJD-OBS/COMMENT/; s/EXPTIME/COMMENT/; s/GAIN /COMMENT/; s/SATURATE/COMMENT /'\n , point + '.head']"], {}), "(['sed', '-i',\n 's/MJD-OBS/COMMENT/; s/EXPTIME/COMMENT/; s/GAIN /COMMENT/; s/SATURATE/COMMENT /'\n , point + '.head'])\n", (12902, 13027), False, 'import subprocess\n'), ((14929, 14968), 'os.path.join', 'os.path.join', (['path_stacks', '"""fieldlists"""'], {}), "(path_stacks, 'fieldlists')\n", (14941, 14968), False, 'import os\n'), ((1919, 1960), 'astropy.time.Time', 'time.Time', (["hdr['DATE-OBS']"], {'format': '"""fits"""'}), "(hdr['DATE-OBS'], format='fits')\n", (1928, 1960), False, 'from astropy import time, wcs\n'), ((11176, 11202), 'shutil.rmtree', 'shutil.rmtree', (['path_stacks'], {}), '(path_stacks)\n', (11189, 11202), False, 'import shutil\n'), ((12147, 12190), 'os.path.join', 'os.path.join', (['path_lists', "(pref + '???.list')"], {}), "(path_lists, pref + '???.list')\n", (12159, 12190), False, 'import os\n'), ((12306, 12339), 'numpy.genfromtxt', 'np.genfromtxt', (['imalist'], {'dtype': 'str'}), '(imalist, dtype=str)\n', (12319, 12339), True, 'import numpy as np\n'), ((13210, 13255), 'shutil.copy', 'shutil.copy', (["(point + '.head')", "(epoch + '.head')"], {}), "(point + '.head', epoch + '.head')\n", (13221, 13255), False, 'import shutil\n'), ((1230, 1249), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (1243, 1249), False, 'import 
os\n'), ((1804, 1832), 'astropy.io.fits.open', 'fits.open', (['ima'], {'memmap': '(False)'}), '(ima, memmap=False)\n', (1813, 1832), False, 'from astropy.io import fits\n'), ((4862, 4930), 'astropy.coordinates.SkyCoord', 'SkyCoord', (["data['RA']", "data['Dec']"], {'unit': '(u.deg, u.deg)', 'frame': '"""icrs"""'}), "(data['RA'], data['Dec'], unit=(u.deg, u.deg), frame='icrs')\n", (4870, 4930), False, 'from astropy.coordinates import SkyCoord\n'), ((10218, 10262), 'os.path.join', 'os.path.join', (['path_lists', "(filename + '.list')"], {}), "(path_lists, filename + '.list')\n", (10230, 10262), False, 'import os\n'), ((5638, 5698), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['ra_ref', 'dec_ref'], {'unit': '(u.deg, u.deg)', 'frame': '"""icrs"""'}), "(ra_ref, dec_ref, unit=(u.deg, u.deg), frame='icrs')\n", (5646, 5698), False, 'from astropy.coordinates import SkyCoord\n'), ((11117, 11148), 'time.strftime', 'time1.strftime', (['"""%Y%m%d-%H%M%S"""'], {}), "('%Y%m%d-%H%M%S')\n", (11131, 11148), True, 'import time as time1\n'), ((12359, 12378), 'numpy.atleast_1d', 'np.atleast_1d', (['file'], {}), '(file)\n', (12372, 12378), True, 'import numpy as np\n'), ((5492, 5529), 'numpy.atleast_1d', 'np.atleast_1d', (["obs_table[mask3]['RA']"], {}), "(obs_table[mask3]['RA'])\n", (5505, 5529), True, 'import numpy as np\n'), ((5563, 5601), 'numpy.atleast_1d', 'np.atleast_1d', (["obs_table[mask3]['Dec']"], {}), "(obs_table[mask3]['Dec'])\n", (5576, 5601), True, 'import numpy as np\n'), ((9562, 9606), 'numpy.round', 'np.round', (["fields['RA_ref'][mask_field][0]", '(3)'], {}), "(fields['RA_ref'][mask_field][0], 3)\n", (9570, 9606), True, 'import numpy as np\n'), ((9738, 9783), 'numpy.round', 'np.round', (["fields['Dec_ref'][mask_field][0]", '(3)'], {}), "(fields['Dec_ref'][mask_field][0], 3)\n", (9746, 9783), True, 'import numpy as np\n'), ((11750, 11773), 'os.path.split', 'os.path.split', (['filename'], {}), '(filename)\n', (11763, 11773), False, 'import os\n'), ((12510, 12532), 
'os.path.split', 'os.path.split', (['imalist'], {}), '(imalist)\n', (12523, 12532), False, 'import os\n')] |
#--------------------------------------------------------------------------------------------------
# Import required libraries
from tkinter import *
from tkinter import colorchooser
from tkinter import messagebox
from tkinter import ttk
from tkinter import filedialog
import cv2
from PIL import Image, ImageTk
import numpy as np
import os
#--------------------------------------------------------------------------------------------------
# `global` is a no-op at module scope, so the original declarations were
# dropped; these lists are module-level state shared by the canvas callbacks.
array = []   # flat [x1, y1, x2, y2, ...] vertex coords of the polygon in progress
array2 = []  # the same vertices as [x, y] pairs, consumed by cv2.fillPoly
array3 = []  # reserved for the (commented-out) bounding-box tool
#--------------------------------------------------------------------------------------------------
App = Tk()
App.iconbitmap(default='favicon.ico')
App.title("Image Segmentation Tool")
App.geometry("400x400")
#--------------------------------------------------------------------------------------------------
# Functions and Actions
def selectfolder():
    """Ask the user for an image folder, list its files in the listbox,
    and switch from the landing page to the segmentation page."""
    global filename
    filename = filedialog.askdirectory()
    # Output folder for the saved segmentation maps; makedirs with
    # exist_ok replaces the racy isdir-then-mkdir pair.
    os.makedirs(os.path.join(filename, "Segmentation"), exist_ok=True)
    print(filename)
    # NOTE(review): os.walk also descends into Segmentation/, so
    # previously saved masks reappear in the listing — confirm intended.
    for _root, _dirs, files in os.walk(filename):
        for entry in files:  # renamed: `file` shadowed the builtin
            directoryview.insert(END, entry)
    landingPage.destroy()
    # Grow the window to (almost) full screen for the canvas view.
    App.geometry("{0}x{1}+0+0".format(App.winfo_screenwidth() - 20,
                                      App.winfo_screenheight() - 80))
    App.resizable(0, 0)
    imsegpage.pack(fill=BOTH)
def showimg(event):
    # Listbox selection handler: load the chosen image at native size,
    # display it on the canvas and allocate a fresh (all-zero)
    # segmentation map for it.
    n = directoryview.curselection()
    global fname,img,segmap
    fname = directoryview.get(n)
    imsegcanvas.delete("all")
    imgpath=filename+"/"+fname
    img = Image.open(imgpath)
    imgwidth, imgheight = img.size
    # img = img.resize((300, 300), Image.ANTIALIAS)
    # Segmentation Map: one 3-channel uint8 pixel per image pixel
    # (note the (height, width) order expected by OpenCV).
    segmap = np.zeros((imgheight, imgwidth, 3), np.uint8)
    img = ImageTk.PhotoImage(img)
    # Match canvas and scroll region to the image so scrollbars work.
    imsegcanvas.config(width=imgwidth,height=imgheight,scrollregion=(0,0,imgwidth,imgheight))
    imsegcanvas.create_image(0,0, anchor=NW, image=img)
def choose_color():
    """Open the color-chooser dialog and remember the selection.

    Stores the full ((r, g, b), hex) tuple in ``clf`` and the hex string
    in ``color_code`` for later canvas drawing.
    """
    global color_code, clf
    clf = colorchooser.askcolor(title="Choose color")
    _rgb, color_code = clf
    print(color_code)
def point(event):
    """Double-click handler: mark one polygon vertex on the canvas.

    Draws a small red dot at the clicked canvas coordinate and records
    the vertex both as flat coords (``array``, for create_polygon) and as
    an [x, y] pair (``array2``, for cv2.fillPoly).
    """
    try:
        x1, y1 = (imsegcanvas.canvasx(event.x) - 1), (imsegcanvas.canvasy(event.y) - 1)
        imsegcanvas.create_oval(x1 - 2, y1 - 2, x1 + 2, y1 + 2, fill="#ff0000")
        array.append(x1)
        array.append(y1)
        array2.append([x1, y1])
    # Narrowed from a bare except, which would also swallow
    # KeyboardInterrupt / SystemExit.
    except Exception:
        messagebox.showerror("Error", "Error Occured")
def clearcanvas(event):
    # Middle-click handler: remove every drawn marker/polygon and redraw
    # the untouched source image.
    # NOTE(review): only the Tk canvas is cleared — the cv2 `segmap`
    # array keeps any polygons already rasterized; confirm intended.
    imsegcanvas.delete("all")
    imsegcanvas.create_image(0, 0, anchor=NW, image=img)
    # Keep a reference on the widget so the PhotoImage is not garbage
    # collected while displayed.
    imsegcanvas.image = img
    messagebox.showinfo("Message", "Segmap Cleared")
def genpolygon(event):
    """Right-click handler: close the current polygon and rasterize it.

    Draws a stippled polygon on the Tk canvas for visual feedback, fills
    the same region in ``segmap`` via OpenCV, then resets the vertex
    buffers for the next polygon.
    """
    try:
        imsegcanvas.create_polygon(array, outline=color_code, fill=color_code, width=3, stipple="gray50")
        pts = np.array(array2, np.int32)
        pts = pts.reshape((-1, 1, 2))
        # tkinter's colorchooser returns (r, g, b); OpenCV images are BGR,
        # hence the reversed channel order.
        cv2.fillPoly(segmap, [pts], [clf[0][2], clf[0][1], clf[0][0]], 1)
        array2.clear()
        array.clear()
    # Narrowed from a bare except, which would also swallow
    # KeyboardInterrupt / SystemExit.
    except Exception:
        messagebox.showerror("Error", "Error Occured")
def outline(event):
    """Drag handler (<B1-Motion>): trace the polygon outline freehand.

    Same vertex bookkeeping as point(), but with a smaller 1-px marker
    so a drag leaves a thin trail of vertices.
    """
    try:
        x1, y1 = (imsegcanvas.canvasx(event.x) - 1), (imsegcanvas.canvasy(event.y) - 1)
        imsegcanvas.create_oval(x1 - 1, y1 - 1, x1 + 1, y1 + 1, fill="#ff0000")
        array.append(x1)
        array.append(y1)
        array2.append([x1, y1])
    # Narrowed from a bare except, which would also swallow
    # KeyboardInterrupt / SystemExit.
    except Exception:
        messagebox.showerror("Error", "Error Occured")
def save():
    # Persist the OpenCV segmentation map next to the source images,
    # under <folder>/Segmentation/<original filename>.
    print(filename+"/Segmentation/"+fname)
    cv2.imwrite(filename+"/Segmentation/"+fname, segmap)
    messagebox.showinfo("Message", "Image Saved")
# def bbox(event):
# if len(array3)>=3:
# imsegcanvas.create_rectangle(array3[0],array3[1],array3[2],array3[3],fill="")
# array3.clear()
# x1, y1 = (imsegcanvas.canvasx(event.x) - 1), (imsegcanvas.canvasy(event.y) - 1)
# imsegcanvas.create_oval(x1 - 2, y1 - 2, x1 + 2, y1 + 2, fill="#ff0000")
# array3.append(x1)
# array3.append(y1)
#--------------------------------------------------------------------------------------------------
# Landing Page
# (module-level `global` statements were removed throughout this section:
# they are no-ops at module scope)
landingPage = Frame(App)
landingText = Label(landingPage,text="An Image Segmentation Tool using Tkinter and OpenCV")
selectFolder = Button(landingPage,text="Select Image Folder",command=selectfolder)
canvas = Canvas(landingPage, width = 300, height = 300)
imgland = Image.open("Segvizlogo.png")
# Image.LANCZOS is the same filter as the deprecated Image.ANTIALIAS
# alias (removed in Pillow 10).
imgland = imgland.resize((300, 300), Image.LANCZOS)
imgland=ImageTk.PhotoImage(imgland)
canvas.create_image(20,20, anchor=NW, image=imgland)
canvas.pack()
selectFolder.pack(side=BOTTOM,fill=BOTH)
landingText.pack(fill=BOTH,side=BOTTOM)
landingPage.pack()
#--------------------------------------------------------------------------------------------------
# Image Segmentation Tool
imsegpage = Frame(App)
currentimage=Image.open("segvizbg.png")
currentimage=currentimage.resize((250, 250), Image.LANCZOS)
wt,ht=currentimage.size
imsegcanvas = Canvas(imsegpage,width=wt,height=ht)
canvasimage = ImageTk.PhotoImage(currentimage)
imsegcanvas.create_image(0,0, anchor=NW, image=canvasimage)
# List Box for files
directoryview=Listbox(imsegpage)
directoryview.bind("<<ListboxSelect>>", showimg)
directoryview.pack(side="left", fill=Y,expand=False)
# Scrollbars for Image
scroll_x = Scrollbar(imsegpage, orient="horizontal", command=imsegcanvas.xview)
scroll_y = Scrollbar(imsegpage, orient="vertical", command=imsegcanvas.yview)
imsegcanvas.configure(yscrollcommand=scroll_y.set, xscrollcommand=scroll_x.set)
scroll_x.pack(side=BOTTOM,fill=X)
scroll_y.pack(side=RIGHT,fill=Y)
# Tab Control
tabControl = ttk.Notebook(imsegpage)
tab1 = ttk.Frame(tabControl)
selectcolor = Button(tab1,text="Select Color",command=choose_color)
# Renamed from `save` so the Button widget no longer shadows the save()
# callback defined above (command=save still binds the function).
save_button = Button(tab1,text="Save Segmentation",command=save)
tabControl.add(tab1, text='Tools')
# Pack the widgets
selectcolor.pack(fill=BOTH)
save_button.pack(fill=BOTH)
tabControl.pack(side=TOP,fill=X)
# Bind the canvas actions
imsegcanvas.bind("<Double-1>",point)
# imsegcanvas.bind("<Button-1>",bbox)
imsegcanvas.bind("<Button-3>",genpolygon)
imsegcanvas.bind("<B1-Motion>",outline)
imsegcanvas.bind("<Button-2>",clearcanvas)
#--------------------------------------------------------------------------------------------------
imsegcanvas.pack()
App.mainloop()
"cv2.imwrite",
"tkinter.filedialog.askdirectory",
"PIL.Image.open",
"cv2.fillPoly",
"tkinter.messagebox.showerror",
"tkinter.ttk.Frame",
"os.walk",
"numpy.array",
"numpy.zeros",
"os.path.isdir",
"os.mkdir",
"tkinter.ttk.Notebook",
"tkinter.colorchooser.askcolor",
"tkinter.messagebox.showin... | [((4331, 4359), 'PIL.Image.open', 'Image.open', (['"""Segvizlogo.png"""'], {}), "('Segvizlogo.png')\n", (4341, 4359), False, 'from PIL import Image, ImageTk\n'), ((4422, 4449), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['imgland'], {}), '(imgland)\n', (4440, 4449), False, 'from PIL import Image, ImageTk\n'), ((4874, 4900), 'PIL.Image.open', 'Image.open', (['"""segvizbg.png"""'], {}), "('segvizbg.png')\n", (4884, 4900), False, 'from PIL import Image, ImageTk\n'), ((5052, 5084), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['currentimage'], {}), '(currentimage)\n', (5070, 5084), False, 'from PIL import Image, ImageTk\n'), ((5682, 5705), 'tkinter.ttk.Notebook', 'ttk.Notebook', (['imsegpage'], {}), '(imsegpage)\n', (5694, 5705), False, 'from tkinter import ttk\n'), ((5713, 5734), 'tkinter.ttk.Frame', 'ttk.Frame', (['tabControl'], {}), '(tabControl)\n', (5722, 5734), False, 'from tkinter import ttk\n'), ((907, 932), 'tkinter.filedialog.askdirectory', 'filedialog.askdirectory', ([], {}), '()\n', (930, 932), False, 'from tkinter import filedialog\n'), ((944, 990), 'os.path.isdir', 'os.path.isdir', (["(filename + '/' + 'Segmentation')"], {}), "(filename + '/' + 'Segmentation')\n", (957, 990), False, 'import os\n'), ((1104, 1121), 'os.walk', 'os.walk', (['filename'], {}), '(filename)\n', (1111, 1121), False, 'import os\n'), ((1561, 1580), 'PIL.Image.open', 'Image.open', (['imgpath'], {}), '(imgpath)\n', (1571, 1580), False, 'from PIL import Image, ImageTk\n'), ((1705, 1749), 'numpy.zeros', 'np.zeros', (['(imgheight, imgwidth, 3)', 'np.uint8'], {}), '((imgheight, imgwidth, 3), np.uint8)\n', (1713, 1749), True, 'import numpy as np\n'), ((1760, 1783), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['img'], {}), '(img)\n', (1778, 1783), False, 'from PIL import Image, ImageTk\n'), ((2005, 2048), 'tkinter.colorchooser.askcolor', 'colorchooser.askcolor', ([], {'title': '"""Choose color"""'}), "(title='Choose color')\n", (2026, 2048), 
False, 'from tkinter import colorchooser\n'), ((2588, 2636), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Message"""', '"""Segmap Cleared"""'], {}), "('Message', 'Segmap Cleared')\n", (2607, 2636), False, 'from tkinter import messagebox\n'), ((3455, 3511), 'cv2.imwrite', 'cv2.imwrite', (["(filename + '/Segmentation/' + fname)", 'segmap'], {}), "(filename + '/Segmentation/' + fname, segmap)\n", (3466, 3511), False, 'import cv2\n'), ((3512, 3557), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', (['"""Message"""', '"""Image Saved"""'], {}), "('Message', 'Image Saved')\n", (3531, 3557), False, 'from tkinter import messagebox\n'), ((1019, 1060), 'os.mkdir', 'os.mkdir', (["(filename + '/' + 'Segmentation')"], {}), "(filename + '/' + 'Segmentation')\n", (1027, 1060), False, 'import os\n'), ((2793, 2819), 'numpy.array', 'np.array', (['array2', 'np.int32'], {}), '(array2, np.int32)\n', (2801, 2819), True, 'import numpy as np\n'), ((2866, 2931), 'cv2.fillPoly', 'cv2.fillPoly', (['segmap', '[pts]', '[clf[0][2], clf[0][1], clf[0][0]]', '(1)'], {}), '(segmap, [pts], [clf[0][2], clf[0][1], clf[0][0]], 1)\n', (2878, 2931), False, 'import cv2\n'), ((2394, 2440), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Error Occured"""'], {}), "('Error', 'Error Occured')\n", (2414, 2440), False, 'from tkinter import messagebox\n'), ((2995, 3041), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Error Occured"""'], {}), "('Error', 'Error Occured')\n", (3015, 3041), False, 'from tkinter import messagebox\n'), ((3345, 3391), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""Error Occured"""'], {}), "('Error', 'Error Occured')\n", (3365, 3391), False, 'from tkinter import messagebox\n')] |
import argparse
import os
import pickle
import numpy as np
import pandas as pd
import torch
from PIL import PngImagePlugin
from torch.utils.data import DataLoader
from tqdm import tqdm
from benchmarks.wit.dataset_class import FoodiMLDataset
from benchmarks.wit.evaluator import adapter, compute_valid_answers
from benchmarks.wit.network import load_saved_model
# Raise Pillow's per-chunk PNG text limit to 100 MiB — presumably to let
# images with very large metadata chunks load without tripping PIL's
# safety limit (TODO confirm against the dataset's images).
LARGE_ENOUGH_NUMBER = 100
PngImagePlugin.MAX_TEXT_CHUNK = LARGE_ENOUGH_NUMBER * (1024 ** 2)
def generate_embeddings(model, dataloader_val, EMBEDDING_SIZE=512):
    """Generate image and text embeddings with `model` for the images and
    captions provided by `dataloader_val`.

    Parameters
    ----------
    model : WIT_NN
        Must implement the forward_embeds function.
    dataloader_val : torch.utils.data.DataLoader
        Dataloader for the validation set; its batch size must divide the
        dataset length exactly.
    EMBEDDING_SIZE : int
        Dimensionality of the embedding vectors (default 512).

    Returns
    -------
    img_embs : torch.Tensor
        Tensor of size (len(dataloader_val.dataset), EMBEDDING_SIZE)
        containing the image embeddings for the whole dataset.
    txt_embs : torch.Tensor
        Tensor of size (len(dataloader_val.dataset), EMBEDDING_SIZE)
        containing the text embeddings for the whole dataset.
    """
    batch_size = dataloader_val.batch_size
    num_samples = len(dataloader_val.dataset)
    img_embs = torch.zeros(num_samples, EMBEDDING_SIZE)
    txt_embs = torch.zeros(num_samples, EMBEDDING_SIZE)
    # BUG FIX: the original asserted `len(len(dataloader_val.dataset))`,
    # which raises TypeError (len() of an int) instead of checking
    # divisibility.
    assert num_samples % batch_size == 0, (
        f"batch size ({batch_size}) must be a divisor of the number of "
        f"samples ({num_samples}); some divisors below 1000: "
        f"{compute_divisors(num_samples)}"
    )
    for i, batch in tqdm(enumerate(dataloader_val)):
        img_emb, txt_emb = model.forward_embeds(batch)
        # Write each batch into its slice of the preallocated buffers.
        img_embs[i * batch_size: i * batch_size + batch_size,
        :] = img_emb.cpu()
        txt_embs[i * batch_size: i * batch_size + batch_size,
        :] = txt_emb.cpu()
    return img_embs, txt_embs
def init_recalls(k_list, length):
    """Initialize the binary hit arrays for each requested Recall@k.

    For every k in *k_list* a zero array of size *length* is created; a
    later evaluation pass sets position i to 1 when query i was found in
    the top-k hits, and leaves it at 0 otherwise.
    """
    return {k: np.zeros(length) for k in k_list}
def report(task, recall_dict):
    """Print and return the Recall@k percentages for *task*.

    Each value in *recall_dict* is a binary hit array; its mean (rounded
    to 4 decimals) times 100 is the recall percentage.
    """
    summary = {}
    for k, hits in recall_dict.items():
        pct = 100.0 * np.round(np.sum(hits) / len(hits), 4)
        summary[k] = pct
        print(f"{task}: Recall at {k}: ", np.round(pct, 2), "%")
    return summary
def compute_divisors(n):
    """Return the divisors of *n* below 1000, in increasing order.

    Used to suggest valid batch sizes in assertion messages.
    """
    divisors = []
    for candidate in range(1, 1000):
        if n % candidate == 0:
            divisors.append(candidate)
    return divisors
def sim_matrix(a, b):
    """Return the matrix of pairwise inner products, i.e. a @ b.T for 2-D tensors."""
    return a @ b.transpose(0, 1)
def compute_metrics_sequentially(im, tx, valid_answers, adapter,
                                 metric="t2i",
                                 batch_size_similarity=70):
    """Compute recall at k for the embeddings of images and text given by im and tx respectively.

    Parameters
    ----------
    im : torch.Tensor (N, EMBEDDING_SIZE)
        Tensor containing image embeddings for all the validation set.
    tx : torch.Tensor (N, EMBEDDING_SIZE)
        Tensor containing text embeddings for all the validation set.
    valid_answers : dict
        Dictionary containing the valid answers for each item on the validation set, as produced by compute_valid_answers (benchmarks/wit/evaluator.py).
    adapter:
        class handling the indexes of the valid answers of the validation dataframe.
    metric : str
        Either 't2i' or 'i2t'
    batch_size_similarity : int
        Batch size to iterate over the validation set, must be a divisor of the validation set size.

    Returns
    -------
    r_at_k : dict
        Dictionary containing the Recall at k (R@k)
        - keys (different k for recall), typically 1, 5, 10
        - Values: binary hit arrays (one entry per query)
    """
    assert metric == "t2i" or metric == "i2t", f"metric should be either t2i or i2t, {metric} is not recognized as a metric"
    num_embeddings = im.size()[0]
    ks = [1, 5, 10]
    r_at_k = init_recalls(ks, num_embeddings)
    assert num_embeddings % batch_size_similarity == 0, f"batch size similarity ({batch_size_similarity}) must be divisor of num_embeddings ({num_embeddings}), here's a list of some of them below 1000 {compute_divisors(num_embeddings)}"
    # Process the queries in slices to keep the similarity matrix small
    # (full-gallery x batch instead of full x full).
    for i in tqdm(range(int(num_embeddings / batch_size_similarity))):
        if metric == "t2i":
            # Text queries against the full image gallery.
            txt_i = tx[
                i * batch_size_similarity: i * batch_size_similarity + batch_size_similarity,
                :]  # b_s_s x 512
            sims = sim_matrix(im, txt_i)
        elif metric == "i2t":
            # Image queries against the full text gallery.
            img_i = im[
                i * batch_size_similarity: i * batch_size_similarity + batch_size_similarity,
                :]
            sims = sim_matrix(tx, img_i)
        pos_matrix = i * batch_size_similarity
        sims = sims.cpu().numpy()
        # Column-wise argsort then reverse rows: gallery indices ranked by
        # decreasing similarity for each query in the batch.
        inds = np.argsort(sims, axis=0)[::-1]
        for j in range(batch_size_similarity):
            image_id = adapter.image_ids[pos_matrix + j]
            for k in ks:
                # Hit if any valid answer appears among the top-k ranks.
                intersection = np.intersect1d(inds[:k, j],
                                               valid_answers[image_id])
                r_at_k[k][pos_matrix + j] = 1 if len(intersection) > 0 else 0
    return r_at_k
if __name__ == '__main__':
    """
    This script computes the metrics for the validation set of foodi-ml-dataset. To run this script:
    python benchmarks/wit/evaluate_network_bigdata.py --dataset-path PATH_TO_DATASET_FOLDER --code-path PATH_TO_REPO_FOLDER --model-weights PATH_TO_MODEL_WEIGHTS
    """
    # Prefer GPU when available; all heavy work falls back to CPU otherwise.
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset-path", type=str,
                        help="Path of the downloaded dataset",
                        default="../../../dataset")
    parser.add_argument("--code-path", type=str,
                        help="Path to DATASET_NAME repository",
                        default="../../")
    parser.add_argument("--model-weights",
                        type=str, help="Path to the model weights",
                        default="./trained_model_30.pth")
    args = parser.parse_args()
    DATASET_PATH = args.dataset_path
    CODE_PATH = args.code_path
    WEIGHTS_PATH = args.model_weights
    PATH_T2I_METRICS = "./t2i_metrics.pkl"
    PATH_I2T_METRICS = "./i2t_metrics.pkl"
    # Cached embedding tensors live next to the dataset.
    PATH_IMG_EMB = os.path.join(DATASET_PATH, f"img_embeddings.pt")
    PATH_TXT_EMB = os.path.join(DATASET_PATH, f"txt_embeddings.pt")
    PARQUET_PATH = os.path.join(DATASET_PATH, 'samples', 'split=val')
    df_val = pd.read_parquet(PARQUET_PATH)
    print(f"df_val shape: {df_val.shape}")
    print("Computing valid answers...")
    answers = compute_valid_answers(df_val)
    # NOTE(review): the imported name `adapter` is rebound here, first to
    # an instance and then to its get_adapter() result — consider a
    # distinct local name.
    adapter = adapter(df_val)
    adapter = adapter.get_adapter()
    ds_val = FoodiMLDataset(df_val, (224, 224))
    BATCH_SIZE_VALIDATION = 70 # optimized for a machine with 1 GPU and 32 GB
    # drop_last=True keeps the batch-divisibility invariant required by
    # generate_embeddings / compute_metrics_sequentially.
    dataloader_val = DataLoader(dataset=ds_val,
                                batch_size=BATCH_SIZE_VALIDATION,
                                drop_last=True, num_workers=8)
    t2i_epochs = {}
    i2t_epochs = {}
    # Load or generate embeddings
    if not os.path.isfile(PATH_IMG_EMB):
        # load trained model
        print(f"Loading trained model: {WEIGHTS_PATH}")
        model = load_saved_model(device=device,
                                 path=WEIGHTS_PATH)
        im, tx = generate_embeddings(model, dataloader_val)
        # Cache embeddings so later runs skip the forward passes.
        torch.save(im, PATH_IMG_EMB)
        torch.save(tx, PATH_TXT_EMB)
    else:
        im = torch.load(PATH_IMG_EMB)
        tx = torch.load(PATH_TXT_EMB)
    metrics = compute_metrics_sequentially(im, tx, answers, adapter,
                                           metric="t2i",
                                           batch_size_similarity=BATCH_SIZE_VALIDATION)
    t2i_epochs[WEIGHTS_PATH] = report("t2i", metrics)
    metrics = compute_metrics_sequentially(im, tx, answers, adapter,
                                           metric="i2t",
                                           batch_size_similarity=BATCH_SIZE_VALIDATION)
    i2t_epochs[WEIGHTS_PATH] = report("i2t", metrics)
    # Persist both recall summaries keyed by the weights file used.
    with open(PATH_T2I_METRICS, 'wb') as fh:
        pickle.dump(t2i_epochs, fh, protocol=pickle.HIGHEST_PROTOCOL)
    with open(PATH_I2T_METRICS, 'wb') as fh:
        pickle.dump(i2t_epochs, fh, protocol=pickle.HIGHEST_PROTOCOL)
| [
"pandas.read_parquet",
"numpy.argsort",
"torch.cuda.is_available",
"benchmarks.wit.network.load_saved_model",
"benchmarks.wit.evaluator.compute_valid_answers",
"argparse.ArgumentParser",
"benchmarks.wit.evaluator.adapter.get_adapter",
"benchmarks.wit.evaluator.adapter",
"numpy.round",
"benchmarks.... | [((6101, 6126), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6124, 6126), False, 'import argparse\n'), ((6861, 6909), 'os.path.join', 'os.path.join', (['DATASET_PATH', 'f"""img_embeddings.pt"""'], {}), "(DATASET_PATH, f'img_embeddings.pt')\n", (6873, 6909), False, 'import os\n'), ((6929, 6977), 'os.path.join', 'os.path.join', (['DATASET_PATH', 'f"""txt_embeddings.pt"""'], {}), "(DATASET_PATH, f'txt_embeddings.pt')\n", (6941, 6977), False, 'import os\n'), ((6997, 7047), 'os.path.join', 'os.path.join', (['DATASET_PATH', '"""samples"""', '"""split=val"""'], {}), "(DATASET_PATH, 'samples', 'split=val')\n", (7009, 7047), False, 'import os\n'), ((7062, 7091), 'pandas.read_parquet', 'pd.read_parquet', (['PARQUET_PATH'], {}), '(PARQUET_PATH)\n', (7077, 7091), True, 'import pandas as pd\n'), ((7189, 7218), 'benchmarks.wit.evaluator.compute_valid_answers', 'compute_valid_answers', (['df_val'], {}), '(df_val)\n', (7210, 7218), False, 'from benchmarks.wit.evaluator import adapter, compute_valid_answers\n'), ((7233, 7248), 'benchmarks.wit.evaluator.adapter', 'adapter', (['df_val'], {}), '(df_val)\n', (7240, 7248), False, 'from benchmarks.wit.evaluator import adapter, compute_valid_answers\n'), ((7263, 7284), 'benchmarks.wit.evaluator.adapter.get_adapter', 'adapter.get_adapter', ([], {}), '()\n', (7282, 7284), False, 'from benchmarks.wit.evaluator import adapter, compute_valid_answers\n'), ((7299, 7333), 'benchmarks.wit.dataset_class.FoodiMLDataset', 'FoodiMLDataset', (['df_val', '(224, 224)'], {}), '(df_val, (224, 224))\n', (7313, 7333), False, 'from benchmarks.wit.dataset_class import FoodiMLDataset\n'), ((7434, 7529), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'ds_val', 'batch_size': 'BATCH_SIZE_VALIDATION', 'drop_last': '(True)', 'num_workers': '(8)'}), '(dataset=ds_val, batch_size=BATCH_SIZE_VALIDATION, drop_last=True,\n num_workers=8)\n', (7444, 7529), False, 'from torch.utils.data import DataLoader\n'), 
((2475, 2491), 'numpy.zeros', 'np.zeros', (['length'], {}), '(length)\n', (2483, 2491), True, 'import numpy as np\n'), ((6036, 6061), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6059, 6061), False, 'import torch\n'), ((6012, 6032), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (6024, 6032), False, 'import torch\n'), ((6067, 6086), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (6079, 6086), False, 'import torch\n'), ((7677, 7705), 'os.path.isfile', 'os.path.isfile', (['PATH_IMG_EMB'], {}), '(PATH_IMG_EMB)\n', (7691, 7705), False, 'import os\n'), ((7809, 7859), 'benchmarks.wit.network.load_saved_model', 'load_saved_model', ([], {'device': 'device', 'path': 'WEIGHTS_PATH'}), '(device=device, path=WEIGHTS_PATH)\n', (7825, 7859), False, 'from benchmarks.wit.network import load_saved_model\n'), ((7961, 7989), 'torch.save', 'torch.save', (['im', 'PATH_IMG_EMB'], {}), '(im, PATH_IMG_EMB)\n', (7971, 7989), False, 'import torch\n'), ((7998, 8026), 'torch.save', 'torch.save', (['tx', 'PATH_TXT_EMB'], {}), '(tx, PATH_TXT_EMB)\n', (8008, 8026), False, 'import torch\n'), ((8050, 8074), 'torch.load', 'torch.load', (['PATH_IMG_EMB'], {}), '(PATH_IMG_EMB)\n', (8060, 8074), False, 'import torch\n'), ((8088, 8112), 'torch.load', 'torch.load', (['PATH_TXT_EMB'], {}), '(PATH_TXT_EMB)\n', (8098, 8112), False, 'import torch\n'), ((8705, 8766), 'pickle.dump', 'pickle.dump', (['t2i_epochs', 'fh'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(t2i_epochs, fh, protocol=pickle.HIGHEST_PROTOCOL)\n', (8716, 8766), False, 'import pickle\n'), ((8820, 8881), 'pickle.dump', 'pickle.dump', (['i2t_epochs', 'fh'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(i2t_epochs, fh, protocol=pickle.HIGHEST_PROTOCOL)\n', (8831, 8881), False, 'import pickle\n'), ((2741, 2768), 'numpy.round', 'np.round', (['report_dict[k]', '(2)'], {}), '(report_dict[k], 2)\n', (2749, 2768), True, 'import numpy as np\n'), ((5304, 5328), 'numpy.argsort', 
'np.argsort', (['sims'], {'axis': '(0)'}), '(sims, axis=0)\n', (5314, 5328), True, 'import numpy as np\n'), ((5495, 5547), 'numpy.intersect1d', 'np.intersect1d', (['inds[:k, j]', 'valid_answers[image_id]'], {}), '(inds[:k, j], valid_answers[image_id])\n', (5509, 5547), True, 'import numpy as np\n'), ((2649, 2671), 'numpy.sum', 'np.sum', (['recall_dict[k]'], {}), '(recall_dict[k])\n', (2655, 2671), True, 'import numpy as np\n')] |
from tracking_model.qp import qp_solver
from random import randint, choices, uniform, sample
import pandas as pd
import numpy as np
from time import time
def track_index(
    data:pd.DataFrame,
    K:int,
    index_name:str,
    P:int=15,
    cross:float=1,
    mut:float=0.85,
    cut:int=4,
    weight_limits:np.ndarray=None,
    max_time:float=10,
    mse_limit:float=5*(10**(-10))
    ) -> tuple:
    """Tracking model using a Genetic Algorithm and Quadratic Optimization (Amorim et al., 2020).

    The objective is to imitate a time series, the 'index', with a linear
    combination of at most K of the other time series ('stocks').

    Args:
        data (pd.DataFrame): time series of returns containing the stocks and
            the index, ordered in ascending order by date.
        K (int): maximum number of stocks to invest in.
        index_name (str): the column name of the index to track.
        P (int): initial population size.
        cross (float): probability of crossover.
        mut (float): probability of mutation (only drawn when crossover occurred).
        cut (int): cut point of the binary genome (e.g. [0, 1, 1, 0, ...]) used
            to construct children from two parents.
        weight_limits (np.ndarray): lower/upper investment boundary per stock,
            shape (K, 2). NOTE(review): currently unused by this implementation.
        max_time (float): wall-clock budget in seconds; the current best is
            returned once exceeded. Defaults to 10.
        mse_limit (float): target objective value; the search stops early once
            any individual (excluding the two dropped worst) reaches it.

    Returns:
        tuple: (best solution dict with keys 'weights', 'names' and
        'cost value', total elapsed time in seconds).
    """
    t0 = time()
    # Number of candidate stocks (all columns except the index).
    N = data.loc[:, ~data.columns.str.match(index_name)].shape[1]
    pop = list(gen_initial_pop(N, P, K))
    stop = False
    flag_mse_limit = False
    results_df = None
    while not stop:
        children = None
        if uniform(0, 1) <= cross:
            parents = choices(pop, k=2)
            children = crossover(parents, cut, K)
        # Bug fix: only mutate/extend when crossover actually produced children.
        # Previously, with cross < 1, `mutate(None)` / `pop.extend(None)` raised.
        if children is not None:
            if uniform(0, 1) <= mut:
                children = mutate(children)
            pop.extend(children)
        results_df, flag_mse_limit = select_top(pop, data,
                                                mse_limit, index_name)
        pop = results_df['pop'].to_numpy().tolist()
        if (time() - t0) >= max_time or flag_mse_limit:
            stop = True
    return results_df.head(1)['obj_values'][0], (time() - t0)
def gen_initial_pop(N:int, P:int, K:int):
    """Yield P random binary chromosomes of length N, each with exactly K active genes."""
    for _ in range(P):
        chromosome = np.zeros((N,), dtype=int)
        # Pick K distinct positions at random and switch them on.
        active_positions = sample(list(range(N)), k=K)
        chromosome[active_positions] = 1
        yield chromosome
def crossover(parents:list, cut:float, K:int):
    """Produce two children by one-point crossover at `cut`, repairing each to exactly K active genes."""
    def repair(genome:np.array):
        # Randomly toggle genes until the genome has exactly K active entries.
        positions = np.array(range(len(genome)))
        while genome.sum() > K:
            active = positions[genome == 1]
            genome[sample(active.tolist(), 1)] = 0
        while genome.sum() < K:
            inactive = positions[genome == 0]
            genome[sample(inactive.tolist(), 1)] = 1
        return genome

    def make_child(first:int, second:int):
        # Head of one parent glued to the tail of the other.
        head = parents[first].tolist()[0:cut]
        tail = parents[second].tolist()[cut:]
        genome = np.array(head + tail, dtype=int)
        if genome.sum() != K:
            genome = repair(genome)
        return genome

    return [make_child(0, 1), make_child(1, 0)]
def mutate(children:list):
    """Mutate each child in place: turn one random active gene off and one random inactive gene on."""
    for genome in children:
        positions = np.array(range(len(genome)))
        # Draw the gene to deactivate first, then the one to activate
        # (same draw order as the genome is inspected).
        deactivate = sample(positions[genome == 1].tolist(), 1)
        activate = sample(positions[genome == 0].tolist(), 1)
        genome[deactivate] = 0
        genome[activate] = 1
    return children
def select_top(
    pop:list,
    data:pd.DataFrame,
    mse_limit:float,
    index_name:str
    ) -> tuple:
    """Evaluate a population, rank it by cost, and drop the two worst individuals.

    Each individual is scored with `objective_fun` (QP tracking error). The
    population is sorted ascending by 'cost value', the two worst rows are
    dropped, and a flag reports whether any surviving individual already meets
    `mse_limit`.

    Args:
        pop (list): list of binary chromosomes (np.ndarray) selecting stocks.
        data (pd.DataFrame): returns time series with stocks and the index.
        mse_limit (float): cost threshold used for early stopping upstream.
        index_name (str): column name of the tracked index.

    Returns:
        tuple: (DataFrame with columns 'pop' and 'obj_values' for the surviving
        individuals, bool flag — True if some survivor's cost <= mse_limit).
    """
    data = data.copy()
    # Sort by the 'cost value' entry of each objective dict.
    # NOTE(review): sort_values(key=...) requires pandas >= 1.1 — confirm pinned version.
    select_data = pd.DataFrame({
        'pop':pop,
        'obj_values':[objective_fun(i, data, index_name) for i in pop]
    }).sort_values(
        by='obj_values', key = lambda col: col.apply(lambda elem: elem['cost value'])).\
        reset_index()
    select_data['cost'] = select_data['obj_values'].apply(lambda x: x['cost value'])
    select_data['check_mse_list'] = select_data['cost'] <= mse_limit
    # Only individuals that survive the cut (all but the last two) count for the flag.
    check_mse = any(select_data['check_mse_list'][:-2].to_numpy())
    n = select_data.shape[0]
    # Drop the two worst performers from the returned population.
    select_data = select_data.loc[:, ['pop', 'obj_values']].head(n - 2)
    return select_data, check_mse
def objective_fun(
    element:np.ndarray,
    data:pd.DataFrame,
    index_name:str
    ) -> dict:
    """Solve the QP tracking problem for the stocks selected by a chromosome.

    Args:
        element (np.ndarray): binary chromosome; 1 marks a selected stock.
        data (pd.DataFrame): returns time series with stocks and the index.
        index_name (str): column name of the tracked index.

    Returns:
        dict: {'weights': optimal weights, 'names': stock names used by the
        solver, 'cost value': objective value of the QP solution}.
    """
    data = data.copy()
    # Keep only the columns of the selected stocks, then re-attach the index column.
    selected_data = data.loc[:, ~data.columns.str.match(index_name)]
    selected_data = selected_data.loc[:, element == 1]
    selected_data[index_name] = data[index_name]
    qp = qp_solver(df=selected_data, col_index=index_name)
    sol = qp.solve()
    return {'weights': sol['x'],
            'names': qp.weights,
            'cost value': sol['cost value']}
| [
"random.uniform",
"numpy.array",
"numpy.zeros",
"random.choices",
"tracking_model.qp.qp_solver",
"time.time"
] | [((1836, 1842), 'time.time', 'time', ([], {}), '()\n', (1840, 1842), False, 'from time import time\n'), ((5400, 5449), 'tracking_model.qp.qp_solver', 'qp_solver', ([], {'df': 'selected_data', 'col_index': 'index_name'}), '(df=selected_data, col_index=index_name)\n', (5409, 5449), False, 'from tracking_model.qp import qp_solver\n'), ((2768, 2793), 'numpy.zeros', 'np.zeros', (['(N,)'], {'dtype': 'int'}), '((N,), dtype=int)\n', (2776, 2793), True, 'import numpy as np\n'), ((3691, 3739), 'numpy.array', 'np.array', (['(cut_parent_1 + cut_parent_2)'], {'dtype': 'int'}), '(cut_parent_1 + cut_parent_2, dtype=int)\n', (3699, 3739), True, 'import numpy as np\n'), ((2071, 2084), 'random.uniform', 'uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2078, 2084), False, 'from random import randint, choices, uniform, sample\n'), ((2117, 2134), 'random.choices', 'choices', (['pop'], {'k': '(2)'}), '(pop, k=2)\n', (2124, 2134), False, 'from random import randint, choices, uniform, sample\n'), ((2657, 2663), 'time.time', 'time', ([], {}), '()\n', (2661, 2663), False, 'from time import time\n'), ((2213, 2226), 'random.uniform', 'uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2220, 2226), False, 'from random import randint, choices, uniform, sample\n'), ((2520, 2526), 'time.time', 'time', ([], {}), '()\n', (2524, 2526), False, 'from time import time\n')] |
import glob
import os
import cv2
from jinja2 import Environment, FileSystemLoader, select_autoescape
import numpy
def get_gray_resized_image(image_path):
    """Load the image at *image_path*, convert it to grayscale and resize it to 500x500."""
    loaded = cv2.imread(image_path)
    grayscale = cv2.cvtColor(loaded, cv2.COLOR_BGR2GRAY)
    return cv2.resize(grayscale, (500, 500))
def get_label(path):
    """Return the integer label encoded in the third path component ('<label>_<name>')."""
    directory = path.split(os.sep)[2]
    return int(directory.partition('_')[0])
def get_label_name(path):
    """Return the name encoded in the third path component ('<label>_<name>')."""
    directory = path.split(os.sep)[2]
    return directory.split('_')[1]
# Map each numeric label to its human-readable name, derived from the
# '<label>_<name>' directory names under ./src.
label_and_names = {
    get_label(path): get_label_name(path) for path in glob.glob('./src/*')
}
# Build the training set: one grayscale 500x500 image plus its label per file.
train_images = []
train_labels = []
for image_path in glob.glob('./train/*/*.jpg'):
    train_images.append(get_gray_resized_image(image_path))
    train_labels.append(get_label(image_path))
# Train an LBPH face recognizer on the collected images.
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.train(train_images, numpy.array(train_labels))
# Predict each test image and record (path, predicted name, confidence, true name).
test_results = []
for image_path in glob.glob('./test/*/*.jpg'):
    # NOTE(review): image_name is computed but never used — candidate for removal.
    image_name = os.path.basename(image_path)
    gray_resized_image = get_gray_resized_image(image_path)
    predicted_label, confidence = recognizer.predict(gray_resized_image)
    test_results.append(
        (image_path,
         label_and_names[predicted_label],
         confidence,
         get_label_name(image_path))
    )
# Render the results into output.html via the Jinja2 template.
env = Environment(
    loader=FileSystemLoader('./templates'),
    autoescape=select_autoescape()
)
template = env.get_template('02.html')
template.stream(test_results=test_results).dump('output.html')
| [
"cv2.face.LBPHFaceRecognizer_create",
"numpy.array",
"jinja2.select_autoescape",
"os.path.basename",
"cv2.cvtColor",
"jinja2.FileSystemLoader",
"cv2.resize",
"cv2.imread",
"glob.glob"
] | [((645, 673), 'glob.glob', 'glob.glob', (['"""./train/*/*.jpg"""'], {}), "('./train/*/*.jpg')\n", (654, 673), False, 'import glob\n'), ((795, 831), 'cv2.face.LBPHFaceRecognizer_create', 'cv2.face.LBPHFaceRecognizer_create', ([], {}), '()\n', (829, 831), False, 'import cv2\n'), ((927, 954), 'glob.glob', 'glob.glob', (['"""./test/*/*.jpg"""'], {}), "('./test/*/*.jpg')\n", (936, 954), False, 'import glob\n'), ((171, 193), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (181, 193), False, 'import cv2\n'), ((211, 250), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (223, 250), False, 'import cv2\n'), ((276, 310), 'cv2.resize', 'cv2.resize', (['gray_image', '(500, 500)'], {}), '(gray_image, (500, 500))\n', (286, 310), False, 'import cv2\n'), ((863, 888), 'numpy.array', 'numpy.array', (['train_labels'], {}), '(train_labels)\n', (874, 888), False, 'import numpy\n'), ((973, 1001), 'os.path.basename', 'os.path.basename', (['image_path'], {}), '(image_path)\n', (989, 1001), False, 'import os\n'), ((567, 587), 'glob.glob', 'glob.glob', (['"""./src/*"""'], {}), "('./src/*')\n", (576, 587), False, 'import glob\n'), ((1319, 1350), 'jinja2.FileSystemLoader', 'FileSystemLoader', (['"""./templates"""'], {}), "('./templates')\n", (1335, 1350), False, 'from jinja2 import Environment, FileSystemLoader, select_autoescape\n'), ((1367, 1386), 'jinja2.select_autoescape', 'select_autoescape', ([], {}), '()\n', (1384, 1386), False, 'from jinja2 import Environment, FileSystemLoader, select_autoescape\n')] |
"""Input and output from and to files of movement primitives."""
import inspect
import json
import pickle
import yaml
import numpy as np
basic_types = (int, float, bool, str, type(None))
basic_types_and_sequences = (int, float, bool, str, list, tuple, type(None))
def write_pickle(filename, obj):
    """Serialize *obj* to *filename* in pickle format.

    Parameters
    ----------
    filename : str
        Output file.
    obj : object
        Any picklable object.
    """
    with open(filename, "wb") as out_stream:
        pickle.dump(obj, out_stream)
def read_pickle(filename):
    """Deserialize and return the object stored in *filename* (pickle format).

    Parameters
    ----------
    filename : str
        Input file.

    Returns
    -------
    obj : object
        The unpickled Python object.
    """
    with open(filename, "rb") as in_stream:
        return pickle.load(in_stream)
def write_yaml(filename, obj):
    """Serialize *obj* to *filename* in YAML format.

    Tuples are converted to lists so the export stays plain YAML.

    Parameters
    ----------
    filename : str
        Output file.
    obj : object
        Any custom object that is a hierarchical composition of basic data
        types and numpy arrays.
    """
    serializable = _recursive_to_dict(obj, True)
    with open(filename, "w") as out_stream:
        yaml.dump(serializable, out_stream)
def read_yaml(filename):
    """Load a YAML file written by write_yaml and rebuild the original object.

    Parameters
    ----------
    filename : str
        Input file.

    Returns
    -------
    obj : object
        Reconstructed Python object.
    """
    with open(filename, "r") as in_stream:
        serialized = yaml.safe_load(in_stream)
    return _dict_to_object(serialized)
def write_json(filename, obj):
    """Serialize *obj* to *filename* in JSON format.

    Parameters
    ----------
    filename : str
        Output file.
    obj : object
        Any custom object that is a hierarchical composition of basic data
        types and numpy arrays.
    """
    serializable = _recursive_to_dict(obj)
    with open(filename, "w") as out_stream:
        json.dump(serializable, out_stream)
def read_json(filename):
    """Load a JSON file written by write_json and rebuild the original object.

    Parameters
    ----------
    filename : str
        Input file.

    Returns
    -------
    obj : object
        Reconstructed Python object.
    """
    with open(filename, "r") as in_stream:
        serialized = json.load(in_stream)
    return _dict_to_object(serialized)
def _recursive_to_dict(obj, convert_tuple=False):
    """Export an object's attributes to a nested dict, recording module and
    class name so the object can be reconstructed later.

    Numpy arrays become lists; tuples become lists when *convert_tuple* is
    True (required for YAML); non-basic attributes are exported recursively.
    """
    export = {"module": obj.__module__, "class": obj.__class__.__name__}
    for name, value in obj.__dict__.items():
        if convert_tuple and isinstance(value, tuple):
            export[name] = list(value)
        elif isinstance(value, np.ndarray):
            export[name] = value.tolist()
        elif isinstance(value, basic_types_and_sequences):
            export[name] = value
        else:
            export[name] = _recursive_to_dict(value)
    return export
def _recursive_from_dict(obj, export):
    """Restore attributes on *obj* from a nested dict produced by _recursive_to_dict."""
    for name, value in export.items():
        if isinstance(value, basic_types):
            setattr(obj, name, value)
            continue
        if isinstance(value, (tuple, list)):
            # Sequences that replace an ndarray attribute are converted back to arrays.
            if isinstance(obj.__dict__[name], np.ndarray):
                obj.__dict__[name] = np.array(value)
            else:
                setattr(obj, name, value)
            continue
        # Anything else is a nested exported object — recurse into it.
        _recursive_from_dict(getattr(obj, name), value)
def _dict_to_object(export):
    """Reconstruct an object from a dict produced by _recursive_to_dict.

    Imports the recorded module, instantiates the recorded class (passing any
    exported entries that match constructor argument names), then restores the
    remaining attributes recursively.

    Note: `export` is consumed destructively via pop().

    Parameters
    ----------
    export : dict
        Exported representation with 'module' and 'class' keys.

    Returns
    -------
    obj : object
        Reconstructed instance.
    """
    module_name = export.pop("module")
    module = __import__(module_name, {}, {}, fromlist=["dummy"], level=0)
    class_dict = dict(inspect.getmembers(module))
    class_name = export.pop("class")
    if class_name not in class_dict:
        raise ImportError(f"cannot import name '{class_name}' from '{module}'")
    clazz = class_dict[class_name]
    # Feed exported values that match constructor parameters to __init__;
    # everything left over is assigned as attributes afterwards.
    argspec = inspect.getfullargspec(clazz)
    ctor_kwargs = {}
    for arg in argspec.args:
        if arg in export:
            ctor_kwargs[arg] = export.pop(arg)
    obj = clazz(**ctor_kwargs)
    _recursive_from_dict(obj, export)
    return obj
| [
"pickle.dump",
"inspect.getmembers",
"yaml.dump",
"pickle.load",
"inspect.getfullargspec",
"yaml.safe_load",
"numpy.array",
"json.load",
"json.dump"
] | [((3456, 3485), 'inspect.getfullargspec', 'inspect.getfullargspec', (['clazz'], {}), '(clazz)\n', (3478, 3485), False, 'import inspect\n'), ((500, 519), 'pickle.dump', 'pickle.dump', (['obj', 'f'], {}), '(obj, f)\n', (511, 519), False, 'import pickle\n'), ((782, 796), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (793, 796), False, 'import pickle\n'), ((1156, 1176), 'yaml.dump', 'yaml.dump', (['export', 'f'], {}), '(export, f)\n', (1165, 1176), False, 'import yaml\n'), ((1436, 1453), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (1450, 1453), False, 'import yaml\n'), ((1842, 1862), 'json.dump', 'json.dump', (['export', 'f'], {}), '(export, f)\n', (1851, 1862), False, 'import json\n'), ((2122, 2134), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2131, 2134), False, 'import json\n'), ((3224, 3250), 'inspect.getmembers', 'inspect.getmembers', (['module'], {}), '(module)\n', (3242, 3250), False, 'import inspect\n'), ((2926, 2937), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (2934, 2937), True, 'import numpy as np\n')] |
import numpy as np
import os
from os.path import isfile, join, exists
from os import listdir
from tqdm import tqdm
from soccer import calibration
import json
import cv2
import pickle
import glog
import yaml
import matplotlib
import pycocotools.mask as mask_util
import utils
import utils.io as io
import utils.misc as misc_utils
import utils.camera as cam_utils
import utils.draw as draw_utils
import utils.files as file_utils
from utils.nms.nms_wrapper import nms
## SEMESTER PROJECT ##
from soccer.calibration.semesterproject_calibration import filter_parameters
class SoccerVideo:
    def __init__(self, path_to_dataset):
        """Index the frames of a soccer video dataset and initialise per-frame metadata slots.

        Expects `<path_to_dataset>/images` to contain the frames as jpg/png.
        Creates `youtube.txt` (frame list) and the `metadata` folder if missing.
        """
        image_extensions = ['jpg', 'png']
        # Load images
        self.path_to_dataset = path_to_dataset
        # Frame file names (with extension) found in the images folder.
        self.frame_basenames = [f for f in listdir(join(path_to_dataset, 'images'))
                                if isfile(join(path_to_dataset, 'images', f)) and any(i in f for i in image_extensions)]
        self.frame_fullnames = [join(path_to_dataset, 'images', f) for f in self.frame_basenames]
        # Strip the '.jpg'/'.png' ending to obtain the per-frame keys.
        self.frame_basenames = [f[:-4] for f in self.frame_basenames]
        self.frame_basenames.sort()
        self.frame_fullnames.sort()
        # Per-frame pose dicts (filled later from OpenPose).
        self.poses = {f: None for f in self.frame_basenames}
        self.estimated_poses = {f: None for f in self.frame_basenames}
        # number of frames
        self.n_frames = len(self.frame_basenames)
        self.ext = self.frame_fullnames[0][-3:]
        # Per-frame bounding boxes from Detectron.
        self.bbox = {f: None for f in self.frame_basenames}
        # Per-frame segmentation masks from Detectron.
        self.mask = {f: None for f in self.frame_basenames}
        # Per-frame camera calibration ({'A', 'R', 'T'} dicts).
        self.calib = {f: None for f in self.frame_basenames}
        # Raw Detectron output per frame.
        self.detectron = {f: None for f in self.frame_basenames}
        # Per-frame ball detections (boxes), if available.
        self.ball = {f: None for f in self.frame_basenames}
        self.tracks = None
        self.name = None
        # Write the frame list to youtube.txt if it does not exist yet.
        txt_file = join(path_to_dataset, 'youtube.txt')
        if not exists(txt_file):
            np.savetxt(txt_file, self.frame_fullnames, fmt='%s')
        if not exists(join(path_to_dataset, 'metadata')):
            os.mkdir(join(path_to_dataset, 'metadata'))
        # Cache the image shape from the first frame.
        img_ = self.get_frame(0)
        self.shape = img_.shape
# ---------------------------------------------------------------------------
    def _load_metadata(self, filename, attr):
        """Load a pickled metadata file into attribute *attr* if the file exists.

        A log line is always emitted (even when the file is missing); note that
        exists() is called twice — once for the load, once for the log.
        """
        if exists(filename):
            with open(filename, 'rb') as f:
                setattr(self, attr, pickle.load(f))
        glog.info('{0}: {1}\tfrom {2}'.format(attr, exists(
            filename), file_utils.extract_basename(filename)[0]))
def digest_metadata(self):
calib_file = join(self.path_to_dataset, 'metadata', 'calib.p')
self._load_metadata(calib_file, 'calib')
pose_coarse_file = join(self.path_to_dataset, 'metadata', 'poses.p')
self._load_metadata(pose_coarse_file, 'poses')
detectron_file = join(self.path_to_dataset, 'metadata', 'detectron.p')
self._load_metadata(detectron_file, 'detectron')
    def get_frame(self, frame_number, dtype=np.float32, sfactor=1.0, image_type='rgb'):
        """Read and return the image of frame *frame_number*, optionally rescaled by *sfactor*."""
        return io.imread(self.frame_fullnames[frame_number], dtype=dtype, sfactor=sfactor, image_type=image_type)

    def get_frame_index(self, frame_name):
        """Return the positional index of *frame_name* in the sorted frame list."""
        return self.frame_basenames.index(frame_name)
    def calibrate_camera(self, vis_every=-1):
        """Estimate per-frame camera calibration (A, R, T) and cache it.

        If `metadata/calib.p` exists, the cached calibration is loaded.
        Otherwise: the first frame is calibrated manually (from a saved .npy or
        by clicking), each following frame is calibrated from the previous
        frame's result, the per-frame estimates are smoothed by
        `filter_parameters`, and everything is pickled to `metadata/calib.p`.

        Parameters
        ----------
        vis_every : int
            Visualize every vis_every-th frame during calibration (-1 = never).
        """
        if not exists(join(self.path_to_dataset, 'calib')):
            os.mkdir(join(self.path_to_dataset, 'calib'))
        calib_file = join(self.path_to_dataset, 'metadata', 'calib.p')
        if exists(calib_file):
            glog.info('Loading coarse detections from: {0}'.format(calib_file))
            with open(calib_file, 'rb') as f:
                self.calib = pickle.load(f)
        else:
            # Only recompute if the calib folder does not already hold one .npy per frame.
            if not self.file_lists_match(listdir(join(self.path_to_dataset, 'calib'))):
                # The first frame is estimated by manual clicking
                manual_calib = join(self.path_to_dataset, 'calib',
                                    '{0}.npy'.format(self.frame_basenames[0]))
                if exists(manual_calib):
                    calib_npy = np.load(manual_calib).item()
                    A, R, T = calib_npy['A'], calib_npy['R'], calib_npy['T']
                else:
                    img = self.get_frame(0)
                    coarse_mask = self.get_mask_from_detectron(0)
                    A, R, T = calibration.calibrate_by_click(img, coarse_mask)
                if A is None:
                    glog.error('Manual calibration failed!')
                else:
                    np.save(join(self.path_to_dataset, 'calib', '{0}'.format(self.frame_basenames[0])),
                            {'A': A, 'R': R, 'T': T})
                    # Propagate the calibration frame-to-frame, using the previous
                    # frame's (A, R, T) as the initialization for the next.
                    for i in tqdm(range(1, self.n_frames)):
                        # glog.info('Calibrating frame {0} ({1}/{2})'.format(self.frame_basenames[i], i, self.n_frames))
                        img = self.get_frame(i)
                        coarse_mask = self.get_mask_from_detectron(i)
                        if i % vis_every == 0:
                            vis = True
                        else:
                            vis = False
                        A, R, T, __ = calibration.calibrate_from_initialization(
                            img, coarse_mask, A, R, T, vis)
                        np.save(join(self.path_to_dataset, 'calib', '{0}'.format(self.frame_basenames[i])),
                                {'A': A, 'R': R, 'T': T})
            ## SEMESTER PROJECT ##
            # Smooth the per-frame parameter estimates. Possible filter_type values
            # are 'mean' and 'median'; kernel_size is the number of estimates used.
            filter_parameters(join(self.path_to_dataset, 'calib'), filter_type='mean', kernel_size=5)
            ## END ##
            # Gather the (filtered) per-frame .npy files into self.calib and pickle them.
            for i, basename in enumerate(tqdm(self.frame_basenames)):
                calib_npy = np.load(join(self.path_to_dataset, 'calib',
                                             '{0}.npy'.format(basename))).item()
                A, R, T = calib_npy['A'], calib_npy['R'], calib_npy['T']
                self.calib[basename] = {'A': A, 'R': R, 'T': T}
            with open(calib_file, 'wb') as f:
                pickle.dump(self.calib, f)
# ---------------------------------------------------------------------------
# customized slightly from tabletop project
# estimates the poses with openpose and saves them in class soccer.poses per frame
def estimate_openposes(self, redo=False, openpose_dir='~/installations/openpose', pad=150):
pose_file_coarse = join(self.path_to_dataset, 'metadata', 'poses.p')
if exists(pose_file_coarse) and not redo:
glog.info('Loading fine detections from: {0}'.format(pose_file_coarse))
with open(pose_file_coarse, 'rb') as f:
self.poses = pickle.load(f)
else:
h, w = self.shape[:2]
openposebin = './build/examples/openpose/openpose.bin'
tmp_dir = join(self.path_to_dataset, 'tmp')
if not exists(tmp_dir):
os.mkdir(tmp_dir)
for i, basename in enumerate(tqdm(self.frame_basenames)):
# Remove previous files
previous_files = [f for f in os.listdir(tmp_dir)]
for f in previous_files:
os.remove(join(tmp_dir, f))
img = self.get_frame(i)
bbox = self.bbox[basename]
# save the crops in a temp file
for j in range(bbox.shape[0]):
x1, y1, x2, y2 = bbox[j, 0:4]
x1, y1 = int(np.maximum(np.minimum(x1 - pad, w - 1), 0)), int(
np.maximum(np.minimum(y1 - pad, h - 1), 0))
x2, y2 = int(np.maximum(np.minimum(x2 + pad, w - 1), 0)), int(
np.maximum(np.minimum(y2 + pad, h - 1), 0))
crop = img[y1:y2, x1:x2, :]
# Save crop
cv2.imwrite(join(self.path_to_dataset, 'tmp',
'{0}.jpg'.format(j)), crop[:, :, (2, 1, 0)] * 255)
exit()
cwd = os.getcwd()
os.chdir(openpose_dir)
# ./build/examples/openpose/openpose.bin --model_pose COCO --image_dir ~/Data/K1/images --write_json ~/Data/K1/images --write_images ~/Data/K1/results --display 0 --render_pose 0
# openpose command
# display & render_pose disable video output
command = '{0} --model_pose COCO --image_dir {1} --write_json {2} --display 0 --render_pose 0'.format(
openposebin, tmp_dir, tmp_dir)
os.system(command)
os.chdir(cwd)
poses = []
for j in range(bbox.shape[0]):
x1, y1, x2, y2 = bbox[j, 0:4]
x1, y1 = int(np.maximum(np.minimum(x1 - pad, w - 1), 0)), int(
np.maximum(np.minimum(y1 - pad, h - 1), 0))
with open(join(join(self.path_to_dataset, 'tmp'), '{0}_keypoints.json'.format(j))) as data_file:
# for iii in range(2):
# _ = data_file.readline()
data_json = json.load(data_file)
if len(data_json['people']) == 0:
continue
# sz = data_json['sizes']
n_persons = len(data_json['people'])
# keypoints = np.array(data_json['data']).reshape(sz)
for k in range(n_persons):
keypoints_ = np.array(
data_json['people'][k]['pose_keypoints_2d']).reshape((18, 3))
keypoints_[:, 0] += x1
keypoints_[:, 1] += y1
poses.append(keypoints_)
self.poses[basename] = poses
with open(pose_file_coarse, 'wb') as f:
pickle.dump(self.poses, f)
return 0
    def estimate_openpose_without_detectron(self, openpose_dir='/path/to/openpose'):
        """Run OpenPose directly on the full frames (no Detectron crops) and store keypoints.

        Writes the OpenPose JSON output to `<dataset>/tmp`, then parses one
        `<frame>_keypoints.json` per frame into 18x3 COCO keypoint arrays in
        self.poses.

        Returns
        -------
        int
            Always 0.
        """
        openposebin = './build/examples/openpose/openpose.bin'
        # tmp directory to store the output of openpose
        tmp_dir = join(self.path_to_dataset, 'tmp')
        if not exists(tmp_dir):
            os.mkdir(tmp_dir)
        # Remove previously stored output files.
        previous_files = [f for f in os.listdir(tmp_dir)]
        for f in previous_files:
            os.remove(join(tmp_dir, f))
        cwd = os.getcwd()
        os.chdir(openpose_dir)
        # model_pose COCO or default?
        # --maximize_positives lowers the detection threshold: more detections, less precision.
        command = '{0} --model_pose COCO --image_dir {1} --write_json {2} --maximize_positives'.format(
            openposebin, join(self.path_to_dataset, 'images'), tmp_dir)
        os.system(command)
        os.chdir(cwd)
        # NOTE: verify the exact format of the OpenPose output file.
        for i, basename in enumerate(tqdm(self.frame_basenames)):
            poses = []
            with open(join(join(self.path_to_dataset, 'tmp'), '{0}_keypoints.json'.format(basename))) as data_file:
                data_json = json.load(data_file)
                # Frames without detections keep their initial None pose entry.
                if len(data_json['people']) == 0:
                    continue
                n_persons = len(data_json['people'])
                # Extract the (x, y, score) keypoints for every detected person.
                for k in range(n_persons):
                    keypoints_ = np.array(data_json['people'][k]
                                           ['pose_keypoints_2d']).reshape((18, 3))
                    # keypoints_[:, 0] += x1
                    # keypoints_[:, 1] += y1
                    poses.append(keypoints_)
            self.poses[basename] = poses
        return 0
    def refine_poses(self, keypoint_thresh=10, score_thresh=0.5, neck_thresh=0.59, margin=0.0):
        """Filter the raw OpenPose detections per frame in three stages.

        1. Drop poses with too few valid keypoints, too low a total score, or a
           weak neck keypoint.
        2. Suppress duplicate detections with NMS on small boxes around the
           neck joint.
        3. Drop poses whose lifted 3D neck position lies outside the field
           (field size 103x67 m, expanded by *margin*).

        Parameters
        ----------
        keypoint_thresh : int
            Minimum number of valid keypoints per pose.
        score_thresh : float
            Minimum sum of keypoint confidences.
        neck_thresh : float
            Minimum confidence of the neck keypoint (index 1).
        margin : float
            Extra tolerance (meters) around the field boundary.
        """
        W, H = 103.0, 67.0
        for i, basename in enumerate(tqdm(self.frame_basenames)):
            poses = self.poses[basename]
            # Stage 1: remove poses with few keypoints, low score, or weak neck.
            keep = []
            for ii in range(len(poses)):
                keypoints = poses[ii]
                valid = (keypoints[:, 2] > 0.).nonzero()[0]
                score = np.sum(keypoints[valid, 2])
                if len(valid) > keypoint_thresh and score > score_thresh and keypoints[1, 2] > neck_thresh:
                    keep.append(ii)
            if (len(keep) == 0):
                continue
            poses = [poses[ii] for ii in keep]
            # Stage 2: build a 20x20 box around each neck joint, scored by the
            # pose's total keypoint confidence, and run NMS on those boxes.
            root_part = 1
            root_box = []
            for ii in range(len(poses)):
                root_tmp = poses[ii][root_part, :]
                valid_keypoints = (poses[ii][:, 2] > 0).nonzero()
                root_box.append(
                    [root_tmp[0] - 10, root_tmp[1] - 10, root_tmp[0] + 10, root_tmp[1] + 10,
                     np.sum(poses[ii][valid_keypoints, 2])])
            root_box = np.array(root_box)
            # Perform Neck NMS (single pose needs no suppression).
            if len(root_box.shape) == 1:
                root_box = root_box[None, :]
                keep2 = [0]
            else:
                keep2 = nms(root_box.astype(np.float32), 0.1)
            poses = [poses[ii] for ii in keep2]
            # Stage 3: remove poses whose lifted 3D position is outside the field.
            keep3 = []
            cam_mat = self.calib[basename]
            cam = cam_utils.Camera(
                basename, cam_mat['A'], cam_mat['R'], cam_mat['T'], self.shape[0], self.shape[1])
            for ii in range(len(poses)):
                kp3 = misc_utils.lift_keypoints_in_3d(cam, poses[ii])
                if (-W / 2. - margin) <= kp3[1, 0] <= (W / 2. + margin) and (-H / 2. - margin) <= kp3[1, 2] <= (H / 2. + margin):
                    keep3.append(ii)
            poses = [poses[ii] for ii in keep3]
            self.poses[basename] = poses
def file_lists_match(self, list2):
list2 = [file_utils.extract_basename(f)[0] for f in list2]
hash_table = dict.fromkeys(list2)
all_included = True
for i in self.frame_basenames:
if i not in hash_table:
all_included = False
break
return all_included
    def dump_video(self, vidtype, scale=1, mot_tracks=None, one_color=True):
        """Render an annotated MP4 of the sequence into `<dataset>/<vidtype>.mp4`.

        Parameters
        ----------
        vidtype : str
            One of 'calib' (field-lines overlay), 'poses' (skeletons),
            'detections' (player/ball boxes), 'tracks' (MOT track boxes + ids),
            'mask' (Detectron mask as grayscale video).
        scale : int
            Downscale factor for the output resolution.
        mot_tracks : np.ndarray or None
            Required for vidtype='tracks'; rows of (frame, track_id, x, y, w, h, ...).
        one_color : bool
            Draw all skeletons in one color (forwarded to draw_utils).
        """
        if vidtype not in ['calib', 'poses', 'detections', 'tracks', 'mask']:
            raise Exception('Uknown video format')
        if vidtype == 'tracks' and mot_tracks is None:
            raise Exception('No MOT tracks provided')
        glog.info('Dumping {0} video'.format(vidtype))
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # MP4V
        out_file = join(self.path_to_dataset, '{0}.mp4'.format(vidtype))
        # 25 FPS
        out = cv2.VideoWriter(out_file, fourcc, 25.0,
                              (self.shape[1] // scale, self.shape[0] // scale))
        font = cv2.FONT_HERSHEY_SIMPLEX
        cmap = matplotlib.cm.get_cmap('hsv')
        if mot_tracks is not None:
            n_tracks = max(np.unique(mot_tracks[:, 1]))
        for i, basename in enumerate(tqdm(self.frame_basenames)):
            img = self.get_frame(i, dtype=np.uint8)
            if vidtype == 'poses':
                # Draw the OpenPose skeletons.
                poses = self.poses[basename]
                draw_utils.draw_skeleton_on_image(img, poses, cmap, one_color=one_color)
            if vidtype == 'calib':
                # Project the field lines with the frame's calibration and blend
                # them in red over the image.
                cam = cam_utils.Camera('tmp', self.calib[basename]['A'], self.calib[basename]
                                      ['R'], self.calib[basename]['T'], self.shape[0], self.shape[1])
                canvas, mask = draw_utils.draw_field(cam)
                canvas = cv2.dilate(canvas.astype(np.uint8), np.ones(
                    (15, 15), dtype=np.uint8)).astype(float)
                img = img * (1 - canvas)[:, :, None] + np.dstack((canvas *
                                                                   255, np.zeros_like(canvas), np.zeros_like(canvas)))
            elif vidtype == 'detections':
                # Player boxes in blue, ball boxes in green.
                bbox = self.bbox[basename].astype(np.int32)
                if self.ball[basename] is not None:
                    ball = self.ball[basename].astype(np.int32)
                else:
                    ball = np.zeros((0, 4), dtype=np.int32)
                for j in range(bbox.shape[0]):
                    cv2.rectangle(img, (bbox[j, 0], bbox[j, 1]),
                                  (bbox[j, 2], bbox[j, 3]), (255, 0, 0), 10)
                for j in range(ball.shape[0]):
                    cv2.rectangle(img, (ball[j, 0], ball[j, 1]),
                                  (ball[j, 2], ball[j, 3]), (0, 255, 0), 10)
            elif vidtype == 'tracks':
                # Draw this frame's MOT boxes, colored and labeled by track id.
                cur_id = mot_tracks[:, 0] - 1 == i
                current_boxes = mot_tracks[cur_id, :]
                for j in range(current_boxes.shape[0]):
                    track_id, x, y, w, h = current_boxes[j, 1:6]
                    clr = cmap(track_id / float(n_tracks))
                    cv2.rectangle(img, (int(x), int(y)), (int(x + w), int(y + h)),
                                  (clr[0] * 255, clr[1] * 255, clr[2] * 255), 10)
                    cv2.putText(img, str(int(track_id)), (int(x), int(y)),
                                font, 2, (255, 255, 255), 2, cv2.LINE_AA)
            elif vidtype == 'mask':
                # Replicate the binary mask into all three channels.
                mask = self.get_mask_from_detectron(i)*255
                img = np.dstack((mask, mask, mask))
            img = cv2.resize(img, (self.shape[1] // scale, self.shape[0] // scale))
            out.write(np.uint8(img[:, :, (2, 1, 0)]))
        # Release everything if job is finished
        out.release()
        cv2.destroyAllWindows()
def gather_detectron(self):
glog.info('Gathering Detectron')
if not exists(join(self.path_to_dataset, 'detectron')):
os.mkdir(join(self.path_to_dataset, 'detectron'))
detectron_file = join(self.path_to_dataset, 'metadata', 'detectron.p')
if exists(detectron_file):
glog.info('Loading coarse detections from: {0}'.format(detectron_file))
with open(detectron_file, 'rb') as f:
self.detectron = pickle.load(f)
else:
for i, basename in enumerate(tqdm(self.frame_basenames)):
with open(join(self.path_to_dataset, 'detectron', '{0}.yml'.format(basename)), 'rb') as stream:
#data = yaml.load(stream)
data = yaml.unsafe_load(stream)
boxes, classes, segms = data['boxes'], data['classes'], data['segms']
self.detectron[basename] = {'boxes': boxes,
'segms': segms, 'keyps': None, 'classes': classes}
with open(detectron_file, 'wb') as f:
pickle.dump(self.detectron, f)
def get_number_of_players(self):
players_in_frame = np.zeros((self.n_frames,))
for i, basename in enumerate(self.frame_basenames):
players_in_frame[i] = len(self.bbox[basename])
return players_in_frame
def get_boxes_in_2d(self):
boxes2d = []
for i, basename in enumerate(self.frame_basenames):
bbox = self.bbox[basename]
boxes2d.append(bbox[:, :4].reshape(bbox.shape[0], 2, 2))
return boxes2d
def get_keypoints_in_2d(self):
keypoints = []
for i, basename in enumerate(self.frame_basenames):
kp = self.poses[basename]
keypoints.append(kp)
return keypoints
def get_boxes_in_3d(self):
boxes3d = []
for i, basename in enumerate(self.frame_basenames):
bbox = self.bbox[basename]
cam_mat = self.calib[basename]
cam = cam_utils.Camera(
basename, cam_mat['A'], cam_mat['R'], cam_mat['T'], self.shape[0], self.shape[1])
bbox3d = misc_utils.lift_box_in_3d(cam, bbox)
boxes3d.append(bbox3d)
return boxes3d
def get_mask_from_detectron(self, frame_number):
return io.imread(join(self.path_to_dataset, 'detectron', self.frame_basenames[frame_number]+'.png'))[:, :, 0]
def get_ball_from_detectron(self, thresh=0.0, nms_thresh=0.5):
for i, basename in enumerate(tqdm(self.frame_basenames)):
data = self.detectron[basename]
boxes, segms, keyps, classes = data['boxes'], data['segms'], data['keyps'], data['classes']
valid = (boxes[:, 4] > thresh)*([j == 33 for j in classes])
boxes = boxes[valid, :]
valid_nms = nms(boxes.astype(np.float32), nms_thresh)
boxes = boxes[valid_nms, :]
self.ball[basename] = boxes
def get_color_from_detections(self, frame_number):
basename = self.frame_basenames[frame_number]
img = self.get_frame(frame_number)
boxes = self.bbox[basename]
n_boxes = boxes.shape[0]
box_color = np.zeros((n_boxes, 3))
segms = self.detectron[basename]['segms']
for i in range(n_boxes):
masks = mask_util.decode(segms[i])
II, JJ = (masks > 0).nonzero()
crop = img[II, JJ, :].reshape((-1, 3))
box_color[i, :] = np.mean(crop, axis=0)
return box_color
def refine_detectron(self, basename, score_thresh=0.9, nms_thresh=0.5, min_height=0.0, min_area=200):
data = self.detectron[basename]
boxes, segms, keyps, classes = data['boxes'], data['segms'], data['keyps'], data['classes']
valid = (boxes[:, 4] > score_thresh) * ([j == 1 for j in classes])
valid = (valid == True).nonzero()[0]
boxes = boxes[valid, :]
segms = [segms[i] for i in valid]
classes = [classes[i] for i in valid]
cam_mat = self.calib[basename]
cam = cam_utils.Camera(
basename, cam_mat['A'], cam_mat['R'], cam_mat['T'], self.shape[0], self.shape[1])
# indices of the boxes to keep
keep, __ = misc_utils.putting_objects_in_perspective(cam, boxes, min_height=min_height)
boxes = boxes[keep, :]
segms = [segms[i] for i in keep]
classes = [classes[i] for i in keep]
valid_nms = nms(boxes.astype(np.float32), nms_thresh)
boxes = boxes[valid_nms, :]
segms = [segms[i] for i in valid_nms]
classes = [classes[i] for i in valid_nms]
areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
valid_area = (areas > min_area).nonzero()[0]
boxes = boxes[valid_area, :]
segms = [segms[i] for i in valid_area]
classes = [classes[i] for i in valid_area]
return boxes, segms, keyps, classes
def get_boxes_from_detectron(self, score_thresh=0.9, nms_thresh=0.5, min_height=0.0, min_area=200):
for i, basename in enumerate(tqdm(self.frame_basenames)):
boxes, segms, keyps, classes = self.refine_detectron(basename, score_thresh=score_thresh,
nms_thresh=nms_thresh, min_height=min_height,
min_area=min_area)
self.bbox[basename] = boxes
| [
"numpy.uint8",
"cv2.rectangle",
"numpy.array",
"cv2.destroyAllWindows",
"utils.draw.draw_skeleton_on_image",
"utils.files.extract_basename",
"os.path.exists",
"numpy.mean",
"os.listdir",
"utils.misc.lift_box_in_3d",
"cv2.VideoWriter",
"os.mkdir",
"cv2.VideoWriter_fourcc",
"glog.info",
"m... | [((2124, 2160), 'os.path.join', 'join', (['path_to_dataset', '"""youtube.txt"""'], {}), "(path_to_dataset, 'youtube.txt')\n", (2128, 2160), False, 'from os.path import isfile, join, exists\n'), ((2620, 2636), 'os.path.exists', 'exists', (['filename'], {}), '(filename)\n', (2626, 2636), False, 'from os.path import isfile, join, exists\n'), ((2914, 2963), 'os.path.join', 'join', (['self.path_to_dataset', '"""metadata"""', '"""calib.p"""'], {}), "(self.path_to_dataset, 'metadata', 'calib.p')\n", (2918, 2963), False, 'from os.path import isfile, join, exists\n'), ((3041, 3090), 'os.path.join', 'join', (['self.path_to_dataset', '"""metadata"""', '"""poses.p"""'], {}), "(self.path_to_dataset, 'metadata', 'poses.p')\n", (3045, 3090), False, 'from os.path import isfile, join, exists\n'), ((3172, 3225), 'os.path.join', 'join', (['self.path_to_dataset', '"""metadata"""', '"""detectron.p"""'], {}), "(self.path_to_dataset, 'metadata', 'detectron.p')\n", (3176, 3225), False, 'from os.path import isfile, join, exists\n'), ((3387, 3489), 'utils.io.imread', 'io.imread', (['self.frame_fullnames[frame_number]'], {'dtype': 'dtype', 'sfactor': 'sfactor', 'image_type': 'image_type'}), '(self.frame_fullnames[frame_number], dtype=dtype, sfactor=sfactor,\n image_type=image_type)\n', (3396, 3489), True, 'import utils.io as io\n'), ((3771, 3820), 'os.path.join', 'join', (['self.path_to_dataset', '"""metadata"""', '"""calib.p"""'], {}), "(self.path_to_dataset, 'metadata', 'calib.p')\n", (3775, 3820), False, 'from os.path import isfile, join, exists\n'), ((3832, 3850), 'os.path.exists', 'exists', (['calib_file'], {}), '(calib_file)\n', (3838, 3850), False, 'from os.path import isfile, join, exists\n'), ((6982, 7031), 'os.path.join', 'join', (['self.path_to_dataset', '"""metadata"""', '"""poses.p"""'], {}), "(self.path_to_dataset, 'metadata', 'poses.p')\n", (6986, 7031), False, 'from os.path import isfile, join, exists\n'), ((10737, 10770), 'os.path.join', 'join', 
(['self.path_to_dataset', '"""tmp"""'], {}), "(self.path_to_dataset, 'tmp')\n", (10741, 10770), False, 'from os.path import isfile, join, exists\n'), ((11019, 11030), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (11028, 11030), False, 'import os\n'), ((11039, 11061), 'os.chdir', 'os.chdir', (['openpose_dir'], {}), '(openpose_dir)\n', (11047, 11061), False, 'import os\n'), ((11440, 11458), 'os.system', 'os.system', (['command'], {}), '(command)\n', (11449, 11458), False, 'import os\n'), ((11467, 11480), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (11475, 11480), False, 'import os\n'), ((15179, 15210), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'mp4v'"], {}), "(*'mp4v')\n", (15201, 15210), False, 'import cv2\n'), ((15323, 15417), 'cv2.VideoWriter', 'cv2.VideoWriter', (['out_file', 'fourcc', '(25.0)', '(self.shape[1] // scale, self.shape[0] // scale)'], {}), '(out_file, fourcc, 25.0, (self.shape[1] // scale, self.shape\n [0] // scale))\n', (15338, 15417), False, 'import cv2\n'), ((15499, 15528), 'matplotlib.cm.get_cmap', 'matplotlib.cm.get_cmap', (['"""hsv"""'], {}), "('hsv')\n", (15521, 15528), False, 'import matplotlib\n'), ((18333, 18356), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (18354, 18356), False, 'import cv2\n'), ((18398, 18430), 'glog.info', 'glog.info', (['"""Gathering Detectron"""'], {}), "('Gathering Detectron')\n", (18407, 18430), False, 'import glog\n'), ((18584, 18637), 'os.path.join', 'join', (['self.path_to_dataset', '"""metadata"""', '"""detectron.p"""'], {}), "(self.path_to_dataset, 'metadata', 'detectron.p')\n", (18588, 18637), False, 'from os.path import isfile, join, exists\n'), ((18649, 18671), 'os.path.exists', 'exists', (['detectron_file'], {}), '(detectron_file)\n', (18655, 18671), False, 'from os.path import isfile, join, exists\n'), ((19557, 19583), 'numpy.zeros', 'np.zeros', (['(self.n_frames,)'], {}), '((self.n_frames,))\n', (19565, 19583), True, 'import numpy as np\n'), ((21592, 21614), 
'numpy.zeros', 'np.zeros', (['(n_boxes, 3)'], {}), '((n_boxes, 3))\n', (21600, 21614), True, 'import numpy as np\n'), ((22460, 22563), 'utils.camera.Camera', 'cam_utils.Camera', (['basename', "cam_mat['A']", "cam_mat['R']", "cam_mat['T']", 'self.shape[0]', 'self.shape[1]'], {}), "(basename, cam_mat['A'], cam_mat['R'], cam_mat['T'], self.\n shape[0], self.shape[1])\n", (22476, 22563), True, 'import utils.camera as cam_utils\n'), ((22631, 22707), 'utils.misc.putting_objects_in_perspective', 'misc_utils.putting_objects_in_perspective', (['cam', 'boxes'], {'min_height': 'min_height'}), '(cam, boxes, min_height=min_height)\n', (22672, 22707), True, 'import utils.misc as misc_utils\n'), ((1006, 1040), 'os.path.join', 'join', (['path_to_dataset', '"""images"""', 'f'], {}), "(path_to_dataset, 'images', f)\n", (1010, 1040), False, 'from os.path import isfile, join, exists\n'), ((2176, 2192), 'os.path.exists', 'exists', (['txt_file'], {}), '(txt_file)\n', (2182, 2192), False, 'from os.path import isfile, join, exists\n'), ((2206, 2258), 'numpy.savetxt', 'np.savetxt', (['txt_file', 'self.frame_fullnames'], {'fmt': '"""%s"""'}), "(txt_file, self.frame_fullnames, fmt='%s')\n", (2216, 2258), True, 'import numpy as np\n'), ((7043, 7067), 'os.path.exists', 'exists', (['pose_file_coarse'], {}), '(pose_file_coarse)\n', (7049, 7067), False, 'from os.path import isfile, join, exists\n'), ((7399, 7432), 'os.path.join', 'join', (['self.path_to_dataset', '"""tmp"""'], {}), "(self.path_to_dataset, 'tmp')\n", (7403, 7432), False, 'from os.path import isfile, join, exists\n'), ((10786, 10801), 'os.path.exists', 'exists', (['tmp_dir'], {}), '(tmp_dir)\n', (10792, 10801), False, 'from os.path import isfile, join, exists\n'), ((10815, 10832), 'os.mkdir', 'os.mkdir', (['tmp_dir'], {}), '(tmp_dir)\n', (10823, 10832), False, 'import os\n'), ((11385, 11421), 'os.path.join', 'join', (['self.path_to_dataset', '"""images"""'], {}), "(self.path_to_dataset, 'images')\n", (11389, 11421), False, 'from 
os.path import isfile, join, exists\n'), ((11561, 11587), 'tqdm.tqdm', 'tqdm', (['self.frame_basenames'], {}), '(self.frame_basenames)\n', (11565, 11587), False, 'from tqdm import tqdm\n'), ((12526, 12552), 'tqdm.tqdm', 'tqdm', (['self.frame_basenames'], {}), '(self.frame_basenames)\n', (12530, 12552), False, 'from tqdm import tqdm\n'), ((13541, 13559), 'numpy.array', 'np.array', (['root_box'], {}), '(root_box)\n', (13549, 13559), True, 'import numpy as np\n'), ((13964, 14067), 'utils.camera.Camera', 'cam_utils.Camera', (['basename', "cam_mat['A']", "cam_mat['R']", "cam_mat['T']", 'self.shape[0]', 'self.shape[1]'], {}), "(basename, cam_mat['A'], cam_mat['R'], cam_mat['T'], self.\n shape[0], self.shape[1])\n", (13980, 14067), True, 'import utils.camera as cam_utils\n'), ((15658, 15684), 'tqdm.tqdm', 'tqdm', (['self.frame_basenames'], {}), '(self.frame_basenames)\n', (15662, 15684), False, 'from tqdm import tqdm\n'), ((18134, 18199), 'cv2.resize', 'cv2.resize', (['img', '(self.shape[1] // scale, self.shape[0] // scale)'], {}), '(img, (self.shape[1] // scale, self.shape[0] // scale))\n', (18144, 18199), False, 'import cv2\n'), ((20408, 20511), 'utils.camera.Camera', 'cam_utils.Camera', (['basename', "cam_mat['A']", "cam_mat['R']", "cam_mat['T']", 'self.shape[0]', 'self.shape[1]'], {}), "(basename, cam_mat['A'], cam_mat['R'], cam_mat['T'], self.\n shape[0], self.shape[1])\n", (20424, 20511), True, 'import utils.camera as cam_utils\n'), ((20545, 20581), 'utils.misc.lift_box_in_3d', 'misc_utils.lift_box_in_3d', (['cam', 'bbox'], {}), '(cam, bbox)\n', (20570, 20581), True, 'import utils.misc as misc_utils\n'), ((20917, 20943), 'tqdm.tqdm', 'tqdm', (['self.frame_basenames'], {}), '(self.frame_basenames)\n', (20921, 20943), False, 'from tqdm import tqdm\n'), ((21719, 21745), 'pycocotools.mask.decode', 'mask_util.decode', (['segms[i]'], {}), '(segms[i])\n', (21735, 21745), True, 'import pycocotools.mask as mask_util\n'), ((21870, 21891), 'numpy.mean', 'np.mean', (['crop'], 
{'axis': '(0)'}), '(crop, axis=0)\n', (21877, 21891), True, 'import numpy as np\n'), ((23471, 23497), 'tqdm.tqdm', 'tqdm', (['self.frame_basenames'], {}), '(self.frame_basenames)\n', (23475, 23497), False, 'from tqdm import tqdm\n'), ((2282, 2315), 'os.path.join', 'join', (['path_to_dataset', '"""metadata"""'], {}), "(path_to_dataset, 'metadata')\n", (2286, 2315), False, 'from os.path import isfile, join, exists\n'), ((2339, 2372), 'os.path.join', 'join', (['path_to_dataset', '"""metadata"""'], {}), "(path_to_dataset, 'metadata')\n", (2343, 2372), False, 'from os.path import isfile, join, exists\n'), ((2786, 2802), 'os.path.exists', 'exists', (['filename'], {}), '(filename)\n', (2792, 2802), False, 'from os.path import isfile, join, exists\n'), ((3653, 3688), 'os.path.join', 'join', (['self.path_to_dataset', '"""calib"""'], {}), "(self.path_to_dataset, 'calib')\n", (3657, 3688), False, 'from os.path import isfile, join, exists\n'), ((3712, 3747), 'os.path.join', 'join', (['self.path_to_dataset', '"""calib"""'], {}), "(self.path_to_dataset, 'calib')\n", (3716, 3747), False, 'from os.path import isfile, join, exists\n'), ((4007, 4021), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4018, 4021), False, 'import pickle\n'), ((4358, 4378), 'os.path.exists', 'exists', (['manual_calib'], {}), '(manual_calib)\n', (4364, 4378), False, 'from os.path import isfile, join, exists\n'), ((6235, 6261), 'tqdm.tqdm', 'tqdm', (['self.frame_basenames'], {}), '(self.frame_basenames)\n', (6239, 6261), False, 'from tqdm import tqdm\n'), ((6613, 6639), 'pickle.dump', 'pickle.dump', (['self.calib', 'f'], {}), '(self.calib, f)\n', (6624, 6639), False, 'import pickle\n'), ((7247, 7261), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7258, 7261), False, 'import pickle\n'), ((7452, 7467), 'os.path.exists', 'exists', (['tmp_dir'], {}), '(tmp_dir)\n', (7458, 7467), False, 'from os.path import isfile, join, exists\n'), ((7485, 7502), 'os.mkdir', 'os.mkdir', (['tmp_dir'], {}), 
'(tmp_dir)\n', (7493, 7502), False, 'import os\n'), ((7545, 7571), 'tqdm.tqdm', 'tqdm', (['self.frame_basenames'], {}), '(self.frame_basenames)\n', (7549, 7571), False, 'from tqdm import tqdm\n'), ((8584, 8595), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8593, 8595), False, 'import os\n'), ((8612, 8634), 'os.chdir', 'os.chdir', (['openpose_dir'], {}), '(openpose_dir)\n', (8620, 8634), False, 'import os\n'), ((9114, 9132), 'os.system', 'os.system', (['command'], {}), '(command)\n', (9123, 9132), False, 'import os\n'), ((9149, 9162), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (9157, 9162), False, 'import os\n'), ((10468, 10494), 'pickle.dump', 'pickle.dump', (['self.poses', 'f'], {}), '(self.poses, f)\n', (10479, 10494), False, 'import pickle\n'), ((10910, 10929), 'os.listdir', 'os.listdir', (['tmp_dir'], {}), '(tmp_dir)\n', (10920, 10929), False, 'import os\n'), ((10986, 11002), 'os.path.join', 'join', (['tmp_dir', 'f'], {}), '(tmp_dir, f)\n', (10990, 11002), False, 'from os.path import isfile, join, exists\n'), ((11757, 11777), 'json.load', 'json.load', (['data_file'], {}), '(data_file)\n', (11766, 11777), False, 'import json\n'), ((12840, 12867), 'numpy.sum', 'np.sum', (['keypoints[valid, 2]'], {}), '(keypoints[valid, 2])\n', (12846, 12867), True, 'import numpy as np\n'), ((14143, 14190), 'utils.misc.lift_keypoints_in_3d', 'misc_utils.lift_keypoints_in_3d', (['cam', 'poses[ii]'], {}), '(cam, poses[ii])\n', (14174, 14190), True, 'import utils.misc as misc_utils\n'), ((14506, 14536), 'utils.files.extract_basename', 'file_utils.extract_basename', (['f'], {}), '(f)\n', (14533, 14536), True, 'import utils.files as file_utils\n'), ((15591, 15618), 'numpy.unique', 'np.unique', (['mot_tracks[:, 1]'], {}), '(mot_tracks[:, 1])\n', (15600, 15618), True, 'import numpy as np\n'), ((15860, 15932), 'utils.draw.draw_skeleton_on_image', 'draw_utils.draw_skeleton_on_image', (['img', 'poses', 'cmap'], {'one_color': 'one_color'}), '(img, poses, cmap, one_color=one_color)\n', 
(15893, 15932), True, 'import utils.draw as draw_utils\n'), ((16015, 16154), 'utils.camera.Camera', 'cam_utils.Camera', (['"""tmp"""', "self.calib[basename]['A']", "self.calib[basename]['R']", "self.calib[basename]['T']", 'self.shape[0]', 'self.shape[1]'], {}), "('tmp', self.calib[basename]['A'], self.calib[basename]['R'\n ], self.calib[basename]['T'], self.shape[0], self.shape[1])\n", (16031, 16154), True, 'import utils.camera as cam_utils\n'), ((16221, 16247), 'utils.draw.draw_field', 'draw_utils.draw_field', (['cam'], {}), '(cam)\n', (16242, 16247), True, 'import utils.draw as draw_utils\n'), ((18222, 18252), 'numpy.uint8', 'np.uint8', (['img[:, :, (2, 1, 0)]'], {}), '(img[:, :, (2, 1, 0)])\n', (18230, 18252), True, 'import numpy as np\n'), ((18454, 18493), 'os.path.join', 'join', (['self.path_to_dataset', '"""detectron"""'], {}), "(self.path_to_dataset, 'detectron')\n", (18458, 18493), False, 'from os.path import isfile, join, exists\n'), ((18517, 18556), 'os.path.join', 'join', (['self.path_to_dataset', '"""detectron"""'], {}), "(self.path_to_dataset, 'detectron')\n", (18521, 18556), False, 'from os.path import isfile, join, exists\n'), ((18840, 18854), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (18851, 18854), False, 'import pickle\n'), ((18912, 18938), 'tqdm.tqdm', 'tqdm', (['self.frame_basenames'], {}), '(self.frame_basenames)\n', (18916, 18938), False, 'from tqdm import tqdm\n'), ((19460, 19490), 'pickle.dump', 'pickle.dump', (['self.detectron', 'f'], {}), '(self.detectron, f)\n', (19471, 19490), False, 'import pickle\n'), ((20719, 20807), 'os.path.join', 'join', (['self.path_to_dataset', '"""detectron"""', "(self.frame_basenames[frame_number] + '.png')"], {}), "(self.path_to_dataset, 'detectron', self.frame_basenames[frame_number] +\n '.png')\n", (20723, 20807), False, 'from os.path import isfile, join, exists\n'), ((819, 850), 'os.path.join', 'join', (['path_to_dataset', '"""images"""'], {}), "(path_to_dataset, 'images')\n", (823, 850), False, 
'from os.path import isfile, join, exists\n'), ((2718, 2732), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2729, 2732), False, 'import pickle\n'), ((2817, 2854), 'utils.files.extract_basename', 'file_utils.extract_basename', (['filename'], {}), '(filename)\n', (2844, 2854), True, 'import utils.files as file_utils\n'), ((4680, 4728), 'soccer.calibration.calibrate_by_click', 'calibration.calibrate_by_click', (['img', 'coarse_mask'], {}), '(img, coarse_mask)\n', (4710, 4728), False, 'from soccer import calibration\n'), ((4780, 4820), 'glog.error', 'glog.error', (['"""Manual calibration failed!"""'], {}), "('Manual calibration failed!')\n", (4790, 4820), False, 'import glog\n'), ((6095, 6130), 'os.path.join', 'join', (['self.path_to_dataset', '"""calib"""'], {}), "(self.path_to_dataset, 'calib')\n", (6099, 6130), False, 'from os.path import isfile, join, exists\n'), ((19126, 19150), 'yaml.unsafe_load', 'yaml.unsafe_load', (['stream'], {}), '(stream)\n', (19142, 19150), False, 'import yaml\n'), ((894, 928), 'os.path.join', 'join', (['path_to_dataset', '"""images"""', 'f'], {}), "(path_to_dataset, 'images', f)\n", (898, 928), False, 'from os.path import isfile, join, exists\n'), ((4087, 4122), 'os.path.join', 'join', (['self.path_to_dataset', '"""calib"""'], {}), "(self.path_to_dataset, 'calib')\n", (4091, 4122), False, 'from os.path import isfile, join, exists\n'), ((5495, 5568), 'soccer.calibration.calibrate_from_initialization', 'calibration.calibrate_from_initialization', (['img', 'coarse_mask', 'A', 'R', 'T', 'vis'], {}), '(img, coarse_mask, A, R, T, vis)\n', (5536, 5568), False, 'from soccer import calibration\n'), ((7660, 7679), 'os.listdir', 'os.listdir', (['tmp_dir'], {}), '(tmp_dir)\n', (7670, 7679), False, 'import os\n'), ((7752, 7768), 'os.path.join', 'join', (['tmp_dir', 'f'], {}), '(tmp_dir, f)\n', (7756, 7768), False, 'from os.path import isfile, join, exists\n'), ((9695, 9715), 'json.load', 'json.load', (['data_file'], {}), '(data_file)\n', (9704, 
9715), False, 'import json\n'), ((11640, 11673), 'os.path.join', 'join', (['self.path_to_dataset', '"""tmp"""'], {}), "(self.path_to_dataset, 'tmp')\n", (11644, 11673), False, 'from os.path import isfile, join, exists\n'), ((13478, 13515), 'numpy.sum', 'np.sum', (['poses[ii][valid_keypoints, 2]'], {}), '(poses[ii][valid_keypoints, 2])\n', (13484, 13515), True, 'import numpy as np\n'), ((16868, 16900), 'numpy.zeros', 'np.zeros', (['(0, 4)'], {'dtype': 'np.int32'}), '((0, 4), dtype=np.int32)\n', (16876, 16900), True, 'import numpy as np\n'), ((16969, 17060), 'cv2.rectangle', 'cv2.rectangle', (['img', '(bbox[j, 0], bbox[j, 1])', '(bbox[j, 2], bbox[j, 3])', '(255, 0, 0)', '(10)'], {}), '(img, (bbox[j, 0], bbox[j, 1]), (bbox[j, 2], bbox[j, 3]), (255,\n 0, 0), 10)\n', (16982, 17060), False, 'import cv2\n'), ((17158, 17250), 'cv2.rectangle', 'cv2.rectangle', (['img', '(ball[j, 0], ball[j, 1])', '(ball[j, 2], ball[j, 3])', '(0, 255, 0)', '(10)'], {}), '(img, (ball[j, 0], ball[j, 1]), (ball[j, 2], ball[j, 3]), (0, \n 255, 0), 10)\n', (17171, 17250), False, 'import cv2\n'), ((4412, 4433), 'numpy.load', 'np.load', (['manual_calib'], {}), '(manual_calib)\n', (4419, 4433), True, 'import numpy as np\n'), ((12056, 12109), 'numpy.array', 'np.array', (["data_json['people'][k]['pose_keypoints_2d']"], {}), "(data_json['people'][k]['pose_keypoints_2d'])\n", (12064, 12109), True, 'import numpy as np\n'), ((16309, 16342), 'numpy.ones', 'np.ones', (['(15, 15)'], {'dtype': 'np.uint8'}), '((15, 15), dtype=np.uint8)\n', (16316, 16342), True, 'import numpy as np\n'), ((16525, 16546), 'numpy.zeros_like', 'np.zeros_like', (['canvas'], {}), '(canvas)\n', (16538, 16546), True, 'import numpy as np\n'), ((16548, 16569), 'numpy.zeros_like', 'np.zeros_like', (['canvas'], {}), '(canvas)\n', (16561, 16569), True, 'import numpy as np\n'), ((18085, 18114), 'numpy.dstack', 'np.dstack', (['(mask, mask, mask)'], {}), '((mask, mask, mask))\n', (18094, 18114), True, 'import numpy as np\n'), ((8044, 8071), 
'numpy.minimum', 'np.minimum', (['(x1 - pad)', '(w - 1)'], {}), '(x1 - pad, w - 1)\n', (8054, 8071), True, 'import numpy as np\n'), ((8118, 8145), 'numpy.minimum', 'np.minimum', (['(y1 - pad)', '(h - 1)'], {}), '(y1 - pad, h - 1)\n', (8128, 8145), True, 'import numpy as np\n'), ((8195, 8222), 'numpy.minimum', 'np.minimum', (['(x2 + pad)', '(w - 1)'], {}), '(x2 + pad, w - 1)\n', (8205, 8222), True, 'import numpy as np\n'), ((8269, 8296), 'numpy.minimum', 'np.minimum', (['(y2 + pad)', '(h - 1)'], {}), '(y2 + pad, h - 1)\n', (8279, 8296), True, 'import numpy as np\n'), ((9332, 9359), 'numpy.minimum', 'np.minimum', (['(x1 - pad)', '(w - 1)'], {}), '(x1 - pad, w - 1)\n', (9342, 9359), True, 'import numpy as np\n'), ((9406, 9433), 'numpy.minimum', 'np.minimum', (['(y1 - pad)', '(h - 1)'], {}), '(y1 - pad, h - 1)\n', (9416, 9433), True, 'import numpy as np\n'), ((9475, 9508), 'os.path.join', 'join', (['self.path_to_dataset', '"""tmp"""'], {}), "(self.path_to_dataset, 'tmp')\n", (9479, 9508), False, 'from os.path import isfile, join, exists\n'), ((10094, 10147), 'numpy.array', 'np.array', (["data_json['people'][k]['pose_keypoints_2d']"], {}), "(data_json['people'][k]['pose_keypoints_2d'])\n", (10102, 10147), True, 'import numpy as np\n')] |
import sys
import numpy as np
from .astropy_search.matching import search_around_sky
from .graph import GraphDataStructure
from astropy.coordinates import SkyCoord, Angle
from itertools import accumulate, chain, product
import datetime
from functools import partial
import multiprocessing
from busypal import busy, BusyPal, session
import pandas as pd
from collections import Counter
import colored as cl
# TODO: MPI - almost there in `skylink.py`!!
# FIXME: progressbars and busy indicators (in particular the ones that use decorators) still show up with `silent=True`
# Complex Network Analysis: The Need for Speed (Benchmark Paper)
# http://m3nets.de/publications/CCC2016d.pdf
# networkit parallelism
# nk.setNumberOfThreads(8) # set the maximum number of available threads
# nk.getMaxNumberOfThreads() # see maximum number of available threads
# nk.getCurrentNumberOfThreads() # the number of threads currently executing
# try with 100 or smaller - sometimes (rarely) it hangs eternally!
__all__ = ['fastmatch']
def update_labels(items, labels): # stackoverflow... find the link!
    """Merge labels so that equal items end up sharing one label.

    Walks ``items``/``labels`` in parallel and, whenever the same item
    reappears under a different label, records an equivalence between the
    two labels (the label that appeared first wins).  A second pass then
    follows the recorded chains so every entry of ``labels`` is rewritten
    to its final representative.

    Parameters
    ----------
    items : sequence of hashables
        Keys; equal entries mark labels that must be merged.
    labels : mutable sequence of hashables
        Labels to merge; modified in place and also returned.

    Returns
    -------
    labels
        The same ``labels`` object with merged values.
    """
    # i_dict: first label seen for each item.
    # l_dict: label -> equivalent earlier label (chain to follow later).
    # ranks:  first index at which each label was encountered; the lower
    #         rank wins when two labels are merged (union-by-rank flavour).
    i_dict, l_dict, ranks = {}, {}, {}
    for i in range(len(items)):
        label = i_dict.setdefault(items[i], labels[i])
        if labels[i] not in ranks:
            ranks[labels[i]] = i
        if label != labels[i]:
            # Item already seen with a different label: splice the two
            # equivalence chains, always pointing the later-ranked label
            # at the earlier-ranked one.
            label1 = label
            label2 = labels[i]
            while label1 is not None and label2 is not None:
                if ranks[label1] > ranks[label2]:
                    tmp = l_dict.get(label1)
                    l_dict[label1] = label2
                    label1 = tmp
                elif ranks[label1] < ranks[label2]:
                    tmp = l_dict.get(label2)
                    l_dict[label2] = label1
                    label2 = tmp
                else:
                    break
            labels[i] = label
    # Second pass: follow each chain to its end (l_dict.get returns -1 when
    # the chain stops) so every label becomes its final representative.
    for i in range(len(labels)):
        val = 0
        label = labels[i]
        while val != -1:
            val = l_dict.get(label, -1)
            if val != -1:
                label = val
        if label != labels[i]:
            labels[i] = label
    return labels
# TODO: also make a function to stitch overlapping patches with pre-calculated group_ids in general
def stitch_group_ids(items, labels, graph_lib='networkit', verbose=True, num_threads=None):
    """Merge group ids that are linked through shared items.

    Builds a bipartite graph between shifted item ids and group labels,
    finds its connected components, and relabels ``labels`` so every group
    connected through common items receives one representative id.

    Parameters
    ----------
    items : np.ndarray of int
        Item identifiers; shifted by ``labels.max() + 1`` so they never
        collide with label values in the shared graph node space.
    labels : np.ndarray of int
        Group ids, one per item.
    graph_lib : str
        Graph backend name (e.g. 'networkit', 'networkx', 'igraph').
    verbose : bool
        Forwarded to the graph builder.
    num_threads : int or None
        Thread count for backends that support it (networkit).

    Returns
    -------
    np.ndarray
        New labels with connected groups merged.
    """
    labels_max = labels.max() # max() was slower!
    items = items+labels_max+1 # we somehow mark them since they need to be removed from clustered integers later on #+labels_max+1 #map(str, items)
    edges = zip(items,labels)
    nnodes = items.max()+labels_max+1 if graph_lib=='networkit' else None # not needed otherwise
    gds = GraphDataStructure(graph_lib, num_threads=num_threads) # num_threads=4 # for networkit only
    graph = gds.build_graph_from_edges(edges, verbose=verbose, nnodes=nnodes)
    del edges
    # NOTE(review): `find_clusters` is not defined or imported in the visible
    # part of this module -- presumably provided elsewhere; confirm.
    cc = find_clusters(graph, graph_lib=graph_lib, verbose=False) #verbose)
    # Components of size > 2 contain at least two labels to merge.
    a = list(map(list, (x for x in cc if len(x)>2))) #list(map(sorted, A))
    del cc
    # Keep only original label values (<= labels_max); shifted item ids are
    # discarded from each component.
    a = [[w for w in x if w<=labels_max] for x in a] # slowest!! #[[w for w in x if not isinstance(w, str)] for x in a]
    # b repeats each component's first label once per remaining member.
    b = [[x[0]]*(len(x)-1) for x in a]
    for x in a:
        del x[0]
    a = list(chain(*a)) # orders of mag faster than np.concatenate(a) in this case with lists
    b = list(chain(*b))
    # mapping sends every label in a component to that component's first label.
    mapping = np.arange(0,labels_max+1)
    mapping[a] = b
    new_labels = mapping[labels]
    return new_labels
## slow:
# def stitch_group_ids(items, labels, graph_lib='networkx', verbose=True, num_threads=None, nnodes=None):
# items = map(str, items)
# edges = zip(items,labels)
# # G=nx.Graph(edges)
# # cc = nx.connected_components(G)
# gds = GraphDataStructure(graph_lib, num_threads=num_threads) # num_threads=4 # for networkit only
# graph = gds.build_graph_from_edges(edges, verbose=verbose, nnodes=nnodes)
# cc = find_clusters(graph, graph_lib=graph_lib, verbose=verbose)
# a = (x for x in cc if len(x)>2)
# a = map(list, a)
# a = [[w for w in x if not isinstance(w, str)] for x in a]
# b = [[x[0]]*(len(x)-1) for x in a]
# for x in a:
# del x[0]
# a = list(chain(*a))
# b = list(chain(*b))
# mapping = np.arange(0,max(labels)+1)
# mapping[a] = b
# new_labels = mapping[labels]
# return new_labels
# https://python.hotexamples.com/examples/astropy.coordinates/SkyCoord/to_pixel/python-skycoord-to_pixel-method-examples.html
def radec2xy(coords, ra_unit='deg', dec_unit='deg', wcs=None, mode='all', pixel_scale=1):
    """Project a catalog onto the image plane.

    The default is the tangent image plane centered on the catalog, but any
    WCS transformation can be used.

    Parameters
    ----------
    coords : astropy SkyCoord, or 2-element sequence (ra, dec)
        The coordinate list. If not a `SkyCoord`, `coords` must contain
        exactly two elements interpreted with `ra_unit` and `dec_unit`.
    ra_unit, dec_unit : str, optional
        Units of `coords` when it is not already a `SkyCoord`.
    wcs : astropy.wcs.WCS, optional
        World coordinate system for the transformation. The default builds
        a tangent projection centered on the middle of the catalog's
        RA/Dec span.
    mode : str, optional
        Projection mode for `SkyCoord.to_pixel`: 'wcs' or 'all'.
    pixel_scale : float, optional
        Pixel scale of the default projection, in arcsec/pixel.

    Returns
    -------
    x, y : ndarray
        Pixel positions, zero-indexed (the reference pixel is (0, 0)).

    Raises
    ------
    ValueError
        If `coords` is not a SkyCoord and does not have two elements, or if
        the projection produces non-finite pixel coordinates.
    """
    # https://github.com/astropy/astropy/issues/2847
    from astropy.wcs import WCS
    from astropy.coordinates import SkyCoord

    if not isinstance(coords, SkyCoord):
        # An `assert` would be stripped under `python -O`; validate explicitly.
        if len(coords) != 2:
            raise ValueError('coords must be a SkyCoord or a (ra, dec) pair.')
        coords = SkyCoord(ra=coords[0], dec=coords[1], unit=(ra_unit, dec_unit))

    # - create the projection without having to deal with FITS files
    if wcs is None:
        wcs = WCS(naxis=2)
        wcs.wcs.crpix = (0, 0)  # pixel coordinate of the reference point --> the projection is centered at 0, 0
        ra_tan = (coords.ra.max() - coords.ra.min()) / 2.0 + coords.ra.min()
        dec_tan = (coords.dec.max() - coords.dec.min()) / 2.0 + coords.dec.min()
        wcs.wcs.crval = (ra_tan.value, dec_tan.value)  # coordinate value at reference point
        wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]  # axis type
        # For pixel_scale=1 the scale is 1"/pixel, i.e. values are arcsecond offsets.
        wcs.wcs.cdelt = [pixel_scale/3600., pixel_scale/3600.]

    x, y = coords.to_pixel(wcs, mode=mode)
    # x, y = wcs.wcs_world2pix(coords.ra,coords.dec,0)

    # Check the all-bad case first: in the original ordering the more
    # informative "all non-finite" message was unreachable, because the
    # "one or more" check always raised before it.
    n_bad = (~np.isfinite(x)).sum()
    if n_bad == len(x):
        raise ValueError('The projection led to all non-finite pixel coordinates, probably because it did not converge. Make sure your ra and dec ranges are valid (0<ra<360 and -45<dec<45 in degrees) and gnomic-projectable onto a plane. Also see https://docs.astropy.org/en/stable/_modules/astropy/wcs/wcs.html')
    if n_bad != 0:
        raise ValueError('We encountered one or more non-finite values in the projected coordinates.')
    return x, y #np.vstack((x, y))
# tqdm automatically switches to the text-based
# progress bar if not running in Jupyter
# Pick the right tqdm flavour for the runtime environment; the bare
# `except:` of the original also swallowed KeyboardInterrupt/SystemExit,
# so it is narrowed to `except Exception:` here.
try: # https://github.com/tqdm/tqdm/issues/506
    # `get_ipython` only exists inside IPython; the NameError it raises in a
    # plain interpreter lands us in the terminal fallback below.
    ipy_str = str(type(get_ipython()))
    if 'zmqshell' in ipy_str: # jupyter -> widget-based progress bar
        from tqdm.notebook import tqdm
    if 'terminal' in ipy_str: # ipython terminal
        from tqdm import tqdm
except Exception: # plain interpreter (or tqdm import failure)
    if sys.stderr.isatty():
        from tqdm import tqdm
    else:
        # Non-interactive stderr (e.g. piped logs): disable progress bars
        # by passing iterables through untouched.
        def tqdm(iterable, **kwargs):
            return iterable
# def make_graph(coords=None, coords1=None,coords2=None,coords1_idxshift=None,coords2_idxshift=None,storekdtree=None,job_num=0,linking_length=None,graph_lib='igraph', verbose=1, show_progress=True, silent=False, tqdm_kwargs={}):
# # coord1 should not be partial!!
# # coord2 can be partial
# # !!! good thing about networkx: does not make unnecessary isolated ---> faster performance at least in the serial mode
# # graph_lib: 'igraph', 'igraph', 'networkit'
# # networkx does not make unnecessary isolated nodes, the other two do
# # so networkx is better for when we expect many isolated coords in a huge dataset
# # however, networkx is purely in python, the other two are in c and networkit has the added benefit of openMP!
# # igraph and networkx add the edges at once but networkit needs to use a loop to add them one by one
# # (networkit's c and R version has addlist that does it at once but couldn't find the same functionality in their python version)
# # check the new versions of these libraries to see if they added more capabilities
# # if coords1==coords2: print('same....')
# t0 = datetime.datetime.now()
# tqdm_kwargs['position'] = job_num
# if silent:
# verbose=0
# # if job_num!=0:
# # verbose=False
# # silent=True
# idx1, idx2 = search_around_sky(coords1, coords2, Angle(f'{linking_length}s'), storekdtree=storekdtree, verbose=verbose, show_progress=show_progress, silent=silent, tqdm_kwargs=tqdm_kwargs) #coords1.search_around_sky(coords2, Angle(f'{linking_length}s'))
# if verbose:
# print(f'kdtree done in about {str(datetime.timedelta(seconds=round((datetime.datetime.now()-t0).seconds)))} hms.')
# idx1 += coords1_idxshift
# idx2 += coords2_idxshift
# graph_edges = set((a,b) if a<b else (b,a) for a,b in zip(idx1, idx2) if a!=b) # delete duplicates and 1:1 singles (It is important that a<b is enforced for networkit nnodes finder to work)
# # gds = GraphDataStructure(graph_lib) # num_threads=4 # for networkit only
# # graph = gds.build_graph_from_edges(graph_edges, verbose=verbose)
# return graph_edges
def get_group_ids(coords=None, coords1=None,coords2=None, coords_idxshift=None, coords1_idxshift=None, coords2_idxshift=None, overidx=None, overidx1=None, overidx2=None, storekdtree=None,job_num=0,linking_length=None,graph_lib='igraph', num_threads=None, num_objects=None, verbose=1, show_progress=True, silent=False, tqdm_kwargs=None):
    """Assign a group id to every object in one chunk of the catalog.

    Objects closer than ``linking_length`` (arcsec) are linked into a graph;
    the connected components of that graph define the groups.

    Parameters
    ----------
    coords : array-like, optional
        Coordinates for internal (self) matching; exclusive with the pair
        ``coords1``/``coords2`` used for cross-matching.
    coords_idxshift : int
        Offset of this chunk's first object in the full catalog.
    overidx : array-like, optional
        Chunk-local indices of objects lying in the overlap region between
        mosaics.  When given, a boolean ``linked_mask`` is also returned,
        flagging every object whose group touches the overlap region (those
        groups must take part in the later stitching step).
    storekdtree : str or bool, optional
        Name under which to cache the kd-tree, or False to disable caching.
    job_num : int
        Worker index; -1 marks the serial (non-parallel) invocation.
    graph_lib : str
        Graph backend: 'igraph', 'networkx' or 'networkit'.
    num_threads : int, optional
        Thread count (used by the networkit backend only).
    num_objects : int
        Total number of objects across all chunks; used to derive a starting
        id so ids from different workers can never collide before stitching.

    Returns
    -------
    group_ids : numpy.ndarray
        One integer group id per object in the chunk.
    linked_mask : numpy.ndarray of bool
        Only returned when ``overidx`` is not None (see above).
    """
    t0 = datetime.datetime.now()
    if job_num==-1:
        job_num = 0
        parallel = False
    else:
        parallel = True
    # BUGFIX: the old default `tqdm_kwargs={}` was a shared mutable default that
    # this function mutates (position/desc); use the None-sentinel idiom instead
    if tqdm_kwargs is None:
        tqdm_kwargs = {}
    tqdm_kwargs['position'] = job_num
    # only worker 0 is allowed to print; all other workers stay silent
    if job_num != 0:
        silent = True
    skip_busypal = -1 if show_progress else 1
    if silent:
        verbose=0
        skip_busypal = 2
        disable_tqdm = True
    else:
        disable_tqdm = False
    idx1, idx2 = search_around_sky(coords=coords, coords1=coords1, coords2=coords2, seplimit=Angle(f'{linking_length}s'), storekdtree=storekdtree, verbose=verbose, show_progress=show_progress, silent=silent, tqdm_kwargs=tqdm_kwargs)
    # drop duplicate pairs and self-matches; the a<b ordering is required for
    # the networkit nnodes finder to work
    graph_edges = set((a,b) if a<b else (b,a) for a,b in zip(idx1, idx2) if a!=b)
    # BUGFIX: was `len(coord2)` (NameError in the cross-matching branch)
    num_objects_chunk = len(coords) if coords is not None else len(coords1)+len(coords2)
    with BusyPal('Building the representative graph/network', style={'id':6,'color':'sandy_brown'}, fmt='{spinner} {message}', skip=skip_busypal):
        gds = GraphDataStructure(graph_lib, num_threads=num_threads) # threads are for networkit only
        final_graph = gds.build_graph_from_edges(graph_edges, verbose=False, nnodes=num_objects_chunk)
    clusters = find_clusters(final_graph, graph_lib=graph_lib, verbose=False)
    nclusters = len(clusters)
    # start the ids very far apart so chunks can never collide before stitching
    starting_id = (job_num+2)*num_objects+coords_idxshift
    group_ids = np.arange(starting_id, starting_id+num_objects_chunk)
    linked_mask = np.zeros(num_objects_chunk, dtype=bool)
    # the inner tqdm bars below carry their own desc
    tqdm_kwargs.pop('desc', None)
    if overidx is not None:
        overidx = set(overidx) # makes the lookups very fast!
        # BUGFIX: parenthesize the conditional expression -- previously the
        # ternary swallowed the whole concatenation, so the serial case lost
        # the 'Finding connected components of the ' prefix
        for idx, cluster in enumerate(tqdm(clusters, total=nclusters, desc='Finding connected components of the '+('graphs and shared components' if parallel else 'graph'), disable=disable_tqdm, **tqdm_kwargs)):
            # if any member of a connected component has a foot in the overlap
            # region, the whole group must be involved in the stitching later
            if any(gidx in overidx for gidx in cluster):
                linked_mask[cluster] = True
            group_ids[cluster] = idx+coords_idxshift
        del clusters
        if verbose:
            # BUGFIX: same precedence issue as above -- the checkmark and prefix
            # were dropped entirely when parallel was False
            print('\r\r'+cl.stylize('✔', cl.fg('green')+cl.attr('bold'))+' Assigned group ids '+('for each chunk by using connected components of the graphs' if parallel else 'by using connected components of the graph'))
        return group_ids, linked_mask
    else: # it might be parallel but it is not using linked_mask
        for idx, cluster in enumerate(tqdm(clusters, total=nclusters, desc='Finding connected components of the '+('graphs' if parallel else 'graph'), disable=disable_tqdm, **tqdm_kwargs)):
            group_ids[cluster] = idx+coords_idxshift
        del clusters
        if verbose:
            print('\r\r'+cl.stylize('✔', cl.fg('green')+cl.attr('bold'))+(' Assigned group ids for each chunk' if parallel else ' Assigned group ids'))
        return group_ids
# @busy('Clustering')
def find_clusters(graphs,graph_lib='igraph',verbose=True):
    """Cluster a graph into its connected components.

    ``graphs`` may be a single graph or a list/tuple of graphs; in the latter
    case the backend merges them internally before clustering.  Returns the
    list of clusters produced by the chosen graph backend.
    """
    t0 = datetime.datetime.now()
    # num_threads (e.g. num_threads=4) would only matter for the networkit backend
    backend = GraphDataStructure(graph_lib)
    clusters = backend.cluster(graphs,verbose=verbose)
    if verbose:
        print(f'clustering done in {str(datetime.timedelta(seconds=round((datetime.datetime.now()-t0).seconds)))} hms.')
    # isolated points are deliberately kept; filtering them out (e.g. keeping
    # only clusters with len>1) was measured to not be worth the cost
    return clusters
# # modified from https://stackoverflow.com/questions/56120273/quicker-way-to-implement-numpy-isin-followed-by-sum
# def fast_isin_int(A,a): # at least 2X faster than np.isin for integers (numpy might make it's np.isin ~10X faster in the future, watch its github)
# # suitable for arrays containing small integers like less than 1e7
# grid = np.zeros(max(np.max(A),np.max(a))+1, bool)
# grid[a] = True
# return grid[A]
@busy('Mosaicking data', style={'id':6,'color':'sandy_brown'}, fmt='{spinner} {message}') #TODO: make it parallel!
def get_mosaic_sets(coords=None, coords1=None, coords2=None, linking_length=None, wcs=None, mode='all', nside_mosaics=None, njobs=None, overlap=1.0, use_linked_mask=False):
    """Split the catalog into ``njobs`` chunks of overlapping sky mosaics.

    The coordinates are projected onto an image plane, binned into an
    ``nside_mosaics`` x ``nside_mosaics`` grid, and every non-empty cell
    becomes a mosaic whose bounds are padded by ``overlap*linking_length/2``
    on each side so that pairs crossing cell borders are not lost.  The
    mosaics are then distributed over ``njobs`` chunks.

    Parameters
    ----------
    coords : array-like
        Sky coordinates for internal matching (required, see Raises).
    linking_length : float
        Linking length, in the units of the projected x/y plane.
    wcs : optional
        WCS passed through to ``radec2xy`` for the projection.
    nside_mosaics : int, optional
        Grid resolution; defaults to ``int(2*sqrt(njobs))`` so each job owns
        several mosaics.
    overlap : float
        Fraction of the linking length used to pad each mosaic; 1.0 yields a
        full linking length of overlap between neighbouring mosaics.
    use_linked_mask : bool
        Also compute, per chunk, the chunk-local indices of objects inside
        the padded border shared with other mosaics (used later to stitch
        group-id chunks).

    Returns
    -------
    coords_chunks, refidx_chunks, overidx_chunks : list
        Per-chunk coordinates, their indices into the input catalog, and the
        overlap indices (a list of None when ``use_linked_mask`` is False).

    Raises
    ------
    NotImplementedError
        If ``coords`` is None: the coords1/coords2 cross-matching mode was
        never implemented.
    """
    if coords is None:
        # BUGFIX: the old cross-matching branch ended in
        # `return coords1_chunks, coords2_chunks, ...` with names that were
        # never defined (a NameError, marked FIXME); fail loudly instead.
        raise NotImplementedError('get_mosaic_sets only supports internal matching: pass `coords` (cross-matching with coords1/coords2 is not implemented yet)')
    # x and y are projections of ra and dec onto the image plane, made
    # temporarily for the splitting only
    x, y = radec2xy(coords, wcs=wcs, mode=mode)
    if nside_mosaics is None:
        nside_mosaics = int(2*np.sqrt(njobs)) # grows with njobs so that each job takes care of multiple mosaics
    H, xedges, yedges = np.histogram2d(x, y, bins=nside_mosaics)
    idx_filled_mosaics = np.where(H>0)
    num_filled_mosaics = len(idx_filled_mosaics[0])
    xbounds, ybounds, refidx_inside, refidx_overlap = [], [], [], []
    for idx_x, idx_y in zip(*idx_filled_mosaics):
        xbounds.append([xedges[idx_x], xedges[idx_x+1]])
        ybounds.append([yedges[idx_y], yedges[idx_y+1]])
    for xbound, ybound in zip(xbounds, ybounds):
        # pad each mosaic: 0.5*linking_length per side (overlap=1) guarantees
        # one linking length of overlap; the user may choose to be more conservative
        x0 = xbound[0]-linking_length*float(overlap)/2.0
        x1 = xbound[1]+linking_length*float(overlap)/2.0
        y0 = ybound[0]-linking_length*float(overlap)/2.0
        y1 = ybound[1]+linking_length*float(overlap)/2.0
        cx0 = x>=x0
        cx1 = x<=x1 if x1==xedges[-1] else x<x1 # only the outermost edge is inclusive
        cy0 = y>=y0
        cy1 = y<=y1 if y1==yedges[-1] else y<y1 # only the outermost edge is inclusive
        refidx_inside.append(np.where(cx0 & cx1 & cy0 & cy1)[0])
        if use_linked_mask:
            # the un-padded (inner) bounds: objects between the inner and outer
            # bounds sit in the border shared with neighbouring mosaics and are
            # the ones needed for stitching the group-id chunks later
            x0_p = xbound[0]+linking_length*float(overlap)/2.0
            x1_p = xbound[1]-linking_length*float(overlap)/2.0
            y0_p = ybound[0]+linking_length*float(overlap)/2.0
            y1_p = ybound[1]-linking_length*float(overlap)/2.0
            cx0_p = x<=x0_p
            cx1_p = x1_p<=x
            cy0_p = y<=y0_p
            cy1_p = y1_p<=y
            refidx_overlap.append(np.where( (cx0 & cx1 & cy0 & cy1) & (cx0_p | cx1_p | cy0_p | cy1_p) )[0])
    idx_mosaics_for_chunks = np.array_split(range(num_filled_mosaics), njobs)
    # chunks can consist of one or more mosaics which are assigned to each job
    coords_chunks, refidx_chunks = [], []
    overidx_chunks = [] if use_linked_mask else [None]*njobs
    for idx_mosaics in idx_mosaics_for_chunks:
        refidx_inside_unified = np.array([], dtype=np.int64)
        if use_linked_mask:
            refidx_overlap_unified = np.array([], dtype=np.int64)
        for m in idx_mosaics:
            refidx_inside_unified = np.append(refidx_inside_unified, refidx_inside[m])
            if use_linked_mask:
                refidx_overlap_unified = np.append(refidx_overlap_unified, refidx_overlap[m])
        refidx_inside_unified = np.unique(refidx_inside_unified) # drop duplicates: the little mosaics overlap
        coords_chunks.append(coords[refidx_inside_unified])
        refidx_chunks.append(refidx_inside_unified)
        if use_linked_mask:
            refidx_overlap_unified = np.unique(refidx_overlap_unified) # drop duplicates: the little mosaics overlap
            idx_overlap_unified = np.where(np.isin(refidx_inside_unified, refidx_overlap_unified))[0] # indices into the chunk array (not the reference catalog)
            overidx_chunks.append(idx_overlap_unified)
    return coords_chunks, refidx_chunks, overidx_chunks # refidx_chunks are indices into the main catalog
def fastmatch(coords=None, coords1=None, coords2=None,linking_length=None, periodic_box_size=None,
              reassign_group_indices=True, njobs=1, overlap=1.0, graph_lib='igraph',
              num_threads=None, storekdtree=True, use_linked_mask=False,
              verbose=1, show_progress=True, silent=False, **tqdm_kwargs):
    '''
    Friends-of-friends style sky matching: objects closer than
    `linking_length` are linked, and each connected component of the
    resulting graph gets one group id.  Returns one group id per input
    object, in the original input order.

    use_linked_mask: bool
        An experimental feature that generates a mask to be applied to the arrays before stitching.
        This reduces the time to create a graph in stitch_group_ids() but might have some amount of overhead
        (it can be negligible or a bit significant depending on the data) while making the mask through
        get_mosaics() and get_group_ids(). Experiment it with your data.
    overlap: 1 should be enough to compensate for lost pairs that cross the boundaries (or maybe 1.01 just in case).

    NOTE(review): `periodic_box_size` is accepted but never used anywhere in
    this function.  The cross-matching path (coords1/coords2) references
    several undefined names and cannot currently run -- see inline notes.
    '''
    # - define aliases for the graph library names
    if graph_lib=='nk':
        graph_lib='networkit'
    elif graph_lib=='nx':
        graph_lib='networkx'
    elif graph_lib=='ig':
        graph_lib='igraph'
    if use_linked_mask and graph_lib=='networkx':
        raise ValueError('TODO: The `networkx` graph library does not give the right results with use_linked_mask=True. Use `networkit` and `igraph` libraries instead if you would like to set use_linked_mask=True.')
    # if num_threads is None:
    #     num_threads = njobs
    # exactly one of `coords` (internal match) or the full pair
    # (`coords1`, `coords2`) (cross-match) must be supplied
    if coords is None and None in (coords1, coords2):
        raise ValueError('either pass `coords` for internal matching or a pair of coordinate lists/arrays (`coords1` and `coords2`) for cross-matching')
    elif (coords1 is not None and coords2 is not None) and coords is not None:
        raise ValueError('either pass `coords` for internal matching or a pair of coordinate lists/arrays (`coords1` and `coords2`) for cross-matching')
    # --- --- --- ---
    if coords is not None:
        num_objects = len(coords)
        if njobs>1:
            coords_chunks, refidx_chunks, overidx_chunks = get_mosaic_sets(coords=coords, coords1=None, coords2=None, linking_length=linking_length, wcs=None, mode='all', nside_mosaics=None, njobs=njobs, overlap=overlap, use_linked_mask=use_linked_mask)
            # cumulative chunk lengths give each chunk's index offset into the full catalog
            idxshift_list = [0]+list(np.cumsum([len(x) for x in coords_chunks[:-1]]))
    else:
        num_objects = len(coords1)+len(coords2)
        # NOTE(review): this unpacks 6 values, but get_mosaic_sets' cross-matching
        # return is unfinished (marked FIXME there) -- this path cannot run
        coord1_chunks, coord2_chunks, refidx1_chunks, refidx2_chunks, overidx1_chunks, overidx2_chunks = get_mosaic_sets(coords=None, coords1=coords1, coords2=coords2, linking_length=linking_length, wcs=None, mode='all', nside_mosaics=None, njobs=njobs, overlap=overlap, use_linked_mask=use_linked_mask)
        # NOTE(review): `coords1_chunks`/`coords2_chunks` are undefined -- the
        # names bound above are `coord1_chunks`/`coord2_chunks` (NameError if reached)
        idxshift_list1 = [0]+list(np.cumsum([len(x) for x in coords1_chunks[:-1]]))
        idxshift_list2 = [0]+list(np.cumsum([len(x) for x in coords2_chunks[:-1]]))
    # --- --- --- ---
    # NOTE(review): dead check -- `**tqdm_kwargs` always binds a dict, never None
    if tqdm_kwargs is None:
        tqdm_kwargs={}
    tqdm_kwargs.setdefault('desc', 'Creating matching lists');
    tqdm_kwargs.setdefault('bar_format', '{elapsed}|{bar}|{remaining} ({desc}: {percentage:0.0f}%)')
    # only draw progress bars when there is an actual screen to draw them on
    show_progress = show_progress and session.viewedonscreen()
    kwargs = {'linking_length':linking_length, 'graph_lib':graph_lib, 'num_threads': num_threads, 'num_objects': num_objects,
              'verbose':verbose, 'show_progress':show_progress, 'silent':silent, 'tqdm_kwargs': tqdm_kwargs}
    make_partial_group_ids = partial(get_group_ids, **kwargs)
    if coords is not None:
        if njobs>1:
            # one positional-argument tuple per worker, matching get_group_ids' signature
            args = ((coords_chunks[job_num], None, None, idxshift_list[job_num], None, None, overidx_chunks[job_num], None, None, f'kdtree_sky_{job_num}' if storekdtree else False, job_num) for job_num in range(njobs))
        else:
            # job_num=-1 marks the serial case inside get_group_ids
            args = (coords, None, None, 0, None, None, None, None, None, f'kdtree_sky' if storekdtree else False, -1)
    else:
        if njobs>1:
            # NOTE(review): `overidx_chunks1`, `overidx_chunks2`, `cross_idx`,
            # `cidx1` and `cidx2` are all undefined here; also note
            # `idxshift_list1[cidx1]` is passed twice -- cross-matching is unfinished
            args = ((None, coords1_chunks[i], coords2_chunks[i], idxshift_list1[cidx1], idxshift_list1[cidx1], None, overidx_chunks1[job_num], overidx_chunks2[job_num] , f'kdtree_sky_{cidx1}_{cidx2}' if storekdtree else False, job_num) for job_num, (cidx1, cidx2) in enumerate(cross_idx))
        else:
            # NOTE(review): `args` stays undefined on this path, so the serial
            # call below raises a NameError -- serial cross-matching is unfinished
            pass # ... add here
    if njobs>1:
        # with MPIPoolExecutor(max_workers=njobs) as pool:
        with multiprocessing.Pool(processes=njobs) as pool:
            # it is very crucial that Pool keeps the original order of data passed to starmap
            res = pool.starmap(make_partial_group_ids, args)
        with BusyPal('Concatenating the results from different processes', style={'id':6,'color':'sandy_brown'}, fmt='{spinner} {message}'):
            refidx = np.concatenate(refidx_chunks)
            if use_linked_mask:
                # each worker returned a (group_ids, linked_mask) pair
                group_ids_chunks = [item[0] for item in res]
                linked_mask_chunks = [item[1] for item in res]
                group_ids = np.concatenate(group_ids_chunks)
                linked_mask = np.concatenate(linked_mask_chunks)
                assert len(group_ids)==len(linked_mask)==len(refidx)
            else:
                group_ids = np.concatenate(res, axis=0)
                assert len(group_ids)==len(refidx)
                linked_mask = None
        del res, refidx_chunks
    else:
        # - serial
        group_ids = make_partial_group_ids(*args)
    # - merge the duplicates and find the underlying network shared between them!
    if njobs>1:
        # - stitch fragmented group ids from different patches/mosaics
        # # weed out first IF you don't have that many groups - many isolated (usually removes only 20% though, not worth it performance-wise)
        # if weedout_mask is None:
        #     # narrow things down a bit for faster grah computation
        #     cr, cg = Counter(refidx), Counter(group_ids)
        #     len_data = len(refidx)
        #     weedout_mask = [j for j in range(len_data) if cr[refidx[j]]>1 or (cg[group_ids[j]]>1)]
        #     print('=== len_data, len(weedout_mask) ', len_data, len(weedout_mask))
        if linked_mask is not None:
            # only the objects whose groups touch an overlap region need stitching
            with BusyPal('Stitch fragmented group ids from different mosaic sets', style={'id':6,'color':'sandy_brown'}, fmt='{spinner} {message}'):
                group_ids[linked_mask] = update_labels(refidx[linked_mask], group_ids[linked_mask]) #stitch_group_ids(refidx[linked_mask], group_ids[linked_mask], graph_lib=graph_lib, verbose=False, num_threads=-1) #update_labels(refidx[linked_mask], group_ids[linked_mask])
            # group_ids[linked_mask] = linked_group_ids
        else:
            with BusyPal('Stitch fragmented group ids from different mosaics - no linked_mask', style={'id':6,'color':'sandy_brown'}, fmt='{spinner} {message}'):
                group_ids = update_labels(refidx, group_ids) #stitch_group_ids(refidx, group_ids, graph_lib=graph_lib, verbose=False, num_threads=-1)
                # group_ids[weedout_mask] = update_labels(refidx[weedout_mask], group_ids[weedout_mask])
        # - put the two arrays in a dataframe
        df = pd.DataFrame({'idx': refidx, 'group_id': group_ids})
        with BusyPal('Rearrange indices to be the same as original input', style={'id':6,'color':'sandy_brown'}, fmt='{spinner} {message}'):
            # - we should drop duplicate idx records from the df
            # (objects in mosaic overlaps appear in more than one chunk)
            df.drop_duplicates(subset='idx', keep='first', inplace=True)
            df.sort_values(by='idx', inplace=True)
            assert len(df)==num_objects
            group_ids = df['group_id']
    if reassign_group_indices:
        # relabel the (widely spread) group ids as consecutive integers from 0
        group_ids = np.unique(group_ids, return_inverse=True)[1]
    return group_ids
| [
"itertools.chain",
"numpy.sqrt",
"sys.stderr.isatty",
"numpy.isin",
"numpy.array",
"numpy.isfinite",
"numpy.arange",
"astropy.coordinates.Angle",
"numpy.where",
"busypal.session.viewedonscreen",
"busypal.busy",
"numpy.concatenate",
"pandas.DataFrame",
"numpy.histogram2d",
"colored.fg",
... | [((16978, 17074), 'busypal.busy', 'busy', (['"""Mosaicking data"""'], {'style': "{'id': 6, 'color': 'sandy_brown'}", 'fmt': '"""{spinner} {message}"""'}), "('Mosaicking data', style={'id': 6, 'color': 'sandy_brown'}, fmt=\n '{spinner} {message}')\n", (16982, 17074), False, 'from busypal import busy, BusyPal, session\n'), ((3346, 3374), 'numpy.arange', 'np.arange', (['(0)', '(labels_max + 1)'], {}), '(0, labels_max + 1)\n', (3355, 3374), True, 'import numpy as np\n'), ((11321, 11344), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11342, 11344), False, 'import datetime\n'), ((13616, 13671), 'numpy.arange', 'np.arange', (['starting_id', '(starting_id + num_objects_chunk)'], {}), '(starting_id, starting_id + num_objects_chunk)\n', (13625, 13671), True, 'import numpy as np\n'), ((13688, 13727), 'numpy.zeros', 'np.zeros', (['num_objects_chunk'], {'dtype': 'bool'}), '(num_objects_chunk, dtype=bool)\n', (13696, 13727), True, 'import numpy as np\n'), ((15767, 15790), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (15788, 15790), False, 'import datetime\n'), ((17889, 17929), 'numpy.histogram2d', 'np.histogram2d', (['x', 'y'], {'bins': 'nside_mosaics'}), '(x, y, bins=nside_mosaics)\n', (17903, 17929), True, 'import numpy as np\n'), ((17955, 17970), 'numpy.where', 'np.where', (['(H > 0)'], {}), '(H > 0)\n', (17963, 17970), True, 'import numpy as np\n'), ((25103, 25135), 'functools.partial', 'partial', (['get_group_ids'], {}), '(get_group_ids, **kwargs)\n', (25110, 25135), False, 'from functools import partial\n'), ((3226, 3235), 'itertools.chain', 'chain', (['*a'], {}), '(*a)\n', (3231, 3235), False, 'from itertools import accumulate, chain, product\n'), ((3320, 3329), 'itertools.chain', 'chain', (['*b'], {}), '(*b)\n', (3325, 3329), False, 'from itertools import accumulate, chain, product\n'), ((5579, 5642), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'ra': 'coords[0]', 'dec': 'coords[1]', 'unit': '(ra_unit, 
dec_unit)'}), '(ra=coords[0], dec=coords[1], unit=(ra_unit, dec_unit))\n', (5587, 5642), False, 'from astropy.coordinates import SkyCoord\n'), ((5746, 5758), 'astropy.wcs.WCS', 'WCS', ([], {'naxis': '(2)'}), '(naxis=2)\n', (5749, 5758), False, 'from astropy.wcs import WCS\n'), ((7797, 7816), 'sys.stderr.isatty', 'sys.stderr.isatty', ([], {}), '()\n', (7814, 7816), False, 'import sys\n'), ((12901, 13044), 'busypal.BusyPal', 'BusyPal', (['"""Building the representative graph/network"""'], {'style': "{'id': 6, 'color': 'sandy_brown'}", 'fmt': '"""{spinner} {message}"""', 'skip': 'skip_busypal'}), "('Building the representative graph/network', style={'id': 6,\n 'color': 'sandy_brown'}, fmt='{spinner} {message}', skip=skip_busypal)\n", (12908, 13044), False, 'from busypal import busy, BusyPal, session\n'), ((20429, 20457), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int64'}), '([], dtype=np.int64)\n', (20437, 20457), True, 'import numpy as np\n'), ((20827, 20859), 'numpy.unique', 'np.unique', (['refidx_inside_unified'], {}), '(refidx_inside_unified)\n', (20836, 20859), True, 'import numpy as np\n'), ((24813, 24837), 'busypal.session.viewedonscreen', 'session.viewedonscreen', ([], {}), '()\n', (24835, 24837), False, 'from busypal import busy, BusyPal, session\n'), ((28734, 28786), 'pandas.DataFrame', 'pd.DataFrame', (["{'idx': refidx, 'group_id': group_ids}"], {}), "({'idx': refidx, 'group_id': group_ids})\n", (28746, 28786), True, 'import pandas as pd\n'), ((11936, 11963), 'astropy.coordinates.Angle', 'Angle', (['f"""{linking_length}s"""'], {}), "(f'{linking_length}s')\n", (11941, 11963), False, 'from astropy.coordinates import SkyCoord, Angle\n'), ((13889, 14069), 'tqdm.tqdm', 'tqdm', (['clusters'], {'total': 'nclusters', 'desc': "('Finding connected components of the ' + 'graphs and shared components' if\n parallel else 'graph')", 'disable': 'disable_tqdm'}), "(clusters, total=nclusters, desc='Finding connected components of the ' +\n 'graphs and shared 
components' if parallel else 'graph', disable=\n disable_tqdm, **tqdm_kwargs)\n", (13893, 14069), False, 'from tqdm import tqdm\n'), ((14879, 15032), 'tqdm.tqdm', 'tqdm', (['clusters'], {'total': 'nclusters', 'desc': "('Finding connected components of the ' + 'graphs' if parallel else 'graph')", 'disable': 'disable_tqdm'}), "(clusters, total=nclusters, desc='Finding connected components of the ' +\n 'graphs' if parallel else 'graph', disable=disable_tqdm, **tqdm_kwargs)\n", (14883, 15032), False, 'from tqdm import tqdm\n'), ((20523, 20551), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int64'}), '([], dtype=np.int64)\n', (20531, 20551), True, 'import numpy as np\n'), ((20618, 20668), 'numpy.append', 'np.append', (['refidx_inside_unified', 'refidx_inside[m]'], {}), '(refidx_inside_unified, refidx_inside[m])\n', (20627, 20668), True, 'import numpy as np\n'), ((21105, 21138), 'numpy.unique', 'np.unique', (['refidx_overlap_unified'], {}), '(refidx_overlap_unified)\n', (21114, 21138), True, 'import numpy as np\n'), ((25996, 26033), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'njobs'}), '(processes=njobs)\n', (26016, 26033), False, 'import multiprocessing\n'), ((26212, 26346), 'busypal.BusyPal', 'BusyPal', (['"""Concatenating the results from different processes"""'], {'style': "{'id': 6, 'color': 'sandy_brown'}", 'fmt': '"""{spinner} {message}"""'}), "('Concatenating the results from different processes', style={'id': \n 6, 'color': 'sandy_brown'}, fmt='{spinner} {message}')\n", (26219, 26346), False, 'from busypal import busy, BusyPal, session\n'), ((26361, 26390), 'numpy.concatenate', 'np.concatenate', (['refidx_chunks'], {}), '(refidx_chunks)\n', (26375, 26390), True, 'import numpy as np\n'), ((28802, 28936), 'busypal.BusyPal', 'BusyPal', (['"""Rearrange indices to be the same as original input"""'], {'style': "{'id': 6, 'color': 'sandy_brown'}", 'fmt': '"""{spinner} {message}"""'}), "('Rearrange indices to be the same as original input', 
style={'id': \n 6, 'color': 'sandy_brown'}, fmt='{spinner} {message}')\n", (28809, 28936), False, 'from busypal import busy, BusyPal, session\n'), ((29244, 29285), 'numpy.unique', 'np.unique', (['group_ids'], {'return_inverse': '(True)'}), '(group_ids, return_inverse=True)\n', (29253, 29285), True, 'import numpy as np\n'), ((17767, 17781), 'numpy.sqrt', 'np.sqrt', (['njobs'], {}), '(njobs)\n', (17774, 17781), True, 'import numpy as np\n'), ((18981, 19012), 'numpy.where', 'np.where', (['(cx0 & cx1 & cy0 & cy1)'], {}), '(cx0 & cx1 & cy0 & cy1)\n', (18989, 19012), True, 'import numpy as np\n'), ((20742, 20794), 'numpy.append', 'np.append', (['refidx_overlap_unified', 'refidx_overlap[m]'], {}), '(refidx_overlap_unified, refidx_overlap[m])\n', (20751, 20794), True, 'import numpy as np\n'), ((26577, 26609), 'numpy.concatenate', 'np.concatenate', (['group_ids_chunks'], {}), '(group_ids_chunks)\n', (26591, 26609), True, 'import numpy as np\n'), ((26640, 26674), 'numpy.concatenate', 'np.concatenate', (['linked_mask_chunks'], {}), '(linked_mask_chunks)\n', (26654, 26674), True, 'import numpy as np\n'), ((26790, 26817), 'numpy.concatenate', 'np.concatenate', (['res'], {'axis': '(0)'}), '(res, axis=0)\n', (26804, 26817), True, 'import numpy as np\n'), ((27774, 27912), 'busypal.BusyPal', 'BusyPal', (['"""Stitch fragmented group ids from different mosaic sets"""'], {'style': "{'id': 6, 'color': 'sandy_brown'}", 'fmt': '"""{spinner} {message}"""'}), "('Stitch fragmented group ids from different mosaic sets', style={\n 'id': 6, 'color': 'sandy_brown'}, fmt='{spinner} {message}')\n", (27781, 27912), False, 'from busypal import busy, BusyPal, session\n'), ((28274, 28424), 'busypal.BusyPal', 'BusyPal', (['"""Stitch fragmented group ids from different mosaics - no linked_mask"""'], {'style': "{'id': 6, 'color': 'sandy_brown'}", 'fmt': '"""{spinner} {message}"""'}), "('Stitch fragmented group ids from different mosaics - no linked_mask',\n style={'id': 6, 'color': 'sandy_brown'}, 
fmt='{spinner} {message}')\n", (28281, 28424), False, 'from busypal import busy, BusyPal, session\n'), ((6493, 6507), 'numpy.isfinite', 'np.isfinite', (['x'], {}), '(x)\n', (6504, 6507), True, 'import numpy as np\n'), ((6640, 6654), 'numpy.isfinite', 'np.isfinite', (['x'], {}), '(x)\n', (6651, 6654), True, 'import numpy as np\n'), ((20014, 20079), 'numpy.where', 'np.where', (['(cx0 & cx1 & cy0 & cy1 & (cx0_p | cx1_p | cy0_p | cy1_p))'], {}), '(cx0 & cx1 & cy0 & cy1 & (cx0_p | cx1_p | cy0_p | cy1_p))\n', (20022, 20079), True, 'import numpy as np\n'), ((21250, 21304), 'numpy.isin', 'np.isin', (['refidx_inside_unified', 'refidx_overlap_unified'], {}), '(refidx_inside_unified, refidx_overlap_unified)\n', (21257, 21304), True, 'import numpy as np\n'), ((15269, 15283), 'colored.fg', 'cl.fg', (['"""green"""'], {}), "('green')\n", (15274, 15283), True, 'import colored as cl\n'), ((15284, 15299), 'colored.attr', 'cl.attr', (['"""bold"""'], {}), "('bold')\n", (15291, 15299), True, 'import colored as cl\n'), ((14559, 14573), 'colored.fg', 'cl.fg', (['"""green"""'], {}), "('green')\n", (14564, 14573), True, 'import colored as cl\n'), ((14574, 14589), 'colored.attr', 'cl.attr', (['"""bold"""'], {}), "('bold')\n", (14581, 14589), True, 'import colored as cl\n'), ((16106, 16129), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (16127, 16129), False, 'import datetime\n')] |
from numpy import array, all, ones_like
from pynucastro.nucdata import PartitionFunctionTable, PartitionFunctionCollection
import os
# Locate the bundled partition-function tables relative to this file:
# <package root>/PartitionFunction/{etfsiq,frdm}_{low,high}.txt
nucdata_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
pf_dir = os.path.join(nucdata_dir, 'PartitionFunction')
dir_etfsiq_low = os.path.join(pf_dir, 'etfsiq_low.txt')
dir_frdm_low = os.path.join(pf_dir, 'frdm_low.txt')
dir_etfsiq_high = os.path.join(pf_dir, 'etfsiq_high.txt')
dir_frdm_high = os.path.join(pf_dir, 'frdm_high.txt')
# Reference partition-function values, hard-coded from the table files above,
# against which the parsed tables are compared in the tests below.
# Expected values for co46 from the ETFSI-Q low-temperature table:
ANSWER_ETFSIQ_LOW = array([1.000271, 1.002656, 1.009124, 1.035543, 1.076750, 1.128518, 1.187847, 1.252797,
        1.322103, 1.394926, 1.470693, 1.883077, 2.339548, 2.835353, 3.371056, 3.949365,
        4.574281, 5.250894, 5.985411, 7.659520, 9.675912, 12.147961, 15.237089, 19.172457])
# Expected values for ne37 from the FRDM low-temperature table:
ANSWER_FRDM_LOW = array([1.000157, 1.001534, 1.005265, 1.020486, 1.044185, 1.073899, 1.107886, 1.145013,
        1.184544, 1.225988, 1.269010, 1.501456, 1.755494, 2.027655, 2.317420, 2.625339,
        2.952505, 3.300375, 3.670713, 4.487378, 5.423159, 6.505528, 7.771334, 9.270601])
# Expected values for fe47 from the ETFSI-Q high-temperature table:
ANSWER_ETFSIQ_HIGH = array([5.79E+000, 1.07E+001, 2.13E+001, 4.38E+001, 9.23E+001, 1.97E+002, 4.23E+002,
        9.12E+002, 1.97E+003, 4.25E+003, 2.92E+004, 2.00E+005, 1.36E+006, 9.31E+006,
        6.34E+007, 4.31E+008, 2.92E+009, 1.97E+010, 1.33E+011, 8.93E+011, 5.98E+012,
        3.99E+013, 2.65E+014, 1.76E+015, 1.16E+016, 7.66E+016, 5.03E+017, 3.30E+018,
        2.16E+019, 1.41E+020, 9.21E+020, 6.00E+021, 3.91E+022, 2.54E+023, 1.65E+024,
        1.07E+025, 6.97E+025, 4.52E+026, 2.94E+027, 1.91E+028, 8.07E+029, 3.42E+031,
        1.46E+033, 6.23E+034, 2.68E+036, 1.16E+038, 5.03E+039, 6.45E+043])
# Expected values for po188 from the FRDM high-temperature table:
ANSWER_FRDM_HIGH = array([9.40e+007, 2.81e+009, 4.93e010, 1.95e+012, 8.84e+013, 3.66e+015, 1.44e+017,
        5.48e+018, 2.04e+020, 7.48e+021, 5.72e+025, 4.07e+029, 2.69e+033, 1.66e+037,
        9.60e+040, 5.20e+044, 2.65e+048, 1.28e+052, 5.85e+055, 2.55e+059, 1.06e+063,
        4.27e+066, 1.65e+070, 6.16e+073, 2.23e+077, 7.87e+080, 2.71e+084, 9.15e+087,
        3.03e+091, 9.86e+094, 3.17e+098, 1.00e+102, 3.14e+105, 9.77e+108, 3.01e+112,
        9.23e+115, 2.82e+119, 8.56e+122, 2.59e+126, 7.85e+129, 7.18e+136, 6.59e+143,
        6.11e+150, 5.74e+157, 5.48e+164, 5.35e+171, 5.34e+178, 1.88e+196])
# Temperature grid (K) shared by both low-temperature tables:
TEMPERATURES_LOW = array([0.01E+9, 0.15E+9, 0.2E+9, 0.3E+9, 0.4E+9, 0.5E+9, 0.6E+9,
        0.7E+9, 0.8E+9, 0.9E+9, 1.0E+9, 1.5E+9, 2.0E+9, 2.5E+9,
        3.0E+9, 3.5E+9, 4.0E+9, 4.5E+9, 5.0E+9, 6.0E+9, 7.0E+9,
        8.0E+9, 9.0E+9, 10.0E+9])
# Temperature grid (K) shared by both high-temperature tables:
TEMPERATURES_HIGH = array([12.0E+9, 14.0E+9, 16.0E+9, 18.0E+9, 20.0E+9, 22.0E+9, 24.0E+9,
        26.0E+9, 28.0E+9, 30.0E+9, 35.0E+9, 40.0E+9, 45.0E+9, 50.0E+9,
        55.0E+9, 60.0E+9, 65.0E+9, 70.0E+9, 75.0E+9, 80.0E+9, 85.0E+9,
        90.0E+9, 95.0E+9, 100.0E+9, 105.0E+9, 110.0E+9, 115.0E+9, 120.0E+9,
        125.0E+9, 130.0E+9, 135.0E+9, 140.0E+9, 145.0E+9, 150.0E+9, 155.0E+9,
        160.0E+9, 165.0E+9, 170.0E+9, 175.0E+9, 180.0E+9, 190.0E+9, 200.0E+9,
        210.0E+9, 220.0E+9, 230.0E+9, 240.0E+9, 250.0E+9, 275.0E+9])
# The trivial partition function (all ones) expected for bare nucleons (p, n):
DEFAULT = ones_like(TEMPERATURES_LOW)
class TestPartition:
    """Validate the parsed partition-function tables and the collection
    wrappers against the reference values hard-coded at module level."""
    @classmethod
    def setup_class(cls):
        """ this is run once for the class, before any tests """
        pass

    @classmethod
    def teardown_class(cls):
        """ this is run once for the class, after all tests """
        pass

    def setup_method(self):
        """ this is run before each test method: load the four tables and the
        individual partition functions / collections the tests compare """
        self.pf_table_etfsiq_low = PartitionFunctionTable(dir_etfsiq_low)
        self.pf_table_frdm_low = PartitionFunctionTable(dir_frdm_low)
        self.pf_table_etfsiq_high = PartitionFunctionTable(dir_etfsiq_high)
        self.pf_table_frdm_high = PartitionFunctionTable(dir_frdm_high)
        # nuclides checked against the module-level ANSWER_* reference arrays
        self.co46_pf_etfsiq_low = self.pf_table_etfsiq_low.get_partition_function('co46')
        self.ne37_pf_frdm_low = self.pf_table_frdm_low.get_partition_function('ne37')
        self.fe47_pf_etfsiq_high = self.pf_table_etfsiq_high.get_partition_function('fe47')
        self.po188_pf_frdm_high = self.pf_table_frdm_high.get_partition_function('po188')
        # nuclides present in both the low and high tables, used to test summing
        self.ne19_pf_frdm_low = self.pf_table_frdm_low.get_partition_function('ne19')
        self.ne19_pf_frdm_high = self.pf_table_frdm_high.get_partition_function('ne19')
        self.co60_pf_etfsiq_low = self.pf_table_etfsiq_low.get_partition_function('co60')
        self.co60_pf_etfsiq_high = self.pf_table_etfsiq_high.get_partition_function('co60')
        self.pf_collection_frdm = PartitionFunctionCollection(use_set='frdm')
        self.pf_collection_etfsiq = PartitionFunctionCollection(use_set='etfsiq')

    def teardown_method(self):
        """ this is run after each test method """
        pass

    def test_pf(self):
        # bare nucleons have the trivial (all-ones) partition function
        assert all(self.pf_collection_frdm.get_partition_function('p').partition_function == DEFAULT)
        assert all(self.pf_collection_etfsiq.get_partition_function('n').partition_function == DEFAULT)

    def test_pf_table(self):
        # each parsed table must reproduce the hard-coded reference values
        # and the corresponding temperature grid
        assert all(self.co46_pf_etfsiq_low.partition_function == ANSWER_ETFSIQ_LOW)
        assert all(self.co46_pf_etfsiq_low.temperature == TEMPERATURES_LOW)
        assert all(self.ne37_pf_frdm_low.partition_function == ANSWER_FRDM_LOW)
        assert all(self.ne37_pf_frdm_low.temperature == TEMPERATURES_LOW)
        assert all(self.fe47_pf_etfsiq_high.partition_function == ANSWER_ETFSIQ_HIGH)
        assert all(self.fe47_pf_etfsiq_high.temperature == TEMPERATURES_HIGH)
        assert all(self.po188_pf_frdm_high.partition_function == ANSWER_FRDM_HIGH)
        assert all(self.po188_pf_frdm_high.temperature == TEMPERATURES_HIGH)

    def test_pfsum(self):
        # a collection entry must equal the sum of its low- and high-temperature
        # pieces, regardless of addition order
        assert self.pf_collection_etfsiq.get_partition_function('co60') == self.co60_pf_etfsiq_low + self.co60_pf_etfsiq_high
        assert self.pf_collection_frdm.get_partition_function('ne19') == self.ne19_pf_frdm_high + self.ne19_pf_frdm_low
| [
"pynucastro.nucdata.PartitionFunctionCollection",
"numpy.ones_like",
"os.path.join",
"os.path.realpath",
"numpy.array",
"pynucastro.nucdata.PartitionFunctionTable",
"numpy.all"
] | [((218, 264), 'os.path.join', 'os.path.join', (['nucdata_dir', '"""PartitionFunction"""'], {}), "(nucdata_dir, 'PartitionFunction')\n", (230, 264), False, 'import os\n'), ((283, 321), 'os.path.join', 'os.path.join', (['pf_dir', '"""etfsiq_low.txt"""'], {}), "(pf_dir, 'etfsiq_low.txt')\n", (295, 321), False, 'import os\n'), ((337, 373), 'os.path.join', 'os.path.join', (['pf_dir', '"""frdm_low.txt"""'], {}), "(pf_dir, 'frdm_low.txt')\n", (349, 373), False, 'import os\n'), ((392, 431), 'os.path.join', 'os.path.join', (['pf_dir', '"""etfsiq_high.txt"""'], {}), "(pf_dir, 'etfsiq_high.txt')\n", (404, 431), False, 'import os\n'), ((448, 485), 'os.path.join', 'os.path.join', (['pf_dir', '"""frdm_high.txt"""'], {}), "(pf_dir, 'frdm_high.txt')\n", (460, 485), False, 'import os\n'), ((507, 769), 'numpy.array', 'array', (['[1.000271, 1.002656, 1.009124, 1.035543, 1.07675, 1.128518, 1.187847, \n 1.252797, 1.322103, 1.394926, 1.470693, 1.883077, 2.339548, 2.835353, \n 3.371056, 3.949365, 4.574281, 5.250894, 5.985411, 7.65952, 9.675912, \n 12.147961, 15.237089, 19.172457]'], {}), '([1.000271, 1.002656, 1.009124, 1.035543, 1.07675, 1.128518, 1.187847,\n 1.252797, 1.322103, 1.394926, 1.470693, 1.883077, 2.339548, 2.835353, \n 3.371056, 3.949365, 4.574281, 5.250894, 5.985411, 7.65952, 9.675912, \n 12.147961, 15.237089, 19.172457])\n', (512, 769), False, 'from numpy import array, all, ones_like\n'), ((831, 1090), 'numpy.array', 'array', (['[1.000157, 1.001534, 1.005265, 1.020486, 1.044185, 1.073899, 1.107886, \n 1.145013, 1.184544, 1.225988, 1.26901, 1.501456, 1.755494, 2.027655, \n 2.31742, 2.625339, 2.952505, 3.300375, 3.670713, 4.487378, 5.423159, \n 6.505528, 7.771334, 9.270601]'], {}), '([1.000157, 1.001534, 1.005265, 1.020486, 1.044185, 1.073899, 1.107886,\n 1.145013, 1.184544, 1.225988, 1.26901, 1.501456, 1.755494, 2.027655, \n 2.31742, 2.625339, 2.952505, 3.300375, 3.670713, 4.487378, 5.423159, \n 6.505528, 7.771334, 9.270601])\n', (836, 1090), False, 'from numpy import 
array, all, ones_like\n'), ((1151, 1697), 'numpy.array', 'array', (['[5.79, 10.7, 21.3, 43.8, 92.3, 197.0, 423.0, 912.0, 1970.0, 4250.0, 29200.0,\n 200000.0, 1360000.0, 9310000.0, 63400000.0, 431000000.0, 2920000000.0, \n 19700000000.0, 133000000000.0, 893000000000.0, 5980000000000.0, \n 39900000000000.0, 265000000000000.0, 1760000000000000.0, 1.16e+16, \n 7.66e+16, 5.03e+17, 3.3e+18, 2.16e+19, 1.41e+20, 9.21e+20, 6e+21, \n 3.91e+22, 2.54e+23, 1.65e+24, 1.07e+25, 6.97e+25, 4.52e+26, 2.94e+27, \n 1.91e+28, 8.07e+29, 3.42e+31, 1.46e+33, 6.23e+34, 2.68e+36, 1.16e+38, \n 5.03e+39, 6.45e+43]'], {}), '([5.79, 10.7, 21.3, 43.8, 92.3, 197.0, 423.0, 912.0, 1970.0, 4250.0, \n 29200.0, 200000.0, 1360000.0, 9310000.0, 63400000.0, 431000000.0, \n 2920000000.0, 19700000000.0, 133000000000.0, 893000000000.0, \n 5980000000000.0, 39900000000000.0, 265000000000000.0, \n 1760000000000000.0, 1.16e+16, 7.66e+16, 5.03e+17, 3.3e+18, 2.16e+19, \n 1.41e+20, 9.21e+20, 6e+21, 3.91e+22, 2.54e+23, 1.65e+24, 1.07e+25, \n 6.97e+25, 4.52e+26, 2.94e+27, 1.91e+28, 8.07e+29, 3.42e+31, 1.46e+33, \n 6.23e+34, 2.68e+36, 1.16e+38, 5.03e+39, 6.45e+43])\n', (1156, 1697), False, 'from numpy import array, all, ones_like\n'), ((1881, 2450), 'numpy.array', 'array', (['[94000000.0, 2810000000.0, 49300000000.0, 1950000000000.0, 88400000000000.0,\n 3660000000000000.0, 1.44e+17, 5.48e+18, 2.04e+20, 7.48e+21, 5.72e+25, \n 4.07e+29, 2.69e+33, 1.66e+37, 9.6e+40, 5.2e+44, 2.65e+48, 1.28e+52, \n 5.85e+55, 2.55e+59, 1.06e+63, 4.27e+66, 1.65e+70, 6.16e+73, 2.23e+77, \n 7.87e+80, 2.71e+84, 9.15e+87, 3.03e+91, 9.86e+94, 3.17e+98, 1e+102, \n 3.14e+105, 9.77e+108, 3.01e+112, 9.23e+115, 2.82e+119, 8.56e+122, \n 2.59e+126, 7.85e+129, 7.18e+136, 6.59e+143, 6.11e+150, 5.74e+157, \n 5.48e+164, 5.35e+171, 5.34e+178, 1.88e+196]'], {}), '([94000000.0, 2810000000.0, 49300000000.0, 1950000000000.0, \n 88400000000000.0, 3660000000000000.0, 1.44e+17, 5.48e+18, 2.04e+20, \n 7.48e+21, 5.72e+25, 4.07e+29, 2.69e+33, 1.66e+37, 9.6e+40, 
5.2e+44, \n 2.65e+48, 1.28e+52, 5.85e+55, 2.55e+59, 1.06e+63, 4.27e+66, 1.65e+70, \n 6.16e+73, 2.23e+77, 7.87e+80, 2.71e+84, 9.15e+87, 3.03e+91, 9.86e+94, \n 3.17e+98, 1e+102, 3.14e+105, 9.77e+108, 3.01e+112, 9.23e+115, 2.82e+119,\n 8.56e+122, 2.59e+126, 7.85e+129, 7.18e+136, 6.59e+143, 6.11e+150, \n 5.74e+157, 5.48e+164, 5.35e+171, 5.34e+178, 1.88e+196])\n', (1886, 2450), False, 'from numpy import array, all, ones_like\n'), ((2592, 2945), 'numpy.array', 'array', (['[10000000.0, 150000000.0, 200000000.0, 300000000.0, 400000000.0, \n 500000000.0, 600000000.0, 700000000.0, 800000000.0, 900000000.0, \n 1000000000.0, 1500000000.0, 2000000000.0, 2500000000.0, 3000000000.0, \n 3500000000.0, 4000000000.0, 4500000000.0, 5000000000.0, 6000000000.0, \n 7000000000.0, 8000000000.0, 9000000000.0, 10000000000.0]'], {}), '([10000000.0, 150000000.0, 200000000.0, 300000000.0, 400000000.0, \n 500000000.0, 600000000.0, 700000000.0, 800000000.0, 900000000.0, \n 1000000000.0, 1500000000.0, 2000000000.0, 2500000000.0, 3000000000.0, \n 3500000000.0, 4000000000.0, 4500000000.0, 5000000000.0, 6000000000.0, \n 7000000000.0, 8000000000.0, 9000000000.0, 10000000000.0])\n', (2597, 2945), False, 'from numpy import array, all, ones_like\n'), ((2894, 3701), 'numpy.array', 'array', (['[12000000000.0, 14000000000.0, 16000000000.0, 18000000000.0, 20000000000.0,\n 22000000000.0, 24000000000.0, 26000000000.0, 28000000000.0, \n 30000000000.0, 35000000000.0, 40000000000.0, 45000000000.0, \n 50000000000.0, 55000000000.0, 60000000000.0, 65000000000.0, \n 70000000000.0, 75000000000.0, 80000000000.0, 85000000000.0, \n 90000000000.0, 95000000000.0, 100000000000.0, 105000000000.0, \n 110000000000.0, 115000000000.0, 120000000000.0, 125000000000.0, \n 130000000000.0, 135000000000.0, 140000000000.0, 145000000000.0, \n 150000000000.0, 155000000000.0, 160000000000.0, 165000000000.0, \n 170000000000.0, 175000000000.0, 180000000000.0, 190000000000.0, \n 200000000000.0, 210000000000.0, 220000000000.0, 230000000000.0, 
\n 240000000000.0, 250000000000.0, 275000000000.0]'], {}), '([12000000000.0, 14000000000.0, 16000000000.0, 18000000000.0, \n 20000000000.0, 22000000000.0, 24000000000.0, 26000000000.0, \n 28000000000.0, 30000000000.0, 35000000000.0, 40000000000.0, \n 45000000000.0, 50000000000.0, 55000000000.0, 60000000000.0, \n 65000000000.0, 70000000000.0, 75000000000.0, 80000000000.0, \n 85000000000.0, 90000000000.0, 95000000000.0, 100000000000.0, \n 105000000000.0, 110000000000.0, 115000000000.0, 120000000000.0, \n 125000000000.0, 130000000000.0, 135000000000.0, 140000000000.0, \n 145000000000.0, 150000000000.0, 155000000000.0, 160000000000.0, \n 165000000000.0, 170000000000.0, 175000000000.0, 180000000000.0, \n 190000000000.0, 200000000000.0, 210000000000.0, 220000000000.0, \n 230000000000.0, 240000000000.0, 250000000000.0, 275000000000.0])\n', (2899, 3701), False, 'from numpy import array, all, ones_like\n'), ((3532, 3559), 'numpy.ones_like', 'ones_like', (['TEMPERATURES_LOW'], {}), '(TEMPERATURES_LOW)\n', (3541, 3559), False, 'from numpy import array, all, ones_like\n'), ((180, 206), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (196, 206), False, 'import os\n'), ((3960, 3998), 'pynucastro.nucdata.PartitionFunctionTable', 'PartitionFunctionTable', (['dir_etfsiq_low'], {}), '(dir_etfsiq_low)\n', (3982, 3998), False, 'from pynucastro.nucdata import PartitionFunctionTable, PartitionFunctionCollection\n'), ((4032, 4068), 'pynucastro.nucdata.PartitionFunctionTable', 'PartitionFunctionTable', (['dir_frdm_low'], {}), '(dir_frdm_low)\n', (4054, 4068), False, 'from pynucastro.nucdata import PartitionFunctionTable, PartitionFunctionCollection\n'), ((4105, 4144), 'pynucastro.nucdata.PartitionFunctionTable', 'PartitionFunctionTable', (['dir_etfsiq_high'], {}), '(dir_etfsiq_high)\n', (4127, 4144), False, 'from pynucastro.nucdata import PartitionFunctionTable, PartitionFunctionCollection\n'), ((4179, 4216), 'pynucastro.nucdata.PartitionFunctionTable', 
'PartitionFunctionTable', (['dir_frdm_high'], {}), '(dir_frdm_high)\n', (4201, 4216), False, 'from pynucastro.nucdata import PartitionFunctionTable, PartitionFunctionCollection\n'), ((4969, 5012), 'pynucastro.nucdata.PartitionFunctionCollection', 'PartitionFunctionCollection', ([], {'use_set': '"""frdm"""'}), "(use_set='frdm')\n", (4996, 5012), False, 'from pynucastro.nucdata import PartitionFunctionTable, PartitionFunctionCollection\n'), ((5049, 5094), 'pynucastro.nucdata.PartitionFunctionCollection', 'PartitionFunctionCollection', ([], {'use_set': '"""etfsiq"""'}), "(use_set='etfsiq')\n", (5076, 5094), False, 'from pynucastro.nucdata import PartitionFunctionTable, PartitionFunctionCollection\n'), ((5482, 5550), 'numpy.all', 'all', (['(self.co46_pf_etfsiq_low.partition_function == ANSWER_ETFSIQ_LOW)'], {}), '(self.co46_pf_etfsiq_low.partition_function == ANSWER_ETFSIQ_LOW)\n', (5485, 5550), False, 'from numpy import array, all, ones_like\n'), ((5566, 5626), 'numpy.all', 'all', (['(self.co46_pf_etfsiq_low.temperature == TEMPERATURES_LOW)'], {}), '(self.co46_pf_etfsiq_low.temperature == TEMPERATURES_LOW)\n', (5569, 5626), False, 'from numpy import array, all, ones_like\n'), ((5643, 5707), 'numpy.all', 'all', (['(self.ne37_pf_frdm_low.partition_function == ANSWER_FRDM_LOW)'], {}), '(self.ne37_pf_frdm_low.partition_function == ANSWER_FRDM_LOW)\n', (5646, 5707), False, 'from numpy import array, all, ones_like\n'), ((5723, 5781), 'numpy.all', 'all', (['(self.ne37_pf_frdm_low.temperature == TEMPERATURES_LOW)'], {}), '(self.ne37_pf_frdm_low.temperature == TEMPERATURES_LOW)\n', (5726, 5781), False, 'from numpy import array, all, ones_like\n'), ((5798, 5868), 'numpy.all', 'all', (['(self.fe47_pf_etfsiq_high.partition_function == ANSWER_ETFSIQ_HIGH)'], {}), '(self.fe47_pf_etfsiq_high.partition_function == ANSWER_ETFSIQ_HIGH)\n', (5801, 5868), False, 'from numpy import array, all, ones_like\n'), ((5884, 5946), 'numpy.all', 'all', (['(self.fe47_pf_etfsiq_high.temperature == 
TEMPERATURES_HIGH)'], {}), '(self.fe47_pf_etfsiq_high.temperature == TEMPERATURES_HIGH)\n', (5887, 5946), False, 'from numpy import array, all, ones_like\n'), ((5963, 6030), 'numpy.all', 'all', (['(self.po188_pf_frdm_high.partition_function == ANSWER_FRDM_HIGH)'], {}), '(self.po188_pf_frdm_high.partition_function == ANSWER_FRDM_HIGH)\n', (5966, 6030), False, 'from numpy import array, all, ones_like\n'), ((6046, 6107), 'numpy.all', 'all', (['(self.po188_pf_frdm_high.temperature == TEMPERATURES_HIGH)'], {}), '(self.po188_pf_frdm_high.temperature == TEMPERATURES_HIGH)\n', (6049, 6107), False, 'from numpy import array, all, ones_like\n')] |
#!/usr/bin/env python
# coding: utf-8
# Process Aerosol SSP
# Binary vs. netCDF
# <NAME>, Aug 1, 2020
from netCDF4 import Dataset
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import math
def readnc(infile, varname):
    """Read one variable from a netCDF file.

    Parameters
    ----------
    infile : str
        Path to the netCDF file.
    varname : str
        Name of the variable to extract.

    Returns
    -------
    numpy.ndarray
        A copy of the variable's data.
    """
    nc_fid = Dataset(infile, 'r')
    try:
        return np.array(nc_fid.variables[varname][:])
    finally:
        # Always release the file handle (the original version leaked it).
        nc_fid.close()
# Human-readable names of the 8 CRTM aerosol types, indexed by (type index - 1).
Aerosol_Type_String = ['DUST','SEASALT_SSAM','SEASALT_SSCM1','SEASALT_SSCM2',
'SEASALT_SSCM3','ORGANIC_CARBON','BLACK_CARBON','SULFATE']
# Read aerosol coefficient in netCDF format from the netCDF file directly
ncfile = '../fix/AerosolCoeff.nc'
# LUT dimensions of the AerosolCoeff file (radii/RH, wavelengths, types, ...).
n_Wavelengths = 61 ;
n_Radii = 36 ;
n_Types = 8 ;
n_RH = 36 ;
n_Legendre_Terms = 38 ;
n_Phase_Elem = 1;
# Pull every LUT variable we plot later: effective radii, extinction (ke),
# single-scattering albedo (w), asymmetry factor (g) and phase coefficients.
nc_Aerosol_Type = readnc(ncfile,'Aerosol_Type')
nc_Aerosol_Type_Name = readnc(ncfile,'Aerosol_Type_Name')
nc_Wavelength = readnc(ncfile,'Wavelength')
nc_Reff = readnc(ncfile,'Reff')
nc_RH = readnc(ncfile,'RH')
nc_ke = readnc(ncfile,'ke')
nc_w = readnc(ncfile,'w')
nc_g = readnc(ncfile,'g')
nc_pcoeff = readnc(ncfile,'pcoeff')
# Read aerosol coefficient obtained from CRTM IO output
# binary I/O
# This folder contains the following output acquired from the CRTM Binary LUT:
#   Asymmery_factor.txt
#   Legendre_terms.txt
#   Wavelength.txt
#   Extinction_Coefficients.txt
#   Radii.txt
#   General_Info.txt
#   SingleScatAlbedo.txt
crtm_bnfile = '../build/output_aerosol/Binary/'
# Indices (aerosol type, wavelength, radius) selected when the LUT dump was made.
[bn_iAero, bn_iWvl, bn_iRad] = np.loadtxt(crtm_bnfile+'netCDF_information.txt'). astype(int)
crtm_bn_Reff = np.loadtxt(crtm_bnfile+'Radii.txt')
crtm_bn_pcoef = np.loadtxt(crtm_bnfile+'Legendre_terms.txt')
crtm_bn_Wavelength = np.loadtxt(crtm_bnfile+'Wavelength.txt')
# The flat text dumps are reshaped to (n_Radii, n_Wavelengths) = (36, 61).
crtm_bn_g = np.reshape(np.loadtxt(crtm_bnfile+'Asymmery_factor.txt'), (36,61))
crtm_bn_ke = np.reshape(np.loadtxt(crtm_bnfile+'Extinction_Coefficients.txt'), (36,61))
crtm_bn_w = np.reshape(np.loadtxt(crtm_bnfile+'SingleScatAlbedo.txt'), (36,61))
# netCDF I/O
# This folder contains the following output acquired from the CRTM NetCDF LUT:
#   Asymmery_factor.txt
#   Legendre_terms.txt
#   Wavelength.txt
#   Extinction_Coefficients.txt
#   Radii.txt
#   General_Info.txt
#   SingleScatAlbedo.txt
crtm_ncfile = '../build/output_aerosol/NetCDF/'
[nc_iAero, nc_iWvl, nc_iRad] = np.loadtxt(crtm_ncfile+'netCDF_information.txt'). astype(int)
# BUG FIX: these six loads previously read from crtm_bnfile (the Binary
# folder), which made the "NetCDF" curves identical to the Binary ones and
# defeated the whole Binary-vs-netCDF comparison. They now read from
# crtm_ncfile, consistent with the netCDF_information.txt load above.
crtm_nc_Reff = np.loadtxt(crtm_ncfile+'Radii.txt')
crtm_nc_pcoef = np.loadtxt(crtm_ncfile+'Legendre_terms.txt')
crtm_nc_Wavelength = np.loadtxt(crtm_ncfile+'Wavelength.txt')
# Flat text dumps reshaped to (n_Radii, n_Wavelengths) = (36, 61).
crtm_nc_g = np.reshape(np.loadtxt(crtm_ncfile+'Asymmery_factor.txt'), (36,61))
crtm_nc_ke = np.reshape(np.loadtxt(crtm_ncfile+'Extinction_Coefficients.txt'), (36,61))
crtm_nc_w = np.reshape(np.loadtxt(crtm_ncfile+'SingleScatAlbedo.txt'), (36,61))
# Plot: 3x2 panel figure comparing Binary LUT, NetCDF LUT and the raw
# netCDF file contents for one aerosol type / radius / wavelength selection.
fig = plt.figure(figsize = (12,10))
clines = plt.cm.PiYG(np.linspace(0,1,n_Radii))
# radii
ax = fig.add_subplot(3,2,1)
ax.plot(np.linspace(1, len(crtm_bn_Reff), len(crtm_bn_Reff)), crtm_bn_Reff, '-c', label = 'CRTM_Binary')
ax.plot(np.linspace(1, len(crtm_nc_Reff), len(crtm_nc_Reff)), crtm_nc_Reff, 'om', label = 'CRTM_NetCDF')
ax.plot(np.linspace(1, n_Radii, n_Radii), nc_Reff[bn_iAero-1,:], '.k', label = 'Python NetCDF')
ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
ax.tick_params(labelsize=12)
ax.set_xlabel('#', fontsize = 14)
ax.set_ylabel('radius ($\mathrm{\mu m}$)', fontsize = 14)
plt.legend(loc=2, prop={'size': 12},ncol=1)
# wavelength
ax = fig.add_subplot(3,2,2)
ax.plot(np.linspace(1, len(crtm_bn_Wavelength), len(crtm_bn_Wavelength)), crtm_bn_Wavelength, '-c')
ax.plot(np.linspace(1, len(crtm_nc_Wavelength), len(crtm_nc_Wavelength)), crtm_nc_Wavelength, 'om')
ax.plot(np.linspace(1, n_Wavelengths, n_Wavelengths), nc_Wavelength, '.k')
ax.set_xlabel('#', fontsize = 14)
ax.set_ylabel('wavelength ($\mathrm{\mu m}$)', fontsize = 14)
# single-scattering albedo at the selected radius, across all wavelengths
ax = fig.add_subplot(3,2,3)
# for ir in range(n_Radii):
#     ax.plot(crtm_bn_Wavelength,crtm_bn_w[ir,:], '-', color = clines[ir])
#     ax.plot(nc_Wavelength,nc_w[bn_iAero-1,ir,:],'.k')
ax.plot(crtm_bn_Wavelength,crtm_bn_w[bn_iRad-1,:], '-c')
ax.plot(crtm_nc_Wavelength,crtm_nc_w[nc_iRad-1,:], 'om')
ax.plot(nc_Wavelength,nc_w[bn_iAero-1,bn_iRad-1,:],'.k')
ax.set_xlabel('wavelength ($\mathrm{\mu m}$)', fontsize = 14)
ax.set_ylabel('single-scattering albedo', fontsize = 14)
ax.set_xlim(0.16, 50)
ax.set_xscale('log')
# extinction coefficient at the selected radius
ax = fig.add_subplot(3,2,4)
# for ir in range(n_Radii):
#     ax.plot(crtm_bn_Wavelength,crtm_bn_ke[ir,:], '-', color = clines[ir])
#     ax.plot(nc_Wavelength,nc_ke[bn_iAero-1,ir,:],'.k')
ax.plot(crtm_bn_Wavelength,crtm_bn_ke[bn_iRad-1,:], '-c')
ax.plot(crtm_nc_Wavelength,crtm_nc_ke[nc_iRad-1,:], 'om')
ax.plot(nc_Wavelength,nc_ke[bn_iAero-1,bn_iRad-1,:],'.k')
ax.set_xlabel('wavelength ($\mathrm{\mu m}$)', fontsize = 14)
ax.set_ylabel('extinction coefficient ($\mathrm{m^2 kg^-1}$)', fontsize = 14)
ax.set_xlim(0.16, 50)
ax.set_xscale('log')
# Asymmetry factor at the selected radius
ax = fig.add_subplot(3,2,5)
# for ir in range(n_Radii):
#     ax.plot(crtm_bn_Wavelength,crtm_bn_g[ir,:], '-', color = clines[ir])
#     ax.plot(nc_Wavelength,nc_g[bn_iAero-1,ir,:],'.k')
ax.plot(crtm_bn_Wavelength,crtm_bn_g[bn_iRad-1,:], '-c')
ax.plot(crtm_nc_Wavelength,crtm_nc_g[nc_iRad-1,:], 'om')
ax.plot(nc_Wavelength,nc_g[bn_iAero-1,bn_iRad-1,:],'.k')
ax.set_xlabel('wavelength ($\mathrm{\mu m}$)', fontsize = 14)
ax.set_ylabel('asymmetry factor', fontsize = 14)
ax.set_xlim(0.16, 50)
ax.set_xscale('log')
# Phase element: Legendre expansion coefficients at the selected
# (wavelength, radius) pair
ax = fig.add_subplot(3,2,6)
strwvl = str(round(crtm_bn_Wavelength[bn_iWvl-1],2))
strrad = str(round(crtm_bn_Reff[bn_iRad-1],2))
ax.plot(np.linspace(1, len(crtm_bn_pcoef), len(crtm_bn_pcoef)), crtm_bn_pcoef, '-+c')
ax.plot(np.linspace(1, len(crtm_nc_pcoef), len(crtm_nc_pcoef)), crtm_nc_pcoef, 'om')
nctmp = nc_pcoeff[0, :, bn_iAero-1, bn_iRad-1, bn_iWvl-1]
ax.plot(np.linspace(1, len(nctmp), len(nctmp)), nctmp, '.k')
ax.set_xlabel('Legendre terms' +
              ' ($\mathrm{\lambda}$ = ' + strwvl + '$\mathrm{\mu m}$' +
              ', r = ' + strrad + '$\mathrm{\mu m}$)',
              fontsize = 14)
st = plt.suptitle(Aerosol_Type_String[bn_iAero-1] + ', radius = ' + strrad + '$\mathrm{\mu m}$' , fontsize=22)
st.set_position([.5, 0.95])
plt.show()
| [
"netCDF4.Dataset",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.ticker.ScalarFormatter",
"numpy.loadtxt",
"matplotlib.pyplot.suptitle",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((1493, 1530), 'numpy.loadtxt', 'np.loadtxt', (["(crtm_bnfile + 'Radii.txt')"], {}), "(crtm_bnfile + 'Radii.txt')\n", (1503, 1530), True, 'import numpy as np\n'), ((1545, 1591), 'numpy.loadtxt', 'np.loadtxt', (["(crtm_bnfile + 'Legendre_terms.txt')"], {}), "(crtm_bnfile + 'Legendre_terms.txt')\n", (1555, 1591), True, 'import numpy as np\n'), ((1611, 1653), 'numpy.loadtxt', 'np.loadtxt', (["(crtm_bnfile + 'Wavelength.txt')"], {}), "(crtm_bnfile + 'Wavelength.txt')\n", (1621, 1653), True, 'import numpy as np\n'), ((2289, 2326), 'numpy.loadtxt', 'np.loadtxt', (["(crtm_bnfile + 'Radii.txt')"], {}), "(crtm_bnfile + 'Radii.txt')\n", (2299, 2326), True, 'import numpy as np\n'), ((2341, 2387), 'numpy.loadtxt', 'np.loadtxt', (["(crtm_bnfile + 'Legendre_terms.txt')"], {}), "(crtm_bnfile + 'Legendre_terms.txt')\n", (2351, 2387), True, 'import numpy as np\n'), ((2407, 2449), 'numpy.loadtxt', 'np.loadtxt', (["(crtm_bnfile + 'Wavelength.txt')"], {}), "(crtm_bnfile + 'Wavelength.txt')\n", (2417, 2449), True, 'import numpy as np\n'), ((2712, 2740), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 10)'}), '(figsize=(12, 10))\n', (2722, 2740), True, 'import matplotlib.pyplot as plt\n'), ((3324, 3368), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(2)', 'prop': "{'size': 12}", 'ncol': '(1)'}), "(loc=2, prop={'size': 12}, ncol=1)\n", (3334, 3368), True, 'import matplotlib.pyplot as plt\n'), ((6064, 6176), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (["(Aerosol_Type_String[bn_iAero - 1] + ', radius = ' + strrad +\n '$\\\\mathrm{\\\\mu m}$')"], {'fontsize': '(22)'}), "(Aerosol_Type_String[bn_iAero - 1] + ', radius = ' + strrad +\n '$\\\\mathrm{\\\\mu m}$', fontsize=22)\n", (6076, 6176), True, 'import matplotlib.pyplot as plt\n'), ((6200, 6210), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6208, 6210), True, 'import matplotlib.pyplot as plt\n'), ((258, 278), 'netCDF4.Dataset', 'Dataset', (['infile', '"""r"""'], {}), "(infile, 'r')\n", (265, 
278), False, 'from netCDF4 import Dataset\n'), ((289, 327), 'numpy.array', 'np.array', (['nc_fid.variables[varname][:]'], {}), '(nc_fid.variables[varname][:])\n', (297, 327), True, 'import numpy as np\n'), ((1676, 1723), 'numpy.loadtxt', 'np.loadtxt', (["(crtm_bnfile + 'Asymmery_factor.txt')"], {}), "(crtm_bnfile + 'Asymmery_factor.txt')\n", (1686, 1723), True, 'import numpy as np\n'), ((1756, 1811), 'numpy.loadtxt', 'np.loadtxt', (["(crtm_bnfile + 'Extinction_Coefficients.txt')"], {}), "(crtm_bnfile + 'Extinction_Coefficients.txt')\n", (1766, 1811), True, 'import numpy as np\n'), ((1844, 1892), 'numpy.loadtxt', 'np.loadtxt', (["(crtm_bnfile + 'SingleScatAlbedo.txt')"], {}), "(crtm_bnfile + 'SingleScatAlbedo.txt')\n", (1854, 1892), True, 'import numpy as np\n'), ((2472, 2519), 'numpy.loadtxt', 'np.loadtxt', (["(crtm_bnfile + 'Asymmery_factor.txt')"], {}), "(crtm_bnfile + 'Asymmery_factor.txt')\n", (2482, 2519), True, 'import numpy as np\n'), ((2552, 2607), 'numpy.loadtxt', 'np.loadtxt', (["(crtm_bnfile + 'Extinction_Coefficients.txt')"], {}), "(crtm_bnfile + 'Extinction_Coefficients.txt')\n", (2562, 2607), True, 'import numpy as np\n'), ((2640, 2688), 'numpy.loadtxt', 'np.loadtxt', (["(crtm_bnfile + 'SingleScatAlbedo.txt')"], {}), "(crtm_bnfile + 'SingleScatAlbedo.txt')\n", (2650, 2688), True, 'import numpy as np\n'), ((2763, 2789), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n_Radii'], {}), '(0, 1, n_Radii)\n', (2774, 2789), True, 'import numpy as np\n'), ((3043, 3075), 'numpy.linspace', 'np.linspace', (['(1)', 'n_Radii', 'n_Radii'], {}), '(1, n_Radii, n_Radii)\n', (3054, 3075), True, 'import numpy as np\n'), ((3166, 3201), 'matplotlib.ticker.ScalarFormatter', 'matplotlib.ticker.ScalarFormatter', ([], {}), '()\n', (3199, 3201), False, 'import matplotlib\n'), ((3619, 3663), 'numpy.linspace', 'np.linspace', (['(1)', 'n_Wavelengths', 'n_Wavelengths'], {}), '(1, n_Wavelengths, n_Wavelengths)\n', (3630, 3663), True, 'import numpy as np\n'), ((1416, 1466), 
'numpy.loadtxt', 'np.loadtxt', (["(crtm_bnfile + 'netCDF_information.txt')"], {}), "(crtm_bnfile + 'netCDF_information.txt')\n", (1426, 1466), True, 'import numpy as np\n'), ((2212, 2262), 'numpy.loadtxt', 'np.loadtxt', (["(crtm_ncfile + 'netCDF_information.txt')"], {}), "(crtm_ncfile + 'netCDF_information.txt')\n", (2222, 2262), True, 'import numpy as np\n')] |
"""
INCLUDE ONLY, DO NOT EXECUTE
"""
from settings import *
import numpy as np
from tensorflow.keras.utils import Sequence
import cv2 as cv
# Locations of the satellite training/validation imagery and the matching
# ground-truth masks (data_folder comes from settings via the star import).
src_train_folder = os.path.join(data_folder, 'IM_SAT_TRAIN', 'img')
src_train_folder_gt = os.path.join(data_folder, 'IM_SAT_TRAIN', 'gt')
src_test_folder = os.path.join(data_folder, 'IM_SAT_VAL_SMALL', 'img')
src_train_images = os.listdir(src_train_folder)
src_test_images = os.listdir(src_test_folder)
# Training reads directly from the source folders (the pre-tiled alternative
# below is kept for reference but disabled).
train_folder = src_train_folder
train_folder_gt = src_train_folder_gt
#train_folder_root = os.path.join(data_folder, 'train_{}x{}'.format(image_size, image_size))
#train_folder = os.path.join(train_folder_root, 'images')
#train_folder_gt = os.path.join(train_folder_root, 'gt')
def create_gaussian(size=None, sigma=0.55):
    """Create a square 2-D Gaussian kernel on a [-1, 1] x [-1, 1] grid.

    Parameters
    ----------
    size : int, optional
        Side length of the kernel. Defaults to the module-level
        ``image_size`` setting, now resolved at call time instead of at
        import time (backward-compatible; avoids freezing the setting
        when the module is loaded).
    sigma : float
        Standard deviation of the Gaussian, in grid units.

    Returns
    -------
    numpy.ndarray
        A ``size x size`` array with value 1.0 at the grid center.
    """
    if size is None:
        size = image_size
    x, y = np.meshgrid(np.linspace(-1, 1, size), np.linspace(-1, 1, size))
    d = np.sqrt(x * x + y * y)
    gaussian = np.exp(-(d ** 2 / (2.0 * sigma ** 2)))
    return gaussian
# Destination for optional debug dumps of augmented images and masks.
debug_folder = os.path.join(tmp_folder, 'debug')
class DataAugmentation(Sequence):
    """Keras ``Sequence`` that streams (image, ground-truth) batches from disk.

    Splits the images in ``train_folder`` into training/validation folds,
    applies a random flip/rotation to training samples, and can append a
    border channel derived from the ground-truth mask.
    """
    def __init__(self, batch_size, validation, validation_set, process_input, border, debug=False):
        """Build the (shuffled) image list for the requested fold.

        batch_size: number of samples per batch.
        validation: True to yield the validation fold, False for training.
        validation_set: 1..6 selects which fold is held out; 0 disables
            the split (all images go to training, validation gets none).
        process_input: callable applied to each raw image (e.g. network
            preprocessing) before it is stored in the batch.
        border: if True, a second ground-truth channel with the mask
            border (dilation minus erosion) is produced.
        debug: if True, augmented images/masks are also written to
            debug_folder for visual inspection.
        """
        assert(0 <= validation_set <= 6)
        self.batch_size = batch_size
        self.validation = validation
        self.validation_set = validation_set
        self.process_input = process_input
        self.border = border
        self.debug = debug
        if self.debug:
            if not os.path.exists(debug_folder):
                os.makedirs(debug_folder)
        # Build image list
        self.images = []
        for fname in os.listdir(train_folder):
            # First underscore-delimited token, e.g. 'austin1_9_0.jpg' -> 'austin1';
            # n is its trailing number (here 1). Images are grouped 6-per-scene,
            # and (n - 1) // 6 identifies the fold a scene belongs to.
            # NOTE(review): if the token were all digits, i would reach -1 and the
            # loop would index from the end — assumes names start with letters.
            name = fname.split('_')[0]
            i = len(name) - 1
            while name[i].isdigit():
                i -= 1
            i += 1
            n = int(name[i:])
            if validation_set > 0:
                if self.validation:
                    if (n - 1) // 6 == self.validation_set - 1:
                        self.images.append(fname)
                else:
                    if (n - 1) // 6 != self.validation_set - 1:
                        self.images.append(fname)
            elif not self.validation:
                self.images.append(fname)
        # Shuffle data (fixed seed so the order is reproducible across runs)
        if self.validation:
            self.images = np.random.RandomState(0).permutation(self.images)
            print("validation_elements = " + str(len(self.images)))
        else:
            self.images = np.random.RandomState(0).permutation(self.images)
            print("training_elements = " + str(len(self.images)))
        # Create border structuring element (3x3 rectangle used by dilate/erode)
        if self.border:
            self.structuring_element = cv.getStructuringElement(cv.MORPH_RECT, (3, 3))
    def __len__(self):
        """Number of batches per epoch (last batch may be smaller)."""
        return int(np.ceil(len(self.images) / self.batch_size))
    def __getitem__(self, idx):
        """Load, augment and return batch ``idx`` as (batch_x, batch_y).

        batch_x: float32 images after ``process_input``.
        batch_y: float32 masks scaled to [0, 1]; with ``border`` enabled a
        second channel holds the mask border.
        """
        batch_start = idx * self.batch_size
        batch_end = min(len(self.images), (idx + 1) * self.batch_size)
        batch_images = self.images[batch_start:batch_end]
        batch_x = np.zeros((len(batch_images), image_size, image_size, 3), dtype=np.float32)
        if self.border:
            batch_y = np.zeros((len(batch_images), image_size, image_size, 2), dtype=np.float32)
        else:
            batch_y = np.zeros((len(batch_images), image_size, image_size, 1), dtype=np.float32)
        for i in range(len(batch_images)):
            fname = batch_images[i]
            fpath = os.path.join(train_folder, fname)
            fpath_gt = os.path.join(train_folder_gt, fname[:-4] + '.png')
            # NOTE(review): the (384,384) resize target is hard-coded;
            # presumably it equals image_size used for the batch buffers — confirm.
            image = cv.imread(fpath)
            image = cv.resize(image, (384,384), interpolation= cv.INTER_LINEAR)
            image_gt = cv.imread(fpath_gt, 0)
            image_gt = cv.resize(image_gt, (384,384), interpolation= cv.INTER_LINEAR)
            image_gt = np.expand_dims(image_gt, -1)
            # Random flip/rotation is applied to training samples only, and the
            # same transform goes to the image and its mask.
            if not self.validation:
                t = self.get_random_transform()
                image = self.transform(image, t)
                image_gt = self.transform(image_gt, t)
            batch_x[i] = self.process_input(image)
            if self.border:
                # Border = dilation minus erosion of the mask.
                border = cv.dilate(image_gt, self.structuring_element) - cv.erode(image_gt, self.structuring_element)
                border = np.reshape(border, (image_size, image_size, 1))
                batch_y[i] = np.concatenate((image_gt, border), axis=-1) / 255
            else:
                batch_y[i] = image_gt / 255
            if self.debug:
                cv.imwrite(os.path.join(debug_folder, fname), image)
                cv.imwrite(os.path.join(debug_folder, fname[:-4] + '.png'), image_gt)
                if self.border:
                    cv.imwrite(os.path.join(debug_folder, fname[:-4] + '_b.png'), border)
        return batch_x, batch_y
    @staticmethod
    def get_random_transform():
        """Pick a uniformly random transform id in 0..5 (0 = identity)."""
        tc = 6
        t = min(tc-1, int(np.floor(tc * np.random.rand())))
        return t
    @staticmethod
    def transform(img, t):
        """Apply transform ``t``: 1=fliplr, 2=flipud, 3=rot180, 4=rot-90, 5=rot90."""
        if t == 1:
            return np.fliplr(img)
        if t == 2:
            return np.flipud(img)
        if t == 3:
            return np.rot90(img, 2)
        if t == 4:
            return np.rot90(img, -1)
        if t == 5:
            return np.rot90(img, 1)
        return img
    @staticmethod
    def inverse_transform(img, t):
        """Undo ``transform(img, t)``: each case is the inverse rotation/flip."""
        if t == 1:
            return np.fliplr(img)
        if t == 2:
            return np.flipud(img)
        if t == 3:
            return np.rot90(img, -2)
        if t == 4:
            return np.rot90(img, 1)
        if t == 5:
            return np.rot90(img, -1)
        return img
def test_data_augmentation():
    """Visual sanity check: apply random transforms to one image and dump the results."""
    sample = cv.imread(os.path.join(train_folder, 'austin1_9_0.jpg'))
    for idx in range(100):
        choice = DataAugmentation.get_random_transform()
        augmented = DataAugmentation.transform(sample, choice)
        augmented = np.clip(augmented, 0, 255).astype(np.uint8)
        cv.imwrite(os.path.join(tmp_folder, str(idx) + '.jpg'), augmented)
#test_data_augmentation()
| [
"numpy.clip",
"numpy.sqrt",
"numpy.reshape",
"cv2.dilate",
"numpy.flipud",
"numpy.random.rand",
"numpy.fliplr",
"cv2.erode",
"numpy.exp",
"numpy.linspace",
"numpy.expand_dims",
"numpy.rot90",
"numpy.concatenate",
"cv2.resize",
"cv2.getStructuringElement",
"numpy.random.RandomState",
... | [((865, 887), 'numpy.sqrt', 'np.sqrt', (['(x * x + y * y)'], {}), '(x * x + y * y)\n', (872, 887), True, 'import numpy as np\n'), ((903, 941), 'numpy.exp', 'np.exp', (['(-(d ** 2 / (2.0 * sigma ** 2)))'], {}), '(-(d ** 2 / (2.0 * sigma ** 2)))\n', (909, 941), True, 'import numpy as np\n'), ((805, 829), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'size'], {}), '(-1, 1, size)\n', (816, 829), True, 'import numpy as np\n'), ((831, 855), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'size'], {}), '(-1, 1, size)\n', (842, 855), True, 'import numpy as np\n'), ((2663, 2710), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_RECT', '(3, 3)'], {}), '(cv.MORPH_RECT, (3, 3))\n', (2687, 2710), True, 'import cv2 as cv\n'), ((3559, 3575), 'cv2.imread', 'cv.imread', (['fpath'], {}), '(fpath)\n', (3568, 3575), True, 'import cv2 as cv\n'), ((3596, 3655), 'cv2.resize', 'cv.resize', (['image', '(384, 384)'], {'interpolation': 'cv.INTER_LINEAR'}), '(image, (384, 384), interpolation=cv.INTER_LINEAR)\n', (3605, 3655), True, 'import cv2 as cv\n'), ((3684, 3706), 'cv2.imread', 'cv.imread', (['fpath_gt', '(0)'], {}), '(fpath_gt, 0)\n', (3693, 3706), True, 'import cv2 as cv\n'), ((3730, 3792), 'cv2.resize', 'cv.resize', (['image_gt', '(384, 384)'], {'interpolation': 'cv.INTER_LINEAR'}), '(image_gt, (384, 384), interpolation=cv.INTER_LINEAR)\n', (3739, 3792), True, 'import cv2 as cv\n'), ((3816, 3844), 'numpy.expand_dims', 'np.expand_dims', (['image_gt', '(-1)'], {}), '(image_gt, -1)\n', (3830, 3844), True, 'import numpy as np\n'), ((5012, 5026), 'numpy.fliplr', 'np.fliplr', (['img'], {}), '(img)\n', (5021, 5026), True, 'import numpy as np\n'), ((5065, 5079), 'numpy.flipud', 'np.flipud', (['img'], {}), '(img)\n', (5074, 5079), True, 'import numpy as np\n'), ((5118, 5134), 'numpy.rot90', 'np.rot90', (['img', '(2)'], {}), '(img, 2)\n', (5126, 5134), True, 'import numpy as np\n'), ((5173, 5190), 'numpy.rot90', 'np.rot90', (['img', '(-1)'], {}), '(img, -1)\n', 
(5181, 5190), True, 'import numpy as np\n'), ((5229, 5245), 'numpy.rot90', 'np.rot90', (['img', '(1)'], {}), '(img, 1)\n', (5237, 5245), True, 'import numpy as np\n'), ((5357, 5371), 'numpy.fliplr', 'np.fliplr', (['img'], {}), '(img)\n', (5366, 5371), True, 'import numpy as np\n'), ((5410, 5424), 'numpy.flipud', 'np.flipud', (['img'], {}), '(img)\n', (5419, 5424), True, 'import numpy as np\n'), ((5463, 5480), 'numpy.rot90', 'np.rot90', (['img', '(-2)'], {}), '(img, -2)\n', (5471, 5480), True, 'import numpy as np\n'), ((5519, 5535), 'numpy.rot90', 'np.rot90', (['img', '(1)'], {}), '(img, 1)\n', (5527, 5535), True, 'import numpy as np\n'), ((5574, 5591), 'numpy.rot90', 'np.rot90', (['img', '(-1)'], {}), '(img, -1)\n', (5582, 5591), True, 'import numpy as np\n'), ((4258, 4305), 'numpy.reshape', 'np.reshape', (['border', '(image_size, image_size, 1)'], {}), '(border, (image_size, image_size, 1))\n', (4268, 4305), True, 'import numpy as np\n'), ((5858, 5882), 'numpy.clip', 'np.clip', (['img_aug', '(0)', '(255)'], {}), '(img_aug, 0, 255)\n', (5865, 5882), True, 'import numpy as np\n'), ((2281, 2305), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (2302, 2305), True, 'import numpy as np\n'), ((2439, 2463), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (2460, 2463), True, 'import numpy as np\n'), ((4140, 4185), 'cv2.dilate', 'cv.dilate', (['image_gt', 'self.structuring_element'], {}), '(image_gt, self.structuring_element)\n', (4149, 4185), True, 'import cv2 as cv\n'), ((4188, 4232), 'cv2.erode', 'cv.erode', (['image_gt', 'self.structuring_element'], {}), '(image_gt, self.structuring_element)\n', (4196, 4232), True, 'import cv2 as cv\n'), ((4335, 4378), 'numpy.concatenate', 'np.concatenate', (['(image_gt, border)'], {'axis': '(-1)'}), '((image_gt, border), axis=-1)\n', (4349, 4378), True, 'import numpy as np\n'), ((4891, 4907), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4905, 4907), True, 'import 
numpy as np\n')] |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# This file (neurone.py) is part of neurone_loader -
# (https://www.github.com/heilerich/neurone_loader) -
# Copyright © 2019 <NAME>. -
# -
# This code is released under the MIT License -
# https://opensource.org/licenses/mit-license.php -
# Please see the file LICENSE for details. -
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# This file contains code originally from export2hdf5 -
# (https://github.com/bwrc/export2hdf5) -
# Created by <NAME> <<EMAIL>>, -
# Finnish Institute of Occupational Health -
# ------------------------------------------------------------------------------
"""
Contains functions for reading data recorded with a
Bittium NeurOne device. This module currently supports
reading of data and events.
"""
import numpy as np
import xml.etree.ElementTree
from os import path
from construct import Struct, Int32sl, Int64ul
from datetime import datetime
from collections import namedtuple
def read_neurone_protocol(fpath):
    """
    Read the measurement protocol of a NeurOne recording.

    Arguments:
        - fpath: directory containing the NeurOne measurement,
          i.e. the directory holding Protocol.xml and Session.xml.

    Returns:
        - a dictionary with the channel names, recording metadata
          (start/stop times, sampling rate, subject info) and the
          list of session phases:
          {"channels": [...], "meta": {...}, "phases": [...]}
    """
    protocol_file = path.join(fpath, "Protocol.xml")
    session_file = path.join(fpath, "Session.xml")

    # ---- Protocol.xml: channel names and sampling rate ----
    ns = {'xmlns': 'http://www.megaemg.com/DataSetGeneralProtocol.xsd'}
    root = xml.etree.ElementTree.parse(protocol_file).getroot()

    # Channels are sorted by their physical input number, which is the
    # order in which they are sampled by the device.
    numbered_channels = []
    for ch in root.findall("xmlns:TableInput", namespaces=ns):
        number = int(
            ch.findall("xmlns:PhysicalInputNumber", namespaces=ns)[0].text)
        name = ch.findall("xmlns:Name", namespaces=ns)[0].text
        numbered_channels.append((number, name))
    channel_names = [name for _, name in sorted(numbered_channels)]

    table_protocol = root.findall("xmlns:TableProtocol", namespaces=ns)[0]
    sampling_rate = int(
        table_protocol.findall(
            "xmlns:ActualSamplingFrequency", namespaces=ns)[0].text)

    # ---- Session.xml: timing, phases and subject information ----
    ns2 = {'xmlns': 'http://www.megaemg.com/DataSetGeneralSession.xsd'}
    root = xml.etree.ElementTree.parse(session_file).getroot()

    session = root.findall("xmlns:TableSession", namespaces=ns2)[0]
    time_start = session.findall("xmlns:StartDateTime", namespaces=ns2)[0].text
    time_stop = session.findall("xmlns:StopDateTime", namespaces=ns2)[0].text

    phases = []
    for phase in root.findall("xmlns:TableSessionPhase", namespaces=ns2):
        # The phase number is the last component of the folder path.
        folder = phase.findall("xmlns:Folder", namespaces=ns2)[0].text
        phases.append({
            'number': folder.split("\\")[-1],
            'time_start': _convert_time(
                phase.findall("xmlns:StartDateTime", namespaces=ns2)[0].text),
            'time_stop': _convert_time(
                phase.findall("xmlns:StopDateTime", namespaces=ns2)[0].text),
        })

    person = root.find('xmlns:TablePerson', namespaces=ns2)
    subject = dict(
        id=person.find('xmlns:PersonID', namespaces=ns2).text,
        first_name=person.find('xmlns:FirstName', namespaces=ns2).text,
        last_name=person.find('xmlns:LastName', namespaces=ns2).text,
        date_of_birth=_convert_time(
            person.find('xmlns:DateOfBirth', namespaces=ns2).text)
    )

    meta = {
        "time_start": _convert_time(time_start),
        "time_stop": _convert_time(time_stop),
        "sampling_rate": sampling_rate,
        "subject": subject,
    }
    return {'channels': channel_names, 'meta': meta, 'phases': phases}
def _convert_time(inp_str):
"""Converts ISO timestrings from protocols to datetime objects"""
p_index = inp_str.find('+')
if p_index == -1 and inp_str.count('-') == 3:
p_index = inp_str.rfind('-')
if p_index >= 0:
time_str = inp_str[0:p_index]
utc_offset_str = inp_str[p_index:]
else:
time_str = inp_str
utc_offset_str = ''
if len(time_str) > 26:
time_str = time_str[0:26]
elif len(time_str) == 19:
time_str += '.'
time_str = time_str.ljust(26, '0')
return datetime.fromisoformat(f"{time_str}{utc_offset_str}")
def read_neurone_data(fpath, session_phase=1, protocol=None):
    """
    Read the NeurOne signal data from a binary file.

    Arguments:
        - fpath: directory containing the NeurOne measurement,
          i.e. the directory holding Protocol.xml and Session.xml.
        - session_phase: phase of the measurement; currently only
          phase 1 is supported.
        - protocol: dictionary from ``read_neurone_protocol``.
          Optional; read automatically when omitted.

    Returns:
        - A numpy ndarray of shape (n_samples, n_channels) where each
          column stores the data for one channel.
    """
    # Read the protocol unless it was supplied by the caller.
    if protocol is None:
        protocol = read_neurone_protocol(fpath)

    # Sample/channel counts determine how the flat file is reshaped.
    n_samples, n_channels = read_neurone_data_info(
        fpath, session_phase, protocol)

    binary_file = path.join(fpath, str(session_phase), '1.bin')
    # Samples are stored as little-endian 32-bit signed integers.
    samples = np.fromfile(binary_file, dtype='<i4')
    samples.shape = (n_samples, n_channels)
    return samples
def read_neurone_data_info(fpath, session_phase=1, protocol=None):
    """
    Read the sample and channel count of a NeurOne signal binary file.

    Arguments:
        - fpath: directory containing the NeurOne measurement,
          i.e. the directory holding Protocol.xml and Session.xml.
        - session_phase: phase of the measurement; currently only
          phase 1 is supported.
        - protocol: dictionary from ``read_neurone_protocol``.
          Optional; read automatically when omitted.

    Returns:
        - a named tuple ``(n_samples, n_channels)``.
    """
    # Read the protocol unless it was supplied by the caller.
    if protocol is None:
        protocol = read_neurone_protocol(fpath)

    binary_file = path.join(fpath, str(session_phase), '1.bin')
    n_channels = len(protocol['channels'])
    # Each sample is a 4-byte integer, one per channel per time step.
    n_samples = int(path.getsize(binary_file) / 4 / n_channels)

    DataInfo = namedtuple('DataInfo', ['n_samples', 'n_channels'])
    return DataInfo(n_samples, n_channels)
def get_n1_event_format():
    """
    Define the binary format of one 88-byte event record in a NeurOne
    recording.

    Arguments: None.

    Returns:
        - A ``Struct`` (from the construct library) describing the
          event format.
    """
    # Field layout: six 32-bit header fields, six 64-bit index/offset
    # fields, then four reserved-for-future-use 32-bit fields.
    int32_head = ("Revision", "RFU1", "Type", "SourcePort",
                  "ChannelNumber", "Code")
    int64_body = ("StartSampleIndex", "StopSampleIndex",
                  "DescriptionLength", "DescriptionOffset",
                  "DataLength", "DataOffset")
    int32_tail = ("RFU2", "RFU3", "RFU4", "RFU5")
    # noinspection PyUnresolvedReferences
    fields = ([name / Int32sl for name in int32_head]
              + [name / Int64ul for name in int64_body]
              + [name / Int32sl for name in int32_tail])
    return Struct(*fields)
def read_neurone_events(fpath, session_phase=1, sampling_rate=None):
    """
    Read the NeurOne events from a binary file.
    Arguments:
        - fpath: the path to the directory holding the
                 NeurOne measurement (i.e., the
                 directory Protocol.xml and Session.xml
                 files.
        - sampling_rate:
                 The sampling rate of the recording.
                 This argument is optional and if not given,
                 the protocol is automatically read.
        - session_phase:
                 The phase of the measurement. Currently
                 only reading of the first phase (1) is
                 supported.
    Returns:
        - A dict containing the events and the data type for the events.
          {"events" : <numpy structured array with the events>,
          "events_dtype" : <array with the numpy dtype for the events>}
    """
    fname = path.join(fpath, str(session_phase), "events.bin")
    # Get the sampling rate unless provided
    if sampling_rate is None:
        protocol = read_neurone_protocol(fpath)
        sampling_rate = protocol['meta']['sampling_rate']
    # Determine number of events: each record occupies exactly 88 bytes
    # on disk, so the event count is the file size divided by 88.
    f_info = path.getsize(fname)
    n_events = int(f_info / 88)
    # Every slot is overwritten in the loop below, so the shared initial
    # dict created by the * operator is harmless here.
    events = [{}] * n_events
    # Read events in chunks of 88 bytes and unpack
    # also add start / stop time for each event
    # and remove 'reserved for future use' (RFU) fields
    event_format = get_n1_event_format()
    with open(fname, mode='rb') as file:
        for i in range(n_events):
            # construct's parse() returns a dict-like Container whose
            # key order follows the Struct field order.
            events[i] = event_format.parse(file.read(88))
            # Convert sample indices to seconds.
            events[i]['StartTime'] = events[i]['StartSampleIndex'] / sampling_rate
            events[i]['StopTime'] = events[i]['StopSampleIndex'] / sampling_rate
            for j in range(5):
                del events[i]['RFU' + str(j+1)]
            # '_io' is construct's internal stream reference, not event data.
            del events[i]['_io']
    # Create a numpy structured array from the events
    events_dtype = np.dtype([("Revision", np.int32),
                             ("Type", np.int32),
                             ("SourcePort", np.int32),
                             ("ChannelNumber", np.int32),
                             ("Code", np.int32),
                             ("StartSampleIndex", np.int64),
                             ("StopSampleIndex", np.int64),
                             ("DescriptionLength", np.int64),
                             ("DescriptionOffset", np.int64),
                             ("DataLength", np.int64),
                             ("DataOffset", np.int64),
                             ("StartTime", np.int64),
                             ("StopTime", np.int64)])
    # convert array of event dicts to an array of tuples
    if len(events) == 0:
        return {'events': np.array([], dtype=events_dtype), 'dtype': events_dtype}
    # NOTE(review): key_list relies on the parsed Container preserving the
    # Struct field order so that tuples line up with events_dtype — confirm
    # against the construct library version in use.
    key_list = [k for k, v in events[0].items()]
    tmp = [tuple([e[k] for k in key_list]) for e in events]
    events = np.array(tmp, dtype=events_dtype)
    return {'events': events, 'dtype': events_dtype}
| [
"os.path.getsize",
"numpy.fromfile",
"collections.namedtuple",
"os.path.join",
"numpy.array",
"datetime.datetime.fromisoformat",
"numpy.dtype",
"construct.Struct"
] | [((2144, 2176), 'os.path.join', 'path.join', (['fpath', '"""Protocol.xml"""'], {}), "(fpath, 'Protocol.xml')\n", (2153, 2176), False, 'from os import path\n'), ((2197, 2228), 'os.path.join', 'path.join', (['fpath', '"""Session.xml"""'], {}), "(fpath, 'Session.xml')\n", (2206, 2228), False, 'from os import path\n'), ((6050, 6103), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['f"""{time_str}{utc_offset_str}"""'], {}), "(f'{time_str}{utc_offset_str}')\n", (6072, 6103), False, 'from datetime import datetime\n'), ((7329, 7360), 'numpy.fromfile', 'np.fromfile', (['fname'], {'dtype': '"""<i4"""'}), "(fname, dtype='<i4')\n", (7340, 7360), True, 'import numpy as np\n'), ((8603, 8622), 'os.path.getsize', 'path.getsize', (['fname'], {}), '(fname)\n', (8615, 8622), False, 'from os import path\n'), ((8731, 8782), 'collections.namedtuple', 'namedtuple', (['"""DataInfo"""', "['n_samples', 'n_channels']"], {}), "('DataInfo', ['n_samples', 'n_channels'])\n", (8741, 8782), False, 'from collections import namedtuple\n'), ((9154, 9553), 'construct.Struct', 'Struct', (["('Revision' / Int32sl)", "('RFU1' / Int32sl)", "('Type' / Int32sl)", "('SourcePort' / Int32sl)", "('ChannelNumber' / Int32sl)", "('Code' / Int32sl)", "('StartSampleIndex' / Int64ul)", "('StopSampleIndex' / Int64ul)", "('DescriptionLength' / Int64ul)", "('DescriptionOffset' / Int64ul)", "('DataLength' / Int64ul)", "('DataOffset' / Int64ul)", "('RFU2' / Int32sl)", "('RFU3' / Int32sl)", "('RFU4' / Int32sl)", "('RFU5' / Int32sl)"], {}), "('Revision' / Int32sl, 'RFU1' / Int32sl, 'Type' / Int32sl, \n 'SourcePort' / Int32sl, 'ChannelNumber' / Int32sl, 'Code' / Int32sl, \n 'StartSampleIndex' / Int64ul, 'StopSampleIndex' / Int64ul, \n 'DescriptionLength' / Int64ul, 'DescriptionOffset' / Int64ul, \n 'DataLength' / Int64ul, 'DataOffset' / Int64ul, 'RFU2' / Int32sl, \n 'RFU3' / Int32sl, 'RFU4' / Int32sl, 'RFU5' / Int32sl)\n", (9160, 9553), False, 'from construct import Struct, Int32sl, Int64ul\n'), ((10860, 
10879), 'os.path.getsize', 'path.getsize', (['fname'], {}), '(fname)\n', (10872, 10879), False, 'from os import path\n'), ((11621, 12005), 'numpy.dtype', 'np.dtype', (["[('Revision', np.int32), ('Type', np.int32), ('SourcePort', np.int32), (\n 'ChannelNumber', np.int32), ('Code', np.int32), ('StartSampleIndex', np\n .int64), ('StopSampleIndex', np.int64), ('DescriptionLength', np.int64),\n ('DescriptionOffset', np.int64), ('DataLength', np.int64), (\n 'DataOffset', np.int64), ('StartTime', np.int64), ('StopTime', np.int64)]"], {}), "([('Revision', np.int32), ('Type', np.int32), ('SourcePort', np.\n int32), ('ChannelNumber', np.int32), ('Code', np.int32), (\n 'StartSampleIndex', np.int64), ('StopSampleIndex', np.int64), (\n 'DescriptionLength', np.int64), ('DescriptionOffset', np.int64), (\n 'DataLength', np.int64), ('DataOffset', np.int64), ('StartTime', np.\n int64), ('StopTime', np.int64)])\n", (11629, 12005), True, 'import numpy as np\n'), ((12617, 12650), 'numpy.array', 'np.array', (['tmp'], {'dtype': 'events_dtype'}), '(tmp, dtype=events_dtype)\n', (12625, 12650), True, 'import numpy as np\n'), ((12438, 12470), 'numpy.array', 'np.array', (['[]'], {'dtype': 'events_dtype'}), '([], dtype=events_dtype)\n', (12446, 12470), True, 'import numpy as np\n')] |
"""
``nn()`` is used to train an instance of ``globalemu`` on the preprocessed
data in ``base_dir``. All of the parameters for ``nn()`` are kwargs and
a number of them can be left at their default values however you will
need to set the ``base_dir`` and possibly ``epochs`` and ``xHI`` (see below and
the tutorial for details).
"""
import tensorflow as tf
from tensorflow import keras
import numpy as np
import time
import os
from globalemu.models import network_models
from globalemu.losses import loss_functions
class nn():
    r"""
    **kwargs:**
        batch_size: **int / default: 100**
            | The batch size used by ``tensorflow`` when performing training.
                Corresponds to the number of samples propagated before the
                networks hyperparameters are updated. Keep the value ~100 as
                this will help with memory management and training speed.
        epochs: **int / default: 10**
            | The number of epochs to train the network on. An epoch
                corresponds to training on x batches where x is sufficiently
                large for every sample to have influenced an update of the
                network hyperparameters.
        activation: **string / default: 'tanh'**
            | The type of activation function used in the neural networks
                hidden layers. The activation function affects the way that the
                network learns and updates its hyperparameters. The default
                is a commonly used activation for regression neural networks.
        lr: **float / default: 0.001**
            | The learning rate acts as a "step size" in the optimization and
                its value can affect the quality of the emulation. Typical
                values fall in the range 0.001-0.1.
        dropout: **float / default: 0**
            | The dropout for the neural network training. ``globalemu`` is
                designed so that you shouldn't need dropout to prevent
                overfitting but we leave it as an option.
        input_shape: **int / default: 8**
            | The number of input parameters (astrophysical parameters
                plus redshift) for the neural network. The default accounts
                for 7 astrophysical
                parameters and a single redshift input.
        output_shape: **int / default: 1**
            | The number of outputs (temperature) from the neural network.
                This shouldn't need changing.
        layer_sizes: **list / default: [input_shape, input_shape]**
            | The number of hidden layers and the number of nodes in each
                layer. For example ``layer_sizes=[8, 8]`` will create
                two hidden layers both with 8 nodes (this is the default).
        base_dir: **string / default: 'model_dir/'**
            | This should be the same as the ``base_dir`` used when
                preprocessing. It contains the data that the network will
                work with and is the directory in which the trained model will
                be saved in.
        early_stop: **Bool / default: False**
            | If ``early_stop`` is set to ``True`` then the network will stop
                learning if the loss has not changed up to an accuracy given
                by ``early_stop_lim`` within the last ten epochs.
        early_stop_lim: **float / default: 1e-4**
            | The precision with which to assess the change in loss over the
                last ten epochs when ``early_stop=True``. The value of this
                parameter is strongly dependent on the magnitude of the
                evaluated loss at each epoch and the default may be too high or
                too low for the desired outcome. For example if our loss value
                is initially 0.01 and decreases with each epoch then a
                ``epoch_stop_lim`` of 0.1 will cause training to stop after
                10 epochs and give poor results.
        xHI: **Bool / default: False**
            | If True then ``globalemu`` will act as if it is training a
                neutral fraction history emulator.
        output_activation: **string / default: 'linear'**
            | Determines the output activation function for the network.
                Modifying this
                is useful if the emulator output is required to be positive or
                negative etc. If xHI is True then the output activation is
                set to 'relu' else the function is 'linear'. See the tensorflow
                documentation for more details on the types of activation
                functions available.
        loss_function: **Callable/ default: None**
            | By default the code uses an MSE loss however users are able to
                pass their own loss functions when training the neural
                network. These should be functions that take in the true labels
                (temperatures) and the predicted labels and return some measure
                of loss. Care needs to be taken to ensure that the correct loss
                function is supplied when resuming the training of
                a previous run as ``globalemu`` will not check this. In order
                for the loss function to work it must be built
                using the tensorflow.keras backend. An example would be
                .. code:: python
                    from tensorflow.keras import backend as K
                    def custom_loss(true_labels, predicted_labels,
                            network_inputs):
                        return K.mean(K.abs(true_labels - predicted_labels))
                The function must take in as arguments the `true_labels`,
                the `predicted_labels` and the `network_inputs`.
        resume: **Bool / default: False**
            | If set to ``True`` then ``globalemu`` will look in the
                ``base_dir`` for a trained model and ``loss_history.txt``
                file (which contains the loss recorded at each epoch) and
                load these in to continue training. If ``resume`` is ``True``
                then you need to make sure all of the kwargs are set the
                with the same values that they had in the initial training
                for a consistent run.
                There will be a human readable file in ``base_dir`` called
                "kwargs.txt" detailing
                the values of the kwargs that were provided for the
                initial training run. Anything missing from this file will
                have had its default value. This file will not be overwritten
                if ``resume=True``.
        random_seed: **int or float / default: None**
            | This kwarg sets the random seed used by tensorflow with the
                function ``tf.random.set_seed(random_seed)``. It should
                be used if you want to have reproducible results but note
                that it may cause an 'out of memory' error if training on
                large amounts of data
                (see https://github.com/tensorflow/tensorflow/issues/37252).
    """
    def __init__(self, **kwargs):
        # Reject unrecognised keyword arguments so that typos fail loudly
        # instead of silently falling back to defaults.
        for key, values in kwargs.items():
            if key not in set(
                    ['batch_size', 'activation', 'epochs',
                     'lr', 'dropout', 'input_shape',
                     'output_shape', 'layer_sizes', 'base_dir',
                     'early_stop', 'early_stop_lim', 'xHI', 'resume',
                     'random_seed', 'output_activation',
                     'loss_function']):
                raise KeyError("Unexpected keyword argument in nn()")
        self.resume = kwargs.pop('resume', False)
        self.base_dir = kwargs.pop('base_dir', 'model_dir/')
        if type(self.base_dir) is not str:
            raise TypeError("'base_dir' must be a sting.")
        elif self.base_dir.endswith('/') is False:
            raise KeyError("'base_dir' must end with '/'.")
        # Record the supplied kwargs (everything not yet popped) in a
        # human-readable file for reproducibility on a fresh run only.
        # NOTE(review): this write happens before the os.mkdir call further
        # down, so it assumes base_dir already exists (preprocessing is
        # expected to have created it) — confirm against the preprocessing
        # step.
        if self.resume is not True:
            with open(self.base_dir + 'kwargs.txt', 'w') as f:
                for key, values in kwargs.items():
                    f.write(str(key) + ': ' + str(values) + '\n')
                # Redundant: the 'with' block already closes the file.
                f.close()
        self.batch_size = kwargs.pop('batch_size', 100)
        self.activation = kwargs.pop('activation', 'tanh')
        if type(self.activation) is not str:
            raise TypeError("'activation' must be a string.")
        self.epochs = kwargs.pop('epochs', 10)
        self.lr = kwargs.pop('lr', 1e-3)
        self.drop_val = kwargs.pop('dropout', 0)
        self.input_shape = kwargs.pop('input_shape', 8)
        self.output_shape = kwargs.pop('output_shape', 1)
        self.layer_sizes = kwargs.pop(
            'layer_sizes', [self.input_shape, self.input_shape])
        if type(self.layer_sizes) is not list:
            raise TypeError("'layer_sizes' must be a list.")
        self.early_stop_lim = kwargs.pop('early_stop_lim', 1e-4)
        self.early_stop = kwargs.pop('early_stop', False)
        self.xHI = kwargs.pop('xHI', False)
        self.random_seed = kwargs.pop('random_seed', None)
        # Type-check the boolean, integer and float kwargs in turn.
        boolean_kwargs = [self.resume, self.early_stop, self.xHI]
        boolean_strings = ['resume', 'early_stop', 'xHI']
        for i in range(len(boolean_kwargs)):
            if type(boolean_kwargs[i]) is not bool:
                raise TypeError("'" + boolean_strings[i] + "' must be a bool.")
        int_kwargs = [self.batch_size, self.epochs, self.input_shape,
                      self.output_shape]
        int_strings = ['batch_size', 'epochs', 'input_shape',
                       'output_shape']
        for i in range(len(int_kwargs)):
            if type(int_kwargs[i]) is not int:
                raise TypeError("'" + int_strings[i] + "' must be a int.")
        float_kwargs = [self.lr, self.early_stop_lim, self.drop_val,
                        self.random_seed]
        float_strings = ['lr', 'early_stop_lim', 'dropout', 'random_seed']
        for i in range(len(float_kwargs)):
            # random_seed may legitimately be None (no seeding).
            if float_kwargs[i] is not None:
                if type(float_kwargs[i]) not in set([float, int]):
                    raise TypeError("'" + float_strings[i] +
                                    "' must be a float.")
        loss_function = kwargs.pop('loss_function', None)
        if loss_function is not None:
            if not callable(loss_function):
                raise TypeError('loss_function should be a callable.')
        if self.random_seed is not None:
            tf.random.set_seed(self.random_seed)
        if not os.path.exists(self.base_dir):
            os.mkdir(self.base_dir)
        pwd = os.getcwd()
        # The training data produced by the preprocessing step.
        train_dataset_fp = pwd + '/' + self.base_dir + 'train_dataset.csv'
        # Columns p0..p(k-1): inputs first, with the label (temperature)
        # as the final column.
        column_names = [
            'p' + str(i)
            for i in range(self.input_shape + self.output_shape)]
        label_names = column_names[-1]
        train_dataset = tf.data.experimental.make_csv_dataset(
            train_dataset_fp,
            self.batch_size,
            column_names=column_names,
            label_name=label_names,
            num_epochs=1)
        def pack_features_vector(features, labels):
            # Stack the per-column feature tensors into one (batch, inputs)
            # tensor per batch.
            return tf.stack(list(features.values()), axis=1), labels
        train_dataset = train_dataset.map(pack_features_vector)
        self.output_activation = kwargs.pop('output_activation', 'linear')
        # Neutral-fraction emulation requires non-negative outputs.
        if self.xHI is True:
            self.output_activation = 'relu'
        if self.resume is True:
            # Continue training the previously saved model.
            model = keras.models.load_model(
                self.base_dir + 'model.h5',
                compile=False)
        else:
            model = network_models().basic_model(
                self.input_shape, self.output_shape,
                self.layer_sizes, self.activation, self.drop_val,
                self.output_activation)
        def loss(model, x, y, training):
            # Model output is (batch, 1); transpose and take row 0 to get
            # a flat vector of predictions.
            y_ = tf.transpose(model(x, training=training))[0]
            lf = loss_functions(y, y_)
            if loss_function is None:
                return lf.mse(), lf.rmse()
            else:
                return loss_function(y, y_, x), lf.rmse()
        def grad(model, inputs, targets):
            # Compute loss under a gradient tape so we can backpropagate.
            with tf.GradientTape() as tape:
                loss_value, rmse = loss(model, inputs, targets, training=True)
            return loss_value, rmse, tape.gradient(
                loss_value, model.trainable_variables)
        optimizer = keras.optimizers.Adam(learning_rate=self.lr)
        if self.resume is True:
            # Extend the previously recorded loss history.
            train_loss_results = list(
                np.loadtxt(self.base_dir + 'loss_history.txt'))
        else:
            train_loss_results = []
        train_rmse_results = []
        num_epochs = self.epochs
        for epoch in range(num_epochs):
            s = time.time()
            epoch_loss_avg = tf.keras.metrics.Mean()
            epoch_rmse_avg = tf.keras.metrics.Mean()
            for x, y in train_dataset:
                loss_values, rmse, grads = grad(model, x, y)
                optimizer.apply_gradients(
                    zip(grads, model.trainable_variables))
                epoch_loss_avg.update_state(loss_values)
                epoch_rmse_avg.update_state(rmse)
            train_loss_results.append(epoch_loss_avg.result())
            train_rmse_results.append(epoch_rmse_avg.result())
            e = time.time()
            print(
                'Epoch: {:03d}, Loss: {:.5f}, RMSE: {:.5f}, Time: {:.3f}'
                .format(
                    epoch, epoch_loss_avg.result(),
                    epoch_rmse_avg.result(), e-s))
            # Stop early when the loss has been flat (within
            # early_stop_lim) over the last ten epochs.
            if self.early_stop is True:
                if len(train_loss_results) > 10:
                    if np.isclose(
                            train_loss_results[-10], train_loss_results[-1],
                            self.early_stop_lim, self.early_stop_lim):
                        print('Early Stop')
                        model.save(self.base_dir + 'model.h5')
                        break
            # Checkpoint the model and loss history every ten epochs.
            if (epoch + 1) % 10 == 0:
                model.save(self.base_dir + 'model.h5')
                np.savetxt(
                    self.base_dir + 'loss_history.txt', train_loss_results)
        # Final save regardless of how the loop ended.
        model.save(self.base_dir + 'model.h5')
        np.savetxt(self.base_dir + 'loss_history.txt', train_loss_results)
| [
"os.path.exists",
"globalemu.losses.loss_functions",
"numpy.isclose",
"tensorflow.random.set_seed",
"tensorflow.keras.metrics.Mean",
"globalemu.models.network_models",
"tensorflow.data.experimental.make_csv_dataset",
"os.getcwd",
"tensorflow.keras.optimizers.Adam",
"tensorflow.GradientTape",
"te... | [((10734, 10745), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (10743, 10745), False, 'import os\n'), ((11002, 11143), 'tensorflow.data.experimental.make_csv_dataset', 'tf.data.experimental.make_csv_dataset', (['train_dataset_fp', 'self.batch_size'], {'column_names': 'column_names', 'label_name': 'label_names', 'num_epochs': '(1)'}), '(train_dataset_fp, self.batch_size,\n column_names=column_names, label_name=label_names, num_epochs=1)\n', (11039, 11143), True, 'import tensorflow as tf\n'), ((12507, 12551), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'learning_rate': 'self.lr'}), '(learning_rate=self.lr)\n', (12528, 12551), False, 'from tensorflow import keras\n'), ((14327, 14393), 'numpy.savetxt', 'np.savetxt', (["(self.base_dir + 'loss_history.txt')", 'train_loss_results'], {}), "(self.base_dir + 'loss_history.txt', train_loss_results)\n", (14337, 14393), True, 'import numpy as np\n'), ((10599, 10635), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['self.random_seed'], {}), '(self.random_seed)\n', (10617, 10635), True, 'import tensorflow as tf\n'), ((10652, 10681), 'os.path.exists', 'os.path.exists', (['self.base_dir'], {}), '(self.base_dir)\n', (10666, 10681), False, 'import os\n'), ((10695, 10718), 'os.mkdir', 'os.mkdir', (['self.base_dir'], {}), '(self.base_dir)\n', (10703, 10718), False, 'import os\n'), ((11590, 11656), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (["(self.base_dir + 'model.h5')"], {'compile': '(False)'}), "(self.base_dir + 'model.h5', compile=False)\n", (11613, 11656), False, 'from tensorflow import keras\n'), ((12034, 12055), 'globalemu.losses.loss_functions', 'loss_functions', (['y', 'y_'], {}), '(y, y_)\n', (12048, 12055), False, 'from globalemu.losses import loss_functions\n'), ((12859, 12870), 'time.time', 'time.time', ([], {}), '()\n', (12868, 12870), False, 'import time\n'), ((12900, 12923), 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {}), '()\n', (12921, 
12923), True, 'import tensorflow as tf\n'), ((12953, 12976), 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {}), '()\n', (12974, 12976), True, 'import tensorflow as tf\n'), ((13430, 13441), 'time.time', 'time.time', ([], {}), '()\n', (13439, 13441), False, 'import time\n'), ((12273, 12290), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (12288, 12290), True, 'import tensorflow as tf\n'), ((12640, 12686), 'numpy.loadtxt', 'np.loadtxt', (["(self.base_dir + 'loss_history.txt')"], {}), "(self.base_dir + 'loss_history.txt')\n", (12650, 12686), True, 'import numpy as np\n'), ((14183, 14249), 'numpy.savetxt', 'np.savetxt', (["(self.base_dir + 'loss_history.txt')", 'train_loss_results'], {}), "(self.base_dir + 'loss_history.txt', train_loss_results)\n", (14193, 14249), True, 'import numpy as np\n'), ((11724, 11740), 'globalemu.models.network_models', 'network_models', ([], {}), '()\n', (11738, 11740), False, 'from globalemu.models import network_models\n'), ((13777, 13883), 'numpy.isclose', 'np.isclose', (['train_loss_results[-10]', 'train_loss_results[-1]', 'self.early_stop_lim', 'self.early_stop_lim'], {}), '(train_loss_results[-10], train_loss_results[-1], self.\n early_stop_lim, self.early_stop_lim)\n', (13787, 13883), True, 'import numpy as np\n')] |
#!/usr/bin/python3 -W ignore
import gzip
import math
import pickle
from functools import partial
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pyproj
from matplotlib.collections import PatchCollection
from matplotlib.colors import LinearSegmentedColormap, Normalize
from matplotlib.patches import PathPatch
from matplotlib.path import Path
from mpl_toolkits.axes_grid1 import make_axes_locatable
from shapely.geometry import Point
from shapely.ops import transform
def create_colormap(name, colors, alphas=None, xs=None):
    """Build a ``LinearSegmentedColormap`` from a list of colours.

    Arguments:
        - name: name given to the colormap.
        - colors: list of colour specifications (e.g. hex strings).
        - alphas: optional per-colour alpha values.
        - xs: optional anchor positions in [0, 1]; evenly spaced when
          omitted.

    Returns the colormap, with its under/over colours set to the first
    and last entries respectively.
    """
    def hex_to_rgba(c, alpha=1):
        return tuple(list(matplotlib.colors.hex2color(c)) + [alpha])

    # matplotlib 2.x provides to_rgba; otherwise fall back to the
    # hand-rolled hex converter above.
    if str(matplotlib.__version__).startswith("2"):
        to_rgba = matplotlib.colors.to_rgba
    else:
        to_rgba = hex_to_rgba

    if alphas is None:
        rgba = [to_rgba(c) for c in colors]
    else:
        rgba = [to_rgba(c, alpha=a) for c, a in zip(colors, alphas)]
    if xs is None:
        xs = np.linspace(0, 1, len(rgba))

    # Segment data: for each channel, a tuple of (x, value, value) anchors.
    segments = {}
    for channel_id, channel in enumerate(["red", "green", "blue", "alpha"]):
        segments[channel] = tuple(
            (x, float(c[channel_id]), float(c[channel_id]))
            for c, x in zip(rgba, xs)
        )

    cmap = LinearSegmentedColormap(name, segments, N=2048)
    cmap.set_under(rgba[0])
    cmap.set_over(rgba[-1])
    return cmap
def make_map(
    patchespickle_file,
    regions,
    data,
    show_cbar=True,
    cm=None,
    outfile=None,
    ax=None,
    cax=None,
    extend_c="both",
    ignore_regions=None,
    invalid_edgecolor="lightgrey",
    invalid_facecolor="lightgrey",
    linewidth=0.1,
    norm_color=None,
    numbering=None,
    numbering_fontsize=10,
    rasterize=True,
    title=None,
    title_fontsize=10,
    valid_edgecolor="black",
    y_label=None,
    y_label_fontsize=10,
    y_ticks=None,
    y_tick_labels=None,
    y_ticks_fontsize=8,
    lims=None,
    only_usa=False,
    v_limits=None
):
    """Draw a choropleth world map from per-region values.

    Region outlines come from a gzipped pickle (``patchespickle_file``)
    containing matplotlib patches plus the projection name used to build
    them. ``regions``/``data`` are parallel sequences; regions with NaN
    data, regions in ``ignore_regions`` and regions without a patch are
    hatched/greyed out. ``outfile`` may be a filename (a new figure is
    created and saved) or — presumably — a gridspec/SubplotSpec to draw
    into (TODO confirm against callers).
    """
    if ignore_regions is None:
        ignore_regions = ["ATA"]
    if cm is None:
        cm = create_colormap("custom", ["red", "white", "blue"], xs=[0, 0.5, 1])
    patchespickle = pickle.load(gzip.GzipFile(patchespickle_file, "rb"))
    patches = patchespickle["patches"]
    projection_name = patchespickle["projection"]
    # Colour-scale limits: explicit v_limits > y_ticks endpoints > data range.
    if y_ticks is None:
        vmin = np.min(data)
        vmax = np.max(data)
    else:
        vmin = y_ticks[0]
        vmax = y_ticks[-1]
    if v_limits is not None:
        (vmin, vmax) = v_limits
    if norm_color is None:
        norm_color = Normalize(vmin=vmin, vmax=vmax)
    def EmptyPatch():
        # Degenerate single-point path used as a placeholder so that
        # validpatches stays aligned with the data sequence.
        return PathPatch(Path([(0, 0)], [Path.MOVETO]))
    def my_transform(scale, t, trans, x, y):
        p = trans(x, y)
        return (p[0] * scale + t[0], p[1] * scale + t[1])
    def get_projection(to, scale=1, translate=(0, 0)):
        # lon/lat (WGS84) -> target projection, with optional scaling
        # and translation applied afterwards.
        return partial(
            my_transform,
            scale,
            translate,
            partial(
                pyproj.transform,
                pyproj.Proj("+proj=lonlat +datum=WGS84 +no_defs"),
                pyproj.Proj(f"+proj={to} +datum=WGS84 +no_defs"),
            ),
        )
    projection = get_projection(projection_name)
    if lims is None:
        miny, maxy, minx, maxx = -58, 89, -156, 170
    else:
        miny, maxy, minx, maxx = lims
    # Project the lon/lat axis limits into map coordinates.
    minx = transform(projection, Point(minx, 0)).x
    maxx = transform(projection, Point(maxx, 0)).x
    miny = transform(projection, Point(0, miny)).y
    maxy = transform(projection, Point(0, maxy)).y
    width_ratios = [1]  # , 0.005, 0.03] TODO
    if isinstance(outfile, str):
        # figure widths: 2.25 inches (1 column) or 4.75 inches (2 columns)
        fig = plt.figure(figsize=(4.75, 3))
        gs_base = plt.GridSpec(1, len(width_ratios), width_ratios=width_ratios, wspace=0)
    elif ax is None:
        fig = outfile.get_gridspec().figure
        gs_base = outfile.subgridspec(1, len(width_ratios), width_ratios=width_ratios, wspace=0)
    if ax is None:
        ax = fig.add_subplot(gs_base[:, 0])
    ax.set_xlim(minx, maxx)
    ax.set_ylim(miny, maxy)
    ax.set_aspect(1)
    ax.set_xticks([])
    ax.set_yticks([])
    ax.axis("off")
    if title is not None:
        ax.set_title(title, fontsize=title_fontsize)
    invpatches = []
    validpatches = []
    regions_with_data = set([])
    for r, d in zip(regions, data):
        if r in patches:
            level, subregions, patch = patches[r]
            if only_usa:
                # Shrink and move Alaska / move Hawaii towards the lower 48.
                # NOTE(review): `r in 'US.AK'` is a substring test (it also
                # matches e.g. r == 'US'); presumably `r == 'US.AK'` was
                # intended — confirm against the region codes used.
                if r in 'US.AK':
                    patch.set_transform(patch.get_transform() + matplotlib.transforms.Affine2D().scale(
                        0.4) + matplotlib.transforms.ScaledTranslation(transform(projection, Point(-25, 0)).x,
                                                                 transform(projection, Point(0, -14)).y,
                                                                 patch.get_transform()))  #
                elif r == 'US.HI':
                    patch.set_transform(
                        patch.get_transform() + matplotlib.transforms.ScaledTranslation(
                            transform(projection, Point(-15, 0)).x,
                            transform(projection, Point(0, -18)).y,
                            patch.get_transform()))
            if math.isnan(d):
                validpatches.append(EmptyPatch())
                invpatches.append(patch)
                print('NAN data for region {}'.format(r))
            elif r in ignore_regions:
                validpatches.append(EmptyPatch())
                invpatches.append(patch)
                print('Ignore region {}'.format(r))
            else:
                validpatches.append(patch)
                regions_with_data.update(subregions)
        else:
            validpatches.append(EmptyPatch())
    # for r, (level, subregions, patch) in patches.items():
    #     if not level and (r not in ignore_regions and subregions.isdisjoint(regions_with_data)):
    #         invpatches.append(patch)
    # Hatched collection for regions without usable data.
    ax.add_collection(
        PatchCollection(
            invpatches,
            hatch="///",
            facecolors=invalid_facecolor,
            edgecolors=invalid_edgecolor,
            linewidths=linewidth,
            rasterized=rasterize,
        )
    )
    if numbering is not None:
        ax.text(
            0.0, 1.0, numbering, fontsize=numbering_fontsize, transform=ax.transAxes, fontweight='bold'
        )
    region_collection = ax.add_collection(
        PatchCollection(
            validpatches,
            edgecolors=valid_edgecolor,
            facecolors="black",
            linewidths=linewidth,
            rasterized=rasterize,
        )
    )
    if show_cbar:
        if cax is None:
            divider = make_axes_locatable(ax)
            cax = divider.append_axes("right", size="5%", pad=0.1)
        cbar = matplotlib.colorbar.ColorbarBase(
            cax,
            cmap=cm,
            norm=norm_color,
            ticks=y_ticks,
            orientation="vertical",
            spacing="proportional",
            extend=extend_c,
        )
        cbar.minorticks_on()
        if y_tick_labels is not None:
            cbar.ax.set_yticklabels(y_tick_labels)
        if y_label is not None:
            cax.set_ylabel(y_label, fontsize=y_label_fontsize)
        cax.tick_params(axis="y", labelsize=y_ticks_fontsize)
    # region_collection.set_facecolors('r')
    # Colour the valid regions by their (normalised) data values.
    region_collection.set_facecolors(cm(norm_color(data)))
    if isinstance(outfile, str):
        # plt.subplots_adjust(bottom=0.02, top=0.98, left=0.05, right=0.9)
        plt.tight_layout()
        fig.savefig(outfile, dpi=300)
# Module-level default colormap shared by callers; red below the midpoint,
# blue above, with white anchored at 2/3 of the scale.
cm = create_colormap("custom", ["red", "white", "blue"], xs=[0, 0.6667, 1])
def do_plot(d, label, numbering, fig, ax, cax, cm=None):
    """Render one consumption-deviation world map into the given figure slots.

    d: table with 'region' and 'consumption_deviation' columns.
    label: colorbar axis label; numbering: subplot tag drawn in the corner.
    fig/ax/cax: target figure plus subplot specs for the map and its colorbar.
    cm: optional colormap; defaults to the red-white-blue diverging map.
    """
    if cm is None:
        cm = create_colormap("custom", ["red", "white", "blue"], xs=[0, 0.6667, 1])
    make_map(
        patchespickle_file="../data/external/maps/map_robinson_0.1simplified.pkl.gz",
        regions=d['region'].array,
        data=d["consumption_deviation"].array,
        y_ticks=[-1.0, -0.75, -0.5, -0.25, 0.0, 0.25, 0.5],
        y_label=label,
        numbering=numbering,
        extend_c="both",
        ax=fig.add_subplot(ax),
        cax=fig.add_subplot(cax),
        cm=cm,
    )
| [
"matplotlib.path.Path",
"matplotlib.colorbar.ColorbarBase",
"numpy.max",
"matplotlib.collections.PatchCollection",
"gzip.GzipFile",
"matplotlib.pyplot.figure",
"shapely.geometry.Point",
"matplotlib.colors.hex2color",
"matplotlib.transforms.Affine2D",
"matplotlib.colors.Normalize",
"numpy.min",
... | [((2307, 2346), 'gzip.GzipFile', 'gzip.GzipFile', (['patchespickle_file', '"""rb"""'], {}), "(patchespickle_file, 'rb')\n", (2320, 2346), False, 'import gzip\n'), ((2477, 2489), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (2483, 2489), True, 'import numpy as np\n'), ((2505, 2517), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (2511, 2517), True, 'import numpy as np\n'), ((2692, 2723), 'matplotlib.colors.Normalize', 'Normalize', ([], {'vmin': 'vmin', 'vmax': 'vmax'}), '(vmin=vmin, vmax=vmax)\n', (2701, 2723), False, 'from matplotlib.colors import LinearSegmentedColormap, Normalize\n'), ((3836, 3865), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4.75, 3)'}), '(figsize=(4.75, 3))\n', (3846, 3865), True, 'import matplotlib.pyplot as plt\n'), ((6184, 6332), 'matplotlib.collections.PatchCollection', 'PatchCollection', (['invpatches'], {'hatch': '"""///"""', 'facecolors': 'invalid_facecolor', 'edgecolors': 'invalid_edgecolor', 'linewidths': 'linewidth', 'rasterized': 'rasterize'}), "(invpatches, hatch='///', facecolors=invalid_facecolor,\n edgecolors=invalid_edgecolor, linewidths=linewidth, rasterized=rasterize)\n", (6199, 6332), False, 'from matplotlib.collections import PatchCollection\n'), ((6632, 6758), 'matplotlib.collections.PatchCollection', 'PatchCollection', (['validpatches'], {'edgecolors': 'valid_edgecolor', 'facecolors': '"""black"""', 'linewidths': 'linewidth', 'rasterized': 'rasterize'}), "(validpatches, edgecolors=valid_edgecolor, facecolors=\n 'black', linewidths=linewidth, rasterized=rasterize)\n", (6647, 6758), False, 'from matplotlib.collections import PatchCollection\n'), ((7002, 7150), 'matplotlib.colorbar.ColorbarBase', 'matplotlib.colorbar.ColorbarBase', (['cax'], {'cmap': 'cm', 'norm': 'norm_color', 'ticks': 'y_ticks', 'orientation': '"""vertical"""', 'spacing': '"""proportional"""', 'extend': 'extend_c'}), "(cax, cmap=cm, norm=norm_color, ticks=\n y_ticks, orientation='vertical', spacing='proportional', 
extend=extend_c)\n", (7034, 7150), False, 'import matplotlib\n'), ((7737, 7755), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7753, 7755), True, 'import matplotlib.pyplot as plt\n'), ((2772, 2801), 'matplotlib.path.Path', 'Path', (['[(0, 0)]', '[Path.MOVETO]'], {}), '([(0, 0)], [Path.MOVETO])\n', (2776, 2801), False, 'from matplotlib.path import Path\n'), ((3496, 3510), 'shapely.geometry.Point', 'Point', (['minx', '(0)'], {}), '(minx, 0)\n', (3501, 3510), False, 'from shapely.geometry import Point\n'), ((3547, 3561), 'shapely.geometry.Point', 'Point', (['maxx', '(0)'], {}), '(maxx, 0)\n', (3552, 3561), False, 'from shapely.geometry import Point\n'), ((3598, 3612), 'shapely.geometry.Point', 'Point', (['(0)', 'miny'], {}), '(0, miny)\n', (3603, 3612), False, 'from shapely.geometry import Point\n'), ((3649, 3663), 'shapely.geometry.Point', 'Point', (['(0)', 'maxy'], {}), '(0, maxy)\n', (3654, 3663), False, 'from shapely.geometry import Point\n'), ((5438, 5451), 'math.isnan', 'math.isnan', (['d'], {}), '(d)\n', (5448, 5451), False, 'import math\n'), ((6896, 6919), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (6915, 6919), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((3150, 3199), 'pyproj.Proj', 'pyproj.Proj', (['"""+proj=lonlat +datum=WGS84 +no_defs"""'], {}), "('+proj=lonlat +datum=WGS84 +no_defs')\n", (3161, 3199), False, 'import pyproj\n'), ((3217, 3265), 'pyproj.Proj', 'pyproj.Proj', (['f"""+proj={to} +datum=WGS84 +no_defs"""'], {}), "(f'+proj={to} +datum=WGS84 +no_defs')\n", (3228, 3265), False, 'import pyproj\n'), ((612, 642), 'matplotlib.colors.hex2color', 'matplotlib.colors.hex2color', (['c'], {}), '(c)\n', (639, 642), False, 'import matplotlib\n'), ((4709, 4741), 'matplotlib.transforms.Affine2D', 'matplotlib.transforms.Affine2D', ([], {}), '()\n', (4739, 4741), False, 'import matplotlib\n'), ((4842, 4855), 'shapely.geometry.Point', 'Point', (['(-25)', '(0)'], 
{}), '(-25, 0)\n', (4847, 4855), False, 'from shapely.geometry import Point\n'), ((4953, 4966), 'shapely.geometry.Point', 'Point', (['(0)', '(-14)'], {}), '(0, -14)\n', (4958, 4966), False, 'from shapely.geometry import Point\n'), ((5285, 5298), 'shapely.geometry.Point', 'Point', (['(-15)', '(0)'], {}), '(-15, 0)\n', (5290, 5298), False, 'from shapely.geometry import Point\n'), ((5353, 5366), 'shapely.geometry.Point', 'Point', (['(0)', '(-18)'], {}), '(0, -18)\n', (5358, 5366), False, 'from shapely.geometry import Point\n')] |
import argparse
import numpy as np
import matplotlib.pyplot as plt
from randomwalk import RandomWalk
from utils import vhat_st_agg, nab_vhat_st_agg
from off_lam_ret import OffLamRet
# from on_lam_ret import OnLamRet
from semi_grad_td_lam import SemiGradTDLam
from true_online_td import TrueOnlineTD
from mountain_car import MountainCar, X_MAX, X_MIN, V_MAX, V_MIN
from tiles_sutton import IHT, tiles
from sarsa_lam import SarsaLam, SarsaLamAcc, SarsaLamClr
from true_online_sarsa import TrueOnlineSarsa
# Interactive backend for the debug plots.
plt.switch_backend('Qt5Agg')
# Font sizes used across the figures.
BIG_FONT = 20
MED_FONT = 15
SMA_FONT = 13
# Figure 12.3 (offline lambda-return) sweep settings: lambdas, episodes,
# number of random-walk states, runs, and discount.
FIG_12_3_LAM_L = [0, .4, .8, .9, .95, .975, .99, 1]
FIG_12_3_N_EP = 10
FIG_12_3_N_ST = 19
FIG_12_3_N_RUNS = 1
FIG_12_3_G = 1
# Figure 12.6 reuses the 12.3 configuration.
FIG_12_6_LAM_L = FIG_12_3_LAM_L
FIG_12_6_N_EP = FIG_12_3_N_EP
FIG_12_6_N_ST = FIG_12_3_N_ST
FIG_12_6_N_RUNS = FIG_12_3_N_RUNS
FIG_12_6_G = FIG_12_3_G
# Tile-coding sizes: hash-table entries and number of tilings.
N_TIL = 4096
N_TLGS = 8
# Figure 12.10 (Sarsa(lambda) on Mountain Car) sweep settings.
FIG_12_10_G = 1
FIG_12_10_EPS = 0
FIG_12_10_LAM_L = [0, .68, .84, .92, .96, .98, .99]
FIG_12_10_ALP_MIN, FIG_12_10_ALP_MAX = 0.4, 1.5
FIG_12_10_N_PTS = 10
FIG_12_10_N_RUNS = 20
FIG_12_10_N_EP = 50
FIG_12_10_MAX_STEPS = 1000
# Figure 12.11 (trace-variant comparison) settings.
FIG_12_11_G = FIG_12_10_G
FIG_12_11_EPS = FIG_12_10_EPS
FIG_12_11_N_PTS = FIG_12_10_N_PTS
FIG_12_11_N_RUNS = 5
FIG_12_11_N_EP = 20
FIG_12_11_MAX_STEPS = 5000
FIG_12_11_LAM = 0.92
# Per-algorithm [min, max] alpha bounds swept in figure 12.11.
FIG_12_11_ALP_BND = {
  SarsaLamClr: [.2, 2],
  SarsaLam: [.2, 2],
  TrueOnlineSarsa: [.2, 1.8],
  SarsaLamAcc: [.2, .5],
}
# Legend labels for each algorithm in figure 12.11.
FIG_12_11_ALG_STR = {
  SarsaLamClr: "Sarsa(Lambda) w/ replacing/clearing traces",
  SarsaLam: "Sarsa(Lambda) w/ replacing traces",
  SarsaLamAcc: "Sarsa(Lambda) w/ accumulating traces",
  TrueOnlineSarsa: "True Online Sarsa(Lambda)",
}
def get_idxs(iht, x, xdot, a):
  """Return the active tile indices for state (x, xdot) under action a.

  Position and velocity are rescaled so each dimension spans N_TLGS units
  before hashing into the index hash table `iht`.
  """
  scaled_pos = N_TLGS * x / (X_MAX - X_MIN)
  scaled_vel = N_TLGS * xdot / (V_MAX - V_MIN)
  return tiles(iht, N_TLGS, [scaled_pos, scaled_vel], [a])
def get_fn_mc(n_til, n_tlgs):
  """Build tile-coding feature and action-value functions for Mountain Car.

  n_til: size of the index hash table (number of tiles).
  n_tlgs: number of tilings (kept for interface symmetry; get_idxs reads the
          module-level N_TLGS for the actual tiling count).

  Returns (idxs, qhat): idxs(s, a) gives the active feature indices for
  state s = (position, velocity); qhat(s, a, w) is the linear value estimate
  under weight vector w.
  """
  iht = IHT(n_til)  # was IHT(N_TIL): honor the caller-supplied table size
  def idxs(s, a): return get_idxs(iht, s[0], s[1], a)
  def qhat(s, a, w): return np.sum(w[idxs(s, a)])
  return idxs, qhat
def save_plot(filename, dpi=None):
  """Write the current matplotlib figure to plots/<filename>.png."""
  plt.savefig(f'plots/{filename}.png', dpi=dpi)
def plot_figure(ax, title, xticks, xnames, xlabel, yticks, ynames, ylabel,
        labelpad=15, font=SMA_FONT, loc='upper left'):
  """Apply shared title / tick / label / legend styling to an axes.

  Axis limits are set to the extent of the supplied tick positions; the
  y label is drawn horizontally (rotation=0) with the given padding.
  """
  ax.set_title(title, fontsize=font)
  # Configure x then y: tick positions, tick labels.
  for set_ticks, set_names, ticks, names in (
      (ax.set_xticks, ax.set_xticklabels, xticks, xnames),
      (ax.set_yticks, ax.set_yticklabels, yticks, ynames)):
    set_ticks(ticks)
    set_names(names)
  ax.set_xlim([min(xticks), max(xticks)])
  ax.set_ylim([min(yticks), max(yticks)])
  ax.set_xlabel(xlabel, fontsize=font)
  ax.set_ylabel(ylabel, rotation=0, fontsize=font, labelpad=labelpad)
  plt.legend(loc=loc)
def run_random_walks(ax, alg, lam_l, n_ep, n_runs, sub=3):
  """Sweep (lambda, alpha) on the random walk and plot RMS error vs alpha.

  For each lambda in lam_l, runs `alg` for n_runs seeds x n_ep episodes at
  31 alpha values and plots the averaged root-mean-square value error
  against the analytic true values. One curve per lambda is added to the
  current figure (`ax` is unused here; plt.plot targets the active axes).
  """
  # Equiprobable random policy over every (action, state) pair.
  pi = {(a, s): 1.0 for s in alg.env.states for a in alg.env.moves_d[s]}
  # True state values for the symmetric random walk, excluding terminals.
  true_vals = np.linspace(-1, 1, alg.env.n_states + 2)[1:-1]
  for (k, lam) in enumerate(lam_l):
    alg.lam = lam
    print(f"[LAMBDA={lam}]")
    err_l = []
    # Large lambdas diverge at big step sizes, so shrink the alpha range;
    # the 1/(2*(k-sub)) schedule assumes lam_l is ordered as in the figures.
    alpha_max = 1 if (lam <= 0.95) else 1 / (2 * (k - sub))
    alpha_l = np.linspace(0, alpha_max, 31)
    for alpha in alpha_l:
      alg.a = alpha
      print(f"[ALPHA={alpha}]")
      err_sum = 0
      for seed in range(n_runs):
        alg.reset()
        alg.seed(seed)
        for ep in range(n_ep):
          alg.pol_eva(pi, n_ep=1)
          # Drop the trailing terminal-state entry before comparing.
          v_arr = np.array(alg.get_value_list()[:-1])
          err_sum += np.sqrt(np.sum((v_arr-true_vals) ** 2) / alg.env.n_states)
      err_l.append(err_sum / (n_runs * n_ep))
    plt.plot(alpha_l, err_l, label=f'lam={lam}')
def benchmark(alg_class, title, fn, sub=3):
  """Run `alg_class` on the 19-state random walk and save the error figure.

  alg_class: prediction algorithm constructor (e.g. OffLamRet).
  title: figure title; fn: output filename stem for save_plot.
  sub: passed through to run_random_walks' alpha-range schedule.
  """
  fig, ax = plt.subplots()
  fig.suptitle(title, fontsize=BIG_FONT)
  fig.set_size_inches(20, 14)
  # State-aggregation value function and its gradient for the walk.
  def vhat(s, w): return vhat_st_agg(s, w, FIG_12_3_N_ST)
  def nab_vhat(s, w): return nab_vhat_st_agg(s, w, FIG_12_3_N_ST)
  alg = alg_class(RandomWalk(), None, FIG_12_3_N_ST, None, vhat, nab_vhat,
    FIG_12_3_G)
  xticks, yticks = np.linspace(0, 1, 6), np.linspace(0.25, 0.55, 7)
  # Truncate tick labels to three characters for readability.
  def short_str(x): return str(x)[:3]
  xnames, ynames = map(short_str, xticks), map(short_str, yticks)
  run_random_walks(ax, alg, FIG_12_3_LAM_L, FIG_12_3_N_EP, FIG_12_3_N_RUNS, sub)
  plot_figure(ax, '', xticks, xnames, 'alpha', yticks, ynames,
        (f'Average\nRMS error\n({FIG_12_3_N_ST} states,\n ' +
         f'{FIG_12_3_N_EP} episodes)'), font=MED_FONT, labelpad=40,
        loc='upper right')
  save_plot(fn, dpi=100)
  plt.show()
def fig_12_3():
  # Offline lambda-return algorithm on the random walk.
  benchmark(OffLamRet, 'Figure 12.3', 'fig12.3')
def fig_12_6():
  # Semi-gradient TD(lambda) on the random walk.
  benchmark(SemiGradTDLam, 'Figure 12.6', 'fig12.6')
def fig_12_8():
  # True online TD(lambda) on the random walk (online lambda-return disabled).
  # benchmark(OnLamRet, 'Figure 12.8', 'fig12.8')
  benchmark(TrueOnlineTD, 'Figure 12.8', 'fig12.8', sub=4)
def fig_12_10():
  """Sarsa(lambda) on Mountain Car: average steps/episode vs alpha,
  one curve per lambda, reproducing figure 12.10."""
  fig, ax = plt.subplots()
  for lam in FIG_12_10_LAM_L:
    print(f"[LAM={lam}]")
    steps_l = []
    alpha_l = np.linspace(FIG_12_10_ALP_MIN, FIG_12_10_ALP_MAX, FIG_12_10_N_PTS)
    for alpha in alpha_l:
      # Fresh tile coder per alpha so hash-table contents don't leak between runs.
      F, qhat = get_fn_mc(N_TIL, N_TLGS)
      # Step size is divided by the number of tilings (per-tiling step size).
      alg = SarsaLam(MountainCar(), alpha / N_TLGS, N_TIL * N_TLGS, lam, F,
               qhat, FIG_12_10_EPS, FIG_12_10_G)
      print(f"[ALPHA={alg.a}]")
      tot_steps = 0
      for seed in range(FIG_12_10_N_RUNS):
        print(f"[RUN #{seed}]")
        alg.reset()
        alg.seed(seed)
        for ep in range(FIG_12_10_N_EP):
          print(f"[EP #{ep}]")
          tot_steps += alg.pol_eva(None, 1, max_steps=FIG_12_10_MAX_STEPS)[0]
      steps_l.append(tot_steps / (FIG_12_10_N_RUNS * FIG_12_10_N_EP))
    plt.plot(alpha_l, steps_l, label=f'lam={lam}')
  xticks, yticks = np.linspace(0.5, 1.5, 5), np.linspace(180, 300, 7)
  left_title = (f'Mountain Car\nSteps per\nepisode\n(averaged \nover ' +
         f'first\n{FIG_12_10_N_EP} episodes\n{FIG_12_10_N_RUNS} runs)')
  plot_figure(ax, 'Figure 12.10', list(xticks) + [1.6], xticks,
        f'alpha * number of tilings ({N_TLGS})',
        [160] + list(yticks), yticks, left_title, labelpad=35)
  fig.set_size_inches(20, 14)
  plt.legend()
  save_plot('fig12.10', dpi=100)
  plt.show()
def fig_12_11():
  """Compare Sarsa(lambda) trace variants on Mountain Car: average reward
  per episode vs alpha at a fixed lambda, reproducing figure 12.11."""
  fig, ax = plt.subplots()
  F, qhat = get_fn_mc(N_TIL, N_TLGS)
  for alg_name in FIG_12_11_ALG_STR.keys():
    steps_l = []
    # Each algorithm has its own stable alpha range.
    alpha_l = np.linspace(*FIG_12_11_ALP_BND[alg_name], FIG_12_11_N_PTS)
    for alpha in alpha_l:
      alg = alg_name(MountainCar(), alpha / N_TLGS, N_TIL * N_TLGS,
              FIG_12_11_LAM, F, qhat, FIG_12_11_EPS, FIG_12_11_G)
      print(f"[ALPHA={alg.a}]")
      tot_steps = 0
      for seed in range(FIG_12_11_N_RUNS):
        print(f"[RUN #{seed}]")
        alg.reset()
        alg.seed(seed)
        for ep in range(FIG_12_11_N_EP):
          tot_steps += alg.pol_eva(None, 1, max_steps=FIG_12_11_MAX_STEPS)[0]
      steps_l.append(tot_steps / (FIG_12_11_N_RUNS * FIG_12_11_N_EP))
    # Reward per episode is minus the step count (reward -1 per step setting).
    plt.plot(alpha_l, -np.array(steps_l), label=FIG_12_11_ALG_STR[alg_name])
  xticks, yticks = np.linspace(0.2, 2, 10), np.linspace(-550, -150, 9)
  xnames = map(lambda x: str(x)[:3], xticks)
  left_title = (f'Mountain Car\nReward per\nepisode\n(averaged \nover ' +
         f'first\n{FIG_12_11_N_EP} episodes\n{FIG_12_11_N_RUNS} runs)')
  plot_figure(ax, 'Figure 12.11', xticks, xnames,
        f'alpha * number of tilings ({N_TLGS})',
        yticks, yticks, left_title, labelpad=45)
  fig.set_size_inches(20, 14)
  plt.legend()
  save_plot('fig12.11', dpi=100)
  plt.show()
# Maps CLI figure names to their plotting routines.
PLOT_FUNCTION = {
  '12.3': fig_12_3,
  '12.6': fig_12_6,
  '12.8': fig_12_8,
  '12.10': fig_12_10,
  '12.11': fig_12_11,
}
def main():
  """Parse the requested figure name and run the matching plot routine(s)."""
  parser = argparse.ArgumentParser()
  parser.add_argument('figure', type=str, default=None,
            help='Figure to reproduce.',
            choices=list(PLOT_FUNCTION.keys()) + ['all'])
  choice = parser.parse_args().figure
  # 'all' expands to every registered figure, in registration order.
  targets = PLOT_FUNCTION if choice == 'all' else {choice: PLOT_FUNCTION[choice]}
  for name, plot_fn in targets.items():
    print(f"[{name}]")
    plot_fn()
# Script entry point: dispatch on the figure name given on the command line.
if __name__ == "__main__":
  main()
| [
"matplotlib.pyplot.savefig",
"tiles_sutton.IHT",
"argparse.ArgumentParser",
"utils.vhat_st_agg",
"matplotlib.pyplot.plot",
"tiles_sutton.tiles",
"numpy.array",
"numpy.linspace",
"numpy.sum",
"utils.nab_vhat_st_agg",
"matplotlib.pyplot.switch_backend",
"randomwalk.RandomWalk",
"mountain_car.M... | [((504, 532), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""Qt5Agg"""'], {}), "('Qt5Agg')\n", (522, 532), True, 'import matplotlib.pyplot as plt\n'), ((1686, 1778), 'tiles_sutton.tiles', 'tiles', (['iht', 'N_TLGS', '[N_TLGS * x / (X_MAX - X_MIN), N_TLGS * xdot / (V_MAX - V_MIN)]', '[a]'], {}), '(iht, N_TLGS, [N_TLGS * x / (X_MAX - X_MIN), N_TLGS * xdot / (V_MAX -\n V_MIN)], [a])\n', (1691, 1778), False, 'from tiles_sutton import IHT, tiles\n'), ((1830, 1840), 'tiles_sutton.IHT', 'IHT', (['N_TIL'], {}), '(N_TIL)\n', (1833, 1840), False, 'from tiles_sutton import IHT, tiles\n'), ((2004, 2054), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('plots/' + filename + '.png')"], {'dpi': 'dpi'}), "('plots/' + filename + '.png', dpi=dpi)\n", (2015, 2054), True, 'import matplotlib.pyplot as plt\n'), ((2533, 2552), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': 'loc'}), '(loc=loc)\n', (2543, 2552), True, 'import matplotlib.pyplot as plt\n'), ((3474, 3488), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3486, 3488), True, 'import matplotlib.pyplot as plt\n'), ((4307, 4317), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4315, 4317), True, 'import matplotlib.pyplot as plt\n'), ((4614, 4628), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4626, 4628), True, 'import matplotlib.pyplot as plt\n'), ((5864, 5876), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5874, 5876), True, 'import matplotlib.pyplot as plt\n'), ((5912, 5922), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5920, 5922), True, 'import matplotlib.pyplot as plt\n'), ((5954, 5968), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5966, 5968), True, 'import matplotlib.pyplot as plt\n'), ((7204, 7216), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7214, 7216), True, 'import matplotlib.pyplot as plt\n'), ((7252, 7262), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', 
(7260, 7262), True, 'import matplotlib.pyplot as plt\n'), ((7414, 7439), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7437, 7439), False, 'import argparse\n'), ((2701, 2741), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(alg.env.n_states + 2)'], {}), '(-1, 1, alg.env.n_states + 2)\n', (2712, 2741), True, 'import numpy as np\n'), ((2920, 2949), 'numpy.linspace', 'np.linspace', (['(0)', 'alpha_max', '(31)'], {}), '(0, alpha_max, 31)\n', (2931, 2949), True, 'import numpy as np\n'), ((3371, 3415), 'matplotlib.pyplot.plot', 'plt.plot', (['alpha_l', 'err_l'], {'label': 'f"""lam={lam}"""'}), "(alpha_l, err_l, label=f'lam={lam}')\n", (3379, 3415), True, 'import matplotlib.pyplot as plt\n'), ((3585, 3617), 'utils.vhat_st_agg', 'vhat_st_agg', (['s', 'w', 'FIG_12_3_N_ST'], {}), '(s, w, FIG_12_3_N_ST)\n', (3596, 3617), False, 'from utils import vhat_st_agg, nab_vhat_st_agg\n'), ((3647, 3683), 'utils.nab_vhat_st_agg', 'nab_vhat_st_agg', (['s', 'w', 'FIG_12_3_N_ST'], {}), '(s, w, FIG_12_3_N_ST)\n', (3662, 3683), False, 'from utils import vhat_st_agg, nab_vhat_st_agg\n'), ((3702, 3714), 'randomwalk.RandomWalk', 'RandomWalk', ([], {}), '()\n', (3712, 3714), False, 'from randomwalk import RandomWalk\n'), ((3808, 3828), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(6)'], {}), '(0, 1, 6)\n', (3819, 3828), True, 'import numpy as np\n'), ((3830, 3856), 'numpy.linspace', 'np.linspace', (['(0.25)', '(0.55)', '(7)'], {}), '(0.25, 0.55, 7)\n', (3841, 3856), True, 'import numpy as np\n'), ((4716, 4782), 'numpy.linspace', 'np.linspace', (['FIG_12_10_ALP_MIN', 'FIG_12_10_ALP_MAX', 'FIG_12_10_N_PTS'], {}), '(FIG_12_10_ALP_MIN, FIG_12_10_ALP_MAX, FIG_12_10_N_PTS)\n', (4727, 4782), True, 'import numpy as np\n'), ((5375, 5421), 'matplotlib.pyplot.plot', 'plt.plot', (['alpha_l', 'steps_l'], {'label': 'f"""lam={lam}"""'}), "(alpha_l, steps_l, label=f'lam={lam}')\n", (5383, 5421), True, 'import matplotlib.pyplot as plt\n'), ((5441, 5465), 'numpy.linspace', 
'np.linspace', (['(0.5)', '(1.5)', '(5)'], {}), '(0.5, 1.5, 5)\n', (5452, 5465), True, 'import numpy as np\n'), ((5467, 5491), 'numpy.linspace', 'np.linspace', (['(180)', '(300)', '(7)'], {}), '(180, 300, 7)\n', (5478, 5491), True, 'import numpy as np\n'), ((6081, 6139), 'numpy.linspace', 'np.linspace', (['*FIG_12_11_ALP_BND[alg_name]', 'FIG_12_11_N_PTS'], {}), '(*FIG_12_11_ALP_BND[alg_name], FIG_12_11_N_PTS)\n', (6092, 6139), True, 'import numpy as np\n'), ((6762, 6785), 'numpy.linspace', 'np.linspace', (['(0.2)', '(2)', '(10)'], {}), '(0.2, 2, 10)\n', (6773, 6785), True, 'import numpy as np\n'), ((6787, 6813), 'numpy.linspace', 'np.linspace', (['(-550)', '(-150)', '(9)'], {}), '(-550, -150, 9)\n', (6798, 6813), True, 'import numpy as np\n'), ((4871, 4884), 'mountain_car.MountainCar', 'MountainCar', ([], {}), '()\n', (4882, 4884), False, 'from mountain_car import MountainCar, X_MAX, X_MIN, V_MAX, V_MIN\n'), ((6187, 6200), 'mountain_car.MountainCar', 'MountainCar', ([], {}), '()\n', (6198, 6200), False, 'from mountain_car import MountainCar, X_MAX, X_MIN, V_MAX, V_MIN\n'), ((6689, 6706), 'numpy.array', 'np.array', (['steps_l'], {}), '(steps_l)\n', (6697, 6706), True, 'import numpy as np\n'), ((3270, 3302), 'numpy.sum', 'np.sum', (['((v_arr - true_vals) ** 2)'], {}), '((v_arr - true_vals) ** 2)\n', (3276, 3302), True, 'import numpy as np\n')] |
import os
import numpy as np
import ase.db
def get_layer_system(db, formula, phase):
    """Fetch the unique PBE structure for `formula` in `phase` from the ASE db.

    Raises ValueError when the query matches no rows or more than one row.
    Returns the matching row converted via its toatoms() method.
    """
    rows = list(db.select('formula={},xc=PBE,phase={}'.format(formula, phase)))
    if not rows:
        raise ValueError("found no matches for {}, PBE, {} phase".format(formula, phase))
    if len(rows) > 1:
        # TODO - handle this better
        raise ValueError("found multiple matches for {}, PBE, {} phase".format(formula, phase))
    return rows[0].toatoms()
# Consistent ordering of lattice vectors and atoms for (Mo,W)(S,Se,Te)2 family.
def a_from_2H(layer_system):
    """Return the in-plane lattice constant a, read as the x component of the
    first lattice vector of the 2H monolayer cell."""
    return layer_system.get_cell()[0][0]
def h_from_2H(layer_system):
    """Return the layer thickness h: the z separation between the two X
    (chalcogen) atoms, which sit at indices 1 and 2 in the M, X, X order."""
    z_coords = [p[2] for p in layer_system.get_positions()]
    return z_coords[2] - z_coords[1]
def symbols_from_2H(layer_system):
    """Return (M, X) element symbols for a monolayer whose atoms are stored
    in the consistent M, X, X order."""
    metal, chalcogen = layer_system.get_chemical_symbols()[:2]
    return metal, chalcogen
def make_cell(db, syms, c_sep, vacuum_dist, AB_stacking=True, layer_shifts=None):
    """Assemble a multilayer TMD cell from per-layer 2H monolayers in `db`.

    db: ASE database queried via get_layer_system.
    syms: list of layer formulas (e.g. ['MoS2', 'WSe2']); the in-plane
        lattice constant is taken from the FIRST layer for all layers.
    c_sep: vertical gap between the top X atom of one layer and the bottom
        X atom of the next.
    vacuum_dist: vacuum added above the topmost atom along the c axis.
    AB_stacking: True for 2H-like AB stacking, False for AA (1T-like).
    layer_shifts: optional per-layer (d_a, d_b) fractional in-plane shifts.

    Returns (latvecs, at_syms, cartpos): 3x3 lattice vectors, atomic symbols
    in X, M, X order per layer, and Cartesian positions.
    """
    layer_systems = [get_layer_system(db, sym, 'H') for sym in syms]
    # Choose lattice constant from first layer.
    a = a_from_2H(layer_systems[0])
    hs = [h_from_2H(layer_system) for layer_system in layer_systems]
    # Setyawan and Curtarolo 2010 basis
    a1 = a * np.array([1/2, -float(np.sqrt(3)/2), 0.0])
    a2 = a * np.array([1/2, float(np.sqrt(3)/2), 0.0])
    latvecs_2D = np.array([a1[:2], a2[:2]]) # 2D part of D^T
    # Relative shifts of each layer. By default, do not shift.
    if layer_shifts is None:
        layer_shifts = [(0.0, 0.0)] * len(layer_systems)
    base_z, base_pos = 0.0, 'A'
    at_syms, cartpos = [], []
    for layer_system, h, layer_shift in zip(layer_systems, hs, layer_shifts):
        # Add [X, M, X] to list of atomic symbols.
        layer_M_sym, layer_X_sym = symbols_from_2H(layer_system)
        at_syms.extend([layer_X_sym, layer_M_sym, layer_X_sym])
        # z axis coordinates for this layer.
        X1_z = base_z
        M_z = base_z + h/2
        X2_z = base_z + h
        # Shift of this layer, if any.
        d_a, d_b = layer_shift
        # In-plane coordinates for this layer.
        # 'A' puts M at (1/3, 2/3); 'B' swaps M and X lateral sites.
        if base_pos == 'A':
            X1_lat = np.array([(0.0 + d_a) % 1, (0.0 + d_b) % 1])
            M_lat = np.array([(1/3 + d_a) % 1, (2/3 + d_b) % 1])
            X2_lat = X1_lat
        else:
            X1_lat = np.array([(1/3 + d_a) % 1, (2/3 + d_b) % 1])
            M_lat = np.array([(0.0 + d_a) % 1, (0.0 + d_b) % 1])
            X2_lat = X1_lat
        layer_cartpos_2D = [np.dot(atpos_lat, latvecs_2D)
                for atpos_lat in [X1_lat, M_lat, X2_lat]]
        # 3D Cartesian coordinates for this layer.
        cartpos.extend([np.array([pos[0], pos[1], z_pos])
                for pos, z_pos in zip(layer_cartpos_2D, [X1_z, M_z, X2_z])])
        base_z += h + c_sep
        # Two stacking modes: AB (2H) and AA (1T).
        # In AB-stacking mode, alternate base_pos between TMD layers.
        # in AA-stacking mode, keep base_pos constant.
        if AB_stacking:
            if base_pos == 'A':
                base_pos = 'B'
            else:
                base_pos = 'A'
    # Assume atoms are given in order of z value.
    # TODO - enforce this?
    a3 = np.array([0.0, 0.0, cartpos[-1][2] + vacuum_dist])
    latvecs = np.array([a1, a2, a3])
    return latvecs, at_syms, cartpos
| [
"numpy.array",
"numpy.dot",
"numpy.sqrt"
] | [((1471, 1497), 'numpy.array', 'np.array', (['[a1[:2], a2[:2]]'], {}), '([a1[:2], a2[:2]])\n', (1479, 1497), True, 'import numpy as np\n'), ((3319, 3369), 'numpy.array', 'np.array', (['[0.0, 0.0, cartpos[-1][2] + vacuum_dist]'], {}), '([0.0, 0.0, cartpos[-1][2] + vacuum_dist])\n', (3327, 3369), True, 'import numpy as np\n'), ((3384, 3406), 'numpy.array', 'np.array', (['[a1, a2, a3]'], {}), '([a1, a2, a3])\n', (3392, 3406), True, 'import numpy as np\n'), ((2275, 2319), 'numpy.array', 'np.array', (['[(0.0 + d_a) % 1, (0.0 + d_b) % 1]'], {}), '([(0.0 + d_a) % 1, (0.0 + d_b) % 1])\n', (2283, 2319), True, 'import numpy as np\n'), ((2340, 2388), 'numpy.array', 'np.array', (['[(1 / 3 + d_a) % 1, (2 / 3 + d_b) % 1]'], {}), '([(1 / 3 + d_a) % 1, (2 / 3 + d_b) % 1])\n', (2348, 2388), True, 'import numpy as np\n'), ((2448, 2496), 'numpy.array', 'np.array', (['[(1 / 3 + d_a) % 1, (2 / 3 + d_b) % 1]'], {}), '([(1 / 3 + d_a) % 1, (2 / 3 + d_b) % 1])\n', (2456, 2496), True, 'import numpy as np\n'), ((2513, 2557), 'numpy.array', 'np.array', (['[(0.0 + d_a) % 1, (0.0 + d_b) % 1]'], {}), '([(0.0 + d_a) % 1, (0.0 + d_b) % 1])\n', (2521, 2557), True, 'import numpy as np\n'), ((2615, 2644), 'numpy.dot', 'np.dot', (['atpos_lat', 'latvecs_2D'], {}), '(atpos_lat, latvecs_2D)\n', (2621, 2644), True, 'import numpy as np\n'), ((2779, 2812), 'numpy.array', 'np.array', (['[pos[0], pos[1], z_pos]'], {}), '([pos[0], pos[1], z_pos])\n', (2787, 2812), True, 'import numpy as np\n'), ((1433, 1443), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (1440, 1443), True, 'import numpy as np\n'), ((1378, 1388), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (1385, 1388), True, 'import numpy as np\n')] |
from enum import Enum
from collections import namedtuple
import numpy as np
from PIL import Image
import networkx as nx
import matplotlib.pyplot as plt
#---------- Globals ----------
class ACTION_ENUM(Enum):
    """Cardinal movement actions; values index the lookup tables below."""
    NORTH = 0
    SOUTH = 1
    EAST = 2
    WEST = 3
# inverse_action[a] is the action opposite to a (N<->S, E<->W).
inverse_action = np.array((1, 0, 3, 2), dtype=int)
# Immutable pixel coordinate and search-state records.
Point = namedtuple('Point', ['x', 'y'])
State = namedtuple('State', ['action', 'point'])
# RGB color that marks the start pixel in the map image (yellow).
start_color = (255, 255, 0)
#PIL holds (0,0) @ top left corner so N and S modifiers are flipped
state_modifier = [Point(0, -1), Point(0, 1), Point(1, 0), Point(-1, 0)]
#---------- Class ----------
class MapGraph:
    """Builds a networkx graph of the road network in a bitmap map image.

    Black pixels are roads; the yellow `start_color` pixel is the search
    origin. parse_map() walks the roads depth-first and adds an edge for
    every straight segment (run of moves in one direction).
    """
    # Constructor
    def __init__(self, imageFileName, showDebugGraph = False):
        #### Public Vars ####
        self.image_file_name = imageFileName
        self.width = None
        self.height = None
        self.map_graph = nx.Graph()
        ##### Private Vars ####
        # Sentinel action value marking the initial search state.
        self.__start_flag = -1
        self.__start_state = None
        # Debug switches: stack tracing, branch tracing, and GUI drawing.
        self.__debug_stack = False
        self.__debug_graph = True
        self.__debug_graph_gui = showDebugGraph
        # Search statistics, populated by parse_map (-1 until it runs).
        self.__max_stack_size = -1
        self.__set_size = -1
        self.__walked_nodes = -1
        self.__traveled_distance = -1
    #---------- Public Methods ----------
    # Returns Map Graph
    def get_map(self):
        return self.map_graph
    def get_start_point(self):
        return self.__start_state.point
    # Runs map parsing
    def parse_map(self):
        """Depth-first walk over road pixels, adding one graph edge per
        straight segment; also records search statistics."""
        # Prepare main Data
        img = self.__load_image()
        # Get starting state
        # TODO: make this an init var instead
        self.__start_state = self.__find_start(img)
        # Prepare data structures for graphing
        stack = [self.__start_state]
        visited_states = set()
        # Prepare loop globals (for counting)
        max_stack_size = 0
        total_distance = 0
        walked_nodes = 0
        while(stack != []):
            # Take current state off the stack
            state = stack.pop()
            # Checking condition if it's the first iteration
            is_start = True if state.action == self.__start_flag else False
            # If first iter define needed params
            if is_start:
                state_prev = state
                state_path_root = state
                counter = 0
            ##### Determine Path Branching ####
            # If new vector path (changed action/direction)
            # NOTE(review): `adjacent` and `state_prev` here hold the values
            # set at the END of the previous iteration; on the first iteration
            # this branch is skipped because the actions match.
            if self.__is_new_path(state.action, state_path_root.action):
                # Graph Debug Printing
                if self.__debug_graph: self.__graph_debugging(state_path_root, state, state_prev, counter, adjacent, stack)
                # Add to Graph
                self.map_graph.add_edge((state_path_root.point.x, state_path_root.point.y), (state_prev.point.x, state_prev.point.y))
                #TODO: better way to output Point tuple without name attributes showing? Makes printing & Graph id messy
                visited_states.add(state_path_root.point)
                # Reset path tracking
                if adjacent:
                    state_path_root = State(state.action, state_prev.point)
                else:
                    # Dead end: back up one step (inverse action) for the new root.
                    state_path_root = State(state.action, self.__calc_state(state.point, inverse_action[state.action]).point)
                total_distance += counter
                counter = 1
            # If same path increment counter
            else:
                counter += 1
            ##### Determine Valid Next Actions ####
            # Stack Debug Printing
            if self.__debug_stack: print("State: (", "Start" if state.action == -1 else ACTION_ENUM(state.action).name[0], ", ", state.point.x, ", ", state.point.y, ")", sep="")
            # Set an adjacent state (when at least one node is added)
            adjacent = False
            # Check only orthogonal actions and append
            if state.point not in visited_states:
                for action in ACTION_ENUM:
                    if action.value != state.action and action.value != inverse_action[state.action]:
                        state_next = self.__calc_state(state.point, action.value)
                        if self.__is_valid_state(state_next.point, img):
                            adjacent = True
                            stack.append(state_next)
                # If previous action is still valid append it last (LIFO)
                state_next = self.__calc_state(state.point, state.action)
                if not is_start and self.__is_valid_state(state_next.point, img):
                    adjacent = True
                    stack.append(state_next)
            # --- Prepare for next Iteration
            if max_stack_size < len(stack): max_stack_size = len(stack)
            state_prev = state
            walked_nodes += 1
        # --- END While Loop ---
        # Set class properties
        self.__max_stack_size = max_stack_size
        self.__set_size = len(visited_states)
        self.__walked_nodes = walked_nodes
        self.__traveled_distance = total_distance
        # Stack Debug Printing
        if self.__debug_stack: print("Visited States\n", visited_states)
        # Draw Graph Debug
        if self.__debug_graph_gui:
            # Inject data into graph nodes
            #for node in MapGraph.nodes:
            # Negate y so the drawing matches image orientation (PIL y grows down).
            node_positions = {node: (node[0], -node[1]) for node in self.map_graph.nodes}
            #print(MapGraph.nodes(data=True))
            nx.draw(self.map_graph, pos=node_positions, with_labels=True, font_weight='bold')
            plt.show()
    #### END parse_map ####
    #---------- Private Methods ----------
    #### Helper Functions ####
    # Load image and define some parameters
    def __load_image(self):
        """Open the map image as RGB and record its dimensions."""
        # Load image
        #TODO: use load?
        img = Image.open(self.image_file_name).convert('RGB')
        if img == None:
            print("ERROR: Unable to open map")
            exit(1)
        # Define image size
        self.width, self.height = img.size
        return img
    # Find & set start state
    def __find_start(self, img):
        """Scan every pixel for `start_color`; exit the process if absent."""
        # Search for starting location
        start_point = None
        for x in range(0, self.width):
            for y in range(0, self.height):
                if img.getpixel((x,y)) == start_color:
                    start_point = Point(x, y)
        if start_point == None:
            print("MAP_ERROR: No start position found")
            exit(1)
        return State(self.__start_flag, start_point)
    def __calc_state(self, state, numAction):
        """Return the State reached from point `state` by taking `numAction`."""
        return State(numAction, Point(state.x + state_modifier[numAction].x, state.y + state_modifier[numAction].y))
    def __is_valid_state(self, state, img):
        """True if the point is inside the image and is a road (black) pixel."""
        # TODO: python define func in func to simplify these if statements, keep short circuiting
        x, y = state
        w, h = self.width, self.height
        #if is in_bounds and is a road
        in_bounds = True if x >= 0 and x < w and y >= 0 and y < h else False
        return True if in_bounds and img.getpixel(state) == (0, 0, 0) else False
    def __is_new_path(self, stateAction, pathAction):
        """True when the current action differs from the path root's action."""
        return True if stateAction != pathAction else False
    #### Debugging Printers ####
    def __print_stack(self, stack):
        """Print the pending DFS stack as (direction, x, y) tuples."""
        print("[", sep="", end="")
        for state in stack: print("(", ACTION_ENUM(state.action).name[0], ", ", state.point.x, ", ", state.point.y, "), ", sep="", end="")
        print("]", sep="")
    def __graph_debugging(self, state_path_root, state, state_prev, counter, adjacent, stack):
        """Trace one path-branch event: root, previous, and current states."""
        action_root = ("Start" if state_path_root.action == -1 else ACTION_ENUM(state_path_root.action).name[0])
        action_prev = ("Start" if state_prev.action == -1 else ACTION_ENUM(state_prev.action).name[0])
        #TODO: better way to output Point tuple without name attributes showing? Makes printing & Graph id messy
        print("Path", action_root, "of", counter, (action_root, state_path_root.point.x, state_path_root.point.y), "to", (action_prev, state_prev.point.x, state_prev.point.y), "Adjacent" if adjacent else "")
        print("State: (", ACTION_ENUM(state.action).name[0], ", ", state.point.x, ", ", state.point.y, ")", sep="")
        #print("Action:", ACTION_ENUM(state.action).name[0], "Inverse:", ACTION_ENUM(inverse_action[state.action]).name[0])
        self.__print_stack(stack)
        print()
| [
"collections.namedtuple",
"PIL.Image.open",
"networkx.Graph",
"numpy.array",
"networkx.draw",
"matplotlib.pyplot.show"
] | [((275, 308), 'numpy.array', 'np.array', (['(1, 0, 3, 2)'], {'dtype': 'int'}), '((1, 0, 3, 2), dtype=int)\n', (283, 308), True, 'import numpy as np\n'), ((318, 349), 'collections.namedtuple', 'namedtuple', (['"""Point"""', "['x', 'y']"], {}), "('Point', ['x', 'y'])\n", (328, 349), False, 'from collections import namedtuple\n'), ((358, 398), 'collections.namedtuple', 'namedtuple', (['"""State"""', "['action', 'point']"], {}), "('State', ['action', 'point'])\n", (368, 398), False, 'from collections import namedtuple\n'), ((826, 836), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (834, 836), True, 'import networkx as nx\n'), ((4879, 4965), 'networkx.draw', 'nx.draw', (['self.map_graph'], {'pos': 'node_positions', 'with_labels': '(True)', 'font_weight': '"""bold"""'}), "(self.map_graph, pos=node_positions, with_labels=True, font_weight=\n 'bold')\n", (4886, 4965), True, 'import networkx as nx\n'), ((4967, 4977), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4975, 4977), True, 'import matplotlib.pyplot as plt\n'), ((5193, 5225), 'PIL.Image.open', 'Image.open', (['self.image_file_name'], {}), '(self.image_file_name)\n', (5203, 5225), False, 'from PIL import Image\n')] |
#%%
from functools import partial
import jax
import jax.numpy as np
from jax import random, vmap, jit, grad
from jax.experimental import stax, optimizers
from jax.experimental.stax import Dense, Relu
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm
#%%
# Use stax to set up network initialization and evaluation functions
# (a small MLP: two hidden layers of 40 ReLU units, one scalar output).
net_init, net_apply = stax.serial(
    Dense(40), Relu,
    Dense(40), Relu,
    Dense(1)
)
# One scalar input feature; -1 leaves the batch dimension free.
in_shape = (-1, 1,)
# Fixed PRNG seed so parameter initialization is reproducible.
rng = random.PRNGKey(0)
out_shape, params = net_init(rng, in_shape)
#%%
import numpy as onp
def get_wave(wave_gen, n_samples=100, wave_params=False):
    """Sample a random sine task: y = amp * sin(x + phase) over x from wave_gen.

    Amplitude is drawn uniformly from [0.1, 5.0] and phase from [0, pi).
    Returns (x, y); when wave_params is True returns ((x, y), (phase, amp)).
    """
    xs = wave_gen(n_samples)
    # Draw amplitude first, then phase (order matters for RNG reproducibility).
    amplitude = onp.random.uniform(low=0.1, high=5.0)
    shift = onp.random.uniform(low=0., high=onp.pi)
    ys = onp.sin(xs + shift) * amplitude
    if wave_params:
        return (xs, ys), (shift, amplitude)
    return xs, ys
def vis_wave_gen(N):
    """Return N evenly spaced points on [-5, 5] as an (N, 1) column vector —
    a deterministic grid, better suited for visualization."""
    return onp.linspace(-5, 5, N).reshape((N, 1))
def train_wave_gen(N):
    """Return N points drawn uniformly at random from [-5, 5), shaped (N, 1);
    the stochastic counterpart of vis_wave_gen used for training."""
    return onp.random.uniform(low=-5., high=5., size=(N, 1))
def mse(params, batch):
    """Mean squared error of the network's prediction on an (inputs, targets) batch."""
    inputs, targets = batch
    preds = net_apply(params, inputs)
    return np.mean((targets - preds) ** 2)
#%%
# Sanity check: run the untrained net on one wave and plot prediction vs target.
batch = get_wave(vis_wave_gen, 100)
predictions = net_apply(params, batch[0])
losses = mse(params, batch)  # NOTE: computed but not used below
plt.plot(batch[0], predictions, label='prediction')
plt.plot(*batch, label='target')
plt.legend()
#%%
opt_init, opt_update, get_params = optimizers.adam(step_size=1e-2)
@jit
def step(i, opt_state, batch):
    """Apply one Adam update to the plain regression loss."""
    current = get_params(opt_state)
    grads = grad(mse)(current, batch)
    return opt_update(i, grads, opt_state)
#%%
out_shape, params = net_init(rng, in_shape) # re-init model
opt_state = opt_init(params) # init optim
# Fit a single fixed wave for 200 optimizer steps, then plot the fit.
batch = get_wave(vis_wave_gen, 100)
for i in range(200):
    opt_state = step(i, opt_state, batch)
params = get_params(opt_state)
xb, yb = batch
plt.plot(xb, net_apply(params, xb), label='prediction')
plt.plot(xb, yb, label='target')
plt.legend()
# %%
### MAML
alpha = 0.1  # inner-loop (per-task adaptation) learning rate
# inner loop -- take one gradient step on the data
def inner_update(params, batch):
    """MAML inner loop: one SGD step (step size `alpha`) on `batch`."""
    grads = grad(mse)(params, batch)
    sgd = lambda p, g: p - alpha * g
    return jax.tree_multimap(sgd, params, grads)
# outer loop
def maml_loss(params, train_batch, test_batch):
    """MAML outer objective: test-set MSE after one adaptation step on train_batch."""
    adapted = inner_update(params, train_batch)
    return mse(adapted, test_batch)
@jit
def maml_step(i, opt_state, train_batch, test_batch):
    """Apply one Adam update to the MAML objective."""
    current = get_params(opt_state)
    grads = grad(maml_loss)(current, train_batch, test_batch)
    return opt_update(i, grads, opt_state)
## task extractor
def get_task(n_train, n_test, wave_params=False):
    """Draw one sine-wave task and split it into train/test batches.

    Returns (train_batch, test_batch) or, when wave_params is True,
    (train_batch, test_batch, (phase, amp)).
    """
    if wave_params:
        batch, wparams = get_wave(train_wave_gen, n_train + n_test, wave_params=True)
    else:
        batch = get_wave(train_wave_gen, n_train + n_test)
    # Slice both leaves of (xb, yb) with a treemap; arrays are the leaves.
    is_array = lambda node: hasattr(node, 'shape')
    train_batch = jax.tree_map(lambda l: l[:n_train], batch, is_leaf=is_array)
    test_batch = jax.tree_map(lambda l: l[n_train:], batch, is_leaf=is_array)
    if wave_params:
        return train_batch, test_batch, wparams
    return train_batch, test_batch
# %%
opt_init, opt_update, get_params = optimizers.adam(step_size=1e-3)
out_shape, params = net_init(rng, in_shape) # re-init model
opt_state = opt_init(params) # init optim
# Meta-train: one freshly sampled task per outer step.
for i in tqdm(range(20000)):
    train_batch, test_batch = get_task(20, 1)
    opt_state = maml_step(i, opt_state, train_batch, test_batch)
params = get_params(opt_state)
# %%
# Evaluate: sample a new task and visualize 5 inner adaptation steps.
train_batch, test_batch, wparams = get_task(20, 1, wave_params=True)
# re-create wave smoother for visualization
phase, amp = wparams
x = vis_wave_gen(100)
y = np.sin(x + phase) * amp
plt.plot(x, y, label='targets')
step_params = params.copy()
for i in range(5): # visualize wave at each grad step
    ypred = net_apply(step_params, x)
    plt.plot(x, ypred, label=f'step{i}')
    step_params = inner_update(step_params, train_batch)
plt.legend()
# %%
# Stack several tasks along a new leading axis so maml_loss can be vmapped.
task_batch_size = 5
tasks = [get_task(20, 1) for _ in range(task_batch_size)]
train_batch, test_batch = jax.tree_multimap(lambda *b: np.stack(b), *tasks, is_leaf=lambda node: hasattr(node, 'shape'))
xb, yb = train_batch
for i in range(len(xb)):
    plt.scatter(xb[i], yb[i])
# %%
def batch_maml_loss(params, train_batch, test_batch):
    """Mean MAML loss over a batch of tasks (vmapped over the leading task axis)."""
    per_task = vmap(partial(maml_loss, params))(train_batch, test_batch)
    return per_task.mean()
@jit
def batch_maml_step(i, opt_state, train_batch, test_batch):
    """Apply one Adam update to the task-batched MAML objective."""
    current = get_params(opt_state)
    grads = grad(batch_maml_loss)(current, train_batch, test_batch)
    return opt_update(i, grads, opt_state)
# %%
task_batch_size = 4
opt_init, opt_update, get_params = optimizers.adam(step_size=1e-3)
out_shape, params = net_init(rng, in_shape) # re-init model
opt_state = opt_init(params) # init optim
for i in tqdm(range(20000)):
    # get batch of tasks
    tasks = [get_task(20, 1) for _ in range(task_batch_size)]
    train_batch, test_batch = jax.tree_multimap(lambda *b: np.stack(b), *tasks, is_leaf=lambda node: hasattr(node, 'shape'))
    # take gradient step over the mean
    opt_state = batch_maml_step(i, opt_state, train_batch, test_batch)
params = get_params(opt_state)
# %%
# Evaluate: sample an unseen task and visualize 5 inner adaptation steps.
train_batch, test_batch, wparams = get_task(20, 1, wave_params=True)
# re-create wave smoother for visualization
phase, amp = wparams
x = vis_wave_gen(100)
y = np.sin(x + phase) * amp
plt.plot(x, y, label='targets')
plt.scatter(*train_batch, label='train')
step_params = params.copy()
for i in range(5): # visualize wave at each grad step
    ypred = net_apply(step_params, x)
    plt.plot(x, ypred, label=f'step{i}')
    step_params = inner_update(step_params, train_batch)
plt.legend()
# %%
| [
"jax.random.PRNGKey",
"numpy.sin",
"jax.experimental.stax.Dense",
"matplotlib.pyplot.plot",
"jax.numpy.sin",
"jax.experimental.optimizers.adam",
"numpy.linspace",
"jax.grad",
"functools.partial",
"matplotlib.pyplot.scatter",
"numpy.random.uniform",
"jax.tree_multimap",
"jax.numpy.stack",
"... | [((459, 476), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (473, 476), False, 'from jax import random, vmap, jit, grad\n'), ((1313, 1364), 'matplotlib.pyplot.plot', 'plt.plot', (['batch[0]', 'predictions'], {'label': '"""prediction"""'}), "(batch[0], predictions, label='prediction')\n", (1321, 1364), True, 'import matplotlib.pyplot as plt\n'), ((1365, 1397), 'matplotlib.pyplot.plot', 'plt.plot', (['*batch'], {'label': '"""target"""'}), "(*batch, label='target')\n", (1373, 1397), True, 'import matplotlib.pyplot as plt\n'), ((1398, 1410), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1408, 1410), True, 'import matplotlib.pyplot as plt\n'), ((1451, 1482), 'jax.experimental.optimizers.adam', 'optimizers.adam', ([], {'step_size': '(0.01)'}), '(step_size=0.01)\n', (1466, 1482), False, 'from jax.experimental import stax, optimizers\n'), ((1939, 1971), 'matplotlib.pyplot.plot', 'plt.plot', (['xb', 'yb'], {'label': '"""target"""'}), "(xb, yb, label='target')\n", (1947, 1971), True, 'import matplotlib.pyplot as plt\n'), ((1972, 1984), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1982, 1984), True, 'import matplotlib.pyplot as plt\n'), ((3317, 3349), 'jax.experimental.optimizers.adam', 'optimizers.adam', ([], {'step_size': '(0.001)'}), '(step_size=0.001)\n', (3332, 3349), False, 'from jax.experimental import stax, optimizers\n'), ((3818, 3849), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'label': '"""targets"""'}), "(x, y, label='targets')\n", (3826, 3849), True, 'import matplotlib.pyplot as plt\n'), ((4071, 4083), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4081, 4083), True, 'import matplotlib.pyplot as plt\n'), ((4803, 4835), 'jax.experimental.optimizers.adam', 'optimizers.adam', ([], {'step_size': '(0.001)'}), '(step_size=0.001)\n', (4818, 4835), False, 'from jax.experimental import stax, optimizers\n'), ((5518, 5549), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'label': 
'"""targets"""'}), "(x, y, label='targets')\n", (5526, 5549), True, 'import matplotlib.pyplot as plt\n'), ((5550, 5590), 'matplotlib.pyplot.scatter', 'plt.scatter', (['*train_batch'], {'label': '"""train"""'}), "(*train_batch, label='train')\n", (5561, 5590), True, 'import matplotlib.pyplot as plt\n'), ((5812, 5824), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5822, 5824), True, 'import matplotlib.pyplot as plt\n'), ((380, 389), 'jax.experimental.stax.Dense', 'Dense', (['(40)'], {}), '(40)\n', (385, 389), False, 'from jax.experimental.stax import Dense, Relu\n'), ((401, 410), 'jax.experimental.stax.Dense', 'Dense', (['(40)'], {}), '(40)\n', (406, 410), False, 'from jax.experimental.stax import Dense, Relu\n'), ((422, 430), 'jax.experimental.stax.Dense', 'Dense', (['(1)'], {}), '(1)\n', (427, 430), False, 'from jax.experimental.stax import Dense, Relu\n'), ((643, 680), 'numpy.random.uniform', 'onp.random.uniform', ([], {'low': '(0.1)', 'high': '(5.0)'}), '(low=0.1, high=5.0)\n', (661, 680), True, 'import numpy as onp\n'), ((693, 733), 'numpy.random.uniform', 'onp.random.uniform', ([], {'low': '(0.0)', 'high': 'onp.pi'}), '(low=0.0, high=onp.pi)\n', (711, 733), True, 'import numpy as onp\n'), ((1025, 1076), 'numpy.random.uniform', 'onp.random.uniform', ([], {'low': '(-5.0)', 'high': '(5.0)', 'size': '(N, 1)'}), '(low=-5.0, high=5.0, size=(N, 1))\n', (1043, 1076), True, 'import numpy as onp\n'), ((1176, 1201), 'jax.numpy.mean', 'np.mean', (['((y - ypred) ** 2)'], {}), '((y - ypred) ** 2)\n', (1183, 1201), True, 'import jax.numpy as np\n'), ((2213, 2257), 'jax.tree_multimap', 'jax.tree_multimap', (['sgd_update', 'params', 'grads'], {}), '(sgd_update, params, grads)\n', (2230, 2257), False, 'import jax\n'), ((3793, 3810), 'jax.numpy.sin', 'np.sin', (['(x + phase)'], {}), '(x + phase)\n', (3799, 3810), True, 'import jax.numpy as np\n'), ((3976, 4012), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'ypred'], {'label': 'f"""step{i}"""'}), "(x, ypred, 
label=f'step{i}')\n", (3984, 4012), True, 'import matplotlib.pyplot as plt\n'), ((4340, 4365), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xb[i]', 'yb[i]'], {}), '(xb[i], yb[i])\n', (4351, 4365), True, 'import matplotlib.pyplot as plt\n'), ((5493, 5510), 'jax.numpy.sin', 'np.sin', (['(x + phase)'], {}), '(x + phase)\n', (5499, 5510), True, 'import jax.numpy as np\n'), ((5717, 5753), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'ypred'], {'label': 'f"""step{i}"""'}), "(x, ypred, label=f'step{i}')\n", (5725, 5753), True, 'import matplotlib.pyplot as plt\n'), ((1563, 1572), 'jax.grad', 'grad', (['mse'], {}), '(mse)\n', (1567, 1572), False, 'from jax import random, vmap, jit, grad\n'), ((2111, 2120), 'jax.grad', 'grad', (['mse'], {}), '(mse)\n', (2115, 2120), False, 'from jax import random, vmap, jit, grad\n'), ((2557, 2572), 'jax.grad', 'grad', (['maml_loss'], {}), '(maml_loss)\n', (2561, 2572), False, 'from jax import random, vmap, jit, grad\n'), ((4223, 4234), 'jax.numpy.stack', 'np.stack', (['b'], {}), '(b)\n', (4231, 4234), True, 'import jax.numpy as np\n'), ((4647, 4668), 'jax.grad', 'grad', (['batch_maml_loss'], {}), '(batch_maml_loss)\n', (4651, 4668), False, 'from jax import random, vmap, jit, grad\n'), ((752, 770), 'numpy.sin', 'onp.sin', (['(x + phase)'], {}), '(x + phase)\n', (759, 770), True, 'import numpy as onp\n'), ((919, 941), 'numpy.linspace', 'onp.linspace', (['(-5)', '(5)', 'N'], {}), '(-5, 5, N)\n', (931, 941), True, 'import numpy as onp\n'), ((4444, 4470), 'functools.partial', 'partial', (['maml_loss', 'params'], {}), '(maml_loss, params)\n', (4451, 4470), False, 'from functools import partial\n'), ((5115, 5126), 'jax.numpy.stack', 'np.stack', (['b'], {}), '(b)\n', (5123, 5126), True, 'import jax.numpy as np\n')] |
'''
Author: <NAME>
Project: deeppop
Purpose: Model builder -- a single component that stores all candidate model architectures.
Improvements needed:
    (/) - For the VAE, bring in a custom loss with the KL-divergence term
'''
from keras import objectives
from keras.models import Model, Sequential
from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, \
Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM
from keras.regularizers import l1, l2
from keras import backend as K
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
from keras.utils import to_categorical
from chord.chord_generator import NUM_CLASSES
from models.keras_attention_wrapper import AttentionDecoder
from keras.optimizers import Adam
class ModelBuilder:
    """Factory for candidate Keras models plus shared training utilities.

    Holds a fixed train/test split (X_train/Y_train/X_test/Y_test) that the
    train_* methods reuse, and exposes one build_* method per architecture.
    """

    # Metrics to report per loss function (shared by both training paths;
    # previously duplicated in train_model and train_with_generator).
    LOSS_METRICS = {
        "mean_squared_error": ['accuracy'],
        "binary_crossentropy": ['binary_accuracy'],
        "categorical_crossentropy": ['categorical_accuracy'],
    }

    def __init__(self, X_train, Y_train, X_test, Y_test):
        # Keep the split so training helpers don't need it passed again.
        self.X_train = X_train
        self.X_test = X_test
        self.Y_train = Y_train
        self.Y_test = Y_test

    def build_seq2seq_model(self, num_encoder_tokens, num_decoder_tokens, latent_dim):
        """LSTM encoder-decoder; the decoder is seeded with the encoder's final states.

        :param num_encoder_tokens: encoder input feature size
        :param num_decoder_tokens: decoder vocabulary size (softmax output)
        :param latent_dim: LSTM hidden-state size
        :return: uncompiled Model taking [encoder_inputs, decoder_inputs]
        """
        encoder_inputs = Input(shape=(None, num_encoder_tokens))
        encoder = LSTM(latent_dim, return_state=True)
        encoder_outputs, state_h, state_c = encoder(encoder_inputs)
        # We discard `encoder_outputs` and only keep the states.
        encoder_states = [state_h, state_c]
        decoder_inputs = Input(shape=(None, num_decoder_tokens))
        # Full output sequences for training; returned states only matter
        # later at inference time.
        decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
        decoder_outputs, _, _ = decoder_lstm(decoder_inputs,
                                             initial_state=encoder_states)
        decoder_dense = Dense(num_decoder_tokens, activation='softmax')
        decoder_outputs = decoder_dense(decoder_outputs)
        model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
        return model

    def build_stacked_autoencoder(self, input_dim, intermediate_dim):
        '''
        Build a stacked autoencoder.
        :param input_dim: input dimension
        :param intermediate_dim: eg. [512,256,128,256,512]
        :return: model
        '''
        model = Sequential()
        for i, units in enumerate(intermediate_dim):
            if i == 0:
                model.add(Dense(units=units, activation='relu',
                                input_shape=(input_dim,)))
            else:
                # BUG FIX: activation was the string 'intermediate_dim',
                # which is not a valid Keras activation and raises at build
                # time; use 'relu' like the first layer.
                model.add(Dense(units=units, activation='relu'))
        model.add(Dense(input_dim))  # linear reconstruction layer
        return model

    def build_and_train_vae(self, input_dim, intermediate_dim, latent_dim,
                            epochs=200, batch_size=128):
        """Build the VAE and fit it to reconstruct X_train.

        :return: (vae, encoder, generator)
        """
        vae, encoder, generator, z_log_var, z_mean = self.build_vae_model(
            input_dim, intermediate_dim, latent_dim)
        vae.compile(optimizer='adam', loss=vae_loss_custom(z_log_var, z_mean), metrics=['accuracy'])
        history = vae.fit(self.X_train, self.X_train,
                          shuffle=True,
                          epochs=epochs,
                          batch_size=batch_size,
                          validation_data=(self.X_test, self.X_test))
        # BUG FIX: this line was truncated to `t(range(...))` (a NameError);
        # restored to the plt.plot call used elsewhere in this class.
        plt.plot(range(len(history.history['loss'])), history.history['loss'], label='train loss')
        plt.show()
        return vae, encoder, generator

    def build_vae_model(self, input_dim, intermediate_dim, latent_dim):
        """Assemble the VAE, a standalone encoder and a generator (decoder).

        :return: (vae, encoder, generator, z_log_var, z_mean)
        """
        inputs = Input(shape=(input_dim,), name='encoder_input')
        x = Dense(intermediate_dim, activation='relu')(inputs)
        z_mean = Dense(latent_dim, name='z_mean')(x)
        z_log_var = Dense(latent_dim, name='z_log_var')(x)
        # Reparameterized random sample from q(z|x).
        z = Lambda(sampling, name='z')([z_mean, z_log_var])
        # Decoder layers are shared between the VAE and the generator.
        decoder_h = Dense(intermediate_dim, activation='relu')
        decoder_mean = Dense(input_dim)
        h_decoded = decoder_h(z)
        x_decoded_mean = decoder_mean(h_decoded)
        print('input_dim', input_dim)
        print(x_decoded_mean.shape)
        # instantiate encoder model
        encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')
        # instantiate decoder model (also known as the generator)
        decoder_input = Input(shape=(latent_dim,))
        _h_decoded = decoder_h(decoder_input)
        _x_decoded_mean = decoder_mean(_h_decoded)
        generator = Model(decoder_input, _x_decoded_mean)
        # instantiate VAE model
        vae = Model(inputs, x_decoded_mean, name='vae_mlp')
        return vae, encoder, generator, z_log_var, z_mean

    def build_basic_rnn_model(self, input_dim, output_dim=None, use_dropout=False):
        '''
        Build basic RNN model using LSTMs.
        :param input_dim: input dimension, normally (100, 128 * 12)
        :param output_dim: per-step output size; defaults to input_dim[-1]
        :param use_dropout: whether to use dropout (currently always applied)
        :return: model
        '''
        if not output_dim:
            output_dim = input_dim[-1]
        model = Sequential()
        # CuDNN variants for GPU speed; interface-compatible with LSTM.
        model.add(CuDNNLSTM(64, return_sequences=True, input_shape=input_dim))
        model.add(CuDNNLSTM(128, return_sequences=True))
        model.add(Dropout(0.8))
        model.add(TimeDistributed(Dense(output_dim)))
        model.add(Activation('softmax'))
        return model

    def build_bidirectional_rnn_model_no_embeddings(self, input_dim, output_dim=128):
        """Bidirectional LSTM stack with L2-regularized gates; no embedding layer."""
        model = Sequential()
        model.add(Bidirectional(LSTM(64, bias_regularizer=l2(0.01), recurrent_regularizer=l2(0.01), return_sequences=True), input_shape=input_dim))
        model.add(Dropout(0.4))
        model.add(Bidirectional(LSTM(64, bias_regularizer=l2(0.01), recurrent_regularizer=l2(0.01), return_sequences=True)))
        model.add(Dropout(0.4))
        model.add(TimeDistributed(Dense(output_dim)))  # 128 notes to output, multi-class
        model.add(Activation('softmax'))
        return model

    def build_bidirectional_rnn_model(self, input_dim, output_dim=128):
        '''
        Build bidirectional RNN model using LSTMs over chord-ID embeddings.
        :param input_dim: input dimension, normally (100, 128 * 12)
        :return: model
        '''
        model = Sequential()
        model.add(Embedding(NUM_CLASSES, 32, input_shape=input_dim))  # NUM_CLASSES is the total number of chord IDs
        model.add(Bidirectional(LSTM(64, return_sequences=True), input_shape=input_dim))
        model.add(Dropout(0.2))
        model.add(Bidirectional(LSTM(128, return_sequences=True)))
        model.add(Dropout(0.2))
        model.add(TimeDistributed(Dense(output_dim)))  # 128 notes to output, multi-class
        model.add(Activation('softmax'))
        return model

    def build_attention_bidirectional_rnn_model(self, input_dim):
        '''
        Build attention bidirectional RNN model using LSTMs (Luong-style dot attention).
        :param input_dim: input dimension, normally (100, 128 * 12)
        :return: model
        '''
        encoder_input = Input(shape=input_dim)
        encoder = Embedding(NUM_CLASSES, 32, input_shape=input_dim)(encoder_input)
        encoder = Bidirectional(LSTM(64, return_sequences=True))(encoder)
        encoder = Dropout(0.2)(encoder)
        encoder = Bidirectional(LSTM(128, return_sequences=True))(encoder)
        encoder = Dropout(0.2)(encoder)
        decoder = Bidirectional(LSTM(128, return_sequences=True))(encoder)
        # Dot-product attention between decoder and encoder timesteps.
        attention = dot([decoder, encoder], axes=[2, 2])
        attention = Activation('softmax', name='attention')(attention)
        print('attention', attention)
        context = dot([attention, encoder], axes=[2, 1])
        print('context', context)
        decoder_combined_context = concatenate([context, decoder])
        print('decoder_combined_context', decoder_combined_context)
        # Per-timestep softmax over 128 notes from the attended context.
        output = TimeDistributed(Dense(128, activation="softmax"))(decoder_combined_context)
        print('output', output)
        print('decoder', decoder)
        model = Model(inputs=[encoder_input], outputs=[output])
        return model

    def build_basic_conv2d_rnn_model(self, input_dim, use_dropout=False):
        '''
        Build basic Conv2d -> RNN model: per-timestep CNN features fed to a GRU.
        :param input_dim: input dimension, normally (100, 128 * 12)
        :param use_dropout: whether to use dropout
        :return: model
        '''
        print(input_dim)
        model = Sequential()
        model.add(TimeDistributed(Conv2D(32, kernel_size=(3, 3), padding='same'), input_shape=input_dim))
        model.add(TimeDistributed(MaxPooling2D()))
        model.add(TimeDistributed(Flatten()))
        model.add(TimeDistributed(Dense(32)))
        model.add(GRU(64, return_sequences=True, input_shape=input_dim))
        model.add(LeakyReLU(alpha=0.3))
        if use_dropout:
            model.add(Dropout(0.8))
        model.add(TimeDistributed(Dense(input_dim[-2] * input_dim[-3])))
        model.add(Activation('sigmoid'))
        return model

    def build_basic_cnn_model(self, input_dim, use_dropout=False):
        """Basic CNN feature extractor. (use_dropout is currently unused.)"""
        model = Sequential()
        # BUG FIX: input_shape was passed to Sequential.add() (a TypeError);
        # it belongs to the first layer instead.
        model.add(Conv2D(32, kernel_size=(3, 3), padding='same', input_shape=input_dim))
        model.add(MaxPooling2D())
        model.add(Conv2D(64, kernel_size=(5, 5), padding='same'))
        model.add(MaxPooling2D())
        model.add(Flatten())
        model.add(TimeDistributed(Dense(32)))
        return model  # BUG FIX: previously returned None

    def _evaluate_and_report(self, model, history):
        """Shared training epilogue: evaluate on both splits, plot the loss
        curves to loss_train_test.png and persist a one-line summary."""
        scores = model.evaluate(self.X_train, self.Y_train, verbose=True)
        print('Train loss:', scores[0])
        print('Train accuracy:', scores[1])
        scores_2 = model.evaluate(self.X_test, self.Y_test, verbose=True)
        print('Test loss:', scores_2[0])
        print('Test accuracy:', scores_2[1])
        plt.plot(range(len(history.history['loss'])), history.history['loss'], label='train loss')
        plt.plot(range(len(history.history['val_loss'])), history.history['val_loss'], label='validation loss')
        plt.savefig('loss_train_test.png')
        # BUG FIX: the file handle was never closed; use a context manager.
        with open('train_test_accuracy.txt', 'w+') as f:
            f.write(
                'Train acc: {} Test acc: {} Train_loss: {} Test_loss: {}'.format(scores[1],
                                                                                 scores_2[1],
                                                                                 scores[0],
                                                                                 scores_2[0]))

    def train_model(self, model, epochs, loss='mean_squared_error'):
        """Compile with Adam (gradient clipping) and fit on the in-memory split.

        :param loss: one of the keys of LOSS_METRICS
        :return: the trained model
        """
        print(model.summary())
        optimizer = Adam(clipnorm=1.0)
        model.compile(loss=loss, optimizer=optimizer, metrics=self.LOSS_METRICS[loss])
        history = model.fit(self.X_train, self.Y_train, epochs=epochs, validation_data=(self.X_test, self.Y_test))
        self._evaluate_and_report(model, history)
        return model

    def train_with_generator(self, model, epochs, loss='mean_squared_error'):
        """Train with batches generated on the fly: chord embeddings in,
        one-hot (128-way) note targets out.

        :param loss: one of the keys of LOSS_METRICS
        :return: the trained model
        """
        # generate embeddings and one-hot on the fly
        from utils import convert_chord_indices_to_embeddings

        def generate_batches():
            # NOTE(review): the original "training" and "validation" generators
            # were byte-identical -- both iterated the final-10% index range of
            # X_train while reading from the *test* arrays. Preserved as-is,
            # but this looks unintended; confirm against the data pipeline.
            while 1:
                for i in range(int(len(self.X_train) * 0.9), len(self.X_train)):
                    input_chord, output_note = self.X_test[i], self.Y_test[i]
                    input_chord = np.array(convert_chord_indices_to_embeddings(input_chord))
                    output_note = to_categorical(output_note, num_classes=128)
                    yield (np.expand_dims(input_chord, axis=0),
                           np.expand_dims(output_note, axis=0))

        print("Train with generator...")
        print(model.summary())
        optimizer = Adam(clipnorm=1.0)
        model.compile(loss=loss, optimizer=optimizer, metrics=self.LOSS_METRICS[loss])
        history = model.fit_generator(generate_batches(),
                                      validation_data=generate_batches(),
                                      validation_steps=1,
                                      steps_per_epoch=1458, epochs=epochs)
        self._evaluate_and_report(model, history)
        return model
def sampling(args):
    """
    Reparameterization trick: sample from an isotropic unit Gaussian and
    shift/scale it, i.e. z = z_mean + sqrt(var) * eps with eps ~ N(0, I),
    instead of sampling from Q(z|X) directly.
    # Arguments:
        args (tensor): mean and log of variance of Q(z|X)
    # Returns:
        z (tensor): sampled latent vector
    """
    mean, log_var = args
    n = K.shape(mean)[0]
    d = K.int_shape(mean)[1]
    # K.random_normal defaults to mean=0, std=1.0
    eps = K.random_normal(shape=(n, d))
    return mean + K.exp(0.5 * log_var) * eps
def vae_loss_custom(z_log_var, z_mean):
    """Build a VAE loss (reconstruction MSE + KL divergence) closed over the
    latent-distribution tensors of the encoder."""
    def vae_loss(x, x_decoded_mean):
        print('shape', x.shape, x_decoded_mean.shape)
        reconstruction = objectives.mean_squared_error(x, x_decoded_mean)
        kl = -0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
        return reconstruction + kl
    return vae_loss
if __name__ == "__main__":
    # BUG FIX: ModelBuilder() with no arguments raised TypeError — the
    # constructor requires the four data splits. Use placeholders so this
    # remains a pure construction smoke test.
    a = ModelBuilder(None, None, None, None)
| [
"keras.layers.Conv2D",
"keras.backend.shape",
"keras.utils.to_categorical",
"keras.layers.Activation",
"keras.layers.Dense",
"keras.objectives.mean_squared_error",
"keras.backend.square",
"keras.layers.LSTM",
"keras.layers.concatenate",
"keras.models.Model",
"keras.backend.exp",
"keras.optimiz... | [((15235, 15270), 'keras.backend.random_normal', 'K.random_normal', ([], {'shape': '(batch, dim)'}), '(shape=(batch, dim))\n', (15250, 15270), True, 'from keras import backend as K\n'), ((1213, 1252), 'keras.layers.Input', 'Input', ([], {'shape': '(None, num_encoder_tokens)'}), '(shape=(None, num_encoder_tokens))\n', (1218, 1252), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((1271, 1306), 'keras.layers.LSTM', 'LSTM', (['latent_dim'], {'return_state': '(True)'}), '(latent_dim, return_state=True)\n', (1275, 1306), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((1582, 1621), 'keras.layers.Input', 'Input', ([], {'shape': '(None, num_decoder_tokens)'}), '(shape=(None, num_decoder_tokens))\n', (1587, 1621), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((1858, 1916), 'keras.layers.LSTM', 'LSTM', (['latent_dim'], {'return_sequences': '(True)', 'return_state': '(True)'}), '(latent_dim, return_sequences=True, return_state=True)\n', (1862, 1916), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((2077, 2124), 'keras.layers.Dense', 'Dense', (['num_decoder_tokens'], {'activation': '"""softmax"""'}), "(num_decoder_tokens, activation='softmax')\n", (2082, 2124), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, 
TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((2322, 2378), 'keras.models.Model', 'Model', (['[encoder_inputs, decoder_inputs]', 'decoder_outputs'], {}), '([encoder_inputs, decoder_inputs], decoder_outputs)\n', (2327, 2378), False, 'from keras.models import Model, Sequential\n'), ((2672, 2684), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2682, 2684), False, 'from keras.models import Model, Sequential\n'), ((3880, 3890), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3888, 3890), True, 'import matplotlib.pyplot as plt\n'), ((4021, 4068), 'keras.layers.Input', 'Input', ([], {'shape': '(input_dim,)', 'name': '"""encoder_input"""'}), "(shape=(input_dim,), name='encoder_input')\n", (4026, 4068), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((4380, 4422), 'keras.layers.Dense', 'Dense', (['intermediate_dim'], {'activation': '"""relu"""'}), "(intermediate_dim, activation='relu')\n", (4385, 4422), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((4446, 4462), 'keras.layers.Dense', 'Dense', (['input_dim'], {}), '(input_dim)\n', (4451, 4462), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((4674, 4727), 'keras.models.Model', 'Model', (['inputs', '[z_mean, z_log_var, z]'], {'name': '"""encoder"""'}), "(inputs, [z_mean, z_log_var, z], name='encoder')\n", (4679, 4727), False, 'from keras.models import 
Model, Sequential\n'), ((4818, 4844), 'keras.layers.Input', 'Input', ([], {'shape': '(latent_dim,)'}), '(shape=(latent_dim,))\n', (4823, 4844), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((4962, 4999), 'keras.models.Model', 'Model', (['decoder_input', '_x_decoded_mean'], {}), '(decoder_input, _x_decoded_mean)\n', (4967, 4999), False, 'from keras.models import Model, Sequential\n'), ((5047, 5092), 'keras.models.Model', 'Model', (['inputs', 'x_decoded_mean'], {'name': '"""vae_mlp"""'}), "(inputs, x_decoded_mean, name='vae_mlp')\n", (5052, 5092), False, 'from keras.models import Model, Sequential\n'), ((5528, 5540), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (5538, 5540), False, 'from keras.models import Model, Sequential\n'), ((6068, 6080), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (6078, 6080), False, 'from keras.models import Model, Sequential\n'), ((6825, 6837), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (6835, 6837), False, 'from keras.models import Model, Sequential\n'), ((7613, 7635), 'keras.layers.Input', 'Input', ([], {'shape': 'input_dim'}), '(shape=input_dim)\n', (7618, 7635), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((8045, 8081), 'keras.layers.dot', 'dot', (['[decoder, encoder]'], {'axes': '[2, 2]'}), '([decoder, encoder], axes=[2, 2])\n', (8048, 8081), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((8210, 8248), 'keras.layers.dot', 'dot', (['[attention, 
encoder]'], {'axes': '[2, 1]'}), '([attention, encoder], axes=[2, 1])\n', (8213, 8248), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((8319, 8350), 'keras.layers.concatenate', 'concatenate', (['[context, decoder]'], {}), '([context, decoder])\n', (8330, 8350), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((8771, 8818), 'keras.models.Model', 'Model', ([], {'inputs': '[encoder_input]', 'outputs': '[output]'}), '(inputs=[encoder_input], outputs=[output])\n', (8776, 8818), False, 'from keras.models import Model, Sequential\n'), ((9175, 9187), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (9185, 9187), False, 'from keras.models import Model, Sequential\n'), ((9943, 9955), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (9953, 9955), False, 'from keras.models import Model, Sequential\n'), ((10585, 10603), 'keras.optimizers.Adam', 'Adam', ([], {'clipnorm': '(1.0)'}), '(clipnorm=1.0)\n', (10589, 10603), False, 'from keras.optimizers import Adam\n'), ((11346, 11380), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""loss_train_test.png"""'], {}), "('loss_train_test.png')\n", (11357, 11380), True, 'import matplotlib.pyplot as plt\n'), ((13350, 13368), 'keras.optimizers.Adam', 'Adam', ([], {'clipnorm': '(1.0)'}), '(clipnorm=1.0)\n', (13354, 13368), False, 'from keras.optimizers import Adam\n'), ((14274, 14308), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""loss_train_test.png"""'], {}), "('loss_train_test.png')\n", (14285, 14308), True, 'import matplotlib.pyplot as plt\n'), ((15114, 15129), 'keras.backend.shape', 'K.shape', (['z_mean'], {}), '(z_mean)\n', (15121, 15129), True, 'from 
keras import backend as K\n'), ((15143, 15162), 'keras.backend.int_shape', 'K.int_shape', (['z_mean'], {}), '(z_mean)\n', (15154, 15162), True, 'from keras import backend as K\n'), ((15476, 15524), 'keras.objectives.mean_squared_error', 'objectives.mean_squared_error', (['x', 'x_decoded_mean'], {}), '(x, x_decoded_mean)\n', (15505, 15524), False, 'from keras import objectives\n'), ((3051, 3067), 'keras.layers.Dense', 'Dense', (['input_dim'], {}), '(input_dim)\n', (3056, 3067), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((4081, 4123), 'keras.layers.Dense', 'Dense', (['intermediate_dim'], {'activation': '"""relu"""'}), "(intermediate_dim, activation='relu')\n", (4086, 4123), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((4149, 4181), 'keras.layers.Dense', 'Dense', (['latent_dim'], {'name': '"""z_mean"""'}), "(latent_dim, name='z_mean')\n", (4154, 4181), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((4205, 4240), 'keras.layers.Dense', 'Dense', (['latent_dim'], {'name': '"""z_log_var"""'}), "(latent_dim, name='z_log_var')\n", (4210, 4240), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((4281, 4307), 'keras.layers.Lambda', 'Lambda', (['sampling'], {'name': '"""z"""'}), "(sampling, name='z')\n", (4287, 4307), False, 'from keras.layers import 
Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((5624, 5683), 'keras.layers.CuDNNLSTM', 'CuDNNLSTM', (['(64)'], {'return_sequences': '(True)', 'input_shape': 'input_dim'}), '(64, return_sequences=True, input_shape=input_dim)\n', (5633, 5683), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((5703, 5740), 'keras.layers.CuDNNLSTM', 'CuDNNLSTM', (['(128)'], {'return_sequences': '(True)'}), '(128, return_sequences=True)\n', (5712, 5740), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((5760, 5772), 'keras.layers.Dropout', 'Dropout', (['(0.8)'], {}), '(0.8)\n', (5767, 5772), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((5921, 5942), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (5931, 5942), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((6247, 6259), 'keras.layers.Dropout', 'Dropout', (['(0.4)'], {}), '(0.4)\n', (6254, 6259), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((6404, 
6416), 'keras.layers.Dropout', 'Dropout', (['(0.4)'], {}), '(0.4)\n', (6411, 6416), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((6526, 6547), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (6536, 6547), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((6856, 6905), 'keras.layers.Embedding', 'Embedding', (['NUM_CLASSES', '(32)'], {'input_shape': 'input_dim'}), '(NUM_CLASSES, 32, input_shape=input_dim)\n', (6865, 6905), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((7065, 7077), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (7072, 7077), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((7164, 7176), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (7171, 7176), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((7302, 7323), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (7312, 7323), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, 
Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((7655, 7704), 'keras.layers.Embedding', 'Embedding', (['NUM_CLASSES', '(32)'], {'input_shape': 'input_dim'}), '(NUM_CLASSES, 32, input_shape=input_dim)\n', (7664, 7704), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((7812, 7824), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (7819, 7824), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((7927, 7939), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (7934, 7939), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((8102, 8141), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {'name': '"""attention"""'}), "('softmax', name='attention')\n", (8112, 8141), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((9457, 9510), 'keras.layers.GRU', 'GRU', (['(64)'], {'return_sequences': '(True)', 'input_shape': 'input_dim'}), '(64, return_sequences=True, input_shape=input_dim)\n', (9460, 9510), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((9530, 9550), 'keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.3)'}), '(alpha=0.3)\n', (9539, 
9550), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((9815, 9836), 'keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (9825, 9836), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((9975, 10021), 'keras.layers.Conv2D', 'Conv2D', (['(32)'], {'kernel_size': '(3, 3)', 'padding': '"""same"""'}), "(32, kernel_size=(3, 3), padding='same')\n", (9981, 10021), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((10064, 10078), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {}), '()\n', (10076, 10078), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((10098, 10144), 'keras.layers.Conv2D', 'Conv2D', (['(64)'], {'kernel_size': '(5, 5)', 'padding': '"""same"""'}), "(64, kernel_size=(5, 5), padding='same')\n", (10104, 10144), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((10164, 10178), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {}), '()\n', (10176, 10178), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, 
Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((10198, 10207), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (10205, 10207), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((15291, 15313), 'keras.backend.exp', 'K.exp', (['(0.5 * z_log_var)'], {}), '(0.5 * z_log_var)\n', (15296, 15313), True, 'from keras import backend as K\n'), ((5883, 5900), 'keras.layers.Dense', 'Dense', (['output_dim'], {}), '(output_dim)\n', (5888, 5900), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((6452, 6469), 'keras.layers.Dense', 'Dense', (['output_dim'], {}), '(output_dim)\n', (6457, 6469), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((6990, 7021), 'keras.layers.LSTM', 'LSTM', (['(64)'], {'return_sequences': '(True)'}), '(64, return_sequences=True)\n', (6994, 7021), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((7111, 7143), 'keras.layers.LSTM', 'LSTM', (['(128)'], {'return_sequences': '(True)'}), '(128, return_sequences=True)\n', (7115, 7143), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((7212, 7229), 'keras.layers.Dense', 'Dense', (['output_dim'], {}), '(output_dim)\n', 
(7217, 7229), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((7752, 7783), 'keras.layers.LSTM', 'LSTM', (['(64)'], {'return_sequences': '(True)'}), '(64, return_sequences=True)\n', (7756, 7783), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((7866, 7898), 'keras.layers.LSTM', 'LSTM', (['(128)'], {'return_sequences': '(True)'}), '(128, return_sequences=True)\n', (7870, 7898), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((7982, 8014), 'keras.layers.LSTM', 'LSTM', (['(128)'], {'return_sequences': '(True)'}), '(128, return_sequences=True)\n', (7986, 8014), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((8628, 8660), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""softmax"""'}), "(128, activation='softmax')\n", (8633, 8660), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((9223, 9269), 'keras.layers.Conv2D', 'Conv2D', (['(32)'], {'kernel_size': '(3, 3)', 'padding': '"""same"""'}), "(32, kernel_size=(3, 3), padding='same')\n", (9229, 9269), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, 
MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((9329, 9343), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {}), '()\n', (9341, 9343), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((9380, 9389), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (9387, 9389), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((9426, 9435), 'keras.layers.Dense', 'Dense', (['(32)'], {}), '(32)\n', (9431, 9435), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((9651, 9663), 'keras.layers.Dropout', 'Dropout', (['(0.8)'], {}), '(0.8)\n', (9658, 9663), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((9699, 9735), 'keras.layers.Dense', 'Dense', (['(input_dim[-2] * input_dim[-3])'], {}), '(input_dim[-2] * input_dim[-3])\n', (9704, 9735), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((10243, 10252), 'keras.layers.Dense', 'Dense', (['(32)'], {}), '(32)\n', (10248, 10252), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, 
GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((2781, 2858), 'keras.layers.Dense', 'Dense', ([], {'units': 'intermediate_dim[i]', 'activation': '"""relu"""', 'input_shape': '(input_dim,)'}), "(units=intermediate_dim[i], activation='relu', input_shape=(input_dim,))\n", (2786, 2858), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((2936, 2999), 'keras.layers.Dense', 'Dense', ([], {'units': 'intermediate_dim[i]', 'activation': '"""intermediate_dim"""'}), "(units=intermediate_dim[i], activation='intermediate_dim')\n", (2941, 2999), False, 'from keras.layers import Input, LSTM, Dense, Lambda, Dropout, TimeDistributed, Activation, Conv2D, MaxPooling2D, Flatten, Convolution2D, GRU, LeakyReLU, CuDNNGRU, Embedding, Bidirectional, dot, concatenate, CuDNNLSTM\n'), ((12354, 12398), 'keras.utils.to_categorical', 'to_categorical', (['output_note'], {'num_classes': '(128)'}), '(output_note, num_classes=128)\n', (12368, 12398), False, 'from keras.utils import to_categorical\n'), ((12875, 12919), 'keras.utils.to_categorical', 'to_categorical', (['output_note'], {'num_classes': '(128)'}), '(output_note, num_classes=128)\n', (12889, 12919), False, 'from keras.utils import to_categorical\n'), ((15593, 15609), 'keras.backend.exp', 'K.exp', (['z_log_var'], {}), '(z_log_var)\n', (15598, 15609), True, 'from keras import backend as K\n'), ((6139, 6147), 'keras.regularizers.l2', 'l2', (['(0.01)'], {}), '(0.01)\n', (6141, 6147), False, 'from keras.regularizers import l1, l2\n'), ((6171, 6179), 'keras.regularizers.l2', 'l2', (['(0.01)'], {}), '(0.01)\n', (6173, 6179), False, 'from keras.regularizers import l1, l2\n'), ((6319, 6327), 'keras.regularizers.l2', 'l2', (['(0.01)'], {}), '(0.01)\n', (6321, 6327), False, 'from keras.regularizers import l1, l2\n'), ((6351, 6359), 
'keras.regularizers.l2', 'l2', (['(0.01)'], {}), '(0.01)\n', (6353, 6359), False, 'from keras.regularizers import l1, l2\n'), ((12270, 12318), 'utils.convert_chord_indices_to_embeddings', 'convert_chord_indices_to_embeddings', (['input_chord'], {}), '(input_chord)\n', (12305, 12318), False, 'from utils import convert_chord_indices_to_embeddings\n'), ((12791, 12839), 'utils.convert_chord_indices_to_embeddings', 'convert_chord_indices_to_embeddings', (['input_chord'], {}), '(input_chord)\n', (12826, 12839), False, 'from utils import convert_chord_indices_to_embeddings\n'), ((15574, 15590), 'keras.backend.square', 'K.square', (['z_mean'], {}), '(z_mean)\n', (15582, 15590), True, 'from keras import backend as K\n'), ((12426, 12461), 'numpy.expand_dims', 'np.expand_dims', (['input_chord'], {'axis': '(0)'}), '(input_chord, axis=0)\n', (12440, 12461), True, 'import numpy as np\n'), ((12490, 12525), 'numpy.expand_dims', 'np.expand_dims', (['output_note'], {'axis': '(0)'}), '(output_note, axis=0)\n', (12504, 12525), True, 'import numpy as np\n'), ((12947, 12982), 'numpy.expand_dims', 'np.expand_dims', (['input_chord'], {'axis': '(0)'}), '(input_chord, axis=0)\n', (12961, 12982), True, 'import numpy as np\n'), ((13011, 13046), 'numpy.expand_dims', 'np.expand_dims', (['output_note'], {'axis': '(0)'}), '(output_note, axis=0)\n', (13025, 13046), True, 'import numpy as np\n')] |
# Array puzzles
def test_remove_duplicates_sorted():
    """Remove duplicates from a list, producing a sorted result."""
    values = [1,2,2,3,1,1,2,3,4,1,2,4,5]
    def remove_dup(items):
        # Build a sorted, duplicate-free list by ordered insertion.
        result = []
        for item in items:
            pos = 0
            while pos < len(result) and result[pos] < item:
                pos += 1
            # Insert only if the value is not already present.
            if pos == len(result) or result[pos] != item:
                result.insert(pos, item)
        return result
    assert remove_dup(values) == [1,2,3,4,5]
def test_median_one_pass():
    """Track the (lower-middle) median while inserting one value at a time."""
    data = [4,3,1,5,3,1,2,5,3,1,5]
    def median(values):
        ordered = []
        mid = 0  # index of the lower-middle element
        for count, value in enumerate(values):
            # Ordered insertion keeps the running list sorted.
            pos = 0
            while pos < len(ordered) and ordered[pos] < value:
                pos += 1
            ordered.insert(pos, value)
            # The median index advances once for every two elements seen.
            if count > 0 and count % 2 == 0:
                mid += 1
        return ordered[mid]
    assert median(data) == 3
def test_trap_rain_water():
    # REF: https://leetcode.com/problems/trapping-rain-water/
    def eval(blocks):
        """Total water trapped between the bars of `blocks`.

        Scans for a left wall, then tries to close each concave region on
        the right; on success it recurses on the remaining bars.
        NOTE(review): shadows the builtin `eval`.
        """
        sum_vol = 0
        h = 0  # height of the current candidate left wall
        i = 0  # index of the current bar
        for b in blocks:
            if b>h:
                # Perhaps begin of left bound
                h = b
            elif b<h:
                # Concave!
                vol, next_blocks = project_to_right(h, blocks[i:])
                if vol>0:
                    # concave has right boundary
                    # Break and start a new scan from next right bound
                    sum_vol += vol
                    sum_vol += eval(next_blocks)
                    return sum_vol
                else:
                    # concave has no right boundary, startover
                    h = b
            i += 1
        return sum_vol
    def project_to_right(h, blocks):
        """Accumulate water below wall height `h` until a bar >= h closes
        the basin; returns (volume, bars from the closing wall onward),
        or (0, []) when no closing wall exists."""
        vol = 0
        has_end = False  # NOTE(review): never used
        i = 0            # NOTE(review): never used
        next_blocks = blocks[:]
        for b in blocks:
            if b>=h:
                # Find right boundary
                return vol, next_blocks
            else:
                vol += h-b
                next_blocks = next_blocks[1:]
        return 0, [] # No right boundary
    assert eval([0,1,0,2,1,0,1,3,2,1,2,1]) == 6
    assert eval([4,2,0,3,2,5]) == 9
def test_first_missing_positives():
    # REF: https://leetcode.com/problems/first-missing-positive/
    def add_sorted(v, ws):
        """Insert v into the sorted list ws, keeping it sorted."""
        if len(ws)==0:
            return [v]
        elif v<=ws[0]:
            return [v]+ws
        else:
            return [ws[0]]+add_sorted(v,ws[1:])
    def find_it(ns):
        """Return the smallest positive integer missing from ns.

        Fixes over the previous version: duplicates no longer break the
        scan, and empty / all-non-positive inputs return 1 instead of
        hitting a stale (or undefined) loop variable.
        """
        sorted_ns = []
        for n in ns:
            if n>0:
                sorted_ns = add_sorted(n, sorted_ns)
        expect = 1
        for n in sorted_ns:
            if n > expect:
                return expect      # gap found
            if n == expect:
                expect += 1        # duplicates (n < expect) are skipped
        # All of 1..expect-1 were present (also covers empty input).
        return expect
    assert find_it([1,2,0]) == 3
    assert find_it([3,4,-1,1]) == 2
    assert find_it([7,8,9,11,12]) == 1
    assert find_it([]) == 1          # previously a NameError
    assert find_it([-3,-1]) == 1     # no positives at all
    assert find_it([1,1,2]) == 3     # duplicates no longer mislead
def test_nested_flatten():
    def flatten(vs):
        """Flatten arbitrarily nested lists into a single flat list."""
        flat = []
        for element in vs:
            if type(element) is list:
                flat.extend(flatten(element))
            else:
                flat.append(element)
        return flat
    assert flatten([1,2,3]) == [1,2,3]
    assert flatten([[1,2,[3]],[4]]) == [1,2,3,4]
    assert flatten([[],[1,[2,3,[4]]],5]) == [1,2,3,4,5]
def test_spiral_matrix():
    # REF: https://leetcode.com/problems/spiral-matrix/
    def spiral_walk(mat):
        """Return the matrix elements in clockwise spiral order."""
        result = []
        top, bottom = 0, len(mat) - 1
        left, right = 0, len(mat[0]) - 1
        # Peel one ring per pass, shrinking the four boundaries inward.
        while top <= bottom and left <= right:
            for c in range(left, right + 1):          # walk right along the top
                result.append(mat[top][c])
            top += 1
            for r in range(top, bottom + 1):          # walk down the right edge
                result.append(mat[r][right])
            right -= 1
            if top <= bottom:
                for c in range(right, left - 1, -1):  # walk left along the bottom
                    result.append(mat[bottom][c])
                bottom -= 1
            if left <= right:
                for r in range(bottom, top - 1, -1):  # walk up the left edge
                    result.append(mat[r][left])
                left += 1
        return result
    assert spiral_walk([[1,2,3,4],[5,6,7,8],[9,10,11,12]]) == [1,2,3,4,8,12,11,10,9,5,6,7]
    assert spiral_walk( [[1,2,3],[4,5,6],[7,8,9]]) == [1,2,3,6,9,8,7,4,5]
def test_reverse_make_equal():
    # REF: https://www.facebookrecruiting.com/portal/coding_practice_question/?problem_id=2869293499822992
    def are_they_equal(array_a, array_b):
        """Can one contiguous segment of array_b be reversed so that
        array_b equals array_a?  (Arrays assumed the same length.)

        Bug fixes: the old `min_index > 0` guards failed whenever the
        first mismatch sat at index 0, identical arrays returned False,
        and array_b was mutated in place.
        """
        # First index where the arrays disagree, and the candidate end of
        # the segment (where b shows the value a expects at the start).
        min_index = -1
        max_index = -1
        first_missing = None
        for i, (a, b) in enumerate(zip(array_a, array_b)):
            if a != b:
                if min_index < 0:
                    min_index = i
                    first_missing = a
                elif b == first_missing:
                    max_index = i
                    break
        if min_index < 0:
            return True  # already identical
        if max_index < 0:
            max_index = len(array_a) - 1
        # Reverse the candidate segment on a copy (no caller-visible mutation).
        candidate = array_b[:]
        candidate[min_index:max_index + 1] = candidate[min_index:max_index + 1][::-1]
        return candidate == array_a
    assert are_they_equal([1, 2, 3, 4], [1, 4, 3, 2]) == True
    assert are_they_equal([1, 2], [1, 2]) == True    # identical arrays
    assert are_they_equal([2, 1], [1, 2]) == True    # mismatch at index 0
    assert are_they_equal([1, 2, 3], [3, 1, 2]) == False
def test_minimum_path_sum_triangle_array():
    # REF: https://leetcode.com/problems/triangle/
    # Input: triangle = [[2],[3,4],[6,5,7],[4,1,8,3]]
    # 2
    # 3 4
    # 6 5 7
    # 4 1 8 3
    # The minimum path sum from top to bottom is 2 + 3 + 5 + 1 = 11
    def min_path_sum(tri):
        """Minimum top-to-bottom path sum where a step from index i may
        only move to index i or i+1 of the next row.

        Bottom-up DP: the previous greedy min-per-row ignored the
        adjacency constraint and was only accidentally correct on the
        original test inputs.
        """
        best = list(tri[-1])  # best[j]: min path sum from row r, index j, down
        for row in reversed(tri[:-1]):
            best = [v + min(best[j], best[j + 1]) for j, v in enumerate(row)]
        return best[0]
    assert min_path_sum([[2],[3,4],[6,5,7],[4,1,8,3]]) == 11
    assert min_path_sum([[-10]]) == -10
    # Adjacency matters: index 0 cannot jump straight to index 2.
    assert min_path_sum([[1],[2,100],[100,100,3]]) == 103
def test_num_pair_sum():
    # REF: https://www.facebookrecruiting.com/portal/coding_practice_question/?problem_id=840934449713537
    def numberOfWays(arr, k):
        """Count index pairs i < j with arr[i] + arr[j] == k."""
        return len(pair(arr, k))
    def pair(arr, k):
        """List the value pairs [a, b] (in input order) summing to k."""
        found = []
        for i in range(len(arr)):
            for j in range(i + 1, len(arr)):
                if arr[i] + arr[j] == k:
                    found.append([arr[i], arr[j]])
        return found
    assert pair([1,2,3,4], 5) == [[1,4],[2,3]]
    assert numberOfWays([1,2,3,4], 5) == 2
def test_combination():
    def gen_comb(arr, k):
        """All k-element combinations of arr, preserving input order."""
        return comb(arr, k, [])
    def comb(arr, k, prefix):
        # A complete combination: nothing more to add.
        if len(prefix) == k:
            return [prefix]
        results = []
        for idx in range(len(arr)):
            # Extend the prefix with arr[idx]; only later elements may follow.
            results.extend(comb(arr[idx + 1:], k, prefix + [arr[idx]]))
        return results
    assert gen_comb([1,2,3], 2) == [[1,2],[1,3],[2,3]]
    assert gen_comb([1,2,3,4], 3) == [[1,2,3],[1,2,4],[1,3,4],[2,3,4]]
def test_sum_combination():
    # Find subarrays which sum up to make a number
    # less than or equal the threshold
    def find_comb(arr, k):
        """Ordered sub-sequences of arr with running sum <= k.

        NOTE(review): the precise contract is unusual -- pairs (and their
        extensions) with sum <= k, plus single elements exactly equal to
        k found during recursion; it is pinned by the asserts below.
        """
        combs = expand_comb(arr, k)
        return combs
    def expand_comb(arr, k):
        comb = []
        for i, a in enumerate(arr):
            if a==k:
                # Found the last element of the combination
                comb.append([a])
                continue
            tail = arr[i+1:]
            # take one element from tail
            for j,b in enumerate(tail):
                if a+b <= k:
                    cand = [a,b]  # NOTE(review): unused
                    comb.append([a,b])
                    # recursion
                    # extend [a, b] with deeper combinations drawn from the
                    # rest of the tail against the reduced budget k-a-b
                    for c in find_comb(tail[j+1:], k-a-b):
                        comb.append([a,b] + c)
        return comb
    assert find_comb([1,2,3,4], 5) == [[1,2],[1,3],[1,4],[2,3]]
    assert find_comb([1,2,3,1], 4) == [[1,2],[1,2,1],[1,3],[1,1],[2,1],[3,1]]
def test_lego_blocks():
    # Given a list of 3x3 lego blocks, find matches (may need to rotate)
    def find_matches(blocks):
        """Return index pairs [i, j] (i < j) of blocks that interlock
        under some 90-degree rotation of either block."""
        matches = []
        for i,b in enumerate(blocks):
            rotatedB = gen_rotates(b)
            assert(len(rotatedB)==4)
            for j, c in enumerate(blocks[i+1:]):
                rotatedC = gen_rotates(c)
                assert(len(rotatedC)==4)
                found_match = False
                for rb in rotatedB:
                    for rc in rotatedC:
                        if fit(rb, rc):
                            # j indexes the tail, so the absolute index is i+j+1
                            matches.append([i, i+j+1])
                            found_match = True
                            break
                    if found_match:
                        break
        return matches
    def fit(a,b):
        """Two blocks fit when every cell pair sums to exactly 1
        (a stud meets a gap everywhere)."""
        for i in range(len(a)):
            for j in range(len(a)):
                if a[i][j] + b[i][j] != 1:
                    return False
        return True
    def gen_rotates(b):
        # Rotate clockwise by 90*
        # (0,0) ---> (N,0) (1,0) (0,0)
        # (1,0)
        # (N,0)
        """Return b plus its three successive 90-degree clockwise rotations."""
        rot = [b]
        w = b[:]
        N = len(b)
        for n in range(3):
            ww = []
            for i in range(N):
                # new row i is old column i read bottom-to-top
                row = [w[N-1-j][i] for j in range(N)]
                ww.append(row)
            rot.append(ww)
            w = ww[:]
        return rot
    blocks1 = [
        [[1,1,0],[1,1,0],[1,1,0]],
        [[0,1,0],[0,1,0],[0,1,0]],
        [[1,1,1],[0,0,0],[1,1,1]],
        [[0,0,0],[0,1,0],[0,0,0]]
    ]
    assert find_matches(blocks1) == [[1,2]]
    blocks2 = [
        [[1,1,0],[1,1,0],[1,1,0]],
        [[0,1,0],[0,1,0],[0,1,0]],
        [[1,1,1],[0,0,0],[1,1,1]],
        [[0,0,0],[0,1,0],[0,0,0]],
        [[1,1,1],[0,0,0],[0,0,0]]
    ]
    assert find_matches(blocks2) == [[0,4],[1,2]]
def test_rotate_submatrix():
    # Given big matrix A: NxN
    # how many submatrices we can rotate to make them
    # identical to the goal matrix?
    def count_rotate_submat(M, G):
        """Count windows of M (sized like G) equal to any rotation of G."""
        msize, gsize = len(M), len(G)
        variants = gen_rotates(G)
        total = 0
        # Slide a gsize x gsize window over M (both assumed square).
        for top in range(msize - gsize + 1):
            for left in range(msize - gsize + 1):
                window = [row[left:left + gsize] for row in M[top:top + gsize]]
                # Each window counts at most once, whichever rotation matches.
                if any(window == variant for variant in variants):
                    total += 1
        return total
    def gen_rotates(M):
        """M itself plus its three successive 90-degree clockwise rotations."""
        rotations = [M]
        current = M
        for _ in range(3):
            # Clockwise rotation: reverse the rows, then transpose.
            current = [list(col) for col in zip(*current[::-1])]
            rotations.append(current)
        return rotations
    # Test1
    A1 = [
        [1,1,1,1,1],
        [1,1,2,1,1],
        [1,1,1,1,1],
        [1,1,1,1,1],
        [1,1,1,1,1]
    ]
    G1 = [
        [0, 0],
        [0, 1]
    ]
    #assert(count_rotate_submat(A1, G1)) == 0
    # Test2
    A2 = [
        [1,1,0,1,1],
        [0,1,2,1,0],
        [0,0,0,0,0],
        [0,1,1,0,1],
        [1,1,1,1,1]
    ]
    G2 = [
        [0, 0],
        [0, 1]
    ]
    assert(count_rotate_submat(A2, G2)) == 5
def test_seating_arrangement():
    # REF: https://www.facebookrecruiting.com/portal/coding_practice_question/?problem_id=2444722699191194
    def minOverallAwkwardness(arr):
        """Minimal possible maximum height gap between circular neighbours.

        Strategy (unchanged): sort, seat every other guest ascending,
        then the rest descending, and close the circle at the smallest
        guest.  The leftover debug print() was removed.
        """
        # sorted: 1 2 3 4
        # take every other element 1 3
        # append with reverse of the remaining 1 3 + 4 2
        sarr = sorted(arr)
        minarr = sarr[0::2]
        remain = sarr[1::2][::-1]
        marr = minarr + remain + [minarr[0]]  # close the circle
        # Awkwardness is the largest adjacent gap around the table.
        awk = 0
        for a, b in zip(marr, marr[1:]):
            awk = max(awk, abs(a - b))
        return awk
    arr_1 = [5, 10, 6, 8]
    expected_1 = 4
    output_1 = minOverallAwkwardness(arr_1)
    assert expected_1 == output_1
    arr_2 = [1, 2, 5, 3, 7]
    expected_2 = 4
    output_2 = minOverallAwkwardness(arr_2)
    assert expected_2 == output_2
def test_quick_sort():
    """In-place quicksort using the Lomuto partition scheme."""
    def qsort(arr):
        _qsort(arr, 0, len(arr) - 1)
        return arr
    def _qsort(arr, lo, hi):
        if lo >= hi:
            return
        p = _partition(arr, lo, hi)
        _qsort(arr, lo, p - 1)   # left of the pivot
        _qsort(arr, p + 1, hi)   # right of the pivot
    def _partition(arr, lo, hi):
        """Place arr[hi] (the pivot) into its final position."""
        pivot = arr[hi]
        boundary = lo  # everything left of boundary is <= pivot
        for k in range(lo, hi):
            if arr[k] <= pivot:
                arr[k], arr[boundary] = arr[boundary], arr[k]
                boundary += 1
        # Finally swap the pivot itself into place.
        arr[boundary], arr[hi] = arr[hi], arr[boundary]
        return boundary
    assert qsort([1,2,3]) == [1,2,3]
    assert qsort([4,3,1,5]) == [1,3,4,5]
    assert qsort([1,1,5,3]) == [1,1,3,5]
def test_minimum_bound_rect():
    """
    Find area of minimum rectangle covering all "1" in the matrix, e.g.
    0 0 0 0
    0 1 0 0
    0 0 1 0
    0 1 1 1
    """
    def min_rect(mat):
        """Area of the minimal axis-aligned bounding box of the 1-cells.

        The previous version initialised maxx/maxy to the matrix corner
        and could never shrink them, so any grid whose 1s did not touch
        the bottom-right region reported too large an area.
        """
        ones = [(y, x)
                for y, row in enumerate(mat)
                for x, v in enumerate(row) if v == 1]
        if not ones:
            return 0  # no 1s at all: empty rectangle
        ys = [y for y, _ in ones]
        xs = [x for _, x in ones]
        return (max(ys) - min(ys) + 1) * (max(xs) - min(xs) + 1)
    R1 = [[1,0],[0,1]]
    assert(min_rect(R1)) == 4
    R2 = [[0,0,0,0],[0,1,0,1],[0,0,1,0],[0,1,1,0]]
    assert(min_rect(R2)) == 9
    R3 = [[0,0,0,0],[0,0,1,0],[0,0,1,0],[0,1,0,1],[0,0,0,1]]
    assert(min_rect(R3)) == 12
    # Regression: the bottom-right corner must be able to shrink.
    assert min_rect([[1,0],[0,0]]) == 1
    assert min_rect([[0,0],[0,0]]) == 0
def test_steepest():
    """
    Find the maximum steep of the terrain.
    Considering 8 directions around cell
    """
    def maxsteep(mat):
        """Largest absolute height difference between any pair of
        8-connected neighbouring cells.

        The heap was replaced by plain max-tracking: same result, and a
        matrix with no neighbour pairs (1x1) now returns 0 instead of
        crashing on an empty heap.
        """
        H, W = len(mat), len(mat[0])
        # Only forward-pointing directions are needed; the backward ones
        # are covered when the loop reaches the neighbour cell itself.
        deltas = [(-1, 1), (0, 1), (1, 1), (1, 0), (1, -1)]  # (dy, dx)
        steepest = 0
        for y in range(H):
            for x in range(W):
                for dy, dx in deltas:
                    ny, nx = y + dy, x + dx
                    if 0 <= nx < W and 0 <= ny < H:
                        steepest = max(steepest, abs(mat[y][x] - mat[ny][nx]))
        return steepest
    R1 = [[0,0,0],
          [0,1,0],
          [0,1,0]]
    assert maxsteep(R1) == 1
    R2 = [[0,0,0,0],
          [0,1,2,0],
          [0,1,1,0],
          [0,1,0,0]]
    assert maxsteep(R2) == 2
    R3 = [[0,1,1,1,0],
          [0,1,2,1,1],
          [0,1,3,2,1],
          [0,1,1,1,0]]
    # Bug fix: this assert previously re-tested R2 and never used R3.
    assert maxsteep(R3) == 2
def test_find_min_rotate_sorted_array():
    # REF: https://leetcode.com/problems/find-minimum-in-rotated-sorted-array-ii/
    def min_rotate(arr):
        """Minimum of a rotated sorted array (duplicates allowed).

        The minimum sits right after the only "drop" (arr[i] > arr[i+1]);
        with no drop the array is effectively not rotated, so arr[0] is
        minimal.  Unlike the previous version this does not mutate `arr`
        and handles the all-equal case (which used to return None).
        """
        for prev, cur in zip(arr, arr[1:]):
            if prev > cur:
                return cur
        return arr[0]
    assert min_rotate([1,3,5]) == 1
    assert min_rotate([2,2,2,0,1]) == 0
    assert min_rotate([1,3,3,4,5,6,0]) == 0
    assert min_rotate([1,3,3,4,5,6]) == 1
    assert min_rotate([2,2,2]) == 2          # all equal: used to return None
    a = [3,1,2]
    assert min_rotate(a) == 1 and a == [3,1,2]  # input left untouched
def test_mini_subarray_sum():
    # REF: https://leetcode.com/problems/minimum-size-subarray-sum/
    # Find the `minimum` length of subarray
    # of which sum is equal or greater than target
    def subarray_sum(arr, target):
        """Minimum length of a contiguous subarray with sum >= target,
        or 0 if none exists.

        O(n) sliding window (values assumed non-negative, as with the
        previous O(n^2) heap-based scan it replaces).
        """
        best = 0
        window = 0
        left = 0
        for right, value in enumerate(arr):
            window += value
            # Shrink from the left while the window still reaches target.
            while window >= target:
                length = right - left + 1
                best = length if best == 0 else min(best, length)
                window -= arr[left]
                left += 1
        return best
    assert subarray_sum([2,3,1,2,4,3], 7) == 2
    assert subarray_sum([1,4,4], 4) == 1
    assert subarray_sum([1,1,1,1,1,1,1,1], 11) == 0
def test_fold_spiral():
    """
    Given a 1-d array, fold it clockwise to make a 2d matrix
    """
    def fold(arr):
        import numpy as np
        side = np.sqrt(len(arr))
        if side != np.round(side):
            return []  # length is not a perfect square: cannot fold
        side = int(side)
        grid = [[None for _ in range(side)] for _ in range(side)]
        def vacant(r, c):
            return 0 <= r < side and 0 <= c < side and grid[r][c] is None
        r = c = 0
        for value in arr:
            grid[r][c] = value
            # Move with fixed priority: right, then down, left, up.
            for dr, dc in ((0, 1), (1, 0), (0, -1), (-1, 0)):
                if vacant(r + dr, c + dc):
                    r, c = r + dr, c + dc
                    break
            else:
                return grid  # boxed in on all sides: stop filling
        return grid
    assert fold([1,2,3,4]) == [[1,2],[4,3]]
    assert fold([]) == []
    assert fold([1]) == [[1]]
    assert fold([1,2,3,4,5,6,7,8,9]) == [[1,2,3],[8,9,4],[7,6,5]]
def test_count_swap():
    """
    Given two arrays, find minimum number of element swap to make them identical
    """
    def eq(arr1, arr2):
        # element-wise equality over the common prefix
        return all(a == b for a, b in zip(arr1, arr2))
    def cswap(arr1, arr2):
        """Count swaps applied to arr2 (mutated in place) to align it
        with arr1."""
        if len(arr1) == 0 and len(arr2) == 0:
            return 0
        if eq(arr1, arr2):
            return 0
        swaps = 0
        for i, want in enumerate(arr1):
            if want == arr2[i]:
                continue
            # Pull a matching value forward from the tail of arr2.
            for j in range(i + 1, len(arr1)):
                if arr2[j] == want:
                    arr2[j] = arr2[i]
                    swaps += 1
                    break
        return swaps
    assert cswap([],[]) == 0
    assert cswap([1],[1]) == 0
    assert cswap([1,2,3],[1,3,2]) == 1
    assert cswap([1,1,5,3],[1,5,3,1]) == 2
    assert cswap([1,6,7,3],[3,6,7,1]) == 1
def test_search_word():
    def search(M,w):
        """True when word w can be traced through grid M by steps between
        orthogonally adjacent cells, never reusing a cell.
        NOTE(review): the origin cell itself is never added to V, so a
        path may return to its starting cell -- confirm this is intended.
        """
        V = set()
        # locate all the origins
        cands = []  # NOTE(review): never used
        for i in range(len(M)):
            for j in range(len(M[0])):
                if M[i][j] == w[0]:
                    if walk((i,j), M, V, w[1:]):
                        return True
        return False
    def walk(pos, M, V, w):
        """DFS one step: try each unvisited in-bounds neighbour that
        matches the next letter; V is copied per branch so sibling
        branches do not see each other's visits."""
        if len(w)==0:
            return True
        i, j = pos
        valid = lambda a,b: a>=0 and b>=0 and a<len(M) and b<len(M[0])
        # expand neighbours, DFS
        for di in [-1,0,1]:
            for dj in [-1,0,1]:
                # abs(di)!=abs(dj) keeps only the 4 orthogonal neighbours
                if abs(di)!=abs(dj) and (i+di, j+dj) not in V and valid(i+di, j+dj):
                    if M[i+di][j+dj] == w[0]:
                        V_ = V.copy()
                        V_.add((i+di, j+dj)) # prevent from repeating the cells
                        if walk((i+di, j+dj), M, V_, w[1:]):
                            return True
            return False
    M1 = [['A','A','C','A'],
          ['A','C','C','D'],
          ['D','A','B','C']]
    assert search(M1, 'CAAD') == False
    assert search(M1, 'BCDA') == True
    assert search(M1, 'BFBA') == False
    assert search(M1, 'ACCB') == True
    assert search(M1, 'ADDC') == False
    assert search(M1, 'AADABC') == True
def test_largest_rect_histogram():
    # REF: https://leetcode.com/problems/largest-rectangle-in-histogram/
    def lg(H):
        """Largest rectangle area under the histogram H.

        For each bar, grow the span sideways while the neighbours are at
        least as tall; O(n^2) overall.
        """
        best = 0
        for i, height in enumerate(H):
            lo = i
            while lo > 0 and H[lo - 1] >= height:
                lo -= 1
            hi = i
            while hi < len(H) - 1 and H[hi + 1] >= height:
                hi += 1
            best = max(best, (hi - lo + 1) * height)
        return best
    assert lg([2,1,5,6,2,3]) == 10
    assert lg([2,4]) == 4
def test_interval_arrays():
"""
Given a list of intervals, find total amount of overlapping time.
Union all overlapping
"""
def overlap(intervals):
uni = []
# O(N^2)
for i,t1 in enumerate(intervals):
for t2 in intervals[i+1:]:
sec = intersect(t1,t2)
if len(sec)>0:
uni = union(uni, sec)
if len(uni)==0:
return 0
a,b = uni
return b-a
def intersect(t1,t2):
a1,b1 = t1
a2,b2 = t2
l = max(a1,a2)
u = min(b1,b2)
if l>=u: # no intersection
return []
else:
return [l,u]
def union(t1,t2):
if len(t1)==0:
return t2
a1,b1 = t1
a2,b2 = t2
return [min(a1,b1), max(b1,b2)]
assert intersect([0,10],[10,15]) == []
assert intersect([0,10],[5,10]) == [5,10]
assert intersect([0,10],[5,7]) == [5,7]
assert intersect([5,10],[7,15]) == [7,10]
assert intersect([5,10],[1,2]) == []
assert intersect([5,10],[1,6]) == [5,6]
assert overlap([[1,15],[15,60],[25,60],[45,75]]) == 35
assert overlap([[0,30],[45,50],[35,40]]) == 0
assert overlap([[30,100],[10,20],[20,40],[10,50]]) == 20
def test_search_2dmat():
# REF: https://leetcode.com/problems/search-a-2d-matrix/
# Each row is sorted from left -> right
# Each col is sorted from top -> bottom
def search(M, v):
from functools import reduce
# start from mid point
y = (len(M)-1)//2
x = (len(M[0])-1)//2
if v < M[0][0] or v > M[len(M)-1][len(M[0])-1]:
return False # OOB
# Flatten the matrix into 1-d list (Binary tree)
B = list(reduce(lambda x,y: x+y, M))
# 0 1 [2] 3 4
return bsearch(B, v)
def bsearch(B, v):
if len(B)==0:
return False
i = len(B)//2
if B[i]==v:
return True
if B[i]<v:
return bsearch(B[i+1:], v)
else:
return bsearch(B[:i], v)
M1 = [
[1,5],
[6,9]
]
assert search(M1, 4) == False
M2 = [
[1,1,4,5],
[6,8,9,10],
[12,17,30,35],
[36,40,41,50]
]
assert search(M2, 8) == True
assert search(M2, 50) == True
assert search(M2, 18) == False
assert search(M2, 21) == False
def test_get_closest_number_from_list():
"""
Given a sorted list (max first)
find the closest number to the argument
"""
def search(B, v):
print(B)
if v>=B[0]:
return B[0]
if v<=B[-1]:
return B[-1]
i = len(B)//2
if B[i]==v:
return v
if len(B)==2 and B[0]>v>B[1]:
if B[0]-v < v-B[1]:
return B[0]
else:
return B[1]
if B[i]<v:
return search(B[:i+1], v)
else:
return search(B[i:],v)
assert search([10,7,6,6,6,3,1], 4) == 3
assert search([10,7,6,6,6,3,1], 8) == 7
assert search([10,7,6,6,6,3,1], 11) == 10
assert search([10,7,4,1], 4) == 4
assert search([10,7,4,1], 9) == 10
assert search([10,7,4,1], 0) == 1
def test_remove_duplicates_from_sorted_matrix():
"""
Replace duplicate values of sorted matrix with zeros
"""
def redup(M):
p = None
x,y = None,None
for i in range(len(M)):
for j in range(len(M[i])):
if p is not None and M[i][j]==p:
M[i][j] = 0
M[x][y] = 0 # Paint the first element of duplicate
if M[i][j] != p: # Memo the beginning of the duplicate
x,y = i,j
p = M[i][j]
return M
M1 = [
[3]
]
assert redup(M1) == M1
M2 = [
[1,4],
[7,8]
]
assert redup(M2) == M2
M3 = [
[4,4],
[6,7]
]
assert redup(M3) == [[0,0],[6,7]]
M4 = [
[1,3,3],
[4,4,5],
[5,7,8]
]
assert redup(M4) == [[1,0,0],[0,0,0],[0,7,8]] | [
"functools.reduce",
"heapq.heappop",
"heapq.heappush",
"numpy.round"
] | [((13514, 13524), 'heapq.heappop', 'heappop', (['G'], {}), '(G)\n', (13521, 13524), False, 'from heapq import heappush, heappop\n'), ((14820, 14830), 'heapq.heappop', 'heappop', (['H'], {}), '(H)\n', (14827, 14830), False, 'from heapq import heappush, heappop\n'), ((15157, 15168), 'numpy.round', 'np.round', (['W'], {}), '(W)\n', (15165, 15168), True, 'import numpy as np\n'), ((19870, 19899), 'functools.reduce', 'reduce', (['(lambda x, y: x + y)', 'M'], {}), '(lambda x, y: x + y, M)\n', (19876, 19899), False, 'from functools import reduce\n'), ((14776, 14794), 'heapq.heappush', 'heappush', (['H', '(n + 1)'], {}), '(H, n + 1)\n', (14784, 14794), False, 'from heapq import heappush, heappop\n'), ((13482, 13500), 'heapq.heappush', 'heappush', (['G', '(-diff)'], {}), '(G, -diff)\n', (13490, 13500), False, 'from heapq import heappush, heappop\n')] |
import models.baselines.DNN as DNN
import _settings
import numpy as np
import torch
import pandas as pd
import os
import utils.utils as utils
import datetime
import shutil
import tqdm
import ipdb
import data.dataloader as dld
TRAIN, VALID, TEST = 'train', 'val', 'test'
def pretrain_general(X, Y, seed=_settings.RANDOM_SEED, model_setting = 0, quiet=False):
utils.set_all_seeds(seed)
n_dim = X.shape[1]
output_size = Y.shape[1]
model_class, model_kwargs, train_kwargs = DNN.get_DNN_and_trainkwargs(model_setting)
model = model_class(n_dim=n_dim, output_size=output_size, **model_kwargs)
model.fit(X, Y, verbosity=not quiet, **train_kwargs)
readout_layer = model.model[-1]
model = model.eval()
readout_layer = readout_layer.eval()
return model, readout_layer
def get_raw_data(dataset):
if dataset == _settings.YACHT_NAME:
raw_df = pd.read_fwf(os.path.join(_settings.YACHT_PATH, 'yacht_hydrodynamics.data'), header=None)
raw_df.columns = ["X%d" % d for d in range(raw_df.shape[1] - 1)] + ['Y']
raw_df = raw_df.reset_index()
elif dataset == _settings.HOUSING_NAME:
import sklearn.datasets
data = sklearn.datasets.load_boston()
raw_df = pd.DataFrame(data['data'])
raw_df['Y'] = data['target']
raw_df.columns = ["X%d" % d for d in range(raw_df.shape[1] - 1)] + ['Y']
raw_df = raw_df.reset_index()
elif dataset == _settings.ENERGY_NAME: # Energy NN does not train...
raw_df = pd.read_excel(os.path.join(_settings.ENERGY_PATH, 'ENB2012_data.xlsx'), engine='openpyxl')
# raw_df = raw_df.iloc[:, :10]
# raw_df.columns = ["X%d" % d for d in range(self.raw_df.shape[1] - 2)] + ['Y0', 'Y1']
raw_df = raw_df.iloc[:, :9]
raw_df.columns = ["X%d" % d for d in range(raw_df.shape[1] - 1)] + ['Y']
raw_df = raw_df.reset_index()
elif dataset == _settings.KIN8NM_NAME:
raw_df = pd.read_csv(os.path.join(_settings.KIN8NM_PATH, 'dataset_2175_kin8nm.csv'))
raw_df.columns = ["X%d" % d for d in range(raw_df.shape[1] - 1)] + ['Y']
raw_df = raw_df.reset_index()
elif dataset == _settings.CONCRETE_NAME:
raw_df = pd.read_excel(os.path.join(_settings.CONCRETE_PATH, 'Concrete_Data.xls'))
raw_df.columns = ["X%d" % d for d in range(raw_df.shape[1] - 1)] + ['Y']
raw_df = raw_df.reset_index()
elif dataset == _settings.BIKE_NAME: #The base DNN does not learn anything
from zipfile import ZipFile
archive = ZipFile(os.path.join(_settings.BIKE_PATH, 'Bike-Sharing-Dataset.zip'))
#raw_df = pd.read_csv(archive.open('day.csv')).set_index('dteday', verify_integrity=True).drop('instant',axis=1)
raw_df = pd.read_csv(archive.open('hour.csv')).drop('instant', axis=1)#.set_index('instant', verify_integrity=True)
drop_cols = ['yr', 'mnth', 'dteday']
enum_cols = ['season', 'hr', 'weekday', 'weathersit']
raw_df = raw_df.drop(drop_cols, axis=1)
for enum_col in enum_cols:
ser = raw_df[enum_col]
tdf = pd.get_dummies(ser).rename(columns=lambda x: "%s%d"%(enum_col, x))
raw_df = pd.concat([raw_df, tdf], axis=1).drop(enum_col, axis=1)
raw_df = raw_df.reindex(columns=[c for c in raw_df.columns] + ['cnt'])
raw_df.columns = ["X%d" % d for d in range(raw_df.shape[1] - 1)] + ['Y']
raw_df = raw_df.reset_index()
elif dataset == _settings.SYNT_DJKP:
raise NotImplementedError()
else: #TODO: Add more dataset here
raise Exception()
other_cols = [c for c in raw_df.columns if not (c.startswith('X') or c.startswith('Y'))]
Y_col = ['Y']
X_cols = [c for c in raw_df.columns if c.startswith('X')]
raw_df = raw_df.reindex(columns=other_cols + X_cols + Y_col)
#print(raw_df)
return raw_df
class GeneralDataSplitter:
def __init__(self, dataset=_settings.YACHT_NAME,
seed=7, split_ratio=[60, 20, 20],
model_setting = 0, #DNN
init=False, quiet=False):
key = f'seed{seed}-{"-".join(map(str,split_ratio))}'
self.save_path = os.path.join(_settings.WORKSPACE, dataset, key)
self.seed = seed
self.split_ratio = split_ratio
self.dataset = dataset
self.model_setting = model_setting
self.raw_df = None
self.quiet = quiet
if init: self.initialize()
def initialize(self):
self.raw_df = get_raw_data(self.dataset)
# reserved column names: index, X%d, Y (or Y%d)
self.split_and_save()
if self.model_setting is not None:
self.pretrain()
for split in [TRAIN, VALID, TEST]: self.eval(split)
self.pretrain_resid()
for split in [TRAIN, VALID, TEST]: self.eval_resid(split)
def get_data_dir(self):
return self.save_path
def get_data_path(self, split=TRAIN):
assert split in {TRAIN, VALID, TEST}
return os.path.join(self.save_path, '%s.csv'%split)
def get_checkpoint_dir(self, resid=False):
if resid:
return os.path.join(self.get_checkpoint_dir(resid=False), 'resid')
return os.path.join(self.save_path, 'models', 'model%d'%self.model_setting)
def get_embedding_path(self, split=TRAIN):
return os.path.join(self.save_path, 'preds', 'model%d'%self.model_setting, '%s.csv'%split)
def get_readout_weight_path(self):
return os.path.join(self.save_path, 'preds', 'model%d'%self.model_setting, 'readout.pt')
def get_preds_path(self, split=TRAIN, resid=False):
if resid:
dir_ = os.path.dirname(self.get_preds_path(split, resid=False))
return os.path.join(dir_, '%s_pred_resid.csv'%split)
return os.path.join(self.save_path, 'preds', 'model%d'%self.model_setting, '%s_pred.csv'%split)
@classmethod
def _split_df(cls, df, seed=7, split_ratio=[60, 20, 20]):
n = len(df)
np.random.seed(seed)
perm = np.random.permutation(n)
df = df.iloc[perm]
split_ratio = np.concatenate([[0.], np.cumsum(split_ratio) / sum(split_ratio)])
splits = np.round(split_ratio * n).astype(np.int)
return [df.iloc[splits[i]:splits[i+1]] for i in range(len(split_ratio)-1)]
def split_and_save(self):
if not os.path.isdir(self.save_path): os.makedirs(self.save_path)
save_paths = [self.get_data_path(s) for s in [TRAIN, VALID, TEST]]
if all([os.path.isfile(f) for f in save_paths]): return save_paths
dfs = self._split_df(self.raw_df, self.seed, self.split_ratio)
for df, fname in zip(dfs, save_paths): df.to_csv(fname, index=False)
return save_paths
def get_data(self, split=TRAIN, colnames=False):
train_df = pd.read_csv(self.get_data_path(split=split))
Ys_cols = [c for c in train_df.columns if c.startswith('Y')]
Xs_cols = [c for c in train_df.columns if c.startswith('X')]
X, Y = train_df.loc[:, Xs_cols].values, train_df.loc[:, Ys_cols].values
index = train_df['index'].values
if colnames: return X, Y, index, Xs_cols, Ys_cols
return X, Y, index
def pretrain_resid(self, quiet=None):
quiet = quiet or self.quiet
checkpoint_dir = self.get_checkpoint_dir(resid=True)
flag_pkl = os.path.join(checkpoint_dir, 'meta.pkl')
if os.path.isfile(flag_pkl): return checkpoint_dir
if os.path.isdir(checkpoint_dir): #and no meta
shutil.rmtree(checkpoint_dir)
if not os.path.isdir(checkpoint_dir):
os.makedirs(checkpoint_dir)
X, Y, _ = self.get_data(TRAIN)
Yhat = pd.read_csv(self.get_preds_path(TRAIN)).drop('index', axis=1).values
resid = np.abs(Y - Yhat)
model, readout = pretrain_general(X, resid, self.seed, self.model_setting, quiet=quiet)
torch.save(model, os.path.join(checkpoint_dir, 'model.pt'))
pd.to_pickle({"Done": True}, flag_pkl)
return checkpoint_dir
def eval_resid(self, split=VALID):
checkpoint_dir = self.pretrain_resid()
model_resid = torch.load(os.path.join(checkpoint_dir, 'model.pt'))
X, Y, index, X_cols, Y_cols = self.get_data(split, colnames=True)
#save the prediction etc
rhat = model_resid.predict(X)
pred_path = self.get_preds_path(split, resid=True)
pred_df = pd.DataFrame(rhat, columns=Y_cols)
pred_df['index'] = index
pred_df.reindex(columns=['index'] + Y_cols).to_csv(pred_path, index=False)
return pred_path
def pretrain(self, force_retrain=False, quiet=None):
quiet = quiet or self.quiet
checkpoint_dir = self.get_checkpoint_dir()
readout_weight_path = self.get_readout_weight_path()
flag_pkl = os.path.join(checkpoint_dir, 'meta.pkl')
if os.path.isfile(flag_pkl):
if force_retrain:
time_key = "%s" % (datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
old_mv_to = f"{checkpoint_dir}_Copy%s"%time_key
os.rename(checkpoint_dir, old_mv_to)
else:
return checkpoint_dir, readout_weight_path
if os.path.isdir(checkpoint_dir): #and no meta
shutil.rmtree(checkpoint_dir)
if not os.path.isdir(checkpoint_dir):
os.makedirs(checkpoint_dir)
X, Y, _ = self.get_data(TRAIN)
model, readout = pretrain_general(X, Y, self.seed, self.model_setting, quiet=quiet)
#save..
#save the model
torch.save(model, os.path.join(checkpoint_dir, 'model.pt'))
#save the readout layer
if not os.path.isdir(os.path.dirname(readout_weight_path)): os.makedirs(os.path.dirname(readout_weight_path))
torch.save(readout, readout_weight_path)
pd.to_pickle({"Done": True}, flag_pkl)
return checkpoint_dir, readout_weight_path
def get_model(self):
return torch.load(self.get_readout_weight_path()).cpu().eval()
def eval(self, split=VALID):
checkpoint_dir, readout_weight_path = self.pretrain()
model = torch.load(os.path.join(checkpoint_dir, 'model.pt'))
readout = torch.load(readout_weight_path)
X, Y, index, X_cols, Y_cols = self.get_data(split, colnames=True)
#save the prediction etc
Yhat = model.predict(X)
embedding = model.embed(X)
Yhat2 = readout(torch.tensor(embedding).float()).detach().cpu().numpy()
assert np.allclose(Yhat, Yhat2)
#save embeddings
embedding_path = self.get_embedding_path(split)
fcols = ['f%d'%i for i in range(embedding.shape[1])]
embedding_df = pd.DataFrame(embedding, columns=fcols)
embedding_df['index'] = index
embedding_df.reindex(columns=['index'] + fcols).to_csv(embedding_path, index=False)
#save predictions
pred_path = self.get_preds_path(split)
pred_df = pd.DataFrame(Yhat, columns=Y_cols)
pred_df['index'] = index
pred_df.reindex(columns=['index'] + Y_cols).to_csv(pred_path, index=False)
print(f'{split}: MSE={np.mean(np.power(Y - Yhat, 2))}, Data Var={np.mean(np.power(Y - np.mean(Y), 2))}')
return pred_path, embedding_path
def cache(*args, **kwargs):
return GeneralDataSplitter(*args, **kwargs)
if __name__ == '__main__':
task_runner = utils.TaskPartitioner()
for dataset in [_settings.KIN8NM_NAME, _settings.YACHT_NAME, _settings.HOUSING_NAME, _settings.CONCRETE_NAME, _settings.ENERGY_NAME, _settings.BIKE_NAME]:
for seed in tqdm.tqdm(range(10)):
task_runner.add_task(cache, dataset=dataset, seed=seed, model_setting=0, init=True, quiet=True)
task_runner.run_multi_process(8) | [
"pandas.to_pickle",
"numpy.mean",
"models.baselines.DNN.get_DNN_and_trainkwargs",
"os.path.isdir",
"numpy.random.seed",
"pandas.DataFrame",
"numpy.round",
"numpy.random.permutation",
"numpy.abs",
"numpy.allclose",
"os.rename",
"os.path.isfile",
"os.path.dirname",
"torch.save",
"utils.uti... | [((364, 389), 'utils.utils.set_all_seeds', 'utils.set_all_seeds', (['seed'], {}), '(seed)\n', (383, 389), True, 'import utils.utils as utils\n'), ((488, 530), 'models.baselines.DNN.get_DNN_and_trainkwargs', 'DNN.get_DNN_and_trainkwargs', (['model_setting'], {}), '(model_setting)\n', (515, 530), True, 'import models.baselines.DNN as DNN\n'), ((11384, 11407), 'utils.utils.TaskPartitioner', 'utils.TaskPartitioner', ([], {}), '()\n', (11405, 11407), True, 'import utils.utils as utils\n'), ((4149, 4196), 'os.path.join', 'os.path.join', (['_settings.WORKSPACE', 'dataset', 'key'], {}), '(_settings.WORKSPACE, dataset, key)\n', (4161, 4196), False, 'import os\n'), ((4988, 5034), 'os.path.join', 'os.path.join', (['self.save_path', "('%s.csv' % split)"], {}), "(self.save_path, '%s.csv' % split)\n", (5000, 5034), False, 'import os\n'), ((5193, 5263), 'os.path.join', 'os.path.join', (['self.save_path', '"""models"""', "('model%d' % self.model_setting)"], {}), "(self.save_path, 'models', 'model%d' % self.model_setting)\n", (5205, 5263), False, 'import os\n'), ((5325, 5417), 'os.path.join', 'os.path.join', (['self.save_path', '"""preds"""', "('model%d' % self.model_setting)", "('%s.csv' % split)"], {}), "(self.save_path, 'preds', 'model%d' % self.model_setting, \n '%s.csv' % split)\n", (5337, 5417), False, 'import os\n'), ((5464, 5551), 'os.path.join', 'os.path.join', (['self.save_path', '"""preds"""', "('model%d' % self.model_setting)", '"""readout.pt"""'], {}), "(self.save_path, 'preds', 'model%d' % self.model_setting,\n 'readout.pt')\n", (5476, 5551), False, 'import os\n'), ((5777, 5874), 'os.path.join', 'os.path.join', (['self.save_path', '"""preds"""', "('model%d' % self.model_setting)", "('%s_pred.csv' % split)"], {}), "(self.save_path, 'preds', 'model%d' % self.model_setting, \n '%s_pred.csv' % split)\n", (5789, 5874), False, 'import os\n'), ((5974, 5994), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (5988, 5994), True, 'import numpy 
as np\n'), ((6010, 6034), 'numpy.random.permutation', 'np.random.permutation', (['n'], {}), '(n)\n', (6031, 6034), True, 'import numpy as np\n'), ((7342, 7382), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""meta.pkl"""'], {}), "(checkpoint_dir, 'meta.pkl')\n", (7354, 7382), False, 'import os\n'), ((7394, 7418), 'os.path.isfile', 'os.path.isfile', (['flag_pkl'], {}), '(flag_pkl)\n', (7408, 7418), False, 'import os\n'), ((7453, 7482), 'os.path.isdir', 'os.path.isdir', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (7466, 7482), False, 'import os\n'), ((7764, 7780), 'numpy.abs', 'np.abs', (['(Y - Yhat)'], {}), '(Y - Yhat)\n', (7770, 7780), True, 'import numpy as np\n'), ((7955, 7993), 'pandas.to_pickle', 'pd.to_pickle', (["{'Done': True}", 'flag_pkl'], {}), "({'Done': True}, flag_pkl)\n", (7967, 7993), True, 'import pandas as pd\n'), ((8408, 8442), 'pandas.DataFrame', 'pd.DataFrame', (['rhat'], {'columns': 'Y_cols'}), '(rhat, columns=Y_cols)\n', (8420, 8442), True, 'import pandas as pd\n'), ((8809, 8849), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""meta.pkl"""'], {}), "(checkpoint_dir, 'meta.pkl')\n", (8821, 8849), False, 'import os\n'), ((8861, 8885), 'os.path.isfile', 'os.path.isfile', (['flag_pkl'], {}), '(flag_pkl)\n', (8875, 8885), False, 'import os\n'), ((9208, 9237), 'os.path.isdir', 'os.path.isdir', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (9221, 9237), False, 'import os\n'), ((9781, 9821), 'torch.save', 'torch.save', (['readout', 'readout_weight_path'], {}), '(readout, readout_weight_path)\n', (9791, 9821), False, 'import torch\n'), ((9831, 9869), 'pandas.to_pickle', 'pd.to_pickle', (["{'Done': True}", 'flag_pkl'], {}), "({'Done': True}, flag_pkl)\n", (9843, 9869), True, 'import pandas as pd\n'), ((10201, 10232), 'torch.load', 'torch.load', (['readout_weight_path'], {}), '(readout_weight_path)\n', (10211, 10232), False, 'import torch\n'), ((10502, 10526), 'numpy.allclose', 'np.allclose', (['Yhat', 'Yhat2'], {}), '(Yhat, 
Yhat2)\n', (10513, 10526), True, 'import numpy as np\n'), ((10693, 10731), 'pandas.DataFrame', 'pd.DataFrame', (['embedding'], {'columns': 'fcols'}), '(embedding, columns=fcols)\n', (10705, 10731), True, 'import pandas as pd\n'), ((10954, 10988), 'pandas.DataFrame', 'pd.DataFrame', (['Yhat'], {'columns': 'Y_cols'}), '(Yhat, columns=Y_cols)\n', (10966, 10988), True, 'import pandas as pd\n'), ((897, 959), 'os.path.join', 'os.path.join', (['_settings.YACHT_PATH', '"""yacht_hydrodynamics.data"""'], {}), "(_settings.YACHT_PATH, 'yacht_hydrodynamics.data')\n", (909, 959), False, 'import os\n'), ((1232, 1258), 'pandas.DataFrame', 'pd.DataFrame', (["data['data']"], {}), "(data['data'])\n", (1244, 1258), True, 'import pandas as pd\n'), ((5716, 5763), 'os.path.join', 'os.path.join', (['dir_', "('%s_pred_resid.csv' % split)"], {}), "(dir_, '%s_pred_resid.csv' % split)\n", (5728, 5763), False, 'import os\n'), ((6337, 6366), 'os.path.isdir', 'os.path.isdir', (['self.save_path'], {}), '(self.save_path)\n', (6350, 6366), False, 'import os\n'), ((6368, 6395), 'os.makedirs', 'os.makedirs', (['self.save_path'], {}), '(self.save_path)\n', (6379, 6395), False, 'import os\n'), ((7509, 7538), 'shutil.rmtree', 'shutil.rmtree', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (7522, 7538), False, 'import shutil\n'), ((7554, 7583), 'os.path.isdir', 'os.path.isdir', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (7567, 7583), False, 'import os\n'), ((7597, 7624), 'os.makedirs', 'os.makedirs', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (7608, 7624), False, 'import os\n'), ((7904, 7944), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""model.pt"""'], {}), "(checkpoint_dir, 'model.pt')\n", (7916, 7944), False, 'import os\n'), ((8144, 8184), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""model.pt"""'], {}), "(checkpoint_dir, 'model.pt')\n", (8156, 8184), False, 'import os\n'), ((9264, 9293), 'shutil.rmtree', 'shutil.rmtree', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', 
(9277, 9293), False, 'import shutil\n'), ((9309, 9338), 'os.path.isdir', 'os.path.isdir', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (9322, 9338), False, 'import os\n'), ((9352, 9379), 'os.makedirs', 'os.makedirs', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (9363, 9379), False, 'import os\n'), ((9579, 9619), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""model.pt"""'], {}), "(checkpoint_dir, 'model.pt')\n", (9591, 9619), False, 'import os\n'), ((10141, 10181), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""model.pt"""'], {}), "(checkpoint_dir, 'model.pt')\n", (10153, 10181), False, 'import os\n'), ((6167, 6192), 'numpy.round', 'np.round', (['(split_ratio * n)'], {}), '(split_ratio * n)\n', (6175, 6192), True, 'import numpy as np\n'), ((6487, 6504), 'os.path.isfile', 'os.path.isfile', (['f'], {}), '(f)\n', (6501, 6504), False, 'import os\n'), ((9083, 9119), 'os.rename', 'os.rename', (['checkpoint_dir', 'old_mv_to'], {}), '(checkpoint_dir, old_mv_to)\n', (9092, 9119), False, 'import os\n'), ((9684, 9720), 'os.path.dirname', 'os.path.dirname', (['readout_weight_path'], {}), '(readout_weight_path)\n', (9699, 9720), False, 'import os\n'), ((9735, 9771), 'os.path.dirname', 'os.path.dirname', (['readout_weight_path'], {}), '(readout_weight_path)\n', (9750, 9771), False, 'import os\n'), ((1520, 1576), 'os.path.join', 'os.path.join', (['_settings.ENERGY_PATH', '"""ENB2012_data.xlsx"""'], {}), "(_settings.ENERGY_PATH, 'ENB2012_data.xlsx')\n", (1532, 1576), False, 'import os\n'), ((6106, 6128), 'numpy.cumsum', 'np.cumsum', (['split_ratio'], {}), '(split_ratio)\n', (6115, 6128), True, 'import numpy as np\n'), ((1958, 2020), 'os.path.join', 'os.path.join', (['_settings.KIN8NM_PATH', '"""dataset_2175_kin8nm.csv"""'], {}), "(_settings.KIN8NM_PATH, 'dataset_2175_kin8nm.csv')\n", (1970, 2020), False, 'import os\n'), ((11144, 11165), 'numpy.power', 'np.power', (['(Y - Yhat)', '(2)'], {}), '(Y - Yhat, 2)\n', (11152, 11165), True, 'import numpy as np\n'), 
((2217, 2275), 'os.path.join', 'os.path.join', (['_settings.CONCRETE_PATH', '"""Concrete_Data.xls"""'], {}), "(_settings.CONCRETE_PATH, 'Concrete_Data.xls')\n", (2229, 2275), False, 'import os\n'), ((8952, 8975), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8973, 8975), False, 'import datetime\n'), ((2537, 2598), 'os.path.join', 'os.path.join', (['_settings.BIKE_PATH', '"""Bike-Sharing-Dataset.zip"""'], {}), "(_settings.BIKE_PATH, 'Bike-Sharing-Dataset.zip')\n", (2549, 2598), False, 'import os\n'), ((11200, 11210), 'numpy.mean', 'np.mean', (['Y'], {}), '(Y)\n', (11207, 11210), True, 'import numpy as np\n'), ((3088, 3107), 'pandas.get_dummies', 'pd.get_dummies', (['ser'], {}), '(ser)\n', (3102, 3107), True, 'import pandas as pd\n'), ((3176, 3208), 'pandas.concat', 'pd.concat', (['[raw_df, tdf]'], {'axis': '(1)'}), '([raw_df, tdf], axis=1)\n', (3185, 3208), True, 'import pandas as pd\n'), ((10431, 10454), 'torch.tensor', 'torch.tensor', (['embedding'], {}), '(embedding)\n', (10443, 10454), False, 'import torch\n')] |
import numpy as np
# 0: clean
# 1: weakened
# 2: infected
# 3: flagged
def iterate(pos, direc, totalInfected, nIter):
for t in range(nIter):
# Turn
if grid[int(pos[0]), int(pos[1])] == 0:
# Clean, turn left
direc = turnL[direc]
elif grid[int(pos[0]), int(pos[1])] == 2:
# Infected, turn right
direc = turnR[direc]
elif grid[int(pos[0]), int(pos[1])] == 3:
# Reverae
direc = reverse[direc]
# Change status
if grid[int(pos[0]), int(pos[1])] == 0:
grid[int(pos[0]), int(pos[1])] = 1
elif grid[int(pos[0]), int(pos[1])] == 1:
grid[int(pos[0]), int(pos[1])] = 2
totalInfected += 1
elif grid[int(pos[0]), int(pos[1])] == 2:
grid[int(pos[0]), int(pos[1])] = 3
elif grid[int(pos[0]), int(pos[1])] == 3:
grid[int(pos[0]), int(pos[1])] = 0
# Move
pos += dirs[direc]
return totalInfected
with open("day22.txt") as f:
data = f.readlines()
data = [x.strip() for x in data]
grid_ = np.zeros([len(data), len(data)])
for i in range(len(data)):
for j in range(len(data[0])):
if data[i][j] == "#":
grid_[i][j] = 2
# Add padding
grid = np.pad(grid_, 3000, 'constant')
dirs = {'u': np.array([-1, 0]), 'd': np.array([1, 0]), 'l': np.array([0, -1]), 'r': np.array([0, 1])}
turnR = {'u': 'r', 'r': 'd', 'd': 'l', 'l': 'u'}
turnL = {'u': 'l', 'l': 'd', 'd': 'r', 'r': 'u'}
reverse = {'u': 'd', 'l': 'r', 'd': 'u', 'r': 'l'}
# Pos and dir
pos = np.array([int(np.floor(len(grid)/2)), int(np.floor(len(grid)/2))])
direc = 'u'
# Run
nIter = 10000000
totalInfected = iterate(pos, direc, 0, nIter)
print("Result =", totalInfected)
| [
"numpy.array",
"numpy.pad"
] | [((1284, 1315), 'numpy.pad', 'np.pad', (['grid_', '(3000)', '"""constant"""'], {}), "(grid_, 3000, 'constant')\n", (1290, 1315), True, 'import numpy as np\n'), ((1330, 1347), 'numpy.array', 'np.array', (['[-1, 0]'], {}), '([-1, 0])\n', (1338, 1347), True, 'import numpy as np\n'), ((1354, 1370), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (1362, 1370), True, 'import numpy as np\n'), ((1377, 1394), 'numpy.array', 'np.array', (['[0, -1]'], {}), '([0, -1])\n', (1385, 1394), True, 'import numpy as np\n'), ((1401, 1417), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (1409, 1417), True, 'import numpy as np\n')] |
import numpy as np
from torch.utils.data import Dataset
class RandomDataset(Dataset):
"""
A random dataset that acts like it is WaymoDataset
"""
# Transform to convert the getitem to tensor
def __init__(self, x_min, x_max, y_min, y_max, z_min, z_max,
drop_invalid_point_function,
point_cloud_transform,
min_number_points=50000, max_number_points=150000,
desired_length=1000):
self.max_number_points = max_number_points
self.min_number_points = min_number_points
self.z_max = z_max
self.z_min = z_min
self.y_max = y_max
self.y_min = y_min
self.x_max = x_max
self.x_min = x_min
self._length = desired_length
self._random_state = np.random.default_rng()
self._drop_invalid_point_function = drop_invalid_point_function
self._point_cloud_transform = point_cloud_transform
def __len__(self) -> int:
return self._length
def draw_random_frame(self):
# Random number of points
number_of_points = self._random_state.integers(self.min_number_points, self.max_number_points)
frame = np.zeros((number_of_points, 5))
frame[:, 0] = self._random_state.uniform(self.x_min, self.x_max, size=number_of_points)
frame[:, 1] = self._random_state.uniform(self.y_min, self.y_max, size=number_of_points)
frame[:, 2] = self._random_state.uniform(self.z_min, self.z_max, size=number_of_points)
frame[:, 3:5] = self._random_state.uniform(-5, 5, size=(number_of_points, 2))
return frame
def __getitem__(self, index):
"""
Create a single pointcloud simulated
:param index:
:return: (N_points, 5 features) with x,y,z being the coordinates and the
last two features being the laser features
"""
current_frame = self.draw_random_frame()
previous_frame = self.draw_random_frame()
current_flows = self._random_state.uniform(-20, 20, size=(current_frame.shape[0], 3))
# Drop invalid points according to the method supplied
current_frame, current_flows = self._drop_invalid_point_function(current_frame, current_flows)
previous_frame, _ = self._drop_invalid_point_function(previous_frame, None)
# Perform the pillarization of the point_cloud
current = self._point_cloud_transform(current_frame)
previous = self._point_cloud_transform(previous_frame)
# This returns a tuple of augmented pointcloud and grid indices
return (previous, current), current_flows
| [
"numpy.zeros",
"numpy.random.default_rng"
] | [((798, 821), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (819, 821), True, 'import numpy as np\n'), ((1201, 1232), 'numpy.zeros', 'np.zeros', (['(number_of_points, 5)'], {}), '((number_of_points, 5))\n', (1209, 1232), True, 'import numpy as np\n')] |
from __future__ import division, absolute_import, print_function
from past.builtins import xrange
import numpy as np
import esutil
import time
import matplotlib.pyplot as plt
from .fgcmUtilities import objFlagDict
from .fgcmUtilities import obsFlagDict
from .sharedNumpyMemManager import SharedNumpyMemManager as snmm
class FgcmStars(object):
"""
Class to describe the stars and observations of the stars. Note that
after initialization you must call loadStarsFromFits() or loadStars()
to load the star information. This allows an external caller to clear
out memory after it has been copied to the shared memory buffers.
parameters
----------
fgcmConfig: FgcmConfig
Config variables
----------------
minObsPerBand: int
Minumum number of observations per band to be "good"
sedFitBandFudgeFactors: float array
Fudge factors for computing fnuprime for the fit bands
sedExtraBandFudgeFactors: float array
Fudge factors for computing fnuprime for the extra bands
starColorCuts: list
List that contains lists of [bandIndex0, bandIndex1, minColor, maxColor]
sigma0Phot: float
Floor on photometric error to add to every observation
reserveFraction: float
Fraction of stars to hold in reserve
mapLongitudeRef: float
Reference longitude for plotting maps of stars
mapNSide: int
Healpix nside of map plotting.
superStarSubCCD: bool
Use sub-ccd info to make superstar flats?
obsFile: string, only if using fits mode
Star observation file
indexFile: string, only if using fits mode
Star index file
"""
def __init__(self,fgcmConfig):
self.fgcmLog = fgcmConfig.fgcmLog
self.fgcmLog.info('Initializing stars.')
self.obsFile = fgcmConfig.obsFile
self.indexFile = fgcmConfig.indexFile
self.bands = fgcmConfig.bands
self.nBands = len(fgcmConfig.bands)
self.nCCD = fgcmConfig.nCCD
self.minObsPerBand = fgcmConfig.minObsPerBand
self.fitBands = fgcmConfig.fitBands
self.nFitBands = len(fgcmConfig.fitBands)
self.extraBands = fgcmConfig.extraBands
self.sedFitBandFudgeFactors = fgcmConfig.sedFitBandFudgeFactors
self.sedExtraBandFudgeFactors = fgcmConfig.sedExtraBandFudgeFactors
self.starColorCuts = fgcmConfig.starColorCuts
self.sigma0Phot = fgcmConfig.sigma0Phot
self.ccdStartIndex = fgcmConfig.ccdStartIndex
self.plotPath = fgcmConfig.plotPath
self.outfileBaseWithCycle = fgcmConfig.outfileBaseWithCycle
self.expField = fgcmConfig.expField
self.ccdField = fgcmConfig.ccdField
self.reserveFraction = fgcmConfig.reserveFraction
self.modelMagErrors = fgcmConfig.modelMagErrors
self.inFlagStarFile = fgcmConfig.inFlagStarFile
self.mapLongitudeRef = fgcmConfig.mapLongitudeRef
self.mapNSide = fgcmConfig.mapNSide
self.lambdaStdBand = fgcmConfig.lambdaStdBand
self.bandRequiredFlag = fgcmConfig.bandRequiredFlag
self.bandRequiredIndex = np.where(self.bandRequiredFlag)[0]
self.bandExtraFlag = fgcmConfig.bandExtraFlag
self.bandExtraIndex = np.where(self.bandExtraFlag)[0]
self.lutFilterNames = fgcmConfig.lutFilterNames
self.filterToBand = fgcmConfig.filterToBand
self.superStarSubCCD = fgcmConfig.superStarSubCCD
#self.expArray = fgcmPars.expArray
#self._loadStars(fgcmPars)
self.magStdComputed = False
self.allMagStdComputed = False
self.sedSlopeComputed = False
#if (computeNobs):
# allExps = np.arange(fgcmConfig.expRange[0],fgcmConfig.expRange[1],dtype='i4')
# self.fgcmLog.info('Checking stars with full possible range of exp numbers')
#self.selectStarsMinObs(goodExps=allExps,doPlots=False)
# allExpsIndex = np.arange(fgcmPars.expArray.size)
# self.selectStarsMinObsExpIndex(allExpsIndex)
self.magConstant = 2.5/np.log(10)
self.hasXY = False
def loadStarsFromFits(self,fgcmPars,computeNobs=True):
    """
    Read observations, unique star positions, and (optionally) previously
    flagged stars from the configured FITS files, then delegate all array
    ingestion to loadStars().

    parameters
    ----------
    fgcmPars: FgcmParameters
    computeNobs: bool, default=True
       Compute number of observations of each star/band

    Config variables
    ----------------
    indexFile: string
       Star index file
    obsFile: string
       Star observation file
    inFlagStarFile: string, optional
       Flagged star file
    """
    import fitsio

    # Observation indices: map rows of the observation table to unique stars.
    t0 = time.time()
    self.fgcmLog.info('Reading in observation indices...')
    indexCat = fitsio.read(self.indexFile, ext='INDEX')
    self.fgcmLog.info('Done reading in %d observation indices in %.1f seconds.' %
                      (indexCat.size, time.time() - t0))

    # Observation table, immediately cut down to the indexed rows.
    t0 = time.time()
    self.fgcmLog.info('Reading in star observations...')
    obsCat = fitsio.read(self.obsFile, ext=1)
    obsCat = obsCat[indexCat['OBSINDEX']]
    self.fgcmLog.info('Done reading in %d observations in %.1f seconds.' %
                      (obsCat.size, time.time() - t0))

    # Unique star positions.
    t0 = time.time()
    self.fgcmLog.info('Reading in star positions...')
    posCat = fitsio.read(self.indexFile, ext='POS')
    self.fgcmLog.info('Done reading in %d unique star positions in %.1f secondds.' %
                      (posCat.size, time.time() - t0))

    # Filter names come back space-padded from FITS; strip them.
    obsFilterName = np.core.defchararray.strip(obsCat['FILTERNAME'][:])

    # Flags from a previous fit cycle (variable/reserved stars) carry over.
    if self.inFlagStarFile is None:
        flagID = None
        flagFlag = None
    else:
        self.fgcmLog.info('Reading in list of previous flagged stars from %s' %
                          (self.inFlagStarFile))
        inFlagStars = fitsio.read(self.inFlagStarFile, ext=1)
        flagID = inFlagStars['OBJID']
        flagFlag = inFlagStars['OBJFLAG']

    # FIXME: add support to x/y from fits files
    if 'X' in obsCat.dtype.names and 'Y' in obsCat.dtype.names:
        self.fgcmLog.info('Found X/Y in input observations')
        obsX = obsCat['X']
        obsY = obsCat['Y']
    else:
        obsX = None
        obsY = None

    # Hand everything off to the array-based loader.
    self.loadStars(fgcmPars,
                   obsCat[self.expField],
                   obsCat[self.ccdField],
                   obsCat['RA'],
                   obsCat['DEC'],
                   obsCat['MAG'],
                   obsCat['MAGERR'],
                   obsFilterName,
                   posCat['FGCM_ID'],
                   posCat['RA'],
                   posCat['DEC'],
                   posCat['OBSARRINDEX'],
                   posCat['NOBS'],
                   obsX=obsX,
                   obsY=obsY,
                   flagID=flagID,
                   flagFlag=flagFlag,
                   computeNobs=computeNobs)

    # Drop references so the large temporary tables can be collected.
    indexCat = None
    obsCat = None
    posCat = None
def loadStars(self, fgcmPars,
              obsExp, obsCCD, obsRA, obsDec, obsMag, obsMagErr, obsFilterName,
              objID, objRA, objDec, objObsIndex, objNobs, obsX=None, obsY=None,
              flagID=None, flagFlag=None, computeNobs=True):
    """
    Load stars from arrays

    parameters
    ----------
    fgcmPars: fgcmParameters
    obsExp: int array
       Exposure number (or equivalent) for each observation
    obsCCD: int array
       CCD number (or equivalent) for each observation
    obsRA: double array
       RA for each observation (degrees)
    obsDec: double array
       Dec for each observation (degrees)
    obsMag: float array
       Raw ADU magnitude for each observation
    obsMagErr: float array
       Raw ADU magnitude error for each observation
    obsFilterName: string array
       Filter name for each observation
    objID: int array
       Unique ID number for each object
    objRA: double array
       RA for each object (degrees)
    objDec: double array
       Dec for each object (degrees)
    objObsIndex: int array
       For each object, where in the obs table to look
    objNobs: int array
       number of observations of this object (all bands)
    obsX: float array, optional
       x position for each observation
    obsY: float array, optional
       y position for each observation
    flagID: int array, optional
       ID of each object that is flagged from previous cycle
    flagFlag: int array, optional
       Flag value from previous cycle
    computeNobs: bool, default=True
       Compute number of good observations of each object?
    """
    # FIXME: check that these are all the same length!

    # obsIndex: trivial 0..n-1 index into the observation arrays.
    self.obsIndexHandle = snmm.createArray(obsRA.size, dtype='i4')
    snmm.getArray(self.obsIndexHandle)[:] = np.arange(obsRA.size)

    # All per-observation arrays live in shared memory (snmm) so that
    # worker processes can see them.
    # nStarObs: total number of observations of all stars
    self.nStarObs = obsRA.size
    # obsExp: exposure number of individual observation (pointed by obsIndex)
    self.obsExpHandle = snmm.createArray(self.nStarObs,dtype='i4')
    # obsExpIndex: index into fgcmPars.expArray for each observation
    self.obsExpIndexHandle = snmm.createArray(self.nStarObs,dtype='i4')
    # obsCCD: ccd number of individual observation
    self.obsCCDHandle = snmm.createArray(self.nStarObs,dtype='i2')
    # obsBandIndex: band index of individual observation
    self.obsBandIndexHandle = snmm.createArray(self.nStarObs,dtype='i2')
    # obsLUTFilterIndex: filter index in LUT of individual observation
    self.obsLUTFilterIndexHandle = snmm.createArray(self.nStarObs,dtype='i2')
    # obsFlag: bitmask marking individual bad observations
    self.obsFlagHandle = snmm.createArray(self.nStarObs,dtype='i2')
    # obsRA: RA of individual observation
    self.obsRAHandle = snmm.createArray(self.nStarObs,dtype='f8')
    # obsDec: Declination of individual observation
    self.obsDecHandle = snmm.createArray(self.nStarObs,dtype='f8')
    # obsSecZenith: secant(zenith) of individual observation
    self.obsSecZenithHandle = snmm.createArray(self.nStarObs,dtype='f8')
    # obsMagADU: raw ADU magnitude of individual observation
    ## FIXME: need to know default zeropoint?
    self.obsMagADUHandle = snmm.createArray(self.nStarObs,dtype='f4')
    # obsMagADUErr: raw ADU magnitude error of individual observation
    self.obsMagADUErrHandle = snmm.createArray(self.nStarObs,dtype='f4')
    # obsMagADUModelErr: modeled ADU magnitude error of individual observation
    self.obsMagADUModelErrHandle = snmm.createArray(self.nStarObs,dtype='f4')
    # obsSuperStarApplied: SuperStar correction that was applied
    self.obsSuperStarAppliedHandle = snmm.createArray(self.nStarObs,dtype='f4')
    # obsMagStd: corrected (to standard passband) mag of individual observation
    self.obsMagStdHandle = snmm.createArray(self.nStarObs,dtype='f4',syncAccess=True)

    if (obsX is not None and obsY is not None):
        self.hasXY = True
        # obsX: x position on the CCD of the given observation
        self.obsXHandle = snmm.createArray(self.nStarObs,dtype='f4')
        # obsY: y position on the CCD of the given observation
        self.obsYHandle = snmm.createArray(self.nStarObs,dtype='f4')
    else:
        # self.hasXY stays False (set in __init__); per-CCD superstar fits
        # require positions, so this combination is fatal.
        if self.superStarSubCCD:
            raise ValueError("Input stars do not have x/y but superStarSubCCD is set.")

    # Copy the input arrays into the shared-memory buffers.
    snmm.getArray(self.obsExpHandle)[:] = obsExp
    snmm.getArray(self.obsCCDHandle)[:] = obsCCD
    snmm.getArray(self.obsRAHandle)[:] = obsRA
    snmm.getArray(self.obsDecHandle)[:] = obsDec
    snmm.getArray(self.obsMagADUHandle)[:] = obsMag
    snmm.getArray(self.obsMagADUErrHandle)[:] = obsMagErr
    snmm.getArray(self.obsMagStdHandle)[:] = obsMag  # same as raw at first
    snmm.getArray(self.obsSuperStarAppliedHandle)[:] = 0.0
    if self.hasXY:
        snmm.getArray(self.obsXHandle)[:] = obsX
        snmm.getArray(self.obsYHandle)[:] = obsY

    # Flag non-positive errors, then add the sigma0Phot floor in quadrature.
    self.fgcmLog.info('Applying sigma0Phot = %.4f to mag errs' %
                      (self.sigma0Phot))
    obsMagADUErr = snmm.getArray(self.obsMagADUErrHandle)
    obsFlag = snmm.getArray(self.obsFlagHandle)
    bad, = np.where(obsMagADUErr <= 0.0)
    obsFlag[bad] |= obsFlagDict['BAD_ERROR']
    if (bad.size > 0):
        self.fgcmLog.info('Flagging %d observations with bad errors.' %
                          (bad.size))
    obsMagADUErr[:] = np.sqrt(obsMagADUErr[:]**2. + self.sigma0Phot**2.)

    # Initially, we set the model error to the observed error
    obsMagADUModelErr = snmm.getArray(self.obsMagADUModelErrHandle)
    obsMagADUModelErr[:] = obsMagADUErr[:]

    # Match each observation's exposure number to the exposure table;
    # unmatched observations (index stays -1) are flagged NO_EXPOSURE.
    startTime = time.time()
    self.fgcmLog.info('Matching observations to exposure table.')
    obsExpIndex = snmm.getArray(self.obsExpIndexHandle)
    obsExpIndex[:] = -1
    a,b=esutil.numpy_util.match(fgcmPars.expArray,
                                snmm.getArray(self.obsExpHandle)[:])
    obsExpIndex[b] = a
    self.fgcmLog.info('Observations matched in %.1f seconds.' %
                      (time.time() - startTime))
    bad, = np.where(obsExpIndex < 0)
    obsFlag[bad] |= obsFlagDict['NO_EXPOSURE']
    if (bad.size > 0):
        self.fgcmLog.info('Flagging %d observations with no associated exposure.' %
                          (bad.size))

    # match bands and filters to indices (multifilter support: several
    # filters may map to one band via self.filterToBand)
    startTime = time.time()
    self.fgcmLog.info('Matching observations to bands.')
    for filterIndex,filterName in enumerate(self.lutFilterNames):
        # self.bands is a list, so .index() raises ValueError if not found;
        # the dict lookup raises KeyError — either way fall back to -1.
        try:
            bandIndex = self.bands.index(self.filterToBand[filterName])
        except:
            self.fgcmLog.info('WARNING: observations with filter %s not in config' % (filterName))
            bandIndex = -1
        # obsFilterName is an array from fits/numpy.  filterName needs to be encoded to match
        use, = np.where(obsFilterName == filterName.encode('utf-8'))
        if use.size == 0:
            self.fgcmLog.info('WARNING: no observations in filter %s' % (filterName))
        else:
            snmm.getArray(self.obsLUTFilterIndexHandle)[use] = filterIndex
            snmm.getArray(self.obsBandIndexHandle)[use] = bandIndex
    self.fgcmLog.info('Observations matched in %.1f seconds.' %
                      (time.time() - startTime))

    # nStars: total number of unique stars
    self.nStars = objID.size
    # objID: unique object ID
    self.objIDHandle = snmm.createArray(self.nStars,dtype='i4')
    # objRA: mean RA for object
    self.objRAHandle = snmm.createArray(self.nStars,dtype='f8')
    # objDec: mean Declination for object
    self.objDecHandle = snmm.createArray(self.nStars,dtype='f8')
    # objObsIndex: for each object, the first index into the obs table
    self.objObsIndexHandle = snmm.createArray(self.nStars,dtype='i4')
    # objNobs: number of observations of this object (all bands)
    self.objNobsHandle = snmm.createArray(self.nStars,dtype='i4')
    # objNGoodObsHandle: number of good observations, per band
    self.objNGoodObsHandle = snmm.createArray((self.nStars,self.nBands),dtype='i4')

    snmm.getArray(self.objIDHandle)[:] = objID
    snmm.getArray(self.objRAHandle)[:] = objRA
    snmm.getArray(self.objDecHandle)[:] = objDec
    snmm.getArray(self.objObsIndexHandle)[:] = objObsIndex
    snmm.getArray(self.objNobsHandle)[:] = objNobs

    # minObjID: minimum object ID
    self.minObjID = np.min(snmm.getArray(self.objIDHandle))
    # maxObjID: maximum object ID
    self.maxObjID = np.max(snmm.getArray(self.objIDHandle))

    # obsObjIDIndex: object ID Index of each observation
    #  (to get objID, then objID[obsObjIDIndex]
    startTime = time.time()
    self.fgcmLog.info('Indexing star observations...')
    self.obsObjIDIndexHandle = snmm.createArray(self.nStarObs,dtype='i4')
    obsObjIDIndex = snmm.getArray(self.obsObjIDIndexHandle)
    objID = snmm.getArray(self.objIDHandle)
    obsIndex = snmm.getArray(self.obsIndexHandle)
    objObsIndex = snmm.getArray(self.objObsIndexHandle)
    objNobs = snmm.getArray(self.objNobsHandle)
    ## FIXME: check if this extra obsIndex reference is necessary or not.
    ##   probably extraneous.
    for i in xrange(self.nStars):
        obsObjIDIndex[obsIndex[objObsIndex[i]:objObsIndex[i]+objNobs[i]]] = i
    self.fgcmLog.info('Done indexing in %.1f seconds.' %
                      (time.time() - startTime))

    # Release local references to the (possibly large) shared arrays.
    obsObjIDIndex = None
    objID = None
    obsIndex = None
    objObsIndex = None
    objNobs = None

    # and create a objFlag which flags bad stars as they fall out...
    self.objFlagHandle = snmm.createArray(self.nStars,dtype='i2')

    if (flagID is not None):
        # the objFlag contains information on RESERVED stars
        objID = snmm.getArray(self.objIDHandle)
        objFlag = snmm.getArray(self.objFlagHandle)
        a,b=esutil.numpy_util.match(flagID, objID)
        test,=np.where((flagFlag[a] & objFlagDict['VARIABLE']) > 0)
        self.fgcmLog.info('Flagging %d stars as variable from previous cycles.' %
                          (test.size))
        test,=np.where((flagFlag[a] & objFlagDict['RESERVED']) > 0)
        self.fgcmLog.info('Flagging %d stars as reserved from previous cycles.' %
                          (test.size))
        objFlag[b] = flagFlag[a]
    else:
        # we want to reserve stars, if necessary
        if self.reserveFraction > 0.0:
            objFlag = snmm.getArray(self.objFlagHandle)
            nReserve = int(self.reserveFraction * objFlag.size)
            reserve = np.random.choice(objFlag.size,
                                       size=nReserve,
                                       replace=False)
            self.fgcmLog.info('Reserving %d stars from the fit.' % (nReserve))
            objFlag[reserve] |= objFlagDict['RESERVED']

    # And we need to record the mean mag, error, SED slopes...
    # objMagStdMean: mean standard magnitude of each object, per band
    self.objMagStdMeanHandle = snmm.createArray((self.nStars,self.nBands),dtype='f4',
                                                syncAccess=True)
    # objMagStdMeanErr: error on the mean standard mag of each object, per band
    self.objMagStdMeanErrHandle = snmm.createArray((self.nStars,self.nBands),dtype='f4')
    # objSEDSlope: linearized approx. of SED slope of each object, per band
    self.objSEDSlopeHandle = snmm.createArray((self.nStars,self.nBands),dtype='f4',
                                              syncAccess=True)
    # objMagStdMeanNoChrom: mean std mag of each object, no chromatic correction, per band
    self.objMagStdMeanNoChromHandle = snmm.createArray((self.nStars,self.nBands),dtype='f4')

    # note: if this takes too long it can be moved to the star computation,
    #       but it seems pretty damn fast (which may raise the question of
    #       why it needs to be precomputed...)
    # compute secZenith for every observation
    startTime=time.time()
    self.fgcmLog.info('Computing secZenith for each star observation...')
    objRARad = np.radians(snmm.getArray(self.objRAHandle))
    objDecRad = np.radians(snmm.getArray(self.objDecHandle))
    ## FIXME: deal with this at some point...
    # wrap RA into (-pi, pi] so the hour-angle difference stays well-behaved
    hi,=np.where(objRARad > np.pi)
    objRARad[hi] -= 2*np.pi
    obsExpIndex = snmm.getArray(self.obsExpIndexHandle)
    obsObjIDIndex = snmm.getArray(self.obsObjIDIndexHandle)
    obsIndex = snmm.getArray(self.obsIndexHandle)
    # hour angle of each observation = telescope HA + RA offset - object RA
    obsHARad = (fgcmPars.expTelHA[obsExpIndex] +
                fgcmPars.expTelRA[obsExpIndex] -
                objRARad[obsObjIDIndex])
    # sec(zenith) from the standard spherical-astronomy airmass formula
    tempSecZenith = 1./(np.sin(objDecRad[obsObjIDIndex]) * fgcmPars.sinLatitude +
                        np.cos(objDecRad[obsObjIDIndex]) * fgcmPars.cosLatitude *
                        np.cos(obsHARad))
    bad,=np.where(obsFlag != 0)
    tempSecZenith[bad] = 1.0  # filler here, but these stars aren't used
    snmm.getArray(self.obsSecZenithHandle)[:] = tempSecZenith
    self.fgcmLog.info('Computed secZenith in %.1f seconds.' %
                      (time.time() - startTime))

    if (computeNobs):
        self.fgcmLog.info('Checking stars with all exposure numbers')
        allExpsIndex = np.arange(fgcmPars.expArray.size)
        self.selectStarsMinObsExpIndex(allExpsIndex)
def selectStarsMinObsExpIndex(self, goodExpsIndex, temporary=False,
                              minObsPerBand=None):
    """
    Select stars that have at least the minimum number of observations per band,
    using a list of good exposures

    parameters
    ----------
    goodExpsIndex: int array
       Array of good (photometric) exposure indices
    temporary: bool, default=False
       Only flag bad objects temporarily
    minObsPerBand: int
       Specify the min obs per band, or use self.minObsPerBand
    """
    if (minObsPerBand is None):
        minObsPerBand = self.minObsPerBand

    # Given a list of good exposures, which stars have at least minObs observations
    #  in each required band?
    obsExpIndex = snmm.getArray(self.obsExpIndexHandle)
    obsBandIndex = snmm.getArray(self.obsBandIndexHandle)
    obsObjIDIndex = snmm.getArray(self.obsObjIDIndexHandle)
    objNGoodObs = snmm.getArray(self.objNGoodObsHandle)
    obsFlag = snmm.getArray(self.obsFlagHandle)
    objFlag = snmm.getArray(self.objFlagHandle)

    self.fgcmLog.info('Selecting good stars from %d exposures.' %
                      (goodExpsIndex.size))
    # goodObs: indices of observations taken on a good exposure
    _,goodObs=esutil.numpy_util.match(goodExpsIndex,obsExpIndex)
    # Filter out bad (previously flagged) individual observations
    gd, = np.where(obsFlag[goodObs] == 0)
    goodObs = goodObs[gd]
    # count all the good observations, accumulated per (object, band)
    objNGoodObs[:,:] = 0
    np.add.at(objNGoodObs,
              (obsObjIDIndex[goodObs],
               obsBandIndex[goodObs]),
              1)
    # and find the minimum of all the required bands
    minObs = objNGoodObs[:,self.bandRequiredIndex].min(axis=1)
    # reset too few obs flag if it's already set (only for a permanent pass;
    # a temporary pass must not clear the persistent flag)
    if not temporary:
        objFlag &= ~objFlagDict['TOO_FEW_OBS']
    # choose the bad objects with too few observations
    bad,=np.where(minObs < minObsPerBand)
    if (not temporary) :
        objFlag[bad] |= objFlagDict['TOO_FEW_OBS']
        self.fgcmLog.info('Flagging %d of %d stars with TOO_FEW_OBS' % (bad.size,self.nStars))
    else:
        objFlag[bad] |= objFlagDict['TEMPORARY_BAD_STAR']
        self.fgcmLog.info('Flagging %d of %d stars with TEMPORARY_BAD_STAR' % (bad.size,self.nStars))
def selectStarsMinObsExpAndCCD(self, goodExps, goodCCDs, minObsPerBand=None):
    """
    Select stars that have at least the minimum number of observations per band,
    using a list of good exposures and ccds.

    parameters
    ----------
    goodExps: int array
       Array of good (photometric) exposure numbers
    goodCCDs: int array
       Array of good (photometric) ccd numbers
    minObsPerBand: int
       Specify the min obs per band, or use self.minObsPerBand
    """
    if (minObsPerBand is None):
        minObsPerBand = self.minObsPerBand
    if (goodExps.size != goodCCDs.size) :
        raise ValueError("Length of goodExps and goodCCDs must be the same")

    # Fix: removed an unused snmm.getArray(self.obsExpIndexHandle) fetch;
    # the exposure index is never used in this method.
    obsExp = snmm.getArray(self.obsExpHandle)
    obsCCD = snmm.getArray(self.obsCCDHandle)
    obsBandIndex = snmm.getArray(self.obsBandIndexHandle)
    obsObjIDIndex = snmm.getArray(self.obsObjIDIndexHandle)
    objNGoodObs = snmm.getArray(self.objNGoodObsHandle)
    obsFlag = snmm.getArray(self.obsFlagHandle)
    objFlag = snmm.getArray(self.objFlagHandle)

    self.fgcmLog.info('Selecting good stars from %d exposure/ccd pairs.' %
                      (goodExps.size))
    # hash together exposure and ccd so each (exp, ccd) pair maps to a unique
    # integer, then match observation hashes against the good-pair hashes
    obsHash = obsExp * (self.nCCD + self.ccdStartIndex) + obsCCD
    goodHash = goodExps * (self.nCCD + self.ccdStartIndex) + goodCCDs
    _,goodObs = esutil.numpy_util.match(goodHash, obsHash)
    # Filter out bad (previously flagged) individual observations
    gd, = np.where(obsFlag[goodObs] == 0)
    goodObs = goodObs[gd]
    # count all the good observations, accumulated per (object, band)
    objNGoodObs[:,:] = 0
    np.add.at(objNGoodObs,
              (obsObjIDIndex[goodObs],
               obsBandIndex[goodObs]),
              1)
    # and find the minimum of all the required bands
    minObs = objNGoodObs[:,self.bandRequiredIndex].min(axis=1)
    # reset too few obs flag if it's already set
    objFlag &= ~objFlagDict['TOO_FEW_OBS']
    # choose the bad objects with too few observations
    bad,=np.where(minObs < minObsPerBand)
    objFlag[bad] |= objFlagDict['TOO_FEW_OBS']
    self.fgcmLog.info('Flagging %d of %d stars with TOO_FEW_OBS' % (bad.size,self.nStars))
def plotStarMap(self,mapType='initial'):
    """
    Plot a healpix sky-density map of the unflagged stars and save it
    as a png in the configured plot path.

    parameters
    ----------
    mapType: string, default='initial'
       A key for labeling the map.
    """
    import healpy as hp
    try:
        from .fgcmPlotmaps import plot_hpxmap
    except:
        self.fgcmLog.info("Map plotting not available. Sorry!")
        return

    # only stars with no flags set contribute to the map
    goodStars,=np.where(snmm.getArray(self.objFlagHandle)[:] == 0.0)
    raGood = snmm.getArray(self.objRAHandle)[goodStars]
    decGood = snmm.getArray(self.objDecHandle)[goodStars]

    # healpy wants colatitude/longitude in radians
    theta = (90.0 - decGood) * np.pi / 180.
    phi = raGood * np.pi / 180.
    ipring = hp.ang2pix(self.mapNSide, theta, phi)

    # per-pixel star counts; empty pixels are masked as UNSEEN
    densMap = esutil.stat.histogram(ipring, min=0, max=12*self.mapNSide*self.mapNSide-1)
    densMap = densMap.astype(np.float32)
    empty, = np.where(densMap == 0)
    densMap[empty] = hp.UNSEEN

    # wrap RA into [-180, 180) for the plot ranges (fancy indexing above
    # already gave us a private copy, so the in-place wrap is safe)
    raStarRot = raGood
    wrap, = np.where(raStarRot > 180.0)
    raStarRot[wrap] -= 360.0
    decStar = decGood

    fig, ax = plot_hpxmap(densMap,
                          raRange=[np.min(raStarRot), np.max(raStarRot)],
                          decRange=[np.min(decStar), np.max(decStar)],
                          lonRef=self.mapLongitudeRef)
    fig.savefig('%s/%s_%sGoodStars.png' % (self.plotPath, self.outfileBaseWithCycle,
                                           mapType))
    plt.close(fig)
def computeObjectSEDSlopes(self,objIndicesIn):
    """
    Compute fnuprime (object SED slopes) for a list of objects.
    Output is saved in objSEDSlope.

    parameters
    ----------
    objIndicesIn: int array
       Array of object indices to do computation
    """
    if self.nBands < 3:
        # cannot compute SED slopes with fewer than 3 bands ... just leave at 0
        return

    # work on multiple indices
    objMagStdMean = snmm.getArray(self.objMagStdMeanHandle)
    objSEDSlope = snmm.getArray(self.objSEDSlopeHandle)
    objMagStdMeanLock = snmm.getArrayBase(self.objMagStdMeanHandle).get_lock()
    objSEDSlopeLock = snmm.getArrayBase(self.objSEDSlopeHandle).get_lock()

    # select out good ones
    # NOTE: assumes that the required bands are sequential.
    #   in fact, this whole thing does.
    ## FIXME: require required bands to be explicitly sequential
    ## NOTE: this check is probably redundant, since we already have
    #   a list of good stars in most cases.

    # protect access to copy to local
    objMagStdMeanLock.acquire()
    objMagStdMeanOI = objMagStdMean[objIndicesIn,:]
    # release access
    objMagStdMeanLock.release()

    # and make a temporary local copy of the SED
    objSEDSlopeOI = np.zeros((objIndicesIn.size,self.nBands),dtype='f4')

    # only objects measured (mag < 90) in every required band get slopes;
    # 90+ appears to be the sentinel for "not measured"
    maxMag = np.max(objMagStdMeanOI[:,self.bandRequiredIndex.min():
                                      self.bandRequiredIndex.max()+1],axis=1)
    goodIndicesOI,=np.where(maxMag < 90.0)

    # S[:,i] is the finite-difference slope between adjacent bands i and i+1
    # can this be non-looped?
    S=np.zeros((goodIndicesOI.size,self.nBands-1),dtype='f8')
    for i in xrange(self.nBands-1):
        S[:,i] = (-1/self.magConstant) * (objMagStdMeanOI[goodIndicesOI,i+1] -
                                          objMagStdMeanOI[goodIndicesOI,i]) / (
            (self.lambdaStdBand[i+1] - self.lambdaStdBand[i]))

    ## FIXME: will have to handle u band "extra"

    # first required band: extrapolate blueward with the first fudge factor
    tempIndex=self.bandRequiredIndex[0]
    objSEDSlopeOI[goodIndicesOI, tempIndex] = (
        S[:, tempIndex] + self.sedFitBandFudgeFactors[0] * (
            S[:, tempIndex+1] + S[:, tempIndex]))

    # and the middle ones...
    #  these are straight (fudge-scaled) averages of the adjacent slopes
    for tempIndex in self.bandRequiredIndex[1:-1]:
        objSEDSlopeOI[goodIndicesOI,tempIndex] = (
            self.sedFitBandFudgeFactors[tempIndex] * (
                S[:,tempIndex-1] + S[:,tempIndex]) / 2.0)

    # and the last one: extrapolate redward, scaling by the wavelength ratio
    tempIndex = self.bandRequiredIndex[-1]
    objSEDSlopeOI[goodIndicesOI,tempIndex] = (
        S[:,tempIndex-1] + self.sedFitBandFudgeFactors[-1] * (
            (self.lambdaStdBand[tempIndex] - self.lambdaStdBand[tempIndex-1]) /
            (self.lambdaStdBand[tempIndex] - self.lambdaStdBand[tempIndex-2])) *
        (S[:,tempIndex-1] - S[:,tempIndex-2]))

    # and the extra bands, only redward now; each extra band uses its own
    # adjacent slopes and its own fudge factor
    for i in xrange(len(self.bandExtraIndex)):
        extraIndex=self.bandExtraIndex[i]
        use,=np.where(objMagStdMeanOI[goodIndicesOI,extraIndex] < 90.0)
        objSEDSlopeOI[goodIndicesOI[use],extraIndex] = (
            S[use,extraIndex-1] + self.sedExtraBandFudgeFactors[i] * (
                (self.lambdaStdBand[extraIndex] - self.lambdaStdBand[extraIndex-1]) /
                (self.lambdaStdBand[extraIndex] - self.lambdaStdBand[extraIndex-2])) *
            (S[use,extraIndex-1] - S[use,extraIndex-2]))

    # and save the values, protected
    objSEDSlopeLock.acquire()
    objSEDSlope[objIndicesIn,:] = objSEDSlopeOI
    objSEDSlopeLock.release()
def computeObjectSEDSlopesLUT(self, objIndicesIn, fgcmLUT):
    """
    Compute fnuprime (object SED slopes) for a list of objects, from the SED fit
    in the look-up table.  Experimental.
    Output is saved in objSEDSlope.

    parameters
    ----------
    objIndicesIn: int array
       Array of object indices to do computation
    fgcmLUT: FgcmLUT
    """
    meanMags = snmm.getArray(self.objMagStdMeanHandle)
    sedSlopes = snmm.getArray(self.objSEDSlopeHandle)
    meanLock = snmm.getArrayBase(self.objMagStdMeanHandle).get_lock()
    slopeLock = snmm.getArrayBase(self.objSEDSlopeHandle).get_lock()

    # copy the mean mags to a local array while holding the lock
    meanLock.acquire()
    meanMagsOI = meanMags[objIndicesIn,:]
    meanLock.release()

    # SED color used for the table look-up
    ## FIXME: make this configurable
    sedColorOI = meanMagsOI[:,0] - meanMagsOI[:,2]

    # the LUT does the actual slope computation
    slopesOI = fgcmLUT.computeSEDSlopes(sedColorOI)

    # write the results back under the lock
    slopeLock.acquire()
    sedSlopes[objIndicesIn,:] = slopesOI
    slopeLock.release()
def performColorCuts(self):
    """
    Apply the star color cuts specified in the config, flagging out-of-range
    stars with BAD_COLOR.
    """
    if (not self.magStdComputed):
        raise ValueError("Must compute magStd before performing color cuts")

    meanMags = snmm.getArray(self.objMagStdMeanHandle)
    objFlag = snmm.getArray(self.objFlagHandle)

    # each cut is indexed as (band index 1, band index 2, min color, max color)
    for cutSpec in self.starColorCuts:
        color = meanMags[:, cutSpec[0]] - meanMags[:, cutSpec[1]]
        outOfRange, = np.where((color < cutSpec[2]) |
                               (color > cutSpec[3]))
        objFlag[outOfRange] |= objFlagDict['BAD_COLOR']
        self.fgcmLog.info('Flag %d stars of %d with BAD_COLOR' % (outOfRange.size,self.nStars))
def applySuperStarFlat(self,fgcmPars):
    """
    Apply superStarFlat to raw magnitudes (in place, in the shared
    obsMagADU array).

    parameters
    ----------
    fgcmPars: FgcmParameters
    """
    self.fgcmLog.info('Applying SuperStarFlat to raw magnitudes')

    obsMagADU = snmm.getArray(self.obsMagADUHandle)
    obsSuperStarApplied = snmm.getArray(self.obsSuperStarAppliedHandle)
    obsExpIndex = snmm.getArray(self.obsExpIndexHandle)
    obsCCDIndex = snmm.getArray(self.obsCCDHandle) - self.ccdStartIndex

    # two different tracks, if x/y available or not.
    if self.hasXY:
        # new style: 2d polynomial over the CCD, evaluated at each
        # observation's x/y position
        from .fgcmUtilities import poly2dFunc

        obsX = snmm.getArray(self.obsXHandle)
        obsY = snmm.getArray(self.obsYHandle)

        # unique integer hash per (epoch, LUT filter, ccd) so all
        # observations sharing one set of polynomial coefficients can be
        # processed as a group via the histogram reverse indices
        epochFilterHash = (fgcmPars.expEpochIndex[obsExpIndex]*
                           (fgcmPars.nLUTFilter+1)*(fgcmPars.nCCD+1) +
                           fgcmPars.expLUTFilterIndex[obsExpIndex]*
                           (fgcmPars.nCCD+1) +
                           obsCCDIndex)

        h, rev = esutil.stat.histogram(epochFilterHash, rev=True)

        for i in xrange(h.size):
            if h[i] == 0: continue

            # i1a: all observation indices in this epoch/filter/ccd group
            i1a = rev[rev[i]:rev[i+1]]

            # get the indices for this epoch/filter/ccd from the first member
            epInd = fgcmPars.expEpochIndex[obsExpIndex[i1a[0]]]
            fiInd = fgcmPars.expLUTFilterIndex[obsExpIndex[i1a[0]]]
            cInd = obsCCDIndex[i1a[0]]

            obsSuperStarApplied[i1a] = poly2dFunc(np.vstack((obsX[i1a],
                                                             obsY[i1a])),
                                                  *fgcmPars.parSuperStarFlat[epInd, fiInd, cInd, :])
    else:
        # old style: a single superstar offset per exposure/ccd
        obsSuperStarApplied[:] = fgcmPars.expCCDSuperStar[obsExpIndex,
                                                          obsCCDIndex]

    # And finally apply the superstar correction
    obsMagADU[:] += obsSuperStarApplied[:]
def applyApertureCorrection(self,fgcmPars):
    """
    Apply aperture corrections to raw magnitudes (in place, in the shared
    obsMagADU array).

    parameters
    ----------
    fgcmPars: FgcmParameters
    """
    self.fgcmLog.info('Applying ApertureCorrections to raw magnitudes')

    expIndexOfObs = snmm.getArray(self.obsExpIndexHandle)
    rawMags = snmm.getArray(self.obsMagADUHandle)

    # Note that EXP^gray = < <mstd>_j - mstd_ij >
    #  when we have seeing loss, that makes mstd_ij larger and EXP^gray smaller
    #  So the slope of aperCorr is negative.
    #  If we add aperCorr to each of mstd_ij, then we get a smaller (brighter)
    #  magnitude.  And this will bring mstd_ij closer to <mstd>_j
    rawMags[:] += fgcmPars.expApertureCorrection[expIndexOfObs]
def computeModelMagErrors(self, fgcmPars):
    """
    Compute model magnitude errors for photometric observations and store
    them in the shared obsMagADUModelErr array.

    parameters
    ----------
    fgcmPars: FgcmParameters
    """
    # a non-positive fwhm pivot means no error model was fit
    if (fgcmPars.compModelErrFwhmPivot[0] <= 0.0) :
        self.fgcmLog.info('No model for mag errors, so mag errors are unchanged.')
        return

    if not self.modelMagErrors:
        self.fgcmLog.info('Model magnitude errors are turned off.')
        return

    if not self.magStdComputed:
        raise RuntimeError("Must run FgcmChisq to compute magStd before computeModelMagErrors")

    self.fgcmLog.info('Computing model magnitude errors for photometric observations')

    objFlag = snmm.getArray(self.objFlagHandle)
    objNGoodObs = snmm.getArray(self.objNGoodObsHandle)
    objMagStdMean = snmm.getArray(self.objMagStdMeanHandle)
    obsObjIDIndex = snmm.getArray(self.obsObjIDIndexHandle)
    obsFlag = snmm.getArray(self.obsFlagHandle)
    obsExpIndex = snmm.getArray(self.obsExpIndexHandle)
    obsBandIndex = snmm.getArray(self.obsBandIndexHandle)
    obsMagADU = snmm.getArray(self.obsMagADUHandle)
    obsMagADUErr = snmm.getArray(self.obsMagADUErrHandle)
    obsMagADUModelErr = snmm.getArray(self.obsMagADUModelErrHandle)
    obsMagStd = snmm.getArray(self.obsMagStdHandle)

    # broadcast per-exposure metadata to per-observation arrays
    obsExptime = fgcmPars.expExptime[obsExpIndex]
    obsFwhm = fgcmPars.expFwhm[obsExpIndex]
    obsSkyBrightness = fgcmPars.expSkyBrightness[obsExpIndex]

    # we will compute all stars that are possibly good, including reserved
    resMask = 255 & ~objFlagDict['RESERVED']
    goodStars, = np.where((objFlag & resMask) == 0)

    goodStarsSub, goodObs = esutil.numpy_util.match(goodStars,
                                                    obsObjIDIndex,
                                                    presorted=True)

    # Do we want to allow more selection of exposures here?
    gd, = np.where((obsFlag[goodObs] == 0) &
                   (fgcmPars.expFlag[obsExpIndex[goodObs]] == 0))
    goodObs = goodObs[gd]
    goodStarsSub = goodStarsSub[gd]

    # loop over bands; each band has its own model parameters and pivots
    for bandIndex in xrange(fgcmPars.nBands):
        use, = np.where((obsBandIndex[goodObs] == bandIndex) &
                        (objNGoodObs[obsObjIDIndex[goodObs], bandIndex] > self.minObsPerBand))

        pars = fgcmPars.compModelErrPars[:, bandIndex]
        fwhmPivot = fgcmPars.compModelErrFwhmPivot[bandIndex]
        skyPivot = fgcmPars.compModelErrSkyPivot[bandIndex]
        exptimePivot = fgcmPars.compModelErrExptimePivot[bandIndex]

        # mean magnitude mapped back to raw-ADU space, normalized to the
        # pivot exposure time
        obsMagADUMeanGOu = (objMagStdMean[obsObjIDIndex[goodObs[use]], bandIndex] -
                            (obsMagStd[goodObs[use]] - obsMagADU[goodObs[use]]) -
                            2.5 * np.log10(obsExptime[goodObs[use]] / exptimePivot))

        # log-space error model: polynomial in mean mag plus log(fwhm) and
        # log(sky) terms, with mag/fwhm and mag/sky cross terms
        modErr = 10.**(pars[0] + pars[1] * obsMagADUMeanGOu + pars[2] * obsMagADUMeanGOu**2. +
                       pars[3] * np.log10(obsFwhm[goodObs[use]] / fwhmPivot) +
                       pars[4] * np.log10(obsSkyBrightness[goodObs[use]] / skyPivot) +
                       pars[5] * obsMagADUMeanGOu * np.log10(obsFwhm[goodObs[use]] / fwhmPivot) +
                       pars[6] * obsMagADUMeanGOu * np.log10(obsSkyBrightness[goodObs[use]] / skyPivot))

        # add the sigma0Phot floor in quadrature, as for the observed errors
        obsMagADUModelErr[goodObs[use]] = np.sqrt(modErr**2. + self.sigma0Phot**2.)

        # debug bit...
        """
        plt.set_cmap('viridis')

        fig = plt.figure(1, figsize=(8,6))
        fig.clf()

        ax = fig.add_subplot(111)

        ax.hexbin(obsMagADUErr[goodObs[use]], obsMagADUModelErr[goodObs[use]], bins='log')
        ax.plot([0., 0.08], [0., 0.08], 'r--')
        ax.set_title('band = %s, %d' % (self.bands[bandIndex], use.size))
        ax.set_xlabel('Observed error')
        ax.set_ylabel('Model Error')

        fig.savefig('temp_%s.png' % (self.bands[bandIndex]))
        plt.close(fig)
        """
def saveFlagStarIndices(self,flagStarFile):
    """
    Write the flagged-star (OBJID, OBJFLAG) records to a fits file.

    parameters
    ----------
    flagStarFile: string
       Filename to output.
    """
    import fitsio

    records = self.getFlagStarIndices()
    self.fgcmLog.info('Saving %d flagged star indices to %s' %
                      (records.size, flagStarFile))

    # clobber=True overwrites any existing file
    fitsio.write(flagStarFile, records, clobber=True)
def getFlagStarIndices(self):
    """
    Build and return a structured array of (OBJID, OBJFLAG) for stars
    flagged VARIABLE or RESERVED.
    """
    objID = snmm.getArray(self.objIDHandle)
    objFlag = snmm.getArray(self.objFlagHandle)

    # only VARIABLE and RESERVED flags are persisted between cycles;
    # everything else should be recomputed based on the good exposures,
    # calibrations, etc
    keepMask = (objFlagDict['VARIABLE'] |
                objFlagDict['RESERVED'])
    keep, = np.where((objFlag & keepMask) > 0)

    records = np.zeros(keep.size, dtype=[('OBJID', objID.dtype),
                                        ('OBJFLAG', objFlag.dtype)])
    records['OBJID'] = objID[keep]
    records['OBJFLAG'] = objFlag[keep]

    return records
def saveStdStars(self, starFile, fgcmPars):
    """
    Save standard stars.  Note that this does not fill in holes.

    parameters
    ----------
    starFile: string
       Output star file
    fgcmPars: FgcmParameters
    """
    import fitsio

    self.fgcmLog.info('Saving standard stars to %s' % (starFile))

    objID = snmm.getArray(self.objIDHandle)
    objFlag = snmm.getArray(self.objFlagHandle)
    objRA = snmm.getArray(self.objRAHandle)
    objDec = snmm.getArray(self.objDecHandle)
    objNGoodObs = snmm.getArray(self.objNGoodObsHandle)
    objMagStdMean = snmm.getArray(self.objMagStdMeanHandle)
    objMagStdMeanErr = snmm.getArray(self.objMagStdMeanErrHandle)

    # exclude stars with bad colors, variability, or too few observations
    rejectMask = (objFlagDict['BAD_COLOR'] | objFlagDict['VARIABLE'] |
                  objFlagDict['TOO_FEW_OBS'])
    goodStars, = np.where((objFlag & rejectMask) == 0)

    # Fix: the per-band column width must be self.nBands.  self.bands is a
    # python list (loadStars calls self.bands.index()), so the previous
    # self.bands.size raised AttributeError here.
    outCat = np.zeros(goodStars.size, dtype=[('FGCM_ID', 'i8'),
                                             ('RA', 'f8'),
                                             ('DEC', 'f8'),
                                             ('NGOOD', 'i4', self.nBands),
                                             ('MAG_STD', 'f4', self.nBands),
                                             ('MAGERR_STD', 'f4', self.nBands)])

    outCat['FGCM_ID'] = objID[goodStars]
    outCat['RA'] = objRA[goodStars]
    outCat['DEC'] = objDec[goodStars]
    outCat['NGOOD'] = objNGoodObs[goodStars, :]
    outCat['MAG_STD'][:, :] = objMagStdMean[goodStars, :]
    outCat['MAGERR_STD'][:, :] = objMagStdMeanErr[goodStars, :]

    fitsio.write(starFile, outCat, clobber=True)
def __getstate__(self):
# Don't try to pickle the logger.
state = self.__dict__.copy()
del state['fgcmLog']
return state
| [
"numpy.log10",
"numpy.sqrt",
"numpy.log",
"healpy.ang2pix",
"numpy.sin",
"numpy.add.at",
"numpy.arange",
"past.builtins.xrange",
"numpy.where",
"fitsio.read",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.vstack",
"numpy.min",
"numpy.core.defchararray.strip",
"esutil.numpy_util.match"... | [((4719, 4730), 'time.time', 'time.time', ([], {}), '()\n', (4728, 4730), False, 'import time\n'), ((4810, 4850), 'fitsio.read', 'fitsio.read', (['self.indexFile'], {'ext': '"""INDEX"""'}), "(self.indexFile, ext='INDEX')\n", (4821, 4850), False, 'import fitsio\n'), ((5056, 5067), 'time.time', 'time.time', ([], {}), '()\n', (5065, 5067), False, 'import time\n'), ((5143, 5175), 'fitsio.read', 'fitsio.read', (['self.obsFile'], {'ext': '(1)'}), '(self.obsFile, ext=1)\n', (5154, 5175), False, 'import fitsio\n'), ((5447, 5458), 'time.time', 'time.time', ([], {}), '()\n', (5456, 5458), False, 'import time\n'), ((5531, 5569), 'fitsio.read', 'fitsio.read', (['self.indexFile'], {'ext': '"""POS"""'}), "(self.indexFile, ext='POS')\n", (5542, 5569), False, 'import fitsio\n'), ((5808, 5856), 'numpy.core.defchararray.strip', 'np.core.defchararray.strip', (["obs['FILTERNAME'][:]"], {}), "(obs['FILTERNAME'][:])\n", (5834, 5856), True, 'import numpy as np\n'), ((9321, 9342), 'numpy.arange', 'np.arange', (['obsRA.size'], {}), '(obsRA.size)\n', (9330, 9342), True, 'import numpy as np\n'), ((12912, 12941), 'numpy.where', 'np.where', (['(obsMagADUErr <= 0.0)'], {}), '(obsMagADUErr <= 0.0)\n', (12920, 12941), True, 'import numpy as np\n'), ((13162, 13218), 'numpy.sqrt', 'np.sqrt', (['(obsMagADUErr[:] ** 2.0 + self.sigma0Phot ** 2.0)'], {}), '(obsMagADUErr[:] ** 2.0 + self.sigma0Phot ** 2.0)\n', (13169, 13218), True, 'import numpy as np\n'), ((13420, 13431), 'time.time', 'time.time', ([], {}), '()\n', (13429, 13431), False, 'import time\n'), ((13881, 13906), 'numpy.where', 'np.where', (['(obsExpIndex < 0)'], {}), '(obsExpIndex < 0)\n', (13889, 13906), True, 'import numpy as np\n'), ((14181, 14192), 'time.time', 'time.time', ([], {}), '()\n', (14190, 14192), False, 'import time\n'), ((18054, 18065), 'time.time', 'time.time', ([], {}), '()\n', (18063, 18065), False, 'import time\n'), ((18610, 18629), 'past.builtins.xrange', 'xrange', (['self.nStars'], {}), 
'(self.nStars)\n', (18616, 18629), False, 'from past.builtins import xrange\n'), ((22241, 22252), 'time.time', 'time.time', ([], {}), '()\n', (22250, 22252), False, 'import time\n'), ((22521, 22547), 'numpy.where', 'np.where', (['(objRARad > np.pi)'], {}), '(objRARad > np.pi)\n', (22529, 22547), True, 'import numpy as np\n'), ((23142, 23164), 'numpy.where', 'np.where', (['(obsFlag != 0)'], {}), '(obsFlag != 0)\n', (23150, 23164), True, 'import numpy as np\n'), ((24925, 24976), 'esutil.numpy_util.match', 'esutil.numpy_util.match', (['goodExpsIndex', 'obsExpIndex'], {}), '(goodExpsIndex, obsExpIndex)\n', (24948, 24976), False, 'import esutil\n'), ((25061, 25092), 'numpy.where', 'np.where', (['(obsFlag[goodObs] == 0)'], {}), '(obsFlag[goodObs] == 0)\n', (25069, 25092), True, 'import numpy as np\n'), ((25203, 25277), 'numpy.add.at', 'np.add.at', (['objNGoodObs', '(obsObjIDIndex[goodObs], obsBandIndex[goodObs])', '(1)'], {}), '(objNGoodObs, (obsObjIDIndex[goodObs], obsBandIndex[goodObs]), 1)\n', (25212, 25277), True, 'import numpy as np\n'), ((25662, 25694), 'numpy.where', 'np.where', (['(minObs < minObsPerBand)'], {}), '(minObs < minObsPerBand)\n', (25670, 25694), True, 'import numpy as np\n'), ((27609, 27651), 'esutil.numpy_util.match', 'esutil.numpy_util.match', (['goodHash', 'obsHash'], {}), '(goodHash, obsHash)\n', (27632, 27651), False, 'import esutil\n'), ((27737, 27768), 'numpy.where', 'np.where', (['(obsFlag[goodObs] == 0)'], {}), '(obsFlag[goodObs] == 0)\n', (27745, 27768), True, 'import numpy as np\n'), ((27879, 27953), 'numpy.add.at', 'np.add.at', (['objNGoodObs', '(obsObjIDIndex[goodObs], obsBandIndex[goodObs])', '(1)'], {}), '(objNGoodObs, (obsObjIDIndex[goodObs], obsBandIndex[goodObs]), 1)\n', (27888, 27953), True, 'import numpy as np\n'), ((28316, 28348), 'numpy.where', 'np.where', (['(minObs < minObsPerBand)'], {}), '(minObs < minObsPerBand)\n', (28324, 28348), True, 'import numpy as np\n'), ((29146, 29183), 'healpy.ang2pix', 'hp.ang2pix', 
(['self.mapNSide', 'theta', 'phi'], {}), '(self.mapNSide, theta, phi)\n', (29156, 29183), True, 'import healpy as hp\n'), ((29201, 29286), 'esutil.stat.histogram', 'esutil.stat.histogram', (['ipring'], {'min': '(0)', 'max': '(12 * self.mapNSide * self.mapNSide - 1)'}), '(ipring, min=0, max=12 * self.mapNSide * self.mapNSide - 1\n )\n', (29222, 29286), False, 'import esutil\n'), ((29333, 29355), 'numpy.where', 'np.where', (['(densMap == 0)'], {}), '(densMap == 0)\n', (29341, 29355), True, 'import numpy as np\n'), ((29465, 29492), 'numpy.where', 'np.where', (['(raStarRot > 180.0)'], {}), '(raStarRot > 180.0)\n', (29473, 29492), True, 'import numpy as np\n'), ((29990, 30004), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (29999, 30004), True, 'import matplotlib.pyplot as plt\n'), ((31351, 31405), 'numpy.zeros', 'np.zeros', (['(objIndicesIn.size, self.nBands)'], {'dtype': '"""f4"""'}), "((objIndicesIn.size, self.nBands), dtype='f4')\n", (31359, 31405), True, 'import numpy as np\n'), ((31587, 31610), 'numpy.where', 'np.where', (['(maxMag < 90.0)'], {}), '(maxMag < 90.0)\n', (31595, 31610), True, 'import numpy as np\n'), ((31657, 31716), 'numpy.zeros', 'np.zeros', (['(goodIndicesOI.size, self.nBands - 1)'], {'dtype': '"""f8"""'}), "((goodIndicesOI.size, self.nBands - 1), dtype='f8')\n", (31665, 31716), True, 'import numpy as np\n'), ((31730, 31753), 'past.builtins.xrange', 'xrange', (['(self.nBands - 1)'], {}), '(self.nBands - 1)\n', (31736, 31753), False, 'from past.builtins import xrange\n'), ((41029, 41061), 'numpy.where', 'np.where', (['(objFlag & resMask == 0)'], {}), '(objFlag & resMask == 0)\n', (41037, 41061), True, 'import numpy as np\n'), ((41097, 41162), 'esutil.numpy_util.match', 'esutil.numpy_util.match', (['goodStars', 'obsObjIDIndex'], {'presorted': '(True)'}), '(goodStars, obsObjIDIndex, presorted=True)\n', (41120, 41162), False, 'import esutil\n'), ((41354, 41439), 'numpy.where', 'np.where', (['((obsFlag[goodObs] == 0) & 
(fgcmPars.expFlag[obsExpIndex[goodObs]] == 0))'], {}), '((obsFlag[goodObs] == 0) & (fgcmPars.expFlag[obsExpIndex[goodObs]] ==\n 0))\n', (41362, 41439), True, 'import numpy as np\n'), ((41581, 41604), 'past.builtins.xrange', 'xrange', (['fgcmPars.nBands'], {}), '(fgcmPars.nBands)\n', (41587, 41604), False, 'from past.builtins import xrange\n'), ((43926, 43981), 'fitsio.write', 'fitsio.write', (['flagStarFile', 'flagObjStruct'], {'clobber': '(True)'}), '(flagStarFile, flagObjStruct, clobber=True)\n', (43938, 43981), False, 'import fitsio\n'), ((44435, 44467), 'numpy.where', 'np.where', (['(objFlag & flagMask > 0)'], {}), '(objFlag & flagMask > 0)\n', (44443, 44467), True, 'import numpy as np\n'), ((44495, 44582), 'numpy.zeros', 'np.zeros', (['flagged.size'], {'dtype': "[('OBJID', objID.dtype), ('OBJFLAG', objFlag.dtype)]"}), "(flagged.size, dtype=[('OBJID', objID.dtype), ('OBJFLAG', objFlag.\n dtype)])\n", (44503, 44582), True, 'import numpy as np\n'), ((46092, 46127), 'numpy.where', 'np.where', (['(objFlag & rejectMask == 0)'], {}), '(objFlag & rejectMask == 0)\n', (46100, 46127), True, 'import numpy as np\n'), ((46148, 46346), 'numpy.zeros', 'np.zeros', (['goodStars.size'], {'dtype': "[('FGCM_ID', 'i8'), ('RA', 'f8'), ('DEC', 'f8'), ('NGOOD', 'i4', self.bands\n .size), ('MAG_STD', 'f4', self.bands.size), ('MAGERR_STD', 'f4', self.\n bands.size)]"}), "(goodStars.size, dtype=[('FGCM_ID', 'i8'), ('RA', 'f8'), ('DEC',\n 'f8'), ('NGOOD', 'i4', self.bands.size), ('MAG_STD', 'f4', self.bands.\n size), ('MAGERR_STD', 'f4', self.bands.size)])\n", (46156, 46346), True, 'import numpy as np\n'), ((46993, 47037), 'fitsio.write', 'fitsio.write', (['starFile', 'outCat'], {'clobber': '(True)'}), '(starFile, outCat, clobber=True)\n', (47005, 47037), False, 'import fitsio\n'), ((3127, 3158), 'numpy.where', 'np.where', (['self.bandRequiredFlag'], {}), '(self.bandRequiredFlag)\n', (3135, 3158), True, 'import numpy as np\n'), ((3246, 3274), 'numpy.where', 'np.where', 
(['self.bandExtraFlag'], {}), '(self.bandExtraFlag)\n', (3254, 3274), True, 'import numpy as np\n'), ((4068, 4078), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (4074, 4078), True, 'import numpy as np\n'), ((6067, 6106), 'fitsio.read', 'fitsio.read', (['self.inFlagStarFile'], {'ext': '(1)'}), '(self.inFlagStarFile, ext=1)\n', (6078, 6106), False, 'import fitsio\n'), ((20015, 20053), 'esutil.numpy_util.match', 'esutil.numpy_util.match', (['flagID', 'objID'], {}), '(flagID, objID)\n', (20038, 20053), False, 'import esutil\n'), ((20073, 20124), 'numpy.where', 'np.where', (["(flagFlag[a] & objFlagDict['VARIABLE'] > 0)"], {}), "(flagFlag[a] & objFlagDict['VARIABLE'] > 0)\n", (20081, 20124), True, 'import numpy as np\n'), ((20273, 20324), 'numpy.where', 'np.where', (["(flagFlag[a] & objFlagDict['RESERVED'] > 0)"], {}), "(flagFlag[a] & objFlagDict['RESERVED'] > 0)\n", (20281, 20324), True, 'import numpy as np\n'), ((23554, 23587), 'numpy.arange', 'np.arange', (['fgcmPars.expArray.size'], {}), '(fgcmPars.expArray.size)\n', (23563, 23587), True, 'import numpy as np\n'), ((33721, 33780), 'numpy.where', 'np.where', (['(objMagStdMeanOI[goodIndicesOI, extraIndex] < 90.0)'], {}), '(objMagStdMeanOI[goodIndicesOI, extraIndex] < 90.0)\n', (33729, 33780), True, 'import numpy as np\n'), ((36237, 36292), 'numpy.where', 'np.where', (['((thisColor < cCut[2]) | (thisColor > cCut[3]))'], {}), '((thisColor < cCut[2]) | (thisColor > cCut[3]))\n', (36245, 36292), True, 'import numpy as np\n'), ((37584, 37632), 'esutil.stat.histogram', 'esutil.stat.histogram', (['epochFilterHash'], {'rev': '(True)'}), '(epochFilterHash, rev=True)\n', (37605, 37632), False, 'import esutil\n'), ((37655, 37669), 'past.builtins.xrange', 'xrange', (['h.size'], {}), '(h.size)\n', (37661, 37669), False, 'from past.builtins import xrange\n'), ((41625, 41748), 'numpy.where', 'np.where', (['((obsBandIndex[goodObs] == bandIndex) & (objNGoodObs[obsObjIDIndex[goodObs],\n bandIndex] > self.minObsPerBand))'], {}), 
'((obsBandIndex[goodObs] == bandIndex) & (objNGoodObs[obsObjIDIndex[\n goodObs], bandIndex] > self.minObsPerBand))\n', (41633, 41748), True, 'import numpy as np\n'), ((42829, 42876), 'numpy.sqrt', 'np.sqrt', (['(modErr ** 2.0 + self.sigma0Phot ** 2.0)'], {}), '(modErr ** 2.0 + self.sigma0Phot ** 2.0)\n', (42836, 42876), True, 'import numpy as np\n'), ((20758, 20818), 'numpy.random.choice', 'np.random.choice', (['objFlag.size'], {'size': 'nReserve', 'replace': '(False)'}), '(objFlag.size, size=nReserve, replace=False)\n', (20774, 20818), True, 'import numpy as np\n'), ((13839, 13850), 'time.time', 'time.time', ([], {}), '()\n', (13848, 13850), False, 'import time\n'), ((15746, 15757), 'time.time', 'time.time', ([], {}), '()\n', (15755, 15757), False, 'import time\n'), ((18800, 18811), 'time.time', 'time.time', ([], {}), '()\n', (18809, 18811), False, 'import time\n'), ((22938, 22970), 'numpy.sin', 'np.sin', (['objDecRad[obsObjIDIndex]'], {}), '(objDecRad[obsObjIDIndex])\n', (22944, 22970), True, 'import numpy as np\n'), ((23110, 23126), 'numpy.cos', 'np.cos', (['obsHARad'], {}), '(obsHARad)\n', (23116, 23126), True, 'import numpy as np\n'), ((23400, 23411), 'time.time', 'time.time', ([], {}), '()\n', (23409, 23411), False, 'import time\n'), ((29664, 29681), 'numpy.min', 'np.min', (['raStarRot'], {}), '(raStarRot)\n', (29670, 29681), True, 'import numpy as np\n'), ((29682, 29699), 'numpy.max', 'np.max', (['raStarRot'], {}), '(raStarRot)\n', (29688, 29699), True, 'import numpy as np\n'), ((29741, 29756), 'numpy.min', 'np.min', (['decStar'], {}), '(decStar)\n', (29747, 29756), True, 'import numpy as np\n'), ((29757, 29772), 'numpy.max', 'np.max', (['decStar'], {}), '(decStar)\n', (29763, 29772), True, 'import numpy as np\n'), ((38053, 38086), 'numpy.vstack', 'np.vstack', (['(obsX[i1a], obsY[i1a])'], {}), '((obsX[i1a], obsY[i1a]))\n', (38062, 38086), True, 'import numpy as np\n'), ((42246, 42295), 'numpy.log10', 'np.log10', (['(obsExptime[goodObs[use]] / 
exptimePivot)'], {}), '(obsExptime[goodObs[use]] / exptimePivot)\n', (42254, 42295), True, 'import numpy as np\n'), ((4975, 4986), 'time.time', 'time.time', ([], {}), '()\n', (4984, 4986), False, 'import time\n'), ((5373, 5384), 'time.time', 'time.time', ([], {}), '()\n', (5382, 5384), False, 'import time\n'), ((5695, 5706), 'time.time', 'time.time', ([], {}), '()\n', (5704, 5706), False, 'import time\n'), ((23024, 23056), 'numpy.cos', 'np.cos', (['objDecRad[obsObjIDIndex]'], {}), '(objDecRad[obsObjIDIndex])\n', (23030, 23056), True, 'import numpy as np\n'), ((42729, 42780), 'numpy.log10', 'np.log10', (['(obsSkyBrightness[goodObs[use]] / skyPivot)'], {}), '(obsSkyBrightness[goodObs[use]] / skyPivot)\n', (42737, 42780), True, 'import numpy as np\n'), ((42627, 42670), 'numpy.log10', 'np.log10', (['(obsFwhm[goodObs[use]] / fwhmPivot)'], {}), '(obsFwhm[goodObs[use]] / fwhmPivot)\n', (42635, 42670), True, 'import numpy as np\n'), ((42517, 42568), 'numpy.log10', 'np.log10', (['(obsSkyBrightness[goodObs[use]] / skyPivot)'], {}), '(obsSkyBrightness[goodObs[use]] / skyPivot)\n', (42525, 42568), True, 'import numpy as np\n'), ((42434, 42477), 'numpy.log10', 'np.log10', (['(obsFwhm[goodObs[use]] / fwhmPivot)'], {}), '(obsFwhm[goodObs[use]] / fwhmPivot)\n', (42442, 42477), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import os
from pathlib import Path
from setuptools import find_packages, setup, Extension
__version__ = '2.1.0'

# Long description and pinned requirements live next to this setup script.
_here = Path(__file__).parent
README = (_here / "README.md").read_text()
requirements_txt = _here / "requirements.txt"
with open(requirements_txt) as req_file:
    requirements = req_file.readlines()
class NumpyExtension(Extension):
    """Extension that defers numpy header discovery until build time.

    setuptools calls _convert_pyx_sources_to_lang only after install-time
    dependencies (numpy, cython) are available, so numpy is imported lazily
    inside the hook.
    """

    def _convert_pyx_sources_to_lang(self):
        import numpy

        self.include_dirs.append(numpy.get_include())
        # On non-Windows platforms link libm and enable fast math.
        if os.name != 'nt':
            self.libraries.append('m')
            self.extra_compile_args.append('-ffast-math')
        super()._convert_pyx_sources_to_lang()
# Cython extension modules that need numpy headers at build time.
_pyx_modules = (
    ('JSSP.genetic_algorithm._ga_helpers', 'JSSP/genetic_algorithm/_ga_helpers.pyx'),
    ('JSSP.solution._makespan', 'JSSP/solution/_makespan.pyx'),
    ('JSSP.tabu_search._generate_neighbor', 'JSSP/tabu_search/_generate_neighbor.pyx'),
)
ext_modules = [NumpyExtension(mod_name, [pyx_path])
               for mod_name, pyx_path in _pyx_modules]
# Distribution metadata; __version__, README, requirements and ext_modules
# are all defined above in this file.
setup(
    name='JSSP',
    version=__version__,
    description='Package for solving the job shop schedule problem with sequence dependent set up times.',
    author='<NAME> (mcfadd)',
    author_email='<EMAIL>',
    python_requires='>=3.6.0',
    url='https://github.com/mcfadd/Job_Shop_Schedule_Problem',
    download_url='https://github.com/mcfadd/Job_Shop_Schedule_Problem/archive/' + __version__ + '.tar.gz',
    license='ISC',
    keywords=['Job Shop Schedule Problem', 'Optimization', 'Tabu Search', 'Genetic Algorithm'],
    long_description=README,
    long_description_content_type='text/markdown',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Intended Audience :: Manufacturing',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: ISC License (ISCL)',
        'Operating System :: POSIX',
        'Operating System :: Unix',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: MacOS',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Cython',
        'Topic :: Scientific/Engineering :: Mathematics',
        'Topic :: Office/Business :: Scheduling',
    ],
    # Build-time deps needed before NumpyExtension's hook runs.
    setup_requires=['numpy==1.16.*', 'cython==0.29.*'],
    install_requires=requirements,
    packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
    include_package_data=True,
    ext_modules=ext_modules,
)
| [
"numpy.get_include",
"setuptools.find_packages",
"pathlib.Path"
] | [((216, 230), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (220, 230), False, 'from pathlib import Path\n'), ((2727, 2794), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['*.tests', '*.tests.*', 'tests.*', 'tests']"}), "(exclude=['*.tests', '*.tests.*', 'tests.*', 'tests'])\n", (2740, 2794), False, 'from setuptools import find_packages, setup, Extension\n'), ((541, 560), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (558, 560), False, 'import numpy\n'), ((148, 162), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (152, 162), False, 'from pathlib import Path\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
r"""
Code for computing characters of Hessenberg varieties.
"""
import itertools as it
import numpy as np
from collections import defaultdict
from fragment import *
from path import *
from perm import *
from util import *
# ---------------------------------------------------------
import logging
# Module-level logger; a stream handler is attached later by setup_logging().
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# ---------------------------------------------------------
def translators(n):
    r"""
    Return the lperms for which we can compute character values.

    >>> translators(3) == {
    ...     (0, 1, 2): (1, 1, 1),
    ...     (1, 0, 2): (2, 1),
    ...     (2, 0, 1): (3,),
    ... }
    True
    """
    result = {}
    for cc in compositions(n):
        cycle_type = tuple(sorted(cc, reverse=True))
        # Keep only partitions, i.e. weakly decreasing compositions.
        if cc != cycle_type:
            continue
        # Build the bfact word of the corresponding long-cycle permutation:
        # part-1 zeros followed by the value part-1, for each part.
        bfact = []
        for part in cc:
            bfact.extend([0] * (part - 1))
            bfact.append(part - 1)
        result[blist_from_bfact(tuple(bfact))] = cycle_type
    return result
# ---------------------------------------------------------
def rpspec(bfact, path):
    r"""
    Do some voodoo on a bijection and a path to get a root product spec.

    Returns a pair (mobile, fixed) of tagged triples.  Note the asymmetry:
    fixed entries are (value, i, 'f') storing the blist *entry*, whereas
    mobile entries are (position, i, 'm') storing the *index* jj into blist,
    to be re-resolved later by rp() against a possibly different bfact.
    """
    mobile, fixed = [], []
    blist = []
    n = len(bfact)
    assert is_bfact(bfact)
    assert is_path(path)
    assert n == len(path)
    for i in range(n):
        # Insert i so that exactly bfact[i] earlier entries end up after it.
        j = len(blist) - bfact[i]
        blist.insert(j, i)
        crossed = False
        for jj in range(j+1, len(blist)):
            if blist[jj] < path[i]:
                # Once an entry below path[i] is passed, later pairs are fixed.
                crossed = True
            elif blist[jj] < i:
                if crossed:
                    fixed.append((blist[jj], i, 'f'))
                else:
                    mobile.append((jj, i, 'm'))
    return mobile, fixed
def rp(bfact, spec):
    r"""
    Do some more voodoo to transform a spec into an actual root product.

    Fixed triples (j, i, 'f') contribute the root (j, i) directly.  Mobile
    triples (jj, ii, 'm') store a blist *position*, which is resolved while
    rebuilding the blist from *bfact*: when entry i is inserted, each mobile
    triple with ii == i contributes the root (blist[jj], i).
    """
    mobile, fixed = spec
    result = []
    for (j, i, f) in fixed:
        assert f == 'f'
        result.append((j, i))
    blist = []
    for i in range(len(bfact)):
        j = len(blist) - bfact[i]
        blist.insert(j, i)
        # Resolve mobile positions against the partially built blist.
        for (jj, ii, m) in mobile:
            assert m == 'm'
            if ii == i:
                result.append((blist[jj], i))
    return result
_flowup_cache = {}


def flowup(bfact, path):
    """Memoized front-end for _flowup_compute, keyed on (bfact, path)."""
    key = (bfact, path)
    if key not in _flowup_cache:
        _flowup_cache[key] = _flowup_compute(bfact, path)
    return _flowup_cache[key]
def _flowup_compute(bfact, path):
    r"""
    Return a fragment of a flowup basis vector.

    Maps each blist in the support to its root product, where the support
    ranges over all bfacts obtained by bumping entry k of *bfact* by one
    wherever the bfact bound (entry k <= k) allows it.
    """
    spec = rpspec(bfact, path)
    projections = [
        ((i, i+1) if i < k else (i,))
        for k, i in enumerate(bfact)
    ]
    result = {}
    for c in it.product(*projections):
        assert is_bfact(c)
        result[blist_from_bfact(c)] = rp(c, spec)
    return result
# ---------------------------------------------------------
_indices_above_cache = {}


def indices_above(bfact):
    """Memoized front-end for _indices_above_compute."""
    if bfact not in _indices_above_cache:
        _indices_above_cache[bfact] = _indices_above_compute(bfact)
    return _indices_above_cache[bfact]
def _indices_above_compute(bfact):
    """Map each valid offset on the cube to the blist of bfact + offset."""
    assert is_bfact(bfact)
    n = len(bfact)
    result = {}
    # Offsets range over {0} x {0,1}^(n-1); keep only those landing on bfacts.
    for offset in it.product((0,), *[(0, 1)] * (n - 1)):
        candidate = tuple(b + o for b, o in zip(bfact, offset))
        if is_bfact(candidate):
            result[offset] = blist_from_bfact(candidate)
    return result
_indices_below_cache = {}


def indices_below(bfact):
    """Memoized front-end for _indices_below_compute."""
    if bfact not in _indices_below_cache:
        _indices_below_cache[bfact] = _indices_below_compute(bfact)
    return _indices_below_cache[bfact]
def _indices_below_compute(bfact):
    """Map offsets to blists for bfacts reachable below *bfact* on the cube."""
    assert is_bfact(bfact)
    n = len(bfact)
    # Shift down by the maximal offset, then enumerate the same offset cube.
    maxoff = (0,) + (1,) * (n - 1)
    base = tuple(b - m for b, m in zip(bfact, maxoff))
    result = {}
    for offset in it.product((0,), *[(0, 1)] * (n - 1)):
        candidate = tuple(b + o for b, o in zip(base, offset))
        if is_bfact(candidate):
            result[offset] = blist_from_bfact(candidate)
    return result
def frag_at(n, frag, indices):
    """
    Lay a fragment out on the (1, 2, ..., 2) offset cube.

    Cells whose blist (per *indices*) appears in *frag* receive that value;
    every other cell stays 0.
    """
    shape = (1,) + (2,) * (n - 1)
    out = np.zeros(shape, dtype=object)
    for offset, olist in indices.items():
        try:
            out[offset] = frag[olist]
        except KeyError:
            pass
    return out
# ---------------------------------------------------------
def compute_left(path):
    """
    Compute the left character data for the Dyck path *path*.

    Returns a dict mapping (translator lperm, degree) -> integer coefficient,
    obtained by expanding each translated flowup vector over the flowup basis
    via exact back-substitution on the 2^(n-1) offset cube.
    """
    assert is_path(path)
    n = len(path)
    maxoff = (0,) + (1,)*(n-1)
    # Left-evaluated flowup basis vectors, laid out above each bfact.
    basis = {}
    for bfact in iter_bfact(n):
        basis[bfact] = frag_at(
            n,
            lvaluated_fragment(flowup(bfact, path)),
            indices_above(bfact),
        )
    csf = defaultdict(int)
    for t in translators(n):
        for bfact in iter_bfact(n):
            f = flowup(bfact, path)
            # Degree = number of roots in the vector's leading root product.
            deg = len(f[blist_from_bfact(bfact)])
            work_array = frag_at(
                n,
                lvaluated_fragment(translated_fragment(t, f)),
                indices_below(bfact),
            )
            for offset in it.product(*([(0,)] + [(0, 1)]*(n-1))):
                coeff = work_array[offset]
                if coeff == 0:
                    quo = 0
                else:
                    # Divide off the leading coefficient of the basis vector
                    # at this offset; the division must be exact.
                    ofact = tuple(b+o-m for b, o, m in zip(bfact, offset, maxoff))
                    ovect = basis[ofact]
                    olead = ovect[(0,)*n]
                    quo, rem = divmod(coeff, olead)
                    assert rem == 0
                # NOTE(review): indexing with a *list* mixing slices and ints
                # is deprecated in modern NumPy; may need tuple(wa_indices).
                wa_indices = [
                    slice(None, None, None) if i == 0 else 1
                    for i in offset
                ]
                ov_indices = [
                    slice(None, None, None) if i == 0 else 0
                    for i in offset
                ]
                # Subtract quo times the basis vector from the work array.
                work_array[wa_indices] -= quo * ovect[ov_indices]
                csf[t,deg] += quo
    return csf
def compute_right(path):
    """
    Compute the right character data for the Dyck path *path*.

    Identical to compute_left except that fragments are evaluated with
    rvaluated_fragment.  Returns a dict mapping (translator lperm, degree)
    -> integer coefficient.
    """
    assert is_path(path)
    n = len(path)
    maxoff = (0,) + (1,)*(n-1)
    # Right-evaluated flowup basis vectors, laid out above each bfact.
    basis = {}
    for bfact in iter_bfact(n):
        basis[bfact] = frag_at(
            n,
            rvaluated_fragment(flowup(bfact, path)),
            indices_above(bfact),
        )
    csf = defaultdict(int)
    for t in translators(n):
        for bfact in iter_bfact(n):
            f = flowup(bfact, path)
            # Degree = number of roots in the vector's leading root product.
            deg = len(f[blist_from_bfact(bfact)])
            work_array = frag_at(
                n,
                rvaluated_fragment(translated_fragment(t, f)),
                indices_below(bfact),
            )
            for offset in it.product(*([(0,)] + [(0, 1)]*(n-1))):
                coeff = work_array[offset]
                if coeff == 0:
                    quo = 0
                else:
                    # Divide off the leading coefficient of the basis vector
                    # at this offset; the division must be exact.
                    ofact = tuple(b+o-m for b, o, m in zip(bfact, offset, maxoff))
                    ovect = basis[ofact]
                    olead = ovect[(0,)*n]
                    quo, rem = divmod(coeff, olead)
                    assert rem == 0
                # NOTE(review): indexing with a *list* mixing slices and ints
                # is deprecated in modern NumPy; may need tuple(wa_indices).
                wa_indices = [
                    slice(None, None, None) if i == 0 else 1
                    for i in offset
                ]
                ov_indices = [
                    slice(None, None, None) if i == 0 else 0
                    for i in offset
                ]
                # Subtract quo times the basis vector from the work array.
                work_array[wa_indices] -= quo * ovect[ov_indices]
                csf[t,deg] += quo
    return csf
def check_rreg(path):
    r"""
    Check that the right character sums like the regular character.

    Sums the right-character coefficients per translator: on success the
    identity translator absorbs everything (24 = 4! for n=4 below) and every
    other translator sums to zero; returns the identity total, else False.

    >>> all(check_rreg(path) == 24 for path in iter_path(4))
    True
    """
    assert is_path(path)
    sums = defaultdict(int)
    # .items()/.values() instead of the Python-2-only .iteritems()/
    # .itervalues(), so this also runs under Python 3.
    for ((t, _), coeff) in compute_right(path).items():
        sums[t] += coeff
    result = sums.pop(tuple(range(len(path))))
    if all(s == 0 for s in sums.values()):
        return result
    else:
        return False
# ---------------------------------------------------------
#check translation classes?
#test vector supports?
# ---------------------------------------------------------
def doctest():
    """Run this module's doctests quietly."""
    import doctest as _doctest
    _doctest.testmod(verbose=False)
# ---------------------------------------------------------
def setup_logging():
    """Attach a stream handler showing module name and elapsed milliseconds."""
    handler = logging.StreamHandler()
    fmt = logging.Formatter(
        '%(module)s (elapsed time %(relativeCreated)d): %(message)s')
    handler.setFormatter(fmt)
    logger.addHandler(handler)
# ---------------------------------------------------------
def argparse():
    """Parse the command line and return the Dyck path as a tuple of ints."""
    import argparse as _argparse
    parser = _argparse.ArgumentParser(
        description='Compute left and right Hessenberg characters for a given Dyck path.',
    )
    parser.add_argument(
        'path',
        help='The Dyck path (e.g. triangle is 000, fully disconnected is 012).',
    )
    args = parser.parse_args()
    path = tuple(int(ch) for ch in args.path)
    assert is_path(path)
    return path
# ---------------------------------------------------------
left_output_header = r"""hess_left[{path}] = p.sum(
p.term(Partition(index), R(coeffs) / zee(index))
for index, coeffs in [
"""
right_output_header = r"""hess_right[{path}] = p.sum(
p.term(Partition(index), R(coeffs) / zee(index))
for index, coeffs in [
"""
left_output_footer = right_output_footer = r""" ])
"""
def _character_rows(char, cycle_type, n):
    """Collapse (lperm, degree) -> coeff data into sorted per-cycle-type rows.

    Each row is (cycle-type index, coefficient-by-degree list) with trailing
    zero coefficients trimmed; all-zero rows are dropped.
    """
    tmp = defaultdict(lambda: [0]*(1+n*(n-1)//2))
    # .items() instead of the Python-2-only .iteritems() for 2/3 compatibility.
    for ((lperm, deg), coeff) in char.items():
        tmp[cycle_type[lperm]][deg] = coeff
    rows = []
    for index, coeffs in sorted(tmp.items()):
        while coeffs and coeffs[-1] == 0:
            coeffs.pop()
        if coeffs:
            rows.append((index, coeffs))
    return rows


def save(path, left, right):
    """
    Write the left and right character data for *path* to
    output/hess-<path>.py, using the header/footer templates above.
    """
    n = len(path)
    cycle_type = translators(n)
    filename = 'output/hess-' + ''.join(map(str, path)) + '.py'
    with open(filename, 'w') as f:
        f.write(left_output_header.format(path=path))
        for index, coeffs in _character_rows(left, cycle_type, n):
            f.write("    ({}, {}),\n".format(list(index), coeffs))
        f.write(left_output_footer)
        f.write(right_output_header.format(path=path))
        for index, coeffs in _character_rows(right, cycle_type, n):
            f.write("    ({}, {}),\n".format(list(index), coeffs))
        f.write(right_output_footer)
# ---------------------------------------------------------
if __name__ == '__main__':
    # Run self-tests, configure logging, then compute and persist both
    # characters for the Dyck path given on the command line.
    doctest()
    setup_logging()
    path = argparse()
    logger.info('starting left computation for path %s', path)
    left = compute_left(path)
    logger.info('starting right computation for path %s', path)
    right = compute_right(path)
    save(path, left, right)
    logger.info('done with path %s', path)
# ---------------------------------------------------------
| [
"logging.getLogger",
"logging.StreamHandler",
"argparse.ArgumentParser",
"doctest",
"logging.Formatter",
"itertools.product",
"argparse",
"numpy.zeros",
"doctest.testmod",
"collections.defaultdict"
] | [((357, 384), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (374, 384), False, 'import logging\n'), ((3420, 3462), 'itertools.product', 'it.product', (['*([(0,)] + [(0, 1)] * (n - 1))'], {}), '(*([(0,)] + [(0, 1)] * (n - 1)))\n', (3430, 3462), True, 'import itertools as it\n'), ((4083, 4125), 'itertools.product', 'it.product', (['*([(0,)] + [(0, 1)] * (n - 1))'], {}), '(*([(0,)] + [(0, 1)] * (n - 1)))\n', (4093, 4125), True, 'import itertools as it\n'), ((4351, 4396), 'numpy.zeros', 'np.zeros', (['((1,) + (2,) * (n - 1))'], {'dtype': 'object'}), '((1,) + (2,) * (n - 1), dtype=object)\n', (4359, 4396), True, 'import numpy as np\n'), ((4912, 4928), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (4923, 4928), False, 'from collections import defaultdict\n'), ((6448, 6464), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (6459, 6464), False, 'from collections import defaultdict\n'), ((7825, 7841), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (7836, 7841), False, 'from collections import defaultdict\n'), ((8287, 8317), 'doctest.testmod', 'doctest.testmod', ([], {'verbose': '(False)'}), '(verbose=False)\n', (8302, 8317), False, 'import doctest\n'), ((8415, 8438), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (8436, 8438), False, 'import logging\n'), ((8709, 8820), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Compute left and right Hessenberg characters for a given Dyck path."""'}), "(description=\n 'Compute left and right Hessenberg characters for a given Dyck path.')\n", (8732, 8820), False, 'import argparse\n'), ((10689, 10698), 'doctest', 'doctest', ([], {}), '()\n', (10696, 10698), False, 'import doctest\n'), ((10730, 10740), 'argparse', 'argparse', ([], {}), '()\n', (10738, 10740), False, 'import argparse\n'), ((2845, 2869), 'itertools.product', 'it.product', (['*projections'], {}), '(*projections)\n', (2855, 
2869), True, 'import itertools as it\n'), ((8473, 8552), 'logging.Formatter', 'logging.Formatter', (['"""%(module)s (elapsed time %(relativeCreated)d): %(message)s"""'], {}), "('%(module)s (elapsed time %(relativeCreated)d): %(message)s')\n", (8490, 8552), False, 'import logging\n'), ((9721, 9771), 'collections.defaultdict', 'defaultdict', (['(lambda : [0] * (1 + n * (n - 1) // 2))'], {}), '(lambda : [0] * (1 + n * (n - 1) // 2))\n', (9732, 9771), False, 'from collections import defaultdict\n'), ((10192, 10242), 'collections.defaultdict', 'defaultdict', (['(lambda : [0] * (1 + n * (n - 1) // 2))'], {}), '(lambda : [0] * (1 + n * (n - 1) // 2))\n', (10203, 10242), False, 'from collections import defaultdict\n'), ((5278, 5320), 'itertools.product', 'it.product', (['*([(0,)] + [(0, 1)] * (n - 1))'], {}), '(*([(0,)] + [(0, 1)] * (n - 1)))\n', (5288, 5320), True, 'import itertools as it\n'), ((6814, 6856), 'itertools.product', 'it.product', (['*([(0,)] + [(0, 1)] * (n - 1))'], {}), '(*([(0,)] + [(0, 1)] * (n - 1)))\n', (6824, 6856), True, 'import itertools as it\n')] |
#!/usr/bin/env python
"""
plot navpoints (needs Levels and level-counts.txt)
"""
import numpy as np
from mpl_toolkits.mplot3d import axes3d, Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import colorConverter
import sys
from utils import IndexedPoseVisitor, get_level
import sqlite3
import itertools
def get_navpoints(level):
    """
    Load navpoints, links, and per-navpoint counts for *level*.

    Returns (navpoints, links, counts) where navpoints maps id ->
    (unreal_id, (x, y, z)), links is a list of (origin, destination) id
    pairs restricted to this level's navpoints, and counts maps id ->
    (path_count, point_count) read from NavPointIndex.dat.
    """
    navpoints = {}
    links = []
    conn = sqlite3.connect('../navpoints.db')
    try:
        c = conn.cursor()
        c.execute('select id, unreal_id, x, y, z from navpoint where level=?', (level,) )
        for row in c:
            navpoints[int(row[0])] = (row[1], tuple([float(x) for x in row[2:]]))
        # The link table is not keyed by level; keep only links whose two
        # endpoints are both navpoints of this level.
        c.execute('select origin, destination from link')
        for row in c:
            origin = int(row[0])
            dest = int(row[1])
            if origin in navpoints and dest in navpoints:
                links.append( (origin, dest) )
        c.close()
    finally:
        # Close the connection even if a query raises (previously leaked).
        conn.close()
    counts = {}
    with open('NavPointIndex.dat') as f:
        for line in f:
            name, id, path_count, point_count = line.strip().split()
            if name.startswith(level):
                id, path_count, point_count = int(id), int(path_count), int(point_count)
                counts[id] = (path_count, point_count)
    return (navpoints, links, counts)
# Stable per-navpoint-id colors, assigned lazily from a cycling palette.
navpointcolors = {}
COLORSOURCE = itertools.cycle([colorConverter.to_rgb(x) for x in ['b', 'g', 'r', 'c', 'm', 'y', 'k']])
class NavpointPlotter(IndexedPoseVisitor):
    """Plot pose segments colored by navpoint, one 3D figure per level."""

    def before(self, levelstr):
        self.level = get_level(levelstr)
        self.fig = plt.figure()
        self.ax = Axes3D(self.fig)
        # NOTE(review): Axes3D.hold was removed in matplotlib >= 3.0; kept
        # here for the (old) matplotlib this was written against.
        self.ax.hold(True)
        self.empty = True
        (self.navpoints, self.links, self.counts) = get_navpoints(self.level)

    def plot_navpoints(self):
        """Draw each navpoint as a square marker labelled by its unreal id."""
        for npid in self.navpoints:
            name, (x, y, z) = self.navpoints[npid]
            if npid in navpointcolors:
                color = navpointcolors[npid]
            else:
                # Navpoints never touched by a segment stay white.
                color = colorConverter.to_rgb('white')
            s = 7
            label = self.navpoints[npid][0].split('.')[1]
            self.ax.plot( [x], [y], [z], markerfacecolor=color, markersize=s, marker='s', label=label )

    def for_each_segment(self, segment, label, color):
        c = []
        for t, x, y, z, rx, ry, rz, npid in segment:
            if npid in navpointcolors:
                color = navpointcolors[npid]
            else:
                # next(...) instead of the Python-2-only .next() method, so
                # this works under both Python 2 and 3.
                color = next(COLORSOURCE)
                navpointcolors[npid] = color
            c.append(color)
        # Columns 1:4 of each row are the (x, y, z) coordinates.
        xyz = np.array(segment)[:,1:4]
        self.ax.scatter(xyz[:,0], xyz[:,1], xyz[:,2], c=c, edgecolors='none')
        self.empty = False

    def for_each_game(self, levelstr):
        if not self.empty:
            self.plot_navpoints()
            self.ax.set_xlabel('X')
            self.ax.set_ylabel('Y')
            self.ax.set_zlabel('Z')
            self.fig.suptitle(self.level)
            plt.savefig(self.level + '.by_navpoint.png', dpi=300)
        else:
            # Nothing was plotted for this level; discard the empty figure.
            plt.close(self.fig)
        self.empty = True

    def after(self):
        # Interactive display intentionally disabled.
        #plt.show()
        pass
def main():
    """Entry point: run the navpoint plotter, optionally on a single argument
    taken from the command line."""
    args = sys.argv[1:]
    plotter = NavpointPlotter()
    if args:
        plotter.process(args[0])
    else:
        plotter.process()
if __name__ == "__main__":
    main()
| [
"matplotlib.colors.colorConverter.to_rgb",
"matplotlib.pyplot.savefig",
"sqlite3.connect",
"utils.get_level",
"matplotlib.pyplot.close",
"numpy.array",
"matplotlib.pyplot.figure",
"mpl_toolkits.mplot3d.Axes3D"
] | [((416, 450), 'sqlite3.connect', 'sqlite3.connect', (['"""../navpoints.db"""'], {}), "('../navpoints.db')\n", (431, 450), False, 'import sqlite3\n'), ((1334, 1358), 'matplotlib.colors.colorConverter.to_rgb', 'colorConverter.to_rgb', (['x'], {}), '(x)\n', (1355, 1358), False, 'from matplotlib.colors import colorConverter\n'), ((1507, 1526), 'utils.get_level', 'get_level', (['levelstr'], {}), '(levelstr)\n', (1516, 1526), False, 'from utils import IndexedPoseVisitor, get_level\n'), ((1546, 1558), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1556, 1558), True, 'import matplotlib.pyplot as plt\n'), ((1577, 1593), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['self.fig'], {}), '(self.fig)\n', (1583, 1593), False, 'from mpl_toolkits.mplot3d import axes3d, Axes3D\n'), ((2532, 2549), 'numpy.array', 'np.array', (['segment'], {}), '(segment)\n', (2540, 2549), True, 'import numpy as np\n'), ((2924, 2977), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(self.level + '.by_navpoint.png')"], {'dpi': '(300)'}), "(self.level + '.by_navpoint.png', dpi=300)\n", (2935, 2977), True, 'import matplotlib.pyplot as plt\n'), ((3004, 3023), 'matplotlib.pyplot.close', 'plt.close', (['self.fig'], {}), '(self.fig)\n', (3013, 3023), True, 'import matplotlib.pyplot as plt\n'), ((1966, 1996), 'matplotlib.colors.colorConverter.to_rgb', 'colorConverter.to_rgb', (['"""white"""'], {}), "('white')\n", (1987, 1996), False, 'from matplotlib.colors import colorConverter\n')] |
import numpy as np
def check_data(ID=0):
    """Load the mesh/data/solution triplet for sample *ID* and print one
    summary row: ID, data min/max, scaled-solution min/max, and the mean of
    data and solution restricted to the interior of the domain (mesh > 0).
    """
    tag = str(ID) + '.npy'
    mesh = np.load('./Meshes/' + 'mesh_' + tag)
    data = np.load('./Data/' + 'data_' + tag)
    # Solutions are rescaled by a fixed factor before reporting.
    soln = 10.0 * np.load('./Solutions/' + 'solution_' + tag)
    data_min, data_max = np.min(data), np.max(data)
    soln_min, soln_max = np.min(soln), np.max(soln)
    inside = (mesh > 0)
    n_inside = np.sum(inside)
    data_int = np.sum(data[inside]) / n_inside
    soln_int = np.sum(soln[inside]) / n_inside
    print("{:2} {:.4e} {:.4e} {:.4e} {:.4e} {:.4e} {:.4e}".format(ID, data_min, data_max, soln_min, soln_max, data_int, soln_int))
if __name__ == '__main__':
    # Table header; the divider width roughly matches the data rows below.
    print("\nID   Data min    Data max    Soln min    Soln max    Data int    Soln int\n"+"-"*74)
    # Summarize every sample 0..14.
    for ID in range(0,15):
        check_data(ID=ID)
| [
"numpy.max",
"numpy.load",
"numpy.sum",
"numpy.min"
] | [((190, 208), 'numpy.load', 'np.load', (['mesh_file'], {}), '(mesh_file)\n', (197, 208), True, 'import numpy as np\n'), ((275, 293), 'numpy.load', 'np.load', (['data_file'], {}), '(data_file)\n', (282, 293), True, 'import numpy as np\n'), ((309, 321), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (315, 321), True, 'import numpy as np\n'), ((337, 349), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (343, 349), True, 'import numpy as np\n'), ((483, 501), 'numpy.load', 'np.load', (['soln_file'], {}), '(soln_file)\n', (490, 501), True, 'import numpy as np\n'), ((560, 572), 'numpy.min', 'np.min', (['soln'], {}), '(soln)\n', (566, 572), True, 'import numpy as np\n'), ((588, 600), 'numpy.max', 'np.max', (['soln'], {}), '(soln)\n', (594, 600), True, 'import numpy as np\n'), ((716, 733), 'numpy.sum', 'np.sum', (['in_domain'], {}), '(in_domain)\n', (722, 733), True, 'import numpy as np\n'), ((801, 824), 'numpy.sum', 'np.sum', (['data[in_domain]'], {}), '(data[in_domain])\n', (807, 824), True, 'import numpy as np\n'), ((853, 876), 'numpy.sum', 'np.sum', (['soln[in_domain]'], {}), '(soln[in_domain])\n', (859, 876), True, 'import numpy as np\n')] |
import numpy as np
def least_conf(data, num_classes):
    """Normalized least-confidence uncertainty score: one minus the top class
    probability, rescaled by num_classes / (num_classes - 1)."""
    n_labels = float(num_classes)
    probs = calculate_probs(data, n_labels)
    top_prob = np.nanmax(probs)  # most confident prediction, NaNs ignored
    normalized = (1 - top_prob) * (n_labels / (n_labels - 1))
    return np.array([normalized])
def margin_conf(data, num_classes):
    """Margin-of-confidence score: one minus the gap between the two most
    probable classes (larger gap -> lower uncertainty)."""
    n_labels = float(num_classes)
    probs = calculate_probs(data, n_labels)
    probs[::-1].sort()  # in-place descending sort: largest ends up at probs[0]
    return np.array([1 - (probs[0] - probs[1])])
def ratio_conf(data, num_classes):
    """Ratio-of-confidence score: second-best probability over best
    (closer to 1 -> more uncertain)."""
    n_labels = float(num_classes)
    probs = calculate_probs(data, n_labels)
    probs[::-1].sort()  # in-place descending sort: largest ends up at probs[0]
    return np.array([probs[1] / probs[0]])
def entropy_conf(data, num_classes):
    """Normalized-entropy uncertainty score (higher -> less confident)."""
    n_labels = float(num_classes)
    probs = calculate_probs(data, n_labels)
    weighted_logs = probs * np.log2(probs + 0.00001)  # small offset avoids log2(0)
    raw_entropy = 0 - np.sum(weighted_logs)
    normalized = raw_entropy / np.log2(probs.size)
    return np.array([normalized])
def bald_conf(data, num_classes):
    """BALD (Bayesian Active Learning by Disagreement) acquisition score.

    data is presumably a stack of per-model class-probability predictions
    (first axis = ensemble/dropout draws) -- TODO confirm against caller.
    num_classes is unused; kept for a uniform signature with the other
    *_conf scorers.
    """
    neg_point_entropy = np.sum(data * np.log(data + 1e-10), axis=-1)
    expected_entropy = -np.mean(neg_point_entropy, axis=0)
    entropy_of_expected = -np.sum(data * np.log(data + 1e-10), axis=-1)
    acquisition = entropy_of_expected - expected_entropy
    return np.array([acquisition])
def calculate_probs(predicted_classes, num_classes):
    '''
    Average the per-model outputs into one probability estimate per datapoint.
    :param predicted_classes: matrix num_datapoints X num_ensembles (or dropout_iterations)
    :param num_classes: unused here; kept for a uniform scorer signature
    :return: per-datapoint mean over the ensemble axis
    '''
    # Mean over axis 1, the ensemble/dropout axis.
    probs = np.mean(predicted_classes,axis = 1)
return probs | [
"numpy.mean",
"numpy.log",
"numpy.sum",
"numpy.array",
"numpy.nanmax",
"numpy.log2"
] | [((189, 209), 'numpy.nanmax', 'np.nanmax', (['prob_dist'], {}), '(prob_dist)\n', (198, 209), True, 'import numpy as np\n'), ((402, 428), 'numpy.array', 'np.array', (['least_conf_ranks'], {}), '(least_conf_ranks)\n', (410, 428), True, 'import numpy as np\n'), ((787, 814), 'numpy.array', 'np.array', (['margin_conf_ranks'], {}), '(margin_conf_ranks)\n', (795, 814), True, 'import numpy as np\n'), ((1135, 1161), 'numpy.array', 'np.array', (['ratio_conf_ranks'], {}), '(ratio_conf_ranks)\n', (1143, 1161), True, 'import numpy as np\n'), ((1579, 1607), 'numpy.array', 'np.array', (['entropy_conf_ranks'], {}), '(entropy_conf_ranks)\n', (1587, 1607), True, 'import numpy as np\n'), ((2031, 2056), 'numpy.array', 'np.array', (['bald_conf_ranks'], {}), '(bald_conf_ranks)\n', (2039, 2056), True, 'import numpy as np\n'), ((2457, 2491), 'numpy.mean', 'np.mean', (['predicted_classes'], {'axis': '(1)'}), '(predicted_classes, axis=1)\n', (2464, 2491), True, 'import numpy as np\n'), ((1341, 1367), 'numpy.log2', 'np.log2', (['(prob_dist + 1e-05)'], {}), '(prob_dist + 1e-05)\n', (1348, 1367), True, 'import numpy as np\n'), ((1437, 1454), 'numpy.sum', 'np.sum', (['log_probs'], {}), '(log_probs)\n', (1443, 1454), True, 'import numpy as np\n'), ((1494, 1517), 'numpy.log2', 'np.log2', (['prob_dist.size'], {}), '(prob_dist.size)\n', (1501, 1517), True, 'import numpy as np\n'), ((1877, 1903), 'numpy.log', 'np.log', (['(expected_p + 1e-10)'], {}), '(expected_p + 1e-10)\n', (1883, 1903), True, 'import numpy as np\n'), ((1752, 1772), 'numpy.log', 'np.log', (['(data + 1e-10)'], {}), '(data + 1e-10)\n', (1758, 1772), True, 'import numpy as np\n')] |
import numpy as np
from Filters.CMNFFilter import CMNFFilter
from numba import jit
import time
class SimpleCMNFFilter():
    """
    Conditionally minimax nonlinear filter (CMNF)
    for a nonlinear stochastic discrete-time model:
    x(t) = Phi(t-1, x(t-1), xHat(t-1)) + W(t) - state dynamics
    y(t) = Psi(t, x(t)) + Nu(t) - observations
    with t in [0, N]
    W, Nu - Gaussian white noise with zero mean and covariances DW, DNu
    Xi, Zeta - basic prediction and correction functions,
    in general case can be chosen as follows:
    Xi = Phi - by virtue of the system
    Zeta = y - Psi - residual
    If the structure functions Phi, Psi cannot be defined inline or require
    some history, an external object may be used: Phi = Phi(model, ...), Psi = Psi(model, ...)
    """
    def __init__(self, Phi, Psi, DW, DNu, Xi, Zeta):
        self.Phi = Phi  # nonlinear state-dynamics function
        self.Psi = Psi  # nonlinear observation function
        self.DW = DW  # covariance of the Gaussian noise in the state equation
        self.sigmaW = np.sqrt(DW)  # its standard deviation
        self.m_W = np.zeros_like(self.sigmaW)  # zero mean of the state noise
        self.DNu = DNu  # covariance of the Gaussian noise in the observations
        self.sigmaNu = np.sqrt(DNu)  # its standard deviation
        self.m_Nu = np.zeros_like(self.sigmaNu)  # zero mean of the observation noise
        self.Xi = Xi  # CMNF basic predictor function, applied per sample row
        self.Zeta = Zeta  # CMNF basic correction function, Zeta(x, y)
        self.tol = 1e-20  # NOTE(review): appears unused -- inverse() uses its own local tol
    def EstimateParameters(self, x, y, XHat0, silent = False):
        """
        Calibrates the filter gains via Monte-Carlo sampling over a bundle of
        pre-generated sample paths.

        Args:
            x: state sample paths, shape [M, N, state_dim]
            y: matching observation paths, shape [M, N, obs_dim]
            XHat0: initial estimate, shared by all paths
            silent: if False, report progress every 10 time steps

        Fills the per-step prediction gains FHat, fHat, correction gains
        HHat, hHat and the error covariances KTilde, KHat.
        """
        M = x.shape[0]
        N = x.shape[1]
        # Probe the correction function once to size the gain matrices.
        zeta_test = self.Zeta(x[0, 0,:], y[0, 0, :])
        self.FHat = np.zeros((N, x.shape[2], x.shape[2]))
        self.fHat = np.zeros((N, x.shape[2]))
        self.HHat = np.zeros((N, x.shape[2], zeta_test.shape[0]))
        self.hHat = np.zeros((N, x.shape[2]))
        self.KTilde = np.zeros((N, x.shape[2], x.shape[2]))
        self.KHat = np.zeros((N, x.shape[2], x.shape[2]))
        xHat = np.tile(XHat0, (M,1))
        epsilon = 0.1  # regularization for the initial step (otherwise CovXiHat is zero)
        xHat = xHat + epsilon * np.random.normal(size=xHat.shape)
        start = time.time()
        for t in range(0, N):
            if t % 10 == 0:
                end = time.time()
                if not silent:
                    print(f"estimate params CMNF t={t}, elapsed {end - start}")
                start = time.time()
            # Prediction step: best linear fit of x(t) on Xi(xHat(t-1)).
            xiHat = np.apply_along_axis(self.Xi, 1, xHat)
            CovXiHat = np.cov(xiHat, rowvar=False)
            InvCovXiHat = SimpleCMNFFilter.inverse(CovXiHat)
            F = SimpleCMNFFilter.cov(x[:, t, :], xiHat) @ InvCovXiHat
            f = x[:, t, :].mean(axis=0) - np.dot(F, xiHat.mean(axis=0))
            kTilde = np.cov(x[:, t, :], rowvar=False) - np.dot(F, SimpleCMNFFilter.cov(x[:, t, :], xiHat).T)
            xTilde = np.apply_along_axis(lambda x: F @ x + f, 1, xiHat)
            # Correction step: best linear fit of the residual on Zeta.
            zetaTilde = np.array(list(map(lambda i: self.Zeta(xTilde[i, :], y[i, t, :]), range(0, M))))
            delta_x_xTilde = x[:, t, :] - xTilde
            CovZetaTilde = np.cov(zetaTilde, rowvar=False)
            InvCovZetaTilde = np.linalg.pinv(CovZetaTilde)
            H = SimpleCMNFFilter.cov(delta_x_xTilde, zetaTilde) @ InvCovZetaTilde
            h = np.dot(-H, zetaTilde.mean(axis=0))
            delta_x = x[:, t, :] - xTilde
            kHat = kTilde - np.dot(SimpleCMNFFilter.cov(delta_x, zetaTilde), H.T)
            xHat = xTilde + zetaTilde @ H.T + h
            self.FHat[t, :, :] = F
            self.fHat[t, :] = f
            self.HHat[t, :, :] = H
            self.hHat[t, :] = h
            self.KTilde[t, :, :] = kTilde
            self.KHat[t, :, :] = kHat
    def Filter(self, y, XHat0, silent = False):
        """
        Runs the calibrated filter over observation paths y (shape
        [M, N, obs_dim]); returns estimates of shape [M, N, state_dim]
        with state_dim = XHat0.shape[0]. EstimateParameters (or
        LoadParameters) must have been called first.
        """
        M = y.shape[0]
        N = y.shape[1]
        xHat = np.zeros((M, N, XHat0.shape[0]))
        #xHat[:, 0, :] = np.tile(XHat0, (M, 1))
        start = time.time()
        for t in range(0, N):
            if t % 10 == 0:
                end = time.time()
                if not silent:
                    print(f"filter t={t}, elapsed {end - start}")
                start = time.time()
            # The initial estimate seeds the predictor on the first step.
            if t==0:
                xiHat = np.apply_along_axis(self.Xi, 1, np.tile(XHat0, (M, 1)))
            else:
                xiHat = np.apply_along_axis(self.Xi, 1, xHat[:, t-1, :])
            xTilde = np.apply_along_axis(lambda x: self.FHat[t, :, :] @ x + self.fHat[t, :], 1, xiHat)
            zetaTilde = np.array(list(map(lambda i: self.Zeta(xTilde[i, :], y[i, t, :]), range(0, M))))
            xHat[:, t, :] = xTilde + zetaTilde @ self.HHat[t, :, :].T + self.hHat[t, :]
        return(xHat)
    def SaveParameters(self, filename_template):
        """
        Saves the CMNF parameters calculated by EstimateParameters(...) in files;
        '[param]' in the template is replaced by each parameter's name.
        """
        np.save(filename_template.replace('[param]', 'FMultHat'), self.FHat)
        np.save(filename_template.replace('[param]', 'FAddHat'), self.fHat)
        np.save(filename_template.replace('[param]', 'HMultHat'), self.HHat)
        np.save(filename_template.replace('[param]', 'HAddHat'), self.hHat)
        np.save(filename_template.replace('[param]', 'KTilde'), self.KTilde)
        np.save(filename_template.replace('[param]', 'KHat'), self.KHat)
    def LoadParameters(self, filename_template):
        """
        Loads the pre-estimated CMNF parameters saved by SaveParameters(...).
        """
        self.FHat = np.load(filename_template.replace('[param]', 'FMultHat'))
        self.fHat = np.load(filename_template.replace('[param]', 'FAddHat'))
        self.HHat = np.load(filename_template.replace('[param]', 'HMultHat'))
        self.hHat = np.load(filename_template.replace('[param]', 'HAddHat'))
        self.KTilde = np.load(filename_template.replace('[param]', 'KTilde'))
        self.KHat = np.load(filename_template.replace('[param]', 'KHat'))
    def Step(self, k, y, xHat_):
        """
        One step estimate xHat(t) = Step(model, t, y(t), xHat(t-1)).
        Returns (estimate, error covariance KHat[k], prediction, correction).
        """
        if (k == len(self.FHat)):
            # OMG!! Here comes a dirty trick to make the CMNF time scale in line with Kalman filter timescale.
            # Otherwise we need to calculate CMNF params on one additional step.
            # Note that this affects the quality of the estimate on the final step!!!
            k -= 1
        xTilde = np.dot(self.FHat[k], self.Xi(xHat_)) + self.fHat[k]
        xCorr = np.dot(self.HHat[k], self.Zeta(xTilde, y)) + self.hHat[k]
        xHat = xTilde + xCorr
        return xHat, self.KHat[k], xTilde, xCorr
    # sampled cross-covariance of two sequences with equal first dimension
    @staticmethod
    def cov(X, Y):
        n = X.shape[0]
        cX = X - np.mean(X, axis=0)
        cY = Y - np.mean(Y, axis=0)
        return np.dot(cX.T, cY) / (n - 1.)
    # pseudo-inverse via SVD, discarding singular values below tol
    @staticmethod
    def inverse(A):
        tol = 1e-2
        u, s, vh = np.linalg.svd(A)
        nonzero = np.abs(s) > tol
        # 1/s where s is significant, 0 otherwise; the + np.invert(nonzero)
        # term only avoids dividing by ~0 before masking.
        inv_s = 1.0 / (s + np.invert(nonzero)) * (nonzero)
        return u @ np.diag(inv_s) @ vh
    # SVD factors (u, diag(1/s), vh) of the inverse of A
    @staticmethod
    def inverseSVD(A):
        u, s, vh = np.linalg.svd(A)
        # zero = s == 0
        inv_s = 1.0 / s # (s + zero) * np.invert(zero)
        return u, np.diag(inv_s), vh
| [
"numpy.random.normal",
"numpy.tile",
"numpy.mean",
"numpy.abs",
"numpy.sqrt",
"numpy.linalg.pinv",
"numpy.diag",
"numpy.invert",
"numpy.zeros",
"numpy.apply_along_axis",
"numpy.dot",
"time.time",
"numpy.linalg.svd",
"numpy.cov",
"numpy.zeros_like"
] | [((1289, 1300), 'numpy.sqrt', 'np.sqrt', (['DW'], {}), '(DW)\n', (1296, 1300), True, 'import numpy as np\n'), ((1382, 1408), 'numpy.zeros_like', 'np.zeros_like', (['self.sigmaW'], {}), '(self.sigmaW)\n', (1395, 1408), True, 'import numpy as np\n'), ((1512, 1524), 'numpy.sqrt', 'np.sqrt', (['DNu'], {}), '(DNu)\n', (1519, 1524), True, 'import numpy as np\n'), ((1609, 1636), 'numpy.zeros_like', 'np.zeros_like', (['self.sigmaNu'], {}), '(self.sigmaNu)\n', (1622, 1636), True, 'import numpy as np\n'), ((2540, 2577), 'numpy.zeros', 'np.zeros', (['(N, x.shape[2], x.shape[2])'], {}), '((N, x.shape[2], x.shape[2]))\n', (2548, 2577), True, 'import numpy as np\n'), ((2598, 2623), 'numpy.zeros', 'np.zeros', (['(N, x.shape[2])'], {}), '((N, x.shape[2]))\n', (2606, 2623), True, 'import numpy as np\n'), ((2644, 2689), 'numpy.zeros', 'np.zeros', (['(N, x.shape[2], zeta_test.shape[0])'], {}), '((N, x.shape[2], zeta_test.shape[0]))\n', (2652, 2689), True, 'import numpy as np\n'), ((2710, 2735), 'numpy.zeros', 'np.zeros', (['(N, x.shape[2])'], {}), '((N, x.shape[2]))\n', (2718, 2735), True, 'import numpy as np\n'), ((2758, 2795), 'numpy.zeros', 'np.zeros', (['(N, x.shape[2], x.shape[2])'], {}), '((N, x.shape[2], x.shape[2]))\n', (2766, 2795), True, 'import numpy as np\n'), ((2816, 2853), 'numpy.zeros', 'np.zeros', (['(N, x.shape[2], x.shape[2])'], {}), '((N, x.shape[2], x.shape[2]))\n', (2824, 2853), True, 'import numpy as np\n'), ((2870, 2892), 'numpy.tile', 'np.tile', (['XHat0', '(M, 1)'], {}), '(XHat0, (M, 1))\n', (2877, 2892), True, 'import numpy as np\n'), ((3064, 3075), 'time.time', 'time.time', ([], {}), '()\n', (3073, 3075), False, 'import time\n'), ((4716, 4748), 'numpy.zeros', 'np.zeros', (['(M, N, XHat0.shape[0])'], {}), '((M, N, XHat0.shape[0]))\n', (4724, 4748), True, 'import numpy as np\n'), ((4814, 4825), 'time.time', 'time.time', ([], {}), '()\n', (4823, 4825), False, 'import time\n'), ((7821, 7837), 'numpy.linalg.svd', 'np.linalg.svd', (['A'], {}), '(A)\n', (7834, 
7837), True, 'import numpy as np\n'), ((8031, 8047), 'numpy.linalg.svd', 'np.linalg.svd', (['A'], {}), '(A)\n', (8044, 8047), True, 'import numpy as np\n'), ((3336, 3373), 'numpy.apply_along_axis', 'np.apply_along_axis', (['self.Xi', '(1)', 'xHat'], {}), '(self.Xi, 1, xHat)\n', (3355, 3373), True, 'import numpy as np\n'), ((3397, 3424), 'numpy.cov', 'np.cov', (['xiHat'], {'rowvar': '(False)'}), '(xiHat, rowvar=False)\n', (3403, 3424), True, 'import numpy as np\n'), ((3760, 3810), 'numpy.apply_along_axis', 'np.apply_along_axis', (['(lambda x: F @ x + f)', '(1)', 'xiHat'], {}), '(lambda x: F @ x + f, 1, xiHat)\n', (3779, 3810), True, 'import numpy as np\n'), ((3992, 4023), 'numpy.cov', 'np.cov', (['zetaTilde'], {'rowvar': '(False)'}), '(zetaTilde, rowvar=False)\n', (3998, 4023), True, 'import numpy as np\n'), ((4055, 4083), 'numpy.linalg.pinv', 'np.linalg.pinv', (['CovZetaTilde'], {}), '(CovZetaTilde)\n', (4069, 4083), True, 'import numpy as np\n'), ((5264, 5349), 'numpy.apply_along_axis', 'np.apply_along_axis', (['(lambda x: self.FHat[t, :, :] @ x + self.fHat[t, :])', '(1)', 'xiHat'], {}), '(lambda x: self.FHat[t, :, :] @ x + self.fHat[t, :], 1,\n xiHat)\n', (5283, 5349), True, 'import numpy as np\n'), ((7609, 7627), 'numpy.mean', 'np.mean', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (7616, 7627), True, 'import numpy as np\n'), ((7645, 7663), 'numpy.mean', 'np.mean', (['Y'], {'axis': '(0)'}), '(Y, axis=0)\n', (7652, 7663), True, 'import numpy as np\n'), ((7679, 7695), 'numpy.dot', 'np.dot', (['cX.T', 'cY'], {}), '(cX.T, cY)\n', (7685, 7695), True, 'import numpy as np\n'), ((7856, 7865), 'numpy.abs', 'np.abs', (['s'], {}), '(s)\n', (7862, 7865), True, 'import numpy as np\n'), ((8146, 8160), 'numpy.diag', 'np.diag', (['inv_s'], {}), '(inv_s)\n', (8153, 8160), True, 'import numpy as np\n'), ((3014, 3047), 'numpy.random.normal', 'np.random.normal', ([], {'size': 'xHat.shape'}), '(size=xHat.shape)\n', (3030, 3047), True, 'import numpy as np\n'), ((3156, 3167), 
'time.time', 'time.time', ([], {}), '()\n', (3165, 3167), False, 'import time\n'), ((3303, 3314), 'time.time', 'time.time', ([], {}), '()\n', (3312, 3314), False, 'import time\n'), ((3650, 3682), 'numpy.cov', 'np.cov', (['x[:, t, :]'], {'rowvar': '(False)'}), '(x[:, t, :], rowvar=False)\n', (3656, 3682), True, 'import numpy as np\n'), ((4906, 4917), 'time.time', 'time.time', ([], {}), '()\n', (4915, 4917), False, 'import time\n'), ((5039, 5050), 'time.time', 'time.time', ([], {}), '()\n', (5048, 5050), False, 'import time\n'), ((5194, 5244), 'numpy.apply_along_axis', 'np.apply_along_axis', (['self.Xi', '(1)', 'xHat[:, t - 1, :]'], {}), '(self.Xi, 1, xHat[:, t - 1, :])\n', (5213, 5244), True, 'import numpy as np\n'), ((7950, 7964), 'numpy.diag', 'np.diag', (['inv_s'], {}), '(inv_s)\n', (7957, 7964), True, 'import numpy as np\n'), ((5128, 5150), 'numpy.tile', 'np.tile', (['XHat0', '(M, 1)'], {}), '(XHat0, (M, 1))\n', (5135, 5150), True, 'import numpy as np\n'), ((7899, 7917), 'numpy.invert', 'np.invert', (['nonzero'], {}), '(nonzero)\n', (7908, 7917), True, 'import numpy as np\n')] |
import keras
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model, load_model
from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D
import numpy as np
# Training hyper-parameters for the fer2013 facial-emotion CNN.
num_emotions = 7  # fer2013 defines 7 emotion classes
batch_size = 256
steps_per_epoch = 112
epochs = 11
# Class names indexed by integer label id.
emotions = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
def split_data():
    """Read fer2013.csv and return (x_train, y_train, x_test, y_test).

    Pixels are scaled to [0, 1] and reshaped to (n, 48, 48, 1); labels are
    one-hot vectors over the num_emotions classes. Rows marked 'Training'
    go to the train split, 'PublicTest' rows to the test split.
    """
    with open("fer2013.csv") as file:
        lines = np.array(file.readlines())
    x_train, y_train, x_test, y_test = [], [], [], []
    for i in range(1, lines.size):  # start at 1 to skip the CSV header row
        emotion, img, usage = lines[i].split(",")
        pixels = np.array(img.split(" "), 'float32')
        one_hot = keras.utils.np_utils.to_categorical(emotion, num_emotions)
        if 'Training' in usage:
            y_train.append(one_hot)
            x_train.append(pixels)
        elif 'PublicTest' in usage:
            y_test.append(one_hot)
            x_test.append(pixels)
    # Cast to arrays, normalize the images, and restore the 48x48x1 layout.
    x_train = np.true_divide(np.array(x_train), 255.0).reshape((len(x_train), 48, 48, 1))
    x_test = np.true_divide(np.array(x_test), 255.0).reshape((len(x_test), 48, 48, 1))
    y_train, y_test = np.array(y_train), np.array(y_test)
    print("x_train, y_train, x_test, y_test: ", x_train.shape, y_train.shape, x_test.shape, y_test.shape)
    return x_train, y_train, x_test, y_test
def create_model():
    """Assemble the 7-class emotion CNN for 48x48x1 grayscale face images."""
    inputs = Input(shape=(48, 48, 1, ))
    x = Conv2D(filters=32, kernel_size=(3, 3), activation='relu')(inputs)
    x = Conv2D(filters=64, kernel_size=(3, 3), activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Dropout(0.4)(x)
    x = Conv2D(filters=128, kernel_size=(3, 3), activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Conv2D(filters=128, kernel_size=(3, 3), activation='relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Dropout(0.4)(x)
    x = Flatten()(x)
    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.5)(x)
    outputs = Dense(7, activation='softmax')(x)
    return Model(inputs=inputs, outputs=outputs)
def cnn():
    """Train the emotion CNN on fer2013 and save it to ../models/face-emotion.h5."""
    x_train, y_train, x_test, y_test = split_data()
    net = create_model()
    # Identity generator for now; kept so augmentation options are easy to add.
    datagen = ImageDataGenerator()
    net.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
    net.fit(datagen.flow(x_train, y_train, batch_size=batch_size),
            epochs=epochs,
            steps_per_epoch=steps_per_epoch,
            validation_data=datagen.flow(x_test, y_test, batch_size=batch_size))
    # Persist for quick reload later.
    net.save('../models/face-emotion.h5')
def test_cnn():
    """Reload the saved model and evaluate it on the public test split."""
    trained = load_model('../models/face-emotion.h5')
    _, _, x_test, y_test = split_data()
    print("evaluating facial emotion recognition model")
    trained.evaluate(x_test, y_test)
# Train and save the model at import/run time.
cnn()
test_cnn() | [
"keras.layers.Conv2D",
"keras.models.load_model",
"keras.layers.Flatten",
"keras.layers.MaxPooling2D",
"keras.preprocessing.image.ImageDataGenerator",
"numpy.array",
"keras.layers.Input",
"keras.utils.np_utils.to_categorical",
"keras.models.Model",
"numpy.true_divide",
"keras.layers.Dense",
"k... | [((514, 528), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (522, 528), True, 'import numpy as np\n'), ((1592, 1616), 'keras.layers.Input', 'Input', ([], {'shape': '(48, 48, 1)'}), '(shape=(48, 48, 1))\n', (1597, 1616), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((2285, 2319), 'keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'pred'}), '(inputs=inputs, outputs=pred)\n', (2290, 2319), False, 'from keras.models import Model, load_model\n'), ((2476, 2496), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), '()\n', (2494, 2496), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((2912, 2951), 'keras.models.load_model', 'load_model', (['"""../models/face-emotion.h5"""'], {}), "('../models/face-emotion.h5')\n", (2922, 2951), False, 'from keras.models import Model, load_model\n'), ((750, 774), 'numpy.array', 'np.array', (['val', '"""float32"""'], {}), "(val, 'float32')\n", (758, 774), True, 'import numpy as np\n'), ((791, 849), 'keras.utils.np_utils.to_categorical', 'keras.utils.np_utils.to_categorical', (['emotion', 'num_emotions'], {}), '(emotion, num_emotions)\n', (826, 849), False, 'import keras\n'), ((1113, 1130), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (1121, 1130), True, 'import numpy as np\n'), ((1132, 1149), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (1140, 1149), True, 'import numpy as np\n'), ((1151, 1167), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (1159, 1167), True, 'import numpy as np\n'), ((1169, 1185), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (1177, 1185), True, 'import numpy as np\n'), ((1206, 1236), 'numpy.true_divide', 'np.true_divide', (['x_train', '(255.0)'], {}), '(x_train, 255.0)\n', (1220, 1236), True, 'import numpy as np\n'), ((1238, 1267), 'numpy.true_divide', 'np.true_divide', (['x_test', '(255.0)'], {}), '(x_test, 255.0)\n', (1252, 1267), 
True, 'import numpy as np\n'), ((1629, 1686), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(filters=32, kernel_size=(3, 3), activation='relu')\n", (1635, 1686), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((1703, 1760), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(filters=64, kernel_size=(3, 3), activation='relu')\n", (1709, 1760), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((1775, 1805), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1787, 1805), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((1823, 1835), 'keras.layers.Dropout', 'Dropout', (['(0.4)'], {}), '(0.4)\n', (1830, 1835), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((1852, 1910), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(128)', 'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(filters=128, kernel_size=(3, 3), activation='relu')\n", (1858, 1910), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((1928, 1958), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1940, 1958), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((1973, 2031), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(128)', 'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(filters=128, kernel_size=(3, 3), activation='relu')\n", (1979, 2031), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((2046, 2076), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (2058, 2076), False, 'from keras.layers import Input, Dense, 
Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((2094, 2106), 'keras.layers.Dropout', 'Dropout', (['(0.4)'], {}), '(0.4)\n', (2101, 2106), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((2126, 2135), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2133, 2135), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((2155, 2185), 'keras.layers.Dense', 'Dense', (['(1024)'], {'activation': '"""relu"""'}), "(1024, activation='relu')\n", (2160, 2185), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((2207, 2219), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (2214, 2219), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n'), ((2236, 2266), 'keras.layers.Dense', 'Dense', (['(7)'], {'activation': '"""softmax"""'}), "(7, activation='softmax')\n", (2241, 2266), False, 'from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D\n')] |
import numpy as np
from pySDC.helpers.stats_helper import filter_stats, sort_stats
from pySDC.implementations.collocation_classes.gauss_radau_right import CollGaussRadau_Right
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.problem_classes.AdvectionEquation_1D_FD import advection1d
from pySDC.implementations.problem_classes.HeatEquation_1D_FD import heat1d
from pySDC.implementations.problem_classes.TestEquation_0D import testequation0d
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
from pySDC.implementations.transfer_classes.TransferMesh import mesh_to_mesh
from pySDC.implementations.transfer_classes.TransferMesh_NoCoarse import mesh_to_mesh as mesh_to_mesh_nocoarse
from pySDC.projects.matrixPFASST.controller_matrix_nonMPI import controller_matrix_nonMPI
def diffusion_setup(par=0.0):
    """
    Setup routine for the 1D heat-equation (diffusion) test.

    Args:
        par (float): diffusion coefficient, controls stiffness
    Returns:
        tuple: (description dict for step instantiation, controller parameters)
    """
    level_params = {
        'restol': 1E-08,
        'dt': 0.25,
        'nsweeps': [3, 1],
    }
    sweeper_params = {
        'collocation_class': CollGaussRadau_Right,
        'num_nodes': 3,
        'QI': 'LU',  # the LU-trick for the implicit part
        'initial_guess': 'spread',
    }
    problem_params = {
        'nu': par,        # diffusion coefficient
        'freq': 4,        # frequency for the test value
        'nvars': [127, 63],  # degrees of freedom per level
    }
    step_params = {'maxiter': 50}
    space_transfer_params = {'rorder': 2, 'iorder': 2}
    controller_params = {'logger_level': 30, 'all_to_done': True}
    # Description dictionary for easy step instantiation.
    description = {
        'problem_class': heat1d,
        'problem_params': problem_params,
        'sweeper_class': generic_implicit,
        'sweeper_params': sweeper_params,
        'level_params': level_params,
        'step_params': step_params,
        'space_transfer_class': mesh_to_mesh,
        'space_transfer_params': space_transfer_params,
    }
    return description, controller_params
def advection_setup(par=0.0):
    """
    Setup routine for advection test

    Args:
        par (float): parameter for controlling stiffness (advection speed)

    Returns:
        tuple: (description, controller_params) dictionaries
    """
    # level parameters
    level_params = {
        'restol': 1E-08,
        'dt': 0.25,
        'nsweeps': [3, 1],
    }
    # sweeper parameters
    sweeper_params = {
        'collocation_class': CollGaussRadau_Right,
        'num_nodes': [3],
        'QI': ['LU'],  # For the IMEX sweeper, the LU-trick can be activated for the implicit part
        'initial_guess': 'spread',
    }
    # problem parameters
    problem_params = {
        'c': par,
        'freq': 4,  # frequency for the test value
        'nvars': [128, 64],  # number of degrees of freedom for each level
        'order': 2,
        'type': 'center',
    }
    # step parameters
    step_params = {'maxiter': 50}
    # space transfer parameters (periodic, matching the advection stencil)
    space_transfer_params = {'rorder': 2, 'iorder': 2, 'periodic': True}
    # controller parameters
    controller_params = {'logger_level': 30, 'all_to_done': True}
    # fill description dictionary for easy step instantiation
    description = {
        'problem_class': advection1d,  # pass problem class
        'problem_params': problem_params,
        'sweeper_class': generic_implicit,  # pass sweeper (see part B)
        'sweeper_params': sweeper_params,  # pass sweeper parameters
        'level_params': level_params,  # pass level parameters
        'step_params': step_params,  # pass step parameters
        'space_transfer_class': mesh_to_mesh,  # pass spatial transfer class
        'space_transfer_params': space_transfer_params,  # pass paramters for spatial transfer
    }
    return description, controller_params
def testequation_setup():
    """
    Setup routine for the test equation (u' = lambda * u for a block of lambdas)

    Note: unlike the other setups this routine takes no stiffness parameter
    (the old docstring documented a nonexistent ``par`` argument); the
    lambda values are generated internally below.

    Returns:
        tuple: (description, controller_params) dictionaries
    """
    # initialize level parameters
    level_params = dict()
    level_params['restol'] = 1E-08
    level_params['dt'] = 0.25
    level_params['nsweeps'] = [3, 1]
    # initialize sweeper parameters
    sweeper_params = dict()
    sweeper_params['collocation_class'] = CollGaussRadau_Right
    sweeper_params['num_nodes'] = [3, 2]
    sweeper_params['QI'] = 'LU'
    sweeper_params['initial_guess'] = 'spread'
    # initialize problem parameters
    problem_params = dict()
    problem_params['u0'] = 1.0  # initial value (for all instances)
    # use single values like this...
    # problem_params['lambdas'] = [[-1.0]]
    # .. or a list of values like this ...
    # problem_params['lambdas'] = [[-1.0, -2.0, 1j, -1j]]
    # .. or a whole block of values like this
    ilim_left = -11
    ilim_right = 0
    rlim_left = 0
    rlim_right = 11
    ilam = 1j * np.logspace(ilim_left, ilim_right, 11)
    rlam = -1 * np.logspace(rlim_left, rlim_right, 11)
    # all pairwise sums of a real and an imaginary part; the comprehension
    # replaces the original nested append loop (same order, same values)
    lambdas = [rl + il for rl in rlam for il in ilam]
    problem_params['lambdas'] = [lambdas]
    # note: PFASST will do all of those at once, but without interaction (realized via diagonal matrix).
    # The propagation matrix will be diagonal too, corresponding to the respective lambda value.
    # initialize step parameters
    step_params = dict()
    step_params['maxiter'] = 50
    # initialize controller parameters
    controller_params = dict()
    controller_params['logger_level'] = 30
    controller_params['all_to_done'] = True
    # fill description dictionary for easy step instantiation
    description = dict()
    description['problem_class'] = testequation0d  # pass problem class
    description['problem_params'] = problem_params  # pass problem parameters
    description['sweeper_class'] = generic_implicit  # pass sweeper
    description['sweeper_params'] = sweeper_params  # pass sweeper parameters
    description['level_params'] = level_params  # pass level parameters
    description['step_params'] = step_params  # pass step parameters
    description['space_transfer_class'] = mesh_to_mesh_nocoarse  # pass spatial transfer class
    description['space_transfer_params'] = dict()  # pass paramters for spatial transfer
    return description, controller_params
def compare_controllers(type=None, par=0.0, f=None):
    """
    A simple test program to compare PFASST runs with matrix-based and matrix-free controllers

    Args:
        type (str): setup type, one of 'diffusion', 'advection', 'testequation'
        par (float): parameter for controlling stiffness
        f: file handler for the comparison log

    Raises:
        ValueError: if ``type`` is not one of the known setups
        AssertionError: if the two controllers disagree
    """
    # note: ``type`` shadows the builtin, but it is part of the public
    # interface (callers pass it as a keyword), so the name is kept
    # set time parameters
    t0 = 0.0
    Tend = 1.0
    if type == 'diffusion':
        description, controller_params = diffusion_setup(par)
    elif type == 'advection':
        description, controller_params = advection_setup(par)
    elif type == 'testequation':
        description, controller_params = testequation_setup()
    else:
        # typo fix: message used to read 'No valis setup type'
        raise ValueError('No valid setup type provided, aborting..')
    out = '\nWorking with %s setup and parameter %3.1e..' % (type, par)
    f.write(out + '\n')
    print(out)
    # instantiate both controllers on the same description
    controller_mat = controller_matrix_nonMPI(num_procs=4, controller_params=controller_params, description=description)
    controller_nomat = controller_nonMPI(num_procs=4, controller_params=controller_params, description=description)
    # get initial values on finest level
    P = controller_nomat.MS[0].levels[0].prob
    uinit = P.u_exact(t0)
    uex = P.u_exact(Tend)
    # this is where the iteration is happening
    uend_mat, stats_mat = controller_mat.run(u0=uinit, t0=t0, Tend=Tend)
    uend_nomat, stats_nomat = controller_nomat.run(u0=uinit, t0=t0, Tend=Tend)
    diff = abs(uend_mat - uend_nomat)
    err_mat = abs(uend_mat - uex)
    err_nomat = abs(uend_nomat - uex)
    out = ' Error (mat/nomat) vs. exact solution: %6.4e -- %6.4e' % (err_mat, err_nomat)
    f.write(out + '\n')
    print(out)
    out = ' Difference between both results: %6.4e' % diff
    f.write(out + '\n')
    print(out)
    assert diff < 2.3E-15, 'ERROR: difference between matrix-based and matrix-free result is too large, got %s' % diff
    # filter statistics by type (number of iterations)
    filtered_stats_mat = filter_stats(stats_mat, type='niter')
    filtered_stats_nomat = filter_stats(stats_nomat, type='niter')
    # convert filtered statistics to list of iterations count, sorted by process
    iter_counts_mat = sort_stats(filtered_stats_mat, sortby='time')
    iter_counts_nomat = sort_stats(filtered_stats_nomat, sortby='time')
    out = ' Iteration counts for matrix-based version: %s' % iter_counts_mat
    f.write(out + '\n')
    print(out)
    out = ' Iteration counts for matrix-free version: %s' % iter_counts_nomat
    f.write(out + '\n')
    print(out)
    assert iter_counts_nomat == iter_counts_mat, \
        'ERROR: number of iterations differ between matrix-based and matrix-free controller'
def main():
    """Run the matrix-vs-matrix-free comparison for all setups/parameters.

    Writes the detailed comparison log to
    ``comparison_matrix_vs_nomat_detail.txt``.
    """
    par_list = [1E-02, 1.0, 1E+02]
    # use a context manager so the file is closed even if one of the
    # comparison asserts fires (the original open/close pair leaked on failure)
    with open('comparison_matrix_vs_nomat_detail.txt', 'w') as f:
        for par in par_list:
            compare_controllers(type='diffusion', par=par, f=f)
            compare_controllers(type='advection', par=par, f=f)
        # the test equation has no stiffness parameter, run it once
        compare_controllers(type='testequation', par=0.0, f=f)
if __name__ == "__main__":
    main()
| [
"pySDC.helpers.stats_helper.filter_stats",
"pySDC.projects.matrixPFASST.controller_matrix_nonMPI.controller_matrix_nonMPI",
"pySDC.helpers.stats_helper.sort_stats",
"numpy.logspace",
"pySDC.implementations.controller_classes.controller_nonMPI.controller_nonMPI"
] | [((8397, 8500), 'pySDC.projects.matrixPFASST.controller_matrix_nonMPI.controller_matrix_nonMPI', 'controller_matrix_nonMPI', ([], {'num_procs': '(4)', 'controller_params': 'controller_params', 'description': 'description'}), '(num_procs=4, controller_params=controller_params,\n description=description)\n', (8421, 8500), False, 'from pySDC.projects.matrixPFASST.controller_matrix_nonMPI import controller_matrix_nonMPI\n'), ((8521, 8617), 'pySDC.implementations.controller_classes.controller_nonMPI.controller_nonMPI', 'controller_nonMPI', ([], {'num_procs': '(4)', 'controller_params': 'controller_params', 'description': 'description'}), '(num_procs=4, controller_params=controller_params,\n description=description)\n', (8538, 8617), False, 'from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI\n'), ((9496, 9533), 'pySDC.helpers.stats_helper.filter_stats', 'filter_stats', (['stats_mat'], {'type': '"""niter"""'}), "(stats_mat, type='niter')\n", (9508, 9533), False, 'from pySDC.helpers.stats_helper import filter_stats, sort_stats\n'), ((9561, 9600), 'pySDC.helpers.stats_helper.filter_stats', 'filter_stats', (['stats_nomat'], {'type': '"""niter"""'}), "(stats_nomat, type='niter')\n", (9573, 9600), False, 'from pySDC.helpers.stats_helper import filter_stats, sort_stats\n'), ((9705, 9750), 'pySDC.helpers.stats_helper.sort_stats', 'sort_stats', (['filtered_stats_mat'], {'sortby': '"""time"""'}), "(filtered_stats_mat, sortby='time')\n", (9715, 9750), False, 'from pySDC.helpers.stats_helper import filter_stats, sort_stats\n'), ((9775, 9822), 'pySDC.helpers.stats_helper.sort_stats', 'sort_stats', (['filtered_stats_nomat'], {'sortby': '"""time"""'}), "(filtered_stats_nomat, sortby='time')\n", (9785, 9822), False, 'from pySDC.helpers.stats_helper import filter_stats, sort_stats\n'), ((6098, 6136), 'numpy.logspace', 'np.logspace', (['ilim_left', 'ilim_right', '(11)'], {}), '(ilim_left, ilim_right, 11)\n', (6109, 6136), True, 'import numpy as 
np\n'), ((6153, 6191), 'numpy.logspace', 'np.logspace', (['rlim_left', 'rlim_right', '(11)'], {}), '(rlim_left, rlim_right, 11)\n', (6164, 6191), True, 'import numpy as np\n')] |
# === Start Python 2/3 compatibility
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import * # noqa pylint: disable=W0401, W0614
from future.builtins.disabled import * # noqa pylint: disable=W0401, W0614
# === End Python 2/3 compatibility
import pytest
import numpy as np
import h5py
import glob
import os
import msgpack
import base64
from flask import Flask, jsonify, request as flask_req
from pytest_localserver.http import WSGIServer
from kotekan import runner
from test_compression import float_allclose
# Skip if HDF5 support not built into kotekan
if not runner.has_hdf5():
    pytest.skip("HDF5 support not available.", allow_module_level=True)
# dataset ID the pipeline writes for frames that carry no data
# (compared against the emptied frequency in test_transpose)
NULL_DSET_ID = b"00000000000000000000000000000000"
# frequency IDs used for the fake visibility data
freq = [3, 50, 777, 554]
# frequency to set lost samples
frac_freq = -2
frac_lost = 0.8
frac_rfi = 0.3
# frequency to set empty frames
empty_freq = 3
# gains parameters
start_time = 1_500_000_000
old_timestamp = start_time - 10.0
new_timestamp = start_time + 5.0
old_update_id = f"gains{old_timestamp}"
new_update_id = f"gains{new_timestamp}"
transition_interval = 10.0
new_state = True
# kotekan config for the writer/transpose pipeline (see `transpose` fixture)
writer_params = {
    "num_elements": 4,
    "num_ev": 2,
    "cadence": 1.0,
    "total_frames": 10,
    "freq": freq,
    "chunk_size": [2, 6, 5],
    "mode": "test_pattern_simple",
    "test_pattern_value": [0, 0],
    "file_type": "hdf5fast",
    "dataset_manager": {"use_dataset_broker": False},
    "updatable_config": "/gains",
    "use_local_dataset_man": True,
    "gains": {
        "kotekan_update_endpoint": "json",
        "start_time": old_timestamp,
        "update_id": old_update_id,
        "transition_interval": transition_interval,
        "new_state": new_state,
    },
}
# kotekan config for the stacked-data pipeline (see `transpose_stack` fixture)
stack_params = {
    "num_elements": 2048,
    "num_ev": 2,
    "cadence": 5.0,
    "file_length": 3,
    "freq": freq,
    "chunk_size": [2, 64, 3],
    "dataset_manager": {"use_dataset_broker": False},
}
@pytest.fixture(scope="module")
def new_gains():
    """Module-scoped fixture: unity gains and all-true weights.

    Returns:
        tuple: (gain, weight) arrays of shape (nfreq, num_elements).
    """
    nfreq = len(freq)
    nelm = writer_params["num_elements"]
    gain = np.ones((nfreq, nelm), dtype=np.complex64)
    # np.bool8 was deprecated in NumPy 1.24 and removed in 2.0;
    # np.bool_ is the supported spelling of the same scalar type
    weight = gain.astype(np.bool_)
    return gain, weight
@pytest.fixture(scope="module")
def old_gains():
    """Module-scoped fixture: unity gains and all-true weights.

    Returns:
        tuple: (gain, weight) arrays of shape (nfreq, num_elements).
    """
    nfreq = len(freq)
    nelm = writer_params["num_elements"]
    gain = np.ones((nfreq, nelm), dtype=np.complex64)
    # np.bool8 was deprecated in NumPy 1.24 and removed in 2.0;
    # np.bool_ is the supported spelling of the same scalar type
    weight = gain.astype(np.bool_)
    return gain, weight
@pytest.fixture(scope="module")
def transpose(tmpdir_factory, cal_broker):
    """Run a full fake-data write/transpose pipeline and yield both files.

    Builds a kotekan pipeline that generates fake visibilities, applies
    gains (including a mid-run gain update delivered over REST), drops
    samples on two frequencies, writes the stream to raw and HDF5
    simultaneously, then transposes the raw file.  Yields
    (transposed_h5, untransposed_h5) as open h5py handles and closes
    them on teardown.
    """
    writer_params["file_length"] = writer_params["total_frames"]
    # Write fake data in raw format
    tmpdir = str(tmpdir_factory.mktemp("writer"))
    fakevis_buffer = runner.FakeVisBuffer(
        freq_ids=writer_params["freq"],
        num_frames=writer_params["total_frames"],
        cadence=writer_params["cadence"],
        mode=writer_params["mode"],
        test_pattern_value=writer_params["test_pattern_value"],
        wait=True,  # make sure cal_broker has time to update
    )
    # Add an applyGain stage to alter the dataset ID
    gain_buf_name = "gains_applied"
    fakevis_buffer.buffer_block.update(
        {
            gain_buf_name: {
                "kotekan_buffer": "vis",
                "metadata_pool": "vis_pool",
                "num_frames": "buffer_depth",
            },
        }
    )
    host, port = cal_broker.server_address
    fakevis_buffer.stage_block.update(
        {
            "apply_gains": {
                "kotekan_stage": "applyGains",
                "in_buf": fakevis_buffer.name,
                "out_buf": gain_buf_name,
                "log_level": "debug",
                "broker_host": host,
                "broker_port": port,
            },
        }
    )
    # Remove samples from two frequency to test handling of empty frames
    fsel_frac_name = "frac_freqsel"
    fsel_empty_name = "empty_freqsel"
    fsel_buf = {
        fsel_frac_name: {
            "kotekan_buffer": "vis",
            "metadata_pool": "vis_pool",
            "num_frames": "buffer_depth",
        },
        fsel_empty_name: {
            "kotekan_buffer": "vis",
            "metadata_pool": "vis_pool",
            "num_frames": "buffer_depth",
        },
    }
    fakevis_buffer.buffer_block.update(fsel_buf)
    fakevis_buffer.stage_block.update(
        {
            # first visDrop: partially remove samples on one frequency
            "frac_fsel": {
                "kotekan_stage": "visDrop",
                "in_buf": gain_buf_name,
                "out_buf": fsel_frac_name,
                "freq": [writer_params["freq"][frac_freq]],
                "frac_lost": frac_lost,
                "frac_rfi": frac_rfi,
                "log_level": "debug",
            },
            # second visDrop: fully empty another frequency (no frac given)
            "empty_fsel": {
                "kotekan_stage": "visDrop",
                "in_buf": fsel_frac_name,
                "out_buf": fsel_empty_name,
                "freq": [writer_params["freq"][empty_freq]],
                "log_level": "debug",
            },
        }
    )
    # update the output buffer name
    fakevis_buffer.name = fsel_empty_name
    # REST commands for gains update
    cmds = [
        ["wait", 2.0, {}],
        [
            "post",
            "gains",
            {
                "update_id": new_update_id,
                "start_time": new_timestamp,
                "transition_interval": transition_interval,
                "new_state": new_state,
            },
        ],
    ]
    # Write out the test data in raw and HDF5 simultaneously
    tmpdir_h5 = str(tmpdir_factory.mktemp("dump_h5"))
    dumph5_conf = writer_params.copy()
    dumph5_conf["root_path"] = str(tmpdir_h5)
    dumph5_conf["file_name"] = "dumph5"
    dumph5_conf["node_mode"] = False
    params = writer_params.copy()
    params["root_path"] = tmpdir
    writer = runner.KotekanStageTester(
        "VisWriter",
        {"node_mode": False, "write_ev": True, "file_type": "raw"},
        fakevis_buffer,
        None,
        params,
        parallel_stage_type="VisWriter",
        parallel_stage_config=dumph5_conf,
        noise="random",
        rest_commands=cmds,
    )
    writer.run()
    # get raw infile
    files = sorted(glob.glob(tmpdir + "/20??????T??????Z_*_corr/*.meta"))
    assert len(files) == 1
    infile = os.path.splitext(files[0])[0]
    # get hdf5 infile
    files = sorted(glob.glob(tmpdir_h5 + "/20??????T??????Z_*_corr/*.h5"))
    assert len(files) == 1
    infile_h5 = os.path.splitext(files[0])[0]
    # Tranpose and write data
    raw_buf = runner.ReadRawBuffer(infile, writer_params["chunk_size"])
    outfile = tmpdir + "/transposed"
    transposer = runner.KotekanStageTester(
        "VisTranspose",
        {
            "outfile": outfile,
            "chunk_size": writer_params["chunk_size"],
            "comet_timeout": 120.0,
        },
        raw_buf,
        None,
        params,
    )
    transposer.run()
    fh = h5py.File(infile_h5 + ".h5", "r")
    fh_t = h5py.File(outfile + ".h5", "r")
    yield (fh_t, fh)
    # teardown: close both handles after the tests have run
    fh.close()
    fh_t.close()
def test_transpose(transpose):
    """Check shapes, flags and content of the transposed HDF5 file."""
    # The transposed and untransposed files
    f_tr = transpose[0]
    f = transpose[1]
    # some useful params
    n_t = writer_params["total_frames"]
    n_f = len(writer_params["freq"])
    n_elems = writer_params["num_elements"]
    # number of correlation products for n_elems inputs
    n_prod = n_elems * (n_elems + 1) // 2
    # check if shapes are correct (time is the fastest-varying axis after transpose)
    assert f_tr["index_map/time"].shape[0] == n_t
    assert f_tr["index_map/freq"].shape[0] == n_f
    assert f_tr["index_map/prod"].shape[0] == n_prod
    assert f_tr["index_map/input"].shape[0] == n_elems
    assert f_tr["vis"].shape == (n_f, n_prod, n_t)
    assert f_tr["flags/vis_weight"].shape == (n_f, n_prod, n_t)
    assert f_tr["eval"].shape == (n_f, writer_params["num_ev"], n_t)
    assert f_tr["evec"].shape == (
        n_f,
        writer_params["num_ev"],
        writer_params["num_elements"],
        n_t,
    )
    assert f_tr["erms"].shape == (n_f, n_t)
    assert f_tr["gain"].shape == (n_f, n_elems, n_t)
    assert f_tr["flags/inputs"].shape == (n_elems, n_t)
    assert f_tr["flags/frac_lost"].shape == (n_f, n_t)
    assert f_tr["flags/frac_rfi"].shape == (n_f, n_t)
    assert f_tr["flags/dataset_id"].shape == (n_f, n_t)
    # only the frequency selected by frac_freq should show losses
    assert (f_tr["flags/frac_lost"][:frac_freq, :] == 0.0).all()
    assert np.allclose(f_tr["flags/frac_lost"][frac_freq, :], frac_lost, rtol=1e-3)
    assert np.allclose(f_tr["flags/frac_rfi"][frac_freq, :], frac_rfi, rtol=1e-3)
    # the fully emptied frequency carries the null dataset ID
    assert (f_tr["flags/dataset_id"][empty_freq, :] == NULL_DSET_ID).all()
    # Check dataset ID change is present
    # expect starting ID, null ID, and at least one gain update
    unique_dsets = np.unique(f_tr["flags/dataset_id"][:])
    assert len(unique_dsets) == 3
    # transpose with numpy and see if data is the same
    dsets = ["vis", "flags/vis_weight", "eval", "evec", "erms"]
    for d in dsets:
        assert np.all(f_tr[d][:] == np.moveaxis(f[d], 0, -1))
    # Check flags were not overwritten by empty frames
    assert (f_tr["flags/inputs"][:] == 1.0).all()
@pytest.fixture(scope="module")
def transpose_stack(tmpdir_factory):
    """Write fake stacked (baseline-compressed) data, transpose, yield result.

    Yields (raw_infile_path, transposed_h5_handle); the handle is closed
    on teardown.
    """
    # Write fake stacked data in raw format
    tmpdir = str(tmpdir_factory.mktemp("writer"))
    fakevis_buffer = runner.FakeVisBuffer(
        freq_ids=stack_params["freq"],
        num_frames=stack_params["file_length"],
        cadence=stack_params["cadence"],
        mode="chime",
    )
    # Add stacking stage
    stack_buf_name = "fake_stacked"
    stack_buf = {
        stack_buf_name: {
            "kotekan_buffer": "vis",
            "metadata_pool": "vis_pool",
            "num_frames": "buffer_depth",
        }
    }
    fakevis_buffer.buffer_block.update(stack_buf)
    fakevis_buffer.stage_block.update(
        {
            "fakevis_stack": {
                "kotekan_stage": "baselineCompression",
                "in_buf": fakevis_buffer.name,
                "out_buf": stack_buf_name,
                "stack_type": "chime_in_cyl",
            }
        }
    )
    # route the downstream writer at the stacked buffer
    fakevis_buffer.name = stack_buf_name
    params = stack_params.copy()
    params["root_path"] = tmpdir
    writer = runner.KotekanStageTester(
        "VisWriter",
        {"node_mode": False, "write_ev": True, "file_type": "raw"},
        fakevis_buffer,
        None,
        params,
    )
    writer.run()
    # get raw infile
    files = sorted(glob.glob(tmpdir + "/20??????T??????Z_*_corr/*.meta"))
    assert len(files) == 1
    infile = os.path.splitext(files[0])[0]
    # Tranpose and write data
    raw_buf = runner.ReadRawBuffer(infile, stack_params["chunk_size"])
    outfile = tmpdir + "/transposed"
    transposer = runner.KotekanStageTester(
        "VisTranspose",
        {
            "outfile": outfile,
            "infile": infile,
            "chunk_size": writer_params["chunk_size"],
            "comet_timeout": 120.0,
        },
        raw_buf,
        None,
        params,
    )
    transposer.run()
    fh = h5py.File(outfile + ".h5", "r")
    yield (infile, fh)
    # teardown: close the transposed file
    fh.close()
def test_transpose_stack(transpose_stack):
    """Verify shapes, stack maps and stacked visibilities of the output."""
    infile, f = transpose_stack
    # some useful params
    n_t = stack_params["file_length"]
    n_f = len(stack_params["freq"])
    n_elems = stack_params["num_elements"]
    n_prod = n_elems * (n_elems + 1) // 2
    # expected number of stacked baselines for the chime_in_cyl stacking
    n_stack = 4 * (4 * 256 - 1) + 6 * 4 * 511
    n_ev = stack_params["num_ev"]
    # check if shapes are correct
    assert f["index_map/time"].shape[0] == n_t
    assert f["index_map/freq"].shape[0] == n_f
    assert f["index_map/prod"].shape[0] == n_prod
    assert f["index_map/stack"].shape[0] == n_stack
    assert f["index_map/input"].shape[0] == n_elems
    assert f["vis"].shape == (n_f, n_stack, n_t)
    assert f["flags/vis_weight"].shape == (n_f, n_stack, n_t)
    assert f["eval"].shape == (n_f, n_ev, n_t)
    assert f["evec"].shape == (n_f, n_ev, n_elems, n_t)
    assert f["erms"].shape == (n_f, n_t)
    assert f["gain"].shape == (n_f, n_elems, n_t)
    assert f["flags/inputs"].shape == (n_elems, n_t)
    assert f["flags/frac_lost"].shape == (n_f, n_t)
    assert f["flags/frac_rfi"].shape == (n_f, n_t)
    assert f["flags/dataset_id"].shape == (n_f, n_t)
    assert f["reverse_map/stack"].shape == (n_prod,)
    # check the stack against those in the input file
    with open(infile + ".meta", "rb") as f_meta:
        meta = msgpack.load(f_meta, raw=False)
    stack_im = np.array(
        [(s["prod"], s["conjugate"]) for s in meta["index_map"]["stack"]],
        dtype=f["index_map"]["stack"].dtype,
    )
    assert (f["index_map"]["stack"][:] == stack_im).all()
    stack_rm = np.array(
        [(s["stack"], s["conjugate"]) for s in meta["reverse_map"]["stack"]],
        dtype=f["reverse_map/stack"].dtype,
    )
    assert (f["reverse_map/stack"][:] == stack_rm).all()
    # check stacked visibilities are still as expected
    # this is adapted from test_compression.py
    # This is the typical number of entries per polarisation (for XX, XY and YY, not YX)
    np1 = 4 * 256 + 6 * 511
    for t in range(n_t):
        for ff in range(n_f):
            a_vis = f["vis"][ff, :, t]
            a_weight = f["flags/vis_weight"][ff, :, t]
            # Check that the entries in XX and XY are the same
            assert float_allclose(a_vis[:np1], a_vis[np1 : (2 * np1)])
            v1 = a_vis[:np1]
            w1 = a_weight[:np1]
            # Loop over all pairs of cylinders for XX
            for ci in range(4):
                for cj in range(ci, 4):
                    # These numbers depend if we are within a cyl or not
                    nv = 256 if ci == cj else 511  # Number of entries to compare
                    lb = 0 if ci == cj else -255  # The most negative separation
                    # A list of the feed separations in the NS dir
                    d = np.arange(lb, 256)
                    assert float_allclose(v1[:nv], (cj - ci + 1.0j * d))
                    assert float_allclose(w1[:nv], (256.0 - np.abs(d)))
                    # advance past the entries just checked
                    v1 = v1[nv:]
                    w1 = w1[nv:]
| [
"kotekan.runner.FakeVisBuffer",
"numpy.abs",
"numpy.allclose",
"msgpack.load",
"numpy.ones",
"numpy.unique",
"kotekan.runner.ReadRawBuffer",
"test_compression.float_allclose",
"numpy.arange",
"os.path.splitext",
"kotekan.runner.has_hdf5",
"h5py.File",
"numpy.array",
"numpy.moveaxis",
"py... | [((1968, 1998), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (1982, 1998), False, 'import pytest\n'), ((2196, 2226), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (2210, 2226), False, 'import pytest\n'), ((2424, 2454), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (2438, 2454), False, 'import pytest\n'), ((8994, 9024), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (9008, 9024), False, 'import pytest\n'), ((626, 643), 'kotekan.runner.has_hdf5', 'runner.has_hdf5', ([], {}), '()\n', (641, 643), False, 'from kotekan import runner\n'), ((649, 716), 'pytest.skip', 'pytest.skip', (['"""HDF5 support not available."""'], {'allow_module_level': '(True)'}), "('HDF5 support not available.', allow_module_level=True)\n", (660, 716), False, 'import pytest\n'), ((2090, 2132), 'numpy.ones', 'np.ones', (['(nfreq, nelm)'], {'dtype': 'np.complex64'}), '((nfreq, nelm), dtype=np.complex64)\n', (2097, 2132), True, 'import numpy as np\n'), ((2318, 2360), 'numpy.ones', 'np.ones', (['(nfreq, nelm)'], {'dtype': 'np.complex64'}), '((nfreq, nelm), dtype=np.complex64)\n', (2325, 2360), True, 'import numpy as np\n'), ((2672, 2910), 'kotekan.runner.FakeVisBuffer', 'runner.FakeVisBuffer', ([], {'freq_ids': "writer_params['freq']", 'num_frames': "writer_params['total_frames']", 'cadence': "writer_params['cadence']", 'mode': "writer_params['mode']", 'test_pattern_value': "writer_params['test_pattern_value']", 'wait': '(True)'}), "(freq_ids=writer_params['freq'], num_frames=\n writer_params['total_frames'], cadence=writer_params['cadence'], mode=\n writer_params['mode'], test_pattern_value=writer_params[\n 'test_pattern_value'], wait=True)\n", (2692, 2910), False, 'from kotekan import runner\n'), ((5746, 5991), 'kotekan.runner.KotekanStageTester', 'runner.KotekanStageTester', (['"""VisWriter"""', "{'node_mode': 
False, 'write_ev': True, 'file_type': 'raw'}", 'fakevis_buffer', 'None', 'params'], {'parallel_stage_type': '"""VisWriter"""', 'parallel_stage_config': 'dumph5_conf', 'noise': '"""random"""', 'rest_commands': 'cmds'}), "('VisWriter', {'node_mode': False, 'write_ev': \n True, 'file_type': 'raw'}, fakevis_buffer, None, params,\n parallel_stage_type='VisWriter', parallel_stage_config=dumph5_conf,\n noise='random', rest_commands=cmds)\n", (5771, 5991), False, 'from kotekan import runner\n'), ((6458, 6515), 'kotekan.runner.ReadRawBuffer', 'runner.ReadRawBuffer', (['infile', "writer_params['chunk_size']"], {}), "(infile, writer_params['chunk_size'])\n", (6478, 6515), False, 'from kotekan import runner\n'), ((6570, 6732), 'kotekan.runner.KotekanStageTester', 'runner.KotekanStageTester', (['"""VisTranspose"""', "{'outfile': outfile, 'chunk_size': writer_params['chunk_size'],\n 'comet_timeout': 120.0}", 'raw_buf', 'None', 'params'], {}), "('VisTranspose', {'outfile': outfile, 'chunk_size':\n writer_params['chunk_size'], 'comet_timeout': 120.0}, raw_buf, None, params\n )\n", (6595, 6732), False, 'from kotekan import runner\n'), ((6850, 6883), 'h5py.File', 'h5py.File', (["(infile_h5 + '.h5')", '"""r"""'], {}), "(infile_h5 + '.h5', 'r')\n", (6859, 6883), False, 'import h5py\n'), ((6895, 6926), 'h5py.File', 'h5py.File', (["(outfile + '.h5')", '"""r"""'], {}), "(outfile + '.h5', 'r')\n", (6904, 6926), False, 'import h5py\n'), ((8255, 8328), 'numpy.allclose', 'np.allclose', (["f_tr['flags/frac_lost'][frac_freq, :]", 'frac_lost'], {'rtol': '(0.001)'}), "(f_tr['flags/frac_lost'][frac_freq, :], frac_lost, rtol=0.001)\n", (8266, 8328), True, 'import numpy as np\n'), ((8339, 8410), 'numpy.allclose', 'np.allclose', (["f_tr['flags/frac_rfi'][frac_freq, :]", 'frac_rfi'], {'rtol': '(0.001)'}), "(f_tr['flags/frac_rfi'][frac_freq, :], frac_rfi, rtol=0.001)\n", (8350, 8410), True, 'import numpy as np\n'), ((8610, 8648), 'numpy.unique', 'np.unique', (["f_tr['flags/dataset_id'][:]"], {}), 
"(f_tr['flags/dataset_id'][:])\n", (8619, 8648), True, 'import numpy as np\n'), ((9178, 9321), 'kotekan.runner.FakeVisBuffer', 'runner.FakeVisBuffer', ([], {'freq_ids': "stack_params['freq']", 'num_frames': "stack_params['file_length']", 'cadence': "stack_params['cadence']", 'mode': '"""chime"""'}), "(freq_ids=stack_params['freq'], num_frames=stack_params\n ['file_length'], cadence=stack_params['cadence'], mode='chime')\n", (9198, 9321), False, 'from kotekan import runner\n'), ((10071, 10204), 'kotekan.runner.KotekanStageTester', 'runner.KotekanStageTester', (['"""VisWriter"""', "{'node_mode': False, 'write_ev': True, 'file_type': 'raw'}", 'fakevis_buffer', 'None', 'params'], {}), "('VisWriter', {'node_mode': False, 'write_ev': \n True, 'file_type': 'raw'}, fakevis_buffer, None, params)\n", (10096, 10204), False, 'from kotekan import runner\n'), ((10476, 10532), 'kotekan.runner.ReadRawBuffer', 'runner.ReadRawBuffer', (['infile', "stack_params['chunk_size']"], {}), "(infile, stack_params['chunk_size'])\n", (10496, 10532), False, 'from kotekan import runner\n'), ((10587, 10767), 'kotekan.runner.KotekanStageTester', 'runner.KotekanStageTester', (['"""VisTranspose"""', "{'outfile': outfile, 'infile': infile, 'chunk_size': writer_params[\n 'chunk_size'], 'comet_timeout': 120.0}", 'raw_buf', 'None', 'params'], {}), "('VisTranspose', {'outfile': outfile, 'infile':\n infile, 'chunk_size': writer_params['chunk_size'], 'comet_timeout': \n 120.0}, raw_buf, None, params)\n", (10612, 10767), False, 'from kotekan import runner\n'), ((10897, 10928), 'h5py.File', 'h5py.File', (["(outfile + '.h5')", '"""r"""'], {}), "(outfile + '.h5', 'r')\n", (10906, 10928), False, 'import h5py\n'), ((12329, 12445), 'numpy.array', 'np.array', (["[(s['prod'], s['conjugate']) for s in meta['index_map']['stack']]"], {'dtype': "f['index_map']['stack'].dtype"}), "([(s['prod'], s['conjugate']) for s in meta['index_map']['stack']],\n dtype=f['index_map']['stack'].dtype)\n", (12337, 12445), True, 'import 
numpy as np\n'), ((12539, 12658), 'numpy.array', 'np.array', (["[(s['stack'], s['conjugate']) for s in meta['reverse_map']['stack']]"], {'dtype': "f['reverse_map/stack'].dtype"}), "([(s['stack'], s['conjugate']) for s in meta['reverse_map']['stack'\n ]], dtype=f['reverse_map/stack'].dtype)\n", (12547, 12658), True, 'import numpy as np\n'), ((6117, 6170), 'glob.glob', 'glob.glob', (["(tmpdir + '/20??????T??????Z_*_corr/*.meta')"], {}), "(tmpdir + '/20??????T??????Z_*_corr/*.meta')\n", (6126, 6170), False, 'import glob\n'), ((6212, 6238), 'os.path.splitext', 'os.path.splitext', (['files[0]'], {}), '(files[0])\n', (6228, 6238), False, 'import os\n'), ((6284, 6338), 'glob.glob', 'glob.glob', (["(tmpdir_h5 + '/20??????T??????Z_*_corr/*.h5')"], {}), "(tmpdir_h5 + '/20??????T??????Z_*_corr/*.h5')\n", (6293, 6338), False, 'import glob\n'), ((6383, 6409), 'os.path.splitext', 'os.path.splitext', (['files[0]'], {}), '(files[0])\n', (6399, 6409), False, 'import os\n'), ((10306, 10359), 'glob.glob', 'glob.glob', (["(tmpdir + '/20??????T??????Z_*_corr/*.meta')"], {}), "(tmpdir + '/20??????T??????Z_*_corr/*.meta')\n", (10315, 10359), False, 'import glob\n'), ((10401, 10427), 'os.path.splitext', 'os.path.splitext', (['files[0]'], {}), '(files[0])\n', (10417, 10427), False, 'import os\n'), ((12281, 12312), 'msgpack.load', 'msgpack.load', (['f_meta'], {'raw': '(False)'}), '(f_meta, raw=False)\n', (12293, 12312), False, 'import msgpack\n'), ((13188, 13235), 'test_compression.float_allclose', 'float_allclose', (['a_vis[:np1]', 'a_vis[np1:2 * np1]'], {}), '(a_vis[:np1], a_vis[np1:2 * np1])\n', (13202, 13235), False, 'from test_compression import float_allclose\n'), ((8859, 8883), 'numpy.moveaxis', 'np.moveaxis', (['f[d]', '(0)', '(-1)'], {}), '(f[d], 0, -1)\n', (8870, 8883), True, 'import numpy as np\n'), ((13758, 13776), 'numpy.arange', 'np.arange', (['lb', '(256)'], {}), '(lb, 256)\n', (13767, 13776), True, 'import numpy as np\n'), ((13805, 13848), 'test_compression.float_allclose', 
'float_allclose', (['v1[:nv]', '(cj - ci + 1.0j * d)'], {}), '(v1[:nv], cj - ci + 1.0j * d)\n', (13819, 13848), False, 'from test_compression import float_allclose\n'), ((13911, 13920), 'numpy.abs', 'np.abs', (['d'], {}), '(d)\n', (13917, 13920), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Functions for transforming between color spaces.
Change log:
2015/10/09 -- compand, inverse_compand added; <EMAIL>
"""
import copy
import numpy
import imageutils
import whitepoint
#
# module constants
#
# CIE threshold constants used by lab_inverse_compand below to switch
# between the cube-root branch and the linear branch.
_lab_epsilon = 0.008856
_lab_kappa = 903.3
#
# module utilities
#
def _safe_divide(num, denom, replace=0):
    """Elementwise division tolerating zeros in the denominator.

    Wherever the denominator is zero, the corresponding quotient entry is
    set to ``replace`` (zero by default) instead of producing inf/nan.
    """
    #consider moving to a utility module; copy over tests from colorunittests.py
    num = _to_npa(num)
    denom = _to_npa(denom)
    assert(num.shape == denom.shape)
    bad = denom == 0.0
    if not bad.any():
        return num / denom
    # divide by a patched copy of the denominator, then overwrite the
    # entries that were invalid with the replacement value
    patched = copy.copy(denom)
    patched[bad] = 1.0
    quotient = num / patched
    quotient[bad] = replace
    return quotient
def _to_npa(value):
"""Converts a scalar or list to a numpy array."""
#consider moving to a utility module; copy over tests from colorunittests.py
if isinstance(value, numpy.ndarray):
return value
elif isinstance(value, (list, tuple)):
#note: any /ordered/ iterable should be allowed to work, as long as it
#is numeric
#TODO: include checks for other numeric iterables
return numpy.array(value)
elif isinstance(value, (int, float)):
return numpy.array([value])
else:
raise TypeError
def _bilevel_func(x, func_a, func_b, threshold, thresh_var=None):
    """Applies two different functions to an input depending on a threshold.

    For entries whose threshold variable exceeds ``threshold``, ``func_a``
    is applied (it is the default branch); entries less than or equal to
    the threshold are replaced by ``func_b``'s outputs.  If ``thresh_var``
    is None, ``x`` itself is thresholded.
    """
    values = _to_npa(x)
    gate = values if thresh_var is None else _to_npa(thresh_var)
    # func_a could modify a numpy input in place (since _to_npa returns the
    # same object for arrays), so it is handed a copy to protect the caller
    out = func_a(values.copy())
    below = gate <= threshold
    if below.any():
        out[below] = func_b(values[below])
    return out
#
# transforms!
#
def compand(rgb_linear):
    """Compand from linear RGB to sRGB.

    The input is linear RGB in the range [0..1]; the output is a uint8
    sRGB image.
    """
    assert(isinstance(rgb_linear, numpy.ndarray))
    # gamma branch for values above the threshold, linear branch below
    def gamma(x):
        return 1.055 * x ** (1 / 2.4) - 0.055
    def linear(x):
        return x * 12.92
    srgb = _bilevel_func(rgb_linear, gamma, linear, 0.0031308)
    return imageutils.float2uint8(srgb)
def inverse_compand(img):
    """Convert from sRGB to linear RGB by inverting the sRGB companding.

    The input image should be uint8. The output is linear and is in the
    range of [0..1].
    """
    assert(isinstance(img, numpy.ndarray))
    assert(img.dtype == numpy.uint8) #TODO: throw a TypeError instead
    normalized = img / 255. #convert to normalized float
    #the two branches of the inverse sRGB companding curve
    def inv_gamma(v):
        return ((v + 0.055) / 1.055) ** 2.4
    def inv_linear(v):
        return v / 12.92
    return _bilevel_func(normalized, inv_gamma, inv_linear, 0.0405)
def xyz2xy(vec):
    """Calculate the (x,y) chromaticity coordinates from an XYZ triplet."""
    assert(isinstance(vec, numpy.ndarray))
    assert(len(vec) == 3)
    total = float(numpy.sum(vec))
    return vec[:2] / total
def xyz2xyy(vector):
    """Convert from XYZ to xyY.

    Note: xyY is sometimes called xyL."""
    assert(isinstance(vector, numpy.ndarray))
    assert(len(vector) == 3)
    #TODO: be careful about the stacking if vector is vert or horiz
    total = float(numpy.sum(vector))
    return numpy.hstack((vector[0:2] / total, vector[1]))
def xyy2xyz(vector):
    """Convert from xyY to XYZ.

    Note: xyY is sometimes called xyL."""
    assert(isinstance(vector, numpy.ndarray))
    assert(len(vector) == 3)
    #unpack for readability: chromaticity (x, y) plus luminance Y
    x, y, Y = vector[0], vector[1], vector[2]
    return numpy.array([x * Y / y,
                        Y,
                        (1.0 - x - y) * Y / y])
def xy2xyz(vector):
    """Convert (x,y) coordinates to an XYZ triplet, assuming Y=1."""
    assert(isinstance(vector, numpy.ndarray))
    assert(len(vector) == 2)
    xyy = numpy.append(vector, 1)
    return xyy2xyz(xyy)
def lab_inverse_compand(v):
    """Inverse companding used when converting XYZ to Lab and Luv.

    This is the cube-root companding that produces the f values used
    directly to compute L*a*b*. The input is the X, Y, or Z value,
    normalized against the whitepoint used to encode the XYZ colorspace.
    """
    #constants from the CIE standard; Lindbloom notes that these lead to
    #a discontinuity due to truncation
    def cube_root(t):
        return t ** (1 / 3.)
    def linear_part(t):
        return (_lab_kappa * t + 16.) / 116.
    return _bilevel_func(v, cube_root, linear_part, _lab_epsilon)
def xyz2lab(xyz_img, white_ref=whitepoint.D50):
    """Converts from XYZ to CIELAB (aka, L*a*b*).

    The white_ref is the whitepoint of the XYZ color space, and defaults to
    D50. Use any other whitepoint.WhitePoint object as a reference if needed.
    The whitepoint values should be on the same order as the XYZ values. For
    example, if XYZ ranges from [0..1], the whitepoint should have values
    close to 1.
    """
    #default is D50 whitepoint for XYZ colors; Lab is device independent
    assert(isinstance(xyz_img, numpy.ndarray))
    X, Y, Z = imageutils.split3(xyz_img)
    #normalize against the whitepoint (assumed to share the XYZ scale);
    #the ratios land near [0..1] but values >1 are legitimate, not an error
    f_x = lab_inverse_compand(X / white_ref.X)
    f_y = lab_inverse_compand(Y / white_ref.Y)
    f_z = lab_inverse_compand(Z / white_ref.Z)
    L = 116.0 * f_y - 16.0
    a = 500.0 * (f_x - f_y)
    b = 200.0 * (f_y - f_z)
    return imageutils.cat3(L, a, b)
def _lab_finv(V):
    """Inverts the Lab companding applied to the f values."""
    def cubed(f):
        return f ** 3.0
    def linear_part(f):
        return (116. * f - 16) / _lab_kappa
    return _bilevel_func(V, cubed, linear_part, _lab_epsilon ** (1 / 3.))
def _lab_yinv(L):
    """Recovers the normalized Y value from the Lab lightness L."""
    def cubed_shift(v):
        return ((v + 16.) / 116.) ** 3.
    def linear_part(v):
        return v / _lab_kappa
    return _bilevel_func(L, cubed_shift, linear_part, _lab_epsilon * _lab_kappa)
def lab2xyz(lab_img, white_ref=whitepoint.D50):
    """Converts CIELAB's L*a*b* to XYZ.

    The white_ref is the whitepoint of the XYZ color space; use any
    whitepoint.WhitePoint object as a reference if needed. The default is D50.
    """
    assert(isinstance(lab_img, numpy.ndarray))
    L, a, b = imageutils.split3(lab_img)
    #reconstruct the companded f values from the Lab channels
    f_y = (L + 16.) / 116.
    f_x = f_y + a / 500.
    f_z = f_y - b / 200.
    #undo the companding, then rescale by the whitepoint
    return imageutils.cat3(_lab_finv(f_x) * white_ref.X,
                           _lab_yinv(L) * white_ref.Y,
                           _lab_finv(f_z) * white_ref.Z)
def _uprime(X, Y, Z):
    """Calculates the u' value used in XYZ<->Luv."""
    denom = X + 15. * Y + 3. * Z
    return _safe_divide(4. * X, denom)
def _vprime(X, Y, Z):
    """Calculates the v' value used in XYZ<->Luv."""
    denom = X + 15. * Y + 3. * Z
    return _safe_divide(9. * Y, denom)
def xyz2luv(xyz_img, white_ref=whitepoint.D50):
    """Converts XYZ to CIELUV (aka, L*u*v*).

    A whitepoint reference of D50 is assumed for the XYZ values. Any other
    whitepoint, as a whitepoint.WhitePoint object, can be used -- and should
    have the same scale as the XYZ values.
    """
    assert(isinstance(xyz_img, numpy.ndarray))
    X, Y, Z = imageutils.split3(xyz_img)
    #lightness uses the same bilevel companding as Lab
    def big_part(y):
        return 116. * y ** (1 / 3.) - 16.
    def small_part(y):
        return y * _lab_kappa
    L = _bilevel_func(Y / white_ref.Y, big_part, small_part, _lab_epsilon)
    #u and v are offsets from the whitepoint's chromaticity
    ref_u = _uprime(*white_ref.XYZ)
    ref_v = _vprime(*white_ref.XYZ)
    u = 13.0 * L * (_uprime(X, Y, Z) - ref_u)
    v = 13.0 * L * (_vprime(X, Y, Z) - ref_v)
    return imageutils.cat3(L, u, v)
def luv2xyz(luv_img, white_ref=whitepoint.D50):
    """Converts CIELUV to XYZ.

    The white_ref is the whitepoint of the XYZ colorspace, and defaults to
    D50. Use any other whitepoint.WhitePoint object as needed.
    """
    #equation from wikipedia->CIELUV
    assert(isinstance(luv_img, numpy.ndarray))
    L, u, v = imageutils.split3(luv_img)
    def cubed_shift(x):
        return ((x + 16.) / 116.) ** 3.
    def linear_part(x):
        return x / _lab_kappa
    Y = white_ref.Y * _bilevel_func(L, cubed_shift, linear_part,
                                    _lab_kappa * _lab_epsilon)
    #recover the absolute chromaticities from the whitepoint-relative u, v
    uprime = _safe_divide(u, 13. * L) + _uprime(*white_ref.XYZ)
    vprime = _safe_divide(v, 13. * L) + _vprime(*white_ref.XYZ)
    X = Y * _safe_divide(9. * uprime, 4. * vprime)
    Z = Y * _safe_divide(12. - 3. * uprime - 20. * vprime, 4. * vprime)
    return imageutils.cat3(X, Y, Z)
def uv2xy(vector):
    """Converts (u,v) chromaticity coordinates to (x,y)."""
    assert(len(vector) == 2)
    u, v = vector[0], vector[1]
    denom = 6. * u - 16. * v + 12.
    return numpy.array([_safe_divide(9. * u, denom),
                        _safe_divide(4. * v, denom)]).flatten()
def xy2uv(vector):
    """Converts (x,y) chromaticity coordinates to (u,v)."""
    assert(len(vector) == 2)
    x, y = vector[0], vector[1]
    denom = -2. * x + 12. * y + 3.
    return numpy.array([_safe_divide(4. * x, denom),
                        _safe_divide(9. * y, denom)]).flatten()
def luv2lch(luv_img):
    """Converts CIELUV to a LCh representation.

    L: luminance
    C: chroma
    h: hue (in radians)
    """
    assert(isinstance(luv_img, numpy.ndarray))
    L, u, v = imageutils.split3(luv_img)
    chroma = numpy.sqrt(u ** 2 + v ** 2)
    hue = numpy.arctan2(v, u)
    return imageutils.cat3(L, chroma, hue)
#TODO: this function isn't strictly related to color transforms; could easily
#move to another module which computes color quantities (aka, "correlates")
def lch_saturation(lch_img):
    """Calculates the saturation correlate for an LCh image.

    Saturation is chroma relative to lightness (C / L). Pixels with L == 0
    yield 0.0 instead of triggering a divide-by-zero.
    """
    assert(isinstance(lch_img, numpy.ndarray))
    L, C, _ = imageutils.split3(lch_img)
    #use _safe_divide for consistency with the rest of the module: black
    #pixels (L == 0) would otherwise produce inf/nan and a numpy warning
    return _safe_divide(C, L)
#TODO: add other non-linear transforms
#TODO: add simple method to do linear transforms given an image and a matrix
#TODO: add class that provides metadata tracking for different transforms
| [
"numpy.sqrt",
"numpy.hstack",
"imageutils.split3",
"imageutils.float2uint8",
"numpy.array",
"imageutils.cat3",
"numpy.sum",
"numpy.arctan2",
"copy.copy"
] | [((2893, 2928), 'imageutils.float2uint8', 'imageutils.float2uint8', (['sRGB_double'], {}), '(sRGB_double)\n', (2915, 2928), False, 'import imageutils\n'), ((4193, 4311), 'numpy.array', 'numpy.array', (['[vector[0] * vector[2] / vector[1], vector[2], (1.0 - vector[0] - vector[1]\n ) * vector[2] / vector[1]]'], {}), '([vector[0] * vector[2] / vector[1], vector[2], (1.0 - vector[0] -\n vector[1]) * vector[2] / vector[1]])\n', (4204, 4311), False, 'import numpy\n'), ((5843, 5869), 'imageutils.split3', 'imageutils.split3', (['xyz_img'], {}), '(xyz_img)\n', (5860, 5869), False, 'import imageutils\n'), ((6242, 6266), 'imageutils.cat3', 'imageutils.cat3', (['L', 'a', 'b'], {}), '(L, a, b)\n', (6257, 6266), False, 'import imageutils\n'), ((6950, 6976), 'imageutils.split3', 'imageutils.split3', (['lab_img'], {}), '(lab_img)\n', (6967, 6976), False, 'import imageutils\n'), ((7128, 7197), 'imageutils.cat3', 'imageutils.cat3', (['(xr * white_ref.X)', '(yr * white_ref.Y)', '(zr * white_ref.Z)'], {}), '(xr * white_ref.X, yr * white_ref.Y, zr * white_ref.Z)\n', (7143, 7197), False, 'import imageutils\n'), ((7871, 7897), 'imageutils.split3', 'imageutils.split3', (['xyz_img'], {}), '(xyz_img)\n', (7888, 7897), False, 'import imageutils\n'), ((8290, 8314), 'imageutils.cat3', 'imageutils.cat3', (['L', 'u', 'v'], {}), '(L, u, v)\n', (8305, 8314), False, 'import imageutils\n'), ((8640, 8666), 'imageutils.split3', 'imageutils.split3', (['luv_img'], {}), '(luv_img)\n', (8657, 8666), False, 'import imageutils\n'), ((9147, 9171), 'imageutils.cat3', 'imageutils.cat3', (['X', 'Y', 'Z'], {}), '(X, Y, Z)\n', (9162, 9171), False, 'import imageutils\n'), ((9834, 9860), 'imageutils.split3', 'imageutils.split3', (['luv_img'], {}), '(luv_img)\n', (9851, 9860), False, 'import imageutils\n'), ((9869, 9896), 'numpy.sqrt', 'numpy.sqrt', (['(u ** 2 + v ** 2)'], {}), '(u ** 2 + v ** 2)\n', (9879, 9896), False, 'import numpy\n'), ((9901, 9920), 'numpy.arctan2', 'numpy.arctan2', (['v', 'u'], {}), '(v, 
u)\n', (9914, 9920), False, 'import numpy\n'), ((9932, 9956), 'imageutils.cat3', 'imageutils.cat3', (['L', 'C', 'h'], {}), '(L, C, h)\n', (9947, 9956), False, 'import imageutils\n'), ((10266, 10292), 'imageutils.split3', 'imageutils.split3', (['lch_img'], {}), '(lch_img)\n', (10283, 10292), False, 'import imageutils\n'), ((869, 885), 'copy.copy', 'copy.copy', (['denom'], {}), '(denom)\n', (878, 885), False, 'import copy\n'), ((4538, 4563), 'numpy.hstack', 'numpy.hstack', (['(vector, 1)'], {}), '((vector, 1))\n', (4550, 4563), False, 'import numpy\n'), ((1470, 1488), 'numpy.array', 'numpy.array', (['value'], {}), '(value)\n', (1481, 1488), False, 'import numpy\n'), ((3628, 3642), 'numpy.sum', 'numpy.sum', (['vec'], {}), '(vec)\n', (3637, 3642), False, 'import numpy\n'), ((9375, 9394), 'numpy.array', 'numpy.array', (['[x, y]'], {}), '([x, y])\n', (9386, 9394), False, 'import numpy\n'), ((9608, 9627), 'numpy.array', 'numpy.array', (['[u, v]'], {}), '([u, v])\n', (9619, 9627), False, 'import numpy\n'), ((1546, 1566), 'numpy.array', 'numpy.array', (['[value]'], {}), '([value])\n', (1557, 1566), False, 'import numpy\n'), ((3929, 3946), 'numpy.sum', 'numpy.sum', (['vector'], {}), '(vector)\n', (3938, 3946), False, 'import numpy\n')] |
import numpy as np
class Configuration:
    """Holds a linear program in canonical form for the simplex solver."""
    def __init__(self):
        # Each constraint row: [coefficient_1, coefficient_2, ..., constant_value]
        self.constraint = [
            [ 5, -2, 11],
            [ 4,  2, 28],
            [-3,  3,  6],
        ]
        # Objective row, same layout: [coefficient_1, ..., constant_value]
        self.object = [-4, -5, 4]
class Simplex:
    """Solves a linear program with the simplex method.

    The problem is read from cnf.constraint / cnf.object (each row is
    [coefficient_1, ..., constant_value]) and turned into a slack-variable
    tableau ("dictionary") that is pivoted until no negative entry remains
    in the objective row.
    """
    def __init__(self, cnf):
        self.cnf = cnf
        # Make dictionary (= Basis form expression)
        stack = np.vstack((self.cnf.constraint, self.cnf.object))
        slack = np.eye(stack.shape[0])
        # BUGFIX: build the tableau as floats. np.insert casts the inserted
        # slack columns to the stacked array's (integer) dtype, and the
        # in-place row assignments in run() would then silently truncate
        # the fractional pivot rows, producing wrong results.
        self.prob = np.insert(stack.astype(float), [-1], slack, axis=1)
        print("Initial Dictionary:")
        print(self.prob)
    def run(self):
        """Pivots until optimal, giving up after 100 operations."""
        count = 0
        while any(self.prob[-1] < 0): # while having space to be optimized
            # Title
            count += 1
            print("\n<Operation " + str(count) + ">")
            # Decide column ID with Dantzig's selection method
            col_id = self.prob[-1].argmin()
            # Decide row ID with the minimum-ratio test. Only constraint
            # rows with a positive pivot-column entry are candidates; this
            # avoids dividing by zero and never pivots on the objective row
            # or on a non-positive entry. (Unbounded problems are still not
            # detected explicitly.)
            rhs = self.prob[:-1, -1]
            col = self.prob[:-1, col_id]
            cand = np.full(rhs.shape[0], np.inf)
            valid = col > 0
            cand[valid] = rhs[valid] / col[valid]
            row_id = cand.argmin()
            # Calculate a variable to be assigned
            pivot_row = self.prob[row_id] / self.prob[row_id, col_id]
            # Assign: normalize the pivot row and eliminate the pivot
            # column from every other row
            for i in range(self.prob.shape[0]):
                if i == row_id:
                    self.prob[i] = pivot_row
                else:
                    self.prob[i] = self.prob[i] - (pivot_row * self.prob[i, col_id])
            # Show temporary result
            print(" Dictionary:")
            print(self.prob)
            # Avoid infinity loop
            if count > 100:
                print("\n Failed")
                break
        # Show final result
        if not count > 100:
            print("\n <Final result> \n Objective value = " + str(self.prob[-1, -1]) + "\n")
# Script entry point: solve the demo problem from Configuration.
if __name__ == '__main__':
    config = Configuration()
    print("\n\t\t[ Simplex Method ]\n")
    solver = Simplex(config)
    solver.run()
| [
"numpy.insert",
"numpy.eye",
"numpy.vstack"
] | [((892, 941), 'numpy.vstack', 'np.vstack', (['(self.cnf.constraint, self.cnf.object)'], {}), '((self.cnf.constraint, self.cnf.object))\n', (901, 941), True, 'import numpy as np\n'), ((964, 986), 'numpy.eye', 'np.eye', (['stack.shape[0]'], {}), '(stack.shape[0])\n', (970, 986), True, 'import numpy as np\n'), ((1009, 1046), 'numpy.insert', 'np.insert', (['stack', '[-1]', 'slack'], {'axis': '(1)'}), '(stack, [-1], slack, axis=1)\n', (1018, 1046), True, 'import numpy as np\n')] |
"""
Tests split.py
"""
import unittest
import numpy as np
from IoTPy.core.agent import Agent, InList
from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue
from IoTPy.agent_types.check_agent_parameter_types import *
from IoTPy.helper_functions.recent_values import recent_values
from IoTPy.agent_types.split import *
#------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------
# TEST SPLIT
#------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------
class test_split_agents(unittest.TestCase):
    def test_split_agents(self):
        """Exercises the agents of IoTPy's split module end to end.

        Drives split_element / split_element_f (stateless, with keyword
        args, with state), unzip, separate, timed_unzip, split_list,
        split_window and split_tuple through a shared scheduler, checking
        recent_values() after each batch of stream extensions. The
        ordering of extend/step/assert groups is significant: each
        scheduler.step() flushes the pending stream items through the
        agents before the following assertions inspect the outputs.
        """
        scheduler = Stream.scheduler
        s = Stream('s')
        u = Stream('u')
        v = Stream('v')
        w = Stream('w')
        y = Stream('y')
        z = Stream('z')
        
        # Test split
        # func operates on a single element of the single input stream and
        # return a list of elements, one for each output stream.
        def h(element):
            return [element+1, element*2]
        def h_args(element, addend, multiplier):
            return [element+addend, element*multiplier]
        in_stream_split = Stream('in_stream_split')
        r = Stream('r')
        t = Stream('t')
        e = split_element(func=h, in_stream=in_stream_split,
                          out_streams=[r, t], name='e')
        r_split, t_split = split_element_f(function=h, in_stream=in_stream_split,
                                           num_out_streams=2, )
        r_args, t_args = split_element_f(
            h_args, in_stream_split, 2, addend=1, multiplier=2)
        scheduler.step()
        assert recent_values(r) == []
        assert recent_values(t) == []
        assert recent_values(r_split) == recent_values(r)
        assert recent_values(t_split) == recent_values(t)
        assert recent_values(r_args) == recent_values(r)
        assert recent_values(t_args) == recent_values(t)
        
        in_stream_split.extend(list(range(5)))
        scheduler.step()
        assert recent_values(r) == [1, 2, 3, 4, 5]
        assert recent_values(t) == [0, 2, 4, 6, 8]
        assert recent_values(r_split) == recent_values(r)
        assert recent_values(t_split) == recent_values(t)
        assert recent_values(r_args) == recent_values(r)
        assert recent_values(t_args) == recent_values(t)
        
        in_stream_split.append(10)
        scheduler.step()
        assert recent_values(r) == [1, 2, 3, 4, 5, 11]
        assert recent_values(t) == [0, 2, 4, 6, 8, 20]
        
        in_stream_split.extend([20, 100])
        scheduler.step()
        assert recent_values(r) == [1, 2, 3, 4, 5, 11, 21, 101]
        assert recent_values(t) == [0, 2, 4, 6, 8, 20, 40, 200]
        assert recent_values(r_split) == recent_values(r)
        assert recent_values(t_split) == recent_values(t)
        assert recent_values(r_args) == recent_values(r)
        assert recent_values(t_args) == recent_values(t)
        
        # Test split with kwargs
        def f_list(element, list_of_functions):
            return [f(element) for f in list_of_functions]
        def f_0(element):
            return element*2
        def f_1(element):
            return element+10
        x = Stream('x')
        rr = Stream('rr')
        tt = Stream('tt')
        ee = split_element(func=f_list, in_stream=x, out_streams=[rr, tt], name='ee',
                           list_of_functions=[f_0, f_1])
        x.extend(list(range(5)))
        scheduler.step()
        assert recent_values(rr) == [0, 2, 4, 6, 8]
        assert recent_values(tt) == [10, 11, 12, 13, 14]
        
        # ------------------------------------
        # Test split with state
        # func operates on an element of the single input stream and state.
        # func returns a list with one element for each output stream.
        def h_state(element, state):
            return ([element+state, element*state], state+1)
        
        r_state = Stream(name='r_state')
        t_state = Stream(name='t_state')
        in_stream_split_state = Stream('in_stream_split_state')
        
        e_state = split_element(
            func=h_state, in_stream=in_stream_split_state,
            out_streams=[r_state, t_state], name='e', state=0)
        
        scheduler.step()
        assert recent_values(r_state) == []
        assert recent_values(t_state) == []
        
        in_stream_split_state.extend(list(range(5)))
        scheduler.step()
        assert recent_values(r_state) == [0, 2, 4, 6, 8]
        assert recent_values(t_state) == [0, 1, 4, 9, 16]
        
        in_stream_split_state.append(20)
        scheduler.step()
        assert recent_values(r_state) == [0, 2, 4, 6, 8, 25]
        assert recent_values(t_state) == [0, 1, 4, 9, 16, 100]
        
        in_stream_split_state.extend([44, 93])
        scheduler.step()
        assert recent_values(r_state) == [0, 2, 4, 6, 8, 25, 50, 100]
        assert recent_values(t_state) == [0, 1, 4, 9, 16, 100, 264, 651]
        
        # ------------------------------------
        # Test split with state and args
        def hh_state(element, state, increment):
            return ([element+state, element*state], state+increment)
        
        rr_state = Stream(name='rr_state')
        tt_state = Stream(name='tt_state')
        in_stream_split_state_funcargs = Stream('in_stream_split_state_funcargs')
        
        ee_state_agent = split_element(
            func=hh_state,
            in_stream=in_stream_split_state_funcargs,
            out_streams=[rr_state, tt_state],
            name='ee_state_agent', state=0, increment=10)
        
        scheduler.step()
        assert recent_values(rr_state) == []
        assert recent_values(tt_state) == []
        
        in_stream_split_state_funcargs.extend(list(range(5)))
        scheduler.step()
        assert recent_values(rr_state) == [0, 11, 22, 33, 44]
        assert recent_values(tt_state) == [0, 10, 40, 90, 160]
        
        #------------------------------------------------------------------------------------------------
        # UNZIP AGENT TESTS
        #------------------------------------------------------------------------------------------------
        s_unzip = Stream('s_unzip')
        u_unzip = Stream('u_unzip')
        x_unzip = Stream('x_unzip')
        
        # ------------------------------------
        # Test unzip
        unzip(in_stream=s_unzip, out_streams=[x_unzip, u_unzip])
        d_unzip_fn = unzip_f(s_unzip, 2)
        
        s_unzip.extend([(1,10), (2,15), (3,18)])
        scheduler.step()
        assert recent_values(x_unzip) == [1, 2, 3]
        assert recent_values(u_unzip) == [10, 15, 18]
        assert recent_values(d_unzip_fn[0]) == x_unzip.recent[:3]
        assert recent_values(d_unzip_fn[1]) == u_unzip.recent[:3]
        
        s_unzip.extend([(37,96)])
        scheduler.step()
        assert recent_values(x_unzip) == [1, 2, 3, 37]
        assert recent_values(u_unzip) == [10, 15, 18, 96]
        assert recent_values(d_unzip_fn[0]) == x_unzip.recent[:4]
        assert recent_values(d_unzip_fn[1]) == u_unzip.recent[:4]
        
        #------------------------------------------------------------------------------------------------
        # SEPARATE AGENT TESTS
        #------------------------------------------------------------------------------------------------
        s_separate = Stream('s separate')
        u_separate = Stream('u separate')
        x_separate = Stream('x separate')
        
        d_separate = separate(
            in_stream=s_separate, out_streams=[x_separate,u_separate],
            name='d separate')
        x_sep_func, u_sep_func = separate_f(s_separate, 2)
        
        s_separate.extend([(0,10), (1,15), (0,20)])
        scheduler.step()
        assert recent_values(x_separate) == [10, 20]
        assert recent_values(u_separate) == [15]
        assert x_sep_func.recent == x_separate.recent
        assert u_sep_func.recent == u_separate.recent
        
        s_separate.extend([(1,96)])
        scheduler.step()
        assert recent_values(x_separate) == [10, 20]
        assert recent_values(u_separate) == [15, 96]
        assert recent_values(x_sep_func) == recent_values(x_separate)
        assert recent_values(u_sep_func) == recent_values(u_separate)
        
        #------------------------------------------------------------------------------------------------
        # TIMED_UNZIP TESTS
        #------------------------------------------------------------------------------------------------
        # timed_unzip tests
        t_unzip = Stream()
        a_unzip = Stream('a_unzip')
        b_unzip = Stream('b_unzip')
        
        timed_unzip(t_unzip, [a_unzip, b_unzip])
        t_unzip_0, t_unzip_1 = timed_unzip_f(in_stream=t_unzip, num_out_streams=2)
        
        t_unzip.extend(
            [(1, ["A", None]), (5, ["B", "a"]), (7, [None, "b"]),
             (9, ["C", "c"]), (10, [None, "d"])])
        scheduler.step()
        assert recent_values(t_unzip_0) == [(1, 'A'), (5, 'B'), (9, 'C')]
        assert recent_values(t_unzip_1) == [(5, 'a'), (7, 'b'), (9, 'c'), (10, 'd')]
        assert recent_values(a_unzip) == recent_values(t_unzip_0)
        assert recent_values(b_unzip) == recent_values(t_unzip_1)
        
        #------------------------------------------------------------------------------------------------
        # TEST SPLIT WITH STREAM_ARRAY
        #------------------------------------------------------------------------------------------------
        # Test split_element with StreamArray
        x = StreamArray('x')
        y = StreamArray('y')
        z = StreamArray('z')
        
        def h_args(element, addend, multiplier):
            return [element+addend, element*multiplier]
        
        this_agent = split_element(func=h_args, in_stream=x, out_streams=[y,z],
                                   addend=1.0 , multiplier=2.0, name='this_agent')
        
        add_to_x = np.linspace(0.0, 4.0, 5)
        x.extend(add_to_x)
        scheduler.step()
        assert np.array_equal(recent_values(y), add_to_x+1.0)
        assert np.array_equal(recent_values(z), add_to_x*2.0)
        
        # Test separate with StreamArray
        x = StreamArray('x', dimension=2)
        y = StreamArray('y')
        z = StreamArray('z')
        
        separate(x, [y,z])
        
        x.append(np.array([1.0, 10.0]))
        scheduler.step()
        assert np.array_equal(recent_values(z), np.array([10.0]))
        assert np.array_equal(recent_values(y), np.array([]))
        
        x.extend(np.array([[0.0, 2.0], [1.0, 20.0], [0.0, 4.0]]))
        scheduler.step()
        assert np.array_equal(recent_values(z), np.array([10.0, 20.0]))
        assert np.array_equal(recent_values(y), np.array([2.0, 4.0]))
        
        # ------------------------------------------------------
        # TEST split_list
        # ------------------------------------------------------
        x = Stream('x')
        y = Stream('y')
        z = Stream('z')
        
        def f(lst):
            return [v*2 for v in lst], [v*10 for v in lst]
        
        split_list(f, x, [y, z])
        x.extend(list(range(3)))
        scheduler.step()
        assert recent_values(y) == [v*2 for v in recent_values(x)]
        assert recent_values(z) == [v*10 for v in recent_values(x)]
        
        x.append(100)
        scheduler.step()
        assert recent_values(y) == [v*2 for v in recent_values(x)]
        assert recent_values(z) == [v*10 for v in recent_values(x)]
        
        # ------------------------------------------------------
        # TEST split_window
        # ------------------------------------------------------
        def f(window):
            return max(window), min(window)
        
        x = Stream('x')
        y = Stream('y')
        z = Stream('z')
        
        split_window(
            func=f, in_stream=x, out_streams=[y, z], window_size=2, step_size=2)
        
        x.extend(list(range(7)))
        scheduler.step()
        assert recent_values(y) == [1, 3, 5]
        assert recent_values(z) == [0, 2, 4]
        
        def f(window):
            return max(window), min(window)
        
        x = Stream('x')
        y = Stream('y')
        z = Stream('z')
        
        split_window(
            func=f, in_stream=x, out_streams=[y, z], window_size=3, step_size=3)
        
        x.extend(list(range(12)))
        scheduler.step()
        assert recent_values(y) == [2, 5, 8, 11]
        assert recent_values(z) == [0, 3, 6, 9]
        
        # ------------------------------------------------------
        # TEST split_tuple
        # ------------------------------------------------------
        x = Stream('x')
        y = Stream('y')
        z = Stream('z')
        split_tuple(in_stream=x, out_streams=[y, z])
        x.append((0, 'A'))
        x.extend([(1, 'B'), (2, 'C')])
        
        scheduler.step()
        assert recent_values(y) == [0, 1, 2]
        assert recent_values(z) == ['A', 'B', 'C']
        
        def f(window):
            return max(window), min(window)
        
        x = Stream('x')
        y = Stream('y')
        z = Stream('z')
        
        split_window(
            func=f, in_stream=x, out_streams=[y, z], window_size=3, step_size=3)
        
        x.extend(list(range(12)))
        scheduler.step()
        assert recent_values(y) == [2, 5, 8, 11]
        assert recent_values(z) == [0, 3, 6, 9]
        print ('TEST OF SPLIT IS SUCCESSFUL')
# Run the split-agent test suite when this file is executed as a script.
if __name__ == '__main__':
    unittest.main()
| [
"IoTPy.helper_functions.recent_values.recent_values",
"IoTPy.core.stream.Stream",
"IoTPy.core.stream.StreamArray",
"numpy.array",
"numpy.linspace",
"unittest.main"
] | [((13749, 13764), 'unittest.main', 'unittest.main', ([], {}), '()\n', (13762, 13764), False, 'import unittest\n'), ((929, 940), 'IoTPy.core.stream.Stream', 'Stream', (['"""s"""'], {}), "('s')\n", (935, 940), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((962, 973), 'IoTPy.core.stream.Stream', 'Stream', (['"""u"""'], {}), "('u')\n", (968, 973), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((986, 997), 'IoTPy.core.stream.Stream', 'Stream', (['"""v"""'], {}), "('v')\n", (992, 997), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((1010, 1021), 'IoTPy.core.stream.Stream', 'Stream', (['"""w"""'], {}), "('w')\n", (1016, 1021), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((1043, 1054), 'IoTPy.core.stream.Stream', 'Stream', (['"""y"""'], {}), "('y')\n", (1049, 1054), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((1067, 1078), 'IoTPy.core.stream.Stream', 'Stream', (['"""z"""'], {}), "('z')\n", (1073, 1078), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((1440, 1465), 'IoTPy.core.stream.Stream', 'Stream', (['"""in_stream_split"""'], {}), "('in_stream_split')\n", (1446, 1465), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((1478, 1489), 'IoTPy.core.stream.Stream', 'Stream', (['"""r"""'], {}), "('r')\n", (1484, 1489), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((1502, 1513), 'IoTPy.core.stream.Stream', 'Stream', (['"""t"""'], {}), "('t')\n", (1508, 1513), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((3486, 3497), 'IoTPy.core.stream.Stream', 'Stream', (['"""x"""'], {}), "('x')\n", (3492, 3497), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((3511, 3523), 
'IoTPy.core.stream.Stream', 'Stream', (['"""rr"""'], {}), "('rr')\n", (3517, 3523), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((3537, 3549), 'IoTPy.core.stream.Stream', 'Stream', (['"""tt"""'], {}), "('tt')\n", (3543, 3549), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((4209, 4231), 'IoTPy.core.stream.Stream', 'Stream', ([], {'name': '"""r_state"""'}), "(name='r_state')\n", (4215, 4231), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((4250, 4272), 'IoTPy.core.stream.Stream', 'Stream', ([], {'name': '"""t_state"""'}), "(name='t_state')\n", (4256, 4272), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((4305, 4336), 'IoTPy.core.stream.Stream', 'Stream', (['"""in_stream_split_state"""'], {}), "('in_stream_split_state')\n", (4311, 4336), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((5461, 5484), 'IoTPy.core.stream.Stream', 'Stream', ([], {'name': '"""rr_state"""'}), "(name='rr_state')\n", (5467, 5484), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((5504, 5527), 'IoTPy.core.stream.Stream', 'Stream', ([], {'name': '"""tt_state"""'}), "(name='tt_state')\n", (5510, 5527), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((5569, 5609), 'IoTPy.core.stream.Stream', 'Stream', (['"""in_stream_split_state_funcargs"""'], {}), "('in_stream_split_state_funcargs')\n", (5575, 5609), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((6450, 6467), 'IoTPy.core.stream.Stream', 'Stream', (['"""s_unzip"""'], {}), "('s_unzip')\n", (6456, 6467), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((6486, 6503), 'IoTPy.core.stream.Stream', 'Stream', (['"""u_unzip"""'], {}), "('u_unzip')\n", (6492, 6503), False, 'from IoTPy.core.stream 
import StreamArray, Stream, _no_value, _multivalue\n'), ((6522, 6539), 'IoTPy.core.stream.Stream', 'Stream', (['"""x_unzip"""'], {}), "('x_unzip')\n", (6528, 6539), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((7656, 7676), 'IoTPy.core.stream.Stream', 'Stream', (['"""s separate"""'], {}), "('s separate')\n", (7662, 7676), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((7698, 7718), 'IoTPy.core.stream.Stream', 'Stream', (['"""u separate"""'], {}), "('u separate')\n", (7704, 7718), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((7740, 7760), 'IoTPy.core.stream.Stream', 'Stream', (['"""x separate"""'], {}), "('x separate')\n", (7746, 7760), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((8873, 8881), 'IoTPy.core.stream.Stream', 'Stream', ([], {}), '()\n', (8879, 8881), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((8900, 8917), 'IoTPy.core.stream.Stream', 'Stream', (['"""a_unzip"""'], {}), "('a_unzip')\n", (8906, 8917), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((8936, 8953), 'IoTPy.core.stream.Stream', 'Stream', (['"""b_unzip"""'], {}), "('b_unzip')\n", (8942, 8953), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((9895, 9911), 'IoTPy.core.stream.StreamArray', 'StreamArray', (['"""x"""'], {}), "('x')\n", (9906, 9911), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((9924, 9940), 'IoTPy.core.stream.StreamArray', 'StreamArray', (['"""y"""'], {}), "('y')\n", (9935, 9940), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((9953, 9969), 'IoTPy.core.stream.StreamArray', 'StreamArray', (['"""z"""'], {}), "('z')\n", (9964, 9969), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, 
_multivalue\n'), ((10270, 10294), 'numpy.linspace', 'np.linspace', (['(0.0)', '(4.0)', '(5)'], {}), '(0.0, 4.0, 5)\n', (10281, 10294), True, 'import numpy as np\n'), ((10525, 10554), 'IoTPy.core.stream.StreamArray', 'StreamArray', (['"""x"""'], {'dimension': '(2)'}), "('x', dimension=2)\n", (10536, 10554), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((10567, 10583), 'IoTPy.core.stream.StreamArray', 'StreamArray', (['"""y"""'], {}), "('y')\n", (10578, 10583), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((10596, 10612), 'IoTPy.core.stream.StreamArray', 'StreamArray', (['"""z"""'], {}), "('z')\n", (10607, 10612), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((11237, 11248), 'IoTPy.core.stream.Stream', 'Stream', (['"""x"""'], {}), "('x')\n", (11243, 11248), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((11261, 11272), 'IoTPy.core.stream.Stream', 'Stream', (['"""y"""'], {}), "('y')\n", (11267, 11272), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((11285, 11296), 'IoTPy.core.stream.Stream', 'Stream', (['"""z"""'], {}), "('z')\n", (11291, 11296), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((12036, 12047), 'IoTPy.core.stream.Stream', 'Stream', (['"""x"""'], {}), "('x')\n", (12042, 12047), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((12060, 12071), 'IoTPy.core.stream.Stream', 'Stream', (['"""y"""'], {}), "('y')\n", (12066, 12071), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((12084, 12095), 'IoTPy.core.stream.Stream', 'Stream', (['"""z"""'], {}), "('z')\n", (12090, 12095), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((12447, 12458), 'IoTPy.core.stream.Stream', 'Stream', (['"""x"""'], {}), 
"('x')\n", (12453, 12458), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((12471, 12482), 'IoTPy.core.stream.Stream', 'Stream', (['"""y"""'], {}), "('y')\n", (12477, 12482), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((12495, 12506), 'IoTPy.core.stream.Stream', 'Stream', (['"""z"""'], {}), "('z')\n", (12501, 12506), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((12946, 12957), 'IoTPy.core.stream.Stream', 'Stream', (['"""x"""'], {}), "('x')\n", (12952, 12957), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((12970, 12981), 'IoTPy.core.stream.Stream', 'Stream', (['"""y"""'], {}), "('y')\n", (12976, 12981), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((12994, 13005), 'IoTPy.core.stream.Stream', 'Stream', (['"""z"""'], {}), "('z')\n", (13000, 13005), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((13336, 13347), 'IoTPy.core.stream.Stream', 'Stream', (['"""x"""'], {}), "('x')\n", (13342, 13347), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((13360, 13371), 'IoTPy.core.stream.Stream', 'Stream', (['"""y"""'], {}), "('y')\n", (13366, 13371), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((13384, 13395), 'IoTPy.core.stream.Stream', 'Stream', (['"""z"""'], {}), "('z')\n", (13390, 13395), False, 'from IoTPy.core.stream import StreamArray, Stream, _no_value, _multivalue\n'), ((1927, 1943), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['r'], {}), '(r)\n', (1940, 1943), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((1965, 1981), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['t'], {}), '(t)\n', (1978, 1981), False, 'from IoTPy.helper_functions.recent_values import 
recent_values\n'), ((2003, 2025), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['r_split'], {}), '(r_split)\n', (2016, 2025), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((2029, 2045), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['r'], {}), '(r)\n', (2042, 2045), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((2061, 2083), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['t_split'], {}), '(t_split)\n', (2074, 2083), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((2087, 2103), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['t'], {}), '(t)\n', (2100, 2103), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((2119, 2140), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['r_args'], {}), '(r_args)\n', (2132, 2140), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((2144, 2160), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['r'], {}), '(r)\n', (2157, 2160), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((2176, 2197), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['t_args'], {}), '(t_args)\n', (2189, 2197), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((2201, 2217), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['t'], {}), '(t)\n', (2214, 2217), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((2306, 2322), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['r'], {}), '(r)\n', (2319, 2322), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((2357, 2373), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['t'], {}), '(t)\n', (2370, 2373), False, 'from 
IoTPy.helper_functions.recent_values import recent_values\n'), ((2408, 2430), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['r_split'], {}), '(r_split)\n', (2421, 2430), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((2434, 2450), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['r'], {}), '(r)\n', (2447, 2450), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((2466, 2488), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['t_split'], {}), '(t_split)\n', (2479, 2488), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((2492, 2508), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['t'], {}), '(t)\n', (2505, 2508), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((2524, 2545), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['r_args'], {}), '(r_args)\n', (2537, 2545), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((2549, 2565), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['r'], {}), '(r)\n', (2562, 2565), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((2581, 2602), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['t_args'], {}), '(t_args)\n', (2594, 2602), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((2606, 2622), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['t'], {}), '(t)\n', (2619, 2622), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((2699, 2715), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['r'], {}), '(r)\n', (2712, 2715), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((2754, 2770), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['t'], {}), '(t)\n', 
(2767, 2770), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((2877, 2893), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['r'], {}), '(r)\n', (2890, 2893), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((2941, 2957), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['t'], {}), '(t)\n', (2954, 2957), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((3005, 3027), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['r_split'], {}), '(r_split)\n', (3018, 3027), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((3031, 3047), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['r'], {}), '(r)\n', (3044, 3047), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((3063, 3085), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['t_split'], {}), '(t_split)\n', (3076, 3085), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((3089, 3105), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['t'], {}), '(t)\n', (3102, 3105), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((3121, 3142), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['r_args'], {}), '(r_args)\n', (3134, 3142), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((3146, 3162), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['r'], {}), '(r)\n', (3159, 3162), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((3178, 3199), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['t_args'], {}), '(t_args)\n', (3191, 3199), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((3203, 3219), 'IoTPy.helper_functions.recent_values.recent_values', 
'recent_values', (['t'], {}), '(t)\n', (3216, 3219), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((3772, 3789), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['rr'], {}), '(rr)\n', (3785, 3789), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((3824, 3841), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['tt'], {}), '(tt)\n', (3837, 3841), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((4543, 4565), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['r_state'], {}), '(r_state)\n', (4556, 4565), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((4587, 4609), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['t_state'], {}), '(t_state)\n', (4600, 4609), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((4710, 4732), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['r_state'], {}), '(r_state)\n', (4723, 4732), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((4767, 4789), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['t_state'], {}), '(t_state)\n', (4780, 4789), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((4892, 4914), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['r_state'], {}), '(r_state)\n', (4905, 4914), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((4953, 4975), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['t_state'], {}), '(t_state)\n', (4966, 4975), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((5089, 5111), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['r_state'], {}), '(r_state)\n', (5102, 5111), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), 
((5159, 5181), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['t_state'], {}), '(t_state)\n', (5172, 5181), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((5877, 5900), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['rr_state'], {}), '(rr_state)\n', (5890, 5900), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((5922, 5945), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['tt_state'], {}), '(tt_state)\n', (5935, 5945), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((6056, 6079), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['rr_state'], {}), '(rr_state)\n', (6069, 6079), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((6118, 6141), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['tt_state'], {}), '(tt_state)\n', (6131, 6141), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((6822, 6844), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['x_unzip'], {}), '(x_unzip)\n', (6835, 6844), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((6873, 6895), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['u_unzip'], {}), '(u_unzip)\n', (6886, 6895), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((6927, 6955), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['d_unzip_fn[0]'], {}), '(d_unzip_fn[0])\n', (6940, 6955), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((6993, 7021), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['d_unzip_fn[1]'], {}), '(d_unzip_fn[1])\n', (7006, 7021), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((7124, 7146), 'IoTPy.helper_functions.recent_values.recent_values', 
'recent_values', (['x_unzip'], {}), '(x_unzip)\n', (7137, 7146), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((7179, 7201), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['u_unzip'], {}), '(u_unzip)\n', (7192, 7201), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((7237, 7265), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['d_unzip_fn[0]'], {}), '(d_unzip_fn[0])\n', (7250, 7265), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((7303, 7331), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['d_unzip_fn[1]'], {}), '(d_unzip_fn[1])\n', (7316, 7331), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((8047, 8072), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['x_separate'], {}), '(x_separate)\n', (8060, 8072), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((8100, 8125), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['u_separate'], {}), '(u_separate)\n', (8113, 8125), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((8319, 8344), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['x_separate'], {}), '(x_separate)\n', (8332, 8344), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((8372, 8397), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['u_separate'], {}), '(u_separate)\n', (8385, 8397), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((8425, 8450), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['x_sep_func'], {}), '(x_sep_func)\n', (8438, 8450), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((8454, 8479), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['x_separate'], {}), 
'(x_separate)\n', (8467, 8479), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((8495, 8520), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['u_sep_func'], {}), '(u_sep_func)\n', (8508, 8520), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((8524, 8549), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['u_separate'], {}), '(u_separate)\n', (8537, 8549), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((9278, 9302), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['t_unzip_0'], {}), '(t_unzip_0)\n', (9291, 9302), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((9352, 9376), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['t_unzip_1'], {}), '(t_unzip_1)\n', (9365, 9376), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((9437, 9459), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['a_unzip'], {}), '(a_unzip)\n', (9450, 9459), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((9463, 9487), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['t_unzip_0'], {}), '(t_unzip_0)\n', (9476, 9487), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((9503, 9525), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['b_unzip'], {}), '(b_unzip)\n', (9516, 9525), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((9529, 9553), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['t_unzip_1'], {}), '(t_unzip_1)\n', (9542, 9553), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((10377, 10393), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['y'], {}), '(y)\n', (10390, 10393), False, 'from IoTPy.helper_functions.recent_values import 
recent_values\n'), ((10439, 10455), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['z'], {}), '(z)\n', (10452, 10455), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((10658, 10679), 'numpy.array', 'np.array', (['[1.0, 10.0]'], {}), '([1.0, 10.0])\n', (10666, 10679), True, 'import numpy as np\n'), ((10736, 10752), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['z'], {}), '(z)\n', (10749, 10752), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((10754, 10770), 'numpy.array', 'np.array', (['[10.0]'], {}), '([10.0])\n', (10762, 10770), True, 'import numpy as np\n'), ((10802, 10818), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['y'], {}), '(y)\n', (10815, 10818), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((10820, 10832), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (10828, 10832), True, 'import numpy as np\n'), ((10852, 10899), 'numpy.array', 'np.array', (['[[0.0, 2.0], [1.0, 20.0], [0.0, 4.0]]'], {}), '([[0.0, 2.0], [1.0, 20.0], [0.0, 4.0]])\n', (10860, 10899), True, 'import numpy as np\n'), ((10956, 10972), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['z'], {}), '(z)\n', (10969, 10972), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((10974, 10996), 'numpy.array', 'np.array', (['[10.0, 20.0]'], {}), '([10.0, 20.0])\n', (10982, 10996), True, 'import numpy as np\n'), ((11028, 11044), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['y'], {}), '(y)\n', (11041, 11044), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((11046, 11066), 'numpy.array', 'np.array', (['[2.0, 4.0]'], {}), '([2.0, 4.0])\n', (11054, 11066), True, 'import numpy as np\n'), ((11485, 11501), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['y'], {}), '(y)\n', (11498, 11501), False, 'from 
IoTPy.helper_functions.recent_values import recent_values\n'), ((11552, 11568), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['z'], {}), '(z)\n', (11565, 11568), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((11668, 11684), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['y'], {}), '(y)\n', (11681, 11684), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((11735, 11751), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['z'], {}), '(z)\n', (11748, 11751), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((12282, 12298), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['y'], {}), '(y)\n', (12295, 12298), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((12327, 12343), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['z'], {}), '(z)\n', (12340, 12343), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((12694, 12710), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['y'], {}), '(y)\n', (12707, 12710), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((12743, 12759), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['z'], {}), '(z)\n', (12756, 12759), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((13165, 13181), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['y'], {}), '(y)\n', (13178, 13181), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((13210, 13226), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['z'], {}), '(z)\n', (13223, 13226), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((13583, 13599), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['y'], {}), '(y)\n', (13596, 
13599), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((13632, 13648), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['z'], {}), '(z)\n', (13645, 13648), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((11519, 11535), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['x'], {}), '(x)\n', (11532, 11535), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((11587, 11603), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['x'], {}), '(x)\n', (11600, 11603), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((11702, 11718), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['x'], {}), '(x)\n', (11715, 11718), False, 'from IoTPy.helper_functions.recent_values import recent_values\n'), ((11770, 11786), 'IoTPy.helper_functions.recent_values.recent_values', 'recent_values', (['x'], {}), '(x)\n', (11783, 11786), False, 'from IoTPy.helper_functions.recent_values import recent_values\n')] |
""" These functions extract the MFCC audio features of each trimmed wav file, and returns the
medians and interquartile ranges of each of the 50 coefficients, plus the trimmed audio length. """
import librosa
import librosa.display
import scipy.stats
import numpy as np
import pandas as pd
def mfccs_features(file_name):
""" Extract a 50 MFCC feature time-series for the audio and return the median, interquartile
range, and duration. Return a series of 0's for all features when the trimmed audio is 0. """
file_name_base = file_name[:-4]
# try and except are used to handle when the trim.wav files have been cut to 0.
try:
# Use "_trim.wav" instead of ".wav" for when you want to use the trimmed audio files.
audio_series, sample_rate = librosa.load(file_name_base+"_trim.wav")
duration = librosa.get_duration(y=audio_series, sr=sample_rate)
mfccs = librosa.feature.mfcc(y=audio_series, sr=sample_rate, n_mfcc=50)
c_iqr = list(scipy.stats.iqr(mfccs, axis=1))
c_med = list(np.median(mfccs, axis=1))
except:
c_iqr = [0] * 50
c_med = [0] * 50
duration = 0
return [file_name_base+"_trim.wav"]+c_iqr+c_med+[str(duration)]
def features_writer(file_name):
""" Return the feature list converted into a dataframe with labeled columns. """
feature_list = mfccs_features(file_name)
return pd.DataFrame([feature_list], columns=['Filename', 'IQR1', 'IQR2', 'IQR3', 'IQR4',\
'IQR5', 'IQR6', 'IQR7', 'IQR8', 'IQR9', 'IQR10', 'IQR11', 'IQR12', 'IQR13', 'IQR14',\
'IQR15', 'IQR16', 'IQR17', 'IQR18', 'IQR19', 'IQR20', 'IQR21', 'IQR22', 'IQR23', 'IQR24',\
'IQR25', 'IQR26', 'IQR27', 'IQR28', 'IQR29', 'IQR30', 'IQR31', 'IQR32', 'IQR33', 'IQR34',\
'IQR35', 'IQR36', 'IQR37', 'IQR38', 'IQR39', 'IQR40', 'IQR41', 'IQR42', 'IQR43', 'IQR44',\
'IQR45', 'IQR46', 'IQR47', 'IQR48', 'IQR49', 'IQR50', 'MED1', 'MED2', 'MED3', 'MED4',\
'MED5', 'MED6', 'MED7', 'MED8', 'MED9', 'MED10', 'MED11', 'MED12', 'MED13', 'MED14',\
'MED15', 'MED16', 'MED17', 'MED18', 'MED19', 'MED20', 'MED21', 'MED22', 'MED23', 'MED24',\
'MED25', 'MED26', 'MED27', 'MED28', 'MED29', 'MED30', 'MED31', 'MED32', 'MED33', 'MED34',\
'MED35', 'MED36', 'MED37', 'MED38', 'MED39', 'MED40', 'MED41', 'MED42', 'MED43', 'MED44',\
'MED45', 'MED46', 'MED47', 'MED48', 'MED49', 'MED50', 'Duration'])
| [
"numpy.median",
"librosa.feature.mfcc",
"librosa.get_duration",
"pandas.DataFrame",
"librosa.load"
] | [((1385, 2381), 'pandas.DataFrame', 'pd.DataFrame', (['[feature_list]'], {'columns': "['Filename', 'IQR1', 'IQR2', 'IQR3', 'IQR4', 'IQR5', 'IQR6', 'IQR7', 'IQR8',\n 'IQR9', 'IQR10', 'IQR11', 'IQR12', 'IQR13', 'IQR14', 'IQR15', 'IQR16',\n 'IQR17', 'IQR18', 'IQR19', 'IQR20', 'IQR21', 'IQR22', 'IQR23', 'IQR24',\n 'IQR25', 'IQR26', 'IQR27', 'IQR28', 'IQR29', 'IQR30', 'IQR31', 'IQR32',\n 'IQR33', 'IQR34', 'IQR35', 'IQR36', 'IQR37', 'IQR38', 'IQR39', 'IQR40',\n 'IQR41', 'IQR42', 'IQR43', 'IQR44', 'IQR45', 'IQR46', 'IQR47', 'IQR48',\n 'IQR49', 'IQR50', 'MED1', 'MED2', 'MED3', 'MED4', 'MED5', 'MED6',\n 'MED7', 'MED8', 'MED9', 'MED10', 'MED11', 'MED12', 'MED13', 'MED14',\n 'MED15', 'MED16', 'MED17', 'MED18', 'MED19', 'MED20', 'MED21', 'MED22',\n 'MED23', 'MED24', 'MED25', 'MED26', 'MED27', 'MED28', 'MED29', 'MED30',\n 'MED31', 'MED32', 'MED33', 'MED34', 'MED35', 'MED36', 'MED37', 'MED38',\n 'MED39', 'MED40', 'MED41', 'MED42', 'MED43', 'MED44', 'MED45', 'MED46',\n 'MED47', 'MED48', 'MED49', 'MED50', 'Duration']"}), "([feature_list], columns=['Filename', 'IQR1', 'IQR2', 'IQR3',\n 'IQR4', 'IQR5', 'IQR6', 'IQR7', 'IQR8', 'IQR9', 'IQR10', 'IQR11',\n 'IQR12', 'IQR13', 'IQR14', 'IQR15', 'IQR16', 'IQR17', 'IQR18', 'IQR19',\n 'IQR20', 'IQR21', 'IQR22', 'IQR23', 'IQR24', 'IQR25', 'IQR26', 'IQR27',\n 'IQR28', 'IQR29', 'IQR30', 'IQR31', 'IQR32', 'IQR33', 'IQR34', 'IQR35',\n 'IQR36', 'IQR37', 'IQR38', 'IQR39', 'IQR40', 'IQR41', 'IQR42', 'IQR43',\n 'IQR44', 'IQR45', 'IQR46', 'IQR47', 'IQR48', 'IQR49', 'IQR50', 'MED1',\n 'MED2', 'MED3', 'MED4', 'MED5', 'MED6', 'MED7', 'MED8', 'MED9', 'MED10',\n 'MED11', 'MED12', 'MED13', 'MED14', 'MED15', 'MED16', 'MED17', 'MED18',\n 'MED19', 'MED20', 'MED21', 'MED22', 'MED23', 'MED24', 'MED25', 'MED26',\n 'MED27', 'MED28', 'MED29', 'MED30', 'MED31', 'MED32', 'MED33', 'MED34',\n 'MED35', 'MED36', 'MED37', 'MED38', 'MED39', 'MED40', 'MED41', 'MED42',\n 'MED43', 'MED44', 'MED45', 'MED46', 'MED47', 'MED48', 'MED49', 'MED50',\n 'Duration'])\n", (1397, 
2381), True, 'import pandas as pd\n'), ((767, 809), 'librosa.load', 'librosa.load', (["(file_name_base + '_trim.wav')"], {}), "(file_name_base + '_trim.wav')\n", (779, 809), False, 'import librosa\n'), ((827, 879), 'librosa.get_duration', 'librosa.get_duration', ([], {'y': 'audio_series', 'sr': 'sample_rate'}), '(y=audio_series, sr=sample_rate)\n', (847, 879), False, 'import librosa\n'), ((896, 959), 'librosa.feature.mfcc', 'librosa.feature.mfcc', ([], {'y': 'audio_series', 'sr': 'sample_rate', 'n_mfcc': '(50)'}), '(y=audio_series, sr=sample_rate, n_mfcc=50)\n', (916, 959), False, 'import librosa\n'), ((1034, 1058), 'numpy.median', 'np.median', (['mfccs'], {'axis': '(1)'}), '(mfccs, axis=1)\n', (1043, 1058), True, 'import numpy as np\n')] |
import numpy as np
import scipy as sp
class LapinTransformer:
def __init__(self, sim):
W = sim - np.diag(sim.diagonal())
D = np.diag(W.sum(axis=1))
L = D - W
inv_sqrt_D = np.linalg.inv(sp.linalg.sqrtm(D))
Ln = inv_sqrt_D.dot(L).dot(inv_sqrt_D)
w, v = np.linalg.eig(Ln)
indices = ~np.isclose(w, 0)
v_ = v[:, indices]
w_ = np.diag(w[indices])
Lp = v_.dot(np.linalg.inv(w_)).dot(v_.T)
self.W = W
self.D = D
self.L = L
self.Ln = Ln
self.Lp = Lp
| [
"scipy.linalg.sqrtm",
"numpy.isclose",
"numpy.linalg.eig",
"numpy.diag",
"numpy.linalg.inv"
] | [((309, 326), 'numpy.linalg.eig', 'np.linalg.eig', (['Ln'], {}), '(Ln)\n', (322, 326), True, 'import numpy as np\n'), ((404, 423), 'numpy.diag', 'np.diag', (['w[indices]'], {}), '(w[indices])\n', (411, 423), True, 'import numpy as np\n'), ((226, 244), 'scipy.linalg.sqrtm', 'sp.linalg.sqrtm', (['D'], {}), '(D)\n', (241, 244), True, 'import scipy as sp\n'), ((346, 362), 'numpy.isclose', 'np.isclose', (['w', '(0)'], {}), '(w, 0)\n', (356, 362), True, 'import numpy as np\n'), ((445, 462), 'numpy.linalg.inv', 'np.linalg.inv', (['w_'], {}), '(w_)\n', (458, 462), True, 'import numpy as np\n')] |
import errno
from functools import partial
import glob
import os
import uuid
import numpy as np
import pathlib2 as pathlib
import renderapi
from asap.utilities.pillow_utils import Image
from asap.materialize.render_downsample_sections import (
RenderSectionAtScale)
from asap.dataimport.schemas import (
MakeMontageScapeSectionStackParameters, MakeMontageScapeSectionStackOutput)
from asap.module.render_module import (
StackOutputModule, RenderModuleException)
# Example input for running this module stand-alone; keys mirror the fields
# consumed by MakeMontageScapeSectionStackParameters below.
example = {
    "render": {
        "host": "http://em-131fs",
        "port": 8080,
        "owner": "gayathri",
        "project": "Tests",
        "client_scripts": "/allen/programs/celltypes/workgroups/em-connectomics/gayathrim/nc-em2/Janelia_Pipeline/render_latest/render-ws-java-client/src/main/scripts"
    },
    "montage_stack": "rough_test_montage_stack",
    "output_stack": "rough_test_downsample_montage_stack",
    "image_directory": "/allen/programs/celltypes/workgroups/em-connectomics/gayathrim/scratch",
    "set_new_z": "False",
    "new_z_start": 1020,
    "remap_section_ids": False,
    "imgformat": "png",
    "scale": 0.1,
    "apply_scale": "False",
    "zstart": 1020,
    "zend": 1022,
    "pool_size": 20
}
def create_montage_scape_tile_specs(render, input_stack, image_directory,
                                    scale, project, tagstr, imgformat,
                                    Z, apply_scale=False, uuid_prefix=True,
                                    uuid_prefix_length=10,
                                    **kwargs):
    """Create and write a single-tile tilespec for one downsampled section.

    Z is a (source_z, output_z) pair.  Locates (or, if missing, renders) the
    downsampled montage image for source z, clones the first tilespec of that
    z, points it at the downsampled image, and dumps the result as
    ``tilespec_%04d.json`` under the tagged tilespecs directory.

    Extra ``kwargs`` are forwarded to
    RenderSectionAtScale.downsample_specific_mipmapLevel when the image has
    to be generated.
    """
    z = Z[0]
    newz = Z[1]
    # create the full path to the images
    # directory structure as per Render's RenderSectionClient output
    [q, r] = divmod(z, 1000)
    s = int(r / 100)
    filename = os.path.join(image_directory,
                            project,
                            input_stack,
                            'sections_at_%s' % str(scale),
                            '%03d' % q,
                            '%d' % s,
                            '%s.0.%s' % (str(z), imgformat))
    # This is really a slow way of generating the downsample sections
    # need to submit the job in a cluster
    if not os.path.isfile(filename):
        print("Montage scape does not exist for %d. Creating one now..." % z)
        tempstack = RenderSectionAtScale.downsample_specific_mipmapLevel(
            [z], input_stack, image_directory=image_directory,
            scale=scale, render=render, imgformat=imgformat, **kwargs)
        filename = os.path.join(image_directory,
                                project,
                                tempstack,
                                'sections_at_%s' % str(scale),
                                '%03d' % q,
                                '%d' % s,
                                '%s.0.%s' % (str(z), imgformat))
    # generate tilespec for this z
    tilespecs = render.run(renderapi.tilespec.get_tile_specs_from_z,
                           input_stack,
                           z)
    # generate tilespec for downsampled montage
    # tileId is the first tileId from source z
    t = tilespecs[0]
    if uuid_prefix:
        # prefix with a random hex id so re-runs get unique tileIds
        t.tileId = "ds{uid}_{tId}".format(
            uid=uuid.uuid4().hex[:uuid_prefix_length],
            tId=t.tileId)
    with Image.open(filename) as im:
        t.width, t.height = im.size
    t.ip[0] = renderapi.image_pyramid.MipMap(
        imageUrl=pathlib.Path(filename).as_uri())
    # keep only mipmap level 0 -- the downsampled image itself
    [t.ip.pop(k) for k in list(t.ip.keys()) if k != '0']
    t.minIntensity = 0
    t.maxIntensity = 255
    t.z = newz
    t.layout.sectionId = "%s.0" % str(int(newz))
    if apply_scale:
        # undo the downsampling in world coordinates via an affine scale
        t.tforms = [renderapi.transform.AffineModel(
            M00=(1./scale), M11=(1./scale))]
    else:
        t.tforms = [renderapi.transform.AffineModel(
            M00=(1.), M11=(1.))]
    allts = [t]
    tilespecfilename = os.path.join(image_directory,
                                    project,
                                    input_stack,
                                    'sections_at_%s' % str(scale),
                                    'tilespecs_%s' % tagstr,
                                    'tilespec_%04d.json' % z)
    with open(tilespecfilename, 'w') as fp:
        renderapi.utils.renderdump(allts, fp, indent=4)
class MakeMontageScapeSectionStack(StackOutputModule):
    """Build a stack of downsampled montage scapes.

    For each requested z it materializes (or reuses) a downsampled montage
    image, writes a one-tile tilespec via create_montage_scape_tile_specs,
    and imports the tilespecs into the output stack.
    """
    default_schema = MakeMontageScapeSectionStackParameters
    default_output_schema = MakeMontageScapeSectionStackOutput
    def run(self):
        """Generate montage-scape tilespecs and import them into the output stack."""
        self.logger.debug('Montage scape stack generation module')
        # get the list of z indices
        zvalues = self.render.run(
            renderapi.stack.get_z_values_for_stack,
            self.args['montage_stack'])
        # restrict to the intersection with the user-requested z values
        zvalues1 = self.zValues
        zvalues = list(set(zvalues1).intersection(set(zvalues)))
        if not zvalues:
            raise RenderModuleException(
                'No sections found for stack {}'.format(
                    self.args['montage_stack']))
        # generate tuple of old and new Zs
        # setting a new z range does not check whether the range
        # overlaps with existing sections/chunks in the output stack
        if self.args['set_new_z']:
            zvalues = list(np.sort(np.array(zvalues)))
            diffarray = [x-zvalues[0] for x in zvalues]
            newzvalues = [self.args['new_z_start'] + x for x in diffarray]
        else:
            newzvalues = zvalues
        Z = [[int(oldz), int(newz)] for oldz, newz in zip(zvalues, newzvalues)]
        out_stack_exists = self.args['output_stack'] in self.render.run(
            renderapi.render.get_stacks_by_owner_project)
        if out_stack_exists:
            # check whether overwrite z is set to false. If so, then remove those z that is already in output stack
            # NOTE(review): outzvalues is fetched but never used below -- confirm intended.
            outzvalues = renderapi.stack.get_z_values_for_stack(
                self.args['output_stack'],
                render=self.render)
            if self.overwrite_zlayer:
                # stack has to be in loading state
                renderapi.stack.set_stack_state(self.args['output_stack'],
                                                'LOADING',
                                                render=self.render)
                for oldz, newz in zip(zvalues, newzvalues):
                    # delete the section from output stack
                    renderapi.stack.delete_section(self.args['output_stack'],
                                                   newz,
                                                   render=self.render)
        # generate the tag string to add to output tilespec json file name
        tagstr = "%s_%s" % (min(zvalues), max(zvalues))
        tilespecdir = os.path.join(self.args['image_directory'],
                                   self.args['render']['project'],
                                   self.args['montage_stack'],
                                   'sections_at_%s'%str(self.args['scale']),
                                   'tilespecs_%s'%tagstr)
        try:
            os.makedirs(tilespecdir)
        except OSError as e:
            # tolerate an already-existing directory; re-raise anything else
            if e.errno != errno.EEXIST:
                raise
            pass
        # separate render connection sized for the materialization workload
        render_materialize = renderapi.connect(
            **self.render.make_kwargs(memGB=self.args['memGB_materialize']))
        # process for each z
        mypartial = partial(
            create_montage_scape_tile_specs,
            render_materialize,
            self.args['montage_stack'],
            self.args['image_directory'],
            self.args['scale'],
            self.args['render']['project'],
            tagstr,
            self.args['imgformat'],
            apply_scale=self.args['apply_scale'],
            level=self.args['level'],
            pool_size=1,
            doFilter=self.args['doFilter'],
            fillWithNoise=self.args['fillWithNoise'],
            uuid_prefix=self.args["uuid_prefix"],
            uuid_prefix_length=self.args["uuid_length"],
            do_mp=False)
        with renderapi.client.WithPool(
                self.args['pool_size_materialize']) as pool:
            pool.map(mypartial, Z)
        # get all the output tilespec json files
        tspath = os.path.join(self.args['image_directory'],
                              self.args['render']['project'],
                              self.args['montage_stack'],
                              'sections_at_%s' % str(self.args['scale']),
                              'tilespecs_%s' % tagstr)
        jsonfiles = glob.glob("%s/*.json" % tspath)
        if not jsonfiles:
            raise RenderModuleException('No tilespecs json files were generated')
        # create the stack if it doesn't exist
        if self.output_stack not in self.render.run(
                renderapi.render.get_stacks_by_owner_project):
            # stack does not exist
            # TODO configurable stack metadata
            self.render.run(renderapi.stack.create_stack,
                            self.output_stack,
                            cycleNumber=5,
                            cycleStepNumber=1,
                            stackResolutionX=1,
                            stackResolutionY=1)
        # import tilespecs to render
        self.render.run(renderapi.client.import_jsonfiles_parallel,
                        self.output_stack,
                        jsonfiles)
        if self.close_stack:
            # set stack state to complete
            self.render.run(renderapi.stack.set_stack_state,
                            self.output_stack,
                            state='COMPLETE')
        self.output({'output_stack': self.output_stack})
if __name__ == "__main__":
    # Script entry point: build the module and run it.
    mod = MakeMontageScapeSectionStack()
    mod.run()
| [
"renderapi.stack.delete_section",
"renderapi.transform.AffineModel",
"renderapi.utils.renderdump",
"os.makedirs",
"uuid.uuid4",
"os.path.isfile",
"pathlib2.Path",
"renderapi.stack.get_z_values_for_stack",
"numpy.array",
"functools.partial",
"glob.glob",
"renderapi.stack.set_stack_state",
"as... | [((2177, 2201), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (2191, 2201), False, 'import os\n'), ((2301, 2472), 'asap.materialize.render_downsample_sections.RenderSectionAtScale.downsample_specific_mipmapLevel', 'RenderSectionAtScale.downsample_specific_mipmapLevel', (['[z]', 'input_stack'], {'image_directory': 'image_directory', 'scale': 'scale', 'render': 'render', 'imgformat': 'imgformat'}), '([z], input_stack,\n image_directory=image_directory, scale=scale, render=render, imgformat=\n imgformat, **kwargs)\n', (2353, 2472), False, 'from asap.materialize.render_downsample_sections import RenderSectionAtScale\n'), ((3283, 3303), 'asap.utilities.pillow_utils.Image.open', 'Image.open', (['filename'], {}), '(filename)\n', (3293, 3303), False, 'from asap.utilities.pillow_utils import Image\n'), ((4234, 4281), 'renderapi.utils.renderdump', 'renderapi.utils.renderdump', (['allts', 'fp'], {'indent': '(4)'}), '(allts, fp, indent=4)\n', (4260, 4281), False, 'import renderapi\n'), ((7371, 7846), 'functools.partial', 'partial', (['create_montage_scape_tile_specs', 'render_materialize', "self.args['montage_stack']", "self.args['image_directory']", "self.args['scale']", "self.args['render']['project']", 'tagstr', "self.args['imgformat']"], {'apply_scale': "self.args['apply_scale']", 'level': "self.args['level']", 'pool_size': '(1)', 'doFilter': "self.args['doFilter']", 'fillWithNoise': "self.args['fillWithNoise']", 'uuid_prefix': "self.args['uuid_prefix']", 'uuid_prefix_length': "self.args['uuid_length']", 'do_mp': '(False)'}), "(create_montage_scape_tile_specs, render_materialize, self.args[\n 'montage_stack'], self.args['image_directory'], self.args['scale'],\n self.args['render']['project'], tagstr, self.args['imgformat'],\n apply_scale=self.args['apply_scale'], level=self.args['level'],\n pool_size=1, doFilter=self.args['doFilter'], fillWithNoise=self.args[\n 'fillWithNoise'], uuid_prefix=self.args['uuid_prefix'],\n 
uuid_prefix_length=self.args['uuid_length'], do_mp=False)\n", (7378, 7846), False, 'from functools import partial\n'), ((8531, 8562), 'glob.glob', 'glob.glob', (["('%s/*.json' % tspath)"], {}), "('%s/*.json' % tspath)\n", (8540, 8562), False, 'import glob\n'), ((3652, 3717), 'renderapi.transform.AffineModel', 'renderapi.transform.AffineModel', ([], {'M00': '(1.0 / scale)', 'M11': '(1.0 / scale)'}), '(M00=1.0 / scale, M11=1.0 / scale)\n', (3683, 3717), False, 'import renderapi\n'), ((3760, 3809), 'renderapi.transform.AffineModel', 'renderapi.transform.AffineModel', ([], {'M00': '(1.0)', 'M11': '(1.0)'}), '(M00=1.0, M11=1.0)\n', (3791, 3809), False, 'import renderapi\n'), ((5814, 5904), 'renderapi.stack.get_z_values_for_stack', 'renderapi.stack.get_z_values_for_stack', (["self.args['output_stack']"], {'render': 'self.render'}), "(self.args['output_stack'], render=\n self.render)\n", (5852, 5904), False, 'import renderapi\n'), ((7063, 7087), 'os.makedirs', 'os.makedirs', (['tilespecdir'], {}), '(tilespecdir)\n', (7074, 7087), False, 'import os\n'), ((8028, 8089), 'renderapi.client.WithPool', 'renderapi.client.WithPool', (["self.args['pool_size_materialize']"], {}), "(self.args['pool_size_materialize'])\n", (8053, 8089), False, 'import renderapi\n'), ((8608, 8671), 'asap.module.render_module.RenderModuleException', 'RenderModuleException', (['"""No tilespecs json files were generated"""'], {}), "('No tilespecs json files were generated')\n", (8629, 8671), False, 'from asap.module.render_module import StackOutputModule, RenderModuleException\n'), ((6063, 6156), 'renderapi.stack.set_stack_state', 'renderapi.stack.set_stack_state', (["self.args['output_stack']", '"""LOADING"""'], {'render': 'self.render'}), "(self.args['output_stack'], 'LOADING',\n render=self.render)\n", (6094, 6156), False, 'import renderapi\n'), ((3410, 3432), 'pathlib2.Path', 'pathlib.Path', (['filename'], {}), '(filename)\n', (3422, 3432), True, 'import pathlib2 as pathlib\n'), ((5232, 5249), 
'numpy.array', 'np.array', (['zvalues'], {}), '(zvalues)\n', (5240, 5249), True, 'import numpy as np\n'), ((6388, 6476), 'renderapi.stack.delete_section', 'renderapi.stack.delete_section', (["self.args['output_stack']", 'newz'], {'render': 'self.render'}), "(self.args['output_stack'], newz, render=self\n .render)\n", (6418, 6476), False, 'import renderapi\n'), ((3208, 3220), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3218, 3220), False, 'import uuid\n')] |
import numpy as np

np.random.seed(1)

# Parallel-corpus locations and dev/test split sizes.
DATA_PREFIX = '/hdd/data/iwslt18/open-subtitles18/data.raw/OpenSubtitles2018.en-eu'
OUT_DIR = "/hdd/data/iwslt18/open-subtitles18/data.raw"
DEV_NUM = 4000
TEST_NUM = 1000

# Read the line-aligned Basque (.eu) / English (.en) files.
eu_file = open((DATA_PREFIX+".eu"), "r")
en_file = open((DATA_PREFIX+".en"), "r")
eu_list = eu_file.readlines()
en_list = en_file.readlines()
total_num = len(eu_list)

# Sample disjoint dev/test line indices without replacement.
dev_test_indices = np.random.choice(total_num, DEV_NUM+TEST_NUM, replace=False)
# Convert to sets: `i in <numpy array>` is a linear scan per lookup, which
# makes the split loop O(total_num * (DEV_NUM + TEST_NUM)).  Set membership
# is O(1) and behaves identically for these integer indices.
dev_indexes = set(dev_test_indices[:DEV_NUM].tolist())
test_indexes = set(dev_test_indices[DEV_NUM:].tolist())

# write file
train_eu = open((OUT_DIR+"/train.eu"), "w")
train_en = open((OUT_DIR+"/train.en"), "w")
dev_eu = open((OUT_DIR+"/dev.eu"), "w")
dev_en = open((OUT_DIR+"/dev.en"), "w")
test_eu = open((OUT_DIR+"/test.eu"), "w")
test_en = open((OUT_DIR+"/test.en"), "w")
for i, (eu_line, en_line) in enumerate(zip(eu_list, en_list)):
    if i in dev_indexes:
        # dev
        dev_eu.write(eu_line)
        dev_en.write(en_line)
    elif i in test_indexes:
        # test
        test_eu.write(eu_line)
        test_en.write(en_line)
    else:
        # train
        train_eu.write(eu_line)
        train_en.write(en_line)
train_eu.close()
train_en.close()
dev_eu.close()
dev_en.close()
test_eu.close()
test_en.close()
eu_file.close()
en_file.close()
| [
"numpy.random.choice",
"numpy.random.seed"
] | [((20, 37), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (34, 37), True, 'import numpy as np\n'), ((413, 475), 'numpy.random.choice', 'np.random.choice', (['total_num', '(DEV_NUM + TEST_NUM)'], {'replace': '(False)'}), '(total_num, DEV_NUM + TEST_NUM, replace=False)\n', (429, 475), True, 'import numpy as np\n')] |
'''
Train the double-prong model on t_1 mode
Input data: evidential grids produced from subsampling the waymo dataset for moving objects
'''
import importlib
import os
import tensorflow as tf
import random as rn
import matplotlib.pyplot as plt
import hickle as hkl
import numpy as np
import pdb
import argparse
import math
import datetime
import pytz
from keras import backend as K
from keras.models import Model, model_from_json
from keras.layers import Input, Dense, Flatten, Lambda, Concatenate, add
from keras.layers import LSTM
from keras.layers import TimeDistributed
from keras.callbacks import LearningRateScheduler, ModelCheckpoint, TensorBoard
from alt_model_checkpoint.keras import AltModelCheckpoint
from keras.optimizers import Adam
from keras.utils import multi_gpu_model
from data_utils import SequenceGenerator
# Fix every RNG in play (NumPy, Python stdlib, TensorFlow) so runs are
# reproducible.
np.random.seed(123)
rn.seed(123)
tf.set_random_seed(123)
def weighted_loss(y_true, y_pred):
    """Double-prong training loss.

    Column 0 of y_pred carries the static-prong error, column 1 the dynamic
    one; the dynamic term is weighted 10x in the combined loss.
    """
    DYNAMIC_WEIGHT = 10
    static_loss = mean_abs_error(y_true, y_pred[:, 0])
    dynamic_loss = mean_abs_error(y_true, y_pred[:, 1])
    return static_loss + DYNAMIC_WEIGHT * dynamic_loss
def mean_abs_error(y_true, y_pred):
    """Mean absolute error, computed with the Keras backend."""
    diff = y_pred - y_true
    return K.mean(K.abs(diff))
def separate_input_masks(tensors):
    """Split the input grids into static and dynamic prongs.

    Input shape should be (?, nt, 128, 128, 3): two grid channels plus a
    dynamic-cell mask in the last channel.  Returns
    [static_grids, dynamic_grids], each with the two grid channels gated by
    (1 - mask) and mask respectively.
    """
    mask = tf.expand_dims(tensors[:, :, :, :, -1], axis=-1)
    grids = tensors[:, :, :, :, 0:2]
    dynamic_part = tf.multiply(grids, mask)
    static_part = tf.multiply(grids, 1 - mask)
    return [static_part, dynamic_part]
# Custom Keras metrics: each returns a single scalar tensor so the two
# prongs' errors can be tracked separately during training.
def err_loss_static(y_true, y_pred):
    """MAE of the static prong (prediction column 0)."""
    return mean_abs_error(y_true, y_pred[:, 0])
def err_loss_dynamic(y_true, y_pred):
    """MAE of the dynamic prong (prediction column 1)."""
    return mean_abs_error(y_true, y_pred[:, 1])
def get_gradient_norm(model):
    """Return a tensor holding the global L2 norm of the model's gradients."""
    with K.name_scope('gradient_norm'):
        grad_tensors = K.gradients(model.total_loss, model.trainable_weights)
        squared_sums = [K.sum(K.square(g)) for g in grad_tensors]
        norm = K.sqrt(sum(squared_sums))
    return norm
def get_weight_norm(model):
    """Return a tensor holding the global L2 norm of all trainable weights."""
    with K.name_scope('w_norm'):
        squared_sums = [K.sum(K.square(w)) for w in model.trainable_weights]
        w_norm = K.sqrt(sum(squared_sums))
    return w_norm
# Command-line interface: output folder, GPU count, and the module names of
# the PredNet implementation to use for each prong.
ap = argparse.ArgumentParser()
ap.add_argument("-o", "--output", required=True, help="output folder name")
ap.add_argument("-g", "--gpus", type=int, default=1, help="# of GPUs to use for training")
ap.add_argument("-ps", "--predfilestatic", required=True, help="prednet or prednet_dilation")
ap.add_argument("-pd", "--predfiledynamic", required=True, help="prednet or prednet_dilation")
args = vars(ap.parse_args())
# Import the two PredNet variants dynamically so the static and dynamic
# prongs can use different architectures.
prednet_module_static = importlib.import_module(args["predfilestatic"])
PredNet_static = prednet_module_static.PredNet
prednet_module_dynamic = importlib.import_module(args["predfiledynamic"])
PredNet_dynamic = prednet_module_dynamic.PredNet
# Number of GPUs to replicate the model over.
G = args["gpus"]
# User Inputs
master_data_folder = "../../DATA_evidential_grid_splits/double_prong"
master_save_folder = os.path.join("../../DATA_predictions/double_prong/t_1_mode", args["output"])
if not os.path.exists(master_save_folder):
    os.makedirs(master_save_folder)
save_weights_path = os.path.join(master_save_folder, "weights_t_1.hdf5")
save_json_path = os.path.join(master_save_folder, "model_t_1.json")
save_model = True
# Load train/val splits
train_path = os.path.join(master_data_folder, "X_train.hkl")
train_source_path = os.path.join(master_data_folder, "sources_train.hkl")
val_path = os.path.join(master_data_folder, "X_val.hkl")
val_source_path = os.path.join(master_data_folder, "sources_val.hkl")
# Training parameters
# NOTE(review): num_epoch and samples_per_epoch look like debug-sized values
# (originals 60 and 2000 are left in trailing comments) -- confirm before a
# full training run.
num_tsteps = 20
num_epoch = 2 #60
batch_size = 16
samples_per_epoch = 5 #2000
num_seq_val = 780
K.set_learning_phase(1) # set the learning phase
# Model parameters
n_channels_prong, im_height, im_width = (2, 128, 128)
n_channels_input = 3
# K.image_data_format = "channels_last"
if K.image_data_format() == "channels_first":
    input_shape = (n_channels_input, im_height, im_width)
else:
    input_shape = (im_height, im_width, n_channels_input) # we fall under this case
# Build the double-prong graph: a shared input is split by the dynamic mask
# into static/dynamic streams, each fed to its own PredNet.
inputs = Input(shape = (num_tsteps,) + input_shape) # shape of the input is (nt, 128, 128, 3)
input_sep_layer = Lambda(separate_input_masks, trainable=False)
inputs_static, inputs_dynamic = input_sep_layer(inputs)
# Static Prong: 3 layers
stack_sizes_static = (n_channels_prong, 48, 96)
R_stack_sizes_static = stack_sizes_static
A_filt_sizes_static = (3, 3)
Ahat_filt_sizes_static = (3, 3, 3)
R_filt_sizes_static = (3, 3, 3)
prednet_base_static = PredNet_static(stack_sizes_static, R_stack_sizes_static, A_filt_sizes_static, Ahat_filt_sizes_static, R_filt_sizes_static, output_mode = "error", return_sequences = True)
# Re-instantiate from the config so the layer gets an explicit name.
layer_config_base_static = prednet_base_static.get_config()
layer_config_base_static["name"] = "prednet_static"
prednet_static = PredNet_static(**layer_config_base_static)
# Dynamic Prong: 2 layers
stack_sizes = (n_channels_prong, 48)
R_stack_sizes = stack_sizes
A_filt_sizes = (3,)
Ahat_filt_sizes = (3, 3)
R_filt_sizes = (3, 3)
layer_loss_weights_static = np.array([1., 0., 0.])
layer_loss_weights_static = np.expand_dims(layer_loss_weights_static, 1)
time_loss_weights_static = 1./ (num_tsteps - 1) * np.ones((num_tsteps, 1))
time_loss_weights_static[0] = 0.
layer_loss_weights_dynamic = np.array([1., 0.]) # weighting for each layer in final loss; "L_0" model: [1, 0, 0, 0], "L_all": [1, 0.1, 0.1, 0.1]
layer_loss_weights_dynamic = np.expand_dims(layer_loss_weights_dynamic, 1)
time_loss_weights = 1./ (num_tsteps - 1) * np.ones((num_tsteps, 1)) # equally weigh all timesteps except the first
time_loss_weights[0] = 0.
prednet_base_dynamic = PredNet_dynamic(stack_sizes, R_stack_sizes, A_filt_sizes, Ahat_filt_sizes, R_filt_sizes, output_mode = "error", return_sequences = True)
layer_config_base = prednet_base_dynamic.get_config()
layer_config_base["name"] = "prednet_dynamic"
prednet_dynamic = PredNet_dynamic(**layer_config_base)
errors_static = prednet_static(inputs_static) # errors will be (batch_size, nt, nb_layers)
errors_dynamic = prednet_dynamic(inputs_dynamic)
# Error_static: frozen Dense layers collapse per-layer then per-time errors
# into one scalar per sample using the fixed weight vectors above.
errors_by_time_static = TimeDistributed(Dense(1, trainable = False), weights = [layer_loss_weights_static, np.zeros(1)], trainable=False)(errors_static)
errors_by_time_static = Flatten()(errors_by_time_static) # will be (batch_size, nt)
final_errors_static = Dense(1, weights = [time_loss_weights_static, np.zeros(1)], trainable = False)(errors_by_time_static) # weight errors by time
# Error_dynamic
errors_by_time_dynamic = TimeDistributed(Dense(1, trainable = False), weights = [layer_loss_weights_dynamic, np.zeros(1)], trainable=False)(errors_dynamic)
errors_by_time_dynamic = Flatten()(errors_by_time_dynamic) # will be (batch_size, nt)
final_errors_dynamic = Dense(1, weights = [time_loss_weights, np.zeros(1)], trainable = False)(errors_by_time_dynamic)
errors = Concatenate()([final_errors_static, final_errors_dynamic])
# Keep the template model on the CPU so multi_gpu_model can replicate it.
with tf.device('/cpu:0'):
    model = Model(inputs = inputs, outputs = errors)
model.compile(loss=weighted_loss, optimizer="adam", metrics=[err_loss_static, err_loss_dynamic])
model.summary()
print("%\n%\n%\n%\n%\n%\n%\n%")
print("\n======== Confirming PredNet class ========\n")
print("prednet_static:", PredNet_static)
print("\nprednet_dynamic:", PredNet_dynamic)
print("%\n%\n%\n%\n%\n%\n%\n%")
# Replicate the model on G GPUs
parallel_model = multi_gpu_model(model, gpus=G)
parallel_model.compile(loss=weighted_loss, optimizer = "adam", metrics=[err_loss_static, err_loss_dynamic])
train_generator = SequenceGenerator(train_path, train_source_path, num_tsteps, batch_size=batch_size, shuffle=True)
val_generator = SequenceGenerator(val_path, val_source_path, num_tsteps, batch_size=batch_size, N_seq=num_seq_val)
print("Shapes: ", train_generator.X.shape, val_generator.X.shape)
print("train generator", np.amax(train_generator.X), np.amin(train_generator.X))
# NOTE(review): both branches of the schedule are 0.0001, so the learning
# rate is effectively constant -- confirm intended.
lr_schedule = lambda epoch: 0.0001 if epoch < 75 else 0.0001
lr_callback = LearningRateScheduler(lr_schedule)
# NOTE(review): weight_callback is only defined when save_model is True,
# but fit_generator below always references it -- NameError if save_model
# is ever set False.
if save_model:
    weight_callback = AltModelCheckpoint(save_weights_path, model, monitor='val_loss', save_best_only=True)
# Append the "l2 norm of gradients" tensor as a metric sequences for input
parallel_model.metrics_names.append("gradient_norm")
parallel_model.metrics_tensors.append(get_gradient_norm(parallel_model))
parallel_model.metrics_names.append("w_norm")
parallel_model.metrics_tensors.append(get_weight_norm(parallel_model))
# tensorboard
tz = pytz.timezone('America/Los_Angeles')
log_dir_folder = "../../DATA_predictions/double_prong/logs/t_1_mode/"
log_dir = log_dir_folder + args['output'] + "_" + datetime.datetime.now().astimezone(tz).strftime("%Y%m%d-%H:%M")
tensorboard_callback = TensorBoard(log_dir = log_dir)
history = parallel_model.fit_generator(train_generator, int(math.ceil(samples_per_epoch / batch_size)), num_epoch, callbacks=[tensorboard_callback, weight_callback, lr_callback], validation_data=val_generator, validation_steps=int(math.ceil(num_seq_val / batch_size)), verbose=1, use_multiprocessing=True, workers=12)
# Summarize training history: loss, gradient-norm, and weight-norm curves.
print(history.history.keys())

# Loss curves.
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
# Save BEFORE plt.show(): under interactive backends show() blocks and the
# figure can be destroyed when its window closes, so saving afterwards may
# write an empty image.
plt.savefig(os.path.join(master_save_folder, "loss_t_1.png"), dpi=300)
plt.show()

# Gradient-norm curves.
fig2 = plt.figure()
plt.plot(history.history['gradient_norm'])
plt.plot(history.history['val_gradient_norm'])
plt.title('gradient norm')
plt.ylabel('grad_norm')
plt.xlabel('epoch')
plt.legend(['train_g', 'val_g'], loc='upper left')
plt.savefig(os.path.join(master_save_folder, "gradient_t_1.png"), dpi=300)
plt.show()

# Weight-norm curves.
fig3 = plt.figure()
plt.plot(history.history['w_norm'])
plt.plot(history.history['val_w_norm'])
plt.title('weight norm')
plt.ylabel('w_norm')
plt.xlabel('epoch')
plt.legend(['train_w', 'val_w'], loc='upper left')
plt.savefig(os.path.join(master_save_folder, 'weight_t_1.png'), dpi=300)
plt.show()

# save history in a hickle file
hkl.dump(history.history, os.path.join(master_save_folder, "history_t_1.hkl"), mode='w')

# Persist the single-GPU model architecture as JSON.
if save_model:
    json_string = model.to_json()
    with open(save_json_path, "w") as f:
        f.write(json_string)
| [
"matplotlib.pyplot.ylabel",
"tensorflow.multiply",
"keras.backend.gradients",
"numpy.array",
"keras.layers.Dense",
"tensorflow.set_random_seed",
"pytz.timezone",
"os.path.exists",
"keras.backend.image_data_format",
"argparse.ArgumentParser",
"matplotlib.pyplot.xlabel",
"keras.utils.multi_gpu_m... | [((834, 853), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (848, 853), True, 'import numpy as np\n'), ((854, 866), 'random.seed', 'rn.seed', (['(123)'], {}), '(123)\n', (861, 866), True, 'import random as rn\n'), ((867, 890), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(123)'], {}), '(123)\n', (885, 890), True, 'import tensorflow as tf\n'), ((2320, 2345), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2343, 2345), False, 'import argparse\n'), ((2756, 2803), 'importlib.import_module', 'importlib.import_module', (["args['predfilestatic']"], {}), "(args['predfilestatic'])\n", (2779, 2803), False, 'import importlib\n'), ((2877, 2925), 'importlib.import_module', 'importlib.import_module', (["args['predfiledynamic']"], {}), "(args['predfiledynamic'])\n", (2900, 2925), False, 'import importlib\n'), ((3099, 3175), 'os.path.join', 'os.path.join', (['"""../../DATA_predictions/double_prong/t_1_mode"""', "args['output']"], {}), "('../../DATA_predictions/double_prong/t_1_mode', args['output'])\n", (3111, 3175), False, 'import os\n'), ((3282, 3334), 'os.path.join', 'os.path.join', (['master_save_folder', '"""weights_t_1.hdf5"""'], {}), "(master_save_folder, 'weights_t_1.hdf5')\n", (3294, 3334), False, 'import os\n'), ((3352, 3402), 'os.path.join', 'os.path.join', (['master_save_folder', '"""model_t_1.json"""'], {}), "(master_save_folder, 'model_t_1.json')\n", (3364, 3402), False, 'import os\n'), ((3459, 3506), 'os.path.join', 'os.path.join', (['master_data_folder', '"""X_train.hkl"""'], {}), "(master_data_folder, 'X_train.hkl')\n", (3471, 3506), False, 'import os\n'), ((3527, 3580), 'os.path.join', 'os.path.join', (['master_data_folder', '"""sources_train.hkl"""'], {}), "(master_data_folder, 'sources_train.hkl')\n", (3539, 3580), False, 'import os\n'), ((3592, 3637), 'os.path.join', 'os.path.join', (['master_data_folder', '"""X_val.hkl"""'], {}), "(master_data_folder, 'X_val.hkl')\n", (3604, 
3637), False, 'import os\n'), ((3656, 3707), 'os.path.join', 'os.path.join', (['master_data_folder', '"""sources_val.hkl"""'], {}), "(master_data_folder, 'sources_val.hkl')\n", (3668, 3707), False, 'import os\n'), ((3832, 3855), 'keras.backend.set_learning_phase', 'K.set_learning_phase', (['(1)'], {}), '(1)\n', (3852, 3855), True, 'from keras import backend as K\n'), ((4229, 4269), 'keras.layers.Input', 'Input', ([], {'shape': '((num_tsteps,) + input_shape)'}), '(shape=(num_tsteps,) + input_shape)\n', (4234, 4269), False, 'from keras.layers import Input, Dense, Flatten, Lambda, Concatenate, add\n'), ((4332, 4377), 'keras.layers.Lambda', 'Lambda', (['separate_input_masks'], {'trainable': '(False)'}), '(separate_input_masks, trainable=False)\n', (4338, 4377), False, 'from keras.layers import Input, Dense, Flatten, Lambda, Concatenate, add\n'), ((5200, 5225), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0])\n', (5208, 5225), True, 'import numpy as np\n'), ((5251, 5295), 'numpy.expand_dims', 'np.expand_dims', (['layer_loss_weights_static', '(1)'], {}), '(layer_loss_weights_static, 1)\n', (5265, 5295), True, 'import numpy as np\n'), ((5435, 5455), 'numpy.array', 'np.array', (['[1.0, 0.0]'], {}), '([1.0, 0.0])\n', (5443, 5455), True, 'import numpy as np\n'), ((5582, 5627), 'numpy.expand_dims', 'np.expand_dims', (['layer_loss_weights_dynamic', '(1)'], {}), '(layer_loss_weights_dynamic, 1)\n', (5596, 5627), True, 'import numpy as np\n'), ((7527, 7557), 'keras.utils.multi_gpu_model', 'multi_gpu_model', (['model'], {'gpus': 'G'}), '(model, gpus=G)\n', (7542, 7557), False, 'from keras.utils import multi_gpu_model\n'), ((7685, 7787), 'data_utils.SequenceGenerator', 'SequenceGenerator', (['train_path', 'train_source_path', 'num_tsteps'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(train_path, train_source_path, num_tsteps, batch_size=\n batch_size, shuffle=True)\n', (7702, 7787), False, 'from data_utils import SequenceGenerator\n'), ((7799, 
7902), 'data_utils.SequenceGenerator', 'SequenceGenerator', (['val_path', 'val_source_path', 'num_tsteps'], {'batch_size': 'batch_size', 'N_seq': 'num_seq_val'}), '(val_path, val_source_path, num_tsteps, batch_size=\n batch_size, N_seq=num_seq_val)\n', (7816, 7902), False, 'from data_utils import SequenceGenerator\n'), ((8123, 8157), 'keras.callbacks.LearningRateScheduler', 'LearningRateScheduler', (['lr_schedule'], {}), '(lr_schedule)\n', (8144, 8157), False, 'from keras.callbacks import LearningRateScheduler, ModelCheckpoint, TensorBoard\n'), ((8621, 8657), 'pytz.timezone', 'pytz.timezone', (['"""America/Los_Angeles"""'], {}), "('America/Los_Angeles')\n", (8634, 8657), False, 'import pytz\n'), ((8866, 8894), 'keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': 'log_dir'}), '(log_dir=log_dir)\n', (8877, 8894), False, 'from keras.callbacks import LearningRateScheduler, ModelCheckpoint, TensorBoard\n'), ((9277, 9310), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']"], {}), "(history.history['loss'])\n", (9285, 9310), True, 'import matplotlib.pyplot as plt\n'), ((9311, 9348), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss']"], {}), "(history.history['val_loss'])\n", (9319, 9348), True, 'import matplotlib.pyplot as plt\n'), ((9349, 9372), 'matplotlib.pyplot.title', 'plt.title', (['"""model loss"""'], {}), "('model loss')\n", (9358, 9372), True, 'import matplotlib.pyplot as plt\n'), ((9373, 9391), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (9383, 9391), True, 'import matplotlib.pyplot as plt\n'), ((9392, 9411), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (9402, 9411), True, 'import matplotlib.pyplot as plt\n'), ((9412, 9458), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'val']"], {'loc': '"""upper left"""'}), "(['train', 'val'], loc='upper left')\n", (9422, 9458), True, 'import matplotlib.pyplot as plt\n'), ((9459, 9469), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9467, 9469), True, 'import matplotlib.pyplot as plt\n'), ((9551, 9563), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9561, 9563), True, 'import matplotlib.pyplot as plt\n'), ((9564, 9606), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['gradient_norm']"], {}), "(history.history['gradient_norm'])\n", (9572, 9606), True, 'import matplotlib.pyplot as plt\n'), ((9607, 9653), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_gradient_norm']"], {}), "(history.history['val_gradient_norm'])\n", (9615, 9653), True, 'import matplotlib.pyplot as plt\n'), ((9654, 9680), 'matplotlib.pyplot.title', 'plt.title', (['"""gradient norm"""'], {}), "('gradient norm')\n", (9663, 9680), True, 'import matplotlib.pyplot as plt\n'), ((9681, 9704), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""grad_norm"""'], {}), "('grad_norm')\n", (9691, 9704), True, 'import matplotlib.pyplot as plt\n'), ((9705, 9724), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (9715, 9724), True, 'import matplotlib.pyplot as plt\n'), ((9725, 9775), 'matplotlib.pyplot.legend', 'plt.legend', (["['train_g', 'val_g']"], {'loc': '"""upper left"""'}), "(['train_g', 'val_g'], loc='upper left')\n", (9735, 9775), True, 'import matplotlib.pyplot as plt\n'), ((9775, 9785), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9783, 9785), True, 'import matplotlib.pyplot as plt\n'), ((9869, 9881), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9879, 9881), True, 'import matplotlib.pyplot as plt\n'), ((9882, 9917), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['w_norm']"], {}), "(history.history['w_norm'])\n", (9890, 9917), True, 'import matplotlib.pyplot as plt\n'), ((9918, 9957), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_w_norm']"], {}), "(history.history['val_w_norm'])\n", (9926, 9957), True, 'import matplotlib.pyplot as plt\n'), ((9958, 9982), 
'matplotlib.pyplot.title', 'plt.title', (['"""weight norm"""'], {}), "('weight norm')\n", (9967, 9982), True, 'import matplotlib.pyplot as plt\n'), ((9983, 10003), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""w_norm"""'], {}), "('w_norm')\n", (9993, 10003), True, 'import matplotlib.pyplot as plt\n'), ((10004, 10023), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (10014, 10023), True, 'import matplotlib.pyplot as plt\n'), ((10024, 10074), 'matplotlib.pyplot.legend', 'plt.legend', (["['train_w', 'val_w']"], {'loc': '"""upper left"""'}), "(['train_w', 'val_w'], loc='upper left')\n", (10034, 10074), True, 'import matplotlib.pyplot as plt\n'), ((10075, 10085), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10083, 10085), True, 'import matplotlib.pyplot as plt\n'), ((1404, 1452), 'tensorflow.expand_dims', 'tf.expand_dims', (['tensors[:, :, :, :, -1]'], {'axis': '(-1)'}), '(tensors[:, :, :, :, -1], axis=-1)\n', (1418, 1452), True, 'import tensorflow as tf\n'), ((1467, 1518), 'tensorflow.multiply', 'tf.multiply', (['tensors[:, :, :, :, 0:2]', 'dynamic_mask'], {}), '(tensors[:, :, :, :, 0:2], dynamic_mask)\n', (1478, 1518), True, 'import tensorflow as tf\n'), ((1532, 1587), 'tensorflow.multiply', 'tf.multiply', (['tensors[:, :, :, :, 0:2]', '(1 - dynamic_mask)'], {}), '(tensors[:, :, :, :, 0:2], 1 - dynamic_mask)\n', (1543, 1587), True, 'import tensorflow as tf\n'), ((3184, 3218), 'os.path.exists', 'os.path.exists', (['master_save_folder'], {}), '(master_save_folder)\n', (3198, 3218), False, 'import os\n'), ((3225, 3256), 'os.makedirs', 'os.makedirs', (['master_save_folder'], {}), '(master_save_folder)\n', (3236, 3256), False, 'import os\n'), ((4020, 4041), 'keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (4039, 4041), True, 'from keras import backend as K\n'), ((5346, 5370), 'numpy.ones', 'np.ones', (['(num_tsteps, 1)'], {}), '((num_tsteps, 1))\n', (5353, 5370), True, 'import numpy as np\n'), ((5671, 
5695), 'numpy.ones', 'np.ones', (['(num_tsteps, 1)'], {}), '((num_tsteps, 1))\n', (5678, 5695), True, 'import numpy as np\n'), ((6420, 6429), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (6427, 6429), False, 'from keras.layers import Input, Dense, Flatten, Lambda, Concatenate, add\n'), ((6828, 6837), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (6835, 6837), False, 'from keras.layers import Input, Dense, Flatten, Lambda, Concatenate, add\n'), ((7019, 7032), 'keras.layers.Concatenate', 'Concatenate', ([], {}), '()\n', (7030, 7032), False, 'from keras.layers import Input, Dense, Flatten, Lambda, Concatenate, add\n'), ((7084, 7103), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (7093, 7103), True, 'import tensorflow as tf\n'), ((7117, 7153), 'keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'errors'}), '(inputs=inputs, outputs=errors)\n', (7122, 7153), False, 'from keras.models import Model, model_from_json\n'), ((7990, 8016), 'numpy.amax', 'np.amax', (['train_generator.X'], {}), '(train_generator.X)\n', (7997, 8016), True, 'import numpy as np\n'), ((8018, 8044), 'numpy.amin', 'np.amin', (['train_generator.X'], {}), '(train_generator.X)\n', (8025, 8044), True, 'import numpy as np\n'), ((8196, 8285), 'alt_model_checkpoint.keras.AltModelCheckpoint', 'AltModelCheckpoint', (['save_weights_path', 'model'], {'monitor': '"""val_loss"""', 'save_best_only': '(True)'}), "(save_weights_path, model, monitor='val_loss',\n save_best_only=True)\n", (8214, 8285), False, 'from alt_model_checkpoint.keras import AltModelCheckpoint\n'), ((9482, 9530), 'os.path.join', 'os.path.join', (['master_save_folder', '"""loss_t_1.png"""'], {}), "(master_save_folder, 'loss_t_1.png')\n", (9494, 9530), False, 'import os\n'), ((9798, 9850), 'os.path.join', 'os.path.join', (['master_save_folder', '"""gradient_t_1.png"""'], {}), "(master_save_folder, 'gradient_t_1.png')\n", (9810, 9850), False, 'import os\n'), ((10098, 10148), 'os.path.join', 
'os.path.join', (['master_save_folder', '"""weight_t_1.png"""'], {}), "(master_save_folder, 'weight_t_1.png')\n", (10110, 10148), False, 'import os\n'), ((10218, 10269), 'os.path.join', 'os.path.join', (['master_save_folder', '"""history_t_1.hkl"""'], {}), "(master_save_folder, 'history_t_1.hkl')\n", (10230, 10269), False, 'import os\n'), ((1266, 1288), 'keras.backend.abs', 'K.abs', (['(y_pred - y_true)'], {}), '(y_pred - y_true)\n', (1271, 1288), True, 'from keras import backend as K\n'), ((1942, 1971), 'keras.backend.name_scope', 'K.name_scope', (['"""gradient_norm"""'], {}), "('gradient_norm')\n", (1954, 1971), True, 'from keras import backend as K\n'), ((1989, 2043), 'keras.backend.gradients', 'K.gradients', (['model.total_loss', 'model.trainable_weights'], {}), '(model.total_loss, model.trainable_weights)\n', (2000, 2043), True, 'from keras import backend as K\n'), ((2162, 2184), 'keras.backend.name_scope', 'K.name_scope', (['"""w_norm"""'], {}), "('w_norm')\n", (2174, 2184), True, 'from keras import backend as K\n'), ((6283, 6308), 'keras.layers.Dense', 'Dense', (['(1)'], {'trainable': '(False)'}), '(1, trainable=False)\n', (6288, 6308), False, 'from keras.layers import Input, Dense, Flatten, Lambda, Concatenate, add\n'), ((6688, 6713), 'keras.layers.Dense', 'Dense', (['(1)'], {'trainable': '(False)'}), '(1, trainable=False)\n', (6693, 6713), False, 'from keras.layers import Input, Dense, Flatten, Lambda, Concatenate, add\n'), ((8958, 8999), 'math.ceil', 'math.ceil', (['(samples_per_epoch / batch_size)'], {}), '(samples_per_epoch / batch_size)\n', (8967, 8999), False, 'import math\n'), ((9129, 9164), 'math.ceil', 'math.ceil', (['(num_seq_val / batch_size)'], {}), '(num_seq_val / batch_size)\n', (9138, 9164), False, 'import math\n'), ((6350, 6361), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (6358, 6361), True, 'import numpy as np\n'), ((6549, 6560), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (6557, 6560), True, 'import numpy as np\n'), ((6756, 
6767), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (6764, 6767), True, 'import numpy as np\n'), ((6952, 6963), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (6960, 6963), True, 'import numpy as np\n'), ((8778, 8801), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8799, 8801), False, 'import datetime\n'), ((2077, 2088), 'keras.backend.square', 'K.square', (['g'], {}), '(g)\n', (2085, 2088), True, 'from keras import backend as K\n'), ((2263, 2274), 'keras.backend.square', 'K.square', (['w'], {}), '(w)\n', (2271, 2274), True, 'from keras import backend as K\n')] |
# -------------------------------------------------------------------------------------------------
# scientific
from PyQuantum.Tools.PlotBuilder2D import *
from math import sqrt, exp, pi
import plotly.graph_objs as go
import numpy as np
# -------------------------------------------------------------------------------------------------
# system
from copy import copy
# -------------------------------------------------------------------------------------------------
# PyQuantum.Tools
from PyQuantum.Tools.Distributions import Expectation, GeometricDistribution, Variance
from PyQuantum.Tools.Pickle import *
from PyQuantum.Tools.Print import *
from PyQuantum.Tools.Units import *
# -------------------------------------------------------------------------------------------------
# =================================================================================================
class Sink:
    """Pairs a series of sink probabilities P with their time stamps T.

    Both sequences are shallow-copied on assignment, so later mutation of
    the caller's lists does not leak into the stored data.
    """

    def __init__(self, P=None, T=None):
        # None sentinels instead of mutable default arguments ([]): with a
        # shared default list the idiom is fragile; a fresh list per call
        # keeps instances fully independent.
        self.data = {
            'P': copy(P) if P is not None else [],
            'T': copy(T) if T is not None else [],
        }

    def set_P(self, P):
        """Replace the stored probability series with a copy of P."""
        self.data['P'] = copy(P)

    def set_T(self, T):
        """Replace the stored time-stamp series with a copy of T."""
        self.data['T'] = copy(T)

    def print(self):
        """Print one 'time: probability' line per stored sample."""
        for i in range(len(self.data['P'])):
            print("{:3f}".format(self.data['T'][i]),
                  ': ', self.data['P'][i], sep='')
        print()
# =================================================================================================
# w_0 = 't_11_000'
# w_0 = '10_000'
# Tag selecting which pickled run to load (other candidate tags above).
w_0 = '10_D'
# w_0 = '10_0D'
# w_0 = '10_1D'
# w_0 = '10_2D'
# w_0 = 't0'
# Directory holding the pickled simulation results.
# path = 'sink3/1ms_l001g'
# path = 'sink/1ms_l001g'
path = 'sink3/0_1ms_01ns_l10g_l10g'
# NOTE(review): the next line is executed twice — the second load is redundant.
T_list = pickle_load(path+'/T_list_' + w_0 + '.pkl')
T_list = pickle_load(path+'/T_list_' + w_0 + '.pkl')
# Drop the first sample of each series.
T = T_list[1:]
# p_sink_list = pickle_load(path+'/sink_list_' + w_0 + '_12.pkl')
p_sink_list = pickle_load(path+'/sink_list_' + w_0 + '.pkl')
p_sink = p_sink_list[1:]
data = Sink(P=p_sink, T=T)
# print(len(p_sink_list))
# print(p_sink_list[1])
# print(time_unit_full(T_list[1]))
# exit(0)
# -------------------------------------------------------------------------------------------------
# Subsampling stride: keep every dt-th sample.
dt = 100
# -------------------------------------------------------------------------------------------------
T_dt = T[dt-1::dt]
p_sink_dt = p_sink[dt-1::dt]
data_dt = Sink(p_sink_dt, T_dt)
# -------------------------------------------------------------------------------------------------
# Fit a geometric distribution to the subsampled sink probabilities, then
# compute expectation and variance of the click time from it.
p_, t_ = GeometricDistribution(data_dt.data['P'], T)
data_theor = Sink(p_, t_)
E, E_normed = Expectation(data_theor.data['P'], data_theor.data['T'])
D = Variance(data_theor.data['P'], data_theor.data['T'], E)
print(data_theor.data['T'])
print('E = ', E)
print('E = ', E*dt*1e6)
print('D = ', D*dt*1e6)
print('sigma = ', sqrt(D))
print('sigma = ', time_unit_full(sqrt(D) * dt))
# exit(0)
# -------------------------------------------------------------------------------------------------
cprint('THEORY:', color='yellow', attrs=['bold'])
# Theoretical mean click time, rescaled by the subsampling stride.
print('T_click_avg: ', time_unit_full(E * dt))
# =================================================================================================
# print("abs_err:", abs(P_M * dt - T_click_avg))
# =================================================================================================
# print(E*dt*1e6)
# exit(0)
import matplotlib.pyplot as plt
import numpy as np
from math import sqrt, pi
# dt = 500
# states = {
# 't0': {
# 'N': 1000,
# 'x': [],
# 'y': [],
# 'x0': 5957.7974/1000.0,
# },
# 's2': {
# 'N': 1000,
# 'x': [],
# 'y': [],
# 'x0': 5172.5059/1000.0,
# }
# }
# x1 = 0
# x2 = 6.5
# # x1 = 2.5
# # x1 = 20
# # x2 = 21.5
# # x2 = 4.5
# dx = 0.01
def gauss(x, sigma, x0):
    """Evaluate the normal pdf N(x0, sigma^2) at each point of x.

    x may be a scalar or any array-like; a numpy array is returned.
    """
    pts = np.array(x)
    norm_const = 1.0 / (sigma * sqrt(2 * pi))
    return norm_const * np.exp(-((pts - x0) ** 2) / (2 * sigma ** 2))
# import numpy as np
# import matplotlib.pyplot as plt
# Fixing random state for reproducibility
# np.random.seed(19680801)
# x = np.arange(x1, x2+dx, dx)
# NOTE(review): ad-hoc rescaling of the variance by 1000 — presumably a unit
# conversion, but the factor is not derived anywhere; confirm.
D *= 1000
# Mean and standard deviation of the click time in microseconds.
mu, sigma = E*1e6*dt, sqrt(D)*1e6*dt
# y = gauss(x, sigma, mu)
print(mu, sigma)
print(mu*1e3, sigma*1e3)
# NOTE(review): this exit() makes everything below this line dead code.
exit(0)
# mu, sigma = 100, 15
# y = mu + sigma * np.random.randn(10000)
# print(y)
# the histogram of the data
# n, bins, patches = plt.hist(y, 500, density=True, facecolor='g', alpha=0.75)
# plt.xlabel('Smarts')
# plt.ylabel('Probability')
# plt.title('Histogram of IQ')
# plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
# # plt.xlim(40, 160)
# # plt.ylim(0, 0.03)
# plt.grid(True)
# plt.show()
# Copy to clipboard
import numpy as np
import matplotlib.pyplot as plt
# NOTE(review): unreachable — exit(0) above terminates the script first.
# Histogram of the fitted gaussian over [x1, x2].
x1 = 0
x2 = 30
dx = 1e-3
x = np.arange(x1, x2+dx, dx)
# Rescale to milliseconds-like units before plotting.
sigma*=1e3
mu*=1e3
print(sigma, mu)
age = gauss(x, sigma, mu)
# age = np.random.normal(loc=1, size=1000)  # a normal distribution
# salaray = np.random.normal(loc=-1, size=10000)  # a normal distribution
print(age)
_, bins, _ = plt.hist(age, bins=1000, range=[x1, x2], density=True)
# _ = plt.hist(salaray, bins=bins, alpha=0.5, density=True)
plt.show()
exit(0)
# N_points = 100000
# n_bins = 20
# # Generate a normal distribution, center at x=0 and y=5
# x = np.random.randn(N_points)
# y = .4 * x + np.random.randn(100000) + 5
# fig, axs = plt.plot(1, sharey=True, tight_layout=True)
# # We can set the number of bins with the `bins` kwarg
# axs[0].hist(x, bins=n_bins)
# # axs[1].hist(y, bins=n_bins)
# plt.show()
# import matplotlib.pyplot as plt
# import numpy as np
# def histogram(data, n_bins, cumulative=False, x_label = "", y_label = "", title = ""):
# _, ax = plt.subplots()
# ax.hist(data, n_bins = n_bins, cumulative = cumulative, color = '#539caf')
# ax.set_ylabel(y_label)
# ax.set_xlabel(x_label)
# ax.set_title(title)
# histogram([1,2,3], n_bins=2)
# ==============
# NOTE(review): unreachable — both exit(0) calls above terminate the script
# before this block runs. Per-state plotting data: N samples, x/y curve
# points, and the curve center x0.
states = {
    't0': {
        'N': 1000,
        'x': [],
        'y': [],
        'x0': 2.245,
    },
    # 's2': {
    #     'N': 1000,
    #     'x': [],
    #     'y': [],
    #     'x0': 5172.5059/1000.0,
    # }
}
# Plot range and step.
x1 = 0
x2 = 4
# x1 = 2.5
# x1 = 20
# x2 = 21.5
# x2 = 4.5
dx = 0.0001
# sigma = sqrt(D)
# print(time_unit_full( * dt))
# print(D*1e12)
# print(time_unit_full(sqrt(D)*dt))
# exit(0)
# NOTE(review): hard-coded width sqrt(1000); alternatives are commented below.
sigma = sqrt(1000)
for k in states.keys():
    # sigma = 1.097
    # sigma = 1.0/sqrt(states[k]['N'])
    x0 = states[k]['x0']
    # Sample the gaussian curve N(x0, sigma^2) over [x1, x2].
    for x in np.arange(x1, x2+dx, dx):
        states[k]['x'].append(x)
        states[k]['y'].append(1.0/(sigma*sqrt(2*pi)) *
                              exp(-(x-x0)**2 / (2*sigma**2)))
# NOTE(review): rebinds the name plt (previously matplotlib.pyplot) to a
# PlotBuilder2D instance — confusing but intentional here.
plt = PlotBuilder2D({
    # 'title': 'f(t), T' + sub('click') + ' = ' + str(dt) + ' ns',
    'title': 'N(μ,σ'+sup('2')+'), T' + sub('click') + ' = ' + str(dt) + ' ns',
    'x_title': 't, mks',
    'y_title': 'N(μ,σ'+sup('2')+')',
    'html': 'gauss.html',
    'to_file': False,
    'online': False,
    'data': [
        go.Scatter(
            x=states['t0']['x'],
            y=states['t0']['y'],
            name='<b>|' + 't' + sub(0) + '〉'+'</b>',
        ),
        # go.Scatter(
        #     x=states['s2']['x'],
        #     y=states['s2']['y'],
        #     name='<b>|' + 's' + sub(2) + '〉'+'</b>',
        # ),
    ],
    'as_annotation': True,
}
)
plt.make_plot()
| [
"math.exp",
"matplotlib.pyplot.hist",
"PyQuantum.Tools.Distributions.GeometricDistribution",
"PyQuantum.Tools.Distributions.Expectation",
"math.sqrt",
"matplotlib.pyplot.make_plot",
"numpy.exp",
"numpy.array",
"PyQuantum.Tools.Distributions.Variance",
"copy.copy",
"numpy.arange",
"matplotlib.p... | [((2625, 2668), 'PyQuantum.Tools.Distributions.GeometricDistribution', 'GeometricDistribution', (["data_dt.data['P']", 'T'], {}), "(data_dt.data['P'], T)\n", (2646, 2668), False, 'from PyQuantum.Tools.Distributions import Expectation, GeometricDistribution, Variance\n'), ((2711, 2766), 'PyQuantum.Tools.Distributions.Expectation', 'Expectation', (["data_theor.data['P']", "data_theor.data['T']"], {}), "(data_theor.data['P'], data_theor.data['T'])\n", (2722, 2766), False, 'from PyQuantum.Tools.Distributions import Expectation, GeometricDistribution, Variance\n'), ((2771, 2826), 'PyQuantum.Tools.Distributions.Variance', 'Variance', (["data_theor.data['P']", "data_theor.data['T']", 'E'], {}), "(data_theor.data['P'], data_theor.data['T'], E)\n", (2779, 2826), False, 'from PyQuantum.Tools.Distributions import Expectation, GeometricDistribution, Variance\n'), ((4893, 4919), 'numpy.arange', 'np.arange', (['x1', '(x2 + dx)', 'dx'], {}), '(x1, x2 + dx, dx)\n', (4902, 4919), True, 'import numpy as np\n'), ((5147, 5201), 'matplotlib.pyplot.hist', 'plt.hist', (['age'], {'bins': '(1000)', 'range': '[x1, x2]', 'density': '(True)'}), '(age, bins=1000, range=[x1, x2], density=True)\n', (5155, 5201), True, 'import matplotlib.pyplot as plt\n'), ((5262, 5272), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5270, 5272), True, 'import matplotlib.pyplot as plt\n'), ((6445, 6455), 'math.sqrt', 'sqrt', (['(1000)'], {}), '(1000)\n', (6449, 6455), False, 'from math import sqrt, pi\n'), ((7432, 7447), 'matplotlib.pyplot.make_plot', 'plt.make_plot', ([], {}), '()\n', (7445, 7447), True, 'import matplotlib.pyplot as plt\n'), ((2939, 2946), 'math.sqrt', 'sqrt', (['D'], {}), '(D)\n', (2943, 2946), False, 'from math import sqrt, pi\n'), ((4038, 4049), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (4046, 4049), True, 'import numpy as np\n'), ((6578, 6604), 'numpy.arange', 'np.arange', (['x1', '(x2 + dx)', 'dx'], {}), '(x1, x2 + dx, dx)\n', (6587, 6604), True, 'import 
numpy as np\n'), ((1067, 1074), 'copy.copy', 'copy', (['P'], {}), '(P)\n', (1071, 1074), False, 'from copy import copy\n'), ((1125, 1132), 'copy.copy', 'copy', (['T'], {}), '(T)\n', (1129, 1132), False, 'from copy import copy\n'), ((4087, 4128), 'numpy.exp', 'np.exp', (['(-(x - x0) ** 2 / (2 * sigma ** 2))'], {}), '(-(x - x0) ** 2 / (2 * sigma ** 2))\n', (4093, 4128), True, 'import numpy as np\n'), ((972, 979), 'copy.copy', 'copy', (['P'], {}), '(P)\n', (976, 979), False, 'from copy import copy\n'), ((998, 1005), 'copy.copy', 'copy', (['T'], {}), '(T)\n', (1002, 1005), False, 'from copy import copy\n'), ((2981, 2988), 'math.sqrt', 'sqrt', (['D'], {}), '(D)\n', (2985, 2988), False, 'from math import sqrt, pi\n'), ((4312, 4319), 'math.sqrt', 'sqrt', (['D'], {}), '(D)\n', (4316, 4319), False, 'from math import sqrt, pi\n'), ((4073, 4085), 'math.sqrt', 'sqrt', (['(2 * pi)'], {}), '(2 * pi)\n', (4077, 4085), False, 'from math import sqrt, pi\n'), ((6722, 6760), 'math.exp', 'exp', (['(-(x - x0) ** 2 / (2 * sigma ** 2))'], {}), '(-(x - x0) ** 2 / (2 * sigma ** 2))\n', (6725, 6760), False, 'from math import sqrt, exp, pi\n'), ((6678, 6690), 'math.sqrt', 'sqrt', (['(2 * pi)'], {}), '(2 * pi)\n', (6682, 6690), False, 'from math import sqrt, pi\n')] |
import numpy as np
from optparse import OptionParser
# get the command line arguments
# Command-line interface: input/output paths and filter thresholds.
# NOTE(review): the -i help text contains a duplicated "of of" (left as-is —
# runtime string).
parser = OptionParser(description="python script to create pvalue and log2(foldchange) filtered ")
parser.add_option('-i', '--input_file',
                  type=str,
                  default="results/plotSelection.txt",
                  metavar="",
                  help = "path and name of of the input file, being the output file of the R-script DESeq2.R, default = results/plotSelection.txt")
parser.add_option('-f', '--helper_file',
                  type=str,
                  default="result/helperFile.csv",
                  metavar="",
                  help = "path and name of the helper file, a tab delimited file containing one column samples and a column conditions, default = result/helperFile.csv")
parser.add_option('-o', '--output_file',
                  type=str,
                  default="result/plotSelection.txt",
                  metavar="",
                  help = "path and name of the output file a tab delimimited file containing normalised reads of significantly differentially expressed genes of all the samples or averaged to the conditions, default = result/plotSelection.txt")
parser.add_option('-v', '--minimum_foldchange',
                  type=float,
                  default=2,
                  metavar="",
                  help = "minimum log2(fold_change) in any combination of conditions; integer > 1, default = 2")
parser.add_option('-r', '--minimum_reads',
                  type=int,
                  default=100,
                  metavar="",
                  help = "minimum number of reads of all samples together; integer >= 0, default = 100")
parser.add_option('-p', '--maximum_pvalue',
                  type=float,
                  default=0.05,
                  metavar="",
                  help = "maximum exepted adjusted pvalue; 0.0 to 1.0, default = 0.05")
parser.add_option('-a', '--average_samples',
                  type=str,
                  default="yes",
                  metavar="",
                  help = "output needs to contain averages of conditions; yes or no, default = yes")
(options, args) = parser.parse_args()
# Parse the helper file: samples maps 1-based sample index -> condition name;
# conditions is the ordered list of unique condition names; x counts samples.
# NOTE(review): the help text says "tab delimited" but lines are split on
# commas — confirm the actual helper-file format.
helper = open(options.helper_file, "r")
samples = {}
conditions = []
x=0
for l in helper:
    l = l.rstrip()
    if len(l) > 0:
        x += 1
        l = l.split(",")
        name = l[1]
        samples[x] = name
        if name not in conditions:
            conditions.append(name)
helper.close()
# function to average the samples within a condition
def getAverages(counts):
    """Average the per-sample counts of each condition.

    counts[0] is the gene identifier and is kept as the first element of
    the returned list; each following element is the string-formatted mean
    of the samples belonging to one condition (in `conditions` order).
    """
    global samples, conditions
    grouped = {}
    for idx, cond in samples.items():
        grouped.setdefault(cond, []).append(float(counts[idx]))
    averaged = [counts[0]]
    for cond in conditions:
        averaged.append(str(np.mean(grouped[cond])))
    return averaged
# Main filter pass: keep genes whose best adjusted p-value passes the cutoff
# and whose fold change exceeds the threshold in either direction.
inputData = open(options.input_file, "r")
outAll = open(options.output_file, "w")
totalBoth = 0
count = 0
min_reads = options.minimum_reads
foldchange= options.minimum_foldchange
pvalue = options.maximum_pvalue
avarage = options.average_samples.upper()
for l in inputData:
    count += 1
    if l.startswith("genes") == False and len(l) > 5:
        # NOTE(review): str.replace substitutes EVERY occurrence on the line,
        # so a gene id containing "NA" would also be mangled — confirm ids.
        l = l.replace("NA", "1") # replace NA p-values with 1
        l = l.replace("#VALUE!", "0") # replace NA fold changes with 0
        f = l.rstrip() # strip trailing whitespace and line break
        f = f.split("\t") # split the row into tab-separated fields
        if len(f) > x+1:
            # Columns after the x count columns come in groups of 6 per
            # condition pair: fold change at offset +2, adjusted p at +6.
            fc = [float(f[i]) for i in range(x+2, len(f), 6)] # fold changes
            pv = [float(f[i]) for i in range(x+6, len(f), 6)] # adjusted p-values
            counts = [float(f[i]) for i in range(1,x+1)]
            if sum(counts) > min_reads: # enough total reads across samples
                for a1, a2 in zip(fc,pv):
                    # Keep the gene on its best (minimum) adjusted p-value
                    # when the fold change is large enough, up- or down.
                    if a1 > foldchange and a2 < pvalue and a2 == min(pv):
                        if avarage == "YES":
                            line = "\t".join(getAverages(f[:x+1]))
                        else:
                            line = "\t".join(f[:x+1])
                        outAll.write(line+"\n")
                        totalBoth += 1
                        break
                    elif a1 < -1*foldchange and a2 < pvalue and a2 == min(pv):
                        if avarage == "YES":
                            line = "\t".join(getAverages(f[:x+1]))
                        else:
                            line = "\t".join(f[:x+1])
                        outAll.write(line+"\n")
                        totalBoth += 1
                        break
        else:
            print("No fold changes and p-values were found. Try rerunning the DESeq2.R with a higher maximum fraction (to be adjusted in config.yaml)")
    else:
        # Header row: re-emit the gene column plus either the condition
        # names (averaged output) or the original sample columns.
        if len(l) > 10:
            if avarage == "YES":
                line = "\t".join(conditions)
            else:
                line = "\t".join(l.split("\t")[1:x+1])
            outAll.write(l.split("\t")[0] + "\t" + line + "\n")
print(f"Total number of genes is: {str(count-1)}.")
print(f"Number of genes kept for plots is: {str(totalBoth)}.")
inputData.close()
outAll.close()
| [
"numpy.mean",
"optparse.OptionParser"
] | [((102, 196), 'optparse.OptionParser', 'OptionParser', ([], {'description': '"""python script to create pvalue and log2(foldchange) filtered """'}), "(description=\n 'python script to create pvalue and log2(foldchange) filtered ')\n", (114, 196), False, 'from optparse import OptionParser\n'), ((3038, 3054), 'numpy.mean', 'np.mean', (['sets[c]'], {}), '(sets[c])\n', (3045, 3054), True, 'import numpy as np\n')] |
# Same as script 23, but with the BAN channel.
from sys_simulator.channels import BANChannel
from sys_simulator import general as gen
from sys_simulator.q_learning.environments.completeEnvironment5 \
import CompleteEnvironment5
from sys_simulator.dqn.agents.dqnAgent import ExternalDQNAgent
from sys_simulator.dqn.externalDQNFramework import ExternalDQNFramework
from sys_simulator.parameters.parameters import \
EnvironmentParameters, TrainingParameters, DQNAgentParameters
from sys_simulator.q_learning import rewards as reward_functions
import torch
import numpy as np
import os
def run():
    """Train external-DQN power-control agents for D2D pairs over a BAN channel.

    Builds a CompleteEnvironment5 with one MUE and a random number of D2D
    agents per episode, runs the DQN training loop, and saves the trained
    policy network plus the (mue, d2d) spectral-efficiency history to disk.
    Requires a CUDA device (torch.device('cuda')).
    """
    # --- scenario constants (dB / dBm values converted to linear below) ---
    n_mues = 1  # number of mues
    n_d2d = 2  # number of d2d pairs
    n_rb = n_mues   # number of RBs
    bs_radius = 500  # bs radius in m
    rb_bandwidth = 180*1e3  # rb bandwidth in Hz
    d2d_pair_distance = 50  # d2d pair distance in m
    p_max = 23  # max tx power in dBm
    noise_power = -116  # noise power per RB in dBm
    bs_gain = 17    # macro bs antenna gain in dBi
    user_gain = 4   # user antenna gain in dBi
    sinr_threshold_train = 6  # mue sinr threshold in dB for training
    mue_margin = 2  # mue margin in dB
    # conversions from dB to linear power (dBm -> dBW first for p_max/noise)
    p_max = p_max - 30
    p_max = gen.db_to_power(p_max)
    noise_power = noise_power - 30
    noise_power = gen.db_to_power(noise_power)
    bs_gain = gen.db_to_power(bs_gain)
    user_gain = gen.db_to_power(user_gain)
    sinr_threshold_train = gen.db_to_power(sinr_threshold_train)
    mue_margin = gen.db_to_power(mue_margin)
    # --- q-learning / DQN hyperparameters ---
    STEPS_PER_EPISODE = 25
    MAX_NUM_EPISODES = 480  # medium training
    # MAX_NUM_EPISODES = 1  # testing
    EPSILON_MIN = 0.05
    EPSILON_DECAY = 3.35*1e-4  # medium training
    GAMMA = 0.98  # discount factor
    C = 8  # C constant for the improved reward function
    TARGET_UPDATE = 10  # episodes between target-network syncs
    MAX_NUMBER_OF_AGENTS = 10
    max_d2d = MAX_NUMBER_OF_AGENTS
    # --- environment / framework construction ---
    env_params = EnvironmentParameters(
        rb_bandwidth, d2d_pair_distance, p_max, noise_power,
        bs_gain, user_gain, sinr_threshold_train,
        n_mues, n_d2d, n_rb, bs_radius, c_param=C, mue_margin=mue_margin
    )
    params = TrainingParameters(MAX_NUM_EPISODES, STEPS_PER_EPISODE)
    agent_params = DQNAgentParameters(
        EPSILON_MIN, EPSILON_DECAY, 1, 10000, 512, GAMMA
    )
    framework = ExternalDQNFramework(agent_params)
    reward_function = reward_functions.dis_reward_tensor
    channel = BANChannel()
    env = CompleteEnvironment5(env_params, reward_function, channel)
    best_reward = float('-inf')
    device = torch.device('cuda')
    mue_spectral_eff_bag = list()
    d2d_spectral_eff_bag = list()
    # candidate numbers of agents: 1..max_d2d
    aux_range = range(max_d2d+1)[1:]
    epsilon = agent_params.start_epsilon
    for episode in range(params.max_episodes):
        # discrete tx-power action set (descending), lowest action = off
        # actions = np.linspace(1e-4, 1e-2, 5)[::-1] * p_max
        actions = np.linspace(1e-4, 8e-3, 5)[::-1] * p_max
        # actions = [i*0.82*p_max/5/1000 for i in range(5)]  # best result
        actions[0] = 0
        n_agents = np.random.choice(aux_range)
        agents = [ExternalDQNAgent(agent_params, actions)
                  for _ in range(n_agents)]  # 1 agent per d2d tx
        # counts/awaits implement per-agent action-holding for a random
        # number of steps (2-4) before a new action is chosen
        counts = np.zeros(len(agents))
        awaits = list()
        await_steps = [2, 3, 4]
        for a in agents:
            awaits.append(np.random.choice(await_steps))
            a.set_action(torch.tensor(0).long().cuda(), a.actions[0])
            a.set_epsilon(epsilon)
        env.build_scenario(agents)
        done = False
        obs = [env.get_state(a) for a in agents]
        total_reward = 0.0
        i = 0
        bag = list()
        while not done:
            if i >= params.steps_per_episode:
                break
            else:
                actions = torch.zeros([len(agents)], device=device)
                for j, agent in enumerate(agents):
                    if counts[j] < awaits[j]:
                        counts[j] += 1
                    else:
                        agent.get_action(framework, obs[j])
                        actions[j] = agent.action_index
                        counts[j] = 0
                        awaits[j] = np.random.choice(await_steps)
                next_obs, rewards, done = env.step(agents)
                i += 1
                # NOTE(review): framework.learn() runs once per agent per
                # step (inside this loop) — confirm that is intentional.
                for j, agent in enumerate(agents):
                    framework.replay_memory.push(obs[j], actions[j],
                                                 next_obs[j], rewards[j])
                    framework.learn()
                obs = next_obs
                total_reward += torch.sum(rewards)
            bag.append(total_reward.item())
            # NOTE(review): obs = next_obs is assigned twice (also above).
            obs = next_obs
        # NOTE(review): placed inside the episode loop after the while, so
        # the target net syncs once per qualifying episode.
        if episode % TARGET_UPDATE == 0:
            framework.target_net.load_state_dict(
                framework.policy_net.state_dict()
            )
        if total_reward > best_reward:
            best_reward = total_reward
        print("Episode#:{} sum reward:{} best_sum_reward:{} eps:{}".format(
            episode, total_reward, best_reward, agents[0].epsilon)
        )
        # mue spectral eff
        mue_spectral_eff_bag.append(env.mue_spectral_eff)
        # average d2d spectral eff
        d2d_spectral_eff_bag.append(env.d2d_spectral_eff/env.params.n_d2d)
        epsilon = agents[0].epsilon
    # collect the per-episode spectral efficiencies
    mue_spectral_effs = mue_spectral_eff_bag
    d2d_spectral_effs = d2d_spectral_eff_bag
    spectral_effs = zip(mue_spectral_effs, d2d_spectral_effs)
    # saving the data and the model
    cwd = os.getcwd()
    filename = gen.path_leaf(__file__)
    filename = filename.split('.')[0]
    filename_model = filename
    filename = f'{cwd}/data/dql/(unknown).pt'
    torch.save(framework.policy_net.state_dict(),
               f'{cwd}/models/dql/{filename_model}.pt')
    torch.save(spectral_effs, filename)
| [
"torch.device",
"sys_simulator.channels.BANChannel",
"numpy.random.choice",
"sys_simulator.parameters.parameters.EnvironmentParameters",
"os.getcwd",
"torch.tensor",
"sys_simulator.q_learning.environments.completeEnvironment5.CompleteEnvironment5",
"numpy.linspace",
"sys_simulator.general.path_leaf"... | [((1214, 1236), 'sys_simulator.general.db_to_power', 'gen.db_to_power', (['p_max'], {}), '(p_max)\n', (1229, 1236), True, 'from sys_simulator import general as gen\n'), ((1290, 1318), 'sys_simulator.general.db_to_power', 'gen.db_to_power', (['noise_power'], {}), '(noise_power)\n', (1305, 1318), True, 'from sys_simulator import general as gen\n'), ((1333, 1357), 'sys_simulator.general.db_to_power', 'gen.db_to_power', (['bs_gain'], {}), '(bs_gain)\n', (1348, 1357), True, 'from sys_simulator import general as gen\n'), ((1374, 1400), 'sys_simulator.general.db_to_power', 'gen.db_to_power', (['user_gain'], {}), '(user_gain)\n', (1389, 1400), True, 'from sys_simulator import general as gen\n'), ((1428, 1465), 'sys_simulator.general.db_to_power', 'gen.db_to_power', (['sinr_threshold_train'], {}), '(sinr_threshold_train)\n', (1443, 1465), True, 'from sys_simulator import general as gen\n'), ((1483, 1510), 'sys_simulator.general.db_to_power', 'gen.db_to_power', (['mue_margin'], {}), '(mue_margin)\n', (1498, 1510), True, 'from sys_simulator import general as gen\n'), ((1952, 2142), 'sys_simulator.parameters.parameters.EnvironmentParameters', 'EnvironmentParameters', (['rb_bandwidth', 'd2d_pair_distance', 'p_max', 'noise_power', 'bs_gain', 'user_gain', 'sinr_threshold_train', 'n_mues', 'n_d2d', 'n_rb', 'bs_radius'], {'c_param': 'C', 'mue_margin': 'mue_margin'}), '(rb_bandwidth, d2d_pair_distance, p_max, noise_power,\n bs_gain, user_gain, sinr_threshold_train, n_mues, n_d2d, n_rb,\n bs_radius, c_param=C, mue_margin=mue_margin)\n', (1973, 2142), False, 'from sys_simulator.parameters.parameters import EnvironmentParameters, TrainingParameters, DQNAgentParameters\n'), ((2178, 2233), 'sys_simulator.parameters.parameters.TrainingParameters', 'TrainingParameters', (['MAX_NUM_EPISODES', 'STEPS_PER_EPISODE'], {}), '(MAX_NUM_EPISODES, STEPS_PER_EPISODE)\n', (2196, 2233), False, 'from sys_simulator.parameters.parameters import EnvironmentParameters, 
TrainingParameters, DQNAgentParameters\n'), ((2253, 2321), 'sys_simulator.parameters.parameters.DQNAgentParameters', 'DQNAgentParameters', (['EPSILON_MIN', 'EPSILON_DECAY', '(1)', '(10000)', '(512)', 'GAMMA'], {}), '(EPSILON_MIN, EPSILON_DECAY, 1, 10000, 512, GAMMA)\n', (2271, 2321), False, 'from sys_simulator.parameters.parameters import EnvironmentParameters, TrainingParameters, DQNAgentParameters\n'), ((2352, 2386), 'sys_simulator.dqn.externalDQNFramework.ExternalDQNFramework', 'ExternalDQNFramework', (['agent_params'], {}), '(agent_params)\n', (2372, 2386), False, 'from sys_simulator.dqn.externalDQNFramework import ExternalDQNFramework\n'), ((2458, 2470), 'sys_simulator.channels.BANChannel', 'BANChannel', ([], {}), '()\n', (2468, 2470), False, 'from sys_simulator.channels import BANChannel\n'), ((2481, 2539), 'sys_simulator.q_learning.environments.completeEnvironment5.CompleteEnvironment5', 'CompleteEnvironment5', (['env_params', 'reward_function', 'channel'], {}), '(env_params, reward_function, channel)\n', (2501, 2539), False, 'from sys_simulator.q_learning.environments.completeEnvironment5 import CompleteEnvironment5\n'), ((2585, 2605), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (2597, 2605), False, 'import torch\n'), ((5582, 5593), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5591, 5593), False, 'import os\n'), ((5609, 5632), 'sys_simulator.general.path_leaf', 'gen.path_leaf', (['__file__'], {}), '(__file__)\n', (5622, 5632), True, 'from sys_simulator import general as gen\n'), ((5858, 5893), 'torch.save', 'torch.save', (['spectral_effs', 'filename'], {}), '(spectral_effs, filename)\n', (5868, 5893), False, 'import torch\n'), ((3035, 3062), 'numpy.random.choice', 'np.random.choice', (['aux_range'], {}), '(aux_range)\n', (3051, 3062), True, 'import numpy as np\n'), ((3081, 3120), 'sys_simulator.dqn.agents.dqnAgent.ExternalDQNAgent', 'ExternalDQNAgent', (['agent_params', 'actions'], {}), '(agent_params, actions)\n', (3097, 3120), 
False, 'from sys_simulator.dqn.agents.dqnAgent import ExternalDQNAgent\n'), ((2878, 2907), 'numpy.linspace', 'np.linspace', (['(0.0001)', '(0.008)', '(5)'], {}), '(0.0001, 0.008, 5)\n', (2889, 2907), True, 'import numpy as np\n'), ((3333, 3362), 'numpy.random.choice', 'np.random.choice', (['await_steps'], {}), '(await_steps)\n', (3349, 3362), True, 'import numpy as np\n'), ((4569, 4587), 'torch.sum', 'torch.sum', (['rewards'], {}), '(rewards)\n', (4578, 4587), False, 'import torch\n'), ((4166, 4195), 'numpy.random.choice', 'np.random.choice', (['await_steps'], {}), '(await_steps)\n', (4182, 4195), True, 'import numpy as np\n'), ((3389, 3404), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (3401, 3404), False, 'import torch\n')] |
import tensorflow.keras.backend as K
import tensorflow as tf
import numpy as np
# alpha = 1, beta = 0.03, margin = {0.5, 1, 2, 4, 8}
class modals(tf.keras.Model):
def make_feature_extractor(self, layers, input_dim, hidden_size):
inputs = tf.keras.Input(shape=(None, input_dim))
x = inputs
for i in range(layers - 1):
x = tf.keras.layers.LSTM(hidden_size, return_sequences=True)(x)
outputs = tf.keras.layers.LSTM(hidden_size)(x)
return tf.keras.Model(inputs, outputs, name='feature_extractor')
def make_dense_layer(self, hidden_size, output_dim):
inputs = tf.keras.Input(shape=(hidden_size))
x = tf.keras.layers.Dense(256, activation='relu')(inputs)
# x = tf.keras.layers.BatchNormalization()(x)
# x = tf.keras.layers.Activation('relu')(x)
x = tf.keras.layers.Dropout(0.2)(x)
x = tf.keras.layers.Dense(256, activation='relu')(x)
outputs = tf.keras.layers.Dense(output_dim, activation='softmax')(x)
return tf.keras.Model(inputs, outputs, name='dense_layer')
def make_discriminator(self, hidden_size):
inputs = tf.keras.Input(shape=(hidden_size))
x = tf.keras.layers.Dense(256, activation='relu')(inputs)
# x = tf.keras.layers.BatchNormalization()(x)
# x = tf.keras.layers.Activation('relu')(x)
# x = tf.keras.layers.Dropout(0.2)(x)
# x = tf.keras.layers.Dense(256, activation='relu')(x)
outputs = tf.keras.layers.Dense(1, activation='sigmoid')(x)
return tf.keras.Model(inputs, outputs, name='discriminator')
def cal_latent(self, X, training=False):
return self.fe(X, training=training)
def __cosine_distance(self, a, b):
return 1 - K.sum(a * b) / (K.sqrt(K.sum(K.square(a))) * K.sqrt(K.sum(K.square(b))))
    def tf_triplet_selector(self, z, y, training=False):
        # Triplet-style margin loss over a batch of latent vectors z with
        # one-hot labels y: for each anchor, sample one random same-class
        # and one random different-class latent, then return
        # mean(d(anchor, positive) - d(anchor, negative) + gamma)
        # using cosine distance. `training` is currently unused.
        z_inclass = []
        z_outclass = []
        for i in range(y.shape[0]):
            # random index among samples with the same argmax label
            idx_in = np.random.choice(np.where(np.argmax(y, axis=1) == np.argmax(y[i]))[0], 1)[0]
            z_inclass.append(z[idx_in])
            # random index among samples with a different argmax label
            idx_out = np.random.choice(np.where(np.argmax(y, axis=1) != np.argmax(y[i]))[0], 1)[0]
            z_outclass.append(z[idx_out])
        z_inclass = tf.stack(z_inclass)
        z_outclass = tf.stack(z_outclass)
        return K.mean((self.__cosine_distance(z, z_inclass)) - (self.__cosine_distance(z, z_outclass)) + self.gamma)
def __split_data(self):
class_num_y = np.argmax(self.y_train, axis=1)
self.X_data_dict = {}
unique_class = np.unique(class_num_y)
for i in unique_class:
index = np.where(class_num_y == i)[0]
self.X_data_dict[i] = []
for j in index:
self.X_data_dict[i].append(self.X_train[j])
    def __get_inclass_z(self, y):
        # Latent vectors of all stored samples belonging to class index y.
        # NOTE(review): assumes self.z_data_dict was populated beforehand.
        return self.z_data_dict[y]
def __get_outclass_z(self, y):
classes = [x for x in self.z_data_dict.keys() if x != y]
return self.z_data_dict[classes[np.random.randint(len(classes))]]
def cal_hard_interpolation(self, z, y):
    """Interpolate z toward a "hard" same-class latent vector.

    Selects the top 5% of class-y latent vectors farthest from the class
    centroid, then steps z toward the nearest of those by a factor of
    param[2] (the augmentation magnitude).
    """
    inclass_z = self.__get_inclass_z(y)
    center = np.mean(inclass_z, axis=0)
    # distance of every in-class sample from the class centroid
    distanse = np.sqrt(np.sum(np.square(center - inclass_z), axis=1))
    index_sort = np.argsort(distanse)
    top_z = inclass_z.numpy()[index_sort[-max(int(len(inclass_z) * 0.05), 1):]] # 5% select
    # distance of z to each selected hard sample; index_sort[0] is the nearest
    z_dist = np.sqrt(np.sum(np.square(z - top_z), axis=1))
    index_sort = np.argsort(z_dist)
    return z + self.param[2]*(top_z[index_sort[0]] - z)
def cal_hard_expolation(self, z, y):
    """Move z along the direction of its in-class mean by factor param[2].

    Note: despite the "expolation" (extrapolation) name, the formula
    z - param[2] * (z - mean) shifts z toward the class mean.
    """
    centroid = np.mean(self.__get_inclass_z(y), axis=0)
    return z - self.param[2] * (z - centroid)
def cal_gaussian_noise(self, z, y):
    """Perturb z with Gaussian noise scaled by the in-class std and param[2]."""
    class_std = np.std(self.__get_inclass_z(y))
    noise = np.random.normal(0, class_std, z.shape)
    return z + self.param[2] * noise
def cal_difference(self, z, y):
    """Add the scaled difference of two random same-class latent vectors to z."""
    pool = self.__get_inclass_z(y)
    order = np.arange(len(pool))
    np.random.shuffle(order)
    return z + self.param[2] * (pool[order[0]] - pool[order[1]])
def get_classification_loss(self, z, y, training=False):
    """Cross-entropy loss on latent vectors with stochastic MODALS augmentation.

    Each latent vector is replaced, with probability param[1], by an
    augmented version chosen by the op name in param[0] ('inter', 'exp',
    'noise' or 'diff'); the (possibly augmented) batch is classified by
    self.dl and the negative log-likelihood of the true class is returned.
    """
    z_hat = tf.unstack(z)
    for i in range(len(z_hat)):
        target_z = z_hat[i]
        if np.random.rand() < self.param[1]:
            # dispatch on the configured augmentation operation
            if 'inter' == self.param[0]:
                z_hat[i] = self.cal_hard_interpolation(target_z, np.argmax(y[i]))
            elif 'exp' == self.param[0]:
                z_hat[i] = self.cal_hard_expolation(target_z, np.argmax(y[i]))
            elif 'noise' == self.param[0]:
                z_hat[i] = self.cal_gaussian_noise(target_z, np.argmax(y[i]))
            elif 'diff' == self.param[0]:
                z_hat[i] = self.cal_difference(target_z, np.argmax(y[i]))
            # debug guard: augmented vectors should stay rank-1
            if len(z_hat[i].shape) != 1:
                print(z_hat[i].shape, z[i].shape)
    z_hat = tf.stack(z_hat)
    classes = self.dl(z_hat, training=training)
    # NLL of the true class; 1e-8 guards log(0)
    return -K.mean((K.log(K.sum(y * classes, axis=1) + 1e-8)))
def set_param(self, param=('inter', 0.1, 0.1)):
    """Set the augmentation policy.

    :param param: sequence [op, prob, mag] — op is one of 'inter', 'exp',
        'noise', 'diff' (see get_classification_loss); prob is the
        per-sample augmentation probability; mag is the magnitude factor.
    """
    # Copy into a fresh list: the original used a mutable default list,
    # which would be shared between all instances and could be mutated
    # through outside aliases.
    self.param = list(param)
def set_model(self, layers, input_dim, output_dim, hidden_size):
    """Build the feature extractor, classifier head, and discriminator.

    :param layers: passed through to make_feature_extractor.
    :param input_dim: per-timestep input dimensionality.
    :param output_dim: number of classes.
    :param hidden_size: latent-space dimensionality.
    """
    self.fe = self.make_feature_extractor(layers, input_dim, hidden_size)
    self.dl = self.make_dense_layer(hidden_size, output_dim)
    # define classifier: end-to-end model fe -> dl (sequence input)
    inputs = tf.keras.Input(shape=(None, input_dim,))
    outputs = self.dl(self.fe(inputs))
    self.classifier = tf.keras.Model(inputs=inputs, outputs=outputs, name='classifier')
    self.dis = self.make_discriminator(hidden_size)
def __init__(self, gamma=1, batch_size=256):
    """Initialize the MODALS model.

    :param gamma: triplet-loss margin (see tf_triplet_selector).
    :param batch_size: stored batch size.
    """
    super(modals, self).__init__()
    self.gamma = gamma
    self.batch_size = batch_size
    # loss weights: alpha scales the adversarial term, beta the triplet term
    self.alpha = 1
    self.beta = 0.03
    self.set_param()  # set initial parameter
def compile(self, optimizer=None, **arg):
    """Configure training.

    NOTE(review): `optimizer` is expected to be a 2-element sequence
    (model optimizer, discriminator optimizer); the default None would
    raise on indexing below — confirm callers always pass a pair.
    """
    super(modals, self).compile(optimizer, loss='mse')
    self.M_optimizer = optimizer[0]
    self.D_optimizer = optimizer[1]
def train_step(self, batch_data):
    """One adversarial training step: update the model, then the discriminator.

    Model loss = classification + alpha * adversarial + beta * triplet;
    discriminator loss pushes dis(noise) -> 1 and dis(z) -> 0.
    """
    X, y = batch_data
    self.X_train = X
    self.y_train = y
    self.__split_data()
    self.z_data_dict = {}
    # NOTE(review): shape[0] is None for symbolic tracing batches; `!=`
    # comparison with None works but `is not None` would be idiomatic.
    if X.shape[0] != None:
        with tf.GradientTape() as tape:
            z = self.fe(X, training=True)
            # cache per-class latent vectors for the augmentation ops
            for key in self.X_data_dict.keys():
                self.z_data_dict[key] = z[np.argmax(y, axis=1) == key]
            classification_loss = self.get_classification_loss(z, y, training=True)
            triplet_loss = self.tf_triplet_selector(z, y, training=True)
            # generator-style term: encourage dis(z) -> 1 (1e-8 guards log(0))
            adversarial_loss = -K.mean(K.log(self.dis(z, training=False) + 1e-8))
            loss_M = classification_loss + self.alpha * adversarial_loss + self.beta * triplet_loss
        grads_M = tape.gradient(loss_M, self.classifier.trainable_variables)
        self.M_optimizer.apply_gradients(zip(grads_M, self.classifier.trainable_variables))
        with tf.GradientTape() as tape:
            z = self.fe(X, training=True)
            # prior samples for the discriminator: standard normal noise
            norm_z = tf.random.normal(z.shape, mean=0.0, stddev=1, dtype=tf.float32)
            noise_d = self.dis(norm_z, training=True)
            z_d = self.dis(z, training=True)
            loss_D = -K.mean(K.log(noise_d + 1e-8) + K.log(1 - z_d + 1e-8))
        grads_D = tape.gradient(loss_D, self.dis.trainable_variables)
        self.D_optimizer.apply_gradients(zip(grads_D, self.dis.trainable_variables))
        return {
            'classifier_loss': classification_loss,
            'triplet_loss': triplet_loss,
            'adversarial_loss': adversarial_loss,
            'loss_M': loss_M,
            'discriminator_loss': loss_D,
        }
    else:
        # symbolic build pass: report sentinel losses
        print('initial')
        print(X, y)
        return {
            'classifier_loss': -1,
            'triplet_loss': -1,
            'adversarial_loss': -1,
            'loss_M': -1,
            'discriminator_loss': -1,
        }
def test_step(self, batch_data):
    """Evaluation step: compute the same losses as train_step without updates."""
    X, y = batch_data
    self.X_train = X
    self.y_train = y
    self.__split_data()
    self.z_data_dict = {}
    if X.shape[0] != None:
        z = self.cal_latent(X, training=False)
        for key in self.X_data_dict.keys():
            self.z_data_dict[key] = z[np.argmax(y, axis=1) == key]
        classification_loss = self.get_classification_loss(z, y)
        # NOTE(review): training=True here (unlike evaluation elsewhere) —
        # confirm this is intentional.
        triplet_loss = self.tf_triplet_selector(z, y, training=True)
        # NOTE(review): no 1e-8 epsilon here (train_step has one), so a
        # saturated discriminator can produce -inf.
        adversarial_loss = -K.mean(K.log(self.dis(z)))
        loss_M = classification_loss + self.alpha * adversarial_loss + self.beta * tf.cast(triplet_loss, 'float32')
        z = np.concatenate([self.z_data_dict[key] for key in self.z_data_dict.keys()], axis=0)
        # prior noise matched to the empirical per-dimension std of z
        norm_z = tf.random.normal(z.shape, mean=0.0, stddev=np.std(z, axis=0), dtype=tf.float32)
        loss_D = -K.mean(K.log(self.dis(norm_z, training=False)) + K.log(1 - self.dis(z, training=False)))
        return {
            'classifier_loss': classification_loss,
            'triplet_loss': triplet_loss,
            'adversarial_loss': adversarial_loss,
            'loss_M': loss_M,
            'discriminator_loss': loss_D,
        }
    else:
        # symbolic build pass: report sentinel losses
        print('initial')
        print(X, y)
        return {
            'classifier_loss': -1,
            'triplet_loss': -1,
            'adversarial_loss': -1,
            'loss_M': -1,
            'discriminator_loss': -1,
        }
class modals_cov(modals):
    """MODALS variant with a convolutional (image) feature extractor."""
    def make_feature_extractor(self, layers, input_dim, hidden_size):
        """Build a small CNN encoder mapping images to hidden_size latents.

        `layers` is accepted for interface compatibility but unused here.
        """
        inputs = tf.keras.Input(shape=(input_dim))
        x = tf.keras.layers.Conv2D(64, (3, 3), activation='relu', padding='same')(inputs)
        x = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(x)
        x = tf.keras.layers.Conv2D(128, (3, 3), activation='relu', padding='same')(x)
        x = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(x)
        x = tf.keras.layers.Conv2D(256, (3, 3), activation='relu', padding='same')(x)
        # x = tf.keras.layers.GlobalAveragePooling2D()(x)
        x = tf.keras.layers.Flatten()(x)
        x = tf.keras.layers.Dense(128, activation='relu')(x)
        x = tf.keras.layers.Dense(64, activation='relu')(x)
        outputs = tf.keras.layers.Dense(hidden_size, activation='linear')(x)
        return tf.keras.Model(inputs, outputs, name='cifar10')
    def set_model(self, layers, input_dim, output_dim, hidden_size):
        """Build extractor/classifier/discriminator for image-shaped input."""
        self.fe = self.make_feature_extractor(layers, input_dim, hidden_size)
        self.dl = self.make_dense_layer(hidden_size, output_dim)
        # define classifier (input_dim here is the full image shape tuple)
        inputs = tf.keras.Input(shape=input_dim)
        outputs = self.dl(self.fe(inputs))
        self.classifier = tf.keras.Model(inputs=inputs, outputs=outputs, name='classifier')
        self.fe.summary()
        self.classifier.summary()
        self.dis = self.make_discriminator(hidden_size)
| [
"tensorflow.unstack",
"tensorflow.keras.backend.log",
"numpy.random.rand",
"numpy.argsort",
"tensorflow.GradientTape",
"tensorflow.keras.layers.Dense",
"tensorflow.cast",
"numpy.mean",
"tensorflow.random.normal",
"tensorflow.keras.layers.Conv2D",
"numpy.where",
"tensorflow.keras.backend.square... | [((252, 291), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(None, input_dim)'}), '(shape=(None, input_dim))\n', (266, 291), True, 'import tensorflow as tf\n'), ((502, 559), 'tensorflow.keras.Model', 'tf.keras.Model', (['inputs', 'outputs'], {'name': '"""feature_extractor"""'}), "(inputs, outputs, name='feature_extractor')\n", (516, 559), True, 'import tensorflow as tf\n'), ((639, 672), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': 'hidden_size'}), '(shape=hidden_size)\n', (653, 672), True, 'import tensorflow as tf\n'), ((1044, 1095), 'tensorflow.keras.Model', 'tf.keras.Model', (['inputs', 'outputs'], {'name': '"""dense_layer"""'}), "(inputs, outputs, name='dense_layer')\n", (1058, 1095), True, 'import tensorflow as tf\n'), ((1165, 1198), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': 'hidden_size'}), '(shape=hidden_size)\n', (1179, 1198), True, 'import tensorflow as tf\n'), ((1565, 1618), 'tensorflow.keras.Model', 'tf.keras.Model', (['inputs', 'outputs'], {'name': '"""discriminator"""'}), "(inputs, outputs, name='discriminator')\n", (1579, 1618), True, 'import tensorflow as tf\n'), ((2320, 2339), 'tensorflow.stack', 'tf.stack', (['z_inclass'], {}), '(z_inclass)\n', (2328, 2339), True, 'import tensorflow as tf\n'), ((2361, 2381), 'tensorflow.stack', 'tf.stack', (['z_outclass'], {}), '(z_outclass)\n', (2369, 2381), True, 'import tensorflow as tf\n'), ((2628, 2659), 'numpy.argmax', 'np.argmax', (['self.y_train'], {'axis': '(1)'}), '(self.y_train, axis=1)\n', (2637, 2659), True, 'import numpy as np\n'), ((2713, 2735), 'numpy.unique', 'np.unique', (['class_num_y'], {}), '(class_num_y)\n', (2722, 2735), True, 'import numpy as np\n'), ((3317, 3343), 'numpy.mean', 'np.mean', (['inclass_z'], {'axis': '(0)'}), '(inclass_z, axis=0)\n', (3324, 3343), True, 'import numpy as np\n'), ((3463, 3483), 'numpy.argsort', 'np.argsort', (['distanse'], {}), '(distanse)\n', (3473, 3483), True, 'import numpy as 
np\n'), ((3664, 3682), 'numpy.argsort', 'np.argsort', (['z_dist'], {}), '(z_dist)\n', (3674, 3682), True, 'import numpy as np\n'), ((4226, 4258), 'numpy.random.shuffle', 'np.random.shuffle', (['shuffle_index'], {}), '(shuffle_index)\n', (4243, 4258), True, 'import numpy as np\n'), ((4426, 4439), 'tensorflow.unstack', 'tf.unstack', (['z'], {}), '(z)\n', (4436, 4439), True, 'import tensorflow as tf\n'), ((5196, 5211), 'tensorflow.stack', 'tf.stack', (['z_hat'], {}), '(z_hat)\n', (5204, 5211), True, 'import tensorflow as tf\n'), ((5826, 5865), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(None, input_dim)'}), '(shape=(None, input_dim))\n', (5840, 5865), True, 'import tensorflow as tf\n'), ((5936, 6001), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inputs', 'outputs': 'outputs', 'name': '"""classifier"""'}), "(inputs=inputs, outputs=outputs, name='classifier')\n", (5950, 6001), True, 'import tensorflow as tf\n'), ((10550, 10581), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': 'input_dim'}), '(shape=input_dim)\n', (10564, 10581), True, 'import tensorflow as tf\n'), ((11286, 11333), 'tensorflow.keras.Model', 'tf.keras.Model', (['inputs', 'outputs'], {'name': '"""cifar10"""'}), "(inputs, outputs, name='cifar10')\n", (11300, 11333), True, 'import tensorflow as tf\n'), ((11606, 11637), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': 'input_dim'}), '(shape=input_dim)\n', (11620, 11637), True, 'import tensorflow as tf\n'), ((11707, 11772), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inputs', 'outputs': 'outputs', 'name': '"""classifier"""'}), "(inputs=inputs, outputs=outputs, name='classifier')\n", (11721, 11772), True, 'import tensorflow as tf\n'), ((441, 474), 'tensorflow.keras.layers.LSTM', 'tf.keras.layers.LSTM', (['hidden_size'], {}), '(hidden_size)\n', (461, 474), True, 'import tensorflow as tf\n'), ((687, 732), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(256)'], {'activation': 
'"""relu"""'}), "(256, activation='relu')\n", (708, 732), True, 'import tensorflow as tf\n'), ((859, 887), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (882, 887), True, 'import tensorflow as tf\n'), ((903, 948), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (924, 948), True, 'import tensorflow as tf\n'), ((970, 1025), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['output_dim'], {'activation': '"""softmax"""'}), "(output_dim, activation='softmax')\n", (991, 1025), True, 'import tensorflow as tf\n'), ((1213, 1258), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (1234, 1258), True, 'import tensorflow as tf\n'), ((1500, 1546), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (1521, 1546), True, 'import tensorflow as tf\n'), ((10596, 10665), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(64, (3, 3), activation='relu', padding='same')\n", (10618, 10665), True, 'import tensorflow as tf\n'), ((10686, 10732), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (10714, 10732), True, 'import tensorflow as tf\n'), ((10748, 10818), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(128, (3, 3), activation='relu', padding='same')\n", (10770, 10818), True, 'import tensorflow as tf\n'), ((10834, 10880), 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (10862, 10880), True, 'import tensorflow as tf\n'), ((10896, 10966), 
'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(256)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(256, (3, 3), activation='relu', padding='same')\n", (10918, 10966), True, 'import tensorflow as tf\n'), ((11040, 11065), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (11063, 11065), True, 'import tensorflow as tf\n'), ((11085, 11130), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (11106, 11130), True, 'import tensorflow as tf\n'), ((11146, 11190), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (11167, 11190), True, 'import tensorflow as tf\n'), ((11212, 11267), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['hidden_size'], {'activation': '"""linear"""'}), "(hidden_size, activation='linear')\n", (11233, 11267), True, 'import tensorflow as tf\n'), ((363, 419), 'tensorflow.keras.layers.LSTM', 'tf.keras.layers.LSTM', (['hidden_size'], {'return_sequences': '(True)'}), '(hidden_size, return_sequences=True)\n', (383, 419), True, 'import tensorflow as tf\n'), ((1777, 1789), 'tensorflow.keras.backend.sum', 'K.sum', (['(a * b)'], {}), '(a * b)\n', (1782, 1789), True, 'import tensorflow.keras.backend as K\n'), ((2787, 2813), 'numpy.where', 'np.where', (['(class_num_y == i)'], {}), '(class_num_y == i)\n', (2795, 2813), True, 'import numpy as np\n'), ((3402, 3431), 'numpy.square', 'np.square', (['(center - inclass_z)'], {}), '(center - inclass_z)\n', (3411, 3431), True, 'import numpy as np\n'), ((3612, 3632), 'numpy.square', 'np.square', (['(z - top_z)'], {}), '(z - top_z)\n', (3621, 3632), True, 'import numpy as np\n'), ((4013, 4048), 'numpy.random.normal', 'np.random.normal', (['(0)', 'sigma', 'z.shape'], {}), '(0, sigma, z.shape)\n', (4029, 4048), True, 'import numpy as np\n'), ((4523, 4539), 'numpy.random.rand', 
'np.random.rand', ([], {}), '()\n', (4537, 4539), True, 'import numpy as np\n'), ((6766, 6783), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (6781, 6783), True, 'import tensorflow as tf\n'), ((7572, 7589), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (7587, 7589), True, 'import tensorflow as tf\n'), ((7777, 7840), 'tensorflow.random.normal', 'tf.random.normal', (['z.shape'], {'mean': '(0.0)', 'stddev': '(1)', 'dtype': 'tf.float32'}), '(z.shape, mean=0.0, stddev=1, dtype=tf.float32)\n', (7793, 7840), True, 'import tensorflow as tf\n'), ((9495, 9527), 'tensorflow.cast', 'tf.cast', (['triplet_loss', '"""float32"""'], {}), "(triplet_loss, 'float32')\n", (9502, 9527), True, 'import tensorflow as tf\n'), ((9692, 9709), 'numpy.std', 'np.std', (['z'], {'axis': '(0)'}), '(z, axis=0)\n', (9698, 9709), True, 'import numpy as np\n'), ((4671, 4686), 'numpy.argmax', 'np.argmax', (['y[i]'], {}), '(y[i])\n', (4680, 4686), True, 'import numpy as np\n'), ((5395, 5421), 'tensorflow.keras.backend.sum', 'K.sum', (['(y * classes)'], {'axis': '(1)'}), '(y * classes, axis=1)\n', (5400, 5421), True, 'import tensorflow.keras.backend as K\n'), ((9123, 9143), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (9132, 9143), True, 'import numpy as np\n'), ((1806, 1817), 'tensorflow.keras.backend.square', 'K.square', (['a'], {}), '(a)\n', (1814, 1817), True, 'import tensorflow.keras.backend as K\n'), ((1835, 1846), 'tensorflow.keras.backend.square', 'K.square', (['b'], {}), '(b)\n', (1843, 1846), True, 'import tensorflow.keras.backend as K\n'), ((4799, 4814), 'numpy.argmax', 'np.argmax', (['y[i]'], {}), '(y[i])\n', (4808, 4814), True, 'import numpy as np\n'), ((6937, 6957), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (6946, 6957), True, 'import numpy as np\n'), ((7982, 8004), 'tensorflow.keras.backend.log', 'K.log', (['(noise_d + 1e-08)'], {}), '(noise_d + 1e-08)\n', (7987, 8004), True, 'import 
tensorflow.keras.backend as K\n'), ((8006, 8028), 'tensorflow.keras.backend.log', 'K.log', (['(1 - z_d + 1e-08)'], {}), '(1 - z_d + 1e-08)\n', (8011, 8028), True, 'import tensorflow.keras.backend as K\n'), ((2042, 2062), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (2051, 2062), True, 'import numpy as np\n'), ((2066, 2081), 'numpy.argmax', 'np.argmax', (['y[i]'], {}), '(y[i])\n', (2075, 2081), True, 'import numpy as np\n'), ((2194, 2214), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (2203, 2214), True, 'import numpy as np\n'), ((2218, 2233), 'numpy.argmax', 'np.argmax', (['y[i]'], {}), '(y[i])\n', (2227, 2233), True, 'import numpy as np\n'), ((4928, 4943), 'numpy.argmax', 'np.argmax', (['y[i]'], {}), '(y[i])\n', (4937, 4943), True, 'import numpy as np\n'), ((5052, 5067), 'numpy.argmax', 'np.argmax', (['y[i]'], {}), '(y[i])\n', (5061, 5067), True, 'import numpy as np\n')] |
import logging
import numpy as np
from mlagents.trainers import UnityException
from mlagents.trainers.models import LearningModel
# Module-level logger shared by trainer policy code.
logger = logging.getLogger("mlagents.trainers")
class UnityPolicyException(UnityException):
    """
    Related to errors with the Policy.
    """
    pass
class Policy(object):
    """
    Base class holding a learning model together with the machinery needed
    to evaluate it on agent experiences and to apply updates to it.
    """

    def __init__(self, seed, brain, trainer_parameters, sess):
        """
        Initialized the policy.
        :param seed: Random seed to use for TensorFlow.
        :param brain: The corresponding Brain for this policy.
        :param trainer_parameters: The trainer parameters.
        :param sess: The current TensorFlow session.
        """
        self.seed = seed
        self.brain = brain
        self.sess = sess
        self.model = None
        self.m_size = None
        self.sequence_length = 1
        self.inference_dict = {}
        self.update_dict = {}
        self.variable_scope = trainer_parameters['graph_scope']
        self.use_recurrent = trainer_parameters["use_recurrent"]
        self.use_continuous_act = (brain.vector_action_space_type == "continuous")
        if self.use_recurrent:
            # Recurrent policies need a memory size and unroll length.
            self.m_size = trainer_parameters["memory_size"]
            self.sequence_length = trainer_parameters["sequence_length"]
            if self.m_size == 0:
                raise UnityPolicyException(
                    "The memory size for brain {0} is 0 even though the trainer "
                    "uses recurrent.".format(brain.brain_name))
            if self.m_size % 4 != 0:
                raise UnityPolicyException(
                    "The memory size for brain {0} is {1} but it must be "
                    "divisible by 4.".format(brain.brain_name, self.m_size))

    def evaluate(self, brain_info):
        """
        Evaluates policy for the agent experiences provided.
        :param brain_info: BrainInfo input to network.
        :return: Output from policy based on self.inference_dict.
        """
        raise UnityPolicyException("The evaluate function was not implemented.")

    def update(self, mini_batch, num_sequences):
        """
        Performs update of the policy.
        :param num_sequences: Number of experience trajectories in batch.
        :param mini_batch: Batch of experiences.
        :return: Results of update.
        """
        raise UnityPolicyException("The update function was not implemented.")

    def _execute_model(self, feed_dict, out_dict):
        """
        Executes model.
        :param feed_dict: Input dictionary mapping nodes to input data.
        :param out_dict: Output dictionary mapping names to nodes.
        :return: Dictionary mapping names to input data.
        """
        names = list(out_dict.keys())
        fetched = self.sess.run(list(out_dict.values()), feed_dict=feed_dict)
        return dict(zip(names, fetched))

    def _fill_eval_dict(self, feed_dict, brain_info):
        """Populate feed_dict with the observations (and masks) from brain_info."""
        for idx, _ in enumerate(brain_info.visual_observations):
            feed_dict[self.model.visual_in[idx]] = brain_info.visual_observations[idx]
        if self.use_vec_obs:
            feed_dict[self.model.vector_in] = brain_info.vector_observations
        if not self.use_continuous_act:
            feed_dict[self.model.action_masks] = brain_info.action_masks
        return feed_dict

    def make_empty_memory(self, num_agents):
        """
        Creates empty memory for use with RNNs
        :param num_agents: Number of agents.
        :return: Numpy array of zeros.
        """
        return np.zeros((num_agents, self.m_size))

    @property
    def graph_scope(self):
        """
        Returns the graph scope of the trainer.
        """
        return self.variable_scope

    def get_current_step(self):
        """
        Gets current model step.
        :return: current model step.
        """
        return self.sess.run(self.model.global_step)

    def increment_step(self):
        """
        Increments model step.
        """
        self.sess.run(self.model.increment_step)

    def get_inference_vars(self):
        """
        :return:list of inference var names
        """
        return list(self.inference_dict.keys())

    def get_update_vars(self):
        """
        :return:list of update var names
        """
        return list(self.update_dict.keys())

    @property
    def vis_obs_size(self):
        return self.model.vis_obs_size

    @property
    def vec_obs_size(self):
        return self.model.vec_obs_size

    @property
    def use_vis_obs(self):
        return self.model.vis_obs_size > 0

    @property
    def use_vec_obs(self):
        return self.model.vec_obs_size > 0
| [
"logging.getLogger",
"numpy.zeros"
] | [((141, 179), 'logging.getLogger', 'logging.getLogger', (['"""mlagents.trainers"""'], {}), "('mlagents.trainers')\n", (158, 179), False, 'import logging\n'), ((3752, 3787), 'numpy.zeros', 'np.zeros', (['(num_agents, self.m_size)'], {}), '((num_agents, self.m_size))\n', (3760, 3787), True, 'import numpy as np\n')] |
"""Define the ImplicitFuncComp class."""
from itertools import chain
import numpy as np
from openmdao.core.implicitcomponent import ImplicitComponent
from openmdao.core.constants import INT_DTYPE
import openmdao.func_api as omf
from openmdao.components.func_comp_common import _check_var_name, _copy_with_ignore, _add_options, \
jac_forward, jac_reverse, _get_tangents
# jax is an optional dependency: components that enable 'use_jax' check for
# None and raise a helpful error at construction time instead of import time.
try:
    import jax
    from jax import jit, jacfwd, jacrev
    from jax.config import config
    config.update("jax_enable_x64", True)  # jax by default uses 32 bit floats
except ImportError:
    jax = None
class ImplicitFuncComp(ImplicitComponent):
"""
An implicit component that wraps a python function.
Parameters
----------
apply_nonlinear : function
The function to be wrapped by this Component.
solve_nonlinear : function or None
Optional function to perform a nonlinear solve.
linearize : function or None
Optional function to compute partial derivatives.
solve_linear : function or None
Optional function to perform a linear solve.
**kwargs : named args
Args passed down to ImplicitComponent.
Attributes
----------
_apply_nonlinear_func : callable
The function wrapper used by this component.
_apply_nonlinear_func_jax : callable
Function decorated to ensure use of jax numpy.
_solve_nonlinear_func : function or None
Optional function to do a nonlinear solve.
solve_nonlinear : method
Local override of _solve_nonlinear method.
_solve_linear_func : function or None
Optional function to do a linear solve.
solve_linear : method
Local override of solve_linear method.
_linearize_func : function or None
Optional function to compute partial derivatives.
linearize : method
Local override of linearize method.
_linearize_info : object
Some state information to compute in _linearize_func and pass to _solve_linear_func
_tangents : tuple
Tuple of parts of the tangent matrix cached for jax derivative computation.
_jac2func_inds : ndarray
Translation array from jacobian indices to function array indices.
"""
    def __init__(self, apply_nonlinear, solve_nonlinear=None, linearize=None, solve_linear=None,
                 **kwargs):
        """
        Initialize attributes.

        Wraps apply_nonlinear and, when the optional solve/linearize
        functions are supplied, overrides the corresponding instance
        methods so OpenMDAO dispatches to them.
        """
        super().__init__(**kwargs)
        self._apply_nonlinear_func = omf.wrap(apply_nonlinear)
        self._solve_nonlinear_func = solve_nonlinear
        self._solve_linear_func = solve_linear
        self._linearize_func = linearize
        self._linearize_info = None
        self._tangents = None
        self._jac2func_inds = None
        # Per-instance method overrides: only installed when the user supplied
        # the corresponding function, otherwise base-class behavior applies.
        if solve_nonlinear:
            self.solve_nonlinear = self._user_solve_nonlinear
        if linearize:
            self.linearize = self._user_linearize
        if solve_linear:
            self.solve_linear = self._user_solve_linear
        if self._apply_nonlinear_func._use_jax:
            self.options['use_jax'] = True

        # setup requires an undecorated, unjitted function, so do it now
        if self._apply_nonlinear_func._call_setup:
            self._apply_nonlinear_func._setup()

        if self.options['use_jax']:
            if jax is None:
                raise RuntimeError(f"{self.msginfo}: jax is not installed. Try 'pip install jax'.")
            self._apply_nonlinear_func_jax = omf.jax_decorate(self._apply_nonlinear_func._f)

        if self.options['use_jax'] and self.options['use_jit']:
            # option args are static (non-differentiated) for jit purposes
            static_argnums = [i for i, m in enumerate(self._apply_nonlinear_func._inputs.values())
                              if 'is_option' in m]
            try:
                with omf.jax_context(self._apply_nonlinear_func._f.__globals__):
                    self._apply_nonlinear_func_jax = jit(self._apply_nonlinear_func_jax,
                                                        static_argnums=static_argnums)
            except Exception as err:
                raise RuntimeError(f"{self.msginfo}: failed jit compile of solve_nonlinear "
                                   f"function: {err}")
    def _declare_options(self):
        """
        Declare options before kwargs are processed in the init method.
        """
        super()._declare_options()
        # adds the function-component shared options (defined in func_comp_common)
        _add_options(self)
    def setup(self):
        """
        Define our inputs and outputs.

        Function inputs marked 'is_option' become component options;
        all other declared inputs/outputs become OpenMDAO variables.
        """
        optignore = {'is_option'}

        for name, meta in self._apply_nonlinear_func.get_input_meta():
            _check_var_name(self, name)
            if 'is_option' in meta and meta['is_option']:
                kwargs = _copy_with_ignore(meta, omf._allowed_declare_options_args,
                                          ignore=optignore)
                self.options.declare(name, **kwargs)
            else:
                kwargs = omf._filter_dict(meta, omf._allowed_add_input_args)
                self.add_input(name, **kwargs)

        for i, (name, meta) in enumerate(self._apply_nonlinear_func.get_output_meta()):
            _check_var_name(self, name)
            # 'resid' is function-level bookkeeping, not an add_output arg
            kwargs = _copy_with_ignore(meta, omf._allowed_add_output_args, ignore=('resid',))
            self.add_output(name, **kwargs)
def declare_partials(self, *args, **kwargs):
"""
Declare information about this component's subjacobians.
Parameters
----------
*args : list
Positional args to be passed to base class version of declare_partials.
**kwargs : dict
Keyword args to be passed to base class version of declare_partials.
Returns
-------
dict
Metadata dict for the specified partial(s).
"""
if self._linearize_func is None and ('method' not in kwargs or
kwargs['method'] == 'exact'):
raise RuntimeError(f"{self.msginfo}: declare_partials must be called with method equal "
"to 'cs', 'fd', or 'jax'.")
return super().declare_partials(*args, **kwargs)
    def _setup_partials(self):
        """
        Check that all partials are declared.

        Forwards any declare_coloring / declare_partials metadata attached
        to the wrapped function before the base-class setup runs.
        """
        kwargs = self._apply_nonlinear_func.get_declare_coloring()
        if kwargs is not None:
            self.declare_coloring(**kwargs)

        for kwargs in self._apply_nonlinear_func.get_declare_partials():
            self.declare_partials(**kwargs)

        super()._setup_partials()
    def apply_nonlinear(self, inputs, outputs, residuals,
                        discrete_inputs=None, discrete_outputs=None):
        """
        R = Ax - b.

        Parameters
        ----------
        inputs : Vector
            Unscaled, dimensional input variables read via inputs[key].
        outputs : Vector
            Unscaled, dimensional output variables read via outputs[key].
        residuals : Vector
            Unscaled, dimensional residuals written to via residuals[key].
        discrete_inputs : _DictValues or None
            Dict-like object containing discrete inputs.
        discrete_outputs : _DictValues or None
            Dict-like object containing discrete outputs.
        """
        # args are passed in the wrapped function's declared order (states
        # interleaved with inputs), not in OpenMDAO vector order
        residuals.set_vals(self._apply_nonlinear_func(*self._ordered_func_invals(inputs, outputs)))
    def _user_solve_nonlinear(self, inputs, outputs):
        """
        Compute outputs. The model is assumed to be in a scaled state.

        Parameters
        ----------
        inputs : Vector
            Unscaled, dimensional input variables read via inputs[key].
        outputs : Vector
            Unscaled, dimensional output variables read via outputs[key].
        """
        self._outputs.set_vals(self._solve_nonlinear_func(*self._ordered_func_invals(inputs,
                                                                                     outputs)))
    def _linearize(self, jac=None, sub_do_ln=False):
        """
        Compute jacobian / factorization. The model is assumed to be in a scaled state.

        Parameters
        ----------
        jac : Jacobian or None
            Ignored.
        sub_do_ln : bool
            Flag indicating if the children should call linearize on their linear solvers.
        """
        if self.options['use_jax']:
            self._check_first_linearize()
            self._jax_linearize()
            # keep an assembled jacobian, if present, in sync with the new values
            if (jac is None or jac is self._assembled_jac) and self._assembled_jac is not None:
                self._assembled_jac._update(self)
        else:
            super()._linearize(jac, sub_do_ln)
def _jax_linearize(self):
"""
Compute the jacobian using jax.
This updates self._jacobian.
"""
func = self._apply_nonlinear_func
# argnums specifies which position args are to be differentiated
inames = list(func.get_input_names())
argnums = aa = [i for i, m in enumerate(func._inputs.values()) if 'is_option' not in m]
if len(argnums) == len(inames):
argnums = None # speedup if there are no static args
osize = len(self._outputs)
isize = len(self._inputs) + osize
invals = list(self._ordered_func_invals(self._inputs, self._outputs))
coloring = self._coloring_info['coloring']
if self._mode == 'rev': # use reverse mode to compute derivs
outvals = tuple(self._outputs.values())
tangents = self._get_tangents(outvals, 'rev', coloring)
if coloring is not None:
j = [np.asarray(a).reshape((a.shape[0], np.prod(a.shape[1:], dtype=INT_DTYPE)))
for a in jac_reverse(self._apply_nonlinear_func_jax, argnums,
tangents)(*invals)]
j = coloring.expand_jac(np.hstack(self._reorder_col_chunks(j)), 'rev')
else:
j = []
for a in jac_reverse(self._apply_nonlinear_func_jax, argnums, tangents)(*invals):
a = np.asarray(a)
if a.ndim < 2:
a = a.reshape((a.size, 1))
else:
a = a.reshape((a.shape[0], np.prod(a.shape[1:], dtype=INT_DTYPE)))
j.append(a)
j = np.hstack(self._reorder_col_chunks(j)).reshape((osize, isize))
else:
if coloring is not None:
tangents = self._get_tangents(invals, 'fwd', coloring, argnums,
trans=self._get_jac2func_inds(self._inputs,
self._outputs))
j = [np.asarray(a).reshape((np.prod(a.shape[:-1], dtype=INT_DTYPE), a.shape[-1]))
for a in jac_forward(self._apply_nonlinear_func_jax, argnums,
tangents)(*invals)]
j = coloring.expand_jac(np.vstack(j), 'fwd')
else:
tangents = self._get_tangents(invals, 'fwd', coloring, argnums)
j = []
for a in jac_forward(self._apply_nonlinear_func_jax, argnums, tangents)(*invals):
a = np.asarray(a)
if a.ndim < 2:
a = a.reshape((1, a.size))
else:
a = a.reshape((np.prod(a.shape[:-1], dtype=INT_DTYPE), a.shape[-1]))
j.append(a)
j = self._reorder_cols(np.vstack(j).reshape((osize, isize)))
self._jacobian.set_dense_jac(self, j)
    def _user_linearize(self, inputs, outputs, jacobian):
        """
        Calculate the partials of the residual for each balance.

        The user linearize function receives the ordered function args
        followed by the jacobian; its return value is stashed in
        self._linearize_info for a later user solve_linear call.

        Parameters
        ----------
        inputs : Vector
            Unscaled, dimensional input variables read via inputs[key].
        outputs : Vector
            Unscaled, dimensional output variables read via outputs[key].
        jacobian : Jacobian
            Sub-jac components written to jacobian[output_name, input_name].
        """
        self._linearize_info = self._linearize_func(*chain(self._ordered_func_invals(inputs,
                                                                                     outputs),
                                                          (jacobian,)))
    def _user_solve_linear(self, d_outputs, d_residuals, mode):
        r"""
        Run solve_linear function if there is one.

        Parameters
        ----------
        d_outputs : Vector
            Unscaled, dimensional quantities read via d_outputs[key].
        d_residuals : Vector
            Unscaled, dimensional quantities read via d_residuals[key].
        mode : str
            Derivative solution direction, either 'fwd' or 'rev'.
        """
        # fwd: solve for d_outputs from d_residuals; rev: the reverse.
        # The user function also receives the info saved by _user_linearize.
        if mode == 'fwd':
            d_outputs.set_vals(self._solve_linear_func(*chain(d_residuals.values(),
                                                              (mode, self._linearize_info))))
        else:  # rev
            d_residuals.set_vals(self._solve_linear_func(*chain(d_outputs.values(),
                                                                (mode, self._linearize_info))))
def _ordered_func_invals(self, inputs, outputs):
"""
Yield function input args in their proper order.
In OpenMDAO, states are outputs, but for our some of our functions they are inputs, so
this function yields the values of the inputs and states in the same order that they
were originally given for the _apply_nonlinear_func.
Parameters
----------
inputs : Vector
The input vector.
outputs : Vector
The output vector (contains the states).
Yields
------
float or ndarray
Value of input or state variable.
"""
inps = inputs.values()
outs = outputs.values()
for name, meta in self._apply_nonlinear_func._inputs.items():
if 'is_option' in meta: # it's an option
yield self.options[name]
elif 'resid' in meta: # it's a state
yield next(outs)
else:
yield next(inps)
def _get_jac2func_inds(self, inputs, outputs):
"""
Return a translation array from jac column indices into function input ordering.
Parameters
----------
inputs : Vector
The input vector.
outputs : Vector
The output vector (contains the states).
Returns
-------
ndarray
Index translation array
"""
if self._jac2func_inds is None:
inds = np.arange(len(outputs) + len(inputs), dtype=INT_DTYPE)
indict = {}
start = end = 0
for n, meta in self._apply_nonlinear_func._inputs.items():
if 'is_option' not in meta:
end += np.prod(meta['shape'], dtype=INT_DTYPE)
indict[n] = inds[start:end]
start = end
inds = [indict[n] for n in chain(outputs, inputs)]
self._jac2func_inds = np.concatenate(inds)
return self._jac2func_inds
def _reorder_col_chunks(self, col_chunks):
"""
Return jacobian column chunks in correct OpenMDAO order (outputs first, then inputs).
This is needed in rev mode because the return values of the jacrev function are ordered
based on the order of the function inputs, which may be different than OpenMDAO's
required order.
Parameters
----------
col_chunks : list of ndarray
List of column chunks to be reordered
Returns
-------
list
Chunks in OpenMDAO jacobian order.
"""
inps = []
ordered_chunks = []
chunk_iter = iter(col_chunks)
for meta in self._apply_nonlinear_func._inputs.values():
if 'is_option' in meta: # it's an option
pass # skip it (don't include in jacobian)
elif 'resid' in meta: # it's a state
ordered_chunks.append(next(chunk_iter))
else:
inps.append(next(chunk_iter))
return ordered_chunks + inps
def _reorder_cols(self, arr, coloring=None):
"""
Reorder the columns of jacobian row chunks in fwd mode.
Parameters
----------
arr : ndarray
Jacobian or compressed jacobian.
coloring : Coloring or None
Coloring object.
Returns
-------
ndarray
Reordered array.
"""
if coloring is None:
trans = self._get_jac2func_inds(self._inputs, self._outputs)
return arr[:, trans]
else:
trans = self._get_jac2func_inds(self._inputs, self._outputs)
J = np.zeros(coloring._shape)
for col, nzpart, icol in coloring.colored_jac_iter(arr, 'fwd', trans):
J[nzpart, icol] = col
return J
def _get_tangents(self, vals, direction, coloring=None, argnums=None, trans=None):
"""
Return a tuple of tangents values for use with vmap.
Parameters
----------
vals : list
List of function input values.
direction : str
Derivative computation direction ('fwd' or 'rev').
coloring : Coloring or None
If not None, the Coloring object used to compute a compressed tangent array.
argnums : list of int or None
Indices of dynamic (differentiable) function args.
trans : ndarray
Translation array from jacobian indices into function arg indices. This is needed
because OpenMDAO expects ordering to be outputs first, then inputs, but function args
could be in any order.
Returns
-------
tuple of ndarray or ndarray
The tangents values to be passed to vmap.
"""
if self._tangents is None:
self._tangents = _get_tangents(vals, direction, coloring, argnums, trans)
return self._tangents
def _compute_coloring(self, recurse=False, **overrides):
"""
Compute a coloring of the partial jacobian.
This assumes that the current System is in a proper state for computing derivatives.
It just calls the base class version and then resets the tangents so that after coloring
a new set of compressed tangents values can be computed.
Parameters
----------
recurse : bool
If True, recurse from this system down the system hierarchy. Whenever a group
is encountered that has specified its coloring metadata, we don't recurse below
that group unless that group has a subsystem that has a nonlinear solver that uses
gradients.
**overrides : dict
Any args that will override either default coloring settings or coloring settings
resulting from an earlier call to declare_coloring.
Returns
-------
list of Coloring
The computed colorings.
"""
ret = super()._compute_coloring(recurse, **overrides)
self._tangents = None # reset to compute new colored tangents later
return ret
| [
"itertools.chain",
"numpy.prod",
"openmdao.components.func_comp_common._get_tangents",
"openmdao.components.func_comp_common.jac_forward",
"openmdao.func_api._filter_dict",
"openmdao.components.func_comp_common._copy_with_ignore",
"numpy.asarray",
"openmdao.func_api.wrap",
"openmdao.func_api.jax_con... | [((473, 510), 'jax.config.config.update', 'config.update', (['"""jax_enable_x64"""', '(True)'], {}), "('jax_enable_x64', True)\n", (486, 510), False, 'from jax.config import config\n'), ((2468, 2493), 'openmdao.func_api.wrap', 'omf.wrap', (['apply_nonlinear'], {}), '(apply_nonlinear)\n', (2476, 2493), True, 'import openmdao.func_api as omf\n'), ((4350, 4368), 'openmdao.components.func_comp_common._add_options', '_add_options', (['self'], {}), '(self)\n', (4362, 4368), False, 'from openmdao.components.func_comp_common import _check_var_name, _copy_with_ignore, _add_options, jac_forward, jac_reverse, _get_tangents\n'), ((3455, 3502), 'openmdao.func_api.jax_decorate', 'omf.jax_decorate', (['self._apply_nonlinear_func._f'], {}), '(self._apply_nonlinear_func._f)\n', (3471, 3502), True, 'import openmdao.func_api as omf\n'), ((4572, 4599), 'openmdao.components.func_comp_common._check_var_name', '_check_var_name', (['self', 'name'], {}), '(self, name)\n', (4587, 4599), False, 'from openmdao.components.func_comp_common import _check_var_name, _copy_with_ignore, _add_options, jac_forward, jac_reverse, _get_tangents\n'), ((5099, 5126), 'openmdao.components.func_comp_common._check_var_name', '_check_var_name', (['self', 'name'], {}), '(self, name)\n', (5114, 5126), False, 'from openmdao.components.func_comp_common import _check_var_name, _copy_with_ignore, _add_options, jac_forward, jac_reverse, _get_tangents\n'), ((5148, 5220), 'openmdao.components.func_comp_common._copy_with_ignore', '_copy_with_ignore', (['meta', 'omf._allowed_add_output_args'], {'ignore': "('resid',)"}), "(meta, omf._allowed_add_output_args, ignore=('resid',))\n", (5165, 5220), False, 'from openmdao.components.func_comp_common import _check_var_name, _copy_with_ignore, _add_options, jac_forward, jac_reverse, _get_tangents\n'), ((14938, 14958), 'numpy.concatenate', 'np.concatenate', (['inds'], {}), '(inds)\n', (14952, 14958), True, 'import numpy as np\n'), ((16686, 16711), 
'numpy.zeros', 'np.zeros', (['coloring._shape'], {}), '(coloring._shape)\n', (16694, 16711), True, 'import numpy as np\n'), ((17881, 17937), 'openmdao.components.func_comp_common._get_tangents', '_get_tangents', (['vals', 'direction', 'coloring', 'argnums', 'trans'], {}), '(vals, direction, coloring, argnums, trans)\n', (17894, 17937), False, 'from openmdao.components.func_comp_common import _check_var_name, _copy_with_ignore, _add_options, jac_forward, jac_reverse, _get_tangents\n'), ((4683, 4759), 'openmdao.components.func_comp_common._copy_with_ignore', '_copy_with_ignore', (['meta', 'omf._allowed_declare_options_args'], {'ignore': 'optignore'}), '(meta, omf._allowed_declare_options_args, ignore=optignore)\n', (4700, 4759), False, 'from openmdao.components.func_comp_common import _check_var_name, _copy_with_ignore, _add_options, jac_forward, jac_reverse, _get_tangents\n'), ((4899, 4950), 'openmdao.func_api._filter_dict', 'omf._filter_dict', (['meta', 'omf._allowed_add_input_args'], {}), '(meta, omf._allowed_add_input_args)\n', (4915, 4950), True, 'import openmdao.func_api as omf\n'), ((3756, 3814), 'openmdao.func_api.jax_context', 'omf.jax_context', (['self._apply_nonlinear_func._f.__globals__'], {}), '(self._apply_nonlinear_func._f.__globals__)\n', (3771, 3814), True, 'import openmdao.func_api as omf\n'), ((3869, 3935), 'jax.jit', 'jit', (['self._apply_nonlinear_func_jax'], {'static_argnums': 'static_argnums'}), '(self._apply_nonlinear_func_jax, static_argnums=static_argnums)\n', (3872, 3935), False, 'from jax import jit, jacfwd, jacrev\n'), ((9689, 9751), 'openmdao.components.func_comp_common.jac_reverse', 'jac_reverse', (['self._apply_nonlinear_func_jax', 'argnums', 'tangents'], {}), '(self._apply_nonlinear_func_jax, argnums, tangents)\n', (9700, 9751), False, 'from openmdao.components.func_comp_common import _check_var_name, _copy_with_ignore, _add_options, jac_forward, jac_reverse, _get_tangents\n'), ((9786, 9799), 'numpy.asarray', 'np.asarray', (['a'], 
{}), '(a)\n', (9796, 9799), True, 'import numpy as np\n'), ((10714, 10726), 'numpy.vstack', 'np.vstack', (['j'], {}), '(j)\n', (10723, 10726), True, 'import numpy as np\n'), ((10881, 10943), 'openmdao.components.func_comp_common.jac_forward', 'jac_forward', (['self._apply_nonlinear_func_jax', 'argnums', 'tangents'], {}), '(self._apply_nonlinear_func_jax, argnums, tangents)\n', (10892, 10943), False, 'from openmdao.components.func_comp_common import _check_var_name, _copy_with_ignore, _add_options, jac_forward, jac_reverse, _get_tangents\n'), ((10978, 10991), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (10988, 10991), True, 'import numpy as np\n'), ((14720, 14759), 'numpy.prod', 'np.prod', (["meta['shape']"], {'dtype': 'INT_DTYPE'}), "(meta['shape'], dtype=INT_DTYPE)\n", (14727, 14759), True, 'import numpy as np\n'), ((14880, 14902), 'itertools.chain', 'chain', (['outputs', 'inputs'], {}), '(outputs, inputs)\n', (14885, 14902), False, 'from itertools import chain\n'), ((9316, 9329), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (9326, 9329), True, 'import numpy as np\n'), ((9351, 9388), 'numpy.prod', 'np.prod', (['a.shape[1:]'], {'dtype': 'INT_DTYPE'}), '(a.shape[1:], dtype=INT_DTYPE)\n', (9358, 9388), True, 'import numpy as np\n'), ((9421, 9483), 'openmdao.components.func_comp_common.jac_reverse', 'jac_reverse', (['self._apply_nonlinear_func_jax', 'argnums', 'tangents'], {}), '(self._apply_nonlinear_func_jax, argnums, tangents)\n', (9432, 9483), False, 'from openmdao.components.func_comp_common import _check_var_name, _copy_with_ignore, _add_options, jac_forward, jac_reverse, _get_tangents\n'), ((10452, 10465), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (10462, 10465), True, 'import numpy as np\n'), ((10475, 10513), 'numpy.prod', 'np.prod', (['a.shape[:-1]'], {'dtype': 'INT_DTYPE'}), '(a.shape[:-1], dtype=INT_DTYPE)\n', (10482, 10513), True, 'import numpy as np\n'), ((10559, 10621), 'openmdao.components.func_comp_common.jac_forward', 
'jac_forward', (['self._apply_nonlinear_func_jax', 'argnums', 'tangents'], {}), '(self._apply_nonlinear_func_jax, argnums, tangents)\n', (10570, 10621), False, 'from openmdao.components.func_comp_common import _check_var_name, _copy_with_ignore, _add_options, jac_forward, jac_reverse, _get_tangents\n'), ((11268, 11280), 'numpy.vstack', 'np.vstack', (['j'], {}), '(j)\n', (11277, 11280), True, 'import numpy as np\n'), ((9963, 10000), 'numpy.prod', 'np.prod', (['a.shape[1:]'], {'dtype': 'INT_DTYPE'}), '(a.shape[1:], dtype=INT_DTYPE)\n', (9970, 10000), True, 'import numpy as np\n'), ((11143, 11181), 'numpy.prod', 'np.prod', (['a.shape[:-1]'], {'dtype': 'INT_DTYPE'}), '(a.shape[:-1], dtype=INT_DTYPE)\n', (11150, 11181), True, 'import numpy as np\n')] |
import logging
import math
import os
from abc import abstractmethod, ABC
from typing import Sequence, Tuple, List, Union
import numpy as np
import pandas as pd
from .util import cache
from .util.cache import DelayedUpdateHook
from .util.string import objectRepr
from .util.typing import PandasNamedTuple
log = logging.getLogger(__name__)
class DistanceMetric(ABC):
    """
    Abstract base class for (symmetric) distance metrics
    """
    @abstractmethod
    def distance(self, namedTupleA: PandasNamedTuple, namedTupleB: PandasNamedTuple) -> float:
        # Computes the distance between two data points, given as named tuples (DataFrame rows).
        # Per the class contract, implementations are symmetric: distance(a, b) == distance(b, a).
        pass
    @abstractmethod
    def __str__(self):
        # Abstract on purpose: every concrete metric must provide a readable identifier.
        # The super().__str__() call here is never reached by concrete subclasses.
        super().__str__()
class SingleColumnDistanceMetric(DistanceMetric, ABC):
    """
    Base class for distance metrics that compare the two data points via a single column.
    """
    def __init__(self, column: str):
        """
        :param column: the name of the column whose values are compared
        """
        self.column = column
    @abstractmethod
    def _distance(self, valueA, valueB) -> float:
        pass
    def distance(self, namedTupleA: PandasNamedTuple, namedTupleB: PandasNamedTuple):
        # extract the relevant column value from each named tuple and delegate
        return self._distance(getattr(namedTupleA, self.column), getattr(namedTupleB, self.column))
class DistanceMatrixDFCache(cache.PersistentKeyValueCache):
    """
    A persistent key-value cache for pairwise distances, backed by a symmetric
    pandas DataFrame that is pickled to disk.

    Keys are tuples of two identifiers; values are stored symmetrically, i.e.
    set((a, b), d) also makes the value retrievable under (b, a).
    """
    def __init__(self, picklePath, saveOnUpdate=True, deferredSaveDelaySecs=1.0):
        """
        :param picklePath: path of the pickle file in which the distance matrix is persisted
        :param saveOnUpdate: whether to (eventually) persist the matrix after each update
        :param deferredSaveDelaySecs: delay in seconds after which a pending save is performed
        """
        self.deferredSaveDelaySecs = deferredSaveDelaySecs
        self.saveOnUpdate = saveOnUpdate
        self.picklePath = picklePath
        if os.path.exists(self.picklePath):
            self.distanceDf = pd.read_pickle(self.picklePath)
            log.info(f"Successfully loaded dataframe of shape {self.shape()} from cache. "
                     f"There are {self.numUnfilledEntries()} unfilled entries")
        else:
            log.info(f"No cached distance dataframe found in {picklePath}")
            self.distanceDf = pd.DataFrame()
        # maps identifier -> positional index in distanceDf (rows and columns share the same order)
        self.cachedIdToPosDict = {identifier: pos for pos, identifier in enumerate(self.distanceDf.index)}
        self._updateHook = DelayedUpdateHook(self.save, deferredSaveDelaySecs)
    def shape(self):
        """:return: the (square) shape of the distance matrix"""
        nEntries = len(self.distanceDf)
        return nEntries, nEntries
    @staticmethod
    def _assertTuple(key):
        assert isinstance(key, tuple) and len(key) == 2, f"Expected a tuple of two identifiers, instead got {key}"
    def set(self, key: Tuple[Union[str, int], Union[str, int]], value):
        """
        Stores the distance value symmetrically under the given pair of identifiers,
        enlarging the matrix as necessary.
        """
        self._assertTuple(key)
        for identifier in key:
            if identifier not in self.distanceDf.columns:
                log.info(f"Adding new column and row for identifier {identifier}")
                self.distanceDf[identifier] = np.nan
                self.distanceDf.loc[identifier] = np.nan
                # Bug fix: register the new identifier's position so get() can resolve it.
                # Previously only identifiers present at load time were in the dict, making
                # every value added during the current session unreadable via get().
                # The new row/column is appended at the end, so its position is the old size.
                self.cachedIdToPosDict[identifier] = len(self.cachedIdToPosDict)
        i1, i2 = key
        log.debug(f"Adding distance value for identifiers {i1}, {i2}")
        self.distanceDf.loc[i1, i2] = self.distanceDf.loc[i2, i1] = value
        if self.saveOnUpdate:
            self._updateHook.handleUpdate()
    def save(self):
        """Persists the current distance matrix to picklePath."""
        log.info(f"Saving new distance matrix to {self.picklePath}")
        dirName = os.path.dirname(self.picklePath)
        # guard: os.makedirs('') raises when picklePath has no directory component
        if dirName != "":
            os.makedirs(dirName, exist_ok=True)
        self.distanceDf.to_pickle(self.picklePath)
    def get(self, key: Tuple[Union[str, int], Union[str, int]]):
        """
        :return: the cached distance for the given identifier pair, or None if it is not cached
        """
        self._assertTuple(key)
        i1, i2 = key
        try:
            pos1, pos2 = self.cachedIdToPosDict[i1], self.cachedIdToPosDict[i2]
        except KeyError:
            return None
        result = self.distanceDf.iloc[pos1, pos2]
        if result is None or np.isnan(result):
            return None
        return result
    def numUnfilledEntries(self):
        """:return: the number of NaN entries in the distance matrix"""
        return self.distanceDf.isnull().sum().sum()
    def getAllCached(self, identifier: Union[str, int]):
        """:return: a single-column data frame with all cached distances for the given identifier"""
        return self.distanceDf[[identifier]]
class CachedDistanceMetric(DistanceMetric, cache.CachedValueProviderMixin):
    """
    A decorator which provides caching for a distance metric, i.e. the metric is computed only if the
    value for the given pair of identifiers is not found within the persistent cache
    """
    def __init__(self, distanceMetric: DistanceMetric, keyValueCache: cache.PersistentKeyValueCache, persistCache=False):
        """
        :param distanceMetric: the underlying metric whose results shall be cached
        :param keyValueCache: the persistent cache in which computed distances are stored
        :param persistCache: whether to persist the cache when pickling
        """
        cache.CachedValueProviderMixin.__init__(self, keyValueCache, persistCache=persistCache)
        self.metric = distanceMetric
    def __getstate__(self):
        return cache.CachedValueProviderMixin.__getstate__(self)
    def distance(self, namedTupleA, namedTupleB):
        # normalize the pair so the smaller identifier comes first: the metric is
        # symmetric (see DistanceMetric), so each unordered pair is cached only once
        idA, idB = namedTupleA.Index, namedTupleB.Index
        if idB < idA:
            idA, idB = idB, idA
            namedTupleA, namedTupleB = namedTupleB, namedTupleA
        return self._provideValue((idA, idB), (namedTupleA, namedTupleB))
    def _computeValue(self, key: Tuple[Union[str, int], Union[str, int]], data: Tuple[PandasNamedTuple, PandasNamedTuple]):
        # called by the mixin on a cache miss
        valueA, valueB = data
        return self.metric.distance(valueA, valueB)
    def fillCache(self, dfIndexedById: pd.DataFrame):
        """
        Fill cache for all identifiers in the provided dataframe

        Args:
            dfIndexedById: Dataframe that is indexed by identifiers of the members
        """
        # iterate over all unordered pairs (valueA, valueB) with valueB after valueA
        for position, valueA in enumerate(dfIndexedById.itertuples()):
            if position % 10 == 0:
                log.info(f"Processed {round(100 * position / len(dfIndexedById), 2)}%")
            for valueB in dfIndexedById[position + 1:].itertuples():
                self.distance(valueA, valueB)
    def __str__(self):
        return str(self.metric)
class LinearCombinationDistanceMetric(DistanceMetric):
    """
    A metric whose value is a weighted sum of other distance metrics.
    """
    def __init__(self, metrics: Sequence[Tuple[float, DistanceMetric]]):
        """
        :param metrics: a sequence of tuples (weight, distance metric)
        """
        # drop zero-weight metrics up front; they cannot contribute to the sum
        self.metrics = [(weight, metric) for weight, metric in metrics if weight != 0]
        if not self.metrics:
            raise ValueError(f"List of metrics is empty after removing all 0-weight metrics; passed {metrics}")
    def distance(self, namedTupleA, namedTupleB):
        return sum(weight * metric.distance(namedTupleA, namedTupleB) for weight, metric in self.metrics)
    def __str__(self):
        return f"Linear combination of {[(weight, str(metric)) for weight, metric in self.metrics]}"
class HellingerDistanceMetric(SingleColumnDistanceMetric):
    """
    Hellinger distance between discrete probability distributions stored in a single column.
    """
    _SQRT2 = np.sqrt(2)
    def __init__(self, column: str, checkInput=False):
        """
        :param column: the column containing the distributions (as numpy arrays)
        :param checkInput: whether to validate that inputs are proper probability vectors
        """
        super().__init__(column)
        self.checkInput = checkInput
    def __str__(self):
        return objectRepr(self, ["column"])
    def _checkInputValue(self, inputValue):
        # validate type, normalization and entry range of a probability vector
        if not isinstance(inputValue, np.ndarray):
            raise ValueError(f"Expected to find numpy arrays in {self.column}")
        if not math.isclose(inputValue.sum(), 1):
            raise ValueError(f"The entries in {self.column} have to sum to 1")
        if not all((inputValue >= 0)*(inputValue <= 1)):
            raise ValueError(f"The entries in {self.column} have to be in the range [0, 1]")
    def _distance(self, valueA, valueB):
        if self.checkInput:
            self._checkInputValue(valueA)
            self._checkInputValue(valueB)
        # H(p, q) = ||sqrt(p) - sqrt(q)||_2 / sqrt(2)
        sqrtDiff = np.sqrt(valueA) - np.sqrt(valueB)
        return np.linalg.norm(sqrtDiff) / self._SQRT2
class EuclideanDistanceMetric(SingleColumnDistanceMetric):
    """
    Euclidean (L2) distance between the values of a single column.
    """
    def __init__(self, column: str):
        """
        :param column: the column containing the (numeric/array-valued) entries to compare
        """
        super().__init__(column)
    def _distance(self, valueA, valueB):
        return np.linalg.norm(valueA - valueB)
    def __str__(self):
        return objectRepr(self, ["column"])
class IdentityDistanceMetric(DistanceMetric):
    """
    Binary distance: 0 if all of the given key attributes match, otherwise 1.
    """
    def __init__(self, keys: Union[str, List[str]]):
        """
        :param keys: a single attribute name or a list of attribute names to compare
        """
        if not isinstance(keys, list):
            keys = [keys]
        assert keys != [], "At least one key has to be provided"
        self.keys = keys
    def distance(self, namedTupleA, namedTupleB):
        allEqual = all(getattr(namedTupleA, key) == getattr(namedTupleB, key) for key in self.keys)
        return 0 if allEqual else 1
    def __str__(self):
        return f"{self.__class__.__name__} based on keys: {self.keys}"
class RelativeBitwiseEqualityDistanceMetric(SingleColumnDistanceMetric):
    """
    Distance between two 0/1 vectors: 1 minus the fraction of positions that are set in
    both vectors among the positions that are set in at least one (the Jaccard distance).
    """
    def __init__(self, column: str, checkInput=False):
        """
        :param column: the column containing the binary vectors (1-d numpy arrays)
        :param checkInput: whether to validate shape and entries of the inputs
        """
        super().__init__(column)
        self.checkInput = checkInput
    def checkInputValue(self, inputValue):
        # validate that the input is a 1-d numpy array with entries in {0, 1}
        if not isinstance(inputValue, np.ndarray):
            raise ValueError(f"Expected to find numpy arrays in {self.column}")
        if not len(inputValue.shape) == 1:
            raise ValueError(f"The input array should be of shape (n,)")
        if not set(inputValue).issubset({0, 1}):
            raise ValueError("The input array should only have entries in {0, 1}")
    def _distance(self, valueA, valueB):
        if self.checkInput:
            self.checkInputValue(valueA)
            self.checkInputValue(valueB)
        # positions where at least one vector has a set bit (size of the union)
        denom = np.count_nonzero(valueA + valueB)
        if denom == 0:
            return 0
        # the dot product counts positions where both vectors are set (the intersection)
        return 1 - np.dot(valueA, valueB) / denom
    def __str__(self):
        return f"{self.__class__.__name__} for column {self.column}"
| [
"logging.getLogger",
"os.path.exists",
"pandas.read_pickle",
"numpy.sqrt",
"numpy.count_nonzero",
"os.path.dirname",
"numpy.dot",
"numpy.isnan",
"numpy.linalg.norm",
"pandas.DataFrame"
] | [((314, 341), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (331, 341), False, 'import logging\n'), ((6214, 6224), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (6221, 6224), True, 'import numpy as np\n'), ((1368, 1399), 'os.path.exists', 'os.path.exists', (['self.picklePath'], {}), '(self.picklePath)\n', (1382, 1399), False, 'import os\n'), ((7298, 7329), 'numpy.linalg.norm', 'np.linalg.norm', (['(valueA - valueB)'], {}), '(valueA - valueB)\n', (7312, 7329), True, 'import numpy as np\n'), ((8736, 8769), 'numpy.count_nonzero', 'np.count_nonzero', (['(valueA + valueB)'], {}), '(valueA + valueB)\n', (8752, 8769), True, 'import numpy as np\n'), ((1431, 1462), 'pandas.read_pickle', 'pd.read_pickle', (['self.picklePath'], {}), '(self.picklePath)\n', (1445, 1462), True, 'import pandas as pd\n'), ((1754, 1768), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1766, 1768), True, 'import pandas as pd\n'), ((2948, 2980), 'os.path.dirname', 'os.path.dirname', (['self.picklePath'], {}), '(self.picklePath)\n', (2963, 2980), False, 'import os\n'), ((3387, 3403), 'numpy.isnan', 'np.isnan', (['result'], {}), '(result)\n', (3395, 3403), True, 'import numpy as np\n'), ((7061, 7076), 'numpy.sqrt', 'np.sqrt', (['valueA'], {}), '(valueA)\n', (7068, 7076), True, 'import numpy as np\n'), ((7079, 7094), 'numpy.sqrt', 'np.sqrt', (['valueB'], {}), '(valueB)\n', (7086, 7094), True, 'import numpy as np\n'), ((8849, 8871), 'numpy.dot', 'np.dot', (['valueA', 'valueB'], {}), '(valueA, valueB)\n', (8855, 8871), True, 'import numpy as np\n')] |
import os
import cv2
import numpy as np
import pandas as pd
import skimage.io
import torch
from torch.utils.data import Dataset
class SpacenetLocDataset(Dataset):
    """
    Training/validation dataset for SpaceNet-6 SAR imagery with segmentation masks.

    Tile ids are selected from a fold-assignment CSV; SAR intensity tiles are read from
    ``<data_path>/SAR-Intensity`` and the corresponding masks from ``/wdata/masks``.
    """
    def __init__(self, data_path, mode, fold=0, folds_csv='folds.csv', transforms=None, normalize=None, multiplier=1,
                 fix_orientation=True, filter_on_border=True):
        # mode: "train" selects all folds except `fold`; any other value selects fold `fold`
        # multiplier: repeats the training id list to lengthen an epoch
        # fix_orientation: if True, tiles with a positive SAR orientation flag are rotated 180 degrees
        # filter_on_border: if True, validation excludes tiles flagged as "onborder" in the folds CSV
        super().__init__()
        self.data_path = data_path
        self.mode = mode
        self.fix_orientation = fix_orientation
        df = pd.read_csv(folds_csv)
        self.df = df
        self.normalize = normalize  # NOTE(review): stored but unused in this class — possibly consumed elsewhere
        self.fold = fold
        if self.mode == "train":
            ids = df[df['fold'] != fold]['id'].tolist()
        else:
            if filter_on_border:
                ids = list(set(df[(df['fold'] == fold) & (df["onborder"] == False)]['id'].tolist()))
            else:
                ids = list(set(df[(df['fold'] == fold)]['id'].tolist()))
        self.transforms = transforms
        self.names = ids
        if mode == "train":
            self.names = self.names * multiplier
        # SAR_orientations.txt lines have the form "<tile_id> <orientation>"
        orientations = pd.read_csv(os.path.join(data_path, "SummaryData/SAR_orientations.txt"), header=None).values
        orientations_dict = {}
        for row in orientations:
            id, o = row[0].split(" ")
            orientations_dict[id] = float(o)
        self.orientations_dict = orientations_dict
    def __len__(self):
        return len(self.names)
    def __getitem__(self, idx):
        name = self.names[idx]
        img_path = os.path.join(self.data_path, "SAR-Intensity",
                                "SN6_Train_AOI_11_Rotterdam_SAR-Intensity_" + name + ".tif")
        image = skimage.io.imread(img_path)
        # rescale raw SAR intensities to uint8; 92 is presumably the empirical intensity
        # ceiling of this dataset — TODO confirm
        image = (image * (255 / 92)).astype(np.uint8)
        mask_path = os.path.join("/wdata/masks", name + ".png")
        mask = cv2.imread(mask_path)
        # water_mask = cv2.imread(os.path.join(self.data_path, "water_masks", name + ".png"), cv2.IMREAD_GRAYSCALE)
        # water_mask = np.expand_dims(water_mask, -1)
        # mask = np.concatenate([mask, water_mask], -1)
        # orientation key is the first two underscore-separated tokens of the tile name
        orientation = self.orientations_dict["_".join(name.split("_")[:2])]
        if orientation > 0 and self.fix_orientation:
            # rotate image and mask together so they stay aligned
            image = cv2.rotate(image, cv2.ROTATE_180)
            mask = cv2.rotate(mask, cv2.ROTATE_180)
        sample = self.transforms(image=image, mask=mask)
        sample['img_name'] = name
        sample['orientation'] = orientation
        # convert HWC uint8 arrays to CHW float tensors scaled to [0, 1]
        sample['mask'] = torch.from_numpy(np.ascontiguousarray(np.moveaxis(sample["mask"], -1, 0))).float() / 255.
        sample['image'] = torch.from_numpy(np.moveaxis(sample["image"] / 255., -1, 0).astype(np.float32))
        return sample
class TestSpacenetLocDataset(Dataset):
    """
    Inference-time dataset: reads SAR intensity tiles from ``<data_path>/SAR-Intensity``
    and applies the per-tile 180-degree orientation fix before transforming.
    """
    def __init__(self, data_path, transforms, orientation_csv):
        """
        :param data_path: directory containing the "SAR-Intensity" folder
        :param transforms: albumentations-style callable applied as transforms(image=...)
        :param orientation_csv: file whose lines have the form "<tile_id> <orientation>"
        """
        super().__init__()
        self.data_path = data_path
        sar_dir = os.path.join(data_path, "SAR-Intensity")
        # strip the ".tif" suffix to obtain the tile names
        self.names = [fname[:-4] for fname in os.listdir(sar_dir) if fname.endswith("tif")]
        self.transforms = transforms
        rows = pd.read_csv(orientation_csv, header=None).values
        orientations_dict = {}
        for row in rows:
            tile_id, orientation = row[0].split(" ")
            orientations_dict[tile_id] = float(orientation)
        self.orientations_dict = orientations_dict
    def __len__(self):
        return len(self.names)
    def __getitem__(self, idx):
        name = self.names[idx]
        image = skimage.io.imread(os.path.join(self.data_path, "SAR-Intensity", name + ".tif"))
        # rescale raw SAR intensities to uint8 (same scaling as the training dataset)
        image = (image * (255 / 92)).astype(np.uint8)
        # orientation key consists of two underscore-separated tokens inside the file name
        orientation = self.orientations_dict["_".join(name.split("_")[-4:-2])]
        if orientation > 0:
            image = cv2.rotate(image, cv2.ROTATE_180)
        sample = self.transforms(image=image)
        sample['img_name'] = name
        sample['orientation'] = orientation
        # HWC uint8 -> CHW float tensor scaled to [0, 1]
        sample['image'] = torch.from_numpy(np.moveaxis(sample["image"] / 255., -1, 0).astype(np.float32))
        return sample
| [
"pandas.read_csv",
"os.path.join",
"cv2.rotate",
"numpy.moveaxis",
"cv2.imread"
] | [((495, 517), 'pandas.read_csv', 'pd.read_csv', (['folds_csv'], {}), '(folds_csv)\n', (506, 517), True, 'import pandas as pd\n'), ((1519, 1630), 'os.path.join', 'os.path.join', (['self.data_path', '"""SAR-Intensity"""', "('SN6_Train_AOI_11_Rotterdam_SAR-Intensity_' + name + '.tif')"], {}), "(self.data_path, 'SAR-Intensity', \n 'SN6_Train_AOI_11_Rotterdam_SAR-Intensity_' + name + '.tif')\n", (1531, 1630), False, 'import os\n'), ((1776, 1819), 'os.path.join', 'os.path.join', (['"""/wdata/masks"""', "(name + '.png')"], {}), "('/wdata/masks', name + '.png')\n", (1788, 1819), False, 'import os\n'), ((1835, 1856), 'cv2.imread', 'cv2.imread', (['mask_path'], {}), '(mask_path)\n', (1845, 1856), False, 'import cv2\n'), ((3423, 3483), 'os.path.join', 'os.path.join', (['self.data_path', '"""SAR-Intensity"""', "(name + '.tif')"], {}), "(self.data_path, 'SAR-Intensity', name + '.tif')\n", (3435, 3483), False, 'import os\n'), ((2233, 2266), 'cv2.rotate', 'cv2.rotate', (['image', 'cv2.ROTATE_180'], {}), '(image, cv2.ROTATE_180)\n', (2243, 2266), False, 'import cv2\n'), ((2286, 2318), 'cv2.rotate', 'cv2.rotate', (['mask', 'cv2.ROTATE_180'], {}), '(mask, cv2.ROTATE_180)\n', (2296, 2318), False, 'import cv2\n'), ((3037, 3078), 'pandas.read_csv', 'pd.read_csv', (['orientation_csv'], {'header': 'None'}), '(orientation_csv, header=None)\n', (3048, 3078), True, 'import pandas as pd\n'), ((3709, 3742), 'cv2.rotate', 'cv2.rotate', (['image', 'cv2.ROTATE_180'], {}), '(image, cv2.ROTATE_180)\n', (3719, 3742), False, 'import cv2\n'), ((1101, 1160), 'os.path.join', 'os.path.join', (['data_path', '"""SummaryData/SAR_orientations.txt"""'], {}), "(data_path, 'SummaryData/SAR_orientations.txt')\n", (1113, 1160), False, 'import os\n'), ((2612, 2655), 'numpy.moveaxis', 'np.moveaxis', (["(sample['image'] / 255.0)", '(-1)', '(0)'], {}), "(sample['image'] / 255.0, -1, 0)\n", (2623, 2655), True, 'import numpy as np\n'), ((2913, 2953), 'os.path.join', 'os.path.join', (['data_path', 
'"""SAR-Intensity"""'], {}), "(data_path, 'SAR-Intensity')\n", (2925, 2953), False, 'import os\n'), ((3910, 3953), 'numpy.moveaxis', 'np.moveaxis', (["(sample['image'] / 255.0)", '(-1)', '(0)'], {}), "(sample['image'] / 255.0, -1, 0)\n", (3921, 3953), True, 'import numpy as np\n'), ((2517, 2551), 'numpy.moveaxis', 'np.moveaxis', (["sample['mask']", '(-1)', '(0)'], {}), "(sample['mask'], -1, 0)\n", (2528, 2551), True, 'import numpy as np\n')] |
import numpy as np
from magenpy.simulation import GWASSimulator
class AnnotatedGWASSimulator(GWASSimulator):
    """
    A GWAS simulator in which the per-SNP heritability and the per-SNP causal
    (mixture) probability are driven by linear functions of genomic annotations.
    """
    def __init__(self, bed_files, **kwargs):
        super().__init__(bed_files, **kwargs)
        # For now, we will restrict to 2 mixture components.
        assert self.n_mixtures == 2
        # per-annotation weights for per-SNP heritability (None -> base-class behavior)
        self.w_h2 = None
        # per-annotation weights for the causal-mixture logit (None -> base-class behavior)
        self.w_pi = None
    def set_w_h2(self, w_h2):
        # set the heritability weights and immediately refresh the per-SNP values
        assert len(w_h2) == self.n_annotations
        self.w_h2 = w_h2
        self.set_per_snp_heritability()
    def simulate_w_h2(self, enrichment=None):
        # TODO: not implemented yet (see simulate_w_pi for the analogous pi version)
        pass
    def set_w_pi(self, w_pi):
        # set the mixture-probability weights and immediately refresh the per-SNP values
        assert len(w_pi) == self.n_annotations
        self.w_pi = w_pi
        self.set_per_snp_mixture_probability()
    def simulate_w_pi(self, enrichment=None):
        """
        :param enrichment: A dictionary of enrichment values where the
        key is the annotation and the value is the enrichment
        """
        enrichment = enrichment or {}
        enr = []
        # annotations missing from the dictionary default to enrichment 1 (log-weight 0)
        for annot in self.annotations[self.chromosomes[0]].annotations:
            try:
                enr.append(enrichment[annot])
            except KeyError:
                enr.append(1.)
        self.w_pi = np.log(np.array(enr))
    def set_per_snp_heritability(self):
        # without annotation weights, defer to the base-class behavior
        if self.w_h2 is None:
            return super().set_per_snp_heritability()
        self.per_snp_h2g = {}
        for c in self.chromosomes:
            # per-SNP h2 is a linear combination of annotation values, clipped at 0
            self.per_snp_h2g[c] = np.clip(np.dot(self.annotations[c].values(), self.w_h2),
                                          a_min=0., a_max=np.inf)
    def set_per_snp_mixture_probability(self):
        # without annotation weights, defer to the base-class behavior
        if self.w_pi is None:
            return super().set_per_snp_mixture_probability()
        self.per_snp_pi = {}
        for c in self.chromosomes:
            # logistic model: P(causal) = sigmoid(X_with_intercept @ [log(pi[1]), w_pi]),
            # i.e. the intercept term is the log of the base causal proportion —
            # presumably the intercept column of the annotation matrix is all ones; verify
            prob = 1./(1. + np.exp(-np.dot(self.annotations[c].values(add_intercept=True),
                                            np.concatenate([[np.log(self.pi[1])], self.w_pi]))))
            # stack as (non-causal, causal) probabilities per SNP
            self.per_snp_pi[c] = np.array([1. - prob, prob]).T
    def get_heritability_enrichment(self):
        """
        Computes, for each binary annotation, the heritability enrichment:
        (share of total heritability in the annotation) / (share of variants in the annotation).
        """
        tabs = self.to_true_beta_table(per_chromosome=True)
        total_heritability = sum([tab['Heritability'].sum() for c, tab in tabs.items()])
        heritability_per_binary_annot = {
            bin_annot: 0. for bin_annot in self.annotations[self.chromosomes[0]].binary_annotations
        }
        n_variants_per_binary_annot = {
            bin_annot: 0 for bin_annot in heritability_per_binary_annot
        }
        # accumulate, per annotation, the heritability mass and variant count over all chromosomes
        for c, c_size in self.shapes.items():
            for bin_annot in self.annotations[c].binary_annotations:
                annot_idx = self.annotations[c].get_binary_annotation_index(bin_annot)
                heritability_per_binary_annot[bin_annot] += tabs[c].iloc[np.array(annot_idx), :]['Heritability'].sum()
                n_variants_per_binary_annot[bin_annot] += len(annot_idx)
        return {
            bin_annot: (ba_h2g/total_heritability) / (n_variants_per_binary_annot[bin_annot] / self.M)
            for bin_annot, ba_h2g in heritability_per_binary_annot.items()
        }
| [
"numpy.array",
"numpy.log"
] | [((1193, 1206), 'numpy.array', 'np.array', (['enr'], {}), '(enr)\n', (1201, 1206), True, 'import numpy as np\n'), ((1984, 2012), 'numpy.array', 'np.array', (['[1.0 - prob, prob]'], {}), '([1.0 - prob, prob])\n', (1992, 2012), True, 'import numpy as np\n'), ((2760, 2779), 'numpy.array', 'np.array', (['annot_idx'], {}), '(annot_idx)\n', (2768, 2779), True, 'import numpy as np\n'), ((1915, 1933), 'numpy.log', 'np.log', (['self.pi[1]'], {}), '(self.pi[1])\n', (1921, 1933), True, 'import numpy as np\n')] |
import utils
import pandas as pd
import numpy as np
import tqdm
import argparse
from matplotlib import pyplot as plt
import os
def main(args):
    """
    Read the review spreadsheets, score sentence-level sentiment with the trained model,
    plot the sorted score curve, and export the processed sentences as a TSV file.
    """
    data = utils.read_pd_xls(args.datapath)
    processed_sentence = utils.content2sentence(data, args.modelpath)
    scores = np.array(processed_sentence['emotion label'])
    scores.sort()
    # scatter plot of the sorted emotion scores
    plt.scatter(np.arange(len(scores)), scores, s=0.2)
    plt.xlabel('review id')
    plt.ylabel('emotion score')
    plt.savefig(os.path.join(args.savepath, 'curve.png'))
    # export the processed sentences (tab-separated, no header)
    processed_sentence.to_csv(os.path.join(args.savepath, 'processed_sentence.csv'), sep='\t', header=False)
if __name__ == '__main__':
    # Command-line entry point: configure paths for the input spreadsheets,
    # the trained sentiment model checkpoint, and the output directory.
    parser = argparse.ArgumentParser(description='Sentimantic')
    parser.add_argument('--datapath', type=str, default='./data/')
    parser.add_argument('--modelpath', type=str, default='./checkpoints/tut4-model.pt')
    parser.add_argument('--savepath', type=str, default='./results')
    args = parser.parse_args()
    # expand datapath to the two concrete Excel workbooks expected by main()
    args.datapath = [os.path.join(args.datapath, 'Ocado data.xls'), os.path.join(args.datapath, 'Ocado _Mid June_update Azar.xlsx')]
main(args) | [
"argparse.ArgumentParser",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"os.path.join",
"numpy.array",
"utils.read_pd_xls",
"matplotlib.pyplot.scatter",
"utils.content2sentence"
] | [((156, 188), 'utils.read_pd_xls', 'utils.read_pd_xls', (['args.datapath'], {}), '(args.datapath)\n', (173, 188), False, 'import utils\n'), ((214, 258), 'utils.content2sentence', 'utils.content2sentence', (['data', 'args.modelpath'], {}), '(data, args.modelpath)\n', (236, 258), False, 'import utils\n'), ((279, 324), 'numpy.array', 'np.array', (["processed_sentence['emotion label']"], {}), "(processed_sentence['emotion label'])\n", (287, 324), True, 'import numpy as np\n'), ((432, 456), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'s': '(0.2)'}), '(x, y, s=0.2)\n', (443, 456), True, 'from matplotlib import pyplot as plt\n'), ((461, 484), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""review id"""'], {}), "('review id')\n", (471, 484), True, 'from matplotlib import pyplot as plt\n'), ((489, 516), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""emotion score"""'], {}), "('emotion score')\n", (499, 516), True, 'from matplotlib import pyplot as plt\n'), ((743, 793), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Sentimantic"""'}), "(description='Sentimantic')\n", (766, 793), False, 'import argparse\n'), ((533, 573), 'os.path.join', 'os.path.join', (['args.savepath', '"""curve.png"""'], {}), "(args.savepath, 'curve.png')\n", (545, 573), False, 'import os\n'), ((619, 672), 'os.path.join', 'os.path.join', (['args.savepath', '"""processed_sentence.csv"""'], {}), "(args.savepath, 'processed_sentence.csv')\n", (631, 672), False, 'import os\n'), ((1090, 1135), 'os.path.join', 'os.path.join', (['args.datapath', '"""Ocado data.xls"""'], {}), "(args.datapath, 'Ocado data.xls')\n", (1102, 1135), False, 'import os\n'), ((1137, 1200), 'os.path.join', 'os.path.join', (['args.datapath', '"""Ocado _Mid June_update Azar.xlsx"""'], {}), "(args.datapath, 'Ocado _Mid June_update Azar.xlsx')\n", (1149, 1200), False, 'import os\n')] |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from emukit.test_functions.quadrature import hennig1D
def test_hennig1D_return_shape():
    """The Hennig 1D integrand should return a 2d column vector of shape (n, 1)."""
    func, _ = hennig1D()
    X = np.zeros((2, 1))
    Y = func.f(X)
    assert Y.ndim == 2
    assert Y.shape == (2, 1)
| [
"emukit.test_functions.quadrature.hennig1D",
"numpy.zeros"
] | [((295, 305), 'emukit.test_functions.quadrature.hennig1D', 'hennig1D', ([], {}), '()\n', (303, 305), False, 'from emukit.test_functions.quadrature import hennig1D\n'), ((314, 330), 'numpy.zeros', 'np.zeros', (['(2, 1)'], {}), '((2, 1))\n', (322, 330), True, 'import numpy as np\n')] |
"""
Evaluate vega expressions language
"""
import datetime as dt
from functools import reduce, wraps
import itertools
import math
import operator
import random
import sys
import time as timemod
from typing import Any, Callable, Dict, Optional, List, Union, overload
import numpy as np
import pandas as pd
from dateutil import tz
from altair_transform.utils import evaljs, undefined, JSRegex
def eval_vegajs(expression: str, datum: pd.DataFrame = None) -> pd.DataFrame:
    """Evaluate a vega expression string within the Vega-JS namespace.

    If *datum* is given, it is exposed to the expression as ``datum``.
    """
    env = {} if datum is None else {"datum": datum}
    # Namespace entries take precedence, matching the original update() order.
    env.update(VEGAJS_NAMESPACE)
    return evaljs(expression, env)
def vectorize(func: Callable) -> Callable:
    """Decorator that broadcasts a scalar function over ``pd.Series`` arguments.

    If no argument is a Series, ``func`` is called directly. Otherwise the
    function is applied element-wise over the combined index of all Series
    arguments, returning a ``pd.Series`` aligned to that index.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Collect every positional or keyword argument that is a Series.
        series_args = [
            arg
            for arg in itertools.chain(args, kwargs.values())
            if isinstance(arg, pd.Series)
        ]
        if not series_args:
            # Pure scalar call: no broadcasting needed.
            return func(*args, **kwargs)
        else:
            # Combine the indexes of all Series arguments with `|`
            # (set-union semantics on pandas Index objects).
            index = reduce(operator.or_, [s.index for s in series_args])
            def _get(x, i):
                # Series arguments contribute their value at label i
                # (NaN when the label is missing); scalars pass through.
                return x.get(i, math.nan) if isinstance(x, pd.Series) else x
            return pd.Series(
                [
                    func(
                        *(_get(arg, i) for arg in args),
                        **{k: _get(v, i) for k, v in kwargs.items()},
                    )
                    for i in index
                ],
                index=index,
            )
    # Widen the wrapped function's annotations so each parameter/return
    # also advertises acceptance of a pd.Series.
    if hasattr(func, "__annotations__"):
        wrapper.__annotations__ = {
            key: Union[pd.Series, val] for key, val in func.__annotations__.items()
        }
    return wrapper
# Type Checking Functions
@vectorize
def isArray(value: Any) -> bool:
"""Returns true if value is an array, false otherwise."""
return isinstance(value, (list, np.ndarray))
@vectorize
def isBoolean(value: Any) -> bool:
"""Returns true if value is a boolean (true or false), false otherwise."""
return isinstance(value, (bool, np.bool_))
@vectorize
def isDate(value: Any) -> bool:
"""Returns true if value is a Date object, false otherwise.
This method will return false for timestamp numbers or
date-formatted strings; it recognizes Date objects only.
"""
return isinstance(value, dt.datetime)
@vectorize
def isDefined(value: Any) -> bool:
"""Returns true if value is a defined value, false if value equals undefined.
This method will return true for null and NaN values.
"""
# TODO: support implicitly undefined values?
return value is not undefined
@vectorize
def isNumber(value: Any) -> bool:
"""Returns true if value is a number, false otherwise.
NaN and Infinity are considered numbers.
"""
return np.issubdtype(type(value), np.number)
@vectorize
def isObject(value: Any) -> bool:
"""Returns true if value is an object, false otherwise.
Following JavaScript typeof convention, null values are considered objects.
"""
return value is None or isinstance(value, dict)
@vectorize
def isRegExp(value: Any) -> bool:
"""
Returns true if value is a RegExp (regular expression)
object, false otherwise.
"""
return isinstance(value, JSRegex)
@vectorize
def isString(value: Any) -> bool:
"""Returns true if value is a string, false otherwise."""
return isinstance(value, str)
@vectorize
def isValid(value: Any) -> bool:
"""Returns true if value is not null, undefined, or NaN."""
return not (value is None or value is undefined or pd.isna(value))
# Type Coercion Functions
@vectorize
def toBoolean(value: Any) -> bool:
"""
Coerces the input value to a boolean.
Null values and empty strings are mapped to null.
"""
return bool(value)
@vectorize
def toDate(value: Any) -> Optional[float]:
"""
Coerces the input value to a Date instance.
Null values and empty strings are mapped to null.
If an optional parser function is provided, it is used to
perform date parsing, otherwise Date.parse is used.
"""
if isinstance(value, (float, int)):
return value
if value is None or value == "":
return None
return pd.to_datetime(value).timestamp() * 1000
@vectorize
def toNumber(value: Any) -> Optional[float]:
"""
Coerces the input value to a number.
Null values and empty strings are mapped to null.
"""
if value is None or value == "":
return None
return float(value)
@vectorize
def toString(value: Any) -> Optional[str]:
    """
    Coerces the input value to a string.
    Null values and empty strings are mapped to null.
    """
    if value is None or value == "":
        return None
    # Whole-number floats render without the trailing ".0", JS-style.
    if isinstance(value, float) and value % 1 == 0:
        value = int(value)
    return str(value)
# Date/Time Functions
def now() -> float:
"""Returns the timestamp for the current time."""
return round(timemod.time() * 1000, 0)
@overload
def datetime() -> dt.datetime:
...
@overload # noqa: F811
def datetime(timestamp: float) -> dt.datetime:
...
@overload # noqa: F811
def datetime(
year: float,
month: int,
day: int = 0,
hour: int = 0,
min: int = 0,
sec: int = 0,
millisec: float = 0,
) -> dt.datetime:
...
@vectorize # noqa: F811
def datetime(*args):
    """Returns a new Date instance.

    datetime()  # current time
    datetime(timestamp)
    datetime(year, month[, day, hour, min, sec, millisec])

    The month is 0-based, such that 1 represents February.

    Raises:
        ValueError: if more than 7 arguments are given.
    """
    if len(args) == 0:
        return dt.datetime.now()
    if len(args) == 1:
        # JS timestamps are in milliseconds.
        return dt.datetime.fromtimestamp(0.001 * args[0])
    if len(args) <= 7:
        parts = list(map(int, args))
        # JS month is zero-based; Python's is one-based. This must also be
        # applied in the two-argument case (the old code returned
        # dt.datetime(year, month, 1) un-shifted, so datetime(2020, 0) raised).
        parts[1] += 1
        if len(parts) == 2:
            parts.append(1)  # JS Date defaults day-of-month to 1, not 0
        if len(parts) == 7:
            parts[6] = int(parts[6] * 1000)  # milliseconds to microseconds
        return dt.datetime(*parts)
    raise ValueError("Too many arguments")
@vectorize
def date(datetime: dt.datetime) -> int:
"""
Returns the day of the month for the given datetime value, in local time.
"""
return datetime.day
@vectorize
def day(datetime: dt.datetime) -> int:
"""
Returns the day of the week for the given datetime value, in local time.
"""
return (datetime.weekday() + 1) % 7
@vectorize
def year(datetime: dt.datetime) -> int:
"""Returns the year for the given datetime value, in local time."""
return datetime.year
@vectorize
def quarter(datetime: dt.datetime) -> int:
"""
Returns the quarter of the year (0-3) for the given datetime value,
in local time.
"""
return (datetime.month - 1) // 3
@vectorize
def month(datetime: dt.datetime) -> int:
"""
Returns the (zero-based) month for the given datetime value, in local time.
"""
return datetime.month - 1
@vectorize
def hours(datetime: dt.datetime) -> int:
"""
Returns the hours component for the given datetime value, in local time.
"""
return datetime.hour
@vectorize
def minutes(datetime: dt.datetime) -> int:
"""
Returns the minutes component for the given datetime value, in local time.
"""
return datetime.minute
@vectorize
def seconds(datetime: dt.datetime) -> int:
"""
Returns the seconds component for the given datetime value, in local time.
"""
return datetime.second
@vectorize
def milliseconds(datetime: dt.datetime) -> float:
"""
Returns the milliseconds component for the given datetime value,
in local time.
"""
return datetime.microsecond / 1000
@vectorize
def time(datetime: dt.datetime) -> float:
"""Returns the epoch-based timestamp for the given datetime value."""
return datetime.timestamp() * 1000
@vectorize
def timezoneoffset(datetime):
# TODO: use tzlocal?
raise NotImplementedError("timezoneoffset()")
@vectorize
def utc(
year: int,
month: int = 0,
day: int = 1,
hour: int = 0,
min: int = 0,
sec: int = 0,
millisec: int = 0,
) -> float:
"""
Returns a timestamp for the given UTC date.
The month is 0-based, such that 1 represents February.
"""
return (
dt.datetime(
int(year),
int(month) + 1,
int(day),
int(hour),
int(min),
int(sec),
int(millisec * 1000),
tzinfo=dt.timezone.utc,
).timestamp()
* 1000
)
@vectorize
def utcdate(datetime: dt.datetime) -> int:
"""Returns the day of the month for the given datetime value, in UTC time."""
return date(datetime.astimezone(tz.tzutc()))
@vectorize
def utcday(datetime: dt.datetime) -> int:
"""Returns the day of the week for the given datetime value, in UTC time."""
return day(datetime.astimezone(tz.tzutc()))
@vectorize
def utcyear(datetime: dt.datetime) -> int:
"""Returns the year for the given datetime value, in UTC time."""
return year(datetime.astimezone(tz.tzutc()))
@vectorize
def utcquarter(datetime: dt.datetime) -> int:
"""Returns the quarter of the year (0-3) for the given datetime value, in UTC time."""
return quarter(datetime.astimezone(tz.tzutc()))
@vectorize
def utcmonth(datetime: dt.datetime) -> int:
"""Returns the (zero-based) month for the given datetime value, in UTC time."""
return month(datetime.astimezone(tz.tzutc()))
@vectorize
def utchours(datetime: dt.datetime) -> int:
"""Returns the hours component for the given datetime value, in UTC time."""
return hours(datetime.astimezone(tz.tzutc()))
@vectorize
def utcminutes(datetime: dt.datetime) -> int:
"""Returns the minutes component for the given datetime value, in UTC time."""
return minutes(datetime.astimezone(tz.tzutc()))
@vectorize
def utcseconds(datetime: dt.datetime) -> int:
"""Returns the seconds component for the given datetime value, in UTC time."""
return seconds(datetime.astimezone(tz.tzutc()))
# NOTE(review): unlike every other utc* accessor in this module, this function
# is not wrapped with @vectorize, so it will not broadcast over pd.Series
# inputs — confirm whether the omission is intentional.
def utcmilliseconds(datetime: dt.datetime) -> float:
    """Returns the milliseconds component for the given datetime value, in UTC time."""
    return milliseconds(datetime.astimezone(tz.tzutc()))
@vectorize
def dayFormat(day: int) -> str:
    """
    Formats a (0-6) weekday number as a full week day name, according to the current locale.
    For example: dayFormat(0) -> "Sunday".
    """
    names = (
        "Sunday",
        "Monday",
        "Tuesday",
        "Wednesday",
        "Thursday",
        "Friday",
        "Saturday",
    )
    return names[day % 7]
@vectorize
def dayAbbrevFormat(day: int) -> str:
"""
Formats a (0-6) weekday number as an abbreviated week day name, according to the current locale.
For example: dayAbbrevFormat(0) -> "Sun".
"""
days = ["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"]
return days[day % 7]
@vectorize
def format(value, specifier):
"""Formats a numeric value as a string. The specifier must be a valid d3-format specifier (e.g., format(value, ',.2f')."""
raise NotImplementedError()
@vectorize
def monthFormat(month: int) -> str:
"""Formats a (zero-based) month number as a full month name, according to the current locale. For example: monthFormat(0) -> "January"."""
months = [
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December",
]
return months[month % 12]
@vectorize
def monthAbbrevFormat(month: int) -> str:
    """Formats a (zero-based) month number as an abbreviated month name, according to the current locale. For example: monthAbbrevFormat(0) -> "Jan"."""
    # Fixed: March was misspelled "Ma" in the original table.
    months = [
        "Jan",
        "Feb",
        "Mar",
        "Apr",
        "May",
        "Jun",
        "Jul",
        "Aug",
        "Sep",
        "Oct",
        "Nov",
        "Dec",
    ]
    return months[month % 12]
@vectorize
def timeFormat(value, specifier):
"""Formats a datetime value (either a Date object or timestamp) as a string, according to the local time. The specifier must be a valid d3-time-format specifier. For example: timeFormat(timestamp, '%A')."""
raise NotImplementedError()
@vectorize
def timeParse(string, specifier):
"""Parses a string value to a Date object, according to the local time. The specifier must be a valid d3-time-format specifier. For example: timeParse('June 30, 2015', '%B %d, %Y')."""
raise NotImplementedError()
@vectorize
def utcFormat(value, specifier):
"""Formats a datetime value (either a Date object or timestamp) as a string, according to UTC time. The specifier must be a valid d3-time-format specifier. For example: utcFormat(timestamp, '%A')."""
raise NotImplementedError()
@vectorize
def utcParse(value, specifier):
"""Parses a string value to a Date object, according to UTC time. The specifier must be a valid d3-time-format specifier. For example: utcParse('June 30, 2015', '%B %d, %Y')."""
raise NotImplementedError()
# String functions
@vectorize
def indexof(x: Union[str, list], value: Any) -> int:
    """
    For string input, returns the first index of substring in the input string.
    For array input, returns the first index of value in the input array.
    Returns -1 when not found, matching JS semantics.
    """
    if isinstance(x, str):
        return x.find(str(value))
    for position, item in enumerate(list(x)):
        if item == value:
            return position
    return -1
@vectorize
def lastindexof(x: Union[str, list], value: Any) -> int:
"""
For string input, returns the last index of substring in the input string.
For array input, returns the last index of value in the input array.
"""
if isinstance(x, str):
return x.rfind(str(value))
else:
x = list(x)
try:
return len(x) - 1 - x[::-1].index(value)
except ValueError:
return -1
@vectorize
def length(x: Union[str, list]) -> int:
"""Returns the length of the input string or array."""
return len(x)
@vectorize
def lower(string: str) -> str:
"""Transforms string to lower-case letters."""
return string.lower()
@vectorize
def pad(string: str, length: int, character: str = " ", align: str = "right"):
    """
    Pads a string value with repeated instances of a character up to a
    specified length. If character is not specified, a space is used.
    The align parameter places the padding on the 'left' (beginning),
    'center', or 'right' (end, the default) of the input string.
    """
    text = str(string)
    ch = str(character)
    fill = int(length) - len(text)
    if fill <= 0:
        return text
    if align == "left":
        return ch * fill + text
    if align == "center":
        left = fill // 2
        return ch * left + text + ch * (fill - left)
    return text + ch * fill
@vectorize
def parseFloat(string: str) -> Optional[float]:
    """
    Parses the input string to a floating-point value.
    Same as JavaScript's parseFloat.

    Returns None for input with no parseable numeric prefix, including
    empty/whitespace-only strings (JS parseFloat("") is NaN); the previous
    implementation raised IndexError on empty input.
    """
    # Javascript parses the longest valid numeric prefix.
    tokens = str(string).strip().split()
    if not tokens:
        return None
    token = tokens[0]
    for end in range(len(token), 0, -1):
        try:
            return float(token[:end])
        except ValueError:
            pass
    return None
@vectorize
def parseInt(string: str, base: int = 10) -> Optional[int]:
    """
    Parses the input string to an integer value.
    Same as JavaScript's parseInt.

    Returns None for input with no parseable integer prefix, including
    empty/whitespace-only strings (JS parseInt("") is NaN); the previous
    implementation raised IndexError on empty input.
    """
    # Javascript parses the longest valid integer prefix in the given base.
    tokens = str(string).strip().split()
    if not tokens:
        return None
    token = tokens[0]
    radix = int(base)
    for end in range(len(token), 0, -1):
        try:
            return int(token[:end], radix)
        except ValueError:
            pass
    return None
@vectorize
def replace(string: str, pattern: Union[str, JSRegex], replacement: str) -> str:
"""
Returns a new string with some or all matches of pattern replaced by a
replacement string. The pattern can be a string or a regular expression.
If pattern is a string, only the first instance will be replaced.
Same as JavaScript’s String.replace.
"""
if isinstance(pattern, JSRegex):
return pattern.replace(string, replacement)
else:
return str(string).replace(pattern, replacement, 1)
@vectorize
def slice_(
x: Union[str, list], start: int, end: Optional[int] = None
) -> Union[str, list]:
"""
Returns a section of string or array between the start and end indices.
If the end argument is negative, it is treated as an offset from
the end of the string (length(x) + end).
"""
start = int(start)
if end is not None:
end = int(end)
return x[start:end]
@vectorize
def split(s: str, sep: str, limit: int = -1):
    """
    Returns an array of tokens created by splitting the input string
    according to a provided separator pattern. The result can optionally
    be constrained to return at most limit tokens.

    Note: JS String.split's limit caps the NUMBER OF TOKENS returned,
    whereas Python's str.split maxsplit caps the number of splits and keeps
    the remainder in the last token — so the old `s.split(sep, limit)` was
    wrong whenever a non-negative limit was given.
    """
    tokens = s.split(sep)
    if limit >= 0:
        tokens = tokens[:limit]
    return tokens
@vectorize
def substring(string: str, start: int, end: Optional[int] = None) -> str:
    """Returns a section of string between the start and end indices.

    Negative indices are clamped to 0, and the bounds are swapped when
    start exceeds end, matching JS String.substring.
    """
    lo = max(0, int(start))
    hi = len(string) if end is None else max(0, int(end))
    if lo > hi:
        lo, hi = hi, lo
    return string[lo:hi]
@vectorize
def trim(s: str) -> str:
"""Returns a trimmed string with preceding and trailing whitespace removed."""
return s.strip()
@vectorize
def truncate(
    string: str, length: int, align: str = "right", ellipsis: str = "…"
) -> str:
    """
    Truncates an input string to a target length. The optional align argument
    indicates what part of the string should be truncated:
    'left' (the beginning), 'center', or 'right' (the end, the default).
    The optional ellipsis argument indicates the string to use to indicate
    truncated content; by default the ellipsis character … (\u2026) is used.

    Fixes vs the previous implementation:
    - strings already within the target length are returned unchanged
      (the old code always appended the ellipsis);
    - center alignment no longer duplicates the whole string when the
      right half rounds to zero (`string[-0:]` is the entire string).
    """
    string = str(string)
    length = int(length)
    if len(string) <= length:
        return string
    nchars = length - len(ellipsis)
    if nchars <= 0:
        return ellipsis
    if align == "left":
        return ellipsis + string[-nchars:]
    if align == "center":
        right = nchars // 2
        left = nchars - right
        tail = string[-right:] if right else ""  # guard: string[-0:] == string
        return string[:left] + ellipsis + tail
    return string[:nchars] + ellipsis
@vectorize
def upper(s: str) -> str:
"""Transforms string to upper-case letters."""
return s.upper()
# Object functions
@vectorize
def merge(*objs: dict) -> dict:
out = {}
for obj in objs:
out.update(obj)
return out
# Statistical Functions
# TODO: implement without scipy.stats?
@vectorize
def sampleNormal(mean: float = 0, stdev: float = 1) -> float:
"""
Returns a sample from a univariate normal (Gaussian) probability distribution
with specified mean and standard deviation stdev. If unspecified, the mean defaults
to 0 and the standard deviation defaults to 1.
"""
from scipy.stats import norm
return norm(mean, stdev).rvs()
@vectorize
def cumulativeNormal(value: float, mean: float = 0, stdev: float = 1) -> float:
"""
Returns the value of the cumulative distribution function at the given input
domain value for a normal distribution with specified mean and standard
deviation stdev. If unspecified, the mean defaults to 0 and the standard
deviation defaults to 1.
"""
from scipy.stats import norm
return norm(mean, stdev).cdf(value)
@vectorize
def densityNormal(value: float, mean: float = 0, stdev: float = 1) -> float:
"""
Returns the value of the probability density function at the given input domain
value, for a normal distribution with specified mean and standard deviation stdev.
If unspecified, the mean defaults to 0 and the standard deviation defaults to 1.
"""
from scipy.stats import norm
return norm(mean, stdev).pdf(value)
@vectorize
def quantileNormal(probability: float, mean: float = 0, stdev: float = 1) -> float:
"""
Returns the quantile value (the inverse of the cumulative distribution function)
for the given input probability, for a normal distribution with specified mean
and standard deviation stdev. If unspecified, the mean defaults to 0 and the
standard deviation defaults to 1.
"""
from scipy.stats import norm
return norm(mean, stdev).ppf(probability)
@vectorize
def sampleLogNormal(mean: float = 0, stdev: float = 1) -> float:
"""
Returns a sample from a univariate log-normal probability distribution with
specified log mean and log standard deviation stdev. If unspecified, the log
mean defaults to 0 and the log standard deviation defaults to 1.
"""
from scipy.stats import lognorm
return lognorm(s=stdev, scale=np.exp(mean)).rvs()
@vectorize
def cumulativeLogNormal(value: float, mean: float = 0, stdev: float = 1) -> float:
"""
Returns the value of the cumulative distribution function at the given input
domain value for a log-normal distribution with specified log mean and log
standard deviation stdev. If unspecified, the log mean defaults to 0 and the
log standard deviation defaults to 1.
"""
from scipy.stats import lognorm
return lognorm(s=stdev, scale=np.exp(mean)).cdf(value)
@vectorize
def densityLogNormal(value: float, mean: float = 0, stdev: float = 1) -> float:
"""
Returns the value of the probability density function at the given input domain
value, for a log-normal distribution with specified log mean and log standard
deviation stdev. If unspecified, the log mean defaults to 0 and the log standard
deviation defaults to 1.
"""
from scipy.stats import lognorm
return lognorm(s=stdev, scale=np.exp(mean)).pdf(value)
@vectorize
def quantileLogNormal(probability: float, mean: float = 0, stdev: float = 1) -> float:
"""
Returns the quantile value (the inverse of the cumulative distribution function)
for the given input probability, for a log-normal distribution with specified log
mean and log standard deviation stdev. If unspecified, the log mean defaults to 0
and the log standard deviation defaults to 1.
"""
from scipy.stats import lognorm
return lognorm(s=stdev, scale=np.exp(mean)).ppf(probability)
@vectorize
def sampleUniform(min: float = 0, max: float = 1) -> float:
"""
Returns a sample from a univariate continuous uniform probability distribution
over the interval [min, max). If unspecified, min defaults to 0 and max defaults
to 1. If only one argument is provided, it is interpreted as the max value.
"""
from scipy.stats import uniform
return uniform(loc=min, scale=max - min).rvs()
@vectorize
def cumulativeUniform(value: float, min: float = 0, max: float = 1) -> float:
"""
Returns the value of the cumulative distribution function at the given input
domain value for a uniform distribution over the interval [min, max). If
unspecified, min defaults to 0 and max defaults to 1. If only one argument
is provided, it is interpreted as the max value.
"""
from scipy.stats import uniform
return uniform(loc=min, scale=max - min).cdf(value)
@vectorize
def densityUniform(value: float, min: float = 0, max: float = 1) -> float:
"""
Returns the value of the probability density function at the given input domain
value, for a uniform distribution over the interval [min, max). If unspecified,
min defaults to 0 and max defaults to 1. If only one argument is provided, it is
interpreted as the max value.
"""
from scipy.stats import uniform
return uniform(loc=min, scale=max - min).pdf(value)
@vectorize
def quantileUniform(probability: float, min: float = 0, max: float = 1) -> float:
"""
Returns the quantile value (the inverse of the cumulative distribution function)
for the given input probability, for a uniform distribution over the interval
[min, max). If unspecified, min defaults to 0 and max defaults to 1. If only one
argument is provided, it is interpreted as the max value
"""
from scipy.stats import uniform
return uniform(loc=min, scale=max - min).ppf(probability)
# Array functions
@vectorize
def extent(array: List[float]) -> List[float]:
"""
Returns a new [min, max] array with the minimum and maximum values of
the input array, ignoring null, undefined, and NaN values.
"""
array = [val for val in array if isValid(val)]
return [min(array), max(array)]
@vectorize
def clampRange(range_: List[float], min_: float, max_: float) -> List[float]:
    """
    Clamps a two-element range array in a span-preserving manner. If the span
    of the input range is less than (max - min) and an endpoint exceeds either
    the min or max value, the range is translated such that the span is
    preserved and one endpoint touches the boundary of the [min, max] range.
    If the span exceeds (max - min), the range [min, max] is returned.
    """
    lo, hi = min(range_[:2]), max(range_[:2])
    width = hi - lo
    if width > max_ - min_:
        return [min_, max_]
    if lo < min_:
        return [min_, min_ + width]
    if hi > max_:
        return [max_ - width, max_]
    return [lo, hi]
@vectorize
def inrange(value: float, range_: List[float]) -> bool:
    """
    Tests whether value lies within (or is equal to either)
    the first and last values of the range array.
    """
    lo = min(range_[:2])
    hi = max(range_[:2])
    return lo <= value <= hi
@vectorize
def join(array: List[str], separator: str = ",") -> str:
    """
    Returns a new string by concatenating all of the elements of the
    input array, separated by commas or a specified separator string.
    """
    return str(separator).join(str(item) for item in array)
@vectorize
def lerp(array: List[float], fraction: float) -> float:
    """
    Returns the linearly interpolated value between the first and last entries
    in the array for the provided interpolation fraction (typically between 0 and 1).
    For example, lerp([0, 50], 0.5) returns 25.
    """
    lo, hi = array[0], array[-1]
    return lo + (hi - lo) * fraction
@vectorize
def peek(array: List[Any]) -> Any:
"""
Returns the last element in the input array. Similar to the built-in
Array.pop method, except that it does not remove the last element.
This method is a convenient shorthand for array[array.length - 1].
"""
return array[-1]
@vectorize
def reverse(array: List[Any]) -> List[Any]:
"""
Returns a new array with elements in a reverse order of the input array.
The first array element becomes the last, and the last array element
becomes the first.
"""
return array[::-1]
@overload
def sequence(stop=0) -> List[float]:
...
@overload # noqa: F811
def sequence(start, stop, step=1) -> List[float]:
...
@vectorize # noqa: F811
def sequence(*args) -> List[float]:
"""
sequence(stop)
sequence(start, stop, step=1)
Returns an array containing an arithmetic sequence of numbers.
If step is omitted, it defaults to 1. If start is omitted, it defaults to 0.
The stop value is exclusive; it is not included in the result.
If step is positive, the last element is the largest start + i * step less than stop;
if step is negative, the last element is the smallest start + i * step greater than stop.
If the returned array would contain an infinite number of values, an empty range
is returned. The arguments are not required to be integers.
"""
if len(args) == 0:
return []
elif len(args) <= 2:
return np.arange(*args).tolist()
elif args[2] == 0:
return []
else:
return np.arange(*args[:3]).tolist()
@vectorize
def span(array):
    """
    Returns the span of array: the difference between the last and
    first elements, or array[array.length-1] - array[0].
    """
    first, last = array[0], array[-1]
    return last - first
# Regular Expression Functions
def regexp(pattern: str, flags: str = "") -> JSRegex:
"""
Creates a regular expression instance from an input pattern
string and optional flags. Same as JavaScript’s RegExp.
"""
return JSRegex(pattern, flags)
def test(regexp: JSRegex, string: str = "") -> bool:
"""
Evaluates a regular expression regexp against the input string,
returning true if the string matches the pattern, false otherwise.
For example: test(/\\d{3}/, "32-21-9483") -> true.
"""
return regexp.test(string)
VEGAJS_NAMESPACE: Dict[str, Any] = {
# Constants
"null": None,
"true": True,
"false": False,
"NaN": math.nan,
"E": math.e,
"LN2": math.log(2),
"LN10": math.log(10),
"LOG2E": math.log2(math.e),
"LOG10E": math.log10(math.e),
"MAX_VALUE": sys.float_info.max,
"MIN_VALUE": sys.float_info.min,
"PI": math.pi,
"SQRT1_2": math.sqrt(0.5),
"SQRT2": math.sqrt(2),
# Type Checking
"isArray": isArray,
"isBoolean": isBoolean,
"isDate": isDate,
"isDefined": isDefined,
"isNumber": isNumber,
"isObject": isObject,
"isRegExp": isRegExp,
"isString": isString,
"isValid": isValid,
# Type Coercion
"toBoolean": toBoolean,
"toDate": toDate,
"toNumber": toNumber,
"toString": toString,
# Control Flow Functions
"if": lambda test, if_value, else_value: if_value if test else else_value,
# Math Functions
"isNaN": np.isnan,
"isFinite": np.isfinite,
"abs": np.abs,
"acos": np.arccos,
"asin": np.arcsin,
"atan": np.arctan,
"atan2": np.arctan2,
"ceil": np.ceil,
"clamp": np.clip,
"cos": np.cos,
"exp": np.exp,
"floor": np.floor,
"log": np.log,
"max": vectorize(max),
"min": vectorize(min),
"pow": np.power,
"random": random.random,
"round": np.round,
"sin": np.sin,
"sqrt": np.sqrt,
"tan": np.tan,
# Date/Time Functions
"now": now,
"datetime": datetime,
"date": date,
"day": day,
"year": year,
"quarter": quarter,
"month": month,
"hours": hours,
"minutes": minutes,
"seconds": seconds,
"milliseconds": milliseconds,
"time": time,
"timezoneoffset": timezoneoffset,
"utc": utc,
"utcdate": utcdate,
"utcday": utcday,
"utcyear": utcyear,
"utcquarter": utcquarter,
"utcmonth": utcmonth,
"utchours": utchours,
"utcminutes": utcminutes,
"utcseconds": utcseconds,
"utcmilliseconds": utcmilliseconds,
# String Functions
"indexof": indexof,
"lastindexof": lastindexof,
"length": length,
"lower": lower,
"pad": pad,
"parseFloat": parseFloat,
"parseInt": parseInt,
"replace": replace,
"slice": slice_,
"split": split,
"substring": substring,
"trim": trim,
"truncate": truncate,
"upper": upper,
# Formatting Functions
"dayFormat": dayFormat,
"dayAbbrevFormat": dayAbbrevFormat,
"monthFormat": monthFormat,
"monthAbbrevFormat": monthAbbrevFormat,
# Object Functions
"merge": merge,
# Statistical Functions
"sampleNormal": sampleNormal,
"densityNormal": densityNormal,
"cumulativeNormal": cumulativeNormal,
"quantileNormal": quantileNormal,
"sampleLogNormal": sampleLogNormal,
"densityLogNormal": densityLogNormal,
"cumulativeLogNormal": cumulativeLogNormal,
"quantileLogNormal": quantileLogNormal,
"sampleUniform": sampleUniform,
"densityUniform": densityUniform,
"cumulativeUniform": cumulativeUniform,
"quantileUniform": quantileUniform,
# Array Functions
# indexof, lastindexof, length, and slice defined under string functions.
"extent": extent,
"clampRange": clampRange,
"inrange": inrange,
"join": join,
"lerp": lerp,
"peek": peek,
"reverse": reverse,
"sequence": sequence,
"span": span,
# Regular Expression Functions
"test": test,
"regexp": regexp,
# TODOs:
# Color functions
# Data functions
}
| [
"altair_transform.utils.evaljs",
"dateutil.tz.tzutc",
"altair_transform.utils.JSRegex",
"math.log2",
"math.sqrt",
"math.log",
"math.log10",
"numpy.arange",
"pandas.to_datetime",
"datetime.datetime",
"functools.wraps",
"numpy.exp",
"functools.reduce",
"scipy.stats.uniform",
"pandas.isna",... | [((622, 651), 'altair_transform.utils.evaljs', 'evaljs', (['expression', 'namespace'], {}), '(expression, namespace)\n', (628, 651), False, 'from altair_transform.utils import evaljs, undefined, JSRegex\n'), ((702, 713), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (707, 713), False, 'from functools import reduce, wraps\n'), ((28661, 28684), 'altair_transform.utils.JSRegex', 'JSRegex', (['pattern', 'flags'], {}), '(pattern, flags)\n', (28668, 28684), False, 'from altair_transform.utils import evaljs, undefined, JSRegex\n'), ((29141, 29152), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (29149, 29152), False, 'import math\n'), ((29166, 29178), 'math.log', 'math.log', (['(10)'], {}), '(10)\n', (29174, 29178), False, 'import math\n'), ((29193, 29210), 'math.log2', 'math.log2', (['math.e'], {}), '(math.e)\n', (29202, 29210), False, 'import math\n'), ((29226, 29244), 'math.log10', 'math.log10', (['math.e'], {}), '(math.e)\n', (29236, 29244), False, 'import math\n'), ((29354, 29368), 'math.sqrt', 'math.sqrt', (['(0.5)'], {}), '(0.5)\n', (29363, 29368), False, 'import math\n'), ((29383, 29395), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (29392, 29395), False, 'import math\n'), ((5588, 5605), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (5603, 5605), True, 'import datetime as dt\n'), ((1005, 1057), 'functools.reduce', 'reduce', (['operator.or_', '[s.index for s in series_args]'], {}), '(operator.or_, [s.index for s in series_args])\n', (1011, 1057), False, 'from functools import reduce, wraps\n'), ((3547, 3561), 'pandas.isna', 'pd.isna', (['value'], {}), '(value)\n', (3554, 3561), True, 'import pandas as pd\n'), ((4928, 4942), 'time.time', 'timemod.time', ([], {}), '()\n', (4940, 4942), True, 'import time as timemod\n'), ((5646, 5688), 'datetime.datetime.fromtimestamp', 'dt.datetime.fromtimestamp', (['(0.001 * args[0])'], {}), '(0.001 * args[0])\n', (5671, 5688), True, 'import datetime as dt\n'), ((8789, 8799), 
'dateutil.tz.tzutc', 'tz.tzutc', ([], {}), '()\n', (8797, 8799), False, 'from dateutil import tz\n'), ((8973, 8983), 'dateutil.tz.tzutc', 'tz.tzutc', ([], {}), '()\n', (8981, 8983), False, 'from dateutil import tz\n'), ((9148, 9158), 'dateutil.tz.tzutc', 'tz.tzutc', ([], {}), '()\n', (9156, 9158), False, 'from dateutil import tz\n'), ((9350, 9360), 'dateutil.tz.tzutc', 'tz.tzutc', ([], {}), '()\n', (9358, 9360), False, 'from dateutil import tz\n'), ((9541, 9551), 'dateutil.tz.tzutc', 'tz.tzutc', ([], {}), '()\n', (9549, 9551), False, 'from dateutil import tz\n'), ((9729, 9739), 'dateutil.tz.tzutc', 'tz.tzutc', ([], {}), '()\n', (9737, 9739), False, 'from dateutil import tz\n'), ((9923, 9933), 'dateutil.tz.tzutc', 'tz.tzutc', ([], {}), '()\n', (9931, 9933), False, 'from dateutil import tz\n'), ((10117, 10127), 'dateutil.tz.tzutc', 'tz.tzutc', ([], {}), '()\n', (10125, 10127), False, 'from dateutil import tz\n'), ((10317, 10327), 'dateutil.tz.tzutc', 'tz.tzutc', ([], {}), '()\n', (10325, 10327), False, 'from dateutil import tz\n'), ((19432, 19449), 'scipy.stats.norm', 'norm', (['mean', 'stdev'], {}), '(mean, stdev)\n', (19436, 19449), False, 'from scipy.stats import norm\n'), ((19873, 19890), 'scipy.stats.norm', 'norm', (['mean', 'stdev'], {}), '(mean, stdev)\n', (19877, 19890), False, 'from scipy.stats import norm\n'), ((20309, 20326), 'scipy.stats.norm', 'norm', (['mean', 'stdev'], {}), '(mean, stdev)\n', (20313, 20326), False, 'from scipy.stats import norm\n'), ((20783, 20800), 'scipy.stats.norm', 'norm', (['mean', 'stdev'], {}), '(mean, stdev)\n', (20787, 20800), False, 'from scipy.stats import norm\n'), ((23119, 23152), 'scipy.stats.uniform', 'uniform', ([], {'loc': 'min', 'scale': '(max - min)'}), '(loc=min, scale=max - min)\n', (23126, 23152), False, 'from scipy.stats import uniform\n'), ((23604, 23637), 'scipy.stats.uniform', 'uniform', ([], {'loc': 'min', 'scale': '(max - min)'}), '(loc=min, scale=max - min)\n', (23611, 23637), False, 'from scipy.stats 
import uniform\n'), ((24088, 24121), 'scipy.stats.uniform', 'uniform', ([], {'loc': 'min', 'scale': '(max - min)'}), '(loc=min, scale=max - min)\n', (24095, 24121), False, 'from scipy.stats import uniform\n'), ((24605, 24638), 'scipy.stats.uniform', 'uniform', ([], {'loc': 'min', 'scale': '(max - min)'}), '(loc=min, scale=max - min)\n', (24612, 24638), False, 'from scipy.stats import uniform\n'), ((4193, 4214), 'pandas.to_datetime', 'pd.to_datetime', (['value'], {}), '(value)\n', (4207, 4214), True, 'import pandas as pd\n'), ((5729, 5750), 'datetime.datetime', 'dt.datetime', (['*args', '(1)'], {}), '(*args, 1)\n', (5740, 5750), True, 'import datetime as dt\n'), ((6058, 6076), 'datetime.datetime', 'dt.datetime', (['*args'], {}), '(*args)\n', (6069, 6076), True, 'import datetime as dt\n'), ((21213, 21225), 'numpy.exp', 'np.exp', (['mean'], {}), '(mean)\n', (21219, 21225), True, 'import numpy as np\n'), ((21699, 21711), 'numpy.exp', 'np.exp', (['mean'], {}), '(mean)\n', (21705, 21711), True, 'import numpy as np\n'), ((22184, 22196), 'numpy.exp', 'np.exp', (['mean'], {}), '(mean)\n', (22190, 22196), True, 'import numpy as np\n'), ((22703, 22715), 'numpy.exp', 'np.exp', (['mean'], {}), '(mean)\n', (22709, 22715), True, 'import numpy as np\n'), ((28099, 28115), 'numpy.arange', 'np.arange', (['*args'], {}), '(*args)\n', (28108, 28115), True, 'import numpy as np\n'), ((28191, 28211), 'numpy.arange', 'np.arange', (['*args[:3]'], {}), '(*args[:3])\n', (28200, 28211), True, 'import numpy as np\n')] |
__version__ = '0.1.5'
import argparse
import cleanlog
import warnings
import sys
import time
import os
import numpy as np
import pandas as pd
from . import util
from . import diffusion
from collections import defaultdict
from .const import DIRECTION
from scipy.stats import beta
from scipy.stats import combine_pvalues
# Globally silence RuntimeWarning for the whole pipeline run.
warnings.filterwarnings(action='ignore', category=RuntimeWarning)
# Module-wide colored logger with timestamps; netics_fun() raises the level
# to DEBUG when --verbose is given.
logger = cleanlog.ColoredLogger('NetICS', time=True)
logger.setLevel(cleanlog.INFO)
def parse_args():
    """Build and parse the NetICS command line.

    Two subcommands are exposed:

    * ``diffuse`` -- precompute the insulated-diffusion matrices for a network.
    * ``rank``    -- run the NetICS algorithm and rank genes.

    Returns:
        argparse.Namespace with the options of the chosen subcommand.
    """
    parser = argparse.ArgumentParser(description='Python implementation of NetICS')
    commands = parser.add_subparsers(dest='command', help='Subcommands.')

    # --- 'diffuse' subcommand ------------------------------------------------
    diffuse_cmd = commands.add_parser(
        'diffuse', help='Prepare diffusion matrix for the network.')
    diffuse_cmd.add_argument(
        '-j', '--adj', required=True, type=str,
        help='Adjacency matrix of the directed interaction network.')
    diffuse_cmd.add_argument(
        '-b', '--beta', type=float, default=0.4,
        help='Restart probability for the insulated diffusion. Default: 0.4 (For the network from Wu et al., 2010)')
    diffuse_cmd.add_argument(
        '-o', '--output', required=True,
        help='Output filename for diffusion matrix in .npz format.')

    # --- 'rank' subcommand ---------------------------------------------------
    rank_cmd = commands.add_parser('rank', help='Run NetICS algorithm and rank genes.')
    rank_cmd.add_argument(
        '-a', '--aberration', required=True, type=str,
        help='Input two-column table (without headers) containing genetically aberrant genes for each sample. It contain two columns that map every gene (1st column) to the samples that it it genetically aberrant (2nd column).')
    rank_cmd.add_argument(
        '-f', '--diffusion-matrix', required=True,
        help='Path to .npz file for diffusion matrix.')
    rank_cmd.add_argument(
        '-n', '--network', required=True,
        help='Input file (without headers) that contains the list of the genes that are present in the network.\nThey should be in the same order as in the rows of the adjacency matrix.')
    rank_cmd.add_argument(
        '-d', '--degs', default=None,
        help='Two-column table (without headers) with the names of predefined differentially expressed genes and the corresponding samples.')
    rank_cmd.add_argument(
        '-o', '--output-prefix', required=True,
        help='Prefix of the output file to save raw NetICS result and aggregated ranks.')
    rank_cmd.add_argument(
        '-v', '--verbose', default=False, action='store_true',
        help='Print debug messages')
    rank_cmd.add_argument(
        '-p', '--permutation', default=0, type=int,
        help='Perform permutation test to evaluate the significance of rank')
    rank_cmd.add_argument(
        '-s', '--seed', default=42, type=int,
        help='Random seed.')

    return parser.parse_args()
def diffuse(filename_adj, restart_prob, output):
    """Precompute forward and backward insulated-diffusion matrices.

    :param filename_adj: path to a tab-separated adjacency matrix of the
        directed interaction network.
    :param restart_prob: restart probability of the insulated diffusion.
    :param output: destination ``.npz`` file; matrices are stored under the
        keys ``forward`` and ``backward``.
    """
    logger.info('Making diffusion matrix...')
    logger.info('Started making forward diffusion matrix...')
    adjacency = np.loadtxt(open(filename_adj), delimiter='\t')
    forward = diffusion.insulated_diff(util.row_normalize(adjacency), restart_prob)
    logger.info('Done!')
    logger.info('Started making backward diffusion matrix...')
    # The backward matrix diffuses along reversed edges (transposed adjacency).
    backward = diffusion.insulated_diff(
        util.row_normalize(adjacency.conj().transpose()), restart_prob)
    logger.info('Done!')
    util.mkdir(output)
    np.savez(output, forward=forward, backward=backward)
    logger.info(f'Successfully saved diffusion matrix to {output}.')
def netics_fun(
    filename_aberration,
    filename_genes,
    output,
    filename_diffusion_matrix,
    filename_deg_list=None,
    verbose=False,
    permutation=0,
    seed=42,
):
    """Run NetICS over all samples and write raw and aggregated gene ranks.

    Reads per-sample gene aberrations (and optionally DEGs), diffuses them
    over precomputed network diffusion matrices, ranks genes per sample, and
    writes both the raw per-sample table (``<output>.raw.csv``) and the
    rank-aggregated table (``<output>.rank_aggregated.csv``).

    :param filename_aberration: two-column TSV (gene, sample), no header,
        mapping aberrant genes to samples.
    :param filename_genes: file listing network genes one per line, in the
        same order as the rows of the adjacency matrix.
    :param output: output file prefix.
    :param filename_diffusion_matrix: ``.npz`` with ``forward``/``backward``
        diffusion matrices (see ``diffuse``).
    :param filename_deg_list: optional two-column TSV (gene, sample) of
        differentially expressed genes; enables bidirectional diffusion.
    :param verbose: print debug messages.
    :param permutation: number of permutations for the significance test
        (0 disables it).
    :param seed: random seed for the permutation test.
    :return: concatenated per-sample result DataFrame.
    """
    if verbose:
        logger.setLevel(cleanlog.DEBUG)
    # Read network genes, line by line. Their order must match the rows of
    # the adjacency matrix used to build the diffusion matrices.
    network_genes = [l.strip().upper() for l in open(filename_genes).readlines()]
    gene2idx = {g:i for i, g in enumerate(network_genes)}
    network_gene_set = set(network_genes)
    # Aberrations: keep only network genes and map each to its matrix row.
    mutation_df = pd.read_csv(filename_aberration, sep='\t', names=['gene', 'sample'])
    mutation_df = mutation_df[mutation_df.gene.isin(network_gene_set)]
    mutation_df['idx'] = mutation_df.gene.map(gene2idx)
    mutation_df = mutation_df.dropna()
    # DEGs (optional), filtered and indexed the same way.
    if filename_deg_list is not None:
        deg_df = pd.read_csv(filename_deg_list, sep='\t', names=['gene', 'sample'])
        deg_df = deg_df[deg_df.gene.isin(network_gene_set)]
        deg_df['idx'] = deg_df.gene.map(gene2idx)
        deg_df = deg_df.dropna()
    # Diffuse only from aberrations (DOWN) unless DEGs are supplied (BOTH).
    choose_mut_diff = DIRECTION.DOWN if filename_deg_list is None else DIRECTION.BOTH
    # Load the precomputed diffusion matrices (see ``diffuse``).
    diffusion_matrices = np.load(filename_diffusion_matrix)
    F, F_opposite = diffusion_matrices['forward'], diffusion_matrices['backward']
    logger.info('Running NetICS...')
    final_result = []
    for sample in mutation_df['sample'].unique():
        mutation_df_per_sample = mutation_df[mutation_df['sample'] == sample]
        if choose_mut_diff == DIRECTION.BOTH:
            deg_df_per_sample = deg_df[deg_df['sample'] == sample]
        else:
            deg_df_per_sample = None
        result = prioritization(sample, mutation_df_per_sample, deg_df_per_sample, F, F_opposite, network_genes, choose_mut_diff, permutation, seed)
        final_result.append(result)
    final_result = pd.concat(final_result)
    util.mkdir(output)
    # Fixed: the message previously said '.raw.txt' although the file is
    # actually written as '.raw.csv'.
    logger.info(f'Saving output to {output}.raw.csv...')
    final_result.to_csv(f'{output}.raw.csv', index=False)
    # Rank aggregation across samples (mean and median of per-sample ranks).
    rank_agg_result = final_result.pivot_table(values='rank', index='gene', aggfunc=['mean', 'median'])
    rank_agg_result.columns = ['rank_mean', 'rank_median']
    rank_agg_result.sort_values('rank_mean').to_csv(f'{output}.rank_aggregated.csv')
    return final_result
#def read_mutations(filename):
# return pd.read_csv(filename, sep='\t', names=['gene', 'sample'])
#def run_per_sample(sample, mutation_df, deg_df, F, F_opposite, network_genes, choose_mut_diff, permutation):
# mutation_df_per_sample = mutation_df[mutation_df['sample'] == sample]
# deg_df_per_sample = deg_df[deg_df['sample'] == sample]
# result = prioritization(sample, mutation_df_per_sample, deg_df_per_sample, F, F_opposite, network_genes, choose_mut_diff, permutation)
# final_result.append(result)
# return result
#def permutation_test(sample, flag, aberrant_gene_idx, deg_idx, F, F_opposite, permutation, diffusion_score):
def permutation_test(seed, sample, flag, aberrant_gene_idx, deg_idx, F,
                     F_opposite, diffusion_score, num_permutation):
    """Diffuse randomly permuted gene sets to build a null distribution.

    For each of the ``num_permutation`` rounds the aberrant genes (and, when
    ``deg_idx`` is given, the DEGs) are replaced by an equally sized random
    gene subset, encoded as a multi-hot vector over all network genes.  All
    permuted seed vectors are then diffused in one batched call.

    Note: ``sample`` and ``diffusion_score`` are currently unused and kept
    only for interface compatibility with the caller.
    """
    np.random.seed(seed)
    n_genes = len(F)
    mutation_seeds = []
    expression_seeds = []
    for _ in range(num_permutation):
        # Draw replacement gene sets of the same sizes (without replacement).
        aberrant_gene_idx = np.random.choice(
            np.arange(n_genes), len(aberrant_gene_idx), replace=False)
        if deg_idx is not None:
            deg_idx = np.random.choice(
                np.arange(n_genes), len(deg_idx), replace=False)
        mutation_vec = np.zeros(n_genes)
        expression_vec = np.zeros(n_genes)
        # Encode the drawn indices as multi-hot vectors.
        mutation_vec[aberrant_gene_idx] = 1
        mutation_seeds.append(mutation_vec)
        if deg_idx is not None:
            expression_vec[deg_idx] = 1
            expression_seeds.append(expression_vec)
    return diffusion.diffuse_all_permutation(
        flag, np.array(mutation_seeds), np.array(expression_seeds), F, F_opposite)
#return pval_list
def prioritization(sample, mutation_df, deg_df, F, F_opposite, network_genes,
                   choose_up_down_flag, permutation, seed):
    """Score and rank every network gene for a single sample.

    Genes are ranked by permutation p-value when a permutation test is
    requested, otherwise by the raw diffusion score.

    :return: pd.DataFrame with columns ``sample``, ``gene``,
        ``diffusion_score``, optionally ``permutation_pval``, and ``rank``,
        sorted by rank.
    """
    num_genes = len(network_genes)
    out = {
        'sample': [sample] * num_genes,
        'gene': network_genes,
    }

    aberrant_gene_idx = mutation_df.idx.astype(int).values
    if choose_up_down_flag == DIRECTION.BOTH:
        deg_idx = deg_df.idx.astype(int).values
    else:
        deg_idx = None

    # With no aberrant genes we can only diffuse from the expression side.
    flag = DIRECTION.UP if len(aberrant_gene_idx) == 0 else choose_up_down_flag

    logger.info(f'Computing diffusion scores for {sample}.')
    diffusion_score = diffusion.diffuse_all(flag, aberrant_gene_idx, deg_idx, F, F_opposite)
    out['diffusion_score'] = diffusion_score

    if permutation:
        logger.info(f'Performing permutation test for {sample}.')
        permutation_list = permutation_test(
            seed, sample, flag, aberrant_gene_idx, deg_idx, F, F_opposite,
            diffusion_score, num_permutation=permutation)
        logger.debug(f'Permutation result shape = {permutation_list.shape}')
        # One column per gene: empirical p-value is the fraction of permuted
        # scores at least as large as the observed score.
        null_scores = pd.DataFrame(permutation_list).T
        out['permutation_pval'] = [
            (null_scores.iloc[i] >= score).mean()
            for i, score in enumerate(diffusion_score)]

    out = pd.DataFrame(out)
    if permutation:
        out['rank'] = out['permutation_pval'].rank(ascending=True, method='min')
    else:
        out['rank'] = out['diffusion_score'].rank(ascending=False, method='min')
    return out.sort_values('rank')
def main():
    """Command-line entry point: dispatch to the selected subcommand."""
    args = parse_args()
    if args.command == 'diffuse':
        diffuse(
            filename_adj=args.adj,
            restart_prob=args.beta,
            output=args.output,
        )
    else:
        netics_fun(
            filename_aberration=args.aberration,
            filename_genes=args.network,
            output=args.output_prefix,
            filename_diffusion_matrix=args.diffusion_matrix,
            filename_deg_list=args.degs,
            verbose=args.verbose,
            permutation=args.permutation,
            seed=args.seed,
        )


if __name__ == '__main__':
    main()
| [
"numpy.savez",
"argparse.ArgumentParser",
"pandas.read_csv",
"pandas.DataFrame",
"numpy.array",
"numpy.zeros",
"numpy.random.seed",
"cleanlog.ColoredLogger",
"numpy.load",
"pandas.concat",
"warnings.filterwarnings",
"numpy.arange"
] | [((324, 389), 'warnings.filterwarnings', 'warnings.filterwarnings', ([], {'action': '"""ignore"""', 'category': 'RuntimeWarning'}), "(action='ignore', category=RuntimeWarning)\n", (347, 389), False, 'import warnings\n'), ((400, 443), 'cleanlog.ColoredLogger', 'cleanlog.ColoredLogger', (['"""NetICS"""'], {'time': '(True)'}), "('NetICS', time=True)\n", (422, 443), False, 'import cleanlog\n'), ((507, 577), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Python implementation of NetICS"""'}), "(description='Python implementation of NetICS')\n", (530, 577), False, 'import argparse\n'), ((4393, 4441), 'numpy.savez', 'np.savez', (['output'], {'forward': 'F', 'backward': 'F_opposite'}), '(output, forward=F, backward=F_opposite)\n', (4401, 4441), True, 'import numpy as np\n'), ((5397, 5465), 'pandas.read_csv', 'pd.read_csv', (['filename_aberration'], {'sep': '"""\t"""', 'names': "['gene', 'sample']"}), "(filename_aberration, sep='\\t', names=['gene', 'sample'])\n", (5408, 5465), True, 'import pandas as pd\n'), ((6111, 6145), 'numpy.load', 'np.load', (['filename_diffusion_matrix'], {}), '(filename_diffusion_matrix)\n', (6118, 6145), True, 'import numpy as np\n'), ((7606, 7629), 'pandas.concat', 'pd.concat', (['final_result'], {}), '(final_result)\n', (7615, 7629), True, 'import pandas as pd\n'), ((8904, 8924), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (8918, 8924), True, 'import numpy as np\n'), ((11449, 11469), 'pandas.DataFrame', 'pd.DataFrame', (['result'], {}), '(result)\n', (11461, 11469), True, 'import pandas as pd\n'), ((5700, 5766), 'pandas.read_csv', 'pd.read_csv', (['filename_deg_list'], {'sep': '"""\t"""', 'names': "['gene', 'sample']"}), "(filename_deg_list, sep='\\t', names=['gene', 'sample'])\n", (5711, 5766), True, 'import pandas as pd\n'), ((9724, 9753), 'numpy.array', 'np.array', (['aberrant_gene_seeds'], {}), '(aberrant_gene_seeds)\n', (9732, 9753), True, 'import numpy as np\n'), ((9755, 9774), 
'numpy.array', 'np.array', (['deg_seeds'], {}), '(deg_seeds)\n', (9763, 9774), True, 'import numpy as np\n'), ((9075, 9095), 'numpy.arange', 'np.arange', (['num_genes'], {}), '(num_genes)\n', (9084, 9095), True, 'import numpy as np\n'), ((9298, 9317), 'numpy.zeros', 'np.zeros', (['num_genes'], {}), '(num_genes)\n', (9306, 9317), True, 'import numpy as np\n'), ((9319, 9338), 'numpy.zeros', 'np.zeros', (['num_genes'], {}), '(num_genes)\n', (9327, 9338), True, 'import numpy as np\n'), ((11246, 11276), 'pandas.DataFrame', 'pd.DataFrame', (['permutation_list'], {}), '(permutation_list)\n', (11258, 11276), True, 'import pandas as pd\n'), ((9207, 9227), 'numpy.arange', 'np.arange', (['num_genes'], {}), '(num_genes)\n', (9216, 9227), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
from .base import BaseProcessor
from ..plotter import OneDimPlotter, TwoDimPlotter, cdf_pdf
class ImageProcessor(BaseProcessor):
    """Compute statistical distributions over image-level information.

    Args:
        data (dict): Data to be processed. Recognized keys are ``shapes``
            (per-image (width, height) pairs) and ``labels`` (per-image
            label arrays).

    Examples:
        >>> import numpy as np
        >>> data = dict(
        >>>     shapes=np.array([np.array([100,300]), np.array([150, 1000])]),
        >>>     labels = np.array([np.array([0, 1]), np.array([1])]),
        >>> )
        >>> self = ImageProcessor(data)
        >>> self.default_plot()
        >>> # export
        >>> self.export('./result', save_mode='folder')
        >>> # what statistical data processed
        >>> print(self.processor)
    """

    def __init__(self, data):
        super(ImageProcessor, self).__init__(data)
        self.processor = ['hw', 'ratio', 'scale', 'ratio_log2', 'instances_per_image']
        # Every shape-based statistic needs the 'shapes' key.
        if self.data.get('shapes', None) is None:
            print("Image size distribution, ratio distribution, scale distribution"
                  " and log2(ratio) is related to 'shapes'. "
                  "But got no 'shapes' in input data.")
            self.processor = ['instances_per_image']
        # Counting instances per image needs the 'labels' key.
        if self.data.get('labels', None) is None:
            print("Instances per image is related to 'labels'. "
                  "But got no 'labels' in input data.")
            self.processor.remove('instances_per_image')

    @property
    def hw(self):
        """Height and width distribution of image."""
        if self.data.get('shapes', None) is None:
            return None
        widths = self.data['shapes'][:, 0]
        heights = self.data['shapes'][:, 1]
        return TwoDimPlotter([widths, heights], 'image hw distribution', plt.scatter,
                             axis_label=['width', 'height'],
                             marker='.', alpha=0.1)

    @property
    def ratio(self):
        """Ratio (height/width) distribution of image."""
        if self.data.get('shapes', None) is None:
            return None
        shapes = self.data['shapes']
        aspect = shapes[:, 1] / shapes[:, 0]
        return OneDimPlotter(aspect, r'image h/w ratio',
                             cdf_pdf,
                             axis_label=['ratio: h/w', 'normalized number'],
                             bins=20)

    @property
    def ratio_log2(self):
        """Ratio (log2(height/width)) distribution of image."""
        if self.data.get('shapes', None) is None:
            return None
        shapes = self.data['shapes']
        log_aspect = np.log2(shapes[:, 1] / shapes[:, 0])
        return OneDimPlotter(log_aspect, r'image h/w ratio (log2)',
                             cdf_pdf,
                             axis_label=['ratio: log2(h/2)', 'normalized number'],
                             bins=20)

    @property
    def scale(self):
        """Scale (sqrt(width*height)) distribution of image."""
        if self.data.get('shapes', None) is None:
            return None
        shapes = self.data['shapes']
        diagonal = np.sqrt(shapes[:, 1] * shapes[:, 0])
        value_range = (np.min(diagonal), np.max(diagonal))
        return OneDimPlotter(diagonal, r'image Scale(diagonal length)',
                             cdf_pdf,
                             axis_label=['scale: sqrt(wh)', 'normalized number'],
                             bins=20, range=value_range)

    @property
    def instances_per_image(self):
        """Distribution of instance numbers per image."""
        if self.data.get('labels', None) is None:
            return None
        counts = [per_image.size for per_image in self.data['labels']]
        return OneDimPlotter(counts, 'instance nums per image', plt.hist,
                             axis_label=['instance nums per image', 'normalized number'])
| [
"numpy.max",
"numpy.log2",
"numpy.sqrt",
"numpy.min"
] | [((2688, 2705), 'numpy.log2', 'np.log2', (['hw_ratio'], {}), '(hw_ratio)\n', (2695, 2705), True, 'import numpy as np\n'), ((3194, 3208), 'numpy.sqrt', 'np.sqrt', (['(h * w)'], {}), '(h * w)\n', (3201, 3208), True, 'import numpy as np\n'), ((3227, 3242), 'numpy.min', 'np.min', (['sqrt_hw'], {}), '(sqrt_hw)\n', (3233, 3242), True, 'import numpy as np\n'), ((3244, 3259), 'numpy.max', 'np.max', (['sqrt_hw'], {}), '(sqrt_hw)\n', (3250, 3259), True, 'import numpy as np\n')] |
"""
Image alignment.
:func:`~apply_transform_wcs()`: align an image based on WCS.
:func:`~apply_transform_stars()`: align an image based on pixel coordinates of
1, 2, or more stars.
"""
from typing import List as TList, Tuple, Union
from numpy import (
array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt,
transpose, zeros)
from numpy.linalg import lstsq
import scipy.ndimage
from astropy.wcs import WCS
__all__ = ['apply_transform_stars', 'apply_transform_wcs']
def apply_transform_stars(img: Union[ndarray, ma.MaskedArray],
                          src_stars: Union[TList[Tuple[float, float]],
                                           ndarray],
                          dst_stars: Union[TList[Tuple[float, float]],
                                           ndarray],
                          ref_width: int, ref_height: int,
                          prefilter: bool = True) -> ma.MaskedArray:
    """
    Align an image based on pixel coordinates of one or more stars

    One star gives a pure shift, two stars a shift + rotation + uniform
    scale, and three or more a full affine transform (least-squares fit).

    :param img: input image as 2D NumPy array
    :param src_stars: list of (X, Y) coordinates of one or more alignment stars
        in the image being aligned
    :param dst_stars: list of (X, Y) coordinates of the same stars as in
        `src_stars` in the reference image
    :param ref_width: reference image width in pixels
    :param ref_height: reference image height in pixels
    :param prefilter: apply spline filter before interpolation

    :return: transformed image as a masked array; invalid output pixels are
        masked, with the input image mean as fill value
    """
    nref = min(len(src_stars), len(dst_stars))
    src_x, src_y = transpose(src_stars[:nref])
    dst_x, dst_y = transpose(dst_stars[:nref])

    # Pad the image if smaller than the reference image
    h, w = img.shape
    avg = img.mean()
    if w < ref_width or h < ref_height:
        new_img = full([max(h, ref_height), max(w, ref_width)], avg, img.dtype)
        if isinstance(img, ma.MaskedArray) and img.mask.any():
            new_img[:h, :w] = img.data
            # Bug fix: the mask must have the shape of the padded image, not
            # [ref_height, ref_width]; the two differ when the image is larger
            # than the reference in one dimension and smaller in the other
            # (same approach as in apply_transform_wcs()).
            mask = ones(new_img.shape, bool)
            mask[:h, :w] = img.mask
            img = ma.MaskedArray(new_img, mask)
        else:
            new_img[:h, :w] = img
            img = new_img

    if isinstance(img, ma.MaskedArray) and img.mask.any():
        # scipy.ndimage does not handle masked arrays; fill masked values with
        # global mean and mask them afterwards after transformation
        mask = img.mask.astype(float32)
        img = img.filled(avg)
    else:
        mask = zeros(img.shape, float32)

    if nref == 1:
        # Pure shift
        offset = [dst_y[0] - src_y[0], dst_x[0] - src_x[0]]
        img = scipy.ndimage.shift(
            img, offset, mode='nearest', prefilter=prefilter)
        mask = scipy.ndimage.shift(mask, offset, cval=True, prefilter=prefilter)
    else:
        if nref == 2:
            # Partial affine transform (shift + rotation + uniform scale)
            # [ src_y ]   [  A  B ] [ dst_y ]   [ dy ]
            # [ src_x ] = [ -B  A ] [ dst_x ] + [ dx ]
            src_dy, src_dx = src_y[0] - src_y[1], src_x[0] - src_x[1]
            dst_dy, dst_dx = dst_y[0] - dst_y[1], dst_x[0] - dst_x[1]
            d = dst_dx**2 + dst_dy**2
            if not d:
                raise ValueError(
                    'Both alignment stars have the same coordinates')
            a = (src_dy*dst_dy + src_dx*dst_dx)/d
            b = (src_dy*dst_dx - src_dx*dst_dy)/d
            mat = array([[a, b], [-b, a]])
            offset = [src_y[0] - dst_y[0]*a - dst_x[0]*b,
                      src_x[0] - dst_x[0]*a + dst_y[0]*b]
        else:
            # Full affine transform
            # [ src_y ]   [ A  B ] [ dst_y ]   [ dy ]
            # [ src_x ] = [ C  D ] [ dst_x ] + [ dx ]
            a = transpose([dst_y, dst_x, ones(nref)])
            py = lstsq(a, src_y, rcond=None)[0]
            px = lstsq(a, src_x, rcond=None)[0]
            mat = array([py[:2], px[:2]])
            offset = [py[2], px[2]]
        img = scipy.ndimage.affine_transform(
            img, mat, offset, mode='nearest', prefilter=prefilter)
        # Mask output pixels that received a noticeable (> 0.06) contribution
        # from masked or out-of-frame input pixels during interpolation
        mask = scipy.ndimage.affine_transform(
            mask, mat, offset, cval=True, prefilter=prefilter) > 0.06

    # Match the reference image size
    if w > ref_width or h > ref_height:
        img = img[:ref_height, :ref_width]
        mask = mask[:ref_height, :ref_width]

    return ma.masked_array(img, mask, fill_value=avg)
# Normalized sample positions used by apply_transform_wcs() to place a small
# number of fake "alignment stars" on the reference frame.  Keyed by the
# number of grid points; each value is a pair of arrays of fractional
# (x, y) positions that are later scaled by the reference width and height
# (the pair is swapped for portrait-oriented reference images).
wcs_grid = {
    1: (array([1/2]),
        array([1/2])),
    2: (array([1/3, 2/3]),
        array([1/2, 1/2])),
    3: (array([1/4, 1/2, 3/4]),
        array([1/3, 2/3, 1/3])),
    4: (array([1/3, 2/3, 1/3, 2/3]),
        array([1/3, 1/3, 2/3, 2/3])),
    5: (array([1/3, 2/3, 1/3, 2/3, 1/2]),
        array([1/3, 1/3, 2/3, 2/3, 1/2])),
    6: (array([1/4, 1/2, 3/4, 1/4, 1/2, 3/4]),
        array([1/3, 1/3, 1/3, 2/3, 2/3, 2/3])),
    7: (array([1/4, 1/2, 3/4, 1/4, 1/2, 3/4, 1/2]),
        array([1/3, 1/3, 1/3, 2/3, 2/3, 2/3, 1/2])),
    8: (array([1/4, 1/2, 3/4, 1/4, 1/2, 3/4, 1/3, 2/3]),
        array([1/3, 1/3, 1/3, 2/3, 2/3, 2/3, 1/2, 1/2])),
    9: (array([1/4, 1/2, 3/4, 1/4, 1/2, 3/4, 1/4, 1/2, 3/4]),
        array([1/4, 1/4, 1/4, 1/2, 1/2, 1/2, 3/4, 3/4, 3/4])),
}
def apply_transform_wcs(img: Union[ndarray, ma.MaskedArray],
                        src_wcs: WCS, dst_wcs: WCS,
                        ref_width: int, ref_height: int,
                        grid_points: int = 0,
                        prefilter: bool = False) -> ma.MaskedArray:
    """
    Align an image based on WCS

    :param img: input image as 2D NumPy array
    :param src_wcs: WCS of image being aligned
    :param dst_wcs: reference image WCS
    :param ref_width: reference image width in pixels
    :param ref_height: reference image height in pixels
    :param grid_points: number of grid points for WCS interpolation::

        0: transform using WCS calculated for each pixel
        1: offset-only alignment using central pixel
        2: shift + rotation + uniform scale (2-star) alignment using two points
        >= 3: full affine transform using the given number of fake "alignment
            stars" generated from the WCS

    :param prefilter: apply spline filter before interpolation

    :return: transformed image
    """
    # Pad the image if smaller than the reference image
    h, w = img.shape
    avg = img.mean()
    if w < ref_width or h < ref_height:
        new_img = full([max(h, ref_height), max(w, ref_width)], avg, img.dtype)
        if isinstance(img, ma.MaskedArray) and img.mask.any():
            new_img[:h, :w] = img.data
            mask = ones(new_img.shape, bool)
            mask[:h, :w] = img.mask
            img = ma.MaskedArray(new_img, mask)
        else:
            new_img[:h, :w] = img
            img = new_img

    if grid_points <= 0 or grid_points >= w*h:
        # Full geometric transform based on WCS
        if isinstance(img, ma.MaskedArray) and img.mask.any():
            # scipy.ndimage cannot handle masked arrays: fill with the mean
            # and transform the mask separately as a float image
            mask = img.mask.astype(float32)
            img = img.filled(avg)
        else:
            mask = zeros(img.shape, float32)

        # Calculate the transformation row by row to avoid problems
        # in all_pix2world() for large images
        # NOTE(review): `coord` uses the pre-padding h/w while dst_x rows have
        # ref_width samples; verify this path for images smaller than the
        # reference frame.
        dst_y, dst_x = indices((ref_height, ref_width))
        coord = empty((2, h, w), float32)
        for i in range(h):
            # Reference pixel -> sky -> source pixel, one output row at a time
            a, d = dst_wcs.all_pix2world(dst_x[i], dst_y[i], 0)
            coord[1, i, :], coord[0, i, :] = src_wcs.all_world2pix(
                a, d, 0, quiet=True)
        # Mask output pixels with a noticeable (> 0.06) contribution from
        # masked or out-of-frame input pixels
        res = ma.MaskedArray(
            scipy.ndimage.map_coordinates(
                img, coord, mode='nearest', prefilter=prefilter),
            scipy.ndimage.map_coordinates(
                mask, coord, cval=1, prefilter=prefilter) > 0.06,
            fill_value=avg)

        # Match the reference image size
        if w > ref_width or h > ref_height:
            res = res[:ref_height, :ref_width]
        return res

    # Calculate fake alignment stars by sampling WCS on a grid
    try:
        # Special grid for small number of points
        if ref_width >= ref_height:
            dst_x, dst_y = wcs_grid[grid_points]
        else:
            dst_y, dst_x = wcs_grid[grid_points]
        # Cannot multiply in place to avoid damaging wcs_grid
        dst_x = dst_x*ref_width
        dst_y = dst_y*ref_height
    except KeyError:
        # Generate uniform grid; not necessarily the requested number of points
        nx, ny = int(sqrt(grid_points*ref_width/ref_height) + 0.5), \
            int(sqrt(grid_points*ref_height/ref_width) + 0.5)
        # Complex step in mgrid = number of samples (inclusive endpoints);
        # the outermost samples are then dropped to stay inside the frame
        dst_x, dst_y = mgrid[:ref_width:(nx + 2)*1j, :ref_height:(ny + 2)*1j]
        dst_x, dst_y = dst_x[1:-1].ravel(), dst_y[1:-1].ravel()
    # Map the fake stars through both WCS and fall back to star alignment
    a, d = dst_wcs.all_pix2world(dst_x, dst_y, 0)
    src_x, src_y = src_wcs.all_world2pix(a, d, 0, quiet=True)

    img = apply_transform_stars(
        img, transpose([src_x, src_y]), transpose([dst_x, dst_y]),
        ref_width, ref_height, prefilter=prefilter)

    # Match the reference image size
    if w > ref_width or h > ref_height:
        img = img[:ref_height, :ref_width]
    return img
| [
"numpy.sqrt",
"numpy.ones",
"numpy.ma.MaskedArray",
"numpy.indices",
"numpy.array",
"numpy.zeros",
"numpy.empty",
"numpy.linalg.lstsq",
"numpy.ma.masked_array",
"numpy.transpose"
] | [((1567, 1594), 'numpy.transpose', 'transpose', (['src_stars[:nref]'], {}), '(src_stars[:nref])\n', (1576, 1594), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n'), ((1614, 1641), 'numpy.transpose', 'transpose', (['dst_stars[:nref]'], {}), '(dst_stars[:nref])\n', (1623, 1641), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n'), ((4351, 4393), 'numpy.ma.masked_array', 'ma.masked_array', (['img', 'mask'], {'fill_value': 'avg'}), '(img, mask, fill_value=avg)\n', (4366, 4393), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n'), ((2478, 2503), 'numpy.zeros', 'zeros', (['img.shape', 'float32'], {}), '(img.shape, float32)\n', (2483, 2503), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n'), ((4417, 4431), 'numpy.array', 'array', (['[1 / 2]'], {}), '([1 / 2])\n', (4422, 4431), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n'), ((4439, 4453), 'numpy.array', 'array', (['[1 / 2]'], {}), '([1 / 2])\n', (4444, 4453), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n'), ((4462, 4483), 'numpy.array', 'array', (['[1 / 3, 2 / 3]'], {}), '([1 / 3, 2 / 3])\n', (4467, 4483), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n'), ((4489, 4510), 'numpy.array', 'array', (['[1 / 2, 1 / 2]'], {}), '([1 / 2, 1 / 2])\n', (4494, 4510), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n'), ((4517, 4545), 'numpy.array', 'array', (['[1 / 4, 1 / 2, 3 / 4]'], {}), '([1 / 4, 1 / 2, 3 / 4])\n', (4522, 4545), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, 
ndarray, ones, sqrt, transpose, zeros\n'), ((4549, 4577), 'numpy.array', 'array', (['[1 / 3, 2 / 3, 1 / 3]'], {}), '([1 / 3, 2 / 3, 1 / 3])\n', (4554, 4577), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n'), ((4582, 4617), 'numpy.array', 'array', (['[1 / 3, 2 / 3, 1 / 3, 2 / 3]'], {}), '([1 / 3, 2 / 3, 1 / 3, 2 / 3])\n', (4587, 4617), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n'), ((4619, 4654), 'numpy.array', 'array', (['[1 / 3, 1 / 3, 2 / 3, 2 / 3]'], {}), '([1 / 3, 1 / 3, 2 / 3, 2 / 3])\n', (4624, 4654), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n'), ((4657, 4699), 'numpy.array', 'array', (['[1 / 3, 2 / 3, 1 / 3, 2 / 3, 1 / 2]'], {}), '([1 / 3, 2 / 3, 1 / 3, 2 / 3, 1 / 2])\n', (4662, 4699), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n'), ((4699, 4741), 'numpy.array', 'array', (['[1 / 3, 1 / 3, 2 / 3, 2 / 3, 1 / 2]'], {}), '([1 / 3, 1 / 3, 2 / 3, 2 / 3, 1 / 2])\n', (4704, 4741), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n'), ((4742, 4791), 'numpy.array', 'array', (['[1 / 4, 1 / 2, 3 / 4, 1 / 4, 1 / 2, 3 / 4]'], {}), '([1 / 4, 1 / 2, 3 / 4, 1 / 4, 1 / 2, 3 / 4])\n', (4747, 4791), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n'), ((4789, 4838), 'numpy.array', 'array', (['[1 / 3, 1 / 3, 1 / 3, 2 / 3, 2 / 3, 2 / 3]'], {}), '([1 / 3, 1 / 3, 1 / 3, 2 / 3, 2 / 3, 2 / 3])\n', (4794, 4838), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n'), ((4837, 4893), 'numpy.array', 'array', (['[1 / 4, 1 / 2, 3 / 4, 1 / 4, 1 / 2, 3 / 4, 1 / 2]'], {}), '([1 / 4, 1 / 2, 3 / 4, 1 / 4, 1 / 2, 3 / 4, 1 / 2])\n', 
(4842, 4893), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n'), ((4889, 4945), 'numpy.array', 'array', (['[1 / 3, 1 / 3, 1 / 3, 2 / 3, 2 / 3, 2 / 3, 1 / 2]'], {}), '([1 / 3, 1 / 3, 1 / 3, 2 / 3, 2 / 3, 2 / 3, 1 / 2])\n', (4894, 4945), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n'), ((4942, 5005), 'numpy.array', 'array', (['[1 / 4, 1 / 2, 3 / 4, 1 / 4, 1 / 2, 3 / 4, 1 / 3, 2 / 3]'], {}), '([1 / 4, 1 / 2, 3 / 4, 1 / 4, 1 / 2, 3 / 4, 1 / 3, 2 / 3])\n', (4947, 5005), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n'), ((4999, 5062), 'numpy.array', 'array', (['[1 / 3, 1 / 3, 1 / 3, 2 / 3, 2 / 3, 2 / 3, 1 / 2, 1 / 2]'], {}), '([1 / 3, 1 / 3, 1 / 3, 2 / 3, 2 / 3, 2 / 3, 1 / 2, 1 / 2])\n', (5004, 5062), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n'), ((5057, 5127), 'numpy.array', 'array', (['[1 / 4, 1 / 2, 3 / 4, 1 / 4, 1 / 2, 3 / 4, 1 / 4, 1 / 2, 3 / 4]'], {}), '([1 / 4, 1 / 2, 3 / 4, 1 / 4, 1 / 2, 3 / 4, 1 / 4, 1 / 2, 3 / 4])\n', (5062, 5127), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n'), ((5119, 5189), 'numpy.array', 'array', (['[1 / 4, 1 / 4, 1 / 4, 1 / 2, 1 / 2, 1 / 2, 3 / 4, 3 / 4, 3 / 4]'], {}), '([1 / 4, 1 / 4, 1 / 4, 1 / 2, 1 / 2, 1 / 2, 3 / 4, 3 / 4, 3 / 4])\n', (5124, 5189), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n'), ((7186, 7218), 'numpy.indices', 'indices', (['(ref_height, ref_width)'], {}), '((ref_height, ref_width))\n', (7193, 7218), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n'), ((7235, 7260), 'numpy.empty', 'empty', (['(2, h, w)', 'float32'], {}), '((2, h, w), float32)\n', 
(7240, 7260), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n'), ((8825, 8850), 'numpy.transpose', 'transpose', (['[src_x, src_y]'], {}), '([src_x, src_y])\n', (8834, 8850), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n'), ((8852, 8877), 'numpy.transpose', 'transpose', (['[dst_x, dst_y]'], {}), '([dst_x, dst_y])\n', (8861, 8877), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n'), ((1982, 2017), 'numpy.ones', 'ones', (['[ref_height, ref_width]', 'bool'], {}), '([ref_height, ref_width], bool)\n', (1986, 2017), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n'), ((2072, 2101), 'numpy.ma.MaskedArray', 'ma.MaskedArray', (['new_img', 'mask'], {}), '(new_img, mask)\n', (2086, 2101), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n'), ((3418, 3442), 'numpy.array', 'array', (['[[a, b], [-b, a]]'], {}), '([[a, b], [-b, a]])\n', (3423, 3442), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n'), ((3883, 3906), 'numpy.array', 'array', (['[py[:2], px[:2]]'], {}), '([py[:2], px[:2]])\n', (3888, 3906), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n'), ((6568, 6593), 'numpy.ones', 'ones', (['new_img.shape', 'bool'], {}), '(new_img.shape, bool)\n', (6572, 6593), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n'), ((6648, 6677), 'numpy.ma.MaskedArray', 'ma.MaskedArray', (['new_img', 'mask'], {}), '(new_img, mask)\n', (6662, 6677), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n'), ((7022, 
7047), 'numpy.zeros', 'zeros', (['img.shape', 'float32'], {}), '(img.shape, float32)\n', (7027, 7047), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n'), ((3786, 3813), 'numpy.linalg.lstsq', 'lstsq', (['a', 'src_y'], {'rcond': 'None'}), '(a, src_y, rcond=None)\n', (3791, 3813), False, 'from numpy.linalg import lstsq\n'), ((3834, 3861), 'numpy.linalg.lstsq', 'lstsq', (['a', 'src_x'], {'rcond': 'None'}), '(a, src_x, rcond=None)\n', (3839, 3861), False, 'from numpy.linalg import lstsq\n'), ((3756, 3766), 'numpy.ones', 'ones', (['nref'], {}), '(nref)\n', (3760, 3766), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n'), ((8407, 8449), 'numpy.sqrt', 'sqrt', (['(grid_points * ref_width / ref_height)'], {}), '(grid_points * ref_width / ref_height)\n', (8411, 8449), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n'), ((8477, 8519), 'numpy.sqrt', 'sqrt', (['(grid_points * ref_height / ref_width)'], {}), '(grid_points * ref_height / ref_width)\n', (8481, 8519), False, 'from numpy import array, empty, float32, full, indices, ma, mgrid, ndarray, ones, sqrt, transpose, zeros\n')] |
import numpy as n, pylab as p
from scipy import stats as st
# Kolmogorov-Smirnov statistic sup|F_a - F_b| between pairs of distributions,
# approximated as the max absolute CDF difference on a dense grid.
# Normal(0, 1) vs Normal(0.1, 1): shifted mean.
a=st.norm(0,1)
b=st.norm(0.1,1)
domain=n.linspace(-4,4,10000)
avals=a.cdf(domain)
bvals=b.cdf(domain)
diffN=n.abs(avals-bvals).max()
# Normal(0, 1) vs Normal(0, 1.2): inflated standard deviation.
a=st.norm(0,1)
b=st.norm(0,1.2)
domain=n.linspace(-4,4,10000)
avals=a.cdf(domain)
bvals=b.cdf(domain)
diffN2=n.abs(avals-bvals).max()
# Uniform comparisons; scipy's uniform(loc, scale) has support [loc, loc+scale].
a=st.uniform(0,1)
b=st.uniform(0.05,1.0)
domain=n.linspace(0,1.05,10000)
avals=a.cdf(domain)
bvals=b.cdf(domain)
diffU=n.abs(avals-bvals).max()
a=st.uniform(0,1)
b=st.uniform(-0.05,1.05)
domain=n.linspace(0,1.05,10000)
avals=a.cdf(domain)
bvals=b.cdf(domain)
diffU2=n.abs(avals-bvals).max()
# Weibull/power comparisons kept commented out: scipy has no st.weibull/st.power
# with these names, so the densities are built by hand below instead.
#a=st.weibull(1.5)
#b=st.weibull(1.7)
#domain=n.linspace(0,1.05,10000)
#avals=a.cdf(domain)
#bvals=b.cdf(domain)
#diffW=n.abs(avals-bvals).max()
#a=st.power(1.5)
#b=st.power(1.7)
#domain=n.linspace(0,1.05,10000)
#avals=a.cdf(domain)
#bvals=b.cdf(domain)
#diffP=n.abs(avals-bvals).max()
#x = n.arange(1,100.)/50.
# Grid for the hand-rolled Weibull comparison below.
x=n.linspace(0,20,100000)
step=x[1]-x[0]
def weib(x, nn, a):
    """Weibull probability density at ``x`` with scale ``nn`` and shape ``a``."""
    z = x / nn
    return (a / nn) * z ** (a - 1) * n.exp(-z ** a)
#count, bins, ignored = p.hist(n.random.weibull(5.,1000))
#x = n.arange(1,100.)/50.
#scale = count.max()/weib(x, 1., 5.).max()
# Hand-rolled Weibull densities (shape 1.5 vs 1.7), normalized to integrate to 1.
W=weib(x, 1., 1.5)
W_=W/(W*step).sum()
W__=n.cumsum(W_)
W2=weib(x, 1., 1.7)
W2_=W2/(W2*step).sum()
W2__=n.cumsum(W2_)
# NOTE(review): this diffW is the max difference of the *densities* (W_, W2_),
# not the CDFs (W__, W2__), so the value printed below is not a KS distance.
# It is recomputed from the cumulative sums after the print.
diffW=n.abs(W_-W2_).max()
#p.plot(x, W_)
#p.plot(x, W2_)
##p.plot(x, weib(x, 1., 5.)*scale)
#p.show()
# Power-law comparison via scipy's powerlaw distribution.
a=st.powerlaw(1.5)
b=st.powerlaw(1.7)
domain=n.linspace(0,5.05,10000)
avals=a.cdf(domain)
bvals=b.cdf(domain)
diffP=n.abs(avals-bvals).max()
print("distancias de KS para os modelos matematicos:", diffN,diffN2,diffU,diffU2,diffW,diffP)
# distancias de KS para os modelos matematicos:
# 0.0398776116762 0.0439947104098 0.0952338090952 0.047619047619 0.128565475845 0.0460149130584
# X = (-n.ln(U))^{1/a}
# Recompute the Weibull KS distance properly from the cumulative distributions.
lb,rb,NE,shape1,shape2=0,10,10000,1.5,1.7
x=n.linspace(lb,rb,NE)
step=x[1]-x[0]
W=weib(x, 1., shape1)
W_=W/((W*step).sum())
W__=n.cumsum(W_)
W2=weib(x, 1., shape2)
W2_=W2/((W2*step).sum())
W2__=n.cumsum(W2_)
diffW=n.abs(W__-W2__).max()
# Same again but normalizing by the raw sum (no step factor); the cumulative
# curves then end at 1, so this variant is self-consistent.
lb,rb,NE,shape1,shape2=0,10,10000,1.5,1.7
x=n.linspace(lb,rb,NE)
step=x[1]-x[0]
W=weib(x, 1., shape1)
W_=W/((W).sum())
W__=n.cumsum(W_)
W2=weib(x, 1., shape2)
W2_=W2/((W2).sum())
W2__=n.cumsum(W2_)
diffW=n.abs(W__-W2__).max()
| [
"numpy.abs",
"scipy.stats.norm",
"scipy.stats.uniform",
"numpy.exp",
"numpy.linspace",
"scipy.stats.powerlaw",
"numpy.cumsum"
] | [((62, 75), 'scipy.stats.norm', 'st.norm', (['(0)', '(1)'], {}), '(0, 1)\n', (69, 75), True, 'from scipy import stats as st\n'), ((77, 92), 'scipy.stats.norm', 'st.norm', (['(0.1)', '(1)'], {}), '(0.1, 1)\n', (84, 92), True, 'from scipy import stats as st\n'), ((99, 123), 'numpy.linspace', 'n.linspace', (['(-4)', '(4)', '(10000)'], {}), '(-4, 4, 10000)\n', (109, 123), True, 'import numpy as n, pylab as p\n'), ((196, 209), 'scipy.stats.norm', 'st.norm', (['(0)', '(1)'], {}), '(0, 1)\n', (203, 209), True, 'from scipy import stats as st\n'), ((211, 226), 'scipy.stats.norm', 'st.norm', (['(0)', '(1.2)'], {}), '(0, 1.2)\n', (218, 226), True, 'from scipy import stats as st\n'), ((233, 257), 'numpy.linspace', 'n.linspace', (['(-4)', '(4)', '(10000)'], {}), '(-4, 4, 10000)\n', (243, 257), True, 'import numpy as n, pylab as p\n'), ((331, 347), 'scipy.stats.uniform', 'st.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (341, 347), True, 'from scipy import stats as st\n'), ((349, 370), 'scipy.stats.uniform', 'st.uniform', (['(0.05)', '(1.0)'], {}), '(0.05, 1.0)\n', (359, 370), True, 'from scipy import stats as st\n'), ((377, 403), 'numpy.linspace', 'n.linspace', (['(0)', '(1.05)', '(10000)'], {}), '(0, 1.05, 10000)\n', (387, 403), True, 'import numpy as n, pylab as p\n'), ((476, 492), 'scipy.stats.uniform', 'st.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (486, 492), True, 'from scipy import stats as st\n'), ((494, 517), 'scipy.stats.uniform', 'st.uniform', (['(-0.05)', '(1.05)'], {}), '(-0.05, 1.05)\n', (504, 517), True, 'from scipy import stats as st\n'), ((524, 550), 'numpy.linspace', 'n.linspace', (['(0)', '(1.05)', '(10000)'], {}), '(0, 1.05, 10000)\n', (534, 550), True, 'import numpy as n, pylab as p\n'), ((938, 963), 'numpy.linspace', 'n.linspace', (['(0)', '(20)', '(100000)'], {}), '(0, 20, 100000)\n', (948, 963), True, 'import numpy as n, pylab as p\n'), ((1228, 1240), 'numpy.cumsum', 'n.cumsum', (['W_'], {}), '(W_)\n', (1236, 1240), True, 'import numpy as n, pylab as 
p\n'), ((1289, 1302), 'numpy.cumsum', 'n.cumsum', (['W2_'], {}), '(W2_)\n', (1297, 1302), True, 'import numpy as n, pylab as p\n'), ((1408, 1424), 'scipy.stats.powerlaw', 'st.powerlaw', (['(1.5)'], {}), '(1.5)\n', (1419, 1424), True, 'from scipy import stats as st\n'), ((1427, 1443), 'scipy.stats.powerlaw', 'st.powerlaw', (['(1.7)'], {}), '(1.7)\n', (1438, 1443), True, 'from scipy import stats as st\n'), ((1451, 1477), 'numpy.linspace', 'n.linspace', (['(0)', '(5.05)', '(10000)'], {}), '(0, 5.05, 10000)\n', (1461, 1477), True, 'import numpy as n, pylab as p\n'), ((1855, 1877), 'numpy.linspace', 'n.linspace', (['lb', 'rb', 'NE'], {}), '(lb, rb, NE)\n', (1865, 1877), True, 'import numpy as n, pylab as p\n'), ((1939, 1951), 'numpy.cumsum', 'n.cumsum', (['W_'], {}), '(W_)\n', (1947, 1951), True, 'import numpy as n, pylab as p\n'), ((2005, 2018), 'numpy.cumsum', 'n.cumsum', (['W2_'], {}), '(W2_)\n', (2013, 2018), True, 'import numpy as n, pylab as p\n'), ((2093, 2115), 'numpy.linspace', 'n.linspace', (['lb', 'rb', 'NE'], {}), '(lb, rb, NE)\n', (2103, 2115), True, 'import numpy as n, pylab as p\n'), ((2172, 2184), 'numpy.cumsum', 'n.cumsum', (['W_'], {}), '(W_)\n', (2180, 2184), True, 'import numpy as n, pylab as p\n'), ((2233, 2246), 'numpy.cumsum', 'n.cumsum', (['W2_'], {}), '(W2_)\n', (2241, 2246), True, 'import numpy as n, pylab as p\n'), ((168, 188), 'numpy.abs', 'n.abs', (['(avals - bvals)'], {}), '(avals - bvals)\n', (173, 188), True, 'import numpy as n, pylab as p\n'), ((303, 323), 'numpy.abs', 'n.abs', (['(avals - bvals)'], {}), '(avals - bvals)\n', (308, 323), True, 'import numpy as n, pylab as p\n'), ((448, 468), 'numpy.abs', 'n.abs', (['(avals - bvals)'], {}), '(avals - bvals)\n', (453, 468), True, 'import numpy as n, pylab as p\n'), ((596, 616), 'numpy.abs', 'n.abs', (['(avals - bvals)'], {}), '(avals - bvals)\n', (601, 616), True, 'import numpy as n, pylab as p\n'), ((1037, 1058), 'numpy.exp', 'n.exp', (['(-(x / nn) ** a)'], {}), '(-(x / nn) ** a)\n', 
(1042, 1058), True, 'import numpy as n, pylab as p\n'), ((1309, 1324), 'numpy.abs', 'n.abs', (['(W_ - W2_)'], {}), '(W_ - W2_)\n', (1314, 1324), True, 'import numpy as n, pylab as p\n'), ((1522, 1542), 'numpy.abs', 'n.abs', (['(avals - bvals)'], {}), '(avals - bvals)\n', (1527, 1542), True, 'import numpy as n, pylab as p\n'), ((2025, 2042), 'numpy.abs', 'n.abs', (['(W__ - W2__)'], {}), '(W__ - W2__)\n', (2030, 2042), True, 'import numpy as n, pylab as p\n'), ((2253, 2270), 'numpy.abs', 'n.abs', (['(W__ - W2__)'], {}), '(W__ - W2__)\n', (2258, 2270), True, 'import numpy as n, pylab as p\n')] |
import argparse
import datasets
from transformers import BertTokenizer
from transformers import EvaluationStrategy
from transformers import BertForSequenceClassification, BertTokenizerFast, Trainer, TrainingArguments
from bs4 import BeautifulSoup
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
import numpy as np
from copy import deepcopy
import nlpaug.augmenter.char as nac
import nlpaug.augmenter.word as naw
import nlpaug.augmenter.sentence as nas
import nlpaug.flow as nafc
def parse_args():
    """Parse the command-line options for the augmentation script.

    All three options are mandatory: the dataset to load, the maximum
    keyboard-noise level, and where to save the augmented dataset.
    """
    parser = argparse.ArgumentParser()
    for flag, converter in (('--dataset', str),
                            ('--max-noise-level', float),
                            ('--save', str)):
        parser.add_argument(flag, type=converter, required=True)
    return parser.parse_args()
def add_keyboard_aug(x, max_noise_level):
    """Apply keyboard-typo augmentation to ``x['text']``.

    The word-level noise probability is drawn uniformly from
    ``[0, max_noise_level)`` and returned alongside the augmented text.
    """
    noise_level = np.random.rand() * max_noise_level
    augmenter = nac.KeyboardAug(aug_word_p=noise_level)
    return {'text': augmenter.augment(x['text']), 'noise_level': noise_level}
if __name__ == '__main__':
    args = parse_args()

    def preview(ds, limit=10):
        # Print the dataset summary, a blank line, then the first examples of
        # the train split (indices 0..limit inclusive, matching the original
        # `if i >= 10: break` behaviour). Extracted because the original
        # duplicated this block verbatim before and after augmentation.
        print(ds)
        print()
        for i, x in enumerate(ds['train']):
            print(x)
            if i >= limit:
                break

    dataset = datasets.load_from_disk(args.dataset)
    preview(dataset)
    # Augment every training example with random keyboard noise.
    dataset['train'] = dataset['train'].map(lambda x: add_keyboard_aug(x, args.max_noise_level))
    preview(dataset)
    dataset.save_to_disk(args.save)
| [
"numpy.random.rand",
"datasets.load_from_disk",
"nlpaug.augmenter.char.KeyboardAug",
"argparse.ArgumentParser"
] | [((558, 583), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (581, 583), False, 'import argparse\n'), ((924, 963), 'nlpaug.augmenter.char.KeyboardAug', 'nac.KeyboardAug', ([], {'aug_word_p': 'noise_level'}), '(aug_word_p=noise_level)\n', (939, 963), True, 'import nlpaug.augmenter.char as nac\n'), ((1109, 1146), 'datasets.load_from_disk', 'datasets.load_from_disk', (['args.dataset'], {}), '(args.dataset)\n', (1132, 1146), False, 'import datasets\n'), ((878, 894), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (892, 894), True, 'import numpy as np\n')] |
import time
everything_start_time = time.time()
import os
import subprocess
import json
import argparse
import cv2
import numpy
import SRers
# Command-line interface for the video super-resolution driver below.
parser = argparse.ArgumentParser()
# Input/output file
parser.add_argument('-i', '--input',  # input file
                    type=str,
                    help='path of video to be converted')
parser.add_argument('-o', '--output',  # output file
                    type=str, default='default',
                    help='Specify output file name. Default: output.mp4')
parser.add_argument('-ot', '--output_type',  # output file type
                    type=str, choices=['video', 'npz', 'npy', 'tiff', 'png'], default='npy',
                    help='Output file type, -o needs to be a file and image sequence or npz needs to be a folder')
# Process type
parser.add_argument('-a', '--algorithm', type=str, default='EDVR',  # algorithm
                    choices=['EDVR', 'ESRGAN'], help='EDVR or ESRGAN')
parser.add_argument('-mn', '-model_name', type=str, default='mt4r',
                    choices=['ld', 'ldc', 'l4r', 'l4v', 'l4br', 'm4r', 'mt4r'],
                    help='ld: L Deblur, ldc: L Deblur Comp, l4r: L SR REDS x4, l4v: L SR vimeo90K 4x, '
                         'l4br: L SRblur REDS 4x, m4r: M woTSA SR REDS 4x, mt4r: M SR REDS 4x')
# Model directory
parser.add_argument('-md', '--model_path',  # model path
                    type=str, default='default',
                    help='path of checkpoint for pretrained model')
# Start/End frame
parser.add_argument('-st', '--start_frame',  # start frame
                    type=int, default=1,
                    help='specify start frame (Start from 1)')
parser.add_argument('-ed', '--end_frame',  # end frame
                    type=int, default=0,
                    help='specify end frame. Default: Final frame')
# FFmpeg
parser.add_argument('-fd', '--ffmpeg_dir',  # path to FFmpeg
                    type=str, default='',
                    help='path to ffmpeg(.exe)')
parser.add_argument('-vc', '--vcodec',  # video codec
                    type=str, default='h264',
                    help='Video codec')
parser.add_argument('-ac', '--acodec',  # audio codec
                    type=str, default='copy',
                    help='Audio codec')
parser.add_argument('-br', '--bit_rate',  # bit rate for the output video
                    type=str, default='',
                    help='Bit rate for output video')
parser.add_argument('-fps',  # target frame rate
                    type=float,
                    help='specify fps of output video. Default: original fps * sf.')
parser.add_argument('-mc', '--mac_compatibility',  # make output playable on Apple devices
                    type=bool, default=True,
                    help='If you want to play it on a mac with QuickTime or iOS, set this to True and the pixel '
                         'format will be yuv420p. ')
# Other
parser.add_argument('-bs', '--batch_size',  # batch size
                    type=int, default=1,
                    help='Specify batch size for faster conversion. This will depend on your cpu/gpu memory. Default: 1')
parser.add_argument('-ec', '--empty_cache',  # empty CUDA cache between batches
                    type=int, default=0,
                    help='Empty cache while processing, set to 1 if you get CUDA out of memory errors; If there\'s '
                         'the process is ok, setting to 1 will slow down the process. ')
# Temporary files
parser.add_argument('-tmp', '--temp_file_path',  # temporary file path
                    type=str, default='tmp',
                    help='Specify temporary file path')
parser.add_argument('-rm', '--remove_temp_file',  # whether to remove temporary files
                    type=bool, default=False,
                    help='If you want to keep temporary files, select True ')
args = parser.parse_args().__dict__
# Pretrained checkpoint paths keyed by algorithm, then by model-name flag (-mn).
model_paths = {
    'EDVR': {
        'ld': 'BasicSR/experiments/pretrained_models/EDVR/EDVR_L_deblur_REDS_official-ca46bd8c.pth',
        'ldc': 'BasicSR/experiments/pretrained_models/EDVR/EDVR_L_deblurcomp_REDS_official-0e988e5c.pth',
        'l4v': 'BasicSR/experiments/pretrained_models/EDVR/EDVR_L_x4_SR_Vimeo90K_official-162b54e4.pth',
        'l4r': 'BasicSR/experiments/pretrained_models/EDVR/EDVR_L_x4_SR_REDS_official-9f5f5039.pth',
        'l4br': 'BasicSR/experiments/pretrained_models/EDVR/EDVR_L_x4_SRblur_REDS_official-983d7b8e.pth',
        'm4r': 'BasicSR/experiments/pretrained_models/EDVR/EDVR_M_woTSA_x4_SR_REDS_official-1edf645c.pth',
        'mt4r': 'BasicSR/experiments/pretrained_models/EDVR/EDVR_M_x4_SR_REDS_official-32075921.pth'
    },
    'ESRGAN': {
        'test': 'ESRGAN.pth'
    }
}
def listdir(folder):
    """Return the sorted entries of ``folder``, skipping OS junk files.

    Filters out well-known metadata entries (.DS_Store, Thumbs.db, ...) and
    AppleDouble resource forks (names starting with '._').
    """
    junk = {'.DS_Store', '.ipynb_checkpoints', '$RECYCLE.BIN', 'Thumbs.db', 'desktop.ini'}
    return sorted(entry for entry in os.listdir(folder)
                  if entry not in junk and not entry.startswith('._'))
class data_loader:
    """Sequential frame reader over either a video file or a file sequence.

    ``input_type`` selects the backend:
      * ``'video'``  -- frames come from ``cv2.VideoCapture``;
      * ``'is'`` / ``'npz'`` / ``'npy'`` -- frames are read one file at a time
        from the sorted contents of ``input_dir``.

    ``read()`` returns ``(ok, frame)`` pairs (mirroring the OpenCV capture
    API) until the source is exhausted. ``height``/``width``/``frame_count``
    describe the source after construction.
    """

    def __init__(self, input_dir, input_type, start_frame):
        self.input_type = input_type
        self.input_dir = input_dir
        self.start_frame = start_frame
        # Per-format reader used for file sequences.
        self.sequence_read_funcs = {'is': cv2.imread,
                                    'npz': lambda path: numpy.load(path)['arr_0'],
                                    'npy': numpy.load
                                    }
        if input_type == 'video':
            self.cap = cv2.VideoCapture(input_dir)
            self.cap.set(1, self.start_frame)        # 1 == CAP_PROP_POS_FRAMES: seek to start
            self.fps = self.cap.get(5)               # 5 == CAP_PROP_FPS
            self.frame_count = int(self.cap.get(7))  # 7 == CAP_PROP_FRAME_COUNT
            self.height = int(self.cap.get(4))       # 4 == CAP_PROP_FRAME_HEIGHT
            self.width = int(self.cap.get(3))        # 3 == CAP_PROP_FRAME_WIDTH
        else:
            self.count = -1
            self.files = [f'{input_dir}/{f}' for f in listdir(input_dir)[self.start_frame:]]
            self.frame_count = len(self.files)
            # Probe the first frame for the source dimensions (local variable
            # instead of the original's temporary self.img + del).
            first_shape = self.sequence_read_funcs[input_type](self.files[0]).shape
            self.height = first_shape[0]
            self.width = first_shape[1]
        # Dispatch once; the original assigned this identical expression twice.
        self.read = self.video_func if self.input_type == 'video' else self.sequence_func

    def video_func(self):
        """Read the next frame from the capture; returns (ok, frame)."""
        return self.cap.read()

    def sequence_func(self):
        """Read the next file of the sequence; returns (ok, frame) like cv2."""
        self.count += 1
        if self.count < self.frame_count:
            img = self.sequence_read_funcs[self.input_type](self.files[self.count])
            if img is not None:
                return True, img
        return False, None

    def close(self):
        """Release the underlying video capture, if any.

        Bug fix: ``cv2.VideoCapture`` has no ``close()`` method, so the
        original ``self.cap.close()`` raised AttributeError; ``release()``
        is the documented way to free the capture.
        """
        if self.input_type == 'video':
            self.cap.release()
def data_writer(output_type):
    """Return a ``save(path, img)`` callable for the given output format.

    Raises KeyError for unknown formats, like the original lookup.
    """
    writers = {
        'tiff': lambda path, img: cv2.imwrite(path + '.tiff', img),
        'png': lambda path, img: cv2.imwrite(path + '.png', img),
        'npz': numpy.savez_compressed,
        'npy': numpy.save,
    }
    return writers[output_type]
def detect_input_type(input_dir):  # classify what kind of input we were given
    """Classify ``input_dir``.

    Files: '.json' means a resumable job description ('continue'); anything
    else is treated as a video. Folders are classified by the extension of
    their first entry: 'npz', 'npy', 'is' (image sequence), or 'mix' (a folder
    of separate inputs).
    """
    if os.path.isfile(input_dir):
        ext = os.path.splitext(input_dir)[1].lower()
        return 'continue' if ext == '.json' else 'video'
    first = listdir(input_dir)[0]
    ext = os.path.splitext(first)[1].lower()
    if ext == '.npz':
        return 'npz'
    if ext == '.npy':
        return 'npy'
    if ext.replace('.', '') in ['dpx', 'jpg', 'jpeg', 'exr', 'psd', 'png', 'tif', 'tiff']:
        return 'is'
    return 'mix'
def check_output_dir(dire, ext=''):
    """Return a unique output path based on ``dire`` (+ ``ext``).

    Creates the parent directory if missing. If the target already exists,
    appends ``_2``, ``_3``, ... until a free name is found. When ``ext`` is
    empty the result is treated as a folder and created on disk.
    """
    parent = os.path.split(dire)[0]
    if not os.path.exists(parent):
        os.makedirs(parent)
    if os.path.exists(dire + ext):
        suffix = 2
        while os.path.exists(f'{dire}_{suffix}{ext}'):
            suffix += 1
        dire = f'{dire}_{suffix}{ext}'
    else:
        dire = f'{dire}{ext}'
    if not ext:  # no extension -> the caller wants a folder, so create it
        os.mkdir(dire)
    return dire
def second2time(second: float) -> str:
    """Format a duration in seconds as ``H:MM:SS.ss``."""
    hours, remainder = divmod(second, 3600)
    minutes, seconds = divmod(remainder, 60)
    return '%d:%02d:%05.2f' % (hours, minutes, seconds)
# ---------------------------------------------------------------------------
# Top-level driver: resolve the list of jobs, then run super-resolution on
# each one. A 'mix' input is a folder whose entries each become one job.
# ---------------------------------------------------------------------------
input_type = detect_input_type(args['input'])
if input_type == 'mix':
    processes = listdir(args['input'])
    processes = [os.path.join(args['input'], process) for process in processes]
else:
    processes = [args['input']]
# Convert the user-facing 1-based start frame ("Start from 1") to 0-based.
args['start_frame'] -= 1
for input_file_path in processes:
    input_type = detect_input_type(input_file_path)
    if input_type != 'continue':
        # Fresh job: split the path into [folder, stem, extension].
        input_file_name_list = list(os.path.split(input_file_path))
        input_file_name_list.extend(os.path.splitext(input_file_name_list[1]))
        input_file_name_list.pop(1)
        temp_file_path = check_output_dir(os.path.join(args['temp_file_path'], input_file_name_list[1]))
        video = data_loader(input_file_path, input_type, args['start_frame'])
        frame_count = video.frame_count
        frame_count_len = len(str(frame_count))
        if args['fps']:
            fps = args['fps']
        elif input_type == 'video':
            fps = video.fps
        else:
            fps = 30  # fallback frame rate for image/array sequences
        # Start/End frame, clamped to the actual frame count.
        if args['end_frame'] == 0 or args['end_frame'] == frame_count or args['end_frame'] > frame_count:
            end_frame = frame_count
        else:
            end_frame = args['end_frame'] + 1
        if args['start_frame'] == 0 or args['start_frame'] >= frame_count:
            start_frame = 1
        else:
            start_frame = args['start_frame']
        if args['model_path'] == 'default':  # resolve checkpoint via the model_paths table
            model_path = model_paths[args['algorithm']][args['mn']]
        else:
            model_path = args['model_path']
        output_type = args['output_type']
        output_dir = args['output']
        if output_dir == 'default':
            output_dir = f"{input_file_name_list[0]}/{input_file_name_list[1]}_{args['algorithm']}"
        if output_type == 'video':
            if input_file_name_list[2]:
                ext = input_file_name_list[2]
            else:
                ext = '.mp4'
        else:
            output_dir, ext = os.path.splitext(output_dir)
        if not os.path.exists(os.path.split(output_dir)[0]):
            os.makedirs(os.path.split(output_dir)[0])
        if output_type == 'video':
            # Video output is rendered as a tiff sequence in the temp folder
            # first, then encoded with ffmpeg after the loop.
            dest_path = check_output_dir(os.path.splitext(output_dir)[0], ext)
            output_dir = f'{temp_file_path}/tiff'
            output_type = 'tiff'
        else:
            dest_path = False
        os.makedirs(output_dir, exist_ok=True)
        # Job description; persisted so an interrupted run can be resumed by
        # passing this json file as -i.
        cag = {'input_file_path': input_file_path,
               'input_type': input_type,
               'empty_cache': args['empty_cache'],
               'model_path': model_path,
               'temp_folder': temp_file_path,
               'algorithm': args['algorithm'],
               'frame_count': frame_count,
               'frame_count_len': len(str(video.frame_count)),
               'height': video.height,
               'width': video.width,
               'start_frame': start_frame,
               'end_frame': end_frame,
               'model_name': args['mn'],
               'batch_size': args['batch_size'],
               'output_type': output_type,
               'output_dir': output_dir,
               'dest_path': dest_path,
               'mac_compatibility': args['mac_compatibility'],
               'ffmpeg_dir': args['ffmpeg_dir'],
               'fps': fps,
               'vcodec': args['vcodec'],
               'acodec': args['acodec'],
               'remove_temp_file': args['remove_temp_file']
               }
        with open(f'{temp_file_path}/process_info.json', 'w') as f:
            json.dump(cag, f)
    else:
        # Resume mode: the input is a process_info.json from a previous run.
        with open(input_file_path, 'r') as f_:
            cag = json.load(f_)
        # NOTE(review): cag['sf'] is never written by the fresh-job branch
        # above, so resuming raises KeyError here -- confirm the intended key.
        start_frame = len(listdir(cag['output_dir'])) // cag['sf']
        video = data_loader(cag['input_file_path'], cag['input_type'], start_frame - 1)
    if cag['empty_cache']:
        os.environ['CUDA_EMPTY_CACHE'] = str(cag['empty_cache'])
    # Model checking
    if not os.path.exists(cag['model_path']):
        print(f"Model {cag['model_path']} doesn't exist, exiting")
        exit(1)
    # Number of batches left to process from start_frame onwards.
    batch_count = (cag['frame_count'] - start_frame + 1) // cag['batch_size']
    if (cag['frame_count'] - start_frame) % cag['batch_size']:
        batch_count += 1
    # Super resolution
    SRer = SRers.__dict__[cag['algorithm']].SRer(cag['model_name'], cag['model_path'], cag['height'], cag['width'])
    SRer.init_batch(video)
    save = data_writer(cag['output_type'])
    timer = 0
    start_time = time.time()
    # NOTE(review): if batch_count == 0 the loop never runs and
    # 'initialize_time' is undefined at the summary print below -- confirm
    # inputs always yield at least one batch.
    try:
        for i in range(batch_count):
            out = SRer.sr(video.read())
            save(f"{cag['output_dir']}/{str(i).zfill(cag['frame_count_len'])}", out)
            time_spent = time.time() - start_time
            start_time = time.time()
            if i == 0:
                initialize_time = time_spent
                print(f'Initialized and processed frame 1/{batch_count} | '
                      f'{batch_count - i - 1} frames left | '
                      f'Time spent: {round(initialize_time, 2)}s',
                      end='')
            else:
                timer += time_spent
                frames_processes = i + 1
                frames_left = batch_count - frames_processes
                print(f'\rProcessed batch {frames_processes}/{batch_count} | '
                      f"{frames_left} {'batches' if frames_left > 1 else 'batch'} left | "
                      f'Time spent: {round(time_spent, 2)}s | '
                      f'Time left: {second2time(frames_left * timer / i)} | '
                      f'Total time spend: {second2time(timer + initialize_time)}', end='', flush=True)
    except KeyboardInterrupt:
        print('\nCaught Ctrl-C, exiting. ')
        exit(256)
    del video, SRer
    print(f'\r{os.path.split(input_file_path)[1]} done! Total time spend: {second2time(timer + initialize_time)}', flush=True)
    # Video post process: encode the tiff sequence with ffmpeg.
    if cag['dest_path']:
        # yuv420p pixel format lets QuickTime/iOS decode the file.
        mac_compatibility = ['-pix_fmt', 'yuv420p'] if cag['mac_compatibility'] else ''
        # NOTE(review): when mac_compatibility is '' (a str) and the codec is
        # hevc, .extend() below raises AttributeError -- the falsy case should
        # probably be [] instead of ''.
        if 'hevc' in cag['vcodec']:
            mac_compatibility.extend(['-vtag', 'hvc1'])
        # Build and run the ffmpeg command line.
        cmd = [f"'{os.path.join(cag['ffmpeg_dir'], 'ffmpeg')}'",
               '-loglevel error', '-vsync 0',
               '-r', str(cag['fps']),
               '-pattern_type glob',
               '-i', f"'{os.path.join(cag['temp_folder'], 'tiff/*.tiff')}'",
               '-vcodec', cag['vcodec'], *mac_compatibility, '-crf 20',
               f"'{cag['dest_path']}'"]
        # NOTE(review): eval() on ffprobe's output executes arbitrary text;
        # json.loads would be the safe parser for this json payload.
        has_audio = 'streams' in eval(subprocess.getoutput(f"ffprobe -v quiet -show_streams -select_streams a -print_format json '{cag['input_file_path']}'")).keys()
        if cag['start_frame'] == 1 and cag['end_frame'] == 0 and has_audio:
            cmd.insert(1, '-thread_queue_size 1048576')
            cmd.insert(3, f"-vn -i '{cag['input_file_path']}'")
            cmd.insert(7, f"-acodec {cag['acodec']}")
        cmd = ' '.join(cmd)
        os.system(cmd)
    if cag['remove_temp_file']:
        # NOTE(review): rmtree is never imported in this file (needs
        # 'from shutil import rmtree'); this line raises NameError as written.
        rmtree(cag['temp_folder'])
print(time.time() - everything_start_time)
| [
"os.path.exists",
"cv2.imwrite",
"os.listdir",
"subprocess.getoutput",
"argparse.ArgumentParser",
"os.makedirs",
"os.path.join",
"os.path.splitext",
"os.path.split",
"os.path.isfile",
"os.mkdir",
"cv2.VideoCapture",
"json.load",
"os.system",
"numpy.load",
"time.time",
"json.dump"
] | [((37, 48), 'time.time', 'time.time', ([], {}), '()\n', (46, 48), False, 'import time\n'), ((155, 180), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (178, 180), False, 'import argparse\n'), ((4710, 4728), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (4720, 4728), False, 'import os\n'), ((6877, 6902), 'os.path.isfile', 'os.path.isfile', (['input_dir'], {}), '(input_dir)\n', (6891, 6902), False, 'import os\n'), ((7722, 7748), 'os.path.exists', 'os.path.exists', (['(dire + ext)'], {}), '(dire + ext)\n', (7736, 7748), False, 'import os\n'), ((12655, 12666), 'time.time', 'time.time', ([], {}), '()\n', (12664, 12666), False, 'import time\n'), ((7814, 7852), 'os.path.exists', 'os.path.exists', (['f"""{dire}_{count}{ext}"""'], {}), "(f'{dire}_{count}{ext}')\n", (7828, 7852), False, 'import os\n'), ((7999, 8013), 'os.mkdir', 'os.mkdir', (['dire'], {}), '(dire)\n', (8007, 8013), False, 'import os\n'), ((8297, 8333), 'os.path.join', 'os.path.join', (["args['input']", 'process'], {}), "(args['input'], process)\n", (8309, 8333), False, 'import os\n'), ((10552, 10590), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (10563, 10590), False, 'import os\n'), ((12112, 12145), 'os.path.exists', 'os.path.exists', (["cag['model_path']"], {}), "(cag['model_path'])\n", (12126, 12145), False, 'import os\n'), ((15150, 15164), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (15159, 15164), False, 'import os\n'), ((15238, 15249), 'time.time', 'time.time', ([], {}), '()\n', (15247, 15249), False, 'import time\n'), ((5417, 5444), 'cv2.VideoCapture', 'cv2.VideoCapture', (['input_dir'], {}), '(input_dir)\n', (5433, 5444), False, 'import cv2\n'), ((8591, 8621), 'os.path.split', 'os.path.split', (['input_file_path'], {}), '(input_file_path)\n', (8604, 8621), False, 'import os\n'), ((8659, 8700), 'os.path.splitext', 'os.path.splitext', (['input_file_name_list[1]'], {}), 
'(input_file_name_list[1])\n', (8675, 8700), False, 'import os\n'), ((8780, 8841), 'os.path.join', 'os.path.join', (["args['temp_file_path']", 'input_file_name_list[1]'], {}), "(args['temp_file_path'], input_file_name_list[1])\n", (8792, 8841), False, 'import os\n'), ((10159, 10187), 'os.path.splitext', 'os.path.splitext', (['output_dir'], {}), '(output_dir)\n', (10175, 10187), False, 'import os\n'), ((11724, 11741), 'json.dump', 'json.dump', (['cag', 'f'], {}), '(cag, f)\n', (11733, 11741), False, 'import json\n'), ((11817, 11830), 'json.load', 'json.load', (['f_'], {}), '(f_)\n', (11826, 11830), False, 'import json\n'), ((12913, 12924), 'time.time', 'time.time', ([], {}), '()\n', (12922, 12924), False, 'import time\n'), ((6620, 6652), 'cv2.imwrite', 'cv2.imwrite', (["(path + '.tiff')", 'img'], {}), "(path + '.tiff', img)\n", (6631, 6652), False, 'import cv2\n'), ((6691, 6722), 'cv2.imwrite', 'cv2.imwrite', (["(path + '.png')", 'img'], {}), "(path + '.png', img)\n", (6702, 6722), False, 'import cv2\n'), ((7595, 7614), 'os.path.split', 'os.path.split', (['dire'], {}), '(dire)\n', (7608, 7614), False, 'import os\n'), ((7677, 7696), 'os.path.split', 'os.path.split', (['dire'], {}), '(dire)\n', (7690, 7696), False, 'import os\n'), ((12863, 12874), 'time.time', 'time.time', ([], {}), '()\n', (12872, 12874), False, 'import time\n'), ((5151, 5167), 'numpy.load', 'numpy.load', (['path'], {}), '(path)\n', (5161, 5167), False, 'import numpy\n'), ((10218, 10243), 'os.path.split', 'os.path.split', (['output_dir'], {}), '(output_dir)\n', (10231, 10243), False, 'import os\n'), ((10273, 10298), 'os.path.split', 'os.path.split', (['output_dir'], {}), '(output_dir)\n', (10286, 10298), False, 'import os\n'), ((10379, 10407), 'os.path.splitext', 'os.path.splitext', (['output_dir'], {}), '(output_dir)\n', (10395, 10407), False, 'import os\n'), ((13926, 13956), 'os.path.split', 'os.path.split', (['input_file_path'], {}), '(input_file_path)\n', (13939, 13956), False, 'import os\n'), 
((14341, 14382), 'os.path.join', 'os.path.join', (["cag['ffmpeg_dir']", '"""ffmpeg"""'], {}), "(cag['ffmpeg_dir'], 'ffmpeg')\n", (14353, 14382), False, 'import os\n'), ((14533, 14580), 'os.path.join', 'os.path.join', (["cag['temp_folder']", '"""tiff/*.tiff"""'], {}), "(cag['temp_folder'], 'tiff/*.tiff')\n", (14545, 14580), False, 'import os\n'), ((6915, 6942), 'os.path.splitext', 'os.path.splitext', (['input_dir'], {}), '(input_dir)\n', (6931, 6942), False, 'import os\n'), ((7107, 7133), 'os.path.splitext', 'os.path.splitext', (['files[0]'], {}), '(files[0])\n', (7123, 7133), False, 'import os\n'), ((14736, 14865), 'subprocess.getoutput', 'subprocess.getoutput', (['f"""ffprobe -v quiet -show_streams -select_streams a -print_format json \'{cag[\'input_file_path\']}\'"""'], {}), '(\n f"ffprobe -v quiet -show_streams -select_streams a -print_format json \'{cag[\'input_file_path\']}\'"\n )\n', (14756, 14865), False, 'import subprocess\n'), ((7201, 7227), 'os.path.splitext', 'os.path.splitext', (['files[0]'], {}), '(files[0])\n', (7217, 7227), False, 'import os\n'), ((7295, 7321), 'os.path.splitext', 'os.path.splitext', (['files[0]'], {}), '(files[0])\n', (7311, 7321), False, 'import os\n')] |
import tensorflow as tf
from util import blocks
from tensorflow.contrib.rnn.python.ops.core_rnn_cell import BasicLSTMCell
from tensorflow.contrib.rnn.python.ops.rnn_cell import LayerNormBasicLSTMCell
from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate
from my.tensorflow.rnn import bidirectional_dynamic_rnn, dynamic_rnn
from my.tensorflow.rnn_cell import SwitchableDropoutWrapper, AttentionCell
from my.tensorflow import flatten, reconstruct, add_wd, exp_mask
import numpy as np
class MyModel(object):
def __init__(self, config, seq_length, emb_dim, hidden_dim, emb_train, embeddings = None, pred_size = 3, context_seq_len = None, query_seq_len = None):
## Define hyperparameters
# tf.reset_default_graph()
self.embedding_dim = emb_dim
self.dim = hidden_dim
self.LSTM_dim = config.LSTM_dim
self.sequence_length = seq_length
self.pred_size = pred_size
self.context_seq_len = context_seq_len
self.query_seq_len = query_seq_len
# self.config = config
## Define the placeholders
if config.train_babi:
self.premise_x = tf.placeholder(tf.int32, [None, self.context_seq_len], name='premise')
self.hypothesis_x = tf.placeholder(tf.int32, [None, self.query_seq_len], name='hypothesis')
elif config.subword_random_init_embedding:
self.premise_x = tf.placeholder(tf.int32, [None, self.sequence_length, config.subword_feature_len], name='premise')
self.hypothesis_x = tf.placeholder(tf.int32, [None, self.sequence_length, config.subword_feature_len], name='hypothesis')
else:
self.premise_x = tf.placeholder(tf.int32, [None, self.sequence_length], name='premise')
self.hypothesis_x = tf.placeholder(tf.int32, [None, self.sequence_length], name='hypothesis')
self.premise_pos = tf.placeholder(tf.int32, [None, self.sequence_length, 47], name='premise_pos')
self.hypothesis_pos = tf.placeholder(tf.int32, [None, self.sequence_length, 47], name='hypothesis_pos')
self.premise_char = tf.placeholder(tf.int32, [None, self.sequence_length, config.char_in_word_size], name='premise_char')
self.hypothesis_char = tf.placeholder(tf.int32, [None, self.sequence_length, config.char_in_word_size], name='hypothesis_char')
self.premise_exact_match = tf.placeholder(tf.int32, [None, self.sequence_length,1], name='premise_exact_match')
self.hypothesis_exact_match = tf.placeholder(tf.int32, [None, self.sequence_length,1], name='hypothesis_exact_match')
self.premise_itf = tf.placeholder(tf.float32, [None, self.sequence_length,1], name='premise_itf')
self.hypothesis_itf = tf.placeholder(tf.float32, [None, self.sequence_length,1], name='hypothesis_itf')
self.premise_antonym = tf.placeholder(tf.int32, [None, self.sequence_length,1], name='premise_antonym')
self.hypothesis_antonym = tf.placeholder(tf.int32, [None, self.sequence_length,1], name='hypothesis_antonym')
self.premise_NER_feature = tf.placeholder(tf.int32, [None, self.sequence_length, 7], name='premise_ner_feature')
self.hypothesis_NER_feature = tf.placeholder(tf.int32, [None, self.sequence_length, 7], name='hypothesis_ner_feature')
self.positional_encoding = tf.placeholder(tf.float32, [self.sequence_length, 300], name='positional_encoding')
if config.add_tensor_to_tensor_dict:
self.tensor_dict = {}
else:
self.tensor_dict = None
self.global_step = tf.Variable(0, name='global_step', trainable=False)
# print(self.global_step.graph)
if config.dropout_keep_rate_decay:
self.dropout_keep_rate = tf.train.exponential_decay(config.keep_rate, self.global_step, config.dropout_decay_step, config.dropout_decay_rate, staircase=False, name='dropout_keep_rate')
config.keep_rate = self.dropout_keep_rate
tf.summary.scalar('dropout_keep_rate', self.dropout_keep_rate)
if config.use_label_smoothing:
self.y = tf.placeholder(tf.float32, [None, 3], name='label_y')
else:
self.y = tf.placeholder(tf.int32, [None], name='label_y')
self.keep_rate_ph = tf.placeholder(tf.float32, [], name='keep_prob')
self.is_train = tf.placeholder('bool', [], name='is_train')
## Define parameters
# self.E = tf.Variable(embeddings, trainable=emb_train)
## Fucntion for embedding lookup and dropout at embedding layer
def emb_drop(E, x):
emb = tf.nn.embedding_lookup(E, x)
if config.use_positional_encoding:
emb = emb + self.positional_encoding
if config.emb_no_dropout:
return emb
# emb_drop = tf.cond(self.is_train, lambda: tf.nn.dropout(emb, config.keep_rate), lambda: emb)
else:
# emb_drop = tf.nn.dropout(emb, self.keep_rate_ph)
emb_drop = tf.cond(self.is_train, lambda: tf.nn.dropout(emb, config.keep_rate), lambda: emb)
return emb_drop
# Get lengths of unpadded sentences
if config.subword_random_init_embedding:
prem_seq_lengths, prem_mask = blocks.length(tf.reduce_sum(self.premise_x, axis=2))
hyp_seq_lengths, hyp_mask = blocks.length(tf.reduce_sum(self.hypothesis_x, axis=2))
else:
prem_seq_lengths, prem_mask = blocks.length(self.premise_x) # mask [N, L , 1]
hyp_seq_lengths, hyp_mask = blocks.length(self.hypothesis_x)
self.prem_mask = prem_mask
self.hyp_mask = hyp_mask
### Embedding layer ###
if config.subword_random_init_embedding:
with tf.variable_scope("emb_var"), tf.device("/cpu:0"):
self.E = tf.Variable(embeddings, trainable=emb_train)
premise_in = emb_drop(self.E, self.premise_x)
hypothesis_in = emb_drop(self.E, self.hypothesis_x)
with tf.variable_scope("subword_emb_sum"):
premise_in = tf.reduce_sum(premise_in, axis=2)
hypothesis_in = tf.reduce_sum(hypothesis_in, axis=2)
if config.subword_embedding_batch_norm:
premise_in = tf.contrib.layers.batch_norm(premise_in)
hypothesis_in = tf.contrib.layers.batch_norm(hypothesis_in)
else:
with tf.variable_scope("emb"):
if config.train_babi:
with tf.variable_scope("emb_var"):
self.E = tf.get_variable("embedding", shape=[self.pred_size, self.embedding_dim])
premise_in = emb_drop(self.E, self.premise_x) #P
hypothesis_in = emb_drop(self.E, self.hypothesis_x) #H
else:
with tf.variable_scope("emb_var"), tf.device("/cpu:0"):
self.E = tf.Variable(embeddings, trainable=emb_train)
premise_in = emb_drop(self.E, self.premise_x) #P
hypothesis_in = emb_drop(self.E, self.hypothesis_x) #H
# with tf.variable_scope("char_conv"), tf.device("/gpu:0"):
if config.use_char_emb:
with tf.variable_scope("char_emb"):
char_emb_mat = tf.get_variable("char_emb_mat", shape=[config.char_vocab_size, config.char_emb_size])
with tf.variable_scope("char") as scope:
char_pre = tf.nn.embedding_lookup(char_emb_mat, self.premise_char)
char_hyp = tf.nn.embedding_lookup(char_emb_mat, self.hypothesis_char)
filter_sizes = list(map(int, config.out_channel_dims.split(','))) #[100]
heights = list(map(int, config.filter_heights.split(','))) #[5]
assert sum(filter_sizes) == config.char_out_size, (filter_sizes, config.char_out_size)
with tf.variable_scope("conv") as scope:
conv_pre = multi_conv1d(char_pre, filter_sizes, heights, "VALID", self.is_train, config.keep_rate, scope='conv')
scope.reuse_variables()
conv_hyp = multi_conv1d(char_hyp, filter_sizes, heights, "VALID", self.is_train, config.keep_rate, scope='conv')
conv_pre = tf.reshape(conv_pre, [-1, self.sequence_length, config.char_out_size])
conv_hyp = tf.reshape(conv_hyp, [-1, self.sequence_length, config.char_out_size])
if config.char_feature_linear:
with tf.variable_scope("char_linear") as scope:
conv_d = config.char_out_size
conv_pre = linear(conv_pre, conv_d , True, bias_start=0.0, scope="char_linear", \
squeeze=False, wd=config.wd, input_keep_prob=config.keep_rate, is_train=self.is_train)
scope.reuse_variables()
conv_hyp = linear(conv_hyp, conv_d , True, bias_start=0.0, scope="char_linear", \
squeeze=False, wd=config.wd, input_keep_prob=config.keep_rate, is_train=self.is_train)
elif config.char_feature_highway:
with tf.variable_scope("char_highway") as scope:
conv_pre = highway_network(conv_pre, 1, True, scope='char_conv', wd=config.wd, is_train=self.is_train)
scope.reuse_variables()
conv_hyp = highway_network(conv_hyp, 1, True, scope='char_conv', wd=config.wd, is_train=self.is_train)
premise_in = tf.concat([premise_in, conv_pre], axis=2)
hypothesis_in = tf.concat([hypothesis_in, conv_hyp], axis=2)
if config.pos_tagging:
premise_in = tf.concat((premise_in, tf.cast(self.premise_pos, tf.float32)), axis=2)
hypothesis_in = tf.concat((hypothesis_in, tf.cast(self.hypothesis_pos, tf.float32)), axis=2)
if config.use_exact_match_feature:
premise_in = tf.concat([premise_in, tf.cast(self.premise_exact_match, tf.float32)], axis=2)
hypothesis_in = tf.concat([hypothesis_in, tf.cast(self.hypothesis_exact_match, tf.float32)], axis=2)
if config.use_inverse_term_frequency_feature:
premise_in = tf.concat([premise_in, self.premise_itf], axis=2)
hypothesis_in = tf.concat([hypothesis_in, self.hypothesis_itf], axis=2)
if config.use_antonym_feature:
premise_in = tf.concat([premise_in, tf.cast(self.premise_antonym, tf.float32)], axis=2)
hypothesis_in = tf.concat([hypothesis_in, tf.cast(self.hypothesis_antonym, tf.float32)], axis=2)
if config.use_ner_feature:
premise_in = tf.concat([premise_in, tf.cast(self.premise_NER_feature, tf.float32)], axis=2)
hypothesis_in = tf.concat([hypothesis_in, tf.cast(self.hypothesis_NER_feature, tf.float32)], axis=2)
if config.raw_features:
raw_pre = premise_in
raw_hyp = hypothesis_in
# highway network
if config.add_tensor_to_tensor_dict:
self.tensor_dict["premise_with_features"] = premise_in
self.tensor_dict["hypothesis_with_features"] = hypothesis_in
if config.embedding_fuse_gate:
with tf.variable_scope("embedding_fuse_gate") as scope:
premise_in = fuse_gate(config, self.is_train, premise_in, premise_in, scope="embedding_fuse_gate")
scope.reuse_variables()
hypothesis_in = fuse_gate(config, self.is_train, hypothesis_in, hypothesis_in, scope="embedding_fuse_gate")
if config.use_input_dropout:
premise_in = tf.cond(self.is_train, lambda: tf.nn.dropout(premise_in, config.input_keep_rate), lambda: premise_in)
hypothesis_in = tf.cond(self.is_train, lambda: tf.nn.dropout(hypothesis_in, config.input_keep_rate), lambda: hypothesis_in)
if config.highway or config.use_char_emb or config.pos_tagging:
with tf.variable_scope("highway") as scope:
premise_in = highway_network(premise_in, config.highway_num_layers, True, wd=config.wd, is_train=self.is_train, output_size=config.highway_network_output_size)
if config.wo_highway_sharing_but_penalize_diff:
hypothesis_in = highway_network(hypothesis_in, config.highway_num_layers, True, scope='highway_network_h', wd=config.wd, is_train=self.is_train, output_size=config.highway_network_output_size)
else:
scope.reuse_variables()
hypothesis_in = highway_network(hypothesis_in, config.highway_num_layers, True, wd=config.wd, is_train=self.is_train, output_size=config.highway_network_output_size)
if config.add_tensor_to_tensor_dict:
self.tensor_dict["premise_after_highway"] = premise_in
self.tensor_dict["hypothesis_after_highway"] = hypothesis_in
# if config.use_positional_encoding:
# positional_enc_shape = premise_in.get_shape().as_list()[1:]
# print(positional_enc_shape)
# positional_encoding = tf.Variable(tf.random_normal(positional_enc_shape, stddev=0.5), name='positional_encoding')
# premise_in = premise_in + positional_encoding
# hypothesis_in = hypothesis_in + positional_encoding
if not config.layer_norm_LSTM:
cell = BasicLSTMCell(self.LSTM_dim, state_is_tuple=True)
else:
cell = LayerNormBasicLSTMCell(self.LSTM_dim)
d_cell = SwitchableDropoutWrapper(cell, self.is_train, input_keep_prob=config.keep_rate)
with tf.variable_scope("prepro") as scope:
# p bilstm
# tf.nn.bidirectional_dynamic_rnn(cell_fw=lstm_fwd, cell_bw=lstm_bwd, inputs=inputs, sequence_length=seq_len, dtype=tf.float32, scope=name)
if config.self_attention_encoding:
pre = premise_in
hyp = hypothesis_in
for i in range(config.self_att_enc_layers):
with tf.variable_scope(tf.get_variable_scope(), reuse=False):
p = self_attention_layer(config, self.is_train, pre, p_mask=prem_mask, scope="{}_layer_self_att_enc".format(i)) # [N, len, dim]
if config.wo_enc_sharing:
h = self_attention_layer(config, self.is_train, hyp, p_mask=hyp_mask, scope="{}_layer_self_att_enc_h".format(i))
else:
tf.get_variable_scope().reuse_variables()
h = self_attention_layer(config, self.is_train, hyp, p_mask=hyp_mask, scope="{}_layer_self_att_enc".format(i))
if config.self_att_mul_feature:
p = tf.concat([p, p*pre], axis=2)
h = tf.concat([h, h*hyp], axis=2)
elif config.self_att_diff_feature:
p = tf.concat([p, p - pre], axis=2)
h = tf.concat([h, h - hyp], axis=2)
elif config.self_att_orig_mul_feature:
p = tf.concat([pre, p, p * pre], axis=2)
h = tf.concat([hyp, h, h * hyp], axis=2)
elif config.self_att_orig_diff_mul_feature:
p = tf.concat([pre, p, p * pre, p - pre], axis=2)
h = tf.concat([hyp, h, h * hyp, h - hyp], axis=2)
elif config.self_att_orig_feature:
p = tf.concat([p, pre], axis=2)
h = tf.concat([h, hyp], axis=2)
if config.self_att_encoding_with_linear_mapping:
with tf.variable_scope(tf.get_variable_scope(), reuse=False):
p = linear_mapping_with_residual_conn(config, self.is_train, p, p_mask=prem_mask, scope="{}_layer_self_att_linear_mapping".format(i))
tf.get_variable_scope().reuse_variables()
h = linear_mapping_with_residual_conn(config, self.is_train, h, p_mask=hyp_mask, scope="{}_layer_self_att_linear_mapping".format(i))
variable_summaries(p, "p_self_enc_summary_layer_{}".format(i))
variable_summaries(h, "h_self_enc_summary_layer_{}".format(i))
pre = p
hyp = h
elif config.self_cross_att_enc:
p = premise_in
h = hypothesis_in
for i in range(config.self_cross_att_enc_layers):
with tf.variable_scope(tf.get_variable_scope(), reuse=False):
p = self_attention_layer(config, self.is_train, p, p_mask=prem_mask, scope="{}_layer_self_att_enc".format(i))
tf.get_variable_scope().reuse_variables()
h = self_attention_layer(config, self.is_train, h, p_mask=hyp_mask, scope="{}_layer_self_att_enc".format(i))
with tf.variable_scope(tf.get_variable_scope(), reuse=False):
p1 = cross_attention_layer(config, self.is_train, p, h, p_mask=prem_mask, h_mask=hyp_mask, scope="{}_layer_cross_att_enc".format(i))
tf.get_variable_scope().reuse_variables()
h1 = cross_attention_layer(config, self.is_train, h, p, p_mask=hyp_mask, h_mask=prem_mask, scope="{}_layer_cross_att_enc".format(i))
p = p1
h = h1
elif config.linear_fuse_gate_encoding:
p = premise_in
h = hypothesis_in
for i in range(config.linear_fuse_gate_layers):
with tf.variable_scope(tf.get_variable_scope(), reuse=False):
dim = p.get_shape().as_list()[-1]
p1 = linear(p, dim ,True, bias_start=0.0, scope="linear_enc_{}".format(i), squeeze=False, wd=config.wd, input_keep_prob=config.keep_rate, is_train=self.is_train)
p = fuse_gate(config, self.is_train, p, p1, scope="linear_enc_fuse_gate_{}".format(i))
tf.get_variable_scope().reuse_variables()
h1 = linear(h, dim ,True, bias_start=0.0, scope="linear_enc_{}".format(i), squeeze=False, wd=config.wd, input_keep_prob=config.keep_rate, is_train=self.is_train)
h = fuse_gate(config, self.is_train, h, h1, scope="linear_enc_fuse_gate_{}".format(i))
else:
# (fw_p, bw_p), _ = tf.nn.bidirectional_dynamic_rnn(cell_fw=d_cell, cell_bw=d_cell, inputs=premise_in, sequence_length=prem_seq_lengths, dtype=tf.float32, scope='p') # [N, L, d] * 2
# p = tf.concat([fw_p, bw_p], axis=2)
# # p = tf.concat(2, [fw_p, bw_p]) #[N, L, 2d]
# # h bilstm
# scope.reuse_variables()
# (fw_h, bw_h), _ = tf.nn.bidirectional_dynamic_rnn(cell_fw=d_cell, cell_bw=d_cell, inputs=hypothesis_in, sequence_length=hyp_seq_lengths, dtype=tf.float32, scope='p')
# h = tf.concat([fw_h, bw_h], axis=2) #[N, L, 2d]
p = premise_in
h = hypothesis_in
if config.add_tensor_to_tensor_dict:
self.tensor_dict["premise_after_self_attention"] = p
self.tensor_dict["hypothesis_after_self_attention"] = h
if config.use_memory_augmentation:
with tf.variable_scope("mem_augmt") as scope:
p = memory_augment_layer(config, p, prem_mask, self.is_train, config.memory_key_and_values_num, name="memory_augmentation_layer")
scope.reuse_variables()
h = memory_augment_layer(config, h, hyp_mask, self.is_train, config.memory_key_and_values_num, name="memory_augmentation_layer")
if config.LSTM_encoding:
with tf.variable_scope("LSTM_encoding") as scope:
cell = tf.contrib.rnn.LSTMCell(p.get_shape().as_list()[-1])
d_cell = SwitchableDropoutWrapper(cell, self.is_train, input_keep_prob=config.keep_rate)
(fw_p, bw_p), _ = tf.nn.bidirectional_dynamic_rnn(cell_fw=d_cell, cell_bw=d_cell, inputs=p, sequence_length=prem_seq_lengths, dtype=tf.float32, scope='p') # [N, L, d] * 2
p_lstm_enc = tf.concat([fw_p, bw_p], axis=2)
# p = tf.concat(2, [fw_p, bw_p]) #[N, L, 2d]
# h bilstm
scope.reuse_variables()
(fw_h, bw_h), _ = tf.nn.bidirectional_dynamic_rnn(cell_fw=d_cell, cell_bw=d_cell, inputs=h, sequence_length=hyp_seq_lengths, dtype=tf.float32, scope='p')
h_lstm_enc = tf.concat([fw_h, bw_h], axis=2) #[N, L, 2d]
if config.lstm_fuse_gate:
with tf.variable_scope(tf.get_variable_scope(), reuse=False):
p = fuse_gate(config, self.is_train, p, p_lstm_enc, scope="lstm_enc_fuse_gate")
tf.get_variable_scope().reuse_variables()
h = fuse_gate(config, self.is_train, h, h_lstm_enc, scope='lstm_enc_fuse_gate')
else:
p = p_lstm_enc
h = h_lstm_enc
with tf.variable_scope("main") as scope:
            def model_one_side(config, main, support, main_length, support_length, main_mask, support_mask, scope):
                """Build one direction of the matching tower: attend `main` against
                `support`, post-process the dense attention tensor through the
                config-selected transforms, and reduce it to a fixed-size feature
                vector via DenseNet or the residual-conv blocks.

                main/support: encoded sequences; *_mask: padding masks from
                blocks.length (shapes assumed [N, L, ...] — TODO confirm).
                main_length/support_length are accepted but unused here.
                Returns the pooled feature tensor fed to the classifier.
                """
                with tf.variable_scope(scope or "model_one_side"):
                    # p0 = attention_layer(config, self.is_train, main, support, p_mask=main_mask, h_mask=support_mask, scope="first_hop_att")
                    if config.add_one_d_feature_to_matrix:
                        # Prepend pooled one-dim summary rows to each sequence.
                        main = self.add_one_d_feature(config, main, main_mask, scope='main')
                        support = self.add_one_d_feature(config, support, support_mask, scope='support')
                    bi_att_mx = bi_attention_mx(config, self.is_train, main, support, p_mask=main_mask, h_mask=support_mask) # [N, PL, HL]
                    # bi_att_mx = tf.expand_dims(bi_att_mx, 3)
                    print(bi_att_mx.get_shape().as_list())
                    if config.add_tensor_to_tensor_dict:
                        self.tensor_dict["dense_attention"] = bi_att_mx
                    if config.norm_dense_attention_with_last_dim:
                        bi_att_mx = normalize(bi_att_mx)
                    if config.dense_attention_dropout:
                        bi_att_mx = tf.cond(self.is_train, lambda: tf.nn.dropout(bi_att_mx, config.keep_rate), lambda: bi_att_mx)
                    if config.similarity_matrix_dot:
                        # Collapse the channel axis to a single dot-product map.
                        bi_att_mx = tf.expand_dims(tf.reduce_sum(bi_att_mx, axis=3) , axis=3)
                    if config.dense_attention_highway:
                        bi_att_mx = highway_network(bi_att_mx, config.highway_num_layers, True, wd=config.wd, is_train=self.is_train)
                    elif config.dense_attention_self_fuse_gate:
                        bi_att_mx = fuse_gate(config, self.is_train, bi_att_mx, bi_att_mx, scope="dense_attention_self_fuse_gate")
                    if config.dense_attention_linear:
                        bi_att_mx = linear(bi_att_mx, bi_att_mx.get_shape().as_list()[-1] ,True, bias_start=0.0, scope="DA_linear", squeeze=False, wd=config.wd, input_keep_prob=config.keep_rate,
                            is_train=self.is_train)
                    # if config.cos_similarity_feature:
                    #   bi_att_mx = cos_similarity_feature(bi_att_mx)
                    # if config.dense_attention_shuffle_add:
                    bi_att_mx = add_features(config, bi_att_mx, main_mask, support_mask)
                    if config.dense_attention_layer_norm:
                        bi_att_mx = tf.contrib.layers.layer_norm(bi_att_mx)
                    print("DenseAttentionFinalSize")
                    print(bi_att_mx.get_shape().as_list())
                    if config.use_dense_net:
                        out_final = dense_net(config, bi_att_mx, self.is_train, self.tensor_dict)
                    else: #ResNet
                        # One conv tower per configured kernel size; concatenate results.
                        conv_filters = [int(item) for item in config.conv_filter_size.split(",")]
                        conv_features = [conv_blocks(config, bi_att_mx, fn, "conv_block_knl_{}".format(fn), self.is_train, self.tensor_dict) for fn in conv_filters]
                        out_final = tf.concat(conv_features, axis=1)
                    #max pooling [N, 3. 3. config.res_conv_3_chan]
                    return out_final
# Vanilla BiDAF & RR
if config.use_multi_perspective_matching:
tmp_p = self.multi_perspective_merge(config, p, h, scope = "p_MPM")
tmp_h = self.multi_perspective_merge(config, h, p, scope = 'h_MPM')
p = tmp_p
h = tmp_h
if config.cross_alignment:
tmp_p = cross_attention_layer(config, self.is_train, p, h, prem_mask, hyp_mask,scope = "p_cross_att") #cross_attention_layer(config, is_train, p, h, p_mask=None, h_mask=None, scope=None, tensor_dict=None)
tmp_h = cross_attention_layer(config, self.is_train, h, p, hyp_mask, prem_mask,scope = 'h_cross_att')
p = tmp_p
h = tmp_h
if config.raw_features:
p = raw_pre
h = raw_hyp
premise_final = model_one_side(config, p, h, prem_seq_lengths, hyp_seq_lengths, prem_mask, hyp_mask, scope="premise_as_main")
if config.BiBiDAF:
scope.reuse_variables()
hypothesis_final = model_one_side(config, h, p, hyp_seq_lengths, prem_seq_lengths, hyp_mask, prem_mask, scope="premise_as_main")
if config.diff_mul_output:
diff = tf.subtract(premise_final, hypothesis_final)
mul = tf.multiply(premise_final, hypothesis_final)
f0 = tf.concat((premise_final, hypothesis_final, diff, mul), axis=1)
elif config.abs_diff_mul_output:
diff = tf.abs(tf.subtract(premise_final, hypothesis_final))
mul = tf.multiply(premise_final, hypothesis_final)
f0 = tf.concat((premise_final, hypothesis_final, diff, mul), axis=1)
else:
f0 = premise_final
if config.bilinear_out:
with tf.variable_scope(tf.get_variable_scope(), reuse=False):
f0 = tf.nn.relu(linear(f0, self.LSTM_dim ,True, bias_start=0.0, scope="bilinear", squeeze=False, wd=config.wd, input_keep_prob=config.keep_rate,
is_train=self.is_train))
if config.encoding_layer_classification_loss:
p_vec = tf.reduce_max(p, axis=1)
h_vec = tf.reduce_max(h, axis=1)
if config.without_conv:
f0 = tf.concat([p_vec, h_vec, p_vec - h_vec, p_vec * h_vec], axis=1)
else:
f0 = tf.concat([f0, p_vec, h_vec, p_vec - h_vec, p_vec * h_vec], axis=1)
# Get prediction
# self.logits = tf.matmul(h_drop, self.W_cl) + self.b_cl
if config.max_out_logit:
logits = []
for k in range(config.max_out_logit_num):
lgt = linear(f0, self.pred_size ,True, bias_start=0.0, scope="max_out_logit_{}".format(k), squeeze=False, wd=config.wd, input_keep_prob=config.keep_rate,
is_train=self.is_train)
logits.append(lgt)
logtis_aug = [tf.expand_dims(tensor, axis=2) for tensor in logits]
self.logits = tf.reduce_max(tf.concat(logtis_aug, axis=2), axis=2)
elif config.squared_out_logit:
self.logits = linear(f0, self.pred_size ,True, bias_start=0.0, scope="logit", squeeze=False, wd=config.wd, input_keep_prob=config.keep_rate,
is_train=self.is_train)
self.logits = self.logits * self.logits
else:
self.logits = linear(f0, self.pred_size ,True, bias_start=0.0, scope="logit", squeeze=False, wd=config.wd, input_keep_prob=config.keep_rate,
is_train=self.is_train)
tf.summary.histogram('logit_histogram', self.logits)
# Define the cost function
if config.use_label_smoothing:
# label_smoothing_ratio = tf.constant(config.label_smoothing_ratio / 3, dtype='float', shape=[], name='label_smoothing_ratio')
sm_lgt = tf.nn.softmax(self.logits)
self.total_cost = - tf.reduce_mean(tf.reduce_sum(self.y * tf.log(sm_lgt) + (1 - self.y) * (tf.log( 1 - sm_lgt)), axis=1))
self.acc = tf.reduce_mean(tf.cast(tf.equal(tf.arg_max(self.logits, dimension=1),tf.arg_max(self.y,dimension=1)), tf.float32))
else:
self.total_cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.y, logits=self.logits))
self.acc = tf.reduce_mean(tf.cast(tf.equal(tf.arg_max(self.logits, dimension=1),tf.cast(self.y,tf.int64)), tf.float32))
tf.summary.scalar('acc', self.acc)
if config.use_encoding_layer_classification_loss:
p_vec = tf.reduce_max(p, axis=1)
h_vec = tf.reduce_max(h, axis=1)
cat = tf.concat([p_vec, h_vec, p_vec - h_vec, p_vec * h_vec], axis=1)
enc_loss_ratio = tf.constant(config.enc_loss_ratio, dtype='float', shape=[], name="encoding_loss_ratio")
enc_logits = linear(cat, 3 ,True, bias_start=0.0, scope="enc_logit", squeeze=False, wd=config.wd, input_keep_prob=config.keep_rate, is_train=self.is_train)
self.total_cost += tf.reduce_mean(tf.reductf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.y, logits=enc_logits)) * enc_loss_ratio
tf.summary.scalar('loss', self.total_cost)
# calculate acc
# L2 Loss
if config.l2_loss:
if config.sigmoid_growing_l2loss:
weights_added = tf.add_n([tf.nn.l2_loss(tensor) for tensor in tf.trainable_variables() if tensor.name.endswith("weights:0") and not tensor.name.endswith("weighted_sum/weights:0")])
full_l2_step = tf.constant(config.weight_l2loss_step_full_reg , dtype=tf.int32, shape=[], name='full_l2reg_step')
full_l2_ratio = tf.constant(config.l2_regularization_ratio , dtype='float', shape=[], name='l2_regularization_ratio')
l2loss_ratio = tf.sigmoid( tf.cast((self.global_step - full_l2_step / 2) * 8, tf.float32) / tf.cast(full_l2_step / 2,tf.float32)) * full_l2_ratio
tf.summary.scalar('l2loss_ratio', l2loss_ratio)
l2loss = weights_added * l2loss_ratio
else:
l2loss = tf.add_n([tf.nn.l2_loss(tensor) for tensor in tf.trainable_variables() if tensor.name.endswith("weights:0")]) * tf.constant(config.l2_regularization_ratio , dtype='float', shape=[], name='l2_regularization_ratio')
tf.summary.scalar('l2loss', l2loss)
self.total_cost += l2loss
if config.wo_enc_sharing or config.wo_highway_sharing_but_penalize_diff and not config.raw_features:
diffs = []
for i in range(config.self_att_enc_layers):
for tensor in tf.trainable_variables():
if config.wo_enc_sharing:
if tensor.name == "prepro/{}_layer_self_att_enc/self_attention/h_logits/first/weights:0".format(i):
l_lg = tensor
elif tensor.name == "prepro/{}_layer_self_att_enc_h/self_attention/h_logits/first/weights:0".format(i):
r_lg = tensor
elif tensor.name == "prepro/{}_layer_self_att_enc/self_att_fuse_gate/lhs_1/weights:0".format(i):
l_fg_lhs_1 = tensor
elif tensor.name == "prepro/{}_layer_self_att_enc_h/self_att_fuse_gate/lhs_1/weights:0".format(i):
r_fg_lhs_1= tensor
elif tensor.name == "prepro/{}_layer_self_att_enc/self_att_fuse_gate/rhs_1/weights:0".format(i):
l_fg_rhs_1= tensor
elif tensor.name == "prepro/{}_layer_self_att_enc_h/self_att_fuse_gate/rhs_1/weights:0".format(i):
r_fg_rhs_1= tensor
elif tensor.name == "prepro/{}_layer_self_att_enc/self_att_fuse_gate/lhs_2/weights:0".format(i):
l_fg_lhs_2= tensor
elif tensor.name == "prepro/{}_layer_self_att_enc_h/self_att_fuse_gate/lhs_2/weights:0".format(i):
r_fg_lhs_2= tensor
elif tensor.name == "prepro/{}_layer_self_att_enc/self_att_fuse_gate/rhs_2/weights:0".format(i):
l_fg_rhs_2= tensor
elif tensor.name == "prepro/{}_layer_self_att_enc_h/self_att_fuse_gate/rhs_2/weights:0".format(i):
r_fg_rhs_2= tensor
if config.two_gate_fuse_gate:
if tensor.name == "prepro/{}_layer_self_att_enc/self_att_fuse_gate/lhs_3/weights:0".format(i):
l_fg_lhs_3 = tensor
elif tensor.name == "prepro/{}_layer_self_att_enc_h/self_att_fuse_gate/lhs_3/weights:0".format(i):
r_fg_lhs_3 = tensor
elif tensor.name == "prepro/{}_layer_self_att_enc/self_att_fuse_gate/rhs_3/weights:0".format(i):
l_fg_rhs_3 = tensor
elif tensor.name == "prepro/{}_layer_self_att_enc_h/self_att_fuse_gate/rhs_3/weights:0".format(i):
r_fg_rhs_3 = tensor
if config.wo_enc_sharing:
diffs += [l_lg - r_lg, l_fg_lhs_1 - r_fg_lhs_1, l_fg_rhs_1 - r_fg_rhs_1, l_fg_lhs_2 - r_fg_lhs_2, l_fg_rhs_2 - r_fg_rhs_2]
if config.two_gate_fuse_gate:
diffs += [l_fg_lhs_3 - r_fg_lhs_3, l_fg_rhs_3 - r_fg_rhs_3]
for tensor in tf.trainable_variables():
if config.wo_highway_sharing_but_penalize_diff:
if tensor.name == "highway/highway_network/layer_0/trans/weights:0":
l_hw_0_trans = tensor
elif tensor.name == "highway/highway_network_h/layer_0/trans/weights:0":
r_hw_0_trans = tensor
elif tensor.name == "highway/highway_network/layer_0/gate/weights:0":
l_hw_0_gate = tensor
elif tensor.name == "highway/highway_network_h/layer_0/gate/weights:0":
r_hw_0_gate = tensor
elif tensor.name == "highway/highway_network/layer_1/trans/weights:0":
l_hw_1_trans = tensor
elif tensor.name == "highway/highway_network_h/layer_1/trans/weights:0":
r_hw_1_trans = tensor
elif tensor.name == "highway/highway_network/layer_1/gate/weights:0":
l_hw_1_gate = tensor
elif tensor.name == "highway/highway_network_h/layer_1/gate/weights:0":
r_hw_1_gate = tensor
if config.wo_highway_sharing_but_penalize_diff:
diffs += [l_hw_0_gate - r_hw_0_gate, l_hw_0_trans - r_hw_0_trans, l_hw_1_trans - r_hw_1_trans, l_hw_1_gate - r_hw_1_gate]
if config.sigmoid_growing_l2_diff_loss:
weights_added = tf.add_n([tf.nn.l2_loss(tensor) for tensor in diffs])
full_l2_step = tf.constant(config.diff_l2_penalty_full_step , dtype=tf.int32, shape=[], name='full_l2reg_step')
diff_l2_ratio = tf.constant(config.diff_penalty_loss_ratio , dtype='float', shape=[], name='diff_penalty_loss_ratio')
diff_l2loss_ratio = tf.sigmoid(tf.cast((self.global_step - full_l2_step / 2) * 8, tf.float32) / tf.cast(full_l2_step / 2,tf.float32)) * diff_l2_ratio
tf.summary.scalar('diff_l2loss_ratio', diff_l2loss_ratio)
diff_loss = weights_added * diff_l2loss_ratio
else:
diff_loss = tf.add_n([tf.nn.l2_loss(tensor) for tensor in diffs]) * tf.constant(config.diff_penalty_loss_ratio , dtype='float', shape=[], name='diff_penalty_loss_ratio')
tf.summary.scalar('diff_penalty_loss', diff_loss)
self.total_cost += diff_loss
if config.similarity_penalty_loss:
# losses = tf.map_fn(lambda x: tf.cond(x[0], lambda: x[1], lambda: 1/(x[1]+0.001)) , (self.y, diff_rel), dtype="float")
p_vec = tf.reduce_max(p, axis=1)
h_vec = tf.reduce_max(h, axis=1)
cos_sim = cosine_similarity(p_vec, h_vec)
entailment_switch = tf.equal(self.y, tf.constant(0, dtype=tf.int32))
neutral_switch = tf.equal(self.y, tf.constant(1, dtype=tf.int32))
contradiction_switch = tf.equal(self.y, tf.constant(2, dtype=tf.int32))
entailment_loss = tf.map_fn(lambda x: tf.cond(x[0], lambda: 1 / x[1], lambda: tf.constant(0.0, dtype=tf.float32)) , (entailment_switch, cos_sim), dtype="float")
neutral_loss = tf.map_fn(lambda x: tf.cond(x[0], lambda: tf.abs(x[1]), lambda: tf.constant(0.0, dtype=tf.float32)) , (neutral_switch, cos_sim), dtype="float")
contradiction_loss = tf.map_fn(lambda x: tf.cond(x[0], lambda: 1 / (-x[1]), lambda: tf.constant(0.0, dtype=tf.float32)) , (contradiction_switch, cos_sim), dtype="float")
self.total_cost += tf.reduce_mean(tf.add_n([entailment_loss, neutral_loss, contradiction_loss]))
self.summary = tf.summary.merge_all()
total_parameters = 0
for v in tf.global_variables():
if not v.name.endswith("weights:0") and not v.name.endswith("biases:0"):
continue
print(v.name)
# print(type(v.name))
shape = v.get_shape().as_list()
param_num = 1
for dim in shape:
param_num *= dim
print(param_num)
total_parameters += param_num
print(total_parameters)
    def add_one_d_feature(self, config, matrix, mask, scope):
        """Prepend pooled one-step summary features to a sequence tensor.

        matrix: [N, L, d] sequence (shape assumed — TODO confirm); mask: additive
        padding mask consumed by exp_mask. Depending on config flags this collects
        max-pool, mean-pool, and/or learned attention-weighted sums over the length
        axis. Normally returns [summaries; matrix] concatenated on axis 1; the
        `only_some_linear_weighted_sum_to_sentence` and
        `encoding_dim_as_attention_weight` branches return ONLY the summary rows
        (early return, original matrix dropped). With no flag set, returns matrix
        unchanged.
        """
        with tf.variable_scope(scope or "add_one_d_feature"):
            features = []
            if config.add_max_feature_to_sentence:
                features.append(tf.reduce_max(matrix, axis=1))
            if config.add_mean_feature_to_sentence:
                features.append(tf.reduce_mean(matrix, axis=1))
            if config.add_linear_weighted_sum_to_sentence:
                # Single learned attention head: mask the logits, softmax over the
                # length axis, and take the weighted sum of timesteps.
                wgt = linear(matrix, 1 ,True, bias_start=0.0, scope="weighted_sum", squeeze=False, wd=config.wd, input_keep_prob=config.keep_rate, is_train=self.is_train)
                wgt = exp_mask(wgt, mask)
                weighted_sum = tf.reduce_sum(tf.nn.softmax(wgt, dim = 1) * matrix ,axis=1)
                # weighted_sum = tf.Print(weighted_sum,)
                features.append(weighted_sum)
            if config.add_some_linear_weighted_sum_to_sentence:
                # Eight attention heads from one linear projection; each unstacked
                # logit column yields one weighted-sum summary.
                # NOTE(review): reuses scope "weighted_sum" — would collide with the
                # add_linear_weighted_sum_to_sentence branch if both flags are set.
                wgt = linear(matrix, 8 ,True, bias_start=0.0, scope="weighted_sum", squeeze=False, wd=config.wd, input_keep_prob=config.keep_rate, is_train=self.is_train)
                list_of_logits = tf.unstack(wgt, axis=2)
                for logit in list_of_logits:
                    # print(logit.get_shape().as_list())
                    logit_tmp = tf.expand_dims(logit, axis=2)
                    # print(logit_tmp.get_shape().as_list())
                    wgt_tmp = exp_mask(logit_tmp, mask)
                    # print(wgt_tmp.get_shape().as_list())
                    weighted_sum = tf.reduce_sum(tf.nn.softmax(wgt_tmp, dim=1) * matrix, axis=1)
                    features.append(weighted_sum)
            if config.only_some_linear_weighted_sum_to_sentence:
                # 48 attention heads, optionally through a 200-unit ReLU bottleneck
                # ("biliear" flag name kept as-is from config).
                if config.some_linear_weighted_sum_biliear_logit:
                    tmp_weight = tf.nn.relu(linear(matrix, 200 ,True , bias_start=0.0, scope="weighted_sum_1", squeeze=False, wd=config.wd, input_keep_prob=config.keep_rate, is_train=self.is_train))
                    wgt = linear(tmp_weight, 48 ,False , bias_start=0.0, scope="weighted_sum", squeeze=False, wd=config.wd, input_keep_prob=config.keep_rate, is_train=self.is_train)
                else:
                    wgt = linear(matrix, 48 ,False , bias_start=0.0, scope="weighted_sum", squeeze=False, wd=config.wd, input_keep_prob=config.keep_rate, is_train=self.is_train)
                list_of_logits = tf.unstack(wgt, axis=2)
                for logit in list_of_logits:
                    # print(logit.get_shape().as_list())
                    logit_tmp = tf.expand_dims(logit, axis=2)
                    # print(logit_tmp.get_shape().as_list())
                    wgt_tmp = exp_mask(logit_tmp, mask)
                    # print(wgt_tmp.get_shape().as_list())
                    weighted_sum = tf.reduce_sum(tf.nn.softmax(wgt_tmp, dim=1) * matrix, axis=1)
                    features.append(weighted_sum)
                # Early return: summaries only, original timesteps are not kept.
                features = [tf.expand_dims(f, axis=1) for f in features]
                return tf.concat(features, axis=1)
            if config.encoding_dim_as_attention_weight:
                # Use the first k encoding dimensions themselves as attention logits.
                list_of_logits = tf.unstack(matrix, axis=2)[:config.num_encoding_dim_as_attention_weight]
                for logit in list_of_logits:
                    # print(logit.get_shape().as_list())
                    logit_tmp = tf.expand_dims(logit, axis=2)
                    # print(logit_tmp.get_shape().as_list())
                    wgt_tmp = exp_mask(logit_tmp, mask)
                    # print(wgt_tmp.get_shape().as_list())
                    weighted_sum = tf.reduce_sum(tf.nn.softmax(wgt_tmp, dim=1) * matrix, axis=1)
                    features.append(weighted_sum)
                # Early return: summaries only, original timesteps are not kept.
                features = [tf.expand_dims(f, axis=1) for f in features]
                return tf.concat(features, axis=1)
            if len(features) == 0:
                return matrix
            else:
                # Stack summaries as extra leading timesteps ahead of the sequence.
                features = [tf.expand_dims(f, axis=1) for f in features]
                ft = tf.concat(features, axis=1)
                return tf.concat([ft, matrix], axis=1)
def multi_perspective_merge(self, config, lhs, rhs, scope = None):
with tf.variable_scope(scope or "multi_perspective_merge"):
features = []
if config.MPM_max_pool:
l = tf.reduce_max(lhs, axis=1)
r = tf.reduce_max(rhs, axis=1)
features.append(self.multi_perspective_generation(config, l, r, 16, "MPM_max_pool"))
if len(features) == 0:
return lhs
else:
ftr = tf.concat(features, axis=1)
print("{} out shape".format(scope))
print(ftr.get_shape().as_list())
return ftr
def multi_perspective_generation(self, config, lhs, rhs, perspectives, scope):
with tf.variable_scope(scope or "multi_perspective_matching"):
dim = lhs.get_shape().as_list()[-1]
comm = lhs * rhs #
comm_aug = tf.tile(tf.expand_dims(comm, axis=1), [1, perspectives, 1])
perspect_weight = tf.get_variable("perspect_weight", shape=[perspectives, dim])
return comm_aug * perspect_weight
def conv_blocks(config, arg, filter_size, name, is_train, tensor_dict=None):
    """CNN encoder: a stack of residual conv blocks with three pooling stages.

    Args:
        config: model configuration flags (activation / norm / architecture).
        arg: input feature map [N, H, W, C_in]; inline comments assume 48x48.
        filter_size: conv kernel size forwarded to `residual`.
        name: variable-scope name (defaults to "conv_blocks").
        is_train: training flag forwarded to the residual blocks.
        tensor_dict: optional dict for recording intermediate tensors.

    Returns:
        2-D tensor [N, H*W*C]: the final feature map flattened per example.
    """
    with tf.variable_scope(name or "conv_blocks"):
        def conv_pooling(res, name):
            # Learned 2x2 stride-2 conv used as a replacement for max-pooling.
            with tf.variable_scope(name or "conv_pooling"):
                chan = res.get_shape().as_list()[-1]
                filters = tf.get_variable("filter", shape=[2,2,chan,chan],dtype='float')
                bias = tf.get_variable("bias", shape=[chan], dtype='float')
                return tf.nn.conv2d(res, filters, [1,2,2,1], "VALID", name='conv_pooling') + bias
        # Activation selected from mutually exclusive config flags.
        if config.use_elu:
            act = tf.nn.elu
        elif config.conv_use_tanh_act:
            act = tf.tanh
        elif config.use_selu:
            act = selu
        elif config.use_PRelu:
            act = PRelu
        else:
            act = tf.nn.relu
        if config.conv_layer_norm:
            norm=tf.contrib.layers.layer_norm
        else:
            norm=tf.contrib.layers.batch_norm
        init_dim = arg.get_shape().as_list()[-1]
        if config.transitioning_conv_blocks:
            # BUG FIX: the 2nd and 3rd transition blocks previously read `arg`
            # again, silently discarding the preceding block's output; chain
            # them through `res` instead.
            res = residual(config, arg, init_dim, 336, filter_size, "res_transition_1", act = act, norm=norm, is_train = is_train, tensor_dict=tensor_dict)
            res = residual(config, res, 336, 224, filter_size, "res_transition_2", act = act, norm=norm, is_train = is_train, tensor_dict=tensor_dict)
            res = residual(config, res, 224, config.res_conv_1_chan, filter_size, "res1", act = act, norm=norm, is_train = is_train, tensor_dict=tensor_dict)
        else:
            res = residual(config, arg, init_dim, config.res_conv_1_chan, filter_size, "res1", act = act, norm=norm, is_train = is_train, tensor_dict=tensor_dict)
        print(res.get_shape().as_list())
        # N * 48 * 48 * config.res_conv_1_chan
        res = residual(config, res, config.res_conv_1_chan, config.res_conv_1_chan, filter_size, "res2", act = act, norm=norm, is_train = is_train, tensor_dict=tensor_dict)
        print(res.get_shape().as_list())
        # N * 48 * 48 * config.res_conv_1_chan
        if not config.rm_1_chan1_conv:
            res = residual(config, res, config.res_conv_1_chan, config.res_conv_1_chan, filter_size, "res3", act = act, norm=norm, is_train = is_train, tensor_dict=tensor_dict)
        # First pooling stage -> [N, 24, 24, config.res_conv_1_chan]
        if config.use_stride2_conv_replace_max_pooling:
            res = conv_pooling(res, "first_conv_pool")
        else:
            res = tf.nn.max_pool(res, [1,2,2,1], [1,2,2,1], "VALID")
        if not config.even_smaller_CNN:
            res = residual(config, res, config.res_conv_1_chan, config.res_conv_1_chan, filter_size, "res4", act = act, norm=norm, is_train = is_train, tensor_dict=tensor_dict)
            res = residual(config, res, config.res_conv_1_chan, config.res_conv_1_chan, filter_size, "res5", act = act, norm=norm, is_train = is_train, tensor_dict=tensor_dict)
        # N * 24 * 24 * config.res_conv_2_chan
        res = residual(config, res, config.res_conv_1_chan, config.res_conv_2_chan, filter_size, "res6", act = act, norm=norm, is_train = is_train, tensor_dict=tensor_dict)
        # Second pooling stage.
        if config.use_stride2_conv_replace_max_pooling:
            res = conv_pooling(res, "second_conv_pool")
        else:
            res = tf.nn.max_pool(res, [1,2,2,1], [1,2,2,1], "VALID")
        res = residual(config, res, config.res_conv_2_chan, config.res_conv_2_chan, filter_size, "res7", act = act, norm=norm, is_train = is_train, tensor_dict=tensor_dict)
        if not config.even_smaller_CNN:
            res = residual(config, res, config.res_conv_2_chan, config.res_conv_2_chan, filter_size, "res8", act = act, norm=norm, is_train = is_train, tensor_dict=tensor_dict)
        if config.add_1_chan2_conv:
            res = residual(config, res, config.res_conv_2_chan, config.res_conv_2_chan, filter_size, "res8_1", act = act, norm=norm, is_train = is_train, tensor_dict=tensor_dict)
        res = residual(config, res, config.res_conv_2_chan, config.res_conv_3_chan, filter_size, "res9", act = act, norm=norm, is_train = is_train, tensor_dict=tensor_dict)
        # Third pooling stage.
        if config.use_stride2_conv_replace_max_pooling:
            res = conv_pooling(res, "third_conv_pool")
        else:
            res = tf.nn.max_pool(res, [1,2,2,1], [1,2,2,1], "VALID")
        res = residual(config, res, config.res_conv_3_chan, config.res_conv_3_chan, filter_size, "res13", act = act, norm=norm, is_train = is_train, tensor_dict=tensor_dict)
        if not config.rm_1_chan3_conv:
            res = residual(config, res, config.res_conv_3_chan, config.res_conv_3_chan, filter_size, "res14", act = act, norm=norm, is_train = is_train, tensor_dict=tensor_dict)
            res = residual(config, res, config.res_conv_3_chan, config.res_conv_3_chan, filter_size, "res15", act = act, norm=norm, is_train = is_train, tensor_dict=tensor_dict)
        # Final pooling: 6x6 average, 6x6 avg+max concat, 2x2 max, or none.
        if config.last_avg_pooling:
            res = tf.nn.avg_pool(res, [1,6,6,1],[1,1,1,1],"VALID")
        elif config.last_avg_max_pooling:
            max_pool = tf.nn.max_pool(res, [1,6,6,1],[1,1,1,1], "VALID")
            avg_pool = tf.nn.avg_pool(res, [1,6,6,1],[1,1,1,1], "VALID")
            res = tf.concat([max_pool, avg_pool], axis=3)
        elif not config.wo_last_max_pool:
            res = tf.nn.max_pool(res, [1,2,2,1], [1,2,2,1], "VALID")
        shape_list = res.get_shape().as_list()
        print(shape_list)
        out_final = tf.reshape(res, [-1, shape_list[1]*shape_list[2]*shape_list[3]])
        if config.add_tensor_to_tensor_dict:
            tensor_dict['conv_out_before_reshape'] = res
            tensor_dict['conv_out_after_reshape'] = out_final
        return out_final
def shuffle_add(config, dense_tensor):
    """Randomly pair up the channels of `dense_tensor` (axis 3) and sum each
    pair, halving the channel count.

    The shuffle happens at graph-construction time (Python list shuffle), so
    the pairing is fixed once the graph is built.

    Args:
        config: model configuration (unused here; kept for call-site symmetry).
        dense_tensor: 4-D tensor [N, H, W, C].

    Returns:
        Tensor [N, H, W, C // 2] (an odd trailing channel is dropped).
    """
    list_of_logits = tf.unstack(dense_tensor, axis=3)
    np.random.shuffle(list_of_logits)
    list_of_new_logits = []
    # BUG FIX: `len(...) / 2` is a float under Python 3 and range() rejects
    # it; use integer floor division.
    for i in range(len(list_of_logits) // 2):
        list_of_new_logits.append(list_of_logits[2 * i] + list_of_logits[2 * i + 1])
    list_of_new_logits = [tf.expand_dims(tensor, axis=3) for tensor in list_of_new_logits]
    new_logit = tf.concat(list_of_new_logits, axis=3)
    return new_logit
def add_features(config, dense_attention, p_mask, h_mask):
    """Optionally append max/mean/min/sum summary channels (and their softmax
    variants) onto the dense attention tensor along the channel axis.

    Args:
        config: flags selecting which summary features to append.
        dense_attention: [N, PL, HL, C] dense attention tensor.
        p_mask: premise mask, tiled over the HL axis below.
        h_mask: hypothesis mask, tiled over the PL axis below.

    Returns:
        [N, PL, HL, C'] tensor: the original channels plus selected extras.
    """
    features = []
    PL = dense_attention.get_shape().as_list()[1]
    HL = dense_attention.get_shape().as_list()[2]
    # p_aug = tf.tile(tf.expand_dims(p, 2), [1,1,HL,1])
    # h_aug = tf.tile(tf.expand_dims(h, 1), [1,PL,1,1]) #[N, PL, HL, 2d]
    p_mask_aug = tf.reduce_any(tf.cast(tf.tile(tf.expand_dims(p_mask, 2), [1, 1, HL, 1]), tf.bool), axis=3)
    h_mask_aug = tf.reduce_any(tf.cast(tf.tile(tf.expand_dims(h_mask, 1), [1, PL, 1, 1]), tf.bool), axis=3)
    # ph_mask = p_mask_aug & h_mask_aug TODO
    # NOTE(review): p_mask_aug / h_mask_aug are built but never combined —
    # ph_mask stays None, so the helpers below run unmasked (and the softmax
    # feature path in dense_logits_softmax_features fails on a None mask);
    # confirm whether the TODO above should be re-enabled.
    ph_mask = None
    # if config.dense_attention_shuffle_add:
    #     features.append(shuffle_add(config, dense_attention, ph_mask))
    if config.dense_attention_max_feature: #including row-wise softmax, column-wise softmax
        dense_attention_max_feature(config, dense_attention, features, ph_mask)
    # features.append(dense_attention_max_feature(config, dense_attention, ph_mask))
    if config.dense_attention_mean_feature: #including row-wise softmax, column-wise softmax
        dense_attention_mean_feature(config, dense_attention, features, ph_mask)
    if config.dense_attention_min_feature: #including row-wise softmax, column-wise softmax
        dense_attention_min_feature(config, dense_attention, features, ph_mask)
    if config.dense_attention_sum_feature: #including row-wise softmax, column-wise softmax
        dense_attention_sum_feature(config, dense_attention, features, ph_mask)
    features.append(dense_attention)
    new_dense_attention = tf.concat(features, axis=3)
    return new_dense_attention
def dense_attention_max_feature(config, bi_att_mx, collection, ph_mask):
    """Append the channel-wise max of the dense attention tensor (plus
    optional row/column softmax variants) to `collection` in place."""
    max_over_channels = tf.reduce_max(bi_att_mx, axis=3)
    collection.append(tf.expand_dims(max_over_channels, axis=3))
    # switch = [row-wise softmax enabled, column-wise softmax enabled]
    switch = [
        bool(config.dense_attention_max_row_wise_softmax_feature),
        bool(config.dense_attention_max_column_wise_softmax_feature),
    ]
    dense_logits_softmax_features(config, max_over_channels, collection,
                                  ph_mask, switch, scope='max_features')
def dense_attention_mean_feature(config, bi_att_mx, collection, ph_mask):
    """Append the channel-wise mean of the dense attention tensor (plus
    optional row/column softmax variants) to `collection` in place."""
    avg_over_channels = tf.reduce_mean(bi_att_mx, axis=3)
    collection.append(tf.expand_dims(avg_over_channels, axis=3))
    # switch = [row-wise softmax enabled, column-wise softmax enabled]
    switch = [
        bool(config.dense_attention_mean_row_wise_feature),
        bool(config.dense_attention_mean_column_wise_feature),
    ]
    dense_logits_softmax_features(config, avg_over_channels, collection,
                                  ph_mask, switch, scope='mean_features')
def dense_attention_min_feature(config, bi_att_mx, collection, ph_mask):
    """Append the channel-wise minimum of the dense attention tensor (plus
    optional row/column softmax variants) to `collection` in place.

    Args:
        config: model configuration flags.
        bi_att_mx: dense attention tensor [N, PL, HL, C].
        collection: list of feature tensors, appended to in place.
        ph_mask: joint premise/hypothesis mask (may be None).
    """
    min_feature = tf.reduce_min(bi_att_mx, axis=3)
    collection.append(tf.expand_dims(min_feature, axis=3))
    switch = [False, False]
    if config.dense_attention_min_row_wise_feature:
        switch[0] = True
    if config.dense_attention_min_column_wise_feature:
        switch[1] = True
    # BUG FIX: scope was copy-pasted as 'mean_features'; use a distinct scope
    # so min features do not share a name scope with the mean features
    # (dense_logits_softmax_features creates no variables, so this is safe).
    dense_logits_softmax_features(config, min_feature, collection, ph_mask, switch, scope='min_features')
def dense_attention_sum_feature(config, bi_att_mx, collection, ph_mask):
    """Append the channel-wise sum of the dense attention tensor (plus
    optional row/column softmax variants) to `collection` in place.

    Args:
        config: model configuration flags.
        bi_att_mx: dense attention tensor [N, PL, HL, C].
        collection: list of feature tensors, appended to in place.
        ph_mask: joint premise/hypothesis mask (may be None).
    """
    sum_feature = tf.reduce_sum(bi_att_mx, axis=3)
    collection.append(tf.expand_dims(sum_feature, axis=3))
    switch = [False, False]
    if config.dense_attention_sum_row_wise_feature:
        switch[0] = True
    if config.dense_attention_sum_column_wise_feature:
        switch[1] = True
    # BUG FIX: scope was copy-pasted as 'mean_features'; use a distinct scope
    # so sum features do not share a name scope with the mean features
    # (dense_logits_softmax_features creates no variables, so this is safe).
    dense_logits_softmax_features(config, sum_feature, collection, ph_mask, switch, scope='sum_features')
def bi_attention_mx(config, is_train, p, h, p_mask=None, h_mask=None, scope=None, tensor_dict=None): #[N, L, 2d]
    """Build the dense (unreduced) interaction tensor between p and h.

    p ([N, PL, d]) and h ([N, HL, d]) are tiled against each other so every
    (premise, hypothesis) position pair meets; config flags select how the
    pair is combined.

    Args:
        config: flags selecting the interaction variant.
        is_train: training flag forwarded to `linear` / `dense_logits`.
        p, h: sequence tensors [N, PL, d] and [N, HL, d].
        p_mask, h_mask: currently unused (masking disabled below).
        scope: variable-scope name.
        tensor_dict: unused here; kept for call-site symmetry.

    Returns:
        4-D tensor [N, PL, HL, C] of pairwise interaction features.
    """
    with tf.variable_scope(scope or "dense_logit_bi_attention"):
        PL = p.get_shape().as_list()[1]
        HL = h.get_shape().as_list()[1]
        p_aug = tf.tile(tf.expand_dims(p, 2), [1,1,HL,1])
        h_aug = tf.tile(tf.expand_dims(h, 1), [1,PL,1,1]) #[N, PL, HL, 2d]
        # if p_mask is None:
        #     ph_mask = None
        # else:
        #     p_mask_aug = tf.reduce_any(tf.cast(tf.tile(tf.expand_dims(p_mask, 2), [1, 1, HL, 1]), tf.bool), axis=3)
        #     h_mask_aug = tf.reduce_any(tf.cast(tf.tile(tf.expand_dims(h_mask, 1), [1, PL, 1, 1]), tf.bool), axis=3)
        #     ph_mask = p_mask_aug & h_mask_aug
        # Masking is disabled (see commented block above); logits are unmasked.
        ph_mask = None
        if config.super_dense_attention:
            # Plain elementwise product interaction.
            h_logits = p_aug * h_aug
        elif config.super_dense_attention_linear:
            # Gate p with a learned linear map before the product.
            h_logits_tmp = linear(p_aug, p_aug.get_shape().as_list()[-1] ,True, bias_start=0.0, scope="super_dense_attention_linear", squeeze=False, wd=config.wd, input_keep_prob=config.keep_rate, is_train=is_train)
            h_logits = h_logits_tmp * h_aug
        elif config.super_super_dense_attention:
            # Concat [p, h, p*h] along the channel axis.
            h_logits = tf.concat([p_aug, h_aug, p_aug * h_aug], axis=3)
        else:
            h_logits = dense_logits(config, [p_aug, h_aug], config.dense_logit_features_num, True, wd=config.wd, mask=ph_mask, is_train=is_train, func=config.dense_att_logit_func, scope='h_logits') # [N, PL, HL]
        return h_logits
def dense_logits_softmax_features(config, dense_logit_feature, collection, ph_mask, switch , scope=None):
    """Append row-wise and/or column-wise softmax maps of a dense attention
    summary [N, PL, HL] to `collection`.

    Args:
        config: model configuration (not read directly here).
        dense_logit_feature: [N, PL, HL] channel-reduced attention summary.
        collection: list of 4-D feature tensors, appended to in place.
        ph_mask: boolean validity mask over (PL, HL) cells.
        switch: pair of bools — [row-wise softmax on, column-wise softmax on].
        scope: variable-scope name.
    """
    with tf.variable_scope(scope or "dense_logits_softmax_features"):
        # (Earlier draft built ph_mask from p_mask/h_mask here; callers now
        # pass the mask in directly.)
        # Push masked-out cells toward -inf before softmax.
        dense_logit_with_exp_mask = exp_mask(dense_logit_feature, ph_mask) #[N, PL, HL, 20]
        dense_logit_softmax_col = None
        dense_logit_softmax_row = None
        dense_logit_with_exp_mask = tf.expand_dims(dense_logit_with_exp_mask, axis=3)
        if switch[0]:
            print("dense logit with exp mask size")
            print(dense_logit_with_exp_mask.get_shape().as_list())
            # dim=2 normalizes over the HL (hypothesis) axis.
            dense_logit_softmax_row = tf.nn.softmax(dense_logit_with_exp_mask, dim=2, name='softmax_row')
        if switch[1]:
            # dim=1 normalizes over the PL (premise) axis.
            dense_logit_softmax_col = tf.nn.softmax(dense_logit_with_exp_mask, dim=1, name='softmax_col')
        # NOTE(review): this cast fails when ph_mask is None — add_features
        # currently passes ph_mask=None, so the softmax features only work
        # when a real mask is supplied; confirm intended usage.
        mask = tf.expand_dims(tf.cast(ph_mask,tf.float32), axis=3)
        if dense_logit_softmax_row is not None:
            # Zero out padded cells after the softmax.
            dense_logit_softmax_row = mask * dense_logit_softmax_row
            print("mask shape")
            print(mask.get_shape().as_list())
            print("single layer feature")
            print(dense_logit_softmax_row.get_shape().as_list())
            collection.append(dense_logit_softmax_row)
        if dense_logit_softmax_col is not None:
            dense_logit_softmax_col = mask * dense_logit_softmax_col
            collection.append(dense_logit_softmax_col)
        # return tf.concat([dense_logit, dense_logit_softmax_col, dense_logit_softmax_row], axis=3)
def self_attention(config, is_train, p, p_mask=None, scope=None, tensor_dict=None): #[N, L, 2d]
    """Self-attention over sequence `p` ([N, PL, d]).

    Builds pairwise logits between all position pairs of p and returns the
    attended representation shaped like p. Config flags select between a
    dense-attention multi-head variant, a plain single-head variant, and a
    linear-projection multi-head variant.

    Args:
        config: model flags selecting the attention variant.
        is_train: training flag for dropout inside `linear` / `get_logits`.
        p: input sequence tensor [N, PL, d].
        p_mask: optional validity mask over p's positions.
        scope: variable-scope name.
        tensor_dict: unused here; kept for call-site symmetry.

    Returns:
        Self-attended tensor with the same [N, PL, d] shape as `p`.
    """
    with tf.variable_scope(scope or "self_attention"):
        PL = p.get_shape().as_list()[1]
        dim = p.get_shape().as_list()[-1]
        p_aug_1 = tf.tile(tf.expand_dims(p, 2), [1,1,PL,1])
        p_aug_2 = tf.tile(tf.expand_dims(p, 1), [1,PL,1,1]) #[N, PL, HL, 2d]
        if p_mask is None:
            # BUG FIX: this branch used to assign `ph_mask = None`, but every
            # use below reads `self_mask`, so an unmasked call raised
            # NameError. Assign the name that is actually used.
            self_mask = None
        else:
            p_mask_aug_1 = tf.reduce_any(tf.cast(tf.tile(tf.expand_dims(p_mask, 2), [1, 1, PL, 1]), tf.bool), axis=3)
            p_mask_aug_2 = tf.reduce_any(tf.cast(tf.tile(tf.expand_dims(p_mask, 1), [1, PL, 1, 1]), tf.bool), axis=3)
            self_mask = p_mask_aug_1 & p_mask_aug_2
        if config.use_dense_att_multi_head_self_att:
            # Multi-head variant: one logit map per head from dense_logits;
            # attended heads are concatenated then rescaled back to `dim`.
            self_dense_logits = dense_logits(config, [p_aug_1, p_aug_2], config.self_att_head_num, True, bias_start=0.0, scope="dense_logits", mask=self_mask, wd=0.0, input_keep_prob=config.keep_rate, is_train=is_train, func=config.dense_att_logit_func)
            list_of_logits = tf.unstack(self_dense_logits, axis=3)
            list_of_self_att = [softsel(p_aug_2, logit) for logit in list_of_logits]
            self_att = tf.concat(list_of_self_att, axis=2)
            print(self_att.get_shape())
            self_att = linear(self_att, dim ,True, bias_start=0.0, scope="self_att_rescale", squeeze=False, wd=config.wd, input_keep_prob=config.keep_rate, is_train=is_train)
            print(self_att.get_shape())
            return self_att
        h_logits = get_logits([p_aug_1, p_aug_2], None, True, wd=config.wd, mask=self_mask,
                              is_train=is_train, func=config.self_att_logit_func, scope='h_logits')  # [N, PL, HL]
        self_att = softsel(p_aug_2, h_logits)
        if config.use_multi_head_self_att:
            # Extra heads each get their own linear projections of p; results
            # are concatenated and rescaled back to `dim`.
            for i in range(1, config.self_att_head_num):
                print(i)
                with tf.variable_scope("self_att_head_{}".format(i)):
                    p_tmp_1 = linear(p, dim ,True, bias_start=0.0, scope="self_att_head_{}_w1".format(i), squeeze=False, wd=config.wd, input_keep_prob=config.keep_rate, is_train=is_train)
                    p_tmp_2 = linear(p, dim ,True, bias_start=0.0, scope="self_att_head_{}_w2".format(i), squeeze=False, wd=config.wd, input_keep_prob=config.keep_rate, is_train=is_train)
                    p_aug_tmp_1 = tf.tile(tf.expand_dims(p_tmp_1, 2), [1,1,PL,1])
                    p_aug_tmp_2 = tf.tile(tf.expand_dims(p_tmp_2, 1), [1,PL,1,1])
                    logits = get_logits([p_aug_tmp_1, p_aug_tmp_2], None, True, wd=config.wd, mask=self_mask, is_train=is_train, func=config.self_att_logit_func, scope='self_att_head_{}_logit'.format(i))
                    self_att_tmp = softsel(p_aug_tmp_2, logits)
                    self_att = tf.concat([self_att, self_att_tmp], axis=2)
            self_att = linear(self_att, dim ,True, bias_start=0.0, scope="self_att_rescale", squeeze=False, wd=config.wd, input_keep_prob=config.keep_rate, is_train=is_train)
        print(self_att.get_shape())
        return self_att
def self_attention_layer(config, is_train, p, p_mask=None, scope=None, tensor_dict=None):
    """Run self_attention over p and merge the result back into p.

    The merge strategy is selected by config flags: pass-through, fuse gate,
    linear map of [p; att], complex linear map + fuse gate, highway network,
    or a plain residual add (default).

    Args:
        config: flags selecting the merge variant and post-normalization.
        is_train: training flag forwarded to sub-layers.
        p: input sequence tensor.
        p_mask: optional validity mask over p's positions.
        scope: variable-scope name.
        tensor_dict: forwarded to self_attention.

    Returns:
        Tensor shaped like p.
    """
    with tf.variable_scope(scope or "self_attention_layer"):
        PL = tf.shape(p)[1]
        # HL = tf.shape(h)[1]
        # if config.q2c_att or config.c2q_att:
        self_att = self_attention(config, is_train, p, p_mask=p_mask, tensor_dict=tensor_dict)
        print("self_att shape")
        print(self_att.get_shape())
        if config.self_att_wo_residual_conn:
            # No residual: use the attended output directly.
            p0 = self_att
        elif config.self_att_fuse_gate_residual_conn:
            p0 = fuse_gate(config, is_train, p, self_att, scope="self_att_fuse_gate")
        elif config.self_att_linear_map:
            tmp_p = tf.concat([p, self_att], axis=2)
            p0 = linear(tmp_p, p.get_shape().as_list()[-1] ,True, bias_start=0.0, scope="self_att_linear_map", squeeze=False, wd=config.wd, input_keep_prob=config.keep_rate, is_train=is_train)
        elif config.self_att_complex_linear_map_fuse_gate_residual_conn:
            # Linear map of [p; att; p*att], then fuse-gate with p (either
            # operand order, chosen by flag).
            tmp = tf.concat([p, self_att, p * self_att], axis=2)
            tmp_p = linear(tmp, p.get_shape().as_list()[-1] ,True, bias_start=0.0, scope="self_att_linear_map", squeeze=False, wd=config.wd, input_keep_prob=config.keep_rate, is_train=is_train)
            if config.p_base_fuse_gate:
                p0 = fuse_gate(config, is_train, p, tmp_p, scope='self_att_fuse_gate_p_base')
            elif config.tmp_p_base_fuse_gate:
                p0 = fuse_gate(config, is_train, tmp_p, p, scope='self_att_fuse_gate_tmp_p_base')
            else:
                # Misconfiguration: exactly one fuse-gate base must be chosen.
                raise Exception()
        elif config.self_att_highway_out:
            with tf.variable_scope("highway") as scope:
                p0 = highway_network(self_att, config.highway_num_layers, True, wd=config.wd, is_train=is_train)
        else:
            # Default: plain residual connection.
            p0 = p + self_att
        if config.att_layer_norm:
            p0 = tf.contrib.layers.layer_norm(p0)
        if config.norm_encoding_with_last_dim:
            p0 = normalize(p0)
        # else:
        #     p0 = tf.concat(3, [p, u_a, p * u_a])
        return p0
def linear_mapping_with_residual_conn(config, is_train, p, p_mask=None, scope=None):
    """Position-wise two-layer feed-forward (ReLU then linear) with a
    residual connection back to the input."""
    with tf.variable_scope(scope or "linear_mapping"):
        hidden_dim = p.get_shape().as_list()[-1]
        hidden = linear(p, hidden_dim, True, bias_start=0.0, scope="linear_maping_1",
                        squeeze=False, wd=config.wd,
                        input_keep_prob=config.keep_rate, is_train=is_train)
        hidden = tf.nn.relu(hidden)
        projected = linear(hidden, hidden_dim, True, bias_start=0.0, scope="linear_maping_2",
                           squeeze=False, wd=config.wd,
                           input_keep_prob=config.keep_rate, is_train=is_train)
        return p + projected
def bi_attention(config, is_train, p, h, p_mask=None, h_mask=None, scope=None, h_value = None): #[N, L, 2d]
    """Bidirectional attention between p ([N, PL, d]) and h ([N, HL, d]).

    Args:
        config: flags; when key_value_memory_augmentation is set, `h` acts as
            keys and `h_value` (required in that case) supplies the values.
        is_train: training flag forwarded to `get_logits`.
        p, h: sequence tensors.
        p_mask, h_mask: optional validity masks; combined into a joint mask.
        scope: variable-scope name.
        h_value: value tensor, only read under key_value_memory_augmentation.

    Returns:
        (h_a, p_a): h attended for each p position [N, PL, d], and the single
        max-attended p vector tiled over PL [N, PL, d].
    """
    with tf.variable_scope(scope or "bi_attention"):
        PL = tf.shape(p)[1]
        HL = tf.shape(h)[1]
        p_aug = tf.tile(tf.expand_dims(p, 2), [1,1,HL,1])
        h_aug = tf.tile(tf.expand_dims(h, 1), [1,PL,1,1]) #[N, PL, HL, 2d]
        if config.key_value_memory_augmentation:
            #h as key
            #h_value as value
            h_value_aug = tf.tile(tf.expand_dims(h_value, 1), [1,PL,1,1])
        if p_mask is None:
            ph_mask = None
        else:
            p_mask_aug = tf.reduce_any(tf.cast(tf.tile(tf.expand_dims(p_mask, 2), [1, 1, HL, 1]), tf.bool), axis=3)
            h_mask_aug = tf.reduce_any(tf.cast(tf.tile(tf.expand_dims(h_mask, 1), [1, PL, 1, 1]), tf.bool), axis=3)
            ph_mask = p_mask_aug & h_mask_aug
        if config.key_value_memory_augmentation:
            # Score against keys (h) but attend over values (h_value).
            h_logits = get_logits([p_aug, h_aug], None, True, wd=config.wd, mask=ph_mask,
                                  is_train=is_train, func=config.logit_func, scope='h_logits')  # [N, PL, HL]
            h_a = softsel(h_value_aug, h_logits)
        else:
            h_logits = get_logits([p_aug, h_aug], None, True, wd=config.wd, mask=ph_mask,
                                  is_train=is_train, func="mul_linear", scope='h_logits')  # [N, PL, HL]
            h_a = softsel(h_aug, h_logits)
        # p_a: attend p with each position's best logit, then tile over PL.
        p_a = softsel(p, tf.reduce_max(h_logits, 2))  # [N, 2d]
        p_a = tf.tile(tf.expand_dims(p_a, 1), [1, PL, 1]) #
        return h_a, p_a
def cross_attention_layer(config, is_train, p, h, p_mask=None, h_mask=None, scope=None, tensor_dict=None):
    """Attend p over h with bi_attention, fuse the attended features into p,
    and project back to p's dimensionality with an optional residual or
    fuse-gate connection."""
    with tf.variable_scope(scope or "cross_attention_layer"):
        PL = tf.shape(p)[1]
        HL = tf.shape(h)[1]
        h_a, p_a = bi_attention(config, is_train, p, h, p_mask=p_mask, h_mask=h_mask)
        # Build the fused feature: [p; h_a; p*h_a] (+ p*p_a unless disabled).
        parts = [p, h_a, p * h_a]
        if not config.att_wo_pa:
            parts.append(p * p_a)
        combined = tf.concat(parts, axis=2)
        projected = linear(combined, p.get_shape().as_list()[-1], True, bias_start=0.0,
                           scope="cross_att_linear_scale", squeeze=False, wd=config.wd,
                           input_keep_prob=config.keep_rate, is_train=is_train)
        if config.cross_att_residual_conn:
            return p + projected
        if config.cross_att_fuse_gate_residual_conn:
            return fuse_gate(config, is_train, p, projected, scope="cross_att_fuse_gate")
        return projected
def residual(config, x, in_filter, out_filter, kernel_size, name, padding = "SAME", activate_before_residual=False, act = tf.nn.relu, norm=tf.contrib.layers.batch_norm, is_train = True, tensor_dict = None):
    """Residual conv block with several config-selected body variants
    (plain 2-conv, 1-3-1 bottleneck, dilated) and several skip-connection
    matching / merging strategies.

    Args:
        config: model configuration flags.
        x: input feature map [N, H, W, in_filter].
        in_filter: expected input channel count (used to detect a mismatch
            with out_filter for the skip connection).
        out_filter: output channel count.
        kernel_size: conv kernel size.
        name: variable-scope name.
        padding: conv padding mode.
        activate_before_residual: if True, norm/activate before saving the
            skip tensor; otherwise save first.
        act: activation function.
        norm: normalization function (only applied when config.CNN_normalize).
        is_train: bool tensor gating the dropout tf.cond branches.
        tensor_dict: optional dict for recording intermediate tensors.

    Returns:
        Output feature map [N, H, W, out_filter] (spatial size per `padding`).
    """
    # add condition with batch norm
    convolution2d = tf.contrib.layers.convolution2d
    if config.use_inception_structure:
        # Inception variant is unimplemented — this branch produces nothing.
        with tf.variable_scope(name or "inception_CNN"):
            pass
    else:
        with tf.variable_scope(name or "residual_CNN"):
            if activate_before_residual:
                with tf.variable_scope("shared_activation"):
                    if config.CNN_normalize:
                        x = norm(x)
                    x = act(x)
                    orig_x = x
            else:
                with tf.variable_scope("residual_only_activation"):
                    orig_x = x
                    if config.CNN_normalize:
                        x = norm(x)
                        x = act(x)
            if config.residual_block_1_3_1:
                # Bottleneck body: 1x1 -> kernel_size -> 1x1 convs.
                with tf.variable_scope("sub1"):
                    if config.CNN_normalize:
                        x = convolution2d(x, out_filter, 1, padding=padding, normalizer_fn=norm, activation_fn=act)
                    else:
                        x = convolution2d(x, out_filter, 1, padding=padding, activation_fn=act)
                with tf.variable_scope("sub2"):
                    if config.CNN_normalize:
                        x = convolution2d(x, out_filter, kernel_size, padding=padding, normalizer_fn=norm, activation_fn=act)
                    else:
                        x = convolution2d(x, out_filter, kernel_size, padding=padding, activation_fn=act)
                with tf.variable_scope("sub3"):
                    if config.CNN_normalize:
                        x = convolution2d(x, out_filter, 1, padding=padding, normalizer_fn=norm, activation_fn=act)
                    else:
                        x = convolution2d(x, out_filter, 1, padding=padding, activation_fn=act)
            elif config.residual_block_dilation:
                # Dilated body: rate-2 atrous convs (optionally a regular conv
                # for either sub-layer).
                with tf.variable_scope("sub1"):
                    if config.residual_block_pre_regular_conv:
                        x = convolution2d(x, out_filter, kernel_size, padding=padding, activation_fn=act)
                    else:
                        filters = tf.get_variable("weights", shape=[kernel_size,kernel_size,in_filter,out_filter],dtype='float', trainable=True)
                        bias = tf.get_variable("biases", shape=[out_filter], dtype='float')
                        x = tf.nn.atrous_conv2d(x, filters, rate=2, padding=padding) + bias
                        x = act(x)
                with tf.variable_scope("sub2"):
                    if config.residual_block_post_regular_conv:
                        x = convolution2d(x, out_filter, kernel_size, padding=padding, activation_fn=act)
                    else:
                        filters = tf.get_variable("weights", shape=[kernel_size,kernel_size,out_filter,out_filter],dtype='float', trainable=True)
                        bias = tf.get_variable("biases", shape=[out_filter], dtype='float')
                        x = tf.nn.atrous_conv2d(x, filters, rate=2, padding=padding) + bias
                        x = act(x)
            # (Removed ~40 lines of commented-out experimental variants here:
            # residual_2_3 / 3_5 / 2_3_5 multi-receptive-field bodies that
            # split out_filter across parallel convolutions of different
            # kernel sizes and concatenated the results.)
            else:
                # Default body: two kernel_size convs with optional dropout
                # after each, plus a summary image of the first out channel.
                with tf.variable_scope("sub1"):
                    if config.CNN_normalize:
                        x = convolution2d(x, out_filter, kernel_size, padding=padding, normalizer_fn=norm, activation_fn=act)
                    else:
                        x = convolution2d(x, out_filter, kernel_size, padding=padding, activation_fn=act)
                    if config.add_tensor_to_tensor_dict:
                        tensor_dict["{}_sub1".format(name)] = x
                    if config.conv_inter_dropout:
                        x = tf.cond(is_train, lambda: tf.nn.dropout(x, config.keep_rate), lambda: x)
                with tf.variable_scope("sub2"):
                    if config.CNN_normalize:
                        x = convolution2d(x, out_filter, kernel_size, padding=padding, normalizer_fn=norm, activation_fn=act)
                    elif config.CNN_layer_2_wo_act:
                        x = convolution2d(x, out_filter, kernel_size, padding=padding, activation_fn = None)
                    else:
                        x = convolution2d(x, out_filter, kernel_size, padding=padding, activation_fn=act)
                    if config.add_tensor_to_tensor_dict:
                        tensor_dict["{}_sub2".format(name)] = x
                    if config.conv_end_dropout:
                        x = tf.cond(is_train, lambda: tf.nn.dropout(x, config.keep_rate), lambda: x)
                    # if config.visualize_dense_attention_logits:
                    list_of_conv_features = tf.unstack(x, axis=3)
                    # for i in range(len(list_of_logits)):
                    tf.summary.image("conv_feature", tf.expand_dims(list_of_conv_features[0],3), max_outputs = 1)
            with tf.variable_scope("sub_add"):
                # Match the skip tensor's channel count to out_filter if needed.
                if in_filter != out_filter:
                    if config.linear_mapping_conv_mismatch:
                        orig_x = linear(orig_x, out_filter ,True, bias_start=0.0, scope="linear_mapping_conv_mismatch", squeeze=False, wd=config.wd, input_keep_prob=config.keep_rate,
                            is_train=is_train)
                    elif config.CNN_normalize:
                        orig_x = convolution2d(orig_x, out_filter, 1, padding=padding, normalizer_fn=norm, activation_fn=act)
                    elif config.wo_conv_dim_matching_res_conn:
                        # No dim matching possible: skip the residual entirely.
                        return x
                    elif config.mismatch_half_conv_1_channel_replicate_to_add:
                        # NOTE(review): out_filter / 2 is a float under
                        # Python 3; convolution2d needs an int channel count —
                        # should presumably be out_filter // 2. Confirm.
                        orig_x = convolution2d(orig_x, out_filter / 2, 1, padding=padding, activation_fn=act)
                    elif config.mismatch_conv_without_act_for_origx:
                        orig_x = convolution2d(orig_x, out_filter, 1, padding=padding, activation_fn=None)
                    else:
                        orig_x = convolution2d(orig_x, out_filter, 1, padding=padding, activation_fn=act)
                # Merge the body output with the skip tensor.
                if config.conv_fuse_gate_out_origx_base:
                    x = fuse_gate(config, is_train, orig_x, x, scope='conv_fuse_gate')
                elif config.conv_fuse_gate_out_newx_base:
                    x = fuse_gate(config, is_train, x, orig_x, scope='conv_fuse_gate')
                elif config.conv_residual_conn_off:
                    return x
                elif config.conv_shuffle_add_same_mtrx_concat_as_res_conn:
                    x = shuffle_add(config, x)
                    orig_x = shuffle_add(config, orig_x)
                    x = tf.concat([x,orig_x], axis=3)
                elif config.mismatch_half_conv_1_channel_replicate_to_add and in_filter != out_filter:
                    # Duplicate the half-width skip tensor to reach out_filter.
                    orig_x = tf.concat([orig_x, orig_x], axis=3)
                    x = x + orig_x
                else:
                    x += orig_x
            if config.add_tensor_to_tensor_dict:
                tensor_dict['{}_sub_add'.format(name)] = x
            return x
def dense_net(config, denseAttention, is_train, tensor_dict = None):
    """DenseNet over the dense attention tensor: an initial channel
    scale-down, three (optionally four) dense blocks with transition layers,
    and a final flatten.

    Args:
        config: model configuration flags.
        denseAttention: input tensor [N, PL, HL, C].
        is_train: training flag forwarded to the dense blocks.
        tensor_dict: optional dict for recording intermediate tensors.

    Returns:
        2-D tensor [N, H*W*C] of flattened DenseNet features.
    """
    with tf.variable_scope("dense_net"):
        # --- initial channel scale-down (four mutually exclusive variants) ---
        if config.rm_first_transition_layer:
            fm = denseAttention
        elif config.first_scale_down_maxout:
            # Maxout over several parallel scale-down convs.
            dim = denseAttention.get_shape().as_list()[-1]
            act = tf.nn.relu if config.first_scale_down_layer_relu else None
            fms = []
            for k in range(config.first_scale_down_maxout_num):
                afm = tf.contrib.layers.convolution2d(denseAttention, int(dim * config.dense_net_first_scale_down_ratio), config.first_scale_down_kernel, padding="SAME", activation_fn = act)
                fms.append(afm)
            fms = [tf.expand_dims(tensor, axis=4) for tensor in fms]
            fm = tf.reduce_max(tf.concat(fms, axis=4), axis=4)
        elif config.first_scale_down_layer:
            # Single scale-down conv.
            dim = denseAttention.get_shape().as_list()[-1]
            act = tf.nn.relu if config.first_scale_down_layer_relu else None
            fm = tf.contrib.layers.convolution2d(denseAttention, int(dim * config.dense_net_first_scale_down_ratio), config.first_scale_down_kernel, padding="SAME", activation_fn = act)
            if config.add_tensor_to_tensor_dict:
                tensor_dict["first_scale_down_layer"] = fm
        else:
            fm = dense_net_transition_layer(config, denseAttention, config.first_transition_growth_rate, scope='first_transition_layer', tensor_dict=tensor_dict)
        # --- dense blocks + transitions (fm_tmp carries the skip state) ---
        if config.dense_net_skip_join:
            fm_tmp = fm
        fm = dense_net_block(config, fm, config.dense_net_growth_rate, config.dense_net_layers, config.dense_net_kernel_size, is_train ,scope = "first_dense_net_block", tensor_dict=tensor_dict)
        fm = dense_net_transition_layer(config, fm, config.dense_net_transition_rate, scope='second_transition_layer', tensor_dict=tensor_dict)
        if config.dense_net_skip_join:
            fm, fm_tmp = dense_net_skip_join(fm, fm_tmp)
        fm = dense_net_block(config, fm, config.dense_net_growth_rate, config.dense_net_layers, config.dense_net_kernel_size, is_train ,scope = "second_dense_net_block", tensor_dict=tensor_dict)
        fm = dense_net_transition_layer(config, fm, config.dense_net_transition_rate, scope='third_transition_layer', tensor_dict=tensor_dict)
        if config.dense_net_skip_join:
            fm, fm_tmp = dense_net_skip_join(fm, fm_tmp)
        fm = dense_net_block(config, fm, config.dense_net_growth_rate, config.dense_net_layers, config.dense_net_kernel_size, is_train ,scope = "third_dense_net_block", tensor_dict=tensor_dict)
        # --- final stage: residual block or a fourth transition/block ---
        if config.replace_last_transition_layer_with_residual_block:
            dim = fm.get_shape().as_list()[-1]
            fm = residual(config, fm, dim, dim, 3, "last_layer_in_dense_block", padding = "SAME", act = tf.nn.relu, is_train = is_train)
            if config.add_max_pool_to_last_residual_block:
                fm = tf.nn.max_pool(fm, [1,2,2,1],[1,2,2,1], "VALID")
        else:
            fm = dense_net_transition_layer(config, fm, config.dense_net_transition_rate, scope='fourth_transition_layer', tensor_dict=tensor_dict)
        if config.fourth_dense_block:
            fm = dense_net_block(config, fm, config.dense_net_growth_rate, config.dense_net_layers, config.dense_net_kernel_size, is_train ,scope = "fourth_dense_net_block", tensor_dict=tensor_dict)
        if config.dense_net_skip_join:
            fm, fm_tmp = dense_net_skip_join(fm, fm_tmp)
        # fm_tmp = fm
        shape_list = fm.get_shape().as_list()
        print(shape_list)
        out_final = tf.reshape(fm, [-1, shape_list[1]*shape_list[2]*shape_list[3]])
        return out_final
def dense_net_block(config, feature_map, growth_rate, layers, kernel_size, is_train ,padding="SAME", act=tf.nn.relu, scope=None, tensor_dict=None):
    """DenseNet block: `layers` convs, each consuming the concat of all
    previous features and adding `growth_rate` new channels.

    Args:
        config: model configuration flags.
        feature_map: input tensor [N, H, W, C].
        growth_rate: channels added per layer.
        layers: number of conv layers in the block.
        kernel_size: conv kernel size.
        is_train: bool tensor gating the optional end-of-block dropout.
        padding: conv padding mode.
        act: activation function.
        scope: variable-scope name (also used as the tensor_dict key).
        tensor_dict: optional dict for recording the block output.

    Returns:
        Tensor [N, H, W, C + layers*growth_rate] (or without the original C
        channels when discard_orig_feature_map_to_save_transition_layer).
    """
    with tf.variable_scope(scope or "dense_net_block"):
        conv2d = tf.contrib.layers.convolution2d
        dim = feature_map.get_shape().as_list()[-1]
        list_of_features = [feature_map]
        features = feature_map
        for i in range(layers):
            if config.dense_net_act_before_conv:
                # Pre-activation ordering: (optional BN) -> act -> conv.
                if config.BN_on_dense_net_block:
                    ft = tf.contrib.layers.batch_norm(features)
                    ft = act(ft)
                else:
                    ft = act(features)
                ft = conv2d(ft, growth_rate, (kernel_size, kernel_size), padding=padding, activation_fn=None)
            else:
                # Post-activation ordering; every Nth layer may be dilated.
                if config.dense_net_dilated_CNN and i % config.dense_net_dilated_CNN_layers_jump_step == 0:
                    ft = conv2d(features, growth_rate, (kernel_size, kernel_size), padding=padding, activation_fn=act, rate = (2,2))
                else:
                    ft = conv2d(features, growth_rate, (kernel_size, kernel_size), padding=padding, activation_fn=act)
            # (Removed commented-out dual-path / bottleneck experiments here.)
            list_of_features.append(ft)
            features = tf.concat(list_of_features, axis=3)
        if config.discard_orig_feature_map_to_save_transition_layer:
            # Drop the original input channels, keep only the new growth.
            return tf.concat(list_of_features[1:], axis=3)
        if config.norm_dense_block_with_last_dim:
            # NOTE(review): this normalizes the block *input* (feature_map),
            # discarding everything computed above — looks like it should be
            # normalize(features); confirm before relying on this flag.
            features = normalize(feature_map)
        if config.add_tensor_to_tensor_dict:
            tensor_dict[scope] = features
        print("dense net block out shape")
        print(features.get_shape().as_list())
        if config.dense_net_block_dropout_at_the_end:
            features = tf.cond(is_train, lambda: tf.nn.dropout(features, config.keep_rate), lambda: features)
        return features
def dense_net_transition_layer(config, feature_map, transition_rate, scope=None, tensor_dict=None):
    """DenseNet transition layer: channel scale-down (1x1 conv or pairwise
    channel addition) plus 2x2 max-pooling, in config-selected order.

    Args:
        config: flags choosing the pooling order and scale-down variant.
        feature_map: input tensor [N, H, W, C].
        transition_rate: fraction of channels kept by the 1x1 conv.
        scope: variable-scope name (also used as the tensor_dict key).
        tensor_dict: optional dict for recording the output tensor.

    Returns:
        Downsampled feature map.
    """
    with tf.variable_scope(scope or "transition_layer"):
        if config.BN_on_dense_net_transition_layer:
            feature_map = tf.contrib.layers.batch_norm(feature_map)
        if config.transition_layer_pooling_first_then_scale_down:
            feature_map = tf.nn.max_pool(feature_map, [1,2,2,1],[1,2,2,1], "VALID")
        if config.discard_orig_feature_map_to_save_transition_layer:
            # Pool only; the channel scale-down is skipped entirely.
            feature_map = tf.nn.max_pool(feature_map, [1,2,2,1],[1,2,2,1], "VALID")
            return feature_map
        if config.addition_as_transition_scale_down_layer:
            fm_list = [tf.expand_dims(tensor, axis=3) for tensor in tf.unstack(feature_map, axis=3)]
            # BUG FIX: the result was assigned to a dead typo variable
            # `features_map`, silently discarding the scale-down; and
            # `len(...) / 2` is a float under Python 3 which range() rejects —
            # use floor division.
            feature_map = tf.concat([fm_list[2 * i] + fm_list[2 * i + 1] for i in range(len(fm_list) // 2)], axis=3)
        else:
            out_dim = int(feature_map.get_shape().as_list()[-1] * transition_rate)
            feature_map = tf.contrib.layers.convolution2d(feature_map, out_dim, 1, padding="SAME", activation_fn = None)
        # if config.dense_net_transition_layer_max_pooling:
        if not config.transition_layer_pooling_first_then_scale_down:
            feature_map = tf.nn.max_pool(feature_map, [1,2,2,1],[1,2,2,1], "VALID")
        # else:
        #     feature_map = tf.nn.avg_pool(feature_map, [1,2,2,1],[1,2,2,1], "VALID")
        if config.norm_transition_block_with_last_dim:
            feature_map = normalize(feature_map)
        if config.add_tensor_to_tensor_dict:
            tensor_dict[scope] = feature_map
        print("Transition Layer out shape")
        print(feature_map.get_shape().as_list())
        return feature_map
def dense_net_skip_join(fm, fm_tmp):
    """Max-pool the saved skip tensor to fm's spatial size and concat it onto
    fm along the channel axis."""
    pooled_skip = tf.nn.max_pool(fm_tmp, [1,2,2,1], [1,2,2,1], "VALID")
    joined = tf.concat([fm, pooled_skip], axis=3)
    # NOTE(review): the joined map is returned for BOTH outputs — call sites
    # unpack `fm, fm_tmp = ...`, so the skip state becomes the join itself;
    # confirm this (rather than carrying fm_tmp forward) is intended.
    return joined, joined
def memory_augment_layer(config, x, x_mask, is_train, memory_size, name=None):
    """Augment sequence `x` by attending over a trainable external memory.

    Two modes, selected by config.key_value_memory_augmentation:
      * key/value mode: stacks config.memory_augment_layers rounds, each with
        separate trainable key and value matrices of shape [memory_size, dim].
      * plain mode: a single trainable matrix serves as both key and value.
    In both modes the attended memory is merged back into the sequence either
    additively or via fuse_gate (config.memory_augment_layer_add_out).

    Args:
        config: experiment config object (flags read are listed above).
        x: sequence tensor; last two dims are read as [length, dim].
        x_mask: mask for `x`, forwarded to bi_attention as p_mask.
        is_train: training-mode flag forwarded to bi_attention / fuse_gate.
        memory_size: number of memory slots.
        name: optional variable-scope name (default "memory_augmentation").

    Returns:
        The augmented sequence tensor (same leading shape as `x` —
        presumably same shape overall; depends on bi_attention/fuse_gate,
        which are defined elsewhere — TODO confirm).
    """
    with tf.variable_scope(name or "memory_augmentation"):
        # `length` is computed but never used below.
        length = x.get_shape().as_list()[-2]
        dim = x.get_shape().as_list()[-1]
        if config.key_value_memory_augmentation:
            out = x
            for i in range(config.memory_augment_layers):
                # Per-layer trainable key/value memory matrices.
                keys = tf.get_variable("memory_keys_{}".format(i), shape=[memory_size, dim])
                values = tf.get_variable("memory_values_{}".format(i), shape=[memory_size, dim])
                # All memory slots are always valid, hence an all-ones mask.
                mem_mask = tf.ones([memory_size, 1], name='memory_mask')
                # Add a leading batch axis so the memory broadcasts over the batch.
                mem_mask_aug = tf.expand_dims(mem_mask, 0)
                key_aug = tf.expand_dims(keys, 0)
                value_aug = tf.expand_dims(values, 0)
                attended_x , _ = bi_attention(config, is_train, x, key_aug, p_mask=x_mask, h_mask=mem_mask_aug, scope="attend_x", h_value=value_aug)
                if config.memory_augment_layer_add_out:
                    # Residual-style merge of the attended memory.
                    out = out + attended_x
                else:
                    out = fuse_gate(config, is_train, out, attended_x, scope="fuse_gate_memory")
                # Log the memory matrices as images plus scalar summaries.
                tf.summary.image("memory_{}_layer_keys".format(i), tf.expand_dims(tf.expand_dims(keys,2),0), max_outputs = 1)
                tf.summary.image("memory_{}_layer_values".format(i), tf.expand_dims(tf.expand_dims(values,2),0), max_outputs = 1)
                variable_summaries(keys, "memory_keys_{}".format(i))
                variable_summaries(values, "memory_values_{}".format(i))
        else: #attentional memory augmentation
            # Single matrix acts as both attention key and value.
            mem = tf.get_variable("memory_key_and_value", shape=[memory_size, dim])
            mem_mask = tf.ones([memory_size, 1], name='memory_mask')
            mem_aug = tf.expand_dims(mem, 0)
            mem_mask_aug = tf.expand_dims(mem_mask, 0)
            attended_x , _ = bi_attention(config, is_train, x, mem_aug, p_mask=x_mask, h_mask=mem_mask_aug, scope="attend_x")
            if config.memory_augment_layer_add_out:
                out = x + attended_x
            else:
                out = fuse_gate(config, is_train, x, attended_x, scope="fuse_gate_memory")
        return out
def selu(x):
    """Scaled exponential linear unit (self-normalizing activation)."""
    # Fixed-point constants from the SELU derivation.
    alpha = 1.6732632423543772848170429916717
    scale = 1.0507009873554804934193349852946
    negative_branch = alpha * tf.exp(x) - alpha
    return scale * tf.where(x > 0.0, x, negative_branch)
def PRelu(_x):
    """Parametric ReLU with a learnable per-channel slope, initialized to 0."""
    slopes = tf.get_variable('alpha', _x.get_shape()[-1],
                             initializer = tf.constant_initializer(0.0),
                             dtype=tf.float32)
    # (x - |x|) * 0.5 equals min(x, 0): the negative part of the input.
    negative_part = slopes * (_x - abs(_x)) * 0.5
    return tf.nn.relu(_x) + negative_part
def normalize(inputs,
              epsilon = 1e-8,
              scope="ln",
              reuse=None):
    '''Applies layer normalization over the last dimension of `inputs`.
    Args:
      inputs: A tensor with 2 or more dimensions, where the first dimension has
        `batch_size`.
      epsilon: A floating number. A very small number for preventing ZeroDivision Error.
      scope: Optional scope for `variable_scope`.
      reuse: Boolean, whether to reuse the weights of a previous layer
        by the same name.
    Returns:
      A tensor with the same shape and data dtype as `inputs`.
    '''
    with tf.variable_scope(scope, reuse=reuse):
        # Learn one scale (gamma) and one shift (beta) per feature.
        params_shape = inputs.get_shape()[-1:]
        mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True)
        # Keep beta before gamma: tf.Variable names are assigned in creation
        # order, so swapping them would change checkpoint variable names.
        beta = tf.Variable(tf.zeros(params_shape))
        gamma = tf.Variable(tf.ones(params_shape))
        standardized = (inputs - mean) / ((variance + epsilon) ** 0.5)
        return gamma * standardized + beta
| [
"tensorflow.unstack",
"tensorflow.reductf.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.shape",
"tensorflow.get_variable",
"my.tensorflow.nn.dense_logits",
"my.tensorflow.nn.linear",
"tensorflow.nn.bidirectional_dynamic_rnn",
"tensorflow.reduce_sum",
"tensorflow.nn.moments",
"tensorflo... | [((49974, 50006), 'tensorflow.unstack', 'tf.unstack', (['dense_tensor'], {'axis': '(3)'}), '(dense_tensor, axis=3)\n', (49984, 50006), True, 'import tensorflow as tf\n'), ((50011, 50044), 'numpy.random.shuffle', 'np.random.shuffle', (['list_of_logits'], {}), '(list_of_logits)\n', (50028, 50044), True, 'import numpy as np\n'), ((50714, 50751), 'tensorflow.concat', 'tf.concat', (['list_of_new_logits'], {'axis': '(3)'}), '(list_of_new_logits, axis=3)\n', (50723, 50751), True, 'import tensorflow as tf\n'), ((52394, 52421), 'tensorflow.concat', 'tf.concat', (['features'], {'axis': '(3)'}), '(features, axis=3)\n', (52403, 52421), True, 'import tensorflow as tf\n'), ((52547, 52579), 'tensorflow.reduce_max', 'tf.reduce_max', (['bi_att_mx'], {'axis': '(3)'}), '(bi_att_mx, axis=3)\n', (52560, 52579), True, 'import tensorflow as tf\n'), ((53090, 53123), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['bi_att_mx'], {'axis': '(3)'}), '(bi_att_mx, axis=3)\n', (53104, 53123), True, 'import tensorflow as tf\n'), ((53573, 53605), 'tensorflow.reduce_min', 'tf.reduce_min', (['bi_att_mx'], {'axis': '(3)'}), '(bi_att_mx, axis=3)\n', (53586, 53605), True, 'import tensorflow as tf\n'), ((54049, 54081), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['bi_att_mx'], {'axis': '(3)'}), '(bi_att_mx, axis=3)\n', (54062, 54081), True, 'import tensorflow as tf\n'), ((85118, 85177), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['fm_tmp', '[1, 2, 2, 1]', '[1, 2, 2, 1]', '"""VALID"""'], {}), "(fm_tmp, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')\n", (85132, 85177), True, 'import tensorflow as tf\n'), ((85180, 85224), 'tensorflow.concat', 'tf.concat', (['[fm, down_sampled_tf_tmp]'], {'axis': '(3)'}), '([fm, down_sampled_tf_tmp], axis=3)\n', (85189, 85224), True, 'import tensorflow as tf\n'), ((87818, 87832), 'tensorflow.nn.relu', 'tf.nn.relu', (['_x'], {}), '(_x)\n', (87828, 87832), True, 'import tensorflow as tf\n'), ((1974, 2052), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, 
self.sequence_length, 47]'], {'name': '"""premise_pos"""'}), "(tf.int32, [None, self.sequence_length, 47], name='premise_pos')\n", (1988, 2052), True, 'import tensorflow as tf\n'), ((2083, 2169), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, self.sequence_length, 47]'], {'name': '"""hypothesis_pos"""'}), "(tf.int32, [None, self.sequence_length, 47], name=\n 'hypothesis_pos')\n", (2097, 2169), True, 'import tensorflow as tf\n'), ((2193, 2299), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, self.sequence_length, config.char_in_word_size]'], {'name': '"""premise_char"""'}), "(tf.int32, [None, self.sequence_length, config.\n char_in_word_size], name='premise_char')\n", (2207, 2299), True, 'import tensorflow as tf\n'), ((2326, 2435), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, self.sequence_length, config.char_in_word_size]'], {'name': '"""hypothesis_char"""'}), "(tf.int32, [None, self.sequence_length, config.\n char_in_word_size], name='hypothesis_char')\n", (2340, 2435), True, 'import tensorflow as tf\n'), ((2466, 2556), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, self.sequence_length, 1]'], {'name': '"""premise_exact_match"""'}), "(tf.int32, [None, self.sequence_length, 1], name=\n 'premise_exact_match')\n", (2480, 2556), True, 'import tensorflow as tf\n'), ((2589, 2682), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, self.sequence_length, 1]'], {'name': '"""hypothesis_exact_match"""'}), "(tf.int32, [None, self.sequence_length, 1], name=\n 'hypothesis_exact_match')\n", (2603, 2682), True, 'import tensorflow as tf\n'), ((2704, 2783), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.sequence_length, 1]'], {'name': '"""premise_itf"""'}), "(tf.float32, [None, self.sequence_length, 1], name='premise_itf')\n", (2718, 2783), True, 'import tensorflow as tf\n'), ((2813, 2900), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 
self.sequence_length, 1]'], {'name': '"""hypothesis_itf"""'}), "(tf.float32, [None, self.sequence_length, 1], name=\n 'hypothesis_itf')\n", (2827, 2900), True, 'import tensorflow as tf\n'), ((2926, 3012), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, self.sequence_length, 1]'], {'name': '"""premise_antonym"""'}), "(tf.int32, [None, self.sequence_length, 1], name=\n 'premise_antonym')\n", (2940, 3012), True, 'import tensorflow as tf\n'), ((3041, 3130), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, self.sequence_length, 1]'], {'name': '"""hypothesis_antonym"""'}), "(tf.int32, [None, self.sequence_length, 1], name=\n 'hypothesis_antonym')\n", (3055, 3130), True, 'import tensorflow as tf\n'), ((3160, 3250), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, self.sequence_length, 7]'], {'name': '"""premise_ner_feature"""'}), "(tf.int32, [None, self.sequence_length, 7], name=\n 'premise_ner_feature')\n", (3174, 3250), True, 'import tensorflow as tf\n'), ((3284, 3377), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, self.sequence_length, 7]'], {'name': '"""hypothesis_ner_feature"""'}), "(tf.int32, [None, self.sequence_length, 7], name=\n 'hypothesis_ner_feature')\n", (3298, 3377), True, 'import tensorflow as tf\n'), ((3408, 3496), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[self.sequence_length, 300]'], {'name': '"""positional_encoding"""'}), "(tf.float32, [self.sequence_length, 300], name=\n 'positional_encoding')\n", (3422, 3496), True, 'import tensorflow as tf\n'), ((3649, 3700), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'name': '"""global_step"""', 'trainable': '(False)'}), "(0, name='global_step', trainable=False)\n", (3660, 3700), True, 'import tensorflow as tf\n'), ((4337, 4385), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[]'], {'name': '"""keep_prob"""'}), "(tf.float32, [], name='keep_prob')\n", (4351, 4385), True, 'import tensorflow as tf\n'), 
((4410, 4453), 'tensorflow.placeholder', 'tf.placeholder', (['"""bool"""', '[]'], {'name': '"""is_train"""'}), "('bool', [], name='is_train')\n", (4424, 4453), True, 'import tensorflow as tf\n'), ((13903, 13982), 'my.tensorflow.rnn_cell.SwitchableDropoutWrapper', 'SwitchableDropoutWrapper', (['cell', 'self.is_train'], {'input_keep_prob': 'config.keep_rate'}), '(cell, self.is_train, input_keep_prob=config.keep_rate)\n', (13927, 13982), False, 'from my.tensorflow.rnn_cell import SwitchableDropoutWrapper, AttentionCell\n'), ((28819, 28871), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""logit_histogram"""', 'self.logits'], {}), "('logit_histogram', self.logits)\n", (28839, 28871), True, 'import tensorflow as tf\n'), ((29688, 29722), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""acc"""', 'self.acc'], {}), "('acc', self.acc)\n", (29705, 29722), True, 'import tensorflow as tf\n'), ((30415, 30457), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'self.total_cost'], {}), "('loss', self.total_cost)\n", (30432, 30457), True, 'import tensorflow as tf\n'), ((38469, 38491), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (38489, 38491), True, 'import tensorflow as tf\n'), ((38539, 38560), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (38558, 38560), True, 'import tensorflow as tf\n'), ((44225, 44265), 'tensorflow.variable_scope', 'tf.variable_scope', (["(name or 'conv_blocks')"], {}), "(name or 'conv_blocks')\n", (44242, 44265), True, 'import tensorflow as tf\n'), ((49658, 49726), 'tensorflow.reshape', 'tf.reshape', (['res', '[-1, shape_list[1] * shape_list[2] * shape_list[3]]'], {}), '(res, [-1, shape_list[1] * shape_list[2] * shape_list[3]])\n', (49668, 49726), True, 'import tensorflow as tf\n'), ((50633, 50663), 'tensorflow.expand_dims', 'tf.expand_dims', (['tensor'], {'axis': '(3)'}), '(tensor, axis=3)\n', (50647, 50663), True, 'import tensorflow as tf\n'), ((52602, 52637), 
'tensorflow.expand_dims', 'tf.expand_dims', (['sum_feature'], {'axis': '(3)'}), '(sum_feature, axis=3)\n', (52616, 52637), True, 'import tensorflow as tf\n'), ((53146, 53182), 'tensorflow.expand_dims', 'tf.expand_dims', (['mean_feature'], {'axis': '(3)'}), '(mean_feature, axis=3)\n', (53160, 53182), True, 'import tensorflow as tf\n'), ((53628, 53663), 'tensorflow.expand_dims', 'tf.expand_dims', (['min_feature'], {'axis': '(3)'}), '(min_feature, axis=3)\n', (53642, 53663), True, 'import tensorflow as tf\n'), ((54104, 54139), 'tensorflow.expand_dims', 'tf.expand_dims', (['sum_feature'], {'axis': '(3)'}), '(sum_feature, axis=3)\n', (54118, 54139), True, 'import tensorflow as tf\n'), ((54558, 54612), 'tensorflow.variable_scope', 'tf.variable_scope', (["(scope or 'dense_logit_bi_attention')"], {}), "(scope or 'dense_logit_bi_attention')\n", (54575, 54612), True, 'import tensorflow as tf\n'), ((56087, 56146), 'tensorflow.variable_scope', 'tf.variable_scope', (["(scope or 'dense_logits_softmax_features')"], {}), "(scope or 'dense_logits_softmax_features')\n", (56104, 56146), True, 'import tensorflow as tf\n'), ((56740, 56778), 'my.tensorflow.exp_mask', 'exp_mask', (['dense_logit_feature', 'ph_mask'], {}), '(dense_logit_feature, ph_mask)\n', (56748, 56778), False, 'from my.tensorflow import flatten, reconstruct, add_wd, exp_mask\n'), ((56912, 56961), 'tensorflow.expand_dims', 'tf.expand_dims', (['dense_logit_with_exp_mask'], {'axis': '(3)'}), '(dense_logit_with_exp_mask, axis=3)\n', (56926, 56961), True, 'import tensorflow as tf\n'), ((58175, 58219), 'tensorflow.variable_scope', 'tf.variable_scope', (["(scope or 'self_attention')"], {}), "(scope or 'self_attention')\n", (58192, 58219), True, 'import tensorflow as tf\n'), ((59650, 59796), 'my.tensorflow.nn.get_logits', 'get_logits', (['[p_aug_1, p_aug_2]', 'None', '(True)'], {'wd': 'config.wd', 'mask': 'self_mask', 'is_train': 'is_train', 'func': 'config.self_att_logit_func', 'scope': '"""h_logits"""'}), "([p_aug_1, 
p_aug_2], None, True, wd=config.wd, mask=self_mask,\n is_train=is_train, func=config.self_att_logit_func, scope='h_logits')\n", (59660, 59796), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((59857, 59883), 'my.tensorflow.nn.softsel', 'softsel', (['p_aug_2', 'h_logits'], {}), '(p_aug_2, h_logits)\n', (59864, 59883), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((61306, 61356), 'tensorflow.variable_scope', 'tf.variable_scope', (["(scope or 'self_attention_layer')"], {}), "(scope or 'self_attention_layer')\n", (61323, 61356), True, 'import tensorflow as tf\n'), ((63388, 63432), 'tensorflow.variable_scope', 'tf.variable_scope', (["(scope or 'linear_mapping')"], {}), "(scope or 'linear_mapping')\n", (63405, 63432), True, 'import tensorflow as tf\n'), ((63659, 63808), 'my.tensorflow.nn.linear', 'linear', (['p1', 'dim', '(True)'], {'bias_start': '(0.0)', 'scope': '"""linear_maping_2"""', 'squeeze': '(False)', 'wd': 'config.wd', 'input_keep_prob': 'config.keep_rate', 'is_train': 'is_train'}), "(p1, dim, True, bias_start=0.0, scope='linear_maping_2', squeeze=\n False, wd=config.wd, input_keep_prob=config.keep_rate, is_train=is_train)\n", (63665, 63808), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((63945, 63987), 'tensorflow.variable_scope', 'tf.variable_scope', (["(scope or 'bi_attention')"], {}), "(scope or 'bi_attention')\n", (63962, 63987), True, 'import tensorflow as tf\n'), ((65514, 65565), 'tensorflow.variable_scope', 'tf.variable_scope', (["(scope or 'cross_attention_layer')"], {}), "(scope or 'cross_attention_layer')\n", (65531, 65565), True, 'import tensorflow as tf\n'), ((76963, 
76993), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""dense_net"""'], {}), "('dense_net')\n", (76980, 76993), True, 'import tensorflow as tf\n'), ((80474, 80541), 'tensorflow.reshape', 'tf.reshape', (['fm', '[-1, shape_list[1] * shape_list[2] * shape_list[3]]'], {}), '(fm, [-1, shape_list[1] * shape_list[2] * shape_list[3]])\n', (80484, 80541), True, 'import tensorflow as tf\n'), ((80723, 80768), 'tensorflow.variable_scope', 'tf.variable_scope', (["(scope or 'dense_net_block')"], {}), "(scope or 'dense_net_block')\n", (80740, 80768), True, 'import tensorflow as tf\n'), ((83421, 83467), 'tensorflow.variable_scope', 'tf.variable_scope', (["(scope or 'transition_layer')"], {}), "(scope or 'transition_layer')\n", (83438, 83467), True, 'import tensorflow as tf\n'), ((85335, 85383), 'tensorflow.variable_scope', 'tf.variable_scope', (["(name or 'memory_augmentation')"], {}), "(name or 'memory_augmentation')\n", (85352, 85383), True, 'import tensorflow as tf\n'), ((88492, 88529), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {'reuse': 'reuse'}), '(scope, reuse=reuse)\n', (88509, 88529), True, 'import tensorflow as tf\n'), ((88644, 88687), 'tensorflow.nn.moments', 'tf.nn.moments', (['inputs', '[-1]'], {'keep_dims': '(True)'}), '(inputs, [-1], keep_dims=True)\n', (88657, 88687), True, 'import tensorflow as tf\n'), ((1239, 1309), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, self.context_seq_len]'], {'name': '"""premise"""'}), "(tf.int32, [None, self.context_seq_len], name='premise')\n", (1253, 1309), True, 'import tensorflow as tf\n'), ((1342, 1413), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, self.query_seq_len]'], {'name': '"""hypothesis"""'}), "(tf.int32, [None, self.query_seq_len], name='hypothesis')\n", (1356, 1413), True, 'import tensorflow as tf\n'), ((3821, 3990), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['config.keep_rate', 'self.global_step', 'config.dropout_decay_step', 
'config.dropout_decay_rate'], {'staircase': '(False)', 'name': '"""dropout_keep_rate"""'}), "(config.keep_rate, self.global_step, config.\n dropout_decay_step, config.dropout_decay_rate, staircase=False, name=\n 'dropout_keep_rate')\n", (3847, 3990), True, 'import tensorflow as tf\n'), ((4047, 4109), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""dropout_keep_rate"""', 'self.dropout_keep_rate'], {}), "('dropout_keep_rate', self.dropout_keep_rate)\n", (4064, 4109), True, 'import tensorflow as tf\n'), ((4171, 4224), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 3]'], {'name': '"""label_y"""'}), "(tf.float32, [None, 3], name='label_y')\n", (4185, 4224), True, 'import tensorflow as tf\n'), ((4260, 4308), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {'name': '"""label_y"""'}), "(tf.int32, [None], name='label_y')\n", (4274, 4308), True, 'import tensorflow as tf\n'), ((4674, 4702), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['E', 'x'], {}), '(E, x)\n', (4696, 4702), True, 'import tensorflow as tf\n'), ((5543, 5572), 'util.blocks.length', 'blocks.length', (['self.premise_x'], {}), '(self.premise_x)\n', (5556, 5572), False, 'from util import blocks\n'), ((5632, 5664), 'util.blocks.length', 'blocks.length', (['self.hypothesis_x'], {}), '(self.hypothesis_x)\n', (5645, 5664), False, 'from util import blocks\n'), ((10626, 10675), 'tensorflow.concat', 'tf.concat', (['[premise_in, self.premise_itf]'], {'axis': '(2)'}), '([premise_in, self.premise_itf], axis=2)\n', (10635, 10675), True, 'import tensorflow as tf\n'), ((10704, 10759), 'tensorflow.concat', 'tf.concat', (['[hypothesis_in, self.hypothesis_itf]'], {'axis': '(2)'}), '([hypothesis_in, self.hypothesis_itf], axis=2)\n', (10713, 10759), True, 'import tensorflow as tf\n'), ((13765, 13814), 'tensorflow.contrib.rnn.python.ops.core_rnn_cell.BasicLSTMCell', 'BasicLSTMCell', (['self.LSTM_dim'], {'state_is_tuple': '(True)'}), '(self.LSTM_dim, 
state_is_tuple=True)\n', (13778, 13814), False, 'from tensorflow.contrib.rnn.python.ops.core_rnn_cell import BasicLSTMCell\n'), ((13848, 13885), 'tensorflow.contrib.rnn.python.ops.rnn_cell.LayerNormBasicLSTMCell', 'LayerNormBasicLSTMCell', (['self.LSTM_dim'], {}), '(self.LSTM_dim)\n', (13870, 13885), False, 'from tensorflow.contrib.rnn.python.ops.rnn_cell import LayerNormBasicLSTMCell\n'), ((13997, 14024), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""prepro"""'], {}), "('prepro')\n", (14014, 14024), True, 'import tensorflow as tf\n'), ((21742, 21767), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""main"""'], {}), "('main')\n", (21759, 21767), True, 'import tensorflow as tf\n'), ((27352, 27376), 'tensorflow.reduce_max', 'tf.reduce_max', (['p'], {'axis': '(1)'}), '(p, axis=1)\n', (27365, 27376), True, 'import tensorflow as tf\n'), ((27397, 27421), 'tensorflow.reduce_max', 'tf.reduce_max', (['h'], {'axis': '(1)'}), '(h, axis=1)\n', (27410, 27421), True, 'import tensorflow as tf\n'), ((29107, 29133), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['self.logits'], {}), '(self.logits)\n', (29120, 29133), True, 'import tensorflow as tf\n'), ((29803, 29827), 'tensorflow.reduce_max', 'tf.reduce_max', (['p'], {'axis': '(1)'}), '(p, axis=1)\n', (29816, 29827), True, 'import tensorflow as tf\n'), ((29848, 29872), 'tensorflow.reduce_max', 'tf.reduce_max', (['h'], {'axis': '(1)'}), '(h, axis=1)\n', (29861, 29872), True, 'import tensorflow as tf\n'), ((29891, 29954), 'tensorflow.concat', 'tf.concat', (['[p_vec, h_vec, p_vec - h_vec, p_vec * h_vec]'], {'axis': '(1)'}), '([p_vec, h_vec, p_vec - h_vec, p_vec * h_vec], axis=1)\n', (29900, 29954), True, 'import tensorflow as tf\n'), ((29984, 30076), 'tensorflow.constant', 'tf.constant', (['config.enc_loss_ratio'], {'dtype': '"""float"""', 'shape': '[]', 'name': '"""encoding_loss_ratio"""'}), "(config.enc_loss_ratio, dtype='float', shape=[], name=\n 'encoding_loss_ratio')\n", (29995, 30076), True, 'import tensorflow as 
tf\n'), ((30097, 30244), 'my.tensorflow.nn.linear', 'linear', (['cat', '(3)', '(True)'], {'bias_start': '(0.0)', 'scope': '"""enc_logit"""', 'squeeze': '(False)', 'wd': 'config.wd', 'input_keep_prob': 'config.keep_rate', 'is_train': 'self.is_train'}), "(cat, 3, True, bias_start=0.0, scope='enc_logit', squeeze=False, wd=\n config.wd, input_keep_prob=config.keep_rate, is_train=self.is_train)\n", (30103, 30244), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((31594, 31629), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""l2loss"""', 'l2loss'], {}), "('l2loss', l2loss)\n", (31611, 31629), True, 'import tensorflow as tf\n'), ((34762, 34786), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (34784, 34786), True, 'import tensorflow as tf\n'), ((37158, 37207), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""diff_penalty_loss"""', 'diff_loss'], {}), "('diff_penalty_loss', diff_loss)\n", (37175, 37207), True, 'import tensorflow as tf\n'), ((37441, 37465), 'tensorflow.reduce_max', 'tf.reduce_max', (['p'], {'axis': '(1)'}), '(p, axis=1)\n', (37454, 37465), True, 'import tensorflow as tf\n'), ((37486, 37510), 'tensorflow.reduce_max', 'tf.reduce_max', (['h'], {'axis': '(1)'}), '(h, axis=1)\n', (37499, 37510), True, 'import tensorflow as tf\n'), ((37533, 37564), 'my.tensorflow.nn.cosine_similarity', 'cosine_similarity', (['p_vec', 'h_vec'], {}), '(p_vec, h_vec)\n', (37550, 37564), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((39045, 39092), 'tensorflow.variable_scope', 'tf.variable_scope', (["(scope or 'add_one_d_feature')"], {}), "(scope or 'add_one_d_feature')\n", (39062, 39092), True, 'import tensorflow as tf\n'), ((43101, 43154), 'tensorflow.variable_scope', 'tf.variable_scope', 
(["(scope or 'multi_perspective_merge')"], {}), "(scope or 'multi_perspective_merge')\n", (43118, 43154), True, 'import tensorflow as tf\n'), ((43776, 43832), 'tensorflow.variable_scope', 'tf.variable_scope', (["(scope or 'multi_perspective_matching')"], {}), "(scope or 'multi_perspective_matching')\n", (43793, 43832), True, 'import tensorflow as tf\n'), ((44027, 44088), 'tensorflow.get_variable', 'tf.get_variable', (['"""perspect_weight"""'], {'shape': '[perspectives, dim]'}), "('perspect_weight', shape=[perspectives, dim])\n", (44042, 44088), True, 'import tensorflow as tf\n'), ((46666, 46722), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['res', '[1, 2, 2, 1]', '[1, 2, 2, 1]', '"""VALID"""'], {}), "(res, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')\n", (46680, 46722), True, 'import tensorflow as tf\n'), ((47472, 47528), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['res', '[1, 2, 2, 1]', '[1, 2, 2, 1]', '"""VALID"""'], {}), "(res, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')\n", (47486, 47528), True, 'import tensorflow as tf\n'), ((48446, 48502), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['res', '[1, 2, 2, 1]', '[1, 2, 2, 1]', '"""VALID"""'], {}), "(res, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')\n", (48460, 48502), True, 'import tensorflow as tf\n'), ((49158, 49214), 'tensorflow.nn.avg_pool', 'tf.nn.avg_pool', (['res', '[1, 6, 6, 1]', '[1, 1, 1, 1]', '"""VALID"""'], {}), "(res, [1, 6, 6, 1], [1, 1, 1, 1], 'VALID')\n", (49172, 49214), True, 'import tensorflow as tf\n'), ((54718, 54738), 'tensorflow.expand_dims', 'tf.expand_dims', (['p', '(2)'], {}), '(p, 2)\n', (54732, 54738), True, 'import tensorflow as tf\n'), ((54776, 54796), 'tensorflow.expand_dims', 'tf.expand_dims', (['h', '(1)'], {}), '(h, 1)\n', (54790, 54796), True, 'import tensorflow as tf\n'), ((57142, 57209), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['dense_logit_with_exp_mask'], {'dim': '(2)', 'name': '"""softmax_row"""'}), "(dense_logit_with_exp_mask, dim=2, name='softmax_row')\n", (57155, 57209), True, 'import 
tensorflow as tf\n'), ((57271, 57338), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['dense_logit_with_exp_mask'], {'dim': '(1)', 'name': '"""softmax_col"""'}), "(dense_logit_with_exp_mask, dim=1, name='softmax_col')\n", (57284, 57338), True, 'import tensorflow as tf\n'), ((57389, 57417), 'tensorflow.cast', 'tf.cast', (['ph_mask', 'tf.float32'], {}), '(ph_mask, tf.float32)\n', (57396, 57417), True, 'import tensorflow as tf\n'), ((58359, 58379), 'tensorflow.expand_dims', 'tf.expand_dims', (['p', '(2)'], {}), '(p, 2)\n', (58373, 58379), True, 'import tensorflow as tf\n'), ((58419, 58439), 'tensorflow.expand_dims', 'tf.expand_dims', (['p', '(1)'], {}), '(p, 1)\n', (58433, 58439), True, 'import tensorflow as tf\n'), ((58913, 59147), 'my.tensorflow.nn.dense_logits', 'dense_logits', (['config', '[p_aug_1, p_aug_2]', 'config.self_att_head_num', '(True)'], {'bias_start': '(0.0)', 'scope': '"""dense_logits"""', 'mask': 'self_mask', 'wd': '(0.0)', 'input_keep_prob': 'config.keep_rate', 'is_train': 'is_train', 'func': 'config.dense_att_logit_func'}), "(config, [p_aug_1, p_aug_2], config.self_att_head_num, True,\n bias_start=0.0, scope='dense_logits', mask=self_mask, wd=0.0,\n input_keep_prob=config.keep_rate, is_train=is_train, func=config.\n dense_att_logit_func)\n", (58925, 59147), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((59164, 59201), 'tensorflow.unstack', 'tf.unstack', (['self_dense_logits'], {'axis': '(3)'}), '(self_dense_logits, axis=3)\n', (59174, 59201), True, 'import tensorflow as tf\n'), ((59310, 59345), 'tensorflow.concat', 'tf.concat', (['list_of_self_att'], {'axis': '(2)'}), '(list_of_self_att, axis=2)\n', (59319, 59345), True, 'import tensorflow as tf\n'), ((59410, 59570), 'my.tensorflow.nn.linear', 'linear', (['self_att', 'dim', '(True)'], {'bias_start': '(0.0)', 'scope': '"""self_att_rescale"""', 'squeeze': '(False)', 'wd': 
'config.wd', 'input_keep_prob': 'config.keep_rate', 'is_train': 'is_train'}), "(self_att, dim, True, bias_start=0.0, scope='self_att_rescale',\n squeeze=False, wd=config.wd, input_keep_prob=config.keep_rate, is_train\n =is_train)\n", (59416, 59570), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((60989, 61149), 'my.tensorflow.nn.linear', 'linear', (['self_att', 'dim', '(True)'], {'bias_start': '(0.0)', 'scope': '"""self_att_rescale"""', 'squeeze': '(False)', 'wd': 'config.wd', 'input_keep_prob': 'config.keep_rate', 'is_train': 'is_train'}), "(self_att, dim, True, bias_start=0.0, scope='self_att_rescale',\n squeeze=False, wd=config.wd, input_keep_prob=config.keep_rate, is_train\n =is_train)\n", (60995, 61149), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((61371, 61382), 'tensorflow.shape', 'tf.shape', (['p'], {}), '(p)\n', (61379, 61382), True, 'import tensorflow as tf\n'), ((63096, 63128), 'tensorflow.contrib.layers.layer_norm', 'tf.contrib.layers.layer_norm', (['p0'], {}), '(p0)\n', (63124, 63128), True, 'import tensorflow as tf\n'), ((63501, 63648), 'my.tensorflow.nn.linear', 'linear', (['p', 'dim', '(True)'], {'bias_start': '(0.0)', 'scope': '"""linear_maping_1"""', 'squeeze': '(False)', 'wd': 'config.wd', 'input_keep_prob': 'config.keep_rate', 'is_train': 'is_train'}), "(p, dim, True, bias_start=0.0, scope='linear_maping_1', squeeze=False,\n wd=config.wd, input_keep_prob=config.keep_rate, is_train=is_train)\n", (63507, 63648), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((64002, 64013), 'tensorflow.shape', 'tf.shape', (['p'], {}), '(p)\n', (64010, 64013), True, 'import tensorflow as 
tf\n'), ((64030, 64041), 'tensorflow.shape', 'tf.shape', (['h'], {}), '(h)\n', (64038, 64041), True, 'import tensorflow as tf\n'), ((64069, 64089), 'tensorflow.expand_dims', 'tf.expand_dims', (['p', '(2)'], {}), '(p, 2)\n', (64083, 64089), True, 'import tensorflow as tf\n'), ((64127, 64147), 'tensorflow.expand_dims', 'tf.expand_dims', (['h', '(1)'], {}), '(h, 1)\n', (64141, 64147), True, 'import tensorflow as tf\n'), ((64774, 64906), 'my.tensorflow.nn.get_logits', 'get_logits', (['[p_aug, h_aug]', 'None', '(True)'], {'wd': 'config.wd', 'mask': 'ph_mask', 'is_train': 'is_train', 'func': 'config.logit_func', 'scope': '"""h_logits"""'}), "([p_aug, h_aug], None, True, wd=config.wd, mask=ph_mask, is_train\n =is_train, func=config.logit_func, scope='h_logits')\n", (64784, 64906), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((64965, 64995), 'my.tensorflow.nn.softsel', 'softsel', (['h_value_aug', 'h_logits'], {}), '(h_value_aug, h_logits)\n', (64972, 64995), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((65034, 65161), 'my.tensorflow.nn.get_logits', 'get_logits', (['[p_aug, h_aug]', 'None', '(True)'], {'wd': 'config.wd', 'mask': 'ph_mask', 'is_train': 'is_train', 'func': '"""mul_linear"""', 'scope': '"""h_logits"""'}), "([p_aug, h_aug], None, True, wd=config.wd, mask=ph_mask, is_train\n =is_train, func='mul_linear', scope='h_logits')\n", (65044, 65161), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((65220, 65244), 'my.tensorflow.nn.softsel', 'softsel', (['h_aug', 'h_logits'], {}), '(h_aug, h_logits)\n', (65227, 65244), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, 
multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((65271, 65297), 'tensorflow.reduce_max', 'tf.reduce_max', (['h_logits', '(2)'], {}), '(h_logits, 2)\n', (65284, 65297), True, 'import tensorflow as tf\n'), ((65332, 65354), 'tensorflow.expand_dims', 'tf.expand_dims', (['p_a', '(1)'], {}), '(p_a, 1)\n', (65346, 65354), True, 'import tensorflow as tf\n'), ((65580, 65591), 'tensorflow.shape', 'tf.shape', (['p'], {}), '(p)\n', (65588, 65591), True, 'import tensorflow as tf\n'), ((65608, 65619), 'tensorflow.shape', 'tf.shape', (['h'], {}), '(h)\n', (65616, 65619), True, 'import tensorflow as tf\n'), ((65803, 65839), 'tensorflow.concat', 'tf.concat', (['[p, h_a, p * h_a]'], {'axis': '(2)'}), '([p, h_a, p * h_a], axis=2)\n', (65812, 65839), True, 'import tensorflow as tf\n'), ((65871, 65916), 'tensorflow.concat', 'tf.concat', (['[p, h_a, p * h_a, p * p_a]'], {'axis': '(2)'}), '([p, h_a, p * h_a, p * p_a], axis=2)\n', (65880, 65916), True, 'import tensorflow as tf\n'), ((66765, 66807), 'tensorflow.variable_scope', 'tf.variable_scope', (["(name or 'inception_CNN')"], {}), "(name or 'inception_CNN')\n", (66782, 66807), True, 'import tensorflow as tf\n'), ((66849, 66890), 'tensorflow.variable_scope', 'tf.variable_scope', (["(name or 'residual_CNN')"], {}), "(name or 'residual_CNN')\n", (66866, 66890), True, 'import tensorflow as tf\n'), ((74534, 74555), 'tensorflow.unstack', 'tf.unstack', (['x'], {'axis': '(3)'}), '(x, axis=3)\n', (74544, 74555), True, 'import tensorflow as tf\n'), ((82683, 82718), 'tensorflow.concat', 'tf.concat', (['list_of_features'], {'axis': '(3)'}), '(list_of_features, axis=3)\n', (82692, 82718), True, 'import tensorflow as tf\n'), ((82807, 82846), 'tensorflow.concat', 'tf.concat', (['list_of_features[1:]'], {'axis': '(3)'}), '(list_of_features[1:], axis=3)\n', (82816, 82846), True, 'import tensorflow as tf\n'), ((83547, 83588), 'tensorflow.contrib.layers.batch_norm', 'tf.contrib.layers.batch_norm', 
(['feature_map'], {}), '(feature_map)\n', (83575, 83588), True, 'import tensorflow as tf\n'), ((83682, 83746), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['feature_map', '[1, 2, 2, 1]', '[1, 2, 2, 1]', '"""VALID"""'], {}), "(feature_map, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')\n", (83696, 83746), True, 'import tensorflow as tf\n'), ((83836, 83900), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['feature_map', '[1, 2, 2, 1]', '[1, 2, 2, 1]', '"""VALID"""'], {}), "(feature_map, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')\n", (83850, 83900), True, 'import tensorflow as tf\n'), ((84326, 84422), 'tensorflow.contrib.layers.convolution2d', 'tf.contrib.layers.convolution2d', (['feature_map', 'out_dim', '(1)'], {'padding': '"""SAME"""', 'activation_fn': 'None'}), "(feature_map, out_dim, 1, padding='SAME',\n activation_fn=None)\n", (84357, 84422), True, 'import tensorflow as tf\n'), ((84577, 84641), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['feature_map', '[1, 2, 2, 1]', '[1, 2, 2, 1]', '"""VALID"""'], {}), "(feature_map, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')\n", (84591, 84641), True, 'import tensorflow as tf\n'), ((86862, 86927), 'tensorflow.get_variable', 'tf.get_variable', (['"""memory_key_and_value"""'], {'shape': '[memory_size, dim]'}), "('memory_key_and_value', shape=[memory_size, dim])\n", (86877, 86927), True, 'import tensorflow as tf\n'), ((86951, 86996), 'tensorflow.ones', 'tf.ones', (['[memory_size, 1]'], {'name': '"""memory_mask"""'}), "([memory_size, 1], name='memory_mask')\n", (86958, 86996), True, 'import tensorflow as tf\n'), ((87019, 87041), 'tensorflow.expand_dims', 'tf.expand_dims', (['mem', '(0)'], {}), '(mem, 0)\n', (87033, 87041), True, 'import tensorflow as tf\n'), ((87069, 87096), 'tensorflow.expand_dims', 'tf.expand_dims', (['mem_mask', '(0)'], {}), '(mem_mask, 0)\n', (87083, 87096), True, 'import tensorflow as tf\n'), ((87732, 87760), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (87755, 87760), True, 'import 
tensorflow as tf\n'), ((88714, 88736), 'tensorflow.zeros', 'tf.zeros', (['params_shape'], {}), '(params_shape)\n', (88722, 88736), True, 'import tensorflow as tf\n'), ((88766, 88787), 'tensorflow.ones', 'tf.ones', (['params_shape'], {}), '(params_shape)\n', (88773, 88787), True, 'import tensorflow as tf\n'), ((1494, 1597), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, self.sequence_length, config.subword_feature_len]'], {'name': '"""premise"""'}), "(tf.int32, [None, self.sequence_length, config.\n subword_feature_len], name='premise')\n", (1508, 1597), True, 'import tensorflow as tf\n'), ((1625, 1731), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, self.sequence_length, config.subword_feature_len]'], {'name': '"""hypothesis"""'}), "(tf.int32, [None, self.sequence_length, config.\n subword_feature_len], name='hypothesis')\n", (1639, 1731), True, 'import tensorflow as tf\n'), ((1770, 1840), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, self.sequence_length]'], {'name': '"""premise"""'}), "(tf.int32, [None, self.sequence_length], name='premise')\n", (1784, 1840), True, 'import tensorflow as tf\n'), ((1873, 1946), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, self.sequence_length]'], {'name': '"""hypothesis"""'}), "(tf.int32, [None, self.sequence_length], name='hypothesis')\n", (1887, 1946), True, 'import tensorflow as tf\n'), ((5351, 5388), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.premise_x'], {'axis': '(2)'}), '(self.premise_x, axis=2)\n', (5364, 5388), True, 'import tensorflow as tf\n'), ((5444, 5484), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['self.hypothesis_x'], {'axis': '(2)'}), '(self.hypothesis_x, axis=2)\n', (5457, 5484), True, 'import tensorflow as tf\n'), ((5833, 5861), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""emb_var"""'], {}), "('emb_var')\n", (5850, 5861), True, 'import tensorflow as tf\n'), ((5863, 5882), 'tensorflow.device', 'tf.device', 
(['"""/cpu:0"""'], {}), "('/cpu:0')\n", (5872, 5882), True, 'import tensorflow as tf\n'), ((5909, 5953), 'tensorflow.Variable', 'tf.Variable', (['embeddings'], {'trainable': 'emb_train'}), '(embeddings, trainable=emb_train)\n', (5920, 5953), True, 'import tensorflow as tf\n'), ((6101, 6137), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""subword_emb_sum"""'], {}), "('subword_emb_sum')\n", (6118, 6137), True, 'import tensorflow as tf\n'), ((6168, 6201), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['premise_in'], {'axis': '(2)'}), '(premise_in, axis=2)\n', (6181, 6201), True, 'import tensorflow as tf\n'), ((6234, 6270), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['hypothesis_in'], {'axis': '(2)'}), '(hypothesis_in, axis=2)\n', (6247, 6270), True, 'import tensorflow as tf\n'), ((6528, 6552), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""emb"""'], {}), "('emb')\n", (6545, 6552), True, 'import tensorflow as tf\n'), ((11634, 11674), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""embedding_fuse_gate"""'], {}), "('embedding_fuse_gate')\n", (11651, 11674), True, 'import tensorflow as tf\n'), ((11714, 11804), 'my.tensorflow.nn.fuse_gate', 'fuse_gate', (['config', 'self.is_train', 'premise_in', 'premise_in'], {'scope': '"""embedding_fuse_gate"""'}), "(config, self.is_train, premise_in, premise_in, scope=\n 'embedding_fuse_gate')\n", (11723, 11804), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((11872, 11968), 'my.tensorflow.nn.fuse_gate', 'fuse_gate', (['config', 'self.is_train', 'hypothesis_in', 'hypothesis_in'], {'scope': '"""embedding_fuse_gate"""'}), "(config, self.is_train, hypothesis_in, hypothesis_in, scope=\n 'embedding_fuse_gate')\n", (11881, 11968), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, 
fuse_gate\n'), ((12357, 12385), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""highway"""'], {}), "('highway')\n", (12374, 12385), True, 'import tensorflow as tf\n'), ((12425, 12575), 'my.tensorflow.nn.highway_network', 'highway_network', (['premise_in', 'config.highway_num_layers', '(True)'], {'wd': 'config.wd', 'is_train': 'self.is_train', 'output_size': 'config.highway_network_output_size'}), '(premise_in, config.highway_num_layers, True, wd=config.wd,\n is_train=self.is_train, output_size=config.highway_network_output_size)\n', (12440, 12575), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((19938, 19968), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""mem_augmt"""'], {}), "('mem_augmt')\n", (19955, 19968), True, 'import tensorflow as tf\n'), ((20361, 20395), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""LSTM_encoding"""'], {}), "('LSTM_encoding')\n", (20378, 20395), True, 'import tensorflow as tf\n'), ((20507, 20586), 'my.tensorflow.rnn_cell.SwitchableDropoutWrapper', 'SwitchableDropoutWrapper', (['cell', 'self.is_train'], {'input_keep_prob': 'config.keep_rate'}), '(cell, self.is_train, input_keep_prob=config.keep_rate)\n', (20531, 20586), False, 'from my.tensorflow.rnn_cell import SwitchableDropoutWrapper, AttentionCell\n'), ((20622, 20762), 'tensorflow.nn.bidirectional_dynamic_rnn', 'tf.nn.bidirectional_dynamic_rnn', ([], {'cell_fw': 'd_cell', 'cell_bw': 'd_cell', 'inputs': 'p', 'sequence_length': 'prem_seq_lengths', 'dtype': 'tf.float32', 'scope': '"""p"""'}), "(cell_fw=d_cell, cell_bw=d_cell, inputs=p,\n sequence_length=prem_seq_lengths, dtype=tf.float32, scope='p')\n", (20653, 20762), True, 'import tensorflow as tf\n'), ((20804, 20835), 'tensorflow.concat', 'tf.concat', (['[fw_p, bw_p]'], {'axis': '(2)'}), '([fw_p, bw_p], axis=2)\n', (20813, 20835), True, 'import tensorflow as tf\n'), ((21028, 21167), 
'tensorflow.nn.bidirectional_dynamic_rnn', 'tf.nn.bidirectional_dynamic_rnn', ([], {'cell_fw': 'd_cell', 'cell_bw': 'd_cell', 'inputs': 'h', 'sequence_length': 'hyp_seq_lengths', 'dtype': 'tf.float32', 'scope': '"""p"""'}), "(cell_fw=d_cell, cell_bw=d_cell, inputs=h,\n sequence_length=hyp_seq_lengths, dtype=tf.float32, scope='p')\n", (21059, 21167), True, 'import tensorflow as tf\n'), ((21193, 21224), 'tensorflow.concat', 'tf.concat', (['[fw_h, bw_h]'], {'axis': '(2)'}), '([fw_h, bw_h], axis=2)\n', (21202, 21224), True, 'import tensorflow as tf\n'), ((27479, 27542), 'tensorflow.concat', 'tf.concat', (['[p_vec, h_vec, p_vec - h_vec, p_vec * h_vec]'], {'axis': '(1)'}), '([p_vec, h_vec, p_vec - h_vec, p_vec * h_vec], axis=1)\n', (27488, 27542), True, 'import tensorflow as tf\n'), ((27583, 27650), 'tensorflow.concat', 'tf.concat', (['[f0, p_vec, h_vec, p_vec - h_vec, p_vec * h_vec]'], {'axis': '(1)'}), '([f0, p_vec, h_vec, p_vec - h_vec, p_vec * h_vec], axis=1)\n', (27592, 27650), True, 'import tensorflow as tf\n'), ((28155, 28185), 'tensorflow.expand_dims', 'tf.expand_dims', (['tensor'], {'axis': '(2)'}), '(tensor, axis=2)\n', (28169, 28185), True, 'import tensorflow as tf\n'), ((28248, 28277), 'tensorflow.concat', 'tf.concat', (['logtis_aug'], {'axis': '(2)'}), '(logtis_aug, axis=2)\n', (28257, 28277), True, 'import tensorflow as tf\n'), ((28352, 28512), 'my.tensorflow.nn.linear', 'linear', (['f0', 'self.pred_size', '(True)'], {'bias_start': '(0.0)', 'scope': '"""logit"""', 'squeeze': '(False)', 'wd': 'config.wd', 'input_keep_prob': 'config.keep_rate', 'is_train': 'self.is_train'}), "(f0, self.pred_size, True, bias_start=0.0, scope='logit', squeeze=\n False, wd=config.wd, input_keep_prob=config.keep_rate, is_train=self.\n is_train)\n", (28358, 28512), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((28627, 28787), 'my.tensorflow.nn.linear', 
'linear', (['f0', 'self.pred_size', '(True)'], {'bias_start': '(0.0)', 'scope': '"""logit"""', 'squeeze': '(False)', 'wd': 'config.wd', 'input_keep_prob': 'config.keep_rate', 'is_train': 'self.is_train'}), "(f0, self.pred_size, True, bias_start=0.0, scope='logit', squeeze=\n False, wd=config.wd, input_keep_prob=config.keep_rate, is_train=self.\n is_train)\n", (28633, 28787), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((29465, 29551), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'self.y', 'logits': 'self.logits'}), '(labels=self.y, logits=self.\n logits)\n', (29511, 29551), True, 'import tensorflow as tf\n'), ((30812, 30913), 'tensorflow.constant', 'tf.constant', (['config.weight_l2loss_step_full_reg'], {'dtype': 'tf.int32', 'shape': '[]', 'name': '"""full_l2reg_step"""'}), "(config.weight_l2loss_step_full_reg, dtype=tf.int32, shape=[],\n name='full_l2reg_step')\n", (30823, 30913), True, 'import tensorflow as tf\n'), ((30943, 31048), 'tensorflow.constant', 'tf.constant', (['config.l2_regularization_ratio'], {'dtype': '"""float"""', 'shape': '[]', 'name': '"""l2_regularization_ratio"""'}), "(config.l2_regularization_ratio, dtype='float', shape=[], name=\n 'l2_regularization_ratio')\n", (30954, 31048), True, 'import tensorflow as tf\n'), ((31223, 31270), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""l2loss_ratio"""', 'l2loss_ratio'], {}), "('l2loss_ratio', l2loss_ratio)\n", (31240, 31270), True, 'import tensorflow as tf\n'), ((31887, 31911), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (31909, 31911), True, 'import tensorflow as tf\n'), ((36409, 36508), 'tensorflow.constant', 'tf.constant', (['config.diff_l2_penalty_full_step'], {'dtype': 'tf.int32', 'shape': '[]', 'name': '"""full_l2reg_step"""'}), 
"(config.diff_l2_penalty_full_step, dtype=tf.int32, shape=[],\n name='full_l2reg_step')\n", (36420, 36508), True, 'import tensorflow as tf\n'), ((36538, 36643), 'tensorflow.constant', 'tf.constant', (['config.diff_penalty_loss_ratio'], {'dtype': '"""float"""', 'shape': '[]', 'name': '"""diff_penalty_loss_ratio"""'}), "(config.diff_penalty_loss_ratio, dtype='float', shape=[], name=\n 'diff_penalty_loss_ratio')\n", (36549, 36643), True, 'import tensorflow as tf\n'), ((36822, 36879), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""diff_l2loss_ratio"""', 'diff_l2loss_ratio'], {}), "('diff_l2loss_ratio', diff_l2loss_ratio)\n", (36839, 36879), True, 'import tensorflow as tf\n'), ((37614, 37644), 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'tf.int32'}), '(0, dtype=tf.int32)\n', (37625, 37644), True, 'import tensorflow as tf\n'), ((37692, 37722), 'tensorflow.constant', 'tf.constant', (['(1)'], {'dtype': 'tf.int32'}), '(1, dtype=tf.int32)\n', (37703, 37722), True, 'import tensorflow as tf\n'), ((37776, 37806), 'tensorflow.constant', 'tf.constant', (['(2)'], {'dtype': 'tf.int32'}), '(2, dtype=tf.int32)\n', (37787, 37806), True, 'import tensorflow as tf\n'), ((38381, 38442), 'tensorflow.add_n', 'tf.add_n', (['[entailment_loss, neutral_loss, contradiction_loss]'], {}), '([entailment_loss, neutral_loss, contradiction_loss])\n', (38389, 38442), True, 'import tensorflow as tf\n'), ((39431, 39583), 'my.tensorflow.nn.linear', 'linear', (['matrix', '(1)', '(True)'], {'bias_start': '(0.0)', 'scope': '"""weighted_sum"""', 'squeeze': '(False)', 'wd': 'config.wd', 'input_keep_prob': 'config.keep_rate', 'is_train': 'self.is_train'}), "(matrix, 1, True, bias_start=0.0, scope='weighted_sum', squeeze=False,\n wd=config.wd, input_keep_prob=config.keep_rate, is_train=self.is_train)\n", (39437, 39583), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), 
((39602, 39621), 'my.tensorflow.exp_mask', 'exp_mask', (['wgt', 'mask'], {}), '(wgt, mask)\n', (39610, 39621), False, 'from my.tensorflow import flatten, reconstruct, add_wd, exp_mask\n'), ((39903, 40055), 'my.tensorflow.nn.linear', 'linear', (['matrix', '(8)', '(True)'], {'bias_start': '(0.0)', 'scope': '"""weighted_sum"""', 'squeeze': '(False)', 'wd': 'config.wd', 'input_keep_prob': 'config.keep_rate', 'is_train': 'self.is_train'}), "(matrix, 8, True, bias_start=0.0, scope='weighted_sum', squeeze=False,\n wd=config.wd, input_keep_prob=config.keep_rate, is_train=self.is_train)\n", (39909, 40055), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((40085, 40108), 'tensorflow.unstack', 'tf.unstack', (['wgt'], {'axis': '(2)'}), '(wgt, axis=2)\n', (40095, 40108), True, 'import tensorflow as tf\n'), ((41343, 41366), 'tensorflow.unstack', 'tf.unstack', (['wgt'], {'axis': '(2)'}), '(wgt, axis=2)\n', (41353, 41366), True, 'import tensorflow as tf\n'), ((41951, 41978), 'tensorflow.concat', 'tf.concat', (['features'], {'axis': '(1)'}), '(features, axis=1)\n', (41960, 41978), True, 'import tensorflow as tf\n'), ((42726, 42753), 'tensorflow.concat', 'tf.concat', (['features'], {'axis': '(1)'}), '(features, axis=1)\n', (42735, 42753), True, 'import tensorflow as tf\n'), ((42932, 42959), 'tensorflow.concat', 'tf.concat', (['features'], {'axis': '(1)'}), '(features, axis=1)\n', (42941, 42959), True, 'import tensorflow as tf\n'), ((42983, 43014), 'tensorflow.concat', 'tf.concat', (['[ft, matrix]'], {'axis': '(1)'}), '([ft, matrix], axis=1)\n', (42992, 43014), True, 'import tensorflow as tf\n'), ((43239, 43265), 'tensorflow.reduce_max', 'tf.reduce_max', (['lhs'], {'axis': '(1)'}), '(lhs, axis=1)\n', (43252, 43265), True, 'import tensorflow as tf\n'), ((43286, 43312), 'tensorflow.reduce_max', 'tf.reduce_max', (['rhs'], {'axis': '(1)'}), '(rhs, axis=1)\n', 
(43299, 43312), True, 'import tensorflow as tf\n'), ((43520, 43547), 'tensorflow.concat', 'tf.concat', (['features'], {'axis': '(1)'}), '(features, axis=1)\n', (43529, 43547), True, 'import tensorflow as tf\n'), ((43945, 43973), 'tensorflow.expand_dims', 'tf.expand_dims', (['comm'], {'axis': '(1)'}), '(comm, axis=1)\n', (43959, 43973), True, 'import tensorflow as tf\n'), ((44321, 44362), 'tensorflow.variable_scope', 'tf.variable_scope', (["(name or 'conv_pooling')"], {}), "(name or 'conv_pooling')\n", (44338, 44362), True, 'import tensorflow as tf\n'), ((44443, 44509), 'tensorflow.get_variable', 'tf.get_variable', (['"""filter"""'], {'shape': '[2, 2, chan, chan]', 'dtype': '"""float"""'}), "('filter', shape=[2, 2, chan, chan], dtype='float')\n", (44458, 44509), True, 'import tensorflow as tf\n'), ((44529, 44581), 'tensorflow.get_variable', 'tf.get_variable', (['"""bias"""'], {'shape': '[chan]', 'dtype': '"""float"""'}), "('bias', shape=[chan], dtype='float')\n", (44544, 44581), True, 'import tensorflow as tf\n'), ((49272, 49328), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['res', '[1, 6, 6, 1]', '[1, 1, 1, 1]', '"""VALID"""'], {}), "(res, [1, 6, 6, 1], [1, 1, 1, 1], 'VALID')\n", (49286, 49328), True, 'import tensorflow as tf\n'), ((49345, 49401), 'tensorflow.nn.avg_pool', 'tf.nn.avg_pool', (['res', '[1, 6, 6, 1]', '[1, 1, 1, 1]', '"""VALID"""'], {}), "(res, [1, 6, 6, 1], [1, 1, 1, 1], 'VALID')\n", (49359, 49401), True, 'import tensorflow as tf\n'), ((49413, 49452), 'tensorflow.concat', 'tf.concat', (['[max_pool, avg_pool]'], {'axis': '(3)'}), '([max_pool, avg_pool], axis=3)\n', (49422, 49452), True, 'import tensorflow as tf\n'), ((51190, 51215), 'tensorflow.expand_dims', 'tf.expand_dims', (['p_mask', '(2)'], {}), '(p_mask, 2)\n', (51204, 51215), True, 'import tensorflow as tf\n'), ((51298, 51323), 'tensorflow.expand_dims', 'tf.expand_dims', (['h_mask', '(1)'], {}), '(h_mask, 1)\n', (51312, 51323), True, 'import tensorflow as tf\n'), ((59234, 59257), 
'my.tensorflow.nn.softsel', 'softsel', (['p_aug_2', 'logit'], {}), '(p_aug_2, logit)\n', (59241, 59257), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((61770, 61838), 'my.tensorflow.nn.fuse_gate', 'fuse_gate', (['config', 'is_train', 'p', 'self_att'], {'scope': '"""self_att_fuse_gate"""'}), "(config, is_train, p, self_att, scope='self_att_fuse_gate')\n", (61779, 61838), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((64314, 64340), 'tensorflow.expand_dims', 'tf.expand_dims', (['h_value', '(1)'], {}), '(h_value, 1)\n', (64328, 64340), True, 'import tensorflow as tf\n'), ((66316, 66379), 'my.tensorflow.nn.fuse_gate', 'fuse_gate', (['config', 'is_train', 'p', 'p1'], {'scope': '"""cross_att_fuse_gate"""'}), "(config, is_train, p, p1, scope='cross_att_fuse_gate')\n", (66325, 66379), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((74652, 74695), 'tensorflow.expand_dims', 'tf.expand_dims', (['list_of_conv_features[0]', '(3)'], {}), '(list_of_conv_features[0], 3)\n', (74666, 74695), True, 'import tensorflow as tf\n'), ((74731, 74759), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sub_add"""'], {}), "('sub_add')\n", (74748, 74759), True, 'import tensorflow as tf\n'), ((79810, 79865), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['fm', '[1, 2, 2, 1]', '[1, 2, 2, 1]', '"""VALID"""'], {}), "(fm, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')\n", (79824, 79865), True, 'import tensorflow as tf\n'), ((81302, 81394), 'my.tensorflow.nn.conv2d', 'conv2d', (['ft', 'growth_rate', '(kernel_size, kernel_size)'], {'padding': 'padding', 'activation_fn': 'None'}), '(ft, growth_rate, (kernel_size, kernel_size), 
padding=padding,\n activation_fn=None)\n', (81308, 81394), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((84008, 84038), 'tensorflow.expand_dims', 'tf.expand_dims', (['tensor'], {'axis': '(3)'}), '(tensor, axis=3)\n', (84022, 84038), True, 'import tensorflow as tf\n'), ((85818, 85863), 'tensorflow.ones', 'tf.ones', (['[memory_size, 1]'], {'name': '"""memory_mask"""'}), "([memory_size, 1], name='memory_mask')\n", (85825, 85863), True, 'import tensorflow as tf\n'), ((85895, 85922), 'tensorflow.expand_dims', 'tf.expand_dims', (['mem_mask', '(0)'], {}), '(mem_mask, 0)\n', (85909, 85922), True, 'import tensorflow as tf\n'), ((85949, 85972), 'tensorflow.expand_dims', 'tf.expand_dims', (['keys', '(0)'], {}), '(keys, 0)\n', (85963, 85972), True, 'import tensorflow as tf\n'), ((86001, 86026), 'tensorflow.expand_dims', 'tf.expand_dims', (['values', '(0)'], {}), '(values, 0)\n', (86015, 86026), True, 'import tensorflow as tf\n'), ((87352, 87420), 'my.tensorflow.nn.fuse_gate', 'fuse_gate', (['config', 'is_train', 'x', 'attended_x'], {'scope': '"""fuse_gate_memory"""'}), "(config, is_train, x, attended_x, scope='fuse_gate_memory')\n", (87361, 87420), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((6361, 6401), 'tensorflow.contrib.layers.batch_norm', 'tf.contrib.layers.batch_norm', (['premise_in'], {}), '(premise_in)\n', (6389, 6401), True, 'import tensorflow as tf\n'), ((6438, 6481), 'tensorflow.contrib.layers.batch_norm', 'tf.contrib.layers.batch_norm', (['hypothesis_in'], {}), '(hypothesis_in)\n', (6466, 6481), True, 'import tensorflow as tf\n'), ((7378, 7407), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""char_emb"""'], {}), "('char_emb')\n", (7395, 7407), True, 'import tensorflow as tf\n'), ((7444, 7534), 
'tensorflow.get_variable', 'tf.get_variable', (['"""char_emb_mat"""'], {'shape': '[config.char_vocab_size, config.char_emb_size]'}), "('char_emb_mat', shape=[config.char_vocab_size, config.\n char_emb_size])\n", (7459, 7534), True, 'import tensorflow as tf\n'), ((9926, 9967), 'tensorflow.concat', 'tf.concat', (['[premise_in, conv_pre]'], {'axis': '(2)'}), '([premise_in, conv_pre], axis=2)\n', (9935, 9967), True, 'import tensorflow as tf\n'), ((10004, 10048), 'tensorflow.concat', 'tf.concat', (['[hypothesis_in, conv_hyp]'], {'axis': '(2)'}), '([hypothesis_in, conv_hyp], axis=2)\n', (10013, 10048), True, 'import tensorflow as tf\n'), ((10132, 10169), 'tensorflow.cast', 'tf.cast', (['self.premise_pos', 'tf.float32'], {}), '(self.premise_pos, tf.float32)\n', (10139, 10169), True, 'import tensorflow as tf\n'), ((10234, 10274), 'tensorflow.cast', 'tf.cast', (['self.hypothesis_pos', 'tf.float32'], {}), '(self.hypothesis_pos, tf.float32)\n', (10241, 10274), True, 'import tensorflow as tf\n'), ((10377, 10422), 'tensorflow.cast', 'tf.cast', (['self.premise_exact_match', 'tf.float32'], {}), '(self.premise_exact_match, tf.float32)\n', (10384, 10422), True, 'import tensorflow as tf\n'), ((10487, 10535), 'tensorflow.cast', 'tf.cast', (['self.hypothesis_exact_match', 'tf.float32'], {}), '(self.hypothesis_exact_match, tf.float32)\n', (10494, 10535), True, 'import tensorflow as tf\n'), ((10848, 10889), 'tensorflow.cast', 'tf.cast', (['self.premise_antonym', 'tf.float32'], {}), '(self.premise_antonym, tf.float32)\n', (10855, 10889), True, 'import tensorflow as tf\n'), ((10954, 10998), 'tensorflow.cast', 'tf.cast', (['self.hypothesis_antonym', 'tf.float32'], {}), '(self.hypothesis_antonym, tf.float32)\n', (10961, 10998), True, 'import tensorflow as tf\n'), ((11093, 11138), 'tensorflow.cast', 'tf.cast', (['self.premise_NER_feature', 'tf.float32'], {}), '(self.premise_NER_feature, tf.float32)\n', (11100, 11138), True, 'import tensorflow as tf\n'), ((11203, 11251), 'tensorflow.cast', 
'tf.cast', (['self.hypothesis_NER_feature', 'tf.float32'], {}), '(self.hypothesis_NER_feature, tf.float32)\n', (11210, 11251), True, 'import tensorflow as tf\n'), ((12059, 12108), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['premise_in', 'config.input_keep_rate'], {}), '(premise_in, config.input_keep_rate)\n', (12072, 12108), True, 'import tensorflow as tf\n'), ((12189, 12241), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['hypothesis_in', 'config.input_keep_rate'], {}), '(hypothesis_in, config.input_keep_rate)\n', (12202, 12241), True, 'import tensorflow as tf\n'), ((12672, 12858), 'my.tensorflow.nn.highway_network', 'highway_network', (['hypothesis_in', 'config.highway_num_layers', '(True)'], {'scope': '"""highway_network_h"""', 'wd': 'config.wd', 'is_train': 'self.is_train', 'output_size': 'config.highway_network_output_size'}), "(hypothesis_in, config.highway_num_layers, True, scope=\n 'highway_network_h', wd=config.wd, is_train=self.is_train, output_size=\n config.highway_network_output_size)\n", (12687, 12858), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((12951, 13105), 'my.tensorflow.nn.highway_network', 'highway_network', (['hypothesis_in', 'config.highway_num_layers', '(True)'], {'wd': 'config.wd', 'is_train': 'self.is_train', 'output_size': 'config.highway_network_output_size'}), '(hypothesis_in, config.highway_num_layers, True, wd=config.\n wd, is_train=self.is_train, output_size=config.highway_network_output_size)\n', (12966, 13105), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((21916, 21960), 'tensorflow.variable_scope', 'tf.variable_scope', (["(scope or 'model_one_side')"], {}), "(scope or 'model_one_side')\n", (21933, 21960), True, 'import tensorflow as tf\n'), ((26363, 26407), 
'tensorflow.subtract', 'tf.subtract', (['premise_final', 'hypothesis_final'], {}), '(premise_final, hypothesis_final)\n', (26374, 26407), True, 'import tensorflow as tf\n'), ((26434, 26478), 'tensorflow.multiply', 'tf.multiply', (['premise_final', 'hypothesis_final'], {}), '(premise_final, hypothesis_final)\n', (26445, 26478), True, 'import tensorflow as tf\n'), ((26504, 26567), 'tensorflow.concat', 'tf.concat', (['(premise_final, hypothesis_final, diff, mul)'], {'axis': '(1)'}), '((premise_final, hypothesis_final, diff, mul), axis=1)\n', (26513, 26567), True, 'import tensorflow as tf\n'), ((30286, 30378), 'tensorflow.reductf.nn.sparse_softmax_cross_entropy_with_logits', 'tf.reductf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'self.y', 'logits': 'enc_logits'}), '(labels=self.y,\n logits=enc_logits)\n', (30340, 30378), True, 'import tensorflow as tf\n'), ((31480, 31585), 'tensorflow.constant', 'tf.constant', (['config.l2_regularization_ratio'], {'dtype': '"""float"""', 'shape': '[]', 'name': '"""l2_regularization_ratio"""'}), "(config.l2_regularization_ratio, dtype='float', shape=[], name=\n 'l2_regularization_ratio')\n", (31491, 31585), True, 'import tensorflow as tf\n'), ((37044, 37149), 'tensorflow.constant', 'tf.constant', (['config.diff_penalty_loss_ratio'], {'dtype': '"""float"""', 'shape': '[]', 'name': '"""diff_penalty_loss_ratio"""'}), "(config.diff_penalty_loss_ratio, dtype='float', shape=[], name=\n 'diff_penalty_loss_ratio')\n", (37055, 37149), True, 'import tensorflow as tf\n'), ((39203, 39232), 'tensorflow.reduce_max', 'tf.reduce_max', (['matrix'], {'axis': '(1)'}), '(matrix, axis=1)\n', (39216, 39232), True, 'import tensorflow as tf\n'), ((39318, 39348), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['matrix'], {'axis': '(1)'}), '(matrix, axis=1)\n', (39332, 39348), True, 'import tensorflow as tf\n'), ((40243, 40272), 'tensorflow.expand_dims', 'tf.expand_dims', (['logit'], {'axis': '(2)'}), '(logit, axis=2)\n', (40257, 40272), True, 
'import tensorflow as tf\n'), ((40364, 40389), 'my.tensorflow.exp_mask', 'exp_mask', (['logit_tmp', 'mask'], {}), '(logit_tmp, mask)\n', (40372, 40389), False, 'from my.tensorflow import flatten, reconstruct, add_wd, exp_mask\n'), ((40954, 41118), 'my.tensorflow.nn.linear', 'linear', (['tmp_weight', '(48)', '(False)'], {'bias_start': '(0.0)', 'scope': '"""weighted_sum"""', 'squeeze': '(False)', 'wd': 'config.wd', 'input_keep_prob': 'config.keep_rate', 'is_train': 'self.is_train'}), "(tmp_weight, 48, False, bias_start=0.0, scope='weighted_sum', squeeze\n =False, wd=config.wd, input_keep_prob=config.keep_rate, is_train=self.\n is_train)\n", (40960, 41118), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((41158, 41318), 'my.tensorflow.nn.linear', 'linear', (['matrix', '(48)', '(False)'], {'bias_start': '(0.0)', 'scope': '"""weighted_sum"""', 'squeeze': '(False)', 'wd': 'config.wd', 'input_keep_prob': 'config.keep_rate', 'is_train': 'self.is_train'}), "(matrix, 48, False, bias_start=0.0, scope='weighted_sum', squeeze=\n False, wd=config.wd, input_keep_prob=config.keep_rate, is_train=self.\n is_train)\n", (41164, 41318), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((41501, 41530), 'tensorflow.expand_dims', 'tf.expand_dims', (['logit'], {'axis': '(2)'}), '(logit, axis=2)\n', (41515, 41530), True, 'import tensorflow as tf\n'), ((41622, 41647), 'my.tensorflow.exp_mask', 'exp_mask', (['logit_tmp', 'mask'], {}), '(logit_tmp, mask)\n', (41630, 41647), False, 'from my.tensorflow import flatten, reconstruct, add_wd, exp_mask\n'), ((41883, 41908), 'tensorflow.expand_dims', 'tf.expand_dims', (['f'], {'axis': '(1)'}), '(f, axis=1)\n', (41897, 41908), True, 'import tensorflow as tf\n'), ((42069, 42095), 'tensorflow.unstack', 
'tf.unstack', (['matrix'], {'axis': '(2)'}), '(matrix, axis=2)\n', (42079, 42095), True, 'import tensorflow as tf\n'), ((42276, 42305), 'tensorflow.expand_dims', 'tf.expand_dims', (['logit'], {'axis': '(2)'}), '(logit, axis=2)\n', (42290, 42305), True, 'import tensorflow as tf\n'), ((42397, 42422), 'my.tensorflow.exp_mask', 'exp_mask', (['logit_tmp', 'mask'], {}), '(logit_tmp, mask)\n', (42405, 42422), False, 'from my.tensorflow import flatten, reconstruct, add_wd, exp_mask\n'), ((42658, 42683), 'tensorflow.expand_dims', 'tf.expand_dims', (['f'], {'axis': '(1)'}), '(f, axis=1)\n', (42672, 42683), True, 'import tensorflow as tf\n'), ((42866, 42891), 'tensorflow.expand_dims', 'tf.expand_dims', (['f'], {'axis': '(1)'}), '(f, axis=1)\n', (42880, 42891), True, 'import tensorflow as tf\n'), ((44606, 44676), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['res', 'filters', '[1, 2, 2, 1]', '"""VALID"""'], {'name': '"""conv_pooling"""'}), "(res, filters, [1, 2, 2, 1], 'VALID', name='conv_pooling')\n", (44618, 44676), True, 'import tensorflow as tf\n'), ((49513, 49569), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['res', '[1, 2, 2, 1]', '[1, 2, 2, 1]', '"""VALID"""'], {}), "(res, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')\n", (49527, 49569), True, 'import tensorflow as tf\n'), ((55670, 55718), 'tensorflow.concat', 'tf.concat', (['[p_aug, h_aug, p_aug * h_aug]'], {'axis': '(3)'}), '([p_aug, h_aug, p_aug * h_aug], axis=3)\n', (55679, 55718), True, 'import tensorflow as tf\n'), ((55756, 55939), 'my.tensorflow.nn.dense_logits', 'dense_logits', (['config', '[p_aug, h_aug]', 'config.dense_logit_features_num', '(True)'], {'wd': 'config.wd', 'mask': 'ph_mask', 'is_train': 'is_train', 'func': 'config.dense_att_logit_func', 'scope': '"""h_logits"""'}), "(config, [p_aug, h_aug], config.dense_logit_features_num, True,\n wd=config.wd, mask=ph_mask, is_train=is_train, func=config.\n dense_att_logit_func, scope='h_logits')\n", (55768, 55939), False, 'from my.tensorflow.nn import softsel, get_logits, 
highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((60861, 60889), 'my.tensorflow.nn.softsel', 'softsel', (['p_aug_tmp_2', 'logits'], {}), '(p_aug_tmp_2, logits)\n', (60868, 60889), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((60921, 60964), 'tensorflow.concat', 'tf.concat', (['[self_att, self_att_tmp]'], {'axis': '(2)'}), '([self_att, self_att_tmp], axis=2)\n', (60930, 60964), True, 'import tensorflow as tf\n'), ((61900, 61932), 'tensorflow.concat', 'tf.concat', (['[p, self_att]'], {'axis': '(2)'}), '([p, self_att], axis=2)\n', (61909, 61932), True, 'import tensorflow as tf\n'), ((66954, 66992), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""shared_activation"""'], {}), "('shared_activation')\n", (66971, 66992), True, 'import tensorflow as tf\n'), ((67188, 67233), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""residual_only_activation"""'], {}), "('residual_only_activation')\n", (67205, 67233), True, 'import tensorflow as tf\n'), ((67445, 67470), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sub1"""'], {}), "('sub1')\n", (67462, 67470), True, 'import tensorflow as tf\n'), ((67778, 67803), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sub2"""'], {}), "('sub2')\n", (67795, 67803), True, 'import tensorflow as tf\n'), ((68131, 68156), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sub3"""'], {}), "('sub3')\n", (68148, 68156), True, 'import tensorflow as tf\n'), ((75960, 76022), 'my.tensorflow.nn.fuse_gate', 'fuse_gate', (['config', 'is_train', 'orig_x', 'x'], {'scope': '"""conv_fuse_gate"""'}), "(config, is_train, orig_x, x, scope='conv_fuse_gate')\n", (75969, 76022), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, 
fuse_gate\n'), ((77580, 77610), 'tensorflow.expand_dims', 'tf.expand_dims', (['tensor'], {'axis': '(4)'}), '(tensor, axis=4)\n', (77594, 77610), True, 'import tensorflow as tf\n'), ((77661, 77683), 'tensorflow.concat', 'tf.concat', (['fms'], {'axis': '(4)'}), '(fms, axis=4)\n', (77670, 77683), True, 'import tensorflow as tf\n'), ((81148, 81186), 'tensorflow.contrib.layers.batch_norm', 'tf.contrib.layers.batch_norm', (['features'], {}), '(features)\n', (81176, 81186), True, 'import tensorflow as tf\n'), ((81542, 81652), 'my.tensorflow.nn.conv2d', 'conv2d', (['features', 'growth_rate', '(kernel_size, kernel_size)'], {'padding': 'padding', 'activation_fn': 'act', 'rate': '(2, 2)'}), '(features, growth_rate, (kernel_size, kernel_size), padding=padding,\n activation_fn=act, rate=(2, 2))\n', (81548, 81652), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((81697, 81794), 'my.tensorflow.nn.conv2d', 'conv2d', (['features', 'growth_rate', '(kernel_size, kernel_size)'], {'padding': 'padding', 'activation_fn': 'act'}), '(features, growth_rate, (kernel_size, kernel_size), padding=padding,\n activation_fn=act)\n', (81703, 81794), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((83224, 83265), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['features', 'config.keep_rate'], {}), '(features, config.keep_rate)\n', (83237, 83265), True, 'import tensorflow as tf\n'), ((84053, 84084), 'tensorflow.unstack', 'tf.unstack', (['feature_map'], {'axis': '(3)'}), '(feature_map, axis=3)\n', (84063, 84084), True, 'import tensorflow as tf\n'), ((86323, 86393), 'my.tensorflow.nn.fuse_gate', 'fuse_gate', (['config', 'is_train', 'out', 'attended_x'], {'scope': '"""fuse_gate_memory"""'}), "(config, is_train, out, attended_x, scope='fuse_gate_memory')\n", 
(86332, 86393), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((87596, 87605), 'tensorflow.exp', 'tf.exp', (['x'], {}), '(x)\n', (87602, 87605), True, 'import tensorflow as tf\n'), ((5122, 5158), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['emb', 'config.keep_rate'], {}), '(emb, config.keep_rate)\n', (5135, 5158), True, 'import tensorflow as tf\n'), ((6617, 6645), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""emb_var"""'], {}), "('emb_var')\n", (6634, 6645), True, 'import tensorflow as tf\n'), ((6680, 6752), 'tensorflow.get_variable', 'tf.get_variable', (['"""embedding"""'], {'shape': '[self.pred_size, self.embedding_dim]'}), "('embedding', shape=[self.pred_size, self.embedding_dim])\n", (6695, 6752), True, 'import tensorflow as tf\n'), ((6955, 6983), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""emb_var"""'], {}), "('emb_var')\n", (6972, 6983), True, 'import tensorflow as tf\n'), ((6985, 7004), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (6994, 7004), True, 'import tensorflow as tf\n'), ((7039, 7083), 'tensorflow.Variable', 'tf.Variable', (['embeddings'], {'trainable': 'emb_train'}), '(embeddings, trainable=emb_train)\n', (7050, 7083), True, 'import tensorflow as tf\n'), ((7555, 7580), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""char"""'], {}), "('char')\n", (7572, 7580), True, 'import tensorflow as tf\n'), ((7626, 7681), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['char_emb_mat', 'self.premise_char'], {}), '(char_emb_mat, self.premise_char)\n', (7648, 7681), True, 'import tensorflow as tf\n'), ((7717, 7775), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['char_emb_mat', 'self.hypothesis_char'], {}), '(char_emb_mat, self.hypothesis_char)\n', (7739, 7775), True, 'import tensorflow as tf\n'), ((21389, 21464), 'my.tensorflow.nn.fuse_gate', 
'fuse_gate', (['config', 'self.is_train', 'p', 'p_lstm_enc'], {'scope': '"""lstm_enc_fuse_gate"""'}), "(config, self.is_train, p, p_lstm_enc, scope='lstm_enc_fuse_gate')\n", (21398, 21464), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((21559, 21634), 'my.tensorflow.nn.fuse_gate', 'fuse_gate', (['config', 'self.is_train', 'h', 'h_lstm_enc'], {'scope': '"""lstm_enc_fuse_gate"""'}), "(config, self.is_train, h, h_lstm_enc, scope='lstm_enc_fuse_gate')\n", (21568, 21634), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((23306, 23407), 'my.tensorflow.nn.highway_network', 'highway_network', (['bi_att_mx', 'config.highway_num_layers', '(True)'], {'wd': 'config.wd', 'is_train': 'self.is_train'}), '(bi_att_mx, config.highway_num_layers, True, wd=config.wd,\n is_train=self.is_train)\n', (23321, 23407), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((24284, 24323), 'tensorflow.contrib.layers.layer_norm', 'tf.contrib.layers.layer_norm', (['bi_att_mx'], {}), '(bi_att_mx)\n', (24312, 24323), True, 'import tensorflow as tf\n'), ((24915, 24947), 'tensorflow.concat', 'tf.concat', (['conv_features'], {'axis': '(1)'}), '(conv_features, axis=1)\n', (24924, 24947), True, 'import tensorflow as tf\n'), ((26724, 26768), 'tensorflow.multiply', 'tf.multiply', (['premise_final', 'hypothesis_final'], {}), '(premise_final, hypothesis_final)\n', (26735, 26768), True, 'import tensorflow as tf\n'), ((26794, 26857), 'tensorflow.concat', 'tf.concat', (['(premise_final, hypothesis_final, diff, mul)'], {'axis': '(1)'}), '((premise_final, hypothesis_final, diff, mul), axis=1)\n', (26803, 26857), True, 'import tensorflow as 
tf\n'), ((29323, 29359), 'tensorflow.arg_max', 'tf.arg_max', (['self.logits'], {'dimension': '(1)'}), '(self.logits, dimension=1)\n', (29333, 29359), True, 'import tensorflow as tf\n'), ((29360, 29391), 'tensorflow.arg_max', 'tf.arg_max', (['self.y'], {'dimension': '(1)'}), '(self.y, dimension=1)\n', (29370, 29391), True, 'import tensorflow as tf\n'), ((29603, 29639), 'tensorflow.arg_max', 'tf.arg_max', (['self.logits'], {'dimension': '(1)'}), '(self.logits, dimension=1)\n', (29613, 29639), True, 'import tensorflow as tf\n'), ((29640, 29665), 'tensorflow.cast', 'tf.cast', (['self.y', 'tf.int64'], {}), '(self.y, tf.int64)\n', (29647, 29665), True, 'import tensorflow as tf\n'), ((30626, 30647), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['tensor'], {}), '(tensor)\n', (30639, 30647), True, 'import tensorflow as tf\n'), ((36334, 36355), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['tensor'], {}), '(tensor)\n', (36347, 36355), True, 'import tensorflow as tf\n'), ((39667, 39692), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['wgt'], {'dim': '(1)'}), '(wgt, dim=1)\n', (39680, 39692), True, 'import tensorflow as tf\n'), ((40773, 40935), 'my.tensorflow.nn.linear', 'linear', (['matrix', '(200)', '(True)'], {'bias_start': '(0.0)', 'scope': '"""weighted_sum_1"""', 'squeeze': '(False)', 'wd': 'config.wd', 'input_keep_prob': 'config.keep_rate', 'is_train': 'self.is_train'}), "(matrix, 200, True, bias_start=0.0, scope='weighted_sum_1', squeeze=\n False, wd=config.wd, input_keep_prob=config.keep_rate, is_train=self.\n is_train)\n", (40779, 40935), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((58596, 58621), 'tensorflow.expand_dims', 'tf.expand_dims', (['p_mask', '(2)'], {}), '(p_mask, 2)\n', (58610, 58621), True, 'import tensorflow as tf\n'), ((58714, 58739), 'tensorflow.expand_dims', 'tf.expand_dims', (['p_mask', '(1)'], {}), '(p_mask, 1)\n', (58728, 
58739), True, 'import tensorflow as tf\n'), ((60499, 60525), 'tensorflow.expand_dims', 'tf.expand_dims', (['p_tmp_1', '(2)'], {}), '(p_tmp_1, 2)\n', (60513, 60525), True, 'import tensorflow as tf\n'), ((60581, 60607), 'tensorflow.expand_dims', 'tf.expand_dims', (['p_tmp_2', '(1)'], {}), '(p_tmp_2, 1)\n', (60595, 60607), True, 'import tensorflow as tf\n'), ((62217, 62263), 'tensorflow.concat', 'tf.concat', (['[p, self_att, p * self_att]'], {'axis': '(2)'}), '([p, self_att, p * self_att], axis=2)\n', (62226, 62263), True, 'import tensorflow as tf\n'), ((64478, 64503), 'tensorflow.expand_dims', 'tf.expand_dims', (['p_mask', '(2)'], {}), '(p_mask, 2)\n', (64492, 64503), True, 'import tensorflow as tf\n'), ((64594, 64619), 'tensorflow.expand_dims', 'tf.expand_dims', (['h_mask', '(1)'], {}), '(h_mask, 1)\n', (64608, 64619), True, 'import tensorflow as tf\n'), ((68513, 68538), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sub1"""'], {}), "('sub1')\n", (68530, 68538), True, 'import tensorflow as tf\n'), ((69122, 69147), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sub2"""'], {}), "('sub2')\n", (69139, 69147), True, 'import tensorflow as tf\n'), ((73045, 73070), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sub1"""'], {}), "('sub1')\n", (73062, 73070), True, 'import tensorflow as tf\n'), ((73664, 73689), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""sub2"""'], {}), "('sub2')\n", (73681, 73689), True, 'import tensorflow as tf\n'), ((74899, 75076), 'my.tensorflow.nn.linear', 'linear', (['orig_x', 'out_filter', '(True)'], {'bias_start': '(0.0)', 'scope': '"""linear_mapping_conv_mismatch"""', 'squeeze': '(False)', 'wd': 'config.wd', 'input_keep_prob': 'config.keep_rate', 'is_train': 'is_train'}), "(orig_x, out_filter, True, bias_start=0.0, scope=\n 'linear_mapping_conv_mismatch', squeeze=False, wd=config.wd,\n input_keep_prob=config.keep_rate, is_train=is_train)\n", (74905, 75076), False, 'from my.tensorflow.nn import softsel, 
get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((76105, 76167), 'my.tensorflow.nn.fuse_gate', 'fuse_gate', (['config', 'is_train', 'x', 'orig_x'], {'scope': '"""conv_fuse_gate"""'}), "(config, is_train, x, orig_x, scope='conv_fuse_gate')\n", (76114, 76167), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((86477, 86500), 'tensorflow.expand_dims', 'tf.expand_dims', (['keys', '(2)'], {}), '(keys, 2)\n', (86491, 86500), True, 'import tensorflow as tf\n'), ((86605, 86630), 'tensorflow.expand_dims', 'tf.expand_dims', (['values', '(2)'], {}), '(values, 2)\n', (86619, 86630), True, 'import tensorflow as tf\n'), ((8109, 8134), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""conv"""'], {}), "('conv')\n", (8126, 8134), True, 'import tensorflow as tf\n'), ((8184, 8289), 'my.tensorflow.nn.multi_conv1d', 'multi_conv1d', (['char_pre', 'filter_sizes', 'heights', '"""VALID"""', 'self.is_train', 'config.keep_rate'], {'scope': '"""conv"""'}), "(char_pre, filter_sizes, heights, 'VALID', self.is_train,\n config.keep_rate, scope='conv')\n", (8196, 8289), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((8379, 8484), 'my.tensorflow.nn.multi_conv1d', 'multi_conv1d', (['char_hyp', 'filter_sizes', 'heights', '"""VALID"""', 'self.is_train', 'config.keep_rate'], {'scope': '"""conv"""'}), "(char_hyp, filter_sizes, heights, 'VALID', self.is_train,\n config.keep_rate, scope='conv')\n", (8391, 8484), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((8520, 8590), 'tensorflow.reshape', 'tf.reshape', (['conv_pre', '[-1, self.sequence_length, 
config.char_out_size]'], {}), '(conv_pre, [-1, self.sequence_length, config.char_out_size])\n', (8530, 8590), True, 'import tensorflow as tf\n'), ((8630, 8700), 'tensorflow.reshape', 'tf.reshape', (['conv_hyp', '[-1, self.sequence_length, config.char_out_size]'], {}), '(conv_hyp, [-1, self.sequence_length, config.char_out_size])\n', (8640, 8700), True, 'import tensorflow as tf\n'), ((14429, 14452), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (14450, 14452), True, 'import tensorflow as tf\n'), ((15163, 15194), 'tensorflow.concat', 'tf.concat', (['[p, p * pre]'], {'axis': '(2)'}), '([p, p * pre], axis=2)\n', (15172, 15194), True, 'import tensorflow as tf\n'), ((15225, 15256), 'tensorflow.concat', 'tf.concat', (['[h, h * hyp]'], {'axis': '(2)'}), '([h, h * hyp], axis=2)\n', (15234, 15256), True, 'import tensorflow as tf\n'), ((21322, 21345), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (21343, 21345), True, 'import tensorflow as tf\n'), ((23171, 23203), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['bi_att_mx'], {'axis': '(3)'}), '(bi_att_mx, axis=3)\n', (23184, 23203), True, 'import tensorflow as tf\n'), ((23504, 23603), 'my.tensorflow.nn.fuse_gate', 'fuse_gate', (['config', 'self.is_train', 'bi_att_mx', 'bi_att_mx'], {'scope': '"""dense_attention_self_fuse_gate"""'}), "(config, self.is_train, bi_att_mx, bi_att_mx, scope=\n 'dense_attention_self_fuse_gate')\n", (23513, 23603), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((26651, 26695), 'tensorflow.subtract', 'tf.subtract', (['premise_final', 'hypothesis_final'], {}), '(premise_final, hypothesis_final)\n', (26662, 26695), True, 'import tensorflow as tf\n'), ((27008, 27031), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (27029, 27031), True, 'import tensorflow as tf\n'), ((27087, 27249), 
'my.tensorflow.nn.linear', 'linear', (['f0', 'self.LSTM_dim', '(True)'], {'bias_start': '(0.0)', 'scope': '"""bilinear"""', 'squeeze': '(False)', 'wd': 'config.wd', 'input_keep_prob': 'config.keep_rate', 'is_train': 'self.is_train'}), "(f0, self.LSTM_dim, True, bias_start=0.0, scope='bilinear', squeeze=\n False, wd=config.wd, input_keep_prob=config.keep_rate, is_train=self.\n is_train)\n", (27093, 27249), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((30662, 30686), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (30684, 30686), True, 'import tensorflow as tf\n'), ((31088, 31150), 'tensorflow.cast', 'tf.cast', (['((self.global_step - full_l2_step / 2) * 8)', 'tf.float32'], {}), '((self.global_step - full_l2_step / 2) * 8, tf.float32)\n', (31095, 31150), True, 'import tensorflow as tf\n'), ((31153, 31190), 'tensorflow.cast', 'tf.cast', (['(full_l2_step / 2)', 'tf.float32'], {}), '(full_l2_step / 2, tf.float32)\n', (31160, 31190), True, 'import tensorflow as tf\n'), ((31378, 31399), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['tensor'], {}), '(tensor)\n', (31391, 31399), True, 'import tensorflow as tf\n'), ((36687, 36749), 'tensorflow.cast', 'tf.cast', (['((self.global_step - full_l2_step / 2) * 8)', 'tf.float32'], {}), '((self.global_step - full_l2_step / 2) * 8, tf.float32)\n', (36694, 36749), True, 'import tensorflow as tf\n'), ((36752, 36789), 'tensorflow.cast', 'tf.cast', (['(full_l2_step / 2)', 'tf.float32'], {}), '(full_l2_step / 2, tf.float32)\n', (36759, 36789), True, 'import tensorflow as tf\n'), ((36998, 37019), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['tensor'], {}), '(tensor)\n', (37011, 37019), True, 'import tensorflow as tf\n'), ((37899, 37933), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'dtype': 'tf.float32'}), '(0.0, dtype=tf.float32)\n', (37910, 37933), True, 'import tensorflow as 
tf\n'), ((38051, 38063), 'tensorflow.abs', 'tf.abs', (['x[1]'], {}), '(x[1])\n', (38057, 38063), True, 'import tensorflow as tf\n'), ((38073, 38107), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'dtype': 'tf.float32'}), '(0.0, dtype=tf.float32)\n', (38084, 38107), True, 'import tensorflow as tf\n'), ((38249, 38283), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'dtype': 'tf.float32'}), '(0.0, dtype=tf.float32)\n', (38260, 38283), True, 'import tensorflow as tf\n'), ((40499, 40528), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['wgt_tmp'], {'dim': '(1)'}), '(wgt_tmp, dim=1)\n', (40512, 40528), True, 'import tensorflow as tf\n'), ((41757, 41786), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['wgt_tmp'], {'dim': '(1)'}), '(wgt_tmp, dim=1)\n', (41770, 41786), True, 'import tensorflow as tf\n'), ((42532, 42561), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['wgt_tmp'], {'dim': '(1)'}), '(wgt_tmp, dim=1)\n', (42545, 42561), True, 'import tensorflow as tf\n'), ((62519, 62591), 'my.tensorflow.nn.fuse_gate', 'fuse_gate', (['config', 'is_train', 'p', 'tmp_p'], {'scope': '"""self_att_fuse_gate_p_base"""'}), "(config, is_train, p, tmp_p, scope='self_att_fuse_gate_p_base')\n", (62528, 62591), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((68770, 68888), 'tensorflow.get_variable', 'tf.get_variable', (['"""weights"""'], {'shape': '[kernel_size, kernel_size, in_filter, out_filter]', 'dtype': '"""float"""', 'trainable': '(True)'}), "('weights', shape=[kernel_size, kernel_size, in_filter,\n out_filter], dtype='float', trainable=True)\n", (68785, 68888), True, 'import tensorflow as tf\n'), ((68912, 68972), 'tensorflow.get_variable', 'tf.get_variable', (['"""biases"""'], {'shape': '[out_filter]', 'dtype': '"""float"""'}), "('biases', shape=[out_filter], dtype='float')\n", (68927, 68972), True, 'import tensorflow as tf\n'), ((69380, 69499), 
'tensorflow.get_variable', 'tf.get_variable', (['"""weights"""'], {'shape': '[kernel_size, kernel_size, out_filter, out_filter]', 'dtype': '"""float"""', 'trainable': '(True)'}), "('weights', shape=[kernel_size, kernel_size, out_filter,\n out_filter], dtype='float', trainable=True)\n", (69395, 69499), True, 'import tensorflow as tf\n'), ((69523, 69583), 'tensorflow.get_variable', 'tf.get_variable', (['"""biases"""'], {'shape': '[out_filter]', 'dtype': '"""float"""'}), "('biases', shape=[out_filter], dtype='float')\n", (69538, 69583), True, 'import tensorflow as tf\n'), ((8789, 8821), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""char_linear"""'], {}), "('char_linear')\n", (8806, 8821), True, 'import tensorflow as tf\n'), ((8937, 9101), 'my.tensorflow.nn.linear', 'linear', (['conv_pre', 'conv_d', '(True)'], {'bias_start': '(0.0)', 'scope': '"""char_linear"""', 'squeeze': '(False)', 'wd': 'config.wd', 'input_keep_prob': 'config.keep_rate', 'is_train': 'self.is_train'}), "(conv_pre, conv_d, True, bias_start=0.0, scope='char_linear', squeeze\n =False, wd=config.wd, input_keep_prob=config.keep_rate, is_train=self.\n is_train)\n", (8943, 9101), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((9234, 9398), 'my.tensorflow.nn.linear', 'linear', (['conv_hyp', 'conv_d', '(True)'], {'bias_start': '(0.0)', 'scope': '"""char_linear"""', 'squeeze': '(False)', 'wd': 'config.wd', 'input_keep_prob': 'config.keep_rate', 'is_train': 'self.is_train'}), "(conv_hyp, conv_d, True, bias_start=0.0, scope='char_linear', squeeze\n =False, wd=config.wd, input_keep_prob=config.keep_rate, is_train=self.\n is_train)\n", (9240, 9398), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((15346, 15377), 'tensorflow.concat', 'tf.concat', (['[p, p 
- pre]'], {'axis': '(2)'}), '([p, p - pre], axis=2)\n', (15355, 15377), True, 'import tensorflow as tf\n'), ((15410, 15441), 'tensorflow.concat', 'tf.concat', (['[h, h - hyp]'], {'axis': '(2)'}), '([h, h - hyp], axis=2)\n', (15419, 15441), True, 'import tensorflow as tf\n'), ((17071, 17094), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (17092, 17094), True, 'import tensorflow as tf\n'), ((17486, 17509), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (17507, 17509), True, 'import tensorflow as tf\n'), ((21489, 21512), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (21510, 21512), True, 'import tensorflow as tf\n'), ((23003, 23045), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['bi_att_mx', 'config.keep_rate'], {}), '(bi_att_mx, config.keep_rate)\n', (23016, 23045), True, 'import tensorflow as tf\n'), ((29204, 29218), 'tensorflow.log', 'tf.log', (['sm_lgt'], {}), '(sm_lgt)\n', (29210, 29218), True, 'import tensorflow as tf\n'), ((29237, 29255), 'tensorflow.log', 'tf.log', (['(1 - sm_lgt)'], {}), '(1 - sm_lgt)\n', (29243, 29255), True, 'import tensorflow as tf\n'), ((31414, 31438), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (31436, 31438), True, 'import tensorflow as tf\n'), ((62659, 62735), 'my.tensorflow.nn.fuse_gate', 'fuse_gate', (['config', 'is_train', 'tmp_p', 'p'], {'scope': '"""self_att_fuse_gate_tmp_p_base"""'}), "(config, is_train, tmp_p, p, scope='self_att_fuse_gate_tmp_p_base')\n", (62668, 62735), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((62848, 62876), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""highway"""'], {}), "('highway')\n", (62865, 62876), True, 'import tensorflow as tf\n'), ((62908, 63003), 'my.tensorflow.nn.highway_network', 'highway_network', (['self_att', 
'config.highway_num_layers', '(True)'], {'wd': 'config.wd', 'is_train': 'is_train'}), '(self_att, config.highway_num_layers, True, wd=config.wd,\n is_train=is_train)\n', (62923, 63003), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((69001, 69057), 'tensorflow.nn.atrous_conv2d', 'tf.nn.atrous_conv2d', (['x', 'filters'], {'rate': '(2)', 'padding': 'padding'}), '(x, filters, rate=2, padding=padding)\n', (69020, 69057), True, 'import tensorflow as tf\n'), ((69612, 69668), 'tensorflow.nn.atrous_conv2d', 'tf.nn.atrous_conv2d', (['x', 'filters'], {'rate': '(2)', 'padding': 'padding'}), '(x, filters, rate=2, padding=padding)\n', (69631, 69668), True, 'import tensorflow as tf\n'), ((73595, 73629), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['x', 'config.keep_rate'], {}), '(x, config.keep_rate)\n', (73608, 73629), True, 'import tensorflow as tf\n'), ((74371, 74405), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['x', 'config.keep_rate'], {}), '(x, config.keep_rate)\n', (74384, 74405), True, 'import tensorflow as tf\n'), ((76452, 76482), 'tensorflow.concat', 'tf.concat', (['[x, orig_x]'], {'axis': '(3)'}), '([x, orig_x], axis=3)\n', (76461, 76482), True, 'import tensorflow as tf\n'), ((9523, 9556), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""char_highway"""'], {}), "('char_highway')\n", (9540, 9556), True, 'import tensorflow as tf\n'), ((9610, 9705), 'my.tensorflow.nn.highway_network', 'highway_network', (['conv_pre', '(1)', '(True)'], {'scope': '"""char_conv"""', 'wd': 'config.wd', 'is_train': 'self.is_train'}), "(conv_pre, 1, True, scope='char_conv', wd=config.wd,\n is_train=self.is_train)\n", (9625, 9705), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((9801, 9896), 'my.tensorflow.nn.highway_network', 
'highway_network', (['conv_hyp', '(1)', '(True)'], {'scope': '"""char_conv"""', 'wd': 'config.wd', 'is_train': 'self.is_train'}), "(conv_hyp, 1, True, scope='char_conv', wd=config.wd,\n is_train=self.is_train)\n", (9816, 9896), False, 'from my.tensorflow.nn import softsel, get_logits, highway_network, multi_conv1d, linear, conv2d, cosine_similarity, variable_summaries, dense_logits, fuse_gate\n'), ((14869, 14892), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (14890, 14892), True, 'import tensorflow as tf\n'), ((15537, 15573), 'tensorflow.concat', 'tf.concat', (['[pre, p, p * pre]'], {'axis': '(2)'}), '([pre, p, p * pre], axis=2)\n', (15546, 15573), True, 'import tensorflow as tf\n'), ((15606, 15642), 'tensorflow.concat', 'tf.concat', (['[hyp, h, h * hyp]'], {'axis': '(2)'}), '([hyp, h, h * hyp], axis=2)\n', (15615, 15642), True, 'import tensorflow as tf\n'), ((16171, 16194), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (16192, 16194), True, 'import tensorflow as tf\n'), ((17268, 17291), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (17289, 17291), True, 'import tensorflow as tf\n'), ((17706, 17729), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (17727, 17729), True, 'import tensorflow as tf\n'), ((18191, 18214), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (18212, 18214), True, 'import tensorflow as tf\n'), ((76614, 76649), 'tensorflow.concat', 'tf.concat', (['[orig_x, orig_x]'], {'axis': '(3)'}), '([orig_x, orig_x], axis=3)\n', (76623, 76649), True, 'import tensorflow as tf\n'), ((15743, 15788), 'tensorflow.concat', 'tf.concat', (['[pre, p, p * pre, p - pre]'], {'axis': '(2)'}), '([pre, p, p * pre, p - pre], axis=2)\n', (15752, 15788), True, 'import tensorflow as tf\n'), ((15821, 15866), 'tensorflow.concat', 'tf.concat', (['[hyp, h, h * hyp, h - hyp]'], {'axis': '(2)'}), '([hyp, h, h * hyp, h - hyp], axis=2)\n', 
(15830, 15866), True, 'import tensorflow as tf\n'), ((16408, 16431), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (16429, 16431), True, 'import tensorflow as tf\n'), ((18609, 18632), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (18630, 18632), True, 'import tensorflow as tf\n'), ((15958, 15985), 'tensorflow.concat', 'tf.concat', (['[p, pre]'], {'axis': '(2)'}), '([p, pre], axis=2)\n', (15967, 15985), True, 'import tensorflow as tf\n'), ((16018, 16045), 'tensorflow.concat', 'tf.concat', (['[h, hyp]'], {'axis': '(2)'}), '([h, hyp], axis=2)\n', (16027, 16045), True, 'import tensorflow as tf\n')] |
import math
import numpy as np
import pandas as pd
import tqdm
from scipy.sparse import coo_matrix
from typing import List, Optional
import matplotlib
from matplotlib import pyplot as plt
import matplotlib.patches as patches
from matplotlib.path import Path
from matplotlib.collections import PathCollection
import matplotlib.transforms as mtransforms
from matplotlib.lines import lineMarkers
from utils import Multidict, get_angle, get_lower_bounds
from database import Graph, CelestialGraph
#from solver import AngularGraphSolution
def visualize_graph_2d(graph: Graph, savePath=None):
    """Render *graph* in 2D and either display it or write it to *savePath*.

    A path ending in "ipe" is exported through the ipe backend (the
    previous backend is restored afterwards); any other path goes straight
    to ``plt.savefig``. With no path the figure is shown interactively.
    """
    plt.figure()
    axis = plt.subplot()
    axis.axis('off')
    _visualize_edges_2d(graph)
    _visualize_vertices_2d(graph)
    _visualize_celest_body_2d(axis, graph)
    if not savePath:
        plt.show()
        return
    if savePath.endswith("ipe"):
        # Switch to the ipe backend only for this one export.
        previous_backend = matplotlib.get_backend()
        matplotlib.use('module://backend_ipe')
        plt.savefig(savePath, format="ipe")
        matplotlib.use(previous_backend)
    else:
        plt.savefig(savePath)
def visualize_min_sum_sol_2d(solution: 'AngularGraphSolution'):
    """Draw the solution graph and, per vertex, the turning arcs of its edge order.

    For every vertex the sequence of incident edges (taken from the
    solution's time-ordered edge list) is visualized as circular arcs
    around the vertex; arcs grow in radius whenever the turning direction
    changes, and the end point of each vertex's last arc is marked with a
    red dot. The figure is shown interactively.
    """
    graph = solution.graph
    fig = plt.figure()
    axis = plt.subplot()
    axis.axis('off')
    _visualize_edges_2d(solution.graph)
    _visualize_vertices_2d(solution.graph)
    _visualize_celest_body_2d(axis, solution.graph)
    # Make an edge order for vertices: each vertex maps to the edges
    # incident to it, in the chronological order of the solution.
    vertex_order = Multidict()
    ordered_times = solution.get_ordered_times()
    for time_key in ordered_times.get_ordered_keys():
        for edges in ordered_times[time_key]:
            # Only record the (u, v) orientation with u < v so each edge
            # is registered once per endpoint.
            if edges[0] < edges[1]:
                vertex_order[edges[0]] = edges
                vertex_order[edges[1]] = edges
    # Get minimum edge length (floored at 0.4) to scale the arc radii.
    min_length = max(np.array(
        [
            np.linalg.norm(solution.graph.vertices[i] - solution.graph.vertices[j])
            for i, j in solution.graph.edges
        ]
    ).min(), 0.4)
    # Draws the angle paths in a circular fashion
    path_list = []
    last_points = []
    for vertex_key in vertex_order:
        last_edge = None
        last_direction = None
        current_min_length = min_length * 0.3
        last_point = None
        for edge in vertex_order[vertex_key]:
            # An arc is drawn between each consecutive pair of edges.
            if last_edge:
                # The two neighbor vertices this arc sweeps between.
                other_vertices = np.hstack([
                    np.setdiff1d(np.array(last_edge), np.array([vertex_key])),
                    np.setdiff1d(np.array(edge), np.array([vertex_key]))
                ])
                # Angles of both neighbors measured against the positive
                # x-axis at the current vertex.
                angles = [get_angle(
                    graph.vertices[vertex_key],
                    graph.vertices[vertex_key] + [1, 0],
                    graph.vertices[other_vertex]) for other_vertex in other_vertices]
                # If y-coord is below the current vertex we need to calculate the angle different
                for i in range(len(angles)):
                    if graph.vertices[other_vertices[i]][1] < graph.vertices[vertex_key][1]:
                        angles[i] = 360 - angles[i]
                # Calculate if we need to go from angle[0] to angle[1] or other way around
                # to not create an arc over 180 degrees
                diff = abs(angles[0] - angles[1])
                if diff > 180:
                    diff = 360 - diff
                normal_angle_direction = math.isclose((angles[0] + diff) % 360, angles[1], rel_tol=1e-5)
                if not normal_angle_direction:
                    angles = reversed(angles)
                # 1 shall be clockwise and -1 counter-clockwise direction
                current_direction = 1 if normal_angle_direction else -1
                if last_direction:
                    if current_direction != last_direction: # direction change happened
                        # Enlarge the radius so the new arc does not
                        # overlap the previous one.
                        current_min_length *= 1.25
                # Transform the arc to the right position
                transform = mtransforms.Affine2D().scale(current_min_length, current_min_length)
                transform = transform.translate(*graph.vertices[vertex_key])
                arc = Path.arc(*angles)
                arc_t = arc.transformed(transform)
                if last_direction:
                    if current_direction != last_direction: # direction change happened
                        # Connect the end of the previous arc to the start
                        # of the new (larger-radius) arc with a straight
                        # bridge segment.
                        last_vertex = path_list[-1].vertices[-1] if last_direction == 1 else path_list[-1].vertices[0]
                        new_vertex = arc_t.vertices[0] if current_direction == 1 else arc_t.vertices[-1]
                        bridge_path = Path([last_vertex, new_vertex])
                        path_list.append(bridge_path)
                last_direction = current_direction
                path_list.append(arc_t)
                last_point = path_list[-1].vertices[-1] if last_direction == 1 else path_list[-1].vertices[0]
                last_points.append(last_point)
            last_edge = edge
        # Add these points to detect direction
        last_points.append(last_point)
    path_collection = PathCollection(path_list, edgecolor='r', facecolor='#00000000')
    axis.add_collection(path_collection)
    a_last_points = np.array([l for l in last_points if l is not None])
    plt.plot(a_last_points[:, 0], a_last_points[:, 1], 'r.')
    axis.autoscale()
    plt.show()
def visualize_solution_2d(solution: 'AngularGraphSolution', title=None, show_used=True):
    """Plot every time step of *solution* as one sub-axis of a square figure.

    Each subplot shows the full graph with the edges taken at that time
    highlighted; when *show_used* is true, edges from earlier steps stay
    highlighted in a different color. The figure is shown interactively.
    """
    fig = plt.figure()
    if title:
        fig.suptitle(title)
    ordered_times = solution.get_ordered_times()
    row_num, col_num = _calculate_row_col_needed(len(ordered_times))
    # Force a square canvas based on the default figure height.
    height = fig.get_size_inches()[1]
    fig.set_size_inches(height, height)
    already_used = []
    for cell, time in enumerate(ordered_times.get_ordered_keys(), start=1):
        axis = plt.subplot(row_num, col_num, cell)
        plt.title("t = {0}".format(round(time, 2)))
        axis.axis('off')
        _visualize_edges_2d(solution.graph, ordered_times[time], already_used)
        _visualize_vertices_2d(solution.graph)
        if show_used:
            already_used.extend(ordered_times[time])
        _visualize_celest_body_2d(axis, solution.graph)
    fig.tight_layout()
    plt.show()
def _visualize_edges_2d(graph: Graph, taken_edges=None, already_used=None):
    """Draw all edges of *graph* faintly; overlay highlights on top.

    *already_used* edges are drawn yellow, then *taken_edges* red, so the
    currently taken edges always end up on the topmost layer.
    """
    if graph.vertices.dtype == np.dtype('O'):
        # Normalize an object-dtype vertex array into a regular 2D array.
        graph.vertices = np.array(list(graph.vertices))
    for edge in graph.edges:
        endpoints = graph.vertices[edge]
        plt.plot(endpoints[:, 0], endpoints[:, 1], color='black', marker=',', alpha=0.3)
    # Highlight groups in draw order: used first, taken on top.
    for group, fmt in ((already_used, "y-"), (taken_edges, "r-")):
        if group:
            for indices in group:
                segment = np.array([graph.vertices[i] for i in indices])
                plt.plot(segment[:, 0], segment[:, 1], fmt)
def _visualize_vertices_2d(graph: Graph):
    """Scatter all graph vertices as blue dots on the current axes."""
    xs = graph.vertices[:, 0]
    ys = graph.vertices[:, 1]
    plt.plot(xs, ys, "b.")
def _visualize_celest_body_2d(axis, graph: Graph):
    """Draw the celestial bodies of a CelestialGraph as circular earth images.

    No-op for graphs that are not CelestialGraph instances.
    """
    if isinstance(graph, CelestialGraph):
        for body in graph.celestial_bodies:
            # Add earth as celestial object
            image = plt.imread("utils/figures/world-1303628_1920.png")
            radius = 870
            # Image pixels per coordinate unit for the assumed body radius.
            scale = len(image) / (radius*2)
            # NOTE(review): the extent scales the body *position* as well as
            # its size by `scale` — confirm this offset is intended.
            extent = (
                (body.position[0] - float(body.size)) * scale,
                (body.position[0] + float(body.size)) * scale,
                (body.position[1] - float(body.size)) * scale,
                (body.position[1] + float(body.size)) * scale
            )
            im = axis.imshow(image, extent=extent)
            pos = body.position
            # Clip the rectangular image down to the circular body outline.
            patch = patches.Circle(pos, radius=float(body.size), transform=axis.transData)
            im.set_clip_path(patch)
        axis.autoscale_view()
def _visualize_celest_body_2d_old(axis, graph: Graph):
    """Legacy variant: draw celestial bodies using the Apollo 17 earth photo."""
    if not isinstance(graph, CelestialGraph):
        return
    for body in graph.celestial_bodies:
        # Add earth as celestial object
        image = plt.imread("utils/figures/720px-The_Earth_seen_from_Apollo_17.jpg")
        radius = 320
        scale = len(image) / (radius * 2)
        size = float(body.size)
        x = body.position[0]
        y = body.position[1]
        # Extent order: left, right, bottom, top (all scaled like the image).
        extent = ((x - size) * scale,
                  (x + size) * scale,
                  (y - size) * scale,
                  (y + size) * scale)
        shown = axis.imshow(image, extent=extent)
        # Clip the rectangular photo to the circular body outline.
        clip = patches.Circle(body.position, radius=size, transform=axis.transData)
        shown.set_clip_path(clip)
    axis.autoscale_view()
def _calculate_row_col_needed(cells_needed: int):
# Calculate the quadratic amount needed
# Aim is to get it as quadratic as possible
# Maybe later aim to get a ratio near display ratio?
quad_num = math.ceil(math.sqrt(cells_needed))
# Calculate how many rows are now actually needed
row_num = math.ceil(cells_needed / quad_num)
return row_num, quad_num
# Maps internal solution-type keys to the corresponding DataFrame column labels.
_sol_type_to_label = {"runtime": "Runtime", "min_sum": "MinSum", "local_min_sum": "LocalMinSum", "makespan": "Makespan"}
class VisTypes:
    """Plot-mode constants for visualize_solution_scatter / _plot_data."""
    Absolute = 0  # raw objective values
    VsBest = 1  # ratio to the best solver's value per graph
    VsLB = 2  # ratio to the computed lower bound
    All = 3  # 2x2 grid combining Absolute, VsBest, VsLB and runtime
    LB_Runtime = 4  # 1x2 grid: VsLB plus absolute runtime
# From https://stackoverflow.com/questions/55767312/how-to-position-suptitle
def _make_space_above(axes, topmargin=1):
    """Grow the figure by *topmargin* inches above the axes so a suptitle
    fits, while leaving every axes at its current size."""
    figure = axes.flatten()[0].figure
    params = figure.subplotpars
    _, height = figure.get_size_inches()
    # New total height: the current axes region plus the requested margin.
    new_height = height - (1 - params.top) * height + topmargin
    figure.subplots_adjust(
        bottom=params.bottom * height / new_height,
        top=1 - topmargin / new_height,
    )
    figure.set_figheight(new_height)
def visualize_solution_scatter(jobs: List['TaskJobs'], title,
                               path: Optional[str]=None, solution_type: Optional[str]=None,
                               logscale=False, ylabel=None, vis_type=VisTypes.Absolute,
                               loc=1, bbox_pos=None, top_margin=0.65,
                               show_legend=True):
    """Scatter-plot solver results over edge amount, in one of several modes.

    :param jobs: TaskJobs whose (non-None) solutions are plotted.
    :param title: plot title (used in the single-plot mode; saved figures
        for multi-plot modes carry no suptitle).
    :param path: if given, save the figure there ("...ipe" switches to the
        ipe backend); otherwise show it interactively.
    :param solution_type: objective key; inferred from the jobs when None.
    :param logscale: bool or list of bools (one per subplot) for log y-axes.
    :param ylabel: optional y-axis label override.
    :param vis_type: one of the VisTypes constants.
    :param loc, bbox_pos: legend placement forwarded to fig.legend.
    :param top_margin: extra headroom (inches) added above multi-plot grids.
    :param show_legend: kept for interface compatibility; the single-plot
        branch always shows the legend.
    """
    if solution_type is None:
        solution_type = _get_dominant_solution_type(jobs)
    if not ylabel:
        y_label = _sol_type_to_label[solution_type]
    else:
        # Bugfix: a caller-supplied ylabel previously left y_label unset,
        # raising NameError in every plotting branch below.
        y_label = ylabel
    # Warm the lower-bound cache so the DataFrame build below is fast.
    for s in tqdm.tqdm(jobs, desc="Calculate lower bounds"):
        if s.solution is not None:
            _get_LB(s.solution, solution_type)
    df = pd.DataFrame(
        [
            {
                "Solver": _get_solver_name(job),
                "VertAmount": job.solution.graph.vert_amount,
                "EdgeAmount": job.solution.graph.edge_amount,
                "Graph_id": job.solution.graph.id,
                # Chained jobs report the combined runtime of both stages.
                "Runtime": float(job.solution.runtime) if job.prev_job is None else float(job.prev_job.solution.runtime+job.solution.runtime),
                "MinSum": job.solution.min_sum,
                "LocalMinSum": job.solution.local_min_sum,
                "Makespan": job.solution.makespan,
                "LB": _get_LB(job.solution, solution_type)}
            for job in tqdm.tqdm(jobs, desc="Collect solution information") if job.solution is not None
        ])
    # Then plot the data
    if vis_type == VisTypes.All:
        fig, axes = plt.subplots(nrows=2, ncols=2, sharex=True)
        #fig.suptitle(title)
        # Normalize logscale to a list with one entry per subplot.
        if isinstance(logscale, bool):
            logscale = [logscale for i in range(4)]
        if len(logscale) < 4:
            logscale = logscale + [False for i in range(4-len(logscale))]
        label_cols = 3
        top_margin = top_margin+0.2
        columns = _plot_data(df, solution_type, "Edge amount", y_label, logscale=logscale[0], vis_type=VisTypes.Absolute, ax=axes[0,0],)# show_legend=True)
        columns = _plot_data(df, solution_type, "Edge amount", y_label, logscale=logscale[1], vis_type=VisTypes.VsBest, ax=axes[0,1],)# show_legend=True)
        columns = _plot_data(df, solution_type, "Edge amount", y_label, logscale=logscale[2], vis_type=VisTypes.VsLB, ax=axes[1,0],)# show_legend=True)
        columns = _plot_data(df, "runtime", "Edge amount", "Runtime", logscale=logscale[3], vis_type=VisTypes.Absolute, ax=axes[1,1],)# show_legend=True)
        fig.set_size_inches(fig.get_size_inches()*1.5)
        fig.tight_layout()
        # One shared legend for all four subplots.
        handles, labels = axes[1, 1].get_legend_handles_labels()
        fig.legend(handles, labels, loc=loc, bbox_to_anchor=bbox_pos,
                   ncol=label_cols)
        _make_space_above(axes, top_margin)
        #fig.legend([m_i[1] for m_i in columns], loc=loc, bbox_to_anchor=bbox_pos)
    elif vis_type == VisTypes.LB_Runtime:
        fig, axes = plt.subplots(nrows=1, ncols=2, sharex=True)
        #fig.suptitle(title)
        if isinstance(logscale, bool):
            logscale = [logscale for i in range(2)]
        # NOTE(review): guard checks < 4 but pads to length 2; harmless since
        # range(2 - len) is empty for len >= 2, but likely meant `< 2`.
        if len(logscale) < 4:
            logscale = logscale + [False for i in range(2-len(logscale))]
        label_cols = 3
        top_margin = top_margin+0.25
        columns = _plot_data(df, solution_type, "Edge amount", y_label, logscale=logscale[0], vis_type=VisTypes.VsLB, ax=axes[0],)# show_legend=True)
        columns = _plot_data(df, "runtime", "Edge amount", "Runtime", logscale=logscale[1], vis_type=VisTypes.Absolute, ax=axes[1],)# show_legend=True)
        fig.set_size_inches(fig.get_size_inches()*(1.3, 0.9))
        fig.tight_layout()
        handles, labels = axes[1].get_legend_handles_labels()
        fig.legend(handles, labels, loc=loc, bbox_to_anchor=bbox_pos,
                   ncol=label_cols)
        _make_space_above(axes, top_margin)
    else:
        columns = _plot_data(df, solution_type, "Edge amount", y_label, logscale=logscale, vis_type=vis_type, show_legend=True)
        plt.title(title)
    if path is None:
        plt.show()
    else:
        if path[-3:] == "ipe":
            # Temporarily switch to the ipe backend for .ipe output.
            old_backend = matplotlib.get_backend()
            matplotlib.use('module://backend_ipe')
            save_format = "ipe"
            plt.savefig(path, format=save_format)
            matplotlib.use(old_backend)
        else:
            plt.savefig(path)
def _get_LB(sol: "AngularGraphSolution", solution_type):
    """Return (and memoize per graph id) a lower bound for the given objective.

    Caches live as attributes on the function object itself (initialized
    right below the definition). Returns None for solution types without a
    bound computation (e.g. "runtime").
    """
    graph = sol.graph
    if solution_type == "local_min_sum":
        try:
            return _get_LB.local_min_sum_lbs[graph.id]
        except KeyError:
            # Local min-sum is bounded by the largest per-vertex lower bound.
            lb = max(get_lower_bounds(graph))
            _get_LB.local_min_sum_lbs[graph.id] = lb
            return lb
    if solution_type == "min_sum":
        try:
            return _get_LB.min_sum_lbs[graph.id]
        except KeyError:
            # Min-sum is bounded by the sum of all per-vertex lower bounds.
            lb = sum(get_lower_bounds(graph))
            _get_LB.min_sum_lbs[graph.id] = lb
            return lb
    if solution_type == "makespan":
        try:
            lb = _get_LB.makespan_lbs[graph.id]
        except KeyError:
            # Imports deferred: these solvers are only needed on a cache miss.
            from solver.coloring_solver import Coloring_CP_Solver
            from pyclustering.gcolor.dsatur import dsatur
            # Small instances get an exact coloring; larger ones DSATUR.
            if graph.edge_amount < 40:
                solver = Coloring_CP_Solver()
                colors = solver.solve(graph)
            else:
                dsatur_instance = dsatur(graph.ad_matrix)
                dsatur_instance.process()
                colors = dsatur_instance.get_colors()
            # Bound derived from the color count; assumes max(colors) equals
            # the number of colors used — TODO confirm numbering convention.
            lb = ((math.ceil(math.log2(max(colors)))-2) / 2) * 90
            _get_LB.makespan_lbs[graph.id] = lb
        if sol.makespan and lb > sol.makespan:
            # The cached bound exceeds an actual solution value, so it cannot
            # be valid; tighten it from the observed makespan instead.
            log_c_number = math.ceil(sol.makespan * 2 / 90) + 2
            lb2 = ((math.ceil(log_c_number)-2) / 2) * 90
            if lb > lb2:
                _get_LB.makespan_lbs[graph.id] = lb2
                lb = lb2
        return lb
# Per-objective memoization caches, keyed by graph id.
_get_LB.min_sum_lbs = {}
_get_LB.local_min_sum_lbs = {}
_get_LB.makespan_lbs = {}
def _get_dominant_solution_type(jobs: List['TaskJobs']):
    """Return the most frequent solution_type among jobs that have a solution."""
    observed = np.array([job.solution.solution_type
                         for job in tqdm.tqdm(jobs, desc="Load solutions")
                         if job.solution is not None])
    distinct_types, counts = np.unique(observed, return_counts=True)
    return distinct_types[np.argmax(counts)]
# Matplotlib marker cycle used to distinguish solver series in scatter plots.
MARKERS = ['o', '^', 'v', 'h', '*', 'x', 'd', 'P', '1', '.']
def _plot_data(data: pd.DataFrame, solution_type, xlabel, ylabel, logscale=False, markers: Optional[List[str]] = None, vis_type=0, ax: Optional[plt.Axes] = None,
               show_legend=False):
    """Scatter one solver series per column of the pivoted result table.

    :param data: long-format result DataFrame (one row per job).
    :param solution_type: objective key selecting the value column to plot.
    :param xlabel, ylabel: axis label texts.
    :param logscale: if truthy, use a logarithmic y-axis.
    :param markers: optional marker cycle; defaults to MARKERS.
    :param vis_type: VisTypes constant selecting absolute / vs-best / vs-LB.
    :param ax: existing axes to draw into; a new plot is created when None.
    :param show_legend: hide the per-axes legend when False.
    :return: the columns of the pivoted table (solver names per value column).
    """
    if not markers:
        markers = MARKERS
    # Repeat the marker cycle so there is one entry per data row.
    markers = [markers[i % len(markers)] for i in range(len(data))]
    label = _sol_type_to_label[solution_type]
    # Wide table: one column per solver, indexed by (graph, edge amount).
    reshaped = data.pivot_table(data, index=['Graph_id', 'EdgeAmount'], columns=['Solver'])
    if vis_type == VisTypes.VsBest:
        # Normalize each graph's values by the best solver's value.
        mins = data[['Graph_id', 'Solver', 'EdgeAmount', label]]\
            .groupby(['Graph_id']).min()
        mins = mins.pivot_table(mins, index=["Graph_id", "EdgeAmount"])
        plot_table = reshaped[label].divide(mins[label], axis=0).reset_index()
    elif vis_type == VisTypes.VsLB:
        # Normalize by the precomputed lower bound; a zero LB yields inf,
        # which is blanked out below.
        l_bs = data[['Graph_id', 'EdgeAmount', "LB"]]
        l_bs = l_bs.pivot_table(l_bs, index=["Graph_id", "EdgeAmount"])
        plot_table = reshaped[label].divide(l_bs["LB"], axis=0).reset_index()
        plot_table = plot_table.replace([np.inf, -np.inf], np.nan)
    else:
        plot_table = reshaped[label].reset_index()
    if ax:
        plot_table.plot(1, range(2, len(plot_table.columns)), ax=ax, alpha=0.5)
    else:
        ax = plot_table.plot(1, range(2, len(plot_table.columns)))
    for i, line in enumerate(ax.get_lines()):
        # Bugfix: previously indexed MARKERS[i] directly, ignoring the
        # `markers` parameter and raising IndexError beyond 10 lines.
        line.set_marker(markers[i % len(markers)])
        line.set_linestyle('')
    if not show_legend:
        ax.legend().set_visible(False)
    ax.xaxis.label.set_text(xlabel)
    if logscale:
        ax.set_yscale("log")
    #l = _calculate_labels(ax, plot_table)
    #ax.set_yticks(l)
    #ax.set_yticklabels(l)
    if vis_type == VisTypes.VsBest:
        ax.yaxis.label.set_text(f"{ylabel} / best sol")
    elif vis_type == VisTypes.VsLB:
        ax.yaxis.label.set_text(f"{ylabel} / lower bound")
        # Large makespan instances only have a heuristic coloring bound.
        if solution_type == "makespan" and data["EdgeAmount"].max() > 40:
            ax.yaxis.label.set_text(f"{ylabel} / heuristical bound")
        """if max(plot_table[solver_columns].max()) < 25:
            ax.set_yticks([1,5,10,20])
            ax.set_yticklabels([1,5,10,20])
        elif max(plot_table[solver_columns].max()) < 100:
            l = [1,10,20,50,80]
            ax.set_yticks(l)
            ax.set_yticklabels(l)
        elif max(plot_table[solver_columns].max()) < 3.25:
            ax.set_yticks(np.arange(1, 3.25, step=0.25))
            ax.set_yticklabels(np.arange(1, 3.25, step=0.25))"""
    else:
        ax.yaxis.label.set_text(ylabel)
    return reshaped.columns
def _get_solver_name(job: "TaskJobs"):
name = job.solution.solver if job.prev_job is None else job.prev_job.solution.solver+"+"+job.solution.solver
name = name.replace("Angular","")
return name
def _calculate_labels(ax, plot_table):
solver_columns = [solver_columns for solver_columns in plot_table.columns if solver_columns not in ["Graph_id", "EdgeAmount"]]
min_l = min(plot_table[solver_columns].min())
max_l = max(plot_table[solver_columns].max())
min_10_base = math.floor(math.log10(min_l))
max_10_base = math.ceil(math.log10(max_l))
if min_10_base + 1 == max_10_base and (max_l - min_l < 3.25 * 10**min_10_base):
if max_l - min_l < 3.25 * 10**min_10_base:
l = np.arange(1*10**min_10_base, max_l, step=0.2*10**min_10_base)
if max_l - min_l > 3.25 * 10**min_10_base:
l = np.arange(1*10**min_10_base, max_l, step=0.25*10**min_10_base)
else:
l_base = [i for i in range(1,10)]
if min_10_base +2 <= max_10_base:
l_base = [1,2,5]
if min_10_base + 4 <= max_10_base:
l_base = [1, 5]
if min_10_base + 8 < max_10_base:
l_base = [1]
l = []
multiplicator = 10**-3
while not l or (l[-1] < max_l*1.2 and multiplicator < max_l*1.2):
for n in l_base:
if min_l*0.75 <= multiplicator * n <= max_l * 1.2:
l.append(multiplicator * n)
multiplicator *= 10
return l
| [
"matplotlib.collections.PathCollection",
"math.sqrt",
"numpy.array",
"utils.get_lower_bounds",
"numpy.linalg.norm",
"math.log10",
"utils.Multidict",
"numpy.arange",
"matplotlib.path.Path",
"pyclustering.gcolor.dsatur.dsatur",
"matplotlib.pyplot.plot",
"matplotlib.get_backend",
"numpy.dtype",... | [((600, 612), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (610, 612), True, 'from matplotlib import pyplot as plt\n'), ((624, 637), 'matplotlib.pyplot.subplot', 'plt.subplot', ([], {}), '()\n', (635, 637), True, 'from matplotlib import pyplot as plt\n'), ((1274, 1286), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1284, 1286), True, 'from matplotlib import pyplot as plt\n'), ((1298, 1311), 'matplotlib.pyplot.subplot', 'plt.subplot', ([], {}), '()\n', (1309, 1311), True, 'from matplotlib import pyplot as plt\n'), ((1531, 1542), 'utils.Multidict', 'Multidict', ([], {}), '()\n', (1540, 1542), False, 'from utils import Multidict, get_angle, get_lower_bounds\n'), ((5133, 5196), 'matplotlib.collections.PathCollection', 'PathCollection', (['path_list'], {'edgecolor': '"""r"""', 'facecolor': '"""#00000000"""'}), "(path_list, edgecolor='r', facecolor='#00000000')\n", (5147, 5196), False, 'from matplotlib.collections import PathCollection\n'), ((5258, 5309), 'numpy.array', 'np.array', (['[l for l in last_points if l is not None]'], {}), '([l for l in last_points if l is not None])\n', (5266, 5309), True, 'import numpy as np\n'), ((5314, 5370), 'matplotlib.pyplot.plot', 'plt.plot', (['a_last_points[:, 0]', 'a_last_points[:, 1]', '"""r."""'], {}), "(a_last_points[:, 0], a_last_points[:, 1], 'r.')\n", (5322, 5370), True, 'from matplotlib import pyplot as plt\n'), ((5396, 5406), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5404, 5406), True, 'from matplotlib import pyplot as plt\n'), ((5507, 5519), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5517, 5519), True, 'from matplotlib import pyplot as plt\n'), ((6435, 6445), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6443, 6445), True, 'from matplotlib import pyplot as plt\n'), ((7167, 7225), 'matplotlib.pyplot.plot', 'plt.plot', (['graph.vertices[:, 0]', 'graph.vertices[:, 1]', '"""b."""'], {}), "(graph.vertices[:, 0], graph.vertices[:, 1], 
'b.')\n", (7175, 7225), True, 'from matplotlib import pyplot as plt\n'), ((9303, 9337), 'math.ceil', 'math.ceil', (['(cells_needed / quad_num)'], {}), '(cells_needed / quad_num)\n', (9312, 9337), False, 'import math\n'), ((10610, 10656), 'tqdm.tqdm', 'tqdm.tqdm', (['jobs'], {'desc': '"""Calculate lower bounds"""'}), "(jobs, desc='Calculate lower bounds')\n", (10619, 10656), False, 'import tqdm\n'), ((16201, 16240), 'numpy.unique', 'np.unique', (['sol_type'], {'return_counts': '(True)'}), '(sol_type, return_counts=True)\n', (16210, 16240), True, 'import numpy as np\n'), ((16257, 16275), 'numpy.argmax', 'np.argmax', (['counter'], {}), '(counter)\n', (16266, 16275), True, 'import numpy as np\n'), ((1161, 1171), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1169, 1171), True, 'from matplotlib import pyplot as plt\n'), ((5939, 5971), 'matplotlib.pyplot.subplot', 'plt.subplot', (['row_num', 'col_num', 'i'], {}), '(row_num, col_num, i)\n', (5950, 5971), True, 'from matplotlib import pyplot as plt\n'), ((6554, 6567), 'numpy.dtype', 'np.dtype', (['"""O"""'], {}), "('O')\n", (6562, 6567), True, 'import numpy as np\n'), ((6594, 6631), 'numpy.array', 'np.array', (['[p for p in graph.vertices]'], {}), '([p for p in graph.vertices])\n', (6602, 6631), True, 'import numpy as np\n'), ((6669, 6776), 'matplotlib.pyplot.plot', 'plt.plot', (['graph.vertices[edge][:, 0]', 'graph.vertices[edge][:, 1]'], {'color': '"""black"""', 'marker': '""","""', 'alpha': '(0.3)'}), "(graph.vertices[edge][:, 0], graph.vertices[edge][:, 1], color=\n 'black', marker=',', alpha=0.3)\n", (6677, 6776), True, 'from matplotlib import pyplot as plt\n'), ((9210, 9233), 'math.sqrt', 'math.sqrt', (['cells_needed'], {}), '(cells_needed)\n', (9219, 9233), False, 'import math\n'), ((11533, 11576), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(2)', 'sharex': '(True)'}), '(nrows=2, ncols=2, sharex=True)\n', (11545, 11576), True, 'from matplotlib import pyplot as plt\n'), 
((14051, 14061), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14059, 14061), True, 'from matplotlib import pyplot as plt\n'), ((19461, 19478), 'math.log10', 'math.log10', (['min_l'], {}), '(min_l)\n', (19471, 19478), False, 'import math\n'), ((19508, 19525), 'math.log10', 'math.log10', (['max_l'], {}), '(max_l)\n', (19518, 19525), False, 'import math\n'), ((859, 883), 'matplotlib.get_backend', 'matplotlib.get_backend', ([], {}), '()\n', (881, 883), False, 'import matplotlib\n'), ((896, 934), 'matplotlib.use', 'matplotlib.use', (['"""module://backend_ipe"""'], {}), "('module://backend_ipe')\n", (910, 934), False, 'import matplotlib\n'), ((979, 1020), 'matplotlib.pyplot.savefig', 'plt.savefig', (['savePath'], {'format': 'save_format'}), '(savePath, format=save_format)\n', (990, 1020), True, 'from matplotlib import pyplot as plt\n'), ((1033, 1060), 'matplotlib.use', 'matplotlib.use', (['old_backend'], {}), '(old_backend)\n', (1047, 1060), False, 'import matplotlib\n'), ((1099, 1120), 'matplotlib.pyplot.savefig', 'plt.savefig', (['savePath'], {}), '(savePath)\n', (1110, 1120), True, 'from matplotlib import pyplot as plt\n'), ((6849, 6895), 'numpy.array', 'np.array', (['[graph.vertices[i] for i in indices]'], {}), '([graph.vertices[i] for i in indices])\n', (6857, 6895), True, 'import numpy as np\n'), ((6908, 6946), 'matplotlib.pyplot.plot', 'plt.plot', (['edge[:, 0]', 'edge[:, 1]', '"""y-"""'], {}), "(edge[:, 0], edge[:, 1], 'y-')\n", (6916, 6946), True, 'from matplotlib import pyplot as plt\n'), ((7022, 7068), 'numpy.array', 'np.array', (['[graph.vertices[i] for i in indices]'], {}), '([graph.vertices[i] for i in indices])\n', (7030, 7068), True, 'import numpy as np\n'), ((7081, 7119), 'matplotlib.pyplot.plot', 'plt.plot', (['edge[:, 0]', 'edge[:, 1]', '"""r-"""'], {}), "(edge[:, 0], edge[:, 1], 'r-')\n", (7089, 7119), True, 'from matplotlib import pyplot as plt\n'), ((7428, 7478), 'matplotlib.pyplot.imread', 'plt.imread', 
(['"""utils/figures/world-1303628_1920.png"""'], {}), "('utils/figures/world-1303628_1920.png')\n", (7438, 7478), True, 'from matplotlib import pyplot as plt\n'), ((8299, 8366), 'matplotlib.pyplot.imread', 'plt.imread', (['"""utils/figures/720px-The_Earth_seen_from_Apollo_17.jpg"""'], {}), "('utils/figures/720px-The_Earth_seen_from_Apollo_17.jpg')\n", (8309, 8366), True, 'from matplotlib import pyplot as plt\n'), ((12920, 12963), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)', 'sharex': '(True)'}), '(nrows=1, ncols=2, sharex=True)\n', (12932, 12963), True, 'from matplotlib import pyplot as plt\n'), ((13999, 14015), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (14008, 14015), True, 'from matplotlib import pyplot as plt\n'), ((14129, 14153), 'matplotlib.get_backend', 'matplotlib.get_backend', ([], {}), '()\n', (14151, 14153), False, 'import matplotlib\n'), ((14166, 14204), 'matplotlib.use', 'matplotlib.use', (['"""module://backend_ipe"""'], {}), "('module://backend_ipe')\n", (14180, 14204), False, 'import matplotlib\n'), ((14249, 14286), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {'format': 'save_format'}), '(path, format=save_format)\n', (14260, 14286), True, 'from matplotlib import pyplot as plt\n'), ((14299, 14326), 'matplotlib.use', 'matplotlib.use', (['old_backend'], {}), '(old_backend)\n', (14313, 14326), False, 'import matplotlib\n'), ((14353, 14370), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {}), '(path)\n', (14364, 14370), True, 'from matplotlib import pyplot as plt\n'), ((19678, 19747), 'numpy.arange', 'np.arange', (['(1 * 10 ** min_10_base)', 'max_l'], {'step': '(0.2 * 10 ** min_10_base)'}), '(1 * 10 ** min_10_base, max_l, step=0.2 * 10 ** min_10_base)\n', (19687, 19747), True, 'import numpy as np\n'), ((19807, 19877), 'numpy.arange', 'np.arange', (['(1 * 10 ** min_10_base)', 'max_l'], {'step': '(0.25 * 10 ** min_10_base)'}), '(1 * 10 ** min_10_base, max_l, step=0.25 * 10 
** min_10_base)\n', (19816, 19877), True, 'import numpy as np\n'), ((3438, 3502), 'math.isclose', 'math.isclose', (['((angles[0] + diff) % 360)', 'angles[1]'], {'rel_tol': '(1e-05)'}), '((angles[0] + diff) % 360, angles[1], rel_tol=1e-05)\n', (3450, 3502), False, 'import math\n'), ((4172, 4189), 'matplotlib.path.Path.arc', 'Path.arc', (['*angles'], {}), '(*angles)\n', (4180, 4189), False, 'from matplotlib.path import Path\n'), ((11359, 11411), 'tqdm.tqdm', 'tqdm.tqdm', (['jobs'], {'desc': '"""Collect solution information"""'}), "(jobs, desc='Collect solution information')\n", (11368, 11411), False, 'import tqdm\n'), ((15693, 15725), 'math.ceil', 'math.ceil', (['(sol.makespan * 2 / 90)'], {}), '(sol.makespan * 2 / 90)\n', (15702, 15725), False, 'import math\n'), ((16111, 16149), 'tqdm.tqdm', 'tqdm.tqdm', (['jobs'], {'desc': '"""Load solutions"""'}), "(jobs, desc='Load solutions')\n", (16120, 16149), False, 'import tqdm\n'), ((2639, 2747), 'utils.get_angle', 'get_angle', (['graph.vertices[vertex_key]', '(graph.vertices[vertex_key] + [1, 0])', 'graph.vertices[other_vertex]'], {}), '(graph.vertices[vertex_key], graph.vertices[vertex_key] + [1, 0],\n graph.vertices[other_vertex])\n', (2648, 2747), False, 'from utils import Multidict, get_angle, get_lower_bounds\n'), ((14606, 14629), 'utils.get_lower_bounds', 'get_lower_bounds', (['graph'], {}), '(graph)\n', (14622, 14629), False, 'from utils import Multidict, get_angle, get_lower_bounds\n'), ((14849, 14872), 'utils.get_lower_bounds', 'get_lower_bounds', (['graph'], {}), '(graph)\n', (14865, 14872), False, 'from utils import Multidict, get_angle, get_lower_bounds\n'), ((15253, 15273), 'solver.coloring_solver.Coloring_CP_Solver', 'Coloring_CP_Solver', ([], {}), '()\n', (15271, 15273), False, 'from solver.coloring_solver import Coloring_CP_Solver\n'), ((15371, 15394), 'pyclustering.gcolor.dsatur.dsatur', 'dsatur', (['graph.ad_matrix'], {}), '(graph.ad_matrix)\n', (15377, 15394), False, 'from pyclustering.gcolor.dsatur 
import dsatur\n'), ((1905, 1976), 'numpy.linalg.norm', 'np.linalg.norm', (['(solution.graph.vertices[i] - solution.graph.vertices[j])'], {}), '(solution.graph.vertices[i] - solution.graph.vertices[j])\n', (1919, 1976), True, 'import numpy as np\n'), ((4004, 4026), 'matplotlib.transforms.Affine2D', 'mtransforms.Affine2D', ([], {}), '()\n', (4024, 4026), True, 'import matplotlib.transforms as mtransforms\n'), ((4660, 4691), 'matplotlib.path.Path', 'Path', (['[last_vertex, new_vertex]'], {}), '([last_vertex, new_vertex])\n', (4664, 4691), False, 'from matplotlib.path import Path\n'), ((15750, 15773), 'math.ceil', 'math.ceil', (['log_c_number'], {}), '(log_c_number)\n', (15759, 15773), False, 'import math\n'), ((2453, 2472), 'numpy.array', 'np.array', (['last_edge'], {}), '(last_edge)\n', (2461, 2472), True, 'import numpy as np\n'), ((2474, 2496), 'numpy.array', 'np.array', (['[vertex_key]'], {}), '([vertex_key])\n', (2482, 2496), True, 'import numpy as np\n'), ((2532, 2546), 'numpy.array', 'np.array', (['edge'], {}), '(edge)\n', (2540, 2546), True, 'import numpy as np\n'), ((2548, 2570), 'numpy.array', 'np.array', (['[vertex_key]'], {}), '([vertex_key])\n', (2556, 2570), True, 'import numpy as np\n')] |
import sys
sys.path.append("..")
import os
import json
import numpy as np
import pandas as pd
from tqdm import tqdm
import functools
import matplotlib.pyplot as plt
import tensorflow as tf
from dqn import molecules
from dqn import deep_q_networks
from dqn.py.SA_Score import sascorer
from chemutil import similarity
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem, Draw, Descriptors, QED
from tdc import Oracle
# QED drug-likeness oracle (TDC).
qed_oracle = Oracle(name = 'qed')
from tdc import Evaluator
diversity = Evaluator(name = 'Diversity')
import pyscreener
from tdc import Oracle
# Vina docking oracle against the DRD3 receptor; more negative scores are
# usually better binding — verify sign convention against the TDC docs.
oracle2 = Oracle(name = 'Docking_Score', software='vina',
				pyscreener_path = './',
				receptors=['/project/molecular_data/graphnn/pyscreener/testing_inputs/DRD3.pdb'],
				center=(9, 22.5, 26), size=(15, 15, 15),
				buffer=10, path='./', num_worker=3, ncpu=8)
def get_file_step_number(filename):
    """Extract the integer step number from a checkpoint name like 'ckpt-123.meta'."""
    step_token = filename.split('-')[1]
    return int(step_token.split('.')[0])
# Checkpoint directory to evaluate and the initial molecule ("CO" = methanol SMILES).
ckpt_folder = 'docking1'
input_smiles = 'CO'
result_folder = 'result'
# Collect checkpoint step numbers and checkpoint path prefixes (".meta" stripped).
step_list = [int(file.split('.')[0].split('-')[1]) for file in os.listdir(ckpt_folder) if file[:4]=='ckpt' and file[-4:] == 'meta']
file_list = [os.path.join(ckpt_folder, file[:-5]) for file in os.listdir(ckpt_folder) if file[:4]=='ckpt' and file[-4:] == 'meta']
def eval(model_file, input_smiles, epsilon = 0.1):
    """Run one molecule-editing episode with the DQN restored from a checkpoint.

    :param model_file: checkpoint path prefix to restore.
    :param input_smiles: SMILES string used as the initial molecule.
    :param epsilon: exploration rate passed to the action-selection step.
    :return: the final environment state (a SMILES string) after the episode.

    NOTE(review): this shadows the builtin ``eval``; consider renaming it and
    its call sites before reuse.
    """
    # hparams_file = os.path.join(model_dir, 'config.json')
    hparams_file = "./configs/naive_dqn.json"
    fh = open(hparams_file, 'r')
    hp_dict = json.load(fh)
    hparams = deep_q_networks.get_hparams(**hp_dict)
    fh.close()
    # Molecule-editing MDP environment seeded with the input molecule.
    environment = molecules.Molecule(
        atom_types=set(hparams.atom_types),
        init_mol=input_smiles,
        allow_removal=hparams.allow_removal,
        allow_no_modification=hparams.allow_no_modification,
        allowed_ring_sizes=set(hparams.allowed_ring_sizes),
        allow_bonds_between_rings=hparams.allow_bonds_between_rings,
        max_steps=hparams.max_steps_per_episode)
    dqn = deep_q_networks.DeepQNetwork(
        input_shape=(hparams.batch_size, hparams.fingerprint_length + 1),
        q_fn=functools.partial(
            deep_q_networks.multi_layer_model, hparams=hparams),
        optimizer=hparams.optimizer,
        grad_clipping=hparams.grad_clipping,
        num_bootstrap_heads=hparams.num_bootstrap_heads,
        gamma=hparams.gamma,
        epsilon=0.0)
    tf.reset_default_graph()
    with tf.Session() as sess:
        dqn.build()
        model_saver = tf.train.Saver(max_to_keep=hparams.max_num_checkpoints)
        model_saver.restore(sess, model_file)
        smiles_lst = []
        environment.initialize()
        for step in range(hparams.max_steps_per_episode):
            steps_left = hparams.max_steps_per_episode - environment.num_steps_taken
            # With bootstrap heads, a random head is sampled per step.
            if hparams.num_bootstrap_heads:
                head = np.random.randint(hparams.num_bootstrap_heads)
            else:
                head = 0
            valid_actions = list(environment.get_valid_actions())
            # Observation per candidate action: fingerprint + steps-left feature.
            observations = np.vstack([np.append(deep_q_networks.get_fingerprint(act, hparams), steps_left)
                                       for act in valid_actions])
            action = valid_actions[dqn.get_action(
                observations, head=head, update_epsilon=epsilon)]
            result = environment.step(action)
        return result.state
# Sample 100 molecules from every checkpoint and record their docking scores.
for file in tqdm(file_list):
    smiles_lst = []
    for i in range(100):
        smiles = eval(file, input_smiles)
        score = oracle2(smiles)
        smiles_lst.append((smiles, score))
    # One TSV result file per checkpoint: "<smiles>\t<score>" per line.
    result_file = os.path.join(result_folder, file.split('/')[-1])
    with open(result_file, 'w') as fout:
        for smiles, score in smiles_lst:
            fout.write(smiles + '\t' + str(score) + '\n')
| [
"os.listdir",
"tensorflow.reset_default_graph",
"dqn.deep_q_networks.get_hparams",
"tqdm.tqdm",
"os.path.join",
"tensorflow.Session",
"tensorflow.train.Saver",
"tdc.Evaluator",
"dqn.deep_q_networks.get_fingerprint",
"tdc.Oracle",
"numpy.random.randint",
"functools.partial",
"json.load",
"s... | [((11, 32), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (26, 32), False, 'import sys\n'), ((449, 467), 'tdc.Oracle', 'Oracle', ([], {'name': '"""qed"""'}), "(name='qed')\n", (455, 467), False, 'from tdc import Oracle\n'), ((508, 535), 'tdc.Evaluator', 'Evaluator', ([], {'name': '"""Diversity"""'}), "(name='Diversity')\n", (517, 535), False, 'from tdc import Evaluator\n'), ((592, 843), 'tdc.Oracle', 'Oracle', ([], {'name': '"""Docking_Score"""', 'software': '"""vina"""', 'pyscreener_path': '"""./"""', 'receptors': "['/project/molecular_data/graphnn/pyscreener/testing_inputs/DRD3.pdb']", 'center': '(9, 22.5, 26)', 'size': '(15, 15, 15)', 'buffer': '(10)', 'path': '"""./"""', 'num_worker': '(3)', 'ncpu': '(8)'}), "(name='Docking_Score', software='vina', pyscreener_path='./',\n receptors=[\n '/project/molecular_data/graphnn/pyscreener/testing_inputs/DRD3.pdb'],\n center=(9, 22.5, 26), size=(15, 15, 15), buffer=10, path='./',\n num_worker=3, ncpu=8)\n", (598, 843), False, 'from tdc import Oracle\n'), ((3287, 3302), 'tqdm.tqdm', 'tqdm', (['file_list'], {}), '(file_list)\n', (3291, 3302), False, 'from tqdm import tqdm\n'), ((1204, 1240), 'os.path.join', 'os.path.join', (['ckpt_folder', 'file[:-5]'], {}), '(ckpt_folder, file[:-5])\n', (1216, 1240), False, 'import os\n'), ((1522, 1535), 'json.load', 'json.load', (['fh'], {}), '(fh)\n', (1531, 1535), False, 'import json\n'), ((1548, 1586), 'dqn.deep_q_networks.get_hparams', 'deep_q_networks.get_hparams', ([], {}), '(**hp_dict)\n', (1575, 1586), False, 'from dqn import deep_q_networks\n'), ((2370, 2394), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (2392, 2394), True, 'import tensorflow as tf\n'), ((1122, 1145), 'os.listdir', 'os.listdir', (['ckpt_folder'], {}), '(ckpt_folder)\n', (1132, 1145), False, 'import os\n'), ((1253, 1276), 'os.listdir', 'os.listdir', (['ckpt_folder'], {}), '(ckpt_folder)\n', (1263, 1276), False, 'import os\n'), ((2402, 2414), 
'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2412, 2414), True, 'import tensorflow as tf\n'), ((2459, 2514), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': 'hparams.max_num_checkpoints'}), '(max_to_keep=hparams.max_num_checkpoints)\n', (2473, 2514), True, 'import tensorflow as tf\n'), ((2104, 2173), 'functools.partial', 'functools.partial', (['deep_q_networks.multi_layer_model'], {'hparams': 'hparams'}), '(deep_q_networks.multi_layer_model, hparams=hparams)\n', (2121, 2173), False, 'import functools\n'), ((2802, 2848), 'numpy.random.randint', 'np.random.randint', (['hparams.num_bootstrap_heads'], {}), '(hparams.num_bootstrap_heads)\n', (2819, 2848), True, 'import numpy as np\n'), ((2980, 3025), 'dqn.deep_q_networks.get_fingerprint', 'deep_q_networks.get_fingerprint', (['act', 'hparams'], {}), '(act, hparams)\n', (3011, 3025), False, 'from dqn import deep_q_networks\n')] |
# -*- coding: utf-8 -*-
# Brief: The simple drawer used to draw figures easily.
# Author: Gong
# Date: 2020.09.10
#
from matplotlib import pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
def draw3d(func, inputs=None, input_range=None):
    """Render func(X, Y) as a 3-D surface plot.

    :param func: vectorized callable of two coordinate arrays.
    :param inputs: optional pre-built (X, Y) grids, used as-is when given.
    :param input_range: ((x0, x1), (y0, y1)) sampling range when inputs is
        None; defaults to (-10, 10) on both axes.
    """
    fig = plt.figure()
    ax = Axes3D(fig)
    if input_range is None:
        input_range = ((-10, 10), (-10, 10))
    if inputs is None:
        # Sample a 100x100 grid over the requested range.
        (x_lo, x_hi), (y_lo, y_hi) = input_range
        X, Y = np.meshgrid(np.linspace(x_lo, x_hi, 100),
                           np.linspace(y_lo, y_hi, 100))
    else:
        X, Y = inputs[0], inputs[1]
    Z = func(X, Y)
    ax.plot_surface(X, Y, Z, cmap='rainbow')
    fig.show()
def draw2d(func, inputs=None, input_range=None):
    """Plot y = func(x) as a 2-D line chart.

    :param func: vectorized callable of one coordinate array.
    :param inputs: optional x samples, used as-is when given.
    :param input_range: (lo, hi) sampling range when inputs is None;
        defaults to (-10, 10).
    """
    if inputs is None:
        lo, hi = input_range if input_range is not None else (-10, 10)
        x = np.linspace(lo, hi, 100)
    else:
        x = inputs
    y = func(x)
    fig, ax = plt.subplots()
    ax.plot(x, y)
    fig.show()
| [
"matplotlib.pyplot.figure",
"numpy.linspace",
"numpy.meshgrid",
"matplotlib.pyplot.subplots",
"mpl_toolkits.mplot3d.Axes3D"
] | [((283, 295), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (293, 295), True, 'from matplotlib import pyplot as plt\n'), ((305, 316), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (311, 316), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((608, 625), 'numpy.meshgrid', 'np.meshgrid', (['X', 'Y'], {}), '(X, Y)\n', (619, 625), True, 'import numpy as np\n'), ((942, 956), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (954, 956), True, 'from matplotlib import pyplot as plt\n'), ((475, 529), 'numpy.linspace', 'np.linspace', (['input_range[0][0]', 'input_range[0][1]', '(100)'], {}), '(input_range[0][0], input_range[0][1], 100)\n', (486, 529), True, 'import numpy as np\n'), ((542, 596), 'numpy.linspace', 'np.linspace', (['input_range[1][0]', 'input_range[1][1]', '(100)'], {}), '(input_range[1][0], input_range[1][1], 100)\n', (553, 596), True, 'import numpy as np\n'), ((832, 880), 'numpy.linspace', 'np.linspace', (['input_range[0]', 'input_range[1]', '(100)'], {}), '(input_range[0], input_range[1], 100)\n', (843, 880), True, 'import numpy as np\n')] |
"""
Visualization for fig4
warning. terrible monkeypatched code!
"""
import matplotlib
from data.imsitu_loader import ImSitu, CudaDataLoader
from config import ModelConfig
import torch
from lib.misc import get_ranking
import numpy as np
from lib.imsitu_model import ImsituModel
import pandas as pd
from tqdm import tqdm
from lib.attribute_loss import AttributeLoss
from copy import deepcopy
from torch.autograd import Variable
from data.attribute_loader import Attributes
from config import ATTRIBUTES_SPLIT, ATTRIBUTES_PATH
from scipy.misc import imsave
import matplotlib.pyplot as plt
from PIL import Image
from subprocess import call
from torch.nn import functional as F
from lib.imsitu_model import dap_deploy, ours_deploy, devise_deploy, ours_logits
import pickle as pkl
def _gi(self, index):
    """Monkeypatched ImSitu.__getitem__ that additionally returns the filename."""
    fn, ind = self.examples[index]
    img = self.transform(Image.open(fn).convert('RGB'))
    return img, ind, fn
# Replace the dataset's __getitem__ so each sample carries its file path.
ImSitu.__getitem__ = _gi
train_data, val_data, test_data = ImSitu.splits(zeroshot=True, test_full=True)
def _load(self, item):
    """Patched ``CudaDataLoader._load`` that also passes file names through."""
    image, target = item[0], item[1]
    image = Variable(image, volatile=self.volatile)
    target = Variable(target, volatile=self.volatile)
    if torch.cuda.is_available():
        image = image.cuda()
        target = target.cuda()
    return image, target, item[2]
CudaDataLoader._load = _load
def collate_fn(data):
    """Collate ``(image, label, filename)`` triples into a batch.

    Returns a stacked image tensor, a ``LongTensor`` of labels and the tuple
    of file names (file names are carried through without stacking).
    """
    images, labels, filenames = zip(*data)
    batched_images = torch.stack(images, 0)
    batched_labels = torch.LongTensor(labels)
    return batched_images, batched_labels, filenames
# Deterministic (unshuffled) test loader; volatile=True since this script
# only runs inference.
test_iter = CudaDataLoader(
    dataset=test_data,
    batch_size=16,
    shuffle=False,
    num_workers=2,
    collate_fn=collate_fn,
    volatile=True,
)
# Per-attribute loss; also supplies the per-attribute domain sizes used by
# ours_logits below.
att_crit = AttributeLoss(train_data.attributes.domains, size_average=True)
if torch.cuda.is_available():
    test_data.attributes.cuda()
    att_crit.cuda()
# Recommended hyperparameters
args = ModelConfig(lr=2e-5, batch_size=32, eps=1e-8,
                   imsitu_model='ours', l2_weight=1e-2,
                   use_att=True, use_emb=True, ckpt='imsitu_ours/embatt/ckpt_7.tar',
                   )
m = ImsituModel(
    zeroshot=True,
    embed_dim=300 if args.use_emb else None,
    att_domains=att_crit.domains_per_att if args.use_att else None,
    l2_weight=args.l2_weight,
)
# Load pretrained weights and switch to eval mode (freezes dropout/batchnorm).
m.load_state_dict(torch.load(args.ckpt)['m_state_dict'])
m.eval()
# NOTE(review): the attributes/att_crit .cuda() calls repeat the block above;
# only m.cuda() is new here.
if torch.cuda.is_available():
    test_data.attributes.cuda()
    att_crit.cuda()
    m.cuda()
def ours_logits(m, x):
    """Monkeypatched scorer: return the two additive score components.

    Shadows the imported ``lib.imsitu_model.ours_logits`` on purpose (this
    whole script is monkeypatched, per the module docstring).

    Parameters
    ----------
    m : model; called on the image batch ``x``
    x : image batch

    Returns
    -------
    embed_logits : [batch, labels] scores from the embedding head
    affinities : [num_atts, batch, labels] per-attribute score contributions;
        their sum over dim 0 plus ``embed_logits`` gives the total logits
        (see the prediction loop below).
    """
    res = m(x)
    # NOTE(review): att_crit is a module-level global that is always defined
    # above, so this guard can never fire here.
    if (res.att_pred is not None) and (att_crit is None):
        raise ValueError("Attribute learning incomplete")
    embed_logits = res.embed_pred @ test_data.attributes.embeds.t()
    # We need a matrix of [att_dim, labels] now with everything +1 or -1
    att_mat = (-1) * torch.ones(att_crit.input_size, test_data.attributes.atts_matrix.size(0)).cuda()
    start_col = 0
    # Expand each attribute domain into its one-hot (+1 / -1) rows.
    for gt_col, d_size in enumerate(att_crit.domains_per_att):
        if d_size == 1:
            # Binary attribute: map {0, 1} -> {-1, +1}.
            att_mat[start_col] = test_data.attributes.atts_matrix[:, gt_col].data.float() * 2 - 1.0
        else:
            # Multiclass attribute: set the true class row to +1.
            att_mat[start_col:(start_col + d_size)].scatter_(
                0, test_data.attributes.atts_matrix[:, gt_col].data[None, :], 1.0)
        start_col += d_size
    att_mat = Variable(att_mat, volatile=True)
    # For each attribute dot product between att + example and att + label matrix affinitys
    att_aff = res.att_pred.t()[:,:,None]
    affinities = torch.bmm(att_aff, att_mat[:,None,:]) # number of atts, batch size, labels
    return embed_logits, affinities
# Don't take the mean until the end
# Collect (filename, gt label, softmax scores) for every correctly-classified
# test image plus a ~10% random sample of the misclassified ones.
datoms = []
for img_batch, label_batch, fns in tqdm(test_iter):
    gt_label = label_batch.data.cpu().numpy()
    embed_contrib, att_contrib = ours_logits(m, img_batch)
    # Total score per label = summed attribute affinities + embedding logits.
    preds = att_contrib.sum(0).squeeze() + embed_contrib
    sm = F.softmax(preds).data.cpu().numpy()
    # BUG FIX: sm.max(1) returns the maximum probability *values*, not the
    # predicted label indices, so `gt == pred` below was essentially never
    # true. argmax(1) yields the predicted labels.
    pred_label = sm.argmax(1)
    for i, (gt, pred, fn, s) in enumerate(zip(gt_label, pred_label, fns, sm)):
        if i % 10 == 0:
            print("labels gt {} fn {}".format(test_data.attributes.atts_df.index[gt], fn))
        if gt == pred:
            datoms.append((fn, gt, s))
        elif np.random.rand() < 0.1:
            datoms.append((fn, gt, s))
with open('cache.pkl', 'wb') as f:
    pkl.dump((datoms, test_data.attributes.atts_df.index), f)
| [
"torch.nn.functional.softmax",
"PIL.Image.open",
"pickle.dump",
"lib.imsitu_model.ours_logits",
"numpy.random.rand",
"config.ModelConfig",
"lib.attribute_loss.AttributeLoss",
"torch.LongTensor",
"torch.load",
"tqdm.tqdm",
"torch.stack",
"data.imsitu_loader.ImSitu.splits",
"torch.cuda.is_avai... | [((976, 1020), 'data.imsitu_loader.ImSitu.splits', 'ImSitu.splits', ([], {'zeroshot': '(True)', 'test_full': '(True)'}), '(zeroshot=True, test_full=True)\n', (989, 1020), False, 'from data.imsitu_loader import ImSitu, CudaDataLoader\n'), ((1470, 1591), 'data.imsitu_loader.CudaDataLoader', 'CudaDataLoader', ([], {'dataset': 'test_data', 'batch_size': '(16)', 'shuffle': '(False)', 'num_workers': '(2)', 'collate_fn': 'collate_fn', 'volatile': '(True)'}), '(dataset=test_data, batch_size=16, shuffle=False, num_workers\n =2, collate_fn=collate_fn, volatile=True)\n', (1484, 1591), False, 'from data.imsitu_loader import ImSitu, CudaDataLoader\n'), ((1681, 1744), 'lib.attribute_loss.AttributeLoss', 'AttributeLoss', (['train_data.attributes.domains'], {'size_average': '(True)'}), '(train_data.attributes.domains, size_average=True)\n', (1694, 1744), False, 'from lib.attribute_loss import AttributeLoss\n'), ((1749, 1774), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1772, 1774), False, 'import torch\n'), ((1865, 2024), 'config.ModelConfig', 'ModelConfig', ([], {'lr': '(2e-05)', 'batch_size': '(32)', 'eps': '(1e-08)', 'imsitu_model': '"""ours"""', 'l2_weight': '(0.01)', 'use_att': '(True)', 'use_emb': '(True)', 'ckpt': '"""imsitu_ours/embatt/ckpt_7.tar"""'}), "(lr=2e-05, batch_size=32, eps=1e-08, imsitu_model='ours',\n l2_weight=0.01, use_att=True, use_emb=True, ckpt=\n 'imsitu_ours/embatt/ckpt_7.tar')\n", (1876, 2024), False, 'from config import ModelConfig\n'), ((2077, 2242), 'lib.imsitu_model.ImsituModel', 'ImsituModel', ([], {'zeroshot': '(True)', 'embed_dim': '(300 if args.use_emb else None)', 'att_domains': '(att_crit.domains_per_att if args.use_att else None)', 'l2_weight': 'args.l2_weight'}), '(zeroshot=True, embed_dim=300 if args.use_emb else None,\n att_domains=att_crit.domains_per_att if args.use_att else None,\n l2_weight=args.l2_weight)\n', (2088, 2242), False, 'from lib.imsitu_model import ImsituModel\n'), 
((2323, 2348), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2346, 2348), False, 'import torch\n'), ((3601, 3616), 'tqdm.tqdm', 'tqdm', (['test_iter'], {}), '(test_iter)\n', (3605, 3616), False, 'from tqdm import tqdm\n'), ((1055, 1096), 'torch.autograd.Variable', 'Variable', (['item[0]'], {'volatile': 'self.volatile'}), '(item[0], volatile=self.volatile)\n', (1063, 1096), False, 'from torch.autograd import Variable\n'), ((1109, 1150), 'torch.autograd.Variable', 'Variable', (['item[1]'], {'volatile': 'self.volatile'}), '(item[1], volatile=self.volatile)\n', (1117, 1150), False, 'from torch.autograd import Variable\n'), ((1158, 1183), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1181, 1183), False, 'import torch\n'), ((1369, 1389), 'torch.stack', 'torch.stack', (['imgs', '(0)'], {}), '(imgs, 0)\n', (1380, 1389), False, 'import torch\n'), ((1403, 1427), 'torch.LongTensor', 'torch.LongTensor', (['labels'], {}), '(labels)\n', (1419, 1427), False, 'import torch\n'), ((3221, 3253), 'torch.autograd.Variable', 'Variable', (['att_mat'], {'volatile': '(True)'}), '(att_mat, volatile=True)\n', (3229, 3253), False, 'from torch.autograd import Variable\n'), ((3405, 3444), 'torch.bmm', 'torch.bmm', (['att_aff', 'att_mat[:, None, :]'], {}), '(att_aff, att_mat[:, None, :])\n', (3414, 3444), False, 'import torch\n'), ((3744, 3769), 'lib.imsitu_model.ours_logits', 'ours_logits', (['m', 'img_batch'], {}), '(m, img_batch)\n', (3755, 3769), False, 'from lib.imsitu_model import dap_deploy, ours_deploy, devise_deploy, ours_logits\n'), ((4273, 4330), 'pickle.dump', 'pkl.dump', (['(datoms, test_data.attributes.atts_df.index)', 'f'], {}), '((datoms, test_data.attributes.atts_df.index), f)\n', (4281, 4330), True, 'import pickle as pkl\n'), ((2272, 2293), 'torch.load', 'torch.load', (['args.ckpt'], {}), '(args.ckpt)\n', (2282, 2293), False, 'import torch\n'), ((860, 874), 'PIL.Image.open', 'Image.open', (['fn'], {}), '(fn)\n', (870, 874), 
False, 'from PIL import Image\n'), ((4171, 4187), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4185, 4187), True, 'import numpy as np\n'), ((3837, 3853), 'torch.nn.functional.softmax', 'F.softmax', (['preds'], {}), '(preds)\n', (3846, 3853), True, 'from torch.nn import functional as F\n')] |
import attr
from collections import namedtuple
from enum import IntFlag
import numpy as np # type: ignore
np.set_printoptions(suppress=True)  # print floats without scientific notation
from colorama import Fore
# Joint-type bit flags (IntFlag so that types can be OR-combined).
JointType = IntFlag('JointType', 'revolute prismatic revolute_theda revolute_alpha')
# Modified Denavit-Hartenberg parameter record.
mdh_params = namedtuple("mdh_params", "alpha a theta d ")
from math import pi
rad2deg = 180/pi  # radians -> degrees conversion factor
deg2rad = pi/180  # degrees -> radians conversion factor
@attr.s(slots=True)
class RevoluteLink:
    """
    Revolute joint link rotating about theta (modified DH convention).

    The joint angle ``theta`` is the only mutable parameter; ``alpha``, ``a``
    and ``d`` are kept in private fields and exposed read-only via properties.
    """

    _alpha = attr.ib()  # link twist [rad], fixed after construction
    _a = attr.ib()  # link length, fixed after construction
    theta = attr.ib()  # joint angle [rad], the actuated degree of freedom
    _d = attr.ib()  # link offset, fixed after construction
    min = attr.ib(default=-np.pi / 2)  # lower joint limit [rad]
    max = attr.ib(default=np.pi / 2)  # upper joint limit [rad]

    @max.validator
    def max_check(self, attribute, value):
        """Reject construction when the upper limit is below the lower one.

        Raises
        ------
        ValueError
            If ``max < min``. (ValueError subclasses Exception, so callers
            that caught the previous bare ``Exception()`` still work.)
        """
        if self.min > value:
            raise ValueError(
                "max joint limit ({}) must be >= min joint limit ({})".format(value, self.min)
            )

    @property
    def alpha(self):
        return self._alpha

    @property
    def a(self):
        return self._a

    @property
    def d(self):
        return self._d

    def transform(self, angle):
        """Return the 4x4 homogeneous transform for joint angle ``angle`` [rad].

        Uses the fixed modified-DH parameters (alpha, a, d) of this link; the
        stored ``theta`` attribute is not read -- the caller supplies the angle.
        """
        crx = np.cos(self._alpha)
        srx = np.sin(self._alpha)
        crz = np.cos(angle)
        srz = np.sin(angle)
        d = self._d
        a = self._a
        transform = np.array(
            [
                [crz, -srz, 0, a],
                [crx * srz, crx * crz, -srx, -d * srx],
                [srx * srz, crz * srx, crx, d * crx],
                [0, 0, 0, 1],
            ],
            dtype=np.float64,
        )
        return transform

    def __str__(self):
        # Colorized, degree-valued summary of the link parameters.
        saa = f"{Fore.CYAN}{self._alpha*rad2deg:4.1f}{Fore.RESET}"
        st = f"{Fore.CYAN}{self.theta*rad2deg:4.1f}{Fore.RESET}"
        sa = f"{Fore.YELLOW}{self._a:4.1f}{Fore.RESET}"
        sd = f"{Fore.YELLOW}{self._d:4.1f}{Fore.RESET}"
        return f"Rev[deg]: alpha: {saa} a: {sa} theta: {st} d: {sd}"
| [
"attr.s",
"collections.namedtuple",
"numpy.set_printoptions",
"enum.IntFlag",
"numpy.array",
"numpy.cos",
"numpy.sin",
"attr.ib"
] | [((106, 140), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (125, 140), True, 'import numpy as np\n'), ((181, 253), 'enum.IntFlag', 'IntFlag', (['"""JointType"""', '"""revolute prismatic revolute_theda revolute_alpha"""'], {}), "('JointType', 'revolute prismatic revolute_theda revolute_alpha')\n", (188, 253), False, 'from enum import IntFlag\n'), ((267, 311), 'collections.namedtuple', 'namedtuple', (['"""mdh_params"""', '"""alpha a theta d """'], {}), "('mdh_params', 'alpha a theta d ')\n", (277, 311), False, 'from collections import namedtuple\n'), ((369, 387), 'attr.s', 'attr.s', ([], {'slots': '(True)'}), '(slots=True)\n', (375, 387), False, 'import attr\n'), ((564, 573), 'attr.ib', 'attr.ib', ([], {}), '()\n', (571, 573), False, 'import attr\n'), ((583, 592), 'attr.ib', 'attr.ib', ([], {}), '()\n', (590, 592), False, 'import attr\n'), ((605, 614), 'attr.ib', 'attr.ib', ([], {}), '()\n', (612, 614), False, 'import attr\n'), ((624, 633), 'attr.ib', 'attr.ib', ([], {}), '()\n', (631, 633), False, 'import attr\n'), ((644, 671), 'attr.ib', 'attr.ib', ([], {'default': '(-np.pi / 2)'}), '(default=-np.pi / 2)\n', (651, 671), False, 'import attr\n'), ((680, 706), 'attr.ib', 'attr.ib', ([], {'default': '(np.pi / 2)'}), '(default=np.pi / 2)\n', (687, 706), False, 'import attr\n'), ((1230, 1249), 'numpy.cos', 'np.cos', (['self._alpha'], {}), '(self._alpha)\n', (1236, 1249), True, 'import numpy as np\n'), ((1264, 1283), 'numpy.sin', 'np.sin', (['self._alpha'], {}), '(self._alpha)\n', (1270, 1283), True, 'import numpy as np\n'), ((1298, 1311), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (1304, 1311), True, 'import numpy as np\n'), ((1326, 1339), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (1332, 1339), True, 'import numpy as np\n'), ((1431, 1574), 'numpy.array', 'np.array', (['[[crz, -srz, 0, a], [crx * srz, crx * crz, -srx, -d * srx], [srx * srz, crz *\n srx, crx, d * crx], [0, 0, 0, 1]]'], 
{'dtype': 'np.float64'}), '([[crz, -srz, 0, a], [crx * srz, crx * crz, -srx, -d * srx], [srx *\n srz, crz * srx, crx, d * crx], [0, 0, 0, 1]], dtype=np.float64)\n', (1439, 1574), True, 'import numpy as np\n')] |
"""
This module produces the outputs/plots.
<NAME>, spring/2013
"""
import pylab
import numpy
from matplotlib import pyplot
def plotPhaseSpace(b, aTheta, aOmega, t, power, k):
    """Draw the 2x2 summary figure (phase portrait, theta(t), power spectrum
    on linear and log axes) and save it under plots/ keyed by ``b``."""
    pyplot.clf()
    pyplot.cla()
    tag = str(b)

    # Panel 1: phase portrait, omega against theta.
    pyplot.subplot(221)
    pyplot.plot(aTheta, aOmega, color="m", lw=2)
    pyplot.xlabel(r"$\theta$ (radians) ", fontsize=10)
    pyplot.ylabel('$\omega$ (radians/seconds)', fontsize=10)
    pyplot.grid(True)

    # Panel 2: theta as a function of time.
    pyplot.subplot(222)
    pyplot.plot(t, aTheta, color="g", lw=2)
    pyplot.ylabel(r"$\theta$ (radians)", fontsize=10)
    pyplot.xlabel('t (seconds)', fontsize=10)
    pyplot.grid(True)

    # Panel 3: power spectrum, linear axes.
    pyplot.subplot(223)
    pyplot.grid(True)
    pyplot.plot(k, power, color="c", lw=2)
    pyplot.ylabel("|F(k)$|^{2}$", fontsize=10)
    pyplot.xlabel(r"$\nu_k$ ($s^{-1}$)", fontsize=10)

    # Panel 4: power spectrum vs angular frequency, log y-axis.
    pyplot.subplot(224)
    pyplot.yscale('log')
    pyplot.plot(2.0 * numpy.pi * k, power, color="b", lw=1)
    pyplot.xlim(0, 6)
    pyplot.grid(True)
    pyplot.xlabel(r"$\nu_k$ ($s^{-1}$)", fontsize=10)
    pyplot.ylabel("log |F(k)$|^{2}$", fontsize=10)

    pyplot.savefig("plots/b-%s_phase_space.png" % tag)
    return 0
def plotDFT(b, bDFT, k, bDFT_inv, aTheta, t):
    """Plot the signal, its DFT (real/imag and magnitude) and the inverse
    transform, then save the figure under plots/ keyed by ``b``."""
    pyplot.clf()
    pyplot.cla()
    tag = str(b)

    # Locate the frequency bin carrying the most power (for the annotation).
    peak_val = max(bDFT.real)
    peak_idx = numpy.where(bDFT.real == peak_val)[0]
    peak_freq = k[peak_idx]

    # Panel 1: the time-domain signal.
    pyplot.subplot(221)
    pyplot.plot(t, aTheta.real, color="g", lw=2)
    pyplot.ylabel(r"$\theta$ (t)", fontsize=10)
    pyplot.xlabel('t (seconds)', fontsize=10)
    pyplot.grid(True)

    # Panel 2: real and imaginary DFT parts, peak annotated.
    pyplot.subplot(222)
    pyplot.annotate("Frequency has the most power", xy=(peak_freq, peak_val),
                    xycoords='data', xytext=(+10, +30),
                    textcoords='offset points', fontsize=10,
                    arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=.2"))
    pyplot.plot(k, bDFT.real, color="r", lw=2, label="Real F(k)")
    pyplot.plot(k, bDFT.imag, color="b", lw=1, label="Imaginary F(k)")
    leg = pyplot.legend(loc=4, labelspacing=0.0005)
    pyplot.setp(leg.get_texts(), fontsize='small')
    leg.draw_frame(0)
    pyplot.ylabel("F(k)", fontsize=10)
    pyplot.xlabel(r"$\nu_k$ ($s^{-1}$)", fontsize=10)
    pyplot.grid(True)

    # Panel 3: DFT magnitude.
    pyplot.subplot(223)
    pyplot.plot(k, abs(bDFT.real), color="r", lw=2)
    pyplot.xlabel(r"$\nu_k$ ($s^{-1}$)", fontsize=10)
    pyplot.ylabel("|F(k)|", fontsize=10)
    pyplot.grid(True)

    # Panel 4: the inverse transform back into the time domain.
    pyplot.subplot(224)
    pyplot.plot(t, bDFT_inv.real, color="y", lw=2)
    pyplot.ylabel("Inverse F(k)", fontsize=10)
    pyplot.xlabel('t (seconds)', fontsize=10)
    pyplot.grid(True)

    pyplot.savefig("plots/b-%s_dft.png" % tag)
    return 0
| [
"pylab.ylabel",
"matplotlib.pyplot.grid",
"pylab.subplot",
"matplotlib.pyplot.ylabel",
"pylab.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"pylab.xlabel",
"pylab.grid",
"pylab.savefig",
"pylab.legend",
"pylab.setp",
"numpy.where",
"pylab.xlim",
"pylab.cla",
"matplotlib.... | [((198, 209), 'pylab.clf', 'pylab.clf', ([], {}), '()\n', (207, 209), False, 'import pylab\n'), ((214, 225), 'pylab.cla', 'pylab.cla', ([], {}), '()\n', (223, 225), False, 'import pylab\n'), ((255, 273), 'pylab.subplot', 'pylab.subplot', (['(221)'], {}), '(221)\n', (268, 273), False, 'import pylab\n'), ((280, 323), 'pylab.plot', 'pylab.plot', (['aTheta', 'aOmega'], {'color': '"""m"""', 'lw': '(2)'}), "(aTheta, aOmega, color='m', lw=2)\n", (290, 323), False, 'import pylab\n'), ((331, 380), 'pylab.xlabel', 'pylab.xlabel', (['"""$\\\\theta$ (radians) """'], {'fontsize': '(10)'}), "('$\\\\theta$ (radians) ', fontsize=10)\n", (343, 380), False, 'import pylab\n'), ((385, 441), 'pylab.ylabel', 'pylab.ylabel', (['"""$\\\\omega$ (radians/seconds)"""'], {'fontsize': '(10)'}), "('$\\\\omega$ (radians/seconds)', fontsize=10)\n", (397, 441), False, 'import pylab\n'), ((445, 461), 'pylab.grid', 'pylab.grid', (['(True)'], {}), '(True)\n', (455, 461), False, 'import pylab\n'), ((471, 489), 'pylab.subplot', 'pylab.subplot', (['(222)'], {}), '(222)\n', (484, 489), False, 'import pylab\n'), ((494, 532), 'pylab.plot', 'pylab.plot', (['t', 'aTheta'], {'color': '"""g"""', 'lw': '(2)'}), "(t, aTheta, color='g', lw=2)\n", (504, 532), False, 'import pylab\n'), ((541, 589), 'pylab.ylabel', 'pylab.ylabel', (['"""$\\\\theta$ (radians)"""'], {'fontsize': '(10)'}), "('$\\\\theta$ (radians)', fontsize=10)\n", (553, 589), False, 'import pylab\n'), ((594, 634), 'pylab.xlabel', 'pylab.xlabel', (['"""t (seconds)"""'], {'fontsize': '(10)'}), "('t (seconds)', fontsize=10)\n", (606, 634), False, 'import pylab\n'), ((639, 655), 'pylab.grid', 'pylab.grid', (['(True)'], {}), '(True)\n', (649, 655), False, 'import pylab\n'), ((661, 679), 'pylab.subplot', 'pylab.subplot', (['(223)'], {}), '(223)\n', (674, 679), False, 'import pylab\n'), ((684, 701), 'matplotlib.pyplot.grid', 'pyplot.grid', (['(True)'], {}), '(True)\n', (695, 701), False, 'from matplotlib import pyplot\n'), ((706, 744), 
'matplotlib.pyplot.plot', 'pyplot.plot', (['k', 'power'], {'color': '"""c"""', 'lw': '(2)'}), "(k, power, color='c', lw=2)\n", (717, 744), False, 'from matplotlib import pyplot\n'), ((751, 793), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""|F(k)$|^{2}$"""'], {'fontsize': '(10)'}), "('|F(k)$|^{2}$', fontsize=10)\n", (764, 793), False, 'from matplotlib import pyplot\n'), ((798, 847), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""$\\\\nu_k$ ($s^{-1}$)"""'], {'fontsize': '(10)'}), "('$\\\\nu_k$ ($s^{-1}$)', fontsize=10)\n", (811, 847), False, 'from matplotlib import pyplot\n'), ((857, 875), 'pylab.subplot', 'pylab.subplot', (['(224)'], {}), '(224)\n', (870, 875), False, 'import pylab\n'), ((880, 900), 'matplotlib.pyplot.yscale', 'pyplot.yscale', (['"""log"""'], {}), "('log')\n", (893, 900), False, 'from matplotlib import pyplot\n'), ((905, 960), 'matplotlib.pyplot.plot', 'pyplot.plot', (['(2.0 * numpy.pi * k)', 'power'], {'color': '"""b"""', 'lw': '(1)'}), "(2.0 * numpy.pi * k, power, color='b', lw=1)\n", (916, 960), False, 'from matplotlib import pyplot\n'), ((963, 979), 'pylab.xlim', 'pylab.xlim', (['(0)', '(6)'], {}), '(0, 6)\n', (973, 979), False, 'import pylab\n'), ((983, 1000), 'matplotlib.pyplot.grid', 'pyplot.grid', (['(True)'], {}), '(True)\n', (994, 1000), False, 'from matplotlib import pyplot\n'), ((1005, 1054), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""$\\\\nu_k$ ($s^{-1}$)"""'], {'fontsize': '(10)'}), "('$\\\\nu_k$ ($s^{-1}$)', fontsize=10)\n", (1018, 1054), False, 'from matplotlib import pyplot\n'), ((1059, 1105), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""log |F(k)$|^{2}$"""'], {'fontsize': '(10)'}), "('log |F(k)$|^{2}$', fontsize=10)\n", (1072, 1105), False, 'from matplotlib import pyplot\n'), ((1116, 1167), 'pylab.savefig', 'pylab.savefig', (["('plots/b-%s_phase_space.png' % label)"], {}), "('plots/b-%s_phase_space.png' % label)\n", (1129, 1167), False, 'import pylab\n'), ((1243, 1254), 'pylab.clf', 'pylab.clf', ([], {}), 
'()\n', (1252, 1254), False, 'import pylab\n'), ((1259, 1270), 'pylab.cla', 'pylab.cla', ([], {}), '()\n', (1268, 1270), False, 'import pylab\n'), ((1390, 1408), 'pylab.subplot', 'pylab.subplot', (['(221)'], {}), '(221)\n', (1403, 1408), False, 'import pylab\n'), ((1413, 1456), 'pylab.plot', 'pylab.plot', (['t', 'aTheta.real'], {'color': '"""g"""', 'lw': '(2)'}), "(t, aTheta.real, color='g', lw=2)\n", (1423, 1456), False, 'import pylab\n'), ((1464, 1506), 'pylab.ylabel', 'pylab.ylabel', (['"""$\\\\theta$ (t)"""'], {'fontsize': '(10)'}), "('$\\\\theta$ (t)', fontsize=10)\n", (1476, 1506), False, 'import pylab\n'), ((1512, 1552), 'pylab.xlabel', 'pylab.xlabel', (['"""t (seconds)"""'], {'fontsize': '(10)'}), "('t (seconds)', fontsize=10)\n", (1524, 1552), False, 'import pylab\n'), ((1557, 1573), 'pylab.grid', 'pylab.grid', (['(True)'], {}), '(True)\n', (1567, 1573), False, 'import pylab\n'), ((1583, 1601), 'pylab.subplot', 'pylab.subplot', (['(222)'], {}), '(222)\n', (1596, 1601), False, 'import pylab\n'), ((1816, 1876), 'pylab.plot', 'pylab.plot', (['k', 'bDFT.real'], {'color': '"""r"""', 'lw': '(2)', 'label': '"""Real F(k)"""'}), "(k, bDFT.real, color='r', lw=2, label='Real F(k)')\n", (1826, 1876), False, 'import pylab\n'), ((1884, 1949), 'pylab.plot', 'pylab.plot', (['k', 'bDFT.imag'], {'color': '"""b"""', 'lw': '(1)', 'label': '"""Imaginary F(k)"""'}), "(k, bDFT.imag, color='b', lw=1, label='Imaginary F(k)')\n", (1894, 1949), False, 'import pylab\n'), ((1963, 2003), 'pylab.legend', 'pylab.legend', ([], {'loc': '(4)', 'labelspacing': '(0.0005)'}), '(loc=4, labelspacing=0.0005)\n', (1975, 2003), False, 'import pylab\n'), ((2035, 2070), 'pylab.setp', 'pylab.setp', (['ltext'], {'fontsize': '"""small"""'}), "(ltext, fontsize='small')\n", (2045, 2070), False, 'import pylab\n'), ((2097, 2130), 'pylab.ylabel', 'pylab.ylabel', (['"""F(k)"""'], {'fontsize': '(10)'}), "('F(k)', fontsize=10)\n", (2109, 2130), False, 'import pylab\n'), ((2135, 2183), 'pylab.xlabel', 
'pylab.xlabel', (['"""$\\\\nu_k$ ($s^{-1}$)"""'], {'fontsize': '(10)'}), "('$\\\\nu_k$ ($s^{-1}$)', fontsize=10)\n", (2147, 2183), False, 'import pylab\n'), ((2188, 2204), 'pylab.grid', 'pylab.grid', (['(True)'], {}), '(True)\n', (2198, 2204), False, 'import pylab\n'), ((2214, 2232), 'pylab.subplot', 'pylab.subplot', (['(223)'], {}), '(223)\n', (2227, 2232), False, 'import pylab\n'), ((2291, 2339), 'pylab.xlabel', 'pylab.xlabel', (['"""$\\\\nu_k$ ($s^{-1}$)"""'], {'fontsize': '(10)'}), "('$\\\\nu_k$ ($s^{-1}$)', fontsize=10)\n", (2303, 2339), False, 'import pylab\n'), ((2344, 2379), 'pylab.ylabel', 'pylab.ylabel', (['"""|F(k)|"""'], {'fontsize': '(10)'}), "('|F(k)|', fontsize=10)\n", (2356, 2379), False, 'import pylab\n'), ((2384, 2400), 'pylab.grid', 'pylab.grid', (['(True)'], {}), '(True)\n', (2394, 2400), False, 'import pylab\n'), ((2410, 2428), 'pylab.subplot', 'pylab.subplot', (['(224)'], {}), '(224)\n', (2423, 2428), False, 'import pylab\n'), ((2433, 2478), 'pylab.plot', 'pylab.plot', (['t', 'bDFT_inv.real'], {'color': '"""y"""', 'lw': '(2)'}), "(t, bDFT_inv.real, color='y', lw=2)\n", (2443, 2478), False, 'import pylab\n'), ((2486, 2527), 'pylab.ylabel', 'pylab.ylabel', (['"""Inverse F(k)"""'], {'fontsize': '(10)'}), "('Inverse F(k)', fontsize=10)\n", (2498, 2527), False, 'import pylab\n'), ((2532, 2572), 'pylab.xlabel', 'pylab.xlabel', (['"""t (seconds)"""'], {'fontsize': '(10)'}), "('t (seconds)', fontsize=10)\n", (2544, 2572), False, 'import pylab\n'), ((2577, 2593), 'pylab.grid', 'pylab.grid', (['(True)'], {}), '(True)\n', (2587, 2593), False, 'import pylab\n'), ((2599, 2642), 'pylab.savefig', 'pylab.savefig', (["('plots/b-%s_dft.png' % label)"], {}), "('plots/b-%s_dft.png' % label)\n", (2612, 2642), False, 'import pylab\n'), ((1331, 1361), 'numpy.where', 'numpy.where', (['(bDFT.real == ymax)'], {}), '(bDFT.real == ymax)\n', (1342, 1361), False, 'import numpy\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
__doc__ = r'''
Removes all fourier coefs below threshold in frequency space
Created on 07/04/2020
'''
import numpy
from matplotlib import pyplot
from neodroidaudition.regression.spectral_denoise import fft_denoise
if __name__ == '__main__':

    def main():
        """Demo: clean a noisy two-tone signal with ``fft_denoise`` and plot it.

        A 1 s signal sampled every ``delta`` seconds contains 50 Hz and
        120 Hz sine components; gaussian noise is added, the weak Fourier
        coefficients are removed, and noisy / cleaned / clean traces are
        compared.
        """
        delta = 0.001  # sampling interval [s] (1 kHz sampling)
        time_ = numpy.arange(0, 1, delta)
        time_steps = len(time_)

        # Two-tone clean signal plus gaussian noise.
        signal = numpy.sin(2 * numpy.pi * 50 * time_) + numpy.sin(2 * numpy.pi * 120 * time_)
        noisy_signal = signal + 2.5 * numpy.random.randn(time_steps)
        cleaned_signal = fft_denoise(noisy_signal, time_steps)

        fig, axs = pyplot.subplots(4, 1)
        # The frequency axis and positive-frequency index range are only used
        # by the disabled PSD panels below; kept so they re-enable as-is.
        frequencies = (1 / (delta * time_steps)) * numpy.arange(time_steps)
        # FIX: ``numpy.int`` was removed in NumPy 1.24; the builtin ``int`` is
        # the drop-in replacement.
        L = numpy.arange(1, numpy.floor(time_steps / 2), dtype=int)

        # FIX: matplotlib keyword arguments are case sensitive, so the
        # MATLAB-style ``LineWidth=...`` raised an error; use ``linewidth``.
        pyplot.sca(axs[0])
        pyplot.plot(time_, noisy_signal, color='c', linewidth=1.5, label='Noisy')
        pyplot.plot(time_, signal, color='k', linewidth=2, label='Clean')
        pyplot.xlim(time_[0], time_[-1])
        pyplot.ylabel('Amplitude')
        pyplot.xlabel('Time')
        pyplot.legend()

        '''
        psd_cleaned = power_spectral_density * indices
        cleaned_f_coef = noisy_signal_f_coef * indices

        pyplot.sca(axs[1])
        pyplot.plot(frequencies[L], power_spectral_density[L], color='c', linewidth=2, label='Noisy')
        pyplot.xlim(frequencies[L[0]], frequencies[L[-1]])
        pyplot.ylabel('PSD')
        pyplot.xlabel('Frequency')
        pyplot.legend()

        pyplot.sca(axs[2])
        pyplot.plot(frequencies[L], psd_cleaned[L], color='c', linewidth=2, label='Cleaned')
        pyplot.xlim(frequencies[L[0]], frequencies[L[-1]])
        pyplot.ylabel('PSD')
        pyplot.xlabel('Frequency')
        pyplot.legend()
        '''

        pyplot.sca(axs[3])
        pyplot.plot(time_, noisy_signal, color='g', linewidth=1, label='Noisy')
        pyplot.plot(time_, cleaned_signal, color='c', linewidth=3, label='Cleaned')
        pyplot.plot(time_, signal, color='k', linewidth=2, label='Clean')
        pyplot.xlim(time_[0], time_[-1])
        pyplot.ylabel('Amplitude')
        pyplot.xlabel('Time')
        pyplot.legend()

        pyplot.show()

    main()
| [
"matplotlib.pyplot.ylabel",
"neodroidaudition.regression.spectral_denoise.fft_denoise",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.sca",
"numpy.floor",
"numpy.sin",
"matplotlib.pyplot.xlim",
"numpy.random.randn",
"matplotlib.pyplot.subplo... | [((411, 436), 'numpy.arange', 'numpy.arange', (['(0)', '(1)', 'delta'], {}), '(0, 1, delta)\n', (423, 436), False, 'import numpy\n'), ((649, 686), 'neodroidaudition.regression.spectral_denoise.fft_denoise', 'fft_denoise', (['noisy_signal', 'time_steps'], {}), '(noisy_signal, time_steps)\n', (660, 686), False, 'from neodroidaudition.regression.spectral_denoise import fft_denoise\n'), ((708, 729), 'matplotlib.pyplot.subplots', 'pyplot.subplots', (['(4)', '(1)'], {}), '(4, 1)\n', (723, 729), False, 'from matplotlib import pyplot\n'), ((883, 901), 'matplotlib.pyplot.sca', 'pyplot.sca', (['axs[0]'], {}), '(axs[0])\n', (893, 901), False, 'from matplotlib import pyplot\n'), ((906, 979), 'matplotlib.pyplot.plot', 'pyplot.plot', (['time_', 'noisy_signal'], {'color': '"""c"""', 'LineWidth': '(1.5)', 'label': '"""Noisy"""'}), "(time_, noisy_signal, color='c', LineWidth=1.5, label='Noisy')\n", (917, 979), False, 'from matplotlib import pyplot\n'), ((984, 1049), 'matplotlib.pyplot.plot', 'pyplot.plot', (['time_', 'signal'], {'color': '"""k"""', 'LineWidth': '(2)', 'label': '"""Clean"""'}), "(time_, signal, color='k', LineWidth=2, label='Clean')\n", (995, 1049), False, 'from matplotlib import pyplot\n'), ((1054, 1086), 'matplotlib.pyplot.xlim', 'pyplot.xlim', (['time_[0]', 'time_[-1]'], {}), '(time_[0], time_[-1])\n', (1065, 1086), False, 'from matplotlib import pyplot\n'), ((1091, 1117), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""Amplitude"""'], {}), "('Amplitude')\n", (1104, 1117), False, 'from matplotlib import pyplot\n'), ((1122, 1143), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""Time"""'], {}), "('Time')\n", (1135, 1143), False, 'from matplotlib import pyplot\n'), ((1148, 1163), 'matplotlib.pyplot.legend', 'pyplot.legend', ([], {}), '()\n', (1161, 1163), False, 'from matplotlib import pyplot\n'), ((1808, 1826), 'matplotlib.pyplot.sca', 'pyplot.sca', (['axs[3]'], {}), '(axs[3])\n', (1818, 1826), False, 'from matplotlib import 
pyplot\n'), ((1831, 1902), 'matplotlib.pyplot.plot', 'pyplot.plot', (['time_', 'noisy_signal'], {'color': '"""g"""', 'LineWidth': '(1)', 'label': '"""Noisy"""'}), "(time_, noisy_signal, color='g', LineWidth=1, label='Noisy')\n", (1842, 1902), False, 'from matplotlib import pyplot\n'), ((1907, 1982), 'matplotlib.pyplot.plot', 'pyplot.plot', (['time_', 'cleaned_signal'], {'color': '"""c"""', 'LineWidth': '(3)', 'label': '"""Cleaned"""'}), "(time_, cleaned_signal, color='c', LineWidth=3, label='Cleaned')\n", (1918, 1982), False, 'from matplotlib import pyplot\n'), ((1987, 2052), 'matplotlib.pyplot.plot', 'pyplot.plot', (['time_', 'signal'], {'color': '"""k"""', 'LineWidth': '(2)', 'label': '"""Clean"""'}), "(time_, signal, color='k', LineWidth=2, label='Clean')\n", (1998, 2052), False, 'from matplotlib import pyplot\n'), ((2057, 2089), 'matplotlib.pyplot.xlim', 'pyplot.xlim', (['time_[0]', 'time_[-1]'], {}), '(time_[0], time_[-1])\n', (2068, 2089), False, 'from matplotlib import pyplot\n'), ((2094, 2120), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""Amplitude"""'], {}), "('Amplitude')\n", (2107, 2120), False, 'from matplotlib import pyplot\n'), ((2125, 2146), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""Time"""'], {}), "('Time')\n", (2138, 2146), False, 'from matplotlib import pyplot\n'), ((2151, 2166), 'matplotlib.pyplot.legend', 'pyplot.legend', ([], {}), '()\n', (2164, 2166), False, 'from matplotlib import pyplot\n'), ((2177, 2190), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (2188, 2190), False, 'from matplotlib import pyplot\n'), ((485, 521), 'numpy.sin', 'numpy.sin', (['(2 * numpy.pi * 50 * time_)'], {}), '(2 * numpy.pi * 50 * time_)\n', (494, 521), False, 'import numpy\n'), ((524, 561), 'numpy.sin', 'numpy.sin', (['(2 * numpy.pi * 120 * time_)'], {}), '(2 * numpy.pi * 120 * time_)\n', (533, 561), False, 'import numpy\n'), ((777, 801), 'numpy.arange', 'numpy.arange', (['time_steps'], {}), '(time_steps)\n', (789, 801), False, 'import 
numpy\n'), ((826, 853), 'numpy.floor', 'numpy.floor', (['(time_steps / 2)'], {}), '(time_steps / 2)\n', (837, 853), False, 'import numpy\n'), ((596, 626), 'numpy.random.randn', 'numpy.random.randn', (['time_steps'], {}), '(time_steps)\n', (614, 626), False, 'import numpy\n')] |
"""
Module for computing mask defining great circle arc between two endpoints
Specifically for stereographic projection!
"""
import warnings
import numpy as np
import xarray as xr
try:
    from ecco_v4_py import scalar_calc
except ImportError:
    # Optional dependency: if it is missing, get_section_line_masks will fail
    # at the scalar_calc call with a NameError.
    pass
# -------------------------------------------------------------------------------
# Main function to compute section masks
# -------------------------------------------------------------------------------
def get_section_line_masks(pt1, pt2, cds, grid):
    """Compute 2D masks with 1's along the great circle arc from pt1 to pt2.

    Parameters
    ----------
    pt1, pt2 : tuple or list with 2 floats
        [longitude, latitude] or (longitude, latitude) of endpoints
    cds : xarray Dataset
        containing grid coordinate information, at least XC, YC
    grid : xgcm grid object

    Returns
    -------
    maskC, maskW, maskS : xarray DataArray
        2D section masks at cell centers, west edges, and south edges
    """

    # Endpoints in cartesian coordinates.
    p1 = _convert_stereo_to_cartesian(pt1[0], pt1[1])
    p2 = _convert_stereo_to_cartesian(pt2[0], pt2[1])

    # Rotation 1: about the x-axis, placing pt1 in the z = 0 plane.
    ang = np.arctan2(-p1[2], p1[1])
    ca, sa = np.cos(ang), np.sin(ang)
    rot_1 = np.vstack(([1, 0, 0],
                       [0, ca, -sa],
                       [0, sa, ca]))
    p1 = _apply_rotation_matrix(rot_1, p1)
    p2 = _apply_rotation_matrix(rot_1, p2)

    # Rotation 2: about the z-axis, placing pt1 on the y-axis.
    ang = np.arctan2(p1[0], p1[1])
    ca, sa = np.cos(ang), np.sin(ang)
    rot_2 = np.vstack(([ca, -sa, 0],
                       [sa, ca, 0],
                       [0, 0, 1]))
    p1 = _apply_rotation_matrix(rot_2, p1)
    p2 = _apply_rotation_matrix(rot_2, p2)

    # Rotation 3: about the y-axis, placing pt2 in the z = 0 plane as well.
    ang = np.arctan2(-p2[2], -p2[0])
    ca, sa = np.cos(ang), np.sin(ang)
    rot_3 = np.vstack(([ca, 0, sa],
                       [0, 1, 0],
                       [-sa, 0, ca]))
    p1 = _apply_rotation_matrix(rot_3, p1)
    p2 = _apply_rotation_matrix(rot_3, p2)

    # Apply the same rotations to the horizontal grid cell centers ...
    xc, yc, zc = _rotate_the_grid(cds.XC, cds.YC, rot_1, rot_2, rot_3)

    # ... and interpolate x, y to the west and south cell edges.
    xw = grid.interp(xc, 'X', boundary='fill')
    yw = grid.interp(yc, 'X', boundary='fill')
    xs = grid.interp(xc, 'Y', boundary='fill')
    ys = grid.interp(yc, 'Y', boundary='fill')

    # Great circle mask spanning the entire globe.
    maskC = scalar_calc.get_edge_mask(zc > 0, grid)
    maskW = grid.diff(1 * (zc > 0), 'X', boundary='fill')
    maskS = grid.diff(1 * (zc > 0), 'Y', boundary='fill')

    # Restrict each mask to the arc running from pt1 to pt2 only.
    maskC = _calc_section_along_full_arc_mask(maskC, p1[0], p1[1], p2[0], p2[1], xc, yc)
    maskW = _calc_section_along_full_arc_mask(maskW, p1[0], p1[1], p2[0], p2[1], xw, yw)
    maskS = _calc_section_along_full_arc_mask(maskS, p1[0], p1[1], p2[0], p2[1], xs, ys)

    return maskC, maskW, maskS
# -------------------------------------------------------------------------------
#
# All functions below are non-user facing
#
# -------------------------------------------------------------------------------
# Helper functions for computing section masks
# -------------------------------------------------------------------------------
def _calc_section_along_full_arc_mask( mask, x1, y1, x2, y2, xg, yg ):
"""Given a mask which has a great circle passing through
pt1 = (x1, y1) and pt2 = (x2,y2), grab the section just connecting pt1 and pt2
Parameters
----------
mask : xarray DataArray
2D LLC mask with 1's along great circle across globe, crossing pt1 and pt2
x1,y1,x2,y2 : scalars
cartesian coordinates of rotated pt1 and pt2. Note that z1 = z2 = 0
xg, yg : xarray DataArray
cartesian coordinates of the rotated horizontal grid
Returns
-------
mask : xarray DataArray
mask with great arc passing from pt1 -> pt2
"""
theta_1 = np.arctan2(y1,x1)
theta_2 = np.arctan2(y2,x2)
theta_g = np.arctan2(yg,xg)
if theta_2 < 0:
theta_g = theta_g.where( theta_g > theta_2, theta_g + 2*np.pi )
theta_2 = theta_2 + 2 * np.pi
if (theta_2 - theta_1) <= np.pi:
mask = mask.where( (theta_g <= theta_2) & (theta_g >= theta_1), 0)
else:
mask = mask.where( (theta_g > theta_2) | (theta_g < theta_1), 0)
return mask
def _rotate_the_grid(sx, sy, rot_1, rot_2, rot_3):
"""Rotate the horizontal grid at lon, lat, via rotation matrices rot_1/2/3
Parameters
----------
sx, sy : xarray DataArray
giving longitude, latitude in degrees of LLC horizontal grid
rot_1, rot_2, rot_3 : np.ndarray
rotation matrices
Returns
-------
xg, yg, zg : xarray DataArray
cartesian coordinates of the horizontal grid
"""
# Get cartesian of 1D view of lat/lon
sx_v = sx.values.ravel()
sy_v = sy.values.ravel()
if len(sx_v) != len(sy_v):
sx_v,sy_v = np.meshgrid(sx_v,sy_v)
sx_v = sx_v.ravel()
sy_v = sy_v.ravel()
xg, yg, zg = _convert_stereo_to_cartesian(sx_v,sy_v)
# These rotations result in:
# xg = 0 at pt1
# yg = 1 at pt1
# zg = 0 at pt1 and pt2 (and the great circle that crosses pt1 & pt2)
xg, yg, zg = _apply_rotation_matrix(rot_1, (xg,yg,zg))
xg, yg, zg = _apply_rotation_matrix(rot_2, (xg,yg,zg))
xg, yg, zg = _apply_rotation_matrix(rot_3, (xg,yg,zg))
# Remake into LLC xarray DataArray
tmp = sy*sx # template
def make_xda(fld,template):
return xr.DataArray(np.reshape(fld,template.shape),
coords=tmp.coords,dims=tmp.dims)
xg = make_xda(xg,tmp)
yg = make_xda(yg,tmp)
zg = make_xda(zg,tmp)
return xg, yg, zg
def _apply_rotation_matrix(rot_mat,xyz):
"""Apply a rotation matrix to a tuple x,y,z (each x,y,z possibly being arrays)
Parameters
----------
rot_mat : numpy matrix
2D matrix defining rotation in 3D cartesian coordinates
xyz : tuple of arrays
with cartesian coordinates
Returns
-------
xyz_rot : tuple of arrays
rotated a la rot_mat
"""
# Put tuple into matrix form
xyz_mat = np.vstack( (xyz[0],xyz[1],xyz[2]) )
# Perform rotation
xyz_rot_mat = np.matmul( rot_mat, xyz_mat )
# Either return as scalar or array
if np.isscalar(xyz[0]):
return xyz_rot_mat[0,0], xyz_rot_mat[1,0], xyz_rot_mat[2,0]
else:
return xyz_rot_mat[0,:], xyz_rot_mat[1,:], xyz_rot_mat[2,:]
def _convert_stereo_to_cartesian(sx, sy):
"""Convert ...
Parameters
----------
sx,sy : numpy or dask array
stereographic projection plane x,y coordinates
Returns
-------
x : numpy or dask array
x- component of cartesian coordinate
y : numpy or dask array
z : numpy or dask array
"""
# Get cartesian
denom = (1 + sx**2 + sy**2)
x = 2*sx / denom
y = 2*sy / denom
z = (-1 + sx**2 + sy**2)/denom
return x, y, z
| [
"numpy.reshape",
"numpy.isscalar",
"ecco_v4_py.scalar_calc.get_edge_mask",
"numpy.matmul",
"numpy.arctan2",
"numpy.vstack",
"numpy.cos",
"numpy.sin",
"numpy.meshgrid"
] | [((1228, 1247), 'numpy.arctan2', 'np.arctan2', (['(-z1)', 'y1'], {}), '(-z1, y1)\n', (1238, 1247), True, 'import numpy as np\n'), ((1602, 1620), 'numpy.arctan2', 'np.arctan2', (['x1', 'y1'], {}), '(x1, y1)\n', (1612, 1620), True, 'import numpy as np\n'), ((1975, 1995), 'numpy.arctan2', 'np.arctan2', (['(-z2)', '(-x2)'], {}), '(-z2, -x2)\n', (1985, 1995), True, 'import numpy as np\n'), ((2757, 2796), 'ecco_v4_py.scalar_calc.get_edge_mask', 'scalar_calc.get_edge_mask', (['(zc > 0)', 'grid'], {}), '(zc > 0, grid)\n', (2782, 2796), False, 'from ecco_v4_py import scalar_calc\n'), ((4230, 4248), 'numpy.arctan2', 'np.arctan2', (['y1', 'x1'], {}), '(y1, x1)\n', (4240, 4248), True, 'import numpy as np\n'), ((4262, 4280), 'numpy.arctan2', 'np.arctan2', (['y2', 'x2'], {}), '(y2, x2)\n', (4272, 4280), True, 'import numpy as np\n'), ((4294, 4312), 'numpy.arctan2', 'np.arctan2', (['yg', 'xg'], {}), '(yg, xg)\n', (4304, 4312), True, 'import numpy as np\n'), ((6488, 6523), 'numpy.vstack', 'np.vstack', (['(xyz[0], xyz[1], xyz[2])'], {}), '((xyz[0], xyz[1], xyz[2]))\n', (6497, 6523), True, 'import numpy as np\n'), ((6566, 6593), 'numpy.matmul', 'np.matmul', (['rot_mat', 'xyz_mat'], {}), '(rot_mat, xyz_mat)\n', (6575, 6593), True, 'import numpy as np\n'), ((6643, 6662), 'numpy.isscalar', 'np.isscalar', (['xyz[0]'], {}), '(xyz[0])\n', (6654, 6662), True, 'import numpy as np\n'), ((5252, 5275), 'numpy.meshgrid', 'np.meshgrid', (['sx_v', 'sy_v'], {}), '(sx_v, sy_v)\n', (5263, 5275), True, 'import numpy as np\n'), ((5846, 5877), 'numpy.reshape', 'np.reshape', (['fld', 'template.shape'], {}), '(fld, template.shape)\n', (5856, 5877), True, 'import numpy as np\n'), ((1311, 1326), 'numpy.cos', 'np.cos', (['theta_1'], {}), '(theta_1)\n', (1317, 1326), True, 'import numpy as np\n'), ((1374, 1389), 'numpy.sin', 'np.sin', (['theta_1'], {}), '(theta_1)\n', (1380, 1389), True, 'import numpy as np\n'), ((1391, 1406), 'numpy.cos', 'np.cos', (['theta_1'], {}), '(theta_1)\n', (1397, 1406), True, 
'import numpy as np\n'), ((1645, 1660), 'numpy.cos', 'np.cos', (['theta_2'], {}), '(theta_2)\n', (1651, 1660), True, 'import numpy as np\n'), ((1708, 1723), 'numpy.sin', 'np.sin', (['theta_2'], {}), '(theta_2)\n', (1714, 1723), True, 'import numpy as np\n'), ((1725, 1740), 'numpy.cos', 'np.cos', (['theta_2'], {}), '(theta_2)\n', (1731, 1740), True, 'import numpy as np\n'), ((2022, 2037), 'numpy.cos', 'np.cos', (['theta_3'], {}), '(theta_3)\n', (2028, 2037), True, 'import numpy as np\n'), ((2042, 2057), 'numpy.sin', 'np.sin', (['theta_3'], {}), '(theta_3)\n', (2048, 2057), True, 'import numpy as np\n'), ((2142, 2157), 'numpy.cos', 'np.cos', (['theta_3'], {}), '(theta_3)\n', (2148, 2157), True, 'import numpy as np\n'), ((1328, 1343), 'numpy.sin', 'np.sin', (['theta_1'], {}), '(theta_1)\n', (1334, 1343), True, 'import numpy as np\n'), ((1662, 1677), 'numpy.sin', 'np.sin', (['theta_2'], {}), '(theta_2)\n', (1668, 1677), True, 'import numpy as np\n'), ((2122, 2137), 'numpy.sin', 'np.sin', (['theta_3'], {}), '(theta_3)\n', (2128, 2137), True, 'import numpy as np\n')] |
import numpy as np
import cv2
def rescale(img):
img = img.astype(np.float32)
new_img = img - img.min()
new_img /= (new_img.max() + 1e-6)
return new_img
def img_resize(img, resize=None):
img = img.astype(np.float32)
img = rescale(img)
if resize is None or (resize == img.shape[1] and resize == img.shape[2]):
return img
new_img = np.zeros((img.shape[0], resize, resize))
for ii in range(img.shape[0]):
new_img[ii,:,:] = cv2.resize(img[ii,:,:], (resize,resize))
img = new_img.astype(np.float32)
img = rescale(img)
return img
def extend_data(images, labels, databloat):
images_tr = []
labels_tr = []
for ii in range(databloat):
images_tr += images
labels_tr += labels
return images_tr, labels_tr
| [
"numpy.zeros",
"cv2.resize"
] | [((372, 412), 'numpy.zeros', 'np.zeros', (['(img.shape[0], resize, resize)'], {}), '((img.shape[0], resize, resize))\n', (380, 412), True, 'import numpy as np\n'), ((474, 517), 'cv2.resize', 'cv2.resize', (['img[ii, :, :]', '(resize, resize)'], {}), '(img[ii, :, :], (resize, resize))\n', (484, 517), False, 'import cv2\n')] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from Environments.transportEnvOld import transportENV
np.random.seed(999)
dataInit = pd.DataFrame()
dataInit['states1'] = np.zeros(1000)
dataInit['states2'] = np.zeros(1000)
for i in range(1000):
ran = min(float(0.002 *i), 0.002)
print(ran)
dataInit.iloc[i,:] = np.array(np.random.uniform(-ran, ran ,2))
dataInit.to_pickle('initData')
pd.read_pickle('initData').reset_index().plot.scatter(x='states1', y='states2', c='index')
plt.show()
nb_steps = 250
init_angles = []
init_pos = []
init_rewards = []
env = transportENV()
for i in range(nb_steps):
env.reset(dataInit.iloc[i,:])
init_pos.append(env.state)
init_rewards.append(env.reward)
init_angles.append([env.mssb_angle, env.mbb_angle])
init_pos.append([0., min(np.array(init_pos)[:, 1])])
init_pos = np.array(init_pos)
plt.scatter(init_pos[:-1, 0], init_pos[:-1, 1], c=init_rewards, alpha=0.1)
plt.show() | [
"pandas.read_pickle",
"Environments.transportEnvOld.transportENV",
"numpy.zeros",
"numpy.array",
"numpy.random.seed",
"matplotlib.pyplot.scatter",
"numpy.random.uniform",
"pandas.DataFrame",
"matplotlib.pyplot.show"
] | [((127, 146), 'numpy.random.seed', 'np.random.seed', (['(999)'], {}), '(999)\n', (141, 146), True, 'import numpy as np\n'), ((158, 172), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (170, 172), True, 'import pandas as pd\n'), ((195, 209), 'numpy.zeros', 'np.zeros', (['(1000)'], {}), '(1000)\n', (203, 209), True, 'import numpy as np\n'), ((232, 246), 'numpy.zeros', 'np.zeros', (['(1000)'], {}), '(1000)\n', (240, 246), True, 'import numpy as np\n'), ((513, 523), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (521, 523), True, 'import matplotlib.pyplot as plt\n'), ((595, 609), 'Environments.transportEnvOld.transportENV', 'transportENV', ([], {}), '()\n', (607, 609), False, 'from Environments.transportEnvOld import transportENV\n'), ((858, 876), 'numpy.array', 'np.array', (['init_pos'], {}), '(init_pos)\n', (866, 876), True, 'import numpy as np\n'), ((878, 952), 'matplotlib.pyplot.scatter', 'plt.scatter', (['init_pos[:-1, 0]', 'init_pos[:-1, 1]'], {'c': 'init_rewards', 'alpha': '(0.1)'}), '(init_pos[:-1, 0], init_pos[:-1, 1], c=init_rewards, alpha=0.1)\n', (889, 952), True, 'import matplotlib.pyplot as plt\n'), ((953, 963), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (961, 963), True, 'import matplotlib.pyplot as plt\n'), ((356, 387), 'numpy.random.uniform', 'np.random.uniform', (['(-ran)', 'ran', '(2)'], {}), '(-ran, ran, 2)\n', (373, 387), True, 'import numpy as np\n'), ((819, 837), 'numpy.array', 'np.array', (['init_pos'], {}), '(init_pos)\n', (827, 837), True, 'import numpy as np\n'), ((422, 448), 'pandas.read_pickle', 'pd.read_pickle', (['"""initData"""'], {}), "('initData')\n", (436, 448), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
"""
Network processing and analysis functions.
"""
import numpy as np
import pandas as pd
try:
import networkx as nx
except ImportError:
print('Install networkx if functionality is needed')
############################################
#### Network processing and analysis
def nx_shp(shp_pts, shp_lines, site_col='site'):
"""
Function to import shapefiles into nx networks. The lines become the edges and the points are used as the node (names). The points shapefile should have a site name column that is precisely at the intersection of the shp_lines.
"""
## Read in shapefiles
t2 = nx.read_shp(shp_pts)
t3 = nx.read_shp(shp_lines)
## extract the site names
sites = [i[1][site_col] for i in t2.nodes(data=True)]
## Set up rename dictionaries
rename1 = {(i[0], i[1]): (round(i[0]), round(i[1])) for i in t3.nodes()}
rename2 = {(round(i[0][0]), round(i[0][1])): i[1]['site'] for i in t2.nodes(data=True)}
## Rename nodes
g2 = nx.relabel_nodes(t3, rename1)
g3 = nx.relabel_nodes(g2, rename2)
## Remove unnecessary nodes
remove1 = [g3.nodes()[i] for i in np.where(~np.in1d(g3.nodes(), sites))[0]]
g3.remove_nodes_from(remove1)
return g3
def str_paths(nx1):
"""
Function to process the stream network to determine the nodes and edges to downstream gauging sites. The input is from the output of nx_shp. The output is a two dictorionary object list of site nodes and site edges.
"""
def iter1(g1, d2, site):
keys1 = g1.keys()
sites2 = [i for i in keys1 if ((i != site) & (i < 10000000))]
if not sites2:
output = [site]
else:
len1 = [d2[site][i] for i in sites2]
down_site = sites2[np.argmin(len1)]
output = g1[down_site]
return output
## Determine all paths
p1 = nx.all_pairs_shortest_path(nx1)
d1 = nx.all_pairs_dijkstra_path_length(nx1, None, 'len')
## Make list of all sites
sites = [i for i in nx1.nodes() if (i < 10000000)]
## Extract the paths for all sites (into a dict)
p2 = {i: p1[i] for i in sites}
d2 = {i: d1[i] for i in sites}
site_nodes = {i: iter1(p2[i], d2, i) for i in p2}
site_paths = {i: [j[2] for j in nx1.out_edges(site_nodes[i], data='num')][0:-1] for i in site_nodes}
return site_nodes, site_paths
def up_branch(df, index_col=1):
"""
Function to create a dataframe of all the interconnected values looking upstream from specific locations.
"""
col1 = df.columns[index_col-1]
index1 = df[col1]
df2 = df.drop(col1, axis=1)
catch_set2 = []
for i in index1:
catch1 = df2[index1 == i].dropna(axis=1).values[0]
catch_set1 = catch1
check1 = index1.isin(catch1)
while sum(check1) >= 1:
if sum(check1) > len(catch1):
print('Index numbering is wrong!')
catch2 = df2[check1].values.flatten()
catch3 = catch2[~np.isnan(catch2)]
catch_set1 = np.append(catch_set1, catch3)
check1 = index1.isin(catch3)
catch1 = catch3
catch_set2.append(catch_set1.tolist())
output1 = pd.DataFrame(catch_set2, index=index1)
return output1
| [
"networkx.relabel_nodes",
"networkx.all_pairs_shortest_path",
"networkx.read_shp",
"numpy.append",
"numpy.isnan",
"numpy.argmin",
"pandas.DataFrame",
"networkx.all_pairs_dijkstra_path_length"
] | [((641, 661), 'networkx.read_shp', 'nx.read_shp', (['shp_pts'], {}), '(shp_pts)\n', (652, 661), True, 'import networkx as nx\n'), ((671, 693), 'networkx.read_shp', 'nx.read_shp', (['shp_lines'], {}), '(shp_lines)\n', (682, 693), True, 'import networkx as nx\n'), ((1017, 1046), 'networkx.relabel_nodes', 'nx.relabel_nodes', (['t3', 'rename1'], {}), '(t3, rename1)\n', (1033, 1046), True, 'import networkx as nx\n'), ((1056, 1085), 'networkx.relabel_nodes', 'nx.relabel_nodes', (['g2', 'rename2'], {}), '(g2, rename2)\n', (1072, 1085), True, 'import networkx as nx\n'), ((1889, 1920), 'networkx.all_pairs_shortest_path', 'nx.all_pairs_shortest_path', (['nx1'], {}), '(nx1)\n', (1915, 1920), True, 'import networkx as nx\n'), ((1930, 1981), 'networkx.all_pairs_dijkstra_path_length', 'nx.all_pairs_dijkstra_path_length', (['nx1', 'None', '"""len"""'], {}), "(nx1, None, 'len')\n", (1963, 1981), True, 'import networkx as nx\n'), ((3209, 3247), 'pandas.DataFrame', 'pd.DataFrame', (['catch_set2'], {'index': 'index1'}), '(catch_set2, index=index1)\n', (3221, 3247), True, 'import pandas as pd\n'), ((3048, 3077), 'numpy.append', 'np.append', (['catch_set1', 'catch3'], {}), '(catch_set1, catch3)\n', (3057, 3077), True, 'import numpy as np\n'), ((1778, 1793), 'numpy.argmin', 'np.argmin', (['len1'], {}), '(len1)\n', (1787, 1793), True, 'import numpy as np\n'), ((3005, 3021), 'numpy.isnan', 'np.isnan', (['catch2'], {}), '(catch2)\n', (3013, 3021), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 22 11:08:12 2018
@author: irhamta
Based on Leaf & Melia (2018) paper
"""
# import necessary modules
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# read lensing data which is taken from Leaf et al. (2018)
data = pd.read_csv('lensing_data.csv')
data = data[data.name != 'J0850-0347'] # outlier removal
# define function to calculate angular diameter distance as in Equation 3
# this is used for observational data
def D_obs(theta_E, sigma_0):
# need to convert theta_E from arcsec to radian
theta_E = np.deg2rad(theta_E/3600)
# speed of light in km/s
c = 3*1e5
return c**2 * theta_E / (4*np.pi * sigma_0**2)
# error of observed angular diameter distance as in Equation 10
# this doesn't include the sigma_x
def D_obs_err(theta_E, err_theta_E, sigma_0, err_sigma_0):
return D_obs(theta_E, sigma_0) \
* np.sqrt((err_theta_E/theta_E)**2 + (2*err_sigma_0/sigma_0)**2 + 0.12**2)
# =============================================================================
# This part is to make sure that our written D_obs function is consistent with
# built in function from astropy
# =============================================================================
from astropy.cosmology import LambdaCDM
import astropy.units as u
# cosmological model to calculate cosmological parameters
def cosmolo(Om0,Ode0):
cosmo = LambdaCDM(Om0=Om0, Ode0=Ode0, H0=67.7, Tcmb0=2.725,
Ob0=0.0486, m_nu=u.Quantity([0., 0., 0.06], u.eV))
return cosmo
# angular diameter distance for lens
def D_L(zl,model):
return model.angular_diameter_distance(zl)
#angular diameter distance for source
def D_S(zs,model):
return model.angular_diameter_distance(zs)
#angular diameter distance between lens and source
def D_LS(zl,zs,model):
return model.angular_diameter_distance_z1z2(zl, zs)
# calculate the angular diameter distance
Dobs = D_obs(data['theta_E'], data['sigma_0'])
Dobs_err = D_obs_err(data['theta_E'], data['theta_E']*0.05, data['sigma_0'], data['err_sigma_0'])
Dteo = D_LS(data['zl'], data['zs'], cosmolo(Om0=0.3, Ode0=0.7))/D_S(data['zs'], cosmolo(Om0=0.3, Ode0=0.7))
# uncomment these plots if you need to do some validation
#plt.plot(Dobs, Dteo, 'b.')
#plt.plot(data['sigma_0'], Dteo, 'r.')
#plt.plot(data['sigma_0'], Dobs, 'b.')
# =============================================================================
# Build some functions to calculate theoretical angular diameter distance
# =============================================================================
# cut outliers based on page 3 in the paper
data = data[Dobs < 1]
# recalculate
Dobs = D_obs(data['theta_E'], data['sigma_0'])
Dobs_err = D_obs_err(data['theta_E'], data['theta_E']*0.05, data['sigma_0'], data['err_sigma_0'])
from scipy import integrate
# define cosmological parameters
c = 3 * 1e5 # km/s
H0 = 67.7 #km / (Mpc s)
Omega_m = 0.307
Omega_r = 0 * 1e-5 # too small
Omega_lambda = 1 - Omega_m
wde = -1
# make angular diameter distance calculator
# equivalent to cosmolo.angular_diameter_distance_z1z2()
# based on Equation 4 in Leat et al. (2018)
def ang_dist (z_1, z_2, Omega_m, Omega_lambda, wde):
# integration part
# integration is calculated from redshift=zl to redshift=zs
fn = lambda z: (Omega_r*(1+z)**4. \
+ Omega_m*(1+z)**3 \
+ Omega_lambda*(1+z)**(3*(1+wde)) \
)**-0.5
# return array values
return c/(H0*(1+z_2)) \
* np.asarray([integrate.quad(fn, _z[0], _z[1])[0] for _z in list(zip(z_1, z_2))])
# =============================================================================
# Validation for ang_dist() function
# =============================================================================
#DS = ang_dist(data['zl'], data['zs'], 0.3, 0.7)
#plt.plot(DS, D_LS(data['zl'], data['zs'], cosmolo(Om0=0.3, Ode0=0.7)), 'b.')
# =============================================================================
# D theoretical based on Equation 7 in Leaf et al. (2018)
def D_theory(X, Omega_m, Omega_lambda, wde):
z_1, z_2 = X
return ang_dist(z_1, z_2, Omega_m, Omega_lambda, wde) \
/ ang_dist(0*z_1, z_2, Omega_m, Omega_lambda, wde)
# =============================================================================
# Validation for ang_dist() function
# =============================================================================
# to validate D_theory() function
#plt.plot(Dteo, D_theory((data['zl'], data['zs']), 0.3, 0.7), 'b.')
# =============================================================================
# =============================================================================
# Maximum likelihood fitting
# =============================================================================
# define likelihood function as in Equation 11 in Leaf et al. (2018)
def lnlike(theta, X, y, yerr):
Omega_m, Omega_lambda, wde = theta
model = D_theory(X, Omega_m, Omega_lambda, wde)
# chi-square
chi2 = ((y-model)**2)/yerr**2
return np.sum(1/(np.sqrt(2*np.pi)*yerr) * (np.exp(-chi2/2)))
X = (data['zl'].values, data['zs'].values)
y = Dobs
yerr = Dobs_err
from scipy import optimize
# optimize module minimizes functions whereas we would like to maximize the likelihood
# that's why I put the minus(-) sign
nll = lambda *args: -lnlike(*args)
result = optimize.minimize(nll, [Omega_m, Omega_lambda, wde], args=(X, y, yerr))
m_ml, b_ml, wde_ml = result["x"]
print ('======================================')
print ('Maximum Likelihood Result')
print ('Omega_m = %.2f (%f)' %(m_ml, 0))
print ('Omega_lambda = %.2f (%f)' %(b_ml, 0))
print ('w_de = %.2f (%f)' %(wde_ml, 0))
print ('======================================\n')
# =============================================================================
# MCMC fitting
# see http://dfm.io/emcee/current/user/line/ for the detail
# =============================================================================
# define prior
def lnprior(theta):
Omega_m, Omega_lambda, wde = theta
if 0.1 < Omega_m < 0.9 \
and 0.1 < Omega_lambda < 0.9 \
and -2 < wde < 1:
return 0
return -np.inf
# define the full probability
def lnprob(theta, X, y, yerr):
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta, X, y, yerr)
#
ndim, nwalkers = 3, 300
#pos = [result["x"] + 1e-3*np.random.randn(ndim) for i in range(nwalkers)]
pos = [[Omega_m, Omega_lambda, wde] + 1e-4*np.random.randn(ndim) for i in range(nwalkers)]
import emcee
import sys
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(X, y, yerr), threads=3)
nsteps = 1000
width = 30
print ('running MCMC.....')
for i, result in enumerate(sampler.sample(pos, iterations=nsteps)):
n = int((width+1) * float(i) / nsteps)
sys.stdout.write("\r[{0}{1}]".format('#' * n, ' ' * (width - n)))
sys.stdout.write("\n")
samples = sampler.chain[:, 50:, :].reshape((-1, ndim))
import corner
fig = corner.corner(samples, labels=["$\Omega_m$", "$\Omega_\Lambda$", "$w_{\\rm de}$"],
truths=[Omega_m, Omega_lambda, wde])
plt.savefig('result/lensing.png')
plt.show()
m_mcmc, b_mcmc, wde_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(samples, [16, 50, 84],
axis=0)))
print ('======================================')
print ('MCMC Result')
print ('Omega_m = ', m_mcmc)
print ('Omega_lambda = ', b_mcmc)
print ('w_de = ', wde_mcmc)
print ('======================================\n')
output_data = pd.DataFrame({'omega_m': samples[:, 0],
'omega_l': samples[:, 1],
'wde' : samples[:, 2]})
output_data.to_csv('result/output_lensing.csv', index=False) | [
"matplotlib.pyplot.savefig",
"numpy.sqrt",
"pandas.read_csv",
"matplotlib.pyplot.show",
"scipy.integrate.quad",
"scipy.optimize.minimize",
"emcee.EnsembleSampler",
"numpy.exp",
"numpy.deg2rad",
"numpy.isfinite",
"pandas.DataFrame",
"numpy.percentile",
"corner.corner",
"numpy.random.randn",... | [((312, 343), 'pandas.read_csv', 'pd.read_csv', (['"""lensing_data.csv"""'], {}), "('lensing_data.csv')\n", (323, 343), True, 'import pandas as pd\n'), ((5466, 5537), 'scipy.optimize.minimize', 'optimize.minimize', (['nll', '[Omega_m, Omega_lambda, wde]'], {'args': '(X, y, yerr)'}), '(nll, [Omega_m, Omega_lambda, wde], args=(X, y, yerr))\n', (5483, 5537), False, 'from scipy import optimize\n'), ((6683, 6758), 'emcee.EnsembleSampler', 'emcee.EnsembleSampler', (['nwalkers', 'ndim', 'lnprob'], {'args': '(X, y, yerr)', 'threads': '(3)'}), '(nwalkers, ndim, lnprob, args=(X, y, yerr), threads=3)\n', (6704, 6758), False, 'import emcee\n'), ((6995, 7017), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (7011, 7017), False, 'import sys\n'), ((7096, 7222), 'corner.corner', 'corner.corner', (['samples'], {'labels': "['$\\\\Omega_m$', '$\\\\Omega_\\\\Lambda$', '$w_{\\\\rm de}$']", 'truths': '[Omega_m, Omega_lambda, wde]'}), "(samples, labels=['$\\\\Omega_m$', '$\\\\Omega_\\\\Lambda$',\n '$w_{\\\\rm de}$'], truths=[Omega_m, Omega_lambda, wde])\n", (7109, 7222), False, 'import corner\n'), ((7239, 7272), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""result/lensing.png"""'], {}), "('result/lensing.png')\n", (7250, 7272), True, 'import matplotlib.pyplot as plt\n'), ((7273, 7283), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7281, 7283), True, 'import matplotlib.pyplot as plt\n'), ((7717, 7809), 'pandas.DataFrame', 'pd.DataFrame', (["{'omega_m': samples[:, 0], 'omega_l': samples[:, 1], 'wde': samples[:, 2]}"], {}), "({'omega_m': samples[:, 0], 'omega_l': samples[:, 1], 'wde':\n samples[:, 2]})\n", (7729, 7809), True, 'import pandas as pd\n'), ((619, 645), 'numpy.deg2rad', 'np.deg2rad', (['(theta_E / 3600)'], {}), '(theta_E / 3600)\n', (629, 645), True, 'import numpy as np\n'), ((959, 1048), 'numpy.sqrt', 'np.sqrt', (['((err_theta_E / theta_E) ** 2 + (2 * err_sigma_0 / sigma_0) ** 2 + 0.12 ** 2)'], {}), 
'((err_theta_E / theta_E) ** 2 + (2 * err_sigma_0 / sigma_0) ** 2 + \n 0.12 ** 2)\n', (966, 1048), True, 'import numpy as np\n'), ((6368, 6383), 'numpy.isfinite', 'np.isfinite', (['lp'], {}), '(lp)\n', (6379, 6383), True, 'import numpy as np\n'), ((1559, 1593), 'astropy.units.Quantity', 'u.Quantity', (['[0.0, 0.0, 0.06]', 'u.eV'], {}), '([0.0, 0.0, 0.06], u.eV)\n', (1569, 1593), True, 'import astropy.units as u\n'), ((5181, 5198), 'numpy.exp', 'np.exp', (['(-chi2 / 2)'], {}), '(-chi2 / 2)\n', (5187, 5198), True, 'import numpy as np\n'), ((6598, 6619), 'numpy.random.randn', 'np.random.randn', (['ndim'], {}), '(ndim)\n', (6613, 6619), True, 'import numpy as np\n'), ((7392, 7436), 'numpy.percentile', 'np.percentile', (['samples', '[16, 50, 84]'], {'axis': '(0)'}), '(samples, [16, 50, 84], axis=0)\n', (7405, 7436), True, 'import numpy as np\n'), ((3596, 3628), 'scipy.integrate.quad', 'integrate.quad', (['fn', '_z[0]', '_z[1]'], {}), '(fn, _z[0], _z[1])\n', (3610, 3628), False, 'from scipy import integrate\n'), ((5155, 5173), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (5162, 5173), True, 'import numpy as np\n')] |
import logging
from collections import defaultdict
from typing import Dict, List, Optional
import numpy as np
from ..utils.colmap import read_model
from ..utils.quaternions import weighted_pose
logger = logging.getLogger(__name__)
class Model3D:
def __init__(self, path):
logger.info('Reading COLMAP model %s.', path)
self.cameras, self.dbs, self.points3D = read_model(path)
self.name2id = {i.name: i.id for i in self.dbs.values()}
def covisbility_filtering(self, dbids):
clusters = do_covisibility_clustering(dbids, self.dbs, self.points3D)
dbids = clusters[0]
return dbids
def pose_approximation(self, qname, dbids, global_descriptors, alpha=8):
"""Described in:
Benchmarking Image Retrieval for Visual Localization.
<NAME>, <NAME>, <NAME>,
<NAME>, <NAME>. 3DV 2020.
"""
dbs = [self.dbs[i] for i in dbids]
dbdescs = np.stack([global_descriptors[im.name] for im in dbs])
qdesc = global_descriptors[qname]
sim = dbdescs @ qdesc
weights = sim**alpha
weights /= weights.sum()
tvecs = [im.tvec for im in dbs]
qvecs = [im.qvec for im in dbs]
return weighted_pose(tvecs, qvecs, weights)
def get_dbid_to_p3dids(self, p3did_to_dbids):
"""Link the database images to selected 3D points."""
dbid_to_p3dids = defaultdict(list)
for p3id, obs_dbids in p3did_to_dbids.items():
for obs_dbid in obs_dbids:
dbid_to_p3dids[obs_dbid].append(p3id)
return dict(dbid_to_p3dids)
def get_p3did_to_dbids(self, dbids: List, loc: Optional[Dict] = None,
inliers: Optional[List] = None,
point_selection: str = 'all',
min_track_length: int = 3):
"""Return a dictionary mapping 3D point ids to their covisible dbids.
This function can use hloc sfm logs to only select inliers.
Which can be further used to select top reference images / in
sufficient track length selection of points.
"""
p3did_to_dbids = defaultdict(set)
if point_selection == 'all':
for dbid in dbids:
p3dids = self.dbs[dbid].point3D_ids
for p3did in p3dids[p3dids != -1]:
p3did_to_dbids[p3did].add(dbid)
elif point_selection in ['inliers', 'matched']:
if loc is None:
raise ValueError('"{point_selection}" point selection requires'
' localization logs.')
# The given SfM model must match the localization SfM model!
for (p3did, dbidxs), inlier in zip(loc["keypoint_index_to_db"][1],
inliers):
if inlier or point_selection == 'matched':
obs_dbids = set(loc["db"][dbidx] for dbidx in dbidxs)
obs_dbids &= set(dbids)
if len(obs_dbids) > 0:
p3did_to_dbids[p3did] |= obs_dbids
else:
raise ValueError(f"{point_selection} point selection not defined.")
# Filter unstable points (min track length)
p3did_to_dbids = {
i: v
for i, v in p3did_to_dbids.items()
if len(self.points3D[i].image_ids) >= min_track_length
}
return p3did_to_dbids
def rerank_and_filter_db_images(self, dbids: List, ninl_dbs: List,
num_dbs: int, min_matches_db: int = 0):
"""Re-rank the images by inlier count and filter invalid images."""
dbids = [dbids[i] for i in np.argsort(-ninl_dbs)
if ninl_dbs[i] > min_matches_db]
# Keep top num_images matched image images
dbids = dbids[:num_dbs]
return dbids
def get_db_inliers(self, loc: Dict, dbids: List, inliers: List):
"""Get the number of inliers for each db."""
inliers = loc["PnP_ret"]["inliers"]
dbids = loc["db"]
ninl_dbs = np.zeros(len(dbids))
for (_, dbidxs), inl in zip(loc["keypoint_index_to_db"][1], inliers):
if not inl:
continue
for dbidx in dbidxs:
ninl_dbs[dbidx] += 1
return ninl_dbs
def do_covisibility_clustering(frame_ids, all_images, points3D):
clusters = []
visited = set()
for frame_id in frame_ids:
# Check if already labeled
if frame_id in visited:
continue
# New component
clusters.append([])
queue = {frame_id}
while len(queue):
exploration_frame = queue.pop()
# Already part of the component
if exploration_frame in visited:
continue
visited.add(exploration_frame)
clusters[-1].append(exploration_frame)
observed = all_images[exploration_frame].point3D_ids
connected_frames = set(
j for i in observed if i != -1 for j in points3D[i].image_ids)
connected_frames &= set(frame_ids)
connected_frames -= visited
queue |= connected_frames
clusters = sorted(clusters, key=len, reverse=True)
return clusters
| [
"logging.getLogger",
"numpy.stack",
"collections.defaultdict",
"numpy.argsort"
] | [((205, 232), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (222, 232), False, 'import logging\n'), ((965, 1018), 'numpy.stack', 'np.stack', (['[global_descriptors[im.name] for im in dbs]'], {}), '([global_descriptors[im.name] for im in dbs])\n', (973, 1018), True, 'import numpy as np\n'), ((1424, 1441), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1435, 1441), False, 'from collections import defaultdict\n'), ((2178, 2194), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (2189, 2194), False, 'from collections import defaultdict\n'), ((3732, 3753), 'numpy.argsort', 'np.argsort', (['(-ninl_dbs)'], {}), '(-ninl_dbs)\n', (3742, 3753), True, 'import numpy as np\n')] |
import numpy as np
from chainer0 import Function, Variable
from chainer0.functions import exp, sum, log
def _softmax(x):
if x.ndim == 2:
x = x - x.max(axis=1, keepdims=True)
x = np.exp(x)
x /= x.sum(axis=1, keepdims=True)
elif x.ndim == 1:
x = x - np.max(x)
x = np.exp(x) / np.sum(np.exp(x))
return x
def _cross_entropy(y, t):
if y.ndim == 1:
t = t.reshape(1, t.size)
y = y.reshape(1, y.size)
# 教師データがone-hot-vectorの場合、正解ラベルのインデックスに変換
if t.size == y.size:
t = t.argmax(axis=1)
batch_size = y.shape[0]
return -np.sum(np.log(y[np.arange(batch_size), t] + 1e-7)) / batch_size
class SoftmaxCrossEntropy(Function):
def forward(self, x, t):
y = _softmax(x)
self.y = y
# 教師ラベルがone-hotベクトルの場合、正解のインデックスに変換
if t.size == y.size:
t = t.argmax(axis=1)
self.t = t
loss = _cross_entropy(y, t)
return loss
def backward(self, dout):
N = self.y.shape[0]
dx = self.y.copy()
dx[np.arange(N), self.t] -= 1
dx = Variable(dx)
dx *= dout
dx = dx / N
return dx, None
def softmax_cross_entropy(x, t):
f = SoftmaxCrossEntropy()
return f(x, t)
'''
def softmax_cross_entropy(x, t):
tmp = exp(x)
axis = len(x.data.shape) - 1
prob = tmp / sum(tmp, axis=axis, keepdims=True)
t_flatten = t.data.flatten()
p = prob.data.reshape(len(t_flatten), -1)
t
loss =
return total / N
''' | [
"numpy.exp",
"chainer0.Variable",
"numpy.arange",
"numpy.max"
] | [((202, 211), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (208, 211), True, 'import numpy as np\n'), ((1114, 1126), 'chainer0.Variable', 'Variable', (['dx'], {}), '(dx)\n', (1122, 1126), False, 'from chainer0 import Function, Variable\n'), ((292, 301), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (298, 301), True, 'import numpy as np\n'), ((314, 323), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (320, 323), True, 'import numpy as np\n'), ((1074, 1086), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (1083, 1086), True, 'import numpy as np\n'), ((333, 342), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (339, 342), True, 'import numpy as np\n'), ((633, 654), 'numpy.arange', 'np.arange', (['batch_size'], {}), '(batch_size)\n', (642, 654), True, 'import numpy as np\n')] |
"""Convergence rate between iterative-step-z and learn-step-z algorithm for
TA decomposition.
"""
# Authors: <NAME> <<EMAIL>>
# License: BSD (3-clause)
import os
import shutil
import time
import argparse
import json
import pickle
import matplotlib as mpl
# Configure Matplotlib for LaTeX-rendered figure output (pgf/pdflatex) and a
# uniform 18pt font for ticks and axis labels; must run before pyplot is used.
mpl.rcParams['pgf.texsystem'] = 'pdflatex'
mpl.rcParams['text.usetex'] = True
mpl.rcParams['text.latex.preamble'] = [r'\usepackage{amssymb}']
mpl.rcParams['xtick.labelsize'] = 18
mpl.rcParams['ytick.labelsize'] = 18
mpl.rcParams['axes.labelsize'] = 18
import matplotlib.pyplot as plt
import numpy as np
from nilearn.input_data import NiftiMasker
from carpet.utils import init_vuz
from pyta import TA
from pyta.hrf_model import double_gamma_hrf
from pyta.convolution import make_toeplitz
from pyta.utils import compute_lbda_max, logspace_layers
from pyta.loss_and_grad import _obj_t_analysis
if __name__ == '__main__':
    # ----- command-line interface -----
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--max-iter', type=int, default=20,
                        help='Max number of iterations for the global loop.')
    parser.add_argument('--temp-reg', type=float, default=0.5,
                        help='Temporal regularisation parameter.')
    parser.add_argument('--max-iter-z', type=int, default=100,
                        help='Max number of iterations for the z-step.')
    parser.add_argument('--load-net', type=str, default=None, nargs='+',
                        help='Load pretrained network parameters.')
    parser.add_argument('--max-training-iter', type=int, default=1000,
                        help='Max number of iterations to train the '
                             'learnable networks for the z-step.')
    parser.add_argument('--n-time-frames', type=int, default=100,
                        help='Number of timeframes to retain from the the '
                             'data fMRI.')
    parser.add_argument('--plots-dir', type=str, default='outputs',
                        help='Outputs directory for plots.')
    parser.add_argument('--iter-mult', type=float, default='2.0',
                        help='Multiplicative coefficient to obtain the number'
                             ' of iteration.')
    parser.add_argument('--seed', type=int, default=None,
                        help='Set the seed for the experiment. Can be used '
                             'for debug or to freeze experiments.')
    args = parser.parse_args()
    print(__doc__)
    print('*' * 80)
    t0_global = time.time()
    # Create the output directory and archive the exact run configuration
    # (CLI arguments as JSON plus a copy of this script) for reproducibility.
    if not os.path.exists(args.plots_dir):
        os.makedirs(args.plots_dir)
    filename = os.path.join(args.plots_dir, 'command_line.json')
    print(f"Archiving '(unknown)' under '{args.plots_dir}'")
    with open(filename, 'w') as jsonfile:
        json.dump(args._get_kwargs(), jsonfile)
    print(f"Archiving '{__file__}' under '{args.plots_dir}'")
    shutil.copyfile(__file__, os.path.join(args.plots_dir, __file__))
    ###########################################################################
    # Parameters to set for the experiment
    hrf_time_frames = 30
    nx = ny = nz = 10
    lw = 7
    ###########################################################################
    # Real data loading
    t_r = 0.735
    n_times_valid = args.n_time_frames - hrf_time_frames + 1
    # HRF kernel, first-difference operator D and Toeplitz convolution matrix H
    # used by the TA (total-activation) decomposition.
    h = double_gamma_hrf(t_r, hrf_time_frames)
    D = (np.eye(n_times_valid, k=-1) - np.eye(n_times_valid, k=0))[:, :-1]
    H = make_toeplitz(h, n_times_valid).T
    # load data
    sub1_img = 'data/6025086_20227_MNI_RS.nii.gz'
    sub2_img = 'data/6025837_20227_MNI_RS.nii.gz'
    masker = NiftiMasker(standardize=True, detrend=True, low_pass=0.1,
                high_pass=0.01, t_r=t_r, memory='__cache_dir__')  # noqa: E128
    masker.fit([sub1_img, sub2_img])
    y_train = masker.inverse_transform(masker.transform(sub1_img)).get_data()
    y_test = masker.inverse_transform(masker.transform(sub2_img)).get_data()
    # reduce dimensionality
    # Keep a small nx x ny x nz spatial ROI and the first n_time_frames volumes.
    start_ = 10
    mask_roi = (slice(start_, start_ + nx),
                slice(start_, start_ + ny),
                slice(start_, start_ + nz),
                slice(0, args.n_time_frames))
    y_train = y_train[mask_roi]
    y_test = y_test[mask_roi]
    # lbda-max scale data
    y_train /= compute_lbda_max(H, y_train, per_sample=False)
    y_test /= compute_lbda_max(H, y_test, per_sample=False)
    print(f"Shape of the train-set : {y_train.shape}")
    print(f"Shape of the test-set : {y_test.shape}")
    ###########################################################################
    # Main experimentation
    # Network depths at which the learned solver is evaluated.
    all_layers = logspace_layers(n_layers=10, max_depth=args.max_iter_z)
    # Baseline: iterative FISTA z-step solver on the test subject.
    params = dict(t_r=t_r, h=h, n_times_valid=n_times_valid,
                  name='Iterative-z',
                  max_iter_z=int(args.iter_mult * args.max_iter_z),
                  solver_type='fista-z-step', verbose=1)
    ta_iter = TA(**params)
    t0 = time.time()
    _, _, _ = ta_iter.prox_t(y_test, args.temp_reg)
    print(f"ta_iterative.prox_t finished : {time.time() - t0:.2f}s")
    loss_ta_iter = ta_iter.l_loss_prox_t
    n_samples = nx * ny * nz
    y_test_ravel = y_test.reshape(n_samples, args.n_time_frames)
    # Initial point u0 gives the depth-0 loss of the learned solver.
    _, u0, _ = init_vuz(H, D, y_test_ravel, args.temp_reg)
    loss_ta_learn = [_obj_t_analysis(u0, y_test_ravel, h, args.temp_reg)]
    init_net_params = None
    params = dict(t_r=t_r, h=h, n_times_valid=n_times_valid,
                  net_solver_training_type='recursive',
                  name='Learned-z', solver_type='learn-z-step', verbose=1,
                  max_iter_training_net=args.max_training_iter)
    # Train (or load) one learned solver per network depth; networks are
    # warm-started recursively from the previous depth's parameters.
    for i, n_layers in enumerate(all_layers):
        params['max_iter_z'] = n_layers
        if args.load_net is not None:
            # load and re-used pre-fitted parameters case
            filename = sorted(args.load_net)[i]  # order is important
            with open(filename, 'rb') as pfile:
                init_net_params = pickle.load(pfile)
            print(f"Loading parameters from '(unknown)'")
            params['init_net_parameters'] = init_net_params
            ta_learn = TA(**params)
        else:
            # fit parameters and save parameters case
            params['init_net_parameters'] = init_net_params
            ta_learn = TA(**params)
            ta_learn.fit(y_train, args.temp_reg)
            init_net_params = ta_learn.net_solver.export_parameters()
            filename = f'fitted_params_n_layers_{n_layers:02d}.pkl'
            filename = os.path.join(args.plots_dir, filename)
            with open(filename, 'wb') as pfile:
                pickle.dump(init_net_params, pfile)
            print(f"Saving fitted parameters under '(unknown)'")
        t0 = time.time()
        _, u, _ = ta_learn.prox_t(y_test, args.temp_reg, reshape_4d=False)
        print(f"ta_learn.prox_t finished : {time.time() - t0:.2f}s")
        loss_ta_learn.append(_obj_t_analysis(u, y_test_ravel, h,
                                             args.temp_reg))
    loss_ta_learn = np.array(loss_ta_learn)
    ###########################################################################
    # Plotting
    # Near-exact reference solution (10000 iterations) to anchor the loss gap.
    params = dict(t_r=t_r, h=h, n_times_valid=n_times_valid, max_iter_z=10000,
                  name='Ref-z', solver_type='fista-z-step', verbose=0)
    ta_ref = TA(**params)
    t0 = time.time()
    _, _, _ = ta_ref.prox_t(y_test, args.temp_reg)
    print(f"ta_ref.prox_t finished : {time.time() - t0:.2f}s")
    min_loss = ta_ref.l_loss_prox_t[-1]
    all_layers = [0] + all_layers
    eps = 1.0e-20
    plt.figure(f"[{__file__}] Loss functions", figsize=(6, 3))
    xx = np.arange(start=0, stop=int(args.iter_mult * args.max_iter_z + 1))
    plt.semilogy(xx, loss_ta_iter - min_loss, lw=lw, color='C1',
                 label='Accelerated PGD - analysis')
    plt.semilogy(all_layers, loss_ta_learn - min_loss, lw=lw, color='C3',
                 label='LPGD-Taut')
    plt.legend(bbox_to_anchor=(0.0, 1.02, 1.0, 0.2), loc="lower left",
               mode="expand", borderaxespad=0, ncol=1, fontsize=18)
    plt.grid()
    plt.xlabel("Layers $t$")
    plt.ylabel(r'$\mathbb E \left[P_x(u^{(t)}) - P_x(u^{*}) \right]$')
    plt.tight_layout()
    filename = os.path.join(args.plots_dir, "loss_comparison.pdf")
    plt.savefig(filename, dpi=300)
    delta_t = time.time() - t0_global
    delta_t = time.strftime("%H h %M min %S s", time.gmtime(delta_t))
    print("Script runs in: {}".format(delta_t))
    plt.show()
| [
"pyta.hrf_model.double_gamma_hrf",
"matplotlib.pyplot.grid",
"carpet.utils.init_vuz",
"matplotlib.pyplot.ylabel",
"pyta.TA",
"numpy.array",
"pyta.convolution.make_toeplitz",
"matplotlib.pyplot.semilogy",
"os.path.exists",
"argparse.ArgumentParser",
"pyta.utils.logspace_layers",
"matplotlib.pyp... | [((892, 936), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (915, 936), False, 'import argparse\n'), ((2467, 2478), 'time.time', 'time.time', ([], {}), '()\n', (2476, 2478), False, 'import time\n'), ((2575, 2624), 'os.path.join', 'os.path.join', (['args.plots_dir', '"""command_line.json"""'], {}), "(args.plots_dir, 'command_line.json')\n", (2587, 2624), False, 'import os\n'), ((3283, 3321), 'pyta.hrf_model.double_gamma_hrf', 'double_gamma_hrf', (['t_r', 'hrf_time_frames'], {}), '(t_r, hrf_time_frames)\n', (3299, 3321), False, 'from pyta.hrf_model import double_gamma_hrf\n'), ((3570, 3680), 'nilearn.input_data.NiftiMasker', 'NiftiMasker', ([], {'standardize': '(True)', 'detrend': '(True)', 'low_pass': '(0.1)', 'high_pass': '(0.01)', 't_r': 't_r', 'memory': '"""__cache_dir__"""'}), "(standardize=True, detrend=True, low_pass=0.1, high_pass=0.01,\n t_r=t_r, memory='__cache_dir__')\n", (3581, 3680), False, 'from nilearn.input_data import NiftiMasker\n'), ((4219, 4265), 'pyta.utils.compute_lbda_max', 'compute_lbda_max', (['H', 'y_train'], {'per_sample': '(False)'}), '(H, y_train, per_sample=False)\n', (4235, 4265), False, 'from pyta.utils import compute_lbda_max, logspace_layers\n'), ((4280, 4325), 'pyta.utils.compute_lbda_max', 'compute_lbda_max', (['H', 'y_test'], {'per_sample': '(False)'}), '(H, y_test, per_sample=False)\n', (4296, 4325), False, 'from pyta.utils import compute_lbda_max, logspace_layers\n'), ((4560, 4615), 'pyta.utils.logspace_layers', 'logspace_layers', ([], {'n_layers': '(10)', 'max_depth': 'args.max_iter_z'}), '(n_layers=10, max_depth=args.max_iter_z)\n', (4575, 4615), False, 'from pyta.utils import compute_lbda_max, logspace_layers\n'), ((4855, 4867), 'pyta.TA', 'TA', ([], {}), '(**params)\n', (4857, 4867), False, 'from pyta import TA\n'), ((4878, 4889), 'time.time', 'time.time', ([], {}), '()\n', (4887, 4889), False, 'import time\n'), ((5162, 5205), 
'carpet.utils.init_vuz', 'init_vuz', (['H', 'D', 'y_test_ravel', 'args.temp_reg'], {}), '(H, D, y_test_ravel, args.temp_reg)\n', (5170, 5205), False, 'from carpet.utils import init_vuz\n'), ((6972, 6995), 'numpy.array', 'np.array', (['loss_ta_learn'], {}), '(loss_ta_learn)\n', (6980, 6995), True, 'import numpy as np\n'), ((7255, 7267), 'pyta.TA', 'TA', ([], {}), '(**params)\n', (7257, 7267), False, 'from pyta import TA\n'), ((7278, 7289), 'time.time', 'time.time', ([], {}), '()\n', (7287, 7289), False, 'import time\n'), ((7502, 7560), 'matplotlib.pyplot.figure', 'plt.figure', (['f"""[{__file__}] Loss functions"""'], {'figsize': '(6, 3)'}), "(f'[{__file__}] Loss functions', figsize=(6, 3))\n", (7512, 7560), True, 'import matplotlib.pyplot as plt\n'), ((7641, 7742), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['xx', '(loss_ta_iter - min_loss)'], {'lw': 'lw', 'color': '"""C1"""', 'label': '"""Accelerated PGD - analysis"""'}), "(xx, loss_ta_iter - min_loss, lw=lw, color='C1', label=\n 'Accelerated PGD - analysis')\n", (7653, 7742), True, 'import matplotlib.pyplot as plt\n'), ((7759, 7852), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['all_layers', '(loss_ta_learn - min_loss)'], {'lw': 'lw', 'color': '"""C3"""', 'label': '"""LPGD-Taut"""'}), "(all_layers, loss_ta_learn - min_loss, lw=lw, color='C3', label\n ='LPGD-Taut')\n", (7771, 7852), True, 'import matplotlib.pyplot as plt\n'), ((7869, 7993), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.0, 1.02, 1.0, 0.2)', 'loc': '"""lower left"""', 'mode': '"""expand"""', 'borderaxespad': '(0)', 'ncol': '(1)', 'fontsize': '(18)'}), "(bbox_to_anchor=(0.0, 1.02, 1.0, 0.2), loc='lower left', mode=\n 'expand', borderaxespad=0, ncol=1, fontsize=18)\n", (7879, 7993), True, 'import matplotlib.pyplot as plt\n'), ((8008, 8018), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (8016, 8018), True, 'import matplotlib.pyplot as plt\n'), ((8023, 8047), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Layers 
$t$"""'], {}), "('Layers $t$')\n", (8033, 8047), True, 'import matplotlib.pyplot as plt\n'), ((8052, 8120), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\mathbb E \\\\left[P_x(u^{(t)}) - P_x(u^{*}) \\\\right]$"""'], {}), "('$\\\\mathbb E \\\\left[P_x(u^{(t)}) - P_x(u^{*}) \\\\right]$')\n", (8062, 8120), True, 'import matplotlib.pyplot as plt\n'), ((8123, 8141), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8139, 8141), True, 'import matplotlib.pyplot as plt\n'), ((8158, 8209), 'os.path.join', 'os.path.join', (['args.plots_dir', '"""loss_comparison.pdf"""'], {}), "(args.plots_dir, 'loss_comparison.pdf')\n", (8170, 8209), False, 'import os\n'), ((8214, 8244), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {'dpi': '(300)'}), '(filename, dpi=300)\n', (8225, 8244), True, 'import matplotlib.pyplot as plt\n'), ((8407, 8417), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8415, 8417), True, 'import matplotlib.pyplot as plt\n'), ((2491, 2521), 'os.path.exists', 'os.path.exists', (['args.plots_dir'], {}), '(args.plots_dir)\n', (2505, 2521), False, 'import os\n'), ((2531, 2558), 'os.makedirs', 'os.makedirs', (['args.plots_dir'], {}), '(args.plots_dir)\n', (2542, 2558), False, 'import os\n'), ((2870, 2908), 'os.path.join', 'os.path.join', (['args.plots_dir', '__file__'], {}), '(args.plots_dir, __file__)\n', (2882, 2908), False, 'import os\n'), ((3405, 3436), 'pyta.convolution.make_toeplitz', 'make_toeplitz', (['h', 'n_times_valid'], {}), '(h, n_times_valid)\n', (3418, 3436), False, 'from pyta.convolution import make_toeplitz\n'), ((5227, 5278), 'pyta.loss_and_grad._obj_t_analysis', '_obj_t_analysis', (['u0', 'y_test_ravel', 'h', 'args.temp_reg'], {}), '(u0, y_test_ravel, h, args.temp_reg)\n', (5242, 5278), False, 'from pyta.loss_and_grad import _obj_t_analysis\n'), ((6669, 6680), 'time.time', 'time.time', ([], {}), '()\n', (6678, 6680), False, 'import time\n'), ((8260, 8271), 'time.time', 'time.time', ([], {}), '()\n', 
(8269, 8271), False, 'import time\n'), ((8332, 8352), 'time.gmtime', 'time.gmtime', (['delta_t'], {}), '(delta_t)\n', (8343, 8352), False, 'import time\n'), ((3331, 3358), 'numpy.eye', 'np.eye', (['n_times_valid'], {'k': '(-1)'}), '(n_times_valid, k=-1)\n', (3337, 3358), True, 'import numpy as np\n'), ((3361, 3387), 'numpy.eye', 'np.eye', (['n_times_valid'], {'k': '(0)'}), '(n_times_valid, k=0)\n', (3367, 3387), True, 'import numpy as np\n'), ((6062, 6074), 'pyta.TA', 'TA', ([], {}), '(**params)\n', (6064, 6074), False, 'from pyta import TA\n'), ((6227, 6239), 'pyta.TA', 'TA', ([], {}), '(**params)\n', (6229, 6239), False, 'from pyta import TA\n'), ((6450, 6488), 'os.path.join', 'os.path.join', (['args.plots_dir', 'filename'], {}), '(args.plots_dir, filename)\n', (6462, 6488), False, 'import os\n'), ((6854, 6904), 'pyta.loss_and_grad._obj_t_analysis', '_obj_t_analysis', (['u', 'y_test_ravel', 'h', 'args.temp_reg'], {}), '(u, y_test_ravel, h, args.temp_reg)\n', (6869, 6904), False, 'from pyta.loss_and_grad import _obj_t_analysis\n'), ((5901, 5919), 'pickle.load', 'pickle.load', (['pfile'], {}), '(pfile)\n', (5912, 5919), False, 'import pickle\n'), ((6553, 6588), 'pickle.dump', 'pickle.dump', (['init_net_params', 'pfile'], {}), '(init_net_params, pfile)\n', (6564, 6588), False, 'import pickle\n'), ((4986, 4997), 'time.time', 'time.time', ([], {}), '()\n', (4995, 4997), False, 'import time\n'), ((7379, 7390), 'time.time', 'time.time', ([], {}), '()\n', (7388, 7390), False, 'import time\n'), ((6800, 6811), 'time.time', 'time.time', ([], {}), '()\n', (6809, 6811), False, 'import time\n')] |
#!/usr/bin/env python
import ast
import os
import numpy as np
from scipy.stats import gamma
import mapel.elections.models.mallows as mallows
from mapel.main._glossary import *
from mapel.main._inner_distances import hamming
from mapel.elections.models_ import generate_approval_votes, store_votes_in_a_file
from mapel.elections.objects.Election import Election
# from mapel.elections.objects.OrdinalElection import update_params
from mapel.elections.other.winners import compute_sntv_winners, compute_borda_winners, \
compute_stv_winners
from mapel.elections.other.winners2 import generate_winners
from mapel.main._inner_distances import hamming
class ApprovalElection(Election):
    """Election with approval ballots.

    Besides the raw votes, the object caches several derived representations
    (approvalwise vector, co-approval frequency vectors, pairwise matrices,
    voter/candidate likeness vectors) that are filled lazily by the
    ``votes_to_*`` methods and used when computing distances between elections.
    """

    def __init__(self, experiment_id, election_id, votes=None, alpha=1, model_id=None,
                 ballot='approval', num_voters=None, num_candidates=None, _import=False,
                 shift: bool = False, params=None, variable=None, label=None):

        super().__init__(experiment_id, election_id, votes=votes, alpha=alpha,
                         model_id=model_id, ballot=ballot, num_voters=num_voters,
                         num_candidates=num_candidates, label=label)

        self.params = params
        self.variable = variable

        # Lazily-filled caches for the derived representations.
        self.approvalwise_vector = []
        self.coapproval_frequency_vectors = []
        self.voterlikeness_vectors = []
        self.pairwise_matrix = []
        self.candidatelikeness_original_vectors = []
        self.candidatelikeness_sorted_vectors = []
        self.hamming_candidates = []
        self.reverse_approvals = []

        if _import and experiment_id != 'virtual':
            # Best-effort import from disk: a missing or corrupt .app file
            # leaves the election in its constructor-initialized state.
            try:
                fake = check_if_fake(experiment_id, election_id)
                if fake:
                    self.model_id, self.params, self.num_voters, self.num_candidates = \
                        import_fake_app_election(experiment_id, election_id)
                else:
                    self.votes, self.num_voters, self.num_candidates, self.params, \
                        self.model_id = import_real_app_election(experiment_id, election_id, shift)
                    try:
                        self.alpha = self.params['alpha']
                    except Exception:
                        # 'alpha' may be absent, or params may not be a dict.
                        self.alpha = 1
            except Exception:
                pass

    def votes_to_approvalwise_vector(self) -> None:
        """Convert votes to a sorted approvalwise vector.

        For the fake models the vector is computed analytically; otherwise it
        is the sorted per-candidate fraction of approving voters.
        """
        if self.model_id == 'approval_half_1':
            self.approvalwise_vector = np.sort(np.array([0.75 for _ in
                                                        range(int(self.num_candidates / 2))] +
                                                       [0.25 for _ in
                                                        range(int(self.num_candidates / 2))]))
        elif self.model_id == 'approval_half_2':
            self.approvalwise_vector = np.sort(np.array([i / (self.num_candidates - 1) for i in
                                                        range(self.num_candidates)]))
        elif self.model_id == 'approval_skeleton':
            self.approvalwise_vector = np.sort(get_skeleton_approvalwise_vector(self))
        else:
            approvalwise_vector = np.zeros([self.num_candidates])
            for vote in self.votes:
                for c in vote:
                    approvalwise_vector[c] += 1
            approvalwise_vector = approvalwise_vector / self.num_voters
            self.approvalwise_vector = np.sort(approvalwise_vector)

    def votes_to_coapproval_frequency_vectors(self, vector_type='A') -> None:
        """Convert votes to per-candidate co-approval frequency vectors.

        Each of the A-E variants stores the histogram of ballot sizes in a
        different layout of the length-2m vector, split between ballots that
        do and do not contain the candidate.
        """
        vectors = np.zeros([self.num_candidates, self.num_candidates * 2])
        for vote in self.votes:
            size = len(vote)
            for c in range(self.num_candidates):
                if c in vote:
                    if vector_type in ['A', 'B']:
                        vectors[c][size - 1] += 1
                    elif vector_type == 'C':
                        vectors[c][2 * size - 1] += 1
                    elif vector_type in ['D', 'E']:
                        vectors[c][self.num_candidates + size - 1] += 1
                else:
                    if vector_type == 'A':
                        vectors[c][self.num_candidates + size] += 1
                    elif vector_type == 'B':
                        vectors[c][2 * self.num_candidates - size - 1] += 1
                    elif vector_type == 'C':
                        vectors[c][2 * size] += 1
                    elif vector_type == 'D':
                        vectors[c][size] += 1
                    elif vector_type == 'E':
                        vectors[c][self.num_candidates - size - 1] += 1
        vectors = vectors / self.num_voters
        # vectors = vectors / experiment.num_candidates
        self.coapproval_frequency_vectors = vectors

    def votes_to_pairwise_matrix(self) -> None:
        """Compute, for every candidate pair, the fraction of voters that
        treat both candidates the same way (both approved or both not)."""
        matrix = np.zeros([self.num_candidates, self.num_candidates])
        for c_1 in range(self.num_candidates):
            for c_2 in range(self.num_candidates):
                for vote in self.votes:
                    if (c_1 in vote and c_2 in vote) or (c_1 not in vote and c_2 not in vote):
                        matrix[c_1][c_2] += 1
        matrix = matrix / self.num_voters
        self.pairwise_matrix = matrix

    def votes_to_candidatelikeness_original_vectors(self) -> None:
        """Compute, for every candidate pair, the fraction of voters that
        approve exactly one of the two candidates (symmetric difference)."""
        matrix = np.zeros([self.num_candidates, self.num_candidates])
        for c_1 in range(self.num_candidates):
            for c_2 in range(self.num_candidates):
                for vote in self.votes:
                    if (c_1 in vote and c_2 not in vote) or (c_1 not in vote and c_2 in vote):
                        matrix[c_1][c_2] += 1
        matrix = matrix / self.num_voters
        self.candidatelikeness_original_vectors = matrix

    def votes_to_candidatelikeness_sorted_vectors(self) -> None:
        """Row-sorted variant of the candidate-likeness matrix."""
        self.votes_to_candidatelikeness_original_vectors()
        self.candidatelikeness_sorted_vectors = np.sort(self.candidatelikeness_original_vectors)

    def votes_to_voterlikeness_vectors(self, vector_type='hamming') -> None:
        """Compute sorted pairwise voter-similarity vectors
        ('hamming' or 'martin' similarity)."""
        vectors = np.zeros([self.num_voters, self.num_voters])
        for i in range(self.num_voters):
            for j in range(self.num_voters):
                set_a = self.votes[i]
                set_b = self.votes[j]
                if vector_type == 'hamming':
                    vectors[i][j] = hamming(set_a, set_b)
                elif vector_type == 'martin':
                    vectors[i][j] = len(set_a.intersection(set_b)) - len(set_a)
            vectors[i] = sorted(vectors[i])
        self.voterlikeness_vectors = vectors

    def compute_reverse_approvals(self):
        """Build, for every candidate, the set of ids of voters approving it."""
        reverse_approvals = [set() for _ in range(self.num_candidates)]
        for i, vote in enumerate(self.votes):
            for c in vote:
                # Bug fix: add the voter id itself. The previous code did
                # reverse_approvals[c].add({i}), which raises TypeError
                # because a set is unhashable and cannot be a set element.
                reverse_approvals[c].add(i)
        self.reverse_approvals = reverse_approvals

    # PREPARE INSTANCE
    def prepare_instance(self, store=None, params: dict = None):
        """Generate the approval votes for this election's model and
        optionally store them on disk."""
        if params is None:
            params = {}
        self.params = params
        if self.model_id == 'all_votes':
            alpha = 1
        else:
            # Fill in missing model parameters (random defaults, alpha, ...).
            params, alpha = update_params(params, self.variable, self.model_id,
                                          self.num_candidates)
        self.params = params
        self.votes = generate_approval_votes(model_id=self.model_id,
                                             num_candidates=self.num_candidates,
                                             num_voters=self.num_voters, params=params)
        self.params = params
        if store:
            self.store_approval_election()

    # STORE
    def store_approval_election(self):
        """ Store approval election in an .app file """
        if self.model_id in APPROVAL_FAKE_MODELS:
            # Fake models have no explicit votes: store only the header.
            path = os.path.join("experiments", str(self.experiment_id),
                                "elections", (str(self.election_id) + ".app"))
            file_ = open(path, 'w')
            file_.write(f'$ {self.model_id} {self.params} \n')
            file_.write(str(self.num_candidates) + '\n')
            file_.write(str(self.num_voters) + '\n')
            file_.close()
        else:
            path = os.path.join("experiments", str(self.experiment_id), "elections",
                                (str(self.election_id) + ".app"))
            store_votes_in_a_file(self, self.model_id, self.num_candidates, self.num_voters,
                                  self.params, path, self.ballot, votes=self.votes)

    def compute_distances_between_votes(self, distance_id='hamming'):
        """Compute (and optionally store) the pairwise voter-distance matrix."""
        distances = np.zeros([self.num_voters, self.num_voters])
        for v1 in range(self.num_voters):
            for v2 in range(self.num_voters):
                if distance_id == 'hamming':
                    distances[v1][v2] = hamming(self.votes[v1], self.votes[v2])
        self.distances = distances
        if self.store:
            self._store_distances()
        return distances
def import_real_app_election(experiment_id: str, election_id: str, shift=False):
    """ Import real approval election from .app file """
    # print("model", model_id)
    print("model")
    file_name = f'{election_id}.app'
    path = os.path.join(os.getcwd(), "experiments", experiment_id, "elections", file_name)
    my_file = open(path, 'r')
    params = 0
    first_line = my_file.readline()
    # A '#' header line carries the model name and (optionally) a params dict;
    # otherwise the first line is directly the number of candidates.
    if first_line[0] != '#':
        model_id = 'empty'
        num_candidates = int(first_line)
    else:
        first_line = first_line.strip().split()
        model_id = first_line[1]
        if len(first_line) <= 2:
            params = {}
        else:
            # The remainder of the header line is a Python-literal params dict.
            params = ast.literal_eval(" ".join(first_line[2:]))
        num_candidates = int(my_file.readline())
    # Skip the per-candidate description lines.
    for _ in range(num_candidates):
        my_file.readline()
    line = my_file.readline().rstrip("\n").split(',')
    num_voters = int(line[0])
    num_options = int(line[2])
    votes = [set() for _ in range(num_voters)]
    it = 0
    # Each option line is "<quantity>,{c1,c2,...}": expand it into `quantity`
    # identical ballots.
    for j in range(num_options):
        line = my_file.readline().rstrip("\n").replace("{", ''). \
            replace("}", '').replace(' ', '').split(',')
        if line[1] != '':
            quantity = int(line[0])
            for k in range(quantity):
                for el in range(len(line) - 1):
                    votes[it].add(int(line[el + 1]))
                it += 1
    # Map a pretty model name back to its internal identifier.
    if model_id in NICE_NAME.values():
        rev_dict = dict(zip(NICE_NAME.values(), NICE_NAME.keys()))
        model_id = rev_dict[model_id]
    # Optionally shift candidate ids from 1-based to 0-based numbering.
    if shift:
        for i, vote in enumerate(votes):
            new_vote = set()
            for c in vote:
                new_vote.add(c - 1)
            votes[i] = new_vote
    my_file.close()
    return votes, num_voters, num_candidates, params, model_id
def import_fake_app_election(experiment_id: str, name: str):
    """Import a fake approval election header from an .app file.

    The header line looks like ``$ <model_name> [<params dict>]`` and is
    followed by the number of candidates and the number of voters.

    Returns:
        tuple: (fake_model_name, params, num_voters, num_candidates)
    """
    file_name = f'{name}.app'
    path = os.path.join(os.getcwd(), "experiments", experiment_id, "elections", file_name)
    # Use a context manager so the file handle is always released
    # (the previous implementation leaked the descriptor).
    with open(path, 'r') as my_file:
        tokens = my_file.readline().strip().split()
        fake_model_name = tokens[1]
        if len(tokens) <= 2:
            params = {}
        else:
            # The rest of the header line is a Python-literal params dict.
            params = ast.literal_eval(" ".join(tokens[2:]))
        num_candidates = int(my_file.readline().strip())
        num_voters = int(my_file.readline().strip())
    return fake_model_name, params, num_voters, num_candidates
def check_if_fake(experiment_id: str, election_id: str) -> bool:
    """Return True when the stored .app file describes a fake election
    (its header line starts with '$')."""
    path = os.path.join(os.getcwd(), "experiments", experiment_id,
                        "elections", f'{election_id}.app')
    with open(path, 'r') as handle:
        header = handle.readline().strip()
    return header[0] == '$'
def get_skeleton_approvalwise_vector(election):
    """Analytic ('skeleton') approvalwise vector for a resampling-style model
    parameterized by phi and p."""
    phi = election.params['phi']
    p = election.params['p']
    m = election.num_candidates
    k = int(p * m)
    # Baseline approval mass phi*p for every candidate; the first k candidates
    # additionally receive the deterministic (1 - phi) share.
    values = [phi * p + (1 - phi) if idx < k else phi * p for idx in range(m)]
    return np.array(values)
def update_params(params, variable, model_id, num_candidates):
    # Fill in missing model parameters — drawing a random default when a value
    # is absent, or sampling uniformly when a [low, high] range is given — and
    # return the updated dict together with the 'alpha' embedding coordinate.
    # NOTE(review): mutates and returns the same `params` dict.
    if variable is not None:
        # 'variable' mode: alpha is taken from the named parameter.
        if model_id in APPROVAL_MODELS:
            if 'p' not in params:
                params['p'] = np.random.rand()
            elif type(params['p']) is list:
                params['p'] = np.random.uniform(low=params['p'][0], high=params['p'][1])
        params['alpha'] = params[variable]
        params['variable'] = variable
    else:
        if model_id in ['approval_partylist']:
            return params, 1
        if model_id in APPROVAL_MODELS:
            if 'p' not in params:
                params['p'] = np.random.rand()
            elif type(params['p']) is list:
                params['p'] = np.random.uniform(low=params['p'][0], high=params['p'][1])
        if 'phi' in params and type(params['phi']) is list:
            params['phi'] = np.random.uniform(low=params['phi'][0], high=params['phi'][1])
        # Model-specific defaults for the dispersion / contagion parameters.
        if model_id == 'mallows' and params['phi'] is None:
            params['phi'] = np.random.random()
        elif model_id == 'norm-mallows' and 'norm-phi' not in params:
            params['norm-phi'] = np.random.random()
        elif model_id in ['urn_model', 'approval_urn'] and 'alpha' not in params:
            params['alpha'] = gamma.rvs(0.8)
        # Convert the normalized dispersion norm-phi into Mallows' phi.
        if model_id == 'norm-mallows':
            params['phi'] = mallows.phi_from_relphi(num_candidates, relphi=params['norm-phi'])
            if 'weight' not in params:
                params['weight'] = 0.
        if model_id == 'mallows_matrix_path':
            params['norm-phi'] = params['alpha']
            params['phi'] = mallows.phi_from_relphi(num_candidates, relphi=params['norm-phi'])
        if model_id == 'erdos_renyi_graph' and params['p'] is None:
            params['p'] = np.random.random()
        # Derive alpha from whichever dispersion parameter is available.
        if 'alpha' not in params:
            if 'norm-phi' in params:
                params['alpha'] = params['norm-phi']
            elif 'phi' in params:
                params['alpha'] = params['phi']
            else:
                params['alpha'] = np.random.rand()
        elif type(params['alpha']) is list:
            params['alpha'] = np.random.uniform(low=params['alpha'][0], high=params['alpha'][1])
    return params, params['alpha']
| [
"scipy.stats.gamma.rvs",
"numpy.random.rand",
"numpy.random.random",
"numpy.sort",
"mapel.elections.models_.store_votes_in_a_file",
"mapel.elections.models.mallows.phi_from_relphi",
"mapel.main._inner_distances.hamming",
"os.getcwd",
"numpy.array",
"numpy.zeros",
"mapel.elections.models_.generat... | [((12366, 12382), 'numpy.array', 'np.array', (['vector'], {}), '(vector)\n', (12374, 12382), True, 'import numpy as np\n'), ((3670, 3726), 'numpy.zeros', 'np.zeros', (['[self.num_candidates, self.num_candidates * 2]'], {}), '([self.num_candidates, self.num_candidates * 2])\n', (3678, 3726), True, 'import numpy as np\n'), ((5014, 5066), 'numpy.zeros', 'np.zeros', (['[self.num_candidates, self.num_candidates]'], {}), '([self.num_candidates, self.num_candidates])\n', (5022, 5066), True, 'import numpy as np\n'), ((5549, 5601), 'numpy.zeros', 'np.zeros', (['[self.num_candidates, self.num_candidates]'], {}), '([self.num_candidates, self.num_candidates])\n', (5557, 5601), True, 'import numpy as np\n'), ((6191, 6239), 'numpy.sort', 'np.sort', (['self.candidatelikeness_original_vectors'], {}), '(self.candidatelikeness_original_vectors)\n', (6198, 6239), True, 'import numpy as np\n'), ((6374, 6418), 'numpy.zeros', 'np.zeros', (['[self.num_voters, self.num_voters]'], {}), '([self.num_voters, self.num_voters])\n', (6382, 6418), True, 'import numpy as np\n'), ((7629, 7760), 'mapel.elections.models_.generate_approval_votes', 'generate_approval_votes', ([], {'model_id': 'self.model_id', 'num_candidates': 'self.num_candidates', 'num_voters': 'self.num_voters', 'params': 'params'}), '(model_id=self.model_id, num_candidates=self.\n num_candidates, num_voters=self.num_voters, params=params)\n', (7652, 7760), False, 'from mapel.elections.models_ import generate_approval_votes, store_votes_in_a_file\n'), ((8917, 8961), 'numpy.zeros', 'np.zeros', (['[self.num_voters, self.num_voters]'], {}), '([self.num_voters, self.num_voters])\n', (8925, 8961), True, 'import numpy as np\n'), ((9549, 9560), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (9558, 9560), False, 'import os\n'), ((11277, 11288), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (11286, 11288), False, 'import os\n'), ((11904, 11915), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (11913, 11915), 
False, 'import os\n'), ((8660, 8795), 'mapel.elections.models_.store_votes_in_a_file', 'store_votes_in_a_file', (['self', 'self.model_id', 'self.num_candidates', 'self.num_voters', 'self.params', 'path', 'self.ballot'], {'votes': 'self.votes'}), '(self, self.model_id, self.num_candidates, self.\n num_voters, self.params, path, self.ballot, votes=self.votes)\n', (8681, 8795), False, 'from mapel.elections.models_ import generate_approval_votes, store_votes_in_a_file\n'), ((13246, 13308), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': "params['phi'][0]", 'high': "params['phi'][1]"}), "(low=params['phi'][0], high=params['phi'][1])\n", (13263, 13308), True, 'import numpy as np\n'), ((13398, 13416), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (13414, 13416), True, 'import numpy as np\n'), ((13734, 13800), 'mapel.elections.models.mallows.phi_from_relphi', 'mallows.phi_from_relphi', (['num_candidates'], {'relphi': "params['norm-phi']"}), "(num_candidates, relphi=params['norm-phi'])\n", (13757, 13800), True, 'import mapel.elections.models.mallows as mallows\n'), ((14002, 14068), 'mapel.elections.models.mallows.phi_from_relphi', 'mallows.phi_from_relphi', (['num_candidates'], {'relphi': "params['norm-phi']"}), "(num_candidates, relphi=params['norm-phi'])\n", (14025, 14068), True, 'import mapel.elections.models.mallows as mallows\n'), ((14164, 14182), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (14180, 14182), True, 'import numpy as np\n'), ((12583, 12599), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (12597, 12599), True, 'import numpy as np\n'), ((13007, 13023), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (13021, 13023), True, 'import numpy as np\n'), ((13520, 13538), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (13536, 13538), True, 'import numpy as np\n'), ((14533, 14599), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': "params['alpha'][0]", 'high': "params['alpha'][1]"}), 
"(low=params['alpha'][0], high=params['alpha'][1])\n", (14550, 14599), True, 'import numpy as np\n'), ((3235, 3266), 'numpy.zeros', 'np.zeros', (['[self.num_candidates]'], {}), '([self.num_candidates])\n', (3243, 3266), True, 'import numpy as np\n'), ((3493, 3521), 'numpy.sort', 'np.sort', (['approvalwise_vector'], {}), '(approvalwise_vector)\n', (3500, 3521), True, 'import numpy as np\n'), ((6663, 6684), 'mapel.main._inner_distances.hamming', 'hamming', (['set_a', 'set_b'], {}), '(set_a, set_b)\n', (6670, 6684), False, 'from mapel.main._inner_distances import hamming\n'), ((9135, 9174), 'mapel.main._inner_distances.hamming', 'hamming', (['self.votes[v1]', 'self.votes[v2]'], {}), '(self.votes[v1], self.votes[v2])\n', (9142, 9174), False, 'from mapel.main._inner_distances import hamming\n'), ((12674, 12732), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': "params['p'][0]", 'high': "params['p'][1]"}), "(low=params['p'][0], high=params['p'][1])\n", (12691, 12732), True, 'import numpy as np\n'), ((13098, 13156), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': "params['p'][0]", 'high': "params['p'][1]"}), "(low=params['p'][0], high=params['p'][1])\n", (13115, 13156), True, 'import numpy as np\n'), ((13651, 13665), 'scipy.stats.gamma.rvs', 'gamma.rvs', (['(0.8)'], {}), '(0.8)\n', (13660, 13665), False, 'from scipy.stats import gamma\n'), ((14442, 14458), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (14456, 14458), True, 'import numpy as np\n')] |
"""
panoptes executable
Created on 02/05/2020
@author: RH
"""
import os
import sys
import tensorflow as tf
import pandas as pd
from openslide import OpenSlide
import numpy as np
import cv2
import time
import matplotlib
import panoptes.prep as prep
import panoptes.cnn as cnn
import panoptes.sample_prep as sample_prep
# Select the non-interactive Agg backend so figures render to files without a
# display (headless servers); per matplotlib docs this should run before
# pyplot is first used, hence its position right after the imports.
matplotlib.use('Agg')
def get_default_data():
    """Load the sample label and train/test-split tables bundled with the package.

    Returns:
        tuple: ``(label, split)`` pandas DataFrames read from
        ``sample_label.csv`` and ``sample_sep_file.csv`` inside the
        installed ``panoptes`` package directory.
    """
    import panoptes
    pkg_root = panoptes.__path__[0]
    label_df = pd.read_csv("{}/sample_label.csv".format(pkg_root), header=0)
    split_df = pd.read_csv("{}/sample_sep_file.csv".format(pkg_root), header=0)
    return label_df, split_df
def panoptes(mode, outdir, feature, architecture, log_dir, image_dir, tile_dir=None, modeltoload=None,
             imagefile=None, batchsize=24, epoch=100, resolution=None, BMI=np.nan, age=np.nan, label_file=None,
             split_file=None, cancer='UCEC'):
    """Run the Panoptes pipeline in one of three modes.

    mode == "test":      cut one slide into tiles, run inference with a saved
                         model, and write prediction CSVs plus heatmap PNGs.
    mode == "validate":  prepare tile datasets and evaluate a saved model on
                         the test split.
    otherwise (train):   prepare tile datasets and train a new model from
                         scratch, then run an internal test.

    Always exits the interpreter with status 0 at the end.

    Args:
        mode: "test", "validate", or anything else for training.
        outdir: root output directory; run artifacts go to outdir/log_dir.
        feature: prediction target; 'subtype' -> 4 classes, else 2.
        architecture: model name; "PC1".."PC4" enable the clinical-variable
            (sup) branch.
        log_dir: run subdirectory name under outdir.
        image_dir: directory holding whole-slide images.
        tile_dir: tile cache directory (train/validate modes).
        modeltoload: path to a saved metagraph (test/validate modes).
        imagefile: slide filename to score (test mode).
        batchsize: minibatch size; also first element of the input shape.
        epoch: training epochs (train mode).
        resolution: scan magnification (40 or 20); controls tile scaling.
        BMI, age: optional clinical covariates written into the tfrecords.
        label_file: CSV of slide labels (train/validate modes).
        split_file: optional predefined train/test/validation split CSV.
        cancer: cancer-type tag passed to input validation.
    """
    # Fail fast on inconsistent argument combinations before any filesystem work.
    prep.valid_input(mode, cancer, outdir, feature, architecture, log_dir, tile_dir, image_dir, modeltoload, imagefile,
                     batchsize, epoch, resolution, BMI, age, label_file, split_file)
    # Clear any graph left over from a previous run in this interpreter (TF1 API).
    tf.reset_default_graph()
    # "sup" architectures integrate clinical variables (BMI/age) into the model.
    if architecture in ["PC1", "PC2", "PC3", "PC4"]:
        sup = True
    else:
        sup = False
    if feature == 'subtype':
        classes = 4
    else:
        classes = 2
    # input image dimension
    INPUT_DIM = [batchsize, 299, 299, 3]
    # hyper parameters
    HYPERPARAMS = {
        "batch_size": batchsize,
        "dropout": 0.3,
        "learning_rate": 1E-4,
        "classes": classes,
        "sup": sup
    }
    LOG_DIR = "{}/{}".format(outdir, log_dir)
    out_dir = "{}/out".format(LOG_DIR)
    if mode == "test":
        start_time = time.time()
        # Split the metagraph path into its file name and containing directory.
        modelname = modeltoload.split(sep='/')[-1]
        modelpath = '/'.join(modeltoload.split(sep='/')[:-1])
        data_dir = LOG_DIR
        METAGRAPH_DIR = modelpath
        # make directories if not exist
        for DIR in (log_dir, image_dir, LOG_DIR, METAGRAPH_DIR, data_dir, out_dir):
            try:
                os.mkdir(DIR)
            except FileExistsError:
                pass
        # Column names of the per-tile prediction CSV depend on the feature.
        if feature == 'histology':
            pos_score = "Serous_score"
            neg_score = "Endometrioid_score"
        else:
            pos_score = "POS_score"
            neg_score = "NEG_score"
        # ft (tile scale factor) and level (OpenSlide pyramid level) depend on
        # scan resolution; unlabeled TCGA slides are assumed to be 40x.
        if resolution == 40:
            ft = 1
            level = 1
        elif resolution == 20:
            level = 0
            ft = 2
        else:
            if "TCGA" in imagefile:
                ft = 1
                level = 1
            else:
                level = 0
                ft = 2
        slide = OpenSlide(image_dir + '/' + imagefile)
        # Get dimension of slide
        bounds_width = slide.level_dimensions[level][0]
        bounds_height = slide.level_dimensions[level][1]
        # Tiling starts at the slide origin.
        x = 0
        y = 0
        # Tiles are 299*ft px wide and overlap by 49*ft px (stride = width - overlap).
        half_width_region = 49 * ft
        full_width_region = 299 * ft
        stepsize = (full_width_region - half_width_region)
        # number of tiles can be cut
        n_x = int((bounds_width - 1) / stepsize)
        n_y = int((bounds_height - 1) / stepsize)
        # Low-resolution thumbnail of the tiled area, used as the heatmap background.
        lowres = slide.read_region((x, y), level + 1, (int(n_x * stepsize / 4), int(n_y * stepsize / 4)))
        raw_img = np.array(lowres)[:, :, :3]
        fct = ft  # NOTE(review): fct appears unused below — candidate for removal.
        # cut tiles
        if not os.path.isfile(data_dir + '/level1/dict.csv'):
            prep.cutter(imagefile, LOG_DIR, imgdir=image_dir, resolution=resolution)
        # make tfrecords
        if not os.path.isfile(data_dir + '/test.tfrecords'):
            prep.testloader(data_dir, imagefile, resolution, BMI, age)
        # reload pretrained model
        m = cnn.INCEPTION(INPUT_DIM, HYPERPARAMS, meta_graph=modelname, log_dir=LOG_DIR, meta_dir=METAGRAPH_DIR,
                          model=architecture)
        print("Loaded! Ready for test!")
        # decode tfrecords
        HE = prep.tfreloader(mode, 1, batchsize, classes, None, None, None, data_dir)
        # prediction
        m.inference(HE, out_dir, realtest=True, bs=batchsize, pmd=feature)
        # load tiles dictionary
        slist = pd.read_csv(data_dir + '/te_sample.csv', header=0)
        # load dictionary of predictions on tiles
        teresult = pd.read_csv(out_dir + '/Test.csv', header=0)
        # join 2 dictionaries
        joined = pd.merge(slist, teresult, how='inner', on=['Num'])
        joined = joined.drop(columns=['Num'])
        tile_dict = pd.read_csv(data_dir + '/level1/dict.csv', header=0)
        tile_dict = tile_dict.rename(index=str, columns={"Loc": "L0path"})
        joined_dict = pd.merge(joined, tile_dict, how='inner', on=['L0path'])
        # slide level prediction: mean of per-tile positive scores vs 0.5 cutoff
        if joined_dict[pos_score].mean() > 0.5:
            print("Positive! Prediction score = " + str(joined_dict[pos_score].mean().round(5)))
        else:
            print("Negative! Prediction score = " + str(joined_dict[pos_score].mean().round(5)))
        # save joined dictionary
        joined_dict.to_csv(out_dir + '/finaldict.csv', index=False)
        # output heat map of pos and neg.
        # initialize a graph and for each RGB channel
        opt = np.full((n_x, n_y), 0)
        hm_R = np.full((n_x, n_y), 0)
        hm_G = np.full((n_x, n_y), 0)
        hm_B = np.full((n_x, n_y), 0)
        # Positive is labeled red in output heat map
        # (negative tiles are blue; intensity fades toward white near score 0.5)
        for index, row in joined_dict.iterrows():
            opt[int(row["X_pos"]), int(row["Y_pos"])] = 255
            if row[pos_score] >= 0.5:
                hm_R[int(row["X_pos"]), int(row["Y_pos"])] = 255
                hm_G[int(row["X_pos"]), int(row["Y_pos"])] = int((1 - (row[pos_score] - 0.5) * 2) * 255)
                hm_B[int(row["X_pos"]), int(row["Y_pos"])] = int((1 - (row[pos_score] - 0.5) * 2) * 255)
            else:
                hm_B[int(row["X_pos"]), int(row["Y_pos"])] = 255
                hm_G[int(row["X_pos"]), int(row["Y_pos"])] = int((1 - (row[neg_score] - 0.5) * 2) * 255)
                hm_R[int(row["X_pos"]), int(row["Y_pos"])] = int((1 - (row[neg_score] - 0.5) * 2) * 255)
        # expand 5 times
        # (each tile becomes a 50x50 pixel block in the output images)
        opt = opt.repeat(50, axis=0).repeat(50, axis=1)
        # small-scaled original image
        ori_img = cv2.resize(raw_img, (np.shape(opt)[0], np.shape(opt)[1]))
        ori_img = ori_img[:np.shape(opt)[1], :np.shape(opt)[0], :3]
        # Swap R and B channels: cv2.imwrite expects BGR ordering.
        tq = ori_img[:, :, 0]
        ori_img[:, :, 0] = ori_img[:, :, 2]
        ori_img[:, :, 2] = tq
        cv2.imwrite(out_dir + '/Original_scaled.png', ori_img)
        # binary output image
        topt = np.transpose(opt)
        opt = np.full((np.shape(topt)[0], np.shape(topt)[1], 3), 0)
        opt[:, :, 0] = topt
        opt[:, :, 1] = topt
        opt[:, :, 2] = topt
        cv2.imwrite(out_dir + '/Mask.png', opt * 255)
        # output heatmap
        hm_R = np.transpose(hm_R)
        hm_G = np.transpose(hm_G)
        hm_B = np.transpose(hm_B)
        hm_R = hm_R.repeat(50, axis=0).repeat(50, axis=1)
        hm_G = hm_G.repeat(50, axis=0).repeat(50, axis=1)
        hm_B = hm_B.repeat(50, axis=0).repeat(50, axis=1)
        hm = np.dstack([hm_B, hm_G, hm_R])
        cv2.imwrite(out_dir + '/HM.png', hm)
        # superimpose heatmap on scaled original image
        overlay = ori_img * 0.5 + hm * 0.5
        cv2.imwrite(out_dir + '/Overlay.png', overlay)
        # # Time measure tool
        print("--- %s seconds ---" % (time.time() - start_time))
    elif mode == "validate":
        modelname = modeltoload.split(sep='/')[-1]
        data_dir = "{}/data".format(LOG_DIR)
        METAGRAPH_DIR = LOG_DIR
        # make directories if not exist
        for DIR in (log_dir, image_dir, tile_dir, LOG_DIR, METAGRAPH_DIR, data_dir, out_dir):
            try:
                os.mkdir(DIR)
            except FileExistsError:
                pass
        # check images to be cut
        reff = pd.read_csv(label_file, header=0)
        tocut = prep.check_new_image(reff, tile_dir)
        # cut into tiles
        for im in tocut:
            prep.cutter(im[1], tile_dir + '/' + im[0], image_dir, dp=im[2], resolution=resolution)
        # get counts of testing, validation, and training datasets;
        # if not exist, prepare testing and training datasets from sampling; package into tfrecords
        if os.path.isfile(data_dir + '/tr_sample.csv') and os.path.isfile(data_dir + '/te_sample.csv') \
                and os.path.isfile(data_dir + '/va_sample.csv'):
            trc, tec, vac, weights = prep.counters(data_dir, classes)
            trs = pd.read_csv(data_dir + '/tr_sample.csv', header=0)
            tes = pd.read_csv(data_dir + '/te_sample.csv', header=0)
            vas = pd.read_csv(data_dir + '/va_sample.csv', header=0)
        else:
            alll = sample_prep.big_image_sum(pmd=feature, path=tile_dir, ref_file=label_file)
            trs, tes, vas = sample_prep.set_sep(alll, path=data_dir, cls=classes, cut=0.2,
                                                resolution=resolution, sep_file=split_file, batchsize=batchsize)
            trc, tec, vac, weights = prep.counters(data_dir, classes)
        if not os.path.isfile(data_dir + '/test.tfrecords'):
            prep.loader(data_dir, 'test')
        if not os.path.isfile(data_dir + '/train.tfrecords'):
            prep.loader(data_dir, 'train')
        if not os.path.isfile(data_dir + '/validation.tfrecords'):
            prep.loader(data_dir, 'validation')
        # reload pretrained model
        m = cnn.INCEPTION(INPUT_DIM, HYPERPARAMS, meta_graph=modelname, log_dir=LOG_DIR,
                          meta_dir=METAGRAPH_DIR, model=architecture, weights=weights)
        print("Loaded! Ready for test!")
        # validating
        # (requires at least one full batch of test images)
        if tec >= batchsize:
            THE = prep.tfreloader('test', 1, batchsize, classes, trc, tec, vac, data_dir)
            m.inference(THE, out_dir, testset=tes, pmd=feature)
        else:
            print("Not enough testing images!")
    else:
        # Training mode: same dataset preparation as "validate", then a fresh model.
        data_dir = "{}/data".format(LOG_DIR)
        METAGRAPH_DIR = LOG_DIR
        # make directories if not exist
        for DIR in (log_dir, image_dir, tile_dir, LOG_DIR, METAGRAPH_DIR, data_dir, out_dir):
            try:
                os.mkdir(DIR)
            except FileExistsError:
                pass
        # determine images to be cut
        reff = pd.read_csv(label_file, header=0)
        tocut = prep.check_new_image(reff, tile_dir)
        # cut images into tiles
        for im in tocut:
            prep.cutter(im[1], tile_dir + '/' + im[0], image_dir, dp=im[2], resolution=resolution)
        # get counts of testing, validation, and training datasets;
        # if not exist, prepare testing and training datasets from sampling; package into tfrecords
        if os.path.isfile(data_dir + '/tr_sample.csv') and os.path.isfile(data_dir + '/te_sample.csv') \
                and os.path.isfile(data_dir + '/va_sample.csv'):
            trc, tec, vac, weights = prep.counters(data_dir, classes)
            trs = pd.read_csv(data_dir + '/tr_sample.csv', header=0)
            tes = pd.read_csv(data_dir + '/te_sample.csv', header=0)
            vas = pd.read_csv(data_dir + '/va_sample.csv', header=0)
        else:
            alll = sample_prep.big_image_sum(pmd=feature, path=tile_dir, ref_file=label_file)
            trs, tes, vas = sample_prep.set_sep(alll, path=data_dir, cls=classes, cut=0.2,
                                                resolution=resolution, sep_file=split_file, batchsize=batchsize)
            trc, tec, vac, weights = prep.counters(data_dir, classes)
        if not os.path.isfile(data_dir + '/test.tfrecords'):
            prep.loader(data_dir, 'test')
        if not os.path.isfile(data_dir + '/train.tfrecords'):
            prep.loader(data_dir, 'train')
        if not os.path.isfile(data_dir + '/validation.tfrecords'):
            prep.loader(data_dir, 'validation')
        if sup:
            print("Integrating clinical variables!")
        # prepare to train from scratch
        m = cnn.INCEPTION(INPUT_DIM, HYPERPARAMS, log_dir=LOG_DIR, model=architecture, weights=weights)
        print("Start a new training!")
        # decode training and validation sets
        HE = prep.tfreloader('train', epoch, batchsize, classes, trc, tec, vac, data_dir)
        VHE = prep.tfreloader('validation', epoch*100, batchsize, classes, trc, tec, vac, data_dir)
        # total training iterations to cover `epoch` passes over trc tiles
        itt = int(trc * epoch / batchsize) + 1
        if trc <= 2 * batchsize or vac <= batchsize:
            print("Not enough training/validation images!")
        else:
            # training
            m.train(HE, VHE, trc, batchsize, pmd=feature, dirr=out_dir, max_iter=itt, save=True, outdir=METAGRAPH_DIR)
            if tec >= batchsize:
                # internal testing
                THE = prep.tfreloader('test', 1, batchsize, classes, trc, tec, vac, data_dir)
                m.inference(THE, out_dir, testset=tes, pmd=feature)
            else:
                print("Not enough testing images!")
    # Terminate the interpreter; callers never regain control after this call.
    sys.exit(0)
| [
"pandas.read_csv",
"panoptes.prep.testloader",
"numpy.array",
"panoptes.prep.tfreloader",
"openslide.OpenSlide",
"sys.exit",
"os.mkdir",
"panoptes.sample_prep.big_image_sum",
"panoptes.sample_prep.set_sep",
"numpy.full",
"panoptes.prep.loader",
"matplotlib.use",
"pandas.merge",
"os.path.is... | [((320, 341), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (334, 341), False, 'import matplotlib\n'), ((854, 1041), 'panoptes.prep.valid_input', 'prep.valid_input', (['mode', 'cancer', 'outdir', 'feature', 'architecture', 'log_dir', 'tile_dir', 'image_dir', 'modeltoload', 'imagefile', 'batchsize', 'epoch', 'resolution', 'BMI', 'age', 'label_file', 'split_file'], {}), '(mode, cancer, outdir, feature, architecture, log_dir,\n tile_dir, image_dir, modeltoload, imagefile, batchsize, epoch,\n resolution, BMI, age, label_file, split_file)\n', (870, 1041), True, 'import panoptes.prep as prep\n'), ((1060, 1084), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (1082, 1084), True, 'import tensorflow as tf\n'), ((12896, 12907), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (12904, 12907), False, 'import sys\n'), ((1653, 1664), 'time.time', 'time.time', ([], {}), '()\n', (1662, 1664), False, 'import time\n'), ((2598, 2636), 'openslide.OpenSlide', 'OpenSlide', (["(image_dir + '/' + imagefile)"], {}), "(image_dir + '/' + imagefile)\n", (2607, 2636), False, 'from openslide import OpenSlide\n'), ((3621, 3745), 'panoptes.cnn.INCEPTION', 'cnn.INCEPTION', (['INPUT_DIM', 'HYPERPARAMS'], {'meta_graph': 'modelname', 'log_dir': 'LOG_DIR', 'meta_dir': 'METAGRAPH_DIR', 'model': 'architecture'}), '(INPUT_DIM, HYPERPARAMS, meta_graph=modelname, log_dir=LOG_DIR,\n meta_dir=METAGRAPH_DIR, model=architecture)\n', (3634, 3745), True, 'import panoptes.cnn as cnn\n'), ((3849, 3921), 'panoptes.prep.tfreloader', 'prep.tfreloader', (['mode', '(1)', 'batchsize', 'classes', 'None', 'None', 'None', 'data_dir'], {}), '(mode, 1, batchsize, classes, None, None, None, data_dir)\n', (3864, 3921), True, 'import panoptes.prep as prep\n'), ((4066, 4116), 'pandas.read_csv', 'pd.read_csv', (["(data_dir + '/te_sample.csv')"], {'header': '(0)'}), "(data_dir + '/te_sample.csv', header=0)\n", (4077, 4116), True, 'import pandas as pd\n'), 
((4186, 4230), 'pandas.read_csv', 'pd.read_csv', (["(out_dir + '/Test.csv')"], {'header': '(0)'}), "(out_dir + '/Test.csv', header=0)\n", (4197, 4230), True, 'import pandas as pd\n'), ((4278, 4328), 'pandas.merge', 'pd.merge', (['slist', 'teresult'], {'how': '"""inner"""', 'on': "['Num']"}), "(slist, teresult, how='inner', on=['Num'])\n", (4286, 4328), True, 'import pandas as pd\n'), ((4395, 4447), 'pandas.read_csv', 'pd.read_csv', (["(data_dir + '/level1/dict.csv')"], {'header': '(0)'}), "(data_dir + '/level1/dict.csv', header=0)\n", (4406, 4447), True, 'import pandas as pd\n'), ((4545, 4600), 'pandas.merge', 'pd.merge', (['joined', 'tile_dict'], {'how': '"""inner"""', 'on': "['L0path']"}), "(joined, tile_dict, how='inner', on=['L0path'])\n", (4553, 4600), True, 'import pandas as pd\n'), ((5102, 5124), 'numpy.full', 'np.full', (['(n_x, n_y)', '(0)'], {}), '((n_x, n_y), 0)\n', (5109, 5124), True, 'import numpy as np\n'), ((5140, 5162), 'numpy.full', 'np.full', (['(n_x, n_y)', '(0)'], {}), '((n_x, n_y), 0)\n', (5147, 5162), True, 'import numpy as np\n'), ((5178, 5200), 'numpy.full', 'np.full', (['(n_x, n_y)', '(0)'], {}), '((n_x, n_y), 0)\n', (5185, 5200), True, 'import numpy as np\n'), ((5216, 5238), 'numpy.full', 'np.full', (['(n_x, n_y)', '(0)'], {}), '((n_x, n_y), 0)\n', (5223, 5238), True, 'import numpy as np\n'), ((6386, 6440), 'cv2.imwrite', 'cv2.imwrite', (["(out_dir + '/Original_scaled.png')", 'ori_img'], {}), "(out_dir + '/Original_scaled.png', ori_img)\n", (6397, 6440), False, 'import cv2\n'), ((6487, 6504), 'numpy.transpose', 'np.transpose', (['opt'], {}), '(opt)\n', (6499, 6504), True, 'import numpy as np\n'), ((6665, 6710), 'cv2.imwrite', 'cv2.imwrite', (["(out_dir + '/Mask.png')", '(opt * 255)'], {}), "(out_dir + '/Mask.png', opt * 255)\n", (6676, 6710), False, 'import cv2\n'), ((6752, 6770), 'numpy.transpose', 'np.transpose', (['hm_R'], {}), '(hm_R)\n', (6764, 6770), True, 'import numpy as np\n'), ((6786, 6804), 'numpy.transpose', 'np.transpose', 
(['hm_G'], {}), '(hm_G)\n', (6798, 6804), True, 'import numpy as np\n'), ((6820, 6838), 'numpy.transpose', 'np.transpose', (['hm_B'], {}), '(hm_B)\n', (6832, 6838), True, 'import numpy as np\n'), ((7026, 7055), 'numpy.dstack', 'np.dstack', (['[hm_B, hm_G, hm_R]'], {}), '([hm_B, hm_G, hm_R])\n', (7035, 7055), True, 'import numpy as np\n'), ((7064, 7100), 'cv2.imwrite', 'cv2.imwrite', (["(out_dir + '/HM.png')", 'hm'], {}), "(out_dir + '/HM.png', hm)\n", (7075, 7100), False, 'import cv2\n'), ((7208, 7254), 'cv2.imwrite', 'cv2.imwrite', (["(out_dir + '/Overlay.png')", 'overlay'], {}), "(out_dir + '/Overlay.png', overlay)\n", (7219, 7254), False, 'import cv2\n'), ((3206, 3222), 'numpy.array', 'np.array', (['lowres'], {}), '(lowres)\n', (3214, 3222), True, 'import numpy as np\n'), ((3286, 3331), 'os.path.isfile', 'os.path.isfile', (["(data_dir + '/level1/dict.csv')"], {}), "(data_dir + '/level1/dict.csv')\n", (3300, 3331), False, 'import os\n'), ((3345, 3417), 'panoptes.prep.cutter', 'prep.cutter', (['imagefile', 'LOG_DIR'], {'imgdir': 'image_dir', 'resolution': 'resolution'}), '(imagefile, LOG_DIR, imgdir=image_dir, resolution=resolution)\n', (3356, 3417), True, 'import panoptes.prep as prep\n'), ((3458, 3502), 'os.path.isfile', 'os.path.isfile', (["(data_dir + '/test.tfrecords')"], {}), "(data_dir + '/test.tfrecords')\n", (3472, 3502), False, 'import os\n'), ((3516, 3574), 'panoptes.prep.testloader', 'prep.testloader', (['data_dir', 'imagefile', 'resolution', 'BMI', 'age'], {}), '(data_dir, imagefile, resolution, BMI, age)\n', (3531, 3574), True, 'import panoptes.prep as prep\n'), ((7795, 7828), 'pandas.read_csv', 'pd.read_csv', (['label_file'], {'header': '(0)'}), '(label_file, header=0)\n', (7806, 7828), True, 'import pandas as pd\n'), ((7845, 7881), 'panoptes.prep.check_new_image', 'prep.check_new_image', (['reff', 'tile_dir'], {}), '(reff, tile_dir)\n', (7865, 7881), True, 'import panoptes.prep as prep\n'), ((9398, 9539), 'panoptes.cnn.INCEPTION', 'cnn.INCEPTION', 
(['INPUT_DIM', 'HYPERPARAMS'], {'meta_graph': 'modelname', 'log_dir': 'LOG_DIR', 'meta_dir': 'METAGRAPH_DIR', 'model': 'architecture', 'weights': 'weights'}), '(INPUT_DIM, HYPERPARAMS, meta_graph=modelname, log_dir=LOG_DIR,\n meta_dir=METAGRAPH_DIR, model=architecture, weights=weights)\n', (9411, 9539), True, 'import panoptes.cnn as cnn\n'), ((10247, 10280), 'pandas.read_csv', 'pd.read_csv', (['label_file'], {'header': '(0)'}), '(label_file, header=0)\n', (10258, 10280), True, 'import pandas as pd\n'), ((10297, 10333), 'panoptes.prep.check_new_image', 'prep.check_new_image', (['reff', 'tile_dir'], {}), '(reff, tile_dir)\n', (10317, 10333), True, 'import panoptes.prep as prep\n'), ((11932, 12027), 'panoptes.cnn.INCEPTION', 'cnn.INCEPTION', (['INPUT_DIM', 'HYPERPARAMS'], {'log_dir': 'LOG_DIR', 'model': 'architecture', 'weights': 'weights'}), '(INPUT_DIM, HYPERPARAMS, log_dir=LOG_DIR, model=architecture,\n weights=weights)\n', (11945, 12027), True, 'import panoptes.cnn as cnn\n'), ((12122, 12198), 'panoptes.prep.tfreloader', 'prep.tfreloader', (['"""train"""', 'epoch', 'batchsize', 'classes', 'trc', 'tec', 'vac', 'data_dir'], {}), "('train', epoch, batchsize, classes, trc, tec, vac, data_dir)\n", (12137, 12198), True, 'import panoptes.prep as prep\n'), ((12213, 12304), 'panoptes.prep.tfreloader', 'prep.tfreloader', (['"""validation"""', '(epoch * 100)', 'batchsize', 'classes', 'trc', 'tec', 'vac', 'data_dir'], {}), "('validation', epoch * 100, batchsize, classes, trc, tec,\n vac, data_dir)\n", (12228, 12304), True, 'import panoptes.prep as prep\n'), ((1996, 2009), 'os.mkdir', 'os.mkdir', (['DIR'], {}), '(DIR)\n', (2004, 2009), False, 'import os\n'), ((7944, 8035), 'panoptes.prep.cutter', 'prep.cutter', (['im[1]', "(tile_dir + '/' + im[0])", 'image_dir'], {'dp': 'im[2]', 'resolution': 'resolution'}), "(im[1], tile_dir + '/' + im[0], image_dir, dp=im[2], resolution=\n resolution)\n", (7955, 8035), True, 'import panoptes.prep as prep\n'), ((8211, 8254), 'os.path.isfile', 
'os.path.isfile', (["(data_dir + '/tr_sample.csv')"], {}), "(data_dir + '/tr_sample.csv')\n", (8225, 8254), False, 'import os\n'), ((8259, 8302), 'os.path.isfile', 'os.path.isfile', (["(data_dir + '/te_sample.csv')"], {}), "(data_dir + '/te_sample.csv')\n", (8273, 8302), False, 'import os\n'), ((8325, 8368), 'os.path.isfile', 'os.path.isfile', (["(data_dir + '/va_sample.csv')"], {}), "(data_dir + '/va_sample.csv')\n", (8339, 8368), False, 'import os\n'), ((8407, 8439), 'panoptes.prep.counters', 'prep.counters', (['data_dir', 'classes'], {}), '(data_dir, classes)\n', (8420, 8439), True, 'import panoptes.prep as prep\n'), ((8458, 8508), 'pandas.read_csv', 'pd.read_csv', (["(data_dir + '/tr_sample.csv')"], {'header': '(0)'}), "(data_dir + '/tr_sample.csv', header=0)\n", (8469, 8508), True, 'import pandas as pd\n'), ((8527, 8577), 'pandas.read_csv', 'pd.read_csv', (["(data_dir + '/te_sample.csv')"], {'header': '(0)'}), "(data_dir + '/te_sample.csv', header=0)\n", (8538, 8577), True, 'import pandas as pd\n'), ((8596, 8646), 'pandas.read_csv', 'pd.read_csv', (["(data_dir + '/va_sample.csv')"], {'header': '(0)'}), "(data_dir + '/va_sample.csv', header=0)\n", (8607, 8646), True, 'import pandas as pd\n'), ((8680, 8754), 'panoptes.sample_prep.big_image_sum', 'sample_prep.big_image_sum', ([], {'pmd': 'feature', 'path': 'tile_dir', 'ref_file': 'label_file'}), '(pmd=feature, path=tile_dir, ref_file=label_file)\n', (8705, 8754), True, 'import panoptes.sample_prep as sample_prep\n'), ((8783, 8915), 'panoptes.sample_prep.set_sep', 'sample_prep.set_sep', (['alll'], {'path': 'data_dir', 'cls': 'classes', 'cut': '(0.2)', 'resolution': 'resolution', 'sep_file': 'split_file', 'batchsize': 'batchsize'}), '(alll, path=data_dir, cls=classes, cut=0.2, resolution=\n resolution, sep_file=split_file, batchsize=batchsize)\n', (8802, 8915), True, 'import panoptes.sample_prep as sample_prep\n'), ((8996, 9028), 'panoptes.prep.counters', 'prep.counters', (['data_dir', 'classes'], {}), '(data_dir, 
classes)\n', (9009, 9028), True, 'import panoptes.prep as prep\n'), ((9044, 9088), 'os.path.isfile', 'os.path.isfile', (["(data_dir + '/test.tfrecords')"], {}), "(data_dir + '/test.tfrecords')\n", (9058, 9088), False, 'import os\n'), ((9102, 9131), 'panoptes.prep.loader', 'prep.loader', (['data_dir', '"""test"""'], {}), "(data_dir, 'test')\n", (9113, 9131), True, 'import panoptes.prep as prep\n'), ((9147, 9192), 'os.path.isfile', 'os.path.isfile', (["(data_dir + '/train.tfrecords')"], {}), "(data_dir + '/train.tfrecords')\n", (9161, 9192), False, 'import os\n'), ((9206, 9236), 'panoptes.prep.loader', 'prep.loader', (['data_dir', '"""train"""'], {}), "(data_dir, 'train')\n", (9217, 9236), True, 'import panoptes.prep as prep\n'), ((9252, 9302), 'os.path.isfile', 'os.path.isfile', (["(data_dir + '/validation.tfrecords')"], {}), "(data_dir + '/validation.tfrecords')\n", (9266, 9302), False, 'import os\n'), ((9316, 9351), 'panoptes.prep.loader', 'prep.loader', (['data_dir', '"""validation"""'], {}), "(data_dir, 'validation')\n", (9327, 9351), True, 'import panoptes.prep as prep\n'), ((9671, 9742), 'panoptes.prep.tfreloader', 'prep.tfreloader', (['"""test"""', '(1)', 'batchsize', 'classes', 'trc', 'tec', 'vac', 'data_dir'], {}), "('test', 1, batchsize, classes, trc, tec, vac, data_dir)\n", (9686, 9742), True, 'import panoptes.prep as prep\n'), ((10403, 10494), 'panoptes.prep.cutter', 'prep.cutter', (['im[1]', "(tile_dir + '/' + im[0])", 'image_dir'], {'dp': 'im[2]', 'resolution': 'resolution'}), "(im[1], tile_dir + '/' + im[0], image_dir, dp=im[2], resolution=\n resolution)\n", (10414, 10494), True, 'import panoptes.prep as prep\n'), ((10670, 10713), 'os.path.isfile', 'os.path.isfile', (["(data_dir + '/tr_sample.csv')"], {}), "(data_dir + '/tr_sample.csv')\n", (10684, 10713), False, 'import os\n'), ((10718, 10761), 'os.path.isfile', 'os.path.isfile', (["(data_dir + '/te_sample.csv')"], {}), "(data_dir + '/te_sample.csv')\n", (10732, 10761), False, 'import os\n'), 
((10784, 10827), 'os.path.isfile', 'os.path.isfile', (["(data_dir + '/va_sample.csv')"], {}), "(data_dir + '/va_sample.csv')\n", (10798, 10827), False, 'import os\n'), ((10866, 10898), 'panoptes.prep.counters', 'prep.counters', (['data_dir', 'classes'], {}), '(data_dir, classes)\n', (10879, 10898), True, 'import panoptes.prep as prep\n'), ((10917, 10967), 'pandas.read_csv', 'pd.read_csv', (["(data_dir + '/tr_sample.csv')"], {'header': '(0)'}), "(data_dir + '/tr_sample.csv', header=0)\n", (10928, 10967), True, 'import pandas as pd\n'), ((10986, 11036), 'pandas.read_csv', 'pd.read_csv', (["(data_dir + '/te_sample.csv')"], {'header': '(0)'}), "(data_dir + '/te_sample.csv', header=0)\n", (10997, 11036), True, 'import pandas as pd\n'), ((11055, 11105), 'pandas.read_csv', 'pd.read_csv', (["(data_dir + '/va_sample.csv')"], {'header': '(0)'}), "(data_dir + '/va_sample.csv', header=0)\n", (11066, 11105), True, 'import pandas as pd\n'), ((11139, 11213), 'panoptes.sample_prep.big_image_sum', 'sample_prep.big_image_sum', ([], {'pmd': 'feature', 'path': 'tile_dir', 'ref_file': 'label_file'}), '(pmd=feature, path=tile_dir, ref_file=label_file)\n', (11164, 11213), True, 'import panoptes.sample_prep as sample_prep\n'), ((11242, 11374), 'panoptes.sample_prep.set_sep', 'sample_prep.set_sep', (['alll'], {'path': 'data_dir', 'cls': 'classes', 'cut': '(0.2)', 'resolution': 'resolution', 'sep_file': 'split_file', 'batchsize': 'batchsize'}), '(alll, path=data_dir, cls=classes, cut=0.2, resolution=\n resolution, sep_file=split_file, batchsize=batchsize)\n', (11261, 11374), True, 'import panoptes.sample_prep as sample_prep\n'), ((11455, 11487), 'panoptes.prep.counters', 'prep.counters', (['data_dir', 'classes'], {}), '(data_dir, classes)\n', (11468, 11487), True, 'import panoptes.prep as prep\n'), ((11503, 11547), 'os.path.isfile', 'os.path.isfile', (["(data_dir + '/test.tfrecords')"], {}), "(data_dir + '/test.tfrecords')\n", (11517, 11547), False, 'import os\n'), ((11561, 11590), 
'panoptes.prep.loader', 'prep.loader', (['data_dir', '"""test"""'], {}), "(data_dir, 'test')\n", (11572, 11590), True, 'import panoptes.prep as prep\n'), ((11606, 11651), 'os.path.isfile', 'os.path.isfile', (["(data_dir + '/train.tfrecords')"], {}), "(data_dir + '/train.tfrecords')\n", (11620, 11651), False, 'import os\n'), ((11665, 11695), 'panoptes.prep.loader', 'prep.loader', (['data_dir', '"""train"""'], {}), "(data_dir, 'train')\n", (11676, 11695), True, 'import panoptes.prep as prep\n'), ((11711, 11761), 'os.path.isfile', 'os.path.isfile', (["(data_dir + '/validation.tfrecords')"], {}), "(data_dir + '/validation.tfrecords')\n", (11725, 11761), False, 'import os\n'), ((11775, 11810), 'panoptes.prep.loader', 'prep.loader', (['data_dir', '"""validation"""'], {}), "(data_dir, 'validation')\n", (11786, 11810), True, 'import panoptes.prep as prep\n'), ((12693, 12764), 'panoptes.prep.tfreloader', 'prep.tfreloader', (['"""test"""', '(1)', 'batchsize', 'classes', 'trc', 'tec', 'vac', 'data_dir'], {}), "('test', 1, batchsize, classes, trc, tec, vac, data_dir)\n", (12708, 12764), True, 'import panoptes.prep as prep\n'), ((6169, 6182), 'numpy.shape', 'np.shape', (['opt'], {}), '(opt)\n', (6177, 6182), True, 'import numpy as np\n'), ((6187, 6200), 'numpy.shape', 'np.shape', (['opt'], {}), '(opt)\n', (6195, 6200), True, 'import numpy as np\n'), ((6528, 6542), 'numpy.shape', 'np.shape', (['topt'], {}), '(topt)\n', (6536, 6542), True, 'import numpy as np\n'), ((6547, 6561), 'numpy.shape', 'np.shape', (['topt'], {}), '(topt)\n', (6555, 6561), True, 'import numpy as np\n'), ((7324, 7335), 'time.time', 'time.time', ([], {}), '()\n', (7333, 7335), False, 'import time\n'), ((7676, 7689), 'os.mkdir', 'os.mkdir', (['DIR'], {}), '(DIR)\n', (7684, 7689), False, 'import os\n'), ((10124, 10137), 'os.mkdir', 'os.mkdir', (['DIR'], {}), '(DIR)\n', (10132, 10137), False, 'import os\n'), ((6233, 6246), 'numpy.shape', 'np.shape', (['opt'], {}), '(opt)\n', (6241, 6246), True, 'import numpy as 
np\n'), ((6252, 6265), 'numpy.shape', 'np.shape', (['opt'], {}), '(opt)\n', (6260, 6265), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from matplotlib.gridspec import GridSpec
from matplotlib.ticker import FixedLocator
import numpy as np
from os import listdir
from os.path import join
from lvxml2dict import Cluster
from namegleaner import NameGleaner
from transformer import Transformer
import transformations as tfms
import re
import scipy
from transformations import toggle
from matplotlib.widgets import CheckButtons
""" TODO
- Need a way to detect non-magnetic areas and normalize them differently...
- Or don't normalize any curves and just compute the contrast range that
the pcolor maps should use. This is what we will do if/when we convert
to polarization rotation eventually.
"""
# Legacy module-level tuning knobs. scmoplot() actually reads the `default_ps`
# dict below (merged with caller overrides); these names are kept for
# backward compatibility with any external users.
xlim = 10.0    # axis half-width (presumably mT; see commented set_xlim calls)
ylim = 1.1     # axis half-height for the normalized signal
thresh = 7     # thresh: where to start fits
max = 10       # max: highest field  # NOTE: shadows the builtin `max`
filt_ks = 157  # median-filter kernel size

# Default parameter set copied and overlaid with user_ps inside scmoplot().
default_ps = dict(
    xlim=10.0,
    ylim=1.1,
    thresh=7,
    max=10,
    filt_ks=157,
)
def scmoplot(root_path, user_ps):
    """Plot a grid of scanning-MOKE hysteresis loops and Hc / Mrem maps.

    Reads every ``averaged`` data file under ``root_path`` (grid geometry
    taken from ``parameters.xml``), plots each loop with tangent lines,
    saturation points, and loop area in its grid cell, then renders pcolor
    maps of coercive field (Hc) and remanent magnetization (Mrem/Msat).

    Args:
        root_path: directory containing ``parameters.xml`` and the per-point
            measurement files named like ``scan=.. x=.. y=.. averaged``.
        user_ps: dict of overrides applied on top of ``default_ps``
            (keys: xlim, ylim, thresh, max, filt_ks).
    """
    # Merge caller overrides onto a copy of the module defaults.
    ps = dict(default_ps)
    ps.update(user_ps)
    # Gleaner extracts scan/x/y indices and the 'averaged' tag from filenames.
    ng = NameGleaner(scan=r'scan=(\d+)', x=r'x=(\d+)', y=r'y=(\d+)',
                     averaged=r'(averaged)')
    # Pipeline 1: full normalization chain used for the Hc/Mrem fits.
    tfmr = Transformer(gleaner=ng)
    tfmr.add(10, tfms.scale, params={'xsc': 0.1})
    tfmr.add(20, tfms.flatten_saturation,
             params={'threshold': ps['thresh'], 'polarity': '+'})
    tfmr.add(25, tfms.center)
    tfmr.add(30, tfms.wrapped_medfilt, params={'ks': ps['filt_ks']})
    tfmr.add(40, tfms.saturation_normalize, params={'thresh': ps['thresh']})
    # Pipeline 2: lighter chain (no normalization) used for the displayed loops.
    tfmr2 = Transformer(gleaner=ng)
    tfmr2.add(10, tfms.scale, params={'xsc': 0.1})
    tfmr2.add(30, tfms.wrapped_medfilt, params={'ks': ps['filt_ks']})
    tfmr2.add(40, tfms.clean)
    # Grid geometry from the experiment's parameters file.
    clust = Cluster(join(root_path, 'parameters.xml')).to_dict()
    gx, gy = (clust['Rows'], clust['Cols'])
    fig, axarr = plt.subplots(ncols=gx, nrows=gy,
                              figsize=(10, 10))
    for row in axarr:
        for ax in row:
            ax.xaxis.set_ticklabels([])
            ax.yaxis.set_ticklabels([])
            #ax.set_xlim(-ps['xlim'], ps['xlim'])
            #ax.set_ylim(-ps['ylim'], ps['ylim'])
    # Per-cell results; filled with fit output (or 0.0 on failure) below.
    Hcs = [[None for i in range(gx)] for i in range(gy)]
    Mrs = [[None for i in range(gx)] for i in range(gy)]
    for f in listdir(root_path):
        gleaned = ng.glean(f)
        if gleaned['averaged']:
            print('Plotting %s' % f)
            x, y = int(gleaned['x']), int(gleaned['y'])
            ax = axarr[y, x]
            # Columns 0/1 are field (B) and signal (V); 7 header rows skipped.
            Bi, Vi = np.loadtxt(join(root_path, f), usecols=(0, 1), unpack=True,
                                  skiprows=7)
            B,V = tfmr((Bi,Vi),f)
            B2, V2 = tfmr2((Bi, Vi), f)
            ##data set 2 graphs
            lslope,rslope,tan=tfms.x0slope(B2,V2)
            lsat,rsat=tfms.sat_field(B2,V2)
            area = tfms.loop_area(B2,V2)
            data = ax.plot(B2,V2,'k')
            tanlines = ax.plot(tan[0],tan[1],'r',tan[2],tan[3],'y*',tan[4],tan[5],'b',tan[6],tan[7], 'y*')
            satfields = ax.plot(B2[lsat],V2[lsat],'ro',B2[rsat],V2[rsat],'go')
            areatext = ax.text(B2.min(),V2.max(), ("loop area: "+str(area+.0005)[0:6]))
            # Interactive toggles for the plotted artists.
            # NOTE(review): rax/check are re-created for every file, so only
            # the last loop's widget stays usable — confirm intended.
            rax = plt.axes([0.05, 0.4, 0.1, 0.15])
            check = CheckButtons(rax, ('data', 'tangent lines',
                'saturation points', 'loop area'), (True, True, True, True))
            def func(label):
                # Toggle visibility of the artist matching the clicked label.
                if label == 'data': toggle(data)
                elif label == 'tangent lines': toggle(tanlines)
                elif label == 'saturation points': toggle(satfields)
                elif label == 'loop area': areatext.set_visible(not areatext.get_visible())
                plt.draw()
            check.on_clicked(func)
            try:
                # Fit Hc and Mrem on the normalized data; mark them on the loop.
                Hc = tfms.Hc_of(B, V, fit_int=(ps['thresh'], ps['max']))
                Hcs[y][x] = Hc
                Mr = tfms.Mrem_of(B, V, fit_int=(ps['thresh'], ps['max']))
                Mrs[y][x] = Mr
                zs = np.zeros(3)
                ax.plot(zs, Mr, 'ro', ms=7)
                ax.plot(Hc, zs, 'ro', ms=7)
            except Exception as e:
                # Fit failed (e.g. non-magnetic spot): record zeros and move on.
                # NOTE(review): the scalar 0.0 fallback is inconsistent with the
                # x[1] indexing below, which assumes a sequence — verify.
                print('\t{}'.format(e))
                Hcs[y][x] = 0.0
                Mrs[y][x] = 0.0
    plt.tight_layout(w_pad=0, h_pad=0)
    plt.show()
    # Flatten the nested result lists into (gy, gx) arrays; element [1] of
    # each fit result is taken as the map value.
    Hcs = np.array([x[1] for row in Hcs for x in row]).reshape(gy, gx)
    Mrs = np.array([x[1] for row in Mrs for x in row]).reshape(gy, gx)
    # Layout: two map panels (ax0/ax2) each with a horizontal colorbar (ax1/ax3).
    gs = GridSpec(10, 10)
    ax0 = plt.subplot(gs[0:9, :5])
    ax1 = plt.subplot(gs[9, :5])
    ax2 = plt.subplot(gs[0:9, 5:])
    ax3 = plt.subplot(gs[9, 5:])
    fig = ax0.get_figure()
    fig.set_size_inches(12, 8)
    # Plot Hc pcolor map
    n = Normalize(vmin=0.0, vmax=5.0, clip=True)
    res = ax0.pcolor(Hcs, cmap='afmhot', norm=n, edgecolors='k')
    plt.colorbar(res, cax=ax1, orientation='horizontal', ticks=(0, 2.5, 5))
    # Plot Mr pcolor map
    n = Normalize(vmin=0.0, vmax=1.0, clip=True)
    res = ax2.pcolor(Mrs, cmap='afmhot', norm=n, edgecolors='k')
    plt.colorbar(res, cax=ax3, orientation='horizontal', ticks=(0, 0.5, 1))
    ax0.set_title('Hc (mT)')
    ax0.set_aspect('equal', adjustable='box')
    ax2.set_title('Mrem/Msat')
    ax2.set_aspect('equal', adjustable='box')
    plt.tight_layout()
    plt.show()
if __name__ == '__main__':
    # Default data location for the Linux workstation; the alternate Windows
    # path is kept below for convenience.
    # root_path = r'C:\Users\Tor\Desktop\test\trial1_5x5_BFO_test_sample'
    root_path = '/home/jji/Desktop/scanning_moke_test/trial1_5x5_BFO_test_sample'
    ps = {}  # no overrides: scmoplot falls back entirely to default_ps
    scmoplot(root_path, ps)
| [
"transformations.Mrem_of",
"transformations.x0slope",
"numpy.array",
"namegleaner.NameGleaner",
"os.listdir",
"matplotlib.gridspec.GridSpec",
"matplotlib.widgets.CheckButtons",
"transformer.Transformer",
"matplotlib.pyplot.axes",
"matplotlib.colors.Normalize",
"matplotlib.pyplot.draw",
"transf... | [((1060, 1147), 'namegleaner.NameGleaner', 'NameGleaner', ([], {'scan': '"""scan=(\\\\d+)"""', 'x': '"""x=(\\\\d+)"""', 'y': '"""y=(\\\\d+)"""', 'averaged': '"""(averaged)"""'}), "(scan='scan=(\\\\d+)', x='x=(\\\\d+)', y='y=(\\\\d+)', averaged=\n '(averaged)')\n", (1071, 1147), False, 'from namegleaner import NameGleaner\n'), ((1177, 1200), 'transformer.Transformer', 'Transformer', ([], {'gleaner': 'ng'}), '(gleaner=ng)\n', (1188, 1200), False, 'from transformer import Transformer\n'), ((1554, 1577), 'transformer.Transformer', 'Transformer', ([], {'gleaner': 'ng'}), '(gleaner=ng)\n', (1565, 1577), False, 'from transformer import Transformer\n'), ((1857, 1907), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': 'gx', 'nrows': 'gy', 'figsize': '(10, 10)'}), '(ncols=gx, nrows=gy, figsize=(10, 10))\n', (1869, 1907), True, 'import matplotlib.pyplot as plt\n'), ((2293, 2311), 'os.listdir', 'listdir', (['root_path'], {}), '(root_path)\n', (2300, 2311), False, 'from os import listdir\n'), ((4382, 4416), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'w_pad': '(0)', 'h_pad': '(0)'}), '(w_pad=0, h_pad=0)\n', (4398, 4416), True, 'import matplotlib.pyplot as plt\n'), ((4421, 4431), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4429, 4431), True, 'import matplotlib.pyplot as plt\n'), ((4585, 4601), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['(10)', '(10)'], {}), '(10, 10)\n', (4593, 4601), False, 'from matplotlib.gridspec import GridSpec\n'), ((4612, 4636), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0:9, :5]'], {}), '(gs[0:9, :5])\n', (4623, 4636), True, 'import matplotlib.pyplot as plt\n'), ((4647, 4669), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[9, :5]'], {}), '(gs[9, :5])\n', (4658, 4669), True, 'import matplotlib.pyplot as plt\n'), ((4680, 4704), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0:9, 5:]'], {}), '(gs[0:9, 5:])\n', (4691, 4704), True, 'import matplotlib.pyplot as plt\n'), ((4715, 4737), 
'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[9, 5:]'], {}), '(gs[9, 5:])\n', (4726, 4737), True, 'import matplotlib.pyplot as plt\n'), ((4826, 4866), 'matplotlib.colors.Normalize', 'Normalize', ([], {'vmin': '(0.0)', 'vmax': '(5.0)', 'clip': '(True)'}), '(vmin=0.0, vmax=5.0, clip=True)\n', (4835, 4866), False, 'from matplotlib.colors import Normalize\n'), ((4936, 5007), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['res'], {'cax': 'ax1', 'orientation': '"""horizontal"""', 'ticks': '(0, 2.5, 5)'}), "(res, cax=ax1, orientation='horizontal', ticks=(0, 2.5, 5))\n", (4948, 5007), True, 'import matplotlib.pyplot as plt\n'), ((5038, 5078), 'matplotlib.colors.Normalize', 'Normalize', ([], {'vmin': '(0.0)', 'vmax': '(1.0)', 'clip': '(True)'}), '(vmin=0.0, vmax=1.0, clip=True)\n', (5047, 5078), False, 'from matplotlib.colors import Normalize\n'), ((5148, 5219), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['res'], {'cax': 'ax3', 'orientation': '"""horizontal"""', 'ticks': '(0, 0.5, 1)'}), "(res, cax=ax3, orientation='horizontal', ticks=(0, 0.5, 1))\n", (5160, 5219), True, 'import matplotlib.pyplot as plt\n'), ((5377, 5395), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5393, 5395), True, 'import matplotlib.pyplot as plt\n'), ((5400, 5410), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5408, 5410), True, 'import matplotlib.pyplot as plt\n'), ((2862, 2882), 'transformations.x0slope', 'tfms.x0slope', (['B2', 'V2'], {}), '(B2, V2)\n', (2874, 2882), True, 'import transformations as tfms\n'), ((2904, 2926), 'transformations.sat_field', 'tfms.sat_field', (['B2', 'V2'], {}), '(B2, V2)\n', (2918, 2926), True, 'import transformations as tfms\n'), ((2945, 2967), 'transformations.loop_area', 'tfms.loop_area', (['B2', 'V2'], {}), '(B2, V2)\n', (2959, 2967), True, 'import transformations as tfms\n'), ((3310, 3342), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.05, 0.4, 0.1, 0.15]'], {}), '([0.05, 0.4, 0.1, 0.15])\n', (3318, 3342), True, 'import 
matplotlib.pyplot as plt\n'), ((3363, 3471), 'matplotlib.widgets.CheckButtons', 'CheckButtons', (['rax', "('data', 'tangent lines', 'saturation points', 'loop area')", '(True, True, True, True)'], {}), "(rax, ('data', 'tangent lines', 'saturation points',\n 'loop area'), (True, True, True, True))\n", (3375, 3471), False, 'from matplotlib.widgets import CheckButtons\n'), ((4443, 4487), 'numpy.array', 'np.array', (['[x[1] for row in Hcs for x in row]'], {}), '([x[1] for row in Hcs for x in row])\n', (4451, 4487), True, 'import numpy as np\n'), ((4514, 4558), 'numpy.array', 'np.array', (['[x[1] for row in Mrs for x in row]'], {}), '([x[1] for row in Mrs for x in row])\n', (4522, 4558), True, 'import numpy as np\n'), ((1750, 1783), 'os.path.join', 'join', (['root_path', '"""parameters.xml"""'], {}), "(root_path, 'parameters.xml')\n", (1754, 1783), False, 'from os.path import join\n'), ((2529, 2547), 'os.path.join', 'join', (['root_path', 'f'], {}), '(root_path, f)\n', (2533, 2547), False, 'from os.path import join\n'), ((3803, 3813), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (3811, 3813), True, 'import matplotlib.pyplot as plt\n'), ((3900, 3951), 'transformations.Hc_of', 'tfms.Hc_of', (['B', 'V'], {'fit_int': "(ps['thresh'], ps['max'])"}), "(B, V, fit_int=(ps['thresh'], ps['max']))\n", (3910, 3951), True, 'import transformations as tfms\n'), ((4004, 4057), 'transformations.Mrem_of', 'tfms.Mrem_of', (['B', 'V'], {'fit_int': "(ps['thresh'], ps['max'])"}), "(B, V, fit_int=(ps['thresh'], ps['max']))\n", (4016, 4057), True, 'import transformations as tfms\n'), ((4110, 4121), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (4118, 4121), True, 'import numpy as np\n'), ((3549, 3561), 'transformations.toggle', 'toggle', (['data'], {}), '(data)\n', (3555, 3561), False, 'from transformations import toggle\n'), ((3609, 3625), 'transformations.toggle', 'toggle', (['tanlines'], {}), '(tanlines)\n', (3615, 3625), False, 'from transformations import toggle\n'), 
((3677, 3694), 'transformations.toggle', 'toggle', (['satfields'], {}), '(satfields)\n', (3683, 3694), False, 'from transformations import toggle\n')] |
import numpy as np
import piecewisecrf.config.prefs as prefs
FLAGS = prefs.flags.FLAGS
def generate_encoding_decoding_dict(number_of_labels):
    '''
    Generates mappings between label pairs and indices

    The index of the pair (i, j) is its position in row-major order,
    i.e. i * number_of_labels + j.

    Parameters
    ----------
    number_of_labels : int
        Number of classes in the dataset

    Returns
    -------
    encoding, decoding: tuple
        encoding is a dict that maps (label, label) => index
        decoding is a dict that maps index => (label, label)
    '''
    decoding = dict(enumerate((i, j) for i in range(number_of_labels) for j in range(number_of_labels)))
    # Invert the mapping; iterating the dict view directly is enough
    # (the original wrapped it in a redundant list()).
    encoding = {v: k for k, v in decoding.items()}
    return encoding, decoding
def generate_pairwise_labels(labels, indices_getter, number_of_labels):
    '''
    Builds the ground-truth map for pairwise potentials.

    The neighbourhood is defined by indices_getter, which must return two
    parallel lists of indices into the flattened label map (first pixel of
    each pair, second pixel of each pair).

    Parameters
    ----------
    labels: numpy array
        Ground truth map for unary potentials (label image from the dataset)
    indices_getter: callable
        Function that returns two lists with indices denoting neighbour pixels
    number_of_labels : int
        Number of classes in the dataset

    Returns
    -------
    pairwise_labels: numpy array
        int32 array with one encoded label-pair index per neighbour pair;
        -1 for label pairs that are not present in the encoding
    '''
    flat = np.reshape(labels, [-1])
    first, second = indices_getter()
    encoding, _ = generate_encoding_decoding_dict(number_of_labels)
    codes = [encoding.get((flat[a], flat[b]), -1)
             for a, b in zip(first, second)]
    return np.array(codes).astype(np.int32)
#######################################################################################################################
# Pairwise potentials modelling surrounding relations #
#######################################################################################################################
def get_indices_surrounding():
    '''
    Returns two lists with pixel indices for surrounding pixels

    A pixel's neighbourhood is the square window of side
    FLAGS.surrounding_neighbourhood_size centred on it (the pixel itself
    excluded); out-of-image positions are skipped.

    Returns
    -------
    original_container, container: lists
        original_container contains indices for the first pixel in the neighbourhood
        container contains indices for the second pixel in the neighbourhood
    '''
    centers = []
    neighbours = []
    height = int(FLAGS.img_height / FLAGS.subsample_factor)
    width = int(FLAGS.img_width / FLAGS.subsample_factor)
    half = int(FLAGS.surrounding_neighbourhood_size / 2)
    for row in range(height):
        for col in range(width):
            center = row * width + col
            for r in range(row - half, row + half + 1):
                for c in range(col - half, col + half + 1):
                    if r == row and c == col:
                        continue  # skip the centre pixel itself
                    if 0 <= r < height and 0 <= c < width:
                        neighbours.append(r * width + c)
                        centers.append(center)
    return centers, neighbours
def get_number_of_all_neigbhours_surrounding(h, w, nsize):
    '''
    Returns total number of neighbours for the surrounding neighbourhood

    Counts, over every pixel of an int(h) x int(w) grid, the in-bounds
    positions inside the nsize x nsize window centred on the pixel,
    excluding the pixel itself.  Bounds are tested against the original
    (possibly non-integer) h and w.

    Returns
    -------
    ret_val: int
        Total number of neighbours
    '''
    half = int(nsize / 2)
    total = 0
    for row in range(int(h)):
        for col in range(int(w)):
            for r in range(row - half, row + half + 1):
                for c in range(col - half, col + half + 1):
                    if r == row and c == col:
                        continue  # the centre pixel is not its own neighbour
                    if 0 <= r < h and 0 <= c < w:
                        total += 1
    return total
# Precompute the surrounding-neighbourhood index lists and the total pair
# count once at import time; both depend only on the flag configuration.
FIRST_INDICES_SURR, SECOND_INDICES_SURR = get_indices_surrounding()
NUMBER_OF_NEIGHBOURS_SURR = get_number_of_all_neigbhours_surrounding(
    FLAGS.img_height / FLAGS.subsample_factor,
    FLAGS.img_width / FLAGS.subsample_factor,
    FLAGS.surrounding_neighbourhood_size
)
#######################################################################################################################
# Pairwise potentials modelling above/below relations #
#######################################################################################################################
def get_indices_above_below():
    '''
    Returns two lists with pixel indices for above/below pixels

    A pixel's neighbourhood is the block of FLAGS.neigbourhood_above_below_height
    rows directly above it, FLAGS.neigbourhood_above_below_width columns wide and
    horizontally centred on the pixel; out-of-image positions are skipped.

    Returns
    -------
    original_container, container: lists
        original_container contains indices for the first pixel in the neighbourhood
        container contains indices for the second pixel in the neighbourhood
    '''
    centers = []
    neighbours = []
    height = int(FLAGS.img_height / FLAGS.subsample_factor)
    width = int(FLAGS.img_width / FLAGS.subsample_factor)
    n_rows = FLAGS.neigbourhood_above_below_height
    half_cols = int(FLAGS.neigbourhood_above_below_width / 2)
    for row in range(height):
        for col in range(width):
            center = row * width + col
            # r < row always holds, so the centre pixel can never be visited.
            for r in range(row - n_rows, row):
                for c in range(col - half_cols, col + half_cols + 1):
                    if 0 <= r < height and 0 <= c < width:
                        neighbours.append(r * width + c)
                        centers.append(center)
    return centers, neighbours
def get_number_of_all_neigbhours_above_below(h, w, nsize_height, nsize_width):
    '''
    Returns total number of neighbours for the above/below neighbourhood

    Counts, over every pixel of an int(h) x int(w) grid, the in-bounds
    positions in the nsize_height rows directly above the pixel within a
    horizontal window of nsize_width columns centred on it.  Bounds are
    tested against the original (possibly non-integer) h and w.

    Returns
    -------
    ret_val: int
        Total number of neighbours
    '''
    half_cols = int(nsize_width / 2)
    total = 0
    for row in range(int(h)):
        for col in range(int(w)):
            # r < row always holds, so the centre pixel is never counted.
            for r in range(row - nsize_height, row):
                for c in range(col - half_cols, col + half_cols + 1):
                    if 0 <= r < h and 0 <= c < w:
                        total += 1
    return total
# Precompute the above/below-neighbourhood index lists and the total pair
# count once at import time; both depend only on the flag configuration.
FIRST_INDICES_AB, SECOND_INDICES_AB = get_indices_above_below()
NUMBER_OF_NEIGHBOURS_AB = get_number_of_all_neigbhours_above_below(
    FLAGS.img_height / FLAGS.subsample_factor,
    FLAGS.img_width / FLAGS.subsample_factor,
    FLAGS.neigbourhood_above_below_height,
    FLAGS.neigbourhood_above_below_width
)
| [
"numpy.reshape"
] | [((1342, 1366), 'numpy.reshape', 'np.reshape', (['labels', '[-1]'], {}), '(labels, [-1])\n', (1352, 1366), True, 'import numpy as np\n')] |
from collections import defaultdict, deque, Counter
from itertools import count
from pprint import pprint
from day import Day, util
import numpy as np
OCCUPIED = '#'
EMPTY = 'L'
FLOOR = '.'
class Day11Part2(Day):
    """Advent of Code 2020, day 11 part 2.

    Seat-occupancy cellular automaton: each seat looks along the eight
    sight lines for the first visible seat (floor is looked through) and
    flips to empty once it sees five or more occupied seats.
    """
    day = 11
    part = 2
    # Unit direction vectors for the eight lines of sight.
    OFFSETS = (
        (0, -1), (0, 1),  # left/right
        (-1, 0), (1, 0),  # top/down
        (1, 1), (-1, -1), (-1, 1), (1, -1),  # diagonal
    )
    def get_sample_input(self):
        """Return the example seating layout from the puzzle statement."""
        return ('L.LL.LL.LL\n'
                'LLLLLLL.LL\n'
                'L.L.L..L..\n'
                'LLLL.LL.LL\n'
                'L.LL.LL.LL\n'
                'L.LLLLL.LL\n'
                '..L.L.....\n'
                'LLLLLLLLLL\n'
                'L.LLLLLL.L\n'
                'L.LLLLL.LL')
    def get_neighbors(self, grid: np.ndarray, pos: tuple):
        """Yield the first visible seat character ('L' or '#') along each of
        the eight sight lines from *pos*; a line that hits the grid edge
        before a seat yields nothing."""
        for y, x in self.OFFSETS:
            # Walk outward step by step until a seat or the edge is found.
            for i in count(1):
                offset_y = y * i
                offset_x = x * i
                if not (0 <= pos[0] + offset_y < grid.shape[0] and 0 <= pos[1] + offset_x < grid.shape[1]):
                    break
                if (char := grid[pos[0] + offset_y, pos[1] + offset_x]) == FLOOR:
                    continue  # floor is transparent; keep looking
                yield char
                break
    def parse_input(self):
        """Parse the puzzle text (self.input_text, presumably provided by the
        Day base class — TODO confirm) into a 2-D array of single characters."""
        return np.array(tuple(map(list, self.input_text.splitlines())))
    def solve(self):
        """Iterate the seating rules until a fixed point and print the number
        of occupied seats."""
        grid = self.parse_input()
        buffer = np.full(grid.shape, fill_value=FLOOR)
        while True:
            for index, char in np.ndenumerate(grid):
                if char == FLOOR:
                    continue
                neighbors = tuple(self.get_neighbors(grid, index))
                if char == EMPTY and OCCUPIED not in neighbors:
                    buffer[index] = OCCUPIED
                elif char == OCCUPIED and neighbors.count(OCCUPIED) >= 5:
                    buffer[index] = EMPTY
            # NOTE(review): buffer is only written when a seat flips; seats
            # that do not flip keep the value carried over from the previous
            # pass, which matches grid because grid = buffer.copy() runs each
            # iteration.  On the very first pass this relies on every seat
            # flipping (true when the input contains no '#') — confirm if the
            # input could ever start with occupied seats.
            # print('====================', *map(''.join, buffer), sep='\n')
            if (grid == buffer).all():
                grid = buffer.copy()
                break
            grid = buffer.copy()
        print('day 11 part 2 answer:', (grid == OCCUPIED).sum())
| [
"numpy.full",
"itertools.count",
"numpy.ndenumerate"
] | [((1398, 1435), 'numpy.full', 'np.full', (['grid.shape'], {'fill_value': 'FLOOR'}), '(grid.shape, fill_value=FLOOR)\n', (1405, 1435), True, 'import numpy as np\n'), ((855, 863), 'itertools.count', 'count', (['(1)'], {}), '(1)\n', (860, 863), False, 'from itertools import count\n'), ((1487, 1507), 'numpy.ndenumerate', 'np.ndenumerate', (['grid'], {}), '(grid)\n', (1501, 1507), True, 'import numpy as np\n')] |
import numpy as np
class Splitter:
    """
    Random train/test splitter for paired X/y numpy arrays.
    """

    def __init__(self):
        """
        Constructor for Splitter Class.
        """
        self.x = None  # feature array, set by fit()
        self.y = None  # target array, set by fit()

    def fit(self, x: np.ndarray, y: np.ndarray):
        """
        Fits the data to the Splitter object.

        :param x: Numpy array with X data (one row per sample).
        :param y: Numpy array with y data (one row per sample).
        """
        assert len(x) == len(y)
        self.x = x
        self.y = y

    def transform(self, test_size, random_seed=42):
        """
        Shuffles the fitted data and splits it accordingly to the test size.

        :param test_size: Float value with test size (fraction in [0, 1]).
        :param random_seed: Random seed for numpy.
        :return: A Tuple.
            1) X train array with 1-test size of total data.
            2) X test array with test size of total data.
            3) Y train array with 1-test size of total data.
            4) Y test array with test size of total data.
        """
        assert isinstance(test_size, float), "The test size must be a float value"
        assert 0 <= test_size <= 1, "The test size must be between 0 and 1."
        np.random.seed(random_seed)
        # Both arrays have the same length, so shuffle a shared index array.
        indexes = np.arange(self.x.shape[0])
        np.random.shuffle(indexes)
        self.x = self.x[indexes]
        self.y = self.y[indexes]
        sep_index = int((1 - test_size) * self.x.shape[0])
        # BUG FIX: the original sliced the test partitions with
        # ``sep_index + 1:``, silently dropping the sample at ``sep_index``
        # from both train and test sets.
        return (self.x[:sep_index, :], self.x[sep_index:, :],
                self.y[:sep_index, :], self.y[sep_index:, :])

    def fit_transform(self, x, y, test_size, random_seed=42):
        """
        Fits and transforms the data.

        :param x: Numpy array with X data to split.
        :param y: Numpy array with y data to split.
        :param test_size: Float value with test size.
        :param random_seed: Random seed for numpy (new, defaults to the
            previous hard-wired value, so existing callers are unaffected).
        :return: A Tuple.
            1) X train array with 1-test size of total data.
            2) X test array with test size of total data.
            3) Y train array with 1-test size of total data.
            4) Y test array with test size of total data.
        """
        self.fit(x, y)
        return self.transform(test_size, random_seed)
from sklearn.datasets import load_iris
from src.neural_network.metrics.Metrics import Metrics
# Demo: split the iris dataset with the Splitter defined above.
np.random.seed(42)
# Loads the dataset (X features, integer class labels)
data = load_iris(return_X_y=True)
X = data[0]
# One hot encoding and data set separation
y, classes = Metrics.one_hot_encoding(data[1])
# 70/30 train/test split of the encoded data.
X_train, X_test, y_train, y_test = Splitter().fit_transform(X, y, 0.3)
"sklearn.datasets.load_iris",
"numpy.random.seed",
"src.neural_network.metrics.Metrics.Metrics.one_hot_encoding",
"numpy.arange",
"numpy.random.shuffle"
] | [((2739, 2757), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (2753, 2757), True, 'import numpy as np\n'), ((2786, 2812), 'sklearn.datasets.load_iris', 'load_iris', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (2795, 2812), False, 'from sklearn.datasets import load_iris\n'), ((2882, 2915), 'src.neural_network.metrics.Metrics.Metrics.one_hot_encoding', 'Metrics.one_hot_encoding', (['data[1]'], {}), '(data[1])\n', (2906, 2915), False, 'from src.neural_network.metrics.Metrics import Metrics\n'), ((1335, 1362), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (1349, 1362), True, 'import numpy as np\n'), ((1446, 1472), 'numpy.arange', 'np.arange', (['self.x.shape[0]'], {}), '(self.x.shape[0])\n', (1455, 1472), True, 'import numpy as np\n'), ((1481, 1507), 'numpy.random.shuffle', 'np.random.shuffle', (['indexes'], {}), '(indexes)\n', (1498, 1507), True, 'import numpy as np\n')] |
# Compatibility imports
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import tensorflow as tf
import numpy as np
import utils
import train_data
# Some configs
num_features = 13  # feature vector size per frame (presumably MFCC — confirm against train_data)
epoch_save_step = 100 # save a checkpoint every this many epochs
# Accounting the 0th indice + space + blank label = 28 characters
num_classes = ord('z') - ord('a') + 1 + 1 + 1
# Hyper-parameters
num_epochs = 2000
num_hidden = 200
num_layers = 1
batch_size = 16
initial_learning_rate = 1e-2
momentum = 0.9  # only used by the (commented-out) momentum optimizer below
# Training data: file lists and precomputed input/target arrays.
num_train_data = 10
num_test_data = 5
wav_files_train, wav_files_test = train_data.get_file_list(num_train_data, num_test_data, shuffle=False)
num_examples = len(wav_files_train)
num_batches_per_epoch = int(num_examples/batch_size)
train_inputs = train_data.prepare_inputs(wav_files_train)
train_targets = train_data.prepare_targets(wav_files_train)
test_inputs = train_data.prepare_inputs(wav_files_test)
# THE MAIN CODE!
# Build the TF1 computation graph: stacked LSTM -> affine projection ->
# CTC loss + beam-search decoder + label-error-rate metric.
graph = tf.Graph()
with graph.as_default():
    # e.g: log filter bank or MFCC features
    # Has size [batch_size, max_stepsize, num_features], but the
    # batch_size and max_stepsize can vary along each step
    inputs = tf.placeholder(tf.float32, [None, None, num_features])

    # Here we use sparse_placeholder that will generate a
    # SparseTensor required by ctc_loss op.
    targets = tf.sparse_placeholder(tf.int32)

    # 1d array of size [batch_size]: true (unpadded) length of each sequence.
    seq_len = tf.placeholder(tf.int32, [None])

    # Defining the cell
    # Can be:
    #   tf.nn.rnn_cell.RNNCell
    #   tf.nn.rnn_cell.GRUCell
    cell = tf.contrib.rnn.LSTMCell(num_hidden, state_is_tuple=True)

    # Stacking rnn cells
    stack = tf.contrib.rnn.MultiRNNCell([cell] * num_layers,
                                        state_is_tuple=True)

    # The second output is the last state and we will no use that
    outputs, _ = tf.nn.dynamic_rnn(stack, inputs, seq_len, dtype=tf.float32)

    shape = tf.shape(inputs)
    batch_s, max_timesteps = shape[0], shape[1]

    # Reshaping to apply the same weights over the timesteps
    outputs = tf.reshape(outputs, [-1, num_hidden])

    # Truncated normal with mean 0 and stdev=0.1
    # Tip: Try another initialization
    # see https://www.tensorflow.org/versions/r0.9/api_docs/python/contrib.layers.html#initializers
    W = tf.Variable(tf.truncated_normal([num_hidden,
                                         num_classes],
                                        stddev=0.1))
    # Zero initialization
    # Tip: Is tf.zeros_initializer the same?
    b = tf.Variable(tf.constant(0., shape=[num_classes]))

    # Doing the affine projection
    logits = tf.matmul(outputs, W) + b

    # Reshaping back to the original shape
    logits = tf.reshape(logits, [batch_s, -1, num_classes])

    # Time major: ctc_loss expects [max_time, batch_size, num_classes].
    logits = tf.transpose(logits, (1, 0, 2))

    loss = tf.nn.ctc_loss(targets, logits, seq_len)
    cost = tf.reduce_mean(loss)

    # IDK but adam gives better result
    #optimizer = tf.train.MomentumOptimizer(initial_learning_rate, momentum).minimize(cost)
    optimizer = tf.train.AdamOptimizer(initial_learning_rate).minimize(cost)

    # Option 2: tf.contrib.ctc.ctc_beam_search_decoder
    # (it's slower but you'll get better results)
    decoded, log_prob = tf.nn.ctc_beam_search_decoder(logits, seq_len)

    # Inaccuracy: label error rate (edit distance between decode and target)
    ler = tf.reduce_mean(tf.edit_distance(tf.cast(decoded[0], tf.int32),
                                          targets))
def decode_single(session, test_input):
    """Run beam-search decoding on one utterance and print the decoded text.

    Feeds a batch of size 1 into the graph's placeholders and converts the
    sparse decoder output back to a character sequence via utils.
    """
    feed = {
        inputs: np.asarray([test_input]),
        seq_len: np.asarray([len(test_input)])
    }
    sparse_out = session.run(decoded[0], feed_dict=feed)
    dense = tf.sparse_tensor_to_dense(sparse_out, default_value=-1).eval(session=session)
    # Drop the -1 padding introduced by the dense conversion.
    labels = []
    for symbol in dense[0]:
        if symbol != -1:
            labels.append(symbol)
    print('Decoded:\t%s' % (utils.decode_result(labels)))
with tf.Session(graph=graph) as session:
    saver = tf.train.Saver(tf.global_variables())
    snapshot = "ctc"
    # Resume from the latest checkpoint when one exists; the epoch number is
    # encoded as the suffix of the checkpoint filename.
    checkpoint = tf.train.latest_checkpoint(checkpoint_dir="checkpoints")
    last_epoch = 0
    if checkpoint:
        print("[i] LOADING checkpoint " + checkpoint)
        try:
            saver.restore(session, checkpoint)
            last_epoch = int(checkpoint.split('-')[-1]) + 1
            print("[i] start from epoch %d" % last_epoch)
        except:
            print("[!] incompatible checkpoint, restarting from 0")
    else:
        # Initialize the weights and biases
        tf.global_variables_initializer().run()
    for curr_epoch in range(last_epoch, num_epochs):
        train_cost = train_ler = 0
        start = time.time()
        try:
            for batch in range(num_batches_per_epoch):
                # Getting the index (wraps around so partial batches reuse samples)
                indexes = [i % num_examples for i in range(batch * batch_size, (batch + 1) * batch_size)]

                batch_train_inputs = train_inputs[indexes]
                # Padding input to max_time_step of this batch
                batch_train_inputs, batch_train_seq_len = utils.pad_sequences(batch_train_inputs)

                # Converting to sparse representation so as to feed SparseTensor input
                batch_train_targets = utils.sparse_tuple_from(train_targets[indexes])

                feed = {inputs: batch_train_inputs,
                        targets: batch_train_targets,
                        seq_len: batch_train_seq_len}

                batch_cost, _ = session.run([cost, optimizer], feed)
                train_cost += batch_cost*batch_size
                train_ler += session.run(ler, feed_dict=feed)*batch_size

            # Shuffle the data between epochs (inputs and targets in lockstep)
            shuffled_indexes = np.random.permutation(num_examples)
            train_inputs = train_inputs[shuffled_indexes]
            train_targets = train_targets[shuffled_indexes]

            # Metrics mean
            train_cost /= num_examples
            train_ler /= num_examples

            log = "Epoch {}/{}, train_cost = {:.3f}, train_ler = {:.3f}, time = {:.3f}"
            print(log.format(curr_epoch, num_epochs, train_cost, train_ler, time.time() - start))
            if curr_epoch % epoch_save_step == 0 and curr_epoch > 0:
                print("[i] SAVING snapshot %s" % snapshot)
                saver.save(session, "checkpoints/" + snapshot + ".ckpt", curr_epoch)
        except KeyboardInterrupt:
            # Ctrl-C: decode the test set for a progress check; note the
            # epoch loop then continues with the next epoch (it does not exit).
            print("\nTest data:")
            for test in test_inputs:
                decode_single(session, test)
    print("FINISHED")
    # Final qualitative check: decode both splits with the trained model.
    print("Train data:")
    for test in train_inputs:
        decode_single(session, test)
    print("\nTest data:")
    for test in test_inputs:
        decode_single(session, test)
| [
"tensorflow.shape",
"tensorflow.sparse_placeholder",
"tensorflow.transpose",
"utils.pad_sequences",
"utils.sparse_tuple_from",
"tensorflow.contrib.rnn.LSTMCell",
"tensorflow.reduce_mean",
"tensorflow.nn.ctc_loss",
"tensorflow.cast",
"tensorflow.Graph",
"tensorflow.placeholder",
"train_data.pre... | [((641, 711), 'train_data.get_file_list', 'train_data.get_file_list', (['num_train_data', 'num_test_data'], {'shuffle': '(False)'}), '(num_train_data, num_test_data, shuffle=False)\n', (665, 711), False, 'import train_data\n'), ((818, 860), 'train_data.prepare_inputs', 'train_data.prepare_inputs', (['wav_files_train'], {}), '(wav_files_train)\n', (843, 860), False, 'import train_data\n'), ((877, 920), 'train_data.prepare_targets', 'train_data.prepare_targets', (['wav_files_train'], {}), '(wav_files_train)\n', (903, 920), False, 'import train_data\n'), ((936, 977), 'train_data.prepare_inputs', 'train_data.prepare_inputs', (['wav_files_test'], {}), '(wav_files_test)\n', (961, 977), False, 'import train_data\n'), ((1005, 1015), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1013, 1015), True, 'import tensorflow as tf\n'), ((1222, 1276), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, None, num_features]'], {}), '(tf.float32, [None, None, num_features])\n', (1236, 1276), True, 'import tensorflow as tf\n'), ((1394, 1425), 'tensorflow.sparse_placeholder', 'tf.sparse_placeholder', (['tf.int32'], {}), '(tf.int32)\n', (1415, 1425), True, 'import tensorflow as tf\n'), ((1477, 1509), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {}), '(tf.int32, [None])\n', (1491, 1509), True, 'import tensorflow as tf\n'), ((1622, 1678), 'tensorflow.contrib.rnn.LSTMCell', 'tf.contrib.rnn.LSTMCell', (['num_hidden'], {'state_is_tuple': '(True)'}), '(num_hidden, state_is_tuple=True)\n', (1645, 1678), True, 'import tensorflow as tf\n'), ((1717, 1786), 'tensorflow.contrib.rnn.MultiRNNCell', 'tf.contrib.rnn.MultiRNNCell', (['([cell] * num_layers)'], {'state_is_tuple': '(True)'}), '([cell] * num_layers, state_is_tuple=True)\n', (1744, 1786), True, 'import tensorflow as tf\n'), ((1911, 1970), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['stack', 'inputs', 'seq_len'], {'dtype': 'tf.float32'}), '(stack, inputs, seq_len, 
dtype=tf.float32)\n', (1928, 1970), True, 'import tensorflow as tf\n'), ((1984, 2000), 'tensorflow.shape', 'tf.shape', (['inputs'], {}), '(inputs)\n', (1992, 2000), True, 'import tensorflow as tf\n'), ((2125, 2162), 'tensorflow.reshape', 'tf.reshape', (['outputs', '[-1, num_hidden]'], {}), '(outputs, [-1, num_hidden])\n', (2135, 2162), True, 'import tensorflow as tf\n'), ((2772, 2818), 'tensorflow.reshape', 'tf.reshape', (['logits', '[batch_s, -1, num_classes]'], {}), '(logits, [batch_s, -1, num_classes])\n', (2782, 2818), True, 'import tensorflow as tf\n'), ((2850, 2881), 'tensorflow.transpose', 'tf.transpose', (['logits', '(1, 0, 2)'], {}), '(logits, (1, 0, 2))\n', (2862, 2881), True, 'import tensorflow as tf\n'), ((2894, 2934), 'tensorflow.nn.ctc_loss', 'tf.nn.ctc_loss', (['targets', 'logits', 'seq_len'], {}), '(targets, logits, seq_len)\n', (2908, 2934), True, 'import tensorflow as tf\n'), ((2946, 2966), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), '(loss)\n', (2960, 2966), True, 'import tensorflow as tf\n'), ((3306, 3352), 'tensorflow.nn.ctc_beam_search_decoder', 'tf.nn.ctc_beam_search_decoder', (['logits', 'seq_len'], {}), '(logits, seq_len)\n', (3335, 3352), True, 'import tensorflow as tf\n'), ((3941, 3964), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (3951, 3964), True, 'import tensorflow as tf\n'), ((4066, 4122), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', ([], {'checkpoint_dir': '"""checkpoints"""'}), "(checkpoint_dir='checkpoints')\n", (4092, 4122), True, 'import tensorflow as tf\n'), ((2371, 2429), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[num_hidden, num_classes]'], {'stddev': '(0.1)'}), '([num_hidden, num_classes], stddev=0.1)\n', (2390, 2429), True, 'import tensorflow as tf\n'), ((2603, 2640), 'tensorflow.constant', 'tf.constant', (['(0.0)'], {'shape': '[num_classes]'}), '(0.0, shape=[num_classes])\n', (2614, 2640), True, 'import tensorflow as tf\n'), ((2689, 
2710), 'tensorflow.matmul', 'tf.matmul', (['outputs', 'W'], {}), '(outputs, W)\n', (2698, 2710), True, 'import tensorflow as tf\n'), ((3591, 3615), 'numpy.asarray', 'np.asarray', (['[test_input]'], {}), '([test_input])\n', (3601, 3615), True, 'import numpy as np\n'), ((4005, 4026), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (4024, 4026), True, 'import tensorflow as tf\n'), ((4688, 4699), 'time.time', 'time.time', ([], {}), '()\n', (4697, 4699), False, 'import time\n'), ((3115, 3160), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['initial_learning_rate'], {}), '(initial_learning_rate)\n', (3137, 3160), True, 'import tensorflow as tf\n'), ((3431, 3460), 'tensorflow.cast', 'tf.cast', (['decoded[0]', 'tf.int32'], {}), '(decoded[0], tf.int32)\n', (3438, 3460), True, 'import tensorflow as tf\n'), ((3758, 3804), 'tensorflow.sparse_tensor_to_dense', 'tf.sparse_tensor_to_dense', (['d'], {'default_value': '(-1)'}), '(d, default_value=-1)\n', (3783, 3804), True, 'import tensorflow as tf\n'), ((3907, 3931), 'utils.decode_result', 'utils.decode_result', (['seq'], {}), '(seq)\n', (3926, 3931), False, 'import utils\n'), ((5730, 5765), 'numpy.random.permutation', 'np.random.permutation', (['num_examples'], {}), '(num_examples)\n', (5751, 5765), True, 'import numpy as np\n'), ((4542, 4575), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4573, 4575), True, 'import tensorflow as tf\n'), ((5093, 5132), 'utils.pad_sequences', 'utils.pad_sequences', (['batch_train_inputs'], {}), '(batch_train_inputs)\n', (5112, 5132), False, 'import utils\n'), ((5262, 5309), 'utils.sparse_tuple_from', 'utils.sparse_tuple_from', (['train_targets[indexes]'], {}), '(train_targets[indexes])\n', (5285, 5309), False, 'import utils\n'), ((6154, 6165), 'time.time', 'time.time', ([], {}), '()\n', (6163, 6165), False, 'import time\n')] |
# start import modules
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import emcee
# end import modules
# The "# start ..." / "# end ..." markers below are presumably snippet
# delimiters for extracting code excerpts into a document — keep them.
savefig=True
# start generate data
np.random.seed(1) # for repeatability
F_true = 1000 # true flux, say number of photons measured in 1 second
N = 50 # number of measurements
F = stats.poisson(F_true).rvs(N)
# N measurements of the flux
e = np.sqrt(F) # errors on Poisson counts estimated via square root
# end generate data
# start visualize data
fig, ax = plt.subplots()
ax.errorbar(F, np.arange(N), xerr=e, fmt='ok', ecolor='gray', alpha=0.5)
ax.vlines([F_true], 0, N, linewidth=5, alpha=0.2)
ax.set_xlabel("Flux");ax.set_ylabel("measurement number");
# end visualize data
if savefig:
    fig.savefig('../fig/singlephotoncount_fig_1.png')
# start frequentist
# Inverse-variance weighted mean: F_est = sum(w*F)/sum(w), sigma = sum(w)^-1/2
w=1./e**2
print("""
      F_true = {0}
      F_est = {1:.0f} +/- {2:.0f} (based on {3} measurements) """\
      .format(F_true, (w * F).sum() / w.sum(), w.sum() ** -0.5, N))
# end frequentist
# end frequentist
# start bayesian setup
def log_prior(alpha):
    """Log of an (improper) uniform prior on the flux: constant, so 0."""
    return 0 # flat prior
def log_likelihood(alpha, F, e):
    """Gaussian log-likelihood of the measurements F (errors e) under a
    constant-flux model with flux alpha[0]."""
    variance = e ** 2
    residual = F - alpha[0]
    per_point = np.log(2 * np.pi * variance) + residual ** 2 / variance
    return -0.5 * np.sum(per_point)
def log_posterior(alpha, F, e):
    """Unnormalized log posterior: flat prior plus Gaussian likelihood."""
    prior = log_prior(alpha)
    likelihood = log_likelihood(alpha, F, e)
    return prior + likelihood
# end bayesian setup
# start bayesian mcmc
ndim = 1 # number of parameters in the model
nwalkers = 50 # number of MCMC walkers
nburn = 1000 # "burn-in" period to let chains stabilize
nsteps = 2000 # number of MCMC steps to take
# we'll start at random locations between 0 and 2000
starting_guesses = 2000 * np.random.rand(nwalkers, ndim)
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_posterior, args=[F,e])
sampler.run_mcmc(starting_guesses, nsteps)
# Shape of sampler.chain = (nwalkers, nsteps, ndim)
# Flatten the sampler chain and discard burn-in points:
samples = sampler.chain[:, nburn:, :].reshape((-1, ndim))
# end bayesian mcmc
# start visualize bayesian
# Posterior histogram of the flux estimate.
fig, ax = plt.subplots()
ax.hist(samples, bins=50, histtype="stepfilled", alpha=0.3, normed=True)
ax.set_xlabel(r'$F_\mathrm{est}$')
ax.set_ylabel(r'$p(F_\mathrm{est}|D,I)$')
# end visualize bayesian
if savefig:
    fig.savefig('../fig/singlephotoncount_fig_2.png')
# plot a best-fit Gaussian (mean/std of the posterior samples)
F_est = np.linspace(975, 1025)
pdf = stats.norm(np.mean(samples), np.std(samples)).pdf(F_est)
ax.plot(F_est, pdf, '-k')
# start bayesian CI
# Percentiles of the samples give the median and the 67%/95% credible intervals.
sampper=np.percentile(samples, [2.5, 16.5, 50, 83.5, 97.5],axis=0).flatten()
print("""
      F_true = {0}
      Based on {1} measurements the posterior point estimates are:
      ...F_est = {2:.0f} +/- {3:.0f}
      or using credible intervals:
      ...F_est = {4:.0f} (posterior median)
      ...F_est in [{5:.0f}, {6:.0f}] (67% credible interval)
      ...F_est in [{7:.0f}, {8:.0f}] (95% credible interval) """\
      .format(F_true, N, np.mean(samples), np.std(samples), \
              sampper[2], sampper[1], sampper[3], sampper[0], sampper[4]))
# end bayesian CI
if not savefig:
    plt.show()
| [
"numpy.mean",
"numpy.sqrt",
"numpy.random.rand",
"numpy.log",
"emcee.EnsembleSampler",
"numpy.linspace",
"scipy.stats.poisson",
"numpy.random.seed",
"numpy.std",
"numpy.percentile",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((169, 186), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (183, 186), True, 'import numpy as np\n'), ((428, 438), 'numpy.sqrt', 'np.sqrt', (['F'], {}), '(F)\n', (435, 438), True, 'import numpy as np\n'), ((554, 568), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (566, 568), True, 'import matplotlib.pyplot as plt\n'), ((1740, 1805), 'emcee.EnsembleSampler', 'emcee.EnsembleSampler', (['nwalkers', 'ndim', 'log_posterior'], {'args': '[F, e]'}), '(nwalkers, ndim, log_posterior, args=[F, e])\n', (1761, 1805), False, 'import emcee\n'), ((2073, 2087), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2085, 2087), True, 'import matplotlib.pyplot as plt\n'), ((2366, 2388), 'numpy.linspace', 'np.linspace', (['(975)', '(1025)'], {}), '(975, 1025)\n', (2377, 2388), True, 'import numpy as np\n'), ((584, 596), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (593, 596), True, 'import numpy as np\n'), ((1699, 1729), 'numpy.random.rand', 'np.random.rand', (['nwalkers', 'ndim'], {}), '(nwalkers, ndim)\n', (1713, 1729), True, 'import numpy as np\n'), ((3072, 3082), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3080, 3082), True, 'import matplotlib.pyplot as plt\n'), ((343, 364), 'scipy.stats.poisson', 'stats.poisson', (['F_true'], {}), '(F_true)\n', (356, 364), False, 'from scipy import stats\n'), ((2507, 2566), 'numpy.percentile', 'np.percentile', (['samples', '[2.5, 16.5, 50, 83.5, 97.5]'], {'axis': '(0)'}), '(samples, [2.5, 16.5, 50, 83.5, 97.5], axis=0)\n', (2520, 2566), True, 'import numpy as np\n'), ((2913, 2929), 'numpy.mean', 'np.mean', (['samples'], {}), '(samples)\n', (2920, 2929), True, 'import numpy as np\n'), ((2931, 2946), 'numpy.std', 'np.std', (['samples'], {}), '(samples)\n', (2937, 2946), True, 'import numpy as np\n'), ((2406, 2422), 'numpy.mean', 'np.mean', (['samples'], {}), '(samples)\n', (2413, 2422), True, 'import numpy as np\n'), ((2424, 2439), 'numpy.std', 'np.std', (['samples'], {}), 
'(samples)\n', (2430, 2439), True, 'import numpy as np\n'), ((1175, 1201), 'numpy.log', 'np.log', (['(2 * np.pi * e ** 2)'], {}), '(2 * np.pi * e ** 2)\n', (1181, 1201), True, 'import numpy as np\n')] |
import wradlib as wrl
import matplotlib.pyplot as pl
import warnings
warnings.filterwarnings("ignore")
try:
get_ipython().magic("matplotlib inline")
except:
pl.ion()
import numpy as np
# load radolan files
rw_filename = wrl.util.get_wradlib_data_file("../data/raa01-sf_10000-0610300750-dwd---bin")
rwdata, rwattrs = wrl.io.read_radolan_composite(rw_filename)
# print the available attributes
# print("RW Attributes:", rwattrs)
# print the available attributes
print("RX Attributes:")
for key, value in rwattrs.items():
print(key + ':', value)
print("----------------------------------------------------------------")
# This is the RADOLAN projection
proj_osr = wrl.georef.create_osr("dwd-radolan")
# Get projected RADOLAN coordinates for corner definition
xy_raw = wrl.georef.get_radolan_grid(900, 900)
data, xy = wrl.georef.set_raster_origin(rwdata, xy_raw, 'upper')
data = np.stack((data, data+100, data+1000))
ds = wrl.georef.create_raster_dataset(data, xy, projection=proj_osr)
wrl.io.write_raster_dataset("/root/out/geotiff.tif", ds, 'GTiff') | [
"wradlib.georef.create_osr",
"wradlib.georef.get_radolan_grid",
"numpy.stack",
"wradlib.util.get_wradlib_data_file",
"wradlib.io.write_raster_dataset",
"wradlib.georef.create_raster_dataset",
"wradlib.io.read_radolan_composite",
"warnings.filterwarnings",
"wradlib.georef.set_raster_origin",
"matpl... | [((69, 102), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (92, 102), False, 'import warnings\n'), ((229, 306), 'wradlib.util.get_wradlib_data_file', 'wrl.util.get_wradlib_data_file', (['"""../data/raa01-sf_10000-0610300750-dwd---bin"""'], {}), "('../data/raa01-sf_10000-0610300750-dwd---bin')\n", (259, 306), True, 'import wradlib as wrl\n'), ((325, 367), 'wradlib.io.read_radolan_composite', 'wrl.io.read_radolan_composite', (['rw_filename'], {}), '(rw_filename)\n', (354, 367), True, 'import wradlib as wrl\n'), ((676, 712), 'wradlib.georef.create_osr', 'wrl.georef.create_osr', (['"""dwd-radolan"""'], {}), "('dwd-radolan')\n", (697, 712), True, 'import wradlib as wrl\n'), ((781, 818), 'wradlib.georef.get_radolan_grid', 'wrl.georef.get_radolan_grid', (['(900)', '(900)'], {}), '(900, 900)\n', (808, 818), True, 'import wradlib as wrl\n'), ((831, 884), 'wradlib.georef.set_raster_origin', 'wrl.georef.set_raster_origin', (['rwdata', 'xy_raw', '"""upper"""'], {}), "(rwdata, xy_raw, 'upper')\n", (859, 884), True, 'import wradlib as wrl\n'), ((892, 933), 'numpy.stack', 'np.stack', (['(data, data + 100, data + 1000)'], {}), '((data, data + 100, data + 1000))\n', (900, 933), True, 'import numpy as np\n'), ((935, 998), 'wradlib.georef.create_raster_dataset', 'wrl.georef.create_raster_dataset', (['data', 'xy'], {'projection': 'proj_osr'}), '(data, xy, projection=proj_osr)\n', (967, 998), True, 'import wradlib as wrl\n'), ((999, 1064), 'wradlib.io.write_raster_dataset', 'wrl.io.write_raster_dataset', (['"""/root/out/geotiff.tif"""', 'ds', '"""GTiff"""'], {}), "('/root/out/geotiff.tif', ds, 'GTiff')\n", (1026, 1064), True, 'import wradlib as wrl\n'), ((165, 173), 'matplotlib.pyplot.ion', 'pl.ion', ([], {}), '()\n', (171, 173), True, 'import matplotlib.pyplot as pl\n')] |
import tensorflow as tf
import numpy as np
from custom_model import OrthogonalButterfly
k = 8
N = 2 ** 16
batch_size = N // 8
width_pow = 6
depth = 32
dtype = tf.float32
np.random.seed(0)
A = np.random.normal(size=[k, k])
Q = np.linalg.svd(A)[0]
if np.linalg.det(Q) < 0:
Q[0, :] = -Q[0, :]
Q = tf.constant(Q, dtype=dtype)
def make_data(N):
X = tf.random.normal([N, k, k], dtype=dtype)
Y = tf.matmul(X, Q)
return X, Y
X_train, Y_train = make_data(N)
X_test, Y_test = make_data(N // 8)
train_ds = tf.data.Dataset.from_tensor_slices((X_train, Y_train)).batch(batch_size)
test_ds = tf.data.Dataset.from_tensors((X_test, Y_test))
model = tf.keras.Sequential([
tf.keras.Input([k, k]),
# tf.keras.layers.Permute([1, 2]),
tf.keras.layers.Reshape([k * k]),
OrthogonalButterfly(width_pow, k * k, depth),
tf.keras.layers.Reshape([k, k]),
tf.keras.layers.Permute([1, 2]),
])
model.summary()
# loss_fn = tf.losses.MeanSquaredError()
loss_fn = tf.losses.MeanAbsoluteError()
model.compile(
optimizer=tf.keras.optimizers.SGD(learning_rate=10.0, momentum=0.95),
# tf.keras.optimizers.Adam(learning_rate=0.01, beta_1=0.95),
loss=loss_fn)
model.fit(train_ds, validation_data=test_ds, epochs=1000)
# for x in tf.sort(tf.reshape(model.layers[1].params, [-1])).numpy().tolist():
# print(x)
| [
"numpy.random.normal",
"tensorflow.random.normal",
"tensorflow.data.Dataset.from_tensors",
"tensorflow.keras.layers.Reshape",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.keras.layers.Permute",
"tensorflow.losses.MeanAbsoluteError",
"numpy.linalg.det",
"tensorflow.keras.optimizers.SGD",... | [((172, 189), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (186, 189), True, 'import numpy as np\n'), ((194, 223), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[k, k]'}), '(size=[k, k])\n', (210, 223), True, 'import numpy as np\n'), ((300, 327), 'tensorflow.constant', 'tf.constant', (['Q'], {'dtype': 'dtype'}), '(Q, dtype=dtype)\n', (311, 327), True, 'import tensorflow as tf\n'), ((599, 645), 'tensorflow.data.Dataset.from_tensors', 'tf.data.Dataset.from_tensors', (['(X_test, Y_test)'], {}), '((X_test, Y_test))\n', (627, 645), True, 'import tensorflow as tf\n'), ((977, 1006), 'tensorflow.losses.MeanAbsoluteError', 'tf.losses.MeanAbsoluteError', ([], {}), '()\n', (1004, 1006), True, 'import tensorflow as tf\n'), ((228, 244), 'numpy.linalg.svd', 'np.linalg.svd', (['A'], {}), '(A)\n', (241, 244), True, 'import numpy as np\n'), ((251, 267), 'numpy.linalg.det', 'np.linalg.det', (['Q'], {}), '(Q)\n', (264, 267), True, 'import numpy as np\n'), ((355, 395), 'tensorflow.random.normal', 'tf.random.normal', (['[N, k, k]'], {'dtype': 'dtype'}), '([N, k, k], dtype=dtype)\n', (371, 395), True, 'import tensorflow as tf\n'), ((404, 419), 'tensorflow.matmul', 'tf.matmul', (['X', 'Q'], {}), '(X, Q)\n', (413, 419), True, 'import tensorflow as tf\n'), ((516, 570), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(X_train, Y_train)'], {}), '((X_train, Y_train))\n', (550, 570), True, 'import tensorflow as tf\n'), ((681, 703), 'tensorflow.keras.Input', 'tf.keras.Input', (['[k, k]'], {}), '([k, k])\n', (695, 703), True, 'import tensorflow as tf\n'), ((748, 780), 'tensorflow.keras.layers.Reshape', 'tf.keras.layers.Reshape', (['[k * k]'], {}), '([k * k])\n', (771, 780), True, 'import tensorflow as tf\n'), ((786, 830), 'custom_model.OrthogonalButterfly', 'OrthogonalButterfly', (['width_pow', '(k * k)', 'depth'], {}), '(width_pow, k * k, depth)\n', (805, 830), False, 'from custom_model 
import OrthogonalButterfly\n'), ((836, 867), 'tensorflow.keras.layers.Reshape', 'tf.keras.layers.Reshape', (['[k, k]'], {}), '([k, k])\n', (859, 867), True, 'import tensorflow as tf\n'), ((873, 904), 'tensorflow.keras.layers.Permute', 'tf.keras.layers.Permute', (['[1, 2]'], {}), '([1, 2])\n', (896, 904), True, 'import tensorflow as tf\n'), ((1046, 1104), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': '(10.0)', 'momentum': '(0.95)'}), '(learning_rate=10.0, momentum=0.95)\n', (1069, 1104), True, 'import tensorflow as tf\n')] |
import math
import cv2
import base64
import numpy as np
from ndu_gate_camera.utility import geometry_helper
from ndu_gate_camera.utility.ndu_utility import NDUUtility
def image_h_w(image):
# h, w = image.shape[:2]
return image.shape[:2]
# Boyut değişikliğine en uygun interpolation yöntemi ile resize eder.
def resize_best_quality(image, size):
if image.shape[0] == size[0] and image.shape[1] == size[1]:
return image.copy()
size0 = max(image.shape[0], image.shape[1])
size1 = max(size[0], size[1])
if size0 > size1:
# if size0 > 2 * size1:
# image = cv2.pyrDown(image)
return cv2.resize(image, size, interpolation=cv2.INTER_AREA)
else:
return cv2.resize(image, size, interpolation=cv2.INTER_CUBIC)
# İmaj boyutlarından birisi max_dim'den daha büyükse küçültür, değilse aynen döner.
def resize_if_larger(image, max_dim, interpolation=None):
h, w = image.shape[:2]
if w > h:
if w > max_dim:
return resize(image, width=max_dim)
else:
return image
else:
if h > max_dim:
return resize(image, height=max_dim)
else:
return image
# İmaj boyutlarından "büyük olan" min_dim'den daha küçükse resize edilir, değilse aynen döner.
def resize_if_smaller(image, min_dim, interpolation=None):
h, w = image.shape[:2]
if w > h:
if w < min_dim:
return resize(image, width=min_dim)
else:
return image
else:
if h < min_dim:
return resize(image, height=min_dim)
else:
return image
# 'width' veya 'height yoksa en-boy oranını koruyarak resize eder. İkisi de varsa normal resize eder.
# 'interpolation' yoksa en uygununu seçer.
def resize(image, width=None, height=None, interpolation=None):
if width is None and height is None:
return image
h, w = image.shape[:2]
if width is None:
r = height / float(h)
dim = (int(w * r), height)
elif height is None:
r = width / float(w)
dim = (width, int(h * r))
else:
dim = (width, height)
if interpolation is None:
return resize_best_quality(image, dim)
else:
return cv2.resize(image, dim, interpolation=interpolation)
# total_pixel_count sonucun width * height değeridir.
# int yuvarlama yüzünden sonuç w*h değer, tam total_pixel_count olmayabilir.
def resize_total_pixel_count(image, total_pixel_count):
h, w = image.shape[:2]
ratio = w / float(h)
w1 = math.sqrt(total_pixel_count * ratio)
h1 = w1 * h / float(w)
return resize_best_quality(image, (int(w1), int(h1)))
# OpenCV mat nesnesini base64 string yapar
def to_base64(image):
_, buffer = cv2.imencode('.jpg', image)
return base64.b64encode(buffer)
# base64 string'i OpenCV mat nesnesi yapar
def from_base64(base64_text):
original = base64.b64decode(base64_text)
as_np = np.frombuffer(original, dtype=np.uint8)
return cv2.imdecode(as_np, flags=1)
def fill_polyline_transparent(image, pnts, color, opacity, thickness=-1):
blk = np.zeros(image.shape, np.uint8)
cv2.drawContours(blk, pnts, -1, color, -1)
if thickness >= 0:
cv2.polylines(image, pnts, True, color=color, thickness=thickness)
res = cv2.addWeighted(image, 1.0, blk, 0.1, 0)
cv2.copyTo(res, None, image)
def select_areas(frame, window_name, color=(0, 0, 255), opacity=0.3, thickness=4, max_count=None, next_area_key="n", finish_key="s", return_tuples=True, max_point_count=None):
try:
areas = []
area = []
def get_mouse_points(event, x, y, _flags, _param):
if event == cv2.EVENT_LBUTTONDOWN:
area.append((x, y))
cv2.namedWindow(window_name)
cv2.moveWindow(window_name, 40, 30)
cv2.setMouseCallback(window_name, get_mouse_points)
new_area = False
while True:
image = frame.copy()
for area1 in areas:
pts = np.array(area1, np.int32)
fill_polyline_transparent(image, [pts], color=color, opacity=opacity, thickness=thickness)
if not new_area:
if len(area) > 0:
pts = np.array(area, np.int32)
fill_polyline_transparent(image, [pts], color=color, opacity=opacity,
thickness=thickness)
for pnt in area:
cv2.circle(image, pnt, thickness * 2, color, thickness)
else:
if len(area) > 2:
areas.append(area)
if max_count is not None and len(areas) == max_count:
return areas
else:
area = []
new_area = False
cv2.imshow(window_name, image)
k = cv2.waitKey(1)
if k & 0xFF == ord(finish_key):
break
elif k & 0xFF == ord(next_area_key):
new_area = True
elif max_point_count is not None and len(area) == max_point_count:
new_area = True
if len(area) > 2:
areas.append(area)
if not return_tuples:
for i in range(len(areas)):
areas[i] = np.array(areas[i], np.int32)
return areas
finally:
cv2.destroyWindow(window_name)
def select_lines(frame, window_name, color=(0, 255, 255), thickness=4, max_count=None, finish_key="s"):
try:
lines = []
line = []
def get_mouse_points(event, x, y, _flags, _param):
if event == cv2.EVENT_LBUTTONDOWN:
line.append((x, y))
cv2.namedWindow(window_name)
cv2.moveWindow(window_name, 40, 30)
cv2.setMouseCallback(window_name, get_mouse_points)
while True:
image = frame.copy()
for line1 in lines:
pts = np.array(line1, np.int32)
cv2.polylines(image, [pts], False, color=color, thickness=thickness)
for pnt in line:
cv2.circle(image, pnt, thickness * 2, color, thickness)
if len(line) == 2:
lines.append(line)
if max_count is not None and len(lines) == max_count:
return lines
else:
line = []
cv2.imshow(window_name, image)
k = cv2.waitKey(1)
if k & 0xFF == ord(finish_key):
break
return lines
finally:
cv2.destroyWindow(window_name)
def select_points(frame, window_name, color=(0, 255, 255), radius=8, thickness=4, max_count=None, finish_key="s"):
try:
pnts = []
def get_mouse_points(event, x, y, _flags, _param):
if event == cv2.EVENT_LBUTTONDOWN:
pnts.append((x, y))
cv2.namedWindow(window_name)
cv2.moveWindow(window_name, 40, 30)
cv2.setMouseCallback(window_name, get_mouse_points)
while True:
image = frame.copy()
for pnt in pnts:
cv2.circle(image, pnt, radius, color, thickness)
cv2.imshow(window_name, image)
k = cv2.waitKey(1)
if k & 0xFF == ord(finish_key):
break
if max_count is not None and max_count <= len(pnts):
break
return pnts
finally:
cv2.destroyWindow(window_name)
def put_text(img, text_, center, color=None, font_scale=0.5, thickness=1, back_color=None, replace_tur_chars=True):
if replace_tur_chars:
text_ = NDUUtility.debug_replace_tur_chars(text_)
if back_color is None:
back_color = [1, 1, 1]
if color is None:
color = [255, 255, 255]
y = center[1]
# font = cv2.FONT_HERSHEY_COMPLEX
font = cv2.FONT_HERSHEY_DUPLEX
coor = (int(center[0] + 5), int(y))
cv2.putText(img=img, text=text_, org=coor,
fontFace=font, fontScale=font_scale, color=back_color, lineType=cv2.LINE_AA,
thickness=thickness + 2)
cv2.putText(img=img, text=text_, org=coor,
fontFace=font, fontScale=font_scale, color=color,
lineType=cv2.LINE_AA, thickness=thickness)
def rescale_frame(frame, percent):
width = int(frame.shape[1] * percent / 100.0)
height = int(frame.shape[0] * percent / 100.0)
dim = (width, height)
return resize_best_quality(frame, dim)
def frame2base64(frame, scale=40):
scaled_frame = rescale_frame(frame, scale)
res, frame = cv2.imencode('.png', scaled_frame)
base64_data = base64.b64encode(frame)
return base64_data.decode('utf-8')
def normalize_coordinates(image, coords):
h, w = image.shape[:2]
pnts = []
for x, y in coords:
pnts.append((x / w, y / h))
return pnts
def denormalize_coordinates(image, coords):
h, w = image.shape[:2]
pnts = []
for x, y in coords:
pnts.append((int(x * w), int(y * h)))
return pnts
def normalize_lines(image, lines):
lines1 = []
for line in lines:
lines1.append(normalize_coordinates(image, line))
return lines1
def denormalize_lines(image, lines):
lines1 = []
for line in lines:
lines1.append(denormalize_coordinates(image, line))
return lines1
def convert_lines_list2tuple(lines):
res = []
for line in lines:
line1 = []
for c in line:
line1.append(tuple(c))
res.append(line1)
return res
def convert_lines_tuple2list(lines):
res = []
for line in lines:
line1 = []
for c in line:
line1.append(list(c))
res.append(line1)
return res
def crop(frame, rect):
y1 = max(int(rect[0]), 0)
x1 = max(int(rect[1]), 0)
y2 = max(int(rect[2]), 0)
x2 = max(int(rect[3]), 0)
return frame[y1:y2, x1:x2]
def change_brightness(img, value):
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(hsv)
if value > 0:
lim = 255 - value
v[v > lim] = 255
v[v <= lim] += value
else:
lim = 0 - value
v[v < lim] = 0
v[v >= lim] -= abs(value)
final_hsv = cv2.merge((h, s, v))
img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)
return img
def get_mask(shape, areas):
h, w = shape[:2]
mask = np.zeros((h, w), np.uint8)
contours = []
for area in areas:
contours.append(np.array(area, np.int32))
cv2.drawContours(mask, contours, -1, 255, -1)
return mask
def apply_mask(image, mask):
return cv2.bitwise_and(image, image, mask=mask)
def draw_rect(image, rect, color=None):
if color is None:
color = [255, 255, 255]
c = np.array(rect[:4], dtype=np.int32)
c1, c2 = geometry_helper.get_rect_pnts(c)
cv2.rectangle(image, c1, c2, color=[1, 1, 1], thickness=3)
cv2.rectangle(image, c1, c2, color=color, thickness=2)
def equalize_hist_bgr(img):
img_yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
img_yuv[:, :, 0] = cv2.equalizeHist(img_yuv[:, :, 0])
return cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)
# img[:, :, 0] = cv2.equalizeHist(img[:, :, 0])
# img[:, :, 1] = cv2.equalizeHist(img[:, :, 1])
# img[:, :, 2] = cv2.equalizeHist(img[:, :, 2])
# return img
def adaptive_equalize_hist_bgr(img):
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
img_yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
img_yuv[:, :, 0] = clahe.apply(img_yuv[:, :, 0])
return cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)
# clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
# img[:, :, 0] = clahe.apply(img[:, :, 0])
# img[:, :, 1] = clahe.apply(img[:, :, 1])
# img[:, :, 2] = clahe.apply(img[:, :, 2])
# return img
| [
"cv2.rectangle",
"base64.b64encode",
"math.sqrt",
"cv2.imshow",
"cv2.copyTo",
"numpy.array",
"ndu_gate_camera.utility.geometry_helper.get_rect_pnts",
"cv2.imdecode",
"cv2.setMouseCallback",
"cv2.moveWindow",
"ndu_gate_camera.utility.ndu_utility.NDUUtility.debug_replace_tur_chars",
"cv2.addWeig... | [((2545, 2581), 'math.sqrt', 'math.sqrt', (['(total_pixel_count * ratio)'], {}), '(total_pixel_count * ratio)\n', (2554, 2581), False, 'import math\n'), ((2750, 2777), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'image'], {}), "('.jpg', image)\n", (2762, 2777), False, 'import cv2\n'), ((2789, 2813), 'base64.b64encode', 'base64.b64encode', (['buffer'], {}), '(buffer)\n', (2805, 2813), False, 'import base64\n'), ((2904, 2933), 'base64.b64decode', 'base64.b64decode', (['base64_text'], {}), '(base64_text)\n', (2920, 2933), False, 'import base64\n'), ((2946, 2985), 'numpy.frombuffer', 'np.frombuffer', (['original'], {'dtype': 'np.uint8'}), '(original, dtype=np.uint8)\n', (2959, 2985), True, 'import numpy as np\n'), ((2997, 3025), 'cv2.imdecode', 'cv2.imdecode', (['as_np'], {'flags': '(1)'}), '(as_np, flags=1)\n', (3009, 3025), False, 'import cv2\n'), ((3112, 3143), 'numpy.zeros', 'np.zeros', (['image.shape', 'np.uint8'], {}), '(image.shape, np.uint8)\n', (3120, 3143), True, 'import numpy as np\n'), ((3148, 3190), 'cv2.drawContours', 'cv2.drawContours', (['blk', 'pnts', '(-1)', 'color', '(-1)'], {}), '(blk, pnts, -1, color, -1)\n', (3164, 3190), False, 'import cv2\n'), ((3299, 3339), 'cv2.addWeighted', 'cv2.addWeighted', (['image', '(1.0)', 'blk', '(0.1)', '(0)'], {}), '(image, 1.0, blk, 0.1, 0)\n', (3314, 3339), False, 'import cv2\n'), ((3344, 3372), 'cv2.copyTo', 'cv2.copyTo', (['res', 'None', 'image'], {}), '(res, None, image)\n', (3354, 3372), False, 'import cv2\n'), ((7928, 8082), 'cv2.putText', 'cv2.putText', ([], {'img': 'img', 'text': 'text_', 'org': 'coor', 'fontFace': 'font', 'fontScale': 'font_scale', 'color': 'back_color', 'lineType': 'cv2.LINE_AA', 'thickness': '(thickness + 2)'}), '(img=img, text=text_, org=coor, fontFace=font, fontScale=\n font_scale, color=back_color, lineType=cv2.LINE_AA, thickness=thickness + 2\n )\n', (7939, 8082), False, 'import cv2\n'), ((8109, 8249), 'cv2.putText', 'cv2.putText', ([], {'img': 'img', 'text': 
'text_', 'org': 'coor', 'fontFace': 'font', 'fontScale': 'font_scale', 'color': 'color', 'lineType': 'cv2.LINE_AA', 'thickness': 'thickness'}), '(img=img, text=text_, org=coor, fontFace=font, fontScale=\n font_scale, color=color, lineType=cv2.LINE_AA, thickness=thickness)\n', (8120, 8249), False, 'import cv2\n'), ((8585, 8619), 'cv2.imencode', 'cv2.imencode', (['""".png"""', 'scaled_frame'], {}), "('.png', scaled_frame)\n", (8597, 8619), False, 'import cv2\n'), ((8638, 8661), 'base64.b64encode', 'base64.b64encode', (['frame'], {}), '(frame)\n', (8654, 8661), False, 'import base64\n'), ((9951, 9987), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2HSV'], {}), '(img, cv2.COLOR_BGR2HSV)\n', (9963, 9987), False, 'import cv2\n'), ((10002, 10016), 'cv2.split', 'cv2.split', (['hsv'], {}), '(hsv)\n', (10011, 10016), False, 'import cv2\n'), ((10224, 10244), 'cv2.merge', 'cv2.merge', (['(h, s, v)'], {}), '((h, s, v))\n', (10233, 10244), False, 'import cv2\n'), ((10255, 10297), 'cv2.cvtColor', 'cv2.cvtColor', (['final_hsv', 'cv2.COLOR_HSV2BGR'], {}), '(final_hsv, cv2.COLOR_HSV2BGR)\n', (10267, 10297), False, 'import cv2\n'), ((10375, 10401), 'numpy.zeros', 'np.zeros', (['(h, w)', 'np.uint8'], {}), '((h, w), np.uint8)\n', (10383, 10401), True, 'import numpy as np\n'), ((10497, 10542), 'cv2.drawContours', 'cv2.drawContours', (['mask', 'contours', '(-1)', '(255)', '(-1)'], {}), '(mask, contours, -1, 255, -1)\n', (10513, 10542), False, 'import cv2\n'), ((10601, 10641), 'cv2.bitwise_and', 'cv2.bitwise_and', (['image', 'image'], {'mask': 'mask'}), '(image, image, mask=mask)\n', (10616, 10641), False, 'import cv2\n'), ((10746, 10780), 'numpy.array', 'np.array', (['rect[:4]'], {'dtype': 'np.int32'}), '(rect[:4], dtype=np.int32)\n', (10754, 10780), True, 'import numpy as np\n'), ((10794, 10826), 'ndu_gate_camera.utility.geometry_helper.get_rect_pnts', 'geometry_helper.get_rect_pnts', (['c'], {}), '(c)\n', (10823, 10826), False, 'from ndu_gate_camera.utility import 
geometry_helper\n'), ((10831, 10889), 'cv2.rectangle', 'cv2.rectangle', (['image', 'c1', 'c2'], {'color': '[1, 1, 1]', 'thickness': '(3)'}), '(image, c1, c2, color=[1, 1, 1], thickness=3)\n', (10844, 10889), False, 'import cv2\n'), ((10894, 10948), 'cv2.rectangle', 'cv2.rectangle', (['image', 'c1', 'c2'], {'color': 'color', 'thickness': '(2)'}), '(image, c1, c2, color=color, thickness=2)\n', (10907, 10948), False, 'import cv2\n'), ((10993, 11029), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2YUV'], {}), '(img, cv2.COLOR_BGR2YUV)\n', (11005, 11029), False, 'import cv2\n'), ((11053, 11087), 'cv2.equalizeHist', 'cv2.equalizeHist', (['img_yuv[:, :, 0]'], {}), '(img_yuv[:, :, 0])\n', (11069, 11087), False, 'import cv2\n'), ((11099, 11139), 'cv2.cvtColor', 'cv2.cvtColor', (['img_yuv', 'cv2.COLOR_YUV2BGR'], {}), '(img_yuv, cv2.COLOR_YUV2BGR)\n', (11111, 11139), False, 'import cv2\n'), ((11365, 11416), 'cv2.createCLAHE', 'cv2.createCLAHE', ([], {'clipLimit': '(2.0)', 'tileGridSize': '(8, 8)'}), '(clipLimit=2.0, tileGridSize=(8, 8))\n', (11380, 11416), False, 'import cv2\n'), ((11431, 11467), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2YUV'], {}), '(img, cv2.COLOR_BGR2YUV)\n', (11443, 11467), False, 'import cv2\n'), ((11532, 11572), 'cv2.cvtColor', 'cv2.cvtColor', (['img_yuv', 'cv2.COLOR_YUV2BGR'], {}), '(img_yuv, cv2.COLOR_YUV2BGR)\n', (11544, 11572), False, 'import cv2\n'), ((642, 695), 'cv2.resize', 'cv2.resize', (['image', 'size'], {'interpolation': 'cv2.INTER_AREA'}), '(image, size, interpolation=cv2.INTER_AREA)\n', (652, 695), False, 'import cv2\n'), ((721, 775), 'cv2.resize', 'cv2.resize', (['image', 'size'], {'interpolation': 'cv2.INTER_CUBIC'}), '(image, size, interpolation=cv2.INTER_CUBIC)\n', (731, 775), False, 'import cv2\n'), ((2243, 2294), 'cv2.resize', 'cv2.resize', (['image', 'dim'], {'interpolation': 'interpolation'}), '(image, dim, interpolation=interpolation)\n', (2253, 2294), False, 'import cv2\n'), ((3222, 3288), 'cv2.polylines', 
'cv2.polylines', (['image', 'pnts', '(True)'], {'color': 'color', 'thickness': 'thickness'}), '(image, pnts, True, color=color, thickness=thickness)\n', (3235, 3288), False, 'import cv2\n'), ((3749, 3777), 'cv2.namedWindow', 'cv2.namedWindow', (['window_name'], {}), '(window_name)\n', (3764, 3777), False, 'import cv2\n'), ((3786, 3821), 'cv2.moveWindow', 'cv2.moveWindow', (['window_name', '(40)', '(30)'], {}), '(window_name, 40, 30)\n', (3800, 3821), False, 'import cv2\n'), ((3830, 3881), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['window_name', 'get_mouse_points'], {}), '(window_name, get_mouse_points)\n', (3850, 3881), False, 'import cv2\n'), ((5379, 5409), 'cv2.destroyWindow', 'cv2.destroyWindow', (['window_name'], {}), '(window_name)\n', (5396, 5409), False, 'import cv2\n'), ((5714, 5742), 'cv2.namedWindow', 'cv2.namedWindow', (['window_name'], {}), '(window_name)\n', (5729, 5742), False, 'import cv2\n'), ((5751, 5786), 'cv2.moveWindow', 'cv2.moveWindow', (['window_name', '(40)', '(30)'], {}), '(window_name, 40, 30)\n', (5765, 5786), False, 'import cv2\n'), ((5795, 5846), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['window_name', 'get_mouse_points'], {}), '(window_name, get_mouse_points)\n', (5815, 5846), False, 'import cv2\n'), ((6572, 6602), 'cv2.destroyWindow', 'cv2.destroyWindow', (['window_name'], {}), '(window_name)\n', (6589, 6602), False, 'import cv2\n'), ((6899, 6927), 'cv2.namedWindow', 'cv2.namedWindow', (['window_name'], {}), '(window_name)\n', (6914, 6927), False, 'import cv2\n'), ((6936, 6971), 'cv2.moveWindow', 'cv2.moveWindow', (['window_name', '(40)', '(30)'], {}), '(window_name, 40, 30)\n', (6950, 6971), False, 'import cv2\n'), ((6980, 7031), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['window_name', 'get_mouse_points'], {}), '(window_name, get_mouse_points)\n', (7000, 7031), False, 'import cv2\n'), ((7448, 7478), 'cv2.destroyWindow', 'cv2.destroyWindow', (['window_name'], {}), '(window_name)\n', (7465, 7478), False, 'import 
cv2\n'), ((7639, 7680), 'ndu_gate_camera.utility.ndu_utility.NDUUtility.debug_replace_tur_chars', 'NDUUtility.debug_replace_tur_chars', (['text_'], {}), '(text_)\n', (7673, 7680), False, 'from ndu_gate_camera.utility.ndu_utility import NDUUtility\n'), ((4833, 4863), 'cv2.imshow', 'cv2.imshow', (['window_name', 'image'], {}), '(window_name, image)\n', (4843, 4863), False, 'import cv2\n'), ((4880, 4894), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4891, 4894), False, 'import cv2\n'), ((6401, 6431), 'cv2.imshow', 'cv2.imshow', (['window_name', 'image'], {}), '(window_name, image)\n', (6411, 6431), False, 'import cv2\n'), ((6448, 6462), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (6459, 6462), False, 'import cv2\n'), ((7192, 7222), 'cv2.imshow', 'cv2.imshow', (['window_name', 'image'], {}), '(window_name, image)\n', (7202, 7222), False, 'import cv2\n'), ((7239, 7253), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (7250, 7253), False, 'import cv2\n'), ((10467, 10491), 'numpy.array', 'np.array', (['area', 'np.int32'], {}), '(area, np.int32)\n', (10475, 10491), True, 'import numpy as np\n'), ((4015, 4040), 'numpy.array', 'np.array', (['area1', 'np.int32'], {}), '(area1, np.int32)\n', (4023, 4040), True, 'import numpy as np\n'), ((5308, 5336), 'numpy.array', 'np.array', (['areas[i]', 'np.int32'], {}), '(areas[i], np.int32)\n', (5316, 5336), True, 'import numpy as np\n'), ((5955, 5980), 'numpy.array', 'np.array', (['line1', 'np.int32'], {}), '(line1, np.int32)\n', (5963, 5980), True, 'import numpy as np\n'), ((5997, 6065), 'cv2.polylines', 'cv2.polylines', (['image', '[pts]', '(False)'], {'color': 'color', 'thickness': 'thickness'}), '(image, [pts], False, color=color, thickness=thickness)\n', (6010, 6065), False, 'import cv2\n'), ((6111, 6166), 'cv2.circle', 'cv2.circle', (['image', 'pnt', '(thickness * 2)', 'color', 'thickness'], {}), '(image, pnt, thickness * 2, color, thickness)\n', (6121, 6166), False, 'import cv2\n'), ((7131, 7179), 
'cv2.circle', 'cv2.circle', (['image', 'pnt', 'radius', 'color', 'thickness'], {}), '(image, pnt, radius, color, thickness)\n', (7141, 7179), False, 'import cv2\n'), ((4238, 4262), 'numpy.array', 'np.array', (['area', 'np.int32'], {}), '(area, np.int32)\n', (4246, 4262), True, 'import numpy as np\n'), ((4481, 4536), 'cv2.circle', 'cv2.circle', (['image', 'pnt', '(thickness * 2)', 'color', 'thickness'], {}), '(image, pnt, thickness * 2, color, thickness)\n', (4491, 4536), False, 'import cv2\n')] |
from random import shuffle
import glob
import sys
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.models import model_from_json
from tensorflow.keras.optimizers import Adam
dirsep = '/'
csvdelim = ','
pathData='./ogle/data'
pathWeight = './models/cnn.h5' # The HDF5 weight file generated for the trained model
pathModel = './models/cnn.nn' # The model saved as a JSON file
DATAPATH='./ogle/data'
STEPS = 400
shuffle_data = True # shuffle the addresses before saving
dataset_path = './[IV]/*.dat'
#manifest_path = './OGLE-CEP.txt'
manifest_path = './manifest.txt'
# read addresses and labels from the 'train' folder
addrs = glob.glob(dataset_path)
#labels = [0 if 'cat' in addr else 1 for addr in addrs] # 0 = Cat, 1 = Dog
labels = np.zeros(len(addrs) - 1)
# to shuffle data
if shuffle_data:
c = list(zip(addrs, labels))
shuffle(c)
addrs, labels = zip(*c)
##temp = float_data[:, 1] # <1> temperature (in degrees C)
#plt.plot(range(200000), temp[:200000])
###plt.show()
#
##plt.plot(range(1440), temp[:1440])
##plt.show()
#
#mean = float_data[:200000].mean(axis=0)
#float_data -= mean
#std = float_data[:200000].std(axis=0)
#float_data /= std
#norm = float_data[:, 1] # <1> temperature (in degrees C)
# Divide the data into 60% train, 20% validation, and 20% test
train_addrs = addrs[0:int(0.6*len(addrs))]
train_labels = labels[0:int(0.6*len(labels))]
valid_addrs = addrs[int(0.6*len(addrs)):int(0.8*len(addrs))]
valid_labels = labels[int(0.6*len(addrs)):int(0.8*len(addrs))]
test_addrs = addrs[int(0.8*len(addrs)):]
test_labels = labels[int(0.8*len(labels)):]
def load_manifest(path):
manifest = np.loadtxt(path, dtype={'names': ('star', 'field', 'starID', 'ra', 'dec', 'typ', 'subtyp', 'Imag', 'Vmag', 'pd', 'Iamp'), 'formats': ('S18', 'S10', 'i4', 'f4', 'f4', 'S8', 'S8', 'f4', 'f4', 'f4', 'f4')})
return manifest
#path = './I/OGLE-LMC-CEP-1111.dat'
def load_star(path):
lightcurve = np.loadtxt(path, dtype={'names': ('jd', 'mag', 'err'), 'formats': ('f4', 'f4', 'f4')})
return lightcurve
def normalize(x):
x['jd'] = x['jd'] - x['jd'][0]
min_mag = np.min(x['mag'])
x['mag'] = x['mag'] - min_mag
return x
def pad_or_truncate(x, steps):
z = (0.0, 0.0, 0.0)
n=x.shape[0]
if n > steps:
x = x[:steps]
if n < steps:
x = np.append(x, x[0])
x[n] = z
n += 1
if n < steps:
z = np.tile(x[n-1],steps-n)
x = np.append(x, z)
return x
def _floatvector_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _float_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
manifest = load_manifest(manifest_path)
def get_period(manifest, path):
stars = manifest['star']
key = path.split('/')[2]
key = key.split('.')[0]
ind = np.searchsorted(manifest['star'],key)
#print(key, ind, manifest['star'][ind], manifest['pd'][ind])
# if ind is zero check whether it really is the first star in the list
# return 0.0 on error
#if ind == 0:
# if key != manifest['star'][0]:
# return 0.0
return manifest['pd'][ind], ind
def _write_tfrecord_split(tfr_path, split_label, addrs):
    """Serialize every lightcurve in `addrs` into the TFRecords file at
    `tfr_path`, printing progress every 1000 records tagged `split_label`."""
    writer = tf.python_io.TFRecordWriter(tfr_path)
    for i in range(len(addrs)):
        # print how many records are saved every 1000 records
        if not i % 1000:
            print('{} data: {}/{}'.format(split_label, i, len(addrs)))
            sys.stdout.flush()
        # load, normalize, and pad/truncate the lightcurve to a fixed length
        lightcurve = load_star(addrs[i])
        lightcurve = normalize(lightcurve)
        lightcurve = pad_or_truncate(lightcurve, STEPS)
        # flatten the 2D lightcurve into a single feature vector
        x = [value for row in lightcurve for value in row]
        y, star = get_period(manifest, addrs[i])
        # the same feature keys are used for every split so a single parser
        # can read all three files
        feature = {'train/period': _float_feature(y),
                   'train/data': _floatvector_feature(x)
                   }
        # create an example protocol buffer, serialize, and write it out
        example = tf.train.Example(features=tf.train.Features(feature=feature))
        writer.write(example.SerializeToString())
    writer.close()
    sys.stdout.flush()

# Write the three dataset splits. Only the file name and the progress label
# differ between splits; everything else is shared via the helper above.
_write_tfrecord_split('%s/train.tfr' % (DATAPATH), 'Train', train_addrs)
_write_tfrecord_split('%s/valid.tfr' % (DATAPATH), 'Val', valid_addrs)
_write_tfrecord_split('%s/test.tfr' % (DATAPATH), 'Test', test_addrs)
| [
"sys.stdout.flush",
"numpy.tile",
"random.shuffle",
"numpy.searchsorted",
"numpy.min",
"tensorflow.train.Int64List",
"tensorflow.train.BytesList",
"numpy.append",
"tensorflow.train.Features",
"tensorflow.train.FloatList",
"tensorflow.python_io.TFRecordWriter",
"numpy.loadtxt",
"glob.glob"
] | [((687, 710), 'glob.glob', 'glob.glob', (['dataset_path'], {}), '(dataset_path)\n', (696, 710), False, 'import glob\n'), ((3513, 3556), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['train_filename'], {}), '(train_filename)\n', (3540, 3556), True, 'import tensorflow as tf\n'), ((4440, 4458), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4456, 4458), False, 'import sys\n'), ((4576, 4619), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['valid_filename'], {}), '(valid_filename)\n', (4603, 4619), True, 'import tensorflow as tf\n'), ((5491, 5509), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5507, 5509), False, 'import sys\n'), ((5625, 5667), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['test_filename'], {}), '(test_filename)\n', (5652, 5667), True, 'import tensorflow as tf\n'), ((6536, 6554), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6552, 6554), False, 'import sys\n'), ((893, 903), 'random.shuffle', 'shuffle', (['c'], {}), '(c)\n', (900, 903), False, 'from random import shuffle\n'), ((1684, 1895), 'numpy.loadtxt', 'np.loadtxt', (['path'], {'dtype': "{'names': ('star', 'field', 'starID', 'ra', 'dec', 'typ', 'subtyp', 'Imag',\n 'Vmag', 'pd', 'Iamp'), 'formats': ('S18', 'S10', 'i4', 'f4', 'f4', 'S8',\n 'S8', 'f4', 'f4', 'f4', 'f4')}"}), "(path, dtype={'names': ('star', 'field', 'starID', 'ra', 'dec',\n 'typ', 'subtyp', 'Imag', 'Vmag', 'pd', 'Iamp'), 'formats': ('S18',\n 'S10', 'i4', 'f4', 'f4', 'S8', 'S8', 'f4', 'f4', 'f4', 'f4')})\n", (1694, 1895), True, 'import numpy as np\n'), ((1983, 2073), 'numpy.loadtxt', 'np.loadtxt', (['path'], {'dtype': "{'names': ('jd', 'mag', 'err'), 'formats': ('f4', 'f4', 'f4')}"}), "(path, dtype={'names': ('jd', 'mag', 'err'), 'formats': ('f4',\n 'f4', 'f4')})\n", (1993, 2073), True, 'import numpy as np\n'), ((2160, 2176), 'numpy.min', 'np.min', (["x['mag']"], {}), "(x['mag'])\n", (2166, 2176), True, 'import numpy as 
np\n'), ((3091, 3129), 'numpy.searchsorted', 'np.searchsorted', (["manifest['star']", 'key'], {}), "(manifest['star'], key)\n", (3106, 3129), True, 'import numpy as np\n'), ((2367, 2385), 'numpy.append', 'np.append', (['x', 'x[0]'], {}), '(x, x[0])\n', (2376, 2385), True, 'import numpy as np\n'), ((3739, 3757), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3755, 3757), False, 'import sys\n'), ((4800, 4818), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4816, 4818), False, 'import sys\n'), ((5847, 5865), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5863, 5865), False, 'import sys\n'), ((2456, 2484), 'numpy.tile', 'np.tile', (['x[n - 1]', '(steps - n)'], {}), '(x[n - 1], steps - n)\n', (2463, 2484), True, 'import numpy as np\n'), ((2496, 2511), 'numpy.append', 'np.append', (['x', 'z'], {}), '(x, z)\n', (2505, 2511), True, 'import numpy as np\n'), ((2596, 2627), 'tensorflow.train.FloatList', 'tf.train.FloatList', ([], {'value': 'value'}), '(value=value)\n', (2614, 2627), True, 'import tensorflow as tf\n'), ((2694, 2727), 'tensorflow.train.FloatList', 'tf.train.FloatList', ([], {'value': '[value]'}), '(value=[value])\n', (2712, 2727), True, 'import tensorflow as tf\n'), ((2794, 2827), 'tensorflow.train.Int64List', 'tf.train.Int64List', ([], {'value': '[value]'}), '(value=[value])\n', (2812, 2827), True, 'import tensorflow as tf\n'), ((2894, 2927), 'tensorflow.train.BytesList', 'tf.train.BytesList', ([], {'value': '[value]'}), '(value=[value])\n', (2912, 2927), True, 'import tensorflow as tf\n'), ((4285, 4319), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'feature'}), '(feature=feature)\n', (4302, 4319), True, 'import tensorflow as tf\n'), ((5346, 5380), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'feature'}), '(feature=feature)\n', (5363, 5380), True, 'import tensorflow as tf\n'), ((6391, 6425), 'tensorflow.train.Features', 'tf.train.Features', ([], {'feature': 'feature'}), 
'(feature=feature)\n', (6408, 6425), True, 'import tensorflow as tf\n')] |
# stream latches out to the console through TASHA.
# Latch format is an (n, C) uint16 numpy ndarray, where n is the number of
# latches in the array and C is the number of controllers. The controller order
# is set by the controllers list passed to the LatchStreamer constructor. The
# list is a list of strings (described below) that name each controller. If
# controllers[3] == "p2d0", then latches[:, 3] is expected to contain the
# buttons for player 2 controller on data line 0. If there are duplicate
# controllers, then the data with the greatest index is used. This does waste
# bandwidth and memory on the unused data.
# CONTROLLER NAMES
# "p1d0": player 1, data line 0 (controller pin 4)
# "p1d1": player 1, data line 1 (controller pin 5)
# "p2d0": player 2, data line 0 (controller pin 4)
# "p2d1": player 2, data line 1 (controller pin 5)
# "apu_freq_basic": basic APU frequency adjustment. see snes.py (reg 2)
# "apu_freq_advanced": advanced APU frequency adjustment. see snes.py (reg 3)
# LATCH STREAMER SETTINGS (parameters to connect())
# num_priming_latches: Number of latches to download with the firmware. These
# latches must tide the firmware over until communication is reestablished.
# This must be at least one, and not greater than the latch buffer size. If
# None, the value will be the latch buffer size. At least this many latches
# must be in the latch queue before connecting, as this many will be
# downloaded with the firmware.
# apu_freq_basic and apu_freq_advanced: Configure the initial values for the APU
# basic and advanced frequency setting registers. If None, the defaults
# compiled into the gateware are used. Consult calculate_advanced in
# gateware/apu_calc.py for information on how to choose the value.
import struct
import random
import collections
import itertools
import enum
import numpy as np
import serial
import crcmod.predefined
# CRC-16/KERMIT checksum function; the TASHA packet protocol appends this CRC
# so that checking a payload-plus-CRC span yields 0 on success.
crc_16_kermit = crcmod.predefined.mkPredefinedCrcFun("kermit")
from ..firmware.latch_streamer import make_firmware, calc_buf_size, ErrorCode
from . import bootload
# status_cb is called with Messages of the appropriate subclass
class Message:
    """Base type for all messages delivered to a status callback."""
class ConnectionMessage(Message, enum.Enum):
    """Progress notifications emitted while connecting and streaming."""
    CONNECTING = 1
    NOT_RESPONDING = 2
    BUILDING = 3
    DOWNLOADING = 4
    CONNECTED = 5
    TRANSFER_DONE = 6
    BUFFER_DONE = 7

    def __str__(self):
        # the lookup table is built locally on purpose: a dict assigned in
        # the class body of an Enum would itself become an enum member
        texts = {
            ConnectionMessage.CONNECTING: "Connecting to TASHA...",
            ConnectionMessage.NOT_RESPONDING:
                " (no response, please reset TASHA)",
            ConnectionMessage.BUILDING: "Building firmware...",
            ConnectionMessage.DOWNLOADING:
                "Downloading and starting firmware...",
            ConnectionMessage.CONNECTED:
                "Initialization complete! Beginning latch transfer...",
            ConnectionMessage.TRANSFER_DONE:
                "Transfer complete! Waiting for device buffer to empty...",
            ConnectionMessage.BUFFER_DONE:
                "All latches successfully latched! Thanks for playing!",
        }
        if self not in texts:
            raise Exception("unknown identity {}".format(self))
        return texts[self]
class DeviceErrorMessage(Message): # messages returned from the device
    """Wraps an ErrorCode reported by the device firmware.

    Codes at or above ErrorCode.FATAL_ERROR_START are flagged as fatal.
    """
    def __init__(self, code):
        if not isinstance(code, ErrorCode):
            raise TypeError("must be an ErrorCode")
        self.code = code
        self.is_fatal = code >= ErrorCode.FATAL_ERROR_START

    def __str__(self):
        prefix = "FATAL ERROR: " if self.is_fatal else "COMM ERROR: "
        # human-readable text for each known error code
        descriptions = {
            ErrorCode.NONE: "success",
            ErrorCode.INVALID_COMMAND: "invalid command",
            ErrorCode.BAD_CRC: "bad CRC",
            ErrorCode.RX_ERROR: "receive error/overflow",
            ErrorCode.RX_TIMEOUT: "receive timeout",
            ErrorCode.BAD_STREAM_POS: "incorrect stream position",
            ErrorCode.BUFFER_UNDERRUN: "buffer underrun",
            ErrorCode.MISSED_LATCH: "missed latch",
        }
        # like the original chain, an unlisted code yields None implicitly
        if self.code in descriptions:
            return prefix + descriptions[self.code]
class InvalidPacketMessage(Message):
    """Reports a received packet that failed its CRC check."""
    def __init__(self, packet):
        self.packet = packet

    def __str__(self):
        return f"WARNING: invalid packet received: {self.packet!r}"
# sent every processed status packet
class StatusMessage(Message):
    """Snapshot of streaming progress, produced for every status packet.

    Attributes:
        buffer_use: latches currently stored in the device buffer
        buffer_size: total device buffer size in latches
        device_pos: stream position the device reported, mod 65536
        pc_pos: stream position we (on the PC) are at, mod 65536
        sent: number of latches sent in response
        in_transit: latches sent last time but not yet acknowledged
    """
    def __init__(self, buffer_use, buffer_size,
            device_pos, pc_pos, sent, in_transit):
        self.buffer_use = buffer_use
        self.buffer_size = buffer_size
        self.device_pos = device_pos
        self.pc_pos = pc_pos
        self.sent = sent
        self.in_transit = in_transit

    def __str__(self):
        free_space = self.buffer_size - self.buffer_use
        return "D:{:05d}<-P:{:05d} B:{:05d} T:{:05d} S:{:05d}".format(
            self.device_pos, self.pc_pos, free_space,
            self.in_transit, self.sent)
# how the communication is proceeding
class ConnectionState(enum.Enum):
    """Lifecycle phases of the link between the PC and the TASHA device."""
    # ... no connection
    DISCONNECTED = 0
    # connect()ed but we haven't got the status packet back
    INITIALIZING = 1
    # connected and everything is going well
    TRANSFERRING = 2
    # we're finishing up by emptying the host latch queue
    EMPTYING_HOST = 3
    # we're finishing up by waiting for the device to empty its buffer
    EMPTYING_DEVICE = 4
class LatchStreamer:
    """Streams queued controller latches out to a TASHA device over serial.

    Usage: queue latches with add_latches(), call connect(), then call
    communicate() repeatedly until it returns False.
    """
    def __init__(self, controllers):
        self.controllers = controllers
        self.num_controllers = len(controllers)
        self.device_buf_size = calc_buf_size(self.num_controllers)
        self.connected = False
        self.latch_queue = collections.deque()
        # the queue is composed of arrays with many latches in each. keep track
        # of how many latches total are in there.
        self.latch_queue_len = 0
        self.conn_state = ConnectionState.DISCONNECTED
        # everything else will be initialized upon connection

    def add_latches(self, latches):
        """Add some latches to the stream queue.

        If latches is None, the latches are assumed to be finished and the
        streamer transitions to waiting for the buffers to empty. If it's not
        None, then normal operation resumes.
        """
        if latches is None:
            if self.conn_state in (ConnectionState.INITIALIZING,
                    ConnectionState.TRANSFERRING):
                self.conn_state = ConnectionState.EMPTYING_HOST
            return
        elif self.conn_state != ConnectionState.TRANSFERRING:
            if self.conn_state in (ConnectionState.EMPTYING_HOST,
                    ConnectionState.EMPTYING_DEVICE):
                self.conn_state = ConnectionState.TRANSFERRING

        if not isinstance(latches, np.ndarray):
            raise TypeError("'latches' must be ndarray, not {!r}".format(
                type(latches)))
        if len(latches.shape) != 2 or latches.shape[1] != self.num_controllers:
            raise TypeError("'latches' must be shape (n, {}), not {!r}".format(
                self.num_controllers, latches.shape))
        if latches.dtype != np.uint16:
            raise TypeError("'latches' must be uint16, not {!r}".format(
                latches.dtype))

        if len(latches) == 0: # no point in storing no latches
            return

        # copy the array so we don't have to worry that the caller will do
        # something weird to it. we need to send the data in C order, so we make
        # sure the copy is such.
        self.latch_queue.append(np.copy(latches, order="C"))
        self.latch_queue_len += len(latches)

    def clear_latch_queue(self):
        """Remove all the latches from the stream queue.

        Not guaranteed to remove everything unless disconnected.
        """
        self.latch_queue = collections.deque()
        self.latch_queue_len = 0

    def connect(self, port, status_cb=print,
            num_priming_latches=None,
            apu_freq_basic=None,
            apu_freq_advanced=None):
        """Connect to TASHA. status_cb is basically just print for now."""
        if self.conn_state != ConnectionState.DISCONNECTED:
            raise ValueError("already connected")

        if num_priming_latches is None:
            num_priming_latches = self.device_buf_size
        # we can't pre-fill the buffer with more latches than fit in it
        num_priming_latches = min(num_priming_latches, self.device_buf_size)

        if self.latch_queue_len < num_priming_latches:
            raise ValueError("{} priming latches requested but only {} "
                "available in the queue".format(
                    num_priming_latches, self.latch_queue_len))

        status_cb(ConnectionMessage.CONNECTING)
        bootloader = bootload.Bootloader()
        # assume the board is responsive and will get back to us quickly
        try:
            bootloader.connect(port, timeout=1)
            connected_quickly = True
        except bootload.Timeout: # it isn't
            connected_quickly = False

        if not connected_quickly:
            # ask the user to try and reset the board, then wait for however
            # long it takes for the bootloader to start
            status_cb(ConnectionMessage.NOT_RESPONDING)
            bootloader.connect(port, timeout=None)

        bootloader.identify()

        status_cb(ConnectionMessage.BUILDING)
        # get the priming latch data and convert it back to words. kinda
        # inefficient but we only do it once.
        priming_latches = struct.unpack(
            "<{}H".format(num_priming_latches*self.num_controllers),
            self._get_latch_data(num_priming_latches, num_priming_latches))
        firmware = make_firmware(self.controllers, priming_latches,
            apu_freq_basic=apu_freq_basic,
            apu_freq_advanced=apu_freq_advanced)

        status_cb(ConnectionMessage.DOWNLOADING)
        firmware = tuple(firmware)
        bootloader.write_memory(0, firmware)
        # read the firmware back to verify the download was clean
        read_firmware = bootloader.read_memory(0, len(firmware))
        if firmware != read_firmware:
            raise bootload.BootloadError("verification failed")
        bootloader.start_execution(0)

        self.port = serial.Serial(port=port, baudrate=2_000_000, timeout=0.001)

        # initialize input and output buffers
        self.out_chunks = collections.deque()
        self.out_curr_chunk = None
        self.out_curr_chunk_pos = None
        self.in_chunks = bytearray()

        # initialize stream
        self.stream_pos = num_priming_latches
        # keep track of the latches we've sent so we can resend them if there is
        # an error
        self.resend_buf = collections.deque()
        self.resend_buf_len = 0

        self.status_cb = status_cb
        self.conn_state = ConnectionState.INITIALIZING

    def _get_latch_data(self, at_least, at_most=None):
        """Get some latches from the latch queue and return them as bytes.

        May return less than at_least if not enough latches are available. If
        at_most is None, there is no upper bound on the number of latches
        returned.
        """
        latch_data = []
        num_got = 0
        while num_got < at_least and len(self.latch_queue) > 0:
            more_latches = self.latch_queue.popleft()
            self.latch_queue_len -= len(more_latches)
            # would this put us over the max?
            if at_most is not None and num_got + len(more_latches) > at_most:
                # yes, split it up
                remaining = at_most-num_got
                more_latches, leftovers = \
                    more_latches[:remaining], more_latches[remaining:]
                # and store the extra for next time
                self.latch_queue.appendleft(leftovers)
                self.latch_queue_len += len(leftovers)
            # convert to raw bytes for transmission
            latch_data.append(more_latches.tobytes('C'))
            num_got += len(more_latches)

        return b''.join(latch_data)

    def _parse_latest_packet(self):
        """Find, parse, and return the latest packet from in_chunks."""
        packet = None
        while True:
            pos = self.in_chunks.find(b'\x5A\x7A')
            if pos == -1: # not found
                # we are done if there's no data left
                if len(self.in_chunks) == 0:
                    break
                # if the last byte could be the start of the packet, save it.
                # BUGFIX: indexing a bytearray yields an int, so compare with
                # the numeric byte value; the previous `== b'\x5A'` comparison
                # was always False and dropped a header split across reads.
                if self.in_chunks[-1] == 0x5A:
                    self.in_chunks = self.in_chunks[-1:]
                else:
                    self.in_chunks.clear()
                break
            packet_data = self.in_chunks[pos:pos+12]
            if len(packet_data) < 12: # packet is not complete
                # save what we've got for later
                self.in_chunks = self.in_chunks[pos:]
                break
            # is the packet valid?
            if crc_16_kermit(packet_data[2:]) != 0:
                # nope. throw away the header. maybe a packet starts after it.
                self.status_cb(InvalidPacketMessage(packet_data))
                self.in_chunks = self.in_chunks[pos+2:]
            else:
                # it is. parse the useful bits from it
                packet = struct.unpack("<3H", packet_data[4:10])
                # and remove it from the stream
                self.in_chunks = self.in_chunks[pos+12:]

        return packet

    def communicate(self):
        """Call repeatedly to perform communication.

        Reads messages from TASHA and sends latches back out. Returns True if
        still connected and False to say that the connection has terminated.
        """
        if self.conn_state == ConnectionState.DISCONNECTED:
            raise ValueError("you must connect before communicating")
        status_cb = self.status_cb

        # receive any status packet pieces, then parse out any status packets
        rx_new = self.port.read(65536)
        packet = None
        if len(rx_new) > 0:
            self.in_chunks.extend(rx_new)
            packet = self._parse_latest_packet()

        # if we got a packet, parse it
        if packet is not None:
            if self.conn_state == ConnectionState.INITIALIZING:
                # let the user know the device is alive
                status_cb(ConnectionMessage.CONNECTED)
                self.conn_state = ConnectionState.TRANSFERRING

            p_error, p_stream_pos, p_buffer_space = packet

            if self.conn_state == ConnectionState.EMPTYING_HOST:
                # do we have anything more to send to the device? did it get
                # everything we sent?
                stuff_in_transit = self.stream_pos != p_stream_pos
                if len(self.latch_queue) == 0 and not stuff_in_transit:
                    # yup, we are done sending. now we wait for the device's
                    # buffer to be emptied.
                    self.conn_state = ConnectionState.EMPTYING_DEVICE
                    status_cb(ConnectionMessage.TRANSFER_DONE)

            # if there is an error, we need to intervene.
            if p_error != 0:
                error = ErrorCode(p_error)
                if error == ErrorCode.BUFFER_UNDERRUN and \
                        self.conn_state == ConnectionState.EMPTYING_DEVICE:
                    # if we're waiting for the device buffer to empty, it's just
                    # happened and we are done with our job
                    status_cb(ConnectionMessage.BUFFER_DONE)
                    self.disconnect()
                    return False

                msg = DeviceErrorMessage(error)
                status_cb(msg)
                # we can't do anything for fatal errors except disconnect
                if msg.is_fatal:
                    self.disconnect()
                    return False

                # but if it's not, we will restart transmission at the last
                # position the device got so it can pick the stream back up.

                # how many latches do we need to resend to get the device back
                # to the position we are at?
                num_to_resend = (self.stream_pos - p_stream_pos) & 0xFFFF
                # pull that many out of the resend buffer
                to_resend = []
                self.resend_buf_len -= num_to_resend
                # we will be sending that many back out
                self.latch_queue_len += num_to_resend
                # because the resend buffer contains whole packets and the
                # device can only lose whole packets, we can just move packets
                while num_to_resend > 0:
                    packet = self.resend_buf.pop() # pop latest transmission
                    # turn it from bytes back into a numpy array
                    packet = np.frombuffer(packet,
                        dtype=np.uint16).reshape(-1, self.num_controllers)
                    to_resend.append(packet)
                    num_to_resend -= len(packet)
                # put what we pulled out back into the send queue. to_resend is
                # from most recently to least recently transmitted so the
                # packets end up least recently to most recently transmitted.
                self.latch_queue.extendleft(to_resend)

                # finally set the correct stream position
                self.stream_pos = p_stream_pos

            # the device tells us how many latches it's received and we know
            # how many we've sent. the difference is the number in transit.
            in_transit = (self.stream_pos - p_stream_pos) & 0xFFFF
            # we have to remove that number from the amount of space left in the
            # device's buffer because those latches will shortly end up there
            # and we don't want to overflow it
            actual_buffer_space = p_buffer_space - in_transit

            # queue that many for transmission. we don't send less than 20
            # because it's kind of a waste of time.
            actual_sent = 0
            # filling the device's buffer with latches is counterproductive to
            # emptying it
            if self.conn_state == ConnectionState.EMPTYING_DEVICE:
                actual_buffer_space = 0 # stop anything from being sent
            while actual_buffer_space >= 20:
                # we'd like to send at least 20 latches to avoid too much packet
                # overhead, but not more than 200 to avoid having to resend a
                # lot of latches if there is an error. but of course, we can't
                # send so many that we overflow the buffer.
                latch_data = self._get_latch_data(
                    min(20, actual_buffer_space), min(200, actual_buffer_space))
                num_sent = len(latch_data)//(self.num_controllers*2)
                actual_sent += num_sent
                if num_sent == 0: break # queue was empty

                # send the latch transmission command
                cmd = struct.pack("<5H",
                    0x7A5A, 0x1003, self.stream_pos, num_sent, 0)
                self.out_chunks.append(cmd)
                # don't CRC the header
                self.out_chunks.append(
                    crc_16_kermit(cmd[2:]).to_bytes(2, byteorder="little"))
                # send all the data along too
                self.out_chunks.append(latch_data)
                self.out_chunks.append(
                    crc_16_kermit(latch_data).to_bytes(2, byteorder="little"))

                # we've filled up the buffer some
                actual_buffer_space -= num_sent
                # and advanced the stream position
                self.stream_pos = (self.stream_pos + num_sent) & 0xFFFF

                # remember what data we sent so we can resend it if necessary
                self.resend_buf.append(latch_data)
                self.resend_buf_len += num_sent

            # clear out old sent data. we never have in transit more latches
            # than can be stored in the device buffer, so that is the
            # maximum number that we can fail to send and need to resend.
            while True:
                # how many latches would be left if we removed the oldest?
                oldest_len = len(self.resend_buf[0])
                oldest_len //= (self.num_controllers*2)
                remaining = self.resend_buf_len - oldest_len
                # is that more than we could possibly need to resend?
                if remaining <= self.device_buf_size:
                    break # nope, don't do anything
                # yup, so remove it
                self.resend_buf.popleft()
                self.resend_buf_len -= oldest_len

            status_cb(StatusMessage(self.device_buf_size-p_buffer_space,
                self.device_buf_size, p_stream_pos, self.stream_pos,
                actual_sent, in_transit))

        # send out the data we prepared earlier
        while True:
            # get a new chunk
            if self.out_curr_chunk is None:
                if len(self.out_chunks) == 0:
                    break
                self.out_curr_chunk = self.out_chunks.popleft()
                self.out_curr_chunk_pos = 0
            # calculate how much data is remaining in it
            to_send = len(self.out_curr_chunk) - self.out_curr_chunk_pos
            # send out all the data
            sent = self.port.write(
                self.out_curr_chunk[self.out_curr_chunk_pos:])
            if sent != to_send: # did we send all of it?
                # nope, remember what we did send
                self.out_curr_chunk_pos += sent
                # and try to send the rest later
                break
            else:
                # yup, we are done with this chunk
                self.out_curr_chunk = None

        return True # everything's still going good

    def disconnect(self):
        """Close the serial port and drop all per-connection state."""
        if self.conn_state == ConnectionState.DISCONNECTED:
            return

        # close and delete buffers to avoid hanging on to junk
        self.port.close()
        del self.port
        del self.out_chunks
        del self.out_curr_chunk
        del self.in_chunks
        del self.resend_buf
        del self.status_cb

        self.conn_state = ConnectionState.DISCONNECTED
| [
"numpy.copy",
"collections.deque",
"struct.pack",
"struct.unpack",
"serial.Serial",
"numpy.frombuffer"
] | [((6244, 6263), 'collections.deque', 'collections.deque', ([], {}), '()\n', (6261, 6263), False, 'import collections\n'), ((8352, 8371), 'collections.deque', 'collections.deque', ([], {}), '()\n', (8369, 8371), False, 'import collections\n'), ((10745, 10802), 'serial.Serial', 'serial.Serial', ([], {'port': 'port', 'baudrate': '(2000000)', 'timeout': '(0.001)'}), '(port=port, baudrate=2000000, timeout=0.001)\n', (10758, 10802), False, 'import serial\n'), ((10878, 10897), 'collections.deque', 'collections.deque', ([], {}), '()\n', (10895, 10897), False, 'import collections\n'), ((11211, 11230), 'collections.deque', 'collections.deque', ([], {}), '()\n', (11228, 11230), False, 'import collections\n'), ((8102, 8129), 'numpy.copy', 'np.copy', (['latches'], {'order': '"""C"""'}), "(latches, order='C')\n", (8109, 8129), True, 'import numpy as np\n'), ((13793, 13832), 'struct.unpack', 'struct.unpack', (['"""<3H"""', 'packet_data[4:10]'], {}), "('<3H', packet_data[4:10])\n", (13806, 13832), False, 'import struct\n'), ((19521, 19582), 'struct.pack', 'struct.pack', (['"""<5H"""', '(31322)', '(4099)', 'self.stream_pos', 'num_sent', '(0)'], {}), "('<5H', 31322, 4099, self.stream_pos, num_sent, 0)\n", (19532, 19582), False, 'import struct\n'), ((17326, 17364), 'numpy.frombuffer', 'np.frombuffer', (['packet'], {'dtype': 'np.uint16'}), '(packet, dtype=np.uint16)\n', (17339, 17364), True, 'import numpy as np\n')] |
import numpy as np
import json
import matplotlib.pyplot as plt
from PIL import Image
import seaborn as sns
# global seaborn styling applied to every plot produced by this module
sns.set(font_scale=1.5, style='white')
from sklearn.utils import compute_class_weight
from sklearn.metrics import classification_report, confusion_matrix, roc_curve, roc_auc_score, precision_recall_curve, auc, f1_score, average_precision_score
import tensorflow as tf
import tensorflow_addons as tfa
from .utils import CLASS_NAMES
def f1_metric(num_classes, average='weighted', name='f1_score'):
    """Build a tensorflow-addons F1Score metric.

    Args:
        num_classes: number of target classes.
        average: averaging mode passed through to tfa ('weighted' by default).
        name: metric name as it appears in Keras logs.

    Returns:
        A tfa.metrics.F1Score instance.
    """
    # return directly instead of shadowing the function's own name with a local
    return tfa.metrics.F1Score(num_classes=num_classes, average=average, name=name)
def plot_history(history, title, legend='upper right'):
    """Plot accuracy, loss and F1 curves from a Keras training history dict.

    Args:
        history: dict with 'accuracy'/'loss'/'f1_score' and their 'val_'
            counterparts, one value per epoch.
        title: figure super-title.
        legend: matplotlib legend location for every panel.
    """
    epochs_range = range(len(history['accuracy']))

    def _plot_panel(position, metric, panel_title):
        # one subplot: train vs. validation curve for `metric`
        plt.subplot(position)
        plt.plot(epochs_range, history[metric], label='Train')
        plt.plot(epochs_range, history['val_' + metric], label='Val')
        plt.legend(loc=legend)
        plt.title(panel_title)

    plt.figure(figsize=(35, 8))
    plt.suptitle(title, fontsize=20)
    _plot_panel(131, 'accuracy', 'Accuracy')
    plt.ylim(0, 1)
    _plot_panel(132, 'loss', 'Loss')
    # losses can span orders of magnitude, so use a log scale
    plt.yscale('log')
    _plot_panel(133, 'f1_score', 'f1 score')
    plt.ylim(0, 1)
def plot_from_json(path, title):
    """Load a training history saved as JSON and plot it with plot_history.

    Args:
        path: path to the JSON file.
        title: figure title forwarded to plot_history.
    """
    # JSON is UTF-8 by specification, so decode explicitly rather than relying
    # on the platform default encoding
    with open(path, encoding='utf-8') as f:
        history = json.load(f)
    # saved histories are a list; the first element holds the metric curves
    plot_history(history[0], title)
def classification_metrics(model, X_val, y_val, normalize=None, show_report=True, fmt='d'):
    """Print an F1/classification report and draw a confusion-matrix heatmap.

    Args:
        model: fitted Keras model with a predict() method.
        X_val: validation inputs.
        y_val: one-hot encoded validation labels.
        normalize: passed to sklearn confusion_matrix ('true' for row-wise).
        show_report: whether to print the F1 score and classification report.
        fmt: heatmap cell format; forced to '.2f' when normalizing.
    """
    y_pred = model.predict(X_val)
    # hoist the class-index conversion instead of repeating np.argmax four times
    y_true_cls = np.argmax(y_val, axis=1)
    y_pred_cls = np.argmax(y_pred, axis=1)
    if show_report:
        # reuse the module-level f1_metric helper; the old code rebuilt the
        # tfa metric inline and shadowed the helper's name with a local
        metric = f1_metric(num_classes=3, average='weighted', name='f1_score')
        metric.update_state(y_val, y_pred)
        result = metric.result()
        print('F1 SCORE: ', result.numpy())
        print(classification_report(y_true_cls, y_pred_cls, target_names=CLASS_NAMES, zero_division=0))
    if normalize:
        # normalized matrices hold fractions, so show two decimals
        fmt = '.2f'
    cm = confusion_matrix(y_true_cls, y_pred_cls, normalize=normalize)
    sns.heatmap(cm, annot=True, xticklabels=CLASS_NAMES, yticklabels=CLASS_NAMES, cbar=False, fmt=fmt)
    plt.xlabel('Predicted')
    plt.ylabel('True')
    plt.xticks(rotation=0)
    plt.yticks(va='center');
def show_both_matrices(model, model_name, X_val, y_val, config, images, title):
    """Draw raw-count and row-normalized confusion matrices side by side."""
    plt.figure(figsize=(20, 9))
    # left panel: counts, plus the printed classification report
    plt.subplot(121)
    classification_metrics(model, X_val, y_val)
    plt.title('{} - {}{}'.format(model_name, images, config))
    # right panel: per-true-class normalized matrix, report suppressed
    plt.subplot(122)
    classification_metrics(model, X_val, y_val, normalize='true', show_report=False)
    plt.title(title)
    plt.show();
def confusion_matrices(model_name, get_model, get_model_dataaug, filename, X_val, y_val, images, title):
    """Show confusion matrices for all four trained variants of one model.

    Loads the checkpoint for each training configuration (baseline, class
    weights, data augmentation, and both) and renders its matrices with
    show_both_matrices. Order matches the original four copy-pasted sections.
    """
    weight_dir = 'best-models/' + model_name.lower() + '-weights/'
    # (model object, checkpoint suffix, human-readable config label)
    variants = [
        (get_model, '', ''),
        (get_model, '-classw', ' + Class Weight'),
        (get_model_dataaug, '-dataaug', ' + Data Augmentation'),
        (get_model_dataaug, '-classw-dataaug', ' + Class Weight + Data Augmentation'),
    ]
    for model, suffix, config in variants:
        model.load_weights(weight_dir + filename + suffix + '.ckpt')
        show_both_matrices(model, model_name, X_val, y_val, config, images, title)
def plot_roc_curves(model, X_test, y_test):
    """Plot per-class and micro-average ROC curves with their AUC values.

    Args:
        model: fitted model with predict().
        X_test: test inputs.
        y_test: one-hot encoded test labels, one column per class.
    """
    labels = ['COV', 'Normal', 'OtherPneumonia']
    y_pred = model.predict(X_test)
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    # derive the class count from `labels` (was a hard-coded 3), matching
    # the style of plot_pr_curves
    for i in range(len(labels)):
        fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_pred[:, i])
        roc_auc[i] = auc(fpr[i], tpr[i])
    # micro-average: pool every (sample, class) decision together
    fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_pred.ravel())
    roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
    # chance-level diagonal for reference
    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlim([-0.05, 1.0])
    plt.ylim([0.0, 1.05])
    plt.plot(fpr["micro"], tpr["micro"], label='micro-average ROC curve (AUC = {0:0.4f})'.format(roc_auc["micro"]))
    for i in range(len(labels)):
        plt.plot(fpr[i], tpr[i], label='{0} (AUC = {1:0.4f})'.format(labels[i], roc_auc[i]))
    plt.legend(fontsize=15)
    plt.xticks(fontsize=15)
    plt.yticks(fontsize=15)
    plt.xlabel('False Positive Rate', fontsize=15)
    plt.ylabel('True Positive Rate', fontsize=15)
def plot_pr_curves(model, X_test, y_test):
    """Plot per-class and micro-average precision-recall curves with AP."""
    labels = ['COV', 'Normal', 'OtherPneumonia']
    y_pred = model.predict(X_test)
    precision, recall, avg_precision = {}, {}, {}
    # one PR curve (and average precision) per class column
    for idx, _name in enumerate(labels):
        precision[idx], recall[idx], _ = precision_recall_curve(
            y_test[:, idx], y_pred[:, idx])
        avg_precision[idx] = average_precision_score(
            y_test[:, idx], y_pred[:, idx])
    # micro-average: pool every (sample, class) decision together
    flat_true, flat_pred = y_test.ravel(), y_pred.ravel()
    precision['micro'], recall['micro'], _ = precision_recall_curve(flat_true, flat_pred)
    avg_precision["micro"] = average_precision_score(flat_true, flat_pred)
    plt.xlim([0.0, 1.00])
    plt.ylim([0.0, 1.05])
    plt.plot(recall['micro'], precision['micro'],
             label='micro-average PR curve (AP = {0:0.4f})'.format(avg_precision['micro']))
    for idx, name in enumerate(labels):
        plt.plot(recall[idx], precision[idx],
                 label='{0} (AP = {1:0.4f})'.format(name, avg_precision[idx]))
    plt.legend(loc=3, fontsize=15)
    plt.xticks(fontsize=15)
    plt.yticks(fontsize=15)
    plt.xlabel('Recall', fontsize=15)
    plt.ylabel('Precision', fontsize=15)
def print_auc(model, model_name, file_name, X_val, y_val, batch_size=32):
    """Load stored weights, score X_val, and print the micro-average ROC AUC."""
    checkpoint = 'best-models/{}-weights/{}.ckpt'.format(model_name.lower(), file_name)
    model.load_weights(checkpoint)
    y_pred = model.predict(X_val, batch_size=batch_size)
    # micro-average over all (sample, class) pairs
    fpr, tpr, _ = roc_curve(y_val.ravel(), y_pred.ravel())
    print(model_name, auc(fpr, tpr))
def calculate_aucs(filename, model_functions, model_functions_rgb, model_bit, X_val, y_val, X_val_rgb, y_val_rgb, batch_size):
    """Print the validation AUC for every trained model variant."""
    # grayscale-input models
    grayscale_names = ['Simple', 'Tiny', 'Small', 'LargeW', 'LargeT']
    for name, model in zip(grayscale_names, model_functions):
        print_auc(model, name, filename, X_val, y_val, batch_size)
    # RGB-input EfficientNet variants
    rgb_names = ['EfficientNetB3-ImageNet', 'EfficientNetB3']
    for name, model in zip(rgb_names, model_functions_rgb):
        print_auc(model, name, filename, X_val_rgb, y_val_rgb, batch_size)
    # BiT inputs are rescaled to [0, 1]; note batch_size deliberately falls
    # back to print_auc's default here, as in the original
    print_auc(model_bit, 'GoogleBiT', filename, X_val_rgb/np.max(X_val_rgb), y_val_rgb)
def plot_f1_scores(f1):
    """Draw one grouped bar chart of f1 scores per model.

    ``f1`` is a sequence (one entry per model) of flat score lists laid out
    in groups of 4 -- baseline, classw, dataaug, classw-dataaug -- for each
    of the 5 image variants shown on the x axis.  Titles are taken from a
    module-level ``models`` list (defined elsewhere in the file).
    """
    for i in range(len(f1)):
        plt.figure(figsize=(11,7))
        # Step through the flat list 4 scores at a time; the four bars of a
        # group are offset around position j so they sit side by side.
        for j in range(0, len(f1[i]), 4):
            plt.bar(j-1.05, f1[i][j], width=0.7, linewidth=0, label='baseline', color=sns.color_palette('deep')[0])
            plt.bar(j-0.35, f1[i][j+1], width=0.7, linewidth=0, label='classw', color=sns.color_palette('deep')[2])
            plt.bar(j+0.35, f1[i][j+2], width=0.7, linewidth=0, label='dataaug', color=sns.color_palette('Set2')[6])
            plt.bar(j+1.05, f1[i][j+3], width=0.7, linewidth=0, label='classw-dataaug', color=sns.color_palette('deep')[4])
        plt.ylabel('f1_score')
        plt.yticks(np.arange(0,1.1,step=0.1))
        # One tick per 4-score group, labelled with the image variant.
        plt.xticks(range(0,20,4), ['original', 'nobackg', 'crop', 'lungs-nocrop', 'lungs'])
        # Explicit legend labels: the per-bar labels above repeat every group.
        plt.legend(['baseline', 'classw', 'dataaug', 'classw-dataaug'], bbox_to_anchor=(1,0.5), loc='center left')
        plt.ylim(0,1)
plt.title(models[i]); | [
"matplotlib.pyplot.ylabel",
"sklearn.metrics.auc",
"tensorflow_addons.metrics.F1Score",
"sklearn.metrics.roc_curve",
"numpy.arange",
"seaborn.set",
"seaborn.color_palette",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.max",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.ylim",
... | [((107, 145), 'seaborn.set', 'sns.set', ([], {'font_scale': '(1.5)', 'style': '"""white"""'}), "(font_scale=1.5, style='white')\n", (114, 145), True, 'import seaborn as sns\n'), ((524, 596), 'tensorflow_addons.metrics.F1Score', 'tfa.metrics.F1Score', ([], {'num_classes': 'num_classes', 'average': 'average', 'name': 'name'}), '(num_classes=num_classes, average=average, name=name)\n', (543, 596), True, 'import tensorflow_addons as tfa\n'), ((741, 768), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(35, 8)'}), '(figsize=(35, 8))\n', (751, 768), True, 'import matplotlib.pyplot as plt\n'), ((773, 805), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['title'], {'fontsize': '(20)'}), '(title, fontsize=20)\n', (785, 805), True, 'import matplotlib.pyplot as plt\n'), ((810, 826), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(131)'], {}), '(131)\n', (821, 826), True, 'import matplotlib.pyplot as plt\n'), ((831, 889), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', "history['accuracy']"], {'label': '"""Train"""'}), "(epochs_range, history['accuracy'], label='Train')\n", (839, 889), True, 'import matplotlib.pyplot as plt\n'), ((894, 954), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', "history['val_accuracy']"], {'label': '"""Val"""'}), "(epochs_range, history['val_accuracy'], label='Val')\n", (902, 954), True, 'import matplotlib.pyplot as plt\n'), ((959, 981), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': 'legend'}), '(loc=legend)\n', (969, 981), True, 'import matplotlib.pyplot as plt\n'), ((986, 1007), 'matplotlib.pyplot.title', 'plt.title', (['"""Accuracy"""'], {}), "('Accuracy')\n", (995, 1007), True, 'import matplotlib.pyplot as plt\n'), ((1012, 1026), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (1020, 1026), True, 'import matplotlib.pyplot as plt\n'), ((1031, 1047), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(132)'], {}), '(132)\n', (1042, 1047), True, 'import matplotlib.pyplot as plt\n'), 
((1052, 1106), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', "history['loss']"], {'label': '"""Train"""'}), "(epochs_range, history['loss'], label='Train')\n", (1060, 1106), True, 'import matplotlib.pyplot as plt\n'), ((1111, 1167), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', "history['val_loss']"], {'label': '"""Val"""'}), "(epochs_range, history['val_loss'], label='Val')\n", (1119, 1167), True, 'import matplotlib.pyplot as plt\n'), ((1172, 1194), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': 'legend'}), '(loc=legend)\n', (1182, 1194), True, 'import matplotlib.pyplot as plt\n'), ((1199, 1216), 'matplotlib.pyplot.title', 'plt.title', (['"""Loss"""'], {}), "('Loss')\n", (1208, 1216), True, 'import matplotlib.pyplot as plt\n'), ((1221, 1238), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (1231, 1238), True, 'import matplotlib.pyplot as plt\n'), ((1244, 1260), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(133)'], {}), '(133)\n', (1255, 1260), True, 'import matplotlib.pyplot as plt\n'), ((1265, 1323), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', "history['f1_score']"], {'label': '"""Train"""'}), "(epochs_range, history['f1_score'], label='Train')\n", (1273, 1323), True, 'import matplotlib.pyplot as plt\n'), ((1328, 1388), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', "history['val_f1_score']"], {'label': '"""Val"""'}), "(epochs_range, history['val_f1_score'], label='Val')\n", (1336, 1388), True, 'import matplotlib.pyplot as plt\n'), ((1393, 1415), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': 'legend'}), '(loc=legend)\n', (1403, 1415), True, 'import matplotlib.pyplot as plt\n'), ((1420, 1441), 'matplotlib.pyplot.title', 'plt.title', (['"""f1 score"""'], {}), "('f1 score')\n", (1429, 1441), True, 'import matplotlib.pyplot as plt\n'), ((1446, 1460), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (1454, 1460), True, 'import matplotlib.pyplot as plt\n'), 
((2304, 2407), 'seaborn.heatmap', 'sns.heatmap', (['cm'], {'annot': '(True)', 'xticklabels': 'CLASS_NAMES', 'yticklabels': 'CLASS_NAMES', 'cbar': '(False)', 'fmt': 'fmt'}), '(cm, annot=True, xticklabels=CLASS_NAMES, yticklabels=\n CLASS_NAMES, cbar=False, fmt=fmt)\n', (2315, 2407), True, 'import seaborn as sns\n'), ((2407, 2430), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted"""'], {}), "('Predicted')\n", (2417, 2430), True, 'import matplotlib.pyplot as plt\n'), ((2435, 2453), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True"""'], {}), "('True')\n", (2445, 2453), True, 'import matplotlib.pyplot as plt\n'), ((2458, 2480), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(0)'}), '(rotation=0)\n', (2468, 2480), True, 'import matplotlib.pyplot as plt\n'), ((2485, 2508), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'va': '"""center"""'}), "(va='center')\n", (2495, 2508), True, 'import matplotlib.pyplot as plt\n'), ((2596, 2623), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 9)'}), '(figsize=(20, 9))\n', (2606, 2623), True, 'import matplotlib.pyplot as plt\n'), ((2627, 2643), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (2638, 2643), True, 'import matplotlib.pyplot as plt\n'), ((2696, 2743), 'matplotlib.pyplot.title', 'plt.title', (["(model_name + ' - ' + images + config)"], {}), "(model_name + ' - ' + images + config)\n", (2705, 2743), True, 'import matplotlib.pyplot as plt\n'), ((2748, 2764), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (2759, 2764), True, 'import matplotlib.pyplot as plt\n'), ((2854, 2870), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2863, 2870), True, 'import matplotlib.pyplot as plt\n'), ((2875, 2885), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2883, 2885), True, 'import matplotlib.pyplot as plt\n'), ((4329, 4360), 'sklearn.metrics.auc', 'auc', (["fpr['micro']", "tpr['micro']"], {}), "(fpr['micro'], 
tpr['micro'])\n", (4332, 4360), False, 'from sklearn.metrics import classification_report, confusion_matrix, roc_curve, roc_auc_score, precision_recall_curve, auc, f1_score, average_precision_score\n'), ((4366, 4397), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]', '"""k--"""'], {}), "([0, 1], [0, 1], 'k--')\n", (4374, 4397), True, 'import matplotlib.pyplot as plt\n'), ((4402, 4424), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-0.05, 1.0]'], {}), '([-0.05, 1.0])\n', (4410, 4424), True, 'import matplotlib.pyplot as plt\n'), ((4429, 4450), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (4437, 4450), True, 'import matplotlib.pyplot as plt\n'), ((4690, 4713), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(15)'}), '(fontsize=15)\n', (4700, 4713), True, 'import matplotlib.pyplot as plt\n'), ((4718, 4741), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(15)'}), '(fontsize=15)\n', (4728, 4741), True, 'import matplotlib.pyplot as plt\n'), ((4746, 4769), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(15)'}), '(fontsize=15)\n', (4756, 4769), True, 'import matplotlib.pyplot as plt\n'), ((4774, 4820), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {'fontsize': '(15)'}), "('False Positive Rate', fontsize=15)\n", (4784, 4820), True, 'import matplotlib.pyplot as plt\n'), ((4825, 4870), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {'fontsize': '(15)'}), "('True Positive Rate', fontsize=15)\n", (4835, 4870), True, 'import matplotlib.pyplot as plt\n'), ((5469, 5489), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (5477, 5489), True, 'import matplotlib.pyplot as plt\n'), ((5495, 5516), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (5503, 5516), True, 'import matplotlib.pyplot as plt\n'), ((5793, 5823), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(3)', 'fontsize': 
'(15)'}), '(loc=3, fontsize=15)\n', (5803, 5823), True, 'import matplotlib.pyplot as plt\n'), ((5828, 5851), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(15)'}), '(fontsize=15)\n', (5838, 5851), True, 'import matplotlib.pyplot as plt\n'), ((5856, 5879), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(15)'}), '(fontsize=15)\n', (5866, 5879), True, 'import matplotlib.pyplot as plt\n'), ((5884, 5917), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Recall"""'], {'fontsize': '(15)'}), "('Recall', fontsize=15)\n", (5894, 5917), True, 'import matplotlib.pyplot as plt\n'), ((5922, 5958), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Precision"""'], {'fontsize': '(15)'}), "('Precision', fontsize=15)\n", (5932, 5958), True, 'import matplotlib.pyplot as plt\n'), ((1548, 1560), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1557, 1560), False, 'import json\n'), ((1787, 1858), 'tensorflow_addons.metrics.F1Score', 'tfa.metrics.F1Score', ([], {'num_classes': '(3)', 'average': '"""weighted"""', 'name': '"""f1_score"""'}), "(num_classes=3, average='weighted', name='f1_score')\n", (1806, 1858), True, 'import tensorflow_addons as tfa\n'), ((2193, 2217), 'numpy.argmax', 'np.argmax', (['y_val'], {'axis': '(1)'}), '(y_val, axis=1)\n', (2202, 2217), True, 'import numpy as np\n'), ((2219, 2244), 'numpy.argmax', 'np.argmax', (['y_pred'], {'axis': '(1)'}), '(y_pred, axis=1)\n', (2228, 2244), True, 'import numpy as np\n'), ((4148, 4185), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test[:, i]', 'y_pred[:, i]'], {}), '(y_test[:, i], y_pred[:, i])\n', (4157, 4185), False, 'from sklearn.metrics import classification_report, confusion_matrix, roc_curve, roc_auc_score, precision_recall_curve, auc, f1_score, average_precision_score\n'), ((4207, 4226), 'sklearn.metrics.auc', 'auc', (['fpr[i]', 'tpr[i]'], {}), '(fpr[i], tpr[i])\n', (4210, 4226), False, 'from sklearn.metrics import classification_report, confusion_matrix, roc_curve, roc_auc_score, 
precision_recall_curve, auc, f1_score, average_precision_score\n'), ((5152, 5202), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['y_test[:, i]', 'y_pred[:, i]'], {}), '(y_test[:, i], y_pred[:, i])\n', (5174, 5202), False, 'from sklearn.metrics import classification_report, confusion_matrix, roc_curve, roc_auc_score, precision_recall_curve, auc, f1_score, average_precision_score\n'), ((5228, 5279), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['y_test[:, i]', 'y_pred[:, i]'], {}), '(y_test[:, i], y_pred[:, i])\n', (5251, 5279), False, 'from sklearn.metrics import classification_report, confusion_matrix, roc_curve, roc_auc_score, precision_recall_curve, auc, f1_score, average_precision_score\n'), ((6278, 6291), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (6281, 6291), False, 'from sklearn.metrics import classification_report, confusion_matrix, roc_curve, roc_auc_score, precision_recall_curve, auc, f1_score, average_precision_score\n'), ((6951, 6978), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(11, 7)'}), '(figsize=(11, 7))\n', (6961, 6978), True, 'import matplotlib.pyplot as plt\n'), ((7515, 7537), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""f1_score"""'], {}), "('f1_score')\n", (7525, 7537), True, 'import matplotlib.pyplot as plt\n'), ((7684, 7795), 'matplotlib.pyplot.legend', 'plt.legend', (["['baseline', 'classw', 'dataaug', 'classw-dataaug']"], {'bbox_to_anchor': '(1, 0.5)', 'loc': '"""center left"""'}), "(['baseline', 'classw', 'dataaug', 'classw-dataaug'],\n bbox_to_anchor=(1, 0.5), loc='center left')\n", (7694, 7795), True, 'import matplotlib.pyplot as plt\n'), ((7799, 7813), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (7807, 7813), True, 'import matplotlib.pyplot as plt\n'), ((7821, 7841), 'matplotlib.pyplot.title', 'plt.title', (['models[i]'], {}), '(models[i])\n', (7830, 7841), True, 'import matplotlib.pyplot as plt\n'), ((6850, 6867), 
'numpy.max', 'np.max', (['X_val_rgb'], {}), '(X_val_rgb)\n', (6856, 6867), True, 'import numpy as np\n'), ((7557, 7584), 'numpy.arange', 'np.arange', (['(0)', '(1.1)'], {'step': '(0.1)'}), '(0, 1.1, step=0.1)\n', (7566, 7584), True, 'import numpy as np\n'), ((2022, 2046), 'numpy.argmax', 'np.argmax', (['y_val'], {'axis': '(1)'}), '(y_val, axis=1)\n', (2031, 2046), True, 'import numpy as np\n'), ((2048, 2073), 'numpy.argmax', 'np.argmax', (['y_pred'], {'axis': '(1)'}), '(y_pred, axis=1)\n', (2057, 2073), True, 'import numpy as np\n'), ((7115, 7140), 'seaborn.color_palette', 'sns.color_palette', (['"""deep"""'], {}), "('deep')\n", (7132, 7140), True, 'import seaborn as sns\n'), ((7231, 7256), 'seaborn.color_palette', 'sns.color_palette', (['"""deep"""'], {}), "('deep')\n", (7248, 7256), True, 'import seaborn as sns\n'), ((7348, 7373), 'seaborn.color_palette', 'sns.color_palette', (['"""Set2"""'], {}), "('Set2')\n", (7365, 7373), True, 'import seaborn as sns\n'), ((7472, 7497), 'seaborn.color_palette', 'sns.color_palette', (['"""deep"""'], {}), "('deep')\n", (7489, 7497), True, 'import seaborn as sns\n')] |
import numpy as np
import pytest
import pyccl as ccl
# Baseline flat cosmology shared by most tests; the cheap BBKS transfer
# function and linear matter power keep evaluation fast and deterministic.
COSMO = ccl.Cosmology(
    Omega_c=0.27, Omega_b=0.045, h=0.67, sigma8=0.8, n_s=0.96,
    transfer_function='bbks', matter_power_spectrum='linear')
# Variant with a massive neutrino (m_nu=0.1) and non-zero curvature
# (Omega_k=0.1) so that every species probed by omega_x/rho_x below is
# non-trivial.
COSMO_NU = ccl.Cosmology(
    Omega_c=0.27, Omega_b=0.045, h=0.67, sigma8=0.8, n_s=0.96,
    transfer_function='bbks', matter_power_spectrum='linear', m_nu=0.1,
    Omega_k=0.1)
# Scale-factor inputs covering scalar int/float, list and ndarray, to
# exercise broadcasting of the background API.
AVALS = [
    1,
    1.0,
    0.8,
    [0.2, 0.4],
    np.array([0.2, 0.4])]
@pytest.mark.parametrize('a', AVALS)
@pytest.mark.parametrize('func', [
    ccl.growth_factor,
    ccl.growth_rate,
    ccl.growth_factor_unnorm,
    ccl.scale_factor_of_chi,
    ccl.comoving_angular_distance,
    ccl.comoving_radial_distance,
    ccl.luminosity_distance,
    ccl.h_over_h0,
    ccl.distance_modulus,
    ccl.mu_MG,
    ccl.Sig_MG])
def test_background_a_interface(a, func):
    """Every background function accepts scalar/list/array ``a`` and
    returns finite values with matching shape; the distance modulus alone
    must raise CCLError when any a == 1 (zero distance)."""
    diverges_at_today = func is ccl.distance_modulus and np.any(a == 1)
    if diverges_at_today:
        with pytest.raises(ccl.CCLError):
            func(COSMO, a)
        return
    result = func(COSMO, a)
    assert np.all(np.isfinite(result))
    assert np.shape(result) == np.shape(a)
@pytest.mark.parametrize('a', AVALS)
@pytest.mark.parametrize('kind', [
    'matter',
    'dark_energy',
    'radiation',
    'curvature',
    'neutrinos_rel',
    'neutrinos_massive'])
def test_background_omega_x(a, kind):
    """omega_x broadcasts over ``a`` and, at a = 1, reproduces the
    corresponding present-day density parameter stored on COSMO_NU."""
    frac = ccl.omega_x(COSMO_NU, a, kind)
    assert np.all(np.isfinite(frac))
    assert np.shape(frac) == np.shape(a)
    if np.all(a == 1):
        # Present-day values taken straight from the cosmology parameters.
        expected_today = {
            'matter': (COSMO_NU['Omega_b'] +
                       COSMO_NU['Omega_c'] +
                       COSMO_NU['Omega_nu_mass']),
            'dark_energy': COSMO_NU['Omega_l'],
            'radiation': COSMO_NU['Omega_g'],
            'curvature': COSMO_NU['Omega_k'],
            'neutrinos_rel': COSMO_NU['Omega_nu_rel'],
            'neutrinos_massive': COSMO_NU['Omega_nu_mass'],
        }[kind]
        assert np.allclose(frac, expected_today)
def test_background_omega_x_raises():
    """An unrecognised species label must be rejected with ValueError."""
    with pytest.raises(ValueError):
        ccl.omega_x(COSMO, 1, 'blah')
@pytest.mark.parametrize('a', AVALS)
@pytest.mark.parametrize('kind', [
    'matter',
    'dark_energy',
    'radiation',
    'curvature',
    'neutrinos_rel',
    'neutrinos_massive'])
@pytest.mark.parametrize('is_comoving', [True, False])
def test_background_rho_x(a, kind, is_comoving):
    """rho_x returns finite densities for every species, broadcasting over
    ``a``, in both comoving and physical flavours."""
    density = ccl.rho_x(COSMO_NU, a, kind, is_comoving)
    assert np.all(np.isfinite(density))
    assert np.shape(density) == np.shape(a)
def test_background_rho_x_raises():
    """An unrecognised species label must be rejected with ValueError."""
    with pytest.raises(ValueError):
        ccl.rho_x(COSMO, 1, 'blah', False)
| [
"numpy.shape",
"numpy.allclose",
"pyccl.omega_x",
"pyccl.rho_x",
"numpy.any",
"pytest.mark.parametrize",
"numpy.array",
"numpy.isfinite",
"pytest.raises",
"numpy.all",
"pyccl.Cosmology"
] | [((63, 197), 'pyccl.Cosmology', 'ccl.Cosmology', ([], {'Omega_c': '(0.27)', 'Omega_b': '(0.045)', 'h': '(0.67)', 'sigma8': '(0.8)', 'n_s': '(0.96)', 'transfer_function': '"""bbks"""', 'matter_power_spectrum': '"""linear"""'}), "(Omega_c=0.27, Omega_b=0.045, h=0.67, sigma8=0.8, n_s=0.96,\n transfer_function='bbks', matter_power_spectrum='linear')\n", (76, 197), True, 'import pyccl as ccl\n'), ((215, 376), 'pyccl.Cosmology', 'ccl.Cosmology', ([], {'Omega_c': '(0.27)', 'Omega_b': '(0.045)', 'h': '(0.67)', 'sigma8': '(0.8)', 'n_s': '(0.96)', 'transfer_function': '"""bbks"""', 'matter_power_spectrum': '"""linear"""', 'm_nu': '(0.1)', 'Omega_k': '(0.1)'}), "(Omega_c=0.27, Omega_b=0.045, h=0.67, sigma8=0.8, n_s=0.96,\n transfer_function='bbks', matter_power_spectrum='linear', m_nu=0.1,\n Omega_k=0.1)\n", (228, 376), True, 'import pyccl as ccl\n'), ((463, 498), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""a"""', 'AVALS'], {}), "('a', AVALS)\n", (486, 498), False, 'import pytest\n'), ((500, 785), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func"""', '[ccl.growth_factor, ccl.growth_rate, ccl.growth_factor_unnorm, ccl.\n scale_factor_of_chi, ccl.comoving_angular_distance, ccl.\n comoving_radial_distance, ccl.luminosity_distance, ccl.h_over_h0, ccl.\n distance_modulus, ccl.mu_MG, ccl.Sig_MG]'], {}), "('func', [ccl.growth_factor, ccl.growth_rate, ccl.\n growth_factor_unnorm, ccl.scale_factor_of_chi, ccl.\n comoving_angular_distance, ccl.comoving_radial_distance, ccl.\n luminosity_distance, ccl.h_over_h0, ccl.distance_modulus, ccl.mu_MG,\n ccl.Sig_MG])\n", (523, 785), False, 'import pytest\n'), ((1105, 1140), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""a"""', 'AVALS'], {}), "('a', AVALS)\n", (1128, 1140), False, 'import pytest\n'), ((1142, 1268), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""kind"""', "['matter', 'dark_energy', 'radiation', 'curvature', 'neutrinos_rel',\n 'neutrinos_massive']"], {}), "('kind', 
['matter', 'dark_energy', 'radiation',\n 'curvature', 'neutrinos_rel', 'neutrinos_massive'])\n", (1165, 1268), False, 'import pytest\n'), ((2198, 2233), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""a"""', 'AVALS'], {}), "('a', AVALS)\n", (2221, 2233), False, 'import pytest\n'), ((2235, 2361), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""kind"""', "['matter', 'dark_energy', 'radiation', 'curvature', 'neutrinos_rel',\n 'neutrinos_massive']"], {}), "('kind', ['matter', 'dark_energy', 'radiation',\n 'curvature', 'neutrinos_rel', 'neutrinos_massive'])\n", (2258, 2361), False, 'import pytest\n'), ((2384, 2437), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""is_comoving"""', '[True, False]'], {}), "('is_comoving', [True, False])\n", (2407, 2437), False, 'import pytest\n'), ((438, 458), 'numpy.array', 'np.array', (['[0.2, 0.4]'], {}), '([0.2, 0.4])\n', (446, 458), True, 'import numpy as np\n'), ((1338, 1368), 'pyccl.omega_x', 'ccl.omega_x', (['COSMO_NU', 'a', 'kind'], {}), '(COSMO_NU, a, kind)\n', (1349, 1368), True, 'import pyccl as ccl\n'), ((1453, 1467), 'numpy.all', 'np.all', (['(a == 1)'], {}), '(a == 1)\n', (1459, 1467), True, 'import numpy as np\n'), ((2497, 2538), 'pyccl.rho_x', 'ccl.rho_x', (['COSMO_NU', 'a', 'kind', 'is_comoving'], {}), '(COSMO_NU, a, kind, is_comoving)\n', (2506, 2538), True, 'import pyccl as ccl\n'), ((894, 908), 'numpy.any', 'np.any', (['(a == 1)'], {}), '(a == 1)\n', (900, 908), True, 'import numpy as np\n'), ((1387, 1403), 'numpy.isfinite', 'np.isfinite', (['val'], {}), '(val)\n', (1398, 1403), True, 'import numpy as np\n'), ((1416, 1429), 'numpy.shape', 'np.shape', (['val'], {}), '(val)\n', (1424, 1429), True, 'import numpy as np\n'), ((1433, 1444), 'numpy.shape', 'np.shape', (['a'], {}), '(a)\n', (1441, 1444), True, 'import numpy as np\n'), ((2056, 2080), 'numpy.allclose', 'np.allclose', (['val', 'val_z0'], {}), '(val, val_z0)\n', (2067, 2080), True, 'import numpy as np\n'), ((2130, 2155), 
'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2143, 2155), False, 'import pytest\n'), ((2165, 2194), 'pyccl.omega_x', 'ccl.omega_x', (['COSMO', '(1)', '"""blah"""'], {}), "(COSMO, 1, 'blah')\n", (2176, 2194), True, 'import pyccl as ccl\n'), ((2557, 2573), 'numpy.isfinite', 'np.isfinite', (['val'], {}), '(val)\n', (2568, 2573), True, 'import numpy as np\n'), ((2586, 2599), 'numpy.shape', 'np.shape', (['val'], {}), '(val)\n', (2594, 2599), True, 'import numpy as np\n'), ((2603, 2614), 'numpy.shape', 'np.shape', (['a'], {}), '(a)\n', (2611, 2614), True, 'import numpy as np\n'), ((2662, 2687), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2675, 2687), False, 'import pytest\n'), ((2697, 2731), 'pyccl.rho_x', 'ccl.rho_x', (['COSMO', '(1)', '"""blah"""', '(False)'], {}), "(COSMO, 1, 'blah', False)\n", (2706, 2731), True, 'import pyccl as ccl\n'), ((923, 950), 'pytest.raises', 'pytest.raises', (['ccl.CCLError'], {}), '(ccl.CCLError)\n', (936, 950), False, 'import pytest\n'), ((1040, 1056), 'numpy.isfinite', 'np.isfinite', (['val'], {}), '(val)\n', (1051, 1056), True, 'import numpy as np\n'), ((1073, 1086), 'numpy.shape', 'np.shape', (['val'], {}), '(val)\n', (1081, 1086), True, 'import numpy as np\n'), ((1090, 1101), 'numpy.shape', 'np.shape', (['a'], {}), '(a)\n', (1098, 1101), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.