code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
from __future__ import division
from __future__ import absolute_import
from builtins import object
from past.utils import old_div
from nose.tools import (assert_equal, assert_not_equal, raises,
assert_almost_equal)
from nose.plugins.skip import SkipTest
from .test_helpers import assert_items_almost_equal, assert_items_equal
import pandas as pd
import numpy as np
import openpathsampling as paths
import logging
# Silence the noisy openpathsampling loggers so test output stays readable.
logging.getLogger('openpathsampling.initialization').setLevel(logging.CRITICAL)
logging.getLogger('openpathsampling.ensemble').setLevel(logging.CRITICAL)
logging.getLogger('openpathsampling.storage').setLevel(logging.CRITICAL)
logging.getLogger('openpathsampling.netcdfplus').setLevel(logging.CRITICAL)
class TestWHAM(object):
    """Tests for the WHAM reweighting in ``paths.numerics.WHAM``.

    The fixtures model three interface histograms whose cleaned,
    reweighted combination should reproduce the exact crossing
    probability stored in ``self.exact``.
    """
    def setup(self):
        """Build the raw input frame, its expected cleaned form, and a
        WHAM solver with ``cutoff=0.1``."""
        self.exact = [1.0, 0.5, 0.25, 0.125, 0.0625, 0.03125, 0.015625]
        self.iface1 = [2.0, 1.0, 0.5, 0.25, 0.125, 0.0625, 0.0]
        self.iface2 = [1.0, 1.0, 1.0, 0.5, 0.25, 0.125, 0.0625]
        self.iface3 = [3.0, 3.0, 3.0, 3.0, 3.0, 1.5, 0.75]
        # self.iface1 = [1.0, 0.5, 0.25, 0.125, 0.0625, 0.0, 0.0]
        # self.iface2 = [1.0, 1.0, 1.0, 0.5, 0.25, 0.125, 0.0625]
        # self.iface3 = [1.0, 1.0, 1.0, 1.0, 1.0, 0.5, 0.25]
        # self.iface1 = [2.0, 0.5, 0.125, 0.0]
        # self.iface2 = [1.0, 1.0, 0.25, 0.0625]
        # self.iface3 = [3.0, 3.0, 3.0, 0.75]
        # self.index = [0.0, 0.2, 0.4, 0.6]
        self.columns = ["Interface 1", "Interface 2", "Interface 3"]
        self.index = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
        # One column per interface, one row per order-parameter value.
        self.input_df = pd.DataFrame(
            data=np.array([self.iface1, self.iface2, self.iface3]).T,
            index=self.index,
            columns=self.columns
        )
        self.expected_cleaned = np.array([[2.0, 0.0, 0.0],
                                          [1.0, 0.0, 0.0],
                                          [0.5, 1.0, 0.0],
                                          [0.25, 0.5, 0.0],
                                          [0.0, 0.25, 3.0],
                                          [0.0, 0.125, 1.5],
                                          [0.0, 0.0, 0.75]])
        self.cleaned = pd.DataFrame(data=self.expected_cleaned,
                                    index=self.index,
                                    columns=self.columns)
        self.wham = paths.numerics.WHAM(cutoff=0.1)
    def test_prep_reverse_cumulative(self):
        """Cleaning the raw input must reproduce ``expected_cleaned``."""
        cleaned = self.wham.prep_reverse_cumulative(self.input_df)
        np.testing.assert_allclose(cleaned.values,
                                   self.expected_cleaned)
    def test_prep_reverse_cumulative_with_interfaces(self):
        """Cleaning with explicit interface locations gives a slightly
        different cleaned array (note the extra 3.0 in row 0.3)."""
        wham = paths.numerics.WHAM(cutoff=0.1, interfaces=[0.0, 0.2, 0.3])
        cleaned = wham.prep_reverse_cumulative(self.input_df)
        np.testing.assert_allclose(cleaned.values,
                                   np.array([[2.0, 0.0, 0.0],
                                             [1.0, 0.0, 0.0],
                                             [0.5, 1.0, 0.0],
                                             [0.25, 0.5, 3.0],
                                             [0.0, 0.25, 3.0],
                                             [0.0, 0.125, 1.5],
                                             [0.0, 0.0, 0.75]]))
    def test_unweighting_tis(self):
        """The unweighting matrix is 1 exactly where the cleaned
        histogram is nonzero, 0 elsewhere."""
        unweighting = self.wham.unweighting_tis(self.cleaned)
        expected = np.array([[1.0, 0.0, 0.0],
                             [1.0, 0.0, 0.0],
                             [1.0, 1.0, 0.0],
                             [1.0, 1.0, 0.0],
                             [0.0, 1.0, 1.0],
                             [0.0, 1.0, 1.0],
                             [0.0, 0.0, 1.0]])
        np.testing.assert_allclose(unweighting.values, expected)
    def test_sum_k_Hk_Q(self):
        """Per-row sums over the cleaned interface histograms."""
        sum_k_Hk_Q = self.wham.sum_k_Hk_Q(self.cleaned)
        expected = np.array([2.0, 1.0, 1.5, 0.75, 3.25, 1.625, 0.75])
        np.testing.assert_allclose(sum_k_Hk_Q.values, expected)
    def test_n_entries(self):
        """Per-column sums: total entries in each interface histogram."""
        n_entries = self.wham.n_entries(self.cleaned)
        expected = np.array([3.75, 1.875, 5.25])
        np.testing.assert_allclose(n_entries.values, expected)
    def test_weighted_counts_tis(self):
        """Each unweighting entry scaled by its column's total entries."""
        n_entries = self.wham.n_entries(self.cleaned)
        unweighting = self.wham.unweighting_tis(self.cleaned)
        weighted_counts = self.wham.weighted_counts_tis(unweighting,
                                                        n_entries)
        expected = np.array([[3.75, 0.0, 0.0],
                             [3.75, 0.0, 0.0],
                             [3.75, 1.875, 0.0],
                             [3.75, 1.875, 0.0],
                             [0.0, 1.875, 5.25],
                             [0.0, 1.875, 5.25],
                             [0.0, 0.0, 5.25]])
        np.testing.assert_allclose(weighted_counts.values, expected)
    def test_generate_lnZ(self):
        """Self-consistent ln(Z) solution from a uniform initial guess."""
        guess = [1.0, 1.0, 1.0]
        expected_lnZ = np.log([1.0, old_div(1.0,4.0), old_div(7.0,120.0)])
        # TODO: I'm not sure the last is log(7/120)
        # however, I got the same result out of the old version, too, and
        # this does combine into the correct result in the end (see
        # test_output_histogram)
        unweighting = self.wham.unweighting_tis(self.cleaned)
        sum_k_Hk_Q = self.wham.sum_k_Hk_Q(self.cleaned)
        weighted_counts = self.wham.weighted_counts_tis(
            unweighting,
            self.wham.n_entries(self.cleaned)
        )
        lnZ = self.wham.generate_lnZ(guess, unweighting, weighted_counts,
                                     sum_k_Hk_Q)
        np.testing.assert_allclose(lnZ.values, expected_lnZ)
    def test_output_histogram(self):
        """Combining the known ln(Z) values must recover ``self.exact``
        after cumulative normalization."""
        sum_k_Hk_Q = self.wham.sum_k_Hk_Q(self.cleaned)
        n_entries = self.wham.n_entries(self.cleaned)
        unweighting = self.wham.unweighting_tis(self.cleaned)
        weighted_counts = self.wham.weighted_counts_tis(unweighting,
                                                        n_entries)
        lnZ = pd.Series(data=np.log([1.0, old_div(1.0,4.0), old_div(7.0,120.0)]),
                        index=n_entries.index)
        wham_hist = self.wham.output_histogram(lnZ, sum_k_Hk_Q,
                                               weighted_counts)
        normed = self.wham.normalize_cumulative(wham_hist)
        np.testing.assert_allclose(normed.values, np.array(self.exact))
    def test_guess_lnZ_crossing_probability(self):
        """Initial ln(Z) guess derived from the crossing probabilities."""
        input_data = np.array([[2.0, 1.0, 5.0],
                               [1.0, 1.0, 5.0],
                               [0.5, 1.0, 5.0],
                               [0.1, 0.2, 5.0],
                               [0.0, 0.04, 1.0],
                               [0.0, 0.02, 0.2]])
        input_df = pd.DataFrame(data=input_data,
                                index=self.index[0:6],
                                columns=self.columns)
        cleaned = self.wham.prep_reverse_cumulative(input_df)
        guess_lnZ = self.wham.guess_lnZ_crossing_probability(cleaned)
        expected_Z = np.array([1.0, 0.25, 0.25*0.2])
        np.testing.assert_allclose(guess_lnZ.values, np.log(expected_Z))
    def test_wham_bam_histogram(self):
        """End-to-end: the full pipeline reproduces the exact result."""
        wham_hist = self.wham.wham_bam_histogram(self.input_df)
        np.testing.assert_allclose(wham_hist.values, self.exact)
    @raises(RuntimeError)
    def test_check_overlaps_no_overlap_with_first(self):
        """No overlap with the first interface histogram must raise."""
        bad_data = np.array([[1.0, 0.0, 0.0],
                             [0.5, 0.0, 0.0],
                             [0.0, 1.0, 0.0],
                             [0.0, 0.5, 1.0],
                             [0.0, 0.1, 0.2]])
        bad_df = pd.DataFrame(data=bad_data,
                              index=self.index[0:5],
                              columns=self.columns)
        self.wham.check_cleaned_overlaps(bad_df)
    @raises(RuntimeError)
    def test_check_overlaps_no_overlap_with_final(self):
        """No overlap with the final interface histogram must raise."""
        bad_data = np.array([[1.0, 0.0, 0.0],
                             [0.5, 0.0, 0.0],
                             [0.2, 1.0, 0.0],
                             [0.1, 0.5, 0.0],
                             [0.0, 0.0, 1.0],
                             [0.0, 0.0, 0.5]])
        bad_df = pd.DataFrame(data=bad_data,
                              index=self.index[0:6],
                              columns=self.columns)
        self.wham.check_cleaned_overlaps(bad_df)
    @raises(RuntimeError)
    def test_check_overlaps_no_overlap_in_middle(self):
        """A gap between interior interface histograms must raise."""
        bad_data = np.array([[1.0, 0.0, 0.0, 0.0],
                             [0.5, 1.0, 0.0, 0.0],
                             [0.1, 0.2, 0.0, 0.0],
                             [0.0, 0.0, 1.0, 0.0],
                             [0.0, 0.0, 0.5, 1.0],
                             [0.0, 0.0, 0.1, 0.2]])
        bad_df = pd.DataFrame(data=bad_data,
                              index=self.index[0:6],
                              columns=self.columns + ['Interface 4'])
        self.wham.check_cleaned_overlaps(bad_df)
| [
"logging.getLogger",
"numpy.testing.assert_allclose",
"numpy.log",
"past.utils.old_div",
"numpy.array",
"nose.tools.raises",
"pandas.DataFrame",
"openpathsampling.numerics.WHAM"
] | [((7392, 7412), 'nose.tools.raises', 'raises', (['RuntimeError'], {}), '(RuntimeError)\n', (7398, 7412), False, 'from nose.tools import assert_equal, assert_not_equal, raises, assert_almost_equal\n'), ((7906, 7926), 'nose.tools.raises', 'raises', (['RuntimeError'], {}), '(RuntimeError)\n', (7912, 7926), False, 'from nose.tools import assert_equal, assert_not_equal, raises, assert_almost_equal\n'), ((8466, 8486), 'nose.tools.raises', 'raises', (['RuntimeError'], {}), '(RuntimeError)\n', (8472, 8486), False, 'from nose.tools import assert_equal, assert_not_equal, raises, assert_almost_equal\n'), ((439, 491), 'logging.getLogger', 'logging.getLogger', (['"""openpathsampling.initialization"""'], {}), "('openpathsampling.initialization')\n", (456, 491), False, 'import logging\n'), ((519, 565), 'logging.getLogger', 'logging.getLogger', (['"""openpathsampling.ensemble"""'], {}), "('openpathsampling.ensemble')\n", (536, 565), False, 'import logging\n'), ((593, 638), 'logging.getLogger', 'logging.getLogger', (['"""openpathsampling.storage"""'], {}), "('openpathsampling.storage')\n", (610, 638), False, 'import logging\n'), ((666, 714), 'logging.getLogger', 'logging.getLogger', (['"""openpathsampling.netcdfplus"""'], {}), "('openpathsampling.netcdfplus')\n", (683, 714), False, 'import logging\n'), ((1771, 1910), 'numpy.array', 'np.array', (['[[2.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.5, 1.0, 0.0], [0.25, 0.5, 0.0], [0.0,\n 0.25, 3.0], [0.0, 0.125, 1.5], [0.0, 0.0, 0.75]]'], {}), '([[2.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.5, 1.0, 0.0], [0.25, 0.5, \n 0.0], [0.0, 0.25, 3.0], [0.0, 0.125, 1.5], [0.0, 0.0, 0.75]])\n', (1779, 1910), True, 'import numpy as np\n'), ((2182, 2267), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'self.expected_cleaned', 'index': 'self.index', 'columns': 'self.columns'}), '(data=self.expected_cleaned, index=self.index, columns=self.columns\n )\n', (2194, 2267), True, 'import pandas as pd\n'), ((2355, 2386), 'openpathsampling.numerics.WHAM', 
'paths.numerics.WHAM', ([], {'cutoff': '(0.1)'}), '(cutoff=0.1)\n', (2374, 2386), True, 'import openpathsampling as paths\n'), ((2508, 2573), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['cleaned.values', 'self.expected_cleaned'], {}), '(cleaned.values, self.expected_cleaned)\n', (2534, 2573), True, 'import numpy as np\n'), ((2685, 2744), 'openpathsampling.numerics.WHAM', 'paths.numerics.WHAM', ([], {'cutoff': '(0.1)', 'interfaces': '[0.0, 0.2, 0.3]'}), '(cutoff=0.1, interfaces=[0.0, 0.2, 0.3])\n', (2704, 2744), True, 'import openpathsampling as paths\n'), ((3418, 3552), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [1.0, 1.0, 0.0], [0.0, \n 1.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [1.0, 1.0, 0.0\n ], [0.0, 1.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]])\n', (3426, 3552), True, 'import numpy as np\n'), ((3730, 3786), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['unweighting.values', 'expected'], {}), '(unweighting.values, expected)\n', (3756, 3786), True, 'import numpy as np\n'), ((3894, 3944), 'numpy.array', 'np.array', (['[2.0, 1.0, 1.5, 0.75, 3.25, 1.625, 0.75]'], {}), '([2.0, 1.0, 1.5, 0.75, 3.25, 1.625, 0.75])\n', (3902, 3944), True, 'import numpy as np\n'), ((3953, 4008), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['sum_k_Hk_Q.values', 'expected'], {}), '(sum_k_Hk_Q.values, expected)\n', (3979, 4008), True, 'import numpy as np\n'), ((4113, 4142), 'numpy.array', 'np.array', (['[3.75, 1.875, 5.25]'], {}), '([3.75, 1.875, 5.25])\n', (4121, 4142), True, 'import numpy as np\n'), ((4151, 4205), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['n_entries.values', 'expected'], {}), '(n_entries.values, expected)\n', (4177, 4205), True, 'import numpy as np\n'), ((4525, 4674), 'numpy.array', 'np.array', (['[[3.75, 0.0, 0.0], [3.75, 0.0, 0.0], [3.75, 1.875, 0.0], [3.75, 1.875, 0.0],\n [0.0, 1.875, 
5.25], [0.0, 1.875, 5.25], [0.0, 0.0, 5.25]]'], {}), '([[3.75, 0.0, 0.0], [3.75, 0.0, 0.0], [3.75, 1.875, 0.0], [3.75, \n 1.875, 0.0], [0.0, 1.875, 5.25], [0.0, 1.875, 5.25], [0.0, 0.0, 5.25]])\n', (4533, 4674), True, 'import numpy as np\n'), ((4853, 4913), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['weighted_counts.values', 'expected'], {}), '(weighted_counts.values, expected)\n', (4879, 4913), True, 'import numpy as np\n'), ((5670, 5722), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['lnZ.values', 'expected_lnZ'], {}), '(lnZ.values, expected_lnZ)\n', (5696, 5722), True, 'import numpy as np\n'), ((6531, 6650), 'numpy.array', 'np.array', (['[[2.0, 1.0, 5.0], [1.0, 1.0, 5.0], [0.5, 1.0, 5.0], [0.1, 0.2, 5.0], [0.0, \n 0.04, 1.0], [0.0, 0.02, 0.2]]'], {}), '([[2.0, 1.0, 5.0], [1.0, 1.0, 5.0], [0.5, 1.0, 5.0], [0.1, 0.2, 5.0\n ], [0.0, 0.04, 1.0], [0.0, 0.02, 0.2]])\n', (6539, 6650), True, 'import numpy as np\n'), ((6820, 6894), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'input_data', 'index': 'self.index[0:6]', 'columns': 'self.columns'}), '(data=input_data, index=self.index[0:6], columns=self.columns)\n', (6832, 6894), True, 'import pandas as pd\n'), ((7112, 7145), 'numpy.array', 'np.array', (['[1.0, 0.25, 0.25 * 0.2]'], {}), '([1.0, 0.25, 0.25 * 0.2])\n', (7120, 7145), True, 'import numpy as np\n'), ((7329, 7385), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['wham_hist.values', 'self.exact'], {}), '(wham_hist.values, self.exact)\n', (7355, 7385), True, 'import numpy as np\n'), ((7489, 7589), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [0.5, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.5, 1.0], [0.0, \n 0.1, 0.2]]'], {}), '([[1.0, 0.0, 0.0], [0.5, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.5, 1.0\n ], [0.0, 0.1, 0.2]])\n', (7497, 7589), True, 'import numpy as np\n'), ((7718, 7790), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'bad_data', 'index': 'self.index[0:5]', 'columns': 'self.columns'}), 
'(data=bad_data, index=self.index[0:5], columns=self.columns)\n', (7730, 7790), True, 'import pandas as pd\n'), ((8003, 8120), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [0.5, 0.0, 0.0], [0.2, 1.0, 0.0], [0.1, 0.5, 0.0], [0.0, \n 0.0, 1.0], [0.0, 0.0, 0.5]]'], {}), '([[1.0, 0.0, 0.0], [0.5, 0.0, 0.0], [0.2, 1.0, 0.0], [0.1, 0.5, 0.0\n ], [0.0, 0.0, 1.0], [0.0, 0.0, 0.5]])\n', (8011, 8120), True, 'import numpy as np\n'), ((8278, 8350), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'bad_data', 'index': 'self.index[0:6]', 'columns': 'self.columns'}), '(data=bad_data, index=self.index[0:6], columns=self.columns)\n', (8290, 8350), True, 'import pandas as pd\n'), ((8562, 8708), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0], [0.5, 1.0, 0.0, 0.0], [0.1, 0.2, 0.0, 0.0], [0.0, \n 0.0, 1.0, 0.0], [0.0, 0.0, 0.5, 1.0], [0.0, 0.0, 0.1, 0.2]]'], {}), '([[1.0, 0.0, 0.0, 0.0], [0.5, 1.0, 0.0, 0.0], [0.1, 0.2, 0.0, 0.0],\n [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.5, 1.0], [0.0, 0.0, 0.1, 0.2]])\n', (8570, 8708), True, 'import numpy as np\n'), ((8867, 8962), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'bad_data', 'index': 'self.index[0:6]', 'columns': "(self.columns + ['Interface 4'])"}), "(data=bad_data, index=self.index[0:6], columns=self.columns + [\n 'Interface 4'])\n", (8879, 8962), True, 'import pandas as pd\n'), ((2893, 3032), 'numpy.array', 'np.array', (['[[2.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.5, 1.0, 0.0], [0.25, 0.5, 3.0], [0.0,\n 0.25, 3.0], [0.0, 0.125, 1.5], [0.0, 0.0, 0.75]]'], {}), '([[2.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.5, 1.0, 0.0], [0.25, 0.5, \n 3.0], [0.0, 0.25, 3.0], [0.0, 0.125, 1.5], [0.0, 0.0, 0.75]])\n', (2901, 3032), True, 'import numpy as np\n'), ((6435, 6455), 'numpy.array', 'np.array', (['self.exact'], {}), '(self.exact)\n', (6443, 6455), True, 'import numpy as np\n'), ((7197, 7215), 'numpy.log', 'np.log', (['expected_Z'], {}), '(expected_Z)\n', (7203, 7215), True, 'import numpy as np\n'), ((5016, 5033), 'past.utils.old_div', 'old_div', 
(['(1.0)', '(4.0)'], {}), '(1.0, 4.0)\n', (5023, 5033), False, 'from past.utils import old_div\n'), ((5034, 5053), 'past.utils.old_div', 'old_div', (['(7.0)', '(120.0)'], {}), '(7.0, 120.0)\n', (5041, 5053), False, 'from past.utils import old_div\n'), ((1612, 1661), 'numpy.array', 'np.array', (['[self.iface1, self.iface2, self.iface3]'], {}), '([self.iface1, self.iface2, self.iface3])\n', (1620, 1661), True, 'import numpy as np\n'), ((6111, 6128), 'past.utils.old_div', 'old_div', (['(1.0)', '(4.0)'], {}), '(1.0, 4.0)\n', (6118, 6128), False, 'from past.utils import old_div\n'), ((6129, 6148), 'past.utils.old_div', 'old_div', (['(7.0)', '(120.0)'], {}), '(7.0, 120.0)\n', (6136, 6148), False, 'from past.utils import old_div\n')] |
#!/usr/bin/env python
from os.path import join, split
import numpy as np
import matplotlib.pyplot as plt
import pandas
from dtk import process
from gaitanalysis import gait, controlid
from gaitanalysis.utils import _percent_formatter
# Locate the perturbation data file relative to this script's location,
# so the script works regardless of the current working directory.
directory = split(__file__)[0]
perturbation_data = gait.WalkingData(join(directory,
                                         '../data/perturbation.h5'))
# Controller identification.
# Sensor signals: angle and rate for each of the six joints
# (right/left ankle, knee, hip) -- two sensors per joint.
sensors = ['Right.Ankle.Flexion.Angle',
           'Right.Ankle.Flexion.Rate',
           'Right.Knee.Flexion.Angle',
           'Right.Knee.Flexion.Rate',
           'Right.Hip.Flexion.Angle',
           'Right.Hip.Flexion.Rate',
           'Left.Ankle.Flexion.Angle',
           'Left.Ankle.Flexion.Rate',
           'Left.Knee.Flexion.Angle',
           'Left.Knee.Flexion.Rate',
           'Left.Hip.Flexion.Angle',
           'Left.Hip.Flexion.Rate']
# Control signals: one joint moment per joint, in the same joint order.
controls = ['Right.Ankle.PlantarFlexion.Moment',
            'Right.Knee.PlantarFlexion.Moment',
            'Right.Hip.PlantarFlexion.Moment',
            'Left.Ankle.PlantarFlexion.Moment',
            'Left.Knee.PlantarFlexion.Moment',
            'Left.Hip.PlantarFlexion.Moment']
perturbation_data_solver = \
    controlid.SimpleControlSolver(perturbation_data.steps, sensors,
                                  controls)
# Restrict the identification to a block-diagonal gain matrix: control i
# may only depend on sensors 2*i and 2*i + 1 (its own joint's angle and
# rate), all other gains are omitted.
gain_omission_matrix = np.zeros((len(controls), len(sensors))).astype(bool)
for i, row in enumerate(gain_omission_matrix):
    row[2 * i:2 * i + 2] = True
gains, controls_star, variance, gain_var, control_var, estimated_controls = \
    perturbation_data_solver.solve(gain_omission_matrix=gain_omission_matrix)
# Gain plot
fig_width_pt = 266.0125  # column width in abstract
inches_per_pt = 1.0 / 72.27
# Matplotlib settings sized for a single-column abstract figure.
# NOTE(review): 'text.fontsize' is an old rcParam name (later renamed
# 'font.size'); confirm it is valid for the matplotlib version in use.
params = {'backend': 'ps',
          'font.family': 'serif',
          'font.serif': 'times',
          'axes.labelsize': 6,
          'text.fontsize': 6,
          'legend.fontsize': 6,
          'xtick.labelsize': 4,
          'ytick.labelsize': 4,
          'axes.titlesize': 6,
          'text.usetex': True,
          'figure.figsize': (fig_width_pt * inches_per_pt,
                             fig_width_pt * inches_per_pt * 0.70)}
plt.rcParams.update(params)
# One subplot per joint (rows) and per sensor type (columns); the right
# and left legs are overlaid in blue and red on each subplot.
fig, axes = plt.subplots(3, 2, sharex=True)
for i, row in enumerate(['Ankle', 'Knee', 'Hip']):
    for j, (col, unit) in enumerate(zip(['Angle', 'Rate'],
                                        ['Nm/rad', r'Nm $\cdot$ s/rad'])):
        # 'marker' is unused below; the plot call hard-codes marker='o'.
        for side, marker, color in zip(['Right', 'Left'],
                                       ['o', 'o'],
                                       ['Blue', 'Red']):
            row_label = '.'.join([side, row, 'PlantarFlexion.Moment'])
            col_label = '.'.join([side, row, 'Flexion', col])
            gain_row_idx = controls.index(row_label)
            gain_col_idx = sensors.index(col_label)
            gains_per = gains[:, gain_row_idx, gain_col_idx]
            # One-sigma uncertainty band for the identified gain.
            sigma = np.sqrt(gain_var[:, gain_row_idx, gain_col_idx])
            percent_of_gait_cycle = \
                perturbation_data_solver.identification_data.iloc[0].index.values.astype(float)
            xlim = (percent_of_gait_cycle[0], percent_of_gait_cycle[-1])
            if side == 'Left':
                # Shift the left leg's signal by 50% of the gait cycle so
                # both legs share one phase axis.
                # This only works for an even number of samples.
                if len(percent_of_gait_cycle) % 2 != 0:
                    # Was StandardError, which does not exist in Python 3
                    # (raising it there is a NameError). ValueError works
                    # under both Python 2 and 3.
                    raise ValueError("Doesn't work with odd samples.")
                # NOTE(review): a sample exactly at 0.5 would be dropped
                # by the strict inequalities below -- confirm intended.
                first = percent_of_gait_cycle[percent_of_gait_cycle < 0.5] + 0.5
                second = percent_of_gait_cycle[percent_of_gait_cycle > 0.5] - 0.5
                percent_of_gait_cycle = np.hstack((first, second))
                # Sort the phase axis and reorder gains/sigma to match.
                sort_idx = np.argsort(percent_of_gait_cycle)
                percent_of_gait_cycle = percent_of_gait_cycle[sort_idx]
                gains_per = gains_per[sort_idx]
                sigma = sigma[sort_idx]
            axes[i, j].fill_between(percent_of_gait_cycle,
                                    gains_per - sigma,
                                    gains_per + sigma,
                                    alpha=0.5,
                                    color=color)
            axes[i, j].plot(percent_of_gait_cycle, gains_per,
                            marker='o',
                            ms=2,
                            color=color,
                            label=side)
        #axes[i, j].set_title(' '.join(col_label.split('.')[1:]))
        axes[i, j].set_title(r"{}: {} $\rightarrow$ Moment".format(row, col))
        axes[i, j].set_ylabel(unit)
        if i == 2:
            axes[i, j].set_xlabel(r'\% of Gait Cycle')
            axes[i, j].xaxis.set_major_formatter(_percent_formatter)
        axes[i, j].set_xlim(xlim)
plt.tight_layout()
fig.savefig(join(directory, '../fig/gains.pdf'))
# Fit plot.
# NOTE(review): ``.iteritems()`` is Python-2-era pandas (renamed
# ``.items()`` and removed in pandas >= 2.0); this script appears to
# target Python 2 -- confirm before running on a modern stack.
estimated_walking = \
    pandas.concat([df for k, df in estimated_controls.iteritems()],
                  ignore_index=True)
actual_walking = \
    pandas.concat([df for k, df in
                   perturbation_data_solver.validation_data.iteritems()],
                  ignore_index=True)
# Smaller figure for the measured-vs-estimated torque comparison.
params = {'figure.figsize': (fig_width_pt * inches_per_pt,
                             fig_width_pt * inches_per_pt * 0.4),
          'axes.labelsize': 8,
          'xtick.labelsize': 6,
          'ytick.labelsize': 6,
          }
plt.rcParams.update(params)
fig, ax = plt.subplots(1)
sample_number = actual_walking.index.values
measured = actual_walking['Right.Ankle.PlantarFlexion.Moment'].values
predicted = estimated_walking['Right.Ankle.PlantarFlexion.Moment'].values
# Currently unused: intended for the commented-out errorbar plot below.
std_of_predicted = np.sqrt(variance) * np.ones_like(predicted)
error = measured - predicted
# NOTE(review): np.linalg.norm(error) is the 2-norm (a scalar), so the
# trailing .mean() looks like a no-op and this is not a true RMS; the
# variable is also unused below -- verify intent.
rms = np.sqrt(np.linalg.norm(error).mean())
r_squared = process.coefficient_of_determination(measured, predicted)
ax.plot(sample_number, measured, color='black')
ax.plot(sample_number, predicted, color='blue', ms=4)
#ax.errorbar(sample_number, predicted, yerr=std_of_predicted, fmt='.', ms=4)
ax.set_ylabel('Right Ankle Torque')
ax.set_xlabel('Sample Number')
# TODO : Figure out how to get matplotlib + tex to print the percent sign.
ax.legend(('Measured', r'Estimated [VAF={:1.0f}\%]'.format(r_squared * 100.0)))
ax.set_xlim((100, 200))
plt.tight_layout()
fig.savefig(join(directory, '../fig/fit.pdf'))
| [
"numpy.ones_like",
"numpy.sqrt",
"numpy.hstack",
"gaitanalysis.controlid.SimpleControlSolver",
"os.path.join",
"os.path.split",
"numpy.argsort",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.tight_layout",
"numpy.linalg.norm",
"matplotlib.pyplot.subplots",
"dtk.process.coefficient_of... | [((1197, 1270), 'gaitanalysis.controlid.SimpleControlSolver', 'controlid.SimpleControlSolver', (['perturbation_data.steps', 'sensors', 'controls'], {}), '(perturbation_data.steps, sensors, controls)\n', (1226, 1270), False, 'from gaitanalysis import gait, controlid\n'), ((2152, 2179), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (['params'], {}), '(params)\n', (2171, 2179), True, 'import matplotlib.pyplot as plt\n'), ((2193, 2224), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(2)'], {'sharex': '(True)'}), '(3, 2, sharex=True)\n', (2205, 2224), True, 'import matplotlib.pyplot as plt\n'), ((4821, 4839), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4837, 4839), True, 'import matplotlib.pyplot as plt\n'), ((5430, 5457), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (['params'], {}), '(params)\n', (5449, 5457), True, 'import matplotlib.pyplot as plt\n'), ((5469, 5484), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {}), '(1)\n', (5481, 5484), True, 'import matplotlib.pyplot as plt\n'), ((5822, 5879), 'dtk.process.coefficient_of_determination', 'process.coefficient_of_determination', (['measured', 'predicted'], {}), '(measured, predicted)\n', (5858, 5879), False, 'from dtk import process\n'), ((6307, 6325), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6323, 6325), True, 'import matplotlib.pyplot as plt\n'), ((248, 263), 'os.path.split', 'split', (['__file__'], {}), '(__file__)\n', (253, 263), False, 'from os.path import join, split\n'), ((305, 347), 'os.path.join', 'join', (['directory', '"""../data/perturbation.h5"""'], {}), "(directory, '../data/perturbation.h5')\n", (309, 347), False, 'from os.path import join, split\n'), ((4853, 4888), 'os.path.join', 'join', (['directory', '"""../fig/gains.pdf"""'], {}), "(directory, '../fig/gains.pdf')\n", (4857, 4888), False, 'from os.path import join, split\n'), ((5693, 5710), 'numpy.sqrt', 
'np.sqrt', (['variance'], {}), '(variance)\n', (5700, 5710), True, 'import numpy as np\n'), ((5713, 5736), 'numpy.ones_like', 'np.ones_like', (['predicted'], {}), '(predicted)\n', (5725, 5736), True, 'import numpy as np\n'), ((6339, 6372), 'os.path.join', 'join', (['directory', '"""../fig/fit.pdf"""'], {}), "(directory, '../fig/fit.pdf')\n", (6343, 6372), False, 'from os.path import join, split\n'), ((2899, 2947), 'numpy.sqrt', 'np.sqrt', (['gain_var[:, gain_row_idx, gain_col_idx]'], {}), '(gain_var[:, gain_row_idx, gain_col_idx])\n', (2906, 2947), True, 'import numpy as np\n'), ((5780, 5801), 'numpy.linalg.norm', 'np.linalg.norm', (['error'], {}), '(error)\n', (5794, 5801), True, 'import numpy as np\n'), ((3642, 3668), 'numpy.hstack', 'np.hstack', (['(first, second)'], {}), '((first, second))\n', (3651, 3668), True, 'import numpy as np\n'), ((3750, 3783), 'numpy.argsort', 'np.argsort', (['percent_of_gait_cycle'], {}), '(percent_of_gait_cycle)\n', (3760, 3783), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Algorithms for TableDataExtractor.
.. codeauthor:: <NAME> <<EMAIL>>
"""
import logging
import numpy as np
from sympy import Symbol
from sympy import factor_list, factor
from tabledataextractor.exceptions import MIPSError
from tabledataextractor.table.parse import StringParser, CellParser
# Module-level logger, named after this module.
log = logging.getLogger(__name__)
def empty_string(string, regex=r'^([\s\-\–\—\"]+)?$'):
    """
    Decides whether *string* should be treated as an empty cell.

    A string counts as empty when it fully matches *regex*, which by
    default accepts only whitespace, dash and quote characters (or a
    completely blank string).

    :param string: Input string for testing
    :type string: str
    :param regex: The regular expression which defines an empty cell (can be tweaked).
    :type regex: str
    :return: True/False
    """
    return StringParser(regex).parse(string, method='fullmatch')
def empty_cells(array, regex=r'^([\s\-\–\—\"]+)?$'):
    """
    Builds a boolean mask over *array*: `True` marks an empty cell and
    `False` a non-empty one, where "empty" is defined by *regex*.

    :param regex: The regular expression which defines an empty cell (can be tweaked).
    :type regex: str
    :param array: Input array to return the mask for
    :type array: numpy array
    """
    mask = np.full_like(array, fill_value=False, dtype=bool)
    parser = CellParser(regex)
    is_2d = (array.ndim == 2)
    for position in parser.parse(array, method='fullmatch'):
        if is_2d:
            mask[position[0], position[1]] = True
        elif array.ndim == 1:
            mask[position[0]] = True
    return mask
def standardize_empty(array):
    """
    Produces a copy of *array* in which every cell recognized as empty
    by :func:`empty_string` is replaced with the string 'NoValue'.

    :param array: Input array
    :type array: numpy.array
    :return: Array with standardized empty cells
    """
    result = np.copy(array)
    for r, table_row in enumerate(result):
        for c, cell in enumerate(table_row):
            if empty_string(cell):
                result[r, c] = 'NoValue'
    return result
def pre_clean(array):
    """
    Removes empty and duplicate rows and columns that extend over the whole table.

    :param array: Input Table object
    :type array: Numpy array
    """
    pre_cleaned_table = np.copy(array)
    # Emptiness mask is computed ONCE on the original array; both the row
    # and the column deletions below are decided from this single mask.
    # NOTE(review): a column that becomes empty only after the empty rows
    # are removed is therefore not deleted -- confirm this is intended.
    array_empty = empty_cells(array)
    # find empty rows and delete them
    empty_rows = []
    for row_index, row in enumerate(array_empty):
        if False not in row:
            empty_rows.append(row_index)
    log.debug("Empty rows {} deleted.".format(empty_rows))
    pre_cleaned_table = np.delete(pre_cleaned_table, empty_rows, axis=0)
    # find empty columns and delete them
    empty_columns = []
    for column_index, column in enumerate(array_empty.T):
        if False not in column:
            empty_columns.append(column_index)
    log.debug("Empty columns {} deleted.".format(empty_columns))
    # Column indices are unaffected by the row deletion above, so indices
    # taken from the original mask still apply here.
    pre_cleaned_table = np.delete(pre_cleaned_table, empty_columns, axis=1)
    # delete duplicate rows that extend over the whole table
    _, indices = np.unique(pre_cleaned_table, axis=0, return_index=True)
    # for logging only, which rows have been removed
    removed_rows = []
    for row_index in range(0, len(pre_cleaned_table)):
        if row_index not in indices:
            removed_rows.append(row_index)
    log.debug("Duplicate rows {} removed.".format(removed_rows))
    # deletion: keep the first occurrence of each row, in original order
    pre_cleaned_table = pre_cleaned_table[np.sort(indices)]
    # delete duplicate columns that extend over the whole table
    _, indices = np.unique(pre_cleaned_table, axis=1, return_index=True)
    # for logging only, which columns have been removed
    removed_columns = []
    for column_index in range(0, len(pre_cleaned_table.T)):
        if column_index not in indices:
            removed_columns.append(column_index)
    log.debug("Duplicate columns {} removed.".format(removed_columns))
    # deletion: keep the first occurrence of each column, in original order
    pre_cleaned_table = pre_cleaned_table[:, np.sort(indices)]
    # clean-up unicode characters
    pre_cleaned_table = clean_unicode(pre_cleaned_table)
    return pre_cleaned_table
def clean_unicode(array):
    """
    Replaces problematic unicode characters in a given numpy array.

    Currently substitutes non-breaking spaces (U+00A0) with regular
    spaces. The input array is not modified; a new array is returned.

    :param array: input array
    :type array: numpy.array
    :return: cleaned array
    """
    # np.char.replace is the public alias of the former
    # np.core.defchararray.replace; np.core is private and was removed
    # from the public API in NumPy 2.0, so the old spelling breaks there.
    return np.char.replace(array, '\xa0', ' ')
def find_cc4(table_object):
    """
    Searches for critical cell `CC4`.

    Scans the pre-cleaned table bottom-up and returns the position of the
    last cell in the first row whose non-empty cells form a majority.
    Rows dominated by empty cells are assumed to be notes/footnotes
    rather than data. Returns ``None`` if no such row exists.

    :param table_object: Input Table object
    :type table_object: ~tabledataextractor.table.table.Table
    :return: cc4
    """
    emptiness = table_object.pre_cleaned_table_empty
    total_rows = len(table_object.pre_cleaned_table)
    # Walk upwards from the bottom row.
    for row_index in reversed(range(total_rows)):
        current_row = emptiness[row_index]
        n_columns = len(current_row)
        n_full = sum(1 for cell in current_row if not cell)
        # Majority of full cells -> this row belongs to the data region.
        if n_full > n_columns // 2:
            return row_index, n_columns - 1
def duplicate_rows(table):
    """
    Returns True if there are duplicate rows in the table and False if there are no duplicate rows

    :param table:
    :return: True or False
    """
    # Scalars and empty arrays cannot contain duplicates.
    if table.ndim == 0 or not table.size:
        return False
    _, first_occurrences = np.unique(table, axis=0, return_index=True)
    # Fewer unique rows than total rows means at least one duplicate.
    return len(table) > len(first_occurrences)
def duplicate_columns(table):
    """
    Returns True if there are duplicate columns in the table and False if there are no duplicate columns

    :param table:
    :return: True or False
    """
    transposed = table.T
    # Scalars and empty arrays cannot contain duplicates.
    if transposed.ndim == 0 or not transposed.size:
        return False
    _, first_occurrences = np.unique(transposed, axis=0, return_index=True)
    # Fewer unique columns than total columns means at least one duplicate.
    return len(transposed) > len(first_occurrences)
def find_cc1_cc2(table_object, cc4, array):
"""
Main MIPS (*Minimum Indexing Point Search*) algorithm. According to Embley et al., *DOI: 10.1007/s10032-016-0259-1*.
Searches for critical cells `CC2` and `CC3`.
MIPS locates the critical cells that define the minimum row and column headers needed to index
every data cell.
:param table_object: Input Table object
:type table_object: ~tabledataextractor.table.table.Table
:param cc4: Position of `CC4` cell found with ``find_cc4()``
:param array: table to search for `CC1` and `CC2`
:type array: numpy array
:type cc4: (int, int)
:return: cc1, cc2
"""
# Initialize
cc2 = None
c_max = cc4[1]
r_max = cc4[0]
r1 = 0
c1 = 0
r2 = r_max - 1
c2 = 0
max_area = 0
def table_slice_cc2(table, r2, r_max, c1, c2):
"""
Function to cut the correct slices out of array for `CC2 `in ``find_cc1_cc2()``.
Cuts out the next row and column header candidates from the pre-cleaned table.
:param table: pre-cleaned table
:param r2: current r2 parameter in MIPS algorithm
:param r_max: r_max parameter in MIPS algorithm
:param c1: first column for MIPS algorithm
:param c2: current c2 parameter for MIPS algorithm
:return: (section_1, section_2)
"""
# one more row and column index than in the published pseudocode is needed,
# since the a:b notation in python doesn't include b
if r2 + 1 == r_max and c1 == c2:
section_1 = table[r2 + 1, c1]
elif r2 + 1 == r_max and c1 != c2:
section_1 = table[r2 + 1, c1:c2 + 1]
elif r2 + 1 != r_max and c1 != c2:
section_1 = table[r2 + 1:r_max + 1, c1:c2 + 1]
elif r2 + 1 != r_max and c1 == c2:
section_1 = table[r2 + 1:r_max + 1, c1]
else:
log.critical("Not defined section_1, r2+1= {}, r_max= {}, c1= {}, c2= {}".format(r2 + 1, r_max, c1, c2))
section_1 = None
# contrary to the published pseudocode the row maximum is r2, not r2-1
# one more row and column index than in the published pseudocode is needed,
# since the a:b notation in python doesn't include b
if r1 == r2 and c2 + 1 == c_max:
section_2 = table[r1, c2 + 1]
elif r1 == r2 and c2 + 1 != c_max:
section_2 = table[r1, c2 + 1: c_max + 1]
elif r1 != r2 and c2 + 1 != c_max:
section_2 = table[r1: r2 + 1, c2 + 1: c_max + 1]
elif r1 != r2 and c2 + 1 == c_max:
section_2 = table[r1: r2 + 1, c2 + 1]
else:
log.critical(
"Not defined section_2, r2-1= {}, r1= {}, c2+1= {}, c_max= {}".format(r2 - 1, r1, c2 + 1, c_max))
section_2 = None
return section_1, section_2
def table_slice_1_cc1(table, r1, r2, c2, c_max):
"""
Function to cut a correct slice out of array for CC1 in _find_cc1_cc2().
Cuts out the column header.
"""
# one more row and column index than in the published pseudocode is needed,
# since the a:b notation in python doesn't include b
# contrary to the published pseudocode, the correct range is [r1:r2,c2+1:c_max] and not [r1+1:r2,c2+1:c_max]
if r1 == r2 and c2 + 1 == c_max:
section = table[r1, c2 + 1]
elif r1 == r2 and c2 + 1 != c_max:
section = table[r1, c2 + 1:c_max + 1]
elif r1 != r2 and c2 + 1 != c_max:
section = table[r1: r2 + 1, c2 + 1:c_max + 1]
elif r1 != r2 and c2 + 1 == c_max:
section = table[r1: r2 + 1, c2 + 1]
else:
log.critical(
"Not defined section 1 for cc1, r1+1= {}, r2= {}, c2+1= {}, c_max= {}".format(r1 + 1, r2, c2 + 1,
c_max))
section = None
return section
def table_slice_2_cc1(table, r2, r_max, c1, c2):
    """
    Cuts the correct slice out of *table* for CC1 in _find_cc1_cc2().
    Cuts out the row header.

    The four cases are needed because NumPy drops a dimension when a scalar
    index is used instead of a slice. One extra index is used everywhere since
    Python's a:b notation excludes b. Contrary to the published pseudocode the
    correct range is [r2:r_max, c1:c2] and not [r2+1:c2, c1+1:r_max].
    """
    last_row = r2 + 1 == r_max
    single_col = c1 == c2
    if last_row:
        section = table[r2 + 1, c1] if single_col else table[r2 + 1, c1:c2 + 1]
    else:
        section = table[r2 + 1:r_max + 1, c1] if single_col else table[r2 + 1:r_max + 1, c1:c2 + 1]
    return section
# MAIN MIPS algorithm
# Locate candidate MIPs by finding the minimum indexing headers:
# This is significantly altered compared to the published pseudocode, which is flawed.
# The pseudocode clearly does not return cc2 if the column has not been changed and it doesn't
# discriminate between duplicate rows in the row header vs duplicate columns in the column header
while c2 < c_max and r2 >= r1:
log.debug("Entering loop: r_max= {}, c_max= {}, c1= {}, c2= {}, r1= {}, r2= {}, cc2= {}"
.format(r_max, c_max, c1, c2, r1, r2, cc2))
temp_section_1, temp_section_2 = table_slice_cc2(array, r2, r_max, c1, c2)
log.debug("temp_section_1:\n{}".format(temp_section_1))
log.debug("temp_section_2:\n{}".format(temp_section_2))
log.debug("duplicate_rows= {}, duplicate_columns= {}".
format(duplicate_rows(temp_section_1), duplicate_rows(temp_section_2)))
if not duplicate_rows(temp_section_1) and not duplicate_columns(temp_section_2):
if table_object.configs['use_max_data_area']:
data_area = (r_max - r2) * (c_max - c2)
log.debug("The data area of the new candidate C2= {} is *1: {}".format((r2, c2), data_area))
log.debug("Data area:\n{}".format(array[r2 + 1:r_max + 1, c2 + 1:c_max + 1]))
if data_area >= max_area:
max_area = data_area
cc2 = (r2, c2)
log.debug("CC2= {}".format(cc2))
r2 = r2 - 1
else:
cc2 = (r2, c2)
log.debug("CC2= {}".format(cc2))
r2 = r2 - 1
elif duplicate_rows(temp_section_1) and not duplicate_columns(temp_section_2):
c2 = c2 + 1
if table_object.configs['use_max_data_area']:
data_area = (r_max - r2) * (c_max - c2)
log.debug("The data area of the new candidate C2= {} is *2: {}".format((r2, c2), data_area))
log.debug("Data area:\n{}".format(array[r2 + 1:r_max + 1, c2 + 1:c_max + 1]))
if data_area >= max_area:
max_area = data_area
cc2 = (r2, c2)
log.debug("CC2= {}".format(cc2))
else:
cc2 = (r2, c2)
log.debug("CC2= {}".format(cc2))
elif duplicate_rows(temp_section_1) and duplicate_columns(temp_section_2):
c2 = c2 + 1
r2 = r2 + 1
if table_object.configs['use_max_data_area']:
data_area = (r_max - r2) * (c_max - c2)
log.debug("The data area of the new candidate C2= {} is *3: {}".format((r2, c2), data_area))
log.debug("Data area:\n{}".format(array[r2 + 1:r_max + 1, c2 + 1:c_max + 1]))
if data_area >= max_area:
max_area = data_area
cc2 = (r2, c2)
log.debug("CC2= {}".format(cc2))
else:
cc2 = (r2, c2)
# if none of those above is satisfied, just finish the loop
else:
r2 = r2 + 1
if table_object.configs['use_max_data_area']:
data_area = (r_max - r2) * (c_max - c2)
log.debug("The data area of the new candidate C2= {} is *4: {}".format((r2, c2), data_area))
log.debug("Data area:\n{}".format(array[r2 + 1:r_max + 1, c2 + 1:c_max + 1]))
if data_area >= max_area:
max_area = data_area
cc2 = (r2, c2)
log.debug("CC2= {}".format(cc2))
break
else:
cc2 = (r2, c2)
break
log.debug(
"Ended loop with: r_max= {}, c_max= {}, c1= {}, c2= {}, r1= {}, r2= {}, cc2= {}\n\n\n\n".format(r_max,
c_max, c1,
c2, r1, r2,
cc2))
# re-initialization of r2 and c2 from cc2; missing in the pseudocode
r2 = cc2[0]
c2 = cc2[1]
# Locate CC1 at intersection of the top row and the leftmost column necessary for indexing:
log.debug("Potentially duplicate columns:\n{}".format(table_slice_1_cc1(array, r1, r2, c2, c_max)))
while not duplicate_columns(table_slice_1_cc1(array, r1, r2, c2, c_max)) and r1 <= r2:
log.debug("Potentially duplicate columns:\n{}".format(table_slice_1_cc1(array, r1, r2, c2, c_max)))
log.debug("Duplicate columns= {}".format(duplicate_columns(table_slice_1_cc1(array, r1, r2, c2, c_max))))
r1 = r1 + 1
log.debug("r1= {}".format(r1))
log.debug("Potentially duplicate rows:\n{}".format(table_slice_2_cc1(array, r2, r_max, c1, c2)))
while not duplicate_rows(table_slice_2_cc1(array, r2, r_max, c1, c2)) and c1 <= c2:
log.debug("Potentially duplicate rows:\n{}".format(table_slice_2_cc1(array, r2, r_max, c1, c2)))
log.debug("Duplicate rows= {}".format(duplicate_rows(table_slice_2_cc1(array, r2, r_max, c1, c2))))
c1 = c1 + 1
log.debug("c1= {}".format(c1))
# final cc1 is (r1-1,c1-1), because the last run of the while loops doesn't count
# a problem could arise if the code never stepped through the while loops,
# returning a cc1 with a negative index.
# however, this should never happen since the final headers CANNOT have duplicate rows/columns,
# by definition of cc2.
# hence, the assertions:
try:
assert not duplicate_columns(table_slice_1_cc1(array, r1=0, r2=cc2[0], c2=cc2[1], c_max=c_max))
assert not duplicate_rows(table_slice_2_cc1(array, r2=cc2[0], r_max=r_max, c1=0, c2=cc2[1]))
assert r1 >= 0 and c1 >= 0
cc1 = (r1 - 1, c1 - 1)
except AssertionError:
raise MIPSError("Error in _find_cc1_cc2")
# provision for using the uppermost row possible for cc1, if titles are turned of
if not table_object.configs['use_title_row']:
if cc1[0] != 0:
log.debug("METHOD. Title row removed, cc1 was shifted from {} to {}".format(cc1, (0, cc1[1])))
cc1 = (0, cc1[1])
table_object.history._title_row_removed = True
else:
table_object.history._title_row_removed = False
# provision for using only the first column of the table as row header
if table_object.configs['row_header'] is not None:
row_header = table_object.configs['row_header']
assert isinstance(row_header, int)
if table_object.history.prefixed_rows:
row_header += 1
left = min(cc1[1], row_header)
cc1 = (cc1[0], left)
cc2 = (cc2[0], row_header)
# provision for using only the first row of the table as column header
if table_object.configs['col_header'] is not None:
col_header = table_object.configs['col_header']
assert isinstance(col_header, int)
if table_object.history.prefixing_performed and not table_object.history.prefixed_rows:
col_header += 1
top = min(cc1[0], col_header)
cc1 = (top, cc1[1])
cc2 = (col_header, cc2[1])
return cc1, cc2
def find_cc3(table_object, cc2):
    """
    Searches for critical cell `CC3`, as the leftmost cell of the first filled row of the data region.

    .. rubric:: Comment on implementation

    There are two options on how to implement the search for `CC3`:

    1. With the possibility of `Notes` rows directly below the header (default):
        * the first half filled row below the header is considered as the start of the data region, just like for the `CC4` cell
        * implemented by Embley et. al.
    2. Without the possibility of `Notes` rows directly below the header:
        * the first row below the header is considered as the start of the data region
        * for scientific tables it might be more common that the first data row only has a single entry
        * this can be chosen my commenting/uncommenting the code within this function

    :param table_object: Input Table object
    :type table_object: ~tabledataextractor.table.table.Table
    :param cc2: Tuple, position of `CC2` cell found with find_cc1_cc2()
    :type cc2: (int,int)
    :return: cc3
    """
    # OPTION 1
    # scan downwards, starting with the first row below the header, until a
    # row is found in which at least half of the data cells are filled
    first_row = cc2[0] + 1
    n_rows = len(table_object.pre_cleaned_table[first_row:])
    log.debug("n_rows= {}".format(n_rows))
    for row_index in range(first_row, first_row + n_rows, 1):
        n_columns = len(table_object.pre_cleaned_table[row_index, cc2[1] + 1:])
        log.debug("n_columns= {}".format(n_columns))
        n_full = sum(
            1 for column_index in range(cc2[1] + 1, cc2[1] + 1 + n_columns, 1)
            if not table_object.pre_cleaned_table_empty[row_index, column_index]
        )
        if n_full >= int(n_columns / 2):
            return row_index, cc2[1] + 1
    raise MIPSError("No CC3 critical cell found! No data region defined.")
    # OPTION 2
    # return (cc2[0]+1,cc2[1]+1)
def find_title_row(table_object):
    """
    Searches for the topmost non-empty row.

    :param table_object: Input Table object
    :type table_object: ~tabledataextractor.table.table.Table
    :return: int
    """
    for index, empty_flags in enumerate(table_object.pre_cleaned_table_empty):
        # first row that is not entirely empty is the title row
        if not empty_flags.all():
            return index
def find_note_cells(table_object, labels_table):
    """
    Searches for all non-empty cells that have not been labelled differently.

    :param table_object: Input Table object
    :type table_object: ~tabledataextractor.table.table.Table
    :param labels_table: table that holds all the labels
    :type labels_table: Numpy array
    :return: Tuple
    """
    for r, label_row in enumerate(labels_table):
        for c, label in enumerate(label_row):
            # '/' marks an unlabelled cell; report it only if it has content
            if label == '/' and not table_object.pre_cleaned_table_empty[r, c]:
                yield r, c
def prefix_duplicate_labels(table_object, array):
    """
    Prefixes duplicate labels in first row or column where this is possible,
    by adding a new row/column containing the preceding (to the left or above) unique labels, if available.
    Nested prefixing is not supported.

    The algorithm is not completely selective and there might be cases where it's application is undesirable.
    However, on standard datasets it significantly improves table-region classification.

    Algorithm for column headers:

    1. Run MIPS, to find the old header region, without prefixing.
    2. For row in table, can *meaningful* prefixing in this row been done?
        * yes --> do prefixing and go to 3, prefixing of only one row is possible; accept prefixing only if prefixed rows/cells are above the end of the header (not in the data region), the prefixed cells can still be above the header
        * no --> go to 2, next row
    3. run MIPS to get the new header region
    4. accept prefixing only if the prefixing has not made the header region start lower than before and if it hasn't made the header region wider than before

    The algorithm has been modified from Embley et al., *DOI: 10.1007/s10032-016-0259-1*.

    :param table_object: Input Table object
    :type table_object: ~tabledataextractor.table.table.Table
    :param array: Table to use as input and to do the prefixing on
    :type array: Numpy array
    :return: Table with added rows/columns with prefixes, or, input table, if no prefixing was done
    """

    def unique(data, row_or_column):
        """Returns True if data is unique in the given row/column or False if not unique or not present."""
        count = 0
        for cell in row_or_column:
            if cell == data:
                count += 1
        return count == 1

    def prefixed_row_or_column(table):
        """
        Main algorithm for creating prefixed column/row headers.
        If a cell is not unique, it is prefixed with the first unique cell above
        (for row header) or to the left (for column header).
        Returns (row_index, new_row): the position where the new row/column has to
        be inserted into the original table and the list of prefixes, or None if no
        prefixing was possible.
        """
        unique_prefix = False
        prefixed = False
        row_index = 0
        new_row = []
        for row_index, row in enumerate(table):
            duplicated_row = []
            new_row = []
            for cell_index, cell in enumerate(row):
                # append if unique or empty cell
                if unique(cell, row) or empty_string(cell):
                    duplicated_row.append(cell)
                    new_row.append("")
                else:
                    # find the first unique cell to the left;
                    # skip the first column, which is presumably in the stub header region
                    # NOTE(review): unique_prefix is intentionally NOT reset per cell,
                    # so a prefix found for an earlier cell carries over when the scan
                    # below finds nothing — this matches the original behaviour.
                    for prefix in reversed(duplicated_row[1:]):
                        # use the prefix if it is unique and not empty
                        if unique(prefix, row) and not empty_string(prefix):
                            unique_prefix = prefix
                            break
                    # prefix the cell and append it to new row
                    if unique_prefix:
                        duplicated_row.append(unique_prefix + "/" + cell)
                        new_row.append(unique_prefix)
                        prefixed = True
                    # else, if no unique prefix was found, just append the original cell
                    else:
                        duplicated_row.append(cell)
                        new_row.append("")
            # stop at the first row where prefixing was performed
            if prefixed:
                break
        if prefixed:
            return row_index, new_row
        return None

    # MAIN ALGORITHM
    # 1. first, check the MIPS, to see what header we would have gotten without the prefixing
    #    (cc4 couldn't have changed)
    log.debug("Prefixing. Attempt to run main MIPS algorithm.")
    try:
        cc1, cc2 = find_cc1_cc2(table_object, find_cc4(table_object), array)
    except (MIPSError, TypeError):
        log.error("Prefixing was not performed due to failure of MIPS algorithm.")
        return array

    # this flag is used for the return value, if it doesn't change the original table is returned
    prefixed = False

    # 2. DO THE PREFIXING
    # prefixing of column headers
    # (computed once instead of calling prefixed_row_or_column() twice, as before)
    column_result = prefixed_row_or_column(array)
    if column_result is not None:
        row_index, new_row = column_result
        # only perform prefixing if not below the header region (above is allowed!)
        if row_index <= cc2[0]:
            log.debug("Column header prefixing, row_index= {}".format(row_index))
            log.debug("Prefixed row= {}".format(new_row))
            # Prefixing by adding new row:
            prefixed = True
            prefixed_table = np.insert(array, row_index, new_row, axis=0)
    # prefixing of row headers
    row_result = prefixed_row_or_column(array.T)
    if row_result is not None:
        column_index, new_column = row_result
        # only perform prefixing if not to the right of the header region (to the left is allowed!)
        if column_index <= cc2[1]:
            log.debug("Row header prefixing, column_index= {}".format(column_index))
            log.debug("Prefixed column= {}".format(new_column))
            # Prefixing by adding a new column.
            # NOTE(review): this discards any column-header prefixing performed
            # above (prefixed_table is rebuilt from `array`) — matches the
            # original implementation; confirm whether that is intended.
            prefixed = True
            prefixed_table = np.insert(array, column_index, new_column, axis=1)

    # 3. check the headers again, after prefixing (cc4 couldn't have changed)
    if prefixed:
        # if new headers fail, the prefixing has destroyed the table, which is not a HIT table anymore
        try:
            cc1_new, cc2_new = find_cc1_cc2(table_object, find_cc4(table_object), prefixed_table)
        except (MIPSError, TypeError):
            log.debug("Prefixing was not performed because it destroyed the table")
            return array
        # return prefixed_table only if the prefixing has not made the header start lower:
        # it can end lower (desired - not to include the data region into the header),
        # but it cannot start lower, because that would mean that we have removed some of
        # the hierarchy and added hierarchy from the left/above into a column/row
        if cc1_new[0] <= cc1[0] and cc1_new[1] <= cc1[1]:
            # additionally, the header has to end at or above where it ended before
            if cc2_new[0] <= cc2[0] and cc2_new[1] <= cc2[1]:
                table_object.history._prefixing_performed = True
                log.debug("METHOD. Prefixing was performed.")
                if len(prefixed_table.T) > len(array.T):
                    table_object.history._prefixed_rows = True
                return prefixed_table
            else:
                return array
        else:
            return array
    else:
        return array
def duplicate_spanning_cells(table_object, array):
    """
    Duplicates cell contents into appropriate spanning cells. This is sometimes necessary for `.csv` files where
    information has been lost, or, if the source table is not properly formatted.

    Cells outside the row/column header (such as data cells) will not be duplicated.
    MIPS is run to perform a check for that.

    Algorithm according to Nagy and Seth, 2016, in Procs. ICPR 2016, Cancun, Mexico.

    :param table_object: Input Table object
    :type table_object: ~tabledataextractor.table.table.Table
    :param array: Table to use as input
    :type array: Numpy array
    :return: Array with spanning cells copied, if necessary. Alternatively, returns the original table.
    """

    def empty_row(arrayy):
        """Returns 'True' if the whole row is truly empty"""
        for element in arrayy:
            if element:
                return False
        return True

    # running MIPS to find the data region
    log.debug("Spanning cells. Attempt to run MIPS algorithm, to find potential title row.")
    try:
        cc1, cc2 = find_cc1_cc2(table_object, find_cc4(table_object), table_object.pre_cleaned_table)
    except (MIPSError, TypeError):
        log.error("Spanning cells update was not performed due to failure of MIPS algorithm.")
        return array

    log.debug("Spanning cells. Attempt to run main spanning cell algorithm.")
    temp = array.copy()
    top_fill = None
    left_fill = None
    # first pass: fill empty cells downwards, column by column, with the last
    # non-empty value seen above; an entirely empty row resets the fill flag
    for c in range(0, len(temp.T)):
        flag = 0
        for r in range(cc1[0], len(temp)):
            if temp[r, c]:
                top_fill = temp[r, c]
                flag = 1
            elif flag == 1:
                temp[r, c] = top_fill
            if len(temp) - 1 > r and empty_row(temp[r + 1]):
                flag = 0
    # second pass: fill empty cells to the right, row by row; a value only
    # starts a fill run if it differs from its vertical neighbours, and an
    # entirely empty column resets the fill flag
    for r in range(cc1[0], len(temp)):
        flag = 0
        for c in range(len(temp.T)):
            if temp[r, c]:
                if (len(temp) - 1 > r and temp[r + 1, c] != temp[r, c]) or temp[r - 1, c] != temp[r, c]:
                    left_fill = temp[r, c]
                    flag = 1
                else:
                    flag = 0
            elif flag == 1:
                temp[r, c] = left_fill
            if len(temp.T) - 1 > c and empty_row(temp.T[c + 1]):
                flag = 0

    # Finding the header regions to make sure the spanning cells additions are not applied in the data region
    # Then, the main MIPS algorithm has to be run
    temp2 = np.copy(temp)
    diff_row_length = 0
    diff_col_length = 0
    if table_object.configs['use_prefixing']:
        temp2 = prefix_duplicate_labels(table_object, temp)
        # reset the prefixing flag
        table_object.history._prefixing_performed = False
        table_object.history._prefixed_rows = False
        # track how many rows/columns prefixing added, so the header-copy
        # loops below stay within the bounds of the un-prefixed `temp`
        diff_row_length = len(temp2) - len(temp)
        diff_col_length = len(temp2.T) - len(temp.T)
    log.debug("Spanning cells. Attempt to run main MIPS algorithm.")
    # disable title row temporarily
    old_title_row_setting = table_object.configs['use_title_row']
    table_object.configs['use_title_row'] = False
    try:
        cc1, cc2 = find_cc1_cc2(table_object, find_cc4(table_object), temp2)
    except (MIPSError, TypeError):
        log.error("Spanning cells update was not performed due to failure of MIPS algorithm.")
        return array
    finally:
        # always restore the caller's title-row setting
        table_object.configs['use_title_row'] = old_title_row_setting

    updated = array.copy()
    # update the original table with values from the updated table if the cells are in the header regions
    # update column header
    for col_header_index in range(cc1[0], cc2[0] + 1 - diff_row_length):
        updated[col_header_index, :] = temp[col_header_index, :]
    # update row header
    for row_header_index in range(cc1[1], cc2[1] + 1 - diff_col_length):
        updated[:, row_header_index] = temp[:, row_header_index]
    # log
    if not np.array_equal(updated, array):
        table_object.history._spanning_cells_extended = True
        log.debug("METHOD. Spanning cells extended.")
    return updated
def header_extension_up(table_object, cc1):
    """
    Extends the header upwards after the main MIPS run.

    Algorithm according to Nagy and Seth, 2016, *"Table Headers: An entrance to the data mine"*,
    in Procs. ICPR 2016, Cancun, Mexico.

    :param table_object: Input Table object
    :type table_object: ~tabledataextractor.table.table.Table
    :param cc1: `CC1` critical cell
    :return: cc1_new
    """
    cc1_new_row = None
    cc1_new_col = None
    # add row above the identified column header if it does not consist of cells with identical values and if it
    # adds at least one non-blank cell that has a value different from the cell immediately below it
    current_row = table_object.pre_cleaned_table[cc1[0], :]
    for row_index in range(cc1[0] - 1, -1, -1):
        # start after the first column to allow for a title
        if len(np.unique(table_object.pre_cleaned_table[row_index, 1:])) == 1:
            # row is uniform -> header cannot start above the row below it
            cc1_new_row = row_index + 1
        else:
            for col_index, cell in enumerate(table_object.pre_cleaned_table[row_index, :]):
                # remove the first row from this check to preserve a title,
                # if the title is the only non-empty element of the row
                if col_index != 0 and \
                        cell != current_row[col_index] and \
                        not table_object.pre_cleaned_table_empty[row_index, col_index]:
                    current_row = table_object.pre_cleaned_table[row_index, :]
                    cc1_new_row = row_index
                    break
    if cc1_new_row is None:
        cc1_new_row = cc1[0]
    # now do the same for the row headers
    current_col = table_object.pre_cleaned_table[:, cc1[1]]
    for col_index in range(cc1[1] - 1, -1, -1):
        if len(np.unique(table_object.pre_cleaned_table[:, col_index])) == 1:
            # column is uniform -> header cannot start left of the column after it
            cc1_new_col = col_index + 1
        else:
            for row_index, cell in enumerate(table_object.pre_cleaned_table[:, col_index]):
                if cell != current_col[row_index] and not table_object.pre_cleaned_table_empty[row_index, col_index]:
                    current_col = table_object.pre_cleaned_table[:, col_index]
                    cc1_new_col = col_index
                    break
    if cc1_new_col is None:
        cc1_new_col = cc1[1]
    cc1_new = (cc1_new_row, cc1_new_col)
    # log
    if not cc1_new == cc1:
        table_object.history._header_extended_up = True
        log.debug("METHOD. Header extended upwards.")
    return cc1_new
def header_extension_down(table_object, cc1, cc2, cc4):
    """
    Extends the header downwards, if no prefixing was done and if the appropriate stub header is empty.
    For column-header expansion downwards, only the first cell of the stub header has to be empty.
    For row-header expansion to the right, the whole stub header column above has to be empty.

    :param table_object: Input Table object
    :type table_object: ~tabledataextractor.table.table.Table
    :param cc2: Critical cell `CC2`
    :type cc2: (int, int)
    :param cc1: Critical cell `CC1`
    :type cc1: (int, int)
    :param cc4: Critical cell `CC4`
    :type cc4: (int, int)
    :return: New `cc2`
    """
    cc2_new = cc2
    extended = False
    # only do downwards header extension if no prefixing was done
    if not table_object.history.prefixing_performed:
        # extend column header downwards, changes cc2 row
        # only the first cell of the stub header has to be empty to accept the move downwards
        row_index = cc2[0]
        while row_index <= cc4[0] and empty_string(table_object.pre_cleaned_table[row_index, cc1[1]]):
            row_index += 1
        cc2_new = (row_index - 1, cc2_new[1])
        if cc2_new != cc2:
            extended = True
        if extended:
            table_object.history._header_extended_down = True
        # Check if row header can be shortened now, check duplicate rows accordingly, changes cc2 col
        if extended:
            cc2_new_col = cc2_new[1]
            i = len(table_object.row_header.T)
            # shrink the row header from the right while its rows stay unique
            while not duplicate_rows(table_object.row_header[:, :i]) and i > 1:
                i -= 1
            if not duplicate_rows(table_object.row_header[:, :i]):
                cc2_new_col -= 1
            cc2_new = (cc2_new[0], cc2_new_col)
            extended = False
        # extend row header to the right, changes cc2 col
        # this check is more rigorous than above, and all the cells in the stub header have to be empty
        col_index = cc2_new[1]
        while col_index <= cc4[1] and empty_cells(table_object.pre_cleaned_table[cc1[0]:cc2[0]+1, col_index]).all():
            col_index += 1
        if col_index - 1 != cc2_new[1]:
            extended = True
            cc2_new = (cc2_new[0], col_index - 1)
        if extended:
            # Check if column header can be shortened now, changes cc2 row
            cc2_new_row = cc2_new[0]
            i = len(table_object.col_header)
            # shrink the column header from the bottom while its columns stay unique
            while not duplicate_columns(table_object.col_header[:i, :]) and i > 1:
                i -= 1
            if not duplicate_columns(table_object.col_header[:i, :]):
                cc2_new_row -= 1
            cc2_new = (cc2_new_row, cc2_new[1])
        if extended:
            table_object.history._header_extended_down = True
    return cc2_new
def categorize_header(header):
    """
    Performs header categorization (calls the `SymPy` `factor_list` function) for a given table.

    :param header: header region, Numpy array
    :return: factor_list
    """
    # build a sum-of-products SymPy expression: one product term per header row
    expression = 0
    part = 0
    for row in header:
        for position, cell in enumerate(row):
            symbol = Symbol(cell)
            part = symbol if position == 0 else part * symbol
        expression = expression + part
    # factorization
    # f = factor(expression, deep=True)
    f = factor_list(expression)
    log.debug("Factorization, initial header: {}".format(expression))
    log.debug("Factorization, factorized header: {}".format(f))
    return f
def build_category_table(table, cc1, cc2, cc3, cc4):
    """
    Build category table for given input table.
    Original header factorization, according to Embley et al., *DOI: 10.1007/s10032-016-0259-1*.
    This version is not used, instead :class:`~tabledataextractor.output.to_pandas.build_category_table` is being used.

    :param table: Table on which to perform the categorization
    :type table: Numpy array
    :param cc1: key MIPS cell
    :param cc2: key MIPS cell
    :param cc3: key MIPS cell
    :param cc4: key MIPS cell
    :return: category table as numpy array
    """
    # NOTE(review): despite the docstring, nothing is returned; the factor
    # lists are computed and discarded (this version is unused elsewhere).
    col_region = table[cc1[0]:cc2[0] + 1, cc3[1]:cc4[1] + 1]
    row_region = table[cc3[0]:cc4[0] + 1, cc1[1]:cc2[1] + 1]
    column_factors = categorize_header(col_region.T)
    row_factors = categorize_header(row_region)
def split_table(table_object):
    """
    Splits table into subtables. Yields :class:`~tabledataextractor.table.table.Table` objects.

    Algorithm:
        If the stub header is repeated in the column header section the table is split up before
        the repeated element.

    :param table_object: Input Table object
    :type table_object: ~tabledataextractor.table.table.Table
    """
    # first, the column header
    i = 0
    # the last row of the column/stub header is not used, as it will be determined as
    # data region by the main MIPS algorithm
    for col_index, column in enumerate(table_object.col_header[:-1].T):
        # the first match is backwards and forwards looking
        if i == 0 and column.size > 0 and \
                table_object.stub_header[:-1].T[0].size > 0 and \
                np.array_equal(column, table_object.stub_header[:-1].T[0]):
            yield table_object._pre_cleaned_table[:, 0:col_index + 1].tolist()
            i += 1
        # every other match is only forwards looking
        # NOTE(review): this is an `if`, not `elif` — the column that triggered
        # the first match above also satisfies this branch and is yielded twice;
        # confirm whether that is intended
        if i > 0 and column.size > 0 and \
                table_object.stub_header[:-1].T[0].size > 0 and \
                np.array_equal(column, table_object.stub_header[:-1].T[0]):
            # NOTE(review): upper bound `col_index + i * col_index + 2` looks
            # suspicious (depends on the absolute column position) — verify
            yield table_object._pre_cleaned_table[:, col_index + 1:col_index + i * col_index + 2].tolist()
            i += 1
    # now the same thing for the row header
    i = 0
    for row_index, row in enumerate(table_object.row_header[:, :-1]):
        # the first match is backwards and forwards looking
        if i == 0 and row.size > 0 and \
                table_object.stub_header[0, :-1].size > 0 and \
                np.array_equal(row, table_object.stub_header[0, :-1]):
            yield table_object._pre_cleaned_table[0:row_index + 1, :].tolist()
            i += 1
        # every other match is only forwards looking
        if i > 0 and row.size > 0 and \
                table_object.stub_header[0, :-1].size > 0 \
                and np.array_equal(row, table_object.stub_header[0, :-1]):
            yield table_object._pre_cleaned_table[row_index + 1:row_index + i * row_index + 2, :].tolist()
            i += 1
def find_row_header_table(category_table, stub_header):
    """
    Constructs a Table (as a list of rows) from the row categories of the
    original table: the stub-header rows first, followed by one row per
    category-table entry (the entry's row categories, ``line[1]``).

    :param category_table: ~tabledataextractor.table.table.Table.category_table
    :type category_table: list
    :param stub_header: ~tabledataextractor.table.table.Table.stub_header
    :type stub_header: numpy.ndarray
    :return: list
    """
    # copy each row so callers can mutate the result without touching the inputs
    raw_table = [list(line) for line in stub_header.tolist()]
    raw_table.extend(list(line[1]) for line in category_table)
    return raw_table
def clean_row_header(pre_cleaned_table, cc2):
    """
    Cleans the row header by removing duplicate rows that span the whole table.

    Rows at or above the end of the header (row ``cc2[0]``) are always kept;
    below it only the first occurrence of each distinct row is kept, in the
    original row order.

    :param pre_cleaned_table: table to clean
    :type pre_cleaned_table: numpy.ndarray
    :param cc2: critical cell `CC2`, marking the end of the header region
    :type cc2: (int, int)
    :return: table with duplicate data rows removed
    :rtype: numpy.ndarray
    """
    unmodified_part = pre_cleaned_table[:cc2[0] + 1, :]
    modified_part = pre_cleaned_table[cc2[0] + 1:, :]
    # np.unique returns the index of the first occurrence of every distinct
    # row; sorting those indices preserves the original row order
    # (the previous bookkeeping of removed row indices was dead code and
    # has been removed)
    _, indices = np.unique(modified_part, axis=0, return_index=True)
    modified_part = modified_part[np.sort(indices)]
    return np.vstack((unmodified_part, modified_part))
| [
"logging.getLogger",
"numpy.copy",
"numpy.insert",
"sympy.Symbol",
"tabledataextractor.table.parse.StringParser",
"numpy.full_like",
"numpy.unique",
"numpy.delete",
"numpy.sort",
"tabledataextractor.table.parse.CellParser",
"numpy.core.defchararray.replace",
"numpy.array_equal",
"numpy.vstac... | [((329, 356), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (346, 356), False, 'import logging\n'), ((744, 763), 'tabledataextractor.table.parse.StringParser', 'StringParser', (['regex'], {}), '(regex)\n', (756, 763), False, 'from tabledataextractor.table.parse import StringParser, CellParser\n'), ((1202, 1251), 'numpy.full_like', 'np.full_like', (['array'], {'fill_value': '(False)', 'dtype': 'bool'}), '(array, fill_value=False, dtype=bool)\n', (1214, 1251), True, 'import numpy as np\n'), ((1271, 1288), 'tabledataextractor.table.parse.CellParser', 'CellParser', (['regex'], {}), '(regex)\n', (1281, 1288), False, 'from tabledataextractor.table.parse import StringParser, CellParser\n'), ((1792, 1806), 'numpy.copy', 'np.copy', (['array'], {}), '(array)\n', (1799, 1806), True, 'import numpy as np\n'), ((2240, 2254), 'numpy.copy', 'np.copy', (['array'], {}), '(array)\n', (2247, 2254), True, 'import numpy as np\n'), ((2554, 2602), 'numpy.delete', 'np.delete', (['pre_cleaned_table', 'empty_rows'], {'axis': '(0)'}), '(pre_cleaned_table, empty_rows, axis=0)\n', (2563, 2602), True, 'import numpy as np\n'), ((2894, 2945), 'numpy.delete', 'np.delete', (['pre_cleaned_table', 'empty_columns'], {'axis': '(1)'}), '(pre_cleaned_table, empty_columns, axis=1)\n', (2903, 2945), True, 'import numpy as np\n'), ((3025, 3080), 'numpy.unique', 'np.unique', (['pre_cleaned_table'], {'axis': '(0)', 'return_index': '(True)'}), '(pre_cleaned_table, axis=0, return_index=True)\n', (3034, 3080), True, 'import numpy as np\n'), ((3514, 3569), 'numpy.unique', 'np.unique', (['pre_cleaned_table'], {'axis': '(1)', 'return_index': '(True)'}), '(pre_cleaned_table, axis=1, return_index=True)\n', (3523, 3569), True, 'import numpy as np\n'), ((4278, 4292), 'numpy.copy', 'np.copy', (['array'], {}), '(array)\n', (4285, 4292), True, 'import numpy as np\n'), ((4304, 4351), 'numpy.core.defchararray.replace', 'np.core.defchararray.replace', (['temp', '"""\xa0"""', 
'""" """'], {}), "(temp, '\\xa0', ' ')\n", (4332, 4351), True, 'import numpy as np\n'), ((20638, 20702), 'tabledataextractor.exceptions.MIPSError', 'MIPSError', (['"""No CC3 critical cell found! No data region defined."""'], {}), "('No CC3 critical cell found! No data region defined.')\n", (20647, 20702), False, 'from tabledataextractor.exceptions import MIPSError\n'), ((32073, 32086), 'numpy.copy', 'np.copy', (['temp'], {}), '(temp)\n', (32080, 32086), True, 'import numpy as np\n'), ((39735, 39758), 'sympy.factor_list', 'factor_list', (['expression'], {}), '(expression)\n', (39746, 39758), False, 'from sympy import factor_list, factor\n'), ((43992, 44043), 'numpy.unique', 'np.unique', (['modified_part'], {'axis': '(0)', 'return_index': '(True)'}), '(modified_part, axis=0, return_index=True)\n', (44001, 44043), True, 'import numpy as np\n'), ((44329, 44372), 'numpy.vstack', 'np.vstack', (['(unmodified_part, modified_part)'], {}), '((unmodified_part, modified_part))\n', (44338, 44372), True, 'import numpy as np\n'), ((3414, 3430), 'numpy.sort', 'np.sort', (['indices'], {}), '(indices)\n', (3421, 3430), True, 'import numpy as np\n'), ((5667, 5710), 'numpy.unique', 'np.unique', (['table'], {'axis': '(0)', 'return_index': '(True)'}), '(table, axis=0, return_index=True)\n', (5676, 5710), True, 'import numpy as np\n'), ((6104, 6149), 'numpy.unique', 'np.unique', (['table.T'], {'axis': '(0)', 'return_index': '(True)'}), '(table.T, axis=0, return_index=True)\n', (6113, 6149), True, 'import numpy as np\n'), ((33513, 33543), 'numpy.array_equal', 'np.array_equal', (['updated', 'array'], {}), '(updated, array)\n', (33527, 33543), True, 'import numpy as np\n'), ((44299, 44315), 'numpy.sort', 'np.sort', (['indices'], {}), '(indices)\n', (44306, 44315), True, 'import numpy as np\n'), ((3929, 3945), 'numpy.sort', 'np.sort', (['indices'], {}), '(indices)\n', (3936, 3945), True, 'import numpy as np\n'), ((17426, 17461), 'tabledataextractor.exceptions.MIPSError', 'MIPSError', 
(['"""Error in _find_cc1_cc2"""'], {}), "('Error in _find_cc1_cc2')\n", (17435, 17461), False, 'from tabledataextractor.exceptions import MIPSError\n'), ((27333, 27377), 'numpy.insert', 'np.insert', (['array', 'row_index', 'new_row'], {'axis': '(0)'}), '(array, row_index, new_row, axis=0)\n', (27342, 27377), True, 'import numpy as np\n'), ((27980, 28030), 'numpy.insert', 'np.insert', (['array', 'column_index', 'new_column'], {'axis': '(1)'}), '(array, column_index, new_column, axis=1)\n', (27989, 28030), True, 'import numpy as np\n'), ((41554, 41612), 'numpy.array_equal', 'np.array_equal', (['column', 'table_object.stub_header[:-1].T[0]'], {}), '(column, table_object.stub_header[:-1].T[0])\n', (41568, 41612), True, 'import numpy as np\n'), ((41890, 41948), 'numpy.array_equal', 'np.array_equal', (['column', 'table_object.stub_header[:-1].T[0]'], {}), '(column, table_object.stub_header[:-1].T[0])\n', (41904, 41948), True, 'import numpy as np\n'), ((42382, 42435), 'numpy.array_equal', 'np.array_equal', (['row', 'table_object.stub_header[0, :-1]'], {}), '(row, table_object.stub_header[0, :-1])\n', (42396, 42435), True, 'import numpy as np\n'), ((42708, 42761), 'numpy.array_equal', 'np.array_equal', (['row', 'table_object.stub_header[0, :-1]'], {}), '(row, table_object.stub_header[0, :-1])\n', (42722, 42761), True, 'import numpy as np\n'), ((34532, 34588), 'numpy.unique', 'np.unique', (['table_object.pre_cleaned_table[row_index, 1:]'], {}), '(table_object.pre_cleaned_table[row_index, 1:])\n', (34541, 34588), True, 'import numpy as np\n'), ((35447, 35502), 'numpy.unique', 'np.unique', (['table_object.pre_cleaned_table[:, col_index]'], {}), '(table_object.pre_cleaned_table[:, col_index])\n', (35456, 35502), True, 'import numpy as np\n'), ((39554, 39566), 'sympy.Symbol', 'Symbol', (['cell'], {}), '(cell)\n', (39560, 39566), False, 'from sympy import Symbol\n'), ((39615, 39627), 'sympy.Symbol', 'Symbol', (['cell'], {}), '(cell)\n', (39621, 39627), False, 'from sympy import 
Symbol\n')] |
import sys
import numpy as np
#import preprocess_blockSVD as pre_svd
import multiprocessing
import time
import matplotlib.pyplot as plt
#import greedyPCA_SV as gpca
#import greedyPCA as gpca
from math import ceil
from functools import partial
from itertools import product
# compute single mean_th factor for all tiles
def block_split_size(l, n):
    """
    Compute the section lengths obtained when an array of length l is
    divided into n near-equal parts: the first l % n sections receive
    l // n + 1 elements, the remaining ones l // n.

    Parameters
    ----------
    l: int
        length of the array to partition
    n: int
        number of sections

    Returns
    -------
    np.array (n,)
        integer length of each section (the entries sum to l)
    """
    base, remainder = divmod(l, n)
    sizes = np.full((n,), base, dtype='int')
    # the first `remainder` sections absorb the leftover elements
    sizes[:remainder] += 1
    return sizes
def split_image_into_blocks(image,
                            nblocks=[10,10]):
    """
    Cut a movie into rectangular spatial tiles.

    Parameters
    ----------
    image: np.array (d1 x d2 x T)
        movie to be tiled along its first two axes
    nblocks: list (2,)
        number of tiles along each of the first two axes
        (alternatively, explicit split grids per axis)

    Returns
    -------
    blocks: list
        nblocks[0]*nblocks[1] tiles traversed row-major, each of shape
        (d1' x d2' x T); if the tiling would place every pixel in its
        own block, the flattened image is returned instead.
    """
    if all(isinstance(nb, int) for nb in nblocks):
        total_tiles = np.prod(nblocks)
    else:
        # grids given explicitly: split points imply len+1 sections per axis
        total_tiles = (len(nblocks[0]) + 1) * (len(nblocks[1]) + 1)
    if total_tiles == image.shape[0] * image.shape[1]:
        # degenerate tiling: one block per pixel
        return image.flatten()
    tiles = []
    for band in np.array_split(image, nblocks[0], axis=0):
        for piece in np.array_split(band, nblocks[1], axis=1):
            tiles.append(np.array(piece))
    return tiles
def vector_offset(array, offset_factor=2):
    """
    Offsets between consecutive grid positions, shrunk by offset_factor.

    Used to build the 'half-shifted' tilings: with the default factor of
    2 every grid spacing is halved (rounded up).

    Parameters
    ----------
    array: np.array
        monotone grid positions (e.g. cumulative tile boundaries)
    offset_factor: int
        divisor applied to each grid spacing

    Returns
    -------
    np.array (len(array)-1,)
        ceil(diff(array) / offset_factor) as integers
    """
    spacings = np.diff(array)
    return np.ceil(spacings / offset_factor).astype('int')
def tile_grids(dims,
               indiv_grids=True,
               nblocks=[10,10]):
    """
    Grid bookkeeping for tiling an array of shape dims.

    Parameters
    ----------
    dims: tuple
        shape of the array; only dims[0], dims[1] are used
    indiv_grids: bool
        if True, return the cumulative grid boundaries per axis;
        if False, return the spatial shape of every tile
    nblocks: list (2,)
        number of tiles per axis (ints), or explicit per-axis grids

    Returns
    -------
    if indiv_grids: (row boundaries, column boundaries), each a cumsum
        starting at 0; otherwise np.array (number_of_blocks, 2) with the
        (rows, cols) shape of each tile in row-major order.
    """
    if all(isinstance(nb, int) for nb in nblocks):
        row_sizes = block_split_size(dims[0], nblocks[0])
        col_sizes = block_split_size(dims[1], nblocks[1])
    else:
        # explicit grids supplied by the caller
        row_sizes, col_sizes = nblocks
    if indiv_grids:
        rows = np.insert(row_sizes, 0, 0).cumsum()
        cols = np.insert(col_sizes, 0, 0).cumsum()
        return rows, cols
    # interpret grids as boundary positions; close them at dims and
    # convert back to per-tile extents
    row_sizes = np.diff(np.insert(np.append(row_sizes, dims[0]), 0, 0))
    col_sizes = np.diff(np.insert(np.append(col_sizes, dims[1]), 0, 0))
    shapes = np.zeros((len(row_sizes) * len(col_sizes), 2))
    for idx, pair in enumerate(product(row_sizes, col_sizes)):
        shapes[idx] = pair
    return shapes.astype('int')
def offset_tiling_dims(dims,
                       nblocks,
                       offset_case=None):
    """
    Dimensions and per-tile shapes of an offset ('half-shifted') tiling.

    Parameters
    ----------
    dims: tuple (d1, d2, T)
        shape of the full movie
    nblocks: list (2,)
        tile counts along the first two axes
    offset_case: None or str {'r', 'c', 'rc'}
        which axes are shifted by half a tile (None = base grid)

    Returns
    -------
    dims: tuple
        shape of the (possibly cropped) movie covered by this tiling
    indiv_dim: np.array (number_of_blocks, 2)
        spatial shape of each tile
    """
    row_grid, col_grid = tile_grids(dims,
                                    nblocks=nblocks)
    row_shift = vector_offset(row_grid)
    col_shift = vector_offset(col_grid)
    # first/last boundaries of the half-shifted grids per axis
    rc0, rc1 = (row_grid[1:] - row_shift)[[0, -1]]
    cc0, cc1 = (col_grid[1:] - col_shift)[[0, -1]]
    if offset_case is None:
        row_grid = row_grid[1:-1]
        col_grid = col_grid[1:-1]
    elif offset_case == 'r':
        dims = rc1 - rc0, dims[1], dims[2]
        row_grid = row_grid[1:-2]
        col_grid = col_grid[1:-1]
    elif offset_case == 'c':
        dims = dims[0], cc1 - cc0, dims[2]
        row_grid = row_grid[1:-1]
        col_grid = col_grid[1:-2]
    elif offset_case == 'rc':
        dims = rc1 - rc0, cc1 - cc0, dims[2]
        row_grid = row_grid[1:-2]
        col_grid = col_grid[1:-2]
    else:
        print('Invalid option')
    indiv_dim = tile_grids(dims,
                           nblocks=[row_grid, col_grid],
                           indiv_grids=False)
    return dims, indiv_dim
def offset_tiling(W,
                  nblocks=[10,10],
                  offset_case=None):
    """
    Tile W on a grid shifted by half a tile row- and/or column-wise.

    The movie is first cropped so the shifted grid fits, then split into
    tiles with split_image_into_blocks.

    Parameters
    ----------
    W: np.array (d1 x d2 x T)
        movie to tile
    nblocks: list (2,)
        tile counts along the first two axes
    offset_case: None or str {'r', 'c', 'rc'}
        which axes are shifted by half a tile (None = base grid)

    Returns
    -------
    W_off: list
        tiles of the (possibly cropped) movie
    shape: tuple
        shape of the cropped movie the tiles cover
    """
    dims = W.shape
    row_grid, col_grid = tile_grids(dims,
                                    nblocks=nblocks)
    row_shift = vector_offset(row_grid)
    col_shift = vector_offset(col_grid)
    # crop boundaries implied by the half-shifted grids
    rc0, rc1 = (row_grid[1:] - row_shift)[[0, -1]]
    cc0, cc1 = (col_grid[1:] - col_shift)[[0, -1]]
    if offset_case is None:
        W_off = split_image_into_blocks(W,
                                        nblocks=nblocks)
    elif offset_case == 'r':
        W = W[rc0:rc1, :, :]
        W_off = split_image_into_blocks(
            W, nblocks=[row_grid[1:-2], col_grid[1:-1]])
    elif offset_case == 'c':
        W = W[:, cc0:cc1, :]
        W_off = split_image_into_blocks(
            W, nblocks=[row_grid[1:-1], col_grid[1:-2]])
    elif offset_case == 'rc':
        W = W[rc0:rc1, cc0:cc1, :]
        W_off = split_image_into_blocks(
            W, nblocks=[row_grid[1:-2], col_grid[1:-2]])
    else:
        print('Invalid option')
        W_off = W
    return W_off, W.shape
def denoise_dx_tiles(W,
                     confidence=0.99,
                     dx=1,
                     fudge_factor=1.,
                     greedy=False,
                     maxlag=3,
                     mean_th_factor=1.15,
                     min_rank=1,
                     nblocks=[10,10],
                     snr_threshold=2,
                     U_update=False,
                     verbose=False):
    """
    Denoise movie W tile by tile, optionally blending four offset grids.

    The movie is split into nblocks[0] x nblocks[1] tiles and each tile
    is denoised with run_single. When dx == 1 that single-grid result is
    returned directly. Otherwise the same procedure is repeated on three
    half-shifted tilings (row-, column-, and row+column-shifted) and the
    four reconstructions are blended by combine_4xd.

    Parameters
    ----------
    W: np.array (d1 x d2 x T)
        movie to denoise
    dx: int
        1 = single-grid denoising only; any other value triggers the
        four-grid blended reconstruction
    nblocks: list (2,)
        tile counts along the first two axes
    confidence, fudge_factor, greedy, maxlag, mean_th_factor, min_rank,
    snr_threshold, U_update, verbose:
        forwarded unchanged to run_single for every grid

    Returns
    -------
    denoised movie (np.array, same shape as W) and either the rank array
    of the single grid (dx == 1) or a list with the rank arrays of all
    four grids.
    """
    dims = W.shape
    W_ = split_image_into_blocks(W,nblocks=nblocks)
    #########################
    # No offset tiling
    #########################
    if verbose:
        print('Running individual tiles')
    dW_,rank_W_ = run_single(W_,
                             confidence=confidence,
                             fudge_factor=fudge_factor,
                             greedy=greedy,
                             maxlag=maxlag,
                             mean_th_factor=mean_th_factor,
                             min_rank=min_rank,
                             snr_threshold=snr_threshold,
                             U_update=U_update,
                             verbose=verbose)
    del W_
    # remember per-tile shapes before stitching the tiles back together
    dims_ = list(map(np.shape,dW_))
    dW_ = combine_blocks(dims,
                         dW_,
                         list_order='C')
    if dx ==1:
        return dW_, rank_W_
    #########################
    # Row wise offset tiling
    #########################
    if verbose:
        print('Row wise tiling')
    W_rs, drs = offset_tiling(W,
                              nblocks=nblocks,
                              offset_case='r')
    #dims_=[dims,dims_rs,dims_cs,dims_rcs]
    #return W_,W_rs,W_cs,W_rcs, dims_
    dW_rs, rank_W_rs = run_single(W_rs,
                                  confidence=confidence,
                                  fudge_factor=fudge_factor,
                                  greedy=greedy,
                                  maxlag=maxlag,
                                  mean_th_factor=mean_th_factor,
                                  min_rank=min_rank,
                                  snr_threshold=snr_threshold,
                                  U_update=U_update,
                                  verbose=verbose)
    del W_rs
    dims_rs = list(map(np.shape,dW_rs))
    dW_rs = combine_blocks(drs,
                           dW_rs,
                           list_order='C')
    #########################
    # Col wise offset tiling
    #########################
    if verbose:
        print('Col wise tiling')
    W_cs, dcs = offset_tiling(W,
                              nblocks=nblocks,
                              offset_case='c')
    dW_cs,rank_W_cs = run_single(W_cs,
                                 confidence=confidence,
                                 fudge_factor=fudge_factor,
                                 greedy=greedy,
                                 maxlag=maxlag,
                                 mean_th_factor=mean_th_factor,
                                 min_rank=min_rank,
                                 snr_threshold=snr_threshold,
                                 U_update=U_update,
                                 verbose=verbose)
    del W_cs
    dims_cs = list(map(np.shape,dW_cs))
    dW_cs = combine_blocks(dcs,
                           dW_cs,
                           list_order='C')
    #########################
    # Row/Col wise offset tiling
    #########################
    if verbose:
        print('Row/Col wise tiling')
    W_rcs, drcs = offset_tiling(W,
                                nblocks=nblocks,
                                offset_case='rc')
    dW_rcs,rank_W_rcs = run_single(W_rcs,
                                   confidence=confidence,
                                   fudge_factor=fudge_factor,
                                   greedy=greedy,
                                   maxlag=maxlag,
                                   mean_th_factor=mean_th_factor,
                                   min_rank=min_rank,
                                   snr_threshold=snr_threshold,
                                   U_update=U_update,
                                   verbose=verbose)
    del W_rcs
    dims_rcs = list(map(np.shape,dW_rcs))
    dW_rcs = combine_blocks(drcs,
                            dW_rcs,
                            list_order='C')
    if False: # debug
        return nblocks, dW_, dW_rs, dW_cs, dW_rcs, dims_, dims_rs, dims_cs, dims_rcs
    # blend the four reconstructions with pyramid weights
    W_four = combine_4xd(nblocks,
                         dW_,
                         dW_rs,
                         dW_cs,
                         dW_rcs,
                         dims_,
                         dims_rs,
                         dims_cs,
                         dims_rcs)
    return W_four , [rank_W_,rank_W_rs,rank_W_cs,rank_W_rcs]
def combine_4xd(nblocks,dW_,dW_rs,dW_cs,dW_rcs,dims_,dims_rs,dims_cs,dims_rcs,plot_en=False):
    """
    Blend the four offset-grid reconstructions into one movie.

    Each reconstruction is weighted by a mosaic of per-tile pyramid
    functions (1 at a tile's center, 0 at its border), the shifted grids
    are embedded at their crop offsets, and a weighted average is taken.

    Parameters
    ----------
    nblocks: list (2,)
        tile counts used to build the grids
    dW_, dW_rs, dW_cs, dW_rcs: np.array
        denoised movies from the base, row-, column- and row+column-
        shifted tilings (the shifted ones are smaller, cropped movies)
    dims_, dims_rs, dims_cs, dims_rcs: list of tuples
        per-tile shapes of the corresponding tilings
    plot_en: bool
        if True, display the individual and summed weight maps

    Returns
    -------
    W_hat: np.array (same shape as dW_)
        pyramid-weighted average of the four reconstructions
    """
    dims = dW_.shape
    row_array,col_array = tile_grids(dims,
                                     nblocks=nblocks)
    r_offset = vector_offset(row_array)
    c_offset = vector_offset(col_array)
    # crop offsets at which the shifted reconstructions sit inside dims
    r1, r2 = (row_array[1:]-r_offset)[[0,-1]]
    c1, c2 = (col_array[1:]-c_offset)[[0,-1]]
    drs = dW_rs.shape
    dcs = dW_cs.shape
    drcs = dW_rcs.shape
    # Get pyramid functions for each grid
    ak1 = np.zeros(dims[:2])
    ak2 = np.zeros(dims[:2])
    ak3 = np.zeros(dims[:2])
    ak0 = pyramid_tiles(dims,
                        dims_,
                        list_order='C')
    ak1[r1:r2,:] = pyramid_tiles(drs,
                                 dims_rs,
                                 list_order='C')
    ak2[:,c1:c2] = pyramid_tiles(dcs,
                                 dims_cs,
                                 list_order='C')
    ak3[r1:r2,c1:c2] = pyramid_tiles(drcs,
                                     dims_rcs,
                                     list_order='C')
    # Force outer most border = 1 so the weight sum never vanishes there
    ak0[[0,-1],:]=1
    ak0[:,[0,-1]]=1
    #return ak0,ak1,ak2,ak3,patches,W_rs,W_cs,W_rcs
    if False:
        print('427 -- debug')
        return ak0,ak1,ak2,ak3
    # embed the cropped reconstructions into full-size arrays
    W1 = np.zeros(dims)
    W2 = np.zeros(dims)
    W3 = np.zeros(dims)
    W1[r1:r2,:,:] = dW_rs
    W2[:,c1:c2,:] = dW_cs
    W3[r1:r2,c1:c2,:] = dW_rcs
    if plot_en:
        for ak_ in [ak0,ak1,ak2,ak3]:
            plt.figure(figsize=(10,10))
            plt.imshow(ak_[:,:])
            plt.show()
    if plot_en:
        plt.figure(figsize=(10,10))
        plt.imshow((ak0+ak1+ak2+ak3)[:,:])
        plt.colorbar()
    # weighted average; weights broadcast along the time axis
    W_hat = ak0[:,:,np.newaxis]*dW_
    W_hat += ak1[:,:,np.newaxis]*W1
    W_hat += ak2[:,:,np.newaxis]*W2
    W_hat += ak3[:,:,np.newaxis]*W3
    W_hat /= (ak0+ak1+ak2+ak3)[:,:,np.newaxis]
    return W_hat
def run_single(Y,
               confidence=0.99,
               debug = False,
               fudge_factor=1,
               greedy=False,
               maxlag=3,
               mean_th_factor=1.15,
               min_rank=1,
               parallel=True,
               snr_threshold=2,
               U_update=False,
               verbose=False
               ):
    """
    Run the patch denoiser on each movie in the list Y.

    A single noise threshold (mean_th) is estimated from the first patch
    and shared by all patches. Patches are processed in a multiprocessing
    pool unless parallel is False or the platform is Darwin.

    NOTE(review): `gpca` is referenced below but the greedyPCA imports at
    the top of this file are commented out — calling this will raise
    NameError unless `gpca` is made available; confirm before use.

    Parameters
    ----------
    Y: list (number_movies,)
        list of 3D movies, each of dimensions (d1,d2,T);
        elements may differ in spatial size.
    debug: bool
        if True, return the input unchanged with zero ranks.
    parallel: bool
        process patches in a multiprocessing pool (disabled on Darwin).
    confidence, fudge_factor, greedy, maxlag, mean_th_factor, min_rank,
    snr_threshold, U_update, verbose:
        forwarded to gpca.denoise_patch for every patch.

    Returns
    -------
    Yds: list (number_movies,)
        denoised movies, same dimensions as the corresponding inputs.
    vtids: np.array (number_movies,)
        rank (number of components kept) for each movie.
    """
    if debug:
        print('485-debug')
        vtids = np.zeros((len(Y),))
        return Y, vtids
    # single noise-autocovariance threshold shared by all patches
    mean_th = gpca.wnoise_acov_CI(Y[0].shape[2],
                                  confidence=confidence,
                                  maxlag=maxlag)
    if sys.platform == 'darwin':
        print('parallel version not for Darwin')
        parallel = False
    start = time.time()
    if parallel:
        cpu_count = max(1, multiprocessing.cpu_count()-2)
        args=[[patch] for patch in Y]
        start=time.time()
        pool = multiprocessing.Pool(cpu_count)
        print('Running %d blocks in %d cpus'%(len(Y),
                                             cpu_count))#if verbose else 0
        # define params in function
        c_outs = pool.starmap(partial(gpca.denoise_patch,
                                      confidence=confidence,
                                      fudge_factor=fudge_factor,
                                      greedy=greedy,
                                      maxlag=maxlag,
                                      mean_th=mean_th,
                                      mean_th_factor=mean_th_factor,
                                      min_rank=min_rank,
                                      snr_threshold=snr_threshold,
                                      U_update=U_update,
                                      verbose=verbose),
                              args)
        pool.close()
        pool.join()
        Yds = [out_[0] for out_ in c_outs]
        vtids = [out_[1] for out_ in c_outs]
    else:
        # sequential fallback
        Yds = [None]*len(Y)
        vtids = [None]*len(Y)
        for ii, patch in enumerate(Y):
            print('Tile %d'%ii)
            #if not debug:
            y_ , vt_ = gpca.denoise_patch(patch,
                               confidence=confidence,
                               fudge_factor=fudge_factor,
                               greedy=greedy,
                               maxlag=maxlag,
                               mean_th=mean_th,
                               mean_th_factor=mean_th_factor,
                               min_rank=min_rank,
                               snr_threshold=snr_threshold,
                               U_update=U_update,
                               verbose=verbose)
            #else:
            #    y_ =patch
            #    vt_ = 0
            #print(vt_)
            Yds[ii] = y_
            vtids[ii] = vt_
    #print('535debug')
    #return
    vtids = np.asarray(vtids).astype('int')
    print('Blocks(=%d) run time: %f'%(len(Y),time.time()-start))
    return Yds, vtids
def run_single_deprecated_v2(Y,
                             confidence=0.999,
                             fudge_factor=0.99,
                             greedy=False,
                             maxlag=5,
                             mean_th_factor=1.15,
                             min_rank=1,
                             parallel=True,
                             U_update=False):
    """
    DEPRECATED worker-queue variant of run_single; kept for reference.

    NOTE(review): this function references `stim_knots`, `stim_delta` and
    `gpca`, none of which are defined in its scope (the greedyPCA imports
    at the top of the file are commented out) — calling it will raise
    NameError. Also note that `parallel` is unconditionally forced to
    False below, so the multiprocessing branch is dead code.

    Inputs:
    ------
    Y: list (number_movies,)
        list of 3D movies, each of dimensions (d1,d2,T)
        Each element in the list can be of different size.
    Outputs:
    --------
    Yds: list (number_movies,)
        list of denoised 3D movies, each of same dimensions
        as the corresponding input movie.input
    vtids: list (number_movies,)
        rank or final number of components stored for each movie.
    ------
    """
    def mp_worker(data_in,out_q):
        """ The worker function, invoked in a process
            'nums' is the input.
            The results are placed in a dictionary that's pushed to a queue.
        """
        outdict={}
        #print('Len is %d'%len(data_in))
        for ii, patch in enumerate(data_in):
            #print('Run for %d'%ii)
            #print(patch.shape)
            outdict[ii] = gpca.denoise_patch(patch,
                            maxlag=maxlag,
                            confidence=confidence,
                            greedy=greedy,
                            fudge_factor=fudge_factor,
                            mean_th_factor=mean_th_factor,
                            U_update=U_update,
                            min_rank=min_rank,
                            stim_knots=stim_knots,
                            stim_delta=stim_delta)
        #print('out_q')
        out_q.put(outdict)
    # Each process will get 'chunksize' nums and a queue to put his out
    # dict
    # Parallel not for mac os single numpy default does not run with lapack
    if sys.platform == 'darwin':
        #print('Darwin')
        parallel = False
    start=time.time()
    print('debug')
    parallel =False
    if parallel:
        nprocs = max(1, multiprocessing.cpu_count()-2)
        out_q = multiprocessing.Queue()
        chunksize = int(ceil(len(Y) / float(nprocs)))
        procs = []
        for i in range(nprocs):
            p = multiprocessing.Process(
                    target=mp_worker,
                    args=(Y[chunksize * i:chunksize * (i + 1)],
                          out_q))
            procs.append(p)
            p.start()
        # Collect all results into a single result dict. We know how many dicts
        # with results to expect.
        resultdict = {}
        for i in range(nprocs):
            resultdict.update(out_q.get())
        # Wait for all worker processes to finish
        for p in procs:
            p.join()
        Yds=[]
        vtids=[]
        for c_out in resultdict:
            print(c_out)
            print(len(resultdict[c_out]))
            print(resultdict[c_out][0].shape)
            #for out_ in c_out:
            #    Yds.append(out_[0])
            #    vtids.append(out_[1])
        #print(len(Yds))
        #print(len(vtids))
        #Yds = #[out_[0] for out_ in c_out]
        #vtids = [out_[1] for out_ in c_out]
    else:
        Yds = [None]*len(Y)
        vtids = [None]*len(Y)
        for ii, patch in enumerate(Y):
            print('component %d'%ii)
            resultdict = gpca.denoise_patch(patch,
                            maxlag=maxlag,
                            confidence=confidence,
                            greedy=greedy,
                            fudge_factor=fudge_factor,
                            mean_th_factor=mean_th_factor,
                            U_update=U_update,
                            min_rank=min_rank,
                            stim_knots=stim_knots,
                            stim_delta=stim_delta)
            Yds[ii]=resultdict[0]
            vtids[ii]=resultdict[1]
    vtids = np.asarray(vtids).astype('int')
    print('Run single video run time: %f'%(time.time()-start))
    return Yds, vtids
def run_single_deprecated(Y,
                          confidence=0.999,
                          fudge_factor=0.99,
                          greedy=False,
                          maxlag=5,
                          mean_th_factor=1.15,
                          min_rank=1,
                          U_update=False,
                          stim_knots=None,
                          stim_delta=200):
    """
    DEPRECATED pool-based variant of run_single; kept for reference.

    Uses a multiprocessing pool of size 1 (GIL workaround noted by the
    original author).

    NOTE(review): `gpca` is referenced below but the greedyPCA imports at
    the top of this file are commented out — calling this will raise
    NameError unless `gpca` is made available; confirm before use.

    Inputs:
    ------
    Y: list (number_movies,)
        list of 3D movies, each of dimensions (d1,d2,T)
        Each element in the list can be of different size.
    Outputs:
    --------
    Yds: list (number_movies,)
        list of denoised 3D movies, each of same dimensions
        as the corresponding input movie.input
    vtids: list (number_movies,)
        rank or final number of components stored for each movie.
    ------
    """
    args=[[patch] for patch in Y]
    cpu_count = 1#max(1, multiprocessing.cpu_count()-1)
    start=time.time()
    pool = multiprocessing.Pool(cpu_count)
    print('Running %d blocks in %d cpus'%(len(Y),cpu_count)) #if verbose else 0
    # define params in function
    c_outs = pool.starmap(partial(gpca.denoise_patch,
                            maxlag=maxlag,
                            confidence=confidence,
                            greedy=greedy,
                            fudge_factor=fudge_factor,
                            mean_th_factor=mean_th_factor,
                            U_update=U_update,
                            min_rank=min_rank,
                            stim_knots=stim_knots,
                            stim_delta=stim_delta),
                        args)
    pool.close()
    pool.join()
    print('Run single video run time: %f'%(time.time()-start))
    Yds = [out_[0] for out_ in c_outs]
    vtids = [out_[1] for out_ in c_outs]
    vtids = np.asarray(vtids).astype('int')
    return Yds, vtids
def pyramid_matrix(dims, plot_en=False):
    """
    Compute a 2D pyramid weighting function of size dims[:2].

    Parameters
    ----------
    dims: tuple (d1, d2[, ...])
        only the first two entries set the output size
    plot_en: bool
        if True, display the resulting matrix

    Returns
    -------
    a_k: np.array (d1, d2)
        weights in [0, 1]; 0 on the border and 1 at the center.
    """
    half_r, half_c = ceil(dims[0] / 2), ceil(dims[1] / 2)
    a_k = np.zeros(dims[:2])
    top = max(dims)
    # fill the top-left quadrant and mirror it into the bottom-right
    for rr in range(half_r):
        for cc in range(half_c):
            a_k[rr, cc] = top - min(rr, cc)
            a_k[-rr - 1, -cc - 1] = a_k[rr, cc]
    # bottom-left quadrant: mirror of the bottom-right
    for rr in range(half_r, dims[0]):
        for cc in range(half_c):
            a_k[rr, cc] = a_k[rr, -cc - 1]
    # top-right quadrant: mirror of the bottom-right
    for rr in range(half_r):
        for cc in range(half_c, dims[1]):
            a_k[rr, cc] = a_k[-rr - 1, cc]
    # invert and normalize so the border is 0 and the center is 1
    a_k = a_k.max() - a_k
    a_k /= a_k.max()
    if plot_en:
        plt.figure(figsize=(10, 10))
        plt.imshow(a_k)
        plt.xticks(np.arange(dims[1]))
        plt.yticks(np.arange(dims[0]))
        plt.colorbar()
        plt.show()
    #if len(dims)>2:
    #a_k = np.array([a_k,]*dims[2]).transpose([1,2,0])
    return a_k
def pyramid_tiles(dims_rs,
                  dims_,
                  list_order='C',
                  plot_en=False):
    """
    Mosaic of per-tile pyramid weights covering an array of size dims_rs.

    Parameters
    ----------
    dims_rs: tuple (d1, d2[, ...])
        spatial shape of the full array; only the first two entries used
    dims_: list of tuples
        spatial shape of every tile
    list_order: str {'C', 'F'}
        order in which the tiles traverse the grid
    plot_en: bool
        if True, display the combined weight map

    Returns
    -------
    a_k: np.array (dims_rs[:2])
        tile-wise pyramid weights stitched into one array
    """
    # one pyramid function per tile, then stitch them into one mosaic
    tile_weights = [pyramid_matrix(shape) for shape in dims_]
    a_k = combine_blocks(dims_rs[:2],
                         tile_weights,
                         dims_,
                         list_order=list_order)
    if plot_en:
        plt.figure(figsize=(10, 10))
        plt.imshow(a_k)
        plt.colorbar()
    return a_k
def cn_ranks(dim_block, ranks, dims, list_order='C'):
    """
    Paint per-tile ranks onto a (d1 x d2) map.

    Parameters
    ----------
    dim_block: list of tuples
        spatial shape of each tile (only the first two entries used)
    ranks: array-like
        rank assigned to each tile
    dims: tuple (d1, d2)
        shape of the output map
    list_order: str {'C', 'F'}
        traversal order of the tiles ('C' row-major, 'F' column-major)

    Returns
    -------
    Crank: np.array (dims)
        float map whose pixels hold the (integer) rank of their tile
    """
    Crank = np.full(dims, np.nan)
    d1, d2 = Crank.shape
    row, col = 0, 0
    for idx, rk in enumerate(ranks):
        h, w = dim_block[idx][:2]
        Crank[row:row + h, col:col + w].fill(int(rk))
        # advance along the traversal order, wrapping at the array edge
        if list_order == 'F':
            row += h
            if row == d1:
                col += w
                row = 0
        else:
            col += w
            if col == d2:
                row += h
                col = 0
    return Crank
def combine_blocks(dimsM,
                   Mc,
                   dimsMc=None,
                   list_order='C',
                   array_order='F'):
    """
    Stitch a list of tiles back into one array.

    Parameters
    ----------
    dimsM: tuple (d1, d2) or (d1, d2, T)
        shape of the reconstructed array
    Mc: list or np.array
        tiles to reassemble; a NaN-padded tile is unpadded first.
        If dimsMc is None, Mc must hold (d1' x d2' x T) tiles.
    dimsMc: array of tuples, optional
        original shape of each tile; inferred from Mc when None
    list_order: str {'C', 'F'}
        order in which the tiles traverse the grid
    array_order: str {'C', 'F'}
        order used to unflatten (d x T) tiles into (d1', d2', T)

    Returns
    -------
    Mall: np.array (dimsM)
        reassembled array
    """
    ndims = len(dimsM)
    if ndims == 3:
        d1, d2, T = dimsM
        Mall = np.full((d1, d2, T), np.nan)
    elif ndims == 2:
        d1, d2 = dimsM[:2]
        Mall = np.full((d1, d2), np.nan)
    if type(Mc) == list:
        k = len(Mc)
    elif type(Mc) == np.ndarray:
        k = Mc.shape[0]
    else:
        print('error= must be np.array or list')
    if dimsMc is None:
        dimsMc = np.asarray([np.shape(tile) for tile in Mc])
    row, col = 0, 0
    for idx, tile in enumerate(Mc):
        h, w = dimsMc[idx][:2]
        if (np.isnan(tile).any()):
            # NOTE(review): unpad is not defined in the visible part of
            # this module — confirm it is provided elsewhere
            tile = unpad(tile)
        if tile.ndim < 3 and ndims == 3:
            # unflatten a (d x T) tile into its spatial shape
            tile = tile.reshape((h, w) + (T,), order=array_order)
        if ndims == 3:
            Mall[row:row + h, col:col + w, :] = tile
        elif ndims == 2:
            Mall[row:row + h, col:col + w] = tile
        # advance along the traversal order, wrapping at the array edge
        if list_order == 'F':
            row += h
            if row == d1:
                col += w
                row = 0
        else:
            col += w
            if col == d2:
                row += h
                col = 0
    return Mall
####################
# Deprecated
####################
def test_pyramids(dims,dims_rs,dims_cs,dims_rcs,W_1,W_rs,W_cs,W_rcs,row_cut,col_cut):
    """
    DEPRECATED visual sanity check of the pyramid weight maps.

    NOTE(review): `compute_ak` is not defined in the visible part of this
    module, and `combine_4xd` is called here with 7 positional arguments
    while the combine_4xd defined above requires 9 — this function will
    fail if called; kept only for reference.

    Input:
    ------
    dims, dims_rs, dims_cs, dims_rcs: tuples
        shapes of the base and the three offset tilings
    W_1, W_rs, W_cs, W_rcs:
        per-grid tile lists
    row_cut, col_cut:
        grid cut positions
    Output:
    ------
    None (plots the weight maps and prints the minimum summed weight)
    """
    ak0 = compute_ak(dims[:2],W_1,list_order='C')
    ak1 = compute_ak(dims_rs[:2],W_rs,list_order='F')
    ak2 = compute_ak(dims_cs[:2],W_cs,list_order='F')
    ak3 = compute_ak(dims_rcs[:2],W_rcs,list_order='F')
    ak0,ak1,ak2,ak3 = combine_4xd(dims,row_cut,col_cut,W_1,W_rs,W_cs,W_rcs)
    #plt.imshow((ak0+ak1+ak2+ak3)[:15,:15])
    for a_k in [ak0,ak1,ak2,ak3]:
        plt.figure(figsize=(15,10))
        plt.imshow((a_k)[:15,:15])
        plt.colorbar()
    plt.colorbar()
    print((ak0+ak1+ak2+ak3).min())
    # result of argwhere is discarded; presumably a leftover debug check
    np.argwhere((ak0+ak1+ak2+ak3)==0)
    return
def test_off_grids(mov_nn, nblocks=[10,10]):
    """
    Sanity check: split/recombine mov_nn on all four tilings.

    Runs the tiling pipeline with run_single(..., debug=True) (which
    returns its input unchanged), stitches each tiling back together and
    prints whether every reconstruction equals the corresponding crop of
    the original movie (all four prints should be True).

    Input:
    ------
    mov_nn: np.array (d1 x d2 x T)
        movie to round-trip through the tilings
    nblocks: list (2,)
        tile counts along the first two axes
    Output:
    ------
    None (prints four booleans)
    """
    dims = mov_nn.shape
    ## denoiser 1
    W_ = split_image_into_blocks(mov_nn,nblocks=nblocks)
    dW_,rank_W_ = run_single(W_,debug=True)
    del W_
    dims_ = list(map(np.shape,dW_))
    dW_ = combine_blocks(dims,
                         dW_,
                         list_order='C')
    ## denoiser 2
    W_rs, drs = offset_tiling(mov_nn,
                              nblocks=nblocks,
                              offset_case='r')
    dW_rs,rank_W_rs = run_single(W_rs,debug=True)
    del W_rs
    dims_rs = list(map(np.shape,dW_rs))
    dW_rs = combine_blocks(drs,
                           dW_rs,
                           list_order='C')
    # denoiser 3
    W_cs, dcs = offset_tiling(mov_nn,
                              nblocks=nblocks,
                              offset_case='c')
    dW_cs,rank_W_cs = run_single(W_cs,
                                 debug=True)
    del W_cs
    dims_cs = list(map(np.shape,dW_cs))
    dW_cs = combine_blocks(dcs,
                           dW_cs,
                           list_order='C')
    # denoiser 4
    W_rcs, drcs = offset_tiling(mov_nn,
                                nblocks=nblocks,
                                offset_case='rc')
    dW_rcs,rank_W_rcs = run_single(W_rcs,debug=True)
    del W_rcs
    dims_rcs = list(map(np.shape,dW_rcs))
    dW_rcs = combine_blocks(drcs,
                            dW_rcs,
                            list_order='C')
    # crop offsets of the shifted grids, for comparison against the original
    row_array,col_array = tile_grids(dims,
                                     nblocks=nblocks)
    r_offset = vector_offset(row_array)
    c_offset = vector_offset(col_array)
    r1, r2 = (row_array[1:]-r_offset)[[0,-1]]
    c1, c2 = (col_array[1:]-c_offset)[[0,-1]]
    print (np.array_equiv(mov_nn,dW_))
    print (np.array_equiv(mov_nn[r1:r2,:,:],dW_rs))
    print (np.array_equiv(mov_nn[:,c1:c2,:],dW_cs))
    print (np.array_equiv(mov_nn[r1:r2,c1:c2,:],dW_rcs))
    return
def test_running_times(W, nblocks=[4,30]):
    """
    Time denoise_dx_tiles on increasingly long temporal slices of W.

    Input:
    ------
    W: np.array (d1 x d2 x T)
        movie; must contain more than 6000 frames
    nblocks: list (2,)
        tiling parameter forwarded to denoise_dx_tiles
    Output:
    ------
    run_times: np.array (7,)
        wall-clock seconds for each slice length in
        np.linspace(1000, 7000, 7) (also plotted)
    """
    dims = W.shape
    assert dims[2] > 6000
    t_times = np.linspace(1000, 7000, 7).astype('int')
    run_times = np.zeros((len(t_times),))
    for ii, ttime in enumerate(t_times):
        start = time.time()
        # Bug fix: the original sliced an undefined name `image_`
        # instead of the argument W.
        _ = denoise_dx_tiles(W[:, :, :ttime], nblocks=nblocks, dx=1)
        # Bug fix: timings were stored in `run_times2` but read, plotted
        # and returned as `run_times`, which was never defined.
        run_times[ii] = time.time() - start
        print('Run for %f' % (run_times[ii]))
    plt.plot(t_times, run_times, 'bo-')
    plt.xlabel('Number of [%d, %d] frames' % (dims[0], dims[1]))
    plt.ylabel('Run time [s]')
    return run_times
| [
"numpy.prod",
"matplotlib.pyplot.ylabel",
"multiprocessing.Process",
"multiprocessing.cpu_count",
"numpy.array_split",
"numpy.array",
"numpy.arange",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"itertools.product",
"numpy.asarray",
"numpy.diff",
"numpy... | [((3485, 3510), 'numpy.append', 'np.append', (['d_row', 'dims[0]'], {}), '(d_row, dims[0])\n', (3494, 3510), True, 'import numpy as np\n'), ((3522, 3547), 'numpy.append', 'np.append', (['d_col', 'dims[1]'], {}), '(d_col, dims[1])\n', (3531, 3547), True, 'import numpy as np\n'), ((3782, 3813), 'numpy.zeros', 'np.zeros', (['(number_of_blocks, 2)'], {}), '((number_of_blocks, 2))\n', (3790, 3813), True, 'import numpy as np\n'), ((12782, 12800), 'numpy.zeros', 'np.zeros', (['dims[:2]'], {}), '(dims[:2])\n', (12790, 12800), True, 'import numpy as np\n'), ((12811, 12829), 'numpy.zeros', 'np.zeros', (['dims[:2]'], {}), '(dims[:2])\n', (12819, 12829), True, 'import numpy as np\n'), ((12840, 12858), 'numpy.zeros', 'np.zeros', (['dims[:2]'], {}), '(dims[:2])\n', (12848, 12858), True, 'import numpy as np\n'), ((13583, 13597), 'numpy.zeros', 'np.zeros', (['dims'], {}), '(dims)\n', (13591, 13597), True, 'import numpy as np\n'), ((13607, 13621), 'numpy.zeros', 'np.zeros', (['dims'], {}), '(dims)\n', (13615, 13621), True, 'import numpy as np\n'), ((13631, 13645), 'numpy.zeros', 'np.zeros', (['dims'], {}), '(dims)\n', (13639, 13645), True, 'import numpy as np\n'), ((15466, 15477), 'time.time', 'time.time', ([], {}), '()\n', (15475, 15477), False, 'import time\n'), ((19649, 19660), 'time.time', 'time.time', ([], {}), '()\n', (19658, 19660), False, 'import time\n'), ((22771, 22782), 'time.time', 'time.time', ([], {}), '()\n', (22780, 22782), False, 'import time\n'), ((22794, 22825), 'multiprocessing.Pool', 'multiprocessing.Pool', (['cpu_count'], {}), '(cpu_count)\n', (22814, 22825), False, 'import multiprocessing\n'), ((24184, 24202), 'numpy.zeros', 'np.zeros', (['dims[:2]'], {}), '(dims[:2])\n', (24192, 24202), True, 'import numpy as np\n'), ((29288, 29302), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (29300, 29302), True, 'import matplotlib.pyplot as plt\n'), ((29342, 29381), 'numpy.argwhere', 'np.argwhere', (['(ak0 + ak1 + ak2 + ak3 == 0)'], {}), 
'(ak0 + ak1 + ak2 + ak3 == 0)\n', (29353, 29381), True, 'import numpy as np\n'), ((31548, 31562), 'numpy.zeros', 'np.zeros', (['(7,)'], {}), '((7,))\n', (31556, 31562), True, 'import numpy as np\n'), ((31793, 31828), 'matplotlib.pyplot.plot', 'plt.plot', (['t_times', 'run_times', '"""bo-"""'], {}), "(t_times, run_times, 'bo-')\n", (31801, 31828), True, 'import matplotlib.pyplot as plt\n'), ((31831, 31891), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Number of [%d, %d] frames' % (dims[0], dims[1]))"], {}), "('Number of [%d, %d] frames' % (dims[0], dims[1]))\n", (31841, 31891), True, 'import matplotlib.pyplot as plt\n'), ((31893, 31919), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Run time [s]"""'], {}), "('Run time [s]')\n", (31903, 31919), True, 'import matplotlib.pyplot as plt\n'), ((1647, 1663), 'numpy.prod', 'np.prod', (['nblocks'], {}), '(nblocks)\n', (1654, 1663), True, 'import numpy as np\n'), ((1849, 1890), 'numpy.array_split', 'np.array_split', (['image', 'nblocks[0]'], {'axis': '(0)'}), '(image, nblocks[0], axis=0)\n', (1863, 1890), True, 'import numpy as np\n'), ((3369, 3391), 'numpy.insert', 'np.insert', (['d_row', '(0)', '(0)'], {}), '(d_row, 0, 0)\n', (3378, 3391), True, 'import numpy as np\n'), ((3406, 3428), 'numpy.insert', 'np.insert', (['d_col', '(0)', '(0)'], {}), '(d_col, 0, 0)\n', (3415, 3428), True, 'import numpy as np\n'), ((3567, 3589), 'numpy.insert', 'np.insert', (['d_row', '(0)', '(0)'], {}), '(d_row, 0, 0)\n', (3576, 3589), True, 'import numpy as np\n'), ((3609, 3631), 'numpy.insert', 'np.insert', (['d_col', '(0)', '(0)'], {}), '(d_col, 0, 0)\n', (3618, 3631), True, 'import numpy as np\n'), ((3842, 3863), 'itertools.product', 'product', (['d_row', 'd_col'], {}), '(d_row, d_col)\n', (3849, 3863), False, 'from itertools import product\n'), ((13906, 13934), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (13916, 13934), True, 'import matplotlib.pyplot as plt\n'), ((13942, 13983), 
'matplotlib.pyplot.imshow', 'plt.imshow', (['(ak0 + ak1 + ak2 + ak3)[:, :]'], {}), '((ak0 + ak1 + ak2 + ak3)[:, :])\n', (13952, 13983), True, 'import matplotlib.pyplot as plt\n'), ((13985, 13999), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (13997, 13999), True, 'import matplotlib.pyplot as plt\n'), ((15606, 15617), 'time.time', 'time.time', ([], {}), '()\n', (15615, 15617), False, 'import time\n'), ((15633, 15664), 'multiprocessing.Pool', 'multiprocessing.Pool', (['cpu_count'], {}), '(cpu_count)\n', (15653, 15664), False, 'import multiprocessing\n'), ((19788, 19811), 'multiprocessing.Queue', 'multiprocessing.Queue', ([], {}), '()\n', (19809, 19811), False, 'import multiprocessing\n'), ((22964, 23200), 'functools.partial', 'partial', (['gpca.denoise_patch'], {'maxlag': 'maxlag', 'confidence': 'confidence', 'greedy': 'greedy', 'fudge_factor': 'fudge_factor', 'mean_th_factor': 'mean_th_factor', 'U_update': 'U_update', 'min_rank': 'min_rank', 'stim_knots': 'stim_knots', 'stim_delta': 'stim_delta'}), '(gpca.denoise_patch, maxlag=maxlag, confidence=confidence, greedy=\n greedy, fudge_factor=fudge_factor, mean_th_factor=mean_th_factor,\n U_update=U_update, min_rank=min_rank, stim_knots=stim_knots, stim_delta\n =stim_delta)\n', (22971, 23200), False, 'from functools import partial\n'), ((24216, 24233), 'math.ceil', 'ceil', (['(dims[0] / 2)'], {}), '(dims[0] / 2)\n', (24220, 24233), False, 'from math import ceil\n'), ((24232, 24249), 'math.ceil', 'ceil', (['(dims[1] / 2)'], {}), '(dims[1] / 2)\n', (24236, 24249), False, 'from math import ceil\n'), ((24656, 24684), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (24666, 24684), True, 'import matplotlib.pyplot as plt\n'), ((24692, 24707), 'matplotlib.pyplot.imshow', 'plt.imshow', (['a_k'], {}), '(a_k)\n', (24702, 24707), True, 'import matplotlib.pyplot as plt\n'), ((24794, 24808), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (24806, 
24808), True, 'import matplotlib.pyplot as plt\n'), ((24817, 24827), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (24825, 24827), True, 'import matplotlib.pyplot as plt\n'), ((25834, 25862), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (25844, 25862), True, 'import matplotlib.pyplot as plt\n'), ((25870, 25885), 'matplotlib.pyplot.imshow', 'plt.imshow', (['a_k'], {}), '(a_k)\n', (25880, 25885), True, 'import matplotlib.pyplot as plt\n'), ((25894, 25908), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (25906, 25908), True, 'import matplotlib.pyplot as plt\n'), ((26008, 26028), 'numpy.zeros', 'np.zeros', ([], {'shape': 'dims'}), '(shape=dims)\n', (26016, 26028), True, 'import numpy as np\n'), ((29197, 29225), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)'}), '(figsize=(15, 10))\n', (29207, 29225), True, 'import matplotlib.pyplot as plt\n'), ((29233, 29258), 'matplotlib.pyplot.imshow', 'plt.imshow', (['a_k[:15, :15]'], {}), '(a_k[:15, :15])\n', (29243, 29258), True, 'import matplotlib.pyplot as plt\n'), ((29268, 29282), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (29280, 29282), True, 'import matplotlib.pyplot as plt\n'), ((31130, 31157), 'numpy.array_equiv', 'np.array_equiv', (['mov_nn', 'dW_'], {}), '(mov_nn, dW_)\n', (31144, 31157), True, 'import numpy as np\n'), ((31169, 31211), 'numpy.array_equiv', 'np.array_equiv', (['mov_nn[r1:r2, :, :]', 'dW_rs'], {}), '(mov_nn[r1:r2, :, :], dW_rs)\n', (31183, 31211), True, 'import numpy as np\n'), ((31221, 31263), 'numpy.array_equiv', 'np.array_equiv', (['mov_nn[:, c1:c2, :]', 'dW_cs'], {}), '(mov_nn[:, c1:c2, :], dW_cs)\n', (31235, 31263), True, 'import numpy as np\n'), ((31273, 31320), 'numpy.array_equiv', 'np.array_equiv', (['mov_nn[r1:r2, c1:c2, :]', 'dW_rcs'], {}), '(mov_nn[r1:r2, c1:c2, :], dW_rcs)\n', (31287, 31320), True, 'import numpy as np\n'), ((31620, 31631), 'time.time', 'time.time', ([], {}), 
'()\n', (31629, 31631), False, 'import time\n'), ((815, 829), 'numpy.zeros', 'np.zeros', (['(n,)'], {}), '((n,))\n', (823, 829), True, 'import numpy as np\n'), ((1951, 1990), 'numpy.array_split', 'np.array_split', (['row', 'nblocks[1]'], {'axis': '(1)'}), '(row, nblocks[1], axis=1)\n', (1965, 1990), True, 'import numpy as np\n'), ((13797, 13825), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (13807, 13825), True, 'import matplotlib.pyplot as plt\n'), ((13837, 13858), 'matplotlib.pyplot.imshow', 'plt.imshow', (['ak_[:, :]'], {}), '(ak_[:, :])\n', (13847, 13858), True, 'import matplotlib.pyplot as plt\n'), ((13870, 13880), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13878, 13880), True, 'import matplotlib.pyplot as plt\n'), ((15861, 16114), 'functools.partial', 'partial', (['gpca.denoise_patch'], {'confidence': 'confidence', 'fudge_factor': 'fudge_factor', 'greedy': 'greedy', 'maxlag': 'maxlag', 'mean_th': 'mean_th', 'mean_th_factor': 'mean_th_factor', 'min_rank': 'min_rank', 'snr_threshold': 'snr_threshold', 'U_update': 'U_update', 'verbose': 'verbose'}), '(gpca.denoise_patch, confidence=confidence, fudge_factor=\n fudge_factor, greedy=greedy, maxlag=maxlag, mean_th=mean_th,\n mean_th_factor=mean_th_factor, min_rank=min_rank, snr_threshold=\n snr_threshold, U_update=U_update, verbose=verbose)\n', (15868, 16114), False, 'from functools import partial\n'), ((17470, 17487), 'numpy.asarray', 'np.asarray', (['vtids'], {}), '(vtids)\n', (17480, 17487), True, 'import numpy as np\n'), ((19934, 20031), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'mp_worker', 'args': '(Y[chunksize * i:chunksize * (i + 1)], out_q)'}), '(target=mp_worker, args=(Y[chunksize * i:chunksize *\n (i + 1)], out_q))\n', (19957, 20031), False, 'import multiprocessing\n'), ((21670, 21687), 'numpy.asarray', 'np.asarray', (['vtids'], {}), '(vtids)\n', (21680, 21687), True, 'import numpy as np\n'), ((23721, 23738), 
'numpy.asarray', 'np.asarray', (['vtids'], {}), '(vtids)\n', (23731, 23738), True, 'import numpy as np\n'), ((24727, 24745), 'numpy.arange', 'np.arange', (['dims[1]'], {}), '(dims[1])\n', (24736, 24745), True, 'import numpy as np\n'), ((24766, 24784), 'numpy.arange', 'np.arange', (['dims[0]'], {}), '(dims[0])\n', (24775, 24784), True, 'import numpy as np\n'), ((27606, 27633), 'numpy.zeros', 'np.zeros', ([], {'shape': '(d1, d2, T)'}), '(shape=(d1, d2, T))\n', (27614, 27633), True, 'import numpy as np\n'), ((31493, 31519), 'numpy.linspace', 'np.linspace', (['(1000)', '(7000)', '(7)'], {}), '(1000, 7000, 7)\n', (31504, 31519), True, 'import numpy as np\n'), ((31725, 31736), 'time.time', 'time.time', ([], {}), '()\n', (31734, 31736), False, 'import time\n'), ((15523, 15550), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (15548, 15550), False, 'import multiprocessing\n'), ((19741, 19768), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (19766, 19768), False, 'import multiprocessing\n'), ((21746, 21757), 'time.time', 'time.time', ([], {}), '()\n', (21755, 21757), False, 'import time\n'), ((23609, 23620), 'time.time', 'time.time', ([], {}), '()\n', (23618, 23620), False, 'import time\n'), ((27702, 27726), 'numpy.zeros', 'np.zeros', ([], {'shape': '(d1, d2)'}), '(shape=(d1, d2))\n', (27710, 27726), True, 'import numpy as np\n'), ((28095, 28107), 'numpy.isnan', 'np.isnan', (['Mn'], {}), '(Mn)\n', (28103, 28107), True, 'import numpy as np\n'), ((2053, 2068), 'numpy.array', 'np.array', (['block'], {}), '(block)\n', (2061, 2068), True, 'import numpy as np\n'), ((2494, 2508), 'numpy.diff', 'np.diff', (['array'], {}), '(array)\n', (2501, 2508), True, 'import numpy as np\n'), ((17548, 17559), 'time.time', 'time.time', ([], {}), '()\n', (17557, 17559), False, 'import time\n')] |
import wx
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.backends.backend_wxagg import \
FigureCanvasWxAgg as FigureCanvas, \
NavigationToolbar2WxAgg as NavigationToolbar
class Overview(wx.Panel):

    """Tab summarising, per channel and per marker, how many epochs were
    flagged as outliers (broken, above threshold, blink or hand-selected)."""

    def __init__(self, ParentFrame, Data):
        # Create Data Frame window
        wx.Panel.__init__(self, parent=ParentFrame, style=wx.SUNKEN_BORDER)
        # Specify relevant variables
        self.Data = Data
        self.ParentFrame = ParentFrame
        newFigure(self)

    def update(self, Results):
        """Redraw both outlier histograms from *Results*.

        Resets to an empty figure when no dataset is loaded.  As a side
        effect, stores the computed distributions on ``self.Data.Results``
        (the ``O...`` attributes) for later use in the verbose output.
        """
        if self.Data.Datasets == []:
            newFigure(self)
        else:
            # Get relevant information
            self.figure.clear()
            # Create bad channel histogram
            axes = self.figure.add_subplot(2, 1, 1)
            axes.clear()
            nChannels = np.arange(len(Results.badChannelsLabel))
            axes.bar(nChannels, Results.distChannelBroken, 0.75, color='c',
                     label='Broken', alpha=0.5)
            axes.bar(nChannels, Results.distChannelThreshold, 0.75, color='r',
                     label='Threshold', alpha=0.5)
            # BUGFIX: these bars show manually *selected* epochs; the label
            # previously duplicated the 'Broken' label of the first series.
            axes.bar(nChannels, Results.distChannelSelected, 0.75,
                     color='#ff8c00', label='Selected', alpha=0.5)
            distOutliersChannel = np.vstack(
                [Results.distChannelThreshold,
                 Results.distChannelBroken,
                 Results.distChannelSelected]).sum(axis=0)
            # Needed for verbose file
            self.Data.Results.OoutlierChannels = int(sum(distOutliersChannel))
            axes.title.set_text(
                'Channel Overview - %s Epochs Total (%s Outliers)'
                % (Results.markers.shape[0],
                   int(sum(distOutliersChannel))))
            axes.grid(True, axis='y')
            axes.set_ylabel('Epochs')
            axes.set_xticks(nChannels + .75 / 2)
            axes.set_xticklabels(Results.badChannelsLabel, rotation=90)
            # Write percentage of outliers in channel overview plot
            distOutliersChannel = 1. * \
                distOutliersChannel / Results.markers.shape[0]
            ticks = axes.get_xticks()
            for i, d in enumerate(distOutliersChannel):
                percentage = np.round(distOutliersChannel[i] * 100., 1)
                if percentage != 0:
                    axes.text(
                        ticks[i], 0.8,
                        '{0}%'.format(str(percentage)),
                        horizontalalignment='center',
                        verticalalignment='bottom', rotation=90)
            # Y-axis should only use integers
            yticks = axes.get_yticks().astype('int')
            axes.set_yticks(np.unique(yticks))
            # Create bad marker histogram (stacked: OK / selected /
            # threshold / blink / broken)
            axes = self.figure.add_subplot(2, 1, 2)
            axes.clear()
            nMarker = np.arange(Results.uniqueMarkers.shape[0])
            axes.bar(nMarker, Results.distMarkerOK, 0.75, color='g',
                     label='OK', alpha=0.5)
            axes.bar(
                nMarker, Results.distMarkerSelected, 0.75, color='#ff8c00',
                bottom=Results.distMarkerOK, label='Outliers', alpha=0.5)
            axes.bar(nMarker, Results.distMarkerThreshold, 0.75, color='r',
                     bottom=np.sum(
                         np.vstack((Results.distMarkerOK,
                                    Results.distMarkerSelected)), axis=0),
                     label='Threshold', alpha=0.5)
            axes.bar(nMarker, Results.distMarkerBlink, 0.75, color='b',
                     bottom=np.sum(
                         np.vstack((Results.distMarkerOK,
                                    Results.distMarkerSelected,
                                    Results.distMarkerThreshold)), axis=0),
                     label='Blink', alpha=0.5)
            axes.bar(nMarker, Results.distMarkerBroken, 0.75, color='c',
                     bottom=np.sum(
                         np.vstack((Results.distMarkerOK,
                                    Results.distMarkerSelected,
                                    Results.distMarkerThreshold,
                                    Results.distMarkerBlink)), axis=0),
                     label='Broken', alpha=0.5)
            percentageBad = 1 - float(
                sum(Results.distMarkerOK)) / self.Data.Results.okID.shape[0]
            nOutliers = int(
                self.Data.Results.okID.shape[0] - sum(Results.distMarkerOK))
            axes.title.set_text(
                'Marker Overview - {0} Outliers [{1}%]'.format(
                    nOutliers, round(percentageBad * 100, 1)))
            axes.grid(True, axis='y')
            axes.set_ylabel('Epochs')
            axes.set_xticks(nMarker + .75 / 2)
            axes.set_xticklabels(Results.uniqueMarkers.astype('str'))
            # Write percentage of outliers in marker overview plot
            distOutliersMarker = np.vstack(
                [Results.distMarkerThreshold,
                 Results.distMarkerBroken,
                 Results.distMarkerBlink,
                 Results.distMarkerSelected]).sum(axis=0)
            distOutliersMarker = np.divide(
                distOutliersMarker.astype('float'),
                distOutliersMarker + Results.distMarkerOK)
            ticks = axes.get_xticks()
            for i, d in enumerate(distOutliersMarker):
                # displayed value is the percentage of *good* epochs
                percentage = 100 - np.round(distOutliersMarker[i] * 100., 1)
                axes.text(
                    ticks[i], 0.8,
                    '{0}%'.format(str(percentage)),
                    horizontalalignment='center',
                    verticalalignment='bottom', rotation=90)
            # Adjust and draw histograms
            self.figure.tight_layout()
            self.canvas.draw()
            # Save distributions for latter access
            dist = self.Data.Results
            dist.OnSelectedOutliers = Results.nSelectedOutliers
            dist.OdistChannelThreshold = Results.distChannelThreshold
            dist.OBroken = len(Results.brokenID)
            dist.OBlink = Results.matrixBlink.sum()
            dist.OpercentageChannels = distOutliersChannel
            dist.OxaxisChannel = self.Data.labelsChannel[Results.badChannelsID]
            dist.OoutlierEpochs = nOutliers
            dist.OdistMarkerOK = Results.distMarkerOK
            dist.OdistMarkerThreshold = Results.distMarkerThreshold
            dist.OdistMarkerBroken = Results.distMarkerBroken
            dist.OdistMarkerBlink = Results.distMarkerBlink
            dist.OdistMarkerSelected = Results.distMarkerSelected
            dist.OpercentageMarker = distOutliersMarker
            dist.OxaxisMarker = Results.uniqueMarkers.astype('str')
class GFPSummary(wx.Panel):

    """Tab plotting the averaged Global Field Power (GFP) of every
    unhidden marker in a single overview figure."""

    def __init__(self, ParentFrame, Data):
        # Create Data Frame window
        wx.Panel.__init__(self, parent=ParentFrame, style=wx.SUNKEN_BORDER)
        # Specify relevant variables
        self.Data = Data
        self.ParentFrame = ParentFrame
        newFigure(self, showGrid=True)
        # Figure events
        self.canvas.mpl_connect('button_press_event', self.gotoDetailGFP)

    def update(self, Results):
        """Redraw the GFP overview plot, or reset to an empty figure
        when no dataset is loaded."""
        if self.Data.Datasets == []:
            newFigure(self, showGrid=True)
        else:
            self.figure.clear()
            xaxis = getXaxis(Results)
            # Crop the averaged GFP to the displayed pre/post window
            avgGFP = np.array(
                Results.avgGFP)[:, Results.preCut -
                                Results.preFrame:Results.preCut +
                                Results.postFrame]
            # Which markers to show
            markers2show = np.array(
                [True if m not in self.Data.markers2hide else False
                 for m in Results.uniqueMarkers])
            plt.plot(xaxis, np.transpose(avgGFP[markers2show]))
            plt.xlabel('time [ms]')
            plt.ylabel('GFP')
            plt.title('GFP Overview')
            plt.legend(Results.uniqueMarkers[markers2show])
            self.figure.tight_layout()
            plt.grid(self.CheckboxGrid.IsChecked())
            self.canvas.draw()

    def gotoDetailGFP(self, event):
        """Mouse handler: left double click jumps to the GFP detail tab,
        right double click restores all hidden markers."""
        ax = event.inaxes
        if ax is None:
            return
        # BUGFIX: compare button codes with '==' instead of 'is' --
        # identity checks against int literals are implementation-defined
        # and raise a SyntaxWarning on Python >= 3.8.
        if event.button == 1:
            if event.dblclick:
                self.ParentFrame.SetSelection(2)
                self.canvas.ReleaseMouse()
        elif event.button == 3:
            if event.dblclick:
                if hasattr(self.Data.Results, 'collapsedMarkers'):
                    del self.Data.Results.collapsedMarkers
                self.canvas.ReleaseMouse()
                self.Data.markers2hide = []
                self.Data.Results.updateEpochs(self.Data)
            else:
                self.canvas.ReleaseMouse()

    def updateFigure(self, event):
        """Refresh this tab and all dependent tabs after a control change."""
        if self.Data.Datasets != []:
            self.Data.Overview.update(self)
            self.update(self.Data.Results)
            self.Data.ERPSummary.update([])
            self.Data.EpochsDetail.update([])
        event.Skip()
class GFPDetail(wx.Panel):

    """Tab plotting GFP (and optionally GMD) per marker, one subplot
    per unhidden marker."""

    def __init__(self, ParentFrame, Data):
        # Create Data Frame window
        wx.Panel.__init__(self, parent=ParentFrame, style=wx.SUNKEN_BORDER)
        # Specify relevant variables
        self.Data = Data
        self.ParentFrame = ParentFrame
        newFigure(self, showGrid=True, showGFP=True, showGMD=True)
        # Figure events
        self.canvas.mpl_connect('button_press_event', self.zoomInDetailGFP)

    def update(self, Results):
        """Redraw one GFP/GMD subplot per shown marker, or reset to an
        empty figure when no dataset is loaded."""
        if self.Data.Datasets == []:
            newFigure(self, showGrid=True, showGFP=True, showGMD=True)
        else:
            self.figure.clear()
            xaxis = getXaxis(Results)
            # Crop averages to the displayed pre/post window
            avgGFP = np.array(Results.avgGFP)[
                :, Results.preCut - Results.preFrame:Results.preCut +
                Results.postFrame]
            avgGMD = np.array(Results.avgGMD)[
                :, Results.preCut - Results.preFrame:Results.preCut +
                Results.postFrame]
            # Which markers to show
            markers2show = np.array(
                [True if m not in self.Data.markers2hide else False
                 for m in Results.uniqueMarkers])
            avgGFP = avgGFP[markers2show]
            avgGMD = avgGMD[markers2show]
            results2show = [r for i, r in enumerate(Results.avgGFP)
                            if markers2show[i]]
            shownMarkers = Results.uniqueMarkers[markers2show]
            figureShape = findSquare(len(shownMarkers))
            for i, g in enumerate(results2show):
                axes = self.figure.add_subplot(figureShape[0],
                                               figureShape[1],
                                               i + 1)
                if self.CheckboxGFP.IsChecked():
                    axes.plot(xaxis, avgGFP[i], 'b')
                if self.CheckboxGMD.IsChecked():
                    axes.plot(xaxis, avgGMD[i], 'r')
                # NOTE(review): indexes distMarkerOK with the *shown*
                # position i, which may diverge from the marker's original
                # index when markers are hidden -- verify against callers.
                nMarkers = Results.distMarkerOK[i]
                axes.title.set_text(
                    'Marker: %s [N=%s]' % (shownMarkers[i], nMarkers))
                axes.grid(self.CheckboxGrid.IsChecked())
            self.figure.tight_layout()
            self.canvas.draw()

    def updateFigure(self, event):
        """Redraw after a checkbox toggled."""
        if self.Data.Datasets != []:
            self.update(self.Data.Results)
        event.Skip()

    def zoomInDetailGFP(self, event):
        """Left double click on a subplot jumps to the epoch detail tab
        with that subplot's marker pre-selected."""
        ax = event.inaxes
        if ax is None:
            return
        if event.dblclick:
            # BUGFIX: '==' instead of 'is' for int literal comparison
            # (identity of small ints is implementation-defined).
            if event.button == 1:
                subplotID = event.inaxes.get_subplotspec().num1
                markerID = self.Data.Results.uniqueMarkers[subplotID]
                self.Data.ERPSummary.update(markerID)
                comboMarker = self.Data.EpochsDetail.ComboMarkers
                selectionID = int(np.where(np.array(
                    comboMarker.GetItems()) == str(markerID))[0])
                comboMarker.SetSelection(selectionID)
                self.Data.EpochsDetail.update(markerID)
                self.ParentFrame.SetSelection(3)
                self.canvas.ReleaseMouse()
class ERPSummary(wx.Panel):

    """Tab showing the averaged (butterfly) epoch per marker, with
    paging, layout, amplitude and overlay controls."""

    def __init__(self, ParentFrame, Data):
        # Create Data Frame window
        wx.Panel.__init__(self, parent=ParentFrame, style=wx.SUNKEN_BORDER)
        # Specify relevant variables
        self.Data = Data
        newFigure(self, showSummaryEpochs=True)
        # Figure events
        self.canvas.callbacks.connect('pick_event', self.onPick)

    def update(self, markerValue=[], shiftView=0):
        """Redraw the averaged-epoch grid.

        markerValue -- [] for all markers, otherwise the marker to focus;
        shiftView   -- index of the first tile shown (paging offset).
        The [] default is only compared, never mutated.
        """
        self.figure.clear()
        self.shiftView = shiftView
        self.markerValue = markerValue
        # Set correct markerList and selection
        self.allMarker = np.unique([
            m for m in self.Data.Results.markers
            if m not in self.Data.markers2hide])
        markerList = ['All '] + self.allMarker.astype('str').tolist()
        self.ComboMarkers.SetItems(markerList)
        if self.markerValue == []:
            self.ComboMarkers.SetSelection(0)
        else:
            markerID = markerList.index(str(markerValue))
            self.shiftView = markerID - 1
            self.ComboMarkers.SetSelection(markerID)
        self.ComboLayout.SetValue('1x1')
        # Prepare Visualization
        self.labelsChannel = self.Data.Datasets[0].labelsChannel
        Results = self.Data.Results
        samplingPoints = Results.epochs.shape[2]
        preStimuli = 1000. / (float(Results.sampleRate) /
                              Results.preCut)
        postStimuli = 1000. / (float(Results.sampleRate) /
                               Results.postCut)
        xaxis = [int(float(i) * (preStimuli + postStimuli) /
                     samplingPoints - preStimuli)
                 for i in range(samplingPoints)]
        # Get Visualization layout
        layout = self.ComboLayout.GetValue()
        vPlots = int(layout[-1])
        hPlots = int(layout[0])
        self.tiles = vPlots * hPlots
        # Draw the average epochs
        for k, i in enumerate(range(self.shiftView,
                                    self.tiles + self.shiftView)):
            axes = self.figure.add_subplot(vPlots, hPlots, k + 1)
            if i < len(Results.avgEpochs):
                markerID = self.allMarker[i]
                epoch = Results.avgEpochs[i]
                # Scale factor so the traces fit the tile
                sizer = np.sqrt(
                    np.sum(np.ptp(epoch, axis=1) / epoch.shape[0])) * 2
                modulator = float(self.ComboAmplitude.GetValue()[:-1])
                sizer *= modulator / 100.
                # Draw single channels
                minmax = [0, 0]
                for j, c in enumerate(epoch):
                    if self.ComboOverlay.GetValue() == 'Overlay':
                        delta = 0
                    else:
                        delta = j
                    color = 'gray'
                    lines = axes.plot(xaxis, c / sizer - delta, color,
                                      picker=1)
                    ydata = lines[0].get_ydata()
                    lineMin = ydata.min()
                    lineMax = ydata.max()
                    if minmax[0] > lineMin:
                        minmax[0] = lineMin
                    if minmax[1] < lineMax:
                        minmax[1] = lineMax
                delta = np.abs(minmax).sum() * .01
                minmax = [minmax[0] - delta, minmax[1] + delta]
                axes.set_ylim(minmax)
                axes.get_yaxis().set_visible(False)
                axes.title.set_text('Marker %s' % markerID)
                axes.vlines(0, minmax[0], minmax[1], linestyles='dotted')
        # BUGFIX: use floor division -- true division yields floats on
        # Python 3 ('Page: 1.5/2.0') and breaks the totalPage == 0 guard.
        currentPage = (self.shiftView // self.tiles) + 1
        totalPage = (len(Results.avgEpochs) - 1) // self.tiles + 1
        if totalPage == 0:
            currentPage = 0
        self.TextPages.SetLabel('Page: %s/%s ' % (currentPage, totalPage))
        self.figure.tight_layout()
        self.canvas.draw()

    def onPick(self, event):
        """Left double click on a trace annotates it with its channel label."""
        # Only do something if left double click
        if event.mouseevent.dblclick and event.mouseevent.button == 1:
            # Print Line name and color it black if requested
            if event.artist.get_picker() == 1:
                event.artist.set_color('black')
                linenumber = int(event.artist.get_label()[5:])
                xValue = 1000. * self.Data.Results.postCut / \
                    self.Data.Results.sampleRate + 1
                yValue = event.artist.get_data()[1][-1]
                event.artist.axes.text(xValue, yValue,
                                       self.labelsChannel[linenumber],
                                       color='black')
                self.canvas.draw()
        if event.mouseevent.name == 'button_press_event':
            self.canvas.ReleaseMouse()

    def updateLayout(self, event):
        """Layout combo changed: reset to the first page of all markers."""
        if hasattr(self, 'markerValue'):
            self.update([])
            self.ComboMarkers.SetSelection(0)
        event.Skip()

    def updateSize(self, event):
        """Amplitude combo changed: redraw at the current position."""
        if hasattr(self, 'markerValue'):
            self.update(self.markerValue, self.shiftView)
        event.Skip()

    def updateFigure(self, event):
        """Marker combo changed: redraw for the chosen marker."""
        if self.Data.Datasets != []:
            markerList = self.ComboMarkers.GetItems()
            marker = markerList[self.ComboMarkers.GetSelection()]
            if 'All' in marker:
                markerValue = []
            else:
                markerValue = str(marker)
            self.update(markerValue)
        event.Skip()

    def shiftViewLeft(self, event):
        """'<<' button: page backwards."""
        if self.Data.Datasets != []:
            if self.shiftView != 0:
                viewShift = self.shiftView - self.tiles
                if viewShift < 0:
                    viewShift = 0
                markerList = self.ComboMarkers.GetItems()
                if self.markerValue == []:
                    marker = []
                else:
                    markerID = markerList.index(str(self.markerValue)) - 1
                    if markerID < 1:
                        marker = []
                    else:
                        marker = markerList[markerID]
                self.update(marker, viewShift)
        event.Skip()

    def shiftViewRight(self, event):
        """'>>' button: page forwards."""
        if self.Data.Datasets != []:
            if self.shiftView + self.tiles \
                    < len(self.allMarker):
                viewShift = self.shiftView + self.tiles
                markerList = self.ComboMarkers.GetItems()
                if self.markerValue == []:
                    marker = []
                else:
                    markerID = markerList.index(str(self.markerValue)) + 1
                    if markerID >= len(markerList):
                        markerID = len(markerList) - 1
                    marker = markerList[markerID]
                self.update(marker, viewShift)
        event.Skip()

    def updateOverlay(self, event):
        """Overlay/Spread combo changed: redraw at the current position."""
        if hasattr(self, 'markerValue'):
            self.update(self.markerValue, self.shiftView)
        event.Skip()
class EpochsDetail(wx.Panel):

    """Tab showing single epochs (butterfly plots) with outlier state;
    epochs can be toggled between accepted/outlier via mouse or keys 1-6."""

    def __init__(self, ParentFrame, Data):
        # Create Data Frame window
        wx.Panel.__init__(self, parent=ParentFrame, style=wx.SUNKEN_BORDER)
        # Specify relevant variables
        self.Data = Data
        newFigure(self, showDetailEpochs=True)
        # Figure events
        self.canvas.callbacks.connect('pick_event', self.onPick)
        # To record button presses
        self.canvas.Bind(wx.EVT_CHAR, self.keyDown)

    def keyDown(self, event):
        """Interact with the figure canvas if keyboard is used"""
        if self.Data.Datasets != []:
            key = event.KeyCode
            # Keys '1'..'6' toggle the corresponding subplot
            if key == 49:
                self.keySelect(1)
            elif key == 50:
                self.keySelect(2)
            elif key == 51:
                self.keySelect(3)
            elif key == 52:
                self.keySelect(4)
            elif key == 53:
                self.keySelect(5)
            elif key == 54:
                self.keySelect(6)
            # Shift view
            elif key == 113:
                # Key: 'Q'
                self.shiftViewLeft(event)
            elif key == 101:
                # Key: 'E'
                self.shiftViewRight(event)
        event.Skip()

    def keySelect(self, figID):
        """Toggle the outlier state of the epoch shown in subplot *figID*
        (1-based), updating title/spine colors accordingly."""
        subPlots = self.canvas.figure.get_axes()
        if figID <= len(subPlots):
            selectedID = subPlots[figID - 1].get_title()
            children = subPlots[figID - 1].get_children()
            # Find the title Text object by matching its string
            textChildren = np.array(
                [[i, i.get_text()]
                 for i in children
                 if 'matplotlib.text.Text' in str(type(i))])
            titleObject = textChildren[
                np.where(textChildren[:, 1] == selectedID)[0]][0][0]
            # Title ends in '... Epoch <n>' (1-based)
            selectedID = int(selectedID[selectedID.find('Epoch') + 6:]) - 1
            selectedType = self.Data.Results.matrixSelected[selectedID]
            # If Epoch is already selected as an outlier
            if selectedType in ['selected', 'threshold', 'blink']:
                color = 'black'
                titleObject.set_fontweight('normal')
                if self.ComboOutliers.GetSelection() <= 2:
                    self.shiftView -= 1
                if selectedType == 'selected':
                    self.Data.Results.matrixSelected[
                        selectedID] = 'ok_normal'
                elif selectedType == 'threshold':
                    self.Data.Results.matrixSelected[
                        selectedID] = 'ok_thresh'
                elif selectedType == 'blink':
                    self.Data.Results.matrixSelected[
                        selectedID] = 'ok_blink'
            else:
                titleObject.set_fontweight('bold')
                if self.ComboOutliers.GetSelection() <= 2:
                    self.shiftView += 1
                if selectedType == 'ok_normal':
                    color = '#ff8c00'
                    self.Data.Results.matrixSelected[
                        selectedID] = 'selected'
                elif selectedType == 'ok_thresh':
                    color = 'r'
                    self.Data.Results.matrixSelected[
                        selectedID] = 'threshold'
                elif selectedType == 'ok_blink':
                    color = 'b'
                    self.Data.Results.matrixSelected[
                        selectedID] = 'blink'
            titleObject.set_color(color)
            for ax in titleObject.axes.spines:
                titleObject.axes.spines[ax].set_color(color)
            self.Data.Results.updateAnalysis = True
            self.canvas.draw()

    def update(self, markerValue=[], shiftView=0):
        """Redraw the epoch grid, filtered by marker and outlier category.

        markerValue -- [] for all markers, otherwise the marker to show;
        shiftView   -- index of the first tile shown (paging offset).
        The [] default is only compared, never mutated.
        """
        self.figure.clear()
        self.shiftView = shiftView
        self.markerValue = markerValue
        markerList = [
            m for m in self.Data.Results.markers
            if m not in self.Data.markers2hide]
        markerList = ['All '] + np.unique(
            markerList).astype('str').tolist()
        self.ComboMarkers.SetItems(markerList)
        if self.markerValue == []:
            self.id2Show = []
        else:
            self.id2Show = np.where(
                self.Data.Results.markers == self.markerValue)[0]
        if self.markerValue == []:
            self.ComboMarkers.SetSelection(0)
            self.id2Show = np.arange(self.Data.Results.markers.shape[0])
        # Only show unhidden markers
        self.id2Show = np.array(
            [i for i, m in enumerate(self.Data.Results.markers)
             if m not in self.Data.markers2hide and i in self.id2Show])
        # Restrict further by the selected outlier category
        if self.ComboOutliers.GetSelection() == 0:
            restrictedList = [
                i for i, m in enumerate(self.Data.Results.matrixSelected)
                if 'ok_' not in m]
            self.id2Show = [r for r in restrictedList if r in self.id2Show]
        elif self.ComboOutliers.GetSelection() == 1:
            restrictedList = [
                i for i, m in enumerate(self.Data.Results.matrixSelected)
                if m == 'threshold']
            self.id2Show = [r for r in restrictedList if r in self.id2Show]
        elif self.ComboOutliers.GetSelection() == 2:
            restrictedList = [
                i for i, m in enumerate(self.Data.Results.matrixSelected)
                if m == 'blink']
            self.id2Show = [r for r in restrictedList if r in self.id2Show]
        elif self.ComboOutliers.GetSelection() == 3:
            restrictedList = [
                i for i, m in enumerate(self.Data.Results.matrixSelected)
                if 'ok_' in m]
            self.id2Show = [r for r in restrictedList if r in self.id2Show]
        self.labelsChannel = self.Data.Datasets[0].labelsChannel
        Results = self.Data.Results
        samplingPoints = Results.epochs.shape[2]
        preStimuli = 1000. / (float(Results.sampleRate) /
                              Results.preCut)
        postStimuli = 1000. / (float(Results.sampleRate) /
                               Results.postCut)
        xaxis = [int(float(i) * (preStimuli + postStimuli) /
                     samplingPoints - preStimuli)
                 for i in range(samplingPoints)]
        # Get Visualization layout
        layout = self.ComboLayout.GetValue()
        vPlots = int(layout[-1])
        hPlots = int(layout[0])
        self.tiles = vPlots * hPlots
        # Draw the epochs
        for k, i in enumerate(range(shiftView, self.tiles + shiftView)):
            axes = self.figure.add_subplot(vPlots, hPlots, k + 1)
            if i < len(self.id2Show):
                epochID = self.id2Show[i]
                markerID = self.Data.Results.markers[epochID]
                epoch = self.Data.Results.epochs[epochID]
                idSelected = self.Data.Results.matrixSelected[epochID]
                # Scale factor so the traces fit the tile
                sizer = np.sqrt(
                    np.sum(np.ptp(epoch, axis=1) / epoch.shape[0])) * 4
                modulator = float(self.ComboAmplitude.GetValue()[:-1])
                sizer *= modulator / 100.
                # Check if the epoch is broken
                isBroken = epochID in Results.brokenID
                # Draw single channels
                minmax = [0, 0]
                for j, c in enumerate(epoch):
                    if self.ComboOverlay.GetValue() == 'Overlay':
                        delta = 0
                    else:
                        delta = j
                    if isBroken:
                        color = 'c'
                        axes.title.set_fontweight('bold')
                        axes.title.set_color(color)
                        for ax in axes.spines:
                            axes.spines[ax].set_color(color)
                    elif Results.matrixThreshold[epochID][j]:
                        color = 'r'
                        axes.text(postStimuli + 1, c[-1] / sizer - delta,
                                  self.labelsChannel[j], color=color)
                        axes.title.set_fontweight('bold')
                        axes.title.set_color(color)
                        for ax in axes.spines:
                            axes.spines[ax].set_color(color)
                    else:
                        color = 'gray'
                    lines = axes.plot(xaxis, c / sizer - delta, color,
                                      picker=1)
                    ydata = lines[0].get_ydata()
                    lineMin = ydata.min()
                    lineMax = ydata.max()
                    if minmax[0] > lineMin:
                        minmax[0] = lineMin
                    if minmax[1] < lineMax:
                        minmax[1] = lineMax
                # Draw blink periods in figure
                if Results.matrixBlink[epochID].sum() != 0:
                    blinkEpoch = np.append(Results.matrixBlink[epochID], False)
                    blinkPhase = np.where(blinkEpoch[:-1] != blinkEpoch[1:])[0]
                    color = 'm'
                    # BUGFIX: floor division -- true division passes a
                    # float to range() and raises TypeError on Python 3.
                    # Loop variable renamed from 'i' to avoid shadowing
                    # the outer epoch index.
                    for b in range(blinkPhase.shape[0] // 2):
                        axes.axvspan(xaxis[blinkPhase[2 * b]],
                                     xaxis[blinkPhase[2 * b + 1]],
                                     facecolor=color, alpha=0.2)
                    stimuliSegment = Results.matrixBlink[
                        epochID, Results.preCut -
                        Results.preFrame:Results.preCut + Results.postFrame]
                    if stimuliSegment.sum() != 0:
                        axes.title.set_fontweight('bold')
                        axes.title.set_color(color)
                        for ax in axes.spines:
                            axes.spines[ax].set_color(color)
                delta = np.abs(minmax).sum() * .01
                minmax = [minmax[0] - delta, minmax[1] + delta]
                axes.set_ylim(minmax)
                axes.get_yaxis().set_visible(False)
                axes.title.set_text('Marker %s - Epoch %s' % (markerID,
                                                              epochID + 1))
                axes.title.set_picker(5)
                axes.vlines(0, minmax[0], minmax[1], linestyles='dotted')
                if idSelected == 'selected':
                    color = '#ff8c00'
                    axes.title.set_fontweight('bold')
                    axes.title.set_color(color)
                    for ax in axes.spines:
                        axes.spines[ax].set_color(color)
                elif 'ok_' in idSelected:
                    color = 'k'
                    axes.title.set_fontweight('normal')
                    axes.title.set_color(color)
                    for ax in axes.spines:
                        axes.spines[ax].set_color(color)
        # BUGFIX: floor division for integer page numbers on Python 3
        # (also keeps the totalPage == 0 guard working).
        currentPage = (self.shiftView // self.tiles) + 1
        totalPage = (len(self.id2Show) - 1) // self.tiles + 1
        if totalPage == 0:
            currentPage = 0
        self.TextPages.SetLabel('Page: %s/%s ' % (currentPage, totalPage))
        self.figure.tight_layout()
        self.canvas.draw()

    def updateFigure(self, event):
        """Marker combo changed: redraw for the chosen marker."""
        if self.Data.Datasets != []:
            markerSelection = self.ComboMarkers.GetSelection()
            if markerSelection == 0:
                self.markerValue = []
            else:
                self.markerValue = str(self.ComboMarkers.GetValue())
            self.update(self.markerValue)
        event.Skip()

    def shiftViewLeft(self, event):
        """'<<' button / 'Q' key: page backwards."""
        if self.Data.Datasets != []:
            if self.shiftView != 0:
                viewShift = self.shiftView - self.tiles
                if viewShift < 0:
                    viewShift = 0
                self.update(self.markerValue, viewShift)
        event.Skip()

    def shiftViewRight(self, event):
        """'>>' button / 'E' key: page forwards."""
        if self.Data.Datasets != []:
            if self.shiftView + self.tiles \
                    < len(self.id2Show):
                viewShift = self.shiftView + self.tiles
                self.update(self.markerValue, viewShift)
        event.Skip()

    def onPick(self, event):
        """Left double click: on a trace, annotate it with its channel
        label; on a subplot title, toggle the epoch's outlier state."""
        # Only do something if left double click
        if event.mouseevent.dblclick and event.mouseevent.button == 1:
            # Print Line name and color it black if requested
            if event.artist.get_picker() == 1:
                event.artist.set_color('black')
                linenumber = int(event.artist.get_label()[5:])
                xValue = 1000. * self.Data.Results.postCut / \
                    self.Data.Results.sampleRate + 1
                yValue = event.artist.get_data()[1][-1]
                event.artist.axes.text(xValue, yValue,
                                       self.labelsChannel[linenumber],
                                       color='black')
            # Select or Deselect an Epoch as an Outlier
            elif event.artist.get_picker() == 5:
                selectedID = event.artist.get_text()
                selectedID = int(selectedID[selectedID.find('Epoch') + 6:]) - 1
                selectedType = self.Data.Results.matrixSelected[selectedID]
                # If Epoch is already selected as an outlier
                if selectedType in ['selected', 'threshold', 'blink']:
                    color = 'black'
                    event.artist.set_fontweight('normal')
                    if self.ComboOutliers.GetSelection() <= 2:
                        self.shiftView -= 1
                    if selectedType == 'selected':
                        self.Data.Results.matrixSelected[
                            selectedID] = 'ok_normal'
                    elif selectedType == 'threshold':
                        self.Data.Results.matrixSelected[
                            selectedID] = 'ok_thresh'
                    elif selectedType == 'blink':
                        self.Data.Results.matrixSelected[
                            selectedID] = 'ok_blink'
                else:
                    event.artist.set_fontweight('bold')
                    if self.ComboOutliers.GetSelection() <= 2:
                        self.shiftView += 1
                    if selectedType == 'ok_normal':
                        color = '#ff8c00'
                        self.Data.Results.matrixSelected[
                            selectedID] = 'selected'
                    elif selectedType == 'ok_thresh':
                        color = 'r'
                        self.Data.Results.matrixSelected[
                            selectedID] = 'threshold'
                    elif selectedType == 'ok_blink':
                        color = 'b'
                        self.Data.Results.matrixSelected[
                            selectedID] = 'blink'
                event.artist.set_color(color)
                for ax in event.artist.axes.spines:
                    event.artist.axes.spines[ax].set_color(color)
                self.Data.Results.updateAnalysis = True
            self.canvas.draw()
        if event.mouseevent.name == 'button_press_event':
            self.canvas.ReleaseMouse()

    def updateLayout(self, event):
        """Layout combo changed: redraw from the first page."""
        if hasattr(self, 'markerValue'):
            self.update(self.markerValue)
        event.Skip()

    def updateSize(self, event):
        """Amplitude combo changed: redraw at the current position."""
        if hasattr(self, 'markerValue'):
            self.update(self.markerValue, self.shiftView)
        event.Skip()

    def updateOverlay(self, event):
        """Overlay/Spread combo changed: redraw at the current position."""
        if hasattr(self, 'markerValue'):
            self.update(self.markerValue, self.shiftView)
        event.Skip()
def _addCheckbox(self, label, flags):
    """Create a checkbox bound to self.updateFigure, add it to the
    control row and return it."""
    checkbox = wx.CheckBox(self, wx.ID_ANY, label)
    checkbox.SetValue(True)
    self.hbox.Add(checkbox, 0, border=3, flag=flags)
    wx.EVT_CHECKBOX(checkbox, checkbox.Id, self.updateFigure)
    return checkbox


def _addEpochControls(self, flags, withOutliers):
    """Populate the control row with the epoch-browsing widgets (layout,
    amplitude, marker selector, optional outlier filter, paging).

    Previously duplicated verbatim in the showDetailEpochs and
    showSummaryEpochs branches of newFigure; withOutliers selects the
    detail-tab variant with the extra outlier-category combo box.
    """
    # Layout selector
    self.TextLayout = wx.StaticText(self, wx.ID_ANY, label='Layout:')
    self.ComboLayout = wx.ComboBox(self, style=wx.CB_READONLY,
                                   choices=['1x1', '1x2', '2x2', '2x3'])
    self.ComboLayout.SetSelection(2)
    wx.EVT_COMBOBOX(self.ComboLayout, self.ComboLayout.Id,
                    self.updateLayout)
    self.hbox.Add(self.TextLayout, 0, border=3, flag=flags)
    self.hbox.Add(self.ComboLayout, 0, border=3, flag=flags)
    # Amplitude selector
    self.TextSizer = wx.StaticText(self, wx.ID_ANY, label='Amplitude:')
    self.ComboAmplitude = wx.ComboBox(
        self, style=wx.CB_READONLY,
        choices=['750%', '500%', '400%', '300%', '200%', '150%',
                 '125%', '100%', '90%', '80%', '70%', '60%',
                 '50%', '40%', '30%', '20%', '10%', '5%'])
    self.ComboAmplitude.SetSelection(7)
    wx.EVT_COMBOBOX(self.ComboAmplitude, self.ComboAmplitude.Id,
                    self.updateSize)
    self.hbox.Add(self.TextSizer, 0, border=3, flag=flags)
    self.hbox.Add(self.ComboAmplitude, 0, border=3, flag=flags)
    # Marker selector
    self.TextMarker = wx.StaticText(self, wx.ID_ANY, label='Marker:')
    self.hbox.Add(self.TextMarker, 0, border=3, flag=flags)
    self.ComboMarkers = wx.ComboBox(self, style=wx.CB_READONLY,
                                    choices=['All '])
    self.ComboMarkers.SetSelection(0)
    wx.EVT_COMBOBOX(self.ComboMarkers, self.ComboMarkers.Id,
                    self.updateFigure)
    self.hbox.Add(self.ComboMarkers, 0, border=3, flag=flags)
    # Outlier-category filter (detail tab only)
    if withOutliers:
        self.ComboOutliers = wx.ComboBox(
            self, style=wx.CB_READONLY,
            choices=['Outliers', 'Threshold', 'Blink', 'Accepted', 'All'])
        self.ComboOutliers.SetSelection(0)
        wx.EVT_COMBOBOX(self.ComboOutliers, self.ComboOutliers.Id,
                        self.updateFigure)
        self.hbox.Add(self.ComboOutliers, 0, border=3, flag=flags)
    # Paging controls
    self.TextPages = wx.StaticText(self, wx.ID_ANY, label='Page: 0/0 ')
    self.hbox.Add(self.TextPages, 0, border=3, flag=flags)
    self.goLeftButton = wx.Button(self, wx.ID_ANY, "<<", size=(35, 30))
    self.goRightButton = wx.Button(self, wx.ID_ANY, ">>", size=(35, 30))
    wx.EVT_BUTTON(self.goLeftButton, self.goLeftButton.Id,
                  self.shiftViewLeft)
    wx.EVT_BUTTON(self.goRightButton, self.goRightButton.Id,
                  self.shiftViewRight)
    self.hbox.Add(self.goLeftButton, 0, border=3, flag=flags)
    self.hbox.Add(self.goRightButton, 0, border=3, flag=flags)


def newFigure(self, showGrid=False, showGFP=False, showGMD=False,
              showDetailEpochs=False, showSummaryEpochs=False):
    """Create the matplotlib figure, canvas, toolbar and control row on
    the given wx.Panel *self*; the show* flags choose which widgets the
    panel gets.  Called by every tab's __init__ and on dataset reset."""
    self.figure = plt.figure(facecolor=(0.95, 0.95, 0.95))
    self.canvas = FigureCanvas(self, wx.ID_ANY, self.figure)
    self.toolbar = NavigationToolbar(self.canvas)
    self.sizer = wx.BoxSizer(wx.VERTICAL)
    self.sizer.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
    self.hbox = wx.BoxSizer(wx.HORIZONTAL)
    flags = wx.ALIGN_LEFT | wx.ALL | wx.ALIGN_CENTER_VERTICAL
    if showGrid:
        self.CheckboxGrid = _addCheckbox(self, 'Show Grid', flags)
    if showGFP:
        self.CheckboxGFP = _addCheckbox(self, 'Show GFP', flags)
    if showGMD:
        self.CheckboxGMD = _addCheckbox(self, 'Show GMD', flags)
    if showDetailEpochs:
        _addEpochControls(self, flags, withOutliers=True)
    if showSummaryEpochs:
        _addEpochControls(self, flags, withOutliers=False)
    if showDetailEpochs or showSummaryEpochs:
        # Spread/Overlay selector is common to both epoch tabs
        self.ComboOverlay = wx.ComboBox(
            self, style=wx.CB_READONLY,
            choices=['Spread', 'Overlay'])
        self.ComboOverlay.SetSelection(0)
        wx.EVT_COMBOBOX(self.ComboOverlay, self.ComboOverlay.Id,
                        self.updateOverlay)
        self.hbox.Add(self.ComboOverlay, 0, border=3, flag=flags)
    self.sizer.Add(self.hbox, 0, flag=wx.ALIGN_LEFT | wx.TOP)
    self.sizer.Add(self.toolbar, 0, wx.EXPAND)
    self.SetSizer(self.sizer)
    self.Fit()
def findSquare(number):
    """Return (rows, cols) for the most square-ish subplot grid able to
    hold *number* plots; an empty request still yields a 1x1 grid."""
    if not number:
        return 1, 1
    rows = int(round(number ** 0.5))
    cols = int(np.ceil(float(number) / rows))
    return rows, cols
def getXaxis(Results):
    """Build the epoch time axis (ms relative to stimulus onset) from the
    pre/post window lengths and frame counts stored on *Results*."""
    nFrames = Results.preFrame + Results.postFrame
    msPerFrame = (Results.preEpoch + Results.postEpoch) / nFrames
    return [int(frame * msPerFrame - Results.preEpoch)
            for frame in range(nFrames)]
| [
"matplotlib.backends.backend_wxagg.FigureCanvasWxAgg",
"numpy.ptp",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"numpy.array",
"wx.EVT_CHECKBOX",
"wx.Panel.__init__",
"numpy.arange",
"numpy.where",
"matplotlib.pyplot.xlabel",
"wx.CheckBox",
"numpy.vstack",
"numpy.round",
"wx.EVT_COMBOBOX",
... | [((35587, 35627), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'facecolor': '(0.95, 0.95, 0.95)'}), '(facecolor=(0.95, 0.95, 0.95))\n', (35597, 35627), True, 'from matplotlib import pyplot as plt\n'), ((35647, 35689), 'matplotlib.backends.backend_wxagg.FigureCanvasWxAgg', 'FigureCanvas', (['self', 'wx.ID_ANY', 'self.figure'], {}), '(self, wx.ID_ANY, self.figure)\n', (35659, 35689), True, 'from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas, NavigationToolbar2WxAgg as NavigationToolbar\n'), ((35710, 35740), 'matplotlib.backends.backend_wxagg.NavigationToolbar2WxAgg', 'NavigationToolbar', (['self.canvas'], {}), '(self.canvas)\n', (35727, 35740), True, 'from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas, NavigationToolbar2WxAgg as NavigationToolbar\n'), ((35761, 35785), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (35772, 35785), False, 'import wx\n'), ((35869, 35895), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.HORIZONTAL'], {}), '(wx.HORIZONTAL)\n', (35880, 35895), False, 'import wx\n'), ((333, 400), 'wx.Panel.__init__', 'wx.Panel.__init__', (['self'], {'parent': 'ParentFrame', 'style': 'wx.SUNKEN_BORDER'}), '(self, parent=ParentFrame, style=wx.SUNKEN_BORDER)\n', (350, 400), False, 'import wx\n'), ((7043, 7110), 'wx.Panel.__init__', 'wx.Panel.__init__', (['self'], {'parent': 'ParentFrame', 'style': 'wx.SUNKEN_BORDER'}), '(self, parent=ParentFrame, style=wx.SUNKEN_BORDER)\n', (7060, 7110), False, 'import wx\n'), ((9390, 9457), 'wx.Panel.__init__', 'wx.Panel.__init__', (['self'], {'parent': 'ParentFrame', 'style': 'wx.SUNKEN_BORDER'}), '(self, parent=ParentFrame, style=wx.SUNKEN_BORDER)\n', (9407, 9457), False, 'import wx\n'), ((12621, 12688), 'wx.Panel.__init__', 'wx.Panel.__init__', (['self'], {'parent': 'ParentFrame', 'style': 'wx.SUNKEN_BORDER'}), '(self, parent=ParentFrame, style=wx.SUNKEN_BORDER)\n', (12638, 12688), False, 'import wx\n'), ((13132, 13221), 'numpy.unique', 
'np.unique', (['[m for m in self.Data.Results.markers if m not in self.Data.markers2hide]'], {}), '([m for m in self.Data.Results.markers if m not in self.Data.\n markers2hide])\n', (13141, 13221), True, 'import numpy as np\n'), ((19730, 19797), 'wx.Panel.__init__', 'wx.Panel.__init__', (['self'], {'parent': 'ParentFrame', 'style': 'wx.SUNKEN_BORDER'}), '(self, parent=ParentFrame, style=wx.SUNKEN_BORDER)\n', (19747, 19797), False, 'import wx\n'), ((36008, 36049), 'wx.CheckBox', 'wx.CheckBox', (['self', 'wx.ID_ANY', '"""Show Grid"""'], {}), "(self, wx.ID_ANY, 'Show Grid')\n", (36019, 36049), False, 'import wx\n'), ((36168, 36243), 'wx.EVT_CHECKBOX', 'wx.EVT_CHECKBOX', (['self.CheckboxGrid', 'self.CheckboxGrid.Id', 'self.updateFigure'], {}), '(self.CheckboxGrid, self.CheckboxGrid.Id, self.updateFigure)\n', (36183, 36243), False, 'import wx\n'), ((36305, 36345), 'wx.CheckBox', 'wx.CheckBox', (['self', 'wx.ID_ANY', '"""Show GFP"""'], {}), "(self, wx.ID_ANY, 'Show GFP')\n", (36316, 36345), False, 'import wx\n'), ((36462, 36535), 'wx.EVT_CHECKBOX', 'wx.EVT_CHECKBOX', (['self.CheckboxGFP', 'self.CheckboxGFP.Id', 'self.updateFigure'], {}), '(self.CheckboxGFP, self.CheckboxGFP.Id, self.updateFigure)\n', (36477, 36535), False, 'import wx\n'), ((36597, 36637), 'wx.CheckBox', 'wx.CheckBox', (['self', 'wx.ID_ANY', '"""Show GMD"""'], {}), "(self, wx.ID_ANY, 'Show GMD')\n", (36608, 36637), False, 'import wx\n'), ((36754, 36827), 'wx.EVT_CHECKBOX', 'wx.EVT_CHECKBOX', (['self.CheckboxGMD', 'self.CheckboxGMD.Id', 'self.updateFigure'], {}), '(self.CheckboxGMD, self.CheckboxGMD.Id, self.updateFigure)\n', (36769, 36827), False, 'import wx\n'), ((36897, 36944), 'wx.StaticText', 'wx.StaticText', (['self', 'wx.ID_ANY'], {'label': '"""Layout:"""'}), "(self, wx.ID_ANY, label='Layout:')\n", (36910, 36944), False, 'import wx\n'), ((36973, 37050), 'wx.ComboBox', 'wx.ComboBox', (['self'], {'style': 'wx.CB_READONLY', 'choices': "['1x1', '1x2', '2x2', '2x3']"}), "(self, style=wx.CB_READONLY, 
choices=['1x1', '1x2', '2x2', '2x3'])\n", (36984, 37050), False, 'import wx\n'), ((37142, 37215), 'wx.EVT_COMBOBOX', 'wx.EVT_COMBOBOX', (['self.ComboLayout', 'self.ComboLayout.Id', 'self.updateLayout'], {}), '(self.ComboLayout, self.ComboLayout.Id, self.updateLayout)\n', (37157, 37215), False, 'import wx\n'), ((37400, 37450), 'wx.StaticText', 'wx.StaticText', (['self', 'wx.ID_ANY'], {'label': '"""Amplitude:"""'}), "(self, wx.ID_ANY, label='Amplitude:')\n", (37413, 37450), False, 'import wx\n'), ((37482, 37672), 'wx.ComboBox', 'wx.ComboBox', (['self'], {'style': 'wx.CB_READONLY', 'choices': "['750%', '500%', '400%', '300%', '200%', '150%', '125%', '100%', '90%',\n '80%', '70%', '60%', '50%', '40%', '30%', '20%', '10%', '5%']"}), "(self, style=wx.CB_READONLY, choices=['750%', '500%', '400%',\n '300%', '200%', '150%', '125%', '100%', '90%', '80%', '70%', '60%',\n '50%', '40%', '30%', '20%', '10%', '5%'])\n", (37493, 37672), False, 'import wx\n'), ((37790, 37867), 'wx.EVT_COMBOBOX', 'wx.EVT_COMBOBOX', (['self.ComboAmplitude', 'self.ComboAmplitude.Id', 'self.updateSize'], {}), '(self.ComboAmplitude, self.ComboAmplitude.Id, self.updateSize)\n', (37805, 37867), False, 'import wx\n'), ((38055, 38102), 'wx.StaticText', 'wx.StaticText', (['self', 'wx.ID_ANY'], {'label': '"""Marker:"""'}), "(self, wx.ID_ANY, label='Marker:')\n", (38068, 38102), False, 'import wx\n'), ((38199, 38258), 'wx.ComboBox', 'wx.ComboBox', (['self'], {'style': 'wx.CB_READONLY', 'choices': "['All ']"}), "(self, style=wx.CB_READONLY, choices=['All '])\n", (38210, 38258), False, 'import wx\n'), ((38352, 38427), 'wx.EVT_COMBOBOX', 'wx.EVT_COMBOBOX', (['self.ComboMarkers', 'self.ComboMarkers.Id', 'self.updateFigure'], {}), '(self.ComboMarkers, self.ComboMarkers.Id, self.updateFigure)\n', (38367, 38427), False, 'import wx\n'), ((38552, 38658), 'wx.ComboBox', 'wx.ComboBox', (['self'], {'style': 'wx.CB_READONLY', 'choices': "['Outliers', 'Threshold', 'Blink', 'Accepted', 'All']"}), "(self, 
style=wx.CB_READONLY, choices=['Outliers', 'Threshold',\n 'Blink', 'Accepted', 'All'])\n", (38563, 38658), False, 'import wx\n'), ((38735, 38812), 'wx.EVT_COMBOBOX', 'wx.EVT_COMBOBOX', (['self.ComboOutliers', 'self.ComboOutliers.Id', 'self.updateFigure'], {}), '(self.ComboOutliers, self.ComboOutliers.Id, self.updateFigure)\n', (38750, 38812), False, 'import wx\n'), ((38934, 38984), 'wx.StaticText', 'wx.StaticText', (['self', 'wx.ID_ANY'], {'label': '"""Page: 0/0 """'}), "(self, wx.ID_ANY, label='Page: 0/0 ')\n", (38947, 38984), False, 'import wx\n'), ((39080, 39127), 'wx.Button', 'wx.Button', (['self', 'wx.ID_ANY', '"""<<"""'], {'size': '(35, 30)'}), "(self, wx.ID_ANY, '<<', size=(35, 30))\n", (39089, 39127), False, 'import wx\n'), ((39158, 39205), 'wx.Button', 'wx.Button', (['self', 'wx.ID_ANY', '""">>"""'], {'size': '(35, 30)'}), "(self, wx.ID_ANY, '>>', size=(35, 30))\n", (39167, 39205), False, 'import wx\n'), ((39215, 39289), 'wx.EVT_BUTTON', 'wx.EVT_BUTTON', (['self.goLeftButton', 'self.goLeftButton.Id', 'self.shiftViewLeft'], {}), '(self.goLeftButton, self.goLeftButton.Id, self.shiftViewLeft)\n', (39228, 39289), False, 'import wx\n'), ((39322, 39399), 'wx.EVT_BUTTON', 'wx.EVT_BUTTON', (['self.goRightButton', 'self.goRightButton.Id', 'self.shiftViewRight'], {}), '(self.goRightButton, self.goRightButton.Id, self.shiftViewRight)\n', (39335, 39399), False, 'import wx\n'), ((39614, 39661), 'wx.StaticText', 'wx.StaticText', (['self', 'wx.ID_ANY'], {'label': '"""Layout:"""'}), "(self, wx.ID_ANY, label='Layout:')\n", (39627, 39661), False, 'import wx\n'), ((39690, 39767), 'wx.ComboBox', 'wx.ComboBox', (['self'], {'style': 'wx.CB_READONLY', 'choices': "['1x1', '1x2', '2x2', '2x3']"}), "(self, style=wx.CB_READONLY, choices=['1x1', '1x2', '2x2', '2x3'])\n", (39701, 39767), False, 'import wx\n'), ((39859, 39932), 'wx.EVT_COMBOBOX', 'wx.EVT_COMBOBOX', (['self.ComboLayout', 'self.ComboLayout.Id', 'self.updateLayout'], {}), '(self.ComboLayout, self.ComboLayout.Id, 
self.updateLayout)\n', (39874, 39932), False, 'import wx\n'), ((40117, 40167), 'wx.StaticText', 'wx.StaticText', (['self', 'wx.ID_ANY'], {'label': '"""Amplitude:"""'}), "(self, wx.ID_ANY, label='Amplitude:')\n", (40130, 40167), False, 'import wx\n'), ((40199, 40389), 'wx.ComboBox', 'wx.ComboBox', (['self'], {'style': 'wx.CB_READONLY', 'choices': "['750%', '500%', '400%', '300%', '200%', '150%', '125%', '100%', '90%',\n '80%', '70%', '60%', '50%', '40%', '30%', '20%', '10%', '5%']"}), "(self, style=wx.CB_READONLY, choices=['750%', '500%', '400%',\n '300%', '200%', '150%', '125%', '100%', '90%', '80%', '70%', '60%',\n '50%', '40%', '30%', '20%', '10%', '5%'])\n", (40210, 40389), False, 'import wx\n'), ((40507, 40584), 'wx.EVT_COMBOBOX', 'wx.EVT_COMBOBOX', (['self.ComboAmplitude', 'self.ComboAmplitude.Id', 'self.updateSize'], {}), '(self.ComboAmplitude, self.ComboAmplitude.Id, self.updateSize)\n', (40522, 40584), False, 'import wx\n'), ((40772, 40819), 'wx.StaticText', 'wx.StaticText', (['self', 'wx.ID_ANY'], {'label': '"""Marker:"""'}), "(self, wx.ID_ANY, label='Marker:')\n", (40785, 40819), False, 'import wx\n'), ((40916, 40975), 'wx.ComboBox', 'wx.ComboBox', (['self'], {'style': 'wx.CB_READONLY', 'choices': "['All ']"}), "(self, style=wx.CB_READONLY, choices=['All '])\n", (40927, 40975), False, 'import wx\n'), ((41069, 41144), 'wx.EVT_COMBOBOX', 'wx.EVT_COMBOBOX', (['self.ComboMarkers', 'self.ComboMarkers.Id', 'self.updateFigure'], {}), '(self.ComboMarkers, self.ComboMarkers.Id, self.updateFigure)\n', (41084, 41144), False, 'import wx\n'), ((41265, 41315), 'wx.StaticText', 'wx.StaticText', (['self', 'wx.ID_ANY'], {'label': '"""Page: 0/0 """'}), "(self, wx.ID_ANY, label='Page: 0/0 ')\n", (41278, 41315), False, 'import wx\n'), ((41411, 41458), 'wx.Button', 'wx.Button', (['self', 'wx.ID_ANY', '"""<<"""'], {'size': '(35, 30)'}), "(self, wx.ID_ANY, '<<', size=(35, 30))\n", (41420, 41458), False, 'import wx\n'), ((41489, 41536), 'wx.Button', 'wx.Button', (['self', 
'wx.ID_ANY', '""">>"""'], {'size': '(35, 30)'}), "(self, wx.ID_ANY, '>>', size=(35, 30))\n", (41498, 41536), False, 'import wx\n'), ((41546, 41620), 'wx.EVT_BUTTON', 'wx.EVT_BUTTON', (['self.goLeftButton', 'self.goLeftButton.Id', 'self.shiftViewLeft'], {}), '(self.goLeftButton, self.goLeftButton.Id, self.shiftViewLeft)\n', (41559, 41620), False, 'import wx\n'), ((41653, 41730), 'wx.EVT_BUTTON', 'wx.EVT_BUTTON', (['self.goRightButton', 'self.goRightButton.Id', 'self.shiftViewRight'], {}), '(self.goRightButton, self.goRightButton.Id, self.shiftViewRight)\n', (41666, 41730), False, 'import wx\n'), ((41967, 42037), 'wx.ComboBox', 'wx.ComboBox', (['self'], {'style': 'wx.CB_READONLY', 'choices': "['Spread', 'Overlay']"}), "(self, style=wx.CB_READONLY, choices=['Spread', 'Overlay'])\n", (41978, 42037), False, 'import wx\n'), ((42117, 42193), 'wx.EVT_COMBOBOX', 'wx.EVT_COMBOBOX', (['self.ComboOverlay', 'self.ComboOverlay.Id', 'self.updateOverlay'], {}), '(self.ComboOverlay, self.ComboOverlay.Id, self.updateOverlay)\n', (42132, 42193), False, 'import wx\n'), ((2967, 3008), 'numpy.arange', 'np.arange', (['Results.uniqueMarkers.shape[0]'], {}), '(Results.uniqueMarkers.shape[0])\n', (2976, 3008), True, 'import numpy as np\n'), ((7833, 7932), 'numpy.array', 'np.array', (['[(True if m not in self.Data.markers2hide else False) for m in Results.\n uniqueMarkers]'], {}), '([(True if m not in self.Data.markers2hide else False) for m in\n Results.uniqueMarkers])\n', (7841, 7932), True, 'import numpy as np\n'), ((8043, 8066), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time [ms]"""'], {}), "('time [ms]')\n", (8053, 8066), True, 'from matplotlib import pyplot as plt\n'), ((8080, 8097), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""GFP"""'], {}), "('GFP')\n", (8090, 8097), True, 'from matplotlib import pyplot as plt\n'), ((8111, 8136), 'matplotlib.pyplot.title', 'plt.title', (['"""GFP Overview"""'], {}), "('GFP Overview')\n", (8120, 8136), True, 'from matplotlib import pyplot as 
plt\n'), ((8150, 8197), 'matplotlib.pyplot.legend', 'plt.legend', (['Results.uniqueMarkers[markers2show]'], {}), '(Results.uniqueMarkers[markers2show])\n', (8160, 8197), True, 'from matplotlib import pyplot as plt\n'), ((10346, 10445), 'numpy.array', 'np.array', (['[(True if m not in self.Data.markers2hide else False) for m in Results.\n uniqueMarkers]'], {}), '([(True if m not in self.Data.markers2hide else False) for m in\n Results.uniqueMarkers])\n', (10354, 10445), True, 'import numpy as np\n'), ((24085, 24130), 'numpy.arange', 'np.arange', (['self.Data.Results.markers.shape[0]'], {}), '(self.Data.Results.markers.shape[0])\n', (24094, 24130), True, 'import numpy as np\n'), ((2337, 2380), 'numpy.round', 'np.round', (['(distOutliersChannel[i] * 100.0)', '(1)'], {}), '(distOutliersChannel[i] * 100.0, 1)\n', (2345, 2380), True, 'import numpy as np\n'), ((2799, 2816), 'numpy.unique', 'np.unique', (['yticks'], {}), '(yticks)\n', (2808, 2816), True, 'import numpy as np\n'), ((7584, 7608), 'numpy.array', 'np.array', (['Results.avgGFP'], {}), '(Results.avgGFP)\n', (7592, 7608), True, 'import numpy as np\n'), ((7994, 8028), 'numpy.transpose', 'np.transpose', (['avgGFP[markers2show]'], {}), '(avgGFP[markers2show])\n', (8006, 8028), True, 'import numpy as np\n'), ((9991, 10015), 'numpy.array', 'np.array', (['Results.avgGFP'], {}), '(Results.avgGFP)\n', (9999, 10015), True, 'import numpy as np\n'), ((10146, 10170), 'numpy.array', 'np.array', (['Results.avgGMD'], {}), '(Results.avgGMD)\n', (10154, 10170), True, 'import numpy as np\n'), ((23895, 23950), 'numpy.where', 'np.where', (['(self.Data.Results.markers == self.markerValue)'], {}), '(self.Data.Results.markers == self.markerValue)\n', (23903, 23950), True, 'import numpy as np\n'), ((42557, 42572), 'numpy.sqrt', 'np.sqrt', (['number'], {}), '(number)\n', (42564, 42572), True, 'import numpy as np\n'), ((1343, 1445), 'numpy.vstack', 'np.vstack', (['[Results.distChannelThreshold, Results.distChannelBroken, Results.\n 
distChannelSelected]'], {}), '([Results.distChannelThreshold, Results.distChannelBroken, Results\n .distChannelSelected])\n', (1352, 1445), True, 'import numpy as np\n'), ((5074, 5198), 'numpy.vstack', 'np.vstack', (['[Results.distMarkerThreshold, Results.distMarkerBroken, Results.\n distMarkerBlink, Results.distMarkerSelected]'], {}), '([Results.distMarkerThreshold, Results.distMarkerBroken, Results.\n distMarkerBlink, Results.distMarkerSelected])\n', (5083, 5198), True, 'import numpy as np\n'), ((5569, 5611), 'numpy.round', 'np.round', (['(distOutliersMarker[i] * 100.0)', '(1)'], {}), '(distOutliersMarker[i] * 100.0, 1)\n', (5577, 5611), True, 'import numpy as np\n'), ((28637, 28683), 'numpy.append', 'np.append', (['Results.matrixBlink[epochID]', '(False)'], {}), '(Results.matrixBlink[epochID], False)\n', (28646, 28683), True, 'import numpy as np\n'), ((3439, 3500), 'numpy.vstack', 'np.vstack', (['(Results.distMarkerOK, Results.distMarkerSelected)'], {}), '((Results.distMarkerOK, Results.distMarkerSelected))\n', (3448, 3500), True, 'import numpy as np\n'), ((3736, 3831), 'numpy.vstack', 'np.vstack', (['(Results.distMarkerOK, Results.distMarkerSelected, Results.distMarkerThreshold)'], {}), '((Results.distMarkerOK, Results.distMarkerSelected, Results.\n distMarkerThreshold))\n', (3745, 3831), True, 'import numpy as np\n'), ((4096, 4216), 'numpy.vstack', 'np.vstack', (['(Results.distMarkerOK, Results.distMarkerSelected, Results.\n distMarkerThreshold, Results.distMarkerBlink)'], {}), '((Results.distMarkerOK, Results.distMarkerSelected, Results.\n distMarkerThreshold, Results.distMarkerBlink))\n', (4105, 4216), True, 'import numpy as np\n'), ((28718, 28761), 'numpy.where', 'np.where', (['(blinkEpoch[:-1] != blinkEpoch[1:])'], {}), '(blinkEpoch[:-1] != blinkEpoch[1:])\n', (28726, 28761), True, 'import numpy as np\n'), ((15791, 15805), 'numpy.abs', 'np.abs', (['minmax'], {}), '(minmax)\n', (15797, 15805), True, 'import numpy as np\n'), ((21386, 21428), 'numpy.where', 
'np.where', (['(textChildren[:, 1] == selectedID)'], {}), '(textChildren[:, 1] == selectedID)\n', (21394, 21428), True, 'import numpy as np\n'), ((23676, 23697), 'numpy.unique', 'np.unique', (['markerList'], {}), '(markerList)\n', (23685, 23697), True, 'import numpy as np\n'), ((29546, 29560), 'numpy.abs', 'np.abs', (['minmax'], {}), '(minmax)\n', (29552, 29560), True, 'import numpy as np\n'), ((14847, 14868), 'numpy.ptp', 'np.ptp', (['epoch'], {'axis': '(1)'}), '(epoch, axis=1)\n', (14853, 14868), True, 'import numpy as np\n'), ((26673, 26694), 'numpy.ptp', 'np.ptp', (['epoch'], {'axis': '(1)'}), '(epoch, axis=1)\n', (26679, 26694), True, 'import numpy as np\n')] |
import numpy as np
import pytest
from numpy.testing import assert_almost_equal, assert_raises, assert_warns
from ...tools import linear, power
from .. import Dcorr
class TestDcorrStat:
@pytest.mark.parametrize("n", [100, 200])
@pytest.mark.parametrize("obs_stat", [1.0])
@pytest.mark.parametrize("obs_pvalue", [1 / 1000])
def test_linear_oned(self, n, obs_stat, obs_pvalue):
np.random.seed(123456789)
x, y = linear(n, 1)
stat1, pvalue1 = Dcorr().test(x, y)
stat2 = Dcorr().statistic(x, y)
assert_almost_equal(stat1, obs_stat, decimal=2)
assert_almost_equal(stat2, obs_stat, decimal=2)
assert_almost_equal(pvalue1, obs_pvalue, decimal=2)
class TestDcorrTypeIError:
def test_oned(self):
np.random.seed(123456789)
est_power = power(
"Dcorr",
sim_type="indep",
sim="multimodal_independence",
n=100,
p=1,
alpha=0.05,
)
assert_almost_equal(est_power, 0.05, decimal=2)
def test_oned_fast(self):
np.random.seed(123456789)
est_power = power(
"Dcorr",
sim_type="indep",
sim="multimodal_independence",
n=100,
p=1,
alpha=0.05,
auto=True,
)
assert_almost_equal(est_power, 0.05, decimal=2)
def test_threed_fast(self):
np.random.seed(123456789)
est_power = power(
"Dcorr",
sim_type="indep",
sim="multimodal_independence",
n=100,
p=3,
alpha=0.05,
auto=True,
)
assert_almost_equal(est_power, 0.05, decimal=2)
| [
"pytest.mark.parametrize",
"numpy.random.seed",
"numpy.testing.assert_almost_equal"
] | [((193, 233), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n"""', '[100, 200]'], {}), "('n', [100, 200])\n", (216, 233), False, 'import pytest\n'), ((239, 281), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""obs_stat"""', '[1.0]'], {}), "('obs_stat', [1.0])\n", (262, 281), False, 'import pytest\n'), ((287, 336), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""obs_pvalue"""', '[1 / 1000]'], {}), "('obs_pvalue', [1 / 1000])\n", (310, 336), False, 'import pytest\n'), ((402, 427), 'numpy.random.seed', 'np.random.seed', (['(123456789)'], {}), '(123456789)\n', (416, 427), True, 'import numpy as np\n'), ((549, 596), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['stat1', 'obs_stat'], {'decimal': '(2)'}), '(stat1, obs_stat, decimal=2)\n', (568, 596), False, 'from numpy.testing import assert_almost_equal, assert_raises, assert_warns\n'), ((605, 652), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['stat2', 'obs_stat'], {'decimal': '(2)'}), '(stat2, obs_stat, decimal=2)\n', (624, 652), False, 'from numpy.testing import assert_almost_equal, assert_raises, assert_warns\n'), ((661, 712), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['pvalue1', 'obs_pvalue'], {'decimal': '(2)'}), '(pvalue1, obs_pvalue, decimal=2)\n', (680, 712), False, 'from numpy.testing import assert_almost_equal, assert_raises, assert_warns\n'), ((775, 800), 'numpy.random.seed', 'np.random.seed', (['(123456789)'], {}), '(123456789)\n', (789, 800), True, 'import numpy as np\n'), ((1001, 1048), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['est_power', '(0.05)'], {'decimal': '(2)'}), '(est_power, 0.05, decimal=2)\n', (1020, 1048), False, 'from numpy.testing import assert_almost_equal, assert_raises, assert_warns\n'), ((1088, 1113), 'numpy.random.seed', 'np.random.seed', (['(123456789)'], {}), '(123456789)\n', (1102, 1113), True, 'import numpy as np\n'), ((1337, 1384), 'numpy.testing.assert_almost_equal', 
'assert_almost_equal', (['est_power', '(0.05)'], {'decimal': '(2)'}), '(est_power, 0.05, decimal=2)\n', (1356, 1384), False, 'from numpy.testing import assert_almost_equal, assert_raises, assert_warns\n'), ((1426, 1451), 'numpy.random.seed', 'np.random.seed', (['(123456789)'], {}), '(123456789)\n', (1440, 1451), True, 'import numpy as np\n'), ((1675, 1722), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['est_power', '(0.05)'], {'decimal': '(2)'}), '(est_power, 0.05, decimal=2)\n', (1694, 1722), False, 'from numpy.testing import assert_almost_equal, assert_raises, assert_warns\n')] |
import os
import random
import pathlib
from unittest.mock import patch
import numpy as np
from word_vectors import FileType
from word_vectors.read import read
from word_vectors.convert import convert
from utils import vocab, vectors, DATA, GLOVE, W2V, W2V_TEXT, LEADER, rand_str
INPUT_MAPPING = {
GLOVE: FileType.GLOVE,
W2V: FileType.W2V,
W2V_TEXT: FileType.W2V_TEXT,
LEADER: FileType.LEADER,
}
def test_convert():
data = random.choice([GLOVE, W2V, W2V_TEXT, LEADER])
output_type = random.choice(list(FileType))
input_path = str(DATA / data)
gold_output_path = os.path.splitext(input_path)[0] + "." + str(output_type)
with patch("word_vectors.convert_module.write") as write_patch:
w, wv = read(input_path)
convert(input_path, output_file_type=output_type)
call_file, call_w, call_wv, call_type = write_patch.call_args_list[0][0]
assert call_file == gold_output_path
assert call_w == w
np.testing.assert_allclose(call_wv, wv)
assert call_type is output_type
def test_convert_with_output():
data = random.choice([GLOVE, W2V, W2V_TEXT, LEADER])
output_type = random.choice(list(FileType))
output = rand_str()
input_path = str(DATA / data)
with patch("word_vectors.convert_module.write") as write_patch:
w, wv = read(input_path)
convert(input_path, output, output_file_type=output_type)
call_file, call_w, call_wv, call_type = write_patch.call_args_list[0][0]
assert call_file == output
assert call_w == w
assert call_type == output_type
np.testing.assert_allclose(call_wv, wv)
def test_convert_with_input():
data = random.choice([GLOVE, W2V, W2V_TEXT, LEADER])
input_type = INPUT_MAPPING[data]
output_type = random.choice(list(FileType))
input_path = str(DATA / data)
output = rand_str()
with patch("word_vectors.convert_module.read") as read_patch:
with patch("word_vectors.convert_module.write") as write_patch:
w, wv = read(input_path)
read_patch.return_value = (w, wv)
convert(input_path, output, output_file_type=output_type, input_file_type=input_type)
read_patch.assert_called_once_with(input_path, input_type)
call_file, call_w, call_wv, call_type = write_patch.call_args_list[0][0]
assert call_file == output
assert call_w == w
assert call_type == output_type
np.testing.assert_allclose(call_wv, wv)
def test_convert_pathlib():
data = random.choice([GLOVE, W2V, W2V_TEXT, LEADER])
output_type = random.choice(list(FileType))
input_path = DATA / data
gold_output_path = os.path.splitext(str(input_path))[0] + "." + str(output_type)
with patch("word_vectors.convert_module.write") as write_patch:
w, wv = read(input_path)
convert(input_path, output_file_type=output_type)
call_file, call_w, call_wv, call_type = write_patch.call_args_list[0][0]
assert str(call_file) == gold_output_path
assert call_w == w
np.testing.assert_allclose(call_wv, wv)
assert call_type is output_type
def test_convert_with_output_pathlib():
data = random.choice([GLOVE, W2V, W2V_TEXT, LEADER])
output_type = random.choice(list(FileType))
output = pathlib.Path(rand_str())
input_path = DATA / data
with patch("word_vectors.convert_module.write") as write_patch:
w, wv = read(input_path)
convert(input_path, output, output_file_type=output_type)
call_file, call_w, call_wv, call_type = write_patch.call_args_list[0][0]
assert call_file == output
assert call_w == w
assert call_type == output_type
np.testing.assert_allclose(call_wv, wv)
def test_convert_with_input_pathlib():
data = random.choice([GLOVE, W2V, W2V_TEXT, LEADER])
input_type = INPUT_MAPPING[data]
output_type = random.choice(list(FileType))
input_path = DATA / data
output = pathlib.Path(rand_str())
with patch("word_vectors.convert_module.read") as read_patch:
with patch("word_vectors.convert_module.write") as write_patch:
w, wv = read(input_path)
read_patch.return_value = (w, wv)
convert(input_path, output, output_file_type=output_type, input_file_type=input_type)
read_patch.assert_called_once_with(input_path, input_type)
call_file, call_w, call_wv, call_type = write_patch.call_args_list[0][0]
assert call_file == output
assert call_w == w
assert call_type == output_type
np.testing.assert_allclose(call_wv, wv)
def test_convert_open():
data = random.choice([GLOVE, W2V, W2V_TEXT, LEADER])
output_type = random.choice(list(FileType))
input_path = DATA / data
gold_output_path = os.path.splitext(str(input_path))[0] + "." + str(output_type)
with open(input_path, "r" if data in (GLOVE, W2V_TEXT) else "rb") as input_path:
with patch("word_vectors.convert_module.write") as write_patch:
w, wv = read(input_path)
convert(input_path, output_file_type=output_type)
call_file, call_w, call_wv, call_type = write_patch.call_args_list[0][0]
assert str(call_file) == gold_output_path
assert call_w == w
np.testing.assert_allclose(call_wv, wv)
assert call_type is output_type
def test_convert_with_output_open():
data = random.choice([GLOVE, W2V, W2V_TEXT, LEADER])
output_type = random.choice(list(FileType))
output = rand_str()
input_path = DATA / data
print(output)
try:
with open(input_path, "r" if data in (GLOVE, W2V_TEXT) else "rb") as input_path:
with open(output, "w" if output_type in (FileType.GLOVE, FileType.W2V_TEXT) else "wb") as output:
with patch("word_vectors.convert_module.write") as write_patch:
w, wv = read(input_path)
convert(input_path, output, output_file_type=output_type)
call_file, call_w, call_wv, call_type = write_patch.call_args_list[0][0]
assert call_file == output
assert call_w == w
assert call_type == output_type
np.testing.assert_allclose(call_wv, wv)
finally:
os.remove(output.name)
def test_convert_with_input_open():
data = random.choice([GLOVE, W2V, W2V_TEXT, LEADER])
input_type = INPUT_MAPPING[data]
output_type = random.choice(list(FileType))
input_path = DATA / data
output = rand_str()
print(output)
try:
with open(input_path, "r" if data in (GLOVE, W2V_TEXT) else "rb") as input_path:
with open(output, "w" if output_type in (FileType.GLOVE, FileType.W2V_TEXT) else "wb") as output:
with patch("word_vectors.convert_module.read") as read_patch:
with patch("word_vectors.convert_module.write") as write_patch:
w, wv = read(input_path)
read_patch.return_value = (w, wv)
convert(input_path, output, output_file_type=output_type, input_file_type=input_type)
read_patch.assert_called_once_with(input_path, input_type)
call_file, call_w, call_wv, call_type = write_patch.call_args_list[0][0]
assert call_file == output
assert call_w == w
assert call_type == output_type
np.testing.assert_allclose(call_wv, wv)
finally:
os.remove(output.name)
| [
"random.choice",
"word_vectors.convert.convert",
"numpy.testing.assert_allclose",
"os.path.splitext",
"utils.rand_str",
"word_vectors.read.read",
"unittest.mock.patch",
"os.remove"
] | [((446, 491), 'random.choice', 'random.choice', (['[GLOVE, W2V, W2V_TEXT, LEADER]'], {}), '([GLOVE, W2V, W2V_TEXT, LEADER])\n', (459, 491), False, 'import random\n'), ((1099, 1144), 'random.choice', 'random.choice', (['[GLOVE, W2V, W2V_TEXT, LEADER]'], {}), '([GLOVE, W2V, W2V_TEXT, LEADER])\n', (1112, 1144), False, 'import random\n'), ((1206, 1216), 'utils.rand_str', 'rand_str', ([], {}), '()\n', (1214, 1216), False, 'from utils import vocab, vectors, DATA, GLOVE, W2V, W2V_TEXT, LEADER, rand_str\n'), ((1693, 1738), 'random.choice', 'random.choice', (['[GLOVE, W2V, W2V_TEXT, LEADER]'], {}), '([GLOVE, W2V, W2V_TEXT, LEADER])\n', (1706, 1738), False, 'import random\n'), ((1871, 1881), 'utils.rand_str', 'rand_str', ([], {}), '()\n', (1879, 1881), False, 'from utils import vocab, vectors, DATA, GLOVE, W2V, W2V_TEXT, LEADER, rand_str\n'), ((2564, 2609), 'random.choice', 'random.choice', (['[GLOVE, W2V, W2V_TEXT, LEADER]'], {}), '([GLOVE, W2V, W2V_TEXT, LEADER])\n', (2577, 2609), False, 'import random\n'), ((3230, 3275), 'random.choice', 'random.choice', (['[GLOVE, W2V, W2V_TEXT, LEADER]'], {}), '([GLOVE, W2V, W2V_TEXT, LEADER])\n', (3243, 3275), False, 'import random\n'), ((3841, 3886), 'random.choice', 'random.choice', (['[GLOVE, W2V, W2V_TEXT, LEADER]'], {}), '([GLOVE, W2V, W2V_TEXT, LEADER])\n', (3854, 3886), False, 'import random\n'), ((4718, 4763), 'random.choice', 'random.choice', (['[GLOVE, W2V, W2V_TEXT, LEADER]'], {}), '([GLOVE, W2V, W2V_TEXT, LEADER])\n', (4731, 4763), False, 'import random\n'), ((5498, 5543), 'random.choice', 'random.choice', (['[GLOVE, W2V, W2V_TEXT, LEADER]'], {}), '([GLOVE, W2V, W2V_TEXT, LEADER])\n', (5511, 5543), False, 'import random\n'), ((5605, 5615), 'utils.rand_str', 'rand_str', ([], {}), '()\n', (5613, 5615), False, 'from utils import vocab, vectors, DATA, GLOVE, W2V, W2V_TEXT, LEADER, rand_str\n'), ((6458, 6503), 'random.choice', 'random.choice', (['[GLOVE, W2V, W2V_TEXT, LEADER]'], {}), '([GLOVE, W2V, W2V_TEXT, LEADER])\n', 
(6471, 6503), False, 'import random\n'), ((6631, 6641), 'utils.rand_str', 'rand_str', ([], {}), '()\n', (6639, 6641), False, 'from utils import vocab, vectors, DATA, GLOVE, W2V, W2V_TEXT, LEADER, rand_str\n'), ((663, 705), 'unittest.mock.patch', 'patch', (['"""word_vectors.convert_module.write"""'], {}), "('word_vectors.convert_module.write')\n", (668, 705), False, 'from unittest.mock import patch\n'), ((738, 754), 'word_vectors.read.read', 'read', (['input_path'], {}), '(input_path)\n', (742, 754), False, 'from word_vectors.read import read\n'), ((763, 812), 'word_vectors.convert.convert', 'convert', (['input_path'], {'output_file_type': 'output_type'}), '(input_path, output_file_type=output_type)\n', (770, 812), False, 'from word_vectors.convert import convert\n'), ((974, 1013), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['call_wv', 'wv'], {}), '(call_wv, wv)\n', (1000, 1013), True, 'import numpy as np\n'), ((1260, 1302), 'unittest.mock.patch', 'patch', (['"""word_vectors.convert_module.write"""'], {}), "('word_vectors.convert_module.write')\n", (1265, 1302), False, 'from unittest.mock import patch\n'), ((1335, 1351), 'word_vectors.read.read', 'read', (['input_path'], {}), '(input_path)\n', (1339, 1351), False, 'from word_vectors.read import read\n'), ((1360, 1417), 'word_vectors.convert.convert', 'convert', (['input_path', 'output'], {'output_file_type': 'output_type'}), '(input_path, output, output_file_type=output_type)\n', (1367, 1417), False, 'from word_vectors.convert import convert\n'), ((1609, 1648), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['call_wv', 'wv'], {}), '(call_wv, wv)\n', (1635, 1648), True, 'import numpy as np\n'), ((1891, 1932), 'unittest.mock.patch', 'patch', (['"""word_vectors.convert_module.read"""'], {}), "('word_vectors.convert_module.read')\n", (1896, 1932), False, 'from unittest.mock import patch\n'), ((2781, 2823), 'unittest.mock.patch', 'patch', (['"""word_vectors.convert_module.write"""'], 
{}), "('word_vectors.convert_module.write')\n", (2786, 2823), False, 'from unittest.mock import patch\n'), ((2856, 2872), 'word_vectors.read.read', 'read', (['input_path'], {}), '(input_path)\n', (2860, 2872), False, 'from word_vectors.read import read\n'), ((2881, 2930), 'word_vectors.convert.convert', 'convert', (['input_path'], {'output_file_type': 'output_type'}), '(input_path, output_file_type=output_type)\n', (2888, 2930), False, 'from word_vectors.convert import convert\n'), ((3097, 3136), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['call_wv', 'wv'], {}), '(call_wv, wv)\n', (3123, 3136), True, 'import numpy as np\n'), ((3350, 3360), 'utils.rand_str', 'rand_str', ([], {}), '()\n', (3358, 3360), False, 'from utils import vocab, vectors, DATA, GLOVE, W2V, W2V_TEXT, LEADER, rand_str\n'), ((3400, 3442), 'unittest.mock.patch', 'patch', (['"""word_vectors.convert_module.write"""'], {}), "('word_vectors.convert_module.write')\n", (3405, 3442), False, 'from unittest.mock import patch\n'), ((3475, 3491), 'word_vectors.read.read', 'read', (['input_path'], {}), '(input_path)\n', (3479, 3491), False, 'from word_vectors.read import read\n'), ((3500, 3557), 'word_vectors.convert.convert', 'convert', (['input_path', 'output'], {'output_file_type': 'output_type'}), '(input_path, output, output_file_type=output_type)\n', (3507, 3557), False, 'from word_vectors.convert import convert\n'), ((3749, 3788), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['call_wv', 'wv'], {}), '(call_wv, wv)\n', (3775, 3788), True, 'import numpy as np\n'), ((4027, 4037), 'utils.rand_str', 'rand_str', ([], {}), '()\n', (4035, 4037), False, 'from utils import vocab, vectors, DATA, GLOVE, W2V, W2V_TEXT, LEADER, rand_str\n'), ((4048, 4089), 'unittest.mock.patch', 'patch', (['"""word_vectors.convert_module.read"""'], {}), "('word_vectors.convert_module.read')\n", (4053, 4089), False, 'from unittest.mock import patch\n'), ((6386, 6408), 'os.remove', 'os.remove', 
(['output.name'], {}), '(output.name)\n', (6395, 6408), False, 'import os\n'), ((7662, 7684), 'os.remove', 'os.remove', (['output.name'], {}), '(output.name)\n', (7671, 7684), False, 'import os\n'), ((1961, 2003), 'unittest.mock.patch', 'patch', (['"""word_vectors.convert_module.write"""'], {}), "('word_vectors.convert_module.write')\n", (1966, 2003), False, 'from unittest.mock import patch\n'), ((2040, 2056), 'word_vectors.read.read', 'read', (['input_path'], {}), '(input_path)\n', (2044, 2056), False, 'from word_vectors.read import read\n'), ((2115, 2205), 'word_vectors.convert.convert', 'convert', (['input_path', 'output'], {'output_file_type': 'output_type', 'input_file_type': 'input_type'}), '(input_path, output, output_file_type=output_type, input_file_type=\n input_type)\n', (2122, 2205), False, 'from word_vectors.convert import convert\n'), ((2483, 2522), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['call_wv', 'wv'], {}), '(call_wv, wv)\n', (2509, 2522), True, 'import numpy as np\n'), ((4118, 4160), 'unittest.mock.patch', 'patch', (['"""word_vectors.convert_module.write"""'], {}), "('word_vectors.convert_module.write')\n", (4123, 4160), False, 'from unittest.mock import patch\n'), ((4197, 4213), 'word_vectors.read.read', 'read', (['input_path'], {}), '(input_path)\n', (4201, 4213), False, 'from word_vectors.read import read\n'), ((4272, 4362), 'word_vectors.convert.convert', 'convert', (['input_path', 'output'], {'output_file_type': 'output_type', 'input_file_type': 'input_type'}), '(input_path, output, output_file_type=output_type, input_file_type=\n input_type)\n', (4279, 4362), False, 'from word_vectors.convert import convert\n'), ((4640, 4679), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['call_wv', 'wv'], {}), '(call_wv, wv)\n', (4666, 4679), True, 'import numpy as np\n'), ((5024, 5066), 'unittest.mock.patch', 'patch', (['"""word_vectors.convert_module.write"""'], {}), "('word_vectors.convert_module.write')\n", 
(5029, 5066), False, 'from unittest.mock import patch\n'), ((5103, 5119), 'word_vectors.read.read', 'read', (['input_path'], {}), '(input_path)\n', (5107, 5119), False, 'from word_vectors.read import read\n'), ((5132, 5181), 'word_vectors.convert.convert', 'convert', (['input_path'], {'output_file_type': 'output_type'}), '(input_path, output_file_type=output_type)\n', (5139, 5181), False, 'from word_vectors.convert import convert\n'), ((5364, 5403), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['call_wv', 'wv'], {}), '(call_wv, wv)\n', (5390, 5403), True, 'import numpy as np\n'), ((597, 625), 'os.path.splitext', 'os.path.splitext', (['input_path'], {}), '(input_path)\n', (613, 625), False, 'import os\n'), ((5892, 5934), 'unittest.mock.patch', 'patch', (['"""word_vectors.convert_module.write"""'], {}), "('word_vectors.convert_module.write')\n", (5897, 5934), False, 'from unittest.mock import patch\n'), ((5979, 5995), 'word_vectors.read.read', 'read', (['input_path'], {}), '(input_path)\n', (5983, 5995), False, 'from word_vectors.read import read\n'), ((6016, 6073), 'word_vectors.convert.convert', 'convert', (['input_path', 'output'], {'output_file_type': 'output_type'}), '(input_path, output, output_file_type=output_type)\n', (6023, 6073), False, 'from word_vectors.convert import convert\n'), ((6325, 6364), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['call_wv', 'wv'], {}), '(call_wv, wv)\n', (6351, 6364), True, 'import numpy as np\n'), ((6889, 6930), 'unittest.mock.patch', 'patch', (['"""word_vectors.convert_module.read"""'], {}), "('word_vectors.convert_module.read')\n", (6894, 6930), False, 'from unittest.mock import patch\n'), ((6971, 7013), 'unittest.mock.patch', 'patch', (['"""word_vectors.convert_module.write"""'], {}), "('word_vectors.convert_module.write')\n", (6976, 7013), False, 'from unittest.mock import patch\n'), ((7062, 7078), 'word_vectors.read.read', 'read', (['input_path'], {}), '(input_path)\n', (7066, 7078), 
False, 'from word_vectors.read import read\n'), ((7161, 7251), 'word_vectors.convert.convert', 'convert', (['input_path', 'output'], {'output_file_type': 'output_type', 'input_file_type': 'input_type'}), '(input_path, output, output_file_type=output_type, input_file_type=\n input_type)\n', (7168, 7251), False, 'from word_vectors.convert import convert\n'), ((7601, 7640), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['call_wv', 'wv'], {}), '(call_wv, wv)\n', (7627, 7640), True, 'import numpy as np\n')] |
"""
Matching pennies environment.
"""
import gym
import numpy as np
from gym.spaces import Discrete, Tuple
from .common import OneHot
class IteratedMatchingPennies(gym.Env):
    """Two-agent vectorized environment for iterated Matching Pennies.

    Each step both agents choose one of two actions.  The reward pair is
    zero-sum: agent 0 receives payout_mat[ac1][ac0] and agent 1 receives
    its negation, so agent 0 gains +1 when the two actions match.
    Observations are one-hot vectors over NUM_STATES; the extra fifth
    state (last slot) marks the initial, pre-play state.
    """
    NAME = 'IMP'
    NUM_AGENTS = 2
    NUM_ACTIONS = 2
    NUM_STATES = 5
    def __init__(self, max_steps):
        self.max_steps = max_steps
        self.payout_mat = np.array([[1, -1], [-1, 1]])
        # one identical (action / observation) space per agent
        self.action_space = Tuple(
            [Discrete(self.NUM_ACTIONS) for _ in range(self.NUM_AGENTS)])
        self.observation_space = Tuple(
            [OneHot(self.NUM_STATES) for _ in range(self.NUM_AGENTS)])
        self.step_count = None
    def reset(self):
        """Reset the episode; both agents observe the initial state."""
        self.step_count = 0
        start = np.zeros(self.NUM_STATES)
        start[-1] = 1  # last slot encodes "no actions played yet"
        return [start, start]
    def step(self, action):
        """Advance one step given the joint action (ac0, ac1)."""
        ac0, ac1 = action
        self.step_count += 1
        payoff = self.payout_mat[ac1][ac0]
        rewards = [payoff, -payoff]
        # encode the joint action as a one-hot state index
        state = np.zeros(self.NUM_STATES)
        state[ac0 * 2 + ac1] = 1
        observations = [state, state]
        done = self.step_count == self.max_steps
        return observations, rewards, done
| [
"numpy.array",
"numpy.zeros",
"gym.spaces.Discrete"
] | [((436, 464), 'numpy.array', 'np.array', (['[[1, -1], [-1, 1]]'], {}), '([[1, -1], [-1, 1]])\n', (444, 464), True, 'import numpy as np\n'), ((778, 803), 'numpy.zeros', 'np.zeros', (['self.NUM_STATES'], {}), '(self.NUM_STATES)\n', (786, 803), True, 'import numpy as np\n'), ((1084, 1109), 'numpy.zeros', 'np.zeros', (['self.NUM_STATES'], {}), '(self.NUM_STATES)\n', (1092, 1109), True, 'import numpy as np\n'), ((513, 539), 'gym.spaces.Discrete', 'Discrete', (['self.NUM_ACTIONS'], {}), '(self.NUM_ACTIONS)\n', (521, 539), False, 'from gym.spaces import Discrete, Tuple\n'), ((541, 567), 'gym.spaces.Discrete', 'Discrete', (['self.NUM_ACTIONS'], {}), '(self.NUM_ACTIONS)\n', (549, 567), False, 'from gym.spaces import Discrete, Tuple\n')] |
#!/usr/bin/env python
# Filename: planet_svm_classify
"""
introduction: Using SVM in sklearn library to perform classification on Planet images
authors: <NAME>
email:<EMAIL>
add time: 4 January, 2019
"""
import sys, os
from optparse import OptionParser
import rasterio
import numpy as np
HOME = os.path.expanduser('~')
# Landuse_DL
codes_dir = HOME + '/codes/PycharmProjects/Landuse_DL'
sys.path.insert(0, codes_dir)
sys.path.insert(0, os.path.join(codes_dir, 'datasets'))
# path of DeeplabforRS
codes_dir2 = HOME + '/codes/PycharmProjects/DeeplabforRS'
sys.path.insert(0, codes_dir2)
import basic_src.io_function as io_function
import basic_src.basic as basic
# pip install imbalanced-learn for sub sample the training data.
import imblearn
# Preprocessing
from sklearn import preprocessing
# library for SVM classifier
from sklearn import svm
# model_selection # change grid_search to model_selection
from sklearn import model_selection
from sklearn.externals import joblib # save and load model
import datasets.build_RS_data as build_RS_data
import multiprocessing
from multiprocessing import Pool
# File names (created in the current working directory) used to persist
# the trained sklearn classifier and the fitted scaler between runs.
model_saved_path = "sk_svm_trained.pkl"
scaler_saved_path = "scaler_saved.pkl"
def get_output_name(input_tif):
    """Derive the default classified-result path for *input_tif*.

    The output sits next to the input and has '_classified.tif' appended
    to the base name (original extension dropped).
    """
    directory, file_name = os.path.split(input_tif)
    base_name = os.path.splitext(file_name)[0]
    return os.path.join(directory, base_name + "_classified.tif")
def read_training_pixels(image_path, label_path):
    """
    Read training pixels from an image subset and its corresponding label raster.

    The two files must come from the same training polygon: the polygon index
    is expected at position -3 of the '_'-separated basename of each file.

    :param image_path: path of the (multi-band) image subset
    :param label_path: path of the single-band label subset
    :return: (X_arr, y_arr) where X_arr has shape (nbands, n_pixels) and
        y_arr has shape (n_pixels,), or False if either file does not exist
    :raises ValueError: if the files are from different polygons, the label
        has more than one band, or the image and label sizes differ
    """
    if io_function.is_file_exist(image_path) is False or io_function.is_file_exist(label_path) is False:
        return False
    # check: they are from the same polygon (index is encoded in the file name)
    polygon_index_img = os.path.basename(image_path).split('_')[-3]
    polygon_index_label = os.path.basename(label_path).split('_')[-3]
    if polygon_index_img != polygon_index_label:
        raise ValueError("%s and %s are not from the same training polygons" % (image_path, label_path))
    with rasterio.open(image_path) as img_obj:
        # read all the bands of the image
        indexes = img_obj.indexes
        nbands = len(indexes)
        img_data = img_obj.read(indexes)
    with rasterio.open(label_path) as img_obj:
        # read all the bands (the label must have exactly one band)
        indexes = img_obj.indexes
        if len(indexes) != 1:
            raise ValueError('error, the label should only have one band')
        label_data = img_obj.read(indexes)
    # the image and label must cover exactly the same pixel grid
    if img_data.shape[1] != label_data.shape[1] or img_data.shape[2] != label_data.shape[2]:
        raise ValueError('the image and label have different size')
    # flatten to (nbands, n_pixels) features and (n_pixels,) labels
    X_arr = img_data.reshape(nbands, -1)
    y_arr = label_data.reshape(-1)
    basic.outputlogMessage(str(X_arr.shape))
    basic.outputlogMessage(str(y_arr.shape))
    return X_arr, y_arr
def read_whole_x_pixels(image_path):
    """Read every band of an image as a flat sample matrix.

    :param image_path: path of the raster image
    :return: (X_arr, height, width) where X_arr has shape
        (n_pixels, n_bands), suitable as sklearn input
    """
    with rasterio.open(image_path) as src:
        # read all the bands at once: (n_bands, height, width)
        band_stack = src.read(src.indexes)
    n_bands, height, width = band_stack.shape
    # flatten the spatial dimensions, then put samples on the rows
    X_arr = band_stack.reshape(n_bands, -1).T
    return X_arr, height, width
def inference_one_patch_svm(img_idx,image_count,p_idx,patch_count,inf_output_dir,img_patch,scaler,clf):
    """
    Run SVM prediction on a single image patch and save the result as an
    8-bit, one-band file named "I<img_idx>_<p_idx>.tif" in inf_output_dir.

    This is a module-level function (not a method) so it can be used with
    multiprocessing.Pool.starmap: pickling bound methods (and the embedded
    model) raised Pickle.PicklingError.

    :param img_idx: index of the image
    :param image_count: total number of images
    :param p_idx: index of the patch on the image
    :param patch_count: total number of patches on this image
    :param inf_output_dir: directory receiving the classified patch files
    :param img_patch: patch object understood by build_RS_data.read_patch
    :param scaler: fitted sklearn scaler, or None to load it from
        scaler_saved_path (scaling is skipped if that file is also absent)
    :param clf: trained sklearn classifier (e.g. the saved GridSearchCV)
    :return: True
    """
    # read the patch: patch_data is (nbands, height, width)
    patch_data = build_RS_data.read_patch(img_patch) # read_whole_x_pixels(input)
    nbands, height, width = patch_data.shape
    # reshape to (n_pixels, nbands) as expected by sklearn
    X_predit = patch_data.reshape(nbands, -1)
    X_predit = np.transpose(X_predit, (1, 0))
    if os.path.isfile(scaler_saved_path) and scaler is None:
        scaler = joblib.load(scaler_saved_path)
        result = scaler.transform(X_predit)
        X = result.tolist()
    elif scaler is not None:
        result = scaler.transform(X_predit)
        X = result.tolist()
    else:
        X = X_predit
        basic.outputlogMessage('warning, no pre-processing of data before prediction')
    # more methods on prediction can be found in:
    # https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html
    pre_result = clf.predict(X)
    result_img = pre_result.reshape((height, width))
    # save results
    print('Save patch:%d/%d on Image:%d/%d , shape:(%d,%d)' %
          (p_idx,patch_count,img_idx, image_count, result_img.shape[0], result_img.shape[1]))
    # short the file name to avoid error of " Argument list too long", hlc 2018-Oct-29
    file_name = "I%d_%d" % (img_idx, p_idx)
    save_path = os.path.join(inf_output_dir, file_name + '.tif')
    build_RS_data.save_patch_oneband_8bit(img_patch,result_img.astype(np.uint8),save_path)
    return True
class classify_pix_operation(object):
    """Perform per-pixel SVM classification operations on raster images.

    Holds a fitted preprocessing scaler (self._scaler) and a classifier
    (self._classifier); the fitted objects are also persisted to
    scaler_saved_path / model_saved_path via joblib so later runs can
    reload them.
    """
    def __init__(self):
        # Preprocessing: fitted sklearn scaler (set by pre_processing)
        self._scaler = None
        # classifiers
        self._classifier = None
        # self._classifier_tree = None
        pass
    def __del__(self):
        # release resource
        self._classifier = None
        # self._classifier_tree = None
        pass
    def read_training_pixels_from_multi_images(input, subImg_folder, subLabel_folder):
        """
        Read pixels from subset images, which are extracted from Planet images
        based on training polygons.

        NOTE(review): the first parameter is named 'input' but actually
        receives the instance (it plays the role of 'self' and is unused);
        callers invoke this as a bound method with two arguments.

        :param subImg_folder: the folder containing images
        :param subLabel_folder: the folder containing labels
        :return: X with shape (n_pixels, nbands) and y with shape (n_pixels,)
        :raises IOError: if either folder contains no tif files
        :raises ValueError: if the image and label counts differ
        """
        img_list = io_function.get_file_list_by_ext('.tif', subImg_folder, bsub_folder=False)
        label_list = io_function.get_file_list_by_ext('.tif', subLabel_folder, bsub_folder=False)
        # sort so images and labels pair up by name
        img_list.sort()
        label_list.sort()
        if len(img_list) < 1 or len(label_list) < 1:
            raise IOError('No tif images or labels in folder %s or %s' % (subImg_folder, subLabel_folder))
        if len(img_list) != len(label_list):
            raise ValueError('the number of images is not equal to the one of labels')
        # read them one by one
        Xs, ys = [], []
        for img, label in zip(img_list, label_list):
            # # test by hlc
            # polygon_index_img = os.path.basename(img).split('_')[-3]
            # # print(polygon_index_img)
            # if polygon_index_img not in [str(83), str(86)] :
            #     continue
            X_aImg, y_a = read_training_pixels(img, label)
            Xs.append(X_aImg)
            ys.append(y_a)
        # stack all images along the pixel axis, then put samples on the rows
        X_pixels = np.concatenate(Xs, axis=1)
        y_pixels = np.concatenate(ys, axis=0)
        X_pixels = np.transpose(X_pixels, (1, 0))
        basic.outputlogMessage(str(X_pixels.shape))
        basic.outputlogMessage(str(y_pixels.shape))
        return X_pixels, y_pixels
    def read_training_pixels_inside_polygons(self, img_path, shp_path):
        '''
        Read pixels of an image inside the extent of training polygons.

        :param img_path: the path of an image
        :param shp_path: the path of the shape file (one class label per polygon)
        :return: X with shape (n_pixels, nbands) and y with shape (n_pixels,),
            or False if either input file does not exist
        '''
        if io_function.is_file_exist(img_path) is False or io_function.is_file_exist(shp_path) is False:
            return False
        no_data = 255  # consider changing to other values
        touch = False  # we only read the pixels inside the polygons, so set all_touched as False
        sub_images, class_labels = build_RS_data.read_pixels_inside_polygons(img_path,shp_path,mask_no_data=no_data, touch=touch)
        # read them one by one
        Xs, ys = [], []
        for idx, (img_data, label) in enumerate(zip(sub_images, class_labels)):
            # img_data: 3d array (nband, height, width); label: int value
            X_arr = img_data.reshape(img_data.shape[0], -1)
            # keep only columns where at least one band differs from no_data
            valid_pixles = np.any(X_arr != no_data,axis=0)
            X_arr = X_arr[:,valid_pixles]
            valid_pixel_count = int(X_arr.size/img_data.shape[0])
            if valid_pixel_count < 1:
                # NOTE(review): the %d here prints valid_pixel_count (always 0
                # on this branch); the polygon index 'idx' was likely intended
                basic.outputlogMessage('Warning, No valid pixel in %d th polygon due to its small size'%valid_pixel_count)
                continue
            y_arr = np.ones(X_arr.shape[1])*label
            Xs.append(X_arr)
            ys.append(y_arr)
        X_pixels = np.concatenate(Xs, axis=1)
        y_pixels = np.concatenate(ys, axis=0)
        X_pixels = np.transpose(X_pixels, (1, 0))
        basic.outputlogMessage(str(X_pixels.shape))
        basic.outputlogMessage(str(y_pixels.shape))
        return X_pixels, y_pixels
    def pre_processing(self, whole_dataset, type=None):
        """
        Fit the preprocessing scaler on the whole dataset and save it to
        scaler_saved_path.

        :param whole_dataset: the whole dataset, shape (n_samples, n_features)
        :param type: pre-processing type (currently unused; only
            Standardization via sklearn StandardScaler is implemented)
        """
        # for svm
        X = whole_dataset
        # NOTE(review): 'is None' would be the idiomatic comparison here
        if self._scaler == None:
            self._scaler = preprocessing.StandardScaler().fit(X)
        else:
            basic.outputlogMessage('warning, StandardScaler object already exist, this operation will overwrite it')
            self._scaler = preprocessing.StandardScaler().fit(X)
        # save for later training / prediction runs
        joblib.dump(self._scaler, scaler_saved_path)
    def training_svm_classifier(self, training_X, training_y):
        """
        Train an SVM classifier with grid-searched hyper-parameters and save
        the fitted GridSearchCV object to model_saved_path.

        If a saved scaler exists (or one is already loaded), the training
        samples are transformed with it first.

        :param training_X: training samples, shape (n_samples, n_features)
        :param training_y: training labels, shape (n_samples,)
        :raises ValueError: if either input is None
        """
        if training_X is None or training_y is None:
            raise ValueError('the training samples are None')
        if self._classifier is None:
            self._classifier = svm.SVC() # LinearSVC() #SVC()
        else:
            basic.outputlogMessage('warning, classifier already exist, this operation will replace the old one')
            self._classifier = svm.SVC() # LinearSVC() #SVC()
        if os.path.isfile(scaler_saved_path) and self._scaler is None:
            self._scaler = joblib.load(scaler_saved_path)
            result = self._scaler.transform(training_X)
            X = result.tolist()
        elif self._scaler is not None:
            result = self._scaler.transform(training_X)
            X = result.tolist()
        else:
            X = training_X
            basic.outputlogMessage('warning, no pre-processing of data before training')
        y = training_y
        basic.outputlogMessage('Training data set nsample: %d, nfeature: %d' % (len(X), len(X[0])))
        # # sub sample and make the class 0 and 1 balanced (have the same number)
        # basic.outputlogMessage('Number of sample before sub-sample: %d, class 0: %d, class 1: %d'%
        #                        (len(X),len(np.where(y==0)[0]),len(np.where(y==1)[0])))
        # from imblearn.under_sampling import RandomUnderSampler
        # rus = RandomUnderSampler(return_indices=True)
        # X_rus, y_rus, id_rus = rus.fit_sample(X, y)
        # X = X_rus
        # y = y_rus
        # basic.outputlogMessage('Number of sample after sub-sample: %d, class 0: %d, class 1: %d'%
        #                        (len(X),len(np.where(y==0)[0]),len(np.where(y==1)[0])))
        X_train = X
        y_train = y
        # # for test by hlc
        # X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.95, random_state=0)
        # X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.2, random_state=0)
        # SVM Parameter Tuning in Scikit Learn using GridSearchCV
        # #Set the parameters by cross-validation
        tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-4, 0.001, 0.01, 0.1, 1, 2.5, 5],
                             'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000]},
                            {'kernel': ['linear'], 'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000]}]
        # # for test by hlc
        # tuned_parameters = [{'kernel': ['linear'], 'C': [0.001, 0.1,1, 10]}]
        clf = model_selection.GridSearchCV(svm.SVC(), tuned_parameters, cv=5,
                                           scoring='f1_macro', n_jobs=-1, verbose=3)
        clf.fit(X_train, y_train)
        basic.outputlogMessage("Best parameters set found on development set:" + str(clf.best_params_))
        basic.outputlogMessage("Grid scores on development set:\n")
        means = clf.cv_results_['mean_test_score']
        stds = clf.cv_results_['std_test_score']
        for mean, std, params in zip(means, stds, clf.cv_results_['params']):
            basic.outputlogMessage("%0.3f (+/-%0.03f) for %r"
                                   % (mean, std * 2, params))
        # fit_model = self._classifier.fit(X,y)
        # basic.outputlogMessage(str(fit_model))
        # save the classification model
        joblib.dump(clf, model_saved_path)
    def prediction_on_a_image(self, input, output,model_saved_path):
        """
        Conduct prediction on a tif image: the image is split into patches,
        each patch is classified in parallel (multiprocessing) and saved as a
        separate 8-bit tif inside 'inf_results'.

        :param input: path of the image to classify
        :param output: intended output path (currently unused by the parallel
            path; patch results are written to inf_results instead)
        :param model_saved_path: path of the trained model saved by joblib
        :return: True
        :raises IOError: if the trained model file does not exist
        """
        # load the saved model
        if os.path.isfile(model_saved_path) is False:
            raise IOError('trained model: %s not exist' % model_saved_path)
        clf = joblib.load(model_saved_path)
        # split a large image to many small ones
        patch_w = 500  # parameters.get_digit_parameters("", "train_patch_width", None, 'int')
        patch_h = 500  # parameters.get_digit_parameters("", "train_patch_height", None, 'int')
        overlay_x = 0  # parameters.get_digit_parameters("", "train_pixel_overlay_x", None, 'int')
        overlay_y = 0  # parameters.get_digit_parameters("", "train_pixel_overlay_y", None, 'int')
        img_folder = os.path.dirname(input)
        img_name = os.path.basename(input)
        # make_dataset expects a text file listing the image names
        inf_list_txt = 'inf_image_list.txt'
        with open(inf_list_txt, 'w') as txt_obj:
            txt_obj.writelines(img_name + '\n')
        img_patches = build_RS_data.make_dataset(img_folder, inf_list_txt, patch_w, patch_h, overlay_x, overlay_y,
                                                   train=False)
        for img_idx, aImg_patches in enumerate(img_patches):
            inf_output_dir = 'inf_results' #os.path.splitext(img_name)[0]
            os.system('mkdir -p '+inf_output_dir)
            os.system('rm '+inf_output_dir+'/*')
            ## parallel inference patches
            # but it turns out not work due to the Pickle.PicklingError
            # not working due to mulitple parameters. Jan 9, 2019, hlc
            # use multiple thread
            num_cores = multiprocessing.cpu_count()
            print('number of thread %d' % num_cores)
            # theadPool = mp.Pool(num_cores)  # multi threads, can not utilize all the CPUs? not sure hlc 2018-4-19
            theadPool = Pool(num_cores)  # multi processes
            # inference_one_patch_svm(img_idx, image_count, p_idx, patch_count, inf_output_dir, img_patch, scaler,clf)
            parameters_list = [
                (img_idx, len(img_patches), idx, len(aImg_patches), inf_output_dir, img_patch, self._scaler, clf)
                for (idx, img_patch) in enumerate(aImg_patches)]
            # results = theadPool.map(inference_one_patch_svm, parameters_list)  # not working
            results = theadPool.starmap(inference_one_patch_svm, parameters_list)  # need python3
            print('result_list', results)
            # for p_idx, img_patch in enumerate(aImg_patches):
            #     # read images
            #     patch_data = build_RS_data.read_patch(img_patch) # read_whole_x_pixels(input)
            #
            #     nbands, height, width = patch_data.shape
            #
            #     X_predit = patch_data.reshape(nbands, -1)
            #     X_predit = np.transpose(X_predit, (1, 0))
            #
            #     if os.path.isfile(scaler_saved_path) and self._scaler is None:
            #         self._scaler = joblib.load(scaler_saved_path)
            #         result = self._scaler.transform(X_predit)
            #         X = result.tolist()
            #     elif self._scaler is not None:
            #         result = self._scaler.transform(X_predit)
            #         X = result.tolist()
            #     else:
            #         X = X_predit
            #         basic.outputlogMessage('warning, no pre-processing of data before prediction')
            #
            #     # more method on prediction can be found in :
            #     # https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html
            #     pre_result = clf.predict(X)
            #     result_img = pre_result.reshape((height, width))
            #
            #     # save results
            #     print('Save patch:%d/%d on Image:%d/%d , shape:(%d,%d)' %
            #           (p_idx,len(aImg_patches), img_idx,len(img_patches), result_img.shape[0], result_img.shape[1]))
            #
            #     # short the file name to avoid error of " Argument list too long", hlc 2018-Oct-29
            #     file_name = "I%d_%d" % (img_idx, p_idx)
            #
            #     save_path = os.path.join(inf_output_dir, file_name + '.tif')
            #     build_RS_data.save_patch_oneband_8bit(img_patch,result_img.astype(np.uint8),save_path)
            #
        # with rasterio.open(input) as src_obj:
        #     # Set spatial characteristics of the output object to mirror the input
        #     kwargs = src_obj.meta
        #     kwargs.update(
        #         dtype=rasterio.uint8,
        #         count=1)
        #     # Create the file
        #     with rasterio.open(output, 'w', **kwargs) as dst:
        #         dst.write_band(1, result_img.astype(rasterio.uint8))
        #     basic.outputlogMessage("save to %s" % output)
        return True
def main(options, args):
    """
    Entry point: dispatch to pre-processing, training, or prediction.

    Exactly one mode runs per invocation:
    - options.ispreprocess: fit and save the scaler from args[0]
    - options.istraining: train the SVM, either from the 'subImages' /
      'subLabels' folders or from args[0] plus options.polygon_train
    - otherwise: predict on args[0] and write the classified image
    """
    basic.outputlogMessage('Is_preprocessing:' + str(options.ispreprocess))
    basic.outputlogMessage('Is_training:' + str(options.istraining))
    classify_obj = classify_pix_operation()
    if options.ispreprocess:
        input_tif = args[0]
        # preprocessing: only run if no scaler has been saved yet
        if os.path.isfile(scaler_saved_path) is False:
            # read the whole data set for pre-processing (height/width unused)
            X, _, _ = read_whole_x_pixels(input_tif)
            classify_obj.pre_processing(X)
        else:
            basic.outputlogMessage('warning, scaled model already exist, skip pre-processing')
    elif options.istraining:
        # training
        if options.polygon_train is None:
            # read training data (make sure 'subImages', 'subLabels' is under current folder)
            X, y = classify_obj.read_training_pixels_from_multi_images('subImages', 'subLabels')
        else:
            input_tif = args[0]
            X, y = classify_obj.read_training_pixels_inside_polygons(input_tif, options.polygon_train)
        if os.path.isfile(model_saved_path) is False:
            classify_obj.training_svm_classifier(X, y)
        else:
            basic.outputlogMessage("warning, trained model already exist, skip training")
    else:
        # prediction
        input_tif = args[0]
        if options.output is not None:
            output = options.output
        else:
            output = get_output_name(input_tif)
        basic.outputlogMessage('staring prediction on image:' + str(input_tif))
        classify_obj.prediction_on_a_image(input_tif, output,model_saved_path)
if __name__ == "__main__":
    # Command-line interface: one positional input image plus mode flags.
    usage = "usage: %prog [options] input_image"
    parser = OptionParser(usage=usage, version="1.0 2019-1-4")
    parser.description = 'Introduction: Using SVM in sklearn library to perform classification on Planet images'
    # -p: fit & save the scaler only; takes precedence over -t in main()
    parser.add_option("-p", "--ispreprocess",
                      action="store_true", dest="ispreprocess", default=False,
                      help="to indicate the script will perform pre-processing, if this set, istraining will be ignored")
    parser.add_option("-t", "--istraining",
                      action="store_true", dest="istraining", default=False,
                      help="to indicate the script will perform training process")
    parser.add_option("-s", "--shape_train",
                      action="store", dest="polygon_train",
                      help="the shape file containing polygons for training")
    parser.add_option("-o", "--output",
                      action="store", dest="output",
                      help="the output file path")
    (options, args) = parser.parse_args()
    # require at least one argument (the input image)
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit(2)
    basic.setlogfile('planet_svm_log.txt')
    main(options, args)
| [
"sys.path.insert",
"sklearn.externals.joblib.load",
"multiprocessing.cpu_count",
"basic_src.basic.outputlogMessage",
"sys.exit",
"basic_src.io_function.is_file_exist",
"numpy.concatenate",
"datasets.build_RS_data.make_dataset",
"os.path.expanduser",
"numpy.ones",
"datasets.build_RS_data.read_pat... | [((299, 322), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (317, 322), False, 'import sys, os\n'), ((391, 420), 'sys.path.insert', 'sys.path.insert', (['(0)', 'codes_dir'], {}), '(0, codes_dir)\n', (406, 420), False, 'import sys, os\n'), ((559, 589), 'sys.path.insert', 'sys.path.insert', (['(0)', 'codes_dir2'], {}), '(0, codes_dir2)\n', (574, 589), False, 'import sys, os\n'), ((440, 475), 'os.path.join', 'os.path.join', (['codes_dir', '"""datasets"""'], {}), "(codes_dir, 'datasets')\n", (452, 475), False, 'import sys, os\n'), ((1242, 1268), 'os.path.dirname', 'os.path.dirname', (['input_tif'], {}), '(input_tif)\n', (1257, 1268), False, 'import sys, os\n'), ((1285, 1312), 'os.path.basename', 'os.path.basename', (['input_tif'], {}), '(input_tif)\n', (1301, 1312), False, 'import sys, os\n'), ((1372, 1424), 'os.path.join', 'os.path.join', (['folder', "(name_noext + '_classified.tif')"], {}), "(folder, name_noext + '_classified.tif')\n", (1384, 1424), False, 'import sys, os\n'), ((4002, 4037), 'datasets.build_RS_data.read_patch', 'build_RS_data.read_patch', (['img_patch'], {}), '(img_patch)\n', (4026, 4037), True, 'import datasets.build_RS_data as build_RS_data\n'), ((4176, 4206), 'numpy.transpose', 'np.transpose', (['X_predit', '(1, 0)'], {}), '(X_predit, (1, 0))\n', (4188, 4206), True, 'import numpy as np\n'), ((5168, 5216), 'os.path.join', 'os.path.join', (['inf_output_dir', "(file_name + '.tif')"], {}), "(inf_output_dir, file_name + '.tif')\n", (5180, 5216), False, 'import sys, os\n'), ((20322, 20371), 'optparse.OptionParser', 'OptionParser', ([], {'usage': 'usage', 'version': '"""1.0 2019-1-4"""'}), "(usage=usage, version='1.0 2019-1-4')\n", (20334, 20371), False, 'from optparse import OptionParser\n'), ((21389, 21427), 'basic_src.basic.setlogfile', 'basic.setlogfile', (['"""planet_svm_log.txt"""'], {}), "('planet_svm_log.txt')\n", (21405, 21427), True, 'import basic_src.basic as basic\n'), ((1330, 
1357), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (1346, 1357), False, 'import sys, os\n'), ((2110, 2135), 'rasterio.open', 'rasterio.open', (['image_path'], {}), '(image_path)\n', (2123, 2135), False, 'import rasterio\n'), ((2292, 2317), 'rasterio.open', 'rasterio.open', (['label_path'], {}), '(label_path)\n', (2305, 2317), False, 'import rasterio\n'), ((3063, 3088), 'rasterio.open', 'rasterio.open', (['image_path'], {}), '(image_path)\n', (3076, 3088), False, 'import rasterio\n'), ((3316, 3343), 'numpy.transpose', 'np.transpose', (['X_arr', '(1, 0)'], {}), '(X_arr, (1, 0))\n', (3328, 3343), True, 'import numpy as np\n'), ((4215, 4248), 'os.path.isfile', 'os.path.isfile', (['scaler_saved_path'], {}), '(scaler_saved_path)\n', (4229, 4248), False, 'import sys, os\n'), ((4286, 4316), 'sklearn.externals.joblib.load', 'joblib.load', (['scaler_saved_path'], {}), '(scaler_saved_path)\n', (4297, 4316), False, 'from sklearn.externals import joblib\n'), ((6131, 6205), 'basic_src.io_function.get_file_list_by_ext', 'io_function.get_file_list_by_ext', (['""".tif"""', 'subImg_folder'], {'bsub_folder': '(False)'}), "('.tif', subImg_folder, bsub_folder=False)\n", (6163, 6205), True, 'import basic_src.io_function as io_function\n'), ((6227, 6303), 'basic_src.io_function.get_file_list_by_ext', 'io_function.get_file_list_by_ext', (['""".tif"""', 'subLabel_folder'], {'bsub_folder': '(False)'}), "('.tif', subLabel_folder, bsub_folder=False)\n", (6259, 6303), True, 'import basic_src.io_function as io_function\n'), ((7123, 7149), 'numpy.concatenate', 'np.concatenate', (['Xs'], {'axis': '(1)'}), '(Xs, axis=1)\n', (7137, 7149), True, 'import numpy as np\n'), ((7169, 7195), 'numpy.concatenate', 'np.concatenate', (['ys'], {'axis': '(0)'}), '(ys, axis=0)\n', (7183, 7195), True, 'import numpy as np\n'), ((7215, 7245), 'numpy.transpose', 'np.transpose', (['X_pixels', '(1, 0)'], {}), '(X_pixels, (1, 0))\n', (7227, 7245), True, 'import numpy as np\n'), ((7975, 
8076), 'datasets.build_RS_data.read_pixels_inside_polygons', 'build_RS_data.read_pixels_inside_polygons', (['img_path', 'shp_path'], {'mask_no_data': 'no_data', 'touch': 'touch'}), '(img_path, shp_path, mask_no_data=\n no_data, touch=touch)\n', (8016, 8076), True, 'import datasets.build_RS_data as build_RS_data\n'), ((8981, 9007), 'numpy.concatenate', 'np.concatenate', (['Xs'], {'axis': '(1)'}), '(Xs, axis=1)\n', (8995, 9007), True, 'import numpy as np\n'), ((9027, 9053), 'numpy.concatenate', 'np.concatenate', (['ys'], {'axis': '(0)'}), '(ys, axis=0)\n', (9041, 9053), True, 'import numpy as np\n'), ((9073, 9103), 'numpy.transpose', 'np.transpose', (['X_pixels', '(1, 0)'], {}), '(X_pixels, (1, 0))\n', (9085, 9103), True, 'import numpy as np\n'), ((9918, 9962), 'sklearn.externals.joblib.dump', 'joblib.dump', (['self._scaler', 'scaler_saved_path'], {}), '(self._scaler, scaler_saved_path)\n', (9929, 9962), False, 'from sklearn.externals import joblib\n'), ((13010, 13069), 'basic_src.basic.outputlogMessage', 'basic.outputlogMessage', (['"""Grid scores on development set:\n"""'], {}), "('Grid scores on development set:\\n')\n", (13032, 13069), True, 'import basic_src.basic as basic\n'), ((13520, 13554), 'sklearn.externals.joblib.dump', 'joblib.dump', (['clf', 'model_saved_path'], {}), '(clf, model_saved_path)\n', (13531, 13554), False, 'from sklearn.externals import joblib\n'), ((13930, 13959), 'sklearn.externals.joblib.load', 'joblib.load', (['model_saved_path'], {}), '(model_saved_path)\n', (13941, 13959), False, 'from sklearn.externals import joblib\n'), ((14421, 14443), 'os.path.dirname', 'os.path.dirname', (['input'], {}), '(input)\n', (14436, 14443), False, 'import sys, os\n'), ((14463, 14486), 'os.path.basename', 'os.path.basename', (['input'], {}), '(input)\n', (14479, 14486), False, 'import sys, os\n'), ((14651, 14760), 'datasets.build_RS_data.make_dataset', 'build_RS_data.make_dataset', (['img_folder', 'inf_list_txt', 'patch_w', 'patch_h', 'overlay_x', 
'overlay_y'], {'train': '(False)'}), '(img_folder, inf_list_txt, patch_w, patch_h,\n overlay_x, overlay_y, train=False)\n', (14677, 14760), True, 'import datasets.build_RS_data as build_RS_data\n'), ((21372, 21383), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (21380, 21383), False, 'import sys, os\n'), ((1643, 1680), 'basic_src.io_function.is_file_exist', 'io_function.is_file_exist', (['image_path'], {}), '(image_path)\n', (1668, 1680), True, 'import basic_src.io_function as io_function\n'), ((1693, 1730), 'basic_src.io_function.is_file_exist', 'io_function.is_file_exist', (['label_path'], {}), '(label_path)\n', (1718, 1730), True, 'import basic_src.io_function as io_function\n'), ((4529, 4607), 'basic_src.basic.outputlogMessage', 'basic.outputlogMessage', (['"""warning, no pre-processing of data before prediction"""'], {}), "('warning, no pre-processing of data before prediction')\n", (4551, 4607), True, 'import basic_src.basic as basic\n'), ((8473, 8505), 'numpy.any', 'np.any', (['(X_arr != no_data)'], {'axis': '(0)'}), '(X_arr != no_data, axis=0)\n', (8479, 8505), True, 'import numpy as np\n'), ((9725, 9839), 'basic_src.basic.outputlogMessage', 'basic.outputlogMessage', (['"""warning, StandardScaler object already exist, this operation will overwrite it"""'], {}), "(\n 'warning, StandardScaler object already exist, this operation will overwrite it'\n )\n", (9747, 9839), True, 'import basic_src.basic as basic\n'), ((10409, 10418), 'sklearn.svm.SVC', 'svm.SVC', ([], {}), '()\n', (10416, 10418), False, 'from sklearn import svm\n'), ((10467, 10577), 'basic_src.basic.outputlogMessage', 'basic.outputlogMessage', (['"""warning, classifier already exist, this operation will replace the old one"""'], {}), "(\n 'warning, classifier already exist, this operation will replace the old one'\n )\n", (10489, 10577), True, 'import basic_src.basic as basic\n'), ((10599, 10608), 'sklearn.svm.SVC', 'svm.SVC', ([], {}), '()\n', (10606, 10608), False, 'from sklearn import 
svm\n'), ((10644, 10677), 'os.path.isfile', 'os.path.isfile', (['scaler_saved_path'], {}), '(scaler_saved_path)\n', (10658, 10677), False, 'import sys, os\n'), ((10731, 10761), 'sklearn.externals.joblib.load', 'joblib.load', (['scaler_saved_path'], {}), '(scaler_saved_path)\n', (10742, 10761), False, 'from sklearn.externals import joblib\n'), ((12742, 12751), 'sklearn.svm.SVC', 'svm.SVC', ([], {}), '()\n', (12749, 12751), False, 'from sklearn import svm\n'), ((13261, 13337), 'basic_src.basic.outputlogMessage', 'basic.outputlogMessage', (["('%0.3f (+/-%0.03f) for %r' % (mean, std * 2, params))"], {}), "('%0.3f (+/-%0.03f) for %r' % (mean, std * 2, params))\n", (13283, 13337), True, 'import basic_src.basic as basic\n'), ((13796, 13828), 'os.path.isfile', 'os.path.isfile', (['model_saved_path'], {}), '(model_saved_path)\n', (13810, 13828), False, 'import sys, os\n'), ((14954, 14993), 'os.system', 'os.system', (["('mkdir -p ' + inf_output_dir)"], {}), "('mkdir -p ' + inf_output_dir)\n", (14963, 14993), False, 'import sys, os\n'), ((15004, 15044), 'os.system', 'os.system', (["('rm ' + inf_output_dir + '/*')"], {}), "('rm ' + inf_output_dir + '/*')\n", (15013, 15044), False, 'import sys, os\n'), ((15285, 15312), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (15310, 15312), False, 'import multiprocessing\n'), ((15506, 15521), 'multiprocessing.Pool', 'Pool', (['num_cores'], {}), '(num_cores)\n', (15510, 15521), False, 'from multiprocessing import Pool\n'), ((18931, 18964), 'os.path.isfile', 'os.path.isfile', (['scaler_saved_path'], {}), '(scaler_saved_path)\n', (18945, 18964), False, 'import sys, os\n'), ((19151, 19238), 'basic_src.basic.outputlogMessage', 'basic.outputlogMessage', (['"""warning, scaled model already exist, skip pre-processing"""'], {}), "(\n 'warning, scaled model already exist, skip pre-processing')\n", (19173, 19238), True, 'import basic_src.basic as basic\n'), ((1832, 1860), 'os.path.basename', 'os.path.basename', 
(['image_path'], {}), '(image_path)\n', (1848, 1860), False, 'import sys, os\n'), ((1902, 1930), 'os.path.basename', 'os.path.basename', (['label_path'], {}), '(label_path)\n', (1918, 1930), False, 'import sys, os\n'), ((7661, 7696), 'basic_src.io_function.is_file_exist', 'io_function.is_file_exist', (['img_path'], {}), '(img_path)\n', (7686, 7696), True, 'import basic_src.io_function as io_function\n'), ((7709, 7744), 'basic_src.io_function.is_file_exist', 'io_function.is_file_exist', (['shp_path'], {}), '(shp_path)\n', (7734, 7744), True, 'import basic_src.io_function as io_function\n'), ((8720, 8837), 'basic_src.basic.outputlogMessage', 'basic.outputlogMessage', (["('Warning, No valid pixel in %d th polygon due to its small size' %\n valid_pixel_count)"], {}), "(\n 'Warning, No valid pixel in %d th polygon due to its small size' %\n valid_pixel_count)\n", (8742, 8837), True, 'import basic_src.basic as basic\n'), ((8873, 8896), 'numpy.ones', 'np.ones', (['X_arr.shape[1]'], {}), '(X_arr.shape[1])\n', (8880, 8896), True, 'import numpy as np\n'), ((11030, 11106), 'basic_src.basic.outputlogMessage', 'basic.outputlogMessage', (['"""warning, no pre-processing of data before training"""'], {}), "('warning, no pre-processing of data before training')\n", (11052, 11106), True, 'import basic_src.basic as basic\n'), ((19673, 19705), 'os.path.isfile', 'os.path.isfile', (['model_saved_path'], {}), '(model_saved_path)\n', (19687, 19705), False, 'import sys, os\n'), ((19797, 19874), 'basic_src.basic.outputlogMessage', 'basic.outputlogMessage', (['"""warning, trained model already exist, skip training"""'], {}), "('warning, trained model already exist, skip training')\n", (19819, 19874), True, 'import basic_src.basic as basic\n'), ((9661, 9691), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (9689, 9691), False, 'from sklearn import preprocessing\n'), ((9857, 9887), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', 
([], {}), '()\n', (9885, 9887), False, 'from sklearn import preprocessing\n')] |
#!/usr/bin/env python3
from __future__ import division
from builtins import str
from builtins import range
from builtins import object
from past.utils import old_div
import isce
from isceobj.Scene.Frame import Frame
from isceobj.Planet.AstronomicalHandbook import Const
from isceobj.Planet.Planet import Planet
from Sentinel1_TOPS import Sentinel1_TOPS
import argparse
from lxml import objectify as OBJ
from FrameInfoExtractor import FrameInfoExtractor as FIE
import numpy as np
from osgeo import ogr, osr
import os, sys, re, requests, json, shutil, traceback, logging, hashlib, math
DATASETTYPE_RE = re.compile(r'-(raw|slc)-')
MISSION_RE = re.compile(r'S1(\w)')
def cmdLineParse():
    """Build and evaluate the command-line interface for this script."""
    ap = argparse.ArgumentParser(description='Extract metadata from S1 swath')
    ap.add_argument(
        '-i', '--input', dest='xml_file', type=str, nargs='+',
        help='Swath XML file')
    ap.add_argument(
        '-o', '--output', dest='outjson', type=str, required=True,
        help='Ouput met.json')
    return ap.parse_args()
def objectify(inxml):
    """Parse *inxml* with lxml.objectify and return the document root."""
    with open(inxml, 'r') as fid:
        tree = OBJ.parse(fid)
    return tree.getroot()
def get_area(coords):
    """Return the signed area enclosed by *coords* (shoelace formula).

    The sign encodes the winding order of the ring; change_direction()
    treats a positive result as clockwise.

    :param coords: sequence of [lat, lon] vertex pairs
    :return: signed area as a float
    """
    n = len(coords)  # number of corners
    area = 0.0
    for i in range(n):
        j = (i + 1) % n  # next vertex, wrapping around to the first
        area += coords[i][1] * coords[j][0]
        area -= coords[j][1] * coords[i][0]
    # Plain true division replaces the Python-2 `old_div` compatibility
    # shim; this is a Python-3 script (and imports division from
    # __future__ anyway), so the result is identical.
    return area / 2.0
def change_direction(coords):
    """Return *coords* ordered clockwise.

    A non-positive signed area from get_area() means the ring is
    counter-clockwise (or degenerate), in which case the vertex order
    is reversed.

    :param coords: sequence of [lat, lon] vertex pairs
    :return: the input sequence, reversed if it was not clockwise
    """
    # The original computed the area twice and bound one result to an
    # unused local (`cord_area`); a single call suffices since
    # get_area() is pure.
    if not get_area(coords) > 0:  # reverse order if not clockwise
        print("update_met_json, reversing the coords")
        coords = coords[::-1]
    return coords
def getGeometry(obj):
    """Get bbox and central coordinates from an annotation XML object.

    Reads the geolocation grid points, reshapes them into a
    (line, pixel) grid and derives the four corner coordinates plus a
    GeoJSON-style center point.

    :param obj: objectified annotation XML with a ``geolocationGrid`` node
    :return: (center, bbox) where center is a {"coordinates", "type"}
        dict and bbox is [[lat, lon], ...] for the four grid corners
    """
    pts = []
    glist = obj.geolocationGrid.geolocationGridPointList
    for child in glist.getchildren():
        pts.append([float(child.line), float(child.pixel),
                    float(child.latitude), float(child.longitude)])
    # Grid dimensions from the distinct line/pixel values.  Plain true
    # division replaces the Python-2 `old_div` shim: both operands are
    # floats here, so the result is unchanged.
    ys = sorted(set(x[0] for x in pts))
    dy = ys[1] - ys[0]
    ny = int((ys[-1] - ys[0]) / dy + 1)
    xs = sorted(set(x[1] for x in pts))
    dx = xs[1] - xs[0]
    nx = int((xs[-1] - xs[0]) / dx + 1)
    # Points are assumed to be listed in row-major (line, pixel) order —
    # the reshape relies on that ordering.
    lat = np.array([x[2] for x in pts]).reshape((ny, nx))
    lon = np.array([x[3] for x in pts]).reshape((ny, nx))
    bbox = [[lat[0, 0], lon[0, 0]], [lat[0, -1], lon[0, -1]],
            [lat[-1, -1], lon[-1, -1]], [lat[-1, 0], lon[-1, 0]]]
    center = {"coordinates": [lon[ny // 2, nx // 2], lat[ny // 2, nx // 2]],
              "type": "point"}
    return center, bbox
class S1toFrame(object):
    '''
    Create a traditional ISCE Frame object from S1 container.

    Wraps a parsed Sentinel1_TOPS product (`sar`) and its objectified
    annotation XML (`obj`); the populated ISCE Frame is available as
    ``self.frame`` after construction.
    '''
    def __init__(self, sar, obj):
        # sar: Sentinel1_TOPS product with parsed bursts
        # obj: objectified annotation XML (lxml)
        self.sar = sar
        self.obj = obj
        self.missionId = self.obj.xpath('.//missionId/text()')[0]
        # Single mission letter, e.g. the 'A'/'B' in S1A/S1B.
        self.missionId_char = MISSION_RE.search(self.missionId).group(1)
        self.frame = Frame()
        self.frame.configure()
        self.parse()
    def parse(self):
        '''Populate every section of the Frame from the S1 product.'''
        self._populatePlatform()
        self._populateInstrument()
        self._populateFrame()
        self._populateOrbit()
        self._populateExtras()
    def _populatePlatform(self):
        '''Fill in mission/planet/antenna details on the platform.'''
        platform = self.frame.getInstrument().getPlatform()
        platform.setMission(self.missionId)
        platform.setPlanet(Planet(pname='Earth'))
        # Sentinel-1 is right-looking, hence pointing direction -1.
        platform.setPointingDirection(-1)
        platform.setAntennaLength(40.0)
    def _populateInstrument(self):
        '''Fill in radar/instrument parameters from the first burst.'''
        ins = self.frame.getInstrument()
        b0 = self.sar.bursts[0]
        b1 = self.sar.bursts[-1]
        ins.setRadarWavelength(b0.radarWavelength)
        ins.setPulseRepetitionFrequency(1.0/b0.azimuthTimeInterval)
        ins.setRangePixelSize(b0.rangePixelSize)
        # Pulse length and chirp slope come from the reference replica
        # in the general annotation block.
        tau = self.obj.generalAnnotation.replicaInformationList.replicaInformation.referenceReplica.timeDelay
        ins.setPulseLength(float(tau))
        slope = str(self.obj.generalAnnotation.replicaInformationList.replicaInformation.referenceReplica.phaseCoefficients).split()[2]
        ins.setChirpSlope(float(slope))
        # Range sampling rate derived from the range pixel size.
        fsamp = old_div(Const.c, (2.0 * b0.rangePixelSize))
        ins.setRangeSamplingRate(fsamp)
        ins.setInPhaseValue(127.5)
        ins.setQuadratureValue(127.5)
        ins.setBeamNumber(self.obj.adsHeader.swath)
    def _populateFrame(self):
        '''Fill in scene-level metadata (pass, times, ranges, sizes).'''
        frame = self.frame
        b0 = self.sar.bursts[0]
        b1 = self.sar.bursts[-1]
        # Platform heading decides ascending vs descending pass.
        hdg = self.obj.generalAnnotation.productInformation.platformHeading
        if hdg < -90:
            frame.setPassDirection('Descending')
        else:
            frame.setPassDirection('Ascending')
        frame.setStartingRange(b0.startingRange)
        frame.setOrbitNumber(int(self.obj.adsHeader.absoluteOrbitNumber))
        frame.setProcessingFacility('Sentinel 1%s' % self.missionId_char)
        frame.setProcessingSoftwareVersion('IPF')
        frame.setPolarization(self.obj.adsHeader.polarisation)
        frame.setNumberOfSamples(int(self.obj.imageAnnotation.imageInformation.numberOfSamples))
        frame.setNumberOfLines(int(self.obj.imageAnnotation.imageInformation.numberOfLines))
        # Sensing window spans from the first burst start to the last
        # burst stop; mid time is the simple midpoint.
        frame.setSensingStart(b0.sensingStart)
        frame.setSensingStop(b1.sensingStop)
        tmid = b0.sensingStart + 0.5 * (b1.sensingStop - b0.sensingStart)
        frame.setSensingMid(tmid)
        farRange = b0.startingRange + frame.getNumberOfSamples() * b0.rangePixelSize
        frame.setFarRange(farRange)
    def _populateOrbit(self):
        '''Reuse the first burst's orbit object for the whole frame.'''
        b0 = self.sar.bursts[0]
        self.frame.orbit = b0.orbit
    def _populateExtras(self):
        '''Fill in fields not covered by the standard populate steps.'''
        b0 = self.sar.bursts[0]
        self.frame._squintAngle = 0.0
        self.frame.doppler = b0.doppler._coeffs[0]
        # Dataset type ('raw' or 'slc') is sniffed from the product
        # filename; anything matching the pattern is treated as slc.
        match = DATASETTYPE_RE.search(self.sar.xml)
        if match: self.frame.datasetType = 'slc'
        else: self.frame.datasetType = ''
def get_loc(frameInfo, bbox_type):
    """Return a GeoJSON Polygon for the frame's (reference) bbox.

    :param frameInfo: object exposing getBBox()/getReferenceBBox(),
        each returning four [lat, lon] corner pairs
    :param bbox_type: "refbbox" to use the reference bbox, anything
        else uses the regular bbox
    :return: {"type": "Polygon", "coordinates": [ring]} with a closed
        5-point [lon, lat] ring
    """
    # ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin ``float`` is the documented replacement (float64).
    bbox = np.array(frameInfo.getBBox()).astype(float)
    print("get_loc bbox: %s" %bbox)
    if bbox_type == "refbbox":
        bbox = np.array(frameInfo.getReferenceBBox()).astype(float)
    # Swap to [lon, lat] order and close the ring by repeating the
    # first corner, as GeoJSON requires.
    coords = [
        [ bbox[0,1], bbox[0,0] ],
        [ bbox[1,1], bbox[1,0] ],
        [ bbox[2,1], bbox[2,0] ],
        [ bbox[3,1], bbox[3,0] ],
        [ bbox[0,1], bbox[0,0] ],
    ]
    print("get_loc coords : [%s]" %coords)
    return {
        "type": "Polygon",
        "coordinates": [coords]
    }
def set_value(param, value):
    """Rebind the local name *param* to *value* and log it.

    NOTE(review): the assignment only rebinds the function-local
    variable, so the caller's argument object is never modified; by the
    time the message prints, both placeholders show *value*.
    """
    try:
        param = value
        print("set value of %s is %s" %(param, value))
    except Exception as e:
        print(traceback.format_exc())
def get_union_geom(frame_infoes, bbox_type):
    """Union the bounding-box polygons of several frame infos.

    :param frame_infoes: iterable of frame-info objects accepted by get_loc()
    :param bbox_type: "bbox" or "refbbox", forwarded to get_loc()
    :return: an ogr Geometry holding the union of all frame polygons
        (None if *frame_infoes* is empty)
    """
    geom_union = None
    for frameInfo in frame_infoes:
        loc = get_loc(frameInfo, bbox_type)
        print("get_union_geom loc : %s" %loc)
        geom = ogr.CreateGeometryFromJson(json.dumps(loc))
        # Bug fix: the original formatted the function object itself
        # ("%s" % get_union_geom) instead of the geometry just built.
        print("get_union_geom : geom : %s" %geom)
        if geom_union is None:
            geom_union = geom
        else:
            geom_union = geom_union.Union(geom)
            print("union geom : %s " %geom_union)
    print("final geom_union : %s" %geom_union)
    print("extract data geom_union type : %s" %type(geom_union))
    return geom_union
def get_env_box(env):
    """Build the four [lat, lon] corners from an OGR envelope tuple."""
    left, right = env[0], env[1]
    bottom, top = env[2], env[3]
    bbox = [
        [top, left],
        [top, right],
        [bottom, right],
        [bottom, left],
    ]
    print("get_env_box box : %s" %bbox)
    return bbox
def create_stitched_met_json( frame_infoes, met_json_file):
    """Create HySDS met json file.

    Unions the (reference) bounding boxes of all frame infos, collects
    per-frame metadata fields, reduces them (dedupe / mean / min / max)
    and writes the resulting met dict to *met_json_file*.
    """
    # build met
    geom_union = get_union_geom(frame_infoes, "bbox")
    print("create_stitched_met_json : bbox geom_union : %s" %geom_union)
    bbox = json.loads(geom_union.ExportToJson())["coordinates"][0]
    print("create_stitched_met_json : bbox : %s" %bbox)
    # NOTE(review): the polygon ring above is immediately replaced by
    # the clockwise-ordered envelope of the union.
    bbox = get_env_box(geom_union.GetEnvelope())
    bbox = change_direction(bbox)
    print("create_stitched_met_json :Final bbox : %s" %bbox)
    geom_union = get_union_geom(frame_infoes, "refbbox")
    print("create_stitched_met_json : refbbox geom_union : %s" %geom_union)
    refbbox = json.loads(geom_union.ExportToJson())["coordinates"][0]
    print("create_stitched_met_json : refbbox : %s" %refbbox)
    refbbox = get_env_box(geom_union.GetEnvelope())
    refbbox = change_direction(refbbox)
    print("create_stitched_met_json :Final refbbox : %s" %refbbox)
    #refbbox = json.loads(get_union_geom(frame_infoes, "refbbox").ExportToJson())["coordinates"][0]
    #print("create_stitched_met_json : refbbox : %s" %refbbox)
    # Skeleton met dict: list-valued fields are appended to from each
    # frame below and then reduced; commented-out keys are kept for
    # reference to the full HySDS schema.
    met = {
        'product_type': 'interferogram',
        #'master_scenes': [],
        'refbbox': refbbox,
        #'esd_threshold': [],
        'frameID': [],
        #'temporal_span': [],
        #'swath': [1, 2, 3],
        'trackNumber': [],
        #'archive_filename': id,
        'dataset_type': 'slc',
        'tile_layers': [],
        #'latitudeIndexMin': int(math.floor(env[2] * 10)),
        #'latitudeIndexMax': int(math.ceil(env[3] * 10)),
        'latitudeIndexMin': [],
        'latitudeIndexMax': [],
        #'parallelBaseline': [],
        'url': [],
        'doppler': [],
        #'version': [],
        #'slave_scenes': [],
        #'orbit_type': [],
        #'spacecraftName': [],
        'frameNumber': None,
        #'reference': None,
        'bbox': bbox,
        'ogr_bbox': [],
        'orbitNumber': [],
        #'inputFile': 'sentinel.ini',
        #'perpendicularBaseline': [],
        'orbitRepeat': [],
        'sensingStop': [],
        #'polarization': [],
        #'scene_count': 0,
        'beamID': None,
        'sensor': [],
        'lookDirection': [],
        'platform': [],
        'startingRange': [],
        'frameName': [],
        #'tiles': True,
        'sensingStart': [],
        #'beamMode': [],
        #'imageCorners': [],
        'direction': [],
        'prf': [],
        #'range_looks': [],
        #'dem_type': None,
        #'filter_strength': [],
        #'azimuth_looks': [],
        # Checksum ties the met content to its output filename.
        "sha224sum": hashlib.sha224(str.encode(os.path.basename(met_json_file))).hexdigest()
    }
    # collect values
    # set_params: fields gathered from every frame; single_params:
    # taken from the first frame only; list_params stay lists after
    # dedup, all other set_params collapse to a scalar when unique.
    set_params=('tile_layers',
        'latitudeIndexMin', 'url', 'prf', 'doppler', 'platform', 'orbitNumber',
        'latitudeIndexMax', 'sensingStop', 'startingRange', 'sensingStart'
        #'master_scenes', 'temporal_span', 'swath'
        )
    single_params = ('frameID', 'sensor', 'beamID', 'frameNumber', 'trackNumber',
        'dataset_type', 'archive_filename',
        'direction', 'orbitRepeat', 'lookDirection','frameName', 'product_type'
        #,'esd_threshold'
        )
    list_params=( 'tile_layers', 'latitudeIndexMin', 'url', 'prf', 'doppler', 'platform', 'orbitNumber',
        'latitudeIndexMax', 'sensingStop', 'startingRange', 'sensingStart'
        #'master_scenes', temporal_span' , 'swath'
        )
    mean_params = ( 'prf', 'doppler')
    min_params = ('latitudeIndexMin', 'startingRange', 'sensingStart' )
    max_params = ('latitudeIndexMax', 'sensingStop')
    for i, frame_info in enumerate(frame_infoes):
        md = frame_info.toDict()
        for param in set_params:
            if param not in md:
                continue
            print(" set param: {}".format(param))
            if isinstance(md[param], list):
                met[param].extend(md[param])
            else:
                met[param].append(md[param])
        if i == 0:
            for param in single_params:
                if param in md:
                    met[param] = md[param]
        ##met['scene_count'] += 1
    # Deduplicate the collected values; non-list params collapse to a
    # scalar when only one distinct value was seen.
    for param in set_params:
        print("param: {}".format(param))
        tmp_met = list(set(met[param]))
        if param in list_params:
            met[param] = tmp_met
        else:
            met[param] = tmp_met[0] if len(tmp_met) == 1 else tmp_met
    for param in mean_params:
        print("mean param: %s type : %s " %(param, type(param)))
        met[param] = np.mean(met[param])
    for param in min_params:
        print("min param: %s type : %s " %(param, type(param)))
        if met[param] is None:
            print("Missing Min Param : %s" %param)
        else:
            print(met[param])
            met[param] = min(met[param])
    for param in max_params:
        print("max param: %s type : %s " %(param, type(param)))
        if met[param] is None:
            print("Missing Max Param : %s" %param)
        else:
            print(met[param])
            met[param] = max(met[param])
    #met['imageCorners'] = get_image_corners(met['imageCorners'])
    # ogr_bbox swaps each corner back to [lon, lat] order.
    try:
        print(bbox)
        print(type(bbox))
        met['ogr_bbox'] = [[x, y] for y, x in bbox]
    except Exception as e:
        print(traceback.format_exc())
    # write out dataset json
    with open(met_json_file, 'w') as f:
        json.dump(met, f, indent=2)
if __name__ == '__main__':
    '''
    Main driver: parse each input swath XML into an ISCE frame, dump a
    per-swath met file, then stitch all frame infos into one met json.
    '''
    #Parse command line
    inps = cmdLineParse()
    #Read in metadata
    xml_files=inps.xml_file
    frame_infos=[]
    i=0
    for inxml in xml_files:
        i=i+1
        sar = Sentinel1_TOPS()
        # Per-swath intermediate met file, numbered from 1.
        met_file= "test_met%s.json"%i
        sar.xml = inxml
        print("Extract Metadata : Processing %s" %inxml)
        sar.parse()
        obj = objectify(inxml)
        ####Copy into ISCE Frame
        frame = S1toFrame(sar,obj)
        ####Frameinfoextractor
        fie = FIE()
        frameInfo = fie.extractInfoFromFrame(frame.frame)
        print("printing FramInfo :\n")
        print(frameInfo)
        frame_infos.append(frameInfo)
        frameInfo.dump(met_file)
    # Combine all per-swath frame infos into the requested output json.
    create_stitched_met_json( frame_infos, inps.outjson)
| [
"numpy.mean",
"traceback.format_exc",
"FrameInfoExtractor.FrameInfoExtractor",
"argparse.ArgumentParser",
"re.compile",
"json.dumps",
"builtins.str",
"past.utils.old_div",
"Sentinel1_TOPS.Sentinel1_TOPS",
"numpy.array",
"builtins.range",
"isceobj.Scene.Frame.Frame",
"lxml.objectify.parse",
... | [((603, 628), 're.compile', 're.compile', (['"""-(raw|slc)-"""'], {}), "('-(raw|slc)-')\n", (613, 628), False, 'import os, sys, re, requests, json, shutil, traceback, logging, hashlib, math\n'), ((644, 665), 're.compile', 're.compile', (['"""S1(\\\\w)"""'], {}), "('S1(\\\\w)')\n", (654, 665), False, 'import os, sys, re, requests, json, shutil, traceback, logging, hashlib, math\n'), ((744, 813), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Extract metadata from S1 swath"""'}), "(description='Extract metadata from S1 swath')\n", (767, 813), False, 'import argparse\n'), ((1515, 1523), 'builtins.range', 'range', (['n'], {}), '(n)\n', (1520, 1523), False, 'from builtins import range\n'), ((1676, 1692), 'past.utils.old_div', 'old_div', (['area', '(2)'], {}), '(area, 2)\n', (1683, 1692), False, 'from past.utils import old_div\n'), ((3162, 3169), 'isceobj.Scene.Frame.Frame', 'Frame', ([], {}), '()\n', (3167, 3169), False, 'from isceobj.Scene.Frame import Frame\n'), ((4344, 4385), 'past.utils.old_div', 'old_div', (['Const.c', '(2.0 * b0.rangePixelSize)'], {}), '(Const.c, 2.0 * b0.rangePixelSize)\n', (4351, 4385), False, 'from past.utils import old_div\n'), ((12343, 12362), 'numpy.mean', 'np.mean', (['met[param]'], {}), '(met[param])\n', (12350, 12362), True, 'import numpy as np\n'), ((13206, 13233), 'json.dump', 'json.dump', (['met', 'f'], {'indent': '(2)'}), '(met, f, indent=2)\n', (13215, 13233), False, 'import os, sys, re, requests, json, shutil, traceback, logging, hashlib, math\n'), ((13485, 13501), 'Sentinel1_TOPS.Sentinel1_TOPS', 'Sentinel1_TOPS', ([], {}), '()\n', (13499, 13501), False, 'from Sentinel1_TOPS import Sentinel1_TOPS\n'), ((13791, 13796), 'FrameInfoExtractor.FrameInfoExtractor', 'FIE', ([], {}), '()\n', (13794, 13796), True, 'from FrameInfoExtractor import FrameInfoExtractor as FIE\n'), ((2302, 2329), 'past.utils.old_div', 'old_div', (['(ys[-1] - ys[0])', 'dy'], {}), '(ys[-1] - ys[0], dy)\n', (2309, 2329), False, 
'from past.utils import old_div\n'), ((2421, 2448), 'past.utils.old_div', 'old_div', (['(xs[-1] - xs[0])', 'dx'], {}), '(xs[-1] - xs[0], dx)\n', (2428, 2448), False, 'from past.utils import old_div\n'), ((2466, 2495), 'numpy.array', 'np.array', (['[x[2] for x in pts]'], {}), '([x[2] for x in pts])\n', (2474, 2495), True, 'import numpy as np\n'), ((2523, 2552), 'numpy.array', 'np.array', (['[x[3] for x in pts]'], {}), '([x[3] for x in pts])\n', (2531, 2552), True, 'import numpy as np\n'), ((3569, 3590), 'isceobj.Planet.Planet.Planet', 'Planet', ([], {'pname': '"""Earth"""'}), "(pname='Earth')\n", (3575, 3590), False, 'from isceobj.Planet.Planet import Planet\n'), ((7121, 7136), 'json.dumps', 'json.dumps', (['loc'], {}), '(loc)\n', (7131, 7136), False, 'import os, sys, re, requests, json, shutil, traceback, logging, hashlib, math\n'), ((1299, 1313), 'lxml.objectify.parse', 'OBJ.parse', (['fid'], {}), '(fid)\n', (1308, 1313), True, 'from lxml import objectify as OBJ\n'), ((6860, 6882), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (6880, 6882), False, 'import os, sys, re, requests, json, shutil, traceback, logging, hashlib, math\n'), ((13103, 13125), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (13123, 13125), False, 'import os, sys, re, requests, json, shutil, traceback, logging, hashlib, math\n'), ((4159, 4272), 'builtins.str', 'str', (['self.obj.generalAnnotation.replicaInformationList.replicaInformation.referenceReplica.phaseCoefficients'], {}), '(self.obj.generalAnnotation.replicaInformationList.replicaInformation.\n referenceReplica.phaseCoefficients)\n', (4162, 4272), False, 'from builtins import str\n'), ((10336, 10367), 'os.path.basename', 'os.path.basename', (['met_json_file'], {}), '(met_json_file)\n', (10352, 10367), False, 'import os, sys, re, requests, json, shutil, traceback, logging, hashlib, math\n')] |
import numpy as np
from numba import njit, b1, i1, int64, float64
@njit(b1(i1[:, :], i1, i1))
def was_winning_move(board, row, col):
    """Return True if the stone at (row, col) completed a 4-in-a-row.

    board is an int8 connect-four grid; (row, col) is the last move
    played.  col == -1 is the sentinel for "no move yet" (root call).
    """
    if col == -1:
        return False
    player = board[row, col]
    # Boolean mask of this player's stones; all four line checks below
    # scan windows of length 4 in that mask.
    player_pieces = board == player
    win_len = 4
    # Horizontal: the full row containing the move.
    row_win = player_pieces[row, :]
    for i in range(row_win.size - win_len + 1):
        if row_win[i: i + win_len].all():
            return True
    # Down-right diagonal through (row, col): offset col - row.
    diag_win1 = np.diag(player_pieces, col - row)
    for i in range(diag_win1.size - win_len + 1):
        if diag_win1[i: i + win_len].all():
            return True
    # Down-left diagonal: mirror the board left-right and take the
    # corresponding diagonal; 6 == width - 1 for the 7-column board
    # used by this module (see the main block).
    new_col = 6 - col
    diag_win2 = np.diag(player_pieces[:, ::-1], new_col - row)
    for i in range(diag_win2.size - win_len + 1):
        if diag_win2[i: i + win_len].all():
            return True
    # Vertical: only possible when at least 4 cells lie at or below the
    # move (rows row..5), i.e. row < 3 on a 6-row board.
    if row < 3:
        col_win = player_pieces[row:, col]
        for i in range(col_win.size - win_len + 1):
            if col_win[i: i + win_len].all():
                return True
    return False
"""
@njit(i1[:, :](i1[:, :], i1, i1, i1))
def add_stone(board, row, col, player):
# available_idx = np.argmin(board[:, column] == 0) - 1
#new_board = board.copy()
board[row][col] = player
return board
@njit(b1[:](i1[:, :]))
def valid_moves(board):
return board[0] == 0
@njit(i1(i1[:, :], i1))
def playable_row(board, col):
return np.where(board[:, col] == 0)[0][-1]
"""
@njit(float64(i1[:, :], int64, int64, int64, float64, float64, int64, float64[:, :]))
def minimax(board, move_row, move_col, depth, alpha, beta, player, result):
    """Alpha-beta minimax for connect-four.

    board        -- int8 grid, mutated in place and restored before return
    move_row/col -- the move that produced this position (-1/-1 at the root)
    depth        -- remaining search depth; 0 scores the position as a draw
    player       -- +1 maximizes, -1 minimizes
    result       -- (7, 2) array; at the root, result[col] = [searched, value]
                    for each root move

    Returns the winner's sign if the previous move won, else the
    minimax value of the position.
    """
    if was_winning_move(board, move_row, move_col):
        return -player  # the player who just moved (= -player) won
    moves = np.where(board[0] == 0)[0]  # playable columns (top cell empty)
    if depth == 0 or moves.size == 0:
        return 0
    if player == 1:
        best_val = -np.inf
    else:
        best_val = np.inf
    for i in range(moves.size):
        col = moves[i]
        row = np.where(board[:, col] == 0)[0][-1]  # lowest empty cell
        board[row][col] = player  # play the move in place
        value = minimax(board, row, col, depth - 1, alpha, beta, -player, result)
        board[row][col] = 0  # undo the move; faster than copying the board
        if move_row == -1 and move_col == -1:
            # Root call: record which moves were searched and their values
            # so the caller can pick among the best ones.
            result[col, 0] = 1
            result[col, 1] = value
        if player == 1:
            best_val = max(best_val, value)
            alpha = max(alpha, best_val)
        else:
            best_val = min(best_val, value)
            # Bug fix: the minimizing player must tighten *beta*; the
            # original lowered alpha instead, which only widened the
            # window and disabled nearly all pruning (values stayed
            # correct, but the search was far slower).
            beta = min(beta, best_val)
        if beta <= alpha:
            break
    return best_val
# @njit(int64(i1[:, :], int64))
def best_move_alpha_beta(board, depth):
    """Search *depth* plies ahead and return one of the best columns,
    chosen uniformly at random among the optimal root moves."""
    board = board.astype(np.int8)
    result = np.zeros((7, 2))  # per-column [searched, value], filled at the root
    best = minimax(board, -1, -1, depth, -np.inf, np.inf, 1, result)
    candidates = [col for col in range(7)
                  if result[col, 0] and result[col, 1] == best]
    return np.random.choice(candidates)
if __name__ == "__main__":
    # Ad-hoc manual test: build a small position and time the search.
    import time
    b = np.zeros((6, 7))
    b[5][3] = 1
    b[5][2] = 1
    # b[5][1] = 1
    b[4][3] = -1
    # NOTE(review): the board built above is discarded — it is fully
    # overwritten by the literal array below.
    b = np.array([
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 0],
        [0, 0, -1, 0, 0, 0, 0],
        [0, 0, 1, 1, 0, 0, 0]])
    b = b.astype(np.int8)
    b = b * -1  # flip perspective: the side to move becomes +1
    # b[4][2] = -1
    # b[4][1] = -1
    # print(was_winning_move(b, 2,1))
    print(b)
    t = time.time()
    print(best_move_alpha_beta(b, 9))
    print(time.time() - t)
| [
"numba.b1",
"numpy.random.choice",
"numpy.where",
"numpy.diag",
"numpy.array",
"numpy.zeros",
"numba.float64",
"time.time"
] | [((423, 456), 'numpy.diag', 'np.diag', (['player_pieces', '(col - row)'], {}), '(player_pieces, col - row)\n', (430, 456), True, 'import numpy as np\n'), ((613, 659), 'numpy.diag', 'np.diag', (['player_pieces[:, ::-1]', '(new_col - row)'], {}), '(player_pieces[:, ::-1], new_col - row)\n', (620, 659), True, 'import numpy as np\n'), ((74, 94), 'numba.b1', 'b1', (['i1[:, :]', 'i1', 'i1'], {}), '(i1[:, :], i1, i1)\n', (76, 94), False, 'from numba import njit, b1, i1, int64, float64\n'), ((1390, 1468), 'numba.float64', 'float64', (['i1[:, :]', 'int64', 'int64', 'int64', 'float64', 'float64', 'int64', 'float64[:, :]'], {}), '(i1[:, :], int64, int64, int64, float64, float64, int64, float64[:, :])\n', (1397, 1468), False, 'from numba import njit, b1, i1, int64, float64\n'), ((2952, 2968), 'numpy.zeros', 'np.zeros', (['(7, 2)'], {}), '((7, 2))\n', (2960, 2968), True, 'import numpy as np\n'), ((3228, 3251), 'numpy.random.choice', 'np.random.choice', (['moves'], {}), '(moves)\n', (3244, 3251), True, 'import numpy as np\n'), ((3306, 3322), 'numpy.zeros', 'np.zeros', (['(6, 7)'], {}), '((6, 7))\n', (3314, 3322), True, 'import numpy as np\n'), ((3399, 3553), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0,\n 0, 0, 0, 0, 0], [0, 0, -1, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0]]'], {}), '([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, \n 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, -1, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0]])\n', (3407, 3553), True, 'import numpy as np\n'), ((3738, 3749), 'time.time', 'time.time', ([], {}), '()\n', (3747, 3749), False, 'import time\n'), ((1634, 1657), 'numpy.where', 'np.where', (['(board[0] == 0)'], {}), '(board[0] == 0)\n', (1642, 1657), True, 'import numpy as np\n'), ((3798, 3809), 'time.time', 'time.time', ([], {}), '()\n', (3807, 3809), False, 'import time\n'), ((1908, 1936), 'numpy.where', 'np.where', (['(board[:, col] == 0)'], {}), '(board[:, col] == 0)\n', (1916, 1936), True, 
'import numpy as np\n')] |
import argparse
import matplotlib.pyplot as plt
import numpy as np
import torch
from sklearn.manifold import TSNE
from src.data.make_dataset import CorruptMnist
from src.models.model import MyAwesomeModel
def tsne_embedding_plot() -> None:
    """Project backbone embeddings of the training set to 2-D with
    t-SNE and save a scatter plot colored by class label."""
    arg_parser = argparse.ArgumentParser(description="Training arguments")
    arg_parser.add_argument("model_checkpoint", type=str)
    args = arg_parser.parse_args()
    print(args)

    dataset = CorruptMnist(train=True, in_folder="data/raw", out_folder="data/processed")
    loader = torch.utils.data.DataLoader(dataset, batch_size=128)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = MyAwesomeModel()
    model.load_state_dict(torch.load(args.model_checkpoint))
    model = model.to(device)

    print("Extract embeddings")
    feature_batches, label_batches = [], []
    with torch.no_grad():
        for images, targets in loader:
            # Flatten the backbone features to one vector per sample.
            feats = model.backbone(images.to(device)).reshape(images.shape[0], -1)
            feature_batches.append(feats)
            label_batches.append(targets)
    embeddings = torch.cat(feature_batches, dim=0).cpu().numpy()
    labels = torch.cat(label_batches, dim=0).numpy()

    print("Running tsne")
    embeddings_2d = TSNE(n_components=2).fit_transform(embeddings)

    # One scatter series per class so the legend shows the labels.
    for cls in np.unique(labels):
        mask = labels == cls
        plt.scatter(embeddings_2d[mask, 0], embeddings_2d[mask, 1], label=str(cls))
    plt.legend()
    plt.savefig(f"reports/figures/2d_tsne_embedding.png")
# Entry point: run the embedding visualization when executed as a script.
if __name__ == "__main__":
    tsne_embedding_plot()
| [
"matplotlib.pyplot.savefig",
"numpy.unique",
"src.models.model.MyAwesomeModel",
"argparse.ArgumentParser",
"torch.load",
"sklearn.manifold.TSNE",
"torch.cat",
"torch.cuda.is_available",
"src.data.make_dataset.CorruptMnist",
"torch.utils.data.DataLoader",
"torch.no_grad",
"matplotlib.pyplot.leg... | [((257, 314), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Training arguments"""'}), "(description='Training arguments')\n", (280, 314), False, 'import argparse\n'), ((433, 508), 'src.data.make_dataset.CorruptMnist', 'CorruptMnist', ([], {'train': '(True)', 'in_folder': '"""data/raw"""', 'out_folder': '"""data/processed"""'}), "(train=True, in_folder='data/raw', out_folder='data/processed')\n", (445, 508), False, 'from src.data.make_dataset import CorruptMnist\n'), ((526, 580), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_set'], {'batch_size': '(128)'}), '(train_set, batch_size=128)\n', (553, 580), False, 'import torch\n'), ((654, 670), 'src.models.model.MyAwesomeModel', 'MyAwesomeModel', ([], {}), '()\n', (668, 670), False, 'from src.models.model import MyAwesomeModel\n'), ((1239, 1259), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)'}), '(n_components=2)\n', (1243, 1259), False, 'from sklearn.manifold import TSNE\n'), ((1325, 1342), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (1334, 1342), True, 'import numpy as np\n'), ((1444, 1456), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1454, 1456), True, 'import matplotlib.pyplot as plt\n'), ((1461, 1514), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""reports/figures/2d_tsne_embedding.png"""'], {}), "(f'reports/figures/2d_tsne_embedding.png')\n", (1472, 1514), True, 'import matplotlib.pyplot as plt\n'), ((604, 629), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (627, 629), False, 'import torch\n'), ((697, 730), 'torch.load', 'torch.load', (['args.model_checkpoint'], {}), '(args.model_checkpoint)\n', (707, 730), False, 'import torch\n'), ((835, 850), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (848, 850), False, 'import torch\n'), ((1168, 1192), 'torch.cat', 'torch.cat', (['labels'], {'dim': '(0)'}), '(labels, dim=0)\n', (1177, 1192), False, 'import 
torch\n'), ((1112, 1140), 'torch.cat', 'torch.cat', (['embeddings'], {'dim': '(0)'}), '(embeddings, dim=0)\n', (1121, 1140), False, 'import torch\n')] |
import numpy as np
import tensorflow as tf
from .sac import SAC, td_target
from softlearning.misc.utils import mixup
from softlearning.models.utils import flatten_input_structure
class SACClassifierMultiGoal(SAC):
def __init__(
self,
classifiers,
goal_example_pools,
goal_example_validation_pools,
classifier_lr=1e-4,
classifier_batch_size=128,
reward_type='logits',
n_classifier_train_steps=int(1e4),
classifier_optim_name='adam',
mixup_alpha=0.2,
goal_conditioned=False,
**kwargs,
):
self._classifiers = classifiers
self._goal_example_pools = goal_example_pools
self._goal_example_validation_pools = goal_example_validation_pools
assert classifiers and len(classifiers) == len(goal_example_pools) \
and len(classifiers) == len(goal_example_validation_pools), \
'Number of goal classifiers must match the number of goal pools'
self._num_goals = len(classifiers)
self._classifier_lr = classifier_lr
self._reward_type = reward_type
self._n_classifier_train_steps = n_classifier_train_steps
self._classifier_optim_name = classifier_optim_name
self._classifier_batch_size = classifier_batch_size
self._mixup_alpha = mixup_alpha
self._goal_conditioned = goal_conditioned
super(SACClassifierMultiGoal, self).__init__(**kwargs)
def _build(self):
super(SACClassifierMultiGoal, self)._build()
self._init_classifier_update()
def _init_placeholders(self):
super(SACClassifierMultiGoal, self)._init_placeholders()
self._placeholders['labels'] = tf.placeholder(
tf.float32,
shape=(None, 1),
name='labels',
)
def _get_classifier_training_ops(self):
if self._classifier_optim_name == 'adam':
opt_func = tf.train.AdamOptimizer
elif self._classifier_optim_name == 'sgd':
opt_func = tf.train.GradientDescentOptimizer
else:
raise NotImplementedError
self._classifier_optimizers = [
opt_func(
learning_rate=self._classifier_lr,
name='classifier_optimizer_' + str(goal)
)
for goal in range(self._num_goals)
]
classifier_training_ops = [
tf.contrib.layers.optimize_loss(
classifier_loss_t,
self.global_step,
learning_rate=self._classifier_lr,
optimizer=classifier_optimizer,
variables=classifier.trainable_variables,
increment_global_step=False,
)
for classifier_loss_t, classifier_optimizer, classifier
in zip(self._classifier_losses_t,
self._classifier_optimizers,
self._classifiers)
]
return classifier_training_ops
def _init_classifier_update(self):
classifier_inputs = flatten_input_structure({
name: self._placeholders['observations'][name]
for name in self._classifiers[0].observation_keys
})
goal_logits = [classifier(classifier_inputs)
for classifier in self._classifiers]
self._classifier_losses_t = [
tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=logits, labels=self._placeholders['labels']))
for logits in goal_logits
]
self._classifier_training_ops = self._get_classifier_training_ops()
def _init_external_reward(self):
classifier_inputs = flatten_input_structure({
name: self._placeholders['observations'][name]
for name in self._classifiers[0].observation_keys
})
observation_logits_per_classifier = [
classifier(classifier_inputs) for classifier in self._classifiers]
# DEBUG
# self._observation_logits_per_classifier = observation_logits_per_classifier
goal_indices = self._placeholders['observations']['goal_index']
goal_index_masks = [
tf.equal(goal_indices, goal)
for goal in range(self._num_goals)
]
# DEBUG
# self._goal_index_masks = goal_index_masks
# Replace the correct classification logits for the repsective goals
observation_logits = observation_logits_per_classifier[0]
for goal in range(1, self._num_goals):
observation_logits = tf.where(
goal_index_masks[goal],
x=observation_logits_per_classifier[goal],
y=observation_logits
)
self._ext_reward = self._reward_t = observation_logits
def _get_classifier_feed_dicts(self):
# Sample N x the normal amount of observations, where N is
# the number of goals.
negatives = self.sampler.random_batch(
self._num_goals * self._classifier_batch_size)['observations']
# Split up the sample observations based on the goal index.
# TODO: Make it split based on the goal qpos
negative_inds = [
(negatives['goal_index'] == goal).flatten()
for goal in range(self._num_goals)
]
negatives_per_goal = [
{
key: values[negative_ind]
for key, values in negatives.items()
}
for negative_ind in negative_inds
]
# Get positives from different goal pools
goal_example_pool_sizes = [
goal_example_pool[next(iter(goal_example_pool.keys()))].shape[0]
for goal_example_pool in self._goal_example_pools
]
rand_positive_indices = [
np.random.randint(
goal_example_pool_size,
size=self._classifier_batch_size)
for goal_example_pool_size in goal_example_pool_sizes
]
positives_per_goal = [
{
key: values[rand_positive_ind]
for key, values in goal_examples.items()
}
for rand_positive_ind, goal_examples
in zip(rand_positive_indices, self._goal_example_pools)
]
labels_batches = []
for goal in range(self._num_goals):
n_negatives = np.sum(negative_inds[goal].astype(int))
n_positives = self._classifier_batch_size
labels_batch = np.concatenate([
np.zeros((n_negatives, 1)),
np.ones((n_positives, 1)),
])
labels_batches.append(labels_batch)
# labels_batch = np.zeros((2 * self._classifier_batch_size, 1))
# labels_batch[self._classifier_batch_size:] = 1.0
# labels_batches = [labels_batch.copy() for _ in range(self._num_goals)]
observation_batches = [
{
key: np.concatenate((_negatives[key], _positives[key]), axis=0)
for key in self._classifiers[0].observation_keys
}
for _negatives, _positives in zip(negatives_per_goal, positives_per_goal)
]
if self._mixup_alpha > 0:
for goal_index in range(self._num_goals):
observation_batches[goal_index], labels_batches[goal_index] = mixup(
observation_batches[goal_index], labels_batches[goal_index], alpha=self._mixup_alpha)
feed_dicts = [
{
**{
self._placeholders['observations'][key]:
observations_batch[key]
for key in self._classifiers[0].observation_keys
},
self._placeholders['labels']: labels_batch
}
for observations_batch, labels_batch in zip(observation_batches, labels_batches)
]
return feed_dicts
def _epoch_after_hook(self, *args, **kwargs):
if self._epoch == 0:
for i in range(self._n_classifier_train_steps):
feed_dicts = self._get_classifier_feed_dicts()
self._train_classifier_step(feed_dicts)
def _train_classifier_step(self, feed_dicts):
losses = []
for feed_dict, classifier_training_op, classifier_loss_t \
in zip(feed_dicts,
self._classifier_training_ops,
self._classifier_losses_t):
_, loss = self._session.run((
classifier_training_op, classifier_loss_t
), feed_dict)
losses.append(loss)
self._training_losses = losses
return losses
    def get_diagnostics(self,
                        iteration,
                        batch,
                        training_paths,
                        evaluation_paths):
        """Extend the base diagnostics with per-goal classifier statistics.

        For every goal: sample a batch of goal examples (training and
        validation pools) matching the sample-batch size, run the learned
        reward and the classifier loss on the stacked
        [sample obs | goal obs | validation goal obs] batch, and record the
        per-goal reward means plus the cached training losses.
        """
        diagnostics = super(SACClassifierMultiGoal, self).get_diagnostics(
            iteration, batch, training_paths, evaluation_paths)
        sample_obs = batch['observations']
        n_sample_obs = sample_obs[next(iter(sample_obs))].shape[0]
        # One random index vector per goal pool, sized like the sample batch.
        goal_indices = [
            np.random.randint(
                goal_examples[next(iter(goal_examples))].shape[0],
                size=n_sample_obs)
            for goal_examples in self._goal_example_pools
        ]
        goal_observations_per_goal = [
            {
                key: goal_examples[key][goal_index]
                for key in goal_examples.keys()
            }
            for goal_examples, goal_index in zip(self._goal_example_pools, goal_indices)
        ]
        goal_indices_validation = [
            np.random.randint(
                goal_examples_validation[next(iter(goal_examples_validation))].shape[0],
                size=n_sample_obs)
            for goal_examples_validation in self._goal_example_validation_pools
        ]
        goal_observations_validation_per_goal = [
            {
                key: goal_examples_validation[key][goal_index]
                for key in goal_examples_validation.keys()
            }
            for goal_examples_validation, goal_index in
            zip(self._goal_example_validation_pools, goal_indices_validation)
        ]
        reward_sample, reward_goal, reward_goal_validation, losses = [], [], [], []
        for goal_index in range(self._num_goals):
            goal_obs = goal_observations_per_goal[goal_index]
            n_goal_obs = goal_obs[next(iter(goal_obs))].shape[0]
            goal_obs_validation = goal_observations_validation_per_goal[goal_index]
            n_goal_obs_validation = goal_obs_validation[next(iter(goal_obs_validation))].shape[0]
            # DEBUG
            # observation_logits_0, observation_logits_1 = self._observation_logits_per_classifier
            # goal_index_mask_0, goal_index_mask_1 = self._goal_index_masks
            # Prefer the policy's observation keys; on any failure fall back
            # to the per-goal classifier's keys.
            # NOTE(review): bare except hides the actual error -- consider
            # narrowing to the expected KeyError.
            try:
                obs_feed_dict = {
                    self._placeholders['observations'][key]: np.concatenate((
                        sample_obs[key],
                        goal_obs[key],
                        goal_obs_validation[key]
                    ), axis=0)
                    for key in self._policy.observation_keys
                }
            except:
                obs_feed_dict = {
                    self._placeholders['observations'][key]: np.concatenate((
                        sample_obs[key],
                        goal_obs[key],
                        goal_obs_validation[key]
                    ), axis=0)
                    for key in self._classifiers[goal_index].observation_keys
                }
            # Labels: 0 for sampled obs, 1 for (training + validation) goal obs.
            reward_sample_goal_observations, classifier_loss = self._session.run(
                (self._reward_t, self._classifier_losses_t[goal_index]),
                feed_dict={
                    **obs_feed_dict,
                    # **{
                    #     self._placeholders['observations'][key]: np.concatenate((
                    #         sample_obs[key],
                    #         goal_obs[key],
                    #         goal_obs_validation[key]
                    #     ), axis=0)
                    #     for key in self._policy.observation_keys
                    #     # for key in self._classifiers[goal_index].observation_keys
                    # },
                    self._placeholders['labels']: np.concatenate([
                        np.zeros((n_sample_obs, 1)),
                        np.ones((n_goal_obs, 1)),
                        np.ones((n_goal_obs_validation, 1)),
                    ])
                }
            )
            # Split the stacked rewards back into their three segments.
            (reward_sample_observations,
             reward_goal_observations,
             reward_goal_observations_validation) = np.split(
                reward_sample_goal_observations,
                (n_sample_obs, n_sample_obs + n_goal_obs),
                axis=0)
            reward_sample.append(reward_sample_observations)
            reward_goal.append(reward_goal_observations)
            reward_goal_validation.append(reward_goal_observations_validation)
            losses.append(classifier_loss)
        # Add losses/classifier outputs to the dictionary
        diagnostics.update({
            # 'reward_learning/classifier_training_loss_' + str(goal): losses[goal]
            'reward_learning/classifier_training_loss_' + str(goal):
                self._training_losses[goal]
            for goal in range(self._num_goals)
        })
        diagnostics.update({
            'reward_learning/reward_sample_obs_mean_' + str(goal):
            np.mean(reward_sample[goal])
            for goal in range(self._num_goals)
        })
        diagnostics.update({
            'reward_learning/reward_goal_obs_mean_' + str(goal):
            np.mean(reward_goal[goal])
            for goal in range(self._num_goals)
        })
        diagnostics.update({
            'reward_learning/reward_goal_obs_validation_mean_' + str(goal):
            np.mean(reward_goal_validation[goal])
            for goal in range(self._num_goals)
        })
        return diagnostics
def _evaluate_rollouts(self, episodes, env):
"""Compute evaluation metrics for the given rollouts."""
diagnostics = super(SACClassifierMultiGoal, self)._evaluate_rollouts(
episodes, env)
learned_reward = self._session.run(
self._reward_t,
feed_dict={
self._placeholders['observations'][name]: np.concatenate([
episode['observations'][name]
for episode in episodes
])
for name in self._policy.observation_keys
# for name in self._classifiers[0].observation_keys
})
diagnostics[f'reward_learning/reward-mean'] = np.mean(learned_reward)
diagnostics[f'reward_learning/reward-min'] = np.min(learned_reward)
diagnostics[f'reward_learning/reward-max'] = np.max(learned_reward)
diagnostics[f'reward_learning/reward-std'] = np.std(learned_reward)
return diagnostics
def get_reward(self, observations):
learned_reward = self._session.run(
self._reward_t,
feed_dict={
self._placeholders['observations'][name]: observations[name]
for name in self._policy.observation_keys
# for name in self._classifiers[0].observation_keys
}
)
return learned_reward
@property
def tf_saveables(self):
saveables = super(SACClassifierMultiGoal, self).tf_saveables
saveables.update({
'_classifier_optimizer_' + str(goal): self._classifier_optimizers[goal]
for goal in range(self._num_goals)
})
return saveables
| [
"tensorflow.contrib.layers.optimize_loss",
"numpy.mean",
"tensorflow.equal",
"numpy.ones",
"softlearning.models.utils.flatten_input_structure",
"tensorflow.placeholder",
"numpy.std",
"numpy.max",
"tensorflow.where",
"numpy.random.randint",
"numpy.split",
"numpy.zeros",
"numpy.concatenate",
... | [((1715, 1773), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '(None, 1)', 'name': '"""labels"""'}), "(tf.float32, shape=(None, 1), name='labels')\n", (1729, 1773), True, 'import tensorflow as tf\n'), ((3061, 3188), 'softlearning.models.utils.flatten_input_structure', 'flatten_input_structure', (["{name: self._placeholders['observations'][name] for name in self.\n _classifiers[0].observation_keys}"], {}), "({name: self._placeholders['observations'][name] for\n name in self._classifiers[0].observation_keys})\n", (3084, 3188), False, 'from softlearning.models.utils import flatten_input_structure\n'), ((3721, 3848), 'softlearning.models.utils.flatten_input_structure', 'flatten_input_structure', (["{name: self._placeholders['observations'][name] for name in self.\n _classifiers[0].observation_keys}"], {}), "({name: self._placeholders['observations'][name] for\n name in self._classifiers[0].observation_keys})\n", (3744, 3848), False, 'from softlearning.models.utils import flatten_input_structure\n'), ((14806, 14829), 'numpy.mean', 'np.mean', (['learned_reward'], {}), '(learned_reward)\n', (14813, 14829), True, 'import numpy as np\n'), ((14883, 14905), 'numpy.min', 'np.min', (['learned_reward'], {}), '(learned_reward)\n', (14889, 14905), True, 'import numpy as np\n'), ((14959, 14981), 'numpy.max', 'np.max', (['learned_reward'], {}), '(learned_reward)\n', (14965, 14981), True, 'import numpy as np\n'), ((15035, 15057), 'numpy.std', 'np.std', (['learned_reward'], {}), '(learned_reward)\n', (15041, 15057), True, 'import numpy as np\n'), ((2413, 2627), 'tensorflow.contrib.layers.optimize_loss', 'tf.contrib.layers.optimize_loss', (['classifier_loss_t', 'self.global_step'], {'learning_rate': 'self._classifier_lr', 'optimizer': 'classifier_optimizer', 'variables': 'classifier.trainable_variables', 'increment_global_step': '(False)'}), '(classifier_loss_t, self.global_step,\n learning_rate=self._classifier_lr, optimizer=classifier_optimizer,\n 
variables=classifier.trainable_variables, increment_global_step=False)\n', (2444, 2627), True, 'import tensorflow as tf\n'), ((4221, 4249), 'tensorflow.equal', 'tf.equal', (['goal_indices', 'goal'], {}), '(goal_indices, goal)\n', (4229, 4249), True, 'import tensorflow as tf\n'), ((4600, 4701), 'tensorflow.where', 'tf.where', (['goal_index_masks[goal]'], {'x': 'observation_logits_per_classifier[goal]', 'y': 'observation_logits'}), '(goal_index_masks[goal], x=observation_logits_per_classifier[goal],\n y=observation_logits)\n', (4608, 4701), True, 'import tensorflow as tf\n'), ((5837, 5912), 'numpy.random.randint', 'np.random.randint', (['goal_example_pool_size'], {'size': 'self._classifier_batch_size'}), '(goal_example_pool_size, size=self._classifier_batch_size)\n', (5854, 5912), True, 'import numpy as np\n'), ((12757, 12853), 'numpy.split', 'np.split', (['reward_sample_goal_observations', '(n_sample_obs, n_sample_obs + n_goal_obs)'], {'axis': '(0)'}), '(reward_sample_goal_observations, (n_sample_obs, n_sample_obs +\n n_goal_obs), axis=0)\n', (12765, 12853), True, 'import numpy as np\n'), ((3416, 3512), 'tensorflow.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', ([], {'logits': 'logits', 'labels': "self._placeholders['labels']"}), "(logits=logits, labels=self.\n _placeholders['labels'])\n", (3455, 3512), True, 'import tensorflow as tf\n'), ((6984, 7042), 'numpy.concatenate', 'np.concatenate', (['(_negatives[key], _positives[key])'], {'axis': '(0)'}), '((_negatives[key], _positives[key]), axis=0)\n', (6998, 7042), True, 'import numpy as np\n'), ((7385, 7481), 'softlearning.misc.utils.mixup', 'mixup', (['observation_batches[goal_index]', 'labels_batches[goal_index]'], {'alpha': 'self._mixup_alpha'}), '(observation_batches[goal_index], labels_batches[goal_index], alpha=\n self._mixup_alpha)\n', (7390, 7481), False, 'from softlearning.misc.utils import mixup\n'), ((13586, 13614), 'numpy.mean', 'np.mean', (['reward_sample[goal]'], {}), 
'(reward_sample[goal])\n', (13593, 13614), True, 'import numpy as np\n'), ((13779, 13805), 'numpy.mean', 'np.mean', (['reward_goal[goal]'], {}), '(reward_goal[goal])\n', (13786, 13805), True, 'import numpy as np\n'), ((13981, 14018), 'numpy.mean', 'np.mean', (['reward_goal_validation[goal]'], {}), '(reward_goal_validation[goal])\n', (13988, 14018), True, 'import numpy as np\n'), ((6569, 6595), 'numpy.zeros', 'np.zeros', (['(n_negatives, 1)'], {}), '((n_negatives, 1))\n', (6577, 6595), True, 'import numpy as np\n'), ((6613, 6638), 'numpy.ones', 'np.ones', (['(n_positives, 1)'], {}), '((n_positives, 1))\n', (6620, 6638), True, 'import numpy as np\n'), ((11004, 11090), 'numpy.concatenate', 'np.concatenate', (['(sample_obs[key], goal_obs[key], goal_obs_validation[key])'], {'axis': '(0)'}), '((sample_obs[key], goal_obs[key], goal_obs_validation[key]),\n axis=0)\n', (11018, 11090), True, 'import numpy as np\n'), ((14480, 14551), 'numpy.concatenate', 'np.concatenate', (["[episode['observations'][name] for episode in episodes]"], {}), "([episode['observations'][name] for episode in episodes])\n", (14494, 14551), True, 'import numpy as np\n'), ((11375, 11461), 'numpy.concatenate', 'np.concatenate', (['(sample_obs[key], goal_obs[key], goal_obs_validation[key])'], {'axis': '(0)'}), '((sample_obs[key], goal_obs[key], goal_obs_validation[key]),\n axis=0)\n', (11389, 11461), True, 'import numpy as np\n'), ((12432, 12459), 'numpy.zeros', 'np.zeros', (['(n_sample_obs, 1)'], {}), '((n_sample_obs, 1))\n', (12440, 12459), True, 'import numpy as np\n'), ((12485, 12509), 'numpy.ones', 'np.ones', (['(n_goal_obs, 1)'], {}), '((n_goal_obs, 1))\n', (12492, 12509), True, 'import numpy as np\n'), ((12535, 12570), 'numpy.ones', 'np.ones', (['(n_goal_obs_validation, 1)'], {}), '((n_goal_obs_validation, 1))\n', (12542, 12570), True, 'import numpy as np\n')] |
import numpy as np
from prml.linear.classifier import Classifier
class Perceptron(Classifier):
    """Perceptron binary classifier trained with the perceptron rule."""

    def fit(self, X, t, max_epoch=100):
        """Fit the perceptron on a binary (+1/-1) classification problem.

        A randomly chosen misclassified sample drives each weight update;
        training stops as soon as every sample is classified correctly,
        or after ``max_epoch`` update steps.

        Parameters
        ----------
        X : (N, D) np.ndarray
            training independent variable
        t : (N,) np.ndarray
            training dependent variable, binary -1 or 1
        max_epoch : int, optional
            maximum number of epoch (the default is 100)
        """
        self.w = np.zeros(np.size(X, 1))
        for _ in range(max_epoch):
            misclassified = np.sign(X @ self.w) != t
            # Check convergence *before* sampling: the original checked only
            # after an update, so np.random.choice(0) crashed whenever the
            # data were already separated at the start of an iteration.
            if not misclassified.any():
                break
            X_error = X[misclassified]
            t_error = t[misclassified]
            idx = np.random.choice(len(X_error))
            self.w += X_error[idx] * t_error[idx]

    def classify(self, X):
        """Classify input data.

        Parameters
        ----------
        X : (N, D) np.ndarray
            independent variable to be classified

        Returns
        -------
        (N,) np.ndarray
            binary class (-1 or 1) for each input
        """
        # np.int was removed in NumPy 1.24; the builtin int is equivalent.
        return np.sign(X @ self.w).astype(int)
| [
"numpy.size",
"numpy.sign"
] | [((560, 573), 'numpy.size', 'np.size', (['X', '(1)'], {}), '(X, 1)\n', (567, 573), True, 'import numpy as np\n'), ((1171, 1190), 'numpy.sign', 'np.sign', (['(X @ self.w)'], {}), '(X @ self.w)\n', (1178, 1190), True, 'import numpy as np\n'), ((632, 651), 'numpy.sign', 'np.sign', (['(X @ self.w)'], {}), '(X @ self.w)\n', (639, 651), True, 'import numpy as np\n'), ((676, 695), 'numpy.sign', 'np.sign', (['(X @ self.w)'], {}), '(X @ self.w)\n', (683, 695), True, 'import numpy as np\n')] |
# Original code by:
# <NAME>: Mapping Your Music Collection
# http://www.christianpeccei.com/musicmap/
import numpy as np
import os
import struct
import wave
from shlex import split
from subprocess import call
from uuid import uuid4
class Analyzer:
    """Extract a 42-dimensional feature vector from an audio file.

    Based on Christian Peccei's "Mapping Your Music Collection"
    (http://www.christianpeccei.com/musicmap/).
    """

    FEATURES_LENGTH = 42       # total length of the feature vector
    SECONDS_PER_SONG = 90      # seconds of audio sampled per song
    SAMPLING_RATE = 10000      # resampling rate passed to avconv (Hz)

    def valid_features(self, data):
        """Return True when *data* has the expected feature-vector length."""
        return len(data) == self.FEATURES_LENGTH

    def moments(self, x):
        """Return [mean, std, skewness, kurtosis] of array *x*."""
        mean = x.mean()
        std = x.var() ** 0.5
        skewness = ((x - mean) ** 3).mean() / std ** 3
        kurtosis = ((x - mean) ** 4).mean() / std ** 4
        return [mean, std, skewness, kurtosis]

    def fftfeatures(self, wavdata):
        """Return the fraction of spectral power in 10 equal frequency bands."""
        f = np.fft.fft(wavdata)
        # Keep the positive-frequency half, skipping the first two bins.
        # The bound must be an int: the original `f.size / 2 + 1` is a float
        # under Python 3 and raises TypeError when used as a slice index.
        f = f[2:(f.size // 2 + 1)]
        f = abs(f)
        total_power = f.sum()
        f = np.array_split(f, 10)
        return [e.sum() / total_power for e in f]

    def features(self, data):
        """Compute the full feature vector for raw sample data.

        ``data`` must have a length divisible by 1000 (``read_wav``
        guarantees this); the signal is smoothed at four window sizes and
        the moments of each smoothed signal and its first difference are
        concatenated with the FFT band powers.
        """
        # convert to array
        x = np.array(data)
        # initialize result vector
        feature_vec = np.zeros(self.FEATURES_LENGTH)
        # smoothing window: 1 samples
        x1 = x
        d1 = x1[1:] - x1[:-1]
        feature_vec[0:4] = self.moments(x1)
        feature_vec[4:8] = self.moments(d1)
        # smoothing window: 10 samples
        x10 = x.reshape(-1, 10).mean(1)
        d10 = x10[1:] - x10[:-1]
        feature_vec[8:12] = self.moments(x10)
        feature_vec[12:16] = self.moments(d10)
        # smoothing window: 100 samples
        x100 = x.reshape(-1, 100).mean(1)
        d100 = x100[1:] - x100[:-1]
        feature_vec[16:20] = self.moments(x100)
        feature_vec[20:24] = self.moments(d100)
        # smoothing window: 1000 samples
        x1000 = x.reshape(-1, 1000).mean(1)
        d1000 = x1000[1:] - x1000[:-1]
        feature_vec[24:28] = self.moments(x1000)
        feature_vec[28:32] = self.moments(d1000)
        feature_vec[32:] = self.fftfeatures(data)
        return feature_vec

    def read_wav(self, wav_file):
        """Read a WAV file and return its samples, truncated to a multiple of 1000."""
        song_data = wave.open(wav_file)
        n = song_data.getnframes()
        n = n - n % 1000
        frames = song_data.readframes(n)
        wav_data = struct.unpack('%dh' % n, frames)
        return wav_data

    def compute_features(self, mp3_file):
        """Convert *mp3_file* to a temporary mono WAV via avconv and featurize it."""
        out_path = '/tmp/%s.wav' % uuid4()
        cmd_args = 'avconv -v quiet -i "%s" -ac 1 -ar %s -t %s "%s"'
        cmd_args = cmd_args % (mp3_file, self.SAMPLING_RATE,
                               self.SECONDS_PER_SONG, out_path)
        ret_code = call(split(cmd_args))
        assert(ret_code == 0)
        sample_data = self.read_wav(out_path)
        assert(len(sample_data) > 0)
        os.remove(out_path)
        return self.features(sample_data)
| [
"wave.open",
"shlex.split",
"numpy.fft.fft",
"uuid.uuid4",
"numpy.array_split",
"numpy.array",
"numpy.zeros",
"struct.unpack",
"os.remove"
] | [((710, 729), 'numpy.fft.fft', 'np.fft.fft', (['wavdata'], {}), '(wavdata)\n', (720, 729), True, 'import numpy as np\n'), ((825, 846), 'numpy.array_split', 'np.array_split', (['f', '(10)'], {}), '(f, 10)\n', (839, 846), True, 'import numpy as np\n'), ((969, 983), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (977, 983), True, 'import numpy as np\n'), ((1042, 1072), 'numpy.zeros', 'np.zeros', (['self.FEATURES_LENGTH'], {}), '(self.FEATURES_LENGTH)\n', (1050, 1072), True, 'import numpy as np\n'), ((2024, 2043), 'wave.open', 'wave.open', (['wav_file'], {}), '(wav_file)\n', (2033, 2043), False, 'import wave\n'), ((2164, 2196), 'struct.unpack', 'struct.unpack', (["('%dh' % n)", 'frames'], {}), "('%dh' % n, frames)\n", (2177, 2196), False, 'import struct\n'), ((2668, 2687), 'os.remove', 'os.remove', (['out_path'], {}), '(out_path)\n', (2677, 2687), False, 'import os\n'), ((2301, 2308), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (2306, 2308), False, 'from uuid import uuid4\n'), ((2528, 2543), 'shlex.split', 'split', (['cmd_args'], {}), '(cmd_args)\n', (2533, 2543), False, 'from shlex import split\n')] |
import re
import numpy as np
from rdkit import Chem
from rdkit.Chem.rdchem import ChiralType
from EFGs import standize
# Matches a (possibly fractional) percentage such as "85%" or "7.5%".
pat = r'\d+(?:\.\d+)?%'


def p2f(percentage):
    """Convert a percentage string like '85%' to a fraction (0.85)."""
    # def instead of an assigned lambda (PEP 8 E731); behavior unchanged.
    return float(percentage.strip('%')) / 100
def mols_from_smiles_list(all_smiles):
    """Create RDKit molecules from a list of SMILES strings.

    Empty/falsy entries are skipped.
    """
    return [Chem.MolFromSmiles(smiles) for smiles in all_smiles if smiles]
def replace_deuterated(smi):
    """Replace explicit deuterium atoms ``[2H]`` with plain ``[H]`` in a SMILES."""
    # Raw string for the pattern: the original '\[2H\]' relied on invalid
    # escape sequences, which newer Python versions warn about (and will
    # eventually reject). The compiled regex is identical.
    return re.sub(r'\[2H\]', r'[H]', smi)
def get_tagged_atoms_from_mols(mols):
    """Collect atom-mapped atoms (and their map numbers) across molecules.

    Returns two parallel lists: the tagged atoms and their map numbers.
    """
    all_atoms = []
    all_tags = []
    for mol in mols:
        mol_atoms, mol_tags = get_tagged_atoms_from_mol(mol)
        all_atoms.extend(mol_atoms)
        all_tags.extend(mol_tags)
    return all_atoms, all_tags
def get_tagged_atoms_from_mol(mol):
    """Return the atom-mapped atoms of *mol* and their map numbers (as strings)."""
    tagged = [(atom, str(atom.GetProp('molAtomMapNumber')))
              for atom in mol.GetAtoms()
              if atom.HasProp('molAtomMapNumber')]
    atoms = [atom for atom, _ in tagged]
    atom_tags = [tag for _, tag in tagged]
    return atoms, atom_tags
def atoms_are_different(atom1, atom2):
    """Compare two RDKit atoms on basic properties; True when any differ."""
    basic_accessors = (
        'GetAtomicNum',            # must be true for atom mapping
        'GetTotalNumHs',
        'GetFormalCharge',
        'GetDegree',
        'IsInRing',
        'GetNumRadicalElectrons',
        'GetIsAromatic',
    )
    for accessor in basic_accessors:
        if getattr(atom1, accessor)() != getattr(atom2, accessor)():
            return True
    # Check bonds and nearest neighbor identity
    bonds1 = sorted(bond_to_label(bond) for bond in atom1.GetBonds())
    bonds2 = sorted(bond_to_label(bond) for bond in atom2.GetBonds())
    return bonds1 != bonds2
def find_map_num(mol, mapnum):
    """Return ``(index, atom)`` for the atom of *mol* carrying map number *mapnum*.

    Raises IndexError when no atom carries that map number.
    """
    target = str(mapnum)
    matches = [(atom.GetIdx(), atom)
               for atom in mol.GetAtoms()
               if atom.HasProp('molAtomMapNumber')
               and atom.GetProp('molAtomMapNumber') == target]
    return matches[0]
def get_tetrahedral_atoms(reactants, products):
    """Pair mapped reactant/product atoms where either side has a specified
    tetrahedral chiral tag.

    Returns a list of ``(atom_tag, reactant_atom, product_atom)`` tuples.
    """
    tetrahedral_atoms = []
    for reactant in reactants:
        for ar in reactant.GetAtoms():
            if not ar.HasProp('molAtomMapNumber'):
                continue
            atom_tag = ar.GetProp('molAtomMapNumber')
            for product in products:
                try:
                    (ip, ap) = find_map_num(product, atom_tag)
                    if ar.GetChiralTag() != ChiralType.CHI_UNSPECIFIED or\
                            ap.GetChiralTag() != ChiralType.CHI_UNSPECIFIED:
                        tetrahedral_atoms.append((atom_tag, ar, ap))
                except IndexError:
                    # Map number not present in this product; try the next one.
                    pass
    return tetrahedral_atoms
def set_isotope_to_equal_mapnum(mol):
    """Copy each mapped atom's map number onto its isotope field."""
    for atom in mol.GetAtoms():
        if atom.HasProp('molAtomMapNumber'):
            atom.SetIsotope(int(atom.GetProp('molAtomMapNumber')))
def get_frag_around_tetrahedral_center(mol, idx):
    """Build a SMILES fragment covering a tetrahedral atom and its neighbors.

    The molecule must already carry isotope labels equal to its atom map
    numbers (see ``set_isotope_to_equal_mapnum``).
    """
    center = mol.GetAtomWithIdx(idx)
    ids_to_include = [idx] + [nbr.GetIdx() for nbr in center.GetNeighbors()]
    symbols = []
    for atom in mol.GetAtoms():
        if atom.GetIsotope() != 0:
            symbols.append('[{}{}]'.format(atom.GetIsotope(), atom.GetSymbol()))
        else:
            symbols.append('[#{}]'.format(atom.GetAtomicNum()))
    return Chem.MolFragmentToSmiles(mol, ids_to_include, isomericSmiles=True,
                                    atomSymbols=symbols, allBondsExplicit=True,
                                    allHsExplicit=True)
def check_tetrahedral_centers_equivalent(atom1, atom2):
    """Check whether two tetrahedral centers are equivalent in chirality.

    The ChiralTag itself is ignored; the owning molecules of both atoms
    must already be isotope-mapped.
    """
    frag = get_frag_around_tetrahedral_center(atom1.GetOwningMol(),
                                              atom1.GetIdx())
    neighborhood = Chem.MolFromSmiles(frag, sanitize=False)
    matches = atom2.GetOwningMol().GetSubstructMatches(neighborhood,
                                                       useChirality=True)
    return any(atom2.GetIdx() in matched_ids for matched_ids in matches)
def clear_isotope(mol):
    """Reset every atom's isotope label to 0 (undoes isotope-as-mapnum)."""
    for atom in mol.GetAtoms():
        atom.SetIsotope(0)
def get_rxn_tag(reaction_smiles):
    '''Given a reaction, return a reaction tag.
    0: Reaction without any stereocenters involved
    1: Reaction with chirality involved, but not in reaction center
    2: Reaction with chirality involved, and in reaction center
    '''
    rt, pd = reaction_smiles.split('>>')
    reactants = mols_from_smiles_list(replace_deuterated(rt).split('.'))
    products = mols_from_smiles_list(replace_deuterated(pd).split('.'))
    prod_atoms, prod_atom_tags = get_tagged_atoms_from_mols(products)
    reac_atoms, reac_atom_tags = get_tagged_atoms_from_mols(reactants)
    # Find differences
    changed_atoms = {} # actual reactant atom species
    # Product atoms that are different from reactant atom equivalent
    for i, prod_tag in enumerate(prod_atom_tags):
        for j, reac_tag in enumerate(reac_atom_tags):
            if reac_tag != prod_tag: continue
            if reac_tag not in changed_atoms: # don't bother comparing if we know this atom changes
                # If atom changed, add
                if atoms_are_different(prod_atoms[i], reac_atoms[j]):
                    changed_atoms[reac_tag] = reac_atoms[j]
                    break
            # If reac_tag appears multiple times, add (need for stoichometry > 1)
            if prod_atom_tags.count(reac_tag) > 1:
                changed_atoms[reac_tag] = reac_atoms[j]
                break
    # Reactant atoms that do not appear in product (tagged leaving groups)
    for j, reac_tag in enumerate(reac_atom_tags):
        if reac_tag not in changed_atoms:
            if reac_tag not in prod_atom_tags:
                changed_atoms[reac_tag] = reac_atoms[j]
    # Atoms that change CHIRALITY (just tetrahedral for now...)
    # NOTE(review): tetra_exist is assigned but never read before the
    # function returns -- dead state.
    tetra_exist = True
    tetra_atoms = get_tetrahedral_atoms(reactants, products)
    # Isotope labels are set as a side effect and undone (clear_isotope)
    # before every return below.
    [set_isotope_to_equal_mapnum(reactant) for reactant in reactants]
    [set_isotope_to_equal_mapnum(product) for product in products]
    if not tetra_atoms:
        tetra_exist = False
        [clear_isotope(reactant) for reactant in reactants]
        [clear_isotope(product) for product in products]
        return 0
    for (atom_tag, ar, ap) in tetra_atoms:
        if atom_tag in changed_atoms:
            # Tetrahedral atom is itself part of the reaction center.
            [clear_isotope(reactant) for reactant in reactants]
            [clear_isotope(product) for product in products]
            return 2
        else:
            unchanged = check_tetrahedral_centers_equivalent(ar, ap) and \
                ChiralType.CHI_UNSPECIFIED not in [ar.GetChiralTag(), ap.GetChiralTag()]
            if not unchanged:
                # Make sure chiral change is next to the reaction center and not
                # a random specifidation (must be CONNECTED to a changed atom)
                tetra_adj_to_rxn = False
                for neighbor in ap.GetNeighbors():
                    if neighbor.HasProp('molAtomMapNumber'):
                        nei_mapnum = neighbor.GetProp('molAtomMapNumber')
                        if nei_mapnum in changed_atoms:
                            tetra_adj_to_rxn = True
                            break
                if tetra_adj_to_rxn:
                    changed_atoms[atom_tag] = ar
                    [clear_isotope(reactant) for reactant in reactants]
                    [clear_isotope(product) for product in products]
                    return 2
    [clear_isotope(reactant) for reactant in reactants]
    [clear_isotope(product) for product in products]
    return 1
def bond_to_label(bond):
    """Build a sortable label for an RDKit bond from its endpoint atoms.

    Each endpoint contributes its atomic number plus, when present, its
    atom map number; endpoints are sorted so the label is direction-free.
    """
    labels = []
    for endpoint in (bond.GetBeginAtom(), bond.GetEndAtom()):
        label = str(endpoint.GetAtomicNum())
        if endpoint.HasProp('molAtomMapNumber'):
            label += endpoint.GetProp('molAtomMapNumber')
        labels.append(label)
    labels.sort()
    return '{}{}{}'.format(labels[0], bond.GetSmarts(), labels[1])
def FakeRxnChecker(rxn):
    """Check whether a reaction is invalid.

    Returns True when the reaction has no reactants, no products, or when
    every product also appears among the (standardized) reactants.
    """
    reactants, reagents, prods = rxn.strip().split('>')
    reactant_side = reactants.split('.') + reagents.split('.')
    rts = [standize(smi) for smi in reactant_side if smi]
    pds = [standize(smi) for smi in prods.split('.') if smi]
    if set(rts) & set(pds):
        # Keep only the products that are not already reactants.
        pds = '.'.join(set(pds).difference(rts))
    return (not rts) or (not pds)
def StripTrivalProd(rxn):
    """Remove trivial product(s) that also appear among the reactants.

    Returns the reaction SMILES with the single non-trivial product kept.
    When no product overlaps the reactant side the reaction is returned
    unchanged (the original implementation fell off the end and returned
    None in that case).
    """
    reactants, reagents, prods = rxn.strip().split('>')
    if reagents:
        reactants = reactants + '.' + reagents
    rts = reactants.split('.')
    pds = prods.split('.')
    if not (set(rts) & set(pds)):
        return rxn  # nothing trivial to strip
    remaining = list(set(pds).difference(rts))
    # Exactly one real product must remain after stripping the trivial ones.
    assert len(remaining) == 1
    return rxn.rsplit('>', 1)[0] + '>' + remaining[0]
def GetYield(yield_list, tolerance=0.1):
    """Extract a single fractional yield from a list of raw yield strings.

    Non-string entries (e.g. NaN) and percentages above 100% are skipped.
    Returns False when nothing usable is found. When the first two parsed
    yields agree within *tolerance*, the average of all parsed values is
    returned; otherwise the first parsed value wins.
    """
    parsed = []
    for entry in yield_list:
        if type(entry) != str:
            continue  # skip np.nan and other non-string entries
        percents = re.findall(pat, entry)
        # Discard matches above 100%.
        if percents and p2f(percents[-1]) <= 1:
            parsed.append(p2f(percents[-1]))
    if not parsed:
        return False
    if len(parsed) == 1:
        return parsed[0]
    if np.abs(parsed[0] - parsed[1]) < tolerance:
        return np.average(parsed)
    return parsed[0]
| [
"numpy.abs",
"rdkit.Chem.MolFragmentToSmiles",
"numpy.average",
"rdkit.Chem.MolFromSmiles",
"re.sub",
"re.findall",
"EFGs.standize"
] | [((492, 522), 're.sub', 're.sub', (['"""\\\\[2H\\\\]"""', '"""[H]"""', 'smi'], {}), "('\\\\[2H\\\\]', '[H]', smi)\n", (498, 522), False, 'import re\n'), ((3664, 3798), 'rdkit.Chem.MolFragmentToSmiles', 'Chem.MolFragmentToSmiles', (['mol', 'ids_to_include'], {'isomericSmiles': '(True)', 'atomSymbols': 'symbols', 'allBondsExplicit': '(True)', 'allHsExplicit': '(True)'}), '(mol, ids_to_include, isomericSmiles=True,\n atomSymbols=symbols, allBondsExplicit=True, allHsExplicit=True)\n', (3688, 3798), False, 'from rdkit import Chem\n'), ((4212, 4258), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['atom1_frag'], {'sanitize': '(False)'}), '(atom1_frag, sanitize=False)\n', (4230, 4258), False, 'from rdkit import Chem\n'), ((8858, 8869), 'EFGs.standize', 'standize', (['x'], {}), '(x)\n', (8866, 8869), False, 'from EFGs import standize\n'), ((8937, 8948), 'EFGs.standize', 'standize', (['x'], {}), '(x)\n', (8945, 8948), False, 'from EFGs import standize\n'), ((9966, 10005), 'numpy.abs', 'np.abs', (['(valid_yield[0] - valid_yield[1])'], {}), '(valid_yield[0] - valid_yield[1])\n', (9972, 10005), True, 'import numpy as np\n'), ((10032, 10055), 'numpy.average', 'np.average', (['valid_yield'], {}), '(valid_yield)\n', (10042, 10055), True, 'import numpy as np\n'), ((406, 432), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smiles'], {}), '(smiles)\n', (424, 432), False, 'from rdkit import Chem\n'), ((9705, 9723), 're.findall', 're.findall', (['pat', 'y'], {}), '(pat, y)\n', (9715, 9723), False, 'import re\n')] |
#!/usr/bin/env python
import pickle
import tensorflow as tf
import numpy as np
import tf_util
import gym
import load_policy
from tensorflow import keras
def main():
    """Roll out a behavioral-cloning policy in a Gym environment.

    Loads a Keras model from disk, runs ``--num_rollouts`` episodes
    (optionally rendering), and prints the mean and std of the episode
    returns.
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('behavioral_cloning_file', type=str)
    parser.add_argument('envname', type=str)
    parser.add_argument('--render', action='store_true')
    parser.add_argument("--max_timesteps", type=int)
    parser.add_argument('--num_rollouts', type=int, default=20,
                        help='Number of expert roll outs')
    args = parser.parse_args()

    print('loading behavioral cloning model')
    model = keras.models.load_model(args.behavioral_cloning_file)
    print('successfully loaded')

    # gym is already imported at module level; the original redundantly
    # re-imported it here.
    env = gym.make(args.envname)
    max_steps = args.max_timesteps or env.spec.timestep_limit

    returns = []
    observations = []
    actions = []
    for i in range(args.num_rollouts):
        if i % 10 == 0:
            print('iter', i)
        obs = env.reset()
        done = False
        totalr = 0.
        steps = 0
        while not done:
            # The model predicts one action from a single observation row.
            action = model.predict(obs.reshape(1, obs.shape[0]))
            observations.append(obs)
            actions.append(action)
            obs, r, done, _ = env.step(action)
            totalr += r
            steps += 1
            if args.render:
                env.render()
            if steps >= max_steps:
                break
        returns.append(totalr)

    print('returns', returns)
    print('mean return', np.mean(returns))
    print('std of return', np.std(returns))
if __name__ == '__main__':
    # Entry point: only run the rollouts when executed as a script.
    main()
| [
"numpy.mean",
"argparse.ArgumentParser",
"tensorflow.keras.models.load_model",
"numpy.std",
"gym.make"
] | [((200, 225), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (223, 225), False, 'import argparse\n'), ((655, 708), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['args.behavioral_cloning_file'], {}), '(args.behavioral_cloning_file)\n', (678, 708), False, 'from tensorflow import keras\n'), ((768, 790), 'gym.make', 'gym.make', (['args.envname'], {}), '(args.envname)\n', (776, 790), False, 'import gym\n'), ((1543, 1559), 'numpy.mean', 'np.mean', (['returns'], {}), '(returns)\n', (1550, 1559), True, 'import numpy as np\n'), ((1588, 1603), 'numpy.std', 'np.std', (['returns'], {}), '(returns)\n', (1594, 1603), True, 'import numpy as np\n')] |
import sys
assert sys.version_info[0] == 2  # this script relies on Python 2 ROS bindings
import rosbag
import numpy as np
from rospy_message_converter import message_converter

# Export all DVS events from the /cam0/events topic of out.bag as four
# parallel numpy arrays (x, y, polarity, timestamp).
bag = rosbag.Bag('out.bag')
x_chunks = []
y_chunks = []
ts_chunks = []
p_chunks = []
for topic, msg, t in bag.read_messages(topics=['/cam0/events']):
    msg_dict = message_converter.convert_ros_message_to_dictionary(msg)
    events = msg_dict['events']
    n = len(events)
    x = np.zeros(n, dtype=np.uint16)
    y = np.zeros(n, dtype=np.uint16)
    ts = np.zeros(n, dtype=np.uint64)
    p = np.zeros(n, dtype=np.uint8)
    for i, event in enumerate(events):
        x[i] = event['x']
        y[i] = event['y']
        # NOTE(review): only the nanosecond field is kept; the 'secs' part
        # of the stamp is dropped -- confirm this is intended.
        ts[i] = event['ts']['nsecs']
        p[i] = 1 if event['polarity'] else 0
    # Collect per-message chunks and concatenate once at the end: the
    # original concatenated into the running array on every message,
    # which is O(n^2) overall.
    x_chunks.append(x)
    y_chunks.append(y)
    ts_chunks.append(ts)
    p_chunks.append(p)
bag.close()
x_fin = np.concatenate(x_chunks) if x_chunks else np.zeros(0, dtype=np.uint16)
y_fin = np.concatenate(y_chunks) if y_chunks else np.zeros(0, dtype=np.uint16)
ts_fin = np.concatenate(ts_chunks) if ts_chunks else np.zeros(0, dtype=np.uint64)
p_fin = np.concatenate(p_chunks) if p_chunks else np.zeros(0, dtype=np.uint8)
np.save("x.npy", x_fin)
np.save("y.npy", y_fin)
np.save("p.npy", p_fin)
np.save("ts.npy", ts_fin)
"rospy_message_converter.message_converter.convert_ros_message_to_dictionary",
"numpy.concatenate",
"numpy.save",
"rosbag.Bag"
] | [((137, 158), 'rosbag.Bag', 'rosbag.Bag', (['"""out.bag"""'], {}), "('out.bag')\n", (147, 158), False, 'import rosbag\n'), ((1014, 1037), 'numpy.save', 'np.save', (['"""x.npy"""', 'x_fin'], {}), "('x.npy', x_fin)\n", (1021, 1037), True, 'import numpy as np\n'), ((1038, 1061), 'numpy.save', 'np.save', (['"""y.npy"""', 'y_fin'], {}), "('y.npy', y_fin)\n", (1045, 1061), True, 'import numpy as np\n'), ((1062, 1085), 'numpy.save', 'np.save', (['"""p.npy"""', 'p_fin'], {}), "('p.npy', p_fin)\n", (1069, 1085), True, 'import numpy as np\n'), ((1086, 1111), 'numpy.save', 'np.save', (['"""ts.npy"""', 'ts_fin'], {}), "('ts.npy', ts_fin)\n", (1093, 1111), True, 'import numpy as np\n'), ((296, 352), 'rospy_message_converter.message_converter.convert_ros_message_to_dictionary', 'message_converter.convert_ros_message_to_dictionary', (['msg'], {}), '(msg)\n', (347, 352), False, 'from rospy_message_converter import message_converter\n'), ((757, 783), 'numpy.concatenate', 'np.concatenate', (['[x_fin, x]'], {}), '([x_fin, x])\n', (771, 783), True, 'import numpy as np\n'), ((823, 849), 'numpy.concatenate', 'np.concatenate', (['[y_fin, y]'], {}), '([y_fin, y])\n', (837, 849), True, 'import numpy as np\n'), ((890, 918), 'numpy.concatenate', 'np.concatenate', (['[ts_fin, ts]'], {}), '([ts_fin, ts])\n', (904, 918), True, 'import numpy as np\n'), ((958, 984), 'numpy.concatenate', 'np.concatenate', (['[p_fin, p]'], {}), '([p_fin, p])\n', (972, 984), True, 'import numpy as np\n')] |
import numpy as np
import math
import sys, copy
sys.path.insert(0,'../Robots')
import robot_toy_example as robot_moel
import uvs as uvss
import time
robot = robot_moel.toy_blocks_robot()
# presumably the [low, high] magnitudes of the random probing motions used
# while estimating the Jacobian -- confirm against uvs.UVS.
estimate_jacobian_random_motion_range = [2, 5]
# presumably [min, max] bounds used to normalize each servoing step -- confirm.
step_normalize_range = [2, 3]
uvs = uvss.UVS(robot, 0.5, 0.1, 2, step_normalize_range,estimate_jacobian_random_motion_range)
# Robot states recorded before/after each servoing step (see go_loop()).
trajectory = []
def set_intermediate_target(dx, dy):
    """Shift the robot's fake intermediate target by (dx, dy) from the current joints."""
    joints = robot.current_joints()
    robot.set_intermediate_target_fake(joints[0] + dx, joints[1] + dy)
def estimate_jacobian(trials=3):
    """Interactively estimate the visual-servoing Jacobian.

    Performs *trials* random motions (each gated by an Enter key press),
    keeps the ones that pass the quality check, moves back to the starting
    joints, and hands the accepted (motion, error) pairs to the UVS solver.
    """
    origin = robot.current_joints()
    accepted_motions = []
    accepted_errors = []
    # NOTE(review): the reference is taken once, before any motion, so
    # every delta is measured from the starting pose -- confirm intended.
    reference = robot.current_joints()
    for trial in range(trials):
        input("Press Enter to continue...")
        motion = uvs.estimate_jacobian_random_motion(trial)
        delta = robot.current_joints() - reference
        if uvs.estimate_jacobian_motion_quality(delta):
            accepted_motions.append(motion)
            accepted_errors.append(delta)
    # Return to where we started before solving.
    input("Press Enter to continue...")
    robot.move_to(origin)
    print('back to origin')
    uvs.estimate_jacobian(np.asarray(accepted_motions),
                          np.asarray(accepted_errors))
max_it = 100
def go_loop():
    """Run the interactive visual-servoing loop until the (fake) task is done.

    Each iteration takes one controller step toward the target, records the
    robot state into the module-level `trajectory` list, performs a Broyden
    rank-one update of the Jacobian from the observed joint change, and waits
    for the user to press Enter before continuing. Stops after `max_it`
    iterations even if the task is not done.
    """
    it_count = 0
    trajectory.append(robot.get_recording_state())
    while it_count < max_it and robot.task_done_fake() is not True:
        # joints before the step, used below to measure the step's effect
        prev_error = robot.current_joints()
        r_motion = uvs.move_step(robot.get_fake_error())
        print('step:')
        print(r_motion)
        current_joints = robot.current_joints()
        trajectory.append(robot.get_recording_state())
        print('current state x: ' + str(current_joints[0]) + ' y: ' + str(current_joints[1]))
        r_error = robot.current_joints()
        delta_error = r_error - prev_error
        print('delta error')
        print(delta_error)
        print('delta_joints')
        print(r_motion)
        # broyden update
        uvs.broyden_update(delta_error, r_motion)
        input("Press Enter to continue...")
        it_count += 1
    print('task done')
# current_error = robot.get_fake_error() | [
"numpy.asarray",
"sys.path.insert",
"uvs.UVS",
"robot_toy_example.toy_blocks_robot"
] | [((48, 79), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../Robots"""'], {}), "(0, '../Robots')\n", (63, 79), False, 'import sys, copy\n'), ((159, 188), 'robot_toy_example.toy_blocks_robot', 'robot_moel.toy_blocks_robot', ([], {}), '()\n', (186, 188), True, 'import robot_toy_example as robot_moel\n'), ((272, 365), 'uvs.UVS', 'uvss.UVS', (['robot', '(0.5)', '(0.1)', '(2)', 'step_normalize_range', 'estimate_jacobian_random_motion_range'], {}), '(robot, 0.5, 0.1, 2, step_normalize_range,\n estimate_jacobian_random_motion_range)\n', (280, 365), True, 'import uvs as uvss\n'), ((1246, 1270), 'numpy.asarray', 'np.asarray', (['delta_joints'], {}), '(delta_joints)\n', (1256, 1270), True, 'import numpy as np\n'), ((1290, 1314), 'numpy.asarray', 'np.asarray', (['delta_errors'], {}), '(delta_errors)\n', (1300, 1314), True, 'import numpy as np\n')] |
import os
from pyscf.pbc.gto import Cell
from pyscf.pbc.scf import KRHF
from pyscf.pbc.tdscf import KTDHF
from pyscf.pbc.tdscf import krhf_slow_gamma as ktd
import unittest
from numpy import testing
import numpy
from test_common import retrieve_m, retrieve_m_hf, assert_vectors_close, tdhf_frozen_mask
class DiamondTest(unittest.TestCase):
    """Compare this (krhf_slow_gamma) @2kp@Gamma vs reference (pyscf)."""
    # number of k-points along the first axis and the scaled k-grid center
    k = 2
    k_c = (0, 0, 0)
    @classmethod
    def setUpClass(cls):
        # Build a diamond-like two-carbon cell and run the reference
        # KRHF + KTDHF calculations once for the whole test case.
        cls.cell = cell = Cell()
        # Lift some degeneracies
        cell.atom = '''
        C 0.000000000000 0.000000000000 0.000000000000
        C 1.67 1.68 1.69
        '''
        cell.basis = {'C': [[0, (0.8, 1.0)],
                            [1, (1.0, 1.0)]]}
        # cell.basis = 'gth-dzvp'
        cell.pseudo = 'gth-pade'
        cell.a = '''
        0.000000000, 3.370137329, 3.370137329
        3.370137329, 0.000000000, 3.370137329
        3.370137329, 3.370137329, 0.000000000'''
        cell.unit = 'B'
        cell.verbose = 5
        cell.build()
        k = cell.make_kpts([cls.k, 1, 1], scaled_center=cls.k_c)
        # K-points
        cls.model_krhf = model_krhf = KRHF(cell, k).density_fit()
        model_krhf.kernel()
        cls.td_model_krhf = td_model_krhf = KTDHF(model_krhf)
        td_model_krhf.kernel()
        # reference TDHF matrix and excitation energies from pyscf
        cls.ref_m = retrieve_m(td_model_krhf)
        cls.ref_e = td_model_krhf.e
    @classmethod
    def tearDownClass(cls):
        # These are here to remove temporary files
        del cls.td_model_krhf
        del cls.model_krhf
        del cls.cell
    def test_eri(self):
        """Tests all ERI implementations: with and without symmetries."""
        for eri in (ktd.PhysERI, ktd.PhysERI4, ktd.PhysERI8):
            # Note that this specific combination of k-points results in real orbitals and allows testing PhysERI8
            try:
                e = eri(self.model_krhf)
                m = e.tdhf_full_form()
                # Test matrix vs ref
                testing.assert_allclose(m, retrieve_m_hf(e), atol=1e-11)
                # Test matrix vs pyscf
                testing.assert_allclose(self.ref_m, m, atol=1e-5)
            except Exception:
                print("When testing {} the following exception occurred:".format(eri))
                raise
    def test_class(self):
        """Tests container behavior."""
        model = ktd.TDRHF(self.model_krhf)
        model.nroots = self.td_model_krhf.nroots
        assert not model.fast
        model.kernel()
        # excitation energies must match the pyscf reference
        testing.assert_allclose(model.e, self.td_model_krhf.e, atol=1e-5)
        nocc = nvirt = 4
        testing.assert_equal(model.xy.shape, (len(model.e), 2, self.k, nocc, nvirt))
        assert_vectors_close(model.xy, numpy.array(self.td_model_krhf.xy), atol=1e-2)
class FrozenTest(unittest.TestCase):
    """Tests frozen behavior."""
    # number of k-points along the first axis and the scaled k-grid center
    k = 2
    k_c = (0, 0, 0)
    # pre-computed density-fitting integrals reused to speed up the test
    df_file = os.path.realpath(os.path.join(__file__, "..", "frozen_test_cderi.h5"))
    @classmethod
    def setUpClass(cls):
        # Build the cell and run the non-frozen reference calculation once.
        cls.cell = cell = Cell()
        # Lift some degeneracies
        cell.atom = '''
        C 0.000000000000 0.000000000000 0.000000000000
        C 1.67 1.68 1.69
        '''
        cell.basis = 'sto-3g'
        cell.a = '''
        0.000000000, 3.370137329, 3.370137329
        3.370137329, 0.000000000, 3.370137329
        3.370137329, 3.370137329, 0.000000000'''
        cell.unit = 'B'
        cell.verbose = 5
        cell.build()
        k = cell.make_kpts([cls.k, 1, 1], scaled_center=cls.k_c)
        # K-points
        cls.model_krhf = model_krhf = KRHF(cell, k).density_fit()
        # model_krhf.with_df._cderi_to_save = cls.df_file
        model_krhf.with_df._cderi = cls.df_file
        model_krhf.conv_tol = 1e-14
        model_krhf.kernel()
        cls.td_model_krhf = model_ktd = ktd.TDRHF(model_krhf)
        model_ktd.nroots = 5
        model_ktd.kernel()
    @classmethod
    def tearDownClass(cls):
        # These are here to remove temporary files
        del cls.td_model_krhf
        del cls.model_krhf
        del cls.cell
    def test_class(self):
        """Tests container behavior (frozen vs non-frozen)."""
        # frozen=1 freezes the lowest orbital; frozen=[0, 1] freezes both listed
        for frozen in (1, [0, 1]):
            try:
                model = ktd.TDRHF(self.model_krhf, frozen=frozen)
                model.nroots = self.td_model_krhf.nroots
                model.kernel()
                # masks mapping frozen-space vectors back into the full space
                mask_o, mask_v = tdhf_frozen_mask(model.eri, kind="o,v")
                testing.assert_allclose(model.e, self.td_model_krhf.e, atol=1e-3)
                assert_vectors_close(model.xy, numpy.array(self.td_model_krhf.xy)[..., mask_o, :][..., mask_v], atol=1e-2)
            except Exception:
                print("When testing class with frozen={} the following exception occurred:".format(repr(frozen)))
                raise
| [
"test_common.tdhf_frozen_mask",
"test_common.retrieve_m_hf",
"numpy.testing.assert_allclose",
"pyscf.pbc.tdscf.krhf_slow_gamma.TDRHF",
"os.path.join",
"pyscf.pbc.tdscf.KTDHF",
"numpy.array",
"pyscf.pbc.scf.KRHF",
"test_common.retrieve_m",
"pyscf.pbc.gto.Cell"
] | [((517, 523), 'pyscf.pbc.gto.Cell', 'Cell', ([], {}), '()\n', (521, 523), False, 'from pyscf.pbc.gto import Cell\n'), ((1296, 1313), 'pyscf.pbc.tdscf.KTDHF', 'KTDHF', (['model_krhf'], {}), '(model_krhf)\n', (1301, 1313), False, 'from pyscf.pbc.tdscf import KTDHF\n'), ((1366, 1391), 'test_common.retrieve_m', 'retrieve_m', (['td_model_krhf'], {}), '(td_model_krhf)\n', (1376, 1391), False, 'from test_common import retrieve_m, retrieve_m_hf, assert_vectors_close, tdhf_frozen_mask\n'), ((2411, 2437), 'pyscf.pbc.tdscf.krhf_slow_gamma.TDRHF', 'ktd.TDRHF', (['self.model_krhf'], {}), '(self.model_krhf)\n', (2420, 2437), True, 'from pyscf.pbc.tdscf import krhf_slow_gamma as ktd\n'), ((2548, 2614), 'numpy.testing.assert_allclose', 'testing.assert_allclose', (['model.e', 'self.td_model_krhf.e'], {'atol': '(1e-05)'}), '(model.e, self.td_model_krhf.e, atol=1e-05)\n', (2571, 2614), False, 'from numpy import testing\n'), ((2943, 2995), 'os.path.join', 'os.path.join', (['__file__', '""".."""', '"""frozen_test_cderi.h5"""'], {}), "(__file__, '..', 'frozen_test_cderi.h5')\n", (2955, 2995), False, 'import os\n'), ((3066, 3072), 'pyscf.pbc.gto.Cell', 'Cell', ([], {}), '()\n', (3070, 3072), False, 'from pyscf.pbc.gto import Cell\n'), ((3855, 3876), 'pyscf.pbc.tdscf.krhf_slow_gamma.TDRHF', 'ktd.TDRHF', (['model_krhf'], {}), '(model_krhf)\n', (3864, 3876), True, 'from pyscf.pbc.tdscf import krhf_slow_gamma as ktd\n'), ((2763, 2797), 'numpy.array', 'numpy.array', (['self.td_model_krhf.xy'], {}), '(self.td_model_krhf.xy)\n', (2774, 2797), False, 'import numpy\n'), ((1195, 1208), 'pyscf.pbc.scf.KRHF', 'KRHF', (['cell', 'k'], {}), '(cell, k)\n', (1199, 1208), False, 'from pyscf.pbc.scf import KRHF\n'), ((2139, 2189), 'numpy.testing.assert_allclose', 'testing.assert_allclose', (['self.ref_m', 'm'], {'atol': '(1e-05)'}), '(self.ref_m, m, atol=1e-05)\n', (2162, 2189), False, 'from numpy import testing\n'), ((3616, 3629), 'pyscf.pbc.scf.KRHF', 'KRHF', (['cell', 'k'], {}), '(cell, k)\n', 
(3620, 3629), False, 'from pyscf.pbc.scf import KRHF\n'), ((4274, 4315), 'pyscf.pbc.tdscf.krhf_slow_gamma.TDRHF', 'ktd.TDRHF', (['self.model_krhf'], {'frozen': 'frozen'}), '(self.model_krhf, frozen=frozen)\n', (4283, 4315), True, 'from pyscf.pbc.tdscf import krhf_slow_gamma as ktd\n'), ((4437, 4476), 'test_common.tdhf_frozen_mask', 'tdhf_frozen_mask', (['model.eri'], {'kind': '"""o,v"""'}), "(model.eri, kind='o,v')\n", (4453, 4476), False, 'from test_common import retrieve_m, retrieve_m_hf, assert_vectors_close, tdhf_frozen_mask\n'), ((4493, 4559), 'numpy.testing.assert_allclose', 'testing.assert_allclose', (['model.e', 'self.td_model_krhf.e'], {'atol': '(0.001)'}), '(model.e, self.td_model_krhf.e, atol=0.001)\n', (4516, 4559), False, 'from numpy import testing\n'), ((2053, 2069), 'test_common.retrieve_m_hf', 'retrieve_m_hf', (['e'], {}), '(e)\n', (2066, 2069), False, 'from test_common import retrieve_m, retrieve_m_hf, assert_vectors_close, tdhf_frozen_mask\n'), ((4606, 4640), 'numpy.array', 'numpy.array', (['self.td_model_krhf.xy'], {}), '(self.td_model_krhf.xy)\n', (4617, 4640), False, 'import numpy\n')] |
import os
import cv2
import numpy as np
import torch
import imageio
from torchvision import transforms
from .colmap_utils import *
import pdb
def load_img_list(datadir, load_test=False):
    """Read the image filename list for a scene.

    Args:
        datadir: directory containing 'train.txt' (and optionally 'test.txt'),
            one image filename per line.
        load_test: when True, append the test-split filenames after the
            training ones.

    Returns:
        List of stripped filename strings.
    """
    def _read_split(split_file):
        # one filename per line; strip whitespace/newlines
        with open(os.path.join(datadir, split_file), 'r') as fh:
            return [line.strip() for line in fh.readlines()]

    image_list = _read_split('train.txt')
    if load_test:
        image_list += _read_split('test.txt')
    return image_list
def load_colmap(image_list, datadir, H=None, W=None):
    """Load COLMAP geometric depth maps and fused-point-cloud masks.

    Args:
        image_list: image filenames to load depths for.
        datadir: COLMAP workspace root containing 'dense/'.
        H, W: optional target size; when given, depths and masks are resized.

    Returns:
        (depths, masks): stacked float depth maps and boolean masks.
    """
    depths = []
    masks = []
    ply_path = os.path.join(datadir, 'dense', 'fused.ply')
    ply_masks = read_ply_mask(ply_path)
    for image_name in image_list:
        depth_path = os.path.join(datadir, 'dense/stereo/depth_maps', image_name + '.geometric.bin')
        depth = read_array(depth_path)
        mask = ply_masks[image_name]
        if H is not None:
            depth = cv2.resize(depth, (W, H))
            mask = cv2.resize(mask, (W, H))
        # BUGFIX: the original only appended inside the `H is not None`
        # branch, so calling with H=None produced empty lists and
        # np.stack([]) raised. Now the raw maps are kept when no resize
        # is requested (matching load_gt_depths).
        depths.append(depth)
        masks.append(mask > 0.5)
    return np.stack(depths), np.stack(masks)
def load_gt_depths(image_list, datadir, H=None, W=None):
    """Load ground-truth depth PNGs (millimeters) and validity masks.

    Args:
        image_list: image filenames; the first three dot-separated fields
            form the frame id used to locate '<frame_id>.png' under 'depth/'.
        datadir: dataset root containing the 'depth' folder.
        H, W: optional target size; nearest-neighbor resize when given.

    Returns:
        (depths, masks): stacked float depth maps in meters and boolean
        masks marking pixels with valid (non-zero) depth.
    """
    depths = []
    masks = []
    for image_name in image_list:
        parts = image_name.split('.')
        frame_id = '.'.join(parts[:3])
        depth_path = os.path.join(datadir, 'depth', '{}.png'.format(frame_id))
        # stored as 16-bit millimeters; convert to float meters
        raw = cv2.imread(depth_path, cv2.IMREAD_UNCHANGED)
        depth = raw.astype(np.float32) / 1000
        if H is None:
            depths.append(depth)
            masks.append(depth > 0)
        else:
            valid = (depth > 0).astype(np.uint8)
            # nearest-neighbor keeps depth values/mask labels intact
            depths.append(cv2.resize(depth, (W, H), interpolation=cv2.INTER_NEAREST))
            masks.append(cv2.resize(valid, (W, H), interpolation=cv2.INTER_NEAREST) > 0.5)
    return np.stack(depths), np.stack(masks)
def load_depths(image_list, datadir, H=None, W=None):
    """Load predicted depth maps stored as .npy files.

    Args:
        image_list: image filenames; the first three dot-separated fields
            form the frame id. '<frame_id>_depth.npy' is preferred,
            '<frame_id>.npy' is the fallback.
        datadir: directory containing the .npy files.
        H, W: optional target size for resizing.

    Returns:
        Stacked numpy array of depth maps.
    """
    depths = []
    for image_name in image_list:
        parts = image_name.split('.')
        frame_id = parts[0] + '.' + parts[1] + '.' + parts[2]
        # prefer the '_depth' suffixed file, fall back to the plain name
        npy_path = os.path.join(datadir, '{}_depth.npy'.format(frame_id))
        if not os.path.exists(npy_path):
            npy_path = os.path.join(datadir, '{}.npy'.format(frame_id))
        depth = np.load(npy_path)
        depths.append(depth if H is None else cv2.resize(depth, (W, H)))
    return np.stack(depths)
def pil_loader(path):
    """Load an image from *path* with PIL and return it converted to RGB."""
    from PIL import Image
    # open path as file to avoid ResourceWarning
    # (https://github.com/python-pillow/Pillow/issues/835)
    with open(path, 'rb') as fh, Image.open(fh) as img:
        return img.convert('RGB')
def load_rgbs(image_list, datadir, H=None, W=None, is_png=False):
    """Load RGB images as a stacked float tensor.

    Args:
        image_list: image filenames relative to *datadir*.
        datadir: directory containing the images.
        H, W: optional target size; images are resized when given.
        is_png: when True, '.jpg' in the names is replaced by '.png'.

    Returns:
        torch.Tensor of shape (N, 3, H, W) with values in [0, 1].
    """
    from PIL import Image
    to_tensor = transforms.ToTensor()
    # Only construct the Resize transform when a target size is actually
    # given — building transforms.Resize((None, None)) is invalid and the
    # transform is never used when H is None anyway.
    resize = None
    if H is not None:
        resize = transforms.Resize((H, W), interpolation=Image.ANTIALIAS)
    rgbs = []
    for image_name in image_list:
        if is_png:
            image_name = image_name.replace('.jpg', '.png')
        rgb_path = os.path.join(datadir, image_name)
        rgb = pil_loader(rgb_path)
        if H is not None:
            rgb = resize(rgb)
        rgbs.append(to_tensor(rgb))
    return torch.stack(rgbs)
def load_rgbs_np(image_list, datadir, H=None, W=None, is_png=False, use_cv2=True):
    """Load RGB images as a stacked numpy array.

    Args:
        image_list: image filenames relative to *datadir*.
        datadir: directory containing the images.
        H, W: optional target size; images are resized when given.
        is_png: when True, '.jpg' in the names is replaced by '.png'.
        use_cv2: read with cv2.imread (uint8, BGR order) when True,
            otherwise with imageio scaled to [0, 1] floats.

    Returns:
        Stacked numpy array of images.
    """
    rgbs = []
    for image_name in image_list:
        if is_png:
            image_name = image_name.replace('.jpg', '.png')
        rgb_path = os.path.join(datadir, image_name)
        if use_cv2:
            rgb = cv2.imread(rgb_path)
        else:
            rgb = imageio.imread(rgb_path)[..., :3] / 255.0
        if H is not None:
            # BUGFIX: the non-cv2 branch previously called an undefined
            # `resize` name (NameError). cv2.resize works on plain numpy
            # arrays from either reader; use_cv2 only selects the decoder.
            rgb = cv2.resize(rgb, (W, H))
        rgbs.append(rgb)
    return np.stack(rgbs)
def visualize_depth(depth, mask=None, depth_min=None, depth_max=None, direct=False):
    """Visualize the depth map with colormap.
    Rescales the values so that depth_min and depth_max map to 0 and 1,
    respectively.

    Args:
        depth: 2-D depth array.
        mask: optional boolean array; False pixels are treated as invalid.
        depth_min, depth_max: clipping range; defaults to the 5th/95th
            percentile of the valid pixels.
        direct: when False, visualize inverse depth (1 / depth) instead.

    Returns:
        uint8 BGR color image (invalid pixels black).
    """
    if not direct:
        depth = 1.0 / (depth + 1e-6)
    else:
        # BUGFIX: work on a copy so the clipping assignments below do not
        # mutate the caller's array (the reciprocal above already produces
        # a fresh array in the non-direct case).
        depth = depth.copy()
    # NaN or +/-inf pixels are invalid
    invalid_mask = np.logical_or(np.isnan(depth), np.logical_not(np.isfinite(depth)))
    if mask is not None:
        invalid_mask += np.logical_not(mask)
    # derive robust display range from the valid pixels only
    if depth_min is None:
        depth_min = np.percentile(depth[np.logical_not(invalid_mask)], 5)
    if depth_max is None:
        depth_max = np.percentile(depth[np.logical_not(invalid_mask)], 95)
    depth[depth < depth_min] = depth_min
    depth[depth > depth_max] = depth_max
    depth[invalid_mask] = depth_max
    depth_scaled = (depth - depth_min) / (depth_max - depth_min)
    depth_scaled_uint8 = np.uint8(depth_scaled * 255)
    depth_color = cv2.applyColorMap(depth_scaled_uint8, cv2.COLORMAP_MAGMA)
    # blank out invalid pixels
    depth_color[invalid_mask, :] = 0
    return depth_color
| [
"numpy.uint8",
"cv2.applyColorMap",
"os.path.exists",
"PIL.Image.open",
"imageio.imread",
"cv2.resize",
"torch.stack",
"os.path.join",
"numpy.logical_not",
"numpy.stack",
"numpy.isnan",
"numpy.isfinite",
"torchvision.transforms.Resize",
"torchvision.transforms.ToTensor",
"numpy.load",
... | [((642, 685), 'os.path.join', 'os.path.join', (['datadir', '"""dense"""', '"""fused.ply"""'], {}), "(datadir, 'dense', 'fused.ply')\n", (654, 685), False, 'import os\n'), ((2702, 2718), 'numpy.stack', 'np.stack', (['depths'], {}), '(depths)\n', (2710, 2718), True, 'import numpy as np\n'), ((3090, 3111), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3109, 3111), False, 'from torchvision import transforms\n'), ((3125, 3181), 'torchvision.transforms.Resize', 'transforms.Resize', (['(H, W)'], {'interpolation': 'Image.ANTIALIAS'}), '((H, W), interpolation=Image.ANTIALIAS)\n', (3142, 3181), False, 'from torchvision import transforms\n'), ((3503, 3520), 'torch.stack', 'torch.stack', (['rgbs'], {}), '(rgbs)\n', (3514, 3520), False, 'import torch\n'), ((4126, 4140), 'numpy.stack', 'np.stack', (['rgbs'], {}), '(rgbs)\n', (4134, 4140), True, 'import numpy as np\n'), ((4999, 5027), 'numpy.uint8', 'np.uint8', (['(depth_scaled * 255)'], {}), '(depth_scaled * 255)\n', (5007, 5027), True, 'import numpy as np\n'), ((5046, 5103), 'cv2.applyColorMap', 'cv2.applyColorMap', (['depth_scaled_uint8', 'cv2.COLORMAP_MAGMA'], {}), '(depth_scaled_uint8, cv2.COLORMAP_MAGMA)\n', (5063, 5103), False, 'import cv2\n'), ((782, 861), 'os.path.join', 'os.path.join', (['datadir', '"""dense/stereo/depth_maps"""', "(image_name + '.geometric.bin')"], {}), "(datadir, 'dense/stereo/depth_maps', image_name + '.geometric.bin')\n", (794, 861), False, 'import os\n'), ((1156, 1172), 'numpy.stack', 'np.stack', (['depths'], {}), '(depths)\n', (1164, 1172), True, 'import numpy as np\n'), ((1174, 1189), 'numpy.stack', 'np.stack', (['masks'], {}), '(masks)\n', (1182, 1189), True, 'import numpy as np\n'), ((1515, 1559), 'cv2.imread', 'cv2.imread', (['depth_path', 'cv2.IMREAD_UNCHANGED'], {}), '(depth_path, cv2.IMREAD_UNCHANGED)\n', (1525, 1559), False, 'import cv2\n'), ((2040, 2056), 'numpy.stack', 'np.stack', (['depths'], {}), '(depths)\n', (2048, 2056), True, 'import numpy as 
np\n'), ((2058, 2073), 'numpy.stack', 'np.stack', (['masks'], {}), '(masks)\n', (2066, 2073), True, 'import numpy as np\n'), ((2495, 2514), 'numpy.load', 'np.load', (['depth_path'], {}), '(depth_path)\n', (2502, 2514), True, 'import numpy as np\n'), ((3329, 3362), 'os.path.join', 'os.path.join', (['datadir', 'image_name'], {}), '(datadir, image_name)\n', (3341, 3362), False, 'import os\n'), ((3752, 3785), 'os.path.join', 'os.path.join', (['datadir', 'image_name'], {}), '(datadir, image_name)\n', (3764, 3785), False, 'import os\n'), ((4466, 4481), 'numpy.isnan', 'np.isnan', (['depth'], {}), '(depth)\n', (4474, 4481), True, 'import numpy as np\n'), ((4568, 4588), 'numpy.logical_not', 'np.logical_not', (['mask'], {}), '(mask)\n', (4582, 4588), True, 'import numpy as np\n'), ((203, 237), 'os.path.join', 'os.path.join', (['datadir', '"""train.txt"""'], {}), "(datadir, 'train.txt')\n", (215, 237), False, 'import os\n'), ((991, 1016), 'cv2.resize', 'cv2.resize', (['depth', '(W, H)'], {}), '(depth, (W, H))\n', (1001, 1016), False, 'import cv2\n'), ((1043, 1067), 'cv2.resize', 'cv2.resize', (['mask', '(W, H)'], {}), '(mask, (W, H))\n', (1053, 1067), False, 'import cv2\n'), ((1718, 1776), 'cv2.resize', 'cv2.resize', (['depth', '(W, H)'], {'interpolation': 'cv2.INTER_NEAREST'}), '(depth, (W, H), interpolation=cv2.INTER_NEAREST)\n', (1728, 1776), False, 'import cv2\n'), ((1803, 1860), 'cv2.resize', 'cv2.resize', (['mask', '(W, H)'], {'interpolation': 'cv2.INTER_NEAREST'}), '(mask, (W, H), interpolation=cv2.INTER_NEAREST)\n', (1813, 1860), False, 'import cv2\n'), ((2377, 2403), 'os.path.exists', 'os.path.exists', (['depth_path'], {}), '(depth_path)\n', (2391, 2403), False, 'import os\n'), ((2577, 2602), 'cv2.resize', 'cv2.resize', (['depth', '(W, H)'], {}), '(depth, (W, H))\n', (2587, 2602), False, 'import cv2\n'), ((2921, 2934), 'PIL.Image.open', 'Image.open', (['f'], {}), '(f)\n', (2931, 2934), False, 'from PIL import Image\n'), ((3824, 3844), 'cv2.imread', 'cv2.imread', 
(['rgb_path'], {}), '(rgb_path)\n', (3834, 3844), False, 'import cv2\n'), ((4498, 4516), 'numpy.isfinite', 'np.isfinite', (['depth'], {}), '(depth)\n', (4509, 4516), True, 'import numpy as np\n'), ((379, 412), 'os.path.join', 'os.path.join', (['datadir', '"""test.txt"""'], {}), "(datadir, 'test.txt')\n", (391, 412), False, 'import os\n'), ((4004, 4027), 'cv2.resize', 'cv2.resize', (['rgb', '(W, H)'], {}), '(rgb, (W, H))\n', (4014, 4027), False, 'import cv2\n'), ((4655, 4683), 'numpy.logical_not', 'np.logical_not', (['invalid_mask'], {}), '(invalid_mask)\n', (4669, 4683), True, 'import numpy as np\n'), ((4755, 4783), 'numpy.logical_not', 'np.logical_not', (['invalid_mask'], {}), '(invalid_mask)\n', (4769, 4783), True, 'import numpy as np\n'), ((3877, 3901), 'imageio.imread', 'imageio.imread', (['rgb_path'], {}), '(rgb_path)\n', (3891, 3901), False, 'import imageio\n')] |
import numpy as np
import pandas as pd
import pymongo
from sklearn.preprocessing import StandardScaler
from tensorflow.keras.models import load_model
import os
import glob
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
# Load the two pre-trained Keras policy networks once at module import so
# every Pipeline run shares them.
model_1 = load_model('model_2249')
model_2 = load_model('model_5699')
class Pipeline:
    '''Provides daily results to the TradingHydro application.
    Propagates data from the web, via inference, to a database.
    Takes in some csv document and gives back numerous updates to database.
    '''
    def __init__(self, csv_filename=False, no_scraping=False):
        '''Initializes Pipeline variables.
        Attributes:
            url: str, api key for mongodb.
            csv_filename: str with the csv filename.
            no_scraping: bool, True as default, False means no scraping.
            df_out: pandas DataFrame, csv transition data.
            model_1_trans: dict with transition data.
            model_2_trans: dict with transition data.
        '''
        # strings
        self.url = 'your-mongodb-api-key'
        self.csv_filename = 'csv_filename.csv'
        # debugging
        if csv_filename:
            self.csv_filename = csv_filename
        self.no_scraping = no_scraping
        # transitions
        # BUGFIX: was `self.DF_out`, but preprocessing()/packaging_and_pushing()
        # read and write `self.df_out` — initialize the attribute actually used.
        self.df_out = None
        self.model_1_trans = None
        self.model_2_trans = None
    def db_health_checking(self):
        '''Checks for mismatches and doubles in the plot collections.'''
        # set database
        client = pymongo.MongoClient(self.url)
        db = client['powercell']
        # name collection vars so they correspond with mongodb col names
        plot_1 = db['plot_1']
        plot_2 = db['plot_2']
        plot_3 = db['plot_3']
        plot_4 = db['plot_4']
        # find the current data in respective collection
        querys = [plot_1.find_one(),
                  plot_3.find_one(),
                  plot_4.find_one()]
        # clean out mongodb id object
        querys_no_id = [{i: query[i] for i in ['dates', 'lineseries']} for query in querys]
        # compare lens: the date axis and every line series must be equally long
        for name, query in zip(('plot_1', 'plot_3', 'plot_4'), querys_no_id):
            lens = [len(query['dates'])]
            # BUGFIX: was range(len(query)) — query is a dict with 2 keys, so
            # only the first two line series were ever checked; iterate all of them.
            lens = lens + [len(query['lineseries'][i]['points'])
                           for i in range(len(query['lineseries']))]
            assert len(set(lens)) == 1, 'Health issue, len mismatch in plot ' + name
        return True
    def scraping(self):
        '''Downloads a csv file from the web to disk.
        Returns:
            bool, True if procedure is successful.
        '''
        # PREPARE FOR SCRAPE
        # locate yesterdays csv file in folder
        csvfiles = [file for file in glob.glob('*.csv')]
        assert len(csvfiles) == 1, 'Prep for scrape, more or less than one csv on disk.'
        # remove csv
        os.remove(csvfiles[0])
        assert len([file for file in glob.glob('*.csv')]) == 0, 'Remove csv, still csv on disk.'
        # SELENIUM
        # strings
        url = 'http://www.nasdaqomxnordic.com/shares/microsite?Instrument=SSE105121'
        user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 ' \
            '(KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36'
        # options
        chrome_options = Options()
        chrome_options.add_argument('--user-agent=' + user_agent)
        chrome_options.add_argument('--headless')
        # download location
        # NOTE(review): realpath('__file__') is the literal string, not the
        # module __file__ — this resolves to the current working directory.
        download_dir = os.path.dirname(os.path.realpath('__file__'))
        prefs = {'download.default_directory' : download_dir}
        chrome_options.add_experimental_option('prefs', prefs)
        # wait a random amount (looks less bot-like), launch browser and wait
        time.sleep(np.random.randint(1, 120))
        driver = webdriver.Chrome(options=chrome_options)
        driver.implicitly_wait(np.random.randint(3, 15))
        # go to page and wait
        driver.get(url)
        driver.implicitly_wait(np.random.randint(3, 15))
        # find showhistory button wait and click
        show_history_class = driver.find_element_by_class_name('showHistory')
        show_history_class.click()
        driver.implicitly_wait(np.random.randint(3, 15))
        # find, click, download csv and wait
        exportExcel_id = driver.find_element(By.ID, 'exportExcel')
        exportExcel_id.click()
        time.sleep(5)
        # release the browser/chromedriver processes now that the file is on
        # disk (the original leaked a headless Chrome per run)
        driver.quit()
        # POST SCRAPE
        # change name on csv file and wait
        csvfiles = [file for file in glob.glob('*.csv')]
        assert len(csvfiles) == 1, 'Post scrape, more or less than one csv on disk.'
        os.rename(csvfiles[0], self.csv_filename)
        time.sleep(5)
        return True
    def preprocessing(self):
        '''Preprocess new data wrt old and slice off last.
        Returns:
            date: str with todays date.
            x1_s2: numpy array, x1 part of s2.
            p_t: float, yesterdays price t-1 for todays calculations.
            c_: float with todays closing price return.
            price: float, powercell raw price for today.
            ma_26: float, TA indicator.
            em_12: float, TA indicator.
            em_26: float, TA indicator.
        '''
        names = ['date', 'price', 'avg_p', 'bid', 'ask',
                 'o', 'h', 'l', 'c', 'avgp', 'vol', 'oms', 'num']
        # put scraped csv in dataframe and obtain the last row
        df_scraped = pd.read_csv(self.csv_filename, sep=';', header=1).iloc[:,:1]
        df_scraped[[1, 2]] = pd.read_csv(self.csv_filename, sep=';', header=1).iloc[:,6:8]
        df_scraped = pd.concat([df_scraped, pd.read_csv(
            self.csv_filename, sep=';', header=1).iloc[:,:-1].drop(
            columns=['Date'])], axis=1).iloc[::-1].reset_index().drop(columns='index')
        df_scraped.columns = names
        scraped_row = df_scraped.iloc[[-1]]
        # dataframe (DF) related database (DB) and collection
        client = pymongo.MongoClient(self.url)
        db_DF = client['DF']
        DF = db_DF['DF']
        # fetch yesterdays DF
        df_in = db_DF.DF.find_one(sort=[('_id', pymongo.DESCENDING)])
        df_in_json = pd.read_json(df_in[list(df_in.keys())[-1]], keep_default_dates=False)
        # concatenate yesterdays DF and the scraped row
        df = pd.concat([df_in_json, scraped_row], axis=0).reset_index().drop(columns='index')
        # store now but update later
        self.df_out = pd.concat([df_in_json, scraped_row], axis=0).reset_index().drop(columns='index')
        # assert that the scraped row is not the same as the last in df_in
        date = df['date'].iloc[-1]
        assert date != df['date'].iloc[-2], (
            'Update Abort: scraped row is same as last. Weekend?')
        # Filter out null: forward-fill each column with the latest non-null value
        for name in names:
            no_null = []
            # check if null exist in column
            if any(df[name].isnull()):
                # traverse the boolean dataframe
                for i, j in enumerate(df[name].isnull()):
                    if not j:
                        # hold a value from latest non null
                        tmp = df[name].iloc[i]
                        no_null.append(tmp)
                    else:
                        no_null.append(tmp)
                # put back in dataframe
                df[name] = pd.Series(no_null)
        # Get float from string (csv uses ',' as decimal separator)
        for name in names[1:]:
            if type(df[name].iloc[1]) == str:
                df[name] = pd.Series([float(i.replace(',', '.')) for i in df[name]])
        # Moving averages
        ma_sizes = (26,)
        ma = {i: [] for i in ma_sizes}
        for size in ma_sizes:
            for i in range(len(df)):
                if i <= size:
                    ma[size].append(np.average(df['price']))
                else:
                    value = sum(df['price'].values[i - size: i]) / size
                    ma[size].append(value)
        # Exponential moving average
        smoother = 2
        em_sizes = (12, 20, 26)
        em = {i: [] for i in em_sizes}
        for size in em_sizes:
            em_t = sum(df['price'].iloc[:size]) / size
            for i in range(len(df)):
                if i <= size:
                    em[size].append(0)
                else:
                    em_t = (df['price'].iloc[i] * (
                        smoother / (1 + size)) + (em_t * (1 - (smoother / (1 + size)))))
                    em[size].append(em_t)
        # MACD
        macd1 = [i - j for i, j in zip(em[12], em[26])]
        macd2 = []
        macd3 = []
        em_t = sum(macd1[:9]) / 9
        for i in range(len(macd1)):
            if i <= 9:
                macd2.append(0)
            else:
                # NOTE(review): `size` here is the leftover 26 from the EMA
                # loop above, so the signal line is smoothed with a 26-period
                # factor although it is initialized over 9 periods — confirm
                # whether a 9-period factor was intended. Left unchanged to
                # preserve continuity with the values already stored in DB.
                em_t = (macd1[i] * (
                    smoother / (1 + size)) + (em_t * (1 - (smoother / (1 + size)))))
                macd2.append(em_t)
        macd3 = [i - j for i, j in zip(macd1, macd2)]
        tech = [ma[26], em[12], em[26], macd1, macd2, macd3]
        names_df2 = ['ma1', 'em1', 'em2', 'md1', 'md2', 'md3']
        names2 = names + names_df2
        df2 = pd.DataFrame({i: j for i, j in zip(names_df2, tech)})
        # slice the first 26 rows due to moving averages
        df3 = pd.concat([df, df2], axis=1).iloc[27:]
        # get diff and pct change
        diff = df3[['vol', 'oms', 'num']].diff()
        pct = df3[['bid', 'ask', 'o', 'h', 'l', 'c', 'avgp'] + names_df2].pct_change()
        diff_pct = pd.concat([pct, diff], axis=1)
        diff_pct.columns = [
            name + '_' for name in [
                'bid', 'ask', 'o', 'h', 'l', 'c', 'avgp'] + names_df2 + ['vol', 'oms', 'num']]
        df4 = pd.concat([df3, diff_pct], axis=1).iloc[1:].reset_index().drop(columns='index')
        names3 = df4.columns
        # clipping outliers at +/- 3 standard deviations
        for name in diff_pct.columns.tolist():
            df4[[name]] = df4[[name]].clip(- 3 *df4[name].std(), 3 * df4[name].std())
        # Normalizing
        scaler = StandardScaler()
        norm = scaler.fit_transform(
            df4[list(diff_pct.columns)].values.reshape(-1, len(list(diff_pct.columns))))
        # Add avgp__ to df4
        df4[['avgp__']] = pd.DataFrame({None: norm[:,6:7].squeeze()})
        # package output
        x1_s2 = norm[-1]
        p_t = round(float(df4['avg_p'].iloc[-2]), 3) # yesterdays price for todays calculations
        c_ = round(float(df4['c_'].iloc[-1]), 3) # todays closing price return
        price = round(float(df4['price'].iloc[-1]), 3)
        ma_26 = round(float(df4['ma1'].iloc[-1]), 3)
        em_12 = round(float(df4['em1'].iloc[-1]), 3)
        em_26 = round(float(df4['em2'].iloc[-1]), 3)
        return date, x1_s2, p_t, c_, price, ma_26, em_12, em_26
    def inference(self, x1_s2, p_t, c_, date):
        '''Fetches transitions from DB, performs inference
        and computes results. It packages the states nicely
        for the next transitions, and finally pushes them to DB.
        Arguments:
            x1_s2: numpy array, x1 part of s2.
            p_t: float, yesterdays price for todays calculations.
            c_: float with todays closing price return.
            date: str with todays date.
        Returns:
            model_1_value: float, todays portfolio value.
            model_2_value: float, todays portfolio value.
            baseline_value: float, todays portfolio value.
            model_1_action: float, zero or one model output.
            model_2_action: float, zero or one model output.
        '''
        # ORIGINAL BUNDLE - ORIGINAL BUNDLE
        # transition related db and collections
        client = pymongo.MongoClient(self.url)
        db_transitions = client['transitions']
        trans_model_1 = db_transitions['trans_model_1']
        trans_model_2 = db_transitions['trans_model_2']
        trans_baseline = db_transitions['trans_baseline']
        # fetch transition components for model 1 (A)
        A_query = trans_model_1.find_one(sort=[('_id', pymongo.DESCENDING)])
        # A_s is now redundant but we let it remain
        A_s = [np.array(A_query['s'][0]).reshape(1, 16),
               np.array(A_query['s'][1]).reshape(1, 3)]
        # A_a is the action from the post calculation inference yesterday
        A_a = A_query['a']
        A_cash = A_query['cash']
        A_stock_v = A_query['stock_v']
        A_stock_n = A_query['stock_n']
        # fetch transition components for model 2 (B)
        B_query = trans_model_2.find_one(sort=[('_id', pymongo.DESCENDING)])
        # B_s is now redundant but we let it remain
        B_s = [np.array(B_query['s'][0]).reshape(1, 16),
               np.array(B_query['s'][1]).reshape(1, 3)]
        # B_a is the action from the post calculation inference yesterday
        B_a = B_query['a']
        B_cash = B_query['cash']
        B_stock_v = B_query['stock_v']
        B_stock_n = B_query['stock_n']
        # fetch transition components for baseline
        base_val_in = trans_baseline.find_one(sort=[('_id', pymongo.DESCENDING)])['value']
        # models are loaded once at module level (model_1, model_2)
        # NO INFERENCE pre calculations, do them post and pass the action
        # compute portfolio for model 1
        # NOTE(review): Q is only assigned when the stored action is 0 or 1;
        # any other value would raise NameError below — confirm actions are
        # always binary.
        C = 0.02
        if A_a == 0:
            Q = np.floor(A_cash / (p_t * (1 + C))) # measure up the long position
        if A_a == 1:
            Q = -np.floor(A_stock_n) # measure up the short position
        A_cash = abs(A_cash - (Q * p_t) - (C * abs(Q))) # change in cash value
        A_stock_v = (A_stock_n + Q) * p_t # change in stock value
        A_stock_n = A_stock_n + Q # change in number of stock
        # compute portfolio for model 2
        if B_a == 0:
            Q = np.floor(B_cash / (p_t * (1 + C))) # measure up the long position
        if B_a == 1:
            Q = -np.floor(B_stock_n) # measure up the short position
        B_cash = abs(B_cash - (Q * p_t) - (C * abs(Q))) # change in cash value
        B_stock_v = (B_stock_n + Q) * p_t # change in stock value
        B_stock_n = B_stock_n + Q # change in number of stock
        # package respective s2 states
        scaler = StandardScaler()
        A_x2 = scaler.fit_transform(np.array(
            [[A_cash, A_stock_v, A_stock_n]]).reshape(-1, 1)).reshape(1, 3)
        A_s2 = [x1_s2, A_x2]
        B_x2 = scaler.fit_transform(np.array(
            [[B_cash, B_stock_v, B_stock_n]]).reshape(-1, 1)).reshape(1, 3)
        B_s2 = [x1_s2, B_x2]
        # set values
        model_1_value = A_cash + A_stock_v
        model_2_value = B_cash + B_stock_v
        baseline_value = base_val_in + (base_val_in * c_)
        # INFERENCE - action values for now and for calculations tomorrow.
        model_1_action = float(np.argmax(model_1.predict(A_s)))
        model_2_action = float(np.argmax(model_2.predict(B_s)))
        # push model 1 transition components
        trans_names = list(A_query.keys())[1:]
        self.model_1_trans = {name: val for name, val in zip(trans_names,
                                                             [date,
                                                              [x.tolist() for x in A_s2],
                                                              model_1_action,
                                                              round(A_cash, 3),
                                                              round(A_stock_v, 3),
                                                              round(A_stock_n, 3)])}
        # push model 2 transition components
        self.model_2_trans = {name: val for name, val in zip(trans_names,
                                                             [date,
                                                              [x.tolist() for x in B_s2],
                                                              model_2_action,
                                                              round(B_cash, 3),
                                                              round(B_stock_v, 3),
                                                              round(B_stock_n, 3)])}
        return (round(model_1_value, 3),
                round(model_2_value, 3),
                round(baseline_value, 3),
                model_1_action,
                model_2_action)
    def packaging_and_pushing(self,
                              model_1_value,
                              model_2_value,
                              baseline_value,
                              model_1_action,
                              model_2_action,
                              price,
                              ma_26,
                              em_12,
                              em_26,
                              date):
        '''Fetches full document of plot data from DB,
        updates it locally, then packages and pushes to DB.
        Arguments:
            model_1_value: float, todays portfolio value.
            model_2_value: float, todays portfolio value.
            baseline_value: float, todays portfolio value.
            model_1_action: float, zero or one model output.
            model_2_action: float, zero or one model output.
            price: float, powercell raw price for time t.
            ma_26: float, TA indicator.
            em_12: float, TA indicator.
            em_26: float, TA indicator.
            date: str, date for time t.
        Returns:
            bool
        '''
        values = {
            'plot_1': {
                0: model_1_value,
                1: model_2_value,
                2: baseline_value},
            'plot_3': {
                0: 10000.,
                1: 10000.,
                2: 10000.,
                3: 10000.},
            'plot_4': {
                0: price,
                1: ma_26,
                2: em_12,
                3: em_26}
        }
        # plot_2 update
        plot_2_update = {
            'date': date,
            'Model_1': model_1_action,
            'Model_2': model_2_action
        }
        # set database
        client = pymongo.MongoClient(self.url)
        db = client['powercell']
        # name collection vars so they correspond with mongodb col names
        plot_1 = db['plot_1']
        plot_2 = db['plot_2']
        plot_3 = db['plot_3']
        plot_4 = db['plot_4']
        # find the current data in respective collection
        querys = [plot_1.find_one(),
                  plot_3.find_one(),
                  plot_4.find_one()]
        # clean out mongodb id object
        updates = [{i: query[i] for i in ['dates', 'lineseries']} for query in querys]
        # append date
        for update in updates:
            update['dates'].append(date)
        # append values
        # BUGFIX: iterate each plot's own lineseries length. The original used
        # len(updates[0]['lineseries']) for all three plots, so plots carrying
        # more series than plot_1 (plot_3/plot_4 have 4 values vs plot_1's 3)
        # would leave trailing series without a new point.
        for update, key in zip(updates, ('plot_1', 'plot_3', 'plot_4')):
            for i in range(len(update['lineseries'])):
                update['lineseries'][i]['points'].append(values[key][i])
        # push to database
        res = db.plot_1.replace_one({}, updates[0])
        assert res.modified_count == 1, 'Plot 1 update to database failed.'
        res = db.plot_3.replace_one({}, updates[1])
        assert res.modified_count == 1, 'Plot 3 update to database failed.'
        res = db.plot_4.replace_one({}, updates[2])
        assert res.modified_count == 1, 'Plot 4 update to database failed.'
        # insert plot 2 update
        res = db.plot_2.insert_one(plot_2_update)
        # PREPROCESSING PUSHES - update DF
        # DF database
        db_DF = client['DF']
        DF = db_DF['DF']
        # push DF
        res = db_DF.DF.insert_one({date: self.df_out.to_json()})
        # INFERENCE PUSHES - model_1, model_2, baseline
        # transition related db and collections
        db_transitions = client['transitions']
        trans_model_1 = db_transitions['trans_model_1']
        trans_model_2 = db_transitions['trans_model_2']
        trans_baseline = db_transitions['trans_baseline']
        # push transitions
        res = db_transitions.trans_model_1.insert_one(self.model_1_trans)
        res = db_transitions.trans_model_2.insert_one(self.model_2_trans)
        res = db_transitions.trans_baseline.insert_one({'date': date,
                                                       'value': baseline_value})
        return True
    def main(self):
        '''Executes the pipeline methods.'''
        # check database health
        self.db_health_checking()
        # scraping
        if not self.no_scraping:
            self.scraping()
        # preprocessing
        date, x1_s2, p_t, c_, price, ma_26, em_12, em_26 = self.preprocessing()
        # inference
        inference = self.inference(x1_s2, p_t, c_, date)
        model_1_value, model_2_value, baseline_value = inference[0], inference[1], inference[2]
        model_1_action, model_2_action = inference[3], inference[4]
        # packaging and pushing
        self.packaging_and_pushing(model_1_value,
                                   model_2_value,
                                   baseline_value,
                                   model_1_action,
                                   model_2_action,
                                   price,
                                   ma_26,
                                   em_12,
                                   em_26,
                                   date)
if __name__ == '__main__':
Pipeline().main() | [
"pandas.Series",
"selenium.webdriver.chrome.options.Options",
"pandas.read_csv",
"numpy.average",
"selenium.webdriver.Chrome",
"os.rename",
"numpy.floor",
"time.sleep",
"sklearn.preprocessing.StandardScaler",
"os.path.realpath",
"numpy.random.randint",
"numpy.array",
"tensorflow.keras.models... | [((328, 352), 'tensorflow.keras.models.load_model', 'load_model', (['"""model_2249"""'], {}), "('model_2249')\n", (338, 352), False, 'from tensorflow.keras.models import load_model\n'), ((363, 387), 'tensorflow.keras.models.load_model', 'load_model', (['"""model_5699"""'], {}), "('model_5699')\n", (373, 387), False, 'from tensorflow.keras.models import load_model\n'), ((1624, 1653), 'pymongo.MongoClient', 'pymongo.MongoClient', (['self.url'], {}), '(self.url)\n', (1643, 1653), False, 'import pymongo\n'), ((2997, 3019), 'os.remove', 'os.remove', (['csvfiles[0]'], {}), '(csvfiles[0])\n', (3006, 3019), False, 'import os\n'), ((3450, 3459), 'selenium.webdriver.chrome.options.Options', 'Options', ([], {}), '()\n', (3457, 3459), False, 'from selenium.webdriver.chrome.options import Options\n'), ((3919, 3959), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'options': 'chrome_options'}), '(options=chrome_options)\n', (3935, 3959), False, 'from selenium import webdriver\n'), ((4525, 4538), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (4535, 4538), False, 'import time\n'), ((4772, 4813), 'os.rename', 'os.rename', (['csvfiles[0]', 'self.csv_filename'], {}), '(csvfiles[0], self.csv_filename)\n', (4781, 4813), False, 'import os\n'), ((4822, 4835), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (4832, 4835), False, 'import time\n'), ((6119, 6148), 'pymongo.MongoClient', 'pymongo.MongoClient', (['self.url'], {}), '(self.url)\n', (6138, 6148), False, 'import pymongo\n'), ((9698, 9728), 'pandas.concat', 'pd.concat', (['[pct, diff]'], {'axis': '(1)'}), '([pct, diff], axis=1)\n', (9707, 9728), True, 'import pandas as pd\n'), ((10235, 10251), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (10249, 10251), False, 'from sklearn.preprocessing import StandardScaler\n'), ((11913, 11942), 'pymongo.MongoClient', 'pymongo.MongoClient', (['self.url'], {}), '(self.url)\n', (11932, 11942), False, 'import pymongo\n'), 
((14556, 14572), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (14570, 14572), False, 'from sklearn.preprocessing import StandardScaler\n'), ((18587, 18616), 'pymongo.MongoClient', 'pymongo.MongoClient', (['self.url'], {}), '(self.url)\n', (18606, 18616), False, 'import pymongo\n'), ((3652, 3680), 'os.path.realpath', 'os.path.realpath', (['"""__file__"""'], {}), "('__file__')\n", (3668, 3680), False, 'import os\n'), ((3875, 3900), 'numpy.random.randint', 'np.random.randint', (['(1)', '(120)'], {}), '(1, 120)\n', (3892, 3900), True, 'import numpy as np\n'), ((3991, 4015), 'numpy.random.randint', 'np.random.randint', (['(3)', '(15)'], {}), '(3, 15)\n', (4008, 4015), True, 'import numpy as np\n'), ((4111, 4135), 'numpy.random.randint', 'np.random.randint', (['(3)', '(15)'], {}), '(3, 15)\n', (4128, 4135), True, 'import numpy as np\n'), ((4339, 4363), 'numpy.random.randint', 'np.random.randint', (['(3)', '(15)'], {}), '(3, 15)\n', (4356, 4363), True, 'import numpy as np\n'), ((13652, 13686), 'numpy.floor', 'np.floor', (['(A_cash / (p_t * (1 + C)))'], {}), '(A_cash / (p_t * (1 + C)))\n', (13660, 13686), True, 'import numpy as np\n'), ((14114, 14148), 'numpy.floor', 'np.floor', (['(B_cash / (p_t * (1 + C)))'], {}), '(B_cash / (p_t * (1 + C)))\n', (14122, 14148), True, 'import numpy as np\n'), ((2850, 2868), 'glob.glob', 'glob.glob', (['"""*.csv"""'], {}), "('*.csv')\n", (2859, 2868), False, 'import glob\n'), ((4659, 4677), 'glob.glob', 'glob.glob', (['"""*.csv"""'], {}), "('*.csv')\n", (4668, 4677), False, 'import glob\n'), ((5588, 5637), 'pandas.read_csv', 'pd.read_csv', (['self.csv_filename'], {'sep': '""";"""', 'header': '(1)'}), "(self.csv_filename, sep=';', header=1)\n", (5599, 5637), True, 'import pandas as pd\n'), ((5678, 5727), 'pandas.read_csv', 'pd.read_csv', (['self.csv_filename'], {'sep': '""";"""', 'header': '(1)'}), "(self.csv_filename, sep=';', header=1)\n", (5689, 5727), True, 'import pandas as pd\n'), ((7512, 7530), 
'pandas.Series', 'pd.Series', (['no_null'], {}), '(no_null)\n', (7521, 7530), True, 'import pandas as pd\n'), ((9461, 9489), 'pandas.concat', 'pd.concat', (['[df, df2]'], {'axis': '(1)'}), '([df, df2], axis=1)\n', (9470, 9489), True, 'import pandas as pd\n'), ((13756, 13775), 'numpy.floor', 'np.floor', (['A_stock_n'], {}), '(A_stock_n)\n', (13764, 13775), True, 'import numpy as np\n'), ((14219, 14238), 'numpy.floor', 'np.floor', (['B_stock_n'], {}), '(B_stock_n)\n', (14227, 14238), True, 'import numpy as np\n'), ((12367, 12392), 'numpy.array', 'np.array', (["A_query['s'][0]"], {}), "(A_query['s'][0])\n", (12375, 12392), True, 'import numpy as np\n'), ((12424, 12449), 'numpy.array', 'np.array', (["A_query['s'][1]"], {}), "(A_query['s'][1])\n", (12432, 12449), True, 'import numpy as np\n'), ((12885, 12910), 'numpy.array', 'np.array', (["B_query['s'][0]"], {}), "(B_query['s'][0])\n", (12893, 12910), True, 'import numpy as np\n'), ((12942, 12967), 'numpy.array', 'np.array', (["B_query['s'][1]"], {}), "(B_query['s'][1])\n", (12950, 12967), True, 'import numpy as np\n'), ((3057, 3075), 'glob.glob', 'glob.glob', (['"""*.csv"""'], {}), "('*.csv')\n", (3066, 3075), False, 'import glob\n'), ((6473, 6517), 'pandas.concat', 'pd.concat', (['[df_in_json, scraped_row]'], {'axis': '(0)'}), '([df_in_json, scraped_row], axis=0)\n', (6482, 6517), True, 'import pandas as pd\n'), ((6613, 6657), 'pandas.concat', 'pd.concat', (['[df_in_json, scraped_row]'], {'axis': '(0)'}), '([df_in_json, scraped_row], axis=0)\n', (6622, 6657), True, 'import pandas as pd\n'), ((7968, 7991), 'numpy.average', 'np.average', (["df['price']"], {}), "(df['price'])\n", (7978, 7991), True, 'import numpy as np\n'), ((14618, 14660), 'numpy.array', 'np.array', (['[[A_cash, A_stock_v, A_stock_n]]'], {}), '([[A_cash, A_stock_v, A_stock_n]])\n', (14626, 14660), True, 'import numpy as np\n'), ((14787, 14829), 'numpy.array', 'np.array', (['[[B_cash, B_stock_v, B_stock_n]]'], {}), '([[B_cash, B_stock_v, B_stock_n]])\n', 
(14795, 14829), True, 'import numpy as np\n'), ((9904, 9938), 'pandas.concat', 'pd.concat', (['[df3, diff_pct]'], {'axis': '(1)'}), '([df3, diff_pct], axis=1)\n', (9913, 9938), True, 'import pandas as pd\n'), ((5784, 5833), 'pandas.read_csv', 'pd.read_csv', (['self.csv_filename'], {'sep': '""";"""', 'header': '(1)'}), "(self.csv_filename, sep=';', header=1)\n", (5795, 5833), True, 'import pandas as pd\n')] |
import numpy as np
from collections import namedtuple
from itertools import product
import pybullet as p
from pybullet_planning.utils import CLIENT, BASE_LINK, UNKNOWN_FILE, OBJ_MESH_CACHE
from pybullet_planning.utils import implies
#####################################
# Bounding box
AABB = namedtuple('AABB', ['lower', 'upper'])
"""axis-aligned bounding box: https://en.wikipedia.org/wiki/Bounding_volume
Notice that the world-axis is used here. We don't have support for OOBB (using the object's local coordinate system)?
"""

def aabb_from_points(points):
    """Tightest axis-aligned box enclosing the given point cloud."""
    pts = np.asarray(points)
    return AABB(pts.min(axis=0), pts.max(axis=0))
def aabb_union(aabbs):
    """Smallest AABB enclosing every box in *aabbs*.

    Parameters
    ----------
    aabbs : iterable of AABB
        Boxes to merge; their (lower, upper) rows are stacked and the
        overall min/max is taken.
    """
    # The identity comprehension `[aabb for aabb in aabbs]` was redundant
    # (ruff PERF402); materialize the iterable directly instead.
    return aabb_from_points(np.vstack(list(aabbs)))
def aabb_overlap(aabb1, aabb2):
    """Return True if the two axis-aligned boxes intersect (touching counts)."""
    low1, high1 = aabb1
    low2, high2 = aabb2
    below = np.less_equal(low1, high2).all()
    above = np.less_equal(low2, high1).all()
    return below and above
#####################################
# Bounding box from body
def get_subtree_aabb(body, root_link=BASE_LINK):
    """Union AABB over every link in the subtree rooted at ``root_link``."""
    from pybullet_planning.interfaces.robots.link import get_link_subtree
    subtree = get_link_subtree(body, root_link)
    return aabb_union(get_aabb(body, link) for link in subtree)
def get_aabbs(body):
    """Per-link AABBs for every link of ``body``."""
    from pybullet_planning.interfaces.robots.link import get_all_links
    boxes = []
    for link in get_all_links(body):
        boxes.append(get_aabb(body, link=link))
    return boxes
def get_aabb(body, link=None):
    """Axis-aligned bounding box of a body, or of one of its links.

    When ``link`` is None the per-link AABBs are merged into one box.
    Note that Bullet's AABB query is conservative: the acceleration
    structures add some margin and extrude the box along the velocity
    vector (the threshold defaults to 0.02 in Bullet 2.x), so the result
    may be slightly larger than the actual geometry.
    #p.setPhysicsEngineParameter(contactBreakingThreshold=0.0, physicsClientId=CLIENT)
    """
    if link is not None:
        return p.getAABB(body, linkIndex=link, physicsClientId=CLIENT)
    return aabb_union(get_aabbs(body))

get_lower_upper = get_aabb
def get_aabb_center(aabb):
    """Midpoint of the box, as a numpy array."""
    lower, upper = aabb
    return 0.5 * (np.asarray(lower) + np.asarray(upper))
def get_aabb_extent(aabb):
    """return the bounding box range in the x, y, z in the body's pose frame

    Parameters
    ----------
    aabb : AABB
        [description]

    Returns
    -------
    np array of three float
        [width, length, height]
    """
    lower, upper = aabb
    return np.asarray(upper) - np.asarray(lower)
def get_center_extent(body, **kwargs):
    """Center point and edge lengths of a body's AABB."""
    box = get_aabb(body, **kwargs)
    return get_aabb_center(box), get_aabb_extent(box)
def aabb2d_from_aabb(aabb):
    """Project an AABB onto the x-y plane by dropping the z component."""
    lower = aabb[0]
    upper = aabb[1]
    return lower[:2], upper[:2]
def aabb_contains_aabb(contained, container):
    """Return True if box ``contained`` lies fully inside ``container``."""
    inner_low, inner_high = contained
    outer_low, outer_high = container
    fits_below = np.less_equal(outer_low, inner_low).all()
    fits_above = np.less_equal(inner_high, outer_high).all()
    return fits_below and fits_above
def aabb_contains_point(point, container):
    """Return True if ``point`` lies inside (or on the boundary of) the box."""
    lower, upper = container
    not_below = np.less_equal(lower, point).all()
    not_above = np.less_equal(point, upper).all()
    return not_below and not_above
def get_bodies_in_region(aabb):
    """This query will return all the unique ids of objects that have axis aligned bounding box overlap with a given axis aligned bounding box.

    Note that the query is conservative and may return additional objects that don't have actual AABB overlap.
    This happens because the acceleration structures have some heuristic that enlarges the AABBs a bit
    (extra margin and extruded along the velocity vector).

    Parameters
    ----------
    aabb : [type]
        [description]

    Returns
    -------
    a list of object unique ids.
    """
    lower, upper = aabb
    hits = p.getOverlappingObjects(lower, upper, physicsClientId=CLIENT)
    if hits is None:
        return []
    return hits
def get_aabb_volume(aabb):
    """Volume of the box: product of its edge lengths."""
    extent = get_aabb_extent(aabb)
    return np.prod(extent)
def get_aabb_area(aabb):
    """Area of the box footprint in the x-y plane."""
    footprint = aabb2d_from_aabb(aabb)
    return np.prod(get_aabb_extent(footprint))
#####################################
# AABB approximation
def get_aabb_vertices(aabb):
    """All 2**d corner points of a d-dimensional AABB, as tuples."""
    dim = len(aabb[0])
    corners = []
    # each choice picks, per axis, whether to take the lower or upper bound
    for choice in product(range(len(aabb)), repeat=dim):
        corners.append(tuple(aabb[choice[axis]][axis] for axis in range(dim)))
    return corners
| [
"collections.namedtuple",
"pybullet.getAABB",
"numpy.less_equal",
"pybullet_planning.interfaces.robots.link.get_all_links",
"numpy.max",
"pybullet.getOverlappingObjects",
"numpy.array",
"numpy.vstack",
"numpy.min",
"pybullet_planning.interfaces.robots.link.get_link_subtree"
] | [((297, 335), 'collections.namedtuple', 'namedtuple', (['"""AABB"""', "['lower', 'upper']"], {}), "('AABB', ['lower', 'upper'])\n", (307, 335), False, 'from collections import namedtuple\n'), ((3897, 3958), 'pybullet.getOverlappingObjects', 'p.getOverlappingObjects', (['lower', 'upper'], {'physicsClientId': 'CLIENT'}), '(lower, upper, physicsClientId=CLIENT)\n', (3920, 3958), True, 'import pybullet as p\n'), ((583, 605), 'numpy.min', 'np.min', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (589, 605), True, 'import numpy as np\n'), ((607, 629), 'numpy.max', 'np.max', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (613, 629), True, 'import numpy as np\n'), ((683, 718), 'numpy.vstack', 'np.vstack', (['[aabb for aabb in aabbs]'], {}), '([aabb for aabb in aabbs])\n', (692, 718), True, 'import numpy as np\n'), ((2005, 2060), 'pybullet.getAABB', 'p.getAABB', (['body'], {'linkIndex': 'link', 'physicsClientId': 'CLIENT'}), '(body, linkIndex=link, physicsClientId=CLIENT)\n', (2014, 2060), True, 'import pybullet as p\n'), ((2511, 2526), 'numpy.array', 'np.array', (['upper'], {}), '(upper)\n', (2519, 2526), True, 'import numpy as np\n'), ((2529, 2544), 'numpy.array', 'np.array', (['lower'], {}), '(lower)\n', (2537, 2544), True, 'import numpy as np\n'), ((1328, 1347), 'pybullet_planning.interfaces.robots.link.get_all_links', 'get_all_links', (['body'], {}), '(body)\n', (1341, 1347), False, 'from pybullet_planning.interfaces.robots.link import get_all_links\n'), ((2169, 2184), 'numpy.array', 'np.array', (['lower'], {}), '(lower)\n', (2177, 2184), True, 'import numpy as np\n'), ((2187, 2202), 'numpy.array', 'np.array', (['upper'], {}), '(upper)\n', (2195, 2202), True, 'import numpy as np\n'), ((818, 847), 'numpy.less_equal', 'np.less_equal', (['lower1', 'upper2'], {}), '(lower1, upper2)\n', (831, 847), True, 'import numpy as np\n'), ((871, 900), 'numpy.less_equal', 'np.less_equal', (['lower2', 'upper1'], {}), '(lower2, upper1)\n', (884, 900), True, 'import numpy 
as np\n'), ((1150, 1183), 'pybullet_planning.interfaces.robots.link.get_link_subtree', 'get_link_subtree', (['body', 'root_link'], {}), '(body, root_link)\n', (1166, 1183), False, 'from pybullet_planning.interfaces.robots.link import get_link_subtree\n'), ((2884, 2913), 'numpy.less_equal', 'np.less_equal', (['lower2', 'lower1'], {}), '(lower2, lower1)\n', (2897, 2913), True, 'import numpy as np\n'), ((2937, 2966), 'numpy.less_equal', 'np.less_equal', (['upper1', 'upper2'], {}), '(upper1, upper2)\n', (2950, 2966), True, 'import numpy as np\n'), ((3123, 3150), 'numpy.less_equal', 'np.less_equal', (['lower', 'point'], {}), '(lower, point)\n', (3136, 3150), True, 'import numpy as np\n'), ((3174, 3201), 'numpy.less_equal', 'np.less_equal', (['point', 'upper'], {}), '(point, upper)\n', (3187, 3201), True, 'import numpy as np\n')] |
#
# Copyright 2018-2021 <NAME>
# 2019 <NAME>
# 2015-2016 <NAME>
#
# ### MIT license
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
Variable bandwidth analysis for nonuniform topographies
"""
import numpy as np
from ..HeightContainer import NonuniformLineScanInterface
from ..NonuniformLineScan import NonuniformLineScan
def checkerboard_detrend_profile(line_scan, subdivisions, tol=1e-6):
    """
    Perform tilt correction (and subtract the mean value) in each individual
    rectangle of a checkerboard decomposition of the surface. This is
    identical to subdividing the surface into individual, nonoverlapping
    rectangles and performing individual tilt corrections on them.

    The main application of this function is to carry out a variable
    bandwidth analysis of the surface.

    Parameters
    ----------
    line_scan : :obj:`NonuniformLineScan`
        Container storing the uniform topography map
    subdivisions : int
        Number of subdivisions.
    tol : float
        Tolerance for searching for existing data points at domain boundaries.
        (Default: 1e-6)

    Returns
    -------
    subdivided_line_scans : list of :obj:`NonuniformLineScan`
        List with new, subdivided and detrended line scans.
    """
    if subdivisions == 1:
        # trivial case: detrend the whole scan in one piece
        return [line_scan.detrend()]
    x, y = line_scan.positions_and_heights()
    subdivided_line_scans = []
    for i in range(subdivisions):
        # Subdivide interval: [sub_xleft, sub_xright] is the i-th equal slice
        # of the full x-range
        sub_xleft = x[0] + i * (x[-1] - x[0]) / subdivisions
        sub_xright = x[0] + (i + 1) * (x[-1] - x[0]) / subdivisions
        # Search for the data points closest to sub_xleft and sub_xright
        sub_ileft = x.searchsorted(sub_xleft)
        sub_iright = x.searchsorted(sub_xright, side='right')
        sub_x = x[sub_ileft:sub_iright]
        sub_y = y[sub_ileft:sub_iright]
        # Put additional data points on the left and right boundaries, if there
        # is none already in the data set at exactly those points (within tol)
        if sub_ileft != 0 and sub_xleft < x[sub_ileft] - tol:
            # Linear interpolation to boundary point
            sub_yleft = y[sub_ileft - 1] + (sub_xleft - x[sub_ileft - 1]) / (
                x[sub_ileft] - x[sub_ileft - 1]) * (
                y[sub_ileft] - y[sub_ileft - 1])
            # Add additional point to data
            sub_x = np.append([sub_xleft], sub_x)
            sub_y = np.append([sub_yleft], sub_y)
        if sub_iright != len(x) and sub_xright > x[sub_iright - 1] + tol:
            # Linear interpolation to boundary point
            sub_yright = y[sub_iright - 1] + (
                sub_xright - x[sub_iright - 1]) / (
                x[sub_iright] - x[sub_iright - 1]) * (
                y[sub_iright] - y[sub_iright - 1])
            # Add additional point to data
            sub_x = np.append(sub_x, [sub_xright])
            sub_y = np.append(sub_y, [sub_yright])
        # detrend each slice independently, preserving the original metadata
        subdivided_line_scans += [
            NonuniformLineScan(sub_x, sub_y, info=line_scan.info).detrend()]
    return subdivided_line_scans
def variable_bandwidth_from_profile(line_scan, nb_grid_pts_cutoff=4):
    """
    Perform a variable bandwidth analysis by computing the mean
    root-mean-square height within increasingly finer subdivisions of the
    line scan.

    Parameters
    ----------
    line_scan : obj:`NonuniformLineScan`
        Container storing the uniform topography map
    nb_grid_pts_cutoff : int
        Minimum number of data points to allow for subdivision. The analysis
        will automatically analyze subdivision down to this nb_grid_pts.

    Returns
    -------
    magnifications : array
        Array containing the magnifications.
    bandwidths : array
        Array containing the bandwidths, here the physical_sizes of the
        subdivided topography.
    rms_heights : array
        Array containing the rms height corresponding to the respective
        magnification.
    """
    magnifications = []
    bandwidths = []
    rms_heights = []
    mag = 1
    smallest_nb, = line_scan.nb_grid_pts
    # keep doubling the magnification until a section becomes too small
    while smallest_nb >= nb_grid_pts_cutoff:
        sections = line_scan.checkerboard_detrend_profile(mag)
        smallest_nb = min(sec.nb_grid_pts[0] for sec in sections)
        magnifications.append(mag)
        bandwidths.append(sections[0].physical_sizes[0])
        rms_heights.append(
            np.mean([sec.rms_height_from_profile() for sec in sections]))
        mag *= 2
    return np.array(magnifications), np.array(bandwidths), np.array(rms_heights)
# Register analysis functions from this module so they are available through
# the nonuniform line scan container interface.
NonuniformLineScanInterface.register_function('checkerboard_detrend_profile', checkerboard_detrend_profile)
NonuniformLineScanInterface.register_function('variable_bandwidth_from_profile', variable_bandwidth_from_profile)
| [
"numpy.append",
"numpy.array"
] | [((5676, 5700), 'numpy.array', 'np.array', (['magnifications'], {}), '(magnifications)\n', (5684, 5700), True, 'import numpy as np\n'), ((5702, 5722), 'numpy.array', 'np.array', (['bandwidths'], {}), '(bandwidths)\n', (5710, 5722), True, 'import numpy as np\n'), ((5724, 5745), 'numpy.array', 'np.array', (['rms_heights'], {}), '(rms_heights)\n', (5732, 5745), True, 'import numpy as np\n'), ((3410, 3439), 'numpy.append', 'np.append', (['[sub_xleft]', 'sub_x'], {}), '([sub_xleft], sub_x)\n', (3419, 3439), True, 'import numpy as np\n'), ((3460, 3489), 'numpy.append', 'np.append', (['[sub_yleft]', 'sub_y'], {}), '([sub_yleft], sub_y)\n', (3469, 3489), True, 'import numpy as np\n'), ((3932, 3962), 'numpy.append', 'np.append', (['sub_x', '[sub_xright]'], {}), '(sub_x, [sub_xright])\n', (3941, 3962), True, 'import numpy as np\n'), ((3983, 4013), 'numpy.append', 'np.append', (['sub_y', '[sub_yright]'], {}), '(sub_y, [sub_yright])\n', (3992, 4013), True, 'import numpy as np\n')] |
# Feb 9, 2019
# <NAME>, <NAME>, <NAME>, <NAME>
#
# This script tests the distance function for kmedians.py
import pytest
import numpy as np
from KMediansPy.distance import distance
## Helper Functions
def toy_data():
    """
    Generates simple data set and parameters to test
    """
    points = np.array([[1, 2], [5, 4]])
    centres = np.array([[1, 2], [5, 4]])
    return points, centres, distance(points, centres)
## Test Functions
def test_distance_calc():
    """
    Confirm that the correct distance is being calculated for each point from
    the medians, using a simple toy dataset where the distances were
    calculated manually.
    """
    # BUG FIX: the original asserted ``np.all(dist) == np.all(expected)``,
    # which collapses both arrays to a single truthiness flag and therefore
    # passes no matter what values ``dist`` holds.  Compare the matrices
    # element-wise instead.  Each point coincides with one median, so the
    # distance to its own median is 0 and to the other median is 6 (L1).
    _, _, dist = toy_data()
    expected = np.array([[0, 6], [6, 0]])
    assert np.allclose(dist, expected)
def test_dist_each_point():
    """
    Confirm that the distance is being calculated for each point
    """
    X, _, dist = toy_data()
    n_points = X.shape[0]
    assert dist.shape[0] == n_points
def test_dist_each_cluster():
    """
    Confirm that the distance is being calculated for each median
    """
    _, medians, dist = toy_data()
    n_medians = medians.shape[0]
    assert dist.shape[1] == n_medians
def test_correct_input_type():
    """
    Confirm the input and outputs are numpy arrays and are all 2D arrays
    """
    X, medians, dist = toy_data()
    # every input/output should be a 2-dimensional numpy array
    for arr in (X, medians, dist):
        assert type(arr) == np.ndarray
        assert arr.ndim == 2
| [
"numpy.array",
"KMediansPy.distance.distance",
"numpy.all"
] | [((300, 326), 'numpy.array', 'np.array', (['[[1, 2], [5, 4]]'], {}), '([[1, 2], [5, 4]])\n', (308, 326), True, 'import numpy as np\n'), ((340, 366), 'numpy.array', 'np.array', (['[[1, 2], [5, 4]]'], {}), '([[1, 2], [5, 4]])\n', (348, 366), True, 'import numpy as np\n'), ((377, 397), 'KMediansPy.distance.distance', 'distance', (['X', 'medians'], {}), '(X, medians)\n', (385, 397), False, 'from KMediansPy.distance import distance\n'), ((698, 710), 'numpy.all', 'np.all', (['dist'], {}), '(dist)\n', (704, 710), True, 'import numpy as np\n'), ((721, 747), 'numpy.array', 'np.array', (['[[6, 0], [0, 6]]'], {}), '([[6, 0], [0, 6]])\n', (729, 747), True, 'import numpy as np\n')] |
from multiprocessing import Pool
import numpy as np
import pandas as pd
from cgms_data_seg import CGMSDataSeg
from sklearn.model_selection import KFold
def hyperglycemia(x, threshold=1.8):
    """One-hot label per row: column 0 = hyperglycemic (>= threshold), column 1 = not."""
    high = x >= threshold
    low = x < threshold
    return np.hstack((high, low)).astype(np.float32)
def hypoglycemia(x, threshold=0.7):
    """One-hot label per row: column 0 = hypoglycemic (<= threshold), column 1 = not."""
    # threshold can be set to 0.54 suggested by Dr. Mantzoros
    low = x <= threshold
    high = x > threshold
    return np.hstack((low, high)).astype(np.float32)
def threeclasses(x, th_min=0.7, th_max=1.8):
    """One-hot label over three bands: hypo (<= th_min), safe, hyper (>= th_max)."""
    def in_band(row):
        return [row[0] < th_max and row[0] > th_min]

    hypo = x <= th_min
    safe = np.apply_along_axis(in_band, 1, x)
    hyper = x >= th_max
    return np.hstack((hypo, safe, hyper)).astype(np.float32)
def accuracy(y_true, y_pred):
    """Fraction of samples whose predicted class (argmax) matches the true class."""
    true_labels = np.argmax(y_true, axis=-1)
    pred_labels = np.argmax(y_pred, axis=-1)
    return np.mean(pred_labels == true_labels)
def read_patient_info():
    """Load the CSM demographics sheet and split it by cohort.

    Returns
    -------
    (control, diabetic) : tuple of pd.DataFrame
        Rows whose ``Group`` column equals "Control", and all other rows.
    """
    # BUG FIX: the original passed an explicitly opened file handle to
    # read_excel and never closed it (resource leak); give pandas the path
    # and let it manage the file lifetime.
    df = pd.read_excel(
        "../data/CGMdataCSMcomplete_update.xlsx", sheet_name="Demographics"
    )
    # trim empty spaces in front and after cell value
    df = df.apply(lambda x: x.str.strip() if x.dtype == "object" else x)
    return df[df["Group"] == "Control"], df[df["Group"] != "Control"]
def read_direcnet_patient_info():
    """Load DirecNet screening + lab tables and split by Type-1 diabetes status.

    Returns
    -------
    (non_t1d, t1d) : tuple of pd.DataFrame
        Screening rows merged with lab values, split on the ``Type1DM`` flag.
    """
    screening = pd.read_csv("../data/tblAScreening.csv")
    labs = pd.read_csv("../data/tblALab.csv")
    merged = screening.merge(labs, on="PtID")
    cols = ["PtID", "Gender", "BMI", "Type1DM", "AgeAtBaseline", "HbA1c"]
    sub = merged[cols]
    return sub[sub["Type1DM"] != "Yes"], sub[sub["Type1DM"] == "Yes"]
def groupby_meds():
    """Split the diabetic cohort into medication groups.

    Returns
    -------
    (no_med, insulin, other) : tuple of pd.DataFrame
        Patients on no diabetes medication, patients on insulin-type
        medications, and everyone else, respectively (duplicates removed).
    """
    control, diabetic = read_patient_info()
    # keywords marking "no medication"; NaN covers an empty Medications cell
    cat1 = ["no dm meds", np.nan, "none"]
    # substrings marking insulin-type medications
    cat2 = ["insulin", "lantus", "novolin", "hum"]
    no_med = diabetic[diabetic["Medications"].str.lower().isin(cat1)]
    insulin = pd.DataFrame()
    # collect every patient whose medication string mentions one of the
    # insulin keywords (case-insensitive; NaN treated as no match)
    for item in cat2:
        insulin = pd.concat(
            [
                insulin,
                diabetic[
                    diabetic["Medications"].str.contains(item, na=False, case=False)
                ],
            ]
        )
    # everyone not captured by either of the two groups above
    other_pid = (
        set(diabetic["Patient ID"])
        - set(no_med["Patient ID"])
        - set(insulin["Patient ID"])
    )
    other = diabetic[diabetic["Patient ID"].isin(other_pid)]
    # a patient can match several insulin keywords -- deduplicate all groups
    no_med.drop_duplicates(inplace=True)
    insulin.drop_duplicates(inplace=True)
    other.drop_duplicates(inplace=True)
    return no_med, insulin, other
def runner(learner, argv, transfer=None, outtype="Same", cut_point=50, label="data"):
    """Evaluate ``learner`` per patient, using DirecNet as low-fidelity data.

    Patients 1-3 come from CLARITY exports, patient 4 from an xls file and
    "patient" 5 is the CSM workbook containing many patients.  Results are
    appended to ``{label}_{cut_point}.txt``.

    Parameters
    ----------
    learner : callable
        Run in a subprocess as ``learner(low_fid_data, high_fid_data, *argv)``;
        must return a scalar error.
    argv : tuple
        Extra arguments forwarded to ``learner``; ``argv[-3]`` and ``argv[-2]``
        are also forwarded to ``transfer`` -- TODO confirm their meaning.
    transfer : callable, optional
        When given, evaluated three times per patient (last argument 1-3) and
        the transfer errors are logged next to the baseline error.
    outtype : str
        'Same' for regressor; 'None' for classifier.
    cut_point : int
        Training cut point applied to each patient's record; patients with
        fewer training samples are skipped.
    label : str
        Prefix of the output file name.
    """
    # 'Same' for regressor; 'None' for classifier
    _, diabetic = read_patient_info()
    pids = list(diabetic["Patient ID"])
    low_fid_data = CGMSDataSeg("direcnet", "../data/tblADataCGMS.csv", 5)
    sampling_horizon = 7
    prediction_horizon = 6
    scale = 0.01
    train_test_ratio = 0.1  # dummy number
    low_fid_data.reset(
        sampling_horizon, prediction_horizon, scale, 100, False, outtype, 1
    )
    # append mode: repeated runs accumulate results in the same file
    of = open("{}_{}.txt".format(label, cut_point), "a+")
    for pid in range(1, 6):
        print("=" * 100)
        if pid <= 3:
            print("pid: ", pid)
            high_fid_data = CGMSDataSeg(
                "VA", ["../data/CLARITY_Export_{}.txt".format(pid)], 5
            )
        elif pid == 4:
            print("pid: ", pid)
            high_fid_data = CGMSDataSeg("VA1", "../data/A_J_201211301041.xls", 5)
        else:
            high_fid_data = CGMSDataSeg("VA2", "../data/CGMdataCSMcomplete.xlsx", 5)
        if pid <= 4:
            high_fid_data.set_cutpoint = cut_point
            high_fid_data.reset(
                sampling_horizon,
                prediction_horizon,
                scale,
                train_test_ratio,
                False,
                outtype,
                1,
            )
            # NOTE(review): Pool(1) appears to isolate each fit in a fresh
            # subprocess (likely to release framework/GPU state) -- confirm
            with Pool(1) as p:
                err = p.apply(learner, (low_fid_data, high_fid_data, *argv))
            if transfer is not None:
                with Pool(1) as p:
                    terr1 = p.apply(transfer, (high_fid_data, argv[-3], argv[-2], 1))
                    terr2 = p.apply(transfer, (high_fid_data, argv[-3], argv[-2], 2))
                    terr3 = p.apply(transfer, (high_fid_data, argv[-3], argv[-2], 3))
                of.write(
                    "{:d} {:.4f} {:.4f} {:.4f} {:.4f}\n".format(
                        pid, err, terr1, terr2, terr3
                    )
                )
            else:
                of.write("{:d} {:.4f}\n".format(pid, err))
        else:
            # the CSM workbook holds many patients: loop over those that are
            # in the diabetic cohort
            for item in high_fid_data.raw_data:
                if item in pids:
                    print("Processing pid: {}".format(item))
                    high_fid_data.data = high_fid_data.raw_data[item]
                    high_fid_data.set_cutpoint = cut_point
                    high_fid_data.reset(
                        sampling_horizon,
                        prediction_horizon,
                        scale,
                        train_test_ratio,
                        False,
                        outtype,
                        1,
                    )
                    if cut_point > high_fid_data.train_n:
                        print("Training data size smaller than required, skipped")
                        continue
                    with Pool(1) as p:
                        err = p.apply(learner, (low_fid_data, high_fid_data, *argv))
                    if transfer is not None:
                        with Pool(1) as p:
                            terr1 = p.apply(
                                transfer, (high_fid_data, argv[-3], argv[-2], 1)
                            )
                            terr2 = p.apply(
                                transfer, (high_fid_data, argv[-3], argv[-2], 2)
                            )
                            terr3 = p.apply(
                                transfer, (high_fid_data, argv[-3], argv[-2], 3)
                            )
                        of.write(
                            "{:s} {:.4f} {:.4f} {:.4f} {:.4f}\n".format(
                                item, err, terr1, terr2, terr3
                            )
                        )
                    else:
                        of.write("{:s} {:.4f}\n".format(item, err))
    of.close()
def hierarchical_runner(
    learner, argv, transfer=None, outtype="Same", cut_point=50, label="data", throw=5
):
    """K-fold variant of :func:`runner` over the whole patient pool.

    Per fold, roughly ``throw`` patients' records are merged into the
    DirecNet low-fidelity pool and the remaining patients are evaluated.
    Results are appended to ``{label}_{throw}_{cut_point}.txt``.

    Parameters
    ----------
    learner, argv, transfer, outtype, cut_point, label
        Same meaning as in :func:`runner`.
    throw : int
        Approximate number of patients whose data is added to the
        low-fidelity training pool in each fold.
    """
    # 'Same' for regressor; 'None' for classifier
    _, diabetic = read_patient_info()
    pids = list(diabetic["Patient ID"])
    print(f"# {len(pids)} diabetic patients from new samples")
    # read direcnet
    low_fid_data = CGMSDataSeg("direcnet", "../data/tblADataCGMS.csv", 5)
    sampling_horizon = 7
    prediction_horizon = 6
    scale = 0.01
    train_test_ratio = 0.1  # dummy number
    # store patients' data in all_data (keys "1"-"4" are the single-patient
    # files, the rest come from the CSM workbook)
    all_data = dict()
    all_data["4"] = CGMSDataSeg("VA1", "../data/A_J_201211301041.xls", 5).data
    for pid in range(1, 4):
        all_data[str(pid)] = CGMSDataSeg(
            "VA", ["../data/CLARITY_Export_{}.txt".format(pid)], 5
        ).data
    # create an interface for high_fid_data
    high_fid_data = CGMSDataSeg("VA2", "../data/CGMdataCSMcomplete.xlsx", 5)
    all_data.update({pid: high_fid_data.raw_data[pid] for pid in pids})
    all_pids = list(all_data.keys())
    # choose the number of folds so that each fold's train split holds about
    # `throw` patients; when that is impossible, fold sizes are inverted
    # below by swapping train and test indices
    raw_fold = len(all_pids) / (len(all_pids) - throw)
    if raw_fold < 1.5:
        # for fold not meeting requirement
        fold = np.ceil(1 / (1 - 1 / raw_fold))
    else:
        fold = np.ceil(raw_fold)
    kf = KFold(n_splits=int(fold))
    print(f"# {len(all_pids)} patients, {raw_fold} fold requested, {fold} fold given")
    print(f"before adding cohort, size {len(low_fid_data.data)}")
    already_tested = []
    of = open(f"{label}_{throw}_{cut_point}.txt", "a+")
    for train_index, test_index in kf.split(all_pids):
        # map to get train pid
        if raw_fold < 1.5:
            train_index, test_index = test_index, train_index
        train_ids = [all_pids[k] for k in train_index]
        # grow the low-fidelity pool with the fold's training patients
        # (note: the pool accumulates across folds)
        for k in train_ids:
            low_fid_data.data += all_data[k]
        print(f"after adding cohort, size {len(low_fid_data.data)}")
        low_fid_data.reset(
            sampling_horizon, prediction_horizon, scale, 100, False, outtype, 1
        )
        # map to get test pid set and test on new ids
        test_ids = set([all_pids[k] for k in test_index]) - set(already_tested)
        already_tested += list(test_ids)
        print(f"{throw} train ids: {train_ids}")
        print(f"{len(already_tested)} already tested: {already_tested}")
        print(f"{len(test_ids)} in testing: {test_ids}")
        for item in test_ids:
            print(f"Processing patient {item}")
            high_fid_data.data = all_data[item]
            high_fid_data.set_cutpoint = cut_point
            high_fid_data.reset(
                sampling_horizon,
                prediction_horizon,
                scale,
                train_test_ratio,
                False,
                outtype,
                1,
            )
            if cut_point > high_fid_data.train_n:
                print("Training data size smaller than required, skipped")
                continue
            with Pool(1) as p:
                err = p.apply(learner, (low_fid_data, high_fid_data, *argv))
            if transfer is not None:
                with Pool(1) as p:
                    terr1 = p.apply(
                        transfer, (high_fid_data, argv[-3], argv[-2], 1)
                    )
                    terr2 = p.apply(
                        transfer, (high_fid_data, argv[-3], argv[-2], 2)
                    )
                    terr3 = p.apply(
                        transfer, (high_fid_data, argv[-3], argv[-2], 3)
                    )
                of.write(
                    "{:s} {:.4f} {:.4f} {:.4f} {:.4f}\n".format(
                        item, err, terr1, terr2, terr3
                    )
                )
            else:
                of.write("{:s} {:.4f}\n".format(item, err))
    of.close()
def native_runner(
    learner, argv, transfer=None, outtype="Same", cut_point=50, label="data"
):
    """Leave-one-patient-out evaluation within the full patient pool.

    For every patient, all *other* patients' records form the low-fidelity
    training pool and the held-out patient supplies the high-fidelity
    train/test split.  Errors are appended to ``{label}_{cut_point}.txt``;
    when ``transfer`` is given, per-sample scores are also dumped to
    ``{pid}.txt``.

    Parameters
    ----------
    learner : callable
        Run in a subprocess as ``learner(low_fid_data, high_fid_data, *argv)``;
        must return ``(error, labels)``.
    argv : tuple
        Extra learner arguments; ``argv[-3]`` and ``argv[-2]`` are forwarded
        to ``transfer`` as well.
    transfer : callable, optional
        Evaluated three times per patient (last argument 1-3), each call
        returning ``(error, scores)``.
    outtype : str
        'Same' for regressor; 'None' for classifier.
    cut_point : int
        Training cut point for the held-out patient; smaller records are
        skipped.
    label : str
        Output file prefix.
    """
    _, allp = read_patient_info()
    the_group = allp
    print(f"{len(the_group)} patients selected")
    pids = list(the_group["Patient ID"])
    sampling_horizon = 7
    prediction_horizon = 6
    scale = 0.01
    train_test_ratio = 0.1  # dummy number
    # store patients' data in all_data
    all_data = dict()
    low_fid_data = CGMSDataSeg("VA1", "../data/A_J_201211301041.xls", 5)
    all_data["4"] = low_fid_data.data
    for pid in range(1, 4):
        data = CGMSDataSeg("VA", ["../data/CLARITY_Export_{}.txt".format(pid)], 5)
        all_data[str(pid)] = data.data
    # create an interface for high_fid_data
    high_fid_data = CGMSDataSeg("VA2", "../data/CGMdataCSMcomplete.xlsx", 5)
    all_data.update({pid: high_fid_data.raw_data[pid] for pid in pids})
    of = open("{}_{}.txt".format(label, cut_point), "a+")
    for item in pids:
        print("Processing pid: {}".format(item))
        # BUG FIX: the original used ``set(pids) - set(item)``, which
        # subtracts the *characters* of the pid string, so the held-out
        # patient's data leaked into the training pool.  Subtract the
        # singleton set instead.
        train_pids = set(pids) - {item}
        train_data = [all_data[k] for k in train_pids]
        # flatten the per-patient segment lists into one training pool
        low_fid_data.data = [seg for patient in train_data for seg in patient]
        print(
            "# {} data seg from other patients".format(
                sum([len(x) for x in low_fid_data.data])
            )
        )
        low_fid_data.set_cutpoint = -1
        low_fid_data.reset(
            sampling_horizon, prediction_horizon, scale, 100, False, outtype, 1
        )
        high_fid_data.data = all_data[item]
        high_fid_data.set_cutpoint = cut_point
        high_fid_data.reset(
            sampling_horizon,
            prediction_horizon,
            scale,
            train_test_ratio,
            False,
            outtype,
            1,
        )
        if cut_point > high_fid_data.train_n:
            print("Training data size smaller than required, skipped")
            continue
        with Pool(1) as p:
            err, labs = p.apply(learner, (low_fid_data, high_fid_data, *argv))
        if transfer is not None:
            with Pool(1) as p:
                terr1, score1 = p.apply(
                    transfer, (high_fid_data, argv[-3], argv[-2], 1)
                )
                terr2, score2 = p.apply(
                    transfer, (high_fid_data, argv[-3], argv[-2], 2)
                )
                terr3, score3 = p.apply(
                    transfer, (high_fid_data, argv[-3], argv[-2], 3)
                )
            of.write(
                "{:s} {:.4f} {:.4f} {:.4f} {:.4f}\n".format(
                    item, err, terr1, terr2, terr3
                )
            )
            np.savetxt(
                f"{item}.txt", np.hstack((labs, score1, score2, score3)), fmt="%.4f"
            )
        else:
            of.write("{:s} {:.4f}\n".format(item, err))
    of.close()
def feature_runner(
    learner, argv, transfer=None, outtype="Same", cut_point=50, label="data"
):
    """Leave-one-patient-out evaluation on the BIDMC T2D cohort, attaching a
    per-patient demographic feature (BMI) to every CGM data segment.

    For each patient, all other patients' data (plus feature rows) form the
    low-fidelity training pool and the held-out patient supplies the
    high-fidelity data. Per-patient errors are appended to
    '<label>_<cut_point>.txt'.

    Args:
        learner: callable executed in a worker process as
            learner(low_fid_data, high_fid_data, *argv); must return
            (err, labs).
        argv: extra positional arguments forwarded to `learner`; argv[-3]
            and argv[-2] are also reused when calling `transfer`.
        transfer: optional transfer-learning callable; when given it is run
            for modes 1-3 and the errors/scores are recorded as well.
        outtype: 'Same' for regressor; 'None' for classifier.
        cut_point: number of high-fidelity training segments required; a
            patient with fewer segments is skipped.
        label: filename prefix for the results file.
    """
    # NOTE(review): the grouping result is unused in this function — confirm
    # groupby_meds() has no required side effects before removing the call.
    nomed, insulin, other = groupby_meds()
    _, t2d = read_patient_info()
    the_group = t2d
    print(f"{len(the_group)} patients selected")
    pids = list(the_group["Patient ID"])
    bmis = pd.Series(the_group["BMI"].values, index=the_group["Patient ID"]).to_dict()
    genders = pd.Series(the_group["Gender"].values, index=the_group["Patient ID"])
    genders = genders.apply(lambda x: 100 if x == "Female" else 0).to_dict()
    hba1cs = pd.Series(the_group["HbA1c"].values, index=the_group["Patient ID"])
    hba1cs = hba1cs.dropna().to_dict()
    sampling_horizon = 7
    prediction_horizon = 4
    scale = 0.01
    train_test_ratio = 0.01  # dummy number; the effective split uses set_cutpoint
    # Two interfaces onto the same spreadsheet: one accumulates the training
    # pool, the other serves the held-out patient.
    low_fid_data = CGMSDataSeg("VA2", "../data/CGMdataCSMcomplete.xlsx", 5)
    high_fid_data = CGMSDataSeg("VA2", "../data/CGMdataCSMcomplete.xlsx", 5)
    all_data = {k: high_fid_data.raw_data[k] for k in pids}
    # One constant feature row (the patient's BMI) per data segment.
    all_feature = {
        k: list(np.tile(bmis[k], (len(high_fid_data.raw_data[k]), 1)))
        for k in bmis.keys()
    }
    with open("{}_{}.txt".format(label, cut_point), "a+") as of:
        for item in all_feature.keys():
            print("Processing pid: {}".format(item))
            low_fid_data.data = []
            low_fid_data.feature = []
            # BUG FIX: was `- set(item)`, which removes the *characters* of
            # the pid (set("123") == {'1', '2', '3'}) instead of the pid
            # itself, leaking the held-out patient into the training pool.
            train_pids = set(all_feature.keys()) - {item}
            for k in train_pids:
                low_fid_data.data += all_data[k]
                # set up feature vector
                low_fid_data.feature += all_feature[k]
            low_fid_data.set_cutpoint = -1
            low_fid_data.reset(
                sampling_horizon, prediction_horizon, scale, 100, False, outtype, 1
            )
            high_fid_data.data = all_data[item]
            high_fid_data.set_cutpoint = cut_point
            high_fid_data.feature = all_feature[item]
            high_fid_data.reset(
                sampling_horizon,
                prediction_horizon,
                scale,
                train_test_ratio,
                False,
                outtype,
                1,
            )
            if cut_point > high_fid_data.train_n:
                print("Training data size smaller than required, skipped")
                continue
            # Each fit runs in a throwaway single-worker process — presumably
            # to release model/framework state between patients (TODO confirm).
            with Pool(1) as p:
                err, labs = p.apply(learner, (low_fid_data, high_fid_data, *argv))
            if transfer is not None:
                with Pool(1) as p:
                    terr1, score1 = p.apply(
                        transfer, (high_fid_data, argv[-3], argv[-2], 1)
                    )
                    terr2, score2 = p.apply(
                        transfer, (high_fid_data, argv[-3], argv[-2], 2)
                    )
                    terr3, score3 = p.apply(
                        transfer, (high_fid_data, argv[-3], argv[-2], 3)
                    )
                # f-string instead of '{:s}' so non-string patient ids work too
                of.write(
                    f"{item} {err:.4f} {terr1:.4f} {terr2:.4f} {terr3:.4f}\n"
                )
                np.savetxt(
                    f"{item}.txt", np.hstack((labs, score1, score2, score3)), fmt="%.4f"
                )
            else:
                of.write(f"{item} {err:.4f}\n")
def hierarchical_feature_runner(
    learner, argv, transfer=None, outtype="Same", cut_point=50, label="data"
):
    """Leave-one-patient-out evaluation where the low-fidelity training pool
    mixes the DirecNet T1D cohort with the remaining BIDMC T2D patients;
    HbA1c (scaled by 100) is attached as a per-segment feature.

    Per-patient errors are appended to '<label>_<cut_point>.txt'; patients
    with fewer than `cut_point` high-fidelity training segments are skipped.

    Args:
        learner: callable executed in a worker process as
            learner(low_fid_data, high_fid_data, *argv); must return err.
        argv: extra positional arguments forwarded to `learner`; argv[-3]
            and argv[-2] are also reused when calling `transfer`.
        transfer: optional transfer-learning callable, run for modes 1-3.
        outtype: 'Same' for regressor; 'None' for classifier.
        cut_point: number of high-fidelity training segments required.
        label: filename prefix for the results file.
    """
    # Low fidelity: DirecNet CGM; high fidelity: BIDMC spreadsheet.
    low_fid_data = CGMSDataSeg("direcnet_pid", "../data/tblADataCGMS.csv", 5)
    high_fid_data = CGMSDataSeg("VA2", "../data/CGMdataCSMcomplete.xlsx", 5)
    sampling_horizon = 7
    prediction_horizon = 6
    scale = 0.01
    train_test_ratio = 0.1  # dummy number; the effective split uses set_cutpoint
    # Demographic data for BIDMC patients (T2D).
    # BUG FIX: the original converted with .to_dict() first and then called
    # .dropna(...) on the resulting plain dict, which raises AttributeError;
    # NaNs must be dropped while the data is still a pandas Series.
    _, t2d = read_patient_info()
    bmis = pd.Series(t2d["BMI"].values, index=t2d["Patient ID"]).dropna().to_dict()
    genders = pd.Series(t2d["Gender"].values, index=t2d["Patient ID"])
    genders = genders.apply(lambda x: 1 if x == "Female" else 0).to_dict()
    ages = pd.Series(t2d["Age"].values, index=t2d["Patient ID"]).dropna().to_dict()
    hba1cs = pd.Series(t2d["HbA1c"].values, index=t2d["Patient ID"]).dropna().to_dict()
    # Demographic data for DirecNet patients (T1D); same NaN handling.
    _, t1d = read_direcnet_patient_info()
    direcnet_bmis = pd.Series(t1d["BMI"].values, index=t1d["PtID"]).dropna().to_dict()
    direcnet_ages = pd.Series(
        t1d["AgeAtBaseline"].values, index=t1d["PtID"]
    ).dropna().to_dict()
    direcnet_hba1cs = pd.Series(t1d["HbA1c"].values, index=t1d["PtID"]).dropna().to_dict()
    direcnet_genders = pd.Series(t1d["Gender"].values, index=t1d["PtID"])
    direcnet_genders = direcnet_genders.apply(lambda x: 1 if x == "F" else 0).to_dict()
    # Features are built first because HbA1c is missing for some patients;
    # only patients with a feature row get data entries below.
    t1d_feature = {
        k: list(np.tile(100 * direcnet_hba1cs[k], (len(low_fid_data.raw_data[k]), 1)))
        for k in direcnet_hba1cs.keys()
    }
    t2d_feature = {
        k: list(np.tile(100 * hba1cs[k], (len(high_fid_data.raw_data[k]), 1)))
        for k in hba1cs.keys()
    }
    t1d_data = {k: low_fid_data.raw_data[k] for k in direcnet_hba1cs}
    t2d_data = {k: high_fid_data.raw_data[k] for k in hba1cs}
    with open("{}_{}.txt".format(label, cut_point), "a+") as of:
        for item in t2d_feature.keys():
            print("Processing pid: {}".format(item))
            # Start from the full T1D pool, then add every other T2D patient.
            low_fid_data.data = sum(list(t1d_data.values()), [])
            low_fid_data.feature = sum(list(t1d_feature.values()), [])
            # BUG FIX: was `- set(item)` (a set of the pid's characters),
            # which leaked the held-out patient into the training pool.
            train_pids = set(t2d_feature.keys()) - {item}
            for k in train_pids:
                low_fid_data.data += t2d_data[k]
                # set up feature vector
                low_fid_data.feature += t2d_feature[k]
            low_fid_data.reset(
                sampling_horizon, prediction_horizon, scale, 100, False, outtype, 1
            )
            high_fid_data.data = t2d_data[item]
            high_fid_data.set_cutpoint = cut_point
            high_fid_data.feature = t2d_feature[item]
            high_fid_data.reset(
                sampling_horizon,
                prediction_horizon,
                scale,
                train_test_ratio,
                False,
                outtype,
                1,
            )
            if cut_point > high_fid_data.train_n:
                print("Training data size smaller than required, skipped")
                continue
            with Pool(1) as p:
                err = p.apply(learner, (low_fid_data, high_fid_data, *argv))
            if transfer is not None:
                with Pool(1) as p:
                    terr1 = p.apply(transfer, (high_fid_data, argv[-3], argv[-2], 1))
                    terr2 = p.apply(transfer, (high_fid_data, argv[-3], argv[-2], 2))
                    terr3 = p.apply(transfer, (high_fid_data, argv[-3], argv[-2], 3))
                # f-string instead of '{:s}' so non-string patient ids work too
                of.write(
                    f"{item} {err:.4f} {terr1:.4f} {terr2:.4f} {terr3:.4f}\n"
                )
            else:
                of.write(f"{item} {err:.4f}\n")
if __name__ == "__main__":
    # Dump each medication group produced by groupby_meds() to its own sheet.
    groups = dict(zip(("no_med", "insulin", "other"), groupby_meds()))
    with pd.ExcelWriter("CGMdata_meds.xlsx") as writer:
        for sheet_name, frame in groups.items():
            frame.to_excel(writer, sheet_name=sheet_name, index=False)
| [
"pandas.Series",
"numpy.ceil",
"pandas.read_csv",
"numpy.hstack",
"numpy.argmax",
"numpy.apply_along_axis",
"multiprocessing.Pool",
"pandas.DataFrame",
"cgms_data_seg.CGMSDataSeg",
"pandas.ExcelWriter"
] | [((1171, 1211), 'pandas.read_csv', 'pd.read_csv', (['"""../data/tblAScreening.csv"""'], {}), "('../data/tblAScreening.csv')\n", (1182, 1211), True, 'import pandas as pd\n'), ((1222, 1256), 'pandas.read_csv', 'pd.read_csv', (['"""../data/tblALab.csv"""'], {}), "('../data/tblALab.csv')\n", (1233, 1256), True, 'import pandas as pd\n'), ((1691, 1705), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1703, 1705), True, 'import pandas as pd\n'), ((2536, 2590), 'cgms_data_seg.CGMSDataSeg', 'CGMSDataSeg', (['"""direcnet"""', '"""../data/tblADataCGMS.csv"""', '(5)'], {}), "('direcnet', '../data/tblADataCGMS.csv', 5)\n", (2547, 2590), False, 'from cgms_data_seg import CGMSDataSeg\n'), ((6503, 6557), 'cgms_data_seg.CGMSDataSeg', 'CGMSDataSeg', (['"""direcnet"""', '"""../data/tblADataCGMS.csv"""', '(5)'], {}), "('direcnet', '../data/tblADataCGMS.csv', 5)\n", (6514, 6557), False, 'from cgms_data_seg import CGMSDataSeg\n'), ((7029, 7085), 'cgms_data_seg.CGMSDataSeg', 'CGMSDataSeg', (['"""VA2"""', '"""../data/CGMdataCSMcomplete.xlsx"""', '(5)'], {}), "('VA2', '../data/CGMdataCSMcomplete.xlsx', 5)\n", (7040, 7085), False, 'from cgms_data_seg import CGMSDataSeg\n'), ((10714, 10767), 'cgms_data_seg.CGMSDataSeg', 'CGMSDataSeg', (['"""VA1"""', '"""../data/A_J_201211301041.xls"""', '(5)'], {}), "('VA1', '../data/A_J_201211301041.xls', 5)\n", (10725, 10767), False, 'from cgms_data_seg import CGMSDataSeg\n'), ((11022, 11078), 'cgms_data_seg.CGMSDataSeg', 'CGMSDataSeg', (['"""VA2"""', '"""../data/CGMdataCSMcomplete.xlsx"""', '(5)'], {}), "('VA2', '../data/CGMdataCSMcomplete.xlsx', 5)\n", (11033, 11078), False, 'from cgms_data_seg import CGMSDataSeg\n'), ((13749, 13817), 'pandas.Series', 'pd.Series', (["the_group['Gender'].values"], {'index': "the_group['Patient ID']"}), "(the_group['Gender'].values, index=the_group['Patient ID'])\n", (13758, 13817), True, 'import pandas as pd\n'), ((13999, 14066), 'pandas.Series', 'pd.Series', (["the_group['HbA1c'].values"], {'index': 
"the_group['Patient ID']"}), "(the_group['HbA1c'].values, index=the_group['Patient ID'])\n", (14008, 14066), True, 'import pandas as pd\n'), ((14336, 14392), 'cgms_data_seg.CGMSDataSeg', 'CGMSDataSeg', (['"""VA2"""', '"""../data/CGMdataCSMcomplete.xlsx"""', '(5)'], {}), "('VA2', '../data/CGMdataCSMcomplete.xlsx', 5)\n", (14347, 14392), False, 'from cgms_data_seg import CGMSDataSeg\n'), ((14458, 14514), 'cgms_data_seg.CGMSDataSeg', 'CGMSDataSeg', (['"""VA2"""', '"""../data/CGMdataCSMcomplete.xlsx"""', '(5)'], {}), "('VA2', '../data/CGMdataCSMcomplete.xlsx', 5)\n", (14469, 14514), False, 'from cgms_data_seg import CGMSDataSeg\n'), ((17104, 17162), 'cgms_data_seg.CGMSDataSeg', 'CGMSDataSeg', (['"""direcnet_pid"""', '"""../data/tblADataCGMS.csv"""', '(5)'], {}), "('direcnet_pid', '../data/tblADataCGMS.csv', 5)\n", (17115, 17162), False, 'from cgms_data_seg import CGMSDataSeg\n'), ((17228, 17284), 'cgms_data_seg.CGMSDataSeg', 'CGMSDataSeg', (['"""VA2"""', '"""../data/CGMdataCSMcomplete.xlsx"""', '(5)'], {}), "('VA2', '../data/CGMdataCSMcomplete.xlsx', 5)\n", (17239, 17284), False, 'from cgms_data_seg import CGMSDataSeg\n'), ((17576, 17632), 'pandas.Series', 'pd.Series', (["t2d['Gender'].values"], {'index': "t2d['Patient ID']"}), "(t2d['Gender'].values, index=t2d['Patient ID'])\n", (17585, 17632), True, 'import pandas as pd\n'), ((17796, 17851), 'pandas.Series', 'pd.Series', (["t2d['HbA1c'].values"], {'index': "t2d['Patient ID']"}), "(t2d['HbA1c'].values, index=t2d['Patient ID'])\n", (17805, 17851), True, 'import pandas as pd\n'), ((18297, 18346), 'pandas.Series', 'pd.Series', (["t1d['HbA1c'].values"], {'index': "t1d['PtID']"}), "(t1d['HbA1c'].values, index=t1d['PtID'])\n", (18306, 18346), True, 'import pandas as pd\n'), ((18571, 18621), 'pandas.Series', 'pd.Series', (["t1d['Gender'].values"], {'index': "t1d['PtID']"}), "(t1d['Gender'].values, index=t1d['PtID'])\n", (18580, 18621), True, 'import pandas as pd\n'), ((6752, 6805), 'cgms_data_seg.CGMSDataSeg', 'CGMSDataSeg', 
(['"""VA1"""', '"""../data/A_J_201211301041.xls"""', '(5)'], {}), "('VA1', '../data/A_J_201211301041.xls', 5)\n", (6763, 6805), False, 'from cgms_data_seg import CGMSDataSeg\n'), ((7331, 7362), 'numpy.ceil', 'np.ceil', (['(1 / (1 - 1 / raw_fold))'], {}), '(1 / (1 - 1 / raw_fold))\n', (7338, 7362), True, 'import numpy as np\n'), ((7388, 7405), 'numpy.ceil', 'np.ceil', (['raw_fold'], {}), '(raw_fold)\n', (7395, 7405), True, 'import numpy as np\n'), ((21173, 21208), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['"""CGMdata_meds.xlsx"""'], {}), "('CGMdata_meds.xlsx')\n", (21187, 21208), True, 'import pandas as pd\n'), ((203, 245), 'numpy.hstack', 'np.hstack', (['(x >= threshold, x < threshold)'], {}), '((x >= threshold, x < threshold))\n', (212, 245), True, 'import numpy as np\n'), ((376, 418), 'numpy.hstack', 'np.hstack', (['(x <= threshold, x > threshold)'], {}), '((x <= threshold, x > threshold))\n', (385, 418), True, 'import numpy as np\n'), ((727, 753), 'numpy.argmax', 'np.argmax', (['y_pred'], {'axis': '(-1)'}), '(y_pred, axis=-1)\n', (736, 753), True, 'import numpy as np\n'), ((755, 781), 'numpy.argmax', 'np.argmax', (['y_true'], {'axis': '(-1)'}), '(y_true, axis=-1)\n', (764, 781), True, 'import numpy as np\n'), ((12288, 12295), 'multiprocessing.Pool', 'Pool', (['(1)'], {}), '(1)\n', (12292, 12295), False, 'from multiprocessing import Pool\n'), ((13658, 13723), 'pandas.Series', 'pd.Series', (["the_group['BMI'].values"], {'index': "the_group['Patient ID']"}), "(the_group['BMI'].values, index=the_group['Patient ID'])\n", (13667, 13723), True, 'import pandas as pd\n'), ((15942, 15949), 'multiprocessing.Pool', 'Pool', (['(1)'], {}), '(1)\n', (15946, 15949), False, 'from multiprocessing import Pool\n'), ((17498, 17551), 'pandas.Series', 'pd.Series', (["t2d['BMI'].values"], {'index': "t2d['Patient ID']"}), "(t2d['BMI'].values, index=t2d['Patient ID'])\n", (17507, 17551), True, 'import pandas as pd\n'), ((17719, 17772), 'pandas.Series', 'pd.Series', 
(["t2d['Age'].values"], {'index': "t2d['Patient ID']"}), "(t2d['Age'].values, index=t2d['Patient ID'])\n", (17728, 17772), True, 'import pandas as pd\n'), ((18129, 18176), 'pandas.Series', 'pd.Series', (["t1d['BMI'].values"], {'index': "t1d['PtID']"}), "(t1d['BMI'].values, index=t1d['PtID'])\n", (18138, 18176), True, 'import pandas as pd\n'), ((18207, 18264), 'pandas.Series', 'pd.Series', (["t1d['AgeAtBaseline'].values"], {'index': "t1d['PtID']"}), "(t1d['AgeAtBaseline'].values, index=t1d['PtID'])\n", (18216, 18264), True, 'import pandas as pd\n'), ((20375, 20382), 'multiprocessing.Pool', 'Pool', (['(1)'], {}), '(1)\n', (20379, 20382), False, 'from multiprocessing import Pool\n'), ((3184, 3237), 'cgms_data_seg.CGMSDataSeg', 'CGMSDataSeg', (['"""VA1"""', '"""../data/A_J_201211301041.xls"""', '(5)'], {}), "('VA1', '../data/A_J_201211301041.xls', 5)\n", (3195, 3237), False, 'from cgms_data_seg import CGMSDataSeg\n'), ((3280, 3336), 'cgms_data_seg.CGMSDataSeg', 'CGMSDataSeg', (['"""VA2"""', '"""../data/CGMdataCSMcomplete.xlsx"""', '(5)'], {}), "('VA2', '../data/CGMdataCSMcomplete.xlsx', 5)\n", (3291, 3336), False, 'from cgms_data_seg import CGMSDataSeg\n'), ((3667, 3674), 'multiprocessing.Pool', 'Pool', (['(1)'], {}), '(1)\n', (3671, 3674), False, 'from multiprocessing import Pool\n'), ((9105, 9112), 'multiprocessing.Pool', 'Pool', (['(1)'], {}), '(1)\n', (9109, 9112), False, 'from multiprocessing import Pool\n'), ((12431, 12438), 'multiprocessing.Pool', 'Pool', (['(1)'], {}), '(1)\n', (12435, 12438), False, 'from multiprocessing import Pool\n'), ((13050, 13091), 'numpy.hstack', 'np.hstack', (['(labs, score1, score2, score3)'], {}), '((labs, score1, score2, score3))\n', (13059, 13091), True, 'import numpy as np\n'), ((16085, 16092), 'multiprocessing.Pool', 'Pool', (['(1)'], {}), '(1)\n', (16089, 16092), False, 'from multiprocessing import Pool\n'), ((16704, 16745), 'numpy.hstack', 'np.hstack', (['(labs, score1, score2, score3)'], {}), '((labs, score1, score2, 
score3))\n', (16713, 16745), True, 'import numpy as np\n'), ((596, 627), 'numpy.apply_along_axis', 'np.apply_along_axis', (['safe', '(1)', 'x'], {}), '(safe, 1, x)\n', (615, 627), True, 'import numpy as np\n'), ((3816, 3823), 'multiprocessing.Pool', 'Pool', (['(1)'], {}), '(1)\n', (3820, 3823), False, 'from multiprocessing import Pool\n'), ((20520, 20527), 'multiprocessing.Pool', 'Pool', (['(1)'], {}), '(1)\n', (20524, 20527), False, 'from multiprocessing import Pool\n'), ((5147, 5154), 'multiprocessing.Pool', 'Pool', (['(1)'], {}), '(1)\n', (5151, 5154), False, 'from multiprocessing import Pool\n'), ((9262, 9269), 'multiprocessing.Pool', 'Pool', (['(1)'], {}), '(1)\n', (9266, 9269), False, 'from multiprocessing import Pool\n'), ((5320, 5327), 'multiprocessing.Pool', 'Pool', (['(1)'], {}), '(1)\n', (5324, 5327), False, 'from multiprocessing import Pool\n')] |
import numpy as np
import glob


def _gather_labels(pattern):
    """Load every npz matching *pattern* (in sorted order) and return the four
    label columns (Teff, logg, [Fe/H], [alpha/Fe]) concatenated across files.

    Returns four empty arrays when no file matches.
    """
    columns = [np.array([]) for _ in range(4)]
    for filename in sorted(glob.glob(pattern)):
        labels = np.load(filename)["arr_0"]
        columns = [np.append(col, labels[:, j]) for j, col in enumerate(columns)]
    return columns


# Reference (training) labels and Cannon-inferred labels, batch by batch.
tr_teff, tr_logg, tr_feh, tr_alpha = _gather_labels("./*tr_label.npz")
cannon_teff, cannon_logg, cannon_feh, cannon_alpha = _gather_labels(
    "./*cannon_labels.npz")

# Signal-to-noise ratios of the test spectra.
test_SNR = np.array([])
for filename in sorted(glob.glob("./*_SNR.npz")):
    test_SNR = np.append(test_SNR, np.load(filename)["arr_0"])

np.savez("test_SNR", test_SNR)
np.savez("tr_label", np.vstack((tr_teff, tr_logg, tr_feh, tr_alpha)))
np.savez("cannon_label", np.vstack((cannon_teff, cannon_logg, cannon_feh, cannon_alpha)))
| [
"numpy.savez",
"numpy.append",
"numpy.array",
"numpy.vstack",
"numpy.load",
"glob.glob"
] | [((46, 58), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (54, 58), True, 'import numpy as np\n'), ((73, 85), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (81, 85), True, 'import numpy as np\n'), ((99, 111), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (107, 111), True, 'import numpy as np\n'), ((127, 139), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (135, 139), True, 'import numpy as np\n'), ((151, 163), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (159, 163), True, 'import numpy as np\n'), ((174, 186), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (182, 186), True, 'import numpy as np\n'), ((196, 208), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (204, 208), True, 'import numpy as np\n'), ((220, 232), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (228, 232), True, 'import numpy as np\n'), ((238, 266), 'glob.glob', 'glob.glob', (['"""./*tr_label.npz"""'], {}), "('./*tr_label.npz')\n", (247, 266), False, 'import glob\n'), ((525, 558), 'glob.glob', 'glob.glob', (['"""./*cannon_labels.npz"""'], {}), "('./*cannon_labels.npz')\n", (534, 558), False, 'import glob\n'), ((849, 873), 'glob.glob', 'glob.glob', (['"""./*_SNR.npz"""'], {}), "('./*_SNR.npz')\n", (858, 873), False, 'import glob\n'), ((895, 907), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (903, 907), True, 'import numpy as np\n'), ((1008, 1038), 'numpy.savez', 'np.savez', (['"""test_SNR"""', 'test_SNR'], {}), "('test_SNR', test_SNR)\n", (1016, 1038), True, 'import numpy as np\n'), ((350, 382), 'numpy.append', 'np.append', (['tr_teff', 'labels[:, 0]'], {}), '(tr_teff, labels[:, 0])\n', (359, 382), True, 'import numpy as np\n'), ((396, 428), 'numpy.append', 'np.append', (['tr_logg', 'labels[:, 1]'], {}), '(tr_logg, labels[:, 1])\n', (405, 428), True, 'import numpy as np\n'), ((441, 472), 'numpy.append', 'np.append', (['tr_feh', 'labels[:, 2]'], {}), '(tr_feh, labels[:, 2])\n', (450, 472), True, 'import numpy as np\n'), ((487, 520), 'numpy.append', 
'np.append', (['tr_alpha', 'labels[:, 3]'], {}), '(tr_alpha, labels[:, 3])\n', (496, 520), True, 'import numpy as np\n'), ((646, 682), 'numpy.append', 'np.append', (['cannon_teff', 'labels[:, 0]'], {}), '(cannon_teff, labels[:, 0])\n', (655, 682), True, 'import numpy as np\n'), ((700, 736), 'numpy.append', 'np.append', (['cannon_logg', 'labels[:, 1]'], {}), '(cannon_logg, labels[:, 1])\n', (709, 736), True, 'import numpy as np\n'), ((753, 788), 'numpy.append', 'np.append', (['cannon_feh', 'labels[:, 2]'], {}), '(cannon_feh, labels[:, 2])\n', (762, 788), True, 'import numpy as np\n'), ((807, 844), 'numpy.append', 'np.append', (['cannon_alpha', 'labels[:, 3]'], {}), '(cannon_alpha, labels[:, 3])\n', (816, 844), True, 'import numpy as np\n'), ((981, 1006), 'numpy.append', 'np.append', (['test_SNR', 'SNRs'], {}), '(test_SNR, SNRs)\n', (990, 1006), True, 'import numpy as np\n'), ((1060, 1107), 'numpy.vstack', 'np.vstack', (['(tr_teff, tr_logg, tr_feh, tr_alpha)'], {}), '((tr_teff, tr_logg, tr_feh, tr_alpha))\n', (1069, 1107), True, 'import numpy as np\n'), ((1134, 1197), 'numpy.vstack', 'np.vstack', (['(cannon_teff, cannon_logg, cannon_feh, cannon_alpha)'], {}), '((cannon_teff, cannon_logg, cannon_feh, cannon_alpha))\n', (1143, 1197), True, 'import numpy as np\n'), ((309, 326), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (316, 326), True, 'import numpy as np\n'), ((601, 618), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (608, 618), True, 'import numpy as np\n'), ((939, 956), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (946, 956), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from algorithms.genetic.nsgaii.nsgaii_algorithm import NSGAIIAlgorithm as tested_algorithm_class
class NSGAIITestCase(unittest.TestCase):
    """Golden-output regression tests for NSGAIIAlgorithm.

    Every expected value below was recorded with random seed 0; the tests pin
    the algorithm's current behaviour rather than derive results analytically.
    """

    def setUp(self):
        """Create the algorithm under test with a fixed random seed."""
        seed = 0
        self.algorithm = tested_algorithm_class()
        self.algorithm.set_seed(seed)

    def _assert_genes_equal(self, expected_genes, population):
        """Assert each individual's `selected` genes match `expected_genes`."""
        for i, genes in enumerate(expected_genes):
            with self.subTest(i=i):
                self.assertIsNone(np.testing.assert_array_equal(
                    genes, population[i].selected))

    def test_run(self):
        """`run()` returns the expected population and bookkeeping counters."""
        self.algorithm.population_length = 5
        self.algorithm.max_generations = 5
        result = self.algorithm.run()

        expected_num_generations = 5
        expected_num_evaluations = 55  # 5 + 5*(5+5) = 55
        expected_pop_size = 5
        self.assertEqual(result["numGenerations"], expected_num_generations)
        self.assertEqual(result["numEvaluations"], expected_num_evaluations)
        self.assertEqual(len(result["population"]), expected_pop_size)

        expected_genes = [[0, 0, 0, 0, 0],
                          [0, 0, 0, 0, 0],
                          [0, 0, 0, 0, 1],
                          [1, 0, 0, 0, 1],
                          [1, 1, 1, 1, 1]]
        self._assert_genes_equal(expected_genes, result["population"])

    def test_generate_starting_population(self):
        """`generate_starting_population()` yields the seeded random individuals."""
        self.algorithm.population_length = 5
        actual_population = self.algorithm.generate_starting_population()
        expected_genes = [[0, 1, 1, 0, 1],
                          [1, 1, 1, 1, 1],
                          [1, 0, 0, 1, 0],
                          [0, 0, 0, 0, 1],
                          [0, 1, 1, 0, 0]]
        self._assert_genes_equal(expected_genes, actual_population)

    def test_selection_tournament(self):
        """`selection_tournament()` picks the expected tournament winners."""
        self.algorithm.population_length = 5
        self.algorithm.max_generations = 5
        self.algorithm.selection_candidates = 2
        result = self.algorithm.run()
        actual_population = self.algorithm.selection_tournament(
            result["population"])
        expected_genes = [[0, 0, 0, 0, 0],
                          [1, 1, 1, 1, 1],
                          [0, 0, 0, 0, 0],
                          [1, 1, 1, 1, 1],
                          [0, 0, 0, 0, 1]]
        self._assert_genes_equal(expected_genes, actual_population)

    def test_crossover_one_point(self):
        """`crossover_one_point()` produces the expected offspring population."""
        self.algorithm.population_length = 5
        self.algorithm.max_generations = 5
        result = self.algorithm.run()
        actual_population = self.algorithm.crossover_one_point(
            result["population"])
        expected_genes = [[0, 0, 0, 0, 0],
                          [0, 0, 0, 0, 0],
                          [0, 0, 0, 0, 1],
                          [1, 0, 0, 0, 1],
                          [1, 1, 1, 1, 1]]
        self._assert_genes_equal(expected_genes, actual_population)

    def test_crossover_aux_one_point(self):
        """`crossover_aux_one_point()` crosses two parents into two children."""
        self.algorithm.population_length = 5
        self.algorithm.max_generations = 5
        result = self.algorithm.run()
        actual_population = self.algorithm.crossover_aux_one_point(
            result["population"][0], result["population"][3])
        expected_genes = [[0, 0, 0, 0, 1],
                          [1, 0, 0, 0, 0]]
        self._assert_genes_equal(expected_genes, actual_population)

    def test_mutation_flip1bit(self):
        """`mutation_flip1bit()` flips one gene per individual at prob 1.0."""
        self.algorithm.population_length = 5
        self.algorithm.max_generations = 5
        result = self.algorithm.run()
        self.algorithm.mutation_prob = 1.0
        actual_population = self.algorithm.mutation_flip1bit(
            result["population"])
        expected_genes = [[0, 0, 0, 0, 1],
                          [0, 1, 0, 0, 0],
                          [0, 1, 0, 0, 1],
                          [1, 0, 1, 0, 1],
                          [1, 0, 1, 1, 1]]
        self._assert_genes_equal(expected_genes, actual_population)

    def test_mutation_flipeachbit(self):
        """`mutation_flipeachbit()` inverts every gene at mutation prob 1.0."""
        self.algorithm.population_length = 5
        self.algorithm.max_generations = 5
        result = self.algorithm.run()
        self.algorithm.mutation_prob = 1.0
        actual_population = self.algorithm.mutation_flipeachbit(
            result["population"])
        expected_genes = [[1, 1, 1, 1, 1],
                          [1, 1, 1, 1, 1],
                          [1, 1, 1, 1, 0],
                          [0, 1, 1, 1, 0],
                          [0, 0, 0, 0, 0]]
        self._assert_genes_equal(expected_genes, actual_population)

    def test_fast_nondominated_sort(self):
        """`fast_nondominated_sort()` partitions the population into fronts."""
        self.algorithm.population_length = 5
        self.algorithm.max_generations = 5
        self.algorithm.selection_candidates = 2
        result = self.algorithm.run()
        actual_population, actual_fronts = self.algorithm.fast_nondominated_sort(
            result["population"])

        expected_population = [[0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 1],
                               [1, 0, 0, 0, 1],
                               [1, 1, 1, 1, 1]]
        self._assert_genes_equal(expected_population, actual_population)

        expected_fronts_appended = [[0, 0, 0, 0, 0],
                                    [0, 0, 0, 0, 0],
                                    [0, 0, 0, 0, 1],
                                    [1, 0, 0, 0, 1],
                                    [1, 1, 1, 1, 1]]
        expected_fronts_len = 2
        self.assertEqual(expected_fronts_len, len(actual_fronts))
        actual_fronts_appended = actual_fronts[0] + actual_fronts[1]
        self._assert_genes_equal(expected_fronts_appended,
                                 actual_fronts_appended)

        # All individuals fall in non-dominated fronts: rank 0, dominated by none.
        expected_ranks = [0, 0, 0, 0, 0]
        expected_domination_count = [0, 0, 0, 0, 0]
        for i in range(len(actual_fronts_appended)):
            with self.subTest(i=i):
                self.assertEqual(expected_ranks[i],
                                 actual_fronts_appended[i].rank)
                self.assertEqual(expected_domination_count[i],
                                 actual_fronts_appended[i].domination_count)

    def test_calculate_crowding_distance(self):
        """`calculate_crowding_distance()` assigns the expected distances."""
        self.algorithm.population_length = 5
        self.algorithm.max_generations = 5
        self.algorithm.selection_candidates = 2
        result = self.algorithm.run()
        actual_population = self.algorithm.calculate_crowding_distance(
            result["population"])
        # Boundary solutions get infinite distance by construction.
        expected_crowding_distance = [
            float("inf"), 0.64, 1.17, 1.36, float("inf")]
        actual_crowding_distance = np.around(
            [ind.crowding_distance for ind in actual_population], 2)
        for i in range(len(actual_population)):
            with self.subTest(i=i):
                self.assertIsNone(np.testing.assert_array_equal(
                    expected_crowding_distance[i], actual_crowding_distance[i]))

    def test_crowding_operator(self):
        """`crowding_operator()` orders individuals by rank then distance."""
        self.algorithm.population_length = 5
        self.algorithm.max_generations = 5
        self.algorithm.selection_candidates = 2
        result = self.algorithm.run()
        actual_result1 = self.algorithm.crowding_operator(
            result["population"][0], result["population"][1])
        expected_result1 = 1
        self.assertEqual(expected_result1, actual_result1)
        actual_result2 = self.algorithm.crowding_operator(
            result["population"][2], result["population"][3])
        expected_result2 = -1
        self.assertEqual(expected_result2, actual_result2)
| [
"numpy.testing.assert_array_equal",
"numpy.around",
"algorithms.genetic.nsgaii.nsgaii_algorithm.NSGAIIAlgorithm"
] | [((305, 329), 'algorithms.genetic.nsgaii.nsgaii_algorithm.NSGAIIAlgorithm', 'tested_algorithm_class', ([], {}), '()\n', (327, 329), True, 'from algorithms.genetic.nsgaii.nsgaii_algorithm import NSGAIIAlgorithm as tested_algorithm_class\n'), ((8993, 9031), 'numpy.around', 'np.around', (['actual_crowding_distance', '(2)'], {}), '(actual_crowding_distance, 2)\n', (9002, 9031), True, 'import numpy as np\n'), ((1468, 1547), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['expected_genes[i]', 'actual_population[i].selected'], {}), '(expected_genes[i], actual_population[i].selected)\n', (1497, 1547), True, 'import numpy as np\n'), ((2160, 2239), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['expected_genes[i]', 'actual_population[i].selected'], {}), '(expected_genes[i], actual_population[i].selected)\n', (2189, 2239), True, 'import numpy as np\n'), ((2991, 3070), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['expected_genes[i]', 'actual_population[i].selected'], {}), '(expected_genes[i], actual_population[i].selected)\n', (3020, 3070), True, 'import numpy as np\n'), ((3771, 3850), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['expected_genes[i]', 'actual_population[i].selected'], {}), '(expected_genes[i], actual_population[i].selected)\n', (3800, 3850), True, 'import numpy as np\n'), ((4462, 4541), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['expected_genes[i]', 'actual_population[i].selected'], {}), '(expected_genes[i], actual_population[i].selected)\n', (4491, 4541), True, 'import numpy as np\n'), ((5279, 5358), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['expected_genes[i]', 'actual_population[i].selected'], {}), '(expected_genes[i], actual_population[i].selected)\n', (5308, 5358), True, 'import numpy as np\n'), ((6105, 6184), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', 
(['expected_genes[i]', 'actual_population[i].selected'], {}), '(expected_genes[i], actual_population[i].selected)\n', (6134, 6184), True, 'import numpy as np\n'), ((6985, 7074), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['expected_population[i]', 'actual_population[i].selected'], {}), '(expected_population[i], actual_population[i].\n selected)\n', (7014, 7074), True, 'import numpy as np\n'), ((7696, 7794), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['expected_fronts_appended[i]', 'actual_fronts_appended[i].selected'], {}), '(expected_fronts_appended[i],\n actual_fronts_appended[i].selected)\n', (7725, 7794), True, 'import numpy as np\n'), ((8081, 8166), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['expected_ranks[i]', 'actual_fronts_appended[i].rank'], {}), '(expected_ranks[i], actual_fronts_appended[i].rank\n )\n', (8110, 8166), True, 'import numpy as np\n'), ((8218, 8325), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['expected_domination_count[i]', 'actual_fronts_appended[i].domination_count'], {}), '(expected_domination_count[i],\n actual_fronts_appended[i].domination_count)\n', (8247, 8325), True, 'import numpy as np\n'), ((9151, 9244), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['expected_crowding_distance[i]', 'actual_crowding_distance[i]'], {}), '(expected_crowding_distance[i],\n actual_crowding_distance[i])\n', (9180, 9244), True, 'import numpy as np\n')] |
"""Training procedure for real NVP.
"""
import argparse
import torch, torchvision
import torch.distributions as distributions
import torch.optim as optim
import torchvision.utils as utils
import numpy as np
import realnvp, data_utils
class Hyperparameters:
    """Bundle of architectural choices used when constructing real NVP layers."""

    def __init__(self, base_dim, res_blocks, bottleneck,
                 skip, weight_norm, coupling_bn, affine):
        """Store the layer-construction hyperparameters on the instance.

        Args:
            base_dim: features in residual blocks of first few layers.
            res_blocks: number of residual blocks to use.
            bottleneck: True if use bottleneck, False otherwise.
            skip: True if use skip architecture, False otherwise.
            weight_norm: True if apply weight normalization, False otherwise.
            coupling_bn: True if batchnorm coupling layer output, False otherwise.
            affine: True if use affine coupling, False if use additive coupling.
        """
        self.__dict__.update(
            base_dim=base_dim,
            res_blocks=res_blocks,
            bottleneck=bottleneck,
            skip=skip,
            weight_norm=weight_norm,
            coupling_bn=coupling_bn,
            affine=affine,
        )
def main(args):
    """Train a Real NVP flow on the chosen dataset.

    Builds the model from the parsed command-line hyperparameters, runs a
    train/validate loop with early stopping, saves a grid of generated
    samples every epoch and checkpoints the model whenever the validation
    log-likelihood improves.

    Args:
        args: argparse.Namespace carrying the options defined in __main__.
    """
    device = torch.device("cuda:0")

    # model hyperparameters
    dataset = args.dataset
    batch_size = args.batch_size
    hps = Hyperparameters(
        base_dim = args.base_dim,
        res_blocks = args.res_blocks,
        bottleneck = args.bottleneck,
        skip = args.skip,
        weight_norm = args.weight_norm,
        coupling_bn = args.coupling_bn,
        affine = args.affine)
    scale_reg = 5e-5    # L2 regularization strength

    # optimization hyperparameters
    lr = args.lr
    momentum = args.momentum
    decay = args.decay

    # prefix for images and checkpoints
    # (encodes every hyperparameter so runs do not overwrite each other)
    filename = 'bs%d_' % batch_size \
             + 'normal_' \
             + 'bd%d_' % hps.base_dim \
             + 'rb%d_' % hps.res_blocks \
             + 'bn%d_' % hps.bottleneck \
             + 'sk%d_' % hps.skip \
             + 'wn%d_' % hps.weight_norm \
             + 'cb%d_' % hps.coupling_bn \
             + 'af%d' % hps.affine \

    # load dataset
    train_split, val_split, data_info = data_utils.load(dataset)
    train_loader = torch.utils.data.DataLoader(train_split,
        batch_size=batch_size, shuffle=True, num_workers=2)
    val_loader = torch.utils.data.DataLoader(val_split,
        batch_size=batch_size, shuffle=False, num_workers=2)

    prior = distributions.Normal(   # isotropic standard normal distribution
        torch.tensor(0.).to(device), torch.tensor(1.).to(device))
    flow = realnvp.RealNVP(datainfo=data_info, prior=prior, hps=hps).to(device)
    optimizer = optim.Adamax(flow.parameters(), lr=lr, betas=(momentum, decay), eps=1e-7)

    # training-loop bookkeeping (early stopping tracks best validation log-ll)
    epoch = 0
    running_loss = 0.
    running_log_ll = 0.
    optimal_log_ll = float('-inf')
    early_stop = 0
    image_size = data_info.channel * data_info.size**2    # full image dimension
    while epoch < args.max_epoch:
        epoch += 1
        print('Epoch %d:' % epoch)
        flow.train()
        for batch_idx, data in enumerate(train_loader, 1):
            optimizer.zero_grad()
            x, _ = data
            # log-determinant of Jacobian from the logit transform
            x, log_det = data_utils.logit_transform(x)
            x = x.to(device)
            log_det = log_det.to(device)
            # log-likelihood of input minibatch
            log_ll, weight_scale = flow(x)
            log_ll = (log_ll + log_det).mean()
            # add L2 regularization on scaling factors
            loss = -log_ll + scale_reg * weight_scale
            running_loss += loss.item()
            running_log_ll += log_ll.item()
            loss.backward()
            optimizer.step()
            if batch_idx % 10 == 0:
                # bits/dim: log-ll converted from nats over the full image
                bit_per_dim = (-log_ll.item() + np.log(256.) * image_size) \
                    / (image_size * np.log(2.))
                print('[%d/%d]\tloss: %.3f\tlog-ll: %.3f\tbits/dim: %.3f' % \
                    (batch_idx*batch_size, len(train_loader.dataset),
                        loss.item(), log_ll.item(), bit_per_dim))
        mean_loss = running_loss / batch_idx
        mean_log_ll = running_log_ll / batch_idx
        mean_bit_per_dim = (-mean_log_ll + np.log(256.) * image_size) \
             / (image_size * np.log(2.))
        print('===> Average train loss: %.3f' % mean_loss)
        print('===> Average train log-likelihood: %.3f' % mean_log_ll)
        print('===> Average train bit_per_dim: %.3f' % mean_bit_per_dim)
        running_loss = 0.
        running_log_ll = 0.

        flow.eval()
        with torch.no_grad():
            for batch_idx, data in enumerate(val_loader, 1):
                x, _ = data
                x, log_det = data_utils.logit_transform(x)
                x = x.to(device)
                log_det = log_det.to(device)
                # log-likelihood of input minibatch
                log_ll, weight_scale = flow(x)
                log_ll = (log_ll + log_det).mean()
                # add L2 regularization on scaling factors
                loss = -log_ll + scale_reg * weight_scale
                running_loss += loss.item()
                running_log_ll += log_ll.item()
            mean_loss = running_loss / batch_idx
            mean_log_ll = running_log_ll / batch_idx
            mean_bit_per_dim = (-mean_log_ll + np.log(256.) * image_size) \
                / (image_size * np.log(2.))
            print('===> Average validation loss: %.3f' % mean_loss)
            print('===> Average validation log-likelihood: %.3f' % mean_log_ll)
            print('===> Average validation bits/dim: %.3f' % mean_bit_per_dim)
            running_loss = 0.
            running_log_ll = 0.

            # sample from the flow and save an image grid for this epoch
            samples = flow.sample(args.sample_size)
            samples, _ = data_utils.logit_transform(samples, reverse=True)
            utils.save_image(utils.make_grid(samples),
                './samples/' + dataset + '/' + filename + '_ep%d.png' % epoch)

            # checkpoint on improvement; otherwise count toward early stop
            if mean_log_ll > optimal_log_ll:
                early_stop = 0
                optimal_log_ll = mean_log_ll
                torch.save(flow, './models/' + dataset + '/' + filename + '.model')
                print('[MODEL SAVED]')
            else:
                early_stop += 1
                if early_stop >= 100:
                    break

            print('--> Early stopping %d/100 (BEST validation log-likelihood: %.3f)' \
                % (early_stop, optimal_log_ll))

    print('Training finished at epoch %d.' % epoch)
if __name__ == '__main__':
    # Command-line interface: model architecture, optimization and run
    # settings. Parsed into an argparse.Namespace and handed to main().
    parser = argparse.ArgumentParser('Real NVP PyTorch implementation')
    parser.add_argument('--dataset',
                        help='dataset to be modeled.',
                        type=str,
                        default='cifar10')
    parser.add_argument('--batch_size',
                        help='number of images in a mini-batch.',
                        type=int,
                        default=64)
    # architecture options (mirror the Hyperparameters fields)
    parser.add_argument('--base_dim',
                        help='features in residual blocks of first few layers.',
                        type=int,
                        default=64)
    parser.add_argument('--res_blocks',
                        help='number of residual blocks per group.',
                        type=int,
                        default=8)
    parser.add_argument('--bottleneck',
                        help='whether to use bottleneck in residual blocks.',
                        type=int,
                        default=0)
    parser.add_argument('--skip',
                        help='whether to use skip connection in coupling layers.',
                        type=int,
                        default=1)
    parser.add_argument('--weight_norm',
                        help='whether to apply weight normalization.',
                        type=int,
                        default=1)
    parser.add_argument('--coupling_bn',
                        help='whether to apply batchnorm after coupling layers.',
                        type=int,
                        default=1)
    parser.add_argument('--affine',
                        help='whether to use affine coupling.',
                        type=int,
                        default=1)
    # run / optimizer options
    parser.add_argument('--max_epoch',
                        help='maximum number of training epoches.',
                        type=int,
                        default=500)
    parser.add_argument('--sample_size',
                        help='number of images to generate.',
                        type=int,
                        default=64)
    parser.add_argument('--lr',
                        help='initial learning rate.',
                        type=float,
                        default=1e-3)
    parser.add_argument('--momentum',
                        help='beta1 in Adam optimizer.',
                        type=float,
                        default=0.9)
    parser.add_argument('--decay',
                        help='beta2 in Adam optimizer.',
                        type=float,
                        default=0.999)
    args = parser.parse_args()
main(args) | [
"torchvision.utils.make_grid",
"argparse.ArgumentParser",
"numpy.log",
"torch.no_grad",
"torch.tensor",
"torch.save",
"torch.utils.data.DataLoader",
"data_utils.load",
"realnvp.RealNVP",
"data_utils.logit_transform",
"torch.device"
] | [((1247, 1269), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (1259, 1269), False, 'import torch, torchvision\n'), ((2245, 2269), 'data_utils.load', 'data_utils.load', (['dataset'], {}), '(dataset)\n', (2260, 2269), False, 'import realnvp, data_utils\n'), ((2289, 2386), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_split'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(2)'}), '(train_split, batch_size=batch_size, shuffle=\n True, num_workers=2)\n', (2316, 2386), False, 'import torch, torchvision\n'), ((2407, 2502), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_split'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(2)'}), '(val_split, batch_size=batch_size, shuffle=False,\n num_workers=2)\n', (2434, 2502), False, 'import torch, torchvision\n'), ((6647, 6705), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Real NVP PyTorch implementation"""'], {}), "('Real NVP PyTorch implementation')\n", (6670, 6705), False, 'import argparse\n'), ((2662, 2719), 'realnvp.RealNVP', 'realnvp.RealNVP', ([], {'datainfo': 'data_info', 'prior': 'prior', 'hps': 'hps'}), '(datainfo=data_info, prior=prior, hps=hps)\n', (2677, 2719), False, 'import realnvp, data_utils\n'), ((3341, 3370), 'data_utils.logit_transform', 'data_utils.logit_transform', (['x'], {}), '(x)\n', (3367, 3370), False, 'import realnvp, data_utils\n'), ((4716, 4731), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4729, 4731), False, 'import torch, torchvision\n'), ((5910, 5959), 'data_utils.logit_transform', 'data_utils.logit_transform', (['samples'], {'reverse': '(True)'}), '(samples, reverse=True)\n', (5936, 5959), False, 'import realnvp, data_utils\n'), ((6216, 6283), 'torch.save', 'torch.save', (['flow', "('./models/' + dataset + '/' + filename + '.model')"], {}), "(flow, './models/' + dataset + '/' + filename + '.model')\n", (6226, 6283), False, 'import torch, torchvision\n'), 
((2593, 2610), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (2605, 2610), False, 'import torch, torchvision\n'), ((2622, 2639), 'torch.tensor', 'torch.tensor', (['(1.0)'], {}), '(1.0)\n', (2634, 2639), False, 'import torch, torchvision\n'), ((4413, 4424), 'numpy.log', 'np.log', (['(2.0)'], {}), '(2.0)\n', (4419, 4424), True, 'import numpy as np\n'), ((4851, 4880), 'data_utils.logit_transform', 'data_utils.logit_transform', (['x'], {}), '(x)\n', (4877, 4880), False, 'import realnvp, data_utils\n'), ((5989, 6013), 'torchvision.utils.make_grid', 'utils.make_grid', (['samples'], {}), '(samples)\n', (6004, 6013), True, 'import torchvision.utils as utils\n'), ((4355, 4368), 'numpy.log', 'np.log', (['(256.0)'], {}), '(256.0)\n', (4361, 4368), True, 'import numpy as np\n'), ((5531, 5542), 'numpy.log', 'np.log', (['(2.0)'], {}), '(2.0)\n', (5537, 5542), True, 'import numpy as np\n'), ((3982, 3993), 'numpy.log', 'np.log', (['(2.0)'], {}), '(2.0)\n', (3988, 3993), True, 'import numpy as np\n'), ((5470, 5483), 'numpy.log', 'np.log', (['(256.0)'], {}), '(256.0)\n', (5476, 5483), True, 'import numpy as np\n'), ((3917, 3930), 'numpy.log', 'np.log', (['(256.0)'], {}), '(256.0)\n', (3923, 3930), True, 'import numpy as np\n')] |
import numpy as np
from copy import deepcopy
import config
class Node:
    '''
    A node in a Monte-Carlo tree search.

    Attributes
    ----------
    board : Board
        Game state held by this node.
    policy : float or numpy.ndarray
        Prior probability of reaching this node.
    cput : float
        Exploration constant; always taken from config.C_PUT.
    w : float
        Total value accumulated over all simulations through this node.
    n : int
        Number of simulations that visited this node.
    children : list of Node or None
        Expanded child nodes; None until this node is expanded.
    root : bool
        True if this node is the search root (Dirichlet noise is mixed
        into the root policy).
    history : set
        String representations of boards already seen in this match.
    dammy : bool
        True when this node's board already appeared in history; such
        duplicate nodes are never selected.
    eps : float
        Noise mixing weight (config.EPS).
    alpha : float
        Dirichlet concentration parameter (config.ALPHA).
    '''
    def __init__(self, board, policy, cpuct, root = False, history = None, dammy = False):
        '''
        Parameters
        ----------
        board : Board
            Board class defined in game.py.
        policy : numpy.ndarray
            Monte-Carlo's policy only for movable actions.
        cpuct : float
            Accepted for API compatibility, but NOTE: the exploration
            constant actually used is config.C_PUT (historical behavior
            kept to avoid changing search results).
        root : bool
            Whether this node is the root one.
        history : set or None
            Board history containing board string expressions.
        dammy : bool
            Is this instance dammy (has this node's board already appeared)?

        Returns
        -------
        None.
        '''
        self.board = board
        self.policy = policy
        # The cpuct argument is intentionally ignored; see docstring above.
        self.cput = config.C_PUT
        self.w = 0
        self.n = 0
        self.children = None
        self.root = root
        self.history = set() if history is None else history
        self.dammy = dammy
        self.eps = config.EPS
        self.alpha = config.ALPHA

    def _select(self):
        '''Return the child with the highest PUCT score.

        Dummy children (already-seen boards) score -inf so they are never
        chosen; unvisited children are ranked by their prior alone.

        Returns
        -------
        node : Node
            The highest-scoring child.
        '''
        t = sum(c.n for c in self.children)
        sq = np.sqrt(t)
        values = []
        for c in self.children:
            if c.dammy:
                v = -np.inf
            elif c.n == 0:
                # Unvisited: prior-driven exploration term only.
                v = self.cput * c.policy * sq / (1 + c.n)
            else:
                # Negated mean value (opponent's perspective) + exploration.
                v = -c.w / c.n + self.cput * c.policy * sq / (1 + c.n)
            values.append(v)
        node = self.children[np.argmax(values)]
        return node

    def _select_without_net(self):
        '''Return the child with the highest UCB1 value.

        Any unvisited, non-dummy child is returned immediately so every
        child gets at least one visit before UCB1 ranking applies.

        Returns
        -------
        node : Node
            The selected child.
        '''
        for c in self.children:
            if c.n == 0 and not c.dammy:
                return c
        total = sum(c.n for c in self.children)
        values = [-c.w / c.n + pow(2 * np.log(total) / c.n, 0.5)
                  if not c.dammy else -np.inf for c in self.children]
        node = self.children[np.argmax(values)]
        return node

    def _random_action(self, board):
        '''Return a uniformly random legal action for ``board``.

        Parameters
        ----------
        board : Board
            Board class defined in game.py.

        Returns
        -------
        action : int
            Action index chosen randomly among the legal moves.
        '''
        takables = board.takable_actions()
        action = takables[np.random.randint(0, len(takables))]
        return action

    def _playout(self, board):
        '''Play random moves until the game ends.

        Parameters
        ----------
        board : Board
            Board class defined in game.py.

        Returns
        -------
        value : int
            -1 for a loss, 0 for a draw, negated recursively so the value
            is always from the current player's perspective.
        '''
        if board.is_lose():
            return -1
        if board.is_draw():
            return 0
        old_board = deepcopy(board)
        return -self._playout(old_board.next_board(self._random_action(board)))

    def _predict(self, net, board):
        '''Run the neural network on ``board``.

        Parameters
        ----------
        net : Neural Network
            Network wrapper defined in network.py with a ``model`` attribute.
        board : Board
            Board class defined in game.py.

        Returns
        -------
        policy : numpy.ndarray
            Network policy restricted to the legal actions and normalized.
        value : float
            Network value prediction for the position.
        '''
        x = board.reshape_input()
        y = net.model.predict(x, batch_size=1)
        takables = board.takable_actions()
        policy = y[0][0][takables]
        # Guard against an all-zero policy to avoid division by zero.
        policy /= sum(policy) if sum(policy) else 1
        value = y[1][0][0]
        return policy, value

    def eval(self, net):
        '''Evaluate this node with the network, expanding it on first visit.

        Parameters
        ----------
        net : Neural Network
            Network wrapper defined in network.py.

        Returns
        -------
        value : float
            Backed-up value of this evaluation.
        '''
        if self.board.is_over():
            value = -1 if self.board.is_lose() else 0
            self.w += value
            self.n += 1
            return value
        if self.children is None:
            # First visit: predict, optionally add root noise, then expand.
            policy, value = self._predict(net, self.board)
            noises = np.random.dirichlet(alpha=[self.alpha] * len(policy))
            if self.root:
                policy = (1 - self.eps) * policy + self.eps * noises
            self.w += value
            self.n += 1
            takables = self.board.takable_actions()
            self.children = []
            idx = 0
            for a, p in zip(takables, policy):
                old_board = deepcopy(self.board)
                next_board = old_board.next_board(a)
                s = next_board.board_to_str()
                if s in self.history:
                    # Repeated position: zero out its prior and mark dummy.
                    policy[idx] = -pow(10, 10)
                    self.children.append(Node(next_board, p, self.cput, history=self.history, dammy=True))
                else:
                    self.history.add(s)
                    self.children.append(Node(next_board, p, self.cput, history=self.history))
                idx += 1
        else:
            # Already expanded: descend into the best child and negate.
            value = -self._select().eval(net)
            self.w += value
            self.n += 1
        return value

    def eval_without_net(self):
        '''Evaluate this node by random playout (no neural network).

        The node is expanded lazily once it has been visited 10 times.

        Returns
        -------
        value : int
            Backed-up playout value.
        '''
        if self.board.is_over():
            value = -1 if self.board.is_lose() else 0
            self.w += value
            self.n += 1
            return value
        if self.children is None:
            value = self._playout(self.board)
            self.w += value
            self.n += 1
            if self.n == 10:
                takables = self.board.takable_actions()
                self.children = []
                for a in takables:
                    old_board = deepcopy(self.board)
                    next_board = old_board.next_board(a)
                    s = next_board.board_to_str()
                    if s in self.history:
                        self.children.append(Node(next_board, 0, self.cput, history=self.history, dammy=True))
                    else:
                        self.history.add(s)
                        # BUG FIX: history was previously passed positionally
                        # and landed in the ``root`` parameter, so new children
                        # started with an empty history and a truthy root flag.
                        self.children.append(Node(next_board, 0, self.cput, history=self.history))
            return value
        else:
            value = -self._select_without_net().eval_without_net()
            self.w += value
            self.n += 1
            return value
| [
"numpy.log",
"numpy.sqrt",
"numpy.argmax",
"copy.deepcopy"
] | [((1910, 1920), 'numpy.sqrt', 'np.sqrt', (['t'], {}), '(t)\n', (1917, 1920), True, 'import numpy as np\n'), ((3617, 3632), 'copy.deepcopy', 'deepcopy', (['board'], {}), '(board)\n', (3625, 3632), False, 'from copy import deepcopy\n'), ((2258, 2275), 'numpy.argmax', 'np.argmax', (['values'], {}), '(values)\n', (2267, 2275), True, 'import numpy as np\n'), ((2791, 2808), 'numpy.argmax', 'np.argmax', (['values'], {}), '(values)\n', (2800, 2808), True, 'import numpy as np\n'), ((5549, 5569), 'copy.deepcopy', 'deepcopy', (['self.board'], {}), '(self.board)\n', (5557, 5569), False, 'from copy import deepcopy\n'), ((6903, 6923), 'copy.deepcopy', 'deepcopy', (['self.board'], {}), '(self.board)\n', (6911, 6923), False, 'from copy import deepcopy\n'), ((2684, 2697), 'numpy.log', 'np.log', (['total'], {}), '(total)\n', (2690, 2697), True, 'import numpy as np\n')] |
import os
import sys
import numpy as np
import torch
import argparse
import _pickle as pkl
import matplotlib.pylab as plt
import seaborn as sea
sea.set_style("whitegrid")
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
from random import uniform
from .Protein import Protein
from .Complex import Complex
from .DockerGPU import DockerGPU
from tqdm import tqdm
class Interaction:
    """A docked receptor/ligand pair together with its full score landscape.

    Wraps a docker's 6D score grid and exposes funnel detection, a
    binding-energy estimate and a funnel plot.

    Attributes
    ----------
    docker : DockerGPU
        The docker that produced ``scores``.
    scores : torch.Tensor
        Score grid over all sampled rotations/translations.
    min_score : float
        Best (lowest) score found.
    cplx : Complex
        The minimum-score docked complex.
    ind : tensor/int
        Index of the minimum-score conformation in ``scores``.
    """
    def __init__(self, docker, scores, receptor, ligand):
        self.docker = docker
        self.scores = scores
        # Best conformation over the whole score grid.
        self.min_score, self.cplx, self.ind = self.docker.get_conformation(self.scores, receptor, ligand)

    @classmethod
    def with_docker(cls, docker, receptor, ligand):
        """Alternate constructor: run the global docking first, then wrap it."""
        scores = docker.dock_global(receptor, ligand)
        return cls(docker, scores, receptor, ligand)

    def find_funnels(self, num_funnels=2):
        """Greedily extract up to ``num_funnels`` energy funnels.

        Each iteration takes the current best conformation, collects all
        conformations within 8.0 RMSD of it whose score is below 90% of the
        funnel minimum, then masks that RMSD neighborhood out before
        searching for the next funnel.

        Returns
        -------
        funnels : list of (torch.Tensor, torch.Tensor)
            (rmsd, score) pairs for each funnel.
        complexes : list of Complex
            The minimum-score complex of each funnel.
        """
        # RMSD of every grid conformation w.r.t. the global minimum.
        rmsd_all = self.cplx.ligand.grid_rmsd(self.docker.angles, self.cplx.translation, torch.tensor([self.cplx.rotation])).to(device='cuda')
        funnels = []
        complexes = []
        funnel_scores = self.scores.clone()
        for i in range(num_funnels):
            funnel_min_score, cplx, ind = self.docker.get_conformation(funnel_scores, self.cplx.receptor, self.cplx.ligand)
            funnel_trans = cplx.translation
            funnel_rot = torch.tensor([cplx.rotation])
            # RMSD of every conformation w.r.t. this funnel's minimum.
            rmsd_grid = self.cplx.ligand.grid_rmsd(self.docker.angles, funnel_trans, funnel_rot).to(device='cuda')
            mask_scores_clus = funnel_scores < 0.9*funnel_min_score
            mask_rmsd = rmsd_grid < 8.0
            mask_funnel = torch.logical_and(mask_rmsd, mask_scores_clus)
            funnel_rmsd = rmsd_all.masked_select(mask_funnel).clone()
            funnel_sc = funnel_scores.masked_select(mask_funnel).clone()
            # BUG FIX: original tested funnel_rmsd.size(0) twice; the second
            # operand was meant to be funnel_sc.
            if funnel_rmsd.size(0) == 0 or funnel_sc.size(0) == 0:
                break
            # Remove this funnel's RMSD neighborhood from further searches.
            funnel_scores = funnel_scores.masked_fill(mask_rmsd, 0.0)
            complexes.append(cplx)
            funnels.append((funnel_rmsd, funnel_sc))

        return funnels, complexes

    def est_binding(self):
        """Estimate the binding energy via a stable log-sum-exp over scores.

        Computes -log(sum(exp(-(E - E_min)))) + E_min, i.e. the free-energy
        style aggregate of the whole score landscape.
        """
        # Renamed from ``min`` to avoid shadowing the builtin.
        min_score = torch.min(self.scores).item()
        Znorm = -torch.log(torch.sum(torch.exp(-(self.scores - min_score)))).item()
        return Znorm + min_score

    def plot_funnels(self, num_funnels=2, cell_size=90, ax=None, im_offset=(70,25), plot_conformations=True):
        """Scatter-plot score vs RMSD and highlight the detected funnels.

        Parameters
        ----------
        num_funnels : int
            Unused here; find_funnels() is called with its own default.
        cell_size : int
            Canvas size for the per-funnel complex thumbnails.
        ax : matplotlib Axes or None
            Axes to draw on; a new subplot is created when None.
        im_offset : tuple
            Offset (points) of the thumbnail annotation boxes.
        plot_conformations : bool
            Whether to draw the complex thumbnails.
        """
        mask_scores = self.scores < -10
        rmsd_grid = self.cplx.ligand.grid_rmsd(self.docker.angles, self.cplx.translation, torch.tensor([self.cplx.rotation])).to(device='cuda')
        all_rmsd = rmsd_grid.masked_select(mask_scores)
        all_sc = self.scores.masked_select(mask_scores)
        funnels, complexes = self.find_funnels()

        if ax is None:
            ax = plt.subplot(111)
        prev_rmsd = None
        ax.scatter(all_rmsd.cpu().numpy(), all_sc.cpu().numpy())
        for i, funnel in enumerate(funnels):
            cplx_img = complexes[i].get_canvas(cell_size)
            rmsds = funnel[0].cpu().numpy()
            scores = funnel[1].cpu().numpy()
            ax.scatter(rmsds, scores, label=f'Funnel:{i}')
            if plot_conformations:
                im = OffsetImage(cplx_img.copy(), zoom=1.0, cmap='gist_heat_r')
                # Nudge the thumbnail sideways when two funnels are close in RMSD.
                if not(prev_rmsd is None):
                    if np.abs(rmsds[0] - prev_rmsd) < 10:
                        im_offset = (im_offset[0] + 60, im_offset[1])
                ab = AnnotationBbox(im, (rmsds[0], scores[0]),
                        xybox=im_offset,
                        xycoords='data',
                        boxcoords="offset points",
                        pad=0.3,
                        arrowprops=dict(arrowstyle="->",color='black',lw=2.5))
                ax.add_artist(ab)
            prev_rmsd = rmsds[0]
def test_funnels():
    """Demo: dock two randomly generated concave proteins globally and
    plot the resulting score funnels, printing predicted vs ground truth."""
    receptor = Protein.generateConcave(size=50, alpha=0.95, num_points = 100)
    ligand = Protein.generateConcave(size=50, alpha=0.95, num_points = 100)
    ground_truth = Complex.generate(receptor, ligand)
    cor_score = ground_truth.score(boundary_size=3, a00=1.0, a11=0.4, a10=-1.0)

    # Exhaustive rigid docking over 360 rotations.
    docker = DockerGPU(num_angles=360, boundary_size=3, a00=1.0, a11=0.4, a10=-1.0)
    scores = docker.dock_global(ground_truth.receptor, ground_truth.ligand)
    score, cplx_docked, ind = docker.get_conformation(scores, ground_truth.receptor, ground_truth.ligand)
    docked_score = cplx_docked.score(boundary_size=3, a00=1.0, a11=0.4, a10=-1.0)

    print('Predicted:')
    print(f'Score:{score}/{docked_score}', 'Translation:', cplx_docked.translation, 'Rotation:', cplx_docked.rotation)
    print('Correct:')
    print('Score:', cor_score, 'Translation:', ground_truth.translation, 'Rotation:', ground_truth.rotation)

    plt.figure(figsize=(12,6))
    Interaction(docker, scores, cplx_docked.receptor, cplx_docked.ligand).plot_funnels()
    plt.show()
if __name__=='__main__':
test_funnels() | [
"numpy.abs",
"matplotlib.pylab.figure",
"torch.exp",
"torch.min",
"seaborn.set_style",
"torch.tensor",
"matplotlib.pylab.show",
"matplotlib.pylab.subplot",
"torch.logical_and"
] | [((145, 171), 'seaborn.set_style', 'sea.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (158, 171), True, 'import seaborn as sea\n'), ((4156, 4183), 'matplotlib.pylab.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (4166, 4183), True, 'import matplotlib.pylab as plt\n'), ((4267, 4277), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (4275, 4277), True, 'import matplotlib.pylab as plt\n'), ((1210, 1239), 'torch.tensor', 'torch.tensor', (['[cplx.rotation]'], {}), '([cplx.rotation])\n', (1222, 1239), False, 'import torch\n'), ((1455, 1501), 'torch.logical_and', 'torch.logical_and', (['mask_rmsd', 'mask_scores_clus'], {}), '(mask_rmsd, mask_scores_clus)\n', (1472, 1501), False, 'import torch\n'), ((2517, 2533), 'matplotlib.pylab.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (2528, 2533), True, 'import matplotlib.pylab as plt\n'), ((1900, 1922), 'torch.min', 'torch.min', (['self.scores'], {}), '(self.scores)\n', (1909, 1922), False, 'import torch\n'), ((883, 917), 'torch.tensor', 'torch.tensor', (['[self.cplx.rotation]'], {}), '([self.cplx.rotation])\n', (895, 917), False, 'import torch\n'), ((2288, 2322), 'torch.tensor', 'torch.tensor', (['[self.cplx.rotation]'], {}), '([self.cplx.rotation])\n', (2300, 2322), False, 'import torch\n'), ((2986, 3014), 'numpy.abs', 'np.abs', (['(rmsds[0] - prev_rmsd)'], {}), '(rmsds[0] - prev_rmsd)\n', (2992, 3014), True, 'import numpy as np\n'), ((1961, 1992), 'torch.exp', 'torch.exp', (['(-(self.scores - min))'], {}), '(-(self.scores - min))\n', (1970, 1992), False, 'import torch\n')] |
from osim.env import L2M2019Env
from osim.control.osim_loco_reflex_song2019 import OsimReflexCtrl
"""
imported package dir: E:\\miniconda3_64\\envs\\osim_onn\\lib\\site-packages\\osim'
"""
from onn_torch_gd import Neural_Network
print ('onn imported')
from sklearn.datasets import make_classification, make_circles
import torch
from sklearn.metrics import accuracy_score, balanced_accuracy_score,mean_squared_error
import numpy as np
from torch.utils.tensorboard import SummaryWriter
import argparse
import datetime
import torch.nn as nn
import torch.optim as optim
from statsmodels.tsa.stattools import adfuller, kpss
import pandas as pd
# Construct the argument parser
ap = argparse.ArgumentParser()
def str2bool(v):
    """Coerce a command-line flag value to bool.

    Accepts booleans unchanged, recognizes the usual truthy/falsy spellings
    (case-insensitively), and raises argparse.ArgumentTypeError otherwise.
    """
    if isinstance(v, bool):
        return v
    normalized = v.lower()
    if normalized in ('yes', 'true', 't', 'y', '1'):
        return True
    if normalized in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
# Add the arguments to the parser
ap.add_argument("-train",type=str2bool,required=True,
   help="specify mode in training or test, expecting True or False")
ap.add_argument("-load", type=str2bool,required=True,
   help="specify load mode from saved model or not, expecting True or False")
args = vars(ap.parse_args())

# TensorBoard run directory, time-stamped so runs never collide.
now_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
PATH = 'runs/L2M2019_onn_torch_gd/'+now_time
writer = SummaryWriter(PATH )

# Simulation settings: 2D musculoskeletal model, 10 s at 10 ms steps.
mode = '2D'
difficulty = 3
visualize = False
seed = None
sim_dt = 0.01
sim_t = 10
timstep_limit = int(round(sim_t/sim_dt))
# Initial pose for the musculoskeletal model: velocities, pelvis/trunk
# state, then right and left leg joint angles (radians).
INIT_POSE = np.array([
    1.699999999999999956e+00, # forward speed
    .5, # rightward speed
    9.023245653983965608e-01, # pelvis height
    2.012303881285582852e-01, # trunk lean
    0*np.pi/180, # [right] hip adduct
    -6.952390849304798115e-01, # hip flex
    -3.231075259785813891e-01, # knee extend
    1.709011708233401095e-01, # ankle flex
    0*np.pi/180, # [left] hip adduct
    -5.282323914341899296e-02, # hip flex
    -8.041966456860847323e-01, # knee extend
    -1.745329251994329478e-01]) # ankle flex
# Select reflex-controller parameters for the chosen model dimensionality.
# BUG FIX: the original used `mode is '2D'`, which compares string
# *identity* and only works by CPython interning accident; use equality.
if mode == '2D':
    params = np.loadtxt('params_2D.txt')
elif mode == '3D':
    params = np.loadtxt('params_3D_init.txt')
locoCtrl = OsimReflexCtrl(mode=mode, dt=sim_dt)
locoCtrl.set_control_params(params)
# Build the learning-to-move environment and reset it to the initial pose.
env = L2M2019Env(visualize=visualize, seed=seed, difficulty=difficulty)
env.change_model(model=mode, difficulty=difficulty, seed=seed)
obs_dict = env.reset(project=True, seed=seed,
                     obs_as_dict=True, init_pose=INIT_POSE)
env.spec.timestep_limit = timstep_limit
# Episode accumulators.
total_reward = 0
t = 0
i = 0

# initiate onn network
#onn_network = ONN(features_size=2, max_num_hidden_layers=5,
#                  qtd_neuron_per_hidden_layer=10, n_classes=2,loss_fun = 'mse')
onn_network = Neural_Network()
print (onn_network)
criterion = nn.MSELoss()
# create your optimizer
optimizer = optim.SGD(onn_network.parameters(), lr=0.01)

# Optionally resume from a previously saved state dict.
load_file = 'state_dict_model.pt'
if args['load'] == True:
    onn_network.load_state_dict(torch.load(PATH +'/'+ load_file))
    print('%s loaded'%load_file)
else:
    print('trained from scratch')

if args['train'] == True:
    print ('Performing traning mode')
else:
    print('Not traing mode')

#hip_angle = np.zeros((timstep_limit, 1))
#knee_angle = np.zeros((timstep_limit, 1))
#ankle_angle = np.zeros((timstep_limit, 1))
#r_foot_force = np.zeros((timstep_limit, 1))
#l_foot_force = np.zeros((timstep_limit, 1))
# Single-step (1, 1) buffers for the current joint angles and ground
# reaction forces, overwritten every simulation step.
hip_abd = np.zeros((1, 1))
hip_angle = np.zeros((1, 1))
knee_angle = np.zeros((1, 1))
ankle_angle = np.zeros((1, 1))
r_foot_force = np.zeros((1, 1))
l_foot_force = np.zeros((1, 1))

y_pred_list = []
force_ind = []
X_list = []
y_list = []
acc_list = []
# timestep = 300
running_loss = 0.0
# Per-step log of right/left vertical ground reaction forces.
df = pd.DataFrame(index=range(timstep_limit),columns=['grf_r','grf_l'])
# Main loop: step the reflex controller through the environment, and train
# the online network to predict ground reaction forces from joint angles.
for i in range(timstep_limit):
    t += sim_dt
    # locoCtrl.set_control_params(params)
    action = locoCtrl.update(obs_dict)
    # done if either the pelvis of the human model falls below 0.6 meters or when it reaches 10 seconds (i=1000)
    obs_dict, reward, done, info = env.step(
        action, project=True, obs_as_dict=True)

    # hip_angle.append(-obs_dict['r_leg']['joint']['hip'])
    # knee_angle.append(-obs_dict['r_leg']['joint']['knee'])
    # ankle_angle.append(-obs_dict['r_leg']['joint']['ankle'])

    # hip_angle[i, :] = -obs_dict['r_leg']['joint']['hip']
    # knee_angle[i, :] = -obs_dict['r_leg']['joint']['knee']
    # ankle_angle[i, :] = -obs_dict['r_leg']['joint']['ankle']

    # Capture current right-leg joint angles (sign-flipped) and both feet's
    # vertical ground reaction forces.
    hip_abd[0, :] = -obs_dict['r_leg']['joint']['hip_abd']
    hip_angle[0, :] = -obs_dict['r_leg']['joint']['hip']
    knee_angle[0, :] = -obs_dict['r_leg']['joint']['knee']
    ankle_angle[0, :] = -obs_dict['r_leg']['joint']['ankle']
    r_foot_force[0, :] = obs_dict['r_leg']['ground_reaction_forces'][2]
    l_foot_force[0, :] = obs_dict['l_leg']['ground_reaction_forces'][2]
    df.loc[i,['grf_r']] = r_foot_force[0, :]
    df.loc[i,['grf_l']] = l_foot_force[0, :]

    # if obs_dict['r_leg']['ground_reaction_forces'][2] > 0:
    #     y = np.array([1])
    # else:
    #     y = np.array([0])

    # Features: 4 joint angles; targets: right/left vertical GRF.
    X = np.array([hip_abd[0, :],hip_angle[0, :] ,knee_angle[0, :] ,ankle_angle[0, :]]).T
    y = np.array([r_foot_force[0,:],l_foot_force[0,:]]).T
    # X = np.array([r_foot_force[0,:],l_foot_force[0,:]]).T
    X = torch.tensor(X, dtype=torch.float)
    y = torch.tensor(y, dtype=torch.float)

    # X_list.append(X)
    # y_list.append(y)
    # predictions = onn_network.predict(X)

    # One online learning step on the current sample.
    optimizer.zero_grad()
    output = onn_network(X)
    loss = criterion(output,y)
    running_loss += loss

    # writer.add_scalar('training loss lr=0.001',loss,i)
    writer.add_scalars(f'ground reaction force', {'true':r_foot_force[0,:],'predicted': output[0,0],}, i)

    # Log/print the mean loss every 200 steps, then reset the accumulator.
    if i % 200 == 0:
        writer.add_scalar('training loss_new',
                        running_loss / 200,
                        i)
        print("Online error on %d steps: %.4f"%(i,running_loss / 200))
        running_loss = 0.0

    # Backprop and checkpoint only in training mode.
    if args['train'] == True:
        loss.backward()
        optimizer.step()
        torch.save(onn_network.state_dict(), PATH+'/'+load_file)

    # if len(X_list) % 10 == 0:
    #     # X,y = split_sequences(X_list,y_list,n_steps = 10)
    #
    #     X = np.array(X_list).reshape((1,n_steps_in,3))
    #
    #     #X = np.array(X_list).reshape((1,n_steps_in*3))
    #
    #     X = tf.convert_to_tensor(X, dtype=tf.float32)
    #
    #
    #     with tf.GradientTape() as tape:
    #
    #         # Run the forward pass of the layer.
    #         # The operations that the layer applies
    #         # to its inputs are going to be recorded
    #         # on the GradientTape.
    #         logits = model(X, training=True)  # Logits for this minibatch
    #
    #         # Compute the loss value for this minibatch.
    #         loss_value = loss_fn(y, logits)
    #
    #
    #
    #     # Use the gradient tape to automatically retrieve
    #     # the gradients of the trainable variables with respect to the loss.
    #     grads = tape.gradient(loss_value, model.trainable_weights)
    #
    #     # Run one step of gradient descent by updating
    #     # the value of the variables to minimize the loss.
    #     optimizer.apply_gradients(zip(grads, model.trainable_weights))
    #
    #     y_pred= model.predict(X)
    #
    #     predictions = onn_network.predict(X_test)
    #
    #     if i % 20 == 0:
    #         print(
    #             "Training loss at step %d: %.4f"
    #             % (i, float(loss_value))
    #         )
    #
    #
    #
    #
    #     if loss_value < 0.01:
    #         print ('loss = ', loss_value.numpy())
    #         force_ind.append(i)
    #         obs_dict['r_leg']['ground_reaction_forces'][2] = y_pred[0,0]
    #         obs_dict['l_leg']['ground_reaction_forces'][2] = y_pred[0,1]
    #
    #         y_pred_list.append(y_pred[0])
    #
    #         X_list.pop(0)
    #         y_list.pop(0)

    total_reward += reward
    if done:
        break
# Report the episode result and persist the logged ground reaction forces.
print('    score={} time={}sec'.format(total_reward, t))
df.to_csv('ground_reaction_force.csv',float_format='%.4f')

# ADF Test — stationarity of the right-foot GRF series
# (result: statistic, p-value, lags, nobs, critical-values dict, ...).
result = adfuller(df['grf_r'].values, autolag='AIC')
print(f'ADF Statistic: {result[0]}')
print(f'p-value: {result[1]}')
# BUG FIX: header was misspelled 'Critial' and printed once per entry;
# print the (corrected) header once, then the values.
print('Critical Values:')
for key, value in result[4].items():
    print(f'   {key}, {value}')

# KPSS Test — complementary stationarity test on the left-foot GRF
# (result: statistic, p-value, lags, critical-values dict).
result = kpss(df['grf_l'].values, regression='c')
print('\nKPSS Statistic: %f' % result[0])
print('p-value: %f' % result[1])
print('Critical Values:')
for key, value in result[3].items():
    print(f'   {key}, {value}')
#print('toatal_acc : ', np.mean(np.array(acc_list)))
# concat_arr = np.concatenate((hip_angle,knee_angle,ankle_angle,r_foot_force,l_foot_force),axis= 1)
# fig,ax=plt.subplots(4,1)
# ax[0].plot(np.arange(timstep_limit),hip_angle)
# ax[1].plot(np.arange(timstep_limit),knee_angle)
# ax[2].plot(np.arange(timstep_limit),ankle_angle)
# ax[3].plot(np.arange(timstep_limit),r_foot_force)
# plt.show() | [
"torch.utils.tensorboard.SummaryWriter",
"argparse.ArgumentParser",
"statsmodels.tsa.stattools.adfuller",
"statsmodels.tsa.stattools.kpss",
"torch.load",
"osim.control.osim_loco_reflex_song2019.OsimReflexCtrl",
"osim.env.L2M2019Env",
"argparse.ArgumentTypeError",
"numpy.array",
"torch.nn.MSELoss",... | [((731, 756), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (754, 756), False, 'import argparse\n'), ((1504, 1523), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['PATH'], {}), '(PATH)\n', (1517, 1523), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((1676, 1912), 'numpy.array', 'np.array', (['[1.7, 0.5, 0.9023245653983966, 0.20123038812855829, 0 * np.pi / 180, -\n 0.6952390849304798, -0.3231075259785814, 0.1709011708233401, 0 * np.pi /\n 180, -0.05282323914341899, -0.8041966456860847, -0.17453292519943295]'], {}), '([1.7, 0.5, 0.9023245653983966, 0.20123038812855829, 0 * np.pi / \n 180, -0.6952390849304798, -0.3231075259785814, 0.1709011708233401, 0 *\n np.pi / 180, -0.05282323914341899, -0.8041966456860847, -\n 0.17453292519943295])\n', (1684, 1912), True, 'import numpy as np\n'), ((2352, 2388), 'osim.control.osim_loco_reflex_song2019.OsimReflexCtrl', 'OsimReflexCtrl', ([], {'mode': 'mode', 'dt': 'sim_dt'}), '(mode=mode, dt=sim_dt)\n', (2366, 2388), False, 'from osim.control.osim_loco_reflex_song2019 import OsimReflexCtrl\n'), ((2435, 2500), 'osim.env.L2M2019Env', 'L2M2019Env', ([], {'visualize': 'visualize', 'seed': 'seed', 'difficulty': 'difficulty'}), '(visualize=visualize, seed=seed, difficulty=difficulty)\n', (2445, 2500), False, 'from osim.env import L2M2019Env\n'), ((2942, 2958), 'onn_torch_gd.Neural_Network', 'Neural_Network', ([], {}), '()\n', (2956, 2958), False, 'from onn_torch_gd import Neural_Network\n'), ((2995, 3007), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (3005, 3007), True, 'import torch.nn as nn\n'), ((3650, 3666), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (3658, 3666), True, 'import numpy as np\n'), ((3680, 3696), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (3688, 3696), True, 'import numpy as np\n'), ((3711, 3727), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (3719, 3727), True, 'import numpy as np\n'), 
((3743, 3759), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (3751, 3759), True, 'import numpy as np\n'), ((3778, 3794), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (3786, 3794), True, 'import numpy as np\n'), ((3811, 3827), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (3819, 3827), True, 'import numpy as np\n'), ((8378, 8421), 'statsmodels.tsa.stattools.adfuller', 'adfuller', (["df['grf_r'].values"], {'autolag': '"""AIC"""'}), "(df['grf_r'].values, autolag='AIC')\n", (8386, 8421), False, 'from statsmodels.tsa.stattools import adfuller, kpss\n'), ((8618, 8658), 'statsmodels.tsa.stattools.kpss', 'kpss', (["df['grf_l'].values"], {'regression': '"""c"""'}), "(df['grf_l'].values, regression='c')\n", (8622, 8658), False, 'from statsmodels.tsa.stattools import adfuller, kpss\n'), ((2243, 2270), 'numpy.loadtxt', 'np.loadtxt', (['"""params_2D.txt"""'], {}), "('params_2D.txt')\n", (2253, 2270), True, 'import numpy as np\n'), ((5577, 5611), 'torch.tensor', 'torch.tensor', (['X'], {'dtype': 'torch.float'}), '(X, dtype=torch.float)\n', (5589, 5611), False, 'import torch\n'), ((5621, 5655), 'torch.tensor', 'torch.tensor', (['y'], {'dtype': 'torch.float'}), '(y, dtype=torch.float)\n', (5633, 5655), False, 'import torch\n'), ((1398, 1421), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1419, 1421), False, 'import datetime\n'), ((2305, 2337), 'numpy.loadtxt', 'np.loadtxt', (['"""params_3D_init.txt"""'], {}), "('params_3D_init.txt')\n", (2315, 2337), True, 'import numpy as np\n'), ((3193, 3227), 'torch.load', 'torch.load', (["(PATH + '/' + load_file)"], {}), "(PATH + '/' + load_file)\n", (3203, 3227), False, 'import torch\n'), ((5368, 5447), 'numpy.array', 'np.array', (['[hip_abd[0, :], hip_angle[0, :], knee_angle[0, :], ankle_angle[0, :]]'], {}), '([hip_abd[0, :], hip_angle[0, :], knee_angle[0, :], ankle_angle[0, :]])\n', (5376, 5447), True, 'import numpy as np\n'), ((5458, 5508), 'numpy.array', 'np.array', 
(['[r_foot_force[0, :], l_foot_force[0, :]]'], {}), '([r_foot_force[0, :], l_foot_force[0, :]])\n', (5466, 5508), True, 'import numpy as np\n'), ((1000, 1053), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""Boolean value expected."""'], {}), "('Boolean value expected.')\n", (1026, 1053), False, 'import argparse\n')] |
#!/usr/bin/env python
"""
Created on 2014-11-10T15:05:21
"""
from __future__ import division, print_function
import sys
try:
import numpy as np
except ImportError:
print('You need numpy installed')
sys.exit(1)
try:
import matplotlib.pyplot as plt
got_mpl = True
except ImportError:
print('You need matplotlib installed to get a plot')
got_mpl = False
__author__ = "<NAME> (github: @mattgiguere)"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
__version__ = '1.0.0'
def blazeFit(wav, spec, maxrms, numcalls=10, curcall=0,
             verbose=False, showplot=False):
    """Fit the continuum of an echelle-spectrum order to model the
    Blaze Function.

    The spectrum is normalized, fit with a 7th-degree polynomial, and
    points falling below an iteration-dependent threshold (spectral
    lines) are masked out before refining the fit recursively.

    Parameters
    ----------
    wav : array_like
        Wavelengths; same length as (and corresponding to) ``spec``.
    spec : array_like
        The spectrum.
    maxrms : float
        Stop iterating once the normalized rms of the residuals
        (spectrum divided by the fit) drops below this value.
    numcalls : int
        Maximum number of recursive refinement iterations.
    curcall : int
        Current recursion depth (internal bookkeeping).
    verbose : bool
        If True, print the iteration, residual rms and threshold.
    showplot : bool
        If True, plot the spectrum, fit and threshold each iteration.

    Returns
    -------
    ndarray
        Polynomial coefficients (highest power first) of the blaze fit.
    """
    # Center the wavelengths about zero and normalize the flux so the
    # polynomial fit is numerically well behaved.
    half_span = (max(wav) - min(wav)) / 2.
    wavcent = wav - min(wav) - half_span
    normspec = spec / max(spec)
    # 7th-degree polynomial fit of the centered, normalized spectrum.
    z = np.polyfit(wavcent, normspec, 7)
    cfit = np.poly1d(z)
    # The rejection threshold sits below the continuum fit; the offset
    # shrinks with each iteration so the mask tightens on the continuum.
    thresh = cfit(wavcent) - (0.5 * (1. / (curcall + 1)))
    if showplot is True:
        plt.plot(wavcent, normspec)        # original spectrum
        plt.plot(wavcent, cfit(wavcent))   # continuum fit
        plt.plot(wavcent, thresh)          # rejection threshold
    # Keep only points above the threshold (continuum candidates).
    mask = np.where(normspec > thresh)[0]
    residrms = np.std(normspec / cfit(wavcent))
    if verbose is True:
        print('now in iteration {0}'.format(curcall))
        print('residrms is now {0:.5f}'.format(residrms))
        print('maxrms is {0})'.format(maxrms))
    # Recurse on the masked (continuum) pixels until either the rms
    # criterion is met or the iteration budget runs out.
    if curcall < numcalls and residrms > maxrms:
        z = blazeFit(wavcent[mask], normspec[mask], maxrms,
                     numcalls=numcalls, curcall=curcall + 1)
    return z
| [
"numpy.polyfit",
"numpy.where",
"matplotlib.pyplot.plot",
"sys.exit",
"numpy.poly1d"
] | [((1737, 1769), 'numpy.polyfit', 'np.polyfit', (['wavcent', 'normspec', '(7)'], {}), '(wavcent, normspec, 7)\n', (1747, 1769), True, 'import numpy as np\n'), ((1843, 1855), 'numpy.poly1d', 'np.poly1d', (['z'], {}), '(z)\n', (1852, 1855), True, 'import numpy as np\n'), ((213, 224), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (221, 224), False, 'import sys\n'), ((2167, 2194), 'matplotlib.pyplot.plot', 'plt.plot', (['wavcent', 'normspec'], {}), '(wavcent, normspec)\n', (2175, 2194), True, 'import matplotlib.pyplot as plt\n'), ((2280, 2305), 'matplotlib.pyplot.plot', 'plt.plot', (['wavcent', 'thresh'], {}), '(wavcent, thresh)\n', (2288, 2305), True, 'import matplotlib.pyplot as plt\n'), ((2318, 2345), 'numpy.where', 'np.where', (['(normspec > thresh)'], {}), '(normspec > thresh)\n', (2326, 2345), True, 'import numpy as np\n')] |
from matplotlib import pyplot as plt
import pickle
import numpy as np
def plot_1d_pointGoals(_file, num_goals=100):
    """Sample ``num_goals`` 1-D goals from N(0, 1), pickle them to
    ``<_file>.pkl`` and save a scatter plot to ``<_file>.png``.

    Parameters
    ----------
    _file : str
        Output path prefix (without extension).
    num_goals : int
        Number of goals to sample.
    """
    goals = np.random.normal(0, 1, size=num_goals)
    # Context manager guarantees the pickle is flushed and closed.
    # (Previous version leaked the file handle and left an
    # ipdb.set_trace() debugging trap that halted every run.)
    with open(_file + '.pkl', 'wb') as fobj:
        pickle.dump(goals, fobj)
    plt.scatter(np.arange(num_goals), goals)
    plt.savefig(_file + '.png')
def plot_2d_pointGoals(_file, num_goals=100):
    """Sample ``num_goals`` 2-D goals from N(0, 1), pickle them to
    ``<_file>.pkl`` and save a scatter plot to ``<_file>.png``.

    Parameters
    ----------
    _file : str
        Output path prefix (without extension).
    num_goals : int
        Number of goals to sample.
    """
    goals = np.random.normal(0, 1, size=(num_goals, 2))
    # Context manager guarantees the pickle is flushed and closed
    # (the previous version never closed the file handle).
    with open(_file + '.pkl', 'wb') as fobj:
        pickle.dump(goals, fobj)
    plt.scatter(goals[:, 0], goals[:, 1])
    plt.savefig(_file + '.png')
# Generate, pickle and plot the 1-D goal set (the 2-D variant is
# commented out immediately below).
plot_1d_pointGoals('1d_point_mean1_v1')
#plot_2d_pointGoals('2d_point_mean1_v2') | [
"numpy.random.normal",
"matplotlib.pyplot.savefig",
"pickle.dump",
"ipdb.set_trace",
"matplotlib.pyplot.scatter",
"numpy.arange"
] | [((168, 206), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': 'num_goals'}), '(0, 1, size=num_goals)\n', (184, 206), True, 'import numpy as np\n'), ((225, 241), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (239, 241), False, 'import ipdb\n'), ((245, 269), 'pickle.dump', 'pickle.dump', (['goals', 'fobj'], {}), '(goals, fobj)\n', (256, 269), False, 'import pickle\n'), ((317, 344), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(_file + '.png')"], {}), "(_file + '.png')\n", (328, 344), True, 'from matplotlib import pyplot as plt\n'), ((437, 480), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': '(num_goals, 2)'}), '(0, 1, size=(num_goals, 2))\n', (453, 480), True, 'import numpy as np\n'), ((484, 508), 'pickle.dump', 'pickle.dump', (['goals', 'fobj'], {}), '(goals, fobj)\n', (495, 508), False, 'import pickle\n'), ((512, 549), 'matplotlib.pyplot.scatter', 'plt.scatter', (['goals[:, 0]', 'goals[:, 1]'], {}), '(goals[:, 0], goals[:, 1])\n', (523, 549), True, 'from matplotlib import pyplot as plt\n'), ((551, 578), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(_file + '.png')"], {}), "(_file + '.png')\n", (562, 578), True, 'from matplotlib import pyplot as plt\n'), ((286, 306), 'numpy.arange', 'np.arange', (['num_goals'], {}), '(num_goals)\n', (295, 306), True, 'import numpy as np\n')] |
from scipy.stats import beta
from matplotlib import pyplot as plt
import numpy as np
def samples(a, b, success, trials, num_episodes=100):
    """Draw samples from the Beta posterior over the success rate.

    The posterior combines a Beta(a, b) prior with a binomial
    likelihood of ``success`` successes in ``trials`` trials.

    :param a: prior shape parameter (alpha)
    :param b: prior shape parameter (beta)
    :param success: num success in the experiments
    :param trials: num trails conducted
    :param num_episodes: num samples to draw from this distribution
    :return: list of ``num_episodes`` posterior samples
    """
    posterior = beta(a + success, b + trials - success)
    # One rvs() call per sample, preserving the sequential draw pattern.
    draws = []
    for _ in range(num_episodes):
        draws.append(posterior.rvs())
    return draws
def stats(nums):
    """Print and return the mean and population variance of ``nums``.

    Parameters
    ----------
    nums : sequence of float
        Sample values; must be non-empty.

    Returns
    -------
    tuple of (float, float)
        ``(mean, variance)``.
    """
    avg = sum(nums) / len(nums)
    # Divide by len(nums): the previous version printed the raw sum of
    # squared deviations and mislabeled it as the variance.
    var = sum((x - avg) ** 2 for x in nums) / len(nums)
    print(avg, var)
    return avg, var
def plots(data, bin_size=20):
    """Draw one histogram subplot per sample set in ``data``, then show.

    Bin edges are ``bin_size`` evenly spaced values normalized to the
    unit interval, matching the [0, 1] support of Beta samples.
    """
    edges = np.arange(0, bin_size, 1) / bin_size  # fixed, normalized bin edges
    total = len(data)
    for idx, sample in enumerate(data):
        plt.subplot(total, 1, idx + 1)
        plt.hist(sample, bins=edges, alpha=0.5)
    plt.show()
'''
The conclusion is better prior requires less trails to converge.
Worse prior requires more trails to converge.
'''
successes = 3
trials = 10
num_episodes = 2000  # samples drawn per posterior to render its shape
bin_size = 100
container = []
# (alpha, beta) prior shapes tried in order: uniform, convex, mass on
# 0-0.2, mass on 0-0.8, and a bell between 0 and 1.
prior_shapes = [(1, 1), (0.5, 0.5), (1.1, 30), (2, 5), (2, 2)]
for a, b in prior_shapes:
    ret = samples(a, b, successes, trials, num_episodes=num_episodes)
    container.append(ret)
    stats(ret)
plots(container, bin_size=bin_size) | [
"matplotlib.pyplot.hist",
"scipy.stats.beta",
"matplotlib.pyplot.subplot",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((432, 471), 'scipy.stats.beta', 'beta', (['(a + success)', '(b + trials - success)'], {}), '(a + success, b + trials - success)\n', (436, 471), False, 'from scipy.stats import beta\n'), ((732, 757), 'numpy.arange', 'np.arange', (['(0)', 'bin_size', '(1)'], {}), '(0, bin_size, 1)\n', (741, 757), True, 'import numpy as np\n'), ((1039, 1049), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1047, 1049), True, 'from matplotlib import pyplot as plt\n'), ((887, 919), 'matplotlib.pyplot.subplot', 'plt.subplot', (['num_plots', '(1)', '(i + 1)'], {}), '(num_plots, 1, i + 1)\n', (898, 919), True, 'from matplotlib import pyplot as plt\n'), ((951, 987), 'matplotlib.pyplot.hist', 'plt.hist', (['nums'], {'bins': 'bins', 'alpha': '(0.5)'}), '(nums, bins=bins, alpha=0.5)\n', (959, 987), True, 'from matplotlib import pyplot as plt\n')] |
#!/usr/bin/python
#
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for computing SSL-HSIC losses."""
import functools
from typing import Any, Dict, List, Optional, Text, Tuple
import haiku as hk
import jax
import jax.numpy as jnp
import mpmath
import numpy as np
def pairwise_distance_square(x: jnp.ndarray,
                             y: jnp.ndarray, maximum=1e10) -> jnp.ndarray:
  """Computes squared pairwise distances between the rows of x and y.

  Expands ||x_i - y_j||^2 = x_i.x_i - 2 x_i.y_j + y_j.y_j, then clamps
  the result to [0, maximum] so tiny negatives from floating-point
  cancellation (and overflow) cannot leak through.

  Args:
    x: [B1, d] array.
    y: [B2, d] array.
    maximum: upper clamp applied to the squared distances.

  Returns:
    Pairwise squared-distance matrix [B1, B2].
  """
  sq_norm_x = jnp.einsum('ij,ij->i', x, x)[:, jnp.newaxis]
  sq_norm_y = jnp.einsum('ij,ij->i', y, y)[jnp.newaxis, :]
  cross = jnp.einsum('ik,jk->ij', x, y)
  dist = sq_norm_x + sq_norm_y - 2 * cross
  # Clamp below at zero (numerical safety) and above at `maximum`.
  return jnp.minimum(jnp.maximum(dist, 0.0), maximum)
def get_label_weights(batch: int) -> Tuple[float, float]:
  """Returns (positive, negative) entries of the centered label kernel.

  Starting from raw weights 1 (same label) and 0 (different label), the
  per-row mean is subtracted twice, mirroring the double centering of
  the label gram matrix.

  Args:
    batch: batch size (number of entries per row of the label matrix).

  Returns:
    Tuple of the centered positive and negative weights.
  """
  pos = jnp.atleast_2d(1.0)
  neg = jnp.atleast_2d(0.0)
  # First centering pass.
  row_mean = (pos + neg * (batch - 1)) / batch
  pos -= row_mean  # jnp arrays: `-=` rebinds, no in-place mutation
  neg -= row_mean
  # Second centering pass (not exactly a no-op in floating point).
  row_mean = (pos + neg * (batch - 1)) / batch
  pos_centered = pos - row_mean
  neg_centered = neg - row_mean
  return pos_centered[0, 0], neg_centered[0, 0]
def compute_prob(n: int, x_range: np.ndarray) -> np.ndarray:
  """Compute the probability mass used to sample random Fourier features.

  The unnormalized density K_{(n-1)/2}(x) * x^{(n-1)/2} (modified Bessel
  function of the second kind) is evaluated with mpmath, then normalized
  to sum to one over `x_range`.

  Args:
    n: feature dimensionality.
    x_range: grid of points at which to evaluate the density.

  Returns:
    np.ndarray of probabilities summing to one over `x_range`.
  """
  probs = [mpmath.besselk((n - 1) / 2, x) * mpmath.power(x, (n - 1) / 2)
           for x in x_range]
  # Hoist the normalizer: the previous version re-evaluated sum(probs)
  # for every element, making normalization O(len(x_range)^2).
  total = sum(probs)
  return np.array([float(p / total) for p in probs])
def imq_amplitude_frequency_and_probs(n: int) -> Tuple[np.ndarray, np.ndarray]:
  """Returns the amplitude grid and sampling probabilities for RFF.

  Args:
    n: feature dimensionality, forwarded to `compute_prob`.

  Returns:
    Tuple of (grid of candidate amplitudes, probability of each one).
  """
  amplitudes = np.linspace(1e-12, 100, 10000)  # int(n * 10 / c)
  return amplitudes, compute_prob(n, amplitudes)
def imq_rff_features(num_features: int, rng: jnp.DeviceArray, x: jnp.ndarray,
c: float, amp: jnp.ndarray,
amp_probs: jnp.ndarray) -> jnp.ndarray:
"""Returns the RFF feature for IMQ kernel with pre-computed amplitude prob."""
d = x.shape[-1]
rng1, rng2 = jax.random.split(rng)
amp = jax.random.choice(rng1, amp, shape=[num_features, 1], p=amp_probs)
directions = jax.random.normal(rng2, shape=(num_features, d))
b = jax.random.uniform(rng2, shape=(1, num_features)) * 2 * jnp.pi
w = directions / jnp.linalg.norm(directions, axis=-1, keepdims=True) * amp
z_x = jnp.sqrt(2 / num_features) * jnp.cos(jnp.matmul(x / c, w.T) + b)
return z_x
def rff_approximate_hsic_xy(list_hiddens: List[jnp.ndarray], w: float,
                            num_features: int, rng: jnp.DeviceArray, c: float,
                            rff_kwargs: Dict[Text, jnp.ndarray]) -> jnp.ndarray:
  """RFF approximation of the unbiased HSIC(X, Y).

  Args:
    list_hiddens: a list of features, one [B, d] array per view.
    w: difference between max and min of the label Y's gram matrix.
    num_features: number of RFF features used for the approximation.
    rng: random seed used for sampling RFF features of the hiddens.
    c: parameter of the inverse multiquadric kernel.
    rff_kwargs: keyword arguments used for sampling frequencies.

  Returns:
    Approximation of HSIC(X, Y) under the inverse multiquadric kernel.
  """
  batch, _ = list_hiddens[0].shape
  num_views = len(list_hiddens)
  acc = jnp.zeros((batch, num_features))       # per-example summed features
  feat_sum = jnp.zeros((1, num_features))      # running sum for grand mean
  n_square = (batch * num_views) ** 2
  for hidden in list_hiddens:
    feats = imq_rff_features(num_features, rng, hidden, c, **rff_kwargs)
    acc += feats
    feat_sum += feats.sum(0, keepdims=True)
  # Unbiased within-pair term minus the squared grand-mean term.
  return w * ((acc ** 2).sum() / (batch * num_views * (num_views - 1)) -
              (feat_sum ** 2).sum() / n_square)
def rff_approximate_hsic_xx(
    list_hiddens: List[jnp.ndarray], num_features: int, rng: jnp.DeviceArray,
    rng_used: jnp.DeviceArray, c: float, rff_kwargs: Dict[Text, jnp.ndarray]
) -> jnp.ndarray:
  """RFF approximation of HSIC(X, X) under the inverse multiquadric kernel.

  Two independent RFF maps (one per random seed) are computed for every
  view; HSIC is approximated from the squared cross-covariance of the
  two centered maps.

  Args:
    list_hiddens: a list of features, one [B, d] array per view.
    num_features: number of RFF features used for the approximation.
    rng: random seed used for sampling the first RFF features.
    rng_used: random seed used for sampling the second RFF features.
    c: parameter of the inverse multiquadric kernel.
    rff_kwargs: keyword arguments used for sampling frequencies.

  Returns:
    Approximation of HSIC(X, X) under the inverse multiquadric kernel.
  """
  feats_a = [imq_rff_features(num_features, rng_used, xs, c, **rff_kwargs)
             for xs in list_hiddens]
  feats_b = [imq_rff_features(num_features, rng, xs, c, **rff_kwargs)
             for xs in list_hiddens]
  num_views = len(list_hiddens)
  mean_a = (functools.reduce(jax.lax.add, feats_a) / len(feats_a)).mean(
      0, keepdims=True)
  mean_b = (functools.reduce(jax.lax.add, feats_b) / len(feats_b)).mean(
      0, keepdims=True)
  cov = jnp.zeros(shape=(num_features, num_features), dtype=jnp.float32)
  for fa, fb in zip(feats_a, feats_b):
    cov += jnp.einsum('ni,nj->ij', fa - mean_a, fb - mean_b)
  # `fa` leaks from the loop; its leading dim is the per-view batch size
  # (all views are assumed to share it, as in the original).
  return (cov ** 2).sum() / ((fa.shape[0] * num_views) ** 2)
class HSICLoss(hk.Module):
  """SSL-HSIC loss.

  Maximizes the RFF approximation of HSIC(X, Y) (dependence between the
  hidden views and the self-supervised labels) while regularizing by
  sqrt(HSIC(X, X)). The IMQ kernel length scale is a learned haiku
  parameter; its gradient magnitude is estimated on subsampled pairs
  and reported in the summaries.
  """
  def __init__(self,
               num_rff_features: int,
               regul_weight: float,
               name: Optional[Text] = 'hsic_loss'):
    """Initialize HSICLoss.
    Args:
      num_rff_features: number of RFF features used for the approximation.
      regul_weight: regularization weight applied for HSIC(X, X).
      name: name of the module, optional.
    """
    super().__init__(name=name)
    self._num_rff_features = num_rff_features
    self._regul_weight = regul_weight
  def __call__(
      self, list_hiddens: List[jnp.ndarray],
      rff_kwargs: Optional[Dict[Text, Any]]
  ) -> Tuple[jnp.ndarray, Dict[Text, jnp.ndarray]]:
    """Returns the HSIC loss and summaries.
    Args:
      list_hiddens: list of hiddens from different views.
      rff_kwargs: keyword args for sampling frequencies to compute RFF.
    Returns:
      total loss and a dictionary of summaries.
    """
    b = list_hiddens[0].shape[0]
    # Learnable kernel length scale; `c` is its stopped-gradient value,
    # so total_loss itself carries no gradient to `scale`.
    # NOTE(review): `scale` presumably gets updated elsewhere using the
    # grad_norm summary computed below -- confirm against the trainer.
    scale = hk.get_parameter('scale', shape=[], dtype=jnp.float32,
                             init=hk.initializers.Constant(1.))
    c = jax.lax.stop_gradient(scale)
    rff_kwargs = rff_kwargs or {}
    w_pos, w_neg = get_label_weights(b)
    rng1, rng2 = jax.random.split(hk.next_rng_key())
    hsic_xy = rff_approximate_hsic_xy(list_hiddens, w_pos - w_neg,
                                      self._num_rff_features, rng1, c,
                                      rff_kwargs=rff_kwargs)
    hsic_xx = rff_approximate_hsic_xx(list_hiddens, self._num_rff_features,
                                      rng1, rng2, c, rff_kwargs)
    # Maximize dependence with the labels; penalize sqrt(HSIC(X, X)).
    total_loss = self._regul_weight * jnp.sqrt(hsic_xx) - hsic_xy
    # Compute gradient norm.
    # 1024 hidden vectors are subsampled across views; the derivative of
    # the IMQ kernel value y / sqrt(x + y^2) w.r.t. the squared distance
    # x is evaluated at (dist_sq, scale) and its log-magnitude logged.
    n_samples = int(1024 / len(list_hiddens))  # 1024 samples in total.
    sampled_hiddens_1 = jnp.concatenate([
        x[jax.random.choice(hk.next_rng_key(), jnp.arange(b), (n_samples,)), :]
        for x in list_hiddens
    ])
    sampled_hiddens_2 = jnp.concatenate([
        x[jax.random.choice(hk.next_rng_key(), jnp.arange(b), (n_samples,)), :]
        for x in list_hiddens
    ])
    dist_sq = jax.lax.stop_gradient(
        pairwise_distance_square(sampled_hiddens_1, sampled_hiddens_2))
    grad = jax.grad(lambda x, y: (y / jnp.sqrt(x + y**2)).sum())(dist_sq, scale)
    # 1e-14 floor guards the log against zero gradients.
    grad_norm = 0.5 * jnp.log(jnp.maximum(1e-14, grad ** 2)).mean()
    summaries = {'kernel_loss/hsic_xy': hsic_xy,
                 'kernel_loss/hsic_xx': hsic_xx,
                 'kernel_loss/total_loss': total_loss,
                 'kernel_loss/kernel_param': scale,
                 'kernel_loss/grad_norm': grad_norm}
    return total_loss, summaries
| [
"numpy.array",
"mpmath.power",
"haiku.next_rng_key",
"jax.numpy.matmul",
"jax.random.split",
"jax.random.normal",
"haiku.initializers.Constant",
"numpy.linspace",
"jax.random.choice",
"jax.random.uniform",
"functools.reduce",
"jax.numpy.atleast_2d",
"jax.lax.stop_gradient",
"jax.numpy.eins... | [((1451, 1480), 'jax.numpy.einsum', 'jnp.einsum', (['"""ik,jk->ij"""', 'x', 'y'], {}), "('ik,jk->ij', x, y)\n", (1461, 1480), True, 'import jax.numpy as jnp\n'), ((1759, 1778), 'jax.numpy.atleast_2d', 'jnp.atleast_2d', (['(1.0)'], {}), '(1.0)\n', (1773, 1778), True, 'import jax.numpy as jnp\n'), ((1794, 1813), 'jax.numpy.atleast_2d', 'jnp.atleast_2d', (['(0.0)'], {}), '(0.0)\n', (1808, 1813), True, 'import jax.numpy as jnp\n'), ((2376, 2402), 'numpy.array', 'np.array', (['normalized_probs'], {}), '(normalized_probs)\n', (2384, 2402), True, 'import numpy as np\n'), ((2550, 2580), 'numpy.linspace', 'np.linspace', (['(1e-12)', '(100)', '(10000)'], {}), '(1e-12, 100, 10000)\n', (2561, 2580), True, 'import numpy as np\n'), ((2943, 2964), 'jax.random.split', 'jax.random.split', (['rng'], {}), '(rng)\n', (2959, 2964), False, 'import jax\n'), ((2973, 3039), 'jax.random.choice', 'jax.random.choice', (['rng1', 'amp'], {'shape': '[num_features, 1]', 'p': 'amp_probs'}), '(rng1, amp, shape=[num_features, 1], p=amp_probs)\n', (2990, 3039), False, 'import jax\n'), ((3055, 3103), 'jax.random.normal', 'jax.random.normal', (['rng2'], {'shape': '(num_features, d)'}), '(rng2, shape=(num_features, d))\n', (3072, 3103), False, 'import jax\n'), ((4156, 4184), 'jax.numpy.zeros', 'jnp.zeros', (['(b, num_features)'], {}), '((b, num_features))\n', (4165, 4184), True, 'import jax.numpy as jnp\n'), ((4194, 4222), 'jax.numpy.zeros', 'jnp.zeros', (['(1, num_features)'], {}), '((1, num_features))\n', (4203, 4222), True, 'import jax.numpy as jnp\n'), ((5751, 5815), 'jax.numpy.zeros', 'jnp.zeros', ([], {'shape': '(num_features, num_features)', 'dtype': 'jnp.float32'}), '(shape=(num_features, num_features), dtype=jnp.float32)\n', (5760, 5815), True, 'import jax.numpy as jnp\n'), ((1344, 1372), 'jax.numpy.einsum', 'jnp.einsum', (['"""ij,ij->i"""', 'x', 'x'], {}), "('ij,ij->i', x, x)\n", (1354, 1372), True, 'import jax.numpy as jnp\n'), ((1398, 1426), 'jax.numpy.einsum', 
'jnp.einsum', (['"""ij,ij->i"""', 'y', 'y'], {}), "('ij,ij->i', y, y)\n", (1408, 1426), True, 'import jax.numpy as jnp\n'), ((1573, 1595), 'jax.numpy.maximum', 'jnp.maximum', (['dist', '(0.0)'], {}), '(dist, 0.0)\n', (1584, 1595), True, 'import jax.numpy as jnp\n'), ((3258, 3284), 'jax.numpy.sqrt', 'jnp.sqrt', (['(2 / num_features)'], {}), '(2 / num_features)\n', (3266, 3284), True, 'import jax.numpy as jnp\n'), ((5872, 5931), 'jax.numpy.einsum', 'jnp.einsum', (['"""ni,nj->ij"""', '(x1_rff - mean_x1)', '(x2_rff - mean_x2)'], {}), "('ni,nj->ij', x1_rff - mean_x1, x2_rff - mean_x2)\n", (5882, 5931), True, 'import jax.numpy as jnp\n'), ((7138, 7166), 'jax.lax.stop_gradient', 'jax.lax.stop_gradient', (['scale'], {}), '(scale)\n', (7159, 7166), False, 'import jax\n'), ((2216, 2246), 'mpmath.besselk', 'mpmath.besselk', (['((n - 1) / 2)', 'x'], {}), '((n - 1) / 2, x)\n', (2230, 2246), False, 'import mpmath\n'), ((2249, 2277), 'mpmath.power', 'mpmath.power', (['x', '((n - 1) / 2)'], {}), '(x, (n - 1) / 2)\n', (2261, 2277), False, 'import mpmath\n'), ((3110, 3159), 'jax.random.uniform', 'jax.random.uniform', (['rng2'], {'shape': '(1, num_features)'}), '(rng2, shape=(1, num_features))\n', (3128, 3159), False, 'import jax\n'), ((3192, 3243), 'jax.numpy.linalg.norm', 'jnp.linalg.norm', (['directions'], {'axis': '(-1)', 'keepdims': '(True)'}), '(directions, axis=-1, keepdims=True)\n', (3207, 3243), True, 'import jax.numpy as jnp\n'), ((7275, 7292), 'haiku.next_rng_key', 'hk.next_rng_key', ([], {}), '()\n', (7290, 7292), True, 'import haiku as hk\n'), ((3295, 3317), 'jax.numpy.matmul', 'jnp.matmul', (['(x / c)', 'w.T'], {}), '(x / c, w.T)\n', (3305, 3317), True, 'import jax.numpy as jnp\n'), ((5562, 5600), 'functools.reduce', 'functools.reduce', (['jax.lax.add', 'x1_rffs'], {}), '(jax.lax.add, x1_rffs)\n', (5578, 5600), False, 'import functools\n'), ((5660, 5698), 'functools.reduce', 'functools.reduce', (['jax.lax.add', 'x2_rffs'], {}), '(jax.lax.add, x2_rffs)\n', (5676, 5698), 
False, 'import functools\n'), ((7100, 7129), 'haiku.initializers.Constant', 'hk.initializers.Constant', (['(1.0)'], {}), '(1.0)\n', (7124, 7129), True, 'import haiku as hk\n'), ((7672, 7689), 'jax.numpy.sqrt', 'jnp.sqrt', (['hsic_xx'], {}), '(hsic_xx)\n', (7680, 7689), True, 'import jax.numpy as jnp\n'), ((8340, 8369), 'jax.numpy.maximum', 'jnp.maximum', (['(1e-14)', '(grad ** 2)'], {}), '(1e-14, grad ** 2)\n', (8351, 8369), True, 'import jax.numpy as jnp\n'), ((7872, 7889), 'haiku.next_rng_key', 'hk.next_rng_key', ([], {}), '()\n', (7887, 7889), True, 'import haiku as hk\n'), ((7891, 7904), 'jax.numpy.arange', 'jnp.arange', (['b'], {}), '(b)\n', (7901, 7904), True, 'import jax.numpy as jnp\n'), ((8031, 8048), 'haiku.next_rng_key', 'hk.next_rng_key', ([], {}), '()\n', (8046, 8048), True, 'import haiku as hk\n'), ((8050, 8063), 'jax.numpy.arange', 'jnp.arange', (['b'], {}), '(b)\n', (8060, 8063), True, 'import jax.numpy as jnp\n'), ((8267, 8287), 'jax.numpy.sqrt', 'jnp.sqrt', (['(x + y ** 2)'], {}), '(x + y ** 2)\n', (8275, 8287), True, 'import jax.numpy as jnp\n')] |
import cv2
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from skimage.restoration import (denoise_tv_chambolle, denoise_bilateral,
denoise_wavelet, estimate_sigma)
from pathlib import Path
def process_img_and_save(img_path: Path, denoise_h=20,
                         sobel=True, kernel_size=3):
    """Denoise, edge-filter, mask and rescale a coin image, then save it.

    Pipeline: grayscale load -> resize to 350x350 -> non-local-means
    denoise -> Sobel or Laplacian edge filter -> circular mask ->
    resize to 100x100 -> stretch to uint8 [0, 255] -> save as
    "<stem>_p_l<kernel_size>.png" next to the source image.

    Parameters
    ----------
    img_path : Path
        Path to the source image.
    denoise_h : int
        Filter strength for cv2.fastNlMeansDenoising.
    sobel : bool
        If True use the Sobel filter, otherwise the Laplacian.
    kernel_size : int
        Aperture size for the edge filter.

    Returns
    -------
    ndarray
        The processed uint8 image.
    """
    gray = cv2.imread(str(img_path), cv2.IMREAD_GRAYSCALE)
    gray = cv2.resize(gray, (350, 350), interpolation=cv2.INTER_AREA)
    denoised = cv2.fastNlMeansDenoising(src=gray, dst=None, h=denoise_h)
    filtered = (apply_sobel(denoised, kernel_size) if sobel
                else apply_laplacian(denoised, kernel_size))
    masked = apply_circle_mask(filtered)
    small = cv2.resize(masked, (100, 100), interpolation=cv2.INTER_AREA)
    # Stretch to the full uint8 range.
    rescaled = ((small / small.max()) * 255).astype("uint8")
    out_name = img_path.stem + "_p_l" + str(kernel_size) + ".png"
    Image.fromarray(rescaled).save(img_path.parent / out_name)
    return rescaled
def process_after_cropped(img_path: Path, denoise_h=20, filter=False,
                          sobel=True, kernel_size=3):
    """Process an already-cropped coin image and save the result.

    Pipeline: grayscale load -> resize to 270x270 -> non-local-means
    denoise -> optional Sobel/Laplacian edge filter -> circular mask
    (radius 135) -> histogram equalization -> resize to 100x100 ->
    stretch to uint8 -> save as "<stem>no_filter.png" next to the
    source image.

    NOTE(review): the `filter` parameter shadows the builtin, and the
    output suffix is always "no_filter" even when filtering is enabled
    -- both kept for backward compatibility; confirm intent.

    Returns
    -------
    ndarray
        The processed uint8 image.
    """
    gray = cv2.imread(str(img_path), cv2.IMREAD_GRAYSCALE)
    gray = cv2.resize(gray, (270, 270), interpolation=cv2.INTER_AREA)
    denoised = cv2.fastNlMeansDenoising(src=gray, dst=None, h=denoise_h)
    if not filter:
        filtered = denoised
    elif sobel:
        filtered = apply_sobel(denoised, kernel_size)
    else:
        filtered = apply_laplacian(denoised, kernel_size)
    masked = apply_circle_mask(filtered, radius=135)
    normalized = cv2.equalizeHist(masked)
    small = cv2.resize(normalized, (100, 100), interpolation=cv2.INTER_AREA)
    rescaled = ((small / small.max()) * 255).astype("uint8")
    out_name = img_path.stem + "no_filter" + ".png"
    Image.fromarray(rescaled).save(img_path.parent / out_name)
    return rescaled
def apply_sobel(img, kernel_size):
    """Return the gradient magnitude of `img` via Sobel derivatives.

    Computes first derivatives along x and y (64-bit float output,
    reflected borders) and combines them as sqrt(gx**2 + gy**2).
    """
    gx, gy = (cv2.Sobel(img, dx=dx, dy=dy, ddepth=cv2.CV_64F,
                        ksize=kernel_size, borderType=cv2.BORDER_REFLECT)
              for dx, dy in ((1, 0), (0, 1)))
    return np.sqrt(gx ** 2 + gy ** 2)
def apply_laplacian(img, kernel_size):
    """Return the absolute Laplacian response of `img`
    (64-bit float output, reflected borders)."""
    response = cv2.Laplacian(img, ddepth=cv2.CV_64F,
                             ksize=kernel_size, borderType=cv2.BORDER_REFLECT)
    return np.abs(response)
def apply_circle_mask(img, radius=150):
    """Zero out everything outside a centered circle of `radius` pixels.

    Parameters
    ----------
    img : 2-D ndarray
        Grayscale image.
    radius : int
        Mask radius in pixels.

    Returns
    -------
    ndarray
        Same shape as `img`, with pixels outside the circle set to 0.
    """
    hh, ww = img.shape
    ycen = hh // 2
    xcen = ww // 2
    mask = np.zeros_like(img)
    # cv2.circle expects center as (x, y); the previous version passed
    # (y, x), which only worked because all inputs happened to be square.
    mask = cv2.circle(mask, center=(xcen, ycen), radius=radius,
                      color=1, thickness=-1)
    return np.where(mask == 1, img, 0)
def canny_process_img_and_save(img_path, l_t=150, h_t=300,
                               kernel_size=3):
    """Canny-edge-detect a coin image, mask, rescale and save it.

    Loads grayscale, resizes to 350x350, runs Canny (L2 gradient) with
    hysteresis thresholds (l_t, h_t), applies the circular mask,
    downsizes to 100x100, stretches to uint8 and writes
    "<stem>_p_can.png" next to the source image.

    Returns
    -------
    ndarray
        The processed uint8 image.
    """
    gray = cv2.imread(str(img_path), cv2.IMREAD_GRAYSCALE)
    gray = cv2.resize(gray, (350, 350), interpolation=cv2.INTER_AREA)
    edges = cv2.Canny(gray, threshold1=l_t, threshold2=h_t,
                      L2gradient=True, apertureSize=kernel_size)
    masked = apply_circle_mask(edges)
    small = cv2.resize(masked, (100, 100), interpolation=cv2.INTER_AREA)
    rescaled = ((small / small.max()) * 255).astype("uint8")
    Image.fromarray(rescaled).save(
        img_path.parent / (img_path.stem + "_p_can" + ".png"))
    return rescaled
# main_path = Path(__file__).resolve().parent
# image_folder = main_path / "orig_coin_5_classes"
# for image_path in image_folder.glob("**/*.jpg"):
# process_img_and_save(image_path,sobel=False,kernel_size=5)
# Process every cropped coin JPEG found under ./input_coins (relative to
# this script), writing the processed PNGs next to the originals.
main_path = Path(__file__).resolve().parent
image_folder = main_path / "input_coins"
for image_path in image_folder.glob("**/*.jpg"):
    process_after_cropped(image_path)
# Sobel noisy
# Canny is very sensitive to thresholds
# Laplacian h=5 is best
| [
"PIL.Image.fromarray",
"cv2.Laplacian",
"numpy.sqrt",
"cv2.fastNlMeansDenoising",
"pathlib.Path",
"numpy.where",
"cv2.equalizeHist",
"cv2.circle",
"cv2.resize",
"cv2.Canny",
"numpy.zeros_like",
"cv2.Sobel"
] | [((427, 484), 'cv2.resize', 'cv2.resize', (['img', '(350, 350)'], {'interpolation': 'cv2.INTER_AREA'}), '(img, (350, 350), interpolation=cv2.INTER_AREA)\n', (437, 484), False, 'import cv2\n'), ((521, 577), 'cv2.fastNlMeansDenoising', 'cv2.fastNlMeansDenoising', ([], {'src': 'img', 'dst': 'None', 'h': 'denoise_h'}), '(src=img, dst=None, h=denoise_h)\n', (545, 577), False, 'import cv2\n'), ((776, 836), 'cv2.resize', 'cv2.resize', (['masked', '(100, 100)'], {'interpolation': 'cv2.INTER_AREA'}), '(masked, (100, 100), interpolation=cv2.INTER_AREA)\n', (786, 836), False, 'import cv2\n'), ((987, 1012), 'PIL.Image.fromarray', 'Image.fromarray', (['rescaled'], {}), '(rescaled)\n', (1002, 1012), False, 'from PIL import Image\n'), ((1373, 1430), 'cv2.resize', 'cv2.resize', (['img', '(270, 270)'], {'interpolation': 'cv2.INTER_AREA'}), '(img, (270, 270), interpolation=cv2.INTER_AREA)\n', (1383, 1430), False, 'import cv2\n'), ((1467, 1523), 'cv2.fastNlMeansDenoising', 'cv2.fastNlMeansDenoising', ([], {'src': 'img', 'dst': 'None', 'h': 'denoise_h'}), '(src=img, dst=None, h=denoise_h)\n', (1491, 1523), False, 'import cv2\n'), ((1799, 1823), 'cv2.equalizeHist', 'cv2.equalizeHist', (['masked'], {}), '(masked)\n', (1815, 1823), False, 'import cv2\n'), ((1845, 1909), 'cv2.resize', 'cv2.resize', (['normalized', '(100, 100)'], {'interpolation': 'cv2.INTER_AREA'}), '(normalized, (100, 100), interpolation=cv2.INTER_AREA)\n', (1855, 1909), False, 'import cv2\n'), ((2060, 2085), 'PIL.Image.fromarray', 'Image.fromarray', (['rescaled'], {}), '(rescaled)\n', (2075, 2085), False, 'from PIL import Image\n'), ((2289, 2389), 'cv2.Sobel', 'cv2.Sobel', (['img'], {'dx': '(1)', 'dy': '(0)', 'ddepth': 'cv2.CV_64F', 'ksize': 'kernel_size', 'borderType': 'cv2.BORDER_REFLECT'}), '(img, dx=1, dy=0, ddepth=cv2.CV_64F, ksize=kernel_size, borderType\n =cv2.BORDER_REFLECT)\n', (2298, 2389), False, 'import cv2\n'), ((2423, 2523), 'cv2.Sobel', 'cv2.Sobel', (['img'], {'dx': '(0)', 'dy': '(1)', 'ddepth': 
'cv2.CV_64F', 'ksize': 'kernel_size', 'borderType': 'cv2.BORDER_REFLECT'}), '(img, dx=0, dy=1, ddepth=cv2.CV_64F, ksize=kernel_size, borderType\n =cv2.BORDER_REFLECT)\n', (2432, 2523), False, 'import cv2\n'), ((2561, 2597), 'numpy.sqrt', 'np.sqrt', (['(sobel_x ** 2 + sobel_y ** 2)'], {}), '(sobel_x ** 2 + sobel_y ** 2)\n', (2568, 2597), True, 'import numpy as np\n'), ((2947, 2965), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (2960, 2965), True, 'import numpy as np\n'), ((2977, 3052), 'cv2.circle', 'cv2.circle', (['mask'], {'center': '(ycen, xcen)', 'radius': 'radius', 'color': '(1)', 'thickness': '(-1)'}), '(mask, center=(ycen, xcen), radius=radius, color=1, thickness=-1)\n', (2987, 3052), False, 'import cv2\n'), ((3088, 3115), 'numpy.where', 'np.where', (['(mask == 1)', 'img', '(0)'], {}), '(mask == 1, img, 0)\n', (3096, 3115), True, 'import numpy as np\n'), ((3310, 3367), 'cv2.resize', 'cv2.resize', (['img', '(350, 350)'], {'interpolation': 'cv2.INTER_AREA'}), '(img, (350, 350), interpolation=cv2.INTER_AREA)\n', (3320, 3367), False, 'import cv2\n'), ((3406, 3499), 'cv2.Canny', 'cv2.Canny', (['img'], {'threshold1': 'l_t', 'threshold2': 'h_t', 'L2gradient': '(True)', 'apertureSize': 'kernel_size'}), '(img, threshold1=l_t, threshold2=h_t, L2gradient=True,\n apertureSize=kernel_size)\n', (3415, 3499), False, 'import cv2\n'), ((3587, 3647), 'cv2.resize', 'cv2.resize', (['masked', '(100, 100)'], {'interpolation': 'cv2.INTER_AREA'}), '(masked, (100, 100), interpolation=cv2.INTER_AREA)\n', (3597, 3647), False, 'import cv2\n'), ((3768, 3793), 'PIL.Image.fromarray', 'Image.fromarray', (['rescaled'], {}), '(rescaled)\n', (3783, 3793), False, 'from PIL import Image\n'), ((2686, 2778), 'cv2.Laplacian', 'cv2.Laplacian', (['img'], {'ddepth': 'cv2.CV_64F', 'ksize': 'kernel_size', 'borderType': 'cv2.BORDER_REFLECT'}), '(img, ddepth=cv2.CV_64F, ksize=kernel_size, borderType=cv2.\n BORDER_REFLECT)\n', (2699, 2778), False, 'import cv2\n'), ((4172, 4186), 
'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (4176, 4186), False, 'from pathlib import Path\n')] |
import numpy as np
import warnings
from time import time
import pandas as pd
# SeldonianML imports
from utils import argsweep, experiment, keyboard
from datasets import tutoring_bandit as TutoringSystem
import core.srl_fairness as SRL
import baselines.naive_full as NSRL
# Supress sklearn FutureWarnings for SGD
warnings.simplefilter(action='ignore', category=FutureWarning)
# Imports for baseline algorithms
from baselines.POEM.Skylines import PRMWrapper
from baselines.POEM.DatasetReader import BanditDataset
from contextualbandits.offpolicy import OffsetTree
from sklearn.linear_model import LogisticRegression
########################################
# Helpers for selection SRL models #
########################################
def get_srl_class(bound_ref_return=True, females_only=False, ci_type='ttest'):
    """Look up the Seldonian RL model class for the tutoring system.

    The class name is assembled from the flags as
    'TutoringSystem' [+ 'Female'] [+ 'Bootstrap'] [+ 'Emp'] + 'SRL',
    where 'Female' appears when females_only, 'Bootstrap' when
    ci_type == 'bootstrap', and 'Emp' when bound_ref_return is False.

    Raises:
        ValueError: if ci_type is not 'ttest' or 'bootstrap'.
    """
    if ci_type not in ('ttest', 'bootstrap'):
        raise ValueError('get_srl_class(): Unknown ci_type, "%s".' % ci_type)
    name = 'TutoringSystem'
    if females_only:
        name += 'Female'
    if ci_type == 'bootstrap':
        name += 'Bootstrap'
    if not bound_ref_return:
        name += 'Emp'
    return getattr(SRL, name + 'SRL')
def get_nsrl_class(bound_ref_return=True, females_only=False, ci_type='ttest'):
    """Look up the naive (unconstrained) RL baseline class.

    Mirrors `get_srl_class`, but resolves classes from the naive
    baseline module: the class name is assembled as
    'TutoringSystem' [+ 'Female'] [+ 'Bootstrap'] [+ 'Emp'] + 'NaiveSRL'.

    Raises:
        ValueError: if ci_type is not 'ttest' or 'bootstrap'.
    """
    if ci_type not in ('ttest', 'bootstrap'):
        # Fixed: the message previously said "get_srl_class()" (copy-paste).
        raise ValueError('get_nsrl_class(): Unknown ci_type, "%s".' % ci_type)
    name = 'TutoringSystem'
    if females_only:
        name += 'Female'
    if ci_type == 'bootstrap':
        name += 'Bootstrap'
    if not bound_ref_return:
        name += 'Emp'
    return getattr(NSRL, name + 'NaiveSRL')
########################
# Model Evaluators #
########################
def eval_offset_trees(dataset, mp):
    """Train and evaluate an Offset Tree baseline policy.

    Trains contextualbandits' OffsetTree on the reward-corrected training
    split, then scores the resulting deterministic policy with the
    Seldonian evaluator so results are comparable across methods.

    Args:
        dataset: tutoring-system bandit dataset exposing *_splits(),
            reward bounds, n_actions, and the type indicator `_T`.
        mp: method-parameter dict with keys 'e_f', 'e_m', 'd',
            'bound_ref_return', 'females_only', 'ci_type',
            'simulated_female_proportion'.

    Returns:
        dict of evaluation results with 'train_time' (seconds) added.
    """
    # --- Train the baseline policy on reward-corrected data ---
    t = time()
    dataset.enable_R_corrections()
    S, A, R, _, P = dataset.training_splits(flatten=True)
    new_policy = OffsetTree(base_algorithm=LogisticRegression(solver='lbfgs'),
                            nchoices=dataset.n_actions)
    new_policy.fit(X=S, a=A, r=R, p=P)
    t_train = time() - t

    def predict_proba(S):
        # OffsetTree predicts a single action; convert to a one-hot
        # action-probability tensor of shape (n, 1, n_actions).
        S = S[:, 0, :]
        AP = new_policy.predict(S)
        P = np.zeros((len(AP), dataset.n_actions))
        for i, a in enumerate(AP):
            P[i, a] = 1.0
        return P[:, None, :]

    # --- Evaluate with the SRL evaluator on uncorrected rewards ---
    dataset.disable_R_corrections()
    # Fix: `sfp` was assigned but never used; the dict was re-read below.
    sfp = mp['simulated_female_proportion']
    model_params = {
        'epsilon_f': mp['e_f'],
        'epsilon_m': mp['e_m'],
        'delta': mp['d']}
    if sfp is not None:
        # Rescale importance weights to simulate a different gender mix.
        model_params['male_iw_correction'] = (1 - sfp) / np.mean(dataset._T == 0)
        model_params['female_iw_correction'] = sfp / np.mean(dataset._T == 1)
    min_reward, max_reward = dataset.min_reward, dataset.max_reward
    _, _, R, T, _ = dataset.testing_splits(flatten=True)
    r_ref_T0 = np.mean(R[T == 0])  # behavior-policy return, T==0 group
    r_ref_T1 = np.mean(R[T == 1])  # behavior-policy return, T==1 group
    TutoringSystemSRL = get_srl_class(mp['bound_ref_return'], mp['females_only'], mp['ci_type'])
    model = TutoringSystemSRL(min_reward, max_reward, r_ref_T0, r_ref_T1, **model_params)
    results = model.evaluate(dataset, probf=predict_proba)
    results['train_time'] = t_train
    return results
def eval_poem(dataset, mp):
    """Train and evaluate the POEM baseline.

    Converts the bandit dataset into POEM's BanditDataset format (one-hot
    labels, log-propensities, negated rewards), trains via PRMWrapper, and
    then scores the learned stochastic policy with the Seldonian evaluator
    so the results are comparable across methods.

    Returns the evaluation dict with 'train_time' (seconds) added.
    """
    n_actions = dataset.n_actions
    # Represent our data in a form compatible with POEM
    dataset.enable_R_corrections()
    bandit_dataset = BanditDataset(None, verbose=False)
    # Test split -> POEM's test features/labels (one-hot over actions).
    S, A, R, _, P = dataset.testing_splits(flatten=True)
    labels = np.zeros((len(A),dataset.n_actions))
    for i, a in enumerate(A):
        labels[i,a] = 1.0
    bandit_dataset.testFeatures = S
    bandit_dataset.testLabels = labels
    # Training split -> POEM's train features/labels.
    S, A, R, _, P = dataset.training_splits(flatten=True)
    labels = np.zeros((len(A),dataset.n_actions))
    for i, a in enumerate(A):
        labels[i,a] = 1.0
    bandit_dataset.trainFeatures = S
    bandit_dataset.trainLabels = labels
    bandit_dataset.registerSampledData(labels, np.log(P), -R) # POEM expects penalties not rewards
    bandit_dataset.createTrainValidateSplit(0.1)
    # Train POEM
    ss = np.random.random((dataset.n_features, dataset.n_actions))
    maj = PRMWrapper(bandit_dataset, n_iter = 1000, tol = 1e-6, minC = 0, maxC = -1, minV = -6, maxV = 0, minClip = 0, maxClip = 0, estimator_type = 'Stochastic', verbose = False, parallel = None, smartStart = ss)
    maj.calibrateHyperParams()
    t_train = maj.validate()
    # Extract the predictor and construct a proba function
    def predict_proba(S):
        # Softmax over the learned linear scores; output shape (n, 1, n_actions).
        S = S[:,0,:]
        V = S.dot(maj.labeler.coef_).astype('float64')
        EV = np.exp(V)
        return (EV / EV.sum(axis=1)[:,None])[:,None,:]
    # Evaluate using SRL's evaluate method
    dataset.disable_R_corrections()
    model_params = {
        'epsilon_f' : mp['e_f'],
        'epsilon_m' : mp['e_m'],
        'delta' : mp['d'] }
    if not(mp['simulated_female_proportion'] is None):
        # Rescale importance weights to simulate a different gender mix.
        model_params['male_iw_correction'] = (1-mp['simulated_female_proportion'])/np.mean(dataset._T==0)
        model_params['female_iw_correction'] = mp['simulated_female_proportion']/np.mean(dataset._T==1)
    min_reward, max_reward = dataset.min_reward, dataset.max_reward
    _, _, R, T, _ = dataset.testing_splits(flatten=True)
    r_ref_T0 = np.mean(R[T==0])
    r_ref_T1 = np.mean(R[T==1])
    TutoringSystemSRL = get_srl_class(mp['bound_ref_return'], mp['females_only'], mp['ci_type'])
    model = TutoringSystemSRL(min_reward, max_reward, r_ref_T0, r_ref_T1, **model_params)
    results = model.evaluate(dataset, probf=predict_proba)
    results['train_time'] = t_train
    return results
def eval_naive(dataset, mp):
    """Train and evaluate the unconstrained ('Naive') SRL baseline.

    Fit with CMA-ES exactly like the Seldonian model, but using the NSRL
    classes, which do not enforce the safety test.

    Args:
        dataset: tutoring-system bandit dataset exposing *_splits(),
            reward bounds, and the type indicator `_T`.
        mp: method-parameter dict with keys 'e_f', 'e_m', 'd', 'n_iters',
            'ci_type', 'females_only', 'bound_ref_return',
            'simulated_female_proportion'.

    Returns:
        dict of evaluation results with 'train_time' (seconds) added.
    """
    # Train the model on raw (uncorrected) rewards.
    # (Removed unused local `n_actions`.)
    t = time()
    dataset.disable_R_corrections()
    model_params = {
        'epsilon_f': mp['e_f'],
        'epsilon_m': mp['e_m'],
        'delta': mp['d']}
    sfp = mp['simulated_female_proportion']
    if sfp is not None:
        # Rescale importance weights to simulate a different gender mix.
        model_params['male_iw_correction'] = (1 - sfp) / np.mean(dataset._T == 0)
        model_params['female_iw_correction'] = sfp / np.mean(dataset._T == 1)
    min_reward, max_reward = dataset.min_reward, dataset.max_reward
    _, _, R, T, _ = dataset.testing_splits(flatten=True)
    r_ref_T0 = np.mean(R[T == 0])  # behavior-policy return, T==0 group
    r_ref_T1 = np.mean(R[T == 1])  # behavior-policy return, T==1 group
    TutoringSystemNaiveSRL = get_nsrl_class(mp['bound_ref_return'], mp['females_only'], mp['ci_type'])
    model = TutoringSystemNaiveSRL(min_reward, max_reward, r_ref_T0, r_ref_T1, **model_params)
    model.fit(dataset, n_iters=mp['n_iters'], optimizer_name='cmaes')
    t_train = time() - t
    # Assess the model on the held-out split.
    results = model.evaluate(dataset, probf=model.get_probf())
    results['train_time'] = t_train
    return results
def eval_sb(dataset, mp):
    """Fit and score the Seldonian safe-bandit model on `dataset`.

    Same contract as the other eval_* helpers: trains on uncorrected
    rewards via CMA-ES and returns the dict from model.evaluate(),
    augmented with the wall-clock training time.
    """
    n_actions = dataset.n_actions
    # Train the model on raw (uncorrected) rewards.
    start = time()
    dataset.disable_R_corrections()
    model_params = {'epsilon_f': mp['e_f'],
                    'epsilon_m': mp['e_m'],
                    'delta': mp['d']}
    sfp = mp['simulated_female_proportion']
    if sfp is not None:
        # Importance-weight rescaling to simulate a different gender mix.
        model_params['male_iw_correction'] = (1 - sfp) / np.mean(dataset._T == 0)
        model_params['female_iw_correction'] = sfp / np.mean(dataset._T == 1)
    min_reward, max_reward = dataset.min_reward, dataset.max_reward
    _, _, R, T, _ = dataset.testing_splits(flatten=True)
    r_ref_T0 = np.mean(R[T == 0])
    r_ref_T1 = np.mean(R[T == 1])
    srl_cls = get_srl_class(mp['bound_ref_return'], mp['females_only'], mp['ci_type'])
    model = srl_cls(min_reward, max_reward, r_ref_T0, r_ref_T1, **model_params)
    model.fit(dataset, n_iters=mp['n_iters'], optimizer_name='cmaes')
    t_train = time() - start
    # Assess the model on the held-out split.
    results = model.evaluate(dataset)
    results['train_time'] = t_train
    return results
######################
# Dataset Loader #
######################
def load_dataset(tparams, seed):
    """Construct a TutoringSystem dataset from the task parameters.

    `seed` is accepted for interface compatibility with the experiment
    runner but is not forwarded to the loader.
    """
    return TutoringSystem.load(
        r_train=tparams['r_train_v_test'],
        r_candidate=tparams['r_cand_v_safe'],
        include_T=tparams['include_T'],
        include_intercept=not tparams['omit_intercept'],
        use_pct=tparams['data_pct'],
        remove_biased_tutorial=tparams['remove_biased_tutorial'],
        simulated_female_proportion=tparams['simulated_female_proportion'])
############
# Main #
############
if __name__ == '__main__':
    # Note: This script computes experiments for the cross product of all values given for the
    #       sweepable arguments.
    # Note: Sweepable arguments allow inputs of the form, <start>:<end>:<increment>, which are then
    #       expanded into ranges via np.arange(<start>, <end>, <increment>).
    # Eventually I'll add a nice usage string explaining this.
    with argsweep.ArgumentSweeper() as parser:
        # Execution parameters
        parser.add_argument('--status_delay', type=int, default=30, help='Number of seconds between status updates when running multiple jobs.')
        parser.add_argument('base_path', type=str)
        parser.add_argument('--n_jobs', type=int, default=4, help='Number of processes to use.')
        parser.add_argument('--n_trials', type=int, default=10, help='Number of trials to run.')
        # Dataset arguments
        parser.add_sweepable_argument('--r_train_v_test', type=float, default=0.4, nargs='*', help='Ratio of data used for training vs testing.')
        parser.add_sweepable_argument('--r_cand_v_safe', type=float, default=0.4, nargs='*', help='Ratio of training data used for candidate selection vs safety checking. (SMLA only)')
        parser.add_argument('--include_T', action='store_true', help='Whether or not to include type as a predictive feature.')
        parser.add_argument('--omit_intercept', action='store_false', help='Whether or not to include an intercept as a predictive feature (included by default).')
        parser.add_sweepable_argument('--data_pct', type=float, default=1.0, nargs='*', help='Percentage of the overall size of the dataset to use.')
        parser.add_argument('--use_score_text', action='store_true', help='Whether or not to base actions off of the COMPAS score text (default uses the "decile_score" feature).')
        parser.add_argument('--rwd_recid', type=float, default=-1.0, help='Reward for instances of recidivism.')
        parser.add_argument('--rwd_nonrecid', type=float, default=1.0, help='Reward for instances of non-recidivism.')
        parser.add_argument('--simulated_female_proportion', type=float, default=None, help='If specified, rescales the importance weight terms to simulate having the specified proportion of females.')
        # Seldonian algorithm parameters
        parser.add_argument('--females_only', action='store_true', help='If enabled, only enforce the constraint for females.')
        parser.add_argument('--bound_ref_return', action='store_true', help='If enabled, also bound the expected return of the behavior policy.')
        parser.add_argument('--ci_type', type=str, default='ttest', help='Choice of confidence interval to use in the Seldonian methods.')
        parser.add_argument('--n_iters', type=int, default=10, help='Number of SMLA training iterations.')
        parser.add_argument('--remove_biased_tutorial', action='store_true', help='If true, remove the tutorial that is slanted against females (default is to include all data).')
        parser.add_sweepable_argument('--e_f', type=float, default=0.00, nargs='*', help='Values for epsilon for the female constraint.')
        parser.add_sweepable_argument('--e_m', type=float, default=0.00, nargs='*', help='Values for epsilon for the male constraint (no effect if --females_only).')
        parser.add_sweepable_argument('--d', type=float, default=0.05, nargs='*', help='Values for delta.')
        args = parser.parse_args()
        args_dict = dict(args.__dict__)
        # Define the evaluators to be included in the experiment and specify which ones are Seldonian
        # (the Seldonian model's evaluator is keyed by its class name).
        model_name = get_srl_class(args.bound_ref_return, args.females_only, args.ci_type).__name__
        model_evaluators = {
            model_name : eval_sb,
            'POEM' : eval_poem,
            'OffsetTree' : eval_offset_trees,
            'Naive' : eval_naive
        }
        smla_names = [model_name]
        # Store task parameters:
        tparam_names = ['n_jobs', 'base_path', 'data_pct', 'r_train_v_test', 'r_cand_v_safe', 'include_T', 'omit_intercept', 'use_score_text', 'rwd_recid', 'rwd_nonrecid', 'remove_biased_tutorial', 'simulated_female_proportion']
        tparams = {k:args_dict[k] for k in tparam_names}
        # Store method parameters:
        srl_mparam_names = ['e_f', 'e_m', 'd', 'n_iters', 'ci_type', 'females_only', 'bound_ref_return', 'simulated_female_proportion']
        bsln_mparam_names = ['e_f', 'e_m', 'd', 'n_iters', 'ci_type', 'females_only', 'bound_ref_return', 'simulated_female_proportion']
        mparams = {}
        for name in model_evaluators.keys():
            if name in smla_names:
                mparams[name] = {k:args_dict[k] for k in srl_mparam_names}
            else:
                mparams[name] = {k:args_dict[k] for k in bsln_mparam_names}
        # Expand the parameter sets into a set of configurations
        # (each swept argument multiplies the number of configurations).
        tparams, mparams = experiment.make_parameters(tparams, mparams, expand=parser._sweep_argnames)
        # Create a results file and directory
        print()
        save_path = experiment.prepare_paths(args.base_path, tparams, mparams, smla_names, root='results', filename=None)
        # Run the experiment
        print()
        experiment.run(args.n_trials, save_path, model_evaluators, load_dataset, tparams, mparams, n_workers=args.n_jobs, seed=None)
| [
"utils.experiment.prepare_paths",
"numpy.mean",
"utils.experiment.run",
"numpy.random.random",
"numpy.log",
"baselines.POEM.DatasetReader.BanditDataset",
"sklearn.linear_model.LogisticRegression",
"numpy.exp",
"utils.experiment.make_parameters",
"baselines.POEM.Skylines.PRMWrapper",
"utils.argsw... | [((315, 377), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), "(action='ignore', category=FutureWarning)\n", (336, 377), False, 'import warnings\n'), ((2637, 2643), 'time.time', 'time', ([], {}), '()\n', (2641, 2643), False, 'from time import time\n'), ((3684, 3702), 'numpy.mean', 'np.mean', (['R[T == 0]'], {}), '(R[T == 0])\n', (3691, 3702), True, 'import numpy as np\n'), ((3713, 3731), 'numpy.mean', 'np.mean', (['R[T == 1]'], {}), '(R[T == 1])\n', (3720, 3731), True, 'import numpy as np\n'), ((4180, 4214), 'baselines.POEM.DatasetReader.BanditDataset', 'BanditDataset', (['None'], {'verbose': '(False)'}), '(None, verbose=False)\n', (4193, 4214), False, 'from baselines.POEM.DatasetReader import BanditDataset\n'), ((4823, 4880), 'numpy.random.random', 'np.random.random', (['(dataset.n_features, dataset.n_actions)'], {}), '((dataset.n_features, dataset.n_actions))\n', (4839, 4880), True, 'import numpy as np\n'), ((4888, 5077), 'baselines.POEM.Skylines.PRMWrapper', 'PRMWrapper', (['bandit_dataset'], {'n_iter': '(1000)', 'tol': '(1e-06)', 'minC': '(0)', 'maxC': '(-1)', 'minV': '(-6)', 'maxV': '(0)', 'minClip': '(0)', 'maxClip': '(0)', 'estimator_type': '"""Stochastic"""', 'verbose': '(False)', 'parallel': 'None', 'smartStart': 'ss'}), "(bandit_dataset, n_iter=1000, tol=1e-06, minC=0, maxC=-1, minV=-6,\n maxV=0, minClip=0, maxClip=0, estimator_type='Stochastic', verbose=\n False, parallel=None, smartStart=ss)\n", (4898, 5077), False, 'from baselines.POEM.Skylines import PRMWrapper\n'), ((5925, 5943), 'numpy.mean', 'np.mean', (['R[T == 0]'], {}), '(R[T == 0])\n', (5932, 5943), True, 'import numpy as np\n'), ((5954, 5972), 'numpy.mean', 'np.mean', (['R[T == 1]'], {}), '(R[T == 1])\n', (5961, 5972), True, 'import numpy as np\n'), ((6344, 6350), 'time.time', 'time', ([], {}), '()\n', (6348, 6350), False, 'from time import time\n'), ((6880, 6898), 'numpy.mean', 'np.mean', (['R[T == 0]'], {}), 
'(R[T == 0])\n', (6887, 6898), True, 'import numpy as np\n'), ((6909, 6927), 'numpy.mean', 'np.mean', (['R[T == 1]'], {}), '(R[T == 1])\n', (6916, 6927), True, 'import numpy as np\n'), ((7419, 7425), 'time.time', 'time', ([], {}), '()\n', (7423, 7425), False, 'from time import time\n'), ((7955, 7973), 'numpy.mean', 'np.mean', (['R[T == 0]'], {}), '(R[T == 0])\n', (7962, 7973), True, 'import numpy as np\n'), ((7984, 8002), 'numpy.mean', 'np.mean', (['R[T == 1]'], {}), '(R[T == 1])\n', (7991, 8002), True, 'import numpy as np\n'), ((8894, 8926), 'datasets.tutoring_bandit.load', 'TutoringSystem.load', ([], {}), '(**dset_args)\n', (8913, 8926), True, 'from datasets import tutoring_bandit as TutoringSystem\n'), ((2882, 2888), 'time.time', 'time', ([], {}), '()\n', (2886, 2888), False, 'from time import time\n'), ((4696, 4705), 'numpy.log', 'np.log', (['P'], {}), '(P)\n', (4702, 4705), True, 'import numpy as np\n'), ((5297, 5306), 'numpy.exp', 'np.exp', (['V'], {}), '(V)\n', (5303, 5306), True, 'import numpy as np\n'), ((7196, 7202), 'time.time', 'time', ([], {}), '()\n', (7200, 7202), False, 'from time import time\n'), ((8260, 8266), 'time.time', 'time', ([], {}), '()\n', (8264, 8266), False, 'from time import time\n'), ((9364, 9390), 'utils.argsweep.ArgumentSweeper', 'argsweep.ArgumentSweeper', ([], {}), '()\n', (9388, 9390), False, 'from utils import argsweep, experiment, keyboard\n'), ((13659, 13734), 'utils.experiment.make_parameters', 'experiment.make_parameters', (['tparams', 'mparams'], {'expand': 'parser._sweep_argnames'}), '(tparams, mparams, expand=parser._sweep_argnames)\n', (13685, 13734), False, 'from utils import argsweep, experiment, keyboard\n'), ((13799, 13905), 'utils.experiment.prepare_paths', 'experiment.prepare_paths', (['args.base_path', 'tparams', 'mparams', 'smla_names'], {'root': '"""results"""', 'filename': 'None'}), "(args.base_path, tparams, mparams, smla_names, root\n ='results', filename=None)\n", (13823, 13905), False, 'from utils import 
argsweep, experiment, keyboard\n'), ((13936, 14064), 'utils.experiment.run', 'experiment.run', (['args.n_trials', 'save_path', 'model_evaluators', 'load_dataset', 'tparams', 'mparams'], {'n_workers': 'args.n_jobs', 'seed': 'None'}), '(args.n_trials, save_path, model_evaluators, load_dataset,\n tparams, mparams, n_workers=args.n_jobs, seed=None)\n', (13950, 14064), False, 'from utils import argsweep, experiment, keyboard\n'), ((2771, 2805), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""lbfgs"""'}), "(solver='lbfgs')\n", (2789, 2805), False, 'from sklearn.linear_model import LogisticRegression\n'), ((3427, 3451), 'numpy.mean', 'np.mean', (['(dataset._T == 0)'], {}), '(dataset._T == 0)\n', (3434, 3451), True, 'import numpy as np\n'), ((3529, 3553), 'numpy.mean', 'np.mean', (['(dataset._T == 1)'], {}), '(dataset._T == 1)\n', (3536, 3553), True, 'import numpy as np\n'), ((5668, 5692), 'numpy.mean', 'np.mean', (['(dataset._T == 0)'], {}), '(dataset._T == 0)\n', (5675, 5692), True, 'import numpy as np\n'), ((5770, 5794), 'numpy.mean', 'np.mean', (['(dataset._T == 1)'], {}), '(dataset._T == 1)\n', (5777, 5794), True, 'import numpy as np\n'), ((6622, 6646), 'numpy.mean', 'np.mean', (['(dataset._T == 0)'], {}), '(dataset._T == 0)\n', (6629, 6646), True, 'import numpy as np\n'), ((6724, 6748), 'numpy.mean', 'np.mean', (['(dataset._T == 1)'], {}), '(dataset._T == 1)\n', (6731, 6748), True, 'import numpy as np\n'), ((7697, 7721), 'numpy.mean', 'np.mean', (['(dataset._T == 0)'], {}), '(dataset._T == 0)\n', (7704, 7721), True, 'import numpy as np\n'), ((7799, 7823), 'numpy.mean', 'np.mean', (['(dataset._T == 1)'], {}), '(dataset._T == 1)\n', (7806, 7823), True, 'import numpy as np\n')] |
import numpy as np
from frontend import signal
class stt_framework():
    """Short-time transform framework.

    Wraps a transformation class (instantiated with **kwargs) and applies
    it window-by-window to a `signal`, analogous to an STFT pipeline built
    around an arbitrary transform.
    """
    def __init__(self, transformation, **kwargs):
        # `transformation` is a class; it is instantiated here with the
        # remaining keyword arguments.
        self.transformationInst = transformation(**kwargs)
    def stt_transform(self, y_signal:signal, nSamplesWindow:int=2**10, overlapFactor:int=0, windowType:str=None, suppressPrint:bool=False):
        """Apply the wrapped transform to each (possibly overlapping,
        windowed) segment of `y_signal`.

        Returns a complex64 array of shape (estimateSize, nParts): one
        transformed column per segment.
        """
        y_split_list = y_signal.split(nSamplesWindow, overlapFactor=overlapFactor, windowType=windowType)
        # y_split_list = y_signal.split(nSamplesWindow)
        nParts = len(y_split_list)
        # Output size is determined by the transformation from the first segment.
        y_hat = np.empty((self.transformationInst.estimateSize(y_split_list[0]),nParts), dtype=np.complex64)
        if not suppressPrint:
            print(f"Transformation output will be of shape {y_hat.shape}")
        for i in range(0, nParts):
            if not suppressPrint:
                print(f"Running iteration {i} of {nParts}")
            y_hat[:, i] = self.transformationInst.transform(y_split_list[i])
        # spectrum = np.fft.fft(padded) / fft_size # take the Fourier Transform and scale by the number of samples
        # autopower = np.abs(spectrum * np.conj(spectrum)) # find the autopower spectrum
        # result[i, :] = autopower[:fft_size] # append to the results array
        # result = 20*np.log10(result) # scale to db
        # result = np.clip(result, -40, 200) # clip values
        return y_hat
    def stt_transformInv(self, y_signal, nSamplesWindow=2**10, overlapFactor=0, windowType=None, suppressPrint=False):
        """Inverse short-time transform: overlap-add the per-column
        inverse transforms back into a single float64 array.

        NOTE(review): overlapFactor is assumed to be a fraction in [0, 1),
        giving hopSize = floor(nSamplesWindow * (1 - overlapFactor)) --
        confirm it matches the forward split() convention.
        """
        hopSize = np.int32(np.floor(nSamplesWindow * (1-overlapFactor)))
        # nParts = np.int32(np.ceil(len(y_signal.t) / np.float32(hopSize)))
        y_hat = np.zeros((len(y_signal.t)+1)*hopSize, dtype=np.float64)
        y_signal_part = signal()
        nParts = len(y_signal.t)
        for i in range(0,nParts):
            if not suppressPrint:
                print(f"Running iteration {i} of {nParts}")
            # Re-wrap column i as a signal object before inverse-transforming.
            y_signal_part.externalSample(y_signal.y[:,i], y_signal.t)
            pt = i*hopSize
            # if i == 0:
            #---a
            # y_hat[pt:pt+int(len(y_signal.f)*overlapFactor)] += np.float64(self.transformationInst.transform(y_signal_part)) # y_hat_temp = np.float64(self.transformationInst.transform(y_signal_part))
            #---
            #---b
            # y_hat_temp = np.float64(self.transformationInst.transform(y_signal_part))
            # if i == 0:
            #     y_hat[:len(y_signal.f)] += y_hat_temp
            # elif i == nParts-1:
            #     y_hat[-len(y_signal.f):] += y_hat_temp
            # else:
            #     y_hat[int(pt-hopSize/2):int(pt+len(y_signal.f)*overlapFactor+hopSize/2)] += y_hat_temp
            #---
            #---c
            # Variant (c), currently active: keep the second half of the
            # inverse transform, scale by the half-length, and overlap-add
            # into the output buffer at offset pt.
            y_hat_temp = np.float64(self.transformationInst.transform(y_signal_part))
            n = y_hat_temp.shape[0]//2
            y_hat_sliced =y_hat_temp[n:]/n
            y_hat[pt:pt+int(len(y_signal.f)*overlapFactor)] += y_hat_sliced
            #---
        return y_hat
    def postProcess(self, y_hat, f, t):
        """Apply the transformation's postProcess to every time column.

        Returns the processed y_hat, the (updated) frequency axis f, and
        the unchanged time axis t.
        """
        for t_idx in range(0, y_hat.shape[1]):
            y_hat[:,t_idx], f= self.transformationInst.postProcess(y_hat[:,t_idx], f)
        return y_hat, f, t
return y_hat, f, t | [
"frontend.signal",
"numpy.floor"
] | [((1795, 1803), 'frontend.signal', 'signal', ([], {}), '()\n', (1801, 1803), False, 'from frontend import signal\n'), ((1568, 1614), 'numpy.floor', 'np.floor', (['(nSamplesWindow * (1 - overlapFactor))'], {}), '(nSamplesWindow * (1 - overlapFactor))\n', (1576, 1614), True, 'import numpy as np\n')] |
import json
import matplotlib.style as style
import numpy as np
import pandas as pd
import pylab as pl
def make_rows(cngrs_prsn):
    """Flatten one legislator JSON record into per-term dictionaries.

    Produces one dict per term served, each carrying the person's full
    name, birthday (None when absent), gender, and the term's start/end
    dates, chamber type, and party (None when the record omits it).
    """
    bio = cngrs_prsn["bio"]
    full_name = cngrs_prsn["name"]["first"] + " " + cngrs_prsn["name"]["last"]
    birth = bio.get("birthday", None)
    sex = bio["gender"]
    return [
        {
            "name": full_name,
            "birthday": birth,
            "gender": sex,
            "term_start": term["start"],
            "term_end": term["end"],
            "term_type": term["type"],
            "party": term.get("party"),  # None when the record has no party
        }
        for term in cngrs_prsn["terms"]
    ]
def load_df_from_files():
    """Load historical + current legislator JSON files into a DataFrame.

    Each output row describes one term served by one Congressperson.
    Records that fail to parse are printed and skipped.

    Returns:
        pd.DataFrame with one row per (person, term).
    """
    with open("legislators-historical.json") as f:
        data_old = json.load(f)
    with open("legislators-current.json") as f:
        data_new = json.load(f)
    data = data_old + data_new
    rows = []
    for person in data:
        try:
            these_rows = make_rows(person)
        except Exception:
            # Malformed record: report it and skip.  Bug fix: the original
            # fell through to `rows.extend(these_rows)` after the except,
            # re-extending the previous person's rows (or raising
            # NameError if the first record failed).
            print(person)
            continue
        rows.extend(these_rows)
    df = pd.DataFrame(rows)
    return df
def clean_df(df):
    """Drop rows with missing birthdays and parse birthdays to datetime.

    Works on (and returns) a copy, so the caller's frame is untouched and
    the assignment below does not trigger pandas' chained-assignment
    (SettingWithCopy) warning.
    """
    # TODO: get birthdays for people missing birthdays
    df = df[~df.birthday.isnull()].copy()  # .copy() fixes SettingWithCopyWarning
    df["birthday"] = pd.to_datetime(df["birthday"])
    return df
def expand_df_dates(df):
    """Expand the dataframe so that each row has the age of a Congressperson in a
    particular year.
    This code based on:
    https://stackoverflow.com/questions/43832484/expanding-a-dataframe-based-on-start-and-end-columns-speed
    """
    # One year-end business-day timestamp per year of each term.
    # NOTE(review): freq="A" (annual/year-end) is deprecated in newer
    # pandas in favour of "YE" -- confirm the pandas version in use.
    dates = [pd.bdate_range(r[0], r[1], freq="A").to_series()
             for r in df[['term_start', 'term_end']].values]
    # Repeat every original row once per generated date, then attach the
    # dates as a new 'date' column.
    lens = [len(x) for x in dates]
    df = pd.DataFrame(
        {col:np.repeat(df[col].values, lens) for col in df.columns}
    ).assign(date=np.concatenate(dates))
    return df
def create_df():
    """Assemble the per-year age table for all Congresspeople."""
    frame = load_df_from_files()
    frame = clean_df(frame)
    frame = expand_df_dates(frame)
    # Approximate age in whole years: timedelta divided by 365, then the
    # day component (same quirky arithmetic as before, kept on purpose).
    frame["age_at_t"] = ((frame["date"] - frame["birthday"]) / 365).dt.days
    return frame
# Load that data
df = create_df()
# Limit to when next term ends (as of time of writing, 2019-03-09)
df = df[df["date"] <= "2020-12-31"]
# Set the style
# NOTE(review): "seaborn-whitegrid" was renamed "seaborn-v0_8-whitegrid"
# in newer matplotlib -- confirm the matplotlib version in use.
style.use("seaborn-whitegrid")
# Overall average age
df.groupby("date").age_at_t.mean().plot(figsize=(8, 4))
pl.title("Average Age of Congress")
pl.ylabel("Average Age")
pl.xlabel("Date")
pl.tight_layout()
pl.savefig("fig/time_avgage.png")
# Mean and Median
tmp = df.groupby("date").agg({"age_at_t": ["mean", "median"]}).plot()
pl.title("Average and Median Age of Congress")
# Age by Senate vs. House
tmp = (df
       .groupby(["date", "term_type"])
       .age_at_t
       .mean()
       .unstack())
# Assumes term_type values sort to (House, Senate) column order -- verify.
tmp.columns = ["House", "Senate"]
tmp.plot(figsize=(8, 4))
pl.title("Average Age of Congress by House")
pl.ylabel("Average Age")
pl.xlabel("Date")
pl.tight_layout()
pl.savefig("fig/time_avgage_byhouse.png")
# Age by Gender
(df
 .groupby(["date", "gender"])
 .age_at_t
 .mean()
 .unstack()
 .plot(figsize=(8, 4)))
pl.title("Average Age of Congress by Gender")
pl.ylabel("Average Age")
pl.xlabel("Date")
pl.tight_layout()
pl.savefig("fig/time_avgage_bygender.png")
# Min and Max Age
# df[df.age_at_t > 0].groupby(["date"]).agg({"age_at_t": ["max", "min"]}).plot(figsize=(8, 4))
tmp = (df
       .groupby(["date"])
       .agg({"age_at_t": ["max", "min"]})
       .plot(figsize=(8, 4)))
# NOTE(review): `tmp` here is the object returned by .plot(), not the
# aggregated frame, so the assignment below does not relabel the plotted
# series; and the aggregation order is ("max", "min") while the labels
# read ["Min", "Max"] -- both look unintended, confirm.
tmp.columns = ["Min", "Max"]
pl.title("Min and Max Age of Congress")
pl.ylabel("Age")
pl.xlabel("Date")
pl.tight_layout()
pl.savefig("fig/time_minmaxage.png")
tmp = (df[df.date >= "1900"]
       .groupby(["date"])
       .agg({"age_at_t": ["max", "min"]})
       .plot(figsize=(8, 4)))
tmp.columns = ["Min", "Max"]
pl.title("Min and Max Age of Congress")
pl.ylabel("Age")
pl.xlabel("Date")
pl.tight_layout()
pl.savefig("fig/time_minmaxage_filtered.png")
# Age by Party
# Yeah this doesn't look very good.
(df
 .groupby(["date", "party"])
 .age_at_t
 .mean()
 .unstack()
 .plot())
pl.title("Average Age of Congress by Party")
pl.ylabel("Average Age")
pl.xlabel("Date")
pl.tight_layout()
pl.savefig("fig/time_avgage_byparty_all.png")
# Age by Dem v Rep
(df[df.party.isin(["Democrat", "Republican", "Independent"])]
 .groupby(["date", "party"])
 .age_at_t
 .mean()
 .unstack()
 .plot())
pl.title("Average Age of Congress by (some) Party")
pl.ylabel("Average Age")
pl.xlabel("Date")
pl.tight_layout()
pl.savefig("fig/time_avgage_byparty_some.png")
| [
"pylab.title",
"numpy.repeat",
"pylab.tight_layout",
"pylab.savefig",
"pylab.xlabel",
"pandas.bdate_range",
"json.load",
"matplotlib.style.use",
"numpy.concatenate",
"pandas.DataFrame",
"pylab.ylabel",
"pandas.to_datetime"
] | [((2609, 2639), 'matplotlib.style.use', 'style.use', (['"""seaborn-whitegrid"""'], {}), "('seaborn-whitegrid')\n", (2618, 2639), True, 'import matplotlib.style as style\n'), ((2720, 2755), 'pylab.title', 'pl.title', (['"""Average Age of Congress"""'], {}), "('Average Age of Congress')\n", (2728, 2755), True, 'import pylab as pl\n'), ((2756, 2780), 'pylab.ylabel', 'pl.ylabel', (['"""Average Age"""'], {}), "('Average Age')\n", (2765, 2780), True, 'import pylab as pl\n'), ((2781, 2798), 'pylab.xlabel', 'pl.xlabel', (['"""Date"""'], {}), "('Date')\n", (2790, 2798), True, 'import pylab as pl\n'), ((2799, 2816), 'pylab.tight_layout', 'pl.tight_layout', ([], {}), '()\n', (2814, 2816), True, 'import pylab as pl\n'), ((2817, 2850), 'pylab.savefig', 'pl.savefig', (['"""fig/time_avgage.png"""'], {}), "('fig/time_avgage.png')\n", (2827, 2850), True, 'import pylab as pl\n'), ((2941, 2987), 'pylab.title', 'pl.title', (['"""Average and Median Age of Congress"""'], {}), "('Average and Median Age of Congress')\n", (2949, 2987), True, 'import pylab as pl\n'), ((3155, 3199), 'pylab.title', 'pl.title', (['"""Average Age of Congress by House"""'], {}), "('Average Age of Congress by House')\n", (3163, 3199), True, 'import pylab as pl\n'), ((3200, 3224), 'pylab.ylabel', 'pl.ylabel', (['"""Average Age"""'], {}), "('Average Age')\n", (3209, 3224), True, 'import pylab as pl\n'), ((3225, 3242), 'pylab.xlabel', 'pl.xlabel', (['"""Date"""'], {}), "('Date')\n", (3234, 3242), True, 'import pylab as pl\n'), ((3243, 3260), 'pylab.tight_layout', 'pl.tight_layout', ([], {}), '()\n', (3258, 3260), True, 'import pylab as pl\n'), ((3261, 3302), 'pylab.savefig', 'pl.savefig', (['"""fig/time_avgage_byhouse.png"""'], {}), "('fig/time_avgage_byhouse.png')\n", (3271, 3302), True, 'import pylab as pl\n'), ((3416, 3461), 'pylab.title', 'pl.title', (['"""Average Age of Congress by Gender"""'], {}), "('Average Age of Congress by Gender')\n", (3424, 3461), True, 'import pylab as pl\n'), ((3462, 3486), 
'pylab.ylabel', 'pl.ylabel', (['"""Average Age"""'], {}), "('Average Age')\n", (3471, 3486), True, 'import pylab as pl\n'), ((3487, 3504), 'pylab.xlabel', 'pl.xlabel', (['"""Date"""'], {}), "('Date')\n", (3496, 3504), True, 'import pylab as pl\n'), ((3505, 3522), 'pylab.tight_layout', 'pl.tight_layout', ([], {}), '()\n', (3520, 3522), True, 'import pylab as pl\n'), ((3523, 3565), 'pylab.savefig', 'pl.savefig', (['"""fig/time_avgage_bygender.png"""'], {}), "('fig/time_avgage_bygender.png')\n", (3533, 3565), True, 'import pylab as pl\n'), ((3803, 3842), 'pylab.title', 'pl.title', (['"""Min and Max Age of Congress"""'], {}), "('Min and Max Age of Congress')\n", (3811, 3842), True, 'import pylab as pl\n'), ((3843, 3859), 'pylab.ylabel', 'pl.ylabel', (['"""Age"""'], {}), "('Age')\n", (3852, 3859), True, 'import pylab as pl\n'), ((3860, 3877), 'pylab.xlabel', 'pl.xlabel', (['"""Date"""'], {}), "('Date')\n", (3869, 3877), True, 'import pylab as pl\n'), ((3878, 3895), 'pylab.tight_layout', 'pl.tight_layout', ([], {}), '()\n', (3893, 3895), True, 'import pylab as pl\n'), ((3896, 3932), 'pylab.savefig', 'pl.savefig', (['"""fig/time_minmaxage.png"""'], {}), "('fig/time_minmaxage.png')\n", (3906, 3932), True, 'import pylab as pl\n'), ((4076, 4115), 'pylab.title', 'pl.title', (['"""Min and Max Age of Congress"""'], {}), "('Min and Max Age of Congress')\n", (4084, 4115), True, 'import pylab as pl\n'), ((4116, 4132), 'pylab.ylabel', 'pl.ylabel', (['"""Age"""'], {}), "('Age')\n", (4125, 4132), True, 'import pylab as pl\n'), ((4133, 4150), 'pylab.xlabel', 'pl.xlabel', (['"""Date"""'], {}), "('Date')\n", (4142, 4150), True, 'import pylab as pl\n'), ((4151, 4168), 'pylab.tight_layout', 'pl.tight_layout', ([], {}), '()\n', (4166, 4168), True, 'import pylab as pl\n'), ((4169, 4214), 'pylab.savefig', 'pl.savefig', (['"""fig/time_minmaxage_filtered.png"""'], {}), "('fig/time_minmaxage_filtered.png')\n", (4179, 4214), True, 'import pylab as pl\n'), ((4348, 4392), 'pylab.title', 
'pl.title', (['"""Average Age of Congress by Party"""'], {}), "('Average Age of Congress by Party')\n", (4356, 4392), True, 'import pylab as pl\n'), ((4393, 4417), 'pylab.ylabel', 'pl.ylabel', (['"""Average Age"""'], {}), "('Average Age')\n", (4402, 4417), True, 'import pylab as pl\n'), ((4418, 4435), 'pylab.xlabel', 'pl.xlabel', (['"""Date"""'], {}), "('Date')\n", (4427, 4435), True, 'import pylab as pl\n'), ((4436, 4453), 'pylab.tight_layout', 'pl.tight_layout', ([], {}), '()\n', (4451, 4453), True, 'import pylab as pl\n'), ((4454, 4499), 'pylab.savefig', 'pl.savefig', (['"""fig/time_avgage_byparty_all.png"""'], {}), "('fig/time_avgage_byparty_all.png')\n", (4464, 4499), True, 'import pylab as pl\n'), ((4659, 4710), 'pylab.title', 'pl.title', (['"""Average Age of Congress by (some) Party"""'], {}), "('Average Age of Congress by (some) Party')\n", (4667, 4710), True, 'import pylab as pl\n'), ((4711, 4735), 'pylab.ylabel', 'pl.ylabel', (['"""Average Age"""'], {}), "('Average Age')\n", (4720, 4735), True, 'import pylab as pl\n'), ((4736, 4753), 'pylab.xlabel', 'pl.xlabel', (['"""Date"""'], {}), "('Date')\n", (4745, 4753), True, 'import pylab as pl\n'), ((4754, 4771), 'pylab.tight_layout', 'pl.tight_layout', ([], {}), '()\n', (4769, 4771), True, 'import pylab as pl\n'), ((4772, 4818), 'pylab.savefig', 'pl.savefig', (['"""fig/time_avgage_byparty_some.png"""'], {}), "('fig/time_avgage_byparty_some.png')\n", (4782, 4818), True, 'import pylab as pl\n'), ((1381, 1399), 'pandas.DataFrame', 'pd.DataFrame', (['rows'], {}), '(rows)\n', (1393, 1399), True, 'import pandas as pd\n'), ((1584, 1614), 'pandas.to_datetime', 'pd.to_datetime', (["df['birthday']"], {}), "(df['birthday'])\n", (1598, 1614), True, 'import pandas as pd\n'), ((1114, 1126), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1123, 1126), False, 'import json\n'), ((1189, 1201), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1198, 1201), False, 'import json\n'), ((2152, 2173), 'numpy.concatenate', 
'np.concatenate', (['dates'], {}), '(dates)\n', (2166, 2173), True, 'import numpy as np\n'), ((1902, 1938), 'pandas.bdate_range', 'pd.bdate_range', (['r[0]', 'r[1]'], {'freq': '"""A"""'}), "(r[0], r[1], freq='A')\n", (1916, 1938), True, 'import pandas as pd\n'), ((2077, 2108), 'numpy.repeat', 'np.repeat', (['df[col].values', 'lens'], {}), '(df[col].values, lens)\n', (2086, 2108), True, 'import numpy as np\n')] |
import cv2
import numpy as np
from matplotlib import pyplot as plt

# Histogram equalization implemented by hand via the cumulative
# distribution function (the commented cv2.equalizeHist call below is the
# built-in equivalent for grayscale images).
# Bug fix: the original path was a plain string containing "\C", "\I" and
# "\P", which are invalid escape sequences (DeprecationWarning, and a
# SyntaxError in future Python). A raw string yields the identical path.
img = cv2.imread(r'C:\Code_python\Image\Picture\Tiger.jpg', 0)  # 0 -> grayscale
# img2 = cv2.equalizeHist(img)
# Intensity histogram and its cumulative sum (the CDF).
hist, bins = np.histogram(img.flatten(), 256, [0, 256])
cdf = hist.cumsum()
cdf_normalized = cdf * hist.max() / cdf.max()  # scaled CDF (plotting aid)
# Mask zero entries so min()/max() ignore unused intensity levels, then
# stretch the CDF onto the full 0..255 range (classic equalization map).
cdf_m = np.ma.masked_equal(cdf, 0)
cdf_m = (cdf_m - cdf_m.min()) * 255 / (cdf_m.max() - cdf_m.min())
cdf = np.ma.filled(cdf_m, 0).astype('uint8')
img2 = cdf[img]  # remap every pixel through the equalization LUT
cv2.namedWindow('before')
cv2.imshow('before', img)
cv2.namedWindow('after')
cv2.imshow('after', img2)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.destroyAllWindows() | [
"numpy.ma.masked_equal",
"cv2.imshow",
"numpy.ma.filled",
"cv2.destroyAllWindows",
"cv2.waitKey",
"cv2.namedWindow",
"cv2.imread"
] | [((76, 135), 'cv2.imread', 'cv2.imread', (['"""C:\\\\Code_python\\\\Image\\\\Picture\\\\Tiger.jpg"""', '(0)'], {}), "('C:\\\\Code_python\\\\Image\\\\Picture\\\\Tiger.jpg', 0)\n", (86, 135), False, 'import cv2\n'), ((292, 318), 'numpy.ma.masked_equal', 'np.ma.masked_equal', (['cdf', '(0)'], {}), '(cdf, 0)\n', (310, 318), True, 'import numpy as np\n'), ((446, 471), 'cv2.namedWindow', 'cv2.namedWindow', (['"""before"""'], {}), "('before')\n", (461, 471), False, 'import cv2\n'), ((473, 498), 'cv2.imshow', 'cv2.imshow', (['"""before"""', 'img'], {}), "('before', img)\n", (483, 498), False, 'import cv2\n'), ((499, 523), 'cv2.namedWindow', 'cv2.namedWindow', (['"""after"""'], {}), "('after')\n", (514, 523), False, 'import cv2\n'), ((525, 550), 'cv2.imshow', 'cv2.imshow', (['"""after"""', 'img2'], {}), "('after', img2)\n", (535, 550), False, 'import cv2\n'), ((551, 565), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (562, 565), False, 'import cv2\n'), ((567, 590), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (588, 590), False, 'import cv2\n'), ((386, 408), 'numpy.ma.filled', 'np.ma.filled', (['cdf_m', '(0)'], {}), '(cdf_m, 0)\n', (398, 408), True, 'import numpy as np\n')] |
from mesa import Agent, Model
from mesa.time import RandomActivation
from mesa.space import MultiGrid
from mesa.datacollection import DataCollector
from mesa.batchrunner import BatchRunner
import matplotlib.pyplot as plt
import numpy as np
def compute_gini(model):
    """Return the Gini coefficient of agent wealth across the model."""
    wealths = sorted(agent.wealth for agent in model.schedule.agents)
    n = model.num_agents
    # Weighted sum used by the standard Gini formula on sorted values.
    weighted = sum((n - rank) * w for rank, w in enumerate(wealths))
    b = weighted / (n * sum(wealths))
    return 1 + (1 / n) - 2 * b
class MoneyModel(Model):
    """A model with some number of agents."""

    def __init__(self, N, width, height):
        self.num_agents = N
        self.grid = MultiGrid(width, height, True)
        self.schedule = RandomActivation(self)
        self.running = True
        # Create the agents and scatter them across the grid.
        for agent_id in range(self.num_agents):
            agent = MoneyAgent(agent_id, self)
            self.schedule.add(agent)
            # Drop the agent onto a randomly chosen cell (x drawn first,
            # then y, to preserve the RNG consumption order).
            cell_x = self.random.randrange(self.grid.width)
            cell_y = self.random.randrange(self.grid.height)
            self.grid.place_agent(agent, (cell_x, cell_y))
        # Record the model-level Gini index and each agent's wealth.
        self.datacollector = DataCollector(
            model_reporters={"Gini": compute_gini},  # `compute_gini` defined above
            agent_reporters={"Wealth": "wealth"})

    def step(self):
        """Advance the model by one step."""
        self.datacollector.collect(self)
        self.schedule.step()
class MoneyAgent(Agent):
    """ An agent with fixed initial wealth."""
    def __init__(self, unique_id, model):
        super(MoneyAgent, self).__init__(unique_id, model)
        self.wealth = 1
    def move(self):
        """Move to a random neighboring cell (Moore neighborhood)."""
        possible_steps = self.model.grid.get_neighborhood(
            self.pos,
            moore=True,
            include_center=False)
        new_position = self.random.choice(possible_steps)
        self.model.grid.move_agent(self, new_position)
    def give_money(self):
        """Give one unit of wealth to the poorest agent on this cell.

        NOTE: `cellmates` includes this agent itself; when it is the
        poorest the transfer is a net no-op (matches prior behavior).
        """
        cellmates = self.model.grid.get_cell_list_contents([self.pos])
        if len(cellmates) > 1:
            # BUG FIX: the previous dict comprehension keyed agents by
            # wealth, silently collapsing agents with equal wealth and
            # picking an arbitrary survivor. min() with a key keeps every
            # candidate while still selecting a poorest one.
            poorest_cellmate = min(cellmates, key=lambda mate: mate.wealth)
            poorest_cellmate.wealth += 1
            self.wealth -= 1
    def step(self):
        self.move()
        if self.wealth > 0:
            self.give_money()
# Run the model for 100 steps.
model = MoneyModel(50, 10, 10)
for _ in range(100):
    model.step()

# Heat map of how many agents ended up in each grid cell.
agent_counts = np.zeros((model.grid.width, model.grid.height))
for cell_content, x, y in model.grid.coord_iter():
    agent_counts[x][y] = len(cell_content)
plt.imshow(agent_counts, interpolation='nearest')
plt.colorbar()
plt.show()

# Plot the collected model-level Gini index over time.
gini = model.datacollector.get_model_vars_dataframe()
plt.plot(gini)
plt.show()
# agent_wealth = model.datacollector.get_agent_vars_dataframe()
# print(agent_wealth)
# batch runner
# one_agent_wealth = agent_wealth.xs(14, level="AgentID")
# plt.plot(one_agent_wealth.Wealth)
# #plt.show()
#
#
# fixed_params = {
#     "width": 10,
#     "height": 10
# }
# variable_params = {"N": range(10, 500, 10)}
#
# # The variables parameters will be invoke along with the fixed parameters allowing for either or both to be honored.
# batch_run = BatchRunner(
#     MoneyModel,
#     variable_params,
#     fixed_params,
#     iterations=5,
#     max_steps=100,
#     model_reporters={"Gini": compute_gini}
# )
#
# batch_run.run_all()
#
# run_data = batch_run.get_model_vars_dataframe()
# plt.scatter(run_data.N, run_data.Gini)
# plt.show()
"matplotlib.pyplot.imshow",
"mesa.datacollection.DataCollector",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.plot",
"mesa.space.MultiGrid",
"numpy.zeros",
"mesa.time.RandomActivation",
"matplotlib.pyplot.show"
] | [((2715, 2762), 'numpy.zeros', 'np.zeros', (['(model.grid.width, model.grid.height)'], {}), '((model.grid.width, model.grid.height))\n', (2723, 2762), True, 'import numpy as np\n'), ((2903, 2952), 'matplotlib.pyplot.imshow', 'plt.imshow', (['agent_counts'], {'interpolation': '"""nearest"""'}), "(agent_counts, interpolation='nearest')\n", (2913, 2952), True, 'import matplotlib.pyplot as plt\n'), ((2953, 2967), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2965, 2967), True, 'import matplotlib.pyplot as plt\n'), ((2968, 2978), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2976, 2978), True, 'import matplotlib.pyplot as plt\n'), ((3052, 3066), 'matplotlib.pyplot.plot', 'plt.plot', (['gini'], {}), '(gini)\n', (3060, 3066), True, 'import matplotlib.pyplot as plt\n'), ((3067, 3077), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3075, 3077), True, 'import matplotlib.pyplot as plt\n'), ((648, 678), 'mesa.space.MultiGrid', 'MultiGrid', (['width', 'height', '(True)'], {}), '(width, height, True)\n', (657, 678), False, 'from mesa.space import MultiGrid\n'), ((703, 725), 'mesa.time.RandomActivation', 'RandomActivation', (['self'], {}), '(self)\n', (719, 725), False, 'from mesa.time import RandomActivation\n'), ((1150, 1246), 'mesa.datacollection.DataCollector', 'DataCollector', ([], {'model_reporters': "{'Gini': compute_gini}", 'agent_reporters': "{'Wealth': 'wealth'}"}), "(model_reporters={'Gini': compute_gini}, agent_reporters={\n 'Wealth': 'wealth'})\n", (1163, 1246), False, 'from mesa.datacollection import DataCollector\n')] |
import json
import io
import time
import numpy as np
try:
    # Python 2: prefer the ``unicode`` builtin for writing text.
    to_unicode = unicode
except NameError:
    # Python 3: every str is already unicode.
    to_unicode = str
def save_json(file_path, dictionary):
    """Serialize *dictionary* as pretty-printed UTF-8 JSON at *file_path*."""
    with io.open(file_path, 'w', encoding='utf8') as outfile:
        serialized = json.dumps(
            dictionary,
            indent=4,
            separators=(',', ': '),
            ensure_ascii=False,
        )
        outfile.write(to_unicode(serialized))
def get_camera_calibration_matrix(camera_sensor_options):
    """Build the 3x3 pinhole intrinsics matrix for a camera sensor.

    The principal point sits at the image center and the focal length
    is derived from the field of view (given in degrees).
    """
    width = float(camera_sensor_options['image_size_x'])
    height = float(camera_sensor_options['image_size_y'])
    fov = float(camera_sensor_options['fov'])

    intrinsics = np.identity(3)
    # Principal point: image center.
    intrinsics[0, 2] = int(width) / 2.0
    intrinsics[1, 2] = int(height) / 2.0
    # Focal length in pixels: f = (w / 2) / tan(fov / 2), fov in degrees.
    focal = int(width) / (2.0 * np.tan(fov / 2 * 2 * np.pi / 360.0))
    intrinsics[0, 0] = intrinsics[1, 1] = focal
    return intrinsics
class CustomTimer:
    """Thin wrapper around the best wall-clock timer available."""

    def __init__(self):
        # ``time.perf_counter`` is missing on very old interpreters;
        # fall back to ``time.time`` there.
        if hasattr(time, 'perf_counter'):
            self.timer = time.perf_counter
        else:
            self.timer = time.time

    def time(self):
        """Return the current timer reading in seconds."""
        return self.timer()
| [
"numpy.identity",
"json.dumps",
"numpy.tan",
"io.open"
] | [((726, 740), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (737, 740), True, 'import numpy as np\n'), ((186, 226), 'io.open', 'io.open', (['file_path', '"""w"""'], {'encoding': '"""utf8"""'}), "(file_path, 'w', encoding='utf8')\n", (193, 226), False, 'import io\n'), ((260, 336), 'json.dumps', 'json.dumps', (['dictionary'], {'indent': '(4)', 'separators': "(',', ': ')", 'ensure_ascii': '(False)'}), "(dictionary, indent=4, separators=(',', ': '), ensure_ascii=False)\n", (270, 336), False, 'import json\n'), ((900, 935), 'numpy.tan', 'np.tan', (['(FOV / 2 * 2 * np.pi / 360.0)'], {}), '(FOV / 2 * 2 * np.pi / 360.0)\n', (906, 935), True, 'import numpy as np\n')] |
import os
import sys
import gym
import time
import math
import time
import scipy
import skimage
import random
import logging
import pybullet
import numpy as np
from gym import spaces
from gym.utils import seeding
from pprint import pprint
from skimage.transform import rescale
from PIL import Image, ImageDraw
from ..utils.color import random_color
from ..utils.vector import rotate_vector, normalize
from ..utils.math import normalize_angle, positive_component, rotation_change
# NOTE(review): COUNT is assigned 0 twice in this constants block (here and
# again below, after HOST/PORT); one of the two assignments is redundant.
COUNT = 0
# Off-screen render resolution.
RENDER_WIDTH = 960
RENDER_HEIGHT = 720
RENDER_SIZE = (RENDER_HEIGHT, RENDER_WIDTH)
EPISODE_LEN = 100
# Robot-to-robot distances (presumably meters — TODO confirm units) at which
# another robot is considered dangerous / collided with.
ROBOT_DANGER_DISTANCE = 0.8
ROBOT_CRASH_DISTANCE = 0.4
# Reward-shaping constants (see SingleEnvironment.reward).
TARGET_REWARD = 1
CHECKPOINT_REWARD = 0.1
CHECKPOINT_DISTANCE = 0.5
BATTERY_THRESHOLD = 0.6
BATTERY_WEIGHT = -0.01
ROTATION_COST = -0.01
CRASHED_PENALTY = -1
MAP_GRID_SCALE = 0.2
NUM_CHECKPOINTS = 8
STATE_BUFFER_SIZE = 100
MIN_EPISODE_REWARD = -1 # Terminate if the reward gets lower than this
TARGET_DISTANCE_THRESHOLD = 0.6 # Max distance to the target
# Default endpoint for the external position/geometry service.
HOST, PORT = "localhost", 9999
COUNT = 0
# Default environment configuration; each key is documented in
# SingleEnvironment.__init__'s docstring.
DEFAULTS = {
    'is_discrete': False,
    'target_policy': 'random',
    'robot_policy': 'random',
    'geometry_policy': 'initial',
    'reset_on_target': False,
    'default_start': [-1,0],
    'default_target': [1,1],
    'timestep': 0.4, # Robot makes a decision every 0.4 s
    'verbosity': 0,
    'debug': False,
    'renders': False,
    'headless': False,
}
def pad(array, reference_shape, offsets):
    """
    array: Array to be padded
    reference_shape: tuple of size of ndarray to create
    offsets: list of offsets (number of elements must be equal to the dimension of the array)
    will throw a ValueError if offsets is too big and the reference_shape cannot handle the offsets
    """
    # Start from an all-zero array of the requested shape.
    padded = np.zeros(reference_shape)
    # Build one slice per dimension covering offset .. offset + extent.
    region = tuple(
        slice(offsets[dim], offsets[dim] + array.shape[dim])
        for dim in range(array.ndim)
    )
    # Copy the input into place; everything else stays zero.
    padded[region] = array
    return padded
class SingleEnvironment():

    def __init__(
            self,
            base_env,
            robot=None,
            config=None
        ):
        """
        Single robot environment
        @base_env: A wrapper around the pybullet simulator. May be shared across
            multiple environents
        @robot: The turtlebot robot that will receive control actions from this env
        @config: Additional enviroment configuration
        @config.target_policy: Where to put the robot target at the start of the simulation
            If "random" then the target position is set to a random position on restart
            If "api" then the target position is pulled from the API on restart
        @config.robot_policy: Controls how the robot position is updated
            If "random" then the robot position is set to a random position on restart
            If "api" then the robot position is pulled from the API on restart
            If "subscribe" then the robot position is constant pulled from the API
        @config.geometry_policy: Controls how the geometry is updated
            If "initial" then the geometry is pulled once from the API
            If "api" then the geometry position is pulled from the API on restart
            If "subscribe" then the geometry is constantly pulled from Kafka
        @config.verbosity:
            0 - Silent
            1 - Normal logging
            2 - Verbose
        """
        # FIX: avoid the mutable default argument ``config={}``; ``None``
        # is interchangeable with an empty dict for callers.
        config = dict(DEFAULTS, **(config or {}))
        if config['verbosity'] > 1:
            print("Initializing new Single Robot Environment")
            print("Environment Config:", config)
        self.base = base_env
        self.physics = base_env.physics
        self.color = random_color()
        self.verbosity = config["verbosity"]
        self.timestep = config["timestep"]
        # Scale observed velocities so they are comparable across timesteps.
        self.velocity_multiplier = math.sqrt(DEFAULTS["timestep"] / self.timestep)
        self.default_start = config["default_start"]
        self.default_target = config["default_target"]
        self.reset_on_target = config["reset_on_target"]
        self.target_min_distance = config.get("target_min_distance")
        self.target_max_distance = config.get("target_max_distance")
        self.debug = config["debug"]
        self.renders = config["renders"]
        self.isDiscrete = config["is_discrete"]
        # Number of base-simulator substeps per environment step.
        self.action_repeat = int(self.timestep / base_env.timestep)
        self.previous_state = None
        self.ckpt_count = 4
        print("Using velocity multiplier:", self.velocity_multiplier)

        # Environment Policies
        self.robot_policy = config['robot_policy']
        self.target_policy = config['target_policy']
        self.geometry_policy = config['geometry_policy']

        self.targetUniqueId = -1
        self.robot = robot             # The controlled robot
        self.checkpoints = []          # Each checkpoint is given an id. Closest checkpoints are near zero index
        self.dead_checkpoints = []     # List of checkpoints that are not active
        self.collision_objects = []    # Other objects that can be collided with
        self.buildingIds = []          # Each plane is given an id

        # Building Map
        self.building_map = self.base.loader.map.fetch()
        self.state_cache_buffer = []
        self.reward_so_far = 0

        # Camera observation
        self.width = 320               # The resolution of the sensor image (320x240)
        self.height = 240
        self.cam_dist = 3.
        self.cam_pitch = 0.
        self.cam_yaw = 0.
        self.cam_roll = 0.

        self.envStepCounter = 0
        self.startedTime = time.time()
        self.base.start()

        # Define the observation space a dict of simpler spaces
        self.observation_space = spaces.Dict({
            'robot_theta': spaces.Box(low=-math.pi, high=math.pi, shape=(1,), dtype=np.float32),
            'robot_velocity': spaces.Box(low=-10, high=10, shape=(3,), dtype=np.float32),
            'target': spaces.Box(low=-50, high=50, shape=(2,), dtype=np.float32),
            'ckpts': spaces.Box(low=-50, high=50, shape=(self.ckpt_count, 2), dtype=np.float32),
        })

        if self.geometry_policy == "subscribe":
            self.base.loader.mesh.subscribe_robot_position()

        if self.isDiscrete:
            self.action_space = spaces.Discrete(9)
        else:
            action_min = -1
            action_max = 1
            self.action_space = spaces.Box(low=action_min, high=action_max, shape=(2,), dtype=np.float32)
        self.viewer = None
        self.reset()
        print("Initialized env with %.3f timestep (%i base repeat)" % (self.timestep, self.action_repeat))

    def __del__(self):
        self.physics = 0

    def reset(self):
        """Reset the environment. Move the target and the car"""
        steps = self.envStepCounter
        duration = time.time() - self.startedTime
        if self.debug:
            print("Reset after %i steps in %.2f seconds" % (steps, duration))
        self.startedTime = time.time()
        self.envStepCounter = 0
        self.reward_so_far = 0
        self.action = [0, 0]
        self.reset_robot_position()
        self.reset_target_position()
        self.reset_checkpoints()
        # Allow all the objects to reach equilibrium
        self.physics.stepSimulation()
        robot_pos, robot_orn = self.physics.getBasePositionAndOrientation(self.robot.racecarUniqueId)
        state = self.get_state(robot_pos, robot_orn)
        self.camera_orn = robot_orn
        # Reset again if the current state is not valid
        if self.termination(state):
            return self.reset()
        return self.get_observation(state)

    def _get_cache(self):
        """Snapshot the poses of the robot, target and checkpoints."""
        robot_pos, robot_orn = self.physics.getBasePositionAndOrientation(self.robot.racecarUniqueId)
        target_pos, target_orn = self.physics.getBasePositionAndOrientation(self.targetUniqueId)
        checkpoints = [self.physics.getBasePositionAndOrientation(c)[0] for c in self.checkpoints]
        return {
            "robot_pos": robot_pos,
            "robot_orn": robot_orn,
            "target_pos": target_pos,
            "target_orn": target_orn,
            "checkpoint_pos": checkpoints
        }

    def _restore_cache(self, cache):
        """Restore a pose snapshot previously taken with _get_cache."""
        self.physics.resetBasePositionAndOrientation(self.targetUniqueId, cache["target_pos"], cache["target_orn"])
        self.physics.resetBasePositionAndOrientation(self.robot.racecarUniqueId, cache["robot_pos"], cache["robot_orn"])
        for checkpoint in self.checkpoints:
            self.remove_checkpoint(checkpoint)
        for checkpoint_pos in cache["checkpoint_pos"]:
            self.create_checkpoint(checkpoint_pos)

    def reset_robot_position(self):
        """Move the robot to a new position"""
        if self.robot_policy == "random":
            start = self.base.get_reachable_point(self.default_start)
            start = start + [0.1]
            theta = 2 * math.pi * random.random()
            orn = pybullet.getQuaternionFromEuler((0, 0, theta))
            self.robot.set_pose(start, orn)
            # Overwrite the previous state so we do not get huge velocities
            if self.previous_state is not None:
                self.previous_state["robot_pos"] = start
                self.previous_state["robot_theta"] = theta
        elif self.robot_policy == "api":
            # FIX: was the misspelled ``NotImplimentedError`` (an undefined
            # name, which would raise NameError instead).
            raise NotImplementedError("API Robot not implemented")
        elif self.robot_policy == "subscribe":
            self.base.sync_robot_position()
        else:
            raise ValueError("Invalid robot policy", self.robot_policy)

    def reset_target_position(self):
        """Move the target to a new position"""
        if self.target_policy == "random":
            position = self.base.get_reachable_point(
                self.default_start,
                self.target_min_distance,
                self.target_max_distance
            )
        elif self.target_policy == "api":
            # FIX: was the misspelled ``NotImplimentedError``.
            raise NotImplementedError("API Target not implemented")
        else:
            raise ValueError("Invalid target policy", self.robot_policy)
        # Create a target if needed
        if self.targetUniqueId < 0:
            self.targetUniqueId = self.base.create_target(position, self.color)
        # Move target to new position
        position = position + [0.25]
        _, orn = self.physics.getBasePositionAndOrientation(self.targetUniqueId)
        self.physics.resetBasePositionAndOrientation(self.targetUniqueId, np.array(position), orn)

    def reset_checkpoints(self):
        """Create new checkpoints at [(vx,yy)...] locations"""
        for checkpoints in reversed(self.checkpoints):
            self.dead_checkpoints.append(self.checkpoints.pop())
        # Use motion planner to find checkpoint locations
        base_pos, carorn = self.physics.getBasePositionAndOrientation(self.robot.racecarUniqueId)
        target_pos, target_orn = self.physics.getBasePositionAndOrientation(self.targetUniqueId)
        start_pos = base_pos[:2]
        goal_pos = target_pos[:2]
        nodes = self.base.get_path_to_goal(goal_pos, start_pos)
        # Create new checkpoints
        if nodes is None or len(nodes) == 0:
            if self.verbosity > 0:
                logging.info("RRT failed to find trajectory")
            nodes = []
        else:
            # Spread NUM_CHECKPOINTS markers evenly along the planned path.
            last_checkpoint = len(nodes) - 1
            indicies = np.linspace(0, last_checkpoint, NUM_CHECKPOINTS)
            for i in indicies:
                node = nodes[int(i)]
                position = (node[0], node[1], 0.2)
                self.create_checkpoint(position)

    def create_checkpoint(self, position):
        """
        Create a new checkpoint object
        May take the checkpoint from the dead checkpoints list
        """
        orientation = (0, 0, 0, 1)
        if len(self.dead_checkpoints):
            # Recycle a dead checkpoint instead of allocating a new body.
            ckpt = self.dead_checkpoints.pop()
            self.physics.resetBasePositionAndOrientation(ckpt, position, orientation)
        else:
            ckpt = self.base.create_shape(pybullet.GEOM_CYLINDER,
                position,
                color=self.color,
                radius=0.15,
                length=0.04,
                specular=[0.3, 0.3, 0.3, 0.3]
            )
        self.checkpoints.append(ckpt)
        return ckpt

    def get_checkpoint_positions(self):
        """
        Return all the checkpoint positions
        """
        return [self.physics.getBasePositionAndOrientation(c)[0] for c in self.checkpoints]

    def remove_checkpoint(self, ckpt):
        """
        Remove a checkpoint from the map, and self.checkpoints
        Also moves the ckpt from self.checkpoints to self.dead_checkpoints
        """
        orientation = (0, 0, 0, 1)
        self.checkpoints.remove(ckpt)
        # Park the body far away rather than deleting it, so it can be reused.
        self.physics.resetBasePositionAndOrientation(ckpt, (10, 10, 10), orientation)
        self.dead_checkpoints.append(ckpt)

    def get_state(self, robot_pos, robot_orn):
        """
        Return a dict that describes the state of the car
        Calculating the state is computationally intensive and should be done sparingly
        """
        robot_euler = pybullet.getEulerFromQuaternion(robot_orn)
        robot_theta = robot_euler[2]
        #carmat = self.physics.getMatrixFromQuaternion(robot_orn)
        tarpos, tarorn = self.physics.getBasePositionAndOrientation(self.targetUniqueId)
        invCarPos, invCarOrn = self.physics.invertTransform(robot_pos, robot_orn)
        tarPosInCar, tarOrnInCar = self.physics.multiplyTransforms(invCarPos, invCarOrn, tarpos, tarorn)

        # Iterate through checkpoints appending them to the distance list
        # Delete any checkpoints close to the robot, and the subsequent checkpoints
        ckpt_positions = []
        is_at_checkpoint = False
        for ckpt in self.checkpoints:
            pos, _ = self.physics.getBasePositionAndOrientation(ckpt)
            rel_pos = np.array(pos) - np.array(robot_pos)
            rel_distance = np.linalg.norm(rel_pos)
            if rel_distance < CHECKPOINT_DISTANCE:
                is_at_checkpoint = True
            if is_at_checkpoint:
                self.remove_checkpoint(ckpt)
            else:
                ckpt_positions.append(tuple(rel_pos[0:2]))

        # Sort checkpoints. Pad with zeros until length n_ckpt
        ckpt_positions = list(reversed(ckpt_positions)) + [(10, 10)] * self.ckpt_count
        ckpt_positions = ckpt_positions[:self.ckpt_count]

        # Write robot positions to the map
        robot_pose = self.base.get_robot_positions()

        state = {
            "robot_pos": robot_pos,
            "robot_orn": robot_orn,
            "robot_theta": robot_theta,
            "robot_vx": 0,
            "robot_vy": 0,
            "robot_vt": 0,
            "other_robots": [],
            "rel_ckpt_positions": ckpt_positions,
            "rel_target_orientation": math.atan2(tarPosInCar[1], tarPosInCar[0]),
            "rel_target_distance": math.sqrt(tarPosInCar[1]**2 + tarPosInCar[0]**2),
            "is_at_checkpoint": is_at_checkpoint,
            "is_crashed": self.is_crashed(),
            "is_at_target": self.is_at_target(),
            "is_broken": False,
        }

        # Finite-difference velocities from the previous environment step.
        if self.previous_state is not None:
            state["robot_vx"] = robot_pos[0] - self.previous_state["robot_pos"][0]
            state["robot_vy"] = robot_pos[1] - self.previous_state["robot_pos"][1]
            state["robot_vt"] = rotation_change(robot_theta, self.previous_state["robot_theta"])

        # Check if the simulation is broken
        if robot_pos[2] < 0 or robot_pos[2] > 1:
            if self.verbosity > 0:
                print("Something went wrong with the simulation")
            state["is_broken"] = True

        # Calculate the distance to other robots
        for robot_id in self.base.robot_ids:
            if robot_id != self.robot.racecarUniqueId:
                enemy_position, _ = self.physics.getBasePositionAndOrientation(robot_id)
                state["other_robots"].append(np.linalg.norm(np.array(robot_pos) - np.array(enemy_position)))
        if np.any(np.less(state["other_robots"], [ROBOT_CRASH_DISTANCE])):
            state["is_crashed"] = True

        if self.debug:
            print("Target orientation:", state["rel_target_orientation"])
            print("Target position:", state["rel_target_distance"])
        if self.debug > 1:
            print("State:")
            pprint(state)
        return state

    def get_observation(self, state):
        """
        Return the observation that is passed to the learning algorithm
        """
        def encode_checkpoints(ckpts, robot_orn):
            """Encode checkpoints to [theta,r]"""
            encoded = []
            for c in ckpts:
                orn = math.atan2(c[1], c[0]) - robot_orn
                orn = normalize_angle(orn)
                dist = math.sqrt(c[1]**2 + c[0]**2)
                encoded.append([orn, dist])
            return np.array(encoded, dtype=np.float32)

        def encode_target(state):
            """Encode target to [theta,r]"""
            orn = normalize_angle(state["rel_target_orientation"])
            dist = state["rel_target_distance"]
            return np.array([orn, dist], dtype=np.float32)

        obs = {
            'robot_theta': np.array([state["robot_theta"]], dtype=np.float32),
            'robot_velocity': self.velocity_multiplier * np.array([
                state["robot_vx"],
                state["robot_vy"],
                state["robot_vt"]
            ], dtype=np.float32),
            'target': encode_target(state),
            'ckpts': encode_checkpoints(state["rel_ckpt_positions"], state["robot_theta"]),
        }
        return obs

    def get_observation_array(self):
        """
        Return simulated observations at every point in the grid
        The observation array has dimension (ny, nx, n_observations)
        """
        # FIX: was the misspelled ``NotImplimentedError``. The legacy grid-
        # sampling implementation that followed the raise was unreachable and
        # referenced ``self.world``, which is never set on this class, so it
        # has been removed.
        raise NotImplementedError("Can not get bulk observations")

    def act(self, action):
        """
        Move the simulation one step forward
        @action is the robot action, in the form [rotation, velocity]
        """
        if self.renders:
            basePos, orn = self.physics.getBasePositionAndOrientation(self.robot.racecarUniqueId)
            # Comment out this line to prevent the camera moving with the car
            #self.physics.resetDebugVisualizerCamera(1, 30, -40, basePos)

        if self.isDiscrete:
            # Map the discrete action index onto (forward, steer) pairs.
            fwd = [-1, -1, -1, 0, 0, 0, 1, 1, 1]
            steerings = [-0.6, 0, 0.6, -0.6, 0, 0.6, -0.6, 0, 0.6]
            forward = fwd[action]
            steer = steerings[action]
            realaction = [forward, steer]
        else:
            realaction = action
        self.action = action
        self.robot.applyAction(realaction)

    def step(self):
        """
        Step the physics simulator.
        Steps the simulator forward @self.action_repeat steps
        If robot policy is set to 'subscribe', then we also pull
        robot positions from Kafka. Some steps are applied afterwards for
        latency compensation
        """
        for i in range(self.action_repeat):
            self.base.step()
        if self.robot_policy == "subscribe":
            self.base.sync_robot_position()
            self.base.step()  # Latency compensation

    def observe(self):
        """Compute (observation, reward, done, info) for the current state."""
        # Keep the simulation loop as lean as possible.
        robot_pos, robot_orn = self.physics.getBasePositionAndOrientation(self.robot.racecarUniqueId)
        action = self.action
        state = self.get_state(robot_pos, robot_orn)
        observation = self.get_observation(state)
        reward = self.reward(state, action)
        self.envStepCounter += 1
        self.previous_state = state
        self.reward_so_far += reward
        done = self.termination(state)
        info = dict(timeout=False)
        # Respawn the target and clear the isAtTarget flag
        if not self.reset_on_target and state["is_at_target"]:
            self.reset_target_position()
            self.reset_checkpoints()
        if self.debug:
            self._validate_observation(observation)
        # FIX: return the computed ``info`` dict; previously an empty dict
        # was returned and ``info`` was silently discarded.
        return observation, reward, done, info

    def is_crashed(self):
        """
        Return true if the robots have crashed
        Does not check robot-robot collision as this is done using distances
        """
        objects = self.base.walls + self.base.objects
        for obj in objects:
            contact = self.physics.getContactPoints(self.robot.racecarUniqueId, obj)
            if len(contact):
                return True
        return False

    def is_at_target(self):
        """Return True when the robot is within the target threshold."""
        basePos, _ = self.physics.getBasePositionAndOrientation(self.robot.racecarUniqueId)
        targetPos, _ = self.physics.getBasePositionAndOrientation(self.targetUniqueId)
        return np.linalg.norm(np.array(basePos) - np.array(targetPos)) < TARGET_DISTANCE_THRESHOLD

    def termination(self, state):
        """Return True if the episode should end"""
        if state["is_crashed"] or state["is_broken"]:
            return True
        if state["is_at_target"] and self.reset_on_target:
            return True
        return self.reward_so_far < MIN_EPISODE_REWARD

    def reward(self, state, action):
        """
        Return the reward:
            Target Reward: 1 if target reached, else 0
            Collision Reward: -1 if crashed, else 0
            Battery Reward: Penalty if rotation or velocity exceeds 0.5
            Rotation Reward: Small penalty for any rotation
        """
        # Add positive reward if we are near the target
        if state["is_at_target"]:
            target_reward = TARGET_REWARD
        else:
            target_reward = 0

        # End the simulation with negative reward
        if state["is_crashed"]:
            crashed_reward = CRASHED_PENALTY
        else:
            crashed_reward = 0

        # Reward for reaching a checkpoint
        if state["is_at_checkpoint"]:
            checkpoint_reward = CHECKPOINT_REWARD
        else:
            checkpoint_reward = 0

        # Penalty for closeness
        danger_reward = 0
        for other in state["other_robots"]:
            if other < ROBOT_DANGER_DISTANCE:
                danger_reward -= 0.3 * math.exp(20 * (ROBOT_CRASH_DISTANCE - other))
        danger_reward = max(-1, danger_reward)

        # There is a cost to acceleration and turning
        # We use the squared cost to incentivise careful use of battery resources
        battery_reward = BATTERY_WEIGHT * np.sum(
            positive_component(np.abs(action) - BATTERY_THRESHOLD)
        )

        # There is an additional cost due to rotation
        rotation_reward = ROTATION_COST * abs(state["robot_vt"])

        # Total reward is the sum of components
        reward = target_reward + crashed_reward + battery_reward + rotation_reward + checkpoint_reward + danger_reward

        if self.debug:
            print("---- Step %i Summary -----" % self.envStepCounter)
            print("Action: ", action)
            print("Target Reward: %.3f" % target_reward)
            print("Checkpoint Reward: %.3f" % checkpoint_reward)
            print("Crashed Reward: %.3f" % crashed_reward)
            print("Battery Reward: %.3f" % battery_reward)
            print("Rotation Reward: %.3f" % rotation_reward)
            print("Danger Reward: %.3f" % danger_reward)
            print("Total Reward: %.3f\n" % reward)
        return reward

    def render(self, mode='rgb_array', close=False, width=640, height=480):
        """Render the simulation to a frame"""
        if mode != "rgb_array":
            return np.array([])
        # Move the camera with the base_pos
        base_pos, carorn = self.physics.getBasePositionAndOrientation(self.robot.racecarUniqueId)
        state = self.get_state(base_pos, carorn)
        # Follow the robot smoothly (exponential moving average of orientation)
        self.camera_orn = 0.99 * np.array(self.camera_orn) + 0.01 * np.array(carorn)
        # Position the camera behind the car, slightly above
        dist = 2
        world_up = [0, 0, 1]
        dir_vec = np.array(rotate_vector(self.camera_orn, [2 * dist, 0, 0]))
        cam_eye = np.subtract(np.array(base_pos), np.add(dir_vec, np.array([0, 0, -2 * dist])))
        cam_up = normalize(world_up - np.multiply(np.dot(world_up, dir_vec), dir_vec))
        view_matrix = self.physics.computeViewMatrix(
            cameraEyePosition=cam_eye,
            cameraTargetPosition=base_pos,
            cameraUpVector=cam_up)
        proj_matrix = self.physics.computeProjectionMatrixFOV(
            fov=60, aspect=float(width) / height,
            nearVal=0.1, farVal=100.0)
        (_, _, px, _, seg) = self.physics.getCameraImage(
            width=width, height=height, viewMatrix=view_matrix,
            projectionMatrix=proj_matrix, renderer=pybullet.ER_BULLET_HARDWARE_OPENGL)
        rgb_array = np.array(px, dtype=np.uint8)
        rgb_array = rgb_array.reshape((height, width, 4))
        return rgb_array

    def render_observation(self, width=128, height=128):
        """Render a segmentation-based observation image from behind the car.

        NOTE(review): this references ``self.world``, which is never assigned
        on this class — calling it will raise AttributeError; confirm intent.
        """
        # Move the camera with the base_pos
        base_pos, carorn = self.physics.getBasePositionAndOrientation(self.robot.racecarUniqueId)
        # Position the camera behind the car, slightly above
        dir_vec = np.array(rotate_vector(carorn, [2, 0, 0]))
        cam_eye = np.subtract(np.array(base_pos), np.array([0, 0, -5]))
        cam_up = normalize(self.world.world_up - np.multiply(np.dot(self.world.world_up, dir_vec), dir_vec))
        view_matrix = self.physics.computeViewMatrix(
            cameraEyePosition=cam_eye,
            cameraTargetPosition=base_pos,
            cameraUpVector=cam_up)
        proj_matrix = self.physics.computeProjectionMatrixFOV(
            fov=60, aspect=float(width) / height,
            nearVal=0.1, farVal=100.0)
        (_, _, px, _, seg) = self.physics.getCameraImage(
            width=width, height=height, viewMatrix=view_matrix,
            projectionMatrix=proj_matrix, renderer=pybullet.ER_BULLET_HARDWARE_OPENGL)
        #rgb_array = np.array(px, dtype=np.uint8)
        #rgb_array = rgb_array.reshape((height, width, 4))
        # Scale segmentation ids into a visible grayscale range, then
        # replicate the channel so the output is (height, width, 4).
        rgb_array = 40 * np.array(seg, dtype=np.uint8)
        rgb_array = rgb_array.reshape((height, width, 1))
        rgb_array = np.tile(rgb_array, (1, 1, 4))
        return rgb_array

    def _validate_observation(self, obs):
        """
        Validate an observation against the observation space
        """
        for key in obs:
            state = obs[key]
            box = self.observation_space[key]
            if not box.contains(state):
                raise ValueError("Box {} does not contain {}".format(box, state))
        # Test the whole space
        assert(self.observation_space.contains(obs))
| [
"math.sqrt",
"numpy.array",
"numpy.linalg.norm",
"math.exp",
"logging.info",
"pprint.pprint",
"pybullet.getEulerFromQuaternion",
"numpy.arange",
"numpy.less",
"pybullet.getQuaternionFromEuler",
"numpy.linspace",
"numpy.dot",
"numpy.tile",
"numpy.abs",
"gym.spaces.Discrete",
"math.atan2... | [((1822, 1847), 'numpy.zeros', 'np.zeros', (['reference_shape'], {}), '(reference_shape)\n', (1830, 1847), True, 'import numpy as np\n'), ((3943, 3990), 'math.sqrt', 'math.sqrt', (["(DEFAULTS['timestep'] / self.timestep)"], {}), "(DEFAULTS['timestep'] / self.timestep)\n", (3952, 3990), False, 'import math\n'), ((5715, 5726), 'time.time', 'time.time', ([], {}), '()\n', (5724, 5726), False, 'import time\n'), ((7108, 7119), 'time.time', 'time.time', ([], {}), '()\n', (7117, 7119), False, 'import time\n'), ((13258, 13300), 'pybullet.getEulerFromQuaternion', 'pybullet.getEulerFromQuaternion', (['robot_orn'], {}), '(robot_orn)\n', (13289, 13300), False, 'import pybullet\n'), ((18325, 18411), 'numpy.arange', 'np.arange', (['self.world.grid.min_x', 'self.world.grid.max_x', '(self.world.grid.size / 4)'], {}), '(self.world.grid.min_x, self.world.grid.max_x, self.world.grid.\n size / 4)\n', (18334, 18411), True, 'import numpy as np\n'), ((18421, 18507), 'numpy.arange', 'np.arange', (['self.world.grid.min_y', 'self.world.grid.max_y', '(self.world.grid.size / 4)'], {}), '(self.world.grid.min_y, self.world.grid.max_y, self.world.grid.\n size / 4)\n', (18430, 18507), True, 'import numpy as np\n'), ((25754, 25782), 'numpy.array', 'np.array', (['px'], {'dtype': 'np.uint8'}), '(px, dtype=np.uint8)\n', (25762, 25782), True, 'import numpy as np\n'), ((27145, 27174), 'numpy.tile', 'np.tile', (['rgb_array', '(1, 1, 4)'], {}), '(rgb_array, (1, 1, 4))\n', (27152, 27174), True, 'import numpy as np\n'), ((6410, 6428), 'gym.spaces.Discrete', 'spaces.Discrete', (['(9)'], {}), '(9)\n', (6425, 6428), False, 'from gym import spaces\n'), ((6530, 6603), 'gym.spaces.Box', 'spaces.Box', ([], {'low': 'action_min', 'high': 'action_max', 'shape': '(2,)', 'dtype': 'np.float32'}), '(low=action_min, high=action_max, shape=(2,), dtype=np.float32)\n', (6540, 6603), False, 'from gym import spaces\n'), ((6951, 6962), 'time.time', 'time.time', ([], {}), '()\n', (6960, 6962), False, 'import 
time\n'), ((9085, 9131), 'pybullet.getQuaternionFromEuler', 'pybullet.getQuaternionFromEuler', (['(0, 0, theta)'], {}), '((0, 0, theta))\n', (9116, 9131), False, 'import pybullet\n'), ((10587, 10605), 'numpy.array', 'np.array', (['position'], {}), '(position)\n', (10595, 10605), True, 'import numpy as np\n'), ((11491, 11539), 'numpy.linspace', 'np.linspace', (['(0)', 'last_checkpoint', 'NUM_CHECKPOINTS'], {}), '(0, last_checkpoint, NUM_CHECKPOINTS)\n', (11502, 11539), True, 'import numpy as np\n'), ((14094, 14117), 'numpy.linalg.norm', 'np.linalg.norm', (['rel_pos'], {}), '(rel_pos)\n', (14108, 14117), True, 'import numpy as np\n'), ((14999, 15041), 'math.atan2', 'math.atan2', (['tarPosInCar[1]', 'tarPosInCar[0]'], {}), '(tarPosInCar[1], tarPosInCar[0])\n', (15009, 15041), False, 'import math\n'), ((15078, 15130), 'math.sqrt', 'math.sqrt', (['(tarPosInCar[1] ** 2 + tarPosInCar[0] ** 2)'], {}), '(tarPosInCar[1] ** 2 + tarPosInCar[0] ** 2)\n', (15087, 15130), False, 'import math\n'), ((16221, 16275), 'numpy.less', 'np.less', (["state['other_robots']", '[ROBOT_CRASH_DISTANCE]'], {}), "(state['other_robots'], [ROBOT_CRASH_DISTANCE])\n", (16228, 16275), True, 'import numpy as np\n'), ((16548, 16561), 'pprint.pprint', 'pprint', (['state'], {}), '(state)\n', (16554, 16561), False, 'from pprint import pprint\n'), ((17087, 17122), 'numpy.array', 'np.array', (['encoded'], {'dtype': 'np.float32'}), '(encoded, dtype=np.float32)\n', (17095, 17122), True, 'import numpy as np\n'), ((17337, 17376), 'numpy.array', 'np.array', (['[orn, dist]'], {'dtype': 'np.float32'}), '([orn, dist], dtype=np.float32)\n', (17345, 17376), True, 'import numpy as np\n'), ((17421, 17471), 'numpy.array', 'np.array', (["[state['robot_theta']]"], {'dtype': 'np.float32'}), "([state['robot_theta']], dtype=np.float32)\n", (17429, 17471), True, 'import numpy as np\n'), ((24516, 24528), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (24524, 24528), True, 'import numpy as np\n'), ((25050, 25068), 
'numpy.array', 'np.array', (['base_pos'], {}), '(base_pos)\n', (25058, 25068), True, 'import numpy as np\n'), ((26220, 26238), 'numpy.array', 'np.array', (['base_pos'], {}), '(base_pos)\n', (26228, 26238), True, 'import numpy as np\n'), ((26240, 26260), 'numpy.array', 'np.array', (['[0, 0, -5]'], {}), '([0, 0, -5])\n', (26248, 26260), True, 'import numpy as np\n'), ((27037, 27066), 'numpy.array', 'np.array', (['seg'], {'dtype': 'np.uint8'}), '(seg, dtype=np.uint8)\n', (27045, 27066), True, 'import numpy as np\n'), ((5892, 5960), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-math.pi)', 'high': 'math.pi', 'shape': '(1,)', 'dtype': 'np.float32'}), '(low=-math.pi, high=math.pi, shape=(1,), dtype=np.float32)\n', (5902, 5960), False, 'from gym import spaces\n'), ((5992, 6050), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-10)', 'high': '(10)', 'shape': '(3,)', 'dtype': 'np.float32'}), '(low=-10, high=10, shape=(3,), dtype=np.float32)\n', (6002, 6050), False, 'from gym import spaces\n'), ((6074, 6132), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-50)', 'high': '(50)', 'shape': '(2,)', 'dtype': 'np.float32'}), '(low=-50, high=50, shape=(2,), dtype=np.float32)\n', (6084, 6132), False, 'from gym import spaces\n'), ((6155, 6229), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-50)', 'high': '(50)', 'shape': '(self.ckpt_count, 2)', 'dtype': 'np.float32'}), '(low=-50, high=50, shape=(self.ckpt_count, 2), dtype=np.float32)\n', (6165, 6229), False, 'from gym import spaces\n'), ((9051, 9066), 'random.random', 'random.random', ([], {}), '()\n', (9064, 9066), False, 'import random\n'), ((11342, 11387), 'logging.info', 'logging.info', (['"""RRT failed to find trajectory"""'], {}), "('RRT failed to find trajectory')\n", (11354, 11387), False, 'import logging\n'), ((14031, 14044), 'numpy.array', 'np.array', (['pos'], {}), '(pos)\n', (14039, 14044), True, 'import numpy as np\n'), ((14047, 14066), 'numpy.array', 'np.array', (['robot_pos'], {}), '(robot_pos)\n', (14055, 14066), 
True, 'import numpy as np\n'), ((16996, 17028), 'math.sqrt', 'math.sqrt', (['(c[1] ** 2 + c[0] ** 2)'], {}), '(c[1] ** 2 + c[0] ** 2)\n', (17005, 17028), False, 'import math\n'), ((17530, 17620), 'numpy.array', 'np.array', (["[state['robot_vx'], state['robot_vy'], state['robot_vt']]"], {'dtype': 'np.float32'}), "([state['robot_vx'], state['robot_vy'], state['robot_vt']], dtype=\n np.float32)\n", (17538, 17620), True, 'import numpy as np\n'), ((24789, 24814), 'numpy.array', 'np.array', (['self.camera_orn'], {}), '(self.camera_orn)\n', (24797, 24814), True, 'import numpy as np\n'), ((24822, 24838), 'numpy.array', 'np.array', (['carorn'], {}), '(carorn)\n', (24830, 24838), True, 'import numpy as np\n'), ((25086, 25113), 'numpy.array', 'np.array', (['[0, 0, -2 * dist]'], {}), '([0, 0, -2 * dist])\n', (25094, 25113), True, 'import numpy as np\n'), ((16895, 16917), 'math.atan2', 'math.atan2', (['c[1]', 'c[0]'], {}), '(c[1], c[0])\n', (16905, 16917), False, 'import math\n'), ((21730, 21747), 'numpy.array', 'np.array', (['basePos'], {}), '(basePos)\n', (21738, 21747), True, 'import numpy as np\n'), ((21750, 21769), 'numpy.array', 'np.array', (['targetPos'], {}), '(targetPos)\n', (21758, 21769), True, 'import numpy as np\n'), ((23147, 23192), 'math.exp', 'math.exp', (['(20 * (ROBOT_CRASH_DISTANCE - other))'], {}), '(20 * (ROBOT_CRASH_DISTANCE - other))\n', (23155, 23192), False, 'import math\n'), ((25164, 25189), 'numpy.dot', 'np.dot', (['world_up', 'dir_vec'], {}), '(world_up, dir_vec)\n', (25170, 25189), True, 'import numpy as np\n'), ((26323, 26359), 'numpy.dot', 'np.dot', (['self.world.world_up', 'dir_vec'], {}), '(self.world.world_up, dir_vec)\n', (26329, 26359), True, 'import numpy as np\n'), ((23454, 23468), 'numpy.abs', 'np.abs', (['action'], {}), '(action)\n', (23460, 23468), True, 'import numpy as np\n'), ((16153, 16172), 'numpy.array', 'np.array', (['robot_pos'], {}), '(robot_pos)\n', (16161, 16172), True, 'import numpy as np\n'), ((16175, 16199), 'numpy.array', 
'np.array', (['enemy_position'], {}), '(enemy_position)\n', (16183, 16199), True, 'import numpy as np\n')] |
import numpy as np
def fx(x):
    """Integrand for the demo: sin^2(sqrt(100*x))."""
    return np.sin(np.sqrt(100 * x)) ** 2
def trapezoidal(a, b, e, f=None):
    """Composite trapezoidal rule with iterative interval halving.

    Starting from a single interval, the number of subintervals is doubled
    each iteration; the new estimate reuses the previous one and adds only
    the newly introduced midpoints.  Iteration stops once the Richardson
    error estimate |I_m - I_{m-1}| / 3 drops below ``e``.

    Params:
    ------
    a, b: (float) integration limits
    e: (float) tolerance on the error estimate
    f: (callable, optional) integrand; defaults to the module-level ``fx``
        (backward compatible with the original hard-coded behaviour)

    Output:
    ------
    (i, er): lists of successive integral estimates and error estimates.
    er[0] is the placeholder string " ------" (no estimate exists for the
    first step), matching the original output format.
    """
    if f is None:
        f = fx  # original behaviour: always integrate the module-level fx
    n = 1
    h = [(b - a) / n]
    er = [" ------"]
    s = (f(a) + f(b)) / 2
    i = [h[0] * s]
    # Hard cap of 999 halvings guards against a tolerance that is never met.
    for m in range(1, 1000):
        n = 2 * n
        h.append((b - a) / n)
        # Previous estimate contributes half; only odd (new) nodes are added.
        i.append(0.5 * i[m - 1])
        for t in range(1, n, 2):
            i[m] = i[m] + h[m] * f(a + t * h[m])
        er.append((1 / 3) * abs(i[m] - i[m - 1]))
        if er[m] <= e:
            break
    return i, er
# Integrate fx on [0, 1] and report every refinement step.
estimates, err_track = trapezoidal(0, 1, 1e-6)
print("n     I      error")
for step, (value, err) in enumerate(zip(estimates, err_track), start=1):
    print(step, "   ", value, "   ", err)
| [
"numpy.sqrt"
] | [((51, 67), 'numpy.sqrt', 'np.sqrt', (['(100 * x)'], {}), '(100 * x)\n', (58, 67), True, 'import numpy as np\n')] |
import autogp
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import sklearn.metrics.pairwise as sk
import time
import scipy
import seaborn as sns
import random
from kerpy.Kernel import Kernel
from kerpy.MaternKernel import MaternKernel
from kerpy.GaussianKernel import GaussianKernel
# This code does the following:
# generate the values for f
# generate the values for exp of f = intensity
# generate with this intensity some observations (thus each obs will correspond to a different intensity)
# use the inputs and the outputs in the GP model
# look at the posterior distribution for the intensity to see if the true values of lambda are close to the posterior means
## NB. When using the Matern Kernel there is still a problem of numerical stability for the inversion of the Cholesky matrix! I had to increase the jitter.
## Similar problem when computing the RBF kernel
#np.random.seed(1990)
# Fixed seed so that the synthetic dataset is reproducible.
np.random.seed(1500)
# Generate synthetic data. N_all = total number of observations, N = training points.
N_all = 200
N = 50
# Set data parameters (ground-truth hyper-parameters used to generate the data)
offset_data = 1.0
lengthscale_data = 1.0/3.0 #fixed to be a third of the range of the dataset
sigma_data = 1.0
# One shared Gaussian perturbation applied to all initial hyper-parameters
# below, so the optimiser starts away from the ground truth.
random_noise = np.random.normal(loc=0.0, scale=1.0, size=None)
#Set initial parameters
#gamma = 1. #to check
#lengthscale_initial = np.sqrt(1/(2*gamma)) + random_noise
lengthscale_initial = lengthscale_data + random_noise
offset_initial = offset_data + random_noise
sigma_initial = sigma_data + random_noise
# Regular grid of inputs on [0, 1], shaped (N_all, 1).
inputs = np.linspace(0, 1, num=N_all)[:, np.newaxis]
sigma = GaussianKernel(sigma = lengthscale_data).kernel(inputs,inputs) #lengthscale in kerpy is called sigma
#np.savetxt("../../Workspace/updated_AutoGP/R_plots/inputs.csv", inputs, header="inputs", delimiter=",")
# There is a problem of numerical precision: add a small diagonal jitter so
# the covariance stays positive definite for the Cholesky factorisation.
pert = np.zeros((N_all,N_all))
np.fill_diagonal(pert, 0.001)
#print('perturbation',pert)
sigma = sigma + pert
#print('this is the covariance used to generate the data', sigma)
#print('this is the covariance shape', sigma.shape)
#print('its cholesky', np.linalg.cholesky(sigma+pert))
#sigma = MaternKernel(width = lengthscale_data, nu = 1.5, sigma = sigma_data).kernel(inputs,inputs) #Matern 3_2
#sigma = MaternKernel(width = lengthscale_data, nu = 2.5, sigma = sigma_data).kernel(inputs,inputs) #Matern 5_2
#sigma = sk.rbf_kernel(inputs, inputs)
#sigma = sk.rbf_kernel(inputs, inputs, gamma = 50)
#print('shape of sigma', sigma.shape)
n_samples = 1 # num of realisations for the GP
# Draw the latent GP sample f ~ N(0, sigma) at every input location.
process_values = np.random.multivariate_normal(mean=np.repeat(0,N_all), cov=sigma)
process_values = np.reshape(process_values, (N_all,n_samples))
# LGCP link: intensity = exp(f + offset), guaranteed positive.
sample_intensity = np.exp(process_values + offset_data)
outputs = np.ones((N_all,n_samples))
# Observed counts: one Poisson draw per location, rate = local intensity.
for i in range(N_all):
    for j in range(n_samples):
        outputs[i,j] = np.random.poisson(lam=sample_intensity[i,j])
# selects training and test sets: shuffle the indices once and take the
# first N points for training, the remaining N_all - N for testing.
idx = np.arange(N_all)
np.random.shuffle(idx) # random permutation so the split is not spatially ordered
xtrain = inputs[idx[:N]]
ytrain = outputs[idx[:N]]
data = autogp.datasets.DataSet(xtrain, ytrain)
xtest = inputs[idx[N:]]
ytest = outputs[idx[N:]]
# Initialize the Gaussian process: LGCP likelihood plus an RBF kernel, both
# started from the perturbed hyper-parameters defined above.
likelihood = autogp.likelihoods.LGCP(offset=offset_initial)
kernel = [autogp.kernels.RadialBasis(1, lengthscale=lengthscale_initial, std_dev=sigma_initial)]
# Alternative kernels tried previously:
# kernel = [autogp.kernels.Matern_3_2(1, lengthscale=lengthscale_initial, std_dev=sigma_initial)]
# kernel = [autogp.kernels.Matern_5_2(1, lengthscale=lengthscale_initial, std_dev=sigma_initial)]
# Fractions of the training set used as inducing inputs, and a slot to record
# the wall-clock training time of each run.
sparsity_vector = np.array([1.0, 0.5, 0.2, 0.1])
times = np.zeros(sparsity_vector.shape[0])
# Sweep the sparsity levels: for each one, train a sparse variational GP,
# record the wall-clock time, export predictions for R, and save plots.
for i in range(sparsity_vector.shape[0]):
    # Re-create the dataset object so each run starts from a fresh epoch state.
    data = autogp.datasets.DataSet(xtrain, ytrain)
    sparsity_factor = sparsity_vector[i]
    print('sparsity factor is', sparsity_factor)
    inducing_number = int(sparsity_factor*N)
    print('number of inducing inputs', inducing_number)
    # Inducing inputs: a random subset of the training inputs.
    id_sparse = np.arange(N)
    np.random.shuffle(id_sparse)
    inducing_inputs = xtrain[id_sparse[:inducing_number]]
    # Define the model
    model = autogp.GaussianProcess(likelihood, kernel, inducing_inputs, num_components=2, diag_post=True)
    # Define the optimizer
    optimizer = tf.train.RMSPropOptimizer(0.005)
    # Train the model, timing the optimisation.
    start = time.time()
    print("Start the training")
    print("The sparsity factor is" + " " + str(sparsity_factor))
    model.fit(data, optimizer, loo_steps=0, var_steps=50, epochs=1000, display_step=30)
    end = time.time()
    time_elapsed = end-start
    times[i] = time_elapsed
    print("Execution finished in seconds", time_elapsed)
    # Predict new inputs: posterior mean and variance at the test locations.
    # (FIX: the original called model.predict(xtest) twice, discarding half of
    # each result; a single call already returns both quantities.)
    ypred, post_var = model.predict(xtest)
    ypred_np = np.asarray(ypred)
    post_var_np = np.asarray(post_var)
    path = "../../Workspace/updated_AutoGP/R_plots/"
    sparse = str(sparsity_factor)
    var_distr = "2sparse"
    # Save the data to export to R
    np.savetxt(path + sparse + var_distr + "data_inputs.csv", inputs, header='inputs', delimiter=",")
    np.savetxt(path + sparse + var_distr + "data_outputs.csv", outputs, header='outputs', delimiter=",")
    np.savetxt(path + sparse + var_distr + "xtest.csv", xtest, header='xtest', delimiter=",")
    np.savetxt(path + sparse + var_distr + "ytest.csv", ytest, header='ytest', delimiter=",")
    np.savetxt(path + sparse + var_distr + "xtrain.csv", xtrain, header='xtrain', delimiter=",")
    np.savetxt(path + sparse + var_distr + "ytrain.csv", ytrain, header='ytrain', delimiter=",")
    np.savetxt(path + sparse + var_distr + "sample_intensity_test.csv", sample_intensity[idx[N:]], header='sample_intensity_test', delimiter=",")
    np.savetxt(path + sparse + var_distr + "total_results_ypred.csv", ypred_np, header='ypred', delimiter=",")
    np.savetxt(path + sparse + var_distr + "total_results_postvar.csv", post_var_np, header='post_var', delimiter=",")
    # Plot the training set, the test set and the posterior mean of the
    # intensity, which equals E[y] under the LGCP model.
    first_line, = plt.plot(xtrain, ytrain, '.', mew=2, label = "a")
    second_line, = plt.plot(xtest, ytest, 'o', mew=2, label = "b")
    third_line, = plt.plot(xtest, ypred, 'x', mew=2, label = "c")
    plt.ylabel('Value of the process')
    plt.xlabel('x')
    plt.legend([first_line, second_line, third_line], ['Training set', 'Test set', 'Predicted y values'])
    plt.savefig(path + sparse + 'first_plot.png')
    plt.show()
    # Plot the posterior intensity against the intensity used to generate the data.
    first_line, = plt.plot(xtest, ypred, 'x', mew=2)
    second_line, = plt.plot(xtest, sample_intensity[idx[N:]], 'o', mew=2)
    plt.ylabel('Intensity')
    plt.xlabel('x')
    plt.legend([first_line, second_line], ['Posterior mean', 'True mean'])
    plt.savefig(path + sparse + 'second_plot.png')
    plt.show()
    # Histograms of the true intensity and the posterior mean intensity.
    sns.distplot(sample_intensity, label='Sample intensity')
    sns.distplot(ypred, label = 'Posterior mean intensity')
    plt.ylabel('Frequency of the intensity')
    plt.xlabel('x')
    plt.legend()
    plt.savefig(path + sparse + 'third_plot.png')
    plt.show()
print('time vector', times)
| [
"matplotlib.pyplot.ylabel",
"numpy.array",
"kerpy.GaussianKernel.GaussianKernel",
"numpy.arange",
"numpy.reshape",
"numpy.repeat",
"numpy.random.poisson",
"seaborn.distplot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.asarray",
"autogp.datasets.DataSet",
"numpy.exp",
"num... | [((928, 948), 'numpy.random.seed', 'np.random.seed', (['(1500)'], {}), '(1500)\n', (942, 948), True, 'import numpy as np\n'), ((1205, 1252), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': '(1.0)', 'size': 'None'}), '(loc=0.0, scale=1.0, size=None)\n', (1221, 1252), True, 'import numpy as np\n'), ((1821, 1845), 'numpy.zeros', 'np.zeros', (['(N_all, N_all)'], {}), '((N_all, N_all))\n', (1829, 1845), True, 'import numpy as np\n'), ((1845, 1874), 'numpy.fill_diagonal', 'np.fill_diagonal', (['pert', '(0.001)'], {}), '(pert, 0.001)\n', (1861, 1874), True, 'import numpy as np\n'), ((2601, 2647), 'numpy.reshape', 'np.reshape', (['process_values', '(N_all, n_samples)'], {}), '(process_values, (N_all, n_samples))\n', (2611, 2647), True, 'import numpy as np\n'), ((2667, 2703), 'numpy.exp', 'np.exp', (['(process_values + offset_data)'], {}), '(process_values + offset_data)\n', (2673, 2703), True, 'import numpy as np\n'), ((2715, 2742), 'numpy.ones', 'np.ones', (['(N_all, n_samples)'], {}), '((N_all, n_samples))\n', (2722, 2742), True, 'import numpy as np\n'), ((2891, 2907), 'numpy.arange', 'np.arange', (['N_all'], {}), '(N_all)\n', (2900, 2907), True, 'import numpy as np\n'), ((2908, 2930), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (2925, 2930), True, 'import numpy as np\n'), ((3009, 3048), 'autogp.datasets.DataSet', 'autogp.datasets.DataSet', (['xtrain', 'ytrain'], {}), '(xtrain, ytrain)\n', (3032, 3048), False, 'import autogp\n'), ((3148, 3194), 'autogp.likelihoods.LGCP', 'autogp.likelihoods.LGCP', ([], {'offset': 'offset_initial'}), '(offset=offset_initial)\n', (3171, 3194), False, 'import autogp\n'), ((3516, 3546), 'numpy.array', 'np.array', (['[1.0, 0.5, 0.2, 0.1]'], {}), '([1.0, 0.5, 0.2, 0.1])\n', (3524, 3546), True, 'import numpy as np\n'), ((3551, 3585), 'numpy.zeros', 'np.zeros', (['sparsity_vector.shape[0]'], {}), '(sparsity_vector.shape[0])\n', (3559, 3585), True, 'import numpy as np\n'), ((1511, 
1539), 'numpy.linspace', 'np.linspace', (['(0)', '(1)'], {'num': 'N_all'}), '(0, 1, num=N_all)\n', (1522, 1539), True, 'import numpy as np\n'), ((3207, 3297), 'autogp.kernels.RadialBasis', 'autogp.kernels.RadialBasis', (['(1)'], {'lengthscale': 'lengthscale_initial', 'std_dev': 'sigma_initial'}), '(1, lengthscale=lengthscale_initial, std_dev=\n sigma_initial)\n', (3233, 3297), False, 'import autogp\n'), ((3669, 3708), 'autogp.datasets.DataSet', 'autogp.datasets.DataSet', (['xtrain', 'ytrain'], {}), '(xtrain, ytrain)\n', (3692, 3708), False, 'import autogp\n'), ((3904, 3916), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (3913, 3916), True, 'import numpy as np\n'), ((3918, 3946), 'numpy.random.shuffle', 'np.random.shuffle', (['id_sparse'], {}), '(id_sparse)\n', (3935, 3946), True, 'import numpy as np\n'), ((4033, 4131), 'autogp.GaussianProcess', 'autogp.GaussianProcess', (['likelihood', 'kernel', 'inducing_inputs'], {'num_components': '(2)', 'diag_post': '(True)'}), '(likelihood, kernel, inducing_inputs, num_components=\n 2, diag_post=True)\n', (4055, 4131), False, 'import autogp\n'), ((4165, 4197), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', (['(0.005)'], {}), '(0.005)\n', (4190, 4197), True, 'import tensorflow as tf\n'), ((4262, 4273), 'time.time', 'time.time', ([], {}), '()\n', (4271, 4273), False, 'import time\n'), ((4457, 4468), 'time.time', 'time.time', ([], {}), '()\n', (4466, 4468), False, 'import time\n'), ((4895, 4912), 'numpy.asarray', 'np.asarray', (['ypred'], {}), '(ypred)\n', (4905, 4912), True, 'import numpy as np\n'), ((4928, 4948), 'numpy.asarray', 'np.asarray', (['post_var'], {}), '(post_var)\n', (4938, 4948), True, 'import numpy as np\n'), ((5088, 5190), 'numpy.savetxt', 'np.savetxt', (["(path + sparse + var_distr + 'data_inputs.csv')", 'inputs'], {'header': '"""inputs"""', 'delimiter': '""","""'}), "(path + sparse + var_distr + 'data_inputs.csv', inputs, header=\n 'inputs', delimiter=',')\n", (5098, 5190), True, 'import 
numpy as np\n'), ((5188, 5293), 'numpy.savetxt', 'np.savetxt', (["(path + sparse + var_distr + 'data_outputs.csv')", 'outputs'], {'header': '"""outputs"""', 'delimiter': '""","""'}), "(path + sparse + var_distr + 'data_outputs.csv', outputs, header=\n 'outputs', delimiter=',')\n", (5198, 5293), True, 'import numpy as np\n'), ((5291, 5384), 'numpy.savetxt', 'np.savetxt', (["(path + sparse + var_distr + 'xtest.csv')", 'xtest'], {'header': '"""xtest"""', 'delimiter': '""","""'}), "(path + sparse + var_distr + 'xtest.csv', xtest, header='xtest',\n delimiter=',')\n", (5301, 5384), True, 'import numpy as np\n'), ((5383, 5476), 'numpy.savetxt', 'np.savetxt', (["(path + sparse + var_distr + 'ytest.csv')", 'ytest'], {'header': '"""ytest"""', 'delimiter': '""","""'}), "(path + sparse + var_distr + 'ytest.csv', ytest, header='ytest',\n delimiter=',')\n", (5393, 5476), True, 'import numpy as np\n'), ((5475, 5572), 'numpy.savetxt', 'np.savetxt', (["(path + sparse + var_distr + 'xtrain.csv')", 'xtrain'], {'header': '"""xtrain"""', 'delimiter': '""","""'}), "(path + sparse + var_distr + 'xtrain.csv', xtrain, header=\n 'xtrain', delimiter=',')\n", (5485, 5572), True, 'import numpy as np\n'), ((5570, 5667), 'numpy.savetxt', 'np.savetxt', (["(path + sparse + var_distr + 'ytrain.csv')", 'ytrain'], {'header': '"""ytrain"""', 'delimiter': '""","""'}), "(path + sparse + var_distr + 'ytrain.csv', ytrain, header=\n 'ytrain', delimiter=',')\n", (5580, 5667), True, 'import numpy as np\n'), ((5665, 5810), 'numpy.savetxt', 'np.savetxt', (["(path + sparse + var_distr + 'sample_intensity_test.csv')", 'sample_intensity[idx[N:]]'], {'header': '"""sample_intensity_test"""', 'delimiter': '""","""'}), "(path + sparse + var_distr + 'sample_intensity_test.csv',\n sample_intensity[idx[N:]], header='sample_intensity_test', delimiter=',')\n", (5675, 5810), True, 'import numpy as np\n'), ((5809, 5919), 'numpy.savetxt', 'np.savetxt', (["(path + sparse + var_distr + 'total_results_ypred.csv')", 'ypred_np'], 
{'header': '"""ypred"""', 'delimiter': '""","""'}), "(path + sparse + var_distr + 'total_results_ypred.csv', ypred_np,\n header='ypred', delimiter=',')\n", (5819, 5919), True, 'import numpy as np\n'), ((5918, 6036), 'numpy.savetxt', 'np.savetxt', (["(path + sparse + var_distr + 'total_results_postvar.csv')", 'post_var_np'], {'header': '"""post_var"""', 'delimiter': '""","""'}), "(path + sparse + var_distr + 'total_results_postvar.csv',\n post_var_np, header='post_var', delimiter=',')\n", (5928, 6036), True, 'import numpy as np\n'), ((6158, 6205), 'matplotlib.pyplot.plot', 'plt.plot', (['xtrain', 'ytrain', '"""."""'], {'mew': '(2)', 'label': '"""a"""'}), "(xtrain, ytrain, '.', mew=2, label='a')\n", (6166, 6205), True, 'import matplotlib.pyplot as plt\n'), ((6267, 6312), 'matplotlib.pyplot.plot', 'plt.plot', (['xtest', 'ytest', '"""o"""'], {'mew': '(2)', 'label': '"""b"""'}), "(xtest, ytest, 'o', mew=2, label='b')\n", (6275, 6312), True, 'import matplotlib.pyplot as plt\n'), ((6374, 6419), 'matplotlib.pyplot.plot', 'plt.plot', (['xtest', 'ypred', '"""x"""'], {'mew': '(2)', 'label': '"""c"""'}), "(xtest, ypred, 'x', mew=2, label='c')\n", (6382, 6419), True, 'import matplotlib.pyplot as plt\n'), ((6466, 6500), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Value of the process"""'], {}), "('Value of the process')\n", (6476, 6500), True, 'import matplotlib.pyplot as plt\n'), ((6502, 6517), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (6512, 6517), True, 'import matplotlib.pyplot as plt\n'), ((6519, 6624), 'matplotlib.pyplot.legend', 'plt.legend', (['[first_line, second_line, third_line]', "['Training set', 'Test set', 'Predicted y values']"], {}), "([first_line, second_line, third_line], ['Training set',\n 'Test set', 'Predicted y values'])\n", (6529, 6624), True, 'import matplotlib.pyplot as plt\n'), ((6622, 6667), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path + sparse + 'first_plot.png')"], {}), "(path + sparse + 
'first_plot.png')\n", (6633, 6667), True, 'import matplotlib.pyplot as plt\n'), ((6669, 6679), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6677, 6679), True, 'import matplotlib.pyplot as plt\n'), ((6783, 6817), 'matplotlib.pyplot.plot', 'plt.plot', (['xtest', 'ypred', '"""x"""'], {'mew': '(2)'}), "(xtest, ypred, 'x', mew=2)\n", (6791, 6817), True, 'import matplotlib.pyplot as plt\n'), ((6900, 6954), 'matplotlib.pyplot.plot', 'plt.plot', (['xtest', 'sample_intensity[idx[N:]]', '"""o"""'], {'mew': '(2)'}), "(xtest, sample_intensity[idx[N:]], 'o', mew=2)\n", (6908, 6954), True, 'import matplotlib.pyplot as plt\n'), ((7003, 7026), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Intensity"""'], {}), "('Intensity')\n", (7013, 7026), True, 'import matplotlib.pyplot as plt\n'), ((7028, 7043), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (7038, 7043), True, 'import matplotlib.pyplot as plt\n'), ((7045, 7115), 'matplotlib.pyplot.legend', 'plt.legend', (['[first_line, second_line]', "['Posterior mean', 'True mean']"], {}), "([first_line, second_line], ['Posterior mean', 'True mean'])\n", (7055, 7115), True, 'import matplotlib.pyplot as plt\n'), ((7117, 7163), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path + sparse + 'second_plot.png')"], {}), "(path + sparse + 'second_plot.png')\n", (7128, 7163), True, 'import matplotlib.pyplot as plt\n'), ((7165, 7175), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7173, 7175), True, 'import matplotlib.pyplot as plt\n'), ((7377, 7433), 'seaborn.distplot', 'sns.distplot', (['sample_intensity'], {'label': '"""Sample intensity"""'}), "(sample_intensity, label='Sample intensity')\n", (7389, 7433), True, 'import seaborn as sns\n'), ((7435, 7488), 'seaborn.distplot', 'sns.distplot', (['ypred'], {'label': '"""Posterior mean intensity"""'}), "(ypred, label='Posterior mean intensity')\n", (7447, 7488), True, 'import seaborn as sns\n'), ((7492, 7532), 'matplotlib.pyplot.ylabel', 'plt.ylabel', 
(['"""Frequency of the intensity"""'], {}), "('Frequency of the intensity')\n", (7502, 7532), True, 'import matplotlib.pyplot as plt\n'), ((7534, 7549), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (7544, 7549), True, 'import matplotlib.pyplot as plt\n'), ((7551, 7563), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7561, 7563), True, 'import matplotlib.pyplot as plt\n'), ((7565, 7610), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(path + sparse + 'third_plot.png')"], {}), "(path + sparse + 'third_plot.png')\n", (7576, 7610), True, 'import matplotlib.pyplot as plt\n'), ((7612, 7622), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7620, 7622), True, 'import matplotlib.pyplot as plt\n'), ((1563, 1601), 'kerpy.GaussianKernel.GaussianKernel', 'GaussianKernel', ([], {'sigma': 'lengthscale_data'}), '(sigma=lengthscale_data)\n', (1577, 1601), False, 'from kerpy.GaussianKernel import GaussianKernel\n'), ((2553, 2572), 'numpy.repeat', 'np.repeat', (['(0)', 'N_all'], {}), '(0, N_all)\n', (2562, 2572), True, 'import numpy as np\n'), ((2810, 2855), 'numpy.random.poisson', 'np.random.poisson', ([], {'lam': 'sample_intensity[i, j]'}), '(lam=sample_intensity[i, j])\n', (2827, 2855), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
def plot_bars_by_group(grouped_data,colors,edgecolor=None,property_name="Data by group",ylabel="Value",figsize=(16,6),annotations = True):
    """Make bar graph by group.

    Params:
    ------
    grouped_data: (dict)
        Each key represents a group; each group is a dictionary mapping a
        characteristic name to its value.  All groups are assumed to share
        the same characteristics -- the labels are taken from the first
        group (TODO confirm with callers).
    colors: (list)
        List with the colors of each group, indexed in group order.
    edgecolor: (str)
        Color of the border of each rectangle; if None the border will be
        equal to the color of the rectangle.
    property_name: (str) Chart title
    ylabel: (str) Y axis name
    figsize: (tuple) Chart dimensions
    annotations: (boolean) If True, each bar is annotated with its value

    Output:
    ------
    Bar graph by group
    """
    groups = list(grouped_data.keys())
    # Labels come from the first group's characteristics.
    labels = list(next(iter(grouped_data.values())).keys())
    n_groups = len(groups)
    n_labels = len(labels)
    width = 1 / (n_groups + 1)
    # the label locations
    main_x = np.arange(n_labels)
    # With an even number of groups the bars are edge-aligned; otherwise centred.
    pos = "edge" if n_groups % 2 == 0 else "center"
    # Signed offsets (in bar widths) for every group after the first:
    # 1, -1, 2, -2, ... alternating right/left of the main position.
    offsets = []
    for step in range(1, n_groups):
        offsets.extend((step, -step))
    X = [[r + offsets[n] * width for r in main_x] for n in range(n_groups - 1)]
    fig, ax = plt.subplots()
    fig.set_size_inches(figsize)
    rects = []
    for g, group in enumerate(groups):
        # First group sits at the main positions, the rest at their offsets.
        xs = main_x if g == 0 else X[g - 1]
        rects.append(ax.bar(xs, grouped_data[group].values(), width=width,
                            label=group, color=colors[g],
                            edgecolor=edgecolor, align=pos))
    # Add some text for labels, title and custom x-axis tick labels, etc.
    ax.set_ylabel(ylabel)
    ax.set_title(f'{property_name}')
    ax.set_xticks(main_x if pos == "center" else X[0])
    ax.set_xticklabels(labels)
    ax.grid(False)
    ax.legend()
    # Annotate each bar with its height just above the bar.
    if annotations:
        for rect in rects:
            for r in rect:
                height = r.get_height()
                ax.annotate('{} '.format(height),
                            xy=(r.get_x() + r.get_width() / 2, height),
                            xytext=(0, 3),  # 3 points vertical offset
                            textcoords="offset points",
                            ha='center', va='bottom')
"matplotlib.pyplot.subplots",
"numpy.arange"
] | [((1072, 1091), 'numpy.arange', 'np.arange', (['n_labels'], {}), '(n_labels)\n', (1081, 1091), True, 'import numpy as np\n'), ((1391, 1405), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1403, 1405), True, 'import matplotlib.pyplot as plt\n')] |
from src.model.networks.local import LocalModel
from src.model import loss
import src.model.functions as smfunctions
from src.model.archs.baseArch import BaseArch
from src.data import dataloaders
import torch, os
import torch.optim as optim
from torch.utils.data import DataLoader
import pickle as pkl
import numpy as np
from scipy import stats
from glob import glob
class mpmrireg(BaseArch):
    def __init__(self, config):
        """Set up the mp-MRI registration experiment from ``config``.

        Validates ``config.method``, then builds the datasets/loaders and
        the network(s) + optimizer before any training starts.
        """
        super(mpmrireg, self).__init__(config)
        self.config = config
        self.check_methods()
        self.set_dataloader()
        self.set_networks_and_optim()
        # Best validation metric seen so far (presumably used for
        # checkpoint selection elsewhere -- TODO confirm).
        self.best_metric = 0
def check_methods(self):
assert self.config.method in ['unsupervised', 'mixed', 'B0', 'joint', 'privileged'], f"method {self.config.method} can not be recongnised."
    def set_networks_and_optim(self):
        """Instantiate the registration network(s) and their Adam optimizer.

        Every method except 'joint' trains a single LocalModel.  'joint'
        trains three networks -- per ``forward``, AB: t2->dwi_b0,
        BC: dwi_b0->dwi, AC: t2->dwi -- updated by one shared optimizer.
        """
        if self.config.method != "joint":
            self.net = LocalModel(self.config).cuda()
            self.optimizer = optim.Adam(self.net.parameters(), lr=self.config.lr)
        else:
            # One local registration network per image pair.
            self.net_AB = LocalModel(self.config).cuda()
            self.net_BC = LocalModel(self.config).cuda()
            self.net_AC = LocalModel(self.config).cuda()
            # A single optimizer jointly updates all three networks.
            self.optimizer = optim.Adam(
                list(self.net_AB.parameters())+list(self.net_BC.parameters())+list(self.net_AC.parameters()),
                lr=self.config.lr
            )
def set_dataloader(self):
self.train_set = dataloaders.mpMRIData(config=self.config, phase='train')
self.train_loader = DataLoader(self.train_set, batch_size=self.config.batch_size, shuffle=True, num_workers=4)
print('>>> Train set ready.')
self.val_set = dataloaders.mpMRIData(config=self.config, phase='val')
self.val_loader = DataLoader(self.val_set, batch_size=1, shuffle=False, num_workers=1, drop_last=False)
print('>>> Validation set ready.')
self.test_set = dataloaders.mpMRIData(config=self.config, phase='test')
self.test_loader = DataLoader(self.test_set, batch_size=1, shuffle=False, num_workers=1, drop_last=False)
print('>>> Holdout set ready.')
def set_external_dataloader(self):
self.miami_set = dataloaders.mpMRIData(config=self.config, phase='test', external='/media/yipeng/data/data/mpMriReg/external/Miami-external-npy')
self.miami_loader = DataLoader(self.miami_set, batch_size=1, shuffle=False, num_workers=1, drop_last=False)
self.cia_set = dataloaders.mpMRIData(config=self.config, phase='test', external='/media/yipeng/data/data/mpMriReg/external/CIA-external-npy')
self.cia_loader = DataLoader(self.cia_set, batch_size=1, shuffle=False, num_workers=1, drop_last=False)
print('>>> External set ready.')
def get_input(self, input_dict, phase='train'):
assert phase in ['train', 'val', 'test'], f"phase {phase} not correct for getting data"
if self.config.method == 'unsupervised':
data = self.get_input_unsup(input_dict, phase)
elif self.config.method == 'mixed':
data = self.get_input_mixed(input_dict, phase)
elif self.config.method == 'joint':
data = self.get_input_joint(input_dict, phase)
elif self.config.method == 'privileged':
data = self.get_input_privileged(input_dict, phase)
elif self.config.method == 'B0':
data = self.get_input_B0(input_dict, phase)
return data
def get_input_unsup(self, input_dict, phase):
fx_img, mv_img = input_dict['t2'].cuda(), input_dict['dwi'].cuda()
if phase == 'train':
affine_grid_fx = smfunctions.rand_affine_grid(fx_img, scale=self.config.affine_scale)
affine_grid_mv = smfunctions.rand_affine_grid(mv_img, scale=self.config.affine_scale)
mv_img = torch.nn.functional.grid_sample(mv_img, affine_grid_mv, mode='bilinear', align_corners=True)
fx_img = torch.nn.functional.grid_sample(fx_img, affine_grid_fx, mode='bilinear', align_corners=True)
return fx_img, mv_img
def get_input_mixed(self, input_dict, phase):
fx_img, mv_img = input_dict['t2'].cuda(), input_dict['dwi'].cuda()
if phase == 'train':
affine_grid_fx = smfunctions.rand_affine_grid(fx_img, scale=self.config.affine_scale)
affine_grid_mv = smfunctions.rand_affine_grid(mv_img, scale=self.config.affine_scale)
mv_img = torch.nn.functional.grid_sample(mv_img, affine_grid_mv, mode='bilinear', align_corners=True)
fx_img = torch.nn.functional.grid_sample(fx_img, affine_grid_fx, mode='bilinear', align_corners=True)
return fx_img, mv_img
def get_input_B0(self, input_dict, phase):
fx_img = input_dict['t2'].cuda()
mv_img = input_dict['dwi_b0'].cuda()
if phase == 'train':
affine_grid_fx = smfunctions.rand_affine_grid(fx_img, scale=self.config.affine_scale)
affine_grid_mv = smfunctions.rand_affine_grid(mv_img, scale=self.config.affine_scale)
mv_img = torch.nn.functional.grid_sample(mv_img, affine_grid_mv, mode='bilinear', align_corners=True)
fx_img = torch.nn.functional.grid_sample(fx_img, affine_grid_fx, mode='bilinear', align_corners=True)
return fx_img, mv_img
def get_input_joint(self, input_dict, phase):
t2, dwi, dwi_b0 = input_dict['t2'].cuda(), input_dict['dwi'].cuda(), input_dict['dwi_b0'].cuda()
if phase == 'train':
affine_grid_t2 = smfunctions.rand_affine_grid(t2, scale=self.config.affine_scale)
affine_grid_dwis = smfunctions.rand_affine_grid(dwi, scale=self.config.affine_scale)
t2 = torch.nn.functional.grid_sample(t2, affine_grid_t2, mode='bilinear', align_corners=True)
dwi = torch.nn.functional.grid_sample(dwi, affine_grid_dwis, mode='bilinear', align_corners=True)
dwi_b0 = torch.nn.functional.grid_sample(dwi_b0, affine_grid_dwis, mode='bilinear', align_corners=True)
return t2, dwi, dwi_b0
def get_input_privileged(self, input_dict, phase):
t2, dwi, dwi_b0 = input_dict['t2'].cuda(), input_dict['dwi'].cuda(), input_dict['dwi_b0'].cuda()
if phase == 'train': ## overall-level affine transformation augmentation
affine_grid_t2 = smfunctions.rand_affine_grid(t2, scale=self.config.affine_scale)
affine_grid_dwis = smfunctions.rand_affine_grid(dwi, scale=self.config.affine_scale)
t2 = torch.nn.functional.grid_sample(t2, affine_grid_t2, mode='bilinear', align_corners=True)
dwi = torch.nn.functional.grid_sample(dwi, affine_grid_dwis, mode='bilinear', align_corners=True)
dwi_b0 = torch.nn.functional.grid_sample(dwi_b0, affine_grid_dwis, mode='bilinear', align_corners=True)
if phase == 'train': ## privileged specific affine optimization(correction)
base_mi = [loss.global_mutual_information(dwi[i], dwi_b0[i]) for i in range(dwi.shape[0])]
for i in range(self.config.mi_resample_count):
affine_grid_b0 = smfunctions.rand_affine_grid(dwi_b0, scale=self.config.affine_scale)
new_b0 = torch.nn.functional.grid_sample(dwi_b0, affine_grid_dwis, mode='bilinear', align_corners=True)
affine_mi = [loss.global_mutual_information(dwi[i], new_b0[i]) for i in range(dwi.shape[0])]
for idx, (b_mi, a_mi) in enumerate(zip(base_mi, affine_mi)):
if a_mi > b_mi:
print("better mi found!!!")
dwi_b0[idx] = new_b0[idx]
base_mi[idx] = a_mi
return t2, dwi, dwi_b0
def set_train_mode(self):
if self.config.method != "joint":
self.net.train()
else:
self.net_AB.train()
self.net_BC.train()
self.net_AC.train()
def set_eval_mode(self):
if self.config.method != "joint":
self.net.eval()
else:
self.net_AB.eval()
self.net_BC.eval()
self.net_AC.eval()
def forward(self, input_data):
if self.config.method == "joint":
t2, dwi, dwi_b0 = input_data
_, ddf_AB = self.net_AB(torch.cat([t2, dwi_b0], dim=1))
_, ddf_BC = self.net_BC(torch.cat([dwi_b0, dwi], dim=1))
_, ddf_AC = self.net_AC(torch.cat([t2, dwi], dim=1))
return ddf_AB, ddf_BC, ddf_AC
elif self.config.method == "privileged":
t2, dwi, _ = input_data
_, ddf = self.net(torch.cat([t2, dwi], dim=1))
return ddf
else:
_, ddf = self.net(torch.cat(input_data, dim=1))
return ddf
def get_warpped_images(self, input_data, ddfs):
if self.config.method == "joint":
t2, dwi, dwi_b0 = input_data
ddf_AB, ddf_BC, ddf_AC = ddfs
warpped_img_AB = smfunctions.warp3d(dwi_b0, ddf_AB)
warpped_img_BC = smfunctions.warp3d(dwi, ddf_BC)
warpped_img_AC = smfunctions.warp3d(dwi, ddf_AC)
warpped_img_C2A = smfunctions.warp3d(smfunctions.warp3d(dwi, ddf_BC), ddf_AB)
warpped_imgs = [warpped_img_AB, warpped_img_AC, warpped_img_BC, warpped_img_C2A]
elif self.config.method == "privileged":
_, mv_img, b0 = input_data # t2, dwi, dwi_b0
warpped_imgs = [smfunctions.warp3d(mv_img, ddfs), smfunctions.warp3d(b0, ddfs)]
else:
_, mv_img = input_data # t2, dwi
warpped_imgs = smfunctions.warp3d(mv_img, ddfs)
return warpped_imgs
    def train(self):
        """Main training loop: per epoch, optimize on every training batch,
        checkpoint on the configured cadence, then run validation."""
        self.save_configure()
        # NOTE: the loop variables are bound to self.epoch / self.step so that
        # losses and checkpoints can reference the current position.
        for self.epoch in range(1, self.config.num_epochs + 1):
            self.set_train_mode()
            print('-'*10, 'training', '-'*10)
            for self.step, input_dict in enumerate(self.train_loader):
                input_data = self.get_input(input_dict)
                self.optimizer.zero_grad()
                ddfs = self.forward(input_data)
                warpped_imgs = self.get_warpped_images(input_data, ddfs)
                global_loss = self.loss(input_data, ddfs, warpped_imgs)
                global_loss.backward()
                self.optimizer.step()
            # Periodic checkpoint; validation itself may also save a 'best' one.
            if self.epoch % self.config.save_frequency == 0:
                self.SAVE()
            print('-'*10, 'validation', '-'*10)
            self.validation(dataloader=self.val_loader, epoch=self.epoch)
def SAVE(self, type=None):
if self.config.method != "joint":
self.save(type)
else:
ckpt_path = os.path.join(self.log_dir, 'checkpoints')
os.makedirs(ckpt_path, exist_ok=True)
if type is None:
torch.save(self.net_AB, os.path.join(ckpt_path, f'AB-epoch-{self.epoch}.pt'))
torch.save(self.net_BC, os.path.join(ckpt_path, f'BC-epoch-{self.epoch}.pt'))
torch.save(self.net_AC, os.path.join(ckpt_path, f'AC-epoch-{self.epoch}.pt'))
elif type == 'best':
exist_best_models = glob(os.path.join(ckpt_path, 'best*.pt'))
[os.remove(i) for i in exist_best_models]
torch.save(self.net_AB, os.path.join(ckpt_path, f'best-AB-epoch-{self.epoch}.pt'))
torch.save(self.net_BC, os.path.join(ckpt_path, f'best-BC-epoch-{self.epoch}.pt'))
torch.save(self.net_AC, os.path.join(ckpt_path, f'best-AC-epoch-{self.epoch}.pt'))
else:
raise NotImplementedError
def load_epoch_origin(self, num_epoch):
if num_epoch != 'best':
self.epoch = int(num_epoch)
self.net = torch.load(os.path.join(self.log_dir, 'checkpoints', f'epoch-{num_epoch}.pt'))
print(f'load from epoch {self.epoch}')
else:
best_ckpt = glob(os.path.join(self.log_dir, 'checkpoints', 'best*'))
assert(len(best_ckpt)) != 0, "no best ckpt found in this exp..."
self.net = torch.load(best_ckpt[0])
self.epoch = int(best_ckpt[0].replace('.pt', '').split('-')[-1])
print(f'load from best epoch {best_ckpt[0]}')
def load_epoch_joint(self, num_epoch):
if num_epoch != 'best':
self.epoch = int(num_epoch)
self.net_AB = torch.load(os.path.join(self.log_dir, 'checkpoints', f'AB-epoch-{num_epoch}.pt'))
self.net_BC = torch.load(os.path.join(self.log_dir, 'checkpoints', f'BC-epoch-{num_epoch}.pt'))
self.net_AC = torch.load(os.path.join(self.log_dir, 'checkpoints', f'AC-epoch-{num_epoch}.pt'))
else:
self.epoch = num_epoch
best_ckpt = glob(os.path.join(self.log_dir, 'checkpoints', f'best*'))
assert(len(best_ckpt)) != 0, "no best ckpt found in this exp..."
self.net_AB = torch.load([i for i in best_ckpt if 'AB-' in i][0])
self.net_BC = torch.load([i for i in best_ckpt if 'BC-' in i][0])
self.net_AC = torch.load([i for i in best_ckpt if 'AC-' in i][0])
def load_epoch(self, num_epoch):
if self.config.method != 'joint':
self.load_epoch_origin(num_epoch)
else:
self.load_epoch_joint(num_epoch)
def regular_loss(self, input_data, ddfs, warpped_imgs, prefix=''):
fx_img = input_data[0]
L_Dreg_l2g = loss.l2_gradient(ddfs) * self.config.w_l2g
L_Isim = (1.5 - loss.global_mutual_information(fx_img, warpped_imgs)) * self.config.w_gmi
L_All = L_Dreg_l2g + L_Isim
Info = f'epoch {self.epoch}, step {self.step+1}, L_All:{L_All:.3f}, L2R: {L_Dreg_l2g:.6f}, Isim: {L_Isim:.3f}'
print(prefix, Info)
return L_All
def joint_loss(self, input_data, ddfs, warpped_imgs):
t2, dwi, dwi_b0 = input_data
warpped_img_AB, warpped_img_AC, warpped_img_BC, warpped_img_C2A = warpped_imgs
ddf_AB, ddf_BC, ddf_AC = ddfs
l1 = self.regular_loss([t2], ddf_AB, warpped_img_AB, prefix='Net AB:')
l2 = self.regular_loss([dwi_b0], ddf_BC, warpped_img_BC, prefix='Net BC:')
l3 = self.regular_loss([t2], ddf_AC, warpped_img_AC, prefix='Net AC:')
ddf_similarity_loss = self.config.w_dsl * loss.ssd(warpped_img_C2A, warpped_img_AC)
L_All = l1 + l2 + l3 + ddf_similarity_loss
print(f'DDF similarity: {ddf_similarity_loss:.6f}, L_All: {L_All:.3f}')
return L_All
def privileged_loss(self, input_data, ddfs, warpped_imgs, prefix=''):
fx_img = input_data[0]
L_Dreg_l2g = loss.l2_gradient(ddfs) * self.config.w_l2g
L_Isim = (1.5 - loss.global_mutual_information(fx_img, warpped_imgs[0])) * self.config.w_gmi
L_Isim_weak = (1.5 - loss.global_mutual_information(fx_img, warpped_imgs[1])) * self.config.w_gmi
L_All = L_Dreg_l2g + L_Isim + L_Isim_weak
Info = f'epoch {self.epoch}, step {self.step+1}, L_All:{L_All:.3f}, L2R: {L_Dreg_l2g:.6f}, Isim: {L_Isim:.3f}, Isim_weak: {L_Isim_weak:.3f}'
print(prefix, Info)
return L_All
def loss(self, input_data, ddfs, warpped_imgs):
if self.config.method == 'joint':
return self.joint_loss(input_data, ddfs, warpped_imgs)
elif self.config.method == 'privileged':
return self.privileged_loss(input_data, ddfs, warpped_imgs)
else:
return self.regular_loss(input_data, ddfs, warpped_imgs)
@torch.no_grad()
def validation(self, dataloader, epoch=None):
self.set_eval_mode()
res = []
for idx, input_dict in enumerate(dataloader):
input_data = self.get_input(input_dict, phase='val')
ddfs = self.forward(input_data)
warpped_imgs = self.get_warpped_images(input_data, ddfs)
fx_img = input_data[0]
if self.config.method == 'joint':
wp_img = warpped_imgs[1]
elif self.config.method == 'privileged':
wp_img = warpped_imgs[0]
else:
wp_img = warpped_imgs
res.append(loss.global_mutual_information(fx_img, wp_img))
res = torch.tensor(res)
mean, std = torch.mean(res), torch.std(res)
if mean > self.best_metric:
self.best_metric = mean
self.SAVE(type='best')
print('Result:', mean, std)
    def visualization(
            self, idx,
            fx_img, mv_img, pr_img, wp_mv_img, wp_pr_img,
            ddf,
            t2_ldmk_paths, dwi_ldmk_paths, t2_ldmks, dwi_ldmks, warped_ldmks, save_suffix=''):
        """Dump all images, the DDF components, and the landmark volumes for
        one case into vis-<epoch><suffix>/<idx> as .nii files for inspection.

        Args:
            idx: case identifier used as the output subfolder name.
            fx_img, mv_img, pr_img: fixed (t2), moving (dwi) and privileged
                (b0) volumes.
            wp_mv_img, wp_pr_img: warped moving / privileged volumes.
            ddf: dense displacement field; channels 0/1/2 are saved as x/y/z.
            t2_ldmk_paths, dwi_ldmk_paths: source .npy paths, used to name the
                landmark outputs.
            t2_ldmks, dwi_ldmks, warped_ldmks: per-landmark volumes (must all
                have the same length as the path lists).
            save_suffix: appended to the vis folder name (e.g. '_miami').
        """
        # save images: t2, dwi, b0, wp_dwi, wp_b0 and ddfs
        visualization_path = os.path.join(self.log_dir, f'vis-{self.epoch}{save_suffix}', idx)
        os.makedirs(visualization_path, exist_ok=True)
        print('-' * 20)
        self.save_img(fx_img, os.path.join(visualization_path, f'fx_img.nii')) # t2
        self.save_img(mv_img, os.path.join(visualization_path, f'mv_img.nii')) # dwi
        self.save_img(pr_img, os.path.join(visualization_path, f'pr_img.nii')) # b0
        self.save_img(wp_mv_img, os.path.join(visualization_path, f'wp_mv_img.nii')) # wp_dwi
        self.save_img(wp_pr_img, os.path.join(visualization_path, f'wp_pr_img.nii')) # wp_b0
        self.save_img(ddf[0, 0, :, :, :], os.path.join(visualization_path, f'ddf-x.nii'))
        self.save_img(ddf[0, 1, :, :, :], os.path.join(visualization_path, f'ddf-y.nii'))
        self.save_img(ddf[0, 2, :, :, :], os.path.join(visualization_path, f'ddf-z.nii'))
        assert len(t2_ldmk_paths)==len(dwi_ldmk_paths)==len(t2_ldmks)==len(dwi_ldmks)==len(warped_ldmks), "landmark size not equal"
        # print("ldmk_list", t2_ldmk_paths)
        # Landmark outputs reuse the source .npy basename with a role suffix:
        # _fx (fixed), _mv (moving), _wp (warped).
        for i in range(len(t2_ldmks)):
            self.save_img(t2_ldmks[i], os.path.join(visualization_path, os.path.basename(t2_ldmk_paths[i][0]).replace('.npy', '_fx.nii')))
            self.save_img(dwi_ldmks[i], os.path.join(visualization_path, os.path.basename(dwi_ldmk_paths[i][0]).replace('.npy', '_mv.nii')))
            self.save_img(warped_ldmks[i], os.path.join(visualization_path, os.path.basename(dwi_ldmk_paths[i][0]).replace('.npy', '_wp.nii')))
@torch.no_grad()
def evaluation(self, fx_img, mv_img, pr_img):
_, ddf = self.net(torch.cat([fx_img, mv_img], dim=1))
wp_mv_img = smfunctions.warp3d(mv_img, ddf)
wp_pr_img = smfunctions.warp3d(pr_img, ddf)
return wp_mv_img, wp_pr_img, ddf
@torch.no_grad()
def evaluation_joint(self, fx_img, mv_img, pr_img):
t2, dwi, dwi_b0 = fx_img, mv_img, pr_img
_, ddf_AB = self.net_AB(torch.cat([t2, dwi_b0], dim=1))
_, ddf_AC = self.net_AC(torch.cat([t2, dwi], dim=1))
wp_pr_img = smfunctions.warp3d(dwi_b0, ddf_AB)
wp_mv_img = smfunctions.warp3d(dwi, ddf_AC)
return wp_mv_img, wp_pr_img, ddf_AC
@torch.no_grad()
def evaluation_B0(self, fx_img, mv_img, pr_img):
t2, dwi, dwi_b0 = fx_img, mv_img, pr_img
_, ddf = self.net(torch.cat([fx_img, pr_img], dim=1))
wp_mv_img = smfunctions.warp3d(mv_img, ddf)
wp_pr_img = smfunctions.warp3d(pr_img, ddf)
return wp_mv_img, wp_pr_img, ddf
    @torch.no_grad()
    def calc_TREs(self, t2_ldmks, t2_ldmk_paths, dwi_ldmks, dwi_ldmk_paths, ddf):
        """Compute per-landmark target registration errors (centroid distances)
        before and after warping the dwi landmarks with ``ddf``.

        Returns:
            temp_dict: {'tre': [...], 'tre-wo-reg': [...]} for non-NaN landmarks.
            fx_ldmks_arr, mv_ldmks_arr, wp_ldmks_arr: fixed / moving / warped
                landmark volumes, one entry per landmark.
            tre_dict: {patient_id: [[tre_wo_reg, tre], ...]}.
        """
        assert t2_ldmks.shape == dwi_ldmks.shape, "shape of the landmarks are not equal"
        assert len(t2_ldmk_paths) == len(dwi_ldmk_paths), "lens of the landmarks not equal"
        temp_dict = {'tre':[], 'tre-wo-reg':[]}
        tre_dict = {}
        print('-'*10 + t2_ldmk_paths[0][0].split('/')[-2] + '-'*10)
        fx_ldmks_arr = []
        mv_ldmks_arr = []
        wp_ldmks_arr = []
        # Patient id = name of the directory containing the landmark files.
        pid = os.path.basename(os.path.dirname(t2_ldmk_paths[0][0]))
        tre_dict[pid] = []
        # Landmarks are stacked along dim 1; slice each out as its own volume.
        for i in range(t2_ldmks.shape[1]):
            mv_ldmk = dwi_ldmks[:, i:i+1, :, :, :]
            fx_ldmk = t2_ldmks[:, i:i+1, :, :, :]
            wp_ldmk = smfunctions.warp3d(mv_ldmk, ddf)
            fx_ldmks_arr.append(fx_ldmk)
            mv_ldmks_arr.append(mv_ldmk)
            wp_ldmks_arr.append(wp_ldmk)
            TRE = loss.centroid_distance(fx_ldmk, wp_ldmk).cpu().numpy()
            TRE_wo_reg = loss.centroid_distance(fx_ldmk, mv_ldmk).cpu().numpy()
            # NaN arises when a landmark centroid is undefined (e.g. warped out
            # of the volume) — such landmarks are reported but not accumulated.
            if not np.isnan(TRE):
                temp_dict['tre'].append(TRE)
                temp_dict['tre-wo-reg'].append(TRE_wo_reg)
                tre_dict[pid].append([TRE_wo_reg, TRE])
                print(
                    f'{i+1}',
                    'woreg:', np.around(TRE_wo_reg, decimals=3),
                    'after-reg:', np.around(TRE, decimals=3),
                    'ipmt:', np.around(TRE_wo_reg - TRE, decimals=3)
                )
            else:
                print(i+1, 'warning: nan exists.')
        return temp_dict, fx_ldmks_arr, mv_ldmks_arr, wp_ldmks_arr, tre_dict
def inference(self):
self.sub_inference(external_dataloader=self.test_loader)
# used for external validation
# self.set_external_dataloader()
# print('-------Miami results-------')
# self.sub_inference(external_dataloader=self.miami_loader, save_suffix='_miami')
# print('-------CIA results-------')
# self.sub_inference(external_dataloader=self.cia_loader, save_suffix='_cia')
    @torch.no_grad()
    def sub_inference(self, external_dataloader, save_suffix=''):
        """Full evaluation pass over ``external_dataloader``: per case, run the
        method-appropriate inference, accumulate MI and TRE metrics, check the
        DDF Jacobian, dump visualizations, then pickle the aggregated results.

        Args:
            external_dataloader: loader yielding dicts with 't2', 'dwi',
                'dwi_b0' volumes plus landmark tensors and paths.
            save_suffix: appended to output filenames/folders (e.g. '_miami').

        Side effects:
            Writes results{suffix}.pkl and tre_dic{suffix}.pkl to the log dir,
            plus per-case visualization folders.
        """
        self.set_eval_mode()
        results = {
            'mi': [],
            'mi-wo-reg': [],
            'tre': [],
            'tre-wo-reg': []
        }
        tre_dict = {}
        dataloader = external_dataloader
        for idx, input_dict in enumerate(dataloader):
            fx_img, mv_img, pr_img = input_dict['t2'].cuda(), input_dict['dwi'].cuda(), input_dict['dwi_b0'].cuda()
            t2_ldmks, dwi_ldmks = input_dict['t2_ldmks'].cuda(), input_dict['dwi_ldmks'].cuda()
            t2_ldmk_paths, dwi_ldmk_paths = input_dict['t2_ldmks_paths'], input_dict['dwi_ldmks_paths']
            # get ddfs, get warpped images
            if self.config.method in ['unsupervised', 'mixed', 'privileged']:
                wp_mv_img, wp_pr_img, ddf = self.evaluation(fx_img, mv_img, pr_img)
            elif self.config.method == 'joint':
                wp_mv_img, wp_pr_img, ddf = self.evaluation_joint(fx_img, mv_img, pr_img)
            elif self.config.method == 'B0':
                wp_mv_img, wp_pr_img, ddf = self.evaluation_B0(fx_img, mv_img, pr_img)
            else:
                print("can not recognise method")
                raise NotImplementedError
            # calculate TREs and MIs
            tmp_TREs, fx_ldmks, mv_ldmks, warped_ldmks, sub_tre_dict = self.calc_TREs(t2_ldmks, t2_ldmk_paths, dwi_ldmks, dwi_ldmk_paths, ddf)
            for key, value in tmp_TREs.items():
                results[key].extend(value)
            results['mi'].append(loss.global_mutual_information(fx_img, wp_mv_img).cpu().numpy())
            results['mi-wo-reg'].append(loss.global_mutual_information(fx_img, mv_img).cpu().numpy())
            tre_dict.update(sub_tre_dict)
            # calculate jaccobian: a non-positive determinant anywhere means
            # the predicted deformation folds space (diagnostic print only).
            jc = loss.jacobian_determinant(ddf)
            if (jc>0).all():
                print('jaccobian all > 0')
            else:
                print('jaccobian <=0 exist.')
            # save images ??
            self.visualization(
                t2_ldmk_paths[0][0].split('/')[-2],
                fx_img,
                mv_img,
                pr_img,
                wp_mv_img,
                wp_pr_img,
                ddf,
                t2_ldmk_paths,
                dwi_ldmk_paths,
                fx_ldmks,
                mv_ldmks,
                warped_ldmks,
                save_suffix)
        # save results
        for k, v in results.items():
            print(k, np.mean(v), np.std(v))
        with open(os.path.join(self.log_dir, f'results{save_suffix}.pkl'), 'wb') as f:
            pkl.dump(results, f)
        with open(os.path.join(self.log_dir, f'tre_dic{save_suffix}.pkl'), 'wb') as f:
            pkl.dump(tre_dict, f)
| [
"src.model.networks.local.LocalModel",
"os.remove",
"src.model.functions.rand_affine_grid",
"torch.nn.functional.grid_sample",
"numpy.mean",
"src.model.functions.warp3d",
"torch.mean",
"src.model.loss.global_mutual_information",
"src.model.loss.jacobian_determinant",
"os.path.dirname",
"src.mode... | [((15480, 15495), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15493, 15495), False, 'import torch, os\n'), ((18207, 18222), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (18220, 18222), False, 'import torch, os\n'), ((18486, 18501), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (18499, 18501), False, 'import torch, os\n'), ((18889, 18904), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (18902, 18904), False, 'import torch, os\n'), ((19220, 19235), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (19233, 19235), False, 'import torch, os\n'), ((21390, 21405), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (21403, 21405), False, 'import torch, os\n'), ((1469, 1525), 'src.data.dataloaders.mpMRIData', 'dataloaders.mpMRIData', ([], {'config': 'self.config', 'phase': '"""train"""'}), "(config=self.config, phase='train')\n", (1490, 1525), False, 'from src.data import dataloaders\n'), ((1554, 1648), 'torch.utils.data.DataLoader', 'DataLoader', (['self.train_set'], {'batch_size': 'self.config.batch_size', 'shuffle': '(True)', 'num_workers': '(4)'}), '(self.train_set, batch_size=self.config.batch_size, shuffle=True,\n num_workers=4)\n', (1564, 1648), False, 'from torch.utils.data import DataLoader\n'), ((1706, 1760), 'src.data.dataloaders.mpMRIData', 'dataloaders.mpMRIData', ([], {'config': 'self.config', 'phase': '"""val"""'}), "(config=self.config, phase='val')\n", (1727, 1760), False, 'from src.data import dataloaders\n'), ((1787, 1876), 'torch.utils.data.DataLoader', 'DataLoader', (['self.val_set'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': '(1)', 'drop_last': '(False)'}), '(self.val_set, batch_size=1, shuffle=False, num_workers=1,\n drop_last=False)\n', (1797, 1876), False, 'from torch.utils.data import DataLoader\n'), ((1940, 1995), 'src.data.dataloaders.mpMRIData', 'dataloaders.mpMRIData', ([], {'config': 'self.config', 'phase': '"""test"""'}), "(config=self.config, phase='test')\n", (1961, 1995), False, 
'from src.data import dataloaders\n'), ((2023, 2113), 'torch.utils.data.DataLoader', 'DataLoader', (['self.test_set'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': '(1)', 'drop_last': '(False)'}), '(self.test_set, batch_size=1, shuffle=False, num_workers=1,\n drop_last=False)\n', (2033, 2113), False, 'from torch.utils.data import DataLoader\n'), ((2215, 2348), 'src.data.dataloaders.mpMRIData', 'dataloaders.mpMRIData', ([], {'config': 'self.config', 'phase': '"""test"""', 'external': '"""/media/yipeng/data/data/mpMriReg/external/Miami-external-npy"""'}), "(config=self.config, phase='test', external=\n '/media/yipeng/data/data/mpMriReg/external/Miami-external-npy')\n", (2236, 2348), False, 'from src.data import dataloaders\n'), ((2372, 2463), 'torch.utils.data.DataLoader', 'DataLoader', (['self.miami_set'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': '(1)', 'drop_last': '(False)'}), '(self.miami_set, batch_size=1, shuffle=False, num_workers=1,\n drop_last=False)\n', (2382, 2463), False, 'from torch.utils.data import DataLoader\n'), ((2483, 2614), 'src.data.dataloaders.mpMRIData', 'dataloaders.mpMRIData', ([], {'config': 'self.config', 'phase': '"""test"""', 'external': '"""/media/yipeng/data/data/mpMriReg/external/CIA-external-npy"""'}), "(config=self.config, phase='test', external=\n '/media/yipeng/data/data/mpMriReg/external/CIA-external-npy')\n", (2504, 2614), False, 'from src.data import dataloaders\n'), ((2636, 2725), 'torch.utils.data.DataLoader', 'DataLoader', (['self.cia_set'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': '(1)', 'drop_last': '(False)'}), '(self.cia_set, batch_size=1, shuffle=False, num_workers=1,\n drop_last=False)\n', (2646, 2725), False, 'from torch.utils.data import DataLoader\n'), ((16191, 16208), 'torch.tensor', 'torch.tensor', (['res'], {}), '(res)\n', (16203, 16208), False, 'import torch, os\n'), ((16698, 16763), 'os.path.join', 'os.path.join', (['self.log_dir', 
'f"""vis-{self.epoch}{save_suffix}"""', 'idx'], {}), "(self.log_dir, f'vis-{self.epoch}{save_suffix}', idx)\n", (16710, 16763), False, 'import torch, os\n'), ((16772, 16818), 'os.makedirs', 'os.makedirs', (['visualization_path'], {'exist_ok': '(True)'}), '(visualization_path, exist_ok=True)\n', (16783, 16818), False, 'import torch, os\n'), ((18355, 18386), 'src.model.functions.warp3d', 'smfunctions.warp3d', (['mv_img', 'ddf'], {}), '(mv_img, ddf)\n', (18373, 18386), True, 'import src.model.functions as smfunctions\n'), ((18407, 18438), 'src.model.functions.warp3d', 'smfunctions.warp3d', (['pr_img', 'ddf'], {}), '(pr_img, ddf)\n', (18425, 18438), True, 'import src.model.functions as smfunctions\n'), ((18752, 18786), 'src.model.functions.warp3d', 'smfunctions.warp3d', (['dwi_b0', 'ddf_AB'], {}), '(dwi_b0, ddf_AB)\n', (18770, 18786), True, 'import src.model.functions as smfunctions\n'), ((18807, 18838), 'src.model.functions.warp3d', 'smfunctions.warp3d', (['dwi', 'ddf_AC'], {}), '(dwi, ddf_AC)\n', (18825, 18838), True, 'import src.model.functions as smfunctions\n'), ((19089, 19120), 'src.model.functions.warp3d', 'smfunctions.warp3d', (['mv_img', 'ddf'], {}), '(mv_img, ddf)\n', (19107, 19120), True, 'import src.model.functions as smfunctions\n'), ((19141, 19172), 'src.model.functions.warp3d', 'smfunctions.warp3d', (['pr_img', 'ddf'], {}), '(pr_img, ddf)\n', (19159, 19172), True, 'import src.model.functions as smfunctions\n'), ((3650, 3718), 'src.model.functions.rand_affine_grid', 'smfunctions.rand_affine_grid', (['fx_img'], {'scale': 'self.config.affine_scale'}), '(fx_img, scale=self.config.affine_scale)\n', (3678, 3718), True, 'import src.model.functions as smfunctions\n'), ((3748, 3816), 'src.model.functions.rand_affine_grid', 'smfunctions.rand_affine_grid', (['mv_img'], {'scale': 'self.config.affine_scale'}), '(mv_img, scale=self.config.affine_scale)\n', (3776, 3816), True, 'import src.model.functions as smfunctions\n'), ((3838, 3934), 
'torch.nn.functional.grid_sample', 'torch.nn.functional.grid_sample', (['mv_img', 'affine_grid_mv'], {'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(mv_img, affine_grid_mv, mode='bilinear',\n align_corners=True)\n", (3869, 3934), False, 'import torch, os\n'), ((3952, 4048), 'torch.nn.functional.grid_sample', 'torch.nn.functional.grid_sample', (['fx_img', 'affine_grid_fx'], {'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(fx_img, affine_grid_fx, mode='bilinear',\n align_corners=True)\n", (3983, 4048), False, 'import torch, os\n'), ((4268, 4336), 'src.model.functions.rand_affine_grid', 'smfunctions.rand_affine_grid', (['fx_img'], {'scale': 'self.config.affine_scale'}), '(fx_img, scale=self.config.affine_scale)\n', (4296, 4336), True, 'import src.model.functions as smfunctions\n'), ((4366, 4434), 'src.model.functions.rand_affine_grid', 'smfunctions.rand_affine_grid', (['mv_img'], {'scale': 'self.config.affine_scale'}), '(mv_img, scale=self.config.affine_scale)\n', (4394, 4434), True, 'import src.model.functions as smfunctions\n'), ((4456, 4552), 'torch.nn.functional.grid_sample', 'torch.nn.functional.grid_sample', (['mv_img', 'affine_grid_mv'], {'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(mv_img, affine_grid_mv, mode='bilinear',\n align_corners=True)\n", (4487, 4552), False, 'import torch, os\n'), ((4570, 4666), 'torch.nn.functional.grid_sample', 'torch.nn.functional.grid_sample', (['fx_img', 'affine_grid_fx'], {'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(fx_img, affine_grid_fx, mode='bilinear',\n align_corners=True)\n", (4601, 4666), False, 'import torch, os\n'), ((4896, 4964), 'src.model.functions.rand_affine_grid', 'smfunctions.rand_affine_grid', (['fx_img'], {'scale': 'self.config.affine_scale'}), '(fx_img, scale=self.config.affine_scale)\n', (4924, 4964), True, 'import src.model.functions as smfunctions\n'), ((4994, 5062), 'src.model.functions.rand_affine_grid', 'smfunctions.rand_affine_grid', (['mv_img'], {'scale': 
'self.config.affine_scale'}), '(mv_img, scale=self.config.affine_scale)\n', (5022, 5062), True, 'import src.model.functions as smfunctions\n'), ((5084, 5180), 'torch.nn.functional.grid_sample', 'torch.nn.functional.grid_sample', (['mv_img', 'affine_grid_mv'], {'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(mv_img, affine_grid_mv, mode='bilinear',\n align_corners=True)\n", (5115, 5180), False, 'import torch, os\n'), ((5198, 5294), 'torch.nn.functional.grid_sample', 'torch.nn.functional.grid_sample', (['fx_img', 'affine_grid_fx'], {'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(fx_img, affine_grid_fx, mode='bilinear',\n align_corners=True)\n", (5229, 5294), False, 'import torch, os\n'), ((5538, 5602), 'src.model.functions.rand_affine_grid', 'smfunctions.rand_affine_grid', (['t2'], {'scale': 'self.config.affine_scale'}), '(t2, scale=self.config.affine_scale)\n', (5566, 5602), True, 'import src.model.functions as smfunctions\n'), ((5634, 5699), 'src.model.functions.rand_affine_grid', 'smfunctions.rand_affine_grid', (['dwi'], {'scale': 'self.config.affine_scale'}), '(dwi, scale=self.config.affine_scale)\n', (5662, 5699), True, 'import src.model.functions as smfunctions\n'), ((5717, 5809), 'torch.nn.functional.grid_sample', 'torch.nn.functional.grid_sample', (['t2', 'affine_grid_t2'], {'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(t2, affine_grid_t2, mode='bilinear',\n align_corners=True)\n", (5748, 5809), False, 'import torch, os\n'), ((5824, 5919), 'torch.nn.functional.grid_sample', 'torch.nn.functional.grid_sample', (['dwi', 'affine_grid_dwis'], {'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(dwi, affine_grid_dwis, mode='bilinear',\n align_corners=True)\n", (5855, 5919), False, 'import torch, os\n'), ((5937, 6035), 'torch.nn.functional.grid_sample', 'torch.nn.functional.grid_sample', (['dwi_b0', 'affine_grid_dwis'], {'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(dwi_b0, affine_grid_dwis, mode='bilinear',\n 
align_corners=True)\n", (5968, 6035), False, 'import torch, os\n'), ((6346, 6410), 'src.model.functions.rand_affine_grid', 'smfunctions.rand_affine_grid', (['t2'], {'scale': 'self.config.affine_scale'}), '(t2, scale=self.config.affine_scale)\n', (6374, 6410), True, 'import src.model.functions as smfunctions\n'), ((6442, 6507), 'src.model.functions.rand_affine_grid', 'smfunctions.rand_affine_grid', (['dwi'], {'scale': 'self.config.affine_scale'}), '(dwi, scale=self.config.affine_scale)\n', (6470, 6507), True, 'import src.model.functions as smfunctions\n'), ((6525, 6617), 'torch.nn.functional.grid_sample', 'torch.nn.functional.grid_sample', (['t2', 'affine_grid_t2'], {'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(t2, affine_grid_t2, mode='bilinear',\n align_corners=True)\n", (6556, 6617), False, 'import torch, os\n'), ((6632, 6727), 'torch.nn.functional.grid_sample', 'torch.nn.functional.grid_sample', (['dwi', 'affine_grid_dwis'], {'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(dwi, affine_grid_dwis, mode='bilinear',\n align_corners=True)\n", (6663, 6727), False, 'import torch, os\n'), ((6745, 6843), 'torch.nn.functional.grid_sample', 'torch.nn.functional.grid_sample', (['dwi_b0', 'affine_grid_dwis'], {'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(dwi_b0, affine_grid_dwis, mode='bilinear',\n align_corners=True)\n", (6776, 6843), False, 'import torch, os\n'), ((8963, 8997), 'src.model.functions.warp3d', 'smfunctions.warp3d', (['dwi_b0', 'ddf_AB'], {}), '(dwi_b0, ddf_AB)\n', (8981, 8997), True, 'import src.model.functions as smfunctions\n'), ((9027, 9058), 'src.model.functions.warp3d', 'smfunctions.warp3d', (['dwi', 'ddf_BC'], {}), '(dwi, ddf_BC)\n', (9045, 9058), True, 'import src.model.functions as smfunctions\n'), ((9088, 9119), 'src.model.functions.warp3d', 'smfunctions.warp3d', (['dwi', 'ddf_AC'], {}), '(dwi, ddf_AC)\n', (9106, 9119), True, 'import src.model.functions as smfunctions\n'), ((10641, 10682), 'os.path.join', 
'os.path.join', (['self.log_dir', '"""checkpoints"""'], {}), "(self.log_dir, 'checkpoints')\n", (10653, 10682), False, 'import torch, os\n'), ((10695, 10732), 'os.makedirs', 'os.makedirs', (['ckpt_path'], {'exist_ok': '(True)'}), '(ckpt_path, exist_ok=True)\n', (10706, 10732), False, 'import torch, os\n'), ((12035, 12059), 'torch.load', 'torch.load', (['best_ckpt[0]'], {}), '(best_ckpt[0])\n', (12045, 12059), False, 'import torch, os\n'), ((12873, 12924), 'torch.load', 'torch.load', (["[i for i in best_ckpt if 'AB-' in i][0]"], {}), "([i for i in best_ckpt if 'AB-' in i][0])\n", (12883, 12924), False, 'import torch, os\n'), ((12951, 13002), 'torch.load', 'torch.load', (["[i for i in best_ckpt if 'BC-' in i][0]"], {}), "([i for i in best_ckpt if 'BC-' in i][0])\n", (12961, 13002), False, 'import torch, os\n'), ((13029, 13080), 'torch.load', 'torch.load', (["[i for i in best_ckpt if 'AC-' in i][0]"], {}), "([i for i in best_ckpt if 'AC-' in i][0])\n", (13039, 13080), False, 'import torch, os\n'), ((13394, 13416), 'src.model.loss.l2_gradient', 'loss.l2_gradient', (['ddfs'], {}), '(ddfs)\n', (13410, 13416), False, 'from src.model import loss\n'), ((14271, 14312), 'src.model.loss.ssd', 'loss.ssd', (['warpped_img_C2A', 'warpped_img_AC'], {}), '(warpped_img_C2A, warpped_img_AC)\n', (14279, 14312), False, 'from src.model import loss\n'), ((14592, 14614), 'src.model.loss.l2_gradient', 'loss.l2_gradient', (['ddfs'], {}), '(ddfs)\n', (14608, 14614), False, 'from src.model import loss\n'), ((16229, 16244), 'torch.mean', 'torch.mean', (['res'], {}), '(res)\n', (16239, 16244), False, 'import torch, os\n'), ((16246, 16260), 'torch.std', 'torch.std', (['res'], {}), '(res)\n', (16255, 16260), False, 'import torch, os\n'), ((16874, 16921), 'os.path.join', 'os.path.join', (['visualization_path', 'f"""fx_img.nii"""'], {}), "(visualization_path, f'fx_img.nii')\n", (16886, 16921), False, 'import torch, os\n'), ((16959, 17006), 'os.path.join', 'os.path.join', (['visualization_path', 
'f"""mv_img.nii"""'], {}), "(visualization_path, f'mv_img.nii')\n", (16971, 17006), False, 'import torch, os\n'), ((17046, 17093), 'os.path.join', 'os.path.join', (['visualization_path', 'f"""pr_img.nii"""'], {}), "(visualization_path, f'pr_img.nii')\n", (17058, 17093), False, 'import torch, os\n'), ((17134, 17184), 'os.path.join', 'os.path.join', (['visualization_path', 'f"""wp_mv_img.nii"""'], {}), "(visualization_path, f'wp_mv_img.nii')\n", (17146, 17184), False, 'import torch, os\n'), ((17229, 17279), 'os.path.join', 'os.path.join', (['visualization_path', 'f"""wp_pr_img.nii"""'], {}), "(visualization_path, f'wp_pr_img.nii')\n", (17241, 17279), False, 'import torch, os\n'), ((17333, 17379), 'os.path.join', 'os.path.join', (['visualization_path', 'f"""ddf-x.nii"""'], {}), "(visualization_path, f'ddf-x.nii')\n", (17345, 17379), False, 'import torch, os\n'), ((17423, 17469), 'os.path.join', 'os.path.join', (['visualization_path', 'f"""ddf-y.nii"""'], {}), "(visualization_path, f'ddf-y.nii')\n", (17435, 17469), False, 'import torch, os\n'), ((17513, 17559), 'os.path.join', 'os.path.join', (['visualization_path', 'f"""ddf-z.nii"""'], {}), "(visualization_path, f'ddf-z.nii')\n", (17525, 17559), False, 'import torch, os\n'), ((18299, 18333), 'torch.cat', 'torch.cat', (['[fx_img, mv_img]'], {'dim': '(1)'}), '([fx_img, mv_img], dim=1)\n', (18308, 18333), False, 'import torch, os\n'), ((18639, 18669), 'torch.cat', 'torch.cat', (['[t2, dwi_b0]'], {'dim': '(1)'}), '([t2, dwi_b0], dim=1)\n', (18648, 18669), False, 'import torch, os\n'), ((18703, 18730), 'torch.cat', 'torch.cat', (['[t2, dwi]'], {'dim': '(1)'}), '([t2, dwi], dim=1)\n', (18712, 18730), False, 'import torch, os\n'), ((19033, 19067), 'torch.cat', 'torch.cat', (['[fx_img, pr_img]'], {'dim': '(1)'}), '([fx_img, pr_img], dim=1)\n', (19042, 19067), False, 'import torch, os\n'), ((19759, 19795), 'os.path.dirname', 'os.path.dirname', (['t2_ldmk_paths[0][0]'], {}), '(t2_ldmk_paths[0][0])\n', (19774, 19795), False, 
'import torch, os\n'), ((19991, 20023), 'src.model.functions.warp3d', 'smfunctions.warp3d', (['mv_ldmk', 'ddf'], {}), '(mv_ldmk, ddf)\n', (20009, 20023), True, 'import src.model.functions as smfunctions\n'), ((23224, 23254), 'src.model.loss.jacobian_determinant', 'loss.jacobian_determinant', (['ddf'], {}), '(ddf)\n', (23249, 23254), False, 'from src.model import loss\n'), ((24050, 24070), 'pickle.dump', 'pkl.dump', (['results', 'f'], {}), '(results, f)\n', (24058, 24070), True, 'import pickle as pkl\n'), ((24171, 24192), 'pickle.dump', 'pkl.dump', (['tre_dict', 'f'], {}), '(tre_dict, f)\n', (24179, 24192), True, 'import pickle as pkl\n'), ((6948, 6997), 'src.model.loss.global_mutual_information', 'loss.global_mutual_information', (['dwi[i]', 'dwi_b0[i]'], {}), '(dwi[i], dwi_b0[i])\n', (6978, 6997), False, 'from src.model import loss\n'), ((7120, 7188), 'src.model.functions.rand_affine_grid', 'smfunctions.rand_affine_grid', (['dwi_b0'], {'scale': 'self.config.affine_scale'}), '(dwi_b0, scale=self.config.affine_scale)\n', (7148, 7188), True, 'import src.model.functions as smfunctions\n'), ((7214, 7312), 'torch.nn.functional.grid_sample', 'torch.nn.functional.grid_sample', (['dwi_b0', 'affine_grid_dwis'], {'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(dwi_b0, affine_grid_dwis, mode='bilinear',\n align_corners=True)\n", (7245, 7312), False, 'import torch, os\n'), ((8284, 8314), 'torch.cat', 'torch.cat', (['[t2, dwi_b0]'], {'dim': '(1)'}), '([t2, dwi_b0], dim=1)\n', (8293, 8314), False, 'import torch, os\n'), ((8352, 8383), 'torch.cat', 'torch.cat', (['[dwi_b0, dwi]'], {'dim': '(1)'}), '([dwi_b0, dwi], dim=1)\n', (8361, 8383), False, 'import torch, os\n'), ((8421, 8448), 'torch.cat', 'torch.cat', (['[t2, dwi]'], {'dim': '(1)'}), '([t2, dwi], dim=1)\n', (8430, 8448), False, 'import torch, os\n'), ((9169, 9200), 'src.model.functions.warp3d', 'smfunctions.warp3d', (['dwi', 'ddf_BC'], {}), '(dwi, ddf_BC)\n', (9187, 9200), True, 'import src.model.functions as 
smfunctions\n'), ((9589, 9621), 'src.model.functions.warp3d', 'smfunctions.warp3d', (['mv_img', 'ddfs'], {}), '(mv_img, ddfs)\n', (9607, 9621), True, 'import src.model.functions as smfunctions\n'), ((11721, 11787), 'os.path.join', 'os.path.join', (['self.log_dir', '"""checkpoints"""', 'f"""epoch-{num_epoch}.pt"""'], {}), "(self.log_dir, 'checkpoints', f'epoch-{num_epoch}.pt')\n", (11733, 11787), False, 'import torch, os\n'), ((11883, 11933), 'os.path.join', 'os.path.join', (['self.log_dir', '"""checkpoints"""', '"""best*"""'], {}), "(self.log_dir, 'checkpoints', 'best*')\n", (11895, 11933), False, 'import torch, os\n'), ((12352, 12421), 'os.path.join', 'os.path.join', (['self.log_dir', '"""checkpoints"""', 'f"""AB-epoch-{num_epoch}.pt"""'], {}), "(self.log_dir, 'checkpoints', f'AB-epoch-{num_epoch}.pt')\n", (12364, 12421), False, 'import torch, os\n'), ((12460, 12529), 'os.path.join', 'os.path.join', (['self.log_dir', '"""checkpoints"""', 'f"""BC-epoch-{num_epoch}.pt"""'], {}), "(self.log_dir, 'checkpoints', f'BC-epoch-{num_epoch}.pt')\n", (12472, 12529), False, 'import torch, os\n'), ((12568, 12637), 'os.path.join', 'os.path.join', (['self.log_dir', '"""checkpoints"""', 'f"""AC-epoch-{num_epoch}.pt"""'], {}), "(self.log_dir, 'checkpoints', f'AC-epoch-{num_epoch}.pt')\n", (12580, 12637), False, 'import torch, os\n'), ((12717, 12768), 'os.path.join', 'os.path.join', (['self.log_dir', '"""checkpoints"""', 'f"""best*"""'], {}), "(self.log_dir, 'checkpoints', f'best*')\n", (12729, 12768), False, 'import torch, os\n'), ((13461, 13513), 'src.model.loss.global_mutual_information', 'loss.global_mutual_information', (['fx_img', 'warpped_imgs'], {}), '(fx_img, warpped_imgs)\n', (13491, 13513), False, 'from src.model import loss\n'), ((14659, 14714), 'src.model.loss.global_mutual_information', 'loss.global_mutual_information', (['fx_img', 'warpped_imgs[0]'], {}), '(fx_img, warpped_imgs[0])\n', (14689, 14714), False, 'from src.model import loss\n'), ((14765, 14820), 
'src.model.loss.global_mutual_information', 'loss.global_mutual_information', (['fx_img', 'warpped_imgs[1]'], {}), '(fx_img, warpped_imgs[1])\n', (14795, 14820), False, 'from src.model import loss\n'), ((16120, 16166), 'src.model.loss.global_mutual_information', 'loss.global_mutual_information', (['fx_img', 'wp_img'], {}), '(fx_img, wp_img)\n', (16150, 16166), False, 'from src.model import loss\n'), ((20338, 20351), 'numpy.isnan', 'np.isnan', (['TRE'], {}), '(TRE)\n', (20346, 20351), True, 'import numpy as np\n'), ((23927, 23937), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (23934, 23937), True, 'import numpy as np\n'), ((23939, 23948), 'numpy.std', 'np.std', (['v'], {}), '(v)\n', (23945, 23948), True, 'import numpy as np\n'), ((23969, 24024), 'os.path.join', 'os.path.join', (['self.log_dir', 'f"""results{save_suffix}.pkl"""'], {}), "(self.log_dir, f'results{save_suffix}.pkl')\n", (23981, 24024), False, 'import torch, os\n'), ((24090, 24145), 'os.path.join', 'os.path.join', (['self.log_dir', 'f"""tre_dic{save_suffix}.pkl"""'], {}), "(self.log_dir, f'tre_dic{save_suffix}.pkl')\n", (24102, 24145), False, 'import torch, os\n'), ((911, 934), 'src.model.networks.local.LocalModel', 'LocalModel', (['self.config'], {}), '(self.config)\n', (921, 934), False, 'from src.model.networks.local import LocalModel\n'), ((1064, 1087), 'src.model.networks.local.LocalModel', 'LocalModel', (['self.config'], {}), '(self.config)\n', (1074, 1087), False, 'from src.model.networks.local import LocalModel\n'), ((1121, 1144), 'src.model.networks.local.LocalModel', 'LocalModel', (['self.config'], {}), '(self.config)\n', (1131, 1144), False, 'from src.model.networks.local import LocalModel\n'), ((1178, 1201), 'src.model.networks.local.LocalModel', 'LocalModel', (['self.config'], {}), '(self.config)\n', (1188, 1201), False, 'from src.model.networks.local import LocalModel\n'), ((7338, 7387), 'src.model.loss.global_mutual_information', 'loss.global_mutual_information', (['dwi[i]', 
'new_b0[i]'], {}), '(dwi[i], new_b0[i])\n', (7368, 7387), False, 'from src.model import loss\n'), ((8607, 8634), 'torch.cat', 'torch.cat', (['[t2, dwi]'], {'dim': '(1)'}), '([t2, dwi], dim=1)\n', (8616, 8634), False, 'import torch, os\n'), ((8703, 8731), 'torch.cat', 'torch.cat', (['input_data'], {'dim': '(1)'}), '(input_data, dim=1)\n', (8712, 8731), False, 'import torch, os\n'), ((9438, 9470), 'src.model.functions.warp3d', 'smfunctions.warp3d', (['mv_img', 'ddfs'], {}), '(mv_img, ddfs)\n', (9456, 9470), True, 'import src.model.functions as smfunctions\n'), ((9472, 9500), 'src.model.functions.warp3d', 'smfunctions.warp3d', (['b0', 'ddfs'], {}), '(b0, ddfs)\n', (9490, 9500), True, 'import src.model.functions as smfunctions\n'), ((10802, 10854), 'os.path.join', 'os.path.join', (['ckpt_path', 'f"""AB-epoch-{self.epoch}.pt"""'], {}), "(ckpt_path, f'AB-epoch-{self.epoch}.pt')\n", (10814, 10854), False, 'import torch, os\n'), ((10896, 10948), 'os.path.join', 'os.path.join', (['ckpt_path', 'f"""BC-epoch-{self.epoch}.pt"""'], {}), "(ckpt_path, f'BC-epoch-{self.epoch}.pt')\n", (10908, 10948), False, 'import torch, os\n'), ((10990, 11042), 'os.path.join', 'os.path.join', (['ckpt_path', 'f"""AC-epoch-{self.epoch}.pt"""'], {}), "(ckpt_path, f'AC-epoch-{self.epoch}.pt')\n", (11002, 11042), False, 'import torch, os\n'), ((20597, 20630), 'numpy.around', 'np.around', (['TRE_wo_reg'], {'decimals': '(3)'}), '(TRE_wo_reg, decimals=3)\n', (20606, 20630), True, 'import numpy as np\n'), ((20666, 20692), 'numpy.around', 'np.around', (['TRE'], {'decimals': '(3)'}), '(TRE, decimals=3)\n', (20675, 20692), True, 'import numpy as np\n'), ((20723, 20762), 'numpy.around', 'np.around', (['(TRE_wo_reg - TRE)'], {'decimals': '(3)'}), '(TRE_wo_reg - TRE, decimals=3)\n', (20732, 20762), True, 'import numpy as np\n'), ((11118, 11153), 'os.path.join', 'os.path.join', (['ckpt_path', '"""best*.pt"""'], {}), "(ckpt_path, 'best*.pt')\n", (11130, 11153), False, 'import torch, os\n'), ((11172, 11184), 
'os.remove', 'os.remove', (['i'], {}), '(i)\n', (11181, 11184), False, 'import torch, os\n'), ((11253, 11310), 'os.path.join', 'os.path.join', (['ckpt_path', 'f"""best-AB-epoch-{self.epoch}.pt"""'], {}), "(ckpt_path, f'best-AB-epoch-{self.epoch}.pt')\n", (11265, 11310), False, 'import torch, os\n'), ((11352, 11409), 'os.path.join', 'os.path.join', (['ckpt_path', 'f"""best-BC-epoch-{self.epoch}.pt"""'], {}), "(ckpt_path, f'best-BC-epoch-{self.epoch}.pt')\n", (11364, 11409), False, 'import torch, os\n'), ((11451, 11508), 'os.path.join', 'os.path.join', (['ckpt_path', 'f"""best-AC-epoch-{self.epoch}.pt"""'], {}), "(ckpt_path, f'best-AC-epoch-{self.epoch}.pt')\n", (11463, 11508), False, 'import torch, os\n'), ((17849, 17886), 'os.path.basename', 'os.path.basename', (['t2_ldmk_paths[i][0]'], {}), '(t2_ldmk_paths[i][0])\n', (17865, 17886), False, 'import torch, os\n'), ((17989, 18027), 'os.path.basename', 'os.path.basename', (['dwi_ldmk_paths[i][0]'], {}), '(dwi_ldmk_paths[i][0])\n', (18005, 18027), False, 'import torch, os\n'), ((18133, 18171), 'os.path.basename', 'os.path.basename', (['dwi_ldmk_paths[i][0]'], {}), '(dwi_ldmk_paths[i][0])\n', (18149, 18171), False, 'import torch, os\n'), ((20167, 20207), 'src.model.loss.centroid_distance', 'loss.centroid_distance', (['fx_ldmk', 'wp_ldmk'], {}), '(fx_ldmk, wp_ldmk)\n', (20189, 20207), False, 'from src.model import loss\n'), ((20247, 20287), 'src.model.loss.centroid_distance', 'loss.centroid_distance', (['fx_ldmk', 'mv_ldmk'], {}), '(fx_ldmk, mv_ldmk)\n', (20269, 20287), False, 'from src.model import loss\n'), ((22963, 23012), 'src.model.loss.global_mutual_information', 'loss.global_mutual_information', (['fx_img', 'wp_mv_img'], {}), '(fx_img, wp_mv_img)\n', (22993, 23012), False, 'from src.model import loss\n'), ((23068, 23114), 'src.model.loss.global_mutual_information', 'loss.global_mutual_information', (['fx_img', 'mv_img'], {}), '(fx_img, mv_img)\n', (23098, 23114), False, 'from src.model import loss\n')] |
from sciapp.action import dataio
import numpy as np
def imread(path):
    """Read a whitespace-delimited text file at *path* into a float array."""
    data = np.loadtxt(fname=path, dtype=float)
    return data
def imsave(path, img):
    """Write array *img* to *path* as a plain-text table (one row per line)."""
    np.savetxt(fname=path, X=img)
# Register the plain-text "dat" format with sciapp's global I/O managers so
# the functions above handle opening/saving it.
# NOTE(review): the third argument is presumably the data category/tag
# ("img") -- confirm against the sciapp dataio API.
dataio.ReaderManager.add("dat", imread, "img")
dataio.WriterManager.add("dat", imsave, "img")
class OpenFile(dataio.Reader):
    """Open-dialog plugin for DAT files (title is the menu label)."""
    title = "DAT Open"
    tag = "img"
    filt = ["DAT"]
class SaveFile(dataio.ImageWriter):
    """Save-dialog plugin for DAT files (title is the menu label)."""
    title = "DAT Save"
    tag = "img"
    filt = ["DAT"]
plgs = [OpenFile, SaveFile]
| [
"sciapp.action.dataio.WriterManager.add",
"numpy.loadtxt",
"sciapp.action.dataio.ReaderManager.add",
"numpy.savetxt"
] | [((166, 212), 'sciapp.action.dataio.ReaderManager.add', 'dataio.ReaderManager.add', (['"""dat"""', 'imread', '"""img"""'], {}), "('dat', imread, 'img')\n", (190, 212), False, 'from sciapp.action import dataio\n'), ((213, 259), 'sciapp.action.dataio.WriterManager.add', 'dataio.WriterManager.add', (['"""dat"""', 'imsave', '"""img"""'], {}), "('dat', imsave, 'img')\n", (237, 259), False, 'from sciapp.action import dataio\n'), ((83, 112), 'numpy.loadtxt', 'np.loadtxt', (['path'], {'dtype': 'float'}), '(path, dtype=float)\n', (93, 112), True, 'import numpy as np\n'), ((142, 163), 'numpy.savetxt', 'np.savetxt', (['path', 'img'], {}), '(path, img)\n', (152, 163), True, 'import numpy as np\n')] |
import struct
import sys
import matplotlib.pyplot as plt
import numpy as np
import mmap
import os
if len(sys.argv) < 2:
    print("Usage: %s <path>" % (sys.argv[0]))
    # BUGFIX: previously the script fell through and crashed with an
    # IndexError on sys.argv[1]; exit cleanly with a nonzero status instead.
    sys.exit(1)

file = sys.argv[1]
# BUGFIX: use os.path.basename instead of split("/")[-1] so Windows-style
# separators are handled too.
filename = os.path.basename(file)

# Interpret the file as a raw array of native-endian float64 values,
# memory-mapped so arbitrarily large files can be plotted.
arr = np.memmap(file, dtype='float64', mode='r')

plt.plot(arr)
plt.title(filename)
print("saving=" + file + ".png")
plt.savefig(file + ".png")
| [
"matplotlib.pyplot.title",
"numpy.memmap",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot"
] | [((224, 266), 'numpy.memmap', 'np.memmap', (['file'], {'dtype': '"""float64"""', 'mode': '"""r"""'}), "(file, dtype='float64', mode='r')\n", (233, 266), True, 'import numpy as np\n'), ((267, 280), 'matplotlib.pyplot.plot', 'plt.plot', (['arr'], {}), '(arr)\n', (275, 280), True, 'import matplotlib.pyplot as plt\n'), ((282, 301), 'matplotlib.pyplot.title', 'plt.title', (['filename'], {}), '(filename)\n', (291, 301), True, 'import matplotlib.pyplot as plt\n'), ((333, 359), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(file + '.png')"], {}), "(file + '.png')\n", (344, 359), True, 'import matplotlib.pyplot as plt\n')] |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2021 <NAME> <<EMAIL>>
#
# Distributed under terms of the MIT license.
"""
Example implementation of MAML++ on miniImageNet.
"""
import learn2learn as l2l
import numpy as np
import random
import torch
from collections import namedtuple
from typing import Tuple
from tqdm import tqdm
from examples.vision.mamlpp.cnn4_bnrs import CNN4_BNRS
from examples.vision.mamlpp.MAMLpp import MAMLpp
# Lightweight container pairing a support (adaptation) set with a query
# (evaluation) set for one meta-learning task.
MetaBatch = namedtuple("MetaBatch", "support query")
# Sample counts of the mini-ImageNet train/val/test splits.
train_samples, val_samples, test_samples = 38400, 9600, 12000  # Is that correct?
# NOTE(review): used only as a divisor for validation/test iteration counts
# (val_samples // tasks, test_samples // tasks) -- presumably the number of
# tasks per split; confirm.
tasks = 600
def accuracy(predictions, targets):
    """Fraction of rows in *predictions* whose argmax equals *targets*."""
    predicted_labels = predictions.argmax(dim=1).view(targets.shape)
    n_correct = (predicted_labels == targets).sum().float()
    return n_correct / targets.size(0)
class MAMLppTrainer:
    """MAML++ meta-trainer for few-shot classification on mini-ImageNet.

    Wraps a CNN4 backbone with per-step BatchNorm statistics (CNN4_BNRS) in a
    MAMLpp meta-learner and applies the MAML++ refinements implemented below:
    Multi-Step Loss (MSL) annealing, Derivative-Order Annealing (DA), BatchNorm
    statistics backup/restore around validation, and cosine LR scheduling.
    """
    def __init__(
        self,
        ways=5,
        k_shots=10,
        n_queries=30,
        steps=5,
        msl_epochs=25,
        DA_epochs=50,
        use_cuda=True,
        seed=42,
    ):
        """Build the task sets, model, and MAML++ hyper-parameters.

        ways/k_shots/n_queries: N-way K-shot setup with n_queries query
            samples per task.
        steps: number of inner-loop adaptation steps.
        msl_epochs: number of epochs over which the Multi-Step Loss weights
            are annealed.
        DA_epochs: epoch from which second-order gradients are enabled.
        """
        self._use_cuda = use_cuda
        self._device = torch.device("cpu")
        if self._use_cuda and torch.cuda.device_count():
            torch.cuda.manual_seed(seed)
            self._device = torch.device("cuda")
        # Seed every RNG in play for reproducibility.
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        # Dataset
        print("[*] Loading mini-ImageNet...")
        (
            self._train_tasks,
            self._valid_tasks,
            self._test_tasks,
        ) = l2l.vision.benchmarks.get_tasksets(
            "mini-imagenet",
            train_samples=k_shots,
            train_ways=ways,
            test_samples=n_queries,
            test_ways=ways,
            root="~/data",
        )
        # Model
        self._model = CNN4_BNRS(ways, adaptation_steps=steps)
        if self._use_cuda:
            self._model.cuda()
        # Meta-Learning related
        self._steps = steps
        self._k_shots = k_shots
        self._n_queries = n_queries
        self._inner_criterion = torch.nn.CrossEntropyLoss(reduction="mean")
        # Multi-Step Loss: start with uniform per-step weights summing to 1.
        self._msl_epochs = msl_epochs
        self._step_weights = torch.ones(steps, device=self._device) * (1.0 / steps)
        self._msl_decay_rate = 1.0 / steps / msl_epochs
        self._msl_min_value_for_non_final_losses = torch.tensor(0.03 / steps)
        self._msl_max_value_for_final_loss = 1.0 - (
            (steps - 1) * self._msl_min_value_for_non_final_losses
        )
        # Derivative-Order Annealing (when to start using second-order opt)
        self._derivative_order_annealing_from_epoch = DA_epochs
    def _anneal_step_weights(self):
        """Shift MSL weight mass from intermediate steps onto the final step.

        Non-final weights decay towards a small floor while the final step's
        weight grows towards its cap, so late in training only the last
        adaptation step's query loss contributes.
        """
        self._step_weights[:-1] = torch.max(
            self._step_weights[:-1] - self._msl_decay_rate,
            self._msl_min_value_for_non_final_losses,
        )
        self._step_weights[-1] = torch.min(
            self._step_weights[-1] + ((self._steps - 1) * self._msl_decay_rate),
            self._msl_max_value_for_final_loss,
        )
    def _split_batch(self, batch: tuple) -> MetaBatch:
        """
        Separate data batch into adaptation/evaluation sets.
        """
        images, labels = batch
        batch_size = self._k_shots + self._n_queries
        assert batch_size <= images.shape[0], "K+N are greater than the batch size!"
        # Random split: first K shuffled indices form the support set,
        # the remainder the query set.
        indices = torch.randperm(batch_size)
        support_indices = indices[: self._k_shots]
        query_indices = indices[self._k_shots :]
        return MetaBatch(
            (
                images[support_indices],
                labels[support_indices],
            ),
            (images[query_indices], labels[query_indices]),
        )
    def _training_step(
        self,
        batch: MetaBatch,
        learner: MAMLpp,
        msl: bool = True,
        epoch: int = 0,
    ) -> Tuple[torch.Tensor, float]:
        """One meta-training step on a single task.

        Adapts `learner` on the support set for self._steps steps and returns
        (query loss, query accuracy). With msl=True the query loss is the
        MSL-weighted sum over all adaptation steps; otherwise only the loss
        after the final adaptation step is used.
        """
        s_inputs, s_labels = batch.support
        q_inputs, q_labels = batch.query
        query_loss = torch.tensor(.0, device=self._device)
        if self._use_cuda:
            s_inputs = s_inputs.float().cuda(device=self._device)
            s_labels = s_labels.cuda(device=self._device)
            q_inputs = q_inputs.float().cuda(device=self._device)
            q_labels = q_labels.cuda(device=self._device)
        # Derivative-Order Annealing: cheap first-order gradients early in
        # training, second-order once the annealing epoch is reached.
        second_order = True
        if epoch < self._derivative_order_annealing_from_epoch:
            second_order = False
        # Adapt the model on the support set
        for step in range(self._steps):
            # forward + backward + optimize
            pred = learner(s_inputs, step)
            support_loss = self._inner_criterion(pred, s_labels)
            learner.adapt(support_loss, first_order=not second_order, step=step)
            # Multi-Step Loss
            if msl:
                q_pred = learner(q_inputs, step)
                query_loss += self._step_weights[step] * self._inner_criterion(
                    q_pred, q_labels
                )
        # Evaluate the adapted model on the query set
        if not msl:
            q_pred = learner(q_inputs, self._steps-1)
            query_loss = self._inner_criterion(q_pred, q_labels)
        # Accuracy is always computed from the last step's query predictions.
        acc = accuracy(q_pred, q_labels).detach()
        return query_loss, acc
    def _testing_step(
        self, batch: MetaBatch, learner: MAMLpp
    ) -> Tuple[torch.Tensor, float]:
        """Adapt on the support set, then return query (loss, accuracy).

        Unlike _training_step this never uses MSL and never builds
        second-order graphs; the loss is detached.
        """
        s_inputs, s_labels = batch.support
        q_inputs, q_labels = batch.query
        if self._use_cuda:
            s_inputs = s_inputs.float().cuda(device=self._device)
            s_labels = s_labels.cuda(device=self._device)
            q_inputs = q_inputs.float().cuda(device=self._device)
            q_labels = q_labels.cuda(device=self._device)
        # Adapt the model on the support set
        for step in range(self._steps):
            # forward + backward + optimize
            pred = learner(s_inputs, step)
            support_loss = self._inner_criterion(pred, s_labels)
            learner.adapt(support_loss, step=step)
        # Evaluate the adapted model on the query set
        q_pred = learner(q_inputs, self._steps-1)
        query_loss = self._inner_criterion(q_pred, q_labels).detach()
        acc = accuracy(q_pred, q_labels)
        return query_loss, acc
    def train(
        self,
        meta_lr=0.001,
        fast_lr=0.01,
        meta_bsz=5,
        epochs=100,
        val_interval=1,
    ):
        """Run the meta-training loop and return the trained state dict.

        meta_lr: outer-loop (AdamW) learning rate.
        fast_lr: initial inner-loop learning rate (learned per-layer/per-step).
        meta_bsz: number of tasks per meta-update.
        val_interval: run meta-validation every this many epochs.
        """
        print("[*] Training...")
        maml = MAMLpp(
            self._model,
            lr=fast_lr,  # Initialisation LR for all layers and steps
            adaptation_steps=self._steps,  # For LSLR
            first_order=False,
            allow_nograd=True,  # For the parameters of the MetaBatchNorm layers
        )
        opt = torch.optim.AdamW(maml.parameters(), meta_lr, betas=(0, 0.999))
        iter_per_epoch = (
            train_samples // (meta_bsz * (self._k_shots + self._n_queries))
        ) + 1
        # Cosine annealing over the full training horizon.
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            opt,
            T_max=epochs * iter_per_epoch,
            eta_min=0.00001,
        )
        for epoch in range(epochs):
            epoch_meta_train_loss, epoch_meta_train_acc = 0.0, 0.0
            for _ in tqdm(range(iter_per_epoch)):
                opt.zero_grad()
                meta_train_losses, meta_train_accs = [], []
                # Accumulate gradients over meta_bsz tasks, each adapted from
                # a fresh clone of the meta-model.
                for _ in range(meta_bsz):
                    meta_batch = self._split_batch(self._train_tasks.sample())
                    meta_loss, meta_acc = self._training_step(
                        meta_batch,
                        maml.clone(),
                        msl=(epoch < self._msl_epochs),
                        epoch=epoch,
                    )
                    meta_loss.backward()
                    meta_train_losses.append(meta_loss.detach())
                    meta_train_accs.append(meta_acc)
                epoch_meta_train_loss += torch.Tensor(meta_train_losses).mean().item()
                epoch_meta_train_acc += torch.Tensor(meta_train_accs).mean().item()
                # Average the accumulated gradients and optimize
                with torch.no_grad():
                    for p in maml.parameters():
                        # Remember the MetaBatchNorm layer has parameters that don't require grad!
                        if p.requires_grad:
                            p.grad.data.mul_(1.0 / meta_bsz)
                opt.step()
                scheduler.step()
            # Multi-Step Loss
            self._anneal_step_weights()
            epoch_meta_train_loss /= iter_per_epoch
            epoch_meta_train_acc /= iter_per_epoch
            print(f"==========[Epoch {epoch}]==========")
            print(f"Meta-training Loss: {epoch_meta_train_loss:.6f}")
            print(f"Meta-training Acc: {epoch_meta_train_acc:.6f}")
            # ======= Validation ========
            if (epoch + 1) % val_interval == 0:
                # Backup the BatchNorm layers' running statistics
                maml.backup_stats()
                # Compute the meta-validation loss
                # TODO: Go through the entire validation set, which shouldn't be shuffled, and
                # which tasks should not be continuously resampled from!
                meta_val_losses, meta_val_accs = [], []
                for _ in tqdm(range(val_samples // tasks)):
                    meta_batch = self._split_batch(self._valid_tasks.sample())
                    loss, acc = self._testing_step(meta_batch, maml.clone())
                    meta_val_losses.append(loss)
                    meta_val_accs.append(acc)
                meta_val_loss = float(torch.Tensor(meta_val_losses).mean().item())
                meta_val_acc = float(torch.Tensor(meta_val_accs).mean().item())
                print(f"Meta-validation Loss: {meta_val_loss:.6f}")
                print(f"Meta-validation Accuracy: {meta_val_acc:.6f}")
                # Restore the BatchNorm layers' running statistics
                maml.restore_backup_stats()
            print("============================================")
        return self._model.state_dict()
    def test(
        self,
        model_state_dict,
        meta_lr=0.001,
        fast_lr=0.01,
        meta_bsz=5,
    ):
        """Evaluate a trained state dict on the meta-test split and print metrics."""
        self._model.load_state_dict(model_state_dict)
        maml = MAMLpp(
            self._model,
            lr=fast_lr,
            adaptation_steps=self._steps,
            first_order=False,
            allow_nograd=True,
        )
        # NOTE(review): `opt` is created but never used below -- dead code? Confirm.
        opt = torch.optim.AdamW(maml.parameters(), meta_lr, betas=(0, 0.999))
        meta_losses, meta_accs = [], []
        for _ in tqdm(range(test_samples // tasks)):
            meta_batch = self._split_batch(self._test_tasks.sample())
            loss, acc = self._testing_step(meta_batch, maml.clone())
            meta_losses.append(loss)
            meta_accs.append(acc)
        loss = float(torch.Tensor(meta_losses).mean().item())
        acc = float(torch.Tensor(meta_accs).mean().item())
        print(f"Meta-training Loss: {loss:.6f}")
        print(f"Meta-training Acc: {acc:.6f}")
if __name__ == "__main__":
mamlPlusPlus = MAMLppTrainer()
model = mamlPlusPlus.train()
mamlPlusPlus.test(model)
| [
"torch.nn.CrossEntropyLoss",
"torch.randperm",
"torch.max",
"torch.cuda.device_count",
"torch.min",
"examples.vision.mamlpp.cnn4_bnrs.CNN4_BNRS",
"learn2learn.vision.benchmarks.get_tasksets",
"numpy.random.seed",
"collections.namedtuple",
"torch.Tensor",
"examples.vision.mamlpp.MAMLpp.MAMLpp",
... | [((486, 526), 'collections.namedtuple', 'namedtuple', (['"""MetaBatch"""', '"""support query"""'], {}), "('MetaBatch', 'support query')\n", (496, 526), False, 'from collections import namedtuple\n'), ((1071, 1090), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1083, 1090), False, 'import torch\n'), ((1245, 1262), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1256, 1262), False, 'import random\n'), ((1271, 1291), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1285, 1291), True, 'import numpy as np\n'), ((1300, 1323), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1317, 1323), False, 'import torch\n'), ((1503, 1653), 'learn2learn.vision.benchmarks.get_tasksets', 'l2l.vision.benchmarks.get_tasksets', (['"""mini-imagenet"""'], {'train_samples': 'k_shots', 'train_ways': 'ways', 'test_samples': 'n_queries', 'test_ways': 'ways', 'root': '"""~/data"""'}), "('mini-imagenet', train_samples=k_shots,\n train_ways=ways, test_samples=n_queries, test_ways=ways, root='~/data')\n", (1537, 1653), True, 'import learn2learn as l2l\n'), ((1772, 1811), 'examples.vision.mamlpp.cnn4_bnrs.CNN4_BNRS', 'CNN4_BNRS', (['ways'], {'adaptation_steps': 'steps'}), '(ways, adaptation_steps=steps)\n', (1781, 1811), False, 'from examples.vision.mamlpp.cnn4_bnrs import CNN4_BNRS\n'), ((2031, 2074), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {'reduction': '"""mean"""'}), "(reduction='mean')\n", (2056, 2074), False, 'import torch\n'), ((2331, 2357), 'torch.tensor', 'torch.tensor', (['(0.03 / steps)'], {}), '(0.03 / steps)\n', (2343, 2357), False, 'import torch\n'), ((2700, 2804), 'torch.max', 'torch.max', (['(self._step_weights[:-1] - self._msl_decay_rate)', 'self._msl_min_value_for_non_final_losses'], {}), '(self._step_weights[:-1] - self._msl_decay_rate, self.\n _msl_min_value_for_non_final_losses)\n', (2709, 2804), False, 'import torch\n'), ((2868, 2984), 'torch.min', 'torch.min', 
(['(self._step_weights[-1] + (self._steps - 1) * self._msl_decay_rate)', 'self._msl_max_value_for_final_loss'], {}), '(self._step_weights[-1] + (self._steps - 1) * self._msl_decay_rate,\n self._msl_max_value_for_final_loss)\n', (2877, 2984), False, 'import torch\n'), ((3347, 3373), 'torch.randperm', 'torch.randperm', (['batch_size'], {}), '(batch_size)\n', (3361, 3373), False, 'import torch\n'), ((3963, 4001), 'torch.tensor', 'torch.tensor', (['(0.0)'], {'device': 'self._device'}), '(0.0, device=self._device)\n', (3975, 4001), False, 'import torch\n'), ((6469, 6573), 'examples.vision.mamlpp.MAMLpp.MAMLpp', 'MAMLpp', (['self._model'], {'lr': 'fast_lr', 'adaptation_steps': 'self._steps', 'first_order': '(False)', 'allow_nograd': '(True)'}), '(self._model, lr=fast_lr, adaptation_steps=self._steps, first_order=\n False, allow_nograd=True)\n', (6475, 6573), False, 'from examples.vision.mamlpp.MAMLpp import MAMLpp\n'), ((6961, 7058), 'torch.optim.lr_scheduler.CosineAnnealingLR', 'torch.optim.lr_scheduler.CosineAnnealingLR', (['opt'], {'T_max': '(epochs * iter_per_epoch)', 'eta_min': '(1e-05)'}), '(opt, T_max=epochs *\n iter_per_epoch, eta_min=1e-05)\n', (7003, 7058), False, 'import torch\n'), ((10346, 10450), 'examples.vision.mamlpp.MAMLpp.MAMLpp', 'MAMLpp', (['self._model'], {'lr': 'fast_lr', 'adaptation_steps': 'self._steps', 'first_order': '(False)', 'allow_nograd': '(True)'}), '(self._model, lr=fast_lr, adaptation_steps=self._steps, first_order=\n False, allow_nograd=True)\n', (10352, 10450), False, 'from examples.vision.mamlpp.MAMLpp import MAMLpp\n'), ((1121, 1146), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (1144, 1146), False, 'import torch\n'), ((1160, 1188), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (1182, 1188), False, 'import torch\n'), ((1216, 1236), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1228, 1236), False, 'import torch\n'), ((2169, 2207), 'torch.ones', 
'torch.ones', (['steps'], {'device': 'self._device'}), '(steps, device=self._device)\n', (2179, 2207), False, 'import torch\n'), ((8142, 8157), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8155, 8157), False, 'import torch\n'), ((10920, 10945), 'torch.Tensor', 'torch.Tensor', (['meta_losses'], {}), '(meta_losses)\n', (10932, 10945), False, 'import torch\n'), ((10981, 11004), 'torch.Tensor', 'torch.Tensor', (['meta_accs'], {}), '(meta_accs)\n', (10993, 11004), False, 'import torch\n'), ((7925, 7956), 'torch.Tensor', 'torch.Tensor', (['meta_train_losses'], {}), '(meta_train_losses)\n', (7937, 7956), False, 'import torch\n'), ((8011, 8040), 'torch.Tensor', 'torch.Tensor', (['meta_train_accs'], {}), '(meta_train_accs)\n', (8023, 8040), False, 'import torch\n'), ((9668, 9697), 'torch.Tensor', 'torch.Tensor', (['meta_val_losses'], {}), '(meta_val_losses)\n', (9680, 9697), False, 'import torch\n'), ((9750, 9777), 'torch.Tensor', 'torch.Tensor', (['meta_val_accs'], {}), '(meta_val_accs)\n', (9762, 9777), False, 'import torch\n')] |
import math
import numpy
__author__ = 'Matt'
def drawWheelDisplay(canvas, x, y, size, data):
    """Draw the six-wheel status display in a size x size region at (x, y).

    Wheels are laid out in two columns (left at 1/4 width, right at 3/4)
    of three rows (front/mid/rear at 1/6, 3/6 and 5/6 of the height).
    `data` must expose one wheel-state attribute per wheel.
    """
    wheelSize = size / 6
    # Left column, top to bottom.
    drawWheel(canvas, x+size*1/4, y+size/6, wheelSize, data.frontLeftWheel)
    drawWheel(canvas, x+size*1/4, y+size/6*3, wheelSize, data.midLeftWheel)
    drawWheel(canvas, x+size*1/4, y+size/6*5, wheelSize, data.rearLeftWheel)
    # Right column, top to bottom.
    drawWheel(canvas, x+size*3/4, y+size/6, wheelSize, data.frontRightWheel)
    drawWheel(canvas, x+size*3/4, y+size/6*3, wheelSize, data.midRightWheel)
    # BUGFIX: the rear-right wheel previously re-used data.midRightWheel.
    drawWheel(canvas, x+size*3/4, y+size/6*5, wheelSize, data.rearRightWheel)
##
# drawWheel
#
# Description - draws a wheel with position, rotation, and indicators based on given data
#
def drawWheel(canvas, x, y, size, wheelData):
    """Draw one wheel as a rotated square plus a speed-intensity bar.

    (x, y) is the wheel centre and size its side length. wheelData must
    expose .theta (steering angle, radians) and .speed (signed magnitude
    scaling the bar length).
    """
    half_length = size/2
    # 2-D rotation matrix; the corner points below are row vectors, so they
    # are multiplied on the right (rect.dot(R)) to rotate by wheelData.theta.
    # Note: np.matrix is deprecated in NumPy, so plain ndarrays are used;
    # .dot() and .item() behave identically for this shape.
    rotationMatrix = np.array(
        [[math.cos(wheelData.theta), -math.sin(wheelData.theta)],
         [math.sin(wheelData.theta), math.cos(wheelData.theta)]]
    )
    # Square centred on the origin, one row per corner.
    rect = np.array(
        [[-half_length, -half_length],
         [-half_length, half_length],
         [half_length, half_length],
         [half_length, -half_length]]
    )
    # Bar from the wheel's edge, length proportional to speed.
    speedArrow = np.array(
        [[half_length, 0],
         [half_length, half_length*wheelData.speed]]
    )
    rotRect = rect.dot(rotationMatrix)
    rotSpeed = speedArrow.dot(rotationMatrix)
    # Draw the wheel
    canvas.create_polygon(
        x+rotRect[0].item(0), y+rotRect[0].item(1),
        x+rotRect[1].item(0), y+rotRect[1].item(1),
        x+rotRect[2].item(0), y+rotRect[2].item(1),
        x+rotRect[3].item(0), y+rotRect[3].item(1),
        fill="grey"
    )
    # Draw the speed intensity bar
    canvas.create_line(
        x+rotSpeed[0].item(0), y+rotSpeed[0].item(1),
        x+rotSpeed[1].item(0), y+rotSpeed[1].item(1),
        width=size/18,
        fill="purple"
    )
    return
"math.cos",
"numpy.matrix",
"math.sin"
] | [((1001, 1136), 'numpy.matrix', 'numpy.matrix', (['[[-half_length, -half_length], [-half_length, half_length], [half_length,\n half_length], [half_length, -half_length]]'], {}), '([[-half_length, -half_length], [-half_length, half_length], [\n half_length, half_length], [half_length, -half_length]])\n', (1013, 1136), False, 'import numpy\n'), ((1191, 1269), 'numpy.matrix', 'numpy.matrix', (['[[half_length, 0], [half_length, half_length * wheelData.speed]]'], {}), '([[half_length, 0], [half_length, half_length * wheelData.speed]])\n', (1203, 1269), False, 'import numpy\n'), ((862, 887), 'math.cos', 'math.cos', (['wheelData.theta'], {}), '(wheelData.theta)\n', (870, 887), False, 'import math\n'), ((928, 953), 'math.sin', 'math.sin', (['wheelData.theta'], {}), '(wheelData.theta)\n', (936, 953), False, 'import math\n'), ((955, 980), 'math.cos', 'math.cos', (['wheelData.theta'], {}), '(wheelData.theta)\n', (963, 980), False, 'import math\n'), ((890, 915), 'math.sin', 'math.sin', (['wheelData.theta'], {}), '(wheelData.theta)\n', (898, 915), False, 'import math\n')] |
import gym
import numpy as np
from grpc import RpcError
from robo_gym.utils.exceptions import InvalidStateError, RobotServerError
class MoveEffectorToWayPoints(gym.Wrapper):
    """
    Add environment a goal that the robot end-effector must reach all waypoints.
    """
    def __init__(self, env, wayPoints: np.ndarray, endEffectorName: str, distanceThreshold: float=0.3):
        """
        env: the environment to be wrapped.
        wayPoints: a (, 3) numpy array representing the (x,y,z) positions of the wayPoints.
        endEffectorName: the name of the end-effector. It must exist in the environment observation.
        distanceThreshold: the euclidean distance between the effector and a waypoint must be less
            than this value for the waypoint to count as reached. 0.3 by default.
        """
        if not isinstance(wayPoints, np.ndarray):
            raise Exception('wayPoints must be a numpy array with shape (, 3).')
        if not isinstance(endEffectorName, str):
            raise Exception('endEffectorName must be a string.')
        # BUGFIX: the original check used `and`, which let negative floats
        # through; require a positive real number (ints were always accepted
        # silently, so they remain accepted here).
        if not isinstance(distanceThreshold, (int, float)) or distanceThreshold <= 0:
            raise Exception('distanceThreshold must be a positive float.')
        super().__init__(env)
        self.env = env
        self.wayPoints = np.copy(wayPoints)
        self.reachedWayPoints = [False] * len(self.wayPoints)
        self.reachCount = 0
        self.endEffectorName = endEffectorName
        self.distanceThreshold = float(distanceThreshold)
        self.goalReached = False
        # Fail fast if the effector's coordinates are absent from the state.
        objectDict = self.client.get_state_msg().state_dict
        for axis in ('_x', '_y', '_z'):
            if self.endEffectorName + axis not in objectDict:
                raise Exception('{}{} does not exist in the environment.'.format(endEffectorName, axis))

    def step(self, action):
        """
        Update the environment with the robot's action. If the effector's position is near any
        not-yet-reached waypoint, that waypoint is marked reached and info['atWaypoint'] contains
        the position(s) of the waypoint(s) reached this step. Once all waypoints are reached,
        the episode is considered done and reward = 1 is returned.
        """
        observation, reward, done, info = self.env.step(action)
        effectorPosition = np.array([observation[self.endEffectorName + '_x'],
                                     observation[self.endEffectorName + '_y'],
                                     observation[self.endEffectorName + '_z']])
        newlyReached = []
        for i, wayPoint in enumerate(self.wayPoints):
            if self.reachedWayPoints[i]:
                continue
            if np.linalg.norm(effectorPosition - wayPoint) <= self.distanceThreshold:
                self.reachedWayPoints[i] = True
                self.reachCount += 1
                newlyReached.append(wayPoint)
        self.goalReached = (self.reachCount == len(self.wayPoints))
        reward = self.reward(observation, self.goalReached, info)
        info['atWaypoint'] = newlyReached
        # NOTE(review): the wrapped env's `done` flag is discarded here, as in
        # the original design -- episodes only terminate on goal completion.
        # Confirm that an outer TimeLimit wrapper handles truncation.
        return observation, reward, self.goalReached, info

    def reward(self, observation, done, info):
        """
        By default, reward = +1 when all waypoints are reached, -0.01 else.
        You can set your own reward by first derive from this class, then redefine this function.
        """
        if self.goalReached:
            return 1
        return -0.01
class MoveObjectToTargetTask(gym.Wrapper):
    """
    Add environment a goal that an object must be moved to the target position by all means.
    """
    def __init__(self, env, objectName: str, targetPosition: np.ndarray, distanceThreshold: float=0.3):
        """
        env: the environment to be wrapped.
        objectName: the name of the object. This object must exist in the environment, or
            it will raise an error.
        targetPosition: an np.array with 3 elements representing (x,y,z) of the target position.
        distanceThreshold: the euclidean distance between the object and the target must be less than this
            value to be considered as goal. 0.3 by default.
        """
        if not isinstance(objectName, str):
            raise Exception('objectName must be a string.')
        # BUGFIX: the original check used `and`, so ndarrays of the wrong
        # length (and non-arrays of length 3) slipped through validation.
        if not isinstance(targetPosition, np.ndarray) or len(targetPosition) != 3:
            raise Exception('targetPosition must be a np.array with 3 elements representing (x,y,z) of destination.')
        # BUGFIX: same `and`->`or` fix as above; the error message previously
        # referred to a nonexistent 'targetTolerance' parameter.
        if not isinstance(distanceThreshold, (int, float)) or distanceThreshold <= 0:
            raise Exception('distanceThreshold must be a positive float.')
        super().__init__(env)
        self.env = env
        self.objectName = objectName
        self.targetPosition = np.copy(targetPosition)
        self.distanceThreshold = float(distanceThreshold)
        self.goalReached = False
        # Fail fast if the object's coordinates are absent from the state.
        objectDict = self.client.get_state_msg().state_dict
        if self.objectName + '_x' not in objectDict:
            raise Exception("ObjectName {} does not exist in the environment.".format(self.objectName))

    def step(self, action):
        """
        Perform distance check for each update. If the object and target is close enough, set self.goalReached to True.
        """
        observation, reward, done, info = self.env.step(action)
        objectPosition = np.array([observation[self.objectName + '_x'],
                                   observation[self.objectName + '_y'],
                                   observation[self.objectName + '_z']])
        if not self.goalReached:
            if np.linalg.norm(objectPosition - self.targetPosition) <= self.distanceThreshold:
                self.goalReached = True
        reward = self.reward(observation, self.goalReached, info)
        # NOTE(review): as in the original, the wrapped env's `done` flag is
        # discarded -- confirm truncation is handled elsewhere.
        return observation, reward, self.goalReached, info

    def reward(self, observation, done, info):
        """
        By default, reward = +1 if target is at the targetPosition, -0.01 else.
        You can set your own reward by first derive from this class, then redefine this function.
        """
        if self.goalReached:
            return 1
        return -0.01
| [
"numpy.copy",
"numpy.array",
"numpy.linalg.norm"
] | [((1296, 1314), 'numpy.copy', 'np.copy', (['wayPoints'], {}), '(wayPoints)\n', (1303, 1314), True, 'import numpy as np\n'), ((2536, 2677), 'numpy.array', 'np.array', (["[observation[self.endEffectorName + '_x'], observation[self.endEffectorName +\n '_y'], observation[self.endEffectorName + '_z']]"], {}), "([observation[self.endEffectorName + '_x'], observation[self.\n endEffectorName + '_y'], observation[self.endEffectorName + '_z']])\n", (2544, 2677), True, 'import numpy as np\n'), ((5033, 5056), 'numpy.copy', 'np.copy', (['targetPosition'], {}), '(targetPosition)\n', (5040, 5056), True, 'import numpy as np\n'), ((5620, 5745), 'numpy.array', 'np.array', (["[observation[self.objectName + '_x'], observation[self.objectName + '_y'],\n observation[self.objectName + '_z']]"], {}), "([observation[self.objectName + '_x'], observation[self.objectName +\n '_y'], observation[self.objectName + '_z']])\n", (5628, 5745), True, 'import numpy as np\n'), ((2843, 2895), 'numpy.linalg.norm', 'np.linalg.norm', (['(effectorPosition - self.wayPoints[i])'], {}), '(effectorPosition - self.wayPoints[i])\n', (2857, 2895), True, 'import numpy as np\n'), ((5860, 5912), 'numpy.linalg.norm', 'np.linalg.norm', (['(objectPosition - self.targetPosition)'], {}), '(objectPosition - self.targetPosition)\n', (5874, 5912), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import os
from keras import backend
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.layers.merge import concatenate
from keras.models import Sequential, Model
from keras.layers import Dense, Embedding, Activation, merge, Input, Lambda, Reshape
from keras.layers import Convolution1D, Flatten, Dropout, MaxPool1D, GlobalAveragePooling1D
from keras.layers import LSTM, GRU, TimeDistributed, Bidirectional
from keras.layers import BatchNormalization
from keras.utils.np_utils import to_categorical
from keras import initializers
from keras import backend as K
from keras import constraints
from keras import regularizers
from keras.engine.topology import Layer
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import SGDClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import cross_val_score
# Load the labelled training set and the unlabelled prediction set.
train = pd.read_csv('data/train_first.csv')
test = pd.read_csv('data/predict_first.csv')
title = train['Discuss']
# NOTE(review): `label` is assigned the same text column as `title`; the code
# below treats `label` as integer scores (1..5), so this should almost
# certainly read the score/rating column instead — confirm against the CSV.
label = train['Discuss']
# 90/10 train/validation split, fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(title, label, test_size=0.1, random_state=42)
'''
# MultinomialNB Classifier
vect = TfidfVectorizer(stop_words='english',
token_pattern=r'\b\w{2,}\b',
min_df=1, max_df=0.1,
ngram_range=(1, 2))
mnb = MultinomialNB(alpha=2)
svm = SGDClassifier(loss='hinge', penalty='l2', alpha=1e-3, max_iter=5, random_state=42)
mnb_pipeline = make_pipeline(vect, mnb)
svm_pipeline = make_pipeline(vect, svm)
mnb_cv = cross_val_score(mnb_pipeline, title, label, scoring='accuracy', cv=10, n_jobs=1)
svm_cv = cross_val_score(svm_pipeline, title, label, scoring='accuracy', cv=10, n_jobs=1)
print('\nMultinomialNB Classifier\'s Accuracy: %0.5f\n' % mnb_cv.mean())
# 0.28284
print('\nSVM Classifier\'s Accuracy: %0.5f\n' % svm_cv.mean())
# 0.27684
'''
'''
y_labels = list(y_train.value_counts().index)
le = preprocessing.LabelEncoder()
le.fit(y_labels)
num_labels = len(y_labels)
y_train = to_categorical(y_train.map(lambda x: le.transform([x])[0]), num_labels)
y_test = to_categorical(y_test.map(lambda x: le.transform([x])[0]), num_labels)
'''
# Labels in the CSV are 1..5; shift to 0..4 so they can be used directly as
# column indices for one-hot encoding.
y_train = y_train.values - 1
y_test = y_test.values - 1
# BUG FIX: `num_labels` is used by every model definition below, but it was
# only assigned inside a commented-out (triple-quoted) block, so the active
# script raised NameError. Define it here to match the 5-class label space.
num_labels = 5
y1 = np.zeros(shape=(y_train.shape[0], num_labels))
y2 = np.zeros(shape=(y_test.shape[0], num_labels))
# Fancy integer indexing: set a single 1 per row at the label's column.
y1[np.arange(0, y1.shape[0]), y_train] = 1
y2[np.arange(0, y2.shape[0]), y_test] = 1
# load glove word embedding data
GLOVE_DIR = "glove/"
embeddings_index = {}
f = open(os.path.join(GLOVE_DIR, 'yun.txt'), encoding='utf-8')
for num, line in enumerate(f):
    # Skip the first line — presumably a header (vocab size / dimension);
    # verify against the embedding file format.
    if num == 0:
        continue
    print(line)
    # Each line: token followed by its embedding components.
    values = line.split()
    word = values[0]
    coefs = np.asarray(values[1:], dtype='float32')
    embeddings_index[word] = coefs
f.close()
# NOTE(review): prints the bound method object, not the keys — probably
# intended `embeddings_index.keys()`.
print(embeddings_index.keys)
# take tokens and build word-id dictionary
tokenizer = Tokenizer(filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n', lower=True, split=" ")
tokenizer.fit_on_texts(title)
vocab = tokenizer.word_index
# Match the word vector for each word in the data set from Glove
# (rows left at zero for out-of-vocabulary words; index 0 is reserved).
embedding_matrix = np.zeros((len(vocab) + 1, 300))
for word, i in vocab.items():
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        embedding_matrix[i] = embedding_vector
# Match the input format of the model
x_train_word_ids = tokenizer.texts_to_sequences(X_train)
x_test_word_ids = tokenizer.texts_to_sequences(X_test)
# Truncate/pad every sequence to a fixed length of 20 tokens.
x_train_padded_seqs = pad_sequences(x_train_word_ids, maxlen=20)
x_test_padded_seqs = pad_sequences(x_test_word_ids, maxlen=20)
# one-hot mlp
# Bag-of-words baseline: each sample becomes a binary vocabulary-sized vector.
x_train = tokenizer.sequences_to_matrix(x_train_word_ids, mode='binary')
x_test = tokenizer.sequences_to_matrix(x_test_word_ids, mode='binary')
model = Sequential()
model.add(Dense(512, input_shape=(len(vocab) + 1,), activation='relu'))
model.add(Dropout(0.5))
# NOTE(review): `num_labels` is not defined in the active code path here —
# it only appears inside a commented-out block earlier in the file.
model.add(Dense(num_labels, activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(x_train, y_train,
          batch_size=32,
          epochs=15,
          validation_data=(x_test, y_test))
model.save('model/mlp.model')
# 0.4557
# RNN model
# Two stacked LSTMs over the padded id sequences; `model` is rebound, the
# MLP above has already been saved to disk.
model = Sequential()
model.add(Embedding(len(vocab) + 1, 256, input_length=20))
model.add(LSTM(256, dropout=0.2, recurrent_dropout=0.1, return_sequences=True))
model.add(LSTM(256, dropout=0.2, recurrent_dropout=0.1))
model.add(Dense(num_labels, activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(x_train_padded_seqs, y_train,
          batch_size=32,
          epochs=12,
          validation_data=(x_test_padded_seqs, y_test))
model.save('model/RNN.model')
# Average training time between 280-300 seconds
# GRU-15-256-256-0.1-0.1 0.4262
# GRU-18-256-256-0.15- 0.4364
# GRU-18-256-256-0.2- 0.4305
# GRU-18-256-256-0.2-0.1 0.4359(*best)
# GRU-18-256-256-0.15-0.1 0.4283
# GRU-20-256-256-0.1-0.1 0.4224
# GRU-20-256-256-0.2-0.2 0.4214
# GRU-18-256-256-0.2-0.2 0.4278
# LSTM-18-256-256-0.2-0.2 0.43
# LSTM-18-256-256-0.3-0.2 0.4198
# LSTM-18-256-256-0.2-0.1 0.4235
# epoch 15 0.4439
# BI 0.4482
# Final held-out evaluation of the RNN just trained.
score, acc = model.evaluate(x_test_padded_seqs, y_test,
                            batch_size=32)
# CNN model
model = Sequential()
model.add(Embedding(len(vocab) + 1, 256, input_length=20))
# Convolutional model (3x conv, flatten, 2x dense)
model.add(Convolution1D(256, 3, padding='same'))
model.add(MaxPool1D(3, 3, padding='same'))
model.add(Convolution1D(128, 3, padding='same'))
model.add(MaxPool1D(3, 3, padding='same'))
model.add(Convolution1D(64, 3, padding='same'))
model.add(Flatten())
model.add(Dropout(0.1))
model.add(BatchNormalization())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(num_labels, activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(x_train_padded_seqs, y_train,
          batch_size=32,
          epochs=12,
          validation_data=(x_test_padded_seqs, y_test))
model.save('model/CNN.model')
# Average training time between 40-55 seconds
# 32-15-conv*3(128-64-32)-3-0.2 0.4069(same)
# 32-20-conv*3(128-64-32)-3-0.2 0.4133(same)
# 32-20-conv*3(128-64-32)-3-0.1 0.4165(same)
# 32-20-conv*3(128-64-32)-3-0.15 0.4149(same)
# 64-20-conv*3(128-64-32)-3-0.1 0.4042(same)
# 32-20-conv*3(256-128-64)-3-0.1 0.4219(same)
# 32-20-conv*3(256-128-64)-4-0.1 0.4144(same)
# 32-20-conv*3(256-128-64)-3-0.1 0.4069(valid)
# 32-20-conv*3(256-128-64)-3-0.1 0.4144(same)
# add max-pooling 0.4257(same)(*best)
# epoch 15 0.4359(*best)
# CNN+GRU
# Hybrid: one conv/pool stage feeding two stacked GRUs.
model = Sequential()
model.add(Embedding(len(vocab) + 1, 256, input_length=20))
model.add(Convolution1D(256, 3, padding='same', strides=1))
model.add(Activation('relu'))
model.add(MaxPool1D(pool_size=2))
model.add(GRU(256, dropout=0.2, recurrent_dropout=0.1, return_sequences=True))
model.add(GRU(256, dropout=0.2, recurrent_dropout=0.1))
model.add(Dense(num_labels, activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(x_train_padded_seqs, y_train,
          batch_size=32,
          epochs=12,
          validation_data=(x_test_padded_seqs, y_test))
model.save('model/CNN_GRU.model')
# 128
# GRU+CONV 0.4412
# GRU*2+CONV*2 0.4359
# GRU*2+CONV*1 0.4337
# GRU*1+CONV*2 0.4305
# 256
# GRU*1+CONV*1 0.4418
# GRU*1+CONV*2 0.4289
# GRU*2+CONV*1 0.4423
# TextCNN
# Kim (2014)-style architecture built with the functional API: three parallel
# conv branches with kernel sizes 3/4/5 over a shared embedding.
main_input = Input(shape=(20,), dtype='float64')
embedder = Embedding(len(vocab) + 1, 300, input_length=20)
embed = embedder(main_input)
cnn1 = Convolution1D(256, 3, padding='same', strides=1, activation='relu')(embed)
cnn1 = MaxPool1D(pool_size=4)(cnn1)
cnn2 = Convolution1D(256, 4, padding='same', strides=1, activation='relu')(embed)
cnn2 = MaxPool1D(pool_size=4)(cnn2)
cnn3 = Convolution1D(256, 5, padding='same', strides=1, activation='relu')(embed)
cnn3 = MaxPool1D(pool_size=4)(cnn3)
# Concatenate the three branch outputs along the channel axis.
cnn = concatenate([cnn1, cnn2, cnn3], axis=-1)
flat = Flatten()(cnn)
drop = Dropout(0.2)(flat)
main_output = Dense(num_labels, activation='softmax')(drop)
model = Model(inputs=main_input, outputs=main_output)
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(x_train_padded_seqs, y_train,
          batch_size=32,
          epochs=12,
          validation_data=(x_test_padded_seqs, y_test))
model.save('model/TextCNN.model')
# conv*3(3,4,5) 0.4546
# with embedding 100d 0.4326
# with embedding 200d 0.4283
# with embedding 200d 0.4332
# TextCNN with GRU
# Same three-branch front end, but a bidirectional GRU replaces flatten+dense.
main_input = Input(shape=(20,), dtype='float64')
embed = Embedding(len(vocab) + 1, 256, input_length=20)(main_input)
cnn1 = Convolution1D(256, 3, padding='same', strides=1, activation='relu')(embed)
cnn1 = MaxPool1D(pool_size=4)(cnn1)
cnn2 = Convolution1D(256, 4, padding='same', strides=1, activation='relu')(embed)
cnn2 = MaxPool1D(pool_size=4)(cnn2)
cnn3 = Convolution1D(256, 5, padding='same', strides=1, activation='relu')(embed)
cnn3 = MaxPool1D(pool_size=4)(cnn3)
cnn = concatenate([cnn1, cnn2, cnn3], axis=-1)
gru = Bidirectional(GRU(256, dropout=0.2, recurrent_dropout=0.1))(cnn)
main_output = Dense(num_labels, activation='softmax')(gru)
model = Model(inputs=main_input, outputs=main_output)
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(x_train_padded_seqs, y_train,
          batch_size=32,
          epochs=12,
          validation_data=(x_test_padded_seqs, y_test))
# BI-lstm
# 0.4471
# CNN+LSTM concat
# Two independent towers (conv and bidirectional GRU) merged before softmax.
main_input = Input(shape=(20,), dtype='float64')
embed = Embedding(len(vocab) + 1, 256, input_length=20)(main_input)
cnn = Convolution1D(256, 3, padding='same', strides=1, activation='relu')(embed)
cnn = MaxPool1D(pool_size=4)(cnn)
cnn = Flatten()(cnn)
cnn = Dense(256)(cnn)
rnn = Bidirectional(GRU(256, dropout=0.2, recurrent_dropout=0.1))(embed)
rnn = Dense(256)(rnn)
con = concatenate([cnn, rnn], axis=-1)
main_output = Dense(num_labels, activation='softmax')(con)
model = Model(inputs=main_input, outputs=main_output)
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(x_train_padded_seqs, y_train,
          batch_size=32,
          epochs=12,
          validation_data=(x_test_padded_seqs, y_test))
model.save('model/CNN_LSTM.model')
# 0.4434
# C-LSTM
# NOTE(review): this section stops after the Reshape — the model is never
# completed, compiled, or trained.
main_input = Input(shape=(20,), dtype='float64')
embed = Embedding(len(vocab) + 1, 256, input_length=20)(main_input)
cnn = Convolution1D(256, 3, padding='same', strides=1, activation='relu')(embed)
# `.value` on tensor dimensions is TF1-era API — presumably this file targets
# an old Keras/TensorFlow; under TF2 these attributes do not exist.
new = Reshape(target_shape=(cnn.shape[2].value, cnn.shape[1].value))(cnn)
# CNN-char
# Reprocess the input
# get vocab
# Rebuild the corpus character-by-character: each sentence string is split
# into its characters, joined by spaces, so the tokenizer sees chars as words.
all_sent = []
for sent in title.tolist():
    new = []
    for word in sent:
        for char in word:
            # `word` is already a single character here (iterating a string
            # yields characters), so this appends each character once.
            new.append(word)
    new_sent = " ".join(new)
    all_sent.append(new_sent)
tokenizer = Tokenizer(filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n', lower=True, split=" ")
tokenizer.fit_on_texts(all_sent)
# `vocab` is rebound to the character vocabulary from here on.
vocab = tokenizer.word_index
X_train, X_test, y_train, y_test = train_test_split(all_sent, label, test_size=0.1, random_state=42)
X_train_word_ids = tokenizer.texts_to_sequences(X_train)
X_test_word_ids = tokenizer.texts_to_sequences(X_test)
# Character sequences are longer, so pad to 30 instead of 20.
X_train_padded_seqs = pad_sequences(X_train_word_ids, maxlen=30)
X_test_padded_seqs = pad_sequences(X_test_word_ids, maxlen=30)
# 0.4063
# DCNN
def KMaxPooling1D():
    """Placeholder for the DCNN k-max pooling layer — not implemented (returns None)."""
    pass
def Folding():
    """Placeholder for the DCNN folding layer — not implemented (returns None)."""
    pass
# NOTE(review): this DCNN section is an unfinished sketch and cannot run:
# Convolution1D() is called without its required arguments, Folding() and
# KMaxPooling1D() are stubs returning None, and the final model.add() has no
# layer argument, which raises TypeError.
model = Sequential()
model.add(Embedding(len(vocab) + 1, 256, input_length=20))
model.add(Convolution1D())
model.add(Folding())
model.add(KMaxPooling1D())
model.add(Activation('tanh'))
model.add()
# GRU with Attention
# Aspect-level attention
# Hierarchical Model with Attention
class AttLayer(Layer):
    """Attention layer for Hierarchical Attention Networks.

    Scores each timestep of a (batch, steps, features) input against a
    learned context vector and returns the attention-weighted sum over
    timesteps, shape (batch, features). Supports (and consumes) masks.
    """
    def __init__(self, init='glorot_uniform', kernel_regularizer=None,
                 bias_regularizer=None, kernel_constraint=None,
                 bias_constraint=None, **kwargs):
        self.supports_masking = True
        self.init = initializers.get(init)
        self.kernel_initializer = initializers.get(init)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        # BUG FIX: the bias regularizer was previously built from
        # `kernel_regularizer`, silently ignoring the `bias_regularizer`
        # argument.
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        super(AttLayer, self).__init__(**kwargs)
    def build(self, input_shape):
        # Expect (batch, steps, features).
        assert len(input_shape) == 3
        # Projection that maps each timestep's feature vector to one score.
        self.W = self.add_weight((input_shape[-1], 1),
                                 initializer=self.kernel_initializer,
                                 name='{}_W'.format(self.name),
                                 regularizer=self.kernel_regularizer,
                                 constraint=self.kernel_constraint)
        # Per-timestep bias, shape (steps,).
        self.b = self.add_weight((input_shape[1],),
                                 initializer='zero',
                                 name='{}_b'.format(self.name),
                                 regularizer=self.bias_regularizer,
                                 constraint=self.bias_constraint)
        # Per-timestep context weights, shape (steps,).
        self.u = self.add_weight((input_shape[1],),
                                 initializer=self.kernel_initializer,
                                 name='{}_u'.format(self.name),
                                 regularizer=self.kernel_regularizer,
                                 constraint=self.kernel_constraint)
        self.built = True
    def compute_mask(self, input, input_mask=None):
        # The output is a fixed-size summary; do not propagate the mask.
        return None
    def call(self, x, mask=None):
        uit = K.dot(x, self.W)      # (batch, steps, 1)
        uit = K.squeeze(uit, -1)    # (batch, steps)
        uit = uit + self.b          # broadcast bias over the batch
        uit = K.tanh(uit)
        ait = uit * self.u          # elementwise, still (batch, steps)
        ait = K.exp(ait)
        if mask is not None:
            mask = K.cast(mask, K.floatx())
            ait = mask * ait        # zero scores on padded timesteps
        # Normalize to attention weights (softmax over timesteps);
        # epsilon guards against division by zero on fully-masked rows.
        ait /= K.cast(K.sum(ait, axis=1, keepdims=True) + K.epsilon(), K.floatx())
        ait = K.expand_dims(ait)    # (batch, steps, 1)
        weighted_input = x * ait
        output = K.sum(weighted_input, axis=1)  # (batch, features)
        return output
    def compute_output_shape(self, input_shape):
        return (input_shape[0], input_shape[-1])
# Bidirectional GRU with the custom attention layer on top.
inputs = Input(shape=(20,), dtype='float64')
embed = Embedding(len(vocab) + 1, 300, input_length=20)(inputs)
gru = Bidirectional(GRU(100, dropout=0.2, recurrent_dropout=0.1, return_sequences=True))(embed)
attention = AttLayer()(gru)
output = Dense(num_labels, activation='softmax')(attention)
model = Model(inputs, output)
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(x_train_padded_seqs, y_train,
          batch_size=32,
          epochs=12,
          validation_data=(x_test_padded_seqs, y_test))
# 0.4487
# fastetxt model
# Generates the n-gram combination vocabulary for the input text
# Maximum n-gram order used by the fasttext-style helpers below.
n_value = 2
def create_ngram_set(input_list, ngram_value=n_value):
    """Collect every contiguous n-gram of *input_list* as a set of tuples."""
    # Build one shifted copy of the sequence per offset; zipping them
    # yields each window of `ngram_value` consecutive elements exactly once.
    shifted = [input_list[offset:] for offset in range(ngram_value)]
    return set(zip(*shifted))
# Add the new n-gram generated words into the original sentence sequence
def add_ngram(sequences, token_indice, ngram_range=n_value):
    """Return new sequences with ids of known n-grams (orders 2..ngram_range)
    appended after each sequence's original tokens.

    Only n-grams present in *token_indice* are appended; the input
    sequences themselves are left unmodified.
    """
    augmented = []
    for seq in sequences:
        out = seq[:]
        # Window starts are computed from the original length, so tokens
        # appended below are never re-scanned.
        for start in range(len(out) - ngram_range + 1):
            for order in range(2, ngram_range + 1):
                gram = tuple(out[start:start + order])
                if gram in token_indice:
                    out.append(token_indice[gram])
        augmented.append(out)
    return augmented
# Collect every n-gram (orders 2..n_value) seen in the training sequences.
ngram_set = set()
for input_list in x_train_padded_seqs:
    for i in range(2, n_value + 1):
        set_of_ngram = create_ngram_set(input_list, ngram_value=i)
        ngram_set.update(set_of_ngram)
# New n-gram tokens get ids above the existing word vocabulary.
start_index = len(vocab) + 2
token_indice = {v: k + start_index for k, v in enumerate(ngram_set)} # assign an integer id to each new n-gram token
indice_token = {token_indice[k]: k for k in token_indice}
max_features = np.max(list(indice_token.keys())) + 1
# Append n-gram ids to each raw word-id sequence, then pad to 64.
x_train = add_ngram(x_train_word_ids, token_indice, 3)
x_test = add_ngram(x_test_word_ids, token_indice, 3)
x_train = pad_sequences(x_train, maxlen=64)
x_test = pad_sequences(x_test, maxlen=64)
# fasttext: embedding, average pooling, softmax.
model = Sequential()
model.add(Embedding(max_features, 256, input_length=64))
model.add(GlobalAveragePooling1D())
model.add(Dense(num_labels, activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(x_train, y_train,
          batch_size=32,
          epochs=5,
          validation_data=(x_test, y_test))
# 2-gram 0.4648
# 3-gram 0.4546
# TextRCNN
# RCNN for paper http://www.aaai.org/ocs/index.php/AAAI/AAAI15/paper/view/9745
# Left/right contexts: the sequence shifted by one position either way,
# with len(vocab) used as the padding id at the boundary.
left_train_word_ids = [[len(vocab)] + x[:-1] for x in X_train_word_ids]
left_test_word_ids = [[len(vocab)] + x[:-1] for x in X_test_word_ids]
right_train_word_ids = [x[1:] + [len(vocab)] for x in X_train_word_ids]
right_test_word_ids = [x[1:] + [len(vocab)] for x in X_test_word_ids]
left_train_padded_seqs = pad_sequences(left_train_word_ids, maxlen=20)
left_test_padded_seqs = pad_sequences(left_test_word_ids, maxlen=20)
right_train_padded_seqs = pad_sequences(right_train_word_ids, maxlen=20)
right_test_padded_seqs = pad_sequences(right_test_word_ids, maxlen=20)
document = Input(shape=(None,), dtype="int32")
left_context = Input(shape=(None,), dtype="int32")
right_context = Input(shape=(None,), dtype="int32")
# One shared embedding for the document and both context streams.
embedder = Embedding(len(vocab) + 1, 300, input_length=20)
doc_embedding = embedder(document)
l_embedding = embedder(left_context)
r_embedding = embedder(right_context)
forward = LSTM(256, return_sequences=True)(l_embedding)  # See equation (1)
backward = LSTM(256, return_sequences=True, go_backwards=True)(r_embedding)  # See equation (2)
together = concatenate([forward, doc_embedding, backward], axis=2)  # See equation (3)
semantic = TimeDistributed(Dense(128, activation="tanh"))(together)  # See equation (4)
pool_rnn = Lambda(lambda x: backend.max(x, axis=1), output_shape=(128,))(semantic)  # See equation (5)
# NOTE(review): 10 output classes here, while every other model uses
# `num_labels` — likely copied from the paper's setup; confirm intended size.
output = Dense(10, activation="softmax")(pool_rnn)  # See equations (6) and (7)
model = Model(inputs=[document, left_context, right_context], outputs=output)
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit([x_train_padded_seqs, left_train_padded_seqs, right_train_padded_seqs], y_train,
          batch_size=32,
          epochs=12,
          validation_data=([x_test_padded_seqs, left_test_padded_seqs, right_test_padded_seqs], y_test))
# 0.4439
# 0.4240
# 0.4498
'''
# next step cv for finding better models
from sklearn.model_selection import StratifiedKFold
kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=2017)
cvscores = []
Y = np.concatenate((y_train,y_test), axis=0)
X = pd.concat([X_train_padded_seqs, X_test_padded_seqs])
for train, test in kfold.split(X, Y):
# create model
model = best
# Compile model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# Fit the model
model.fit(X[train], Y[train], epochs=10, batch_size=32, verbose=0)
# evaluate the model
scores = model.evaluate(X[test], Y[test], verbose=0)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
cvscores.append(scores[1] * 100)
print("%.2f%% (+/- %.2f%%)" % (np.mean(cvscores), np.std(cvscores)))
''' | [
"keras.backend.sum",
"pandas.read_csv",
"keras.layers.MaxPool1D",
"keras.backend.floatx",
"keras.backend.squeeze",
"keras.backend.dot",
"keras.layers.Activation",
"keras.layers.Dense",
"keras.preprocessing.sequence.pad_sequences",
"numpy.arange",
"keras.backend.tanh",
"keras.layers.merge.conca... | [((1167, 1202), 'pandas.read_csv', 'pd.read_csv', (['"""data/train_first.csv"""'], {}), "('data/train_first.csv')\n", (1178, 1202), True, 'import pandas as pd\n'), ((1210, 1247), 'pandas.read_csv', 'pd.read_csv', (['"""data/predict_first.csv"""'], {}), "('data/predict_first.csv')\n", (1221, 1247), True, 'import pandas as pd\n'), ((1333, 1395), 'sklearn.model_selection.train_test_split', 'train_test_split', (['title', 'label'], {'test_size': '(0.1)', 'random_state': '(42)'}), '(title, label, test_size=0.1, random_state=42)\n', (1349, 1395), False, 'from sklearn.model_selection import train_test_split\n'), ((2505, 2542), 'numpy.zeros', 'np.zeros', ([], {'shape': '(y_train.shape[0], 5)'}), '(shape=(y_train.shape[0], 5))\n', (2513, 2542), True, 'import numpy as np\n'), ((2548, 2584), 'numpy.zeros', 'np.zeros', ([], {'shape': '(y_test.shape[0], 5)'}), '(shape=(y_test.shape[0], 5))\n', (2556, 2584), True, 'import numpy as np\n'), ((3120, 3205), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'filters': '"""!"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\t\n"""', 'lower': '(True)', 'split': '""" """'}), '(filters=\'!"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\\t\\n\', lower=True, split=\' \'\n )\n', (3129, 3205), False, 'from keras.preprocessing.text import Tokenizer\n'), ((3714, 3756), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['x_train_word_ids'], {'maxlen': '(20)'}), '(x_train_word_ids, maxlen=20)\n', (3727, 3756), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((3778, 3819), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['x_test_word_ids'], {'maxlen': '(20)'}), '(x_test_word_ids, maxlen=20)\n', (3791, 3819), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((3988, 4000), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3998, 4000), False, 'from keras.models import Sequential, Model\n'), ((4443, 4455), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', 
(4453, 4455), False, 'from keras.models import Sequential, Model\n'), ((5633, 5645), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (5643, 5645), False, 'from keras.models import Sequential, Model\n'), ((7073, 7085), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (7083, 7085), False, 'from keras.models import Sequential, Model\n'), ((8001, 8036), 'keras.layers.Input', 'Input', ([], {'shape': '(20,)', 'dtype': '"""float64"""'}), "(shape=(20,), dtype='float64')\n", (8006, 8036), False, 'from keras.layers import Dense, Embedding, Activation, merge, Input, Lambda, Reshape\n'), ((8485, 8525), 'keras.layers.merge.concatenate', 'concatenate', (['[cnn1, cnn2, cnn3]'], {'axis': '(-1)'}), '([cnn1, cnn2, cnn3], axis=-1)\n', (8496, 8525), False, 'from keras.layers.merge import concatenate\n'), ((8642, 8687), 'keras.models.Model', 'Model', ([], {'inputs': 'main_input', 'outputs': 'main_output'}), '(inputs=main_input, outputs=main_output)\n', (8647, 8687), False, 'from keras.models import Sequential, Model\n'), ((9150, 9185), 'keras.layers.Input', 'Input', ([], {'shape': '(20,)', 'dtype': '"""float64"""'}), "(shape=(20,), dtype='float64')\n", (9155, 9185), False, 'from keras.layers import Dense, Embedding, Activation, merge, Input, Lambda, Reshape\n'), ((9614, 9654), 'keras.layers.merge.concatenate', 'concatenate', (['[cnn1, cnn2, cnn3]'], {'axis': '(-1)'}), '([cnn1, cnn2, cnn3], axis=-1)\n', (9625, 9654), False, 'from keras.layers.merge import concatenate\n'), ((9793, 9838), 'keras.models.Model', 'Model', ([], {'inputs': 'main_input', 'outputs': 'main_output'}), '(inputs=main_input, outputs=main_output)\n', (9798, 9838), False, 'from keras.models import Sequential, Model\n'), ((10150, 10185), 'keras.layers.Input', 'Input', ([], {'shape': '(20,)', 'dtype': '"""float64"""'}), "(shape=(20,), dtype='float64')\n", (10155, 10185), False, 'from keras.layers import Dense, Embedding, Activation, merge, Input, Lambda, Reshape\n'), ((10513, 10545), 
'keras.layers.merge.concatenate', 'concatenate', (['[cnn, rnn]'], {'axis': '(-1)'}), '([cnn, rnn], axis=-1)\n', (10524, 10545), False, 'from keras.layers.merge import concatenate\n'), ((10613, 10658), 'keras.models.Model', 'Model', ([], {'inputs': 'main_input', 'outputs': 'main_output'}), '(inputs=main_input, outputs=main_output)\n', (10618, 10658), False, 'from keras.models import Sequential, Model\n'), ((10983, 11018), 'keras.layers.Input', 'Input', ([], {'shape': '(20,)', 'dtype': '"""float64"""'}), "(shape=(20,), dtype='float64')\n", (10988, 11018), False, 'from keras.layers import Dense, Embedding, Activation, merge, Input, Lambda, Reshape\n'), ((11495, 11580), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'filters': '"""!"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\t\n"""', 'lower': '(True)', 'split': '""" """'}), '(filters=\'!"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~\\t\\n\', lower=True, split=\' \'\n )\n', (11504, 11580), False, 'from keras.preprocessing.text import Tokenizer\n'), ((11673, 11738), 'sklearn.model_selection.train_test_split', 'train_test_split', (['all_sent', 'label'], {'test_size': '(0.1)', 'random_state': '(42)'}), '(all_sent, label, test_size=0.1, random_state=42)\n', (11689, 11738), False, 'from sklearn.model_selection import train_test_split\n'), ((11873, 11915), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['X_train_word_ids'], {'maxlen': '(30)'}), '(X_train_word_ids, maxlen=30)\n', (11886, 11915), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((11937, 11978), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['X_test_word_ids'], {'maxlen': '(30)'}), '(X_test_word_ids, maxlen=30)\n', (11950, 11978), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((12064, 12076), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (12074, 12076), False, 'from keras.models import Sequential, Model\n'), ((14901, 14936), 'keras.layers.Input', 'Input', ([], {'shape': '(20,)', 'dtype': 
'"""float64"""'}), "(shape=(20,), dtype='float64')\n", (14906, 14936), False, 'from keras.layers import Dense, Embedding, Activation, merge, Input, Lambda, Reshape\n'), ((15193, 15214), 'keras.models.Model', 'Model', (['inputs', 'output'], {}), '(inputs, output)\n', (15198, 15214), False, 'from keras.models import Sequential, Model\n'), ((16808, 16841), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['x_train'], {'maxlen': '(64)'}), '(x_train, maxlen=64)\n', (16821, 16841), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((16851, 16883), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['x_test'], {'maxlen': '(64)'}), '(x_test, maxlen=64)\n', (16864, 16883), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((16893, 16905), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (16903, 16905), False, 'from keras.models import Sequential, Model\n'), ((17716, 17761), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['left_train_word_ids'], {'maxlen': '(20)'}), '(left_train_word_ids, maxlen=20)\n', (17729, 17761), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((17786, 17830), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['left_test_word_ids'], {'maxlen': '(20)'}), '(left_test_word_ids, maxlen=20)\n', (17799, 17830), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((17857, 17903), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['right_train_word_ids'], {'maxlen': '(20)'}), '(right_train_word_ids, maxlen=20)\n', (17870, 17903), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((17929, 17974), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['right_test_word_ids'], {'maxlen': '(20)'}), '(right_test_word_ids, maxlen=20)\n', (17942, 17974), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((17987, 18022), 'keras.layers.Input', 'Input', ([], {'shape': 
'(None,)', 'dtype': '"""int32"""'}), "(shape=(None,), dtype='int32')\n", (17992, 18022), False, 'from keras.layers import Dense, Embedding, Activation, merge, Input, Lambda, Reshape\n'), ((18038, 18073), 'keras.layers.Input', 'Input', ([], {'shape': '(None,)', 'dtype': '"""int32"""'}), "(shape=(None,), dtype='int32')\n", (18043, 18073), False, 'from keras.layers import Dense, Embedding, Activation, merge, Input, Lambda, Reshape\n'), ((18090, 18125), 'keras.layers.Input', 'Input', ([], {'shape': '(None,)', 'dtype': '"""int32"""'}), "(shape=(None,), dtype='int32')\n", (18095, 18125), False, 'from keras.layers import Dense, Embedding, Activation, merge, Input, Lambda, Reshape\n'), ((18479, 18534), 'keras.layers.merge.concatenate', 'concatenate', (['[forward, doc_embedding, backward]'], {'axis': '(2)'}), '([forward, doc_embedding, backward], axis=2)\n', (18490, 18534), False, 'from keras.layers.merge import concatenate\n'), ((18834, 18903), 'keras.models.Model', 'Model', ([], {'inputs': '[document, left_context, right_context]', 'outputs': 'output'}), '(inputs=[document, left_context, right_context], outputs=output)\n', (18839, 18903), False, 'from keras.models import Sequential, Model\n'), ((2756, 2790), 'os.path.join', 'os.path.join', (['GLOVE_DIR', '"""yun.txt"""'], {}), "(GLOVE_DIR, 'yun.txt')\n", (2768, 2790), False, 'import os\n'), ((2950, 2989), 'numpy.asarray', 'np.asarray', (['values[1:]'], {'dtype': '"""float32"""'}), "(values[1:], dtype='float32')\n", (2960, 2989), True, 'import numpy as np\n'), ((4083, 4095), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (4090, 4095), False, 'from keras.layers import Convolution1D, Flatten, Dropout, MaxPool1D, GlobalAveragePooling1D\n'), ((4107, 4146), 'keras.layers.Dense', 'Dense', (['num_labels'], {'activation': '"""softmax"""'}), "(num_labels, activation='softmax')\n", (4112, 4146), False, 'from keras.layers import Dense, Embedding, Activation, merge, Input, Lambda, Reshape\n'), ((4525, 4593), 
'keras.layers.LSTM', 'LSTM', (['(256)'], {'dropout': '(0.2)', 'recurrent_dropout': '(0.1)', 'return_sequences': '(True)'}), '(256, dropout=0.2, recurrent_dropout=0.1, return_sequences=True)\n', (4529, 4593), False, 'from keras.layers import LSTM, GRU, TimeDistributed, Bidirectional\n'), ((4605, 4650), 'keras.layers.LSTM', 'LSTM', (['(256)'], {'dropout': '(0.2)', 'recurrent_dropout': '(0.1)'}), '(256, dropout=0.2, recurrent_dropout=0.1)\n', (4609, 4650), False, 'from keras.layers import LSTM, GRU, TimeDistributed, Bidirectional\n'), ((4662, 4701), 'keras.layers.Dense', 'Dense', (['num_labels'], {'activation': '"""softmax"""'}), "(num_labels, activation='softmax')\n", (4667, 4701), False, 'from keras.layers import Dense, Embedding, Activation, merge, Input, Lambda, Reshape\n'), ((5767, 5804), 'keras.layers.Convolution1D', 'Convolution1D', (['(256)', '(3)'], {'padding': '"""same"""'}), "(256, 3, padding='same')\n", (5780, 5804), False, 'from keras.layers import Convolution1D, Flatten, Dropout, MaxPool1D, GlobalAveragePooling1D\n'), ((5816, 5847), 'keras.layers.MaxPool1D', 'MaxPool1D', (['(3)', '(3)'], {'padding': '"""same"""'}), "(3, 3, padding='same')\n", (5825, 5847), False, 'from keras.layers import Convolution1D, Flatten, Dropout, MaxPool1D, GlobalAveragePooling1D\n'), ((5859, 5896), 'keras.layers.Convolution1D', 'Convolution1D', (['(128)', '(3)'], {'padding': '"""same"""'}), "(128, 3, padding='same')\n", (5872, 5896), False, 'from keras.layers import Convolution1D, Flatten, Dropout, MaxPool1D, GlobalAveragePooling1D\n'), ((5908, 5939), 'keras.layers.MaxPool1D', 'MaxPool1D', (['(3)', '(3)'], {'padding': '"""same"""'}), "(3, 3, padding='same')\n", (5917, 5939), False, 'from keras.layers import Convolution1D, Flatten, Dropout, MaxPool1D, GlobalAveragePooling1D\n'), ((5951, 5987), 'keras.layers.Convolution1D', 'Convolution1D', (['(64)', '(3)'], {'padding': '"""same"""'}), "(64, 3, padding='same')\n", (5964, 5987), False, 'from keras.layers import Convolution1D, 
Flatten, Dropout, MaxPool1D, GlobalAveragePooling1D\n'), ((5999, 6008), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (6006, 6008), False, 'from keras.layers import Convolution1D, Flatten, Dropout, MaxPool1D, GlobalAveragePooling1D\n'), ((6020, 6032), 'keras.layers.Dropout', 'Dropout', (['(0.1)'], {}), '(0.1)\n', (6027, 6032), False, 'from keras.layers import Convolution1D, Flatten, Dropout, MaxPool1D, GlobalAveragePooling1D\n'), ((6044, 6064), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (6062, 6064), False, 'from keras.layers import BatchNormalization\n'), ((6076, 6105), 'keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (6081, 6105), False, 'from keras.layers import Dense, Embedding, Activation, merge, Input, Lambda, Reshape\n'), ((6117, 6129), 'keras.layers.Dropout', 'Dropout', (['(0.1)'], {}), '(0.1)\n', (6124, 6129), False, 'from keras.layers import Convolution1D, Flatten, Dropout, MaxPool1D, GlobalAveragePooling1D\n'), ((6141, 6180), 'keras.layers.Dense', 'Dense', (['num_labels'], {'activation': '"""softmax"""'}), "(num_labels, activation='softmax')\n", (6146, 6180), False, 'from keras.layers import Dense, Embedding, Activation, merge, Input, Lambda, Reshape\n'), ((7155, 7203), 'keras.layers.Convolution1D', 'Convolution1D', (['(256)', '(3)'], {'padding': '"""same"""', 'strides': '(1)'}), "(256, 3, padding='same', strides=1)\n", (7168, 7203), False, 'from keras.layers import Convolution1D, Flatten, Dropout, MaxPool1D, GlobalAveragePooling1D\n'), ((7215, 7233), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (7225, 7233), False, 'from keras.layers import Dense, Embedding, Activation, merge, Input, Lambda, Reshape\n'), ((7245, 7267), 'keras.layers.MaxPool1D', 'MaxPool1D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (7254, 7267), False, 'from keras.layers import Convolution1D, Flatten, Dropout, MaxPool1D, GlobalAveragePooling1D\n'), ((7279, 
7346), 'keras.layers.GRU', 'GRU', (['(256)'], {'dropout': '(0.2)', 'recurrent_dropout': '(0.1)', 'return_sequences': '(True)'}), '(256, dropout=0.2, recurrent_dropout=0.1, return_sequences=True)\n', (7282, 7346), False, 'from keras.layers import LSTM, GRU, TimeDistributed, Bidirectional\n'), ((7358, 7402), 'keras.layers.GRU', 'GRU', (['(256)'], {'dropout': '(0.2)', 'recurrent_dropout': '(0.1)'}), '(256, dropout=0.2, recurrent_dropout=0.1)\n', (7361, 7402), False, 'from keras.layers import LSTM, GRU, TimeDistributed, Bidirectional\n'), ((7414, 7453), 'keras.layers.Dense', 'Dense', (['num_labels'], {'activation': '"""softmax"""'}), "(num_labels, activation='softmax')\n", (7419, 7453), False, 'from keras.layers import Dense, Embedding, Activation, merge, Input, Lambda, Reshape\n'), ((8132, 8199), 'keras.layers.Convolution1D', 'Convolution1D', (['(256)', '(3)'], {'padding': '"""same"""', 'strides': '(1)', 'activation': '"""relu"""'}), "(256, 3, padding='same', strides=1, activation='relu')\n", (8145, 8199), False, 'from keras.layers import Convolution1D, Flatten, Dropout, MaxPool1D, GlobalAveragePooling1D\n'), ((8214, 8236), 'keras.layers.MaxPool1D', 'MaxPool1D', ([], {'pool_size': '(4)'}), '(pool_size=4)\n', (8223, 8236), False, 'from keras.layers import Convolution1D, Flatten, Dropout, MaxPool1D, GlobalAveragePooling1D\n'), ((8250, 8317), 'keras.layers.Convolution1D', 'Convolution1D', (['(256)', '(4)'], {'padding': '"""same"""', 'strides': '(1)', 'activation': '"""relu"""'}), "(256, 4, padding='same', strides=1, activation='relu')\n", (8263, 8317), False, 'from keras.layers import Convolution1D, Flatten, Dropout, MaxPool1D, GlobalAveragePooling1D\n'), ((8332, 8354), 'keras.layers.MaxPool1D', 'MaxPool1D', ([], {'pool_size': '(4)'}), '(pool_size=4)\n', (8341, 8354), False, 'from keras.layers import Convolution1D, Flatten, Dropout, MaxPool1D, GlobalAveragePooling1D\n'), ((8368, 8435), 'keras.layers.Convolution1D', 'Convolution1D', (['(256)', '(5)'], {'padding': 
'"""same"""', 'strides': '(1)', 'activation': '"""relu"""'}), "(256, 5, padding='same', strides=1, activation='relu')\n", (8381, 8435), False, 'from keras.layers import Convolution1D, Flatten, Dropout, MaxPool1D, GlobalAveragePooling1D\n'), ((8450, 8472), 'keras.layers.MaxPool1D', 'MaxPool1D', ([], {'pool_size': '(4)'}), '(pool_size=4)\n', (8459, 8472), False, 'from keras.layers import Convolution1D, Flatten, Dropout, MaxPool1D, GlobalAveragePooling1D\n'), ((8533, 8542), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (8540, 8542), False, 'from keras.layers import Convolution1D, Flatten, Dropout, MaxPool1D, GlobalAveragePooling1D\n'), ((8555, 8567), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (8562, 8567), False, 'from keras.layers import Convolution1D, Flatten, Dropout, MaxPool1D, GlobalAveragePooling1D\n'), ((8588, 8627), 'keras.layers.Dense', 'Dense', (['num_labels'], {'activation': '"""softmax"""'}), "(num_labels, activation='softmax')\n", (8593, 8627), False, 'from keras.layers import Dense, Embedding, Activation, merge, Input, Lambda, Reshape\n'), ((9261, 9328), 'keras.layers.Convolution1D', 'Convolution1D', (['(256)', '(3)'], {'padding': '"""same"""', 'strides': '(1)', 'activation': '"""relu"""'}), "(256, 3, padding='same', strides=1, activation='relu')\n", (9274, 9328), False, 'from keras.layers import Convolution1D, Flatten, Dropout, MaxPool1D, GlobalAveragePooling1D\n'), ((9343, 9365), 'keras.layers.MaxPool1D', 'MaxPool1D', ([], {'pool_size': '(4)'}), '(pool_size=4)\n', (9352, 9365), False, 'from keras.layers import Convolution1D, Flatten, Dropout, MaxPool1D, GlobalAveragePooling1D\n'), ((9379, 9446), 'keras.layers.Convolution1D', 'Convolution1D', (['(256)', '(4)'], {'padding': '"""same"""', 'strides': '(1)', 'activation': '"""relu"""'}), "(256, 4, padding='same', strides=1, activation='relu')\n", (9392, 9446), False, 'from keras.layers import Convolution1D, Flatten, Dropout, MaxPool1D, GlobalAveragePooling1D\n'), ((9461, 9483), 
'keras.layers.MaxPool1D', 'MaxPool1D', ([], {'pool_size': '(4)'}), '(pool_size=4)\n', (9470, 9483), False, 'from keras.layers import Convolution1D, Flatten, Dropout, MaxPool1D, GlobalAveragePooling1D\n'), ((9497, 9564), 'keras.layers.Convolution1D', 'Convolution1D', (['(256)', '(5)'], {'padding': '"""same"""', 'strides': '(1)', 'activation': '"""relu"""'}), "(256, 5, padding='same', strides=1, activation='relu')\n", (9510, 9564), False, 'from keras.layers import Convolution1D, Flatten, Dropout, MaxPool1D, GlobalAveragePooling1D\n'), ((9579, 9601), 'keras.layers.MaxPool1D', 'MaxPool1D', ([], {'pool_size': '(4)'}), '(pool_size=4)\n', (9588, 9601), False, 'from keras.layers import Convolution1D, Flatten, Dropout, MaxPool1D, GlobalAveragePooling1D\n'), ((9740, 9779), 'keras.layers.Dense', 'Dense', (['num_labels'], {'activation': '"""softmax"""'}), "(num_labels, activation='softmax')\n", (9745, 9779), False, 'from keras.layers import Dense, Embedding, Activation, merge, Input, Lambda, Reshape\n'), ((10260, 10327), 'keras.layers.Convolution1D', 'Convolution1D', (['(256)', '(3)'], {'padding': '"""same"""', 'strides': '(1)', 'activation': '"""relu"""'}), "(256, 3, padding='same', strides=1, activation='relu')\n", (10273, 10327), False, 'from keras.layers import Convolution1D, Flatten, Dropout, MaxPool1D, GlobalAveragePooling1D\n'), ((10341, 10363), 'keras.layers.MaxPool1D', 'MaxPool1D', ([], {'pool_size': '(4)'}), '(pool_size=4)\n', (10350, 10363), False, 'from keras.layers import Convolution1D, Flatten, Dropout, MaxPool1D, GlobalAveragePooling1D\n'), ((10375, 10384), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (10382, 10384), False, 'from keras.layers import Convolution1D, Flatten, Dropout, MaxPool1D, GlobalAveragePooling1D\n'), ((10396, 10406), 'keras.layers.Dense', 'Dense', (['(256)'], {}), '(256)\n', (10401, 10406), False, 'from keras.layers import Dense, Embedding, Activation, merge, Input, Lambda, Reshape\n'), ((10491, 10501), 'keras.layers.Dense', 'Dense', 
(['(256)'], {}), '(256)\n', (10496, 10501), False, 'from keras.layers import Dense, Embedding, Activation, merge, Input, Lambda, Reshape\n'), ((10560, 10599), 'keras.layers.Dense', 'Dense', (['num_labels'], {'activation': '"""softmax"""'}), "(num_labels, activation='softmax')\n", (10565, 10599), False, 'from keras.layers import Dense, Embedding, Activation, merge, Input, Lambda, Reshape\n'), ((11093, 11160), 'keras.layers.Convolution1D', 'Convolution1D', (['(256)', '(3)'], {'padding': '"""same"""', 'strides': '(1)', 'activation': '"""relu"""'}), "(256, 3, padding='same', strides=1, activation='relu')\n", (11106, 11160), False, 'from keras.layers import Convolution1D, Flatten, Dropout, MaxPool1D, GlobalAveragePooling1D\n'), ((11174, 11236), 'keras.layers.Reshape', 'Reshape', ([], {'target_shape': '(cnn.shape[2].value, cnn.shape[1].value)'}), '(target_shape=(cnn.shape[2].value, cnn.shape[1].value))\n', (11181, 11236), False, 'from keras.layers import Dense, Embedding, Activation, merge, Input, Lambda, Reshape\n'), ((12146, 12161), 'keras.layers.Convolution1D', 'Convolution1D', ([], {}), '()\n', (12159, 12161), False, 'from keras.layers import Convolution1D, Flatten, Dropout, MaxPool1D, GlobalAveragePooling1D\n'), ((12221, 12239), 'keras.layers.Activation', 'Activation', (['"""tanh"""'], {}), "('tanh')\n", (12231, 12239), False, 'from keras.layers import Dense, Embedding, Activation, merge, Input, Lambda, Reshape\n'), ((15134, 15173), 'keras.layers.Dense', 'Dense', (['num_labels'], {'activation': '"""softmax"""'}), "(num_labels, activation='softmax')\n", (15139, 15173), False, 'from keras.layers import Dense, Embedding, Activation, merge, Input, Lambda, Reshape\n'), ((16916, 16961), 'keras.layers.Embedding', 'Embedding', (['max_features', '(256)'], {'input_length': '(64)'}), '(max_features, 256, input_length=64)\n', (16925, 16961), False, 'from keras.layers import Dense, Embedding, Activation, merge, Input, Lambda, Reshape\n'), ((16973, 16997), 
'keras.layers.GlobalAveragePooling1D', 'GlobalAveragePooling1D', ([], {}), '()\n', (16995, 16997), False, 'from keras.layers import Convolution1D, Flatten, Dropout, MaxPool1D, GlobalAveragePooling1D\n'), ((17009, 17048), 'keras.layers.Dense', 'Dense', (['num_labels'], {'activation': '"""softmax"""'}), "(num_labels, activation='softmax')\n", (17014, 17048), False, 'from keras.layers import Dense, Embedding, Activation, merge, Input, Lambda, Reshape\n'), ((18306, 18338), 'keras.layers.LSTM', 'LSTM', (['(256)'], {'return_sequences': '(True)'}), '(256, return_sequences=True)\n', (18310, 18338), False, 'from keras.layers import LSTM, GRU, TimeDistributed, Bidirectional\n'), ((18383, 18434), 'keras.layers.LSTM', 'LSTM', (['(256)'], {'return_sequences': '(True)', 'go_backwards': '(True)'}), '(256, return_sequences=True, go_backwards=True)\n', (18387, 18434), False, 'from keras.layers import LSTM, GRU, TimeDistributed, Bidirectional\n'), ((18755, 18786), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""softmax"""'}), "(10, activation='softmax')\n", (18760, 18786), False, 'from keras.layers import Dense, Embedding, Activation, merge, Input, Lambda, Reshape\n'), ((2588, 2613), 'numpy.arange', 'np.arange', (['(0)', 'y1.shape[0]'], {}), '(0, y1.shape[0])\n', (2597, 2613), True, 'import numpy as np\n'), ((2631, 2656), 'numpy.arange', 'np.arange', (['(0)', 'y2.shape[0]'], {}), '(0, y2.shape[0])\n', (2640, 2656), True, 'import numpy as np\n'), ((9675, 9719), 'keras.layers.GRU', 'GRU', (['(256)'], {'dropout': '(0.2)', 'recurrent_dropout': '(0.1)'}), '(256, dropout=0.2, recurrent_dropout=0.1)\n', (9678, 9719), False, 'from keras.layers import LSTM, GRU, TimeDistributed, Bidirectional\n'), ((10432, 10476), 'keras.layers.GRU', 'GRU', (['(256)'], {'dropout': '(0.2)', 'recurrent_dropout': '(0.1)'}), '(256, dropout=0.2, recurrent_dropout=0.1)\n', (10435, 10476), False, 'from keras.layers import LSTM, GRU, TimeDistributed, Bidirectional\n'), ((12604, 12626), 
'keras.initializers.get', 'initializers.get', (['init'], {}), '(init)\n', (12620, 12626), False, 'from keras import initializers\n'), ((12661, 12683), 'keras.initializers.get', 'initializers.get', (['init'], {}), '(init)\n', (12677, 12683), False, 'from keras import initializers\n'), ((12719, 12755), 'keras.regularizers.get', 'regularizers.get', (['kernel_regularizer'], {}), '(kernel_regularizer)\n', (12735, 12755), False, 'from keras import regularizers\n'), ((12788, 12824), 'keras.regularizers.get', 'regularizers.get', (['kernel_regularizer'], {}), '(kernel_regularizer)\n', (12804, 12824), False, 'from keras import regularizers\n'), ((12859, 12893), 'keras.constraints.get', 'constraints.get', (['kernel_constraint'], {}), '(kernel_constraint)\n', (12874, 12893), False, 'from keras import constraints\n'), ((12925, 12957), 'keras.constraints.get', 'constraints.get', (['bias_constraint'], {}), '(bias_constraint)\n', (12940, 12957), False, 'from keras import constraints\n'), ((14183, 14199), 'keras.backend.dot', 'K.dot', (['x', 'self.W'], {}), '(x, self.W)\n', (14188, 14199), True, 'from keras import backend as K\n'), ((14228, 14246), 'keras.backend.squeeze', 'K.squeeze', (['uit', '(-1)'], {}), '(uit, -1)\n', (14237, 14246), True, 'from keras import backend as K\n'), ((14318, 14329), 'keras.backend.tanh', 'K.tanh', (['uit'], {}), '(uit)\n', (14324, 14329), True, 'from keras import backend as K\n'), ((14414, 14424), 'keras.backend.exp', 'K.exp', (['ait'], {}), '(ait)\n', (14419, 14424), True, 'from keras import backend as K\n'), ((14670, 14688), 'keras.backend.expand_dims', 'K.expand_dims', (['ait'], {}), '(ait)\n', (14683, 14688), True, 'from keras import backend as K\n'), ((14739, 14768), 'keras.backend.sum', 'K.sum', (['weighted_input'], {'axis': '(1)'}), '(weighted_input, axis=1)\n', (14744, 14768), True, 'from keras import backend as K\n'), ((15021, 15088), 'keras.layers.GRU', 'GRU', (['(100)'], {'dropout': '(0.2)', 'recurrent_dropout': '(0.1)', 
'return_sequences': '(True)'}), '(100, dropout=0.2, recurrent_dropout=0.1, return_sequences=True)\n', (15024, 15088), False, 'from keras.layers import LSTM, GRU, TimeDistributed, Bidirectional\n'), ((18582, 18611), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""tanh"""'}), "(128, activation='tanh')\n", (18587, 18611), False, 'from keras.layers import Dense, Embedding, Activation, merge, Input, Lambda, Reshape\n'), ((14644, 14654), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (14652, 14654), True, 'from keras import backend as K\n'), ((18671, 18693), 'keras.backend.max', 'backend.max', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (18682, 18693), False, 'from keras import backend\n'), ((14497, 14507), 'keras.backend.floatx', 'K.floatx', ([], {}), '()\n', (14505, 14507), True, 'from keras import backend as K\n'), ((14595, 14628), 'keras.backend.sum', 'K.sum', (['ait'], {'axis': '(1)', 'keepdims': '(True)'}), '(ait, axis=1, keepdims=True)\n', (14600, 14628), True, 'from keras import backend as K\n'), ((14631, 14642), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (14640, 14642), True, 'from keras import backend as K\n')] |
"""
Synopsis: A binder for enabling this package using numpy arrays.
Author: <NAME> <<EMAIL>, <EMAIL>>
"""
from ctypes import cdll, POINTER, c_int, c_double, byref
import numpy as np
import ctypes
import pandas as pd
from numpy.ctypeslib import ndpointer
lib = cdll.LoadLibrary("./miniball_python.so")
def miniball(val):
"""
Computes the miniball.
input: val, a 2D numpy-array with points as rows, features as columns.
output: a dict containing:
- center: a 1D numpy-vector with the center of the miniball.
- radius: The radius.
- radius_squared. The radius squared.
"""
if isinstance(val, pd.DataFrame):
val = val.values
assert isinstance(val, np.ndarray)
if val.flags["C_CONTIGUOUS"] is False:
val = val.copy(order="C")
a = c_double(0)
b = c_double(0)
lib.miniball.argtypes = [
ndpointer(ctypes.c_double, flags="C_CONTIGUOUS"),
c_int,
c_int,
POINTER(c_double),
POINTER(ctypes.c_double),
]
rows = int(val.shape[0])
cols = int(val.shape[1])
lib.miniball.restype = POINTER(ctypes.c_double * val.shape[1])
center = lib.miniball(val, rows, cols, byref(a), byref(b))
return {
"center": np.array([i for i in center.contents]),
"radius": a.value,
"radius_squared": b.value,
}
if __name__ == "__main__":
print(
miniball(np.array([[3.0, 1.0], [3.0, 1.0], [1.0, 0.0]], dtype=np.double))[
"center"
]
)
| [
"ctypes.byref",
"ctypes.POINTER",
"ctypes.cdll.LoadLibrary",
"numpy.array",
"numpy.ctypeslib.ndpointer",
"ctypes.c_double"
] | [((272, 312), 'ctypes.cdll.LoadLibrary', 'cdll.LoadLibrary', (['"""./miniball_python.so"""'], {}), "('./miniball_python.so')\n", (288, 312), False, 'from ctypes import cdll, POINTER, c_int, c_double, byref\n'), ((842, 853), 'ctypes.c_double', 'c_double', (['(0)'], {}), '(0)\n', (850, 853), False, 'from ctypes import cdll, POINTER, c_int, c_double, byref\n'), ((862, 873), 'ctypes.c_double', 'c_double', (['(0)'], {}), '(0)\n', (870, 873), False, 'from ctypes import cdll, POINTER, c_int, c_double, byref\n'), ((1144, 1183), 'ctypes.POINTER', 'POINTER', (['(ctypes.c_double * val.shape[1])'], {}), '(ctypes.c_double * val.shape[1])\n', (1151, 1183), False, 'from ctypes import cdll, POINTER, c_int, c_double, byref\n'), ((912, 960), 'numpy.ctypeslib.ndpointer', 'ndpointer', (['ctypes.c_double'], {'flags': '"""C_CONTIGUOUS"""'}), "(ctypes.c_double, flags='C_CONTIGUOUS')\n", (921, 960), False, 'from numpy.ctypeslib import ndpointer\n'), ((1000, 1017), 'ctypes.POINTER', 'POINTER', (['c_double'], {}), '(c_double)\n', (1007, 1017), False, 'from ctypes import cdll, POINTER, c_int, c_double, byref\n'), ((1027, 1051), 'ctypes.POINTER', 'POINTER', (['ctypes.c_double'], {}), '(ctypes.c_double)\n', (1034, 1051), False, 'from ctypes import cdll, POINTER, c_int, c_double, byref\n'), ((1227, 1235), 'ctypes.byref', 'byref', (['a'], {}), '(a)\n', (1232, 1235), False, 'from ctypes import cdll, POINTER, c_int, c_double, byref\n'), ((1237, 1245), 'ctypes.byref', 'byref', (['b'], {}), '(b)\n', (1242, 1245), False, 'from ctypes import cdll, POINTER, c_int, c_double, byref\n'), ((1278, 1316), 'numpy.array', 'np.array', (['[i for i in center.contents]'], {}), '([i for i in center.contents])\n', (1286, 1316), True, 'import numpy as np\n'), ((1443, 1506), 'numpy.array', 'np.array', (['[[3.0, 1.0], [3.0, 1.0], [1.0, 0.0]]'], {'dtype': 'np.double'}), '([[3.0, 1.0], [3.0, 1.0], [1.0, 0.0]], dtype=np.double)\n', (1451, 1506), True, 'import numpy as np\n')] |
import numpy as np
import os
import csv
import argparse
import torchvision.transforms as transforms
from PIL import Image
def loading_ucf_lists():
dataset_root = "/home/ubuntu/data/ucf101"
split = 'split_1'
# data frame root
dataset_frame_root = os.path.join(dataset_root, 'rawframes')
# data list file
train_list_file = os.path.join(dataset_root, 'ucfTrainTestlist',
'ucf101_' + 'train' + '_' + split + '_rawframes' + '.txt')
test_list_file = os.path.join(dataset_root, 'ucfTrainTestlist',
'ucf101_' + 'test' + '_' + split + '_rawframes' + '.txt')
# load vid samples
samples_train = _load_list(train_list_file, dataset_frame_root)
samples_test = _load_list(test_list_file, dataset_frame_root)
return samples_train, samples_test
def loading_hmdb_lists():
dataset_root = "/home/ubuntu/data/hmdb51/"
split = 'split_1'
# data frame root
dataset_frame_root = os.path.join(dataset_root, 'rawframes')
# data list file
train_list_file = os.path.join(dataset_root, 'testTrainMulti_7030_splits',
'hmdb51_' + 'train' + '_' + split + '_rawframes' + '.txt')
test_list_file = os.path.join(dataset_root, 'testTrainMulti_7030_splits',
'hmdb51_' + 'test' + '_' + split + '_rawframes' + '.txt')
# load vid samples
samples_train = _load_list(train_list_file, dataset_frame_root)
samples_test = _load_list(test_list_file, dataset_frame_root)
return samples_train, samples_test
def _load_list(list_root, dataset_frame_root):
with open(list_root, 'r') as f:
lines = f.readlines()
vids = []
for k, l in enumerate(lines):
lsp = l.strip().split(' ')
# path, frame, label
vid_root = os.path.join(dataset_frame_root, lsp[0])
vid_root, _ = os.path.splitext(vid_root)
# use splitetxt twice because there are some video root like: abseiling/9EnSwbXxu5g.mp4.webm
vid_root, _ = os.path.splitext(vid_root)
vids.append((vid_root, int(lsp[1]), int(lsp[2])))
return vids
def _get_imgs(frame_root, frame_idx, transform):
frame = Image.open(os.path.join(frame_root, 'img_{:05d}.jpg'.format(frame_idx)))
frame.convert('RGB')
frame_aug = transform(frame)
return np.array(frame_aug)
def retrieval_imgs(samples, idx, transform):
frame_root, frame_num, cls = samples[idx]
frame_indices = np.round(np.linspace(1, frame_num, num=3)).astype(np.int64)
# get query images
imgs = []
for frame_idx in frame_indices:
imgs.append(_get_imgs(frame_root, frame_idx, transform))
out_img = Image.fromarray(np.concatenate(imgs, axis=1))
return frame_root.split('/')[7], out_img
if __name__ == '__main__':
parser = argparse.ArgumentParser('retrieval visualization')
parser.add_argument('--data-source', type=str)
args = parser.parse_args()
if args.data_source == "ucf":
samples_train, samples_query = loading_ucf_lists()
elif args.data_source == "hmdb":
samples_train, samples_query = loading_hmdb_lists()
else:
raise Exception("Please assigne the data-source argument!")
top_k_indices = np.load('./model/eval_retrieval/top_k_indices.npy')
transform_list = [transforms.CenterCrop(224)]
img_transform = transforms.Compose(transform_list)
save_folder = './model/eval_retrieval/imgs'
os.makedirs(save_folder, exist_ok=True)
label_dict = dict()
for idx, top_k in enumerate(top_k_indices):
query_label, query = retrieval_imgs(samples_query, idx, img_transform)
query_root = os.path.join(save_folder, query_label)
os.makedirs(query_root, exist_ok=True)
query.save(os.path.join(query_root, 'query.png'))
# top k images
top = 1
top_k_label = []
for topk_idx in top_k:
key_label, key = retrieval_imgs(samples_train, topk_idx, img_transform)
key.save(os.path.join(query_root, 'top_{}.png'.format(top)))
top_k_label.append(key_label)
top += 1
label_dict[query_label] = top_k_label
# save label
label_file = os.path.join(save_folder, 'label_dict.txt')
f = open(label_file, 'w')
for k, v in label_dict.items():
print(k, ":", v)
f.write(k + ':' + str(v))
f.write('\n')
f.close()
| [
"torchvision.transforms.CenterCrop",
"argparse.ArgumentParser",
"os.makedirs",
"os.path.join",
"os.path.splitext",
"numpy.array",
"numpy.linspace",
"numpy.concatenate",
"numpy.load",
"torchvision.transforms.Compose"
] | [((265, 304), 'os.path.join', 'os.path.join', (['dataset_root', '"""rawframes"""'], {}), "(dataset_root, 'rawframes')\n", (277, 304), False, 'import os\n'), ((349, 458), 'os.path.join', 'os.path.join', (['dataset_root', '"""ucfTrainTestlist"""', "('ucf101_' + 'train' + '_' + split + '_rawframes' + '.txt')"], {}), "(dataset_root, 'ucfTrainTestlist', 'ucf101_' + 'train' + '_' +\n split + '_rawframes' + '.txt')\n", (361, 458), False, 'import os\n'), ((511, 619), 'os.path.join', 'os.path.join', (['dataset_root', '"""ucfTrainTestlist"""', "('ucf101_' + 'test' + '_' + split + '_rawframes' + '.txt')"], {}), "(dataset_root, 'ucfTrainTestlist', 'ucf101_' + 'test' + '_' +\n split + '_rawframes' + '.txt')\n", (523, 619), False, 'import os\n'), ((992, 1031), 'os.path.join', 'os.path.join', (['dataset_root', '"""rawframes"""'], {}), "(dataset_root, 'rawframes')\n", (1004, 1031), False, 'import os\n'), ((1076, 1195), 'os.path.join', 'os.path.join', (['dataset_root', '"""testTrainMulti_7030_splits"""', "('hmdb51_' + 'train' + '_' + split + '_rawframes' + '.txt')"], {}), "(dataset_root, 'testTrainMulti_7030_splits', 'hmdb51_' +\n 'train' + '_' + split + '_rawframes' + '.txt')\n", (1088, 1195), False, 'import os\n'), ((1248, 1366), 'os.path.join', 'os.path.join', (['dataset_root', '"""testTrainMulti_7030_splits"""', "('hmdb51_' + 'test' + '_' + split + '_rawframes' + '.txt')"], {}), "(dataset_root, 'testTrainMulti_7030_splits', 'hmdb51_' + 'test' +\n '_' + split + '_rawframes' + '.txt')\n", (1260, 1366), False, 'import os\n'), ((2362, 2381), 'numpy.array', 'np.array', (['frame_aug'], {}), '(frame_aug)\n', (2370, 2381), True, 'import numpy as np\n'), ((2843, 2893), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""retrieval visualization"""'], {}), "('retrieval visualization')\n", (2866, 2893), False, 'import argparse\n'), ((3267, 3318), 'numpy.load', 'np.load', (['"""./model/eval_retrieval/top_k_indices.npy"""'], {}), "('./model/eval_retrieval/top_k_indices.npy')\n", 
(3274, 3318), True, 'import numpy as np\n'), ((3389, 3423), 'torchvision.transforms.Compose', 'transforms.Compose', (['transform_list'], {}), '(transform_list)\n', (3407, 3423), True, 'import torchvision.transforms as transforms\n'), ((3477, 3516), 'os.makedirs', 'os.makedirs', (['save_folder'], {'exist_ok': '(True)'}), '(save_folder, exist_ok=True)\n', (3488, 3516), False, 'import os\n'), ((4234, 4277), 'os.path.join', 'os.path.join', (['save_folder', '"""label_dict.txt"""'], {}), "(save_folder, 'label_dict.txt')\n", (4246, 4277), False, 'import os\n'), ((1841, 1881), 'os.path.join', 'os.path.join', (['dataset_frame_root', 'lsp[0]'], {}), '(dataset_frame_root, lsp[0])\n', (1853, 1881), False, 'import os\n'), ((1904, 1930), 'os.path.splitext', 'os.path.splitext', (['vid_root'], {}), '(vid_root)\n', (1920, 1930), False, 'import os\n'), ((2054, 2080), 'os.path.splitext', 'os.path.splitext', (['vid_root'], {}), '(vid_root)\n', (2070, 2080), False, 'import os\n'), ((2725, 2753), 'numpy.concatenate', 'np.concatenate', (['imgs'], {'axis': '(1)'}), '(imgs, axis=1)\n', (2739, 2753), True, 'import numpy as np\n'), ((3341, 3367), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (3362, 3367), True, 'import torchvision.transforms as transforms\n'), ((3691, 3729), 'os.path.join', 'os.path.join', (['save_folder', 'query_label'], {}), '(save_folder, query_label)\n', (3703, 3729), False, 'import os\n'), ((3738, 3776), 'os.makedirs', 'os.makedirs', (['query_root'], {'exist_ok': '(True)'}), '(query_root, exist_ok=True)\n', (3749, 3776), False, 'import os\n'), ((3797, 3834), 'os.path.join', 'os.path.join', (['query_root', '"""query.png"""'], {}), "(query_root, 'query.png')\n", (3809, 3834), False, 'import os\n'), ((2504, 2536), 'numpy.linspace', 'np.linspace', (['(1)', 'frame_num'], {'num': '(3)'}), '(1, frame_num, num=3)\n', (2515, 2536), True, 'import numpy as np\n')] |
from __future__ import division
import sys
import numpy as np
import matplotlib.pyplot as plt
import itertools
from matplotlib import rcParams
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Tahoma']
rcParams['ps.useafm'] = True
rcParams['pdf.use14corefonts'] = True
rcParams['text.usetex'] = True
rcParams["figure.figsize"] = (17,15)
def plot(title, *filenames):
figlabel = itertools.cycle(('a','b','c','d','e','f','g','h','i'))
marker = itertools.cycle(('o', 'v','*','D','x','+'))
colors = itertools.cycle(('c','r','b','g','r','y','y','b'))
names = []
fig = plt.figure()
ax = fig.add_subplot(111)
fig.subplots_adjust (top = 0.9, left = 0.1, right = 0.95, hspace = 0.3)
ax.set_xlabel('Utilization (\%)',size=17)
ax.set_ylabel('Acceptance Ratio (\%)',size=17)
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_color('none')
ax.spines['left'].set_color('none')
ax.spines['right'].set_color('none')
ax.tick_params(labelcolor = 'w', top = 'off', bottom = 'off', left = 'off', right = 'off')
for filename in filenames:
data = np.load(filename)
data = data.item()
step_size = data['step_size']
set_size = data['set_size']
sim_size = data['sim_size']
utilization = map(lambda u : 100 * u, np.arange(step_size, 1.0 + step_size, step_size))
acceptance = map(lambda failed : 100.0 * (sim_size - failed)/sim_size, data['results'])
ax.axis([-2,102,-2,102])
ax.plot(utilization, acceptance, '-', color = colors.next(), marker = marker.next(), markersize = 12, fillstyle = 'none', markevery = 1, label = data['id'], linewidth = 1.9)
names.append(data['id'])
ax.tick_params(labelcolor='k', top='off', bottom='off', left='off', right='off')
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(15)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(15)
if len(filenames) == 1:
ax.legend(bbox_to_anchor = (1.65, 1.2), loc = 10, markerscale = 1.0, ncol = len(filenames), borderaxespad = 0., prop = {'size':7})
ax.set_title('('+figlabel.next()+')', size = 8, y = 1.02)
ax.grid()
plt.title(title)
ax.legend(names)
ax.grid()
plt.show()
return fig | [
"itertools.cycle",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.load",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((404, 466), 'itertools.cycle', 'itertools.cycle', (["('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i')"], {}), "(('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i'))\n", (419, 466), False, 'import itertools\n'), ((472, 519), 'itertools.cycle', 'itertools.cycle', (["('o', 'v', '*', 'D', 'x', '+')"], {}), "(('o', 'v', '*', 'D', 'x', '+'))\n", (487, 519), False, 'import itertools\n'), ((529, 586), 'itertools.cycle', 'itertools.cycle', (["('c', 'r', 'b', 'g', 'r', 'y', 'y', 'b')"], {}), "(('c', 'r', 'b', 'g', 'r', 'y', 'y', 'b'))\n", (544, 586), False, 'import itertools\n'), ((605, 617), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (615, 617), True, 'import matplotlib.pyplot as plt\n'), ((2282, 2298), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2291, 2298), True, 'import matplotlib.pyplot as plt\n'), ((2338, 2348), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2346, 2348), True, 'import matplotlib.pyplot as plt\n'), ((1125, 1142), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (1132, 1142), True, 'import numpy as np\n'), ((1335, 1383), 'numpy.arange', 'np.arange', (['step_size', '(1.0 + step_size)', 'step_size'], {}), '(step_size, 1.0 + step_size, step_size)\n', (1344, 1383), True, 'import numpy as np\n')] |
from commons.util import most_similar, create_co_matrix, ppmi
from datasets import ptb
import numpy as np
window_size = 2
wordvec_size = 100
corpus, word_to_id, id_to_word = ptb.load_data('train')
vocab_size = len(word_to_id)
print('Calculating coincide number ...')
C = create_co_matrix(corpus, vocab_size, window_size)
print('Calculating PPMI ...')
W = ppmi(C, verbose=True)
print('Calculating SVD ...')
try:
# truncated SVD(fast)
from sklearn.utils.extmath import randomized_svd
U, S, V = randomized_svd(
W, n_components=wordvec_size, n_iter=5, random_state=None)
except ImportError:
# SVD(slow)
U, S, V = np.linalg.svd(W)
word_vecs = U[:, :wordvec_size]
querys = ['you', 'year', 'car', 'toyota']
for query in querys:
most_similar(query, word_to_id, id_to_word, word_vecs, top=5)
| [
"commons.util.create_co_matrix",
"sklearn.utils.extmath.randomized_svd",
"commons.util.most_similar",
"datasets.ptb.load_data",
"commons.util.ppmi",
"numpy.linalg.svd"
] | [((176, 198), 'datasets.ptb.load_data', 'ptb.load_data', (['"""train"""'], {}), "('train')\n", (189, 198), False, 'from datasets import ptb\n'), ((273, 322), 'commons.util.create_co_matrix', 'create_co_matrix', (['corpus', 'vocab_size', 'window_size'], {}), '(corpus, vocab_size, window_size)\n', (289, 322), False, 'from commons.util import most_similar, create_co_matrix, ppmi\n'), ((357, 378), 'commons.util.ppmi', 'ppmi', (['C'], {'verbose': '(True)'}), '(C, verbose=True)\n', (361, 378), False, 'from commons.util import most_similar, create_co_matrix, ppmi\n'), ((507, 580), 'sklearn.utils.extmath.randomized_svd', 'randomized_svd', (['W'], {'n_components': 'wordvec_size', 'n_iter': '(5)', 'random_state': 'None'}), '(W, n_components=wordvec_size, n_iter=5, random_state=None)\n', (521, 580), False, 'from sklearn.utils.extmath import randomized_svd\n'), ((758, 819), 'commons.util.most_similar', 'most_similar', (['query', 'word_to_id', 'id_to_word', 'word_vecs'], {'top': '(5)'}), '(query, word_to_id, id_to_word, word_vecs, top=5)\n', (770, 819), False, 'from commons.util import most_similar, create_co_matrix, ppmi\n'), ((640, 656), 'numpy.linalg.svd', 'np.linalg.svd', (['W'], {}), '(W)\n', (653, 656), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import division
import bz2
from datetime import datetime
import os
import pickle
import numpy as np
import torch
from tqdm import trange
from agent import Agent
from utils import initialize_environment
from memory import ReplayMemory
from test import test
from parsers import parser
# Setup: parse CLI options, create the results directory, and seed all RNGs.
args = parser.parse_args()
print('Options')
for k, v in vars(args).items():
  print(k + ': ' + str(v))
# All artefacts (metrics, checkpoints) go under results/<run id>.
results_dir = os.path.join('results', args.id)
if not os.path.exists(results_dir):
  os.makedirs(results_dir)
# Running aggregates; presumably filled in by test() during evaluation -- verify.
metrics = {'steps': [], 'rewards': [], 'Qs': [], 'best_avg_reward': -float('inf')}
# Seed numpy from the CLI seed, then derive the torch seeds from numpy so a
# single --seed value makes the whole run reproducible.
np.random.seed(args.seed)
torch.manual_seed(np.random.randint(1, 10000))
if torch.cuda.is_available() and not args.disable_cuda:
  args.device = torch.device('cuda')
  torch.cuda.manual_seed(np.random.randint(1, 10000))
  torch.backends.cudnn.enabled = args.enable_cudnn
else:
  args.device = torch.device('cpu')
# Simple ISO 8601 timestamped logger
def log(st):
  """Print *st* prefixed with a second-resolution ISO 8601 timestamp."""
  stamp = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
  print('[{}] {}'.format(stamp, st))
def load_memory(memory_path, disable_bzip):
  """Unpickle a replay memory from *memory_path*.

  The file is read as a plain pickle when *disable_bzip* is truthy;
  otherwise it is treated as a bzip2-compressed pickle.
  """
  opener = open if disable_bzip else bz2.open
  with opener(memory_path, 'rb') as fh:
    return pickle.load(fh)
def save_memory(memory, memory_path, disable_bzip):
  """Pickle *memory* to *memory_path*.

  Writes a plain pickle when *disable_bzip* is truthy, otherwise a
  bzip2-compressed pickle (the mirror of load_memory).
  """
  opener = open if disable_bzip else bz2.open
  with opener(memory_path, 'wb') as fh:
    pickle.dump(memory, fh)
# Environment: one env for training plus a separate env for evaluation.
env, test_env = initialize_environment(args)
n_actions = env.action_space.n
# Agent
dqn = Agent(args, env)
# If a model is provided, and evaluate is false, presumably we want to resume, so try to load memory
if args.model is not None and not args.evaluate:
  if not args.memory:
    raise ValueError('Cannot resume training without memory save path. Aborting...')
  elif not os.path.exists(args.memory):
    raise ValueError(
      'Could not find memory file at {path}. Aborting...'.format(path=args.memory))
  mem = load_memory(args.memory, args.disable_bzip_memory)
else:
  # Fresh run (or evaluation-only): start with an empty replay memory.
  mem = ReplayMemory(args, args.memory_capacity, env)
# Per-step increment that linearly anneals the prioritised-replay
# importance-sampling weight towards 1 once learning has started.
priority_weight_increase = (1 - args.priority_weight) / (args.T_max - args.learn_start)
# # Construct validation memory
# val_mem = ReplayMemory(args, args.evaluation_size, test_env)
# T, done = 0, True
# while T < args.evaluation_size:
#   if done:
#     state, done = env.reset(), False
#   next_state, _, done, _ = env.step(np.random.randint(0, n_actions))
#   val_mem.append(state, -1, 0.0, done)
#   state = next_state
#   T += 1
# starting = 100
if args.evaluate:
  # Evaluation-only mode: run the test procedure once and report the result.
  dqn.eval()  # Set DQN (online network) to evaluation mode
  avg_reward = test(args, test_env, 0, dqn, metrics, results_dir, evaluate=True)  # Test
  print('Avg. reward: ' + str(avg_reward))
else:
  # Training loop
  print("STARTING TRAINING")
  dqn.train()
  T, done = 0, True
  for T in trange(1, args.T_max + 1):
    # env.render()
    if done:
      state, done = env.reset(), False
      # starting = starting
      # print("RESTART")
      mem.buffer.on_episode_end()
    # if starting > 0:
    #   starting-=1
    #   env.render()
    if T % args.replay_frequency == 0:
      dqn.reset_noise()  # Draw a new set of noisy weights
    action = dqn.act(state)  # Choose an action greedily (with noisy weights)
    next_state, reward, done, _ = env.step(action)  # Step
    if args.reward_clip > 0:
      reward = max(min(reward, args.reward_clip), -args.reward_clip)  # Clip rewards
    mem.append(state, next_state, action, reward, done)  # Append transition to memory
    # Train and test
    if T >= args.learn_start:
      mem.priority_weight = min(mem.priority_weight + priority_weight_increase,
                                1)  # Anneal importance sampling weight β to 1
      if T % args.replay_frequency == 0:
        dqn.learn(mem)  # Train with n-step distributional double-Q learning
      if T % args.evaluation_interval == 0:
        dqn.eval()  # Set DQN (online network) to evaluation mode
        avg_reward = test(args, test_env, T, dqn, metrics, results_dir)  # Test
        log('T = ' + str(T) + ' / ' + str(args.T_max) + ' | Avg. reward: ' +
            str(avg_reward))
        dqn.train()  # Set DQN (online network) back to training mode
        # If memory path provided, save it
        if args.memory is not None:
          save_memory(mem, args.memory, args.disable_bzip_memory)
      # Update target network
      if T % args.target_update == 0:
        dqn.update_target_net()
      # Checkpoint the network
      if (args.checkpoint_interval != 0) and (T % args.checkpoint_interval == 0):
        dqn.save(results_dir, 'checkpoint.pth')
    state = next_state
env.close()
| [
"os.path.exists",
"parsers.parser.parse_args",
"pickle.dump",
"os.makedirs",
"os.path.join",
"pickle.load",
"test.test",
"utils.initialize_environment",
"datetime.datetime.now",
"agent.Agent",
"numpy.random.randint",
"torch.cuda.is_available",
"numpy.random.seed",
"memory.ReplayMemory",
... | [((341, 360), 'parsers.parser.parse_args', 'parser.parse_args', ([], {}), '()\n', (358, 360), False, 'from parsers import parser\n'), ((455, 487), 'os.path.join', 'os.path.join', (['"""results"""', 'args.id'], {}), "('results', args.id)\n", (467, 487), False, 'import os\n'), ((638, 663), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (652, 663), True, 'import numpy as np\n'), ((1711, 1739), 'utils.initialize_environment', 'initialize_environment', (['args'], {}), '(args)\n', (1733, 1739), False, 'from utils import initialize_environment\n'), ((1786, 1802), 'agent.Agent', 'Agent', (['args', 'env'], {}), '(args, env)\n', (1791, 1802), False, 'from agent import Agent\n'), ((496, 523), 'os.path.exists', 'os.path.exists', (['results_dir'], {}), '(results_dir)\n', (510, 523), False, 'import os\n'), ((529, 553), 'os.makedirs', 'os.makedirs', (['results_dir'], {}), '(results_dir)\n', (540, 553), False, 'import os\n'), ((682, 709), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10000)'], {}), '(1, 10000)\n', (699, 709), True, 'import numpy as np\n'), ((715, 740), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (738, 740), False, 'import torch\n'), ((786, 806), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (798, 806), False, 'import torch\n'), ((940, 959), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (952, 959), False, 'import torch\n'), ((2303, 2348), 'memory.ReplayMemory', 'ReplayMemory', (['args', 'args.memory_capacity', 'env'], {}), '(args, args.memory_capacity, env)\n', (2315, 2348), False, 'from memory import ReplayMemory\n'), ((2916, 2981), 'test.test', 'test', (['args', 'test_env', '(0)', 'dqn', 'metrics', 'results_dir'], {'evaluate': '(True)'}), '(args, test_env, 0, dqn, metrics, results_dir, evaluate=True)\n', (2920, 2981), False, 'from test import test\n'), ((3143, 3168), 'tqdm.trange', 'trange', (['(1)', '(args.T_max + 1)'], {}), '(1, args.T_max + 
1)\n', (3149, 3168), False, 'from tqdm import trange\n'), ((834, 861), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10000)'], {}), '(1, 10000)\n', (851, 861), True, 'import numpy as np\n'), ((1230, 1254), 'pickle.load', 'pickle.load', (['pickle_file'], {}), '(pickle_file)\n', (1241, 1254), False, 'import pickle\n'), ((1278, 1305), 'bz2.open', 'bz2.open', (['memory_path', '"""rb"""'], {}), "(memory_path, 'rb')\n", (1286, 1305), False, 'import bz2\n'), ((1348, 1379), 'pickle.load', 'pickle.load', (['zipped_pickle_file'], {}), '(zipped_pickle_file)\n', (1359, 1379), False, 'import pickle\n'), ((1520, 1552), 'pickle.dump', 'pickle.dump', (['memory', 'pickle_file'], {}), '(memory, pickle_file)\n', (1531, 1552), False, 'import pickle\n'), ((1576, 1603), 'bz2.open', 'bz2.open', (['memory_path', '"""wb"""'], {}), "(memory_path, 'wb')\n", (1584, 1603), False, 'import bz2\n'), ((1639, 1678), 'pickle.dump', 'pickle.dump', (['memory', 'zipped_pickle_file'], {}), '(memory, zipped_pickle_file)\n', (1650, 1678), False, 'import pickle\n'), ((2079, 2106), 'os.path.exists', 'os.path.exists', (['args.memory'], {}), '(args.memory)\n', (2093, 2106), False, 'import os\n'), ((4447, 4497), 'test.test', 'test', (['args', 'test_env', 'T', 'dqn', 'metrics', 'results_dir'], {}), '(args, test_env, T, dqn, metrics, results_dir)\n', (4451, 4497), False, 'from test import test\n'), ((1032, 1046), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1044, 1046), False, 'from datetime import datetime\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
from scipy import signal
import copy
"""
___author__ = "<NAME>, <NAME>"
__email__ = <EMAIL>"
"""
def randRange(x1, x2, integer):
    """Draw one uniform sample from [x1, x2).

    Returns a length-1 ndarray when *integer* is falsy, otherwise the draw
    truncated to a plain int.
    """
    sample = np.random.uniform(low=x1, high=x2, size=(1,))
    return int(sample) if integer else sample
def normWav(x, always):
    """Peak-normalise *x* to unit maximum magnitude.

    When *always* is falsy, the signal is only rescaled if its peak
    magnitude exceeds 1 (i.e. it would clip); otherwise it is returned
    unchanged.
    """
    peak = np.amax(abs(x))
    if always or peak > 1:
        x = x / peak
    return x
def genNotchCoeffs(nBands,minF,maxF,minBW,maxBW,minCoeff,maxCoeff,minG,maxG,fs):
    """Build FIR coefficients for a cascade of `nBands` random band filters.

    For each band a random centre frequency (Hz), bandwidth and (odd) tap
    count are drawn, a Hamming-window FIR band filter is designed, and the
    band filters are convolved into a single impulse response. The result is
    normalised by its peak magnitude response and scaled by a random gain G
    (in dB) drawn from [minG, maxG].
    """
    b = 1
    for i in range(0, nBands):
        fc = randRange(minF,maxF,0);
        bw = randRange(minBW,maxBW,0);
        c = randRange(minCoeff,maxCoeff,1);
        # force an odd number of taps for the band design
        if c/2 == int(c/2):
            c = c + 1
        f1 = fc - bw/2
        f2 = fc + bw/2
        # clamp the band edges strictly inside (0, fs/2)
        if f1 <= 0:
            f1 = 1/1000
        if f2 >= fs/2:
            f2 = fs/2-1/1000
        b = np.convolve(signal.firwin(c, [float(f1), float(f2)], window='hamming', fs=fs),b)
    G = randRange(minG,maxG,0);
    # normalise by the cascade's peak magnitude response, then apply G dB
    _, h = signal.freqz(b, 1, fs=fs)
    b = pow(10, G/20)*b/np.amax(abs(h))
    return b
def filterFIR(x, b):
    """Apply FIR filter *b* to *x* and trim so the output keeps len(x).

    The input is zero-padded by N = len(b) + 1 samples before filtering and
    N/2 samples are cut from each end of the result, compensating for the
    filter delay.
    """
    pad = b.shape[0] + 1
    padded = np.pad(x, (0, pad), 'constant')
    filtered = signal.lfilter(b, 1, padded)
    start = int(pad / 2)
    stop = int(filtered.shape[0] - pad / 2)
    return filtered[start:stop]
# Linear and non-linear convolutive noise
def LnL_convolutive_noise(x,N_f,nBands,minF,maxF,minBW,maxBW,minCoeff,maxCoeff,minG,maxG,minBiasLinNonLin,maxBiasLinNonLin,fs):
    """Sum random-filtered powers of the signal (convolutive distortion).

    Pass i filters x**(i+1) with a fresh random filter cascade; from the
    second pass on (the nonlinear terms) the gain range is lowered once by
    the LinNonLin bias. The sum is mean-removed and rescaled only if its
    peak magnitude exceeds 1.
    """
    y = [0] * x.shape[0]
    for i in range(0, N_f):
        if i == 1:
            # lower the gain range once for all nonlinear (i >= 1) passes
            minG = minG-minBiasLinNonLin;
            maxG = maxG-maxBiasLinNonLin;
        b = genNotchCoeffs(nBands,minF,maxF,minBW,maxBW,minCoeff,maxCoeff,minG,maxG,fs)
        y = y + filterFIR(np.power(x, (i+1)), b)
    y = y - np.mean(y)
    y = normWav(y,0)
    return y
# Impulsive signal dependent noise
def ISD_additive_noise(x, P, g_sd):
    """Add impulsive, signal-dependent noise to a random subset of samples.

    A random fraction beta in [0, P] percent of the sample positions is
    chosen; each selected sample gets additive noise proportional to its own
    amplitude, scaled by g_sd and a random factor in (-1, 1). The output is
    rescaled only if its peak magnitude exceeds 1.
    """
    beta = randRange(0, P, 0)
    y = copy.deepcopy(x)
    x_len = x.shape[0]
    n = int(x_len*(beta/100))
    p = np.random.permutation(x_len)[:n]
    # product of two independent U(-1, 1) draws per corrupted position
    f_r= np.multiply(((2*np.random.rand(p.shape[0]))-1),((2*np.random.rand(p.shape[0]))-1))
    r = g_sd * x[p] * f_r
    y[p] = x[p] + r
    y = normWav(y,0)
    return y
# Stationary signal independent noise
def SSI_additive_noise(x,SNRmin,SNRmax,nBands,minF,maxF,minBW,maxBW,minCoeff,maxCoeff,minG,maxG,fs):
    """Add coloured Gaussian noise at a random SNR in [SNRmin, SNRmax] dB.

    White Gaussian noise is shaped by a random filter cascade, always
    peak-normalised, then scaled relative to the energy of x so the
    resulting signal-to-noise ratio equals the drawn SNR.
    """
    noise = np.random.normal(0, 1, x.shape[0])
    b = genNotchCoeffs(nBands,minF,maxF,minBW,maxBW,minCoeff,maxCoeff,minG,maxG,fs)
    noise = filterFIR(noise, b)
    noise = normWav(noise,1)
    SNR = randRange(SNRmin, SNRmax, 0)
    # scale so that ||noise|| = ||x|| / 10^(SNR/20)
    noise = noise / np.linalg.norm(noise,2) * np.linalg.norm(x,2) / 10.0**(0.05 * SNR)
    x = x + noise
    return x
| [
"numpy.random.normal",
"numpy.mean",
"numpy.random.rand",
"numpy.power",
"numpy.linalg.norm",
"numpy.pad",
"scipy.signal.lfilter",
"numpy.random.uniform",
"copy.deepcopy",
"scipy.signal.freqz",
"numpy.random.permutation"
] | [((207, 252), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'x1', 'high': 'x2', 'size': '(1,)'}), '(low=x1, high=x2, size=(1,))\n', (224, 252), True, 'import numpy as np\n'), ((1034, 1059), 'scipy.signal.freqz', 'signal.freqz', (['b', '(1)'], {'fs': 'fs'}), '(b, 1, fs=fs)\n', (1046, 1059), False, 'from scipy import signal\n'), ((1176, 1205), 'numpy.pad', 'np.pad', (['x', '(0, N)', '"""constant"""'], {}), "(x, (0, N), 'constant')\n", (1182, 1205), True, 'import numpy as np\n'), ((1214, 1240), 'scipy.signal.lfilter', 'signal.lfilter', (['b', '(1)', 'xpad'], {}), '(b, 1, xpad)\n', (1228, 1240), False, 'from scipy import signal\n'), ((1937, 1953), 'copy.deepcopy', 'copy.deepcopy', (['x'], {}), '(x)\n', (1950, 1953), False, 'import copy\n'), ((2374, 2408), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'x.shape[0]'], {}), '(0, 1, x.shape[0])\n', (2390, 2408), True, 'import numpy as np\n'), ((1776, 1786), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (1783, 1786), True, 'import numpy as np\n'), ((2015, 2043), 'numpy.random.permutation', 'np.random.permutation', (['x_len'], {}), '(x_len)\n', (2036, 2043), True, 'import numpy as np\n'), ((2639, 2659), 'numpy.linalg.norm', 'np.linalg.norm', (['x', '(2)'], {}), '(x, 2)\n', (2653, 2659), True, 'import numpy as np\n'), ((1735, 1753), 'numpy.power', 'np.power', (['x', '(i + 1)'], {}), '(x, i + 1)\n', (1743, 1753), True, 'import numpy as np\n'), ((2073, 2099), 'numpy.random.rand', 'np.random.rand', (['p.shape[0]'], {}), '(p.shape[0])\n', (2087, 2099), True, 'import numpy as np\n'), ((2108, 2134), 'numpy.random.rand', 'np.random.rand', (['p.shape[0]'], {}), '(p.shape[0])\n', (2122, 2134), True, 'import numpy as np\n'), ((2613, 2637), 'numpy.linalg.norm', 'np.linalg.norm', (['noise', '(2)'], {}), '(noise, 2)\n', (2627, 2637), True, 'import numpy as np\n')] |
import comet_ml # noqa: F401
import pytest
import numpy as np
import torch
from conftest import create_dataset, create_image
from traintool.image_classification.preprocessing import (
recognize_data_format,
torch_to_numpy,
numpy_to_torch,
files_to_numpy,
files_to_torch,
load_image,
recognize_image_format,
get_num_classes,
)
@pytest.fixture
def numpy_data():
    """Deterministic colour dataset (seed 0) in numpy format."""
    options = {"data_format": "numpy", "seed": 0, "grayscale": False}
    return create_dataset(**options)
@pytest.fixture
def torch_data():
    """Deterministic colour dataset (seed 0) in torch format."""
    options = {"data_format": "torch", "seed": 0, "grayscale": False}
    return create_dataset(**options)
@pytest.fixture
def files_data(tmp_path):
    """Deterministic dataset (seed 0) written to disk under *tmp_path*."""
    return create_dataset(tmp_path=tmp_path, seed=0, data_format="files")
@pytest.fixture
def numpy_image():
    """Deterministic colour image (seed 0) in numpy format."""
    return create_image(grayscale=False, seed=0, data_format="numpy")
@pytest.fixture
def torch_image():
    """Deterministic colour image (seed 0) in torch format."""
    return create_image(grayscale=False, seed=0, data_format="torch")
@pytest.fixture
def files_image(tmp_path):
    """Deterministic image (seed 0) written to disk under *tmp_path*."""
    return create_image(tmp_path=tmp_path, seed=0, data_format="files")
def test_recognize_data_format(numpy_data, torch_data, files_data):
    """recognize_data_format labels valid inputs and rejects invalid ones."""
    expectations = [
        (numpy_data, "numpy"),
        (torch_data, "pytorch-dataset"),
        (files_data, "files"),
    ]
    for data, expected in expectations:
        assert recognize_data_format(data) == expected

    for bad_input in (None, [1, 2, 3]):
        with pytest.raises(ValueError):
            recognize_data_format(bad_input)
    with pytest.raises(FileNotFoundError):
        recognize_data_format("non/existent/dir/123")
def test_recognize_image_format(numpy_image, torch_image, files_image):
    """recognize_image_format labels valid inputs and rejects invalid ones."""
    expectations = [
        (numpy_image, "numpy"),
        (files_image, "files"),
    ]
    for image, expected in expectations:
        assert recognize_image_format(image) == expected

    for bad_input in (None, [1, 2, 3]):
        with pytest.raises(ValueError):
            recognize_image_format(bad_input)
    with pytest.raises(FileNotFoundError):
        recognize_image_format("non/existent/file/123")
def test_torch_to_numpy(numpy_data, torch_data):
    """torch_to_numpy reproduces the reference numpy arrays exactly."""
    converted = torch_to_numpy(torch_data)
    for part in (0, 1):
        assert np.allclose(converted[part], numpy_data[part])
def test_numpy_to_torch(numpy_data, torch_data):
    """numpy_to_torch matches the torch dataset and honours resize/crop."""
    converted = numpy_to_torch(numpy_data)
    # atol=0.1: the round trip through PIL does not preserve values exactly.
    assert torch.allclose(converted[0][0], torch_data[0][0], atol=0.1)
    assert converted[0][1] == torch_data[0][1]

    resized = numpy_to_torch(
        numpy_data, resize=256, crop=224, mean=[0.1, 0.1, 0.1], std=[0.1, 0.1, 0.1]
    )
    assert resized[0][0].shape[1] == 224
    assert resized[0][0].shape[2] == 224
def test_files_to_numpy(files_data, numpy_data):
    """files_to_numpy preserves image shape and honours resize/crop."""
    plain = files_to_numpy(files_data)
    assert plain[0][0].shape == numpy_data[0][0].shape

    resized = files_to_numpy(
        files_data, resize=256, crop=224, mean=[0.1, 0.1, 0.1], std=[0.1, 0.1, 0.1]
    )
    height, width = resized[0][0].shape[1], resized[0][0].shape[2]
    assert height == 224
    assert width == 224
def test_files_to_torch(files_data, torch_data):
    """files_to_torch matches the torch dataset and honours resize/crop."""
    converted_data = files_to_torch(files_data)
    assert converted_data[0][0].shape == torch_data[0][0].shape

    # BUG FIX: this previously called files_to_numpy (copy-paste from the test
    # above), so files_to_torch's resize/crop path was never exercised.
    resized_converted_data = files_to_torch(
        files_data, resize=256, crop=224, mean=[0.1, 0.1, 0.1], std=[0.1, 0.1, 0.1]
    )
    assert resized_converted_data[0][0].shape[1] == 224
    assert resized_converted_data[0][0].shape[2] == 224
# TODO: Maybe add tests for to_numpy and to_torch, but note that these are kinda
# redundant to the tests above.
def test_load_image(tmp_path):
    """load_image yields correctly shaped torch tensors or numpy arrays."""
    dataset = create_dataset(grayscale=False, data_format="files", tmp_path=tmp_path)
    image_path = next(dataset.rglob("*.png"))  # pick an arbitrary image file

    tensor_img = load_image(image_path, resize=50, crop=40)
    assert isinstance(tensor_img, torch.Tensor)
    assert tensor_img.shape == (3, 40, 40)

    array_img = load_image(image_path, resize=50, crop=40, to_numpy=True)
    assert isinstance(array_img, np.ndarray)
    assert array_img.shape == (3, 40, 40)
def test_get_num_classes(numpy_data, files_data):
    """Both data formats report the same class count."""
    for data in (numpy_data, files_data):
        assert get_num_classes(data) == 4
| [
"conftest.create_image",
"traintool.image_classification.preprocessing.load_image",
"numpy.allclose",
"traintool.image_classification.preprocessing.files_to_torch",
"conftest.create_dataset",
"traintool.image_classification.preprocessing.recognize_image_format",
"traintool.image_classification.preproces... | [((408, 468), 'conftest.create_dataset', 'create_dataset', ([], {'data_format': '"""numpy"""', 'seed': '(0)', 'grayscale': '(False)'}), "(data_format='numpy', seed=0, grayscale=False)\n", (422, 468), False, 'from conftest import create_dataset, create_image\n'), ((516, 576), 'conftest.create_dataset', 'create_dataset', ([], {'data_format': '"""torch"""', 'seed': '(0)', 'grayscale': '(False)'}), "(data_format='torch', seed=0, grayscale=False)\n", (530, 576), False, 'from conftest import create_dataset, create_image\n'), ((632, 694), 'conftest.create_dataset', 'create_dataset', ([], {'data_format': '"""files"""', 'seed': '(0)', 'tmp_path': 'tmp_path'}), "(data_format='files', seed=0, tmp_path=tmp_path)\n", (646, 694), False, 'from conftest import create_dataset, create_image\n'), ((743, 801), 'conftest.create_image', 'create_image', ([], {'data_format': '"""numpy"""', 'seed': '(0)', 'grayscale': '(False)'}), "(data_format='numpy', seed=0, grayscale=False)\n", (755, 801), False, 'from conftest import create_dataset, create_image\n'), ((850, 908), 'conftest.create_image', 'create_image', ([], {'data_format': '"""torch"""', 'seed': '(0)', 'grayscale': '(False)'}), "(data_format='torch', seed=0, grayscale=False)\n", (862, 908), False, 'from conftest import create_dataset, create_image\n'), ((965, 1025), 'conftest.create_image', 'create_image', ([], {'data_format': '"""files"""', 'seed': '(0)', 'tmp_path': 'tmp_path'}), "(data_format='files', seed=0, tmp_path=tmp_path)\n", (977, 1025), False, 'from conftest import create_dataset, create_image\n'), ((2154, 2180), 'traintool.image_classification.preprocessing.torch_to_numpy', 'torch_to_numpy', (['torch_data'], {}), '(torch_data)\n', (2168, 2180), False, 'from traintool.image_classification.preprocessing import recognize_data_format, torch_to_numpy, numpy_to_torch, files_to_numpy, files_to_torch, load_image, recognize_image_format, get_num_classes\n'), ((2192, 2237), 
'numpy.allclose', 'np.allclose', (['converted_data[0]', 'numpy_data[0]'], {}), '(converted_data[0], numpy_data[0])\n', (2203, 2237), True, 'import numpy as np\n'), ((2249, 2294), 'numpy.allclose', 'np.allclose', (['converted_data[1]', 'numpy_data[1]'], {}), '(converted_data[1], numpy_data[1])\n', (2260, 2294), True, 'import numpy as np\n'), ((2367, 2393), 'traintool.image_classification.preprocessing.numpy_to_torch', 'numpy_to_torch', (['numpy_data'], {}), '(numpy_data)\n', (2381, 2393), False, 'from traintool.image_classification.preprocessing import recognize_data_format, torch_to_numpy, numpy_to_torch, files_to_numpy, files_to_torch, load_image, recognize_image_format, get_num_classes\n'), ((2534, 2598), 'torch.allclose', 'torch.allclose', (['converted_data[0][0]', 'torch_data[0][0]'], {'atol': '(0.1)'}), '(converted_data[0][0], torch_data[0][0], atol=0.1)\n', (2548, 2598), False, 'import torch\n'), ((2681, 2777), 'traintool.image_classification.preprocessing.numpy_to_torch', 'numpy_to_torch', (['numpy_data'], {'resize': '(256)', 'crop': '(224)', 'mean': '[0.1, 0.1, 0.1]', 'std': '[0.1, 0.1, 0.1]'}), '(numpy_data, resize=256, crop=224, mean=[0.1, 0.1, 0.1], std=\n [0.1, 0.1, 0.1])\n', (2695, 2777), False, 'from traintool.image_classification.preprocessing import recognize_data_format, torch_to_numpy, numpy_to_torch, files_to_numpy, files_to_torch, load_image, recognize_image_format, get_num_classes\n'), ((2971, 2997), 'traintool.image_classification.preprocessing.files_to_numpy', 'files_to_numpy', (['files_data'], {}), '(files_data)\n', (2985, 2997), False, 'from traintool.image_classification.preprocessing import recognize_data_format, torch_to_numpy, numpy_to_torch, files_to_numpy, files_to_torch, load_image, recognize_image_format, get_num_classes\n'), ((3092, 3188), 'traintool.image_classification.preprocessing.files_to_numpy', 'files_to_numpy', (['files_data'], {'resize': '(256)', 'crop': '(224)', 'mean': '[0.1, 0.1, 0.1]', 'std': '[0.1, 0.1, 0.1]'}), 
'(files_data, resize=256, crop=224, mean=[0.1, 0.1, 0.1], std=\n [0.1, 0.1, 0.1])\n', (3106, 3188), False, 'from traintool.image_classification.preprocessing import recognize_data_format, torch_to_numpy, numpy_to_torch, files_to_numpy, files_to_torch, load_image, recognize_image_format, get_num_classes\n'), ((3382, 3408), 'traintool.image_classification.preprocessing.files_to_torch', 'files_to_torch', (['files_data'], {}), '(files_data)\n', (3396, 3408), False, 'from traintool.image_classification.preprocessing import recognize_data_format, torch_to_numpy, numpy_to_torch, files_to_numpy, files_to_torch, load_image, recognize_image_format, get_num_classes\n'), ((3503, 3599), 'traintool.image_classification.preprocessing.files_to_numpy', 'files_to_numpy', (['files_data'], {'resize': '(256)', 'crop': '(224)', 'mean': '[0.1, 0.1, 0.1]', 'std': '[0.1, 0.1, 0.1]'}), '(files_data, resize=256, crop=224, mean=[0.1, 0.1, 0.1], std=\n [0.1, 0.1, 0.1])\n', (3517, 3599), False, 'from traintool.image_classification.preprocessing import recognize_data_format, torch_to_numpy, numpy_to_torch, files_to_numpy, files_to_torch, load_image, recognize_image_format, get_num_classes\n'), ((3882, 3953), 'conftest.create_dataset', 'create_dataset', ([], {'grayscale': '(False)', 'data_format': '"""files"""', 'tmp_path': 'tmp_path'}), "(grayscale=False, data_format='files', tmp_path=tmp_path)\n", (3896, 3953), False, 'from conftest import create_dataset, create_image\n'), ((4050, 4092), 'traintool.image_classification.preprocessing.load_image', 'load_image', (['image_path'], {'resize': '(50)', 'crop': '(40)'}), '(image_path, resize=50, crop=40)\n', (4060, 4092), False, 'from traintool.image_classification.preprocessing import recognize_data_format, torch_to_numpy, numpy_to_torch, files_to_numpy, files_to_torch, load_image, recognize_image_format, get_num_classes\n'), ((4193, 4250), 'traintool.image_classification.preprocessing.load_image', 'load_image', (['image_path'], {'resize': '(50)', 
'crop': '(40)', 'to_numpy': '(True)'}), '(image_path, resize=50, crop=40, to_numpy=True)\n', (4203, 4250), False, 'from traintool.image_classification.preprocessing import recognize_data_format, torch_to_numpy, numpy_to_torch, files_to_numpy, files_to_torch, load_image, recognize_image_format, get_num_classes\n'), ((1139, 1172), 'traintool.image_classification.preprocessing.recognize_data_format', 'recognize_data_format', (['numpy_data'], {}), '(numpy_data)\n', (1160, 1172), False, 'from traintool.image_classification.preprocessing import recognize_data_format, torch_to_numpy, numpy_to_torch, files_to_numpy, files_to_torch, load_image, recognize_image_format, get_num_classes\n'), ((1195, 1228), 'traintool.image_classification.preprocessing.recognize_data_format', 'recognize_data_format', (['torch_data'], {}), '(torch_data)\n', (1216, 1228), False, 'from traintool.image_classification.preprocessing import recognize_data_format, torch_to_numpy, numpy_to_torch, files_to_numpy, files_to_torch, load_image, recognize_image_format, get_num_classes\n'), ((1261, 1294), 'traintool.image_classification.preprocessing.recognize_data_format', 'recognize_data_format', (['files_data'], {}), '(files_data)\n', (1282, 1294), False, 'from traintool.image_classification.preprocessing import recognize_data_format, torch_to_numpy, numpy_to_torch, files_to_numpy, files_to_torch, load_image, recognize_image_format, get_num_classes\n'), ((1345, 1370), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1358, 1370), False, 'import pytest\n'), ((1380, 1407), 'traintool.image_classification.preprocessing.recognize_data_format', 'recognize_data_format', (['None'], {}), '(None)\n', (1401, 1407), False, 'from traintool.image_classification.preprocessing import recognize_data_format, torch_to_numpy, numpy_to_torch, files_to_numpy, files_to_torch, load_image, recognize_image_format, get_num_classes\n'), ((1417, 1442), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), 
'(ValueError)\n', (1430, 1442), False, 'import pytest\n'), ((1452, 1484), 'traintool.image_classification.preprocessing.recognize_data_format', 'recognize_data_format', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1473, 1484), False, 'from traintool.image_classification.preprocessing import recognize_data_format, torch_to_numpy, numpy_to_torch, files_to_numpy, files_to_torch, load_image, recognize_image_format, get_num_classes\n'), ((1494, 1526), 'pytest.raises', 'pytest.raises', (['FileNotFoundError'], {}), '(FileNotFoundError)\n', (1507, 1526), False, 'import pytest\n'), ((1536, 1581), 'traintool.image_classification.preprocessing.recognize_data_format', 'recognize_data_format', (['"""non/existent/dir/123"""'], {}), "('non/existent/dir/123')\n", (1557, 1581), False, 'from traintool.image_classification.preprocessing import recognize_data_format, torch_to_numpy, numpy_to_torch, files_to_numpy, files_to_torch, load_image, recognize_image_format, get_num_classes\n'), ((1696, 1731), 'traintool.image_classification.preprocessing.recognize_image_format', 'recognize_image_format', (['numpy_image'], {}), '(numpy_image)\n', (1718, 1731), False, 'from traintool.image_classification.preprocessing import recognize_data_format, torch_to_numpy, numpy_to_torch, files_to_numpy, files_to_torch, load_image, recognize_image_format, get_num_classes\n'), ((1754, 1789), 'traintool.image_classification.preprocessing.recognize_image_format', 'recognize_image_format', (['files_image'], {}), '(files_image)\n', (1776, 1789), False, 'from traintool.image_classification.preprocessing import recognize_data_format, torch_to_numpy, numpy_to_torch, files_to_numpy, files_to_torch, load_image, recognize_image_format, get_num_classes\n'), ((1841, 1866), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1854, 1866), False, 'import pytest\n'), ((1876, 1904), 'traintool.image_classification.preprocessing.recognize_image_format', 'recognize_image_format', (['None'], {}), '(None)\n', 
(1898, 1904), False, 'from traintool.image_classification.preprocessing import recognize_data_format, torch_to_numpy, numpy_to_torch, files_to_numpy, files_to_torch, load_image, recognize_image_format, get_num_classes\n'), ((1914, 1939), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1927, 1939), False, 'import pytest\n'), ((1949, 1982), 'traintool.image_classification.preprocessing.recognize_image_format', 'recognize_image_format', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1971, 1982), False, 'from traintool.image_classification.preprocessing import recognize_data_format, torch_to_numpy, numpy_to_torch, files_to_numpy, files_to_torch, load_image, recognize_image_format, get_num_classes\n'), ((1992, 2024), 'pytest.raises', 'pytest.raises', (['FileNotFoundError'], {}), '(FileNotFoundError)\n', (2005, 2024), False, 'import pytest\n'), ((2034, 2081), 'traintool.image_classification.preprocessing.recognize_image_format', 'recognize_image_format', (['"""non/existent/file/123"""'], {}), "('non/existent/file/123')\n", (2056, 2081), False, 'from traintool.image_classification.preprocessing import recognize_data_format, torch_to_numpy, numpy_to_torch, files_to_numpy, files_to_torch, load_image, recognize_image_format, get_num_classes\n'), ((4389, 4416), 'traintool.image_classification.preprocessing.get_num_classes', 'get_num_classes', (['numpy_data'], {}), '(numpy_data)\n', (4404, 4416), False, 'from traintool.image_classification.preprocessing import recognize_data_format, torch_to_numpy, numpy_to_torch, files_to_numpy, files_to_torch, load_image, recognize_image_format, get_num_classes\n'), ((4433, 4460), 'traintool.image_classification.preprocessing.get_num_classes', 'get_num_classes', (['files_data'], {}), '(files_data)\n', (4448, 4460), False, 'from traintool.image_classification.preprocessing import recognize_data_format, torch_to_numpy, numpy_to_torch, files_to_numpy, files_to_torch, load_image, recognize_image_format, get_num_classes\n')] |
import numpy as np
from numpy import ndarray
from base_ada_classifier import BaseClassifier
class RandomClassifier(BaseClassifier):
    """AdaBoost weak learner: a randomly searched decision stump.

    ``fit`` keeps drawing random (feature, threshold) pairs until the stump's
    weighted error drops to 0.5 or below, then stores the pair so ``predict``
    can classify samples as +1 (above the threshold) or -1 (otherwise).
    """

    # Chosen stump parameters; populated by fit().
    _feature_index: int = None
    _feature_value: float = None
    # Hard cap on the random search so a hopeless dataset cannot loop forever.
    _max_cycle = 1000

    def __init__(self, w: ndarray, norm_factor=1):
        """Forward the sample weights *w* and *norm_factor* to the base class."""
        super(RandomClassifier, self).__init__(w, norm_factor)

    def fit(self, X: ndarray, Y: ndarray) -> ndarray:
        """Randomly search for a stump with weighted error <= 0.5.

        X is an (n_samples, n_features) matrix, Y the +1/-1 labels.
        Returns the updated sample-weight vector from the base class.
        Raises AssertionError after ``_max_cycle`` failed attempts
        (unchanged from the original contract).
        """
        n_el, n_features = X.shape
        err = 1
        i = 0
        f_index = np.random.randint(0, n_features)
        v_min = np.min(X[:, f_index])
        v_max = np.max(X[:, f_index])
        step_size = n_features
        step = (v_max - v_min) / step_size
        while err > 0.5:
            i += 1
            if step == 0:
                # Constant feature column: only one candidate threshold.
                f_value = v_min
            else:
                f_value = np.random.choice(np.arange(v_min, v_max, step))
            prediction = self.predict(X, f_index, f_value)
            err = self._get_err(Y, prediction)
            if i % n_el == 0 or step == 0:
                # Every n_el tries (or immediately for a constant column),
                # resample which feature the stump splits on.
                f_index = np.random.randint(0, n_features)
                v_min = np.min(X[:, f_index])
                v_max = np.max(X[:, f_index])
                step = (v_max - v_min) / step_size
            if i == self._max_cycle:
                print("Cycles Error")
            # NOTE: assert is stripped under `python -O`; kept so callers that
            # expect AssertionError on search failure keep working.
            assert i != self._max_cycle
        self._error = err
        self._feature_index = f_index
        self._feature_value = f_value
        return self._get_updated_w(Y, prediction)

    def predict(self, X: ndarray, f_index=None, f_value=None) -> ndarray:
        """Classify rows of X with the stored (or explicitly given) stump.

        Returns a float vector: +1 where X[:, f_index] > f_value, else -1.
        """
        # `is None` instead of the original bug-prone `== None` comparisons.
        if f_index is None:
            f_index = self._feature_index
        if f_value is None:
            f_value = self._feature_value
        above = X[:, f_index] > f_value
        return np.where(above, 1.0, -1.0)
| [
"numpy.ones",
"numpy.random.choice",
"numpy.max",
"numpy.random.randint",
"numpy.min",
"numpy.arange"
] | [((474, 506), 'numpy.random.randint', 'np.random.randint', (['(0)', 'n_features'], {}), '(0, n_features)\n', (491, 506), True, 'import numpy as np\n'), ((523, 544), 'numpy.min', 'np.min', (['X[:, f_index]'], {}), '(X[:, f_index])\n', (529, 544), True, 'import numpy as np\n'), ((561, 582), 'numpy.max', 'np.max', (['X[:, f_index]'], {}), '(X[:, f_index])\n', (567, 582), True, 'import numpy as np\n'), ((1815, 1833), 'numpy.ones', 'np.ones', (['pred.size'], {}), '(pred.size)\n', (1822, 1833), True, 'import numpy as np\n'), ((838, 867), 'numpy.arange', 'np.arange', (['v_min', 'v_max', 'step'], {}), '(v_min, v_max, step)\n', (847, 867), True, 'import numpy as np\n'), ((894, 915), 'numpy.random.choice', 'np.random.choice', (['rng'], {}), '(rng)\n', (910, 915), True, 'import numpy as np\n'), ((1097, 1129), 'numpy.random.randint', 'np.random.randint', (['(0)', 'n_features'], {}), '(0, n_features)\n', (1114, 1129), True, 'import numpy as np\n'), ((1154, 1175), 'numpy.min', 'np.min', (['X[:, f_index]'], {}), '(X[:, f_index])\n', (1160, 1175), True, 'import numpy as np\n'), ((1200, 1221), 'numpy.max', 'np.max', (['X[:, f_index]'], {}), '(X[:, f_index])\n', (1206, 1221), True, 'import numpy as np\n')] |
import numpy as np
import cv2 as cv
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
def read_gray_image(path):
    """Load the image at *path*, convert to grayscale, and save a preview.

    Writes the grayscale rendering to ./results/img_gray.png and returns
    the (BGR, grayscale) image pair.
    """
    bgr = cv.imread(path)
    gray = cv.cvtColor(bgr, cv.COLOR_BGR2GRAY)
    plt.imshow(gray, cmap='gray', interpolation='nearest')
    plt.savefig('./results/img_gray.png')
    plt.close()
    return bgr, gray
def threshold_otsu_method(img_gray):
    """Binarize a grayscale image using Otsu's automatic threshold.

    Saves a preview to ./results/img_thresholded.png and returns the
    binary image. The threshold value chosen by Otsu's method is not
    needed by callers, so it is discarded.
    """
    # `_` replaces the previously unused `ret` local.
    _, thresholded_img = cv.threshold(img_gray, 0, 255, cv.THRESH_OTSU)
    plt.imshow(thresholded_img, cmap='gray', interpolation='nearest')
    plt.savefig('./results/img_thresholded.png')
    plt.close()
    return thresholded_img
# canny method
def extract_contour(thresholded_img):
    """Detect edges in the binarized card image with the Canny detector.

    Saves a preview to ./results/img_contour.png and returns the edge map.
    """
    canny_low = 3
    canny_high = 30
    edge_map = cv.Canny(thresholded_img, canny_low, canny_high)
    plt.imshow(edge_map, cmap='gray', interpolation='nearest')
    plt.savefig('./results/img_contour.png')
    plt.close()
    return edge_map
def prob_hough_transform(img, img_edges):
    """Draw probabilistic Hough line segments onto an RGB copy of ``img``.

    The annotated image is built but not saved or returned (the saving
    code was disabled); the function exists for debugging the detector.
    """
    min_segment_length = 10
    max_segment_gap = 35
    segments = cv.HoughLinesP(img_edges, 1, np.pi / 180, 1,
                              min_segment_length, max_segment_gap)
    annotated = cv.cvtColor(img, cv.COLOR_BGR2RGB)
    for segment in segments:
        for x1, y1, x2, y2 in segment:
            cv.line(annotated, (x1, y1), (x2, y2), (0, 255, 0), 2)
def hough_transform(img,img_edges):
    """Detect axis-aligned lines in the edge map with the standard Hough
    transform, drop near-duplicate lines, draw the survivors on the image
    (saved to ./results/img_hough.png), and return them as endpoint pairs
    [[x1,y1],[x2,y2]] extended across the whole image.
    """
    line_array = []
    # theta resolution of pi/2 restricts detection to horizontal/vertical
    # lines; accumulator threshold 90.
    lines = cv.HoughLines(img_edges,1,np.pi/2,90)
    img_rgb = cv.cvtColor(img, cv.COLOR_BGR2RGB)
    # indexes[i] == 1 marks line i for removal.
    indexes = [0]*len(lines)
    # to remove lines in extreme proximity, maybe due to the design of the card.
    # ========================================= preprocessing
    for r1 in range(len(lines)):
        if(indexes[r1]==0):
            r = lines[r1]
            for v1 in range(len(lines)):
                if(indexes[v1]==0):
                    v = lines[v1]
                    if(v1!=r1):
                        # Same angle and rho within 5px: keep the line with
                        # the smaller rho, drop the other.
                        if(abs(r[0][0]-v[0][0])<5 and r[0][1]-v[0][1]==0):
                            if(r[0][0]>v[0][0]):
                                indexes[r1] = 1
                            else:
                                indexes[v1] = 1
    lines1 = []
    #print('Length:', len(lines))
    # Keep only the lines that survived de-duplication.
    for i in range(len(indexes)):
        if(indexes[i]==0):
            lines1.append(lines[i])
    lines = np.asarray(lines1)
    #print(len(lines))
    x = img.shape[0]
    y = img.shape[1]
    for n in range(len(lines)):
        for rho,theta in lines[n]:
            # Convert (rho, theta) polar form to two endpoints stretched
            # across the image extent.
            a = np.cos(theta)
            b = np.sin(theta)
            x0 = a*rho
            y0 = b*rho
            x1 = round(x0 + y*(-b))
            y1 = round(y0 + x*(a))
            x2 = round(x0 - y*(-b))
            y2 = round(y0 - x*(a))
            line_array.append([[x1,y1],[x2,y2]])
            cv.line(img_rgb,((int)(x1),(int)(y1)),((int)(x2),(int)(y2)),(0,0,255),2)
    plt.imshow(img_rgb, cmap='gray', interpolation='nearest')
    plt.savefig('./results/img_hough.png')
    plt.close()
    return line_array
def find_corners(img, lines):
    """Intersect every ordered pair of detected lines.

    Each line is given as ``[[x1, y1], [x2, y2]]``. Returns three
    de-duplicated lists (insertion order preserved):
      * all intersection points,
      * points where the second line of a pair was vertical,
      * points where the first line of a pair was vertical.
    Parallel pairs (including two verticals) yield no point. ``img`` is
    unused but kept for interface compatibility with the caller.
    """
    crossings = []
    second_vertical = []
    first_vertical = []
    for line_a in lines:
        (xa1, ya1), (xa2, ya2) = line_a
        for line_b in lines:
            if line_a == line_b:
                continue
            (xb1, yb1), (xb2, yb2) = line_b
            if xa1 != xa2 and xb1 != xb2:
                # Both lines have a finite slope.
                slope_a = float(ya2 - ya1) / float(xa2 - xa1)
                icept_a = ya1 - xa1 * slope_a
                slope_b = float(yb2 - yb1) / float(xb2 - xb1)
                icept_b = yb1 - xb1 * slope_b
                if slope_a != slope_b:
                    py = (icept_a * slope_b - icept_b * slope_a) / (slope_b - slope_a)
                    if slope_a != 0:
                        px = (py - icept_a) / slope_a
                    else:
                        px = (py - icept_b) / slope_b
                    crossings.append([px, py])
            elif xa1 != xa2:
                # Second line is vertical: evaluate the first at x = xb1.
                slope_a = float(ya2 - ya1) / float(xa2 - xa1)
                icept_a = ya1 - xa1 * slope_a
                px = xb1
                py = slope_a * px + icept_a
                crossings.append([px, py])
                second_vertical.append([px, py])
            elif xb1 != xb2:
                # First line is vertical: evaluate the second at x = xa1.
                slope_b = float(yb2 - yb1) / float(xb2 - xb1)
                icept_b = yb1 - xb1 * slope_b
                px = xa1
                py = slope_b * px + icept_b
                crossings.append([px, py])
                first_vertical.append([px, py])

    def _unique(seq):
        # Order-preserving de-duplication (points are unhashable lists).
        kept = []
        for point in seq:
            if point not in kept:
                kept.append(point)
        return kept

    return _unique(crossings), _unique(second_vertical), _unique(first_vertical)
def plot_points(img, points, name):
    """Draw the detected corner points on the image and save the figure.

    Points outside the image bounds are skipped. The annotated figure is
    written to ./results/<name>.png.
    """
    # Removed the previously unused `green` local.
    img_rgb = cv.cvtColor(img, cv.COLOR_BGR2RGB)
    height, width = img_rgb.shape[0], img.shape[1]
    #print('points:', points)
    for point in points:
        if point[0] < width and point[1] < height:
            cv.circle(img_rgb, (int(point[0]), int(point[1])), 5, (50, 0, 250), -1)
    plt.imshow(img_rgb, cmap='gray', interpolation='nearest')
    plt.savefig('./results/' + name + '.png')
    plt.close()
def determine_rank(extracted_card):
    """Identify the rank of a rectified card image by template matching.

    The rank name is parsed out of the best-matching template's file path
    (``./templates/ranks/<rank>_<color>.<ext>``).
    NOTE(review): if no template matches, the returned path is '' and the
    split below raises IndexError -- preserved from the original code.
    """
    possible_ranks = ['./templates/ranks/2_black.png', './templates/ranks/3_red.png', './templates/ranks/3_red1.jpeg', './templates/ranks/7_red.png', './templates/ranks/8_red.jpg', './templates/ranks/9_black.png', './templates/ranks/10_red.png', './templates/ranks/A_red.png', './templates/ranks/4_black.png', './templates/ranks/7_red1.png', './templates/ranks/K_red.png', './templates/ranks/8_black.png', './templates/ranks/A_black1.png']
    card = cv.imread(extracted_card)
    card = cv.cvtColor(card, cv.COLOR_BGR2RGB)
    card_gray = cv.cvtColor(card, cv.COLOR_BGR2GRAY)
    best_template = template_matching_rank(card_gray, card, possible_ranks)
    rank = best_template.split('/')[3].split('_')[0]
    print('Rank of the card is : ', rank)
    return rank
def determine_suit(extracted_card):
    """Determine a card's suit from its rectified image.

    The card color is inferred by comparing total red vs. green channel
    intensity; the matching color's suit templates are then tried.
    Returns the first character of the suit name ('h'/'d'/'s'/'c').
    """
    red_possible_suits = ['./templates/suits/heart/heart.png', './templates/suits/heart/heart1.png', './templates/suits/heart/heart2.png', './templates/suits/heart/heart3.png']
    black_possible_suits = ['./templates/suits/spade/spade.jpg', './templates/suits/spade/spade1.jpg', './templates/suits/spade/spade2.png', './templates/suits/spade/spade3.png', './templates/suits/spade/spade4.png']
    img_rgb = cv.imread(extracted_card)  # opencv reads image in BGR by default.
    img_rgb = cv.cvtColor(img_rgb, cv.COLOR_BGR2RGB)  # convert to standard RGB.
    # Channel-intensity heuristic: red suits have clearly more red than
    # green. (Removed the previously unused blue-channel sum.)
    red_count = np.sum(img_rgb[:, :, 0])
    green_count = np.sum(img_rgb[:, :, 1])
    color = 'red' if red_count > green_count else 'black'
    # NOTE(review): the image is RGB at this point, so COLOR_BGR2GRAY swaps
    # the R/B channel weights; kept as-is to preserve existing behavior.
    img_gray = cv.cvtColor(img_rgb, cv.COLOR_BGR2GRAY)
    templates = red_possible_suits if color == 'red' else black_possible_suits
    suit = template_matching_suit(img_gray, img_rgb, templates, color)
    print('Color of the card is : ', color)
    print('Suit of the card is : ', suit)
    return suit
def template_matching_suit(image, img_rgb, templates, color):
    """Decide the suit by template matching against the given templates.

    Any match above a 0.8 normalized-correlation threshold selects the
    "solid" suit for that color (heart/spade); no match at all selects the
    other suit (diamond/club). Rectangles are drawn on ``img_rgb`` around
    the matches. Returns the first character of the suit name.
    """
    best_count = 0
    best_loc = None
    w = h = None
    suit = 0
    for template_path in templates:
        template = cv.imread(template_path, 0)
        w, h = template.shape[::-1]
        scores = cv.matchTemplate(image, template, cv.TM_CCOEFF_NORMED)
        hits = np.where(scores >= 0.8)
        if len(hits[0]) > best_count:
            best_count = len(hits[0])
            best_loc = hits
    if best_count > 0 and color == 'red':
        suit = 'heart'
    elif best_count == 0 and color == 'red':
        suit = 'diamond'
    elif best_count > 0 and color == 'black':
        suit = 'spade'
    elif best_count == 0 and color == 'black':
        suit = 'club'
    if best_count != 0:
        # NOTE(review): w/h come from the LAST template scanned, not
        # necessarily the best-matching one -- preserved from the original.
        for pt in zip(*best_loc[::-1]):
            cv.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)
    return suit[0]
def template_matching_rank(image, img_rgb, templates):
    """Find the rank template that matches ``image`` best.

    Returns the file path of the template with the most matches above a
    0.95 normalized-correlation threshold, or '' when nothing matches.
    Rectangles are drawn on ``img_rgb`` around the best template's matches.
    """
    best_count = 0
    best_name = ""
    best_loc = None
    w = h = None
    for template_path in templates:
        template = cv.imread(template_path, 0)
        w, h = template.shape[::-1]
        scores = cv.matchTemplate(image, template, cv.TM_CCOEFF_NORMED)
        hits = np.where(scores >= 0.95)
        if len(hits[0]) > best_count:
            best_count = len(hits[0])
            best_loc = hits
            best_name = template_path
    if best_count != 0:
        # NOTE(review): w/h belong to the LAST template scanned, not
        # necessarily the best one -- preserved from the original code.
        for pt in zip(*best_loc[::-1]):
            cv.rectangle(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)
    return best_name
def perspective_transform(img,points):
    """Rectify the card(s) bounded by the detected corner points.

    Groups the corner points by their exact y coordinate, takes the two
    smallest y groups as the top and bottom corner rows, and warps each
    consecutive corner quadruple to a 450x450 image saved under
    ./results/perspective/<i>.png. Returns the list of saved indices
    (as strings).

    NOTE(review): assumes the points fall into at least two groups with
    exactly matching y values; fewer groups raise IndexError on k[1].
    """
    # segregate points
    y = {}
    y1 = []
    y2 = []
    for i in points:
        if(i[1] not in y.keys()):
            y[i[1]] = [i]
        else:
            y[i[1]].append(i)
    k = list(y.keys())
    k.sort()
    # Top row (smallest y) and second row, each sorted left-to-right.
    y1 = y[k[0]]
    y1.sort(key=lambda x: x[0])
    y2 = y[k[1]]
    y2.sort(key=lambda x: x[0])
    index = []
    img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
    # Even number of top corners: step over corner pairs (one card each);
    # odd: step one corner at a time.
    if(len(y1)%2==0):
        loop_increment = 2
    else:
        loop_increment = 1
    #print('y1:', y1)
    #print('y2:', y2)
    for i in range(0, (int)(len(y1)), loop_increment):
        if(i<len(y1) and i+1<len(y1)):
            # Map the four corners to a 450x450 square and warp.
            pts1 = np.float32([y1[i], y2[i], y1[i+1], y2[i+1]])
            pts2 = np.float32([[0,0],[0, 450],[450, 0],[450,450]])
            M = cv.getPerspectiveTransform(pts1,pts2)
            dst = cv.warpPerspective(img,M,(450,450))
            plt.imshow(dst, cmap='gray', interpolation='nearest')
            plt.savefig('./results/perspective/'+(str)(i)+'.png')
            index.append((str)(i))
    plt.close()
    return index
if __name__ == '__main__':
    # Full pipeline: read -> threshold -> edges -> Hough lines -> corners
    # -> perspective-rectified cards -> suit/rank identification.
    source = './test_cards/card21.jpeg'
    image, gray = read_gray_image(source)
    thresholded = threshold_otsu_method(gray)
    edges = extract_contour(thresholded)
    lines = hough_transform(image, edges)
    points, pts_1, pts_2 = find_corners(image, lines)
    plot_points(image, points, 'points')
    index = perspective_transform(image, points)
    suit = []
    rank = []
    for tag in index:
        card_path = './results/perspective/' + tag + '.png'
        suit.append(determine_suit(card_path))
        rank.append(determine_rank(card_path))
    cards = [rank[k] + suit[k] for k in range(len(suit))]
    print('CARDS are : ', cards)
| [
"cv2.rectangle",
"cv2.warpPerspective",
"cv2.HoughLines",
"numpy.sin",
"matplotlib.pyplot.imshow",
"cv2.threshold",
"numpy.where",
"cv2.line",
"numpy.asarray",
"matplotlib.pyplot.close",
"cv2.matchTemplate",
"matplotlib.pyplot.savefig",
"cv2.getPerspectiveTransform",
"matplotlib.use",
"n... | [((54, 75), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (68, 75), False, 'import matplotlib\n'), ((148, 163), 'cv2.imread', 'cv.imread', (['path'], {}), '(path)\n', (157, 163), True, 'import cv2 as cv\n'), ((176, 211), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_BGR2GRAY'], {}), '(img, cv.COLOR_BGR2GRAY)\n', (187, 211), True, 'import cv2 as cv\n'), ((213, 271), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_gray'], {'cmap': '"""gray"""', 'interpolation': '"""nearest"""'}), "(img_gray, cmap='gray', interpolation='nearest')\n", (223, 271), True, 'from matplotlib import pyplot as plt\n'), ((273, 310), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./results/img_gray.png"""'], {}), "('./results/img_gray.png')\n", (284, 310), True, 'from matplotlib import pyplot as plt\n'), ((312, 323), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (321, 323), True, 'from matplotlib import pyplot as plt\n'), ((407, 453), 'cv2.threshold', 'cv.threshold', (['img_gray', '(0)', '(255)', 'cv.THRESH_OTSU'], {}), '(img_gray, 0, 255, cv.THRESH_OTSU)\n', (419, 453), True, 'import cv2 as cv\n'), ((455, 520), 'matplotlib.pyplot.imshow', 'plt.imshow', (['thresholded_img'], {'cmap': '"""gray"""', 'interpolation': '"""nearest"""'}), "(thresholded_img, cmap='gray', interpolation='nearest')\n", (465, 520), True, 'from matplotlib import pyplot as plt\n'), ((522, 566), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./results/img_thresholded.png"""'], {}), "('./results/img_thresholded.png')\n", (533, 566), True, 'from matplotlib import pyplot as plt\n'), ((568, 579), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (577, 579), True, 'from matplotlib import pyplot as plt\n'), ((710, 765), 'cv2.Canny', 'cv.Canny', (['thresholded_img', 'THRESHOLD_MIN', 'THRESHOLD_MAX'], {}), '(thresholded_img, THRESHOLD_MIN, THRESHOLD_MAX)\n', (718, 765), True, 'import cv2 as cv\n'), ((767, 826), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_edges'], 
{'cmap': '"""gray"""', 'interpolation': '"""nearest"""'}), "(img_edges, cmap='gray', interpolation='nearest')\n", (777, 826), True, 'from matplotlib import pyplot as plt\n'), ((828, 868), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./results/img_contour.png"""'], {}), "('./results/img_contour.png')\n", (839, 868), True, 'from matplotlib import pyplot as plt\n'), ((870, 881), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (879, 881), True, 'from matplotlib import pyplot as plt\n'), ((989, 1060), 'cv2.HoughLinesP', 'cv.HoughLinesP', (['img_edges', '(1)', '(np.pi / 180)', '(1)', 'minLineLength', 'maxLineGap'], {}), '(img_edges, 1, np.pi / 180, 1, minLineLength, maxLineGap)\n', (1003, 1060), True, 'import cv2 as cv\n'), ((1065, 1099), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_BGR2RGB'], {}), '(img, cv.COLOR_BGR2RGB)\n', (1076, 1099), True, 'import cv2 as cv\n'), ((1392, 1434), 'cv2.HoughLines', 'cv.HoughLines', (['img_edges', '(1)', '(np.pi / 2)', '(90)'], {}), '(img_edges, 1, np.pi / 2, 90)\n', (1405, 1434), True, 'import cv2 as cv\n'), ((1441, 1475), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_BGR2RGB'], {}), '(img, cv.COLOR_BGR2RGB)\n', (1452, 1475), True, 'import cv2 as cv\n'), ((2079, 2097), 'numpy.asarray', 'np.asarray', (['lines1'], {}), '(lines1)\n', (2089, 2097), True, 'import numpy as np\n'), ((2506, 2563), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_rgb'], {'cmap': '"""gray"""', 'interpolation': '"""nearest"""'}), "(img_rgb, cmap='gray', interpolation='nearest')\n", (2516, 2563), True, 'from matplotlib import pyplot as plt\n'), ((2565, 2603), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./results/img_hough.png"""'], {}), "('./results/img_hough.png')\n", (2576, 2603), True, 'from matplotlib import pyplot as plt\n'), ((2605, 2616), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2614, 2616), True, 'from matplotlib import pyplot as plt\n'), ((4033, 4067), 'cv2.cvtColor', 'cv.cvtColor', (['img', 
'cv.COLOR_BGR2RGB'], {}), '(img, cv.COLOR_BGR2RGB)\n', (4044, 4067), True, 'import cv2 as cv\n'), ((4239, 4296), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_rgb'], {'cmap': '"""gray"""', 'interpolation': '"""nearest"""'}), "(img_rgb, cmap='gray', interpolation='nearest')\n", (4249, 4296), True, 'from matplotlib import pyplot as plt\n'), ((4298, 4339), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('./results/' + name + '.png')"], {}), "('./results/' + name + '.png')\n", (4309, 4339), True, 'from matplotlib import pyplot as plt\n'), ((4337, 4348), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4346, 4348), True, 'from matplotlib import pyplot as plt\n'), ((5113, 5138), 'cv2.imread', 'cv.imread', (['extracted_card'], {}), '(extracted_card)\n', (5122, 5138), True, 'import cv2 as cv\n'), ((5150, 5188), 'cv2.cvtColor', 'cv.cvtColor', (['img_rgb', 'cv.COLOR_BGR2RGB'], {}), '(img_rgb, cv.COLOR_BGR2RGB)\n', (5161, 5188), True, 'import cv2 as cv\n'), ((5201, 5240), 'cv2.cvtColor', 'cv.cvtColor', (['img_rgb', 'cv.COLOR_BGR2GRAY'], {}), '(img_rgb, cv.COLOR_BGR2GRAY)\n', (5212, 5240), True, 'import cv2 as cv\n'), ((6029, 6054), 'cv2.imread', 'cv.imread', (['extracted_card'], {}), '(extracted_card)\n', (6038, 6054), True, 'import cv2 as cv\n'), ((6106, 6144), 'cv2.cvtColor', 'cv.cvtColor', (['img_rgb', 'cv.COLOR_BGR2RGB'], {}), '(img_rgb, cv.COLOR_BGR2RGB)\n', (6117, 6144), True, 'import cv2 as cv\n'), ((6357, 6368), 'numpy.sum', 'np.sum', (['red'], {}), '(red)\n', (6363, 6368), True, 'import numpy as np\n'), ((6384, 6397), 'numpy.sum', 'np.sum', (['green'], {}), '(green)\n', (6390, 6397), True, 'import numpy as np\n'), ((6412, 6424), 'numpy.sum', 'np.sum', (['blue'], {}), '(blue)\n', (6418, 6424), True, 'import numpy as np\n'), ((6615, 6654), 'cv2.cvtColor', 'cv.cvtColor', (['img_rgb', 'cv.COLOR_BGR2GRAY'], {}), '(img_rgb, cv.COLOR_BGR2GRAY)\n', (6626, 6654), True, 'import cv2 as cv\n'), ((8822, 8856), 'cv2.cvtColor', 'cv.cvtColor', (['img', 
'cv.COLOR_BGR2RGB'], {}), '(img, cv.COLOR_BGR2RGB)\n', (8833, 8856), True, 'import cv2 as cv\n'), ((7158, 7181), 'cv2.imread', 'cv.imread', (['template_', '(0)'], {}), '(template_, 0)\n', (7167, 7181), True, 'import cv2 as cv\n'), ((7263, 7317), 'cv2.matchTemplate', 'cv.matchTemplate', (['image', 'template', 'cv.TM_CCOEFF_NORMED'], {}), '(image, template, cv.TM_CCOEFF_NORMED)\n', (7279, 7317), True, 'import cv2 as cv\n'), ((7345, 7371), 'numpy.where', 'np.where', (['(res >= threshold)'], {}), '(res >= threshold)\n', (7353, 7371), True, 'import numpy as np\n'), ((8038, 8061), 'cv2.imread', 'cv.imread', (['template_', '(0)'], {}), '(template_, 0)\n', (8047, 8061), True, 'import cv2 as cv\n'), ((8099, 8153), 'cv2.matchTemplate', 'cv.matchTemplate', (['image', 'template', 'cv.TM_CCOEFF_NORMED'], {}), '(image, template, cv.TM_CCOEFF_NORMED)\n', (8115, 8153), True, 'import cv2 as cv\n'), ((8182, 8208), 'numpy.where', 'np.where', (['(res >= threshold)'], {}), '(res >= threshold)\n', (8190, 8208), True, 'import numpy as np\n'), ((1163, 1215), 'cv2.line', 'cv.line', (['img_rgb', '(x1, y1)', '(x2, y2)', '(0, 255, 0)', '(2)'], {}), '(img_rgb, (x1, y1), (x2, y2), (0, 255, 0), 2)\n', (1170, 1215), True, 'import cv2 as cv\n'), ((2220, 2233), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2226, 2233), True, 'import numpy as np\n'), ((2241, 2254), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2247, 2254), True, 'import numpy as np\n'), ((7733, 7798), 'cv2.rectangle', 'cv.rectangle', (['img_rgb', 'pt', '(pt[0] + w, pt[1] + h)', '(0, 0, 255)', '(2)'], {}), '(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)\n', (7745, 7798), True, 'import cv2 as cv\n'), ((8356, 8421), 'cv2.rectangle', 'cv.rectangle', (['img_rgb', 'pt', '(pt[0] + w, pt[1] + h)', '(0, 0, 255)', '(2)'], {}), '(img_rgb, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)\n', (8368, 8421), True, 'import cv2 as cv\n'), ((9059, 9107), 'numpy.float32', 'np.float32', (['[y1[i], y2[i], y1[i + 1], y2[i + 1]]'], {}), 
'([y1[i], y2[i], y1[i + 1], y2[i + 1]])\n', (9069, 9107), True, 'import numpy as np\n'), ((9114, 9166), 'numpy.float32', 'np.float32', (['[[0, 0], [0, 450], [450, 0], [450, 450]]'], {}), '([[0, 0], [0, 450], [450, 0], [450, 450]])\n', (9124, 9166), True, 'import numpy as np\n'), ((9169, 9207), 'cv2.getPerspectiveTransform', 'cv.getPerspectiveTransform', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (9195, 9207), True, 'import cv2 as cv\n'), ((9216, 9254), 'cv2.warpPerspective', 'cv.warpPerspective', (['img', 'M', '(450, 450)'], {}), '(img, M, (450, 450))\n', (9234, 9254), True, 'import cv2 as cv\n'), ((9255, 9308), 'matplotlib.pyplot.imshow', 'plt.imshow', (['dst'], {'cmap': '"""gray"""', 'interpolation': '"""nearest"""'}), "(dst, cmap='gray', interpolation='nearest')\n", (9265, 9308), True, 'from matplotlib import pyplot as plt\n'), ((9395, 9406), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9404, 9406), True, 'from matplotlib import pyplot as plt\n')] |
__author__ = '<NAME>'
import multiprocessing as mp
import numpy as np
import os # For path names working under Windows and Linux
from pypet import Environment, cartesian_product
def multiply(traj, result_list):
    """Multiply the trajectory's two exploration parameters.

    Example of a sophisticated simulation that involves multiplying two
    values. The product of ``traj.x`` and ``traj.y`` is written into the
    shared ``result_list`` at the run's index (``traj.v_idx``) instead of
    being returned.

    :param traj:
        Trajectory containing the parameters in a particular combination;
        it also serves as a container for results.
    """
    result_list[traj.v_idx] = traj.x * traj.y
def main():
    """Run the pypet multiplication example with a multiprocessing-shared list."""
    # All results go into this HDF5 file.
    filename = os.path.join('hdf5', 'example_12.hdf5')
    env = Environment(trajectory='Multiplication',
                      filename=filename,
                      file_title='Example_12_Sharing_Data',
                      overwrite_file=True,
                      comment='The first example!',
                      # The shared multiprocessing list cannot survive a
                      # restart, so the continue feature must stay off.
                      continuable=False,
                      multiproc=True,
                      ncores=2)
    # The environment created a trajectory container for us.
    traj = env.trajectory
    # The two explored dimensions.
    traj.f_add_parameter('x', 1, comment='I am the first dimension!')
    traj.f_add_parameter('y', 1, comment='I am the second dimension!')
    # Explore the full cartesian grid of parameter values.
    traj.f_explore(cartesian_product({'x': [1, 2, 3, 4], 'y': [6, 7, 8]}))
    # Manager-backed list shared across worker processes, pre-filled with zeros.
    shared_results = mp.Manager().list()
    shared_results[:] = [0] * len(traj)
    # Run one `multiply` per parameter combination.
    env.run(multiply, shared_results)
    # Persist the collected products as a numpy array.
    traj.f_add_result('z', np.array(shared_results))
    print(traj.z)
    # Disable logging and close all log-files.
    env.disable_logging()


if __name__ == '__main__':
    main()
"pypet.Environment",
"pypet.cartesian_product",
"os.path.join",
"numpy.array",
"multiprocessing.Manager"
] | [((682, 721), 'os.path.join', 'os.path.join', (['"""hdf5"""', '"""example_12.hdf5"""'], {}), "('hdf5', 'example_12.hdf5')\n", (694, 721), False, 'import os\n'), ((732, 935), 'pypet.Environment', 'Environment', ([], {'trajectory': '"""Multiplication"""', 'filename': 'filename', 'file_title': '"""Example_12_Sharing_Data"""', 'overwrite_file': '(True)', 'comment': '"""The first example!"""', 'continuable': '(False)', 'multiproc': '(True)', 'ncores': '(2)'}), "(trajectory='Multiplication', filename=filename, file_title=\n 'Example_12_Sharing_Data', overwrite_file=True, comment=\n 'The first example!', continuable=False, multiproc=True, ncores=2)\n", (743, 935), False, 'from pypet import Environment, cartesian_product\n'), ((1534, 1588), 'pypet.cartesian_product', 'cartesian_product', (["{'x': [1, 2, 3, 4], 'y': [6, 7, 8]}"], {}), "({'x': [1, 2, 3, 4], 'y': [6, 7, 8]})\n", (1551, 1588), False, 'from pypet import Environment, cartesian_product\n'), ((1965, 1986), 'numpy.array', 'np.array', (['result_list'], {}), '(result_list)\n', (1973, 1986), True, 'import numpy as np\n'), ((1694, 1706), 'multiprocessing.Manager', 'mp.Manager', ([], {}), '()\n', (1704, 1706), True, 'import multiprocessing as mp\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 11 11:16:27 2020
@author: hiroyasu
"""
import cvxpy as cp
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import control
import SCPmulti as scp
import pickle
# Simulation parameters re-exported from the SCP module so both scripts agree.
DT = scp.DT                # integration time step
TSPAN = scp.TSPAN          # simulation horizon
M = scp.M                  # vehicle mass
II = scp.II                # moment of inertia
L = scp.L                  # thruster arm length
bb = scp.bb                # thruster arm offset
FMIN = scp.FMIN            # per-thruster force lower bound
FMAX = scp.FMAX            # per-thruster force upper bound
RungeNum = scp.RungeNum    # RK4 substeps per control step
AA = scp.AA                # linear drift matrix of the dynamics
Robs = scp.Robs            # obstacle radius
Rsafe = scp.Rsafe          # safety margin radius
XOBSs = scp.XOBSs          # obstacle center positions
# Nominal (unrobustified) desired state/input histories from SCP.
XXd0 = np.load('data/params/desired_n/Xhis.npy')
UUd0 = np.load('data/params/desired_n/Uhis.npy')
dratio = 0.15
# Worst-case disturbance norm bound: dratio per axis over 3 axes.
d_over = np.sqrt(3)*dratio
X0 = scp.X0                # initial state
Xf = scp.Xf                # terminal state
class CCM:
    """Control Contraction Metric (CCM) computation and closed-loop simulation.

    Holds desired state/input trajectories, solves convex programs (cvxpy)
    for a time-varying metric W(t) (with M(t) = W(t)^-1), and simulates the
    CLF-QP tracking controller along the nominal trajectory.
    """
    def __init__(self,XXd0,UUd0,XXds,UUds,tspan=TSPAN,dt=DT,runge_num=RungeNum,m=M,I=II,l=L,b=bb,A=AA,fmin=FMIN,fmax=FMAX):
        # Current desired trajectory (states XXd, inputs UUd) plus the full
        # collection of candidate trajectories (XXds/UUds).
        self.XXd = XXd0
        self.UUd = UUd0
        self.XXds = XXds
        self.UUds = UUds
        self.tspan = tspan
        self.dt = dt
        self.runge_num = runge_num
        # RK4 substep size.
        self.h = dt/runge_num
        self.m = m
        self.I = I
        self.l = l
        self.b = b
        self.A = A
        self.fmin = fmin
        self.fmax = fmax
    def dynamics(self,t,states,inputs):
        """Continuous-time dynamics dX/dt = A X + B(X) U."""
        A = self.A
        B = self.GetB(states)
        # Work with column vectors, then flatten back to 1-D.
        Xv = np.transpose(np.array([states]))
        Uv = np.transpose(np.array([inputs]))
        dXdt = A.dot(Xv)+B.dot(Uv)
        dXdt = dXdt[:,0]
        return dXdt
    def GetB(self,states):
        """State-dependent input matrix mapping 8 thruster forces to
        accelerations; depends on the heading angle states[2]."""
        m = self.m
        I = self.I
        l = self.l
        b = self.b
        th = states[2]
        # Body-to-world force/torque transform ...
        T = np.array([[np.cos(th)/m,np.sin(th)/m,0],[-np.sin(th)/m,np.cos(th)/m,0],[0,0,1/2./I]])
        # ... times the thruster allocation matrix.
        H = np.array([[-1,-1,0,0,1,1,0,0],[0,0,-1,-1,0,0,1,1],[-l,l,-b,b,-l,l,-b,b]])
        # Forces only affect the velocity half of the state.
        B = np.vstack((np.zeros((3,8)),T@H))
        return B
    def rk4(self,t,X,U):
        """One classical Runge-Kutta 4 substep of size self.h."""
        h = self.h
        k1 = self.dynamics(t,X,U)
        k2 = self.dynamics(t+h/2.,X+k1*h/2.,U)
        k3 = self.dynamics(t+h/2.,X+k2*h/2.,U)
        k4 = self.dynamics(t+h,X+k3*h,U)
        return t+h,X+h*(k1+2.*k2+2.*k3+k4)/6.
    def one_step_sim(self,t,X,U):
        """Integrate one control interval (runge_num RK4 substeps) with the
        input U held constant."""
        runge_num = self.runge_num
        for num in range(0, runge_num):
            t,X = self.rk4(t,X,U)
        return t,X
    def GetCCM(self,alp):
        """Solve the SDP for a time-varying CCM W(t) at contraction rate alp.

        Returns (status, optval, WWout, MMout, chi, nu) where WWout/MMout map
        time index -> W / M = W^-1 (normalized by nu) and chi bounds the
        metric's condition number.
        """
        dt = self.dt
        epsilon = 0.
        XX = self.XXd
        N = XX.shape[0]-1
        I = np.identity(6)
        WW = {}
        # One PSD 6x6 decision variable per time step.
        for i in range(N+1):
            WW[i] = cp.Variable((6,6),PSD=True)
        nu = cp.Variable(nonneg=True)
        chi = cp.Variable(nonneg=True)
        # I <= W <= chi*I at every step (uniform conditioning bound).
        constraints = [chi*I-WW[0] >> epsilon*I,WW[0]-I >> epsilon*I]
        for k in range(N):
            Xk = XX[k,:]
            Ax = self.A
            Bx = self.GetB(Xk)
            Wk = WW[k]
            Wkp1 = WW[k+1]
            # Discrete-time contraction LMI with Wdot approximated by a
            # forward difference.
            constraints += [-2*alp*Wk-(-(Wkp1-Wk)/dt+Ax@Wk+Wk@Ax.T-2*nu*Bx@Bx.T) >> epsilon*I]
            constraints += [chi*I-Wkp1 >> epsilon*I,Wkp1-I >> epsilon*I]
        prob = cp.Problem(cp.Minimize(chi+nu),constraints)
        prob.solve(solver=cp.MOSEK)
        cvx_status = prob.status
        print(cvx_status)
        WWout = {}
        MMout = {}
        for i in range(N+1):
            # Normalize by nu and invert to get the metric M.
            WWout[i] = WW[i].value/nu.value
            MMout[i] = np.linalg.inv(WWout[i])
        chi = chi.value
        nu = nu.value
        cvx_optval = chi/alp
        return cvx_status,cvx_optval,WWout,MMout,chi,nu
    def GetCCM2(self):
        """Variant of GetCCM that also treats the contraction rate alp as a
        decision variable via a symmetric 2x2 matrix pp = [[tau,.],[chi,alp]],
        minimizing tau."""
        dt = self.dt
        epsilon = 0.
        XX = self.XXd
        N = XX.shape[0]-1
        I = np.identity(6)
        WW = {}
        for i in range(N+1):
            WW[i] = cp.Variable((6,6),PSD=True)
        nu = cp.Variable(nonneg=True)
        pp = cp.Variable((2,2),symmetric=True)
        tau = pp[0,0]
        chi = pp[1,0]
        alp = pp[1,1]
        constraints = [chi*I-WW[0] >> epsilon*I,WW[0]-I >> epsilon*I]
        constraints += [tau >= 0, alp >= 0, chi >= 0]
        constraints += [pp >> epsilon*np.identity(2)]
        for k in range(N):
            Xk = XX[k,:]
            Ax = self.A
            Bx = self.GetB(Xk)
            Wk = WW[k]
            Wkp1 = WW[k+1]
            constraints += [-2*alp*I-(-(Wkp1-Wk)/dt+Ax@Wk+Wk@Ax.T-2*nu*Bx@Bx.T) >> epsilon*I]
            constraints += [chi*I-Wkp1 >> epsilon*I,Wkp1-I >> epsilon*I]
        prob = cp.Problem(cp.Minimize(tau),constraints)
        prob.solve()
        cvx_status = prob.status
        print(cvx_status)
        cvx_optval = tau.value
        WWout = {}
        MMout = {}
        for i in range(N+1):
            WWout[i] = WW[i].value/nu.value
            MMout[i] = np.linalg.inv(WWout[i])
        chi = chi.value
        nu = nu.value
        tau = tau.value
        alp = alp.value
        return cvx_status,cvx_optval,WWout,MMout,chi,nu,tau,alp
    def CLFQP(self,X,Xd,M,Ud,alp):
        """Control-Lyapunov-function QP tracking controller.

        Finds the input U closest to the feedforward Ud (plus a slack p)
        such that the metric energy e^T M e decays at rate alp, subject to
        per-thruster force limits. Returns U as a flat array.
        """
        dt = self.dt
        U = cp.Variable((8,1))
        Ud = np.array([Ud]).T
        # Slack variable keeps the QP feasible when the decay constraint
        # cannot be met within the force limits.
        p = cp.Variable((1,1))
        fmin = self.fmin
        fmax = self.fmax
        A = self.A
        Bx = self.GetB(X)
        Bxd = self.GetB(Xd)
        # Tracking error as a column vector.
        evec = np.array([X-Xd]).T
        constraints = [evec.T@(M@A+A.T@M)@evec+2*evec.T@M@Bx@U-2*evec.T@M@Bxd@Ud <= -2*alp*evec.T@M@evec+p]
        for i in range(8):
            constraints += [U[i,0] <= fmax, U[i,0] >= fmin]
        prob = cp.Problem(cp.Minimize(cp.sum_squares(U-Ud)+p**2),constraints)
        prob.solve()
        cvx_status = prob.status
        U = U.value
        U = np.ravel(U)
        return U
    def FinalTrajectory(self,MM,alp,XXdRCT,UUdRCT):
        """Simulate the closed loop under random disturbances, tracking both
        the nominal trajectory (X1) and the robust-tube trajectory XXdRCT
        (X2) with the same disturbance realization.

        Returns (times, X1 history, U1 history, X2 history, U2 history).
        """
        dt = self.dt
        XXd = self.XXd
        UUd = self.UUd
        N = UUd.shape[0]
        X0 = XXd[0,:]
        B = self.GetB(X0)
        t = 0
        t1 = 0
        X1 = X0
        X2 = X0
        Xd = XXd[0,:]
        XdRCT = XXdRCT[0,:]
        # Preallocate history buffers.
        this = np.zeros(N+1)
        X1his = np.zeros((N+1,X0.size))
        X2his = np.zeros((N+1,X0.size))
        this[0] = t
        X1his[0,:] = X1
        U1his = np.zeros((N,B.shape[1]))
        X2his[0,:] = X2
        U2his = np.zeros((N,B.shape[1]))
        for i in range(N):
            M = MM[i]
            A = self.A
            Bx1 = self.GetB(X1)
            Bx2 = self.GetB(X2)
            Q = 0.1*np.identity(6)
            R = 1*np.identity(8)
            # LQR gain computed but unused below (legacy comparison code).
            K,P,E = control.lqr(A,Bx1,Q,R)
            #U = UUd[i,:]-Bx.T@M@(X-Xd)
            U1 = self.CLFQP(X1,Xd,M,UUd[i,:],alp)
            U2 = self.CLFQP(X2,XdRCT,M,UUdRCT[i,:],alp)
            t,X1 = self.one_step_sim(t,X1,U1)
            t1,X2 = self.one_step_sim(t1,X2,U2)
            Xd = XXd[i+1,:]
            XdRCT = XXdRCT[i+1,:]
            # Random velocity-channel disturbance, shared by both runs.
            d1 = np.hstack((np.zeros(3),(np.random.rand(3)*2-1)))*dratio*10
            #d1 = (np.random.rand(6)*2-1)*dratio
            X1 = X1+d1*dt
            X2 = X2+d1*dt
            this[i+1] = t
            X1his[i+1,:] = X1
            U1his[i,:] = U1
            X2his[i+1,:] = X2
            U2his[i,:] = U2
        return this,X1his,U1his,X2his,U2his
    def GetOptimalCCMs(self,alp):
        """Solve GetCCM for every candidate trajectory in self.XXds/UUds and
        collect the optimal values, chi/nu, statuses and metrics."""
        XXds = self.XXds
        UUds = self.UUds
        numX0 = len(XXds)
        optvals = np.zeros(numX0)
        chi_s = np.zeros(numX0)
        nu_s = np.zeros(numX0)
        status_str = []
        WWs = {}
        for iX0 in range(numX0):
            print('iX0 = ',iX0)
            # Point the solver at trajectory iX0 and solve.
            self.XXd = XXds[iX0]
            self.UUd = UUds[iX0]
            cvx_status,cvx_optval,WW,MM,chi,nu = self.GetCCM(alp)
            optvals[iX0] = cvx_optval/alp
            chi_s[iX0] = chi
            nu_s[iX0] = nu
            WWs[iX0] = WW
            status_str.append(cvx_status)
        self.WWs = WWs
        XXs = self.XXds
        return optvals,chi_s,nu_s,status_str,XXs,WWs
    def SaveTrainingData(self):
        """Flatten all trajectories and their metrics into neural-network
        training matrices.

        Returns stacked states XhisNN, the metric tensor MMs, row-flattened
        W and M histories, and cMhisNN holding the upper-triangular Cholesky
        factors of M packed diagonal by diagonal.
        """
        XXs = self.XXds
        WWs = self.WWs
        ns = WWs[0][0].shape[0]
        N = len(WWs[0])-1
        NX0 = len(WWs)
        XhisNN = XXs[0]
        MMs = np.zeros((ns,ns,N+1,NX0))
        NN = NX0*(N+1)
        WhisNN = np.zeros((NN,ns**2))
        MhisNN = np.zeros((NN,ns**2))
        # Number of entries in an upper-triangular ns x ns factor.
        nc = int(1/2*ns*(ns+1))
        cMhisNN = np.zeros((NN,nc))
        for j in range(NX0):
            if j > 0:
                XhisNN = np.vstack((XhisNN,XXs[j]))
            for i in range(N+1):
                Wi = WWs[j][i]
                Mi = np.linalg.inv(Wi)
                MMs[:,:,i,j] = Mi
                WhisNN[i+(N+1)*j,:] = np.reshape(Wi,ns**2)
                MhisNN[i+(N+1)*j,:] = np.reshape(Mi,ns**2)
                cholMi = np.linalg.cholesky(Mi)
                cholMi = cholMi.T # upper triangular
                # Pack the factor diagonal by diagonal into one flat row.
                for ii in range (ns):
                    jj = (ns-1)-ii;
                    di = np.diag(cholMi,jj)
                    cMhisNN[i+(N+1)*j,int(1/2*ii*(ii+1)):int(1/2*(ii+1)*(ii+2))] = di
        return XhisNN,MMs,WhisNN,MhisNN,cMhisNN
def Rot2d(th):
    """Return the 2x2 counter-clockwise rotation matrix for angle ``th`` (radians)."""
    c = np.cos(th)
    s = np.sin(th)
    return np.array([[c, -s], [s, c]])
def GetTubePlot(XXdRCT, alp, chi, d_over, th):
    """Offset the desired x-y path sideways by the robust-tube radius.

    The tube radius is d_over*sqrt(chi)/alp. Each consecutive segment
    direction is normalized, rotated by ``th`` (e.g. +/- pi/2 for the two
    tube edges), scaled by the radius, and added to the path point.
    """
    tube_radius = d_over * np.sqrt(chi) / alp
    path_xy = XXdRCT[:, 0:2]
    directions = np.diff(path_xy, axis=0)
    rot = Rot2d(th)  # rotation is the same for every segment, hoist it
    for k in range(directions.shape[0]):
        directions[k, :] = rot @ (directions[k, :] / np.linalg.norm(directions[k, :]))
    return path_xy[0:directions.shape[0], :] + directions * tube_radius
def SaveDict(filename, var):
    """Pickle ``var`` to ``filename`` (binary mode)."""
    with open(filename, 'wb') as output:
        pickle.dump(var, output)
def LoadDict(filename):
    """Load and return a pickled object from ``filename``."""
    with open(filename, 'rb') as pkl_file:
        return pickle.load(pkl_file)
if __name__ == "__main__":
    # Reproducible disturbance realization for the closed-loop simulation.
    np.random.seed(seed=19940808)
    # np.random.seed(seed=6881994)
    #np.random.seed(seed=1995226)
    XXds = LoadDict('data/params/desireds/XXds.pkl')
    UUds = LoadDict('data/params/desireds/UUds.pkl')
    ccm = CCM(XXd0,UUd0,XXds,UUds)
    '''
    # macro alpha
    alp_list = np.array([0.01,0.1,0.5,1,1.5,2,3,10])
    a_len = alp_list.size
    optvals = np.zeros(a_len)
    for i in range(a_len):
        print(i)
        alp = alp_list[i]
        cvx_status,cvx_optval,WW,MM,chi,nu = ccm.GetCCM(alp)
        optvals[i] = cvx_optval
    plt.figure()
    plt.semilogx(alp_list,optvals)
    alp = alp_list[np.argmin(optvals)]
    np.save('data/params/alpha_macro/alp.npy',alp)
    np.save('data/params/alpha_macro/alp_list.npy',alp_list)
    np.save('data/params/alpha_macro/optvals.npy',optvals)
    '''
    '''
    # micro alpha
    alp_list = np.arange(0.1,1.5,0.1)
    a_len = alp_list.size
    optvals = np.zeros(a_len)
    for i in range(a_len):
        print(i)
        alp = alp_list[i]
        cvx_status,cvx_optval,WW,MM,chi,nu = ccm.GetCCM(alp)
        optvals[i] = cvx_optval
    plt.figure()
    plt.semilogx(alp_list,optvals)
    alp = alp_list[np.argmin(optvals)]
    np.save('data/params/alpha_micro/alp.npy',alp)
    np.save('data/params/alpha_micro/alp_list.npy',alp_list)
    np.save('data/params/alpha_micro/optvals.npy',optvals)
    '''
    '''
    # mmicro alpha
    alp_list = np.arange(0.5,0.71,0.01)
    a_len = alp_list.size
    optvals = np.zeros(a_len)
    for i in range(a_len):
        print(i)
        alp = alp_list[i]
        cvx_status,cvx_optval,WW,MM,chi,nu = ccm.GetCCM(alp)
        optvals[i] = cvx_optval
    plt.figure()
    plt.semilogx(alp_list,optvals)
    alp = alp_list[np.argmin(optvals)]
    np.save('data/params/alpha_mmicro/alp.npy',alp)
    np.save('data/params/alpha_mmicro/alp_list.npy',alp_list)
    np.save('data/params/alpha_mmicro/optvals.npy',optvals)
    '''
    '''
    alp = np.load('data/params/alpha_mmicro/alp.npy')
    optvals,chi_s,nu_s,status_str,XXs,WWs = ccm.GetOptimalCCMs(alp)
    np.save('data/params/optimal/alp.npy',alp)
    np.save('data/params/optimal/optvals.npy',optvals)
    np.save('data/params/optimal/chi_s.npy',chi_s)
    np.save('data/params/optimal/nu_s.npy',nu_s)
    np.save('data/params/optimal/status_str.npy',status_str)
    SaveDict('data/params/optimal/XXs.pkl',XXs)
    SaveDict('data/params/optimal/WWs.pkl',WWs)
    '''
    '''
    XXs = LoadDict('data/params/optimal/XXs.pkl')
    WWs = LoadDict('data/params/optimal/WWs.pkl')
    ccm = CCM(XXd0,UUd0,XXs,UUds)
    ccm.WWs = WWs
    ccm.XXds = XXs
    XhisNN,MMs,WhisNN,MhisNN,cMhisNN = ccm.SaveTrainingData()
    np.save('data/training_data/XhisNN.npy',XhisNN)
    np.save('data/training_data/WWs.npy',WWs)
    np.save('data/training_data/MMs.npy',MMs)
    np.save('data/training_data/WhisNN.npy',WhisNN)
    np.save('data/training_data/MhisNN.npy',MhisNN)
    np.save('data/training_data/cMhisNN.npy',cMhisNN)
    '''
    '''
    # robust tube desired trajectory
    alp = np.load('data/params/alpha_mmicro/alp.npy')
    dtra = scp.DesiredTrajectories(X0,Xf)
    ccm.XXd = XXd0
    ccm.UUd = UUd0
    cvx_status,cvx_optval,WW,MM,chi,nu = ccm.GetCCM(alp)
    dtra.SCPRCT(alp,chi,d_over)
    this,XXdRCT,UUdRCT = dtra.FinalTrajectory()
    np.save('data/params/desiredRCT/this.npy',this)
    np.save('data/params/desiredRCT/XXdRCT.npy',XXdRCT)
    np.save('data/params/desiredRCT/UUdRCT.npy',UUdRCT)
    '''
    alp = np.load('data/params/alpha_mmicro/alp.npy')
    # BUG FIX: this call was commented out, which left `MM` and `chi`
    # undefined and made FinalTrajectory/GetTubePlot below raise NameError.
    cvx_status,cvx_optval,WW,MM,chi,nu = ccm.GetCCM(alp)
    XXdRCT = np.load('data/params/desiredRCT/XXdRCT.npy')
    UUdRCT = np.load('data/params/desiredRCT/UUdRCT.npy')
    ccm.XXd = XXd0
    ccm.UUd = UUd0
    # Simulate tracking of the nominal (X1) and robustified (X2) trajectories.
    this,X1his,U1his,X2his,U2his = ccm.FinalTrajectory(MM,alp,XXdRCT,UUdRCT)
    # Upper and lower robust-tube boundaries around the desired path.
    xxTube1 = GetTubePlot(XXdRCT,alp,chi,d_over,np.pi/2)
    xxTube2 = GetTubePlot(XXdRCT,alp,chi,d_over,-np.pi/2)
    Nplot = xxTube1.shape[0]
    plt.figure()
    plt.plot(X1his[:,0],X1his[:,1])
    plt.plot(X2his[:,0],X2his[:,1])
    #plt.plot(xxTube1[:,0],xxTube1[:,1])
    #plt.plot(xxTube2[:,0],xxTube2[:,1])
    plt.plot(XXdRCT[:,0],XXdRCT[:,1],'--k')
    # Shade the tube region: dark band down to zero, then white out the
    # region below the lower boundary.
    plt.fill_between(xxTube1[:,0],xxTube1[:,1],np.zeros(Nplot),facecolor='black',alpha=0.2)
    plt.fill_between(xxTube2[:,0],xxTube2[:,1],np.zeros(Nplot),facecolor='white')
    plt.fill_between(np.linspace(19.52,22,100),17.2528*np.ones(100),np.zeros(100),facecolor='white')
    # Draw the circular obstacles.
    for i in range(XOBSs.shape[0]):
        x,y=[],[]
        for _x in np.linspace(0,2*np.pi):
            x.append(Robs*np.cos(_x)+XOBSs[i,0])
            y.append(Robs*np.sin(_x)+XOBSs[i,1])
        plt.plot(x,y,'k')
    plt.axes().set_aspect('equal')
    plt.show()
| [
"cvxpy.sum_squares",
"numpy.sqrt",
"numpy.random.rand",
"numpy.array",
"numpy.linalg.norm",
"numpy.sin",
"cvxpy.Minimize",
"numpy.reshape",
"matplotlib.pyplot.plot",
"numpy.diff",
"numpy.linspace",
"numpy.random.seed",
"numpy.vstack",
"numpy.identity",
"numpy.ones",
"control.lqr",
"p... | [((475, 516), 'numpy.load', 'np.load', (['"""data/params/desired_n/Xhis.npy"""'], {}), "('data/params/desired_n/Xhis.npy')\n", (482, 516), True, 'import numpy as np\n'), ((524, 565), 'numpy.load', 'np.load', (['"""data/params/desired_n/Uhis.npy"""'], {}), "('data/params/desired_n/Uhis.npy')\n", (531, 565), True, 'import numpy as np\n'), ((589, 599), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (596, 599), True, 'import numpy as np\n'), ((8925, 8948), 'numpy.diff', 'np.diff', (['xxdRCT'], {'axis': '(0)'}), '(xxdRCT, axis=0)\n', (8932, 8948), True, 'import numpy as np\n'), ((9183, 9207), 'pickle.dump', 'pickle.dump', (['var', 'output'], {}), '(var, output)\n', (9194, 9207), False, 'import pickle\n'), ((9312, 9333), 'pickle.load', 'pickle.load', (['pkl_file'], {}), '(pkl_file)\n', (9323, 9333), False, 'import pickle\n'), ((9406, 9435), 'numpy.random.seed', 'np.random.seed', ([], {'seed': '(19940808)'}), '(seed=19940808)\n', (9420, 9435), True, 'import numpy as np\n'), ((12902, 12945), 'numpy.load', 'np.load', (['"""data/params/alpha_mmicro/alp.npy"""'], {}), "('data/params/alpha_mmicro/alp.npy')\n", (12909, 12945), True, 'import numpy as np\n'), ((13017, 13061), 'numpy.load', 'np.load', (['"""data/params/desiredRCT/XXdRCT.npy"""'], {}), "('data/params/desiredRCT/XXdRCT.npy')\n", (13024, 13061), True, 'import numpy as np\n'), ((13075, 13119), 'numpy.load', 'np.load', (['"""data/params/desiredRCT/UUdRCT.npy"""'], {}), "('data/params/desiredRCT/UUdRCT.npy')\n", (13082, 13119), True, 'import numpy as np\n'), ((13383, 13395), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (13393, 13395), True, 'import matplotlib.pyplot as plt\n'), ((13400, 13434), 'matplotlib.pyplot.plot', 'plt.plot', (['X1his[:, 0]', 'X1his[:, 1]'], {}), '(X1his[:, 0], X1his[:, 1])\n', (13408, 13434), True, 'import matplotlib.pyplot as plt\n'), ((13436, 13470), 'matplotlib.pyplot.plot', 'plt.plot', (['X2his[:, 0]', 'X2his[:, 1]'], {}), '(X2his[:, 0], X2his[:, 1])\n', (13444, 
13470), True, 'import matplotlib.pyplot as plt\n'), ((13554, 13597), 'matplotlib.pyplot.plot', 'plt.plot', (['XXdRCT[:, 0]', 'XXdRCT[:, 1]', '"""--k"""'], {}), "(XXdRCT[:, 0], XXdRCT[:, 1], '--k')\n", (13562, 13597), True, 'import matplotlib.pyplot as plt\n'), ((14132, 14142), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14140, 14142), True, 'import matplotlib.pyplot as plt\n'), ((1626, 1727), 'numpy.array', 'np.array', (['[[-1, -1, 0, 0, 1, 1, 0, 0], [0, 0, -1, -1, 0, 0, 1, 1], [-l, l, -b, b, -l,\n l, -b, b]]'], {}), '([[-1, -1, 0, 0, 1, 1, 0, 0], [0, 0, -1, -1, 0, 0, 1, 1], [-l, l, -\n b, b, -l, l, -b, b]])\n', (1634, 1727), True, 'import numpy as np\n'), ((2319, 2333), 'numpy.identity', 'np.identity', (['(6)'], {}), '(6)\n', (2330, 2333), True, 'import numpy as np\n'), ((2438, 2462), 'cvxpy.Variable', 'cp.Variable', ([], {'nonneg': '(True)'}), '(nonneg=True)\n', (2449, 2462), True, 'import cvxpy as cp\n'), ((2477, 2501), 'cvxpy.Variable', 'cp.Variable', ([], {'nonneg': '(True)'}), '(nonneg=True)\n', (2488, 2501), True, 'import cvxpy as cp\n'), ((3462, 3476), 'numpy.identity', 'np.identity', (['(6)'], {}), '(6)\n', (3473, 3476), True, 'import numpy as np\n'), ((3581, 3605), 'cvxpy.Variable', 'cp.Variable', ([], {'nonneg': '(True)'}), '(nonneg=True)\n', (3592, 3605), True, 'import cvxpy as cp\n'), ((3619, 3654), 'cvxpy.Variable', 'cp.Variable', (['(2, 2)'], {'symmetric': '(True)'}), '((2, 2), symmetric=True)\n', (3630, 3654), True, 'import cvxpy as cp\n'), ((4769, 4788), 'cvxpy.Variable', 'cp.Variable', (['(8, 1)'], {}), '((8, 1))\n', (4780, 4788), True, 'import cvxpy as cp\n'), ((4830, 4849), 'cvxpy.Variable', 'cp.Variable', (['(1, 1)'], {}), '((1, 1))\n', (4841, 4849), True, 'import cvxpy as cp\n'), ((5365, 5376), 'numpy.ravel', 'np.ravel', (['U'], {}), '(U)\n', (5373, 5376), True, 'import numpy as np\n'), ((5713, 5728), 'numpy.zeros', 'np.zeros', (['(N + 1)'], {}), '(N + 1)\n', (5721, 5728), True, 'import numpy as np\n'), ((5743, 5769), 
'numpy.zeros', 'np.zeros', (['(N + 1, X0.size)'], {}), '((N + 1, X0.size))\n', (5751, 5769), True, 'import numpy as np\n'), ((5783, 5809), 'numpy.zeros', 'np.zeros', (['(N + 1, X0.size)'], {}), '((N + 1, X0.size))\n', (5791, 5809), True, 'import numpy as np\n'), ((5867, 5892), 'numpy.zeros', 'np.zeros', (['(N, B.shape[1])'], {}), '((N, B.shape[1]))\n', (5875, 5892), True, 'import numpy as np\n'), ((5932, 5957), 'numpy.zeros', 'np.zeros', (['(N, B.shape[1])'], {}), '((N, B.shape[1]))\n', (5940, 5957), True, 'import numpy as np\n'), ((7002, 7017), 'numpy.zeros', 'np.zeros', (['numX0'], {}), '(numX0)\n', (7010, 7017), True, 'import numpy as np\n'), ((7034, 7049), 'numpy.zeros', 'np.zeros', (['numX0'], {}), '(numX0)\n', (7042, 7049), True, 'import numpy as np\n'), ((7065, 7080), 'numpy.zeros', 'np.zeros', (['numX0'], {}), '(numX0)\n', (7073, 7080), True, 'import numpy as np\n'), ((7798, 7828), 'numpy.zeros', 'np.zeros', (['(ns, ns, N + 1, NX0)'], {}), '((ns, ns, N + 1, NX0))\n', (7806, 7828), True, 'import numpy as np\n'), ((7864, 7887), 'numpy.zeros', 'np.zeros', (['(NN, ns ** 2)'], {}), '((NN, ns ** 2))\n', (7872, 7887), True, 'import numpy as np\n'), ((7902, 7925), 'numpy.zeros', 'np.zeros', (['(NN, ns ** 2)'], {}), '((NN, ns ** 2))\n', (7910, 7925), True, 'import numpy as np\n'), ((7973, 7991), 'numpy.zeros', 'np.zeros', (['(NN, nc)'], {}), '((NN, nc))\n', (7981, 7991), True, 'import numpy as np\n'), ((13641, 13656), 'numpy.zeros', 'np.zeros', (['Nplot'], {}), '(Nplot)\n', (13649, 13656), True, 'import numpy as np\n'), ((13733, 13748), 'numpy.zeros', 'np.zeros', (['Nplot'], {}), '(Nplot)\n', (13741, 13748), True, 'import numpy as np\n'), ((13789, 13816), 'numpy.linspace', 'np.linspace', (['(19.52)', '(22)', '(100)'], {}), '(19.52, 22, 100)\n', (13800, 13816), True, 'import numpy as np\n'), ((13836, 13849), 'numpy.zeros', 'np.zeros', (['(100)'], {}), '(100)\n', (13844, 13849), True, 'import numpy as np\n'), ((13941, 13966), 'numpy.linspace', 'np.linspace', (['(0)', 
'(2 * np.pi)'], {}), '(0, 2 * np.pi)\n', (13952, 13966), True, 'import numpy as np\n'), ((1239, 1257), 'numpy.array', 'np.array', (['[states]'], {}), '([states])\n', (1247, 1257), True, 'import numpy as np\n'), ((1285, 1303), 'numpy.array', 'np.array', (['[inputs]'], {}), '([inputs])\n', (1293, 1303), True, 'import numpy as np\n'), ((2397, 2426), 'cvxpy.Variable', 'cp.Variable', (['(6, 6)'], {'PSD': '(True)'}), '((6, 6), PSD=True)\n', (2408, 2426), True, 'import cvxpy as cp\n'), ((2923, 2944), 'cvxpy.Minimize', 'cp.Minimize', (['(chi + nu)'], {}), '(chi + nu)\n', (2934, 2944), True, 'import cvxpy as cp\n'), ((3181, 3204), 'numpy.linalg.inv', 'np.linalg.inv', (['WWout[i]'], {}), '(WWout[i])\n', (3194, 3204), True, 'import numpy as np\n'), ((3540, 3569), 'cvxpy.Variable', 'cp.Variable', (['(6, 6)'], {'PSD': '(True)'}), '((6, 6), PSD=True)\n', (3551, 3569), True, 'import cvxpy as cp\n'), ((4247, 4263), 'cvxpy.Minimize', 'cp.Minimize', (['tau'], {}), '(tau)\n', (4258, 4263), True, 'import cvxpy as cp\n'), ((4518, 4541), 'numpy.linalg.inv', 'np.linalg.inv', (['WWout[i]'], {}), '(WWout[i])\n', (4531, 4541), True, 'import numpy as np\n'), ((4801, 4815), 'numpy.array', 'np.array', (['[Ud]'], {}), '([Ud])\n', (4809, 4815), True, 'import numpy as np\n'), ((4987, 5005), 'numpy.array', 'np.array', (['[X - Xd]'], {}), '([X - Xd])\n', (4995, 5005), True, 'import numpy as np\n'), ((6181, 6206), 'control.lqr', 'control.lqr', (['A', 'Bx1', 'Q', 'R'], {}), '(A, Bx1, Q, R)\n', (6192, 6206), False, 'import control\n'), ((8867, 8879), 'numpy.sqrt', 'np.sqrt', (['chi'], {}), '(chi)\n', (8874, 8879), True, 'import numpy as np\n'), ((13823, 13835), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (13830, 13835), True, 'import numpy as np\n'), ((14075, 14094), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""k"""'], {}), "(x, y, 'k')\n", (14083, 14094), True, 'import matplotlib.pyplot as plt\n'), ((14097, 14107), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (14105, 
14107), True, 'import matplotlib.pyplot as plt\n'), ((1723, 1739), 'numpy.zeros', 'np.zeros', (['(3, 8)'], {}), '((3, 8))\n', (1731, 1739), True, 'import numpy as np\n'), ((6113, 6127), 'numpy.identity', 'np.identity', (['(6)'], {}), '(6)\n', (6124, 6127), True, 'import numpy as np\n'), ((6146, 6160), 'numpy.identity', 'np.identity', (['(8)'], {}), '(8)\n', (6157, 6160), True, 'import numpy as np\n'), ((8067, 8094), 'numpy.vstack', 'np.vstack', (['(XhisNN, XXs[j])'], {}), '((XhisNN, XXs[j]))\n', (8076, 8094), True, 'import numpy as np\n'), ((8179, 8196), 'numpy.linalg.inv', 'np.linalg.inv', (['Wi'], {}), '(Wi)\n', (8192, 8196), True, 'import numpy as np\n'), ((8269, 8292), 'numpy.reshape', 'np.reshape', (['Wi', '(ns ** 2)'], {}), '(Wi, ns ** 2)\n', (8279, 8292), True, 'import numpy as np\n'), ((8328, 8351), 'numpy.reshape', 'np.reshape', (['Mi', '(ns ** 2)'], {}), '(Mi, ns ** 2)\n', (8338, 8351), True, 'import numpy as np\n'), ((8374, 8396), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['Mi'], {}), '(Mi)\n', (8392, 8396), True, 'import numpy as np\n'), ((8737, 8747), 'numpy.cos', 'np.cos', (['th'], {}), '(th)\n', (8743, 8747), True, 'import numpy as np\n'), ((8762, 8772), 'numpy.sin', 'np.sin', (['th'], {}), '(th)\n', (8768, 8772), True, 'import numpy as np\n'), ((8773, 8783), 'numpy.cos', 'np.cos', (['th'], {}), '(th)\n', (8779, 8783), True, 'import numpy as np\n'), ((9033, 9062), 'numpy.linalg.norm', 'np.linalg.norm', (['dxxdRCT[i, :]'], {}), '(dxxdRCT[i, :])\n', (9047, 9062), True, 'import numpy as np\n'), ((3881, 3895), 'numpy.identity', 'np.identity', (['(2)'], {}), '(2)\n', (3892, 3895), True, 'import numpy as np\n'), ((5239, 5261), 'cvxpy.sum_squares', 'cp.sum_squares', (['(U - Ud)'], {}), '(U - Ud)\n', (5253, 5261), True, 'import cvxpy as cp\n'), ((8549, 8568), 'numpy.diag', 'np.diag', (['cholMi', 'jj'], {}), '(cholMi, jj)\n', (8556, 8568), True, 'import numpy as np\n'), ((8749, 8759), 'numpy.sin', 'np.sin', (['th'], {}), '(th)\n', (8755, 8759), True, 
'import numpy as np\n'), ((1539, 1549), 'numpy.cos', 'np.cos', (['th'], {}), '(th)\n', (1545, 1549), True, 'import numpy as np\n'), ((1552, 1562), 'numpy.sin', 'np.sin', (['th'], {}), '(th)\n', (1558, 1562), True, 'import numpy as np\n'), ((1583, 1593), 'numpy.cos', 'np.cos', (['th'], {}), '(th)\n', (1589, 1593), True, 'import numpy as np\n'), ((13991, 14001), 'numpy.cos', 'np.cos', (['_x'], {}), '(_x)\n', (13997, 14001), True, 'import numpy as np\n'), ((14040, 14050), 'numpy.sin', 'np.sin', (['_x'], {}), '(_x)\n', (14046, 14050), True, 'import numpy as np\n'), ((1570, 1580), 'numpy.sin', 'np.sin', (['th'], {}), '(th)\n', (1576, 1580), True, 'import numpy as np\n'), ((6534, 6545), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (6542, 6545), True, 'import numpy as np\n'), ((6547, 6564), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (6561, 6564), True, 'import numpy as np\n')] |
import numpy as np
class RolloutWorker:
    """Collects episodes from a (goal- or language-conditioned) gym-style env.

    The worker supports two conditioning modes, selected at construction:
    * language_conditioned=True: observations carry an 'instruction' array and
      env `info` may provide a 'hindsight_instruction'.
    * language_conditioned=False: observations carry 'achieved_goal' /
      'desired_goal' arrays (standard HER-style interface).
    """
    def __init__(self, env, policy, cfg, env_params, language_conditioned=False):
        # env: environment whose reset()/step() return dict observations
        # policy: object with .act(...) matching the conditioning mode
        # cfg: config with num_rollouts_per_mpi and n_test_rollouts
        # env_params: dict; only 'max_timesteps' is read here
        self.env = env
        self.policy = policy
        self.cfg = cfg
        self.env_params = env_params
        self.language_conditioned = language_conditioned
        # cumulative env steps taken across all rollouts by this worker
        self.timestep_counter = 0
    def generate_rollout(self, train_mode=False, animated=False):
        """Run cfg.num_rollouts_per_mpi episodes and return them as a list of dicts.

        Each episode dict holds numpy arrays: 'obs' (T+1 entries), 'action',
        'reward', 'success' (T entries each), plus either
        'instruction'/'hindsight_instruction' or 'g'/'ag' depending on mode,
        and 'timesteps' (the worker's cumulative step counter).
        """
        episodes = []
        for _ in range(self.cfg.num_rollouts_per_mpi):
            ep_obs, ep_actions, ep_success, ep_rewards = [], [], [], []
            observation = self.env.reset()
            obs = observation['observation']
            if self.language_conditioned:
                instruction = observation['instruction']
                ep_instructions, ep_hinsight_instruction = [], []
            else:
                ag = observation['achieved_goal']
                g = observation['desired_goal']
                ep_ag, ep_g = [], []
            for _ in range(self.env_params['max_timesteps']):
                # act on the *current* obs (copies avoid aliasing into buffers)
                if self.language_conditioned:
                    action = self.policy.act(obs.copy(), instruction.copy(), train_mode)
                else:
                    action = self.policy.act(obs.copy(), ag.copy(), g.copy(), train_mode)
                if animated:
                    self.env.render()
                observation_new, reward, _, info = self.env.step(action)
                self.timestep_counter += 1
                obs_new = observation_new['observation']
                if self.language_conditioned:
                    instruction_new = observation_new['instruction']
                    # fall back to a zero vector when the env provides no
                    # hindsight instruction for this step
                    hindsight_instr = info['hindsight_instruction'] if 'hindsight_instruction' in info.keys(
                    ) else np.zeros_like(instruction_new)
                else:
                    ag_new = observation_new['achieved_goal']
                # store the pre-step state together with the action/reward
                ep_obs.append(obs.copy())
                ep_actions.append(action.copy())
                ep_rewards.append([reward])
                if self.language_conditioned:
                    ep_instructions.append(instruction.copy())
                    ep_hinsight_instruction.append(hindsight_instr.copy())
                else:
                    ep_ag.append(ag.copy())
                    ep_g.append(g.copy())
                # advance to the post-step state for the next iteration
                obs = obs_new
                if self.language_conditioned:
                    instruction = instruction_new
                else:
                    ag = ag_new
                ep_success.append(info['is_success'])
            # append the terminal observation (hence T+1 obs entries)
            ep_obs.append(obs.copy())
            if not self.language_conditioned:
                ep_ag.append(ag.copy())
            episode_data = dict(obs=np.array(ep_obs).copy(),
                                action=np.array(ep_actions).copy(),
                                reward=np.array(ep_rewards).copy(),
                                success=np.array(ep_success).copy(),
                                timesteps=self.timestep_counter)
            if self.language_conditioned:
                episode_data['instruction'] = np.array(ep_instructions).copy()
                episode_data['hindsight_instruction'] = np.array(ep_hinsight_instruction).copy()
            else:
                episode_data['g'] = np.array(ep_g).copy()
                episode_data['ag'] = np.array(ep_ag).copy()
            episodes.append(episode_data)
        return episodes
    def generate_test_rollout(self, animated=False):
        """Run cfg.n_test_rollouts evaluation batches (train_mode=False).

        Returns (success_rate, rewards): success measured on the final step
        of each episode, rewards as the mean per-episode reward sum.
        """
        rollout_data = []
        for _ in range(self.cfg.n_test_rollouts):
            rollout = self.generate_rollout(train_mode=False, animated=animated)
            rollout_data.append(rollout)
        # only take the last step to calculate success rate
        success_rate = np.mean([_rd['success'][-1] for rd in rollout_data for _rd in rd])
        rewards = np.sum([_rd['reward'] for rd in rollout_data for _rd in rd], 1).mean()
        return success_rate, rewards
| [
"numpy.mean",
"numpy.array",
"numpy.zeros_like",
"numpy.sum"
] | [((3750, 3816), 'numpy.mean', 'np.mean', (["[_rd['success'][-1] for rd in rollout_data for _rd in rd]"], {}), "([_rd['success'][-1] for rd in rollout_data for _rd in rd])\n", (3757, 3816), True, 'import numpy as np\n'), ((3835, 3898), 'numpy.sum', 'np.sum', (["[_rd['reward'] for rd in rollout_data for _rd in rd]", '(1)'], {}), "([_rd['reward'] for rd in rollout_data for _rd in rd], 1)\n", (3841, 3898), True, 'import numpy as np\n'), ((1757, 1787), 'numpy.zeros_like', 'np.zeros_like', (['instruction_new'], {}), '(instruction_new)\n', (1770, 1787), True, 'import numpy as np\n'), ((3081, 3106), 'numpy.array', 'np.array', (['ep_instructions'], {}), '(ep_instructions)\n', (3089, 3106), True, 'import numpy as np\n'), ((3170, 3203), 'numpy.array', 'np.array', (['ep_hinsight_instruction'], {}), '(ep_hinsight_instruction)\n', (3178, 3203), True, 'import numpy as np\n'), ((3265, 3279), 'numpy.array', 'np.array', (['ep_g'], {}), '(ep_g)\n', (3273, 3279), True, 'import numpy as np\n'), ((3324, 3339), 'numpy.array', 'np.array', (['ep_ag'], {}), '(ep_ag)\n', (3332, 3339), True, 'import numpy as np\n'), ((2697, 2713), 'numpy.array', 'np.array', (['ep_obs'], {}), '(ep_obs)\n', (2705, 2713), True, 'import numpy as np\n'), ((2761, 2781), 'numpy.array', 'np.array', (['ep_actions'], {}), '(ep_actions)\n', (2769, 2781), True, 'import numpy as np\n'), ((2829, 2849), 'numpy.array', 'np.array', (['ep_rewards'], {}), '(ep_rewards)\n', (2837, 2849), True, 'import numpy as np\n'), ((2898, 2918), 'numpy.array', 'np.array', (['ep_success'], {}), '(ep_success)\n', (2906, 2918), True, 'import numpy as np\n')] |
import numpy as np
import pytest
from pytest import approx
from uhi.numpy_plottable import ensure_plottable_histogram
def test_from_numpy() -> None:
    """A plain (counts, edges) tuple is wrapped into a 1D plottable histogram."""
    counts = (1, 2, 3, 4, 1, 2)
    edges = (0, 1, 2, 3)
    h = ensure_plottable_histogram((counts, edges))
    assert h.values() == approx(np.array(counts))
    assert len(h.axes) == 1
    axis = h.axes[0]
    assert len(axis) == 3
    # each axis entry is the (lower, upper) edge pair of one bin
    assert axis[0] == (0, 1)
    assert axis[1] == (1, 2)
    assert axis[2] == (2, 3)
def test_from_numpy_2d() -> None:
    """np.histogram2d output (H, xedges, yedges) round-trips through the wrapper."""
    np.random.seed(42)
    xs = np.random.normal(1, 2, 1000)
    ys = np.random.normal(-1, 1, 1000)
    counts, xedges, yedges = np.histogram2d(xs, ys)  # type: ignore
    h = ensure_plottable_histogram((counts, xedges, yedges))
    assert h.values() == approx(counts)
    assert len(h.axes) == 2
    assert len(h.axes[0]) == 10
    # first two bins of each axis reproduce consecutive edge pairs
    for axis, edges in zip(h.axes, (xedges, yedges)):
        assert axis[0] == approx(edges[0:2])
        assert axis[1] == approx(edges[1:3])
def test_from_numpy_dd() -> None:
    """np.histogramdd output (H, [edges...]) round-trips through the wrapper."""
    np.random.seed(42)
    x = np.random.normal(1, 2, 1000)
    y = np.random.normal(-1, 1, 1000)
    z = np.random.normal(3, 3, 1000)
    counts, edge_list = np.histogramdd((x, y, z))  # type: ignore
    h = ensure_plottable_histogram((counts, edge_list))
    assert h.values() == approx(counts)
    assert len(h.axes) == 3
    assert len(h.axes[0]) == 10
    # first two bins of every axis reproduce consecutive edge pairs
    for axis, edges in zip(h.axes, edge_list):
        assert axis[0] == approx(edges[0:2])
        assert axis[1] == approx(edges[1:3])
def test_from_bh_regular() -> None:
    """A boost-histogram with a Regular axis is already plottable: returned as-is."""
    bh = pytest.importorskip("boost_histogram")
    contents = (3, 2, 1, 2, 3)
    h1 = bh.Histogram(bh.axis.Regular(5, 0, 5))
    h1[...] = contents
    h = ensure_plottable_histogram(h1)
    assert h is h1
    assert h.values() == approx(np.array(contents))
    assert len(h.axes) == 1
    assert len(h.axes[0]) == 5
    # unit-width bins starting at 0
    for i in range(3):
        assert h.axes[0][i] == approx(np.array((i, i + 1)))
def test_from_bh_integer() -> None:
    """A boost-histogram with an Integer axis is returned as-is; bins are ints."""
    bh = pytest.importorskip("boost_histogram")
    contents = (3, 2, 1, 2, 3)
    h1 = bh.Histogram(bh.axis.Integer(1, 6))
    h1[...] = contents
    h = ensure_plottable_histogram(h1)
    assert h is h1
    assert h.values() == approx(np.array(contents))
    assert len(h.axes) == 1
    assert len(h.axes[0]) == 5
    # Integer axis bins are plain integers, not edge pairs
    for i in range(3):
        assert h.axes[0][i] == i + 1
def test_from_bh_str_cat() -> None:
    """A boost-histogram with a StrCategory axis is returned as-is; bins are labels."""
    bh = pytest.importorskip("boost_histogram")
    h1 = bh.Histogram(bh.axis.StrCategory(["hi", "ho"]))
    h1.fill(["hi", "hi", "hi", "ho"])
    h = ensure_plottable_histogram(h1)
    assert h is h1
    assert h.values() == approx(np.array((3, 1)))
    assert len(h.axes) == 1
    assert len(h.axes[0]) == 2
    # categorical bins expose the string labels themselves
    labels = [h.axes[0][i] for i in range(2)]
    assert labels == ["hi", "ho"]
| [
"numpy.random.normal",
"pytest.approx",
"numpy.histogramdd",
"uhi.numpy_plottable.ensure_plottable_histogram",
"numpy.array",
"pytest.importorskip",
"numpy.random.seed",
"numpy.histogram2d"
] | [((208, 241), 'uhi.numpy_plottable.ensure_plottable_histogram', 'ensure_plottable_histogram', (['hist1'], {}), '(hist1)\n', (234, 241), False, 'from uhi.numpy_plottable import ensure_plottable_histogram\n'), ((496, 514), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (510, 514), True, 'import numpy as np\n'), ((523, 551), 'numpy.random.normal', 'np.random.normal', (['(1)', '(2)', '(1000)'], {}), '(1, 2, 1000)\n', (539, 551), True, 'import numpy as np\n'), ((560, 589), 'numpy.random.normal', 'np.random.normal', (['(-1)', '(1)', '(1000)'], {}), '(-1, 1, 1000)\n', (576, 589), True, 'import numpy as np\n'), ((603, 623), 'numpy.histogram2d', 'np.histogram2d', (['x', 'y'], {}), '(x, y)\n', (617, 623), True, 'import numpy as np\n'), ((649, 683), 'uhi.numpy_plottable.ensure_plottable_histogram', 'ensure_plottable_histogram', (['result'], {}), '(result)\n', (675, 683), False, 'from uhi.numpy_plottable import ensure_plottable_histogram\n'), ((1028, 1046), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1042, 1046), True, 'import numpy as np\n'), ((1055, 1083), 'numpy.random.normal', 'np.random.normal', (['(1)', '(2)', '(1000)'], {}), '(1, 2, 1000)\n', (1071, 1083), True, 'import numpy as np\n'), ((1092, 1121), 'numpy.random.normal', 'np.random.normal', (['(-1)', '(1)', '(1000)'], {}), '(-1, 1, 1000)\n', (1108, 1121), True, 'import numpy as np\n'), ((1130, 1158), 'numpy.random.normal', 'np.random.normal', (['(3)', '(3)', '(1000)'], {}), '(3, 3, 1000)\n', (1146, 1158), True, 'import numpy as np\n'), ((1172, 1197), 'numpy.histogramdd', 'np.histogramdd', (['(x, y, z)'], {}), '((x, y, z))\n', (1186, 1197), True, 'import numpy as np\n'), ((1223, 1257), 'uhi.numpy_plottable.ensure_plottable_histogram', 'ensure_plottable_histogram', (['result'], {}), '(result)\n', (1249, 1257), False, 'from uhi.numpy_plottable import ensure_plottable_histogram\n'), ((1727, 1765), 'pytest.importorskip', 'pytest.importorskip', (['"""boost_histogram"""'], {}), 
"('boost_histogram')\n", (1746, 1765), False, 'import pytest\n'), ((1853, 1883), 'uhi.numpy_plottable.ensure_plottable_histogram', 'ensure_plottable_histogram', (['h1'], {}), '(h1)\n', (1879, 1883), False, 'from uhi.numpy_plottable import ensure_plottable_histogram\n'), ((2226, 2264), 'pytest.importorskip', 'pytest.importorskip', (['"""boost_histogram"""'], {}), "('boost_histogram')\n", (2245, 2264), False, 'import pytest\n'), ((2349, 2379), 'uhi.numpy_plottable.ensure_plottable_histogram', 'ensure_plottable_histogram', (['h1'], {}), '(h1)\n', (2375, 2379), False, 'from uhi.numpy_plottable import ensure_plottable_histogram\n'), ((2653, 2691), 'pytest.importorskip', 'pytest.importorskip', (['"""boost_histogram"""'], {}), "('boost_histogram')\n", (2672, 2691), False, 'import pytest\n'), ((2796, 2826), 'uhi.numpy_plottable.ensure_plottable_histogram', 'ensure_plottable_histogram', (['h1'], {}), '(h1)\n', (2822, 2826), False, 'from uhi.numpy_plottable import ensure_plottable_histogram\n'), ((710, 727), 'pytest.approx', 'approx', (['result[0]'], {}), '(result[0])\n', (716, 727), False, 'from pytest import approx\n'), ((815, 837), 'pytest.approx', 'approx', (['result[1][0:2]'], {}), '(result[1][0:2])\n', (821, 837), False, 'from pytest import approx\n'), ((865, 887), 'pytest.approx', 'approx', (['result[1][1:3]'], {}), '(result[1][1:3])\n', (871, 887), False, 'from pytest import approx\n'), ((915, 937), 'pytest.approx', 'approx', (['result[2][0:2]'], {}), '(result[2][0:2])\n', (921, 937), False, 'from pytest import approx\n'), ((965, 987), 'pytest.approx', 'approx', (['result[2][1:3]'], {}), '(result[2][1:3])\n', (971, 987), False, 'from pytest import approx\n'), ((1284, 1301), 'pytest.approx', 'approx', (['result[0]'], {}), '(result[0])\n', (1290, 1301), False, 'from pytest import approx\n'), ((1389, 1414), 'pytest.approx', 'approx', (['result[1][0][0:2]'], {}), '(result[1][0][0:2])\n', (1395, 1414), False, 'from pytest import approx\n'), ((1442, 1467), 'pytest.approx', 
'approx', (['result[1][0][1:3]'], {}), '(result[1][0][1:3])\n', (1448, 1467), False, 'from pytest import approx\n'), ((1495, 1520), 'pytest.approx', 'approx', (['result[1][1][0:2]'], {}), '(result[1][1][0:2])\n', (1501, 1520), False, 'from pytest import approx\n'), ((1548, 1573), 'pytest.approx', 'approx', (['result[1][1][1:3]'], {}), '(result[1][1][1:3])\n', (1554, 1573), False, 'from pytest import approx\n'), ((1601, 1626), 'pytest.approx', 'approx', (['result[1][2][0:2]'], {}), '(result[1][2][0:2])\n', (1607, 1626), False, 'from pytest import approx\n'), ((1654, 1679), 'pytest.approx', 'approx', (['result[1][2][1:3]'], {}), '(result[1][2][1:3])\n', (1660, 1679), False, 'from pytest import approx\n'), ((275, 293), 'numpy.array', 'np.array', (['hist1[0]'], {}), '(hist1[0])\n', (283, 293), True, 'import numpy as np\n'), ((1937, 1962), 'numpy.array', 'np.array', (['(3, 2, 1, 2, 3)'], {}), '((3, 2, 1, 2, 3))\n', (1945, 1962), True, 'import numpy as np\n'), ((2057, 2073), 'numpy.array', 'np.array', (['(0, 1)'], {}), '((0, 1))\n', (2065, 2073), True, 'import numpy as np\n'), ((2109, 2125), 'numpy.array', 'np.array', (['(1, 2)'], {}), '((1, 2))\n', (2117, 2125), True, 'import numpy as np\n'), ((2161, 2177), 'numpy.array', 'np.array', (['(2, 3)'], {}), '((2, 3))\n', (2169, 2177), True, 'import numpy as np\n'), ((2433, 2458), 'numpy.array', 'np.array', (['(3, 2, 1, 2, 3)'], {}), '((3, 2, 1, 2, 3))\n', (2441, 2458), True, 'import numpy as np\n'), ((2880, 2896), 'numpy.array', 'np.array', (['(3, 1)'], {}), '((3, 1))\n', (2888, 2896), True, 'import numpy as np\n')] |
"""Prediction of users based on tweet embeddings"""
import numpy as np
from sklearn.linear_model import LogisticRegression
from .models import User
from .twitter import vectorize_tweet
def predict_user(user0_name, user1_name, hypo_tweet_text):
    """
    Determine and return which user is more likely to say a hypothetical tweet.

    Parameters
    ----------
    user0_name, user1_name : str
        Screen names of the two users to compare; both must already be in
        the database with vectorized tweets.
    hypo_tweet_text : str
        The hypothetical tweet to classify.

    Returns
    -------
    numpy.ndarray of shape (1,)
        0.0 if user0 is the more likely author, 1.0 if user1 is.
    """
    user0 = User.query.filter(User.name == user0_name).one()
    # BUG FIX: was `User.name == user1.name`, which referenced `user1`
    # before assignment and raised NameError on every call.
    user1 = User.query.filter(User.name == user1_name).one()
    user0_vects = np.array([tweet.vect for tweet in user0.tweets])
    user1_vects = np.array([tweet.vect for tweet in user1.tweets])
    # Stack both users' tweet embeddings; label user0 rows 0, user1 rows 1.
    vects = np.vstack([user0_vects, user1_vects])
    labels = np.concatenate([np.zeros(len(user0.tweets)), np.ones(len(user1.tweets))])
    hypo_tweet_vect = vectorize_tweet(hypo_tweet_text)
    log_reg = LogisticRegression().fit(vects, labels)
    # reshape to (1, n_features) since predict expects a 2D sample matrix
    return log_reg.predict(hypo_tweet_vect.reshape(1, -1))
"numpy.array",
"numpy.vstack",
"sklearn.linear_model.LogisticRegression"
] | [((481, 529), 'numpy.array', 'np.array', (['[tweet.vect for tweet in user0.tweets]'], {}), '([tweet.vect for tweet in user0.tweets])\n', (489, 529), True, 'import numpy as np\n'), ((548, 596), 'numpy.array', 'np.array', (['[tweet.vect for tweet in user1.tweets]'], {}), '([tweet.vect for tweet in user1.tweets])\n', (556, 596), True, 'import numpy as np\n'), ((609, 646), 'numpy.vstack', 'np.vstack', (['[user0_vects, user1_vects]'], {}), '([user0_vects, user1_vects])\n', (618, 646), True, 'import numpy as np\n'), ((808, 828), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (826, 828), False, 'from sklearn.linear_model import LogisticRegression\n')] |
#!/usr/bin/env python
"""
Numba sampling routines
"""
import numpy as np
import math
from numba import jit, prange
# import lom._cython.matrix_updates as cython_mu
import lom._numba.lom_outputs as lom_outputs
import lom._numba.posterior_score_fcts as score_fcts
# only needed for IBP
from lom.auxiliary_functions import logit, expit
from scipy.special import gammaln
from math import lgamma
@jit('int8(float64, int8)', nopython=True, nogil=True)
def flip_metropolised_gibbs_numba(logit_p, z):
    """
    Metropolised Gibbs update of a single spin z in {-1, 1}.

    logit_p is the logit of P(z = 1). If the sign of logit_p disagrees with
    the current state (or is zero) the flip is accepted deterministically;
    otherwise it is accepted with probability exp(-z * logit_p).
    """
    if z == 1 and logit_p <= 0:
        return -z
    if z == -1 and logit_p >= 0:
        return -z
    # probabilistic acceptance of the proposed flip
    if math.exp(-z * logit_p) > np.random.rand():
        return -z
    return z
@jit('int8(float64)', nopython=True, nogil=True)
def flip_gibbs_numba(p):
    """
    Standard Gibbs draw of a spin in {-1, 1}: return 1 with probability p,
    otherwise -1.
    """
    return 1 if p > np.random.rand() else -1
@jit('int8(float64, int8)', nopython=True, nogil=True)
def flip_metropolised_gibbs_numba_classic(p, z):
    """
    Metropolised Gibbs update of a spin z in {-1, 1}, parametrised by the
    *probability* p of z = 1 (rather than its logit).

    A flip towards the more likely state is accepted deterministically;
    otherwise it is accepted with the Metropolis odds ratio.
    """
    if z == 1:
        if p <= .5:
            # current state is not favoured: deterministic flip
            return -z
        accept_ratio = (1 - p) / p
    else:
        if p >= .5:
            return -z
        accept_ratio = p / (1 - p)
    return -z if np.random.rand() < accept_ratio else z
def get_posterior_score_fct(model):
    """
    Map a LOM model identifier (e.g. 'OR_AND_2D') to its numba
    posterior-score function from lom._numba.posterior_score_fcts.

    Prints the unknown model name and raises NotImplementedError if no
    posterior sampler exists for it.
    """
    dispatch = {
        'OR_AND_2D': score_fcts.posterior_score_OR_AND_2D,
        'OR_NAND_2D': score_fcts.posterior_score_OR_NAND_2D,
        'OR_XOR_2D': score_fcts.posterior_score_OR_XOR_2D,
        'NAND_XOR_2D': score_fcts.posterior_score_NAND_XOR_2D,
        'XOR_AND_2D': score_fcts.posterior_score_XOR_AND_2D,
        'XOR_XOR_2D': score_fcts.posterior_score_XOR_XOR_2D,
        'XOR_NXOR_2D': score_fcts.posterior_score_XOR_NXOR_2D,
        'XOR_NAND_2D': score_fcts.posterior_score_XOR_NAND_2D,
        'OR_AND_3D': score_fcts.posterior_score_OR_AND_3D,
        'OR_NAND_3D': score_fcts.posterior_score_OR_NAND_3D,
        'OR_XOR_3D': score_fcts.posterior_score_OR_XOR_3D,
        'NAND_XOR_3D': score_fcts.posterior_score_NAND_XOR_3D,
        'XOR_AND_3D': score_fcts.posterior_score_XOR_AND_3D,
        'XOR_XOR_3D': score_fcts.posterior_score_XOR_XOR_3D,
        'XOR_NXOR_3D': score_fcts.posterior_score_XOR_NXOR_3D,
        'XOR_NAND_3D': score_fcts.posterior_score_XOR_NAND_3D,
        'OR_ALL_2D': score_fcts.posterior_score_OR_ALL_2D,
        'OR_ALL_3D': score_fcts.posterior_score_OR_ALL_3D,
    }
    if model not in dispatch:
        print(model)
        raise NotImplementedError('Posterior sampling for ' + model + '.')
    return dispatch[model]
def get_parent_score_fct(model):
    """
    Map a parent LOM model identifier (e.g. 'OR_AND_2D') to the numba
    factor-product function from lom._numba.lom_outputs that computes the
    parent layer's deterministic output.

    Prints the unknown model name and raises NotImplementedError if no
    product function exists for it.
    """
    dispatch = {
        'OR_AND_2D': lom_outputs.OR_AND_product,
        'OR_NAND_2D': lom_outputs.OR_NAND_product,
        'OR_XOR_2D': lom_outputs.OR_XOR_product,
        'NAND_XOR_2D': lom_outputs.NAND_XOR_product,
        'XOR_AND_2D': lom_outputs.XOR_AND_product,
        'XOR_XOR_2D': lom_outputs.XOR_XOR_product,
        'XOR_NXOR_2D': lom_outputs.XOR_NXOR_product,
        'XOR_NAND_2D': lom_outputs.XOR_NAND_product,
        'OR_AND_3D': lom_outputs.OR_AND_product_3d,
        'OR_NAND_3D': lom_outputs.OR_NAND_product_3d,
        'OR_XOR_3D': lom_outputs.OR_XOR_product_3d,
        'NAND_XOR_3D': lom_outputs.NAND_XOR_product_3d,
        'XOR_AND_3D': lom_outputs.XOR_AND_product_3d,
        'XOR_XOR_3D': lom_outputs.XOR_XOR_product_3d,
        'XOR_NXOR_3D': lom_outputs.XOR_NXOR_product_3d,
        'XOR_NAND_3D': lom_outputs.XOR_NAND_product_3d,
    }
    if model not in dispatch:
        print(model)
        raise NotImplementedError
    return dispatch[model]
def make_sampling_fct_onechild(model):
    """
    Build a numba-parallel Gibbs sampler for a factor matrix Z whose only
    child is the data X (no parent layer contributes a prior term).

    The returned function updates Z in place: for every non-fixed entry it
    computes the likelihood logit (lbda-weighted posterior score) plus the
    global logit prior and flips the entry with a metropolised Gibbs step.
    Rows of Z are processed in parallel via prange.

    NOTE(review): if model ends in neither '2D' nor '3D', `sampling_fct`
    is never defined and the final return raises NameError.
    """
    posterior_score_fct = get_posterior_score_fct(model)
    if model[-2:] == '2D':
        @jit('void(int8[:,:], int8[:,:], int8[:,:], int8[:,:],'
            'float64, float64)',
            nogil=True, nopython=True, parallel=True)
        def sampling_fct(Z, Z_fixed, U, X, lbda, logit_prior):
            N, L = Z.shape
            for n in prange(N):  # rows updated in parallel
                for l in range(L):
                    if Z_fixed[n, l] == 1:  # clamped entry: never resampled
                        continue
                    logit_score = lbda *\
                        posterior_score_fct(Z[n, :], U, X[n, :], l)
                    Z[n, l] = flip_metropolised_gibbs_numba(
                        logit_score + logit_prior, Z[n, l])
    elif model[-2:] == '3D':
        @jit('void(int8[:,:], int8[:,:], int8[:,:], int8[:,:],' +
            'int8[:,:,:], float64, float64)',
            nogil=True, nopython=True, parallel=True)
        def sampling_fct(Z, Z_fixed, U, V, X, lbda, logit_prior):
            N, L = Z.shape
            for n in prange(N):
                for l in range(L):
                    if Z_fixed[n, l] == 1:
                        continue
                    logit_score = lbda *\
                        posterior_score_fct(Z[n, :], U, V, X[n, :, :], l)
                    Z[n, l] = flip_metropolised_gibbs_numba(
                        logit_score + logit_prior, Z[n, l])
    return sampling_fct
def make_sampling_fct_onechild_oneparent(model, parent_model):
    """
    Build a numba-parallel Gibbs sampler for a factor matrix Z with one
    child (data X, likelihood weighted by lbda) and one parent LOM layer
    (factor matrices pa1, pa2, weighted by lbda_pa) acting as a prior.

    The returned function updates Z in place: for every non-fixed entry it
    sums the likelihood logit, the parent-prior logit and the global logit
    prior, then flips the entry with a metropolised Gibbs step. Rows of Z
    are processed in parallel via prange.
    """
    posterior_score_fct = get_posterior_score_fct(model)
    parent_posterior_score_fct = get_parent_score_fct(parent_model)

    if model[-2:] == '2D':
        @jit('void(int8[:,:], int8[:,:], int8[:,:], int8[:,:], ' +
            'float64, int8[:,:], int8[:,:], float64, float64)',
            nogil=True, nopython=True, parallel=True)
        def sampling_fct(Z, Z_fixed, U, X, lbda, pa1, pa2, lbda_pa, logit_prior):
            N, L = Z.shape
            for n in prange(N):
                for l in range(L):
                    if Z_fixed[n, l] == 1:  # clamped entry: never resampled
                        continue
                    logit_score = lbda *\
                        posterior_score_fct(Z[n, :], U, X[n, :], l)
                    logit_parent_score = lbda_pa *\
                        parent_posterior_score_fct(pa1[n, :], pa2[l, :])
                    Z[n, l] = flip_metropolised_gibbs_numba(
                        logit_score + logit_parent_score + logit_prior, Z[n, l])

    elif model[-2:] == '3D':
        @jit('void(int8[:,:], int8[:,:], int8[:,:], int8[:,:], int8[:,:,:], ' +
            'float64, int8[:,:], int8[:,:], float64, float64)',
            nogil=True, nopython=True, parallel=True)
        def sampling_fct(Z, Z_fixed, U, V, X, lbda, pa1, pa2, lbda_pa, logit_prior):
            N, L = Z.shape
            for n in prange(N):
                for l in range(L):
                    if Z_fixed[n, l] == 1:
                        continue
                    # Pass the full 2D data fibre X[n, :, :] (was X[n, :]),
                    # matching the 3D sampler in make_sampling_fct_onechild.
                    logit_score = lbda *\
                        posterior_score_fct(Z[n, :], U, V, X[n, :, :], l)
                    logit_parent_score = lbda_pa *\
                        parent_posterior_score_fct(pa1[n, :], pa2[l, :])
                    # BUG FIX: a misplaced parenthesis previously produced
                    #   Z[n, l] = flip_metropolised_gibbs_numba(score), Z[n, l]
                    # i.e. a one-argument call (signature needs two) whose
                    # result was packed into a tuple and assigned to Z[n, l].
                    Z[n, l] = flip_metropolised_gibbs_numba(
                        logit_score + logit_parent_score + logit_prior, Z[n, l])

    return sampling_fct
def make_sampling_fct_nochild_oneparent(parent_model):
    """
    Generate update function for factor matrices without children.
    In the general case this is a sampling version of the factor product.

    Parameters
    ----------
    parent_model : str
        Identifier of the parent layer's model, ending in '2D' or '3D';
        selects the parent score function and the sampler signature.

    Returns
    -------
    sampling_fct : compiled function
        Updates Z in place; entries flagged in Z_fixed are skipped.
    """
    parent_posterior_score_fct = get_parent_score_fct(parent_model)
    if parent_model[-2:] == '2D':
        @jit('void(int8[:,:], int8[:,:], int8[:,:], int8[:,:], float64, float64)',
             nogil=True, nopython=True, parallel=True)
        def sampling_fct(Z, Z_fixed, pa1, pa2, lbda_pa, logit_prior):
            N, L = Z.shape
            for n in prange(N):
                for l in range(L):
                    if Z_fixed[n, l] == 1:
                        continue  # clamped entry
                    logit_parent_score = lbda_pa *\
                        parent_posterior_score_fct(pa1[n, :], pa2[l, :])
                    Z[n, l] = flip_metropolised_gibbs_numba(
                        logit_parent_score + logit_prior, Z[n, l])
    elif parent_model[-2:] == '3D':
        # BUG FIX: the explicit signature previously listed only two
        # int8[:,:] parent arguments although the function takes three
        # (pa1, pa2, pa3), which makes numba's signature-based
        # compilation fail with an argument-count mismatch.
        @jit('void(int8[:,:,:], int8[:,:,:], int8[:,:], int8[:,:], int8[:,:], '
             'float64, float64)',
             nogil=True, nopython=True, parallel=True)
        def sampling_fct(Z, Z_fixed, pa1, pa2, pa3, lbda_pa, logit_prior):
            N, D, M = Z.shape
            for n in prange(N):
                for d in range(D):
                    for m in range(M):
                        if Z_fixed[n, d, m] == 1:
                            continue
                        logit_parent_score = lbda_pa *\
                            parent_posterior_score_fct(
                                pa1[n, :], pa2[d, :], pa3[m, :])
                        Z[n, d, m] = flip_metropolised_gibbs_numba(
                            logit_parent_score + logit_prior, Z[n, d, m])
    return sampling_fct
def make_sampling_fct_nochild_twoparents(parent_model_1, parent_model_2):
    """
    Build a numba-compiled Gibbs sampler for a factor matrix without a
    child, whose prior combines two parent layers.

    Parameters
    ----------
    parent_model_1, parent_model_2 : str
        Identifiers of the two parent models; both must end in '2D' or
        both in '3D'.

    Returns
    -------
    sampling_fct : compiled function
        Updates Z in place; entries flagged in Z_fixed are skipped.
    """
    # BUG FIX: both assignments previously bound the same name, so the
    # first score function was overwritten and both parents were scored
    # with parent_model_2's function. Keep one function per parent.
    parent_posterior_score_fct_1 = get_parent_score_fct(parent_model_1)
    parent_posterior_score_fct_2 = get_parent_score_fct(parent_model_2)
    if parent_model_1[-2:] == '2D' and parent_model_2[-2:] == '2D':
        @jit('void(int8[:,:], int8[:,:], int8[:,:], int8[:,:], ' +
             'float64, int8[:,:], int8[:,:], float64, float64)',
             nogil=True, nopython=True, parallel=True)
        def sampling_fct(Z, Z_fixed,
                         pa1_1, pa1_2, lbda_pa1,
                         pa2_1, pa2_2, lbda_pa2,
                         logit_prior):
            N, L = Z.shape
            for n in prange(N):
                for l in range(L):
                    if Z_fixed[n, l] == 1:
                        continue  # clamped entry
                    logit_parent_score_1 = lbda_pa1 *\
                        parent_posterior_score_fct_1(pa1_1[n, :], pa1_2[l, :])
                    logit_parent_score_2 = lbda_pa2 *\
                        parent_posterior_score_fct_2(pa2_1[n, :], pa2_2[l, :])
                    Z[n, l] = flip_metropolised_gibbs_numba(
                        logit_parent_score_1 + logit_parent_score_2 + logit_prior,
                        Z[n, l])
    elif parent_model_1[-2:] == '3D' and parent_model_2[-2:] == '3D':
        @jit('void(int8[:,:,:], int8[:,:,:], int8[:,:], int8[:,:], int8[:,:], ' +
             'float64, int8[:,:], int8[:,:], int8[:,:], float64, float64)',
             nogil=True, nopython=True, parallel=True)
        def sampling_fct(Z, Z_fixed,
                         pa1_1, pa1_2, pa1_3, lbda_pa1,
                         pa2_1, pa2_2, pa2_3, lbda_pa2,
                         logit_prior):
            N, D, M = Z.shape
            for n in prange(N):
                for d in range(D):
                    for m in range(M):
                        if Z_fixed[n, d, m] == 1:
                            continue
                        logit_parent_score_1 = lbda_pa1 *\
                            parent_posterior_score_fct_1(
                                pa1_1[n, :], pa1_2[d, :], pa1_3[m, :])
                        logit_parent_score_2 = lbda_pa2 *\
                            parent_posterior_score_fct_2(
                                pa2_1[n, :], pa2_2[d, :], pa2_3[m, :])
                        Z[n, d, m] = flip_metropolised_gibbs_numba(
                            logit_parent_score_1 +
                            logit_parent_score_2 +
                            logit_prior,
                            Z[n, d, m])
    return sampling_fct
def make_sampling_fct_onechild_twoparents(model, parent_model_1, parent_model_2):
    """
    Build a numba-compiled Gibbs sampler for a factor matrix with one child
    data matrix and two parent layers.

    Parameters
    ----------
    model : str
        Model identifier ending in '2D'. 3D models raise
        NotImplementedError (3D tensors cannot have children).
    parent_model_1, parent_model_2 : str
        Identifiers of the two parent models.

    Returns
    -------
    sampling_fct : compiled function
        Updates Z in place; entries flagged in Z_fixed are skipped.

    Raises
    ------
    NotImplementedError
        If model names a 3D tensor.
    """
    posterior_score_fct = get_posterior_score_fct(model)
    # BUG FIX: the two parent score functions were previously assigned to
    # the same name, so the first was overwritten and both parents were
    # scored with parent_model_2's function.
    parent_posterior_score_fct_1 = get_parent_score_fct(parent_model_1)
    parent_posterior_score_fct_2 = get_parent_score_fct(parent_model_2)
    if model[-2:] == '2D':
        @jit('void(int8[:,:], int8[:,:], int8[:,:], int8[:,:], float64,' +
             'int8[:,:], int8[:,:], float64,' +
             'int8[:,:], int8[:,:], float64,' +
             'float64)',
             nogil=True, nopython=True, parallel=True)
        def sampling_fct(Z, Z_fixed, U, X, lbda,
                         pa1_1, pa1_2, lbda_pa1,
                         pa2_1, pa2_2, lbda_pa2,
                         logit_prior):
            N, L = Z.shape
            for n in prange(N):
                for l in range(L):
                    if Z_fixed[n, l] == 1:
                        continue  # clamped entry
                    logit_score = lbda *\
                        posterior_score_fct(Z[n, :], U, X[n, :], l)
                    logit_parent_score_1 = lbda_pa1 *\
                        parent_posterior_score_fct_1(pa1_1[n, :], pa1_2[l, :])
                    logit_parent_score_2 = lbda_pa2 *\
                        parent_posterior_score_fct_2(pa2_1[n, :], pa2_2[l, :])
                    Z[n, l] = flip_metropolised_gibbs_numba(
                        logit_score + logit_parent_score_1 +
                        logit_parent_score_2 + logit_prior,
                        Z[n, l])
    elif model[-2:] == '3D':
        raise NotImplementedError('3D tensors can not have children.')
    return sampling_fct
def sample_2d_IBP(Z, U, X, lbda, q, alpha):
    """
    IBP update procedure for 2D OrMachine, drawing U and Z where
    U has flat prior and Z comes from IBP with concentration parameter alpha.

    Z[n, l] : latent assignments ("customers x dishes"), shape (N, L)
    U[d, l] : loadings, shape (D, L)
    X[n, d] : data, shape (N, D) -- presumably values in {-1, 1}, following
        the TN/FN comparisons below; confirm against callers
    q : Bernoulli prior parameter (enters the new-column likelihood terms)
    alpha : IBP concentration parameter

    Returns the (possibly resized) Z and U: columns used by no other row
    are dropped and up to L_new_max new columns may be appended per row,
    so callers must rebind both arrays.
    """
    # leftovers from when parameters were read off a layer object:
    # lbda = lr.lbda.val
    # alpha = lr.alpha
    # X = lr.child()
    L_new_max = 3  # maximum number of new dishes to consider
    N, L = Z.shape
    D, _ = U.shape
    posterior_score_fct = get_posterior_score_fct('OR_AND_2D')
    # pre-compute scores for updating L: log-likelihood contributions of
    # false-negative / true-negative data points for a range of L'
    # (simple Bernoulli prior on U)
    FN_factor = [np.log((expit(lbda) * (1 - 2 * (q**L_temp))) + (q**L_temp))
                 for L_temp in range(L_new_max)]
    TN_factor = [np.log((expit(-lbda) * (1 - 2 * (q**L_temp))) + (q**L_temp))
                 for L_temp in range(L_new_max)]
    for n in range(N):
        # how often is each dish ordered by the other customers
        m = (Z[np.arange(N) != n, :] == 1).sum(axis=0)
        columns_to_keep = np.ones(L, dtype=bool)
        for l in range(L):
            # dishes that have already been ordered elsewhere
            if m[l] > 0:
                # draw z[n,l] as usual, with IBP popularity prior m[l] / N
                logit_score = lbda * posterior_score_fct(
                    Z[n, :], U, X[n, :], l)
                logit_prior = logit(m[l] / N)
                Z[n, l] = flip_gibbs_numba(expit(logit_score +
                                                 logit_prior))
            elif m[l] == 0:
                # dish used by no other customer -> mark column for removal
                columns_to_keep[l] = False
        # remove marked columns
        Z = Z[:, columns_to_keep]
        U = U[:, columns_to_keep]
        L = columns_to_keep.sum()
        # draw number of new dishes (columns):
        # compute log probability of L' for a range of L' values
        # n_predict = [lom_outputs.OR_AND_product(Z[n, :], U[d, :]) for d in range(D)]
        # faster:
        n_predict = lom_outputs.OR_AND_single_n(Z[n, :], U)
        # assert(np.all(n_predict_test==n_predict))
        # number of true negatives / false negatives among current predictions
        TN = ((X[n, :] == -1) * (np.array(n_predict) == -1)).sum()
        FN = ((X[n, :] == 1) * (np.array(n_predict) == -1)).sum()
        lik_L_new = [TN * TN_factor[L_temp] + FN * FN_factor[L_temp]
                     for L_temp in range(L_new_max)]
        # NOTE(review): the original author was unsure whether the prior
        # should use L_new or L + L_new here ("L_new or L+L_new ??!").
        prior_L_new = [(L_temp + L) * np.log(alpha / N) - (alpha / N) -
                       gammaln(L + L_temp + 1)
                       for L_temp in range(L_new_max)]
        log_L_new = [loglik + logprior
                     for loglik, logprior in zip(lik_L_new, prior_L_new)]
        # map to probabilities (softmax with max subtraction for stability)
        p_L_new = [np.exp(log_L_new[i] - np.max(log_L_new))
                   for i in range(L_new_max)]
        p_L_new /= np.sum(p_L_new)
        L_new = np.random.choice(range(L_new_max), p=p_L_new)
        if L_new > 0:
            # add new columns to Z; the current row activates them
            Z = np.hstack([Z, np.full([N, L_new], fill_value=-1, dtype=np.int8)])
            Z[n, -L_new:] = 1
            # new U columns start all -1 (2 * 0 - 1)
            U = np.hstack([U, 2 * np.zeros([D, L_new], dtype=np.int8) - 1])
            # sample the new hidden causes
            for l in list(range(L, L + L_new)):
                for d in range(D):
                    logit_score = lbda * posterior_score_fct(
                        U[d, :], Z, X[:, d], l)
                    U[d, l] = flip_gibbs_numba(expit(logit_score))
            L += L_new
    # if L_new > 0:
    #     print(L_new, Z.shape[0])
    return Z, U
# TODO: numba
# @jit(parallel=True)
# def sample_qL_q(Z, U, X, q, lbda, gamma):
# N, D = X.shape
# # g = np.zeros([N, D], dtype=np.int8)
# # lom_outputs.compute_g(Z, U, g)
# log_expit_plus_lbda = np.log(1 + np.exp(lbda))
# log_expit_minus_lbda = np.log(1 + np.exp(-lbda))
# q_max = 10
# for d in prange(D):
# q[0, d] = draw_q_d(X[:, d], Z, U[d, :],
# log_expit_plus_lbda,
# log_expit_minus_lbda,
# gamma,
# q_max)
# return
@jit('int8(float64[:])', nopython=True, nogil=True)
def random_choice(p):
    """
    Draw a random index into p according to the probabilities p.

    Note: the return value is 1-based (index + 1); the callers in this
    module use it directly as a q threshold value. If p does not sum to
    one and the draw overshoots the accumulated mass, len(p) + 1 is
    returned as a fallback.
    """
    rand_float = np.random.ranf()
    acc = 0
    # inverse-CDF sampling over the discrete distribution p
    for i in range(len(p)):
        acc += p[i]
        if rand_float < acc:
            return np.int8(i + 1)
    return len(p) + 1
# @jit('int8(int8[:], int8[:,:], int8[:], float64, float64, float64, int8)',
# nopython=True, nogil=True)
# def draw_q_d(X_d, Z, U_d, p_lbda, m_lbda, gamma, q_max):
@jit('void(int8[:,:], int8[:,:], int8[:,:], int8[:,:], float64, float64)', nopython=True, nogil=True)
def sample_qL_q(Z, U, X, q, lbda, gamma):
    """
    Gibbs-sample the per-dimension threshold q[0, d] of the qL model.

    For every data dimension d, the conditional over q in {1, ..., q_max - 1}
    combines a Poisson-style log-prior with rate gamma and the likelihood of
    the thresholded predictions under lbda. q is updated in place.

    Note: this function is compiled without parallel=True, so the prange
    loops execute sequentially like range.
    """
    N, D = X.shape
    q_max = 6  # thresholds considered: 1 .. q_max - 1
    p_lbda = np.log(1 + np.exp(lbda))
    m_lbda = np.log(1 + np.exp(-lbda))
    logconditional = np.zeros((q_max - 1, D), dtype=np.float64)
    for d in prange(D):
        for q_new in prange(q_max - 1):  # shift by one to have q values and indices agree
            # compute logpriors
            logconditional[q_new, d] = (q_new + 1) * np.log(gamma) -\
                gamma - lgamma(q_new + 2)
            # compute loglikelihoods: a data point is predicted 'on' when at
            # least q_new + 1 sources are active (arrays are mapped {-1,1}->{0,1})
            true_predictions = np.sum(np.dot((Z + 1) / 2, (U[d, :] + 1) / 2) >= q_new + 1)
            logconditional[q_new, d] = logconditional[q_new, d] + true_predictions * p_lbda +\
                (N - true_predictions) * m_lbda
        # overwrite log-conditional with normalised probability
        # (but avoid allocating new memory); any constant shift cancels in
        # the normalisation below, so taking the max over the full matrix
        # is safe (if not the tightest choice for this column)
        log_p_max = np.max(logconditional)
        for q_new in prange(q_max - 1):
            logconditional[q_new, d] = np.exp(logconditional[q_new, d] - log_p_max)
        logconditional[:, d] /= np.sum(logconditional[:, d])  # normalise
        # import pdb; pdb.set_trace()
        q[0, d] = random_choice(logconditional[:, d])
        # q[0, d] = np.argmax(logconditional[:, d]) + 1
@jit('int16(int8[:], int8[:,:], int8[:], int16, int8[:,:])', nopython=True, nogil=True)
def posterior_score_fct_qL_Z(Z_n, U, X_n, l, q):
    """
    Posterior logit score for flipping Z_n[l] under the threshold (qL) model.

    Z_n : one row of the factor matrix Z (length L)
    U : factor matrix, shape (D, L)
    X_n : corresponding data row (length D)
    l : index of the entry whose flip is scored
    q : per-dimension thresholds, read as q[0, d]

    Flipping Z_n[l] only changes the prediction for a dimension d when
    exactly q[0, d] - 1 of the *other* sources are active there; only
    those dimensions contribute X_n[d] to the score.
    """
    D, L = U.shape
    score = 0
    # We need q-1 sources active for Z_n[l] to have an effect
    for d in range(D):
        if U[d, l] != 1:
            continue  # l cannot influence dimension d
        counter = 0  # count active sources other than l
        # alrdy_active = False # temp line
        for l_prime in range(L):
            if (Z_n[l_prime] == 1) and\
                    (U[d, l_prime] == 1) and\
                    (l_prime != l):
                # alrdy_active = True # temp line
                counter += 1
                # no contribution if we already have q sources
                if counter == q[0, d]:
                    break
        # if alrdy_active is False:
        #     score += X_n[d]
        if counter == q[0, d] - 1:
            score += X_n[d]
    return score
@jit('int16(int8[:], int8[:,:], int8[:], int16, int8)', nopython=True, nogil=True)
def posterior_score_fct_qL_U(U_d, Z, X_d, l, q_d):
    """
    Posterior logit score for flipping U_d[l] under the threshold (qL) model.

    U_d : one row of the factor matrix U (length L)
    Z : factor matrix, shape (N, L)
    X_d : data for dimension d across all N samples (length N, indexed by n)
    l : index of the entry whose flip is scored
    q_d : threshold for dimension d

    Only samples n where exactly q_d - 1 of the other sources are active
    contribute X_d[n] to the score.
    """
    N, L = Z.shape
    score = 0
    # We need q-1 sources active for U_d[l] to have an effect
    for n in range(N):
        if Z[n, l] != 1:
            continue  # l cannot influence sample n
        counter = 0  # count active sources other than l
        for l_prime in range(L):
            if (U_d[l_prime] == 1) and\
                    (Z[n, l_prime] == 1) and\
                    (l_prime != l):
                counter += 1
                # no contribution if we already have q sources
                if counter == q_d:
                    break
        if counter == q_d - 1:
            score += X_d[n]
    return score
@jit('void(int8[:,:], int8[:,:], int8[:,:], int8[:,:], float64)', nopython=True, nogil=True)
def sample_qL_factors_Z(Z, U, X, q, lbda):
    """
    Gibbs-update every entry of Z under the threshold (qL) model, in place.

    Need separate functions for U and Z because we have different q's
    for every Z[n,:], but the same q for every U[d,:].

    Note: lbda is accepted for signature symmetry with the other samplers
    but is not used here -- the score enters the flip unscaled.
    """
    N, L = Z.shape
    for n in prange(N):
        for l in range(L):
            logit_score = posterior_score_fct_qL_Z(
                Z[n, :], U, X[n, :], l, q)
            Z[n, l] = flip_metropolised_gibbs_numba(
                logit_score, Z[n, l])
    return
@jit('void(int8[:,:], int8[:,:], int8[:,:], int8[:,:], float64)', nopython=True, nogil=True)
def sample_qL_factors_U(U, Z, X, q, lbda):
    """
    Gibbs-update every entry of U under the threshold (qL) model, in place.

    Need separate functions for U and Z because we have different q's
    for every Z[n,:], but the same q for every U[d,:].

    Note: lbda is accepted for signature symmetry with the other samplers
    but is not used here -- the score enters the flip unscaled.
    """
    D, L = U.shape
    for d in prange(D):
        for l in range(L):
            # BUG FIX: pass the data *column* X[:, d] (length N; the score
            # function iterates n in range(N)), not the row X[d, :], which
            # has length D and indexes the wrong axis. This matches the
            # X[:, d] convention used by sample_qL_q in this module.
            logit_score = posterior_score_fct_qL_U(
                U[d, :], Z, X[:, d], l, q[0, d])
            U[d, l] = flip_metropolised_gibbs_numba(
                logit_score, U[d, l])
    return
| [
"lom.auxiliary_functions.logit",
"numpy.random.rand",
"numpy.log",
"numpy.array",
"numba.prange",
"math.exp",
"numpy.arange",
"numpy.int8",
"lom.auxiliary_functions.expit",
"numpy.max",
"numpy.exp",
"numpy.dot",
"numpy.ones",
"numba.jit",
"numpy.random.ranf",
"math.lgamma",
"numpy.su... | [((395, 448), 'numba.jit', 'jit', (['"""int8(float64, int8)"""'], {'nopython': '(True)', 'nogil': '(True)'}), "('int8(float64, int8)', nopython=True, nogil=True)\n", (398, 448), False, 'from numba import jit, prange\n'), ((856, 903), 'numba.jit', 'jit', (['"""int8(float64)"""'], {'nopython': '(True)', 'nogil': '(True)'}), "('int8(float64)', nopython=True, nogil=True)\n", (859, 903), False, 'from numba import jit, prange\n'), ((1102, 1155), 'numba.jit', 'jit', (['"""int8(float64, int8)"""'], {'nopython': '(True)', 'nogil': '(True)'}), "('int8(float64, int8)', nopython=True, nogil=True)\n", (1105, 1155), False, 'from numba import jit, prange\n'), ((18739, 18789), 'numba.jit', 'jit', (['"""int8(float64[:])"""'], {'nopython': '(True)', 'nogil': '(True)'}), "('int8(float64[:])', nopython=True, nogil=True)\n", (18742, 18789), False, 'from numba import jit, prange\n'), ((19236, 19340), 'numba.jit', 'jit', (['"""void(int8[:,:], int8[:,:], int8[:,:], int8[:,:], float64, float64)"""'], {'nopython': '(True)', 'nogil': '(True)'}), "('void(int8[:,:], int8[:,:], int8[:,:], int8[:,:], float64, float64)',\n nopython=True, nogil=True)\n", (19239, 19340), False, 'from numba import jit, prange\n'), ((20592, 20682), 'numba.jit', 'jit', (['"""int16(int8[:], int8[:,:], int8[:], int16, int8[:,:])"""'], {'nopython': '(True)', 'nogil': '(True)'}), "('int16(int8[:], int8[:,:], int8[:], int16, int8[:,:])', nopython=True,\n nogil=True)\n", (20595, 20682), False, 'from numba import jit, prange\n'), ((21503, 21589), 'numba.jit', 'jit', (['"""int16(int8[:], int8[:,:], int8[:], int16, int8)"""'], {'nopython': '(True)', 'nogil': '(True)'}), "('int16(int8[:], int8[:,:], int8[:], int16, int8)', nopython=True, nogil\n =True)\n", (21506, 21589), False, 'from numba import jit, prange\n'), ((22209, 22305), 'numba.jit', 'jit', (['"""void(int8[:,:], int8[:,:], int8[:,:], int8[:,:], float64)"""'], {'nopython': '(True)', 'nogil': '(True)'}), "('void(int8[:,:], int8[:,:], int8[:,:], int8[:,:], 
float64)', nopython=\n True, nogil=True)\n", (22212, 22305), False, 'from numba import jit, prange\n'), ((22772, 22868), 'numba.jit', 'jit', (['"""void(int8[:,:], int8[:,:], int8[:,:], int8[:,:], float64)"""'], {'nopython': '(True)', 'nogil': '(True)'}), "('void(int8[:,:], int8[:,:], int8[:,:], int8[:,:], float64)', nopython=\n True, nogil=True)\n", (22775, 22868), False, 'from numba import jit, prange\n'), ((18900, 18916), 'numpy.random.ranf', 'np.random.ranf', ([], {}), '()\n', (18914, 18916), True, 'import numpy as np\n'), ((19512, 19554), 'numpy.zeros', 'np.zeros', (['(q_max - 1, D)'], {'dtype': 'np.float64'}), '((q_max - 1, D), dtype=np.float64)\n', (19520, 19554), True, 'import numpy as np\n'), ((19569, 19578), 'numba.prange', 'prange', (['D'], {}), '(D)\n', (19575, 19578), False, 'from numba import jit, prange\n'), ((22534, 22543), 'numba.prange', 'prange', (['N'], {}), '(N)\n', (22540, 22543), False, 'from numba import jit, prange\n'), ((23098, 23107), 'numba.prange', 'prange', (['D'], {}), '(D)\n', (23104, 23107), False, 'from numba import jit, prange\n'), ((1036, 1052), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1050, 1052), True, 'import numpy as np\n'), ((1582, 1598), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1596, 1598), True, 'import numpy as np\n'), ((5060, 5178), 'numba.jit', 'jit', (['"""void(int8[:,:], int8[:,:], int8[:,:], int8[:,:],float64, float64)"""'], {'nogil': '(True)', 'nopython': '(True)', 'parallel': '(True)'}), "('void(int8[:,:], int8[:,:], int8[:,:], int8[:,:],float64, float64)',\n nogil=True, nopython=True, parallel=True)\n", (5063, 5178), False, 'from numba import jit, prange\n'), ((6592, 6751), 'numba.jit', 'jit', (["('void(int8[:,:], int8[:,:], int8[:,:], int8[:,:], ' +\n 'float64, int8[:,:], int8[:,:], float64, float64)')"], {'nogil': '(True)', 'nopython': '(True)', 'parallel': '(True)'}), "('void(int8[:,:], int8[:,:], int8[:,:], int8[:,:], ' +\n 'float64, int8[:,:], int8[:,:], float64, float64)', 
nogil=True,\n nopython=True, parallel=True)\n", (6595, 6751), False, 'from numba import jit, prange\n'), ((8618, 8737), 'numba.jit', 'jit', (['"""void(int8[:,:], int8[:,:], int8[:,:], int8[:,:], float64, float64)"""'], {'nogil': '(True)', 'nopython': '(True)', 'parallel': '(True)'}), "('void(int8[:,:], int8[:,:], int8[:,:], int8[:,:], float64, float64)',\n nogil=True, nopython=True, parallel=True)\n", (8621, 8737), False, 'from numba import jit, prange\n'), ((10358, 10517), 'numba.jit', 'jit', (["('void(int8[:,:], int8[:,:], int8[:,:], int8[:,:], ' +\n 'float64, int8[:,:], int8[:,:], float64, float64)')"], {'nogil': '(True)', 'nopython': '(True)', 'parallel': '(True)'}), "('void(int8[:,:], int8[:,:], int8[:,:], int8[:,:], ' +\n 'float64, int8[:,:], int8[:,:], float64, float64)', nogil=True,\n nopython=True, parallel=True)\n", (10361, 10517), False, 'from numba import jit, prange\n'), ((12989, 13186), 'numba.jit', 'jit', (["('void(int8[:,:], int8[:,:], int8[:,:], int8[:,:], float64,' +\n 'int8[:,:], int8[:,:], float64,' + 'int8[:,:], int8[:,:], float64,' +\n 'float64)')"], {'nogil': '(True)', 'nopython': '(True)', 'parallel': '(True)'}), "('void(int8[:,:], int8[:,:], int8[:,:], int8[:,:], float64,' +\n 'int8[:,:], int8[:,:], float64,' + 'int8[:,:], int8[:,:], float64,' +\n 'float64)', nogil=True, nopython=True, parallel=True)\n", (12992, 13186), False, 'from numba import jit, prange\n'), ((15444, 15466), 'numpy.ones', 'np.ones', (['L'], {'dtype': 'bool'}), '(L, dtype=bool)\n', (15451, 15466), True, 'import numpy as np\n'), ((16543, 16582), 'lom._numba.lom_outputs.OR_AND_single_n', 'lom_outputs.OR_AND_single_n', (['Z[n, :]', 'U'], {}), '(Z[n, :], U)\n', (16570, 16582), True, 'import lom._numba.lom_outputs as lom_outputs\n'), ((17427, 17442), 'numpy.sum', 'np.sum', (['p_L_new'], {}), '(p_L_new)\n', (17433, 17442), True, 'import numpy as np\n'), ((19602, 19619), 'numba.prange', 'prange', (['(q_max - 1)'], {}), '(q_max - 1)\n', (19608, 19619), False, 'from numba import 
jit, prange\n'), ((20218, 20240), 'numpy.max', 'np.max', (['logconditional'], {}), '(logconditional)\n', (20224, 20240), True, 'import numpy as np\n'), ((20262, 20279), 'numba.prange', 'prange', (['(q_max - 1)'], {}), '(q_max - 1)\n', (20268, 20279), False, 'from numba import jit, prange\n'), ((20398, 20426), 'numpy.sum', 'np.sum', (['logconditional[:, d]'], {}), '(logconditional[:, d])\n', (20404, 20426), True, 'import numpy as np\n'), ((5315, 5324), 'numba.prange', 'prange', (['N'], {}), '(N)\n', (5321, 5324), False, 'from numba import jit, prange\n'), ((5707, 5843), 'numba.jit', 'jit', (["('void(int8[:,:], int8[:,:], int8[:,:], int8[:,:],' +\n 'int8[:,:,:], float64, float64)')"], {'nogil': '(True)', 'nopython': '(True)', 'parallel': '(True)'}), "('void(int8[:,:], int8[:,:], int8[:,:], int8[:,:],' +\n 'int8[:,:,:], float64, float64)', nogil=True, nopython=True, parallel=True)\n", (5710, 5843), False, 'from numba import jit, prange\n'), ((6900, 6909), 'numba.prange', 'prange', (['N'], {}), '(N)\n', (6906, 6909), False, 'from numba import jit, prange\n'), ((7439, 7611), 'numba.jit', 'jit', (["('void(int8[:,:], int8[:,:], int8[:,:], int8[:,:], int8[:,:,:], ' +\n 'float64, int8[:,:], int8[:,:], float64, float64)')"], {'nogil': '(True)', 'nopython': '(True)', 'parallel': '(True)'}), "('void(int8[:,:], int8[:,:], int8[:,:], int8[:,:], int8[:,:,:], ' +\n 'float64, int8[:,:], int8[:,:], float64, float64)', nogil=True,\n nopython=True, parallel=True)\n", (7442, 7611), False, 'from numba import jit, prange\n'), ((8865, 8874), 'numba.prange', 'prange', (['N'], {}), '(N)\n', (8871, 8874), False, 'from numba import jit, prange\n'), ((9287, 9410), 'numba.jit', 'jit', (['"""void(int8[:,:,:], int8[:,:,:], int8[:,:], int8[:,:], float64, float64)"""'], {'nogil': '(True)', 'nopython': '(True)', 'parallel': '(True)'}), "('void(int8[:,:,:], int8[:,:,:], int8[:,:], int8[:,:], float64, float64)',\n nogil=True, nopython=True, parallel=True)\n", (9290, 9410), False, 'from numba import 
jit, prange\n'), ((10758, 10767), 'numba.prange', 'prange', (['N'], {}), '(N)\n', (10764, 10767), False, 'from numba import jit, prange\n'), ((11404, 11590), 'numba.jit', 'jit', (["('void(int8[:,:,:], int8[:,:,:], int8[:,:], int8[:,:], int8[:,:], ' +\n 'float64, int8[:,:], int8[:,:], int8[:,:], float64, float64)')"], {'nogil': '(True)', 'nopython': '(True)', 'parallel': '(True)'}), "('void(int8[:,:,:], int8[:,:,:], int8[:,:], int8[:,:], int8[:,:], ' +\n 'float64, int8[:,:], int8[:,:], int8[:,:], float64, float64)', nogil=\n True, nopython=True, parallel=True)\n", (11407, 11590), False, 'from numba import jit, prange\n'), ((13465, 13474), 'numba.prange', 'prange', (['N'], {}), '(N)\n', (13471, 13474), False, 'from numba import jit, prange\n'), ((19025, 19039), 'numpy.int8', 'np.int8', (['(i + 1)'], {}), '(i + 1)\n', (19032, 19039), True, 'import numpy as np\n'), ((19437, 19449), 'numpy.exp', 'np.exp', (['lbda'], {}), '(lbda)\n', (19443, 19449), True, 'import numpy as np\n'), ((19475, 19488), 'numpy.exp', 'np.exp', (['(-lbda)'], {}), '(-lbda)\n', (19481, 19488), True, 'import numpy as np\n'), ((20320, 20364), 'numpy.exp', 'np.exp', (['(logconditional[q_new, d] - log_p_max)'], {}), '(logconditional[q_new, d] - log_p_max)\n', (20326, 20364), True, 'import numpy as np\n'), ((753, 775), 'math.exp', 'math.exp', (['(-z * logit_p)'], {}), '(-z * logit_p)\n', (761, 775), False, 'import math\n'), ((778, 794), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (792, 794), True, 'import numpy as np\n'), ((5980, 5989), 'numba.prange', 'prange', (['N'], {}), '(N)\n', (5986, 5989), False, 'from numba import jit, prange\n'), ((7763, 7772), 'numba.prange', 'prange', (['N'], {}), '(N)\n', (7769, 7772), False, 'from numba import jit, prange\n'), ((9546, 9555), 'numba.prange', 'prange', (['N'], {}), '(N)\n', (9552, 9555), False, 'from numba import jit, prange\n'), ((11847, 11856), 'numba.prange', 'prange', (['N'], {}), '(N)\n', (11853, 11856), False, 'from numba import jit, 
prange\n'), ((15744, 15759), 'lom.auxiliary_functions.logit', 'logit', (['(m[l] / N)'], {}), '(m[l] / N)\n', (15749, 15759), False, 'from lom.auxiliary_functions import logit, expit\n'), ((17079, 17102), 'scipy.special.gammaln', 'gammaln', (['(L + L_temp + 1)'], {}), '(L + L_temp + 1)\n', (17086, 17102), False, 'from scipy.special import gammaln\n'), ((19799, 19816), 'math.lgamma', 'lgamma', (['(q_new + 2)'], {}), '(q_new + 2)\n', (19805, 19816), False, 'from math import lgamma\n'), ((15050, 15061), 'lom.auxiliary_functions.expit', 'expit', (['lbda'], {}), '(lbda)\n', (15055, 15061), False, 'from lom.auxiliary_functions import logit, expit\n'), ((15176, 15188), 'lom.auxiliary_functions.expit', 'expit', (['(-lbda)'], {}), '(-lbda)\n', (15181, 15188), False, 'from lom.auxiliary_functions import logit, expit\n'), ((15803, 15835), 'lom.auxiliary_functions.expit', 'expit', (['(logit_score + logit_prior)'], {}), '(logit_score + logit_prior)\n', (15808, 15835), False, 'from lom.auxiliary_functions import logit, expit\n'), ((17343, 17360), 'numpy.max', 'np.max', (['log_L_new'], {}), '(log_L_new)\n', (17349, 17360), True, 'import numpy as np\n'), ((17595, 17644), 'numpy.full', 'np.full', (['[N, L_new]'], {'fill_value': '(-1)', 'dtype': 'np.int8'}), '([N, L_new], fill_value=-1, dtype=np.int8)\n', (17602, 17644), True, 'import numpy as np\n'), ((19893, 19931), 'numpy.dot', 'np.dot', (['((Z + 1) / 2)', '((U[d, :] + 1) / 2)'], {}), '((Z + 1) / 2, (U[d, :] + 1) / 2)\n', (19899, 19931), True, 'import numpy as np\n'), ((16730, 16749), 'numpy.array', 'np.array', (['n_predict'], {}), '(n_predict)\n', (16738, 16749), True, 'import numpy as np\n'), ((16796, 16815), 'numpy.array', 'np.array', (['n_predict'], {}), '(n_predict)\n', (16804, 16815), True, 'import numpy as np\n'), ((17022, 17039), 'numpy.log', 'np.log', (['(alpha / N)'], {}), '(alpha / N)\n', (17028, 17039), True, 'import numpy as np\n'), ((18037, 18055), 'lom.auxiliary_functions.expit', 'expit', (['logit_score'], {}), 
'(logit_score)\n', (18042, 18055), False, 'from lom.auxiliary_functions import logit, expit\n'), ((19758, 19771), 'numpy.log', 'np.log', (['gamma'], {}), '(gamma)\n', (19764, 19771), True, 'import numpy as np\n'), ((17711, 17746), 'numpy.zeros', 'np.zeros', (['[D, L_new]'], {'dtype': 'np.int8'}), '([D, L_new], dtype=np.int8)\n', (17719, 17746), True, 'import numpy as np\n'), ((15378, 15390), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (15387, 15390), True, 'import numpy as np\n')] |
"""
nmrglue table functions.
nmrglue uses numpy records array as stores of various data (peak tables,
trajectories, etc). This module provides functions to read and write records
arrays from disk. Formatting of the numeric values is left to Python's str
function and only the data type need be specified. In addition this module
contains functions to convert nmrglue's table format to NMRPipe's table format.
"""
import numpy as np
from . import fileiobase
def pipe2glue(pcomments, pformat, rec):
    """
    Convert a NMRPipe table to a nmrglue table.

    Parameters
    ----------
    pcomments : list
        List of NMRPipe comment lines.
    pformat : list
        List of NMRPipe table column format strings (not needed for the
        conversion, accepted for interface symmetry with glue2pipe).
    rec : recarray
        Records array with named fields.

    Returns
    -------
    comments : list
        List of comments, each prefixed with "# ".
    rec : recarray
        Records array with named fields, passed through unchanged.
    """
    # the records array needs no conversion; only the comment prefix differs
    comments = []
    for line in pcomments:
        comments.append("# " + line)
    return comments, rec
def glue2pipe(comments, rec):
    """
    Convert a nmrglue table to a NMRPipe table.

    Parameters
    ----------
    comments : list
        List of comments.
    rec : recarray
        Records array with named fields.

    Returns
    -------
    pcomments : list
        List of NMRPipe comment lines, each prefixed with "REMARK ".
    pformat : list
        List of NMRPipe table column format strings, guessed from the data
        types and precision in the records array. This may not be the exact
        format desired; edit it to your liking.
    rec : recarray
        Records array with named fields, passed through unchanged.
    """
    # prefix every comment with REMARK
    pcomments = ["REMARK " + line for line in comments]

    # guess a NMRPipe format string for each column
    pformat = []
    for field in rec.dtype.names:
        pformat.append(guess_pformat(rec[field]))
    return pcomments, pformat, rec
def guess_pformat(col):
    """
    Guess a NMRPipe table column format string given a column.

    Parameters
    ----------
    col : ndarray
        Array from a records array.

    Returns
    -------
    s : str
        String for formatting NMRPipe table.

    Raises
    ------
    ValueError
        When the column kind is not string, integer or float.
    """
    kind = col.dtype.kind
    if kind == 'S' or kind == 'a':  # string
        return '%s'

    if kind == 'i' or kind == 'u':  # integer (signed or unsigned)
        # N is the number of digits in the largest absolute value, or 1.
        # Counting via len(str(...)) fixes the previous ceil(log10) formula,
        # which under-counted exact powers of ten (1000 has 4 digits, not
        # ceil(log10(1000)) == 3) and raised a RuntimeWarning via np.log(0)
        # for all-zero columns.
        N = _column_digits(col)
        # +1 for sign
        return '%{0}d'.format(N + 1)

    if kind == 'f':
        # will be either %+e or %N.3f; use scientific notation if any value
        # would print with an 'e' under the general-purpose %g format
        if True in ['e' in '%g' % (v) for v in col]:
            return '%+e'
        N = _column_digits(col)
        # +1 for sign, +1 for decimal point, +3 for precision
        return '%{0}.3f'.format(N + 5)

    # remaining kinds: 'c' - complex, 'b' - boolean, 'U' - unicode, 'V' - void
    raise ValueError("unknown kind %s in column" % (kind))


def _column_digits(col):
    """Digit count of the largest absolute integer part in col (at least 1)."""
    return max(len(str(int(np.abs(col).max()))), 1)
def read(filename):
    """
    Read a nmrglue table file.

    Parameters
    ----------
    filename : str
        Filename of nmrglue table file to read.

    Returns
    -------
    comments : list
        List of comments (strings terminated with newline).
    rec : recarray
        Records array with named fields.

    Raises
    ------
    IOError
        When the file does not contain exactly one "# NAMES" and one
        "# FORMATS" header line.
    """
    # pull out the comment lines from the file (start with #).
    # BUG FIX: the file was previously opened in binary mode, so each line
    # was a bytes object whose slices never compared equal to the "#"
    # string markers on Python 3 and the header lines were never found.
    with open(filename, 'r') as f:
        comments = [l for l in f if l.startswith('#')]

    # find the line beginning with # NAMES and parse out the column names
    nl = [i for i, l in enumerate(comments) if l[:7] == "# NAMES"]
    if len(nl) != 1:
        raise IOError("%s does not have a # NAMES line" % (filename))
    dtd = {'names': comments.pop(nl[0])[7:].split()}

    # find the line beginning with # FORMATS and parse out the column formats
    dl = [i for i, l in enumerate(comments) if l[:9] == "# FORMATS"]
    if len(dl) != 1:
        raise IOError("%s does not have a # FORMATS line" % (filename))
    dtd['formats'] = comments.pop(dl[0])[9:].split()

    # return the data as a records array; np.recfromtxt was removed in
    # NumPy 2.0 -- genfromtxt plus a recarray view is the equivalent
    # (genfromtxt skips the '#' comment lines by default)
    rec = np.genfromtxt(filename, dtype=dtd).view(np.recarray)
    return comments, np.atleast_1d(rec)
def write(filename, comments, rec, overwrite=False):
    """
    Write a nmrglue table to file.

    Parameters
    ----------
    filename : str
        Filename of file to write table to.
    comments : list
        List of comments (strings terminated with newline).
    rec : recarray
        Records array to write to file.
    overwrite : bool, optional
        True to overwrite file if it exists. False will raise an Warning
        if the file exists.
    """
    f = fileiobase.open_towrite(filename, overwrite)

    # the comment block goes first
    for line in comments:
        f.write(line)

    # header lines: column names and their formats (dtype kind + itemsize)
    names = rec.dtype.names
    formats = [rec.dtype[name].kind + str(rec.dtype[name].itemsize)
               for name in names]
    f.write("# NAMES " + " ".join(names) + "\n")
    f.write("# FORMATS " + " ".join(formats) + "\n")

    # pad each column to the width of its widest entry, then emit the rows
    widths = [max(len(str(value)) for value in rec[name]) for name in names]
    for row in rec:
        padded = [str(value).ljust(width)
                  for value, width in zip(row, widths)]
        f.write(" ".join(padded) + "\n")
    f.close()
# Row functions
def insert_row(rec, N, row):
    """
    Insert a row into a records array before row number N.

    Parameters
    ----------
    rec : recarray
        Records array.
    N : int
        Row number to insert new row before.
    row : array_like
        Array or similar object which will be converted into a new row.

    Returns
    -------
    new_rec : recarray
        New records array with inserted row; rec itself is left untouched.
    """
    # np.insert converts row to rec's dtype and returns a fresh array
    new_rec = np.insert(rec, N, row)
    return new_rec
def append_row(rec, row):
    """
    Append a row to the end of a records array.

    Parameters
    ----------
    rec : recarray
        Records array.
    row : array_like
        Array or similar object which will be converted into a new row.

    Returns
    -------
    new_rec : recarray
        New records array with the row appended; rec itself is untouched.
    """
    # appending is inserting before the (virtual) row at index len(rec)
    return np.insert(rec, len(rec), row)
def delete_row(rec, N):
    """
    Delete a row from a records array.

    Parameters
    ----------
    rec : recarray
        Records array.
    N : int
        Row number to delete.

    Returns
    -------
    new_rec : recarray
        New records array with the row removed; rec itself is untouched.

    See Also
    --------
    reorder_rows : delete multiple rows in a single call.
    """
    new_rec = np.delete(rec, N)
    return new_rec
def reorder_rows(rec, new_order):
    """
    Reorder or delete rows in a records array.

    Only the rows listed in new_order are kept, in the given order, so this
    function doubles as a multi-row delete.

    Parameters
    ----------
    rec : recarray
        Records array.
    new_order : list
        Row indices, in the order they should appear in the new records
        array. Indices absent from this list are dropped.

    Returns
    -------
    new_rec : recarray
        New records array with rows reordered; rec itself is untouched.
    """
    reordered = np.take(rec, new_order)
    return reordered
# Column functions
def append_column(rec, col, name=None, format=None):
    """
    Append a column to the end of a records array.

    Parameters
    ----------
    rec : recarray
        Records array.
    col : array_like
        Array or similar object which will be converted into the new column.
    name : str, optional
        Name of the column. If None the name stored in col's dtype is used.
    format : dtype, optional
        Data type to convert the new column into before appending. Required
        if col is not an ndarray.

    Returns
    -------
    new_rec : recarray
        New records array with the column appended.
    """
    # appending is inserting at position == current number of columns
    last = len(rec.dtype.descr)
    return insert_column(rec, last, col, name, format)
def insert_column(rec, N, col, name=None, format=None):
    """
    Insert a column into a records array.

    Parameters
    ----------
    rec : recarray
        Records array.
    N : int
        Column number to insert the new column before.
    col : array_like
        Array or similar object which will be converted into the new column.
    name : str, optional
        Name of the column. If None col.dtypes.name will be used.
    format : dtype, optional
        Data type to convert the new column into before inserting. Required
        if col is not an ndarray.

    Returns
    -------
    new_rec : recarray
        New records array with the column inserted.
    """
    col = np.array(col)
    # Fall back on the column's own dtype metadata when name/format
    # are not given explicitly.
    if name is None:
        if col.dtype.names is None:
            raise ValueError("Must provide a name for the column")
        name = col.dtype.names
    if format is None:
        format = col.dtype.str
    # Splice the new (name, format) pair into the dtype description.
    field_descr = rec.dtype.descr
    field_descr.insert(N, (name, format))
    # Allocate the widened table and copy the existing fields over.
    out = np.empty(rec.shape, field_descr)
    for field in rec.dtype.names:
        out[field] = rec[field]
    # Finally fill in the new column, converted to the requested dtype.
    out[name] = col.astype(format)
    return np.atleast_1d(np.rec.array(out))
def delete_column(rec, N):
    """
    Delete column number *N* from a records array.

    Parameters
    ----------
    rec : recarray
        Records array.
    N : int
        Column number to delete.

    Returns
    -------
    new_rec : recarray
        New records array with the column removed.

    See Also
    --------
    reorder_columns : Delete multiple columns from a records array.
    """
    # Drop the field from the dtype description, then copy the
    # surviving fields into a freshly allocated table.
    remaining = rec.dtype.descr
    remaining.pop(N)
    out = np.empty(rec.shape, remaining)
    for field in out.dtype.names:
        out[field] = rec[field]
    return np.atleast_1d(np.rec.array(out))
def reorder_columns(rec, new_order):
    """
    Reorder (or drop) columns of a records array.

    Only the column indices listed in *new_order* survive, in the order
    given -- so this doubles as a multi-column delete.

    Parameters
    ----------
    rec : recarray
        Records array.
    new_order : list
        Column indices to keep, in their new order.

    Returns
    -------
    new_rec : recarray
        New records array with the columns rearranged.
    """
    # Build the new dtype from the selected field descriptions, then
    # copy each surviving field across.
    descr = rec.dtype.descr
    out = np.empty(rec.shape, [descr[i] for i in new_order])
    for field in out.dtype.names:
        out[field] = rec[field]
    return np.atleast_1d(np.rec.array(out))
| [
"numpy.insert",
"numpy.abs",
"numpy.delete",
"numpy.recfromtxt",
"numpy.log",
"numpy.take",
"numpy.array",
"numpy.empty",
"numpy.rec.array"
] | [((5991, 6013), 'numpy.insert', 'np.insert', (['rec', 'N', 'row'], {}), '(rec, N, row)\n', (6000, 6013), True, 'import numpy as np\n'), ((6804, 6821), 'numpy.delete', 'np.delete', (['rec', 'N'], {}), '(rec, N)\n', (6813, 6821), True, 'import numpy as np\n'), ((7526, 7549), 'numpy.take', 'np.take', (['rec', 'new_order'], {}), '(rec, new_order)\n', (7533, 7549), True, 'import numpy as np\n'), ((8957, 8970), 'numpy.array', 'np.array', (['col'], {}), '(col)\n', (8965, 8970), True, 'import numpy as np\n'), ((9459, 9483), 'numpy.empty', 'np.empty', (['rec.shape', 'dtd'], {}), '(rec.shape, dtd)\n', (9467, 9483), True, 'import numpy as np\n'), ((10238, 10262), 'numpy.empty', 'np.empty', (['rec.shape', 'dtd'], {}), '(rec.shape, dtd)\n', (10246, 10262), True, 'import numpy as np\n'), ((11085, 11113), 'numpy.empty', 'np.empty', (['rec.shape', 'new_dtd'], {}), '(rec.shape, new_dtd)\n', (11093, 11113), True, 'import numpy as np\n'), ((9663, 9684), 'numpy.rec.array', 'np.rec.array', (['new_rec'], {}), '(new_rec)\n', (9675, 9684), True, 'import numpy as np\n'), ((10350, 10371), 'numpy.rec.array', 'np.rec.array', (['new_rec'], {}), '(new_rec)\n', (10362, 10371), True, 'import numpy as np\n'), ((11201, 11222), 'numpy.rec.array', 'np.rec.array', (['new_rec'], {}), '(new_rec)\n', (11213, 11222), True, 'import numpy as np\n'), ((4151, 4185), 'numpy.recfromtxt', 'np.recfromtxt', (['filename'], {'dtype': 'dtd'}), '(filename, dtype=dtd)\n', (4164, 4185), True, 'import numpy as np\n'), ((2425, 2435), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (2431, 2435), True, 'import numpy as np\n'), ((2746, 2756), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (2752, 2756), True, 'import numpy as np\n'), ((2404, 2415), 'numpy.abs', 'np.abs', (['col'], {}), '(col)\n', (2410, 2415), True, 'import numpy as np\n'), ((2725, 2736), 'numpy.abs', 'np.abs', (['col'], {}), '(col)\n', (2731, 2736), True, 'import numpy as np\n')] |
from setuptools import setup
from Cython.Build import cythonize
from numpy import get_include
# Build each Cython extension in turn. Every setup() invocation reacts to
# the same command-line arguments (e.g. "build_ext --inplace") passed to
# this script, so the three modules are compiled in sequence.
for module_name, pyx_source in (
        ('vis_precision', 'vis_precision.pyx'),
        ('dnb_int32', 'dnb_int32.pyx'),
        ('dnb_float32', 'dnb_float32.pyx'),
):
    setup(
        name=module_name,
        ext_modules=cythonize(pyx_source),
        include_dirs=[get_include()],  # numpy headers for the generated C
        zip_safe=False,
    )
| [
"Cython.Build.cythonize",
"numpy.get_include"
] | [((150, 180), 'Cython.Build.cythonize', 'cythonize', (['"""vis_precision.pyx"""'], {}), "('vis_precision.pyx')\n", (159, 180), False, 'from Cython.Build import cythonize\n'), ((291, 317), 'Cython.Build.cythonize', 'cythonize', (['"""dnb_int32.pyx"""'], {}), "('dnb_int32.pyx')\n", (300, 317), False, 'from Cython.Build import cythonize\n'), ((430, 458), 'Cython.Build.cythonize', 'cythonize', (['"""dnb_float32.pyx"""'], {}), "('dnb_float32.pyx')\n", (439, 458), False, 'from Cython.Build import cythonize\n'), ((201, 214), 'numpy.get_include', 'get_include', ([], {}), '()\n', (212, 214), False, 'from numpy import get_include\n'), ((338, 351), 'numpy.get_include', 'get_include', ([], {}), '()\n', (349, 351), False, 'from numpy import get_include\n'), ((479, 492), 'numpy.get_include', 'get_include', ([], {}), '()\n', (490, 492), False, 'from numpy import get_include\n')] |
#!/usr/bin/env python
'''
The basic usage of range-separated Gaussian density fitting (RSGDF or simply
RSDF), including choosing an auxiliary basis, initializing 3c integrals, saving
and reading 3c integrals, jk build, ao2mo, etc is the same as that of the
GDF module. Please refer to the following examples for details:
- 21-k_points_all_electron_scf.py # SCF
- 30-ao_integrals.py # compute ao integrals
- 35-gaussian_density_fit.py # auxiliary basis, save & load cderi,
# and loop over 3c integrals
This script highlights special settings of the RSGDF module.
Note: currently RSDF does NOT support low-dimensional systems (0D ~ 2D).
'''
import numpy as np
from pyscf import gto as mol_gto
from pyscf.pbc import gto, scf, cc, df, mp
cell = gto.Cell()
cell.atom='''
C 0.000000000000 0.000000000000 0.000000000000
C 1.685068664391 1.685068664391 1.685068664391
'''
cell.basis = 'cc-pvdz'
cell.a = '''
0.000000000, 3.370137329, 3.370137329
3.370137329, 0.000000000, 3.370137329
3.370137329, 3.370137329, 0.000000000'''
cell.unit = 'B'
cell.verbose = 6
cell.build()
# 2x1x1 Monkhorst-Pack k-point mesh for the diamond primitive cell above.
kmesh = [2,1,1]
kpts = cell.make_kpts(kmesh)
#
# Use the 'rs_density_fit' method provided by all PBC SCF classes (including
# DFT) for using RSDF to handle the density fitting.
#
mf = scf.KRHF(cell, kpts).rs_density_fit()
mf.kernel()
#
# One can also initialize a RSDF instance separately and overwrite 'SCF.with_df'
#
mydf = df.RSDF(cell, kpts)
mf = scf.KRKS(cell, kpts).rs_density_fit()
mf.with_df = mydf
mf.xc = "pbe"
mf.kernel()
#
# RSDF calculates the DF 3c integrals in two parts:
#     j3c = j3c_SR + j3c_LR
# where SR and LR stand for short-range and long-range, respectively.
# The parameter 'omega' determines the cost of the two parts: the larger omega
# is, the faster (slower) the SR (LR) part is computed, and vice versa.
# By default, the code will determine an optimal value for 'omega' that is
# suitable for most cases. The user can nonetheless test the effect of using
# different values of 'omega'. The few lines below do this and verify that the
# results (in terms of both the HF and the MP2 energies) are not affected.
# In the output file, you can also
#   grep -a "CPU time for j3c" [output_file]
# to see how the DF initialization time is affected by using different omega.
#
omegas = np.array([0.3, 0.5, 0.7, 0.9])
escfs = np.zeros_like(omegas)
emp2s = np.zeros_like(omegas)
for i,omega in enumerate(omegas):
    # Fresh KRHF per omega so each run initializes its own 3c integrals.
    mf = scf.KRHF(cell, kpts).rs_density_fit()
    mf.with_df.omega = omega
    mf.kernel()
    escfs[i] = mf.e_tot
    # MP2 on top of the converged SCF; t2 amplitudes not stored (saves memory).
    mmp = mp.KMP2(mf)
    mmp.with_t2 = False
    mmp.kernel()
    emp2s[i] = mmp.e_corr
# Report per-omega energies and the spread across omegas, which should be
# negligible (see the example output at the bottom of this file).
for omega, escf, emp2 in zip(omegas, escfs, emp2s):
    print("%.2f %.10f %.10f" % (omega, escf, emp2))
maxdiffescf = escfs.max()-escfs.min()
maxdiffemp2 = emp2s.max()-emp2s.min()
print("Maximum difference in SCF energy: %.10f" % (maxdiffescf))
print("Maximum difference in MP2 energy: %.10f" % (maxdiffemp2))
''' example output:
0.30 -75.3226526450 -0.2242441141
0.50 -75.3226526440 -0.2242441145
0.70 -75.3226526451 -0.2242441148
0.90 -75.3226526455 -0.2242441143
Maximum difference in SCF energy: 0.0000000015
Maximum difference in MP2 energy: 0.0000000007
'''
| [
"numpy.array",
"pyscf.pbc.scf.KRHF",
"pyscf.pbc.gto.Cell",
"pyscf.pbc.df.RSDF",
"numpy.zeros_like",
"pyscf.pbc.mp.KMP2",
"pyscf.pbc.scf.KRKS"
] | [((807, 817), 'pyscf.pbc.gto.Cell', 'gto.Cell', ([], {}), '()\n', (815, 817), False, 'from pyscf.pbc import gto, scf, cc, df, mp\n'), ((1467, 1486), 'pyscf.pbc.df.RSDF', 'df.RSDF', (['cell', 'kpts'], {}), '(cell, kpts)\n', (1474, 1486), False, 'from pyscf.pbc import gto, scf, cc, df, mp\n'), ((2357, 2387), 'numpy.array', 'np.array', (['[0.3, 0.5, 0.7, 0.9]'], {}), '([0.3, 0.5, 0.7, 0.9])\n', (2365, 2387), True, 'import numpy as np\n'), ((2396, 2417), 'numpy.zeros_like', 'np.zeros_like', (['omegas'], {}), '(omegas)\n', (2409, 2417), True, 'import numpy as np\n'), ((2426, 2447), 'numpy.zeros_like', 'np.zeros_like', (['omegas'], {}), '(omegas)\n', (2439, 2447), True, 'import numpy as np\n'), ((2608, 2619), 'pyscf.pbc.mp.KMP2', 'mp.KMP2', (['mf'], {}), '(mf)\n', (2615, 2619), False, 'from pyscf.pbc import gto, scf, cc, df, mp\n'), ((1323, 1343), 'pyscf.pbc.scf.KRHF', 'scf.KRHF', (['cell', 'kpts'], {}), '(cell, kpts)\n', (1331, 1343), False, 'from pyscf.pbc import gto, scf, cc, df, mp\n'), ((1492, 1512), 'pyscf.pbc.scf.KRKS', 'scf.KRKS', (['cell', 'kpts'], {}), '(cell, kpts)\n', (1500, 1512), False, 'from pyscf.pbc import gto, scf, cc, df, mp\n'), ((2491, 2511), 'pyscf.pbc.scf.KRHF', 'scf.KRHF', (['cell', 'kpts'], {}), '(cell, kpts)\n', (2499, 2511), False, 'from pyscf.pbc import gto, scf, cc, df, mp\n')] |
from sklearn.utils import shuffle
import numpy as np
from donkeycar.parts.augment import augment_image
from donkeycar.parts.datastore import Tub
from donkeycar.utils import load_scaled_image_arr
import keras
def vae_generator(cfg, data, batch_size, isTrainSet=True, min_records_to_train=1000, aug=False, aux=None, pilot=False):
    """
    Infinite generator yielding (X, y) batches for VAE training.

    Parameters
    ----------
    cfg : donkeycar config; IMAGE_H / IMAGE_W / IMAGE_DEPTH and
        CACHE_IMAGES are read from it.
    data : dict of records keyed by record id; each record carries
        'train', 'img_data', 'image_path' and 'json_data' entries.
    batch_size : number of records per yielded batch.
    isTrainSet : keep only records whose 'train' flag equals this value.
    min_records_to_train : unused -- kept for interface compatibility.
    aug : if True, pass each freshly loaded image through augment_image().
    aux : optional key into record['json_data'] for an auxiliary
        categorical output (one-hot encoded over 7 classes).
    pilot : if True, also emit 'steering_output' / 'throttle_output'.

    Yields
    ------
    X : ndarray of shape (batch_size, IMAGE_H, IMAGE_W, IMAGE_DEPTH)
    y : dict with 'main_output' (== X) plus the optional pilot/aux outputs.
    """
    num_records = len(data)  # NOTE(review): unused, like min_records_to_train
    while True:
        batch_data = []
        # Re-shuffle the record keys on every pass over the dataset
        keys = list(data.keys())
        keys = shuffle(keys)
        for key in keys:
            if not key in data:
                continue
            _record = data[key]
            # Only keep records from the requested (train/validation) split
            if _record['train'] != isTrainSet:
                continue
            batch_data.append(_record)
            if len(batch_data) == batch_size:
                inputs_img = []
                aux_out = []
                steering = []
                throttle = []
                for record in batch_data:
                    img_arr = None
                    #get image data if we don't already have it
                    if record['img_data'] is None:
                        img_arr = load_scaled_image_arr(record['image_path'], cfg)
                        if img_arr is None:
                            # NOTE(review): this break abandons the record loop
                            # with fewer than batch_size images collected; the
                            # reshape below would then raise. Consider skipping
                            # the record and/or the yield instead.
                            break
                        if aug:
                            img_arr = augment_image(img_arr)
                        if cfg.CACHE_IMAGES:
                            record['img_data'] = img_arr
                    else:
                        img_arr = record['img_data']
                    if img_arr is None:
                        continue
                    inputs_img.append(img_arr)
                    if aux is not None:
                        if aux in record['json_data']:
                            aux_out.append(record['json_data'][aux])
                        else:
                            # NOTE(review): continuing here leaves steering/
                            # throttle shorter than inputs_img -- confirm the
                            # consumer tolerates the length mismatch.
                            print( "Missing aux data in: {}".format( record ) )
                            continue
                    st, th = Tub.get_angle_throttle(record['json_data'])
                    steering.append(st)
                    throttle.append(th)
                # Stack the batch into the dense tensor expected by the model
                X = np.array(inputs_img).reshape(batch_size, cfg.IMAGE_H, cfg.IMAGE_W, cfg.IMAGE_DEPTH)
                y = {'main_output': X}
                if pilot:
                    y['steering_output'] = np.array(steering)
                    y['throttle_output'] = np.array(throttle)
                if aux is not None:
                    # One-hot encode the auxiliary labels (7 classes assumed)
                    aux_out = keras.utils.to_categorical(aux_out, num_classes=7)
                    y['aux_output'] = aux_out
                yield X, y
                batch_data = []
if __name__ == "__main__":
    # Smoke test: build a generator from the tubs listed on the command line
    # and print the shapes/statistics of the first batch it produces.
    import argparse
    import donkeycar as dk
    from donkeycar.templates.train import collate_records, preprocessFileList
    from donkeycar.utils import gather_records
    parser = argparse.ArgumentParser(description='Test VAE data loader.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--aux', default=None, help='Name of the auxilliary data to use.')
    parser.add_argument('file', nargs='+', help='Text file with a list of tubs to train on.')
    args = parser.parse_args()
    try:
        cfg = dk.load_config()
    except FileNotFoundError:
        cfg = dk.load_config("config.py") # retry in the current directory
    tub_names = preprocessFileList( args.file )
    input_shape = (cfg.IMAGE_W, cfg.IMAGE_H, cfg.IMAGE_DEPTH)
    # Code for multiple inputs: http://digital-thinking.de/deep-learning-combining-numerical-and-text-features-in-deep-neural-networks/
    aux_out = 0
    if args.aux is not None:
        aux_out = 7 # need to get number of aux outputs from data
    opts = { 'cfg' : cfg}
    opts['categorical'] = False
    opts['continuous'] = False
    # Gather all records from the requested tubs and index them by key
    gen_records = {}
    records = gather_records(cfg, tub_names, verbose=True)
    collate_records(records, gen_records, opts)
    train_gen = vae_generator(cfg, gen_records, cfg.BATCH_SIZE, isTrainSet=True, aug=False, aux=args.aux, pilot=True)
    # Pull a single batch from the infinite generator, then stop
    for X, y in train_gen:
        print( "X {} {}".format( type(X[0]), X[0].shape ) )
        img = y['main_output'][0]
        print( "main {} min/max/avg: {}/{}/{}".format( img.shape, np.min(img), np.max(img), np.mean(img) ) )
        if 'aux_output' in y:
            print( "aux {}".format( y['aux_output'].shape ) )
        if 'steering_output' in y:
            print( "Steering {}".format( y['steering_output'].shape ) )
        break
| [
"donkeycar.templates.train.collate_records",
"numpy.mean",
"argparse.ArgumentParser",
"donkeycar.parts.datastore.Tub.get_angle_throttle",
"sklearn.utils.shuffle",
"donkeycar.parts.augment.augment_image",
"donkeycar.load_config",
"numpy.max",
"keras.utils.to_categorical",
"donkeycar.utils.gather_re... | [((2867, 2987), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Test VAE data loader."""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Test VAE data loader.',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (2890, 2987), False, 'import argparse\n'), ((3363, 3392), 'donkeycar.templates.train.preprocessFileList', 'preprocessFileList', (['args.file'], {}), '(args.file)\n', (3381, 3392), False, 'from donkeycar.templates.train import collate_records, preprocessFileList\n'), ((3826, 3870), 'donkeycar.utils.gather_records', 'gather_records', (['cfg', 'tub_names'], {'verbose': '(True)'}), '(cfg, tub_names, verbose=True)\n', (3840, 3870), False, 'from donkeycar.utils import gather_records\n'), ((3875, 3918), 'donkeycar.templates.train.collate_records', 'collate_records', (['records', 'gen_records', 'opts'], {}), '(records, gen_records, opts)\n', (3890, 3918), False, 'from donkeycar.templates.train import collate_records, preprocessFileList\n'), ((453, 466), 'sklearn.utils.shuffle', 'shuffle', (['keys'], {}), '(keys)\n', (460, 466), False, 'from sklearn.utils import shuffle\n'), ((3225, 3241), 'donkeycar.load_config', 'dk.load_config', ([], {}), '()\n', (3239, 3241), True, 'import donkeycar as dk\n'), ((3286, 3313), 'donkeycar.load_config', 'dk.load_config', (['"""config.py"""'], {}), "('config.py')\n", (3300, 3313), True, 'import donkeycar as dk\n'), ((4227, 4238), 'numpy.min', 'np.min', (['img'], {}), '(img)\n', (4233, 4238), True, 'import numpy as np\n'), ((4240, 4251), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (4246, 4251), True, 'import numpy as np\n'), ((4253, 4265), 'numpy.mean', 'np.mean', (['img'], {}), '(img)\n', (4260, 4265), True, 'import numpy as np\n'), ((2008, 2051), 'donkeycar.parts.datastore.Tub.get_angle_throttle', 'Tub.get_angle_throttle', (["record['json_data']"], {}), "(record['json_data'])\n", (2030, 2051), False, 'from 
donkeycar.parts.datastore import Tub\n'), ((2346, 2364), 'numpy.array', 'np.array', (['steering'], {}), '(steering)\n', (2354, 2364), True, 'import numpy as np\n'), ((2408, 2426), 'numpy.array', 'np.array', (['throttle'], {}), '(throttle)\n', (2416, 2426), True, 'import numpy as np\n'), ((2494, 2544), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['aux_out'], {'num_classes': '(7)'}), '(aux_out, num_classes=7)\n', (2520, 2544), False, 'import keras\n'), ((1092, 1140), 'donkeycar.utils.load_scaled_image_arr', 'load_scaled_image_arr', (["record['image_path']", 'cfg'], {}), "(record['image_path'], cfg)\n", (1113, 1140), False, 'from donkeycar.utils import load_scaled_image_arr\n'), ((2153, 2173), 'numpy.array', 'np.array', (['inputs_img'], {}), '(inputs_img)\n', (2161, 2173), True, 'import numpy as np\n'), ((1315, 1337), 'donkeycar.parts.augment.augment_image', 'augment_image', (['img_arr'], {}), '(img_arr)\n', (1328, 1337), False, 'from donkeycar.parts.augment import augment_image\n')] |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# zmq_SUB_proc.py
# Author: <NAME>
import zmq
import numpy as np
import time
import matplotlib.pyplot as plt
# Subscribe to the raw bit stream published on tcp://127.0.0.1:4030, scan it
# for a fixed 30-bit preamble, and once found print the following
# message_len bits as the message body.
context = zmq.Context()
socket = context.socket(zmq.SUB)
socket.connect("tcp://127.0.0.1:4030") # connect, not bind, the PUB will bind, only 1 can bind
socket.setsockopt(zmq.SUBSCRIBE, b'') # subscribe to topic of all (needed or else it won't work)
buffer = []       # sliding window of received bits, scanned for the preamble
preamb = [1,0,1,1,0,0,1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0]
found = False     # True while a message body is being collected
message_len = 128 # number of bits in one message body
message = []
while True:
    if socket.poll(10) != 0: # check if there is a message on the socket
        msg = socket.recv() # grab the message
        # print(len(msg)) # size of msg
        # print(msg)
        data = np.frombuffer(msg, dtype=np.ubyte, count=-1) # make sure to use correct data type (complex64 or float32); '-1' means read all data in the buffer
        #print(data[0:8])
        #print(data[2:10])
        #print(data)
        #print([x>>7 for x in data])
        # plt.plot(np.real(data))
        # plt.plot(np.imag(data))
        # plt.show()
        # print(data)
        buffer.extend(data)
        if found:
            # Accumulate bits until a complete message body has arrived
            message.extend(buffer)
            if len(message) >= message_len:
                message = message[:message_len]
                found = False
                print(message)
                message = []
        # NOTE(review): `found is not None` is always True (found is a bool);
        # was `not found` intended, to skip re-scanning while collecting?
        if len(buffer) > 100 and (found is not None):
            for i in range(len(buffer)-len(preamb)):
                for j in range(len(preamb)):
                    if buffer[i+j] != preamb[j]:
                        break
                else:
                    # NOTE(review): after the for/else, j == len(preamb)-1, so
                    # buffer[i+j:] still contains the last preamble bit --
                    # buffer[i+len(preamb):] looks intended. TODO confirm.
                    print("Found_prefix")
                    found = True
                    message.extend(buffer[i+j:])
                    buffer = []
                    break
        if len(buffer) > 150:
            # NOTE(review): this keeps the OLDEST 100 bits and discards the
            # newest data; buffer = buffer[-100:] would keep the most recent.
            buffer = buffer[:100]
    else:
        time.sleep(0.1) # wait 100ms and try again
| [
"numpy.frombuffer",
"zmq.Context",
"time.sleep"
] | [((165, 178), 'zmq.Context', 'zmq.Context', ([], {}), '()\n', (176, 178), False, 'import zmq\n'), ((742, 786), 'numpy.frombuffer', 'np.frombuffer', (['msg'], {'dtype': 'np.ubyte', 'count': '(-1)'}), '(msg, dtype=np.ubyte, count=-1)\n', (755, 786), True, 'import numpy as np\n'), ((1903, 1918), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (1913, 1918), False, 'import time\n')] |
"""
Neural Network
by <NAME>
<EMAIL>
"""
import numpy as np
from NeuralNetwork.Activation import fSigmoid as fActivation, dSigmoid as dActivation
from NeuralNetwork.Cost import fQuadratic as fCost, dQuadratic as dCost
class NeuralNetwork:
    """
    Fully connected feed-forward neural network trained by backpropagation,
    with optional per-sample weighting of the gradient contributions.
    """
    def __init__(self, dims):
        """
        Creates a neural network respecting the different given dimensions.

        dims is a list of layer sizes: the first entry is the number of
        inputs and the last the number of outputs. The network is fully
        connected; weights and biases are drawn from a seeded standard
        normal distribution so construction is reproducible.
        """
        self.layersNumber = len(dims) - 1
        self.weights = []
        self.biases = []
        np.random.seed(42)
        for d in range(self.layersNumber):
            self.weights.append(np.random.randn(dims[d+1], dims[d]))
            self.biases.append(np.random.randn(dims[d+1], 1))

    def compute(self, inputs):
        """
        Computes the result of the network by forward propagation.
        """
        res = inputs
        for layer in range(self.layersNumber):
            weight = self.weights[layer]
            bias = self.biases[layer]
            res = fActivation(np.dot(weight, res) + bias)
        return res

    def backpropagationWeighted(self, inputs, inputsWeights, targets,
                                learningRate, batchSize, maxIteration):
        """
        Mini-batch gradient descent where each training sample's gradient
        contribution is scaled by its weight in inputsWeights.
        Standard backpropagation is recovered when all weights equal one.

        Returns (errorVector, classifiedVector) for the last iteration.

        Fixes vs. the previous revision:
        - sample gradients are scaled by inputsWeights[i] (the weight of
          sample i) rather than inputsWeights[j] (j was a *layer* index),
          resolving the old TODO;
        - the error driving the learning-rate decay is accumulated per
          iteration instead of over the whole run (the old cumulative
          error could only grow, so the rate was halved every iteration);
        - the batch loop no longer drops the last full batch
          (range(len(targets)//batchSize - 1) skipped one usable batch).
        """
        error, pastError = 0, 0
        errorVector, classifiedVector = [], []
        for iteration in range(maxIteration):
            # Halve the learning rate whenever the previous iteration's
            # total error increased.
            if iteration > 1 and error > pastError:
                learningRate /= 2
            pastError = error
            error = 0
            errorVector, classifiedVector = [], []
            for batch in range(len(targets)//batchSize):
                totalDiffWeight = [np.zeros(weight.shape) for weight in self.weights]
                totalDiffBias = [np.zeros(bias.shape) for bias in self.biases]
                # Accumulate the per-sample (weighted) gradients of the batch
                for i in range(batch*batchSize, (batch+1)*batchSize):
                    diffWeight, diffBias, diffError, classified = self.computeDiff(inputs[i], targets[i])
                    totalDiffWeight = [totalDiffWeight[j] + diffWeight[j]*inputsWeights[i]
                                       for j in range(len(totalDiffWeight))]
                    totalDiffBias = [totalDiffBias[j] + diffBias[j]*inputsWeights[i]
                                     for j in range(len(totalDiffBias))]
                    error += diffError
                    errorVector.append(diffError)
                    classifiedVector.append(classified)
                # Gradient-descent step on every layer
                self.weights = [self.weights[j] - learningRate*totalDiffWeight[j]
                                for j in range(len(totalDiffWeight))]
                self.biases = [self.biases[j] - learningRate*totalDiffBias[j]
                               for j in range(len(totalDiffBias))]
            print("{} / {}".format(iteration+1, maxIteration), end = '\r')
        print("\nBackPropagation done !")
        return errorVector, classifiedVector

    def computeDiff(self, input, target):
        """
        One forward/backward pass for a single sample.

        Returns (diffWeight, diffBias, diffError, classified): per-layer
        weight and bias gradients, the summed cost for this sample, and
        whether argmax(output) == argmax(target).
        """
        diffWeight = [np.zeros(weight.shape) for weight in self.weights]
        diffBias = [np.zeros(bias.shape) for bias in self.biases]
        # Forward pass: layerSum holds pre-activations, layerAct the
        # activations (layerAct[0] is the input itself).
        layerSum = []
        lastRes = input
        layerAct = [lastRes]
        for layer in range(self.layersNumber):
            layerRes = np.dot(self.weights[layer], lastRes) + self.biases[layer]
            lastRes = fActivation(layerRes)
            layerSum.append(layerRes)
            layerAct.append(lastRes)
        classified = bool(np.argmax(lastRes) == np.argmax(target))
        # Backward pass.
        # NOTE(review): dActivation is applied to the *activation* here but
        # to the *pre-activation* (layerSum) in the loop below -- confirm
        # which argument dSigmoid expects; one of the two is likely wrong.
        diffError = sum(fCost(lastRes, target))
        delta = dCost(lastRes, target) * dActivation(lastRes)
        diffBias[-1] = delta
        diffWeight[-1] = np.dot(delta, layerAct[-2].transpose())
        for layer in reversed(range(self.layersNumber-1)):
            delta = np.dot(self.weights[layer+1].transpose(), delta) *\
                    dActivation(layerSum[layer])
            diffBias[layer] = delta
            diffWeight[layer] = np.dot(delta, layerAct[layer].transpose())
        return diffWeight, diffBias, diffError, classified
| [
"numpy.argmax",
"numpy.zeros",
"NeuralNetwork.Activation.dSigmoid",
"numpy.dot",
"numpy.random.seed",
"NeuralNetwork.Cost.fQuadratic",
"NeuralNetwork.Cost.dQuadratic",
"NeuralNetwork.Activation.fSigmoid",
"numpy.random.randn"
] | [((669, 687), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (683, 687), True, 'import numpy as np\n'), ((3084, 3106), 'numpy.zeros', 'np.zeros', (['weight.shape'], {}), '(weight.shape)\n', (3092, 3106), True, 'import numpy as np\n'), ((3149, 3169), 'numpy.zeros', 'np.zeros', (['bias.shape'], {}), '(bias.shape)\n', (3157, 3169), True, 'import numpy as np\n'), ((3474, 3495), 'NeuralNetwork.Activation.fSigmoid', 'fActivation', (['layerRes'], {}), '(layerRes)\n', (3485, 3495), True, 'from NeuralNetwork.Activation import fSigmoid as fActivation, dSigmoid as dActivation\n'), ((3581, 3599), 'numpy.argmax', 'np.argmax', (['lastRes'], {}), '(lastRes)\n', (3590, 3599), True, 'import numpy as np\n'), ((3603, 3620), 'numpy.argmax', 'np.argmax', (['target'], {}), '(target)\n', (3612, 3620), True, 'import numpy as np\n'), ((3677, 3699), 'NeuralNetwork.Cost.fQuadratic', 'fCost', (['lastRes', 'target'], {}), '(lastRes, target)\n', (3682, 3699), True, 'from NeuralNetwork.Cost import fQuadratic as fCost, dQuadratic as dCost\n'), ((3711, 3733), 'NeuralNetwork.Cost.dQuadratic', 'dCost', (['lastRes', 'target'], {}), '(lastRes, target)\n', (3716, 3733), True, 'from NeuralNetwork.Cost import fQuadratic as fCost, dQuadratic as dCost\n'), ((3736, 3756), 'NeuralNetwork.Activation.dSigmoid', 'dActivation', (['lastRes'], {}), '(lastRes)\n', (3747, 3756), True, 'from NeuralNetwork.Activation import fSigmoid as fActivation, dSigmoid as dActivation\n'), ((748, 785), 'numpy.random.randn', 'np.random.randn', (['dims[d + 1]', 'dims[d]'], {}), '(dims[d + 1], dims[d])\n', (763, 785), True, 'import numpy as np\n'), ((807, 838), 'numpy.random.randn', 'np.random.randn', (['dims[d + 1]', '(1)'], {}), '(dims[d + 1], 1)\n', (822, 838), True, 'import numpy as np\n'), ((3403, 3439), 'numpy.dot', 'np.dot', (['self.weights[layer]', 'lastRes'], {}), '(self.weights[layer], lastRes)\n', (3409, 3439), True, 'import numpy as np\n'), ((3959, 3987), 'NeuralNetwork.Activation.dSigmoid', 
'dActivation', (['layerSum[layer]'], {}), '(layerSum[layer])\n', (3970, 3987), True, 'from NeuralNetwork.Activation import fSigmoid as fActivation, dSigmoid as dActivation\n'), ((1069, 1088), 'numpy.dot', 'np.dot', (['weight', 'res'], {}), '(weight, res)\n', (1075, 1088), True, 'import numpy as np\n'), ((1781, 1803), 'numpy.zeros', 'np.zeros', (['weight.shape'], {}), '(weight.shape)\n', (1789, 1803), True, 'import numpy as np\n'), ((1853, 1873), 'numpy.zeros', 'np.zeros', (['bias.shape'], {}), '(bias.shape)\n', (1861, 1873), True, 'import numpy as np\n')] |
"""Superficial default settings."""
from matplotlib.cm import get_cmap, register_cmap
from matplotlib.colors import ListedColormap
from numpy import array
#: Default color palette for continuous data.
continuous_palette = "YlGn"
#: Secondary color palette for continuous data.
alternate_palette = "Blues"
#: Default color palette for discrete data (the woven tab20 variant
#: built by mk_discrete_cmap below).
discrete_palette = "tab20_woven"
continuous_color = (.38601, .73495, .43145) # 140th color in YlGn
alternate_color = (.28089, .58762, .78508) # 155th color in Blues
color_missing = (.4, .4, .4, .2) # Lightgrey.
color_gridlines = (.6, .6, .6, .5) # Darkgrey; used by apply_theme's grid.
# Default keyword arguments for image (imshow-style) plots.
im_settings = {
    "interpolation": "none",
    "origin": "upper",
}
# Default keyword arguments for scatter/point plots (small square markers).
point_settings = {
    "s": 1,
    "marker": "s"
}
# Default keyword arguments for polygon patches: semi-transparent,
# black outline only, no fill.
patch_settings = {
    "alpha": 0.5,
    "closed": True,
    "edgecolor": "k",
    "facecolor": "none",
    "fill": False,
    "linestyle": "solid",
    "linewidth": 1
}
def mk_discrete_cmap(name):
    """Build a reordered `tab20` palette, registered under *name*.

    The stock `tab20` palette alternates regular and light shades; here
    all the regular shades come first and the light ones are pushed to
    the end of the cycle.
    """
    base = get_cmap("tab20")
    # Even indices are the regular shades, odd indices the light ones.
    reordered = list(base.colors[::2]) + list(base.colors[1::2])
    return ListedColormap(tuple(reordered), name=name)
# Register the woven tab20 palette so matplotlib can look it up by the
# name stored in `discrete_palette`.
register_cmap(cmap=mk_discrete_cmap(discrete_palette))
def apply_theme(*axes, grid=False):
    """Update one or more axes with the package theme.

    Each axes is modified in place. If more than one is provided,
    they are returned as a flat array.

    Parameters
    ----------
    *axes : matplotlib Axes
        Axes containing the plot(s) to be modified.
    grid : bool, optional
        Add gridlines to each axes. It is advised to leave
        this as False if using a 3rd-party theme (e.g.
        seaborn's `darkgrid`).

    Returns
    -------
    matplotlib Axes or array of Axes
        A single Axes when exactly one was given, otherwise a flat
        array (possibly empty when called with no axes).
    """
    for ax in axes:
        ax.set_xlabel("Easting")
        ax.set_ylabel("Northing")
        # Force x and y axes to be equally spaced, as befitting
        # geographic/geometric data. This has potential to create
        # a large amount of whitespace around the data, as an
        # unfortunate side-effect.
        ax.set_aspect("equal", adjustable="datalim")
        # Remove axis ticks and tick labels.
        # Tick labels are not removed with `ax.tick_params`
        # as it leaves the coordinate offset in the corner.
        ax.tick_params(
            axis="both",
            which="both",
            bottom=False,
            top=False,
            left=False,
            right=False)
        ax.set_yticklabels([])
        ax.set_xticklabels([])
        if grid:
            ax.grid(True, color=color_gridlines)
    # Preserve the input shape: a lone Axes comes back bare, anything
    # else (including zero axes, which previously raised IndexError)
    # is wrapped in an array, matching matplotlib conventions.
    if len(axes) == 1:
        return axes[0]
    return array(axes)
| [
"numpy.array",
"matplotlib.cm.get_cmap"
] | [((1100, 1117), 'matplotlib.cm.get_cmap', 'get_cmap', (['"""tab20"""'], {}), "('tab20')\n", (1108, 1117), False, 'from matplotlib.cm import get_cmap, register_cmap\n'), ((2908, 2919), 'numpy.array', 'array', (['axes'], {}), '(axes)\n', (2913, 2919), False, 'from numpy import array\n')] |
#!/usr/bin/python3
'''Routines useful in generation and processing of synthetic data
These are very useful in analyzing the behavior or cameras and lenses.
All functions are exported into the mrcal module. So you can call these via
mrcal.synthetic_data.fff() or mrcal.fff(). The latter is preferred.
'''
import numpy as np
import numpysane as nps
import sys
import mrcal
def ref_calibration_object(W, H, object_spacing, calobject_warp=None):
    r'''Return the geometry of the calibration object

    Produces the point geometry of a grid-of-points calibration object in its
    own reference coordinate system, as a (H,W,3) array: each entry is an
    (x,y,z) point. The origin sits at a corner of the grid, so
    ref_calibration_object(...)[0,0,:] is np.array((0,0,0)). The grid spans x
    and y with a pitch of object_spacing (a square grid is assumed), while z
    encodes the depth: z = 0 everywhere for a flat board.

    A simple parabolic board warping is supported through calobject_warp, a
    (2,) array giving the maximum additive deflection along the x and y axes
    respectively. Along each axis the deflection is a parabola vanishing at
    the board edges and peaking at the center. With the edges at (0, N-1):

        xr = x / (N-1)
        z += k * 4 * xr * (1 - xr)

    ARGUMENTS

    - W: how many points we have in the horizontal direction

    - H: how many points we have in the vertical direction

    - object_spacing: the distance between adjacent points in the calibration
      object, identical vertically and horizontally

    - calobject_warp: optional array of shape (2,); defaults to None. The
      maximum additive deflection along the x and y axes. If None, the
      object is flat

    RETURNED VALUES

    The calibration object geometry in a (H,W,3) array

    '''
    xgrid, ygrid = np.meshgrid(np.arange(W, dtype=float),
                                np.arange(H, dtype=float))

    # (H,W,3) grid of (x,y,0) points, scaled to physical units. The warp is
    # added AFTER the scaling: deflections are already in physical units.
    full_object = np.stack((xgrid, ygrid, np.zeros((H, W))),
                           axis=-1) * object_spacing

    if calobject_warp is not None:
        xr = xgrid / (W-1)
        yr = ygrid / (H-1)
        full_object[..., 2] += calobject_warp[0] * 4. * xr * (1. - xr)
        full_object[..., 2] += calobject_warp[1] * 4. * yr * (1. - yr)

    return full_object
def synthesize_board_observations(models,
object_width_n,object_height_n,
object_spacing, calobject_warp,
rt_ref_boardcenter,
rt_ref_boardcenter__noiseradius,
Nframes,
which = 'all-cameras-must-see-full-board'):
r'''Produce synthetic chessboard observations
SYNOPSIS
models = [mrcal.cameramodel("0.cameramodel"),
mrcal.cameramodel("1.cameramodel"),]
# shapes (Nframes, Ncameras, object_height_n, object_width_n, 2) and
# (Nframes, 4, 3)
q,Rt_ref_boardref = \
mrcal.synthesize_board_observations( \
models,
# board geometry
10,12,0.1,None,
# mean board pose and the radius of the added uniform noise
rt_ref_boardcenter,
rt_ref_boardcenter__noiseradius,
# How many frames we want
100,
which = 'some-cameras-must-see-half-board')
# q now contains the synthetic pixel observations, but some of them will be
# out of view. I construct an (x,y,weight) observations array, as expected
# by the optimizer, and I set the weight for the out-of-view points to -1 to
# tell the optimizer to ignore those points
# Set the weights to 1 initially
# shape (Nframes, Ncameras, object_height_n, object_width_n, 3)
observations = nps.glue(q,
np.ones( q.shape[:-1] + (1,) ),
axis = -1)
# shape (Ncameras, 1, 1, 2)
imagersizes = nps.mv( nps.cat(*[ m.imagersize() for m in models ]),
-2, -4 )
observations[ np.any( q < 0, axis=-1 ), 2 ] = -1.
observations[ np.any( q-imagersizes >= 0, axis=-1 ), 2 ] = -1.
Given a description of a calibration object and of the cameras observing it,
produces perfect pixel observations of the objects by those cameras. We return a
dense observation array: every corner observation from every chessboard pose
will be reported for every camera. Some of these observations MAY be
out-of-view, depending on the value of the 'which' argument; see description
below. The example above demonstrates how to mark such out-of-bounds
observations as outliers to tell the optimization to ignore these.
The "models" provides the intrinsics and extrinsics.
The calibration objects are nominally have pose rt_ref_boardcenter in the
reference coordinate system, with each pose perturbed uniformly with radius
rt_ref_boardcenter__noiseradius. This is nonstandard since here I'm placing the
board origin at its center instead of the corner (as
mrcal.ref_calibration_object() does). But this is more appropriate to the usage
of this function. The returned Rt_ref_boardref transformation DOES use the
normal corner-referenced board geometry
Returns the point observations and the chessboard poses that produced these
observations.
ARGUMENTS
- models: an array of mrcal.cameramodel objects, one for each camera we're
simulating. This is the intrinsics and the extrinsics. Ncameras = len(models)
- object_width_n: the number of horizontal points in the calibration object grid
- object_height_n: the number of vertical points in the calibration object grid
- object_spacing: the distance between adjacent points in the calibration
object. A square object is assumed, so the vertical and horizontal distances
are assumed to be identical.
- calobject_warp: a description of the calibration board warping. None means "no
warping": the object is flat. Otherwise this is an array of shape (2,). See
the docs for ref_calibration_object() for the meaning of the values in this
array.
- rt_ref_boardcenter: the nominal pose of the calibration object, in the
reference coordinate system. This is an rt transformation from a
center-referenced calibration object to the reference coordinate system
- rt_ref_boardcenter__noiseradius: the deviation-from-nominal for the chessboard
pose for each frame. I add uniform noise to rt_ref_boardcenter, with each
element sampled independently, with the radius given here.
- Nframes: how many frames of observations to return
- which: a string, defaulting to 'all-cameras-must-see-full-board'. Controls the
requirements on the visibility of the returned points. Valid values:
- 'all-cameras-must-see-full-board': We return only those chessboard poses
that produce observations that are FULLY visible by ALL the cameras.
- 'some-cameras-must-see-full-board': We return only those chessboard poses
that produce observations that are FULLY visible by AT LEAST ONE camera.
- 'all-cameras-must-see-half-board': We return only those chessboard poses
that produce observations that are AT LEAST HALF visible by ALL the cameras.
- 'some-cameras-must-see-half-board': We return only those chessboard poses
that produce observations that are AT LEAST HALF visible by AT LEAST ONE
camera.
RETURNED VALUES
We return a tuple:
- q: an array of shape (Nframes, Ncameras, object_height, object_width, 2)
containing the pixel coordinates of the generated observations
- Rt_ref_boardref: an array of shape (Nframes, 4,3) containing the poses of the
chessboards. This transforms the object returned by ref_calibration_object()
to the pose that was projected, in the ref coord system
'''
# Can visualize results with this script:
r'''
r = np.array((30, 0, 0,), dtype=float) * np.pi/180.
model = mrcal.cameramodel( intrinsics = ('LENSMODEL_PINHOLE',
np.array((1000., 1000., 1000., 1000.,))),
imagersize = np.array((2000,2000)) )
Rt_ref_boardref = \
mrcal.synthesize_board_observations([model],
5,20,0.1,None,
nps.glue(r, np.array((0,0,3.)), axis=-1),
np.array((0,0,0., 0,0,0)),
1) [1]
mrcal.show_geometry( models_or_extrinsics_rt_fromref = np.zeros((1,1,6), dtype=float),
frames_rt_toref = mrcal.rt_from_Rt(Rt_ref_boardref),
object_width_n = 20,
object_height_n = 5,
object_spacing = 0.1,
_set = 'xyplane 0',
wait = 1 )
'''
which_valid = ( 'all-cameras-must-see-full-board',
'some-cameras-must-see-full-board',
'all-cameras-must-see-half-board',
'some-cameras-must-see-half-board', )
if not which in which_valid:
raise Exception(f"'which' argument must be one of {which_valid}")
Ncameras = len(models)
# I move the board, and keep the cameras stationary.
#
# Camera coords: x,y with pixels, z forward
# Board coords: x,y in-plane. z forward (i.e. back towards the camera)
# The center of the board is at the origin (ignoring warping)
board_center = \
np.array(( (object_width_n -1)*object_spacing/2.,
(object_height_n-1)*object_spacing/2.,
0 ))
# shape: (Nh,Nw,3)
board_reference = \
mrcal.ref_calibration_object(object_width_n,object_height_n,
object_spacing,calobject_warp) - \
board_center
# Transformation from the board returned by ref_calibration_object() to
# the one I use here. It's a shift to move the origin to the center of the
# board
Rt_boardref_origboardref = mrcal.identity_Rt()
Rt_boardref_origboardref[3,:] = -board_center
def get_observation_chunk():
'''Make Nframes observations, and return them all, even the out-of-view ones'''
# I compute the full random block in one shot. This is useful for
# simulations that want to see identical poses when asking for N-1
# random poses and when asking for the first N-1 of a set of N random
# poses
# shape (Nframes,6)
randomblock = np.random.uniform(low=-1.0, high=1.0, size=(Nframes,6))
# shape(Nframes,4,3)
Rt_ref_boardref = \
mrcal.Rt_from_rt( rt_ref_boardcenter + randomblock * rt_ref_boardcenter__noiseradius )
# shape = (Nframes, Nh,Nw,3)
boards_ref = mrcal.transform_point_Rt( # shape (Nframes, 1,1,4,3)
nps.mv(Rt_ref_boardref, 0, -5),
# shape ( Nh,Nw,3)
board_reference )
# I project full_board. Shape: (Nframes,Ncameras,Nh,Nw,2)
q = \
nps.mv( \
nps.cat( \
*[ mrcal.project( mrcal.transform_point_Rt(models[i].extrinsics_Rt_fromref(), boards_ref),
*models[i].intrinsics()) \
for i in range(Ncameras) ]),
0,1 )
return q,Rt_ref_boardref
def cull_out_of_view(q,Rt_ref_boardref,
which):
# q has shape (Nframes,Ncameras,Nh,Nw,2)
# Rt_ref_boardref has shape (Nframes,4,3)
# I pick only those frames where at least one cameras sees the whole
# board
# shape (Nframes,Ncameras,Nh,Nw)
mask_visible = (q[..., 0] >= 0) * (q[..., 1] >= 0)
for i in range(Ncameras):
W,H = models[i].imagersize()
mask_visible[:,i,...] *= \
(q[:,i,:,:,0] <= W-1) * \
(q[:,i,:,:,1] <= H-1)
# shape (Nframes, Ncameras)
Nvisible = np.count_nonzero(mask_visible, axis=(-1,-2) )
Nh,Nw = q.shape[2:4]
if which == 'all-cameras-must-see-full-board':
iframe = np.all(Nvisible == Nh*Nw, axis=-1)
elif which == 'some-cameras-must-see-full-board':
iframe = np.any(Nvisible == Nh*Nw, axis=-1)
elif which == 'all-cameras-must-see-half-board':
iframe = np.all(Nvisible > Nh*Nw//2, axis=-1)
elif which == 'some-cameras-must-see-half-board':
iframe = np.any(Nvisible > Nh*Nw//2, axis=-1)
else:
raise Exception("Unknown 'which' argument. This is a bug. I checked for the valid options at the top of this function")
# q has shape (Nframes_inview,Ncameras,Nh*Nw,2)
# Rt_ref_boardref has shape (Nframes_inview,4,3)
return q[iframe, ...], Rt_ref_boardref[iframe, ...]
# shape (Nframes_sofar,Ncameras,Nh,Nw,2)
q = np.zeros((0,
Ncameras,
object_height_n,object_width_n,
2),
dtype=float)
# shape (Nframes_sofar,4,3)
Rt_ref_boardref = np.zeros((0,4,3), dtype=float)
# I keep creating data, until I get Nframes-worth of in-view observations
while True:
q_here, Rt_ref_boardref_here = get_observation_chunk()
q_here, Rt_ref_boardref_here = \
cull_out_of_view(q_here, Rt_ref_boardref_here,
which)
q = nps.glue(q, q_here, axis=-5)
Rt_ref_boardref = nps.glue(Rt_ref_boardref, Rt_ref_boardref_here, axis=-3)
if q.shape[0] >= Nframes:
q = q [:Nframes,...]
Rt_ref_boardref = Rt_ref_boardref[:Nframes,...]
break
return q, mrcal.compose_Rt(Rt_ref_boardref, Rt_boardref_origboardref)
def _noisy_observation_vectors_for_triangulation(p,
Rt01,
intrinsics0, intrinsics1,
Nsamples, sigma):
# p has shape (...,3)
# shape (..., 2)
q0 = mrcal.project( p,
*intrinsics0 )
q1 = mrcal.project( mrcal.transform_point_Rt( mrcal.invert_Rt(Rt01), p),
*intrinsics1 )
# shape (..., 1,2). Each has x,y
q0 = nps.dummy(q0,-2)
q1 = nps.dummy(q1,-2)
q_noise = np.random.randn(*p.shape[:-1], Nsamples,2,2) * sigma
# shape (..., Nsamples,2). Each has x,y
q0_noise = q_noise[...,:,0,:]
q1_noise = q_noise[...,:,1,:]
q0_noisy = q0 + q0_noise
q1_noisy = q1 + q1_noise
# shape (..., Nsamples, 3)
v0local_noisy = mrcal.unproject( q0_noisy, *intrinsics0 )
v1local_noisy = mrcal.unproject( q1_noisy, *intrinsics1 )
v0_noisy = v0local_noisy
v1_noisy = mrcal.rotate_point_R(Rt01[:3,:], v1local_noisy)
# All have shape (..., Nsamples,3)
return \
v0local_noisy, v1local_noisy, v0_noisy,v1_noisy, \
q0,q1, q0_noisy, q1_noisy
| [
"mrcal.identity_Rt",
"numpy.count_nonzero",
"numpy.array",
"mrcal.ref_calibration_object",
"mrcal.rotate_point_R",
"numpy.arange",
"numpysane.mv",
"mrcal.unproject",
"mrcal.Rt_from_rt",
"numpysane.glue",
"numpysane.cat",
"mrcal.compose_Rt",
"numpy.any",
"mrcal.project",
"numpysane.dummy"... | [((12096, 12205), 'numpy.array', 'np.array', (['((object_width_n - 1) * object_spacing / 2.0, (object_height_n - 1) *\n object_spacing / 2.0, 0)'], {}), '(((object_width_n - 1) * object_spacing / 2.0, (object_height_n - 1\n ) * object_spacing / 2.0, 0))\n', (12104, 12205), True, 'import numpy as np\n'), ((12637, 12656), 'mrcal.identity_Rt', 'mrcal.identity_Rt', ([], {}), '()\n', (12654, 12656), False, 'import mrcal\n'), ((15611, 15683), 'numpy.zeros', 'np.zeros', (['(0, Ncameras, object_height_n, object_width_n, 2)'], {'dtype': 'float'}), '((0, Ncameras, object_height_n, object_width_n, 2), dtype=float)\n', (15619, 15683), True, 'import numpy as np\n'), ((15808, 15840), 'numpy.zeros', 'np.zeros', (['(0, 4, 3)'], {'dtype': 'float'}), '((0, 4, 3), dtype=float)\n', (15816, 15840), True, 'import numpy as np\n'), ((16816, 16846), 'mrcal.project', 'mrcal.project', (['p', '*intrinsics0'], {}), '(p, *intrinsics0)\n', (16829, 16846), False, 'import mrcal\n'), ((17036, 17053), 'numpysane.dummy', 'nps.dummy', (['q0', '(-2)'], {}), '(q0, -2)\n', (17045, 17053), True, 'import numpysane as nps\n'), ((17062, 17079), 'numpysane.dummy', 'nps.dummy', (['q1', '(-2)'], {}), '(q1, -2)\n', (17071, 17079), True, 'import numpysane as nps\n'), ((17370, 17409), 'mrcal.unproject', 'mrcal.unproject', (['q0_noisy', '*intrinsics0'], {}), '(q0_noisy, *intrinsics0)\n', (17385, 17409), False, 'import mrcal\n'), ((17432, 17471), 'mrcal.unproject', 'mrcal.unproject', (['q1_noisy', '*intrinsics1'], {}), '(q1_noisy, *intrinsics1)\n', (17447, 17471), False, 'import mrcal\n'), ((17528, 17576), 'mrcal.rotate_point_R', 'mrcal.rotate_point_R', (['Rt01[:3, :]', 'v1local_noisy'], {}), '(Rt01[:3, :], v1local_noisy)\n', (17548, 17576), False, 'import mrcal\n'), ((4317, 4342), 'numpy.arange', 'np.arange', (['W'], {'dtype': 'float'}), '(W, dtype=float)\n', (4326, 4342), True, 'import numpy as np\n'), ((4343, 4368), 'numpy.arange', 'np.arange', (['H'], {'dtype': 'float'}), '(H, 
dtype=float)\n', (4352, 4368), True, 'import numpy as np\n'), ((4455, 4474), 'numpy.zeros', 'np.zeros', (['(H, W, 1)'], {}), '((H, W, 1))\n', (4463, 4474), True, 'import numpy as np\n'), ((12284, 12381), 'mrcal.ref_calibration_object', 'mrcal.ref_calibration_object', (['object_width_n', 'object_height_n', 'object_spacing', 'calobject_warp'], {}), '(object_width_n, object_height_n,\n object_spacing, calobject_warp)\n', (12312, 12381), False, 'import mrcal\n'), ((13124, 13180), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1.0)', 'high': '(1.0)', 'size': '(Nframes, 6)'}), '(low=-1.0, high=1.0, size=(Nframes, 6))\n', (13141, 13180), True, 'import numpy as np\n'), ((13250, 13338), 'mrcal.Rt_from_rt', 'mrcal.Rt_from_rt', (['(rt_ref_boardcenter + randomblock * rt_ref_boardcenter__noiseradius)'], {}), '(rt_ref_boardcenter + randomblock *\n rt_ref_boardcenter__noiseradius)\n', (13266, 13338), False, 'import mrcal\n'), ((14689, 14734), 'numpy.count_nonzero', 'np.count_nonzero', (['mask_visible'], {'axis': '(-1, -2)'}), '(mask_visible, axis=(-1, -2))\n', (14705, 14734), True, 'import numpy as np\n'), ((16147, 16175), 'numpysane.glue', 'nps.glue', (['q', 'q_here'], {'axis': '(-5)'}), '(q, q_here, axis=-5)\n', (16155, 16175), True, 'import numpysane as nps\n'), ((16202, 16258), 'numpysane.glue', 'nps.glue', (['Rt_ref_boardref', 'Rt_ref_boardref_here'], {'axis': '(-3)'}), '(Rt_ref_boardref, Rt_ref_boardref_here, axis=-3)\n', (16210, 16258), True, 'import numpysane as nps\n'), ((16447, 16506), 'mrcal.compose_Rt', 'mrcal.compose_Rt', (['Rt_ref_boardref', 'Rt_boardref_origboardref'], {}), '(Rt_ref_boardref, Rt_boardref_origboardref)\n', (16463, 16506), False, 'import mrcal\n'), ((17094, 17140), 'numpy.random.randn', 'np.random.randn', (['*p.shape[:-1]', 'Nsamples', '(2)', '(2)'], {}), '(*p.shape[:-1], Nsamples, 2, 2)\n', (17109, 17140), True, 'import numpy as np\n'), ((4404, 4419), 'numpysane.cat', 'nps.cat', (['xx', 'yy'], {}), '(xx, yy)\n', (4411, 4419), True, 
'import numpysane as nps\n'), ((13496, 13526), 'numpysane.mv', 'nps.mv', (['Rt_ref_boardref', '(0)', '(-5)'], {}), '(Rt_ref_boardref, 0, -5)\n', (13502, 13526), True, 'import numpysane as nps\n'), ((14843, 14879), 'numpy.all', 'np.all', (['(Nvisible == Nh * Nw)'], {'axis': '(-1)'}), '(Nvisible == Nh * Nw, axis=-1)\n', (14849, 14879), True, 'import numpy as np\n'), ((16923, 16944), 'mrcal.invert_Rt', 'mrcal.invert_Rt', (['Rt01'], {}), '(Rt01)\n', (16938, 16944), False, 'import mrcal\n'), ((14957, 14993), 'numpy.any', 'np.any', (['(Nvisible == Nh * Nw)'], {'axis': '(-1)'}), '(Nvisible == Nh * Nw, axis=-1)\n', (14963, 14993), True, 'import numpy as np\n'), ((15070, 15110), 'numpy.all', 'np.all', (['(Nvisible > Nh * Nw // 2)'], {'axis': '(-1)'}), '(Nvisible > Nh * Nw // 2, axis=-1)\n', (15076, 15110), True, 'import numpy as np\n'), ((15186, 15226), 'numpy.any', 'np.any', (['(Nvisible > Nh * Nw // 2)'], {'axis': '(-1)'}), '(Nvisible > Nh * Nw // 2, axis=-1)\n', (15192, 15226), True, 'import numpy as np\n')] |
#Trabalhando com sorted no Pandas
import pandas as pd
import numpy as np
unsorted_df = pd.DataFrame(np.random.randn(10,2),index=[1,4,6,2,3,5,9,8,0,7],columns = ['col2','col1'])
sorted_df=unsorted_df.sort_index()
print (sorted_df) | [
"numpy.random.randn"
] | [((100, 122), 'numpy.random.randn', 'np.random.randn', (['(10)', '(2)'], {}), '(10, 2)\n', (115, 122), True, 'import numpy as np\n')] |
import os
import sys
import argparse
import tensorflow as tf
import numpy as np
from PIL import Image
from reader import Reader
from source.anchor_filter import AnchorFilter
import logging
import random
import time
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
class VS3D(object):
def __init__(self, kitti,
train_set, val_set,
is_training=False,
mini_batch_size=[1024, 128]):
self.reader = Reader(kitti)
self.anchor_filter = AnchorFilter()
self.is_training = is_training
self.mini_batch_size = mini_batch_size
with open(train_set) as f:
indices = f.readlines()
self.train_indices = \
[index.strip() for index in indices]
self.train_indices.sort()
with open(val_set) as f:
indices = f.readlines()
self.val_indices = \
[index.strip() for index in indices]
self.val_indices.sort()
self.endpoint = self.build()
return
def random_keep(self, mask, num_keep):
curr_num = tf.reduce_sum(
tf.cast(mask, tf.float32))
keep_ratio = tf.divide(num_keep,
curr_num + 1.0)
rand_select = tf.random.uniform(
shape=tf.shape(mask),
minval=0,
maxval=1)
keep = tf.less(rand_select, keep_ratio)
mask = tf.logical_and(mask, keep)
return mask
def balance_pos_neg(self, scores, num_keep,
pos_thres=0.7,
neg_thres=0.4):
num_keep_pos = num_keep // 2
num_keep_neg = num_keep // 2
all_pos = tf.greater(scores, pos_thres)
select_pos = self.random_keep(all_pos,
num_keep_pos)
all_neg = tf.less(scores, neg_thres)
select_neg = self.random_keep(all_neg,
num_keep_neg)
select = tf.logical_or(select_pos, select_neg)
return select
def mask_out(self, mask, tensors):
masked_tensors = [tf.boolean_mask(tensor, mask) \
for tensor in tensors]
return masked_tensors
def build(self):
endpoint = {}
placeholder = {}
placeholder['sphere_map'] = tf.placeholder(
shape=[64, 512, 5],
dtype=tf.float32)
placeholder['input_image'] = tf.placeholder(
shape=[384, 1248, 3],
dtype=tf.float32)
placeholder['image_size'] = tf.placeholder(
shape=[2],
dtype=tf.float32)
placeholder['plane'] = tf.placeholder(
shape=[4],
dtype=tf.float32)
placeholder['velo_to_cam'] = tf.placeholder(
shape=[4, 4],
dtype=tf.float32)
placeholder['cam_to_img'] = tf.placeholder(
shape=[3, 4],
dtype=tf.float32)
placeholder['cam_to_velo'] = tf.placeholder(
shape=[4, 4],
dtype=tf.float32)
xyz, ranges, density = tf.split(
placeholder['sphere_map'],
[3, 1, 1], axis=-1)
anchor_centers, scores_init = \
self.anchor_filter.filt(xyz, placeholder['plane'],
placeholder['cam_to_velo'])
endpoint['anchor_centers'] = anchor_centers
endpoint['scores_init'] = scores_init
mask = tf.greater(scores_init, 0.9 if self.is_training else 0.8)
if self.is_training:
mask = self.random_keep(mask,
self.mini_batch_size[0])
bottom_centers, rotation, class_prob, full_prob = \
self.anchor_filter.filt_image(
placeholder['input_image'],
placeholder['plane'],
placeholder['cam_to_img'],
placeholder['image_size'],
mask)
if self.is_training:
mask_balance = self.balance_pos_neg(
class_prob,
self.mini_batch_size[1])
bottom_centers, rotation, class_prob, full_prob = \
self.mask_out(mask_balance,
[bottom_centers, rotation, class_prob, full_prob])
endpoint['bottom_centers'] = bottom_centers
endpoint['rotation'] = rotation
endpoint['class_prob'] = class_prob
endpoint['full_prob'] = full_prob
[_, rotation_lidar, rot_vect_lidar,
class_prob_lidar, mask_prob_lidar] = \
self.anchor_filter.filt_lidar(
placeholder['sphere_map'],
placeholder['plane'],
placeholder['cam_to_velo'],
placeholder['velo_to_cam'],
mask)
if self.is_training:
[_, rotation_lidar, rot_vect_lidar,
class_prob_lidar, mask_prob_lidar] = \
self.mask_out(mask_balance,
[_, rotation_lidar, rot_vect_lidar,
class_prob_lidar, mask_prob_lidar])
endpoint['rotation_lidar'] = rotation_lidar
endpoint['class_prob_lidar'] = class_prob_lidar
endpoint['rot_vect_lidar'] = rot_vect_lidar
endpoint['mask_prob_lidar'] = mask_prob_lidar
bottom_centers_aligned, point_cloud_density = \
self.anchor_filter.points_alignment(
xyz,
bottom_centers,
rotation,
placeholder['velo_to_cam'],
placeholder['cam_to_velo'])
endpoint['bottom_centers_aligned'] = bottom_centers_aligned
endpoint['point_cloud_density'] = point_cloud_density
bottom_centers_aligned_lidar, point_cloud_density_lidar = \
self.anchor_filter.points_alignment(
xyz,
bottom_centers,
rotation_lidar,
placeholder['velo_to_cam'],
placeholder['cam_to_velo'])
endpoint['bottom_centers_aligned_lidar'] = bottom_centers_aligned_lidar
endpoint['point_cloud_density_lidar'] = point_cloud_density_lidar
rotation_aligned = self.anchor_filter.rotation_align(
xyz,
bottom_centers,
rotation,
placeholder['velo_to_cam'],
placeholder['cam_to_velo'])
endpoint['rotation_aligned'] = rotation_aligned
instance_points, instance_mask = \
self.anchor_filter.instance_mask(
xyz,
bottom_centers_aligned,
rotation,
placeholder['velo_to_cam'],
placeholder['cam_to_velo'])
endpoint['instance_points'] = instance_points
endpoint['instance_mask'] = instance_mask
instance_points_lidar, instance_mask_lidar = \
self.anchor_filter.instance_mask(
xyz,
bottom_centers_aligned_lidar,
rotation_lidar,
placeholder['velo_to_cam'],
placeholder['cam_to_velo'])
endpoint['instance_points_lidar'] = instance_points_lidar
endpoint['instance_mask_lidar'] = instance_mask_lidar
endpoint['mask_prob_lidar'] = mask_prob_lidar
nms_indices = self.anchor_filter.nms_image(
bottom_centers,
rotation,
tf.minimum(class_prob, full_prob),
placeholder['cam_to_img'],
placeholder['image_size'])
endpoint['nms_indices'] = nms_indices
nms_indices_lidar = self.anchor_filter.nms_image(
bottom_centers,
rotation_lidar,
class_prob_lidar,
placeholder['cam_to_img'],
placeholder['image_size'])
endpoint['nms_indices_lidar'] = nms_indices_lidar
class_loss, rot_loss, mask_loss, rot_error = \
self.anchor_filter.build_loss(
rotation, rot_vect_lidar, rotation_lidar,
tf.minimum(class_prob, full_prob),
class_prob_lidar, instance_mask, mask_prob_lidar)
endpoint['class_loss'] = class_loss
endpoint['rot_loss'] = rot_loss
endpoint['mask_loss'] = mask_loss
endpoint['rot_error'] = rot_error
self.placeholder = placeholder
return endpoint
def to_kitti_line(self, bbox, center,
size, rotation, score):
kitti_line = 'Car -1 -1 -10 ' + \
'{:.2f} {:.2f} {:.2f} {:.2f} '.format(
bbox[0], bbox[1], bbox[2], bbox[3]) + \
'{:.2f} {:.2f} {:.2f} '.format(
size[0], size[1], size[2]) + \
'{:.2f} {:.2f} {:.2f} '.format(
center[0], center[1], center[2]) + \
'{:.2f} {:.2f} \n'.format(
rotation, score)
return kitti_line
def to_bbox(self, center,
size, rotation,
cam_to_img, image_size):
R = np.array([[+np.cos(rotation), 0, +np.sin(rotation)],
[ 0, 1, 0],
[-np.sin(rotation), 0, +np.cos(rotation)]],
dtype=np.float32)
h, w, l = size
x_corners = [l/2, l/2, -l/2, -l/2, l/2, l/2, -l/2, -l/2]
y_corners = [0,0,0,0,-h,-h,-h,-h]
z_corners = [w/2, -w/2, -w/2, w/2, w/2, -w/2, -w/2, w/2]
corners = np.dot(R, [x_corners, y_corners, z_corners])
corners = corners + center.reshape((3, 1))
projection = np.dot(cam_to_img, np.vstack([corners,
np.ones(8, dtype=np.float32)]))
projection = (projection / projection[2])[:2]
left = max(np.amin(projection[0]), 0)
right = min(np.amax(projection[0]), image_size[1])
top = max(np.amin(projection[1]), 0)
bottom = min(np.amax(projection[1]), image_size[0])
return [left, top, right, bottom]
def train(self,
model_image,
model_lidar=None,
save_dir='./runs/weights',
steps=160000,
learning_rate_init=1e-4,
l2_weight=1e-5,
clip_grads=False,
clip_grads_norm=2.0,
display_step=200,
save_step=2000):
class_loss = self.endpoint['class_loss']
rot_loss = self.endpoint['rot_loss'] * 5.0
mask_loss = self.endpoint['mask_loss'] * 2.0
rot_error = self.endpoint['rot_error']
global_step = tf.placeholder(shape=(), dtype=tf.int32)
learning_rate = tf.train.exponential_decay(
learning_rate_init, global_step,
120000, 0.2, staircase=False)
weight_loss = [tf.nn.l2_loss(var) for var \
in tf.trainable_variables()]
weight_loss = tf.reduce_sum(weight_loss) * l2_weight
total_loss = weight_loss + class_loss + rot_loss + mask_loss
opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
all_vars = tf.get_collection_ref(
tf.GraphKeys.GLOBAL_VARIABLES)
var_list_image = \
[var for var in all_vars if "lidar" not in var.name]
var_list_lidar = \
[var for var in all_vars if "lidar" in var.name]
if clip_grads:
grads_and_vars = opt.compute_gradients(total_loss,
var_list_lidar)
grads, tvars = zip(*grads_and_vars)
clipped_grads, norm = tf.clip_by_global_norm(
grads, clip_grads_norm)
grads_and_vars = zip(clipped_grads, tvars)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = opt.apply_gradients(grads_and_vars)
else:
train_op = tf.train.AdamOptimizer(
learning_rate=learning_rate
).minimize(total_loss, var_list=var_list_lidar)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
saver_image = tf.train.Saver(var_list=var_list_image)
saver_lidar = tf.train.Saver(var_list=var_list_lidar)
saver_image.restore(sess, model_image)
if model_lidar:
saver_lidar.restore(sess, model_lidar)
rot_loss_np_list = []
class_loss_np_list = []
mask_loss_np_list = []
rot_error_np_list = []
for step in range(steps):
index = random.choice(self.train_indices)
data = self.reader.data[index]
sphere_map = np.load(open(
data['sphere_path'], 'rb'))
image_pil = Image.open(
data['image_path'])
width, height = image_pil.size
image_size = np.array([height, width],
dtype=np.float32)
image_np = np.array(image_pil.resize((1248, 384)),
dtype=np.float32)
cam_to_img = data['P2']
plane = data['plane']
velo_to_cam = np.dot(data['R0'], data['Tr'])
cam_to_velo = np.linalg.inv(velo_to_cam)
placeholder = self.placeholder
_, weight_loss_np, class_loss_np, \
rot_loss_np, mask_loss_np, rot_error_np, debug_np = \
sess.run([train_op, weight_loss,
class_loss, rot_loss,
mask_loss, rot_error,
tf.get_collection('debug')],
feed_dict={
placeholder['sphere_map']: sphere_map,
placeholder['plane']: plane,
placeholder['velo_to_cam']: velo_to_cam,
placeholder['cam_to_velo']: cam_to_velo,
placeholder['input_image']: image_np,
placeholder['image_size']: image_size,
placeholder['cam_to_img']: cam_to_img,
global_step: step})
rot_loss_np_list.append(rot_loss_np)
class_loss_np_list.append(class_loss_np)
mask_loss_np_list.append(mask_loss_np)
rot_error_np_list.append(rot_error_np)
if step % display_step == 0:
logging.info(
'Step: {} / {}, '.format(step, steps) + \
'Loss Weight: {:.3f}, '.format(weight_loss_np) + \
'Class: {:.3f}, '.format(np.mean(class_loss_np_list)) + \
'Rotation: {:.3f}, '.format(np.mean(rot_loss_np_list)) + \
'Mask: {:.3f}, '.format(np.mean(mask_loss_np_list)) + \
'Rot Error: {:.3f}'.format(np.mean(rot_error_np_list)))
rot_loss_np_list = []
class_loss_np_list = []
mask_loss_np_list = []
rot_error_np_list= []
if not os.path.exists(save_dir):
os.mkdir(save_dir)
if step % save_step == 0:
saver_lidar.save(sess, os.path.join(save_dir,
'model_lidar_{}'.format(str(step).zfill(6))))
def run(self, score_thres=0.05,
density_thres=0.1, save_dir=None,
image_model=None, lidar_model=None,
return_pred=False, max_pred_frames=np.inf):
with tf.Session() as sess:
all_vars = tf.get_collection_ref(
tf.GraphKeys.GLOBAL_VARIABLES)
var_list_image = \
[var for var in all_vars if "lidar" not in var.name]
var_list_lidar = \
[var for var in all_vars if "lidar" in var.name]
assert len(all_vars) == len(var_list_image + var_list_lidar)
if image_model and not lidar_model:
saver = tf.train.Saver(var_list=var_list_image)
saver.restore(sess, image_model)
rotation_tf = self.endpoint['rotation']
centers_tf = self.endpoint['bottom_centers']
centers_aligned_tf = self.endpoint['bottom_centers_aligned']
scores_tf = tf.minimum(self.endpoint['class_prob'],
self.endpoint['full_prob'])
nms_indices_tf = self.endpoint['nms_indices']
point_cloud_density_tf = self.endpoint['point_cloud_density']
instance_points_tf = self.endpoint['instance_points']
instance_mask_tf = self.endpoint['instance_mask']
elif not image_model and lidar_model:
saver = tf.train.Saver(var_list=var_list_lidar)
saver.restore(sess, lidar_model)
rotation_tf = self.endpoint['rotation_lidar']
centers_tf = self.endpoint['bottom_centers']
centers_aligned_tf = self.endpoint['bottom_centers_aligned_lidar']
scores_tf = self.endpoint['class_prob_lidar']
nms_indices_tf = self.endpoint['nms_indices_lidar']
point_cloud_density_tf = self.endpoint['point_cloud_density_lidar']
instance_points_tf = self.endpoint['instance_points_lidar']
instance_mask_tf = self.endpoint['instance_mask_lidar']
elif image_model and lidar_model:
saver = tf.train.Saver(var_list=var_list_image)
saver.restore(sess, image_model)
saver = tf.train.Saver(var_list=var_list_lidar)
saver.restore(sess, lidar_model)
else:
raise Exception('Image or LiDAR model must be provided!')
out_tf = [rotation_tf, centers_tf, centers_aligned_tf,
scores_tf, nms_indices_tf, point_cloud_density_tf,
instance_points_tf, instance_mask_tf]
bbox_list = []
mask_list = []
index_list = []
total_time = []
for iindex, index in enumerate(self.val_indices):
if iindex == max_pred_frames:
break
logging.info('Inference {}'.format(index))
data = self.reader.data[index]
sphere_map = np.load(open(
data['sphere_path'], 'rb'))
image_pil = Image.open(
data['image_path'])
width, height = image_pil.size
image_size = np.array([height, width],
dtype=np.float32)
image_np = np.array(image_pil.resize((1248, 384)),
dtype=np.float32)
cam_to_img = data['P2']
plane = data['plane']
velo_to_cam = np.dot(data['R0'], data['Tr'])
cam_to_velo = np.linalg.inv(velo_to_cam)
placeholder = self.placeholder
start_time = time.time()
out_np = sess.run(out_tf,
feed_dict={
placeholder['sphere_map']: sphere_map,
placeholder['plane']: plane,
placeholder['velo_to_cam']: velo_to_cam,
placeholder['cam_to_velo']: cam_to_velo,
placeholder['input_image']: image_np,
placeholder['image_size']: image_size,
placeholder['cam_to_img']: cam_to_img})
total_time.append(time.time() - start_time)
[rotation_np, centers_np, centers_aligned_np,
scores_np, nms_indices_np, point_cloud_density_np,
instance_points_np, instance_mask_np] = out_np
if iindex % 300 == 0:
logging.info('Forward time: {:.3f}s, STD: {:.3f}s'.format(np.mean(total_time), np.std(total_time)))
total_time = []
kitti_lines = []
instance_points_masked = []
for aind in nms_indices_np:
score = scores_np[aind]
density = point_cloud_density_np[aind]
if score < score_thres or density < density_thres:
continue
bbox = self.to_bbox(center=centers_aligned_np[aind],
size=[1.45, 1.55, 4.00],
rotation=rotation_np[aind],
cam_to_img=cam_to_img,
image_size=image_size)
kitti_line = self.to_kitti_line(
bbox=bbox,
center=centers_aligned_np[aind],
size=[1.45, 1.55, 4.00],
rotation=rotation_np[aind],
score=score)
kitti_lines.append(kitti_line)
instance_points_masked.append(
instance_points_np[aind] * instance_mask_np[aind])
if not os.path.exists(save_dir):
os.makedirs(os.path.join(save_dir, 'bbox'))
os.makedirs(os.path.join(save_dir, 'mask'))
with open(os.path.join(save_dir,
'bbox',
index+'.txt'), 'w') as f:
f.writelines(kitti_lines)
f.close()
with open(os.path.join(save_dir,
'mask',
index+'.npy'), 'wb') as f:
np.save(f, instance_points_masked)
f.close()
if return_pred:
bbox_list.append(kitti_lines)
mask_list.append(instance_points_masked)
index_list.append(index)
if return_pred:
return bbox_list, mask_list, index_list, self.reader
if __name__ == '__main__':
    # Command-line entry point: select the GPU, then either train the
    # student network from the teacher model or evaluate a trained model.
    cli = argparse.ArgumentParser()
    cli.add_argument('--mode',
                     type=str,
                     required=True,
                     help='train or evaluate')
    cli.add_argument('--teacher_model',
                     type=str,
                     default='../data/pretrained/teacher/iter_158000',
                     help='required in training.')
    cli.add_argument('--student_model',
                     type=str,
                     default=None,
                     help='required in testing and optional in training.')
    cli.add_argument('--gpu',
                     type=str,
                     default='0',
                     help='GPU to use.')
    opts = cli.parse_args()
    # Enumerate CUDA devices in PCI bus order and expose only the
    # requested one(s) to TensorFlow.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = opts.gpu
    if opts.mode == 'train':
        model = VS3D(kitti='../data/kitti/training',
                     train_set='../data/kitti/train.txt',
                     val_set='../data/kitti/val.txt',
                     is_training=True)
        model.train(model_image=opts.teacher_model)
    elif opts.mode == 'evaluate':
        model = VS3D(kitti='../data/kitti/training',
                     train_set='../data/kitti/train.txt',
                     val_set='../data/kitti/val.txt',
                     is_training=False)
        model.run(save_dir='../output',
                  lidar_model=opts.student_model)
    else:
        raise NotImplementedError
"tensorflow.shape",
"tensorflow.get_collection_ref",
"tensorflow.boolean_mask",
"tensorflow.reduce_sum",
"tensorflow.split",
"numpy.array",
"tensorflow.control_dependencies",
"numpy.sin",
"tensorflow.cast",
"tensorflow.clip_by_global_norm",
"numpy.save",
"os.path.exists",
"numpy.mean",
"ar... | [((215, 273), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'stream': 'sys.stdout'}), '(level=logging.INFO, stream=sys.stdout)\n', (234, 273), False, 'import logging\n'), ((23911, 23936), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (23934, 23936), False, 'import argparse\n'), ((471, 484), 'reader.Reader', 'Reader', (['kitti'], {}), '(kitti)\n', (477, 484), False, 'from reader import Reader\n'), ((514, 528), 'source.anchor_filter.AnchorFilter', 'AnchorFilter', ([], {}), '()\n', (526, 528), False, 'from source.anchor_filter import AnchorFilter\n'), ((1207, 1242), 'tensorflow.divide', 'tf.divide', (['num_keep', '(curr_num + 1.0)'], {}), '(num_keep, curr_num + 1.0)\n', (1216, 1242), True, 'import tensorflow as tf\n'), ((1441, 1473), 'tensorflow.less', 'tf.less', (['rand_select', 'keep_ratio'], {}), '(rand_select, keep_ratio)\n', (1448, 1473), True, 'import tensorflow as tf\n'), ((1489, 1515), 'tensorflow.logical_and', 'tf.logical_and', (['mask', 'keep'], {}), '(mask, keep)\n', (1503, 1515), True, 'import tensorflow as tf\n'), ((1756, 1785), 'tensorflow.greater', 'tf.greater', (['scores', 'pos_thres'], {}), '(scores, pos_thres)\n', (1766, 1785), True, 'import tensorflow as tf\n'), ((1904, 1930), 'tensorflow.less', 'tf.less', (['scores', 'neg_thres'], {}), '(scores, neg_thres)\n', (1911, 1930), True, 'import tensorflow as tf\n'), ((2047, 2084), 'tensorflow.logical_or', 'tf.logical_or', (['select_pos', 'select_neg'], {}), '(select_pos, select_neg)\n', (2060, 2084), True, 'import tensorflow as tf\n'), ((2391, 2443), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[64, 512, 5]', 'dtype': 'tf.float32'}), '(shape=[64, 512, 5], dtype=tf.float32)\n', (2405, 2443), True, 'import tensorflow as tf\n'), ((2542, 2596), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[384, 1248, 3]', 'dtype': 'tf.float32'}), '(shape=[384, 1248, 3], dtype=tf.float32)\n', (2556, 2596), True, 'import tensorflow 
as tf\n'), ((2694, 2737), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[2]', 'dtype': 'tf.float32'}), '(shape=[2], dtype=tf.float32)\n', (2708, 2737), True, 'import tensorflow as tf\n'), ((2831, 2874), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[4]', 'dtype': 'tf.float32'}), '(shape=[4], dtype=tf.float32)\n', (2845, 2874), True, 'import tensorflow as tf\n'), ((2973, 3019), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[4, 4]', 'dtype': 'tf.float32'}), '(shape=[4, 4], dtype=tf.float32)\n', (2987, 3019), True, 'import tensorflow as tf\n'), ((3117, 3163), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[3, 4]', 'dtype': 'tf.float32'}), '(shape=[3, 4], dtype=tf.float32)\n', (3131, 3163), True, 'import tensorflow as tf\n'), ((3262, 3308), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[4, 4]', 'dtype': 'tf.float32'}), '(shape=[4, 4], dtype=tf.float32)\n', (3276, 3308), True, 'import tensorflow as tf\n'), ((3401, 3456), 'tensorflow.split', 'tf.split', (["placeholder['sphere_map']", '[3, 1, 1]'], {'axis': '(-1)'}), "(placeholder['sphere_map'], [3, 1, 1], axis=-1)\n", (3409, 3456), True, 'import tensorflow as tf\n'), ((3796, 3853), 'tensorflow.greater', 'tf.greater', (['scores_init', '(0.9 if self.is_training else 0.8)'], {}), '(scores_init, 0.9 if self.is_training else 0.8)\n', (3806, 3853), True, 'import tensorflow as tf\n'), ((10609, 10653), 'numpy.dot', 'np.dot', (['R', '[x_corners, y_corners, z_corners]'], {}), '(R, [x_corners, y_corners, z_corners])\n', (10615, 10653), True, 'import numpy as np\n'), ((11717, 11757), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '()', 'dtype': 'tf.int32'}), '(shape=(), dtype=tf.int32)\n', (11731, 11757), True, 'import tensorflow as tf\n'), ((11782, 11875), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['learning_rate_init', 'global_step', '(120000)', '(0.2)'], {'staircase': '(False)'}), '(learning_rate_init, global_step, 120000, 
0.2,\n staircase=False)\n', (11808, 11875), True, 'import tensorflow as tf\n'), ((12186, 12237), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (12208, 12237), True, 'import tensorflow as tf\n'), ((12257, 12309), 'tensorflow.get_collection_ref', 'tf.get_collection_ref', (['tf.GraphKeys.GLOBAL_VARIABLES'], {}), '(tf.GraphKeys.GLOBAL_VARIABLES)\n', (12278, 12309), True, 'import tensorflow as tf\n'), ((1159, 1184), 'tensorflow.cast', 'tf.cast', (['mask', 'tf.float32'], {}), '(mask, tf.float32)\n', (1166, 1184), True, 'import tensorflow as tf\n'), ((2173, 2202), 'tensorflow.boolean_mask', 'tf.boolean_mask', (['tensor', 'mask'], {}), '(tensor, mask)\n', (2188, 2202), True, 'import tensorflow as tf\n'), ((8355, 8388), 'tensorflow.minimum', 'tf.minimum', (['class_prob', 'full_prob'], {}), '(class_prob, full_prob)\n', (8365, 8388), True, 'import tensorflow as tf\n'), ((9107, 9140), 'tensorflow.minimum', 'tf.minimum', (['class_prob', 'full_prob'], {}), '(class_prob, full_prob)\n', (9117, 9140), True, 'import tensorflow as tf\n'), ((10905, 10927), 'numpy.amin', 'np.amin', (['projection[0]'], {}), '(projection[0])\n', (10912, 10927), True, 'import numpy as np\n'), ((10953, 10975), 'numpy.amax', 'np.amax', (['projection[0]'], {}), '(projection[0])\n', (10960, 10975), True, 'import numpy as np\n'), ((11013, 11035), 'numpy.amin', 'np.amin', (['projection[1]'], {}), '(projection[1])\n', (11020, 11035), True, 'import numpy as np\n'), ((11061, 11083), 'numpy.amax', 'np.amax', (['projection[1]'], {}), '(projection[1])\n', (11068, 11083), True, 'import numpy as np\n'), ((11956, 11974), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['var'], {}), '(var)\n', (11969, 11974), True, 'import tensorflow as tf\n'), ((12063, 12089), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['weight_loss'], {}), '(weight_loss)\n', (12076, 12089), True, 'import tensorflow as tf\n'), ((12757, 12803), 
'tensorflow.clip_by_global_norm', 'tf.clip_by_global_norm', (['grads', 'clip_grads_norm'], {}), '(grads, clip_grads_norm)\n', (12779, 12803), True, 'import tensorflow as tf\n'), ((12923, 12965), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS'], {}), '(tf.GraphKeys.UPDATE_OPS)\n', (12940, 12965), True, 'import tensorflow as tf\n'), ((13276, 13288), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (13286, 13288), True, 'import tensorflow as tf\n'), ((13381, 13420), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'var_list': 'var_list_image'}), '(var_list=var_list_image)\n', (13395, 13420), True, 'import tensorflow as tf\n'), ((13447, 13486), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'var_list': 'var_list_lidar'}), '(var_list=var_list_lidar)\n', (13461, 13486), True, 'import tensorflow as tf\n'), ((17046, 17058), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (17056, 17058), True, 'import tensorflow as tf\n'), ((17091, 17143), 'tensorflow.get_collection_ref', 'tf.get_collection_ref', (['tf.GraphKeys.GLOBAL_VARIABLES'], {}), '(tf.GraphKeys.GLOBAL_VARIABLES)\n', (17112, 17143), True, 'import tensorflow as tf\n'), ((1340, 1354), 'tensorflow.shape', 'tf.shape', (['mask'], {}), '(mask)\n', (1348, 1354), True, 'import tensorflow as tf\n'), ((12015, 12039), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (12037, 12039), True, 'import tensorflow as tf\n'), ((12984, 13019), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['update_ops'], {}), '(update_ops)\n', (13007, 13019), True, 'import tensorflow as tf\n'), ((13319, 13352), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (13350, 13352), True, 'import tensorflow as tf\n'), ((13826, 13859), 'random.choice', 'random.choice', (['self.train_indices'], {}), '(self.train_indices)\n', (13839, 13859), False, 'import random\n'), ((14039, 14069), 'PIL.Image.open', 'Image.open', 
(["data['image_path']"], {}), "(data['image_path'])\n", (14049, 14069), False, 'from PIL import Image\n'), ((14180, 14223), 'numpy.array', 'np.array', (['[height, width]'], {'dtype': 'np.float32'}), '([height, width], dtype=np.float32)\n', (14188, 14223), True, 'import numpy as np\n'), ((14483, 14513), 'numpy.dot', 'np.dot', (["data['R0']", "data['Tr']"], {}), "(data['R0'], data['Tr'])\n", (14489, 14513), True, 'import numpy as np\n'), ((14544, 14570), 'numpy.linalg.inv', 'np.linalg.inv', (['velo_to_cam'], {}), '(velo_to_cam)\n', (14557, 14570), True, 'import numpy as np\n'), ((17522, 17561), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'var_list': 'var_list_image'}), '(var_list=var_list_image)\n', (17536, 17561), True, 'import tensorflow as tf\n'), ((17834, 17901), 'tensorflow.minimum', 'tf.minimum', (["self.endpoint['class_prob']", "self.endpoint['full_prob']"], {}), "(self.endpoint['class_prob'], self.endpoint['full_prob'])\n", (17844, 17901), True, 'import tensorflow as tf\n'), ((20025, 20055), 'PIL.Image.open', 'Image.open', (["data['image_path']"], {}), "(data['image_path'])\n", (20035, 20055), False, 'from PIL import Image\n'), ((20166, 20209), 'numpy.array', 'np.array', (['[height, width]'], {'dtype': 'np.float32'}), '([height, width], dtype=np.float32)\n', (20174, 20209), True, 'import numpy as np\n'), ((20469, 20499), 'numpy.dot', 'np.dot', (["data['R0']", "data['Tr']"], {}), "(data['R0'], data['Tr'])\n", (20475, 20499), True, 'import numpy as np\n'), ((20530, 20556), 'numpy.linalg.inv', 'np.linalg.inv', (['velo_to_cam'], {}), '(velo_to_cam)\n', (20543, 20556), True, 'import numpy as np\n'), ((20633, 20644), 'time.time', 'time.time', ([], {}), '()\n', (20642, 20644), False, 'import time\n'), ((10798, 10826), 'numpy.ones', 'np.ones', (['(8)'], {'dtype': 'np.float32'}), '(8, dtype=np.float32)\n', (10805, 10826), True, 'import numpy as np\n'), ((13121, 13172), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 
'learning_rate'}), '(learning_rate=learning_rate)\n', (13143, 13172), True, 'import tensorflow as tf\n'), ((16578, 16602), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (16592, 16602), False, 'import os\n'), ((16624, 16642), 'os.mkdir', 'os.mkdir', (['save_dir'], {}), '(save_dir)\n', (16632, 16642), False, 'import os\n'), ((18293, 18332), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'var_list': 'var_list_lidar'}), '(var_list=var_list_lidar)\n', (18307, 18332), True, 'import tensorflow as tf\n'), ((22922, 22946), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (22936, 22946), False, 'import os\n'), ((23513, 23547), 'numpy.save', 'np.save', (['f', 'instance_points_masked'], {}), '(f, instance_points_masked)\n', (23520, 23547), True, 'import numpy as np\n'), ((10183, 10199), 'numpy.cos', 'np.cos', (['rotation'], {}), '(rotation)\n', (10189, 10199), True, 'import numpy as np\n'), ((10205, 10221), 'numpy.sin', 'np.sin', (['rotation'], {}), '(rotation)\n', (10211, 10221), True, 'import numpy as np\n'), ((10314, 10330), 'numpy.sin', 'np.sin', (['rotation'], {}), '(rotation)\n', (10320, 10330), True, 'import numpy as np\n'), ((10336, 10352), 'numpy.cos', 'np.cos', (['rotation'], {}), '(rotation)\n', (10342, 10352), True, 'import numpy as np\n'), ((14986, 15012), 'tensorflow.get_collection', 'tf.get_collection', (['"""debug"""'], {}), "('debug')\n", (15003, 15012), True, 'import tensorflow as tf\n'), ((19022, 19061), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'var_list': 'var_list_image'}), '(var_list=var_list_image)\n', (19036, 19061), True, 'import tensorflow as tf\n'), ((19135, 19174), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'var_list': 'var_list_lidar'}), '(var_list=var_list_lidar)\n', (19149, 19174), True, 'import tensorflow as tf\n'), ((21269, 21280), 'time.time', 'time.time', ([], {}), '()\n', (21278, 21280), False, 'import time\n'), ((22980, 23010), 'os.path.join', 'os.path.join', (['save_dir', 
'"""bbox"""'], {}), "(save_dir, 'bbox')\n", (22992, 23010), False, 'import os\n'), ((23044, 23074), 'os.path.join', 'os.path.join', (['save_dir', '"""mask"""'], {}), "(save_dir, 'mask')\n", (23056, 23074), False, 'import os\n'), ((23116, 23162), 'os.path.join', 'os.path.join', (['save_dir', '"""bbox"""', "(index + '.txt')"], {}), "(save_dir, 'bbox', index + '.txt')\n", (23128, 23162), False, 'import os\n'), ((23356, 23402), 'os.path.join', 'os.path.join', (['save_dir', '"""mask"""', "(index + '.npy')"], {}), "(save_dir, 'mask', index + '.npy')\n", (23368, 23402), False, 'import os\n'), ((21605, 21624), 'numpy.mean', 'np.mean', (['total_time'], {}), '(total_time)\n', (21612, 21624), True, 'import numpy as np\n'), ((21626, 21644), 'numpy.std', 'np.std', (['total_time'], {}), '(total_time)\n', (21632, 21644), True, 'import numpy as np\n'), ((16353, 16379), 'numpy.mean', 'np.mean', (['rot_error_np_list'], {}), '(rot_error_np_list)\n', (16360, 16379), True, 'import numpy as np\n'), ((16270, 16296), 'numpy.mean', 'np.mean', (['mask_loss_np_list'], {}), '(mask_loss_np_list)\n', (16277, 16296), True, 'import numpy as np\n'), ((16191, 16216), 'numpy.mean', 'np.mean', (['rot_loss_np_list'], {}), '(rot_loss_np_list)\n', (16198, 16216), True, 'import numpy as np\n'), ((16106, 16133), 'numpy.mean', 'np.mean', (['class_loss_np_list'], {}), '(class_loss_np_list)\n', (16113, 16133), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# Lib:
import numpy as np
import pylab
def mandelbrot(h, w, maxit=100):
    """Return an (h, w) integer image of the Mandelbrot fractal.

    Each pixel holds the iteration index at which the corresponding
    complex point c diverged (|z| exceeded 2); points that do not
    diverge within ``maxit`` iterations keep the value ``maxit``.

    Parameters
    ----------
    h, w : int
        Height and width of the output image in pixels.
    maxit : int, optional
        Maximum number of iterations per pixel (default 100).
    """
    # Complex grid covering x in [-3, 1] and y in [-2, 2].
    y, x = np.ogrid[-2:2:h*1j, -3:1:w*1j]
    c = x + y*1j
    z = c
    divtime = maxit + np.zeros(z.shape, dtype=int)
    # BUG FIX: `xrange` is Python 2 only; use `range` for Python 3.
    for i in range(maxit):
        z = z**2 + c
        diverge = z*np.conj(z) > 2**2            # |z|^2 > 4: diverging
        div_now = diverge & (divtime == maxit)  # newly diverging points
        divtime[div_now] = i                    # record iteration index
        z[diverge] = 2                          # clamp to avoid overflow
    return divtime
# Render a 400x400 Mandelbrot image (up to 100 iterations per pixel).
pylab.imshow(mandelbrot(400, 400, 100))
# pylab.show()
| [
"numpy.conj",
"numpy.zeros"
] | [((282, 310), 'numpy.zeros', 'np.zeros', (['z.shape'], {'dtype': 'int'}), '(z.shape, dtype=int)\n', (290, 310), True, 'import numpy as np\n'), ((402, 412), 'numpy.conj', 'np.conj', (['z'], {}), '(z)\n', (409, 412), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import numpy as np
import pandas as pd
import copy
from ctypes import *
from ..exrpc.rpclib import *
from ..exrpc.server import *
from ..matrix.dtype import *
from .frovedisColumn import *
from . import df
class FrovedisGroupedDataframe:
    '''A python container for holding Frovedis side created grouped dataframe.

    NOTE: following the original coding convention of this module, the
    instance slot is named "cls" (it plays the role of "self").
    '''

    def __init__(cls, df=None):
        cls.__fdata = None    # proxy handle of the server-side grouped dataframe
        cls.__cols = None     # names of the grouping columns
        cls.__types = None    # dtypes of the grouping columns
        cls.__p_cols = None   # names of all (parent) columns
        cls.__p_types = None  # dtypes of all (parent) columns

    def load_dummy(cls, fdata, cols, types, p_cols, p_types):
        '''Attach server-side metadata to this container and expose every
        parent column as a FrovedisColumn attribute. Returns this object.'''
        cls.__fdata = fdata
        cls.__cols = copy.deepcopy(cols)
        cls.__types = copy.deepcopy(types)
        cls.__p_cols = copy.deepcopy(p_cols)
        cls.__p_types = copy.deepcopy(p_types)
        for cname, dt in zip(p_cols, p_types):
            cls.__dict__[cname] = FrovedisColumn(cname, dt)
        return cls

    def release(cls):
        '''Free the server-side grouped dataframe and reset local state.
        Safe to call when nothing is loaded.'''
        if cls.__fdata is not None:
            (host, port) = FrovedisServer.getServerInstance()
            rpclib.release_frovedis_dataframe(host, port, cls.__fdata)
            excpt = rpclib.check_server_exception()
            if excpt["status"]: raise RuntimeError(excpt["info"])
            # NOTE(review): only the grouping-column attributes are removed
            # here, although load_dummy() added one attribute per parent
            # column -- confirm whether the others should be dropped too.
            for cname in cls.__cols: del cls.__dict__[cname]
            cls.__fdata = None
            cls.__cols = None
            cls.__types = None
            cls.__p_cols = None
            cls.__p_types = None

    # Automatic release is deliberately disabled:
    #def __del__(cls):
    #    if FrovedisServer.isUP(): cls.release()

    def agg(cls, func, *args, **kwargs):
        '''Alias of aggregate().'''
        # BUG FIX: forward the captured arguments with * / ** instead of
        # passing the args tuple and kwargs dict as two positional values.
        return cls.aggregate(func, *args, **kwargs)

    def aggregate(cls, func, *args, **kwargs):
        '''Apply one or more aggregations on the grouped data.

        func: a function name (str), a list of function names, or a dict
        mapping column name -> list of function names.
        Returns a new FrovedisDataframe holding the grouping columns plus
        one "<func>(<col>)" column per requested aggregation.
        Raises ValueError if this object holds no server-side data.
        '''
        if cls.__fdata is not None:
            if isinstance(func, str):
                return cls.__agg_with_list([func])
            elif isinstance(func, list):
                return cls.__agg_with_list(func)
            elif isinstance(func, dict):
                return cls.__agg_with_dict(func)
            else: raise TypeError("Unsupported input type for aggregation")
        else: raise ValueError("Operation on invalid frovedis grouped dataframe!")

    def __agg_with_list(cls, func):
        # Apply every requested function to each numeric column; 'count'
        # (when requested) is additionally applied to non-numeric columns.
        args = {col: func for col in cls.__get_numeric_columns()}
        if 'count' in func:
            for col in cls.__get_non_numeric_columns():
                args[col] = ['count']
        return cls.__agg_with_dict(args)

    def __agg_with_dict(cls, func):
        # Flatten the {column: [functions]} request into parallel lists,
        # ship them to the server and wrap the resulting handle.
        agg_func = []
        agg_col = []
        agg_col_as = []
        agg_col_as_types = []
        for col, aggfuncs in func.items():
            if col not in cls.__dict__:
                # BUG FIX: build one message string instead of passing two
                # positional arguments to ValueError.
                raise ValueError("No column named: " + col)
            else: tid = cls.__dict__[col].dtype
            for f in aggfuncs:
                if (tid == DTYPE.STRING and f != 'count'):
                    raise ValueError("Currently Frovedis doesn't support aggregator %s \
        to be applied on string-typed column %s" %(f,col))
                else:
                    agg_func.append(f)
                    agg_col.append(col)
                    new_col = f + '(' + col + ')'
                    agg_col_as.append(new_col)
                    # count always yields LONG, mean always DOUBLE; other
                    # aggregators keep the source column dtype.
                    if(f == 'count'): col_as_tid = DTYPE.LONG
                    elif(f == 'mean'): col_as_tid = DTYPE.DOUBLE
                    else: col_as_tid = tid
                    agg_col_as_types.append(col_as_tid)
        # Marshal the grouping columns as a C array of ascii strings.
        g_cols = np.asarray(cls.__cols)
        sz1 = g_cols.size
        g_cols_arr = (c_char_p * sz1)()
        g_cols_arr[:] = np.array([e.encode('ascii') for e in g_cols.T])
        # Marshal the aggregation request the same way.
        a_func = np.asarray(agg_func)
        a_col = np.asarray(agg_col)
        a_col_as = np.asarray(agg_col_as)
        sz2 = a_func.size
        a_func_arr = (c_char_p * sz2)()
        a_col_arr = (c_char_p * sz2)()
        a_col_as_arr = (c_char_p * sz2)()
        a_func_arr[:] = np.array([e.encode('ascii') for e in a_func.T])
        a_col_arr[:] = np.array([e.encode('ascii') for e in a_col.T])
        a_col_as_arr[:] = np.array([e.encode('ascii') for e in a_col_as.T])
        (host, port) = FrovedisServer.getServerInstance()
        fdata = rpclib.agg_grouped_dataframe(host, port, cls.__fdata,
                                             g_cols_arr, sz1,
                                             a_func_arr, a_col_arr,
                                             a_col_as_arr, sz2)
        excpt = rpclib.check_server_exception()
        if excpt["status"]: raise RuntimeError(excpt["info"])
        cols = cls.__cols + agg_col_as
        types = cls.__types + agg_col_as_types
        return df.FrovedisDataframe().load_dummy(fdata, cols, types)

    def __get_numeric_columns(cls):
        # Names of all non-string parent columns.
        return [c for c, t in zip(cls.__p_cols, cls.__p_types)
                if t != DTYPE.STRING]

    def __get_non_numeric_columns(cls):
        # Names of all string-typed parent columns.
        return [c for c, t in zip(cls.__p_cols, cls.__p_types)
                if t == DTYPE.STRING]

    def get(cls):
        '''Return the server-side proxy handle (None if not loaded).'''
        return cls.__fdata
| [
"numpy.asarray",
"copy.deepcopy"
] | [((590, 609), 'copy.deepcopy', 'copy.deepcopy', (['cols'], {}), '(cols)\n', (603, 609), False, 'import copy\n'), ((628, 648), 'copy.deepcopy', 'copy.deepcopy', (['types'], {}), '(types)\n', (641, 648), False, 'import copy\n'), ((668, 689), 'copy.deepcopy', 'copy.deepcopy', (['p_cols'], {}), '(p_cols)\n', (681, 689), False, 'import copy\n'), ((710, 732), 'copy.deepcopy', 'copy.deepcopy', (['p_types'], {}), '(p_types)\n', (723, 732), False, 'import copy\n'), ((3225, 3247), 'numpy.asarray', 'np.asarray', (['cls.__cols'], {}), '(cls.__cols)\n', (3235, 3247), True, 'import numpy as np\n'), ((3388, 3408), 'numpy.asarray', 'np.asarray', (['agg_func'], {}), '(agg_func)\n', (3398, 3408), True, 'import numpy as np\n'), ((3421, 3440), 'numpy.asarray', 'np.asarray', (['agg_col'], {}), '(agg_col)\n', (3431, 3440), True, 'import numpy as np\n'), ((3456, 3478), 'numpy.asarray', 'np.asarray', (['agg_col_as'], {}), '(agg_col_as)\n', (3466, 3478), True, 'import numpy as np\n')] |
import numpy as np
import unittest
import pytest
from mvc.misc.batch import make_batch
class MakeBatchTest(unittest.TestCase):
    """Unit tests for mvc.misc.batch.make_batch."""

    def test_success(self):
        # Dataset length = k * batch_size + 1, so the trailing partial
        # batch must be dropped by the generator.
        batch_size = np.random.randint(32) + 1
        data_size = batch_size * np.random.randint(10) + 1
        data = {
            'test1': np.random.random(data_size),
            'test2': np.random.random(data_size),
        }
        n_batches = 0
        for batch in make_batch(data, batch_size, data_size):
            self.assertEqual(batch['test1'].shape[0], batch_size)
            self.assertEqual(batch['test2'].shape[0], batch_size)
            n_batches += 1
        self.assertEqual(n_batches, data_size // batch_size)

    def test_assertion_error(self):
        # A plain list is not a valid dataset mapping: pulling the first
        # batch must trip make_batch's input assertion.
        with pytest.raises(AssertionError):
            bad_data = [1, 2, 3]
            batches = make_batch(bad_data,
                                 np.random.randint(32) + 1,
                                 np.random.randint(1024) + 1)
            next(batches)
| [
"numpy.random.random",
"numpy.random.randint",
"pytest.raises",
"mvc.misc.batch.make_batch"
] | [((433, 472), 'mvc.misc.batch.make_batch', 'make_batch', (['data', 'batch_size', 'data_size'], {}), '(data, batch_size, data_size)\n', (443, 472), False, 'from mvc.misc.batch import make_batch\n'), ((179, 200), 'numpy.random.randint', 'np.random.randint', (['(32)'], {}), '(32)\n', (196, 200), True, 'import numpy as np\n'), ((302, 329), 'numpy.random.random', 'np.random.random', (['data_size'], {}), '(data_size)\n', (318, 329), True, 'import numpy as np\n'), ((354, 381), 'numpy.random.random', 'np.random.random', (['data_size'], {}), '(data_size)\n', (370, 381), True, 'import numpy as np\n'), ((713, 742), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (726, 742), False, 'import pytest\n'), ((896, 935), 'mvc.misc.batch.make_batch', 'make_batch', (['data', 'batch_size', 'data_size'], {}), '(data, batch_size, data_size)\n', (906, 935), False, 'from mvc.misc.batch import make_batch\n'), ((238, 259), 'numpy.random.randint', 'np.random.randint', (['(10)'], {}), '(10)\n', (255, 259), True, 'import numpy as np\n'), ((798, 819), 'numpy.random.randint', 'np.random.randint', (['(32)'], {}), '(32)\n', (815, 819), True, 'import numpy as np\n'), ((848, 871), 'numpy.random.randint', 'np.random.randint', (['(1024)'], {}), '(1024)\n', (865, 871), True, 'import numpy as np\n')] |
import json
import pytest
from lxml import etree
import numpy as np
import xarray as xr
import pandas as pd
import finch
import finch.processes
from finch.processes.wps_xclim_indices import XclimIndicatorBase
from finch.processes.wps_base import make_xclim_indicator_process
from . utils import execute_process, wps_input_file, wps_literal_input
from pathlib import Path
from pywps.app.exceptions import ProcessError
from pywps import configuration
from unittest import mock
from numpy.testing import assert_equal
from xclim.testing import open_dataset
# Offset used to turn Celsius test values into Kelvin.
# NOTE(review): 273.16 K is the triple point of water; the usual 0 degC
# offset is 273.15 K. Kept as-is since test expectations depend on it.
K2C = 273.16
# Tag outputs produced during this test session in the NetCDF metadata.
configuration.CONFIG['finch:metadata']['testing_session'] = "True"
def _get_output_standard_name(process_identifier):
    """Return the CF standard name of the output of the process with the
    given identifier, or None if no such process exists."""
    matches = (
        p.xci.standard_name
        for p in finch.processes.get_processes()
        if p.identifier == process_identifier
    )
    return next(matches, None)
@pytest.mark.parametrize("indicator", finch.processes.indicators)
def test_indicators_processes_discovery(indicator):
    """Each xclim indicator must map to a finch process whose inputs
    mirror the indicator's parameters."""
    process = make_xclim_indicator_process(indicator, "Process", XclimIndicatorBase)
    assert indicator.identifier == process.identifier
    # Keep only ordinary parameters: special kinds (50 = kwargs, 70 = the
    # 'ds' Dataset, 99 = unknown; normal kinds are 0-9) are not exposed by
    # finch, except for 'indexer' which becomes month/season inputs below.
    expected = {
        name
        for name, param in indicator.parameters.items()
        if param["kind"] < 50 or name == "indexer"
    }
    # Inputs finch always adds on top of the indicator's own parameters.
    expected |= {
        "check_missing",
        "missing_options",
        "cf_compliance",
        "data_validation",
        "variable",
    }
    if "indexer" in expected:
        expected.remove("indexer")
        expected |= {"month", "season"}
    assert_equal(expected, {i.identifier for i in process.inputs}, indicator.identifier)
# TODO : Extend test coverage
def test_processes(client, netcdf_datasets):
    """Run a dummy calculation for every process, keeping some default parameters."""
    # indicators = finch.processes.indicators
    # Every WPS process object defined in the finch.processes.xclim module.
    processes = filter(lambda x: isinstance(x, XclimIndicatorBase), finch.processes.xclim.__dict__.values())
    # Literal values supplied for inputs that require one.
    literal_inputs = {
        "freq": "MS",
        "window": "3",
        "mid_date": "07-01",
        "before_date": "07-01",
    }
    # Inputs deliberately left at their process defaults.
    keep_defaults = ["thresh", "thresh_tasmin", "thresh_tasmax"]
    # Global attributes of one input file, used below to rebuild the
    # expected output file name.
    attrs = xr.open_dataset(list(netcdf_datasets.values())[0], decode_times=False).attrs
    for process in processes:
        inputs = []
        # Map each declared WPS input to a NetCDF file, a literal value,
        # or (for known thresholds) its default.
        for process_input in process.inputs:
            name = process_input.identifier
            if name in netcdf_datasets.keys():
                inputs.append(wps_input_file(name, netcdf_datasets[name]))
            elif name in literal_inputs.keys():
                inputs.append(wps_literal_input(name, literal_inputs[name]))
            elif name in keep_defaults:
                pass
            else:
                raise NotImplementedError
        outputs = execute_process(client, process.identifier, inputs)
        ds = xr.open_dataset(outputs[0])
        output_variable = list(ds.data_vars)[0]
        # The output variable advertises the indicator's CF standard name,
        # and the configured session flag ends up in the file metadata.
        assert getattr(ds, output_variable).standard_name == process.xci.standard_name
        assert ds.attrs['testing_session']
        # Expected file name:
        # <var>_<model>_<experiment>_<ensemble>_<start>-<end>.nc
        model = attrs["driving_model_id"]
        experiment = attrs["driving_experiment_id"].replace(",", "+")
        ensemble = (
            f"r{attrs['driving_realization']}"
            f"i{attrs['driving_initialization_method']}"
            f"p{attrs['driving_physics_version']}"
        )
        date_start = pd.to_datetime(str(ds.time[0].values))
        date_end = pd.to_datetime(str(ds.time[-1].values))
        expected = (
            f"{output_variable.replace('_', '-')}_"
            f"{model}_{experiment}_{ensemble}_"
            f"{date_start:%Y%m%d}-{date_end:%Y%m%d}.nc"
        )
        assert Path(outputs[0]).name == expected
def test_wps_daily_temperature_range_multiple(client, netcdf_datasets):
    """Five tasmax/tasmin file pairs must yield five distinct output files
    and progress updates spanning 0-100 %."""
    identifier = "dtr"
    inputs = [wps_literal_input("freq", "YS")]
    for _ in range(5):
        inputs.append(wps_input_file("tasmax", netcdf_datasets["tasmax"]))
        inputs.append(wps_input_file("tasmin", netcdf_datasets["tasmin"]))
    with mock.patch(
        "finch.processes.wps_xclim_indices.FinchProgressBar"
    ) as mock_progress:
        outputs = execute_process(
            client, identifier, inputs, output_names=["output_netcdf", "ref"]
        )
    # Each of the 5 file pairs gets a 20 % slice of the progress bar.
    assert mock_progress.call_args_list[0][1]["start_percentage"] == 0
    assert mock_progress.call_args_list[0][1]["end_percentage"] == 20
    assert mock_progress.call_args_list[4][1]["start_percentage"] == 80
    assert mock_progress.call_args_list[4][1]["end_percentage"] == 100
    # The metalink ("ref") output references one NetCDF file per pair.
    et = etree.fromstring(outputs[1].data[0].encode())
    urls = [e[2].text for e in et if e.tag.endswith("file")]
    # BUG FIX: the message claimed "10 files" while the assertion checks 5.
    assert len(urls) == 5, "Containing 5 files"
    assert len(set(urls)) == 5, "With different links"
    assert urls[1].endswith("-1.nc")
def test_wps_daily_temperature_range_multiple_not_same_length(client, netcdf_datasets):
    """Mismatched numbers of tasmax and tasmin files must be rejected."""
    identifier = "dtr"
    pairs = []
    for _ in range(5):
        pairs.append(wps_input_file("tasmax", netcdf_datasets["tasmax"]))
        pairs.append(wps_input_file("tasmin", netcdf_datasets["tasmin"]))
    # Drop the final tasmin so the two variables have different counts.
    inputs = [wps_literal_input("freq", "YS")] + pairs[:-1]
    with pytest.raises(ProcessError, match="must be equal"):
        execute_process(
            client, identifier, inputs, output_names=["output_netcdf", "ref"]
        )
def test_heat_wave_frequency_window_thresh_parameters(client, netcdf_datasets):
    """heat_wave_frequency must honour window and threshold parameters."""
    identifier = "heat_wave_frequency"
    outputs = execute_process(
        client,
        identifier,
        [
            wps_input_file("tasmax", netcdf_datasets["tasmax"]),
            wps_input_file("tasmin", netcdf_datasets["tasmin"]),
            wps_literal_input("window", "3"),
            wps_literal_input("freq", "YS"),
            wps_literal_input("thresh_tasmin", "20 degC"),
            wps_literal_input("thresh_tasmax", "25 degC"),
        ],
    )
    ds = xr.open_dataset(outputs[0])
    assert ds.attrs["frequency"] == "yr"
    assert ds.heat_wave_frequency.standard_name == _get_output_standard_name(identifier)
def test_heat_wave_index_thresh_parameter(client, netcdf_datasets):
    """heat_wave_index must accept a custom temperature threshold."""
    identifier = "heat_wave_index"
    outputs = execute_process(
        client,
        identifier,
        [
            wps_input_file("tasmax", netcdf_datasets["tasmax"]),
            wps_literal_input("thresh", "30 degC"),
        ],
    )
    ds = xr.open_dataset(outputs[0])
    assert ds["heat_wave_index"].standard_name == _get_output_standard_name(identifier)
def test_missing_options(client, netcdf_datasets):
    """tg_mean must mask periods containing missing data unless a tolerant
    missing-values policy is requested."""
    identifier = "tg_mean"

    def _base_inputs():
        # Rebuilt for each request, matching the original per-call setup.
        return [
            wps_input_file("tas", netcdf_datasets["tas_missing"]),
            wps_literal_input("freq", "YS"),
        ]

    # Default policy: any missing value masks the whole period.
    outputs = execute_process(client, identifier, _base_inputs())
    ds = xr.open_dataset(outputs[0])
    np.testing.assert_array_equal(ds.tg_mean.isnull(), True)

    # 10 % tolerance: the period is kept despite the missing values.
    tolerant = _base_inputs() + [
        wps_literal_input("check_missing", "pct"),
        wps_literal_input("missing_options", json.dumps({"pct": {"tolerance": 0.1}})),
    ]
    outputs = execute_process(client, identifier, tolerant)
    ds = xr.open_dataset(outputs[0])
    np.testing.assert_array_equal(ds.tg_mean.isnull(), False)
def test_stats_process(client, netcdf_datasets):
    """stats must aggregate the selected variable of a multi-variable file."""
    identifier = "stats"
    outputs = execute_process(
        client,
        identifier,
        [
            wps_input_file("da", netcdf_datasets["pr_discharge"]),
            wps_literal_input("freq", "YS"),
            wps_literal_input("op", "max"),
            wps_literal_input("season", "JJA"),
            wps_literal_input("variable", "discharge"),
        ],
    )
    ds = xr.open_dataset(outputs[0])
    np.testing.assert_array_equal(ds.qsummermax.isnull(), False)
def test_freqanalysis_process(client, netcdf_datasets):
    """freq_analysis must return one field per requested return period."""
    identifier = "freq_analysis"
    outputs = execute_process(
        client,
        identifier,
        [
            wps_input_file("da", netcdf_datasets["discharge"]),
            wps_literal_input("t", "2"),
            wps_literal_input("t", "50"),
            wps_literal_input("freq", "YS"),
            wps_literal_input("mode", "max"),
            wps_literal_input("season", "JJA"),
            wps_literal_input("dist", "gumbel_r"),
            wps_literal_input("variable", "discharge"),
        ],
    )
    ds = xr.open_dataset(outputs[0])
    # Two return periods (2 and 50 years) over the 5x6 spatial grid.
    np.testing.assert_array_equal(ds.q1maxsummer.shape, (2, 5, 6))
class TestFitProcess:
    """Tests of the distribution-fitting WPS process."""

    identifier = "fit"

    def test_simple(self, client, netcdf_datasets):
        """Fitting a normal distribution yields one parameter set per cell."""
        outputs = execute_process(
            client,
            self.identifier,
            [
                wps_input_file("da", netcdf_datasets["discharge"]),
                wps_literal_input("dist", "norm"),
            ],
        )
        ds = xr.open_dataset(outputs[0])
        np.testing.assert_array_equal(ds.params.shape, (2, 5, 6))

    def test_nan(self, client, q_series, tmp_path):
        """NaNs in the input series must not poison the fitted parameters."""
        series = q_series([333, 145, 203, 109, 430, 230, np.nan])
        series.to_netcdf(tmp_path / "q.nc")
        outputs = execute_process(
            client,
            self.identifier,
            [
                wps_input_file("da", tmp_path / "q.nc"),
                wps_literal_input("dist", "norm"),
            ],
        )
        ds = xr.open_dataset(outputs[0])
        np.testing.assert_array_equal(ds.params.isnull(), False)
def test_rain_approximation(client, pr_series, tas_series, tmp_path):
    """prlp with the binary method keeps precipitation only where the
    temperature exceeds the threshold."""
    identifier = "prlp"
    pr_path = tmp_path / "pr.nc"
    tas_path = tmp_path / "tas.nc"
    pr_series(np.ones(10)).to_netcdf(pr_path)
    tas_series(np.arange(10) + K2C).to_netcdf(tas_path)
    inputs = [
        wps_input_file("pr", pr_path),
        wps_input_file("tas", tas_path),
        wps_literal_input("thresh", "5 degC"),
        wps_literal_input("method", "binary"),
    ]
    outputs = execute_process(client, identifier, inputs)
    with xr.open_dataset(outputs[0]) as ds:
        np.testing.assert_allclose(
            ds.prlp, [0, 0, 0, 0, 0, 1, 1, 1, 1, 1], atol=1e-5, rtol=1e-3
        )
@pytest.mark.xfail
def test_two_nondefault_variable_name(client, pr_series, tas_series, tmp_path):
    """prlp should pick the requested variable when both inputs use
    non-default variable names (currently expected to fail)."""
    identifier = "prlp"
    pr_path = tmp_path / "pr.nc"
    tas_path = tmp_path / "tas.nc"
    pr_series(np.ones(10)).to_dataset(name="my_pr").to_netcdf(pr_path)
    tas_series(np.arange(10) + K2C).to_dataset(name="my_tas").to_netcdf(tas_path)
    inputs = [
        wps_input_file("pr", pr_path),
        wps_input_file("tas", tas_path),
        wps_literal_input("thresh", "5 degC"),
        wps_literal_input("method", "binary"),
        wps_literal_input("variable", "my_pr"),
    ]
    outputs = execute_process(client, identifier, inputs)
    with xr.open_dataset(outputs[0]) as ds:
        np.testing.assert_allclose(
            ds.prlp, [0, 0, 0, 0, 0, 1, 1, 1, 1, 1], atol=1e-5, rtol=1e-3
        )
def test_degree_days_exceedance_date(client, tmp_path):
    """Date at which the degree-day sum above 4 degC exceeds 200 K days."""
    identifier = "degree_days_exceedance_date"
    tas = open_dataset("FWI/GFWED_sample_2017.nc").tas
    tas.attrs.update(
        cell_methods="time: mean within days", standard_name="air_temperature"
    )
    tas.to_netcdf(tmp_path / "tas.nc")
    inputs = [
        wps_input_file("tas", tmp_path / "tas.nc"),
        wps_literal_input("thresh", "4 degC"),
        wps_literal_input("op", ">"),
        wps_literal_input("sum_thresh", "200 K days"),
    ]
    outputs = execute_process(client, identifier, inputs)
    with xr.open_dataset(outputs[0]) as ds:
        expected = np.array([[153, 136, 9, 6]]).T
        np.testing.assert_array_equal(ds.degree_days_exceedance_date, expected)
| [
"finch.processes.wps_base.make_xclim_indicator_process",
"xclim.testing.open_dataset",
"numpy.ones",
"pathlib.Path",
"numpy.arange",
"numpy.testing.assert_allclose",
"json.dumps",
"pytest.mark.parametrize",
"finch.processes.get_processes",
"numpy.array",
"pytest.raises",
"finch.processes.xclim... | [((824, 888), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""indicator"""', 'finch.processes.indicators'], {}), "('indicator', finch.processes.indicators)\n", (847, 888), False, 'import pytest\n'), ((702, 733), 'finch.processes.get_processes', 'finch.processes.get_processes', ([], {}), '()\n', (731, 733), False, 'import finch\n'), ((955, 1025), 'finch.processes.wps_base.make_xclim_indicator_process', 'make_xclim_indicator_process', (['indicator', '"""Process"""', 'XclimIndicatorBase'], {}), "(indicator, 'Process', XclimIndicatorBase)\n", (983, 1025), False, 'from finch.processes.wps_base import make_xclim_indicator_process\n'), ((5950, 5977), 'xarray.open_dataset', 'xr.open_dataset', (['outputs[0]'], {}), '(outputs[0])\n', (5965, 5977), True, 'import xarray as xr\n'), ((6411, 6438), 'xarray.open_dataset', 'xr.open_dataset', (['outputs[0]'], {}), '(outputs[0])\n', (6426, 6438), True, 'import xarray as xr\n'), ((6800, 6827), 'xarray.open_dataset', 'xr.open_dataset', (['outputs[0]'], {}), '(outputs[0])\n', (6815, 6827), True, 'import xarray as xr\n'), ((7219, 7246), 'xarray.open_dataset', 'xr.open_dataset', (['outputs[0]'], {}), '(outputs[0])\n', (7234, 7246), True, 'import xarray as xr\n'), ((7775, 7802), 'xarray.open_dataset', 'xr.open_dataset', (['outputs[0]'], {}), '(outputs[0])\n', (7790, 7802), True, 'import xarray as xr\n'), ((8407, 8434), 'xarray.open_dataset', 'xr.open_dataset', (['outputs[0]'], {}), '(outputs[0])\n', (8422, 8434), True, 'import xarray as xr\n'), ((8439, 8501), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['ds.q1maxsummer.shape', '(2, 5, 6)'], {}), '(ds.q1maxsummer.shape, (2, 5, 6))\n', (8468, 8501), True, 'import numpy as np\n'), ((2025, 2064), 'finch.processes.xclim.__dict__.values', 'finch.processes.xclim.__dict__.values', ([], {}), '()\n', (2062, 2064), False, 'import finch\n'), ((2947, 2974), 'xarray.open_dataset', 'xr.open_dataset', (['outputs[0]'], {}), '(outputs[0])\n', 
(2962, 2974), True, 'import xarray as xr\n'), ((4136, 4200), 'unittest.mock.patch', 'mock.patch', (['"""finch.processes.wps_xclim_indices.FinchProgressBar"""'], {}), "('finch.processes.wps_xclim_indices.FinchProgressBar')\n", (4146, 4200), False, 'from unittest import mock\n'), ((5261, 5311), 'pytest.raises', 'pytest.raises', (['ProcessError'], {'match': '"""must be equal"""'}), "(ProcessError, match='must be equal')\n", (5274, 5311), False, 'import pytest\n'), ((8823, 8850), 'xarray.open_dataset', 'xr.open_dataset', (['outputs[0]'], {}), '(outputs[0])\n', (8838, 8850), True, 'import xarray as xr\n'), ((8859, 8916), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['ds.params.shape', '(2, 5, 6)'], {}), '(ds.params.shape, (2, 5, 6))\n', (8888, 8916), True, 'import numpy as np\n'), ((9265, 9292), 'xarray.open_dataset', 'xr.open_dataset', (['outputs[0]'], {}), '(outputs[0])\n', (9280, 9292), True, 'import xarray as xr\n'), ((9868, 9895), 'xarray.open_dataset', 'xr.open_dataset', (['outputs[0]'], {}), '(outputs[0])\n', (9883, 9895), True, 'import xarray as xr\n'), ((9911, 10007), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['ds.prlp', '[0, 0, 0, 0, 0, 1, 1, 1, 1, 1]'], {'atol': '(1e-05)', 'rtol': '(0.001)'}), '(ds.prlp, [0, 0, 0, 0, 0, 1, 1, 1, 1, 1], atol=\n 1e-05, rtol=0.001)\n', (9937, 10007), True, 'import numpy as np\n'), ((10681, 10708), 'xarray.open_dataset', 'xr.open_dataset', (['outputs[0]'], {}), '(outputs[0])\n', (10696, 10708), True, 'import xarray as xr\n'), ((10724, 10820), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['ds.prlp', '[0, 0, 0, 0, 0, 1, 1, 1, 1, 1]'], {'atol': '(1e-05)', 'rtol': '(0.001)'}), '(ds.prlp, [0, 0, 0, 0, 0, 1, 1, 1, 1, 1], atol=\n 1e-05, rtol=0.001)\n', (10750, 10820), True, 'import numpy as np\n'), ((10952, 10992), 'xclim.testing.open_dataset', 'open_dataset', (['"""FWI/GFWED_sample_2017.nc"""'], {}), "('FWI/GFWED_sample_2017.nc')\n", (10964, 10992), False, 'from 
xclim.testing import open_dataset\n'), ((11443, 11470), 'xarray.open_dataset', 'xr.open_dataset', (['outputs[0]'], {}), '(outputs[0])\n', (11458, 11470), True, 'import xarray as xr\n'), ((7105, 7144), 'json.dumps', 'json.dumps', (["{'pct': {'tolerance': 0.1}}"], {}), "({'pct': {'tolerance': 0.1}})\n", (7115, 7144), False, 'import json\n'), ((3775, 3791), 'pathlib.Path', 'Path', (['outputs[0]'], {}), '(outputs[0])\n', (3779, 3791), False, 'from pathlib import Path\n'), ((9469, 9480), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (9476, 9480), True, 'import numpy as np\n'), ((11548, 11576), 'numpy.array', 'np.array', (['[[153, 136, 9, 6]]'], {}), '([[153, 136, 9, 6]])\n', (11556, 11576), True, 'import numpy as np\n'), ((9527, 9540), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (9536, 9540), True, 'import numpy as np\n'), ((10163, 10174), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (10170, 10174), True, 'import numpy as np\n'), ((10246, 10259), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (10255, 10259), True, 'import numpy as np\n')] |
from multiprocessing import Pool
from bokeh.io import export_png
from bokeh.plotting import figure, show, output_file
from bokeh.palettes import Category10 as palette
import itertools
# import matplotlib.pyplot as plt
import numpy as np
import tqdm
# from progress.bar import Bar
from network_simulator.components import simulator
from network_simulator.helpers import writeSimCache, readSimCache, genDescendUnitArray
def main():
    # Multiprocessing worker entry point: runs one simulation using the
    # module-level globals that seriesRatioMP() sets before creating the Pool.
    # NOTE(review): this relies on the workers inheriting the globals —
    # presumably a fork start method; confirm behavior on spawn platforms.
    return simulator(g_init_vars, g_aplist, g_usrlist)
def seriesRatioMP(init_vars, aplist, usrlist):
    """Sweep the geometric series ratio across several energy policies and
    plot the average number of serviced users per ratio with bokeh.

    The simulation inputs are published as module globals so that the
    multiprocessing workers (see main()) can pick them up.  When
    plot_from_saved is non-zero, previously cached results are plotted
    instead of re-running the simulations.  Returns the bokeh figure.
    """
    global g_init_vars, g_aplist, g_usrlist
    g_init_vars = init_vars
    g_aplist = aplist
    g_usrlist = usrlist
    # Hard-coded switch: 1 = load cached results, 0 = run the simulations.
    plot_from_saved = 1
    geometric_ratio = np.arange(0.01, 0.99, 0.01)
    total_runs = range(20)  # simulation repetitions averaged per ratio
    _output = {}
    if plot_from_saved == 0:
        # Each axes entry is one curve: a label plus the policy settings
        # injected into init_vars before running the sweep.
        _sim_dict_axes = {
            "axes1" : {
                "param" : "No Transmission Policy - Share Evenly",
                "ENERGY_POLICY" : 0,
                "SHARE_ENERGY" : 1,
            },
            "axes2" : {
                "param" : "Cheapest Users - Share Evenly",
                "ENERGY_POLICY" : 2,
                "SHARE_ENERGY" : 1,
            },
            "axes3" : {
                "param" : "No Transmission Policy - AP Energy Arrival",
                "ENERGY_POLICY" : 0,
                "SHARE_ENERGY" : 2,
            },
            "axes4" : {
                "param" : "Cheapest Users - AP Energy Arrival",
                "ENERGY_POLICY" : 2,
                "SHARE_ENERGY" : 2,
            },
            "axes5" : {
                "param" : "No Transmission Policy - AP Energy Use",
                "ENERGY_POLICY" : 0,
                "SHARE_ENERGY" : 3,
            },
            "axes6" : {
                "param" : "Cheapest Users - AP Energy Use",
                "ENERGY_POLICY" : 2,
                "SHARE_ENERGY" : 3,
            },
            "axes7" : {
                "param" : "No Transmission Policy - AP Efficiency",
                "ENERGY_POLICY" : 0,
                "SHARE_ENERGY" : 4,
            },
            "axes8" : {
                "param" : "Cheapest Users - AP Efficiency",
                "ENERGY_POLICY" : 2,
                "SHARE_ENERGY" : 4,
            }
        }
        # One progress tick per (policy, ratio) combination.
        bar = tqdm.tqdm(total=len(_sim_dict_axes.keys()) * len(geometric_ratio))
        # bar = Bar("Geometric Ratio MP" , max=len(_sim_dict_axes.values()))
        for axes in _sim_dict_axes.values():
            for param in ["ENERGY_POLICY", "SHARE_ENERGY"]:
                init_vars[param] = axes[param]
            _avg_serviced_users = []
            for ratio in geometric_ratio:
                # Mutating init_vars also mutates g_init_vars (same object),
                # so the Pool created below sees the updated settings.
                init_vars["SERIES_RATIO"] = ratio
                init_vars["descendunit_arr"] = genDescendUnitArray(init_vars["AP_TOTAL"], 1, init_vars["SERIES_RATIO"])
                pool = Pool(10)
                _serviced_users = [pool.apply_async(main, ()) for run in total_runs]
                # Average the serviced-user count over all repetitions.
                _avg_serviced_users.append(sum([result.get() for result in _serviced_users]) / len(total_runs))
                bar.update(1)
                pool.close()
                pool.join()
            _output[axes["param"]] = { "result" : _avg_serviced_users }
        bar.close()
        writeSimCache("GeometricRatioMP", _output)
    else:
        # Reuse cached results from a previous sweep.
        _output = readSimCache("GeometricRatioMP")
    output_file("interactive/geometricratio.html")
    TOOLTIPS = [
        ("(x, y)", "($x, $y)"),
        ("desc", "$name")
    ]
    # Plot colours
    colors = itertools.cycle(palette[8])
    p = figure(width=1200, height=800, x_axis_label='Geometric Ratio', y_axis_label='Total Number of Serviced Users', tooltips=TOOLTIPS, output_backend='svg')
    for key, value in _output.items():
        # Print the mean over all ratios for each policy curve.
        print(key + " : " + str(sum(value["result"])/len(value["result"])))
        p.line(geometric_ratio, value["result"], legend_label=key, name=key, color=next(colors), line_width=3)
    # p.legend.location = (20, 100)
    # export_png(p, filename="test.png")
    p.xaxis.axis_label_text_font_size='20px'
    p.xaxis.major_label_text_font_size='20px'
    p.yaxis.axis_label_text_font_size='20px'
    p.yaxis.major_label_text_font_size='20px'
    p.legend.label_text_font_size='18px'
    p.legend[0].orientation = "vertical"
    # Move the legend outside the plot area.
    legend_ref = p.legend[0]
    p.add_layout(legend_ref, "right")
    show(p)
    # Strip toolbar/logo after showing, before the figure is returned.
    p.toolbar.logo = None
    p.toolbar_location = None
    return p
| [
"itertools.cycle",
"network_simulator.helpers.readSimCache",
"bokeh.plotting.figure",
"bokeh.plotting.show",
"numpy.arange",
"network_simulator.helpers.genDescendUnitArray",
"multiprocessing.Pool",
"network_simulator.helpers.writeSimCache",
"network_simulator.components.simulator",
"bokeh.plotting... | [((442, 485), 'network_simulator.components.simulator', 'simulator', (['g_init_vars', 'g_aplist', 'g_usrlist'], {}), '(g_init_vars, g_aplist, g_usrlist)\n', (451, 485), False, 'from network_simulator.components import simulator\n'), ((701, 728), 'numpy.arange', 'np.arange', (['(0.01)', '(0.99)', '(0.01)'], {}), '(0.01, 0.99, 0.01)\n', (710, 728), True, 'import numpy as np\n'), ((3359, 3405), 'bokeh.plotting.output_file', 'output_file', (['"""interactive/geometricratio.html"""'], {}), "('interactive/geometricratio.html')\n", (3370, 3405), False, 'from bokeh.plotting import figure, show, output_file\n'), ((3541, 3568), 'itertools.cycle', 'itertools.cycle', (['palette[8]'], {}), '(palette[8])\n', (3556, 3568), False, 'import itertools\n'), ((3578, 3733), 'bokeh.plotting.figure', 'figure', ([], {'width': '(1200)', 'height': '(800)', 'x_axis_label': '"""Geometric Ratio"""', 'y_axis_label': '"""Total Number of Serviced Users"""', 'tooltips': 'TOOLTIPS', 'output_backend': '"""svg"""'}), "(width=1200, height=800, x_axis_label='Geometric Ratio', y_axis_label\n ='Total Number of Serviced Users', tooltips=TOOLTIPS, output_backend='svg')\n", (3584, 3733), False, 'from bokeh.plotting import figure, show, output_file\n'), ((4372, 4379), 'bokeh.plotting.show', 'show', (['p'], {}), '(p)\n', (4376, 4379), False, 'from bokeh.plotting import figure, show, output_file\n'), ((3250, 3292), 'network_simulator.helpers.writeSimCache', 'writeSimCache', (['"""GeometricRatioMP"""', '_output'], {}), "('GeometricRatioMP', _output)\n", (3263, 3292), False, 'from network_simulator.helpers import writeSimCache, readSimCache, genDescendUnitArray\n'), ((3321, 3353), 'network_simulator.helpers.readSimCache', 'readSimCache', (['"""GeometricRatioMP"""'], {}), "('GeometricRatioMP')\n", (3333, 3353), False, 'from network_simulator.helpers import writeSimCache, readSimCache, genDescendUnitArray\n'), ((2756, 2828), 'network_simulator.helpers.genDescendUnitArray', 'genDescendUnitArray', 
(["init_vars['AP_TOTAL']", '(1)', "init_vars['SERIES_RATIO']"], {}), "(init_vars['AP_TOTAL'], 1, init_vars['SERIES_RATIO'])\n", (2775, 2828), False, 'from network_simulator.helpers import writeSimCache, readSimCache, genDescendUnitArray\n'), ((2853, 2861), 'multiprocessing.Pool', 'Pool', (['(10)'], {}), '(10)\n', (2857, 2861), False, 'from multiprocessing import Pool\n')] |
import numpy as np
from produce_lightcurve import Lightcurve
import synphot as sp
import dorado.sensitivity
import astropy.units as u
from astropy import constants as c
import dill as pickle
import os
from dynesty_sampler import getSampler, wrappedSampler, find
from parameters import getParameters
# get parameters
# Read the run configuration from the command line (see parameters.getParameters).
parameters = getParameters(osargs_list=['read_data','model','delay','dist','include_optical','include_uv','print_progress','method','resume_previous','sample','save_after_seconds','parallel','dlogz_threshold'])
# Example of the expected keys/values:
# parameters = {
#     'model' : 'kilonova_uvboost',
#     'delay' : 0,
#     'dist' : 40,
#     'include_optical' : 'False',
#     'include_uv' : 'NUV_D',
#     'print_progress' : 'True',
#     'method' : 'sample',
#     'read_data' : 'shock',
#     'sample' : 'auto',
#     'resume_previous' : 'False',
#     'save_after_seconds' : 600,  # float/int or will resort to False
#     'parallel' : 'True',
#     'dlogz_threshold' : 10000
# }
model = parameters['model']  # shock, kilonova, kilonova_uvboost
delay = parameters['delay']  # hours
dist = parameters['dist']  # Mpc
include_optical = parameters['include_optical'].split(',')  # e.g. 'r' or 'u,g,r,i,z' or 'False'
print_progress = parameters['print_progress'] == 'True'
method = parameters['method']  # 'plot' or 'sample'
include_uv = parameters['include_uv'].split(',')
read_data = parameters['read_data']
sample = parameters['sample']
resume_previous = parameters['resume_previous'] == 'True'
# 'save_after_seconds' and 'parallel' fall back to False whenever they are not
# integer-like (e.g. the string 'False').  Only conversion/lookup errors are
# caught here; the previous bare `except:` also swallowed SystemExit and
# KeyboardInterrupt, which must propagate.
try:
    save_after_seconds = int(parameters['save_after_seconds'])
except (KeyError, TypeError, ValueError):
    save_after_seconds = False
try:
    parallel = int(parameters['parallel'])
except (KeyError, TypeError, ValueError):
    parallel = False
dlogz_threshold = float(parameters['dlogz_threshold'])
######## MORE PARAMETERS, DONT TOUCH ##########
distance = dist * u.Mpc
heating = 'beta'
# Map the model name to the radiation mechanism used by the lightcurve code.
if model == 'shock':
    radiation = 'shock'
elif model == 'kilonova' or model == 'kilonova_uvboost':
    radiation = 'kilonova'
# bs: bandpass objects keyed by band name, filled from the UV and optical
# selections below.  The string 'False' disables a whole group.
bs = {}
# Include uv bands
if include_uv == ['False']:
    uv_string = 'no_uv'
else:
    uv_string = ''.join(include_uv)
    if type(include_uv) == list:
        for key in include_uv:
            bs[key] = getattr(dorado.sensitivity.bandpasses, key)
    elif type(include_uv) == str:
        bs[include_uv] = getattr(dorado.sensitivity.bandpasses, include_uv)
# Include optical bands
if include_optical == ['False']:
    optical_string = 'no_optical'
else:
    optical_string = ''.join(include_optical)
    if type(include_optical) == list:
        for key in include_optical:
            bs[key] = sp.SpectralElement.from_file(f'input_files/bands/SLOAN_SDSS.{key}.dat')
    elif type(include_optical) == str:
        bs[include_optical] = sp.SpectralElement.from_file(f'input_files/bands/SLOAN_SDSS.{include_optical}.dat')
#### READ DATA #########
# Load the simulated observations; the pickle is produced locally
# (trusted input) and unpacks into times, magnitudes, SNRs and errors,
# each presumably a dict keyed by band name — confirm against the writer.
with open(f'input_files/data/SNR_fiducial_{read_data}_{dist}Mpc_opticalbands_ugri_uvbands_NUV_DD1D2_{delay}h_delay.pkl','rb') as tf:
    data_list = pickle.load(tf)
ts_data, abmags_data, snrs, abmags_error = data_list
ts_data, abmags_data, snrs, abmags_error = data_list
########## LOG PROBABILITIES ##########
if model == 'kilonova' or model == 'kilonova_uvboost':
ndim = 7
limits = np.ndarray((ndim,2),dtype=object)
limits[0] = [0.01, 0.1] # mass (solar masses)
if model == 'kilonova_uvboost':
limits[1:4] = np.array(([0.05, 0.2], ['vmin', 'vmax'], [0.21, 0.8]),dtype=object) # velocities (cm/s)
limits[4:6] = np.array(([1,10],[0.01,0.1])) # opacities (cm^2/g)
elif model == 'kilonova':
limits[1:4] = np.array(([0.05, 0.2], ['vmin', 'vmax'], [0.3, 0.8]),dtype=object) # velocities (cm/s)
limits[4:6] = np.array(([1,10],[0.1,1])) # opacities (cm^2/g)
limits[6] = [4,5]
elif model == 'shock':
#limits=np.array(([0.01,10],[0.01,5],[0.01,3],[0.1,10])) #old broad
limits=np.array(([1,10],[0.5,5],[1,3],[1,10])) #lim[[lower,upper],..
# k in 0.1 cm^2/g, M in 0.01 solar masses, v in 0.1c, R_0 in 10^10 cm
ndim = limits.shape[0]
########## DEFINE MODEL ##########
lightcurve_object = Lightcurve(distance, heating_function=heating)
def lightcurve_model(t,theta_reshaped,bandpasses):
abmags = lightcurve_object.calc_abmags_combined(t,theta_reshaped,bandpasses,radiation = radiation)
return abmags
folderstring = f'output_files/results/{model}model_{read_data}data_{delay}h_delay'
filestring = f'{dist}Mpc_{optical_string}band_{uv_string}band'
# Only (re)define the prior transform and log-likelihood when we are not
# resuming from a saved sampler (a saved sampler pickles its own functions).
if not (resume_previous == True and isinstance(find(filestring+'_sampler_dlogz=*', folderstring), str)):
    if model == 'kilonova' or model == 'kilonova_uvboost':
        def priortransform(uniform):
            # Map the unit cube to physical parameters; v_k is drawn
            # conditionally between the sampled v_min and v_max.
            mass = (limits[0,1]-limits[0,0])*uniform[0]+limits[0,0]
            #velocities = (limits[1:4,1]-limits[1:4,0])*uniform[1:4]+limits[1:4,0]
            v_min = (limits[1,1]-limits[1,0])*uniform[1]+limits[1,0]
            v_max = (limits[3,1]-limits[3,0])*uniform[2]+limits[3,0]
            v_k = (v_max-v_min)*uniform[3]+v_min
            #velocities = np.array([v_min, v_k, v_max])
            opacities = (limits[4:6,1]-limits[4:6,0])*uniform[4:6]+limits[4:6,0]
            n = (limits[6,1]-limits[6,0])*uniform[6]+limits[6,0]
            theta = np.array([mass, v_min, v_k, v_max, opacities[0], opacities[1], n]) #for postprocessing
            return theta
        def loglikelihood(theta):
            # Attach units: mass in Msun, velocities in c, opacities in cm^2/g.
            theta_reshaped = np.array((theta[0] * u.Msun, np.array((theta[1], theta[2], theta[3])) * c.c, np.array((theta[4], theta[5])) * u.cm**2 / u.g, theta[6]), dtype= 'object')
            sigmas2 = {}
            loglikelihood = {}
            abmags_model = lightcurve_model(ts_data, theta_reshaped, bs)
            # Gaussian log-likelihood per band, summed over bands.
            for key in bs:
                sigmas2[key] = abmags_error[key]**2
                loglikelihood[key] = -0.5 * np.sum((abmags_data[key] - abmags_model[key]) ** 2 / sigmas2[key] + np.log(2*np.pi*sigmas2[key]))
            return sum(loglikelihood.values())
    elif model == 'shock':
        def priortransform(u):
            # NOTE(review): the parameter name `u` shadows the astropy.units
            # alias `u` inside this function (harmless here, but confusing).
            theta = (limits[:,1]-limits[:,0])*u+limits[:,0]
            return theta
        def loglikelihood(theta):
            sigmas2 = {}
            loglikelihood = {}
            abmags_model = lightcurve_model(ts_data, theta, bs)
            # Gaussian log-likelihood per band, summed over bands.
            for key in bs:
                sigmas2[key] = abmags_error[key]**2
                loglikelihood[key] = -0.5 * np.sum((abmags_data[key] - abmags_model[key]) ** 2 / sigmas2[key] + np.log(2*np.pi*sigmas2[key]))
            return sum(loglikelihood.values())
####### TESTS / PARAMETER ESTIMATION #############
# 'plot': evaluate the likelihood at one fixed theta, time it, and plot the
# model against the data.  'sample': run (or resume) the nested sampler.
if method == 'plot':
    print(f'testing...{model} model, {read_data} data')
    import timeit
    if model == 'kilonova' or model == 'kilonova_uvboost':
        #uniform_random = np.random.rand(ndim)
        #theta = priortransform(uniform_random)
        #### parameters
        if model == 'kilonova':
            theta = np.array((0.05,0.1,0.2,0.4,3.0,0.5,4.5))
        elif model == 'kilonova_uvboost':
            theta = np.array((0.05,0.1,0.2,0.23,3.0,0.04,4.5))
        logl = loglikelihood(theta)
        print('theta: ',theta)
        print('log(likelihood): ', logl)
        # Median of 10 single-call timings, reported in milliseconds.
        timing_loglikelihood = int(np.round(1e3 * np.median(timeit.repeat(
            'loglikelihood(theta)',
            globals=globals(), number=1, repeat=10))))
        print(f'One loglikelihood calculation = {timing_loglikelihood} ms')
        theta_reshaped = np.array((theta[0] * u.Msun, np.array((theta[1], theta[2], theta[3])) * c.c, np.array((theta[4], theta[5])) * u.cm**2 / u.g, theta[6]), dtype= 'object')
        abmags_model = lightcurve_model(ts_data, theta_reshaped, bs)
    elif model == 'shock':
        k = 10 # 0.1 cm^2/g
        M = 0.5 #0.01 solar masses
        v = 2 #0.1c
        R = 10 #10^10 cm #Initial radius for shock
        theta = k, M, v, R
        # uniform_random = np.random.rand(ndim)
        # theta = priortransform(uniform_random)
        logl = loglikelihood(theta)
        print('theta: ',theta)
        print('log(likelihood): ', logl)
        # Median of 10 single-call timings, reported in milliseconds.
        timing_loglikelihood = int(np.round(1e3 * np.median(timeit.repeat(
            'loglikelihood(theta)',
            globals=globals(), number=1, repeat=10))))
        print(f'One loglikelihood calculation = {timing_loglikelihood} ms')
        abmags_model = lightcurve_model(ts_data, theta, bs)
    import matplotlib.pyplot as plt
    fig,ax = plt.subplots()
    # Data as crosses, model as lines, per band.
    for key in bs:
        ax.plot(ts_data[key].to_value('day'),abmags_data[key],'x')
        ax.plot(ts_data[key].to_value('day'),abmags_model[key])
    # Narrowed from a bare `except:` — only "already exists" is expected here;
    # any other OSError (e.g. missing parent, permissions) should propagate.
    try:
        os.mkdir('./output_files/plots')
        print('created folder for plots')
    except FileExistsError:
        print('folder for plots exists')
    print_string = f'./output_files/plots/test_{model}model_{read_data}data_{delay}h_delay_{dist}Mpc_{optical_string}band_{uv_string}band.png'
    fig.savefig(print_string)
    print(f'saved in {print_string}')
########## NESTED SAMPLER #########
elif method == 'sample':
    # Narrowed from a bare `except:` for the same reason as above.
    try:
        os.mkdir(folderstring)
        print(f'Created directory: {folderstring}')
    except FileExistsError:
        print(f'Opened directory: {folderstring}')
    if not os.path.exists(folderstring+f'/{filestring}_results'):
        # Persist the prior limits and run parameters next to the results.
        with open(folderstring+f'/{filestring}_priorlims','wb') as prior_limits :
            pickle.dump(limits, prior_limits)
        with open(folderstring+f'/{filestring}_parameters','wb') as input_parameters :
            pickle.dump(parameters,input_parameters)
        # If priortransform was not (re)defined above we are resuming: recover
        # the functions from the restored sampler instead.
        try:
            priortransform
        except NameError:
            sampler, previous_dlogz = getSampler(ndim, folderstring, filestring, parallel=parallel, sample=sample, resume_previous=resume_previous)
            priortransform=sampler.prior_transform.func
            loglikelihood=sampler.loglikelihood.func
        else:
            sampler, previous_dlogz = getSampler(ndim, folderstring, filestring, loglikelihood=loglikelihood, priortransform=priortransform, parallel=parallel, sample=sample, resume_previous=resume_previous)
        wrappedSampler(sampler, folderstring, filestring, previous_dlogz=previous_dlogz, sample=sample, save_after_seconds=save_after_seconds, print_progress=print_progress, parallel=parallel, dlogz_threshold=dlogz_threshold)
    else:
        print(f'{filestring}_results already exists, skipping...')
| [
"os.path.exists",
"parameters.getParameters",
"dynesty_sampler.getSampler",
"dynesty_sampler.wrappedSampler",
"numpy.log",
"dynesty_sampler.find",
"numpy.array",
"dill.dump",
"numpy.ndarray",
"os.mkdir",
"synphot.SpectralElement.from_file",
"produce_lightcurve.Lightcurve",
"matplotlib.pyplot... | [((331, 552), 'parameters.getParameters', 'getParameters', ([], {'osargs_list': "['read_data', 'model', 'delay', 'dist', 'include_optical', 'include_uv',\n 'print_progress', 'method', 'resume_previous', 'sample',\n 'save_after_seconds', 'parallel', 'dlogz_threshold']"}), "(osargs_list=['read_data', 'model', 'delay', 'dist',\n 'include_optical', 'include_uv', 'print_progress', 'method',\n 'resume_previous', 'sample', 'save_after_seconds', 'parallel',\n 'dlogz_threshold'])\n", (344, 552), False, 'from parameters import getParameters\n'), ((4007, 4053), 'produce_lightcurve.Lightcurve', 'Lightcurve', (['distance'], {'heating_function': 'heating'}), '(distance, heating_function=heating)\n', (4017, 4053), False, 'from produce_lightcurve import Lightcurve\n'), ((2943, 2958), 'dill.load', 'pickle.load', (['tf'], {}), '(tf)\n', (2954, 2958), True, 'import dill as pickle\n'), ((3138, 3173), 'numpy.ndarray', 'np.ndarray', (['(ndim, 2)'], {'dtype': 'object'}), '((ndim, 2), dtype=object)\n', (3148, 3173), True, 'import numpy as np\n'), ((8319, 8333), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (8331, 8333), True, 'import matplotlib.pyplot as plt\n'), ((3281, 3349), 'numpy.array', 'np.array', (["([0.05, 0.2], ['vmin', 'vmax'], [0.21, 0.8])"], {'dtype': 'object'}), "(([0.05, 0.2], ['vmin', 'vmax'], [0.21, 0.8]), dtype=object)\n", (3289, 3349), True, 'import numpy as np\n'), ((3392, 3424), 'numpy.array', 'np.array', (['([1, 10], [0.01, 0.1])'], {}), '(([1, 10], [0.01, 0.1]))\n', (3400, 3424), True, 'import numpy as np\n'), ((3783, 3829), 'numpy.array', 'np.array', (['([1, 10], [0.5, 5], [1, 3], [1, 10])'], {}), '(([1, 10], [0.5, 5], [1, 3], [1, 10]))\n', (3791, 3829), True, 'import numpy as np\n'), ((8505, 8537), 'os.mkdir', 'os.mkdir', (['"""./output_files/plots"""'], {}), "('./output_files/plots')\n", (8513, 8537), False, 'import os\n'), ((2536, 2607), 'synphot.SpectralElement.from_file', 'sp.SpectralElement.from_file', 
(['f"""input_files/bands/SLOAN_SDSS.{key}.dat"""'], {}), "(f'input_files/bands/SLOAN_SDSS.{key}.dat')\n", (2564, 2607), True, 'import synphot as sp\n'), ((2677, 2765), 'synphot.SpectralElement.from_file', 'sp.SpectralElement.from_file', (['f"""input_files/bands/SLOAN_SDSS.{include_optical}.dat"""'], {}), "(\n f'input_files/bands/SLOAN_SDSS.{include_optical}.dat')\n", (2705, 2765), True, 'import synphot as sp\n'), ((3496, 3563), 'numpy.array', 'np.array', (["([0.05, 0.2], ['vmin', 'vmax'], [0.3, 0.8])"], {'dtype': 'object'}), "(([0.05, 0.2], ['vmin', 'vmax'], [0.3, 0.8]), dtype=object)\n", (3504, 3563), True, 'import numpy as np\n'), ((3606, 3635), 'numpy.array', 'np.array', (['([1, 10], [0.1, 1])'], {}), '(([1, 10], [0.1, 1]))\n', (3614, 3635), True, 'import numpy as np\n'), ((4422, 4473), 'dynesty_sampler.find', 'find', (["(filestring + '_sampler_dlogz=*')", 'folderstring'], {}), "(filestring + '_sampler_dlogz=*', folderstring)\n", (4426, 4473), False, 'from dynesty_sampler import getSampler, wrappedSampler, find\n'), ((5137, 5203), 'numpy.array', 'np.array', (['[mass, v_min, v_k, v_max, opacities[0], opacities[1], n]'], {}), '([mass, v_min, v_k, v_max, opacities[0], opacities[1], n])\n', (5145, 5203), True, 'import numpy as np\n'), ((6832, 6878), 'numpy.array', 'np.array', (['(0.05, 0.1, 0.2, 0.4, 3.0, 0.5, 4.5)'], {}), '((0.05, 0.1, 0.2, 0.4, 3.0, 0.5, 4.5))\n', (6840, 6878), True, 'import numpy as np\n'), ((8931, 8953), 'os.mkdir', 'os.mkdir', (['folderstring'], {}), '(folderstring)\n', (8939, 8953), False, 'import os\n'), ((9080, 9135), 'os.path.exists', 'os.path.exists', (["(folderstring + f'/{filestring}_results')"], {}), "(folderstring + f'/{filestring}_results')\n", (9094, 9135), False, 'import os\n'), ((9956, 10187), 'dynesty_sampler.wrappedSampler', 'wrappedSampler', (['sampler', 'folderstring', 'filestring'], {'previous_dlogz': 'previous_dlogz', 'sample': 'sample', 'save_after_seconds': 'save_after_seconds', 'print_progress': 'print_progress', 
'parallel': 'parallel', 'dlogz_threshold': 'dlogz_threshold'}), '(sampler, folderstring, filestring, previous_dlogz=\n previous_dlogz, sample=sample, save_after_seconds=save_after_seconds,\n print_progress=print_progress, parallel=parallel, dlogz_threshold=\n dlogz_threshold)\n', (9970, 10187), False, 'from dynesty_sampler import getSampler, wrappedSampler, find\n'), ((6935, 6983), 'numpy.array', 'np.array', (['(0.05, 0.1, 0.2, 0.23, 3.0, 0.04, 4.5)'], {}), '((0.05, 0.1, 0.2, 0.23, 3.0, 0.04, 4.5))\n', (6943, 6983), True, 'import numpy as np\n'), ((9229, 9262), 'dill.dump', 'pickle.dump', (['limits', 'prior_limits'], {}), '(limits, prior_limits)\n', (9240, 9262), True, 'import dill as pickle\n'), ((9362, 9403), 'dill.dump', 'pickle.dump', (['parameters', 'input_parameters'], {}), '(parameters, input_parameters)\n', (9373, 9403), True, 'import dill as pickle\n'), ((9778, 9955), 'dynesty_sampler.getSampler', 'getSampler', (['ndim', 'folderstring', 'filestring'], {'loglikelihood': 'loglikelihood', 'priortransform': 'priortransform', 'parallel': 'parallel', 'sample': 'sample', 'resume_previous': 'resume_previous'}), '(ndim, folderstring, filestring, loglikelihood=loglikelihood,\n priortransform=priortransform, parallel=parallel, sample=sample,\n resume_previous=resume_previous)\n', (9788, 9955), False, 'from dynesty_sampler import getSampler, wrappedSampler, find\n'), ((7382, 7422), 'numpy.array', 'np.array', (['(theta[1], theta[2], theta[3])'], {}), '((theta[1], theta[2], theta[3]))\n', (7390, 7422), True, 'import numpy as np\n'), ((9507, 9620), 'dynesty_sampler.getSampler', 'getSampler', (['ndim', 'folderstring', 'filestring'], {'parallel': 'parallel', 'sample': 'sample', 'resume_previous': 'resume_previous'}), '(ndim, folderstring, filestring, parallel=parallel, sample=sample,\n resume_previous=resume_previous)\n', (9517, 9620), False, 'from dynesty_sampler import getSampler, wrappedSampler, find\n'), ((5346, 5386), 'numpy.array', 'np.array', (['(theta[1], theta[2], 
theta[3])'], {}), '((theta[1], theta[2], theta[3]))\n', (5354, 5386), True, 'import numpy as np\n'), ((7430, 7460), 'numpy.array', 'np.array', (['(theta[4], theta[5])'], {}), '((theta[4], theta[5]))\n', (7438, 7460), True, 'import numpy as np\n'), ((5394, 5424), 'numpy.array', 'np.array', (['(theta[4], theta[5])'], {}), '((theta[4], theta[5]))\n', (5402, 5424), True, 'import numpy as np\n'), ((5790, 5822), 'numpy.log', 'np.log', (['(2 * np.pi * sigmas2[key])'], {}), '(2 * np.pi * sigmas2[key])\n', (5796, 5822), True, 'import numpy as np\n'), ((6378, 6410), 'numpy.log', 'np.log', (['(2 * np.pi * sigmas2[key])'], {}), '(2 * np.pi * sigmas2[key])\n', (6384, 6410), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from tools import get_array, dags
# Plot per-level histograms of 'find_or_add' indices for lookups that did
# NOT add a new node.  Data comes from the first DAG in tools.dags.
data = dags[0]
find_or_add_index = get_array(data, "find_or_add index")
find_or_add_add = get_array(data, "find_or_add add")
find_or_add_level = get_array(data, "find_or_add level")
plt.xlabel("index")
plt.ylabel("num")
# Boolean masks: operations that added a node vs. pure lookups.
indices_add = find_or_add_add == 1
indices_noadd = find_or_add_add == 0
print("Num add: ", np.sum(indices_add))
print("Num no add: ", np.sum(indices_noadd))
# Sanity check: every record is either an add or a no-add.
assert np.sum(indices_add) + np.sum(indices_noadd) == len(find_or_add_index)
plot_index = 0
for level in range(32):
    level_indices = level == find_or_add_level
    if np.sum(level_indices) == 0:
        continue
    plot_index += 1
    # NOTE(review): the 2x3 grid supports at most 6 populated levels;
    # plt.subplot raises if more levels are present.  Confirm the data
    # never exceeds 6 levels, or size the grid dynamically.
    plt.subplot(2, 3, plot_index)
    plt.title("level = " + str(level))
    indices = find_or_add_index[np.logical_and(indices_noadd, level_indices)]
    # One unit-width bin per index value up to the maximum seen.
    plt.hist(indices, color="blue", bins=range(0, int(np.max(indices) + 1)))
plt.show()
| [
"tools.get_array",
"numpy.logical_and",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.max",
"numpy.sum",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] | [((122, 158), 'tools.get_array', 'get_array', (['data', '"""find_or_add index"""'], {}), "(data, 'find_or_add index')\n", (131, 158), False, 'from tools import get_array, dags\n'), ((177, 211), 'tools.get_array', 'get_array', (['data', '"""find_or_add add"""'], {}), "(data, 'find_or_add add')\n", (186, 211), False, 'from tools import get_array, dags\n'), ((232, 268), 'tools.get_array', 'get_array', (['data', '"""find_or_add level"""'], {}), "(data, 'find_or_add level')\n", (241, 268), False, 'from tools import get_array, dags\n'), ((270, 289), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""index"""'], {}), "('index')\n", (280, 289), True, 'import matplotlib.pyplot as plt\n'), ((290, 307), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""num"""'], {}), "('num')\n", (300, 307), True, 'import matplotlib.pyplot as plt\n'), ((933, 943), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (941, 943), True, 'import matplotlib.pyplot as plt\n'), ((401, 420), 'numpy.sum', 'np.sum', (['indices_add'], {}), '(indices_add)\n', (407, 420), True, 'import numpy as np\n'), ((444, 465), 'numpy.sum', 'np.sum', (['indices_noadd'], {}), '(indices_noadd)\n', (450, 465), True, 'import numpy as np\n'), ((709, 738), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', 'plot_index'], {}), '(2, 3, plot_index)\n', (720, 738), True, 'import matplotlib.pyplot as plt\n'), ((475, 494), 'numpy.sum', 'np.sum', (['indices_add'], {}), '(indices_add)\n', (481, 494), True, 'import numpy as np\n'), ((497, 518), 'numpy.sum', 'np.sum', (['indices_noadd'], {}), '(indices_noadd)\n', (503, 518), True, 'import numpy as np\n'), ((639, 660), 'numpy.sum', 'np.sum', (['level_indices'], {}), '(level_indices)\n', (645, 660), True, 'import numpy as np\n'), ((810, 854), 'numpy.logical_and', 'np.logical_and', (['indices_noadd', 'level_indices'], {}), '(indices_noadd, level_indices)\n', (824, 854), True, 'import numpy as np\n'), ((910, 925), 'numpy.max', 'np.max', (['indices'], {}), '(indices)\n', (916, 
925), True, 'import numpy as np\n')] |
from typing import Dict, List, Union

import numpy as np

from . import constant
from .matrix import Matrix
class Key(Matrix):
    """A 4x4 AES round-key byte matrix (thin wrapper around Matrix)."""
    def __init__(self, array: np.ndarray) -> None:
        # NOTE(review): __init__ always returns None, so self._state is
        # always None here.  Presumably the Matrix base class stores the
        # array itself; confirm whether _state is read anywhere before
        # removing the assignment.
        self._state = super().__init__(array)
def a_key_schedule(round_key: Key, round: int) -> Key:
    """Derive the next AES round key from the previous one.

    Column 0 of each row combines the S-box substitution of the rotated
    last column (row-3 wraps to row+1 mod 4, i.e. RotWord+SubWord) with
    the previous key byte and the round constant; the remaining columns
    chain XORs along the row, matching the AES key-expansion recurrence.
    NOTE: the parameter name `round` shadows the builtin of the same name.
    """
    new_round_key = np.empty([4, 4], dtype="uint8")
    for row in range(4):
        # SubWord(RotWord(last column)) ^ first column ^ Rcon[round]
        new_round_key[row,0] = constant.S_BOX[round_key.array[row-3,3]] ^ round_key.array[row,0] ^ constant.R_CON[row,round-1]
        for column in range(1,4):
            # w[i] = w[i-4] ^ w[i-1], expressed row-wise on the byte matrix.
            new_round_key[row,column] = round_key.array[row,column] ^ new_round_key[row,column-1]
    return Key(new_round_key)
def key_schedule(cipherkey: Key, amount_of_round: int) -> Dict[Union[str, int], Key]:
    """Generate the full AES round-key schedule.

    Returns a dict mapping "cipherkey" to the original key and each round
    number (1..amount_of_round) to its derived round key.  (The previous
    ``List[Key]`` return annotation was wrong — the function has always
    returned a dict.)
    """
    keys = {
        "cipherkey" : cipherkey,
        1 : a_key_schedule(cipherkey, 1)
    }
    # Each round key is derived from the previous one.
    for round_number in range(2, amount_of_round + 1):
        keys[round_number] = a_key_schedule(keys[round_number - 1], round_number)
    return keys
| [
"numpy.empty"
] | [((334, 365), 'numpy.empty', 'np.empty', (['[4, 4]'], {'dtype': '"""uint8"""'}), "([4, 4], dtype='uint8')\n", (342, 365), True, 'import numpy as np\n')] |
# DAIN frame-interpolation worker script.
#
# Reads a configuration dict from ``process_info.txt`` (model name/path,
# slow-motion factor ``sf``, temp folder, frame list), builds the DAIN
# network, then for every consecutive pair of input ``.npz`` frames writes
# the first frame plus ``sf - 1`` interpolated frames to the out folder.
import time
import os
from torch.autograd import Variable
import torch
import numpy
import networks
import shutil
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
torch.backends.cudnn.benchmark = True
# NOTE(review): ``eval`` on file contents executes arbitrary code — only
# safe because process_info.txt is produced by a trusted companion script.
with open('process_info.txt', 'r') as file:
    process_info = file.read()
process_info = eval(process_info)
os.chdir(process_info['dain_folder'])
# Width of the zero-padded interpolation index in output filenames.
sf_length = len(str(process_info['sf'] - 1))
# Instantiate the requested DAIN network variant by name and move it to GPU.
model = networks.__dict__[process_info['net_name']](
    channel=3,
    filter_size=4,
    timestep=1 / process_info['sf'],
    training=False).cuda()
model_path = process_info['model_path']
pretrained_dict = torch.load(model_path)
model_dict = model.state_dict()
# 1. filter out unnecessary keys
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
# 2. overwrite entries in the existing state dict
model_dict.update(pretrained_dict)
# 3. load the new state dict
model.load_state_dict(model_dict)
# 4. release the pretrained dict for saving memory
pretrained_dict = []
model = model.eval() # deploy mode
timestep = 1/process_info['sf']
# Fractional time positions of the frames to synthesize between each pair.
time_offsets = [kk * timestep for kk in range(1, int(1.0 / timestep))]
torch.set_grad_enabled(False)
input_files = process_info['frames_to_process']
loop_timer = []
try:
    for _ in range(len(input_files) - 1):
        start_time = time.time()
        filename_frame_1 = f'{process_info["temp_folder"]}/in/{input_files[_]}'
        filename_frame_2 = f'{process_info["temp_folder"]}/in/{input_files[_ + 1]}'
        # X0 = torch.from_numpy(numpy.transpose(numpy.load(filename_frame_1)['arr_0'], (2, 0, 1))[0:3].astype("float32") / 255.0).type(torch.cuda.FloatTensor)
        # X1 = torch.from_numpy(numpy.transpose(numpy.load(filename_frame_2)['arr_0'], (2, 0, 1))[0:3].astype("float32") / 255.0).type(torch.cuda.FloatTensor)
        # Load HWC uint8 arrays, keep first 3 channels, convert to CHW float in [0, 1].
        X0 = torch.cuda.FloatTensor(numpy.load(filename_frame_1)['arr_0'])[:, :, :3].permute(2, 0, 1) / 255
        X1 = torch.cuda.FloatTensor(numpy.load(filename_frame_2)['arr_0'])[:, :, :3].permute(2, 0, 1) / 255
        assert (X0.size() == X1.size())
        intWidth = X0.size(2)
        intHeight = X0.size(1)
        channels = X0.size(0)
        if not channels == 3:
            print(f"Skipping {filename_frame_1}-{filename_frame_2} -- expected 3 color channels but found {channels}.")
            continue
        # Pad each spatial dimension up to a multiple of 128 (``>> 7 << 7``),
        # or by a fixed 32 on each side when already aligned.
        if intWidth != ((intWidth >> 7) << 7):
            intWidth_pad = (((intWidth >> 7) + 1) << 7) # more than necessary
            intPaddingLeft = int((intWidth_pad - intWidth) / 2)
            intPaddingRight = intWidth_pad - intWidth - intPaddingLeft
        else:
            intPaddingLeft = 32
            intPaddingRight = 32
        if intHeight != ((intHeight >> 7) << 7):
            intHeight_pad = (((intHeight >> 7) + 1) << 7) # more than necessary
            intPaddingTop = int((intHeight_pad - intHeight) / 2)
            intPaddingBottom = intHeight_pad - intHeight - intPaddingTop
        else:
            intPaddingTop = 32
            intPaddingBottom = 32
        pader = torch.nn.ReplicationPad2d([intPaddingLeft, intPaddingRight, intPaddingTop, intPaddingBottom])
        X0 = Variable(torch.unsqueeze(X0, 0))
        X1 = Variable(torch.unsqueeze(X1, 0))
        X0 = pader(X0)
        X1 = pader(X1)
        # Forward pass: the model returns candidate outputs, flow offsets and filters.
        y_s, offset, filter = model(torch.stack((X0, X1), dim=0))
        y_ = y_s[process_info['save_which']]
        X0 = X0.data.cpu().numpy()
        if not isinstance(y_, list):
            y_ = [y_.data.cpu().numpy()]
        else:
            y_ = [item.data.cpu().numpy() for item in y_]
        offset = [offset_i.data.cpu().numpy() for offset_i in offset]
        filter = [filter_i.data.cpu().numpy() for filter_i in filter] if filter[0] is not None else None
        X1 = X1.data.cpu().numpy()
        # Crop the padding back off and convert to HWC arrays scaled to [0, 255].
        X0 = numpy.transpose(255.0 * X0.clip(0, 1.0)[0, :, intPaddingTop:intPaddingTop + intHeight, intPaddingLeft: intPaddingLeft + intWidth], (1, 2, 0))
        y_ = [numpy.transpose(255.0 * item.clip(0, 1.0)[0, :, intPaddingTop:intPaddingTop + intHeight,intPaddingLeft:intPaddingLeft + intWidth], (1, 2, 0)) for item in y_]
        offset = [numpy.transpose(offset_i[0, :, intPaddingTop:intPaddingTop + intHeight, intPaddingLeft: intPaddingLeft + intWidth], (1, 2, 0)) for offset_i in offset]
        filter = [numpy.transpose(filter_i[0, :, intPaddingTop:intPaddingTop + intHeight, intPaddingLeft: intPaddingLeft + intWidth], (1, 2, 0)) for filter_i in filter] if filter is not None else None
        X1 = numpy.transpose(255.0 * X1.clip(0, 1.0)[0, :, intPaddingTop:intPaddingTop + intHeight, intPaddingLeft: intPaddingLeft + intWidth], (1, 2, 0))
        interpolated_frame_number = 0
        # Copy the original first frame as interpolation index 0, then save
        # each synthesized in-between frame with an increasing index suffix.
        shutil.copy(filename_frame_1, f'{process_info["temp_folder"]}/out/{input_files[_].replace(".npz", "")}_{"0".zfill(sf_length)}.npz')
        for item, time_offset in zip(y_, time_offsets):
            interpolated_frame_number += 1
            output_frame_file_path = f'{process_info["temp_folder"]}/out/{input_files[_].replace(".npz", "")}_{str(interpolated_frame_number).zfill(sf_length)}'
            numpy.savez_compressed(output_frame_file_path, numpy.round(item).astype('uint8'))
        end_time = time.time()
        time_spent = end_time - start_time
        # Progress reporting: first iteration includes model init cost, later
        # iterations feed a running average for the time-left estimate.
        if _ == 0:
            frame_count_len = len(str(len(input_files)))
            print(f"****** Initialized model and processed frame {'1'.zfill(frame_count_len)} | Time spent: {round(time_spent, 2)}s ******************")
        else:
            if _ == 1:
                len_time_spent = len(str(round(time_spent))) + 5
            loop_timer.append(time_spent)
            frames_left = len(input_files) - _ - 2
            estimated_seconds_left = round(frames_left * sum(loop_timer)/len(loop_timer), 2)
            m, s = divmod(estimated_seconds_left, 60)
            h, m = divmod(m, 60)
            estimated_time_left = "%d:%02d:%02d" % (h, m, s)
            print('\r'+f"****** Processed frame {str(_+1).zfill(frame_count_len)} | Time spent: {(str(round(time_spent, 2)) + 's').ljust(len_time_spent)} | Time left: {estimated_time_left} ******************", end='', flush=True)
    print("\nFinished processing images.")
except KeyboardInterrupt:
    exit(1)
| [
"torch.unsqueeze",
"torch.load",
"torch.stack",
"os.chdir",
"torch.nn.ReplicationPad2d",
"torch.set_grad_enabled",
"numpy.transpose",
"time.time",
"warnings.filterwarnings",
"numpy.round",
"numpy.load"
] | [((131, 186), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'UserWarning'}), "('ignore', category=UserWarning)\n", (154, 186), False, 'import warnings\n'), ((339, 376), 'os.chdir', 'os.chdir', (["process_info['dain_folder']"], {}), "(process_info['dain_folder'])\n", (347, 376), False, 'import os\n'), ((634, 656), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (644, 656), False, 'import torch\n'), ((1165, 1194), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (1187, 1194), False, 'import torch\n'), ((1329, 1340), 'time.time', 'time.time', ([], {}), '()\n', (1338, 1340), False, 'import time\n'), ((3051, 3148), 'torch.nn.ReplicationPad2d', 'torch.nn.ReplicationPad2d', (['[intPaddingLeft, intPaddingRight, intPaddingTop, intPaddingBottom]'], {}), '([intPaddingLeft, intPaddingRight, intPaddingTop,\n intPaddingBottom])\n', (3076, 3148), False, 'import torch\n'), ((5198, 5209), 'time.time', 'time.time', ([], {}), '()\n', (5207, 5209), False, 'import time\n'), ((3168, 3190), 'torch.unsqueeze', 'torch.unsqueeze', (['X0', '(0)'], {}), '(X0, 0)\n', (3183, 3190), False, 'import torch\n'), ((3214, 3236), 'torch.unsqueeze', 'torch.unsqueeze', (['X1', '(0)'], {}), '(X1, 0)\n', (3229, 3236), False, 'import torch\n'), ((3321, 3349), 'torch.stack', 'torch.stack', (['(X0, X1)'], {'dim': '(0)'}), '((X0, X1), dim=0)\n', (3332, 3349), False, 'import torch\n'), ((4138, 4267), 'numpy.transpose', 'numpy.transpose', (['offset_i[0, :, intPaddingTop:intPaddingTop + intHeight, intPaddingLeft:\n intPaddingLeft + intWidth]', '(1, 2, 0)'], {}), '(offset_i[0, :, intPaddingTop:intPaddingTop + intHeight,\n intPaddingLeft:intPaddingLeft + intWidth], (1, 2, 0))\n', (4153, 4267), False, 'import numpy\n'), ((4307, 4436), 'numpy.transpose', 'numpy.transpose', (['filter_i[0, :, intPaddingTop:intPaddingTop + intHeight, intPaddingLeft:\n intPaddingLeft + intWidth]', '(1, 2, 0)'], {}), '(filter_i[0, :, 
intPaddingTop:intPaddingTop + intHeight,\n intPaddingLeft:intPaddingLeft + intWidth], (1, 2, 0))\n', (4322, 4436), False, 'import numpy\n'), ((5143, 5160), 'numpy.round', 'numpy.round', (['item'], {}), '(item)\n', (5154, 5160), False, 'import numpy\n'), ((1861, 1889), 'numpy.load', 'numpy.load', (['filename_frame_1'], {}), '(filename_frame_1)\n', (1871, 1889), False, 'import numpy\n'), ((1969, 1997), 'numpy.load', 'numpy.load', (['filename_frame_2'], {}), '(filename_frame_2)\n', (1979, 1997), False, 'import numpy\n')] |
import re
import xml.etree.ElementTree as ET
from os import path
from collections import Counter
import argparse
import csv
import json
import os
import random
# package local imports
import sys
import uuid
import matplotlib.pyplot as plt
import math
from dateutil.parser import parse
from tdigest import TDigest
import numpy as np
import boto3
from tqdm import tqdm
# package local imports
sys.path.append(os.getcwd() + "/..")
field_tokenization = ",.<>{}[]\"':;!@#$%^&*()-+=~"
from common_datagen import (
download_url,
generate_setup_json,
compress_files,
generate_inputs_dict_item,
humanized_bytes,
del_non_use_case_specific_keys,
add_key_metric,
upload_dataset_artifacts_s3,
add_deployment_requirements_redis_server_module,
add_deployment_requirements_benchmark_tool,
add_deployment_requirements_utilities,
init_deployment_requirement,
remove_file_if_exists,
decompress_file,
)
from tqdm import tqdm
from pathlib import Path
origin = "https://dumps.wikimedia.org/enwiki/20210501/enwiki-20210501-pages-articles1.xml-p1p41242.bz2"
filename = "enwiki-20210501-pages-articles1.xml-p1p41242.bz2"
decompressed_fname = "enwiki-20210501-pages-articles1.xml-p1p41242"
def generate_enwiki_pages_index_type():
    """Map each indexed enwiki field name to its RediSearch field type."""
    field_groups = (
        (("title", "text", "comment"), "text"),
        (("username",), "tag"),
        (("timestamp",), "numeric"),
    )
    return {name: ftype for names, ftype in field_groups for name in names}
def generate_lognormal_dist(n_elements):
mu, sigma = 0.0, 1
s = np.random.lognormal(mu, sigma, n_elements)
min_s = min(s)
max_s = max(s)
diff = max_s - min_s
s = s - min_s
s = s / diff
return s
def generate_ft_create_row(index, index_types, use_ftadd, no_index_list):
    """Build the FT.CREATE command row for *index*.

    Each field in *index_types* (name -> RediSearch type) is appended to the
    schema; fields listed in *no_index_list* are marked NOINDEX, every other
    field is marked SORTABLE.

    Fix: the original appended BOTH an unquoted ``SORTABLE`` and a quoted
    ``"SORTABLE"`` token for indexed fields — a duplicated, inconsistently
    quoted token (every other token in the row is quoted). Only the quoted
    token is emitted now.
    """
    if use_ftadd:
        # FT.ADD ingestion targets a plain index (no ON HASH clause).
        cmd = ['"FT.CREATE"', '"{index}"'.format(index=index), '"SCHEMA"']
    else:
        cmd = [
            '"FT.CREATE"',
            '"{index}"'.format(index=index),
            '"ON"',
            '"HASH"',
            '"SCHEMA"',
        ]
    for f, v in index_types.items():
        cmd.append('"{}"'.format(f))
        cmd.append('"{}"'.format(v))
        if f in no_index_list:
            cmd.append('"NOINDEX"')
        else:
            cmd.append('"SORTABLE"')
    return cmd
def generate_ft_drop_row(index):
    """Build the FT.DROP command row (DD also deletes the indexed documents)."""
    return ["FT.DROP", "{index}".format(index=index), "DD"]
def EscapeTextFileString(field):
    """Backslash-escape RediSearch tokenization characters and newlines in *field*."""
    for special in field_tokenization:
        field = field.replace(special, "\\{}".format(special))
    # Newlines become a literal " \n" sequence so rows stay single-line.
    return field.replace("\n", " \\n")
def use_case_to_cmd(use_ftadd, title, text, comment, username, timestamp, total_docs):
    """Build one ingestion command (HSET or FT.ADD) for a wiki document.

    Returns ``(cmd, size)`` where *size* is the combined length of the
    escaped text fields plus the username — callers use it to filter out
    documents below a minimum length.

    Fix: removed the ``unprunned_hash`` dict that was built from the raw
    (unescaped) values and never used, and renamed the local ``hash`` so it
    no longer shadows the builtin.
    """
    escaped_title = EscapeTextFileString(title)
    escaped_text = EscapeTextFileString(text)
    escaped_comment = EscapeTextFileString(comment)
    size = len(escaped_title) + len(escaped_text) + len(escaped_comment) + len(username)
    doc_fields = {
        "title": escaped_title,
        "text": escaped_text,
        "comment": escaped_comment,
        "username": username,
        "timestamp": timestamp,
    }
    # Random hash keeps doc ids unique across runs; total_docs keeps them readable.
    docid_str = "doc:{hash}:{n}".format(hash=uuid.uuid4().hex, n=total_docs)
    fields = []
    for f, v in doc_fields.items():
        if v is not None:
            fields.append(f)
            fields.append(v)
    if use_ftadd is False:
        cmd = ["WRITE", "W1", 1, "HSET", docid_str]
    else:
        # NOTE(review): ``indexname`` is a module-level global assigned in the
        # __main__ block; this branch silently depends on it.
        cmd = ["WRITE", "W1", 2, "FT.ADD", indexname, docid_str, "1.0", "FIELDS"]
    for x in fields:
        cmd.append(x)
    return cmd, size
def getQueryWords(doc, stop_words, size):
    """Extract up to *size* query-able words from ``doc["comment"]``.

    Non-alphanumeric runs are collapsed to spaces; a word qualifies when it
    is longer than 3 characters, not in *stop_words*, and not the literal
    "Wikipedia". Returns ``(words, count)``.

    Fix: the original broke only once the count *exceeded* ``size``, so it
    returned ``size + 1`` words; the loop now stops at exactly ``size``.
    """
    words = re.sub("[^0-9a-zA-Z]+", " ", doc["comment"]).split(" ")
    queryWords = []
    totalQueryWords = 0
    for word in words:
        word = word.strip()
        if len(word) > 3 and word not in stop_words and word != "Wikipedia":
            queryWords.append(word)
            totalQueryWords = totalQueryWords + 1
            if totalQueryWords >= size:
                break
    return queryWords, totalQueryWords
def generate_benchmark_commands(
    total_benchmark_commands,
    bench_fname,
    all_fname,
    indexname,
    docs,
    stop_words,
    use_numeric_range_searchs,
    ts_digest,
    p_writes,
    query_choices,
):
    """Generate the benchmark-stage command mix (reads vs. writes).

    For each command, a write (probability *p_writes*) re-ingests a random
    document; otherwise a read issues one of the FT.SEARCH query shapes in
    *query_choices*, optionally constrained by a lognormally distributed
    timestamp range built from *ts_digest*. Shows a cdf plot of the query
    time ranges and returns ``(total_reads, total_writes)``.

    NOTE(review): the ``writerow`` calls below are commented out, so the
    opened bench/all CSV files are left empty — presumably intentional
    while plotting distributions; confirm before relying on the output
    files.
    """
    total_benchmark_reads = 0
    total_benchmark_writes = 0
    all_csvfile = open(all_fname, "a", newline="")
    bench_csvfile = open(bench_fname, "w", newline="")
    all_csv_writer = csv.writer(all_csvfile, delimiter=",", quoting=csv.QUOTE_ALL)
    bench_csv_writer = csv.writer(bench_csvfile, delimiter=",", quoting=csv.QUOTE_ALL)
    progress = tqdm(unit="docs", total=total_benchmark_commands)
    total_docs = len(docs)
    ## timestamp related
    # One lognormal sample per command drives how far back each query reaches.
    timestamps_pdist = generate_lognormal_dist(total_benchmark_commands)
    min_ts = ts_digest.percentile(0.0)
    max_ts = ts_digest.percentile(100.0)
    query_range_digest = TDigest()
    generated_commands = 0
    while generated_commands < total_benchmark_commands:
        query_ts_pdist = timestamps_pdist[generated_commands]
        # Map the sample to a dataset percentile: hot (recent) data is queried
        # far more often than cold data.
        percentile = (1.0 - query_ts_pdist) * 100.0
        query_min_ts = ts_digest.percentile(percentile)
        random_doc_pos = random.randint(0, total_docs - 1)
        doc = docs[random_doc_pos]
        # decide read or write
        p_cmd = random.random()
        if p_cmd < p_writes:
            ## WRITE
            total_benchmark_writes = total_benchmark_writes + 1
            generated_row, doc_size = use_case_to_cmd(
                use_ftadd,
                doc["title"],
                doc["text"],
                doc["comment"],
                doc["username"],
                doc["timestamp"],
                generated_commands,
            )
        else:
            ## READ
            total_benchmark_reads = total_benchmark_reads + 1
            words, totalW = getQueryWords(doc, stop_words, 2)
            choice = random.choices(query_choices)[0]
            generated_row = None
            numeric_range_str = ""
            if use_numeric_range_searchs:
                numeric_range_str = "@timestamp:[{} {}] ".format(query_min_ts, max_ts)
                query_range_digest.update(int(max_ts - query_min_ts))
            # A read only counts if the comment yielded enough query words;
            # otherwise generated_row stays None and the loop retries.
            if choice == "simple-1word-query" and len(words) >= 1:
                generated_row = generate_ft_search_row(
                    indexname,
                    "simple-1word-query",
                    "{}{}".format(numeric_range_str, words[0]),
                )
            elif choice == "2word-union-query" and len(words) >= 2:
                generated_row = generate_ft_search_row(
                    indexname,
                    "2word-union-query",
                    "{}{} {}".format(numeric_range_str, words[0], words[1]),
                )
            elif choice == "2word-intersection-query" and len(words) >= 2:
                generated_row = generate_ft_search_row(
                    indexname,
                    "2word-intersection-query",
                    "{}{}|{}".format(numeric_range_str, words[0], words[1]),
                )
        if generated_row != None:
            # all_csv_writer.writerow(generated_row)
            # bench_csv_writer.writerow(generated_row)
            progress.update()
            generated_commands = generated_commands + 1
    progress.close()
    bench_csvfile.close()
    all_csvfile.close()
    # print()
    xx = []
    yy = []
    p90 = query_range_digest.percentile(90.0)
    dataset_percent = ts_digest.cdf(p90)
    print(
        "90% of the read queries target at max {} percent o keyspace".format(
            dataset_percent
        )
    )
    print(
        "100% of the read queries target at max {} percent o keyspace".format(
            ts_digest.cdf(max_ts - min_ts)
        )
    )
    # Plot the cdf of the generated query time ranges.
    for centroid in query_range_digest.centroids_to_list():
        ts_m = centroid["m"]
        xx.append(ts_m)
        yy.append(query_range_digest.cdf(ts_m))
    plt.scatter(xx, yy)
    plt.title("EnWiki pages Query time range")
    plt.xlabel("Query time range")
    plt.ylabel("cdf")
    plt.xscale("log")
    plt.show()
    return total_benchmark_reads, total_benchmark_writes
def generate_ft_search_row(index, query_name, query):
    """Build a benchmark FT.SEARCH command row tagged with *query_name*."""
    return [
        "READ",
        query_name,
        1,
        "{index}".format(index=index),
        "{query}".format(query=query),
    ][:3] + ["FT.SEARCH", "{index}".format(index=index), "{query}".format(query=query)]
if __name__ == "__main__":
    # ---- CLI definition: dataset sizing, query mix, index options and
    # artifact-upload switches for the enwiki-pages FTSB generator. ----
    parser = argparse.ArgumentParser(
        description="RediSearch FTSB data generator.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        "--project", type=str, default="redisearch", help="the project being tested"
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=12345,
        help="the random seed used to generate random deterministic outputs",
    )
    parser.add_argument(
        "--read-ratio", type=int, default=10, help="query time read ratio"
    )
    parser.add_argument(
        "--write-ratio", type=int, default=1, help="query time write ratio"
    )
    parser.add_argument(
        "--min-doc-len",
        type=int,
        default=1024,
        help="Discard any generated document bellow the specified value",
    )
    parser.add_argument(
        "--doc-limit",
        type=int,
        default=100000,
        help="the total documents to generate to be added in the setup stage",
    )
    parser.add_argument(
        "--total-benchmark-commands",
        type=int,
        default=100000,
        help="the total commands to generate to be issued in the benchmark stage",
    )
    parser.add_argument(
        "--stop-words",
        type=str,
        default="a,is,the,an,and,are,as,at,be,but,by,for,if,in,into,it,no,not,of,on,or,such,that,their,then,there,these,they,this,to,was,will,with",
        help="When searching, stop-words are ignored and treated as if they were not sent to the query processor. Therefore, to be 100% correct we need to prevent those words to enter a query",
    )
    parser.add_argument(
        "--index-name",
        type=str,
        default="enwiki_pages",
        help="the name of the RediSearch index to be used",
    )
    parser.add_argument(
        "--test-name",
        type=str,
        default="100K-enwiki_pages-hashes",
        help="the name of the test",
    )
    parser.add_argument(
        "--test-description",
        type=str,
        default="benchmark focused on full text search queries performance, making usage of English-language Wikipedia:Database page revisions",
        help="the full description of the test",
    )
    parser.add_argument(
        "--upload-artifacts-s3",
        default=False,
        action="store_true",
        help="uploads the generated dataset files and configuration file to public benchmarks.redislabs bucket. Proper credentials are required",
    )
    parser.add_argument(
        "--use-ftadd",
        default=False,
        action="store_true",
        help="Use FT.ADD instead of HSET",
    )
    parser.add_argument(
        "--query-use-ts-numeric-range-filter",
        default=False,
        action="store_true",
        help="Use a numeric range filter on queries to simulate searchs that imply a log-normal keyspace access (very hot data and some cold data)",
    )
    parser.add_argument(
        "--big-text-field-noindex",
        default=False,
        action="store_true",
        help="On index creation mark the largest text field as no index. If a field has NOINDEX and doesn't have SORTABLE, it will just be ignored by the index. This is usefull to test RoF for example.",
    )
    parser.add_argument(
        "--temporary-work-dir",
        type=str,
        default="./tmp",
        help="The temporary dir to use as working directory for file download, compression,etc... ",
    )
    parser.add_argument(
        "--query-choices",
        type=str,
        default="simple-1word-query,2word-union-query,2word-intersection-query",
        help="comma separated list of queries to produce. one of: simple-1word-query,2word-union-query,2word-intersection-query",
    )
    parser.add_argument(
        "--upload-artifacts-s3-uncompressed",
        action="store_true",
        help="uploads the generated dataset files and configuration file to public benchmarks.redislabs bucket. Proper credentials are required",
    )
    args = parser.parse_args()
    # ---- Derive run configuration, artifact filenames/URLs and the key
    # metrics that the generated config file will advertise. ----
    query_choices = args.query_choices.split(",")
    use_case_specific_arguments = del_non_use_case_specific_keys(dict(args.__dict__))
    # generate the temporary working dir if required
    working_dir = args.temporary_work_dir
    Path(working_dir).mkdir(parents=True, exist_ok=True)
    seed = args.seed
    project = args.project
    doc_limit = args.doc_limit
    stop_words = args.stop_words.split(",")
    indexname = args.index_name
    test_name = args.test_name
    use_numeric_range_searchs = args.query_use_ts_numeric_range_filter
    no_index_list = []
    big_text_field_noindex = args.big_text_field_noindex
    # Option flags are folded into the test name so artifacts stay distinct.
    if big_text_field_noindex:
        test_name += "-big-text-field-noindex"
        no_index_list = ["text"]
    if use_numeric_range_searchs:
        test_name += "-lognormal-numeric-range-searchs"
    min_doc_len = args.min_doc_len
    description = args.test_description
    s3_bucket_name = "benchmarks.redislabs"
    s3_bucket_path = "redisearch/datasets/{}/".format(test_name)
    s3_uri = "https://s3.amazonaws.com/{bucket_name}/{bucket_path}".format(
        bucket_name=s3_bucket_name, bucket_path=s3_bucket_path
    )
    benchmark_output_file = "{test_name}.{project}.commands".format(
        test_name=test_name, project=project
    )
    benchmark_config_file = "{test_name}.{project}.cfg.json".format(
        test_name=test_name, project=project
    )
    # NOTE(review): all_fname and all_fname_compressed are each assigned
    # twice with identical values — the repeats are redundant but harmless.
    all_fname = "{}.ALL.csv".format(benchmark_output_file)
    all_fname_compressed = "{}.ALL.tar.gz".format(benchmark_output_file)
    all_fname = "{}.ALL.csv".format(benchmark_output_file)
    setup_fname = "{}.SETUP.csv".format(benchmark_output_file)
    bench_fname = "{}.BENCH.QUERY_{}_write_{}_to_read_{}.csv".format(
        benchmark_output_file,
        "__".join(query_choices),
        args.write_ratio,
        args.read_ratio,
    )
    all_fname_compressed = "{}.ALL.tar.gz".format(benchmark_output_file)
    setup_fname_compressed = "{}.SETUP.tar.gz".format(benchmark_output_file)
    bench_fname_compressed = "{}.BENCH.tar.gz".format(benchmark_output_file)
    remote_url_all = "{}{}".format(s3_uri, all_fname_compressed)
    remote_url_setup = "{}{}".format(s3_uri, setup_fname_compressed)
    remote_url_bench = "{}{}".format(s3_uri, bench_fname_compressed)
    ## remove previous files if they exist
    all_artifacts = [
        all_fname,
        setup_fname,
        bench_fname,
        all_fname_compressed,
        setup_fname_compressed,
        bench_fname_compressed,
        benchmark_config_file,
    ]
    for artifact in all_artifacts:
        remove_file_if_exists(artifact)
    use_ftadd = args.use_ftadd
    total_benchmark_commands = args.total_benchmark_commands
    used_indices = [indexname]
    setup_commands = []
    teardown_commands = []
    key_metrics = []
    # Throughput + q50 latency metrics for both the setup and benchmark stages.
    add_key_metric(
        key_metrics,
        "setup",
        "throughput",
        "OverallRates.overallOpsRate",
        "Overall writes query rate",
        "docs/sec",
        "numeric",
        "higher-better",
        1,
    )
    add_key_metric(
        key_metrics,
        "setup",
        "latency",
        "OverallQuantiles.allCommands.q50",
        "Overall writes query q50 latency",
        "ms",
        "numeric",
        "lower-better",
        2,
    )
    add_key_metric(
        key_metrics,
        "benchmark",
        "throughput",
        "OverallRates.overallOpsRate",
        "Overall writes query rate",
        "docs/sec",
        "numeric",
        "higher-better",
        1,
    )
    add_key_metric(
        key_metrics,
        "benchmark",
        "latency",
        "OverallQuantiles.allCommands.q50",
        "Overall writes query q50 latency",
        "ms",
        "numeric",
        "lower-better",
        2,
    )
    # ---- Download + decompress the enwiki dump (if missing), build the
    # index create/drop commands, then stream-parse the XML into `docs`. ----
    total_writes = 0
    total_reads = 0
    total_updates = 0
    total_deletes = 0
    # 1:10
    p_writes = float(args.write_ratio) / (
        float(args.read_ratio) + float(args.write_ratio)
    )
    json_version = "0.1"
    benchmark_repetitions_require_teardown_and_resetup = False
    print("-- Benchmark: {} -- ".format(test_name))
    print("-- Description: {} -- ".format(description))
    total_docs = 0
    print("Using random seed {0}".format(args.seed))
    random.seed(args.seed)
    print("Using the following stop-words: {0}".format(stop_words))
    index_types = generate_enwiki_pages_index_type()
    print("-- generating the ft.create commands -- ")
    ft_create_cmd = generate_ft_create_row(
        indexname, index_types, use_ftadd, no_index_list
    )
    print("FT.CREATE command: {}".format(" ".join(ft_create_cmd)))
    setup_commands.append(ft_create_cmd)
    print("-- generating the ft.drop commands -- ")
    ft_drop_cmd = generate_ft_drop_row(indexname)
    teardown_commands.append(ft_drop_cmd)
    csv_filenames = []
    print(
        "Retrieving the required English-language Wikipedia:Database page edition data"
    )
    if path.exists(filename) is False:
        print("Downloading {} to {}".format(origin, filename))
        download_url(origin, filename)
    else:
        print("{} exists, no need to download again".format(filename))
    if path.exists(decompressed_fname) is False:
        print("Decompressing {}".format(filename))
        decompress_file(filename)
    docs = []
    # Incremental XML parse keeps memory bounded on the multi-GB dump.
    tree = ET.iterparse(decompressed_fname)
    print("Reading {}\n".format(decompressed_fname))
    progress = tqdm(unit="docs")
    doc = {}
    text = None
    comment = None
    username = None
    timestamp = None
    ts_digest = TDigest()
    for event, elem in tree:
        # NOTE(review): iterparse fires end-events, so <revision> and
        # <contributor> close before their enclosing <page> — the locals
        # text/comment/username/timestamp hold the values of the page being
        # closed when the <page> end-event arrives.
        if elem.tag == "{http://www.mediawiki.org/xml/export-0.10/}page":
            doc = {}
            doc["title"] = elem.findtext(
                "{http://www.mediawiki.org/xml/export-0.10/}title"
            )
            doc["text"] = text
            doc["comment"] = comment
            doc["username"] = username
            doc["timestamp"] = int(timestamp)
            ts_digest.update(int(timestamp))
            if (
                doc["text"] is not None
                and doc["comment"] is not None
                and doc["username"] is not None
                and doc["timestamp"] is not None
            ):
                total_docs = total_docs + 1
                docs.append(doc)
                progress.update()
            elem.clear() # won't need the children any more
        if elem.tag == "{http://www.mediawiki.org/xml/export-0.10/}revision":
            text = elem.findtext("{http://www.mediawiki.org/xml/export-0.10/}text")
            comment = elem.findtext(
                "{http://www.mediawiki.org/xml/export-0.10/}comment"
            )
            ts = elem.findtext("{http://www.mediawiki.org/xml/export-0.10/}timestamp")
            dt = parse(ts)
            timestamp = dt.timestamp()
        if elem.tag == "{http://www.mediawiki.org/xml/export-0.10/}contributor":
            username = elem.findtext(
                "{http://www.mediawiki.org/xml/export-0.10/}username"
            )
    progress.close()
print("\n")
setup_csvfile = open(setup_fname, "w", newline="")
all_csvfile = open(all_fname, "a", newline="")
all_csv_writer = csv.writer(all_csvfile, delimiter=",", quoting=csv.QUOTE_ALL)
setup_csv_writer = csv.writer(setup_csvfile, delimiter=",", quoting=csv.QUOTE_ALL)
print("\n")
print("-- generating the setup commands -- \n")
progress = tqdm(unit="docs", total=args.doc_limit)
doc_limit = args.doc_limit
docs_sizes = []
total_docs = 0
if doc_limit == 0:
doc_limit = len(docs)
while total_docs < doc_limit:
random_doc_pos = random.randint(0, len(docs) - 1)
doc = docs[random_doc_pos]
cmd, doc_size = use_case_to_cmd(
use_ftadd,
doc["title"],
doc["text"],
doc["comment"],
doc["username"],
doc["timestamp"],
total_docs,
)
if doc_size >= min_doc_len:
total_docs = total_docs + 1
docs_sizes.append(doc_size)
progress.update()
# setup_csv_writer.writerow(cmd)
# all_csv_writer.writerow(cmd)
# fixed bin size
bins = np.linspace(
math.ceil(min(docs_sizes)), math.floor(max(docs_sizes)), 200
) # fixed number of bins
plt.xlim([1, max(docs_sizes) + 5])
plt.hist(docs_sizes, bins=bins, alpha=0.5)
plt.title(
"EnWiki pages document size frequency. Avg document size: {} Bytes".format(
int(np.average(docs_sizes))
)
)
plt.xlabel("Document Size in Bytes")
plt.ylabel("count")
plt.xscale("log")
plt.show()
xx = []
yy = []
for centroid in ts_digest.centroids_to_list():
# print(centroid)
ts_m = centroid["m"]
xx.append(ts_m)
yy.append(ts_digest.cdf(ts_m))
plt.scatter(xx, yy)
plt.title("EnWiki pages timestamp range")
plt.xlabel("timestamp")
plt.ylabel("cdf")
# plt.xscale('log')
plt.show()
progress.close()
all_csvfile.close()
setup_csvfile.close()
    # ---- Generate benchmark commands, compress the three command files,
    # emit the JSON config describing the test, and optionally upload. ----
    print(
        "-- generating {} full text search commands -- ".format(
            total_benchmark_commands
        )
    )
    print("\t saving to {} and {}".format(bench_fname, all_fname))
    total_benchmark_reads, total_benchmark_writes = generate_benchmark_commands(
        total_benchmark_commands,
        bench_fname,
        all_fname,
        indexname,
        docs,
        stop_words,
        use_numeric_range_searchs,
        ts_digest,
        p_writes,
        query_choices,
    )
    total_commands = total_docs
    total_setup_commands = total_docs
    # Per-stage command-category breakdowns embedded in the config file.
    cmd_category_all = {
        "setup-writes": total_docs,
        "writes": total_writes,
        "updates": total_updates,
        "reads": total_reads,
        "deletes": total_deletes,
    }
    cmd_category_setup = {
        "setup-writes": total_docs,
        "writes": 0,
        "updates": 0,
        "reads": 0,
        "deletes": 0,
    }
    cmd_category_benchmark = {
        "setup-writes": 0,
        "writes": total_benchmark_writes,
        "updates": total_updates,
        "reads": total_benchmark_reads,
        "deletes": total_deletes,
    }
    status, uncompressed_size, compressed_size = compress_files(
        [all_fname], all_fname_compressed
    )
    inputs_entry_all = generate_inputs_dict_item(
        "all",
        all_fname,
        "contains both setup and benchmark commands",
        remote_url_all,
        uncompressed_size,
        all_fname_compressed,
        compressed_size,
        total_commands,
        cmd_category_all,
    )
    status, uncompressed_size, compressed_size = compress_files(
        [setup_fname], setup_fname_compressed
    )
    inputs_entry_setup = generate_inputs_dict_item(
        "setup",
        setup_fname,
        "contains only the commands required to populate the dataset",
        remote_url_setup,
        uncompressed_size,
        setup_fname_compressed,
        compressed_size,
        total_setup_commands,
        cmd_category_setup,
    )
    status, uncompressed_size, compressed_size = compress_files(
        [bench_fname], bench_fname_compressed
    )
    inputs_entry_benchmark = generate_inputs_dict_item(
        "benchmark",
        bench_fname,
        "contains only the benchmark commands (requires the dataset to have been previously populated)",
        remote_url_bench,
        uncompressed_size,
        bench_fname_compressed,
        compressed_size,
        total_benchmark_commands,
        cmd_category_benchmark,
    )
    inputs = {
        "all": inputs_entry_all,
        "setup": inputs_entry_setup,
        "benchmark": inputs_entry_benchmark,
    }
    deployment_requirements = init_deployment_requirement()
    add_deployment_requirements_redis_server_module(
        deployment_requirements, "search", {}
    )
    add_deployment_requirements_utilities(
        deployment_requirements, "ftsb_redisearch", {}
    )
    add_deployment_requirements_benchmark_tool(
        deployment_requirements, "ftsb_redisearch"
    )
    # NOTE(review): the first run_stages assignment is immediately
    # overwritten — the effective value is ["setup", "benchmark"].
    run_stages = ["benchmark"]
    run_stages = ["setup", "benchmark"]
    with open(benchmark_config_file, "w") as setupf:
        setup_json = generate_setup_json(
            json_version,
            project,
            use_case_specific_arguments,
            test_name,
            description,
            run_stages,
            deployment_requirements,
            key_metrics,
            inputs,
            setup_commands,
            teardown_commands,
            used_indices,
            total_commands,
            total_setup_commands,
            total_benchmark_commands,
            total_docs,
            total_writes,
            total_updates,
            total_reads,
            total_deletes,
            benchmark_repetitions_require_teardown_and_resetup,
            ["setup"],
            ["benchmark"],
        )
        json.dump(setup_json, setupf, indent=2)
    if args.upload_artifacts_s3:
        artifacts = [
            benchmark_config_file,
            all_fname_compressed,
            setup_fname_compressed,
            bench_fname_compressed,
        ]
        upload_dataset_artifacts_s3(s3_bucket_name, s3_bucket_path, artifacts)
    if args.upload_artifacts_s3_uncompressed:
        artifacts = [setup_fname, bench_fname]
        upload_dataset_artifacts_s3(s3_bucket_name, s3_bucket_path, artifacts)
    print("############################################")
    print("All artifacts generated.")
| [
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel",
"numpy.random.lognormal",
"common_datagen.add_deployment_requirements_redis_server_module",
"random.choices",
"common_datagen.generate_inputs_dict_item",
"common_datagen.add_deployment_requirements_utilities",
"os.path.exists",
"tdigest.TDigest",... | [((1553, 1595), 'numpy.random.lognormal', 'np.random.lognormal', (['mu', 'sigma', 'n_elements'], {}), '(mu, sigma, n_elements)\n', (1572, 1595), True, 'import numpy as np\n'), ((3867, 3902), 're.sub', 're.sub', (['"""[^0-9a-zA-Z]+"""', '""" """', 'words'], {}), "('[^0-9a-zA-Z]+', ' ', words)\n", (3873, 3902), False, 'import re\n'), ((4701, 4762), 'csv.writer', 'csv.writer', (['all_csvfile'], {'delimiter': '""","""', 'quoting': 'csv.QUOTE_ALL'}), "(all_csvfile, delimiter=',', quoting=csv.QUOTE_ALL)\n", (4711, 4762), False, 'import csv\n'), ((4786, 4849), 'csv.writer', 'csv.writer', (['bench_csvfile'], {'delimiter': '""","""', 'quoting': 'csv.QUOTE_ALL'}), "(bench_csvfile, delimiter=',', quoting=csv.QUOTE_ALL)\n", (4796, 4849), False, 'import csv\n'), ((4865, 4914), 'tqdm.tqdm', 'tqdm', ([], {'unit': '"""docs"""', 'total': 'total_benchmark_commands'}), "(unit='docs', total=total_benchmark_commands)\n", (4869, 4914), False, 'from tqdm import tqdm\n'), ((5146, 5155), 'tdigest.TDigest', 'TDigest', ([], {}), '()\n', (5153, 5155), False, 'from tdigest import TDigest\n'), ((8229, 8248), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xx', 'yy'], {}), '(xx, yy)\n', (8240, 8248), True, 'import matplotlib.pyplot as plt\n'), ((8254, 8296), 'matplotlib.pyplot.title', 'plt.title', (['"""EnWiki pages Query time range"""'], {}), "('EnWiki pages Query time range')\n", (8263, 8296), True, 'import matplotlib.pyplot as plt\n'), ((8301, 8331), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Query time range"""'], {}), "('Query time range')\n", (8311, 8331), True, 'import matplotlib.pyplot as plt\n'), ((8336, 8353), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""cdf"""'], {}), "('cdf')\n", (8346, 8353), True, 'import matplotlib.pyplot as plt\n'), ((8358, 8375), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (8368, 8375), True, 'import matplotlib.pyplot as plt\n'), ((8380, 8390), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', 
(8388, 8390), True, 'import matplotlib.pyplot as plt\n'), ((8726, 8856), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""RediSearch FTSB data generator."""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='RediSearch FTSB data generator.',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (8749, 8856), False, 'import argparse\n'), ((15518, 15679), 'common_datagen.add_key_metric', 'add_key_metric', (['key_metrics', '"""setup"""', '"""throughput"""', '"""OverallRates.overallOpsRate"""', '"""Overall writes query rate"""', '"""docs/sec"""', '"""numeric"""', '"""higher-better"""', '(1)'], {}), "(key_metrics, 'setup', 'throughput',\n 'OverallRates.overallOpsRate', 'Overall writes query rate', 'docs/sec',\n 'numeric', 'higher-better', 1)\n", (15532, 15679), False, 'from common_datagen import download_url, generate_setup_json, compress_files, generate_inputs_dict_item, humanized_bytes, del_non_use_case_specific_keys, add_key_metric, upload_dataset_artifacts_s3, add_deployment_requirements_redis_server_module, add_deployment_requirements_benchmark_tool, add_deployment_requirements_utilities, init_deployment_requirement, remove_file_if_exists, decompress_file\n'), ((15755, 15918), 'common_datagen.add_key_metric', 'add_key_metric', (['key_metrics', '"""setup"""', '"""latency"""', '"""OverallQuantiles.allCommands.q50"""', '"""Overall writes query q50 latency"""', '"""ms"""', '"""numeric"""', '"""lower-better"""', '(2)'], {}), "(key_metrics, 'setup', 'latency',\n 'OverallQuantiles.allCommands.q50', 'Overall writes query q50 latency',\n 'ms', 'numeric', 'lower-better', 2)\n", (15769, 15918), False, 'from common_datagen import download_url, generate_setup_json, compress_files, generate_inputs_dict_item, humanized_bytes, del_non_use_case_specific_keys, add_key_metric, upload_dataset_artifacts_s3, add_deployment_requirements_redis_server_module, add_deployment_requirements_benchmark_tool, 
add_deployment_requirements_utilities, init_deployment_requirement, remove_file_if_exists, decompress_file\n'), ((15995, 16160), 'common_datagen.add_key_metric', 'add_key_metric', (['key_metrics', '"""benchmark"""', '"""throughput"""', '"""OverallRates.overallOpsRate"""', '"""Overall writes query rate"""', '"""docs/sec"""', '"""numeric"""', '"""higher-better"""', '(1)'], {}), "(key_metrics, 'benchmark', 'throughput',\n 'OverallRates.overallOpsRate', 'Overall writes query rate', 'docs/sec',\n 'numeric', 'higher-better', 1)\n", (16009, 16160), False, 'from common_datagen import download_url, generate_setup_json, compress_files, generate_inputs_dict_item, humanized_bytes, del_non_use_case_specific_keys, add_key_metric, upload_dataset_artifacts_s3, add_deployment_requirements_redis_server_module, add_deployment_requirements_benchmark_tool, add_deployment_requirements_utilities, init_deployment_requirement, remove_file_if_exists, decompress_file\n'), ((16236, 16403), 'common_datagen.add_key_metric', 'add_key_metric', (['key_metrics', '"""benchmark"""', '"""latency"""', '"""OverallQuantiles.allCommands.q50"""', '"""Overall writes query q50 latency"""', '"""ms"""', '"""numeric"""', '"""lower-better"""', '(2)'], {}), "(key_metrics, 'benchmark', 'latency',\n 'OverallQuantiles.allCommands.q50', 'Overall writes query q50 latency',\n 'ms', 'numeric', 'lower-better', 2)\n", (16250, 16403), False, 'from common_datagen import download_url, generate_setup_json, compress_files, generate_inputs_dict_item, humanized_bytes, del_non_use_case_specific_keys, add_key_metric, upload_dataset_artifacts_s3, add_deployment_requirements_redis_server_module, add_deployment_requirements_benchmark_tool, add_deployment_requirements_utilities, init_deployment_requirement, remove_file_if_exists, decompress_file\n'), ((16954, 16976), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (16965, 16976), False, 'import random\n'), ((18028, 18060), 'xml.etree.ElementTree.iterparse', 
'ET.iterparse', (['decompressed_fname'], {}), '(decompressed_fname)\n', (18040, 18060), True, 'import xml.etree.ElementTree as ET\n'), ((18129, 18146), 'tqdm.tqdm', 'tqdm', ([], {'unit': '"""docs"""'}), "(unit='docs')\n", (18133, 18146), False, 'from tqdm import tqdm\n'), ((18253, 18262), 'tdigest.TDigest', 'TDigest', ([], {}), '()\n', (18260, 18262), False, 'from tdigest import TDigest\n'), ((19904, 19965), 'csv.writer', 'csv.writer', (['all_csvfile'], {'delimiter': '""","""', 'quoting': 'csv.QUOTE_ALL'}), "(all_csvfile, delimiter=',', quoting=csv.QUOTE_ALL)\n", (19914, 19965), False, 'import csv\n'), ((19989, 20052), 'csv.writer', 'csv.writer', (['setup_csvfile'], {'delimiter': '""","""', 'quoting': 'csv.QUOTE_ALL'}), "(setup_csvfile, delimiter=',', quoting=csv.QUOTE_ALL)\n", (19999, 20052), False, 'import csv\n'), ((20136, 20175), 'tqdm.tqdm', 'tqdm', ([], {'unit': '"""docs"""', 'total': 'args.doc_limit'}), "(unit='docs', total=args.doc_limit)\n", (20140, 20175), False, 'from tqdm import tqdm\n'), ((21094, 21136), 'matplotlib.pyplot.hist', 'plt.hist', (['docs_sizes'], {'bins': 'bins', 'alpha': '(0.5)'}), '(docs_sizes, bins=bins, alpha=0.5)\n', (21102, 21136), True, 'import matplotlib.pyplot as plt\n'), ((21296, 21332), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Document Size in Bytes"""'], {}), "('Document Size in Bytes')\n", (21306, 21332), True, 'import matplotlib.pyplot as plt\n'), ((21337, 21356), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""count"""'], {}), "('count')\n", (21347, 21356), True, 'import matplotlib.pyplot as plt\n'), ((21361, 21378), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (21371, 21378), True, 'import matplotlib.pyplot as plt\n'), ((21384, 21394), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21392, 21394), True, 'import matplotlib.pyplot as plt\n'), ((21594, 21613), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xx', 'yy'], {}), '(xx, yy)\n', (21605, 21613), True, 'import 
matplotlib.pyplot as plt\n'), ((21619, 21660), 'matplotlib.pyplot.title', 'plt.title', (['"""EnWiki pages timestamp range"""'], {}), "('EnWiki pages timestamp range')\n", (21628, 21660), True, 'import matplotlib.pyplot as plt\n'), ((21665, 21688), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""timestamp"""'], {}), "('timestamp')\n", (21675, 21688), True, 'import matplotlib.pyplot as plt\n'), ((21693, 21710), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""cdf"""'], {}), "('cdf')\n", (21703, 21710), True, 'import matplotlib.pyplot as plt\n'), ((21743, 21753), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21751, 21753), True, 'import matplotlib.pyplot as plt\n'), ((23018, 23067), 'common_datagen.compress_files', 'compress_files', (['[all_fname]', 'all_fname_compressed'], {}), '([all_fname], all_fname_compressed)\n', (23032, 23067), False, 'from common_datagen import download_url, generate_setup_json, compress_files, generate_inputs_dict_item, humanized_bytes, del_non_use_case_specific_keys, add_key_metric, upload_dataset_artifacts_s3, add_deployment_requirements_redis_server_module, add_deployment_requirements_benchmark_tool, add_deployment_requirements_utilities, init_deployment_requirement, remove_file_if_exists, decompress_file\n'), ((23105, 23314), 'common_datagen.generate_inputs_dict_item', 'generate_inputs_dict_item', (['"""all"""', 'all_fname', '"""contains both setup and benchmark commands"""', 'remote_url_all', 'uncompressed_size', 'all_fname_compressed', 'compressed_size', 'total_commands', 'cmd_category_all'], {}), "('all', all_fname,\n 'contains both setup and benchmark commands', remote_url_all,\n uncompressed_size, all_fname_compressed, compressed_size,\n total_commands, cmd_category_all)\n", (23130, 23314), False, 'from common_datagen import download_url, generate_setup_json, compress_files, generate_inputs_dict_item, humanized_bytes, del_non_use_case_specific_keys, add_key_metric, upload_dataset_artifacts_s3, 
add_deployment_requirements_redis_server_module, add_deployment_requirements_benchmark_tool, add_deployment_requirements_utilities, init_deployment_requirement, remove_file_if_exists, decompress_file\n'), ((23432, 23485), 'common_datagen.compress_files', 'compress_files', (['[setup_fname]', 'setup_fname_compressed'], {}), '([setup_fname], setup_fname_compressed)\n', (23446, 23485), False, 'from common_datagen import download_url, generate_setup_json, compress_files, generate_inputs_dict_item, humanized_bytes, del_non_use_case_specific_keys, add_key_metric, upload_dataset_artifacts_s3, add_deployment_requirements_redis_server_module, add_deployment_requirements_benchmark_tool, add_deployment_requirements_utilities, init_deployment_requirement, remove_file_if_exists, decompress_file\n'), ((23525, 23767), 'common_datagen.generate_inputs_dict_item', 'generate_inputs_dict_item', (['"""setup"""', 'setup_fname', '"""contains only the commands required to populate the dataset"""', 'remote_url_setup', 'uncompressed_size', 'setup_fname_compressed', 'compressed_size', 'total_setup_commands', 'cmd_category_setup'], {}), "('setup', setup_fname,\n 'contains only the commands required to populate the dataset',\n remote_url_setup, uncompressed_size, setup_fname_compressed,\n compressed_size, total_setup_commands, cmd_category_setup)\n", (23550, 23767), False, 'from common_datagen import download_url, generate_setup_json, compress_files, generate_inputs_dict_item, humanized_bytes, del_non_use_case_specific_keys, add_key_metric, upload_dataset_artifacts_s3, add_deployment_requirements_redis_server_module, add_deployment_requirements_benchmark_tool, add_deployment_requirements_utilities, init_deployment_requirement, remove_file_if_exists, decompress_file\n'), ((23885, 23938), 'common_datagen.compress_files', 'compress_files', (['[bench_fname]', 'bench_fname_compressed'], {}), '([bench_fname], bench_fname_compressed)\n', (23899, 23938), False, 'from common_datagen import download_url, 
generate_setup_json, compress_files, generate_inputs_dict_item, humanized_bytes, del_non_use_case_specific_keys, add_key_metric, upload_dataset_artifacts_s3, add_deployment_requirements_redis_server_module, add_deployment_requirements_benchmark_tool, add_deployment_requirements_utilities, init_deployment_requirement, remove_file_if_exists, decompress_file\n'), ((23982, 24271), 'common_datagen.generate_inputs_dict_item', 'generate_inputs_dict_item', (['"""benchmark"""', 'bench_fname', '"""contains only the benchmark commands (requires the dataset to have been previously populated)"""', 'remote_url_bench', 'uncompressed_size', 'bench_fname_compressed', 'compressed_size', 'total_benchmark_commands', 'cmd_category_benchmark'], {}), "('benchmark', bench_fname,\n 'contains only the benchmark commands (requires the dataset to have been previously populated)'\n , remote_url_bench, uncompressed_size, bench_fname_compressed,\n compressed_size, total_benchmark_commands, cmd_category_benchmark)\n", (24007, 24271), False, 'from common_datagen import download_url, generate_setup_json, compress_files, generate_inputs_dict_item, humanized_bytes, del_non_use_case_specific_keys, add_key_metric, upload_dataset_artifacts_s3, add_deployment_requirements_redis_server_module, add_deployment_requirements_benchmark_tool, add_deployment_requirements_utilities, init_deployment_requirement, remove_file_if_exists, decompress_file\n'), ((24506, 24535), 'common_datagen.init_deployment_requirement', 'init_deployment_requirement', ([], {}), '()\n', (24533, 24535), False, 'from common_datagen import download_url, generate_setup_json, compress_files, generate_inputs_dict_item, humanized_bytes, del_non_use_case_specific_keys, add_key_metric, upload_dataset_artifacts_s3, add_deployment_requirements_redis_server_module, add_deployment_requirements_benchmark_tool, add_deployment_requirements_utilities, init_deployment_requirement, remove_file_if_exists, decompress_file\n'), ((24540, 24630), 
'common_datagen.add_deployment_requirements_redis_server_module', 'add_deployment_requirements_redis_server_module', (['deployment_requirements', '"""search"""', '{}'], {}), "(deployment_requirements,\n 'search', {})\n", (24587, 24630), False, 'from common_datagen import download_url, generate_setup_json, compress_files, generate_inputs_dict_item, humanized_bytes, del_non_use_case_specific_keys, add_key_metric, upload_dataset_artifacts_s3, add_deployment_requirements_redis_server_module, add_deployment_requirements_benchmark_tool, add_deployment_requirements_utilities, init_deployment_requirement, remove_file_if_exists, decompress_file\n'), ((24645, 24734), 'common_datagen.add_deployment_requirements_utilities', 'add_deployment_requirements_utilities', (['deployment_requirements', '"""ftsb_redisearch"""', '{}'], {}), "(deployment_requirements,\n 'ftsb_redisearch', {})\n", (24682, 24734), False, 'from common_datagen import download_url, generate_setup_json, compress_files, generate_inputs_dict_item, humanized_bytes, del_non_use_case_specific_keys, add_key_metric, upload_dataset_artifacts_s3, add_deployment_requirements_redis_server_module, add_deployment_requirements_benchmark_tool, add_deployment_requirements_utilities, init_deployment_requirement, remove_file_if_exists, decompress_file\n'), ((24749, 24839), 'common_datagen.add_deployment_requirements_benchmark_tool', 'add_deployment_requirements_benchmark_tool', (['deployment_requirements', '"""ftsb_redisearch"""'], {}), "(deployment_requirements,\n 'ftsb_redisearch')\n", (24791, 24839), False, 'from common_datagen import download_url, generate_setup_json, compress_files, generate_inputs_dict_item, humanized_bytes, del_non_use_case_specific_keys, add_key_metric, upload_dataset_artifacts_s3, add_deployment_requirements_redis_server_module, add_deployment_requirements_benchmark_tool, add_deployment_requirements_utilities, init_deployment_requirement, remove_file_if_exists, decompress_file\n'), ((411, 422), 
'os.getcwd', 'os.getcwd', ([], {}), '()\n', (420, 422), False, 'import os\n'), ((5437, 5470), 'random.randint', 'random.randint', (['(0)', '(total_docs - 1)'], {}), '(0, total_docs - 1)\n', (5451, 5470), False, 'import random\n'), ((5553, 5568), 'random.random', 'random.random', ([], {}), '()\n', (5566, 5568), False, 'import random\n'), ((15284, 15315), 'common_datagen.remove_file_if_exists', 'remove_file_if_exists', (['artifact'], {}), '(artifact)\n', (15305, 15315), False, 'from common_datagen import download_url, generate_setup_json, compress_files, generate_inputs_dict_item, humanized_bytes, del_non_use_case_specific_keys, add_key_metric, upload_dataset_artifacts_s3, add_deployment_requirements_redis_server_module, add_deployment_requirements_benchmark_tool, add_deployment_requirements_utilities, init_deployment_requirement, remove_file_if_exists, decompress_file\n'), ((17652, 17673), 'os.path.exists', 'path.exists', (['filename'], {}), '(filename)\n', (17663, 17673), False, 'from os import path\n'), ((17755, 17785), 'common_datagen.download_url', 'download_url', (['origin', 'filename'], {}), '(origin, filename)\n', (17767, 17785), False, 'from common_datagen import download_url, generate_setup_json, compress_files, generate_inputs_dict_item, humanized_bytes, del_non_use_case_specific_keys, add_key_metric, upload_dataset_artifacts_s3, add_deployment_requirements_redis_server_module, add_deployment_requirements_benchmark_tool, add_deployment_requirements_utilities, init_deployment_requirement, remove_file_if_exists, decompress_file\n'), ((17875, 17906), 'os.path.exists', 'path.exists', (['decompressed_fname'], {}), '(decompressed_fname)\n', (17886, 17906), False, 'from os import path\n'), ((17976, 18001), 'common_datagen.decompress_file', 'decompress_file', (['filename'], {}), '(filename)\n', (17991, 18001), False, 'from common_datagen import download_url, generate_setup_json, compress_files, generate_inputs_dict_item, humanized_bytes, 
del_non_use_case_specific_keys, add_key_metric, upload_dataset_artifacts_s3, add_deployment_requirements_redis_server_module, add_deployment_requirements_benchmark_tool, add_deployment_requirements_utilities, init_deployment_requirement, remove_file_if_exists, decompress_file\n'), ((24997, 25435), 'common_datagen.generate_setup_json', 'generate_setup_json', (['json_version', 'project', 'use_case_specific_arguments', 'test_name', 'description', 'run_stages', 'deployment_requirements', 'key_metrics', 'inputs', 'setup_commands', 'teardown_commands', 'used_indices', 'total_commands', 'total_setup_commands', 'total_benchmark_commands', 'total_docs', 'total_writes', 'total_updates', 'total_reads', 'total_deletes', 'benchmark_repetitions_require_teardown_and_resetup', "['setup']", "['benchmark']"], {}), "(json_version, project, use_case_specific_arguments,\n test_name, description, run_stages, deployment_requirements,\n key_metrics, inputs, setup_commands, teardown_commands, used_indices,\n total_commands, total_setup_commands, total_benchmark_commands,\n total_docs, total_writes, total_updates, total_reads, total_deletes,\n benchmark_repetitions_require_teardown_and_resetup, ['setup'], [\n 'benchmark'])\n", (25016, 25435), False, 'from common_datagen import download_url, generate_setup_json, compress_files, generate_inputs_dict_item, humanized_bytes, del_non_use_case_specific_keys, add_key_metric, upload_dataset_artifacts_s3, add_deployment_requirements_redis_server_module, add_deployment_requirements_benchmark_tool, add_deployment_requirements_utilities, init_deployment_requirement, remove_file_if_exists, decompress_file\n'), ((25707, 25746), 'json.dump', 'json.dump', (['setup_json', 'setupf'], {'indent': '(2)'}), '(setup_json, setupf, indent=2)\n', (25716, 25746), False, 'import json\n'), ((25962, 26032), 'common_datagen.upload_dataset_artifacts_s3', 'upload_dataset_artifacts_s3', (['s3_bucket_name', 's3_bucket_path', 'artifacts'], {}), '(s3_bucket_name, 
s3_bucket_path, artifacts)\n', (25989, 26032), False, 'from common_datagen import download_url, generate_setup_json, compress_files, generate_inputs_dict_item, humanized_bytes, del_non_use_case_specific_keys, add_key_metric, upload_dataset_artifacts_s3, add_deployment_requirements_redis_server_module, add_deployment_requirements_benchmark_tool, add_deployment_requirements_utilities, init_deployment_requirement, remove_file_if_exists, decompress_file\n'), ((26135, 26205), 'common_datagen.upload_dataset_artifacts_s3', 'upload_dataset_artifacts_s3', (['s3_bucket_name', 's3_bucket_path', 'artifacts'], {}), '(s3_bucket_name, s3_bucket_path, artifacts)\n', (26162, 26205), False, 'from common_datagen import download_url, generate_setup_json, compress_files, generate_inputs_dict_item, humanized_bytes, del_non_use_case_specific_keys, add_key_metric, upload_dataset_artifacts_s3, add_deployment_requirements_redis_server_module, add_deployment_requirements_benchmark_tool, add_deployment_requirements_utilities, init_deployment_requirement, remove_file_if_exists, decompress_file\n'), ((12945, 12962), 'pathlib.Path', 'Path', (['working_dir'], {}), '(working_dir)\n', (12949, 12962), False, 'from pathlib import Path\n'), ((19486, 19495), 'dateutil.parser.parse', 'parse', (['ts'], {}), '(ts)\n', (19491, 19495), False, 'from dateutil.parser import parse\n'), ((3387, 3399), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3397, 3399), False, 'import uuid\n'), ((6154, 6183), 'random.choices', 'random.choices', (['query_choices'], {}), '(query_choices)\n', (6168, 6183), False, 'import random\n'), ((21252, 21274), 'numpy.average', 'np.average', (['docs_sizes'], {}), '(docs_sizes)\n', (21262, 21274), True, 'import numpy as np\n')] |
import os
import platform
# Ensure the scratch directory used by these tests exists.  makedirs with
# exist_ok=True is atomic from the caller's perspective and avoids the
# check-then-create race of the old exists()+mkdir() pair.
os.makedirs("temp", exist_ok=True)
def from_flopy_kl_test():
    """Exercise PstFromFlopyModel with Karhunen-Loeve (``kl_props``)
    array parameterization for hk, vka and recharge on the Freyberg
    test model.  Silently skipped when flopy is not importable.
    """
    import shutil
    import numpy as np
    import pandas as pd
    try:
        import flopy
    except:
        return
    import pyemu
    # load the base model from the examples directory
    org_model_ws = os.path.join("..", "examples", "freyberg_sfr_update")
    nam_file = "freyberg.nam"
    m = flopy.modflow.Modflow.load(nam_file, model_ws=org_model_ws, check=False)
    # add a RIV package (3 cells in stress period 0) so list-type files exist
    flopy.modflow.ModflowRiv(m, stress_period_data={0: [[0, 0, 0, 30.0, 1.0, 25.0],
                                                        [0, 0, 1, 31.0, 1.0, 25.0],
                                                        [0, 0, 1, 31.0, 1.0, 25.0]]})
    # add one horizontal flow barrier between columns 14 and 15 in every row
    hfb_data = []
    jcol1, jcol2 = 14, 15
    for i in range(m.nrow):
        hfb_data.append([0, i, jcol1, i, jcol2, 0.001])
    flopy.modflow.ModflowHfb(m, 0, 0, len(hfb_data), hfb_data=hfb_data)
    # rewrite the augmented model into a scratch workspace
    org_model_ws = "temp"
    m.change_model_ws(org_model_ws)
    m.write_input()
    # attach a spatial reference to the model object - presumably required
    # because newer flopy versions removed the model-level ``sr`` attribute
    # that PstFromFlopyModel reads; TODO confirm against pyemu internals
    setattr(m,"sr",pyemu.helpers.SpatialReference(delc=m.dis.delc.array,delr=m.dis.delr.array))
    new_model_ws = "temp_pst_from_flopy"
    # NOTE(review): hds_kperk is built here but never passed to the helper
    # below - looks like leftover scaffolding from the sibling tests
    hds_kperk = []
    for k in range(m.nlay):
        for kper in range(m.nper):
            hds_kperk.append([kper, k])
    # [attribute, layer] pairs; the list-prop lists are also unused here
    temp_list_props = [["wel.flux", None]]
    spat_list_props = [["riv.cond", 0], ["riv.stage", 0]]
    kl_props = [["upw.hk", 0], ["upw.vka", 0], ["rch.rech", 0]]
    ph = pyemu.helpers.PstFromFlopyModel(m, new_model_ws=new_model_ws,
                                         org_model_ws=org_model_ws,
                                         kl_props=kl_props,
                                         remove_existing=True,
                                         model_exe_name="mfnwt")
def from_flopy():
    """End-to-end exercise of pyemu.helpers.PstFromFlopyModel on the
    Freyberg model: zone/const/grid/pilot-point array pars, temporal and
    spatial list pars, hfb and sfr pars, and round-trips through
    apply_list_pars()/apply_array_pars().  Chains into
    from_flopy_zone_pars() at the end.

    Silently skipped when flopy is not importable.
    """
    import shutil
    import numpy as np
    import pandas as pd
    try:
        import flopy
    except:
        return
    import pyemu
    # load the base model and add RIV + HFB packages so that list-type and
    # hfb parameterizations have something to operate on
    org_model_ws = os.path.join("..", "examples", "freyberg_sfr_update")
    nam_file = "freyberg.nam"
    m = flopy.modflow.Modflow.load(nam_file, model_ws=org_model_ws, check=False)
    flopy.modflow.ModflowRiv(m, stress_period_data={0: [[0, 0, 0, 30.0, 1.0, 25.0],
                                                        [0, 0, 1, 31.0, 1.0, 25.0],
                                                        [0, 0, 1, 31.0, 1.0, 25.0]]})
    # one horizontal flow barrier between columns 14 and 15 in every row
    hfb_data = []
    jcol1, jcol2 = 14, 15
    for i in range(m.nrow):
        hfb_data.append([0, i, jcol1, i, jcol2, 0.001])
    flopy.modflow.ModflowHfb(m, 0, 0, len(hfb_data), hfb_data=hfb_data)
    # rewrite the augmented model into a scratch workspace
    org_model_ws = "temp"
    m.change_model_ws(org_model_ws)
    m.write_input()
    new_model_ws = "temp_pst_from_flopy"
    temp_list_props = [["wel.flux", None]]
    spat_list_props = [["riv.cond", 0], ["riv.stage", 0]]
    ph = pyemu.helpers.PstFromFlopyModel(nam_file, new_model_ws=new_model_ws,
                                         org_model_ws=org_model_ws,
                                         zone_props=[["rch.rech", 0], ["rch.rech", [1, 2]]],
                                         remove_existing=True,
                                         model_exe_name="mfnwt", temporal_list_props=temp_list_props,
                                         spatial_list_props=spat_list_props, hfb_pars=True)
    # every multiplier file listed in arr_pars.csv must be tied to the pst
    csv_file = os.path.join(new_model_ws, "arr_pars.csv")
    df = pd.read_csv(csv_file, index_col=0)
    mults_not_linked_to_pst = [f for f in df.mlt_file.unique()
                               if f not in ph.pst.input_files]
    assert len(mults_not_linked_to_pst) == 0, str(mults_not_linked_to_pst)
    par = ph.pst.parameter_data
    pe = ph.draw(100)
    par.loc["welflux_000", 'parval1'] = 2.0
    os.chdir(new_model_ws)
    ph.pst.write_input_files()
    pyemu.helpers.apply_list_pars()
    os.chdir("..")
    # again with only spatial list pars...
    ph = pyemu.helpers.PstFromFlopyModel(nam_file, new_model_ws=new_model_ws,
                                         org_model_ws=org_model_ws,
                                         zone_props=[["rch.rech", 0], ["rch.rech", [1, 2]]],
                                         remove_existing=True,
                                         model_exe_name="mfnwt",
                                         spatial_list_props=spat_list_props)
    pe = ph.draw(100)
    # ...and with only temporal list pars
    ph = pyemu.helpers.PstFromFlopyModel(nam_file, new_model_ws=new_model_ws,
                                         org_model_ws=org_model_ws,
                                         zone_props=[["rch.rech", 0], ["rch.rech", [1, 2]]],
                                         remove_existing=True,
                                         model_exe_name="mfnwt", temporal_list_props=temp_list_props)
    pe = ph.draw(100)
    ph.pst.parameter_data.loc["rech0_zn1", "parval1"] = 2.0
    bd = os.getcwd()
    os.chdir(new_model_ws)
    ph.pst.write_input_files()
    # round-trip arr_pars.csv through apply_array_pars, first with no
    # bounds (NaN means "do not clip")...
    csv_file = "arr_pars.csv"
    df = pd.read_csv(csv_file, index_col=0)
    df.loc[:, "upper_bound"] = np.nan
    df.loc[:, "lower_bound"] = np.nan
    df.to_csv(csv_file)
    pyemu.helpers.apply_array_pars()
    # jwhite 21 sept 2019 - the failing-bounds variants that used to live
    # here were removed because multiprocessing no longer propagates the
    # expected exceptions back to this process
    # ...then with tight bounds: resulting arrays must be clipped
    df.loc[:, "lower_bound"] = 0.1
    df.loc[:, "upper_bound"] = 0.9
    print(df)
    df.to_csv(csv_file)
    pyemu.helpers.apply_array_pars()
    arr = np.loadtxt(df.model_file.iloc[0])
    assert arr.min() >= df.lower_bound.iloc[0]
    assert arr.max() <= df.upper_bound.iloc[0]
    os.chdir(bd)
    # sfr seg pars across all kpers
    org_model_ws = os.path.join("..", "examples", "freyberg_sfr_update")
    nam_file = "freyberg.nam"
    m = flopy.modflow.Modflow.load(nam_file, model_ws=org_model_ws, check=False)
    helper = pyemu.helpers.PstFromFlopyModel(nam_file, new_model_ws, org_model_ws,
                                             hds_kperk=[0, 0], remove_existing=True,
                                             model_exe_name="mfnwt", sfr_pars=True, sfr_obs=True,
                                             temporal_sfr_pars=True)
    pe = helper.draw(100)
    # go again testing passing list to sfr_pars - the bogus name should
    # make draw() fail
    m = flopy.modflow.Modflow.load(nam_file, model_ws=org_model_ws, check=False)
    helper = pyemu.helpers.PstFromFlopyModel(nam_file, new_model_ws, org_model_ws,
                                             hds_kperk=[0, 0], remove_existing=True,
                                             model_exe_name="mfnwt",
                                             sfr_pars=['flow', 'not_a_par'],
                                             temporal_sfr_pars=True,
                                             sfr_obs=True)
    try:
        pe = helper.draw(100)
    except:
        pass
    else:
        raise Exception()
    # go again passing only bumph to sfr_pars
    m = flopy.modflow.Modflow.load(nam_file, model_ws=org_model_ws, check=False)
    helper = pyemu.helpers.PstFromFlopyModel(nam_file, new_model_ws, org_model_ws,
                                             hds_kperk=[0, 0], remove_existing=True,
                                             model_exe_name="mfnwt", sfr_pars=['not_a_par0', 'not_a_par1'], sfr_obs=True)
    try:
        pe = helper.draw(100)
    except:
        pass
    else:
        raise Exception()
    # pilot point pars
    pp_props = [["upw.ss", [0, 1]], ["upw.ss", 1], ["upw.ss", 2], ["extra.prsity", 0],
                ["rch.rech", 0], ["rch.rech", [1, 2]]]
    helper = pyemu.helpers.PstFromFlopyModel(nam_file, new_model_ws, org_model_ws,
                                             pp_props=pp_props, hds_kperk=[0, 0], remove_existing=True,
                                             model_exe_name="mfnwt")
    m = flopy.modflow.Modflow.load(nam_file, model_ws=org_model_ws, exe_name="mfnwt", check=False)
    const_props = [["rch.rech", i] for i in range(m.nper)]
    helper = pyemu.helpers.PstFromFlopyModel(m, new_model_ws,
                                             const_props=const_props, hds_kperk=[0, 0], remove_existing=True)
    pe = helper.draw(100)
    grid_props = [["extra.pr", 0]]
    for k in range(3):
        # grid scale pars for hk in all layers
        grid_props.append(["upw.hk", k])
        # const par for hk, ss, sy in all layers
        const_props.append(["upw.hk", k])
        const_props.append(["upw.ss", k])
        const_props.append(["upw.sy", k])
    helper = pyemu.helpers.PstFromFlopyModel(nam_file, new_model_ws, org_model_ws,
                                             grid_props=grid_props, hds_kperk=[0, 0], remove_existing=True)
    pe = helper.draw(100)
    # zones using ibound values - vka in layer 2
    zone_props = ["upw.vka", 1]
    helper = pyemu.helpers.PstFromFlopyModel(nam_file, new_model_ws, org_model_ws,
                                             zone_props=zone_props, hds_kperk=[0, 0], remove_existing=True)
    pe = helper.draw(100)
    # kper-level multipliers for boundary conditions
    list_props = []
    for iper in range(m.nper):
        list_props.append(["wel.flux", iper])
    helper = pyemu.helpers.PstFromFlopyModel(nam_file, new_model_ws, org_model_ws,
                                             temporal_list_props=list_props, hds_kperk=[0, 0], remove_existing=True)
    pe = helper.draw(100)
    # everything at once, with an explicit hk zone array for all 3 layers
    zn_arr = np.loadtxt(os.path.join("..", "examples", "Freyberg_Truth", "hk.zones"), dtype=int)
    k_zone_dict = {k: zn_arr for k in range(3)}
    obssim_smp_pairs = None
    helper = pyemu.helpers.PstFromFlopyModel(nam_file, new_model_ws, org_model_ws,
                                             pp_props=pp_props,
                                             const_props=const_props,
                                             grid_props=grid_props,
                                             zone_props=zone_props,
                                             temporal_list_props=list_props,
                                             spatial_list_props=list_props,
                                             remove_existing=True,
                                             obssim_smp_pairs=obssim_smp_pairs,
                                             pp_space=4,
                                             use_pp_zones=False,
                                             k_zone_dict=k_zone_dict,
                                             hds_kperk=[0, 0], build_prior=False)
    pst = helper.pst
    par = pst.parameter_data
    # clip bounds before drawing (fixed: the original assigned to a
    # misspelled "pariubnd" column, so the upper-bound cap never applied)
    par.loc[par.parubnd > 100, "parubnd"] = 100.0
    par.loc[par.parlbnd < 0.1, "parlbnd"] = 0.1
    pe = helper.draw(100)
    # weight only the "cr" observations and perturb their values
    obs = pst.observation_data
    obs.loc[:, "weight"] = 0.0
    obs.loc[obs.obsnme.apply(lambda x: x.startswith("cr")), "weight"] = 1.0
    obs.loc[obs.weight > 0.0, "obsval"] += np.random.normal(0.0, 2.0, pst.nnz_obs)
    pst.control_data.noptmax = 0
    pst.write(os.path.join(new_model_ws, "freyberg_pest.pst"))
    cov = helper.build_prior(fmt="none")
    cov.to_coo(os.path.join(new_model_ws, "cov.coo"))
    from_flopy_zone_pars()
def from_flopy_zone_pars():
    """Exercise PstFromFlopyModel zone-based parameterization with
    per-property ``k_zone_dict`` entries (including the "general_zn"
    fallback key).  Silently skipped when flopy is not importable.
    """
    import numpy as np
    try:
        import flopy
    except:
        return
    import pyemu
    org_model_ws = os.path.join("..", "examples", "freyberg_sfr_update")
    nam_file = "freyberg.nam"
    m = flopy.modflow.Modflow.load(nam_file, model_ws=org_model_ws, check=False)
    m.change_model_ws(org_model_ws)
    m.write_input()
    new_model_ws = "temp_pst_from_flopy"
    grid_props = [["upw.ss", [0, 1]], ["upw.ss", 1], ["upw.ss", 2], ["extra.prsity", 0],
                  ["rch.rech", 0], ["rch.rech", [1, 2]]]
    const_props = [["rch.rech", i] for i in range(m.nper)]
    # FIX: the original did ``grid_props = grid_props.extend([...])``, which
    # rebinds grid_props to None (list.extend returns None) and silently
    # dropped every grid-scale parameter.  ["extra.prsity", 0] is already in
    # grid_props above, so nothing needs to be added here.
    zone_props = [["extra.prsity", 0], ["extra.prsity", 2], ["upw.vka", 1], ["upw.vka", 2]]
    zn_arr = np.loadtxt(os.path.join("..", "examples", "Freyberg_Truth", "hk.zones"), dtype=int)
    zn_arr2 = np.loadtxt(os.path.join("..", "examples", "Freyberg_Truth", "rand.zones"), dtype=int)
    pp_props = [["upw.hk", [0, 1]], ["extra.prsity", 1], ["upw.ss", 1], ["upw.ss", 2], ["upw.vka", 2]]
    # property-specific zone arrays, plus a "general_zn" fallback
    k_zone_dict = {"upw.hk": {k: zn_arr for k in range(3)}, "extra.prsity": {k: zn_arr2 for k in range(3)},
                   "general_zn": {k: zn_arr for k in range(3)}}
    obssim_smp_pairs = None
    helper = pyemu.helpers.PstFromFlopyModel(nam_file, new_model_ws, org_model_ws,
                                             const_props=const_props,
                                             grid_props=grid_props,
                                             zone_props=zone_props,
                                             pp_props=pp_props,
                                             remove_existing=True,
                                             obssim_smp_pairs=obssim_smp_pairs,
                                             pp_space=4,
                                             use_pp_zones=True,
                                             k_zone_dict=k_zone_dict,
                                             hds_kperk=[0, 0], build_prior=False)
    # again, without the "general_zn" fallback entry
    k_zone_dict = {"upw.vka": {k: zn_arr for k in range(3)}, "extra.prsity": {k: zn_arr2 for k in range(3)}}
    helper = pyemu.helpers.PstFromFlopyModel(nam_file, new_model_ws, org_model_ws,
                                             const_props=const_props,
                                             grid_props=grid_props,
                                             zone_props=zone_props,
                                             pp_props=pp_props,
                                             remove_existing=True,
                                             obssim_smp_pairs=obssim_smp_pairs,
                                             pp_space=4,
                                             use_pp_zones=True,
                                             k_zone_dict=k_zone_dict,
                                             hds_kperk=[0, 0], build_prior=False)
    print(helper.pst.par_groups)
def from_flopy_test():
    """Run from_flopy(), restoring the starting working directory and
    wrapping any failure so the offending helper is named."""
    start_dir = os.getcwd()
    try:
        from_flopy()
    except Exception as err:
        # from_flopy chdir's into the model workspace; hop back before
        # re-raising so subsequent tests start from a known location
        os.chdir(start_dir)
        raise Exception("error in from_flopy: " + str(err))
def from_flopy_test_reachinput_test():
    """Run from_flopy_reachinput(), restoring the starting working
    directory and wrapping any failure so the offending helper is named."""
    start_dir = os.getcwd()
    try:
        from_flopy_reachinput()
    except Exception as err:
        # the underlying test chdir's around; hop back before re-raising
        os.chdir(start_dir)
        raise Exception("error in from_flopy_reachinput: " + str(err))
def from_flopy_reachinput():
    """Test building sfr pars from a reachinput-style sfr package: seg
    pars across all kper plus reach pars, for several forms of the
    ``sfr_pars`` argument (True, lists with/without bogus names, a bare
    string).  Silently skipped when flopy is not importable.
    """
    import pandas as pd
    try:
        import flopy
    except:
        return
    import pyemu
    # if platform.platform().lower().startswith('win'):
    #     tempchek = os.path.join("..", "..", "bin", "win", "tempchek.exe")
    # else:
    #     tempchek = None # os.path.join("..", "..", "bin", "linux", "tempchek")
    bd = os.getcwd()
    org_model_ws = os.path.join("..", "examples", "freyberg_sfr_reaches")
    nam_file = "freyberg.nam"
    new_model_ws = "temp_pst_from_flopy_reaches"
    m = flopy.modflow.Modflow.load(nam_file, model_ws=org_model_ws, check=False)
    # test passing different arguments
    args_to_test = [True,
                    ["strhc1", "flow"],
                    ["flow", "runoff"],
                    ["not_a_par", "not_a_par2"],
                    "strhc1",
                    ["strhc1", "flow", "runoff"]]
    for i, sfr_par in enumerate(args_to_test):  # if i=2 no reach pars, i==3 no pars, i=4 no seg pars
        for f in ["sfr_reach_pars.config", "sfr_seg_pars.config"]:  # clean up
            if os.path.exists(f):
                os.remove(f)
        if i < 5:
            include_temporal_pars = False
        else:
            # last scenario: request temporal pars for specific kpers
            include_temporal_pars = {'flow': [0], 'runoff': [2]}
        helper = pyemu.helpers.PstFromFlopyModel(nam_file, new_model_ws, org_model_ws,
                                                 hds_kperk=[0, 0], remove_existing=True,
                                                 model_exe_name="mfnwt", sfr_pars=sfr_par,
                                                 temporal_sfr_pars=include_temporal_pars,
                                                 sfr_obs=True)
        os.chdir(new_model_ws)
        mult_files = []
        spars = {}
        try:  # read seg pars config file
            with open("sfr_seg_pars.config", 'r') as f:
                for line in f:
                    line = line.strip().split()
                    spars[line[0]] = line[1]
            mult_files.append(spars["mult_file"])
        except:
            if i in [3, 4]:  # for scenario 3 or 4 not expecting any seg pars
                pass
            else:
                raise Exception()
        rpars = {}
        try:  # read reach pars config file
            with open("sfr_reach_pars.config", 'r') as f:
                for line in f:
                    line = line.strip().split()
                    rpars[line[0]] = line[1]
            mult_files.append(rpars["mult_file"])
        except:
            if i in [2, 3]:  # for scenario 2 or 3 not expecting any reach pars
                pass
            else:
                raise Exception()
        try:
            # actually write out files to check template file
            helper.pst.write_input_files()
            # frun_pre_lines[0] is the model pre-processing command that
            # applies the sfr multipliers; exec it in-process to verify
            try:
                exec(helper.frun_pre_lines[0])
            except Exception as e:
                raise Exception("error applying sfr pars, check tpl(s) and datafiles: {0}".format(str(e)))
            # test using tempchek for writing tpl file
            # par = helper.pst.parameter_data
            # if rpars == {}:
            #     par_file = "{}.par".format(spars['nam_file'])
            # else:
            #     par_file = "{}.par".format(rpars['nam_file'])
            # with open(par_file, 'w') as f:
            #     f.write('single point\n')
            #     f.flush()
            #     par[['parnme', 'parval1', 'scale', 'offset']].to_csv(f, sep=' ', header=False, index=False, mode='a')
            # if tempchek is not None:
            #     for mult in mult_files:
            #         tpl_file = "{}.tpl".format(mult)
            #         try:
            #             pyemu.os_utils.run("{} {} {} {}".format(tempchek, tpl_file, mult, par_file))
            #         except Exception as e:
            #             raise Exception("error running tempchek on template file {1} and data file {0} : {2}".
            #                             format(mult, tpl_file, str(e)))
            #     try:
            #         exec(helper.frun_pre_lines[0])
            #     except Exception as e:
            #         raise Exception("error applying sfr pars, check tpl(s) and datafiles: {0}".format(str(e)))
        except Exception as e:
            if i == 3:  # scenario 3 should not set up any parameters
                pass
            else:
                raise Exception(str(e))
        os.chdir(bd)
def run_array_pars():
    """Apply the array-parameter multipliers in ``temp_pst_from_flopy``.

    Changes into the model workspace, runs
    pyemu.helpers.apply_array_pars(), and always restores the original
    working directory (the original version left the process stranded in
    the workspace if apply_array_pars raised).
    """
    import os
    import pyemu
    new_model_ws = "temp_pst_from_flopy"
    start_dir = os.getcwd()
    os.chdir(new_model_ws)
    try:
        pyemu.helpers.apply_array_pars()
    finally:
        # restore the cwd even on failure so later tests are unaffected
        os.chdir(start_dir)
def parrep_test():
    """Exercise Pst.parrep() with a PEST .par file, ensemble CSV files, and a
    binary (.jcb) ensemble, checking bound enforcement, noptmax handling,
    real_name selection, and column reordering."""
    import pyemu
    import pandas as pd
    import numpy as np
    # make some fake parnames and values
    parnames = ['p_{0:03}'.format(i) for i in range(20)]
    np.random.seed(42)
    parvals = np.random.random(20) + 5
    # first value deliberately below the lower bound (0.01) set further down,
    # so parrep's bound enforcement should clip it to parlbnd
    parvals[0] = 0.001
    # make a fake parfile
    with open('fake.par','w') as ofp:
        ofp.write('single point\n')
        [ofp.write('{0:10s} {1:12.6f} 1.00 0.0\n'.format(i,j)) for i,j in zip(parnames,parvals)]
    # make a fake ensemble parameter file
    np.random.seed(99)
    parens = pd.DataFrame(np.tile(parvals,(5,1))+np.random.randn(5,20)*.5, columns=parnames)
    # realizations 0-3 plus a 'base' realization holding the reversed values
    parens.index = list(range(4)) + ['base']
    parens.index.name = 'real_name'
    parens.loc['base'] = parvals[::-1]
    # get cheeky and reverse the column names to test updating
    parens.columns = parens.columns.sort_values(ascending = False)
    parens.to_csv('fake.par.0.csv')
    parens.drop('base').to_csv('fake.par.0.nobase.csv')
    # and make a fake pst file
    pst = pyemu.pst_utils.generic_pst(par_names=parnames)
    pst.parameter_data['parval1'] = [float(i+1) for i in range(len(parvals))]
    pst.parameter_data['parlbnd'] = 0.01
    pst.parameter_data['parubnd'] = 100.01
    pyemu.ParameterEnsemble(pst=pst,df=parens).to_binary('fake_parens.jcb')
    # test the parfile style
    pst.parrep('fake.par')
    # default enforce_bounds=True clips the out-of-range first value to parlbnd
    assert pst.parameter_data.parval1[0] == pst.parameter_data.parlbnd[0]
    assert np.allclose(pst.parameter_data.iloc[1:].parval1.values,parvals[1:],atol=0.0001)
    assert pst.control_data.noptmax == 0
    # with enforcement off, all values (incl. the tiny one) pass through
    pst.parrep('fake.par', noptmax=99, enforce_bounds=False)
    assert np.allclose(pst.parameter_data.parval1.values,parvals,atol=0.0001)
    assert pst.control_data.noptmax == 99
    # now test the ensemble style
    pst.parrep('fake.par.0.csv')
    assert pst.parameter_data.parval1[0] == pst.parameter_data.parlbnd[0]
    assert np.allclose(pst.parameter_data.iloc[1:].parval1.values,parvals[1:],atol=0.0001)
    # without a 'base' realization parrep should fall back to realization 0
    pst.parrep('fake.par.0.nobase.csv')
    # flip the parameter ensemble back around
    parens = parens[parens.columns.sort_values()]
    # [:-1] skips the last par, which parrep may have bound-clipped
    assert np.allclose(pst.parameter_data.parval1.values[:-1],parens.T[0].values[:-1],atol=0.0001)
    pst.parrep('fake.par.0.csv', real_name=3)
    # flip the parameter ensemble back around
    parens = parens[parens.columns.sort_values()]
    assert np.allclose(pst.parameter_data.parval1.values[:-1],parens.T[3].values[:-1],atol=0.0001)
    pst.parrep('fake_parens.jcb', real_name=2)
    # confirm binary format works as csv did
    assert np.allclose(pst.parameter_data.parval1.values[:-1],parens.T[2].values[:-1],atol=0.0001)
def pst_from_flopy_geo_draw_test():
    """Compare PstFromFlopyModel.draw() against a direct
    ParameterEnsemble.from_gaussian_draw() from the same prior.

    With a large number of realizations the two ensembles should agree in
    mean and standard deviation to within 0.1.  (Removed dead code from the
    original: unused ``shutil``/``pandas`` imports and an ``hds_kperk`` list
    that was built but never used.)
    """
    import numpy as np
    try:
        import flopy
    except ImportError:
        # flopy is an optional dependency - quietly skip this test
        return
    import pyemu
    org_model_ws = os.path.join("..", "examples", "freyberg_sfr_update")
    nam_file = "freyberg.nam"
    m = flopy.modflow.Modflow.load(nam_file, model_ws=org_model_ws, check=False)
    # add a RIV package so the spatial list-parameter props have data to act on
    flopy.modflow.ModflowRiv(m, stress_period_data={0: [[0, 0, 0, 30.0, 1.0, 25.0],
                                                        [0, 0, 1, 31.0, 1.0, 25.0],
                                                        [0, 0, 1, 31.0, 1.0, 25.0]]})
    org_model_ws = "temp"
    m.change_model_ws(org_model_ws)
    m.write_input()
    new_model_ws = "temp_pst_from_flopy"
    temp_list_props = [["wel.flux", None]]
    spat_list_props = [["riv.cond", 0], ["riv.stage", 0]]
    ph = pyemu.helpers.PstFromFlopyModel(nam_file, new_model_ws=new_model_ws,
                                         org_model_ws=org_model_ws,
                                         zone_props=[["rch.rech", 0], ["rch.rech", [1, 2]]],
                                         remove_existing=True,
                                         model_exe_name="mfnwt", temporal_list_props=temp_list_props,
                                         spatial_list_props=spat_list_props)
    num_reals = 100000
    pe1 = ph.draw(num_reals=num_reals, sigma_range=6)
    pyemu.Ensemble.reseed()
    pe2 = pyemu.ParameterEnsemble.from_gaussian_draw(ph.pst, ph.build_prior(sigma_range=6), num_reals=num_reals)
    # the two draws should agree statistically at this many realizations
    mn1, mn2 = pe1.mean(), pe2.mean()
    sd1, sd2 = pe1.std(), pe2.std()
    diff_mn = mn1 - mn2
    diff_sd = sd1 - sd2
    print(diff_mn)
    assert diff_mn.apply(np.abs).max() < 0.1
    print(diff_sd)
    assert diff_sd.apply(np.abs).max() < 0.1
def from_flopy_pp_test():
    """Test pilot-point parameterization in PstFromFlopyModel.

    First builds a setup without a prior and checks that the multiplier
    arrays written by apply_array_pars() are all 1.0 (a no-op at the initial
    parameter values) and that arr_pars.csv records pp_fill_value == 1;
    then rebuilds with pilot points for every property in every layer and a
    full prior covariance.

    Fixes from the original: ``raise (str(e))`` raised a plain string, which
    is itself a TypeError in Python 3 (exceptions must derive from
    BaseException) - replaced with try/finally so the original exception
    propagates and the cwd is always restored.  Also removed the unused
    ``obssim_smp_pairs`` assignments.
    """
    import numpy as np
    import pandas as pd
    try:
        import flopy
    except ImportError:
        # flopy is an optional dependency - quietly skip this test
        return
    import pyemu
    org_model_ws = os.path.join("..", "examples", "freyberg_sfr_update")
    nam_file = "freyberg.nam"
    m = flopy.modflow.Modflow.load(nam_file, model_ws=org_model_ws, check=False)
    m.change_model_ws("temp")
    # collapse all active cells into a single zone (3)
    ib = m.bas6.ibound.array
    ib[ib > 0] = 3
    m.bas6.ibound = ib
    m.write_input()
    new_model_ws = "temp_pst_from_flopy"
    pp_props = [["upw.ss", [0, 1]], ["upw.hk", [1, 0]], ["upw.vka", 1]]
    helper = pyemu.helpers.PstFromFlopyModel(nam_file, new_model_ws, "temp",
                                             pp_props=pp_props,
                                             remove_existing=True,
                                             pp_space=4,
                                             use_pp_zones=False,
                                             build_prior=False)
    b_d = os.getcwd()
    os.chdir(new_model_ws)
    try:
        pyemu.helpers.apply_array_pars()
    finally:
        # restore the cwd even on failure; any exception propagates unchanged
        os.chdir(b_d)
    # at the initial parameter values every multiplier array should be 1.0
    mlt_dir = os.path.join(new_model_ws, "arr_mlt")
    for f in os.listdir(mlt_dir):
        arr = np.loadtxt(os.path.join(mlt_dir, f))
        assert np.all(arr == 1)
    df = pd.read_csv(os.path.join(new_model_ws, "arr_pars.csv"), index_col=0)
    assert np.all(df.pp_fill_value.values == 1)
    # second pass: pilot points for every prop in every layer, with a prior
    props = ["upw.ss", "upw.hk", "upw.vka"]
    pp_props = [[p, k] for k in range(m.nlay) for p in props]
    helper = pyemu.helpers.PstFromFlopyModel(nam_file, new_model_ws, "temp",
                                             pp_props=pp_props,
                                             remove_existing=True,
                                             pp_space=4,
                                             use_pp_zones=False,
                                             build_prior=True)
def pst_from_flopy_specsim_draw_test():
    """Check that a spectral-simulation draw (use_specsim=True) statistically
    matches a standard geostatistical draw from the same setup.

    Both ensembles are drawn with sigma_range=2, log-transformed, and their
    means and standard deviations compared to within 0.1.  Removed dead code
    from the original: unused ``shutil``/``pandas`` imports, an ``hds_kperk``
    list built but never used, the ``gr_df``/``gr_par``/``real1``/``real2``/
    ``arr`` debugging chain whose results were never read, and large blocks
    of commented-out debug code.  Also renamed the dict-comprehension loop
    variable, which shadowed the variogram ``v``.
    """
    import numpy as np
    try:
        import flopy
    except ImportError:
        # flopy is an optional dependency - quietly skip this test
        return
    import pyemu
    org_model_ws = os.path.join("..", "examples", "freyberg_sfr_update")
    nam_file = "freyberg.nam"
    m = flopy.modflow.Modflow.load(nam_file, model_ws=org_model_ws, check=False)
    # add a RIV package so the spatial list-parameter props have data to act on
    flopy.modflow.ModflowRiv(m, stress_period_data={0: [[0, 0, 0, 30.0, 1.0, 25.0],
                                                        [0, 0, 1, 31.0, 1.0, 25.0],
                                                        [0, 0, 1, 31.0, 1.0, 25.0]]})
    org_model_ws = "temp"
    m.change_model_ws(org_model_ws)
    m.write_input()
    new_model_ws = "temp_pst_from_flopy"
    temp_list_props = [["wel.flux", None]]
    spat_list_props = [["riv.cond", 0], ["riv.stage", 0]]
    v = pyemu.geostats.ExpVario(a=2500, contribution=1.0)
    gs = pyemu.geostats.GeoStruct(variograms=[v], transform="log")
    ph = pyemu.helpers.PstFromFlopyModel(nam_file, new_model_ws=new_model_ws,
                                         org_model_ws=org_model_ws,
                                         grid_props=[["rch.rech", 0], ["rch.rech", [1, 2]]],
                                         remove_existing=True,
                                         model_exe_name="mfnwt", temporal_list_props=temp_list_props,
                                         spatial_list_props=spat_list_props, build_prior=False,
                                         grid_geostruct=gs)
    num_reals = 10000
    par = ph.pst.parameter_data
    par.loc[:, "parval1"] = 1
    par.loc[:, "parubnd"] = 10
    par.loc[:, "parlbnd"] = .1
    li = par.partrans == "log"
    pe1 = ph.draw(num_reals=num_reals, sigma_range=2, use_specsim=True)
    pyemu.Ensemble.reseed()
    pe2 = ph.draw(num_reals=num_reals, sigma_range=2)
    # compare in transformed (log) space
    pe1.transform()
    pe2.transform()
    par_vals = par.parval1.copy()
    par_vals.loc[li] = par_vals.loc[li].apply(np.log10)
    mn1, mn2 = pe1.mean(), pe2.mean()
    sd1, sd2 = pe1.std(), pe2.std()
    diag = pyemu.Cov.from_parameter_data(ph.pst, sigma_range=2.0)
    var_vals = {p: np.sqrt(sig2) for p, sig2 in zip(diag.row_names, diag.x)}
    for pname in par_vals.index:
        print(pname, par_vals[pname], mn1[pname], mn2[pname], var_vals[pname], sd1[pname], sd2[pname])
    diff_mn = mn1 - mn2
    diff_sd = sd1 - sd2
    assert diff_mn.apply(np.abs).max() < 0.1, diff_mn.apply(np.abs).max()
    assert diff_sd.apply(np.abs).max() < 0.1, diff_sd.apply(np.abs).sort_values()
def at_bounds_test():
    """Exercise Pst.get_adj_pars_at_bounds() by pushing one parameter above
    its upper bound and pinning another exactly at its lower bound, then
    checking one parameter is reported at each bound."""
    import pyemu
    pst = pyemu.Pst(os.path.join("pst", "pest.pst"))
    par_df = pst.parameter_data
    first = pst.par_names[0]
    second = pst.par_names[1]
    # drive the first parameter past its upper bound ...
    par_df.loc[first, "parval1"] = par_df.parubnd[first] + 1.0
    # ... and set the second exactly at its lower bound
    par_df.loc[second, "parval1"] = par_df.parlbnd[second]
    at_lb, at_ub = pst.get_adj_pars_at_bounds()
    assert len(at_lb) == 1
    assert len(at_ub) == 1
def ineq_phi_test():
    """Inequality-style observation groups should contribute zero phi when the
    model output is on the satisfied side of the constraint, regardless of
    which group-name tag is used."""
    import pyemu
    pst = pyemu.Pst(os.path.join("pst", "pest.pst"))
    org_phi = pst.phi
    obs = pst.observation_data
    nz_names = pst.nnz_obs_names
    # model outputs exceed the target: greater-than style groups are satisfied
    obs.loc[nz_names, "obsval"] = pst.res.loc[nz_names, "modelled"] - 1
    for grp in ["g_test", "greater_test", "<@"]:
        obs.loc[nz_names, "obgnme"] = grp
        assert pst.phi < 1.0e-6
    # model outputs fall below the target: less-than style groups are satisfied
    obs.loc[nz_names, "obsval"] = pst.res.loc[nz_names, "modelled"] + 1
    for grp in ["l_test", "less_", ">@"]:
        obs.loc[nz_names, "obgnme"] = grp
        assert pst.phi < 1.0e-6
if __name__ == "__main__":
    # Ad-hoc test driver: comment/uncomment the call(s) to run a single test
    # locally without going through the test runner.
    #at_bounds_test()
    #pst_from_flopy_geo_draw_test()
    #pst_from_flopy_specsim_draw_test()
    # run_array_pars()
    # from_flopy_zone_pars()
    #from_flopy_pp_test()
    from_flopy()
    #parrep_test()
    #from_flopy_kl_test()
    #from_flopy_reachinput()
    #ineq_phi_test()
| [
"flopy.modflow.Modflow.load",
"numpy.sqrt",
"pandas.read_csv",
"pyemu.geostats.GeoStruct",
"pyemu.helpers.PstFromFlopyModel",
"numpy.loadtxt",
"pyemu.geostats.ExpVario",
"os.remove",
"os.path.exists",
"flopy.modflow.ModflowRiv",
"os.listdir",
"numpy.random.random",
"pyemu.Ensemble.reseed",
... | [((34, 56), 'os.path.exists', 'os.path.exists', (['"""temp"""'], {}), "('temp')\n", (48, 56), False, 'import os\n'), ((62, 78), 'os.mkdir', 'os.mkdir', (['"""temp"""'], {}), "('temp')\n", (70, 78), False, 'import os\n'), ((264, 317), 'os.path.join', 'os.path.join', (['""".."""', '"""examples"""', '"""freyberg_sfr_update"""'], {}), "('..', 'examples', 'freyberg_sfr_update')\n", (276, 317), False, 'import os\n'), ((356, 428), 'flopy.modflow.Modflow.load', 'flopy.modflow.Modflow.load', (['nam_file'], {'model_ws': 'org_model_ws', 'check': '(False)'}), '(nam_file, model_ws=org_model_ws, check=False)\n', (382, 428), False, 'import flopy\n'), ((433, 577), 'flopy.modflow.ModflowRiv', 'flopy.modflow.ModflowRiv', (['m'], {'stress_period_data': '{(0): [[0, 0, 0, 30.0, 1.0, 25.0], [0, 0, 1, 31.0, 1.0, 25.0], [0, 0, 1, \n 31.0, 1.0, 25.0]]}'}), '(m, stress_period_data={(0): [[0, 0, 0, 30.0, 1.0, \n 25.0], [0, 0, 1, 31.0, 1.0, 25.0], [0, 0, 1, 31.0, 1.0, 25.0]]})\n', (457, 577), False, 'import flopy\n'), ((1400, 1563), 'pyemu.helpers.PstFromFlopyModel', 'pyemu.helpers.PstFromFlopyModel', (['m'], {'new_model_ws': 'new_model_ws', 'org_model_ws': 'org_model_ws', 'kl_props': 'kl_props', 'remove_existing': '(True)', 'model_exe_name': '"""mfnwt"""'}), "(m, new_model_ws=new_model_ws, org_model_ws=\n org_model_ws, kl_props=kl_props, remove_existing=True, model_exe_name=\n 'mfnwt')\n", (1431, 1563), False, 'import pyemu\n'), ((1897, 1950), 'os.path.join', 'os.path.join', (['""".."""', '"""examples"""', '"""freyberg_sfr_update"""'], {}), "('..', 'examples', 'freyberg_sfr_update')\n", (1909, 1950), False, 'import os\n'), ((1989, 2061), 'flopy.modflow.Modflow.load', 'flopy.modflow.Modflow.load', (['nam_file'], {'model_ws': 'org_model_ws', 'check': '(False)'}), '(nam_file, model_ws=org_model_ws, check=False)\n', (2015, 2061), False, 'import flopy\n'), ((2066, 2210), 'flopy.modflow.ModflowRiv', 'flopy.modflow.ModflowRiv', (['m'], {'stress_period_data': '{(0): [[0, 0, 0, 30.0, 1.0, 
25.0], [0, 0, 1, 31.0, 1.0, 25.0], [0, 0, 1, \n 31.0, 1.0, 25.0]]}'}), '(m, stress_period_data={(0): [[0, 0, 0, 30.0, 1.0, \n 25.0], [0, 0, 1, 31.0, 1.0, 25.0], [0, 0, 1, 31.0, 1.0, 25.0]]})\n', (2090, 2210), False, 'import flopy\n'), ((2874, 3168), 'pyemu.helpers.PstFromFlopyModel', 'pyemu.helpers.PstFromFlopyModel', (['nam_file'], {'new_model_ws': 'new_model_ws', 'org_model_ws': 'org_model_ws', 'zone_props': "[['rch.rech', 0], ['rch.rech', [1, 2]]]", 'remove_existing': '(True)', 'model_exe_name': '"""mfnwt"""', 'temporal_list_props': 'temp_list_props', 'spatial_list_props': 'spat_list_props', 'hfb_pars': '(True)'}), "(nam_file, new_model_ws=new_model_ws,\n org_model_ws=org_model_ws, zone_props=[['rch.rech', 0], ['rch.rech', [1,\n 2]]], remove_existing=True, model_exe_name='mfnwt', temporal_list_props\n =temp_list_props, spatial_list_props=spat_list_props, hfb_pars=True)\n", (2905, 3168), False, 'import pyemu\n'), ((3371, 3413), 'os.path.join', 'os.path.join', (['new_model_ws', '"""arr_pars.csv"""'], {}), "(new_model_ws, 'arr_pars.csv')\n", (3383, 3413), False, 'import os\n'), ((3423, 3452), 'pandas.read_csv', 'pd.read_csv', (['csv'], {'index_col': '(0)'}), '(csv, index_col=0)\n', (3434, 3452), True, 'import pandas as pd\n'), ((3761, 3783), 'os.chdir', 'os.chdir', (['new_model_ws'], {}), '(new_model_ws)\n', (3769, 3783), False, 'import os\n'), ((3819, 3850), 'pyemu.helpers.apply_list_pars', 'pyemu.helpers.apply_list_pars', ([], {}), '()\n', (3848, 3850), False, 'import pyemu\n'), ((3855, 3869), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (3863, 3869), False, 'import os\n'), ((3880, 4122), 'pyemu.helpers.PstFromFlopyModel', 'pyemu.helpers.PstFromFlopyModel', (['nam_file'], {'new_model_ws': 'new_model_ws', 'org_model_ws': 'org_model_ws', 'zone_props': "[['rch.rech', 0], ['rch.rech', [1, 2]]]", 'remove_existing': '(True)', 'model_exe_name': '"""mfnwt"""', 'spatial_list_props': 'spat_list_props'}), "(nam_file, new_model_ws=new_model_ws,\n 
org_model_ws=org_model_ws, zone_props=[['rch.rech', 0], ['rch.rech', [1,\n 2]]], remove_existing=True, model_exe_name='mfnwt', spatial_list_props=\n spat_list_props)\n", (3911, 4122), False, 'import pyemu\n'), ((4347, 4590), 'pyemu.helpers.PstFromFlopyModel', 'pyemu.helpers.PstFromFlopyModel', (['nam_file'], {'new_model_ws': 'new_model_ws', 'org_model_ws': 'org_model_ws', 'zone_props': "[['rch.rech', 0], ['rch.rech', [1, 2]]]", 'remove_existing': '(True)', 'model_exe_name': '"""mfnwt"""', 'temporal_list_props': 'temp_list_props'}), "(nam_file, new_model_ws=new_model_ws,\n org_model_ws=org_model_ws, zone_props=[['rch.rech', 0], ['rch.rech', [1,\n 2]]], remove_existing=True, model_exe_name='mfnwt', temporal_list_props\n =temp_list_props)\n", (4378, 4590), False, 'import pyemu\n'), ((4834, 4845), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4843, 4845), False, 'import os\n'), ((4850, 4872), 'os.chdir', 'os.chdir', (['new_model_ws'], {}), '(new_model_ws)\n', (4858, 4872), False, 'import os\n'), ((4925, 4953), 'os.path.join', 'os.path.join', (['"""arr_pars.csv"""'], {}), "('arr_pars.csv')\n", (4937, 4953), False, 'import os\n'), ((4963, 4992), 'pandas.read_csv', 'pd.read_csv', (['csv'], {'index_col': '(0)'}), '(csv, index_col=0)\n', (4974, 4992), True, 'import pandas as pd\n'), ((5091, 5123), 'pyemu.helpers.apply_array_pars', 'pyemu.helpers.apply_array_pars', ([], {}), '()\n', (5121, 5123), False, 'import pyemu\n'), ((5948, 5980), 'pyemu.helpers.apply_array_pars', 'pyemu.helpers.apply_array_pars', ([], {}), '()\n', (5978, 5980), False, 'import pyemu\n'), ((5991, 6024), 'numpy.loadtxt', 'np.loadtxt', (['df.model_file.iloc[0]'], {}), '(df.model_file.iloc[0])\n', (6001, 6024), True, 'import numpy as np\n'), ((6153, 6165), 'os.chdir', 'os.chdir', (['bd'], {}), '(bd)\n', (6161, 6165), False, 'import os\n'), ((6186, 6239), 'os.path.join', 'os.path.join', (['""".."""', '"""examples"""', '"""freyberg_sfr_update"""'], {}), "('..', 'examples', 'freyberg_sfr_update')\n", (6198, 
6239), False, 'import os\n'), ((6278, 6350), 'flopy.modflow.Modflow.load', 'flopy.modflow.Modflow.load', (['nam_file'], {'model_ws': 'org_model_ws', 'check': '(False)'}), '(nam_file, model_ws=org_model_ws, check=False)\n', (6304, 6350), False, 'import flopy\n'), ((6365, 6559), 'pyemu.helpers.PstFromFlopyModel', 'pyemu.helpers.PstFromFlopyModel', (['nam_file', 'new_model_ws', 'org_model_ws'], {'hds_kperk': '[0, 0]', 'remove_existing': '(True)', 'model_exe_name': '"""mfnwt"""', 'sfr_pars': '(True)', 'sfr_obs': '(True)', 'temporal_sfr_pars': '(True)'}), "(nam_file, new_model_ws, org_model_ws,\n hds_kperk=[0, 0], remove_existing=True, model_exe_name='mfnwt',\n sfr_pars=True, sfr_obs=True, temporal_sfr_pars=True)\n", (6396, 6559), False, 'import pyemu\n'), ((6770, 6842), 'flopy.modflow.Modflow.load', 'flopy.modflow.Modflow.load', (['nam_file'], {'model_ws': 'org_model_ws', 'check': '(False)'}), '(nam_file, model_ws=org_model_ws, check=False)\n', (6796, 6842), False, 'import flopy\n'), ((6857, 7068), 'pyemu.helpers.PstFromFlopyModel', 'pyemu.helpers.PstFromFlopyModel', (['nam_file', 'new_model_ws', 'org_model_ws'], {'hds_kperk': '[0, 0]', 'remove_existing': '(True)', 'model_exe_name': '"""mfnwt"""', 'sfr_pars': "['flow', 'not_a_par']", 'temporal_sfr_pars': '(True)', 'sfr_obs': '(True)'}), "(nam_file, new_model_ws, org_model_ws,\n hds_kperk=[0, 0], remove_existing=True, model_exe_name='mfnwt',\n sfr_pars=['flow', 'not_a_par'], temporal_sfr_pars=True, sfr_obs=True)\n", (6888, 7068), False, 'import pyemu\n'), ((7438, 7510), 'flopy.modflow.Modflow.load', 'flopy.modflow.Modflow.load', (['nam_file'], {'model_ws': 'org_model_ws', 'check': '(False)'}), '(nam_file, model_ws=org_model_ws, check=False)\n', (7464, 7510), False, 'import flopy\n'), ((7525, 7719), 'pyemu.helpers.PstFromFlopyModel', 'pyemu.helpers.PstFromFlopyModel', (['nam_file', 'new_model_ws', 'org_model_ws'], {'hds_kperk': '[0, 0]', 'remove_existing': '(True)', 'model_exe_name': '"""mfnwt"""', 'sfr_pars': 
"['not_a_par0', 'not_a_par1']", 'sfr_obs': '(True)'}), "(nam_file, new_model_ws, org_model_ws,\n hds_kperk=[0, 0], remove_existing=True, model_exe_name='mfnwt',\n sfr_pars=['not_a_par0', 'not_a_par1'], sfr_obs=True)\n", (7556, 7719), False, 'import pyemu\n'), ((8060, 8220), 'pyemu.helpers.PstFromFlopyModel', 'pyemu.helpers.PstFromFlopyModel', (['nam_file', 'new_model_ws', 'org_model_ws'], {'pp_props': 'pp_props', 'hds_kperk': '[0, 0]', 'remove_existing': '(True)', 'model_exe_name': '"""mfnwt"""'}), "(nam_file, new_model_ws, org_model_ws,\n pp_props=pp_props, hds_kperk=[0, 0], remove_existing=True,\n model_exe_name='mfnwt')\n", (8091, 8220), False, 'import pyemu\n'), ((8312, 8407), 'flopy.modflow.Modflow.load', 'flopy.modflow.Modflow.load', (['nam_file'], {'model_ws': 'org_model_ws', 'exe_name': '"""mfnwt"""', 'check': '(False)'}), "(nam_file, model_ws=org_model_ws, exe_name=\n 'mfnwt', check=False)\n", (8338, 8407), False, 'import flopy\n'), ((8475, 8592), 'pyemu.helpers.PstFromFlopyModel', 'pyemu.helpers.PstFromFlopyModel', (['m', 'new_model_ws'], {'const_props': 'const_props', 'hds_kperk': '[0, 0]', 'remove_existing': '(True)'}), '(m, new_model_ws, const_props=const_props,\n hds_kperk=[0, 0], remove_existing=True)\n', (8506, 8592), False, 'import pyemu\n'), ((8994, 9130), 'pyemu.helpers.PstFromFlopyModel', 'pyemu.helpers.PstFromFlopyModel', (['nam_file', 'new_model_ws', 'org_model_ws'], {'grid_props': 'grid_props', 'hds_kperk': '[0, 0]', 'remove_existing': '(True)'}), '(nam_file, new_model_ws, org_model_ws,\n grid_props=grid_props, hds_kperk=[0, 0], remove_existing=True)\n', (9025, 9130), False, 'import pyemu\n'), ((9292, 9428), 'pyemu.helpers.PstFromFlopyModel', 'pyemu.helpers.PstFromFlopyModel', (['nam_file', 'new_model_ws', 'org_model_ws'], {'zone_props': 'zone_props', 'hds_kperk': '[0, 0]', 'remove_existing': '(True)'}), '(nam_file, new_model_ws, org_model_ws,\n zone_props=zone_props, hds_kperk=[0, 0], remove_existing=True)\n', (9323, 9428), False, 'import 
pyemu\n'), ((9706, 9851), 'pyemu.helpers.PstFromFlopyModel', 'pyemu.helpers.PstFromFlopyModel', (['nam_file', 'new_model_ws', 'org_model_ws'], {'temporal_list_props': 'list_props', 'hds_kperk': '[0, 0]', 'remove_existing': '(True)'}), '(nam_file, new_model_ws, org_model_ws,\n temporal_list_props=list_props, hds_kperk=[0, 0], remove_existing=True)\n', (9737, 9851), False, 'import pyemu\n'), ((10107, 10502), 'pyemu.helpers.PstFromFlopyModel', 'pyemu.helpers.PstFromFlopyModel', (['nam_file', 'new_model_ws', 'org_model_ws'], {'pp_props': 'pp_props', 'const_props': 'const_props', 'grid_props': 'grid_props', 'zone_props': 'zone_props', 'temporal_list_props': 'list_props', 'spatial_list_props': 'list_props', 'remove_existing': '(True)', 'obssim_smp_pairs': 'obssim_smp_pairs', 'pp_space': '(4)', 'use_pp_zones': '(False)', 'k_zone_dict': 'k_zone_dict', 'hds_kperk': '[0, 0]', 'build_prior': '(False)'}), '(nam_file, new_model_ws, org_model_ws,\n pp_props=pp_props, const_props=const_props, grid_props=grid_props,\n zone_props=zone_props, temporal_list_props=list_props,\n spatial_list_props=list_props, remove_existing=True, obssim_smp_pairs=\n obssim_smp_pairs, pp_space=4, use_pp_zones=False, k_zone_dict=\n k_zone_dict, hds_kperk=[0, 0], build_prior=False)\n', (10138, 10502), False, 'import pyemu\n'), ((11371, 11410), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(2.0)', 'pst.nnz_obs'], {}), '(0.0, 2.0, pst.nnz_obs)\n', (11387, 11410), True, 'import numpy as np\n'), ((11776, 11829), 'os.path.join', 'os.path.join', (['""".."""', '"""examples"""', '"""freyberg_sfr_update"""'], {}), "('..', 'examples', 'freyberg_sfr_update')\n", (11788, 11829), False, 'import os\n'), ((11868, 11940), 'flopy.modflow.Modflow.load', 'flopy.modflow.Modflow.load', (['nam_file'], {'model_ws': 'org_model_ws', 'check': '(False)'}), '(nam_file, model_ws=org_model_ws, check=False)\n', (11894, 11940), False, 'import flopy\n'), ((12905, 13232), 'pyemu.helpers.PstFromFlopyModel', 
'pyemu.helpers.PstFromFlopyModel', (['nam_file', 'new_model_ws', 'org_model_ws'], {'const_props': 'const_props', 'grid_props': 'grid_props', 'zone_props': 'zone_props', 'pp_props': 'pp_props', 'remove_existing': '(True)', 'obssim_smp_pairs': 'obssim_smp_pairs', 'pp_space': '(4)', 'use_pp_zones': '(True)', 'k_zone_dict': 'k_zone_dict', 'hds_kperk': '[0, 0]', 'build_prior': '(False)'}), '(nam_file, new_model_ws, org_model_ws,\n const_props=const_props, grid_props=grid_props, zone_props=zone_props,\n pp_props=pp_props, remove_existing=True, obssim_smp_pairs=\n obssim_smp_pairs, pp_space=4, use_pp_zones=True, k_zone_dict=\n k_zone_dict, hds_kperk=[0, 0], build_prior=False)\n', (12936, 13232), False, 'import pyemu\n'), ((13788, 14115), 'pyemu.helpers.PstFromFlopyModel', 'pyemu.helpers.PstFromFlopyModel', (['nam_file', 'new_model_ws', 'org_model_ws'], {'const_props': 'const_props', 'grid_props': 'grid_props', 'zone_props': 'zone_props', 'pp_props': 'pp_props', 'remove_existing': '(True)', 'obssim_smp_pairs': 'obssim_smp_pairs', 'pp_space': '(4)', 'use_pp_zones': '(True)', 'k_zone_dict': 'k_zone_dict', 'hds_kperk': '[0, 0]', 'build_prior': '(False)'}), '(nam_file, new_model_ws, org_model_ws,\n const_props=const_props, grid_props=grid_props, zone_props=zone_props,\n pp_props=pp_props, remove_existing=True, obssim_smp_pairs=\n obssim_smp_pairs, pp_space=4, use_pp_zones=True, k_zone_dict=\n k_zone_dict, hds_kperk=[0, 0], build_prior=False)\n', (13819, 14115), False, 'import pyemu\n'), ((14616, 14627), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (14625, 14627), False, 'import os\n'), ((14839, 14850), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (14848, 14850), False, 'import os\n'), ((15487, 15498), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (15496, 15498), False, 'import os\n'), ((15518, 15572), 'os.path.join', 'os.path.join', (['""".."""', '"""examples"""', '"""freyberg_sfr_reaches"""'], {}), "('..', 'examples', 'freyberg_sfr_reaches')\n", (15530, 15572), False, 'import 
os\n'), ((15661, 15733), 'flopy.modflow.Modflow.load', 'flopy.modflow.Modflow.load', (['nam_file'], {'model_ws': 'org_model_ws', 'check': '(False)'}), '(nam_file, model_ws=org_model_ws, check=False)\n', (15687, 15733), False, 'import flopy\n'), ((19658, 19680), 'os.chdir', 'os.chdir', (['new_model_ws'], {}), '(new_model_ws)\n', (19666, 19680), False, 'import os\n'), ((19685, 19717), 'pyemu.helpers.apply_array_pars', 'pyemu.helpers.apply_array_pars', ([], {}), '()\n', (19715, 19717), False, 'import pyemu\n'), ((19722, 19736), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (19730, 19736), False, 'import os\n'), ((19923, 19941), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (19937, 19941), True, 'import numpy as np\n'), ((20253, 20271), 'numpy.random.seed', 'np.random.seed', (['(99)'], {}), '(99)\n', (20267, 20271), True, 'import numpy as np\n'), ((20753, 20800), 'pyemu.pst_utils.generic_pst', 'pyemu.pst_utils.generic_pst', ([], {'par_names': 'parnames'}), '(par_names=parnames)\n', (20780, 20800), False, 'import pyemu\n'), ((21185, 21271), 'numpy.allclose', 'np.allclose', (['pst.parameter_data.iloc[1:].parval1.values', 'parvals[1:]'], {'atol': '(0.0001)'}), '(pst.parameter_data.iloc[1:].parval1.values, parvals[1:], atol=\n 0.0001)\n', (21196, 21271), True, 'import numpy as np\n'), ((21378, 21446), 'numpy.allclose', 'np.allclose', (['pst.parameter_data.parval1.values', 'parvals'], {'atol': '(0.0001)'}), '(pst.parameter_data.parval1.values, parvals, atol=0.0001)\n', (21389, 21446), True, 'import numpy as np\n'), ((21644, 21730), 'numpy.allclose', 'np.allclose', (['pst.parameter_data.iloc[1:].parval1.values', 'parvals[1:]'], {'atol': '(0.0001)'}), '(pst.parameter_data.iloc[1:].parval1.values, parvals[1:], atol=\n 0.0001)\n', (21655, 21730), True, 'import numpy as np\n'), ((21872, 21965), 'numpy.allclose', 'np.allclose', (['pst.parameter_data.parval1.values[:-1]', 'parens.T[0].values[:-1]'], {'atol': '(0.0001)'}), 
'(pst.parameter_data.parval1.values[:-1], parens.T[0].values[:-1],\n atol=0.0001)\n', (21883, 21965), True, 'import numpy as np\n'), ((22114, 22207), 'numpy.allclose', 'np.allclose', (['pst.parameter_data.parval1.values[:-1]', 'parens.T[3].values[:-1]'], {'atol': '(0.0001)'}), '(pst.parameter_data.parval1.values[:-1], parens.T[3].values[:-1],\n atol=0.0001)\n', (22125, 22207), True, 'import numpy as np\n'), ((22306, 22399), 'numpy.allclose', 'np.allclose', (['pst.parameter_data.parval1.values[:-1]', 'parens.T[2].values[:-1]'], {'atol': '(0.0001)'}), '(pst.parameter_data.parval1.values[:-1], parens.T[2].values[:-1],\n atol=0.0001)\n', (22317, 22399), True, 'import numpy as np\n'), ((22589, 22642), 'os.path.join', 'os.path.join', (['""".."""', '"""examples"""', '"""freyberg_sfr_update"""'], {}), "('..', 'examples', 'freyberg_sfr_update')\n", (22601, 22642), False, 'import os\n'), ((22681, 22753), 'flopy.modflow.Modflow.load', 'flopy.modflow.Modflow.load', (['nam_file'], {'model_ws': 'org_model_ws', 'check': '(False)'}), '(nam_file, model_ws=org_model_ws, check=False)\n', (22707, 22753), False, 'import flopy\n'), ((22758, 22902), 'flopy.modflow.ModflowRiv', 'flopy.modflow.ModflowRiv', (['m'], {'stress_period_data': '{(0): [[0, 0, 0, 30.0, 1.0, 25.0], [0, 0, 1, 31.0, 1.0, 25.0], [0, 0, 1, \n 31.0, 1.0, 25.0]]}'}), '(m, stress_period_data={(0): [[0, 0, 0, 30.0, 1.0, \n 25.0], [0, 0, 1, 31.0, 1.0, 25.0], [0, 0, 1, 31.0, 1.0, 25.0]]})\n', (22782, 22902), False, 'import flopy\n'), ((23365, 23644), 'pyemu.helpers.PstFromFlopyModel', 'pyemu.helpers.PstFromFlopyModel', (['nam_file'], {'new_model_ws': 'new_model_ws', 'org_model_ws': 'org_model_ws', 'zone_props': "[['rch.rech', 0], ['rch.rech', [1, 2]]]", 'remove_existing': '(True)', 'model_exe_name': '"""mfnwt"""', 'temporal_list_props': 'temp_list_props', 'spatial_list_props': 'spat_list_props'}), "(nam_file, new_model_ws=new_model_ws,\n org_model_ws=org_model_ws, zone_props=[['rch.rech', 0], ['rch.rech', [1,\n 2]]], 
remove_existing=True, model_exe_name='mfnwt', temporal_list_props\n =temp_list_props, spatial_list_props=spat_list_props)\n", (23396, 23644), False, 'import pyemu\n'), ((23920, 23943), 'pyemu.Ensemble.reseed', 'pyemu.Ensemble.reseed', ([], {}), '()\n', (23941, 23943), False, 'import pyemu\n'), ((24500, 24553), 'os.path.join', 'os.path.join', (['""".."""', '"""examples"""', '"""freyberg_sfr_update"""'], {}), "('..', 'examples', 'freyberg_sfr_update')\n", (24512, 24553), False, 'import os\n'), ((24592, 24664), 'flopy.modflow.Modflow.load', 'flopy.modflow.Modflow.load', (['nam_file'], {'model_ws': 'org_model_ws', 'check': '(False)'}), '(nam_file, model_ws=org_model_ws, check=False)\n', (24618, 24664), False, 'import flopy\n'), ((24935, 25099), 'pyemu.helpers.PstFromFlopyModel', 'pyemu.helpers.PstFromFlopyModel', (['nam_file', 'new_model_ws', '"""temp"""'], {'pp_props': 'pp_props', 'remove_existing': '(True)', 'pp_space': '(4)', 'use_pp_zones': '(False)', 'build_prior': '(False)'}), "(nam_file, new_model_ws, 'temp', pp_props=\n pp_props, remove_existing=True, pp_space=4, use_pp_zones=False,\n build_prior=False)\n", (24966, 25099), False, 'import pyemu\n'), ((25326, 25337), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (25335, 25337), False, 'import os\n'), ((25342, 25364), 'os.chdir', 'os.chdir', (['new_model_ws'], {}), '(new_model_ws)\n', (25350, 25364), False, 'import os\n'), ((25491, 25504), 'os.chdir', 'os.chdir', (['b_d'], {}), '(b_d)\n', (25499, 25504), False, 'import os\n'), ((25521, 25558), 'os.path.join', 'os.path.join', (['new_model_ws', '"""arr_mlt"""'], {}), "(new_model_ws, 'arr_mlt')\n", (25533, 25558), False, 'import os\n'), ((25571, 25590), 'os.listdir', 'os.listdir', (['mlt_dir'], {}), '(mlt_dir)\n', (25581, 25590), False, 'import os\n'), ((25761, 25797), 'numpy.all', 'np.all', (['(df.pp_fill_value.values == 1)'], {}), '(df.pp_fill_value.values == 1)\n', (25767, 25797), True, 'import numpy as np\n'), ((26099, 26262), 'pyemu.helpers.PstFromFlopyModel', 
'pyemu.helpers.PstFromFlopyModel', (['nam_file', 'new_model_ws', '"""temp"""'], {'pp_props': 'pp_props', 'remove_existing': '(True)', 'pp_space': '(4)', 'use_pp_zones': '(False)', 'build_prior': '(True)'}), "(nam_file, new_model_ws, 'temp', pp_props=\n pp_props, remove_existing=True, pp_space=4, use_pp_zones=False,\n build_prior=True)\n", (26130, 26262), False, 'import pyemu\n'), ((26680, 26733), 'os.path.join', 'os.path.join', (['""".."""', '"""examples"""', '"""freyberg_sfr_update"""'], {}), "('..', 'examples', 'freyberg_sfr_update')\n", (26692, 26733), False, 'import os\n'), ((26772, 26844), 'flopy.modflow.Modflow.load', 'flopy.modflow.Modflow.load', (['nam_file'], {'model_ws': 'org_model_ws', 'check': '(False)'}), '(nam_file, model_ws=org_model_ws, check=False)\n', (26798, 26844), False, 'import flopy\n'), ((26849, 26993), 'flopy.modflow.ModflowRiv', 'flopy.modflow.ModflowRiv', (['m'], {'stress_period_data': '{(0): [[0, 0, 0, 30.0, 1.0, 25.0], [0, 0, 1, 31.0, 1.0, 25.0], [0, 0, 1, \n 31.0, 1.0, 25.0]]}'}), '(m, stress_period_data={(0): [[0, 0, 0, 30.0, 1.0, \n 25.0], [0, 0, 1, 31.0, 1.0, 25.0], [0, 0, 1, 31.0, 1.0, 25.0]]})\n', (26873, 26993), False, 'import flopy\n'), ((27455, 27504), 'pyemu.geostats.ExpVario', 'pyemu.geostats.ExpVario', ([], {'a': '(2500)', 'contribution': '(1.0)'}), '(a=2500, contribution=1.0)\n', (27478, 27504), False, 'import pyemu\n'), ((27513, 27570), 'pyemu.geostats.GeoStruct', 'pyemu.geostats.GeoStruct', ([], {'variograms': '[v]', 'transform': '"""log"""'}), "(variograms=[v], transform='log')\n", (27537, 27570), False, 'import pyemu\n'), ((27579, 27900), 'pyemu.helpers.PstFromFlopyModel', 'pyemu.helpers.PstFromFlopyModel', (['nam_file'], {'new_model_ws': 'new_model_ws', 'org_model_ws': 'org_model_ws', 'grid_props': "[['rch.rech', 0], ['rch.rech', [1, 2]]]", 'remove_existing': '(True)', 'model_exe_name': '"""mfnwt"""', 'temporal_list_props': 'temp_list_props', 'spatial_list_props': 'spat_list_props', 'build_prior': '(False)', 
'grid_geostruct': 'gs'}), "(nam_file, new_model_ws=new_model_ws,\n org_model_ws=org_model_ws, grid_props=[['rch.rech', 0], ['rch.rech', [1,\n 2]]], remove_existing=True, model_exe_name='mfnwt', temporal_list_props\n =temp_list_props, spatial_list_props=spat_list_props, build_prior=False,\n grid_geostruct=gs)\n", (27610, 27900), False, 'import pyemu\n'), ((28733, 28756), 'pyemu.Ensemble.reseed', 'pyemu.Ensemble.reseed', ([], {}), '()\n', (28754, 28756), False, 'import pyemu\n'), ((29242, 29274), 'numpy.zeros', 'np.zeros', (['(ph.m.nrow, ph.m.ncol)'], {}), '((ph.m.nrow, ph.m.ncol))\n', (29250, 29274), True, 'import numpy as np\n'), ((29485, 29539), 'pyemu.Cov.from_parameter_data', 'pyemu.Cov.from_parameter_data', (['ph.pst'], {'sigma_range': '(2.0)'}), '(ph.pst, sigma_range=2.0)\n', (29514, 29539), False, 'import pyemu\n'), ((985, 1061), 'pyemu.helpers.SpatialReference', 'pyemu.helpers.SpatialReference', ([], {'delc': 'm.dis.delc.array', 'delr': 'm.dis.delr.array'}), '(delc=m.dis.delc.array, delr=m.dis.delr.array)\n', (1015, 1061), False, 'import pyemu\n'), ((9944, 10004), 'os.path.join', 'os.path.join', (['""".."""', '"""examples"""', '"""Freyberg_Truth"""', '"""hk.zones"""'], {}), "('..', 'examples', 'Freyberg_Truth', 'hk.zones')\n", (9956, 10004), False, 'import os\n'), ((11458, 11505), 'os.path.join', 'os.path.join', (['new_model_ws', '"""freyberg_pest.pst"""'], {}), "(new_model_ws, 'freyberg_pest.pst')\n", (11470, 11505), False, 'import os\n'), ((11563, 11600), 'os.path.join', 'os.path.join', (['new_model_ws', '"""cov.coo"""'], {}), "(new_model_ws, 'cov.coo')\n", (11575, 11600), False, 'import os\n'), ((12415, 12475), 'os.path.join', 'os.path.join', (['""".."""', '"""examples"""', '"""Freyberg_Truth"""', '"""hk.zones"""'], {}), "('..', 'examples', 'Freyberg_Truth', 'hk.zones')\n", (12427, 12475), False, 'import os\n'), ((12513, 12575), 'os.path.join', 'os.path.join', (['""".."""', '"""examples"""', '"""Freyberg_Truth"""', '"""rand.zones"""'], {}), "('..', 
'examples', 'Freyberg_Truth', 'rand.zones')\n", (12525, 12575), False, 'import os\n'), ((16408, 16622), 'pyemu.helpers.PstFromFlopyModel', 'pyemu.helpers.PstFromFlopyModel', (['nam_file', 'new_model_ws', 'org_model_ws'], {'hds_kperk': '[0, 0]', 'remove_existing': '(True)', 'model_exe_name': '"""mfnwt"""', 'sfr_pars': 'sfr_par', 'temporal_sfr_pars': 'include_temporal_pars', 'sfr_obs': '(True)'}), "(nam_file, new_model_ws, org_model_ws,\n hds_kperk=[0, 0], remove_existing=True, model_exe_name='mfnwt',\n sfr_pars=sfr_par, temporal_sfr_pars=include_temporal_pars, sfr_obs=True)\n", (16439, 16622), False, 'import pyemu\n'), ((16819, 16841), 'os.chdir', 'os.chdir', (['new_model_ws'], {}), '(new_model_ws)\n', (16827, 16841), False, 'import os\n'), ((19545, 19557), 'os.chdir', 'os.chdir', (['bd'], {}), '(bd)\n', (19553, 19557), False, 'import os\n'), ((19956, 19976), 'numpy.random.random', 'np.random.random', (['(20)'], {}), '(20)\n', (19972, 19976), True, 'import numpy as np\n'), ((25382, 25414), 'pyemu.helpers.apply_array_pars', 'pyemu.helpers.apply_array_pars', ([], {}), '()\n', (25412, 25414), False, 'import pyemu\n'), ((25657, 25673), 'numpy.all', 'np.all', (['(arr == 1)'], {}), '(arr == 1)\n', (25663, 25673), True, 'import numpy as np\n'), ((25693, 25735), 'os.path.join', 'os.path.join', (['new_model_ws', '"""arr_pars.csv"""'], {}), "(new_model_ws, 'arr_pars.csv')\n", (25705, 25735), False, 'import os\n'), ((29557, 29567), 'numpy.sqrt', 'np.sqrt', (['v'], {}), '(v)\n', (29564, 29567), True, 'import numpy as np\n'), ((30074, 30105), 'os.path.join', 'os.path.join', (['"""pst"""', '"""pest.pst"""'], {}), "('pst', 'pest.pst')\n", (30086, 30105), False, 'import os\n'), ((30436, 30467), 'os.path.join', 'os.path.join', (['"""pst"""', '"""pest.pst"""'], {}), "('pst', 'pest.pst')\n", (30448, 30467), False, 'import os\n'), ((14693, 14705), 'os.chdir', 'os.chdir', (['bd'], {}), '(bd)\n', (14701, 14705), False, 'import os\n'), ((14927, 14939), 'os.chdir', 'os.chdir', (['bd'], 
{}), '(bd)\n', (14935, 14939), False, 'import os\n'), ((16204, 16221), 'os.path.exists', 'os.path.exists', (['f'], {}), '(f)\n', (16218, 16221), False, 'import os\n'), ((20298, 20322), 'numpy.tile', 'np.tile', (['parvals', '(5, 1)'], {}), '(parvals, (5, 1))\n', (20305, 20322), True, 'import numpy as np\n'), ((20972, 21015), 'pyemu.ParameterEnsemble', 'pyemu.ParameterEnsemble', ([], {'pst': 'pst', 'df': 'parens'}), '(pst=pst, df=parens)\n', (20995, 21015), False, 'import pyemu\n'), ((25450, 25463), 'os.chdir', 'os.chdir', (['b_d'], {}), '(b_d)\n', (25458, 25463), False, 'import os\n'), ((25617, 25641), 'os.path.join', 'os.path.join', (['mlt_dir', 'f'], {}), '(mlt_dir, f)\n', (25629, 25641), False, 'import os\n'), ((16239, 16251), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (16248, 16251), False, 'import os\n'), ((20321, 20343), 'numpy.random.randn', 'np.random.randn', (['(5)', '(20)'], {}), '(5, 20)\n', (20336, 20343), True, 'import numpy as np\n')] |
# A Faster R-CNN approach to Mu2e Tracking
# The detector (Fast R-CNN) part for alternative (i) method in the original paper
# see https://arxiv.org/abs/1506.01497
# Author: <NAME>
# Email: <EMAIL>
### imports starts
import sys
from pathlib import Path
import pickle
import timeit
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Dropout, TimeDistributed, Flatten, Reshape, Softmax
from tensorflow.keras.optimizers import Adam
from tensorflow.distribute import MirroredStrategy
from tensorflow.keras.metrics import CategoricalAccuracy
util_dir = Path.cwd().parent.joinpath('Utility')
sys.path.insert(1, str(util_dir))
from Information import *
from Configuration import frcnn_config
from Layers import RoIPooling
from Loss import *
from Metric import *
### imports ends
def detector_train(C):
    """Train the Fast R-CNN detector head of the Faster R-CNN system.

    Loads images, RoIs and detector targets from the .npy files referenced
    by the configuration, builds the detector network on top of the shared
    base net, warm-starts the shared layers from the RPN weights, and fits
    the model, logging progress to a CSV file.

    Parameters
    ----------
    C : frcnn_config
        Training configuration; supplies input array paths, weight/record
        file names, the base network and the loss-balancing lambdas.

    Returns
    -------
    frcnn_config
        The configuration object (unchanged), returned for chaining.
    """
    pstage("Start Training")
    # load the oneHotEncoder
    oneHotEncoder = C.oneHotEncoder
    classNum = len(oneHotEncoder)  # number of object classes = one-hot vector length
    # prepare the tensorflow.data.DataSet object
    inputs = np.load(C.img_inputs_npy)
    rois = np.load(C.rois)
    Y_labels = np.load(C.detector_train_Y_classifier)
    Y_bboxes = np.load(C.detector_train_Y_regressor)
    # add singleton spatial axes so targets align with the 1x1 conv outputs
    # produced after three rounds of 2x2 max pooling on the 8x8 RoI maps
    Y_labels = np.expand_dims(Y_labels, axis=(2,3))
    Y_bboxes = np.expand_dims(Y_bboxes, axis=(2,3))
    pdebug(Y_labels.shape)
    # outputs: resolve weight/record file locations from the config
    cwd = Path.cwd()
    data_dir = C.sub_data_dir
    weights_dir = C.weight_dir
    rpn_model_weight_file = weights_dir.joinpath(C.rpn_model_name+'.h5')
    detector_model_weight_file = weights_dir.joinpath(C.detector_model_name+'.h5')
    record_file = data_dir.joinpath(C.detector_record_name+'.csv')
    pinfo('I/O Path is configured')
    # construct model: shared base net -> RoI pooling -> per-RoI conv stack
    img_input = Input(shape=C.input_shape)
    RoI_input = Input(shape=rois.shape[1:])
    x = C.base_net.get_base_net(img_input)
    # pool every RoI to a fixed 8x8 feature map; TimeDistributed applies the
    # following layers independently to each RoI
    x = RoIPooling(8,8)([x, RoI_input])
    x = TimeDistributed(Conv2D(256, (3,3), activation='relu', padding='same'))(x)
    x = TimeDistributed(Conv2D(256, (3,3), activation='relu', padding='same'))(x)
    x = TimeDistributed(MaxPooling2D((2,2)))(x)
    x = TimeDistributed(Conv2D(128, (3,3), activation='relu', padding='same'))(x)
    x = TimeDistributed(Conv2D(128, (3,3), activation='relu', padding='same'))(x)
    x = TimeDistributed(MaxPooling2D((2,2)))(x)
    x = TimeDistributed(Conv2D(64, (3,3), activation='relu', padding='same'))(x)
    x = TimeDistributed(Conv2D(64, (3,3), activation='relu', padding='same'))(x)
    x = TimeDistributed(MaxPooling2D((2,2)))(x)
    # classification head: per-class score map + softmax over classes
    x1 = TimeDistributed(Conv2D(classNum, (1,1), padding='same'))(x)
    output_classifier = TimeDistributed(Softmax(), name='detector_out_class')(x1)
    # regression head: 4 box offsets per class
    output_regressor = TimeDistributed(Conv2D(classNum*4, (1,1), activation='linear', padding='same'), name='detector_out_regr')(x)
    model = Model(inputs=[img_input, RoI_input], outputs = [output_classifier, output_regressor])
    model.summary()
    # load weights trained by RPN (matched by layer name)
    model.load_weights(rpn_model_weight_file, by_name=True)
    # setup loss functions, weighted by the configured lambdas
    detector_class_loss = define_detector_class_loss(C.detector_lambda[0])
    detector_regr_loss = define_detector_regr_loss(C.detector_lambda[1])
    # setup optimizer: exponentially decaying learning rate
    lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
        initial_learning_rate=1e-4,
        decay_steps=10000,
        decay_rate=0.9)
    adam = Adam(learning_rate=lr_schedule)
    # setup callbacks: per-epoch metrics are appended to the CSV record file
    CsvCallback = tf.keras.callbacks.CSVLogger(str(record_file), separator=",", append=False)
    ca = CategoricalAccuracy()
    # compile the model
    model.compile(optimizer=adam, loss={'detector_out_class':detector_class_loss,\
                'detector_out_regr':detector_regr_loss},\
                metrics = {'detector_out_class':ca,\
                        'detector_out_regr':unmasked_IoU})
    # initialize fit parameters
    model.fit(x=[inputs, rois], y=[Y_labels, Y_bboxes],\
            validation_split=0.25,\
            shuffle=True,\
            batch_size=8, epochs=200,\
            callbacks = [CsvCallback])
    return C
if __name__ == "__main__":
    pbanner()
    psystem('Faster R-CNN Object Detection System')
    pmode('Training Fast R-CNN detector')
    pinfo('Parameters are set inside the script')

    # Load the pickled training configuration from the working directory.
    # Use a context manager so the file handle is closed deterministically
    # (the previous pickle.load(open(...)) leaked the handle).
    cwd = Path.cwd()
    pickle_path = cwd.joinpath('frcnn.train.config.pickle')
    with open(pickle_path, 'rb') as f:
        C = pickle.load(f)

    # initialize parameters: loss-balancing lambdas and output names
    lambdas = [1, 100]
    model_name = 'detector_mc_01'
    record_name = 'detector_mc_record_01'
    C.set_detector_record(model_name, record_name)
    C.set_detector_lambda(lambdas)

    C = detector_train(C)
| [
"tensorflow.keras.layers.Input",
"Layers.RoIPooling",
"tensorflow.keras.layers.Conv2D",
"pathlib.Path.cwd",
"tensorflow.keras.optimizers.schedules.ExponentialDecay",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.optimizers.Adam",
"numpy.expand_dims",
"tensorflow.keras.metrics.Categorical... | [((1111, 1136), 'numpy.load', 'np.load', (['C.img_inputs_npy'], {}), '(C.img_inputs_npy)\n', (1118, 1136), True, 'import numpy as np\n'), ((1148, 1163), 'numpy.load', 'np.load', (['C.rois'], {}), '(C.rois)\n', (1155, 1163), True, 'import numpy as np\n'), ((1179, 1217), 'numpy.load', 'np.load', (['C.detector_train_Y_classifier'], {}), '(C.detector_train_Y_classifier)\n', (1186, 1217), True, 'import numpy as np\n'), ((1233, 1270), 'numpy.load', 'np.load', (['C.detector_train_Y_regressor'], {}), '(C.detector_train_Y_regressor)\n', (1240, 1270), True, 'import numpy as np\n'), ((1286, 1323), 'numpy.expand_dims', 'np.expand_dims', (['Y_labels'], {'axis': '(2, 3)'}), '(Y_labels, axis=(2, 3))\n', (1300, 1323), True, 'import numpy as np\n'), ((1338, 1375), 'numpy.expand_dims', 'np.expand_dims', (['Y_bboxes'], {'axis': '(2, 3)'}), '(Y_bboxes, axis=(2, 3))\n', (1352, 1375), True, 'import numpy as np\n'), ((1427, 1437), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (1435, 1437), False, 'from pathlib import Path\n'), ((1799, 1825), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'C.input_shape'}), '(shape=C.input_shape)\n', (1804, 1825), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Dropout, TimeDistributed, Flatten, Reshape, Softmax\n'), ((1842, 1869), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'rois.shape[1:]'}), '(shape=rois.shape[1:])\n', (1847, 1869), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Dropout, TimeDistributed, Flatten, Reshape, Softmax\n'), ((2950, 3037), 'tensorflow.keras.Model', 'Model', ([], {'inputs': '[img_input, RoI_input]', 'outputs': '[output_classifier, output_regressor]'}), '(inputs=[img_input, RoI_input], outputs=[output_classifier,\n output_regressor])\n', (2955, 3037), False, 'from tensorflow.keras import Model\n'), ((3368, 3483), 'tensorflow.keras.optimizers.schedules.ExponentialDecay', 
'tf.keras.optimizers.schedules.ExponentialDecay', ([], {'initial_learning_rate': '(0.0001)', 'decay_steps': '(10000)', 'decay_rate': '(0.9)'}), '(initial_learning_rate=0.0001,\n decay_steps=10000, decay_rate=0.9)\n', (3414, 3483), True, 'import tensorflow as tf\n'), ((3514, 3545), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'learning_rate': 'lr_schedule'}), '(learning_rate=lr_schedule)\n', (3518, 3545), False, 'from tensorflow.keras.optimizers import Adam\n'), ((3673, 3694), 'tensorflow.keras.metrics.CategoricalAccuracy', 'CategoricalAccuracy', ([], {}), '()\n', (3692, 3694), False, 'from tensorflow.keras.metrics import CategoricalAccuracy\n'), ((4501, 4511), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (4509, 4511), False, 'from pathlib import Path\n'), ((1922, 1938), 'Layers.RoIPooling', 'RoIPooling', (['(8)', '(8)'], {}), '(8, 8)\n', (1932, 1938), False, 'from Layers import RoIPooling\n'), ((671, 681), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (679, 681), False, 'from pathlib import Path\n'), ((1979, 2033), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(256, (3, 3), activation='relu', padding='same')\n", (1985, 2033), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Dropout, TimeDistributed, Flatten, Reshape, Softmax\n'), ((2061, 2115), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(256, (3, 3), activation='relu', padding='same')\n", (2067, 2115), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Dropout, TimeDistributed, Flatten, Reshape, Softmax\n'), ((2143, 2163), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {}), '((2, 2))\n', (2155, 2163), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Dropout, TimeDistributed, Flatten, Reshape, Softmax\n'), ((2192, 2246), 
'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(128, (3, 3), activation='relu', padding='same')\n", (2198, 2246), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Dropout, TimeDistributed, Flatten, Reshape, Softmax\n'), ((2274, 2328), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(128, (3, 3), activation='relu', padding='same')\n", (2280, 2328), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Dropout, TimeDistributed, Flatten, Reshape, Softmax\n'), ((2356, 2376), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {}), '((2, 2))\n', (2368, 2376), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Dropout, TimeDistributed, Flatten, Reshape, Softmax\n'), ((2405, 2458), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(64, (3, 3), activation='relu', padding='same')\n", (2411, 2458), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Dropout, TimeDistributed, Flatten, Reshape, Softmax\n'), ((2486, 2539), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(64, (3, 3), activation='relu', padding='same')\n", (2492, 2539), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Dropout, TimeDistributed, Flatten, Reshape, Softmax\n'), ((2567, 2587), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', (['(2, 2)'], {}), '((2, 2))\n', (2579, 2587), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Dropout, TimeDistributed, Flatten, Reshape, Softmax\n'), ((2617, 2657), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['classNum', '(1, 1)'], {'padding': '"""same"""'}), "(classNum, (1, 1), 
padding='same')\n", (2623, 2657), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Dropout, TimeDistributed, Flatten, Reshape, Softmax\n'), ((2701, 2710), 'tensorflow.keras.layers.Softmax', 'Softmax', ([], {}), '()\n', (2708, 2710), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Dropout, TimeDistributed, Flatten, Reshape, Softmax\n'), ((2782, 2847), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(classNum * 4)', '(1, 1)'], {'activation': '"""linear"""', 'padding': '"""same"""'}), "(classNum * 4, (1, 1), activation='linear', padding='same')\n", (2788, 2847), False, 'from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, Dropout, TimeDistributed, Flatten, Reshape, Softmax\n')] |
import numpy as np
import warnings
def drop_outlier(adata,
                 thresh=15,
                 axis=0,
                 drop=True,
                 verbose=False):
    """Drop all features or cells with a min or max absolute value that is greater than a threshold.

    Expects normally distributed data. NaN entries are ignored when
    searching for extreme values (consistent with the ``np.nanmean``-based
    normality check below), so a feature/cell is only flagged because of
    its finite values.

    Args:
        adata (anndata.AnnData): Multidimensional morphological data.
        thresh (int): Threshold for outlier identification.
        axis (int): 0 means along features, 1 means along cells.
        drop (bool): Drop features/ cells with outliers if True.
        verbose (bool)

    Returns:
        anndata.AnnData
        Only if axis is 0:
            .uns['outlier_feats']: Dropped features with outliers.
            .var['outlier_feats']: True for features that contain outliers.
                Only if drop is False.
    """
    assert axis in [0, 1], f"axis has to be either 0 (features) or 1 (cells), instead got {axis}"
    assert isinstance(thresh, (int, float)), f"thresh expected to be of type(int) or type(float), " \
                                             f"instead got {type(thresh)}"

    # brief check for normal distribution
    means = np.nanmean(adata.X, axis=0)
    if not all(np.logical_and(means > -2, means < 2)):
        warnings.warn("Data does not seem to be normally distributed, "
                      "use normalize() with 'standard', 'mad_robust' or 'robust' beforehand.")

    # largest absolute value along the chosen axis; nanmax ignores NaNs so
    # a NaN entry does not silently force its feature/cell to be dropped
    # (np.max/np.min would propagate NaN and fail the <= comparison)
    extremes = np.nanmax(np.abs(adata.X), axis=axis)
    mask = extremes <= thresh

    if axis == 0:
        dropped_feats = adata.var_names[~mask]
        if verbose:
            print(f"Drop {len(dropped_feats)} features with outlier values: {dropped_feats}")
        # drop features
        if drop:
            adata = adata[:, mask].copy()
            adata.uns['outlier_feats'] = dropped_feats
        else:
            adata.var['outlier_feats'] = ~mask
    else:
        n_before = len(adata)
        if drop:
            adata = adata[mask, :].copy()
        if verbose:
            print(f"{n_before - len(adata)} cell removed with feature values >= or <= {thresh}")
    return adata
"numpy.logical_and",
"numpy.max",
"numpy.nanmean",
"numpy.min",
"warnings.warn"
] | [((907, 934), 'numpy.nanmean', 'np.nanmean', (['adata.X'], {'axis': '(0)'}), '(adata.X, axis=0)\n', (917, 934), True, 'import numpy as np\n'), ((1551, 1609), 'numpy.logical_and', 'np.logical_and', (['(max_values <= thresh)', '(min_values <= thresh)'], {}), '(max_values <= thresh, min_values <= thresh)\n', (1565, 1609), True, 'import numpy as np\n'), ((998, 1141), 'warnings.warn', 'warnings.warn', (['"""Data does not seem to be normally distributed, use normalize() with \'standard\', \'mad_robust\' or \'robust\' beforehand."""'], {}), '(\n "Data does not seem to be normally distributed, use normalize() with \'standard\', \'mad_robust\' or \'robust\' beforehand."\n )\n', (1011, 1141), False, 'import warnings\n'), ((1281, 1307), 'numpy.max', 'np.max', (['adata.X'], {'axis': 'axis'}), '(adata.X, axis=axis)\n', (1287, 1307), True, 'import numpy as np\n'), ((1333, 1359), 'numpy.min', 'np.min', (['adata.X'], {'axis': 'axis'}), '(adata.X, axis=axis)\n', (1339, 1359), True, 'import numpy as np\n'), ((950, 987), 'numpy.logical_and', 'np.logical_and', (['(means > -2)', '(means < 2)'], {}), '(means > -2, means < 2)\n', (964, 987), True, 'import numpy as np\n')] |
'''
This script serves to get the speech files prepared for training neural networks, with "matched" noise added to the training data.
Speech was collected from the Saarbücken Voice Database
'''
import pandas as pd
import numpy as np
import librosa
import sqlite3
from sqlite3 import Error
import glob
from pathlib import Path
import time
import random
import math
from get_speech_features import get_samps, get_mfcc, get_fundfreq, get_domfreq
def collect_filenames(filename):
    """Return the paths of every .wav file under ./data/<filename>/."""
    pattern = "./data/{}/*.wav".format(filename)
    return list(glob.glob(pattern))
def get_speaker_id(path):
    '''
    Databases often save relevant data in the name of the file. This
    function extracts the speaker id, i.e. the part of the .wav filename
    before the first "-".
    '''
    # Work from the basename so this holds for any directory depth;
    # the previous Path(path).parts[2] only matched paths of the exact
    # form ./data/<group>/<file>.wav.
    filename = Path(path).name
    return filename.split("-")[0]
def collect_features(dict_speech_features, filename, group):
    """Extract speech features for every .wav in ./data/<filename>/.

    For each file: 40 MFCCs plus fundamental- and dominant-frequency
    columns are concatenated per frame, and stored in the dict under the
    speaker id together with a sex label (0 = female, 1 = male).
    Returns the (mutated) dictionary.
    """
    print("now processing {} speech.".format(group))
    sample_rate = 16000
    # value attributed to sex (0 = female, 1 = male); constant per group
    sex_label = 0 if group == "female" else 1
    for wav_path in collect_filenames(filename):
        speaker = get_speaker_id(wav_path)
        samples = get_samps(wav_path, sample_rate)
        mfcc_feats = get_mfcc(samples, sample_rate)
        fund = np.array(get_fundfreq(samples, sample_rate))
        fund = fund.reshape(len(fund), 1)
        dom = np.array(get_domfreq(samples, sample_rate))
        dom = dom.reshape(len(dom), 1)
        # one row per frame: [40 MFCCs, fundamental freq, dominant freq]
        combined = np.concatenate((mfcc_feats, fund, dom), axis=1)
        dict_speech_features[speaker] = (combined, sex_label)
    print("successfully extracted features")
    return dict_speech_features
def dataprep_SQL(dict_speech_features):
    """Flatten the feature dict into SQL-ready rows.

    Each frame of each speaker becomes one immutable tuple shaped
    (speaker_id, feature_1, ..., feature_n, sex), matching the column
    order of the target table (speaker id first, sex last).
    """
    prepped_data = []
    for speaker_id, (feature_matrix, sex) in dict_speech_features.items():
        # one output row per feature row (e.g. per 25 ms frame)
        prepped_data.extend(
            tuple([speaker_id] + list(row) + [sex]) for row in feature_matrix
        )
    return prepped_data
def save_data_sql(prepped_data, database, table_name):
    """Bulk-insert prepared feature rows into an SQLite table.

    Parameters
    ----------
    prepped_data : list of tuple
        Rows to insert; each tuple supplies every column except the
        auto-increment id (covered by the leading NULL in the statement).
    database : str
        Path of the SQLite database file.
    table_name : str
        Name of the target table. NOTE: interpolated directly into the
        SQL text -- must come from trusted code, never from user input.

    Returns
    -------
    None
    """
    if not prepped_data:
        # nothing to insert; avoids IndexError on prepped_data[0] below
        return None
    # define up front so the finally clause cannot hit an unbound name
    # if sqlite3.connect() itself raises
    conn = None
    try:
        conn = sqlite3.connect(database)
        c = conn.cursor()
        num_cols = len(prepped_data[0])
        # build one '?' placeholder per data column
        cols = ""
        for i in range(num_cols):
            if i != num_cols-1:
                cols += " ?,"
            else:
                cols += " ?"
        msg = '''INSERT INTO %s VALUES(NULL, %s)''' % (table_name,cols)
        c.executemany(msg, prepped_data)
        conn.commit()
        print("All speech and noise data saved successfully!")
    except Error as e:
        print("Database Error: {}".format(e))
    finally:
        if conn:
            conn.close()
    return None
if __name__=="__main__":
conn = None
start = time.time()
#initialize the dictionary that will collect the speech features according to speaker id
# perk about dictionaries?
# they don't let you enter in more than one kind of key --> you will get a key error
dict_speech_features = {}
try:
dict_speech_features = collect_features(dict_speech_features,"female_speech","female")
dict_speech_features = collect_features(dict_speech_features,"male_speech","male")
#prep the dictionary to insert data into SQL table
data_prepped_4_SQL = dataprep_SQL(dict_speech_features)
#insert data to SQL table
#need relevant infos:
database = "male_female_speech_svd.db"
table_name = "features_mfcc_freq"
save_data_sql(data_prepped_4_SQL, database, table_name)
except KeyError as e:
print("The speaker ID was repeated. Check for duplicates in your data.")
finally:
end = time.time()
print("Total time: {} seconds".format(round(end - start),3))
| [
"get_speech_features.get_mfcc",
"sqlite3.connect",
"pathlib.Path",
"get_speech_features.get_fundfreq",
"get_speech_features.get_domfreq",
"numpy.concatenate",
"get_speech_features.get_samps",
"time.time"
] | [((3417, 3428), 'time.time', 'time.time', ([], {}), '()\n', (3426, 3428), False, 'import time\n'), ((1124, 1142), 'get_speech_features.get_samps', 'get_samps', (['wav', 'sr'], {}), '(wav, sr)\n', (1133, 1142), False, 'from get_speech_features import get_samps, get_mfcc, get_fundfreq, get_domfreq\n'), ((1157, 1172), 'get_speech_features.get_mfcc', 'get_mfcc', (['y', 'sr'], {}), '(y, sr)\n', (1165, 1172), False, 'from get_speech_features import get_samps, get_mfcc, get_fundfreq, get_domfreq\n'), ((1406, 1455), 'numpy.concatenate', 'np.concatenate', (['(mfcc, fundfreq, domfreq)'], {'axis': '(1)'}), '((mfcc, fundfreq, domfreq), axis=1)\n', (1420, 1455), True, 'import numpy as np\n'), ((2704, 2729), 'sqlite3.connect', 'sqlite3.connect', (['database'], {}), '(database)\n', (2719, 2729), False, 'import sqlite3\n'), ((4384, 4395), 'time.time', 'time.time', ([], {}), '()\n', (4393, 4395), False, 'import time\n'), ((805, 815), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (809, 815), False, 'from pathlib import Path\n'), ((1200, 1219), 'get_speech_features.get_fundfreq', 'get_fundfreq', (['y', 'sr'], {}), '(y, sr)\n', (1212, 1219), False, 'from get_speech_features import get_samps, get_mfcc, get_fundfreq, get_domfreq\n'), ((1309, 1327), 'get_speech_features.get_domfreq', 'get_domfreq', (['y', 'sr'], {}), '(y, sr)\n', (1320, 1327), False, 'from get_speech_features import get_samps, get_mfcc, get_fundfreq, get_domfreq\n')] |
#----------------------------------------------------------------------------
# Name: wet_antenna
# Purpose: Estimation and removal of wet antenna effects
#
# Authors: <NAME>
#
# Created: 01.12.2014
# Copyright: (c) <NAME> 2014
# Licence: The MIT License
#----------------------------------------------------------------------------
from builtins import range
import numpy as np
import pandas as pd
from numba.decorators import jit
########################################
# Functions for wet antenna estimation #
########################################
@jit(nopython=True)
def _numba_waa_schleiss(rsl, baseline, waa_max, delta_t, tau, wet):
    """Calculate the wet antenna attenuation time series (Schleiss model)

    Parameters
    ----------
    rsl : np.ndarray of float
        Time series of received signal level
    baseline : np.ndarray of float
        Time series of baseline for rsl
    waa_max : float
        Maximum value of wet antenna attenuation
    delta_t : float
        Parameter for wet antenna attenuation model
    tau : float
        Parameter for wet antenna attenuation model
    wet : np.ndarray of int or float
        Wet/dry classification information (truthy = wet)

    Returns
    -------
    np.ndarray of float
        Time series of wet antenna attenuation; waa[0] is always 0.

    Note
    ----
    The wet antenna adjusting is based on a peer-reviewed publication [3]_

    References
    ----------
    .. [3] Schleiss M., Rieckermann J. and Berne A.: "Quantification and
           modeling of wet-antenna attenuation for commercial microwave
           links", IEEE Geoscience and Remote Sensing Letters, 10, 2013
    """
    waa = np.zeros_like(rsl, dtype=np.float64)
    # total attenuation above baseline; waa can never exceed it
    A = rsl - baseline
    for i in range(1,len(rsl)):
        if wet[i] == True:
            # during wet periods waa relaxes towards waa_max with time
            # constant tau, but is capped by A and waa_max
            waa[i] = min(A[i],
                         waa_max,
                         waa[i-1] + (waa_max-waa[i-1])*3*delta_t/tau)
        else:
            waa[i] = min(A[i],
                         waa_max)
    return waa
def waa_adjust_baseline(rsl, baseline, wet, waa_max, delta_t, tau):
    """Calculate baseline adjustment due to wet antenna

    Parameters
    ----------
    rsl : iterable of float
        Time series of received signal level
    baseline : iterable of float
        Time series of baseline for rsl
    wet : iterable of int or iterable of float
        Time series with wet/dry classification information.
    waa_max : float
        Maximum value of wet antenna attenuation
    delta_t : float
        Parameter for wet antenna attenuation model
    tau : float
        Parameter for wet antenna attenuation model

    Returns
    -------
    iterable of float
        Adjusted time series of baseline (baseline + wet antenna
        attenuation)
    """
    # Accept pandas Series transparently; isinstance also covers Series
    # subclasses, unlike the previous exact type() comparison.
    if isinstance(rsl, pd.Series):
        rsl = rsl.values
    if isinstance(baseline, pd.Series):
        baseline = baseline.values
    if isinstance(wet, pd.Series):
        wet = wet.values

    # numba kernel is compiled for float64 input
    rsl = rsl.astype(np.float64)
    baseline = baseline.astype(np.float64)
    wet = wet.astype(np.float64)

    waa = _numba_waa_schleiss(rsl, baseline, waa_max, delta_t, tau, wet)
    return baseline + waa
| [
"numba.decorators.jit",
"numpy.zeros_like"
] | [((592, 610), 'numba.decorators.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (595, 610), False, 'from numba.decorators import jit\n'), ((1658, 1694), 'numpy.zeros_like', 'np.zeros_like', (['rsl'], {'dtype': 'np.float64'}), '(rsl, dtype=np.float64)\n', (1671, 1694), True, 'import numpy as np\n')] |
import pyhecdss
import pandas as pd
import numpy as np
import os
def test_read_write_cycle_rts():
    '''
    Test reading and writing of period time stamped data so
    that reads and writes don't result in shifting the data
    '''
    fname = "test2.dss"
    # start from a clean slate
    if os.path.exists(fname):
        os.remove(fname)
    path = '/SAMPLE/SIN/WAVE/01JAN1990 - 01JAN1990/15MIN/SAMPLE1/'
    sina = np.sin(np.linspace(-np.pi, np.pi, 201))
    dfr = pd.DataFrame(sina,
                       index=pd.period_range('01jan1990 0100', periods=len(sina), freq='15T'),
                       columns=[path])
    # write through one handle and close it
    d = pyhecdss.DSSFile(fname, create_new=True)
    unit2, ptype2 = 'UNIT-X', 'PER-VAL'
    d.write_rts(path, dfr, unit2, ptype2)
    d.close()
    # read back through a fresh handle (previously this read via the
    # already-closed writer handle 'd', and 'd2' was never closed)
    d2 = pyhecdss.DSSFile(fname)
    plist2 = d2.get_pathnames()
    path = plist2[0]
    dfr2, unit2, ptype2 = d2.read_rts(path)
    d2.close()
    pd.testing.assert_frame_equal(dfr, dfr2)
| [
"os.path.exists",
"pyhecdss.DSSFile",
"numpy.linspace",
"pandas.testing.assert_frame_equal",
"os.remove"
] | [((279, 300), 'os.path.exists', 'os.path.exists', (['fname'], {}), '(fname)\n', (293, 300), False, 'import os\n'), ((625, 665), 'pyhecdss.DSSFile', 'pyhecdss.DSSFile', (['fname'], {'create_new': '(True)'}), '(fname, create_new=True)\n', (641, 665), False, 'import pyhecdss\n'), ((782, 805), 'pyhecdss.DSSFile', 'pyhecdss.DSSFile', (['fname'], {}), '(fname)\n', (798, 805), False, 'import pyhecdss\n'), ((910, 950), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['dfr', 'dfr2'], {}), '(dfr, dfr2)\n', (939, 950), True, 'import pandas as pd\n'), ((311, 327), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (320, 327), False, 'import os\n'), ((415, 446), 'numpy.linspace', 'np.linspace', (['(-np.pi)', 'np.pi', '(201)'], {}), '(-np.pi, np.pi, 201)\n', (426, 446), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
from matplotlib import cm
# Render the same 2-D Gaussian three ways (surface, wireframe, scatter),
# saving each rendering to its own PNG and clearing the axes in between.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')

# square grid over [-15, 15) with 0.5 spacing
x = y = np.arange(-15, 15, 0.5)
X, Y = np.meshgrid(x, y)

# isotropic 2-D Gaussian density with standard deviation sigma
sigma = 4
r_squared = X**2 + Y**2
Z = np.exp(-r_squared / (2 * sigma**2)) / (2 * np.pi * sigma**2)

ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm)
plt.savefig("data/dst/matplotlib_mplot3d_surface.png")

ax.clear()
ax.plot_wireframe(X, Y, Z, rstride=2, cstride=2)
plt.savefig("data/dst/matplotlib_mplot3d_wireframe.png")

ax.clear()
ax.scatter(X, Y, Z, s=1)
plt.savefig("data/dst/matplotlib_mplot3d_scatter.png")
| [
"matplotlib.pyplot.savefig",
"numpy.exp",
"matplotlib.pyplot.figure",
"numpy.meshgrid",
"numpy.arange"
] | [((124, 136), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (134, 136), True, 'import matplotlib.pyplot as plt\n'), ((189, 212), 'numpy.arange', 'np.arange', (['(-15)', '(15)', '(0.5)'], {}), '(-15, 15, 0.5)\n', (198, 212), True, 'import numpy as np\n'), ((220, 237), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (231, 237), True, 'import numpy as np\n'), ((376, 430), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""data/dst/matplotlib_mplot3d_surface.png"""'], {}), "('data/dst/matplotlib_mplot3d_surface.png')\n", (387, 430), True, 'import matplotlib.pyplot as plt\n'), ((492, 548), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""data/dst/matplotlib_mplot3d_wireframe.png"""'], {}), "('data/dst/matplotlib_mplot3d_wireframe.png')\n", (503, 548), True, 'import matplotlib.pyplot as plt\n'), ((586, 640), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""data/dst/matplotlib_mplot3d_scatter.png"""'], {}), "('data/dst/matplotlib_mplot3d_scatter.png')\n", (597, 640), True, 'import matplotlib.pyplot as plt\n'), ((253, 298), 'numpy.exp', 'np.exp', (['(-(X ** 2 + Y ** 2) / (2 * sigma ** 2))'], {}), '(-(X ** 2 + Y ** 2) / (2 * sigma ** 2))\n', (259, 298), True, 'import numpy as np\n')] |
import os
from random import seed
import numpy as np
from hyperopt import hp, tpe, rand
import pytest
from sklearn.metrics import mean_squared_error as mse, roc_auc_score as roc
from fedot.core.data.data import InputData
from fedot.core.data.data_split import train_test_data_setup
from fedot.core.pipelines.node import PrimaryNode, SecondaryNode
from fedot.core.pipelines.pipeline import Pipeline
from fedot.core.pipelines.tuning.sequential import SequentialTuner
from fedot.core.pipelines.tuning.unified import PipelineTuner
from fedot.core.pipelines.tuning.search_space import SearchSpace
from fedot.core.repository.tasks import Task, TaskTypesEnum
from test.unit.tasks.test_forecasting import get_ts_data
# Fix both the stdlib and the numpy RNG so the hyperopt-based tests below
# run deterministically.
seed(1)
np.random.seed(1)
@pytest.fixture()
def regression_dataset():
    """Advanced regression dataset loaded from the shared test data folder."""
    this_dir = str(os.path.dirname(__file__))
    csv_file = os.path.join('../../data', 'advanced_regression.csv')
    return InputData.from_csv(os.path.join(this_dir, csv_file),
                              task=Task(TaskTypesEnum.regression))
@pytest.fixture()
def classification_dataset():
    """Advanced classification dataset loaded from the shared test data folder."""
    this_dir = str(os.path.dirname(__file__))
    csv_file = os.path.join('../../data', 'advanced_classification.csv')
    return InputData.from_csv(os.path.join(this_dir, csv_file),
                              task=Task(TaskTypesEnum.classification))
def get_simple_regr_pipeline():
    """Single-node regression pipeline: xgbreg only."""
    return Pipeline(PrimaryNode(operation_type='xgbreg'))
def get_complex_regr_pipeline():
    """Branched regression pipeline: scaling -> (ridge, linear) -> xgbreg."""
    scaling = PrimaryNode(operation_type='scaling')
    ridge = SecondaryNode('ridge', nodes_from=[scaling])
    linear = SecondaryNode('linear', nodes_from=[scaling])
    root = SecondaryNode('xgbreg', nodes_from=[ridge, linear])
    return Pipeline(root)
def get_simple_class_pipeline():
    """Single-node classification pipeline: logit only."""
    return Pipeline(PrimaryNode(operation_type='logit'))
def get_complex_class_pipeline():
    """Classification pipeline with two primary nodes (xgboost, pca) feeding a logit root."""
    xgboost_node = PrimaryNode(operation_type='xgboost')
    pca_node = PrimaryNode(operation_type='pca')
    root = SecondaryNode(operation_type='logit',
                         nodes_from=[xgboost_node, pca_node])
    return Pipeline(root)
def get_not_default_search_space():
    """Build a SearchSpace with customised hyperparameter ranges.

    Only the operations listed here are overridden; every other operation
    keeps its default search space.
    """
    return SearchSpace(custom_search_space={
        # linear / boosted models
        'logit': {
            'C': (hp.uniform, [0.01, 5.0])
        },
        'ridge': {
            'alpha': (hp.uniform, [0.01, 5.0])
        },
        'xgbreg': {
            'n_estimators': (hp.choice, [[100]]),
            'max_depth': (hp.choice, [range(1, 7)]),
            'learning_rate': (hp.choice, [[1e-3, 1e-2, 1e-1]]),
            'subsample': (hp.choice, [np.arange(0.15, 1.01, 0.05)])
        },
        'xgboost': {
            'max_depth': (hp.choice, [range(1, 5)]),
            'subsample': (hp.uniform, [0.1, 0.9]),
            'min_child_weight': (hp.choice, [range(1, 15)])
        },
        # time-series model
        'ar': {
            'lag_1': (hp.uniform, [2, 100]),
            'lag_2': (hp.uniform, [2, 500])
        },
        # preprocessing
        'pca': {
            'n_components': (hp.uniform, [0.2, 0.8])
        }
    })
@pytest.mark.parametrize('data_fixture', ['classification_dataset'])
def test_custom_params_setter(data_fixture, request):
    """Custom params set on the root node must reach the fitted operation."""
    dataset = request.getfixturevalue(data_fixture)
    pipeline = get_complex_class_pipeline()
    pipeline.root_node.custom_params = {'C': 10}
    pipeline.fit(dataset)
    fitted_params = pipeline.root_node.fitted_operation.get_params()
    assert fitted_params['C'] == 10
@pytest.mark.parametrize('data_fixture', ['regression_dataset'])
def test_pipeline_tuner_regression_correct(data_fixture, request):
    """Check that hyperopt-based PipelineTuner runs on regression pipelines."""
    data = request.getfixturevalue(data_fixture)
    train_data, _ = train_test_data_setup(data=data)
    # Every combination of pipeline complexity, search space and algorithm
    for candidate in (get_simple_regr_pipeline(), get_complex_regr_pipeline()):
        for space in (SearchSpace(), get_not_default_search_space()):
            for algorithm in (tpe.suggest, rand.suggest):
                tuner = PipelineTuner(pipeline=candidate,
                                      task=train_data.task,
                                      iterations=1,
                                      search_space=space,
                                      algo=algorithm)
                # Optimization runs on RMSE, hence squared=False in loss params
                tuner.tune_pipeline(input_data=train_data,
                                    loss_function=mse,
                                    loss_params={'squared': False})
                is_tuning_finished = True
    assert is_tuning_finished
@pytest.mark.parametrize('data_fixture', ['classification_dataset'])
def test_pipeline_tuner_classification_correct(data_fixture, request):
    """Check that hyperopt-based PipelineTuner runs on classification pipelines."""
    data = request.getfixturevalue(data_fixture)
    train_data, _ = train_test_data_setup(data=data)
    # Every combination of pipeline complexity, search space and algorithm
    for candidate in (get_simple_class_pipeline(), get_complex_class_pipeline()):
        for space in (SearchSpace(), get_not_default_search_space()):
            for algorithm in (tpe.suggest, rand.suggest):
                tuner = PipelineTuner(pipeline=candidate,
                                      task=train_data.task,
                                      iterations=1,
                                      search_space=space,
                                      algo=algorithm)
                tuner.tune_pipeline(input_data=train_data,
                                    loss_function=roc)
                is_tuning_finished = True
    assert is_tuning_finished
@pytest.mark.parametrize('data_fixture', ['regression_dataset'])
def test_sequential_tuner_regression_correct(data_fixture, request):
    """Check that hyperopt-based SequentialTuner runs on regression pipelines."""
    data = request.getfixturevalue(data_fixture)
    train_data, _ = train_test_data_setup(data=data)
    # Every combination of pipeline complexity, search space and algorithm
    for candidate in (get_simple_regr_pipeline(), get_complex_regr_pipeline()):
        for space in (SearchSpace(), get_not_default_search_space()):
            for algorithm in (tpe.suggest, rand.suggest):
                tuner = SequentialTuner(pipeline=candidate,
                                        task=train_data.task,
                                        iterations=1,
                                        search_space=space,
                                        algo=algorithm)
                # Optimization runs on RMSE, hence squared=False in loss params
                tuner.tune_pipeline(input_data=train_data,
                                    loss_function=mse,
                                    loss_params={'squared': False})
                is_tuning_finished = True
    assert is_tuning_finished
@pytest.mark.parametrize('data_fixture', ['classification_dataset'])
def test_sequential_tuner_classification_correct(data_fixture, request):
    """Check that hyperopt-based SequentialTuner runs on classification pipelines."""
    data = request.getfixturevalue(data_fixture)
    train_data, _ = train_test_data_setup(data=data)
    # Every combination of pipeline complexity, search space and algorithm
    for candidate in (get_simple_class_pipeline(), get_complex_class_pipeline()):
        for space in (SearchSpace(), get_not_default_search_space()):
            for algorithm in (tpe.suggest, rand.suggest):
                tuner = SequentialTuner(pipeline=candidate,
                                        task=train_data.task,
                                        iterations=2,
                                        search_space=space,
                                        algo=algorithm)
                tuner.tune_pipeline(input_data=train_data,
                                    loss_function=roc)
                is_tuning_finished = True
    assert is_tuning_finished
@pytest.mark.parametrize('data_fixture', ['regression_dataset'])
def test_certain_node_tuning_regression_correct(data_fixture, request):
    """Check that SequentialTuner can tune a single node of a regression pipeline."""
    data = request.getfixturevalue(data_fixture)
    train_data, _ = train_test_data_setup(data=data)
    # Every combination of pipeline complexity, search space and algorithm
    for candidate in (get_simple_regr_pipeline(), get_complex_regr_pipeline()):
        for space in (SearchSpace(), get_not_default_search_space()):
            for algorithm in (tpe.suggest, rand.suggest):
                tuner = SequentialTuner(pipeline=candidate,
                                        task=train_data.task,
                                        iterations=1,
                                        search_space=space,
                                        algo=algorithm)
                tuner.tune_node(input_data=train_data,
                                node_index=0,
                                loss_function=mse)
                is_tuning_finished = True
    assert is_tuning_finished
@pytest.mark.parametrize('data_fixture', ['classification_dataset'])
def test_certain_node_tuning_classification_correct(data_fixture, request):
    """Check that SequentialTuner can tune a single node of a classification pipeline."""
    data = request.getfixturevalue(data_fixture)
    train_data, _ = train_test_data_setup(data=data)
    # Every combination of pipeline complexity, search space and algorithm
    for candidate in (get_simple_class_pipeline(), get_complex_class_pipeline()):
        for space in (SearchSpace(), get_not_default_search_space()):
            for algorithm in (tpe.suggest, rand.suggest):
                tuner = SequentialTuner(pipeline=candidate,
                                        task=train_data.task,
                                        iterations=1,
                                        search_space=space,
                                        algo=algorithm)
                tuner.tune_node(input_data=train_data,
                                node_index=0,
                                loss_function=roc)
                is_tuning_finished = True
    assert is_tuning_finished
def test_ts_pipeline_with_stats_model():
    """Run PipelineTuner on a time-series forecasting pipeline built around an AR model."""
    train_data, _ = get_ts_data(n_steps=200, forecast_length=5)
    ar_pipeline = Pipeline(PrimaryNode('ar'))
    for space in (SearchSpace(), get_not_default_search_space()):
        for algorithm in (tpe.suggest, rand.suggest):
            tuner_ar = PipelineTuner(pipeline=ar_pipeline, task=train_data.task, iterations=3,
                                     search_space=space, algo=algorithm)
            tuner_ar.tune_pipeline(input_data=train_data,
                                   loss_function=mse)
            is_tuning_finished = True
    assert is_tuning_finished
def test_search_space_correctness_after_customization():
    """Custom search space keeps the default parameter keys unless replacement is requested."""
    default_space = SearchSpace()
    gbr_overrides = {'gbr': {'max_depth': (hp.choice, [[3, 7, 31, 127, 8191, 131071]])}}
    merged_space = SearchSpace(custom_search_space=gbr_overrides,
                               replace_default_search_space=False)
    replaced_space = SearchSpace(custom_search_space=gbr_overrides,
                                 replace_default_search_space=True)
    default_params = default_space.get_node_params(node_id=0, operation_name='gbr')
    merged_params = merged_space.get_node_params(node_id=0, operation_name='gbr')
    replaced_params = replaced_space.get_node_params(node_id=0, operation_name='gbr')
    # Merging keeps every default key; replacing drops all but the custom ones
    assert default_params.keys() == merged_params.keys()
    assert default_params.keys() != replaced_params.keys()
    # The overridden parameter differs from the default in both cases
    assert default_params['0 || gbr | max_depth'] != merged_params['0 || gbr | max_depth']
    assert default_params['0 || gbr | max_depth'] != replaced_params['0 || gbr | max_depth']
def test_search_space_get_operation_parameter_range():
    """get_operation_parameter_range reflects custom spaces only when defaults are replaced."""
    expected_gbr_params = ['n_estimators', 'loss', 'learning_rate', 'max_depth', 'min_samples_split',
                           'min_samples_leaf', 'subsample', 'max_features', 'alpha']
    default_space = SearchSpace()
    gbr_overrides = {'gbr': {'max_depth': (hp.choice, [[3, 7, 31, 127, 8191, 131071]])}}
    merged_space = SearchSpace(custom_search_space=gbr_overrides,
                               replace_default_search_space=False)
    replaced_space = SearchSpace(custom_search_space=gbr_overrides,
                                 replace_default_search_space=True)
    assert default_space.get_operation_parameter_range('gbr') == expected_gbr_params
    assert merged_space.get_operation_parameter_range('gbr') == expected_gbr_params
    assert replaced_space.get_operation_parameter_range('gbr') == ['max_depth']
| [
"fedot.core.pipelines.tuning.unified.PipelineTuner",
"fedot.core.pipelines.node.SecondaryNode",
"numpy.arange",
"fedot.core.data.data_split.train_test_data_setup",
"os.path.join",
"fedot.core.pipelines.pipeline.Pipeline",
"random.seed",
"fedot.core.pipelines.tuning.sequential.SequentialTuner",
"pyte... | [((712, 719), 'random.seed', 'seed', (['(1)'], {}), '(1)\n', (716, 719), False, 'from random import seed\n'), ((720, 737), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (734, 737), True, 'import numpy as np\n'), ((741, 757), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (755, 757), False, 'import pytest\n'), ((1007, 1023), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1021, 1023), False, 'import pytest\n'), ((3143, 3210), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data_fixture"""', "['classification_dataset']"], {}), "('data_fixture', ['classification_dataset'])\n", (3166, 3210), False, 'import pytest\n'), ((3562, 3625), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data_fixture"""', "['regression_dataset']"], {}), "('data_fixture', ['regression_dataset'])\n", (3585, 3625), False, 'import pytest\n'), ((4976, 5043), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data_fixture"""', "['classification_dataset']"], {}), "('data_fixture', ['classification_dataset'])\n", (4999, 5043), False, 'import pytest\n'), ((6218, 6281), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data_fixture"""', "['regression_dataset']"], {}), "('data_fixture', ['regression_dataset'])\n", (6241, 6281), False, 'import pytest\n'), ((7662, 7729), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data_fixture"""', "['classification_dataset']"], {}), "('data_fixture', ['classification_dataset'])\n", (7685, 7729), False, 'import pytest\n'), ((8932, 8995), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data_fixture"""', "['regression_dataset']"], {}), "('data_fixture', ['regression_dataset'])\n", (8955, 8995), False, 'import pytest\n'), ((10264, 10331), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data_fixture"""', "['classification_dataset']"], {}), "('data_fixture', ['classification_dataset'])\n", (10287, 10331), False, 'import pytest\n'), ((847, 900), 
'os.path.join', 'os.path.join', (['"""../../data"""', '"""advanced_regression.csv"""'], {}), "('../../data', 'advanced_regression.csv')\n", (859, 900), False, 'import os\n'), ((1117, 1174), 'os.path.join', 'os.path.join', (['"""../../data"""', '"""advanced_classification.csv"""'], {}), "('../../data', 'advanced_classification.csv')\n", (1129, 1174), False, 'import os\n'), ((1328, 1364), 'fedot.core.pipelines.node.PrimaryNode', 'PrimaryNode', ([], {'operation_type': '"""xgbreg"""'}), "(operation_type='xgbreg')\n", (1339, 1364), False, 'from fedot.core.pipelines.node import PrimaryNode, SecondaryNode\n'), ((1380, 1395), 'fedot.core.pipelines.pipeline.Pipeline', 'Pipeline', (['final'], {}), '(final)\n', (1388, 1395), False, 'from fedot.core.pipelines.pipeline import Pipeline\n'), ((1471, 1508), 'fedot.core.pipelines.node.PrimaryNode', 'PrimaryNode', ([], {'operation_type': '"""scaling"""'}), "(operation_type='scaling')\n", (1482, 1508), False, 'from fedot.core.pipelines.node import PrimaryNode, SecondaryNode\n'), ((1526, 1575), 'fedot.core.pipelines.node.SecondaryNode', 'SecondaryNode', (['"""ridge"""'], {'nodes_from': '[node_scaling]'}), "('ridge', nodes_from=[node_scaling])\n", (1539, 1575), False, 'from fedot.core.pipelines.node import PrimaryNode, SecondaryNode\n'), ((1594, 1644), 'fedot.core.pipelines.node.SecondaryNode', 'SecondaryNode', (['"""linear"""'], {'nodes_from': '[node_scaling]'}), "('linear', nodes_from=[node_scaling])\n", (1607, 1644), False, 'from fedot.core.pipelines.node import PrimaryNode, SecondaryNode\n'), ((1657, 1718), 'fedot.core.pipelines.node.SecondaryNode', 'SecondaryNode', (['"""xgbreg"""'], {'nodes_from': '[node_ridge, node_linear]'}), "('xgbreg', nodes_from=[node_ridge, node_linear])\n", (1670, 1718), False, 'from fedot.core.pipelines.node import PrimaryNode, SecondaryNode\n'), ((1734, 1749), 'fedot.core.pipelines.pipeline.Pipeline', 'Pipeline', (['final'], {}), '(final)\n', (1742, 1749), False, 'from fedot.core.pipelines.pipeline 
import Pipeline\n'), ((1818, 1853), 'fedot.core.pipelines.node.PrimaryNode', 'PrimaryNode', ([], {'operation_type': '"""logit"""'}), "(operation_type='logit')\n", (1829, 1853), False, 'from fedot.core.pipelines.node import PrimaryNode, SecondaryNode\n'), ((1869, 1884), 'fedot.core.pipelines.pipeline.Pipeline', 'Pipeline', (['final'], {}), '(final)\n', (1877, 1884), False, 'from fedot.core.pipelines.pipeline import Pipeline\n'), ((1954, 1991), 'fedot.core.pipelines.node.PrimaryNode', 'PrimaryNode', ([], {'operation_type': '"""xgboost"""'}), "(operation_type='xgboost')\n", (1965, 1991), False, 'from fedot.core.pipelines.node import PrimaryNode, SecondaryNode\n'), ((2005, 2038), 'fedot.core.pipelines.node.PrimaryNode', 'PrimaryNode', ([], {'operation_type': '"""pca"""'}), "(operation_type='pca')\n", (2016, 2038), False, 'from fedot.core.pipelines.node import PrimaryNode, SecondaryNode\n'), ((2051, 2116), 'fedot.core.pipelines.node.SecondaryNode', 'SecondaryNode', ([], {'operation_type': '"""logit"""', 'nodes_from': '[first, second]'}), "(operation_type='logit', nodes_from=[first, second])\n", (2064, 2116), False, 'from fedot.core.pipelines.node import PrimaryNode, SecondaryNode\n'), ((2159, 2174), 'fedot.core.pipelines.pipeline.Pipeline', 'Pipeline', (['final'], {}), '(final)\n', (2167, 2174), False, 'from fedot.core.pipelines.pipeline import Pipeline\n'), ((3087, 3139), 'fedot.core.pipelines.tuning.search_space.SearchSpace', 'SearchSpace', ([], {'custom_search_space': 'custom_search_space'}), '(custom_search_space=custom_search_space)\n', (3098, 3139), False, 'from fedot.core.pipelines.tuning.search_space import SearchSpace\n'), ((3840, 3872), 'fedot.core.data.data_split.train_test_data_setup', 'train_test_data_setup', ([], {'data': 'data'}), '(data=data)\n', (3861, 3872), False, 'from fedot.core.data.data_split import train_test_data_setup\n'), ((5262, 5294), 'fedot.core.data.data_split.train_test_data_setup', 'train_test_data_setup', ([], {'data': 'data'}), 
'(data=data)\n', (5283, 5294), False, 'from fedot.core.data.data_split import train_test_data_setup\n'), ((6500, 6532), 'fedot.core.data.data_split.train_test_data_setup', 'train_test_data_setup', ([], {'data': 'data'}), '(data=data)\n', (6521, 6532), False, 'from fedot.core.data.data_split import train_test_data_setup\n'), ((7952, 7984), 'fedot.core.data.data_split.train_test_data_setup', 'train_test_data_setup', ([], {'data': 'data'}), '(data=data)\n', (7973, 7984), False, 'from fedot.core.data.data_split import train_test_data_setup\n'), ((9224, 9256), 'fedot.core.data.data_split.train_test_data_setup', 'train_test_data_setup', ([], {'data': 'data'}), '(data=data)\n', (9245, 9256), False, 'from fedot.core.data.data_split import train_test_data_setup\n'), ((10564, 10596), 'fedot.core.data.data_split.train_test_data_setup', 'train_test_data_setup', ([], {'data': 'data'}), '(data=data)\n', (10585, 10596), False, 'from fedot.core.data.data_split import train_test_data_setup\n'), ((11757, 11800), 'test.unit.tasks.test_forecasting.get_ts_data', 'get_ts_data', ([], {'n_steps': '(200)', 'forecast_length': '(5)'}), '(n_steps=200, forecast_length=5)\n', (11768, 11800), False, 'from test.unit.tasks.test_forecasting import get_ts_data\n'), ((12469, 12482), 'fedot.core.pipelines.tuning.search_space.SearchSpace', 'SearchSpace', ([], {}), '()\n', (12480, 12482), False, 'from fedot.core.pipelines.tuning.search_space import SearchSpace\n'), ((12621, 12713), 'fedot.core.pipelines.tuning.search_space.SearchSpace', 'SearchSpace', ([], {'custom_search_space': 'custom_search_space', 'replace_default_search_space': '(False)'}), '(custom_search_space=custom_search_space,\n replace_default_search_space=False)\n', (12632, 12713), False, 'from fedot.core.pipelines.tuning.search_space import SearchSpace\n'), ((12803, 12894), 'fedot.core.pipelines.tuning.search_space.SearchSpace', 'SearchSpace', ([], {'custom_search_space': 'custom_search_space', 'replace_default_search_space': '(True)'}), 
'(custom_search_space=custom_search_space,\n replace_default_search_space=True)\n', (12814, 12894), False, 'from fedot.core.pipelines.tuning.search_space import SearchSpace\n'), ((13937, 13950), 'fedot.core.pipelines.tuning.search_space.SearchSpace', 'SearchSpace', ([], {}), '()\n', (13948, 13950), False, 'from fedot.core.pipelines.tuning.search_space import SearchSpace\n'), ((14266, 14358), 'fedot.core.pipelines.tuning.search_space.SearchSpace', 'SearchSpace', ([], {'custom_search_space': 'custom_search_space', 'replace_default_search_space': '(False)'}), '(custom_search_space=custom_search_space,\n replace_default_search_space=False)\n', (14277, 14358), False, 'from fedot.core.pipelines.tuning.search_space import SearchSpace\n'), ((14448, 14539), 'fedot.core.pipelines.tuning.search_space.SearchSpace', 'SearchSpace', ([], {'custom_search_space': 'custom_search_space', 'replace_default_search_space': '(True)'}), '(custom_search_space=custom_search_space,\n replace_default_search_space=True)\n', (14459, 14539), False, 'from fedot.core.pipelines.tuning.search_space import SearchSpace\n'), ((809, 834), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (824, 834), False, 'import os\n'), ((931, 965), 'os.path.join', 'os.path.join', (['test_file_path', 'file'], {}), '(test_file_path, file)\n', (943, 965), False, 'import os\n'), ((1079, 1104), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1094, 1104), False, 'import os\n'), ((1205, 1239), 'os.path.join', 'os.path.join', (['test_file_path', 'file'], {}), '(test_file_path, file)\n', (1217, 1239), False, 'import os\n'), ((11829, 11846), 'fedot.core.pipelines.node.PrimaryNode', 'PrimaryNode', (['"""ar"""'], {}), "('ar')\n", (11840, 11846), False, 'from fedot.core.pipelines.node import PrimaryNode, SecondaryNode\n'), ((11874, 11887), 'fedot.core.pipelines.tuning.search_space.SearchSpace', 'SearchSpace', ([], {}), '()\n', (11885, 11887), False, 'from 
fedot.core.pipelines.tuning.search_space import SearchSpace\n'), ((972, 1002), 'fedot.core.repository.tasks.Task', 'Task', (['TaskTypesEnum.regression'], {}), '(TaskTypesEnum.regression)\n', (976, 1002), False, 'from fedot.core.repository.tasks import Task, TaskTypesEnum\n'), ((1246, 1280), 'fedot.core.repository.tasks.Task', 'Task', (['TaskTypesEnum.classification'], {}), '(TaskTypesEnum.classification)\n', (1250, 1280), False, 'from fedot.core.repository.tasks import Task, TaskTypesEnum\n'), ((4097, 4110), 'fedot.core.pipelines.tuning.search_space.SearchSpace', 'SearchSpace', ([], {}), '()\n', (4108, 4110), False, 'from fedot.core.pipelines.tuning.search_space import SearchSpace\n'), ((5525, 5538), 'fedot.core.pipelines.tuning.search_space.SearchSpace', 'SearchSpace', ([], {}), '()\n', (5536, 5538), False, 'from fedot.core.pipelines.tuning.search_space import SearchSpace\n'), ((6757, 6770), 'fedot.core.pipelines.tuning.search_space.SearchSpace', 'SearchSpace', ([], {}), '()\n', (6768, 6770), False, 'from fedot.core.pipelines.tuning.search_space import SearchSpace\n'), ((8215, 8228), 'fedot.core.pipelines.tuning.search_space.SearchSpace', 'SearchSpace', ([], {}), '()\n', (8226, 8228), False, 'from fedot.core.pipelines.tuning.search_space import SearchSpace\n'), ((9481, 9494), 'fedot.core.pipelines.tuning.search_space.SearchSpace', 'SearchSpace', ([], {}), '()\n', (9492, 9494), False, 'from fedot.core.pipelines.tuning.search_space import SearchSpace\n'), ((10827, 10840), 'fedot.core.pipelines.tuning.search_space.SearchSpace', 'SearchSpace', ([], {}), '()\n', (10838, 10840), False, 'from fedot.core.pipelines.tuning.search_space import SearchSpace\n'), ((12022, 12135), 'fedot.core.pipelines.tuning.unified.PipelineTuner', 'PipelineTuner', ([], {'pipeline': 'ar_pipeline', 'task': 'train_data.task', 'iterations': '(3)', 'search_space': 'search_space', 'algo': 'algo'}), '(pipeline=ar_pipeline, task=train_data.task, iterations=3,\n search_space=search_space, 
algo=algo)\n', (12035, 12135), False, 'from fedot.core.pipelines.tuning.unified import PipelineTuner\n'), ((4265, 4375), 'fedot.core.pipelines.tuning.unified.PipelineTuner', 'PipelineTuner', ([], {'pipeline': 'pipeline', 'task': 'train_data.task', 'iterations': '(1)', 'search_space': 'search_space', 'algo': 'algo'}), '(pipeline=pipeline, task=train_data.task, iterations=1,\n search_space=search_space, algo=algo)\n', (4278, 4375), False, 'from fedot.core.pipelines.tuning.unified import PipelineTuner\n'), ((5693, 5803), 'fedot.core.pipelines.tuning.unified.PipelineTuner', 'PipelineTuner', ([], {'pipeline': 'pipeline', 'task': 'train_data.task', 'iterations': '(1)', 'search_space': 'search_space', 'algo': 'algo'}), '(pipeline=pipeline, task=train_data.task, iterations=1,\n search_space=search_space, algo=algo)\n', (5706, 5803), False, 'from fedot.core.pipelines.tuning.unified import PipelineTuner\n'), ((6927, 7039), 'fedot.core.pipelines.tuning.sequential.SequentialTuner', 'SequentialTuner', ([], {'pipeline': 'pipeline', 'task': 'train_data.task', 'iterations': '(1)', 'search_space': 'search_space', 'algo': 'algo'}), '(pipeline=pipeline, task=train_data.task, iterations=1,\n search_space=search_space, algo=algo)\n', (6942, 7039), False, 'from fedot.core.pipelines.tuning.sequential import SequentialTuner\n'), ((8385, 8497), 'fedot.core.pipelines.tuning.sequential.SequentialTuner', 'SequentialTuner', ([], {'pipeline': 'pipeline', 'task': 'train_data.task', 'iterations': '(2)', 'search_space': 'search_space', 'algo': 'algo'}), '(pipeline=pipeline, task=train_data.task, iterations=2,\n search_space=search_space, algo=algo)\n', (8400, 8497), False, 'from fedot.core.pipelines.tuning.sequential import SequentialTuner\n'), ((9651, 9763), 'fedot.core.pipelines.tuning.sequential.SequentialTuner', 'SequentialTuner', ([], {'pipeline': 'pipeline', 'task': 'train_data.task', 'iterations': '(1)', 'search_space': 'search_space', 'algo': 'algo'}), '(pipeline=pipeline, 
task=train_data.task, iterations=1,\n search_space=search_space, algo=algo)\n', (9666, 9763), False, 'from fedot.core.pipelines.tuning.sequential import SequentialTuner\n'), ((10997, 11109), 'fedot.core.pipelines.tuning.sequential.SequentialTuner', 'SequentialTuner', ([], {'pipeline': 'pipeline', 'task': 'train_data.task', 'iterations': '(1)', 'search_space': 'search_space', 'algo': 'algo'}), '(pipeline=pipeline, task=train_data.task, iterations=1,\n search_space=search_space, algo=algo)\n', (11012, 11109), False, 'from fedot.core.pipelines.tuning.sequential import SequentialTuner\n'), ((2637, 2664), 'numpy.arange', 'np.arange', (['(0.15)', '(1.01)', '(0.05)'], {}), '(0.15, 1.01, 0.05)\n', (2646, 2664), True, 'import numpy as np\n')] |
import pyPLUTO as pp
import numpy as np
import matplotlib.pyplot as plt
import os, shutil, sys
from multiprocessing import Pool
# Hard-coded run parameters; must match the simulation setup.
# TODO : Extract this directly from the run
gamma = 5.0/3.0  # adiabatic index (used for internal energy e = P/(gamma-1))
theta = 10.0     # temperature-gradient parameter: initial profiles use 1 + theta*depth
m = 1.0          # presumably the polytropic index (Wb uses theta*(m+1)) -- TODO confirm
def create_directory(path):
    '''
    Create an empty directory at *path*.

    /!\\ Any existing directory (and its contents) at *path* is deleted
    first -- use with caution!
    '''
    already_there = os.path.exists(path)
    if already_there:
        shutil.rmtree(path)
    os.mkdir(path)
def count_pluto_snaps(path='.', ext='.dbl'):
    '''
    Return the number of PLUTO snapshot files (data.xxxx + *ext*) found
    in directory *path*.
    '''
    return sum(1 for name in os.listdir(path)
               if name.startswith('data') and name.endswith(ext))
def extract_Tbar(sid):
    '''
    Return (time, Tbar) for snapshot *sid*, where Tbar is the
    horizontally-averaged (x, y) temperature profile T = P/rho.
    '''
    snap = pp.pload(sid)
    temperature = snap.prs / snap.rho
    return snap.time, np.average(temperature, axis=(0, 1))
def plot(sid):
    '''
    Makes a mosaic plot of snapshot *sid* and saves it to render/rho.xxxx.png
    . Top left (A)     : horizontal slice of temperature near the top
    . Bottom left (B)  : vertical slice of temperature variation at center
    . Top right (C)    : Density vs initial density profile
    . Middle right (D) : Pressure vs initial pressure profile
    . Bottom right (E) : Temperature vs initial temperature profile
    '''
    d = pp.pload(sid)
    # Mosaic layout: A/B are the large slice panels, C/D/E the profile panels
    axd = plt.figure(constrained_layout=True, figsize=(12, 12)).subplot_mosaic(
        """
        AAAACC
        AAAACC
        AAAADD
        AAAADD
        BBBBEE
        BBBBEE
        """)
    Nx, Ny, Nz = d.rho.shape
    # Top and front slice in temperature (T = P/rho); z index reversed so
    # the front slice is plotted against depth
    top_slice = d.prs[:,:,2] / d.rho[:,:,2]
    front_slice = d.prs[:,Ny//2,::-1] / d.rho[:,Ny//2,::-1]
    ext_top = [d.x1[0], d.x1[-1], d.x2[0], d.x2[-1]]
    ext_front = [d.x2[0], d.x2[-1], d.x3[0], d.x3[-1]]
    front_slice = front_slice.T
    # Horizontally averaged profiles, reversed to be functions of depth
    # C -> Density
    # D -> Pressure
    # E -> Temperature
    z = d.x3
    depth = d.x3[::-1]
    rho = np.average(d.rho, axis=(0, 1))[::-1]
    prs = np.average(d.prs, axis=(0, 1))[::-1]
    T = np.average(d.prs / d.rho, axis=(0, 1))[::-1]
    # Subtract the mean profile so panel B shows temperature fluctuations only
    for k in range(Nz):
        front_slice[k,:] -= T[k]
    # Initial hydrostatic profiles for comparison (dashed lines in C/D/E)
    rho_0 = (1.0 + theta*depth)
    prs_0 = (1.0 + theta*depth)**2.0
    T_0 = prs_0 / rho_0
    axd['A'].imshow(top_slice.T, origin='lower', extent=ext_top)
    axd['A'].set_xlabel('X')
    axd['A'].set_ylabel('Y')
    axd['A'].set_title('Temperature slice at z={:.3f}'.format(z[2]))
    axd['A'].axhline(2.0, linestyle='--', color='red')
    # Symmetric colour limits so zero fluctuation maps to white in 'bwr'
    clim = (-np.abs(front_slice).max(), np.abs(front_slice).max())
    axd['B'].imshow(front_slice, origin='lower', extent=ext_front, clim=clim, cmap='bwr')
    axd['B'].set_xlabel('X')
    axd['B'].set_ylabel('d')
    axd['B'].set_title('Temperature variation at y={}'.format(d.x2[Ny//2]))
    axd['C'].plot(depth, rho, '-k', linewidth=2)
    axd['C'].plot(depth, rho_0, '--k')
    axd['C'].set_xlabel('d')
    axd['C'].set_ylim(rho_0.min(), rho_0.max())
    axd['C'].set_ylabel(r'$\langle \rho \rangle$')
    axd['D'].plot(depth, prs, '-k', linewidth=2)
    axd['D'].plot(depth, prs_0, '--k')
    axd['D'].set_ylim(prs_0.min(), prs_0.max())
    axd['D'].set_xlabel('d')
    axd['D'].set_ylabel(r'$\langle P \rangle$')
    axd['E'].plot(depth, T, '-k', linewidth=2)
    axd['E'].plot(depth, T_0, '--k')
    axd['E'].set_ylim(T_0.min(), T_0.max())
    axd['E'].set_xlabel('d')
    axd['E'].set_ylabel(r'$\langle T \rangle$')
    plt.savefig('render/rho.{:04}.png'.format(sid))
    plt.close()
def extract_quantities(d, gamma=5.0/3.0):
    '''
    Extract global diagnostics from a snapshot.

    Parameters
    ----------
    d : pyPLUTO pload-like object exposing time, dx1/dx2/dx3 and the
        numpy arrays rho, prs, vx1, vx2, vx3.
    gamma : float, optional
        Adiabatic index used for the internal energy. Defaults to 5/3,
        matching the module-level constant; exposed as a parameter so the
        routine generalizes to other equations of state.

    Returns
    -------
    (time, total mass, kinetic energy, internal energy, total energy)
    '''
    T = d.time
    # Uniform-grid cell volume (first spacing taken as representative)
    dV = d.dx1[0] * d.dx2[0] * d.dx3[0]
    mass = (dV * d.rho).sum()
    Ek = 0.5 * d.rho * (d.vx1**2.0 + d.vx2**2.0 + d.vx3**2.0) * dV
    # Internal energy density: rho cancels, this is prs/(gamma-1) per cell
    e = d.rho * d.prs / (d.rho * (gamma-1.0)) * dV
    E = Ek + e
    Ek = Ek.sum()
    e = e.sum()
    E = E.sum()
    return T, mass, Ek, e, E
def get_periodic_gradient(vec, dh, axis):
    '''
    Centered first-order finite difference of *vec* along *axis* with
    spacing *dh*, assuming the domain is periodic in that direction
    (np.gradient cannot handle periodic boundary conditions).
    '''
    forward = np.roll(vec, -1, axis=axis)
    backward = np.roll(vec, 1, axis=axis)
    return (forward - backward) / (2.0 * dh)
def extract_profiles(dstart, dend):
    '''
    Extracts fluxes/vertical profiles and averages them in time over
    snapshots dstart..dend (inclusive).

    Returns an (Nz, 7) array with columns:
        0: z, 1: enthalpy flux, 2: kinetic flux, 3: acoustic flux,
        4: buoyancy work, 5: vertical/horizontal kinetic-energy ratio,
        6: horizontal/vertical enstrophy ratio
    '''
    profiles_evol = []
    for sid in range(dstart, dend+1):
        d = pp.pload(sid)
        T = d.prs / d.rho
        # Fluctuations about the horizontal (x, y) average
        Tprime = T - np.average(T, axis=(0, 1))
        rhoPrime = d.rho - np.average(d.rho, axis=(0, 1))
        Pprime = d.prs - np.average(d.prs, axis=(0, 1))
        # Fluxes : Enthalpy, Kinetic, Acoustic, Buoyancy work
        Fe = gamma / (gamma-1.0) * d.rho * Tprime * d.vx3
        Fk = 0.5 * d.rho * d.vx3 * (d.vx1**2.0 + d.vx2**2.0 + d.vx3**2.0)
        Fp = d.vx3 * Pprime
        Wb = theta * (m+1.0) * d.vx3 * rhoPrime
        # Averaging horizontally
        Fe = np.average(Fe, axis=(0, 1))
        Fk = np.average(Fk, axis=(0, 1))
        Fp = np.average(Fp, axis=(0, 1))
        Wb = np.average(Wb, axis=(0, 1))
        # Energy ratio (vertical over horizontal mean-square velocity)
        ux2_bar = np.average(d.vx1*d.vx1, axis=(0, 1))
        uy2_bar = np.average(d.vx2*d.vx2, axis=(0, 1))
        uz2_bar = np.average(d.vx3*d.vx3, axis=(0, 1))
        re = uz2_bar / (ux2_bar + uy2_bar)
        # Calculating vorticity; grid spacings assumed uniform.
        # z is non-periodic -> np.gradient; x and y are periodic -> helper
        dx = d.dx1[0]
        dy = d.dx2[0]
        dz = d.dx3[0]
        dudz = np.gradient(d.vx1, dz, axis=2)
        dudy = get_periodic_gradient(d.vx1, dy, 1)
        dvdx = get_periodic_gradient(d.vx2, dx, 0)
        dvdz = np.gradient(d.vx2, dz, axis=2)
        dwdx = get_periodic_gradient(d.vx3, dx, 0)
        dwdy = get_periodic_gradient(d.vx3, dy, 1)
        omega_x = dwdy - dvdz
        omega_y = dudz - dwdx
        omega_z = dvdx - dudy
        # And enstrophy (horizontal components over vertical component)
        ox2_bar = np.average(omega_x**2.0, axis=(0, 1))
        oy2_bar = np.average(omega_y**2.0, axis=(0, 1))
        oz2_bar = np.average(omega_z**2.0, axis=(0, 1))
        romega = (ox2_bar + oy2_bar) / oz2_bar
        # Putting everything in a table
        Nz = d.n3_tot
        profiles = np.empty((Nz, 7))
        profiles[:,0] = d.x3
        profiles[:,1] = Fe
        profiles[:,2] = Fk
        profiles[:,3] = Fp
        profiles[:,4] = Wb
        profiles[:,5] = re
        profiles[:,6] = romega
        profiles_evol.append(profiles)
    # Returning time average
    profiles_evol = np.array(profiles_evol)
    return np.average(profiles_evol, axis=0)
### Main
if __name__ == '__main__':
    print('Counting snapshots')
    snap_count = count_pluto_snaps()
    # --no-render allows to replot quantities without rendering everything
    if '--no-render' not in sys.argv:
        print('Rendering ...')
        create_directory('render')
        # Context manager guarantees the worker pool is shut down
        with Pool(32) as p:
            p.map(plot, range(snap_count))
    # Getting the evolution of the global quantities (mass, energies)
    if '--no-time-evolution' not in sys.argv:
        T = []
        mass = []
        Ek = []
        e = []
        E = []
        print('Extracting time evolution ...')
        for sid in range(snap_count):
            d = pp.pload(sid)
            T_, mass_, Ek_, e_, E_ = extract_quantities(d)
            T.append(T_)
            mass.append(mass_)
            Ek.append(Ek_)
            e.append(e_)
            E.append(E_)
        # Saving the values to CSV file (columns: time, Ek, e, E)
        NT = len(T)
        time_evolution = np.empty((NT, 4))
        time_evolution[:,0] = np.array(T)
        time_evolution[:,1] = np.array(Ek)
        time_evolution[:,2] = np.array(e)
        time_evolution[:,3] = np.array(E)
        np.savetxt('pluto_time.csv', time_evolution, delimiter=',')
        # Plotting time evolution
        fig, ax = plt.subplots(2, 2, figsize=(15, 15))
        ax[0,0].plot(T, mass, '-k')
        ax[0,0].axhline(mass[0], linestyle='--')
        ax[0,0].set_xlabel('T')
        ax[0,0].set_ylabel('Mass')
        ax[0,1].plot(T, Ek, '-k')
        ax[0,1].set_xlabel('T')
        ax[0,1].set_ylabel('Kinetic energy')
        ax[1,0].plot(T, e, '-k')
        ax[1,0].set_xlabel('T')
        ax[1,0].set_ylabel('Internal energy density')
        ax[1,1].plot(T, E, '-k')
        ax[1,1].set_xlabel('T')
        ax[1,1].set_ylabel('Total energy')
        plt.savefig('time_evolution.png')
    # Plotting temperature evolution (every 50th snapshot)
    # at z = 0.0 the temperature should be 1 on every curve
    # at z = 1.0 the temperature gradient should be roughly the same
    if '--no-temperatures' not in sys.argv:
        print('Extracting temperature evolution')
        T_snaps = range(0, snap_count, 50)
        z = []
        for sid in T_snaps:
            if sid == 0:
                # First snapshot also provides the z coordinate axis
                d = pp.pload(sid)
                z = d.x3
            _, Tbar = extract_Tbar(sid)
            plt.plot(z, Tbar)
        plt.xlabel('z')
        plt.ylabel('T')
        plt.savefig('temperatures.png')
    # And finally extracting fluxes and profiles
    if '--no-profiles' not in sys.argv:
        print('Extracting profiles')
        profiles = extract_profiles(895, 905)
        np.savetxt('pluto_prof.csv', profiles, delimiter=',')
    print('All good !')
| [
"matplotlib.pyplot.ylabel",
"numpy.array",
"numpy.gradient",
"os.path.exists",
"os.listdir",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.empty",
"os.mkdir",
"pyPLUTO.pload",
"numpy.abs",
"matplotlib.pyplot.savefig",
"numpy.average",
"numpy.save... | [((397, 417), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (411, 417), False, 'import os, shutil, sys\n'), ((451, 465), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (459, 465), False, 'import os, shutil, sys\n'), ((624, 640), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (634, 640), False, 'import os, shutil, sys\n'), ((873, 886), 'pyPLUTO.pload', 'pp.pload', (['sid'], {}), '(sid)\n', (881, 886), True, 'import pyPLUTO as pp\n'), ((920, 946), 'numpy.average', 'np.average', (['T'], {'axis': '(0, 1)'}), '(T, axis=(0, 1))\n', (930, 946), True, 'import numpy as np\n'), ((1376, 1389), 'pyPLUTO.pload', 'pp.pload', (['sid'], {}), '(sid)\n', (1384, 1389), True, 'import pyPLUTO as pp\n'), ((3529, 3540), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3538, 3540), True, 'import matplotlib.pyplot as plt\n'), ((6544, 6567), 'numpy.array', 'np.array', (['profiles_evol'], {}), '(profiles_evol)\n', (6552, 6567), True, 'import numpy as np\n'), ((6579, 6612), 'numpy.average', 'np.average', (['profiles_evol'], {'axis': '(0)'}), '(profiles_evol, axis=0)\n', (6589, 6612), True, 'import numpy as np\n'), ((427, 446), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (440, 446), False, 'import os, shutil, sys\n'), ((1998, 2028), 'numpy.average', 'np.average', (['d.rho'], {'axis': '(0, 1)'}), '(d.rho, axis=(0, 1))\n', (2008, 2028), True, 'import numpy as np\n'), ((2045, 2075), 'numpy.average', 'np.average', (['d.prs'], {'axis': '(0, 1)'}), '(d.prs, axis=(0, 1))\n', (2055, 2075), True, 'import numpy as np\n'), ((2092, 2130), 'numpy.average', 'np.average', (['(d.prs / d.rho)'], {'axis': '(0, 1)'}), '(d.prs / d.rho, axis=(0, 1))\n', (2102, 2130), True, 'import numpy as np\n'), ((4481, 4494), 'pyPLUTO.pload', 'pp.pload', (['sid'], {}), '(sid)\n', (4489, 4494), True, 'import pyPLUTO as pp\n'), ((5006, 5033), 'numpy.average', 'np.average', (['Fe'], {'axis': '(0, 1)'}), '(Fe, axis=(0, 1))\n', (5016, 5033), True, 'import 
numpy as np\n'), ((5047, 5074), 'numpy.average', 'np.average', (['Fk'], {'axis': '(0, 1)'}), '(Fk, axis=(0, 1))\n', (5057, 5074), True, 'import numpy as np\n'), ((5088, 5115), 'numpy.average', 'np.average', (['Fp'], {'axis': '(0, 1)'}), '(Fp, axis=(0, 1))\n', (5098, 5115), True, 'import numpy as np\n'), ((5129, 5156), 'numpy.average', 'np.average', (['Wb'], {'axis': '(0, 1)'}), '(Wb, axis=(0, 1))\n', (5139, 5156), True, 'import numpy as np\n'), ((5207, 5245), 'numpy.average', 'np.average', (['(d.vx1 * d.vx1)'], {'axis': '(0, 1)'}), '(d.vx1 * d.vx1, axis=(0, 1))\n', (5217, 5245), True, 'import numpy as np\n'), ((5262, 5300), 'numpy.average', 'np.average', (['(d.vx2 * d.vx2)'], {'axis': '(0, 1)'}), '(d.vx2 * d.vx2, axis=(0, 1))\n', (5272, 5300), True, 'import numpy as np\n'), ((5317, 5355), 'numpy.average', 'np.average', (['(d.vx3 * d.vx3)'], {'axis': '(0, 1)'}), '(d.vx3 * d.vx3, axis=(0, 1))\n', (5327, 5355), True, 'import numpy as np\n'), ((5529, 5559), 'numpy.gradient', 'np.gradient', (['d.vx1', 'dz'], {'axis': '(2)'}), '(d.vx1, dz, axis=2)\n', (5540, 5559), True, 'import numpy as np\n'), ((5677, 5707), 'numpy.gradient', 'np.gradient', (['d.vx2', 'dz'], {'axis': '(2)'}), '(d.vx2, dz, axis=2)\n', (5688, 5707), True, 'import numpy as np\n'), ((5953, 5992), 'numpy.average', 'np.average', (['(omega_x ** 2.0)'], {'axis': '(0, 1)'}), '(omega_x ** 2.0, axis=(0, 1))\n', (5963, 5992), True, 'import numpy as np\n'), ((6009, 6048), 'numpy.average', 'np.average', (['(omega_y ** 2.0)'], {'axis': '(0, 1)'}), '(omega_y ** 2.0, axis=(0, 1))\n', (6019, 6048), True, 'import numpy as np\n'), ((6065, 6104), 'numpy.average', 'np.average', (['(omega_z ** 2.0)'], {'axis': '(0, 1)'}), '(omega_z ** 2.0, axis=(0, 1))\n', (6075, 6104), True, 'import numpy as np\n'), ((6233, 6250), 'numpy.empty', 'np.empty', (['(Nz, 7)'], {}), '((Nz, 7))\n', (6241, 6250), True, 'import numpy as np\n'), ((6912, 6920), 'multiprocessing.Pool', 'Pool', (['(32)'], {}), '(32)\n', (6916, 6920), False, 'from 
multiprocessing import Pool\n'), ((7592, 7609), 'numpy.empty', 'np.empty', (['(NT, 4)'], {}), '((NT, 4))\n', (7600, 7609), True, 'import numpy as np\n'), ((7640, 7651), 'numpy.array', 'np.array', (['T'], {}), '(T)\n', (7648, 7651), True, 'import numpy as np\n'), ((7682, 7694), 'numpy.array', 'np.array', (['Ek'], {}), '(Ek)\n', (7690, 7694), True, 'import numpy as np\n'), ((7725, 7736), 'numpy.array', 'np.array', (['e'], {}), '(e)\n', (7733, 7736), True, 'import numpy as np\n'), ((7767, 7778), 'numpy.array', 'np.array', (['E'], {}), '(E)\n', (7775, 7778), True, 'import numpy as np\n'), ((7787, 7846), 'numpy.savetxt', 'np.savetxt', (['"""pluto_time.csv"""', 'time_evolution'], {'delimiter': '""","""'}), "('pluto_time.csv', time_evolution, delimiter=',')\n", (7797, 7846), True, 'import numpy as np\n'), ((7900, 7936), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(15, 15)'}), '(2, 2, figsize=(15, 15))\n', (7912, 7936), True, 'import matplotlib.pyplot as plt\n'), ((8471, 8504), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""time_evolution.png"""'], {}), "('time_evolution.png')\n", (8482, 8504), True, 'import matplotlib.pyplot as plt\n'), ((9033, 9048), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""z"""'], {}), "('z')\n", (9043, 9048), True, 'import matplotlib.pyplot as plt\n'), ((9057, 9072), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""T"""'], {}), "('T')\n", (9067, 9072), True, 'import matplotlib.pyplot as plt\n'), ((9081, 9112), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""temperatures.png"""'], {}), "('temperatures.png')\n", (9092, 9112), True, 'import matplotlib.pyplot as plt\n'), ((9294, 9347), 'numpy.savetxt', 'np.savetxt', (['"""pluto_prof.csv"""', 'profiles'], {'delimiter': '""","""'}), "('pluto_prof.csv', profiles, delimiter=',')\n", (9304, 9347), True, 'import numpy as np\n'), ((1400, 1453), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'constrained_layout': '(True)', 'figsize': '(12, 12)'}), 
'(constrained_layout=True, figsize=(12, 12))\n', (1410, 1453), True, 'import matplotlib.pyplot as plt\n'), ((4186, 4213), 'numpy.roll', 'np.roll', (['vec', '(-1)'], {'axis': 'axis'}), '(vec, -1, axis=axis)\n', (4193, 4213), True, 'import numpy as np\n'), ((4216, 4242), 'numpy.roll', 'np.roll', (['vec', '(1)'], {'axis': 'axis'}), '(vec, 1, axis=axis)\n', (4223, 4242), True, 'import numpy as np\n'), ((4545, 4571), 'numpy.average', 'np.average', (['T'], {'axis': '(0, 1)'}), '(T, axis=(0, 1))\n', (4555, 4571), True, 'import numpy as np\n'), ((4599, 4629), 'numpy.average', 'np.average', (['d.rho'], {'axis': '(0, 1)'}), '(d.rho, axis=(0, 1))\n', (4609, 4629), True, 'import numpy as np\n'), ((4657, 4687), 'numpy.average', 'np.average', (['d.prs'], {'axis': '(0, 1)'}), '(d.prs, axis=(0, 1))\n', (4667, 4687), True, 'import numpy as np\n'), ((7254, 7267), 'pyPLUTO.pload', 'pp.pload', (['sid'], {}), '(sid)\n', (7262, 7267), True, 'import pyPLUTO as pp\n'), ((9007, 9024), 'matplotlib.pyplot.plot', 'plt.plot', (['z', 'Tbar'], {}), '(z, Tbar)\n', (9015, 9024), True, 'import matplotlib.pyplot as plt\n'), ((2583, 2602), 'numpy.abs', 'np.abs', (['front_slice'], {}), '(front_slice)\n', (2589, 2602), True, 'import numpy as np\n'), ((8916, 8929), 'pyPLUTO.pload', 'pp.pload', (['sid'], {}), '(sid)\n', (8924, 8929), True, 'import pyPLUTO as pp\n'), ((2556, 2575), 'numpy.abs', 'np.abs', (['front_slice'], {}), '(front_slice)\n', (2562, 2575), True, 'import numpy as np\n')] |
import pathlib
import pytest
import numpy as np
from plums.commons.data import TileWrapper, Record, RecordCollection, DataPoint
from plums.dataflow.dataset import PatternDataset
def _dummy_tile_driver(paths, **matches):
    """Minimal valid tile driver: wrap a zero image for the lexicographically-last path.

    Echoes its inputs to stdout so test failures show what the dataset passed in.
    """
    ordered = sorted(paths, key=str, reverse=True)
    print(ordered)
    print(matches)
    pixels = np.zeros((12, 12, 3))
    return TileWrapper(pixels, filename=ordered[0], **matches)
def _invalid_return_tile_driver(paths, **matches):
print(paths)
print(matches)
return np.zeros((12, 12, 3))
def _invalid_paths_signature_tile_driver(*paths, **matches):
    """Tile driver with var-positional ``paths`` — its signature must be rejected."""
    print(paths, matches, sep="\n")
    pixels = np.zeros((12, 12, 3))
    return TileWrapper(pixels, filename=paths[0], **matches)
def _invalid_matches_signature_tile_driver(*paths, matches=None):
    """Tile driver with a plain ``matches`` keyword — its signature must be rejected."""
    print(paths, matches, sep="\n")
    pixels = np.zeros((12, 12, 3))
    return TileWrapper(pixels, filename=paths[0], **matches)
def _invalid_extra_signature_tile_driver(*paths, degenerate=False, **matches):
    """Tile driver with an extra keyword parameter — its signature must be rejected."""
    print(paths, degenerate, matches, sep="\n")
    pixels = np.zeros((12, 12, 3))
    return TileWrapper(pixels, filename=paths[0], **matches)
def _dummy_annotation_driver(paths, **matches):
    """Minimal valid annotation driver: one unit-square record in a RecordCollection.

    Echoes its inputs to stdout so test failures show what the dataset passed in.
    """
    print(paths)
    print(matches)
    unit_square = [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]
    return RecordCollection(Record(unit_square, ('label', ), paths=paths, **matches))
def _invalid_return_annotation_driver(paths, **matches):
print(paths)
print(matches)
return matches
def _invalid_paths_signature_annotation_driver(*paths, **matches):
    """Annotation driver with var-positional ``paths`` — its signature must be rejected."""
    print(paths, matches, sep="\n")
    unit_square = [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]
    return RecordCollection(Record(unit_square, ('label', ), paths=paths, **matches))
def _invalid_matches_signature_annotation_driver(*paths, matches=None):
    """Annotation driver with a plain ``matches`` keyword — its signature must be rejected."""
    print(paths, matches, sep="\n")
    unit_square = [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]
    return RecordCollection(Record(unit_square, ('label', ), paths=paths, **matches))
def _invalid_extra_signature_annotation_driver(*paths, degenerate=False, **matches):
    """Annotation driver with an extra keyword parameter — its signature must be rejected."""
    print(paths, degenerate, matches, sep="\n")
    unit_square = [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]
    return RecordCollection(Record(unit_square, ('label', ), paths=paths, **matches))
class TestSignature:
    """PatternDataset must reject driver callables with an invalid type or signature at construction."""

    def test_type_tile_signature(self):
        """A non-callable tile driver (None) raises TypeError."""
        with pytest.raises(TypeError, match='Invalid Tile driver: Expected a callable'):
            _ = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                               'data/labels/{dataset}/{aoi}/{type}/{tile}.json',
                               None, _dummy_annotation_driver)

    def test_invalid_paths_tile_signature(self):
        """A tile driver taking ``*paths`` instead of a single ``paths`` argument is rejected."""
        with pytest.raises(TypeError, match='Invalid Tile driver: Expected function'):
            _ = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                               'data/labels/{dataset}/{aoi}/{type}/{tile}.json',
                               _invalid_paths_signature_tile_driver, _dummy_annotation_driver)

    def test_invalid_matches_tile_signature(self):
        """A tile driver taking a plain ``matches`` keyword instead of ``**matches`` is rejected."""
        with pytest.raises(TypeError, match='Invalid Tile driver: Expected function'):
            _ = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                               'data/labels/{dataset}/{aoi}/{type}/{tile}.json',
                               _invalid_matches_signature_tile_driver, _dummy_annotation_driver)

    def test_invalid_extra_tile_signature(self):
        """A tile driver with an extra keyword-only parameter is rejected."""
        with pytest.raises(TypeError, match='Invalid Tile driver: Expected function'):
            _ = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                               'data/labels/{dataset}/{aoi}/{type}/{tile}.json',
                               _invalid_extra_signature_tile_driver, _dummy_annotation_driver)

    def test_type_annotation_signature(self):
        """A non-callable annotation driver (None) raises TypeError."""
        with pytest.raises(TypeError, match='Invalid Annotation driver: Expected a callable'):
            _ = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                               'data/labels/{dataset}/{aoi}/{type}/{tile}.json',
                               _dummy_tile_driver, None)

    def test_invalid_paths_annotation_signature(self):
        """An annotation driver taking ``*paths`` instead of a single ``paths`` argument is rejected."""
        with pytest.raises(TypeError, match='Invalid Annotation driver: Expected function'):
            _ = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                               'data/labels/{dataset}/{aoi}/{type}/{tile}.json',
                               _dummy_tile_driver, _invalid_paths_signature_annotation_driver)

    def test_invalid_matches_annotation_signature(self):
        """An annotation driver taking a plain ``matches`` keyword instead of ``**matches`` is rejected."""
        with pytest.raises(TypeError, match='Invalid Annotation driver: Expected function'):
            _ = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                               'data/labels/{dataset}/{aoi}/{type}/{tile}.json',
                               _dummy_tile_driver, _invalid_matches_signature_annotation_driver)

    def test_invalid_extra_annotation_signature(self):
        """An annotation driver with an extra keyword-only parameter is rejected."""
        with pytest.raises(TypeError, match='Invalid Annotation driver: Expected function'):
            _ = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                               'data/labels/{dataset}/{aoi}/{type}/{tile}.json',
                               _dummy_tile_driver, _invalid_extra_signature_annotation_driver)
class TestPairMatch:
    """Tile/annotation pairing: pattern groups common to both patterns drive the match.

    These tests inspect private attributes (``_matching_groups``, ``_group_index``,
    ``_tiles_database``, ``_annotations_database``) to pin down the pairing behaviour.
    The ``strict_pattern_tree`` / ``loose_pattern_tree`` fixtures are defined outside
    this file — assumptions about their layout come from the assertions below.
    """

    def test_strict(self, strict_pattern_tree):
        """Default (strict) matching keeps only tiles whose every group has an annotation match."""
        root, path_list = strict_pattern_tree
        dataset = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                                 'data/labels/{dataset}/{aoi}/{type}/{tile}.json',
                                 _dummy_tile_driver, _dummy_annotation_driver, path=str(root))
        assert len(dataset) == 8
        assert dataset._matching_groups == ('dataset', 'aoi', 'type', 'tile')
        assert set(dataset._group_index) == {('dataset_1', 'aoi_0', 'simulated', 'tile_00'),
                                             ('dataset_1', 'aoi_0', 'simulated', 'tile_01'),
                                             ('dataset_1', 'aoi_0', 'labeled', 'tile_00'),
                                             ('dataset_1', 'aoi_0', 'labeled', 'tile_01'),
                                             ('dataset_1', 'aoi_3', 'simulated', 'tile_00'),
                                             ('dataset_1', 'aoi_3', 'simulated', 'tile_01'),
                                             ('dataset_1', 'aoi_3', 'labeled', 'tile_00'),
                                             ('dataset_1', 'aoi_3', 'labeled', 'tile_01')}

    def test_sort(self, strict_pattern_tree):
        """``sort_key`` controls the order of ``_group_index`` (here: reversed group tuples)."""
        root, path_list = strict_pattern_tree
        dataset = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                                 'data/labels/{dataset}/{aoi}/{type}/{tile}.json',
                                 _dummy_tile_driver, _dummy_annotation_driver, path=root,
                                 sort_key=lambda x: tuple(reversed(x)))
        assert len(dataset) == 8
        assert dataset._matching_groups == ('dataset', 'aoi', 'type', 'tile')
        assert dataset._group_index == [
            ('dataset_1', 'aoi_0', 'labeled', 'tile_00'),
            ('dataset_1', 'aoi_3', 'labeled', 'tile_00'),
            ('dataset_1', 'aoi_0', 'simulated', 'tile_00'),
            ('dataset_1', 'aoi_3', 'simulated', 'tile_00'),
            ('dataset_1', 'aoi_0', 'labeled', 'tile_01'),
            ('dataset_1', 'aoi_3', 'labeled', 'tile_01'),
            ('dataset_1', 'aoi_0', 'simulated', 'tile_01'),
            ('dataset_1', 'aoi_3', 'simulated', 'tile_01'),
        ]

    def test_cache(self, strict_pattern_tree):
        """``cache=True`` must produce the same databases and indexes as a fresh discovery."""
        root, path_list = strict_pattern_tree
        dataset = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                                 'data/labels/{dataset}/{aoi}/{type}/{tile}.json',
                                 _dummy_tile_driver, _dummy_annotation_driver,
                                 path=root)
        cached = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                                'data/labels/{dataset}/{aoi}/{type}/{tile}.json',
                                _dummy_tile_driver, _dummy_annotation_driver,
                                path=root, cache=True)
        assert dataset._tiles_database == cached._tiles_database
        assert dataset._tiles_index == cached._tiles_index
        assert dataset._annotations_database == cached._annotations_database
        assert dataset._annotations_index == cached._annotations_index
        assert dataset._matching_groups == cached._matching_groups
        assert dataset._group_index == cached._group_index

    def test_cache_miss(self, strict_pattern_tree):
        """With ``cache=True`` and a different pattern, discovery still yields correct matches."""
        root, path_list = strict_pattern_tree
        dataset = PatternDataset('data/images/{dataset}/{type}/{tile}.jpg',
                                 'data/labels/{dataset}/{type}/{tile}.json',
                                 _dummy_tile_driver, _dummy_annotation_driver,
                                 path=pathlib.Path(str(root)), cache=True)
        assert len(dataset) == 2
        assert dataset._matching_groups == ('dataset', 'type', 'tile')
        assert set(dataset._group_index) == {('dataset_0', 'labeled', 'tile_00'),
                                             ('dataset_0', 'labeled', 'tile_01')}

    def test_strict_recursive(self, strict_pattern_tree):
        """A recursive group (``{aoi/}``) matches across directory levels, e.g. 'aoi_0/simulated'."""
        root, path_list = strict_pattern_tree
        dataset = PatternDataset('data/images/{dataset}/{aoi/}/{tile}.jpg',
                                 'data/labels/{dataset}/{aoi/}/{tile}.json',
                                 _dummy_tile_driver, _dummy_annotation_driver, path=root)
        assert len(dataset) == 10
        assert dataset._matching_groups == ('dataset', 'aoi', 'tile')
        assert set(dataset._group_index) == {('dataset_0', 'labeled', 'tile_00'),
                                             ('dataset_0', 'labeled', 'tile_01'),
                                             ('dataset_1', 'aoi_0/simulated', 'tile_00'),
                                             ('dataset_1', 'aoi_0/simulated', 'tile_01'),
                                             ('dataset_1', 'aoi_0/labeled', 'tile_00'),
                                             ('dataset_1', 'aoi_0/labeled', 'tile_01'),
                                             ('dataset_1', 'aoi_3/simulated', 'tile_00'),
                                             ('dataset_1', 'aoi_3/simulated', 'tile_01'),
                                             ('dataset_1', 'aoi_3/labeled', 'tile_00'),
                                             ('dataset_1', 'aoi_3/labeled', 'tile_01')}

    def test_tile_degeneracy_fail(self, loose_pattern_tree):
        """A tile pattern with no capture group at all is rejected."""
        root, path_list = loose_pattern_tree
        with pytest.raises(ValueError, match='Tile pattern degeneracy is not supported'):
            _ = PatternDataset('data/images/tile.jpg',
                               'data/labels/{dataset_id}/{aoi_id}/{type_id}/{tile_id}.json',
                               _dummy_tile_driver, _dummy_annotation_driver, path=root)

    def test_no_common_group_fail(self, loose_pattern_tree):
        """Tile and annotation patterns sharing no group name cannot be paired."""
        root, path_list = loose_pattern_tree
        with pytest.raises(ValueError, match='No common group could be found in between patterns'):
            _ = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                               'data/labels/{dataset_id}/{aoi_id}/{type_id}/{tile_id}.json',
                               _dummy_tile_driver, _dummy_annotation_driver, path=root)

    def test_no_match_fail(self, loose_pattern_tree):
        """Even with ``strict=False``, zero tile/annotation matches is an error (note .JSON suffix)."""
        root, path_list = loose_pattern_tree
        with pytest.raises(ValueError, match='No matches where found between tiles and annotation'):
            _ = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                               'data/labels/{dataset}/{aoi}/{type}/{tile}.JSON',
                               _dummy_tile_driver, _dummy_annotation_driver, path=root, strict=False)

    def test_loose_fail(self, loose_pattern_tree):
        """Strict mode (default) fails on the loose tree because some tiles lack annotations."""
        root, path_list = loose_pattern_tree
        with pytest.raises(ValueError, match='does not have a matching annotation'):
            _ = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                               'data/labels/{dataset}/{aoi}/{type}/{tile}.json',
                               _dummy_tile_driver, _dummy_annotation_driver, path=root)

    def test_loose(self, loose_pattern_tree):
        """``strict=False`` silently drops tiles without a matching annotation."""
        root, path_list = loose_pattern_tree
        dataset = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                                 'data/labels/{dataset}/{aoi}/{type}/{tile}.json',
                                 _dummy_tile_driver, _dummy_annotation_driver, path=root, strict=False)
        assert len(dataset) == 6
        assert dataset._matching_groups == ('dataset', 'aoi', 'type', 'tile')
        assert set(dataset._group_index) == {('dataset_1', 'aoi_0', 'simulated', 'tile_00'),
                                             ('dataset_1', 'aoi_0', 'simulated', 'tile_01'),
                                             ('dataset_1', 'aoi_3', 'simulated', 'tile_00'),
                                             ('dataset_1', 'aoi_3', 'simulated', 'tile_01'),
                                             ('dataset_1', 'aoi_3', 'labeled', 'tile_00'),
                                             ('dataset_1', 'aoi_3', 'labeled', 'tile_01')}

    def test_loose_alternative(self, loose_pattern_tree):
        """Annotations may live in the image tree; ``[json|geojson]`` alternation widens the match."""
        root, path_list = loose_pattern_tree
        dataset = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                                 'data/images/{dataset}/{aoi}/{type}/{tile}.json',
                                 _dummy_tile_driver, _dummy_annotation_driver, path=root, strict=False)
        assert len(dataset) == 1
        assert dataset._matching_groups == ('dataset', 'aoi', 'type', 'tile')
        assert set(dataset._group_index) == {('dataset_1', 'aoi_0', 'labeled', 'tile_00')}
        dataset = PatternDataset('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',
                                 'data/images/{dataset}/{aoi}/{type}/{tile}.[json|geojson]',
                                 _dummy_tile_driver, _dummy_annotation_driver, path=root, strict=False)
        assert len(dataset) == 2
        assert set(dataset._group_index) == {('dataset_1', 'aoi_0', 'labeled', 'tile_00'),
                                             ('dataset_1', 'aoi_0', 'labeled', 'tile_01')}

    def test_loose_duplicate(self, loose_pattern_tree):
        """Several tiles may map onto one group key: databases keep all duplicates per key."""
        root, path_list = loose_pattern_tree
        with pytest.raises(ValueError, match='does not have a matching annotation'):
            _ = PatternDataset('data/images/{dataset}/{type}/{prior}/{tile}.jpg',
                               'data/labels/{dataset}/{type}/{tile}.json',
                               _dummy_tile_driver, _dummy_annotation_driver, path=root)
        dataset = PatternDataset('data/images/{dataset}/{type}/{prior}/{tile}.jpg',
                                 'data/labels/{dataset}/{type}/{tile}.json',
                                 _dummy_tile_driver, _dummy_annotation_driver, path=root, strict=False)
        assert len(dataset) == 2
        assert dataset._matching_groups == ('dataset', 'type', 'tile')
        assert set(dataset._group_index) == {('dataset_0', 'labeled', 'tile_00'),
                                             ('dataset_0', 'labeled', 'tile_01')}
        # Two tile files (one per {prior} value) collapse onto each single-annotation group.
        assert len(dataset._tiles_database[('dataset_0', 'labeled', 'tile_00')]) == 2
        assert len(dataset._tiles_database[('dataset_0', 'labeled', 'tile_01')]) == 2
        assert len(dataset._annotations_database[('dataset_0', 'labeled', 'tile_00')]) == 1
        assert len(dataset._annotations_database[('dataset_0', 'labeled', 'tile_01')]) == 1

    def test_degenerate(self, loose_pattern_tree):
        """A group-free (degenerate) annotation pattern matches every tile group."""
        root, path_list = loose_pattern_tree
        dataset = PatternDataset('data/images/{dataset}/{type}/{prior}/{tile}.jpg',
                                 'data/images.json',
                                 _dummy_tile_driver, _dummy_annotation_driver, path=root)
        assert len(dataset) == 12
        assert dataset._matching_groups == ('dataset', 'type', 'prior', 'tile')
        assert set(dataset._group_index) == {('dataset_0', 'labeled', 'prior', 'tile_00'),
                                             ('dataset_0', 'labeled', 'prior', 'tile_01'),
                                             ('dataset_0', 'labeled', 'posterior', 'tile_00'),
                                             ('dataset_0', 'labeled', 'posterior', 'tile_01'),
                                             ('dataset_1', 'aoi_0', 'simulated', 'tile_00'),
                                             ('dataset_1', 'aoi_0', 'simulated', 'tile_01'),
                                             ('dataset_1', 'aoi_0', 'labeled', 'tile_00'),
                                             ('dataset_1', 'aoi_0', 'labeled', 'tile_01'),
                                             ('dataset_1', 'aoi_3', 'simulated', 'tile_00'),
                                             ('dataset_1', 'aoi_3', 'simulated', 'tile_01'),
                                             ('dataset_1', 'aoi_3', 'labeled', 'tile_00'),
                                             ('dataset_1', 'aoi_3', 'labeled', 'tile_01')}
class TestDriver:
    """How drivers are invoked: matched groups are forwarded as attributes on the results."""

    def test_call_argument(self, loose_pattern_tree):
        """Only groups common to both patterns ('prior' excluded) reach the driver outputs."""
        root, path_list = loose_pattern_tree
        dataset = PatternDataset('data/images/{dataset}/{nature}/{prior}/{tile}.jpg',
                                 'data/labels/{dataset}/{nature}/{tile}.json',
                                 _dummy_tile_driver, _dummy_annotation_driver, path=root,
                                 strict=False, sort_key=lambda x: x)
        assert isinstance(dataset[0], DataPoint)
        assert dataset[0].tiles.iloc[0].filename == root / 'data/images/dataset_0/labeled/prior/tile_00.jpg'
        assert dataset[0].tiles.iloc[0].dataset == 'dataset_0'
        assert dataset[0].tiles.iloc[0].nature == 'labeled'
        assert dataset[0].tiles.iloc[0].tile == 'tile_00'
        assert not hasattr(dataset[0].tiles.iloc[0], 'prior')
        assert dataset[0].annotation[0].paths == (root / 'data/labels/dataset_0/labeled/tile_00.json', )
        assert dataset[0].annotation[0].dataset == 'dataset_0'
        assert dataset[0].annotation[0].nature == 'labeled'
        assert dataset[0].annotation[0].tile == 'tile_00'
        assert not hasattr(dataset[0].annotation[0], 'prior')

    def test_degenerate_call_argument(self, loose_pattern_tree):
        """With a degenerate annotation pattern every tile group ('prior' included) is forwarded."""
        root, path_list = loose_pattern_tree
        dataset = PatternDataset('data/images/{dataset}/{nature}/{prior}/{tile}.jpg',
                                 'data/images.json',
                                 _dummy_tile_driver, _dummy_annotation_driver, path=root,
                                 sort_key=lambda x: x)
        assert isinstance(dataset[0], DataPoint)
        assert dataset[0].tiles.iloc[0].filename == root / 'data/images/dataset_0/labeled/posterior/tile_00.jpg'
        assert dataset[0].tiles.iloc[0].dataset == 'dataset_0'
        assert dataset[0].tiles.iloc[0].nature == 'labeled'
        assert dataset[0].tiles.iloc[0].prior == 'posterior'
        assert dataset[0].tiles.iloc[0].tile == 'tile_00'
        assert dataset[0].annotation[0].paths == (root / 'data/images.json', )
        assert dataset[0].annotation[0].dataset == 'dataset_0'
        assert dataset[0].annotation[0].nature == 'labeled'
        assert dataset[0].annotation[0].prior == 'posterior'
        assert dataset[0].annotation[0].tile == 'tile_00'

    def test_call_invalid_tile_type(self, loose_pattern_tree):
        """A tile driver returning the wrong type raises TypeError at item access, not construction."""
        root, path_list = loose_pattern_tree
        dataset = PatternDataset('data/images/{dataset}/{nature}/{prior}/{tile}.jpg',
                                 'data/labels/{dataset}/{nature}/{tile}.json',
                                 _invalid_return_tile_driver, _dummy_annotation_driver, path=root, strict=False)
        with pytest.raises(TypeError):
            _ = dataset[0]

    def test_call_invalid_annotation_type(self, loose_pattern_tree):
        """An annotation driver returning the wrong type raises TypeError at item access."""
        root, path_list = loose_pattern_tree
        dataset = PatternDataset('data/images/{dataset}/{nature}/{prior}/{tile}.jpg',
                                 'data/labels/{dataset}/{nature}/{tile}.json',
                                 _dummy_tile_driver, _invalid_return_annotation_driver, path=root, strict=False)
        with pytest.raises(TypeError):
            _ = dataset[0]
| [
"plums.commons.data.RecordCollection",
"plums.dataflow.dataset.PatternDataset",
"plums.commons.data.Record",
"numpy.zeros",
"pytest.raises"
] | [((488, 509), 'numpy.zeros', 'np.zeros', (['(12, 12, 3)'], {}), '((12, 12, 3))\n', (496, 509), True, 'import numpy as np\n'), ((1183, 1273), 'plums.commons.data.Record', 'Record', (['[[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]', "('label',)"], {'paths': 'paths'}), "([[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]], ('label',), paths=paths,\n **matches)\n", (1189, 1273), False, 'from plums.commons.data import TileWrapper, Record, RecordCollection, DataPoint\n'), ((1282, 1306), 'plums.commons.data.RecordCollection', 'RecordCollection', (['record'], {}), '(record)\n', (1298, 1306), False, 'from plums.commons.data import TileWrapper, Record, RecordCollection, DataPoint\n'), ((1541, 1631), 'plums.commons.data.Record', 'Record', (['[[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]', "('label',)"], {'paths': 'paths'}), "([[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]], ('label',), paths=paths,\n **matches)\n", (1547, 1631), False, 'from plums.commons.data import TileWrapper, Record, RecordCollection, DataPoint\n'), ((1640, 1664), 'plums.commons.data.RecordCollection', 'RecordCollection', (['record'], {}), '(record)\n', (1656, 1664), False, 'from plums.commons.data import TileWrapper, Record, RecordCollection, DataPoint\n'), ((1789, 1879), 'plums.commons.data.Record', 'Record', (['[[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]', "('label',)"], {'paths': 'paths'}), "([[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]], ('label',), paths=paths,\n **matches)\n", (1795, 1879), False, 'from plums.commons.data import TileWrapper, Record, RecordCollection, DataPoint\n'), ((1888, 1912), 'plums.commons.data.RecordCollection', 'RecordCollection', (['record'], {}), '(record)\n', (1904, 1912), False, 'from plums.commons.data import TileWrapper, Record, RecordCollection, DataPoint\n'), ((2072, 2162), 'plums.commons.data.Record', 'Record', (['[[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]', "('label',)"], {'paths': 'paths'}), "([[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]], ('label',), paths=paths,\n **matches)\n", 
(2078, 2162), False, 'from plums.commons.data import TileWrapper, Record, RecordCollection, DataPoint\n'), ((2171, 2195), 'plums.commons.data.RecordCollection', 'RecordCollection', (['record'], {}), '(record)\n', (2187, 2195), False, 'from plums.commons.data import TileWrapper, Record, RecordCollection, DataPoint\n'), ((334, 355), 'numpy.zeros', 'np.zeros', (['(12, 12, 3)'], {}), '((12, 12, 3))\n', (342, 355), True, 'import numpy as np\n'), ((633, 654), 'numpy.zeros', 'np.zeros', (['(12, 12, 3)'], {}), '((12, 12, 3))\n', (641, 654), True, 'import numpy as np\n'), ((814, 835), 'numpy.zeros', 'np.zeros', (['(12, 12, 3)'], {}), '((12, 12, 3))\n', (822, 835), True, 'import numpy as np\n'), ((1030, 1051), 'numpy.zeros', 'np.zeros', (['(12, 12, 3)'], {}), '((12, 12, 3))\n', (1038, 1051), True, 'import numpy as np\n'), ((7724, 7902), 'plums.dataflow.dataset.PatternDataset', 'PatternDataset', (['"""data/images/{dataset}/{aoi}/{type}/{tile}.jpg"""', '"""data/labels/{dataset}/{aoi}/{type}/{tile}.json"""', '_dummy_tile_driver', '_dummy_annotation_driver'], {'path': 'root'}), "('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',\n 'data/labels/{dataset}/{aoi}/{type}/{tile}.json', _dummy_tile_driver,\n _dummy_annotation_driver, path=root)\n", (7738, 7902), False, 'from plums.dataflow.dataset import PatternDataset\n'), ((8011, 8201), 'plums.dataflow.dataset.PatternDataset', 'PatternDataset', (['"""data/images/{dataset}/{aoi}/{type}/{tile}.jpg"""', '"""data/labels/{dataset}/{aoi}/{type}/{tile}.json"""', '_dummy_tile_driver', '_dummy_annotation_driver'], {'path': 'root', 'cache': '(True)'}), "('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',\n 'data/labels/{dataset}/{aoi}/{type}/{tile}.json', _dummy_tile_driver,\n _dummy_annotation_driver, path=root, cache=True)\n", (8025, 8201), False, 'from plums.dataflow.dataset import PatternDataset\n'), ((9486, 9652), 'plums.dataflow.dataset.PatternDataset', 'PatternDataset', (['"""data/images/{dataset}/{aoi/}/{tile}.jpg"""', 
'"""data/labels/{dataset}/{aoi/}/{tile}.json"""', '_dummy_tile_driver', '_dummy_annotation_driver'], {'path': 'root'}), "('data/images/{dataset}/{aoi/}/{tile}.jpg',\n 'data/labels/{dataset}/{aoi/}/{tile}.json', _dummy_tile_driver,\n _dummy_annotation_driver, path=root)\n", (9500, 9652), False, 'from plums.dataflow.dataset import PatternDataset\n'), ((12597, 12789), 'plums.dataflow.dataset.PatternDataset', 'PatternDataset', (['"""data/images/{dataset}/{aoi}/{type}/{tile}.jpg"""', '"""data/labels/{dataset}/{aoi}/{type}/{tile}.json"""', '_dummy_tile_driver', '_dummy_annotation_driver'], {'path': 'root', 'strict': '(False)'}), "('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',\n 'data/labels/{dataset}/{aoi}/{type}/{tile}.json', _dummy_tile_driver,\n _dummy_annotation_driver, path=root, strict=False)\n", (12611, 12789), False, 'from plums.dataflow.dataset import PatternDataset\n'), ((13635, 13827), 'plums.dataflow.dataset.PatternDataset', 'PatternDataset', (['"""data/images/{dataset}/{aoi}/{type}/{tile}.jpg"""', '"""data/images/{dataset}/{aoi}/{type}/{tile}.json"""', '_dummy_tile_driver', '_dummy_annotation_driver'], {'path': 'root', 'strict': '(False)'}), "('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',\n 'data/images/{dataset}/{aoi}/{type}/{tile}.json', _dummy_tile_driver,\n _dummy_annotation_driver, path=root, strict=False)\n", (13649, 13827), False, 'from plums.dataflow.dataset import PatternDataset\n'), ((14107, 14309), 'plums.dataflow.dataset.PatternDataset', 'PatternDataset', (['"""data/images/{dataset}/{aoi}/{type}/{tile}.jpg"""', '"""data/images/{dataset}/{aoi}/{type}/{tile}.[json|geojson]"""', '_dummy_tile_driver', '_dummy_annotation_driver'], {'path': 'root', 'strict': '(False)'}), "('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',\n 'data/images/{dataset}/{aoi}/{type}/{tile}.[json|geojson]',\n _dummy_tile_driver, _dummy_annotation_driver, path=root, strict=False)\n", (14121, 14309), False, 'from plums.dataflow.dataset import PatternDataset\n'), ((15034, 
15222), 'plums.dataflow.dataset.PatternDataset', 'PatternDataset', (['"""data/images/{dataset}/{type}/{prior}/{tile}.jpg"""', '"""data/labels/{dataset}/{type}/{tile}.json"""', '_dummy_tile_driver', '_dummy_annotation_driver'], {'path': 'root', 'strict': '(False)'}), "('data/images/{dataset}/{type}/{prior}/{tile}.jpg',\n 'data/labels/{dataset}/{type}/{tile}.json', _dummy_tile_driver,\n _dummy_annotation_driver, path=root, strict=False)\n", (15048, 15222), False, 'from plums.dataflow.dataset import PatternDataset\n'), ((16022, 16173), 'plums.dataflow.dataset.PatternDataset', 'PatternDataset', (['"""data/images/{dataset}/{type}/{prior}/{tile}.jpg"""', '"""data/images.json"""', '_dummy_tile_driver', '_dummy_annotation_driver'], {'path': 'root'}), "('data/images/{dataset}/{type}/{prior}/{tile}.jpg',\n 'data/images.json', _dummy_tile_driver, _dummy_annotation_driver, path=root\n )\n", (16036, 16173), False, 'from plums.dataflow.dataset import PatternDataset\n'), ((17590, 17804), 'plums.dataflow.dataset.PatternDataset', 'PatternDataset', (['"""data/images/{dataset}/{nature}/{prior}/{tile}.jpg"""', '"""data/labels/{dataset}/{nature}/{tile}.json"""', '_dummy_tile_driver', '_dummy_annotation_driver'], {'path': 'root', 'strict': '(False)', 'sort_key': '(lambda x: x)'}), "('data/images/{dataset}/{nature}/{prior}/{tile}.jpg',\n 'data/labels/{dataset}/{nature}/{tile}.json', _dummy_tile_driver,\n _dummy_annotation_driver, path=root, strict=False, sort_key=lambda x: x)\n", (17604, 17804), False, 'from plums.dataflow.dataset import PatternDataset\n'), ((18776, 18951), 'plums.dataflow.dataset.PatternDataset', 'PatternDataset', (['"""data/images/{dataset}/{nature}/{prior}/{tile}.jpg"""', '"""data/images.json"""', '_dummy_tile_driver', '_dummy_annotation_driver'], {'path': 'root', 'sort_key': '(lambda x: x)'}), "('data/images/{dataset}/{nature}/{prior}/{tile}.jpg',\n 'data/images.json', _dummy_tile_driver, _dummy_annotation_driver, path=\n root, sort_key=lambda x: x)\n", (18790, 
18951), False, 'from plums.dataflow.dataset import PatternDataset\n'), ((19896, 20101), 'plums.dataflow.dataset.PatternDataset', 'PatternDataset', (['"""data/images/{dataset}/{nature}/{prior}/{tile}.jpg"""', '"""data/labels/{dataset}/{nature}/{tile}.json"""', '_invalid_return_tile_driver', '_dummy_annotation_driver'], {'path': 'root', 'strict': '(False)'}), "('data/images/{dataset}/{nature}/{prior}/{tile}.jpg',\n 'data/labels/{dataset}/{nature}/{tile}.json',\n _invalid_return_tile_driver, _dummy_annotation_driver, path=root,\n strict=False)\n", (19910, 20101), False, 'from plums.dataflow.dataset import PatternDataset\n'), ((20356, 20557), 'plums.dataflow.dataset.PatternDataset', 'PatternDataset', (['"""data/images/{dataset}/{nature}/{prior}/{tile}.jpg"""', '"""data/labels/{dataset}/{nature}/{tile}.json"""', '_dummy_tile_driver', '_invalid_return_annotation_driver'], {'path': 'root', 'strict': '(False)'}), "('data/images/{dataset}/{nature}/{prior}/{tile}.jpg',\n 'data/labels/{dataset}/{nature}/{tile}.json', _dummy_tile_driver,\n _invalid_return_annotation_driver, path=root, strict=False)\n", (20370, 20557), False, 'from plums.dataflow.dataset import PatternDataset\n'), ((2272, 2346), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""Invalid Tile driver: Expected a callable"""'}), "(TypeError, match='Invalid Tile driver: Expected a callable')\n", (2285, 2346), False, 'import pytest\n'), ((2364, 2517), 'plums.dataflow.dataset.PatternDataset', 'PatternDataset', (['"""data/images/{dataset}/{aoi}/{type}/{tile}.jpg"""', '"""data/labels/{dataset}/{aoi}/{type}/{tile}.json"""', 'None', '_dummy_annotation_driver'], {}), "('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',\n 'data/labels/{dataset}/{aoi}/{type}/{tile}.json', None,\n _dummy_annotation_driver)\n", (2378, 2517), False, 'from plums.dataflow.dataset import PatternDataset\n'), ((2635, 2707), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""Invalid Tile driver: Expected function"""'}), 
"(TypeError, match='Invalid Tile driver: Expected function')\n", (2648, 2707), False, 'import pytest\n'), ((2725, 2910), 'plums.dataflow.dataset.PatternDataset', 'PatternDataset', (['"""data/images/{dataset}/{aoi}/{type}/{tile}.jpg"""', '"""data/labels/{dataset}/{aoi}/{type}/{tile}.json"""', '_invalid_paths_signature_tile_driver', '_dummy_annotation_driver'], {}), "('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',\n 'data/labels/{dataset}/{aoi}/{type}/{tile}.json',\n _invalid_paths_signature_tile_driver, _dummy_annotation_driver)\n", (2739, 2910), False, 'from plums.dataflow.dataset import PatternDataset\n'), ((3030, 3102), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""Invalid Tile driver: Expected function"""'}), "(TypeError, match='Invalid Tile driver: Expected function')\n", (3043, 3102), False, 'import pytest\n'), ((3120, 3307), 'plums.dataflow.dataset.PatternDataset', 'PatternDataset', (['"""data/images/{dataset}/{aoi}/{type}/{tile}.jpg"""', '"""data/labels/{dataset}/{aoi}/{type}/{tile}.json"""', '_invalid_matches_signature_tile_driver', '_dummy_annotation_driver'], {}), "('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',\n 'data/labels/{dataset}/{aoi}/{type}/{tile}.json',\n _invalid_matches_signature_tile_driver, _dummy_annotation_driver)\n", (3134, 3307), False, 'from plums.dataflow.dataset import PatternDataset\n'), ((3425, 3497), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""Invalid Tile driver: Expected function"""'}), "(TypeError, match='Invalid Tile driver: Expected function')\n", (3438, 3497), False, 'import pytest\n'), ((3515, 3700), 'plums.dataflow.dataset.PatternDataset', 'PatternDataset', (['"""data/images/{dataset}/{aoi}/{type}/{tile}.jpg"""', '"""data/labels/{dataset}/{aoi}/{type}/{tile}.json"""', '_invalid_extra_signature_tile_driver', '_dummy_annotation_driver'], {}), "('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',\n 'data/labels/{dataset}/{aoi}/{type}/{tile}.json',\n _invalid_extra_signature_tile_driver, 
_dummy_annotation_driver)\n", (3529, 3700), False, 'from plums.dataflow.dataset import PatternDataset\n'), ((3815, 3900), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""Invalid Annotation driver: Expected a callable"""'}), "(TypeError, match='Invalid Annotation driver: Expected a callable'\n )\n", (3828, 3900), False, 'import pytest\n'), ((3913, 4056), 'plums.dataflow.dataset.PatternDataset', 'PatternDataset', (['"""data/images/{dataset}/{aoi}/{type}/{tile}.jpg"""', '"""data/labels/{dataset}/{aoi}/{type}/{tile}.json"""', '_dummy_tile_driver', 'None'], {}), "('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',\n 'data/labels/{dataset}/{aoi}/{type}/{tile}.json', _dummy_tile_driver, None)\n", (3927, 4056), False, 'from plums.dataflow.dataset import PatternDataset\n'), ((4184, 4262), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""Invalid Annotation driver: Expected function"""'}), "(TypeError, match='Invalid Annotation driver: Expected function')\n", (4197, 4262), False, 'import pytest\n'), ((4280, 4465), 'plums.dataflow.dataset.PatternDataset', 'PatternDataset', (['"""data/images/{dataset}/{aoi}/{type}/{tile}.jpg"""', '"""data/labels/{dataset}/{aoi}/{type}/{tile}.json"""', '_dummy_tile_driver', '_invalid_paths_signature_annotation_driver'], {}), "('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',\n 'data/labels/{dataset}/{aoi}/{type}/{tile}.json', _dummy_tile_driver,\n _invalid_paths_signature_annotation_driver)\n", (4294, 4465), False, 'from plums.dataflow.dataset import PatternDataset\n'), ((4591, 4669), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""Invalid Annotation driver: Expected function"""'}), "(TypeError, match='Invalid Annotation driver: Expected function')\n", (4604, 4669), False, 'import pytest\n'), ((4687, 4874), 'plums.dataflow.dataset.PatternDataset', 'PatternDataset', (['"""data/images/{dataset}/{aoi}/{type}/{tile}.jpg"""', '"""data/labels/{dataset}/{aoi}/{type}/{tile}.json"""', '_dummy_tile_driver', 
'_invalid_matches_signature_annotation_driver'], {}), "('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',\n 'data/labels/{dataset}/{aoi}/{type}/{tile}.json', _dummy_tile_driver,\n _invalid_matches_signature_annotation_driver)\n", (4701, 4874), False, 'from plums.dataflow.dataset import PatternDataset\n'), ((4998, 5076), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""Invalid Annotation driver: Expected function"""'}), "(TypeError, match='Invalid Annotation driver: Expected function')\n", (5011, 5076), False, 'import pytest\n'), ((5094, 5279), 'plums.dataflow.dataset.PatternDataset', 'PatternDataset', (['"""data/images/{dataset}/{aoi}/{type}/{tile}.jpg"""', '"""data/labels/{dataset}/{aoi}/{type}/{tile}.json"""', '_dummy_tile_driver', '_invalid_extra_signature_annotation_driver'], {}), "('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',\n 'data/labels/{dataset}/{aoi}/{type}/{tile}.json', _dummy_tile_driver,\n _invalid_extra_signature_annotation_driver)\n", (5108, 5279), False, 'from plums.dataflow.dataset import PatternDataset\n'), ((10811, 10886), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Tile pattern degeneracy is not supported"""'}), "(ValueError, match='Tile pattern degeneracy is not supported')\n", (10824, 10886), False, 'import pytest\n'), ((10904, 11069), 'plums.dataflow.dataset.PatternDataset', 'PatternDataset', (['"""data/images/tile.jpg"""', '"""data/labels/{dataset_id}/{aoi_id}/{type_id}/{tile_id}.json"""', '_dummy_tile_driver', '_dummy_annotation_driver'], {'path': 'root'}), "('data/images/tile.jpg',\n 'data/labels/{dataset_id}/{aoi_id}/{type_id}/{tile_id}.json',\n _dummy_tile_driver, _dummy_annotation_driver, path=root)\n", (10918, 11069), False, 'from plums.dataflow.dataset import PatternDataset\n'), ((11244, 11334), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""No common group could be found in between patterns"""'}), "(ValueError, match=\n 'No common group could be found in between patterns')\n", 
(11257, 11334), False, 'import pytest\n'), ((11347, 11537), 'plums.dataflow.dataset.PatternDataset', 'PatternDataset', (['"""data/images/{dataset}/{aoi}/{type}/{tile}.jpg"""', '"""data/labels/{dataset_id}/{aoi_id}/{type_id}/{tile_id}.json"""', '_dummy_tile_driver', '_dummy_annotation_driver'], {'path': 'root'}), "('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',\n 'data/labels/{dataset_id}/{aoi_id}/{type_id}/{tile_id}.json',\n _dummy_tile_driver, _dummy_annotation_driver, path=root)\n", (11361, 11537), False, 'from plums.dataflow.dataset import PatternDataset\n'), ((11705, 11796), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""No matches where found between tiles and annotation"""'}), "(ValueError, match=\n 'No matches where found between tiles and annotation')\n", (11718, 11796), False, 'import pytest\n'), ((11809, 12001), 'plums.dataflow.dataset.PatternDataset', 'PatternDataset', (['"""data/images/{dataset}/{aoi}/{type}/{tile}.jpg"""', '"""data/labels/{dataset}/{aoi}/{type}/{tile}.JSON"""', '_dummy_tile_driver', '_dummy_annotation_driver'], {'path': 'root', 'strict': '(False)'}), "('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',\n 'data/labels/{dataset}/{aoi}/{type}/{tile}.JSON', _dummy_tile_driver,\n _dummy_annotation_driver, path=root, strict=False)\n", (11823, 12001), False, 'from plums.dataflow.dataset import PatternDataset\n'), ((12166, 12236), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""does not have a matching annotation"""'}), "(ValueError, match='does not have a matching annotation')\n", (12179, 12236), False, 'import pytest\n'), ((12254, 12432), 'plums.dataflow.dataset.PatternDataset', 'PatternDataset', (['"""data/images/{dataset}/{aoi}/{type}/{tile}.jpg"""', '"""data/labels/{dataset}/{aoi}/{type}/{tile}.json"""', '_dummy_tile_driver', '_dummy_annotation_driver'], {'path': 'root'}), "('data/images/{dataset}/{aoi}/{type}/{tile}.jpg',\n 'data/labels/{dataset}/{aoi}/{type}/{tile}.json', _dummy_tile_driver,\n 
_dummy_annotation_driver, path=root)\n", (12268, 12432), False, 'from plums.dataflow.dataset import PatternDataset\n'), ((14698, 14768), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""does not have a matching annotation"""'}), "(ValueError, match='does not have a matching annotation')\n", (14711, 14768), False, 'import pytest\n'), ((14786, 14960), 'plums.dataflow.dataset.PatternDataset', 'PatternDataset', (['"""data/images/{dataset}/{type}/{prior}/{tile}.jpg"""', '"""data/labels/{dataset}/{type}/{tile}.json"""', '_dummy_tile_driver', '_dummy_annotation_driver'], {'path': 'root'}), "('data/images/{dataset}/{type}/{prior}/{tile}.jpg',\n 'data/labels/{dataset}/{type}/{tile}.json', _dummy_tile_driver,\n _dummy_annotation_driver, path=root)\n", (14800, 14960), False, 'from plums.dataflow.dataset import PatternDataset\n'), ((20170, 20194), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (20183, 20194), False, 'import pytest\n'), ((20630, 20654), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (20643, 20654), False, 'import pytest\n')] |
from keras.utils import np_utils
import numpy as np
import h5py
import os
class HDF5DatasetWriter:
    """Buffered writer that streams (image, label) pairs into an HDF5 file.

    Rows are accumulated in an in-memory buffer and written to disk in
    chunks of ``bufSize`` samples, so we do not pay one HDF5 write per row.
    """

    def __init__(self, dims, outputPath, dataKey="images", max_label_length=1,
                 bufSize=1000):
        """Create the HDF5 file and its image/label datasets.

        dims -- full shape of the image dataset; dims[0] is the total
                number of samples the file will hold.
        outputPath -- path of the HDF5 file to create; must not already exist.
        dataKey -- name of the image dataset inside the file.
        max_label_length -- number of integer label slots stored per sample.
        bufSize -- number of samples buffered in memory before a flush.

        Raises ValueError if ``outputPath`` already exists.
        """
        if os.path.exists(outputPath):
            # Refuse to clobber an existing file; the caller must remove it.
            # (Fixed missing space: message previously read "deletethe".)
            raise ValueError("The supplied 'outputPath' already "
                             "exists and cannot be overwritten. Manually delete "
                             "the file before continuing.", outputPath)
        self.max_label_length = max_label_length
        self.db = h5py.File(outputPath, "w")
        self.data = self.db.create_dataset(dataKey, dims,
                                           dtype="float")
        self.labels = self.db.create_dataset(
            "labels", (dims[0], self.max_label_length), dtype="int")
        self.bufSize = bufSize
        self.buffer = {"data": [], "labels": []}
        # Index of the next row to be written in the HDF5 datasets.
        self.idx = 0

    def add(self, rows, labels):
        """Append rows and their labels to the buffer; flush when full."""
        self.buffer["data"].extend(rows)
        self.buffer["labels"].extend(labels)
        if len(self.buffer["data"]) >= self.bufSize:
            self.flush()

    def flush(self):
        """Write all buffered samples to disk and reset the buffer."""
        i = self.idx + len(self.buffer["data"])
        self.data[self.idx:i] = self.buffer["data"]
        self.labels[self.idx:i] = self.buffer["labels"]
        self.idx = i
        self.buffer = {"data": [], "labels": []}

    def storeClassLabels(self, classLabels):
        """Store the class-name strings in a 'label_names' dataset."""
        # Variable-length string dtype so names of any length fit.
        dt = h5py.special_dtype(vlen=str)
        labelSet = self.db.create_dataset("label_names",
                                          (len(classLabels),), dtype=dt)
        labelSet[:] = classLabels

    def close(self):
        """Flush any remaining buffered samples and close the file."""
        if len(self.buffer["data"]) > 0:
            self.flush()
        self.db.close()
class HDF5DatasetGenerator:
    """Yields training batches from an HDF5 dataset in the dict format
    expected by a Keras CTC model (an ``inputs`` dict plus a dummy
    ``'ctc'`` output of zeros)."""

    def __init__(self, dbPath, batchSize, preprocessors=None,
                 aug=None, binarize=True, classes=2, max_label_length=1):
        """Open the HDF5 database and record the batch configuration.

        dbPath -- path to the HDF5 file (must contain 'images' and 'labels').
        batchSize -- number of samples per yielded batch.
        preprocessors -- optional list of objects with a ``preprocess`` method.
        aug -- optional Keras-style augmenter exposing ``flow``.
        binarize -- one-hot encode the labels into ``classes`` columns.
        classes -- number of classes used when binarizing.
        max_label_length -- value written into 'label_length' per sample.
        """
        self.batchSize = batchSize
        self.preprocessors = preprocessors
        self.aug = aug
        self.binarize = binarize
        self.classes = classes
        self.max_label_length = max_label_length
        # Open read-only: the generator never writes to the database, and
        # an explicit mode avoids h5py's deprecated/ambiguous default.
        self.db = h5py.File(dbPath, "r")
        self.numImages = self.db["labels"].shape[0]

    def generator(self, passes=np.inf):
        """Yield (inputs, outputs) batches for ``passes`` epochs.

        The final batch of each epoch may contain fewer than
        ``batchSize`` samples; all per-batch arrays are sized to match.
        """
        epochs = 0
        while epochs < passes:
            for start in np.arange(0, self.numImages, self.batchSize):
                images = self.db["images"][start:start + self.batchSize]
                labels = self.db["labels"][start:start + self.batchSize]
                # Actual batch size -- the last slice may be short.
                cur = len(images)
                # Transposed copies fed to the preprocessors below.
                rotate_list = [image.T for image in images]
                if self.binarize:
                    labels = np_utils.to_categorical(labels,
                                                     self.classes)
                if self.preprocessors is not None:
                    procImages = []
                    for image in rotate_list:
                        for p in self.preprocessors:
                            image = p.preprocess(image)
                        procImages.append(image)
                    images = np.array(procImages)
                # NOTE(review): when no preprocessors are given, the raw
                # (untransposed) HDF5 slice is yielded, matching the
                # original behavior -- confirm this asymmetry is intended.
                if self.aug is not None:
                    (images, labels) = next(self.aug.flow(
                        images, labels, batch_size=cur))
                # 30 == (80 timesteps - 2) -- presumably tied to the model's
                # CTC input width; TODO confirm against the network config.
                input_length = np.full((cur, 1), 30.0)
                label_length = np.full((cur, 1), self.max_label_length,
                                       dtype="float")
                inputs = {
                    'input': images,
                    'label': labels,
                    'input_length': input_length,
                    'label_length': label_length
                }
                outputs = {'ctc': np.zeros([cur])}
                yield (inputs, outputs)
            epochs += 1

    def close(self):
        """Close the underlying HDF5 database."""
        self.db.close()
| [
"os.path.exists",
"numpy.ones",
"h5py.File",
"numpy.array",
"numpy.zeros",
"keras.utils.np_utils.to_categorical",
"h5py.special_dtype",
"numpy.arange"
] | [((198, 224), 'os.path.exists', 'os.path.exists', (['outputPath'], {}), '(outputPath)\n', (212, 224), False, 'import os\n'), ((442, 468), 'h5py.File', 'h5py.File', (['outputPath', '"""w"""'], {}), "(outputPath, 'w')\n", (451, 468), False, 'import h5py\n'), ((1155, 1183), 'h5py.special_dtype', 'h5py.special_dtype', ([], {'vlen': 'str'}), '(vlen=str)\n', (1173, 1183), False, 'import h5py\n'), ((1726, 1743), 'h5py.File', 'h5py.File', (['dbPath'], {}), '(dbPath)\n', (1735, 1743), False, 'import h5py\n'), ((1875, 1919), 'numpy.arange', 'np.arange', (['(0)', 'self.numImages', 'self.batchSize'], {}), '(0, self.numImages, self.batchSize)\n', (1884, 1919), True, 'import numpy as np\n'), ((2679, 2708), 'numpy.zeros', 'np.zeros', (['(self.batchSize, 1)'], {}), '((self.batchSize, 1))\n', (2687, 2708), True, 'import numpy as np\n'), ((2165, 2210), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['labels', 'self.classes'], {}), '(labels, self.classes)\n', (2188, 2210), False, 'from keras.utils import np_utils\n'), ((2438, 2458), 'numpy.array', 'np.array', (['procImages'], {}), '(procImages)\n', (2446, 2458), True, 'import numpy as np\n'), ((2607, 2635), 'numpy.ones', 'np.ones', (['(self.batchSize, 1)'], {}), '((self.batchSize, 1))\n', (2614, 2635), True, 'import numpy as np\n'), ((2944, 2970), 'numpy.zeros', 'np.zeros', (['[self.batchSize]'], {}), '([self.batchSize])\n', (2952, 2970), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.