code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
from __future__ import annotations
from typing import List, Optional, Set, Tuple, Union, TYPE_CHECKING
import numpy as np
from pyNastran.bdf.mesh_utils.internal_utils import get_bdf_model
if TYPE_CHECKING:
    from pyNastran.bdf.bdf import BDF
def find_coplanar_triangles(bdf_filename: Union[BDF, str],
                            eids: Optional[List[int]]=None) -> Tuple[BDF, Set[int]]:
    """
    Finds coplanar (duplicated) triangles

    Parameters
    ----------
    bdf_filename : BDF/str
        BDF: a model
        str: the path to the bdf input file
    eids : list
        the element ids to consider

    Returns
    -------
    model : BDF
        the loaded model
    eids_to_remove : Set[int]
        the elements that duplicate an earlier triangle's connectivity
    """
    model = get_bdf_model(bdf_filename, xref=False, log=None, debug=False)
    log = model.log

    if eids is None:
        eids = model.elements.keys()

    i = 0
    eids_removed = []
    # element ids whose connectivity was stored, in the same row order as nids;
    # needed so the dedup loop below pairs each row with the right element
    eids_used = []
    neids = len(eids)
    nids = np.zeros((neids, 3), dtype='int32')
    for eid in eids:
        elem = model.elements[eid]
        try:
            nids[i, :] = elem.nodes
        except ValueError:
            # not a 3-node element (broadcast into a length-3 row failed)
            eids_removed.append(eid)
            assert len(elem.nodes) != 3, str(elem)
            continue
        eids_used.append(eid)
        i += 1

    if i != neids:
        log.warning(f'removed {neids-i} non-triangles; eids_removed={eids_removed}')
        nids = nids[:i, :]

    # canonicalize each triangle's connectivity to (min, mid, max) so that
    # triangles sharing the same 3 nodes compare equal regardless of node order
    imin = nids.min(axis=1)
    imax = nids.max(axis=1)
    imid = []
    for row, imini, imaxi in zip(nids, imin, imax):
        a = list(row)
        a.remove(imini)
        a.remove(imaxi)
        imid.append(a[0])
    nids2 = np.vstack([imin, imid, imax]).T

    aset = set()
    eids_to_remove = set()
    # BUGFIX: iterate eids_used (the filtered ids), not the original eids;
    # zipping against eids misaligned ids with rows whenever a non-triangle
    # had been skipped above
    for eid, row in zip(eids_used, nids2):
        new_row = tuple(list(row))
        if new_row in aset:
            log.debug(f'eid={eid} exists already...')
            eids_to_remove.add(eid)
        else:
            aset.add(new_row)
    return model, eids_to_remove
| [
"pyNastran.bdf.mesh_utils.internal_utils.get_bdf_model",
"numpy.zeros",
"numpy.vstack"
] | [((695, 757), 'pyNastran.bdf.mesh_utils.internal_utils.get_bdf_model', 'get_bdf_model', (['bdf_filename'], {'xref': '(False)', 'log': 'None', 'debug': '(False)'}), '(bdf_filename, xref=False, log=None, debug=False)\n', (708, 757), False, 'from pyNastran.bdf.mesh_utils.internal_utils import get_bdf_model\n'), ((903, 938), 'numpy.zeros', 'np.zeros', (['(neids, 3)'], {'dtype': '"""int32"""'}), "((neids, 3), dtype='int32')\n", (911, 938), True, 'import numpy as np\n'), ((2125, 2154), 'numpy.vstack', 'np.vstack', (['[imin, imid, imax]'], {}), '([imin, imid, imax])\n', (2134, 2154), True, 'import numpy as np\n')] |
import openpnm as op
import numpy as np
import openpnm.models as mods
import pytest
from testfixtures import LogCapture
class ModelsTest:
    """Integration tests for OpenPNM's ModelsDict machinery (add/remove,
    regeneration, dependency resolution, and printing)."""
    def setup_class(self):
        # Shared fixture: a small cubic network with a StickAndBall geometry
        self.net = op.network.Cubic(shape=[3, 3, 3])
        self.geo = op.geometry.StickAndBall(network=self.net,
                                            pores=self.net.Ps,
                                            throats=self.net.Ts)
    def teardown_class(self):
        # Drop every project so tests do not leak state into each other
        ws = op.Workspace()
        ws.clear()
    def test_models_dict_print(self):
        # The string form of a models dict has a fixed number of lines and
        # horizontal-rule separators for the default StickAndBall models
        net = op.network.Cubic(shape=[3, 3, 3])
        geo = op.geometry.StickAndBall(network=net, pores=net.Ps,
                                       throats=net.Ts)
        s = geo.models.__str__().split('\n')
        assert len(s) == 70
        assert s.count('―'*85) == 15
    def test_regenerate_models(self):
        # Clearing props empties the object; regenerate_models restores them
        a = len(self.geo.props())
        assert a == 16
        self.geo.clear(mode='props')
        a = len(self.geo.props())
        assert a == 0
        self.geo.regenerate_models()
        a = len(self.geo.props())
        assert a == 16
    def test_dependency_graph(self):
        # A model's inter-object dependencies only appear with deep=True
        phase = op.phases.GenericPhase(network=self.net)
        phase.add_model(propname="pore.foo", model=op.models.misc.constant, value=1.0)
        phys = op.physics.GenericPhysics(network=self.net,
                                        phase=phase,
                                        geometry=self.geo)
        phys.add_model(propname="pore.baz", model=op.models.misc.constant, value=0.0)
        def mymodel(target, foo="pore.foo", baz="pore.baz"):
            return 0.0
        phys.add_model(propname="pore.bar_depends_on_foo_and_baz", model=mymodel)
        dg = phys.models.dependency_graph()
        assert ["pore.baz", "pore.bar_depends_on_foo_and_baz"] in dg.edges()
        assert ["pore.foo", "pore.bar_depends_on_foo_and_baz"] not in dg.edges
        dg = phys.models.dependency_graph(deep=True)
        assert ["pore.baz", "pore.bar_depends_on_foo_and_baz"] in dg.edges
        assert ["pore.foo", "pore.bar_depends_on_foo_and_baz"] in dg.edges
    def test_dependency_list(self):
        # dependency_list must topologically order models: seed before
        # diameter, diameter before volume/area
        prj = self.net.project
        prj.purge_object(self.geo)
        geom = op.geometry.GenericGeometry(network=self.net,
                                          pores=self.net.Ps)
        geom.add_model(propname='pore.volume',
                       model=mods.geometry.pore_volume.sphere,
                       pore_diameter='pore.diameter',
                       regen_mode='deferred')
        geom.add_model(propname='pore.diameter',
                       model=mods.misc.product,
                       prop1='pore.max_size',
                       prop2='pore.seed',
                       regen_mode='deferred')
        geom.add_model(propname='pore.area',
                       model=mods.geometry.pore_area.sphere,
                       pore_diameter='pore.diameter',
                       regen_mode='deferred')
        geom.add_model(propname='pore.seed',
                       model=mods.misc.random,
                       element='pore',
                       num_range=[0, 0.1],
                       seed=None)
        tree = np.asarray(geom.models.dependency_list())
        pos_v = np.argwhere(tree == 'pore.volume').flatten()[0]
        pos_d = np.argwhere(tree == 'pore.diameter').flatten()[0]
        pos_a = np.argwhere(tree == 'pore.area').flatten()[0]
        pos_s = np.argwhere(tree == 'pore.seed').flatten()[0]
        assert pos_v > pos_d
        assert pos_d > pos_s
        assert pos_a > pos_d
        # NOTE: replaces the class fixture geometry (purged above) so later
        # tests still find a geometry on self
        self.geo = geom
    def test_dependency_list_circular(self):
        # A two-model cycle must be detected and raise
        pn = self.net
        def chicken(target, prop='pore.egg'):
            return np.ones(target.Np)
        def egg(target, prop='pore.chicken'):
            return np.ones(target.Np)
        pn.add_model(propname='pore.chicken', model=chicken)
        pn.add_model(propname='pore.egg', model=egg)
        with pytest.raises(Exception):
            pn.models.dependency_list()
        pn.remove_model('pore.chicken')
        pn.remove_model('pore.egg')
    def test_dependency_list_tri_circular(self):
        # A three-model cycle must be detected and raise as well
        pn = self.net
        def rock(target, prop='pore.scissors'):
            return np.ones(target.Np)
        def scissors(target, prop='pore.paper'):
            return np.ones(target.Np)
        def paper(target, prop='pore.rock'):
            return np.ones(target.Np)
        pn.add_model(propname='pore.paper', model=paper)
        pn.add_model(propname='pore.scissors', model=scissors)
        pn.add_model(propname='pore.rock', model=rock)
        with pytest.raises(Exception):
            pn.models.dependency_list()
    def test_regenerate_models_on_phase_with_deep(self):
        # deep=True on a phase should cascade regeneration to its physics
        pn = op.network.Cubic(shape=[5, 5, 5])
        geo = op.geometry.StickAndBall(network=pn, pores=pn.Ps, throats=pn.Ts)
        phase = op.phases.Water(network=pn)
        phys = op.physics.Standard(network=pn, phase=phase, geometry=geo)
        phase.clear(mode='model_data')
        phys.clear()
        assert len(phys) == 2  # Only pore and throat.all remain
        phase.regenerate_models(propnames=None, deep=False)
        assert len(phys) == 2  # Still only pore and throat.all
        phase.regenerate_models(propnames=None, deep=True)
        assert len(phys) > 2  # Phys models are regenerated by phase regen
    def test_regenerate_models_on_physics_with_deep(self):
        # Regenerating physics pulls interpolated data onto the phase, but
        # deep regeneration from physics must not rebuild phase models
        pn = op.network.Cubic(shape=[5, 5, 5])
        geo = op.geometry.StickAndBall(network=pn, pores=pn.Ps, throats=pn.Ts)
        phase = op.phases.Water(network=pn)
        phys = op.physics.Standard(network=pn, phase=phase, geometry=geo)
        len_phase = 23
        phase.clear(mode='model_data')
        phys.clear()
        # Silence expected warnings while models regenerate out of order
        ws = op.Workspace()
        loglevel = ws.settings["loglevel"]
        ws.settings["loglevel"] = 50
        assert len(phys) == 2
        assert len(phase) == 14
        phys.regenerate_models(propnames=None, deep=False)
        assert len(phys) == 14
        # Note that 2 new models were added to the phase during interpolation
        assert len(phase) < len_phase
        phase.clear(mode='model_data')
        assert len(phase) == 14
        phys.regenerate_models(propnames=None, deep=True)
        assert len(phase) < len_phase
        ws.settings["loglevel"] = loglevel
    def test_regenerate_models_on_network_with_deep(self):
        # Network itself has no models; deep=True reaches its geometry
        pn = op.network.Cubic(shape=[5, 5, 5])
        geo = op.geometry.StickAndBall(network=pn, pores=pn.Ps, throats=pn.Ts)
        a = len(pn.props())
        pn.clear()
        pn.regenerate_models()
        assert len(pn.props()) == a  # pn has NO models
        b = len(geo.props())
        geo.clear(mode='model_data')
        pn.regenerate_models(deep=False)
        assert len(geo.props()) == 0
        pn.regenerate_models(deep=True)
        assert len(geo.props()) == b
    def test_regen_mode_default_value(self):
        # regen_mode='deferred' postpones model evaluation until requested
        pn = op.network.Cubic(shape=[3, 3, 3], spacing=1e-4)
        geo = op.geometry.StickAndBall(network=pn, pores=pn.Ps, throats=pn.Ts,
                                       settings={'regen_mode': 'deferred'})
        assert len(geo.props()) == 0
        geo.regenerate_models()
        assert len(geo.props()) == 16
    def test_automatic_running_on_models_when_missing_data(self):
        # Accessing a missing prop triggers its (deferred) model on demand
        pn = op.network.Cubic(shape=[3, 3, 3], spacing=1e-4)
        geo = op.geometry.StickAndBall(network=pn, pores=pn.Ps, throats=pn.Ts,
                                       settings={'regen_mode': 'deferred'})
        assert len(geo) == 2
        _ = geo['pore.seed']
        assert len(geo) == 3
if __name__ == '__main__':
    # Ad-hoc runner: execute every test_* method without a test framework.
    t = ModelsTest()
    self = t
    t.setup_class()
    for name in t.__dir__():
        if not name.startswith('test'):
            continue
        print('running test: '+name)
        getattr(t, name)()
| [
"openpnm.Workspace",
"openpnm.geometry.GenericGeometry",
"openpnm.phases.Water",
"numpy.ones",
"openpnm.physics.Standard",
"openpnm.network.Cubic",
"openpnm.geometry.StickAndBall",
"numpy.argwhere",
"pytest.raises",
"openpnm.phases.GenericPhase",
"openpnm.physics.GenericPhysics"
] | [((187, 220), 'openpnm.network.Cubic', 'op.network.Cubic', ([], {'shape': '[3, 3, 3]'}), '(shape=[3, 3, 3])\n', (203, 220), True, 'import openpnm as op\n'), ((240, 327), 'openpnm.geometry.StickAndBall', 'op.geometry.StickAndBall', ([], {'network': 'self.net', 'pores': 'self.net.Ps', 'throats': 'self.net.Ts'}), '(network=self.net, pores=self.net.Ps, throats=self.\n net.Ts)\n', (264, 327), True, 'import openpnm as op\n'), ((455, 469), 'openpnm.Workspace', 'op.Workspace', ([], {}), '()\n', (467, 469), True, 'import openpnm as op\n'), ((542, 575), 'openpnm.network.Cubic', 'op.network.Cubic', ([], {'shape': '[3, 3, 3]'}), '(shape=[3, 3, 3])\n', (558, 575), True, 'import openpnm as op\n'), ((590, 657), 'openpnm.geometry.StickAndBall', 'op.geometry.StickAndBall', ([], {'network': 'net', 'pores': 'net.Ps', 'throats': 'net.Ts'}), '(network=net, pores=net.Ps, throats=net.Ts)\n', (614, 657), True, 'import openpnm as op\n'), ((1144, 1184), 'openpnm.phases.GenericPhase', 'op.phases.GenericPhase', ([], {'network': 'self.net'}), '(network=self.net)\n', (1166, 1184), True, 'import openpnm as op\n'), ((1287, 1362), 'openpnm.physics.GenericPhysics', 'op.physics.GenericPhysics', ([], {'network': 'self.net', 'phase': 'phase', 'geometry': 'self.geo'}), '(network=self.net, phase=phase, geometry=self.geo)\n', (1312, 1362), True, 'import openpnm as op\n'), ((2219, 2283), 'openpnm.geometry.GenericGeometry', 'op.geometry.GenericGeometry', ([], {'network': 'self.net', 'pores': 'self.net.Ps'}), '(network=self.net, pores=self.net.Ps)\n', (2246, 2283), True, 'import openpnm as op\n'), ((4776, 4809), 'openpnm.network.Cubic', 'op.network.Cubic', ([], {'shape': '[5, 5, 5]'}), '(shape=[5, 5, 5])\n', (4792, 4809), True, 'import openpnm as op\n'), ((4824, 4888), 'openpnm.geometry.StickAndBall', 'op.geometry.StickAndBall', ([], {'network': 'pn', 'pores': 'pn.Ps', 'throats': 'pn.Ts'}), '(network=pn, pores=pn.Ps, throats=pn.Ts)\n', (4848, 4888), True, 'import openpnm as op\n'), ((4905, 4932), 
'openpnm.phases.Water', 'op.phases.Water', ([], {'network': 'pn'}), '(network=pn)\n', (4920, 4932), True, 'import openpnm as op\n'), ((4948, 5006), 'openpnm.physics.Standard', 'op.physics.Standard', ([], {'network': 'pn', 'phase': 'phase', 'geometry': 'geo'}), '(network=pn, phase=phase, geometry=geo)\n', (4967, 5006), True, 'import openpnm as op\n'), ((5463, 5496), 'openpnm.network.Cubic', 'op.network.Cubic', ([], {'shape': '[5, 5, 5]'}), '(shape=[5, 5, 5])\n', (5479, 5496), True, 'import openpnm as op\n'), ((5511, 5575), 'openpnm.geometry.StickAndBall', 'op.geometry.StickAndBall', ([], {'network': 'pn', 'pores': 'pn.Ps', 'throats': 'pn.Ts'}), '(network=pn, pores=pn.Ps, throats=pn.Ts)\n', (5535, 5575), True, 'import openpnm as op\n'), ((5592, 5619), 'openpnm.phases.Water', 'op.phases.Water', ([], {'network': 'pn'}), '(network=pn)\n', (5607, 5619), True, 'import openpnm as op\n'), ((5635, 5693), 'openpnm.physics.Standard', 'op.physics.Standard', ([], {'network': 'pn', 'phase': 'phase', 'geometry': 'geo'}), '(network=pn, phase=phase, geometry=geo)\n', (5654, 5693), True, 'import openpnm as op\n'), ((5790, 5804), 'openpnm.Workspace', 'op.Workspace', ([], {}), '()\n', (5802, 5804), True, 'import openpnm as op\n'), ((6436, 6469), 'openpnm.network.Cubic', 'op.network.Cubic', ([], {'shape': '[5, 5, 5]'}), '(shape=[5, 5, 5])\n', (6452, 6469), True, 'import openpnm as op\n'), ((6484, 6548), 'openpnm.geometry.StickAndBall', 'op.geometry.StickAndBall', ([], {'network': 'pn', 'pores': 'pn.Ps', 'throats': 'pn.Ts'}), '(network=pn, pores=pn.Ps, throats=pn.Ts)\n', (6508, 6548), True, 'import openpnm as op\n'), ((6963, 7012), 'openpnm.network.Cubic', 'op.network.Cubic', ([], {'shape': '[3, 3, 3]', 'spacing': '(0.0001)'}), '(shape=[3, 3, 3], spacing=0.0001)\n', (6979, 7012), True, 'import openpnm as op\n'), ((7025, 7131), 'openpnm.geometry.StickAndBall', 'op.geometry.StickAndBall', ([], {'network': 'pn', 'pores': 'pn.Ps', 'throats': 'pn.Ts', 'settings': "{'regen_mode': 
'deferred'}"}), "(network=pn, pores=pn.Ps, throats=pn.Ts, settings={\n 'regen_mode': 'deferred'})\n", (7049, 7131), True, 'import openpnm as op\n'), ((7354, 7403), 'openpnm.network.Cubic', 'op.network.Cubic', ([], {'shape': '[3, 3, 3]', 'spacing': '(0.0001)'}), '(shape=[3, 3, 3], spacing=0.0001)\n', (7370, 7403), True, 'import openpnm as op\n'), ((7416, 7522), 'openpnm.geometry.StickAndBall', 'op.geometry.StickAndBall', ([], {'network': 'pn', 'pores': 'pn.Ps', 'throats': 'pn.Ts', 'settings': "{'regen_mode': 'deferred'}"}), "(network=pn, pores=pn.Ps, throats=pn.Ts, settings={\n 'regen_mode': 'deferred'})\n", (7440, 7522), True, 'import openpnm as op\n'), ((3743, 3761), 'numpy.ones', 'np.ones', (['target.Np'], {}), '(target.Np)\n', (3750, 3761), True, 'import numpy as np\n'), ((3828, 3846), 'numpy.ones', 'np.ones', (['target.Np'], {}), '(target.Np)\n', (3835, 3846), True, 'import numpy as np\n'), ((3976, 4000), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (3989, 4000), False, 'import pytest\n'), ((4258, 4276), 'numpy.ones', 'np.ones', (['target.Np'], {}), '(target.Np)\n', (4265, 4276), True, 'import numpy as np\n'), ((4346, 4364), 'numpy.ones', 'np.ones', (['target.Np'], {}), '(target.Np)\n', (4353, 4364), True, 'import numpy as np\n'), ((4430, 4448), 'numpy.ones', 'np.ones', (['target.Np'], {}), '(target.Np)\n', (4437, 4448), True, 'import numpy as np\n'), ((4639, 4663), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (4652, 4663), False, 'import pytest\n'), ((3260, 3294), 'numpy.argwhere', 'np.argwhere', (["(tree == 'pore.volume')"], {}), "(tree == 'pore.volume')\n", (3271, 3294), True, 'import numpy as np\n'), ((3324, 3360), 'numpy.argwhere', 'np.argwhere', (["(tree == 'pore.diameter')"], {}), "(tree == 'pore.diameter')\n", (3335, 3360), True, 'import numpy as np\n'), ((3390, 3422), 'numpy.argwhere', 'np.argwhere', (["(tree == 'pore.area')"], {}), "(tree == 'pore.area')\n", (3401, 3422), True, 'import numpy as 
np\n'), ((3452, 3484), 'numpy.argwhere', 'np.argwhere', (["(tree == 'pore.seed')"], {}), "(tree == 'pore.seed')\n", (3463, 3484), True, 'import numpy as np\n')] |
# Copyright (c) Microsoft Corporation
# Licensed under the MIT License.
import numpy as np
import pandas as pd
from scipy.sparse import issparse
from sklearn.utils import check_consistent_length
from typing import Dict, List
_DF_COLUMN_BAD_NAME = "DataFrame column names must be strings."\
" Name '{0}' is of type {1}"
_LIST_NONSCALAR = "Lists must be of scalar types"
_TOO_MANY_DIMS = "Array must have at most two dimensions"
def _convert_to_list(array):
if issparse(array):
if array.shape[1] > 1000:
raise ValueError("Exceeds maximum number of features for "
"visualization (1000)")
return array.toarray().tolist()
if isinstance(array, pd.DataFrame):
return array.values.tolist()
if isinstance(array, pd.Series):
return array.values.tolist()
if isinstance(array, np.ndarray):
return array.tolist()
if isinstance(array, pd.Index):
return array.tolist()
return array
def _convert_to_string_list_dict(
        base_name_format: str,
        ys,
        sample_array) -> Dict[str, List]:
    """Convert the given input to a string-list dictionary.
    This function is used to convert arrays in a variety of types
    into a dictionary mapping column names to regular Python lists
    (in preparation for JSON serialisation). It is a modification
    of the feature processing code in :class:`fairlearn.metrics.MetricFrame`.
    The array to be converted is passed in :code:`ys`, and a variety
    of types are supported. The :code:`sample_array` argument is
    used in a call to :func:`sklearn.utils.check_consistent_length`
    to ensure that the resultant lists are of the right length.
    Finally `base_name_format` is used to generate sequential
    keys for the dictionary if none are in the supplied :code:`ys`.
    It must be of the form :code:`'Base String {0}'`, with the
    :code:`{0}` being replaced by a sequential integer.
    It is not possible to list out all the possible underlying types
    for :code:`ys`. A brief summary:
    - :class:`pd.Series`
    - :class:`pd.DataFrame`
    - A simple Python list
    - A Python dictionary with string keys and values which are
      convertible to lists
    - Anything convertible to a :class:`np.ndarray`
    """
    result = {}
    if isinstance(ys, pd.Series):
        check_consistent_length(ys, sample_array)
        # use the Series' own name as the key when it has one
        if ys.name is not None:
            result[ys.name] = _convert_to_list(ys)
        else:
            result[base_name_format.format(0)] = _convert_to_list(ys)
    elif isinstance(ys, pd.DataFrame):
        for i in range(len(ys.columns)):
            col_name = ys.columns[i]
            if not isinstance(col_name, str):
                msg = _DF_COLUMN_BAD_NAME.format(col_name, type(col_name))
                raise ValueError(msg)
            column = ys.iloc[:, i]
            check_consistent_length(column, sample_array)
            result[col_name] = _convert_to_list(column)
    elif isinstance(ys, list):
        # NOTE(review): assumes a non-empty list; ys[0] raises IndexError on []
        if np.isscalar(ys[0]):
            f_arr = np.atleast_1d(np.squeeze(np.asarray(ys)))
            assert len(f_arr.shape) == 1  # Sanity check
            check_consistent_length(f_arr, sample_array)
            result[base_name_format.format(0)] = _convert_to_list(f_arr)
        else:
            raise ValueError(_LIST_NONSCALAR)
    elif isinstance(ys, dict):
        for k, v in ys.items():
            result[k] = _convert_to_list(v)
    else:
        # Assume it's something which can go into np.as_array
        # BUGFIX: the `np.object` alias was removed in NumPy 1.24; the builtin
        # `object` is the supported spelling of the same dtype
        f_arr = np.squeeze(np.asarray(ys, dtype=object))
        if len(f_arr.shape) == 1:
            check_consistent_length(f_arr, sample_array)
            result[base_name_format.format(0)] = _convert_to_list(f_arr)
        elif len(f_arr.shape) == 2:
            # Work similarly to pd.DataFrame(data=ndarray)
            for i in range(f_arr.shape[1]):
                col = f_arr[:, i]
                check_consistent_length(col, sample_array)
                result[base_name_format.format(i)] = _convert_to_list(col)
        else:
            raise ValueError(_TOO_MANY_DIMS)
    return result
| [
"numpy.asarray",
"sklearn.utils.check_consistent_length",
"numpy.isscalar",
"scipy.sparse.issparse"
] | [((472, 487), 'scipy.sparse.issparse', 'issparse', (['array'], {}), '(array)\n', (480, 487), False, 'from scipy.sparse import issparse\n'), ((2385, 2426), 'sklearn.utils.check_consistent_length', 'check_consistent_length', (['ys', 'sample_array'], {}), '(ys, sample_array)\n', (2408, 2426), False, 'from sklearn.utils import check_consistent_length\n'), ((2917, 2962), 'sklearn.utils.check_consistent_length', 'check_consistent_length', (['column', 'sample_array'], {}), '(column, sample_array)\n', (2940, 2962), False, 'from sklearn.utils import check_consistent_length\n'), ((3061, 3079), 'numpy.isscalar', 'np.isscalar', (['ys[0]'], {}), '(ys[0])\n', (3072, 3079), True, 'import numpy as np\n'), ((3212, 3256), 'sklearn.utils.check_consistent_length', 'check_consistent_length', (['f_arr', 'sample_array'], {}), '(f_arr, sample_array)\n', (3235, 3256), False, 'from sklearn.utils import check_consistent_length\n'), ((3596, 3627), 'numpy.asarray', 'np.asarray', (['ys'], {'dtype': 'np.object'}), '(ys, dtype=np.object)\n', (3606, 3627), True, 'import numpy as np\n'), ((3675, 3719), 'sklearn.utils.check_consistent_length', 'check_consistent_length', (['f_arr', 'sample_array'], {}), '(f_arr, sample_array)\n', (3698, 3719), False, 'from sklearn.utils import check_consistent_length\n'), ((3126, 3140), 'numpy.asarray', 'np.asarray', (['ys'], {}), '(ys)\n', (3136, 3140), True, 'import numpy as np\n'), ((3982, 4024), 'sklearn.utils.check_consistent_length', 'check_consistent_length', (['col', 'sample_array'], {}), '(col, sample_array)\n', (4005, 4024), False, 'from sklearn.utils import check_consistent_length\n')] |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simulated Annealing class for global optimization.
Created Feb,20,2020
@author: <NAME>
References
----------
.. [1] <NAME>.; <NAME>.; <NAME>. (1983).
``Optimization by Simulated Annealing". Science. 220 (4598): 671–680.
.. [2] <NAME> and <NAME>, ``Simulated Annealing: Theory
and Applications", Kluwer Academic Publishers, 1987.
  .. [3] W.H. Press et al., ``Numerical Recipes: The Art of Scientific Computing",
Cambridge U. Press, 1987.
  .. [4] Tsallis C. ``Possible generalization of Boltzmann-Gibbs
         statistics". Journal of Statistical Physics, 52, 479-487 (1988).
.. [5] Tsallis C, Stariolo DA. ``Generalized Simulated Annealing."
Physica A, 233, 395-406 (1996).
.. [6] <NAME>, <NAME>, <NAME>, <NAME>. ``Generalized Simulated
Annealing Algorithm and Its Application to the Thomson Model."
Physics Letters A, 233, 216-220 (1997).
.. [7] <NAME>, <NAME>. ``Efficiency of Generalized Simulated
Annealing". Physical Review E, 62, 4473 (2000).
.. [8] <NAME>, <NAME>, <NAME>, <NAME>. ``Generalized
Simulated Annealing for Efficient Global Optimization: the GenSA
Package for R". The R Journal, Volume 5/1 (2013).
.. [9] <NAME>. ``Continuous Global Optimization in R". Journal of
Statistical Software, 60(6), 1 - 45, (2014). DOI:10.18637/jss.v060.i06
.. [10] <NAME> and <NAME> and <NAME>,
"Simulated annealing: A proof of convergence",
IEEE Transactions on Pattern Analysis and Machine Intelligence, 1994.
.. [11] <NAME>, "Simulated Annealing: Practice versus theory",
Math. Comput. Modelling, 1993.
.. [12] <NAME>, "Optimization by simulated annealing: Quantitative studies",
Journal of Statistical Physics, 1983.
.. [13] <NAME> and <NAME>, "Global wiring by simulated annealing",
IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems, 1983.
"""
#External Modules------------------------------------------------------------------------------------
import numpy as np
from collections import deque, defaultdict
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from utils import mathUtils, randomUtils, InputData, InputTypes
from .RavenSampled import RavenSampled
from .stepManipulators import NoConstraintResolutionFound
#Internal Modules End--------------------------------------------------------------------------------
class SimulatedAnnealing(RavenSampled):
  """
    This class performs simulated annealing optimization utilizing several cooling scheduling methods.
    Cooling Schedule includes Boltzmann, Exponential, Cauchy, and VeryFast cooling.
    The Simulated Annealing optimizer is a metaheuristic approach to perform a global
    search in large design spaces. The methodology rose from statistical physics
    and was inspired by metallurgy where it was found that fast cooling might lead
    to smaller and defected crystals, and that reheating and slowly controlling cooling
    will lead to better states. This allows climbing to avoid being stuck in local minima
    and hence facilitates finding the global minima for non-convex problems.
  """
  # user-manual text for each supported <convergence> criterion (raw LaTeX,
  # rendered into the input specification by getInputSpecification)
  convergenceOptions = {'objective': r""" provides the desired value for the convergence criterion of the objective function
                        ($\epsilon^{obj}$), i.e., convergence is reached when: $$ |newObjevtive - oldObjective| \le \epsilon^{obj}$$.
                        \default{1e-6}, if no criteria specified""",
                        'temperature': r""" provides the desired value for the convergence creiteron of the system temperature,
                        ($\epsilon^{temp}$), i.e., convergence is reached when: $$T \le \epsilon^{temp}$$.
                        \default{1e-10}, if no criteria specified"""}
  # supported cooling schedules and the tuning parameter each one accepts;
  # commented-out entries are schedules that are currently disabled
  coolingOptions = {#'linear': {'beta':r"""slope"""},
                    'exponential':{'alpha':r"""slowing down constant, should be between 0,1 and preferable very close to 1. \default{0.94}"""},
                    #'fast':{'c':r"""decay constant, \default{1.0}"""},
                    'veryfast':{'c':r"""decay constant, \default{1.0}"""},
                    'cauchy':{'d':r"""bias, \default{1.0}"""},
                    'boltzmann':{'d':r"""bias, \default{1.0}"""}}
  ##########################
  # Initialization Methods #
  ##########################
  @classmethod
  def getInputSpecification(cls):
    """
      Method to get a reference to a class that specifies the input data for class cls.
      @ In, cls, the class for which we are retrieving the specification
      @ Out, specs, InputData.ParameterInput, class to use for specifying input of cls.
    """
    specs = super(SimulatedAnnealing, cls).getInputSpecification()
    specs.description = r"""The \xmlNode{SimulatedAnnealing} optimizer is a metaheuristic approach
                        to perform a global search in large design spaces. The methodology rose
                        from statistical physics and was inspired by metallurgy where
                        it was found that fast cooling might lead to smaller and defected crystals,
                        and that reheating and slowly controlling cooling will lead to better states.
                        This allows climbing to avoid being stuck in local minima and hence facilitates
                        finding the global minima for non-convex problems.
                        More information can be found in: <NAME>.; <NAME>.; <NAME>. (1983).
                        ``Optimization by Simulated Annealing". Science. 220 (4598): 671–680."""
    # convergence: one Float subnode per criterion in cls.convergenceOptions
    conv = InputData.parameterInputFactory('convergence', strictMode=True,
                                           printPriority=108,
                                           descr=r"""a node containing the desired convergence criteria for the optimization algorithm.
                                           Note that convergence is met when any one of the convergence criteria is met. If no convergence
                                           criteria are given, then the defaults are used.""")
    specs.addSub(conv)
    for name,descr in cls.convergenceOptions.items():
      conv.addSub(InputData.parameterInputFactory(name, contentType=InputTypes.FloatType,descr=descr,printPriority=108 ))
    # Presistance
    conv.addSub(InputData.parameterInputFactory('persistence', contentType=InputTypes.IntegerType,
                                                printPriority = 109,
                                                descr=r"""provides the number of consecutive times convergence should be reached before a trajectory
                                                is considered fully converged. This helps in preventing early false convergence."""))
    # Cooling Schedule (description is concatenated raw strings; the commented
    # fragments document schedules that are currently disabled)
    coolingSchedule = InputData.parameterInputFactory('coolingSchedule',
                                                      printPriority=109,
                                                      descr=r""" The function governing the cooling process. Currently, user can select between,"""
                                                      # \xmlString{linear},
                                                      +r"""\xmlString{exponential},
                                                      \xmlString{cauchy},
                                                      \xmlString{boltzmann},"""
                                                      # \xmlString{fast},
                                                      +r"""or \xmlString{veryfast}.\\ \\"""
                                                      #In case of \xmlString{linear} is provided, The cooling process will be governed by: $$ T^{k} = T^0 - 0.1 * k$$
                                                      +r"""In case of \xmlString{exponential} is provided, The cooling process will be governed by: $$ T^{k} = T^0 * \alpha^k$$
                                                      In case of \xmlString{boltzmann} is provided, The cooling process will be governed by: $$ T^{k} = \frac{T^0}{log(k + d)}$$
                                                      In case of \xmlString{cauchy} is provided, The cooling process will be governed by: $$ T^{k} = \frac{T^0}{k + d}$$"""
                                                      #In case of \xmlString{fast} is provided, The cooling process will be governed by: $$ T^{k} = T^0 * \exp(-ck)$$
                                                      +r"""In case of \xmlString{veryfast} is provided, The cooling process will be governed by: $$ T^{k} = T^0 * \exp(-ck^{1/D}),$$
                                                      where $D$ is the dimentionality of the problem (i.e., number of optimized variables), $k$ is the number of the current iteration
                                                      $T^{0} = \max{(0.01,1-\frac{k}{\xmlNode{limit}})}$ is the initial temperature, and $T^{k}$ is the current temperature
                                                      according to the specified cooling schedule.
                                                      \default{exponential}.""")
    specs.addSub(coolingSchedule)
    # each schedule gets its own subnode carrying its tuning parameter(s)
    for schedule,param in cls.coolingOptions.items(): # FIXME: right now this allows multiple cooling schedule, which should be fixed as soon as
                                                      # InputData can allow having list of subnodes
      sch = InputData.parameterInputFactory(schedule, contentType = InputTypes.StringType,descr=schedule+' cooling schedule')
      for par,descr in param.items():
        sch.addSub(InputData.parameterInputFactory(par, contentType = InputTypes.FloatType,descr=descr))
      coolingSchedule.addSub(sch)
    return specs
@classmethod
def getSolutionExportVariableNames(cls):
"""
Compiles a list of acceptable SolutionExport variable options.
@ In, cls, the class for which we are retrieving the solution export
@ Out, ok, dict, {varName: description} for valid solution export variable names
"""
# cannot be determined before run-time due to variables and prefixes.
ok = super(SimulatedAnnealing, cls).getSolutionExportVariableNames()
new = {}
# new = {'': 'the size of step taken in the normalized input space to arrive at each optimal point'}
new['conv_{CONV}'] = 'status of each given convergence criteria'
# TODO need to include StepManipulators and GradientApproximators solution export entries as well!
# # -> but really should only include active ones, not all of them. This seems like it should work
# # when the InputData can scan forward to determine which entities are actually used.
new['amp_{VAR}'] = 'amplitude associated to each variable used to compute step size based on cooling method and the corresponding next neighbor'
new ['delta_{VAR}'] = 'step size associated to each variable'
new['Temp'] = 'temperature at current state'
new['fraction'] = 'current fraction of the max iteration limit'
ok.update(new)
return ok
  def __init__(self):
    """
      Constructor.
      @ In, None
      @ Out, None
    """
    RavenSampled.__init__(self)
    self._convergenceCriteria = defaultdict(mathUtils.giveZero) # names and values for convergence checks; missing keys default to 0
    self._acceptHistory = {}       # by traj, deque of recent acceptability decisions
    self._acceptRerun = {}         # by traj, if True then override accept for point rerun
    self._convergenceInfo = {}     # by traj, the persistence and convergence information for most recent opt
    self._requiredPersistence = 0  # consecutive persistence required to mark convergence (0 = not yet set by input)
    self.T = None                  # current temperature
    self._coolingMethod = None     # cooling schedule name; set in handleInput
    self._coolingParameters = {}   # cooling schedule tuning parameters; set in handleInput
def handleInput(self, paramInput):
  """
  Read input specs
  @ In, paramInput, InputData.ParameterInput, parameter specs interpreted
  @ Out, None
  """
  RavenSampled.handleInput(self, paramInput)
  # Convergence Criterion
  convNode = paramInput.findFirst('convergence')
  if convNode is not None:
    for sub in convNode.subparts:
      if sub.getName() == 'persistence':
        self._requiredPersistence = sub.value
      else:
        self._convergenceCriteria[sub.name] = sub.value
  if not self._convergenceCriteria:
    self.raiseAWarning('No convergence criteria given; using defaults.')
    self._convergenceCriteria['objective'] = 1e-6
    self._convergenceCriteria['temperature'] = 1e-10
  # same point is ALWAYS a criterion
  self._convergenceCriteria['samePoint'] = -1 # For simulated Annealing samePoint convergence
                                              # should not be one of the stopping criteria
  # set persistence to 1 if not set
  if self._requiredPersistence is None:
    self.raiseADebug('No persistence given; setting to 1.')
    self._requiredPersistence = 1
  # Cooling Schedule
  coolingNode = paramInput.findFirst('coolingSchedule')
  if coolingNode is None:
    self._coolingMethod = 'exponential'
  else:
    for sub in coolingNode.subparts:
      self._coolingMethod = sub.name
      for subSub in sub.subparts:
        # BUGFIX: accumulate each parameter by key; the previous code rebound the
        # whole dict every iteration, silently keeping only the LAST parameter.
        self._coolingParameters[subSub.name] = subSub.value
  # defaults
  if not self._coolingMethod:
    self._coolingMethod = 'exponential'
  if not self._coolingParameters:
    self._coolingParameters['alpha'] = 0.94
    self._coolingParameters['beta'] = 0.1
    self._coolingParameters['c'] = 1.0
    self._coolingParameters['d'] = 1.0
def initialize(self, externalSeeding=None, solutionExport=None):
  """
  This function should be called every time a clean optimizer is needed. Called before takeAstep in <Step>
  @ In, externalSeeding, int, optional, external seed
  @ In, solutionExport, DataObject, optional, a PointSet to hold the solution
  @ Out, None
  """
  RavenSampled.initialize(self, externalSeeding=externalSeeding, solutionExport=solutionExport)
  # per-variable perturbation metadata ('amp_*'/'delta_*'); filled in by _nextNeighbour
  self.info = {}
  for var in self.toBeSampled:
    self.info['amp_'+var] = None
    self.info['delta_'+var] = None
  # queue up the first run for each trajectory
  for traj, init in enumerate(self._initialValues):
    self._submitRun(init, traj, self.getIteration(traj))
def initializeTrajectory(self, traj=None):
  """
  Handles the generation of a trajectory.
  @ In, traj, int, optional, label to use
  @ Out, traj, int, new trajectory number
  """
  traj = RavenSampled.initializeTrajectory(self)
  # bounded history of accept/reject decisions for this trajectory
  self._acceptHistory[traj] = deque(maxlen=self._maxHistLen)
  self._acceptRerun[traj] = False
  # persistence counts consecutive converged checks; all criteria start unconverged
  self._convergenceInfo[traj] = {'persistence': 0}
  for criteria in self._convergenceCriteria:
    self._convergenceInfo[traj][criteria] = False
  return traj
def _submitRun(self, point, traj, step, moreInfo=None):
  """
  Submits a single run with associated info to the submission queue
  @ In, point, dict, point to submit
  @ In, traj, int, trajectory identifier
  @ In, step, int, iteration number identifier
  @ In, moreInfo, dict, optional, additional run-identifying information to track
  @ Out, None
  """
  # start from any caller-supplied metadata, then stamp trajectory and step on top
  info = dict(moreInfo) if moreInfo is not None else {}
  info['traj'] = traj
  info['step'] = step
  # NOTE: explicit constraints have been checked before this!
  self.raiseADebug('Adding run to queue: {} | {}'.format(self.denormalizeData(point), info))
  self._submissionQueue.append((point, info))
# END queuing Runs
# * * * * * * * * * * * * * * * *
###############
# Run Methods #
###############
def _useRealization(self, info, rlz):
  """
  Used to feedback the collected runs into actionable items within the sampler.
  @ In, info, dict, identifying information about the realization
  @ In, rlz, dict, realized realization
  @ Out, None
  """
  traj = info['traj']
  info['optVal'] = rlz[self._objectiveVar]
  self.incrementIteration(traj)
  self._resolveNewOptPoint(traj, rlz, rlz[self._objectiveVar], info)
  # BUGFIX: identity comparison with None (was `== None`)
  if self._stepTracker[traj]['opt'] is None:
    # no new opt point was accepted this step; revert to the last accepted point
    rlz = self._optPointHistory[traj][-1][0]
    info = self._optPointHistory[traj][-1][1]
    info['step'] = self.getIteration(traj)
  optVal = rlz[self._objectiveVar]
  # renamed from `iter` to avoid shadowing the builtin
  iterNum = int(self.getIteration(traj) + 1) # Is that ok or should we always keep the traj in case I have multiple trajectories in parallel?
  fraction = iterNum / self.limit
  currentPoint = self._collectOptPoint(rlz)
  # cool the temperature for this iteration, then perturb to the next candidate
  T0 = self._temperature(fraction)
  self.T = self._coolingSchedule(iterNum, T0)
  if traj in self._activeTraj:
    newPoint = self._nextNeighbour(rlz, fraction)
    # check new opt point against constraints
    try:
      suggested, modded = self._handleExplicitConstraints(newPoint, currentPoint, 'opt')
    except NoConstraintResolutionFound:
      # we've tried everything, but we just can't hack it
      self.raiseAMessage('Optimizer "{}" trajectory {} was unable to continue due to functional or boundary constraints.'
                         .format(self.name, traj))
      self._closeTrajectory(traj, 'converge', 'no constraint resolution', newPoint[self._objectiveVar])
      return
    self._submitRun(suggested, traj, self.getIteration(traj))
# * * * * * * * * * * * * * * * *
# Convergence Checks
# reuse the base sampler's standard convergence-report format string
convFormat = RavenSampled.convFormat
# NOTE checkConvSamePoint has a different call than the others
def checkConvergence(self, traj, new, old):
  """
  Check for trajectory convergence
  @ In, traj, int, trajectory to consider
  @ In, new, dict, new point
  @ In, old, dict, old point
  @ Out, converged, bool, True if ANY of the convergence criteria was reached
  @ Out, convs, dict, per-criterion result: convs[conv] = bool for conv in self._convergenceCriteria
  """
  convs = {}
  for criterion in self._convergenceCriteria:
    # the same-point check takes the two points rather than a trajectory id
    if criterion == 'samePoint':
      convs[criterion] = self._checkConvSamePoint(new, old)
      continue
    # generic checkers follow the _checkConv<Criterion> naming convention (capitalized)
    checkerName = '_checkConv{}'.format(criterion[:1].upper() + criterion[1:])
    checker = getattr(self, checkerName)
    convs[criterion] = checker(traj)
  return any(convs.values()), convs
def _checkConvSamePoint(self, new, old):
  """
  Checks for a repeated same point
  @ In, new, dict, new opt point
  @ In, old, dict, old opt point
  @ Out, converged, bool, convergence state
  """
  # TODO diff within tolerance? Exactly equivalent seems good for now
  # NOTE(review): this tests abs(diff) == self._convergenceCriteria['samePoint'],
  # which handleInput pins to -1; abs() can never equal -1, so each term is always
  # False. Looks like a tolerance comparison (<=) may have been intended — confirm.
  same = list(abs(new[var] - old[var])==self._convergenceCriteria['samePoint'] for var in self.toBeSampled)
  converged = all(same)
  self.raiseADebug(self.convFormat.format(name='same point',
                                          conv=str(converged),
                                          got=sum(same),
                                          req=len(same)))
  return converged
def _checkConvObjective(self, traj):
"""
Checks the change in objective for convergence
@ In, traj, int, trajectory identifier
@ Out, converged, bool, convergence state
"""
if len(self._optPointHistory[traj]) < 2 or (self._convergenceCriteria['objective'] < 0):
return False
o1, _ = self._optPointHistory[traj][-1]
o2, _ = self._optPointHistory[traj][-2]
delta = o2[self._objectiveVar]-o1[self._objectiveVar]
converged = abs(delta) < self._convergenceCriteria['objective']
self.raiseADebug(self.convFormat.format(name='objective',
conv=str(converged),
got=delta,
req=self._convergenceCriteria['objective']))
return converged
def _checkConvTemperature(self, traj):
"""
Checks temperature for the current state for convergence
@ In, traj, int, trajectory identifier
@ Out, converged, bool, convergence state
"""
converged = abs(self.T) <= self._convergenceCriteria['temperature']
self.raiseADebug(self.convFormat.format(name='temperature',
conv=str(converged),
got=self.T,
req=self._convergenceCriteria['temperature']))
return converged
def _checkForImprovement(self, new, old):
"""
Determine if the new value is sufficient improved over the old.
@ In, new, float, new optimization value
@ In, old, float, previous optimization value
@ Out, improved, bool, True if "sufficiently" improved or False if not.
"""
# This is not required for simulated annealing as it's handled in the probabilistic acceptance criteria
# But since it is an abstract method it has to exist
return True
def _checkAcceptability(self, traj, opt, optVal, info):
  """
  Check if new opt point is acceptably better than the old one
  @ In, traj, int, identifier
  @ In, opt, dict, new opt point
  @ In, optVal, float, new optimization value
  @ In, info, dict, meta information about the opt point
  @ Out, acceptable, str, acceptability condition for point ('first', 'rerun', 'accepted', 'rejected')
  @ Out, old, dict, old opt point (None on the first sample)
  @ Out, rejectReason, str, reject reason of opt point, or return None if accepted
  """
  # Check acceptability
  # NOTE: if self._optPointHistory[traj]: -> faster to use "try" for all but the first time
  try:
    old, _ = self._optPointHistory[traj][-1]
    oldVal = old[self._objectiveVar]
    # check if same point
    self.raiseADebug(' ... change: {d: 1.3e} new objective: {n: 1.6e} old objective: {o: 1.6e}'
                     .format(d=opt[self._objectiveVar]-oldVal, o=oldVal, n=opt[self._objectiveVar]))
    # if this is an opt point rerun, accept it without checking.
    if self._acceptRerun[traj]:
      acceptable = 'rerun'
      self._acceptRerun[traj] = False
    elif all(opt[var] == old[var] for var in self.toBeSampled):
      # this is the classic "same point" trap; we accept the same point, and check convergence later
      acceptable = 'accepted'
    else:
      # probabilistic (Metropolis-style) acceptance: compare acceptance
      # probability against a uniform random draw
      if self._acceptabilityCriterion(oldVal,opt[self._objectiveVar])>randomUtils.random(dim=1, samples=1): # TODO replace it back
        acceptable = 'accepted'
      else:
        acceptable = 'rejected'
  except IndexError:
    # if first sample, simply assume it's better!
    acceptable = 'first'
    old = None
  self._acceptHistory[traj].append(acceptable)
  self.raiseADebug(' ... {a}!'.format(a=acceptable))
  # NOTE(review): the third return is the literal string 'None', not the None object
  return acceptable, old, 'None'
def _acceptabilityCriterion(self,currentObjective,newObjective):
"""
Check if new opt point is acceptably better than the old one
@ In, currentObjective, float, the current value of the objective function (i.e., current energy)
@ In, newObjective, float, the value of the objective function at the new candidate
@ Out, prob, float, the acceptance probability
"""
kB = 1
if newObjective <= currentObjective:
prob = 1
else:
deltaE = newObjective - currentObjective
prob = min(1,np.exp(-deltaE/(kB * self.T)))
return prob
def _updateConvergence(self, traj, new, old, acceptable):
"""
Updates convergence information for trajectory
@ In, traj, int, identifier
@ In, new, dict, new point
@ In, old, dict, old point
@ In, acceptable, str, condition of new point
@ Out, converged, bool, True if converged on ANY criteria
"""
## NOTE we have multiple "if acceptable" trees here, as we need to update soln export regardless
if acceptable == 'accepted':
self.raiseADebug('Convergence Check for Trajectory {}:'.format(traj))
# check convergence
converged, convDict = self.checkConvergence(traj, new, old)
else:
converged = False
convDict = dict((var, False) for var in self._convergenceInfo[traj])
self._convergenceInfo[traj].update(convDict)
return converged
def _updatePersistence(self, traj, converged, optVal):
"""
Update persistence tracking state variables
@ In, traj, identifier
@ In, converged, bool, convergence check result
@ In, optVal, float, new optimal value
@ Out, None
"""
# update persistence
if converged:
self._convergenceInfo[traj]['persistence'] += 1
self.raiseADebug('Trajectory {} has converged successfully {} time(s)!'.format(traj, self._convergenceInfo[traj]['persistence']))
if self._convergenceInfo[traj]['persistence'] >= self._requiredPersistence:
self._closeTrajectory(traj, 'converge', 'converged', optVal)
else:
self._convergenceInfo[traj]['persistence'] = 0
self.raiseADebug('Resetting convergence for trajectory {}.'.format(traj))
def _addToSolutionExport(self, traj, rlz, acceptable):
"""
Contributes additional entries to the solution export.
@ In, traj, int, trajectory which should be written
@ In, rlz, dict, collected point
@ In, acceptable, bool, acceptability of opt point
@ Out, toAdd, dict, additional entries
"""
# meta variables
toAdd = {'Temp': self.T,
'fraction': self.getIteration(traj)/self.limit
}
for var in self.toBeSampled:
toAdd['amp_'+var] = self.info['amp_'+var]
toAdd['delta_'+var] = self.info['delta_'+var]
for var, val in self.constants.items():
toAdd[var] = val
toAdd = dict((key, np.atleast_1d(val)) for key, val in toAdd.items())
for key, val in self._convergenceInfo[traj].items():
toAdd['conv_{}'.format(key)] = bool(val)
return toAdd
def _formatSolutionExportVariableNames(self, acceptable):
  """
  Does magic formatting for variables, based on this class's needs.
  Extend in inheritors as needed.
  @ In, acceptable, set, set of acceptable entries for solution export for this entity
  @ Out, new, set, modified set of acceptable variables with all formatting complete
  """
  # expand the base-class templates against this optimizer's criteria/variables
  templates = RavenSampled._formatSolutionExportVariableNames(self, acceptable)
  expanded = []
  for template in templates:
    if '{CONV}' in template:
      expanded.extend(template.format(CONV=conv) for conv in self._convergenceCriteria)
    elif '{VAR}' in template:
      expanded.extend(template.format(VAR=var) for var in self.toBeSampled.keys())
    else:
      expanded.append(template)
  return set(expanded)
def _rejectOptPoint(self, traj, info, old):
  """
  Having rejected the suggested opt point, take actions so we can move forward
  @ In, traj, int, identifier
  @ In, info, dict, meta information about the opt point
  @ In, old, dict, previous optimal point (to resubmit)
  @ Out, none
  """
  # cancel any in-flight evaluations tied to the rejected step before restarting
  self._cancelAssociatedJobs(info['traj'], step=info['step'])
  # initialize a new step
  self._initializeStep(traj)
def _applyFunctionalConstraints(self, suggested, previous):
  """
  applies functional constraints of variables in "suggested" -> DENORMED point expected!
  @ In, suggested, dict, potential point to apply constraints to
  @ In, previous, dict, previous opt point in consideration
  @ Out, point, dict, adjusted variables
  @ Out, modded, bool, whether point was modified or not
  """
  # assume no modifications until proved otherwise
  modded = False
  # are we violating functional constraints?
  passFuncs = self._checkFunctionalConstraints(self.denormalizeData(suggested))
  # while in violation of constraints, keep perturbing (bounded number of tries)
  tries = 500
  while not passFuncs:
    modded = True
    # try to find new acceptable point
    # (dead `denormed = self.denormalizeData(suggested)` before the fix removed:
    #  it was overwritten immediately after `suggested` was re-perturbed)
    suggested, _ = self._fixFuncConstraintViolations(suggested)
    denormed = self.denormalizeData(suggested)
    self.raiseADebug(' ... suggested new opt {}'.format(denormed))
    passFuncs = self._checkFunctionalConstraints(denormed)
    tries -= 1
    if tries == 0:
      self.raiseAnError(NotImplementedError, 'No acceptable point findable! Now what?')
  return suggested, modded
###########
# Utility Methods #
###########
def _temperature(self, fraction):
"""
A utility function to compute the initial temperature
currently it is just a function of how far in the process are we
@ In, fraction, float, the current iteration divided by the iteration limit i.e., $\frac{iter}{Limit}$
@ Out, _temperature, float, initial temperature, i.e., $T0 = max(0.01,1-fraction) $
"""
return max(0.01,1-fraction)
def _coolingSchedule(self, iter, T0):
"""
A utility function to compute the current cooled state temperature
based on the user-selected cooling schedule methodology
@ In, iter, int, the iteration number
@ In, T0, float, The previous temperature before cooling
@ Out, _coolingSchedule, float, the cooled state temperature i.e., $T^{k} = f(T^0, coolingSchedule);$ where k is the iteration number
"""
type = self._coolingMethod
if type in ['exponential','geometric']:
alpha = self._coolingParameters['alpha']
return alpha ** iter * T0
elif type == 'boltzmann':
d = self._coolingParameters['d']
return T0/(np.log10(iter + d))
elif type == 'veryfast':
c = self._coolingParameters['c']
return np.exp(-c*iter**(1/len(self.toBeSampled.keys()))) * T0
elif type == 'cauchy':
d = self._coolingParameters['d']
return T0/(iter + d)
else:
self.raiseAnError(NotImplementedError,'cooling schedule type not implemented.')
def _nextNeighbour(self, rlz, fraction=1):
  r"""
  Perturbs the state to find the next random neighbor based on the cooling schedule
  @ In, rlz, dict, current realization
  @ In, fraction, float, optional, the current iteration divided by the iteration limit i.e., $\frac{iter}{Limit}$
  @ Out, nextNeighbour, dict, the next random state
  for exponential cooling:
  .. math::
      fraction = \\frac{iter}{Limit}
      amp = 1-fraction
      delta = \\frac{-amp}{2} + amp * r
  where :math: `r \sim \mathcal{U}(0,1)`
  for boltzmann cooling:
  .. math::
      amp = min(\\sqrt(T), \\frac{1}{3*alpha}
      delta = r * alpha * amp
  where :math: `r \\sim \\mathcal{N}(0,1)`
  for cauchy cooling:
  .. math::
      amp = r
      delta = alpha * T * tan(amp)
  where :math: `r \\sim \\mathcal{U}(-\\pi,\\pi)`
  for veryfast cooling:
  .. math::
      amp = r
      delta = \\sign(amp-0.5)*T*((1.0+\\frac{1.0}{T})^{\\abs{2*amp-1}-1.0}
  where :math: `r \\sim \\mathcal{U}(0,1)`
  """
  nextNeighbour = {}
  # dimensionality: one perturbation component per optimized variable
  D = len(self.toBeSampled.keys())
  alpha = 0.94
  if self._coolingMethod in ['exponential', 'geometric']:
    # NOTE(review): amp = 1/(20*fraction) diverges as fraction -> 0; callers appear
    # to pass fraction >= 1/limit — confirm fraction is never zero here
    amp = ((fraction)**-1) / 20
    r = randomUtils.random(dim=D, samples=1)
    delta = (-amp/2.) + amp * r
  elif self._coolingMethod == 'boltzmann':
    amp = min(np.sqrt(self.T), 1/3.0/alpha)
    delta = randomUtils.randomNormal(size=D)*alpha*amp
  elif self._coolingMethod == 'veryfast':
    amp = randomUtils.random(dim=D, samples=1)
    delta = np.sign(amp-0.5)*self.T*((1+1.0/self.T)**abs(2*amp-1)-1.0)
  elif self._coolingMethod == 'cauchy':
    amp = (np.pi - (-np.pi))*randomUtils.random(dim=D, samples=1)-np.pi
    delta = alpha*self.T*np.tan(amp)
  # apply the per-variable perturbation and record it for the solution export
  for i, var in enumerate(self.toBeSampled.keys()):
    nextNeighbour[var] = rlz[var] + delta[i]
    self.info['amp_'+var] = amp
    self.info['delta_'+var] = delta[i]
  self.info['fraction'] = fraction
  return nextNeighbour
def _fixFuncConstraintViolations(self, suggested):
  """
  fixes functional constraints of variables in "suggested"
  and finds the new point that does not violate the constraints
  @ In, suggested, dict, potential point to apply constraints to
  @ Out, point, dict, adjusted variables
  @ Out, modded, bool, whether point was modified or not
  """
  # re-perturb from the violating point at the same schedule fraction,
  # then re-check explicit (boundary) constraints on the new candidate
  fraction = self.info['fraction']
  new = self._nextNeighbour(suggested, fraction)
  point, modded = self._handleExplicitConstraints(new, suggested, 'opt')
  return point, modded
##############
# Destructor #
##############
def __del__(self):
"""
Destructor.
@ In, None
@ Out, None
"""
return
| [
"utils.InputData.parameterInputFactory",
"numpy.log10",
"collections.deque",
"numpy.sqrt",
"numpy.tan",
"utils.randomUtils.random",
"numpy.exp",
"collections.defaultdict",
"numpy.sign",
"utils.randomUtils.randomNormal",
"numpy.atleast_1d"
] | [((6417, 6781), 'utils.InputData.parameterInputFactory', 'InputData.parameterInputFactory', (['"""convergence"""'], {'strictMode': '(True)', 'printPriority': '(108)', 'descr': '"""a node containing the desired convergence criteria for the optimization algorithm.\n Note that convergence is met when any one of the convergence criteria is met. If no convergence\n criteria are given, then the defaults are used."""'}), '(\'convergence\', strictMode=True,\n printPriority=108, descr=\n """a node containing the desired convergence criteria for the optimization algorithm.\n Note that convergence is met when any one of the convergence criteria is met. If no convergence\n criteria are given, then the defaults are used."""\n )\n', (6448, 6781), False, 'from utils import mathUtils, randomUtils, InputData, InputTypes\n'), ((7387, 8671), 'utils.InputData.parameterInputFactory', 'InputData.parameterInputFactory', (['"""coolingSchedule"""'], {'printPriority': '(109)', 'descr': '(\n \' The function governing the cooling process. 
Currently, user can select between,\'\n +\n """\\\\xmlString{exponential},\n \\\\xmlString{cauchy},\n \\\\xmlString{boltzmann},"""\n + \'or \\\\xmlString{veryfast}.\\\\\\\\ \\\\\\\\\' +\n """In case of \\\\xmlString{exponential} is provided, The cooling process will be governed by: $$ T^{k} = T^0 * \\\\alpha^k$$\n In case of \\\\xmlString{boltzmann} is provided, The cooling process will be governed by: $$ T^{k} = \\\\frac{T^0}{log(k + d)}$$\n In case of \\\\xmlString{cauchy} is provided, The cooling process will be governed by: $$ T^{k} = \\\\frac{T^0}{k + d}$$"""\n +\n """In case of \\\\xmlString{veryfast} is provided, The cooling process will be governed by: $$ T^{k} = T^0 * \\\\exp(-ck^{1/D}),$$\n where $D$ is the dimentionality of the problem (i.e., number of optimized variables), $k$ is the number of the current iteration\n $T^{0} = \\\\max{(0.01,1-\\\\frac{k}{\\\\xmlNode{limit}})}$ is the initial temperature, and $T^{k}$ is the current temperature\n according to the specified cooling schedule.\n \\\\default{exponential}."""\n )'}), '(\'coolingSchedule\', printPriority=109, descr\n =\n \' The function governing the cooling process. 
Currently, user can select between,\'\n +\n """\\\\xmlString{exponential},\n \\\\xmlString{cauchy},\n \\\\xmlString{boltzmann},"""\n + \'or \\\\xmlString{veryfast}.\\\\\\\\ \\\\\\\\\' +\n """In case of \\\\xmlString{exponential} is provided, The cooling process will be governed by: $$ T^{k} = T^0 * \\\\alpha^k$$\n In case of \\\\xmlString{boltzmann} is provided, The cooling process will be governed by: $$ T^{k} = \\\\frac{T^0}{log(k + d)}$$\n In case of \\\\xmlString{cauchy} is provided, The cooling process will be governed by: $$ T^{k} = \\\\frac{T^0}{k + d}$$"""\n +\n """In case of \\\\xmlString{veryfast} is provided, The cooling process will be governed by: $$ T^{k} = T^0 * \\\\exp(-ck^{1/D}),$$\n where $D$ is the dimentionality of the problem (i.e., number of optimized variables), $k$ is the number of the current iteration\n $T^{0} = \\\\max{(0.01,1-\\\\frac{k}{\\\\xmlNode{limit}})}$ is the initial temperature, and $T^{k}$ is the current temperature\n according to the specified cooling schedule.\n \\\\default{exponential}."""\n )\n', (7418, 8671), False, 'from utils import mathUtils, randomUtils, InputData, InputTypes\n'), ((11115, 11146), 'collections.defaultdict', 'defaultdict', (['mathUtils.giveZero'], {}), '(mathUtils.giveZero)\n', (11126, 11146), False, 'from collections import deque, defaultdict\n'), ((14745, 14775), 'collections.deque', 'deque', ([], {'maxlen': 'self._maxHistLen'}), '(maxlen=self._maxHistLen)\n', (14750, 14775), False, 'from collections import deque, defaultdict\n'), ((7020, 7335), 'utils.InputData.parameterInputFactory', 'InputData.parameterInputFactory', (['"""persistence"""'], {'contentType': 'InputTypes.IntegerType', 'printPriority': '(109)', 'descr': '"""provides the number of consecutive times convergence should be reached before a trajectory\n is considered fully converged. 
This helps in preventing early false convergence."""'}), '(\'persistence\', contentType=InputTypes.\n IntegerType, printPriority=109, descr=\n """provides the number of consecutive times convergence should be reached before a trajectory\n is considered fully converged. This helps in preventing early false convergence."""\n )\n', (7051, 7335), False, 'from utils import mathUtils, randomUtils, InputData, InputTypes\n'), ((9345, 9463), 'utils.InputData.parameterInputFactory', 'InputData.parameterInputFactory', (['schedule'], {'contentType': 'InputTypes.StringType', 'descr': "(schedule + ' cooling schedule')"}), "(schedule, contentType=InputTypes.StringType,\n descr=schedule + ' cooling schedule')\n", (9376, 9463), False, 'from utils import mathUtils, randomUtils, InputData, InputTypes\n'), ((31578, 31614), 'utils.randomUtils.random', 'randomUtils.random', ([], {'dim': 'D', 'samples': '(1)'}), '(dim=D, samples=1)\n', (31596, 31614), False, 'from utils import mathUtils, randomUtils, InputData, InputTypes\n'), ((6880, 6987), 'utils.InputData.parameterInputFactory', 'InputData.parameterInputFactory', (['name'], {'contentType': 'InputTypes.FloatType', 'descr': 'descr', 'printPriority': '(108)'}), '(name, contentType=InputTypes.FloatType,\n descr=descr, printPriority=108)\n', (6911, 6987), False, 'from utils import mathUtils, randomUtils, InputData, InputTypes\n'), ((23669, 23700), 'numpy.exp', 'np.exp', (['(-deltaE / (kB * self.T))'], {}), '(-deltaE / (kB * self.T))\n', (23675, 23700), True, 'import numpy as np\n'), ((9516, 9603), 'utils.InputData.parameterInputFactory', 'InputData.parameterInputFactory', (['par'], {'contentType': 'InputTypes.FloatType', 'descr': 'descr'}), '(par, contentType=InputTypes.FloatType,\n descr=descr)\n', (9547, 9603), False, 'from utils import mathUtils, randomUtils, InputData, InputTypes\n'), ((26031, 26049), 'numpy.atleast_1d', 'np.atleast_1d', (['val'], {}), '(val)\n', (26044, 26049), True, 'import numpy as np\n'), ((29946, 29964), 
'numpy.log10', 'np.log10', (['(iter + d)'], {}), '(iter + d)\n', (29954, 29964), True, 'import numpy as np\n'), ((31709, 31724), 'numpy.sqrt', 'np.sqrt', (['self.T'], {}), '(self.T)\n', (31716, 31724), True, 'import numpy as np\n'), ((31853, 31889), 'utils.randomUtils.random', 'randomUtils.random', ([], {'dim': 'D', 'samples': '(1)'}), '(dim=D, samples=1)\n', (31871, 31889), False, 'from utils import mathUtils, randomUtils, InputData, InputTypes\n'), ((22726, 22762), 'utils.randomUtils.random', 'randomUtils.random', ([], {'dim': '(1)', 'samples': '(1)'}), '(dim=1, samples=1)\n', (22744, 22762), False, 'from utils import mathUtils, randomUtils, InputData, InputTypes\n'), ((31754, 31786), 'utils.randomUtils.randomNormal', 'randomUtils.randomNormal', ([], {'size': 'D'}), '(size=D)\n', (31778, 31786), False, 'from utils import mathUtils, randomUtils, InputData, InputTypes\n'), ((31904, 31922), 'numpy.sign', 'np.sign', (['(amp - 0.5)'], {}), '(amp - 0.5)\n', (31911, 31922), True, 'import numpy as np\n'), ((32106, 32117), 'numpy.tan', 'np.tan', (['amp'], {}), '(amp)\n', (32112, 32117), True, 'import numpy as np\n'), ((32036, 32072), 'utils.randomUtils.random', 'randomUtils.random', ([], {'dim': 'D', 'samples': '(1)'}), '(dim=D, samples=1)\n', (32054, 32072), False, 'from utils import mathUtils, randomUtils, InputData, InputTypes\n')] |
#!/usr/bin/python3
import argparse
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import wavfile
from scipy.signal import convolve
# Command-line interface: a signal WAV, an optional reference WAV, and display options.
argument_parser = argparse.ArgumentParser(description='Plots the RMS amplitude of a signal over time.')
argument_parser.add_argument('--wav-file', help='path to the input WAV file', required=True)
argument_parser.add_argument('--reference-wav-file', help='use a WAV file as a reference signal')
argument_parser.add_argument('--relative', help='plot the error between the signal and the reference (if any)', action='store_true')
# the two amplitude-as-x-axis modes are mutually exclusive
against_amplitude_group = argument_parser.add_mutually_exclusive_group()
against_amplitude_group.add_argument('--against-amplitude', help='plot against reference amplitude (if any), not time', action='store_true')
against_amplitude_group.add_argument('--against-normalized-amplitude', help='plot against normalized reference amplitude (if any), not time', action='store_true')
argument_parser.add_argument('--center', help='offset the Y axis such that that the median is 0 dB', action='store_true')
argument_parser.add_argument('--window-size-seconds', help='size of sliding window, in seconds', type=float, default=0.01)
argument_parser.add_argument('--x-label', help='X axis label')
argument_parser.add_argument('--y-label', help='Y axis label')
args = argument_parser.parse_args()
# a non-positive window would make the RMS estimate meaningless
if (args.window_size_seconds <= 0):
    raise RuntimeError('invalid window size')
def wavfile_read_normalized(wav_file):
    """Read a WAV file, scaling integer PCM samples to floats in [-1, 1]."""
    sample_rate_hz, samples = wavfile.read(wav_file)
    if samples.dtype.kind == 'i':
        # normalize by the dtype's positive full-scale value
        full_scale = np.iinfo(samples.dtype).max
        samples = samples.astype(float) / full_scale
    return sample_rate_hz, samples
sample_rate_hz, samples = wavfile_read_normalized(args.wav_file)
# only mono signals are supported
if samples.ndim != 1:
    raise RuntimeError('input file must only have 1 channel')
# sliding-window length in samples (truncated toward zero)
window_size_samples = int(sample_rate_hz * args.window_size_seconds)
def compute_rms_db(samples):
    """Return windowed RMS amplitude in dBFS, one value per (non-overlapping) window.

    Depends on the module-level ``window_size_samples``.
    """
    power = np.square(samples)
    # moving average of the power via convolution; the floor avoids log10(0)
    averaging_kernel = np.ones(window_size_samples) / window_size_samples
    mean_power = np.fmax(convolve(power, averaging_kernel, mode='valid'), 1e-35)
    # decimate to one value per window
    mean_power = mean_power[::window_size_samples]
    rms = np.sqrt(mean_power)
    # *sqrt(2) because of dBFS definition (full-scale sine reference)
    return 20 * np.log10(rms * np.sqrt(2))
samples_rms_db = compute_rms_db(samples)
# set up the plot; axis labels here are defaults and may be overridden below
figure = plt.figure()
axes = figure.add_subplot(1, 1, 1)
axes.set_ylabel('RMS amplitude (dBFS)')
axes.autoscale(axis='x', tight=True)
axes.grid()
axes.set_xlabel('Time (seconds)')
# one x value per window, at the window's trailing edge, in seconds
xaxis = np.arange(window_size_samples, samples.size + 1, window_size_samples) / sample_rate_hz
def plot(x, y, **kwargs):
    # Draw one series, honoring the --center and --against-*-amplitude options.
    # NOTE(review): `y -= np.median(y)` modifies the caller's array in place — confirm intended.
    if args.center:
        y -= np.median(y)
    # amplitude-vs-amplitude points are unordered in x, so a scatter is used there
    if args.against_amplitude or args.against_normalized_amplitude:
        axes.scatter(x, y, **kwargs)
    else:
        axes.plot(x, y, **kwargs)
# Plot the signal alone, or against/relative to the reference signal.
if args.reference_wav_file is None:
    plot(xaxis, samples_rms_db)
else:
    reference_sample_rate_hz, reference_samples = wavfile_read_normalized(args.reference_wav_file)
    if reference_samples.ndim != 1:
        raise RuntimeError('reference file must only have 1 channel')
    if reference_sample_rate_hz != sample_rate_hz:
        # BUGFIX: was `RuntimError` (a NameError at runtime)
        raise RuntimeError('input file and reference file must have the same sample rate')
    if reference_samples.size != samples.size:
        raise RuntimeError('input file and reference file must be the same length')
    reference_sample_rms_db = compute_rms_db(reference_samples)
    if args.against_amplitude:
        xaxis = reference_sample_rms_db
        axes.set_xlabel('Reference amplitude (dBFS)')
    if args.against_normalized_amplitude:
        # shift so the loudest reference window sits at 0 dB
        xaxis = reference_sample_rms_db - np.max(reference_sample_rms_db)
        axes.set_xlabel('Normalized reference amplitude (dB)')
    if args.relative:
        axes.set_ylabel('RMS amplitude error (dB)')
        plot(xaxis, samples_rms_db - reference_sample_rms_db)
    elif args.against_amplitude or args.against_normalized_amplitude:
        plot(xaxis, samples_rms_db)
    else:
        plot(xaxis, reference_sample_rms_db, label='Reference')
        plot(xaxis, samples_rms_db, label='Signal')
        axes.legend()
# user-supplied labels win over the defaults chosen above
if args.x_label is not None:
    axes.set_xlabel(args.x_label)
if args.y_label is not None:
    axes.set_ylabel(args.y_label)
plt.show()
| [
"numpy.median",
"numpy.sqrt",
"numpy.ones",
"argparse.ArgumentParser",
"numpy.iinfo",
"numpy.square",
"numpy.max",
"matplotlib.pyplot.figure",
"scipy.io.wavfile.read",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((170, 260), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Plots the RMS amplitude of a signal over time."""'}), "(description=\n 'Plots the RMS amplitude of a signal over time.')\n", (193, 260), False, 'import argparse\n'), ((2390, 2402), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2400, 2402), True, 'import matplotlib.pyplot as plt\n'), ((4173, 4183), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4181, 4183), True, 'import matplotlib.pyplot as plt\n'), ((1511, 1533), 'scipy.io.wavfile.read', 'wavfile.read', (['wav_file'], {}), '(wav_file)\n', (1523, 1533), False, 'from scipy.io import wavfile\n'), ((1955, 1973), 'numpy.square', 'np.square', (['samples'], {}), '(samples)\n', (1964, 1973), True, 'import numpy as np\n'), ((2189, 2218), 'numpy.sqrt', 'np.sqrt', (['samples_mean_squared'], {}), '(samples_mean_squared)\n', (2196, 2218), True, 'import numpy as np\n'), ((2570, 2639), 'numpy.arange', 'np.arange', (['window_size_samples', '(samples.size + 1)', 'window_size_samples'], {}), '(window_size_samples, samples.size + 1, window_size_samples)\n', (2579, 2639), True, 'import numpy as np\n'), ((2708, 2720), 'numpy.median', 'np.median', (['y'], {}), '(y)\n', (2717, 2720), True, 'import numpy as np\n'), ((1576, 1599), 'numpy.iinfo', 'np.iinfo', (['samples.dtype'], {}), '(samples.dtype)\n', (1584, 1599), True, 'import numpy as np\n'), ((3617, 3648), 'numpy.max', 'np.max', (['reference_sample_rms_db'], {}), '(reference_sample_rms_db)\n', (3623, 3648), True, 'import numpy as np\n'), ((2032, 2060), 'numpy.ones', 'np.ones', (['window_size_samples'], {}), '(window_size_samples)\n', (2039, 2060), True, 'import numpy as np\n'), ((2265, 2275), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2272, 2275), True, 'import numpy as np\n')] |
import numpy as np
import scipy.signal
def delay(vis, inverse=False, taper=None):
    """
    Perform delay transform on visibility data.
    ``vis`` must have shape (Nfreqs, Ntimes).
    """
    nfreqs = vis.shape[0]
    # taper (window) across the frequency axis; default is a flat window
    window = np.ones(nfreqs) if taper is None else np.array(taper(nfreqs))
    tapered = vis * window[:, np.newaxis]
    # forward or inverse FFT along the frequency axis for every time sample
    transform = np.fft.ifft if inverse else np.fft.fft
    return transform(tapered, axis=0)
def fringe_rate(vis, inverse=False, taper=None):
    """
    Perform fringe rate transform on visibility data.
    ``vis`` must have shape (Nfreqs, Ntimes).
    """
    ntimes = vis.shape[1]
    # taper (window) across the time axis; default is a flat window
    window = np.ones(ntimes) if taper is None else np.array(taper(ntimes))
    tapered = vis * window[np.newaxis, :]
    # forward or inverse FFT along the time axis for every frequency channel
    transform = np.fft.ifft if inverse else np.fft.fft
    return transform(tapered, axis=1)
| [
"numpy.fft.fft",
"numpy.fft.ifft",
"numpy.ones"
] | [((320, 341), 'numpy.ones', 'np.ones', (['vis.shape[0]'], {}), '(vis.shape[0])\n', (327, 341), True, 'import numpy as np\n'), ((471, 514), 'numpy.fft.ifft', 'np.fft.ifft', (['(vis * w[:, np.newaxis])'], {'axis': '(0)'}), '(vis * w[:, np.newaxis], axis=0)\n', (482, 514), True, 'import numpy as np\n'), ((588, 630), 'numpy.fft.fft', 'np.fft.fft', (['(vis * w[:, np.newaxis])'], {'axis': '(0)'}), '(vis * w[:, np.newaxis], axis=0)\n', (598, 630), True, 'import numpy as np\n'), ((923, 944), 'numpy.ones', 'np.ones', (['vis.shape[1]'], {}), '(vis.shape[1])\n', (930, 944), True, 'import numpy as np\n'), ((1079, 1122), 'numpy.fft.ifft', 'np.fft.ifft', (['(vis * w[np.newaxis, :])'], {'axis': '(1)'}), '(vis * w[np.newaxis, :], axis=1)\n', (1090, 1122), True, 'import numpy as np\n'), ((1201, 1243), 'numpy.fft.fft', 'np.fft.fft', (['(vis * w[np.newaxis, :])'], {'axis': '(1)'}), '(vis * w[np.newaxis, :], axis=1)\n', (1211, 1243), True, 'import numpy as np\n')] |
import cv2 as cv
import numpy as np
class MorphologicalMixin:
    """Mixin adding 2D morphological and filtering operations.

    Host classes must provide ``self.dim`` (number of image dimensions),
    ``self.data`` (the image array these methods replace in place) and
    ``self.shape`` (image shape, used to allocate result buffers).
    """

    def _require_2d(self) -> None:
        """Shared guard: raise ValueError unless the image is 2D."""
        if self.dim != 2:
            raise ValueError("Only on 2D images")

    def sharpen(self) -> None:
        """Sharpens the image with 3x3 filter. Only works on 2D images."""
        self._require_2d()
        # Center-boost kernel: 9x the center pixel minus its 8 neighbors.
        # (local renamed from ``filter`` to avoid shadowing the builtin)
        kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
        self.data = cv.filter2D(self.data, -1, kernel)

    def open(self, size: tuple = (5, 5), element: int = cv.MORPH_ELLIPSE) -> None:
        """Performs morphological opening on the image. Only works on 2D images.

        Parameters
        ----------
        size: tuple, optional
            Size of the kernel (default is (5, 5))
        element: int, optional
            Structural element (default is cv.MORPH_ELLIPSE)
        """
        self._require_2d()
        kernel = cv.getStructuringElement(element, size)
        self.data = cv.morphologyEx(self.data, cv.MORPH_OPEN, kernel)

    def close(self, size: tuple = (5, 5), element: int = cv.MORPH_ELLIPSE) -> None:
        """Performs morphological closing on the image. Only works on 2D images.

        Parameters
        ----------
        size: tuple, optional
            Size of the kernel (default is (5, 5))
        element: int, optional
            Structural element (default is cv.MORPH_ELLIPSE)
        """
        self._require_2d()
        kernel = cv.getStructuringElement(element, size)
        self.data = cv.morphologyEx(self.data, cv.MORPH_CLOSE, kernel)

    def dilate(self, size: tuple = (5, 5), element: int = cv.MORPH_ELLIPSE) -> None:
        """Performs morphological dilatation on the image. Only works on 2D images.

        Parameters
        ----------
        size: tuple, optional
            Size of the kernel (default is (5, 5))
        element: int, optional
            Structural element (default is cv.MORPH_ELLIPSE)
        """
        self._require_2d()
        kernel = cv.getStructuringElement(element, size)
        self.data = cv.morphologyEx(self.data, cv.MORPH_DILATE, kernel)

    def erode(self, size: tuple = (5, 5), element: int = cv.MORPH_ELLIPSE) -> None:
        """Performs morphological erosion on the image. Only works on 2D images.

        Parameters
        ----------
        size: tuple, optional
            Size of the kernel (default is (5, 5))
        element: int, optional
            Structural element (default is cv.MORPH_ELLIPSE)
        """
        self._require_2d()
        kernel = cv.getStructuringElement(element, size)
        self.data = cv.morphologyEx(self.data, cv.MORPH_ERODE, kernel)

    def tophat(self, size: int = 5, element: int = cv.MORPH_ELLIPSE) -> None:
        """Performs morphological tophat on the image. Only works on 2D images.

        Parameters
        ----------
        size: int, optional
            Size of the kernel (default is 5)
        element: int, optional
            Structural element (default is cv.MORPH_ELLIPSE)
        """
        self._require_2d()
        kernel = cv.getStructuringElement(element, (size, size))
        self.data = cv.morphologyEx(self.data, cv.MORPH_TOPHAT, kernel)

    def algebric_open(self, size: int = 5, step: int = 5) -> None:
        """Performs morphological algebraic opening on the image. Only works on 2D images.

        Parameters
        ----------
        size: int, optional
            Structural element size
        step: int, optional
            Angle step in degrees
        """
        self._require_2d()
        result = np.zeros(self.shape, dtype=np.uint8)
        # Pixel-wise max over openings with line elements at every orientation.
        for angle in range(0, 180, step):
            kernel = line_strel(size=size, angle=angle)
            opened = cv.morphologyEx(self.data, cv.MORPH_OPEN, kernel).astype(np.uint8)
            result = np.maximum(result, opened)
        self.data = result

    def algebric_dilate(self, size: int = 5, step: int = 5) -> None:
        """Performs morphological algebraic dilatation on the image. Only works on 2D images.

        Parameters
        ----------
        size: int, optional
            Structural element size
        step: int, optional
            Angle step in degrees
        """
        self._require_2d()
        result = np.zeros(self.shape, dtype=np.uint8)
        # Pixel-wise max over dilations with line elements at every orientation.
        for angle in range(0, 180, step):
            kernel = line_strel(size=size, angle=angle)
            dilated = cv.morphologyEx(self.data, cv.MORPH_DILATE, kernel).astype(np.uint8)
            result = np.maximum(result, dilated)
        self.data = result

    def blackhat(self, size: int = 5, element: int = cv.MORPH_ELLIPSE) -> None:
        """Performs morphological blackhat on the image. Only works on 2D images.

        Parameters
        ----------
        size: int, optional
            Size of the kernel (default is 5)
        element: int, optional
            Structural element (default is cv.MORPH_ELLIPSE)
        """
        self._require_2d()
        kernel = cv.getStructuringElement(element, (size, size))
        self.data = cv.morphologyEx(self.data, cv.MORPH_BLACKHAT, kernel)

    def gabor(self) -> None:
        """Applies gabor filter to the image."""
        ksize = 21
        thetas = [-45]
        filters = []
        for theta in thetas:
            kernel = cv.getGaborKernel([ksize, ksize], 40, theta, 25, 1)
            filters.append(kernel)
        result = np.zeros(self.shape, dtype=np.uint8)
        # Keep the strongest response across all orientations.
        for kernel in filters:
            imgfiltered = cv.filter2D(self.data, -1, kernel)
            result = np.maximum(result, imgfiltered)
        self.data = result

    def edges(self, thres1: int = 100, thres2: int = 200) -> None:
        """Finds the edges on the image with Canny algorithm.

        Parameters
        ----------
        thres1: int, optional
            Low threshold (default is 100)
        thres2: int, optional
            High threshold (default is 200)
        """
        self.data = cv.Canny(image=self.data, threshold1=thres1, threshold2=thres2)

    def sobel(self) -> None:
        """Applies a 3x3 Sobel derivative (dx=1, dy=1) to the image."""
        self.data = cv.Sobel(src=self.data, ddepth=cv.CV_8UC1, dx=1, dy=1, ksize=3)
def line_strel(size: int, angle: float) -> np.ndarray:
    """Creates a linear structural element, with given length and rotation angle.

    Parameters
    ----------
    size: int
        Length of the structural element when laying flat (must be odd)
    angle: float
        Rotation angle of the line in degrees

    Returns
    -------
    np.ndarray
        Square uint8 array containing the linear structural element

    Raises
    ------
    ValueError
        If ``size`` is not odd.
    """
    if size % 2 != 1:
        raise ValueError("Size must be odd")
    line = np.zeros((size, size))
    # BUG FIX: ndarray has no ``height`` attribute; draw the line on the
    # middle row of the size x size canvas.
    line[size // 2, :] = 1
    center = (size // 2, size // 2)
    tform = cv.getRotationMatrix2D(center, angle, 1)
    kernel = cv.warpAffine(line, tform, line.shape)
    return (kernel * 255).astype(np.uint8)
| [
"cv2.warpAffine",
"cv2.getGaborKernel",
"cv2.filter2D",
"numpy.array",
"numpy.zeros",
"cv2.morphologyEx",
"cv2.getRotationMatrix2D",
"numpy.maximum",
"cv2.Canny",
"cv2.getStructuringElement",
"cv2.Sobel"
] | [((6877, 6899), 'numpy.zeros', 'np.zeros', (['(size, size)'], {}), '((size, size))\n', (6885, 6899), True, 'import numpy as np\n'), ((6984, 7024), 'cv2.getRotationMatrix2D', 'cv.getRotationMatrix2D', (['center', 'angle', '(1)'], {}), '(center, angle, 1)\n', (7006, 7024), True, 'import cv2 as cv\n'), ((7038, 7076), 'cv2.warpAffine', 'cv.warpAffine', (['line', 'tform', 'line.shape'], {}), '(line, tform, line.shape)\n', (7051, 7076), True, 'import cv2 as cv\n'), ((264, 315), 'numpy.array', 'np.array', (['[[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]]'], {}), '([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])\n', (272, 315), True, 'import numpy as np\n'), ((336, 370), 'cv2.filter2D', 'cv.filter2D', (['self.data', '(-1)', 'filter'], {}), '(self.data, -1, filter)\n', (347, 370), True, 'import cv2 as cv\n'), ((850, 889), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['element', 'size'], {}), '(element, size)\n', (874, 889), True, 'import cv2 as cv\n'), ((910, 959), 'cv2.morphologyEx', 'cv.morphologyEx', (['self.data', 'cv.MORPH_OPEN', 'kernel'], {}), '(self.data, cv.MORPH_OPEN, kernel)\n', (925, 959), True, 'import cv2 as cv\n'), ((1453, 1492), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['element', 'size'], {}), '(element, size)\n', (1477, 1492), True, 'import cv2 as cv\n'), ((1513, 1563), 'cv2.morphologyEx', 'cv.morphologyEx', (['self.data', 'cv.MORPH_CLOSE', 'kernel'], {}), '(self.data, cv.MORPH_CLOSE, kernel)\n', (1528, 1563), True, 'import cv2 as cv\n'), ((2048, 2087), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['element', 'size'], {}), '(element, size)\n', (2072, 2087), True, 'import cv2 as cv\n'), ((2108, 2159), 'cv2.morphologyEx', 'cv.morphologyEx', (['self.data', 'cv.MORPH_DILATE', 'kernel'], {}), '(self.data, cv.MORPH_DILATE, kernel)\n', (2123, 2159), True, 'import cv2 as cv\n'), ((2639, 2678), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['element', 'size'], {}), '(element, size)\n', (2663, 2678), True, 'import cv2 
as cv\n'), ((2699, 2749), 'cv2.morphologyEx', 'cv.morphologyEx', (['self.data', 'cv.MORPH_ERODE', 'kernel'], {}), '(self.data, cv.MORPH_ERODE, kernel)\n', (2714, 2749), True, 'import cv2 as cv\n'), ((3220, 3267), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['element', '(size, size)'], {}), '(element, (size, size))\n', (3244, 3267), True, 'import cv2 as cv\n'), ((3288, 3339), 'cv2.morphologyEx', 'cv.morphologyEx', (['self.data', 'cv.MORPH_TOPHAT', 'kernel'], {}), '(self.data, cv.MORPH_TOPHAT, kernel)\n', (3303, 3339), True, 'import cv2 as cv\n'), ((3758, 3794), 'numpy.zeros', 'np.zeros', (['self.shape'], {'dtype': 'np.uint8'}), '(self.shape, dtype=np.uint8)\n', (3766, 3794), True, 'import numpy as np\n'), ((4470, 4506), 'numpy.zeros', 'np.zeros', (['self.shape'], {'dtype': 'np.uint8'}), '(self.shape, dtype=np.uint8)\n', (4478, 4506), True, 'import numpy as np\n'), ((5235, 5282), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['element', '(size, size)'], {}), '(element, (size, size))\n', (5259, 5282), True, 'import cv2 as cv\n'), ((5303, 5356), 'cv2.morphologyEx', 'cv.morphologyEx', (['self.data', 'cv.MORPH_BLACKHAT', 'kernel'], {}), '(self.data, cv.MORPH_BLACKHAT, kernel)\n', (5318, 5356), True, 'import cv2 as cv\n'), ((5647, 5683), 'numpy.zeros', 'np.zeros', (['self.shape'], {'dtype': 'np.uint8'}), '(self.shape, dtype=np.uint8)\n', (5655, 5683), True, 'import numpy as np\n'), ((6205, 6268), 'cv2.Canny', 'cv.Canny', ([], {'image': 'self.data', 'threshold1': 'thres1', 'threshold2': 'thres2'}), '(image=self.data, threshold1=thres1, threshold2=thres2)\n', (6213, 6268), True, 'import cv2 as cv\n'), ((6319, 6382), 'cv2.Sobel', 'cv.Sobel', ([], {'src': 'self.data', 'ddepth': 'cv.CV_8UC1', 'dx': '(1)', 'dy': '(1)', 'ksize': '(3)'}), '(src=self.data, ddepth=cv.CV_8UC1, dx=1, dy=1, ksize=3)\n', (6327, 6382), True, 'import cv2 as cv\n'), ((3994, 4018), 'numpy.maximum', 'np.maximum', (['result', 'temp'], {}), '(result, temp)\n', (4004, 4018), True, 
'import numpy as np\n'), ((4708, 4732), 'numpy.maximum', 'np.maximum', (['result', 'temp'], {}), '(result, temp)\n', (4718, 4732), True, 'import numpy as np\n'), ((5546, 5593), 'cv2.getGaborKernel', 'cv.getGaborKernel', (['[ksize, ksize]', '(40)', 'a', '(25)', '(1)'], {}), '([ksize, ksize], 40, a, 25, 1)\n', (5563, 5593), True, 'import cv2 as cv\n'), ((5741, 5775), 'cv2.filter2D', 'cv.filter2D', (['self.data', '(-1)', 'kernel'], {}), '(self.data, -1, kernel)\n', (5752, 5775), True, 'import cv2 as cv\n'), ((5797, 5828), 'numpy.maximum', 'np.maximum', (['result', 'imgfiltered'], {}), '(result, imgfiltered)\n', (5807, 5828), True, 'import numpy as np\n'), ((3906, 3955), 'cv2.morphologyEx', 'cv.morphologyEx', (['self.data', 'cv.MORPH_OPEN', 'kernel'], {}), '(self.data, cv.MORPH_OPEN, kernel)\n', (3921, 3955), True, 'import cv2 as cv\n'), ((4618, 4669), 'cv2.morphologyEx', 'cv.morphologyEx', (['self.data', 'cv.MORPH_DILATE', 'kernel'], {}), '(self.data, cv.MORPH_DILATE, kernel)\n', (4633, 4669), True, 'import cv2 as cv\n')] |
import numpy as np
__all__ = ['cal_pdf']
def cal_pdf(data, bins=60, save_to=None):
    """Calculate probability density function.

    Parameters
    ----------
    data : array-like
        Input data. The histogram is computed over the flattened array.
    bins : int or sequence of scalars, optional
        If `bins` is an int, it defines the number of equal-width
        bins in the given range (60, by default). If `bins` is a sequence,
        it defines the bin edges, including the rightmost edge, allowing
        for non-uniform bin widths.
    save_to : str, optional
        If given, path of a CSV file the (pdf, bin_centers) columns are
        written to.

    Returns
    -------
    pdf: array
        The PDF, normalized so that the integral is 1.
    bin_centers: array
        The center of each PDF bin.
    """
    pdf, bin_edges = np.histogram(data, bins=bins, density=True)
    # Midpoint of each pair of adjacent bin edges.
    bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2.
    if save_to is not None:
        np.savetxt(save_to, np.vstack((pdf, bin_centers)).T, delimiter=',',
                   header='pdf, bin_centers')
    return pdf, bin_centers
| [
"numpy.histogram",
"numpy.vstack"
] | [((789, 832), 'numpy.histogram', 'np.histogram', (['data'], {'bins': 'bins', 'density': '(True)'}), '(data, bins=bins, density=True)\n', (801, 832), True, 'import numpy as np\n'), ((945, 974), 'numpy.vstack', 'np.vstack', (['(pdf, bin_centers)'], {}), '((pdf, bin_centers))\n', (954, 974), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 21 22:15:30 2019
@author: yoelr
"""
from numpy import asarray, array
from biosteam._utils import wegstein_secant, aitken_secant, wegstein, aitken
__all__ = ('DewPoint',)
class DewPoint:
__slots__ = ('gamma', 'P', 'T', 'x')
rootsolver = staticmethod(aitken_secant)
itersolver = staticmethod(aitken)
    def __init__(self, gamma):
        """Create a dew-point solver.

        gamma : callable
            Activity-coefficient model, called as ``gamma(x, T)``; also
            expected to expose a ``species`` attribute (used by the solvers).
        """
        self.gamma = gamma
def _x_iter_at_P(self, x, T, zP, VPs):
return zP/(array([i(T) for i in VPs]) * self.gamma(x/x.sum(), T))
def _x_iter_at_T(self, x, T, P, z, Psat):
return z*P/(Psat * self.gamma(x/x.sum(), T))
def _T_error(self, T, zP, VPs):
self.x = self.itersolver(self._x_iter_at_P, self.x, 1e-5, args=(T, zP, VPs))
return 1 - self.x.sum()
def _P_error(self, P, T, z, Psat):
self.x = self.itersolver(self._x_iter_at_T, self.x, 1e-5, args=(T, P, z, Psat))
return 1 - self.x.sum()
def solve_Tx(self, z, P):
"""Dew point given composition and pressure.
**Parameters**
**y:** [array_like] Vapor phase composition.
**P:** [float] Pressure (Pa).
**Returns**
**T:** [float] Dew point temperature (K).
**x:** [numpy array] Liquid phase composition.
>>> from biosteam import Species, DewPoint, Dortmund
>>> gamma = Dortmund(*Species('Ethanol', 'Water'))
>>> dp = DewPoint(gamma)
>>> dp.solve_Tx(z=(0.5, 0.5), P=101325)
(357.45184742263075, array([0.151, 0.849]))
"""
z = asarray(z)
self.P = P
try:
self.T = self.rootsolver(self._T_error, self.T, self.T-0.01, 1e-6,
args=(P*z, [s.VaporPressure for s in self.gamma.species]))
except:
self.x = z.copy()
T = (z * [s.Tb for s in self.gamma.species]).sum()
try:
self.T = self.rootsolver(self._T_error, T, T-0.01, 1e-6,
args=(P*z, [s.VaporPressure for s in self.gamma.species]))
except:
self.x = z.copy()
T_guess = min([s.Tb for s in self.gamma.species])
try:
self.T = self.rootsolver(self._T_error, T_guess, T_guess-0.01, 1e-6,
args=(P*z, [s.VaporPressure for s in self.gamma.species]))
except:
self.T = T
self.x = z.copy()
self.x = self.x/self.x.sum()
return self.T, self.x
def solve_Px(self, z, T):
"""Dew point given composition and temperature.
**Parameters**
**y:** [array_like] Vapor phase composition.
**T:** [float] Temperature (K).
**Returns**
**P:** [float] Dew point pressure (Pa).
**x:** [numpy array] Liquid phase composition.
>>> from biosteam import Species, DewPoint, Dortmund
>>> gamma = Dortmund(*Species('Ethanol', 'Water'))
>>> dp = DewPoint(gamma)
>>> dp.solve_Px(z=(0.703, 0.297), T=352.28)
(111366.15384513882, array([0.6, 0.4]))
"""
z = asarray(z)
Psat = array([i.VaporPressure(T) for i in self.gamma.species])
self.T = T
try:
self.P = self.rootsolver(self._P_error, self.P, self.P+1, 1e-2,
args=(T, z, Psat))
except:
P = (z * Psat).sum()
self.x = z.copy()
self.P = self.rootsolver(self._P_error, P, P+1, 1e-2,
args=(T, z, Psat))
self.x = self.x/self.x.sum()
return self.P, self.x
def __repr__(self):
return f"<{type(self).__name__}: gamma={self.gamma}>" | [
"numpy.asarray"
] | [((1594, 1604), 'numpy.asarray', 'asarray', (['z'], {}), '(z)\n', (1601, 1604), False, 'from numpy import asarray, array\n'), ((3253, 3263), 'numpy.asarray', 'asarray', (['z'], {}), '(z)\n', (3260, 3263), False, 'from numpy import asarray, array\n')] |
import sys,os
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
sys.path.append("../../")
from Utils.callback_tf import callbacklist
from tensorflow import keras
os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # pin TensorFlow to the first GPU
(train_data, train_targets), (test_data, test_targets) = keras.datasets.boston_housing.load_data()
# Standardize features with statistics computed on the TRAINING set only,
# so no test-set information leaks into preprocessing.
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
test_data -= mean
test_data /= std
def build_model():
    """Return a compiled two-hidden-layer MLP for scalar regression.

    Input width is taken from the module-level ``train_data``; loss is MSE
    with MAE tracked as a metric.
    """
    model = keras.models.Sequential([
        keras.layers.Dense(64, activation='relu',
                           input_shape=(train_data.shape[1],)),
        keras.layers.Dense(64, activation='relu'),
        keras.layers.Dense(1),
    ])
    model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])
    return model
k = 4
num_val_samples = len(train_data) // k
all_scores = []  # NOTE(review): never filled below — appears vestigial
num_epochs = 500
all_mae_histories = []
# k-fold cross-validation: each contiguous partition serves as validation once.
for i in range(k):
    print('processing fold #', i)
    # Prepare the validation data: data from partition # k
    val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
    val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
    # Prepare the training data: data from all other partitions
    partial_train_data = np.concatenate(
        [train_data[:i * num_val_samples],
         train_data[(i + 1) * num_val_samples:]],
        axis=0)
    partial_train_targets = np.concatenate(
        [train_targets[:i * num_val_samples],
         train_targets[(i + 1) * num_val_samples:]],
        axis=0)
    # Build the Keras model (already compiled)
    model = build_model()
    # Train the model (in silent mode, verbose=0)
    history = model.fit(partial_train_data, partial_train_targets,
                        validation_data=(val_data, val_targets),
                        epochs=num_epochs, batch_size=1, verbose=0)
    # NOTE(review): this history key is Keras-version dependent
    # ('val_mean_absolute_error' vs 'val_mae') — confirm for the installed TF.
    mae_history = history.history['val_mean_absolute_error']
    all_mae_histories.append(mae_history)
# Average the MAE of each epoch across all k folds.
average_mae_history = [
    np.mean([x[i] for x in all_mae_histories]) for i in range(num_epochs)]
# Plot validation scores (dropping the first 10 noisy points).
def smooth_curve(points, factor=0.9):
    """Exponentially smooth a sequence.

    The first point is kept as-is; each subsequent output is
    ``previous * factor + point * (1 - factor)``.
    """
    smoothed = []
    previous = None
    for value in points:
        if previous is None:
            previous = value
        else:
            previous = previous * factor + value * (1 - factor)
        smoothed.append(previous)
    return smoothed
smooth_mae_history = smooth_curve(average_mae_history[10:])
# Plot the smoothed validation MAE per epoch.
plt.plot(range(1, len(smooth_mae_history) + 1), smooth_mae_history)
plt.xlabel('Epochs')
plt.ylabel('Validation MAE')
plt.show()
"numpy.mean",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"tensorflow.keras.datasets.boston_housing.load_data",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.models.Sequential",
"numpy.concatenate",
"sys.path.append",
"matplotlib.pyplot.show"
] | [((89, 114), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (104, 114), False, 'import sys, os\n'), ((286, 327), 'tensorflow.keras.datasets.boston_housing.load_data', 'keras.datasets.boston_housing.load_data', ([], {}), '()\n', (325, 327), False, 'from tensorflow import keras\n'), ((2517, 2537), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (2527, 2537), True, 'import matplotlib.pyplot as plt\n'), ((2538, 2566), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Validation MAE"""'], {}), "('Validation MAE')\n", (2548, 2566), True, 'import matplotlib.pyplot as plt\n'), ((2567, 2577), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2575, 2577), True, 'import matplotlib.pyplot as plt\n'), ((495, 520), 'tensorflow.keras.models.Sequential', 'keras.models.Sequential', ([], {}), '()\n', (518, 520), False, 'from tensorflow import keras\n'), ((1252, 1354), 'numpy.concatenate', 'np.concatenate', (['[train_data[:i * num_val_samples], train_data[(i + 1) * num_val_samples:]]'], {'axis': '(0)'}), '([train_data[:i * num_val_samples], train_data[(i + 1) *\n num_val_samples:]], axis=0)\n', (1266, 1354), True, 'import numpy as np\n'), ((1405, 1513), 'numpy.concatenate', 'np.concatenate', (['[train_targets[:i * num_val_samples], train_targets[(i + 1) * num_val_samples:]\n ]'], {'axis': '(0)'}), '([train_targets[:i * num_val_samples], train_targets[(i + 1) *\n num_val_samples:]], axis=0)\n', (1419, 1513), True, 'import numpy as np\n'), ((1992, 2034), 'numpy.mean', 'np.mean', (['[x[i] for x in all_mae_histories]'], {}), '([x[i] for x in all_mae_histories])\n', (1999, 2034), True, 'import numpy as np\n'), ((535, 612), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(64)'], {'activation': '"""relu"""', 'input_shape': '(train_data.shape[1],)'}), "(64, activation='relu', input_shape=(train_data.shape[1],))\n", (553, 612), False, 'from tensorflow import keras\n'), ((628, 669), 
'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(64)'], {'activation': '"""relu"""'}), "(64, activation='relu')\n", (646, 669), False, 'from tensorflow import keras\n'), ((685, 706), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(1)'], {}), '(1)\n', (703, 706), False, 'from tensorflow import keras\n')] |
# /bin/python2.7
import sys
import numpy as np
sys.path.append('../../seq2seq/inputs')
sys.path.append('../../seq2seq/utils')
from preprocess import *
from rank_io import *
if __name__ == '__main__':
    # Build matching-histogram features for train/valid/test relation files.
    # Run mode selects the data subdirectory; defaults to 'ranking'.
    run_mode = 'ranking'
    if len(sys.argv) > 1 and sys.argv[1] == 'classification':
        run_mode = 'classification'
    hist_size = 30
    path = '../../data/toy_example/%s/' % (run_mode)
    embed_size = 50
    embedfile = path + 'embed_glove_d50_norm'
    corpfile = path + 'corpus_preprocessed.txt'
    relfiles = [path + 'relation_train.txt', path + 'relation_valid.txt',
                path + 'relation_test.txt']
    histfiles = [path + 'relation.train.hist-%d.txt' % (hist_size),
                 path + 'relation.valid.hist-%d.txt' % (hist_size),
                 path + 'relation.test.hist-%d.txt' % (hist_size)]
    # note here word embeddings have been normalized to speed up calculation
    embed_dict = read_embedding(filename=embedfile)
    print('after read embedding ...')
    _PAD_ = len(
        embed_dict)  # words without an embedding get a random one at index _PAD_
    embed_dict[_PAD_] = np.zeros((embed_size,), dtype=np.float32)
    embed = np.float32(np.random.uniform(-0.2, 0.2, [_PAD_ + 1, embed_size]))
    embed = convert_embed_2_numpy(embed_dict, embed=embed)
    corp, _ = read_data(corpfile)
    print('after read corpus ....')
    for i in range(len(relfiles)):
        rel = read_relation(relfiles[i])
        fout = open(histfiles[i], 'w')
        inum = 0
        for label, d1, d2 in rel:
            inum += 1
            assert d1 in corp
            assert d2 in corp
            qnum = len(corp[d1])
            # Matching histogram between the two documents' embeddings.
            d1_embed = embed[corp[d1]]
            d2_embed = embed[corp[d2]]
            curr_hist = cal_hist(d1_embed, d2_embed, qnum, hist_size)
            curr_hist = curr_hist.tolist()
            fout.write(' '.join(map(str, curr_hist)))
            fout.write('\n')
            if inum % 1000 == 0:
                print('inum: %d ....\r' % inum, )
                sys.stdout.flush()
        # print(curr_hist)
        fout.close()
        print('file: %s processed... ' % (relfiles[i]))
    print('\nfinished ...')
| [
"sys.stdout.flush",
"numpy.zeros",
"sys.path.append",
"numpy.random.uniform"
] | [((49, 88), 'sys.path.append', 'sys.path.append', (['"""../../seq2seq/inputs"""'], {}), "('../../seq2seq/inputs')\n", (64, 88), False, 'import sys\n'), ((89, 127), 'sys.path.append', 'sys.path.append', (['"""../../seq2seq/utils"""'], {}), "('../../seq2seq/utils')\n", (104, 127), False, 'import sys\n'), ((1124, 1165), 'numpy.zeros', 'np.zeros', (['(embed_size,)'], {'dtype': 'np.float32'}), '((embed_size,), dtype=np.float32)\n', (1132, 1165), True, 'import numpy as np\n'), ((1189, 1242), 'numpy.random.uniform', 'np.random.uniform', (['(-0.2)', '(0.2)', '[_PAD_ + 1, embed_size]'], {}), '(-0.2, 0.2, [_PAD_ + 1, embed_size])\n', (1206, 1242), True, 'import numpy as np\n'), ((2029, 2047), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2045, 2047), False, 'import sys\n')] |
import os
import random
import time
import pickle
import argparse
import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import torch.optim as optim
from torch.utils.data import DataLoader
from tqdm import tqdm
from graph4nlp.pytorch.data.data import to_batch
from graph4nlp.pytorch.datasets.mathqa import MathQADatasetForTree
from graph4nlp.pytorch.modules.graph_construction import *
from graph4nlp.pytorch.modules.graph_embedding import *
from graph4nlp.pytorch.models.graph2tree import Graph2Tree
from graph4nlp.pytorch.modules.utils.tree_utils import Tree, VocabForAll
import warnings
warnings.filterwarnings('ignore')
class MathQA:
    def __init__(self, opt=None):
        """Set up seeds, device, vocab flags, data loaders, model and optimizer.

        opt : dict
            Nested configuration dictionary (see ``config.get_args``).
        """
        super(MathQA, self).__init__()
        self.opt = opt
        seed = self.opt["seed"]
        # Seed every RNG source for reproducibility.
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        # gpuid == -1 selects CPU; otherwise the given CUDA device.
        if self.opt["gpuid"] == -1:
            self.device = torch.device("cpu")
        else:
            self.device = torch.device("cuda:{}".format(self.opt["gpuid"]))
        self.use_copy = self.opt["decoder_args"]["rnn_decoder_share"]["use_copy"]
        self.use_share_vocab = self.opt["graph_construction_args"]["graph_construction_share"]["share_vocab"]
        self.data_dir = self.opt["graph_construction_args"]["graph_construction_share"]["root_dir"]
        self._build_dataloader()
        self._build_model()
        self._build_optimizer()
def _build_dataloader(self):
graph_type = self.opt["graph_construction_args"]["graph_construction_share"]["graph_type"]
enc_emb_size = self.opt["graph_construction_args"]["node_embedding"]["input_size"]
tgt_emb_size = self.opt["decoder_args"]["rnn_decoder_share"]["input_size"]
topology_subdir = self.opt["graph_construction_args"]["graph_construction_share"]["topology_subdir"]
if graph_type == "dependency":
dataset = MathQADatasetForTree(root_dir=self.data_dir,
topology_builder=DependencyBasedGraphConstruction,
topology_subdir=topology_subdir,
edge_strategy=self.opt["graph_construction_args"]["graph_construction_private"]["edge_strategy"],
graph_type='static',
share_vocab=self.use_share_vocab,
enc_emb_size=enc_emb_size,
dec_emb_size=tgt_emb_size,
min_word_vocab_freq=self.opt["min_freq"],
pretrained_word_emb_name=self.opt["pretrained_word_emb_name"],
pretrained_word_emb_url=self.opt["pretrained_word_emb_url"],
pretrained_word_emb_cache_dir=self.opt["pretrained_word_emb_cache_dir"])
elif graph_type == "constituency":
dataset = MathQADatasetForTree(root_dir=self.data_dir,
topology_builder=ConstituencyBasedGraphConstruction,
topology_subdir=topology_subdir,
edge_strategy=self.opt["graph_construction_args"]["graph_construction_private"]["edge_strategy"],
graph_type='static',
share_vocab=self.use_share_vocab,
enc_emb_size=enc_emb_size,
dec_emb_size=tgt_emb_size,
min_word_vocab_freq=self.opt["min_freq"],
pretrained_word_emb_name=self.opt["pretrained_word_emb_name"],
pretrained_word_emb_url=self.opt["pretrained_word_emb_url"],
pretrained_word_emb_cache_dir=self.opt["pretrained_word_emb_cache_dir"])
elif graph_type == "node_emb":
dataset = MathQADatasetForTree(root_dir=self.data_dir,
word_emb_size=enc_emb_size,
topology_builder=NodeEmbeddingBasedGraphConstruction,
topology_subdir=topology_subdir,
graph_type='dynamic',
dynamic_graph_type=graph_type,
edge_strategy=self.opt["graph_construction_args"]["graph_construction_private"]["edge_strategy"],
share_vocab=self.use_share_vocab,
enc_emb_size=enc_emb_size,
dec_emb_size=tgt_emb_size,
min_word_vocab_freq=self.opt["min_freq"],
pretrained_word_emb_name=self.opt["pretrained_word_emb_name"],
pretrained_word_emb_url=self.opt["pretrained_word_emb_url"],
pretrained_word_emb_cache_dir=self.opt["pretrained_word_emb_cache_dir"])
elif graph_type == "node_emb_refined":
dynamic_init_graph_type = self.opt["graph_construction_args"]["graph_construction_private"]["dynamic_init_graph_type"]
if dynamic_init_graph_type is None or dynamic_init_graph_type == 'line':
dynamic_init_topology_builder = None
elif dynamic_init_graph_type == 'dependency':
dynamic_init_topology_builder = DependencyBasedGraphConstruction
elif dynamic_init_graph_type == 'constituency':
dynamic_init_topology_builder = ConstituencyBasedGraphConstruction
else:
# dynamic_init_topology_builder
raise RuntimeError('Define your own dynamic_init_topology_builder')
dataset = MathQADatasetForTree(root_dir=self.data_dir,
word_emb_size=enc_emb_size,
topology_builder=NodeEmbeddingBasedRefinedGraphConstruction,
topology_subdir=topology_subdir,
graph_type='dynamic',
dynamic_graph_type=graph_type,
share_vocab=self.use_share_vocab,
enc_emb_size=enc_emb_size,
dec_emb_size=tgt_emb_size,
dynamic_init_topology_builder=dynamic_init_topology_builder,
min_word_vocab_freq=self.opt["min_freq"],
pretrained_word_emb_name=self.opt["pretrained_word_emb_name"],
pretrained_word_emb_url=self.opt["pretrained_word_emb_url"],
pretrained_word_emb_cache_dir=self.opt["pretrained_word_emb_cache_dir"])
else:
raise NotImplementedError
self.train_data_loader = DataLoader(dataset.train, batch_size=self.opt["batch_size"], shuffle=True, num_workers=1,
collate_fn=dataset.collate_fn)
self.test_data_loader = DataLoader(dataset.test, batch_size=1, shuffle=False, num_workers=1,
collate_fn=dataset.collate_fn)
self.valid_data_loader = DataLoader(dataset.val, batch_size=1, shuffle=False, num_workers=1,
collate_fn=dataset.collate_fn)
self.src_vocab = dataset.src_vocab_model
self.tgt_vocab = dataset.tgt_vocab_model
if self.use_share_vocab:
self.share_vocab = dataset.share_vocab_model
self.vocab_model = VocabForAll(in_word_vocab=self.src_vocab, out_word_vocab=self.tgt_vocab, share_vocab=self.share_vocab)
def _build_model(self):
'''For encoder-decoder'''
self.model = Graph2Tree.from_args(self.opt, vocab_model=self.vocab_model)
self.model.init(self.opt["init_weight"])
self.model.to(self.device)
def _build_optimizer(self):
optim_state = {"learningRate": self.opt["learning_rate"], "weight_decay": self.opt["weight_decay"]}
parameters = [p for p in self.model.parameters() if p.requires_grad]
self.optimizer = optim.Adam(parameters, lr=optim_state['learningRate'], weight_decay=optim_state['weight_decay'])
def prepare_ext_vocab(self, batch_graph, src_vocab):
oov_dict = copy.deepcopy(src_vocab)
token_matrix = []
for n in batch_graph.node_attributes:
node_token = n['token']
if (n.get('type') == None or n.get('type') == 0) and oov_dict.get_symbol_idx(node_token) == oov_dict.get_symbol_idx(oov_dict.unk_token):
oov_dict.add_symbol(node_token)
token_matrix.append(oov_dict.get_symbol_idx(node_token))
batch_graph.node_features['token_id_oov'] = torch.tensor(token_matrix, dtype=torch.long).to(self.device)
return oov_dict
def train_epoch(self, epoch):
loss_to_print = 0
num_batch = len(self.train_data_loader)
for step, data in tqdm(enumerate(self.train_data_loader), desc=f'Epoch {epoch:02d}', total=len(self.train_data_loader)):
batch_graph, batch_tree_list, batch_original_tree_list = data['graph_data'], data['dec_tree_batch'], data['original_dec_tree_batch']
batch_graph = batch_graph.to(self.device)
self.optimizer.zero_grad()
oov_dict = self.prepare_ext_vocab(
batch_graph, self.src_vocab) if self.use_copy else None
if self.use_copy:
batch_tree_list_refined = []
for item in batch_original_tree_list:
tgt_list = oov_dict.get_symbol_idx_for_list(item.strip().split())
tgt_tree = Tree.convert_to_tree(tgt_list, 0, len(tgt_list), oov_dict)
batch_tree_list_refined.append(tgt_tree)
loss = self.model(batch_graph, batch_tree_list_refined if self.use_copy else batch_tree_list, oov_dict=oov_dict)
loss.backward()
torch.nn.utils.clip_grad_value_(
self.model.parameters(), self.opt["grad_clip"])
self.optimizer.step()
loss_to_print += loss
return loss_to_print/num_batch
    def train(self):
        """Main training loop.

        Every 5th epoch (after epoch 2) the model is evaluated on both splits;
        the returned tuple is (test accuracy at best validation, best
        validation accuracy).
        """
        best_acc = (-1, -1)  # (test_acc at best val, best val_acc)
        print("-------------\nStarting training.")
        for epoch in range(1, self.opt["max_epochs"]+1):
            self.model.train()
            loss_to_print = self.train_epoch(epoch)
            print("epochs = {}, train_loss = {:.3f}".format(epoch, loss_to_print))
            if epoch > 2 and epoch % 5 == 0:
                test_acc = self.eval(self.model, mode="test")
                val_acc = self.eval(self.model, mode="val")
                # Model selection on validation accuracy only.
                if val_acc > best_acc[1]:
                    best_acc = (test_acc, val_acc)
        print("Best Acc: {:.3f}\n".format(best_acc[0]))
        return best_acc
    def eval(self, model, mode="val"):
        """Evaluate *model* on the test or validation split.

        Args:
            model: graph2tree model to evaluate (switched to eval mode here).
            mode (str): "test" selects ``self.test_data_loader``; any other
                value selects ``self.valid_data_loader``.

        Returns:
            float: tree accuracy over the selected split.
        """
        from evaluation import convert_to_string, compute_tree_accuracy
        model.eval()
        reference_list = []
        candidate_list = []
        data_loader = self.test_data_loader if mode == "test" else self.valid_data_loader
        for data in data_loader:
            eval_input_graph, batch_tree_list, batch_original_tree_list = data['graph_data'], data['dec_tree_batch'], data['original_dec_tree_batch']
            eval_input_graph = eval_input_graph.to(self.device)
            oov_dict = self.prepare_ext_vocab(eval_input_graph, self.src_vocab)
            if self.use_copy:
                # evaluation assumes batch size 1 (one tree per batch)
                assert len(batch_original_tree_list) == 1
                reference = oov_dict.get_symbol_idx_for_list(batch_original_tree_list[0].split())
                eval_vocab = oov_dict
            else:
                assert len(batch_original_tree_list) == 1
                reference = model.tgt_vocab.get_symbol_idx_for_list(batch_original_tree_list[0].split())
                eval_vocab = self.tgt_vocab
            candidate = model.decoder.translate(model.use_copy,
                                                model.decoder.enc_hidden_size,
                                                model.decoder.hidden_size,
                                                model,
                                                eval_input_graph,
                                                self.src_vocab,
                                                self.tgt_vocab,
                                                self.device,
                                                self.opt["decoder_args"]["rnn_decoder_private"]["max_decoder_step"],
                                                self.opt["decoder_args"]["rnn_decoder_private"]["max_tree_depth"],
                                                oov_dict=oov_dict,
                                                use_beam_search=True,
                                                beam_size=self.opt["beam_size"])
            candidate = [int(c) for c in candidate]
            # Balance parentheses in the decoded sequence: append missing ")"
            # or truncate surplus tokens so the tree string stays parseable.
            num_left_paren = sum(
                1 for c in candidate if eval_vocab.idx2symbol[int(c)] == "(")
            num_right_paren = sum(
                1 for c in candidate if eval_vocab.idx2symbol[int(c)] == ")")
            diff = num_left_paren - num_right_paren
            if diff > 0:
                for i in range(diff):
                    # NOTE(review): uses test_data_loader's vocab even when
                    # mode == "val" -- looks unintended; confirm the loaders
                    # share one tgt_vocab.
                    candidate.append(
                        self.test_data_loader.tgt_vocab.symbol2idx[")"])
            elif diff < 0:
                candidate = candidate[:diff]
            # ref_str / cand_str are computed but never used (debug leftovers?)
            ref_str = convert_to_string(
                reference, eval_vocab)
            cand_str = convert_to_string(
                candidate, eval_vocab)
            reference_list.append(reference)
            candidate_list.append(candidate)
        # NOTE(review): eval_vocab here is whichever vocab the *last* batch
        # produced; with use_copy each batch has its own oov_dict -- verify
        # compute_tree_accuracy tolerates this.
        eval_acc = compute_tree_accuracy(
            candidate_list, reference_list, eval_vocab)
        print("{} accuracy = {:.3f}\n".format(mode, eval_acc))
        return eval_acc
if __name__ == "__main__":
    from config import get_args

    # Entry point: build the runner from CLI options, train, report runtime.
    t0 = time.time()
    runner = MathQA(opt=get_args())
    best_acc = runner.train()
    elapsed = time.time() - t0
    print("total time: {} minutes\n".format(elapsed / 60))
"torch.manual_seed",
"torch.optim.Adam",
"copy.deepcopy",
"graph4nlp.pytorch.models.graph2tree.Graph2Tree.from_args",
"graph4nlp.pytorch.modules.utils.tree_utils.VocabForAll",
"graph4nlp.pytorch.datasets.mathqa.MathQADatasetForTree",
"random.seed",
"config.get_args",
"torch.tensor",
"evaluation.co... | [((665, 698), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (688, 698), False, 'import warnings\n'), ((14558, 14569), 'time.time', 'time.time', ([], {}), '()\n', (14567, 14569), False, 'import time\n'), ((14647, 14658), 'time.time', 'time.time', ([], {}), '()\n', (14656, 14658), False, 'import time\n'), ((852, 869), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (863, 869), False, 'import random\n'), ((878, 898), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (892, 898), True, 'import numpy as np\n'), ((907, 930), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (924, 930), False, 'import torch\n'), ((7418, 7542), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset.train'], {'batch_size': "self.opt['batch_size']", 'shuffle': '(True)', 'num_workers': '(1)', 'collate_fn': 'dataset.collate_fn'}), "(dataset.train, batch_size=self.opt['batch_size'], shuffle=True,\n num_workers=1, collate_fn=dataset.collate_fn)\n", (7428, 7542), False, 'from torch.utils.data import DataLoader\n'), ((7614, 7717), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset.test'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': '(1)', 'collate_fn': 'dataset.collate_fn'}), '(dataset.test, batch_size=1, shuffle=False, num_workers=1,\n collate_fn=dataset.collate_fn)\n', (7624, 7717), False, 'from torch.utils.data import DataLoader\n'), ((7789, 7891), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset.val'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': '(1)', 'collate_fn': 'dataset.collate_fn'}), '(dataset.val, batch_size=1, shuffle=False, num_workers=1,\n collate_fn=dataset.collate_fn)\n', (7799, 7891), False, 'from torch.utils.data import DataLoader\n'), ((8145, 8251), 'graph4nlp.pytorch.modules.utils.tree_utils.VocabForAll', 'VocabForAll', ([], {'in_word_vocab': 'self.src_vocab', 'out_word_vocab': 'self.tgt_vocab', 'share_vocab': 
'self.share_vocab'}), '(in_word_vocab=self.src_vocab, out_word_vocab=self.tgt_vocab,\n share_vocab=self.share_vocab)\n', (8156, 8251), False, 'from graph4nlp.pytorch.modules.utils.tree_utils import Tree, VocabForAll\n'), ((8332, 8392), 'graph4nlp.pytorch.models.graph2tree.Graph2Tree.from_args', 'Graph2Tree.from_args', (['self.opt'], {'vocab_model': 'self.vocab_model'}), '(self.opt, vocab_model=self.vocab_model)\n', (8352, 8392), False, 'from graph4nlp.pytorch.models.graph2tree import Graph2Tree\n'), ((8720, 8821), 'torch.optim.Adam', 'optim.Adam', (['parameters'], {'lr': "optim_state['learningRate']", 'weight_decay': "optim_state['weight_decay']"}), "(parameters, lr=optim_state['learningRate'], weight_decay=\n optim_state['weight_decay'])\n", (8730, 8821), True, 'import torch.optim as optim\n'), ((8894, 8918), 'copy.deepcopy', 'copy.deepcopy', (['src_vocab'], {}), '(src_vocab)\n', (8907, 8918), False, 'import copy\n'), ((14320, 14385), 'evaluation.compute_tree_accuracy', 'compute_tree_accuracy', (['candidate_list', 'reference_list', 'eval_vocab'], {}), '(candidate_list, reference_list, eval_vocab)\n', (14341, 14385), False, 'from evaluation import convert_to_string, compute_tree_accuracy\n'), ((994, 1013), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1006, 1013), False, 'import torch\n'), ((1968, 2576), 'graph4nlp.pytorch.datasets.mathqa.MathQADatasetForTree', 'MathQADatasetForTree', ([], {'root_dir': 'self.data_dir', 'topology_builder': 'DependencyBasedGraphConstruction', 'topology_subdir': 'topology_subdir', 'edge_strategy': "self.opt['graph_construction_args']['graph_construction_private'][\n 'edge_strategy']", 'graph_type': '"""static"""', 'share_vocab': 'self.use_share_vocab', 'enc_emb_size': 'enc_emb_size', 'dec_emb_size': 'tgt_emb_size', 'min_word_vocab_freq': "self.opt['min_freq']", 'pretrained_word_emb_name': "self.opt['pretrained_word_emb_name']", 'pretrained_word_emb_url': "self.opt['pretrained_word_emb_url']", 
'pretrained_word_emb_cache_dir': "self.opt['pretrained_word_emb_cache_dir']"}), "(root_dir=self.data_dir, topology_builder=\n DependencyBasedGraphConstruction, topology_subdir=topology_subdir,\n edge_strategy=self.opt['graph_construction_args'][\n 'graph_construction_private']['edge_strategy'], graph_type='static',\n share_vocab=self.use_share_vocab, enc_emb_size=enc_emb_size,\n dec_emb_size=tgt_emb_size, min_word_vocab_freq=self.opt['min_freq'],\n pretrained_word_emb_name=self.opt['pretrained_word_emb_name'],\n pretrained_word_emb_url=self.opt['pretrained_word_emb_url'],\n pretrained_word_emb_cache_dir=self.opt['pretrained_word_emb_cache_dir'])\n", (1988, 2576), False, 'from graph4nlp.pytorch.datasets.mathqa import MathQADatasetForTree\n'), ((14071, 14111), 'evaluation.convert_to_string', 'convert_to_string', (['reference', 'eval_vocab'], {}), '(reference, eval_vocab)\n', (14088, 14111), False, 'from evaluation import convert_to_string, compute_tree_accuracy\n'), ((14152, 14192), 'evaluation.convert_to_string', 'convert_to_string', (['candidate', 'eval_vocab'], {}), '(candidate, eval_vocab)\n', (14169, 14192), False, 'from evaluation import convert_to_string, compute_tree_accuracy\n'), ((14594, 14604), 'config.get_args', 'get_args', ([], {}), '()\n', (14602, 14604), False, 'from config import get_args\n'), ((3063, 3673), 'graph4nlp.pytorch.datasets.mathqa.MathQADatasetForTree', 'MathQADatasetForTree', ([], {'root_dir': 'self.data_dir', 'topology_builder': 'ConstituencyBasedGraphConstruction', 'topology_subdir': 'topology_subdir', 'edge_strategy': "self.opt['graph_construction_args']['graph_construction_private'][\n 'edge_strategy']", 'graph_type': '"""static"""', 'share_vocab': 'self.use_share_vocab', 'enc_emb_size': 'enc_emb_size', 'dec_emb_size': 'tgt_emb_size', 'min_word_vocab_freq': "self.opt['min_freq']", 'pretrained_word_emb_name': "self.opt['pretrained_word_emb_name']", 'pretrained_word_emb_url': "self.opt['pretrained_word_emb_url']", 
'pretrained_word_emb_cache_dir': "self.opt['pretrained_word_emb_cache_dir']"}), "(root_dir=self.data_dir, topology_builder=\n ConstituencyBasedGraphConstruction, topology_subdir=topology_subdir,\n edge_strategy=self.opt['graph_construction_args'][\n 'graph_construction_private']['edge_strategy'], graph_type='static',\n share_vocab=self.use_share_vocab, enc_emb_size=enc_emb_size,\n dec_emb_size=tgt_emb_size, min_word_vocab_freq=self.opt['min_freq'],\n pretrained_word_emb_name=self.opt['pretrained_word_emb_name'],\n pretrained_word_emb_url=self.opt['pretrained_word_emb_url'],\n pretrained_word_emb_cache_dir=self.opt['pretrained_word_emb_cache_dir'])\n", (3083, 3673), False, 'from graph4nlp.pytorch.datasets.mathqa import MathQADatasetForTree\n'), ((9345, 9389), 'torch.tensor', 'torch.tensor', (['token_matrix'], {'dtype': 'torch.long'}), '(token_matrix, dtype=torch.long)\n', (9357, 9389), False, 'import torch\n'), ((4197, 4876), 'graph4nlp.pytorch.datasets.mathqa.MathQADatasetForTree', 'MathQADatasetForTree', ([], {'root_dir': 'self.data_dir', 'word_emb_size': 'enc_emb_size', 'topology_builder': 'NodeEmbeddingBasedGraphConstruction', 'topology_subdir': 'topology_subdir', 'graph_type': '"""dynamic"""', 'dynamic_graph_type': 'graph_type', 'edge_strategy': "self.opt['graph_construction_args']['graph_construction_private'][\n 'edge_strategy']", 'share_vocab': 'self.use_share_vocab', 'enc_emb_size': 'enc_emb_size', 'dec_emb_size': 'tgt_emb_size', 'min_word_vocab_freq': "self.opt['min_freq']", 'pretrained_word_emb_name': "self.opt['pretrained_word_emb_name']", 'pretrained_word_emb_url': "self.opt['pretrained_word_emb_url']", 'pretrained_word_emb_cache_dir': "self.opt['pretrained_word_emb_cache_dir']"}), "(root_dir=self.data_dir, word_emb_size=enc_emb_size,\n topology_builder=NodeEmbeddingBasedGraphConstruction, topology_subdir=\n topology_subdir, graph_type='dynamic', dynamic_graph_type=graph_type,\n edge_strategy=self.opt['graph_construction_args'][\n 
'graph_construction_private']['edge_strategy'], share_vocab=self.\n use_share_vocab, enc_emb_size=enc_emb_size, dec_emb_size=tgt_emb_size,\n min_word_vocab_freq=self.opt['min_freq'], pretrained_word_emb_name=self\n .opt['pretrained_word_emb_name'], pretrained_word_emb_url=self.opt[\n 'pretrained_word_emb_url'], pretrained_word_emb_cache_dir=self.opt[\n 'pretrained_word_emb_cache_dir'])\n", (4217, 4876), False, 'from graph4nlp.pytorch.datasets.mathqa import MathQADatasetForTree\n'), ((6148, 6794), 'graph4nlp.pytorch.datasets.mathqa.MathQADatasetForTree', 'MathQADatasetForTree', ([], {'root_dir': 'self.data_dir', 'word_emb_size': 'enc_emb_size', 'topology_builder': 'NodeEmbeddingBasedRefinedGraphConstruction', 'topology_subdir': 'topology_subdir', 'graph_type': '"""dynamic"""', 'dynamic_graph_type': 'graph_type', 'share_vocab': 'self.use_share_vocab', 'enc_emb_size': 'enc_emb_size', 'dec_emb_size': 'tgt_emb_size', 'dynamic_init_topology_builder': 'dynamic_init_topology_builder', 'min_word_vocab_freq': "self.opt['min_freq']", 'pretrained_word_emb_name': "self.opt['pretrained_word_emb_name']", 'pretrained_word_emb_url': "self.opt['pretrained_word_emb_url']", 'pretrained_word_emb_cache_dir': "self.opt['pretrained_word_emb_cache_dir']"}), "(root_dir=self.data_dir, word_emb_size=enc_emb_size,\n topology_builder=NodeEmbeddingBasedRefinedGraphConstruction,\n topology_subdir=topology_subdir, graph_type='dynamic',\n dynamic_graph_type=graph_type, share_vocab=self.use_share_vocab,\n enc_emb_size=enc_emb_size, dec_emb_size=tgt_emb_size,\n dynamic_init_topology_builder=dynamic_init_topology_builder,\n min_word_vocab_freq=self.opt['min_freq'], pretrained_word_emb_name=self\n .opt['pretrained_word_emb_name'], pretrained_word_emb_url=self.opt[\n 'pretrained_word_emb_url'], pretrained_word_emb_cache_dir=self.opt[\n 'pretrained_word_emb_cache_dir'])\n", (6168, 6794), False, 'from graph4nlp.pytorch.datasets.mathqa import MathQADatasetForTree\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2019 by University of Kassel, T<NAME>, RWTH Aachen University and Fraunhofer
# Institute for Energy Economics and Energy System Technology (IEE) Kassel and individual
# contributors (see AUTHORS file for details). All rights reserved.
import numpy as np
import pandas as pd
import datetime as dt
from packaging import version
from pandapower import compare_arrays
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
__author__ = 'smeinecke'
def ensure_iterability(var, len_=None):
    """Return *var* as a non-string iterable, optionally checking its length.

    A non-iterable value (or a string) is wrapped into a list repeated
    ``len_`` times (default 1). An already-iterable value is returned
    unchanged; if an integer ``len_`` is given and does not match its
    length, a ValueError is raised.
    """
    is_iterable = hasattr(var, "__iter__") and not isinstance(var, str)
    if not is_iterable:
        return [var] * (len_ or 1)
    if isinstance(len_, int) and len(var) != len_:
        raise ValueError("Length of variable differs from %i." % len_)
    return var
def find_idx_by_name(df, column, name):
    """Return the unique index label whose value in *column* equals *name*.

    Raises UserWarning when no element, or more than one element, matches.
    """
    matches = df.index[df[column] == name]
    n_found = len(matches)
    if n_found == 0:
        raise UserWarning("In column '%s', there is no element named %s" % (column, name))
    if n_found > 1:
        raise UserWarning("In column '%s', multiple elements are named %s" % (column, name))
    return matches[0]
def idx_in_2nd_array(arr1, arr2, match=True):
    """ This function returns an array of indices of arr1 matching arr2.
    arr1 may include duplicates. If an item of arr1 misses in arr2, 'match' decides whether
    the idx of the nearest value (by insertion position) is returned (False) or an error is
    raised (True).
    """
    if match:
        missings = list(set(arr1) - set(arr2))
        if len(missings):
            raise ValueError("These values misses in arr2: " + str(missings))
    # unique values of arr1 plus the inverse mapping back to arr1's order
    arr1_, uni_inverse = np.unique(arr1, return_inverse=True)
    sort_lookup = np.argsort(arr2)
    arr2_ = np.sort(arr2)
    idx = np.searchsorted(arr2_, arr1_)
    # Bug fix: with match=False, a value above max(arr2) makes searchsorted
    # return len(arr2), which would raise IndexError below. Clip to the last
    # valid slot; values present in arr2 (the match=True path) are unaffected.
    idx = np.clip(idx, 0, len(arr2_) - 1)
    res = sort_lookup[idx][uni_inverse]
    return res
def column_indices(df, query_cols):
    """Return a numpy array with the positional indices of the columns named
    in *query_cols*. Works properly for string column names."""
    all_cols = df.columns.values
    order = np.argsort(all_cols)
    return order[np.searchsorted(all_cols, query_cols, sorter=order)]
def merge_dataframes(dfs, keep="first", sort_index=True, sort_column=True, column_to_sort=None,
                     index_time_str=None, **kwargs):
    """
    This is a wrapper function of pandas.concat(dfs, axis=0) to merge DataFrames.
    INPUT:
        **dfs** (DataFrames) - a sequence or mapping of DataFrames
    OPTIONAL:
        **keep** (str, "first") - Flag to decide which data are kept in case of duplicated
            indices - first, last or all duplicated data.
        **sort_index** (bool, True) - If True, the indices of the returning DataFrame will be
            sorted. If False, the indices and columns will be in order of the original DataFrames.
        **sort_column** (bool, True) - If True, the indices of the returning DataFrame will be
            sorted. If False, the indices and columns will be in order of the original DataFrames.
        **column_to_sort** (-, None) - If given, 'column_to_sort' must be a column name occuring in
            both DataFrames. The returning DataFrame will be sorted by this column. The input
            indices get lost.
        **index_time_str** (str, None) - If given, the indices or the 'column_to_sort' if given will
            be sorted in datetime order.
        ****kwargs** - Keyword arguments for pandas.concat() except axis, such as sort, join,
            join_axes, ignore_index, keys. 'sort' can overwrite 'sort_index' and 'sort_column'.
    """
    # concat is always along axis 0 here; a user-supplied 'axis' is ignored
    if "axis" in kwargs:
        if kwargs["axis"] != 0:
            logger.warning("'axis' is always assumed as zero.")
        kwargs.pop("axis")
    # a user-supplied 'sort' kwarg overrides both sort flags
    if "sort" in kwargs:
        if not kwargs["sort"] == sort_index == sort_column:
            sort_index = kwargs["sort"]
            sort_column = kwargs["sort"]
            if not sort_index or not sort_column:
                logger.warning("'sort' overwrites 'sort_index' and 'sort_column'.")
        kwargs.pop("sort")
    # --- set index_column as index
    if column_to_sort is not None:
        if any([column_to_sort not in df.columns for df in dfs]):
            raise KeyError("column_to_sort '%s' must be a column of " % column_to_sort +
                           "both dataframes, df1 and df2")
        if not sort_index:
            logger.warning("Since 'column_to_sort' is given, the returning DataFrame will be" +
                           "sorted by this column as well as the columns, although 'sort' " +
                           "was given as False.")
            sort_index = True
        dfs = [df.set_index(column_to_sort) for df in dfs]
    # --- concat
    df = pd.concat(dfs, axis=0, **kwargs)
    # --- unsorted index and columns
    output_index = df.index.drop_duplicates()
    # --- drop rows with duplicated indices
    # groupby over the index collapses duplicated index labels to one row each
    if keep == "first":
        df = df.groupby(df.index).first()
    elif keep == "last":
        df = df.groupby(df.index).last()
    elif keep != "all":
        raise ValueError("This value %s is unknown to 'keep'" % keep)
    # --- sorted index and reindex columns
    if sort_index:
        if index_time_str:
            # parse index labels as datetimes, sort chronologically, and
            # format them back to strings for reindexing
            dates = [dt.datetime.strptime(ts, index_time_str) for ts in df.index]
            dates.sort()
            output_index = [dt.datetime.strftime(ts, index_time_str) for ts in dates]
            if keep == "all":
                logger.warning("If 'index_time_str' is not None, keep cannot be 'all' but are " +
                               "assumed as 'first'.")
        else:
            output_index = sorted(df.index)
    # --- reindex as required
    # reindex_axis() was removed in newer pandas; pick the call per version
    if keep != "all":
        if version.parse(pd.__version__) >= version.parse("0.21.0"):
            df = df.reindex(output_index)
        else:
            df = df.reindex_axis(output_index)
    if sort_column:
        if version.parse(pd.__version__) >= version.parse("0.21.0"):
            df = df.reindex(columns=sorted(df.columns))
        else:
            df = df.reindex_axis(sorted(df.columns), axis=1)
    # --- get back column_to_sort as column from index
    if column_to_sort is not None:
        df.reset_index(inplace=True)
    return df
def get_unique_duplicated_dict(df, subset=None, only_dupl_entries=False):
    """Map each unique row index of *df* to the list of indices duplicating it.

    Wrapper around _get_unique_duplicated_dict() that, unless
    'only_dupl_entries' is True, also adds every non-duplicated row with an
    empty list as value.
    """
    dupl_mask = df.duplicated(subset=subset, keep=False)
    result = _get_unique_duplicated_dict(df[dupl_mask], subset)
    if not only_dupl_entries:
        # rows without any duplicate map to an empty list
        for idx in df.index[~dupl_mask]:
            result[idx] = []
    return result
def _get_unique_duplicated_dict(df, subset=None):
    """ Returns a dict which keys are the indices of unique row of the dataframe 'df'. The values
    of the dict are the indices which are duplicated to each key index. """
    subset = subset or df.columns
    # 'dupl': indices flagged as duplicates of an earlier row;
    # 'uniq': first occurrence of each row content
    dupl = df.index[df.duplicated(subset=subset)]
    uniq = df.index[~df.duplicated(subset=subset)]
    uniq_dupl_dict = {}
    # nan_str only needed since compare_arrays() using old numpy versions connected to python 3.4
    # don't detect reliably nans as equal
    # extend the placeholder until it collides with no real value in df
    nan_str = "nan"
    while nan_str in df.values:
        nan_str += "n"
    for uni in uniq:
        # compare the unique row (repeated) against all duplicated rows;
        # rows matching in every subset column belong to this unique index
        do_dupl_fit = compare_arrays(
            np.repeat(df.loc[uni, subset].fillna(nan_str).values.reshape(1, -1), len(dupl), axis=0),
            df.loc[dupl, subset].fillna(nan_str).values).all(axis=1)
        uniq_dupl_dict[uni] = list(dupl[do_dupl_fit])
    return uniq_dupl_dict
def reindex_dict_dataframes(dataframes_dict):
    """Give every DataFrame in the dict (except the "StudyCases" entry) a
    fresh continuous integer index starting at zero. Modifies in place."""
    for key, value in dataframes_dict.items():
        if key != "StudyCases" and isinstance(value, pd.DataFrame):
            value.index = list(range(value.shape[0]))
def ensure_full_column_data_existence(dict_, tablename, column):
    """
    Ensures that the column of a dict's DataFrame is fully filled with information. If there are
    missing data, it will be filled up by name tablename+index
    """
    df = dict_[tablename]
    missing_data = df.index[df[column].isnull()]
    # fill missing data by tablename+index, e.g. "Bus 2".
    # Bug fix: assign via the frame's own .loc instead of the chained
    # df[column].loc[...] form -- the chained form writes through an
    # intermediate Series (SettingWithCopyWarning) and is silently lost
    # under pandas copy-on-write.
    df.loc[missing_data, column] = [tablename + ' %s' % n for n in (
        missing_data.values + 1)]
    return df
def avoid_duplicates_in_column(dict_, tablename, column):
    """Avoids duplicates in given column (as type string) of a dict's DataFrame.

    Every duplicated string value is renamed to 'value (i)' with a running
    counter i. Raises ValueError if the renaming itself still leaves
    duplicates behind.
    """
    df = dict_[tablename]
    query = df[column].duplicated(keep=False)
    for double in df[column].loc[query].unique():
        idx = df[column].index[df[column] == double]
        # Bug fix: assign via the frame's .loc (not the chained series .loc)
        # so the write cannot land on a temporary copy (SettingWithCopy /
        # pandas copy-on-write).
        df.loc[idx, column] = [double + " (%i)" % i for i in range(len(idx))]
    if sum(df[column].duplicated()):
        raise ValueError("The renaming by 'double + int' was not appropriate to remove all " +
                         "duplicates.")
def append_str_by_underline_count(str_series, append_only_duplicates=False, counting_start=1,
                                  reserved_strings=None):
    """
    Returns a Series of appended strings and a set of all strings which were appended or are set as
    reserved by input.
    INPUT:
        **str_series** (Series with string values) - strings to be appended by "_" + a number
    OPTIONAL:
        **append_only_duplicates** (bool, False) - If True, all strings will be appended. If False,
            only duplicated strings will be appended.
        **counting_start** (int, 1) - Integer to start appending with
        **reserved_strings** (iterable, None) - strings which are not allowed in str_series and must
            be appended.
    OUTPUT:
        **appended_strings** (Series with string values) - appended strings
        **reserved_strings** (set) - all reserved_strings from input and all strings which were
            appended
    """
    # --- initalizations
    # ensure only unique values in reserved_strings:
    reserved_strings = pd.Series(sorted(set(reserved_strings))) if reserved_strings is not None \
        else pd.Series()
    count = counting_start
    # --- do first append
    # concatenate reserved_strings and str_series (which should be appended by "_%i")
    # must be in this order (first reserved_strings) to append only the str_series (keep='first')
    if not append_only_duplicates:
        series = str_series + "_%i" % count
        series = pd.concat([reserved_strings, series], ignore_index=True)
        # every entry is treated as appended in this mode
        all_dupl = pd.Series([True]*len(series))
    else:
        series = pd.concat([reserved_strings, str_series], ignore_index=True)
        # only reserved entries start as "already appended"
        all_dupl = pd.Series([True]*len(reserved_strings)+[False]*len(str_series))
        dupl = series.duplicated()
        all_dupl |= dupl
        series.loc[dupl] += "_%i" % count
    dupl = series.duplicated()
    all_dupl |= dupl
    # --- append as much as necessary -> while loop
    # bump the numeric suffix on still-duplicated entries until all are unique
    while sum(dupl):
        series.loc[dupl] = series[dupl].str.replace("_%i" % count, "_%i" % (count+1))
        dupl = series.duplicated()
        all_dupl |= dupl
        count += 1
    # --- output adaptations
    # drop the reserved prefix and restore the caller's original index
    appended_strings = series.iloc[len(reserved_strings):]
    appended_strings.index = str_series.index
    reserved_strings = set(series[all_dupl])
    return appended_strings, reserved_strings
| [
"logging.getLogger",
"pandas.Series",
"numpy.unique",
"numpy.searchsorted",
"datetime.datetime.strptime",
"numpy.sort",
"numpy.argsort",
"packaging.version.parse",
"datetime.datetime.strftime",
"pandas.concat"
] | [((489, 516), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (506, 516), False, 'import logging\n'), ((1763, 1799), 'numpy.unique', 'np.unique', (['arr1'], {'return_inverse': '(True)'}), '(arr1, return_inverse=True)\n', (1772, 1799), True, 'import numpy as np\n'), ((1818, 1834), 'numpy.argsort', 'np.argsort', (['arr2'], {}), '(arr2)\n', (1828, 1834), True, 'import numpy as np\n'), ((1847, 1860), 'numpy.sort', 'np.sort', (['arr2'], {}), '(arr2)\n', (1854, 1860), True, 'import numpy as np\n'), ((1871, 1900), 'numpy.searchsorted', 'np.searchsorted', (['arr2_', 'arr1_'], {}), '(arr2_, arr1_)\n', (1886, 1900), True, 'import numpy as np\n'), ((2177, 2193), 'numpy.argsort', 'np.argsort', (['cols'], {}), '(cols)\n', (2187, 2193), True, 'import numpy as np\n'), ((4814, 4846), 'pandas.concat', 'pd.concat', (['dfs'], {'axis': '(0)'}), '(dfs, axis=0, **kwargs)\n', (4823, 4846), True, 'import pandas as pd\n'), ((2210, 2256), 'numpy.searchsorted', 'np.searchsorted', (['cols', 'query_cols'], {'sorter': 'sidx'}), '(cols, query_cols, sorter=sidx)\n', (2225, 2256), True, 'import numpy as np\n'), ((10678, 10689), 'pandas.Series', 'pd.Series', ([], {}), '()\n', (10687, 10689), True, 'import pandas as pd\n'), ((11024, 11080), 'pandas.concat', 'pd.concat', (['[reserved_strings, series]'], {'ignore_index': '(True)'}), '([reserved_strings, series], ignore_index=True)\n', (11033, 11080), True, 'import pandas as pd\n'), ((11157, 11217), 'pandas.concat', 'pd.concat', (['[reserved_strings, str_series]'], {'ignore_index': '(True)'}), '([reserved_strings, str_series], ignore_index=True)\n', (11166, 11217), True, 'import pandas as pd\n'), ((5789, 5818), 'packaging.version.parse', 'version.parse', (['pd.__version__'], {}), '(pd.__version__)\n', (5802, 5818), False, 'from packaging import version\n'), ((5822, 5845), 'packaging.version.parse', 'version.parse', (['"""0.21.0"""'], {}), "('0.21.0')\n", (5835, 5845), False, 'from packaging import version\n'), ((5981, 
6010), 'packaging.version.parse', 'version.parse', (['pd.__version__'], {}), '(pd.__version__)\n', (5994, 6010), False, 'from packaging import version\n'), ((6014, 6037), 'packaging.version.parse', 'version.parse', (['"""0.21.0"""'], {}), "('0.21.0')\n", (6027, 6037), False, 'from packaging import version\n'), ((5313, 5353), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['ts', 'index_time_str'], {}), '(ts, index_time_str)\n', (5333, 5353), True, 'import datetime as dt\n'), ((5427, 5467), 'datetime.datetime.strftime', 'dt.datetime.strftime', (['ts', 'index_time_str'], {}), '(ts, index_time_str)\n', (5447, 5467), True, 'import datetime as dt\n')] |
from typing import Any, Dict, List, Optional, Sequence, Set, Union
import numpy
from . import _vroom
from .amount import Amount
from .location import Location, LocationCoordinates, LocationIndex
from .time_window import TimeWindow
class JobBaseclass:
    """Baseclass for all Job classes containing common attributes."""

    # common state shared by Job and ShipmentStep
    _id: int
    _location: Location
    _setup: int
    _service: int
    _time_windows: Sequence[TimeWindow]
    _description: str

    def _get_attributes(self) -> Dict[str, Any]:
        """Arguments to be used in repr view."""
        return {
            "id": self.id,
            "location": self.location,
            "setup": self.setup,
            "service": self.service,
            "time_windows": self.time_windows,
            "description": self.description,
        }

    @property
    def description(self) -> str:
        """Optional string describing the job."""
        return self._description

    @property
    def id(self) -> int:
        """Job identifier number."""
        return self._id

    @property
    def location(self) -> Location:
        """
        The location where to go.

        Either by index (used with duration matrix) or
        by coordinate (used with map server).
        """
        return Location(self._location)

    @property
    def service(self) -> int:
        """Service duration at the job location."""
        return self._service

    @property
    def setup(self) -> int:
        """Setup cost before going out for the job."""
        return self._setup

    @property
    def time_windows(self) -> List[TimeWindow]:
        """Time window for when job can be delivered."""
        return [TimeWindow(tw) for tw in self._time_windows]

    def __repr__(self) -> str:
        # Build "vroom.ClassName(arg, ...)": always id and location, then only
        # the attributes that differ from their defaults.
        attributes = self._get_attributes()
        args = [f"{self.id}"]
        if isinstance(attributes["location"], LocationIndex):
            args.append(f"{self.location.index}")
        elif isinstance(attributes["location"], LocationCoordinates):
            args.append(f"{self.location.coords}")
        else:
            args.append(f"{self.location}")
        if attributes["setup"]:
            args.append(f"setup={attributes['setup']}")
        if attributes["service"]:
            args.append(f"service={attributes['service']}")
        # amount/delivery/pickup only exist on some subclasses -> .get()
        if attributes.get("amount", False):
            args.append(f"amount={numpy.asarray(attributes['amount']).tolist()}")
        if attributes.get("delivery", False):
            args.append(f"delivery={numpy.asarray(attributes['delivery']).tolist()}")
        if attributes.get("pickup", False):
            args.append(f"pickup={numpy.asarray(attributes['pickup']).tolist()}")
        if attributes["time_windows"] != [TimeWindow()]:
            windows = [(tw.start, tw.end) for tw in attributes["time_windows"]]
            args.append(f"time_windows={windows}")
        if attributes["description"]:
            args.append(f"description={attributes['description']!r}")
        return f"vroom.{self.__class__.__name__}({', '.join(args)})"
class Job(_vroom.Job, JobBaseclass):
    """A regular one-stop job with both a deliver and pickup that has to be performed.

    Args:
        id:
            Job identifier number. Two jobs can not have the same
            identifier.
        location:
            Location of the job. If interger, value interpreted as an the
            column in duration matrix. If pair of numbers, value
            interpreted as longitude and latitude coordinates respectively.
        setup:
            The cost of preparing the vehicle before actually going out for
            a job.
        service:
            The time (in secondes) it takes to pick up/deliver shipment
            when at customer.
        delivery:
            An interger representation of how much is being carried to
            customer.
        pickup:
            An interger representation of how much is being carried back
            from customer.
        skills:
            Skills required to perform job. Only vehicles which satisfies
            all required skills (i.e. has at minimum all skills values
            required) are allowed to perform this job.
        priority:
            The job priority level, where 0 is the most
            important and 100 is the least important.
        time_windows:
            Windows for where service is allowed to begin.
            Defaults to have not restraints.
        description:
            Optional string descriping the job.

    Examples:
        >>> vroom.Job(0, [4., 5.], delivery=[4], pickup=[7])
        vroom.Job(0, (4.0, 5.0), delivery=[4], pickup=[7])
    """

    def __init__(
        self,
        id: int,
        location: Union[Location, int, Sequence[float]],
        setup: int = 0,
        service: int = 0,
        delivery: Amount = Amount(),
        pickup: Amount = Amount(),
        skills: Optional[Set[int]] = None,
        priority: int = 0,
        time_windows: Sequence[TimeWindow] = (),
        description: str = "",
    ) -> None:
        # Normalize pickup/delivery so both have the same dimensionality:
        # if only one is given, the other is zero-filled to match its length;
        # if neither is given, both stay empty.
        if not pickup:
            if not delivery:
                pickup = Amount([])
                delivery = Amount([])
            else:
                pickup = Amount([0] * len(delivery))
        elif not delivery:
            delivery = Amount([0] * len(pickup))
        # delegate storage to the C++ base class
        _vroom.Job.__init__(
            self,
            id=int(id),
            location=Location(location),
            setup=int(setup),
            service=int(service),
            delivery=Amount(delivery),
            pickup=Amount(pickup),
            skills=set(skills or []),
            priority=int(priority),
            tws=[TimeWindow(tw) for tw in time_windows] or [TimeWindow()],
            description=str(description),
        )

    @property
    def delivery(self) -> Amount:
        """Amount carried to the customer."""
        return Amount(self._delivery)

    @property
    def pickup(self) -> Amount:
        """Amount carried back from the customer."""
        return Amount(self._pickup)

    @property
    def skills(self) -> int:
        """Skills required to perform the job."""
        return self._skills

    @property
    def priority(self) -> int:
        """Job priority level (0 most important, 100 least)."""
        return self._priority

    def _get_attributes(self) -> Dict[str, Any]:
        """Arguments to be used in repr view."""
        # extend the base attributes with the ones only Job has, but only
        # when they are non-default so the repr stays compact
        attributes = super()._get_attributes()
        if self._pickup:
            attributes["pickup"] = self.pickup
        if self._delivery:
            attributes["delivery"] = self.delivery
        if self._skills:
            attributes["skills"] = self.skills
        if self._priority:
            attributes["priority"] = self.priority
        return attributes
class ShipmentStep(JobBaseclass):
    """One leg (pickup or delivery) of a shipment that has to be performed.

    Args:
        id:
            Job identifier number. Two jobs can not have the same
            identifier.
        location:
            Location of the job. If interger, value interpreted as an the
            column in duration matrix. If pair of numbers, value
            interpreted as longitude and latitude coordinates respectively.
        setup:
            The cost of preparing the vehicle before actually going out for
            a job.
        service:
            The time (in secondes) it takes to pick up/deliver shipment
            when at customer.
        time_windows:
            Windows for where service is allowed to begin.
            Defaults to have not restraints.
        description:
            Optional string descriping the job.

    Examples:
        >>> vroom.ShipmentStep(0, [4., 5.])
        vroom.ShipmentStep(0, (4.0, 5.0))
    """

    def __init__(
        self,
        id: int,
        location: Union[Location, int, Sequence[float]],
        setup: int = 0,
        service: int = 0,
        time_windows: Sequence[TimeWindow] = (),
        description: str = "",
    ) -> None:
        self._id = int(id)
        self._location = Location(location)
        self._setup = int(setup)
        self._service = int(service)
        # fall back to a single unrestricted window when none are given
        windows = [TimeWindow(tw) for tw in time_windows]
        self._time_windows = windows or [TimeWindow()]
        self._description = str(description)
class Shipment:
    """A pickup/delivery pair that has to be performed together.

    Args:
        pickup:
            Description of the pickup part of the shipment.
        delivery:
            Description of the delivery part of the shipment.
        amount:
            An interger representation of how much is being carried back
            from customer.
        skills:
            Skills required to perform job. Only vehicles which satisfies
            all required skills (i.e. has at minimum all skills values
            required) are allowed to perform this job.
        priority:
            The job priority level, where 0 is the most
            important and 100 is the least important.

    Examples:
        >>> pickup = vroom.ShipmentStep(0, [4., 5.])
        >>> delivery = vroom.ShipmentStep(1, [5., 4.])
        >>> vroom.Shipment(pickup, delivery, amount=[7])  # doctest: +NORMALIZE_WHITESPACE
        vroom.Shipment(vroom.ShipmentStep(0, (4.0, 5.0)),
                       vroom.ShipmentStep(1, (5.0, 4.0)),
                       amount=[7])
    """

    def __init__(
        self,
        pickup: ShipmentStep,
        delivery: ShipmentStep,
        amount: Amount = Amount(),
        skills: Optional[Set[int]] = None,
        priority: int = 0,
    ) -> None:
        self.pickup = pickup
        self.delivery = delivery
        self.amount = Amount(amount)
        self.skills = skills or set()
        self.priority = int(priority)

    def __repr__(self) -> str:
        # always show the two steps; optional fields only when non-default
        parts = [str(self.pickup), str(self.delivery)]
        if self.amount:
            parts.append(f"amount={numpy.asarray(self.amount).tolist()}")
        if self.skills:
            parts.append(f"skills={self.skills}")
        if self.priority:
            parts.append(f"priority={self.priority}")
        return f"vroom.{self.__class__.__name__}({', '.join(parts)})"
| [
"numpy.asarray"
] | [((2193, 2228), 'numpy.asarray', 'numpy.asarray', (["attributes['amount']"], {}), "(attributes['amount'])\n", (2206, 2228), False, 'import numpy\n'), ((2323, 2360), 'numpy.asarray', 'numpy.asarray', (["attributes['delivery']"], {}), "(attributes['delivery'])\n", (2336, 2360), False, 'import numpy\n'), ((2451, 2486), 'numpy.asarray', 'numpy.asarray', (["attributes['pickup']"], {}), "(attributes['pickup'])\n", (2464, 2486), False, 'import numpy\n'), ((9466, 9492), 'numpy.asarray', 'numpy.asarray', (['self.amount'], {}), '(self.amount)\n', (9479, 9492), False, 'import numpy\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tarfile
import os
import pickle as pkl
import numpy as np
import skimage
import skimage.io
import skimage.transform
from tensorflow import keras
#from tensorflow.examples.tutorials.mnist import input_data
# Load the raw MNIST digit arrays via Keras.
(X_train,y_train),(X_test,y_test) = keras.datasets.mnist.load_data()
#mnist = input_data.read_data_sets('./dataset/mnist')
# The BSDS500 tarball supplies the natural-image backgrounds for MNIST-M.
BST_PATH = os.path.abspath('./dataset/BSR_bsds500.tgz')
# Fixed seed so background-patch selection is reproducible across runs.
rand = np.random.RandomState(42)
f = tarfile.open(BST_PATH)
# Collect only the training-split image members of the archive.
train_files = []
for name in f.getnames():
    if name.startswith('BSR/BSDS500/data/images/train/'):
        train_files.append(name)
print('Loading BSR training images')
# Decode each BSR training image from the tar archive into memory.
background_data = []
for name in train_files:
    try:
        fp = f.extractfile(name)
        bg_img = skimage.io.imread(fp)
        background_data.append(bg_img)
    except Exception:
        # Best-effort: skip members that fail to extract or decode, but
        # do not swallow KeyboardInterrupt/SystemExit like a bare `except:`.
        continue
def compose_image(digit, background):
    """Difference-blend a digit and a random patch from a background image.

    Parameters
    ----------
    digit : ndarray, shape (dw, dh, channels)
        Digit image; must be no larger than `background`.
    background : ndarray, shape (w, h, channels)
        Background image a random patch is taken from.

    Returns
    -------
    ndarray of uint8 with the digit's shape: abs(background_patch - digit).
    """
    w, h, _ = background.shape
    dw, dh, _ = digit.shape
    x = np.random.randint(0, w - dw)
    y = np.random.randint(0, h - dh)
    bg = background[x:x+dw, y:y+dh]
    # Widen to a signed type before subtracting: uint8 - uint8 wraps
    # modulo 256, which would make np.abs a no-op. For float digits the
    # result is unchanged (int16 - float promotes to float as before).
    return np.abs(bg.astype(np.int16) - digit).astype(np.uint8)
def mnist_to_img(x):
    """Binarize an MNIST digit (nonzero -> 255) and stack it to 3-channel RGB."""
    binary = np.where(x > 0, np.float32(255), np.float32(0))
    channel = binary.reshape([28, 28, 1])
    return np.concatenate([channel] * 3, 2)
def create_mnistm(X):
    """
    Given an array of MNIST digits, blend random background patches to
    build the MNIST-M dataset as described in
    http://jmlr.org/papers/volume17/15-239/15-239.pdf
    """
    blended = np.zeros([X.shape[0], 28, 28, 3], np.uint8)
    for idx in range(X.shape[0]):
        if idx % 1000 == 0:
            print('Processing example', idx)
        # Pick a random BSR background and difference-blend the digit in.
        background = rand.choice(background_data)
        digit = mnist_to_img(X[idx])
        blended[idx] = compose_image(digit, background)
    return blended
print('Building train set...')
train = create_mnistm(X_train)
print('Building validation set...')
valid = create_mnistm(X_test)
# Save dataset as pickle
# NOTE(review): `f` below shadows the tarfile handle opened earlier.
mnistm_dir = os.path.abspath("./dataset/mnistm")
if not os.path.exists(mnistm_dir):
    os.mkdir(mnistm_dir)
with open(os.path.join(mnistm_dir,'mnistm_data.pkl'), 'wb') as f:
    pkl.dump({ 'train': train, 'valid': valid }, f, pkl.HIGHEST_PROTOCOL)
| [
"os.path.exists",
"numpy.abs",
"tarfile.open",
"pickle.dump",
"tensorflow.keras.datasets.mnist.load_data",
"os.path.join",
"numpy.random.randint",
"numpy.zeros",
"skimage.io.imread",
"os.mkdir",
"numpy.concatenate",
"os.path.abspath",
"numpy.random.RandomState"
] | [((358, 390), 'tensorflow.keras.datasets.mnist.load_data', 'keras.datasets.mnist.load_data', ([], {}), '()\n', (388, 390), False, 'from tensorflow import keras\n'), ((457, 501), 'os.path.abspath', 'os.path.abspath', (['"""./dataset/BSR_bsds500.tgz"""'], {}), "('./dataset/BSR_bsds500.tgz')\n", (472, 501), False, 'import os\n'), ((510, 535), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (531, 535), True, 'import numpy as np\n'), ((541, 563), 'tarfile.open', 'tarfile.open', (['BST_PATH'], {}), '(BST_PATH)\n', (553, 563), False, 'import tarfile\n'), ((2140, 2175), 'os.path.abspath', 'os.path.abspath', (['"""./dataset/mnistm"""'], {}), "('./dataset/mnistm')\n", (2155, 2175), False, 'import os\n'), ((1117, 1145), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w - dw)'], {}), '(0, w - dw)\n', (1134, 1145), True, 'import numpy as np\n'), ((1154, 1182), 'numpy.random.randint', 'np.random.randint', (['(0)', '(h - dh)'], {}), '(0, h - dh)\n', (1171, 1182), True, 'import numpy as np\n'), ((1428, 1456), 'numpy.concatenate', 'np.concatenate', (['[d, d, d]', '(2)'], {}), '([d, d, d], 2)\n', (1442, 1456), True, 'import numpy as np\n'), ((1676, 1719), 'numpy.zeros', 'np.zeros', (['[X.shape[0], 28, 28, 3]', 'np.uint8'], {}), '([X.shape[0], 28, 28, 3], np.uint8)\n', (1684, 1719), True, 'import numpy as np\n'), ((2183, 2209), 'os.path.exists', 'os.path.exists', (['mnistm_dir'], {}), '(mnistm_dir)\n', (2197, 2209), False, 'import os\n'), ((2215, 2235), 'os.mkdir', 'os.mkdir', (['mnistm_dir'], {}), '(mnistm_dir)\n', (2223, 2235), False, 'import os\n'), ((2306, 2373), 'pickle.dump', 'pkl.dump', (["{'train': train, 'valid': valid}", 'f', 'pkl.HIGHEST_PROTOCOL'], {}), "({'train': train, 'valid': valid}, f, pkl.HIGHEST_PROTOCOL)\n", (2314, 2373), True, 'import pickle as pkl\n'), ((841, 862), 'skimage.io.imread', 'skimage.io.imread', (['fp'], {}), '(fp)\n', (858, 862), False, 'import skimage\n'), ((2246, 2289), 'os.path.join', 'os.path.join', 
(['mnistm_dir', '"""mnistm_data.pkl"""'], {}), "(mnistm_dir, 'mnistm_data.pkl')\n", (2258, 2289), False, 'import os\n'), ((1235, 1253), 'numpy.abs', 'np.abs', (['(bg - digit)'], {}), '(bg - digit)\n', (1241, 1253), True, 'import numpy as np\n')] |
"""
Simple Baseline Model for AV-Sync. Organinzed in PyTorch Lightning
Flatten both audio and video features; concat them and feed into sequential linear layers.
"""
import numpy as np
import torch
import torch.nn as nn
import pytorch_lightning as pl
class PrintSize(nn.Module):
    """Debugging layer: print the incoming tensor's shape, pass it through."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Identity apart from the side-effecting print.
        print(x.shape)
        return x
class NNBaseline(pl.LightningModule):
    """Few-shot baseline classifier wrapped as a LightningModule.

    Configuration (dataset name, backbone, ways, learning rate, optimizer
    type, ...) is read from a nested ``configs`` object in ``_init_configs``;
    the backbone network itself is built in ``_init_model``.
    """

    def __init__(self, configs):
        super().__init__()
        self._init_configs(configs)
        self._init_model()
        self.save_hyperparameters()

    def _init_configs(self, configs):
        """Copy the relevant config fields and build loss/metric objects."""
        self.dataset_name = configs.dataset.name
        self.backbone = configs.training.backbone
        self.ways = configs.dataset.ways
        self.hidden_size = configs.training.hidden_dim
        self.lr = configs.training.lr
        self.momentum = configs.training.momentum
        self.optimizer_type = configs.training.optimizer_type
        self.criterion = nn.CrossEntropyLoss()
        # NOTE(review): pl.metrics was removed in later Lightning releases
        # (moved to torchmetrics) — confirm the pinned version.
        self.train_acc = pl.metrics.Accuracy()
        self.valid_acc = pl.metrics.Accuracy()
        self.test_acc = pl.metrics.Accuracy()

    def _init_model(self):
        """Instantiate the backbone; only the default MiniImagenet CNN is supported."""
        if self.backbone == "default":
            if self.dataset_name == "mini-imagenet":
                # NOTE(review): `l2l` (learn2learn) is not imported in this
                # module — this line raises NameError unless the import is
                # added at file level.
                self.model = l2l.vision.models.MiniImagenetCNN(self.ways)
            else:
                raise NotImplementedError
        else:
            raise NotImplementedError

    def forward(self, data):
        return self.model(data)

    def configure_optimizers(self):
        """Build the optimizer selected by ``optimizer_type`` (Adam/AdamW/SGD)."""
        if self.optimizer_type == "Adam":
            return torch.optim.Adam(self.parameters(), lr = self.lr)
        elif self.optimizer_type == "AdamW":
            return torch.optim.AdamW(self.parameters(), lr = self.lr)
        elif self.optimizer_type == "SGD":
            return torch.optim.SGD(self.parameters(), lr = self.lr, momentum = self.momentum)

    def training_step(self, batch, batch_idx):
        """One supervised step: cross-entropy loss plus accuracy logging."""
        data, labels = batch
        prob = self(data)
        loss = self.criterion(prob, labels)  # Lightning requires the key 'loss'
        predictions = torch.argmax(prob, axis = -1)
        self.log("loss", loss, on_epoch = True)
        self.log("train_acc", self.train_acc(predictions, labels), prog_bar = True, on_epoch = True)
        return loss

    def validation_step(self, batch, batch_idx):
        data, labels = batch
        prob = self(data)
        loss = self.criterion(prob, labels)
        predictions = torch.argmax(prob, axis = -1)
        self.log("val_loss", loss, on_epoch = True, prog_bar = True)
        self.log("val_acc", self.valid_acc(predictions, labels), on_epoch = True, prog_bar = True)

    def test_step(self, batch, batch_idx):
        data, labels = batch
        prob = self(data)
        test_loss = self.criterion(prob, labels)
        predictions = torch.argmax(prob, axis = -1)
        self.log("test_loss", test_loss, on_epoch = True, prog_bar = True)
        self.log("test_acc", self.test_acc(predictions, labels), on_epoch = True, prog_bar = True)
        return test_loss

    def _sample_data(self, X, y):
        """Split (X, y) into an 'oracle' subset of `shots` samples per way
        and the remaining 'pseudo' subset; also return the boolean mask.
        """
        indices = []
        # BUG FIX: the original used the undefined name `ways`; the value
        # configured in _init_configs is `self.ways`.
        for i in range(self.ways):
            # NOTE(review): `self.shots` is never set in this class —
            # presumably supplied by a subclass or config; verify.
            indices.append(np.random.choice(self.shots*2,self.shots, replace = False) + 2 * i * self.shots)
        mask_indices = np.hstack(indices)
        mask = np.zeros(X.size(0), dtype=bool)
        mask[mask_indices] = True
        X_oracle = X[mask]
        y_oracle = y[mask]
        X_pseudo = X[~mask]
        y_pseudo = y[~mask]
        return X_oracle, y_oracle, X_pseudo, y_pseudo, mask
class LitBaseline(pl.LightningModule):
    """Two-stream AV-sync baseline: flatten video and audio features,
    project each to a hidden vector, concatenate, and classify with an MLP.
    """

    def __init__(self, video_size, audio_size, configs):
        super().__init__()
        self._init_configs(configs)
        self._init_model(video_size, audio_size, self.hidden_size)
        self.save_hyperparameters()

    def _init_configs(self, configs):
        """Copy config fields and build loss, metrics and test-result buffers."""
        self.hidden_size = configs.training.hidden_dim
        self.lr = configs.training.lr
        self.momentum = configs.training.momentum
        self.optimizer_type = configs.training.optimizer_type
        self.criterion = nn.CrossEntropyLoss()
        # NOTE(review): pl.metrics moved to torchmetrics in newer Lightning
        # versions — confirm the pinned version.
        self.train_acc = pl.metrics.Accuracy()
        self.valid_acc = pl.metrics.Accuracy()
        self.test_acc = pl.metrics.Accuracy()
        # Accumulated across test steps for post-hoc analysis.
        self.test_predictions = []
        self.test_shifts = []
        self.test_labels = []
        pass

    def _init_model(self, video_size, audio_size, hidden_size = 128):
        """
        Private function for initializing the model architecture
        Params:
            video_size: iterable
                the shape of the video representation matrix
            audio_size: iterable
                the shape of the audio representation matrix
            hidden_size: int, optional
        """
        self.video_stream = nn.Sequential(
            nn.Flatten(),
            nn.Linear(np.product(video_size), hidden_size)
        )
        self.audio_stream = nn.Sequential(
            nn.Flatten(),
            nn.Linear(np.product(audio_size), hidden_size)
        )
        # Classifier head over the concatenated (2*hidden) embedding;
        # binary output (in-sync / out-of-sync).
        self.fc = nn.Sequential(
            nn.Linear(hidden_size*2, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, hidden_size//2),
            nn.ReLU(),
            nn.Dropout(p=0.5),
            nn.Linear(hidden_size//2, 2))
        self.relu = nn.ReLU()

    def forward(self, video, audio):
        """Embed both streams, concatenate along features, classify."""
        v_out = self.video_stream(video)
        a_out = self.audio_stream(audio)
        cat_out = torch.cat((v_out, a_out), 1)
        out = self.fc(cat_out)
        return out

    def configure_optimizers(self):
        """Build the optimizer selected by ``optimizer_type`` (Adam/AdamW/SGD)."""
        if self.optimizer_type == "Adam":
            return torch.optim.Adam(self.parameters(), lr = self.lr)
        elif self.optimizer_type == "AdamW":
            return torch.optim.AdamW(self.parameters(), lr = self.lr)
        elif self.optimizer_type == "SGD":
            return torch.optim.SGD(self.parameters(), lr = self.lr, momentum = self.momentum)

    def training_step(self, batch, batch_idx):
        """One supervised step: cross-entropy loss plus accuracy logging."""
        video, audio, labels, shifts = batch
        prob = self(video, audio)
        loss = self.criterion(prob, labels)  # Lightning requires the key 'loss'
        predictions = torch.argmax(prob, axis = -1)
        self.log("loss", loss, on_epoch = True)
        self.log("train_acc", self.train_acc(predictions, labels), prog_bar = True, on_epoch = True)
        return loss
    # def training_step_end(self, outputs):
    #     self.train_acc(outputs['preds'], outputs['target'])
    #     self.log('train_acc', self.train_acc, on_step=True, prog_bar = True)
    #     self.log("loss", outputs['loss'], on_step=True, prog_bar = True)
    # def training_epoch_end(self, outs):
    #     # log epoch metric
    #     self.log('train_acc_epoch', self.train_acc.compute(), prog_bar = True)

    def validation_step(self, batch, batch_idx):
        video, audio, labels, shifts = batch
        prob = self(video, audio)
        loss = self.criterion(prob, labels)
        predictions = torch.argmax(prob, axis = -1)
        self.log("val_loss", loss, on_epoch = True, prog_bar = True)
        self.log("val_acc", self.valid_acc(predictions, labels), on_epoch = True, prog_bar = True)
    # return val_loss, val_acc
    # def validation_epoch_end(self, outs):
    #     self.log('val_acc_epoch', self.valid_acc.compute(), prog_bar = True)

    def test_step(self, batch, batch_idx):
        """Evaluation step; also buffers predictions/shifts/labels for analysis."""
        video, audio, labels, shifts = batch
        prob = self(video, audio)
        test_loss = self.criterion(prob, labels)
        predictions = torch.argmax(prob, axis = -1)
        self.log("test_loss", test_loss, on_epoch = True, prog_bar = True)
        self.log("test_acc", self.test_acc(predictions, labels), on_epoch = True, prog_bar = True)
        self.test_predictions.append(predictions)
        self.test_shifts.append(shifts)
        self.test_labels.append(labels)
        return test_loss
| [
"numpy.product",
"torch.nn.ReLU",
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"numpy.hstack",
"numpy.random.choice",
"torch.nn.Flatten",
"pytorch_lightning.metrics.Accuracy",
"torch.nn.Linear",
"torch.cat",
"torch.argmax"
] | [((1025, 1046), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1044, 1046), True, 'import torch.nn as nn\n'), ((1072, 1093), 'pytorch_lightning.metrics.Accuracy', 'pl.metrics.Accuracy', ([], {}), '()\n', (1091, 1093), True, 'import pytorch_lightning as pl\n'), ((1119, 1140), 'pytorch_lightning.metrics.Accuracy', 'pl.metrics.Accuracy', ([], {}), '()\n', (1138, 1140), True, 'import pytorch_lightning as pl\n'), ((1165, 1186), 'pytorch_lightning.metrics.Accuracy', 'pl.metrics.Accuracy', ([], {}), '()\n', (1184, 1186), True, 'import pytorch_lightning as pl\n'), ((2176, 2203), 'torch.argmax', 'torch.argmax', (['prob'], {'axis': '(-1)'}), '(prob, axis=-1)\n', (2188, 2203), False, 'import torch\n'), ((2575, 2602), 'torch.argmax', 'torch.argmax', (['prob'], {'axis': '(-1)'}), '(prob, axis=-1)\n', (2587, 2602), False, 'import torch\n'), ((2968, 2995), 'torch.argmax', 'torch.argmax', (['prob'], {'axis': '(-1)'}), '(prob, axis=-1)\n', (2980, 2995), False, 'import torch\n'), ((3415, 3433), 'numpy.hstack', 'np.hstack', (['indices'], {}), '(indices)\n', (3424, 3433), True, 'import numpy as np\n'), ((4223, 4244), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (4242, 4244), True, 'import torch.nn as nn\n'), ((4270, 4291), 'pytorch_lightning.metrics.Accuracy', 'pl.metrics.Accuracy', ([], {}), '()\n', (4289, 4291), True, 'import pytorch_lightning as pl\n'), ((4317, 4338), 'pytorch_lightning.metrics.Accuracy', 'pl.metrics.Accuracy', ([], {}), '()\n', (4336, 4338), True, 'import pytorch_lightning as pl\n'), ((4363, 4384), 'pytorch_lightning.metrics.Accuracy', 'pl.metrics.Accuracy', ([], {}), '()\n', (4382, 4384), True, 'import pytorch_lightning as pl\n'), ((5498, 5507), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5505, 5507), True, 'import torch.nn as nn\n'), ((5657, 5685), 'torch.cat', 'torch.cat', (['(v_out, a_out)', '(1)'], {}), '((v_out, a_out), 1)\n', (5666, 5685), False, 'import torch\n'), ((6363, 6390), 'torch.argmax', 
'torch.argmax', (['prob'], {'axis': '(-1)'}), '(prob, axis=-1)\n', (6375, 6390), False, 'import torch\n'), ((7200, 7227), 'torch.argmax', 'torch.argmax', (['prob'], {'axis': '(-1)'}), '(prob, axis=-1)\n', (7212, 7227), False, 'import torch\n'), ((7781, 7808), 'torch.argmax', 'torch.argmax', (['prob'], {'axis': '(-1)'}), '(prob, axis=-1)\n', (7793, 7808), False, 'import torch\n'), ((5000, 5012), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (5010, 5012), True, 'import torch.nn as nn\n'), ((5139, 5151), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (5149, 5151), True, 'import torch.nn as nn\n'), ((5268, 5307), 'torch.nn.Linear', 'nn.Linear', (['(hidden_size * 2)', 'hidden_size'], {}), '(hidden_size * 2, hidden_size)\n', (5277, 5307), True, 'import torch.nn as nn\n'), ((5319, 5328), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5326, 5328), True, 'import torch.nn as nn\n'), ((5342, 5382), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', '(hidden_size // 2)'], {}), '(hidden_size, hidden_size // 2)\n', (5351, 5382), True, 'import torch.nn as nn\n'), ((5394, 5403), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5401, 5403), True, 'import torch.nn as nn\n'), ((5417, 5434), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (5427, 5434), True, 'import torch.nn as nn\n'), ((5448, 5478), 'torch.nn.Linear', 'nn.Linear', (['(hidden_size // 2)', '(2)'], {}), '(hidden_size // 2, 2)\n', (5457, 5478), True, 'import torch.nn as nn\n'), ((5036, 5058), 'numpy.product', 'np.product', (['video_size'], {}), '(video_size)\n', (5046, 5058), True, 'import numpy as np\n'), ((5175, 5197), 'numpy.product', 'np.product', (['audio_size'], {}), '(audio_size)\n', (5185, 5197), True, 'import numpy as np\n'), ((3311, 3370), 'numpy.random.choice', 'np.random.choice', (['(self.shots * 2)', 'self.shots'], {'replace': '(False)'}), '(self.shots * 2, self.shots, replace=False)\n', (3327, 3370), True, 'import numpy as np\n')] |
"""
Author(s): <NAME>
See LICENCE.txt for licensing and contact information.
"""
__all__ = ['mstack', 'wget']
import ipdb
def mstack(vs, fs):
    """Stack several (vertices, faces) meshes into one, shifting each
    face array by the number of vertices that precede it."""
    import chumpy as ch
    import numpy as np
    lengths = [v.shape[0] for v in vs]
    shifted = []
    for i, face in enumerate(fs):
        offset = np.sum(lengths[:i]).astype(np.uint32)
        shifted.append(face + offset)
    f = np.vstack(shifted)
    v = ch.vstack(vs)
    return v, f
def wget(url, dest_fname=None):
    """Download `url` to `dest_fname`.

    Parameters
    ----------
    url : str
        URL to fetch (any scheme urllib supports).
    dest_fname : str, optional
        Destination path; defaults to the URL's basename placed next to
        this module.

    Raises
    ------
    Exception
        If the URL cannot be retrieved (original error chained as cause).
    """
    import urllib.request, urllib.error, urllib.parse
    from os.path import split, join
    curdir = split(__file__)[0]
    if dest_fname is None:
        dest_fname = join(curdir, split(url)[1])
    try:
        # Context manager ensures the connection is closed on all paths.
        with urllib.request.urlopen(url) as response:
            contents = response.read()
    except Exception as err:
        # Chain the original error instead of discarding it (the old bare
        # `except:` also swallowed KeyboardInterrupt/SystemExit).
        raise Exception('Unable to get url: %s' % (url,)) from err
    with open(dest_fname, 'wb') as fh:
        fh.write(contents)
| [
"numpy.sum",
"chumpy.vstack",
"os.path.split"
] | [((328, 341), 'chumpy.vstack', 'ch.vstack', (['vs'], {}), '(vs)\n', (337, 341), True, 'import chumpy as ch\n'), ((497, 512), 'os.path.split', 'split', (['__file__'], {}), '(__file__)\n', (502, 512), False, 'from os.path import split, join\n'), ((577, 587), 'os.path.split', 'split', (['url'], {}), '(url)\n', (582, 587), False, 'from os.path import split, join\n'), ((256, 275), 'numpy.sum', 'np.sum', (['lengths[:i]'], {}), '(lengths[:i])\n', (262, 275), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright © 2020 <NAME>
"""Post processing functions for Zemax import
.. Created on Mon Aug 10 18:17:55 2020
.. codeauthor: <NAME>
"""
import numpy as np
import rayoptics.seq.medium as mdm
import rayoptics.elem.profiles as profiles
import rayoptics.elem.surface as surface
from rayoptics.optical.model_enums import DecenterType as dec
def apply_fct_to_sm(opt_model, fct, start=None, stop=None, step=None):
    """Apply ``fct(opt_model, cur)`` over seq_model.ifcs indices, by
    default in reverse (from the last index down to, but excluding, 0).

    Returns the number of calls whose return value was truthy.
    """
    sm = opt_model.seq_model
    if start is None:
        start = len(sm.ifcs) - 1
    if stop is None:
        stop = 0
    if step is None:
        step = -1
    return sum(1 for cur in range(start, stop, step) if fct(opt_model, cur))
def convert_to_bend(opt_model, cur):
    """Scan the zemax import for tilted mirrors and convert to BEND types.

    A reflecting surface bracketed by a pair of COORDBRK dummy surfaces
    with identical Euler angles absorbs the decenter as a BEND; the two
    coordinate breaks are removed. Returns True when a conversion occurred.
    """
    sm = opt_model.seq_model
    # Guard the neighbor accesses: `cur` needs both a predecessor and a
    # successor (the original could IndexError on the last surface and
    # silently wrap sm.ifcs[cur-1] around to the end for cur == 0).
    if not 0 < cur < len(sm.ifcs) - 1:
        return False
    ifc = sm.ifcs[cur]
    if ifc.interact_mode == 'reflect':
        ifc_p = sm.ifcs[cur-1]
        ifc_f = sm.ifcs[cur+1]
        if (ifc_p.z_type == 'COORDBRK' and ifc_f.z_type == 'COORDBRK'):
            if np.array_equal(ifc_f.decenter.euler, ifc_p.decenter.euler):
                ifc.decenter = ifc_p.decenter
                ifc.decenter.dtype = dec.BEND
                # Remove the following break first so index cur-1 is
                # unaffected by the first removal.
                sm.remove(cur+1, prev=True)
                sm.remove(cur-1)
                return True
    return False
def convert_to_dar(opt_model, cur):
    """Scan the zemax import for tilted surfs and convert to DAR types.

    A surface sandwiched between two COORDBRK surfaces whose summed
    decenters and Euler angles are all zero becomes a DAR-decentered
    interface; the two coordinate breaks are dropped.
    """
    sm = opt_model.seq_model
    if cur >= len(sm.ifcs) - 1:
        return False
    ifc = sm.ifcs[cur]
    before = sm.ifcs[cur-1]
    after = sm.ifcs[cur+1]
    if before.z_type != 'COORDBRK' or after.z_type != 'COORDBRK':
        return False
    net_dec = after.decenter.dec + before.decenter.dec
    net_euler = after.decenter.euler + before.decenter.euler
    if np.all(net_dec == 0) and np.all(net_euler == 0):
        ifc.decenter = before.decenter
        ifc.decenter.dtype = dec.DAR
        sm.remove(cur+1, prev=True)
        sm.remove(cur-1)
        return True
    return False
def collapse_coordbrk(opt_model, cur):
    """Attempt to apply the cur COORDBRK to an adjacent real interface.

    Returns True when the break's decenter was transferred to a neighbor
    and the break removed; False when `cur` is not a COORDBRK or the
    neighbor already has a decenter.
    """
    sm = opt_model.seq_model
    cb = sm.ifcs[cur]
    if cb.z_type != 'COORDBRK':
        return False
    # A reverse (REV) break applies to the preceding surface; any other
    # break applies to the following one.
    apply_to_prev = cb.decenter.dtype == dec.REV
    target = sm.ifcs[cur-1] if apply_to_prev else sm.ifcs[cur+1]
    if target.decenter is not None:
        # The neighbor already carries a decenter; leave the break alone.
        return False
    target.decenter = cb.decenter
    sm.remove(cur, prev=apply_to_prev)
    return True
def remove_null_sg(opt_model, cur):
    """Remove sg with planar profile and an adjacent zero thickness air gap.

    Returns True when a null surface/gap pair was removed.
    """
    sm = opt_model.seq_model
    if not is_null_ifc(sm.ifcs[cur]):
        return False
    # Prefer collapsing into the following gap; fall back to the preceding
    # one when there is no (null) following gap.
    if cur <= len(sm.gaps) - 1 and is_null_gap(sm.gaps[cur]):
        sm.remove(cur, prev=False)
        return True
    if cur > 0 and is_null_gap(sm.gaps[cur-1]):
        sm.remove(cur, prev=True)
        return True
    return False
def is_null_ifc(ifc):
    """True for a flat (cv == 0), decenter-free, transmitting spherical Surface."""
    if not isinstance(ifc, surface.Surface):
        return False
    if not isinstance(ifc.profile, profiles.Spherical):
        return False
    if (ifc.profile.cv == 0 and
            ifc.decenter is None and
            ifc.interact_mode == 'transmit'):
        return True
    return False
def is_null_gap(gap):
    """True for a zero-thickness gap whose medium is air."""
    if gap.thi != 0:
        return False
    return isinstance(gap.medium, mdm.Air)
| [
"numpy.all",
"numpy.array_equal"
] | [((1194, 1252), 'numpy.array_equal', 'np.array_equal', (['ifc_f.decenter.euler', 'ifc_p.decenter.euler'], {}), '(ifc_f.decenter.euler, ifc_p.decenter.euler)\n', (1208, 1252), True, 'import numpy as np\n'), ((1947, 1968), 'numpy.all', 'np.all', (['(acum_dec == 0)'], {}), '(acum_dec == 0)\n', (1953, 1968), True, 'import numpy as np\n'), ((1973, 1996), 'numpy.all', 'np.all', (['(acum_euler == 0)'], {}), '(acum_euler == 0)\n', (1979, 1996), True, 'import numpy as np\n')] |
"""This module contains functions relevant to the ALARA activation code and the Chebyshev Rational Approximation Method
"""
from __future__ import print_function
from pyne.xs.data_source import SimpleDataSource
from pyne.data import N_A, decay_const, decay_children, branch_ratio
from pyne.nucname import serpent, alara, znum, anum
from pyne import nucname
from pyne.material import Material, from_atom_frac
from pyne.mesh import Mesh, MeshError, HAVE_PYMOAB
import os
import collections
from warnings import warn
from pyne.utils import QAWarning, to_sec
import numpy as np
import tables as tb
# Flag this module as not yet QA compliant when it is imported.
warn(__name__ + " is not yet QA compliant.", QAWarning)
# Python 2/3 compatibility: `basestring` only exists on Python 2.
try:
    basestring
except NameError:
    basestring = str
# mesh_iterate is only available when the optional PyMOAB backend loaded.
if HAVE_PYMOAB:
    from pyne.mesh import mesh_iterate
else:
    warn("The PyMOAB optional dependency could not be imported. "
         "Some aspects of the mesh module may be incomplete.", QAWarning)
def mesh_to_fluxin(flux_mesh, flux_tag, fluxin="fluxin.out",
                   reverse=False, sub_voxel=False, cell_fracs=None,
                   cell_mats=None):
    """This function creates an ALARA fluxin file from fluxes tagged on a PyNE
    Mesh object. Fluxes are printed in the order of the flux_mesh.__iter__().

    Parameters
    ----------
    flux_mesh : PyNE Mesh object
        Contains the mesh with fluxes tagged on each volume element.
    flux_tag : string
        The name of the tag of the flux mesh. Flux values for different energy
        groups are assumed to be represented as vector tags.
    fluxin : string
        The name of the ALARA fluxin file to be output.
    reverse : bool
        If true, fluxes will be printed in the reverse order as they appear in
        the flux vector tagged on the mesh.
    sub_voxel: bool, optional
        If true, the sub-voxel r2s work flow will be used. The flux of a voxel
        is duplicated c times, where c is the number of cells in that voxel.
    cell_fracs : structured array, optional
        The output from dagmc.discretize_geom(). A sorted, one dimensional
        array, each entry containing the following fields:

            :idx: int
                The volume element index.
            :cell: int
                The geometry cell number.
            :vol_frac: float
                The volume fraction of the cell within the mesh ve.
            :rel_error: float
                The relative error associated with the volume fraction.

        The array must be sorted with respect to both idx and cell, with
        cell changing fastest.
    cell_mats : dict, optional
        Maps geometry cell numbers to PyNE Material objects.
        The cell_fracs and cell_mats are used only when sub_voxel=True.
        If sub_voxel=False, neither cell_fracs nor cell_mats will be used.
    """
    tag_flux = flux_mesh.get_tag(flux_tag)
    # find number of e_groups from the flux vector of the first element
    e_groups = tag_flux[list(mesh_iterate(flux_mesh.mesh))[0]]
    e_groups = np.atleast_1d(e_groups)
    num_e_groups = len(e_groups)
    # Establish for loop bounds based on if forward or backward printing
    # is requested
    if not reverse:
        start = 0
        stop = num_e_groups
        direction = 1
    else:
        start = num_e_groups - 1
        stop = -1
        direction = -1
    output = ""
    if not sub_voxel:
        for i, mat, ve in flux_mesh:
            # print flux data to file
            output = _output_flux(ve, tag_flux, output, start, stop, direction)
    else:
        # Sub-voxel mode: emit one flux block per non-void cell occurrence,
        # repeating the parent voxel's flux for each cell in it.
        ves = list(flux_mesh.iter_ve())
        for row in cell_fracs:
            if len(cell_mats[row['cell']].comp) != 0:
                output = _output_flux(ves[row['idx']], tag_flux, output, start,
                                      stop, direction)
    with open(fluxin, "w") as f:
        f.write(output)
def photon_source_to_hdf5(filename, chunkshape=(10000,)):
    """Converts a plaintext photon source file to an HDF5 version for
    quick later use.

    This function produces a single HDF5 file named <filename>.h5 containing
    the table headings:

    idx : int
        The volume element index assuming the volume elements appear in xyz
        order (z changing fastest) within the photon source file in the case
        of a structured mesh or mesh.mesh_iterate() order for an unstructured
        mesh.
    nuc : str
        The nuclide name as it appears in the photon source file.
    time : str
        The decay time as it appears in the photon source file.
    phtn_src : 1D array of floats
        Contains the photon source density for each energy group.

    Parameters
    ----------
    filename : str
        The path to the file
    chunkshape : tuple of int
        A 1D tuple of the HDF5 chunkshape.
    """
    with open(filename, 'r') as f:
        header = f.readline().strip().split('\t')
        f.seek(0)
        # Number of energy groups = columns minus the nuclide/time columns.
        G = len(header) - 2
        dt = np.dtype([
            ('idx', np.int64),
            ('nuc', 'S6'),
            ('time', 'S20'),
            ('phtn_src', np.float64, G),
        ])
        filters = tb.Filters(complevel=1, complib='zlib')
        h5f = tb.open_file(filename + '.h5', 'w', filters=filters)
        try:
            tab = h5f.create_table('/', 'data', dt, chunkshape=chunkshape)
            chunksize = chunkshape[0]
            rows = np.empty(chunksize, dtype=dt)
            idx = 0
            old = ""
            # Initialize loop variables so an empty input file does not hit
            # undefined names in the final flush (bug in the original).
            i = 0
            j = -1
            for i, line in enumerate(f, 1):
                ls = line.strip().split('\t')
                # Keep track of the idx by delimiting by the last TOTAL line
                # in a volume element.
                if ls[0] != 'TOTAL' and old == 'TOTAL':
                    idx += 1
                j = (i-1) % chunksize
                rows[j] = (idx, ls[0].strip(), ls[1].strip(),
                           np.array(ls[2:], dtype=np.float64))
                # Save the nuclide in order to keep track of idx
                old = ls[0]
                if i % chunksize == 0:
                    tab.append(rows)
                    rows = np.empty(chunksize, dtype=dt)
            # Flush the final, partially filled chunk.
            if i % chunksize != 0:
                tab.append(rows[:j+1])
        finally:
            # Release the HDF5 handle even if parsing fails part-way.
            h5f.close()
def photon_source_hdf5_to_mesh(mesh, filename, tags, sub_voxel=False,
                               cell_mats=None):
    """This function reads in an hdf5 file produced by photon_source_to_hdf5
    and tags the requested data to the mesh of a PyNE Mesh object. Any
    combinations of nuclides and decay times are allowed. The photon source
    file is assumed to be in mesh.__iter__() order

    Parameters
    ----------
    mesh : PyNE Mesh
        The object containing the PyMOAB instance to be tagged.
    filename : str
        The path of the hdf5 version of the photon source file.
    tags: dict
        A dictionary where the keys are tuples with two values. The first is a
        string denoting a nuclide in any form that is understood by
        pyne.nucname (e.g. '1001', 'U-235', '242Am') or 'TOTAL' for all
        nuclides. The second is a string denoting the decay time as it appears
        in the file (e.g. 'shutdown', '1 h' '3 d'). The values of the
        dictionary are the requested tag names for the combination of nuclide
        and decay time. For example if one wanted tags for the photon source
        densities from U235 at shutdown and from all nuclides at 1 hour, the
        dictionary could be:

        tags = {('U-235', 'shutdown') : 'tag1', ('TOTAL', '1 h') : 'tag2'}

    sub_voxel: bool, optional
        If sub_voxel is True, then the sub-voxel r2s will be used.
        Then the photon_source will be interpreted as sub-voxel photon source.
    cell_mats : dict, optional
        cell_mats is required when sub_voxel is True.
        Maps geometry cell numbers to PyNE Material objects.
    """
    # find number of energy groups from the first table row
    with tb.open_file(filename) as h5f:
        num_e_groups = len(h5f.root.data[0][3])
    max_num_cells = 1
    ve0 = next(mesh.iter_ve())
    if sub_voxel:
        num_vol_elements = len(mesh)
        subvoxel_array = _get_subvoxel_array(mesh, cell_mats)
        # get max_num_cells so every tag can hold one flux block per cell
        max_num_cells = len(np.atleast_1d(mesh.cell_number[ve0]))
    # create a dict of tag handles for all keys of the tags dict
    tag_handles = {}
    tag_size = num_e_groups * max_num_cells
    for tag_name in tags.values():
        mesh.tag(tag_name, np.zeros(tag_size, dtype=float), 'nat_mesh',
                 size=tag_size, dtype=float)
        tag_handles[tag_name] = mesh.get_tag(tag_name)
    # create a list of the decay times (strings) present in the source file
    phtn_src_dc = []
    with tb.open_file(filename) as h5f:
        for row in h5f.root.data:
            phtn_src_dc.append(row[2])
    phtn_src_dc = list(set(phtn_src_dc))
    # iterate through each requested nuclide/decay time
    for cond in tags.keys():
        with tb.open_file(filename) as h5f:
            # Convert nuclide to the form found in the ALARA phtn_src
            # file, which is similar to the Serpent form. Note this form is
            # different from the ALARA input nuclide form found in nucname.
            if cond[0] != "TOTAL":
                nuc = serpent(cond[0]).lower()
            else:
                nuc = "TOTAL"
            # time match: convert the string match to a float match
            dc = _find_phsrc_dc(cond[1], phtn_src_dc)
            # create an array of rows that match the nuclide/decay criteria
            matched_data = h5f.root.data.read_where(
                "(nuc == '{0}') & (time == '{1}')".format(nuc, dc))
            if not sub_voxel:
                # Rows are in mesh order; missing volume elements get zeros.
                idx = 0
                for i, _, ve in mesh:
                    if matched_data[idx][0] == i:
                        tag_handles[tags[cond]][ve] = matched_data[idx][3]
                        idx += 1
                    else:
                        tag_handles[tags[cond]][ve] = [0] * num_e_groups
            else:
                # Scatter sub-voxel rows into (ve, cell, group) slots, then
                # flatten each volume element's block onto its tag.
                temp_mesh_data = np.empty(
                    shape=(num_vol_elements, max_num_cells, num_e_groups),
                    dtype=float)
                temp_mesh_data.fill(0.0)
                for sve, subvoxel in enumerate(subvoxel_array):
                    temp_mesh_data[subvoxel['idx'], subvoxel['scid'], :] = \
                        matched_data[sve][3][:]
                for i, _, ve in mesh:
                    tag_handles[tags[cond]][ve] = \
                        temp_mesh_data[i, :].reshape(max_num_cells * num_e_groups)
def record_to_geom(mesh, cell_fracs, cell_mats, geom_file, matlib_file,
sig_figs=6, sub_voxel=False):
"""This function preforms the same task as alara.mesh_to_geom, except the
geometry is on the basis of the stuctured array output of
dagmc.discretize_geom rather than a PyNE material object with materials.
This allows for more efficient ALARA runs by minimizing the number of
materials in the ALARA matlib. This is done by treating mixtures that are
equal up to <sig_figs> digits to be the same mixture within ALARA.
Parameters
----------
mesh : PyNE Mesh object
The Mesh object for which the geometry is discretized.
cell_fracs : structured array
The output from dagmc.discretize_geom(). A sorted, one dimensional
array, each entry containing the following fields:
:idx: int
The volume element index.
:cell: int
The geometry cell number.
:vol_frac: float
The volume fraction of the cell withing the mesh ve.
:rel_error: float
The relative error associated with the volume fraction.
cell_mats : dict
Maps geometry cell numbers to PyNE Material objects. Each PyNE material
object must have 'name' specified in Material.metadata.
geom_file : str
The name of the file to print the geometry and material blocks.
matlib_file : str
The name of the file to print the matlib.
sig_figs : int
The number of significant figures that two mixtures must have in common
to be treated as the same mixture within ALARA.
sub_voxel : bool
If sub_voxel is True, the sub-voxel r2s will be used.
"""
# Create geometry information header. Note that the shape of the geometry
# (rectangular) is actually inconsequential to the ALARA calculation so
# unstructured meshes are not adversely affected.
geometry = 'geometry rectangular\n\n'
# Create three strings in order to create all ALARA input blocks in a
# single mesh iteration.
volume = 'volume\n' # volume input block
mat_loading = 'mat_loading\n' # material loading input block
mixture = '' # mixture blocks
unique_mixtures = []
if not sub_voxel:
for i, mat, ve in mesh:
volume += ' {0: 1.6E} zone_{1}\n'.format(
mesh.elem_volume(ve), i)
ve_mixture = {}
for row in cell_fracs[cell_fracs['idx'] == i]:
cell_mat = cell_mats[row['cell']]
name = cell_mat.metadata['name']
if _is_void(name):
name = 'mat_void'
if name not in ve_mixture.keys():
ve_mixture[name] = np.round(row['vol_frac'], sig_figs)
else:
ve_mixture[name] += np.round(row['vol_frac'], sig_figs)
if ve_mixture not in unique_mixtures:
unique_mixtures.append(ve_mixture)
mixture += 'mixture mix_{0}\n'.format(
unique_mixtures.index(ve_mixture))
for key, value in ve_mixture.items():
mixture += ' material {0} 1 {1}\n'.format(key, value)
mixture += 'end\n\n'
mat_loading += ' zone_{0} mix_{1}\n'.format(i,
unique_mixtures.index(ve_mixture))
else:
ves = list(mesh.iter_ve())
sve_count = 0
for row in cell_fracs:
if len(cell_mats[row['cell']].comp) != 0:
volume += ' {0: 1.6E} zone_{1}\n'.format(
mesh.elem_volume(ves[row['idx']]) * row['vol_frac'], sve_count)
cell_mat = cell_mats[row['cell']]
name = cell_mat.metadata['name']
if name not in unique_mixtures:
unique_mixtures.append(name)
mixture += 'mixture {0}\n'.format(name)
mixture += ' material {0} 1 1\n'.format(name)
mixture += 'end\n\n'
mat_loading += ' zone_{0} {1}\n'.format(
sve_count, name)
sve_count += 1
volume += 'end\n\n'
mat_loading += 'end\n\n'
with open(geom_file, 'w') as f:
f.write(geometry + volume + mat_loading + mixture)
matlib = '' # ALARA material library string
printed_mats = []
print_void = False
for mat in cell_mats.values():
name = mat.metadata['name']
if _is_void(name):
print_void = True
continue
if name not in printed_mats:
printed_mats.append(name)
matlib += '{0} {1: 1.6E} {2}\n'.format(name, mat.density,
len(mat.comp))
for nuc, comp in mat.comp.iteritems():
matlib += '{0} {1: 1.6E} {2}\n'.format(alara(nuc),
comp*100.0, znum(nuc))
matlib += '\n'
if print_void:
matlib += '# void material\nmat_void 0.0 1\nhe 1 2\n'
with open(matlib_file, 'w') as f:
f.write(matlib)
def _is_void(name):
"""Private function for determining if a material name specifies void.
"""
lname = name.lower()
return 'vacuum' in lname or 'void' in lname or 'graveyard' in lname
def mesh_to_geom(mesh, geom_file, matlib_file):
"""This function reads the materials of a PyNE mesh object and prints the
geometry and materials portion of an ALARA input file, as well as a
corresponding matlib file. If the mesh is structured, xyz ordering is used
(z changing fastest). If the mesh is unstructured the mesh_iterate order is
used.
Parameters
----------
mesh : PyNE Mesh object
The Mesh object containing the materials to be printed.
geom_file : str
The name of the file to print the geometry and material blocks.
matlib_file : str
The name of the file to print the matlib.
"""
# Create geometry information header. Note that the shape of the geometry
# (rectangular) is actually inconsequential to the ALARA calculation so
# unstructured meshes are not adversely affected.
geometry = "geometry rectangular\n\n"
# Create three strings in order to create all ALARA input blocks in a
# single mesh iteration.
volume = "volume\n" # volume input block
mat_loading = "mat_loading\n" # material loading input block
mixture = "" # mixture blocks
matlib = "" # ALARA material library string
for i, mat, ve in mesh:
volume += " {0: 1.6E} zone_{1}\n".format(mesh.elem_volume(ve), i)
mat_loading += " zone_{0} mix_{0}\n".format(i)
matlib += "mat_{0} {1: 1.6E} {2}\n".format(i, mesh.density[i],
len(mesh.comp[i]))
mixture += ("mixture mix_{0}\n"
" material mat_{0} 1 1\nend\n\n".format(i))
for nuc, comp in mesh.comp[i].iteritems():
matlib += "{0} {1: 1.6E} {2}\n".format(alara(nuc), comp*100.0,
znum(nuc))
matlib += "\n"
volume += "end\n\n"
mat_loading += "end\n\n"
with open(geom_file, 'w') as f:
f.write(geometry + volume + mat_loading + mixture)
with open(matlib_file, 'w') as f:
f.write(matlib)
def num_density_to_mesh(lines, time, m):
"""num_density_to_mesh(lines, time, m)
This function reads ALARA output containing number density information and
creates material objects which are then added to a supplied PyNE Mesh object.
The volumes within ALARA are assummed to appear in the same order as the
idx on the Mesh object.
Parameters
----------
lines : list or str
ALARA output from ALARA run with 'number_density' in the 'output' block
of the input file. Lines can either be a filename or the equivalent to
calling readlines() on an ALARA output file. If reading in ALARA output
from stdout, call split('\n') before passing it in as the lines parameter.
time : str
The decay time for which number densities are requested (e.g. '1 h',
'shutdown', etc.)
m : PyNE Mesh
Mesh object for which mats will be applied to.
"""
if isinstance(lines, basestring):
with open(lines) as f:
lines = f.readlines()
elif not isinstance(lines, collections.Sequence):
raise TypeError("Lines argument not a file or sequence.")
# Advance file to number density portion.
header = 'Number Density [atoms/cm3]'
line = ""
while line.rstrip() != header:
line = lines.pop(0)
# Get decay time index from next line (the column the decay time answers
# appear in.
line_strs = lines.pop(0).replace('\t', ' ')
time_index = [s.strip() for s in line_strs.split(' ')
if s.strip()].index(time)
# Create a dict of mats for the mesh.
mats = {}
count = 0
# Read through file until enough material objects are create to fill mesh.
while count != len(m):
# Pop lines to the start of the next material.
while (lines.pop(0) + " ")[0] != '=':
pass
# Create a new material object and add to mats dict.
line = lines.pop(0)
nucvec = {}
density = 0.0
# Read lines until '=' delimiter at the end of a material.
while line[0] != '=':
nuc = line.split()[0]
n = float(line.split()[time_index])
if n != 0.0:
nucvec[nuc] = n
density += n * anum(nuc)/N_A
line = lines.pop(0)
mat = from_atom_frac(nucvec, density=density, mass=0)
mats[count] = mat
count += 1
m.mats = mats
def irradiation_blocks(material_lib, element_lib, data_library, cooling,
flux_file, irr_time, output="number_density",
truncation=1E-12, impurity=(5E-6, 1E-3),
dump_file="dump_file"):
"""irradiation_blocks(material_lib, element_lib, data_library, cooling,
flux_file, irr_time, output = "number_density",
truncation=1E-12, impurity = (5E-6, 1E-3),
dump_file = "dump_file")
This function returns a string of the irradation-related input blocks. This
function is meant to be used with files created by the mesh_to_geom
function, in order to append the remaining input blocks to form a complete
ALARA input file. Only the simplest irradiation schedule is supported: a
single pulse of time <irr_time>. The notation in this function is consistent
with the ALARA users' guide, found at:
http://alara.engr.wisc.edu/users.guide.html/
Parameters
----------
material_lib : str
Path to material library.
element_lib : str
Path to element library.
data_library : str
The data_library card (see ALARA user's guide).
cooling : str or iterable of str
Cooling times for which output is requested. Given in ALARA form (e.g.
"1 h", "0.5 y"). Note that "shutdown" is always implicitly included.
flux_file : str
Path to the "fluxin" file.
irr_time : str
The duration of the single pulse irradiation. Given in the ALARA form
(e.g. "1 h", "0.5 y").
output : str or iterable of str, optional.
The requested output blocks (see ALARA users' guide).
truncation : float, optional
The chain truncation value (see ALARA users' guide).
impurity : tuple of two floats, optional
The impurity parameters (see ALARA users' guide).
dump_file: str, optional
Path to the dump file.
Returns
-------
s : str
Irradition-related ALARA input blocks.
"""
s = ""
# Material, element, and data_library blocks
s += "material_lib {0}\n".format(material_lib)
s += "element_lib {0}\n".format(element_lib)
s += "data_library {0}\n\n".format(data_library)
# Cooling times
s += "cooling\n"
if isinstance(cooling, collections.Iterable) and not isinstance(cooling, basestring):
for c in cooling:
s += " {0}\n".format(c)
else:
s += " {0}\n".format(cooling)
s += "end\n\n"
# Flux block
s += "flux flux_1 {0} 1.0 0 default\n".format(flux_file)
# Flux schedule
s += ("schedule simple_schedule\n"
" {0} flux_1 pulse_once 0 s\nend\n\n".format(irr_time))
s += "pulsehistory pulse_once\n 1 0.0 s\nend\n\n"
# Output block
s += "output zone\n units Ci cm3\n"
if isinstance(output, collections.Iterable) and not isinstance(output, basestring):
for out in output:
s += " {0}\n".format(out)
else:
s += " {0}\n".format(output)
s += "end\n\n"
# Other parameters
s += "truncation {0}\n".format(truncation)
s += "impurity {0} {1}\n".format(impurity[0], impurity[1])
s += "dump_file {0}\n".format(dump_file)
return s
def phtn_src_energy_bounds(input_file):
"""Reads an ALARA input file and extracts the energy bounds from the
photon_source block.
Parameters
----------
input_file : str
The ALARA input file name, which must contain a photon_source block.
Returns
-------
e_bounds : list of floats
The lower and upper energy bounds for the photon_source discretization. Unit: eV.
"""
phtn_src_lines = ""
with open(input_file, 'r') as f:
line = f.readline()
while not (' photon_source ' in line and line.strip()[0] != "#"):
line = f.readline()
num_groups = float(line.split()[3])
upper_bounds = [float(x) for x in line.split()[4:]]
while len(upper_bounds) < num_groups:
line = f.readline()
upper_bounds += [float(x) for x in line.split("#")
[0].split('end')[0].split()]
e_bounds = [0.] + upper_bounds
return e_bounds
def _build_matrix(N):
""" This function builds burnup matrix, A. Decay only.
"""
A = np.zeros((len(N), len(N)))
# convert N to id form
N_id = []
for i in range(len(N)):
if isinstance(N[i], str):
ID = nucname.id(N[i])
else:
ID = N[i]
N_id.append(ID)
sds = SimpleDataSource()
# Decay
for i in range(len(N)):
A[i, i] -= decay_const(N_id[i])
# Find decay parents
for k in range(len(N)):
if N_id[i] in decay_children(N_id[k]):
A[i, k] += branch_ratio(N_id[k], N_id[i])*decay_const(N_id[k])
return A
def _rat_apprx_14(A, t, n_0):
""" CRAM of order 14
Parameters
---------
A : numpy array
Burnup matrix
t : float
Time step
n_0: numpy array
Inital composition vector
"""
theta = np.array([-8.8977731864688888199 + 16.630982619902085304j,
-3.7032750494234480603 + 13.656371871483268171j,
-.2087586382501301251 + 10.991260561901260913j,
3.9933697105785685194 + 6.0048316422350373178j,
5.0893450605806245066 + 3.5888240290270065102j,
5.6231425727459771248 + 1.1940690463439669766j,
2.2697838292311127097 + 8.4617379730402214019j])
alpha = np.array([-.000071542880635890672853 + .00014361043349541300111j,
.0094390253107361688779 - .01784791958483017511j,
-.37636003878226968717 + .33518347029450104214j,
-23.498232091082701191 - 5.8083591297142074004j,
46.933274488831293047 + 45.643649768827760791j,
-27.875161940145646468 - 102.14733999056451434j,
4.8071120988325088907 - 1.3209793837428723881j])
alpha_0 = np.array([1.8321743782540412751e-14])
s = 7
A = A*t
n = 0*n_0
for j in range(7):
n = n + np.linalg.solve(A - theta[j] *
np.identity(np.shape(A)[0]), alpha[j]*n_0)
n = 2*n.real
n = n + alpha_0*n_0
return n
def _rat_apprx_16(A, t, n_0):
""" CRAM of order 16
Parameters
---------
A : numpy array
Burnup matrix
t : float
Time step
n_0: numpy array
Inital composition vector
"""
theta = np.array([-10.843917078696988026 + 19.277446167181652284j,
-5.2649713434426468895 + 16.220221473167927305j,
5.9481522689511774808 + 3.5874573620183222829j,
3.5091036084149180974 + 8.4361989858843750826j,
6.4161776990994341923 + 1.1941223933701386874j,
1.4193758971856659786 + 10.925363484496722585j,
4.9931747377179963991 + 5.9968817136039422260j,
-1.4139284624888862114 + 13.497725698892745389j])
alpha = np.array([-.0000005090152186522491565 - .00002422001765285228797j,
.00021151742182466030907 + .0043892969647380673918j,
113.39775178483930527 + 101.9472170421585645j,
15.059585270023467528 - 5.7514052776421819979j,
-64.500878025539646595 - 224.59440762652096056j,
-1.4793007113557999718 + 1.7686588323782937906j,
-62.518392463207918892 - 11.19039109428322848j,
.041023136835410021273 - .15743466173455468191j])
alpha_0 = np.array([2.1248537104952237488e-16])
s = 8
A = A*t
n = 0*n_0
for j in range(8):
n = n + np.linalg.solve(A - theta[j] *
np.identity(np.shape(A)[0]), alpha[j]*n_0)
n = 2*n.real
n = n + alpha_0*n_0
return n
def cram(N, t, n_0, order):
""" This function returns matrix exponential solution n using CRAM14 or CRAM16
Parameters
----------
N : list or array
Array of nuclides under consideration
t : float
Time step
n_0 : list or array
Nuclide concentration vector
order : int
Order of method. Only 14 and 16 are supported.
"""
n_0 = np.array(n_0)
A = _build_matrix(N)
if order == 14:
return _rat_apprx_14(A, t, n_0)
elif order == 16:
return _rat_apprx_16(A, t, n_0)
else:
msg = 'Rational approximation of degree {0} is not supported.'.format(
order)
raise ValueError(msg)
def _output_flux(ve, tag_flux, output, start, stop, direction):
"""
This function is used to get neutron flux for fluxin
Parameters
----------
ve : entity, a mesh sub-voxel
tag_flux : array, neutron flux of the sub-voxel
output : string
start : int
stop : int
direction: int
"""
count = 0
flux_data = np.atleast_1d(tag_flux[ve])
for i in range(start, stop, direction):
output += "{:.6E} ".format(flux_data[i])
# fluxin formatting: create a new line
# after every 6th entry
count += 1
if count % 6 == 0:
output += "\n"
output += "\n\n"
return output
def _get_subvoxel_array(mesh, cell_mats):
"""
This function returns an array of subvoxels.
Parameters
----------
mesh : PyNE Mesh object
The Mesh object for which the geometry is discretized.
return : subvoxel_array: structured array
A sorted, one dimensional array, each entry containing the following
fields:
:svid: int
The index of non-void subvoxel id
:idx: int
The idx of the voxel
:scid: int
The cell index of the cell in that voxel
"""
cell_number_tag = mesh.cell_number
subvoxel_array = np.zeros(0, dtype=[(b'svid', np.int64),
(b'idx', np.int64),
(b'scid', np.int64)])
temp_subvoxel = np.zeros(1, dtype=[(b'svid', np.int64),
(b'idx', np.int64),
(b'scid', np.int64)])
# calculate the total number of non-void sub-voxel
non_void_sv_num = 0
for i, _, ve in mesh:
for c, cell in enumerate(np.atleast_1d(cell_number_tag[ve])):
if cell > 0 and len(cell_mats[cell].comp): # non-void cell
temp_subvoxel[0] = (non_void_sv_num, i, c)
subvoxel_array = np.append(subvoxel_array, temp_subvoxel)
non_void_sv_num += 1
return subvoxel_array
def _convert_unit_to_s(dc):
"""
This function return a float number represent a time in unit of s.
Parameters
----------
dc : string. Contain a num and an unit.
Returns
-------
a float number
"""
# get num and unit
num, unit = dc.split()
return to_sec(float(num), unit)
def _find_phsrc_dc(idc, phtn_src_dc):
"""
This function returns a string representing a time in phsrc_dc.
Parameters
----------
idc : string
Represents a time, input decay time
phtn_src_dc : list of strings
Decay times in phtn_src file
Returns
-------
string from phtn_src_dc list that mathches idc
"""
# Check the existence of idc in phtn_src_dc list.
if idc in phtn_src_dc:
return idc
# Direct matching cannot be found. Convert units to [s] and compare.
else:
# convert idc to [s]
idc_s = _convert_unit_to_s(idc)
# Loop over decay times in phtn_src_dc list and compare to idc_s.
for dc in phtn_src_dc:
# Skip "shutdown" string in list.
if dc == 'shutdown':
continue
# Convert to [s].
dc_s = _convert_unit_to_s(dc)
if idc_s == dc_s:
# idc_s matches dc_s. return original string, dc.
return dc
elif dc_s != 0.0 and (abs(idc_s - dc_s)/dc_s) < 1e-6:
return dc
# if idc doesn't match any string in phtn_src_dc list, raise an error.
raise ValueError(
'Decay time {0} not found in phtn_src file'.format(idc))
| [
"pyne.nucname.serpent",
"numpy.array",
"pyne.data.decay_children",
"tables.Filters",
"numpy.empty",
"warnings.warn",
"numpy.dtype",
"numpy.round",
"pyne.data.decay_const",
"pyne.nucname.id",
"tables.open_file",
"pyne.mesh.mesh_iterate",
"numpy.shape",
"numpy.atleast_1d",
"pyne.data.branc... | [((595, 650), 'warnings.warn', 'warn', (["(__name__ + ' is not yet QA compliant.')", 'QAWarning'], {}), "(__name__ + ' is not yet QA compliant.', QAWarning)\n", (599, 650), False, 'from warnings import warn\n'), ((778, 911), 'warnings.warn', 'warn', (['"""The PyMOAB optional dependency could not be imported. Some aspects of the mesh module may be incomplete."""', 'QAWarning'], {}), "(\n 'The PyMOAB optional dependency could not be imported. Some aspects of the mesh module may be incomplete.'\n , QAWarning)\n", (782, 911), False, 'from warnings import warn\n'), ((2945, 2968), 'numpy.atleast_1d', 'np.atleast_1d', (['e_groups'], {}), '(e_groups)\n', (2958, 2968), True, 'import numpy as np\n'), ((4876, 4970), 'numpy.dtype', 'np.dtype', (["[('idx', np.int64), ('nuc', 'S6'), ('time', 'S20'), ('phtn_src', np.float64, G)\n ]"], {}), "([('idx', np.int64), ('nuc', 'S6'), ('time', 'S20'), ('phtn_src',\n np.float64, G)])\n", (4884, 4970), True, 'import numpy as np\n'), ((5021, 5060), 'tables.Filters', 'tb.Filters', ([], {'complevel': '(1)', 'complib': '"""zlib"""'}), "(complevel=1, complib='zlib')\n", (5031, 5060), True, 'import tables as tb\n'), ((5071, 5123), 'tables.open_file', 'tb.open_file', (["(filename + '.h5')", '"""w"""'], {'filters': 'filters'}), "(filename + '.h5', 'w', filters=filters)\n", (5083, 5123), True, 'import tables as tb\n'), ((5233, 5262), 'numpy.empty', 'np.empty', (['chunksize'], {'dtype': 'dt'}), '(chunksize, dtype=dt)\n', (5241, 5262), True, 'import numpy as np\n'), ((24776, 24794), 'pyne.xs.data_source.SimpleDataSource', 'SimpleDataSource', ([], {}), '()\n', (24792, 24794), False, 'from pyne.xs.data_source import SimpleDataSource\n'), ((25318, 25637), 'numpy.array', 'np.array', (['[-8.89777318646889 + 16.630982619902085j, -3.703275049423448 + \n 13.656371871483268j, -0.20875863825013014 + 10.99126056190126j, \n 3.9933697105785684 + 6.004831642235037j, 5.089345060580625 + \n 3.5888240290270064j, 5.623142572745977 + 
1.194069046343967j, \n 2.2697838292311125 + 8.461737973040222j]'], {}), '([-8.89777318646889 + 16.630982619902085j, -3.703275049423448 + \n 13.656371871483268j, -0.20875863825013014 + 10.99126056190126j, \n 3.9933697105785684 + 6.004831642235037j, 5.089345060580625 + \n 3.5888240290270064j, 5.623142572745977 + 1.194069046343967j, \n 2.2697838292311125 + 8.461737973040222j])\n', (25326, 25637), True, 'import numpy as np\n'), ((25812, 26146), 'numpy.array', 'np.array', (['[-7.154288063589067e-05 + 0.000143610433495413j, 0.00943902531073617 - \n 0.017847919584830174j, -0.3763600387822697 + 0.335183470294501j, -\n 23.498232091082702 - 5.808359129714208j, 46.933274488831294 + \n 45.643649768827764j, -27.875161940145645 - 102.14733999056452j, \n 4.8071120988325085 - 1.3209793837428725j]'], {}), '([-7.154288063589067e-05 + 0.000143610433495413j, \n 0.00943902531073617 - 0.017847919584830174j, -0.3763600387822697 + \n 0.335183470294501j, -23.498232091082702 - 5.808359129714208j, \n 46.933274488831294 + 45.643649768827764j, -27.875161940145645 - \n 102.14733999056452j, 4.8071120988325085 - 1.3209793837428725j])\n', (25820, 26146), True, 'import numpy as np\n'), ((26319, 26353), 'numpy.array', 'np.array', (['[1.8321743782540412e-14]'], {}), '([1.8321743782540412e-14])\n', (26327, 26353), True, 'import numpy as np\n'), ((26832, 27195), 'numpy.array', 'np.array', (['[-10.843917078696988 + 19.27744616718165j, -5.264971343442647 + \n 16.22022147316793j, 5.948152268951177 + 3.587457362018322j, \n 3.509103608414918 + 8.436198985884374j, 6.416177699099435 + \n 1.1941223933701386j, 1.419375897185666 + 10.925363484496723j, \n 4.993174737717997 + 5.996881713603942j, -1.4139284624888862 + \n 13.497725698892745j]'], {}), '([-10.843917078696988 + 19.27744616718165j, -5.264971343442647 + \n 16.22022147316793j, 5.948152268951177 + 3.587457362018322j, \n 3.509103608414918 + 8.436198985884374j, 6.416177699099435 + \n 1.1941223933701386j, 1.419375897185666 + 10.925363484496723j, \n 
4.993174737717997 + 5.996881713603942j, -1.4139284624888862 + \n 13.497725698892745j])\n', (26840, 27195), True, 'import numpy as np\n'), ((27397, 27781), 'numpy.array', 'np.array', (['[-5.090152186522492e-07 - 2.422001765285229e-05j, 0.0002115174218246603 + \n 0.004389296964738067j, 113.3977517848393 + 101.94721704215857j, \n 15.059585270023467 - 5.751405277642182j, -64.50087802553965 - \n 224.59440762652096j, -1.4793007113557999 + 1.7686588323782937j, -\n 62.51839246320792 - 11.190391094283228j, 0.04102313683541002 - \n 0.15743466173455467j]'], {}), '([-5.090152186522492e-07 - 2.422001765285229e-05j, \n 0.0002115174218246603 + 0.004389296964738067j, 113.3977517848393 + \n 101.94721704215857j, 15.059585270023467 - 5.751405277642182j, -\n 64.50087802553965 - 224.59440762652096j, -1.4793007113557999 + \n 1.7686588323782937j, -62.51839246320792 - 11.190391094283228j, \n 0.04102313683541002 - 0.15743466173455467j])\n', (27405, 27781), True, 'import numpy as np\n'), ((27977, 28011), 'numpy.array', 'np.array', (['[2.1248537104952236e-16]'], {}), '([2.1248537104952236e-16])\n', (27985, 28011), True, 'import numpy as np\n'), ((28648, 28661), 'numpy.array', 'np.array', (['n_0'], {}), '(n_0)\n', (28656, 28661), True, 'import numpy as np\n'), ((29307, 29334), 'numpy.atleast_1d', 'np.atleast_1d', (['tag_flux[ve]'], {}), '(tag_flux[ve])\n', (29320, 29334), True, 'import numpy as np\n'), ((30264, 30350), 'numpy.zeros', 'np.zeros', (['(0)'], {'dtype': "[(b'svid', np.int64), (b'idx', np.int64), (b'scid', np.int64)]"}), "(0, dtype=[(b'svid', np.int64), (b'idx', np.int64), (b'scid', np.\n int64)])\n", (30272, 30350), True, 'import numpy as np\n'), ((30446, 30532), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': "[(b'svid', np.int64), (b'idx', np.int64), (b'scid', np.int64)]"}), "(1, dtype=[(b'svid', np.int64), (b'idx', np.int64), (b'scid', np.\n int64)])\n", (30454, 30532), True, 'import numpy as np\n'), ((7641, 7663), 'tables.open_file', 'tb.open_file', (['filename'], {}), 
'(filename)\n', (7653, 7663), True, 'import tables as tb\n'), ((8417, 8439), 'tables.open_file', 'tb.open_file', (['filename'], {}), '(filename)\n', (8429, 8439), True, 'import tables as tb\n'), ((20071, 20118), 'pyne.material.from_atom_frac', 'from_atom_frac', (['nucvec'], {'density': 'density', 'mass': '(0)'}), '(nucvec, density=density, mass=0)\n', (20085, 20118), False, 'from pyne.material import Material, from_atom_frac\n'), ((24855, 24875), 'pyne.data.decay_const', 'decay_const', (['N_id[i]'], {}), '(N_id[i])\n', (24866, 24875), False, 'from pyne.data import N_A, decay_const, decay_children, branch_ratio\n'), ((5636, 5670), 'numpy.array', 'np.array', (['ls[2:]'], {'dtype': 'np.float64'}), '(ls[2:], dtype=np.float64)\n', (5644, 5670), True, 'import numpy as np\n'), ((5829, 5858), 'numpy.empty', 'np.empty', (['chunksize'], {'dtype': 'dt'}), '(chunksize, dtype=dt)\n', (5837, 5858), True, 'import numpy as np\n'), ((7946, 7982), 'numpy.atleast_1d', 'np.atleast_1d', (['mesh.cell_number[ve0]'], {}), '(mesh.cell_number[ve0])\n', (7959, 7982), True, 'import numpy as np\n'), ((8178, 8209), 'numpy.zeros', 'np.zeros', (['tag_size'], {'dtype': 'float'}), '(tag_size, dtype=float)\n', (8186, 8209), True, 'import numpy as np\n'), ((8662, 8684), 'tables.open_file', 'tb.open_file', (['filename'], {}), '(filename)\n', (8674, 8684), True, 'import tables as tb\n'), ((9722, 9798), 'numpy.empty', 'np.empty', ([], {'shape': '(num_vol_elements, max_num_cells, num_e_groups)', 'dtype': 'float'}), '(shape=(num_vol_elements, max_num_cells, num_e_groups), dtype=float)\n', (9730, 9798), True, 'import numpy as np\n'), ((24688, 24704), 'pyne.nucname.id', 'nucname.id', (['N[i]'], {}), '(N[i])\n', (24698, 24704), False, 'from pyne import nucname\n'), ((30744, 30778), 'numpy.atleast_1d', 'np.atleast_1d', (['cell_number_tag[ve]'], {}), '(cell_number_tag[ve])\n', (30757, 30778), True, 'import numpy as np\n'), ((2896, 2924), 'pyne.mesh.mesh_iterate', 'mesh_iterate', (['flux_mesh.mesh'], {}), 
'(flux_mesh.mesh)\n', (2908, 2924), False, 'from pyne.mesh import mesh_iterate\n'), ((17425, 17435), 'pyne.nucname.alara', 'alara', (['nuc'], {}), '(nuc)\n', (17430, 17435), False, 'from pyne.nucname import serpent, alara, znum, anum\n'), ((17506, 17515), 'pyne.nucname.znum', 'znum', (['nuc'], {}), '(nuc)\n', (17510, 17515), False, 'from pyne.nucname import serpent, alara, znum, anum\n'), ((24964, 24987), 'pyne.data.decay_children', 'decay_children', (['N_id[k]'], {}), '(N_id[k])\n', (24978, 24987), False, 'from pyne.data import N_A, decay_const, decay_children, branch_ratio\n'), ((30945, 30985), 'numpy.append', 'np.append', (['subvoxel_array', 'temp_subvoxel'], {}), '(subvoxel_array, temp_subvoxel)\n', (30954, 30985), True, 'import numpy as np\n'), ((12997, 13032), 'numpy.round', 'np.round', (["row['vol_frac']", 'sig_figs'], {}), "(row['vol_frac'], sig_figs)\n", (13005, 13032), True, 'import numpy as np\n'), ((13095, 13130), 'numpy.round', 'np.round', (["row['vol_frac']", 'sig_figs'], {}), "(row['vol_frac'], sig_figs)\n", (13103, 13130), True, 'import numpy as np\n'), ((15202, 15212), 'pyne.nucname.alara', 'alara', (['nuc'], {}), '(nuc)\n', (15207, 15212), False, 'from pyne.nucname import serpent, alara, znum, anum\n'), ((15287, 15296), 'pyne.nucname.znum', 'znum', (['nuc'], {}), '(nuc)\n', (15291, 15296), False, 'from pyne.nucname import serpent, alara, znum, anum\n'), ((25016, 25046), 'pyne.data.branch_ratio', 'branch_ratio', (['N_id[k]', 'N_id[i]'], {}), '(N_id[k], N_id[i])\n', (25028, 25046), False, 'from pyne.data import N_A, decay_const, decay_children, branch_ratio\n'), ((25047, 25067), 'pyne.data.decay_const', 'decay_const', (['N_id[k]'], {}), '(N_id[k])\n', (25058, 25067), False, 'from pyne.data import N_A, decay_const, decay_children, branch_ratio\n'), ((8972, 8988), 'pyne.nucname.serpent', 'serpent', (['cond[0]'], {}), '(cond[0])\n', (8979, 8988), False, 'from pyne.nucname import serpent, alara, znum, anum\n'), ((20010, 20019), 'pyne.nucname.anum', 
'anum', (['nuc'], {}), '(nuc)\n', (20014, 20019), False, 'from pyne.nucname import serpent, alara, znum, anum\n'), ((26509, 26520), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (26517, 26520), True, 'import numpy as np\n'), ((28167, 28178), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (28175, 28178), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""This module defines a player class exposing the Open AI Gym API.
"""
import asyncio
import numpy as np # pyre-ignore
import time
from abc import ABC, abstractmethod, abstractproperty
from queue import Queue
from threading import Thread
from typing import Any, Callable, List, Optional, Tuple, Union
from poke_env.environment.battle import Battle
from poke_env.player.player import Player
from poke_env.player_configuration import PlayerConfiguration
from poke_env.server_configuration import ServerConfiguration
from poke_env.teambuilder.teambuilder import Teambuilder
from poke_env.utils import to_id_str
from self_play import MCTS, Node, GameHistory, MinMaxStats
from poke_env.pokeconfig import MuZeroConfig
from poke_env.data import POKEDEX # if you are not playing in gen 8, you might want to do import GENX_POKEDEX instead
from poke_env.data import MOVES
import models
import torch
class MuPlayer(Player, ABC): # pyre-ignore
    """Player that records a local game history from its own perspective and
    chooses moves with an MCTS search over a local MuZero model."""
    # Retry policy constants (retry count / pause in seconds).
    MAX_BATTLE_SWITCH_RETRY = 10000
    PAUSE_BETWEEN_RETRIES = 0.001
    # Flat action space of 4 * 4 + 6 = 22 slots.  Presumably 4 moves times 4
    # gimmick variants plus 6 switch targets -- TODO confirm against the
    # action-to-order mapping used elsewhere.
    _ACTION_SPACE = list(range(4 * 4 + 6))
    # Map every species id (including alternate formes) to a dense 0-based
    # index; alternate formes receive their own consecutive indices.
    SPECIES_TO_DEX_NUMBER = {}
    idx = 0
    for species, values in POKEDEX.items():
        SPECIES_TO_DEX_NUMBER[species] = idx
        idx += 1
        if 'otherFormes' in values:
            for other_form in values['otherFormes']:
                SPECIES_TO_DEX_NUMBER[to_id_str(other_form)] = idx
                idx += 1
    # Map every move id to a dense index starting at 1 (presumably so that 0
    # can encode "no move" -- verify against the state encoding).
    MOVE_TO_NUM = {}
    a=1
    for moves, values in MOVES.items():
        MOVE_TO_NUM[moves] = a
        a+=1
def __init__(
self,
player_configuration: Optional[PlayerConfiguration] = None,
*,
avatar: Optional[int] = None,
battle_format: str = "gen8randombattle",
log_level: Optional[int] = None,
server_configuration: Optional[ServerConfiguration] = None,
start_listening: bool = True,
team: Optional[Union[str, Teambuilder]] = None,
initcheckpoint = None,
mu_config: MuZeroConfig,
):
#summary deleted
super(MuPlayer, self).__init__(
player_configuration=player_configuration,
avatar=avatar,
battle_format=battle_format,
log_level=log_level,
max_concurrent_battles=1,
server_configuration=server_configuration,
start_listening=start_listening,
team=team,
)
#self._actions = {}
self._current_battle: Battle
#self._observations = {}
#self._reward_buffer = {}
self._start_new_battle = False
self.laction = 0
self.gh = GameHistory()#this will be replaced by self_play's version
self.config = mu_config
#network initialization
self.model = models.MuZeroNetwork(self.config)
self.model.set_weights(initcheckpoint["weights"])
self.model.to(torch.device("cuda" if torch.cuda.is_available() else "cpu"))
self.model.eval()
self._ACTION_SPACE = list(range(4 * 4 + 6))
self.temp = 0
self.temp_thresh = 0
self.lroot = 0
    def battle_once(self, opponent: Player):
        """Challenge *opponent* and run battles on the event loop.

        Does not have team preview functionality for built team battles.
        """
        #print("mu player 63 battle_once called",self.gh.observation_history)
        #challenge, accept by opponent, get battle, choose moves until end, game history
        self._start_new_battle = True
        async def launch_battles(player: MuPlayer, opponent: Player):
            # Send the challenge and have the opponent accept it concurrently.
            battles_coroutine = asyncio.gather(
                player.send_challenges(
                    opponent=to_id_str(opponent.username),
                    n_challenges=1,
                    to_wait=opponent.logged_in,
                ),
                opponent.accept_challenges(
                    opponent=to_id_str(player.username), n_challenges=1
                ),
            )
            await battles_coroutine
            #self._current_battle = await battles_coroutine
        """
        def env_algorithm_wrapper(player, kwargs):
            #env_algorithm(player, **kwargs)#This should be a control function and not be necessary
            player._start_new_battle = False
            while True:
                try:
                    player.complete_current_battle()
                    player.reset()
                except OSError:
                    break
        loop = asyncio.get_event_loop()
        env_algorithm_kwargs=None#shouldnt be necessary so initialized as None
        if env_algorithm_kwargs is None:
            env_algorithm_kwargs = {}"""
        loop = asyncio.get_event_loop()#duplicated
        # NOTE(review): this Thread has no target, so start()/join() perform
        # no work -- confirm whether a worker thread was intended here.
        thread = Thread()
        thread.start()
        # NOTE(review): _start_new_battle is never cleared inside this loop,
        # so battles are re-launched indefinitely -- confirm intended lifecycle.
        while self._start_new_battle:
            loop.run_until_complete(launch_battles(self, opponent))
        thread.join()
        """while True:#no idea what this does
        try:
            player.complete_current_battle()
            player.reset()
        except OSError:
            break
        ###
        loop = asyncio.get_event_loop()
        while self._start_new_battle:
            loop.run_until_complete(launch_battles(self, opponent))
        """
        #append game history stuff to local gh attribute
        #no return
    async def mu_message(self, player, message) -> None:
        """Forward *message* to *player* by delegating to player_message()."""
        await self.player_message(player, message)
    async def mu_room_message(self, messagein, battlein) -> None:
        """Send *messagein* to the room of battle *battlein* via _send_message()."""
        await self._send_message(message = messagein, battle=battlein)
def get_moves(self) -> Any:
a = self.stripmoves(self._current_battle.avaliable_moves)
#return a
return [1,2,3,4]
    def battle_summary(self, perspective):
        # Placeholder: summary generation was never implemented; returns an
        # explanatory string instead.
        return "I didn't finish this method b/c no purpose yet"
def battle_state(self, battle: Battle):
"""
Much of the data in a pokemon battle is not ordinal or easily represented
This method will encode a variety of information naively using values from 0-1
2D array with battle/field attributes in first row.
Next 12 rows are player's and opponent's pokemon
"""
battle = self._current_battle
assert battle != None, "battle_state received None instead of Battle object"
state = [[0]* 13]*13# 1+6+6 1 field, 6 mons, 6 opmons
properties = 13#13 pokemon traits
substate = [0]*properties #substate is one pokemon
substate[0] = self.statetonum(battle.fields,12)#Set[Field]
substate[1] = self.statetonum(battle.opponent_side_conditions,13)#Set(SideCondition)
substate[2] = self.statetonum(battle.side_conditions,13)#Set(SideCondition)
substate[3] = int(battle.maybe_trapped)#bool
substate[4] = int(battle.trapped)#bool
substate[5] = self.weathertonum(battle.weather)#Optional[Weather]
substate[6] = int(battle.can_dynamax)
substate[7] = 0
substate[8] = 0
substate[9] = 0
substate[10] = 0
substate[11] = 0
substate[12] = 0
state[0] = substate
monindex = 1
for mon in battle.team.values():
substate[0] = 0#mon.species#
substate[1] = mon.base_stats["spe"]/500
substate[2] = self.typetonum(mon,18)#
substate[3] = 0#mon.ability#####
substate[4] = mon.level/100
substate[5] = mon.current_hp_fraction
substate[6] = self.statetonum(mon.effects,162)
substate[7] = self.statustonum(mon.status,7)
substate[8] = 0#mon.item#####
substate[9], substate[10], substate[11], substate[12] = self.stripmoves(mon.moves)
state[monindex] = substate
monindex += 1
opponent_prop = properties
opss = [0] * opponent_prop
for opmon in battle.opponent_team.values():
opss[0] = 0#opmon.species#####
opss[1] = opmon.base_stats["spe"]/500
opss[2] = self.typetonum(battle.active_pokemon,18)
opss[3] = 0#opmon.ability#####
opss[4] = opmon.level/100
opss[5] = opmon.current_hp_fraction
opss[6] = self.statetonum(opmon.effects,162)
opss[7] = self.statustonum(opmon.status,7)
opss[8] = 0#opmon.item#####
opss[9] = 0#will be it's known moves
opss[10] = 0
opss[11] = 0
opss[12] = 0
state[monindex] = substate
monindex += 1
#moves not implemented yet
return [state]
def empty_state(self):
return [[[0]*13]*13]
def stripmoves(self, moves):
"""
moves parameter is array of 4 moves.
Returns 4 integers representing the moves.
These values are assigned with the constant MOVE_TO_NUM dictionary
"""
intmoves = [0]*4
counter = 0
for move in moves:
intmoves[counter]=self.MOVE_TO_NUM[move]
counter+=1
return intmoves[0], intmoves[1], intmoves[2], intmoves[3]
def statetonum(self, states, statenum):
#Generates a unique corresponding int ID for each field combination
if not states or states is None:
return 0
a=0
num = 0
if(type(states) == 'Weather'):
print("STATES IS ")
print(states)
raise ValueError("States is weather; please review errors")
for state in states:
num+=state.value*(statenum**a)
a+=1
return num
def weathertonum(self, weather):
if not weather or weather is None:
return 0
return weather.value
def typetonum(self, pokemon, typenum):
#same function as statetonum but functions for tuples instead of sets
num=0
num += pokemon._type_1.value
if pokemon._type_2:
num+= pokemon._type_2.value * typenum
return num
def statustonum(self, status, statusnum):
if not status or status is None:
return 0
return status.value
def mysit(self, instring):
"""
My String To Int (mysit)
"""
sum = 0
for a in range(0, len(instring)):
sum += ord(instring[a]) - 97
return sum
def myone(self,value,min,max):
"""
My integer to 0-1 (myone)
"""
return (value-min)/(max-min)
def check_win(self, battle: Battle):
if battle.won:
return 1
elif battle.lost:
return -1
return 0
    def printname(self):
        # Debug helper: echo this player's account username to stdout.
        print("Mu Player's name is ", self._username)
    def _action_to_move(self, action: int, battle: Battle) -> str:
        """Abstract method converting elements of the action space to move orders.

        NOTE(review): this stub (implicitly returns None) is re-defined with
        a full implementation further down the class, which shadows it.
        """
    def _battle_finished_callback(self, battle: Battle) -> None:
        # Hook invoked when a battle ends; currently only logs completion.
        print("battle has completed")
        #self._observations[battle].put(self.embed_battle(battle))
    def _init_battle(self, battle: Battle) -> None:
        # Remember the battle currently being played; choose_move() and
        # battle_state() read it back via self._current_battle.
        self._current_battle = battle#added
def choose_move(self, battle: Battle) -> str:
#print("choose move method, obs history length below")
#print(len(self.gh.observation_history), len(self.gh.observation_history[0]), len(self.gh.observation_history[0][0]),len(self.gh.observation_history[0][0]))
#self.printname()
self._init_battle(battle)
temperature = self.temp
temperature_threshold = self.temp_thresh
print("choose move contents: ")
print(self.config.stacked_observations)
stacked_observations = self.gh.get_stacked_observations(
-1,
self.config.stacked_observations,
)
print(stacked_observations)
root, mcts_info = MCTS(self.config).run(
self.model,
stacked_observations,
self._ACTION_SPACE,
1,
True,
)
action = self.select_action(
root,
temperature
if not temperature_threshold
or len(gh.action_history) < temperature_threshold
else 0,
)
self.laction = action
self.lroot = root
#step()
#gamehistory appends are normally here
return self._action_to_move(action, battle)
#check move's results
    def check_move(self, battle: Battle, from_teampreview_request: bool = False, maybe_default_order=False):
        """Record the outcome of the last chosen move into the game history.

        Appends the MCTS search statistics, the last action, the new
        observation, the reward and the to-play marker for the latest step.
        """
        root = self.lroot
        self.gh.store_search_statistics(root, self.config.action_space)
        self.gh.action_history.append(self.laction)
        self.gh.observation_history.append(self.battle_state(battle))
        self.gh.reward_history.append(self.check_win(battle))
        self.gh.to_play_history.append(1)#technically should be to_play
def choose_max_move(self, battle: Battle):
if battle.available_moves:
# Finds the best move among available ones
best_move = max(battle.available_moves, key=lambda move: move.base_power)
return self.create_order(best_move)
# If no attack is available, a random switch will be made
else:
return self.choose_random_move(battle)
    def close(self) -> None:
        """Unimplemented. Has no effect."""
        # Intentionally empty; kept so callers can invoke close() safely.
    def complete_current_battle(self) -> None:
        """Completes the current battle by performing random moves."""
        done = self._current_battle.finished
        while not done:
            # Keep stepping with uniformly random actions until finished.
            _, _, done, _ = self.step(np.random.choice(self._ACTION_SPACE))
    def compute_reward(self, battle: Battle) -> float:
        """Returns a reward for the given battle.
        The default implementation corresponds to the default parameters of the
        reward_computing_helper method.
        :param battle: The battle for which to compute the reward.
        :type battle: Battle
        :return: The computed reward.
        :rtype: float
        """
        # Delegates entirely to reward_computing_helper with its defaults.
        return self.reward_computing_helper(battle)
#@abstractmethod
def embed_battle(self, battle: Battle) -> Any:
return self.battle_state(battle)
"""Abstract method for embedding battles.
:param battle: The battle whose state is being embedded
:type battle: Battle
:return: The computed embedding
:rtype: Any
"""
    def reset(self) -> Any:
        """Resets the internal environment state. The current battle will be set to an
        active unfinished battle.
        :return: The observation of the new current battle.
        :rtype: Any
        :raises: EnvironmentError
        """
        # NOTE(review): currently only logs; no battle is reset and nothing
        # is returned despite the documented return value. TODO confirm.
        print("resetted 339 muplayer")
        #self._current_battle = battles[0]
    def render(self, mode="human") -> None:
        """A one line rendering of the current state of the battle.

        :param mode: accepted for compatibility; only console output is
            produced.
        """
        # The carriage-return terminator rewrites the same console line each
        # turn; a newline is emitted only once the battle is finished.
        print(
            " Turn %4d. | [%s][%3d/%3dhp] %10.10s - %10.10s [%3d%%hp][%s]"
            % (
                self._current_battle.turn,
                "".join(
                    [
                        "⦻" if mon.fainted else "●"
                        for mon in self._current_battle.team.values()
                    ]
                ),
                self._current_battle.active_pokemon.current_hp or 0,
                self._current_battle.active_pokemon.max_hp or 0,
                self._current_battle.active_pokemon.species,
                self._current_battle.opponent_active_pokemon.species,  # pyre-ignore
                self._current_battle.opponent_active_pokemon.current_hp  # pyre-ignore
                or 0,
                "".join(
                    [
                        "⦻" if mon.fainted else "●"
                        for mon in self._current_battle.opponent_team.values()
                    ]
                ),
            ),
            end="\n" if self._current_battle.finished else "\r",
        )
    def seed(self, seed=None) -> None:
        """Sets the numpy seed."""
        # Seeds the *global* numpy RNG used by complete_current_battle()
        # and select_action().
        np.random.seed(seed)
    def step(self, action: int) -> Tuple:
        """Performs action in the current battle.
        :param action: The action to perform.
        :type action: int
        :return: A tuple containing the next observation, the reward, a boolean
            indicating whether the episode is finished, and additional information
        :rtype: tuple
        """
        # NOTE(review): self._actions / self._observations are commented out
        # in __init__, so this raises AttributeError unless they are set up
        # elsewhere -- TODO confirm.
        self._actions[self._current_battle].put(action)
        observation = self._observations[self._current_battle].get()
        return (
            observation,
            self.compute_reward(self._current_battle),
            self._current_battle.finished,
            {},
        )
def die():
self.stop_listening()
    def action_space(self) -> List:
        """Returns the action space of the player. Must be implemented by subclasses."""
        # NOTE(review): shadowed by the @property of the same name defined
        # later in the class, so this placeholder is never reachable.
        pass
    def _action_to_move(self, action: int, battle: Battle) -> str:
        """Converts actions to move orders.
        The conversion is done as follows:
        0 <= action < 4:
            The actionth available move in battle.available_moves is executed.
        4 <= action < 8:
            The action - 4th available move in battle.available_moves is executed, with
            z-move.
        8 <= action < 12:
            The action - 8th available move in battle.available_moves is executed, with
            mega-evolution.
        12 <= action < 16:
            The action - 12th available move in battle.available_moves is executed,
            while dynamaxing.
        16 <= action < 22
            The action - 16th available switch in battle.available_switches is executed.
        If the proposed action is illegal, a random legal move is performed.
        :param action: The action to convert.
        :type action: int
        :param battle: The battle in which to act.
        :type battle: Battle
        :return: the order to send to the server.
        :rtype: str
        """
        # Plain move (0-3).
        if (
            action < 4
            and action < len(battle.available_moves)
            and not battle.force_switch
        ):
            return self.create_order(battle.available_moves[action])
        # Z-move variant (4-7).
        elif (
            not battle.force_switch
            and battle.can_z_move
            and 0 <= action - 4 < len(battle.active_pokemon.available_z_moves)
        ):
            return self.create_order(
                battle.active_pokemon.available_z_moves[action - 4], z_move=True
            )
        # Mega-evolution variant (8-11).
        elif (
            battle.can_mega_evolve
            and 0 <= action - 8 < len(battle.available_moves)
            and not battle.force_switch
        ):
            return self.create_order(battle.available_moves[action - 8], mega=True)
        # Dynamax variant (12-15).
        elif (
            battle.can_dynamax
            and 0 <= action - 12 < len(battle.available_moves)
            and not battle.force_switch
        ):
            return self.create_order(battle.available_moves[action - 12], dynamax=True)
        # Switch (16-21).
        elif 0 <= action - 16 < len(battle.available_switches):
            return self.create_order(battle.available_switches[action - 16])
        # Illegal action: fall back to a random legal move.
        else:
            return self.choose_random_move(battle)
    @property
    def action_space(self) -> List:
        """The action space for gen 7 single battles.
        The conversion to moves is done as follows:
        0 <= action < 4:
            The actionth available move in battle.available_moves is executed.
        4 <= action < 8:
            The action - 4th available move in battle.available_moves is executed,
            with z-move.
        8 <= action < 12:
            The action - 8th available move in battle.available_moves is executed,
            with mega-evolution.
        12 <= action < 16:
            The action - 12th available move in battle.available_moves is executed,
            while dynamaxing.
        16 <= action < 22
            The action - 16th available switch in battle.available_switches is
            executed.
        """
        # Built once in __init__ as list(range(4 * 4 + 6)).
        return self._ACTION_SPACE
@staticmethod
def select_action(node, temperature):
"""
Select action according to the visit count distribution and the temperature.
The temperature is changed dynamically with the visit_softmax_temperature function
in the config.
"""
visit_counts = np.array(
[child.visit_count for child in node.children.values()], dtype="int32"
)
actions = [action for action in node.children.keys()]
if temperature == 0:
action = actions[np.argmax(visit_counts)]
elif temperature == float("inf"):
action = np.random.choice(actions)
else:
# See paper appendix Data Generation
visit_count_distribution = visit_counts ** (1 / temperature)
visit_count_distribution = visit_count_distribution / sum(
visit_count_distribution
)
action = np.random.choice(actions, p=visit_count_distribution)
return action
def main():
    """Smoke test: pit a MuPlayer against a MaxDamagePlayer for 5 battles."""
    start = time.time()
    # We create two players.
    max_damage_player = MaxDamagePlayer(battle_format="gen8randombattle")
    # NOTE(review): MuPlayer.__init__ requires the keyword-only mu_config
    # argument (no default), so this call raises TypeError -- TODO confirm.
    mu_player = MuPlayer(battle_format="gen8randombattle")
    # Now, let's evaluate our player
    mu_player.battle_against(max_damage_player, n_battles=5)
    # NOTE(review): the message says "/ 100" but only 5 battles are played.
    print(
        "Max damage player won %d / 100 battles [this took %f seconds]"
        % (max_damage_player.n_won_battles, time.time() - start)
    )
if __name__ == "__main__":
main()
#asyncio.get_event_loop().run_until_complete(main())
| [
"models.MuZeroNetwork",
"self_play.MCTS",
"numpy.random.choice",
"poke_env.utils.to_id_str",
"numpy.argmax",
"torch.cuda.is_available",
"numpy.random.seed",
"poke_env.data.MOVES.items",
"poke_env.data.POKEDEX.items",
"asyncio.get_event_loop",
"time.time",
"self_play.GameHistory",
"threading.... | [((1315, 1330), 'poke_env.data.POKEDEX.items', 'POKEDEX.items', ([], {}), '()\n', (1328, 1330), False, 'from poke_env.data import POKEDEX\n'), ((1639, 1652), 'poke_env.data.MOVES.items', 'MOVES.items', ([], {}), '()\n', (1650, 1652), False, 'from poke_env.data import MOVES\n'), ((21874, 21885), 'time.time', 'time.time', ([], {}), '()\n', (21883, 21885), False, 'import time\n'), ((2813, 2826), 'self_play.GameHistory', 'GameHistory', ([], {}), '()\n', (2824, 2826), False, 'from self_play import MCTS, Node, GameHistory, MinMaxStats\n'), ((2960, 2993), 'models.MuZeroNetwork', 'models.MuZeroNetwork', (['self.config'], {}), '(self.config)\n', (2980, 2993), False, 'import models\n'), ((4860, 4884), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (4882, 4884), False, 'import asyncio\n'), ((4914, 4922), 'threading.Thread', 'Thread', ([], {}), '()\n', (4920, 4922), False, 'from threading import Thread\n'), ((16475, 16495), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (16489, 16495), True, 'import numpy as np\n'), ((12188, 12205), 'self_play.MCTS', 'MCTS', (['self.config'], {}), '(self.config)\n', (12192, 12205), False, 'from self_play import MCTS, Node, GameHistory, MinMaxStats\n'), ((13965, 14001), 'numpy.random.choice', 'np.random.choice', (['self._ACTION_SPACE'], {}), '(self._ACTION_SPACE)\n', (13981, 14001), True, 'import numpy as np\n'), ((21361, 21384), 'numpy.argmax', 'np.argmax', (['visit_counts'], {}), '(visit_counts)\n', (21370, 21384), True, 'import numpy as np\n'), ((21451, 21476), 'numpy.random.choice', 'np.random.choice', (['actions'], {}), '(actions)\n', (21467, 21476), True, 'import numpy as np\n'), ((21767, 21820), 'numpy.random.choice', 'np.random.choice', (['actions'], {'p': 'visit_count_distribution'}), '(actions, p=visit_count_distribution)\n', (21783, 21820), True, 'import numpy as np\n'), ((1527, 1548), 'poke_env.utils.to_id_str', 'to_id_str', (['other_form'], {}), '(other_form)\n', (1536, 
1548), False, 'from poke_env.utils import to_id_str\n'), ((3099, 3124), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3122, 3124), False, 'import torch\n'), ((22289, 22300), 'time.time', 'time.time', ([], {}), '()\n', (22298, 22300), False, 'import time\n'), ((3843, 3871), 'poke_env.utils.to_id_str', 'to_id_str', (['opponent.username'], {}), '(opponent.username)\n', (3852, 3871), False, 'from poke_env.utils import to_id_str\n'), ((4054, 4080), 'poke_env.utils.to_id_str', 'to_id_str', (['player.username'], {}), '(player.username)\n', (4063, 4080), False, 'from poke_env.utils import to_id_str\n')] |
import time
from dotenv import load_dotenv, find_dotenv
import os
import numpy as np
from pathlib import Path
import tensorflow as tf
from keras import Input, Model
from keras.callbacks import TensorBoard
from keras.layers import Dense, Dropout, Lambda, BatchNormalization
from keras.optimizers import RMSprop, Adam
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import normalize
import keras.backend as K
def create_pairs(x, y):
    '''Build every unordered observation pair with a squared-difference label.

    For each i < j, emits the pair [x[i], x[j]] with label (y[i] - y[j])**2.
    Returns (pairs, labels) as numpy arrays.
    '''
    pairs, labels = [], []
    n = len(x)
    for i in range(n):
        for j in range(i + 1, n):
            pairs.append([x[i], x[j]])
            labels.append((y[i] - y[j]) ** 2)
    return np.array(pairs), np.array(labels)
def euclidean_distance(vects):
    # Keras-backend Euclidean distance between two branch outputs; the sum
    # of squares is clamped to epsilon before sqrt for numerical stability.
    x, y = vects
    sum_square = K.sum(K.square(x - y), axis=1, keepdims=True)
    return tf.sqrt(K.maximum(sum_square, K.epsilon()))
def eucl_dist_output_shape(shapes):
    """Keras output-shape helper: the distance collapses features to width 1."""
    first, _second = shapes
    return (first[0], 1)
def create_base_network(input_shape):
    '''Base network to be shared (eq. to feature extraction).

    Dense(30, relu) followed by batch normalization. The local tensor was
    renamed from `input` to avoid shadowing the built-in.
    '''
    inputs = Input(shape=input_shape)
    x = Dense(30, activation='relu')(inputs)
    x = BatchNormalization()(x)
    # x = Dropout(0.1)(x)
    return Model(inputs, x)
def create_siamese_embedder(X, y, X_val, y_val, batch_size=128, nb_epochs=10):
    """Train a siamese network on observation pairs; return the embedder.

    Trains on all pairwise combinations (via create_pairs) with squared
    target differences as labels, then returns a single-branch model that
    maps one observation to its learned embedding.
    """
    input_shape = X.shape[1:]
    train_pairs, train_y = create_pairs(X, y)
    test_pairs, test_y = create_pairs(X_val, y_val)
    base_network = create_base_network(input_shape)
    input_a = Input(shape=input_shape)
    input_b = Input(shape=input_shape)
    # because we re-use the same instance `base_network`,
    # the weights of the network
    # will be shared across the two branches
    processed_a = base_network(input_a)
    processed_b = base_network(input_b)
    distance = Lambda(euclidean_distance,
                      output_shape=eucl_dist_output_shape)([processed_a, processed_b])
    siamese_model = Model([input_a, input_b], distance)
    tb_cb = TensorBoard(log_dir='./logs_siamese/{}'.format(time.time()),
    histogram_freq=0, batch_size=32, write_graph=True, write_grads=True, write_images=False, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None,
    embeddings_data=None, update_freq='batch')
    # train
    # NOTE(review): a learning rate of 1e-1 is unusually high for Adam --
    # confirm this is intentional.
    adam = Adam(1e-1)
    siamese_model.compile(loss="mse", optimizer=adam)
    siamese_model.fit([train_pairs[:, 0], train_pairs[:, 1]], train_y,
              batch_size=batch_size,
              epochs=nb_epochs,
              validation_data=([test_pairs[:, 0], test_pairs[:, 1]], test_y),
              callbacks=[tb_cb])
    # NOTE(review): `input=`/`output=` are legacy Keras kwargs; modern Keras
    # expects `inputs=`/`outputs=` -- confirm against the pinned version.
    transformation_model = Model(input=[input_a], output=[processed_a])
    transformation_model.compile(optimizer=adam, loss="mse")
    return transformation_model
def main():
    """Load rotor data, learn a siamese embedding, and save the transform."""
    load_dotenv(find_dotenv())
    input_data_file = Path(os.environ["project_dir"]) / "data/external/pcm_main_rotor.npz"
    data = np.load(input_data_file)
    X, y = data["X"], data["y"]
    batch_size=128
    nb_epochs = 5
    # Column-wise max-normalization of the features before pairing.
    X = normalize(X, axis=0, norm="max")
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
    # Scale targets by the training maximum (same factor applied to test).
    max_y_train = np.max(y_train)
    y_train /= max_y_train
    y_test /= max_y_train
    transformation_model = create_siamese_embedder(X_train, y_train, X_test, y_test, batch_size=batch_size, nb_epochs=nb_epochs)
    transformed_data = transformation_model.predict(X)
    output_data_file = Path(os.environ["project_dir"]) / "data/external/pcm_main_rotor_embeded.npz"
    np.savez(output_data_file, **{"X": transformed_data, "y": y})
if __name__ == "__main__":
main() | [
"keras.optimizers.Adam",
"numpy.savez",
"dotenv.find_dotenv",
"keras.Model",
"sklearn.model_selection.train_test_split",
"pathlib.Path",
"keras.backend.square",
"keras.layers.Lambda",
"numpy.max",
"keras.Input",
"numpy.array",
"keras.layers.BatchNormalization",
"keras.layers.Dense",
"sklea... | [((1210, 1234), 'keras.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (1215, 1234), False, 'from keras import Input, Model\n'), ((1348, 1363), 'keras.Model', 'Model', (['input', 'x'], {}), '(input, x)\n', (1353, 1363), False, 'from keras import Input, Model\n'), ((1641, 1665), 'keras.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (1646, 1665), False, 'from keras import Input, Model\n'), ((1680, 1704), 'keras.Input', 'Input', ([], {'shape': 'input_shape'}), '(shape=input_shape)\n', (1685, 1704), False, 'from keras import Input, Model\n'), ((2073, 2108), 'keras.Model', 'Model', (['[input_a, input_b]', 'distance'], {}), '([input_a, input_b], distance)\n', (2078, 2108), False, 'from keras import Input, Model\n'), ((2461, 2470), 'keras.optimizers.Adam', 'Adam', (['(0.1)'], {}), '(0.1)\n', (2465, 2470), False, 'from keras.optimizers import RMSprop, Adam\n'), ((2806, 2850), 'keras.Model', 'Model', ([], {'input': '[input_a]', 'output': '[processed_a]'}), '(input=[input_a], output=[processed_a])\n', (2811, 2850), False, 'from keras import Input, Model\n'), ((3090, 3114), 'numpy.load', 'np.load', (['input_data_file'], {}), '(input_data_file)\n', (3097, 3114), True, 'import numpy as np\n'), ((3193, 3225), 'sklearn.preprocessing.normalize', 'normalize', (['X'], {'axis': '(0)', 'norm': '"""max"""'}), "(X, axis=0, norm='max')\n", (3202, 3225), False, 'from sklearn.preprocessing import normalize\n'), ((3265, 3319), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(X, y, test_size=0.2, random_state=42)\n', (3281, 3319), False, 'from sklearn.model_selection import train_test_split\n'), ((3338, 3353), 'numpy.max', 'np.max', (['y_train'], {}), '(y_train)\n', (3344, 3353), True, 'import numpy as np\n'), ((3697, 3758), 'numpy.savez', 'np.savez', (['output_data_file'], {}), "(output_data_file, **{'X': transformed_data, 'y': y})\n", (3705, 3758), 
True, 'import numpy as np\n'), ((796, 811), 'numpy.array', 'np.array', (['pairs'], {}), '(pairs)\n', (804, 811), True, 'import numpy as np\n'), ((813, 829), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (821, 829), True, 'import numpy as np\n'), ((902, 917), 'keras.backend.square', 'K.square', (['(x - y)'], {}), '(x - y)\n', (910, 917), True, 'import keras.backend as K\n'), ((1243, 1271), 'keras.layers.Dense', 'Dense', (['(30)'], {'activation': '"""relu"""'}), "(30, activation='relu')\n", (1248, 1271), False, 'from keras.layers import Dense, Dropout, Lambda, BatchNormalization\n'), ((1287, 1307), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1305, 1307), False, 'from keras.layers import Dense, Dropout, Lambda, BatchNormalization\n'), ((1938, 2001), 'keras.layers.Lambda', 'Lambda', (['euclidean_distance'], {'output_shape': 'eucl_dist_output_shape'}), '(euclidean_distance, output_shape=eucl_dist_output_shape)\n', (1944, 2001), False, 'from keras.layers import Dense, Dropout, Lambda, BatchNormalization\n'), ((2973, 2986), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (2984, 2986), False, 'from dotenv import load_dotenv, find_dotenv\n'), ((3010, 3041), 'pathlib.Path', 'Path', (["os.environ['project_dir']"], {}), "(os.environ['project_dir'])\n", (3014, 3041), False, 'from pathlib import Path\n'), ((3616, 3647), 'pathlib.Path', 'Path', (["os.environ['project_dir']"], {}), "(os.environ['project_dir'])\n", (3620, 3647), False, 'from pathlib import Path\n'), ((983, 994), 'keras.backend.epsilon', 'K.epsilon', ([], {}), '()\n', (992, 994), True, 'import keras.backend as K\n'), ((2169, 2180), 'time.time', 'time.time', ([], {}), '()\n', (2178, 2180), False, 'import time\n')] |
"""Utility functions for Annif"""
import glob
import os
import os.path
import tempfile
import numpy as np
from annif import logger
from annif.suggestion import VectorSuggestionResult
def atomic_save(obj, dirname, filename, method=None):
    """Save the given object (which must have a .save() method, unless the
    method parameter is given) into the given directory with the given
    filename, using a temporary file and renaming the temporary file to the
    final name."""
    prefix, suffix = os.path.splitext(filename)
    tempfd, tempfilename = tempfile.mkstemp(
        prefix=prefix, suffix=suffix, dir=dirname)
    os.close(tempfd)  # only the path is needed; the saver reopens the file
    logger.debug('saving %s to temporary file %s', str(obj)[:90], tempfilename)
    if method is not None:
        method(obj, tempfilename)
    else:
        obj.save(tempfilename)
    # Some save methods write several files sharing the temp path as a
    # prefix; rename every one of them into place.
    for fn in glob.glob(tempfilename + '*'):
        newname = fn.replace(tempfilename, os.path.join(dirname, filename))
        logger.debug('renaming temporary file %s to %s', fn, newname)
        os.rename(fn, newname)
def cleanup_uri(uri):
    """Strip one pair of enclosing angle brackets from a URI, if present."""
    is_bracketed = uri.startswith('<') and uri.endswith('>')
    return uri[1:-1] if is_bracketed else uri
def merge_hits(weighted_hits, subject_index):
    """Merge hits from multiple sources. Input is a sequence of WeightedSuggestion
    objects. A SubjectIndex is needed to convert between subject IDs and URIs.
    Returns an SuggestionResult object."""
    weights = [whit.weight for whit in weighted_hits]
    # Turn each source's hits into a score vector, then combine the vectors
    # as a weighted mean across sources.
    scores = [whit.hits.as_vector(subject_index) for whit in weighted_hits]
    result = np.average(scores, axis=0, weights=weights)
    return VectorSuggestionResult(result)
def parse_sources(sourcedef):
    """parse a source definition such as 'src1:1.0,src2' into a sequence of
    tuples (src_id, weight); weights are normalized to sum to 1.0 and
    default to 1.0 when omitted"""
    parsed = []
    total = 0.0
    for chunk in sourcedef.strip().split(','):
        fields = chunk.strip().split(':')
        weight = float(fields[1]) if len(fields) > 1 else 1.0
        parsed.append((fields[0], weight))
        total += weight
    return [(src_id, weight / total) for src_id, weight in parsed]
def parse_args(param_string):
    """Parse a string of comma separated arguments such as '42,43,key=abc' into
    a list of positional args [42, 43] and a dict of keyword args {key: abc}.
    Pieces containing more than one '=' are silently ignored."""
    posargs, kwargs = [], {}
    if param_string:
        for piece in param_string.split(','):
            key, sep, value = piece.partition('=')
            if not sep:
                posargs.append(piece)
            elif '=' not in value:
                kwargs[key] = value
    return posargs, kwargs
def boolean(val):
    """Convert the given value to a boolean True/False value, if it isn't already.
    True values are '1', 'yes', 'true', and 'on' (case insensitive), everything
    else is False."""
    truthy = {'1', 'yes', 'true', 'on'}
    return str(val).lower() in truthy
def identity(x):
    """Identity function: return the given argument unchanged"""
    # Serves as a no-op default where a transform callable is expected.
    return x
| [
"annif.suggestion.VectorSuggestionResult",
"numpy.average",
"os.close",
"os.rename",
"os.path.splitext",
"os.path.join",
"tempfile.mkstemp",
"annif.logger.debug",
"glob.glob"
] | [((503, 529), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (519, 529), False, 'import os\n'), ((557, 616), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'prefix': 'prefix', 'suffix': 'suffix', 'dir': 'dirname'}), '(prefix=prefix, suffix=suffix, dir=dirname)\n', (573, 616), False, 'import tempfile\n'), ((630, 646), 'os.close', 'os.close', (['tempfd'], {}), '(tempfd)\n', (638, 646), False, 'import os\n'), ((843, 872), 'glob.glob', 'glob.glob', (["(tempfilename + '*')"], {}), "(tempfilename + '*')\n", (852, 872), False, 'import glob\n'), ((1613, 1656), 'numpy.average', 'np.average', (['scores'], {'axis': '(0)', 'weights': 'weights'}), '(scores, axis=0, weights=weights)\n', (1623, 1656), True, 'import numpy as np\n'), ((1668, 1698), 'annif.suggestion.VectorSuggestionResult', 'VectorSuggestionResult', (['result'], {}), '(result)\n', (1690, 1698), False, 'from annif.suggestion import VectorSuggestionResult\n'), ((958, 1019), 'annif.logger.debug', 'logger.debug', (['"""renaming temporary file %s to %s"""', 'fn', 'newname'], {}), "('renaming temporary file %s to %s', fn, newname)\n", (970, 1019), False, 'from annif import logger\n'), ((1028, 1050), 'os.rename', 'os.rename', (['fn', 'newname'], {}), '(fn, newname)\n', (1037, 1050), False, 'import os\n'), ((917, 948), 'os.path.join', 'os.path.join', (['dirname', 'filename'], {}), '(dirname, filename)\n', (929, 948), False, 'import os\n')] |
import os
import yaml
import time
import shutil
import torch
import random
import argparse
import numpy as np
from torch.utils import data
from tqdm import tqdm
from ptsemseg.models import get_model
from ptsemseg.loss import get_loss_function
from ptsemseg.loader import get_loader
from ptsemseg.utils import get_logger
from ptsemseg.metrics import runningScore, averageMeter
from ptsemseg.augmentations import get_composed_augmentations
from ptsemseg.schedulers import get_scheduler
from ptsemseg.optimizers import get_optimizer
from tensorboardX import SummaryWriter
def train(cfg, writer, logger):
# Setup seeds
torch.manual_seed(cfg.get("seed", 1337))
torch.cuda.manual_seed(cfg.get("seed", 1337))
np.random.seed(cfg.get("seed", 1337))
random.seed(cfg.get("seed", 1337))
# Setup device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Setup Augmentations
augmentations = cfg["training"].get("augmentations", None)
data_aug = get_composed_augmentations(augmentations)
# Setup Dataloader
data_loader = get_loader(cfg["data"]["dataset"])
tloader_params = {k: v for k, v in cfg["data"]["train"].items()}
tloader_params.update({'root':cfg["data"]["path"]})
vloader_params = {k: v for k, v in cfg["data"]["val"].items()}
vloader_params.update({'root':cfg["data"]["path"]})
t_loader = data_loader(**tloader_params)
v_loader = data_loader(**vloader_params)
n_classes = t_loader.n_classes
trainloader = data.DataLoader(
t_loader,
batch_size=cfg["training"]["batch_size"],
num_workers=cfg["training"]["n_workers"],
shuffle=True,
)
valloader = data.DataLoader(
v_loader, batch_size=cfg["training"]["batch_size"], num_workers=cfg["training"]["n_workers"]
)
# Setup Metrics
running_metrics_val = runningScore(n_classes)
# Setup Model
model = get_model(cfg["model"], n_classes).to(device)
model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))
# Setup optimizer, lr_scheduler and loss function
optimizer_cls = get_optimizer(cfg)
optimizer_params = {k: v for k, v in cfg["training"]["optimizer"].items() if k != "name"}
optimizer = optimizer_cls(model.parameters(), **optimizer_params)
logger.info("Using optimizer {}".format(optimizer))
scheduler = get_scheduler(optimizer, cfg["training"]["lr_schedule"])
loss_type = cfg["training"]["loss"]["name"]
if loss_type == 'BalancedCE' or loss_type =='WeightedCE':
cls_num_list = np.zeros((n_classes,))
print("=" * 10, "CALCULATING WEIGHTS", "=" * 10)
# for _, valloader in loaders['val'].items():
for _, (_, labels_list) in tqdm(enumerate(valloader)):
for i in range(n_classes):
cls_num_list[i] = cls_num_list[i] + (labels_list[0] == i).sum()
if loss_type == 'BalancedCE':
beta = (np.sum(cls_num_list)-1)/np.sum(cls_num_list)
effective_num = 1.0 - np.power(beta, cls_num_list)
effective_num[effective_num==0] = 1
per_cls_weights = (1.0 - beta) / np.array(effective_num)
per_cls_weights = per_cls_weights / np.sum(per_cls_weights) * len(cls_num_list)
per_cls_weights = torch.tensor(per_cls_weights,dtype=torch.float).cuda(device)
cls_num_list = None
elif loss_type =='WeightedCE':
median = np.median(cls_num_list)
per_cls_weights = median/cls_num_list
per_cls_weights[per_cls_weights==np.inf] = 0.0
per_cls_weights = torch.tensor(per_cls_weights,dtype=torch.float).cuda(device)
cls_num_list = None
else:
per_cls_weights = None
else:
per_cls_weights = None
cls_num_list = None
if cfg["training"]["loss"][loss_type] is not None:
loss_params = {k: v for k, v in cfg["training"]["loss"][loss_type].items()}
else:
loss_params = {}
loss_params["n_classes"] = n_classes
loss_params["ignore_index"] = cfg["training"]["loss"]["ignore_index"]
if per_cls_weights is not None:
loss_params["weight"] = per_cls_weights
if cls_num_list is not None:
loss_params["cls_num_list"] = cls_num_list
loss_fn = get_loss_function(cfg,**loss_params)
logger.info("Using loss {}".format(loss_fn))
start_iter = 0
if cfg["training"]["resume"] is not None:
if os.path.isfile(cfg["training"]["resume"]):
logger.info(
"Loading model and optimizer from checkpoint '{}'".format(cfg["training"]["resume"])
)
checkpoint = torch.load(cfg["training"]["resume"])
model.load_state_dict(checkpoint["model_state"])
optimizer.load_state_dict(checkpoint["optimizer_state"])
scheduler.load_state_dict(checkpoint["scheduler_state"])
start_iter = checkpoint["epoch"]
logger.info(
"Loaded checkpoint '{}' (iter {})".format(
cfg["training"]["resume"], checkpoint["epoch"]
)
)
else:
logger.info("No checkpoint found at '{}'".format(cfg["training"]["resume"]))
val_loss_meter = averageMeter()
time_meter = averageMeter()
best_iou = -100.0
i = start_iter
flag = True
while i <= cfg["training"]["train_iters"] and flag:
for (images, labels) in trainloader:
i += 1
start_ts = time.time()
model.train()
images = images.to(device)
labels = labels.to(device).squeeze()
if labels.shape[0] <= 2:
continue
optimizer.zero_grad()
outputs = model(images)
loss = loss_fn(input=outputs, target=labels)
loss.backward()
optimizer.step()
scheduler.step()
time_meter.update(time.time() - start_ts)
if (i + 1) % cfg["training"]["print_interval"] == 0:
fmt_str = "Iter [{:d}/{:d}] Loss: {:.4f} Time/Image: {:.4f}"
print_str = fmt_str.format(
i + 1,
cfg["training"]["train_iters"],
loss.item(),
time_meter.avg / cfg["training"]["batch_size"],
)
print(print_str)
logger.info(print_str)
writer.add_scalar("loss/train_loss", loss.item(), i + 1)
time_meter.reset()
if (i + 1) % cfg["training"]["val_interval"] == 0 or (i + 1) == cfg["training"][
"train_iters"
]:
model.eval()
with torch.no_grad():
for i_val, (images_val, labels_val) in tqdm(enumerate(valloader)):
images_val = images_val.to(device)
labels_val = labels_val.to(device).squeeze()
outputs = model(images_val)
val_loss = loss_fn(input=outputs, target=labels_val)
pred = outputs.data.max(1)[1].cpu().numpy()
gt = labels_val.data.cpu().numpy()
running_metrics_val.update(gt, pred)
val_loss_meter.update(val_loss.item())
writer.add_scalar("loss/val_loss", val_loss_meter.avg, i + 1)
logger.info("Iter %d Loss: %.4f" % (i + 1, val_loss_meter.avg))
score, class_iou,_,_ = running_metrics_val.get_scores()
for k, v in score.items():
print(k, v)
logger.info("{}: {}".format(k, v))
writer.add_scalar("val_metrics/{}".format(k), v, i + 1)
for k, v in class_iou.items():
logger.info("{}: {}".format(k, v))
writer.add_scalar("val_metrics/cls_{}".format(k), v, i + 1)
val_loss_meter.reset()
running_metrics_val.reset()
if score["Mean IoU : \t"] >= best_iou:
best_iou = score["Mean IoU : \t"]
state = {
"epoch": i + 1,
"model_state": model.state_dict(),
"optimizer_state": optimizer.state_dict(),
"scheduler_state": scheduler.state_dict(),
"best_iou": best_iou,
}
save_path = os.path.join(
writer.file_writer.get_logdir(),
"{}_{}_best_model.pkl".format(cfg["model"]["arch"], cfg["data"]["dataset"]),
)
torch.save(state, save_path)
if (i + 1) == cfg["training"]["train_iters"]:
flag = False
break
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="config")
parser.add_argument(
"--config",
nargs="?",
type=str,
default="configs/deeplab_synthia.yml",
help="Configuration file to use",
)
args = parser.parse_args()
with open(args.config) as fp:
cfg = yaml.load(fp)
logdir = os.path.join("runs", os.path.basename(args.config)[:-4],cfg['model']['backbone'],cfg['id'])
writer = SummaryWriter(logdir)
print("RUNDIR: {}".format(logdir))
shutil.copy(args.config, logdir)
logger = get_logger(logdir)
logger.info("Let the games begin")
train(cfg, writer, logger)
| [
"ptsemseg.metrics.averageMeter",
"yaml.load",
"torch.cuda.device_count",
"numpy.array",
"torch.cuda.is_available",
"ptsemseg.schedulers.get_scheduler",
"ptsemseg.loss.get_loss_function",
"ptsemseg.loader.get_loader",
"ptsemseg.augmentations.get_composed_augmentations",
"tensorboardX.SummaryWriter"... | [((1002, 1043), 'ptsemseg.augmentations.get_composed_augmentations', 'get_composed_augmentations', (['augmentations'], {}), '(augmentations)\n', (1028, 1043), False, 'from ptsemseg.augmentations import get_composed_augmentations\n'), ((1086, 1120), 'ptsemseg.loader.get_loader', 'get_loader', (["cfg['data']['dataset']"], {}), "(cfg['data']['dataset'])\n", (1096, 1120), False, 'from ptsemseg.loader import get_loader\n'), ((1533, 1660), 'torch.utils.data.DataLoader', 'data.DataLoader', (['t_loader'], {'batch_size': "cfg['training']['batch_size']", 'num_workers': "cfg['training']['n_workers']", 'shuffle': '(True)'}), "(t_loader, batch_size=cfg['training']['batch_size'],\n num_workers=cfg['training']['n_workers'], shuffle=True)\n", (1548, 1660), False, 'from torch.utils import data\n'), ((1713, 1826), 'torch.utils.data.DataLoader', 'data.DataLoader', (['v_loader'], {'batch_size': "cfg['training']['batch_size']", 'num_workers': "cfg['training']['n_workers']"}), "(v_loader, batch_size=cfg['training']['batch_size'],\n num_workers=cfg['training']['n_workers'])\n", (1728, 1826), False, 'from torch.utils import data\n'), ((1884, 1907), 'ptsemseg.metrics.runningScore', 'runningScore', (['n_classes'], {}), '(n_classes)\n', (1896, 1907), False, 'from ptsemseg.metrics import runningScore, averageMeter\n'), ((2146, 2164), 'ptsemseg.optimizers.get_optimizer', 'get_optimizer', (['cfg'], {}), '(cfg)\n', (2159, 2164), False, 'from ptsemseg.optimizers import get_optimizer\n'), ((2403, 2459), 'ptsemseg.schedulers.get_scheduler', 'get_scheduler', (['optimizer', "cfg['training']['lr_schedule']"], {}), "(optimizer, cfg['training']['lr_schedule'])\n", (2416, 2459), False, 'from ptsemseg.schedulers import get_scheduler\n'), ((4340, 4377), 'ptsemseg.loss.get_loss_function', 'get_loss_function', (['cfg'], {}), '(cfg, **loss_params)\n', (4357, 4377), False, 'from ptsemseg.loss import get_loss_function\n'), ((5302, 5316), 'ptsemseg.metrics.averageMeter', 
'averageMeter', ([], {}), '()\n', (5314, 5316), False, 'from ptsemseg.metrics import runningScore, averageMeter\n'), ((5334, 5348), 'ptsemseg.metrics.averageMeter', 'averageMeter', ([], {}), '()\n', (5346, 5348), False, 'from ptsemseg.metrics import runningScore, averageMeter\n'), ((8976, 9021), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""config"""'}), "(description='config')\n", (8999, 9021), False, 'import argparse\n'), ((9413, 9434), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['logdir'], {}), '(logdir)\n', (9426, 9434), False, 'from tensorboardX import SummaryWriter\n'), ((9479, 9511), 'shutil.copy', 'shutil.copy', (['args.config', 'logdir'], {}), '(args.config, logdir)\n', (9490, 9511), False, 'import shutil\n'), ((9526, 9544), 'ptsemseg.utils.get_logger', 'get_logger', (['logdir'], {}), '(logdir)\n', (9536, 9544), False, 'from ptsemseg.utils import get_logger\n'), ((2594, 2616), 'numpy.zeros', 'np.zeros', (['(n_classes,)'], {}), '((n_classes,))\n', (2602, 2616), True, 'import numpy as np\n'), ((4504, 4545), 'os.path.isfile', 'os.path.isfile', (["cfg['training']['resume']"], {}), "(cfg['training']['resume'])\n", (4518, 4545), False, 'import os\n'), ((9280, 9293), 'yaml.load', 'yaml.load', (['fp'], {}), '(fp)\n', (9289, 9293), False, 'import yaml\n'), ((859, 884), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (882, 884), False, 'import torch\n'), ((1939, 1973), 'ptsemseg.models.get_model', 'get_model', (["cfg['model']", 'n_classes'], {}), "(cfg['model'], n_classes)\n", (1948, 1973), False, 'from ptsemseg.models import get_model\n'), ((4712, 4749), 'torch.load', 'torch.load', (["cfg['training']['resume']"], {}), "(cfg['training']['resume'])\n", (4722, 4749), False, 'import torch\n'), ((5551, 5562), 'time.time', 'time.time', ([], {}), '()\n', (5560, 5562), False, 'import time\n'), ((9329, 9358), 'os.path.basename', 'os.path.basename', (['args.config'], {}), '(args.config)\n', (9345, 9358), False, 
'import os\n'), ((2043, 2068), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (2066, 2068), False, 'import torch\n'), ((3001, 3021), 'numpy.sum', 'np.sum', (['cls_num_list'], {}), '(cls_num_list)\n', (3007, 3021), True, 'import numpy as np\n'), ((3056, 3084), 'numpy.power', 'np.power', (['beta', 'cls_num_list'], {}), '(beta, cls_num_list)\n', (3064, 3084), True, 'import numpy as np\n'), ((3178, 3201), 'numpy.array', 'np.array', (['effective_num'], {}), '(effective_num)\n', (3186, 3201), True, 'import numpy as np\n'), ((3477, 3500), 'numpy.median', 'np.median', (['cls_num_list'], {}), '(cls_num_list)\n', (3486, 3500), True, 'import numpy as np\n'), ((2977, 2997), 'numpy.sum', 'np.sum', (['cls_num_list'], {}), '(cls_num_list)\n', (2983, 2997), True, 'import numpy as np\n'), ((3250, 3273), 'numpy.sum', 'np.sum', (['per_cls_weights'], {}), '(per_cls_weights)\n', (3256, 3273), True, 'import numpy as np\n'), ((3324, 3372), 'torch.tensor', 'torch.tensor', (['per_cls_weights'], {'dtype': 'torch.float'}), '(per_cls_weights, dtype=torch.float)\n', (3336, 3372), False, 'import torch\n'), ((6009, 6020), 'time.time', 'time.time', ([], {}), '()\n', (6018, 6020), False, 'import time\n'), ((6789, 6804), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6802, 6804), False, 'import torch\n'), ((8795, 8823), 'torch.save', 'torch.save', (['state', 'save_path'], {}), '(state, save_path)\n', (8805, 8823), False, 'import torch\n'), ((3640, 3688), 'torch.tensor', 'torch.tensor', (['per_cls_weights'], {'dtype': 'torch.float'}), '(per_cls_weights, dtype=torch.float)\n', (3652, 3688), False, 'import torch\n')] |
import sys,os
sys.path.append("..")
from model_io import model_io
import numpy as np
import tensorflow as tf
from bunch import Bunch
from example import feature_writer, write_to_tfrecords, classifier_processor
from data_generator import tokenization
from data_generator import hvd_distributed_tf_data_utils as tf_data_utils
from example import hvd_bert_order_classifier as bert_order_classifier
import horovod.tensorflow as hvd
from example import esim_bert
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"eval_data_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string(
"output_file", None,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"config_file", None,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"init_checkpoint", None,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"result_file", None,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"vocab_file", None,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"label_id", None,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_integer(
"max_length", 128,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"train_file", None,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"dev_file", None,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"model_output", None,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"gpu_id", None,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_integer(
"epoch", 5,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_integer(
"num_classes", 3,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_integer(
"train_size", 256434,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_integer(
"batch_size", 32,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"model_type", None,
"Input TF example files (can be a glob or comma separated).")
def main(_):
hvd.init()
sess_config = tf.ConfigProto()
sess_config.gpu_options.visible_device_list = str(hvd.local_rank())
graph = tf.Graph()
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
with graph.as_default():
import json
# config = json.load(open("/data/xuht/bert/chinese_L-12_H-768_A-12/bert_config.json", "r"))
config = json.load(open(FLAGS.config_file, "r"))
init_checkpoint = FLAGS.init_checkpoint
print("===init checkoutpoint==={}".format(init_checkpoint))
# init_checkpoint = "/data/xuht/bert/chinese_L-12_H-768_A-12/bert_model.ckpt"
# init_checkpoint = "/data/xuht/concat/model_1/oqmrc.ckpt"
config = Bunch(config)
config.use_one_hot_embeddings = True
config.scope = "esim/bert"
config.dropout_prob = 0.1
config.label_type = "single_label"
config.lstm_dim = 128
config.num_heads = 12
config.num_units = 768
import json
label_dict = json.load(open(FLAGS.label_id))
# label_tensor = np.asarray(label_dict["class_ratio"]).astype(np.float32)
label_tensor = None
# config.loss = "focal_loss"
json.dump(config, open(FLAGS.model_output+"/config.json", "w"))
# os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu_id
sess = tf.Session(config=sess_config)
train_size = int(FLAGS.train_size/hvd.size())
num_train_steps = int(
train_size / FLAGS.batch_size * FLAGS.epoch)
num_warmup_steps = int(num_train_steps * 0.1)
num_storage_steps = int(train_size / FLAGS.batch_size)
print(num_train_steps, num_warmup_steps, "=============")
opt_config = Bunch({"init_lr":(2e-5/hvd.size()),
"num_train_steps":num_train_steps,
"num_warmup_steps":num_warmup_steps,
"train_op":"adam"})
model_io_config = Bunch({"fix_lm":False})
model_io_fn = model_io.ModelIO(model_io_config)
num_choice = FLAGS.num_classes
max_seq_length = FLAGS.max_length
if FLAGS.model_type == "original":
model_function = bert_order_classifier.classifier_model_fn_builder
elif FLAGS.model_type == "attn":
model_function = bert_order_classifier.classifier_attn_model_fn_builder
elif FLAGS.model_type == "orignal_nonlinear":
model_function = bert_order_classifier.classifier_model_fn_builder_v1
elif FLAGS.model_type == "esim_bert":
model_function = esim_bert.classifier_attn_model_fn_builder
model_train_fn = model_function(
config,
num_choice,
init_checkpoint,
model_reuse=None,
load_pretrained=True,
model_io_fn=model_io_fn,
model_io_config=model_io_config,
opt_config=opt_config,
input_name=["a", "b"],
label_tensor=label_tensor,
not_storage_params=["adam", "adam_1"],
exclude_scope_dict={"task":"esim"})
model_eval_fn = model_function(
config,
num_choice,
init_checkpoint,
model_reuse=True,
load_pretrained=True,
model_io_fn=model_io_fn,
model_io_config=model_io_config,
opt_config=opt_config,
input_name=["a", "b"],
label_tensor=label_tensor,
not_storage_params=["adam", "adam_1"],
exclude_scope_dict={"task":"esim"})
def metric_fn(features, logits, loss):
print(logits.get_shape(), "===logits shape===")
pred_label = tf.argmax(logits, axis=-1, output_type=tf.int32)
prob = tf.nn.softmax(logits)
accuracy = correct = tf.equal(
tf.cast(pred_label, tf.int32),
tf.cast(features["label_ids"], tf.int32)
)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
return {"accuracy":accuracy, "loss":loss, "pred_label":pred_label, "label_ids":features["label_ids"]}
name_to_features = {
"input_ids_a":
tf.FixedLenFeature([max_seq_length], tf.int64),
"input_mask_a":
tf.FixedLenFeature([max_seq_length], tf.int64),
"segment_ids_a":
tf.FixedLenFeature([max_seq_length], tf.int64),
"input_ids_b":
tf.FixedLenFeature([max_seq_length], tf.int64),
"input_mask_b":
tf.FixedLenFeature([max_seq_length], tf.int64),
"segment_ids_b":
tf.FixedLenFeature([max_seq_length], tf.int64),
"label_ids":
tf.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example.
"""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
params = Bunch({})
params.epoch = FLAGS.epoch
params.batch_size = FLAGS.batch_size
# train_features = tf_data_utils.train_input_fn("/data/xuht/wsdm19/data/train.tfrecords",
# _decode_record, name_to_features, params)
# eval_features = tf_data_utils.eval_input_fn("/data/xuht/wsdm19/data/dev.tfrecords",
# _decode_record, name_to_features, params)
train_features = tf_data_utils.train_input_fn(FLAGS.train_file,
_decode_record, name_to_features, params)
eval_features = tf_data_utils.eval_input_fn(FLAGS.dev_file,
_decode_record, name_to_features, params)
[train_op, train_loss, train_per_example_loss, train_logits] = model_train_fn(train_features, [], tf.estimator.ModeKeys.TRAIN)
[_, eval_loss, eval_per_example_loss, eval_logits] = model_eval_fn(eval_features, [], tf.estimator.ModeKeys.EVAL)
result = metric_fn(eval_features, eval_logits, eval_loss)
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
sess.run(init_op)
sess.run(hvd.broadcast_global_variables(0))
def eval_fn(result):
i = 0
total_accuracy = 0
label, label_id = [], []
# label_weight = []
while True:
try:
eval_result = sess.run(result)
total_accuracy += eval_result["accuracy"]
label_id.extend(eval_result["label_ids"])
label.extend(eval_result["pred_label"])
# for item in eval_result["label_ids"]:
# label_weight.append(label_tensor[item])
i += 1
except tf.errors.OutOfRangeError:
print("End of dataset")
break
# f1 = f1_score(label_id, label, average="macro", sample_weight=label_weight)
# accuracy = accuracy_score(label_id, label, sample_weight=label_weight)
f1 = f1_score(label_id, label, average="macro")
accuracy = accuracy_score(label_id, label)
print("test accuracy accuracy {} {} f1 {}".format(total_accuracy/i,
accuracy, f1))
return total_accuracy/ i, f1
def train_fn(op, loss):
i = 0
cnt = 0
total_loss = 0.0
while True:
try:
[_, train_loss] = sess.run([op, loss])
total_loss += train_loss
i += 1
cnt += 1
if np.mod(i, num_storage_steps) == 0:
print(total_loss/cnt)
# model_io_fn.save_model(sess, "/data/xuht/wsdm19/data/model_11_15_focal_loss/oqmrc_{}.ckpt".format(int(i/8000)))
if hvd.rank() == 0:
model_io_fn.save_model(sess, FLAGS.model_output+"/oqmrc_{}.ckpt".format(int(i/num_storage_steps)))
print("==successful storing model=={}".format(int(i/num_storage_steps)))
total_loss = 0
cnt = 0
except tf.errors.OutOfRangeError:
break
print("===========begin to train============")
train_fn(train_op, train_loss)
print("===========begin to eval============")
accuracy, f1 = eval_fn(result)
print("==accuracy {} f1 {}==".format(accuracy, f1))
# model_io_fn.save_model(sess, "/data/xuht/wsdm19/data/model_11_15_focal_loss/oqmrc.ckpt")
if hvd.rank() == 0:
model_io_fn.save_model(sess, FLAGS.model_output+"/oqmrc.ckpt")
if __name__ == "__main__":
flags.mark_flag_as_required("eval_data_file")
flags.mark_flag_as_required("output_file")
flags.mark_flag_as_required("config_file")
flags.mark_flag_as_required("init_checkpoint")
flags.mark_flag_as_required("result_file")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("train_file")
flags.mark_flag_as_required("dev_file")
flags.mark_flag_as_required("max_length")
flags.mark_flag_as_required("model_output")
flags.mark_flag_as_required("gpu_id")
flags.mark_flag_as_required("epoch")
flags.mark_flag_as_required("num_classes")
tf.app.run()
| [
"tensorflow.local_variables_initializer",
"model_io.model_io.ModelIO",
"bunch.Bunch",
"horovod.tensorflow.init",
"horovod.tensorflow.broadcast_global_variables",
"tensorflow.nn.softmax",
"horovod.tensorflow.local_rank",
"data_generator.hvd_distributed_tf_data_utils.train_input_fn",
"sys.path.append"... | [((14, 35), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (29, 35), False, 'import sys, os\n'), ((2497, 2507), 'horovod.tensorflow.init', 'hvd.init', ([], {}), '()\n', (2505, 2507), True, 'import horovod.tensorflow as hvd\n'), ((2527, 2543), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (2541, 2543), True, 'import tensorflow as tf\n'), ((2629, 2639), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2637, 2639), True, 'import tensorflow as tf\n'), ((13042, 13054), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (13052, 13054), True, 'import tensorflow as tf\n'), ((2598, 2614), 'horovod.tensorflow.local_rank', 'hvd.local_rank', ([], {}), '()\n', (2612, 2614), True, 'import horovod.tensorflow as hvd\n'), ((3240, 3253), 'bunch.Bunch', 'Bunch', (['config'], {}), '(config)\n', (3245, 3253), False, 'from bunch import Bunch\n'), ((3873, 3903), 'tensorflow.Session', 'tf.Session', ([], {'config': 'sess_config'}), '(config=sess_config)\n', (3883, 3903), True, 'import tensorflow as tf\n'), ((4503, 4527), 'bunch.Bunch', 'Bunch', (["{'fix_lm': False}"], {}), "({'fix_lm': False})\n", (4508, 4527), False, 'from bunch import Bunch\n'), ((4558, 4591), 'model_io.model_io.ModelIO', 'model_io.ModelIO', (['model_io_config'], {}), '(model_io_config)\n', (4574, 4591), False, 'from model_io import model_io\n'), ((8570, 8579), 'bunch.Bunch', 'Bunch', (['{}'], {}), '({})\n', (8575, 8579), False, 'from bunch import Bunch\n'), ((9038, 9130), 'data_generator.hvd_distributed_tf_data_utils.train_input_fn', 'tf_data_utils.train_input_fn', (['FLAGS.train_file', '_decode_record', 'name_to_features', 'params'], {}), '(FLAGS.train_file, _decode_record,\n name_to_features, params)\n', (9066, 9130), True, 'from data_generator import hvd_distributed_tf_data_utils as tf_data_utils\n'), ((9187, 9276), 'data_generator.hvd_distributed_tf_data_utils.eval_input_fn', 'tf_data_utils.eval_input_fn', (['FLAGS.dev_file', '_decode_record', 
'name_to_features', 'params'], {}), '(FLAGS.dev_file, _decode_record,\n name_to_features, params)\n', (9214, 9276), True, 'from data_generator import hvd_distributed_tf_data_utils as tf_data_utils\n'), ((6803, 6851), 'tensorflow.argmax', 'tf.argmax', (['logits'], {'axis': '(-1)', 'output_type': 'tf.int32'}), '(logits, axis=-1, output_type=tf.int32)\n', (6812, 6851), True, 'import tensorflow as tf\n'), ((6871, 6892), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['logits'], {}), '(logits)\n', (6884, 6892), True, 'import tensorflow as tf\n'), ((7329, 7375), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[max_seq_length]', 'tf.int64'], {}), '([max_seq_length], tf.int64)\n', (7347, 7375), True, 'import tensorflow as tf\n'), ((7433, 7479), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[max_seq_length]', 'tf.int64'], {}), '([max_seq_length], tf.int64)\n', (7451, 7479), True, 'import tensorflow as tf\n'), ((7538, 7584), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[max_seq_length]', 'tf.int64'], {}), '([max_seq_length], tf.int64)\n', (7556, 7584), True, 'import tensorflow as tf\n'), ((7641, 7687), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[max_seq_length]', 'tf.int64'], {}), '([max_seq_length], tf.int64)\n', (7659, 7687), True, 'import tensorflow as tf\n'), ((7745, 7791), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[max_seq_length]', 'tf.int64'], {}), '([max_seq_length], tf.int64)\n', (7763, 7791), True, 'import tensorflow as tf\n'), ((7850, 7896), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[max_seq_length]', 'tf.int64'], {}), '([max_seq_length], tf.int64)\n', (7868, 7896), True, 'import tensorflow as tf\n'), ((7951, 7983), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.int64'], {}), '([], tf.int64)\n', (7969, 7983), True, 'import tensorflow as tf\n'), ((8153, 8202), 'tensorflow.parse_single_example', 'tf.parse_single_example', (['record', 'name_to_features'], {}), '(record, name_to_features)\n', 
(8176, 8202), True, 'import tensorflow as tf\n'), ((9677, 9710), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (9708, 9710), True, 'import tensorflow as tf\n'), ((9712, 9744), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (9742, 9744), True, 'import tensorflow as tf\n'), ((9790, 9823), 'horovod.tensorflow.broadcast_global_variables', 'hvd.broadcast_global_variables', (['(0)'], {}), '(0)\n', (9820, 9823), True, 'import horovod.tensorflow as hvd\n'), ((10726, 10768), 'sklearn.metrics.f1_score', 'f1_score', (['label_id', 'label'], {'average': '"""macro"""'}), "(label_id, label, average='macro')\n", (10734, 10768), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\n'), ((10792, 10823), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['label_id', 'label'], {}), '(label_id, label)\n', (10806, 10823), False, 'from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\n'), ((12316, 12326), 'horovod.tensorflow.rank', 'hvd.rank', ([], {}), '()\n', (12324, 12326), True, 'import horovod.tensorflow as hvd\n'), ((3947, 3957), 'horovod.tensorflow.size', 'hvd.size', ([], {}), '()\n', (3955, 3957), True, 'import horovod.tensorflow as hvd\n'), ((6952, 6981), 'tensorflow.cast', 'tf.cast', (['pred_label', 'tf.int32'], {}), '(pred_label, tf.int32)\n', (6959, 6981), True, 'import tensorflow as tf\n'), ((6999, 7039), 'tensorflow.cast', 'tf.cast', (["features['label_ids']", 'tf.int32'], {}), "(features['label_ids'], tf.int32)\n", (7006, 7039), True, 'import tensorflow as tf\n'), ((7092, 7120), 'tensorflow.cast', 'tf.cast', (['correct', 'tf.float32'], {}), '(correct, tf.float32)\n', (7099, 7120), True, 'import tensorflow as tf\n'), ((4286, 4296), 'horovod.tensorflow.size', 'hvd.size', ([], {}), '()\n', (4294, 4296), True, 'import horovod.tensorflow as hvd\n'), ((8475, 8489), 'tensorflow.to_int32', 'tf.to_int32', (['t'], 
{}), '(t)\n', (8486, 8489), True, 'import tensorflow as tf\n'), ((11313, 11341), 'numpy.mod', 'np.mod', (['i', 'num_storage_steps'], {}), '(i, num_storage_steps)\n', (11319, 11341), True, 'import numpy as np\n'), ((11559, 11569), 'horovod.tensorflow.rank', 'hvd.rank', ([], {}), '()\n', (11567, 11569), True, 'import horovod.tensorflow as hvd\n')] |
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "<NAME>" at 11:04, 19/07/2020 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieu1995 %
#-------------------------------------------------------------------------------------------------------%
from numpy import array
from permetrics.regression import Metrics
## 1-D array
y_true = array([3, -0.5, 2, 7])
y_pred = array([2.5, 0.0, 2, 8])
y_true2 = array([3, -0.5, 2, 7])
y_pred2 = array([2.5, 0.0, 2, 9])
### C1. Using OOP style - very powerful when calculating multiple metrics
obj1 = Metrics(y_true, y_pred) # Pass the data here
result = obj1.pearson_correlation_index(clean=True, decimal=5)
print(f"1-D array, OOP style: {result}")
### C2. Using functional style
obj2 = Metrics()
result = obj2.pearson_correlation_index(clean=True, decimal=5, y_true=y_true2, y_pred=y_pred2)
# Pass the data here, remember the keywords (y_true, y_pred)
print(f"1-D array, Functional style: {result}")
## > 1-D array - Multi-dimensional Array
y_true = array([[0.5, 1], [-1, 1], [7, -6]])
y_pred = array([[0, 2], [-1, 2], [8, -5]])
multi_outputs = [None, "raw_values", [0.3, 1.2], array([0.5, 0.2]), (0.1, 0.9)]
obj3 = Metrics(y_true, y_pred)
for multi_output in multi_outputs:
result = obj3.pearson_correlation_index(clean=True, multi_output=multi_output, decimal=5)
print(f"n-D array, OOP style: {result}") | [
"permetrics.regression.Metrics",
"numpy.array"
] | [((824, 846), 'numpy.array', 'array', (['[3, -0.5, 2, 7]'], {}), '([3, -0.5, 2, 7])\n', (829, 846), False, 'from numpy import array\n'), ((856, 879), 'numpy.array', 'array', (['[2.5, 0.0, 2, 8]'], {}), '([2.5, 0.0, 2, 8])\n', (861, 879), False, 'from numpy import array\n'), ((891, 913), 'numpy.array', 'array', (['[3, -0.5, 2, 7]'], {}), '([3, -0.5, 2, 7])\n', (896, 913), False, 'from numpy import array\n'), ((924, 947), 'numpy.array', 'array', (['[2.5, 0.0, 2, 9]'], {}), '([2.5, 0.0, 2, 9])\n', (929, 947), False, 'from numpy import array\n'), ((1030, 1053), 'permetrics.regression.Metrics', 'Metrics', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1037, 1053), False, 'from permetrics.regression import Metrics\n'), ((1219, 1228), 'permetrics.regression.Metrics', 'Metrics', ([], {}), '()\n', (1226, 1228), False, 'from permetrics.regression import Metrics\n'), ((1484, 1519), 'numpy.array', 'array', (['[[0.5, 1], [-1, 1], [7, -6]]'], {}), '([[0.5, 1], [-1, 1], [7, -6]])\n', (1489, 1519), False, 'from numpy import array\n'), ((1529, 1562), 'numpy.array', 'array', (['[[0, 2], [-1, 2], [8, -5]]'], {}), '([[0, 2], [-1, 2], [8, -5]])\n', (1534, 1562), False, 'from numpy import array\n'), ((1651, 1674), 'permetrics.regression.Metrics', 'Metrics', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1658, 1674), False, 'from permetrics.regression import Metrics\n'), ((1613, 1630), 'numpy.array', 'array', (['[0.5, 0.2]'], {}), '([0.5, 0.2])\n', (1618, 1630), False, 'from numpy import array\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import time
import joblib
import tqdm
import glob
import imageio
import copy
import numpy as np
from typing import ClassVar, Dict, List
from collections import defaultdict
import torch
import habitat
from habitat import Config, logger
from habitat.utils.visualizations import maps
from pointnav_vo.utils.tensorboard_utils import TensorboardWriter
from pointnav_vo.vis.utils import resize_top_down_map
from pointnav_vo.vo.common.common_vars import *
# Small constant added to denominators to avoid division by zero when
# computing relative pose-delta errors in _eval_ckpt_post_process().
EPSILON = 1e-8
class BaseTrainer:
    r"""Skeleton shared by every trainer implementation (RL, SLAM,
    imitation learning, ...).

    Declares only the minimal interface; every hook below raises until a
    concrete subclass overrides it.
    """
    # Task names a concrete trainer is able to handle.
    supported_tasks: ClassVar[List[str]]
    def train(self) -> None:
        """Run training. Must be overridden by subclasses."""
        raise NotImplementedError
    def eval(self) -> None:
        """Run evaluation. Must be overridden by subclasses."""
        raise NotImplementedError
    def save_checkpoint(self, file_name) -> None:
        """Persist trainer state under ``file_name``. Must be overridden."""
        raise NotImplementedError
    def load_checkpoint(self, checkpoint_path, *args, **kwargs) -> Dict:
        """Load trainer state from ``checkpoint_path``. Must be overridden."""
        raise NotImplementedError
class BaseRLTrainer(BaseTrainer):
    r"""Base trainer class for RL trainers. Future RL-specific
    methods should be hosted here.
    """
    device: torch.device
    config: Config
    video_option: List[str]
    _flush_secs: int
    def __init__(self, config: Config):
        """Store the config and read the RGB/depth corruption schedules
        out of ``config.TASK_CONFIG.SIMULATOR.CORRUPTIONS``.
        """
        super().__init__()
        assert config is not None, "needs config file to initialize trainer"
        self.config = config
        # Seconds between TensorBoard flushes (consumed by eval()).
        self._flush_secs = 30
        # Define Corruptions
        self.corruptions_sequence = (
            config.TASK_CONFIG.SIMULATOR.CORRUPTIONS.CORRUPTIONS_SEQUENCE
        )
        self.severity_sequence = (
            config.TASK_CONFIG.SIMULATOR.CORRUPTIONS.SEVERITY_SEQUENCE
        )
        self.corruptions_sequence_depth= (
            config.TASK_CONFIG.SIMULATOR.CORRUPTIONS.CORRUPTIONS_SEQUENCE_DEPTH
        )
        self.severity_sequence_depth = (
            config.TASK_CONFIG.SIMULATOR.CORRUPTIONS.SEVERITY_SEQUENCE_DEPTH
        )
    @property
    def flush_secs(self):
        # TensorBoard flush interval in seconds.
        return self._flush_secs
    @flush_secs.setter
    def flush_secs(self, value: int):
        self._flush_secs = value
    def train(self) -> None:
        """Main training loop; implemented by concrete RL trainers."""
        raise NotImplementedError
    def eval(self) -> None:
        r"""Main method of trainer evaluation. Calls _eval_checkpoint() that
        is specified in Trainer class that inherits from BaseRLTrainer
        Returns:
            None
        """
        # Prefer the configured GPU; fall back to CPU.
        self.device = (
            torch.device("cuda", self.config.TORCH_GPU_ID)
            if torch.cuda.is_available()
            else torch.device("cpu")
        )
        if "tensorboard" in self.config.VIDEO_OPTION:
            assert (
                len(self.config.TENSORBOARD_DIR) > 0
            ), "Must specify a tensorboard directory for video display"
        if "disk" in self.config.VIDEO_OPTION:
            assert (
                len(self.config.VIDEO_DIR) > 0
            ), "Must specify a directory for storing videos on disk"
        with TensorboardWriter(
            self.config.TENSORBOARD_DIR, flush_secs=self.flush_secs
        ) as writer:
            if os.path.isfile(self.config.EVAL.EVAL_CKPT_PATH):
                # evaluate singe checkpoint
                eval_f_list = [self.config.EVAL.EVAL_CKPT_PATH]
            else:
                # evaluate multiple checkpoints in order
                eval_f_list = list(
                    glob.glob(
                        os.path.join(self.config.EVAL.EVAL_CKPT_PATH, "*.pth")
                    )
                )
                # Order checkpoints by modification time (oldest first).
                eval_f_list = sorted(
                    eval_f_list, key=lambda x: os.stat(x).st_mtime
                )
            for ckpt_id, current_ckpt in tqdm.tqdm(enumerate(eval_f_list)):
                logger.info(f"======= current_ckpt: {current_ckpt} =======\n")
                (
                    current_episode_result,
                    current_overall_result,
                ) = self._eval_checkpoint(
                    checkpoint_path=current_ckpt,
                    writer=writer,
                    checkpoint_index=ckpt_id,
                )
                try:
                    # assume the file name is ckpt_XX.update_XX.frames_XX.pth
                    current_ckpt_filename = os.path.basename(current_ckpt)
                    current_frame = int(
                        current_ckpt_filename.split("frames_")[1].split(".")[0]
                    )
                    current_overall_result["frames"] = [current_frame]
                except:
                    # NOTE(review): bare ``except`` also swallows
                    # KeyboardInterrupt; consider narrowing to
                    # (IndexError, ValueError).
                    current_overall_result["frames"] = [None]
                # Per-episode results go to a checkpoint-specific pickle ...
                self._save_info_dict(
                    current_episode_result,
                    os.path.join(
                        self.config.INFO_DIR,
                        "{}.infos.p".format(
                            current_ckpt_filename.split(".pth")[0]
                        ),
                    ),
                )
                # ... while overall metrics accumulate in one shared file.
                self._save_info_dict(
                    current_overall_result,
                    os.path.join(self.config.INFO_DIR, "eval_infos.p"),
                )
                if (
                    self.config.EVAL.SAVE_RANKED_IMGS
                    and self.config.VO.use_vo_model
                ):
                    logger.info("Start post processing ...\n")
                    self._eval_ckpt_post_process(current_episode_result)
                    logger.info("... post processing done.\n")
    def _eval_ckpt_post_process(self, ckpt_eval_result):
        """For every scene in *ckpt_eval_result*, rank per-step VO pose
        errors (absolute and relative, per delta component and action) and
        save the top-k worst steps as stacked map/RGB/depth images.
        """
        cur_config = ckpt_eval_result["config"]
        top_k = cur_config.EVAL.RANK_TOP_K
        # Every key except "config" is a scene entry.
        for k in tqdm.tqdm(ckpt_eval_result):
            if k != "config":
                # delta component -> action -> {"abs": [...], "rel": [...]}
                delta_type_dict = {
                    "dx": defaultdict(lambda: defaultdict(list)),
                    "dz": defaultdict(lambda: defaultdict(list)),
                    "dyaw": defaultdict(lambda: defaultdict(list)),
                }
                # sort all steps in this scene
                for episode_info in tqdm.tqdm(ckpt_eval_result[k].values()):
                    cur_map_info = episode_info["map"]
                    for tmp in episode_info["traj"]:
                        step_info = copy.deepcopy(tmp)
                        step_info["map"] = cur_map_info
                        act = ACT_IDX2NAME[step_info["action"]]
                        for i, d_type in enumerate(["dx", "dz", "dyaw"]):
                            # Absolute error |gt - pred| per component.
                            step_info[f"{d_type}_abs"] = np.abs(
                                step_info["gt_delta"][i]
                                - step_info["pred_delta"][i]
                            )
                            # Relative error; EPSILON guards gt == 0.
                            step_info[f"{d_type}_rel"] = np.abs(
                                step_info["gt_delta"][i]
                                - step_info["pred_delta"][i]
                            ) / (np.abs(step_info["gt_delta"][i]) + EPSILON)
                            delta_type_dict[d_type][act][f"abs"].append(
                                step_info
                            )
                            delta_type_dict[d_type][act][f"rel"].append(
                                step_info
                            )
                # Keep only the top_k worst steps per (component, action).
                for d_type in ["dx", "dz", "dyaw"]:
                    for act in delta_type_dict[d_type]:
                        ranked_list_abs = delta_type_dict[d_type][act][
                            f"abs"
                        ]
                        ranked_list_abs = sorted(
                            ranked_list_abs,
                            key=lambda x: x[f"{d_type}_abs"],
                            reverse=True,
                        )
                        delta_type_dict[d_type][act][
                            "abs"
                        ] = ranked_list_abs[:top_k]
                        ranked_list_rel = delta_type_dict[d_type][act][
                            "rel"
                        ]
                        ranked_list_rel = sorted(
                            ranked_list_rel,
                            key=lambda x: x[f"{d_type}_rel"],
                            reverse=True,
                        )
                        delta_type_dict[d_type][act][
                            "rel"
                        ] = ranked_list_rel[:top_k]
                # plot figures
                cur_scene = os.path.basename(k).split(".")[0]
                cur_scene_dir = os.path.join(self.config.VIDEO_DIR, cur_scene)
                # NOTE(review): raises FileExistsError when re-run on the
                # same scene; exist_ok=True may be intended — confirm.
                os.makedirs(cur_scene_dir)
                cur_config.defrost()
                cur_config.TASK_CONFIG.DATASET.CONTENT_SCENES = [cur_scene]
                cur_config.TASK_CONFIG.TASK.TOP_DOWN_MAP.TYPE = "TopDownMap"
                cur_config.freeze()
                # Re-open the simulator on this scene to regenerate
                # observations at the saved agent states.
                with habitat.Env(config=cur_config.TASK_CONFIG) as env:
                    for i, d_type in enumerate(["dx", "dz", "dyaw"]):
                        for compare_type in ["abs", "rel"]:
                            cur_d_dir = os.path.join(
                                cur_scene_dir, f"{d_type}_{compare_type}"
                            )
                            os.makedirs(cur_d_dir, exist_ok=False)
                            for act in delta_type_dict[d_type]:
                                ranked_list = delta_type_dict[d_type][act][
                                    compare_type
                                ]
                                assert len(ranked_list) == top_k
                                for j, step_info in enumerate(ranked_list):
                                    # obtain observation
                                    prev_obs = env._sim.get_observations_at(
                                        position=step_info["prev_agent_state"][
                                            "position"
                                        ],
                                        rotation=step_info["prev_agent_state"][
                                            "rotation"
                                        ],
                                        keep_agent_at_new_pose=False,
                                    )
                                    cur_obs = env._sim.get_observations_at(
                                        position=step_info["cur_agent_state"][
                                            "position"
                                        ],
                                        rotation=step_info["cur_agent_state"][
                                            "rotation"
                                        ],
                                        keep_agent_at_new_pose=False,
                                    )
                                    prev_rgb = prev_obs["rgb"].astype(np.uint8)
                                    cur_rgb = cur_obs["rgb"].astype(np.uint8)
                                    # Tile single-channel depth to 3 channels
                                    # and rescale to uint8 for saving.
                                    prev_depth = (
                                        np.repeat(prev_obs["depth"], 3, axis=2)
                                        * 255.0
                                    ).astype(np.uint8)
                                    cur_depth = (
                                        np.repeat(cur_obs["depth"], 3, axis=2)
                                        * 255.0
                                    ).astype(np.uint8)
                                    # plot map
                                    prev_top_down_map = self._get_top_down_map(
                                        step_info, "prev", cur_rgb.shape[0]
                                    )
                                    cur_top_down_map = self._get_top_down_map(
                                        step_info, "cur", cur_rgb.shape[0]
                                    )
                                    # set layout of the image
                                    first_row = np.concatenate(
                                        (
                                            prev_top_down_map,
                                            prev_rgb,
                                            prev_depth,
                                        ),
                                        axis=1,
                                    )
                                    second_row = np.concatenate(
                                        (cur_top_down_map, cur_rgb, cur_depth),
                                        axis=1,
                                    )
                                    out_img = np.concatenate(
                                        (first_row, second_row), axis=0,
                                    )
                                    tmp_k = f"{d_type}_{compare_type}"
                                    # File name encodes rank, gt/pred values,
                                    # error magnitude and collision flag.
                                    out_f = os.path.join(
                                        cur_d_dir,
                                        f"{act}-rank_{j:02d}-gt_{step_info['gt_delta'][i]:.3f}-"
                                        f"pred_{step_info['pred_delta'][i]:.3f}-"
                                        f"{compare_type}_{step_info[tmp_k]:.3f}-"
                                        f"collision_{step_info['collision']}.png",
                                    )
                                    imageio.imsave(out_f, out_img)
    def _get_top_down_map(self, step_info, state_k, target_size):
        """Colorize the scene's blank top-down map, locate the agent given
        the ``{state_k}_agent_state`` entry ("prev" or "cur"), and
        optionally resize the map to *target_size*.
        """
        map_info = step_info["map"]
        top_down_map = map_info["blank_top_down_map"]
        top_down_map = maps.colorize_topdown_map(top_down_map)
        map_agent_x, map_agent_y = maps.to_grid(
            step_info[f"{state_k}_agent_state"]["position"][0],  # x
            step_info[f"{state_k}_agent_state"]["position"][2],  # z
            map_info["coordinate_min"],
            map_info["coordinate_max"],
            map_info["map_resolution"],
        )
        # Shift grid coordinates into the cropped map's frame.
        agent_map_coord = (
            map_agent_x - (map_info["ind_x_min"] - map_info["grid_delta"]),
            map_agent_y - (map_info["ind_y_min"] - map_info["grid_delta"]),
        )
        if self.config.EVAL.RESIZE_TOPDOWN_MAP:
            top_down_map = resize_top_down_map(
                top_down_map,
                [[agent_map_coord, step_info[f"{state_k}_agent_angle"]]],
                target_size,
            )
        return top_down_map
    def _setup_eval_config(self, checkpoint_config: Config) -> Config:
        r"""Sets up and returns a merged config for evaluation. Config
        object saved from checkpoint is merged into config file specified
        at evaluation time with the following overwrite priority:
        eval_opts > ckpt_opts > eval_cfg > ckpt_cfg
        If the saved config is outdated, only the eval config is returned.
        Args:
            checkpoint_config: saved config from checkpoint.
        Returns:
            Config: merged config for eval.
        """
        config = self.config.clone()
        ckpt_cmd_opts = checkpoint_config.CMD_TRAILING_OPTS
        eval_cmd_opts = config.CMD_TRAILING_OPTS
        try:
            config.merge_from_other_cfg(checkpoint_config)
            config.merge_from_other_cfg(self.config)
            config.merge_from_list(ckpt_cmd_opts)
            config.merge_from_list(eval_cmd_opts)
        except KeyError:
            logger.info("Saved config is outdated, using solely eval config")
            config = self.config.clone()
            config.merge_from_list(eval_cmd_opts)
        # Never evaluate on the training split.
        if config.TASK_CONFIG.DATASET.SPLIT == "train":
            config.TASK_CONFIG.defrost()
            config.TASK_CONFIG.DATASET.SPLIT = "val"
            config.TASK_CONFIG.freeze()
        config.TASK_CONFIG.defrost()
        config.TASK_CONFIG.SIMULATOR.AGENT_0.SENSORS = self.config.SENSORS
        config.freeze()
        return config
    def _eval_checkpoint(
        self,
        checkpoint_path: str,
        writer: TensorboardWriter,
        checkpoint_index: int = 0,
    ) -> None:
        r"""Evaluates a single checkpoint. Trainer algorithms should
        implement this.
        Args:
            checkpoint_path: path of checkpoint
            writer: tensorboard writer object for logging to tensorboard
            checkpoint_index: index of cur checkpoint for logging
        Returns:
            None
        """
        raise NotImplementedError
    def save_checkpoint(self, file_name) -> None:
        raise NotImplementedError
    def load_checkpoint(self, checkpoint_path, *args, **kwargs) -> Dict:
        raise NotImplementedError
    @staticmethod
    def _pause_envs(
        envs_to_pause,
        envs,
        test_recurrent_hidden_states,
        not_done_masks,
        current_episode_reward,
        prev_actions,
        batch,
        rgb_frames,
    ):
        """Pause the vectorized envs listed in *envs_to_pause* and drop the
        matching rows from every per-env tensor/list; returns the pruned
        state in the same order it was passed in.
        """
        # pausing self.envs with no new episode
        if len(envs_to_pause) > 0:
            state_index = list(range(envs.num_envs))
            # reversed() keeps earlier indices valid while popping.
            for idx in reversed(envs_to_pause):
                state_index.pop(idx)
                envs.pause_at(idx)
            # indexing along the batch dimensions
            test_recurrent_hidden_states = test_recurrent_hidden_states[
                :, state_index
            ]
            not_done_masks = not_done_masks[state_index]
            current_episode_reward = current_episode_reward[state_index]
            prev_actions = prev_actions[state_index]
            for k, v in batch.items():
                try:
                    batch[k] = v[state_index]
                except:
                    # NOTE(review): bare except silently keeps the old
                    # value after printing; confirm this best-effort
                    # behavior is intended.
                    print(
                        f"\nin base_trainer.py _pause_envs(): {k}, {len(v)}, {state_index}, {envs_to_pause}\n"
                    )
            rgb_frames = [rgb_frames[i] for i in state_index]
        return (
            envs,
            test_recurrent_hidden_states,
            not_done_masks,
            current_episode_reward,
            prev_actions,
            batch,
            rgb_frames,
        )
    def _save_info_dict(self, save_dict: Dict[str, List], f_path: str):
        """Merge *save_dict* into the joblib pickle at *f_path* (list values
        are extended key-wise) and write it back lz4-compressed.
        """
        if not os.path.isfile(f_path):
            tmp_dict = save_dict
        else:
            with open(f_path, "rb") as f:
                tmp_dict = joblib.load(f)
        # NOTE(review): when f_path does not exist, tmp_dict aliases
        # save_dict, so this loop extends each list with itself; it looks
        # like it was meant to run only in the else-branch — confirm.
        for k, v in save_dict.items():
            if k in tmp_dict:
                tmp_dict[k].extend(v)
            else:
                tmp_dict[k] = v
        with open(f_path, "wb") as f:
            joblib.dump(tmp_dict, f, compress="lz4")
| [
"imageio.imsave",
"habitat.utils.visualizations.maps.colorize_topdown_map",
"torch.cuda.is_available",
"copy.deepcopy",
"numpy.repeat",
"numpy.concatenate",
"joblib.load",
"habitat.Env",
"habitat.utils.visualizations.maps.to_grid",
"joblib.dump",
"numpy.abs",
"pointnav_vo.utils.tensorboard_uti... | [((5880, 5907), 'tqdm.tqdm', 'tqdm.tqdm', (['ckpt_eval_result'], {}), '(ckpt_eval_result)\n', (5889, 5907), False, 'import tqdm\n'), ((13760, 13799), 'habitat.utils.visualizations.maps.colorize_topdown_map', 'maps.colorize_topdown_map', (['top_down_map'], {}), '(top_down_map)\n', (13785, 13799), False, 'from habitat.utils.visualizations import maps\n'), ((13836, 14045), 'habitat.utils.visualizations.maps.to_grid', 'maps.to_grid', (["step_info[f'{state_k}_agent_state']['position'][0]", "step_info[f'{state_k}_agent_state']['position'][2]", "map_info['coordinate_min']", "map_info['coordinate_max']", "map_info['map_resolution']"], {}), "(step_info[f'{state_k}_agent_state']['position'][0], step_info[\n f'{state_k}_agent_state']['position'][2], map_info['coordinate_min'],\n map_info['coordinate_max'], map_info['map_resolution'])\n", (13848, 14045), False, 'from habitat.utils.visualizations import maps\n'), ((2783, 2808), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2806, 2808), False, 'import torch\n'), ((2721, 2767), 'torch.device', 'torch.device', (['"""cuda"""', 'self.config.TORCH_GPU_ID'], {}), "('cuda', self.config.TORCH_GPU_ID)\n", (2733, 2767), False, 'import torch\n'), ((2826, 2845), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2838, 2845), False, 'import torch\n'), ((3256, 3330), 'pointnav_vo.utils.tensorboard_utils.TensorboardWriter', 'TensorboardWriter', (['self.config.TENSORBOARD_DIR'], {'flush_secs': 'self.flush_secs'}), '(self.config.TENSORBOARD_DIR, flush_secs=self.flush_secs)\n', (3273, 3330), False, 'from pointnav_vo.utils.tensorboard_utils import TensorboardWriter\n'), ((3380, 3427), 'os.path.isfile', 'os.path.isfile', (['self.config.EVAL.EVAL_CKPT_PATH'], {}), '(self.config.EVAL.EVAL_CKPT_PATH)\n', (3394, 3427), False, 'import os\n'), ((14384, 14493), 'pointnav_vo.vis.utils.resize_top_down_map', 'resize_top_down_map', (['top_down_map', "[[agent_map_coord, 
step_info[f'{state_k}_agent_angle']]]", 'target_size'], {}), "(top_down_map, [[agent_map_coord, step_info[\n f'{state_k}_agent_angle']]], target_size)\n", (14403, 14493), False, 'from pointnav_vo.vis.utils import resize_top_down_map\n'), ((18312, 18334), 'os.path.isfile', 'os.path.isfile', (['f_path'], {}), '(f_path)\n', (18326, 18334), False, 'import os\n'), ((18714, 18754), 'joblib.dump', 'joblib.dump', (['tmp_dict', 'f'], {'compress': '"""lz4"""'}), "(tmp_dict, f, compress='lz4')\n", (18725, 18754), False, 'import joblib\n'), ((4014, 4076), 'habitat.logger.info', 'logger.info', (['f"""======= current_ckpt: {current_ckpt} =======\n"""'], {}), "(f'======= current_ckpt: {current_ckpt} =======\\n')\n", (4025, 4076), False, 'from habitat import Config, logger\n'), ((8768, 8814), 'os.path.join', 'os.path.join', (['self.config.VIDEO_DIR', 'cur_scene'], {}), '(self.config.VIDEO_DIR, cur_scene)\n', (8780, 8814), False, 'import os\n'), ((8831, 8857), 'os.makedirs', 'os.makedirs', (['cur_scene_dir'], {}), '(cur_scene_dir)\n', (8842, 8857), False, 'import os\n'), ((15574, 15639), 'habitat.logger.info', 'logger.info', (['"""Saved config is outdated, using solely eval config"""'], {}), "('Saved config is outdated, using solely eval config')\n", (15585, 15639), False, 'from habitat import Config, logger\n'), ((18452, 18466), 'joblib.load', 'joblib.load', (['f'], {}), '(f)\n', (18463, 18466), False, 'import joblib\n'), ((4520, 4550), 'os.path.basename', 'os.path.basename', (['current_ckpt'], {}), '(current_ckpt)\n', (4536, 4550), False, 'import os\n'), ((5296, 5346), 'os.path.join', 'os.path.join', (['self.config.INFO_DIR', '"""eval_infos.p"""'], {}), "(self.config.INFO_DIR, 'eval_infos.p')\n", (5308, 5346), False, 'import os\n'), ((5533, 5575), 'habitat.logger.info', 'logger.info', (['"""Start post processing ...\n"""'], {}), "('Start post processing ...\\n')\n", (5544, 5575), False, 'from habitat import Config, logger\n'), ((5669, 5711), 'habitat.logger.info', 'logger.info', 
(['"""... post processing done.\n"""'], {}), "('... post processing done.\\n')\n", (5680, 5711), False, 'from habitat import Config, logger\n'), ((9107, 9149), 'habitat.Env', 'habitat.Env', ([], {'config': 'cur_config.TASK_CONFIG'}), '(config=cur_config.TASK_CONFIG)\n', (9118, 9149), False, 'import habitat\n'), ((3703, 3757), 'os.path.join', 'os.path.join', (['self.config.EVAL.EVAL_CKPT_PATH', '"""*.pth"""'], {}), "(self.config.EVAL.EVAL_CKPT_PATH, '*.pth')\n", (3715, 3757), False, 'import os\n'), ((6465, 6483), 'copy.deepcopy', 'copy.deepcopy', (['tmp'], {}), '(tmp)\n', (6478, 6483), False, 'import copy\n'), ((6022, 6039), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6033, 6039), False, 'from collections import defaultdict\n'), ((6088, 6105), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6099, 6105), False, 'from collections import defaultdict\n'), ((6156, 6173), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6167, 6173), False, 'from collections import defaultdict\n'), ((6735, 6796), 'numpy.abs', 'np.abs', (["(step_info['gt_delta'][i] - step_info['pred_delta'][i])"], {}), "(step_info['gt_delta'][i] - step_info['pred_delta'][i])\n", (6741, 6796), True, 'import numpy as np\n'), ((8702, 8721), 'os.path.basename', 'os.path.basename', (['k'], {}), '(k)\n', (8718, 8721), False, 'import os\n'), ((9329, 9384), 'os.path.join', 'os.path.join', (['cur_scene_dir', 'f"""{d_type}_{compare_type}"""'], {}), "(cur_scene_dir, f'{d_type}_{compare_type}')\n", (9341, 9384), False, 'import os\n'), ((9475, 9513), 'os.makedirs', 'os.makedirs', (['cur_d_dir'], {'exist_ok': '(False)'}), '(cur_d_dir, exist_ok=False)\n', (9486, 9513), False, 'import os\n'), ((3883, 3893), 'os.stat', 'os.stat', (['x'], {}), '(x)\n', (3890, 3893), False, 'import os\n'), ((6948, 7009), 'numpy.abs', 'np.abs', (["(step_info['gt_delta'][i] - step_info['pred_delta'][i])"], {}), "(step_info['gt_delta'][i] - step_info['pred_delta'][i])\n", 
(6954, 7009), True, 'import numpy as np\n'), ((7107, 7139), 'numpy.abs', 'np.abs', (["step_info['gt_delta'][i]"], {}), "(step_info['gt_delta'][i])\n", (7113, 7139), True, 'import numpy as np\n'), ((12186, 12251), 'numpy.concatenate', 'np.concatenate', (['(prev_top_down_map, prev_rgb, prev_depth)'], {'axis': '(1)'}), '((prev_top_down_map, prev_rgb, prev_depth), axis=1)\n', (12200, 12251), True, 'import numpy as np\n'), ((12595, 12657), 'numpy.concatenate', 'np.concatenate', (['(cur_top_down_map, cur_rgb, cur_depth)'], {'axis': '(1)'}), '((cur_top_down_map, cur_rgb, cur_depth), axis=1)\n', (12609, 12657), True, 'import numpy as np\n'), ((12823, 12870), 'numpy.concatenate', 'np.concatenate', (['(first_row, second_row)'], {'axis': '(0)'}), '((first_row, second_row), axis=0)\n', (12837, 12870), True, 'import numpy as np\n'), ((13066, 13270), 'os.path.join', 'os.path.join', (['cur_d_dir', 'f"""{act}-rank_{j:02d}-gt_{step_info[\'gt_delta\'][i]:.3f}-pred_{step_info[\'pred_delta\'][i]:.3f}-{compare_type}_{step_info[tmp_k]:.3f}-collision_{step_info[\'collision\']}.png"""'], {}), '(cur_d_dir,\n f"{act}-rank_{j:02d}-gt_{step_info[\'gt_delta\'][i]:.3f}-pred_{step_info[\'pred_delta\'][i]:.3f}-{compare_type}_{step_info[tmp_k]:.3f}-collision_{step_info[\'collision\']}.png"\n )\n', (13078, 13270), False, 'import os\n'), ((13549, 13579), 'imageio.imsave', 'imageio.imsave', (['out_f', 'out_img'], {}), '(out_f, out_img)\n', (13563, 13579), False, 'import imageio\n'), ((11266, 11305), 'numpy.repeat', 'np.repeat', (["prev_obs['depth']", '(3)'], {'axis': '(2)'}), "(prev_obs['depth'], 3, axis=2)\n", (11275, 11305), True, 'import numpy as np\n'), ((11499, 11537), 'numpy.repeat', 'np.repeat', (["cur_obs['depth']", '(3)'], {'axis': '(2)'}), "(cur_obs['depth'], 3, axis=2)\n", (11508, 11537), True, 'import numpy as np\n')] |
#!/usr/bin/python
import numpy as np
from math import atan2, sin, cos, pi
class DiffDriveController():
    """
    Proportional (rho, alpha, beta) controller that steers a differential
    drive robot toward a 2-D goal point with saturated commands.
    """
    def __init__(self, max_speed, max_omega):
        # Gains for the distance (rho), heading-error (alpha) and
        # bearing (beta) terms of the control law.
        self.kp= 0.5 #0.3
        self.ka= 2.0 #4
        self.kb= 0.001 #0.01
        self.MAX_SPEED = max_speed
        self.MAX_OMEGA = max_omega
        # The goal counts as reached once rho drops below this radius.
        self.target_rho = 1.0
    def update_target_rho(self, new_rho):
        """Change the goal-reached distance threshold."""
        self.target_rho = new_rho
    def compute_vel(self, state, goal):
        """
        Compute saturated velocity commands toward *goal*.

        Inputs:
            state - sequence (x, y, theta) of the robot pose
            goal  - sequence (x, y) of the goal position
        Outputs: a 5-tuple
            v        - forward speed, clamped to [-MAX_SPEED, MAX_SPEED]
            omega    - angular velocity, clamped to [-MAX_OMEGA, MAX_OMEGA]
            done     - True once the robot is within target_rho of the goal
            alpha    - heading error wrapped once into [-pi, pi)
            pos_beta - bearing to the goal (positive atan2 convention)
        """
        goal_dx = goal[0] - state[0]
        goal_dy = goal[1] - state[1]
        heading = state[2]
        # Distance and bearing to the goal.
        rho = np.sqrt(goal_dx**2 + goal_dy**2)
        # Beta defined with +atan2, which is why kb > 0 above.
        pos_beta = atan2(goal_dy, goal_dx)
        # Heading error, wrapped once into [-pi, pi).
        alpha = pos_beta - heading
        if(alpha >= pi):
            alpha -= 2*pi
        elif(alpha < -pi):
            alpha += 2*pi
        # Saturate the linear and angular commands.
        v = min(max(self.kp * rho, -self.MAX_SPEED), self.MAX_SPEED)
        w = min(max(self.ka*alpha + self.kb*pos_beta, -self.MAX_OMEGA),
                self.MAX_OMEGA)
        # Stop entirely once inside the goal circle.
        done = bool(rho < self.target_rho)
        if done:
            v = 0.0
            w = 0.0
        return v,w,done, alpha, pos_beta
| [
"numpy.sqrt",
"math.atan2"
] | [((1460, 1486), 'numpy.sqrt', 'np.sqrt', (['(dx ** 2 + dy ** 2)'], {}), '(dx ** 2 + dy ** 2)\n', (1467, 1486), True, 'import numpy as np\n'), ((1502, 1515), 'math.atan2', 'atan2', (['dy', 'dx'], {}), '(dy, dx)\n', (1507, 1515), False, 'from math import atan2, sin, cos, pi\n')] |
# -*- coding:utf8 -*-
import json, sys, csv, math
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
class GraphConfig:
    """Typed view over the ``graph`` section of the JSON configuration."""
    def __init__(self, config:dict):
        """Copy the expected keys of *config* onto same-named attributes.

        Raises KeyError if any expected key is missing, exactly like the
        direct-indexing equivalent.
        """
        attr_to_key = {
            'FileName': 'output_file_name',
            'Dpi': 'dpi',
            'Width': 'graph_width',
            'Height': 'graph_height',
            'MinPeriod': 'min_period',
            'MaxPeriod': 'max_period',
            'MinVel': 'min_vel',
            'MaxVel': 'max_vel',
            'XLabel': 'x_label',
            'YLabel': 'y_label',
            'AccLabel': 'acc_label',
            'DispLabel': 'disp_label',
            'EigenPeriod': 'eigen_period',
        }
        for attr, key in attr_to_key.items():
            setattr(self, attr, config[key])
class Spectrum:
    """One spectrum curve: CSV location, column layout and plot color."""
    def __init__(self, item):
        self.Name = item['name']
        self.Path = item['path']
        self.Color = item['color']
        self.SkipRow = item['skip_row']
        # The config uses 1-based column numbers; store them 0-based
        # so they can index pandas columns directly.
        for attr, key in (('PeriodCol', 'period_col'),
                          ('SpectrumCol', 'spectrum_col')):
            setattr(self, attr, item[key] - 1)
def graph_init():
    """Create the tripartite figure and its single axes pair."""
    figure = plt.figure(figsize=(graph.Width, graph.Height))
    axes = figure.add_subplot(1, 1, 1)
    # Leave room on the left for the velocity-axis tick labels.
    plt.subplots_adjust(left=0.15)
    return figure, axes
def graph_format(ax):
    """Apply the tripartite log-log formatting to *ax*: axis ranges from the
    global ``graph`` config, both-axis grid, legend, plain decimal tick
    labels and axis titles.
    """
    ax.set_xlim(graph.MinPeriod, graph.MaxPeriod)
    ax.set_ylim(graph.MinVel, graph.MaxVel)
    ax.set_xscale('log')
    ax.set_yscale('log')
    ax.grid(which="both")
    ax.legend()
    # Show plain decimal numbers instead of matplotlib's default
    # power-of-ten tick notation.
    ax.get_xaxis().set_major_formatter(plt.FormatStrFormatter('%.2f'))
    ax.get_yaxis().set_major_formatter(plt.FormatStrFormatter('%.0f'))
    ax.set_xlabel(graph.XLabel)
    ax.set_ylabel(graph.YLabel)
def read_config(path:str):
    """Load the JSON configuration file at *path* and return it as a dict."""
    with open(path) as fp:
        return json.load(fp)
def read_vel_resp_spectrum(path:str, skip_row:int, period_col:int, spectrum_col:int):
    """Read a header-less CSV response spectrum.

    Skips the first *skip_row* rows and returns ``(periods, values)``
    as two numpy arrays taken from the given 0-based columns.
    """
    table = pd.read_csv(path, header=None, skiprows=skip_row)
    periods = table[period_col].values
    values = table[spectrum_col].values
    return periods, values
def get_grid_lines_of_acc_and_disp():
    """Build the diagonal tripartite grid lines.

    Returns two dicts mapping a constant-acceleration / constant-
    displacement level to its line endpoints ``[x1, x2, y1, y2]`` in
    (period, pseudo-velocity) coordinates.
    """
    x1 = graph.MinPeriod
    x2 = graph.MaxPeriod
    two_pi = 2.0 * np.pi
    # Acceleration diagonals: Sv = Sa / (2*pi / T).
    acc_levels = np.concatenate(
        [np.linspace(int(10**i), int(10**(i+1)), 10) for i in range(4)]
    )
    acc_lines = {
        acc: [x1, x2, acc / (two_pi / x1), acc / (two_pi / x2)]
        for acc in acc_levels
    }
    # Displacement diagonals: Sv = Sd * (2*pi / T).
    disp_levels = np.concatenate(
        [np.linspace(int(0.01*10**i), int(0.01*10**(i+1)), 10) for i in range(4)]
    )
    disp_lines = {
        disp: [x1, x2, disp * (two_pi / x1), disp * (two_pi / x2)]
        for disp in disp_levels
    }
    return acc_lines, disp_lines
def draw_grid(grid_type, fig, ax, label, lines, text_pos, angle, vertalalign):
    """Draw one family of diagonal grid lines and annotate it.

    *lines* maps a level value to its ``[x1, x2, y1, y2]`` endpoints.
    Only power-of-ten levels get a numeric label: acceleration labels go
    at the left end of the line, displacement labels (>= 1 only) at the
    right end. The family title *label* is placed at the figure-relative
    *text_pos* with the same rotation *angle* as the lines.
    """
    for k, v in lines.items():
        x1, x2, y1, y2 = tuple(v)
        ax.plot([x1, x2], [y1, y2], color='darkgray', linewidth='0.5')
        text = '{:.0f}'.format(k)
        if (grid_type == 'acc'):
            # Label only exact powers of ten (log10 is an integer).
            if (np.log10(k)).is_integer():
                ax.text(x1, y1, text, rotation=angle, verticalalignment=vertalalign)
        elif (grid_type == 'disp'):
            # The k >= 1.0 guard also short-circuits log10 on k == 0.
            if k >= 1.0 and (np.log10(k)).is_integer():
                ax.text(x2, y2, text, rotation=angle, verticalalignment=vertalalign)
    fig.text(text_pos[0], text_pos[1], label, rotation=angle)
def draw_grids(fig, ax):
    """Draw both diagonal grid families (acceleration, then displacement)."""
    acc_lines, disp_lines = get_grid_lines_of_acc_and_disp()
    # (grid_type, title, lines, title position, rotation, vertical align)
    grid_specs = [
        ('acc', graph.AccLabel, acc_lines, [0.3, 0.65], 45, 'bottom'),
        ('disp', graph.DispLabel, disp_lines, [0.7, 0.75], -45, 'top'),
    ]
    for grid_type, title, lines, pos, angle, align in grid_specs:
        draw_grid(grid_type, fig, ax, title, lines, pos, angle, align)
def draw_vel_spectrum(ax):
    """Plot every configured velocity response spectrum onto *ax*."""
    for entry in spectra:
        periods, values = read_vel_resp_spectrum(
            entry.Path, entry.SkipRow, entry.PeriodCol, entry.SpectrumCol)
        ax.plot(periods, values, label=entry.Name, color=entry.Color)
def draw_period_line(ax):
    """Mark each eigen period with a dashed vertical line across the
    configured velocity range."""
    y_span = [graph.MinVel, graph.MaxVel]
    for eigen_t in graph.EigenPeriod:
        ax.plot([eigen_t, eigen_t], y_span, color='black', linestyle='dashed')
def draw_tripartite():
    """Render the complete tripartite plot and write it to disk.

    Pipeline order matters: the diagonal grids and spectra must be drawn
    before graph_format() so ax.legend() can pick up the labeled curves.
    """
    fig, ax = graph_init()
    draw_grids(fig, ax)
    draw_vel_spectrum(ax)
    draw_period_line(ax)
    graph_format(ax)
    plt.savefig(graph.FileName, dpi=graph.Dpi)
    plt.show()
if __name__ == '__main__':
    # Load the configuration, build the typed settings objects used as
    # module globals by the drawing helpers, then render the plot.
    config = read_config('./config.json')
    graph = GraphConfig(config['graph'])
    spectra = [Spectrum(entry) for entry in config['spectra']]
    draw_tripartite()
| [
"numpy.log10",
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"matplotlib.pyplot.FormatStrFormatter",
"matplotlib.pyplot.figure",
"json.load",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show"
] | [((1059, 1106), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(graph.Width, graph.Height)'}), '(figsize=(graph.Width, graph.Height))\n', (1069, 1106), True, 'import matplotlib.pyplot as plt\n'), ((1143, 1173), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.15)'}), '(left=0.15)\n', (1162, 1173), True, 'import matplotlib.pyplot as plt\n'), ((1806, 1855), 'pandas.read_csv', 'pd.read_csv', (['path'], {'header': 'None', 'skiprows': 'skip_row'}), '(path, header=None, skiprows=skip_row)\n', (1817, 1855), True, 'import pandas as pd\n'), ((4070, 4112), 'matplotlib.pyplot.savefig', 'plt.savefig', (['graph.FileName'], {'dpi': 'graph.Dpi'}), '(graph.FileName, dpi=graph.Dpi)\n', (4081, 4112), True, 'import matplotlib.pyplot as plt\n'), ((4117, 4127), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4125, 4127), True, 'import matplotlib.pyplot as plt\n'), ((1441, 1471), 'matplotlib.pyplot.FormatStrFormatter', 'plt.FormatStrFormatter', (['"""%.2f"""'], {}), "('%.2f')\n", (1463, 1471), True, 'import matplotlib.pyplot as plt\n'), ((1512, 1542), 'matplotlib.pyplot.FormatStrFormatter', 'plt.FormatStrFormatter', (['"""%.0f"""'], {}), "('%.0f')\n", (1534, 1542), True, 'import matplotlib.pyplot as plt\n'), ((1679, 1691), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1688, 1691), False, 'import json, sys, csv, math\n'), ((2942, 2953), 'numpy.log10', 'np.log10', (['k'], {}), '(k)\n', (2950, 2953), True, 'import numpy as np\n'), ((3119, 3130), 'numpy.log10', 'np.log10', (['k'], {}), '(k)\n', (3127, 3130), True, 'import numpy as np\n')] |
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
#
#
# SCRIPT : merge_data_for_classifier.py
# POURPOSE : TODO: Update
# AUTHOR : <NAME>
# EMAIL : <EMAIL>
#
# V1.0 : XX/XX/XXXX [<NAME>]
#
# TODO: VERFIRY THE OPUTS OF THIS SCRIPT!
#
#
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
import os
import argparse
import random
import string
from glob import glob
from natsort import natsorted
import numpy as np
from skimage.io import imread, imsave
from skimage.color import grey2rgb
import pandas as pd
def random_string(length=16):
    """Return a random lowercase-ASCII string of *length* characters."""
    alphabet = string.ascii_lowercase
    chars = [random.choice(alphabet) for _ in range(length)]
    return ''.join(chars)
if __name__ == "__main__":
print("\nExtracting data for the classifier, please wait...\n")
# Argument parser
parser = argparse.ArgumentParser()
parser.add_argument("--input", "-i",
nargs="*",
action="store",
dest="input",
required=True,
help="Input processed folders.",)
parser.add_argument("--crop-size",
nargs=1,
action="store",
dest="crop_size",
default=[None],
required=False,
help="Output size.",)
parser.add_argument("--target-labels",
nargs="*",
action="store",
dest="target_labels",
default=[4, 5],
required=False,
help="Target labels to consider as wave breaking.",)
parser.add_argument("--output", "-o",
nargs=1,
action="store",
dest="output",
default=["classifier/"],
required=False,
help="Output path.",)
args = parser.parse_args()
# input paths
paths = args.input
# output path
out = args.output[0]
os.makedirs(out, exist_ok=True)
print("\nProcessing labels:\n")
dfs = []
for i, path in enumerate(paths):
if os.path.isdir(path):
print("Processing path {}".format(path))
# read csv file
xls = glob(path+"/*.xlsx")
if xls:
print(" + labels found")
df = pd.read_excel(xls[0])
dfs.append(df)
df = pd.concat(dfs)
# binarize labels
try:
labels = df["label"].values
labels_str = np.ones(labels.shape).astype(str)
except Exception:
raise ValueError("At least one column must be called \'{label}\'.")
if len(np.unique(labels)) > 2:
print("- Warning: more than 2 unique labels in the dataset.")
print(" using \"TARGET_LABELS\" to binarize the dataset.")
for l in np.array(args.target_labels).astype(np.int):
idxs = np.where(labels == l)[0]
labels_str[idxs] = "breaking"
idxs = np.where(labels_str != "breaking")[0]
labels_str[idxs] = "otherwise"
# creat a folder for each labels
folder0 = os.path.join(args.output[0], "0")
folder1 = os.path.join(args.output[0], "1")
os.makedirs(folder0, exist_ok=True)
os.makedirs(folder1, exist_ok=True)
# loop over images
print("\n\nProcessing images:")
fnames = []
for i, path in enumerate(paths):
if os.path.isdir(path):
print("Processing path {}".format(path))
pngs = natsorted(glob(path+"/img/*.png"))
if pngs:
print(" + images found")
for png in pngs:
fnames.append(png)
else:
raise IOError(" - Did not find any images!")
# save data to a temporary folder so that we can use keras data generators
# make sure that the data has 3 chanels
k = 0
i = 0
j = 0
print("\n")
for label, fname in zip(labels_str, fnames):
print(" - Processing image {} of {}".format(k+1, len(fnames)),
end="\r")
if label == "otherwise":
img3c = grey2rgb(imread(fname)) # make shure that there are 3d
fname0 = random_string()+".png"
imsave(os.path.join(folder0, fname0), img3c)
i += 1
elif label == "breaking":
img3c = grey2rgb(imread(fname)) # make shure that there are 3d
fname1 = random_string()+".png"
imsave(os.path.join(folder1, fname1), img3c)
j += 1
else:
raise ValueError("Fatal, stopping now.")
k += 1
print("\n\nMy work is done!\n")
| [
"random.choice",
"numpy.unique",
"os.makedirs",
"argparse.ArgumentParser",
"numpy.where",
"numpy.ones",
"os.path.join",
"numpy.array",
"skimage.io.imread",
"os.path.isdir",
"pandas.read_excel",
"pandas.concat",
"glob.glob"
] | [((1029, 1054), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1052, 1054), False, 'import argparse\n'), ((2362, 2393), 'os.makedirs', 'os.makedirs', (['out'], {'exist_ok': '(True)'}), '(out, exist_ok=True)\n', (2373, 2393), False, 'import os\n'), ((2792, 2806), 'pandas.concat', 'pd.concat', (['dfs'], {}), '(dfs)\n', (2801, 2806), True, 'import pandas as pd\n'), ((3513, 3546), 'os.path.join', 'os.path.join', (['args.output[0]', '"""0"""'], {}), "(args.output[0], '0')\n", (3525, 3546), False, 'import os\n'), ((3562, 3595), 'os.path.join', 'os.path.join', (['args.output[0]', '"""1"""'], {}), "(args.output[0], '1')\n", (3574, 3595), False, 'import os\n'), ((3601, 3636), 'os.makedirs', 'os.makedirs', (['folder0'], {'exist_ok': '(True)'}), '(folder0, exist_ok=True)\n', (3612, 3636), False, 'import os\n'), ((3642, 3677), 'os.makedirs', 'os.makedirs', (['folder1'], {'exist_ok': '(True)'}), '(folder1, exist_ok=True)\n', (3653, 3677), False, 'import os\n'), ((2497, 2516), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (2510, 2516), False, 'import os\n'), ((3808, 3827), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (3821, 3827), False, 'import os\n'), ((840, 862), 'random.choice', 'random.choice', (['letters'], {}), '(letters)\n', (853, 862), False, 'import random\n'), ((2622, 2644), 'glob.glob', 'glob', (["(path + '/*.xlsx')"], {}), "(path + '/*.xlsx')\n", (2626, 2644), False, 'from glob import glob\n'), ((3047, 3064), 'numpy.unique', 'np.unique', (['labels'], {}), '(labels)\n', (3056, 3064), True, 'import numpy as np\n'), ((3380, 3414), 'numpy.where', 'np.where', (["(labels_str != 'breaking')"], {}), "(labels_str != 'breaking')\n", (3388, 3414), True, 'import numpy as np\n'), ((2728, 2749), 'pandas.read_excel', 'pd.read_excel', (['xls[0]'], {}), '(xls[0])\n', (2741, 2749), True, 'import pandas as pd\n'), ((2901, 2922), 'numpy.ones', 'np.ones', (['labels.shape'], {}), '(labels.shape)\n', (2908, 2922), True, 
'import numpy as np\n'), ((3231, 3259), 'numpy.array', 'np.array', (['args.target_labels'], {}), '(args.target_labels)\n', (3239, 3259), True, 'import numpy as np\n'), ((3296, 3317), 'numpy.where', 'np.where', (['(labels == l)'], {}), '(labels == l)\n', (3304, 3317), True, 'import numpy as np\n'), ((3913, 3938), 'glob.glob', 'glob', (["(path + '/img/*.png')"], {}), "(path + '/img/*.png')\n", (3917, 3938), False, 'from glob import glob\n'), ((4548, 4561), 'skimage.io.imread', 'imread', (['fname'], {}), '(fname)\n', (4554, 4561), False, 'from skimage.io import imread, imsave\n'), ((4660, 4689), 'os.path.join', 'os.path.join', (['folder0', 'fname0'], {}), '(folder0, fname0)\n', (4672, 4689), False, 'import os\n'), ((4783, 4796), 'skimage.io.imread', 'imread', (['fname'], {}), '(fname)\n', (4789, 4796), False, 'from skimage.io import imread, imsave\n'), ((4895, 4924), 'os.path.join', 'os.path.join', (['folder1', 'fname1'], {}), '(folder1, fname1)\n', (4907, 4924), False, 'import os\n')] |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module that computes the basic statistics for a collection of RecordBatches.
It computes common statistics for each column in the
RecordBatches. And if a column is of struct type (i.e. it consists of
child arrays), it recursively computes common statistics for each child array.
Common statistics are about the presence and valency of the column. The column
is assumed to be at least 1-nested (i.e. a list<primitive>, which means the
value of the column at each row of a RecordBatch is a list of primitives) and
could be more deeply nested (i.e. of list<list<...>> type). We compute
presence and valency stats for each nest level, relative to its outer level.
Note that the presence and valency of the outermost nest level is relative to a
RecordBatch row. The following presence and valency stats are computed:
* Number of missing (value == null) elements.
Note:
- For the out-most level, this number means number of rows that does not
have values at this column. And this number is actually not computed
here because we need num_rows (or num_examples) to compute it and that
is not tracked here. See stats_impl.py.
- An empty list is distinguished from a null and is not counted as
missing.
* Number of present elements.
* Maximum valency of elements.
* Minimum valency of elements. Note that the valency of an empty list is 0
but a null element has no valency (does not contribute to the result).
* Total number of values (sum of valency).
* Quantiles histogram over the valency.
It computes the following statistics for each numeric column (or leaf numeric
array contained in some struct column):
- Mean of the values.
- Standard deviation of the values.
- Median of the values.
- Number of values that equal zero.
- Minimum value.
- Maximum value.
- Standard histogram over the values.
- Quantiles histogram over the values.
We compute the following statistics for each string column (or leaf string
array contained in some struct column):
- Average length of the values for this feature.
"""
import collections
import itertools
import math
import sys
from typing import Any, Callable, Dict, Iterable, List, Mapping, Optional, Text
import apache_beam as beam
import numpy as np
import pyarrow as pa
from tensorflow_data_validation import constants
from tensorflow_data_validation import types
from tensorflow_data_validation.arrow import arrow_util
from tensorflow_data_validation.statistics.generators import stats_generator
from tensorflow_data_validation.utils import quantiles_util
from tensorflow_data_validation.utils import schema_util
from tensorflow_data_validation.utils import stats_util
from tensorflow_data_validation.utils import top_k_uniques_stats_util
from tensorflow_data_validation.utils import variance_util
from tensorflow_data_validation.utils.example_weight_map import ExampleWeightMap
from tfx_bsl import sketches
from tfx_bsl.arrow import array_util
from tensorflow_metadata.proto.v0 import schema_pb2
from tensorflow_metadata.proto.v0 import statistics_pb2
class _PresenceAndValencyStats(object):
  """Tracks presence/valency statistics for one nest level of a feature."""
  __slots__ = [
      'num_non_missing', 'min_num_values', 'max_num_values', 'total_num_values',
      'weighted_total_num_values', 'weighted_num_non_missing',
      'num_values_summary']

  def __init__(self,
               make_quantiles_sketch_fn: Callable[[],
                                                  sketches.QuantilesSketch]):
    # Count of examples that contain at least one value for this feature.
    self.num_non_missing = 0
    # Smallest per-example value count seen so far.
    self.min_num_values = sys.maxsize
    # Largest per-example value count seen so far.
    self.max_num_values = 0
    # Running total of all values seen for this feature.
    self.total_num_values = 0
    # Weighted counterpart of total_num_values.
    self.weighted_total_num_values = 0
    # Weighted counterpart of num_non_missing.
    self.weighted_num_non_missing = 0
    # Quantiles sketch over the per-example value counts.
    self.num_values_summary = make_quantiles_sketch_fn()

  def merge_with(self, other: '_PresenceAndValencyStats') -> None:
    """Folds `other` into `self` in place."""
    self.min_num_values = min(self.min_num_values, other.min_num_values)
    self.max_num_values = max(self.max_num_values, other.max_num_values)
    self.num_non_missing += other.num_non_missing
    self.total_num_values += other.total_num_values
    self.weighted_num_non_missing += other.weighted_num_non_missing
    self.weighted_total_num_values += other.weighted_total_num_values
    self.num_values_summary.Merge(other.num_values_summary)

  def update(self, feature_array: pa.Array, presence_mask: np.ndarray,
             num_values: np.ndarray, num_values_not_none: np.ndarray,
             weights: Optional[np.ndarray]) -> None:
    """Accumulates presence/valency stats from one feature array."""
    self.num_non_missing += len(feature_array) - feature_array.null_count
    self.max_num_values = np.maximum.reduce(
        num_values_not_none, initial=self.max_num_values)
    self.min_num_values = np.minimum.reduce(num_values_not_none,
                                            initial=self.min_num_values)
    self.total_num_values += np.sum(num_values_not_none)
    # Valencies repeat heavily; pre-aggregating into (value, count) pairs
    # reduces the cost of AddValues().
    grouped_counts = pa.array(num_values_not_none).value_counts()
    self.num_values_summary.AddValues(grouped_counts.field(0),
                                      grouped_counts.field(1))
    if weights is None:
      return
    if weights.size != num_values.size:
      raise ValueError('Weight feature must not be missing.')
    self.weighted_total_num_values += np.sum(num_values * weights)
    self.weighted_num_non_missing += np.sum(weights[presence_mask])
class _PartialCommonStats(object):
  """Holds partial common statistics for a single feature."""
  __slots__ = ['type', 'has_weights', 'presence_and_valency_stats']

  def __init__(self, has_weights: bool):
    """Initializes empty common stats.

    Args:
      has_weights: Whether a weight feature is specified.
    """
    # Type of the feature.
    self.type = None  # type: Optional[types.FeatureNameStatisticsType]
    # This will be a List[_PresenceAndValencyStats] once `update()` is called.
    # presence_and_valency_stats[i] contains the stats at nest level i.
    # for example: a feature of type list<list<int>> will have
    # presence_and_valency_stats of length 2. presence_and_valency_stats[0]
    # contains the stats about the outer list.
    self.presence_and_valency_stats = None  # type: Optional[List[Any]]
    self.has_weights = has_weights

  def merge_with(
      self, feature_path: types.FeaturePath, other: '_PartialCommonStats'
  ) -> None:
    """Merges two partial common statistics and return the merged statistics.

    Note that this DOES NOT merge self.num_values_summaries. See
    `merge_num_values_summaries()`.

    Args:
      feature_path: path of the feature that `self` is associated with.
      other: a _PartialCommonStats to merge with.

    Raises:
      ValueError: if `self` and `other` have a different number of nest
        levels.
    """
    assert self.has_weights == other.has_weights
    if self.presence_and_valency_stats is None:
      self.presence_and_valency_stats = other.presence_and_valency_stats
    elif other.presence_and_valency_stats is not None:
      this_nest_level = len(self.presence_and_valency_stats)
      other_nest_level = len(other.presence_and_valency_stats)
      if this_nest_level != other_nest_level:
        raise ValueError(
            'Unable to merge common stats with different nest levels for '
            'feature {}: {} vs {}'.format(
                feature_path, this_nest_level, other_nest_level))
      for self_stats, other_stats in zip(self.presence_and_valency_stats,
                                         other.presence_and_valency_stats):
        self_stats.merge_with(other_stats)
    # Set the type of the merged common stats.
    # Case 1: Both the types are None. We set the merged type to be None.
    # Case 2: One of two types is None. We set the merged type to be the type
    # which is not None. For example, if left.type=FLOAT and right.type=None,
    # we set the merged type to be FLOAT.
    # Case 3: Both the types are same (and not None), we set the merged type to
    # be the same type.
    if self.type is None:
      self.type = other.type

  def update(self,
             feature_path: types.FeaturePath,
             feature_array: pa.Array,
             feature_type: types.FeatureNameStatisticsType,
             make_quantiles_sketch_fn: Callable[[], sketches.QuantilesSketch],
             weights: Optional[np.ndarray] = None) -> None:
    """Update the partial common statistics using the input value.

    Args:
      feature_path: path of the feature.
      feature_array: an arrow Array holding one batch of values.
      feature_type: the inferred statistics type of the feature.
      make_quantiles_sketch_fn: a callable that creates a quantiles sketch.
      weights: optional per-example weights aligned with the outermost level
        of `feature_array`.

    Raises:
      TypeError: if `feature_type` conflicts with a previously seen type.
      ValueError: if the nestedness of `feature_array` differs from previous
        batches.
    """
    if self.type is None:
      self.type = feature_type  # pytype: disable=annotation-type-mismatch
    elif feature_type is not None and self.type != feature_type:
      raise TypeError('Cannot determine the type of feature %s. '
                      'Found values of types %s and %s.' %
                      (feature_path, self.type, feature_type))

    nest_level = arrow_util.get_nest_level(feature_array.type)
    if self.presence_and_valency_stats is None:
      self.presence_and_valency_stats = [
          _PresenceAndValencyStats(make_quantiles_sketch_fn)
          for _ in range(nest_level)
      ]
    elif nest_level != len(self.presence_and_valency_stats):
      raise ValueError('Inconsistent nestedness in feature {}: {} vs {}'.format(
          feature_path, nest_level, len(self.presence_and_valency_stats)))

    # And there's nothing we can collect in this case.
    if not feature_array:
      return

    level = 0
    while arrow_util.is_list_like(feature_array.type):
      # BUG FIX: `np.bool` was a deprecated alias for the builtin `bool` and
      # was removed in NumPy 1.24; viewing through `np.bool_` (the actual
      # NumPy scalar type) is the supported spelling.
      presence_mask = ~np.asarray(
          array_util.GetArrayNullBitmapAsByteArray(feature_array)).view(
              np.bool_)
      num_values = np.asarray(
          array_util.ListLengthsFromListArray(feature_array))
      num_values_not_none = num_values[presence_mask]
      self.presence_and_valency_stats[level].update(feature_array,
                                                    presence_mask, num_values,
                                                    num_values_not_none,
                                                    weights)
      flattened = feature_array.flatten()
      if weights is not None:
        # Repeat each example's weight once per value at the next inner level.
        parent_indices = array_util.GetFlattenedArrayParentIndices(
            feature_array).to_numpy()
        weights = weights[parent_indices]
      feature_array = flattened
      level += 1
class _PartialNumericStats(object):
  """Holds partial numeric statistics for a single feature."""
  __slots__ = [
      'num_zeros', 'num_nan', 'min', 'max', 'finite_min', 'finite_max',
      'quantiles_summary', 'has_weights', 'weighted_quantiles_summary',
      'mean_var_accumulator', 'weighted_mean_var_accumulator'
  ]

  def __init__(
      self, has_weights: bool,
      make_quantiles_sketch_fn: Callable[[], sketches.QuantilesSketch]):
    """Initializes empty numeric stats.

    Args:
      has_weights: Whether a weight feature is specified.
      make_quantiles_sketch_fn: A callable that creates a quantiles sketch.
    """
    # The number of values for this feature that equal 0.
    self.num_zeros = 0
    # The number of NaN values for this feature. This is computed only for
    # FLOAT features.
    self.num_nan = 0
    # The minimum value among all the values for this feature.
    self.min = float('inf')
    # The maximum value among all the values for this feature.
    self.max = float('-inf')
    # The minimum value among all the finite values for this feature.
    self.finite_min = float('inf')
    # The maximum value among all the finite values for this feature.
    self.finite_max = float('-inf')
    # Summary of the quantiles for the values in this feature.
    self.quantiles_summary = make_quantiles_sketch_fn()
    self.has_weights = has_weights
    # Accumulator for mean and variance.
    self.mean_var_accumulator = variance_util.MeanVarAccumulator()
    # Keep track of partial weighted numeric stats.
    if has_weights:
      # Summary of the weighted quantiles for the values in this feature.
      self.weighted_quantiles_summary = make_quantiles_sketch_fn()
      # Accumulator for weighted mean and weighted variance.
      self.weighted_mean_var_accumulator = (
          variance_util.WeightedMeanVarAccumulator())
    else:
      self.weighted_mean_var_accumulator = None

  def __iadd__(self, other: '_PartialNumericStats') -> '_PartialNumericStats':
    """Merge two partial numeric statistics and return the merged statistics."""
    self.num_zeros += other.num_zeros
    self.num_nan += other.num_nan
    self.min = min(self.min, other.min)
    self.max = max(self.max, other.max)
    self.finite_min = min(self.finite_min, other.finite_min)
    self.finite_max = max(self.finite_max, other.finite_max)
    self.quantiles_summary.Merge(other.quantiles_summary)
    self.mean_var_accumulator.merge(other.mean_var_accumulator)
    assert self.has_weights == other.has_weights
    if self.has_weights:
      self.weighted_quantiles_summary.Merge(other.weighted_quantiles_summary)
      self.weighted_mean_var_accumulator.merge(
          other.weighted_mean_var_accumulator)
    return self

  def update(
      self,
      feature_array: pa.Array,
      weights: Optional[np.ndarray] = None) -> None:
    """Update the partial numeric statistics using the input value.

    Args:
      feature_array: an arrow Array holding one batch of values.
      weights: optional per-example weights aligned with `feature_array`.
    """
    # np.max / np.min below cannot handle empty arrays. And there's nothing
    # we can collect in this case.
    if not feature_array:
      return
    flattened_value_array, value_parent_indices = arrow_util.flatten_nested(
        feature_array, weights is not None)
    # Note: to_numpy will fail if flattened_value_array is empty.
    if not flattened_value_array:
      return
    values = np.asarray(flattened_value_array)
    nan_mask = np.isnan(values)
    self.num_nan += np.sum(nan_mask)
    non_nan_mask = ~nan_mask
    values_no_nan = values[non_nan_mask]

    # We do this check to avoid failing in np.min/max with empty array.
    if values_no_nan.size == 0:
      return
    # This is to avoid integer overflow when computing sum or sum of squares.
    values_no_nan_as_double = values_no_nan.astype(np.float64)
    self.mean_var_accumulator.update(values_no_nan_as_double)
    # Use np.minimum.reduce(values_no_nan, initial=self.min) once we upgrade
    # to numpy 1.16
    curr_min = np.min(values_no_nan)
    curr_max = np.max(values_no_nan)
    self.min = min(self.min, curr_min)
    self.max = max(self.max, curr_max)
    if curr_min == float('-inf') or curr_max == float('inf'):
      finite_values = values_no_nan[np.isfinite(values_no_nan)]
      if finite_values.size > 0:
        self.finite_min = min(self.finite_min, np.min(finite_values))
        self.finite_max = max(self.finite_max, np.max(finite_values))
    else:
      # BUG FIX: curr_min and curr_max are finite here, so they must update
      # finite_min/finite_max as well. Previously all-finite batches left the
      # finite bounds at +/-inf, which later corrupts the equi-width
      # histogram range built from them in _make_numeric_stats_proto.
      self.finite_min = min(self.finite_min, curr_min)
      self.finite_max = max(self.finite_max, curr_max)
    self.num_zeros += values_no_nan.size - np.count_nonzero(values_no_nan)
    self.quantiles_summary.AddValues(pa.array(values_no_nan))
    if weights is not None:
      flat_weights = weights[value_parent_indices]
      flat_weights_no_nan = flat_weights[non_nan_mask]
      self.weighted_mean_var_accumulator.update(values_no_nan_as_double,
                                                flat_weights_no_nan)
      self.weighted_quantiles_summary.AddValues(
          pa.array(values_no_nan),
          pa.array(flat_weights_no_nan))
class _PartialStringStats(object):
  """Holds partial string statistics for a single feature."""
  __slots__ = ['total_bytes_length']

  def __init__(self):
    # Running sum of the byte lengths of every value of this feature.
    self.total_bytes_length = 0

  def __iadd__(self, other: '_PartialStringStats') -> '_PartialStringStats':
    """Merges `other` into `self` and returns the merged statistics."""
    self.total_bytes_length += other.total_bytes_length
    return self

  def update(self, feature_array: pa.Array) -> None:
    """Updates the partial string statistics using the input value."""
    if pa.types.is_null(feature_array.type):
      return
    flattened, _ = arrow_util.flatten_nested(feature_array)
    if arrow_util.is_binary_like(flattened.type):
      # GetBinaryArrayTotalByteSize returns a Python long (to be compatible
      # with Python3). Convert to int up front so Python2 does cheaper
      # integer arithmetic.
      self.total_bytes_length += int(
          array_util.GetBinaryArrayTotalByteSize(flattened))
    elif flattened:
      # to_numpy() fails on an empty array, hence the guard above.
      # This could be computed faster by taking log10 of the integer.
      def _stringified_length(value):
        return len(str(value))

      self.total_bytes_length += np.sum(
          np.vectorize(_stringified_length,
                       otypes=[np.int32])(np.asarray(flattened)))
class _PartialBytesStats(object):
  """Holds partial bytes statistics for a single feature."""
  __slots__ = ['total_num_bytes', 'min_num_bytes', 'max_num_bytes']

  def __init__(self):
    # Running sum of the byte counts of every value of this feature.
    self.total_num_bytes = 0
    # Smallest byte count seen so far.
    self.min_num_bytes = sys.maxsize
    # Largest byte count seen so far.
    self.max_num_bytes = -sys.maxsize

  def __iadd__(self, other: '_PartialBytesStats') -> '_PartialBytesStats':
    """Merges `other` into `self` and returns the merged statistics."""
    self.total_num_bytes += other.total_num_bytes
    self.min_num_bytes = min(self.min_num_bytes, other.min_num_bytes)
    self.max_num_bytes = max(self.max_num_bytes, other.max_num_bytes)
    return self

  def update(self, feature_array: pa.Array) -> None:
    """Updates the partial bytes statistics using the input value."""
    if pa.types.is_null(feature_array.type):
      return
    flattened, _ = arrow_util.flatten_nested(feature_array)
    if (pa.types.is_floating(flattened.type) or
        pa.types.is_integer(flattened.type)):
      raise ValueError('Bytes stats cannot be computed on INT/FLOAT features.')
    if not flattened:
      return
    lengths = array_util.GetElementLengths(flattened).to_numpy()
    self.min_num_bytes = min(self.min_num_bytes, np.min(lengths))
    self.max_num_bytes = max(self.max_num_bytes, np.max(lengths))
    self.total_num_bytes += np.sum(lengths)
class _PartialBasicStats(object):
  """Bundles all partial statistics tracked for a single feature."""
  __slots__ = ['common_stats', 'numeric_stats', 'string_stats', 'bytes_stats']

  def __init__(
      self, has_weights: bool,
      make_quantiles_sketch_fn: Callable[[], sketches.QuantilesSketch]):
    # One partial-stats accumulator per statistics family.
    self.bytes_stats = _PartialBytesStats()
    self.string_stats = _PartialStringStats()
    self.numeric_stats = _PartialNumericStats(
        has_weights=has_weights,
        make_quantiles_sketch_fn=make_quantiles_sketch_fn)
    self.common_stats = _PartialCommonStats(has_weights=has_weights)
def _make_presence_and_valency_stats_protos(
    parent_presence_and_valency: Optional[_PresenceAndValencyStats],
    presence_and_valency: List[_PresenceAndValencyStats]
) -> List[statistics_pb2.PresenceAndValencyStatistics]:
  """Converts presence and valency stats to corresponding protos."""
  # The top-level num_missing requires num_examples, which is not tracked
  # here, so it is computed outside BasicStatsGenerator. For every other
  # level it is previous_level.total_num_values - this_level.num_non_missing.
  protos = []
  outer_levels = itertools.chain([parent_presence_and_valency],
                                 presence_and_valency)
  for outer, current in zip(outer_levels, presence_and_valency):
    proto = statistics_pb2.PresenceAndValencyStatistics()
    if outer is not None:
      proto.num_missing = outer.total_num_values - current.num_non_missing
    proto.num_non_missing = current.num_non_missing
    if current.num_non_missing > 0:
      proto.min_num_values = current.min_num_values
      proto.max_num_values = current.max_num_values
    proto.tot_num_values = current.total_num_values
    protos.append(proto)
  return protos
def _make_weighted_presence_and_valency_stats_protos(
    parent_presence_and_valency: Optional[_PresenceAndValencyStats],
    presence_and_valency: List[_PresenceAndValencyStats]
) -> List[statistics_pb2.WeightedCommonStatistics]:
  """Converts weighted presence and valency stats to corresponding protos."""
  # The top-level weighted num_missing requires weighted_num_examples, which
  # is not tracked here, so it is computed outside BasicStatsGenerator. For
  # every other level it is (previous_level.weighted_total_num_values -
  # this_level.weighted_num_non_missing).
  protos = []
  outer_levels = itertools.chain([parent_presence_and_valency],
                                 presence_and_valency)
  for outer, current in zip(outer_levels, presence_and_valency):
    proto = statistics_pb2.WeightedCommonStatistics()
    if outer is not None:
      proto.num_missing = (
          outer.weighted_total_num_values - current.weighted_num_non_missing)
    proto.num_non_missing = current.weighted_num_non_missing
    proto.tot_num_values = current.weighted_total_num_values
    if current.weighted_num_non_missing > 0:
      proto.avg_num_values = (
          current.weighted_total_num_values / current.weighted_num_non_missing)
    protos.append(proto)
  return protos
def _make_common_stats_proto(
    common_stats: _PartialCommonStats,
    parent_common_stats: Optional[_PartialCommonStats],
    make_quantiles_sketch_fn: Callable[[], sketches.QuantilesSketch],
    num_values_histogram_buckets: int,
    has_weights: bool
) -> statistics_pb2.CommonStatistics:
  """Convert the partial common stats into a CommonStatistics proto.

  Args:
    common_stats: The partial common stats of the feature.
    parent_common_stats: The partial common stats of the feature's parent, or
      None for a top-level feature.
    make_quantiles_sketch_fn: A callable that creates a quantiles sketch; used
      to build empty placeholder stats when no values were observed.
    num_values_histogram_buckets: Number of buckets in the quantiles histogram
      over the number of values per example.
    has_weights: Whether a weight feature is specified.

  Returns:
    A statistics_pb2.CommonStatistics proto. num_missing of the top level is
    only populated when parent stats are given; otherwise it requires
    num_examples, which is not tracked here (see the module docstring).
  """
  result = statistics_pb2.CommonStatistics()
  # The parent's innermost presence/valency level is the reference against
  # which this feature's missing counts are computed. A fresh (empty) stats
  # object stands in when the parent never saw any values.
  parent_presence_and_valency = None
  if parent_common_stats is not None:
    parent_presence_and_valency = (
        _PresenceAndValencyStats(make_quantiles_sketch_fn)
        if parent_common_stats.presence_and_valency_stats is None else
        parent_common_stats.presence_and_valency_stats[-1])

  presence_and_valency_stats = common_stats.presence_and_valency_stats
  # the CommonStatistics already contains the presence and valency
  # for a 1-nested feature.
  if (presence_and_valency_stats is not None and
      len(presence_and_valency_stats) > 1):
    result.presence_and_valency_stats.extend(
        _make_presence_and_valency_stats_protos(
            parent_presence_and_valency,
            common_stats.presence_and_valency_stats))
    if has_weights:
      result.weighted_presence_and_valency_stats.extend(
          _make_weighted_presence_and_valency_stats_protos(
              parent_presence_and_valency,
              common_stats.presence_and_valency_stats))

  # Level-0 (outermost) stats populate the classic CommonStatistics fields.
  top_level_presence_and_valency = (
      _PresenceAndValencyStats(make_quantiles_sketch_fn)
      if common_stats.presence_and_valency_stats is None else
      common_stats.presence_and_valency_stats[0])
  result.num_non_missing = top_level_presence_and_valency.num_non_missing

  if parent_presence_and_valency is not None:
    result.num_missing = (
        parent_presence_and_valency.total_num_values -
        top_level_presence_and_valency.num_non_missing)
  result.tot_num_values = top_level_presence_and_valency.total_num_values

  # TODO(b/79685042): Need to decide on what is the expected values for
  # statistics like min_num_values, max_num_values, avg_num_values, when
  # all the values for the feature are missing.
  if top_level_presence_and_valency.num_non_missing > 0:
    result.min_num_values = top_level_presence_and_valency.min_num_values
    result.max_num_values = top_level_presence_and_valency.max_num_values
    result.avg_num_values = (
        top_level_presence_and_valency.total_num_values /
        top_level_presence_and_valency.num_non_missing)
    if top_level_presence_and_valency.num_values_summary is not None:
      # Add num_values_histogram to the common stats proto.
      num_values_quantiles = (
          top_level_presence_and_valency.num_values_summary.GetQuantiles(
              num_values_histogram_buckets).flatten().to_pylist())
      histogram = quantiles_util.generate_quantiles_histogram(
          num_values_quantiles, top_level_presence_and_valency.num_non_missing,
          num_values_histogram_buckets)
      result.num_values_histogram.CopyFrom(histogram)

  # Add weighted common stats to the proto.
  if has_weights:
    weighted_common_stats_proto = statistics_pb2.WeightedCommonStatistics(
        num_non_missing=top_level_presence_and_valency.weighted_num_non_missing,
        tot_num_values=top_level_presence_and_valency.weighted_total_num_values)
    if parent_presence_and_valency is not None:
      weighted_common_stats_proto.num_missing = (
          parent_presence_and_valency.weighted_total_num_values -
          top_level_presence_and_valency.weighted_num_non_missing)
    if top_level_presence_and_valency.weighted_num_non_missing > 0:
      weighted_common_stats_proto.avg_num_values = (
          top_level_presence_and_valency.weighted_total_num_values /
          top_level_presence_and_valency.weighted_num_non_missing)
    result.weighted_common_stats.CopyFrom(
        weighted_common_stats_proto)
  return result
def _make_numeric_stats_proto(
    numeric_stats: _PartialNumericStats,
    total_num_values: int,
    num_histogram_buckets: int,
    num_quantiles_histogram_buckets: int,
    has_weights: bool
) -> statistics_pb2.NumericStatistics:
  """Convert the partial numeric statistics into NumericStatistics proto.

  Args:
    numeric_stats: The partial numeric stats of the feature.
    total_num_values: The total number of values of the feature (including
      NaNs; they are subtracted below).
    num_histogram_buckets: Number of buckets in the standard (equi-width)
      histogram.
    num_quantiles_histogram_buckets: Number of buckets in the quantiles
      histogram.
    has_weights: Whether a weight feature is specified.

  Returns:
    A statistics_pb2.NumericStatistics proto.
  """
  result = statistics_pb2.NumericStatistics()
  # NaNs are excluded from all numeric aggregates; they are reported only via
  # the histograms' num_nan field.
  if numeric_stats.num_nan > 0:
    total_num_values -= numeric_stats.num_nan

  if total_num_values == 0:
    # If we only have nan values, we only set num_nan.
    if numeric_stats.num_nan > 0:
      result.histograms.add(type=statistics_pb2.Histogram.STANDARD).num_nan = (
          numeric_stats.num_nan)
      result.histograms.add(type=statistics_pb2.Histogram.QUANTILES).num_nan = (
          numeric_stats.num_nan)
    return result

  result.mean = float(numeric_stats.mean_var_accumulator.mean)
  # Variance can come out as a tiny negative number due to floating-point
  # error; clamp before the sqrt.
  result.std_dev = math.sqrt(
      max(0, numeric_stats.mean_var_accumulator.variance))
  result.num_zeros = numeric_stats.num_zeros
  result.min = float(numeric_stats.min)
  result.max = float(numeric_stats.max)

  # Extract the quantiles from the summary.
  # The quantiles are requested at a resolution high enough to serve both the
  # quantiles histogram and the standard histogram (which is derived from
  # _NUM_QUANTILES_FACTOR_FOR_STD_HISTOGRAM * num_histogram_buckets quantiles;
  # the constant is defined elsewhere in this module).
  assert numeric_stats.quantiles_summary is not None
  quantiles = (
      numeric_stats.quantiles_summary.GetQuantiles(
          max(num_quantiles_histogram_buckets,
              _NUM_QUANTILES_FACTOR_FOR_STD_HISTOGRAM *
              num_histogram_buckets)).flatten().to_pylist())

  # Find the median from the quantiles and update the numeric stats proto.
  result.median = float(quantiles_util.find_median(quantiles))

  # Construct the equi-width histogram from the quantiles and add it to the
  # numeric stats proto.
  std_histogram = quantiles_util.generate_equi_width_histogram(
      quantiles, numeric_stats.finite_min, numeric_stats.finite_max,
      total_num_values, num_histogram_buckets)
  std_histogram.num_nan = numeric_stats.num_nan
  new_std_histogram = result.histograms.add()
  new_std_histogram.CopyFrom(std_histogram)

  # Construct the quantiles histogram from the quantiles and add it to the
  # numeric stats proto.
  q_histogram = quantiles_util.generate_quantiles_histogram(
      quantiles, total_num_values, num_quantiles_histogram_buckets)
  q_histogram.num_nan = numeric_stats.num_nan
  new_q_histogram = result.histograms.add()
  new_q_histogram.CopyFrom(q_histogram)

  # Add weighted numeric stats to the proto.
  if has_weights:
    assert numeric_stats.weighted_mean_var_accumulator is not None
    weighted_numeric_stats_proto = statistics_pb2.WeightedNumericStatistics()

    weighted_total_num_values = (
        numeric_stats.weighted_mean_var_accumulator.weights_mean *
        numeric_stats.weighted_mean_var_accumulator.count)
    weighted_mean = numeric_stats.weighted_mean_var_accumulator.mean
    weighted_variance = max(
        0, numeric_stats.weighted_mean_var_accumulator.variance)
    weighted_numeric_stats_proto.mean = weighted_mean
    weighted_numeric_stats_proto.std_dev = math.sqrt(weighted_variance)

    # Extract the weighted quantiles from the summary.
    assert numeric_stats.weighted_quantiles_summary is not None
    weighted_quantiles = (
        numeric_stats.weighted_quantiles_summary.GetQuantiles(
            max(num_quantiles_histogram_buckets,
                _NUM_QUANTILES_FACTOR_FOR_STD_HISTOGRAM *
                num_histogram_buckets)).flatten().to_pylist())

    # Find the weighted median from the quantiles and update the proto.
    weighted_numeric_stats_proto.median = float(
        quantiles_util.find_median(weighted_quantiles))

    # Construct the weighted equi-width histogram from the quantiles and
    # add it to the numeric stats proto.
    weighted_std_histogram = quantiles_util.generate_equi_width_histogram(
        weighted_quantiles, numeric_stats.finite_min, numeric_stats.finite_max,
        weighted_total_num_values, num_histogram_buckets)
    weighted_std_histogram.num_nan = numeric_stats.num_nan
    weighted_numeric_stats_proto.histograms.extend([weighted_std_histogram])

    # Construct the weighted quantiles histogram from the quantiles and
    # add it to the numeric stats proto.
    weighted_q_histogram = quantiles_util.generate_quantiles_histogram(
        weighted_quantiles, weighted_total_num_values,
        num_quantiles_histogram_buckets)
    weighted_q_histogram.num_nan = numeric_stats.num_nan
    weighted_numeric_stats_proto.histograms.extend([weighted_q_histogram])

    result.weighted_numeric_stats.CopyFrom(
        weighted_numeric_stats_proto)
  return result
def _make_string_stats_proto(string_stats: _PartialStringStats,
                             total_num_values: int
                            ) -> statistics_pb2.StringStatistics:
  """Converts the partial string statistics into a StringStatistics proto."""
  proto = statistics_pb2.StringStatistics()
  if total_num_values > 0:
    # avg_length stays at the proto default when no values were seen.
    proto.avg_length = string_stats.total_bytes_length / total_num_values
  return proto
def _make_bytes_stats_proto(bytes_stats: _PartialBytesStats,
                            total_num_values: int
                           ) -> statistics_pb2.BytesStatistics:
  """Converts the partial bytes statistics into a BytesStatistics proto."""
  proto = statistics_pb2.BytesStatistics()
  if total_num_values > 0:
    # All fields stay at their proto defaults when no values were seen.
    proto.avg_num_bytes = bytes_stats.total_num_bytes / total_num_values
    proto.min_num_bytes = bytes_stats.min_num_bytes
    proto.max_num_bytes = bytes_stats.max_num_bytes
    proto.max_num_bytes_int = bytes_stats.max_num_bytes
  return proto
def _make_num_values_custom_stats_proto(
    common_stats: _PartialCommonStats,
    num_histogram_buckets: int,
) -> List[statistics_pb2.CustomStatistic]:
  """Returns CustomStatistic protos with valency histograms per nest level.

  Each proto captures the distribution of the number of values at one nest
  level. Only levels greater than 1 are emitted, because the level-1
  histogram already lives in CommonStatistics.num_values_histogram.

  Args:
    common_stats: a _PartialCommonStats.
    num_histogram_buckets: number of buckets in each histogram.

  Returns:
    a (potentially empty) list of statistics_pb2.CustomStatistic.
  """
  protos = []
  if common_stats.type is None:
    return protos
  all_levels = common_stats.presence_and_valency_stats
  if all_levels is None:
    return protos
  # Pair each level (from the second one on) with its parent; the level-1
  # histogram is included in CommonStats and skipped here.
  for level, stats, parent_stats in zip(itertools.count(2), all_levels[1:],
                                        all_levels):
    quantiles = (
        stats.num_values_summary.GetQuantiles(
            num_histogram_buckets).flatten().to_pylist())
    histogram = quantiles_util.generate_quantiles_histogram(
        quantiles, parent_stats.num_non_missing, num_histogram_buckets)
    proto = statistics_pb2.CustomStatistic()
    proto.name = 'level_{}_value_list_length'.format(level)
    proto.histogram.CopyFrom(histogram)
    protos.append(proto)
  return protos
def _make_feature_stats_proto(
    feature_path: types.FeaturePath, basic_stats: _PartialBasicStats,
    parent_basic_stats: Optional[_PartialBasicStats],
    make_quantiles_sketch_fn: Callable[[], sketches.QuantilesSketch],
    num_values_histogram_buckets: int, num_histogram_buckets: int,
    num_quantiles_histogram_buckets: int, is_bytes: bool,
    categorical_numeric_types: Mapping[types.FeaturePath,
                                       'schema_pb2.FeatureType'],
    has_weights: bool) -> statistics_pb2.FeatureNameStatistics:
  """Convert the partial basic stats into a FeatureNameStatistics proto.

  Args:
    feature_path: The path of the feature.
    basic_stats: The partial basic stats associated with the feature.
    parent_basic_stats: The partial basic stats of the parent of the feature.
    make_quantiles_sketch_fn: A callable to create a quantiles sketch.
    num_values_histogram_buckets: Number of buckets in the quantiles
      histogram for the number of values per feature.
    num_histogram_buckets: Number of buckets in a standard
      NumericStatistics.histogram with equal-width buckets.
    num_quantiles_histogram_buckets: Number of buckets in a
      quantiles NumericStatistics.histogram.
    is_bytes: A boolean indicating whether the feature is bytes.
    categorical_numeric_types: A mapping from feature path to type derived from
      the schema.
    has_weights: A boolean indicating whether a weight feature is specified.

  Returns:
    A statistics_pb2.FeatureNameStatistics proto.
  """
  # Create a new FeatureNameStatistics proto.
  result = statistics_pb2.FeatureNameStatistics()
  result.path.CopyFrom(feature_path.to_proto())
  # Set the feature type.
  inferred_type = basic_stats.common_stats.type
  if inferred_type is not None:
    # The user claims the feature to be BYTES. Only trust them if the inferred
    # type is STRING (which means the actual data is in strings/bytes). We
    # never infer BYTES.
    if (is_bytes and
        inferred_type == statistics_pb2.FeatureNameStatistics.STRING):
      result.type = statistics_pb2.FeatureNameStatistics.BYTES
    else:
      result.type = inferred_type
  # The inferred type being None means we don't see any value for this feature.
  # We trust user's claim.
  elif is_bytes:
    result.type = statistics_pb2.FeatureNameStatistics.BYTES
  else:
    # We don't have an "unknown" type, so default to STRING here.
    result.type = statistics_pb2.FeatureNameStatistics.STRING
  # Construct common statistics proto.
  common_stats_proto = _make_common_stats_proto(
      basic_stats.common_stats,
      parent_basic_stats.common_stats
      if parent_basic_stats is not None else None,
      make_quantiles_sketch_fn,
      num_values_histogram_buckets, has_weights)
  # this is the total number of values at the leaf level.
  total_num_values = (
      0 if basic_stats.common_stats.presence_and_valency_stats is None else
      basic_stats.common_stats.presence_and_valency_stats[-1].total_num_values)
  # Copy the common stats into appropriate numeric/string stats.
  # If the type is not set, we currently wrap the common stats
  # within numeric stats.
  if result.type == statistics_pb2.FeatureNameStatistics.BYTES:
    # Construct bytes statistics proto.
    bytes_stats_proto = _make_bytes_stats_proto(
        basic_stats.bytes_stats, common_stats_proto.tot_num_values)
    # Add the common stats into bytes stats.
    bytes_stats_proto.common_stats.CopyFrom(common_stats_proto)
    result.bytes_stats.CopyFrom(bytes_stats_proto)
  # TODO(b/187054148): Update to allow FLOAT
  # NOTE(review): this is deliberately a plain `if` (not `elif`) after the
  # BYTES branch; for a BYTES-typed result it relies on
  # output_categorical_numeric returning False for that type -- confirm.
  if (result.type == statistics_pb2.FeatureNameStatistics.STRING or
      top_k_uniques_stats_util.output_categorical_numeric(
          categorical_numeric_types, feature_path, result.type)):
    # Construct string statistics proto.
    string_stats_proto = _make_string_stats_proto(basic_stats.string_stats,
                                                 total_num_values)
    # Add the common stats into string stats.
    string_stats_proto.common_stats.CopyFrom(common_stats_proto)
    result.string_stats.CopyFrom(string_stats_proto)
  elif result.type == statistics_pb2.FeatureNameStatistics.STRUCT:
    # STRUCT features carry only common stats; there are no leaf values.
    result.struct_stats.common_stats.CopyFrom(common_stats_proto)
  elif result.type in (statistics_pb2.FeatureNameStatistics.INT,
                       statistics_pb2.FeatureNameStatistics.FLOAT):
    # Construct numeric statistics proto.
    numeric_stats_proto = _make_numeric_stats_proto(
        basic_stats.numeric_stats, total_num_values,
        num_histogram_buckets, num_quantiles_histogram_buckets, has_weights)
    # Add the common stats into numeric stats.
    numeric_stats_proto.common_stats.CopyFrom(common_stats_proto)
    result.num_stats.CopyFrom(numeric_stats_proto)
  # Per-nest-level num-values histograms (levels > 1) go into custom stats.
  result.custom_stats.extend(_make_num_values_custom_stats_proto(
      basic_stats.common_stats,
      num_values_histogram_buckets))
  return result
# Named tuple containing TFDV metrics.
# The defaults below map positionally to the four fields; they are the
# identity elements for each aggregation: counts/sums start at 0 and the
# running minimum (min_value_count) starts at sys.maxsize.
_TFDVMetrics = collections.namedtuple(
    '_TFDVMetrics', ['num_non_missing', 'min_value_count',
                     'max_value_count', 'total_num_values'])
_TFDVMetrics.__new__.__defaults__ = (0, sys.maxsize, 0, 0)
def _update_tfdv_telemetry(
    accumulator: Dict[types.FeaturePath, _PartialBasicStats]) -> None:
  """Updates TFDV Beam metrics.

  Aggregates per-type presence/valency counts across all features in the
  accumulator and emits them as Beam counters.
  """
  tracked_types = (
      statistics_pb2.FeatureNameStatistics.INT,
      statistics_pb2.FeatureNameStatistics.FLOAT,
      statistics_pb2.FeatureNameStatistics.STRING,
      statistics_pb2.FeatureNameStatistics.STRUCT,
  )
  aggregated = {feature_type: _TFDVMetrics() for feature_type in tracked_types}
  for partial_stats in accumulator.values():
    common_stats = partial_stats.common_stats
    if common_stats.type is None:
      continue
    # Use the leaf-level stats (or an empty placeholder when absent).
    if common_stats.presence_and_valency_stats is None:
      leaf = _PresenceAndValencyStats(lambda: None)
    else:
      leaf = common_stats.presence_and_valency_stats[-1]
    # Fold this feature's leaf stats into the running per-type totals.
    prev = aggregated[common_stats.type]
    aggregated[common_stats.type] = _TFDVMetrics(
        prev.num_non_missing + leaf.num_non_missing,
        min(prev.min_value_count, leaf.min_num_values),
        max(prev.max_value_count, leaf.max_num_values),
        prev.total_num_values + leaf.total_num_values)
  # Emit the aggregated values as Beam counters.
  counter = beam.metrics.Metrics.counter
  for feature_type, type_metrics in aggregated.items():
    type_str = statistics_pb2.FeatureNameStatistics.Type.Name(
        feature_type).lower()
    counter(
        constants.METRICS_NAMESPACE,
        'num_' + type_str + '_feature_values').inc(
            int(type_metrics.num_non_missing))
    if type_metrics.num_non_missing <= 0:
      continue
    counter(
        constants.METRICS_NAMESPACE,
        type_str + '_feature_values_min_count').inc(
            int(type_metrics.min_value_count))
    counter(
        constants.METRICS_NAMESPACE,
        type_str + '_feature_values_max_count').inc(
            int(type_metrics.max_value_count))
    counter(
        constants.METRICS_NAMESPACE,
        type_str + '_feature_values_mean_count').inc(
            int(type_metrics.total_num_values / type_metrics.num_non_missing))
# Currently we construct the equi-width histogram by using the
# quantiles. Specifically, we compute a large number of quantiles (say, N),
# and then compute the density for each bucket by aggregating the densities
# of the smaller quantile intervals that fall within the bucket. We set N to
# be _NUM_QUANTILES_FACTOR_FOR_STD_HISTOGRAM * num_histogram_buckets,
# where num_histogram_buckets is the required number of buckets in the
# histogram. A larger factor yields a finer quantile grid (and thus a better
# per-bucket density estimate) at the cost of more quantile computation.
_NUM_QUANTILES_FACTOR_FOR_STD_HISTOGRAM = 100
# TODO(b/79685042): Currently the stats generator operates on the
# Dict representation of input (mapping from feature name to a batch of
# values). But we process each feature independently. We should
# consider making the stats generator to operate per feature.
class BasicStatsGenerator(stats_generator.CombinerStatsGenerator):
  """A combiner statistics generator that computes basic statistics.

  It computes common statistics for all the features, numeric statistics for
  numeric features and string statistics for string/categorical features.
  """

  def __init__(
      self, # pylint: disable=useless-super-delegation
      name: Text = 'BasicStatsGenerator',
      schema: Optional[schema_pb2.Schema] = None,
      example_weight_map: ExampleWeightMap = ExampleWeightMap(),
      num_values_histogram_buckets: Optional[int] = 10,
      num_histogram_buckets: Optional[int] = 10,
      num_quantiles_histogram_buckets: Optional[int] = 10,
      epsilon: Optional[float] = 0.01) -> None:
    """Initializes basic statistics generator.

    Args:
      name: An optional unique name associated with the statistics generator.
      schema: An optional schema for the dataset.
      example_weight_map: an ExampleWeightMap that maps a FeaturePath to its
        corresponding weight column.
      num_values_histogram_buckets: An optional number of buckets in a quantiles
        histogram for the number of values per Feature, which is stored in
        CommonStatistics.num_values_histogram.
      num_histogram_buckets: An optional number of buckets in a standard
        NumericStatistics.histogram with equal-width buckets.
      num_quantiles_histogram_buckets: An optional number of buckets in a
        quantiles NumericStatistics.histogram.
      epsilon: An optional error tolerance for the computation of quantiles,
        typically a small fraction close to zero (e.g. 0.01). Higher values
        of epsilon increase the quantile approximation, and hence result in
        more unequal buckets, but could improve performance, and resource
        consumption.
    """
    super(BasicStatsGenerator, self).__init__(name, schema)
    self._bytes_features = set(
        schema_util.get_bytes_features(schema) if schema else [])
    self._categorical_numeric_types = schema_util.get_categorical_numeric_feature_types(
        schema) if schema else {}
    self._example_weight_map = example_weight_map
    self._num_values_histogram_buckets = num_values_histogram_buckets
    self._num_histogram_buckets = num_histogram_buckets
    self._num_quantiles_histogram_buckets = num_quantiles_histogram_buckets
    # Each call makes a fresh sketch; `epsilon` is captured by the lambda.
    self._make_quantiles_sketch_fn = lambda: sketches.QuantilesSketch(  # pylint: disable=g-long-lambda
        eps=epsilon,
        max_num_elements=1 << 32,
        num_streams=1)

  # Create an accumulator, which maps feature name to the partial stats
  # associated with the feature.
  def create_accumulator(self) -> Dict[types.FeaturePath, _PartialBasicStats]:
    """Returns a fresh, empty accumulator."""
    return {}

  # Incorporates the input (a Python dict whose keys are feature names and
  # values are lists representing a batch of examples) into the accumulator.
  def add_input(
      self, accumulator: Dict[types.FeaturePath, _PartialBasicStats],
      examples: pa.RecordBatch
  ) -> Dict[types.FeaturePath, _PartialBasicStats]:
    """Updates the accumulator in place with one batch and returns it."""
    for feature_path, feature_array, weights in arrow_util.enumerate_arrays(
        examples,
        example_weight_map=self._example_weight_map,
        enumerate_leaves_only=False):
      stats_for_feature = accumulator.get(feature_path)
      if stats_for_feature is None:
        # First time this feature is seen: create its partial stats entry.
        stats_for_feature = _PartialBasicStats(
            weights is not None, self._make_quantiles_sketch_fn)
        accumulator[feature_path] = stats_for_feature
      feature_type = stats_util.get_feature_type_from_arrow_type(
          feature_path, feature_array.type)
      stats_for_feature.common_stats.update(feature_path,
                                            feature_array, feature_type,
                                            self._make_quantiles_sketch_fn,
                                            weights)
      # The user may make certain claims about a feature's data type
      # (e.g. _bytes_features imply string data type). However we should not
      # trust those claims because TFDV is also responsible for detecting
      # mismatching types. We collect stats according to the actual type, and
      # only when the actual type matches the claim do we collect the
      # type-specific stats (like for categorical int and bytes features).
      if feature_type == statistics_pb2.FeatureNameStatistics.STRING:
        if feature_path in self._bytes_features:
          stats_for_feature.bytes_stats.update(feature_array)
        else:
          stats_for_feature.string_stats.update(feature_array)
      # We want to compute string stats for a numeric only if a top-k stats
      # generator is running, hence the dependency on this library function.
      elif top_k_uniques_stats_util.output_categorical_numeric(
          self._categorical_numeric_types, feature_path, feature_type):
        stats_for_feature.string_stats.update(feature_array)
      elif feature_type in (statistics_pb2.FeatureNameStatistics.FLOAT,
                            statistics_pb2.FeatureNameStatistics.INT):
        stats_for_feature.numeric_stats.update(feature_array, weights)
    return accumulator

  # Merge together a list of basic common statistics.
  def merge_accumulators(
      self, accumulators: Iterable[Dict[types.FeaturePath, _PartialBasicStats]]
  ) -> Dict[types.FeaturePath, _PartialBasicStats]:
    """Merges several accumulators into one.

    NOTE: the first partial stats seen for a feature is adopted (aliased)
    into the result and then mutated in place by subsequent merges.

    Raises:
      TypeError: If two partial statistics for the same feature carry
        different, non-None types.
    """
    result = {}
    for accumulator in accumulators:
      for feature_path, basic_stats in accumulator.items():
        current_type = basic_stats.common_stats.type
        existing_stats = result.get(feature_path)
        if existing_stats is None:
          existing_stats = basic_stats
          result[feature_path] = basic_stats
        else:
          # Check if the types from the two partial statistics are not
          # compatible. If so, raise an error. We consider types to be
          # compatible if both types are same or one of them is None.
          left_type = existing_stats.common_stats.type
          right_type = current_type
          if (left_type is not None and right_type is not None and
              left_type != right_type):
            raise TypeError('Cannot determine the type of feature %s. '
                            'Found values of types %s and %s.' %
                            (feature_path, left_type, right_type))
          existing_stats.common_stats.merge_with(feature_path,
                                                 basic_stats.common_stats)
          # Type-specific stats are merged only when the incoming side
          # actually has a type (i.e. saw at least one value).
          if current_type is not None:
            if feature_path in self._bytes_features:
              existing_stats.bytes_stats += basic_stats.bytes_stats
            elif (top_k_uniques_stats_util.output_categorical_numeric(
                self._categorical_numeric_types, feature_path, current_type) or
                  current_type == statistics_pb2.FeatureNameStatistics.STRING):
              existing_stats.string_stats += basic_stats.string_stats
            elif current_type in (statistics_pb2.FeatureNameStatistics.INT,
                                  statistics_pb2.FeatureNameStatistics.FLOAT):
              existing_stats.numeric_stats += basic_stats.numeric_stats
    return result

  def compact(
      self, accumulator: Dict[types.FeaturePath, _PartialBasicStats]
  ) -> Dict[types.FeaturePath, _PartialBasicStats]:
    """Compacts every quantiles sketch held in the accumulator."""
    for stats in accumulator.values():
      stats.numeric_stats.quantiles_summary.Compact()
      if stats.numeric_stats.has_weights:
        stats.numeric_stats.weighted_quantiles_summary.Compact()
      if stats.common_stats.presence_and_valency_stats is not None:
        for p_and_v_stat in stats.common_stats.presence_and_valency_stats:
          p_and_v_stat.num_values_summary.Compact()
    return accumulator

  # Return final stats as a DatasetFeatureStatistics proto.
  def extract_output(self,
                     accumulator: Dict[types.FeaturePath, _PartialBasicStats]
                    ) -> statistics_pb2.DatasetFeatureStatistics:
    """Builds the final DatasetFeatureStatistics proto from the accumulator."""
    # Update TFDV telemetry.
    _update_tfdv_telemetry(accumulator)
    # Create a new DatasetFeatureStatistics proto.
    result = statistics_pb2.DatasetFeatureStatistics()
    for feature_path, basic_stats in accumulator.items():
      # Construct the FeatureNameStatistics proto from the partial
      # basic stats.
      feature_stats_proto = _make_feature_stats_proto(
          feature_path, basic_stats, accumulator.get(feature_path.parent()),
          self._make_quantiles_sketch_fn, self._num_values_histogram_buckets,
          self._num_histogram_buckets, self._num_quantiles_histogram_buckets,
          feature_path in self._bytes_features, self._categorical_numeric_types,
          self._example_weight_map.get(feature_path) is not None)
      # Copy the constructed FeatureNameStatistics proto into the
      # DatasetFeatureStatistics proto.
      new_feature_stats_proto = result.features.add()
      new_feature_stats_proto.CopyFrom(feature_stats_proto)
    return result
| [
"itertools.chain",
"tensorflow_data_validation.arrow.arrow_util.get_nest_level",
"tensorflow_data_validation.arrow.arrow_util.is_list_like",
"tensorflow_data_validation.utils.quantiles_util.find_median",
"math.sqrt",
"tensorflow_metadata.proto.v0.statistics_pb2.CustomStatistic",
"tensorflow_data_validat... | [((38133, 38254), 'collections.namedtuple', 'collections.namedtuple', (['"""_TFDVMetrics"""', "['num_non_missing', 'min_value_count', 'max_value_count', 'total_num_values']"], {}), "('_TFDVMetrics', ['num_non_missing',\n 'min_value_count', 'max_value_count', 'total_num_values'])\n", (38155, 38254), False, 'import collections\n'), ((22468, 22501), 'tensorflow_metadata.proto.v0.statistics_pb2.CommonStatistics', 'statistics_pb2.CommonStatistics', ([], {}), '()\n', (22499, 22501), False, 'from tensorflow_metadata.proto.v0 import statistics_pb2\n'), ((26327, 26361), 'tensorflow_metadata.proto.v0.statistics_pb2.NumericStatistics', 'statistics_pb2.NumericStatistics', ([], {}), '()\n', (26359, 26361), False, 'from tensorflow_metadata.proto.v0 import statistics_pb2\n'), ((27671, 27828), 'tensorflow_data_validation.utils.quantiles_util.generate_equi_width_histogram', 'quantiles_util.generate_equi_width_histogram', (['quantiles', 'numeric_stats.finite_min', 'numeric_stats.finite_max', 'total_num_values', 'num_histogram_buckets'], {}), '(quantiles, numeric_stats.\n finite_min, numeric_stats.finite_max, total_num_values,\n num_histogram_buckets)\n', (27715, 27828), False, 'from tensorflow_data_validation.utils import quantiles_util\n'), ((28088, 28197), 'tensorflow_data_validation.utils.quantiles_util.generate_quantiles_histogram', 'quantiles_util.generate_quantiles_histogram', (['quantiles', 'total_num_values', 'num_quantiles_histogram_buckets'], {}), '(quantiles, total_num_values,\n num_quantiles_histogram_buckets)\n', (28131, 28197), False, 'from tensorflow_data_validation.utils import quantiles_util\n'), ((30793, 30826), 'tensorflow_metadata.proto.v0.statistics_pb2.StringStatistics', 'statistics_pb2.StringStatistics', ([], {}), '()\n', (30824, 30826), False, 'from tensorflow_metadata.proto.v0 import statistics_pb2\n'), ((31206, 31238), 'tensorflow_metadata.proto.v0.statistics_pb2.BytesStatistics', 'statistics_pb2.BytesStatistics', ([], {}), 
'()\n', (31236, 31238), False, 'from tensorflow_metadata.proto.v0 import statistics_pb2\n'), ((34726, 34764), 'tensorflow_metadata.proto.v0.statistics_pb2.FeatureNameStatistics', 'statistics_pb2.FeatureNameStatistics', ([], {}), '()\n', (34762, 34764), False, 'from tensorflow_metadata.proto.v0 import statistics_pb2\n'), ((5643, 5710), 'numpy.maximum.reduce', 'np.maximum.reduce', (['num_values_not_none'], {'initial': 'self.max_num_values'}), '(num_values_not_none, initial=self.max_num_values)\n', (5660, 5710), True, 'import numpy as np\n'), ((5746, 5813), 'numpy.minimum.reduce', 'np.minimum.reduce', (['num_values_not_none'], {'initial': 'self.min_num_values'}), '(num_values_not_none, initial=self.min_num_values)\n', (5763, 5813), True, 'import numpy as np\n'), ((5887, 5914), 'numpy.sum', 'np.sum', (['num_values_not_none'], {}), '(num_values_not_none)\n', (5893, 5914), True, 'import numpy as np\n'), ((9718, 9763), 'tensorflow_data_validation.arrow.arrow_util.get_nest_level', 'arrow_util.get_nest_level', (['feature_array.type'], {}), '(feature_array.type)\n', (9743, 9763), False, 'from tensorflow_data_validation.arrow import arrow_util\n'), ((10297, 10340), 'tensorflow_data_validation.arrow.arrow_util.is_list_like', 'arrow_util.is_list_like', (['feature_array.type'], {}), '(feature_array.type)\n', (10320, 10340), False, 'from tensorflow_data_validation.arrow import arrow_util\n'), ((12425, 12459), 'tensorflow_data_validation.utils.variance_util.MeanVarAccumulator', 'variance_util.MeanVarAccumulator', ([], {}), '()\n', (12457, 12459), False, 'from tensorflow_data_validation.utils import variance_util\n'), ((14095, 14156), 'tensorflow_data_validation.arrow.arrow_util.flatten_nested', 'arrow_util.flatten_nested', (['feature_array', '(weights is not None)'], {}), '(feature_array, weights is not None)\n', (14120, 14156), False, 'from tensorflow_data_validation.arrow import arrow_util\n'), ((14292, 14325), 'numpy.asarray', 'np.asarray', (['flattened_value_array'], {}), 
'(flattened_value_array)\n', (14302, 14325), True, 'import numpy as np\n'), ((14341, 14357), 'numpy.isnan', 'np.isnan', (['values'], {}), '(values)\n', (14349, 14357), True, 'import numpy as np\n'), ((14378, 14394), 'numpy.sum', 'np.sum', (['nan_mask'], {}), '(nan_mask)\n', (14384, 14394), True, 'import numpy as np\n'), ((14898, 14919), 'numpy.min', 'np.min', (['values_no_nan'], {}), '(values_no_nan)\n', (14904, 14919), True, 'import numpy as np\n'), ((14935, 14956), 'numpy.max', 'np.max', (['values_no_nan'], {}), '(values_no_nan)\n', (14941, 14956), True, 'import numpy as np\n'), ((16485, 16521), 'pyarrow.types.is_null', 'pa.types.is_null', (['feature_array.type'], {}), '(feature_array.type)\n', (16501, 16521), True, 'import pyarrow as pa\n'), ((16636, 16676), 'tensorflow_data_validation.arrow.arrow_util.flatten_nested', 'arrow_util.flatten_nested', (['feature_array'], {}), '(feature_array)\n', (16661, 16676), False, 'from tensorflow_data_validation.arrow import arrow_util\n'), ((16684, 16738), 'tensorflow_data_validation.arrow.arrow_util.is_binary_like', 'arrow_util.is_binary_like', (['flattened_values_array.type'], {}), '(flattened_values_array.type)\n', (16709, 16738), False, 'from tensorflow_data_validation.arrow import arrow_util\n'), ((18443, 18479), 'pyarrow.types.is_null', 'pa.types.is_null', (['feature_array.type'], {}), '(feature_array.type)\n', (18459, 18479), True, 'import pyarrow as pa\n'), ((18595, 18635), 'tensorflow_data_validation.arrow.arrow_util.flatten_nested', 'arrow_util.flatten_nested', (['feature_array'], {}), '(feature_array)\n', (18620, 18635), False, 'from tensorflow_data_validation.arrow import arrow_util\n'), ((20342, 20410), 'itertools.chain', 'itertools.chain', (['[parent_presence_and_valency]', 'presence_and_valency'], {}), '([parent_presence_and_valency], presence_and_valency)\n', (20357, 20410), False, 'import itertools\n'), ((20453, 20498), 'tensorflow_metadata.proto.v0.statistics_pb2.PresenceAndValencyStatistics', 
'statistics_pb2.PresenceAndValencyStatistics', ([], {}), '()\n', (20496, 20498), False, 'from tensorflow_metadata.proto.v0 import statistics_pb2\n'), ((21519, 21587), 'itertools.chain', 'itertools.chain', (['[parent_presence_and_valency]', 'presence_and_valency'], {}), '([parent_presence_and_valency], presence_and_valency)\n', (21534, 21587), False, 'import itertools\n'), ((21630, 21671), 'tensorflow_metadata.proto.v0.statistics_pb2.WeightedCommonStatistics', 'statistics_pb2.WeightedCommonStatistics', ([], {}), '()\n', (21669, 21671), False, 'from tensorflow_metadata.proto.v0 import statistics_pb2\n'), ((25210, 25405), 'tensorflow_metadata.proto.v0.statistics_pb2.WeightedCommonStatistics', 'statistics_pb2.WeightedCommonStatistics', ([], {'num_non_missing': 'top_level_presence_and_valency.weighted_num_non_missing', 'tot_num_values': 'top_level_presence_and_valency.weighted_total_num_values'}), '(num_non_missing=\n top_level_presence_and_valency.weighted_num_non_missing, tot_num_values\n =top_level_presence_and_valency.weighted_total_num_values)\n', (25249, 25405), False, 'from tensorflow_metadata.proto.v0 import statistics_pb2\n'), ((27512, 27549), 'tensorflow_data_validation.utils.quantiles_util.find_median', 'quantiles_util.find_median', (['quantiles'], {}), '(quantiles)\n', (27538, 27549), False, 'from tensorflow_data_validation.utils import quantiles_util\n'), ((28497, 28539), 'tensorflow_metadata.proto.v0.statistics_pb2.WeightedNumericStatistics', 'statistics_pb2.WeightedNumericStatistics', ([], {}), '()\n', (28537, 28539), False, 'from tensorflow_metadata.proto.v0 import statistics_pb2\n'), ((28960, 28988), 'math.sqrt', 'math.sqrt', (['weighted_variance'], {}), '(weighted_variance)\n', (28969, 28988), False, 'import math\n'), ((29691, 29865), 'tensorflow_data_validation.utils.quantiles_util.generate_equi_width_histogram', 'quantiles_util.generate_equi_width_histogram', (['weighted_quantiles', 'numeric_stats.finite_min', 'numeric_stats.finite_max', 
'weighted_total_num_values', 'num_histogram_buckets'], {}), '(weighted_quantiles,\n numeric_stats.finite_min, numeric_stats.finite_max,\n weighted_total_num_values, num_histogram_buckets)\n', (29735, 29865), False, 'from tensorflow_data_validation.utils import quantiles_util\n'), ((30152, 30279), 'tensorflow_data_validation.utils.quantiles_util.generate_quantiles_histogram', 'quantiles_util.generate_quantiles_histogram', (['weighted_quantiles', 'weighted_total_num_values', 'num_quantiles_histogram_buckets'], {}), '(weighted_quantiles,\n weighted_total_num_values, num_quantiles_histogram_buckets)\n', (30195, 30279), False, 'from tensorflow_data_validation.utils import quantiles_util\n'), ((32533, 32551), 'itertools.count', 'itertools.count', (['(2)'], {}), '(2)\n', (32548, 32551), False, 'import itertools\n'), ((32785, 32922), 'tensorflow_data_validation.utils.quantiles_util.generate_quantiles_histogram', 'quantiles_util.generate_quantiles_histogram', (['num_values_quantiles', 'parent_presence_and_valency.num_non_missing', 'num_histogram_buckets'], {}), '(num_values_quantiles,\n parent_presence_and_valency.num_non_missing, num_histogram_buckets)\n', (32828, 32922), False, 'from tensorflow_data_validation.utils import quantiles_util\n'), ((32948, 32980), 'tensorflow_metadata.proto.v0.statistics_pb2.CustomStatistic', 'statistics_pb2.CustomStatistic', ([], {}), '()\n', (32978, 32980), False, 'from tensorflow_metadata.proto.v0 import statistics_pb2\n'), ((36802, 36911), 'tensorflow_data_validation.utils.top_k_uniques_stats_util.output_categorical_numeric', 'top_k_uniques_stats_util.output_categorical_numeric', (['categorical_numeric_types', 'feature_path', 'result.type'], {}), '(categorical_numeric_types,\n feature_path, result.type)\n', (36853, 36911), False, 'from tensorflow_data_validation.utils import top_k_uniques_stats_util\n'), ((42062, 42080), 'tensorflow_data_validation.utils.example_weight_map.ExampleWeightMap', 'ExampleWeightMap', ([], {}), '()\n', (42078, 
42080), False, 'from tensorflow_data_validation.utils.example_weight_map import ExampleWeightMap\n'), ((44694, 44810), 'tensorflow_data_validation.arrow.arrow_util.enumerate_arrays', 'arrow_util.enumerate_arrays', (['examples'], {'example_weight_map': 'self._example_weight_map', 'enumerate_leaves_only': '(False)'}), '(examples, example_weight_map=self.\n _example_weight_map, enumerate_leaves_only=False)\n', (44721, 44810), False, 'from tensorflow_data_validation.arrow import arrow_util\n'), ((49694, 49735), 'tensorflow_metadata.proto.v0.statistics_pb2.DatasetFeatureStatistics', 'statistics_pb2.DatasetFeatureStatistics', ([], {}), '()\n', (49733, 49735), False, 'from tensorflow_metadata.proto.v0 import statistics_pb2\n'), ((6412, 6440), 'numpy.sum', 'np.sum', (['(num_values * weights)'], {}), '(num_values * weights)\n', (6418, 6440), True, 'import numpy as np\n'), ((6480, 6510), 'numpy.sum', 'np.sum', (['weights[presence_mask]'], {}), '(weights[presence_mask])\n', (6486, 6510), True, 'import numpy as np\n'), ((12789, 12831), 'tensorflow_data_validation.utils.variance_util.WeightedMeanVarAccumulator', 'variance_util.WeightedMeanVarAccumulator', ([], {}), '()\n', (12829, 12831), False, 'from tensorflow_data_validation.utils import variance_util\n'), ((15378, 15409), 'numpy.count_nonzero', 'np.count_nonzero', (['values_no_nan'], {}), '(values_no_nan)\n', (15394, 15409), True, 'import numpy as np\n'), ((15447, 15470), 'pyarrow.array', 'pa.array', (['values_no_nan'], {}), '(values_no_nan)\n', (15455, 15470), True, 'import pyarrow as pa\n'), ((18644, 18693), 'pyarrow.types.is_floating', 'pa.types.is_floating', (['flattened_values_array.type'], {}), '(flattened_values_array.type)\n', (18664, 18693), True, 'import pyarrow as pa\n'), ((18705, 18753), 'pyarrow.types.is_integer', 'pa.types.is_integer', (['flattened_values_array.type'], {}), '(flattened_values_array.type)\n', (18724, 18753), True, 'import pyarrow as pa\n'), ((19130, 19147), 'numpy.sum', 'np.sum', 
(['num_bytes'], {}), '(num_bytes)\n', (19136, 19147), True, 'import numpy as np\n'), ((24894, 25045), 'tensorflow_data_validation.utils.quantiles_util.generate_quantiles_histogram', 'quantiles_util.generate_quantiles_histogram', (['num_values_quantiles', 'top_level_presence_and_valency.num_non_missing', 'num_values_histogram_buckets'], {}), '(num_values_quantiles,\n top_level_presence_and_valency.num_non_missing,\n num_values_histogram_buckets)\n', (24937, 25045), False, 'from tensorflow_data_validation.utils import quantiles_util\n'), ((29499, 29545), 'tensorflow_data_validation.utils.quantiles_util.find_median', 'quantiles_util.find_median', (['weighted_quantiles'], {}), '(weighted_quantiles)\n', (29525, 29545), False, 'from tensorflow_data_validation.utils import quantiles_util\n'), ((43600, 43657), 'tensorflow_data_validation.utils.schema_util.get_categorical_numeric_feature_types', 'schema_util.get_categorical_numeric_feature_types', (['schema'], {}), '(schema)\n', (43649, 43657), False, 'from tensorflow_data_validation.utils import schema_util\n'), ((43983, 44061), 'tfx_bsl.sketches.QuantilesSketch', 'sketches.QuantilesSketch', ([], {'eps': 'epsilon', 'max_num_elements': '(1 << 32)', 'num_streams': '(1)'}), '(eps=epsilon, max_num_elements=1 << 32, num_streams=1)\n', (44007, 44061), False, 'from tfx_bsl import sketches\n'), ((45113, 45190), 'tensorflow_data_validation.utils.stats_util.get_feature_type_from_arrow_type', 'stats_util.get_feature_type_from_arrow_type', (['feature_path', 'feature_array.type'], {}), '(feature_path, feature_array.type)\n', (45156, 45190), False, 'from tensorflow_data_validation.utils import stats_util\n'), ((6057, 6086), 'pyarrow.array', 'pa.array', (['num_values_not_none'], {}), '(num_values_not_none)\n', (6065, 6086), True, 'import pyarrow as pa\n'), ((10499, 10549), 'tfx_bsl.arrow.array_util.ListLengthsFromListArray', 'array_util.ListLengthsFromListArray', (['feature_array'], {}), '(feature_array)\n', (10534, 10549), False, 'from 
tfx_bsl.arrow import array_util\n'), ((15133, 15159), 'numpy.isfinite', 'np.isfinite', (['values_no_nan'], {}), '(values_no_nan)\n', (15144, 15159), True, 'import numpy as np\n'), ((15807, 15830), 'pyarrow.array', 'pa.array', (['values_no_nan'], {}), '(values_no_nan)\n', (15815, 15830), True, 'import pyarrow as pa\n'), ((15842, 15871), 'pyarrow.array', 'pa.array', (['flat_weights_no_nan'], {}), '(flat_weights_no_nan)\n', (15850, 15871), True, 'import pyarrow as pa\n'), ((16972, 17034), 'tfx_bsl.arrow.array_util.GetBinaryArrayTotalByteSize', 'array_util.GetBinaryArrayTotalByteSize', (['flattened_values_array'], {}), '(flattened_values_array)\n', (17010, 17034), False, 'from tfx_bsl.arrow import array_util\n'), ((19011, 19028), 'numpy.min', 'np.min', (['num_bytes'], {}), '(num_bytes)\n', (19017, 19028), True, 'import numpy as np\n'), ((19081, 19098), 'numpy.max', 'np.max', (['num_bytes'], {}), '(num_bytes)\n', (19087, 19098), True, 'import numpy as np\n'), ((39982, 40042), 'tensorflow_metadata.proto.v0.statistics_pb2.FeatureNameStatistics.Type.Name', 'statistics_pb2.FeatureNameStatistics.Type.Name', (['feature_type'], {}), '(feature_type)\n', (40028, 40042), False, 'from tensorflow_metadata.proto.v0 import statistics_pb2\n'), ((43504, 43542), 'tensorflow_data_validation.utils.schema_util.get_bytes_features', 'schema_util.get_bytes_features', (['schema'], {}), '(schema)\n', (43534, 43542), False, 'from tensorflow_data_validation.utils import schema_util\n'), ((46327, 46444), 'tensorflow_data_validation.utils.top_k_uniques_stats_util.output_categorical_numeric', 'top_k_uniques_stats_util.output_categorical_numeric', (['self._categorical_numeric_types', 'feature_path', 'feature_type'], {}), '(self.\n _categorical_numeric_types, feature_path, feature_type)\n', (46378, 46444), False, 'from tensorflow_data_validation.utils import top_k_uniques_stats_util\n'), ((15241, 15262), 'numpy.min', 'np.min', (['finite_values'], {}), '(finite_values)\n', (15247, 15262), True, 'import 
numpy as np\n'), ((15311, 15332), 'numpy.max', 'np.max', (['finite_values'], {}), '(finite_values)\n', (15317, 15332), True, 'import numpy as np\n'), ((18885, 18937), 'tfx_bsl.arrow.array_util.GetElementLengths', 'array_util.GetElementLengths', (['flattened_values_array'], {}), '(flattened_values_array)\n', (18913, 18937), False, 'from tfx_bsl.arrow import array_util\n'), ((10982, 11038), 'tfx_bsl.arrow.array_util.GetFlattenedArrayParentIndices', 'array_util.GetFlattenedArrayParentIndices', (['feature_array'], {}), '(feature_array)\n', (11023, 11038), False, 'from tfx_bsl.arrow import array_util\n'), ((17336, 17384), 'numpy.vectorize', 'np.vectorize', (['_len_after_conv'], {'otypes': '[np.int32]'}), '(_len_after_conv, otypes=[np.int32])\n', (17348, 17384), True, 'import numpy as np\n'), ((17408, 17442), 'numpy.asarray', 'np.asarray', (['flattened_values_array'], {}), '(flattened_values_array)\n', (17418, 17442), True, 'import numpy as np\n'), ((10387, 10442), 'tfx_bsl.arrow.array_util.GetArrayNullBitmapAsByteArray', 'array_util.GetArrayNullBitmapAsByteArray', (['feature_array'], {}), '(feature_array)\n', (10427, 10442), False, 'from tfx_bsl.arrow import array_util\n'), ((48244, 48361), 'tensorflow_data_validation.utils.top_k_uniques_stats_util.output_categorical_numeric', 'top_k_uniques_stats_util.output_categorical_numeric', (['self._categorical_numeric_types', 'feature_path', 'current_type'], {}), '(self.\n _categorical_numeric_types, feature_path, current_type)\n', (48295, 48361), False, 'from tensorflow_data_validation.utils import top_k_uniques_stats_util\n')] |
"""Extract TagSLAM cube and board odometry from a rosbag into a CSV file.

Output rows are [t, cube quat (x,y,z,w), cube pos (x,y,z),
board quat (x,y,z,w), board pos (x,y,z)], one row per odometry message.
"""
import rosbag
import subprocess
import yaml
import numpy as np
import pdb
import sys
from scipy import interpolate

# static parameters
CUBE_TOPIC_STRING = '/tagslam/odom/body_cube'
BOARD_TOPIC_STRING = '/tagslam/odom/body_surface'

if len(sys.argv) < 3:
    print("usage: python[2] apriltag_csv.py [ROSBAG] [CSV_OUT]")
    sys.exit()
inbag = sys.argv[1]
outcsv = sys.argv[2]


def _pose_to_row(pose):
    """Return (quaternion xyzw, position xyz) arrays of a geometry_msgs Pose."""
    quat = np.asarray([pose.orientation.x, pose.orientation.y,
                       pose.orientation.z, pose.orientation.w])
    pos = np.asarray([pose.position.x, pose.position.y, pose.position.z])
    return quat, pos


# open rosbag
bag = rosbag.Bag(inbag)
# get summary info from rosbag as a dictionary
# safe_load: yaml.load without an explicit Loader warns on PyYAML >= 5.1
# and is a TypeError on PyYAML >= 6
info_dict = yaml.safe_load(bag._get_yaml_info())
# extract metadata from cube and board topics
cube_topic = [topic for topic in info_dict['topics'] if topic['topic'] == CUBE_TOPIC_STRING][0]
board_topic = [topic for topic in info_dict['topics'] if topic['topic'] == BOARD_TOPIC_STRING][0]
# ensure there are an equal number of cube and board odom messages
num_msg = cube_topic['messages']
if num_msg != board_topic['messages']:
    raise Exception('Missing odom messages for board and/or cube!')

# extract cube pose data (timestamps are taken from the cube messages)
t_ros = np.zeros(num_msg)
cube_ros = np.zeros((7, num_msg))
for i, (topic, msg, t) in enumerate(bag.read_messages(topics=[CUBE_TOPIC_STRING])):
    tstamp = msg.header.stamp
    t_ros[i] = tstamp.secs + tstamp.nsecs * 1e-9
    cube_ros[:4, i], cube_ros[4:7, i] = _pose_to_row(msg.pose.pose)

# extract board pose data
board_ros = np.zeros((7, num_msg))
for i, (topic, msg, t) in enumerate(bag.read_messages(topics=[BOARD_TOPIC_STRING])):
    board_ros[:4, i], board_ros[4:7, i] = _pose_to_row(msg.pose.pose)
bag.close()

# assemble CSV data (one column per message, then transpose) and save
data = np.concatenate((np.expand_dims(t_ros, axis=0), cube_ros, board_ros), axis=0)
data = data.T
np.savetxt(outcsv, data, delimiter=',')
| [
"numpy.asarray",
"rosbag.Bag",
"numpy.zeros",
"numpy.savetxt",
"sys.exit",
"numpy.expand_dims"
] | [((393, 410), 'rosbag.Bag', 'rosbag.Bag', (['inbag'], {}), '(inbag)\n', (403, 410), False, 'import rosbag\n'), ((990, 1007), 'numpy.zeros', 'np.zeros', (['num_msg'], {}), '(num_msg)\n', (998, 1007), True, 'import numpy as np\n'), ((1019, 1041), 'numpy.zeros', 'np.zeros', (['(7, num_msg)'], {}), '((7, num_msg))\n', (1027, 1041), True, 'import numpy as np\n'), ((1588, 1610), 'numpy.zeros', 'np.zeros', (['(7, num_msg)'], {}), '((7, num_msg))\n', (1596, 1610), True, 'import numpy as np\n'), ((2196, 2235), 'numpy.savetxt', 'np.savetxt', (['outcsv', 'data'], {'delimiter': '""","""'}), "(outcsv, data, delimiter=',')\n", (2206, 2235), True, 'import numpy as np\n'), ((319, 329), 'sys.exit', 'sys.exit', ([], {}), '()\n', (327, 329), False, 'import sys\n'), ((1265, 1343), 'numpy.asarray', 'np.asarray', (['[cube_pose.position.x, cube_pose.position.y, cube_pose.position.z]'], {}), '([cube_pose.position.x, cube_pose.position.y, cube_pose.position.z])\n', (1275, 1343), True, 'import numpy as np\n'), ((1360, 1477), 'numpy.asarray', 'np.asarray', (['[cube_pose.orientation.x, cube_pose.orientation.y, cube_pose.orientation.z,\n cube_pose.orientation.w]'], {}), '([cube_pose.orientation.x, cube_pose.orientation.y, cube_pose.\n orientation.z, cube_pose.orientation.w])\n', (1370, 1477), True, 'import numpy as np\n'), ((1760, 1846), 'numpy.asarray', 'np.asarray', (['[board_pose.position.x, board_pose.position.y, board_pose.position.z]'], {}), '([board_pose.position.x, board_pose.position.y, board_pose.\n position.z])\n', (1770, 1846), True, 'import numpy as np\n'), ((1859, 1980), 'numpy.asarray', 'np.asarray', (['[board_pose.orientation.x, board_pose.orientation.y, board_pose.orientation\n .z, board_pose.orientation.w]'], {}), '([board_pose.orientation.x, board_pose.orientation.y, board_pose.\n orientation.z, board_pose.orientation.w])\n', (1869, 1980), True, 'import numpy as np\n'), ((2121, 2150), 'numpy.expand_dims', 'np.expand_dims', (['t_ros'], {'axis': '(0)'}), '(t_ros, 
axis=0)\n', (2135, 2150), True, 'import numpy as np\n')] |
"""Methods to plot data defined on Landlab grids.
Plotting functions
++++++++++++++++++
.. autosummary::
~landlab.plot.imshow.imshowhs_grid
~landlab.plot.imshow.imshowhs_grid_at_node
"""
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import LightSource
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from .event_handler import query_grid_on_button_press
def imshowhs_grid(grid, values, **kwds):
    """Prepare a map view of data over all nodes in the grid using a hillshade
    topography map in the background.

    This is a thin dispatcher: it resolves *values* (a field name or an
    array) at the requested grid element and forwards all remaining keyword
    arguments to :func:`imshowhs_grid_at_node`, which documents the full set
    of plotting options.

    For now, this function only works with regular grids and node values.

    Parameters
    ----------
    grid : ModelGrid
        Grid containing the field to plot, or describing the geometry of the
        provided array.
    values : array_like, masked_array, or str
        Node values, or a field name as a string from which to draw the data.
    at : str, optional
        Grid element the values are defined at; only ``"node"`` is currently
        supported (``values_at`` is accepted as a legacy alias).
    **kwds
        All remaining keywords (titles, colormaps, drapes, colorbar styling,
        output control, ...) are passed through to
        :func:`imshowhs_grid_at_node`; see that function for details.

    Returns
    -------
    ax : matplotlib axes
        The axes the map was drawn on.

    Raises
    ------
    NotImplementedError
        If values are requested at cells.
    TypeError
        If the value location is not recognized.
    """
    # "values_at" is the legacy spelling; "at" wins when both are given.
    values_at = kwds.pop("values_at", "node")
    values_at = kwds.pop("at", values_at)

    if isinstance(values, str):
        values = grid.field_values(values_at, values)

    if values_at == "node":
        return imshowhs_grid_at_node(grid, values, **kwds)
    if values_at == "cell":
        raise NotImplementedError(
            "For now, only values at nodes can be displayed in the imshowhs functions"
        )
    raise TypeError(f"value location {values_at} not understood")
def imshowhs_grid_at_node(grid, values, **kwds):
    """Prepare a map view of node data drawn over a hillshaded topography.

    Data is plotted as cells shaded with the value at the node at its
    center.  Outer edges of perimeter cells are extrapolated.  Closed nodes
    are colored uniformly (default black, overridden with the
    ``color_for_closed`` keyword); other open boundary nodes get their
    actual values.

    *values* can be a field name, a regular array, or a masked array.  If a
    masked array is provided, masked entries will be treated as if they were
    Landlab BC_NODE_IS_CLOSED.  Used together with ``color_for_closed=None``
    (i.e., "transparent"), this allows construction of overlay layers in a
    figure (e.g., only defining values in a river network, and overlaying it
    on another landscape).

    Use matplotlib functions like ``xlim``/``ylim`` to modify your plot
    after calling :func:`imshowhs_grid`, as desired.  Node coordinates are
    printed when a mouse button is pressed on a cell in the plot.

    For now, this function only works with regular grids.

    Parameters
    ----------
    grid : ModelGrid
        Grid containing the field to plot, or describing the geometry of the
        provided array.
    values : array_like, masked_array, or str
        Node values, or a field name as a string from which to draw the data.
    plot_name : str, optional
        String to put as the plot title.
    var_name, var_name_two : str, optional
        Variable name(s) used as colorbar label for the first (and, with
        ``plot_type='Drape2'``, second) layer.
    var_units : str, optional
        Units for the variable being plotted, for the colorbar.
    grid_units : tuple of str, optional
        Units for the y and x dimensions.  If None, the grid property
        ``axis_units`` is used; if no units are specified there, no entry is
        made.
    symmetric_cbar : bool
        Make the colormap symmetric about 0.
    cmap : str
        Name of a colormap.
    limits : tuple of float
        Minimum and maximum of the colorbar.
    vmin, vmax : float
        Alternatives to *limits*.
    norm : matplotlib.colors.Normalize
        The normalizing object which scales data, typically into the
        interval [0, 1].  Ignore in most cases.
    ticks_km : bool, optional
        Display ticks in km instead of m.  Default: False.
    allow_colorbar : bool
        If True, include the colorbar.
    shrink : float
        Fraction by which to shrink the colorbar.
    color_for_closed : str or None
        Color to use for closed nodes (default 'black').  If None, closed
        (or masked) nodes will be transparent.
    color_for_background : color str or other color declaration, or None
        Color to use for closed elements (default None).  If None, the
        background will be transparent, and appear white.
    output : None, string, or bool
        If None (or False), the image is sent to the imaging buffer to await
        an explicit call to ``show()`` or ``savefig()`` from outside this
        function.  If a string, it is the path (with file extension) passed
        to ``plt.savefig``.  If True, ``plt.show()`` is called once plotting
        is complete.
    fontweight_xlabel, fontweight_ylabel : str, optional
        Weight of the x and y labels.  The default is 'bold'.
    plot_type : {'DEM', 'Hillshade', 'Drape1', 'Drape2'}, optional
        * 'DEM': digital elevation map underlain by a shaded relief, based
          on the same DEM ('topographic__elevation')
        * 'Hillshade': shaded relief of the provided DEM only
        * 'Drape1': one provided layer on top of the shaded relief of the
          'topographic__elevation' field
        * 'Drape2': two layers on top of that shaded relief
        The default is "DEM".
    drape1, drape2 : array_like, masked_array, optional
        Node values to plot on top of the hillshade map (drape2 on top of
        drape1).  The default is None.
    cmap2 : str, optional
        Name of a colormap for drape2.  The default is None.
    vertical_exa : float, optional
        Vertical exaggeration of the hillshade map.  The default is None.
    azdeg, altdeg : float, optional
        Azimuth (default 315) and elevation (default 65) of the light
        source.
    thres_drape1, thres_drape2 : float, optional
        Thresholds below which drape1/drape2 are made transparent.  The
        default is None.
    alpha, alpha2 : float (0-1), optional
        Transparency of DEM/Drape1 and of Drape2.  The default is None.
    add_double_colorbar : bool, optional
        Add a double colorbar when two drapes are plotted.  Default: False.
    plt_contour : bool, optional
        Add contour lines to the elevation plot.  The default is False.
    contour_nb : int, optional
        Number of contour lines.  The default is 50.
    default_fontsize : float, optional
        Default font size of plot labels.  The default is 10.
    cbar_height, cbar_width : percentage, optional
        Height and width of the colorbar as a percentage of the figure.
        Defaults: "5%", "30%".
    cbar_or : str, optional
        Orientation of the colorbar.  The default is "horizontal".
    cbar_loc : str, optional
        Location of the colorbar.  The default is "lower right".
    bbox_to_anchor : vector, optional
        Bbox to anchor.  The default is (0, 0, 1, 1).
    cbar_ticks_position, cbar_ticks_position2 : str, optional
        Location of colorbar ticks (below or on top) for the first and
        second colorbar.  Defaults: "top", "bottom".
    colorbar_label_y : float, optional
        Location of the colorbar label with respect to the colorbar in the
        y direction.  The default is -40.
    colorbar_label_x : float, optional
        Location of the colorbar label with respect to the colorbar in the
        x direction.  The default is 0.5.
    cbar_tick_size : float, optional
        Colorbar tick size.  The default is 10.
    cbar_label_color : str, optional
        Colorbar label color.  The default is 'black'.
    cbar_tick_color : str, optional
        Colorbar tick color.  The default is 'black'.
    cbar_label_fontweight : str, optional
        Colorbar font weight.  The default is 'bold'.
    add_label_bbox : bool, optional
        Add a bbox surrounding the colorbar label.  The default is False.
    y_label_offSet_var_1, y_label_offSet_var_2 : float, optional
        Offsets of the colorbar labels of the first and second variable in
        a plot with two overlaying drapes.  Defaults: 3.0, -1.25.

    Returns
    -------
    ax : figure ax
        return ax if output == True.
    """
    # Resolve the field by name at nodes, or flatten a provided array.
    if isinstance(values, str):
        values_at_node = grid.at_node[values]
    else:
        values_at_node = values.reshape((-1,))

    if values_at_node.size != grid.number_of_nodes:
        raise ValueError("number of values does not match number of nodes")

    # Mask closed boundary nodes so the plotting routine can color them
    # uniformly (or leave them transparent).
    values_at_node = np.ma.masked_where(
        grid.status_at_node == grid.BC_NODE_IS_CLOSED, values_at_node
    )

    ax = _imshowhs_grid_values(grid, values_at_node, **kwds)

    if isinstance(values, str):
        plt.title(values)

    # Echo node coordinates to the console when the plot is clicked.
    plt.gcf().canvas.mpl_connect(
        "button_press_event", lambda event: query_grid_on_button_press(event, grid)
    )
    # plt.show()

    return ax
def _apply_color_limits(kwds, arr, limits, vmin, vmax, symmetric_cbar):
    """Set ``kwds['vmin']``/``kwds['vmax']`` from *limits*, *vmin*/*vmax*,
    or the data range of *arr* (optionally symmetric about 0)."""
    kwds["vmin"], kwds["vmax"] = arr.min(), arr.max()
    if limits is None and vmin is None and vmax is None:
        if symmetric_cbar:
            limit = max(abs(kwds["vmin"]), abs(kwds["vmax"]))
            kwds["vmin"], kwds["vmax"] = -limit, limit
    elif limits is not None:
        kwds["vmin"], kwds["vmax"] = limits[0], limits[1]
    else:
        if vmin is not None:
            kwds["vmin"] = vmin
        if vmax is not None:
            kwds["vmax"] = vmax


def _masked_node_values(grid, layer):
    """Return node values of *layer* (field name or array), masked at
    closed boundary nodes."""
    if isinstance(layer, str):
        vals = grid.at_node[layer]
    else:
        vals = layer.reshape((-1,))
    if vals.size != grid.number_of_nodes:
        raise ValueError("number of values does not match number of nodes")
    return np.ma.masked_where(
        grid.status_at_node == grid.BC_NODE_IS_CLOSED, vals
    )


def _colorbar_ticks(minV, maxV):
    """Two ticks at 20% and 80% of the range, rounded to a precision that
    suits the magnitude of *maxV*."""
    length = maxV - minV
    if maxV <= 10:
        return [np.round(minV + 0.2 * length, 1), np.round(minV + 0.8 * length, 1)]
    elif maxV <= 100:
        return [np.round(minV + 0.2 * length, 0), np.round(minV + 0.8 * length, 0)]
    return [
        np.round(0.1 * (minV + 0.2 * length)) * 10,
        np.round(0.1 * (minV + 0.8 * length)) * 10,
    ]


def _make_colorbar(image, ax, cax, orientation, minV, maxV, tick_size, tick_color, ticks_position):
    """Attach a two-tick colorbar for *image* to the inset axes *cax*."""
    cb = plt.colorbar(
        image, ax=ax, cax=cax, orientation=orientation, ticks=_colorbar_ticks(minV, maxV)
    )
    cb.ax.tick_params(labelsize=tick_size, color=tick_color, labelcolor=tick_color)
    cax.xaxis.set_ticks_position(ticks_position)
    return cb


def _label_easting_northing(ax, unit, fontweight_xlabel, fontweight_ylabel, fontsize):
    """Label both axes as Easting/Northing with *unit* appended.

    NOTE(review): the original code used ``grid_units[1]`` (the x units)
    for BOTH axes; that behavior is preserved here — confirm intent.
    """
    ax.set_xlabel("Easting, %s" % unit, fontweight=fontweight_xlabel, fontsize=fontsize)
    ax.set_ylabel("Northing, %s" % unit, fontweight=fontweight_ylabel, fontsize=fontsize)


def _imshowhs_grid_values(
    grid,
    values,
    plot_name=None,
    var_name=None,
    var_name_two=None,
    var_units=None,
    fontweight_xlabel="bold",
    fontweight_ylabel="bold",
    grid_units=(None, None),
    symmetric_cbar=False,
    cmap="pink",
    limits=None,
    allow_colorbar=True,
    vmin=None,
    vmax=None,
    norm=None,
    ticks_km=False,
    shrink=1.0,
    color_for_closed=None,
    color_for_background=None,
    output=None,
    plot_type="DEM",
    drape1=None,
    drape2=None,
    cmap2=None,
    vertical_exa=None,
    azdeg=315,
    altdeg=65,
    thres_drape1=None,
    alpha=None,
    thres_drape2=None,
    alpha2=None,
    add_double_colorbar=False,
    plt_contour=False,
    contour_nb=50,
    default_fontsize=10,
    cbar_height="5%",
    cbar_width="30%",
    cbar_or="horizontal",
    cbar_loc="lower right",
    bbox_to_anchor=(0, 0, 1, 1),
    cbar_ticks_position="top",
    cbar_ticks_position2="bottom",
    colorbar_label_y=-40,
    colorbar_label_x=0.5,
    cbar_tick_size=10,
    cbar_label_color="black",
    cbar_tick_color="black",
    cbar_label_fontweight="bold",
    add_label_bbox=False,
    y_label_offSet_var_1=3,
    y_label_offSet_var_2=-1.25,
):
    """Draw node *values* over a hillshaded relief on the current axes.

    Internal worker behind :func:`imshowhs_grid_at_node`, which documents
    every keyword.  *values* must already be masked at closed nodes.

    Note: ``norm`` and ``shrink`` are accepted for API compatibility but are
    currently unused by this implementation.

    Returns
    -------
    ax : matplotlib axes
        The axes the map was drawn on.
    """
    from ..grid.raster import RasterModelGrid

    if not isinstance(grid, RasterModelGrid):
        raise NotImplementedError(
            "For now, only RasterModelGrids are supported in the imshowhs functions"
        )

    plot_type_options = ["DEM", "Hillshade", "Drape1", "Drape2"]
    if plot_type not in plot_type_options:
        raise ValueError(
            "plot_type should be one of the following: "
            + ", ".join(map(str, plot_type_options))
        )
    if plot_type == "Drape1" and drape1 is None:
        raise ValueError(
            "if plot_type is Drape1, 'drape1' input argument cannot be None. \
            Provide at least one array with the size of the number of grid nodes as drape1='field_to_be_plotted'"
        )
    if plot_type == "Drape2" and (drape1 is None or drape2 is None):
        raise ValueError(
            "if plot_type is Drape2, 'drape1' and 'drape2' input arguments cannot be None. \
            Provide an array for both with the size of the number of grid nodes as drape1='field1_to_be_plotted' and drape2='field2_to_be_plotted' "
        )

    # Properties of the bounding box drawn behind colorbar labels, if used.
    if add_label_bbox:
        bbox_prop = dict(
            boxstyle="round", pad=0.1, facecolor="white", alpha=0.7, edgecolor="white"
        )
    else:
        bbox_prop = None

    cmap = plt.get_cmap(cmap)
    if color_for_closed is not None:
        cmap.set_bad(color=color_for_closed)
    else:
        cmap.set_bad(alpha=0.0)

    values.shape = grid.shape
    if values.ndim != 2:
        raise ValueError("values must have ndim == 2")

    # somethingToPlot flags whether any unmasked pixel survives thresholding.
    somethingToPlot = True

    # Cell-edge coordinates (nodes sit at cell centers).
    y = (
        np.arange(values.shape[0] + 1) * grid.dy
        - grid.dy * 0.5
        + grid.xy_of_lower_left[1]
    )
    x = (
        np.arange(values.shape[1] + 1) * grid.dx
        - grid.dx * 0.5
        + grid.xy_of_lower_left[0]
    )

    ls = LightSource(azdeg=azdeg, altdeg=altdeg)
    dx = x[1] - x[0]
    dy = y[1] - y[0]
    ve = vertical_exa if vertical_exa is not None else 3
    extent = np.array([x[0] - dx, x[-1] + dx, y[-1] + dy, y[0] - dy])
    if ticks_km:
        extent /= 1e3
    ax1 = plt.gca()
    if alpha is None:
        alpha = 1
    if alpha2 is None:
        alpha2 = 1

    cb = None  # set when a (single) colorbar is created
    val1 = None

    if plot_type == "DEM":
        kwds = dict(cmap=cmap)
        _apply_color_limits(kwds, values, limits, vmin, vmax, symmetric_cbar)
        # Blend the colormap over the DEM's own shaded relief.
        rgb = ls.shade(
            values.data,
            cmap=cmap,
            blend_mode="hsv",
            vert_exag=ve,
            dx=dx,
            dy=dy,
            fraction=0.4,
        )
        ima = ax1.imshow(rgb, extent=extent, **kwds)
    elif plot_type == "Hillshade":
        ima = plt.imshow(
            ls.hillshade(values, vert_exag=ve, dx=dx, dy=dy),
            cmap="gray",
            extent=extent,
        )
        # A gray-only relief has no meaningful data colorbar.
        allow_colorbar = False
    else:  # "Drape1" or "Drape2"
        values_at_node_drape1 = _masked_node_values(grid, drape1)
        if thres_drape1 is not None:
            # If every value is below the threshold there is nothing to draw.
            if not np.any(values_at_node_drape1 > thres_drape1):
                somethingToPlot = False
            values_at_node_drape1 = np.ma.masked_where(
                values_at_node_drape1 < thres_drape1, values_at_node_drape1
            )
        val1 = values_at_node_drape1.reshape(grid.shape)

        kwds = dict(cmap=cmap)
        _apply_color_limits(kwds, val1, limits, vmin, vmax, symmetric_cbar)
        # Shaded relief of the topography underneath the drape.
        plt.imshow(
            ls.hillshade(values, vert_exag=ve, dx=dx, dy=dy),
            cmap="gray",
            extent=extent,
        )
        ima = ax1.imshow(val1, extent=extent, alpha=alpha, **kwds)
        if plt_contour:
            # BUG FIX: contour coordinates must be in the same units as
            # *extent*; previously they were always scaled to km, misplacing
            # the contours whenever ticks_km was False.
            scale = 1e-3 if ticks_km else 1.0
            plt.contour(
                x[0:-1] * scale,
                y[0:-1] * scale,
                val1,
                contour_nb,
                colors="black",
                linewidths=0.2,
            )

    if somethingToPlot:
        if plot_type != "Drape2":
            # Drape2 inverts the axis later, after the second layer is drawn.
            ax1.invert_yaxis()
        plt.xticks(fontsize=default_fontsize)
        plt.yticks(fontsize=default_fontsize)

        # With Drape2 the default is to show the colorbar of the first layer
        # unless a double colorbar was requested.
        if allow_colorbar and (
            plot_type in ("DEM", "Drape1")
            or (plot_type == "Drape2" and not add_double_colorbar)
        ):
            axins1 = inset_axes(
                ax1,
                width=cbar_width,
                height=cbar_height,
                loc=cbar_loc,
                bbox_transform=ax1.transAxes,
                borderpad=0,
                bbox_to_anchor=bbox_to_anchor,
            )
            cb = _make_colorbar(
                ima,
                ax1,
                axins1,
                cbar_or,
                kwds["vmin"],
                kwds["vmax"],
                cbar_tick_size,
                cbar_tick_color,
                cbar_ticks_position,
            )

    if plot_type == "Drape2":
        values_at_node_drape2 = _masked_node_values(grid, drape2)
        if thres_drape2 is not None:
            values_at_node_drape2 = np.ma.masked_where(
                values_at_node_drape2 < thres_drape2, values_at_node_drape2
            )
        val2 = values_at_node_drape2.reshape(grid.shape)

        if cmap2 is None:
            cmap2 = plt.cm.terrain
        kwds = dict(cmap=cmap2)
        _apply_color_limits(kwds, val2, limits, vmin, vmax, symmetric_cbar)
        ima2 = ax1.imshow(val2, extent=extent, alpha=alpha2, **kwds)
        ax1.invert_yaxis()

        if add_double_colorbar:
            # One stacked colorbar per drape, labels outside the bars.
            axins1 = inset_axes(
                ax1,
                width=cbar_width,
                height=cbar_height,
                loc=cbar_loc,
                bbox_to_anchor=(-0.005, 0.25, 1, 1),
                bbox_transform=ax1.transAxes,
                borderpad=0,
            )
            _make_colorbar(
                ima,
                ax1,
                axins1,
                cbar_or,
                np.min(val1),
                np.max(val1),
                cbar_tick_size,
                cbar_tick_color,
                cbar_ticks_position,
            )
            axins1.set_xlabel(
                var_name,
                usetex=True,
                fontsize=default_fontsize,
                rotation=0,
                color=cbar_label_color,
                fontweight=cbar_label_fontweight,
                bbox=bbox_prop,
            )
            axins1.xaxis.set_label_coords(0.5, y_label_offSet_var_1)

            axins2 = inset_axes(
                ax1,
                width=cbar_width,
                height=cbar_height,
                loc=cbar_loc,
                bbox_to_anchor=(-0.005, 0.15, 1, 1),
                bbox_transform=ax1.transAxes,
                borderpad=0,
            )
            _make_colorbar(
                ima2,
                ax1,
                axins2,
                cbar_or,
                np.min(val2),
                np.max(val2),
                cbar_tick_size,
                cbar_tick_color,
                cbar_ticks_position2,
            )
            axins2.set_xlabel(
                var_name_two,
                usetex=True,
                fontsize=default_fontsize,
                rotation=0,
                color=cbar_label_color,
                fontweight=cbar_label_fontweight,
                bbox=bbox_prop,
            )
            axins2.xaxis.set_label_coords(0.5, y_label_offSet_var_2)

    # Axis labels: plain "Easting"/"Northing" only when the grid's own axis
    # units are dimensionless; explicitly-passed units always get appended.
    if grid_units[1] is None and grid_units[0] is None:
        grid_units = grid.axis_units
        if grid_units[1] == "-" and grid_units[0] == "-":
            ax1.set_xlabel(
                "Easting", fontweight=fontweight_xlabel, fontsize=default_fontsize
            )
            ax1.set_ylabel(
                "Northing", fontweight=fontweight_ylabel, fontsize=default_fontsize
            )
        else:
            _label_easting_northing(
                ax1, grid_units[1], fontweight_xlabel, fontweight_ylabel, default_fontsize
            )
    else:
        _label_easting_northing(
            ax1, grid_units[1], fontweight_xlabel, fontweight_ylabel, default_fontsize
        )

    if plot_name is not None:
        plt.title("%s" % (plot_name))

    # Colorbar label ("name (units)"); Drape2 labels its bars above instead.
    if (
        somethingToPlot
        and (var_name is not None or var_units is not None)
        and plot_type != "Drape2"
    ):
        if var_name is not None:
            assert type(var_name) is str
            if var_units is not None:
                assert type(var_units) is str
                colorbar_label = var_name + " (" + var_units + ")"
            else:
                colorbar_label = var_name
        else:
            assert type(var_units) is str
            colorbar_label = "(" + var_units + ")"
        if allow_colorbar:
            cb.set_label(
                colorbar_label,
                fontsize=default_fontsize,
                labelpad=colorbar_label_y,
                color=cbar_label_color,
                x=colorbar_label_x,
                fontweight=cbar_label_fontweight,
                bbox=bbox_prop,
            )

    if color_for_background is not None:
        plt.gca().set_facecolor(color_for_background)

    if output is not None:
        if type(output) is str:
            plt.savefig(output)
            plt.clf()
        elif output:
            plt.show()

    return ax1
| [
"mpl_toolkits.axes_grid1.inset_locator.inset_axes",
"numpy.array",
"numpy.arange",
"numpy.ma.masked_where",
"numpy.max",
"matplotlib.pyplot.contour",
"matplotlib.pyplot.yticks",
"numpy.min",
"numpy.round",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.gca",
"mat... | [((16211, 16296), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(grid.status_at_node == grid.BC_NODE_IS_CLOSED)', 'values_at_node'], {}), '(grid.status_at_node == grid.BC_NODE_IS_CLOSED,\n values_at_node)\n', (16229, 16296), True, 'import numpy as np\n'), ((19185, 19203), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap'], {}), '(cmap)\n', (19197, 19203), True, 'import matplotlib.pyplot as plt\n'), ((16410, 16427), 'matplotlib.pyplot.title', 'plt.title', (['values'], {}), '(values)\n', (16419, 16427), True, 'import matplotlib.pyplot as plt\n'), ((19906, 19945), 'matplotlib.colors.LightSource', 'LightSource', ([], {'azdeg': 'azdeg', 'altdeg': 'altdeg'}), '(azdeg=azdeg, altdeg=altdeg)\n', (19917, 19945), False, 'from matplotlib.colors import LightSource\n'), ((20174, 20230), 'numpy.array', 'np.array', (['[x[0] - dx, x[-1] + dx, y[-1] + dy, y[0] - dy]'], {}), '([x[0] - dx, x[-1] + dx, y[-1] + dy, y[0] - dy])\n', (20182, 20230), True, 'import numpy as np\n'), ((20293, 20302), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (20300, 20302), True, 'import matplotlib.pyplot as plt\n'), ((35810, 35837), 'matplotlib.pyplot.title', 'plt.title', (["('%s' % plot_name)"], {}), "('%s' % plot_name)\n", (35819, 35837), True, 'import matplotlib.pyplot as plt\n'), ((24348, 24385), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': 'default_fontsize'}), '(fontsize=default_fontsize)\n', (24358, 24385), True, 'import matplotlib.pyplot as plt\n'), ((24398, 24435), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': 'default_fontsize'}), '(fontsize=default_fontsize)\n', (24408, 24435), True, 'import matplotlib.pyplot as plt\n'), ((36921, 36940), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output'], {}), '(output)\n', (36932, 36940), True, 'import matplotlib.pyplot as plt\n'), ((36953, 36962), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (36960, 36962), True, 'import matplotlib.pyplot as plt\n'), ((16433, 16442), 
'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (16440, 16442), True, 'import matplotlib.pyplot as plt\n'), ((24858, 25003), 'mpl_toolkits.axes_grid1.inset_locator.inset_axes', 'inset_axes', (['ax1'], {'width': 'cbar_width', 'height': 'cbar_height', 'loc': 'cbar_loc', 'bbox_transform': 'ax1.transAxes', 'borderpad': '(0)', 'bbox_to_anchor': 'bbox_to_anchor'}), '(ax1, width=cbar_width, height=cbar_height, loc=cbar_loc,\n bbox_transform=ax1.transAxes, borderpad=0, bbox_to_anchor=bbox_to_anchor)\n', (24868, 25003), False, 'from mpl_toolkits.axes_grid1.inset_locator import inset_axes\n'), ((27507, 27599), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(grid.status_at_node == grid.BC_NODE_IS_CLOSED)', 'values_at_node_drape2'], {}), '(grid.status_at_node == grid.BC_NODE_IS_CLOSED,\n values_at_node_drape2)\n', (27525, 27599), True, 'import numpy as np\n'), ((36803, 36812), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (36810, 36812), True, 'import matplotlib.pyplot as plt\n'), ((36996, 37006), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (37004, 37006), True, 'import matplotlib.pyplot as plt\n'), ((19630, 19660), 'numpy.arange', 'np.arange', (['(values.shape[0] + 1)'], {}), '(values.shape[0] + 1)\n', (19639, 19660), True, 'import numpy as np\n'), ((19774, 19804), 'numpy.arange', 'np.arange', (['(values.shape[1] + 1)'], {}), '(values.shape[1] + 1)\n', (19783, 19804), True, 'import numpy as np\n'), ((22223, 22315), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(grid.status_at_node == grid.BC_NODE_IS_CLOSED)', 'values_at_node_drape1'], {}), '(grid.status_at_node == grid.BC_NODE_IS_CLOSED,\n values_at_node_drape1)\n', (22241, 22315), True, 'import numpy as np\n'), ((27776, 27855), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(values_at_node_drape2 < thres_drape2)', 'values_at_node_drape2'], {}), '(values_at_node_drape2 < thres_drape2, values_at_node_drape2)\n', (27794, 27855), True, 'import numpy as np\n'), ((29145, 29300), 
'mpl_toolkits.axes_grid1.inset_locator.inset_axes', 'inset_axes', (['ax1'], {'width': 'cbar_width', 'height': 'cbar_height', 'loc': 'cbar_loc', 'bbox_to_anchor': '(-0.005, 0.25, 1, 1)', 'bbox_transform': 'ax1.transAxes', 'borderpad': '(0)'}), '(ax1, width=cbar_width, height=cbar_height, loc=cbar_loc,\n bbox_to_anchor=(-0.005, 0.25, 1, 1), bbox_transform=ax1.transAxes,\n borderpad=0)\n', (29155, 29300), False, 'from mpl_toolkits.axes_grid1.inset_locator import inset_axes\n'), ((29659, 29671), 'numpy.max', 'np.max', (['val1'], {}), '(val1)\n', (29665, 29671), True, 'import numpy as np\n'), ((29699, 29711), 'numpy.min', 'np.min', (['val1'], {}), '(val1)\n', (29705, 29711), True, 'import numpy as np\n'), ((31919, 32074), 'mpl_toolkits.axes_grid1.inset_locator.inset_axes', 'inset_axes', (['ax1'], {'width': 'cbar_width', 'height': 'cbar_height', 'loc': 'cbar_loc', 'bbox_to_anchor': '(-0.005, 0.15, 1, 1)', 'bbox_transform': 'ax1.transAxes', 'borderpad': '(0)'}), '(ax1, width=cbar_width, height=cbar_height, loc=cbar_loc,\n bbox_to_anchor=(-0.005, 0.15, 1, 1), bbox_transform=ax1.transAxes,\n borderpad=0)\n', (31929, 32074), False, 'from mpl_toolkits.axes_grid1.inset_locator import inset_axes\n'), ((32433, 32445), 'numpy.max', 'np.max', (['val2'], {}), '(val2)\n', (32439, 32445), True, 'import numpy as np\n'), ((32473, 32485), 'numpy.min', 'np.min', (['val2'], {}), '(val2)\n', (32479, 32485), True, 'import numpy as np\n'), ((22641, 22720), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(values_at_node_drape1 < thres_drape1)', 'values_at_node_drape1'], {}), '(values_at_node_drape1 < thres_drape1, values_at_node_drape1)\n', (22659, 22720), True, 'import numpy as np\n'), ((23912, 24012), 'matplotlib.pyplot.contour', 'plt.contour', (['(x[0:-1] * 0.001)', '(y[0:-1] * 0.001)', 'val1', 'contour_nb'], {'colors': '"""black"""', 'linewidths': '(0.2)'}), "(x[0:-1] * 0.001, y[0:-1] * 0.001, val1, contour_nb, colors=\n 'black', linewidths=0.2)\n", (23923, 24012), True, 'import 
matplotlib.pyplot as plt\n'), ((22510, 22554), 'numpy.any', 'np.any', (['(values_at_node_drape1 > thres_drape1)'], {}), '(values_at_node_drape1 > thres_drape1)\n', (22516, 22554), True, 'import numpy as np\n'), ((25593, 25628), 'numpy.round', 'np.round', (['(minV + 0.2 * cb_length)', '(1)'], {}), '(minV + 0.2 * cb_length, 1)\n', (25601, 25628), True, 'import numpy as np\n'), ((25658, 25693), 'numpy.round', 'np.round', (['(minV + 0.8 * cb_length)', '(1)'], {}), '(minV + 0.8 * cb_length, 1)\n', (25666, 25693), True, 'import numpy as np\n'), ((26017, 26052), 'numpy.round', 'np.round', (['(minV + 0.2 * cb_length)', '(0)'], {}), '(minV + 0.2 * cb_length, 0)\n', (26025, 26052), True, 'import numpy as np\n'), ((26082, 26117), 'numpy.round', 'np.round', (['(minV + 0.8 * cb_length)', '(0)'], {}), '(minV + 0.8 * cb_length, 0)\n', (26090, 26117), True, 'import numpy as np\n'), ((30058, 30093), 'numpy.round', 'np.round', (['(minV + 0.2 * cb_length)', '(1)'], {}), '(minV + 0.2 * cb_length, 1)\n', (30066, 30093), True, 'import numpy as np\n'), ((30127, 30162), 'numpy.round', 'np.round', (['(minV + 0.8 * cb_length)', '(1)'], {}), '(minV + 0.8 * cb_length, 1)\n', (30135, 30162), True, 'import numpy as np\n'), ((32833, 32868), 'numpy.round', 'np.round', (['(minV + 0.2 * cb_length)', '(1)'], {}), '(minV + 0.2 * cb_length, 1)\n', (32841, 32868), True, 'import numpy as np\n'), ((32902, 32937), 'numpy.round', 'np.round', (['(minV + 0.8 * cb_length)', '(1)'], {}), '(minV + 0.8 * cb_length, 1)\n', (32910, 32937), True, 'import numpy as np\n'), ((26429, 26469), 'numpy.round', 'np.round', (['(0.1 * (minV + 0.2 * cb_length))'], {}), '(0.1 * (minV + 0.2 * cb_length))\n', (26437, 26469), True, 'import numpy as np\n'), ((26504, 26544), 'numpy.round', 'np.round', (['(0.1 * (minV + 0.8 * cb_length))'], {}), '(0.1 * (minV + 0.8 * cb_length))\n', (26512, 26544), True, 'import numpy as np\n'), ((30526, 30561), 'numpy.round', 'np.round', (['(minV + 0.2 * cb_length)', '(0)'], {}), '(minV + 0.2 * 
cb_length, 0)\n', (30534, 30561), True, 'import numpy as np\n'), ((30595, 30630), 'numpy.round', 'np.round', (['(minV + 0.8 * cb_length)', '(0)'], {}), '(minV + 0.8 * cb_length, 0)\n', (30603, 30630), True, 'import numpy as np\n'), ((33302, 33337), 'numpy.round', 'np.round', (['(minV + 0.2 * cb_length)', '(0)'], {}), '(minV + 0.2 * cb_length, 0)\n', (33310, 33337), True, 'import numpy as np\n'), ((33371, 33406), 'numpy.round', 'np.round', (['(minV + 0.8 * cb_length)', '(0)'], {}), '(minV + 0.8 * cb_length, 0)\n', (33379, 33406), True, 'import numpy as np\n'), ((30982, 31022), 'numpy.round', 'np.round', (['(0.1 * (minV + 0.2 * cb_length))'], {}), '(0.1 * (minV + 0.2 * cb_length))\n', (30990, 31022), True, 'import numpy as np\n'), ((31061, 31101), 'numpy.round', 'np.round', (['(0.1 * (minV + 0.8 * cb_length))'], {}), '(0.1 * (minV + 0.8 * cb_length))\n', (31069, 31101), True, 'import numpy as np\n'), ((33759, 33799), 'numpy.round', 'np.round', (['(0.1 * (minV + 0.2 * cb_length))'], {}), '(0.1 * (minV + 0.2 * cb_length))\n', (33767, 33799), True, 'import numpy as np\n'), ((33838, 33878), 'numpy.round', 'np.round', (['(0.1 * (minV + 0.8 * cb_length))'], {}), '(0.1 * (minV + 0.8 * cb_length))\n', (33846, 33878), True, 'import numpy as np\n')] |
import numpy as np
import argparse
from project.midi_handler import midi2score, score2midi
from project.utils import padding, load_model, save_model, add_beat
from project.test import style_transfer
from project.model import lstm_wavenet
from project.train import train
def main():
    """Command-line entry point: train a model or run style transfer.

    Parses command-line arguments and dispatches to either the training
    pipeline (builds and fits an LSTM/WaveNet model on a pickled dataset,
    then saves it) or the testing pipeline (loads a trained model and
    applies style transfer to an input MIDI file, writing the result to
    ``test.mid``).
    """
    # Arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--phase',
                        help='phase: training or testing (default: %(default)s)',
                        type=str, default='testing')
    # arguments for training
    parser.add_argument('-d', '--dataset_path',
                        help='path to data set (default: %(default)s)',
                        type=str, default='bach_dataset.pickle')
    parser.add_argument('-e', '--epoch',
                        help='number of epoch(default: %(default)s)',
                        type=int, default=80)
    parser.add_argument('-n', '--steps',
                        help='number of step per epoch(default: %(default)s)',
                        type=int, default=6000)
    parser.add_argument('-b', '--batch_size_train',
                        help='batch size(default: %(default)s)',
                        type=int, default=88*3)
    parser.add_argument('-o', '--output_model_name',
                        help='name of the output model(default: %(default)s)',
                        type=str, default="out")
    # arguments for testing
    parser.add_argument('-m', '--model_path',
                        help='path to existing model (default: %(default)s)',
                        type=str, default='bach')
    parser.add_argument('-i', '--input_file',
                        help='path to input file (default: %(default)s)',
                        type=str, default="LiveAndLetDie_all.mid")
    parser.add_argument('-ii', '--input_file_melody',
                        help='path to input melody file (default: %(default)s)',
                        type=str, default="LiveAndLetDie_main.mid")
    parser.add_argument('-s', '--subdivision',
                        help='subdivision within one beat (default: %(default)s)',
                        type=int, default=4)
    args = parser.parse_args()
    print(args)
    if args.phase == "training":
        # fixed training window: 32 timesteps, advanced 4 steps at a time
        timesteps = 32
        step = 4
        subdivision = args.subdivision
        batch_size = args.batch_size_train
        dataset_path = args.dataset_path
        # create model
        model = lstm_wavenet(num_features_lr=91, timesteps=timesteps,
                             step=step, num_units_lstm=[150, 150, 150, 150],
                             num_dense=150,
                             conv_layers=5,
                             skip_layers=2)
        model.compile(optimizer="adam", loss={'prediction': 'binary_crossentropy'}, metrics=['accuracy'])
        # train
        model = train(model,
                      dataset_path,
                      subdivision,
                      epoch=args.epoch,
                      steps=args.steps,
                      timesteps=timesteps,
                      step=step,
                      batch_size=batch_size)
        # save model
        save_model(model, args.output_model_name)
    else:
        # load input file
        subdivision = args.subdivision
        path = args.input_file
        path_melody = args.input_file_melody
        score = midi2score(path, subdivision)
        if path_melody == "none":
            # no melody file supplied: use a silent melody of the same shape
            score_melody = np.zeros(score.shape)
        else:
            score_melody = midi2score(path_melody, subdivision)
        score = add_beat(score, subdivision)
        score_melody = add_beat(score_melody, subdivision)
        # truncate both scores to the first 640 frames
        score = np.array(score[0:640])
        score_melody = np.array(score_melody[0:640])
        extended_score = padding(score, 32, 4)
        # load model
        model = load_model(model_name=args.model_path)
        # generation
        result = style_transfer(extended_score, score_melody, model, iter_num=25)
        # save result
        score2midi("test.mid", result, subdivision, 120, melody_constraint=True, melody=score_melody)
        print("saved")
if __name__ == "__main__":
main() | [
"project.midi_handler.midi2score",
"project.utils.save_model",
"project.utils.add_beat",
"project.utils.padding",
"argparse.ArgumentParser",
"project.utils.load_model",
"project.train.train",
"numpy.array",
"project.model.lstm_wavenet",
"numpy.zeros",
"project.midi_handler.score2midi",
"projec... | [((313, 338), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (336, 338), False, 'import argparse\n'), ((2468, 2622), 'project.model.lstm_wavenet', 'lstm_wavenet', ([], {'num_features_lr': '(91)', 'timesteps': 'timesteps', 'step': 'step', 'num_units_lstm': '[150, 150, 150, 150]', 'num_dense': '(150)', 'conv_layers': '(5)', 'skip_layers': '(2)'}), '(num_features_lr=91, timesteps=timesteps, step=step,\n num_units_lstm=[150, 150, 150, 150], num_dense=150, conv_layers=5,\n skip_layers=2)\n', (2480, 2622), False, 'from project.model import lstm_wavenet\n'), ((2871, 3005), 'project.train.train', 'train', (['model', 'dataset_path', 'subdivision'], {'epoch': 'args.epoch', 'steps': 'args.steps', 'timesteps': 'timesteps', 'step': 'step', 'batch_size': 'batch_size'}), '(model, dataset_path, subdivision, epoch=args.epoch, steps=args.steps,\n timesteps=timesteps, step=step, batch_size=batch_size)\n', (2876, 3005), False, 'from project.train import train\n'), ((3185, 3226), 'project.utils.save_model', 'save_model', (['model', 'args.output_model_name'], {}), '(model, args.output_model_name)\n', (3195, 3226), False, 'from project.utils import padding, load_model, save_model, add_beat\n'), ((3395, 3424), 'project.midi_handler.midi2score', 'midi2score', (['path', 'subdivision'], {}), '(path, subdivision)\n', (3405, 3424), False, 'from project.midi_handler import midi2score, score2midi\n'), ((3605, 3633), 'project.utils.add_beat', 'add_beat', (['score', 'subdivision'], {}), '(score, subdivision)\n', (3613, 3633), False, 'from project.utils import padding, load_model, save_model, add_beat\n'), ((3657, 3692), 'project.utils.add_beat', 'add_beat', (['score_melody', 'subdivision'], {}), '(score_melody, subdivision)\n', (3665, 3692), False, 'from project.utils import padding, load_model, save_model, add_beat\n'), ((3710, 3732), 'numpy.array', 'np.array', (['score[0:640]'], {}), '(score[0:640])\n', (3718, 3732), True, 'import numpy as np\n'), ((3756, 
3785), 'numpy.array', 'np.array', (['score_melody[0:640]'], {}), '(score_melody[0:640])\n', (3764, 3785), True, 'import numpy as np\n'), ((3812, 3833), 'project.utils.padding', 'padding', (['score', '(32)', '(4)'], {}), '(score, 32, 4)\n', (3819, 3833), False, 'from project.utils import padding, load_model, save_model, add_beat\n'), ((3872, 3910), 'project.utils.load_model', 'load_model', ([], {'model_name': 'args.model_path'}), '(model_name=args.model_path)\n', (3882, 3910), False, 'from project.utils import padding, load_model, save_model, add_beat\n'), ((3950, 4014), 'project.test.style_transfer', 'style_transfer', (['extended_score', 'score_melody', 'model'], {'iter_num': '(25)'}), '(extended_score, score_melody, model, iter_num=25)\n', (3964, 4014), False, 'from project.test import style_transfer\n'), ((4046, 4143), 'project.midi_handler.score2midi', 'score2midi', (['"""test.mid"""', 'result', 'subdivision', '(120)'], {'melody_constraint': '(True)', 'melody': 'score_melody'}), "('test.mid', result, subdivision, 120, melody_constraint=True,\n melody=score_melody)\n", (4056, 4143), False, 'from project.midi_handler import midi2score, score2midi\n'), ((3488, 3509), 'numpy.zeros', 'np.zeros', (['score.shape'], {}), '(score.shape)\n', (3496, 3509), True, 'import numpy as np\n'), ((3551, 3587), 'project.midi_handler.midi2score', 'midi2score', (['path_melody', 'subdivision'], {}), '(path_melody, subdivision)\n', (3561, 3587), False, 'from project.midi_handler import midi2score, score2midi\n')] |
"""Contains common utility functions."""
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
from prettytable import PrettyTable
import distutils.util
import numpy as np
import six
def print_arguments(args):
    """Print argparse's arguments.
    Usage:
    .. code-block:: python
        parser = argparse.ArgumentParser()
        parser.add_argument("name", default="Jonh", type=str, help="User name.")
        args = parser.parse_args()
        print_arguments(args)
    :param args: Input argparse.Namespace for printing.
    :type args: argparse.Namespace
    """
    print("----------- Configuration Arguments -----------")
    # vars(args).items() replaces six.iteritems(vars(args)); six is
    # unnecessary on Python 3, and sorted() gives deterministic output.
    for arg, value in sorted(vars(args).items()):
        print("%s: %s" % (arg, value))
    print("------------------------------------------------")
def add_arguments(argname, type, default, help, argparser, **kwargs):
    """Add argparse's argument.

    When ``type`` is ``bool``, command-line values are parsed with a
    truth-value converter ("y"/"yes"/"t"/"true"/"on"/"1" -> 1 and
    "n"/"no"/"f"/"false"/"off"/"0" -> 0), matching the semantics of
    ``distutils.util.strtobool``, which was removed from the standard
    library in Python 3.12 (PEP 632).

    Usage:
    .. code-block:: python
        parser = argparse.ArgumentParser()
        add_argument("name", str, "Jonh", "User name.", parser)
        args = parser.parse_args()
    """

    def _strtobool(value):
        # Local re-implementation of distutils.util.strtobool so this
        # helper keeps working on Python >= 3.12.
        lowered = value.lower()
        if lowered in ("y", "yes", "t", "true", "on", "1"):
            return 1
        if lowered in ("n", "no", "f", "false", "off", "0"):
            return 0
        raise ValueError("invalid truth value {!r}".format(value))

    # `type` intentionally shadows the builtin so the public signature
    # stays unchanged for existing callers.
    type = _strtobool if type == bool else type
    argparser.add_argument(
        "--" + argname,
        default=default,
        type=type,
        help=help + ' Default: %(default)s.',
        **kwargs)
def summary(main_prog):
    '''
    Summarize a model's PARAMs and FLOPs.

    Supports common operators such as conv, fc, pool, relu, sigmoid and
    bn; unsupported operators are skipped. Prints the summary table to
    the terminal as a side effect.

    Args:
        main_prog: main program

    Returns:
        tuple of (total, is_quantize): dict of per-op totals and a flag
        telling whether any quantization op appears in the program
    '''
    op_summaries = []
    is_quantize = False
    for prog_block in main_prog.blocks:
        for op in prog_block.ops:
            # any op whose type mentions "quantize" marks a quantized model
            if 'quantize' in str(op.type):
                is_quantize = True
            stats = _summary_model(prog_block.vars, op)
            if stats is None:
                continue
            in_shape, out_shape, n_params, n_flops = stats
            entry = OrderedDict()
            entry['type'] = op.type
            # drop the leading batch dimension from the recorded shapes
            entry['input_shape'] = in_shape[1:]
            entry['out_shape'] = out_shape[1:]
            entry['PARAMs'] = n_params
            entry['FLOPs'] = n_flops
            op_summaries.append(entry)
    summary_table, total = _format_summary(op_summaries)
    _print_summary(summary_table, total)
    return total, is_quantize
def _summary_model(block_vars, one_op):
    '''
    Count the PARAMs and FLOPs contributed by a single operator.

    Args:
        block_vars: all vars of one block
        one_op: one operator to count

    Returns:
        tuple of (input data shape, output data shape, PARAMs, FLOPs)
        for the operator, or None if the operator type is unsupported
    '''
    op_type = one_op.type
    if op_type in ['conv2d', 'depthwise_conv2d']:
        filter_shape = block_vars[one_op.input("Filter")[0]].shape
        in_shape = block_vars[one_op.input("Input")[0]].shape
        out_shape = block_vars[one_op.output("Output")[0]].shape
        c_out, c_in, k_h, k_w = filter_shape
        _, c_out_check, h_out, w_out = out_shape
        assert c_out == c_out_check, 'shape error!'
        groups = one_op.attr("groups")
        kernel_ops = k_h * k_w * (c_in / groups)
        bias_ops = 1 if one_op.input("Bias") != [] else 0
        params = c_out * (kernel_ops + bias_ops)
        # the factor of 2 counts both multiplies and adds, following the
        # NVIDIA paper convention
        flops = 2 * (h_out * w_out * c_out * (kernel_ops + bias_ops))
    elif op_type == 'pool2d':
        in_shape = block_vars[one_op.input("X")[0]].shape
        out_shape = block_vars[one_op.output("Out")[0]].shape
        _, c_out, h_out, w_out = out_shape
        ksize = one_op.attr("ksize")
        params = 0
        flops = h_out * w_out * c_out * (ksize[0] * ksize[1])
    elif op_type == 'mul':
        weight_shape = block_vars[one_op.input("Y")[0]].shape
        in_shape = block_vars[one_op.input("X")[0]].shape
        out_shape = block_vars[one_op.output("Out")[0]].shape
        # TODO: only count 'mul' ops that belong to an 'fc' layer; keying
        # off the output name is not the best way to detect this
        if 'fc' not in one_op.output("Out")[0]:
            return None
        k_in, k_out = weight_shape
        # the +1 accounts for the bias handled by the following sum op
        params = k_in * k_out + 1
        flops = k_in * k_out
    elif op_type in ['sigmoid', 'tanh', 'relu', 'leaky_relu', 'prelu']:
        in_shape = block_vars[one_op.input("X")[0]].shape
        out_shape = block_vars[one_op.output("Out")[0]].shape
        # prelu carries one learnable parameter; the others have none
        params = 1 if op_type == 'prelu' else 0
        flops = 1
        for dim in in_shape[1:]:
            flops *= dim
    elif op_type == 'batch_norm':
        in_shape = block_vars[one_op.input("X")[0]].shape
        out_shape = block_vars[one_op.output("Y")[0]].shape
        _, c_in, h_out, w_out = in_shape
        # learnable gamma and beta per channel
        params = c_in * 2
        # one pass each to compute mean and std
        flops = h_out * w_out * c_in * 2
    else:
        return None
    return in_shape, out_shape, params, flops
def _format_summary(collected_ops_list):
    '''
    Build the pretty-printable table and the running totals.

    Args:
        collected_ops_list: per-operator summaries collected by summary()

    Returns:
        table: summary report in PrettyTable format
        total: dict holding the per-op 'params' and 'flops' lists
    '''
    table = PrettyTable(
        ["No.", "TYPE", "INPUT", "OUTPUT", "PARAMs", "FLOPs"])
    table.align = 'r'
    param_counts = []
    flop_counts = []
    for idx, op_info in enumerate(collected_ops_list):
        # column order must match the table header above
        table.add_row([
            idx,
            op_info['type'],
            op_info['input_shape'],
            op_info['out_shape'],
            int(op_info['PARAMs']),
            int(op_info['FLOPs']),
        ])
        param_counts.append(int(op_info['PARAMs']))
        flop_counts.append(int(op_info['FLOPs']))
    total = {'params': param_counts, 'flops': flop_counts}
    return table, total
def _print_summary(summary_table, total):
    '''
    Print the summary table and the PARAM/FLOP totals on the terminal.

    Args:
        summary_table: summary report in table format
        total: dict with 'params' and 'flops' lists of per-op counts
    '''
    # renamed from the original misspelling "parmas"
    params = total['params']
    flops = total['flops']
    print(summary_table)
    print('Total PARAMs: {}({:.4f}M)'.format(
        sum(params), sum(params) / (10 ** 6)))
    print('Total FLOPs: {}({:.2f}G)'.format(sum(flops), sum(flops) / 10 ** 9))
    print(
        "Notice: \n now supported ops include [Conv, DepthwiseConv, FC(mul), BatchNorm, Pool, Activation(sigmoid, tanh, relu, leaky_relu, prelu)]"
    )
def get_batch_dt_res(nmsed_out_v, data, contiguous_category_id_to_json_id, batch_size):
    """Convert one batch of NMS detection output into COCO-style result dicts.

    Each detection row is (category_id, score, xmin, ymin, xmax, ymax) with
    normalized coordinates; they are clamped to [0, 1], scaled to pixels
    using the image size stored in data[i][4], and emitted as
    {'image_id', 'category_id', 'bbox', 'score'} dicts with bbox in
    [x, y, w, h] form.
    """
    dts_res = []
    # LoD offsets give the detection range belonging to each image
    lod = nmsed_out_v[0].lod()[0]
    detections = np.array(nmsed_out_v[0])
    real_batch_size = min(batch_size, len(data))
    assert (len(lod) == real_batch_size + 1), \
        "Error Lod Tensor offset dimension. Lod({}) vs. batch_size({})".format(len(lod), batch_size)
    det_idx = 0
    for img_idx in range(real_batch_size):
        num_dets = lod[img_idx + 1] - lod[img_idx]
        # data[i][4] holds (image_id, width, height)
        image_id = int(data[img_idx][4][0])
        image_width = int(data[img_idx][4][1])
        image_height = int(data[img_idx][4][2])
        for _ in range(num_dets):
            category_id, score, xmin, ymin, xmax, ymax = detections[det_idx].tolist()
            det_idx += 1
            # clamp normalized coords to [0, 1], then scale to pixels
            xmin = max(min(xmin, 1.0), 0.0) * image_width
            ymin = max(min(ymin, 1.0), 0.0) * image_height
            xmax = max(min(xmax, 1.0), 0.0) * image_width
            ymax = max(min(ymax, 1.0), 0.0) * image_height
            dts_res.append({
                'image_id': image_id,
                'category_id': contiguous_category_id_to_json_id[category_id],
                'bbox': [xmin, ymin, xmax - xmin, ymax - ymin],
                'score': score
            })
    return dts_res
| [
"prettytable.PrettyTable",
"numpy.array",
"collections.OrderedDict"
] | [((6445, 6511), 'prettytable.PrettyTable', 'PrettyTable', (["['No.', 'TYPE', 'INPUT', 'OUTPUT', 'PARAMs', 'FLOPs']"], {}), "(['No.', 'TYPE', 'INPUT', 'OUTPUT', 'PARAMs', 'FLOPs'])\n", (6456, 6511), False, 'from prettytable import PrettyTable\n'), ((7909, 7933), 'numpy.array', 'np.array', (['nmsed_out_v[0]'], {}), '(nmsed_out_v[0])\n', (7917, 7933), True, 'import numpy as np\n'), ((2529, 2542), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2540, 2542), False, 'from collections import OrderedDict\n')] |
# Libraries
# Standard library
import _pickle as Pickle
import gzip
# Third-party libraries
import numpy as np
def load_data():
    """Return the MNIST data as a tuple containing the training data,
    the validation data, and the test data.

    ``training_data`` is a pair ``(images, labels)``: ``images`` is a
    numpy ndarray with 50,000 entries, each a 784-value vector (the
    28 * 28 = 784 pixels of one image), and ``labels`` is a numpy ndarray
    of the corresponding digit values (0...9).

    ``validation_data`` and ``test_data`` have the same layout but
    contain only 10,000 images each.
    """
    # `with` guarantees the file is closed even if unpickling raises;
    # the original called f.close() manually and leaked on error.
    with gzip.open('MNIST/data/mnist.pkl.gz', 'rb') as f:
        training_data, validation_data, test_data = Pickle.load(f, encoding='bytes')
    return (training_data, validation_data, test_data)
def load_data_wrapper():
    """Return a tuple '(training_data, validation_data, test_data)' in the
    format our neural-network code expects.

    Based on 'load_data', but reshaped for convenience: training_data is a
    list of 50,000 tuples (x, y) where x is a 784-dimensional
    numpy.ndarray holding the input image and y is the 10-dimensional
    unit-vector encoding of the correct digit. validation_data and
    test_data are lists of 10,000 tuples (x, y) where x is the same
    784-dimensional image vector but y is the plain digit value (an
    integer).

    The training labels use one-hot vectors while the validation/test
    labels stay as integers because those formats are the most convenient
    for the training loop and the evaluation code, respectively.
    """
    tr_d, va_d, te_d = load_data()
    training_data = [(np.reshape(x, (784, 1)), vectorized_result(y))
                     for x, y in zip(tr_d[0], tr_d[1])]
    validation_data = [(np.reshape(x, (784, 1)), y)
                       for x, y in zip(va_d[0], va_d[1])]
    test_data = [(np.reshape(x, (784, 1)), y)
                 for x, y in zip(te_d[0], te_d[1])]
    return (training_data, validation_data, test_data)
def vectorized_result(j):
    """Return a (10, 1) numpy array one-hot encoding the digit ``j``:
    1.0 at position ``j`` and zeroes elsewhere. Used to convert a digit
    (0...9) into the desired output of the neural network."""
    vec = np.zeros((10, 1))
    vec[j, 0] = 1.0
    return vec
| [
"numpy.zeros",
"numpy.reshape",
"_pickle.load",
"gzip.open"
] | [((915, 957), 'gzip.open', 'gzip.open', (['"""MNIST/data/mnist.pkl.gz"""', '"""rb"""'], {}), "('MNIST/data/mnist.pkl.gz', 'rb')\n", (924, 957), False, 'import gzip\n'), ((1006, 1038), '_pickle.load', 'Pickle.load', (['f'], {'encoding': '"""bytes"""'}), "(f, encoding='bytes')\n", (1017, 1038), True, 'import _pickle as Pickle\n'), ((2911, 2928), 'numpy.zeros', 'np.zeros', (['(10, 1)'], {}), '((10, 1))\n', (2919, 2928), True, 'import numpy as np\n'), ((2201, 2224), 'numpy.reshape', 'np.reshape', (['x', '(784, 1)'], {}), '(x, (784, 1))\n', (2211, 2224), True, 'import numpy as np\n'), ((2396, 2419), 'numpy.reshape', 'np.reshape', (['x', '(784, 1)'], {}), '(x, (784, 1))\n', (2406, 2419), True, 'import numpy as np\n'), ((2517, 2540), 'numpy.reshape', 'np.reshape', (['x', '(784, 1)'], {}), '(x, (784, 1))\n', (2527, 2540), True, 'import numpy as np\n')] |
import numpy as np
from monty.json import MSONable
from datetime import datetime
import sys
import networkx as nx
from abc import ABC, abstractmethod
from propnet import ureg
from pint import DimensionalityError
from propnet.core.symbols import Symbol
from propnet.core.provenance import ProvenanceElement
from propnet.symbols import DEFAULT_SYMBOLS, DEFAULT_SYMBOL_VALUES
from propnet.core.exceptions import SymbolConstraintError
from typing import Union
import uuid
import copy
import logging
logger = logging.getLogger(__name__)
class BaseQuantity(ABC, MSONable):
"""
Base class for storing the value of a property.
Subclasses of BaseQuantity allow for different kind of information to be stored and interpreted.
Attributes:
symbol_type: (Symbol or str) the type of information that is represented
by the associated value. If a string, assigns a symbol from
the default symbols that has that string name
value: (id) the value associated with this symbol. This can be any object.
tags: (list<str>) tags associated with the quantity, typically
related to its origin, e. g. "DFT" or "ML" or "experiment"
provenance: (ProvenanceElement) provenance information of quantity origin
"""
    def __init__(self, symbol_type, value, tags=None,
                 provenance=None):
        """
        Parses inputs for constructing a BaseQuantity object.

        Args:
            symbol_type (Symbol or str): pointer to a Symbol
                object in DEFAULT_SYMBOLS or string giving the name
                of a Symbol object. Identifies the type of data
                stored in the quantity.
            value (id): value of the quantity.
            tags (list<str>): list of strings storing metadata from
                evaluation.
            provenance (ProvenanceElement): provenance associated with the
                object (e. g. inputs, model, see ProvenanceElement). If not specified,
                a default object will be created. All objects will receive
                the time created and the internal ID as fields 'source.date_created'
                and 'source.source_key', respectively, if the fields are not already
                written. Note that a caller-supplied ProvenanceElement is
                mutated in place (see comments below).
        """
        # Resolve string symbol names through the DEFAULT_SYMBOLS registry
        if not isinstance(symbol_type, Symbol):
            symbol_type = self.get_symbol_from_string(symbol_type)
        if provenance and not isinstance(provenance, ProvenanceElement):
            raise TypeError("Expected ProvenanceElement for provenance. "
                            "Instead received: {}".format(type(provenance)))
        self._value = value
        self._symbol_type = symbol_type
        # tags are always stored as a list; a bare string becomes a
        # single-element list
        self._tags = []
        if tags:
            if isinstance(tags, str):
                tags = [tags]
            self._tags.extend(tags)
        self._provenance = provenance
        # unique hex id; doubles as the default provenance source key
        self._internal_id = uuid.uuid4().hex
        if self._provenance is not None:
            # NOTE(review): the caller-supplied ProvenanceElement is mutated
            # in place here — its source is coerced to a dict and missing
            # date_created/source_key fields are filled in.
            if not isinstance(self._provenance.source, dict):
                self._provenance.source = {"source": self._provenance.source}
            if 'date_created' not in self._provenance.source.keys() or \
                    self._provenance.source['date_created'] in (None, ""):
                self._provenance.source['date_created'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            if 'source_key' not in self._provenance.source.keys() or \
                    self._provenance.source['source_key'] in (None, ""):
                self._provenance.source['source_key'] = self._internal_id
        else:
            # No provenance given: build a default element stamped with the
            # creation time and this quantity's internal id
            self._provenance = ProvenanceElement(source={"source": None,
                                                      "source_key": self._internal_id,
                                                      "date_created": datetime.now().strftime("%Y-%m-%d %H:%M:%S")})
@staticmethod
def get_symbol_from_string(name):
"""
Looks up Symbol from name in DEFAULT_SYMBOLS registry.
Args:
name: (str) the name of the Symbol object
Returns: (Symbol) the Symbol object associated with the name
"""
# Invoke default symbol if symbol is a string
if not isinstance(name, str):
raise TypeError("Expected str, encountered {}".format(type(name)))
if name not in DEFAULT_SYMBOLS.keys():
raise ValueError("Symbol type {} not recognized".format(name))
return DEFAULT_SYMBOLS[name]
    @property
    @abstractmethod
    def magnitude(self):
        """
        Returns the value of a quantity without any units.

        Should be implemented for numerical subclasses; non-numerical
        subclasses should simply return self.value.

        Returns:
            (id): value without units (if numerical), otherwise just the value
        """
        pass
    @property
    def symbol(self):
        """
        Returns the Symbol object associated with the quantity.

        The symbol is fixed at construction time and identifies the type
        of data this quantity represents.

        Returns:
            (Symbol): Symbol of the BaseQuantity
        """
        return self._symbol_type
    @property
    def tags(self):
        """
        Returns the list of tags.

        Note this returns the internal list itself, not a copy, so
        mutations by the caller affect the quantity.

        Returns:
            (list<str>): tags of the BaseQuantity
        """
        return self._tags
    @property
    def provenance(self):
        """
        Returns the object containing the provenance information for the quantity.

        Note this returns the internal ProvenanceElement itself, not a copy.

        Returns:
            (ProvenanceElement): Provenance object for the quantity
        """
        return self._provenance
    @property
    def value(self):
        """
        Returns a copy of the value object stored in the quantity.

        Copying prevents callers from mutating the quantity's internal
        state through the returned object.

        Returns:
            (id): copy of value object stored in quantity
        """
        # This returns a deep copy of the object holding the value
        # in case it is a class instance and the user manipulates
        # the object. This is particularly problematic if a user
        # calls np.isclose(x.value, y.value) and x and/or y contain
        # pint Quantities. pint automatically converts the magnitudes
        # into ndarrays, even for scalars, which breaks the careful
        # type controlling we do for NumQuantity.
        # If this is problematic for large ndarrays or pymatgen objects,
        # for example, then we can revisit this decision to copy.
        return copy.deepcopy(self._value)
    @property
    @abstractmethod
    def units(self):
        """
        Returns the units of the quantity.

        Should be implemented for numerical subclasses; non-numerical
        subclasses should return None.

        Returns:
            (pint.unit): units associated with the value
        """
        pass
    @property
    @abstractmethod
    def uncertainty(self):
        """
        Returns the pint object holding the uncertainty of a quantity.

        Should be implemented for numerical subclasses; non-numerical
        subclasses should return None.

        Returns:
            (pint.Quantity): copy of uncertainty object stored in quantity
        """
        pass
    @abstractmethod
    def pretty_string(self, **kwargs):
        """
        Returns a string representing the value of the object in a pretty format.

        Subclasses decide which keyword arguments they accept.

        Returns:
            (str): text string representing the value of an object
        """
        pass
def is_cyclic(self, visited=None):
"""
Algorithm for determining if there are any cycles in
the provenance tree, i. e. a repeated quantity in a
tree branch
Args:
visited (list of visited model/symbols in the built tree
that allows for recursion
Returns:
(bool) whether or not there is a cycle in the quantity
provenance, i. e. repeated value in a tree branch
"""
if visited is None:
visited = set()
if self.symbol in visited:
return True
visited.add(self.symbol)
if self.provenance is None:
return False
# add distinct model hash to distinguish properties from models,
# e.g. pugh ratio
model_hash = "model_{}".format(self.provenance.model)
if model_hash in visited:
return True
visited.add(model_hash)
for p_input in self.provenance.inputs or []:
this_visited = visited.copy()
if p_input.is_cyclic(this_visited):
return True
return False
def get_provenance_graph(self, start=None, filter_long_labels=True):
"""
Gets an nxgraph object corresponding to the provenance graph
Args:
start (nxgraph): starting graph to build from
filter_long_labels (bool): true truncates long labels to just the symbol name
Returns:
(nxgraph): graph representation of provenance
"""
graph = start or nx.MultiDiGraph()
label = "{}: {}".format(self.symbol.name, self.pretty_string())
if filter_long_labels and len(label) > 30:
label = "{}".format(self.symbol.name)
graph.add_node(
self, fillcolor="#43A1F8", fontcolor='white', label=label)
model = getattr(self.provenance, 'model', None)
source = getattr(self.provenance, 'source', None)
if model is not None:
model = "Model: {}".format(model)
graph.add_node(model, label=model, fillcolor='orange',
fontcolor='white', shape='rectangle')
graph.add_edge(model, self)
for model_input in self.provenance.inputs:
graph = model_input.get_provenance_graph(start=graph)
graph.add_edge(model_input, model)
elif source is not None:
source = "Source: {}".format(source['source'])
graph.add_edge(source, self)
return graph
def draw_provenance_graph(self, filename, prog='dot', **kwargs):
"""
Outputs the provenance graph for this quantity to a file.
Args:
filename: (str) filename for output
prog: (str) pygraphviz layout method for drawing the graph
**kwargs: args to pygraphviz.AGraph.draw() method
"""
nx_graph = self.get_provenance_graph()
a_graph = nx.nx_agraph.to_agraph(nx_graph)
a_graph.node_attr['style'] = 'filled'
a_graph.draw(filename, prog=prog, **kwargs)
def as_dict(self):
"""
Serializes object as a dictionary. Object can be reconstructed with from_dict().
Returns:
(dict): representation of object as a dictionary
"""
symbol = self._symbol_type
if symbol.name in DEFAULT_SYMBOLS.keys() and symbol == DEFAULT_SYMBOLS[symbol.name]:
symbol = self._symbol_type.name
else:
symbol = symbol.as_dict()
return {"symbol_type": symbol,
"provenance": self.provenance.as_dict() if self.provenance else None,
"tags": self.tags}
@abstractmethod
def contains_nan_value(self):
"""
Determines if value contains a NaN (not a number) value.
Should be implemented for numerical subclasses. Otherwise return False.
Returns:
(bool): True if value contains at least one NaN value.
"""
pass
@abstractmethod
def contains_complex_type(self):
"""
Determines if value contains one or more complex-type values based on variable type.
Should be implemented for numerical subclasses. Otherwise return False.
Returns:
(bool): True if value contains at least one complex-type value.
"""
pass
@abstractmethod
def contains_imaginary_value(self):
"""
Determines if value has a non-zero imaginary component. Differs from
contains_complex_type() in that it checks the imaginary component's value.
If zero or very small, returns True.
Should be implemented for numerical subclasses. Otherwise return False.
Returns:
(bool): True if value contains at least one value with a non-zero imaginary component.
"""
pass
@abstractmethod
def has_eq_value_to(self, rhs):
"""
Determines if the current quantity's value is equal to that of another quantity.
This ignores provenance of the quantity and compares the values only.
Args:
rhs: (BaseQuantity) the quantity to which the current object will be compared
Returns: (bool): True if the values are found to be equal (or equivalent)
"""
pass
def __hash__(self):
"""
Hash function for this class.
Note: the hash function for this class does not hash the value,
so it cannot alone determine equality.
Returns: (int) hash value
"""
hash_value = hash(self.symbol.name) ^ hash(self.provenance)
if self.tags:
# Sorting to ensure it is deterministic
sorted_tags = self.tags.copy()
sorted_tags.sort()
for tag in sorted_tags:
hash_value = hash_value ^ hash(tag)
return hash_value
def __str__(self):
return "<{}, {}, {}>".format(self.symbol.name, self.value, self.tags)
def __repr__(self):
return self.__str__()
def __bool__(self):
return bool(self.value)
def __eq__(self, other):
"""
Determines equality of common components of BaseQuantity-derived objects.
Note: Does not check for equivalence of value. Derived classes should
override this method to determine equivalence of values.
Note: __eq__() does not provide comparisons to other types, but does support
implied comparisons by returning NotImplemented for other types.
Args:
other: (BaseQuantity-derived type) object for value comparison
Returns: (bool) True if the symbol, tags, and provenance are equal.
"""
return self.symbol == other.symbol and \
self.tags == other.tags and \
self.provenance == other.provenance
class NumQuantity(BaseQuantity):
    """
    Class extending BaseQuantity for storing numerical values, scalar and non-scalar.
    Allowed scalar types: int, float, complex, np.integer, np.floating, np.complexfloating
    Allowed array types: list, np.array
    Note: Array types must contain only allowed scalar types. Scalars with numpy types will
    be converted to python-native types.
    Types shown below are how the objects are stored. See __init__() for initialization.
    Attributes:
        symbol_type: (Symbol) the type of information that is represented
            by the associated value
        value: (pint.Quantity) the value of the property wrapped in a pint quantity
            for unit handling
        units: (pint.unit) units of the object
        tags: (list<str>) tags associated with the quantity, typically
            related to its origin, e. g. "DFT" or "ML" or "experiment"
        provenance: (ProvenanceElement) provenance associated with the
            object. See BaseQuantity.__init__() for more info.
        uncertainty: (pint.Quantity) uncertainty associated with the value stored in the same units
    """
    # Allowed types
    _ACCEPTABLE_SCALAR_TYPES = (int, float, complex)
    _ACCEPTABLE_ARRAY_TYPES = (list, np.ndarray)
    _ACCEPTABLE_DTYPES = (np.integer, np.floating, np.complexfloating)
    _ACCEPTABLE_TYPES = _ACCEPTABLE_ARRAY_TYPES + _ACCEPTABLE_SCALAR_TYPES + _ACCEPTABLE_DTYPES + (ureg.Quantity,)
    # This must be checked for explicitly because bool is a subtype of int
    # and isinstance(True/False, int) returns true
    _UNACCEPTABLE_TYPES = (bool,)

    def __init__(self, symbol_type, value, units=None, tags=None,
                 provenance=None, uncertainty=None):
        """
        Instantiates an instance of the NumQuantity class.
        Args:
            symbol_type: (Symbol or str) the type of information that is represented
                by the associated value. If a string, assigns a symbol from
                the default symbols that has that string name
            value: (int, float, complex, np.integer, np.floating, np.complexfloating,
                list, np.ndarray, pint.Quantity) the value of the property
            units: (str, tuple, list) desired units of the quantity. If value is a
                pint.Quantity, the value will be converted to these units. Input can
                be any acceptable unit format for pint.Quantity.
            tags: (list<str>) tags associated with the quantity, typically
                related to its origin, e. g. "DFT" or "ML" or "experiment"
            provenance: (ProvenanceElement) provenance associated with the
                object. See BaseQuantity.__init__() for more info.
            uncertainty: (int, float, complex, np.integer, np.floating, np.complexfloating,
                list, np.ndarray, pint.Quantity, tuple, NumQuantity) uncertainty
                associated with the value stored in the same units. pint.Quantity,
                tuple, and NumQuantity types will be converted to the units
                specified in 'units'. Other types will be assumed to be in the
                specified units.
        Raises:
            TypeError: if value or uncertainty is not an acceptable numerical type
            SymbolConstraintError: if the value violates the symbol's constraint
        """
        # TODO: Test value on the shape dictated by symbol
        if isinstance(symbol_type, str):
            symbol_type = BaseQuantity.get_symbol_from_string(symbol_type)
        # Set default units if not supplied
        if not units:
            logger.warning("No units supplied, assuming default units from symbol.")
        units = units or symbol_type.units
        if isinstance(value, self._ACCEPTABLE_DTYPES):
            # .item() converts a numpy scalar to the equivalent python-native
            # scalar. (np.asscalar() used to do this but was deprecated in
            # NumPy 1.16 and removed in NumPy 1.23.)
            value_in = ureg.Quantity(value.item(), units)
        elif isinstance(value, ureg.Quantity):
            value_in = value.to(units)
        elif self.is_acceptable_type(value):
            value_in = ureg.Quantity(value, units)
        else:
            raise TypeError('Invalid type passed to constructor for value:'
                            ' {}'.format(type(value)))
        super(NumQuantity, self).__init__(symbol_type, value_in,
                                          tags=tags, provenance=provenance)
        if uncertainty is not None:
            if isinstance(uncertainty, self._ACCEPTABLE_DTYPES):
                # See note above on .item() vs the removed np.asscalar()
                self._uncertainty = ureg.Quantity(uncertainty.item(), units)
            elif isinstance(uncertainty, ureg.Quantity):
                self._uncertainty = uncertainty.to(units)
            elif isinstance(uncertainty, NumQuantity):
                self._uncertainty = uncertainty._value.to(units)
            elif isinstance(uncertainty, tuple):
                self._uncertainty = ureg.Quantity.from_tuple(uncertainty).to(units)
            elif self.is_acceptable_type(uncertainty):
                self._uncertainty = ureg.Quantity(uncertainty, units)
            else:
                raise TypeError('Invalid type passed to constructor for uncertainty:'
                                ' {}'.format(type(uncertainty)))
        else:
            self._uncertainty = None
        # TODO: Symbol-level constraints are hacked together atm,
        #       constraints as a whole need to be refactored and
        #       put into a separate module. They also are only
        #       available for numerical symbols because it uses
        #       sympy to evaluate the constraints. Would be better
        #       to make some class for symbol and/or model constraints
        if symbol_type.constraint is not None:
            if not symbol_type.constraint(**{symbol_type.name: self.magnitude}):
                raise SymbolConstraintError(
                    "NumQuantity with {} value does not satisfy {}".format(
                        value, symbol_type.constraint))

    @staticmethod
    def _is_acceptable_dtype(this_dtype):
        """
        Checks a numpy dtype against the dtypes allowed for this class.
        Args:
            this_dtype: (numpy.dtype) the dtype to check
        Returns: True if this_dtype is a sub-dtype of the acceptable dtypes.
        """
        return any(np.issubdtype(this_dtype, dt) for dt in NumQuantity._ACCEPTABLE_DTYPES)

    def to(self, units):
        """
        Converts the quantity to the specified units, a la pint.
        Args:
            units: (tuple or str) units to convert quantity to
        Returns:
            (NumQuantity): new quantity expressed in the requested units
        """
        # Calling deepcopy() instead of ctor preserves internal_id
        # while returning a new object (as is desired?)
        q = copy.deepcopy(self)
        q._value = q._value.to(units)
        if q._uncertainty is not None:
            q._uncertainty = q._uncertainty.to(units)
        return q

    @classmethod
    def from_weighted_mean(cls, quantities):
        """
        Invokes a weighted-mean quantity from other quantities.
        Args:
            quantities ([NumQuantity]): list of quantities of the same type
        Returns: (NumQuantity) a quantity containing the weighted mean and
            standard deviation.
        Raises:
            ValueError: if inputs are not NumQuantity or refer to different symbols
        """
        if not all(isinstance(q, cls) for q in quantities):
            raise ValueError("Weighted mean cannot be applied to non-NumQuantity objects")
        input_symbol = quantities[0].symbol
        if not all(input_symbol == q.symbol for q in quantities):
            raise ValueError("Can only calculate a weighted mean if "
                             "all quantities refer to the same symbol.")
        # TODO: an actual weighted mean; just a simple mean at present
        # TODO: support propagation of uncertainties (this will only work
        #       once at present)
        vals = [q.value for q in quantities]
        # Explicit formulas for mean / standard dev for pint support
        new_value = sum(vals) / len(vals)
        std_dev = (sum([(v - new_value) ** 2 for v in vals]) / len(vals)) ** (1 / 2)
        # Accumulate provenance and tags for new quantities
        new_tags = set()
        new_provenance = ProvenanceElement(model='aggregation', inputs=[])
        for quantity in quantities:
            if quantity.tags:
                for tag in quantity.tags:
                    new_tags.add(tag)
            new_provenance.inputs.append(quantity)
        return cls(symbol_type=input_symbol, value=new_value,
                   tags=list(new_tags), provenance=new_provenance,
                   uncertainty=std_dev)

    @property
    def magnitude(self):
        """
        Returns the value of the quantity without any units.
        Returns:
            (int, float, complex, np.ndarray): value without units
        """
        return self._value.magnitude

    @property
    def units(self):
        """
        Returns the units of the quantity.
        Returns:
            (pint.unit): units associated with the value
        """
        return self._value.units

    @property
    def uncertainty(self):
        """
        Returns the pint object holding the uncertainty of the quantity.
        Returns:
            (pint.Quantity): copy of uncertainty object stored in quantity
        """
        # See note on BaseQuantity.value about why this is a deep copy
        return copy.deepcopy(self._uncertainty)

    @staticmethod
    def is_acceptable_type(to_check):
        """
        Checks an object (including nested lists) to ensure it contains only
        numerical types, including numpy types.
        Args:
            to_check: (list) list of data to be checked
        Returns: (bool): true if all data contained in the list is numerical (float, int, complex)
        """
        def recursive_list_type_check(l):
            nested_lists = [v for v in l if isinstance(v, list)]
            np_arrays = [v for v in l if isinstance(v, np.ndarray)]
            ureg_quantities = [v for v in l if isinstance(v, ureg.Quantity)]
            regular_data = [v for v in l if not isinstance(v, (list, np.ndarray))]
            # bool must be excluded explicitly because it subclasses int
            regular_data_is_type = all([isinstance(v, NumQuantity._ACCEPTABLE_TYPES) and not
                                        isinstance(v, NumQuantity._UNACCEPTABLE_TYPES)
                                        for v in regular_data])
            # If nested_lists is empty, all() returns true automatically
            nested_lists_is_type = all(recursive_list_type_check(v) for v in nested_lists)
            np_arrays_is_type = all(NumQuantity._is_acceptable_dtype(v.dtype)
                                    for v in np_arrays)
            ureg_quantities_is_type = all(recursive_list_type_check([v.magnitude])
                                          for v in ureg_quantities)
            return regular_data_is_type and nested_lists_is_type \
                and np_arrays_is_type and ureg_quantities_is_type
        return recursive_list_type_check([to_check])

    def pretty_string(self, **kwargs):
        """
        Returns a string representing the value of the object in a pretty format with units.
        Note: units are omitted for non-scalar properties.
        Keyword Args:
            sigfigs: (int) how many significant figures to include. default: 4
        Returns:
            (str): text string representing the value of an object
        """
        # TODO: maybe support a rounding kwarg?
        sigfigs = kwargs.get('sigfigs', 4)
        if isinstance(self.magnitude, self._ACCEPTABLE_SCALAR_TYPES):
            out = "{1:.{0}g}".format(sigfigs, self.magnitude)
            if self.uncertainty:
                # \u00B1 is the plus/minus sign
                out += "\u00B1{0:.4g}".format(self.uncertainty.magnitude)
        else:
            out = "{}".format(self.magnitude)
        if self.units and str(self.units) != 'dimensionless':
            # The format str is specific to pint units. ~ invokes abbreviations, P is "pretty" format
            out += " {:~P}".format(self.units)
        return out

    def contains_nan_value(self):
        """
        Determines if the value of the object contains a NaN value.
        Returns:
            (bool) true if the quantity contains one or more NaN values,
            false otherwise
        """
        return np.any(np.isnan(self.magnitude))

    def contains_complex_type(self):
        """
        Determines if the type of the variable holding the object's magnitude is complex.
        Returns:
            (bool) true if the quantity holds a complex scalar or array type as its value,
            false if it holds only real values
        """
        return self.is_complex_type(self.magnitude)

    @staticmethod
    def is_complex_type(value):
        """
        Determines if the type of the argument is complex. If the argument is non-scalar,
        it determines if the ndarray type contains complex data types.
        Returns:
            (bool) true if the argument holds a complex scalar or np.array.
        """
        if isinstance(value, np.ndarray):
            return np.issubdtype(value.dtype, np.complexfloating)
        elif isinstance(value, BaseQuantity):
            return value.contains_complex_type()
        elif isinstance(value, ureg.Quantity):
            # Unwrap pint quantities and test the bare magnitude
            return NumQuantity.is_complex_type(value.magnitude)
        return isinstance(value, complex)

    def contains_imaginary_value(self):
        """
        Determines if the value of the object contains a non-zero imaginary value.
        Note this returns false if the values are of complex type but the imaginary
        portions are (approximately) zero. To assess the type as complex, use
        is_complex_type().
        Returns:
            (bool) true if the quantity contains one or more non-zero imaginary
            values, false otherwise
        """
        if self.contains_complex_type():
            # Calling as static methods allows for evaluation of both scalars and arrays
            return not np.all(np.isclose(np.imag(self.magnitude), 0))
        return False

    def as_dict(self):
        """
        Serializes object as a dictionary. Object can be reconstructed with from_dict().
        Returns:
            (dict): representation of object as a dictionary
        """
        d = super(NumQuantity, self).as_dict()
        d.update({"@module": self.__class__.__module__,
                  "@class": self.__class__.__name__,
                  "value": self.magnitude,
                  "units": self.units.format_babel(),
                  "uncertainty": self.uncertainty.to_tuple() if self.uncertainty else None})
        return d

    def __eq__(self, other):
        """
        Determines if another NumQuantity object is equivalent to this object.
        Equivalence is defined as having the same symbol name, tags, provenance, and
        equal (within tolerance) value and uncertainty in the default units of the symbol.
        Note: __eq__() does not provide comparisons to other types, but does support
        implied comparisons by returning NotImplemented for other types.
        Args:
            other: (NumQuantity) the object to compare to
        Returns: (bool) True if the objects are equivalent
        """
        # Use has_eq_value_to() to compare only values.
        if not isinstance(other, NumQuantity):
            return NotImplemented
        if not self.uncertainty and not other.uncertainty:
            uncertainty_is_close = True
        elif self.uncertainty and other.uncertainty:
            uncertainty_is_close = self.values_close_in_units(self.uncertainty,
                                                              other.uncertainty,
                                                              units_for_comparison=self.uncertainty.units)
        else:
            # One has uncertainty and the other doesn't: not equivalent
            return False
        value_is_close = self.values_close_in_units(self.value, other.value,
                                                    units_for_comparison=self.symbol.units)
        return \
            super().__eq__(other) and \
            uncertainty_is_close and \
            value_is_close

    def has_eq_value_to(self, rhs):
        """
        Determines if the current quantity's value is equivalent to that of another quantity.
        This ignores provenance of the quantity and compares the values only.
        Equivalence is defined as having the same numerical value in the units defined by the
        quantities' symbol, within an absolute tolerance of 1e-8 and relative tolerance of 1e-5.
        Args:
            rhs: (NumQuantity) the quantity to which the current object will be compared
        Returns: (bool): True if the values are found to be equivalent
        Raises:
            TypeError: if rhs is not the same type as self
        """
        if not isinstance(rhs, type(self)):
            raise TypeError("This method requires two {} objects".format(type(self).__name__))
        return self.values_close_in_units(self.value, rhs.value,
                                          units_for_comparison=self.symbol.units)

    @staticmethod
    def values_close_in_units(lhs, rhs, units_for_comparison=None):
        """
        Compares two pint quantities in a given unit. The purpose is to
        ensure dimensional, small quantities (e.g. femtoseconds) don't
        get discounted as small, close-to-zero quantities.
        If units are not specified explicitly, they are selected using the
        following criteria, in order of precedence:
            1. If one quantity has a value of exactly 0, the units of that quantity
                are used for comparison.
            2. The units of both quantities are rescaled such that the magnitude
                of each quantity is between 1 and 1000, or where the unit is at the
                smallest (or largest) possible unit defined by pint. The smaller of
                the two units is then used to compare the values (i.e. gram would be
                selected over kilogram).
        Note: dimensionless quantities will NOT be scaled and will be treated
            as bare numbers. This means dimensionless values that are small,
            but different will be treated as equal if abs(a-b) <= 1e-8, e.g.
            1e-8 and 2e-8 will yield True, as will 1e-8 and 1e-20.
        Args:
            lhs: (pint.Quantity) quantity object to compare
            rhs: (pint.Quantity) quantity object to compare
            units_for_comparison: (str, pint.Units, tuple) units that the
                quantities will be compared in. Input can be any acceptable
                format for Quantity.to()
        Returns: (bool) True if the values are equal within an absolute tolerance
            of 1e-8 and a relative tolerance of 1e-5. False if not equal within
            the tolerance bounds, or the dimensionality of the units are not equal.
        """
        if not (isinstance(lhs, ureg.Quantity) and isinstance(rhs, ureg.Quantity)):
            raise TypeError("This method requires two pint Quantity objects")
        if lhs.units.dimensionality != rhs.units.dimensionality:
            return False
        if not units_for_comparison:
            if not isinstance(lhs.magnitude, np.ndarray):
                if lhs.magnitude == 0 and rhs.magnitude == 0:
                    return True
                elif lhs.magnitude == 0:
                    # Compare using the units of whatever the zero value is
                    units_for_comparison = lhs.units
                elif rhs.magnitude == 0:
                    units_for_comparison = rhs.units
                else:
                    # Select smallest unit that brings values close to 1
                    # Add a +1 buffer so that instead of 999.99999... micrograms
                    # we get 1 milligram instead.
                    lhs_compact = lhs.to_compact()
                    lhs_compact_units = (lhs_compact + 1 * lhs_compact.units).to_compact().units
                    rhs_compact = rhs.to_compact()
                    rhs_compact_units = (rhs_compact + 1 * rhs_compact.units).to_compact().units
                    if 1 * lhs_compact_units < 1 * rhs_compact_units:
                        units_for_comparison = lhs_compact_units
                    else:
                        units_for_comparison = rhs_compact_units
            else:
                try:
                    if 1 * lhs.units < 1 * rhs.units:
                        units_for_comparison = lhs.units
                    else:
                        units_for_comparison = rhs.units
                except DimensionalityError:
                    return False
        try:
            lhs_convert = lhs.to(units_for_comparison)
            rhs_convert = rhs.to(units_for_comparison)
        except DimensionalityError:
            return False
        return np.allclose(lhs_convert, rhs_convert)

    def __hash__(self):
        """
        Hash function for this class.
        Note: the hash function for this class does not hash the value,
        so it cannot alone determine equality.
        Returns: (int) hash value
        """
        return super().__hash__()
class ObjQuantity(BaseQuantity):
    """
    Class extending BaseQuantity for storing any value type that does not require units.
    Types shown below are how the objects are stored. See __init__() for initialization.
    Attributes:
        symbol_type: (Symbol) the type of information that is represented
            by the associated value
        value: (id) the value of the property
        tags: (list<str>) tags associated with the quantity, typically
            related to its origin, e. g. "DFT" or "ML" or "experiment"
        provenance: (ProvenanceElement) provenance associated with the
            object. See BaseQuantity.__init__() for more info.
    """
    def __init__(self, symbol_type, value, tags=None,
                 provenance=None):
        """
        Instantiates an instance of the ObjQuantity class.
        Args:
            symbol_type: (Symbol or str) the type of information that is represented
                by the associated value. If a string, assigns a symbol from
                the default symbols that has that string name
            value: (id) the value of the property, can be any type except None.
                Ideally, numerical values should be stored in NumQuantity objects,
                because ObjQuantity does not support units.
            tags: (list<str>) tags associated with the quantity, typically
                related to its origin, e. g. "DFT" or "ML" or "experiment"
            provenance: (ProvenanceElement) provenance associated with the
                object. See BaseQuantity.__init__() for more info.
        Raises:
            ValueError: if value is None
            TypeError: if typecasting value to the symbol's declared type fails
            NameError: if the symbol's declared type is not importable
        """
        if value is None:
            raise ValueError("ObjQuantity must hold a non-NoneType object for its value.")
        if isinstance(symbol_type, str):
            symbol_type = super().get_symbol_from_string(symbol_type)
        if not symbol_type.is_correct_object_type(value):
            # Attempt to cast the value to the type declared by the symbol,
            # but only if that type's module is already imported
            old_type = type(value)
            target_module = symbol_type.object_module
            target_class = symbol_type.object_class
            if target_module in sys.modules and \
                    hasattr(sys.modules[target_module], target_class):
                try:
                    cls_ = getattr(sys.modules[target_module], target_class)
                    value = cls_(value)
                except (TypeError, ValueError):
                    raise TypeError("Mismatch in type of value ({}) and type specified "
                                    "by '{}' object symbol ({}).\nTypecasting failed."
                                    "".format(old_type.__name__,
                                              symbol_type.name,
                                              symbol_type.object_class))
            else:
                # Do not try to import the module for security reasons.
                # We don't want malicious modules to be automatically imported.
                raise NameError("Mismatch in type of value ({}) and type specified "
                                "by '{}' object symbol ({}).\nCannot typecast because "
                                "'{}' is not imported or does not exist."
                                "".format(old_type.__name__,
                                          symbol_type.name,
                                          symbol_type.object_class,
                                          symbol_type.object_type))
            logger.warning("WARNING: Mismatch in type of value ({}) "
                           "and type specified by '{}' object symbol ({}). "
                           "Value cast as '{}'.".format(old_type.__name__,
                                                        symbol_type.name,
                                                        symbol_type.object_class,
                                                        symbol_type.object_type))
        super(ObjQuantity, self).__init__(symbol_type, value, tags=tags, provenance=provenance)

    @property
    def magnitude(self):
        """
        Returns the value of the quantity. Same as self.value.
        Returns:
            (id): value contained by quantity
        """
        return self._value

    @property
    def units(self):
        """
        Returns None because this class does not support units.
        Returns:
            None
        """
        return None

    @property
    def uncertainty(self):
        """
        Returns None because this class does not support uncertainty.
        Returns:
            None
        """
        return None

    def pretty_string(self, **kwargs):
        """
        Returns a string representing the value of the object in a pretty format.
        Returns:
            (str): text string representing the value of an object
        """
        return "{}".format(self.value)

    # TODO: Determine whether it's necessary to define these for ObjQuantity
    #       we could just assess this if models return NumQuantity
    def contains_nan_value(self):
        """
        Returns False because this class does not support numerical types.
        Returns:
            (bool): False
        """
        return False

    def contains_complex_type(self):
        """
        Returns False because this class does not support numerical types.
        Returns:
            (bool): False
        """
        return False

    def contains_imaginary_value(self):
        """
        Returns False because this class does not support numerical types.
        Returns:
            (bool): False
        """
        return False

    def has_eq_value_to(self, rhs):
        """
        Determines if the value of another ObjQuantity is equivalent to the current.
        Equivalence is defined by the default __eq__() method for the object held in value.
        Args:
            rhs: (ObjQuantity) object for value comparison
        Returns: (bool) True if the values are equal.
        Raises:
            TypeError: if rhs is not the same type as self
        """
        if not isinstance(rhs, type(self)):
            raise TypeError("This method requires two {} objects".format(type(self).__name__))
        return self.value == rhs.value

    def as_dict(self):
        """
        Serializes object as a dictionary. Object can be reconstructed with from_dict().
        Note: If value is not JSON serializable, this object will not be JSON serializable.
        Returns:
            (dict): representation of object as a dictionary
        """
        d = super().as_dict()
        d.update({"@module": self.__class__.__module__,
                  "@class": self.__class__.__name__,
                  "value": self.value})
        return d

    def __eq__(self, other):
        """
        Determines if the value of another ObjQuantity is equivalent to the current.
        Equivalence is defined by equivalence of symbol name, tags, provenance, and
        value as indicated by the default __eq__() method for the object held
        in value.
        Note: __eq__() does not provide comparisons to other types, but does support
        implied comparisons by returning NotImplemented for other types.
        Args:
            other: (ObjQuantity) object for value comparison
        Returns: (bool) True if the objects are equal.
        """
        # Use has_eq_value_to() to compare only values.
        if not isinstance(other, ObjQuantity):
            # Return NotImplemented (rather than False) so Python can fall
            # back to the other operand's __eq__; this matches both the
            # docstring above and NumQuantity.__eq__'s behavior.
            return NotImplemented
        return super().__eq__(other) and self.value == other.value

    def __hash__(self):
        """
        Hash function for this class.
        Note: the hash function for this class does not hash the value,
        so it cannot alone determine equality.
        Returns: (int) hash value
        """
        return super().__hash__()
class QuantityFactory(object):
    """
    Factory helpers for constructing BaseQuantity-derived objects.
    Use create_quantity() to build the appropriate subclass for a value type
    on the fly.
    """

    @staticmethod
    def create_quantity(symbol_type, value, units=None, tags=None,
                        provenance=None, uncertainty=None):
        """
        Factory method for BaseQuantity objects. Chooses NumQuantity or
        ObjQuantity based on the symbol's category and the value's type,
        providing a more intuitive call and backwards compatibility.
        If a BaseQuantity is passed as value, a new object is created;
        use BaseQuantity.to_quantity() to return the same object instead.
        Args:
            symbol_type: (str or Symbol) represents the type of data being stored
            value: (id) data to be stored
            units: (str, ureg.Unit) units of the data being stored. Must be None
                for non-numerical values
            tags: (list<str>) list of strings storing metadata from
                Quantity evaluation.
            provenance: (ProvenanceElement) provenance associated with the
                object (e. g. inputs, model, see ProvenanceElement)
            uncertainty: (id) uncertainty of the specified value
        Returns: (NumQuantity or ObjQuantity) constructed object of appropriate
            type based on value's type
        """
        if value is None:
            raise ValueError("Cannot initialize a BaseQuantity object with a value of None.")
        if isinstance(value, BaseQuantity):
            # Unwrap an existing quantity, inheriting any metadata the
            # caller did not explicitly override
            units = units or value.units
            tags = tags or value.tags
            provenance = provenance or value.provenance
            uncertainty = uncertainty or value.uncertainty
            value = value.value
        # TODO: This sort of thing probably indicates Symbol objects need to be split
        #       into different categories, like Quantity has been
        if not isinstance(symbol_type, Symbol):
            symbol_type = BaseQuantity.get_symbol_from_string(symbol_type)
        if symbol_type.category != 'object':
            # Numerical symbol: value must be an acceptable numerical type
            if not NumQuantity.is_acceptable_type(value):
                raise TypeError("Cannot initialize a {}-type symbol with a non-numerical"
                                " value type.".format(symbol_type.category))
            return NumQuantity(symbol_type, value,
                               units=units, tags=tags,
                               provenance=provenance,
                               uncertainty=uncertainty)
        # Object symbol: units and uncertainty are not supported
        if units is not None:
            logger.warning("Cannot assign units to object-type symbol '{}'. "
                           "Ignoring units.".format(symbol_type.name))
        if uncertainty is not None:
            logger.warning("Cannot assign uncertainty to object-type symbol '{}'. "
                           "Ignoring uncertainty.".format(symbol_type.name))
        return ObjQuantity(symbol_type, value,
                           tags=tags,
                           provenance=provenance)

    @staticmethod
    def from_default(symbol):
        """
        Builds the default quantity for a symbol.
        Args:
            symbol (Symbol or str): symbol or string corresponding to
                the symbol name
        Returns:
            BaseQuantity corresponding to default quantity from default
        """
        default = DEFAULT_SYMBOL_VALUES.get(symbol)
        if default is None:
            raise ValueError("No default value for {}".format(symbol))
        return QuantityFactory.create_quantity(
            symbol, default, provenance=ProvenanceElement(model='default'))

    @staticmethod
    def to_quantity(symbol: Union[str, Symbol],
                    to_coerce: Union[float, np.ndarray, ureg.Quantity, "BaseQuantity"],
                    **kwargs) -> "BaseQuantity":
        """
        Coerces the argument into a BaseQuantity-derived object. If input is:
            - BaseQuantity-derived object -> returned immediately without
              modification (same object, not copied)
            - Any other python object -> passed to create_quantity() with
              keyword arguments to create a new BaseQuantity-derived object
        Args:
            symbol: a string or Symbol object representing the type of data stored
            to_coerce: item to be converted into a BaseQuantity-derived object
            kwargs: keyword arguments to create new object if to_coerce is not a BaseQuantity-derived object
        Returns:
            (BaseQuantity) item as a BaseQuantity-derived object
        """
        # Already a quantity: hand back the same object, uncopied
        if isinstance(to_coerce, BaseQuantity):
            return to_coerce
        # Otherwise build the correct BaseQuantity - warns if units are assumed
        return QuantityFactory.create_quantity(symbol, to_coerce, **kwargs)

    @staticmethod
    def from_dict(d):
        """
        Constructs a BaseQuantity-derived object from its dictionary form.
        Args:
            d: (dict) input dictionary
        Returns: (NumQuantity, ObjQuantity) new object defined from dictionary
        """
        cls_name = d['@class']
        if cls_name == 'NumQuantity':
            return NumQuantity.from_dict(d)
        if cls_name == 'ObjQuantity':
            return ObjQuantity.from_dict(d)
        raise ValueError("Cannot build non-BaseQuantity objects!")
| [
"logging.getLogger",
"propnet.symbols.DEFAULT_SYMBOL_VALUES.get",
"numpy.allclose",
"networkx.MultiDiGraph",
"propnet.ureg.Quantity.from_tuple",
"numpy.asscalar",
"networkx.nx_agraph.to_agraph",
"uuid.uuid4",
"numpy.issubdtype",
"datetime.datetime.now",
"propnet.symbols.DEFAULT_SYMBOLS.keys",
... | [((510, 537), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (527, 537), False, 'import logging\n'), ((6308, 6334), 'copy.deepcopy', 'copy.deepcopy', (['self._value'], {}), '(self._value)\n', (6321, 6334), False, 'import copy\n'), ((10196, 10228), 'networkx.nx_agraph.to_agraph', 'nx.nx_agraph.to_agraph', (['nx_graph'], {}), '(nx_graph)\n', (10218, 10228), True, 'import networkx as nx\n'), ((20628, 20647), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (20641, 20647), False, 'import copy\n'), ((22442, 22491), 'propnet.core.provenance.ProvenanceElement', 'ProvenanceElement', ([], {'model': '"""aggregation"""', 'inputs': '[]'}), "(model='aggregation', inputs=[])\n", (22459, 22491), False, 'from propnet.core.provenance import ProvenanceElement\n'), ((23631, 23663), 'copy.deepcopy', 'copy.deepcopy', (['self._uncertainty'], {}), '(self._uncertainty)\n', (23644, 23663), False, 'import copy\n'), ((35681, 35718), 'numpy.allclose', 'np.allclose', (['lhs_convert', 'rhs_convert'], {}), '(lhs_convert, rhs_convert)\n', (35692, 35718), True, 'import numpy as np\n'), ((47213, 47246), 'propnet.symbols.DEFAULT_SYMBOL_VALUES.get', 'DEFAULT_SYMBOL_VALUES.get', (['symbol'], {}), '(symbol)\n', (47238, 47246), False, 'from propnet.symbols import DEFAULT_SYMBOLS, DEFAULT_SYMBOL_VALUES\n'), ((47357, 47391), 'propnet.core.provenance.ProvenanceElement', 'ProvenanceElement', ([], {'model': '"""default"""'}), "(model='default')\n", (47374, 47391), False, 'from propnet.core.provenance import ProvenanceElement\n'), ((2901, 2913), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2911, 2913), False, 'import uuid\n'), ((4347, 4369), 'propnet.symbols.DEFAULT_SYMBOLS.keys', 'DEFAULT_SYMBOLS.keys', ([], {}), '()\n', (4367, 4369), False, 'from propnet.symbols import DEFAULT_SYMBOLS, DEFAULT_SYMBOL_VALUES\n'), ((8796, 8813), 'networkx.MultiDiGraph', 'nx.MultiDiGraph', ([], {}), '()\n', (8811, 8813), True, 'import networkx as nx\n'), ((26827, 26851), 
'numpy.isnan', 'np.isnan', (['self.magnitude'], {}), '(self.magnitude)\n', (26835, 26851), True, 'import numpy as np\n'), ((27766, 27812), 'numpy.issubdtype', 'np.issubdtype', (['value.dtype', 'np.complexfloating'], {}), '(value.dtype, np.complexfloating)\n', (27779, 27812), True, 'import numpy as np\n'), ((10604, 10626), 'propnet.symbols.DEFAULT_SYMBOLS.keys', 'DEFAULT_SYMBOLS.keys', ([], {}), '()\n', (10624, 10626), False, 'from propnet.symbols import DEFAULT_SYMBOLS, DEFAULT_SYMBOL_VALUES\n'), ((17781, 17799), 'numpy.asscalar', 'np.asscalar', (['value'], {}), '(value)\n', (17792, 17799), True, 'import numpy as np\n'), ((20212, 20241), 'numpy.issubdtype', 'np.issubdtype', (['this_dtype', 'dt'], {}), '(this_dtype, dt)\n', (20225, 20241), True, 'import numpy as np\n'), ((17962, 17989), 'propnet.ureg.Quantity', 'ureg.Quantity', (['value', 'units'], {}), '(value, units)\n', (17975, 17989), False, 'from propnet import ureg\n'), ((18429, 18453), 'numpy.asscalar', 'np.asscalar', (['uncertainty'], {}), '(uncertainty)\n', (18440, 18453), True, 'import numpy as np\n'), ((3307, 3321), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3319, 3321), False, 'from datetime import datetime\n'), ((28913, 28936), 'numpy.imag', 'np.imag', (['self.magnitude'], {}), '(self.magnitude)\n', (28920, 28936), True, 'import numpy as np\n'), ((3821, 3835), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3833, 3835), False, 'from datetime import datetime\n'), ((18921, 18954), 'propnet.ureg.Quantity', 'ureg.Quantity', (['uncertainty', 'units'], {}), '(uncertainty, units)\n', (18934, 18954), False, 'from propnet import ureg\n'), ((18782, 18819), 'propnet.ureg.Quantity.from_tuple', 'ureg.Quantity.from_tuple', (['uncertainty'], {}), '(uncertainty)\n', (18806, 18819), False, 'from propnet import ureg\n')] |
# Trying to find (rho,g) = (growth rate difference, gen time ratio) of two variants using non-parametric methods (Kendall tau - could improve by weighting).
# It relies on non-steady overall growth in both variants over the time window to get a localised result in (rho,g)-space.
# Steady overall growth in one variant means you can trade off increased coefficient of that variant with increased g.
from stuff import *
import numpy as np
from math import exp,log,sqrt
from scipy.stats import kendalltau
# Command-line configuration: date window defaults to all dates; override
# with argv[1] (min date), argv[2] (max date), argv[3] (min count per variant).
mindate='0000-00-00'
maxdate='9999-99-99'
mincount=10
# prlevel appears unused in this script — possibly leftover verbosity flag.
prlevel=1
if len(sys.argv)>1: mindate=sys.argv[1]
if len(sys.argv)>2: maxdate=sys.argv[2]
if len(sys.argv)>3: mincount=int(sys.argv[3])
print("Using date range",mindate,"-",maxdate,file=sys.stderr)
print("Min count:",mincount,file=sys.stderr)
# Variant0, Variant1 counts by day
# Read whitespace-separated rows "date count0 count1" from stdin; keep only
# days inside the window where both variants reach the minimum count.
V0=[];V1=[];DT=[]
fp=sys.stdin
#fp=open('cog.y145h','r')
if 1:
  for x in fp:
    y=x.strip().split()
    if y[0]>=mindate and y[0]<=maxdate:
      d=datetoday(y[0])
      v0=int(y[1])
      v1=int(y[2])
      if v0>=mincount and v1>=mincount: V0.append(v0);V1.append(v1);DT.append(d)
ndays=len(V0)
# Scale sequenced totals up to smoothed version of overall samples (attempting to correct for limited sequencing capacity and testing fluctuations)
casescsv=loadcsv('UKcasesbysampledate.csv')
# Map day number -> reported case count for that specimen date.
cases=dict(zip( map(datetoday,casescsv['date']) , casescsv['newCasesBySpecimenDate'] ))
# scases[day] = centred 7-day moving average of case counts over the days present.
scases={}
for day in range(min(cases),max(cases)+1):
  s0=s1=0
  for day1 in range(day-3,day+4):
    if day1 in cases: s0+=1;s1+=cases[day1]
  if s0>0: scases[day]=s1/s0
# Rescale the per-day variant counts so the two variants sum to the smoothed
# overall case count; also dump the scaled series to file 'temp1'.
with open('temp1','w') as fp:
  S0=[];S1=[]
  for (dt,v0,v1) in zip(DT,V0,V1):
    sc=scases[dt]/(v0+v1)
    S0.append(sc*v0)
    S1.append(sc*v1)
    print(daytodate(dt),"%8d %8d %8.1f %8.1f"%(v0,v1,sc*v0,sc*v1),file=fp)
# Do simple regression to get good initial values
# Weighted least squares of Y = log(V1/V0) against time, with weights
# W = 1/(1/v0 + 1/v1); slope c[1] is the growth-rate difference estimate.
A=np.array(V0)
D=np.array(V1)
W=1/(1/A+1/D)
day0=int(round(sum(DT)/len(DT)))
X=np.array(DT)-day0
Y=np.log((D+1e-20)/(A+1e-20))
m=np.array([[sum(W), sum(W*X)], [sum(W*X), sum(W*X*X)]])
r=np.array([sum(W*Y),sum(W*X*Y)])
c=np.linalg.solve(m,r)
growth=c[1]
# Shift day0 to the crossover day where the fitted log-ratio is zero.
day0=day0-c[0]/c[1]
# Not clear whether it's better to scale up to overall samples or not
(N0,N1)=(S0,S1)
#(N0,N1)=(V0,V1)
# Trying to find maximum (over rho) rank correlation of log(n1(t))-rho*log(n0(t)), n0(t), n1(t) = numbers of each variant
# The best rho should be the ratio of gen times, T0/T1
# Rank method using maximum rank correlation doesn't work so well if cases of new variant are just growing, because then it's happy to use rho=0
if 0:
  # (disabled) one-dimensional scan over rho only
  for i in range(101):
    rho=i/100*2
    l=[log(n1)-rho*log(n0) for (n0,n1) in zip(N0,N1)]
    res=kendalltau(l,range(ndays))
    print("%8.5f %10.7f"%(rho,res.correlation))
# Trying to find minimum (over rho, g) rank correlation of log(n1(t))-rho*log(n0(t))-g*t, n0(t), n1(t) = numbers of each variant
# Grid search near the regression growth rate; print every (g, rho) pair whose
# Kendall correlation with time is statistically indistinguishable from zero.
for g in np.arange(max(growth-.01,0),growth+.01,0.0005):
  for rho in np.arange(0.,2,0.02):
    l=[log(n1)-rho*log(n0)-g*(day-day0) for (day,n0,n1) in zip(DT,N0,N1)]
    res=kendalltau(l,range(ndays))
    if res.pvalue>.025:
      print("%8.5f %8.5f %10.7f %10.7f"%(g,rho,res.correlation,res.pvalue))
| [
"numpy.linalg.solve",
"numpy.log",
"math.log",
"numpy.array",
"numpy.arange"
] | [((1864, 1876), 'numpy.array', 'np.array', (['V0'], {}), '(V0)\n', (1872, 1876), True, 'import numpy as np\n'), ((1879, 1891), 'numpy.array', 'np.array', (['V1'], {}), '(V1)\n', (1887, 1891), True, 'import numpy as np\n'), ((1961, 1994), 'numpy.log', 'np.log', (['((D + 1e-20) / (A + 1e-20))'], {}), '((D + 1e-20) / (A + 1e-20))\n', (1967, 1994), True, 'import numpy as np\n'), ((2082, 2103), 'numpy.linalg.solve', 'np.linalg.solve', (['m', 'r'], {}), '(m, r)\n', (2097, 2103), True, 'import numpy as np\n'), ((1941, 1953), 'numpy.array', 'np.array', (['DT'], {}), '(DT)\n', (1949, 1953), True, 'import numpy as np\n'), ((2945, 2968), 'numpy.arange', 'np.arange', (['(0.0)', '(2)', '(0.02)'], {}), '(0.0, 2, 0.02)\n', (2954, 2968), True, 'import numpy as np\n'), ((2614, 2621), 'math.log', 'log', (['n1'], {}), '(n1)\n', (2617, 2621), False, 'from math import exp, log, sqrt\n'), ((2626, 2633), 'math.log', 'log', (['n0'], {}), '(n0)\n', (2629, 2633), False, 'from math import exp, log, sqrt\n'), ((2974, 2981), 'math.log', 'log', (['n1'], {}), '(n1)\n', (2977, 2981), False, 'from math import exp, log, sqrt\n'), ((2986, 2993), 'math.log', 'log', (['n0'], {}), '(n0)\n', (2989, 2993), False, 'from math import exp, log, sqrt\n')] |
# importing libraries
import cv2
import dlib
import numpy as np
# path for predictor
# Module-level models loaded once at import time:
# - predictor: dlib 68-point facial landmark model (file must exist locally)
# - face_cascade: OpenCV Haar cascade, used only to draw face rectangles
# - detector: dlib HOG-based frontal face detector, used for landmark extraction
PREDICTOR_PATH = "shape_predictor_68_face_landmarks.dat"
predictor = dlib.shape_predictor(PREDICTOR_PATH)
face_cascade = cv2.CascadeClassifier('Haarcascades/haarcascade_frontalface_default.xml')
detector = dlib.get_frontal_face_detector()
# ensuring face detection works for single face
def get_landmarks(im):
    """Detect exactly one face in *im* and return its 68 landmarks.

    Returns a 68x2 np.matrix of (x, y) points on success, the string
    'error' when more than one face is found, or 'No face' when none is.
    """
    detections = detector(im, 1)
    n_faces = len(detections)
    if n_faces == 0:
        return 'No face'
    if n_faces > 1:
        return 'error'
    shape = predictor(im, detections[0])
    coords = [[part.x, part.y] for part in shape.parts()]
    return np.matrix(coords)
def annotate_landmarks(im, landmarks):
    """Return a copy of *im* with every landmark drawn and numbered.

    Each row of *landmarks* is marked with a small yellow circle and its
    index rendered in red; the input image itself is not modified.
    """
    annotated = im.copy()
    for index, point in enumerate(landmarks):
        position = (point[0, 0], point[0, 1])
        cv2.putText(annotated, str(index), position,
                    fontFace=cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,
                    fontScale=0.4,
                    color=(0, 0, 255))
        cv2.circle(annotated, position, 3, color=(0, 255, 255))
    return annotated
def top_lip(landmarks):
    """Return the mean y coordinate (int) of the top-lip landmarks.

    Uses dlib's 68-point indexing: points 50-52 and 61-63.

    Args:
        landmarks: 68x2 matrix of (x, y) landmark coordinates

    Returns:
        int: mean vertical position of the six top-lip points
    """
    # Fix: the original also built np.squeeze(np.asarray(...)) into an
    # unused local; that dead computation is removed.
    top_lip_pts = [landmarks[i] for i in range(50, 53)]
    top_lip_pts += [landmarks[i] for i in range(61, 64)]
    top_lip_mean = np.mean(top_lip_pts, axis=0)
    return int(top_lip_mean[:, 1])
def bottom_lip(landmarks):
    """Return the mean y coordinate (int) of the bottom-lip landmarks.

    Uses dlib's 68-point indexing: points 65-67 and 56-58.

    Args:
        landmarks: 68x2 matrix of (x, y) landmark coordinates

    Returns:
        int: mean vertical position of the six bottom-lip points
    """
    # Fix: removed the unused np.squeeze(np.asarray(...)) dead local
    # that the original computed and never read.
    bottom_lip_pts = [landmarks[i] for i in range(65, 68)]
    bottom_lip_pts += [landmarks[i] for i in range(56, 59)]
    bottom_lip_mean = np.mean(bottom_lip_pts, axis=0)
    return int(bottom_lip_mean[:, 1])
def mouth_open(image):
    """Measure the mouth opening on *image*.

    Returns:
        tuple: (annotated image, absolute distance between lip centers).
        When landmark detection fails, returns the unmodified image and 0.

    Bug fix: get_landmarks() can return either 'error' (multiple faces)
    or 'No face' (no face); the original only checked 'error', so a frame
    with no face crashed inside annotate_landmarks(). Both sentinels are
    now caught with an isinstance check, which also avoids NumPy's
    fragile matrix-vs-string equality comparison.
    """
    landmarks = get_landmarks(image)
    if isinstance(landmarks, str):  # 'error' or 'No face'
        return image, 0
    image_with_landmarks = annotate_landmarks(image, landmarks)
    top_lip_center = top_lip(landmarks)
    bottom_lip_center = bottom_lip(landmarks)
    lip_distance = abs(top_lip_center - bottom_lip_center)
    return image_with_landmarks, lip_distance
# for right eyes
def top_eye_lid_right(landmarks):
    """Return the mean y coordinate (int) of landmarks 42-43.

    NOTE(review): indices 42-43 are right-eye points in dlib's 68-point
    scheme; whether they are strictly the "top lid" should be confirmed
    against the landmark map.

    Args:
        landmarks: 68x2 matrix of (x, y) landmark coordinates
    """
    # Fix: removed the unused np.squeeze(np.asarray(...)) dead local.
    pts = [landmarks[i] for i in range(42, 44)]
    top_eye_lid_right_mean = np.mean(pts, axis=0)
    return int(top_eye_lid_right_mean[:, 1])
def bottom_eye_lid_right(landmarks):
    """Return the mean y coordinate (int) of landmarks 45-46.

    NOTE(review): indices 45-46 are right-eye points in dlib's 68-point
    scheme; confirm the "bottom lid" interpretation against the map.

    Args:
        landmarks: 68x2 matrix of (x, y) landmark coordinates
    """
    # Fix: removed the unused np.squeeze(np.asarray(...)) dead local.
    pts = [landmarks[i] for i in range(45, 47)]
    bottom_eye_lid_right_mean = np.mean(pts, axis=0)
    return int(bottom_eye_lid_right_mean[:, 1])
def right_eyes_open(image):
    """Measure the right-eye lid gap on *image*.

    Returns:
        tuple: (annotated image, absolute distance between the lid centers).
        When landmark detection fails, returns the unmodified image and 0.

    Bug fix: get_landmarks() can return 'error' or 'No face'; the original
    only checked 'error', so a faceless frame crashed downstream. Both
    string sentinels are now handled via isinstance.
    """
    landmarks = get_landmarks(image)
    if isinstance(landmarks, str):  # 'error' or 'No face'
        return image, 0
    image_with_landmarks = annotate_landmarks(image, landmarks)
    top_eye_lid_right_center = top_eye_lid_right(landmarks)
    bottom_eye_lid_right_center = bottom_eye_lid_right(landmarks)
    eye_lid_right_distance = abs(top_eye_lid_right_center - bottom_eye_lid_right_center)
    return image_with_landmarks, eye_lid_right_distance
# for left eyes
def top_eye_lid_left(landmarks):
    """Return the mean y coordinate (int) of landmarks 36-37.

    NOTE(review): indices 36-37 are left-eye points in dlib's 68-point
    scheme; confirm the "top lid" interpretation against the map.

    Args:
        landmarks: 68x2 matrix of (x, y) landmark coordinates
    """
    # Fix: removed the unused np.squeeze(np.asarray(...)) dead local.
    pts = [landmarks[i] for i in range(36, 38)]
    top_eye_lid_left_mean = np.mean(pts, axis=0)
    return int(top_eye_lid_left_mean[:, 1])
def bottom_eye_lid_left(landmarks):
    """Return the mean y coordinate (int) of landmarks 39-40.

    NOTE(review): indices 39-40 are left-eye points in dlib's 68-point
    scheme; confirm the "bottom lid" interpretation against the map.

    Args:
        landmarks: 68x2 matrix of (x, y) landmark coordinates
    """
    # Fix: removed the unused np.squeeze(np.asarray(...)) dead local.
    pts = [landmarks[i] for i in range(39, 41)]
    bottom_eye_lid_left_mean = np.mean(pts, axis=0)
    return int(bottom_eye_lid_left_mean[:, 1])
def left_eyes_open(image):
    """Measure the left-eye lid gap on *image*.

    Returns:
        tuple: (annotated image, absolute distance between the lid centers).
        When landmark detection fails, returns the unmodified image and 0.

    Bug fix: get_landmarks() can return 'error' or 'No face'; the original
    only checked 'error', so a faceless frame crashed downstream. Both
    string sentinels are now handled via isinstance.
    """
    landmarks = get_landmarks(image)
    if isinstance(landmarks, str):  # 'error' or 'No face'
        return image, 0
    image_with_landmarks = annotate_landmarks(image, landmarks)
    top_eye_lid_left_center = top_eye_lid_left(landmarks)
    bottom_eye_lid_left_center = bottom_eye_lid_left(landmarks)
    eye_lid_left_distance = abs(top_eye_lid_left_center - bottom_eye_lid_left_center)
    return image_with_landmarks, eye_lid_left_distance
# yawn detector main program trial and error
# Open the default camera and run the detection loop frame by frame.
cap = cv2.VideoCapture(0)
yawns = 0
yawn_status = False
right_eye_open_status = False
left_eye_open_status = False
while True:
    ret, frame = cap.read()
    # Landmark-based measurements for the mouth and both eyelids.
    # NOTE(review): each of these calls re-runs face detection on the same
    # frame, so the detector work happens three times per frame.
    image_landmarks, lip_distance = mouth_open(frame)
    image_landmarks, eye_lid_right_distance = right_eyes_open(frame)
    image_landmarks, eye_lid_left_distance = left_eyes_open(frame)
    # Haar-cascade face boxes are used only for drawing rectangles below.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray)
    # Remember previous states so a yawn is counted on the open->closed edge.
    prev_yawn_status = yawn_status
    prev_right_eye_open_status = right_eye_open_status
    prev_left_eye_open_status = left_eye_open_status
    for (x,y,w,h) in faces:
        cv2.rectangle(frame, (x,y), (x+w, y+h), (255,0,0),2)
        # ROI_gray = gray[y:y+h, x:x+w]
        # ROI_color = frame[y:y+h, x:x+w]
    # A lip gap above 15 pixels counts as a yawn in progress.
    if lip_distance > 15:
        yawn_status = True
        cv2.putText(frame, 'subject is yawning', (10, 100),
                    cv2.FONT_HERSHEY_COMPLEX, 0.5, (0,0,255), 1)
        # output_text = 'Yawn Count: ' + str(yawns + 1)
        # cv2.putText(frame, output_text, (50,50),
        #             cv2.FONT_HERSHEY_COMPLEX, 1, (0,255,127), 2)
    else:
        yawn_status = False
    # An eyelid gap above 3 pixels counts as an open eye.
    if eye_lid_right_distance > 3:
        right_eye_open_status = True
        cv2.putText(frame, 'right eye is open', (10, 20),
                    cv2.FONT_HERSHEY_COMPLEX, 0.5, (0,255,0), 1)
    else:
        right_eye_open_status = False
        cv2.putText(frame, 'right eye is closed', (10, 20),
                    cv2.FONT_HERSHEY_COMPLEX, 0.5, (0,255,0), 1)
    if eye_lid_left_distance > 3:
        left_eye_open_status = True
        cv2.putText(frame, 'left eye is open', (10, 40),
                    cv2.FONT_HERSHEY_COMPLEX, 0.5, (0,255,0), 1)
    else:
        left_eye_open_status = False
        cv2.putText(frame, 'left eye is closed', (10, 40),
                    cv2.FONT_HERSHEY_COMPLEX, 0.5, (0,255,0), 1)
    # Count a completed yawn when the mouth transitions from open to closed.
    if prev_yawn_status == True and yawn_status ==False:
        yawns += 1
    cv2.imshow('Live Landmarks', image_landmarks)
    cv2.imshow('Drowsy Detection', frame)
    # Key code 13 (carriage return / Enter) quits the loop.
    if cv2.waitKey(1) == 13:
        break
cap.release()
cv2.destroyAllWindows() | [
"cv2.rectangle",
"numpy.mean",
"numpy.asarray",
"dlib.shape_predictor",
"cv2.imshow",
"cv2.putText",
"dlib.get_frontal_face_detector",
"cv2.circle",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"cv2.CascadeClassifier",
"cv2.waitKey"
] | [((155, 191), 'dlib.shape_predictor', 'dlib.shape_predictor', (['PREDICTOR_PATH'], {}), '(PREDICTOR_PATH)\n', (175, 191), False, 'import dlib\n'), ((207, 280), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""Haarcascades/haarcascade_frontalface_default.xml"""'], {}), "('Haarcascades/haarcascade_frontalface_default.xml')\n", (228, 280), False, 'import cv2\n'), ((292, 324), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (322, 324), False, 'import dlib\n'), ((4506, 4525), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (4522, 4525), False, 'import cv2\n'), ((6714, 6737), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (6735, 6737), False, 'import cv2\n'), ((1235, 1263), 'numpy.mean', 'np.mean', (['top_lip_pts'], {'axis': '(0)'}), '(top_lip_pts, axis=0)\n', (1242, 1263), True, 'import numpy as np\n'), ((1581, 1612), 'numpy.mean', 'np.mean', (['bottom_lip_pts'], {'axis': '(0)'}), '(bottom_lip_pts, axis=0)\n', (1588, 1612), True, 'import numpy as np\n'), ((2292, 2330), 'numpy.mean', 'np.mean', (['top_eye_lid_right_pts'], {'axis': '(0)'}), '(top_eye_lid_right_pts, axis=0)\n', (2299, 2330), True, 'import numpy as np\n'), ((2647, 2688), 'numpy.mean', 'np.mean', (['bottom_eye_lid_right_pts'], {'axis': '(0)'}), '(bottom_eye_lid_right_pts, axis=0)\n', (2654, 2688), True, 'import numpy as np\n'), ((3514, 3551), 'numpy.mean', 'np.mean', (['top_eye_lid_left_pts'], {'axis': '(0)'}), '(top_eye_lid_left_pts, axis=0)\n', (3521, 3551), True, 'import numpy as np\n'), ((3861, 3901), 'numpy.mean', 'np.mean', (['bottom_eye_lid_left_pts'], {'axis': '(0)'}), '(bottom_eye_lid_left_pts, axis=0)\n', (3868, 3901), True, 'import numpy as np\n'), ((4858, 4897), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (4870, 4897), False, 'import cv2\n'), ((6555, 6600), 'cv2.imshow', 'cv2.imshow', (['"""Live Landmarks"""', 'image_landmarks'], {}), "('Live 
Landmarks', image_landmarks)\n", (6565, 6600), False, 'import cv2\n'), ((6605, 6642), 'cv2.imshow', 'cv2.imshow', (['"""Drowsy Detection"""', 'frame'], {}), "('Drowsy Detection', frame)\n", (6615, 6642), False, 'import cv2\n'), ((920, 963), 'cv2.circle', 'cv2.circle', (['im', 'pos', '(3)'], {'color': '(0, 255, 255)'}), '(im, pos, 3, color=(0, 255, 255))\n', (930, 963), False, 'import cv2\n'), ((1191, 1214), 'numpy.asarray', 'np.asarray', (['top_lip_pts'], {}), '(top_lip_pts)\n', (1201, 1214), True, 'import numpy as np\n'), ((1531, 1557), 'numpy.asarray', 'np.asarray', (['bottom_lip_pts'], {}), '(bottom_lip_pts)\n', (1541, 1557), True, 'import numpy as np\n'), ((2228, 2261), 'numpy.asarray', 'np.asarray', (['top_eye_lid_right_pts'], {}), '(top_eye_lid_right_pts)\n', (2238, 2261), True, 'import numpy as np\n'), ((2577, 2613), 'numpy.asarray', 'np.asarray', (['bottom_eye_lid_right_pts'], {}), '(bottom_eye_lid_right_pts)\n', (2587, 2613), True, 'import numpy as np\n'), ((3452, 3484), 'numpy.asarray', 'np.asarray', (['top_eye_lid_left_pts'], {}), '(top_eye_lid_left_pts)\n', (3462, 3484), True, 'import numpy as np\n'), ((3793, 3828), 'numpy.asarray', 'np.asarray', (['bottom_eye_lid_left_pts'], {}), '(bottom_eye_lid_left_pts)\n', (3803, 3828), True, 'import numpy as np\n'), ((5134, 5194), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(x + w, y + h)', '(255, 0, 0)', '(2)'], {}), '(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)\n', (5147, 5194), False, 'import cv2\n'), ((5344, 5447), 'cv2.putText', 'cv2.putText', (['frame', '"""subject is yawning"""', '(10, 100)', 'cv2.FONT_HERSHEY_COMPLEX', '(0.5)', '(0, 0, 255)', '(1)'], {}), "(frame, 'subject is yawning', (10, 100), cv2.\n FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 1)\n", (5355, 5447), False, 'import cv2\n'), ((5783, 5883), 'cv2.putText', 'cv2.putText', (['frame', '"""right eye is open"""', '(10, 20)', 'cv2.FONT_HERSHEY_COMPLEX', '(0.5)', '(0, 255, 0)', '(1)'], {}), "(frame, 'right eye is open', (10, 20), 
cv2.FONT_HERSHEY_COMPLEX,\n 0.5, (0, 255, 0), 1)\n", (5794, 5883), False, 'import cv2\n'), ((5962, 6065), 'cv2.putText', 'cv2.putText', (['frame', '"""right eye is closed"""', '(10, 20)', 'cv2.FONT_HERSHEY_COMPLEX', '(0.5)', '(0, 255, 0)', '(1)'], {}), "(frame, 'right eye is closed', (10, 20), cv2.\n FONT_HERSHEY_COMPLEX, 0.5, (0, 255, 0), 1)\n", (5973, 6065), False, 'import cv2\n'), ((6170, 6270), 'cv2.putText', 'cv2.putText', (['frame', '"""left eye is open"""', '(10, 40)', 'cv2.FONT_HERSHEY_COMPLEX', '(0.5)', '(0, 255, 0)', '(1)'], {}), "(frame, 'left eye is open', (10, 40), cv2.FONT_HERSHEY_COMPLEX, \n 0.5, (0, 255, 0), 1)\n", (6181, 6270), False, 'import cv2\n'), ((6342, 6443), 'cv2.putText', 'cv2.putText', (['frame', '"""left eye is closed"""', '(10, 40)', 'cv2.FONT_HERSHEY_COMPLEX', '(0.5)', '(0, 255, 0)', '(1)'], {}), "(frame, 'left eye is closed', (10, 40), cv2.FONT_HERSHEY_COMPLEX,\n 0.5, (0, 255, 0), 1)\n", (6353, 6443), False, 'import cv2\n'), ((6655, 6669), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (6666, 6669), False, 'import cv2\n')] |
import numpy as np
from sklearn.neural_network import MLPClassifier
from datetime import datetime
import sys
def get_learning_speed(loss_curve):
    """Estimate learning speed from a training loss curve.

    The speed is 1 / (k + 1), where k is the index of the first entry
    whose loss is at or below the cutoff (1.5); 0 if the loss never
    reaches the cutoff.
    """
    cutoff = 1.5  # loss threshold that counts as "learned"
    below_cutoff = np.flatnonzero(np.asarray(loss_curve) <= cutoff)
    if below_cutoff.size == 0:
        return 0
    return 1.0 / (below_cutoff[0] + 1)
def run_learning(samples, target):
    """Train an SGD multilayer perceptron on (samples, target).

    The single hidden layer is sized to the number of input features.

    Returns:
        tuple: (training score, final loss, learning speed, iterations);
        learning speed is NaN if the fit produced no loss curve.
    """
    classifier = MLPClassifier(solver='sgd',
                               alpha=0.0001,
                               random_state=1,
                               max_iter=5000,
                               learning_rate='adaptive',
                               early_stopping=False,
                               hidden_layer_sizes=(samples.shape[1],))
    classifier.fit(samples, target)
    score = classifier.score(samples, target)
    if hasattr(classifier, "loss_curve_"):
        learning_speed = get_learning_speed(classifier.loss_curve_)
    else:
        learning_speed = np.nan
    return score, classifier.loss_, learning_speed, classifier.n_iter_
if __name__ == '__main__':
    startTime = datetime.now()
    # Fixed seed so the random class labels are reproducible.
    np.random.seed(42)
    # First CLI argument: directory containing the pre-generated sample files.
    basedir = sys.argv[1]
    print(basedir)
    n_syn = 4
    n_classes = 10
    # Fractions 0.1 .. 0.9 used only to select sample file names —
    # TODO confirm the meaning of f_mf against the sample-generation code.
    f_mf = np.linspace(0.1, 0.9, 9)
    mf_results = np.zeros((len(f_mf), 4))
    gc_results = np.zeros((len(f_mf), 4))
    for i, fraction in enumerate(f_mf):
        # Transpose so that rows are samples and columns are features.
        mf_samples = np.loadtxt(basedir + '/MF_samples_{}_{:.2f}.txt'.format(n_syn, fraction)).T
        gc_samples = np.loadtxt(basedir + '/GrC_samples_{}_{:.2f}.txt'.format(n_syn, fraction)).T
        # Random class labels, shared by both sample sets for this fraction —
        # presumably to probe learnability of random mappings; confirm intent.
        y = np.random.choice(n_classes, mf_samples.shape[0], replace=True)
        mf_results[i] = run_learning(samples=mf_samples, target=y)
        gc_results[i] = run_learning(samples=gc_samples, target=y)
    print('MF patterns:')
    print(mf_results)
    print('')
    print('GC patterns:')
    print(gc_results)
    # Persist MF and GC result rows side by side, tab-separated.
    np.savetxt(basedir + '/sklearn_result.txt', np.hstack((mf_results, gc_results)), delimiter='\t')
    print(datetime.now() - startTime)
| [
"sklearn.neural_network.MLPClassifier",
"numpy.hstack",
"numpy.random.choice",
"datetime.datetime.now",
"numpy.linspace",
"numpy.array",
"numpy.random.seed"
] | [((391, 560), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', ([], {'solver': '"""sgd"""', 'alpha': '(0.0001)', 'random_state': '(1)', 'max_iter': '(5000)', 'learning_rate': '"""adaptive"""', 'early_stopping': '(False)', 'hidden_layer_sizes': '(samples.shape[1],)'}), "(solver='sgd', alpha=0.0001, random_state=1, max_iter=5000,\n learning_rate='adaptive', early_stopping=False, hidden_layer_sizes=(\n samples.shape[1],))\n", (404, 560), False, 'from sklearn.neural_network import MLPClassifier\n'), ((969, 983), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (981, 983), False, 'from datetime import datetime\n'), ((988, 1006), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1002, 1006), True, 'import numpy as np\n'), ((1098, 1122), 'numpy.linspace', 'np.linspace', (['(0.1)', '(0.9)', '(9)'], {}), '(0.1, 0.9, 9)\n', (1109, 1122), True, 'import numpy as np\n'), ((1457, 1519), 'numpy.random.choice', 'np.random.choice', (['n_classes', 'mf_samples.shape[0]'], {'replace': '(True)'}), '(n_classes, mf_samples.shape[0], replace=True)\n', (1473, 1519), True, 'import numpy as np\n'), ((1815, 1850), 'numpy.hstack', 'np.hstack', (['(mf_results, gc_results)'], {}), '((mf_results, gc_results))\n', (1824, 1850), True, 'import numpy as np\n'), ((232, 252), 'numpy.array', 'np.array', (['loss_curve'], {}), '(loss_curve)\n', (240, 252), True, 'import numpy as np\n'), ((1878, 1892), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1890, 1892), False, 'from datetime import datetime\n')] |
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import compatible_shapes, strict_compare_tensors, \
is_fully_defined
from openvino.tools.mo.graph.graph import Node, Graph
from openvino.tools.mo.ops.op import Op
class ScatterNDBase(Op):
    """
    Base class for ScatterND-like Model Optimizer operations.

    Provides the shared shape/type inference; concrete subclasses must
    set ``op``, ``op_type`` and ``version`` before instantiation.
    """
    enabled = False
    # Filled in by concrete subclasses; __init__ asserts they are set.
    op = op_type = None
    version = None
    def __init__(self, graph: Graph, attrs: dict):
        assert self.op is not None and self.op_type is not None and self.version is not None, \
            'Please use specialized ScatterNDBase operation class, ScatterNDBase is base class'
        mandatory_props = {
            'op': self.op,
            'type': self.op_type,
            'version': self.version,
            'infer': self.infer,
            'in_ports_count': 3,
            'out_ports_count': 1,
        }
        super().__init__(graph, mandatory_props, attrs)
    @staticmethod
    def infer(node: Node):
        """
        Validate input/indices/updates shapes and set the output shape.

        The output shape always equals the shape of input 0; the
        assertions below only guard against malformed indices/updates.
        """
        node_name = node.soft_get('name', node.id)
        input_shape = node.in_port(0).data.get_shape()
        indices_shape = node.in_port(1).data.get_shape()
        updates_shape = node.in_port(2).data.get_shape()
        assert input_shape is not None and updates_shape is not None and indices_shape is not None, \
            'The node "{}" input shape is None'.format(node_name)
        # check that shapes are correct
        # 1. ranks of both input and indices must be at least 1
        assert len(input_shape) >= 1 and len(indices_shape) >= 1, \
            'The node "{}" input and indices ranks must be at least 1'.format(node_name)
        # 2. the last dimension of indices shape must be at most a rank of input
        # (the check is skipped when that dimension is dynamic / not fully defined)
        assert not is_fully_defined(indices_shape[-1]) or indices_shape[-1] <= len(input_shape), \
            'The last dimension of indices shape must be at most a rank of input for the node "{}"'.format(node_name)
        # 3. updates is a tensor of shape indices_shape[:-1] + input_shape[indices_shape[-1]:]
        # if expected updates shape is scalar, updates can be tensor with the single element (for example, of shape
        # [1], [[1]], etc.)
        expected_updates_shape = np.ma.concatenate((indices_shape[:-1], input_shape[indices_shape[-1]:]), axis=0)
        assert compatible_shapes(updates_shape, expected_updates_shape) or \
               (strict_compare_tensors(expected_updates_shape, []) and
                strict_compare_tensors(updates_shape, np.ones(len(updates_shape), dtype=np.int64))), \
            'The updates shape must be equal to indices_shape[:-1] + input_shape[indices_shape[-1]:] for the node ' \
            '"{}"'.format(node_name)
        node.out_port(0).data.set_shape(input_shape)
    @staticmethod
    def type_infer(node: Node):
        """Propagate the data type of input 0 to the output, after checking
        that inputs 0 and 2 (data and updates) share the same type."""
        assert node.in_port(0).get_source().get_data_type() == node.in_port(2).get_source().get_data_type(), \
            'The data type of the first and the third inputs must be equal for the node {}'.format(node.name)
        node.out_port(0).set_data_type(node.in_port(0).get_data_type())
class ScatterNDUpdate(ScatterNDBase):
    """ScatterNDUpdate operation (opset4) with constant-value folding."""
    op = op_type = 'ScatterNDUpdate'
    version = 'opset4'
    @staticmethod
    def infer(node: Node):
        """
        Run base-class shape inference, then fold constants when possible.

        When input, indices and updates are all constant (and indices are
        fully defined), the scattered result is computed here so that
        downstream passes see a constant output value.
        """
        ScatterNDBase.infer(node)
        input_value = node.in_port(0).data.get_value()
        indices_shape = node.in_port(1).data.get_shape()
        indices_value = node.in_port(1).data.get_value()
        updates_value = node.in_port(2).data.get_value()
        # compute output value if all inputs are constant
        if input_value is not None and is_fully_defined(indices_value) and updates_value is not None:
            output_value = input_value.copy()
            # Iterate over every multi-index in the leading dims of indices.
            indx_range = indices_shape[:-1]
            for indx in np.ndindex(tuple(indx_range)):
                if indx == ():
                    # a case when updates is a scalar
                    indx = 0
                    updates_value = [updates_value]
                insert_index = indices_value[indx]
                # we check and change index type explicitly to avoid error in indexing ndarray by another ndarray
                if isinstance(insert_index, np.ndarray):
                    insert_index = tuple(insert_index)
                output_value[insert_index] = updates_value[indx]
            node.out_port(0).data.set_value(output_value)
class TFScatterND(Op):
    """
    Intermediate representation of TensorFlow's ScatterNd.

    This operation has no IR type and no infer function of its own: it is
    produced by the TF front end and later replaced by
    TFScatterNDDecomposition.
    """
    op = 'TFScatterND'
    enabled = False

    def __init__(self, graph: Graph, attrs: dict):
        # 'type' and 'infer' stay None because the op only exists until
        # the decomposition transformation rewrites it.
        base_attrs = {
            'type': None,
            'op': self.op,
            'in_ports_count': 3,
            'out_ports_count': 1,
            'infer': None
        }
        super().__init__(graph, base_attrs, attrs)
| [
"openvino.tools.mo.front.common.partial_infer.utils.strict_compare_tensors",
"numpy.ma.concatenate",
"openvino.tools.mo.front.common.partial_infer.utils.is_fully_defined",
"openvino.tools.mo.front.common.partial_infer.utils.compatible_shapes"
] | [((2225, 2310), 'numpy.ma.concatenate', 'np.ma.concatenate', (['(indices_shape[:-1], input_shape[indices_shape[-1]:])'], {'axis': '(0)'}), '((indices_shape[:-1], input_shape[indices_shape[-1]:]), axis=0\n )\n', (2242, 2310), True, 'import numpy as np\n'), ((2321, 2377), 'openvino.tools.mo.front.common.partial_infer.utils.compatible_shapes', 'compatible_shapes', (['updates_shape', 'expected_updates_shape'], {}), '(updates_shape, expected_updates_shape)\n', (2338, 2377), False, 'from openvino.tools.mo.front.common.partial_infer.utils import compatible_shapes, strict_compare_tensors, is_fully_defined\n'), ((3615, 3646), 'openvino.tools.mo.front.common.partial_infer.utils.is_fully_defined', 'is_fully_defined', (['indices_value'], {}), '(indices_value)\n', (3631, 3646), False, 'from openvino.tools.mo.front.common.partial_infer.utils import compatible_shapes, strict_compare_tensors, is_fully_defined\n'), ((1754, 1789), 'openvino.tools.mo.front.common.partial_infer.utils.is_fully_defined', 'is_fully_defined', (['indices_shape[-1]'], {}), '(indices_shape[-1])\n', (1770, 1789), False, 'from openvino.tools.mo.front.common.partial_infer.utils import compatible_shapes, strict_compare_tensors, is_fully_defined\n'), ((2399, 2449), 'openvino.tools.mo.front.common.partial_infer.utils.strict_compare_tensors', 'strict_compare_tensors', (['expected_updates_shape', '[]'], {}), '(expected_updates_shape, [])\n', (2421, 2449), False, 'from openvino.tools.mo.front.common.partial_infer.utils import compatible_shapes, strict_compare_tensors, is_fully_defined\n')] |
import numpy as np
import pydigree as pyd
from pydigree.simulation import QuantitativeTrait
# Simulation parameters: founder count, locus count, minor-allele frequency.
# NOTE(review): nloc is defined but never used below — the loops hard-code
# 100 chromosomes and 100 trait effects instead; confirm intended count.
ninds = 5000
nloc = 1000
maf = 0.5
traitname = 'synthetic'
# Create population
pop = pyd.Population()
# Create chromosomes
# One single-locus chromosome per iteration, each with allele frequency maf.
for i in range(100):
    c = pyd.ChromosomeTemplate()
    c.add_genotype(maf, 0)
    pop.add_chromosome(c)
# Create trait QuantitativeTrait
trait = QuantitativeTrait('synthetic', 'quantitative', chromosomes=pop.chromosomes)
# Additive effect of magnitude 1 per locus with alternating sign by index.
for i in range(100):
    trait.add_effect((i,0), 1 * (-1 if i % 2 else 1))
print('Locus mean genotypic value: {}'.format(trait.effects[0].expected_genotypic_value))
print('Locus variance: {}'.format(trait.effects[0].locus_additive_variance))
print('Expected trait mean: {}'.format(trait.expected_genotypic_value))
print('Expected trait variance: {}'.format(trait.additive_genetic_variance))
# Simulate founders under linkage equilibrium and record their phenotypes.
# NOTE(review): loop variable i is immediately shadowed by the individual.
for i in range(ninds):
    i = pop.founder_individual()
    i.get_genotypes(linkeq=True)
    i.phenotypes[traitname] = trait.predict_phenotype(i)
y = np.array([i.phenotypes[traitname] for i in pop.individuals])
# Compare observed moments with the theoretical expectations printed above.
print('Observed trait mean {}'.format(y.mean()))
print('Observed trait variance: {}'.format(y.var()))
| [
"numpy.array",
"pydigree.Population",
"pydigree.ChromosomeTemplate",
"pydigree.simulation.QuantitativeTrait"
] | [((181, 197), 'pydigree.Population', 'pyd.Population', ([], {}), '()\n', (195, 197), True, 'import pydigree as pyd\n'), ((360, 435), 'pydigree.simulation.QuantitativeTrait', 'QuantitativeTrait', (['"""synthetic"""', '"""quantitative"""'], {'chromosomes': 'pop.chromosomes'}), "('synthetic', 'quantitative', chromosomes=pop.chromosomes)\n", (377, 435), False, 'from pydigree.simulation import QuantitativeTrait\n'), ((968, 1028), 'numpy.array', 'np.array', (['[i.phenotypes[traitname] for i in pop.individuals]'], {}), '([i.phenotypes[traitname] for i in pop.individuals])\n', (976, 1028), True, 'import numpy as np\n'), ((246, 270), 'pydigree.ChromosomeTemplate', 'pyd.ChromosomeTemplate', ([], {}), '()\n', (268, 270), True, 'import pydigree as pyd\n')] |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import mindspore as ms
import mindspore.context as context
from mindspore.common.api import _executor
from mindspore import Tensor, Parameter
import mindspore.nn as nn
from mindspore.nn import Cell, TrainOneStepCell, Momentum
from mindspore.ops import operations as P
class TwoInputBpropOperator(Cell):
    """Mul cell with a hand-written bprop that uses Add instead of Mul's gradient."""
    def __init__(self):
        super().__init__()
        self.op = P.Mul()
        self.bp = P.Add()
    def construct(self, x, y):
        # forward: element-wise product
        return self.op(x, y)
    def bprop(self, x, y, out, dout):
        # custom backward: ignores `out`/`dout`, returns Add-based values
        # (exercises MindSpore's user-defined bprop path under sharding)
        return self.bp(5, x), self.bp(y, 8)
class ParallelFloorDivBpropNet(Cell):
    """Mul (custom bprop) -> FloorDiv -> BatchNorm1d pipeline, optionally sharded."""
    def __init__(self, mul_size, test_size, strategy=None, strategy2=None):
        super().__init__()
        # constant-initialized weights for the two element-wise ops
        mul_np = np.full(mul_size, 0.5, dtype=np.float32)
        floordiv_np = np.full(test_size, 0.1, dtype=np.float32)
        self.mul_weight = Parameter(Tensor(mul_np), name="mul_weight")
        self.floordiv_weight = Parameter(Tensor(floordiv_np), name="floordiv_weight")
        self.mul = TwoInputBpropOperator()
        self.floor_div = P.FloorDiv()
        self.bn = nn.BatchNorm1d(num_features=96)
        # shard both the custom-bprop ops and FloorDiv when strategies are given
        if strategy is not None:
            self.mul.op.shard(strategy2)
            self.mul.bp.shard(strategy2)
            self.floor_div.shard(strategy)
    def construct(self, inputs, label):
        # `label` is unused; kept so the cell matches the (inputs, label) call shape
        x = self.mul(inputs, self.mul_weight)
        x = self.floor_div(x, self.floordiv_weight)
        x = self.bn(x)
        return x
# Shared random input/label tensors consumed by compile_net below.
inputs_ = Tensor(np.random.randn(128, 96).astype(np.float32), dtype=ms.float32)
label_ = Tensor(np.random.randn(128, 96).astype(np.float32), dtype=ms.float32)
def compile_net(net):
    """Wrap `net` in a Momentum train step, compile it, then reset the parallel context."""
    optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
    train_net = TrainOneStepCell(net, optimizer)
    train_net.set_auto_parallel()
    train_net.set_train()
    # graph compilation only — no actual training step is executed
    _executor.compile(train_net, inputs_, label_)
    context.reset_auto_parallel_context()
def test_net():
    """Compile the custom-bprop net under semi-auto parallel (graph mode, 4 devices)."""
    context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=4, global_rank=0)
    # split the first (row) dimension across all 4 devices for both operands
    strategy = ((4, 1), (4, 1))
    net = ParallelFloorDivBpropNet(mul_size=(128, 96), test_size=(128, 96), strategy=strategy, strategy2=strategy)
    compile_net(net)
| [
"mindspore.ops.operations.Mul",
"mindspore.ops.operations.Add",
"mindspore.nn.TrainOneStepCell",
"mindspore.context.set_context",
"mindspore.context.reset_auto_parallel_context",
"numpy.random.randn",
"mindspore.common.api._executor.compile",
"mindspore.ops.operations.FloorDiv",
"mindspore.Tensor",
... | [((2333, 2365), 'mindspore.nn.TrainOneStepCell', 'TrainOneStepCell', (['net', 'optimizer'], {}), '(net, optimizer)\n', (2349, 2365), False, 'from mindspore.nn import Cell, TrainOneStepCell, Momentum\n'), ((2430, 2475), 'mindspore.common.api._executor.compile', '_executor.compile', (['train_net', 'inputs_', 'label_'], {}), '(train_net, inputs_, label_)\n', (2447, 2475), False, 'from mindspore.common.api import _executor\n'), ((2480, 2517), 'mindspore.context.reset_auto_parallel_context', 'context.reset_auto_parallel_context', ([], {}), '()\n', (2515, 2517), True, 'import mindspore.context as context\n'), ((2540, 2608), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE', 'device_target': '"""Ascend"""'}), "(mode=context.GRAPH_MODE, device_target='Ascend')\n", (2559, 2608), True, 'import mindspore.context as context\n'), ((2613, 2715), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', ([], {'parallel_mode': '"""semi_auto_parallel"""', 'device_num': '(4)', 'global_rank': '(0)'}), "(parallel_mode='semi_auto_parallel',\n device_num=4, global_rank=0)\n", (2646, 2715), True, 'import mindspore.context as context\n'), ((982, 989), 'mindspore.ops.operations.Mul', 'P.Mul', ([], {}), '()\n', (987, 989), True, 'from mindspore.ops import operations as P\n'), ((1008, 1015), 'mindspore.ops.operations.Add', 'P.Add', ([], {}), '()\n', (1013, 1015), True, 'from mindspore.ops import operations as P\n'), ((1320, 1360), 'numpy.full', 'np.full', (['mul_size', '(0.5)'], {'dtype': 'np.float32'}), '(mul_size, 0.5, dtype=np.float32)\n', (1327, 1360), True, 'import numpy as np\n'), ((1383, 1424), 'numpy.full', 'np.full', (['test_size', '(0.1)'], {'dtype': 'np.float32'}), '(test_size, 0.1, dtype=np.float32)\n', (1390, 1424), True, 'import numpy as np\n'), ((1650, 1662), 'mindspore.ops.operations.FloorDiv', 'P.FloorDiv', ([], {}), '()\n', (1660, 1662), True, 'from mindspore.ops import operations as P\n'), ((1681, 
1712), 'mindspore.nn.BatchNorm1d', 'nn.BatchNorm1d', ([], {'num_features': '(96)'}), '(num_features=96)\n', (1695, 1712), True, 'import mindspore.nn as nn\n'), ((1461, 1475), 'mindspore.Tensor', 'Tensor', (['mul_np'], {}), '(mul_np)\n', (1467, 1475), False, 'from mindspore import Tensor, Parameter\n'), ((1537, 1556), 'mindspore.Tensor', 'Tensor', (['floordiv_np'], {}), '(floordiv_np)\n', (1543, 1556), False, 'from mindspore import Tensor, Parameter\n'), ((2069, 2093), 'numpy.random.randn', 'np.random.randn', (['(128)', '(96)'], {}), '(128, 96)\n', (2084, 2093), True, 'import numpy as np\n'), ((2148, 2172), 'numpy.random.randn', 'np.random.randn', (['(128)', '(96)'], {}), '(128, 96)\n', (2163, 2172), True, 'import numpy as np\n')] |
from sympy import poly
from sympy import Matrix
from sympy import Rational
import sympy as sy
import numpy as np
import fractions
from copy import deepcopy
import math
from utils import get_only_poly_equation
class ConstantPoly:
    """
    Degree-zero stand-in for a sympy ``poly``.

    sympy refuses to construct constant polynomials, so this tiny wrapper
    exposes just enough of the Poly interface (``degree``/``total_degree``,
    ``gens``, printing, ``%`` and ``*``) for the rest of the module to treat
    constants and real polynomials uniformly.
    """

    def __init__(self, val):
        self.val = val
        self.args = [str(val)]
        self.gens = ()

    def degree(self):
        """A constant always has degree zero."""
        return 0

    # a constant's total degree coincides with its degree
    total_degree = degree

    def __str__(self):
        return f"{self.val}"

    __repr__ = __str__

    def __mod__(self, modulus):
        return self.val % modulus

    def __mul__(self, factor):
        return self.val * factor
def get_independent_polynomials(number_of_possible_values, list_of_polynomials_variables):
    """
    Enumerate every monomial v1**d1 * ... * vk**dk with each exponent in
    [0, number_of_possible_values), the all-zero exponent tuple being
    represented by ConstantPoly(1).

    The result is stably sorted by degree and then by total degree, so
    total degree ends up as the primary sort key.
    """
    nvars = len(list_of_polynomials_variables)
    exponents = [0] * nvars

    def advance():
        # odometer-style increment; position 0 varies fastest
        for axis in range(nvars):
            exponents[axis] += 1
            if exponents[axis] < number_of_possible_values:
                return True
            exponents[axis] = 0
        return False

    polynomials = [ConstantPoly(1)]
    while advance():
        monomial = 1
        for variable, power in zip(list_of_polynomials_variables, exponents):
            monomial *= variable ** power
        polynomials.append(poly(monomial))
    # two stable sorts: degree first, then total degree on top
    polynomials.sort(key=lambda pol: pol.degree())
    polynomials.sort(key=lambda pol: pol.total_degree())
    return polynomials
class EvaluatePolynomial:
    """
    Callable that evaluates polynomials at a fixed point.

    A snapshot of the evaluation point (one value per variable) is stored
    at construction time, so the caller may mutate the original list
    afterwards without affecting this functional.
    """

    def __init__(self, list_of_values, list_of_polynomials_variables):
        # deepcopy: keep our own snapshot of the evaluation point
        self.list_of_values = deepcopy(list_of_values)
        self.list_of_polynomials_variables = list_of_polynomials_variables

    @staticmethod
    def evaluate_poly_with_vals(pol, list_of_polynomials_variables, list_of_values):
        """
        Substitute value i for variable i in ``pol``, skipping variables
        that do not occur among the polynomial's generators, and return
        the (partially or fully) evaluated result.
        """
        original_gens = pol.gens  # captured once, before any substitution
        for variable, value in zip(list_of_polynomials_variables, list_of_values):
            if variable in original_gens:
                pol = pol.eval(value)
        return pol

    def __call__(self, pol):
        return self.evaluate_poly_with_vals(
            pol, self.list_of_polynomials_variables, self.list_of_values)
def get_dual_base(number_of_possible_values, list_of_polynomials_variables):
    """
    Return evaluation functionals for every point of the grid
    {0, ..., number_of_possible_values - 1}**k, where k is the number of
    variables. The grid is walked odometer-style with the first coordinate
    advancing fastest: (0,...,0), (1,0,...), ..., (k-1,...,k-1).

    Each entry is an EvaluatePolynomial that substitutes its grid point
    into a polynomial; EvaluatePolynomial snapshots the point, so reusing
    one mutable list here is safe.
    """
    nvars = len(list_of_polynomials_variables)
    grid_point = [0] * nvars

    def advance():
        # odometer-style increment, least-significant coordinate first
        for axis in range(nvars):
            grid_point[axis] += 1
            if grid_point[axis] < number_of_possible_values:
                return True
            grid_point[axis] = 0
        return False

    dual_base = [EvaluatePolynomial(grid_point, list_of_polynomials_variables)]
    while advance():
        dual_base.append(EvaluatePolynomial(grid_point, list_of_polynomials_variables))
    return dual_base
def get_evaluation_matrix(number_of_possible_values, list_of_polynomials, list_of_polynomials_variables):
    """
    Build the matrix of ``list_of_polynomials`` evaluated over every grid
    point of {0, ..., number_of_possible_values - 1}**k, with all entries
    reduced mod ``number_of_possible_values``.

    Row ordering follows ``get_dual_base`` (first coordinate advances
    fastest); column j holds polynomial j evaluated at each point.
    """
    evaluators = get_dual_base(number_of_possible_values, list_of_polynomials_variables)
    rows = [
        [evaluate(pol) % number_of_possible_values for pol in list_of_polynomials]
        for evaluate in evaluators
    ]
    return Matrix(rows)
def sympy_rational_to_int_modulus_number(sympy_rational, modulus_number):
    """
    Convert a sympy Rational p/q to the integer (p * q**-1) mod ``modulus_number``.

    The denominator's modular inverse is computed via Fermat's little theorem
    when the modulus is prime, and via the extended Euclidean algorithm
    otherwise.

    Raises Exception when q has no inverse mod ``modulus_number``.
    """
    def additive_inverse(negative_num):
        return modulus_number + negative_num

    # taken from https://en.wikibooks.org/wiki/Algorithm_Implementation/Mathematics/Extended_Euclidean_algorithm
    def multiplicative_inverse(num):
        def egcd(a, b):
            if a == 0:
                return (b, 0, 1)
            else:
                g, y, x = egcd(b % a, a)
                return (g, x - (b // a) * y, y)

        def modinv(a, m):
            g, x, y = egcd(a, m)
            if g != 1:
                raise Exception('modular inverse does not exist')
            else:
                return x % m
        return modinv(num, modulus_number)

    def multiplicative_inverse_of_prime(num):
        # Fermat's little theorem: num**(m-2) == num**-1 (mod m) for prime m
        return pow(num, modulus_number - 2, modulus_number)

    def is_prime(num):
        # BUG FIX: the old bound math.ceil(math.sqrt(num)) excluded the square
        # root itself, misclassifying perfect squares (4, 9, 25, ...) as prime
        # and sending composite moduli down the Fermat-inverse path.
        # Trial-divide through isqrt(num) inclusive instead.
        return num >= 2 and all(num % l for l in range(2, math.isqrt(num) + 1))

    numerator, denominator = sympy_rational.p, sympy_rational.q
    if numerator < 0:
        numerator = additive_inverse(numerator)
    if is_prime(modulus_number):
        denominator = multiplicative_inverse_of_prime(denominator)
    else:
        denominator = multiplicative_inverse(denominator)
    return (numerator * denominator) % modulus_number
def get_matrix_inverse_transpose_mod(matrix, modulus):
    """
    Return the transpose of the modular inverse of ``matrix`` (mod ``modulus``).

    The inverse is computed with numpy floats and rationalized back via
    ``fractions.Fraction.limit_denominator`` because both
    ``Matrix.inv_mod`` and sympy's exact inverse are much slower here;
    a sanity assert re-checks the reconstructed inverse before transposing.
    """
    # for some reason the inverse in modulus operation takes a really long time
    # but the regular inverse is really fast
    # so perhaps it would be better to calculate the regular inverse and then convert it to a modulus one
    # this is the way to do the inverse + transpose using modulus
    # return (matrix.inv_mod(modulus)).T
    # it seems like even the usual sympy matrix inverse is seriously slow
    # I think its because it keeps the numbers rational
    # if you want here is the operation used to calculate regular inverse + transpose
    # inverse_transposed = (matrix ** -1).T
    # my solution would be to calculate the inverse in numpy and then convert the result into a sympy matrix
    # of rational numbers
    # first calculate the inverse using numpy
    numpy_matrix = np.array(matrix).astype(np.float64)
    inverse_matrix = (np.linalg.inv(numpy_matrix))
    # now convert the floats in the matrix to their rational form
    # (limit_denominator recovers the exact rational from the float value)
    vfunc = np.vectorize(lambda x: Rational(fractions.Fraction(x).limit_denominator()))
    inverse_matrix = vfunc(inverse_matrix)
    # now convert the matrix back to a sympy matrix and apply modulus operation
    inverse_matrix = Matrix(inverse_matrix)
    inverse_matrix = inverse_matrix.applyfunc(
        lambda num: sympy_rational_to_int_modulus_number(num, modulus))
    # sanity check - check that the inverse is really the inverse modulus
    assert (inverse_matrix * matrix) % modulus == sy.eye(len(numpy_matrix))
    return inverse_matrix.T
def multiply_matrix_by_polynomial_column(matrix, polynomial_list):
    """
    Multiply ``matrix`` by ``polynomial_list`` treated as a column vector
    and return the resulting entries as a plain list.

    ConstantPoly placeholders (recognized by an empty ``gens``) are
    unwrapped to their raw ``val`` first, since sympy arithmetic does not
    accept them directly.
    """
    column = [entry if entry.gens else entry.val for entry in polynomial_list]
    return [
        sum(coefficient * value for coefficient, value in zip(row, column))
        for row in matrix.tolist()
    ]
def apply_operation_to_all_pairs_in_list(operation, lis):
    """
    Combine consecutive non-overlapping pairs of ``lis`` with ``operation``.

    An odd trailing element is carried over unchanged, so the result has
    ceil(len(lis) / 2) entries.
    """
    combined = [operation(left, right) for left, right in zip(lis[::2], lis[1::2])]
    if len(lis) % 2:
        combined.append(lis[-1])
    return combined
| [
"sympy.poly",
"sympy.Matrix",
"fractions.Fraction",
"math.sqrt",
"numpy.array",
"numpy.linalg.inv",
"copy.deepcopy"
] | [((5421, 5433), 'sympy.Matrix', 'Matrix', (['rows'], {}), '(rows)\n', (5427, 5433), False, 'from sympy import Matrix\n'), ((7652, 7679), 'numpy.linalg.inv', 'np.linalg.inv', (['numpy_matrix'], {}), '(numpy_matrix)\n', (7665, 7679), True, 'import numpy as np\n'), ((7981, 8003), 'sympy.Matrix', 'Matrix', (['inverse_matrix'], {}), '(inverse_matrix)\n', (7987, 8003), False, 'from sympy import Matrix\n'), ((1554, 1575), 'sympy.poly', 'poly', (['poly_expression'], {}), '(poly_expression)\n', (1558, 1575), False, 'from sympy import poly\n'), ((2025, 2049), 'copy.deepcopy', 'deepcopy', (['list_of_values'], {}), '(list_of_values)\n', (2033, 2049), False, 'from copy import deepcopy\n'), ((7594, 7610), 'numpy.array', 'np.array', (['matrix'], {}), '(matrix)\n', (7602, 7610), True, 'import numpy as np\n'), ((7792, 7813), 'fractions.Fraction', 'fractions.Fraction', (['x'], {}), '(x)\n', (7810, 7813), False, 'import fractions\n'), ((6359, 6373), 'math.sqrt', 'math.sqrt', (['num'], {}), '(num)\n', (6368, 6373), False, 'import math\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Optional
import numpy as np
from ax.core.observation import ObservationData, ObservationFeatures
from ax.core.search_space import SearchSpace
from ax.core.types import TConfig
from ax.modelbridge.transforms.base import Transform
from ax.modelbridge.transforms.utils import get_data
from ax.utils.common.logger import get_logger
from ax.utils.common.typeutils import checked_cast
logger = get_logger(__name__)
class Winsorize(Transform):
    """Clip the mean values for each metric to lay within the limits provided in
    the config. You can either specify different winsorization limits for each metric
    such as:
        "winsorization_lower": {"metric_1": 0.2},
        "winsorization_upper": {"metric_2": 0.1}
    which will winsorize 20% from below for metric_1 and 10% from above from metric_2.
    Additional metrics won't be winsorized. It is also possible to specify the same
    winsorization limits for all metrics, e.g.,
        "winsorization_lower": None,
        "winsorization_upper": 0.2
    which will winsorize 20% from above for all metrics.
    Additionally, you can pass in percentile_bounds that specify the largest/smallest
    possible values for the percentiles. This is useful for MOO where we want to make
    sure winsorization doesn't move values to the other side of the reference point.
    """
    def __init__(
        self,
        search_space: SearchSpace,
        observation_features: List[ObservationFeatures],
        observation_data: List[ObservationData],
        config: Optional[TConfig] = None,
    ) -> None:
        if len(observation_data) == 0:
            raise ValueError("Winsorize transform requires non-empty observation data.")
        # If winsorization limits are missing or either one of them is None,
        # we can just replace that limit(s) with 0.0, as in those cases the
        # percentile will just interpret them as 0-th or 100-th percentile,
        # leaving the data unclipped.
        wins_l = 0.0
        if config is not None and "winsorization_lower" in config:
            wins_l = config.get("winsorization_lower") or 0.0
        wins_u = 0.0
        if config is not None and "winsorization_upper" in config:
            wins_u = config.get("winsorization_upper") or 0.0
        pct_bounds = {}
        if config is not None and "percentile_bounds" in config:
            pct_bounds = checked_cast(dict, config.get("percentile_bounds") or {})
        metric_values = get_data(observation_data=observation_data)
        # metric name -> (lower_cutoff, upper_cutoff) consumed by
        # transform_observation_data below
        self.percentiles = {}
        for metric_name, vals in metric_values.items():
            # limits may be per-metric (dict) or one scalar for all metrics
            lower = (
                wins_l.get(metric_name) or 0.0 if isinstance(wins_l, dict) else wins_l
            )
            upper = (
                wins_u.get(metric_name) or 0.0 if isinstance(wins_u, dict) else wins_u
            )
            if lower >= 1 - upper:
                raise ValueError(  # pragma: no cover
                    f"Lower bound: {lower} was greater than the inverse of the upper "
                    f"bound: {1 - upper} for metric {metric_name}. Decrease one or "
                    f"both of your winsorization_limits: {(lower, upper)}."
                )
            # "lower"/"higher" interpolation keeps each cutoff at an observed value
            pct_l = np.percentile(vals, lower * 100, interpolation="lower")
            pct_u = np.percentile(vals, (1 - upper) * 100, interpolation="higher")
            if metric_name in pct_bounds:
                # Update the percentiles if percentile_bounds are specified
                metric_bnds = pct_bounds.get(metric_name)
                if len(metric_bnds) != 2:
                    raise ValueError(  # pragma: no cover
                        f"Expected percentile_bounds for metric {metric_name} to be "
                        f"of the form (l, u), got {metric_bnds}."
                    )
                # min/max widen (never narrow) the cutoff range so clipping
                # cannot push values past the supplied bounds; None disables a side
                bnd_l, bnd_u = metric_bnds
                pct_l = min(pct_l, bnd_l if bnd_l is not None else float("inf"))
                pct_u = max(pct_u, bnd_u if bnd_u is not None else -float("inf"))
            self.percentiles[metric_name] = (pct_l, pct_u)
    def transform_observation_data(
        self,
        observation_data: List[ObservationData],
        observation_features: List[ObservationFeatures],
    ) -> List[ObservationData]:
        """Winsorize observation data in place."""
        for obsd in observation_data:
            for idx, metric_name in enumerate(obsd.metric_names):
                if metric_name not in self.percentiles:  # pragma: no cover
                    raise ValueError(f"Cannot winsorize unknown metric {metric_name}")
                # Clip on the winsorization bounds.
                obsd.means[idx] = max(obsd.means[idx], self.percentiles[metric_name][0])
                obsd.means[idx] = min(obsd.means[idx], self.percentiles[metric_name][1])
        return observation_data
| [
"numpy.percentile",
"ax.utils.common.logger.get_logger",
"ax.modelbridge.transforms.utils.get_data"
] | [((617, 637), 'ax.utils.common.logger.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (627, 637), False, 'from ax.utils.common.logger import get_logger\n'), ((2683, 2726), 'ax.modelbridge.transforms.utils.get_data', 'get_data', ([], {'observation_data': 'observation_data'}), '(observation_data=observation_data)\n', (2691, 2726), False, 'from ax.modelbridge.transforms.utils import get_data\n'), ((3437, 3492), 'numpy.percentile', 'np.percentile', (['vals', '(lower * 100)'], {'interpolation': '"""lower"""'}), "(vals, lower * 100, interpolation='lower')\n", (3450, 3492), True, 'import numpy as np\n'), ((3513, 3575), 'numpy.percentile', 'np.percentile', (['vals', '((1 - upper) * 100)'], {'interpolation': '"""higher"""'}), "(vals, (1 - upper) * 100, interpolation='higher')\n", (3526, 3575), True, 'import numpy as np\n')] |
import logging
from sacred import Experiment
import numpy as np
from tracking import database_utils as db_utils
from tracking import misc
ex = Experiment()
misc.setup_logger(ex)
@ex.config
def config():
    # sacred config scope: names assigned here become experiment config values
    overwrite = None
    db_collection = None
    # attach a MongoDB observer only when a collection name was supplied
    if db_collection is not None:
        ex.observers.append(db_utils.create_mongodb_observer(db_collection, overwrite=overwrite))
@ex.automain
def run(dataset, hidden_sizes, learning_rate, reg_scale, keep_prob, max_epochs, patience, display_step):
    """Experiment entry point; all parameters are injected by sacred from the config.

    Returns a dict of results, which sacred writes into the database.
    """
    logging.info('Received the following configuration:')
    # BUG FIX: the implicitly concatenated f-strings ran together without
    # separators ("...patience:{patience}display_step..."); spaces/commas added.
    logging.info(f'Dataset: {dataset}, hidden sizes: {hidden_sizes}, learning_rate: {learning_rate}, '
                 f'reg_scale: {reg_scale}, keep_prob: {keep_prob}, max_epochs: {max_epochs}, '
                 f'patience: {patience}, display_step: {display_step}')
    # do your processing here
    results = {
        'test_acc': 2 * np.random.randn() + 1,
        'test_loss': np.random.uniform(0, 10),
        # ...
    }
    # the returned result will be written into the database
    return results
| [
"tracking.database_utils.create_mongodb_observer",
"sacred.Experiment",
"numpy.random.randn",
"tracking.misc.setup_logger",
"numpy.random.uniform",
"logging.info"
] | [((145, 157), 'sacred.Experiment', 'Experiment', ([], {}), '()\n', (155, 157), False, 'from sacred import Experiment\n'), ((158, 179), 'tracking.misc.setup_logger', 'misc.setup_logger', (['ex'], {}), '(ex)\n', (175, 179), False, 'from tracking import misc\n'), ((510, 563), 'logging.info', 'logging.info', (['"""Received the following configuration:"""'], {}), "('Received the following configuration:')\n", (522, 563), False, 'import logging\n'), ((568, 796), 'logging.info', 'logging.info', (['f"""Dataset: {dataset}, hidden sizes: {hidden_sizes}, learning_rate: {learning_rate},reg_scale: {reg_scale}, keep_prob:{keep_prob}, max_epochs: {max_epochs}, patience:{patience}display_step: {display_step}"""'], {}), "(\n f'Dataset: {dataset}, hidden sizes: {hidden_sizes}, learning_rate: {learning_rate},reg_scale: {reg_scale}, keep_prob:{keep_prob}, max_epochs: {max_epochs}, patience:{patience}display_step: {display_step}'\n )\n", (580, 796), False, 'import logging\n'), ((944, 968), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(10)'], {}), '(0, 10)\n', (961, 968), True, 'import numpy as np\n'), ((315, 383), 'tracking.database_utils.create_mongodb_observer', 'db_utils.create_mongodb_observer', (['db_collection'], {'overwrite': 'overwrite'}), '(db_collection, overwrite=overwrite)\n', (347, 383), True, 'from tracking import database_utils as db_utils\n'), ((900, 917), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (915, 917), True, 'import numpy as np\n')] |
# CODING-STYLE CHECKS:
# pycodestyle test_decorators.py
import os
import sys
import pytest
import importlib
import numpy as np
from pandas import DataFrame
from pandas.util.testing import assert_frame_equal
import taxcalc
from taxcalc.decorators import *
def test_create_apply_function_string():
    """With no parameter names, every generated argument is indexed per row."""
    ans = create_apply_function_string(['a', 'b', 'c'], ['d', 'e'], [])
    exp = ("def ap_func(x_0,x_1,x_2,x_3,x_4):\n"
           "  for i in range(len(x_0)):\n"
           "    x_0[i],x_1[i],x_2[i] = jitted_f(x_3[i],x_4[i])\n"
           "  return x_0,x_1,x_2\n")
    assert ans == exp
def test_create_apply_function_string_with_params():
    """Names listed as parameters ('d') are passed whole, not indexed by row."""
    ans = create_apply_function_string(['a', 'b', 'c'], ['d', 'e'], ['d'])
    exp = ("def ap_func(x_0,x_1,x_2,x_3,x_4):\n"
           "  for i in range(len(x_0)):\n"
           "    x_0[i],x_1[i],x_2[i] = jitted_f(x_3,x_4[i])\n"
           "  return x_0,x_1,x_2\n")
    assert ans == exp
def test_create_toplevel_function_string_mult_outputs():
    """Multi-output wrapper stacks outputs via np.column_stack in the DataFrame."""
    ans = create_toplevel_function_string(['a', 'b'], ['d', 'e'],
                                          ['pm', 'pm', 'pf', 'pm'])
    exp = ''
    exp = ("def hl_func(pm, pf):\n"
           "    from pandas import DataFrame\n"
           "    import numpy as np\n"
           "    import pandas as pd\n"
           "    def get_values(x):\n"
           "        if isinstance(x, pd.Series):\n"
           "            return x.values\n"
           "        else:\n"
           "            return x\n"
           "    outputs = \\\n"
           "        (pm.a, pm.b) = \\\n"
           "        applied_f(get_values(pm.a), get_values(pm.b), "
           "get_values(pf.d), get_values(pm.e), )\n"
           "    header = ['a', 'b']\n"
           "    return DataFrame(data=np.column_stack(outputs),"
           "columns=header)")
    assert ans == exp
def test_create_toplevel_function_string():
    """Single-output wrapper returns DataFrame(data=outputs) without column_stack."""
    ans = create_toplevel_function_string(['a'], ['d', 'e'],
                                          ['pm', 'pf', 'pm'])
    exp = ''
    exp = ("def hl_func(pm, pf):\n"
           "    from pandas import DataFrame\n"
           "    import numpy as np\n"
           "    import pandas as pd\n"
           "    def get_values(x):\n"
           "        if isinstance(x, pd.Series):\n"
           "            return x.values\n"
           "        else:\n"
           "            return x\n"
           "    outputs = \\\n"
           "        (pm.a) = \\\n"
           "        applied_f(get_values(pm.a), get_values(pf.d), "
           "get_values(pm.e), )\n"
           "    header = ['a']\n"
           "    return DataFrame(data=outputs,"
           "columns=header)")
    assert ans == exp
def some_calc(x, y, z):
    """Return (x + y, x + y + z); toy calc used to exercise make_apply_function."""
    partial_sum = x + y
    return (partial_sum, partial_sum + z)
def test_make_apply_function():
    """make_apply_function builds a callable whether or not jitting is requested."""
    ans_do_jit = make_apply_function(some_calc, ['a', 'b'], ['x', 'y', 'z'],
                                     [], do_jit=True, no_python=True)
    assert ans_do_jit
    ans_no_jit = make_apply_function(some_calc, ['a', 'b'], ['x', 'y', 'z'],
                                     [], do_jit=False, no_python=True)
    assert ans_no_jit
@apply_jit(["a", "b"], ["x", "y", "z"], nopython=True)
def Magic_calc(x, y, z):
    # row-level calc; apply_jit vectorizes it over (pm, pf) attribute arrays
    a = x + y
    b = x + y + z
    return (a, b)
def Magic(pm, pf):
    # Adjustments
    # the chained assignment also stores the outputs back on pf
    outputs = pf.a, pf.b = Magic_calc(pm, pf)
    header = ['a', 'b']
    return DataFrame(data=np.column_stack(outputs), columns=header)
@iterate_jit(nopython=True)
def Magic_calc2(x, y, z):
    # same calc as Magic_calc, but wrapped via iterate_jit instead of apply_jit
    a = x + y
    b = x + y + z
    return (a, b)
class Foo(object):
    """Bare attribute container standing in for parameter/record objects in tests."""
@iterate_jit(nopython=True)
def faux_function(MARS):
    # var is 2 when MARS == 1, otherwise 1
    if MARS == 1:
        var = 2
    else:
        var = 1
    return var
@iterate_jit(nopython=True)
def ret_everything(a, b, c, d, e, f):
    # four outputs, each a + b; the return is deliberately split over two lines
    c = a + b
    d = a + b
    e = a + b
    f = a + b
    return (c, d, e,
            f)
def test_magic_apply_jit():
    """apply_jit-wrapped calc builds the expected DataFrame from (pm, pf) arrays."""
    pm = Foo()
    pf = Foo()
    pm.a = np.ones((5,))
    pm.b = np.ones((5,))
    pf.x = np.ones((5,))
    pf.y = np.ones((5,))
    pf.z = np.ones((5,))
    xx = Magic(pm, pf)
    exp = DataFrame(data=[[2.0, 3.0]] * 5, columns=["a", "b"])
    assert_frame_equal(xx, exp)
def test_magic_apply_jit_swap():
    """Swapping the (pm, pf) argument order must not change the result."""
    pm = Foo()
    pf = Foo()
    pm.a = np.ones((5,))
    pm.b = np.ones((5,))
    pf.x = np.ones((5,))
    pf.y = np.ones((5,))
    pf.z = np.ones((5,))
    xx = Magic(pf, pm)
    exp = DataFrame(data=[[2.0, 3.0]] * 5, columns=["a", "b"])
    assert_frame_equal(xx, exp)
def test_magic_iterate_jit():
    """iterate_jit-wrapped calc produces the same DataFrame as apply_jit."""
    pm = Foo()
    pf = Foo()
    pm.a = np.ones((5,))
    pm.b = np.ones((5,))
    pf.x = np.ones((5,))
    pf.y = np.ones((5,))
    pf.z = np.ones((5,))
    xx = Magic_calc2(pm, pf)
    exp = DataFrame(data=[[2.0, 3.0]] * 5, columns=["a", "b"])
    assert_frame_equal(xx, exp)
def test_faux_function_iterate_jit():
    """Branching inside a jitted calc: MARS == 1 yields var == 2."""
    pm = Foo()
    pf = Foo()
    pf.MARS = np.ones((5,))
    pf.var = np.ones((5,))
    ans = faux_function(pm, pf)
    exp = DataFrame(data=[2.0] * 5, columns=['var'])
    assert_frame_equal(ans, exp)
def test_ret_everything_iterate_jit():
    """A calc returning four columns fills all four of them."""
    pm = Foo()
    pf = Foo()
    pf.a = np.ones((5,))
    pf.b = np.ones((5,))
    pf.c = np.ones((5,))
    pf.d = np.ones((5,))
    pf.e = np.ones((5,))
    pf.f = np.ones((5,))
    ans = ret_everything(pm, pf)
    exp = DataFrame(data=[[2.0, 2.0, 2.0, 2.0]] * 5,
                    columns=["c", "d", "e", "f"])
    assert_frame_equal(ans, exp)
@iterate_jit(nopython=True)
def Magic_calc3(x, y, z):
    # chained intermediates: b depends on a, exercising in-body dependencies
    a = x + y
    b = a + z
    return (a, b)
def test_function_takes_kwarg():
    """A calc whose second output depends on the first still compiles and runs."""
    pm = Foo()
    pf = Foo()
    pm.a = np.ones((5,))
    pm.b = np.ones((5,))
    pf.x = np.ones((5,))
    pf.y = np.ones((5,))
    pf.z = np.ones((5,))
    ans = Magic_calc3(pm, pf)
    exp = DataFrame(data=[[2.0, 3.0]] * 5,
                    columns=["a", "b"])
    assert_frame_equal(ans, exp)
@iterate_jit(nopython=True)
def Magic_calc4(x, y, z):
    # identical calc to Magic_calc3; decorator called without extra arguments
    a = x + y
    b = a + z
    return (a, b)
def test_function_no_parameters_listed():
    """iterate_jit with no explicit parameters still maps all arguments by name."""
    pm = Foo()
    pf = Foo()
    pm.a = np.ones((5,))
    pm.b = np.ones((5,))
    pf.x = np.ones((5,))
    pf.y = np.ones((5,))
    pf.z = np.ones((5,))
    ans = Magic_calc4(pm, pf)
    exp = DataFrame(data=[[2.0, 3.0]] * 5,
                    columns=["a", "b"])
    assert_frame_equal(ans, exp)
@iterate_jit(parameters=['w'], nopython=True)
def Magic_calc5(w, x, y, z):
    # `w` is declared a parameter, so the whole array is passed and w[0] indexed
    a = x + y
    b = w[0] + x + y + z
    return (a, b)
def test_function_parameters_optional():
    """Declared parameters ('w') are passed whole; b = w[0] + x + y + z = 4."""
    pm = Foo()
    pf = Foo()
    pm.a = np.ones((5,))
    pm.b = np.ones((5,))
    pm.w = np.ones((5,))
    pf.x = np.ones((5,))
    pf.y = np.ones((5,))
    pf.z = np.ones((5,))
    ans = Magic_calc5(pm, pf)
    exp = DataFrame(data=[[2.0, 4.0]] * 5,
                    columns=["a", "b"])
    assert_frame_equal(ans, exp)
def unjittable_function1(w, x, y, z):
    # intentionally has no return statement (fixture for the ValueError test below)
    a = x + y
    b = w[0] + x + y + z
def unjittable_function2(w, x, y, z):
    # intentionally returns an undefined name `c` (fixture for the failure test below)
    a = x + y
    b = w[0] + x + y + z
    return (a, b, c)
def test_iterate_jit_raises_on_no_return():
    """A function without a return statement cannot be wrapped by iterate_jit."""
    with pytest.raises(ValueError):
        ij = iterate_jit(parameters=['w'], nopython=True)
        ij(unjittable_function1)
def test_iterate_jit_raises_on_unknown_return_argument():
    """Returning a name (`c`) that maps to no known attribute fails at call time."""
    ij = iterate_jit(parameters=['w'], nopython=True)
    uf2 = ij(unjittable_function2)
    pm = Foo()
    pf = Foo()
    pm.a = np.ones((5,))
    pm.b = np.ones((5,))
    pm.w = np.ones((5,))
    pf.x = np.ones((5,))
    pf.y = np.ones((5,))
    pf.z = np.ones((5,))
    with pytest.raises(AttributeError):
        ans = uf2(pm, pf)
def Magic_calc6(w, x, y, z):
    """Return (x + y, w[0] + x + y + z); jitted manually inside test_force_no_jit."""
    first = x + y
    second = w[0] + first + z
    return (first, second)
def test_force_no_jit():
    """
    Force execution of code for "DO_JIT = False", which tests the
    id_wrapper function in the decorators.py file.
    """
    # set environment variable that turns off JIT decorator logic
    os.environ['NOTAXCALCJIT'] = 'NOJIT'
    # reload the decorators module so it re-reads the environment variable
    importlib.reload(taxcalc.decorators)
    # verify Magic_calc6 function works as expected
    Magic_calc6_ = iterate_jit(parameters=['w'], nopython=True)(Magic_calc6)
    pm = Foo()
    pf = Foo()
    pm.a = np.ones((5,))
    pm.b = np.ones((5,))
    pm.w = np.ones((5,))
    pf.x = np.ones((5,))
    pf.y = np.ones((5,))
    pf.z = np.ones((5,))
    ans = Magic_calc6_(pm, pf)
    exp = DataFrame(data=[[2.0, 4.0]] * 5,
                    columns=["a", "b"])
    assert_frame_equal(ans, exp)
    # restore normal JIT operation of decorators module
    del os.environ['NOTAXCALCJIT']
    importlib.reload(taxcalc.decorators)
| [
"numpy.ones",
"numpy.column_stack",
"pytest.raises",
"importlib.reload",
"pandas.DataFrame",
"pandas.util.testing.assert_frame_equal"
] | [((3950, 3963), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (3957, 3963), True, 'import numpy as np\n'), ((3975, 3988), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (3982, 3988), True, 'import numpy as np\n'), ((4000, 4013), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (4007, 4013), True, 'import numpy as np\n'), ((4025, 4038), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (4032, 4038), True, 'import numpy as np\n'), ((4050, 4063), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (4057, 4063), True, 'import numpy as np\n'), ((4097, 4149), 'pandas.DataFrame', 'DataFrame', ([], {'data': '([[2.0, 3.0]] * 5)', 'columns': "['a', 'b']"}), "(data=[[2.0, 3.0]] * 5, columns=['a', 'b'])\n", (4106, 4149), False, 'from pandas import DataFrame\n'), ((4154, 4181), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['xx', 'exp'], {}), '(xx, exp)\n', (4172, 4181), False, 'from pandas.util.testing import assert_frame_equal\n'), ((4258, 4271), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (4265, 4271), True, 'import numpy as np\n'), ((4283, 4296), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (4290, 4296), True, 'import numpy as np\n'), ((4308, 4321), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (4315, 4321), True, 'import numpy as np\n'), ((4333, 4346), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (4340, 4346), True, 'import numpy as np\n'), ((4358, 4371), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (4365, 4371), True, 'import numpy as np\n'), ((4405, 4457), 'pandas.DataFrame', 'DataFrame', ([], {'data': '([[2.0, 3.0]] * 5)', 'columns': "['a', 'b']"}), "(data=[[2.0, 3.0]] * 5, columns=['a', 'b'])\n", (4414, 4457), False, 'from pandas import DataFrame\n'), ((4462, 4489), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['xx', 'exp'], {}), '(xx, exp)\n', (4480, 4489), False, 'from pandas.util.testing import assert_frame_equal\n'), ((4563, 4576), 'numpy.ones', 'np.ones', 
(['(5,)'], {}), '((5,))\n', (4570, 4576), True, 'import numpy as np\n'), ((4588, 4601), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (4595, 4601), True, 'import numpy as np\n'), ((4613, 4626), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (4620, 4626), True, 'import numpy as np\n'), ((4638, 4651), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (4645, 4651), True, 'import numpy as np\n'), ((4663, 4676), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (4670, 4676), True, 'import numpy as np\n'), ((4716, 4768), 'pandas.DataFrame', 'DataFrame', ([], {'data': '([[2.0, 3.0]] * 5)', 'columns': "['a', 'b']"}), "(data=[[2.0, 3.0]] * 5, columns=['a', 'b'])\n", (4725, 4768), False, 'from pandas import DataFrame\n'), ((4773, 4800), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['xx', 'exp'], {}), '(xx, exp)\n', (4791, 4800), False, 'from pandas.util.testing import assert_frame_equal\n'), ((4885, 4898), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (4892, 4898), True, 'import numpy as np\n'), ((4912, 4925), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (4919, 4925), True, 'import numpy as np\n'), ((4968, 5010), 'pandas.DataFrame', 'DataFrame', ([], {'data': '([2.0] * 5)', 'columns': "['var']"}), "(data=[2.0] * 5, columns=['var'])\n", (4977, 5010), False, 'from pandas import DataFrame\n'), ((5015, 5043), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['ans', 'exp'], {}), '(ans, exp)\n', (5033, 5043), False, 'from pandas.util.testing import assert_frame_equal\n'), ((5126, 5139), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (5133, 5139), True, 'import numpy as np\n'), ((5151, 5164), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (5158, 5164), True, 'import numpy as np\n'), ((5176, 5189), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (5183, 5189), True, 'import numpy as np\n'), ((5201, 5214), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (5208, 5214), True, 'import numpy as 
np\n'), ((5226, 5239), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (5233, 5239), True, 'import numpy as np\n'), ((5251, 5264), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (5258, 5264), True, 'import numpy as np\n'), ((5308, 5380), 'pandas.DataFrame', 'DataFrame', ([], {'data': '([[2.0, 2.0, 2.0, 2.0]] * 5)', 'columns': "['c', 'd', 'e', 'f']"}), "(data=[[2.0, 2.0, 2.0, 2.0]] * 5, columns=['c', 'd', 'e', 'f'])\n", (5317, 5380), False, 'from pandas import DataFrame\n'), ((5405, 5433), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['ans', 'exp'], {}), '(ans, exp)\n', (5423, 5433), False, 'from pandas.util.testing import assert_frame_equal\n'), ((5612, 5625), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (5619, 5625), True, 'import numpy as np\n'), ((5637, 5650), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (5644, 5650), True, 'import numpy as np\n'), ((5662, 5675), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (5669, 5675), True, 'import numpy as np\n'), ((5687, 5700), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (5694, 5700), True, 'import numpy as np\n'), ((5712, 5725), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (5719, 5725), True, 'import numpy as np\n'), ((5766, 5818), 'pandas.DataFrame', 'DataFrame', ([], {'data': '([[2.0, 3.0]] * 5)', 'columns': "['a', 'b']"}), "(data=[[2.0, 3.0]] * 5, columns=['a', 'b'])\n", (5775, 5818), False, 'from pandas import DataFrame\n'), ((5843, 5871), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['ans', 'exp'], {}), '(ans, exp)\n', (5861, 5871), False, 'from pandas.util.testing import assert_frame_equal\n'), ((6059, 6072), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (6066, 6072), True, 'import numpy as np\n'), ((6084, 6097), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (6091, 6097), True, 'import numpy as np\n'), ((6109, 6122), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (6116, 6122), True, 'import numpy as np\n'), 
((6134, 6147), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (6141, 6147), True, 'import numpy as np\n'), ((6159, 6172), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (6166, 6172), True, 'import numpy as np\n'), ((6213, 6265), 'pandas.DataFrame', 'DataFrame', ([], {'data': '([[2.0, 3.0]] * 5)', 'columns': "['a', 'b']"}), "(data=[[2.0, 3.0]] * 5, columns=['a', 'b'])\n", (6222, 6265), False, 'from pandas import DataFrame\n'), ((6290, 6318), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['ans', 'exp'], {}), '(ans, exp)\n', (6308, 6318), False, 'from pandas.util.testing import assert_frame_equal\n'), ((6537, 6550), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (6544, 6550), True, 'import numpy as np\n'), ((6562, 6575), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (6569, 6575), True, 'import numpy as np\n'), ((6587, 6600), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (6594, 6600), True, 'import numpy as np\n'), ((6612, 6625), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (6619, 6625), True, 'import numpy as np\n'), ((6637, 6650), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (6644, 6650), True, 'import numpy as np\n'), ((6662, 6675), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (6669, 6675), True, 'import numpy as np\n'), ((6716, 6768), 'pandas.DataFrame', 'DataFrame', ([], {'data': '([[2.0, 4.0]] * 5)', 'columns': "['a', 'b']"}), "(data=[[2.0, 4.0]] * 5, columns=['a', 'b'])\n", (6725, 6768), False, 'from pandas import DataFrame\n'), ((6793, 6821), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['ans', 'exp'], {}), '(ans, exp)\n', (6811, 6821), False, 'from pandas.util.testing import assert_frame_equal\n'), ((7364, 7377), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (7371, 7377), True, 'import numpy as np\n'), ((7389, 7402), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (7396, 7402), True, 'import numpy as np\n'), ((7414, 7427), 'numpy.ones', 'np.ones', 
(['(5,)'], {}), '((5,))\n', (7421, 7427), True, 'import numpy as np\n'), ((7439, 7452), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (7446, 7452), True, 'import numpy as np\n'), ((7464, 7477), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (7471, 7477), True, 'import numpy as np\n'), ((7489, 7502), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (7496, 7502), True, 'import numpy as np\n'), ((7963, 7999), 'importlib.reload', 'importlib.reload', (['taxcalc.decorators'], {}), '(taxcalc.decorators)\n', (7979, 7999), False, 'import importlib\n'), ((8170, 8183), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (8177, 8183), True, 'import numpy as np\n'), ((8195, 8208), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (8202, 8208), True, 'import numpy as np\n'), ((8220, 8233), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (8227, 8233), True, 'import numpy as np\n'), ((8245, 8258), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (8252, 8258), True, 'import numpy as np\n'), ((8270, 8283), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (8277, 8283), True, 'import numpy as np\n'), ((8295, 8308), 'numpy.ones', 'np.ones', (['(5,)'], {}), '((5,))\n', (8302, 8308), True, 'import numpy as np\n'), ((8350, 8402), 'pandas.DataFrame', 'DataFrame', ([], {'data': '([[2.0, 4.0]] * 5)', 'columns': "['a', 'b']"}), "(data=[[2.0, 4.0]] * 5, columns=['a', 'b'])\n", (8359, 8402), False, 'from pandas import DataFrame\n'), ((8427, 8455), 'pandas.util.testing.assert_frame_equal', 'assert_frame_equal', (['ans', 'exp'], {}), '(ans, exp)\n', (8445, 8455), False, 'from pandas.util.testing import assert_frame_equal\n'), ((8551, 8587), 'importlib.reload', 'importlib.reload', (['taxcalc.decorators'], {}), '(taxcalc.decorators)\n', (8567, 8587), False, 'import importlib\n'), ((7056, 7081), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7069, 7081), False, 'import pytest\n'), ((7512, 7541), 'pytest.raises', 'pytest.raises', 
(['AttributeError'], {}), '(AttributeError)\n', (7525, 7541), False, 'import pytest\n'), ((3409, 3433), 'numpy.column_stack', 'np.column_stack', (['outputs'], {}), '(outputs)\n', (3424, 3433), True, 'import numpy as np\n')] |
#encoding: utf-8
import numpy as np
import tensorflow as tf
from timeit import default_timer as timer
from keras import backend as K
from PedestrianDetectionFunc.PedestrianDetectionModel import PedestrianModel
class Pedestrian(object):
    """Pedestrian detector wrapping a Keras/TF YOLO-style model.

    Owns a private TF graph and session so it can coexist with other
    models loaded in the same process.
    """

    def __init__(self):
        # Reset any global Keras state left over from other models.
        K.clear_session()
        self.GPU_config = tf.ConfigProto()
        # Cap GPU memory so several detectors can share one device.
        self.GPU_config.gpu_options.per_process_gpu_memory_fraction = 0.10

    def restore(self, model_path='./PedestrianDetectionFunc/Data/model.h5'):
        """Load the detection model weights into a private graph/session pair."""
        self.graph = tf.Graph()
        self.sess = tf.Session(config=self.GPU_config, graph=self.graph)
        # Build the model inside our own graph/session so its tensors
        # resolve against self.sess at inference time.
        with self.sess.as_default():
            with self.graph.as_default():
                self.PedestrianModel = PedestrianModel()
                self.PedestrianModel.load_model(model_path)
                self.PedestrianModel.generate()

    def MakePedestrianDetectionFunc(self, mImage):
        """Run pedestrian detection on a PIL image.

        Returns (1, response) where response carries 'result', 'box'
        (a list of "class score left top right bottom" strings) and
        'time' (inference time as a millisecond string).
        """
        image = self.PedestrianModel.Preprocess(mImage)
        # PIL .size is (width, height), so size[1] is the height.
        image_h, image_w = mImage.size[1], mImage.size[0]
        start = timer()
        # NOTE(review): input_image_shape is fed as [height, width] here —
        # confirm that matches the model's expected ordering.
        out_boxes, out_scores, out_classes = self.sess.run(
            [self.PedestrianModel.boxes, self.PedestrianModel.scores, self.PedestrianModel.classes],
            feed_dict={
                self.PedestrianModel.model.input: image,
                self.PedestrianModel.input_image_shape: [image_h, image_w],
            })
        end = timer()
        t = end - start
        # Keep only the pedestrian class (COCO class id 0).
        keep = out_classes == 0
        out_boxes = out_boxes[keep]
        out_scores = out_scores[keep]
        out_classes = out_classes[keep]
        ResponseStr = {'result': 'success'}
        ResponseStr["box"] = []
        for i, c in reversed(list(enumerate(out_classes))):
            predicted_class = self.PedestrianModel.class_names[c]
            box = out_boxes[i]
            score = out_scores[i]
            top, left, bottom, right = box
            # Round to whole pixels and clamp to the image bounds.
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(mImage.size[1], np.floor(bottom + 0.5).astype('int32'))
            right = min(mImage.size[0], np.floor(right + 0.5).astype('int32'))
            res_box = "{} {:.2f} {} {} {} {}".format(predicted_class, score, left, top, right, bottom)
            ResponseStr["box"].append(res_box)
        # BUG FIX: timer() measures seconds; seconds -> milliseconds is
        # a factor of 1000, not the original 100.
        ResponseStr["time"] = "{:.2f}ms".format(t * 1000)
        return 1, ResponseStr
def GetPedestrianRes(mImage, Pedestrianclass):
    """Thin wrapper: run the given detector instance on *mImage* and pass its result through."""
    flag, response = Pedestrianclass.MakePedestrianDetectionFunc(mImage)
    return flag, response
| [
"tensorflow.Graph",
"PedestrianDetectionFunc.PedestrianDetectionModel.PedestrianModel",
"timeit.default_timer",
"tensorflow.Session",
"numpy.floor",
"keras.backend.clear_session",
"tensorflow.ConfigProto"
] | [((269, 286), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (284, 286), True, 'from keras import backend as K\n'), ((313, 329), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (327, 329), True, 'import tensorflow as tf\n'), ((504, 514), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (512, 514), True, 'import tensorflow as tf\n'), ((535, 587), 'tensorflow.Session', 'tf.Session', ([], {'config': 'self.GPU_config', 'graph': 'self.graph'}), '(config=self.GPU_config, graph=self.graph)\n', (545, 587), True, 'import tensorflow as tf\n'), ((1002, 1009), 'timeit.default_timer', 'timer', ([], {}), '()\n', (1007, 1009), True, 'from timeit import default_timer as timer\n'), ((1345, 1352), 'timeit.default_timer', 'timer', ([], {}), '()\n', (1350, 1352), True, 'from timeit import default_timer as timer\n'), ((706, 723), 'PedestrianDetectionFunc.PedestrianDetectionModel.PedestrianModel', 'PedestrianModel', ([], {}), '()\n', (721, 723), False, 'from PedestrianDetectionFunc.PedestrianDetectionModel import PedestrianModel\n'), ((1897, 1916), 'numpy.floor', 'np.floor', (['(top + 0.5)'], {}), '(top + 0.5)\n', (1905, 1916), True, 'import numpy as np\n'), ((1960, 1980), 'numpy.floor', 'np.floor', (['(left + 0.5)'], {}), '(left + 0.5)\n', (1968, 1980), True, 'import numpy as np\n'), ((2039, 2061), 'numpy.floor', 'np.floor', (['(bottom + 0.5)'], {}), '(bottom + 0.5)\n', (2047, 2061), True, 'import numpy as np\n'), ((2119, 2140), 'numpy.floor', 'np.floor', (['(right + 0.5)'], {}), '(right + 0.5)\n', (2127, 2140), True, 'import numpy as np\n')] |
import os
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1' #Comment to Enable GPU, disable CUDA cores
import numpy as np
from numpy.random import seed #fix random seed for reproducibility (numpy)
# Seed NumPy's global RNG before any stochastic Keras/TensorFlow setup runs.
seed(1)
from tensorflow import set_random_seed # fix random seed for reproducibility (tensorflow backend)
# Seed the TensorFlow graph-level RNG as well (TF1-style API).
set_random_seed(2)
# NOTE(review): several of the imports below (array, randint, MinMaxScaler,
# sio, genfromtxt, plt, ImageDataGenerator) are unused in this script.
from numpy import array
from random import randint
from sklearn.preprocessing import MinMaxScaler
import scipy.io as sio
from numpy import genfromtxt
import matplotlib.pyplot as plt
import keras
from keras import initializers
from keras import layers
from keras.layers import *
from keras.utils import *
from keras.models import *
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint
from keras.callbacks import TensorBoard
#### Load training / test arrays ####
# Pull the four named arrays out of the compressed archive.
archive = np.load('training_data.npz')
x_train = archive['x_train']
y_train = archive['y_train']
x_test = archive['x_test']
y_test = archive['y_test']
## Cast band values to float32 and rescale by 1/100, then shape each
## sample as a 32x32 spatial patch with 288 spectral bands.
x_train = x_train.astype('float32') / 100
x_test = x_test.astype('float32') / 100
x_train = x_train.reshape(840, 32, 32, 288)
x_test = x_test.reshape(120, 32, 32, 288)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
print('y_train shape:', y_train.shape)
print('y_test shape:', y_test.shape)
#### Hyperparameters ####
batch_size = 3
num_classes = 6
epochs = 1000
#### CNN structure (Functional API Model Style) ####
## Uncomment initializer to be used
#initializer = keras.initializers.Ones()
#initializer = keras.initializers.RandomNormal(mean=0.0, stddev=0.005, seed=True)
#initializer = keras.initializers.RandomUniform(minval=-0.05, maxval=0.05, seed=seed)
#initializer = keras.initializers.TruncatedNormal(mean=0.0, stddev=0.05, seed=None)
#initializer = keras.initializers.VarianceScaling(scale=1.0, mode='fan_in', distribution='normal', seed=None)
# NOTE(review): 'initializer' is constructed but never passed to any layer
# below (no kernel_initializer= argument) — confirm whether the Conv2D
# layers were meant to use it.
initializer = keras.initializers.Orthogonal(gain=2, seed=True)
#initializer = keras.initializers.lecun_uniform(seed=True)
#initializer = keras.initializers.glorot_normal(seed=None)
#initializer = keras.initializers.glorot_uniform(seed=None)
#initializer = keras.initializers.he_normal(seed=True)
#initializer = keras.initializers.lecun_normal(seed=None)
#initializer = keras.initializers.he_uniform(seed=None)
# Input: a 32x32 patch with 288 spectral bands.
input1 = Input(shape=(32,32,288))
# Spatial branch: 1x1 then 3x3 convolutions over the input patch.
spatial = Conv2D(128, (1, 1), padding='same', activation='relu')(input1)
spatial= Conv2D(128, (3, 3), padding='same', activation='relu')(spatial)
# Spectral branch: stride-1 max-pool (spatial smoothing) followed by a
# 1x1 convolution that mixes bands per pixel.
spectral = MaxPooling2D((3, 3), strides=(1, 1), padding='same')(input1)
spectral = Conv2D(64, (1, 1), padding='same', activation='relu')(spectral)
# Fuse the two branches along the channel axis.
concat = concatenate([spatial, spectral], axis=3)
# Stack of 1x1 convolutions with batch norm and two residual shortcuts.
l1 = Conv2D(128, kernel_size=(1, 1), padding='same', activation='relu')(concat)
l1bn=BatchNormalization()(l1)
l2 = Conv2D(128, kernel_size=(1, 1), padding='same', activation='relu')(l1bn)
l2bn=BatchNormalization()(l2)
l3 = Conv2D(128, kernel_size=(1, 1), padding='same', activation='relu')(l2bn)
# Residual shortcut 1: skip from l1bn around l2/l3.
add1 = add([l1bn, l3])
l4 = Conv2D(128, kernel_size=(1, 1), padding='same',activation='relu')(add1)
l5 = Conv2D(128, kernel_size=(1, 1), padding='same',activation='relu')(l4)
# Residual shortcut 2: skip from l4 around l5.
add2 = add([l4, l5])
# Head: 1x1 convolutions interleaved with spatial dropout for regularization.
l7 = Conv2D(128, kernel_size=(1, 1), activation='relu')(add2)
drop1 = SpatialDropout2D(0.2)(l7)
l8 = Conv2D(128, kernel_size=(1, 1), activation='relu')(drop1)
drop2 = SpatialDropout2D(0.2)(l8)
l9 = Conv2D(128, kernel_size=(1, 1), activation='relu')(drop2)
# Flatten and classify into the 6 target classes.
flat = Flatten()(l9)
output = Dense(6, activation='softmax')(flat)
model = Model(inputs=input1, outputs=output)
#### Optimizers / training / evaluation ####
# Three candidate optimizers are instantiated; only SGD (opt2) is used below.
opt0 = keras.optimizers.RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=5e-4)
opt1 = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0, amsgrad=False)
opt2 = keras.optimizers.SGD(lr=0.001, momentum=0.9, decay=0.0005, nesterov=False)
model.compile(
    loss='categorical_crossentropy',
    optimizer=opt2,
    metrics=['accuracy'])
# Persist the best weights (by validation accuracy) and log for TensorBoard.
filepath = "logs/weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5"
callbacks_list = [
    ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max'),
    keras.callbacks.TensorBoard(log_dir='logs', histogram_freq=0, batch_size=32, write_graph=True, update_freq='epoch'),
]
cnn = model.fit(x_train, y_train,
                batch_size=batch_size,
                epochs=epochs,
                validation_data=(x_test, y_test),
                verbose=1,
                callbacks=callbacks_list,
                shuffle=True)
#### Save trained weights and report held-out performance ####
model.save_weights('H_K_2016_trained_model_weights.h5')
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
"keras.optimizers.Adam",
"keras.callbacks.ModelCheckpoint",
"keras.callbacks.TensorBoard",
"keras.optimizers.SGD",
"numpy.random.seed",
"keras.optimizers.RMSprop",
"tensorflow.set_random_seed",
"keras.initializers.Orthogonal",
"numpy.load"
] | [((195, 202), 'numpy.random.seed', 'seed', (['(1)'], {}), '(1)\n', (199, 202), False, 'from numpy.random import seed\n'), ((303, 321), 'tensorflow.set_random_seed', 'set_random_seed', (['(2)'], {}), '(2)\n', (318, 321), False, 'from tensorflow import set_random_seed\n'), ((846, 874), 'numpy.load', 'np.load', (['"""training_data.npz"""'], {}), "('training_data.npz')\n", (853, 874), True, 'import numpy as np\n'), ((2090, 2138), 'keras.initializers.Orthogonal', 'keras.initializers.Orthogonal', ([], {'gain': '(2)', 'seed': '(True)'}), '(gain=2, seed=True)\n', (2119, 2138), False, 'import keras\n'), ((3815, 3886), 'keras.optimizers.RMSprop', 'keras.optimizers.RMSprop', ([], {'lr': '(0.001)', 'rho': '(0.9)', 'epsilon': 'None', 'decay': '(0.0005)'}), '(lr=0.001, rho=0.9, epsilon=None, decay=0.0005)\n', (3839, 3886), False, 'import keras\n'), ((3895, 3994), 'keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'lr': '(0.001)', 'beta_1': '(0.9)', 'beta_2': '(0.999)', 'epsilon': 'None', 'decay': '(0)', 'amsgrad': '(False)'}), '(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None,\n decay=0, amsgrad=False)\n', (3916, 3994), False, 'import keras\n'), ((4001, 4075), 'keras.optimizers.SGD', 'keras.optimizers.SGD', ([], {'lr': '(0.001)', 'momentum': '(0.9)', 'decay': '(0.0005)', 'nesterov': '(False)'}), '(lr=0.001, momentum=0.9, decay=0.0005, nesterov=False)\n', (4021, 4075), False, 'import keras\n'), ((4368, 4460), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['filepath'], {'monitor': '"""val_acc"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""max"""'}), "(filepath, monitor='val_acc', verbose=1, save_best_only=True,\n mode='max')\n", (4383, 4460), False, 'from keras.callbacks import ModelCheckpoint\n'), ((4487, 4606), 'keras.callbacks.TensorBoard', 'keras.callbacks.TensorBoard', ([], {'log_dir': '"""logs"""', 'histogram_freq': '(0)', 'batch_size': '(32)', 'write_graph': '(True)', 'update_freq': '"""epoch"""'}), "(log_dir='logs', histogram_freq=0, 
batch_size=32,\n write_graph=True, update_freq='epoch')\n", (4514, 4606), False, 'import keras\n')] |
import numpy as np
from computeCost import compute_cost
def gradient_descent(X, y, theta, alpha, num_iters):
    """Run batch gradient descent for linear regression.

    Parameters
    ----------
    X : array-like or DataFrame, shape (m, n)
        Design matrix (bias column included by the caller).
    y : array-like, shape (m,) or (m, 1)
        Target values.
    theta : ndarray, shape (n, 1)
        Initial parameters; updated in place and also returned.
    alpha : float
        Learning rate.
    num_iters : int
        Number of gradient-descent iterations.

    Returns
    -------
    (theta, J_history)
        Final parameters and the cost recorded after each iteration.
    """
    m = len(X)
    J_history = np.zeros(num_iters)
    # Convert once up front so the update below is a single vectorized
    # step instead of a per-parameter pandas loop; this also generalizes
    # the routine to plain ndarrays (the original required a DataFrame
    # whose target column was literally named 'Price').
    X_mat = np.asarray(X, dtype=float)
    y_col = np.asarray(y, dtype=float).reshape(-1, 1)
    for i in range(num_iters):
        # Simultaneous update: the error term uses the theta from the
        # previous iteration for every parameter, matching the original
        # code, which computed `inner` once before its per-theta loop.
        error = X_mat.dot(theta) - y_col
        theta -= (alpha / m) * X_mat.T.dot(error)
        # Save the cost every iteration.
        J_history[i] = compute_cost(X, y, theta)
    return theta, J_history
| [
"numpy.array",
"numpy.zeros",
"computeCost.compute_cost"
] | [((241, 260), 'numpy.zeros', 'np.zeros', (['num_iters'], {}), '(num_iters)\n', (249, 260), True, 'import numpy as np\n'), ((861, 886), 'computeCost.compute_cost', 'compute_cost', (['X', 'y', 'theta'], {}), '(X, y, theta)\n', (873, 886), False, 'from computeCost import compute_cost\n'), ((550, 561), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (558, 561), True, 'import numpy as np\n'), ((688, 716), 'numpy.array', 'np.array', (['X.iloc[:, j:j + 1]'], {}), '(X.iloc[:, j:j + 1])\n', (696, 716), True, 'import numpy as np\n')] |
# SETUP ------------------------------------------------------------------------
import sys
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.layers import Activation, Conv2D, MaxPooling2D, UpSampling2D
from tensorflow.keras.preprocessing.image import load_img
# Set base filepath. Import helper functions.
path = '/content/drive/My Drive/finding-houses/'
sys.path.insert(0, path)
from helper_functions import *
# DATA -------------------------------------------------------------------------
# Load training data. Convert label image to one-hot encoding.
x_all = np.array(load_img(path+'images/rgb.png', color_mode='rgb'))
# gt.png is presumably a 0/255 binary mask; // 255 maps it to {0, 1}.
# TODO(review): confirm the mask only contains those two values.
y_all = np.array(load_img(path+'images/gt.png', color_mode='grayscale')) // 255
y_all = tf.keras.utils.to_categorical(y_all, num_classes=2, dtype=np.uint8)
# Reserve 256px-high strip of training images for validation.
x_train = x_all[256:]
x_valid = x_all[:256]
y_train = y_all[256:]
y_valid = y_all[:256]
# Crop x_valid and y_valid at the red line shown in `x_valid_uncropped.png`.
# j0 drops the leftover left-edge columns (width % 256) plus one extra
# 256px frame, so the remaining width divides evenly into 256px frames.
j0 = (x_valid.shape[1] % 256) + 256
x_valid = x_valid[:, j0:]
y_valid = y_valid[:, j0:]
# Split x_valid and y_valid into 256x256px frames (new leading frame axis).
num_splits = x_valid.shape[1] // 256
x_valid = np.array(np.split(x_valid, num_splits, axis=1))
y_valid = np.array(np.split(y_valid, num_splits, axis=1))
# Sample training frames. Bundle validation data.
x, y = sample_training_data(x_train, y_train, num_examples=40000)
xy_valid = (x_valid, y_valid)
# MODEL ------------------------------------------------------------------------
# Same architecture as specified in the instructions, built incrementally:
# a small conv encoder/decoder that maps 256x256 RGB to 2-channel logits.
model = tf.keras.models.Sequential()
model.add(Conv2D(filters=16, kernel_size=(3, 3), padding='same', input_shape=(256, 256, 3)))
model.add(Activation('relu'))
model.add(Conv2D(filters=32, kernel_size=(3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(filters=16, kernel_size=(3, 3), padding='same'))
model.add(Activation('relu'))
model.add(UpSampling2D(size=(2, 2)))
model.add(Conv2D(filters=2, kernel_size=(5, 5), padding='same'))
# The network emits raw logits, so the loss applies the sigmoid itself.
model.compile(
    loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
    optimizer='adam',
    metrics=['accuracy'])
# TRAINING ---------------------------------------------------------------------
# Fit for 50 epochs, then persist the model and the per-epoch metric curves.
history = model.fit(x, y, batch_size=256, epochs=50, validation_data=xy_valid)
model.save(path + 'training/model.h5')
curves = history.history
metrics = pd.DataFrame({
    'loss_train': curves['loss'],
    'loss_valid': curves['val_loss'],
    'acc_train': curves['accuracy'],
    'acc_valid': curves['val_accuracy']})
metrics.to_csv(path + 'training/metrics.csv', index=False)
| [
"tensorflow.keras.preprocessing.image.load_img",
"tensorflow.keras.utils.to_categorical",
"sys.path.insert",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.UpSampling2D",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.losses.BinaryCrossentropy",
"numpy.split",
"pandas.DataFram... | [((393, 417), 'sys.path.insert', 'sys.path.insert', (['(0)', 'path'], {}), '(0, path)\n', (408, 417), False, 'import sys\n'), ((751, 818), 'tensorflow.keras.utils.to_categorical', 'tf.keras.utils.to_categorical', (['y_all'], {'num_classes': '(2)', 'dtype': 'np.uint8'}), '(y_all, num_classes=2, dtype=np.uint8)\n', (780, 818), True, 'import tensorflow as tf\n'), ((2532, 2725), 'pandas.DataFrame', 'pd.DataFrame', (["{'loss_train': history.history['loss'], 'loss_valid': history.history[\n 'val_loss'], 'acc_train': history.history['accuracy'], 'acc_valid':\n history.history['val_accuracy']}"], {}), "({'loss_train': history.history['loss'], 'loss_valid': history.\n history['val_loss'], 'acc_train': history.history['accuracy'],\n 'acc_valid': history.history['val_accuracy']})\n", (2544, 2725), True, 'import pandas as pd\n'), ((612, 663), 'tensorflow.keras.preprocessing.image.load_img', 'load_img', (["(path + 'images/rgb.png')"], {'color_mode': '"""rgb"""'}), "(path + 'images/rgb.png', color_mode='rgb')\n", (620, 663), False, 'from tensorflow.keras.preprocessing.image import load_img\n'), ((1244, 1281), 'numpy.split', 'np.split', (['x_valid', 'num_splits'], {'axis': '(1)'}), '(x_valid, num_splits, axis=1)\n', (1252, 1281), True, 'import numpy as np\n'), ((1302, 1339), 'numpy.split', 'np.split', (['y_valid', 'num_splits'], {'axis': '(1)'}), '(y_valid, num_splits, axis=1)\n', (1310, 1339), True, 'import numpy as np\n'), ((680, 736), 'tensorflow.keras.preprocessing.image.load_img', 'load_img', (["(path + 'images/gt.png')"], {'color_mode': '"""grayscale"""'}), "(path + 'images/gt.png', color_mode='grayscale')\n", (688, 736), False, 'from tensorflow.keras.preprocessing.image import load_img\n'), ((1660, 1746), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(16)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'input_shape': '(256, 256, 3)'}), "(filters=16, kernel_size=(3, 3), padding='same', input_shape=(256, \n 256, 3))\n", (1666, 1746), 
False, 'from tensorflow.keras.layers import Activation, Conv2D, MaxPooling2D, UpSampling2D\n'), ((1747, 1765), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1757, 1765), False, 'from tensorflow.keras.layers import Activation, Conv2D, MaxPooling2D, UpSampling2D\n'), ((1771, 1825), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3, 3)', 'padding': '"""same"""'}), "(filters=32, kernel_size=(3, 3), padding='same')\n", (1777, 1825), False, 'from tensorflow.keras.layers import Activation, Conv2D, MaxPooling2D, UpSampling2D\n'), ((1831, 1849), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1841, 1849), False, 'from tensorflow.keras.layers import Activation, Conv2D, MaxPooling2D, UpSampling2D\n'), ((1855, 1885), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1867, 1885), False, 'from tensorflow.keras.layers import Activation, Conv2D, MaxPooling2D, UpSampling2D\n'), ((1891, 1945), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(16)', 'kernel_size': '(3, 3)', 'padding': '"""same"""'}), "(filters=16, kernel_size=(3, 3), padding='same')\n", (1897, 1945), False, 'from tensorflow.keras.layers import Activation, Conv2D, MaxPooling2D, UpSampling2D\n'), ((1951, 1969), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1961, 1969), False, 'from tensorflow.keras.layers import Activation, Conv2D, MaxPooling2D, UpSampling2D\n'), ((1975, 2000), 'tensorflow.keras.layers.UpSampling2D', 'UpSampling2D', ([], {'size': '(2, 2)'}), '(size=(2, 2))\n', (1987, 2000), False, 'from tensorflow.keras.layers import Activation, Conv2D, MaxPooling2D, UpSampling2D\n'), ((2006, 2059), 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(2)', 'kernel_size': '(5, 5)', 'padding': '"""same"""'}), "(filters=2, kernel_size=(5, 5), padding='same')\n", (2012, 2059), 
False, 'from tensorflow.keras.layers import Activation, Conv2D, MaxPooling2D, UpSampling2D\n'), ((2139, 2191), 'tensorflow.keras.losses.BinaryCrossentropy', 'tf.keras.losses.BinaryCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (2173, 2191), True, 'import tensorflow as tf\n')] |
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
from __future__ import absolute_import
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
def test_varnewaxis_1():
    """Integer-index array broadcast one level deeper via np.newaxis."""
    data = ak.Array(
        [
            [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]],
            [[15, 16, 17, 18, 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]],
        ]
    )
    index = ak.Array([[3, 4], [0, 1, 2, 3]])
    expected = [
        [[3, 4], [8, 9], [13, 14]],
        [[15, 16, 17, 18], [20, 21, 22, 23], [25, 26, 27, 28]],
    ]
    assert data[index[:, np.newaxis]].tolist() == expected
def test_varnewaxis_2():
    """Broadcast indexing with a missing (None) entry propagates the None."""
    data = ak.Array(
        [
            [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]],
            [[15, 16, 17, 18, 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]],
        ]
    )
    index = ak.Array([[3, 4], [0, 1, None, 3]])
    expected = [
        [[3, 4], [8, 9], [13, 14]],
        [[15, 16, None, 18], [20, 21, None, 23], [25, 26, None, 28]],
    ]
    assert data[index[:, np.newaxis]].tolist() == expected
def test_varnewaxis_3():
    """Boolean-mask array broadcast one level deeper via np.newaxis."""
    data = ak.Array(
        [
            [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]],
            [[15, 16, 17, 18, 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]],
        ]
    )
    mask = ak.Array(
        [[False, False, False, True, True], [True, True, True, True, False]]
    )
    expected = [
        [[3, 4], [8, 9], [13, 14]],
        [[15, 16, 17, 18], [20, 21, 22, 23], [25, 26, 27, 28]],
    ]
    assert data[mask[:, np.newaxis]].tolist() == expected
def test_varnewaxis_4():
    """Boolean mask containing None keeps the None in every selected sublist."""
    data = ak.Array(
        [
            [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]],
            [[15, 16, 17, 18, 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]],
        ]
    )
    mask = ak.Array(
        [[False, False, False, True, True], [True, True, None, True, False]]
    )
    expected = [
        [[3, 4], [8, 9], [13, 14]],
        [[15, 16, None, 18], [20, 21, None, 23], [25, 26, None, 28]],
    ]
    assert data[mask[:, np.newaxis]].tolist() == expected
def test_varnewaxis_5():
    """Same broadcast indexing after the outer lists have been reordered."""
    data = ak.Array(
        [
            [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]],
            [[15, 16, 17, 18, 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]],
        ]
    )
    data = data[[1, 0]]
    index = ak.Array([[3, 4], [0, 1, 2, 3]])
    expected = [
        [[18, 19], [23, 24], [28, 29]],
        [[0, 1, 2, 3], [5, 6, 7, 8], [10, 11, 12, 13]],
    ]
    assert data[index[:, np.newaxis]].tolist() == expected
def test_varnewaxis_6():
    """Broadcast indexing also works when the source is a regular NumPy array."""
    data = ak.Array(
        np.array(
            [
                [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]],
                [[15, 16, 17, 18, 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]],
            ]
        )
    )
    index = ak.Array([[3, 4], [0, 1, 2, 3]])
    expected = [
        [[3, 4], [8, 9], [13, 14]],
        [[15, 16, 17, 18], [20, 21, 22, 23], [25, 26, 27, 28]],
    ]
    assert data[index[:, np.newaxis]].tolist() == expected
| [
"numpy.array",
"awkward.Array"
] | [((264, 405), 'awkward.Array', 'ak.Array', (['[[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]], [[15, 16, 17, 18,\n 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]]]'], {}), '([[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]], [[15, \n 16, 17, 18, 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]]])\n', (272, 405), True, 'import awkward as ak\n'), ((463, 495), 'awkward.Array', 'ak.Array', (['[[3, 4], [0, 1, 2, 3]]'], {}), '([[3, 4], [0, 1, 2, 3]])\n', (471, 495), True, 'import awkward as ak\n'), ((695, 836), 'awkward.Array', 'ak.Array', (['[[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]], [[15, 16, 17, 18,\n 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]]]'], {}), '([[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]], [[15, \n 16, 17, 18, 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]]])\n', (703, 836), True, 'import awkward as ak\n'), ((894, 929), 'awkward.Array', 'ak.Array', (['[[3, 4], [0, 1, None, 3]]'], {}), '([[3, 4], [0, 1, None, 3]])\n', (902, 929), True, 'import awkward as ak\n'), ((1135, 1276), 'awkward.Array', 'ak.Array', (['[[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]], [[15, 16, 17, 18,\n 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]]]'], {}), '([[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]], [[15, \n 16, 17, 18, 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]]])\n', (1143, 1276), True, 'import awkward as ak\n'), ((1334, 1412), 'awkward.Array', 'ak.Array', (['[[False, False, False, True, True], [True, True, True, True, False]]'], {}), '([[False, False, False, True, True], [True, True, True, True, False]])\n', (1342, 1412), True, 'import awkward as ak\n'), ((1626, 1767), 'awkward.Array', 'ak.Array', (['[[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]], [[15, 16, 17, 18,\n 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]]]'], {}), '([[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]], [[15, \n 16, 17, 18, 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]]])\n', (1634, 1767), True, 
'import awkward as ak\n'), ((1825, 1903), 'awkward.Array', 'ak.Array', (['[[False, False, False, True, True], [True, True, None, True, False]]'], {}), '([[False, False, False, True, True], [True, True, None, True, False]])\n', (1833, 1903), True, 'import awkward as ak\n'), ((2123, 2264), 'awkward.Array', 'ak.Array', (['[[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]], [[15, 16, 17, 18,\n 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]]]'], {}), '([[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]], [[15, \n 16, 17, 18, 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]]])\n', (2131, 2264), True, 'import awkward as ak\n'), ((2348, 2380), 'awkward.Array', 'ak.Array', (['[[3, 4], [0, 1, 2, 3]]'], {}), '([[3, 4], [0, 1, 2, 3]])\n', (2356, 2380), True, 'import awkward as ak\n'), ((2819, 2851), 'awkward.Array', 'ak.Array', (['[[3, 4], [0, 1, 2, 3]]'], {}), '([[3, 4], [0, 1, 2, 3]])\n', (2827, 2851), True, 'import awkward as ak\n'), ((2594, 2735), 'numpy.array', 'np.array', (['[[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]], [[15, 16, 17, 18,\n 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]]]'], {}), '([[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]], [[15, \n 16, 17, 18, 19], [20, 21, 22, 23, 24], [25, 26, 27, 28, 29]]])\n', (2602, 2735), True, 'import numpy as np\n')] |
import numpy as np
import torch
from conformer_rl import utils
from conformer_rl.agents import PPORecurrentAgent
from conformer_rl.config import Config
from conformer_rl.environments import Task
from conformer_rl.models import RTGNRecurrent
from conformer_rl.molecule_generation import branched_alkane
# import the custom created environment to run the gym register script
import custom_env
# Prefer the first CUDA device when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if __name__ == '__main__':
    # Pin numerical libraries to a single thread per worker process.
    utils.set_one_thread()
    # Molecule config: a branched alkane with 16 carbons — TODO confirm units.
    mol_config = branched_alkane(16)
    config = Config()
    # set the tag to represent the run
    config.tag = 'atom_type_test'
    # Update the network's node_dim to equal 2
    config.network = RTGNRecurrent(6, 128, edge_dim=6, node_dim=2).to(device)
    # Batch Hyperparameters
    config.num_workers = 20
    config.rollout_length = 20
    config.recurrence = 5
    config.optimization_epochs = 4
    config.max_steps = 10000000
    # Checkpoint/eval every num_workers*200*5 environment steps.
    config.save_interval = config.num_workers*200*5
    config.eval_interval = config.num_workers*200*5
    config.eval_episodes = 2
    config.mini_batch_size = 50
    # Coefficient Hyperparameters
    # Scale the learning rate with sqrt of the number of parallel workers.
    lr = 5e-6 * np.sqrt(config.num_workers)
    config.optimizer_fn = lambda params: torch.optim.Adam(params, lr=lr, eps=1e-5)
    # set the environment to the test env
    config.train_env = Task('TestEnv-v0', concurrency=True, num_envs=config.num_workers, seed=np.random.randint(0,1e5), mol_config=mol_config, max_steps=200)
    config.eval_env = Task('TestEnv-v0', seed=np.random.randint(0,7e4), mol_config=mol_config, max_steps=200)
    agent = PPORecurrentAgent(config)
agent.run_steps() | [
"torch.optim.Adam",
"numpy.sqrt",
"conformer_rl.molecule_generation.branched_alkane",
"conformer_rl.utils.set_one_thread",
"conformer_rl.agents.PPORecurrentAgent",
"numpy.random.randint",
"torch.cuda.is_available",
"conformer_rl.config.Config",
"conformer_rl.models.RTGNRecurrent"
] | [((498, 520), 'conformer_rl.utils.set_one_thread', 'utils.set_one_thread', ([], {}), '()\n', (518, 520), False, 'from conformer_rl import utils\n'), ((539, 558), 'conformer_rl.molecule_generation.branched_alkane', 'branched_alkane', (['(16)'], {}), '(16)\n', (554, 558), False, 'from conformer_rl.molecule_generation import branched_alkane\n'), ((573, 581), 'conformer_rl.config.Config', 'Config', ([], {}), '()\n', (579, 581), False, 'from conformer_rl.config import Config\n'), ((1618, 1643), 'conformer_rl.agents.PPORecurrentAgent', 'PPORecurrentAgent', (['config'], {}), '(config)\n', (1635, 1643), False, 'from conformer_rl.agents import PPORecurrentAgent\n'), ((428, 453), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (451, 453), False, 'import torch\n'), ((1183, 1210), 'numpy.sqrt', 'np.sqrt', (['config.num_workers'], {}), '(config.num_workers)\n', (1190, 1210), True, 'import numpy as np\n'), ((1252, 1294), 'torch.optim.Adam', 'torch.optim.Adam', (['params'], {'lr': 'lr', 'eps': '(1e-05)'}), '(params, lr=lr, eps=1e-05)\n', (1268, 1294), False, 'import torch\n'), ((725, 770), 'conformer_rl.models.RTGNRecurrent', 'RTGNRecurrent', (['(6)', '(128)'], {'edge_dim': '(6)', 'node_dim': '(2)'}), '(6, 128, edge_dim=6, node_dim=2)\n', (738, 770), False, 'from conformer_rl.models import RTGNRecurrent\n'), ((1431, 1461), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100000.0)'], {}), '(0, 100000.0)\n', (1448, 1461), True, 'import numpy as np\n'), ((1541, 1570), 'numpy.random.randint', 'np.random.randint', (['(0)', '(70000.0)'], {}), '(0, 70000.0)\n', (1558, 1570), True, 'import numpy as np\n')] |
import jieba
import config
import numpy as np
import torch
from utils import alignWord2Char
import gensim
import pickle
# jieba.enable_paddle()
jieba.initialize()
jieba.load_userdict(config.vocab_path)
'''
Returns:
vocab_size
word2id
id2word
'''
def get_Map_word_id():
    """Load the word-to-id mapping pickled at ``config.word_path``.

    Returns:
        tuple: ``(vocab_size, word2id, 0)`` — the third slot (id2word)
        is not populated and is always 0.
    """
    with open(config.word_path, 'rb') as handle:
        mapping = pickle.load(handle, encoding='utf-8')
    return len(mapping), mapping, 0
def get_Map_char_id():
    """Load the character-to-id mapping pickled at ``config.char_path``.

    Returns:
        tuple: ``(vocab_size, char2id, 0)`` — the third slot (id2char)
        is not populated and is always 0.
    """
    with open(config.char_path, 'rb') as handle:
        mapping = pickle.load(handle, encoding='utf-8')
    return len(mapping), mapping, 0
# def word_char_id():
# model=gensim.models.Word2Vec.load(config.wv_baidu_path)
# vocab=model.wv.vocab
# word2id={'[PAD]':0,'[UNK]':1}
# char2id={'[PAD]':0,'[UNK]':1}
# for w in tqdm(vocab)
def tokenize(sentence):
    """Segment ``sentence`` into a list of words with jieba (exact mode, HMM off)."""
    words = jieba.lcut(sentence, cut_all=False, HMM=False)
    return words
'''
Returns:
input_ids: batch_size * max_seq_length
attention_mask : padding mask
'''
def sent2id(batch_sentence, word2id):
    """Map a batch of tokenized sentences to a padded id matrix.

    Args:
        batch_sentence: list of token lists (one list per sentence).
        word2id: dict mapping token -> id; expected to contain the
            special tokens '[UNK]' and '[PAD]'.

    Returns:
        input_ids: (batch, max_len) ndarray of token ids, padded with
            the '[PAD]' id.
        attention_mask: matching 0/1 ndarray (0 on padding positions).
    """
    ans = []
    UNK = word2id.get('[UNK]')
    PAD = word2id.get('[PAD]')
    for word_list in batch_sentence:
        # Tokens missing from the vocabulary fall back to the [UNK] id.
        id_list = [word2id.get(i, UNK) for i in word_list]
        ans.append(id_list)
    # Fixed: removed an unused ``a = np.array(id_list)`` that was rebuilt
    # and discarded on every iteration.
    input_ids, attention_mask = seq_padding(ans, padding=PAD)
    return input_ids, attention_mask
def seq_padding(batch_sentence, padding=0):
    """Right-pad a batch of id sequences to a common length.

    Args:
        batch_sentence: list of id sequences of possibly different lengths.
        padding: the fill value used for the shorter sequences.

    Returns:
        input_ids: (batch, max_len) ndarray of padded ids.
        attention_mask: 0/1 ndarray, 0 wherever ``input_ids`` equals
            the padding value.
    """
    max_length = max(len(seq) for seq in batch_sentence)
    padded = []
    for seq in batch_sentence:
        gap = max_length - len(seq)
        if gap > 0:
            padded.append(np.concatenate([seq, [padding] * gap]))
        else:
            padded.append(seq)
    input_ids = np.array(padded)
    attention_mask = np.where(input_ids != padding, 1, 0)
    return input_ids, attention_mask
if __name__ == '__main__':
    # Smoke-test the vocabulary loaders. The leftover
    # ``import pdb; pdb.set_trace()`` debugger breakpoint was removed so
    # the script no longer halts in the debugger on every run.
    _, word2id, _ = get_Map_word_id()
    _, char2id, _ = get_Map_char_id()
    print(len(word2id))
print(len(char2id)) | [
"jieba.lcut",
"numpy.where",
"jieba.initialize",
"jieba.load_userdict",
"pickle.load",
"numpy.array",
"pdb.set_trace"
] | [((144, 162), 'jieba.initialize', 'jieba.initialize', ([], {}), '()\n', (160, 162), False, 'import jieba\n'), ((164, 202), 'jieba.load_userdict', 'jieba.load_userdict', (['config.vocab_path'], {}), '(config.vocab_path)\n', (183, 202), False, 'import jieba\n'), ((806, 852), 'jieba.lcut', 'jieba.lcut', (['sentence'], {'HMM': '(False)', 'cut_all': '(False)'}), '(sentence, HMM=False, cut_all=False)\n', (816, 852), False, 'import jieba\n'), ((1589, 1625), 'numpy.where', 'np.where', (['(input_ids != padding)', '(1)', '(0)'], {}), '(input_ids != padding, 1, 0)\n', (1597, 1625), True, 'import numpy as np\n'), ((1703, 1718), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (1716, 1718), False, 'import pdb\n'), ((343, 375), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""utf-8"""'}), "(f, encoding='utf-8')\n", (354, 375), False, 'import pickle\n'), ((492, 524), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""utf-8"""'}), "(f, encoding='utf-8')\n", (503, 524), False, 'import pickle\n'), ((1156, 1173), 'numpy.array', 'np.array', (['id_list'], {}), '(id_list)\n', (1164, 1173), True, 'import numpy as np\n')] |
"""
Class used to model NICMOS specific instrument data.
:Authors: <NAME>, <NAME>, <NAME>
:License: :doc:`LICENSE`
"""
from stsci.tools import fileutil
from nictools import readTDD
import numpy as np
from .imageObject import imageObject
class NICMOSInputImage(imageObject):
    """imageObject subclass holding NICMOS-specific detector parameters.

    Each NICMOS file contains a single 256x256 detector; science data are
    natively in COUNTS/S and are converted to electrons by
    ``doUnitConversions``.
    """

    SEPARATOR = '_'

    def __init__(self, filename=None):
        super().__init__(filename)
        self.timeExt = 'TIME'

        # define the cosmic ray bits value to use in the dq array
        self.cr_bits_value = 4096

        # Detector parameters, nic only has 1 detector in each file
        self.full_shape = (256, 256)
        self._instrument = self._image['PRIMARY'].header["INSTRUME"]
        self.native_units = 'COUNTS/S'

        self.flatkey = 'FLATFILE'

        for chip in range(1, self._numchips + 1, 1):
            self._image[self.scienceExt, chip].cte_dir = 0  # no CTE correction for nicmos

        self._effGain = 1.  # the specific gain is set by the detector subclass

    def _assignSignature(self, chip):
        """Assign a unique signature for the image based on the
        instrument, detector, chip, and size.

        The signature is used to uniquely identify the appropriate
        static mask for the image.
        """
        sci_chip = self._image[self.scienceExt, chip]
        ny = sci_chip._naxis1
        nx = sci_chip._naxis2
        detnum = sci_chip.detnum
        instr = self._instrument

        sig = (instr + str(self._detector), (nx, ny), int(detnum))  # signature is a tuple
        sci_chip.signature = sig  # signature is a tuple

    def doUnitConversions(self):
        """Convert the data to electrons.

        Records the per-chip conversion factor (gain, times exptime when
        the data are a count rate); the actual pixel multiplication is
        currently disabled (see the commented-out block below).

        Raises
        ------
        ValueError
            If any chip has no gain value.
        """
        # Image information
        _handle = fileutil.openImage(self._filename, mode='readonly', memmap=False)

        for det in range(1, self._numchips + 1, 1):
            chip = self._image[self.scienceExt, det]

            if chip._gain is not None:
                #conversionFactor = (self.getExpTime() * self.getGain())
                conversionFactor = chip._gain
                if self.isCountRate():
                    # Rates need an extra factor of exptime to become counts.
                    conversionFactor *= chip._exptime
                    counts_str = 'COUNTS/S'
                else:
                    counts_str = 'COUNTS'

                # Multiply the values of the sci extension pixels by the gain.
                print("Converting %s[%s,%d] from %s to ELECTRONS"%(self._filename,self.scienceExt,det,counts_str))
                """
                # If the exptime is 0 the science image will be zeroed out.
                np.multiply(_handle[self.scienceExt,det].data,conversionFactor,_handle[self.scienceExt,det].data)
                #chip.data=_handle[self.scienceExt,det].data.copy()

                # Set the BUNIT keyword to 'electrons'
                chip.header.update('BUNIT','ELECTRONS')
                _handle[0].header.update('BUNIT','ELECTRONS')

                # Update the PHOTFLAM value
                photflam = _handle[0].header['PHOTFLAM']
                _handle[0].header.update('PHOTFLAM',(photflam/chip._gain))

                chip._effGain = 1.0
                """
                chip._effGain = chip._gain
                chip._conversionFactor = conversionFactor
            else:
                # Renamed from ``str`` which shadowed the builtin.
                msg = "Invalid gain value for data, no conversion done"
                print(msg)
                raise ValueError(msg)

        # Close the files and clean-up
        _handle.close()
        # NOTE(review): records the conversion factor of the *last* chip;
        # NICMOS files only have one chip so this is equivalent per file.
        self._effGain = conversionFactor  # 1.0

    def _setchippars(self):
        self._setDefaultReadnoise()

    def getexptimeimg(self, chip):
        """
        Return an array representing the exposure time per pixel for the detector.

        Returns
        -------
        dark: array
            Exposure time array in the same shape as the input image
        """
        return self._image[self.timeExt, chip].data

    def getflat(self, chip):
        """
        Method for retrieving a detector's flat field.

        Returns
        -------
        flat : array
            The flat field array in the same shape as the input image with **units of cps**.
        """
        # The reference flat field is inverted:
        flat = 1.0 / super().getflat(chip)
        return flat

    def getdarkcurrent(self):
        """
        Return the dark current for the NICMOS detectors.

        Returns
        -------
        darkcurrent : float
            Dark current value with **units of cps**.

        Raises
        ------
        ValueError
            If 'EXPTIME' cannot be read from the primary header (or the
            per-chip dark rate is missing).
        """
        try:
            darkcurrent = self._image[0].header['exptime'] * \
                self._image[self.scienceExt, 1]._darkrate
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; also renamed the local from ``str``,
            # which shadowed the builtin.
            msg = "#############################################\n"
            msg += "#                                           #\n"
            msg += "# Error:                                    #\n"
            msg += "#   Cannot find the value for 'EXPTIME'     #\n"
            msg += "#   in the image header.  NICMOS input      #\n"
            msg += "#   images are expected to have this header #\n"
            msg += "#   keyword.                                #\n"
            msg += "#                                           #\n"
            msg += "#Error occured in the NICMOSInputImage class#\n"
            msg += "#                                           #\n"
            msg += "#############################################\n"
            raise ValueError(msg)
        return darkcurrent

    def getdarkimg(self, chip):
        """
        Return an array representing the dark image for the detector.

        Returns
        -------
        dark : array
            The dark array in the same shape as the image with **units of cps**.
        """
        # Read the temperature dependeant dark file.  The name for the file is taken from
        # the TEMPFILE keyword in the primary header.
        tddobj = readTDD.fromcalfile(self.name)

        if tddobj is None:
            return np.ones(self.full_shape, dtype=self.image_dtype) * self.getdarkcurrent()
        else:
            # Create Dark Object from AMPGLOW and Lineark Dark components
            darkobj = tddobj.getampglow() + tddobj.getlindark()

            # Return the darkimage taking into account any subarray information available
            return darkobj[self.ltv2:self.size2, self.ltv1:self.size1]

    def isCountRate(self):
        """
        isCountRate: Method or IRInputObject used to indicate if the
        science data is in units of counts or count rate.  This method
        assumes that the keyword 'BUNIT' is in the header of the input
        FITS file.
        """
        has_bunit = False
        if 'BUNIT' in self._image['sci', 1].header:
            has_bunit = True

        countrate = False
        # Either UNITCORR says a rate, or BUNIT contains a '/' (e.g. COUNTS/S).
        if (self._image[0].header['UNITCORR'].strip() == 'PERFORM') or \
           (has_bunit and self._image['sci', 1].header['bunit'].find('/') != -1):
            countrate = True
        return countrate
class NIC1InputImage(NICMOSInputImage):
    """NICMOS camera 1 input image (gain 5.4, read noise 26 e-)."""

    def __init__(self, filename=None):
        super().__init__(filename)
        self._effGain = 1.  # get the gain from the detector subclass
        self._detector = self._image["PRIMARY"].header["CAMERA"]
        self.proc_unit = "native"

    def _getDarkRate(self):
        """Return the dark rate: electrons/s, or DN/s in native units."""
        _darkrate = 0.08  # electrons/s
        if self.proc_unit == 'native':
            _darkrate = _darkrate / self._effGain  # DN/s
        return _darkrate

    def _getDefaultReadnoise(self):
        """Default read noise; could be updated to calculate it from the NOISFILE."""
        _rdnoise = 26.0  # electrons
        if self.proc_unit == 'native':
            _rdnoise = _rdnoise / self._effGain  # ADU
        return _rdnoise

    def setInstrumentParameters(self, instrpars):
        """This method overrides the superclass to set default values into
        the parameter dictionary, in case empty entries are provided.

        Raises
        ------
        ValueError
            If the gain or exposure time cannot be determined for a chip.
        """
        pri_header = self._image[0].header
        self.proc_unit = instrpars['proc_unit']

        if self._isNotValid(instrpars['gain'], instrpars['gnkeyword']):
            instrpars['gnkeyword'] = 'ADCGAIN'  # gain has been hardcoded below
        if self._isNotValid(instrpars['rdnoise'], instrpars['rnkeyword']):
            instrpars['rnkeyword'] = None
        if self._isNotValid(instrpars['exptime'], instrpars['expkeyword']):
            instrpars['expkeyword'] = 'EXPTIME'

        for chip in self.returnAllChips(extname=self.scienceExt):
            chip._gain = 5.4  # measured gain
            chip._rdnoise = self.getInstrParameter(instrpars['rdnoise'], pri_header,
                                                   instrpars['rnkeyword'])
            chip._exptime = self.getInstrParameter(instrpars['exptime'], pri_header,
                                                   instrpars['expkeyword'])

            # BUG FIX: the original tested ``self._exptime``, which is never
            # assigned on the image object; the per-chip value set just above
            # is what must be validated.
            if chip._gain is None or chip._exptime is None:
                print('ERROR: invalid instrument task parameter')
                raise ValueError('ERROR: invalid instrument task parameter')

            # We need to treat Read Noise as a special case since it is
            # not populated in the NICMOS primary header
            if chip._rdnoise is None:
                chip._rdnoise = self._getDefaultReadnoise()

            chip._darkrate = self._getDarkRate()
            chip.darkcurrent = self.getdarkcurrent()

            chip._effGain = chip._gain
            # This is used in the static mask; the static mask name is also
            # defined here, so it must be done after outputNames.
            self._assignSignature(chip._chip)

        # Convert the science data to electrons if specified by the user.
        self.doUnitConversions()
class NIC2InputImage(NICMOSInputImage):
    """NICMOS camera 2 input image (gain 5.4, read noise 26 e-)."""

    def __init__(self, filename=None):
        super().__init__(filename)
        self._effGain = 1.  # measured
        self._detector = self._image["PRIMARY"].header["CAMERA"]
        self.proc_unit = "native"

    def _getDarkRate(self):
        """Return the dark rate: electrons/s, or DN/s in native units."""
        _darkrate = 0.08  # electrons/s
        if self.proc_unit == 'native':
            _darkrate = _darkrate / self._effGain  # DN/s
        return _darkrate

    def _getDefaultReadnoise(self):
        """Default read noise for camera 2."""
        _rdnoise = 26.0  # electrons
        if self.proc_unit == 'native':
            _rdnoise = _rdnoise / self._effGain  # ADU
        return _rdnoise

    def setInstrumentParameters(self, instrpars):
        """This method overrides the superclass to set default values into
        the parameter dictionary, in case empty entries are provided.

        Raises
        ------
        ValueError
            If the gain or exposure time cannot be determined for a chip.
        """
        pri_header = self._image[0].header
        self.proc_unit = instrpars['proc_unit']

        if self._isNotValid(instrpars['gain'], instrpars['gnkeyword']):
            instrpars['gnkeyword'] = 'ADCGAIN'  # gain has been hardcoded below
        if self._isNotValid(instrpars['rdnoise'], instrpars['rnkeyword']):
            instrpars['rnkeyword'] = None
        if self._isNotValid(instrpars['exptime'], instrpars['expkeyword']):
            instrpars['expkeyword'] = 'EXPTIME'

        for chip in self.returnAllChips(extname=self.scienceExt):
            chip._gain = 5.4  # measured gain
            chip._rdnoise = self.getInstrParameter(
                instrpars['rdnoise'], pri_header, instrpars['rnkeyword']
            )
            chip._exptime = self.getInstrParameter(
                instrpars['exptime'], pri_header, instrpars['expkeyword']
            )

            # BUG FIX: the original tested ``self._exptime``, which is never
            # assigned on the image object; the per-chip value set just above
            # is what must be validated.
            if chip._gain is None or chip._exptime is None:
                print('ERROR: invalid instrument task parameter')
                raise ValueError('ERROR: invalid instrument task parameter')

            # We need to treat Read Noise as a special case since it is
            # not populated in the NICMOS primary header
            if chip._rdnoise is None:
                chip._rdnoise = self._getDefaultReadnoise()

            chip._darkrate = self._getDarkRate()
            chip.darkcurrent = self.getdarkcurrent()

            chip._effGain = chip._gain
            # this is used in the static mask, static mask name also defined
            # here, must be done after outputNames
            self._assignSignature(chip._chip)

        # Convert the science data to electrons if specified by the user.
        self.doUnitConversions()

    def createHoleMask(self):
        """Add in a mask for the coronographic hole to the general static
        pixel mask."""
        pass
class NIC3InputImage(NICMOSInputImage):
    """NICMOS camera 3 input image (gain 6.5, read noise 29 e-)."""

    def __init__(self, filename=None):
        super().__init__(filename)
        self._detector = self._image["PRIMARY"].header["CAMERA"]  # returns 1,2,3
        self._effGain = 1.
        self.proc_unit = "native"

    def _getDarkRate(self):
        """Return the dark rate: electrons/s, or DN/s in native units."""
        _darkrate = 0.15  # electrons/s
        if self.proc_unit == 'native':
            _darkrate = _darkrate / self._effGain  # DN/s
        return _darkrate

    def _getDefaultReadnoise(self):
        """Default read noise for camera 3."""
        _rdnoise = 29.0  # electrons
        if self.proc_unit == 'native':
            _rdnoise = _rdnoise / self._effGain  # ADU
        return _rdnoise

    def setInstrumentParameters(self, instrpars):
        """This method overrides the superclass to set default values into
        the parameter dictionary, in case empty entries are provided.

        Raises
        ------
        ValueError
            If the gain or exposure time cannot be determined for a chip.
        """
        pri_header = self._image[0].header
        self.proc_unit = instrpars['proc_unit']

        if self._isNotValid(instrpars['gain'], instrpars['gnkeyword']):
            instrpars['gnkeyword'] = 'ADCGAIN'
        if self._isNotValid(instrpars['rdnoise'], instrpars['rnkeyword']):
            instrpars['rnkeyword'] = None
        if self._isNotValid(instrpars['exptime'], instrpars['expkeyword']):
            instrpars['expkeyword'] = 'EXPTIME'

        for chip in self.returnAllChips(extname=self.scienceExt):
            chip._gain = 6.5  # measured gain
            chip._rdnoise = self.getInstrParameter(
                instrpars['rdnoise'], pri_header, instrpars['rnkeyword']
            )
            chip._exptime = self.getInstrParameter(
                instrpars['exptime'], pri_header, instrpars['expkeyword']
            )

            # BUG FIX: the original tested ``self._exptime``, which is never
            # assigned on the image object; the per-chip value set just above
            # is what must be validated.
            if chip._gain is None or chip._exptime is None:
                print('ERROR: invalid instrument task parameter')
                raise ValueError('ERROR: invalid instrument task parameter')

            # We need to treat Read Noise as a special case since it is
            # not populated in the NICMOS primary header
            if chip._rdnoise is None:
                chip._rdnoise = self._getDefaultReadnoise()

            chip._darkrate = self._getDarkRate()
            chip.darkcurrent = self.getdarkcurrent()

            chip._effGain = chip._gain
            # this is used in the static mask, static mask name also defined
            # here, must be done after outputNames
            self._assignSignature(chip._chip)

        # Convert the science data to electrons if specified by the user.
        self.doUnitConversions()
| [
"nictools.readTDD.fromcalfile",
"stsci.tools.fileutil.openImage",
"numpy.ones"
] | [((1960, 2025), 'stsci.tools.fileutil.openImage', 'fileutil.openImage', (['self._filename'], {'mode': '"""readonly"""', 'memmap': '(False)'}), "(self._filename, mode='readonly', memmap=False)\n", (1978, 2025), False, 'from stsci.tools import fileutil\n'), ((6120, 6150), 'nictools.readTDD.fromcalfile', 'readTDD.fromcalfile', (['self.name'], {}), '(self.name)\n', (6139, 6150), False, 'from nictools import readTDD\n'), ((6198, 6246), 'numpy.ones', 'np.ones', (['self.full_shape'], {'dtype': 'self.image_dtype'}), '(self.full_shape, dtype=self.image_dtype)\n', (6205, 6246), True, 'import numpy as np\n')] |
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate examples of two objects moving in different directions."""
import random
import sys
import numpy as np
from six.moves import xrange
import tensorflow as tf
# Command-line flag: destination path for the generated tfrecords
# (the held-out split is written alongside it with a '_test' suffix).
tf.flags.DEFINE_string('out_file', '',
                       'Output file for the tfrecords.')
def _add_object(obj_type, image, image2, xpos, ypos):
  """Stamp a randomly sized moving object into two consecutive frames.

  'rectangle' draws a lower-triangular shape that moves along x;
  'square' draws a filled square that moves along y.  Both frames are
  modified in place on a single randomly chosen color channel.
  """
  size = random.randint(8, 10)
  channel = random.randint(0, 2)
  shift = random.randint(6, 10)
  stamp = np.zeros([size, size, 3])
  if obj_type == 'rectangle':
    xpos2, ypos2 = xpos + shift, ypos
    for row in range(size):
      stamp[row, :row + 1, channel] = 1.0
  elif obj_type == 'square':
    xpos2, ypos2 = xpos, ypos + shift
    stamp[:, :, channel] = 1.0
  # Copy every lit stamp pixel into frame 1 at (xpos, ypos) and into
  # frame 2 at the shifted position.
  for dx in range(size):
    for dy in range(size):
      if stamp[dx, dy, channel] == 1.0:
        image[xpos + dx, ypos + dy, channel] = 1.0
        image2[xpos2 + dx, ypos2 + dy, channel] = 1.0
def _images_to_example(image, image2):
  """Pack two consecutive frames into a tf.SequenceExample.

  Each frame is flattened and appended, in order, to the
  'moving_objs' feature list.
  """
  example = tf.SequenceExample()
  frames = example.feature_lists.feature_list['moving_objs']
  for frame in (image, image2):
    flat = np.reshape(frame, [-1]).tolist()
    frames.feature.add().float_list.value.extend(flat)
  return example
def generate_input():
  """Generate tfrecords of two-object motion examples.

  Enumerates all grid placements of the two objects, shuffles the
  resulting examples, and writes every 10th one to a held-out test
  file ('<out_file>_test'); the rest go to the training file.
  """
  train_writer = tf.python_io.TFRecordWriter(tf.flags.FLAGS.out_file)
  test_writer = tf.python_io.TFRecordWriter(tf.flags.FLAGS.out_file + '_test')
  examples = []
  positions = range(0, 40, 3)
  for xpos in positions:
    for ypos in positions:
      for xpos2 in positions:
        for ypos2 in positions:
          frame_a = np.zeros([64, 64, 3])
          frame_b = np.zeros([64, 64, 3])
          _add_object('rectangle', frame_a, frame_b, xpos, ypos)
          _add_object('square', frame_a, frame_b, xpos2, ypos2)
          examples.append(_images_to_example(frame_a, frame_b))
  sys.stderr.write('Finish generating examples.\n')
  random.shuffle(examples)
  for count, ex in enumerate(examples):
    if count % 10 == 0:
      test_writer.write(ex.SerializeToString())
    else:
      train_writer.write(ex.SerializeToString())
def main(_):
  """tf.app entry point; the unused argument receives leftover argv."""
  generate_input()
if __name__ == '__main__':
  tf.app.run()
| [
"tensorflow.flags.DEFINE_string",
"numpy.reshape",
"random.shuffle",
"sys.stderr.write",
"numpy.zeros",
"six.moves.xrange",
"tensorflow.SequenceExample",
"tensorflow.python_io.TFRecordWriter",
"random.randint",
"tensorflow.app.run"
] | [((859, 931), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""out_file"""', '""""""', '"""Output file for the tfrecords."""'], {}), "('out_file', '', 'Output file for the tfrecords.')\n", (881, 931), True, 'import tensorflow as tf\n'), ((1080, 1101), 'random.randint', 'random.randint', (['(8)', '(10)'], {}), '(8, 10)\n', (1094, 1101), False, 'import random\n'), ((1116, 1136), 'random.randint', 'random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (1130, 1136), False, 'import random\n'), ((1148, 1169), 'random.randint', 'random.randint', (['(6)', '(10)'], {}), '(6, 10)\n', (1162, 1169), False, 'import random\n'), ((1181, 1214), 'numpy.zeros', 'np.zeros', (['[obj_size, obj_size, 3]'], {}), '([obj_size, obj_size, 3])\n', (1189, 1214), True, 'import numpy as np\n'), ((1526, 1542), 'six.moves.xrange', 'xrange', (['obj_size'], {}), '(obj_size)\n', (1532, 1542), False, 'from six.moves import xrange\n'), ((1854, 1874), 'tensorflow.SequenceExample', 'tf.SequenceExample', ([], {}), '()\n', (1872, 1874), True, 'import tensorflow as tf\n'), ((2253, 2305), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (['tf.flags.FLAGS.out_file'], {}), '(tf.flags.FLAGS.out_file)\n', (2280, 2305), True, 'import tensorflow as tf\n'), ((2320, 2382), 'tensorflow.python_io.TFRecordWriter', 'tf.python_io.TFRecordWriter', (["(tf.flags.FLAGS.out_file + '_test')"], {}), "(tf.flags.FLAGS.out_file + '_test')\n", (2347, 2382), True, 'import tensorflow as tf\n'), ((2418, 2434), 'six.moves.xrange', 'xrange', (['(0)', '(40)', '(3)'], {}), '(0, 40, 3)\n', (2424, 2434), False, 'from six.moves import xrange\n'), ((2884, 2933), 'sys.stderr.write', 'sys.stderr.write', (['"""Finish generating examples.\n"""'], {}), "('Finish generating examples.\\n')\n", (2900, 2933), False, 'import sys\n'), ((2938, 2962), 'random.shuffle', 'random.shuffle', (['examples'], {}), '(examples)\n', (2952, 2962), False, 'import random\n'), ((3215, 3227), 'tensorflow.app.run', 'tf.app.run', ([], 
{}), '()\n', (3225, 3227), True, 'import tensorflow as tf\n'), ((1313, 1329), 'six.moves.xrange', 'xrange', (['obj_size'], {}), '(obj_size)\n', (1319, 1329), False, 'from six.moves import xrange\n'), ((1561, 1577), 'six.moves.xrange', 'xrange', (['obj_size'], {}), '(obj_size)\n', (1567, 1577), False, 'from six.moves import xrange\n'), ((2456, 2472), 'six.moves.xrange', 'xrange', (['(0)', '(40)', '(3)'], {}), '(0, 40, 3)\n', (2462, 2472), False, 'from six.moves import xrange\n'), ((2499, 2515), 'six.moves.xrange', 'xrange', (['(0)', '(40)', '(3)'], {}), '(0, 40, 3)\n', (2505, 2515), False, 'from six.moves import xrange\n'), ((2021, 2044), 'numpy.reshape', 'np.reshape', (['image', '[-1]'], {}), '(image, [-1])\n', (2031, 2044), True, 'import numpy as np\n'), ((2132, 2156), 'numpy.reshape', 'np.reshape', (['image2', '[-1]'], {}), '(image2, [-1])\n', (2142, 2156), True, 'import numpy as np\n'), ((2546, 2562), 'six.moves.xrange', 'xrange', (['(0)', '(40)', '(3)'], {}), '(0, 40, 3)\n', (2552, 2562), False, 'from six.moves import xrange\n'), ((1384, 1397), 'six.moves.xrange', 'xrange', (['(i + 1)'], {}), '(i + 1)\n', (1390, 1397), False, 'from six.moves import xrange\n'), ((2592, 2613), 'numpy.zeros', 'np.zeros', (['[64, 64, 3]'], {}), '([64, 64, 3])\n', (2600, 2613), True, 'import numpy as np\n'), ((2643, 2664), 'numpy.zeros', 'np.zeros', (['[64, 64, 3]'], {}), '([64, 64, 3])\n', (2651, 2664), True, 'import numpy as np\n')] |
#!/usr/bin/python
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def user_bias_update(A, u, v, mu, c):
    """Closed-form coordinate update for the per-user bias terms.

    For every user i:
        b_i = -(1/n) * sum_j (u_i . v_j + c_j + mu - A_ij)

    Args:
        A: (m, n) ratings matrix.
        u: (m, k) user factor matrix.
        v: (n, k) movie factor matrix.
        mu: global mean rating.
        c: (n,) movie bias vector.

    Returns:
        (m,) updated user bias vector.
    """
    # Residual of the bias-free prediction against A for every (i, j) pair at
    # once, replacing the original O(m*n) Python loop (which also relied on
    # the Python-2-only name ``xrange``).
    residual = np.dot(u, v.T) + c[np.newaxis, :] + mu - A
    return -residual.mean(axis=1)
def user_vector_update(A, v, k, mu, b, c):
    """Least-squares coordinate update for all user factor vectors.

    Solves, for every user i,
        (v^T v) u_i = -sum_j (b_i + c_j + mu - A_ij) v_j

    Args:
        A: (m, n) ratings matrix.
        v: (n, k) movie factor matrix.
        k: latent dimensionality (kept for signature compatibility;
           it is implied by v's shape).
        mu: global mean rating.
        b: (m,) user bias vector.
        c: (n,) movie bias vector.

    Returns:
        (m, k) updated user factor matrix.
    """
    # W[i, j] = b_i + c_j + mu - A_ij: the bias-only residual.
    W = b[:, np.newaxis] + c[np.newaxis, :] + mu - A
    gram = np.dot(v.T, v)   # (k, k) Gram matrix of the movie factors
    rhs = np.dot(W, v)     # (m, k) stacked right-hand sides
    # Solve gram @ u_i = rhs_i for all rows at once; np.linalg.solve is more
    # stable than forming the explicit inverse as the original loop did (and
    # the loop depended on the Python-2-only ``xrange``).
    return -np.linalg.solve(gram, rhs.T).T
def movie_bias_update(A, u, v, mu, b):
    """Closed-form coordinate update for the per-movie bias terms.

    For every movie j:
        c_j = -(1/m) * sum_i (u_i . v_j + b_i + mu - A_ij)

    Args:
        A: (m, n) ratings matrix.
        u: (m, k) user factor matrix.
        v: (n, k) movie factor matrix.
        mu: global mean rating.
        b: (m,) user bias vector.

    Returns:
        (n,) updated movie bias vector.
    """
    # Vectorized over all (i, j) pairs, replacing the original nested Python
    # loop (which also relied on the Python-2-only name ``xrange``).
    residual = np.dot(u, v.T) + b[:, np.newaxis] + mu - A
    return -residual.mean(axis=0)
def movie_vector_update(A, u, k, mu, b, c):
    """Least-squares coordinate update for all movie factor vectors.

    Solves, for every movie j,
        (u^T u) v_j = -sum_i (b_i + c_j + mu - A_ij) u_i

    Args:
        A: (m, n) ratings matrix.
        u: (m, k) user factor matrix.
        k: latent dimensionality (kept for signature compatibility;
           it is implied by u's shape).
        mu: global mean rating.
        b: (m,) user bias vector.
        c: (n,) movie bias vector.

    Returns:
        (n, k) updated movie factor matrix.
    """
    # W[i, j] = b_i + c_j + mu - A_ij: the bias-only residual.
    W = b[:, np.newaxis] + c[np.newaxis, :] + mu - A
    gram = np.dot(u.T, u)   # (k, k) Gram matrix of the user factors
    rhs = np.dot(W.T, u)   # (n, k) stacked right-hand sides
    # Solve gram @ v_j = rhs_j for all rows at once (more stable than the
    # original explicit inverse; also drops the Python-2-only ``xrange``).
    return -np.linalg.solve(gram, rhs.T).T
def log_update(A, u, v, T, mu, b, c):
    """Gaussian log-likelihood of the model, up to additive constants.

        -0.5 * sum_{i,j} (u_i . v_j + b_i + c_j + mu - A_ij)^2

    Args:
        A: (m, n) ratings matrix.
        u: (m, k) user factor matrix.
        v: (n, k) movie factor matrix.
        T: unused; kept for signature compatibility with callers.
        mu: global mean rating.
        b: (m,) user bias vector.
        c: (n,) movie bias vector.

    Returns:
        Scalar log-likelihood value.
    """
    # Vectorized over all (i, j) pairs, replacing the original nested Python
    # loop (which also relied on the Python-2-only name ``xrange``).
    residual = np.dot(u, v.T) + b[:, np.newaxis] + c[np.newaxis, :] + mu - A
    return -0.5 * np.sum(residual ** 2)
def alt_least_squares(A, k, T):
    """Biased matrix factorization fit by alternating least squares.

    Inputs:
        A: input data with 1-based integer columns "i", "j" and a
           "ratings" column (a DataFrame or dict of sequences)
        k: number of dimensions for movie vectors & user vectors
        T: number of iterations
    Output:
        (T,) array with the log-likelihood after each iteration
    """
    # Average observed rating, used as the global offset mu.
    mu = np.mean(A["ratings"])

    # m users x n movies; ids in the data are 1-based.
    m = max(A["i"])
    n = max(A["j"])
    omega = len(A["i"])  # number of observed ratings

    # Densify the ratings into an m x n matrix (missing entries stay 0).
    A_matrix = np.zeros([m, n])
    for l in range(omega):  # range, not the Python-2-only ``xrange``
        A_matrix[A["i"][l] - 1][A["j"][l] - 1] = A["ratings"][l]

    # Draw the initial user (u) and movie (v) factors i.i.d. from N(0, I/k);
    # the draw order (all of u, then all of v) matches the original code.
    mean_vect = np.array([0] * k)
    cov_matrix = (1. / k) * np.identity(k)
    u = np.array([np.random.multivariate_normal(mean_vect, cov_matrix)
                  for _ in range(m)])
    v = np.array([np.random.multivariate_normal(mean_vect, cov_matrix)
                  for _ in range(n)])

    # Bias terms start at zero.
    b = np.array([0.] * m)
    c = np.array([0.] * n)

    log_like = np.zeros([T])
    for t in range(T):
        # One full round of coordinate updates: biases, then factors.
        b = user_bias_update(A_matrix, u, v, mu, c)
        u = user_vector_update(A_matrix, v, k, mu, b, c)
        c = movie_bias_update(A_matrix, u, v, mu, b)
        v = movie_vector_update(A_matrix, u, k, mu, b, c)
        log_like[t] = log_update(A_matrix, u, v, T, mu, b, c)
    return log_like
if __name__ == '__main__':
    # Load the toy ratings triples: (user id, movie id, rating).
    ratings_fake = pd.read_csv("ratings_fake.csv",
                               quotechar='"', encoding='UTF-8',
                               names=["i", "j", "ratings"])
    n_features = 5
    n_iter = 20
    # Fit ALS and plot how the log-likelihood evolves per iteration.
    log_fake = alt_least_squares(ratings_fake, n_features, n_iter)
    plt.plot(range(20), log_fake)
    plt.axis([0, 25, min(log_fake) - 100, max(log_fake) + 100])
    plt.xlabel('Iteration Number')
    plt.ylabel('Log-Likelihood')
    plt.title('Log Likelihood of Alternating Least Squares')
    plt.show()
    plt.close()
| [
"numpy.identity",
"numpy.mean",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"numpy.random.multivariate_normal",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.dot",
"numpy.zeros",
"numpy.linalg.inv",
"matplotlib.pyplot.title",
"numpy.shape",
"matplotlib.pyp... | [((184, 203), 'numpy.array', 'np.array', (['([0.0] * m)'], {}), '([0.0] * m)\n', (192, 203), True, 'import numpy as np\n'), ((444, 460), 'numpy.zeros', 'np.zeros', (['[m, k]'], {}), '([m, k])\n', (452, 460), True, 'import numpy as np\n'), ((476, 490), 'numpy.dot', 'np.dot', (['v.T', 'v'], {}), '(v.T, v)\n', (482, 490), True, 'import numpy as np\n'), ((904, 923), 'numpy.array', 'np.array', (['([0.0] * n)'], {}), '([0.0] * n)\n', (912, 923), True, 'import numpy as np\n'), ((1165, 1181), 'numpy.zeros', 'np.zeros', (['[n, k]'], {}), '([n, k])\n', (1173, 1181), True, 'import numpy as np\n'), ((1197, 1211), 'numpy.dot', 'np.dot', (['u.T', 'u'], {}), '(u.T, u)\n', (1203, 1211), True, 'import numpy as np\n'), ((2090, 2111), 'numpy.mean', 'np.mean', (["A['ratings']"], {}), "(A['ratings'])\n", (2097, 2111), True, 'import numpy as np\n'), ((2298, 2314), 'numpy.zeros', 'np.zeros', (['[m, n]'], {}), '([m, n])\n', (2306, 2314), True, 'import numpy as np\n'), ((2424, 2441), 'numpy.array', 'np.array', (['([0] * k)'], {}), '([0] * k)\n', (2432, 2441), True, 'import numpy as np\n'), ((2738, 2749), 'numpy.array', 'np.array', (['u'], {}), '(u)\n', (2746, 2749), True, 'import numpy as np\n'), ((2758, 2769), 'numpy.array', 'np.array', (['v'], {}), '(v)\n', (2766, 2769), True, 'import numpy as np\n'), ((2778, 2797), 'numpy.array', 'np.array', (['([0.0] * m)'], {}), '([0.0] * m)\n', (2786, 2797), True, 'import numpy as np\n'), ((2805, 2824), 'numpy.array', 'np.array', (['([0.0] * n)'], {}), '([0.0] * n)\n', (2813, 2824), True, 'import numpy as np\n'), ((2864, 2877), 'numpy.zeros', 'np.zeros', (['[T]'], {}), '([T])\n', (2872, 2877), True, 'import numpy as np\n'), ((3367, 3464), 'pandas.read_csv', 'pd.read_csv', (['"""ratings_fake.csv"""'], {'quotechar': '"""\\""""', 'encoding': '"""UTF-8"""', 'names': "['i', 'j', 'ratings']"}), '(\'ratings_fake.csv\', quotechar=\'"\', encoding=\'UTF-8\', names=[\'i\',\n \'j\', \'ratings\'])\n', (3378, 3464), True, 'import pandas as 
pd\n'), ((3729, 3759), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration Number"""'], {}), "('Iteration Number')\n", (3739, 3759), True, 'import matplotlib.pyplot as plt\n'), ((3764, 3792), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Log-Likelihood"""'], {}), "('Log-Likelihood')\n", (3774, 3792), True, 'import matplotlib.pyplot as plt\n'), ((3797, 3853), 'matplotlib.pyplot.title', 'plt.title', (['"""Log Likelihood of Alternating Least Squares"""'], {}), "('Log Likelihood of Alternating Least Squares')\n", (3806, 3853), True, 'import matplotlib.pyplot as plt\n'), ((3858, 3868), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3866, 3868), True, 'import matplotlib.pyplot as plt\n'), ((3873, 3884), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3882, 3884), True, 'import matplotlib.pyplot as plt\n'), ((138, 149), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (146, 149), True, 'import numpy as np\n'), ((161, 172), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (169, 172), True, 'import numpy as np\n'), ((398, 409), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (406, 409), True, 'import numpy as np\n'), ((421, 432), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (429, 432), True, 'import numpy as np\n'), ((536, 549), 'numpy.zeros', 'np.zeros', (['[k]'], {}), '([k])\n', (544, 549), True, 'import numpy as np\n'), ((858, 869), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (866, 869), True, 'import numpy as np\n'), ((881, 892), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (889, 892), True, 'import numpy as np\n'), ((1119, 1130), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (1127, 1130), True, 'import numpy as np\n'), ((1142, 1153), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (1150, 1153), True, 'import numpy as np\n'), ((1257, 1270), 'numpy.zeros', 'np.zeros', (['[k]'], {}), '([k])\n', (1265, 1270), True, 'import numpy as np\n'), ((1595, 1606), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (1603, 1606), True, 
'import numpy as np\n'), ((1618, 1629), 'numpy.shape', 'np.shape', (['A'], {}), '(A)\n', (1626, 1629), True, 'import numpy as np\n'), ((2470, 2484), 'numpy.identity', 'np.identity', (['k'], {}), '(k)\n', (2481, 2484), True, 'import numpy as np\n'), ((2548, 2600), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean_vect', 'cov_matrix'], {}), '(mean_vect, cov_matrix)\n', (2577, 2600), True, 'import numpy as np\n'), ((2643, 2695), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mean_vect', 'cov_matrix'], {}), '(mean_vect, cov_matrix)\n', (2672, 2695), True, 'import numpy as np\n'), ((759, 782), 'numpy.linalg.inv', 'np.linalg.inv', (['v_matrix'], {}), '(v_matrix)\n', (772, 782), True, 'import numpy as np\n'), ((1480, 1503), 'numpy.linalg.inv', 'np.linalg.inv', (['u_matrix'], {}), '(u_matrix)\n', (1493, 1503), True, 'import numpy as np\n'), ((288, 308), 'numpy.dot', 'np.dot', (['u[i]', 'v[j].T'], {}), '(u[i], v[j].T)\n', (294, 308), True, 'import numpy as np\n'), ((1008, 1028), 'numpy.dot', 'np.dot', (['u[i]', 'v[j].T'], {}), '(u[i], v[j].T)\n', (1014, 1028), True, 'import numpy as np\n'), ((1741, 1761), 'numpy.dot', 'np.dot', (['u[i]', 'v[j].T'], {}), '(u[i], v[j].T)\n', (1747, 1761), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import rospy
import numpy as np
import math
from geometry_msgs.msg import PoseStamped, TwistStamped
from styx_msgs.msg import Lane, Waypoint
from std_msgs.msg import Int32
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
LOOKAHEAD_WPS = 50 #200 # Number of waypoints we will publish. You can change this number
NORMAL_DECEL = 4 # m/s^2
MAX_DECEL = 9.5 # m/s^2
NORMAL_ACCEL = 6 # m/s^2
VELOCITY_30MPH = 2.77 # m/s  NOTE(review): 2.77 m/s is ~6.2 mph, not 30 mph -- name looks misleading, confirm intended cruise speed
REFRESH_RATE = 10 #50 # Hz
STOP_OFFSET = 8  # number of waypoints before the reported stop index at which the car should halt
class WaypointUpdater(object):
    """Publish the next LOOKAHEAD_WPS waypoints ahead of the vehicle on
    /final_waypoints, shaping per-waypoint target velocities for red traffic
    lights.  The stop index arrives on /traffic_waypoint and is shifted back
    by STOP_OFFSET in traffic_cb.
    """
    def __init__(self):
        """Register subscribers/publisher, then run the publish loop.

        NOTE: __init__ never returns; it loops at REFRESH_RATE until
        rospy reports shutdown.
        """
        rospy.init_node('waypoint_updater')
        # state filled in by the subscriber callbacks below
        self.current_pose = None
        self.base_waypoints = None
        self.stop_waypoint_idx = 752 #750 #286
        #self.stopped_time = 0.0
        rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        # TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below
        rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)
        # rospy.Subscriber('/obstacle_waypoint', Waypoint, self.obstacle_cb)
        rospy.Subscriber('/current_velocity', TwistStamped, self.velocity_cb)
        self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)
        # TODO: Add other member variables you need below
        #rospy.spin()
        self.rate = rospy.Rate(REFRESH_RATE) # 50hz sampling rate
        while not rospy.is_shutdown():
            # rospy.loginfo("WaypointUpdater goes to loop")
            self.loop()
            # rospy.loginfo("Vehicle stopped time: %d", self.stopped_time)
            # if self.stopped_time >= 10: # vehicle has stopped for over 10 seconds
            #     self.stop_waypoint_idx += 400
            #     self.stopped_time = 0.0

    def loop(self):
        """One publish cycle.

        1. Find the base waypoint nearest the current pose.
        2. Keep it only if it lies ahead of the car's heading; otherwise
           start from the next waypoint.  Collect LOOKAHEAD_WPS waypoints.
        3. Shape the velocity of each lookahead waypoint for red lights
           (accelerate to VELOCITY_30MPH, brake, or hold at zero).
        4. Publish the Lane message and sleep to hold REFRESH_RATE.
        """
        # wait until both callbacks have delivered data
        if (self.current_pose is None) or (self.base_waypoints is None):
            return
        # step 1. find out the nearest waypoint to the current position
        # current x & y coordinates. Shall we include z???
        current_pose_x = self.current_pose.pose.position.x
        current_pose_y = self.current_pose.pose.position.y
        current_pose_z = self.current_pose.pose.position.z
        shortest_distance = +np.inf
        nearest_waypoint_idx = 0
        roll, pitch, yaw = quaternion_to_euler_angle(self.current_pose.pose.orientation.w,
                                                      self.current_pose.pose.orientation.x,
                                                      self.current_pose.pose.orientation.y,
                                                      self.current_pose.pose.orientation.z)
        # for each waypoint of the base_waypoints, calculate the distance from the current position, find out the nearest waypoint index
        for i in range(len(self.base_waypoints)):
            # base waypoint x & y coordinates.
            base_waypoint_x = self.base_waypoints[i].pose.pose.position.x
            base_waypoint_y = self.base_waypoints[i].pose.pose.position.y
            base_waypoint_z = self.base_waypoints[i].pose.pose.position.z
            distance = np.sqrt((current_pose_x - base_waypoint_x)**2 + (current_pose_y - base_waypoint_y)**2 + (current_pose_z - base_waypoint_z)**2)
            if distance < shortest_distance:
                shortest_distance = distance
                nearest_waypoint_idx = i
        # rospy.loginfo("nearest waypoint index is %d", nearest_waypoint_idx)
        # step 2. the nearest waypoint might be behind the car, we need to check if the nearest waypoint is at the current heading direction. We need to utilize the orientation info from the PoseStampd message
        nearest_waypoint_x = self.base_waypoints[nearest_waypoint_idx].pose.pose.position.x
        nearest_waypoint_y = self.base_waypoints[nearest_waypoint_idx].pose.pose.position.y
        wp_yaw = np.arctan2((nearest_waypoint_y - current_pose_y), (nearest_waypoint_x - current_pose_x)) # I`m not too sure about this part
        # calculate the angle between car's yaw and wp_yaw, only accept the waypoint if the angle is less than 45 degree, otherwise, use the next waypoint as the first lookahead waypoint. Then append the next 200 base waypoints as the lookahead waypoints. Rollover to the first base waypoint when the loop reaches the end of the base waypoint list.
        # NOTE(review): yaw is in degrees (quaternion_to_euler_angle uses
        # math.degrees) while wp_yaw is in radians (np.arctan2), so theta
        # mixes units before the np.pi/2 comparison below -- confirm intended.
        theta = yaw - wp_yaw
        lookahead_waypoints = []
        if abs(theta) < np.pi/2:
            # nearest waypoint is ahead: start the lookahead list from it
            for i in range(LOOKAHEAD_WPS):
                waypoint_idx = (nearest_waypoint_idx + i) % len(self.base_waypoints)
                lookahead_waypoints.append(self.base_waypoints[waypoint_idx])
        else:
            # nearest waypoint is behind: start from the one after it
            for i in range(LOOKAHEAD_WPS):
                waypoint_idx = (nearest_waypoint_idx + 1 + i) % len(self.base_waypoints)
                lookahead_waypoints.append(self.base_waypoints[waypoint_idx])
        # step 3.
        if self.stop_waypoint_idx is not None:
            # traffic_cb stores msg.data - STOP_OFFSET, so "no red light"
            # (msg.data == -1) arrives here as -1 - STOP_OFFSET
            if self.stop_waypoint_idx == -1 - STOP_OFFSET:
                # no red light detected, adjust current velocity to 30MPH
                # calculate the distance the vehicle needs to travel from current velocity to 30mph
                # d=(vc^2-vo^2)/2a
                dist_to_30mph = (VELOCITY_30MPH**2 - self.current_velocity**2) / (2*NORMAL_ACCEL)
                accel_per_dist = (VELOCITY_30MPH - self.current_velocity) / (dist_to_30mph + 1e-12)
                # update the velocity of the lookahead_waypoints
                for i in range(nearest_waypoint_idx, nearest_waypoint_idx+LOOKAHEAD_WPS):
                    dist_curr_to_i = self.distance(self.base_waypoints, nearest_waypoint_idx, i+1)
                    increased_v = dist_curr_to_i * accel_per_dist
                    velocity_i = self.current_velocity + increased_v
                    # cap the ramp at the target speed
                    velocity_i = velocity_i if velocity_i < VELOCITY_30MPH else VELOCITY_30MPH
                    self.set_waypoint_velocity(lookahead_waypoints, i-nearest_waypoint_idx, velocity_i)
            else:
                rospy.loginfo("stop_waypoint_idx is %d", self.stop_waypoint_idx)
                # red light detected
                # calculate the normal braking distance from the current_velocity
                # a=(vc-v0)/t, d=((vc+v0)/2)*t, v0=0 --> d=vc^2/(2*a)
                normal_brake_dist = (self.current_velocity**2)/(2*NORMAL_DECEL)
                # calculate the distance between the current position and the red light stop position. use the nearest waypoint as the current position
                dist_to_stop = self.distance(self.base_waypoints, nearest_waypoint_idx, self.stop_waypoint_idx)
                # if the car is getting close to the red light, start braking, otherwise, keep constant speed
                if dist_to_stop <= normal_brake_dist and dist_to_stop > 3:
                    #rospy.loginfo("if cond1: brake, current_velocity is %f", self.current_velocity)
                    # linear velocity ramp-down toward the stop line
                    decel_per_dist = self.current_velocity / (dist_to_stop + 1e-12) #* 2 # provide a factor of 1.5 to be safe
                    for i in range(nearest_waypoint_idx, self.stop_waypoint_idx):
                        dist_curr_to_i = self.distance(self.base_waypoints, nearest_waypoint_idx, i+1)
                        reduced_v = dist_curr_to_i * decel_per_dist
                        velocity_i = self.current_velocity - reduced_v
                        velocity_i = velocity_i if velocity_i > 0 else 0.0
                        # only the first LOOKAHEAD_WPS waypoints are published
                        if i < nearest_waypoint_idx + LOOKAHEAD_WPS:
                            self.set_waypoint_velocity(lookahead_waypoints, i-nearest_waypoint_idx, velocity_i)
                elif dist_to_stop <= 3:
                    #rospy.loginfo("if cond2: stop, current_velocity is %f", self.current_velocity)
                    # essentially at the stop line: command zero everywhere
                    for i in range(nearest_waypoint_idx, nearest_waypoint_idx+LOOKAHEAD_WPS):
                        if i < nearest_waypoint_idx + LOOKAHEAD_WPS:
                            self.set_waypoint_velocity(lookahead_waypoints, i-nearest_waypoint_idx, 0.0)
                # adjust velocity up to 30mph if current velocity is slow and vehicle is still away from red light
                elif dist_to_stop > 3 and dist_to_stop > 2*normal_brake_dist and self.current_velocity < VELOCITY_30MPH:
                    #rospy.loginfo("if cond2: stop, current_velocity is %f", self.current_velocity)
                    # calculate the distance the vehicle needs to travel from current velocity to 30mph
                    # d=(vc^2-vo^2)/2a
                    dist_to_30mph = (VELOCITY_30MPH**2 - self.current_velocity**2) / (2*NORMAL_ACCEL)
                    accel_per_dist = (VELOCITY_30MPH - self.current_velocity) / (dist_to_30mph + 1e-12)
                    # update the velocity of the lookahead_waypoints
                    for i in range(nearest_waypoint_idx, nearest_waypoint_idx+LOOKAHEAD_WPS):
                        dist_curr_to_i = self.distance(self.base_waypoints, nearest_waypoint_idx, i+1)
                        increased_v = dist_curr_to_i * accel_per_dist
                        velocity_i = self.current_velocity + increased_v
                        velocity_i = velocity_i if velocity_i < VELOCITY_30MPH else VELOCITY_30MPH
                        self.set_waypoint_velocity(lookahead_waypoints, i-nearest_waypoint_idx, velocity_i)
        # rospy.loginfo("current_velocity: %f", self.current_velocity)
        # if self.current_velocity <= 1.0:
        #     self.stopped_time = self.stopped_time + 0.02 #1/REFRESH_RATE
        # if dist_to_stop <= normal_brake_dist:
        #     decel = (self.current_velocity**2)/(2*dist_to_stop + 1e-12)
        #     if decel > MAX_DECEL:
        #         decel = MAX_DECEL
        #     # calculate the velocity for each waypoint between the current position and red light stop line
        #     for i in range(nearest_waypoint_idx, self.stop_waypoint_idx+1):
        #         dist_curr_to_i = self.distance(self.base_waypoints, nearest_waypoint_idx, i)
        #         # vi = sqrt(vc^2-2*a*d)
        #         velocity_i = np.sqrt(self.current_velocity**2 - 2*decel*dist_curr_to_i)
        #         # set velocity for each waypoint in the lookahead_waypoints
        #         if i < nearest_waypoint_idx + LOOKAHEAD_WPS:
        #             self.set_waypoint_velocity(lookahead_waypoints, i-nearest_waypoint_idx, velocity_i)
        #         if i == 0:
        #             rospy.loginfo(velocity_i)
        # rospy.loginfo(nearest_waypoint_idx)
        # create an empty Lane message to hold the lookahead_waypoints
        lane = Lane()
        lane.waypoints = lookahead_waypoints
        # rospy.loginfo("waypoint 0 velocity %f", lane.waypoints[0].twist.twist.linear.x)
        # rospy.loginfo("waypoint 1 velocity %f", lane.waypoints[1].twist.twist.linear.x)
        # rospy.loginfo("waypoint 2 velocity %f", lane.waypoints[2].twist.twist.linear.x)
        self.final_waypoints_pub.publish(lane)
        self.rate.sleep()

    def pose_cb(self, msg):
        # TODO: Implement
        '''Cache the latest vehicle pose for use by loop().

        msg type geometry_msgs/PoseStamped
            geometry_msgs/Pose pose
                geometry_msgs/Point position
                    float64 x
                    float64 y
                    float64 z
                geometry_msgs/Quaternion orientation
                    float64 x
                    float64 y
                    float64 z
                    float64 w
        '''
        self.current_pose = msg
        #pass

    def waypoints_cb(self, waypoints):
        # TODO: Implement
        '''Cache the full track waypoint list (published once at startup).

        waypoints message type styx_msgs/Lane
            styx_msgs/Waypoint[] waypoints
                geometry_msgs/PoseStamped pose
                    std_msgs/Header header
                        uint32 seq
                        time stamp
                        string frame_id
                    geometry_msgs/Pose pose
                        geometry_msgs/Point position
                            float64 x
                            float64 y
                            float64 z
                        geometry_msgs/Quaternion orientation
                            float64 x
                            float64 y
                            float64 z
                            float64 w
                geometry_msgs/TwistStamped twist
                    std_msgs/Header header
                        uint32 seq
                        time stamp
                        string frame_id
                    geometry_msgs/Twist twist
                        geometry_msgs/Vector3 linear
                            float64 x
                            float64 y
                            float64 z
                        geometry_msgs/Vector3 angular
                            float64 x
                            float64 y
                            float64 z
        '''
        # get the waypoint list from the Lane message
        self.base_waypoints = waypoints.waypoints
        #pass

    def traffic_cb(self, msg):
        # TODO: Callback for /traffic_waypoint message. Implement
        # shift the reported stop index back by STOP_OFFSET so the car
        # stops before the stop line; msg.data == -1 (no red light)
        # becomes -1 - STOP_OFFSET, which loop() checks for explicitly
        self.stop_waypoint_idx = msg.data - STOP_OFFSET
        #pass

    def obstacle_cb(self, msg):
        # TODO: Callback for /obstacle_waypoint message. We will implement it later
        pass

    def velocity_cb(self, msg):
        '''Cache the vehicle's current longitudinal speed (m/s).

        msg type geometry_msgs/TwistStamped
            geometry_msgs/Twist twist
                geometry_msgs/Vector3 linear
                    float64 x
                    float64 y
                    float64 z
                geometry_msgs/Vector3 angular
                    float64 x
                    float64 y
                    float64 z
        '''
        # get the vehicle's current velocity from the simulator
        self.current_velocity = msg.twist.linear.x
        #pass

    def get_waypoint_velocity(self, waypoint):
        """Return the target linear velocity stored on a single waypoint."""
        return waypoint.twist.twist.linear.x

    def set_waypoint_velocity(self, waypoints, waypoint, velocity):
        """Set the target linear velocity of waypoints[waypoint] in place."""
        waypoints[waypoint].twist.twist.linear.x = velocity

    def distance(self, waypoints, wp1, wp2):
        """Cumulative Euclidean path length along waypoints from wp1 to wp2
        (inclusive); the first iteration contributes zero (wp1 to itself).
        """
        dist = 0
        dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
        for i in range(wp1, wp2+1):
            dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
            wp1 = i
        return dist
def quaternion_to_euler_angle(w, x, y, z):
    """
    Convert a quaternion (w, x, y, z) to Euler angles.

    Returns (X, Y, Z) = (roll, pitch, yaw), all in degrees.
    """
    y2 = y * y
    # roll: rotation about the x-axis
    sinr_cosp = 2.0 * (w * x + y * z)
    cosr_cosp = 1.0 - 2.0 * (x * x + y2)
    roll = math.degrees(math.atan2(sinr_cosp, cosr_cosp))
    # pitch: rotation about the y-axis; clamp to [-1, 1] for asin's domain
    sinp = 2.0 * (w * y - z * x)
    sinp = max(-1.0, min(1.0, sinp))
    pitch = math.degrees(math.asin(sinp))
    # yaw: rotation about the z-axis
    siny_cosp = 2.0 * (w * z + x * y)
    cosy_cosp = 1.0 - 2.0 * (y2 + z * z)
    yaw = math.degrees(math.atan2(siny_cosp, cosy_cosp))
    return roll, pitch, yaw
if __name__ == '__main__':
    # WaypointUpdater.__init__ runs the publish loop itself and only
    # returns at shutdown; ROSInterruptException signals ROS teardown.
    try:
        WaypointUpdater()
    except rospy.ROSInterruptException:
        rospy.logerr('Could not start waypoint updater node.')
| [
"rospy.logerr",
"numpy.sqrt",
"rospy.Subscriber",
"rospy.is_shutdown",
"rospy.init_node",
"math.asin",
"math.sqrt",
"rospy.Rate",
"numpy.arctan2",
"math.atan2",
"rospy.Publisher",
"rospy.loginfo",
"styx_msgs.msg.Lane"
] | [((1126, 1161), 'rospy.init_node', 'rospy.init_node', (['"""waypoint_updater"""'], {}), "('waypoint_updater')\n", (1141, 1161), False, 'import rospy\n'), ((1313, 1373), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/current_pose"""', 'PoseStamped', 'self.pose_cb'], {}), "('/current_pose', PoseStamped, self.pose_cb)\n", (1329, 1373), False, 'import rospy\n'), ((1382, 1442), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/base_waypoints"""', 'Lane', 'self.waypoints_cb'], {}), "('/base_waypoints', Lane, self.waypoints_cb)\n", (1398, 1442), False, 'import rospy\n'), ((1536, 1597), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/traffic_waypoint"""', 'Int32', 'self.traffic_cb'], {}), "('/traffic_waypoint', Int32, self.traffic_cb)\n", (1552, 1597), False, 'import rospy\n'), ((1683, 1752), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/current_velocity"""', 'TwistStamped', 'self.velocity_cb'], {}), "('/current_velocity', TwistStamped, self.velocity_cb)\n", (1699, 1752), False, 'import rospy\n'), ((1789, 1843), 'rospy.Publisher', 'rospy.Publisher', (['"""final_waypoints"""', 'Lane'], {'queue_size': '(1)'}), "('final_waypoints', Lane, queue_size=1)\n", (1804, 1843), False, 'import rospy\n'), ((1945, 1969), 'rospy.Rate', 'rospy.Rate', (['REFRESH_RATE'], {}), '(REFRESH_RATE)\n', (1955, 1969), False, 'import rospy\n'), ((4449, 4537), 'numpy.arctan2', 'np.arctan2', (['(nearest_waypoint_y - current_pose_y)', '(nearest_waypoint_x - current_pose_x)'], {}), '(nearest_waypoint_y - current_pose_y, nearest_waypoint_x -\n current_pose_x)\n', (4459, 4537), True, 'import numpy as np\n'), ((11281, 11287), 'styx_msgs.msg.Lane', 'Lane', ([], {}), '()\n', (11285, 11287), False, 'from styx_msgs.msg import Lane, Waypoint\n'), ((14971, 14989), 'math.atan2', 'math.atan2', (['t0', 't1'], {}), '(t0, t1)\n', (14981, 14989), False, 'import math\n'), ((15121, 15134), 'math.asin', 'math.asin', (['t2'], {}), '(t2)\n', (15130, 15134), False, 'import math\n'), ((15234, 15252), 'math.atan2', 
'math.atan2', (['t3', 't4'], {}), '(t3, t4)\n', (15244, 15252), False, 'import math\n'), ((2009, 2028), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (2026, 2028), False, 'import rospy\n'), ((3701, 3837), 'numpy.sqrt', 'np.sqrt', (['((current_pose_x - base_waypoint_x) ** 2 + (current_pose_y -\n base_waypoint_y) ** 2 + (current_pose_z - base_waypoint_z) ** 2)'], {}), '((current_pose_x - base_waypoint_x) ** 2 + (current_pose_y -\n base_waypoint_y) ** 2 + (current_pose_z - base_waypoint_z) ** 2)\n', (3708, 3837), True, 'import numpy as np\n'), ((14524, 14589), 'math.sqrt', 'math.sqrt', (['((a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (a.z - b.z) ** 2)'], {}), '((a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (a.z - b.z) ** 2)\n', (14533, 14589), False, 'import math\n'), ((15390, 15444), 'rospy.logerr', 'rospy.logerr', (['"""Could not start waypoint updater node."""'], {}), "('Could not start waypoint updater node.')\n", (15402, 15444), False, 'import rospy\n'), ((6614, 6678), 'rospy.loginfo', 'rospy.loginfo', (['"""stop_waypoint_idx is %d"""', 'self.stop_waypoint_idx'], {}), "('stop_waypoint_idx is %d', self.stop_waypoint_idx)\n", (6627, 6678), False, 'import rospy\n')] |
import os
import re
import csv
import sys
import json
import argparse
import random
from random import shuffle
import logging
from glob import glob
import numpy as np
from statistics import mean
# NOTE: OFFSET_IDX is a float (1e5); every use below wraps it in int()
OFFSET_IDX = 1e5 # start roughly after memesdataset files max idx
# timestamped console logging for the whole preprocessing run
logging.basicConfig(format='%(asctime)s : %(levelname)s - %(message)s',
                    datefmt='%d/%m/%Y %I:%M:%S %p',
                    level=logging.INFO)
logger = logging.getLogger('CrossValLog')
def generate_jsonl_file(data_path, dev_size=300):
    """Build ``all.jsonl`` from ``labels.csv`` under *data_path*.

    Only rows whose extracted image features (``<id>.npy`` and
    ``<id>_info.npy`` in ``img_feats/``) exist on disk are kept; ids are
    shifted by OFFSET_IDX and urls are stripped from the corrected text.
    """
    random.seed(42)  # fixed seed, kept for reproducibility
    labels_path = os.path.join(data_path, 'labels.csv')
    feats_dir = os.path.join(data_path, 'img_feats')
    records = []
    with open(labels_path, 'r', encoding='utf8') as handle:
        for row in csv.DictReader(handle):
            sample_id = int(row['']) + 1 + int(OFFSET_IDX)
            feat_file = os.path.join(feats_dir, str(sample_id) + '.npy')
            info_file = os.path.join(feats_dir, str(sample_id) + '_info.npy')
            # Only if the img_feats exist we add it to the dataset
            if not (os.path.isfile(feat_file) and os.path.isfile(info_file)):
                continue
            caption = row['text_corrected'].replace('\n', ' ')
            caption = re.sub(r"\b(?:https?://|www\.)[a-z0-9-]+(\.[a-z0-9-]+)+(?:[/?].*)?", "", caption) # removes most urls
            caption = re.sub(r"(w{3}\.)*[a-zA-Z0-9]+\.{1}(co){1}[m]{0,1}\s{0,1}", "", caption) # removes any.com urls
            caption = re.sub(r"(w{3}\.)*[a-zA-Z0-9]+\.{1}(net){1}\s{0,1}", "", caption) # removes any.net urls
            records.append({
                'id': str(sample_id),
                'img': 'images\/' + str(row['image_name'].replace('image_', '')),
                'label': 0,
                'text': caption,
            })
    logger.info("Total data points = {}".format(len(records)))
    out_path = os.path.join(data_path, 'all.jsonl')
    logger.info("Writing the file at : {}".format(out_path))
    export_jsonl(out_path, records)
def export_jsonl(filepath, dict_list):
    """Write each dict in *dict_list* as one JSON object per line.

    Note: no trailing newline is written after the last record.
    """
    lines = (json.dumps(entry) for entry in dict_list)
    payload = "\n".join(lines)
    with open(filepath, "w") as out:
        out.write(payload)
def rename_img_feats(dir='../dataset/memotion_dataset/img_feats'):
    """Re-save every ``.npy`` feature file under its OFFSET_IDX-shifted name.

    ``image_<idx>.npy`` becomes ``<idx + OFFSET_IDX>.npy`` and
    ``image_<idx>_info.npy`` becomes ``<idx + OFFSET_IDX>_info.npy``.

    Note: despite the name, the original files are left in place -- the
    contents are re-saved under the new name rather than moved/deleted.

    Parameters
    ----------
    dir : str
        Root directory walked recursively for feature files.
        NOTE(review): any file without a digit in its name raises
        IndexError (same as the original behaviour) -- confirm the
        directory only ever holds ``image_<idx>*.npy`` files.
    """
    logger.info("Renaming img_feat files..")
    for root, dirs, files in os.walk(dir):
        for file in files:
            src_file_path = os.path.join(root, file)
            # find the number in the string (image_xxx.npy); raw string
            # avoids the invalid-escape warning "\d" triggers in modern Python
            id = re.findall(r"\d+", file)[0]
            renamed_file = str(int(id)+int(OFFSET_IDX))+'_info.npy' if 'info' in file else str(int(id)+int(OFFSET_IDX))+'.npy'
            contents = np.load(src_file_path, allow_pickle=True)
            dest_file_path = os.path.join(root, renamed_file)
            np.save(dest_file_path, contents, allow_pickle=True)
    logger.info("Renaming and saving done..")
if __name__ == '__main__':
    # parse_known_args tolerates extra CLI flags (e.g. from notebook runners)
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_path', type=str, default='../dataset/memotion_dataset',
                        help='Path to folder of the meme dataset')
    args, unparsed = parser.parse_known_args()
    config = args.__dict__
    assert os.path.exists(config['data_path']), "[!] The provided data path does not exist!"
    # build all.jsonl first, then shift the feature-file names to match
    generate_jsonl_file(data_path=config['data_path'])
    rename_img_feats()
rename_img_feats() | [
"logging.basicConfig",
"logging.getLogger",
"os.path.exists",
"csv.DictReader",
"argparse.ArgumentParser",
"json.dumps",
"os.path.join",
"random.seed",
"os.path.isfile",
"numpy.save",
"re.sub",
"re.findall",
"numpy.load",
"os.walk"
] | [((263, 390), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s : %(levelname)s - %(message)s"""', 'datefmt': '"""%d/%m/%Y %I:%M:%S %p"""', 'level': 'logging.INFO'}), "(format='%(asctime)s : %(levelname)s - %(message)s',\n datefmt='%d/%m/%Y %I:%M:%S %p', level=logging.INFO)\n", (282, 390), False, 'import logging\n'), ((436, 468), 'logging.getLogger', 'logging.getLogger', (['"""CrossValLog"""'], {}), "('CrossValLog')\n", (453, 468), False, 'import logging\n'), ((525, 540), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (536, 540), False, 'import random\n'), ((575, 612), 'os.path.join', 'os.path.join', (['data_path', '"""labels.csv"""'], {}), "(data_path, 'labels.csv')\n", (587, 612), False, 'import os\n'), ((631, 667), 'os.path.join', 'os.path.join', (['data_path', '"""img_feats"""'], {}), "(data_path, 'img_feats')\n", (643, 667), False, 'import os\n'), ((1956, 1992), 'os.path.join', 'os.path.join', (['data_path', '"""all.jsonl"""'], {}), "(data_path, 'all.jsonl')\n", (1968, 1992), False, 'import os\n'), ((2387, 2399), 'os.walk', 'os.walk', (['dir'], {}), '(dir)\n', (2394, 2399), False, 'import os\n'), ((2998, 3023), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3021, 3023), False, 'import argparse\n'), ((3266, 3301), 'os.path.exists', 'os.path.exists', (["config['data_path']"], {}), "(config['data_path'])\n", (3280, 3301), False, 'import os\n'), ((743, 768), 'csv.DictReader', 'csv.DictReader', (['read_file'], {}), '(read_file)\n', (757, 768), False, 'import csv\n'), ((2155, 2168), 'json.dumps', 'json.dumps', (['d'], {}), '(d)\n', (2165, 2168), False, 'import json\n'), ((2474, 2498), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (2486, 2498), False, 'import os\n'), ((2737, 2778), 'numpy.load', 'np.load', (['src_file_path'], {'allow_pickle': '(True)'}), '(src_file_path, allow_pickle=True)\n', (2744, 2778), True, 'import numpy as np\n'), ((2808, 2840), 'os.path.join', 
'os.path.join', (['root', 'renamed_file'], {}), '(root, renamed_file)\n', (2820, 2840), False, 'import os\n'), ((2853, 2905), 'numpy.save', 'np.save', (['dest_file_path', 'contents'], {'allow_pickle': '(True)'}), '(dest_file_path, contents, allow_pickle=True)\n', (2860, 2905), True, 'import numpy as np\n'), ((1103, 1132), 'os.path.isfile', 'os.path.isfile', (['img_feat_path'], {}), '(img_feat_path)\n', (1117, 1132), False, 'import os\n'), ((1137, 1171), 'os.path.isfile', 'os.path.isfile', (['img_feat_info_path'], {}), '(img_feat_info_path)\n', (1151, 1171), False, 'import os\n'), ((1460, 1545), 're.sub', 're.sub', (['"""\\\\b(?:https?://|www\\\\.)[a-z0-9-]+(\\\\.[a-z0-9-]+)+(?:[/?].*)?"""', '""""""', 'text'], {}), "('\\\\b(?:https?://|www\\\\.)[a-z0-9-]+(\\\\.[a-z0-9-]+)+(?:[/?].*)?', '', text\n )\n", (1466, 1545), False, 'import re\n'), ((1583, 1654), 're.sub', 're.sub', (['"""(w{3}\\\\.)*[a-zA-Z0-9]+\\\\.{1}(co){1}[m]{0,1}\\\\s{0,1}"""', '""""""', 'text'], {}), "('(w{3}\\\\.)*[a-zA-Z0-9]+\\\\.{1}(co){1}[m]{0,1}\\\\s{0,1}', '', text)\n", (1589, 1654), False, 'import re\n'), ((1699, 1763), 're.sub', 're.sub', (['"""(w{3}\\\\.)*[a-zA-Z0-9]+\\\\.{1}(net){1}\\\\s{0,1}"""', '""""""', 'text'], {}), "('(w{3}\\\\.)*[a-zA-Z0-9]+\\\\.{1}(net){1}\\\\s{0,1}', '', text)\n", (1705, 1763), False, 'import re\n'), ((2516, 2540), 're.findall', 're.findall', (['"""\\\\d+"""', 'file'], {}), "('\\\\d+', file)\n", (2526, 2540), False, 'import re\n')] |
#############################################
#
# potential.py
# An exptool utility to handle energy and kappa calculations
#
# MSP 10.1.2015
#
# MSP 26 Apr 2020 revised for more graceful bar transform handling
#
'''
potential (part of exptool.basis)
construct instances that are combinations of different components
TODO:
1. add support for energy/kappa dimensionality conversion
'''
# python3 compatibility...
from __future__ import absolute_import, division, print_function, unicode_literals
# standard libraries
import numpy as np
import time
# exptool classes
from ..utils import utils
from ..io import psp_io
from ..analysis import pattern
from . import eof
from ..io import particle
from . import spheresl
#from exptool.basis import spheresl_new as spheresl
from ..utils import halo_methods
# for interpolation
from scipy.interpolate import UnivariateSpline
class Fields():
'''
class to accumulate (all) particles from a dump and return the field quantities
UNFORTUNATELY, this only works on halo+disk systems right now. Should offload the ability to plug in multiple components.
'''
def __init__(self,infile,eof_file,sph_file,model_file,nhalo=1000000,transform=False,no_odd=False,centering=False,mutual_center=False,verbose=1):
'''
__init__
inputs
--------------------
infile
eof_file
sph_file
model_file
nhalo=1000000
transform=False
no_odd=False
centering=False
mutual_center=False
verbose=1
returns
-------------------
self, now set up with basic parameters
'''
self.filename = infile
self.eof_file = eof_file
self.sph_file = sph_file
self.model_file = model_file
self.nhalo = nhalo
self.transform = transform
self.no_odd = no_odd
self.centering = centering
self.mutual_center = mutual_center
self.verbose = verbose
# do the total coefficient calculation?
#Fields.total_coefficients(self)
    def total_coefficients(self):
        '''
        total_coefficients

        Read the snapshot, optionally bar-transform and re-centre the disk
        and halo particles, then accumulate the disk (EOF) and halo (SL)
        expansion coefficients into self.EOF and self.SL.

        inputs
        -----------------
        self

        returns
        -----------------
        self  (sets self.time, self.halofac, self.*cen_*, self.EOF, self.SL)
        '''
        # read in the files
        #
        PSPDumpDisk = psp_io.Input(self.filename,comp='star')
        # add for ability to tabulate
        self.time = PSPDumpDisk.time
        if self.transform:
            # rotate the disk into the bar frame
            PSPDumpDiskTransformed = pattern.BarTransform(PSPDumpDisk)
            if self.verbose > 1:
                print('potential.Fields.total_coefficients: Using bar_angle {0:4.3f}'.format(PSPDumpDiskTransformed.bar_angle))
        else:
            # let's reread for safety
            PSPDumpDiskTransformed = psp_io.Input(self.filename,comp='star')
        # read in both partial and full halo to figure out the halofactor
        PSPDumpHaloT = psp_io.Input(self.filename,comp='dark')
        PSPDumpHalo = psp_io.Input(self.filename,comp='dark')#,nbodies=self.nhalo) #I, <NAME> Edited This line
        # mass correction factor for using a subset of halo particles
        # (currently both reads are full, so halofac evaluates to 1)
        self.halofac = float(PSPDumpHaloT.header['dark']['nbodies'])/float(PSPDumpHalo.header['dark']['nbodies']) #I, <NAME>, edited this line
        #float(PSPDumpHaloT.nbodies)/float(PSPDumpHalo.nbodies)
        if self.transform:
            # reuse the bar angle measured from the disk
            PSPDumpHaloTransformed = pattern.BarTransform(PSPDumpHalo,bar_angle=PSPDumpDiskTransformed.bar_angle)
        else:
            PSPDumpHaloTransformed = PSPDumpHalo = psp_io.Input(self.filename,comp='dark')#,nbodies=self.nhalo) #I, <NAME>, edited this line
        #
        # do centering
        if self.centering:
            print('potential.Fields.total_coefficients: Computing centering (centering=True)')
            # this should be adaptable at some point
            ncenter = 10000
            # rank order particles
            rrank = (PSPDumpDiskTransformed.data['x']*PSPDumpDiskTransformed.data['x'] + \
                     PSPDumpDiskTransformed.data['y']*PSPDumpDiskTransformed.data['y'] + \
                     PSPDumpDiskTransformed.data['z']*PSPDumpDiskTransformed.data['z'])**0.5 #edited to match new psp format
            '''(PSPDumpDiskTransformed.xpos*PSPDumpDiskTransformed.xpos + \
                PSPDumpDiskTransformed.ypos*PSPDumpDiskTransformed.ypos + \
                PSPDumpDiskTransformed.zpos*PSPDumpDiskTransformed.zpos)**0.5'''
            cparticles = rrank.argsort()[0:ncenter]
            # use the specified particles to calculate the center of mass in each dimension
            self.xcen_disk = np.sum(PSPDumpDiskTransformed.data['x'][cparticles]*PSPDumpDiskTransformed.data['m'][cparticles])/np.sum(PSPDumpDiskTransformed.data['m'][cparticles])
            #edited to match new psp format
            #np.sum(PSPDumpDiskTransformed.xpos[cparticles]*PSPDumpDiskTransformed.mass[cparticles])/np.sum(PSPDumpDiskTransformed.mass[cparticles])
            self.ycen_disk = np.sum(PSPDumpDiskTransformed.data['y'][cparticles]*PSPDumpDiskTransformed.data['m'][cparticles])/np.sum(PSPDumpDiskTransformed.data['m'][cparticles])
            #self.ycen_disk = np.sum(PSPDumpDiskTransformed.ypos[cparticles]*PSPDumpDiskTransformed.mass[cparticles])/np.sum(PSPDumpDiskTransformed.mass[cparticles])
            #self.zcen_disk = np.sum(PSPDumpDiskTransformed.zpos[cparticles]*PSPDumpDiskTransformed.mass[cparticles])/np.sum(PSPDumpDiskTransformed.mass[cparticles])
            self.zcen_disk = np.sum(PSPDumpDiskTransformed.data['z'][cparticles]*PSPDumpDiskTransformed.data['m'][cparticles])/np.sum(PSPDumpDiskTransformed.data['m'][cparticles])
            # pinned both components to same position?
            if self.mutual_center:
                print('potential.Fields.total_coefficients: Using computed disk center for halo (mutual_center=True)')
                self.xcen_halo = self.xcen_disk
                self.ycen_halo = self.ycen_disk
                self.zcen_halo = self.zcen_disk
            else:
                # rank order particles
                # NOTE(review): this ranks the DISK particles but the
                # resulting indices are applied to the HALO arrays below --
                # looks like the halo positions were intended here; confirm.
                rrank = (PSPDumpDiskTransformed.data['x']*PSPDumpDiskTransformed.data['x'] + \
                         PSPDumpDiskTransformed.data['y']*PSPDumpDiskTransformed.data['y'] + \
                         PSPDumpDiskTransformed.data['z']*PSPDumpDiskTransformed.data['z'])**0.5
                '''(PSPDumpDiskTransformed.xpos*PSPDumpDiskTransformed.xpos + \
                    PSPDumpDiskTransformed.ypos*PSPDumpDiskTransformed.ypos + \
                    PSPDumpDiskTransformed.zpos*PSPDumpDiskTransformed.zpos)**0.5'''
                cparticles = rrank.argsort()[0:ncenter]
                self.xcen_halo = np.sum(PSPDumpHaloTransformed.data['x'][cparticles]*PSPDumpHaloTransformed.data['m'][cparticles])/np.sum(PSPDumpHaloTransformed.data['m'][cparticles])
                self.ycen_halo = np.sum(PSPDumpHaloTransformed.data['y'][cparticles]*PSPDumpHaloTransformed.data['m'][cparticles])/np.sum(PSPDumpHaloTransformed.data['m'][cparticles])
                self.zcen_halo = np.sum(PSPDumpHaloTransformed.data['z'][cparticles]*PSPDumpHaloTransformed.data['m'][cparticles])/np.sum(PSPDumpHaloTransformed.data['m'][cparticles])
                '''
                self.xcen_halo = np.sum(PSPDumpHaloTransformed.xpos[cparticles]*PSPDumpHaloTransformed.mass[cparticles])/np.sum(PSPDumpHaloTransformed.mass[cparticles])
                self.ycen_halo = np.sum(PSPDumpHaloTransformed.ypos[cparticles]*PSPDumpHaloTransformed.mass[cparticles])/np.sum(PSPDumpHaloTransformed.mass[cparticles])
                self.zcen_halo = np.sum(PSPDumpHaloTransformed.zpos[cparticles]*PSPDumpHaloTransformed.mass[cparticles])/np.sum(PSPDumpHaloTransformed.mass[cparticles])
                '''
            print('potential.Fields.total_coefficients: (x,y,z) = {0:6.5f},{1:6.5f},{2:6.5f}'\
                  .format(float(self.xcen_disk),float(self.ycen_disk),float(self.zcen_disk)))
            # shift both components to their computed centres
            PSPDumpDiskTransformed.data['x'] = PSPDumpDiskTransformed.data['x'] - self.xcen_disk
            PSPDumpDiskTransformed.data['y'] = PSPDumpDiskTransformed.data['y'] - self.ycen_disk
            PSPDumpDiskTransformed.data['z'] = PSPDumpDiskTransformed.data['z'] - self.zcen_disk
            PSPDumpHaloTransformed.data['x'] = PSPDumpHaloTransformed.data['x'] - self.xcen_halo
            PSPDumpHaloTransformed.data['y'] = PSPDumpHaloTransformed.data['y'] - self.ycen_halo
            PSPDumpHaloTransformed.data['z'] = PSPDumpHaloTransformed.data['z'] - self.zcen_halo
            '''
            PSPDumpDiskTransformed.xpos = PSPDumpDiskTransformed.xpos - self.xcen_disk
            PSPDumpDiskTransformed.ypos = PSPDumpDiskTransformed.ypos - self.ycen_disk
            PSPDumpDiskTransformed.zpos = PSPDumpDiskTransformed.zpos - self.zcen_disk
            PSPDumpHaloTransformed.xpos = PSPDumpHaloTransformed.xpos - self.xcen_halo
            PSPDumpHaloTransformed.ypos = PSPDumpHaloTransformed.ypos - self.ycen_halo
            PSPDumpHaloTransformed.zpos = PSPDumpHaloTransformed.zpos - self.zcen_halo
            '''
        else:
            # no centering requested: expand about the origin
            self.xcen_disk = 0.
            self.ycen_disk = 0.
            self.zcen_disk = 0.
            self.xcen_halo = 0.
            self.ycen_halo = 0.
            self.zcen_halo = 0.
        #
        # compute coefficients
        #
        self.EOF = eof.compute_coefficients(PSPDumpDiskTransformed,self.eof_file,verbose=self.verbose,no_odd=self.no_odd)
        self.SL = spheresl.compute_coefficients(PSPDumpHaloTransformed,self.sph_file,self.model_file,verbose=self.verbose,no_odd=self.no_odd)
    def prep_tables(self):
        '''
        prep_tables

        reads the cached files to set up tables for accumulation

        Sets the disk (EOF) interpolation tables and grid parameters,
        the halo (SL) cached tables, and initialises the adjustable
        expansion-order limits (disk_use_*, halo_use_*) to their maxima.
        '''
        try:
            x = self.EOF.eof_file
        except:
            # NOTE(review): execution falls through after this message, so
            # the next line will raise AttributeError anyway -- consider
            # returning here.
            print('potential.Fields.prep_tables: must first call total_coefficients.')
        # build disk tables
        self.potC,self.rforceC,self.zforceC,self.densC,\
        self.potS,self.rforceS,self.zforceS,self.densS \
        = eof.parse_eof(self.EOF.eof_file)
        # disk basis parameters (grid size, harmonic orders, scales)
        self.rmindisk,self.rmaxdisk,self.numx,self.numy,self.mmax,self.norder,self.ascale,self.hscale,self.cmapdisk,self.densdisk \
        = eof.eof_params(self.EOF.eof_file)
        # interpolation-grid boundaries and spacings
        self.XMIN,self.XMAX,self.dX,self.YMIN,self.YMAX,self.dY \
        = eof.set_table_params(RMAX=self.rmaxdisk,RMIN=self.rmindisk,ASCALE=self.ascale,HSCALE=self.hscale,NUMX=self.numx,NUMY=self.numy,CMAP=self.cmapdisk)
        # default to using the full disk expansion
        self.disk_use_m = self.mmax
        self.disk_use_n = self.norder
        # build halo tables
        self.lmaxhalo,self.nmaxhalo,self.numrhalo,self.cmaphalo,\
        self.rminhalo,self.rmaxhalo,self.scalehalo,self.ltablehalo,self.evtablehalo,self.eftablehalo \
        = halo_methods.read_cached_table(self.SL.sph_file)
        self.xihalo,self.rarrhalo,self.p0halo,self.d0halo \
        = halo_methods.init_table(self.SL.model_file,self.numrhalo,self.rminhalo,self.rmaxhalo,cmap=self.cmaphalo,scale=self.scalehalo)
        # default to using the full halo expansion
        self.halo_use_l = self.lmaxhalo
        self.halo_use_n = self.nmaxhalo
    def return_density(self,xval,yval,zval):
        '''
        definition to return the density for the monopole term and total separately, and for the two components

        wrapped elsewhere to some end

        Returns (haloden0, haloden1, diskden0, diskden1): the den0/den1
        pairs are the monopole-only and full density at the requested
        Cartesian point (per the *den0/*den1 naming and how
        density_calculate unpacks them).
        '''
        try:
            x = self.EOF.eof_file
            y = self.potC
        except:
            # falls through; subsequent attribute access will raise
            print('potential.Fields.return_density: must first call total_coefficients and prep_tables.')
        # cylindrical and spherical radii (small offset avoids 0/0 below)
        r2val = (xval*xval + yval*yval)**0.5 + 1.e-10
        r3val = (r2val*r2val + zval*zval)**0.5 + 1.e-10
        costh = zval/r3val
        phival = np.arctan2(yval,xval)
        # disk evaluation call
        diskp0,diskp,diskfr,diskfp,diskfz,diskden0,diskden1 = eof.accumulated_eval(
            r2val, zval, phival,
            self.EOF.cos, self.EOF.sin,
            self.potC, self.rforceC, self.zforceC, self.densC,
            self.potS, self.rforceS, self.zforceS, self.densS,
            rmin=self.XMIN,dR=self.dX,zmin=self.YMIN,dZ=self.dY,numx=self.numx,numy=self.numy,fac = 1.0,
            MMAX=self.mmax,NMAX=self.norder,
            ASCALE=self.ascale,HSCALE=self.hscale,CMAP=self.cmapdisk,no_odd=self.no_odd)
        #
        # halo evaluation call (halofac rescales for halo particle subsampling)
        haloden0,haloden1,halopot0,halopot1,halopotr,halopott,halopotp = spheresl.all_eval(
            r3val, costh, phival,
            self.halofac*self.SL.expcoef,
            self.xihalo,self.p0halo,self.d0halo,
            self.cmaphalo,self.scalehalo,
            self.lmaxhalo,self.nmaxhalo,
            self.evtablehalo,self.eftablehalo,no_odd=self.no_odd)
        return haloden0,haloden1,diskden0,diskden1
def density_calculate(self,rvals=np.linspace(0.,0.1,100)):
    '''
    Sample the in-plane (y=0, z=0) density profile along the major axis,
    for tracking how the profile changes over time.

    Attaches rvals, halodens_mono, diskdens_mono, halodens_total and
    diskdens_total to the instance. Returns None (with a message) if the
    basis carries no density terms.
    '''
    # cheap to set everything up here as a just-in-case
    Fields.prep_tables(self)
    if not self.densdisk:
        print('Fields.density_calculate: no density terms in basis!')
        return None
    # allocate profiles with the same dtype as the requested radii
    halodens_mono = np.zeros_like(rvals)
    diskdens_mono = np.zeros_like(rvals)
    halodens_total = np.zeros_like(rvals)
    diskdens_total = np.zeros_like(rvals)
    for k, radius in enumerate(rvals):
        hmono, htot, dmono, dtot = Fields.return_density(self, radius, 0.0, 0.0)
        halodens_mono[k] = hmono
        halodens_total[k] = htot
        diskdens_mono[k] = dmono
        diskdens_total[k] = dtot
    self.rvals = rvals
    self.halodens_mono = halodens_mono
    self.diskdens_mono = diskdens_mono
    self.halodens_total = halodens_total
    self.diskdens_total = diskdens_total
def set_field_parameters(self,no_odd=False,halo_l=-1,halo_n=-1,disk_m=-1,disk_n=-1):
    '''
    Select which expansion terms subsequent field evaluations will use.

    no_odd : when True, drop odd terms from the evaluations
    halo_l, halo_n, disk_m, disk_n : override the corresponding order
        limit; any value left at -1 keeps the current setting untouched.
    '''
    self.no_odd = no_odd
    # apply only the limits the caller actually requested
    requested = (('halo_use_l', halo_l),
                 ('halo_use_n', halo_n),
                 ('disk_use_m', disk_m),
                 ('disk_use_n', disk_n))
    for attribute, value in requested:
        if value > -1:
            setattr(self, attribute, value)
def reset_field_parameters(self):
    '''
    Restore the default evaluation limits: odd terms enabled, and every
    expansion truncated only at the maxima read from the cached tables.
    '''
    defaults = {
        'no_odd': False,
        # halo limits revert to the SL table maxima
        'halo_use_l': self.lmaxhalo,
        'halo_use_n': self.nmaxhalo,
        # disk limits revert to the EOF table maxima
        'disk_use_m': self.mmax,
        'disk_use_n': self.norder,
    }
    for attribute, value in defaults.items():
        setattr(self, attribute, value)
def return_forces_cyl(self,xval,yval,zval,rotpos=0.0):
    '''
    Evaluate disk and halo forces at a Cartesian point and return them
    in cylindrical components.

    inputs
    ------
    xval, yval, zval : Cartesian position
    rotpos : azimuthal offset added to phi before evaluation

    returns
    -------
    diskfr, frhalo     : radial force (disk, halo)
    diskfp, -1.*halofp : azimuthal force (disk, halo)
    diskfz, fzhalo     : vertical force (disk, halo)
    -1.*diskp, halop+halop0 : potential (disk, halo total)
    '''
    # if set_field_parameters has never been called, apply the defaults
    try:
        x = self.no_odd
    except:
        print('Fields.return_forces_cart: applying default potential parameters.')
        Fields.set_field_parameters(self)
    # cylindrical and spherical radii; small offsets avoid divide-by-zero
    # NOTE(review): this uses 1.e-10 while return_forces_cart uses 1.e-15 — confirm intended
    r2val = np.sqrt(xval*xval + yval*yval) + 1.e-10
    r3val = np.sqrt(r2val*r2val + zval*zval) + 1.e-10
    costh = zval/r3val
    phival = np.arctan2(yval,xval)
    # disk force call (cylindrical EOF basis, truncated at disk_use_m/disk_use_n)
    diskfr,diskfp,diskfz,diskp,diskp0 = eof.force_eval(
        r2val, zval, phival + rotpos,
        self.EOF.cos, self.EOF.sin,
        self.potC, self.rforceC, self.zforceC,
        self.potS, self.rforceS, self.zforceS,
        rmin=self.XMIN,dR=self.dX,zmin=self.YMIN,dZ=self.dY,numx=self.numx,numy=self.numy,fac = 1.0,
        MMAX=self.disk_use_m,NMAX=self.disk_use_n,
        ASCALE=self.ascale,HSCALE=self.hscale,CMAP=self.cmapdisk,no_odd=self.no_odd,perturb=False)
    # halo force call (spherical SL basis, truncated at halo_use_l/halo_use_n)
    halofr,haloft,halofp,halop,halop0 = spheresl.force_eval(
        r3val, costh, phival + rotpos,
        self.halofac*self.SL.expcoef,
        self.xihalo,self.p0halo,self.d0halo,self.cmaphalo,self.scalehalo,
        self.halo_use_l,self.halo_use_n,
        self.evtablehalo,self.eftablehalo,no_odd=self.no_odd)
    # recommended guards against bizarre phi forces inside the innermost table radius
    # do we need any other guards?
    if r3val < np.min(self.xihalo):
        halofp = 0.
        diskfp = 0.
    # convert the halo (spherical) force components to cylindrical coordinates
    frhalo = -1.*(r2val*halofr + zval*haloft)/r3val
    fzhalo = -1.*(zval*halofr - r2val*haloft)/r3val
    # this is now returning the total potential in both the disk and halo case
    # (sign flips fix the disk potential/azimuthal-force conventions)
    return diskfr,frhalo,diskfp,-1.*halofp,diskfz,fzhalo,-1.*diskp,(halop + halop0)
def return_forces_cart(self,xval,yval,zval,rotpos=0.0):
    '''
    Evaluate disk and halo forces at a Cartesian point and return them
    in Cartesian components.

    inputs
    ------
    xval, yval, zval : Cartesian position
    rotpos : azimuthal offset added to phi before evaluation

    returns
    -------
    fxdisk, fxhalo : x force (disk, halo)
    fydisk, fyhalo : y force (disk, halo)
    fzdisk, fzhalo : z force (disk, halo)
    diskp, halop+halop0 : potential (disk, halo total)
    '''
    # if set_field_parameters has never been called, apply the defaults
    try:
        x = self.no_odd
    except:
        print('Fields.return_forces_cart: applying default potential parameters.')
        Fields.set_field_parameters(self)
    # cylindrical and spherical radii; small offsets avoid divide-by-zero
    # NOTE(review): this uses 1.e-15 while return_forces_cyl uses 1.e-10 — confirm intended
    r2val = (xval*xval + yval*yval)**0.5 + 1.e-15
    r3val = (r2val*r2val + zval*zval)**0.5 + 1.e-15
    costh = zval/r3val
    phival = np.arctan2(yval,xval)
    # disk force call (cylindrical EOF basis, truncated at disk_use_m/disk_use_n)
    diskfr,diskfp,diskfz,diskp,diskp0 = eof.force_eval(
        r2val, zval, phival + rotpos,
        self.EOF.cos, self.EOF.sin,
        self.potC, self.rforceC, self.zforceC,
        self.potS, self.rforceS, self.zforceS,
        rmin=self.XMIN,dR=self.dX,zmin=self.YMIN,dZ=self.dY,numx=self.numx,numy=self.numy,fac = 1.0,
        MMAX=self.disk_use_m,NMAX=self.disk_use_n,
        ASCALE=self.ascale,HSCALE=self.hscale,CMAP=self.cmapdisk,no_odd=self.no_odd,perturb=False)
    # halo force call (spherical SL basis, truncated at halo_use_l/halo_use_n)
    halofr,haloft,halofp,halop,halop0 = spheresl.force_eval(
        r3val, costh, phival + rotpos,
        self.halofac*self.SL.expcoef,
        self.xihalo,self.p0halo,self.d0halo,self.cmaphalo,self.scalehalo,
        self.halo_use_l,self.halo_use_n,
        self.evtablehalo,self.eftablehalo,no_odd=self.no_odd)
    # recommended guards against bizarre phi forces inside the innermost table radius
    # do we need any other guards?
    if r3val < np.min(self.xihalo):
        halofp = 0.
        diskfp = 0.
    # project (r, phi) disk forces and (r, theta, phi) halo forces onto x/y/z
    fxdisk = (diskfr*(xval/r2val) - diskfp*(yval/(r2val*r2val)) )
    fxhalo = -1.* ( halofr*(xval/r3val) - haloft*(xval*zval/(r3val*r3val*r3val))) + halofp*(yval/(r2val*r2val))
    fydisk = (diskfr*(yval/r2val) + diskfp*(xval/(r2val*r2val)) )
    fyhalo = -1.* ( halofr*(yval/r3val) - haloft*(yval*zval/(r3val*r3val*r3val))) - halofp*(xval/(r2val*r2val))
    fzdisk = diskfz
    fzhalo = -1.* ( halofr*(zval/r3val) + haloft*( (r2val*r2val)/(r3val*r3val*r3val)) )
    # this is now returning the total potential in both the disk and halo case
    return fxdisk,fxhalo,fydisk,fyhalo,fzdisk,fzhalo,diskp,(halop + halop0)
def rotation_curve(self,rvals=np.linspace(0.0001,0.1,100),mono=False,angle=0.):
    r'''
    returns the rotation curve, computed as $v_c = \sqrt{r|F_r|}$.
    by default, returns the values for the disk and halo where x>0, y=0.
    (so rotate potential first if desired!)

    inputs
    --------------
    self : (Field instance)
    rvals : (float, default=sampling to 0.1) what rvalues to evaluate
    mono : (bool, default=False) use only the monopole?
    angle : (float, default=0.)

    returns
    --------------
    additions to Field instance--
    disk_rotation : disk contribution to the rotation curve
    halo_rotation : halo contribution to the rotation curve
    total_rotation : the total rotation curve
    (plus disk_dpdr, halo_dpdr, total_dpdr and rvals)
    '''
    # if set_field_parameters has never been called, apply the defaults
    try:
        x = self.no_odd
    except:
        print('Fields.return_forces_cart: applying default potential parameters.')
        Fields.set_field_parameters(self)
    if mono == True:
        # stash the current limits, then zero out to get monopole only
        tmp_halo = self.halo_use_l
        tmp_disk = self.disk_use_m
        self.halo_use_l = 0
        self.disk_use_m = 0
    disk_force = np.zeros_like(rvals)
    halo_force = np.zeros_like(rvals)
    # NOTE(review): `angle` is passed as the y coordinate (not as rotpos) — confirm intended
    for indx,rval in enumerate(rvals):
        disk_force[indx],halo_force[indx],a,b,c,d,e,f = Fields.return_forces_cyl(self,rval,angle,0.0)
    self.rvals = rvals
    # radial potential gradients (dPhi/dr = -F_r)
    self.disk_dpdr = -1.*disk_force
    self.halo_dpdr = -1.*halo_force
    self.total_dpdr = -1.*disk_force + -1.*halo_force
    # circular velocities: v_c = sqrt(r |F_r|)
    self.disk_rotation = (rvals*abs(disk_force))**0.5
    self.halo_rotation = (rvals*abs(halo_force))**0.5
    self.total_rotation = (rvals*(abs(halo_force)+abs(disk_force)))**0.5
    if mono == True:
        # reset values stashed above
        self.halo_use_l = tmp_halo
        self.disk_use_m = tmp_disk
def resonance_positions(self,rvals=np.linspace(0.0001,0.1,100),mono=False):
    '''
    calculate simple resonance lines for modelling purposes

    inputs
    --------------
    self : the Field instance
    rvals : (default=sampling to 0.1) what rvalues to evaluate
    mono : (default=False) use only the monopole?

    returns
    --------------
    additions to Field instance--
    rvals
    omega : circular frequency at each radius
    kappa : radial (epicyclic) frequency at each radius
    '''
    # if set_field_parameters has never been called, apply the defaults
    try:
        x = self.no_odd
    except:
        print('Fields.return_forces_cart: applying default potential parameters.')
        Fields.set_field_parameters(self)
    if mono == True:
        # stash the current limits, then zero out to get monopole only
        tmp_halo = self.halo_use_l
        tmp_disk = self.disk_use_m
        self.halo_use_l = 0
        self.disk_use_m = 0
    disk_force = np.zeros_like(rvals)
    halo_force = np.zeros_like(rvals)
    for indx,rval in enumerate(rvals):
        disk_force[indx],halo_force[indx],a,b,c,d,e,f = Fields.return_forces_cyl(self,rval,0.0,0.0)
    self.rvals = rvals
    # circular frequency: omega = sqrt(|F_r|/r)
    self.omega = ((abs(halo_force)+abs(disk_force))/rvals)**0.5
    # radial frequency: differentiate omega(r) via an interpolating spline (s=0)
    spl = UnivariateSpline(rvals, self.omega, k=3, s=0)
    ddphi = spl.derivative()(rvals)
    # NOTE(review): kappa here is sqrt(3*omega^2 + d(omega)/dr) — confirm against
    # the intended epicyclic-frequency definition before relying on it
    self.kappa = ( 3.*self.omega**2. + ddphi)**0.5
    if mono == True:
        # reset values stashed above
        self.halo_use_l = tmp_halo
        self.disk_use_m = tmp_disk
def compute_axis_potential(self,rvals=np.linspace(0.,0.1,100)):
    '''
    Sample the disk and halo potential along the major axis (y=z=0).

    Attaches rvals, disk_pot, halo_pot and total_pot to the instance.
    This is kind of dumb and needs a revamp; it throws away the force
    information returned alongside the potentials.
    '''
    disk_pot = np.zeros_like(rvals)
    halo_pot = np.zeros_like(rvals)
    for k, radius in enumerate(rvals):
        # only the trailing two outputs (the potentials) are kept
        *_, disk_pot[k], halo_pot[k] = Fields.return_forces_cart(self, radius, 0.0, 0.0)
    self.rvals = rvals
    self.disk_pot = disk_pot
    self.halo_pot = halo_pot
    self.total_pot = disk_pot + halo_pot
def make_force_grid(self,rline = np.linspace(0.00022,0.1,100),thline = np.linspace(0.00022,2.*np.pi,50)):
    '''
    make_force_grid: evaluate a simple polar grid of points in the z=0 plane

    inputs
    ---------
    self : Fields instance
    rline : spacing in radius to probe
    thline : spacing in theta to probe

    returns
    ---------
    self.wake : dictionary with the following keys
        R : radius grid
        T : theta grid
        P : total potential grid
        P1 : non-monopole potential grid
        D : density grid
        tfR/dfR/hfR : total/disk/halo radial force grid
        tfP/dfP/hfP : total/disk/halo azimuthal force grid
        tfZ/dfZ : vertical force grids
        Rline, Tline : the input axes
    '''
    rgrid,thgrid = np.meshgrid(rline,thline)
    # pack the grid points into a particle-holder structure for the evaluators
    P = particle.holder() #psp_io.particle_holder()
    P.xpos = (rgrid*np.cos(thgrid)).reshape(-1,)
    P.ypos = (rgrid*np.sin(thgrid)).reshape(-1,)
    P.zpos = np.zeros(rgrid.size)
    P.mass = np.zeros(rgrid.size)
    # the only way to do even-only calculation with these is to wipe out
    # the odd terms from the coefficients (do-able); copies keep self.EOF intact
    cos_coefs_in = np.copy(self.EOF.cos)
    sin_coefs_in = np.copy(self.EOF.sin)
    if self.no_odd:
        for i in range(1,self.EOF.mmax,2):
            cos_coefs_in[i] = np.zeros(self.EOF.nmax)
            sin_coefs_in[i] = np.zeros(self.EOF.nmax)
    # if using a restored file, potC etc may already exist. checking...
    # NOTE(review): the table path hard-codes MMAX=6, NMAX=18, m2=1000 — confirm
    # these match the loaded basis before trusting this branch
    try:
        p0,p,d0,d,fr,fp,fz,R = eof.accumulated_eval_particles(P, cos_coefs_in, sin_coefs_in ,
            potC=self.potC, rforceC=self.rforceC, zforceC=self.zforceC,
            potS=self.potS, rforceS=self.rforceS, zforceS=self.zforceS,
            rmin=self.rmindisk,dR=0,zmin=self.zmindisk,dZ=0,numx=0,numy=0,MMAX=6,NMAX=18,
            ASCALE=0.0,HSCALE=0.0,CMAP=0,m1=0,m2=1000,verbose=0,density=False,eof_file='')
    except:
        # fall back to evaluating straight from the cached EOF file
        p0,p,d0,d,fr,fp,fz,R = eof.accumulated_eval_particles(P, cos_coefs_in, sin_coefs_in ,m1=0,m2=self.disk_use_m,eof_file=self.EOF.eof_file,density=True)
    # halo evaluation over the same points
    den0,den1,pot0,pot1,potr,pott,potp,rr = spheresl.eval_particles(P,self.halofac*self.SL.expcoef,self.SL.sph_file,self.SL.model_file,l1=0,l2=self.halo_use_l)
    # project the spherical halo radial/theta forces onto the cylindrical radius
    halo_rforce = ( rr*potr + P.zpos*pott )/( rr**2. + P.zpos**2.)**0.5
    # reassemble the flat evaluations back onto the (theta, r) grid
    wake = {}
    wake['R'] = rgrid
    wake['T'] = thgrid
    wake['P'] = (p+pot1+p0+pot0).reshape([thline.shape[0],rline.shape[0]])
    wake['P1'] = (p+pot1).reshape([thline.shape[0],rline.shape[0]])
    wake['D'] = (d+den0+den1).reshape([thline.shape[0],rline.shape[0]])
    wake['tfR'] = (-1.*fr+halo_rforce).reshape([thline.shape[0],rline.shape[0]])
    wake['dfR'] = (-1.*fr).reshape([thline.shape[0],rline.shape[0]])
    wake['hfR'] = halo_rforce.reshape([thline.shape[0],rline.shape[0]])
    wake['tfP'] = (fp+potp).reshape([thline.shape[0],rline.shape[0]])
    wake['dfP'] = fp.reshape([thline.shape[0],rline.shape[0]])
    wake['hfP'] = potp.reshape([thline.shape[0],rline.shape[0]])
    wake['tfZ'] = fz.reshape([thline.shape[0],rline.shape[0]])
    wake['dfZ'] = fz.reshape([thline.shape[0],rline.shape[0]])
    wake['Rline'] = rline
    wake['Tline'] = thline
    self.wake = wake
def save_field(self,filename=''):
    '''
    save_field
    ----------------
    print field quantities to file to restore quickly

    inputs
    ----------------
    self : the Field instance
    filename : output path; must be non-empty

    outputs
    ---------------
    printed field file, to be read with potential.restore_field(filename)

    Returns None without writing anything if no filename is given
    (previously this printed a warning and then crashed on open('')).
    The write order must stay in lockstep with restore_field.
    '''
    if filename=='':
        print('potential.Fields.save_field: No filename specified.')
        return None
    f = open(filename,'wb')
    #####################################################
    # global parameters
    # restored as: [infile,eof_file,sph_file,model_file] = np.fromfile(f,dtype='S100',count=4)
    np.array([self.filename],dtype='S100').tofile(f)
    np.array([self.eof_file],dtype='S100').tofile(f)
    np.array([self.sph_file],dtype='S100').tofile(f)
    np.array([self.model_file],dtype='S100').tofile(f)
    # restored as: [nhalo,transform,no_odd,centering,mutual_center,verbose] = np.fromfile(f,dtype='i4',count=6)
    np.array([self.nhalo],dtype='i4').tofile(f)
    np.array([self.transform],dtype='i4').tofile(f)
    np.array([self.no_odd],dtype='i4').tofile(f)
    np.array([self.centering],dtype='i4').tofile(f)
    np.array([self.mutual_center],dtype='i4').tofile(f)
    np.array([self.verbose],dtype='i4').tofile(f)
    # restored as: [time] = np.fromfile(f,dtype='f4',count=1)
    np.array([self.time],dtype='f4').tofile(f)
    ####################################################
    # EOF (disk) parameters: i4 block of 6, then f4 block of 11
    np.array([self.numx],dtype='i4').tofile(f)
    np.array([self.numy],dtype='i4').tofile(f)
    np.array([self.mmax],dtype='i4').tofile(f)
    np.array([self.norder],dtype='i4').tofile(f)
    np.array([self.cmapdisk],dtype='i4').tofile(f)
    np.array([self.densdisk],dtype='i4').tofile(f)
    np.array([self.rmindisk],dtype='f4').tofile(f)
    np.array([self.rmaxdisk],dtype='f4').tofile(f)
    np.array([self.ascale],dtype='f4').tofile(f)
    np.array([self.hscale],dtype='f4').tofile(f)
    np.array([self.XMIN],dtype='f4').tofile(f)
    np.array([self.dX],dtype='f4').tofile(f)
    np.array([self.YMIN],dtype='f4').tofile(f)
    np.array([self.dY],dtype='f4').tofile(f)
    np.array([self.xcen_disk],dtype='f4').tofile(f)
    np.array([self.ycen_disk],dtype='f4').tofile(f)
    np.array([self.zcen_disk],dtype='f4').tofile(f)
    # disk coefficient arrays: 8 bytes x 2 arrays x (mmax+1) x norder
    np.array(self.EOF.cos.reshape(-1,),dtype='f8').tofile(f)
    np.array(self.EOF.sin.reshape(-1,),dtype='f8').tofile(f)
    # disk tables, each (mmax+1) x norder x (numx+1) x (numy+1) flattened f8
    np.array(self.potC.reshape(-1,),dtype='f8').tofile(f)
    np.array(self.rforceC.reshape(-1,),dtype='f8').tofile(f)
    np.array(self.zforceC.reshape(-1,),dtype='f8').tofile(f)
    np.array(self.densC.reshape(-1,),dtype='f8').tofile(f)
    np.array(self.potS.reshape(-1,),dtype='f8').tofile(f)
    np.array(self.rforceS.reshape(-1,),dtype='f8').tofile(f)
    np.array(self.zforceS.reshape(-1,),dtype='f8').tofile(f)
    np.array(self.densS.reshape(-1,),dtype='f8').tofile(f)
    #########################################
    # SL (halo) parameters: f4 block of 7, then i4 block of 4
    np.array([self.halofac],dtype='f4').tofile(f)
    np.array([self.rminhalo],dtype='f4').tofile(f)
    np.array([self.rmaxhalo],dtype='f4').tofile(f)
    np.array([self.scalehalo],dtype='f4').tofile(f)
    np.array([self.xcen_halo],dtype='f4').tofile(f)
    np.array([self.ycen_halo],dtype='f4').tofile(f)
    np.array([self.zcen_halo],dtype='f4').tofile(f)
    np.array([self.numrhalo],dtype='i4').tofile(f)
    np.array([self.cmaphalo],dtype='i4').tofile(f)
    np.array([self.lmaxhalo],dtype='i4').tofile(f)
    np.array([self.nmaxhalo],dtype='i4').tofile(f)
    # halo table arrays, flattened f8
    np.array(self.xihalo.reshape(-1,),dtype='f8').tofile(f)
    np.array(self.p0halo.reshape(-1,),dtype='f8').tofile(f)
    np.array(self.d0halo.reshape(-1,),dtype='f8').tofile(f)
    np.array(self.ltablehalo.reshape(-1,),dtype='f8').tofile(f)
    np.array(self.evtablehalo.reshape(-1,),dtype='f8').tofile(f)
    np.array(self.eftablehalo.reshape(-1,),dtype='f8').tofile(f)
    # halo expansion coefficients to end of file
    np.array(self.SL.expcoef.reshape(-1,),dtype='f8').tofile(f)
    f.close()
def restore_field(filename=''):
    '''
    restore_field
    ----------------
    read in a Fields instance previously written by save_field.

    The read order mirrors save_field exactly; any change to one must be
    made to the other.
    '''
    f = open(filename,'rb')
    ###########################
    # global block
    [infile,eof_file,sph_file,model_file] = np.fromfile(f,dtype='S100',count=4)
    [nhalo,transform,no_odd,centering,mutual_center,verbose] = np.fromfile(f,dtype='i4',count=6)
    [time] = np.fromfile(f,dtype='f4',count=1)
    F = Fields(infile,eof_file,sph_file,model_file,nhalo=nhalo,transform=transform,no_odd=no_odd,centering=centering,mutual_center=mutual_center,verbose=verbose)
    # somehow this doesn't get carried over normally?
    F.time = time
    ###########################
    # EOF (disk) block: scalars, coefficients, then the six evaluation tables
    [F.numx,F.numy,F.mmax,F.norder,F.cmapdisk,F.densdisk] = np.fromfile(f,dtype='i4',count=6)
    [F.rmindisk,F.rmaxdisk,F.ascale,F.hscale,F.XMIN,F.dX,F.YMIN,F.dY,F.xcen_disk,F.ycen_disk,F.zcen_disk] = np.fromfile(f,dtype='f4',count=11)
    F.EOF = eof.EOF_Object()
    F.EOF.cos = (np.fromfile(f,dtype='f8',count=(F.mmax+1)*F.norder)).reshape([(F.mmax+1),F.norder])
    F.EOF.sin = (np.fromfile(f,dtype='f8',count=(F.mmax+1)*F.norder)).reshape([(F.mmax+1),F.norder])
    F.potC = (np.fromfile(f,dtype='f8',count=(F.mmax+1)*F.norder*(F.numx+1)*(F.numy+1))).reshape([(F.mmax+1),F.norder,(F.numx+1),(F.numy+1)])
    F.rforceC = (np.fromfile(f,dtype='f8',count=(F.mmax+1)*F.norder*(F.numx+1)*(F.numy+1))).reshape([(F.mmax+1),F.norder,(F.numx+1),(F.numy+1)])
    F.zforceC = (np.fromfile(f,dtype='f8',count=(F.mmax+1)*F.norder*(F.numx+1)*(F.numy+1))).reshape([(F.mmax+1),F.norder,(F.numx+1),(F.numy+1)])
    F.densC = (np.fromfile(f,dtype='f8',count=(F.mmax+1)*F.norder*(F.numx+1)*(F.numy+1))).reshape([(F.mmax+1),F.norder,(F.numx+1),(F.numy+1)])
    F.potS = (np.fromfile(f,dtype='f8',count=(F.mmax+1)*F.norder*(F.numx+1)*(F.numy+1))).reshape([(F.mmax+1),F.norder,(F.numx+1),(F.numy+1)])
    F.rforceS = (np.fromfile(f,dtype='f8',count=(F.mmax+1)*F.norder*(F.numx+1)*(F.numy+1))).reshape([(F.mmax+1),F.norder,(F.numx+1),(F.numy+1)])
    F.zforceS = (np.fromfile(f,dtype='f8',count=(F.mmax+1)*F.norder*(F.numx+1)*(F.numy+1))).reshape([(F.mmax+1),F.norder,(F.numx+1),(F.numy+1)])
    F.densS = (np.fromfile(f,dtype='f8',count=(F.mmax+1)*F.norder*(F.numx+1)*(F.numy+1))).reshape([(F.mmax+1),F.norder,(F.numx+1),(F.numy+1)])
    #############################
    # SL (halo) block
    [F.halofac,F.rminhalo,F.rmaxhalo,F.scalehalo,F.xcen_halo,F.ycen_halo,F.zcen_halo] = np.fromfile(f,dtype='f4',count=7)
    [F.numrhalo,F.cmaphalo,F.lmaxhalo,F.nmaxhalo] = np.fromfile(f,dtype='i4',count=4)
    F.xihalo = (np.fromfile(f,dtype='f8',count=F.numrhalo))
    F.p0halo = (np.fromfile(f,dtype='f8',count=F.numrhalo))
    F.d0halo = (np.fromfile(f,dtype='f8',count=F.numrhalo))
    # NOTE(review): attribute is named F.ltable here, but the rest of the
    # module writes/uses ltablehalo — confirm which name consumers expect
    F.ltable = (np.fromfile(f,dtype='f8',count=(F.lmaxhalo+1)))
    F.evtablehalo = (np.fromfile(f,dtype='f8',count=(F.lmaxhalo+1)*(F.nmaxhalo))).reshape([(F.lmaxhalo+1),(F.nmaxhalo)])
    F.eftablehalo = (np.fromfile(f,dtype='f8',count=(F.lmaxhalo+1)*(F.nmaxhalo)*(F.numrhalo))).reshape([(F.lmaxhalo+1),(F.nmaxhalo),(F.numrhalo)])
    F.SL = spheresl.SL_Object()
    F.SL.expcoef = (np.fromfile(f,dtype='f8',count=(F.lmaxhalo+1)*(F.lmaxhalo+1)*(F.nmaxhalo))).reshape([(F.lmaxhalo+1)*(F.lmaxhalo+1),(F.nmaxhalo)])
    # default the evaluation limits to the table maxima
    F.disk_use_m = F.mmax
    F.disk_use_n = F.norder
    F.halo_use_l = F.lmaxhalo
    F.halo_use_n = F.nmaxhalo
    # should restore to point just after F.prep_tables()
    f.close()
    # compatibility doubling of the file names onto the sub-objects
    F.SL.model_file = F.model_file
    F.SL.sph_file = F.sph_file
    F.EOF.eof_file = F.eof_file
    F.EOF.mmax = F.mmax
    return F
class EnergyKappa():
    '''
    Class to look at the energy-kappa mapping of an N-body snapshot,
    where kappa = Lz / Lz_max(E) is an orbit-circularity measure.
    '''
    def __init__(self,ParticleArray,nbins=200,map_file=None,eres=80,percen=99.5,spline_order=3):
        '''
        Copy the particle data, build the energy/circularity map, and
        optionally write it out.

        ParticleArray : particle data (single- or multi-timestep layout)
        nbins : stored on the instance (NOTE(review): not used below; eres controls binning)
        map_file : if given, output_map is called with it
        eres, percen, spline_order : forwarded to map_ekappa
        '''
        self.PA = PArray()
        # check to see if this is single or multi-timestep:
        # multi-timestep arrays expose upper-case attributes
        try:
            self.PA.MASS = ParticleArray.MASS
            self.PA.XPOS = ParticleArray.XPOS
            self.PA.YPOS = ParticleArray.YPOS
            self.PA.ZPOS = ParticleArray.ZPOS
            self.PA.XVEL = ParticleArray.XVEL
            self.PA.YVEL = ParticleArray.YVEL
            self.PA.ZVEL = ParticleArray.ZVEL
            self.PA.POTE = ParticleArray.POTE
            self.multitime = True
        except:
            self.PA.TIME = ParticleArray.time
            self.PA.MASS = ParticleArray.mass
            self.PA.XPOS = ParticleArray.xpos
            self.PA.YPOS = ParticleArray.ypos
            self.PA.ZPOS = ParticleArray.zpos
            self.PA.XVEL = ParticleArray.xvel
            self.PA.YVEL = ParticleArray.yvel
            self.PA.ZVEL = ParticleArray.zvel
            self.PA.POTE = ParticleArray.pote
            self.multitime = False
        self.nbins = nbins
        EnergyKappa.map_ekappa(self,percen=percen,eres=eres,spline_order=spline_order)
        if map_file:
            EnergyKappa.output_map(self,map_file)
    def map_ekappa(self,percen=99.9,eres=80,twodee=False,spline_order=3,smethod='sg'):
        '''
        Build the energy/circularity mapping. Enables plotting of:
          self.Energy : energy for each orbit
          self.Kappa  : LZ/LZ_max for each orbit
          self.Beta   : L/L_max for each orbit
          self.maxLz  : max LZ as a function of binned energy
          self.maxL   : max L as a function of binned energy
          self.maxR   : max radius as a function of binned energy
          self.circR / self.cR : circular-orbit (guiding-center) radius

        smethod : 'sg' uses Savitzky-Golay smoothing of the binned
            curves; anything else fits UnivariateSplines instead.
        '''
        # squared velocity
        V2 = (self.PA.XVEL*self.PA.XVEL + self.PA.YVEL*self.PA.YVEL + self.PA.ZVEL*self.PA.ZVEL)
        # angular momentum evaluation (cross product components)
        LX = self.PA.YPOS*self.PA.ZVEL - self.PA.ZPOS*self.PA.YVEL
        LY = self.PA.ZPOS*self.PA.XVEL - self.PA.XPOS*self.PA.ZVEL
        LZ = self.PA.XPOS*self.PA.YVEL - self.PA.YPOS*self.PA.XVEL
        L = (LX*LX + LY*LY + LZ*LZ)**0.5
        # total energy (to be made accessible)
        self.Energy = 0.5*V2 + self.PA.POTE
        # planar or spherical radius, depending on twodee
        if twodee:
            R = (self.PA.XPOS*self.PA.XPOS + self.PA.YPOS*self.PA.YPOS)**0.5
        else:
            R = (self.PA.XPOS*self.PA.XPOS + self.PA.YPOS*self.PA.YPOS + self.PA.ZPOS*self.PA.ZPOS)**0.5
        # partition particles into Energy bins (upper edge capped at -2.5)
        self.Ebins = np.linspace(0.999*np.min(self.Energy),np.min([-2.5,1.001*np.max(self.Energy)]),eres)
        eindx = np.digitize(self.Energy,self.Ebins)
        # allocate per-bin extremum arrays
        self.maxLz = np.zeros_like(self.Ebins)
        self.maxR = np.zeros_like(self.Ebins)
        self.maxL = np.zeros_like(self.Ebins)
        self.circR = np.zeros_like(self.Ebins)
        for i,energy in enumerate(self.Ebins):
            energy_range = np.where( eindx==i+1)[0]
            if len(energy_range) > 1:
                # percentile rather than max to reduce outlier sensitivity
                self.maxLz[i] = np.percentile(LZ[energy_range],percen)
                # take median of top 100 Lz for guiding center radius
                # (that is, radius of a circular orbit)
                lzarg = energy_range[LZ[energy_range].argsort()]
                self.circR[i] = np.median( R[lzarg[-100:-1]] )
                self.maxR[i] = np.percentile(R[energy_range],percen)
                self.maxL[i] = np.percentile(L[energy_range],percen)
            else: # guard for empty bins: carry the previous bin forward
                # NOTE(review): if bin 0 is empty this reads index -1 (last bin) — confirm acceptable
                self.maxLz[i] = self.maxLz[i-1]
                self.maxR[i] = self.maxR[i-1]
                self.maxL[i] = self.maxL[i-1]
                self.circR[i] = self.circR[i-1]
        # smooth discontinuities from bin choice
        if smethod == 'sg':
            smthLz = helpers.savitzky_golay(self.maxLz,7,3) # could figure out an adaptive smooth?
            smthL = helpers.savitzky_golay(self.maxL,7,3)
            smthR = helpers.savitzky_golay(self.circR,7,3)
        else:
            smthLzf = UnivariateSpline(self.Ebins,self.maxLz,k=spline_order)
            smthLf = UnivariateSpline(self.Ebins,self.maxL,k=spline_order)
            smthRf = UnivariateSpline(self.Ebins,self.circR,k=spline_order)
            smthLz = smthLzf(self.Ebins)
            smthL = smthLf(self.Ebins)
            smthR = smthRf(self.Ebins)
        # return energy and kappa for all orbits: bin lookup for 'sg',
        # per-particle spline evaluation otherwise
        if smethod == 'sg':
            self.Kappa = LZ/smthLz[eindx-1]
            self.Beta = L/smthL[eindx-1]
            self.cR = smthR[eindx-1]
        else:
            self.Kappa = LZ/smthLzf(self.Energy)
            self.Beta = L/smthLf(self.Energy)
            self.cR = smthRf(self.Energy)
        self.LZ = LZ
        self.L = L
    def clear_output_map_file(self):
        # placeholder: no clearing implemented yet
        return None
    def output_map(self,file):
        '''
        Helper to write the energy/kappa map to a file.
        NOTE(review): the actual write is commented out below, so this
        currently just creates/truncates the file.
        '''
        f = open(file,'w+')
        #print >>f,PA.TIME,len(self.Ebins),self.Ebins,self.maxLz,self.maxL,self.maxR,self.circR
        f.close()
    def ek_grid(self,eres=80,kres=80,set_ebins=True,ebins_in=None):
        '''
        Build the (Energy, Kappa) bin indices for each particle.

        set_ebins : when False, replace self.Ebins with ebins_in
        NOTE(review): eres is currently unused and `self.Ebins = self.Ebins`
        is a no-op — confirm intended.
        '''
        self.Kbins = np.linspace(-1.,1.,kres)
        self.Ebins = self.Ebins
        if not (set_ebins):
            self.Ebins = ebins_in
        self.Eindx = np.digitize(self.Energy,self.Ebins)
        self.Kindx = np.digitize(self.Kappa,self.Kbins)
    def sum_ek_values(self,sumval):
        '''
        Accumulate sumval (one value per particle) onto the (E, K) grid,
        producing self.EKarray (plus self.Egrid/self.Kgrid mesh axes).
        '''
        # has ek_grid already been run? if not, run it.
        try:
            x = self.Kindx[0]
        except:
            print('exptool.potential.sum_ek_values: Making Grid...')
            EnergyKappa.ek_grid(self)
        # NOTE(review): a length mismatch only prints a warning and then
        # proceeds — confirm whether it should abort instead
        if len(sumval)!=len(self.Eindx):
            print('exptool.potential.sum_ek_values: Input array must have values for all particles.')
            #break
        ebmax = len(self.Ebins)
        kbmax = len(self.Kbins)
        self.EKarray = np.zeros([ebmax,kbmax])
        self.Egrid,self.Kgrid = np.meshgrid(self.Ebins,self.Kbins)
        # out-of-range bin indices are silently dropped
        for i in range(0,len(self.Eindx)):
            if (self.Eindx[i] < ebmax) and (self.Kindx[i] < kbmax):
                self.EKarray[self.Eindx[i],self.Kindx[i]] += sumval[i]
#
# this is EXCLUSIVELY temporary until a better format is decided on
#
def get_fields(simulation_directory,simulation_name,intime,eof_file,sph_file,model_file,bar_file='',nhalo=1000000,transform=True):
    '''
    Build a ready-to-evaluate Fields instance for one simulation output,
    optionally transformed to the bar frame.

    input
    -----------------------------------
    simulation_directory : directory containing the OUT.* dumps
    simulation_name : simulation tag used in the dump file names
    intime : output number (zero-padded to five digits)
    eof_file, sph_file, model_file : basis cache/model files for Fields
    bar_file='' : bar-position file, read only when transform is True
    nhalo=1000000 : number of halo particles passed to Fields
    transform=True : compute and use the bar pattern speed

    returns
    ----------------------------------
    F : the prepared Fields instance
    patt : bar pattern speed at this time (0. if transform is False)
    rotfreq : patt/(2 pi) (0. if transform is False)
    '''
    infile = simulation_directory+'OUT.'+simulation_name+'.%05i' %intime
    BarInstance = pattern.BarDetermine()
    if transform:
        BarInstance.read_bar(bar_file)
        # reset the derivative
        BarInstance.frequency_and_derivative(spline_derivative=2)
        # put in modern psp reader format
        PSPDump = psp_io.Input(infile)#,legacy=False)
        patt = pattern.find_barpattern(PSPDump.time,BarInstance,smth_order=None)
        rotfreq = patt/(2.*np.pi)
    else:
        patt = 0.
        rotfreq = 0.
    # build the field instance and prepare it for evaluation
    F = Fields(infile,eof_file,sph_file,model_file,nhalo=nhalo,transform=transform,no_odd=False,centering=True,mutual_center=True)
    F.total_coefficients()
    F.prep_tables()
    return F,patt,rotfreq
def make_rotation(simulation_directory,simulation_name,intime):
    '''
    Restore a saved Fields instance for one snapshot, rebuild its force
    grid, and compute its rotation curve.

    NOTE: this restores to the orientation of the saving potential!
    could be dangerous.
    '''
    field = restore_field(simulation_directory+'/potential.'+str(intime)+'.dat')
    # repoint the cache/model files at this simulation directory
    field.SL.model_file = simulation_directory+'/SLGridSph.model'
    field.SL.sph_file = simulation_directory+'/SLGridSph.cache.'+simulation_name
    field.EOF.eof_file = simulation_directory+'/.eof.cache.file'
    # even-only evaluation, keeping every order limit at its default
    field.set_field_parameters(no_odd=True, halo_l=-1, halo_n=-1, disk_m=-1, disk_n=-1)
    field.make_force_grid()
    field.rotation_curve()
    return field
| [
"numpy.fromfile",
"numpy.sqrt",
"numpy.array",
"numpy.arctan2",
"numpy.sin",
"numpy.where",
"numpy.max",
"numpy.linspace",
"numpy.min",
"numpy.meshgrid",
"numpy.digitize",
"numpy.cos",
"scipy.interpolate.UnivariateSpline",
"numpy.copy",
"numpy.median",
"numpy.sum",
"numpy.zeros",
"... | [((34903, 34940), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""S100"""', 'count': '(4)'}), "(f, dtype='S100', count=4)\n", (34914, 34940), True, 'import numpy as np\n'), ((35002, 35037), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""i4"""', 'count': '(6)'}), "(f, dtype='i4', count=6)\n", (35013, 35037), True, 'import numpy as np\n'), ((35049, 35084), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""f4"""', 'count': '(1)'}), "(f, dtype='f4', count=1)\n", (35060, 35084), True, 'import numpy as np\n'), ((35428, 35463), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""i4"""', 'count': '(6)'}), "(f, dtype='i4', count=6)\n", (35439, 35463), True, 'import numpy as np\n'), ((35570, 35606), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""f4"""', 'count': '(11)'}), "(f, dtype='f4', count=11)\n", (35581, 35606), True, 'import numpy as np\n'), ((37127, 37162), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""f4"""', 'count': '(7)'}), "(f, dtype='f4', count=7)\n", (37138, 37162), True, 'import numpy as np\n'), ((37214, 37249), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""i4"""', 'count': '(4)'}), "(f, dtype='i4', count=4)\n", (37225, 37249), True, 'import numpy as np\n'), ((37265, 37309), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""f8"""', 'count': 'F.numrhalo'}), "(f, dtype='f8', count=F.numrhalo)\n", (37276, 37309), True, 'import numpy as np\n'), ((37325, 37369), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""f8"""', 'count': 'F.numrhalo'}), "(f, dtype='f8', count=F.numrhalo)\n", (37336, 37369), True, 'import numpy as np\n'), ((37385, 37429), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""f8"""', 'count': 'F.numrhalo'}), "(f, dtype='f8', count=F.numrhalo)\n", (37396, 37429), True, 'import numpy as np\n'), ((37445, 37493), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""f8"""', 'count': '(F.lmaxhalo + 1)'}), "(f, dtype='f8', count=F.lmaxhalo + 1)\n", (37456, 37493), True, 'import 
numpy as np\n'), ((11730, 11752), 'numpy.arctan2', 'np.arctan2', (['yval', 'xval'], {}), '(yval, xval)\n', (11740, 11752), True, 'import numpy as np\n'), ((13389, 13415), 'numpy.linspace', 'np.linspace', (['(0.0)', '(0.1)', '(100)'], {}), '(0.0, 0.1, 100)\n', (13400, 13415), True, 'import numpy as np\n'), ((13807, 13827), 'numpy.zeros_like', 'np.zeros_like', (['rvals'], {}), '(rvals)\n', (13820, 13827), True, 'import numpy as np\n'), ((13852, 13872), 'numpy.zeros_like', 'np.zeros_like', (['rvals'], {}), '(rvals)\n', (13865, 13872), True, 'import numpy as np\n'), ((13898, 13918), 'numpy.zeros_like', 'np.zeros_like', (['rvals'], {}), '(rvals)\n', (13911, 13918), True, 'import numpy as np\n'), ((13944, 13964), 'numpy.zeros_like', 'np.zeros_like', (['rvals'], {}), '(rvals)\n', (13957, 13964), True, 'import numpy as np\n'), ((15386, 15408), 'numpy.arctan2', 'np.arctan2', (['yval', 'xval'], {}), '(yval, xval)\n', (15396, 15408), True, 'import numpy as np\n'), ((17891, 17913), 'numpy.arctan2', 'np.arctan2', (['yval', 'xval'], {}), '(yval, xval)\n', (17901, 17913), True, 'import numpy as np\n'), ((20271, 20300), 'numpy.linspace', 'np.linspace', (['(0.0001)', '(0.1)', '(100)'], {}), '(0.0001, 0.1, 100)\n', (20282, 20300), True, 'import numpy as np\n'), ((21608, 21628), 'numpy.zeros_like', 'np.zeros_like', (['rvals'], {}), '(rvals)\n', (21621, 21628), True, 'import numpy as np\n'), ((21650, 21670), 'numpy.zeros_like', 'np.zeros_like', (['rvals'], {}), '(rvals)\n', (21663, 21670), True, 'import numpy as np\n'), ((22354, 22383), 'numpy.linspace', 'np.linspace', (['(0.0001)', '(0.1)', '(100)'], {}), '(0.0001, 0.1, 100)\n', (22365, 22383), True, 'import numpy as np\n'), ((23273, 23293), 'numpy.zeros_like', 'np.zeros_like', (['rvals'], {}), '(rvals)\n', (23286, 23293), True, 'import numpy as np\n'), ((23315, 23335), 'numpy.zeros_like', 'np.zeros_like', (['rvals'], {}), '(rvals)\n', (23328, 23335), True, 'import numpy as np\n'), ((23680, 23725), 
'scipy.interpolate.UnivariateSpline', 'UnivariateSpline', (['rvals', 'self.omega'], {'k': '(3)', 's': '(0)'}), '(rvals, self.omega, k=3, s=0)\n', (23696, 23725), False, 'from scipy.interpolate import UnivariateSpline\n'), ((23998, 24024), 'numpy.linspace', 'np.linspace', (['(0.0)', '(0.1)', '(100)'], {}), '(0.0, 0.1, 100)\n', (24009, 24024), True, 'import numpy as np\n'), ((24205, 24225), 'numpy.zeros_like', 'np.zeros_like', (['rvals'], {}), '(rvals)\n', (24218, 24225), True, 'import numpy as np\n'), ((24245, 24265), 'numpy.zeros_like', 'np.zeros_like', (['rvals'], {}), '(rvals)\n', (24258, 24265), True, 'import numpy as np\n'), ((24587, 24617), 'numpy.linspace', 'np.linspace', (['(0.00022)', '(0.1)', '(100)'], {}), '(0.00022, 0.1, 100)\n', (24598, 24617), True, 'import numpy as np\n'), ((24625, 24662), 'numpy.linspace', 'np.linspace', (['(0.00022)', '(2.0 * np.pi)', '(50)'], {}), '(0.00022, 2.0 * np.pi, 50)\n', (24636, 24662), True, 'import numpy as np\n'), ((25265, 25291), 'numpy.meshgrid', 'np.meshgrid', (['rline', 'thline'], {}), '(rline, thline)\n', (25276, 25291), True, 'import numpy as np\n'), ((25472, 25492), 'numpy.zeros', 'np.zeros', (['rgrid.size'], {}), '(rgrid.size)\n', (25480, 25492), True, 'import numpy as np\n'), ((25510, 25530), 'numpy.zeros', 'np.zeros', (['rgrid.size'], {}), '(rgrid.size)\n', (25518, 25530), True, 'import numpy as np\n'), ((25698, 25719), 'numpy.copy', 'np.copy', (['self.EOF.cos'], {}), '(self.EOF.cos)\n', (25705, 25719), True, 'import numpy as np\n'), ((25743, 25764), 'numpy.copy', 'np.copy', (['self.EOF.sin'], {}), '(self.EOF.sin)\n', (25750, 25764), True, 'import numpy as np\n'), ((41098, 41134), 'numpy.digitize', 'np.digitize', (['self.Energy', 'self.Ebins'], {}), '(self.Energy, self.Ebins)\n', (41109, 41134), True, 'import numpy as np\n'), ((41182, 41207), 'numpy.zeros_like', 'np.zeros_like', (['self.Ebins'], {}), '(self.Ebins)\n', (41195, 41207), True, 'import numpy as np\n'), ((41228, 41253), 'numpy.zeros_like', 
'np.zeros_like', (['self.Ebins'], {}), '(self.Ebins)\n', (41241, 41253), True, 'import numpy as np\n'), ((41274, 41299), 'numpy.zeros_like', 'np.zeros_like', (['self.Ebins'], {}), '(self.Ebins)\n', (41287, 41299), True, 'import numpy as np\n'), ((41321, 41346), 'numpy.zeros_like', 'np.zeros_like', (['self.Ebins'], {}), '(self.Ebins)\n', (41334, 41346), True, 'import numpy as np\n'), ((43867, 43895), 'numpy.linspace', 'np.linspace', (['(-1.0)', '(1.0)', 'kres'], {}), '(-1.0, 1.0, kres)\n', (43878, 43895), True, 'import numpy as np\n'), ((44009, 44045), 'numpy.digitize', 'np.digitize', (['self.Energy', 'self.Ebins'], {}), '(self.Energy, self.Ebins)\n', (44020, 44045), True, 'import numpy as np\n'), ((44066, 44101), 'numpy.digitize', 'np.digitize', (['self.Kappa', 'self.Kbins'], {}), '(self.Kappa, self.Kbins)\n', (44077, 44101), True, 'import numpy as np\n'), ((44694, 44718), 'numpy.zeros', 'np.zeros', (['[ebmax, kbmax]'], {}), '([ebmax, kbmax])\n', (44702, 44718), True, 'import numpy as np\n'), ((44750, 44785), 'numpy.meshgrid', 'np.meshgrid', (['self.Ebins', 'self.Kbins'], {}), '(self.Ebins, self.Kbins)\n', (44761, 44785), True, 'import numpy as np\n'), ((15242, 15276), 'numpy.sqrt', 'np.sqrt', (['(xval * xval + yval * yval)'], {}), '(xval * xval + yval * yval)\n', (15249, 15276), True, 'import numpy as np\n'), ((15299, 15335), 'numpy.sqrt', 'np.sqrt', (['(r2val * r2val + zval * zval)'], {}), '(r2val * r2val + zval * zval)\n', (15306, 15335), True, 'import numpy as np\n'), ((17010, 17029), 'numpy.min', 'np.min', (['self.xihalo'], {}), '(self.xihalo)\n', (17016, 17029), True, 'import numpy as np\n'), ((19515, 19534), 'numpy.min', 'np.min', (['self.xihalo'], {}), '(self.xihalo)\n', (19521, 19534), True, 'import numpy as np\n'), ((35652, 35709), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""f8"""', 'count': '((F.mmax + 1) * F.norder)'}), "(f, dtype='f8', count=(F.mmax + 1) * F.norder)\n", (35663, 35709), True, 'import numpy as np\n'), ((35753, 35810), 
'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""f8"""', 'count': '((F.mmax + 1) * F.norder)'}), "(f, dtype='f8', count=(F.mmax + 1) * F.norder)\n", (35764, 35810), True, 'import numpy as np\n'), ((35852, 35944), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""f8"""', 'count': '((F.mmax + 1) * F.norder * (F.numx + 1) * (F.numy + 1))'}), "(f, dtype='f8', count=(F.mmax + 1) * F.norder * (F.numx + 1) * (\n F.numy + 1))\n", (35863, 35944), True, 'import numpy as np\n'), ((35997, 36089), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""f8"""', 'count': '((F.mmax + 1) * F.norder * (F.numx + 1) * (F.numy + 1))'}), "(f, dtype='f8', count=(F.mmax + 1) * F.norder * (F.numx + 1) * (\n F.numy + 1))\n", (36008, 36089), True, 'import numpy as np\n'), ((36142, 36234), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""f8"""', 'count': '((F.mmax + 1) * F.norder * (F.numx + 1) * (F.numy + 1))'}), "(f, dtype='f8', count=(F.mmax + 1) * F.norder * (F.numx + 1) * (\n F.numy + 1))\n", (36153, 36234), True, 'import numpy as np\n'), ((36285, 36377), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""f8"""', 'count': '((F.mmax + 1) * F.norder * (F.numx + 1) * (F.numy + 1))'}), "(f, dtype='f8', count=(F.mmax + 1) * F.norder * (F.numx + 1) * (\n F.numy + 1))\n", (36296, 36377), True, 'import numpy as np\n'), ((36428, 36520), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""f8"""', 'count': '((F.mmax + 1) * F.norder * (F.numx + 1) * (F.numy + 1))'}), "(f, dtype='f8', count=(F.mmax + 1) * F.norder * (F.numx + 1) * (\n F.numy + 1))\n", (36439, 36520), True, 'import numpy as np\n'), ((36573, 36665), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""f8"""', 'count': '((F.mmax + 1) * F.norder * (F.numx + 1) * (F.numy + 1))'}), "(f, dtype='f8', count=(F.mmax + 1) * F.norder * (F.numx + 1) * (\n F.numy + 1))\n", (36584, 36665), True, 'import numpy as np\n'), ((36718, 36810), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""f8"""', 'count': '((F.mmax + 1) * 
F.norder * (F.numx + 1) * (F.numy + 1))'}), "(f, dtype='f8', count=(F.mmax + 1) * F.norder * (F.numx + 1) * (\n F.numy + 1))\n", (36729, 36810), True, 'import numpy as np\n'), ((36861, 36953), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""f8"""', 'count': '((F.mmax + 1) * F.norder * (F.numx + 1) * (F.numy + 1))'}), "(f, dtype='f8', count=(F.mmax + 1) * F.norder * (F.numx + 1) * (\n F.numy + 1))\n", (36872, 36953), True, 'import numpy as np\n'), ((37515, 37578), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""f8"""', 'count': '((F.lmaxhalo + 1) * F.nmaxhalo)'}), "(f, dtype='f8', count=(F.lmaxhalo + 1) * F.nmaxhalo)\n", (37526, 37578), True, 'import numpy as np\n'), ((37636, 37712), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""f8"""', 'count': '((F.lmaxhalo + 1) * F.nmaxhalo * F.numrhalo)'}), "(f, dtype='f8', count=(F.lmaxhalo + 1) * F.nmaxhalo * F.numrhalo)\n", (37647, 37712), True, 'import numpy as np\n'), ((37815, 37902), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': '"""f8"""', 'count': '((F.lmaxhalo + 1) * (F.lmaxhalo + 1) * F.nmaxhalo)'}), "(f, dtype='f8', count=(F.lmaxhalo + 1) * (F.lmaxhalo + 1) * F.\n nmaxhalo)\n", (37826, 37902), True, 'import numpy as np\n'), ((42774, 42830), 'scipy.interpolate.UnivariateSpline', 'UnivariateSpline', (['self.Ebins', 'self.maxLz'], {'k': 'spline_order'}), '(self.Ebins, self.maxLz, k=spline_order)\n', (42790, 42830), False, 'from scipy.interpolate import UnivariateSpline\n'), ((42851, 42906), 'scipy.interpolate.UnivariateSpline', 'UnivariateSpline', (['self.Ebins', 'self.maxL'], {'k': 'spline_order'}), '(self.Ebins, self.maxL, k=spline_order)\n', (42867, 42906), False, 'from scipy.interpolate import UnivariateSpline\n'), ((42927, 42983), 'scipy.interpolate.UnivariateSpline', 'UnivariateSpline', (['self.Ebins', 'self.circR'], {'k': 'spline_order'}), '(self.Ebins, self.circR, k=spline_order)\n', (42943, 42983), False, 'from scipy.interpolate import UnivariateSpline\n'), ((4652, 4755), 'numpy.sum', 
'np.sum', (["(PSPDumpDiskTransformed.data['x'][cparticles] * PSPDumpDiskTransformed.data\n ['m'][cparticles])"], {}), "(PSPDumpDiskTransformed.data['x'][cparticles] *\n PSPDumpDiskTransformed.data['m'][cparticles])\n", (4658, 4755), True, 'import numpy as np\n'), ((4750, 4802), 'numpy.sum', 'np.sum', (["PSPDumpDiskTransformed.data['m'][cparticles]"], {}), "(PSPDumpDiskTransformed.data['m'][cparticles])\n", (4756, 4802), True, 'import numpy as np\n'), ((5025, 5128), 'numpy.sum', 'np.sum', (["(PSPDumpDiskTransformed.data['y'][cparticles] * PSPDumpDiskTransformed.data\n ['m'][cparticles])"], {}), "(PSPDumpDiskTransformed.data['y'][cparticles] *\n PSPDumpDiskTransformed.data['m'][cparticles])\n", (5031, 5128), True, 'import numpy as np\n'), ((5123, 5175), 'numpy.sum', 'np.sum', (["PSPDumpDiskTransformed.data['m'][cparticles]"], {}), "(PSPDumpDiskTransformed.data['m'][cparticles])\n", (5129, 5175), True, 'import numpy as np\n'), ((5537, 5640), 'numpy.sum', 'np.sum', (["(PSPDumpDiskTransformed.data['z'][cparticles] * PSPDumpDiskTransformed.data\n ['m'][cparticles])"], {}), "(PSPDumpDiskTransformed.data['z'][cparticles] *\n PSPDumpDiskTransformed.data['m'][cparticles])\n", (5543, 5640), True, 'import numpy as np\n'), ((5635, 5687), 'numpy.sum', 'np.sum', (["PSPDumpDiskTransformed.data['m'][cparticles]"], {}), "(PSPDumpDiskTransformed.data['m'][cparticles])\n", (5641, 5687), True, 'import numpy as np\n'), ((25880, 25903), 'numpy.zeros', 'np.zeros', (['self.EOF.nmax'], {}), '(self.EOF.nmax)\n', (25888, 25903), True, 'import numpy as np\n'), ((25938, 25961), 'numpy.zeros', 'np.zeros', (['self.EOF.nmax'], {}), '(self.EOF.nmax)\n', (25946, 25961), True, 'import numpy as np\n'), ((28752, 28791), 'numpy.array', 'np.array', (['[self.filename]'], {'dtype': '"""S100"""'}), "([self.filename], dtype='S100')\n", (28760, 28791), True, 'import numpy as np\n'), ((28833, 28872), 'numpy.array', 'np.array', (['[self.eof_file]'], {'dtype': '"""S100"""'}), "([self.eof_file], dtype='S100')\n", 
(28841, 28872), True, 'import numpy as np\n'), ((28914, 28953), 'numpy.array', 'np.array', (['[self.sph_file]'], {'dtype': '"""S100"""'}), "([self.sph_file], dtype='S100')\n", (28922, 28953), True, 'import numpy as np\n'), ((28997, 29038), 'numpy.array', 'np.array', (['[self.model_file]'], {'dtype': '"""S100"""'}), "([self.model_file], dtype='S100')\n", (29005, 29038), True, 'import numpy as np\n'), ((29163, 29197), 'numpy.array', 'np.array', (['[self.nhalo]'], {'dtype': '"""i4"""'}), "([self.nhalo], dtype='i4')\n", (29171, 29197), True, 'import numpy as np\n'), ((29240, 29278), 'numpy.array', 'np.array', (['[self.transform]'], {'dtype': '"""i4"""'}), "([self.transform], dtype='i4')\n", (29248, 29278), True, 'import numpy as np\n'), ((29318, 29353), 'numpy.array', 'np.array', (['[self.no_odd]'], {'dtype': '"""i4"""'}), "([self.no_odd], dtype='i4')\n", (29326, 29353), True, 'import numpy as np\n'), ((29396, 29434), 'numpy.array', 'np.array', (['[self.centering]'], {'dtype': '"""i4"""'}), "([self.centering], dtype='i4')\n", (29404, 29434), True, 'import numpy as np\n'), ((29481, 29523), 'numpy.array', 'np.array', (['[self.mutual_center]'], {'dtype': '"""i4"""'}), "([self.mutual_center], dtype='i4')\n", (29489, 29523), True, 'import numpy as np\n'), ((29564, 29600), 'numpy.array', 'np.array', (['[self.verbose]'], {'dtype': '"""i4"""'}), "([self.verbose], dtype='i4')\n", (29572, 29600), True, 'import numpy as np\n'), ((29741, 29774), 'numpy.array', 'np.array', (['[self.time]'], {'dtype': '"""f4"""'}), "([self.time], dtype='f4')\n", (29749, 29774), True, 'import numpy as np\n'), ((29953, 29986), 'numpy.array', 'np.array', (['[self.numx]'], {'dtype': '"""i4"""'}), "([self.numx], dtype='i4')\n", (29961, 29986), True, 'import numpy as np\n'), ((30024, 30057), 'numpy.array', 'np.array', (['[self.numy]'], {'dtype': '"""i4"""'}), "([self.numy], dtype='i4')\n", (30032, 30057), True, 'import numpy as np\n'), ((30095, 30128), 'numpy.array', 'np.array', (['[self.mmax]'], 
{'dtype': '"""i4"""'}), "([self.mmax], dtype='i4')\n", (30103, 30128), True, 'import numpy as np\n'), ((30168, 30203), 'numpy.array', 'np.array', (['[self.norder]'], {'dtype': '"""i4"""'}), "([self.norder], dtype='i4')\n", (30176, 30203), True, 'import numpy as np\n'), ((30245, 30282), 'numpy.array', 'np.array', (['[self.cmapdisk]'], {'dtype': '"""i4"""'}), "([self.cmapdisk], dtype='i4')\n", (30253, 30282), True, 'import numpy as np\n'), ((30324, 30361), 'numpy.array', 'np.array', (['[self.densdisk]'], {'dtype': '"""i4"""'}), "([self.densdisk], dtype='i4')\n", (30332, 30361), True, 'import numpy as np\n'), ((30492, 30529), 'numpy.array', 'np.array', (['[self.rmindisk]'], {'dtype': '"""f4"""'}), "([self.rmindisk], dtype='f4')\n", (30500, 30529), True, 'import numpy as np\n'), ((30571, 30608), 'numpy.array', 'np.array', (['[self.rmaxdisk]'], {'dtype': '"""f4"""'}), "([self.rmaxdisk], dtype='f4')\n", (30579, 30608), True, 'import numpy as np\n'), ((30648, 30683), 'numpy.array', 'np.array', (['[self.ascale]'], {'dtype': '"""f4"""'}), "([self.ascale], dtype='f4')\n", (30656, 30683), True, 'import numpy as np\n'), ((30723, 30758), 'numpy.array', 'np.array', (['[self.hscale]'], {'dtype': '"""f4"""'}), "([self.hscale], dtype='f4')\n", (30731, 30758), True, 'import numpy as np\n'), ((30796, 30829), 'numpy.array', 'np.array', (['[self.XMIN]'], {'dtype': '"""f4"""'}), "([self.XMIN], dtype='f4')\n", (30804, 30829), True, 'import numpy as np\n'), ((30865, 30896), 'numpy.array', 'np.array', (['[self.dX]'], {'dtype': '"""f4"""'}), "([self.dX], dtype='f4')\n", (30873, 30896), True, 'import numpy as np\n'), ((30934, 30967), 'numpy.array', 'np.array', (['[self.YMIN]'], {'dtype': '"""f4"""'}), "([self.YMIN], dtype='f4')\n", (30942, 30967), True, 'import numpy as np\n'), ((31003, 31034), 'numpy.array', 'np.array', (['[self.dY]'], {'dtype': '"""f4"""'}), "([self.dY], dtype='f4')\n", (31011, 31034), True, 'import numpy as np\n'), ((31082, 31120), 'numpy.array', 'np.array', 
(['[self.xcen_disk]'], {'dtype': '"""f4"""'}), "([self.xcen_disk], dtype='f4')\n", (31090, 31120), True, 'import numpy as np\n'), ((31168, 31206), 'numpy.array', 'np.array', (['[self.ycen_disk]'], {'dtype': '"""f4"""'}), "([self.ycen_disk], dtype='f4')\n", (31176, 31206), True, 'import numpy as np\n'), ((31254, 31292), 'numpy.array', 'np.array', (['[self.zcen_disk]'], {'dtype': '"""f4"""'}), "([self.zcen_disk], dtype='f4')\n", (31262, 31292), True, 'import numpy as np\n'), ((32805, 32841), 'numpy.array', 'np.array', (['[self.halofac]'], {'dtype': '"""f4"""'}), "([self.halofac], dtype='f4')\n", (32813, 32841), True, 'import numpy as np\n'), ((32883, 32920), 'numpy.array', 'np.array', (['[self.rminhalo]'], {'dtype': '"""f4"""'}), "([self.rminhalo], dtype='f4')\n", (32891, 32920), True, 'import numpy as np\n'), ((32962, 32999), 'numpy.array', 'np.array', (['[self.rmaxhalo]'], {'dtype': '"""f4"""'}), "([self.rmaxhalo], dtype='f4')\n", (32970, 32999), True, 'import numpy as np\n'), ((33042, 33080), 'numpy.array', 'np.array', (['[self.scalehalo]'], {'dtype': '"""f4"""'}), "([self.scalehalo], dtype='f4')\n", (33050, 33080), True, 'import numpy as np\n'), ((33128, 33166), 'numpy.array', 'np.array', (['[self.xcen_halo]'], {'dtype': '"""f4"""'}), "([self.xcen_halo], dtype='f4')\n", (33136, 33166), True, 'import numpy as np\n'), ((33214, 33252), 'numpy.array', 'np.array', (['[self.ycen_halo]'], {'dtype': '"""f4"""'}), "([self.ycen_halo], dtype='f4')\n", (33222, 33252), True, 'import numpy as np\n'), ((33300, 33338), 'numpy.array', 'np.array', (['[self.zcen_halo]'], {'dtype': '"""f4"""'}), "([self.zcen_halo], dtype='f4')\n", (33308, 33338), True, 'import numpy as np\n'), ((33494, 33531), 'numpy.array', 'np.array', (['[self.numrhalo]'], {'dtype': '"""i4"""'}), "([self.numrhalo], dtype='i4')\n", (33502, 33531), True, 'import numpy as np\n'), ((33573, 33610), 'numpy.array', 'np.array', (['[self.cmaphalo]'], {'dtype': '"""i4"""'}), "([self.cmaphalo], dtype='i4')\n", (33581, 
33610), True, 'import numpy as np\n'), ((33652, 33689), 'numpy.array', 'np.array', (['[self.lmaxhalo]'], {'dtype': '"""i4"""'}), "([self.lmaxhalo], dtype='i4')\n", (33660, 33689), True, 'import numpy as np\n'), ((33731, 33768), 'numpy.array', 'np.array', (['[self.nmaxhalo]'], {'dtype': '"""i4"""'}), "([self.nmaxhalo], dtype='i4')\n", (33739, 33768), True, 'import numpy as np\n'), ((41015, 41034), 'numpy.min', 'np.min', (['self.Energy'], {}), '(self.Energy)\n', (41021, 41034), True, 'import numpy as np\n'), ((41423, 41447), 'numpy.where', 'np.where', (['(eindx == i + 1)'], {}), '(eindx == i + 1)\n', (41431, 41447), True, 'import numpy as np\n'), ((41683, 41722), 'numpy.percentile', 'np.percentile', (['LZ[energy_range]', 'percen'], {}), '(LZ[energy_range], percen)\n', (41696, 41722), True, 'import numpy as np\n'), ((41966, 41994), 'numpy.median', 'np.median', (['R[lzarg[-100:-1]]'], {}), '(R[lzarg[-100:-1]])\n', (41975, 41994), True, 'import numpy as np\n'), ((42029, 42067), 'numpy.percentile', 'np.percentile', (['R[energy_range]', 'percen'], {}), '(R[energy_range], percen)\n', (42042, 42067), True, 'import numpy as np\n'), ((42098, 42136), 'numpy.percentile', 'np.percentile', (['L[energy_range]', 'percen'], {}), '(L[energy_range], percen)\n', (42111, 42136), True, 'import numpy as np\n'), ((6724, 6827), 'numpy.sum', 'np.sum', (["(PSPDumpHaloTransformed.data['x'][cparticles] * PSPDumpHaloTransformed.data\n ['m'][cparticles])"], {}), "(PSPDumpHaloTransformed.data['x'][cparticles] *\n PSPDumpHaloTransformed.data['m'][cparticles])\n", (6730, 6827), True, 'import numpy as np\n'), ((6822, 6874), 'numpy.sum', 'np.sum', (["PSPDumpHaloTransformed.data['m'][cparticles]"], {}), "(PSPDumpHaloTransformed.data['m'][cparticles])\n", (6828, 6874), True, 'import numpy as np\n'), ((6908, 7011), 'numpy.sum', 'np.sum', (["(PSPDumpHaloTransformed.data['y'][cparticles] * PSPDumpHaloTransformed.data\n ['m'][cparticles])"], {}), "(PSPDumpHaloTransformed.data['y'][cparticles] *\n 
PSPDumpHaloTransformed.data['m'][cparticles])\n", (6914, 7011), True, 'import numpy as np\n'), ((7006, 7058), 'numpy.sum', 'np.sum', (["PSPDumpHaloTransformed.data['m'][cparticles]"], {}), "(PSPDumpHaloTransformed.data['m'][cparticles])\n", (7012, 7058), True, 'import numpy as np\n'), ((7092, 7195), 'numpy.sum', 'np.sum', (["(PSPDumpHaloTransformed.data['z'][cparticles] * PSPDumpHaloTransformed.data\n ['m'][cparticles])"], {}), "(PSPDumpHaloTransformed.data['z'][cparticles] *\n PSPDumpHaloTransformed.data['m'][cparticles])\n", (7098, 7195), True, 'import numpy as np\n'), ((7190, 7242), 'numpy.sum', 'np.sum', (["PSPDumpHaloTransformed.data['m'][cparticles]"], {}), "(PSPDumpHaloTransformed.data['m'][cparticles])\n", (7196, 7242), True, 'import numpy as np\n'), ((25373, 25387), 'numpy.cos', 'np.cos', (['thgrid'], {}), '(thgrid)\n', (25379, 25387), True, 'import numpy as np\n'), ((25426, 25440), 'numpy.sin', 'np.sin', (['thgrid'], {}), '(thgrid)\n', (25432, 25440), True, 'import numpy as np\n'), ((41054, 41073), 'numpy.max', 'np.max', (['self.Energy'], {}), '(self.Energy)\n', (41060, 41073), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
import networkx as nx
import igraph as ig
from utils.constants import DatasetType, GraphVisualizationTool, network_repository_cora_url, cora_label_to_color_map
from utils.utils import convert_adj_to_edge_index
def plot_in_out_degree_distributions(edge_index, num_of_nodes, dataset_name):
    """
    Plot per-node in-degrees, per-node out-degrees and the out-degree histogram.

    Note: It would be easy to do various kinds of powerful network analysis using igraph/networkx, etc.
    I chose to explicitly calculate only the node degree statistics here, but you can go much further if needed and
    calculate the graph diameter, number of triangles and many other concepts from the network analysis field.

    Args:
        edge_index: NumPy array, either shape (2, E) (row 0 = source nodes, row 1 = target nodes)
            or a square dense adjacency matrix, which gets converted to edge-index format first.
        num_of_nodes: number of nodes in the graph (degree arrays are sized by this).
        dataset_name: used only in the chart title.
    """
    assert isinstance(edge_index, np.ndarray), f'Expected NumPy array got {type(edge_index)}.'

    # A square matrix is assumed to be a dense adjacency matrix - convert it to (2, E) format.
    if edge_index.shape[0] == edge_index.shape[1]:
        edge_index = convert_adj_to_edge_index(edge_index)

    # Store each node's input and output degree (they're the same for undirected graphs such as Cora).
    # Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24 - use the explicit np.int64 instead.
    in_degrees = np.zeros(num_of_nodes, dtype=np.int64)
    out_degrees = np.zeros(num_of_nodes, dtype=np.int64)

    # Edge index shape = (2, E), the first row contains the source nodes, the second one target/sink nodes
    # Note on terminology: source nodes point to target/sink nodes
    num_of_edges = edge_index.shape[1]
    for cnt in range(num_of_edges):
        source_node_id = edge_index[0, cnt]
        target_node_id = edge_index[1, cnt]

        out_degrees[source_node_id] += 1  # source node points towards some other node -> increment it's out degree
        in_degrees[target_node_id] += 1  # similarly here

    # hist[d] = number of nodes whose out-degree equals d; np.bincount does the
    # manual zero-init + counting loop in a single vectorized call.
    hist = np.bincount(out_degrees)

    fig = plt.figure()
    fig.subplots_adjust(hspace=0.6)

    plt.subplot(311)
    plt.plot(in_degrees, color='red')
    plt.xlabel('node id'); plt.ylabel('in-degree count'); plt.title('Input degree for different node ids')

    plt.subplot(312)
    plt.plot(out_degrees, color='green')
    plt.xlabel('node id'); plt.ylabel('out-degree count'); plt.title('Out degree for different node ids')

    plt.subplot(313)
    plt.plot(hist, color='blue')
    plt.xlabel('node degree'); plt.ylabel('# nodes for a given out-degree'); plt.title(f'Node out-degree distribution for {dataset_name} dataset')
    plt.xticks(np.arange(0, len(hist), 5.0))

    plt.grid(True)
    plt.show()
def visualize_graph(edge_index, node_labels, dataset_name, visualization_tool=GraphVisualizationTool.IGRAPH):
    """
    Draw the graph with the requested visualization tool (networkx or igraph).

    Check out this blog for available graph visualization tools:
        https://towardsdatascience.com/large-graph-visualization-tools-and-approaches-2b8758a1cd59

    Basically depending on how big your graph is there may be better drawing tools than igraph.

    Note:
    There are also some nice browser-based tools to visualize graphs like this one:
        http://networkrepository.com/graphvis.php?d=./data/gsm50/labeled/cora.edges

    Nonetheless tools like igraph can be useful for quick visualization directly from Python
    """
    assert isinstance(edge_index, np.ndarray), f'Expected NumPy array got {type(edge_index)}.'

    # A square matrix is treated as a dense adjacency matrix and converted to (2, E) edge-index format.
    if edge_index.shape[0] == edge_index.shape[1]:
        edge_index = convert_adj_to_edge_index(edge_index)

    num_of_nodes = len(node_labels)
    edges = list(zip(edge_index[0, :], edge_index[1, :]))

    if visualization_tool == GraphVisualizationTool.NETWORKX:
        # networkx is primarily a network-analysis package (drawing was an afterthought
        # in its design) but it is perfectly serviceable for a quick visualization.
        graph = nx.Graph()
        graph.add_edges_from(edges)
        nx.draw_networkx(graph)
        plt.show()

    elif visualization_tool == GraphVisualizationTool.IGRAPH:
        # Build the igraph graph.
        graph = ig.Graph()
        graph.add_vertices(num_of_nodes)
        graph.add_edges(edges)

        # All visualization settings live in this dictionary.
        style = {}

        # Plot dimensions and margins.
        style["bbox"] = (3000, 3000)
        style["margin"] = 35

        # Edge thickness is an ad hoc heuristic proportional to edge betweenness - the
        # number of shortest paths (geodesics) running through an edge:
        #   * log keeps a few dominant edges from drowning out the rest; betweenness can be
        #     below 1.0, so clip at 0 since log would be negative there (1e-16 avoids log(0))
        #   * normalize so the thickest edge is 1.0, otherwise everything looks too fat
        #   * the 6th power keeps the strongest edges visibly stronger - 6 simply worked well
        raw_weights = np.clip(np.log(np.asarray(graph.edge_betweenness()) + 1e-16), a_min=0, a_max=None)
        normalized_weights = raw_weights / np.max(raw_weights)
        style["edge_width"] = [w ** 6 for w in normalized_weights]

        # Vertex size heuristic: size ~ degree / 2 (gave nice results; log and sqrt were also tried).
        style["vertex_size"] = [degree / 2 for degree in graph.degree()]

        # Only this coloring step is Cora-specific, since Cora has 7 labels.
        if dataset_name.lower() == DatasetType.CORA.name.lower():
            style["vertex_color"] = [cora_label_to_color_map[label] for label in node_labels]
        else:
            print('Feel free to add custom color scheme for your specific dataset. Using igraph default coloring.')

        # Layout = how the graph is placed on a 2D chart; graph drawing is a subfield of its own.
        # Kamada-Kawai is a force-directed method based on physical-system simulation
        # (layout_drl also produced nice results for Cora).
        style["layout"] = graph.layout_kamada_kawai()

        print('Plotting results ... (it may take couple of seconds).')
        ig.plot(graph, **style)
    else:
        raise Exception(f'Visualization tool {visualization_tool.name} not supported.')
def draw_entropy_histogram(entropy_array, title, color='blue', uniform_distribution=False, num_bins=30):
    """Draw a bar-chart histogram of the given entropy values.

    Args:
        entropy_array: array of entropy values; bins span [0, max(entropy_array)].
        title: chart title.
        color: bar color passed straight to matplotlib.
        uniform_distribution: if True, bars use the full bin width; otherwise they are
            drawn at 75% width so gaps make individual bins easier to tell apart.
        num_bins: number of histogram bins.
    """
    max_value = np.max(entropy_array)
    bar_width = (max_value / num_bins) * (1.0 if uniform_distribution else 0.75)
    histogram_values, histogram_bins = np.histogram(entropy_array, bins=num_bins, range=(0.0, max_value))

    plt.bar(histogram_bins[:num_bins], histogram_values[:num_bins], width=bar_width, color=color)
    # Plain literals - these were pointlessly f-strings without any placeholders (ruff F541).
    plt.xlabel('entropy bins')
    plt.ylabel('# of node neighborhoods')
    plt.title(title)
| [
"numpy.histogram",
"matplotlib.pyplot.grid",
"utils.utils.convert_adj_to_edge_index",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"igraph.Graph",
"networkx.Graph",
"numpy.max",
"networkx.draw_networkx",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplot... | [((1028, 1064), 'numpy.zeros', 'np.zeros', (['num_of_nodes'], {'dtype': 'np.int'}), '(num_of_nodes, dtype=np.int)\n', (1036, 1064), True, 'import numpy as np\n'), ((1083, 1119), 'numpy.zeros', 'np.zeros', (['num_of_nodes'], {'dtype': 'np.int'}), '(num_of_nodes, dtype=np.int)\n', (1091, 1119), True, 'import numpy as np\n'), ((1755, 1767), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1765, 1767), True, 'import matplotlib.pyplot as plt\n'), ((1809, 1825), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(311)'], {}), '(311)\n', (1820, 1825), True, 'import matplotlib.pyplot as plt\n'), ((1830, 1863), 'matplotlib.pyplot.plot', 'plt.plot', (['in_degrees'], {'color': '"""red"""'}), "(in_degrees, color='red')\n", (1838, 1863), True, 'import matplotlib.pyplot as plt\n'), ((1868, 1889), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""node id"""'], {}), "('node id')\n", (1878, 1889), True, 'import matplotlib.pyplot as plt\n'), ((1891, 1920), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""in-degree count"""'], {}), "('in-degree count')\n", (1901, 1920), True, 'import matplotlib.pyplot as plt\n'), ((1922, 1970), 'matplotlib.pyplot.title', 'plt.title', (['"""Input degree for different node ids"""'], {}), "('Input degree for different node ids')\n", (1931, 1970), True, 'import matplotlib.pyplot as plt\n'), ((1976, 1992), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(312)'], {}), '(312)\n', (1987, 1992), True, 'import matplotlib.pyplot as plt\n'), ((1997, 2033), 'matplotlib.pyplot.plot', 'plt.plot', (['out_degrees'], {'color': '"""green"""'}), "(out_degrees, color='green')\n", (2005, 2033), True, 'import matplotlib.pyplot as plt\n'), ((2038, 2059), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""node id"""'], {}), "('node id')\n", (2048, 2059), True, 'import matplotlib.pyplot as plt\n'), ((2061, 2091), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""out-degree count"""'], {}), "('out-degree count')\n", (2071, 2091), True, 'import matplotlib.pyplot as 
plt\n'), ((2093, 2139), 'matplotlib.pyplot.title', 'plt.title', (['"""Out degree for different node ids"""'], {}), "('Out degree for different node ids')\n", (2102, 2139), True, 'import matplotlib.pyplot as plt\n'), ((2145, 2161), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(313)'], {}), '(313)\n', (2156, 2161), True, 'import matplotlib.pyplot as plt\n'), ((2166, 2194), 'matplotlib.pyplot.plot', 'plt.plot', (['hist'], {'color': '"""blue"""'}), "(hist, color='blue')\n", (2174, 2194), True, 'import matplotlib.pyplot as plt\n'), ((2199, 2224), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""node degree"""'], {}), "('node degree')\n", (2209, 2224), True, 'import matplotlib.pyplot as plt\n'), ((2226, 2270), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""# nodes for a given out-degree"""'], {}), "('# nodes for a given out-degree')\n", (2236, 2270), True, 'import matplotlib.pyplot as plt\n'), ((2272, 2341), 'matplotlib.pyplot.title', 'plt.title', (['f"""Node out-degree distribution for {dataset_name} dataset"""'], {}), "(f'Node out-degree distribution for {dataset_name} dataset')\n", (2281, 2341), True, 'import matplotlib.pyplot as plt\n'), ((2392, 2406), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2400, 2406), True, 'import matplotlib.pyplot as plt\n'), ((2411, 2421), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2419, 2421), True, 'import matplotlib.pyplot as plt\n'), ((6467, 6488), 'numpy.max', 'np.max', (['entropy_array'], {}), '(entropy_array)\n', (6473, 6488), True, 'import numpy as np\n'), ((6609, 6675), 'numpy.histogram', 'np.histogram', (['entropy_array'], {'bins': 'num_bins', 'range': '(0.0, max_value)'}), '(entropy_array, bins=num_bins, range=(0.0, max_value))\n', (6621, 6675), True, 'import numpy as np\n'), ((6681, 6779), 'matplotlib.pyplot.bar', 'plt.bar', (['histogram_bins[:num_bins]', 'histogram_values[:num_bins]'], {'width': 'bar_width', 'color': 'color'}), '(histogram_bins[:num_bins], histogram_values[:num_bins], 
width=\n bar_width, color=color)\n', (6688, 6779), True, 'import matplotlib.pyplot as plt\n'), ((6779, 6806), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['f"""entropy bins"""'], {}), "(f'entropy bins')\n", (6789, 6806), True, 'import matplotlib.pyplot as plt\n'), ((6811, 6849), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['f"""# of node neighborhoods"""'], {}), "(f'# of node neighborhoods')\n", (6821, 6849), True, 'import matplotlib.pyplot as plt\n'), ((6854, 6870), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (6863, 6870), True, 'import matplotlib.pyplot as plt\n'), ((870, 907), 'utils.utils.convert_adj_to_edge_index', 'convert_adj_to_edge_index', (['edge_index'], {}), '(edge_index)\n', (895, 907), False, 'from utils.utils import convert_adj_to_edge_index\n'), ((3252, 3289), 'utils.utils.convert_adj_to_edge_index', 'convert_adj_to_edge_index', (['edge_index'], {}), '(edge_index)\n', (3277, 3289), False, 'from utils.utils import convert_adj_to_edge_index\n'), ((3680, 3690), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (3688, 3690), True, 'import networkx as nx\n'), ((3750, 3776), 'networkx.draw_networkx', 'nx.draw_networkx', (['nx_graph'], {}), '(nx_graph)\n', (3766, 3776), True, 'import networkx as nx\n'), ((3785, 3795), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3793, 3795), True, 'import matplotlib.pyplot as plt\n'), ((1654, 1673), 'numpy.max', 'np.max', (['out_degrees'], {}), '(out_degrees)\n', (1660, 1673), True, 'import numpy as np\n'), ((3915, 3925), 'igraph.Graph', 'ig.Graph', ([], {}), '()\n', (3923, 3925), True, 'import igraph as ig\n'), ((6212, 6245), 'igraph.plot', 'ig.plot', (['ig_graph'], {}), '(ig_graph, **visual_style)\n', (6219, 6245), True, 'import igraph as ig\n'), ((5068, 5092), 'numpy.max', 'np.max', (['edge_weights_raw'], {}), '(edge_weights_raw)\n', (5074, 5092), True, 'import numpy as np\n'), ((5516, 5545), 'utils.constants.DatasetType.CORA.name.lower', 'DatasetType.CORA.name.lower', ([], {}), '()\n', 
(5543, 5545), False, 'from utils.constants import DatasetType, GraphVisualizationTool, network_repository_cora_url, cora_label_to_color_map\n')] |
import unittest
import numpy as np
from mars.executor import Executor
from mars.tensor.datasource import ones
class Test(unittest.TestCase):
    """Execution test for tensor reshape on the numpy-backed executor."""

    def setUp(self):
        # Fresh executor per test so cases stay independent.
        self.executor = Executor('numpy')

    def testReshapeExecution(self):
        tensor = ones((1, 2, 3), chunk_size=[4, 3, 5])
        reshaped = tensor.reshape(3, 2)

        result = self.executor.execute_tensor(reshaped)[0]
        self.assertEqual(reshaped.shape, (3, 2))
        np.testing.assert_equal(result, np.ones((3, 2)))
| [
"mars.tensor.datasource.ones",
"mars.executor.Executor",
"numpy.ones"
] | [((189, 206), 'mars.executor.Executor', 'Executor', (['"""numpy"""'], {}), "('numpy')\n", (197, 206), False, 'from mars.executor import Executor\n'), ((256, 293), 'mars.tensor.datasource.ones', 'ones', (['(1, 2, 3)'], {'chunk_size': '[4, 3, 5]'}), '((1, 2, 3), chunk_size=[4, 3, 5])\n', (260, 293), False, 'from mars.tensor.datasource import ones\n'), ((450, 465), 'numpy.ones', 'np.ones', (['(3, 2)'], {}), '((3, 2))\n', (457, 465), True, 'import numpy as np\n')] |
import numpy as np
import pytest
import probnum.problems.zoo.filtsmooth as filtsmooth_zoo
from probnum import filtsmooth
@pytest.fixture(params=[filtsmooth_zoo.logistic_ode])
def setup(request):
    """Provide a (Kalman filter, regression problem) pair for each zoo problem."""
    make_problem = request.param
    regression_problem, info = make_problem()

    kalman = filtsmooth.Kalman(info["prior_process"])
    return kalman, regression_problem
def test_rmse_filt_smooth(setup):
    """Assert that iterated smoothing beats smoothing."""
    np.random.seed(12345)
    kalman, regression_problem = setup
    truth = regression_problem.solution

    stopping_criterion = filtsmooth.StoppingCriterion(atol=1e-1, rtol=1e-1, maxit=10)

    posterior, _ = kalman.filter(regression_problem)
    posterior = kalman.smooth(posterior)
    iterated_posterior, _ = kalman.iterated_filtsmooth(
        regression_problem, stopcrit=stopping_criterion
    )

    filter_means = posterior.filtering_posterior.states.mean
    smoother_means = posterior.states.mean
    iterated_means = iterated_posterior.states.mean

    # Promote 1-D state means to column vectors so the component slicing below is uniform.
    if filter_means.ndim == 1:
        filter_means = filter_means.reshape((-1, 1))
        smoother_means = smoother_means.reshape((-1, 1))
        iterated_means = iterated_means.reshape((-1, 1))
    if truth.ndim == 1:
        truth = truth.reshape((-1, 1))

    # Compare only the zeroth component, for compatibility with all test cases.
    smoother_rmse = np.mean(np.abs(smoother_means[:, 0] - truth[:, 0]))
    iterated_rmse = np.mean(np.abs(iterated_means[:, 0] - truth[:, 0]))
    assert iterated_rmse < smoother_rmse
| [
"numpy.abs",
"probnum.filtsmooth.Kalman",
"probnum.filtsmooth.StoppingCriterion",
"numpy.random.seed",
"pytest.fixture"
] | [((125, 177), 'pytest.fixture', 'pytest.fixture', ([], {'params': '[filtsmooth_zoo.logistic_ode]'}), '(params=[filtsmooth_zoo.logistic_ode])\n', (139, 177), False, 'import pytest\n'), ((322, 362), 'probnum.filtsmooth.Kalman', 'filtsmooth.Kalman', (["info['prior_process']"], {}), "(info['prior_process'])\n", (339, 362), False, 'from probnum import filtsmooth\n'), ((517, 538), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (531, 538), True, 'import numpy as np\n'), ((634, 692), 'probnum.filtsmooth.StoppingCriterion', 'filtsmooth.StoppingCriterion', ([], {'atol': '(0.1)', 'rtol': '(0.1)', 'maxit': '(10)'}), '(atol=0.1, rtol=0.1, maxit=10)\n', (662, 692), False, 'from probnum import filtsmooth\n'), ((1354, 1388), 'numpy.abs', 'np.abs', (['(smooms[:, 0] - truth[:, 0])'], {}), '(smooms[:, 0] - truth[:, 0])\n', (1360, 1388), True, 'import numpy as np\n'), ((1416, 1450), 'numpy.abs', 'np.abs', (['(iterms[:, 0] - truth[:, 0])'], {}), '(iterms[:, 0] - truth[:, 0])\n', (1422, 1450), True, 'import numpy as np\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy.coordinates import SkyCoord
import astropy.units as u
from astropy.wcs import WCS
from regions import (
CircleAnnulusSkyRegion,
EllipseSkyRegion,
PointSkyRegion,
PolygonSkyRegion,
CircleSkyRegion,
RectangleSkyRegion,
)
from gammapy.maps import Map, WcsGeom, RegionGeom, MapAxis
from gammapy.modeling.models import (
ConstantSpatialModel,
DiskSpatialModel,
GaussianSpatialModel,
GeneralizedGaussianSpatialModel,
PointSpatialModel,
ShellSpatialModel,
Shell2SpatialModel,
TemplateSpatialModel,
)
from gammapy.utils.testing import mpl_plot_check, requires_data, requires_dependency
def test_sky_point_source():
    """Check position, normalization, and region of the point-source model."""
    geom = WcsGeom.create(skydir=(2.4, 2.3), npix=(10, 10), binsz=0.3)
    model = PointSpatialModel(lon_0="2.5 deg", lat_0="2.5 deg", frame="icrs")

    # A point source has no extent, so the evaluation radius is zero.
    assert model.evaluation_radius.unit == "deg"
    assert_allclose(model.evaluation_radius.value, 0)

    assert model.frame == "icrs"
    assert_allclose(model.position.ra.deg, 2.5)
    assert_allclose(model.position.dec.deg, 2.5)

    # Integrating the evaluated map over solid angle must give unity.
    values = model.evaluate_geom(geom)
    assert values.unit == "sr-1"
    assert_allclose(np.sum(values * geom.solid_angle()), 1)

    assert isinstance(model.to_region(), PointSkyRegion)
def test_sky_gaussian():
    """Gaussian model: isocontour ratios, normalization, and rotation."""
    # Symmetric case
    sigma = 1 * u.deg
    model = GaussianSpatialModel(lon_0="5 deg", lat_0="15 deg", sigma=sigma)
    assert model.parameters["sigma"].min == 0

    value_center = model(5 * u.deg, 15 * u.deg)
    value_at_sigma = model(5 * u.deg, 16 * u.deg)
    assert value_center.unit == "sr-1"
    # Peak over 1-sigma value of a Gaussian is exp(0.5)
    assert_allclose(value_center / value_at_sigma, np.exp(0.5))

    radius = model.evaluation_radius
    assert radius.unit == "deg"
    assert_allclose(radius.value, 5 * sigma.value)

    # Normalization of an elongated Gaussian near the Galactic plane
    geom_plane = WcsGeom.create(
        binsz=0.05, width=(20, 20), skydir=(2, 2), frame="galactic", proj="AIT"
    )
    coords = geom_plane.get_coord()
    sigma = 3 * u.deg
    elongated = GaussianSpatialModel(
        lon_0=2 * u.deg, lat_0=2 * u.deg, sigma=sigma, e=0.8, phi=30 * u.deg
    )
    values = elongated(coords.lon, coords.lat)
    assert values.unit == "sr-1"
    assert_allclose(np.sum(values * geom_plane.solid_angle()), 1, rtol=1.0e-3)

    radius = elongated.evaluation_radius
    assert radius.unit == "deg"
    assert_allclose(radius.value, 5 * sigma.value)

    # Peak-to-isocontour ratio along both axes of a tilted Gaussian
    sigma = 4 * u.deg
    semi_minor = 2 * u.deg
    e = np.sqrt(1 - (semi_minor / sigma) ** 2)
    tilted = GaussianSpatialModel(
        lon_0=0 * u.deg, lat_0=0 * u.deg, sigma=sigma, e=e, phi=0 * u.deg
    )
    value_peak = tilted(0 * u.deg, 0 * u.deg)
    value_major = tilted(0 * u.deg, 4 * u.deg)
    value_minor = tilted(2 * u.deg, 0 * u.deg)
    assert value_peak.unit == "sr-1"
    assert_allclose(value_peak / value_major, np.exp(0.5))
    assert_allclose(value_peak / value_minor, np.exp(0.5))

    # Rotating by 90 deg moves the minor axis onto the latitude direction
    rotated = GaussianSpatialModel(
        lon_0=0 * u.deg, lat_0=0 * u.deg, sigma=sigma, e=e, phi=90 * u.deg
    )
    value_minor_rotated = rotated(0 * u.deg, 2 * u.deg)
    assert_allclose(value_peak / value_minor_rotated, np.exp(0.5))

    # NOTE(review): this checks the first (symmetric) model, not `rotated`
    assert isinstance(model.to_region(), EllipseSkyRegion)
@pytest.mark.parametrize("eta", np.arange(0.1, 1.01, 0.3))
@pytest.mark.parametrize("r_0", np.arange(0.01, 1.01, 0.3))
@pytest.mark.parametrize("e", np.arange(0.0, 0.801, 0.4))
def test_generalized_gaussian(eta, r_0, e):
# check normalization is robust for a large set of values
model = GeneralizedGaussianSpatialModel(
eta=eta, r_0=r_0 * u.deg, e=e, frame="galactic"
)
width = np.maximum(2 * model.evaluation_radius.to_value("deg"), 0.5)
geom = WcsGeom.create(skydir=(0, 0), binsz=0.02, width=width, frame="galactic",)
integral = model.integrate_geom(geom)
assert integral.unit.is_equivalent("")
assert_allclose(integral.data.sum(), 1.0, atol=5e-3)
def test_generalized_gaussian_io():
    """Round-trip a generalized Gaussian through its dict representation."""
    model = GeneralizedGaussianSpatialModel(e=0.5)

    region = model.to_region()
    assert isinstance(region, EllipseSkyRegion)
    assert_allclose(region.width.value, 1.73205, rtol=1e-5)

    restored = GeneralizedGaussianSpatialModel.from_dict(model.to_dict())
    assert isinstance(restored, GeneralizedGaussianSpatialModel)
def test_sky_disk():
    """Disk model: values, radius, normalization, rotation, and pole case."""
    # Plain disk (e=0)
    r_0 = 2 * u.deg
    model = DiskSpatialModel(lon_0="1 deg", lat_0="45 deg", r_0=r_0)
    values = model([1, 5, 359] * u.deg, 46 * u.deg)
    assert values.unit == "sr-1"
    assert_allclose(values.value, [261.263956, 0, 261.263956])

    radius = model.evaluation_radius
    assert radius.unit == "deg"
    assert_allclose(radius.to_value("deg"), 2.222)

    # Normalization of an elongated ellipse near the Galactic plane
    geom_plane = WcsGeom.create(
        binsz=0.015, width=(20, 20), skydir=(2, 2), frame="galactic", proj="AIT"
    )
    coords = geom_plane.get_coord()
    ellipse = DiskSpatialModel(
        lon_0=2 * u.deg, lat_0=2 * u.deg, r_0=10 * u.deg, e=0.4, phi=30 * u.deg
    )
    values_plane = ellipse(coords.lon, coords.lat)
    assert values_plane.unit == "sr-1"
    assert_allclose(np.sum(values_plane * geom_plane.solid_angle()), 1, rtol=1.0e-3)

    radius = ellipse.evaluation_radius
    assert radius.unit == "deg"
    assert_allclose(radius.to_value("deg"), 11.11)

    # A 90-deg rotation puts the semi-minor axis (1 deg) along latitude,
    # so a point at 1.5 deg latitude lies outside the ellipse
    semi_minor = 1 * u.deg
    eccentricity = np.sqrt(1 - (semi_minor / (2 * u.deg)) ** 2)
    rotated = DiskSpatialModel(
        lon_0=0 * u.deg,
        lat_0=0 * u.deg,
        r_0=2 * u.deg,
        e=eccentricity,
        phi=90 * u.deg,
    )
    assert_allclose(rotated(0 * u.deg, 1.5 * u.deg).value, 0)

    # Normalization of a disk (e=0) centered on the Galactic pole
    geom_pole = WcsGeom.create(
        binsz=0.1, width=(6, 6), skydir=(0, 90), frame="galactic", proj="AIT"
    )
    pole_coords = geom_pole.get_coord()
    disk = DiskSpatialModel(lon_0=0 * u.deg, lat_0=90 * u.deg, r_0=5 * u.deg)
    values_pole = disk(pole_coords.lon, pole_coords.lat)
    # Solid angle of a 5-deg spherical cap
    cap_solid_angle = 2 * np.pi * (1 - np.cos(5 * u.deg))
    assert_allclose(np.max(values_pole).value * cap_solid_angle, 1)

    assert isinstance(model.to_region(), EllipseSkyRegion)
def test_sky_disk_edge():
    """The smooth disk edge drops from 95% to 5% across the edge width."""
    r_0 = 2 * u.deg
    model = DiskSpatialModel(lon_0="0 deg", lat_0="0 deg", r_0=r_0, e=0.5, phi="0 deg")
    value_center = model(0 * u.deg, 0 * u.deg)

    # Exactly on the edge the profile is at half the central value
    assert_allclose((model(0 * u.deg, r_0) / value_center).to_value(""), 0.5)

    half_width = model.edge_width.value * r_0 / 2
    assert_allclose(
        (model(0 * u.deg, r_0 + half_width) / value_center).to_value(""), 0.05
    )
    assert_allclose(
        (model(0 * u.deg, r_0 - half_width) / value_center).to_value(""), 0.95
    )
def test_sky_shell():
    """Shell model: values, unit, evaluation radius, and region type."""
    width = 2 * u.deg
    rad = 2 * u.deg
    model = ShellSpatialModel(lon_0="1 deg", lat_0="45 deg", radius=rad, width=width)

    values = model([1, 2, 4] * u.deg, 45 * u.deg)
    assert values.unit == "deg-2"
    assert_allclose(values.to_value("sr-1"), [55.979449, 57.831651, 94.919895])

    radius = model.evaluation_radius
    assert radius.unit == "deg"
    # The model extends out to the shell's outer edge
    assert_allclose(radius.value, rad.value + width.value)

    assert isinstance(model.to_region(), CircleAnnulusSkyRegion)
def test_sky_shell2():
    """Shell2 model (outer radius + eta) gives the same values as the shell model."""
    width = 2 * u.deg
    rad = 2 * u.deg
    model = Shell2SpatialModel(lon_0="1 deg", lat_0="45 deg", r_0=rad + width, eta=0.5)

    values = model([1, 2, 4] * u.deg, 45 * u.deg)
    assert values.unit == "deg-2"
    assert_allclose(values.to_value("sr-1"), [55.979449, 57.831651, 94.919895])

    radius = model.evaluation_radius
    assert radius.unit == "deg"
    assert_allclose(radius.value, rad.value + width.value)
    # The derived inner radius equals the shell's inner edge (2 deg)
    assert_allclose(model.r_in.value, rad.value)

    assert isinstance(model.to_region(), CircleAnnulusSkyRegion)
def test_sky_diffuse_constant():
    """A constant model returns its value everywhere and has no radius."""
    model = ConstantSpatialModel(value="42 sr-1")

    values = model([1, 2] * u.deg, 45 * u.deg)
    assert values.unit == "sr-1"
    assert_allclose(values.value, 42)

    # An all-sky constant has no meaningful evaluation radius
    assert model.evaluation_radius is None
    assert isinstance(model.to_region(), EllipseSkyRegion)
@requires_dependency("matplotlib")
@requires_data()
def test_sky_diffuse_map(caplog):
filename = "$GAMMAPY_DATA/catalogs/fermi/Extended_archive_v18/Templates/RXJ1713_2016_250GeV.fits"
model = TemplateSpatialModel.read(filename, normalize=False)
lon = [258.5, 0] * u.deg
lat = -39.8 * u.deg
val = model(lon, lat)
assert "WARNING" in [_.levelname for _ in caplog.records]
assert "Missing spatial template unit, assuming sr^-1" in [
_.message for _ in caplog.records
]
assert val.unit == "sr-1"
desired = [3269.178107, 0]
assert_allclose(val.value, desired)
res = model.evaluate_geom(model.map.geom)
assert_allclose(np.sum(res.value), 32816514.42078349)
radius = model.evaluation_radius
assert radius.unit == "deg"
assert_allclose(radius.value, 0.64, rtol=1.0e-2)
assert model.frame == "fk5"
assert isinstance(model.to_region(), RectangleSkyRegion)
with pytest.raises(TypeError):
model.plot_interative()
with pytest.raises(TypeError):
model.plot_grid()
@requires_data()
def test_sky_diffuse_map_3d():
    """Read a 3D (energy-dependent) template map and check evaluation.

    A 3D template requires an energy coordinate: calling it without one
    raises ``ValueError``, and plotting the 3D map without slicing raises
    ``TypeError``.
    """
    filename = "$GAMMAPY_DATA/fermi-3fhl-gc/gll_iem_v06_gc.fits.gz"
    model = TemplateSpatialModel.read(filename, normalize=False)
    lon = [258.5, 0] * u.deg
    lat = -39.8 * u.deg
    energy = 1 * u.GeV

    # Omitting the energy coordinate is an error for a 3D template
    with pytest.raises(ValueError):
        model(lon, lat)

    assert model.map.unit == "cm-2 s-1 MeV-1 sr-1"

    # Fix: the original evaluated the model twice with identical arguments
    # and discarded the first result; one call is sufficient.
    val = model(lon, lat, energy)
    assert val.unit == "cm-2 s-1 MeV-1 sr-1"

    res = model.evaluate_geom(model.map.geom)
    assert_allclose(np.sum(res.value), 0.11803847221522712)

    with pytest.raises(TypeError):
        model.plot()
@requires_data()
def test_sky_diffuse_map_normalize():
    """A template map integrates to one regardless of the data-map binning."""
    # Template map with a constant value of 1 sr^-1
    model_map = Map.create(map_type="wcs", width=(10, 5), binsz=0.5)
    model_map.data += 1.0
    model_map.unit = "sr-1"
    model = TemplateSpatialModel(model_map)

    # Evaluate on a geometry with a different (coarser) spatial binning
    data_map = Map.create(map_type="wcs", width=(10, 5), binsz=1)
    coords = data_map.geom.get_coord()
    values = model(coords.lon, coords.lat) * data_map.geom.solid_angle()
    assert values.unit == ""
    assert_allclose(values.sum().value, 1, rtol=1e-4)
def test_evaluate_on_fk5_map():
    """A spatial model can be evaluated on a map in the FK5 frame.

    Regression test for GH-2402.
    """
    header = {
        "CDELT1": 1.0,
        "CDELT2": 1.0,
        "CTYPE1": "RA---TAN",
        "CTYPE2": "DEC--TAN",
        "RADESYS": "FK5",
        "CRVAL1": 0,
        "CRVAL2": 0,
        "CRPIX1": 5,
        "CRPIX2": 5,
    }
    geom = WcsGeom(WCS(header), npix=(10, 10))
    model = GaussianSpatialModel(lon_0="0 deg", lat_0="0 deg", sigma="1 deg")
    assert model.evaluate_geom(geom).sum() > 0
def test_evaluate_fk5_model():
    """A model defined in FK5 evaluates on an ICRS geometry."""
    geom = WcsGeom.create(width=(5, 5), binsz=0.1, frame="icrs")
    model = GaussianSpatialModel(
        lon_0="0 deg", lat_0="0 deg", sigma="0.1 deg", frame="fk5"
    )
    assert model.evaluate_geom(geom).sum() > 0
@requires_dependency("matplotlib")
def test_spatial_model_plot():
model = PointSpatialModel()
model.covariance = np.diag([0.01, 0.01])
with mpl_plot_check():
ax = model.plot()
with mpl_plot_check():
model.plot_error(ax=ax)
def test_integrate_region_geom():
    """Integral of a narrow Gaussian over large/small circular regions.

    The 1-deg region contains essentially all the flux; the region with
    radius equal to sigma contains ~39.5% of it.
    """
    center = SkyCoord("0d", "0d", frame="icrs")
    # Fix: the model's parameter names are ``lon_0``/``lat_0`` (as used
    # everywhere else in this file); ``lon``/``lat`` are not parameters and
    # the position only worked because it coincided with the defaults.
    model = GaussianSpatialModel(
        lon_0="0d", lat_0="0d", sigma=0.1 * u.deg, frame="icrs"
    )

    circle_large = CircleSkyRegion(center, 1 * u.deg)
    circle_small = CircleSkyRegion(center, 0.1 * u.deg)
    geom_large = RegionGeom(region=circle_large)
    geom_small = RegionGeom(region=circle_small, binsz_wcs="0.01d")

    integral_large = model.integrate_geom(geom_large).data
    integral_small = model.integrate_geom(geom_small).data

    assert_allclose(integral_large[0], 1, rtol=0.001)
    assert_allclose(integral_small[0], 0.3953, rtol=0.001)
def test_integrate_wcs_geom():
    """Integral of Gaussians of decreasing size over a WCS geometry is ~1."""
    center = SkyCoord("0d", "0d", frame="icrs")
    # NOTE(review): ``lon``/``lat`` are not GaussianSpatialModel parameter
    # names (the rest of this file uses ``lon_0``/``lat_0``) — confirm whether
    # these kwargs are actually applied or silently ignored before renaming,
    # since the values here differ from the defaults.
    model_0_0d = GaussianSpatialModel(
        lon="0.234d", lat="-0.172d", sigma=1e-4 * u.deg, frame="icrs"
    )
    model_0_01d = GaussianSpatialModel(
        lon="0.234d", lat="-0.172d", sigma=0.01 * u.deg, frame="icrs"
    )
    model_0_005d = GaussianSpatialModel(
        lon="0.234d", lat="-0.172d", sigma=0.005 * u.deg, frame="icrs"
    )
    geom = WcsGeom.create(skydir=center, npix=100, binsz=0.02)

    # TODO: solve issue with small radii
    for model in (model_0_0d, model_0_01d, model_0_005d):
        assert_allclose(model.integrate_geom(geom).data.sum(), 1, atol=2e-4)
def test_integrate_geom_energy_axis():
    """Integral over a region geometry with an energy axis is one per bin."""
    center = SkyCoord("0d", "0d", frame="icrs")
    # Fix: use the actual parameter names ``lon_0``/``lat_0`` (``lon``/``lat``
    # are not model parameters; the test only worked because the intended
    # position equals the default).
    model = GaussianSpatialModel(
        lon_0="0d", lat_0="0d", sigma=0.1 * u.deg, frame="icrs"
    )

    radius = 1 * u.deg
    square = RectangleSkyRegion(center, radius, radius)
    axis = MapAxis.from_energy_bounds("1 TeV", "10 TeV", nbin=10)
    geom = RegionGeom(region=square, axes=[axis])

    integral = model.integrate_geom(geom).data
    assert_allclose(integral, 1, rtol=0.0001)
| [
"numpy.sqrt",
"regions.CircleSkyRegion",
"gammapy.modeling.models.DiskSpatialModel",
"gammapy.modeling.models.GeneralizedGaussianSpatialModel",
"gammapy.modeling.models.TemplateSpatialModel.read",
"numpy.arange",
"gammapy.modeling.models.ConstantSpatialModel",
"gammapy.modeling.models.ShellSpatialMode... | [((8782, 8815), 'gammapy.utils.testing.requires_dependency', 'requires_dependency', (['"""matplotlib"""'], {}), "('matplotlib')\n", (8801, 8815), False, 'from gammapy.utils.testing import mpl_plot_check, requires_data, requires_dependency\n'), ((8817, 8832), 'gammapy.utils.testing.requires_data', 'requires_data', ([], {}), '()\n', (8830, 8832), False, 'from gammapy.utils.testing import mpl_plot_check, requires_data, requires_dependency\n'), ((9844, 9859), 'gammapy.utils.testing.requires_data', 'requires_data', ([], {}), '()\n', (9857, 9859), False, 'from gammapy.utils.testing import mpl_plot_check, requires_data, requires_dependency\n'), ((10493, 10508), 'gammapy.utils.testing.requires_data', 'requires_data', ([], {}), '()\n', (10506, 10508), False, 'from gammapy.utils.testing import mpl_plot_check, requires_data, requires_dependency\n'), ((12010, 12043), 'gammapy.utils.testing.requires_dependency', 'requires_dependency', (['"""matplotlib"""'], {}), "('matplotlib')\n", (12029, 12043), False, 'from gammapy.utils.testing import mpl_plot_check, requires_data, requires_dependency\n'), ((828, 887), 'gammapy.maps.WcsGeom.create', 'WcsGeom.create', ([], {'skydir': '(2.4, 2.3)', 'npix': '(10, 10)', 'binsz': '(0.3)'}), '(skydir=(2.4, 2.3), npix=(10, 10), binsz=0.3)\n', (842, 887), False, 'from gammapy.maps import Map, WcsGeom, RegionGeom, MapAxis\n'), ((900, 965), 'gammapy.modeling.models.PointSpatialModel', 'PointSpatialModel', ([], {'lon_0': '"""2.5 deg"""', 'lat_0': '"""2.5 deg"""', 'frame': '"""icrs"""'}), "(lon_0='2.5 deg', lat_0='2.5 deg', frame='icrs')\n", (917, 965), False, 'from gammapy.modeling.models import ConstantSpatialModel, DiskSpatialModel, GaussianSpatialModel, GeneralizedGaussianSpatialModel, PointSpatialModel, ShellSpatialModel, Shell2SpatialModel, TemplateSpatialModel\n'), ((1020, 1069), 'numpy.testing.assert_allclose', 'assert_allclose', (['model.evaluation_radius.value', '(0)'], {}), 
'(model.evaluation_radius.value, 0)\n', (1035, 1069), False, 'from numpy.testing import assert_allclose\n'), ((1109, 1152), 'numpy.testing.assert_allclose', 'assert_allclose', (['model.position.ra.deg', '(2.5)'], {}), '(model.position.ra.deg, 2.5)\n', (1124, 1152), False, 'from numpy.testing import assert_allclose\n'), ((1157, 1201), 'numpy.testing.assert_allclose', 'assert_allclose', (['model.position.dec.deg', '(2.5)'], {}), '(model.position.dec.deg, 2.5)\n', (1172, 1201), False, 'from numpy.testing import assert_allclose\n'), ((1472, 1536), 'gammapy.modeling.models.GaussianSpatialModel', 'GaussianSpatialModel', ([], {'lon_0': '"""5 deg"""', 'lat_0': '"""15 deg"""', 'sigma': 'sigma'}), "(lon_0='5 deg', lat_0='15 deg', sigma=sigma)\n", (1492, 1536), False, 'from gammapy.modeling.models import ConstantSpatialModel, DiskSpatialModel, GaussianSpatialModel, GeneralizedGaussianSpatialModel, PointSpatialModel, ShellSpatialModel, Shell2SpatialModel, TemplateSpatialModel\n'), ((1844, 1890), 'numpy.testing.assert_allclose', 'assert_allclose', (['radius.value', '(5 * sigma.value)'], {}), '(radius.value, 5 * sigma.value)\n', (1859, 1890), False, 'from numpy.testing import assert_allclose\n'), ((1986, 2077), 'gammapy.maps.WcsGeom.create', 'WcsGeom.create', ([], {'binsz': '(0.05)', 'width': '(20, 20)', 'skydir': '(2, 2)', 'frame': '"""galactic"""', 'proj': '"""AIT"""'}), "(binsz=0.05, width=(20, 20), skydir=(2, 2), frame='galactic',\n proj='AIT')\n", (2000, 2077), False, 'from gammapy.maps import Map, WcsGeom, RegionGeom, MapAxis\n'), ((2241, 2335), 'gammapy.modeling.models.GaussianSpatialModel', 'GaussianSpatialModel', ([], {'lon_0': '(2 * u.deg)', 'lat_0': '(2 * u.deg)', 'sigma': 'sigma', 'e': '(0.8)', 'phi': '(30 * u.deg)'}), '(lon_0=2 * u.deg, lat_0=2 * u.deg, sigma=sigma, e=0.8,\n phi=30 * u.deg)\n', (2261, 2335), False, 'from gammapy.modeling.models import ConstantSpatialModel, DiskSpatialModel, GaussianSpatialModel, GeneralizedGaussianSpatialModel, PointSpatialModel, 
ShellSpatialModel, Shell2SpatialModel, TemplateSpatialModel\n'), ((2552, 2598), 'numpy.testing.assert_allclose', 'assert_allclose', (['radius.value', '(5 * sigma.value)'], {}), '(radius.value, 5 * sigma.value)\n', (2567, 2598), False, 'from numpy.testing import assert_allclose\n'), ((2739, 2777), 'numpy.sqrt', 'np.sqrt', (['(1 - (semi_minor / sigma) ** 2)'], {}), '(1 - (semi_minor / sigma) ** 2)\n', (2746, 2777), True, 'import numpy as np\n'), ((2792, 2883), 'gammapy.modeling.models.GaussianSpatialModel', 'GaussianSpatialModel', ([], {'lon_0': '(0 * u.deg)', 'lat_0': '(0 * u.deg)', 'sigma': 'sigma', 'e': 'e', 'phi': '(0 * u.deg)'}), '(lon_0=0 * u.deg, lat_0=0 * u.deg, sigma=sigma, e=e,\n phi=0 * u.deg)\n', (2812, 2883), False, 'from gammapy.modeling.models import ConstantSpatialModel, DiskSpatialModel, GaussianSpatialModel, GeneralizedGaussianSpatialModel, PointSpatialModel, ShellSpatialModel, Shell2SpatialModel, TemplateSpatialModel\n'), ((3265, 3357), 'gammapy.modeling.models.GaussianSpatialModel', 'GaussianSpatialModel', ([], {'lon_0': '(0 * u.deg)', 'lat_0': '(0 * u.deg)', 'sigma': 'sigma', 'e': 'e', 'phi': '(90 * u.deg)'}), '(lon_0=0 * u.deg, lat_0=0 * u.deg, sigma=sigma, e=e,\n phi=90 * u.deg)\n', (3285, 3357), False, 'from gammapy.modeling.models import ConstantSpatialModel, DiskSpatialModel, GaussianSpatialModel, GeneralizedGaussianSpatialModel, PointSpatialModel, ShellSpatialModel, Shell2SpatialModel, TemplateSpatialModel\n'), ((3885, 3970), 'gammapy.modeling.models.GeneralizedGaussianSpatialModel', 'GeneralizedGaussianSpatialModel', ([], {'eta': 'eta', 'r_0': '(r_0 * u.deg)', 'e': 'e', 'frame': '"""galactic"""'}), "(eta=eta, r_0=r_0 * u.deg, e=e, frame='galactic'\n )\n", (3916, 3970), False, 'from gammapy.modeling.models import ConstantSpatialModel, DiskSpatialModel, GaussianSpatialModel, GeneralizedGaussianSpatialModel, PointSpatialModel, ShellSpatialModel, Shell2SpatialModel, TemplateSpatialModel\n'), ((4065, 4137), 'gammapy.maps.WcsGeom.create', 
'WcsGeom.create', ([], {'skydir': '(0, 0)', 'binsz': '(0.02)', 'width': 'width', 'frame': '"""galactic"""'}), "(skydir=(0, 0), binsz=0.02, width=width, frame='galactic')\n", (4079, 4137), False, 'from gammapy.maps import Map, WcsGeom, RegionGeom, MapAxis\n'), ((3622, 3647), 'numpy.arange', 'np.arange', (['(0.1)', '(1.01)', '(0.3)'], {}), '(0.1, 1.01, 0.3)\n', (3631, 3647), True, 'import numpy as np\n'), ((3681, 3707), 'numpy.arange', 'np.arange', (['(0.01)', '(1.01)', '(0.3)'], {}), '(0.01, 1.01, 0.3)\n', (3690, 3707), True, 'import numpy as np\n'), ((3739, 3765), 'numpy.arange', 'np.arange', (['(0.0)', '(0.801)', '(0.4)'], {}), '(0.0, 0.801, 0.4)\n', (3748, 3765), True, 'import numpy as np\n'), ((4332, 4370), 'gammapy.modeling.models.GeneralizedGaussianSpatialModel', 'GeneralizedGaussianSpatialModel', ([], {'e': '(0.5)'}), '(e=0.5)\n', (4363, 4370), False, 'from gammapy.modeling.models import ConstantSpatialModel, DiskSpatialModel, GaussianSpatialModel, GeneralizedGaussianSpatialModel, PointSpatialModel, ShellSpatialModel, Shell2SpatialModel, TemplateSpatialModel\n'), ((4449, 4502), 'numpy.testing.assert_allclose', 'assert_allclose', (['reg.width.value', '(1.73205)'], {'rtol': '(1e-05)'}), '(reg.width.value, 1.73205, rtol=1e-05)\n', (4464, 4502), False, 'from numpy.testing import assert_allclose\n'), ((4730, 4786), 'gammapy.modeling.models.DiskSpatialModel', 'DiskSpatialModel', ([], {'lon_0': '"""1 deg"""', 'lat_0': '"""45 deg"""', 'r_0': 'r_0'}), "(lon_0='1 deg', lat_0='45 deg', r_0=r_0)\n", (4746, 4786), False, 'from gammapy.modeling.models import ConstantSpatialModel, DiskSpatialModel, GaussianSpatialModel, GeneralizedGaussianSpatialModel, PointSpatialModel, ShellSpatialModel, Shell2SpatialModel, TemplateSpatialModel\n'), ((4940, 4975), 'numpy.testing.assert_allclose', 'assert_allclose', (['val.value', 'desired'], {}), '(val.value, desired)\n', (4955, 4975), False, 'from numpy.testing import assert_allclose\n'), ((5190, 5282), 'gammapy.maps.WcsGeom.create', 
'WcsGeom.create', ([], {'binsz': '(0.015)', 'width': '(20, 20)', 'skydir': '(2, 2)', 'frame': '"""galactic"""', 'proj': '"""AIT"""'}), "(binsz=0.015, width=(20, 20), skydir=(2, 2), frame='galactic',\n proj='AIT')\n", (5204, 5282), False, 'from gammapy.maps import Map, WcsGeom, RegionGeom, MapAxis\n'), ((5445, 5531), 'gammapy.modeling.models.DiskSpatialModel', 'DiskSpatialModel', ([], {'lon_0': '(2 * u.deg)', 'lat_0': '(2 * u.deg)', 'r_0': 'r_0', 'e': '(0.4)', 'phi': '(30 * u.deg)'}), '(lon_0=2 * u.deg, lat_0=2 * u.deg, r_0=r_0, e=0.4, phi=30 *\n u.deg)\n', (5461, 5531), False, 'from gammapy.modeling.models import ConstantSpatialModel, DiskSpatialModel, GaussianSpatialModel, GeneralizedGaussianSpatialModel, PointSpatialModel, ShellSpatialModel, Shell2SpatialModel, TemplateSpatialModel\n'), ((5881, 5917), 'numpy.sqrt', 'np.sqrt', (['(1 - (semi_minor / r_0) ** 2)'], {}), '(1 - (semi_minor / r_0) ** 2)\n', (5888, 5917), True, 'import numpy as np\n'), ((5939, 6034), 'gammapy.modeling.models.DiskSpatialModel', 'DiskSpatialModel', ([], {'lon_0': '(0 * u.deg)', 'lat_0': '(0 * u.deg)', 'r_0': 'r_0', 'e': 'eccentricity', 'phi': '(90 * u.deg)'}), '(lon_0=0 * u.deg, lat_0=0 * u.deg, r_0=r_0, e=eccentricity,\n phi=90 * u.deg)\n', (5955, 6034), False, 'from gammapy.modeling.models import ConstantSpatialModel, DiskSpatialModel, GaussianSpatialModel, GeneralizedGaussianSpatialModel, PointSpatialModel, ShellSpatialModel, Shell2SpatialModel, TemplateSpatialModel\n'), ((6210, 6299), 'gammapy.maps.WcsGeom.create', 'WcsGeom.create', ([], {'binsz': '(0.1)', 'width': '(6, 6)', 'skydir': '(0, 90)', 'frame': '"""galactic"""', 'proj': '"""AIT"""'}), "(binsz=0.1, width=(6, 6), skydir=(0, 90), frame='galactic',\n proj='AIT')\n", (6224, 6299), False, 'from gammapy.maps import Map, WcsGeom, RegionGeom, MapAxis\n'), ((6418, 6478), 'gammapy.modeling.models.DiskSpatialModel', 'DiskSpatialModel', ([], {'lon_0': '(0 * u.deg)', 'lat_0': '(90 * u.deg)', 'r_0': 'r_0'}), '(lon_0=0 * u.deg, lat_0=90 * 
u.deg, r_0=r_0)\n', (6434, 6478), False, 'from gammapy.modeling.models import ConstantSpatialModel, DiskSpatialModel, GaussianSpatialModel, GeneralizedGaussianSpatialModel, PointSpatialModel, ShellSpatialModel, Shell2SpatialModel, TemplateSpatialModel\n'), ((6747, 6822), 'gammapy.modeling.models.DiskSpatialModel', 'DiskSpatialModel', ([], {'lon_0': '"""0 deg"""', 'lat_0': '"""0 deg"""', 'r_0': 'r_0', 'e': '(0.5)', 'phi': '"""0 deg"""'}), "(lon_0='0 deg', lat_0='0 deg', r_0=r_0, e=0.5, phi='0 deg')\n", (6763, 6822), False, 'from gammapy.modeling.models import ConstantSpatialModel, DiskSpatialModel, GaussianSpatialModel, GeneralizedGaussianSpatialModel, PointSpatialModel, ShellSpatialModel, Shell2SpatialModel, TemplateSpatialModel\n'), ((7361, 7434), 'gammapy.modeling.models.ShellSpatialModel', 'ShellSpatialModel', ([], {'lon_0': '"""1 deg"""', 'lat_0': '"""45 deg"""', 'radius': 'rad', 'width': 'width'}), "(lon_0='1 deg', lat_0='45 deg', radius=rad, width=width)\n", (7378, 7434), False, 'from gammapy.modeling.models import ConstantSpatialModel, DiskSpatialModel, GaussianSpatialModel, GeneralizedGaussianSpatialModel, PointSpatialModel, ShellSpatialModel, Shell2SpatialModel, TemplateSpatialModel\n'), ((7713, 7767), 'numpy.testing.assert_allclose', 'assert_allclose', (['radius.value', '(rad.value + width.value)'], {}), '(radius.value, rad.value + width.value)\n', (7728, 7767), False, 'from numpy.testing import assert_allclose\n'), ((7912, 7987), 'gammapy.modeling.models.Shell2SpatialModel', 'Shell2SpatialModel', ([], {'lon_0': '"""1 deg"""', 'lat_0': '"""45 deg"""', 'r_0': '(rad + width)', 'eta': '(0.5)'}), "(lon_0='1 deg', lat_0='45 deg', r_0=rad + width, eta=0.5)\n", (7930, 7987), False, 'from gammapy.modeling.models import ConstantSpatialModel, DiskSpatialModel, GaussianSpatialModel, GeneralizedGaussianSpatialModel, PointSpatialModel, ShellSpatialModel, Shell2SpatialModel, TemplateSpatialModel\n'), ((8266, 8320), 'numpy.testing.assert_allclose', 'assert_allclose', 
(['radius.value', '(rad.value + width.value)'], {}), '(radius.value, rad.value + width.value)\n', (8281, 8320), False, 'from numpy.testing import assert_allclose\n'), ((8325, 8369), 'numpy.testing.assert_allclose', 'assert_allclose', (['model.r_in.value', 'rad.value'], {}), '(model.r_in.value, rad.value)\n', (8340, 8369), False, 'from numpy.testing import assert_allclose\n'), ((8482, 8519), 'gammapy.modeling.models.ConstantSpatialModel', 'ConstantSpatialModel', ([], {'value': '"""42 sr-1"""'}), "(value='42 sr-1')\n", (8502, 8519), False, 'from gammapy.modeling.models import ConstantSpatialModel, DiskSpatialModel, GaussianSpatialModel, GeneralizedGaussianSpatialModel, PointSpatialModel, ShellSpatialModel, Shell2SpatialModel, TemplateSpatialModel\n'), ((8626, 8656), 'numpy.testing.assert_allclose', 'assert_allclose', (['val.value', '(42)'], {}), '(val.value, 42)\n', (8641, 8656), False, 'from numpy.testing import assert_allclose\n'), ((8981, 9033), 'gammapy.modeling.models.TemplateSpatialModel.read', 'TemplateSpatialModel.read', (['filename'], {'normalize': '(False)'}), '(filename, normalize=False)\n', (9006, 9033), False, 'from gammapy.modeling.models import ConstantSpatialModel, DiskSpatialModel, GaussianSpatialModel, GeneralizedGaussianSpatialModel, PointSpatialModel, ShellSpatialModel, Shell2SpatialModel, TemplateSpatialModel\n'), ((9354, 9389), 'numpy.testing.assert_allclose', 'assert_allclose', (['val.value', 'desired'], {}), '(val.value, desired)\n', (9369, 9389), False, 'from numpy.testing import assert_allclose\n'), ((9569, 9615), 'numpy.testing.assert_allclose', 'assert_allclose', (['radius.value', '(0.64)'], {'rtol': '(0.01)'}), '(radius.value, 0.64, rtol=0.01)\n', (9584, 9615), False, 'from numpy.testing import assert_allclose\n'), ((9971, 10023), 'gammapy.modeling.models.TemplateSpatialModel.read', 'TemplateSpatialModel.read', (['filename'], {'normalize': '(False)'}), '(filename, normalize=False)\n', (9996, 10023), False, 'from gammapy.modeling.models 
import ConstantSpatialModel, DiskSpatialModel, GaussianSpatialModel, GeneralizedGaussianSpatialModel, PointSpatialModel, ShellSpatialModel, Shell2SpatialModel, TemplateSpatialModel\n'), ((10613, 10665), 'gammapy.maps.Map.create', 'Map.create', ([], {'map_type': '"""wcs"""', 'width': '(10, 5)', 'binsz': '(0.5)'}), "(map_type='wcs', width=(10, 5), binsz=0.5)\n", (10623, 10665), False, 'from gammapy.maps import Map, WcsGeom, RegionGeom, MapAxis\n'), ((10732, 10763), 'gammapy.modeling.models.TemplateSpatialModel', 'TemplateSpatialModel', (['model_map'], {}), '(model_map)\n', (10752, 10763), False, 'from gammapy.modeling.models import ConstantSpatialModel, DiskSpatialModel, GaussianSpatialModel, GeneralizedGaussianSpatialModel, PointSpatialModel, ShellSpatialModel, Shell2SpatialModel, TemplateSpatialModel\n'), ((10835, 10885), 'gammapy.maps.Map.create', 'Map.create', ([], {'map_type': '"""wcs"""', 'width': '(10, 5)', 'binsz': '(1)'}), "(map_type='wcs', width=(10, 5), binsz=1)\n", (10845, 10885), False, 'from gammapy.maps import Map, WcsGeom, RegionGeom, MapAxis\n'), ((11084, 11131), 'numpy.testing.assert_allclose', 'assert_allclose', (['integral.value', '(1)'], {'rtol': '(0.0001)'}), '(integral.value, 1, rtol=0.0001)\n', (11099, 11131), False, 'from numpy.testing import assert_allclose\n'), ((11547, 11558), 'astropy.wcs.WCS', 'WCS', (['header'], {}), '(header)\n', (11550, 11558), False, 'from astropy.wcs import WCS\n'), ((11570, 11597), 'gammapy.maps.WcsGeom', 'WcsGeom', (['wcs'], {'npix': '(10, 10)'}), '(wcs, npix=(10, 10))\n', (11577, 11597), False, 'from gammapy.maps import Map, WcsGeom, RegionGeom, MapAxis\n'), ((11610, 11675), 'gammapy.modeling.models.GaussianSpatialModel', 'GaussianSpatialModel', ([], {'lon_0': '"""0 deg"""', 'lat_0': '"""0 deg"""', 'sigma': '"""1 deg"""'}), "(lon_0='0 deg', lat_0='0 deg', sigma='1 deg')\n", (11630, 11675), False, 'from gammapy.modeling.models import ConstantSpatialModel, DiskSpatialModel, GaussianSpatialModel, 
GeneralizedGaussianSpatialModel, PointSpatialModel, ShellSpatialModel, Shell2SpatialModel, TemplateSpatialModel\n'), ((11783, 11836), 'gammapy.maps.WcsGeom.create', 'WcsGeom.create', ([], {'width': '(5, 5)', 'binsz': '(0.1)', 'frame': '"""icrs"""'}), "(width=(5, 5), binsz=0.1, frame='icrs')\n", (11797, 11836), False, 'from gammapy.maps import Map, WcsGeom, RegionGeom, MapAxis\n'), ((11849, 11934), 'gammapy.modeling.models.GaussianSpatialModel', 'GaussianSpatialModel', ([], {'lon_0': '"""0 deg"""', 'lat_0': '"""0 deg"""', 'sigma': '"""0.1 deg"""', 'frame': '"""fk5"""'}), "(lon_0='0 deg', lat_0='0 deg', sigma='0.1 deg', frame='fk5'\n )\n", (11869, 11934), False, 'from gammapy.modeling.models import ConstantSpatialModel, DiskSpatialModel, GaussianSpatialModel, GeneralizedGaussianSpatialModel, PointSpatialModel, ShellSpatialModel, Shell2SpatialModel, TemplateSpatialModel\n'), ((12087, 12106), 'gammapy.modeling.models.PointSpatialModel', 'PointSpatialModel', ([], {}), '()\n', (12104, 12106), False, 'from gammapy.modeling.models import ConstantSpatialModel, DiskSpatialModel, GaussianSpatialModel, GeneralizedGaussianSpatialModel, PointSpatialModel, ShellSpatialModel, Shell2SpatialModel, TemplateSpatialModel\n'), ((12130, 12151), 'numpy.diag', 'np.diag', (['[0.01, 0.01]'], {}), '([0.01, 0.01])\n', (12137, 12151), True, 'import numpy as np\n'), ((12315, 12349), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['"""0d"""', '"""0d"""'], {'frame': '"""icrs"""'}), "('0d', '0d', frame='icrs')\n", (12323, 12349), False, 'from astropy.coordinates import SkyCoord\n'), ((12362, 12435), 'gammapy.modeling.models.GaussianSpatialModel', 'GaussianSpatialModel', ([], {'lon': '"""0d"""', 'lat': '"""0d"""', 'sigma': '(0.1 * u.deg)', 'frame': '"""icrs"""'}), "(lon='0d', lat='0d', sigma=0.1 * u.deg, frame='icrs')\n", (12382, 12435), False, 'from gammapy.modeling.models import ConstantSpatialModel, DiskSpatialModel, GaussianSpatialModel, GeneralizedGaussianSpatialModel, PointSpatialModel, 
ShellSpatialModel, Shell2SpatialModel, TemplateSpatialModel\n'), ((12485, 12522), 'regions.CircleSkyRegion', 'CircleSkyRegion', (['center', 'radius_large'], {}), '(center, radius_large)\n', (12500, 12522), False, 'from regions import CircleAnnulusSkyRegion, EllipseSkyRegion, PointSkyRegion, PolygonSkyRegion, CircleSkyRegion, RectangleSkyRegion\n'), ((12573, 12610), 'regions.CircleSkyRegion', 'CircleSkyRegion', (['center', 'radius_small'], {}), '(center, radius_small)\n', (12588, 12610), False, 'from regions import CircleAnnulusSkyRegion, EllipseSkyRegion, PointSkyRegion, PolygonSkyRegion, CircleSkyRegion, RectangleSkyRegion\n'), ((12895, 12944), 'numpy.testing.assert_allclose', 'assert_allclose', (['integral_large[0]', '(1)'], {'rtol': '(0.001)'}), '(integral_large[0], 1, rtol=0.001)\n', (12910, 12944), False, 'from numpy.testing import assert_allclose\n'), ((12949, 13003), 'numpy.testing.assert_allclose', 'assert_allclose', (['integral_small[0]', '(0.3953)'], {'rtol': '(0.001)'}), '(integral_small[0], 0.3953, rtol=0.001)\n', (12964, 13003), False, 'from numpy.testing import assert_allclose\n'), ((13050, 13084), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['"""0d"""', '"""0d"""'], {'frame': '"""icrs"""'}), "('0d', '0d', frame='icrs')\n", (13058, 13084), False, 'from astropy.coordinates import SkyCoord\n'), ((13102, 13191), 'gammapy.modeling.models.GaussianSpatialModel', 'GaussianSpatialModel', ([], {'lon': '"""0.234d"""', 'lat': '"""-0.172d"""', 'sigma': '(0.0001 * u.deg)', 'frame': '"""icrs"""'}), "(lon='0.234d', lat='-0.172d', sigma=0.0001 * u.deg,\n frame='icrs')\n", (13122, 13191), False, 'from gammapy.modeling.models import ConstantSpatialModel, DiskSpatialModel, GaussianSpatialModel, GeneralizedGaussianSpatialModel, PointSpatialModel, ShellSpatialModel, Shell2SpatialModel, TemplateSpatialModel\n'), ((13219, 13307), 'gammapy.modeling.models.GaussianSpatialModel', 'GaussianSpatialModel', ([], {'lon': '"""0.234d"""', 'lat': '"""-0.172d"""', 'sigma': '(0.01 * 
u.deg)', 'frame': '"""icrs"""'}), "(lon='0.234d', lat='-0.172d', sigma=0.01 * u.deg, frame\n ='icrs')\n", (13239, 13307), False, 'from gammapy.modeling.models import ConstantSpatialModel, DiskSpatialModel, GaussianSpatialModel, GeneralizedGaussianSpatialModel, PointSpatialModel, ShellSpatialModel, Shell2SpatialModel, TemplateSpatialModel\n'), ((13336, 13424), 'gammapy.modeling.models.GaussianSpatialModel', 'GaussianSpatialModel', ([], {'lon': '"""0.234d"""', 'lat': '"""-0.172d"""', 'sigma': '(0.005 * u.deg)', 'frame': '"""icrs"""'}), "(lon='0.234d', lat='-0.172d', sigma=0.005 * u.deg,\n frame='icrs')\n", (13356, 13424), False, 'from gammapy.modeling.models import ConstantSpatialModel, DiskSpatialModel, GaussianSpatialModel, GeneralizedGaussianSpatialModel, PointSpatialModel, ShellSpatialModel, Shell2SpatialModel, TemplateSpatialModel\n'), ((13447, 13498), 'gammapy.maps.WcsGeom.create', 'WcsGeom.create', ([], {'skydir': 'center', 'npix': '(100)', 'binsz': '(0.02)'}), '(skydir=center, npix=100, binsz=0.02)\n', (13461, 13498), False, 'from gammapy.maps import Map, WcsGeom, RegionGeom, MapAxis\n'), ((13952, 13986), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['"""0d"""', '"""0d"""'], {'frame': '"""icrs"""'}), "('0d', '0d', frame='icrs')\n", (13960, 13986), False, 'from astropy.coordinates import SkyCoord\n'), ((13999, 14072), 'gammapy.modeling.models.GaussianSpatialModel', 'GaussianSpatialModel', ([], {'lon': '"""0d"""', 'lat': '"""0d"""', 'sigma': '(0.1 * u.deg)', 'frame': '"""icrs"""'}), "(lon='0d', lat='0d', sigma=0.1 * u.deg, frame='icrs')\n", (14019, 14072), False, 'from gammapy.modeling.models import ConstantSpatialModel, DiskSpatialModel, GaussianSpatialModel, GeneralizedGaussianSpatialModel, PointSpatialModel, ShellSpatialModel, Shell2SpatialModel, TemplateSpatialModel\n'), ((14110, 14152), 'regions.RectangleSkyRegion', 'RectangleSkyRegion', (['center', 'radius', 'radius'], {}), '(center, radius, radius)\n', (14128, 14152), False, 'from regions import 
CircleAnnulusSkyRegion, EllipseSkyRegion, PointSkyRegion, PolygonSkyRegion, CircleSkyRegion, RectangleSkyRegion\n'), ((14165, 14219), 'gammapy.maps.MapAxis.from_energy_bounds', 'MapAxis.from_energy_bounds', (['"""1 TeV"""', '"""10 TeV"""'], {'nbin': '(10)'}), "('1 TeV', '10 TeV', nbin=10)\n", (14191, 14219), False, 'from gammapy.maps import Map, WcsGeom, RegionGeom, MapAxis\n'), ((14231, 14269), 'gammapy.maps.RegionGeom', 'RegionGeom', ([], {'region': 'square', 'axes': '[axis]'}), '(region=square, axes=[axis])\n', (14241, 14269), False, 'from gammapy.maps import Map, WcsGeom, RegionGeom, MapAxis\n'), ((14323, 14364), 'numpy.testing.assert_allclose', 'assert_allclose', (['integral', '(1)'], {'rtol': '(0.0001)'}), '(integral, 1, rtol=0.0001)\n', (14338, 14364), False, 'from numpy.testing import assert_allclose\n'), ((1758, 1769), 'numpy.exp', 'np.exp', (['(0.5)'], {}), '(0.5)\n', (1764, 1769), True, 'import numpy as np\n'), ((2430, 2458), 'numpy.sum', 'np.sum', (['(vals_1 * solid_angle)'], {}), '(vals_1 * solid_angle)\n', (2436, 2458), True, 'import numpy as np\n'), ((3166, 3177), 'numpy.exp', 'np.exp', (['(0.5)'], {}), '(0.5)\n', (3172, 3177), True, 'import numpy as np\n'), ((3212, 3223), 'numpy.exp', 'np.exp', (['(0.5)'], {}), '(0.5)\n', (3218, 3223), True, 'import numpy as np\n'), ((3515, 3526), 'numpy.exp', 'np.exp', (['(0.5)'], {}), '(0.5)\n', (3521, 3526), True, 'import numpy as np\n'), ((5626, 5654), 'numpy.sum', 'np.sum', (['(vals_1 * solid_angle)'], {}), '(vals_1 * solid_angle)\n', (5632, 5654), True, 'import numpy as np\n'), ((9457, 9474), 'numpy.sum', 'np.sum', (['res.value'], {}), '(res.value)\n', (9463, 9474), True, 'import numpy as np\n'), ((9721, 9745), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (9734, 9745), False, 'import pytest\n'), ((9789, 9813), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (9802, 9813), False, 'import pytest\n'), ((10144, 10169), 'pytest.raises', 'pytest.raises', 
(['ValueError'], {}), '(ValueError)\n', (10157, 10169), False, 'import pytest\n'), ((10393, 10410), 'numpy.sum', 'np.sum', (['res.value'], {}), '(res.value)\n', (10399, 10410), True, 'import numpy as np\n'), ((10443, 10467), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (10456, 10467), False, 'import pytest\n'), ((12162, 12178), 'gammapy.utils.testing.mpl_plot_check', 'mpl_plot_check', ([], {}), '()\n', (12176, 12178), False, 'from gammapy.utils.testing import mpl_plot_check, requires_data, requires_dependency\n'), ((12216, 12232), 'gammapy.utils.testing.mpl_plot_check', 'mpl_plot_check', ([], {}), '()\n', (12230, 12232), False, 'from gammapy.utils.testing import mpl_plot_check, requires_data, requires_dependency\n'), ((12651, 12682), 'gammapy.maps.RegionGeom', 'RegionGeom', ([], {'region': 'circle_large'}), '(region=circle_large)\n', (12661, 12682), False, 'from gammapy.maps import Map, WcsGeom, RegionGeom, MapAxis\n'), ((12692, 12742), 'gammapy.maps.RegionGeom', 'RegionGeom', ([], {'region': 'circle_small', 'binsz_wcs': '"""0.01d"""'}), "(region=circle_small, binsz_wcs='0.01d')\n", (12702, 12742), False, 'from gammapy.maps import Map, WcsGeom, RegionGeom, MapAxis\n'), ((6546, 6563), 'numpy.cos', 'np.cos', (['(5 * u.deg)'], {}), '(5 * u.deg)\n', (6552, 6563), True, 'import numpy as np\n'), ((6585, 6602), 'numpy.max', 'np.max', (['vals_disk'], {}), '(vals_disk)\n', (6591, 6602), True, 'import numpy as np\n')] |
import numpy as np
import torch
import torch.nn as nn
from scipy import sparse
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import MinMaxScaler
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
from recoxplainer.utils.torch_utils import use_cuda
from recoxplainer.data_reader.user_item_dict import UserItemDict
from recoxplainer.utils.torch_utils import use_optimizer
class ExplAutoencoderTorch(nn.Module):
    """Autoencoder recommender with optional neighbour-based explainability.

    A single-hidden-layer autoencoder is trained to reconstruct each user's
    rating vector.  ``compute_explainability`` additionally derives, for each
    user, how strongly every item is endorsed by the user's ``knn`` most
    similar neighbours (cosine similarity over the pivoted rating matrix);
    the resulting matrix is handed to the training-set wrapper when ``expl``
    is enabled.
    """

    def __init__(self,
                 hidden_layer_features: int,
                 learning_rate: float,
                 positive_threshold: float,
                 weight_decay: float,
                 epochs: int,
                 knn: int,
                 cuda: bool,
                 optimizer_name: str,
                 expl: bool,
                 device_id=None
                 ):
        """Store hyper-parameters; the network layers are built lazily in ``fit``.

        :param hidden_layer_features: width of the autoencoder bottleneck
        :param positive_threshold: minimum rating counted as a "positive" interaction
        :param knn: number of nearest neighbours used for explainability
        :param optimizer_name: one of 'sgd', 'adam', 'rmsprop'
        :raises Exception: if ``optimizer_name`` is not a supported optimizer
        """
        if optimizer_name not in ['sgd', 'adam', 'rmsprop']:
            # BUG FIX: the original ``raise Exception["Wrong optimizer."]``
            # subscripted the Exception class, which raised a TypeError
            # instead of the intended error message.
            raise Exception("Wrong optimizer.")
        if cuda is True:
            use_cuda(True, device_id)

        # Initialize nn.Module machinery before any attribute registration.
        super().__init__()

        self.positive_threshold = positive_threshold
        self.weight_decay = weight_decay
        self.knn = knn
        self.learning_rate = learning_rate
        self.epochs = epochs
        self.cuda = cuda
        self.optimizer_name = optimizer_name
        self.hidden_layer_features = hidden_layer_features
        self.expl = expl

        # Populated later by fit() / compute_explainability().
        self.dataset = None
        self.dataset_metadata = None
        self.embedding_user = None
        self.embedding_item = None
        self.optimizer = None
        self.explainability_matrix = None
        self.sim_users = {}

        self.criterion = nn.MSELoss()

    def fit(self, dataset_metadata):
        """Train the autoencoder on ``dataset_metadata.dataset``.

        Builds the encoder/decoder layers (the item count is only known here),
        precomputes the explainability matrix, then trains for ``self.epochs``
        epochs.  Returns True on completion.
        """
        self.dataset_metadata = dataset_metadata
        self.dataset = dataset_metadata.dataset
        num_items = self.dataset_metadata.num_item

        self.encoder_hidden_layer = nn.Linear(
            in_features=num_items, out_features=self.hidden_layer_features
        )
        self.decoder_output_layer = nn.Linear(
            in_features=self.hidden_layer_features, out_features=num_items
        )

        self.compute_explainability()

        self.optimizer = use_optimizer(network=self,
                                       learning_rate=self.learning_rate,
                                       weight_decay=self.weight_decay,
                                       optimizer=self.optimizer_name)

        with tqdm(total=self.epochs) as progress:
            train_loader = self.instance_a_train_loader()
            for epoch in range(self.epochs):
                loss = self.train_an_epoch(train_loader)
                progress.update(1)
                progress.set_postfix({"loss": loss})
        return True

    def compute_explainability(self):
        """Build the (num_user x num_item) explainability matrix.

        Entry (u, i) is the min-max-scaled sum of ratings that user u's
        ``knn`` nearest neighbours (cosine similarity) gave to item i,
        counting only ratings >= ``positive_threshold``.
        """
        ds = self.dataset.pivot(index='userId', columns='itemId', values='rating')
        ds = ds.fillna(0)
        ds = sparse.csr_matrix(ds)
        sim_matrix = cosine_similarity(ds)
        # Exclude self-similarity by setting the diagonal below the minimum.
        min_val = sim_matrix.min() - 1
        for i in range(self.dataset_metadata.num_user):
            sim_matrix[i, i] = min_val
            knn_to_user_i = (-sim_matrix[i, :]).argsort()[:self.knn]
            self.sim_users[i] = knn_to_user_i

        self.explainability_matrix = np.zeros((self.dataset_metadata.num_user,
                                                  self.dataset_metadata.num_item))
        filter_dataset_on_threshold = self.dataset[
            self.dataset['rating'] >= self.positive_threshold
        ]
        for i in range(self.dataset_metadata.num_user):
            knn_to_user_i = self.sim_users[i]
            rated_items_by_sim_users = filter_dataset_on_threshold[
                filter_dataset_on_threshold['userId'].isin(knn_to_user_i)]
            # Sum the neighbours' ratings per item as the raw explanation score.
            sim_scores = rated_items_by_sim_users.groupby(by='itemId')
            sim_scores = sim_scores['rating'].sum()
            sim_scores = sim_scores.reset_index()
            self.explainability_matrix[i, sim_scores.itemId] = sim_scores.rating.to_list()

        self.explainability_matrix = MinMaxScaler().fit_transform(self.explainability_matrix)
        self.explainability_matrix = torch.from_numpy(self.explainability_matrix)

    def instance_a_train_loader(self):
        """Instance train loader for one training epoch."""
        self.user_item_dict = UserItemDict(self.dataset, self.explainability_matrix, self.expl)
        return DataLoader(self.user_item_dict, shuffle=True)

    def train_an_epoch(self, train_loader):
        """Run one pass over ``train_loader``; return the mean batch loss."""
        self.train()
        cnt = 0
        total_loss = 0
        for batch_id, batch in enumerate(train_loader):
            assert isinstance(batch[0], torch.Tensor)
            rating = batch[0]
            rating = rating.float()
            loss = self.train_single_user(rating)
            total_loss += loss
            cnt += 1
        return total_loss / cnt

    def train_single_user(self, ratings):
        """One optimization step: reconstruct ``ratings`` and backpropagate MSE."""
        if self.cuda is True:
            ratings = ratings.cuda()
        self.optimizer.zero_grad()
        ratings_pred = self(ratings)
        loss = self.criterion(ratings_pred, ratings)
        loss.backward()
        self.optimizer.step()
        loss = loss.item()
        return loss

    def forward(self, user_adjusted_ratings):
        """Encode then decode a rating vector; ReLU keeps outputs non-negative."""
        activation = self.encoder_hidden_layer(user_adjusted_ratings)
        code = torch.relu(activation)
        activation = self.decoder_output_layer(code)
        reconstructed_ratings = torch.relu(activation)
        return reconstructed_ratings

    def predict(self, user_id, item_id):
        """Predict the ratings of ``item_id`` for ``user_id``.

        Scalar ids are wrapped in single-element lists so the indexing below
        is uniform; returns a list of predicted ratings.
        """
        # BUG FIX: the original tested ``type(user_id) == 'int'`` against the
        # STRING 'int', which is always False, so scalar ids were never wrapped.
        if isinstance(user_id, int):
            user_id = [user_id]
        if isinstance(item_id, int):
            item_id = [item_id]
        with torch.no_grad():
            if self.cuda:
                user_id = user_id.cuda()
                item_id = item_id.cuda()
            rating = self.user_item_dict[user_id]
            rating = rating.float()
            pred = self.forward(rating).cpu()
            return pred[item_id].tolist()
| [
"recoxplainer.data_reader.user_item_dict.UserItemDict",
"sklearn.metrics.pairwise.cosine_similarity",
"tqdm.auto.tqdm",
"torch.relu",
"torch.from_numpy",
"recoxplainer.utils.torch_utils.use_optimizer",
"recoxplainer.utils.torch_utils.use_cuda",
"torch.nn.MSELoss",
"numpy.zeros",
"torch.nn.Linear",... | [((1664, 1676), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (1674, 1676), True, 'import torch.nn as nn\n'), ((1901, 1974), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'num_items', 'out_features': 'self.hidden_layer_features'}), '(in_features=num_items, out_features=self.hidden_layer_features)\n', (1910, 1974), True, 'import torch.nn as nn\n'), ((2034, 2107), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'self.hidden_layer_features', 'out_features': 'num_items'}), '(in_features=self.hidden_layer_features, out_features=num_items)\n', (2043, 2107), True, 'import torch.nn as nn\n'), ((2195, 2324), 'recoxplainer.utils.torch_utils.use_optimizer', 'use_optimizer', ([], {'network': 'self', 'learning_rate': 'self.learning_rate', 'weight_decay': 'self.weight_decay', 'optimizer': 'self.optimizer_name'}), '(network=self, learning_rate=self.learning_rate, weight_decay=\n self.weight_decay, optimizer=self.optimizer_name)\n', (2208, 2324), False, 'from recoxplainer.utils.torch_utils import use_optimizer\n'), ((2917, 2938), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['ds'], {}), '(ds)\n', (2934, 2938), False, 'from scipy import sparse\n'), ((2960, 2981), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['ds'], {}), '(ds)\n', (2977, 2981), False, 'from sklearn.metrics.pairwise import cosine_similarity\n'), ((3271, 3345), 'numpy.zeros', 'np.zeros', (['(self.dataset_metadata.num_user, self.dataset_metadata.num_item)'], {}), '((self.dataset_metadata.num_user, self.dataset_metadata.num_item))\n', (3279, 3345), True, 'import numpy as np\n'), ((4168, 4212), 'torch.from_numpy', 'torch.from_numpy', (['self.explainability_matrix'], {}), '(self.explainability_matrix)\n', (4184, 4212), False, 'import torch\n'), ((4342, 4407), 'recoxplainer.data_reader.user_item_dict.UserItemDict', 'UserItemDict', (['self.dataset', 'self.explainability_matrix', 'self.expl'], {}), '(self.dataset, self.explainability_matrix, self.expl)\n', 
(4354, 4407), False, 'from recoxplainer.data_reader.user_item_dict import UserItemDict\n'), ((4423, 4468), 'torch.utils.data.DataLoader', 'DataLoader', (['self.user_item_dict'], {'shuffle': '(True)'}), '(self.user_item_dict, shuffle=True)\n', (4433, 4468), False, 'from torch.utils.data import DataLoader\n'), ((5353, 5375), 'torch.relu', 'torch.relu', (['activation'], {}), '(activation)\n', (5363, 5375), False, 'import torch\n'), ((5461, 5483), 'torch.relu', 'torch.relu', (['activation'], {}), '(activation)\n', (5471, 5483), False, 'import torch\n'), ((1004, 1029), 'recoxplainer.utils.torch_utils.use_cuda', 'use_cuda', (['(True)', 'device_id'], {}), '(True, device_id)\n', (1012, 1029), False, 'from recoxplainer.utils.torch_utils import use_cuda\n'), ((2451, 2474), 'tqdm.auto.tqdm', 'tqdm', ([], {'total': 'self.epochs'}), '(total=self.epochs)\n', (2455, 2474), False, 'from tqdm.auto import tqdm\n'), ((5710, 5725), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5723, 5725), False, 'import torch\n'), ((4073, 4087), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (4085, 4087), False, 'from sklearn.preprocessing import MinMaxScaler\n')] |
import uuid
import torch
import argparse
import matplotlib
import numpy as np
import pandas as pd
matplotlib.use('Agg')  # non-interactive backend; must be selected before pyplot is imported below
import seaborn as sns
from pathlib import Path
import matplotlib.pyplot as plt
from external_libs.hessian_eigenthings import compute_hessian_eigenthings
# Six-character run identifier derived from a random UUID; names this run's outputs.
TRIAL_ID = uuid.uuid4().hex.upper()[0:6]
# All metrics, plots and checkpoints for this run are written under this directory.
EXPERIMENT_DIRECTORY = './outputs/{}'.format(TRIAL_ID)
# Compute device: prefer the GPU when one is available.
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
def parse_arguments():
    """Build the experiment's command-line interface and parse ``sys.argv``."""
    option_specs = [
        ('--tasks', dict(default=5, type=int, help='total number of tasks')),
        ('--epochs-per-task', dict(default=1, type=int, help='epochs per task')),
        ('--dataset', dict(default='core50', type=str, help='dataset. options: core50, toybox, ilab')),
        ('--batch-size', dict(default=128, type=int, help='batch-size')),
        ('--lr', dict(default=0.1, type=float, help='learning rate')),
        ('--gamma', dict(default=0.85, type=float, help='learning rate decay. Use 1.0 for no decay')),
        ('--dropout', dict(default=0.1, type=float, help='dropout probability. Use 0.0 for no dropout')),
        ('--hiddens', dict(default=256, type=int, help='num of hidden neurons in each layer of a 2-layer MLP')),
        ('--compute-eigenspectrum', dict(default=False, type=bool, help='compute eigenvalues/eigenvectors?')),
        ('--seed', dict(default=1234, type=int, help='random seed')),
        ('--run', dict(default=0, type=int, help='run: 0 to 10')),
        ('--paradigm', dict(default='class_iid', type=str, help='class_iid or class_instance')),
    ]
    parser = argparse.ArgumentParser(description='Argument parser')
    for flag, options in option_specs:
        parser.add_argument(flag, **options)
    return parser.parse_args()
def init_experiment(args):
    """Set up seeds, the output directory and metric containers for one run.

    Returns
    -------
    (acc_db, loss_db, hessian_eig_db) : tuple
        Per-task accuracy and loss histories (one zeroed slot per global
        epoch, keyed by task id 1..tasks) and an empty dict for Hessian
        eigenvalue logs.
    """
    print('------------------- Experiment started -----------------')
    print(f"Parameters:\n  seed={args.seed}\n  benchmark={args.dataset}\n  num_tasks={args.tasks}\n  "+
          f"epochs_per_task={args.epochs_per_task}\n  batch_size={args.batch_size}\n  "+
          f"learning_rate={args.lr}\n  learning rate decay(gamma)={args.gamma}\n  dropout prob={args.dropout}\n")

    # 1. setup seed for reproducibility
    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    # 2. create directory to save results
    Path(EXPERIMENT_DIRECTORY).mkdir(parents=True, exist_ok=True)
    print("The results will be saved in {}\n".format(EXPERIMENT_DIRECTORY))

    # 3. create data structures to store metrics
    # FIX: the original built two identical per-task dicts (task 1 vs tasks
    # 2..T) with the same comprehension and merged them; a single
    # comprehension over all tasks produces exactly the same result.
    n_slots = args.epochs_per_task * args.tasks
    loss_db = {t: [0] * n_slots for t in range(1, args.tasks + 1)}
    acc_db = {t: [0] * n_slots for t in range(1, args.tasks + 1)}
    hessian_eig_db = {}
    return acc_db, loss_db, hessian_eig_db
def end_experiment(args, acc_db, loss_db, hessian_eig_db):
    """Persist all metrics as CSV/PNG and report final accuracy and forgetting."""
    # 1. save all metrics into csv file
    for db, stem in ((acc_db, 'accs'), (loss_db, 'loss')):
        frame = pd.DataFrame(db)
        frame.to_csv(EXPERIMENT_DIRECTORY + '/{}.csv'.format(stem))
        visualize_result(frame, EXPERIMENT_DIRECTORY + '/{}.png'.format(stem))
    pd.DataFrame(hessian_eig_db).to_csv(EXPERIMENT_DIRECTORY + '/hessian_eigs.csv')

    # 2. calculate average accuracy and forgetting (c.f. ``evaluation`` section in our paper)
    # print(acc_db.keys())
    score = np.mean([acc_db[i][-1] for i in acc_db.keys()])
    forget = np.mean([max(acc_db[i]) - acc_db[i][-1] for i in range(1, args.tasks + 1)]) / 100.0
    print('average accuracy = {}, forget = {}'.format(score, forget))
    print()
    print('------------------- Experiment ended -----------------')
def log_metrics(metrics, time, task_id, acc_db, loss_db):
    """
    Record the accuracy and loss of ``task_id`` at training step ``time``
    (1-based) in the metric databases, and return both databases.
    """
    print('epoch {}, task:{}, metrics: {}'.format(time, task_id, metrics))
    # log to db
    slot = time - 1
    acc_db[task_id][slot] = metrics['accuracy']
    loss_db[task_id][slot] = metrics['loss']
    return acc_db, loss_db
def save_eigenvec(filename, arr):
    """Persist an eigenvector array to ``filename`` in NumPy ``.npy`` format."""
    np.save(filename, arr)
def log_hessian(model, loader, time, task_id, hessian_eig_db):
    """
    Compute and log the top Hessian eigenvalues/eigenvectors for a specific task.

    :param model: The PyTorch Model
    :param loader: Dataloader [to calculate loss and then Hessian]
    :param time: time is a discrete concept regarding epoch. If we have T tasks each with E epoch,
    time will be from 0, to (T x E)-1. E.g., if we have 5 tasks with 5 epochs each, then when we finish
    task 1, time will be 5.
    :param task_id: Task id (to distinguish between Hessians of different tasks)
    :param hessian_eig_db: (The dictionary to store hessians)
    :return: hessian_eig_db, with the eigenvalues added under key 'task-{task_id}-epoch-{time-1}'
    """
    criterion = torch.nn.CrossEntropyLoss().to(DEVICE)
    use_gpu = True if DEVICE != 'cpu' else False
    # Estimate the top-3 eigenpairs of the loss Hessian via power iteration
    # over mini-batches from `loader` (parameters below tune the iteration).
    est_eigenvals, est_eigenvecs = compute_hessian_eigenthings(
        model,
        loader,
        criterion,
        num_eigenthings=3,
        power_iter_steps=18,
        power_iter_err_threshold=1e-5,
        momentum=0,
        use_gpu=use_gpu,
    )
    key = 'task-{}-epoch-{}'.format(task_id, time-1)
    hessian_eig_db[key] = est_eigenvals
    # Eigenvectors are large (one flattened parameter vector each), so they
    # are written to disk instead of kept in the in-memory db.
    save_eigenvec(EXPERIMENT_DIRECTORY+"/{}-vec.npy".format(key), est_eigenvecs)
    return hessian_eig_db
def save_checkpoint(model, time):
    """
    Save a checkpoint of the model's parameters to the experiment directory.

    :param model: pytorch model (moved to CPU before serialization)
    :param time: int, training step used to name the checkpoint file
    """
    target = '{directory}/model-{trial}-{time}.pth'.format(
        directory=EXPERIMENT_DIRECTORY, trial=TRIAL_ID, time=time)
    torch.save(model.cpu().state_dict(), target)
def visualize_result(df, filename):
    """Plot every column of ``df`` as a solid line and save the figure to ``filename``."""
    axes = sns.lineplot(data=df, dashes=False)
    figure = axes.figure
    figure.savefig(filename, dpi=250)
    plt.close()
| [
"torch.manual_seed",
"external_libs.hessian_eigenthings.compute_hessian_eigenthings",
"torch.nn.CrossEntropyLoss",
"argparse.ArgumentParser",
"pathlib.Path",
"matplotlib.use",
"uuid.uuid4",
"seaborn.lineplot",
"matplotlib.pyplot.close",
"torch.cuda.is_available",
"numpy.random.seed",
"pandas.D... | [((98, 119), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (112, 119), False, 'import matplotlib\n'), ((390, 415), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (413, 415), False, 'import torch\n'), ((462, 516), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Argument parser"""'}), "(description='Argument parser')\n", (485, 516), False, 'import argparse\n'), ((2123, 2151), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2140, 2151), False, 'import torch\n'), ((2153, 2178), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (2167, 2178), True, 'import numpy as np\n'), ((3036, 3056), 'pandas.DataFrame', 'pd.DataFrame', (['acc_db'], {}), '(acc_db)\n', (3048, 3056), True, 'import pandas as pd\n'), ((3179, 3200), 'pandas.DataFrame', 'pd.DataFrame', (['loss_db'], {}), '(loss_db)\n', (3191, 3200), True, 'import pandas as pd\n'), ((3328, 3356), 'pandas.DataFrame', 'pd.DataFrame', (['hessian_eig_db'], {}), '(hessian_eig_db)\n', (3340, 3356), True, 'import pandas as pd\n'), ((4244, 4266), 'numpy.save', 'np.save', (['filename', 'arr'], {}), '(filename, arr)\n', (4251, 4266), True, 'import numpy as np\n'), ((4987, 5149), 'external_libs.hessian_eigenthings.compute_hessian_eigenthings', 'compute_hessian_eigenthings', (['model', 'loader', 'criterion'], {'num_eigenthings': '(3)', 'power_iter_steps': '(18)', 'power_iter_err_threshold': '(1e-05)', 'momentum': '(0)', 'use_gpu': 'use_gpu'}), '(model, loader, criterion, num_eigenthings=3,\n power_iter_steps=18, power_iter_err_threshold=1e-05, momentum=0,\n use_gpu=use_gpu)\n', (5014, 5149), False, 'from external_libs.hessian_eigenthings import compute_hessian_eigenthings\n'), ((5688, 5723), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 'df', 'dashes': '(False)'}), '(data=df, dashes=False)\n', (5700, 5723), True, 'import seaborn as sns\n'), ((5764, 5775), 
'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5773, 5775), True, 'import matplotlib.pyplot as plt\n'), ((2221, 2247), 'pathlib.Path', 'Path', (['EXPERIMENT_DIRECTORY'], {}), '(EXPERIMENT_DIRECTORY)\n', (2225, 2247), False, 'from pathlib import Path\n'), ((4870, 4897), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (4895, 4897), False, 'import torch\n'), ((286, 298), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (296, 298), False, 'import uuid\n')] |
"""
Code borrowed from
http://alexhwilliams.info/itsneuronalblog/2018/02/26/crossval/
https://gist.github.com/ahwillia/65d8f87fcd4bded3676d67b55c1a3954
"""
import numpy as np
from sklearn.utils import check_random_state
from copy import deepcopy
from sklearn.impute import SimpleImputer
from ya_pca.linalg_utils import svd_wrapper, rand_orthog
def svd_missing(X, M, rank, U_init='impute_mean', max_n_steps=100,
                atol=1e-6, random_state=None):
    """
    Computes SVD with missing data using the alternating algorithm described in http://alexhwilliams.info/itsneuronalblog/2018/02/26/crossval/.

    Parameters
    ----------
    X: array-like, (n_samples, n_features)
        The data matrix.

    M: array-like, (n_samples, n_features)
        The binary matrix indicating missing values (0 means missing).

    rank: int
        SVD rank to compute.

    U_init: str
        How to initialize the left singular vectors.
        Must be one of
            'random'
                Sample a random orthonormal matrix.
            'impute_mean'
                Impute the missing entries with the column means then compute the SVD.

    max_n_steps: int
        Maximum number of steps.

    atol: float
        Absolute tolerance for stopping criteria.

    random_state: None, int
        Random seed for random initalization.

    Output
    ------
    U, V, opt_history

    U: array-like, (n_samples, rank)
        The left singular values.

    V: array-like, (n_samples, rank)
        The right singular values.

    opt_history: list
        The loss values at each step.

    Note there is no normalization for the left/right singular vectors.
    """
    rng = check_random_state(random_state)

    # M must be a boolean mask, and every row/column needs at least one
    # observed entry or the censored least-squares systems become degenerate.
    assert M.dtype == bool
    assert all(M.mean(axis=0) != 0)
    assert all(M.mean(axis=1) != 0)

    # initialize U
    # NOTE(review): if U_init is not a string (e.g. a pre-computed array),
    # U is never assigned and the loop below raises NameError — confirm
    # whether array initialization was meant to be supported.
    if type(U_init) == str:
        if U_init == 'random':
            U = rand_orthog(X.shape[0], rank, random_state=rng)

        elif U_init == 'impute_mean':
            # Warm start: fill missing entries with column means, then take
            # the top-`rank` left singular vectors of the imputed matrix.
            X_filled = SimpleImputer(strategy='mean').fit_transform(X)
            U = svd_wrapper(X_filled, rank=rank)[0]

    loss_history = []
    prev_loss = np.nan
    for step in range(max_n_steps):
        # Alternate: fix U and solve for V, then fix V and solve for U.
        Vt = censored_lstsq(U, X, M)
        U = censored_lstsq(Vt.T, X.T, M.T).T

        # Mean squared error over the OBSERVED entries only.
        resid = np.dot(U, Vt) - X
        loss_val = np.mean(resid[M]**2)
        loss_history.append(loss_val)

        if step >= 1:
            diff = prev_loss - loss_val
            if diff < atol:
                break
        else:
            # NOTE(review): this `else` binds to `if step >= 1`, so prev_loss
            # is only recorded on step 0 and later steps compare against the
            # FIRST loss rather than the previous one — confirm whether the
            # `else` was meant for the inner `if diff < atol`.
            prev_loss = deepcopy(loss_val)

    return U, Vt.T, loss_history
def censored_lstsq(A, B, M):
    """Solve the least squares problem ``min ||M * (AX - B)||`` with missing data in B.

    Note: uses a broadcasted solve for speed — one small r x r normal-equation
    system per column of B, since each column sees a different mask.

    Args
    ----
    A (ndarray) : m x r matrix
    B (ndarray) : m x n matrix
    M (ndarray) : m x n binary matrix (zeros indicate missing values)

    Returns
    -------
    X (ndarray) : r x n matrix that minimizes norm(M*(AX - B))
    """
    if A.ndim == 1:
        A = A[:, None]

    # else solve via tensor representation
    rhs = np.dot(A.T, M * B).T[:, :, None]  # n x r x 1 tensor
    T = np.matmul(A.T[None, :, :], M.T[:, :, None] * A[None, :, :])  # n x r x r tensor
    try:
        # transpose to get r x n
        return np.squeeze(np.linalg.solve(T, rhs), axis=-1).T
    except np.linalg.LinAlgError:
        # FIX: narrowed from a bare ``except`` so unrelated errors are not
        # swallowed.  A singular normal matrix is the expected failure here;
        # Tikhonov-regularize the diagonal and retry.
        r = T.shape[1]
        T[:, np.arange(r), np.arange(r)] += 1e-6
        return np.squeeze(np.linalg.solve(T, rhs), axis=-1).T
| [
"numpy.mean",
"sklearn.utils.check_random_state",
"numpy.linalg.solve",
"numpy.dot",
"ya_pca.linalg_utils.rand_orthog",
"numpy.matmul",
"sklearn.impute.SimpleImputer",
"copy.deepcopy",
"ya_pca.linalg_utils.svd_wrapper",
"numpy.arange"
] | [((1676, 1708), 'sklearn.utils.check_random_state', 'check_random_state', (['random_state'], {}), '(random_state)\n', (1694, 1708), False, 'from sklearn.utils import check_random_state\n'), ((3413, 3472), 'numpy.matmul', 'np.matmul', (['A.T[None, :, :]', '(M.T[:, :, None] * A[None, :, :])'], {}), '(A.T[None, :, :], M.T[:, :, None] * A[None, :, :])\n', (3422, 3472), True, 'import numpy as np\n'), ((2601, 2623), 'numpy.mean', 'np.mean', (['(resid[M] ** 2)'], {}), '(resid[M] ** 2)\n', (2608, 2623), True, 'import numpy as np\n'), ((1913, 1960), 'ya_pca.linalg_utils.rand_orthog', 'rand_orthog', (['X.shape[0]', 'rank'], {'random_state': 'rng'}), '(X.shape[0], rank, random_state=rng)\n', (1924, 1960), False, 'from ya_pca.linalg_utils import svd_wrapper, rand_orthog\n'), ((2564, 2577), 'numpy.dot', 'np.dot', (['U', 'Vt'], {}), '(U, Vt)\n', (2570, 2577), True, 'import numpy as np\n'), ((3352, 3370), 'numpy.dot', 'np.dot', (['A.T', '(M * B)'], {}), '(A.T, M * B)\n', (3358, 3370), True, 'import numpy as np\n'), ((2819, 2837), 'copy.deepcopy', 'deepcopy', (['loss_val'], {}), '(loss_val)\n', (2827, 2837), False, 'from copy import deepcopy\n'), ((3561, 3584), 'numpy.linalg.solve', 'np.linalg.solve', (['T', 'rhs'], {}), '(T, rhs)\n', (3576, 3584), True, 'import numpy as np\n'), ((2346, 2378), 'ya_pca.linalg_utils.svd_wrapper', 'svd_wrapper', (['X_filled'], {'rank': 'rank'}), '(X_filled, rank=rank)\n', (2357, 2378), False, 'from ya_pca.linalg_utils import svd_wrapper, rand_orthog\n'), ((3646, 3658), 'numpy.arange', 'np.arange', (['r'], {}), '(r)\n', (3655, 3658), True, 'import numpy as np\n'), ((3660, 3672), 'numpy.arange', 'np.arange', (['r'], {}), '(r)\n', (3669, 3672), True, 'import numpy as np\n'), ((3708, 3731), 'numpy.linalg.solve', 'np.linalg.solve', (['T', 'rhs'], {}), '(T, rhs)\n', (3723, 3731), True, 'import numpy as np\n'), ((2024, 2054), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'strategy': '"""mean"""'}), "(strategy='mean')\n", (2037, 2054), False, 
'from sklearn.impute import SimpleImputer\n')] |
import os
import copy
import torch
import pickle
import numpy as np
from language import Lang
def initialize_env(config):
    """Normalize a raw (string-valued) config dict in place and return it.

    Creates the ``config['root']`` directory if missing, converts numeric
    fields from strings to int/float, and decodes the 'true'/'false' flag
    fields into booleans (or ``None`` for unset paths).

    Args:
        config: mapping of option name -> raw string value. Mutated in place.

    Returns:
        The same ``config`` dict with typed values.
    """
    try:
        os.mkdir(config['root'])
    except FileExistsError:
        # Only "already exists" is expected here; other OSErrors propagate
        # instead of being silently swallowed by a bare except.
        print("Directory found!!!!")
    # Integer-valued options.
    for key in ('batch_size', 'max_length', 'epochs', 'hidden_size',
                'eval_frequency'):
        config[key] = int(config[key])
    config['learning_rate'] = float(config['learning_rate'])
    # Boolean flags encoded as the literal strings 'true'/'false'.
    config['bidirectional'] = config['bidirectional'] == 'true'
    config['reverse'] = config['reverse'] == 'true'
    # The literal string 'false' means "not provided" for these paths.
    config['checkpoint'] = None if config['checkpoint'] == 'false' else config['checkpoint']
    config['obj_path'] = None if config['obj_path'] == 'false' else config['obj_path']
    config['rnn'] = 'LSTM' if config['rnn'] == 'lstm' else 'GRU'
    return config
def save_model(epochs, encoder, decoder, encoder_optimizer, decoder_optimizer, path):
    """Serialize a training checkpoint to ``path`` via ``torch.save``.

    The checkpoint stores the epoch counter plus the state dicts of both
    networks and both optimizers under fixed keys, so training can later be
    resumed from the same point.
    """
    checkpoint = {'epochs': epochs}
    checkpoint['encoder'] = encoder.state_dict()
    checkpoint['decoder'] = decoder.state_dict()
    checkpoint['encoder_optimizer'] = encoder_optimizer.state_dict()
    checkpoint['decoder_optimizer'] = decoder_optimizer.state_dict()
    torch.save(checkpoint, path)
class Helper:
    """Helper class for reading parallel-corpus data and building word banks."""

    def __init__(self, **kwargs):
        """Initialize the helper from keyword options.

        Keyword Args:
            lang1: name of the first language (required).
            lang2: name of the second language (required).
            max_length: sentence-length cutoff used when filtering (default 0).
            reverse: if True, swap input and output languages (default False).
        """
        self.lang1_name = kwargs['lang1']
        self.lang2_name = kwargs['lang2']
        # Use explicit defaults instead of a bare try/except: the old code
        # reset max_length to 0 whenever 'reverse' alone was missing.
        self.max_length = kwargs.get('max_length', 0)
        self.reverse = kwargs.get('reverse', False)
        if self.reverse:
            self.input_lang = Lang(self.lang2_name)
            self.output_lang = Lang(self.lang1_name)
        else:
            self.input_lang = Lang(self.lang1_name)
            self.output_lang = Lang(self.lang2_name)

    def read_langs(self, path):
        """Read a tab-separated corpus file and return its sentence pairs."""
        print("Reading lines...")
        # Read the file and split into lines; the context manager guarantees
        # the handle is closed (the old code leaked it).
        with open(path, encoding='utf-8') as corpus:
            lines = corpus.read().strip().split('\n')
        # Split every line into a [source, target] pair.
        pairs = [l.split('\t') for l in lines]
        print("Total %s sentence pairs\n" % len(pairs))
        # Reverse pairs when the helper was constructed with reverse=True.
        if self.reverse:
            pairs = [list(reversed(p)) for p in pairs]
        return pairs

    def filter_pair(self, pair, lower=None):
        """Return True when *pair* passes the length filter.

        Lengths are token counts for list pairs and whitespace-token counts
        for string pairs; both sides must be shorter than ``max_length``.
        When ``lower`` is given, list pairs must additionally be longer than
        ``lower`` tokens. NOTE: ``lower`` is ignored for string pairs, which
        preserves the original behaviour.
        """
        if lower is not None:
            if isinstance(pair[0], list):
                return (len(pair[0]) > lower and len(pair[1]) > lower and
                        len(pair[0]) < self.max_length and
                        len(pair[1]) < self.max_length)
        if isinstance(pair[0], list):
            return (len(pair[0]) < self.max_length and
                    len(pair[1]) < self.max_length)
        return (len(pair[0].split(' ')) < self.max_length and
                len(pair[1].split(' ')) < self.max_length)

    def filter_pairs(self, pairs, lower=None):
        """Keep only the pairs accepted by :meth:`filter_pair`."""
        return [pair for pair in pairs if self.filter_pair(pair, lower)]

    def indexes_from_sentence(self, lang, sentence):
        """Map each word of *sentence* to its index, using UNK for OOV words."""
        return [lang.word2index[word] if word in lang.word2index
                else lang.unk_token for word in sentence]

    def tensor_from_sentence(self, lang, sentence):
        """Convert a sentence into a numpy index array terminated by EOS."""
        indices = self.indexes_from_sentence(lang, sentence)
        indices.append(lang.eos_token)
        return np.array(indices)

    def tensors_from_pair(self, pair):
        """Convert a (source, target) pair into a pair of index arrays."""
        input_tensor = self.tensor_from_sentence(self.input_lang, pair[0])
        target_tensor = self.tensor_from_sentence(self.output_lang, pair[1])
        return (input_tensor, target_tensor)

    def prepare_data(self, pairs, occurence=None, load=None):
        """Filter pairs, build/load vocabularies and convert pairs to indices.

        Args:
            pairs: list of [source, target] sentence pairs.
            occurence: when not None, restricts both vocabularies to their
                most common words (the threshold itself is fixed at 5).
            load: when not None, directory of previously pickled Lang objects.

        Returns:
            (input_lang, output_lang, filtered_pairs, pair_tensors)
        """
        pairs = self.filter_pairs(pairs)
        print("Trimmed to %s sentence pairs as per max_length\n" % len(pairs))
        if load is None:
            for pair in pairs:
                self.input_lang.add_sentence(pair[0])
                self.output_lang.add_sentence(pair[1])
            if occurence is not None:
                self.input_lang.most_common_words(5)
                self.output_lang.most_common_words(5)
                print("Most common words:")
                print(self.input_lang.name, self.input_lang.n_words)
                print(self.output_lang.name, self.output_lang.n_words)
        else:
            self.load_lang_object(load)
        print("Counted words:")
        print(self.input_lang.name, self.input_lang.n_words)
        print(self.output_lang.name, self.output_lang.n_words)
        # Deep-copy so the returned string pairs are not clobbered by the
        # in-place index conversion below.
        pair_tensors = copy.deepcopy(pairs)
        for i, _ in enumerate(pair_tensors):
            tensors = self.tensors_from_pair(pair_tensors[i])
            pair_tensors[i][0] = tensors[0]
            pair_tensors[i][1] = tensors[1]
        return self.input_lang, self.output_lang, pairs, pair_tensors

    def padding_sentence(self, word_indices, pad_token):
        """Pad an index array with *pad_token* up to ``max_length``."""
        if self.max_length > len(word_indices):
            padding = np.full(self.max_length - len(word_indices), pad_token)
            word_indices = np.concatenate([word_indices, padding])
        return word_indices

    def padding(self, pair_tensors, pad_token):
        """Pad both sides of every pair and return them as one numpy array."""
        for i, _ in enumerate(pair_tensors):
            pair_tensors[i][0] = self.padding_sentence(pair_tensors[i][0],
                                                       pad_token)
            pair_tensors[i][1] = self.padding_sentence(pair_tensors[i][1],
                                                       pad_token)
        return np.array(pair_tensors)

    def save_lang_object(self, path):
        """Pickle both Lang objects into the directory *path*."""
        with open((path+'/input_lang.pkl'), 'wb') as output:
            pickle.dump(self.input_lang, output, pickle.HIGHEST_PROTOCOL)
        with open((path+'/output_lang.pkl'), 'wb') as output:
            pickle.dump(self.output_lang, output, pickle.HIGHEST_PROTOCOL)

    def load_lang_object(self, path):
        """Load both pickled Lang objects from the directory *path*."""
        print("Loading language objects from : {}/".format(path))
        # Renamed the handle from 'input' so the builtin is not shadowed.
        with open((path+'/input_lang.pkl'), 'rb') as lang_file:
            self.input_lang = pickle.load(lang_file)
        with open((path+'/output_lang.pkl'), 'rb') as lang_file:
            self.output_lang = pickle.load(lang_file)
"pickle.dump",
"pickle.load",
"numpy.array",
"os.mkdir",
"copy.deepcopy",
"language.Lang"
] | [((141, 165), 'os.mkdir', 'os.mkdir', (["config['root']"], {}), "(config['root'])\n", (149, 165), False, 'import os\n'), ((3796, 3813), 'numpy.array', 'np.array', (['indices'], {}), '(indices)\n', (3804, 3813), True, 'import numpy as np\n'), ((5042, 5062), 'copy.deepcopy', 'copy.deepcopy', (['pairs'], {}), '(pairs)\n', (5055, 5062), False, 'import copy\n'), ((6110, 6132), 'numpy.array', 'np.array', (['pair_tensors'], {}), '(pair_tensors)\n', (6118, 6132), True, 'import numpy as np\n'), ((1805, 1826), 'language.Lang', 'Lang', (['self.lang2_name'], {}), '(self.lang2_name)\n', (1809, 1826), False, 'from language import Lang\n'), ((1858, 1879), 'language.Lang', 'Lang', (['self.lang1_name'], {}), '(self.lang1_name)\n', (1862, 1879), False, 'from language import Lang\n'), ((1924, 1945), 'language.Lang', 'Lang', (['self.lang1_name'], {}), '(self.lang1_name)\n', (1928, 1945), False, 'from language import Lang\n'), ((1977, 1998), 'language.Lang', 'Lang', (['self.lang2_name'], {}), '(self.lang2_name)\n', (1981, 1998), False, 'from language import Lang\n'), ((6245, 6306), 'pickle.dump', 'pickle.dump', (['self.input_lang', 'output', 'pickle.HIGHEST_PROTOCOL'], {}), '(self.input_lang, output, pickle.HIGHEST_PROTOCOL)\n', (6256, 6306), False, 'import pickle\n'), ((6381, 6443), 'pickle.dump', 'pickle.dump', (['self.output_lang', 'output', 'pickle.HIGHEST_PROTOCOL'], {}), '(self.output_lang, output, pickle.HIGHEST_PROTOCOL)\n', (6392, 6443), False, 'import pickle\n'), ((6639, 6657), 'pickle.load', 'pickle.load', (['input'], {}), '(input)\n', (6650, 6657), False, 'import pickle\n'), ((6750, 6768), 'pickle.load', 'pickle.load', (['input'], {}), '(input)\n', (6761, 6768), False, 'import pickle\n')] |
"""Principal Component Analysis Base Classes"""
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from ..base import BaseEstimator, TransformerMixin
from ..utils.validation import check_is_fitted
from abc import ABCMeta, abstractmethod
class _BasePCA(TransformerMixin, BaseEstimator, metaclass=ABCMeta):
    """Base class for PCA methods.

    Warning: This class should not be used directly.
    Use derived classes instead.
    """

    def get_covariance(self):
        """Compute data covariance with the generative model.

        ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
        where S**2 contains the explained variances, and sigma2 contains the
        noise variances.

        Returns
        -------
        cov : array, shape=(n_features, n_features)
            Estimated covariance of data.
        """
        components_ = self.components_
        exp_var = self.explained_variance_
        if self.whiten:
            # Undo whitening: scale the unit-variance components back to
            # their singular-value magnitudes before forming the covariance.
            components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
        # Clip at zero so numerical noise cannot yield negative variances.
        exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
        cov = np.dot(components_.T * exp_var_diff, components_)
        cov.flat[::len(cov) + 1] += self.noise_variance_  # modify diag inplace
        return cov

    def get_precision(self):
        """Compute data precision matrix with the generative model.

        Equals the inverse of the covariance but computed with
        the matrix inversion lemma for efficiency.

        Returns
        -------
        precision : array, shape=(n_features, n_features)
            Estimated precision of data.
        """
        n_features = self.components_.shape[1]

        # handle corner cases first
        if self.n_components_ == 0:
            # No retained components: the model is pure isotropic noise.
            return np.eye(n_features) / self.noise_variance_
        if self.n_components_ == n_features:
            # Full-rank model: direct inversion is as cheap as the lemma.
            return linalg.inv(self.get_covariance())

        # Get precision using matrix inversion lemma
        components_ = self.components_
        exp_var = self.explained_variance_
        if self.whiten:
            components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
        exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
        # Woodbury identity: only an (n_components x n_components) matrix is
        # inverted instead of the full (n_features x n_features) covariance.
        precision = np.dot(components_, components_.T) / self.noise_variance_
        precision.flat[::len(precision) + 1] += 1. / exp_var_diff
        precision = np.dot(components_.T,
                           np.dot(linalg.inv(precision), components_))
        precision /= -(self.noise_variance_ ** 2)
        precision.flat[::len(precision) + 1] += 1. / self.noise_variance_
        return precision

    @abstractmethod
    def fit(self, X, y=None):
        """Placeholder for fit. Subclasses should implement this method!

        Fit the model with X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples and
            n_features is the number of features.

        Returns
        -------
        self : object
            Returns the instance itself.
        """

    def transform(self, X):
        """Apply dimensionality reduction to X.

        X is projected on the first principal components previously extracted
        from a training set.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
            and n_features is the number of features.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)

        Examples
        --------
        >>> import numpy as np
        >>> from sklearn.decomposition import IncrementalPCA
        >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
        >>> ipca = IncrementalPCA(n_components=2, batch_size=3)
        >>> ipca.fit(X)
        IncrementalPCA(batch_size=3, n_components=2)
        >>> ipca.transform(X) # doctest: +SKIP
        """
        check_is_fitted(self)

        X = self._validate_data(X, dtype=[np.float64, np.float32], reset=False)
        if self.mean_ is not None:
            # Center with the training mean before projecting.
            X = X - self.mean_
        X_transformed = np.dot(X, self.components_.T)
        if self.whiten:
            # Rescale each projected component to unit variance.
            X_transformed /= np.sqrt(self.explained_variance_)
        return X_transformed

    def inverse_transform(self, X):
        """Transform data back to its original space.

        In other words, return an input X_original whose transform would be X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
            and n_components is the number of components.

        Returns
        -------
        X_original array-like, shape (n_samples, n_features)

        Notes
        -----
        If whitening is enabled, inverse_transform will compute the
        exact inverse operation, which includes reversing whitening.
        """
        if self.whiten:
            # Reverse the whitening scaling while mapping back to the
            # original feature space.
            return np.dot(X, np.sqrt(self.explained_variance_[:, np.newaxis]) *
                            self.components_) + self.mean_
        else:
            return np.dot(X, self.components_) + self.mean_
| [
"numpy.eye",
"numpy.sqrt",
"numpy.dot",
"numpy.maximum",
"scipy.linalg.inv"
] | [((1209, 1256), 'numpy.maximum', 'np.maximum', (['(exp_var - self.noise_variance_)', '(0.0)'], {}), '(exp_var - self.noise_variance_, 0.0)\n', (1219, 1256), True, 'import numpy as np\n'), ((1270, 1319), 'numpy.dot', 'np.dot', (['(components_.T * exp_var_diff)', 'components_'], {}), '(components_.T * exp_var_diff, components_)\n', (1276, 1319), True, 'import numpy as np\n'), ((2310, 2357), 'numpy.maximum', 'np.maximum', (['(exp_var - self.noise_variance_)', '(0.0)'], {}), '(exp_var - self.noise_variance_, 0.0)\n', (2320, 2357), True, 'import numpy as np\n'), ((4339, 4368), 'numpy.dot', 'np.dot', (['X', 'self.components_.T'], {}), '(X, self.components_.T)\n', (4345, 4368), True, 'import numpy as np\n'), ((2377, 2411), 'numpy.dot', 'np.dot', (['components_', 'components_.T'], {}), '(components_, components_.T)\n', (2383, 2411), True, 'import numpy as np\n'), ((4422, 4455), 'numpy.sqrt', 'np.sqrt', (['self.explained_variance_'], {}), '(self.explained_variance_)\n', (4429, 4455), True, 'import numpy as np\n'), ((1154, 1185), 'numpy.sqrt', 'np.sqrt', (['exp_var[:, np.newaxis]'], {}), '(exp_var[:, np.newaxis])\n', (1161, 1185), True, 'import numpy as np\n'), ((1915, 1933), 'numpy.eye', 'np.eye', (['n_features'], {}), '(n_features)\n', (1921, 1933), True, 'import numpy as np\n'), ((2255, 2286), 'numpy.sqrt', 'np.sqrt', (['exp_var[:, np.newaxis]'], {}), '(exp_var[:, np.newaxis])\n', (2262, 2286), True, 'import numpy as np\n'), ((2577, 2598), 'scipy.linalg.inv', 'linalg.inv', (['precision'], {}), '(precision)\n', (2587, 2598), False, 'from scipy import linalg\n'), ((5340, 5367), 'numpy.dot', 'np.dot', (['X', 'self.components_'], {}), '(X, self.components_)\n', (5346, 5367), True, 'import numpy as np\n'), ((5197, 5245), 'numpy.sqrt', 'np.sqrt', (['self.explained_variance_[:, np.newaxis]'], {}), '(self.explained_variance_[:, np.newaxis])\n', (5204, 5245), True, 'import numpy as np\n')] |
import os,time,cv2, sys, math
import bchlib
import tensorflow as tf
import argparse
import numpy as np
import tensorflow.contrib.image
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.saved_model import signature_constants
# Command-line interface: model paths, input video and optional debug output.
parser = argparse.ArgumentParser()
parser.add_argument('--detector_model', type=str, required=True)
parser.add_argument('--decoder_model', type=str, required=True)
parser.add_argument('--video', type=str, required=True)
parser.add_argument('--secret_size', type=int, default=100)
parser.add_argument('--save_video', type=str, default=None)
parser.add_argument('--visualize_detector', action='store_true', help='Visualize detector mask output')
args = parser.parse_args()

# BCH error-correcting-code parameters; presumably these must match the
# parameters used when the secret was encoded -- confirm against the encoder.
BCH_POLYNOMIAL = 137
BCH_BITS = 5
def get_intersect(p1, p2, p3, p4):
    """Intersection of line (p1, p2) with line (p3, p4).

    Works in homogeneous coordinates: each line is the cross product of its
    two points, and the intersection is the cross product of the two lines.
    Prints 'invalid' and returns (0, 0) when the lines are parallel.
    """
    points = np.vstack([p1, p2, p3, p4])
    homogeneous = np.hstack((points, np.ones((4, 1))))
    line_a = np.cross(homogeneous[0], homogeneous[1])
    line_b = np.cross(homogeneous[2], homogeneous[3])
    x, y, w = np.cross(line_a, line_b)
    if w == 0:
        # Parallel lines meet at infinity (homogeneous coordinate is zero).
        print('invalid')
        return (0, 0)
    return (x / w, y / w)
def poly_area(poly):
    """Area of a simple polygon given as an (N, 2) vertex array (shoelace)."""
    xs = poly[:, 0]
    ys = poly[:, 1]
    cross_sum = np.dot(xs, np.roll(ys, 1)) - np.dot(ys, np.roll(xs, 1))
    return 0.5 * np.abs(cross_sum)
def order_points(pts):
    """Order four corner points as top-left, top-right, bottom-right, bottom-left."""
    ordered = np.zeros((4, 2), dtype=np.float32)
    coord_sums = pts.sum(axis=1)
    coord_diffs = np.diff(pts, axis=1)
    # Smallest x+y is the top-left corner, largest is the bottom-right.
    ordered[0] = pts[np.argmin(coord_sums)]
    ordered[2] = pts[np.argmax(coord_sums)]
    # Smallest y-x is the top-right corner, largest is the bottom-left.
    ordered[1] = pts[np.argmin(coord_diffs)]
    ordered[3] = pts[np.argmax(coord_diffs)]
    return ordered
def main():
    """Scan a video for StegaStamp-style markers and decode their secrets.

    Per frame: segment candidate regions with the detector network, fit a
    quadrilateral to each region's convex hull, rectify it with a homography,
    and run the decoder network on all four rotations of the patch; a BCH
    code validates the recovered bit string. Frames are displayed live or
    written to ``args.save_video``.
    """
    # Initializing network
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    detector_graph = tf.Graph()
    decoder_graph = tf.Graph()

    # Load the detector SavedModel and resolve its input/output tensors.
    with detector_graph.as_default():
        detector_sess = tf.Session()
        detector_model = tf.saved_model.loader.load(detector_sess, [tag_constants.SERVING], args.detector_model)

        detector_input_name = detector_model.signature_def[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY].inputs['image'].name
        detector_input = detector_graph.get_tensor_by_name(detector_input_name)

        detector_output_name = detector_model.signature_def[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY].outputs['detections'].name
        detector_output = detector_graph.get_tensor_by_name(detector_output_name)

    # Load the decoder SavedModel the same way.
    with decoder_graph.as_default():
        decoder_sess = tf.Session()
        decoder_model = tf.saved_model.loader.load(decoder_sess, [tag_constants.SERVING], args.decoder_model)

        decoder_input_name = decoder_model.signature_def[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY].inputs['image'].name
        decoder_input = decoder_graph.get_tensor_by_name(decoder_input_name)

        decoder_output_name = decoder_model.signature_def[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY].outputs['decoded'].name
        decoder_output = decoder_graph.get_tensor_by_name(decoder_output_name)

    cap = cv2.VideoCapture(args.video)
    bch = bchlib.BCH(BCH_POLYNOMIAL, BCH_BITS)

    # Read one frame up front to learn the video dimensions.
    ret, frame = cap.read()
    f_height, f_width = frame.shape[0:2]

    if args.save_video is not None:
        fourcc1 = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter(args.save_video, fourcc1, 30.0, (f_width, f_height))

    while(True):
        ret, frame = cap.read()
        if frame is None:
            break

        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # The detector expects a 1024x1024 float image scaled to [0, 1].
        detector_image_input = cv2.resize(frame_rgb, (1024,1024))
        detector_image_input = np.expand_dims(np.float32(detector_image_input),axis=0)/255.0

        output_image = detector_sess.run(detector_output,feed_dict={detector_input:detector_image_input})
        output_image = np.array(output_image[0,:,:,:])
        # Per-pixel class id from the segmentation logits (the alias ``x``
        # is never used afterwards).
        output_image = x = np.argmax(output_image, axis = -1)

        # Binary visualisation mask, resized back to full frame resolution.
        color_codes = np.array([[255,255,255],[0,0,0]])
        out_vis_image = color_codes[output_image.astype(int)]

        mask_im = cv2.resize(np.float32(out_vis_image), (f_width,f_height))
        if args.visualize_detector:
            mask_vis = mask_im.astype(np.uint8)

        # NOTE(review): the 2-value unpacking matches OpenCV >= 4;
        # OpenCV 3 returns 3 values -- confirm the target cv2 version.
        contours, _ = cv2.findContours(cv2.cvtColor(mask_im, cv2.COLOR_BGR2GRAY).astype(np.uint8),1,2)
        extrema = np.zeros((8,2))
        corners = np.zeros((4,2))
        for cnt in contours:
            area = cv2.contourArea(cnt)
            if area < 1000:
                # Ignore tiny blobs (detector noise).
                continue

            hull = cv2.convexHull(cnt)
            if len(hull) < 4:
                continue

            if args.visualize_detector:
                # NOTE(review): this draws ``corners`` from the *previous*
                # iteration (it is only updated further below).
                cv2.polylines(mask_vis, np.int32([corners]), thickness=6, color=(100,100,250), isClosed=True)

            # Eight extreme hull points, one per compass direction.
            extrema[0,:] = hull[np.argmax(hull[:,0,0]),0,:]
            extrema[1,:] = hull[np.argmax(hull[:,0,0]+hull[:,0,1]),0,:]
            extrema[2,:] = hull[np.argmax(hull[:,0,1]),0,:]
            extrema[3,:] = hull[np.argmax(-hull[:,0,0]+hull[:,0,1]),0,:]
            extrema[4,:] = hull[np.argmax(-hull[:,0,0]),0,:]
            extrema[5,:] = hull[np.argmax(-hull[:,0,0]-hull[:,0,1]),0,:]
            extrema[6,:] = hull[np.argmax(-hull[:,0,1]),0,:]
            extrema[7,:] = hull[np.argmax(hull[:,0,0]-hull[:,0,1]),0,:]

            # Keep the four longest edges between consecutive extreme points;
            # their pairwise intersections become the quad corners.
            extrema_lines = extrema - np.roll(extrema, shift=1, axis=0)
            extrema_len = extrema_lines[:,0]**2 + extrema_lines[:,1]**2
            line_idx = np.sort(extrema_len.argsort()[-4:])
            for c in range(4):
                p1 = extrema[line_idx[(c-1)%4],:]
                p2 = extrema[(line_idx[(c-1)%4]-1)%8,:]
                p3 = extrema[line_idx[c],:]
                p4 = extrema[(line_idx[c]-1)%8,:]
                corners[c,:] = get_intersect(p1, p2, p3, p4)

            # Reject fits whose quad is much larger than the blob itself.
            new_area = poly_area(corners)
            if new_area / area > 1.5:
                continue

            corners = order_points(corners)
            corners_full_res = corners

            # Rectify the marker into a 400x400 patch via homography.
            pts_dst = np.array([[0,0],[399,0],[399,399],[0,399]])
            h, status = cv2.findHomography(corners_full_res, pts_dst)
            try:
                warped_im = cv2.warpPerspective(frame_rgb, h, (400,400))
                w_im = warped_im.astype(np.float32)
                w_im /= 255.
            except:
                # NOTE(review): bare except silently skips failed warps.
                continue

            # The marker orientation is unknown: try all four rotations.
            for im_rotation in range(4):
                w_rotated = np.rot90(w_im, im_rotation)
                recovered_secret = decoder_sess.run([decoder_output],feed_dict={decoder_input:[w_rotated]})[0][0]
                recovered_secret = list(recovered_secret)
                recovered_secret = [int(i) for i in recovered_secret]

                # First 96 bits are payload + ECC; the remaining footer bits
                # must all be zero for a plausible decode.
                packet_binary = "".join([str(bit) for bit in recovered_secret[:96]])
                footer = recovered_secret[96:]
                if np.sum(footer) > 0:
                    continue

                # Pack the bit string into bytes, then split payload and ECC.
                packet = bytes(int(packet_binary[i : i + 8], 2) for i in range(0, len(packet_binary), 8))
                packet = bytearray(packet)

                data, ecc = packet[:-bch.ecc_bytes], packet[-bch.ecc_bytes:]

                bitflips = bch.decode_inplace(data, ecc)

                if bitflips != -1:
                    # BCH succeeded: the payload is a valid codeword.
                    print('Num bits corrected: ', bitflips)
                    try:
                        code = data.decode("utf-8")
                    except:
                        continue
                    # Annotate the frame with the decoded text and outline.
                    color = (100,250,100)
                    cv2.polylines(frame, np.int32([corners]), thickness=6, color=color, isClosed=True)
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    # NOTE(review): np.int was removed in NumPy >= 1.24;
                    # this cast needs plain int on modern NumPy.
                    im = cv2.putText(frame, code, tuple((corners[0,:]+np.array([0,-15])).astype(np.int)), font, 1,(0,0,0), 2, cv2.LINE_AA)

        if args.save_video is not None:
            out.write(frame)
        else:
            cv2.imshow('frame',frame)
            if args.visualize_detector:
                cv2.imshow('detector_mask', mask_vis)
        cv2.waitKey(1)

    cap.release()
    if args.save_video:
        out.release()
if __name__ == "__main__":
main()
| [
"numpy.int32",
"tensorflow.saved_model.loader.load",
"cv2.imshow",
"numpy.array",
"cv2.warpPerspective",
"numpy.rot90",
"bchlib.BCH",
"tensorflow.Graph",
"numpy.cross",
"argparse.ArgumentParser",
"tensorflow.Session",
"numpy.diff",
"cv2.VideoWriter",
"cv2.contourArea",
"numpy.vstack",
... | [((263, 288), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (286, 288), False, 'import argparse\n'), ((804, 831), 'numpy.vstack', 'np.vstack', (['[p1, p2, p3, p4]'], {}), '([p1, p2, p3, p4])\n', (813, 831), True, 'import numpy as np\n'), ((878, 898), 'numpy.cross', 'np.cross', (['h[0]', 'h[1]'], {}), '(h[0], h[1])\n', (886, 898), True, 'import numpy as np\n'), ((908, 928), 'numpy.cross', 'np.cross', (['h[2]', 'h[3]'], {}), '(h[2], h[3])\n', (916, 928), True, 'import numpy as np\n'), ((943, 959), 'numpy.cross', 'np.cross', (['l1', 'l2'], {}), '(l1, l2)\n', (951, 959), True, 'import numpy as np\n'), ((1201, 1235), 'numpy.zeros', 'np.zeros', (['(4, 2)'], {'dtype': 'np.float32'}), '((4, 2), dtype=np.float32)\n', (1209, 1235), True, 'import numpy as np\n'), ((1336, 1356), 'numpy.diff', 'np.diff', (['pts'], {'axis': '(1)'}), '(pts, axis=1)\n', (1343, 1356), True, 'import numpy as np\n'), ((1498, 1514), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (1512, 1514), True, 'import tensorflow as tf\n'), ((1579, 1589), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1587, 1589), True, 'import tensorflow as tf\n'), ((1610, 1620), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1618, 1620), True, 'import tensorflow as tf\n'), ((2874, 2902), 'cv2.VideoCapture', 'cv2.VideoCapture', (['args.video'], {}), '(args.video)\n', (2890, 2902), False, 'import os, time, cv2, sys, math\n'), ((2913, 2949), 'bchlib.BCH', 'bchlib.BCH', (['BCH_POLYNOMIAL', 'BCH_BITS'], {}), '(BCH_POLYNOMIAL, BCH_BITS)\n', (2923, 2949), False, 'import bchlib\n'), ((1278, 1290), 'numpy.argmin', 'np.argmin', (['s'], {}), '(s)\n', (1287, 1290), True, 'import numpy as np\n'), ((1310, 1322), 'numpy.argmax', 'np.argmax', (['s'], {}), '(s)\n', (1319, 1322), True, 'import numpy as np\n'), ((1377, 1392), 'numpy.argmin', 'np.argmin', (['diff'], {}), '(diff)\n', (1386, 1392), True, 'import numpy as np\n'), ((1412, 1427), 'numpy.argmax', 'np.argmax', (['diff'], {}), 
'(diff)\n', (1421, 1427), True, 'import numpy as np\n'), ((1684, 1696), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1694, 1696), True, 'import tensorflow as tf\n'), ((1722, 1814), 'tensorflow.saved_model.loader.load', 'tf.saved_model.loader.load', (['detector_sess', '[tag_constants.SERVING]', 'args.detector_model'], {}), '(detector_sess, [tag_constants.SERVING], args.\n detector_model)\n', (1748, 1814), True, 'import tensorflow as tf\n'), ((2312, 2324), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2322, 2324), True, 'import tensorflow as tf\n'), ((2349, 2439), 'tensorflow.saved_model.loader.load', 'tf.saved_model.loader.load', (['decoder_sess', '[tag_constants.SERVING]', 'args.decoder_model'], {}), '(decoder_sess, [tag_constants.SERVING], args.\n decoder_model)\n', (2375, 2439), True, 'import tensorflow as tf\n'), ((3075, 3106), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'XVID'"], {}), "(*'XVID')\n", (3097, 3106), False, 'import os, time, cv2, sys, math\n'), ((3121, 3189), 'cv2.VideoWriter', 'cv2.VideoWriter', (['args.save_video', 'fourcc1', '(30.0)', '(f_width, f_height)'], {}), '(args.save_video, fourcc1, 30.0, (f_width, f_height))\n', (3136, 3189), False, 'import os, time, cv2, sys, math\n'), ((3304, 3342), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (3316, 3342), False, 'import os, time, cv2, sys, math\n'), ((3375, 3410), 'cv2.resize', 'cv2.resize', (['frame_rgb', '(1024, 1024)'], {}), '(frame_rgb, (1024, 1024))\n', (3385, 3410), False, 'import os, time, cv2, sys, math\n'), ((3633, 3667), 'numpy.array', 'np.array', (['output_image[0, :, :, :]'], {}), '(output_image[0, :, :, :])\n', (3641, 3667), True, 'import numpy as np\n'), ((3692, 3724), 'numpy.argmax', 'np.argmax', (['output_image'], {'axis': '(-1)'}), '(output_image, axis=-1)\n', (3701, 3724), True, 'import numpy as np\n'), ((3750, 3788), 'numpy.array', 'np.array', (['[[255, 255, 255], [0, 0, 0]]'], {}), '([[255, 
255, 255], [0, 0, 0]])\n', (3758, 3788), True, 'import numpy as np\n'), ((4129, 4145), 'numpy.zeros', 'np.zeros', (['(8, 2)'], {}), '((8, 2))\n', (4137, 4145), True, 'import numpy as np\n'), ((4163, 4179), 'numpy.zeros', 'np.zeros', (['(4, 2)'], {}), '((4, 2))\n', (4171, 4179), True, 'import numpy as np\n'), ((851, 866), 'numpy.ones', 'np.ones', (['(4, 1)'], {}), '((4, 1))\n', (858, 866), True, 'import numpy as np\n'), ((3876, 3901), 'numpy.float32', 'np.float32', (['out_vis_image'], {}), '(out_vis_image)\n', (3886, 3901), True, 'import numpy as np\n'), ((4227, 4247), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (4242, 4247), False, 'import os, time, cv2, sys, math\n'), ((4321, 4340), 'cv2.convexHull', 'cv2.convexHull', (['cnt'], {}), '(cnt)\n', (4335, 4340), False, 'import os, time, cv2, sys, math\n'), ((5789, 5839), 'numpy.array', 'np.array', (['[[0, 0], [399, 0], [399, 399], [0, 399]]'], {}), '([[0, 0], [399, 0], [399, 399], [0, 399]])\n', (5797, 5839), True, 'import numpy as np\n'), ((5857, 5902), 'cv2.findHomography', 'cv2.findHomography', (['corners_full_res', 'pts_dst'], {}), '(corners_full_res, pts_dst)\n', (5875, 5902), False, 'import os, time, cv2, sys, math\n'), ((7611, 7637), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (7621, 7637), False, 'import os, time, cv2, sys, math\n'), ((7743, 7757), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (7754, 7757), False, 'import os, time, cv2, sys, math\n'), ((3456, 3488), 'numpy.float32', 'np.float32', (['detector_image_input'], {}), '(detector_image_input)\n', (3466, 3488), True, 'import numpy as np\n'), ((5119, 5152), 'numpy.roll', 'np.roll', (['extrema'], {'shift': '(1)', 'axis': '(0)'}), '(extrema, shift=1, axis=0)\n', (5126, 5152), True, 'import numpy as np\n'), ((5948, 5993), 'cv2.warpPerspective', 'cv2.warpPerspective', (['frame_rgb', 'h', '(400, 400)'], {}), '(frame_rgb, h, (400, 400))\n', (5967, 5993), False, 'import os, time, cv2, sys, 
math\n'), ((6189, 6216), 'numpy.rot90', 'np.rot90', (['w_im', 'im_rotation'], {}), '(w_im, im_rotation)\n', (6197, 6216), True, 'import numpy as np\n'), ((7693, 7730), 'cv2.imshow', 'cv2.imshow', (['"""detector_mask"""', 'mask_vis'], {}), "('detector_mask', mask_vis)\n", (7703, 7730), False, 'import os, time, cv2, sys, math\n'), ((1104, 1126), 'numpy.roll', 'np.roll', (['poly[:, 1]', '(1)'], {}), '(poly[:, 1], 1)\n', (1111, 1126), True, 'import numpy as np\n'), ((1143, 1165), 'numpy.roll', 'np.roll', (['poly[:, 0]', '(1)'], {}), '(poly[:, 0], 1)\n', (1150, 1165), True, 'import numpy as np\n'), ((4047, 4088), 'cv2.cvtColor', 'cv2.cvtColor', (['mask_im', 'cv2.COLOR_BGR2GRAY'], {}), '(mask_im, cv2.COLOR_BGR2GRAY)\n', (4059, 4088), False, 'import os, time, cv2, sys, math\n'), ((4477, 4496), 'numpy.int32', 'np.int32', (['[corners]'], {}), '([corners])\n', (4485, 4496), True, 'import numpy as np\n'), ((4580, 4604), 'numpy.argmax', 'np.argmax', (['hull[:, 0, 0]'], {}), '(hull[:, 0, 0])\n', (4589, 4604), True, 'import numpy as np\n'), ((4640, 4680), 'numpy.argmax', 'np.argmax', (['(hull[:, 0, 0] + hull[:, 0, 1])'], {}), '(hull[:, 0, 0] + hull[:, 0, 1])\n', (4649, 4680), True, 'import numpy as np\n'), ((4712, 4736), 'numpy.argmax', 'np.argmax', (['hull[:, 0, 1]'], {}), '(hull[:, 0, 1])\n', (4721, 4736), True, 'import numpy as np\n'), ((4772, 4813), 'numpy.argmax', 'np.argmax', (['(-hull[:, 0, 0] + hull[:, 0, 1])'], {}), '(-hull[:, 0, 0] + hull[:, 0, 1])\n', (4781, 4813), True, 'import numpy as np\n'), ((4845, 4870), 'numpy.argmax', 'np.argmax', (['(-hull[:, 0, 0])'], {}), '(-hull[:, 0, 0])\n', (4854, 4870), True, 'import numpy as np\n'), ((4906, 4947), 'numpy.argmax', 'np.argmax', (['(-hull[:, 0, 0] - hull[:, 0, 1])'], {}), '(-hull[:, 0, 0] - hull[:, 0, 1])\n', (4915, 4947), True, 'import numpy as np\n'), ((4979, 5004), 'numpy.argmax', 'np.argmax', (['(-hull[:, 0, 1])'], {}), '(-hull[:, 0, 1])\n', (4988, 5004), True, 'import numpy as np\n'), ((5040, 5080), 'numpy.argmax', 
'np.argmax', (['(hull[:, 0, 0] - hull[:, 0, 1])'], {}), '(hull[:, 0, 0] - hull[:, 0, 1])\n', (5049, 5080), True, 'import numpy as np\n'), ((6611, 6625), 'numpy.sum', 'np.sum', (['footer'], {}), '(footer)\n', (6617, 6625), True, 'import numpy as np\n'), ((7262, 7281), 'numpy.int32', 'np.int32', (['[corners]'], {}), '([corners])\n', (7270, 7281), True, 'import numpy as np\n'), ((7446, 7464), 'numpy.array', 'np.array', (['[0, -15]'], {}), '([0, -15])\n', (7454, 7464), True, 'import numpy as np\n')] |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""ncf export file"""
import numpy as np
from mindspore import Tensor, context, load_checkpoint, load_param_into_net, export
import src.constants as rconst
from utils.config import config
from ncf import NCFModel, PredictWithSigmoid
# Configure MindSpore for static-graph execution on the requested backend;
# Ascend additionally needs an explicit device id.
context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target)
if config.device_target == "Ascend":
    context.set_context(device_id=config.device_id)

if __name__ == "__main__":
    topk = rconst.TOP_K
    num_eval_neg = rconst.NUM_EVAL_NEGATIVES

    # Dataset-dependent evaluation sizes (number of users/items) for the two
    # supported MovieLens variants.
    if config.dataset == "ml-1m":
        num_eval_users = 6040
        num_eval_items = 3706
    elif config.dataset == "ml-20m":
        num_eval_users = 138493
        num_eval_items = 26744
    else:
        raise ValueError("not supported dataset")

    # Build the NCF network and wrap it with the top-k sigmoid predictor.
    ncf_net = NCFModel(num_users=num_eval_users,
                       num_items=num_eval_items,
                       num_factors=config.num_factors,
                       model_layers=config.layers,
                       mf_regularization=0,
                       mlp_reg_layers=[0.0, 0.0, 0.0, 0.0],
                       mf_dim=16)
    # Restore the trained weights before wrapping/exporting.
    param_dict = load_checkpoint(config.ckpt_file)
    load_param_into_net(ncf_net, param_dict)

    network = PredictWithSigmoid(ncf_net, topk, num_eval_neg)

    # Dummy zero inputs that fix the exported graph's input shapes/dtypes:
    # (batch, 1) int32 user/item ids plus a (batch, 1) float32 mask.
    users = Tensor(np.zeros([config.eval_batch_size, 1]).astype(np.int32))
    items = Tensor(np.zeros([config.eval_batch_size, 1]).astype(np.int32))
    masks = Tensor(np.zeros([config.eval_batch_size, 1]).astype(np.float32))

    input_data = [users, items, masks]
    export(network, *input_data, file_name=config.file_name, file_format=config.file_format)
| [
"mindspore.export",
"mindspore.context.set_context",
"numpy.zeros",
"mindspore.load_checkpoint",
"mindspore.load_param_into_net",
"ncf.NCFModel",
"ncf.PredictWithSigmoid"
] | [((903, 988), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE', 'device_target': 'config.device_target'}), '(mode=context.GRAPH_MODE, device_target=config.device_target\n )\n', (922, 988), False, 'from mindspore import Tensor, context, load_checkpoint, load_param_into_net, export\n'), ((1025, 1072), 'mindspore.context.set_context', 'context.set_context', ([], {'device_id': 'config.device_id'}), '(device_id=config.device_id)\n', (1044, 1072), False, 'from mindspore import Tensor, context, load_checkpoint, load_param_into_net, export\n'), ((1440, 1638), 'ncf.NCFModel', 'NCFModel', ([], {'num_users': 'num_eval_users', 'num_items': 'num_eval_items', 'num_factors': 'config.num_factors', 'model_layers': 'config.layers', 'mf_regularization': '(0)', 'mlp_reg_layers': '[0.0, 0.0, 0.0, 0.0]', 'mf_dim': '(16)'}), '(num_users=num_eval_users, num_items=num_eval_items, num_factors=\n config.num_factors, model_layers=config.layers, mf_regularization=0,\n mlp_reg_layers=[0.0, 0.0, 0.0, 0.0], mf_dim=16)\n', (1448, 1638), False, 'from ncf import NCFModel, PredictWithSigmoid\n'), ((1786, 1819), 'mindspore.load_checkpoint', 'load_checkpoint', (['config.ckpt_file'], {}), '(config.ckpt_file)\n', (1801, 1819), False, 'from mindspore import Tensor, context, load_checkpoint, load_param_into_net, export\n'), ((1824, 1864), 'mindspore.load_param_into_net', 'load_param_into_net', (['ncf_net', 'param_dict'], {}), '(ncf_net, param_dict)\n', (1843, 1864), False, 'from mindspore import Tensor, context, load_checkpoint, load_param_into_net, export\n'), ((1880, 1927), 'ncf.PredictWithSigmoid', 'PredictWithSigmoid', (['ncf_net', 'topk', 'num_eval_neg'], {}), '(ncf_net, topk, num_eval_neg)\n', (1898, 1927), False, 'from ncf import NCFModel, PredictWithSigmoid\n'), ((2200, 2293), 'mindspore.export', 'export', (['network', '*input_data'], {'file_name': 'config.file_name', 'file_format': 'config.file_format'}), '(network, *input_data, 
file_name=config.file_name, file_format=config\n .file_format)\n', (2206, 2293), False, 'from mindspore import Tensor, context, load_checkpoint, load_param_into_net, export\n'), ((1948, 1985), 'numpy.zeros', 'np.zeros', (['[config.eval_batch_size, 1]'], {}), '([config.eval_batch_size, 1])\n', (1956, 1985), True, 'import numpy as np\n'), ((2023, 2060), 'numpy.zeros', 'np.zeros', (['[config.eval_batch_size, 1]'], {}), '([config.eval_batch_size, 1])\n', (2031, 2060), True, 'import numpy as np\n'), ((2098, 2135), 'numpy.zeros', 'np.zeros', (['[config.eval_batch_size, 1]'], {}), '([config.eval_batch_size, 1])\n', (2106, 2135), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
# from scipy import signal
from matplotlib import animation
# import scipy.constants as con
from IPython.display import HTML
from tqdm import tqdm
# import matplotlib.cm as cm
c = 1
def resonator_modes(t, z, n_modes=3, random_phases=False, plot=True,
figuresize=(10, 4), spectrum_std=1000, save_in=""):
# length of the resonator
L = z.max() - z.min()
# calculate the frequency difference between two neighbouring modes of
# the resonator
delta_nu = c / (2 * L)
frequencies = np.array([delta_nu * i for i in range(1, n_modes+1)])
phases = np.zeros(n_modes)
if random_phases is True:
phases = np.random.uniform(0, 200, n_modes)
# spectrum = signal.gaussian(n_modes, std=spectrum_std)
spectrum = np.ones(n_modes)
if plot is True:
fig, axs = plt.subplots(2, 1, figsize=figuresize, dpi=100, frameon=False)
axs[0].axis('off')
axs[1].axis('off')
axs.flatten()
axs[0].set_xlim(z.min(), z.max())
axs[1].set_xlim(z.min(), z.max())
# axs[2].plot(frequencies, spectrum)
# calculate the sum...
E_i = np.zeros([n_modes, len(z)])
for i in range(n_modes):
omega = 2 * np.pi * frequencies[i]
k = omega / c
E_i[i, :] = spectrum[i] * np.sin(2 * omega * t - phases[i]) * np.sin(k * z)
if plot is True:
fig_2, ax2 = plt.subplots(figsize=(10, 2), dpi=100, frameon=False)
ax2.set_ylim(-1.1, 1.1)
ax2.axis('off')
ax2.plot(z, E_i[i])
axs[0].plot(z, E_i[i], label=str(i))
if save_in != "":
fig_2.savefig(save_in+"_mode_"+str(i)+".pdf")
plt.close()
else:
pass
if plot is True:
E_total = np.sum(E_i, axis=0)
maximum = np.max(np.abs(E_total))
axs[1].set_ylim(- 1.2 * maximum, 1.2 * maximum)
# axs[0].legend()
axs[1].plot(z, E_total)
fig_3, ax3 = plt.subplots(figsize=(10, 2), dpi=100, frameon=False)
ax3.axis('off')
ax3.plot(z, E_total)
if save_in != "":
fig.savefig(save_in+"_both.pdf")
fig_3.savefig(save_in+"_sum.pdf")
plt.close()
else:
pass
return E_i
def animate_resonator(z, times, n_modes, ms_between_frames=60, figuresize=(11, 4), saveas=""):
"""Animates the time evolution of the wave packet
Parameters
----------
z : array_like
Array of the z-axis your wave packet is propagating on.
times : array_like
Times you want to include in the animation.
n_modes: int
Number of modes included in the calculation.
ms_between_frames : int, optional
Milliseconds of pause between two frames in the animation. Default
is 30.
figuresize : tuple of ints, optional
Size of the figure when plotting the wave. Default is (11, 4).
saveas : string, optional
Path where you want to save the animation as .gif-file.
"""
modes = [resonator_modes(t, z, n_modes, plot=False) for t in tqdm(times)]
pulses = [E_i.sum(axis=0) for E_i in tqdm(modes)]
fig, ax = plt.subplots(figsize=figuresize)
ax.set_xlim(z.min(), z.max())
maximum = np.max(np.abs(np.array(pulses)))
ax.set_ylim(-1.2 * maximum, 1.2 * maximum)
ax.set_xlabel(r"position $z$")
lines = [ax.plot([], [], color="forestgreen")[0]
for i in pulses]
def init():
for line in lines:
line.set_data([], [])
return lines
def animate(i):
for j in range(len(lines)):
lines[j].set_data(z, pulses[i])
return lines
plt.close()
anim = animation.FuncAnimation(fig, animate, init_func=init, blit=True,
frames=len(pulses),
interval=ms_between_frames)
if saveas != "":
anim.save(saveas, writer='imagemagick', fps=int(1000/ms_between_frames))
return HTML(anim.to_html5_video())
| [
"numpy.abs",
"numpy.ones",
"tqdm.tqdm",
"matplotlib.pyplot.close",
"numpy.sum",
"numpy.zeros",
"numpy.array",
"numpy.random.uniform",
"numpy.sin",
"matplotlib.pyplot.subplots"
] | [((643, 660), 'numpy.zeros', 'np.zeros', (['n_modes'], {}), '(n_modes)\n', (651, 660), True, 'import numpy as np\n'), ((819, 835), 'numpy.ones', 'np.ones', (['n_modes'], {}), '(n_modes)\n', (826, 835), True, 'import numpy as np\n'), ((3236, 3268), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'figuresize'}), '(figsize=figuresize)\n', (3248, 3268), True, 'import matplotlib.pyplot as plt\n'), ((3744, 3755), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3753, 3755), True, 'import matplotlib.pyplot as plt\n'), ((708, 742), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(200)', 'n_modes'], {}), '(0, 200, n_modes)\n', (725, 742), True, 'import numpy as np\n'), ((877, 939), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': 'figuresize', 'dpi': '(100)', 'frameon': '(False)'}), '(2, 1, figsize=figuresize, dpi=100, frameon=False)\n', (889, 939), True, 'import matplotlib.pyplot as plt\n'), ((1840, 1859), 'numpy.sum', 'np.sum', (['E_i'], {'axis': '(0)'}), '(E_i, axis=0)\n', (1846, 1859), True, 'import numpy as np\n'), ((2038, 2091), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 2)', 'dpi': '(100)', 'frameon': '(False)'}), '(figsize=(10, 2), dpi=100, frameon=False)\n', (2050, 2091), True, 'import matplotlib.pyplot as plt\n'), ((1376, 1389), 'numpy.sin', 'np.sin', (['(k * z)'], {}), '(k * z)\n', (1382, 1389), True, 'import numpy as np\n'), ((1441, 1494), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 2)', 'dpi': '(100)', 'frameon': '(False)'}), '(figsize=(10, 2), dpi=100, frameon=False)\n', (1453, 1494), True, 'import matplotlib.pyplot as plt\n'), ((1885, 1900), 'numpy.abs', 'np.abs', (['E_total'], {}), '(E_total)\n', (1891, 1900), True, 'import numpy as np\n'), ((2274, 2285), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2283, 2285), True, 'import matplotlib.pyplot as plt\n'), ((3154, 3165), 'tqdm.tqdm', 'tqdm', (['times'], {}), '(times)\n', (3158, 3165), 
False, 'from tqdm import tqdm\n'), ((3208, 3219), 'tqdm.tqdm', 'tqdm', (['modes'], {}), '(modes)\n', (3212, 3219), False, 'from tqdm import tqdm\n'), ((3332, 3348), 'numpy.array', 'np.array', (['pulses'], {}), '(pulses)\n', (3340, 3348), True, 'import numpy as np\n'), ((1340, 1373), 'numpy.sin', 'np.sin', (['(2 * omega * t - phases[i])'], {}), '(2 * omega * t - phases[i])\n', (1346, 1373), True, 'import numpy as np\n'), ((1749, 1760), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1758, 1760), True, 'import matplotlib.pyplot as plt\n')] |
import unittest, numpy as np
from rubix2by2 import *
class BasicRotationTest(unittest.TestCase):
def setUp(self):
self.sol = np.array(range(24))
def test_forward_rotation(self):
F_result = np.array([0, 12, 2, 13, 4, 5, 3, 1, 10, 8, 11, 9, 18, 16, 14, 15, 6, 17, 7, 19, 20, 21, 22, 23])
F = generate_basic_moveset()[0]
self.assertTrue((F_result == F.dot(self.sol)).all())
def test_right_rotation(self):
R_result = np.array([0, 1, 2, 3, 4, 9, 6, 11, 8, 13, 10, 15, 12, 22, 14, 20, 18, 16, 19, 17, 7, 21, 5, 23])
R = generate_basic_moveset()[1]
self.assertTrue((R_result == R.dot(self.sol)).all())
def test_down_rotation(self):
D_result = np.array([0, 1, 22, 23, 4, 5, 6, 7, 8, 9, 2, 3, 14, 12, 15, 13, 16, 17, 10, 11, 20, 21, 18, 19])
D = generate_basic_moveset()[2]
self.assertTrue((D_result == D.dot(self.sol)).all())
class InvertedRotationTest(unittest.TestCase):
def setUp(self):
self.eye = np.eye(24)
def test_forward_inv_rotation(self):
Fi = generate_quarter_moveset()[3]
F2 = generate_half_moveset()[6]
self.assertTrue((Fi.dot(Fi.dot(F2)) == self.eye).all())
self.assertTrue((Fi.dot(F2.dot(Fi)) == self.eye).all())
self.assertTrue((F2.dot(Fi.dot(Fi)) == self.eye).all())
def test_right_inv_rotation(self):
Ri = generate_quarter_moveset()[4]
R2 = generate_half_moveset()[7]
self.assertTrue((Ri.dot(Ri.dot(R2)) == self.eye).all())
self.assertTrue((Ri.dot(R2.dot(Ri)) == self.eye).all())
self.assertTrue((R2.dot(Ri.dot(Ri)) == self.eye).all())
def test_down_inv_rotation(self):
Di = generate_quarter_moveset()[5]
D2 = generate_half_moveset()[8]
self.assertTrue((Di.dot(Di.dot(D2)) == self.eye).all())
self.assertTrue((Di.dot(D2.dot(Di)) == self.eye).all())
self.assertTrue((D2.dot(Di.dot(Di)) == self.eye).all())
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"numpy.array",
"numpy.eye"
] | [((1902, 1917), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1915, 1917), False, 'import unittest, numpy as np\n'), ((210, 310), 'numpy.array', 'np.array', (['[0, 12, 2, 13, 4, 5, 3, 1, 10, 8, 11, 9, 18, 16, 14, 15, 6, 17, 7, 19, 20, \n 21, 22, 23]'], {}), '([0, 12, 2, 13, 4, 5, 3, 1, 10, 8, 11, 9, 18, 16, 14, 15, 6, 17, 7,\n 19, 20, 21, 22, 23])\n', (218, 310), True, 'import unittest, numpy as np\n'), ((451, 552), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 9, 6, 11, 8, 13, 10, 15, 12, 22, 14, 20, 18, 16, 19, 17, 7,\n 21, 5, 23]'], {}), '([0, 1, 2, 3, 4, 9, 6, 11, 8, 13, 10, 15, 12, 22, 14, 20, 18, 16, \n 19, 17, 7, 21, 5, 23])\n', (459, 552), True, 'import unittest, numpy as np\n'), ((691, 791), 'numpy.array', 'np.array', (['[0, 1, 22, 23, 4, 5, 6, 7, 8, 9, 2, 3, 14, 12, 15, 13, 16, 17, 10, 11, 20, \n 21, 18, 19]'], {}), '([0, 1, 22, 23, 4, 5, 6, 7, 8, 9, 2, 3, 14, 12, 15, 13, 16, 17, 10,\n 11, 20, 21, 18, 19])\n', (699, 791), True, 'import unittest, numpy as np\n'), ((965, 975), 'numpy.eye', 'np.eye', (['(24)'], {}), '(24)\n', (971, 975), True, 'import unittest, numpy as np\n')] |
# ReLu function
import numpy as np
def relu(z):
"""
Implements the relu function
Parameters:
vector (np.array,list,tuple): A numpy array of shape (1,n)
consisting of real values or a similar list,tuple
Returns:
relu_vec (np.array): The input numpy array, after applying
relu.
"""
# compare two arrays and then return element-wise maxima
return np.maximum(0,z)
| [
"numpy.maximum"
] | [((416, 432), 'numpy.maximum', 'np.maximum', (['(0)', 'z'], {}), '(0, z)\n', (426, 432), True, 'import numpy as np\n')] |
import pytest
from pyha import Hardware, Sfix, Complex, simulate, sims_close
import numpy as np
from pyha.cores import Cordic, CordicMode
class NCO(Hardware):
"""
Baseband signal generator. Integrated phase accumulator.
"""
def __init__(self, cordic_iterations=14):
"""
:param cordic_iterations:
"""
self.cordic = Cordic(cordic_iterations, CordicMode.ROTATION)
self.phase_acc = Sfix(0, 0, -17, wrap_is_ok=True)
self.out = Complex(0, 0, -17, overflow_style='saturate')
self.DELAY = self.cordic.ITERATIONS + 1 + 1
self.INIT_X = 1.0 / 1.646760 # gets rid of cordic gain, could use for amplitude modulation
def main(self, phase_inc):
"""
:param phase_inc: amount of rotation applied for next clock cycle, must be normalized to -1 to 1.
:rtype: Complex
"""
self.phase_acc = self.phase_acc + phase_inc
start_x = self.INIT_X
start_y = Sfix(0.0, 0, -17)
x, y, phase = self.cordic.main(start_x, start_y, self.phase_acc)
self.out = Complex(x, y)
return self.out
def model(self, phase_list):
p = np.cumsum(np.array(phase_list) * np.pi)
return np.exp(p * 1j)
def test_basic():
inputs = [0.01] * 4
expect = [np.exp(0.01j * np.pi), np.exp(0.02j * np.pi), np.exp(0.03j * np.pi), np.exp(0.04j * np.pi)]
dut = NCO()
sim_out = simulate(dut, inputs)
assert sims_close(sim_out, expect, rtol=1e-2, atol=1e-4)
@pytest.mark.parametrize('period', [0.25, 0.50, 0.75, 1, 2, 4])
def test_nco(period):
fs = 1024
freq = 200
phase_inc = 2 * np.pi * freq / fs
phase_cumsum = np.arange(0, period * fs * phase_inc, phase_inc)
input_signal = np.diff(phase_cumsum) / np.pi
dut = NCO()
sims = ['MODEL', 'HARDWARE', 'RTL']
if period == 1:
sims.append('NETLIST')
sim_out = simulate(dut, input_signal, simulations=sims)
assert sims_close(sim_out, rtol=1e-2, atol=1e-4)
| [
"pyha.Sfix",
"numpy.diff",
"pyha.Complex",
"numpy.exp",
"pytest.mark.parametrize",
"pyha.cores.Cordic",
"numpy.array",
"pyha.simulate",
"pyha.sims_close",
"numpy.arange"
] | [((1509, 1570), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""period"""', '[0.25, 0.5, 0.75, 1, 2, 4]'], {}), "('period', [0.25, 0.5, 0.75, 1, 2, 4])\n", (1532, 1570), False, 'import pytest\n'), ((1423, 1444), 'pyha.simulate', 'simulate', (['dut', 'inputs'], {}), '(dut, inputs)\n', (1431, 1444), False, 'from pyha import Hardware, Sfix, Complex, simulate, sims_close\n'), ((1456, 1507), 'pyha.sims_close', 'sims_close', (['sim_out', 'expect'], {'rtol': '(0.01)', 'atol': '(0.0001)'}), '(sim_out, expect, rtol=0.01, atol=0.0001)\n', (1466, 1507), False, 'from pyha import Hardware, Sfix, Complex, simulate, sims_close\n'), ((1680, 1728), 'numpy.arange', 'np.arange', (['(0)', '(period * fs * phase_inc)', 'phase_inc'], {}), '(0, period * fs * phase_inc, phase_inc)\n', (1689, 1728), True, 'import numpy as np\n'), ((1902, 1947), 'pyha.simulate', 'simulate', (['dut', 'input_signal'], {'simulations': 'sims'}), '(dut, input_signal, simulations=sims)\n', (1910, 1947), False, 'from pyha import Hardware, Sfix, Complex, simulate, sims_close\n'), ((1959, 2002), 'pyha.sims_close', 'sims_close', (['sim_out'], {'rtol': '(0.01)', 'atol': '(0.0001)'}), '(sim_out, rtol=0.01, atol=0.0001)\n', (1969, 2002), False, 'from pyha import Hardware, Sfix, Complex, simulate, sims_close\n'), ((367, 413), 'pyha.cores.Cordic', 'Cordic', (['cordic_iterations', 'CordicMode.ROTATION'], {}), '(cordic_iterations, CordicMode.ROTATION)\n', (373, 413), False, 'from pyha.cores import Cordic, CordicMode\n'), ((439, 471), 'pyha.Sfix', 'Sfix', (['(0)', '(0)', '(-17)'], {'wrap_is_ok': '(True)'}), '(0, 0, -17, wrap_is_ok=True)\n', (443, 471), False, 'from pyha import Hardware, Sfix, Complex, simulate, sims_close\n'), ((491, 536), 'pyha.Complex', 'Complex', (['(0)', '(0)', '(-17)'], {'overflow_style': '"""saturate"""'}), "(0, 0, -17, overflow_style='saturate')\n", (498, 536), False, 'from pyha import Hardware, Sfix, Complex, simulate, sims_close\n'), ((976, 993), 'pyha.Sfix', 'Sfix', (['(0.0)', '(0)', 
'(-17)'], {}), '(0.0, 0, -17)\n', (980, 993), False, 'from pyha import Hardware, Sfix, Complex, simulate, sims_close\n'), ((1088, 1101), 'pyha.Complex', 'Complex', (['x', 'y'], {}), '(x, y)\n', (1095, 1101), False, 'from pyha import Hardware, Sfix, Complex, simulate, sims_close\n'), ((1227, 1243), 'numpy.exp', 'np.exp', (['(p * 1.0j)'], {}), '(p * 1.0j)\n', (1233, 1243), True, 'import numpy as np\n'), ((1300, 1321), 'numpy.exp', 'np.exp', (['(0.01j * np.pi)'], {}), '(0.01j * np.pi)\n', (1306, 1321), True, 'import numpy as np\n'), ((1323, 1344), 'numpy.exp', 'np.exp', (['(0.02j * np.pi)'], {}), '(0.02j * np.pi)\n', (1329, 1344), True, 'import numpy as np\n'), ((1346, 1367), 'numpy.exp', 'np.exp', (['(0.03j * np.pi)'], {}), '(0.03j * np.pi)\n', (1352, 1367), True, 'import numpy as np\n'), ((1369, 1390), 'numpy.exp', 'np.exp', (['(0.04j * np.pi)'], {}), '(0.04j * np.pi)\n', (1375, 1390), True, 'import numpy as np\n'), ((1749, 1770), 'numpy.diff', 'np.diff', (['phase_cumsum'], {}), '(phase_cumsum)\n', (1756, 1770), True, 'import numpy as np\n'), ((1182, 1202), 'numpy.array', 'np.array', (['phase_list'], {}), '(phase_list)\n', (1190, 1202), True, 'import numpy as np\n')] |
import os
import torch
import numpy as np
import re
import glob
import torchvision.transforms as transforms
from PIL import Image
from torch.utils import data
# taken from https://github.com/intel-isl/MultiObjectiveOptimization and adapted
class CelebA(data.Dataset):
def __init__(self, split, task_ids=[], root='data/celeba', dim=64, augmentations=None, **kwargs):
"""__init__
:param root:
:param split:
:param is_transform:
:param img_size:
:param augmentations
"""
self.root = root
self.split = split
self.task_ids = task_ids
self.augmentations = augmentations
self.n_classes = 40
self.files = {}
self.labels = {}
assert dim[-1] == dim[-2]
self.transform=transforms.Compose([
transforms.Resize(dim[-1]),
transforms.CenterCrop(dim[-1]),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
self.label_file = self.root+"/Anno/list_attr_celeba.txt"
label_map = {}
with open(self.label_file, 'r') as l_file:
labels = l_file.read().split('\n')[2:-1]
for label_line in labels:
f_name = re.sub('jpg', 'png', label_line.split(' ')[0])
label_txt = list(map(lambda x:int(x), re.sub('-1','0',label_line).split()[1:]))
label_map[f_name]=label_txt
self.all_files = glob.glob(self.root+'/Img/img_align_celeba_png/*.png')
with open(root+'//Eval/list_eval_partition.txt', 'r') as f:
fl = f.read().split('\n')
fl.pop()
if 'train' in self.split:
selected_files = list(filter(lambda x:x.split(' ')[1]=='0', fl))
elif 'val' in self.split:
selected_files = list(filter(lambda x:x.split(' ')[1]=='1', fl))
elif 'test' in self.split:
selected_files = list(filter(lambda x:x.split(' ')[1]=='2', fl))
selected_file_names = list(map(lambda x:re.sub('jpg', 'png', x.split(' ')[0]), selected_files))
base_path = '/'.join(self.all_files[0].split('/')[:-1])
self.files[self.split] = list(map(lambda x: '/'.join([base_path, x]), set(map(lambda x:x.split('/')[-1], self.all_files)).intersection(set(selected_file_names))))
self.labels[self.split] = list(map(lambda x: label_map[x], set(map(lambda x:x.split('/')[-1], self.all_files)).intersection(set(selected_file_names))))
self.class_names = ['5_o_Clock_Shadow', 'Arched_Eyebrows', 'Attractive', 'Bags_Under_Eyes', 'Bald', 'Bangs',
'Big_Lips', 'Big_Nose', 'Black_Hair', 'Blond_Hair', 'Blurry', 'Brown_Hair', 'Bushy_Eyebrows',
'Chubby', 'Double_Chin', 'Eyeglasses', 'Goatee', 'Gray_Hair', 'Heavy_Makeup', 'High_Cheekbones',
'Male', 'Mouth_Slightly_Open', 'Mustache', 'Narrow_Eyes', 'No_Beard', 'Oval_Face', 'Pale_Skin',
'Pointy_Nose', 'Receding_Hairline', 'Rosy_Cheeks', 'Sideburns', 'Smiling', 'Straight_Hair', 'Wavy_Hair',
'Wearing_Earrings', 'Wearing_Hat', 'Wearing_Lipstick', 'Wearing_Necklace', 'Wearing_Necktie', 'Young']
if len(self.files[self.split]) < 2:
raise Exception("No files for split=[%s] found in %s" % (self.split, self.root))
print("Found {} {} images. Defined tasks: {}".format(
len(self.files[self.split]),
self.split,
[self.class_names[i] for i in task_ids] if task_ids else 'all'
))
def __len__(self):
"""__len__"""
return len(self.files[self.split])
def __getitem__(self, index):
"""__getitem__
:param index:
"""
label = self.labels[self.split][index]
label = torch.Tensor(label).long()
img_path = self.files[self.split][index].rstrip()
img = Image.open(img_path)
if self.augmentations is not None:
img = self.augmentations(np.array(img, dtype=np.uint8))
img = self.transform(img)
labels = {'labels_{}'.format(i): label[i] for i in self.task_names()}
return dict(data=img, **labels)
def task_names(self):
return self.task_ids if self.task_ids else range(self.n_classes)
if __name__ == '__main__':
import matplotlib.pyplot as plt
dst = CelebA(split='val', task_ids=[22, 39])
bs = 4
trainloader = data.DataLoader(dst, batch_size=bs, num_workers=0)
for i, data in enumerate(trainloader):
imgs = data['data']
labels = data['labels']
imgs = imgs.numpy()[:, ::-1, :, :]
imgs = np.transpose(imgs, [0,2,3,1])
f, axarr = plt.subplots(bs,4)
for j in range(bs):
axarr[j][0].imshow(imgs[j])
plt.show()
a = input()
if a == 'ex':
break
else:
plt.close()
| [
"torchvision.transforms.CenterCrop",
"PIL.Image.open",
"torch.Tensor",
"matplotlib.pyplot.close",
"numpy.array",
"torchvision.transforms.Normalize",
"torch.utils.data.DataLoader",
"torchvision.transforms.Resize",
"re.sub",
"torchvision.transforms.ToTensor",
"numpy.transpose",
"matplotlib.pyplo... | [((4545, 4595), 'torch.utils.data.DataLoader', 'data.DataLoader', (['dst'], {'batch_size': 'bs', 'num_workers': '(0)'}), '(dst, batch_size=bs, num_workers=0)\n', (4560, 4595), False, 'from torch.utils import data\n'), ((1470, 1526), 'glob.glob', 'glob.glob', (["(self.root + '/Img/img_align_celeba_png/*.png')"], {}), "(self.root + '/Img/img_align_celeba_png/*.png')\n", (1479, 1526), False, 'import glob\n'), ((4009, 4029), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (4019, 4029), False, 'from PIL import Image\n'), ((4758, 4790), 'numpy.transpose', 'np.transpose', (['imgs', '[0, 2, 3, 1]'], {}), '(imgs, [0, 2, 3, 1])\n', (4770, 4790), True, 'import numpy as np\n'), ((4808, 4827), 'matplotlib.pyplot.subplots', 'plt.subplots', (['bs', '(4)'], {}), '(bs, 4)\n', (4820, 4827), True, 'import matplotlib.pyplot as plt\n'), ((4903, 4913), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4911, 4913), True, 'import matplotlib.pyplot as plt\n'), ((5000, 5011), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5009, 5011), True, 'import matplotlib.pyplot as plt\n'), ((831, 857), 'torchvision.transforms.Resize', 'transforms.Resize', (['dim[-1]'], {}), '(dim[-1])\n', (848, 857), True, 'import torchvision.transforms as transforms\n'), ((871, 901), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['dim[-1]'], {}), '(dim[-1])\n', (892, 901), True, 'import torchvision.transforms as transforms\n'), ((915, 936), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (934, 936), True, 'import torchvision.transforms as transforms\n'), ((950, 1004), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (970, 1004), True, 'import torchvision.transforms as transforms\n'), ((3909, 3928), 'torch.Tensor', 'torch.Tensor', (['label'], {}), '(label)\n', (3921, 3928), False, 'import torch\n'), ((4111, 4140), 
'numpy.array', 'np.array', (['img'], {'dtype': 'np.uint8'}), '(img, dtype=np.uint8)\n', (4119, 4140), True, 'import numpy as np\n'), ((1362, 1391), 're.sub', 're.sub', (['"""-1"""', '"""0"""', 'label_line'], {}), "('-1', '0', label_line)\n", (1368, 1391), False, 'import re\n')] |
import unittest
import numpy as np
from neet.boolean.examples import mouse_cortical_7B
from neet.boolean.randomnet import (random_logic, random_binary_states,)
TESTSEED = 314159
class TestRandomnet(unittest.TestCase):
def test_random_logic_invalid_p(self):
"""
``random_logic`` should raise a value error if ``p`` is an
incorrect size
"""
with self.assertRaises(ValueError):
net = mouse_cortical_7B
random_logic(net, p=np.ones(net.size + 1))
def test_random_binary_states(self):
self.assertEqual(8, len(random_binary_states(4, 0.5)))
self.assertTrue(len(random_binary_states(3, 0.4)) in (3, 4))
def test_random_logic_fixed_structure(self):
net = mouse_cortical_7B
np.random.seed(TESTSEED)
randnet = random_logic(net, connections='fixed-structure')
# fixed-structure should preserve all neighbors
for i in range(net.size):
self.assertEqual(net.neighbors_in(i), randnet.neighbors_in(i))
def test_random_logic_fixed_in_degree(self):
net = mouse_cortical_7B
np.random.seed(TESTSEED)
randnet = random_logic(net, connections='fixed-in-degree')
# fixed-in-degree should preserve each node's in degree
for i in range(net.size):
self.assertEqual(len(net.neighbors_in(i)),
len(randnet.neighbors_in(i)))
def test_random_logic_fixed_mean_degree(self):
net = mouse_cortical_7B
np.random.seed(TESTSEED)
randnet = random_logic(net, connections='fixed-mean-degree')
# fixed-mean-degree should preserve the total number of edges
numedges = np.sum([len(net.neighbors_in(i)) for i in range(net.size)])
randnumedges = np.sum([len(randnet.neighbors_in(i))
for i in range(randnet.size)])
self.assertEqual(numedges, randnumedges)
| [
"neet.boolean.randomnet.random_binary_states",
"neet.boolean.randomnet.random_logic",
"numpy.ones",
"numpy.random.seed"
] | [((777, 801), 'numpy.random.seed', 'np.random.seed', (['TESTSEED'], {}), '(TESTSEED)\n', (791, 801), True, 'import numpy as np\n'), ((820, 868), 'neet.boolean.randomnet.random_logic', 'random_logic', (['net'], {'connections': '"""fixed-structure"""'}), "(net, connections='fixed-structure')\n", (832, 868), False, 'from neet.boolean.randomnet import random_logic, random_binary_states\n'), ((1124, 1148), 'numpy.random.seed', 'np.random.seed', (['TESTSEED'], {}), '(TESTSEED)\n', (1138, 1148), True, 'import numpy as np\n'), ((1167, 1215), 'neet.boolean.randomnet.random_logic', 'random_logic', (['net'], {'connections': '"""fixed-in-degree"""'}), "(net, connections='fixed-in-degree')\n", (1179, 1215), False, 'from neet.boolean.randomnet import random_logic, random_binary_states\n'), ((1520, 1544), 'numpy.random.seed', 'np.random.seed', (['TESTSEED'], {}), '(TESTSEED)\n', (1534, 1544), True, 'import numpy as np\n'), ((1563, 1613), 'neet.boolean.randomnet.random_logic', 'random_logic', (['net'], {'connections': '"""fixed-mean-degree"""'}), "(net, connections='fixed-mean-degree')\n", (1575, 1613), False, 'from neet.boolean.randomnet import random_logic, random_binary_states\n'), ((587, 615), 'neet.boolean.randomnet.random_binary_states', 'random_binary_states', (['(4)', '(0.5)'], {}), '(4, 0.5)\n', (607, 615), False, 'from neet.boolean.randomnet import random_logic, random_binary_states\n'), ((490, 511), 'numpy.ones', 'np.ones', (['(net.size + 1)'], {}), '(net.size + 1)\n', (497, 511), True, 'import numpy as np\n'), ((646, 674), 'neet.boolean.randomnet.random_binary_states', 'random_binary_states', (['(3)', '(0.4)'], {}), '(3, 0.4)\n', (666, 674), False, 'from neet.boolean.randomnet import random_logic, random_binary_states\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
get_ipython().run_line_magic('matplotlib', 'inline')
import numpy as np
import matplotlib.pyplot as plt
# In[ ]:
x = np.linspace(0, 2*np.pi, 1000)
# In[ ]:
y = 5.5 * np.cos(2*x) + 5.5
z = 0.02 * np.exp(x)
w = 0.25 * x**2 + 0.1* np.sin(10*x)
fig = plt.figure(figsize=(6,6))
#The functions that each line on the graph represents
plt.plot(x, y, label=r'$y(x) = \5.5cos(2*x) + 5.5$')
plt.plot(x, z, label=r'$y(x) = \0.02 * exp(x)$')
plt.plot(x, w, label=r'$y(x) = \0.25 * x^2 + 0.1sin(10*x)$')
#X and Y axes labels
plt.xlabel('Time Spent in ASTR-119')
plt.ylabel('Measure of Pure Awesomeness')
#X and Y ranges
plt.xlim([0,2*np.pi])
plt.ylim([-1,10.])
# In[ ]:
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.exp",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.cos",
"numpy.sin",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim"
] | [((171, 202), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(1000)'], {}), '(0, 2 * np.pi, 1000)\n', (182, 202), True, 'import numpy as np\n'), ((306, 332), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (316, 332), True, 'import matplotlib.pyplot as plt\n'), ((387, 439), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'label': '"""$y(x) = \\\\5.5cos(2*x) + 5.5$"""'}), "(x, y, label='$y(x) = \\\\5.5cos(2*x) + 5.5$')\n", (395, 439), True, 'import matplotlib.pyplot as plt\n'), ((440, 488), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'z'], {'label': '"""$y(x) = \\\\0.02 * exp(x)$"""'}), "(x, z, label='$y(x) = \\\\0.02 * exp(x)$')\n", (448, 488), True, 'import matplotlib.pyplot as plt\n'), ((489, 549), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'w'], {'label': '"""$y(x) = \\\\0.25 * x^2 + 0.1sin(10*x)$"""'}), "(x, w, label='$y(x) = \\\\0.25 * x^2 + 0.1sin(10*x)$')\n", (497, 549), True, 'import matplotlib.pyplot as plt\n'), ((573, 609), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time Spent in ASTR-119"""'], {}), "('Time Spent in ASTR-119')\n", (583, 609), True, 'import matplotlib.pyplot as plt\n'), ((610, 651), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Measure of Pure Awesomeness"""'], {}), "('Measure of Pure Awesomeness')\n", (620, 651), True, 'import matplotlib.pyplot as plt\n'), ((669, 693), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 2 * np.pi]'], {}), '([0, 2 * np.pi])\n', (677, 693), True, 'import matplotlib.pyplot as plt\n'), ((691, 711), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-1, 10.0]'], {}), '([-1, 10.0])\n', (699, 711), True, 'import matplotlib.pyplot as plt\n'), ((253, 262), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (259, 262), True, 'import numpy as np\n'), ((224, 237), 'numpy.cos', 'np.cos', (['(2 * x)'], {}), '(2 * x)\n', (230, 237), True, 'import numpy as np\n'), ((286, 300), 'numpy.sin', 'np.sin', (['(10 * x)'], {}), '(10 * x)\n', (292, 300), True, 'import numpy as 
np\n')] |
# -*- coding: utf-8 -*-
import cv2
import numpy as np
import tensorflow as tf
def get_mesh(model_path='models/model.tflite'):
# Load the model
interpreter = tf.lite.Interpreter(model_path=model_path)
# Set model input
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# Preprocess the image before sending to the network.
return interpreter, input_details, output_details
def get_square_box(box):
# Get a square box out of the given box, by expanding it
left_x = box[0]
top_y = box[1]
right_x = box[2]
bottom_y = box[3]
box_width = right_x - left_x
box_height = bottom_y - top_y
# Check if box is already a square. If not, make it a square.
diff = box_height - box_width
delta = int(abs(diff) / 2)
if diff == 0: # Already a square.
return box
elif diff > 0: # Height > width, a slim box.
left_x -= delta
right_x += delta
if diff % 2 == 1:
right_x += 1
else: # Width > height, a short box.
top_y -= delta
bottom_y += delta
if diff % 2 == 1:
bottom_y += 1
# Make sure box is always square.
assert ((right_x - left_x) == (bottom_y - top_y)), 'Box is not square.'
return [left_x, top_y, right_x, bottom_y]
def move_box(box, offset):
# Move the box to direction specified by vector offset
left_x = box[0] + offset[0]
top_y = box[1] + offset[1]
right_x = box[2] + offset[0]
bottom_y = box[3] + offset[1]
return [left_x, top_y, right_x, bottom_y]
def detect_marks(img, face):
offset_y = int(abs((face[3] - face[1]) * 0.1))
box_moved = move_box(face, [0, offset_y])
facebox = get_square_box(box_moved)
h, w = img.shape[:2]
if facebox[0] < 0:
facebox[0] = 0
if facebox[1] < 0:
facebox[1] = 0
if facebox[2] > w:
facebox[2] = w
if facebox[3] > h:
facebox[3] = h
try:
face_img = img[facebox[1]: facebox[3],
facebox[0]: facebox[2]]
face_img = cv2.resize(face_img, (128, 128))
face_img = cv2.cvtColor(face_img, cv2.COLOR_BGR2RGB)
image = tf.image.convert_image_dtype(face_img, tf.uint8)
image = np.expand_dims(image, axis=0)
interpreter, input_details, output_details = get_mesh()
# The actual detection.
interpreter.set_tensor(input_details[0]["index"], image)
interpreter.invoke()
# Save the results.
mesh = interpreter.get_tensor(output_details[0]["index"])[
0]
# Convert predictions to landmarks.
marks = np.array(mesh).flatten()[:136]
marks = np.reshape(marks, (-1, 2))
marks *= (facebox[2] - facebox[0])
marks[:, 0] += facebox[0]
marks[:, 1] += facebox[1]
marks = marks.astype(np.uint)
return marks
except Exception as e:
pass
# define a function to draw masks
def draw_marks(image, marks, color=(0, 255, 0)):
for mark in marks:
img = cv2.circle(image, (mark[0], mark[1]), 1, color, -1, cv2.LINE_AA)
return img
def line(img, marks):
img = cv2.drawContours(img, [marks], 0, (255, 255, 255), 1)
return img
def linemain(img, marks):
for index, item in enumerate(marks):
if index == len(marks) - 1:
break
img = cv2.line(img, item, marks[index + 1], [255, 255, 255], 1)
return img
| [
"tensorflow.lite.Interpreter",
"cv2.drawContours",
"tensorflow.image.convert_image_dtype",
"numpy.reshape",
"cv2.line",
"numpy.array",
"cv2.circle",
"cv2.cvtColor",
"numpy.expand_dims",
"cv2.resize"
] | [((169, 211), 'tensorflow.lite.Interpreter', 'tf.lite.Interpreter', ([], {'model_path': 'model_path'}), '(model_path=model_path)\n', (188, 211), True, 'import tensorflow as tf\n'), ((3305, 3358), 'cv2.drawContours', 'cv2.drawContours', (['img', '[marks]', '(0)', '(255, 255, 255)', '(1)'], {}), '(img, [marks], 0, (255, 255, 255), 1)\n', (3321, 3358), False, 'import cv2\n'), ((2204, 2236), 'cv2.resize', 'cv2.resize', (['face_img', '(128, 128)'], {}), '(face_img, (128, 128))\n', (2214, 2236), False, 'import cv2\n'), ((2256, 2297), 'cv2.cvtColor', 'cv2.cvtColor', (['face_img', 'cv2.COLOR_BGR2RGB'], {}), '(face_img, cv2.COLOR_BGR2RGB)\n', (2268, 2297), False, 'import cv2\n'), ((2316, 2364), 'tensorflow.image.convert_image_dtype', 'tf.image.convert_image_dtype', (['face_img', 'tf.uint8'], {}), '(face_img, tf.uint8)\n', (2344, 2364), True, 'import tensorflow as tf\n'), ((2381, 2410), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (2395, 2410), True, 'import numpy as np\n'), ((2820, 2846), 'numpy.reshape', 'np.reshape', (['marks', '(-1, 2)'], {}), '(marks, (-1, 2))\n', (2830, 2846), True, 'import numpy as np\n'), ((3190, 3254), 'cv2.circle', 'cv2.circle', (['image', '(mark[0], mark[1])', '(1)', 'color', '(-1)', 'cv2.LINE_AA'], {}), '(image, (mark[0], mark[1]), 1, color, -1, cv2.LINE_AA)\n', (3200, 3254), False, 'import cv2\n'), ((3511, 3568), 'cv2.line', 'cv2.line', (['img', 'item', 'marks[index + 1]', '[255, 255, 255]', '(1)'], {}), '(img, item, marks[index + 1], [255, 255, 255], 1)\n', (3519, 3568), False, 'import cv2\n'), ((2773, 2787), 'numpy.array', 'np.array', (['mesh'], {}), '(mesh)\n', (2781, 2787), True, 'import numpy as np\n')] |
"""
Author: <NAME>
Date: 05/10/2022
"""
from functools import partial
import argparse
import numpy as np
import os
import torch
import datetime
import logging
from pathlib import Path
from dataset.ScanObjectNNDataLoader import ScanObjectNNDataLoader
from modules.ptaug_utils import transform_point_cloud, scale_point_cloud, get_aug_args
from modules.pointnet2_utils import sample
from utils.utils import get_model, get_loss, set_seed, weight_init
def parse_args(argv=None):
    """Build and parse the command-line options for RepSurf ScanObjectNN training.

    :param argv: optional list of argument strings. When None (the default),
        arguments are taken from ``sys.argv[1:]``, which preserves the original
        ``parse_args()`` call signature for existing callers.
    :return: argparse.Namespace holding all training/evaluation options.
    """
    parser = argparse.ArgumentParser('RepSurf')
    # Basic
    parser.add_argument('--log_dir', type=str, default=None, help='experiment root')
    parser.add_argument('--data_dir', type=str, default='./data', help='data dir')
    parser.add_argument('--log_root', type=str, default='./log', help='log root dir')
    parser.add_argument('--model', default='repsurf.scanobjectnn.repsurf_ssg_umb',
                        help='model file name [default: repsurf_ssg_umb]')
    parser.add_argument('--gpus', nargs='+', type=str, default=None)
    parser.add_argument('--seed', type=int, default=2800, help='Training Seed')
    parser.add_argument('--cuda_ops', action='store_true', default=False,
                        help='Whether to use cuda version operations [default: False]')

    # Training
    parser.add_argument('--batch_size', type=int, default=64, help='batch size in training [default: 64]')
    parser.add_argument('--optimizer', type=str, default='Adam', help='optimizer for training [Adam, SGD]')
    parser.add_argument('--scheduler', type=str, default='step', help='scheduler for training')
    parser.add_argument('--epoch', default=500, type=int, help='number of epoch in training [default: 200]')
    parser.add_argument('--learning_rate', default=0.001, type=float, help='learning rate in training [default: 0.001]')
    parser.add_argument('--decay_rate', type=float, default=1e-4, help='decay rate [default: 1e-4]')
    parser.add_argument('--decay_step', default=20, type=int, help='number of epoch per decay [default: 20]')
    parser.add_argument('--n_workers', type=int, default=4, help='DataLoader Workers Number [default: 1024]')
    parser.add_argument('--init', type=str, default=None, help='initializer for model [kaiming, xavier]')

    # Evaluation
    parser.add_argument('--min_val', type=int, default=100, help='Min val epoch [default: 0]')

    # Augmentation
    parser.add_argument('--aug_scale', action='store_true', default=False,
                        help='Whether to augment by scaling [default: False]')
    parser.add_argument('--aug_shift', action='store_true', default=False,
                        help='Whether to augment by shifting [default: False]')

    # Modeling
    parser.add_argument('--num_point', type=int, default=1024, help='Point Number [default: 1024]')
    parser.add_argument('--return_dist', action='store_true', default=False,
                        help='Whether to use signed distance [default: False]')
    parser.add_argument('--return_center', action='store_true', default=False,
                        help='Whether to return center in surface abstraction [default: False]')
    parser.add_argument('--return_polar', action='store_true', default=False,
                        help='Whether to return polar coordinate in surface abstraction [default: False]')
    parser.add_argument('--group_size', type=int, default=8, help='Size of umbrella group [default: 0]')
    parser.add_argument('--umb_pool', type=str, default='sum', help='pooling for umbrella repsurf [mean, max]')

    return parser.parse_args(argv)
def test(model, loader, num_class=15, num_point=1024, num_votes=10, total_num=1):
    """Evaluate a classifier with and without test-time vote augmentation.

    Each batch is subsampled to ``num_point`` points; ``num_votes`` forward
    passes are averaged, where every vote after the first sees a randomly
    re-scaled copy of the cloud.

    :return: tuple ``(single_accuracy, vote_accuracy)`` over ``total_num`` samples.
    """
    classifier = model.eval()
    single_correct = 0
    vote_correct = 0
    for points, target in loader:
        points, target = points.cuda(), target.cuda()
        # preprocess: subsample the cloud to the requested point count
        points = sample(num_point, points)
        vote_pool = torch.zeros(target.shape[0], num_class).cuda()
        single_pred = None
        for vote in range(num_votes):
            voted_points = points.clone()
            if vote > 0:
                # perturb the xyz coordinates for every vote after the first
                voted_points[:, :3] = scale_point_cloud(voted_points[:, :3])
            pred = classifier(voted_points)
            if vote == 0:
                # keep the unperturbed prediction for the single-pass metric
                single_pred = pred
            vote_pool += pred
        vote_pred = vote_pool / num_votes
        # accumulate correct counts for both metrics
        single_correct += single_pred.data.max(1)[1].eq(target.long().data).cpu().sum()
        vote_correct += vote_pred.data.max(1)[1].eq(target.long().data).cpu().sum()
    return single_correct.item() / total_num, vote_correct.item() / total_num
def main(args):
    """Train a RepSurf classifier on ScanObjectNN with periodic vote evaluation.

    Creates the experiment/log directory tree, builds data loaders and the
    model, optionally resumes from a checkpoint, then runs the training loop,
    saving the best vote-accuracy model.
    """
    def log_string(s):
        # Mirror every message to both the file logger and stdout.
        logger.info(s)
        print(s)
    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(args.gpus)
    set_seed(args.seed)
    '''CREATE DIR'''
    experiment_dir = Path(os.path.join(args.log_root, 'PointAnalysis', 'log'))
    experiment_dir.mkdir(exist_ok=True)
    experiment_dir = experiment_dir.joinpath('ScanObjectNN')
    experiment_dir.mkdir(exist_ok=True)
    if args.log_dir is None:
        # No explicit run name: use a timestamped directory.
        timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))
        experiment_dir = experiment_dir.joinpath(timestr)
    else:
        experiment_dir = experiment_dir.joinpath(args.log_dir)
    experiment_dir.mkdir(exist_ok=True)
    checkpoints_dir = experiment_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = experiment_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)
    '''LOG'''
    # NOTE(review): re-parsing the CLI here replaces the *args* parameter that
    # was already used to build the directories above — presumably redundant
    # (same sys.argv), but confirm intent before removing.
    args = parse_args()
    logger = logging.getLogger("Model")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model))
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(args)
    '''DATA LOADING'''
    log_string('Load dataset ...')
    # Dataset-specific fields attached to the args namespace for downstream use.
    args.num_class = 15
    args.dataset = 'ScanObjectNN'
    args.normal = False
    aug_args = get_aug_args(args)
    DATA_PATH = os.path.join(args.data_dir, 'ScanObjectNN')
    TRAIN_DATASET = ScanObjectNNDataLoader(root=DATA_PATH, split='training')
    TEST_DATASET = ScanObjectNNDataLoader(root=DATA_PATH, split='test')
    trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET, batch_size=args.batch_size, shuffle=True,
                                                 num_workers=args.n_workers, drop_last=True)
    testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=args.batch_size, shuffle=False,
                                                num_workers=args.n_workers)
    '''MODEL BUILDING'''
    classifier = torch.nn.DataParallel(get_model(args)).cuda()
    criterion = get_loss().cuda()
    try:
        # Resume from the best checkpoint of this experiment dir if present.
        checkpoint = torch.load(str(experiment_dir) + '/checkpoints/best_model.pth')
        start_epoch = checkpoint['epoch']
        classifier.load_state_dict(checkpoint['model_state_dict'])
        log_string('Use pretrain model')
    except:
        # NOTE(review): bare except — any failure (not only a missing file)
        # silently falls back to training from scratch.
        log_string('No existing model, starting training from scratch...')
        start_epoch = 0
    if args.init:
        # Optional custom weight initialization (kaiming/xavier).
        init_func = partial(weight_init, init_type=args.init)
        classifier = classifier.apply(init_func)
    '''OPTIMIZER'''
    if args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(
            classifier.parameters(),
            lr=args.learning_rate,
            betas=(0.9, 0.999),
            eps=1e-08,
            weight_decay=args.decay_rate)
    elif args.optimizer == 'SGD':
        optimizer = torch.optim.SGD(
            classifier.parameters(),
            lr=args.learning_rate,
            momentum=0.9)
    '''LR SCHEDULER'''
    if args.scheduler == 'step':
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.decay_step, gamma=0.7)
    else:
        raise Exception('No Such Scheduler')
    global_epoch = 0
    global_step = 0
    best_sing_acc = 0.0
    best_vote_acc = 0.0
    loader_len = len(trainDataLoader)
    '''TRANING'''
    logger.info('Start training...')
    for epoch in range(start_epoch, args.epoch):
        log_string('Epoch %d (%d/%s):' % (global_epoch + 1, epoch + 1, args.epoch))
        train_loss = []
        train_correct = 0
        scheduler.step()
        for batch_id, data in enumerate(trainDataLoader):
            '''INPUT'''
            points, target = data
            points, target = points.cuda(), target.cuda()
            '''PREPROCESS'''
            points = sample(args.num_point, points)
            points = transform_point_cloud(points, args, aug_args)
            '''FORWARD'''
            optimizer.zero_grad()
            lr = optimizer.state_dict()['param_groups'][0]['lr']
            classifier = classifier.train()
            pred = classifier(points)
            loss = criterion(pred, target.long())
            pred_choice = pred.data.max(1)[1]
            correct = pred_choice.eq(target.long().data).cpu().sum()
            train_correct += correct
            train_loss.append(loss.item())
            '''BACKWARD'''
            loss.backward()
            optimizer.step()
            global_step += 1
            # Periodic progress printout.
            if batch_id % 80 == 0:
                print('Epoch: [{0}][{1}/{2}] lr {lr:.6f} loss {loss:.4f}'.
                      format(epoch, batch_id, len(trainDataLoader), lr=lr, loss=loss.item()))
        # drop_last=True above, so every batch has exactly batch_size samples.
        train_instance_acc = train_correct.item() / (loader_len * args.batch_size)
        train_mean_loss = np.mean(train_loss)
        log_string('Train Instance Accuracy: %.2f, Loss: %f' % (train_instance_acc * 100, train_mean_loss))
        # Only evaluate (and checkpoint) after the warm-up window.
        if epoch >= args.min_val:
            with torch.no_grad():
                sing_acc, vote_acc = test(classifier.eval(), testDataLoader, num_point=args.num_point,
                                           total_num=len(TEST_DATASET))
                if sing_acc >= best_sing_acc:
                    best_sing_acc = sing_acc
                if vote_acc >= best_vote_acc:
                    best_vote_acc = vote_acc
                    best_epoch = epoch + 1
                log_string('Test Single Accuracy: %.2f' % (sing_acc * 100))
                log_string('Best Single Accuracy: %.2f' % (best_sing_acc * 100))
                log_string('Test Vote Accuracy: %.2f' % (vote_acc * 100))
                log_string('Best Vote Accuracy: %.2f' % (best_vote_acc * 100))
                # Save whenever the vote accuracy ties or beats the best so far.
                if vote_acc >= best_vote_acc:
                    logger.info('Save model...')
                    savepath = str(checkpoints_dir) + '/best_model.pth'
                    log_string('Saving at %s' % savepath)
                    state = {
                        'epoch': best_epoch,
                        'vote_acc': vote_acc,
                        'model_state_dict': classifier.state_dict(),
                        'optimizer_state_dict': optimizer.state_dict(),
                    }
                    torch.save(state, savepath)
        global_epoch += 1
    logger.info('End of training...')
if __name__ == '__main__':
    # Entry point: parse CLI options and launch training.
    main(parse_args())
| [
"logging.getLogger",
"modules.ptaug_utils.transform_point_cloud",
"numpy.mean",
"argparse.ArgumentParser",
"dataset.ScanObjectNNDataLoader.ScanObjectNNDataLoader",
"modules.pointnet2_utils.sample",
"logging.FileHandler",
"utils.utils.get_model",
"torch.save",
"modules.ptaug_utils.get_aug_args",
... | [((505, 539), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""RepSurf"""'], {}), "('RepSurf')\n", (528, 539), False, 'import argparse\n'), ((5031, 5050), 'utils.utils.set_seed', 'set_seed', (['args.seed'], {}), '(args.seed)\n', (5039, 5050), False, 'from utils.utils import get_model, get_loss, set_seed, weight_init\n'), ((5802, 5828), 'logging.getLogger', 'logging.getLogger', (['"""Model"""'], {}), "('Model')\n", (5819, 5828), False, 'import logging\n'), ((5879, 5952), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (5896, 5952), False, 'import logging\n'), ((5972, 6028), 'logging.FileHandler', 'logging.FileHandler', (["('%s/%s.txt' % (log_dir, args.model))"], {}), "('%s/%s.txt' % (log_dir, args.model))\n", (5991, 6028), False, 'import logging\n'), ((6355, 6373), 'modules.ptaug_utils.get_aug_args', 'get_aug_args', (['args'], {}), '(args)\n', (6367, 6373), False, 'from modules.ptaug_utils import transform_point_cloud, scale_point_cloud, get_aug_args\n'), ((6390, 6433), 'os.path.join', 'os.path.join', (['args.data_dir', '"""ScanObjectNN"""'], {}), "(args.data_dir, 'ScanObjectNN')\n", (6402, 6433), False, 'import os\n'), ((6454, 6510), 'dataset.ScanObjectNNDataLoader.ScanObjectNNDataLoader', 'ScanObjectNNDataLoader', ([], {'root': 'DATA_PATH', 'split': '"""training"""'}), "(root=DATA_PATH, split='training')\n", (6476, 6510), False, 'from dataset.ScanObjectNNDataLoader import ScanObjectNNDataLoader\n'), ((6530, 6582), 'dataset.ScanObjectNNDataLoader.ScanObjectNNDataLoader', 'ScanObjectNNDataLoader', ([], {'root': 'DATA_PATH', 'split': '"""test"""'}), "(root=DATA_PATH, split='test')\n", (6552, 6582), False, 'from dataset.ScanObjectNNDataLoader import ScanObjectNNDataLoader\n'), ((6605, 6737), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['TRAIN_DATASET'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 
'num_workers': 'args.n_workers', 'drop_last': '(True)'}), '(TRAIN_DATASET, batch_size=args.batch_size,\n shuffle=True, num_workers=args.n_workers, drop_last=True)\n', (6632, 6737), False, 'import torch\n'), ((6805, 6921), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['TEST_DATASET'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': 'args.n_workers'}), '(TEST_DATASET, batch_size=args.batch_size,\n shuffle=False, num_workers=args.n_workers)\n', (6832, 6921), False, 'import torch\n'), ((3902, 3927), 'modules.pointnet2_utils.sample', 'sample', (['num_point', 'points'], {}), '(num_point, points)\n', (3908, 3927), False, 'from modules.pointnet2_utils import sample\n'), ((5099, 5150), 'os.path.join', 'os.path.join', (['args.log_root', '"""PointAnalysis"""', '"""log"""'], {}), "(args.log_root, 'PointAnalysis', 'log')\n", (5111, 5150), False, 'import os\n'), ((8094, 8179), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': 'args.decay_step', 'gamma': '(0.7)'}), '(optimizer, step_size=args.decay_step, gamma=0.7\n )\n', (8125, 8179), False, 'import torch\n'), ((9828, 9847), 'numpy.mean', 'np.mean', (['train_loss'], {}), '(train_loss)\n', (9835, 9847), True, 'import numpy as np\n'), ((7072, 7082), 'utils.utils.get_loss', 'get_loss', ([], {}), '()\n', (7080, 7082), False, 'from utils.utils import get_model, get_loss, set_seed, weight_init\n'), ((8848, 8878), 'modules.pointnet2_utils.sample', 'sample', (['args.num_point', 'points'], {}), '(args.num_point, points)\n', (8854, 8878), False, 'from modules.pointnet2_utils import sample\n'), ((8900, 8945), 'modules.ptaug_utils.transform_point_cloud', 'transform_point_cloud', (['points', 'args', 'aug_args'], {}), '(points, args, aug_args)\n', (8921, 8945), False, 'from modules.ptaug_utils import transform_point_cloud, scale_point_cloud, get_aug_args\n'), ((3964, 4003), 'torch.zeros', 'torch.zeros', (['target.shape[0]', 'num_class'], {}), 
'(target.shape[0], num_class)\n', (3975, 4003), False, 'import torch\n'), ((4164, 4200), 'modules.ptaug_utils.scale_point_cloud', 'scale_point_cloud', (['new_points[:, :3]'], {}), '(new_points[:, :3])\n', (4181, 4200), False, 'from modules.ptaug_utils import transform_point_cloud, scale_point_cloud, get_aug_args\n'), ((7032, 7047), 'utils.utils.get_model', 'get_model', (['args'], {}), '(args)\n', (7041, 7047), False, 'from utils.utils import get_model, get_loss, set_seed, weight_init\n'), ((7492, 7533), 'functools.partial', 'partial', (['weight_init'], {'init_type': 'args.init'}), '(weight_init, init_type=args.init)\n', (7499, 7533), False, 'from functools import partial\n'), ((10008, 10023), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10021, 10023), False, 'import torch\n'), ((5344, 5367), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5365, 5367), False, 'import datetime\n'), ((11266, 11293), 'torch.save', 'torch.save', (['state', 'savepath'], {}), '(state, savepath)\n', (11276, 11293), False, 'import torch\n')] |
import numpy as np
from util import patches
from util import convert_label
from tensorflow.python.keras.utils import Sequence
class DataGenerator(Sequence):
    """Keras Sequence yielding batches of image patches with two label heads.

    Each batch is assembled from ``batch_size // n_patches`` keys; for every
    key, ``n_patches`` positions are drawn and turned into patch tensors plus
    one-hot class labels ('class') and regression targets ('reg').
    """

    def __init__(self, images, labels, patch_pos, keys, batch_size = 500,
                 patch_size = [40,16], n_channels = 1, n_classes = 5,
                 n_patches = 100, overlap =0.6, shuffle = True):
        """Store configuration and shuffle the key order for the first epoch."""
        self.images = images
        self.labels = labels
        self.patch_pos = patch_pos
        self.keys = keys
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.n_patches = n_patches
        self.overlap = overlap
        self.shuffle = shuffle
        self.on_epoch_end()

    def __getitem__(self, index):
        """Build and return batch number *index*."""
        keys_per_batch = self.batch_size // self.n_patches
        batch_keys = self.keys[index * keys_per_batch:(index + 1) * keys_per_batch]
        X, y, y_reg = self.__data_generation(batch_keys)
        return X, {'class': y, 'reg': y_reg}

    def on_epoch_end(self):
        """Reshuffle the key order between epochs when shuffling is enabled."""
        if self.shuffle == True:
            np.random.shuffle(self.keys)

    def __data_generation(self, batch_keys):
        """Extract patches plus class/regression labels for *batch_keys*."""
        X = np.empty((self.batch_size, *self.patch_size))
        y = np.empty((self.batch_size, self.n_classes), dtype='int32')
        y_reg = np.empty((self.batch_size, 1))
        for i, key in enumerate(batch_keys):
            lo = i * self.n_patches
            hi = lo + self.n_patches
            # Randomly draw n_patches positions from all candidates for this key.
            pos = patches.draw_num_patches(self.patch_pos[key], self.n_patches)
            # Fill this key's slice of the batch.
            X[lo:hi] = patches.extract_patches(self.images[key], pos, self.n_patches,
                                            self.n_channels, self.patch_size)
            y[lo:hi] = convert_label.make_class_label(self.labels[key], pos, self.patch_size)
            y_reg[lo:hi] = convert_label.make_reg_label(self.labels[key], pos, self.patch_size)
        return X, y, y_reg

    def __len__(self):
        """Number of batches per epoch."""
        return int(np.floor(len(self.keys) * self.n_patches / self.batch_size))
| [
"util.patches.draw_num_patches",
"util.convert_label.make_reg_label",
"util.convert_label.make_class_label",
"util.patches.extract_patches",
"numpy.empty",
"numpy.random.shuffle"
] | [((1493, 1538), 'numpy.empty', 'np.empty', (['(self.batch_size, *self.patch_size)'], {}), '((self.batch_size, *self.patch_size))\n', (1501, 1538), True, 'import numpy as np\n'), ((1551, 1609), 'numpy.empty', 'np.empty', (['(self.batch_size, self.n_classes)'], {'dtype': '"""int32"""'}), "((self.batch_size, self.n_classes), dtype='int32')\n", (1559, 1609), True, 'import numpy as np\n'), ((1626, 1656), 'numpy.empty', 'np.empty', (['(self.batch_size, 1)'], {}), '((self.batch_size, 1))\n', (1634, 1656), True, 'import numpy as np\n'), ((1315, 1343), 'numpy.random.shuffle', 'np.random.shuffle', (['self.keys'], {}), '(self.keys)\n', (1332, 1343), True, 'import numpy as np\n'), ((1828, 1889), 'util.patches.draw_num_patches', 'patches.draw_num_patches', (['self.patch_pos[key]', 'self.n_patches'], {}), '(self.patch_pos[key], self.n_patches)\n', (1852, 1889), False, 'from util import patches\n'), ((1979, 2090), 'util.patches.extract_patches', 'patches.extract_patches', (['self.images[key]', 'patch_pos_temp', 'self.n_patches', 'self.n_channels', 'self.patch_size'], {}), '(self.images[key], patch_pos_temp, self.n_patches,\n self.n_channels, self.patch_size)\n', (2002, 2090), False, 'from util import patches\n'), ((2189, 2275), 'util.convert_label.make_class_label', 'convert_label.make_class_label', (['self.labels[key]', 'patch_pos_temp', 'self.patch_size'], {}), '(self.labels[key], patch_pos_temp, self.\n patch_size)\n', (2219, 2275), False, 'from util import convert_label\n'), ((2372, 2451), 'util.convert_label.make_reg_label', 'convert_label.make_reg_label', (['self.labels[key]', 'patch_pos_temp', 'self.patch_size'], {}), '(self.labels[key], patch_pos_temp, self.patch_size)\n', (2400, 2451), False, 'from util import convert_label\n')] |
import numpy as np
import pandas as pd
from tqdm import tqdm
from prereise.gather.winddata.rap.power_curves import (
get_power,
get_state_power_curves,
get_turbine_power_curves,
)
def _check_curve(curve):
allowed_curves = ["state", "IEC class 2"]
if curve not in allowed_curves:
err_msg = "curve not in allowed: " + ", ".join(allowed_curves)
raise ValueError(err_msg)
def _find_to_impute(data):
# Locate missing data
to_impute = data[data.U.isna()].index
if len(to_impute) == 0:
print("No missing data")
return
else:
return to_impute
def _select_similar(data, dates, j):
year = dates[j].year
month = dates[j].month
hour = dates[j].hour
select = data[
(dates.year == year)
& (dates.month == month)
& (dates.hour == hour)
& (pd.notna(data.Pout))
]
return select
def simple(data, wind_farm, inplace=True, curve="state"):
    """Impute missing data using a simple procedure. For each missing entry,
    the extrema of the U and V components of the wind speed of all non missing
    entries that have the same location, same month, same hour are first found
    for each missing entry. Then, a U and V value are randomly generated
    between the respective derived ranges.

    :param pandas.DataFrame data: data frame as returned by
        :py:func:`prereise.gather.winddata.rap.rap.retrieve_data`.
    :param pandas.DataFrame wind_farm: data frame of wind farms.
    :param bool inplace: should the imputation be done in place.
    :param str curve: 'state' to use the state average, otherwise named curve.
    :return: (*pandas.DataFrame*) -- data frame with missing entries imputed.
    """
    _check_curve(curve)
    data_impute = data if inplace else data.copy()

    to_impute = _find_to_impute(data)
    if to_impute is None:
        return

    # Information on wind turbines & state average turbine curves
    tpc = get_turbine_power_curves()
    spc = get_state_power_curves()

    # Timestamp of all entries in data frame
    dates = pd.DatetimeIndex(data.index.values)

    n_target = len(wind_farm)
    select = None
    for i, j in tqdm(enumerate(to_impute), total=len(to_impute)):
        # Re-select similar rows only once per pass over all wind farms
        if i % n_target == 0:
            select = _select_similar(data, dates, j)
        k = data.loc[j].plant_id
        select_plant = select[select.plant_id == k]
        min_u, max_u = select_plant["U"].min(), select_plant["U"].max()
        min_v, max_v = select_plant["V"].min(), select_plant["V"].max()
        # Sample U and V uniformly within the observed range
        data_impute.at[j, "U"] = min_u + (max_u - min_u) * np.random.random()
        data_impute.at[j, "V"] = min_v + (max_v - min_v) * np.random.random()
        # BUG FIX: read the freshly imputed U/V from data_impute, not data.
        # With inplace=False, *data* still holds NaNs, which made Pout NaN.
        wspd = np.sqrt(data_impute.loc[j].U ** 2 + data_impute.loc[j].V ** 2)
        capacity = wind_farm.loc[k].Pmax
        normalized_power = get_power(tpc, spc, wspd, "IEC class 2")
        data_impute.at[j, "Pout"] = normalized_power * capacity

    if not inplace:
        return data_impute
def gaussian(data, wind_farm, inplace=True, curve="state"):
    """Impute missing data using gaussian distributions of U & V. For each
    missing entry, sample U & V based on mean and covariance of non-missing
    entries that have the same location, same month, and same hour.

    :param pandas.DataFrame data: data frame as returned by
        :py:func:`prereise.gather.winddata.rap.rap.retrieve_data`.
    :param pandas.DataFrame wind_farm: data frame of wind farms.
    :param bool inplace: should the imputation be done in place.
    :param str curve: 'state' to use the state average, otherwise named curve.
    :return: (*pandas.DataFrame*) -- data frame with missing entries imputed.
    """
    _check_curve(curve)
    data_impute = data if inplace else data.copy()

    to_impute = _find_to_impute(data)
    if to_impute is None:
        return

    # Information on wind turbines & state average turbine curves
    tpc = get_turbine_power_curves()
    spc = get_state_power_curves()

    # Timestamp of all entries in data frame
    dates = pd.DatetimeIndex(data.index.values)

    n_target = len(wind_farm)
    select = None
    for i, hour in tqdm(enumerate(to_impute), total=len(to_impute)):
        # Only run the similar-selection function once per pass over all farms
        if i % n_target == 0:
            select = _select_similar(data, dates, hour)
        plant_id = data.loc[hour].plant_id
        select_plant = select[select.plant_id == plant_id]
        # Fit a 2-D gaussian to the observed (U, V) pairs and draw one sample
        uv_data = np.array([select_plant["U"].to_numpy(), select_plant["V"].to_numpy()])
        cov = np.cov(uv_data)
        mean = np.mean(uv_data, axis=1)
        sample = np.random.multivariate_normal(mean=mean, cov=cov, size=1)
        data_impute.at[hour, "U"] = sample[0][0]
        data_impute.at[hour, "V"] = sample[0][1]
        # BUG FIX: read the freshly imputed U/V from data_impute, not data.
        # With inplace=False, *data* still holds NaNs, which made Pout NaN.
        wspd = np.sqrt(data_impute.loc[hour].U ** 2 + data_impute.loc[hour].V ** 2)
        capacity = wind_farm.loc[plant_id].Pmax
        normalized_power = get_power(tpc, spc, wspd, "IEC class 2")
        data_impute.at[hour, "Pout"] = normalized_power * capacity

    if not inplace:
        return data_impute
| [
"numpy.mean",
"numpy.sqrt",
"pandas.DatetimeIndex",
"numpy.random.multivariate_normal",
"numpy.random.random",
"prereise.gather.winddata.rap.power_curves.get_turbine_power_curves",
"numpy.cov",
"prereise.gather.winddata.rap.power_curves.get_state_power_curves",
"pandas.notna",
"prereise.gather.win... | [((1965, 1991), 'prereise.gather.winddata.rap.power_curves.get_turbine_power_curves', 'get_turbine_power_curves', ([], {}), '()\n', (1989, 1991), False, 'from prereise.gather.winddata.rap.power_curves import get_power, get_state_power_curves, get_turbine_power_curves\n'), ((2002, 2026), 'prereise.gather.winddata.rap.power_curves.get_state_power_curves', 'get_state_power_curves', ([], {}), '()\n', (2024, 2026), False, 'from prereise.gather.winddata.rap.power_curves import get_power, get_state_power_curves, get_turbine_power_curves\n'), ((2085, 2120), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['data.index.values'], {}), '(data.index.values)\n', (2101, 2120), True, 'import pandas as pd\n'), ((3927, 3953), 'prereise.gather.winddata.rap.power_curves.get_turbine_power_curves', 'get_turbine_power_curves', ([], {}), '()\n', (3951, 3953), False, 'from prereise.gather.winddata.rap.power_curves import get_power, get_state_power_curves, get_turbine_power_curves\n'), ((3964, 3988), 'prereise.gather.winddata.rap.power_curves.get_state_power_curves', 'get_state_power_curves', ([], {}), '()\n', (3986, 3988), False, 'from prereise.gather.winddata.rap.power_curves import get_power, get_state_power_curves, get_turbine_power_curves\n'), ((4047, 4082), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['data.index.values'], {}), '(data.index.values)\n', (4063, 4082), True, 'import pandas as pd\n'), ((2721, 2769), 'numpy.sqrt', 'np.sqrt', (['(data.loc[j].U ** 2 + data.loc[j].V ** 2)'], {}), '(data.loc[j].U ** 2 + data.loc[j].V ** 2)\n', (2728, 2769), True, 'import numpy as np\n'), ((2838, 2878), 'prereise.gather.winddata.rap.power_curves.get_power', 'get_power', (['tpc', 'spc', 'wspd', '"""IEC class 2"""'], {}), "(tpc, spc, wspd, 'IEC class 2')\n", (2847, 2878), False, 'from prereise.gather.winddata.rap.power_curves import get_power, get_state_power_curves, get_turbine_power_curves\n'), ((4559, 4574), 'numpy.cov', 'np.cov', (['uv_data'], {}), '(uv_data)\n', 
(4565, 4574), True, 'import numpy as np\n'), ((4590, 4614), 'numpy.mean', 'np.mean', (['uv_data'], {'axis': '(1)'}), '(uv_data, axis=1)\n', (4597, 4614), True, 'import numpy as np\n'), ((4632, 4689), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', ([], {'mean': 'mean', 'cov': 'cov', 'size': '(1)'}), '(mean=mean, cov=cov, size=1)\n', (4661, 4689), True, 'import numpy as np\n'), ((4804, 4858), 'numpy.sqrt', 'np.sqrt', (['(data.loc[hour].U ** 2 + data.loc[hour].V ** 2)'], {}), '(data.loc[hour].U ** 2 + data.loc[hour].V ** 2)\n', (4811, 4858), True, 'import numpy as np\n'), ((4934, 4974), 'prereise.gather.winddata.rap.power_curves.get_power', 'get_power', (['tpc', 'spc', 'wspd', '"""IEC class 2"""'], {}), "(tpc, spc, wspd, 'IEC class 2')\n", (4943, 4974), False, 'from prereise.gather.winddata.rap.power_curves import get_power, get_state_power_curves, get_turbine_power_curves\n'), ((854, 873), 'pandas.notna', 'pd.notna', (['data.Pout'], {}), '(data.Pout)\n', (862, 873), True, 'import pandas as pd\n'), ((2609, 2627), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2625, 2627), True, 'import numpy as np\n'), ((2687, 2705), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2703, 2705), True, 'import numpy as np\n')] |
from __future__ import division
import torch
import numpy as np
import argparse
import os
# import detector
from pytorch_yolo_v3.yolo_detector import Darknet_Detector
# import utility functions
from util_detect import detect_video, remove_duplicates
from util_track import condense_detections,track_SORT
from util_transform import transform_obj_list, write_json, get_best_transform
from util_draw import draw_world,draw_tracks
if __name__ == "__main__":
    # Pipeline: detect vehicles in a video with YOLOv3, track them with SORT,
    # optionally project tracks into satellite/GPS coordinates, and write JSON.
    parser = argparse.ArgumentParser(description='Get input file, output directory, .')
    parser.add_argument("input",help='<Required> string - input video file path',type = str)
    parser.add_argument("out_dir",help='<Required> string - output file directory',type = str)
    parser.add_argument("--cam",help='string - camera image coordinate numpy file',type = str)
    parser.add_argument("--sat",help='string - satellite image coordinate numpy file',type = str)
    parser.add_argument("--gps",help='string - gps coordinate numpy file',type = str)
    parser.add_argument("--sat_im",help='string - satellite image file path',type = str)
    args = parser.parse_args()

    # parse args
    CONVERT = True
    input_file = args.input
    out_dir = args.out_dir
#
#    input_file = "test.avi"
#    out_dir = "test"
    try:
        # Conversion inputs are optional; missing/unreadable files disable CONVERT.
        # NOTE(review): bare except — any np.load failure is treated as "not given".
        cam_pts = np.load(args.cam)
        world_pts = np.load(args.sat)
        gps_pts = np.load(args.gps)
        background_file = args.sat_im
    except:
        CONVERT = False # only try to convert if conversion coords input

    # name out files
    detect_file = os.path.join(out_dir,"detections.avi")
    track_file = os.path.join(out_dir,"tracks.avi")
    world_file = os.path.join(out_dir,"trajectories.avi")
    data_file = os.path.join(out_dir,"data.json")

    # loads model unless already loaded
    # (referencing `net` raises NameError on a fresh interpreter; the except
    # branch then builds the detector — a REPL convenience to avoid reloads)
    try:
        net
    except:
        params = {'cfg_file' :'pytorch_yolo_v3/cfg/yolov3.cfg',
                  'wt_file': 'pytorch_yolo_v3/yolov3.weights',
                  'class_file': 'pytorch_yolo_v3/data/coco.names',
                  'pallete_file': 'pytorch_yolo_v3/pallete',
                  'nms_threshold': 0.5,
                  'conf': 0.52,
                  'resolution': 1024,
                  'num_classes': 80}
        net = Darknet_Detector(**params)
        print("Model reloaded.")

    # get detections
    detections,num_frames = detect_video(input_file,net,show = True, save_file=detect_file)
    detections = remove_duplicates(detections)
    detections = condense_detections(detections,style = "SORT_cls")

    # get tracks
    objs, _ = track_SORT(detections,mod_err = 1, meas_err = 10, state_err = 1000, fsld_max = 25)
    num_frames = int(num_frames)
    draw_tracks(objs,input_file,track_file,show = True, trail_size = 50,frame_lim = num_frames)

    if CONVERT:
        # get transform for camera to world space and transform object points
        M = get_best_transform(cam_pts,world_pts)
        M2 = get_best_transform(cam_pts,gps_pts)
        objs = transform_obj_list(objs,M,M2)

        # plot together
        draw_world(objs,background_file,world_file,show = True,trail_size = 20,plot_label = True,frame_lim = num_frames)

    # Fixed placeholder metadata; presumably filled from the camera in future — confirm.
    metadata = {
            "camera_id": "None",
            "start_time":"None",
            "num_frames":num_frames,
            "frame_rate":"Unknown"
            }
    out = write_json(objs,metadata,num_frames,data_file)
| [
"util_transform.get_best_transform",
"util_transform.write_json",
"util_detect.remove_duplicates",
"util_track.track_SORT",
"argparse.ArgumentParser",
"util_draw.draw_world",
"os.path.join",
"util_track.condense_detections",
"util_transform.transform_obj_list",
"pytorch_yolo_v3.yolo_detector.Darkn... | [((481, 555), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Get input file, output directory, ."""'}), "(description='Get input file, output directory, .')\n", (504, 555), False, 'import argparse\n'), ((1578, 1617), 'os.path.join', 'os.path.join', (['out_dir', '"""detections.avi"""'], {}), "(out_dir, 'detections.avi')\n", (1590, 1617), False, 'import os\n'), ((1635, 1670), 'os.path.join', 'os.path.join', (['out_dir', '"""tracks.avi"""'], {}), "(out_dir, 'tracks.avi')\n", (1647, 1670), False, 'import os\n'), ((1688, 1729), 'os.path.join', 'os.path.join', (['out_dir', '"""trajectories.avi"""'], {}), "(out_dir, 'trajectories.avi')\n", (1700, 1729), False, 'import os\n'), ((1746, 1780), 'os.path.join', 'os.path.join', (['out_dir', '"""data.json"""'], {}), "(out_dir, 'data.json')\n", (1758, 1780), False, 'import os\n'), ((2400, 2463), 'util_detect.detect_video', 'detect_video', (['input_file', 'net'], {'show': '(True)', 'save_file': 'detect_file'}), '(input_file, net, show=True, save_file=detect_file)\n', (2412, 2463), False, 'from util_detect import detect_video, remove_duplicates\n'), ((2481, 2510), 'util_detect.remove_duplicates', 'remove_duplicates', (['detections'], {}), '(detections)\n', (2498, 2510), False, 'from util_detect import detect_video, remove_duplicates\n'), ((2528, 2577), 'util_track.condense_detections', 'condense_detections', (['detections'], {'style': '"""SORT_cls"""'}), "(detections, style='SORT_cls')\n", (2547, 2577), False, 'from util_track import condense_detections, track_SORT\n'), ((2615, 2690), 'util_track.track_SORT', 'track_SORT', (['detections'], {'mod_err': '(1)', 'meas_err': '(10)', 'state_err': '(1000)', 'fsld_max': '(25)'}), '(detections, mod_err=1, meas_err=10, state_err=1000, fsld_max=25)\n', (2625, 2690), False, 'from util_track import condense_detections, track_SORT\n'), ((2735, 2828), 'util_draw.draw_tracks', 'draw_tracks', (['objs', 'input_file', 
'track_file'], {'show': '(True)', 'trail_size': '(50)', 'frame_lim': 'num_frames'}), '(objs, input_file, track_file, show=True, trail_size=50,\n frame_lim=num_frames)\n', (2746, 2828), False, 'from util_draw import draw_world, draw_tracks\n'), ((1319, 1336), 'numpy.load', 'np.load', (['args.cam'], {}), '(args.cam)\n', (1326, 1336), True, 'import numpy as np\n'), ((1357, 1374), 'numpy.load', 'np.load', (['args.sat'], {}), '(args.sat)\n', (1364, 1374), True, 'import numpy as np\n'), ((1393, 1410), 'numpy.load', 'np.load', (['args.gps'], {}), '(args.gps)\n', (1400, 1410), True, 'import numpy as np\n'), ((2938, 2976), 'util_transform.get_best_transform', 'get_best_transform', (['cam_pts', 'world_pts'], {}), '(cam_pts, world_pts)\n', (2956, 2976), False, 'from util_transform import transform_obj_list, write_json, get_best_transform\n'), ((2989, 3025), 'util_transform.get_best_transform', 'get_best_transform', (['cam_pts', 'gps_pts'], {}), '(cam_pts, gps_pts)\n', (3007, 3025), False, 'from util_transform import transform_obj_list, write_json, get_best_transform\n'), ((3041, 3072), 'util_transform.transform_obj_list', 'transform_obj_list', (['objs', 'M', 'M2'], {}), '(objs, M, M2)\n', (3059, 3072), False, 'from util_transform import transform_obj_list, write_json, get_best_transform\n'), ((3112, 3226), 'util_draw.draw_world', 'draw_world', (['objs', 'background_file', 'world_file'], {'show': '(True)', 'trail_size': '(20)', 'plot_label': '(True)', 'frame_lim': 'num_frames'}), '(objs, background_file, world_file, show=True, trail_size=20,\n plot_label=True, frame_lim=num_frames)\n', (3122, 3226), False, 'from util_draw import draw_world, draw_tracks\n'), ((3443, 3492), 'util_transform.write_json', 'write_json', (['objs', 'metadata', 'num_frames', 'data_file'], {}), '(objs, metadata, num_frames, data_file)\n', (3453, 3492), False, 'from util_transform import transform_obj_list, write_json, get_best_transform\n'), ((2282, 2308), 
'pytorch_yolo_v3.yolo_detector.Darknet_Detector', 'Darknet_Detector', ([], {}), '(**params)\n', (2298, 2308), False, 'from pytorch_yolo_v3.yolo_detector import Darknet_Detector\n')] |
"""
Event module
"""
# Import modules
import numpy as np
import scipy
from inpoly import inpoly2
import datetime
import math
import time
import matplotlib
import matplotlib.pyplot as plt
class Event:
"""This class handles all the detection procedures"""
def __init__(self, devices, detections, events, travel_times, params) -> None:
super().__init__()
self.devices = devices
self.detections = detections
self.events = events
self.params = params
self.travel_times = travel_times
self.active_events = {}
def run(self):
# run loop indefinitely
while True:
self.find_and_locate()
time.sleep(self.params["sleep_time"])
    def find_and_locate(self):
        """Run one association/location sweep.

        1. Pull detections that are not yet tied to any event.
        2. Try to associate each one with an active event; if no event
           accepts it, open a new event seeded by this detection.
        3. For every active event, either retire it (no detection for
           longer than ``params["tsl_max"]`` seconds) or refresh its
           location/magnitude and publish the update.
        """
        # 1. Get new detections
        new_detections = self.get_detections()
        # 2. Associate new detections with events
        # for each new detection
        for new_index, new_detection in new_detections.iterrows():
            # initially, the detection is not associated
            det_assoc = False
            for event_id in self.active_events.keys():
                # only devices inside the event's location grid can contribute
                _, device_ids = self.get_active_devices_ingrid(event_id)
                if new_detection["device_id"] in device_ids:
                    # keep trying other active events until one accepts it
                    det_assoc = self.associate(event_id, new_index, new_detection)
                    if det_assoc == True:
                        new_exist = "existing"
                        break
            if det_assoc == False:
                # if it could not be associated with an existing event, create a new one
                self.set_new_event(new_index, new_detection)
                new_exist = "new"
            self.print_detection_stats(new_detection["device_id"], new_exist)
        # 3. Update location and magnitude of each event
        # iterate over a copy of the keys: entries may be deleted below
        for event_id in list(self.active_events.keys()):
            # time since the last detection
            tsl = self.time_since_last(event_id)
            # Delete event if it is too old
            if tsl > self.params["tsl_max"]:
                del self.active_events[event_id]
                self.detections.drop(event_id, self.params)
                self.events.drop(event_id)
            # Or update location, magnitude, and origin time, publish to mqtt
            else:
                self.update_events(event_id)
                self.events.publish_event(self.params, event_id=event_id)
                self.print_event_stats(event_id)
    def set_new_event(self, new_index, new_detection):
        """Open a new active event seeded by a single detection.

        Builds an event id of the form ``E_YYMMDDHHMM`` plus a letter
        suffix that disambiguates events created in the same minute,
        tags the detection row with it, and initialises the location
        probability grid from the not-yet-arrived stations.
        """
        # Get event ID
        # timestamp of the detection (UTC)
        timestamp = datetime.datetime.utcfromtimestamp(new_detection["cloud_t"])
        # two-digit date/time fields: E_YYMMDDHHMM
        year = str(timestamp.year - 2000).zfill(2)
        month = str(timestamp.month).zfill(2)
        day = str(timestamp.day).zfill(2)
        hour = str(timestamp.hour).zfill(2)
        minute = str(timestamp.minute).zfill(2)
        event_id = "E_" + year + month + day + hour + minute
        # count events already created within this same minute...
        all_events_id = list(set(self.events.data["event_id"].to_list()))
        all_events_id = [n for n in all_events_id if n[:-1] == event_id]
        letter_count = len(all_events_id)
        # ...and append the next letter suffix (A, B, C, ...)
        event_id = event_id + chr(ord("@") + letter_count + 1)
        self.active_events[event_id] = {}
        # Associate detection with event
        self.detections.data.loc[new_index, "event_id"] = event_id
        # Initialise the location probability based on the first detection
        self.get_loc_not_yet_arrived(event_id, new_detection)
    def associate(self, event_id, new_index, new_detection):
        """Try to attach a new detection to an existing active event.

        Builds a travel-time-difference probability surface from every
        detection already associated with *event_id*, relocates the event
        with that surface added, and accepts the new pick when the RMS
        travel-time misfit stays below ``params["assoc_win"]``.
        Returns True when the detection was associated, False otherwise.
        """
        # get all detections of the event
        all_detections = self.detections.data[
            self.detections.data["event_id"] == event_id
        ]
        # get all detected devices
        detected_devices = all_detections["device_id"]
        # get the new device id and detection time
        new_device = new_detection["device_id"]
        new_time = new_detection["cloud_t"]
        # get first detection
        first_detection = self.get_first_detection(event_id)
        first_det_device = first_detection["device_id"]
        # fresh probability surface for this candidate pick
        new_prob = np.zeros_like(self.travel_times.grid_lat)
        # NOTE(review): `in` on a pandas Series tests the INDEX labels, not
        # the values, so this condition is likely always True -- confirm
        # whether detected_devices.values / .to_list() was intended.
        if new_device not in detected_devices:
            # loop over all associated detections
            for _, detection in all_detections.iterrows():
                # get device ID and detection time
                det_device = detection["device_id"]
                det_time = detection["cloud_t"]
                # timing standard deviation for this station pair
                sigma = self.get_sigma(event_id, new_device, det_device)
                # travel-time grids of both devices, in the frame of the
                # first-detecting device
                grid_device_old = self.get_device_tt_grid(
                    det_device, first_det_device, self.params
                )
                grid_device_new = self.get_device_tt_grid(
                    new_device, first_det_device, self.params
                )
                # Gaussian probability of the observed arrival-time difference
                tt_prob = np.exp(
                    -((grid_device_old - grid_device_new - det_time + new_time) ** 2)
                    / (2 * sigma ** 2)
                )
                # and add this pair's probability surface to the total
                new_prob = new_prob + tt_prob
        # ASSOCIATE THE NEW DETECTION
        # get updated potential location of the eq epicenter
        best_lat, best_lon, _, _ = self.get_best_location(
            event_id, add_prob=new_prob
        )
        # test the RMS of mispicks
        tt_precalc = self.travel_times.tt_vector
        misfit = []
        # squared misfit of each existing pick against the new pick
        for _, detection in all_detections.iterrows():
            det_device_old = detection["device_id"]
            det_time_old = detection["cloud_t"]
            epic_dist_old = self.get_sta_delta(
                event_id, det_device_old, eq_lat=best_lat, eq_lon=best_lon
            )
            epic_dist_new = self.get_sta_delta(
                event_id, new_device, eq_lat=best_lat, eq_lon=best_lon
            )
            # find the closest time from the tt_precalc (distances are
            # converted from km to degrees with the 111.3 km/deg factor)
            tt_old = tt_precalc["travel_time"][
                np.argmin(np.abs(tt_precalc["dist"] - epic_dist_old / 111.3))
            ]
            tt_new = tt_precalc["travel_time"][
                np.argmin(np.abs(tt_precalc["dist"] - epic_dist_new / 111.3))
            ]
            misfit.append(((tt_old - tt_new) - (det_time_old - new_time)) ** 2)
        # matplotlib.use("agg")
        # plt.plot(misfit)
        # plt.savefig("./obj/assoc/" + event_id + "_" + str(len(misfit)) + ".png")
        # plt.close()
        misfit_mean = np.sqrt(np.sum(np.array(misfit)) / len(misfit))
        assoc_win = self.params["assoc_win"]
        if misfit_mean < assoc_win:
            # if associated, accumulate the probabilities
            self.active_events[event_id]["loc_prob"] = (
                self.active_events[event_id]["loc_prob"] + new_prob
            )
            # add new detection to detections
            self.detections.data.loc[new_index, "event_id"] = event_id
            return True
        else:
            return False
    def update_events(self, event_id):
        """Re-locate and re-estimate one event and append a row to events.

        Computes the current best location, origin time and magnitude
        (with its 2/16/84/98 percent confidence bounds), counts the
        associated phases, and hands the assembled record to
        ``self.events.update``.
        """
        # get timestamp for the event trace (current UTC time)
        dt = datetime.datetime.now(datetime.timezone.utc)
        utc_time = dt.replace(tzinfo=datetime.timezone.utc)
        cloud_t = utc_time.timestamp()
        # Update location
        best_lat, best_lon, best_depth, best_orig_time = self.get_best_location(
            event_id
        )
        # Update magnitude
        magnitude, mconf2, mconf16, mconf84, mconf98 = self.get_magnitude(
            event_id, best_lat, best_lon
        )
        # Number of associated phases
        num_assoc = len(
            self.detections.data[self.detections.data["event_id"] == event_id]
        )
        # Add line in events
        new_event = {
            "event_id": event_id,
            "cloud_t": cloud_t,
            "orig_time": best_orig_time,
            "lat": best_lat,
            "lon": best_lon,
            "dep": best_depth,
            "mag": magnitude,
            "mconf2": mconf2,
            "mconf16": mconf16,
            "mconf84": mconf84,
            "mconf98": mconf98,
            "num_assoc": num_assoc,
        }
        self.events.update(new_event)
### LOCATION FUNCTIONS ###
def prior_loc(self):
"""
This function sets the prior probability distribution for earthquake location
The function is rather a placeholder for a more sophisticated initial distrubution
given by historical seismicity etc.
"""
loc_prob = np.zeros_like(self.travel_times.grid_lat)
return loc_prob
    def get_loc_not_yet_arrived(self, event_id, new_detection):
        """Initialise the location probability grid for a new event.

        With enough active stations the epicenter is constrained to the
        Voronoi cell of the first-detecting station (none of the other
        stations has detected the event yet); with 3 or fewer stations
        the whole grid is equally likely.
        """
        # get the station with the first detection
        first_device = new_detection["device_id"]
        first_device_lat = self.devices.data[
            self.devices.data["device_id"] == first_device
        ]["latitude"].to_list()[0]
        first_device_lon = self.devices.data[
            self.devices.data["device_id"] == first_device
        ]["longitude"].to_list()[0]
        # get location of all the detected device
        loc_det = [(first_device_lon, first_device_lat)]
        # get all the not-yet arrived devices
        nya_devices, _ = self.get_active_devices_ingrid(event_id)
        loc_nya_lat = nya_devices["latitude"].to_list()
        loc_nya_lon = nya_devices["longitude"].to_list()
        loc_nya = list(zip(loc_nya_lon, loc_nya_lat))
        # append the loc_det at the beginning
        loc_all = loc_det + loc_nya
        loc_all = list(set(loc_all))
        index = loc_all.index(loc_det[0])
        if len(loc_all) > 3:
            # compute the Voronoi cells
            # NOTE(review): this relies on scipy.spatial being importable
            # after a bare "import scipy" -- verify scipy.spatial is
            # explicitly imported somewhere.
            vor = scipy.spatial.Voronoi(loc_all)
            regions, vertices = self.voronoi_finite_polygons_2d(vor)
            # get the lat and lon grid
            lat_grid = self.travel_times.grid_lat + loc_det[0][1]
            lon_grid = self.travel_times.grid_lon + loc_det[0][0]
            # get the polygon around the device with detection
            polygon = vertices[regions[index]]
            # get the points in the polygon
            points = np.concatenate(
                (
                    np.reshape(lon_grid, (lon_grid.size, 1)),
                    np.reshape(lat_grid, (lat_grid.size, 1)),
                ),
                axis=1,
            )
            inside, _ = inpoly2(points, polygon)
            # change the points in the polygons to 1 and out of the polygon to 0
            inside = inside.reshape(lon_grid.shape)
            inside[inside == True] = 1
            inside[inside == False] = 0
        else:
            inside = np.ones_like(self.travel_times.grid_lat)
        # combine the prior with the Voronoi-cell mask
        loc_prior = self.prior_loc()
        best_prob = loc_prior + inside
        # and replace the prob with the best prob
        self.active_events[event_id] = {"loc_prob": best_prob}
    def get_best_location(self, event_id, assoc=False, add_prob=0):
        """Return the current best (lat, lon, depth, origin_time).

        The epicenter is the grid cell with the highest accumulated
        probability; *add_prob* lets the associator test a candidate
        probability surface without committing it. With fewer than two
        associated phases (and ``assoc=False``) the first-detecting
        station's position is used instead. Depth is fixed by
        ``params["eq_depth"]``.
        """
        # get first detection
        first_detection = self.get_first_detection(event_id)
        first_device_id = first_detection["device_id"]
        first_time = first_detection["cloud_t"]
        # get the first device latitude and longitude
        fist_dev_lat = self.devices.data[
            self.devices.data["device_id"] == first_device_id
        ]["latitude"].iloc[0]
        fist_dev_lon = self.devices.data[
            self.devices.data["device_id"] == first_device_id
        ]["longitude"].iloc[0]
        # absolute lat/lon grids centred on the first device
        lat = self.travel_times.grid_lat + fist_dev_lat
        lon = self.travel_times.grid_lon + fist_dev_lon
        # initial probability is equal to the prior
        loc_prob = self.active_events[event_id]["loc_prob"]
        # add additional probability (for calling the function by the associator)
        loc_prob = loc_prob + add_prob
        # get best location
        num_assoc = self.get_number_of_assoc(event_id)
        # if there is only one associated phase, the best location is the station location
        if all([assoc == False, num_assoc < 2]):
            best_lat = fist_dev_lat
            best_lon = fist_dev_lon
            best_depth = self.params["eq_depth"]  # depth is fixed for all
        else:
            best_lat = lat[loc_prob == loc_prob.max()][0]
            best_lon = lon[loc_prob == loc_prob.max()][0]
            best_depth = self.params["eq_depth"]  # depth is fixed for all
        # get origin time based on the location and the first detection:
        # subtract the travel time from the epicenter to the first station
        device_grid = self.get_device_tt_grid(
            first_device_id, first_device_id, self.params
        )
        sta_travel_time = device_grid[loc_prob == loc_prob.max()]
        best_orig_time = first_time - sta_travel_time[0]
        return best_lat, best_lon, best_depth, best_orig_time
### MAGNITUDE FUNCTIONS ###
def prior_mag(self):
"""
This function sets the prior probability distribution for magnitude
It uses the concept of magnitude of completeness and exponential
decay of probability with increasing magnitude
The prior probability distribution is a lineary increasing function
(in a log10 space) from the Mc-2 to Mc (Mc is the magnitude of completenes).
It peaks at the Mc and decreases to 0 at magnitude 10 with the slope of
b_value (set to 1 by default)
"""
prior_type = self.params["prior_type"]
mc = self.params["mc"]
b_value = self.params["b_value"]
# set limits on magnitude and the discretization step
mag_step = 0.01
mag_min = 0
mag_max = 10
mag_bins = np.arange(mag_min, mag_max, mag_step)
if prior_type == "gutenberg":
# create an array with zero probability everywhere
mag_prob = np.zeros(len(mag_bins))
mag_step = mag_bins[1] - mag_bins[0]
# the index of Mc
peak_index = int(mc * 1 / mag_step)
# the linear decrease with the b_value
max_value = (10 - mc) * b_value
num_of_steps = (10 - mc) * (1 / mag_step)
mag_prob[peak_index:] = max_value - np.arange(
0, max_value, max_value / num_of_steps
)
# the linear increase to the Mc
num_of_steps = int(2 * (1 / mag_step))
mag_prob[peak_index - num_of_steps : peak_index] = np.arange(
0, max_value, max_value / num_of_steps
)
mag_prob = np.ones(len(mag_bins))
# transform from linear to exponential
mag_prob = 10 ** mag_prob
elif prior_type == "constant":
mag_prob = np.ones(len(mag_bins))
# normalize probability density function
mag_prob = mag_prob / max(np.cumsum(mag_prob))
# return the probability function
return mag_prob, mag_bins
def get_magnitude(self, event_id, best_lat, best_lon):
"""
This function uses the station magnitude estimation and calculates
the probability distribution for the magnitude.
It also updates the most likely magnitude and the 68 and 96 percent
probability intervals
"""
# get magnitude bins and prior
mag_prob, mag_bins = self.prior_mag()
# get all detections
detections = self.detections.data[self.detections.data["event_id"] == event_id]
for _, det in detections.iterrows():
det_sta = det["device_id"]
pd_all = det[
["mag1", "mag2", "mag3", "mag4", "mag5", "mag6", "mag7", "mag8", "mag9"]
]
pd = [n for n in pd_all if n is not None]
try:
pd_type = "mag" + str(len(pd))
pd = pd[-1]
a = self.params[pd_type][0]
b = self.params[pd_type][1]
c = self.params[pd_type][2]
std = self.params[pd_type][3]
# Normalize the displacement for the epicentral distance of 1 km
dist = self.get_sta_delta(
event_id, sta=det_sta, eq_lat=best_lat, eq_lon=best_lon
)
pd = np.log10(pd) + c * np.log10(dist + 1)
# Calculate station magnitude from pd given the linear function with a, b, c
sta_mag_mu = a * pd + b
# generate the probability distribution for the station magnitude
p_m_pd = scipy.stats.norm(sta_mag_mu, std).pdf(mag_bins)
# multiply the prior and the current measurement (the Bayes happens in here)
mag_prob = np.multiply(mag_prob, p_m_pd)
except:
pass
# normalize the mag_prob
mag_prob = mag_prob / max(np.cumsum(mag_prob))
# get magnitude and confidence
magnitude = mag_bins[np.argmax(mag_prob)]
cum_prob = np.cumsum(mag_prob)
conf2 = mag_bins[np.argmin(abs(cum_prob - 0.02))]
conf16 = mag_bins[np.argmin(abs(cum_prob - 0.16))]
conf84 = mag_bins[np.argmin(abs(cum_prob - 0.84))]
conf98 = mag_bins[np.argmin(abs(cum_prob - 0.98))]
# set initial magnitude and confidence intervals
# (just a rough estimate)
if magnitude == 0:
magnitude = 4
conf2 = 2
conf16 = 3
conf84 = 5.5
conf98 = 8
return magnitude, conf2, conf16, conf84, conf98
### UTILITY FUNCTIONS ###
    def print_event_stats(self, event_id):
        """Log a progress line (and optional debug image) for an event.

        Prints only when the event has at least ``params["ndef_min"]``
        associated phases AND the latest update added a new association.
        With ``params["plot_event"]`` set, also dumps the location
        probability grid as a PNG under ./obj/events/.
        """
        event = self.events.data[self.events.data["event_id"] == event_id].sort_values(
            "cloud_t"
        )
        num_assoc = event["num_assoc"]
        # print only
        # if there are more associated phases than ndef_min
        # AND
        # there is a new detection associated with the event
        if num_assoc.iloc[-1] >= self.params["ndef_min"]:
            magnitude = event["mag"].iloc[-1]
            best_lat = event["lat"].iloc[-1]
            best_lon = event["lon"].iloc[-1]
            # only report when the association count actually grew
            if np.diff(num_assoc)[-1] > 0:
                print(
                    "🔥 Earthquake in progress: event_id: "
                    + event_id
                    + " M "
                    + str(magnitude)
                    + " lat "
                    + str(best_lat)
                    + " lon "
                    + str(best_lon)
                    + " assoc "
                    + str(num_assoc.iloc[-1])
                )
                if self.params["plot_event"]:
                    # headless backend so plotting works without a display
                    matplotlib.use("agg")
                    plt.imshow(self.active_events[event_id]["loc_prob"])
                    plt.savefig(
                        "./obj/events/" + event_id + "_" + str(num_assoc.iloc[-1]) + ".png"
                    )
                    plt.close()
    def print_detection_stats(self, device_id, new_exist):
        """Log a line for a newly associated detection.

        *new_exist* is either "new" or "existing" and says whether the
        detection opened a new event or joined an existing one.
        """
        detection = self.detections.data[
            self.detections.data["device_id"] == device_id
        ].sort_values("cloud_t")
        # NOTE(review): cloud_t is a one-element-or-more Series here and
        # utcfromtimestamp() needs a scalar -- verify this device can only
        # have one detection, or that single-element float coercion is intended.
        cloud_t = detection["cloud_t"]
        # NOTE(review): iloc[0] picks this device's OLDEST detection after
        # the ascending sort -- confirm iloc[-1] (the newest) was not intended.
        event_id = detection["event_id"].iloc[0]
        print(
            "⭐ New detection: device_id: "
            + device_id
            + " at "
            + datetime.datetime.utcfromtimestamp(cloud_t).strftime("%Y-%m-%d %H:%M:%S")
            + ", assoc. with "
            + new_exist
            + " event_id: "
            + event_id
        )
def voronoi_finite_polygons_2d(self, vor, radius=None):
"""
Reconstruct infinite voronoi regions in a 2D diagram to finite
regions.
Parameters
----------
vor : Voronoi
Input diagram
radius : float, optional
Distance to 'points at infinity'.
Returns
-------
regions : list of tuples
Indices of vertices in each revised Voronoi regions.
vertices : list of tuples
Coordinates for revised Voronoi vertices. Same as coordinates
of input vertices, with 'points at infinity' appended to the
end.
Credit: <NAME>, github: pv
"""
if vor.points.shape[1] != 2:
raise ValueError("Requires 2D input")
new_regions = []
new_vertices = vor.vertices.tolist()
center = vor.points.mean(axis=0)
if radius is None:
radius = vor.points.ptp().max() * 2
# Construct a map containing all ridges for a given point
all_ridges = {}
for (p1, p2), (v1, v2) in zip(vor.ridge_points, vor.ridge_vertices):
all_ridges.setdefault(p1, []).append((p2, v1, v2))
all_ridges.setdefault(p2, []).append((p1, v1, v2))
# Reconstruct infinite regions
for p1, region in enumerate(vor.point_region):
vertices = vor.regions[region]
if all(v >= 0 for v in vertices):
# finite region
new_regions.append(vertices)
continue
# reconstruct a non-finite region
ridges = all_ridges[p1]
new_region = [v for v in vertices if v >= 0]
for p2, v1, v2 in ridges:
if v2 < 0:
v1, v2 = v2, v1
if v1 >= 0:
# finite ridge: already in the region
continue
# Compute the missing endpoint of an infinite ridge
t = vor.points[p2] - vor.points[p1] # tangent
t /= np.linalg.norm(t)
n = np.array([-t[1], t[0]]) # normal
midpoint = vor.points[[p1, p2]].mean(axis=0)
direction = np.sign(np.dot(midpoint - center, n)) * n
far_point = vor.vertices[v2] + direction * radius
new_region.append(len(new_vertices))
new_vertices.append(far_point.tolist())
# sort region counterclockwise
vs = np.asarray([new_vertices[v] for v in new_region])
c = vs.mean(axis=0)
angles = np.arctan2(vs[:, 1] - c[1], vs[:, 0] - c[0])
new_region = np.array(new_region)[np.argsort(angles)]
# finish
new_regions.append(new_region.tolist())
return new_regions, np.asarray(new_vertices)
def globe_distance(self, lat1, lon1, lat2, lon2):
# approximate radius of earth in km
R = 6373.0
lat1 = math.radians(lat1)
lon1 = math.radians(lon1)
lat2 = math.radians(lat2)
lon2 = math.radians(lon2)
dlon = lon2 - lon1
dlat = lat2 - lat1
a = (
math.sin(dlat / 2) ** 2
+ math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2
)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
distance = R * c
return distance
    def get_sta_delta(self, event_id, sta, **kwargs):
        """Epicentral distance (km) between station *sta* and the event.

        The epicenter can be overridden with ``eq_lat``/``eq_lon``
        keyword arguments; otherwise the latest located position of
        *event_id* from the events table is used.
        """
        # NOTE(review): without .iloc[0] these are one-element Series, so
        # the returned distance is a Series as well -- confirm callers
        # expect that (several use it in further arithmetic).
        sta_lat = self.devices.data[self.devices.data["device_id"] == sta]["latitude"]
        sta_lon = self.devices.data[self.devices.data["device_id"] == sta]["longitude"]
        if "eq_lat" in kwargs.keys():
            eq_lat = kwargs["eq_lat"]
        else:
            eq_lat = self.events.data[self.events.data["event_id"] == event_id][
                "lat"
            ].iloc[-1]
        if "eq_lon" in kwargs.keys():
            eq_lon = kwargs["eq_lon"]
        else:
            eq_lon = self.events.data[self.events.data["event_id"] == event_id][
                "lon"
            ].iloc[-1]
        epic_dist = self.globe_distance(sta_lat, sta_lon, eq_lat, eq_lon)
        return epic_dist
def get_first_detection(self, event_id):
all_detections = self.detections.data[
self.detections.data["event_id"] == event_id
]
all_detections = all_detections.sort_values("cloud_t")
first_detection = all_detections.iloc[0]
return first_detection
def get_sigma(self, event_id, new_device, det_device):
"""
Get sigma from distances between the detections and easrthquakes
"""
# if constant sigma is chosen
if self.params["sigma_type"] == "const":
sigma = self.params["sigma_const"]
# if sigma is computed from the sigmoid function
elif self.params["sigma_type"] == "linear":
try:
dist1 = self.get_sta_delta(event_id, new_device)
dist2 = self.get_sta_delta(event_id, det_device)
dist_ave = (dist1 + dist2) / 2
sigma = dist_ave * 0.05 + 1
if sigma > 8:
sigma = 8
except:
sigma = self.params["sigma_const"]
return sigma
def get_detections(self):
"""Get new detections from the detection table"""
# Get new detections
new_detections = self.detections.data[self.detections.data["event_id"].isnull()]
new_detections = new_detections.sort_values("cloud_t", ascending=True)
return new_detections
def time_since_last(self, event_id):
"""
Get time elapsed since the last detection
"""
# get timestamp for the received trace
dt = datetime.datetime.now(datetime.timezone.utc)
utc_time = dt.replace(tzinfo=datetime.timezone.utc)
cloud_t = utc_time.timestamp()
last_detection = self.detections.data[
self.detections.data["event_id"] == event_id
]["cloud_t"].to_numpy()
last_det_time = cloud_t - max(last_detection)
return last_det_time
    def get_device_tt_grid(self, device_id, first_device_id, params):
        """Return the travel-time grid window centred on *device_id*.

        The precomputed ``travel_times.tt_grid`` is larger than the
        location grid; this slices out the sub-grid whose centre lies at
        the device's position, expressed in the coordinate frame of the
        first-detecting device's location grid.
        """
        # get device latitude and longitude
        dev_lat = self.devices.data[self.devices.data["device_id"] == device_id][
            "latitude"
        ].iloc[0]
        dev_lon = self.devices.data[self.devices.data["device_id"] == device_id][
            "longitude"
        ].iloc[0]
        # get the first device latitude and longitude
        fist_dev_lat = self.devices.data[
            self.devices.data["device_id"] == first_device_id
        ]["latitude"].iloc[0]
        fist_dev_lon = self.devices.data[
            self.devices.data["device_id"] == first_device_id
        ]["longitude"].iloc[0]
        # get grid limits, centred on the first device
        lat_min = -params["lat_width"] / 4 + fist_dev_lat
        lat_max = params["lat_width"] / 4 + fist_dev_lat
        lon_min = -params["lon_width"] / 4 + fist_dev_lon
        lon_max = params["lon_width"] / 4 + fist_dev_lon
        step = params["step"]
        # first and last sample indices of the window inside tt_grid
        first_sample_lat = int(
            np.round(((lat_max - lat_min) - (dev_lat - lat_min)) * (1 / step))
        )
        last_sample_lat = first_sample_lat + int(self.travel_times.grid_lat.shape[0])
        first_sample_lon = int(
            np.round(((lon_max - lon_min) - (dev_lon - lon_min)) * (1 / step))
        )
        last_sample_lon = first_sample_lon + int(self.travel_times.grid_lat.shape[1])
        # slice the device window out of the big precomputed grid
        dev_grid = self.travel_times.tt_grid[
            first_sample_lat:last_sample_lat, first_sample_lon:last_sample_lon
        ]
        return dev_grid
def get_active_devices_ingrid(self, event_id):
"""Grabs all the devices that are sending data
This functions as a placeholder for more sophisticated function that
would grab active devices from some device SOH info
It also checks whether the test_device is situated within the original device grid
"""
first_detection = self.get_first_detection(event_id)
first_device = first_detection["device_id"]
# get the first device latitude and longitude
fist_dev_lat = self.devices.data[
self.devices.data["device_id"] == first_device
]["latitude"].iloc[0]
fist_dev_lon = self.devices.data[
self.devices.data["device_id"] == first_device
]["longitude"].iloc[0]
# get grid limits
lat_min = -self.params["lat_width"] / 4 + fist_dev_lat
lat_max = self.params["lat_width"] / 4 + fist_dev_lat
lon_min = -self.params["lon_width"] / 4 + fist_dev_lon
lon_max = self.params["lon_width"] / 4 + fist_dev_lon
try:
devices = self.devices.data[self.devices.data["device_id"] != first_device]
devices = devices[
(devices["latitude"] > lat_min) & (devices["latitude"] < lat_max)
]
devices = devices[
(devices["longitude"] > lon_min) & (devices["longitude"] < lon_max)
]
device_ids = devices["device_id"].to_list()
except:
devices = None
device_ids = None
return devices, device_ids
def get_number_of_assoc(self, event_id):
assoc = self.events.data[self.events.data["event_id"] == event_id].sort_values(
"cloud_t"
)
num_assoc = len(assoc)
return num_assoc
| [
"datetime.datetime.utcfromtimestamp",
"numpy.log10",
"math.sqrt",
"time.sleep",
"numpy.argsort",
"numpy.array",
"math.cos",
"numpy.arctan2",
"numpy.linalg.norm",
"numpy.arange",
"matplotlib.pyplot.imshow",
"numpy.multiply",
"numpy.reshape",
"numpy.asarray",
"numpy.diff",
"numpy.exp",
... | [((2710, 2770), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (["new_detection['cloud_t']"], {}), "(new_detection['cloud_t'])\n", (2744, 2770), False, 'import datetime\n'), ((4391, 4432), 'numpy.zeros_like', 'np.zeros_like', (['self.travel_times.grid_lat'], {}), '(self.travel_times.grid_lat)\n', (4404, 4432), True, 'import numpy as np\n'), ((7626, 7670), 'datetime.datetime.now', 'datetime.datetime.now', (['datetime.timezone.utc'], {}), '(datetime.timezone.utc)\n', (7647, 7670), False, 'import datetime\n'), ((9028, 9069), 'numpy.zeros_like', 'np.zeros_like', (['self.travel_times.grid_lat'], {}), '(self.travel_times.grid_lat)\n', (9041, 9069), True, 'import numpy as np\n'), ((14190, 14227), 'numpy.arange', 'np.arange', (['mag_min', 'mag_max', 'mag_step'], {}), '(mag_min, mag_max, mag_step)\n', (14199, 14227), True, 'import numpy as np\n'), ((17455, 17474), 'numpy.cumsum', 'np.cumsum', (['mag_prob'], {}), '(mag_prob)\n', (17464, 17474), True, 'import numpy as np\n'), ((22935, 22953), 'math.radians', 'math.radians', (['lat1'], {}), '(lat1)\n', (22947, 22953), False, 'import math\n'), ((22969, 22987), 'math.radians', 'math.radians', (['lon1'], {}), '(lon1)\n', (22981, 22987), False, 'import math\n'), ((23003, 23021), 'math.radians', 'math.radians', (['lat2'], {}), '(lat2)\n', (23015, 23021), False, 'import math\n'), ((23037, 23055), 'math.radians', 'math.radians', (['lon2'], {}), '(lon2)\n', (23049, 23055), False, 'import math\n'), ((25720, 25764), 'datetime.datetime.now', 'datetime.datetime.now', (['datetime.timezone.utc'], {}), '(datetime.timezone.utc)\n', (25741, 25764), False, 'import datetime\n'), ((693, 730), 'time.sleep', 'time.sleep', (["self.params['sleep_time']"], {}), "(self.params['sleep_time'])\n", (703, 730), False, 'import time\n'), ((10246, 10276), 'scipy.spatial.Voronoi', 'scipy.spatial.Voronoi', (['loc_all'], {}), '(loc_all)\n', (10267, 10276), False, 'import scipy\n'), ((10933, 10957), 'inpoly.inpoly2', 'inpoly2', 
(['points', 'polygon'], {}), '(points, polygon)\n', (10940, 10957), False, 'from inpoly import inpoly2\n'), ((11207, 11247), 'numpy.ones_like', 'np.ones_like', (['self.travel_times.grid_lat'], {}), '(self.travel_times.grid_lat)\n', (11219, 11247), True, 'import numpy as np\n'), ((14943, 14992), 'numpy.arange', 'np.arange', (['(0)', 'max_value', '(max_value / num_of_steps)'], {}), '(0, max_value, max_value / num_of_steps)\n', (14952, 14992), True, 'import numpy as np\n'), ((17414, 17433), 'numpy.argmax', 'np.argmax', (['mag_prob'], {}), '(mag_prob)\n', (17423, 17433), True, 'import numpy as np\n'), ((22458, 22507), 'numpy.asarray', 'np.asarray', (['[new_vertices[v] for v in new_region]'], {}), '([new_vertices[v] for v in new_region])\n', (22468, 22507), True, 'import numpy as np\n'), ((22561, 22605), 'numpy.arctan2', 'np.arctan2', (['(vs[:, 1] - c[1])', '(vs[:, 0] - c[0])'], {}), '(vs[:, 1] - c[1], vs[:, 0] - c[0])\n', (22571, 22605), True, 'import numpy as np\n'), ((22775, 22799), 'numpy.asarray', 'np.asarray', (['new_vertices'], {}), '(new_vertices)\n', (22785, 22799), True, 'import numpy as np\n'), ((27141, 27205), 'numpy.round', 'np.round', (['((lat_max - lat_min - (dev_lat - lat_min)) * (1 / step))'], {}), '((lat_max - lat_min - (dev_lat - lat_min)) * (1 / step))\n', (27149, 27205), True, 'import numpy as np\n'), ((27349, 27413), 'numpy.round', 'np.round', (['((lon_max - lon_min - (dev_lon - lon_min)) * (1 / step))'], {}), '((lon_max - lon_min - (dev_lon - lon_min)) * (1 / step))\n', (27357, 27413), True, 'import numpy as np\n'), ((5197, 5291), 'numpy.exp', 'np.exp', (['(-(grid_device_old - grid_device_new - det_time + new_time) ** 2 / (2 * \n sigma ** 2))'], {}), '(-(grid_device_old - grid_device_new - det_time + new_time) ** 2 / (2 *\n sigma ** 2))\n', (5203, 5291), True, 'import numpy as np\n'), ((14704, 14753), 'numpy.arange', 'np.arange', (['(0)', 'max_value', '(max_value / num_of_steps)'], {}), '(0, max_value, max_value / num_of_steps)\n', (14713, 14753), 
True, 'import numpy as np\n'), ((15331, 15350), 'numpy.cumsum', 'np.cumsum', (['mag_prob'], {}), '(mag_prob)\n', (15340, 15350), True, 'import numpy as np\n'), ((17184, 17213), 'numpy.multiply', 'np.multiply', (['mag_prob', 'p_m_pd'], {}), '(mag_prob, p_m_pd)\n', (17195, 17213), True, 'import numpy as np\n'), ((17324, 17343), 'numpy.cumsum', 'np.cumsum', (['mag_prob'], {}), '(mag_prob)\n', (17333, 17343), True, 'import numpy as np\n'), ((19102, 19123), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (19116, 19123), False, 'import matplotlib\n'), ((19140, 19192), 'matplotlib.pyplot.imshow', 'plt.imshow', (["self.active_events[event_id]['loc_prob']"], {}), "(self.active_events[event_id]['loc_prob'])\n", (19150, 19192), True, 'import matplotlib.pyplot as plt\n'), ((19344, 19355), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (19353, 19355), True, 'import matplotlib.pyplot as plt\n'), ((22017, 22034), 'numpy.linalg.norm', 'np.linalg.norm', (['t'], {}), '(t)\n', (22031, 22034), True, 'import numpy as np\n'), ((22055, 22078), 'numpy.array', 'np.array', (['[-t[1], t[0]]'], {}), '([-t[1], t[0]])\n', (22063, 22078), True, 'import numpy as np\n'), ((22631, 22651), 'numpy.array', 'np.array', (['new_region'], {}), '(new_region)\n', (22639, 22651), True, 'import numpy as np\n'), ((22652, 22670), 'numpy.argsort', 'np.argsort', (['angles'], {}), '(angles)\n', (22662, 22670), True, 'import numpy as np\n'), ((23138, 23156), 'math.sin', 'math.sin', (['(dlat / 2)'], {}), '(dlat / 2)\n', (23146, 23156), False, 'import math\n'), ((23271, 23283), 'math.sqrt', 'math.sqrt', (['a'], {}), '(a)\n', (23280, 23283), False, 'import math\n'), ((23285, 23301), 'math.sqrt', 'math.sqrt', (['(1 - a)'], {}), '(1 - a)\n', (23294, 23301), False, 'import math\n'), ((10748, 10788), 'numpy.reshape', 'np.reshape', (['lon_grid', '(lon_grid.size, 1)'], {}), '(lon_grid, (lon_grid.size, 1))\n', (10758, 10788), True, 'import numpy as np\n'), ((10810, 10850), 'numpy.reshape', 
'np.reshape', (['lat_grid', '(lat_grid.size, 1)'], {}), '(lat_grid, (lat_grid.size, 1))\n', (10820, 10850), True, 'import numpy as np\n'), ((16735, 16747), 'numpy.log10', 'np.log10', (['pd'], {}), '(pd)\n', (16743, 16747), True, 'import numpy as np\n'), ((18608, 18626), 'numpy.diff', 'np.diff', (['num_assoc'], {}), '(num_assoc)\n', (18615, 18626), True, 'import numpy as np\n'), ((23176, 23190), 'math.cos', 'math.cos', (['lat1'], {}), '(lat1)\n', (23184, 23190), False, 'import math\n'), ((23193, 23207), 'math.cos', 'math.cos', (['lat2'], {}), '(lat2)\n', (23201, 23207), False, 'import math\n'), ((23210, 23228), 'math.sin', 'math.sin', (['(dlon / 2)'], {}), '(dlon / 2)\n', (23218, 23228), False, 'import math\n'), ((6460, 6510), 'numpy.abs', 'np.abs', (["(tt_precalc['dist'] - epic_dist_old / 111.3)"], {}), "(tt_precalc['dist'] - epic_dist_old / 111.3)\n", (6466, 6510), True, 'import numpy as np\n'), ((6612, 6662), 'numpy.abs', 'np.abs', (["(tt_precalc['dist'] - epic_dist_new / 111.3)"], {}), "(tt_precalc['dist'] - epic_dist_new / 111.3)\n", (6618, 6662), True, 'import numpy as np\n'), ((6990, 7006), 'numpy.array', 'np.array', (['misfit'], {}), '(misfit)\n', (6998, 7006), True, 'import numpy as np\n'), ((16754, 16772), 'numpy.log10', 'np.log10', (['(dist + 1)'], {}), '(dist + 1)\n', (16762, 16772), True, 'import numpy as np\n'), ((17015, 17048), 'scipy.stats.norm', 'scipy.stats.norm', (['sta_mag_mu', 'std'], {}), '(sta_mag_mu, std)\n', (17031, 17048), False, 'import scipy\n'), ((22187, 22215), 'numpy.dot', 'np.dot', (['(midpoint - center)', 'n'], {}), '(midpoint - center, n)\n', (22193, 22215), True, 'import numpy as np\n'), ((19757, 19800), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['cloud_t'], {}), '(cloud_t)\n', (19791, 19800), False, 'import datetime\n')] |
from pathlib import Path
from numpy import ( sin, cos, exp, pi, tan, log, sinh, cosh, tanh, sinc,
sqrt, cbrt, angle, real, imag, abs,
arcsin, arccos, arctan, arcsinh, arccosh, arctanh)
from numpy import pi, e
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import animation
import scipy.linalg
import scipy as sp
import scipy.sparse
import scipy.sparse.linalg
from numba import njit
from schrodinger import util
import sys
from time import time
""" Original french comments from
https://github.com/Azercoco/Python-2D-Simulation-of-Schrodinger-Equation
Le programme simule le comportement d'un paquet d'onde gaussien suivant
l'équation de Schrödinger. L'algorithme utilisé est la méthode
Alternating direction implicit method.
La simulation permet de configurer un potentiel constant avec le temps
ainsi que la présence d'obstacles (qui sont gérés comme des barrières
de potentiel très élévées).
La fonction d'onde complexe est affichée en convertissant les nombres
complexes en format de couleur HSV.
x , y : Les positions de départ du paquet d'onde
Kx, Ky : Ses nombres d'onde
Ax, Ay : Ses facteurs d'étalements selon x et y
V : L'expression du potentiel
O : L'expression de la présence d'obstacles
Le potentiel et la présence d'obstacle doivent être exprimés comme des
expressions Python valides dépendant de x et y (valant respectivement
un float et un boolean) car le progamme utilise la fonction Python
eval() pour les évaluer.
"""
""" Translated by Google Translate
https://github.com/Azercoco/Python-2D-Simulation-of-Schrodinger-Equation
The program simulates the behavior of a Gaussian wave packet following the
Schrödinger's equation. The algorithm used is the method
Alternating direction implicit method.
The simulation makes it possible to configure a constant potential over time
as well as the presence of obstacles (which are managed as barriers
very high potential).
Complex wave function is displayed by converting numbers
complex in HSV color format.
x, y: The starting positions of the wave packet
Kx, Ky: Its wavenumbers along x and y
Ax, Ay: Its spreading factors along x and y
V: The expression of potential
O: The expression of the presence of obstacles
The potential and the presence of obstacles must be expressed as
valid Python expressions depending on x and y (respectively
a float and a boolean) because the program uses the Python function
eval () to evaluate them.
"""
class Field:
    """Potential / obstacle configuration evaluated from Python expressions.

    The potential and the obstacle predicate are user-supplied Python
    expression strings in the variables ``x`` and ``y``; they are evaluated
    with ``eval()``.

    SECURITY NOTE: ``eval()`` executes arbitrary code — only use this class
    with trusted expression strings.
    """

    def __init__(self):
        # Expression strings; validated on assignment via setPotential/setObstacle.
        self.potential_expr = None
        self.obstacle_expr = None

    def setPotential(self, expr):
        """Set the potential expression V(x, y) and validate it."""
        self.potential_expr = expr
        self.test_pot_expr()

    def setObstacle(self, expr):
        """Set the obstacle predicate expression and validate it."""
        self.obstacle_expr = expr
        self.test_obs_expr()

    def test_pot_expr(self):
        """Validate the potential expression; fall back to '0' on any error."""
        # x and y must exist in the local scope for eval()
        x = 0
        y = 0
        try:
            eval(self.potential_expr)
        except Exception:  # narrowed from bare except: don't swallow KeyboardInterrupt
            print(self.potential_expr)
            print('Potential calculation error: set to 0 by default')
            self.potential_expr = '0'

    def test_obs_expr(self):
        """Validate the obstacle expression; fall back to 'False' on any error."""
        # x and y must exist in the local scope for eval()
        x = 0
        y = 0
        try:
            eval(self.obstacle_expr)
        except Exception:  # narrowed from bare except
            print('Error setting obstacle: Set to False by default')
            self.obstacle_expr = 'False'

    def isObstacle(self, x, y):
        """Return True if (x, y) lies inside an obstacle, False otherwise."""
        a = False
        try:
            a = eval(self.obstacle_expr)
        except Exception:  # narrowed from bare except
            print(f'Invalid obstacle: {self.obstacle_expr}')
        return a

    def getPotential(self, x, y):
        """Return the (complex) potential at (x, y); 0 on evaluation error."""
        a = 0 + 0j
        try:
            a = eval(self.potential_expr)
        except Exception:  # narrowed from bare except
            print(f'Invalid potential: {self.potential_expr}')
        return a
def solve(wf, V_x, V_y, HX, HY, N, step, delta_t):
    """Advance the wave function by one ADI time step.

    Performs an implicit half-step along x followed by an implicit
    half-step along y, solving the sparse systems HX and HY.
    """
    # --- implicit half-step along x ---
    psi_x = util.x_concatenate(wf, N)
    d2y_along_x = util.x_concatenate(util.dy_square(wf, N, step), N)
    rhs_x = psi_x + (1j * delta_t / 2) * (d2y_along_x - V_x * psi_x)
    wf = util.x_deconcatenate(scipy.sparse.linalg.spsolve(HX, rhs_x), N)
    # --- implicit half-step along y ---
    psi_y = util.y_concatenate(wf, N)
    d2x_along_y = util.y_concatenate(util.dx_square(wf, N, step), N)
    rhs_y = psi_y + (1j * delta_t / 2) * (d2x_along_y - V_y * psi_y)
    wf = util.y_deconcatenate(scipy.sparse.linalg.spsolve(HY, rhs_y), N)
    return wf
class Simulate:
    """Driver for the 2D Schrödinger-equation simulation.

    Builds the potential field, the initial Gaussian wave packet(s) and the
    sparse ADI half-step matrices, then evolves the wave function frame by
    frame via ``solve``.
    """

    SIZE = 10  # simulation self.size
    # NOTE: the FPS/DURATION/DELTA_T triples below are per-scenario presets;
    # only the LAST assignment of each class attribute takes effect.
    # wavefunction collision
    FPS = 60
    DURATION = 5  # duration in seconds
    DELTA_T = 0.005  # 0.125 #time elapsed per second of video
    # wavefunction collapse
    FPS = 60
    DURATION = 5  # duration in seconds
    DELTA_T = 0.01  # 0.125 #time elapsed per second of video
    # wavefunction collapse 2 & 3
    FPS = 60
    DURATION = 5  # duration in seconds
    DELTA_T = 0.03  # 0.125 #time elapsed per second of video
    # wavefunction collapse 4
    FPS = 60
    DURATION = 5  # duration in seconds
    DELTA_T = 0.005  # 0.125 #time elapsed per second of video
    # entanglement1
    FPS = 60
    DURATION = 5  # duration in seconds
    DELTA_T = 0.02  # 0.125 #time elapsed per second of video
    # wavefunction movement
    FPS = 60
    DURATION = 5  # duration in seconds
    DELTA_T = 0.005  # 0.125 #time elapsed per second of video

    def __init__(self, N, collapse=False):
        """Create the field and the initial wave packet.

        Args:
            N: dimension in number of grid points per side.
            collapse: stored flag indicating whether wavefunction collapse
                will be simulated by the caller.
        """
        self.N = N  # dimension in number of points of the simulation
        self.FRAMES = self.DURATION * self.FPS
        self.field = Field()
        # Potential as a function of x and y
        self.field.setPotential("0")  # Ex: x**2+y**2"
        # Obstacle: boolean expression in fct of x and y
        # (set to False if you do not want an obstacle)
        obstacles = ("(x > 0.5 and x < 1 and not "
                     "((y > 0.25 and y < 0.75) or "
                     "(y < -0.25 and y > -0.75)))")
        obstacles = "False"  # obstacle disabled; drop this line to enable the slits above
        self.collapse = collapse
        self.field.setObstacle(obstacles)
        self.size = self.SIZE
        #self.dataset = np.zeros((self.FRAMES,self.N,self.N), dtype='c16')
        print(16*self.N*self.N*1e-9, 'GB of memory')
        #if self.dataset.nbytes > 100e9:
        #    raise(Exception("TOO MUCH DATA FOR MEMORY"))
        self.simulation_initialize()

    """ ------ INITIAL CONDITIONS FOR WAVEFUNCTION COLLISION
    x0 = [0, 0],
    y0 = [0,1],
    #number of waves
    k_x = [0, 0],#5000
    k_y = [0, 90000],#2500,
    #spreading
    a_x = [.2, .2], #.2, #.1,#.33,#0.05#.33
    a_y = [.2, .2], #.2, #.1,#.33,#0.05#.33
    """
    """ ------ INITIAL CONDITIONS FOR WAVEFUNCTION COLLISION 1
    x0 = [0,0],
    y0 = [0,1.5],
    #number of waves
    k_x = [10, 0],#5000
    k_y = [0, 90000],#2500,
    #spreading
    a_x = [.15, .15], #.2, #.1,#.33,#0.05#.33
    a_y = [.15, .15], #.2, #.1,#.33,#0.05#.33
    """
    """ ------ INITIAL CONDITIONS FOR MOVEMENT SHOTS
    x0 = [0],
    y0 = [0],
    #number of waves
    k_x = [5000],
    k_y = [2500],#2500,
    #spreading
    a_x = [.2], #.2, #.1,#.33,#0.05#.33
    a_y = [.2], #.2, #.1,#.33,#0.05#.33
    """
    """ ------ INITIAL CONDITIONS FOR WAVEFUNCTION COLLAPSE
    x0 = [0],#0],
    y0 = [0],
    #number of waves
    k_x = [50],
    k_y = [25],#2500,
    #spreading
    a_x = [.25], #.2, #.1,#.33,#0.05#.33
    a_y = [.25], #.2, #.1,#.33,#0.05#.33
    """
    """ ------ INITIAL CONDITIONS FOR WAVEFUNCTION COLLAPSE 3
    x0 = [0],#0],
    y0 = [0],
    #number of waves
    k_x = [50],
    k_y = [25],#2500,
    #spreading
    a_x = [.28], #.2, #.1,#.33,#0.05#.33
    a_y = [.28], #.2, #.1,#.33,#0.05#.33
    """
    """ ------ INITIAL CONDITIONS FOR ENTANGLEMENT
    x0 = [0, 0],
    y0 = [1,-1],
    #number of waves
    k_x = [0, 0],#5000
    k_y = [-3000, 3000],#2500,
    #spreading
    a_x = [.15, .15], #.2, #.1,#.33,#0.05#.33
    a_y = [.15, .15], #.2, #.1,#.33,#0.05#.33
    """

    def simulation_initialize(self,
                              #characteristics of the wave packet gaussian 2D
                              #centre
                              x0 = [0],
                              y0 = [0],
                              #number of waves
                              k_x = [5000],
                              k_y = [2500],#2500,
                              #spreading
                              a_x = [.2], #.2, #.1,#.33,#0.05#.33
                              a_y = [.2], #.2, #.1,#.33,#0.05#.33
                              # keep below the same
                              wall_potential = 1e10,
                              ):
        """Initialize the wave packet(s) and the sparse ADI matrices.

        Each list argument describes one Gaussian packet (centre, wave
        numbers, spreads); packets are summed after individual
        normalization. The list defaults are never mutated, so the
        shared-mutable-default pitfall does not apply here.
        """
        N = self.N
        step = self.SIZE/self.N
        delta_t = self.DELTA_T/self.FPS
        self.counter = 0
        # create points at all xy coordinates in meshgrid
        self.x_axis = np.linspace(-self.size/2, self.size/2, N)
        self.y_axis = np.linspace(-self.size/2, self.size/2, N)
        X, Y = np.meshgrid(self.x_axis, self.y_axis)
        # first packet: plane-wave phase times a Gaussian envelope
        n = 0
        phase = np.exp( 1j*(X*k_x[n] + Y*k_y[n]))
        px = np.exp( - ((x0[n] - X)**2)/(4*a_x[n]**2))
        py = np.exp( - ((y0[n] - Y)**2)/(4*a_y[n]**2))
        wave_function = phase*px*py
        norm = np.sqrt(util.integrate(np.abs(wave_function)**2, N, step))
        self.wave_function = wave_function/norm
        # remaining packets are normalized individually and summed in
        for n in range(1,len(x0)):
            phase = np.exp( 1j*(X*k_x[n] + Y*k_y[n]))
            px = np.exp( - ((x0[n] - X)**2)/(4*a_x[n]**2))
            py = np.exp( - ((y0[n] - Y)**2)/(4*a_y[n]**2))
            wave_function = phase*px*py
            norm = np.sqrt(util.integrate(np.abs(wave_function)**2, N, step))
            self.wave_function += wave_function/norm
        # 1D second-derivative stencil on the flattened N*N grid
        LAPLACE_MATRIX = sp.sparse.lil_matrix(-2*sp.sparse.identity(N*N))
        for i in range(N):
            for j in range(N-1):
                k = i*N + j
                LAPLACE_MATRIX[k,k+1] = 1
                LAPLACE_MATRIX[k+1,k] = 1
        # potential vector in x-concatenated ordering
        self.V_x = np.zeros(N*N, dtype='c16')
        for j in range(N):
            for i in range(N):
                xx = i
                yy = N*j
                if self.field.isObstacle(self.x_axis[j], self.y_axis[i]):
                    self.V_x[xx+yy] = wall_potential
                else:
                    self.V_x[xx+yy] = self.field.getPotential(self.x_axis[j],
                                                              self.y_axis[i])
        # potential vector in y-concatenated ordering
        self.V_y = np.zeros(N*N, dtype='c16')
        for j in range(N):
            for i in range(N):
                xx = j*N
                yy = i
                if self.field.isObstacle(self.x_axis[i], self.y_axis[j]):
                    self.V_y[xx+yy] = wall_potential
                else:
                    self.V_y[xx+yy] = self.field.getPotential(self.x_axis[i],
                                                              self.y_axis[j])
        self.V_x_matrix = sp.sparse.diags([self.V_x], [0])
        self.V_y_matrix = sp.sparse.diags([self.V_y], [0])
        LAPLACE_MATRIX = LAPLACE_MATRIX/(step ** 2)
        # implicit ADI half-step operators (DIA format for fast solves)
        self.H1 = (1*sp.sparse.identity(N*N) - 1j*(delta_t/2)*(LAPLACE_MATRIX))
        self.H1 = sp.sparse.dia_matrix(self.H1)
        self.HX = (1*sp.sparse.identity(N*N) - 1j*(delta_t/2)*(LAPLACE_MATRIX - self.V_x_matrix))
        self.HX = sp.sparse.dia_matrix(self.HX)
        self.HY = (1*sp.sparse.identity(N*N) - 1j*(delta_t/2)*(LAPLACE_MATRIX - self.V_y_matrix))
        self.HY = sp.sparse.dia_matrix(self.HY)
        self.start_time = time()
        self.i_time = time()

    def simulate_frames(self):
        """Run the whole simulation, printing per-frame wall-clock time."""
        for f in range(self.FRAMES):
            start = time()
            # BUG FIX: this used to call the module-level name
            # `simulate_frame(f)`, which raised NameError (and would have
            # passed the frame index as the `save` flag).
            self.simulate_frame()
            print('>>>', time() - start)
        #dataname = f"C:/data/sim_{N}x{N}.npz"
        #np.savez(dataname, self.dataset)

    def simulate_frame(self, save=False, debug=True):
        """ evolve according to schrodinger equation """
        N = self.N
        step = self.SIZE/self.N
        delta_t = self.DELTA_T/self.FPS
        self.wave_function = solve(self.wave_function,
                                   self.V_x, self.V_y,
                                   self.HX, self.HY,
                                   N, step, delta_t)
        if save:
            self.save_wave(self.wave_function)
        if debug:
            self.print_update()
        self.counter += 1
        #if self.counter == self.FRAMES:
        #    self.simulation_initialize()
        return self.wave_function

    def collapse_wavefunction(self):
        """Collapse to one position sampled from |psi|^2 and re-initialize.

        Returns:
            The grid indices (row, col) of the sampled position.
        """
        dist = np.abs(self.wave_function)**2 # joint pmf
        dist /= dist.sum() # it has to be normalized
        # generate the set of all x,y pairs represented by the pmf
        pairs = np.indices(dimensions=(self.N, self.N)).T # here are all of the x,y pairs
        # make n random selections from the flattened pmf without replacement
        # whether you want replacement depends on your application
        n = 1
        inds = np.random.choice(np.arange(self.N**2),
                                p=dist.reshape(-1),
                                size=n,
                                replace=False)
        # inds is the set of n randomly chosen indicies into the flattened dist array...
        # therefore the random x,y selections
        # come from selecting the associated elements
        # from the flattened pairs array
        selection_place = pairs.reshape(-1,2)[inds][0]
        # convert to sim coordinates
        selection = (selection_place/self.N -.5) * (self.size)
        selection = [selection[0], selection[1], 0, 0]  # zero momentum after collapse
        # collapsewidth
        cw = 10 / self.N
        print(">>> COLLAPSED TO =", selection, cw)
        self.simulation_initialize(x0=[selection[0]],
                                   y0=[selection[1]],
                                   k_x = [selection[2]],
                                   k_y = [selection[3]],
                                   a_x=[cw], a_y=[cw])
        return selection_place

    def dual_collapse_wavefunction(self):
        """Collapse to two well-separated positions sampled from |psi|^2.

        Draws 10 candidate positions, keeps the first one, then searches the
        remaining candidates for one at distance >= 2 from it; the pair is
        re-initialized as two packets with opposite momenta.

        NOTE(review): if no candidate is far enough, the LAST candidate is
        used even though it is close — confirm this fallback is intended.

        Returns:
            The grid indices of the second sampled position.
        """
        dist = np.abs(self.wave_function)**2 # joint pmf
        dist /= dist.sum() # it has to be normalized
        # generate the set of all x,y pairs represented by the pmf
        pairs = np.indices(dimensions=(self.N, self.N)).T # here are all of the x,y pairs
        # make n random selections from the flattened pmf without replacement
        # whether you want replacement depends on your application
        n = 10
        inds = np.random.choice(np.arange(self.N**2),
                                p=dist.reshape(-1),
                                size=n,
                                replace=False)
        # inds is the set of n randomly chosen indicies into the flattened dist array...
        # therefore the random x,y selections
        # come from selecting the associated elements
        # from the flattened pairs array
        selection_place = pairs.reshape(-1,2)[inds][0]
        # convert to sim coordinates
        selection = (selection_place/self.N -.5) * (self.size)
        momx, momy = 700, 1000
        selection1 = [selection[0], selection[1], momx, momy]
        # collapsewidth
        cw = 10 / self.N
        print(">>> COLLAPSED TO =", selection1, cw)
        for i in range(1, n):
            selection_place = pairs.reshape(-1,2)[inds][i]
            # convert to sim coordinates
            selection = (selection_place/self.N -.5) * (self.size)
            normto1 = np.linalg.norm(selection - np.array([selection1[0],selection1[1]]))
            if normto1 < 2:
                print("CONTINUE, dist:", normto1)
                continue
            else:
                print("FOUND IT!, dist:", normto1)
                break
        selection2 = [selection[0], selection[1], -momx, -momy]
        # collapsewidth
        cw = 10 / self.N
        print(">>> COLLAPSED TO =", selection2, cw)
        self.simulation_initialize(x0=[selection1[0], selection2[0]],
                                   y0=[selection1[1], selection2[1]],
                                   k_x = [selection1[2], selection2[2]],
                                   k_y = [selection1[3], selection2[3]],
                                   a_x=[cw, cw], a_y=[cw, cw])
        return selection_place

    def save_wave(self, data):
        """Store one frame into the preallocated dataset array.

        NOTE(review): self.dataset is commented out in __init__, so calling
        this without re-enabling it raises AttributeError.
        """
        self.dataset[self.counter,:,:] = data

    def print_update(self):
        """Print progress, wave-function norm and an ETA estimate."""
        N = self.N
        step = self.SIZE/self.N
        delta_t = self.DELTA_T/self.FPS
        # norm should stay ~1.0; drift indicates numerical error
        NORM = np.sqrt(util.integrate(np.abs(self.wave_function)**2, N, step))
        report = self.counter/(self.DURATION*self.FPS)
        M = 20
        k = int(report*M)
        l = M - k
        to_print = '[' + k*'#' + l*'-'+ '] {0:.3f} %'
        d_time = time() - self.start_time
        ETA = (time()-self.i_time) * (self.FRAMES-self.counter) # (time / frame) * frames remaining
        ETA = (ETA / 60) # sec to min ... / 60 # seconds to hours
        ETA = np.modf(ETA)
        ETA = int(ETA[1]), int(round(ETA[0]*60))
        ETA = str(ETA[0]) + ":" + str(ETA[1]).zfill(2)
        self.i_time = time()
        print('--- Simulation in progress ---')
        print(to_print.format(report*100))
        print('Time elapsed : {0:.1f} s'.format(d_time))
        print(f'Estimated time remaining : {ETA}')
        print('Function standard : {0:.3f} '.format(NORM))
| [
"schrodinger.util.dy_square",
"numpy.array",
"numpy.arange",
"schrodinger.util.x_concatenate",
"numpy.exp",
"numpy.linspace",
"scipy.sparse.diags",
"numpy.meshgrid",
"schrodinger.util.dx_square",
"numpy.abs",
"numpy.indices",
"schrodinger.util.y_deconcatenate",
"scipy.sparse.identity",
"sc... | [((3703, 3728), 'schrodinger.util.x_concatenate', 'util.x_concatenate', (['wf', 'N'], {}), '(wf, N)\n', (3721, 3728), False, 'from schrodinger import util\n'), ((3959, 3996), 'schrodinger.util.x_deconcatenate', 'util.x_deconcatenate', (['U_wrt_x_plus', 'N'], {}), '(U_wrt_x_plus, N)\n', (3979, 3996), False, 'from schrodinger import util\n'), ((4016, 4041), 'schrodinger.util.y_concatenate', 'util.y_concatenate', (['wf', 'N'], {}), '(wf, N)\n', (4034, 4041), False, 'from schrodinger import util\n'), ((4274, 4311), 'schrodinger.util.y_deconcatenate', 'util.y_deconcatenate', (['U_wrt_y_plus', 'N'], {}), '(U_wrt_y_plus, N)\n', (4294, 4311), False, 'from schrodinger import util\n'), ((3774, 3801), 'schrodinger.util.dy_square', 'util.dy_square', (['wf', 'N', 'step'], {}), '(wf, N, step)\n', (3788, 3801), False, 'from schrodinger import util\n'), ((4087, 4114), 'schrodinger.util.dx_square', 'util.dx_square', (['wf', 'N', 'step'], {}), '(wf, N, step)\n', (4101, 4114), False, 'from schrodinger import util\n'), ((8131, 8176), 'numpy.linspace', 'np.linspace', (['(-self.size / 2)', '(self.size / 2)', 'N'], {}), '(-self.size / 2, self.size / 2, N)\n', (8142, 8176), True, 'import numpy as np\n'), ((8190, 8235), 'numpy.linspace', 'np.linspace', (['(-self.size / 2)', '(self.size / 2)', 'N'], {}), '(-self.size / 2, self.size / 2, N)\n', (8201, 8235), True, 'import numpy as np\n'), ((8242, 8279), 'numpy.meshgrid', 'np.meshgrid', (['self.x_axis', 'self.y_axis'], {}), '(self.x_axis, self.y_axis)\n', (8253, 8279), True, 'import numpy as np\n'), ((8302, 8342), 'numpy.exp', 'np.exp', (['(1.0j * (X * k_x[n] + Y * k_y[n]))'], {}), '(1.0j * (X * k_x[n] + Y * k_y[n]))\n', (8308, 8342), True, 'import numpy as np\n'), ((8344, 8389), 'numpy.exp', 'np.exp', (['(-(x0[n] - X) ** 2 / (4 * a_x[n] ** 2))'], {}), '(-(x0[n] - X) ** 2 / (4 * a_x[n] ** 2))\n', (8350, 8389), True, 'import numpy as np\n'), ((8394, 8439), 'numpy.exp', 'np.exp', (['(-(y0[n] - Y) ** 2 / (4 * a_y[n] ** 2))'], {}), 
'(-(y0[n] - Y) ** 2 / (4 * a_y[n] ** 2))\n', (8400, 8439), True, 'import numpy as np\n'), ((9123, 9151), 'numpy.zeros', 'np.zeros', (['(N * N)'], {'dtype': '"""c16"""'}), "(N * N, dtype='c16')\n", (9131, 9151), True, 'import numpy as np\n'), ((9449, 9477), 'numpy.zeros', 'np.zeros', (['(N * N)'], {'dtype': '"""c16"""'}), "(N * N, dtype='c16')\n", (9457, 9477), True, 'import numpy as np\n'), ((9782, 9814), 'scipy.sparse.diags', 'sp.sparse.diags', (['[self.V_x]', '[0]'], {}), '([self.V_x], [0])\n', (9797, 9814), True, 'import scipy as sp\n'), ((9836, 9868), 'scipy.sparse.diags', 'sp.sparse.diags', (['[self.V_y]', '[0]'], {}), '([self.V_y], [0])\n', (9851, 9868), True, 'import scipy as sp\n'), ((10008, 10037), 'scipy.sparse.dia_matrix', 'sp.sparse.dia_matrix', (['self.H1'], {}), '(self.H1)\n', (10028, 10037), True, 'import scipy as sp\n'), ((10146, 10175), 'scipy.sparse.dia_matrix', 'sp.sparse.dia_matrix', (['self.HX'], {}), '(self.HX)\n', (10166, 10175), True, 'import scipy as sp\n'), ((10284, 10313), 'scipy.sparse.dia_matrix', 'sp.sparse.dia_matrix', (['self.HY'], {}), '(self.HY)\n', (10304, 10313), True, 'import scipy as sp\n'), ((10337, 10343), 'time.time', 'time', ([], {}), '()\n', (10341, 10343), False, 'from time import time\n'), ((10361, 10367), 'time.time', 'time', ([], {}), '()\n', (10365, 10367), False, 'from time import time\n'), ((14878, 14890), 'numpy.modf', 'np.modf', (['ETA'], {}), '(ETA)\n', (14885, 14890), True, 'import numpy as np\n'), ((15004, 15010), 'time.time', 'time', ([], {}), '()\n', (15008, 15010), False, 'from time import time\n'), ((8625, 8665), 'numpy.exp', 'np.exp', (['(1.0j * (X * k_x[n] + Y * k_y[n]))'], {}), '(1.0j * (X * k_x[n] + Y * k_y[n]))\n', (8631, 8665), True, 'import numpy as np\n'), ((8668, 8713), 'numpy.exp', 'np.exp', (['(-(x0[n] - X) ** 2 / (4 * a_x[n] ** 2))'], {}), '(-(x0[n] - X) ** 2 / (4 * a_x[n] ** 2))\n', (8674, 8713), True, 'import numpy as np\n'), ((8719, 8764), 'numpy.exp', 'np.exp', (['(-(y0[n] - Y) ** 2 / (4 * 
a_y[n] ** 2))'], {}), '(-(y0[n] - Y) ** 2 / (4 * a_y[n] ** 2))\n', (8725, 8764), True, 'import numpy as np\n'), ((10443, 10449), 'time.time', 'time', ([], {}), '()\n', (10447, 10449), False, 'from time import time\n'), ((11170, 11196), 'numpy.abs', 'np.abs', (['self.wave_function'], {}), '(self.wave_function)\n', (11176, 11196), True, 'import numpy as np\n'), ((11331, 11370), 'numpy.indices', 'np.indices', ([], {'dimensions': '(self.N, self.N)'}), '(dimensions=(self.N, self.N))\n', (11341, 11370), True, 'import numpy as np\n'), ((11574, 11596), 'numpy.arange', 'np.arange', (['(self.N ** 2)'], {}), '(self.N ** 2)\n', (11583, 11596), True, 'import numpy as np\n'), ((12418, 12444), 'numpy.abs', 'np.abs', (['self.wave_function'], {}), '(self.wave_function)\n', (12424, 12444), True, 'import numpy as np\n'), ((12579, 12618), 'numpy.indices', 'np.indices', ([], {'dimensions': '(self.N, self.N)'}), '(dimensions=(self.N, self.N))\n', (12589, 12618), True, 'import numpy as np\n'), ((12823, 12845), 'numpy.arange', 'np.arange', (['(self.N ** 2)'], {}), '(self.N ** 2)\n', (12832, 12845), True, 'import numpy as np\n'), ((14686, 14692), 'time.time', 'time', ([], {}), '()\n', (14690, 14692), False, 'from time import time\n'), ((8956, 8981), 'scipy.sparse.identity', 'sp.sparse.identity', (['(N * N)'], {}), '(N * N)\n', (8974, 8981), True, 'import scipy as sp\n'), ((9936, 9961), 'scipy.sparse.identity', 'sp.sparse.identity', (['(N * N)'], {}), '(N * N)\n', (9954, 9961), True, 'import scipy as sp\n'), ((10056, 10081), 'scipy.sparse.identity', 'sp.sparse.identity', (['(N * N)'], {}), '(N * N)\n', (10074, 10081), True, 'import scipy as sp\n'), ((10194, 10219), 'scipy.sparse.identity', 'sp.sparse.identity', (['(N * N)'], {}), '(N * N)\n', (10212, 10219), True, 'import scipy as sp\n'), ((14723, 14729), 'time.time', 'time', ([], {}), '()\n', (14727, 14729), False, 'from time import time\n'), ((8502, 8523), 'numpy.abs', 'np.abs', (['wave_function'], {}), '(wave_function)\n', (8508, 8523), 
True, 'import numpy as np\n'), ((10488, 10494), 'time.time', 'time', ([], {}), '()\n', (10492, 10494), False, 'from time import time\n'), ((13666, 13706), 'numpy.array', 'np.array', (['[selection1[0], selection1[1]]'], {}), '([selection1[0], selection1[1]])\n', (13674, 13706), True, 'import numpy as np\n'), ((14484, 14510), 'numpy.abs', 'np.abs', (['self.wave_function'], {}), '(self.wave_function)\n', (14490, 14510), True, 'import numpy as np\n'), ((8829, 8850), 'numpy.abs', 'np.abs', (['wave_function'], {}), '(wave_function)\n', (8835, 8850), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2021 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Trax TF input pipeline."""
import collections
import functools
import json
import math
import os
import random
import re
from absl import logging
import gin
import numpy as np
import scipy
import t5.data
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
import tensorflow_datasets as tfds
import tensorflow_text as tf_text
from trax import fastmath
from trax.data import text_encoder
# How many examples from the stream to skip at random during training.
# For now, we skip at most 100K examples for efficiency.
# TODO(lukaszkaiser): can we improve efficiency, should that be changed?
_MAX_SKIP_EXAMPLES = 1e5  # NOTE: a float; callers passing it to random.randint must cast to int
def no_preprocess(dataset, training):
  """Identity preprocessing function: returns `dataset` unchanged."""
  del training  # unused; kept to satisfy the preprocess_fn interface
  return dataset
def t2t_problems():
  """Lazily import and return the T2T problems module (saves import time)."""
  from tensor2tensor import problems_colab  # pylint: disable=g-import-not-at-top
  return problems_colab
@gin.configurable()
def data_streams(dataset_name,
                 data_dir=None,
                 preprocess_fn=no_preprocess,
                 bare_preprocess_fn=None,
                 shuffle_buffer_size=1024,
                 eval_holdout_size=0,
                 input_name=None,
                 target_name=None):
  """Creates (train, eval) python streams for a TF dataset.

  Args:
    dataset_name: a TFDS or T2T dataset name; T2T names are prefixed 't2t_'.
    data_dir: data directory.
    preprocess_fn: pre-processing applied after targets are appended.
    bare_preprocess_fn: pre-processing applied before targets are appended.
    shuffle_buffer_size: size of the shuffle buffer.
    eval_holdout_size: float in [0, 1); if > 0, hold out this fraction of the
      training data for evaluation instead of using a VALIDATION split.
    input_name: optional name of the input feature.
    target_name: optional name of the target feature.

  Returns:
    A pair of zero-argument callables producing the train and eval streams.
  """
  data_dir = download_and_prepare(dataset_name, data_dir)
  cache = []

  def stream(which):
    """Build the TF datasets once, then serve train/eval from the cache."""
    if not cache:
      cache.append(
          _train_and_eval_streams(dataset_name, data_dir, preprocess_fn,
                                  bare_preprocess_fn, shuffle_buffer_size,
                                  eval_holdout_size, input_name, target_name))
    train_ds, eval_ds, cached_input_name = cache[0]
    chosen = train_ds if which == 'train' else eval_ds
    return dataset_to_stream(chosen, cached_input_name)

  return (lambda: stream('train')), (lambda: stream('eval'))
def dataset_to_stream(dataset, input_name):
  """Yields ready numpy batches (input, target[, mask]) from a tf.Dataset."""
  # All input-pipeline processing should be on CPU.
  for example in fastmath.dataset_as_numpy(dataset):
    features = example[0]
    inputs = features[input_name]
    targets = example[1]
    mask = features['mask'] if 'mask' in features else None
    # Some accelerators don't handle uint8 well; cast to int.
    if isinstance(inputs, np.uint8):
      inputs = inputs.astype(np.int32)
    if isinstance(targets, np.uint8):
      targets = targets.astype(np.int32)
    if mask is None:
      yield (inputs, targets)
    else:
      yield (inputs, targets, mask)
def _train_and_eval_streams(dataset, data_dir, preprocess_fn,
                            bare_preprocess_fn, shuffle_buffer_size,
                            eval_holdout_size, input_name, target_name):
  """Returns (train_batches, eval_batches, input_name) for `dataset`."""
  train_data, eval_data, keys = _train_and_eval_dataset(
      dataset, data_dir, eval_holdout_size)
  # Explicit names win; otherwise fall back to the dataset's supervised
  # keys, and finally to [None].
  if input_name is not None:
    input_names = [input_name]
  else:
    input_names = keys[0] if keys is not None else [None]
  if target_name is not None:
    target_names = [target_name]
  else:
    target_names = keys[1] if keys is not None else [None]
  train_batches = _shuffle_data(train_data, target_names, True,
                                shuffle_buffer_size, preprocess_fn,
                                bare_preprocess_fn)
  eval_batches = _shuffle_data(eval_data, target_names, False,
                               shuffle_buffer_size, preprocess_fn,
                               bare_preprocess_fn)
  return (train_batches, eval_batches, input_names[0])
def _shuffle_data(dataset, target_names, training, shuffle_buffer_size,
                  preprocess_fn, bare_preprocess_fn):
  """Shuffle the given dataset and run pre-processing.

  Args:
    dataset: a tf.data.Dataset of examples (dicts of features).
    target_names: list of feature names to append as targets.
    training: bool; True for the training set (enables the random skip).
    shuffle_buffer_size: size of the shuffle buffer.
    preprocess_fn: pre-processing applied after targets are appended.
    bare_preprocess_fn: pre-processing applied before targets are appended.

  Returns:
    The prepared, prefetching tf.data.Dataset.
  """

  def append_targets(example):
    """Append targets to the example dictionary. Needed for Keras."""
    if len(target_names) == 1:
      return (example, example[target_names[0]])
    targets = {}
    for name in target_names:
      targets[name] = example[name]
    return (example, targets)

  # `bare_preprocess_fn` is called before appending targets etc.
  if bare_preprocess_fn is not None:
    dataset = bare_preprocess_fn(dataset, training)
  dataset = dataset.map(append_targets)
  # TODO(pkozakowski): Repeat both the training and evaluation set, so we don't
  # have incomplete batches during evaluation. This will be a problem when we
  # add an option to evaluate on the whole dataset, then we'll need to think of
  # a different solution.
  dataset = dataset.repeat()
  if training:
    # Skip a random fraction at the beginning of the stream. The skip is
    # essential for synchronous highly-parallel training to avoid multiple
    # replicas reading the same data in lock-step.
    # FIX: _MAX_SKIP_EXAMPLES is a float (1e5); random.randint requires
    # integer bounds on Python >= 3.12, so cast explicitly.
    dataset = dataset.skip(random.randint(0, int(_MAX_SKIP_EXAMPLES)))
  dataset = preprocess_fn(dataset, training)
  dataset = dataset.shuffle(shuffle_buffer_size)
  return dataset.prefetch(8)
def _train_and_eval_dataset(dataset_name,
                            data_dir,
                            eval_holdout_size,
                            train_shuffle_files=True,
                            eval_shuffle_files=False):
  """Return train and evaluation datasets and supervised keys.

  Args:
    dataset_name: a string, the name of the dataset; if it starts with 't2t_'
      then we'll search T2T Problem registry for it, otherwise we assume it is a
      dataset from TFDS and load it from there.
    data_dir: directory where the data is located.
    eval_holdout_size: float from 0 to <1; if >0 use this much of training data
      for evaluation (instead of looking for a pre-specified VALIDATION split).
    train_shuffle_files: Boolean determining whether or not to shuffle the train
      files at startup. Set to False if you want data determinism.
    eval_shuffle_files: Boolean determining whether or not to shuffle the test
      files at startup. Set to False if you want data determinism.

  Returns:
    a 3-tuple consisting of:
     * the train tf.Dataset
     * the eval tf.Dataset
     * supervised_keys: information what's the input and what's the target,
       ie., a pair of lists with input and target feature names, or None if
       the dataset does not declare supervised keys.
    (FIX: the docstring previously claimed a 4-tuple with feature info,
    which this function never returned.)
  """
  if dataset_name.startswith('t2t_'):
    return _train_and_eval_dataset_v1(dataset_name[4:], data_dir,
                                      train_shuffle_files, eval_shuffle_files)
  dataset_builder = tfds.builder(dataset_name, data_dir=data_dir)
  info = dataset_builder.info
  splits = dataset_builder.info.splits
  if tfds.Split.TRAIN not in splits:
    raise ValueError('To train we require a train split in the dataset.')
  train_split = tfds.Split.TRAIN
  if eval_holdout_size > 0:
    # Carve the eval set out of the tail of the train split.
    holdout_percentage = int(eval_holdout_size * 100.0)
    train_percentage = 100 - holdout_percentage
    train_split = f'train[:{train_percentage}%]'
    eval_split = f'train[{train_percentage}%:]'
  elif dataset_name == 'glue/mnli':
    eval_split = 'validation_matched'
    # TODO(kitaev): Support diagnostic dataset (AX)
  else:
    if tfds.Split.VALIDATION not in splits and 'test' not in splits:
      raise ValueError('We require a validation or test split in the dataset.')
    # Prefer VALIDATION, fall back to TEST.
    eval_split = tfds.Split.VALIDATION
    if tfds.Split.VALIDATION not in splits:
      eval_split = tfds.Split.TEST
  train = tfds.load(
      name=dataset_name,
      split=train_split,
      data_dir=data_dir,
      shuffle_files=train_shuffle_files)
  valid = tfds.load(
      name=dataset_name,
      split=eval_split,
      data_dir=data_dir,
      shuffle_files=eval_shuffle_files)
  keys = None
  if info.supervised_keys:
    keys = ([info.supervised_keys[0]], [info.supervised_keys[1]])
  return train, valid, keys
@gin.configurable()
def TFDS(  # pylint: disable=invalid-name
    dataset_name,
    data_dir=None,
    tfds_preprocess_fn=None,
    keys=None,
    train=True,
    eval_holdout_size=0):
  """Returns an iterator of numpy arrays representing the dataset."""
  data_dir = download_and_prepare(dataset_name, data_dir)
  train_data, eval_data, _ = _train_and_eval_dataset(
      dataset_name, data_dir, eval_holdout_size)
  dataset = train_data if train else eval_data
  if tfds_preprocess_fn is not None:
    dataset = tfds_preprocess_fn(dataset)

  def select_from(example):
    # Project each example onto the requested keys, in order.
    return tuple(example[k] for k in keys)

  dataset = dataset.map(select_from).repeat()

  def gen(generator=None):
    del generator  # unused; kept for the data-source interface
    yield from fastmath.dataset_as_numpy(dataset)

  return gen
def _select_features(example, feature_list=None):
"""Select a subset of features from the example dict."""
feature_list = feature_list or ['inputs', 'targets']
return {f: example[f] for f in feature_list if f in example}
def _eager_dataset_iterator(dataset):
  """Yields dataset items with all contained tensors converted to numpy."""
  for item in dataset:
    numpy_leaves = [leaf.numpy() for leaf in tf.nest.flatten(item)]
    yield tf.nest.pack_sequence_as(item, numpy_leaves)
def _train_and_eval_dataset_v1(problem_name, data_dir, train_shuffle_files,
                               eval_shuffle_files):
  """Returns (train_dataset, eval_dataset, supervised_keys) for a T2T problem."""
  with tf.device('cpu:0'):  # all input-pipeline work stays on CPU
    problem = t2t_problems().problem(problem_name)
    hparams = None
    if problem_name == 'video_bair_robot_pushing':
      hparams = problem.get_hparams()
      bair_robot_pushing_hparams(hparams)
    train_dataset = problem.dataset(
        tf.estimator.ModeKeys.TRAIN,
        data_dir,
        shuffle_files=train_shuffle_files,
        hparams=hparams).map(_select_features)
    eval_dataset = problem.dataset(
        tf.estimator.ModeKeys.EVAL,
        data_dir,
        shuffle_files=eval_shuffle_files,
        hparams=hparams).map(_select_features)
    # TODO(lukaszkaiser): remove this need for one example, just input_key.
    examples = list(tfds.as_numpy(train_dataset.take(1)))
  # We use 'inputs' as input except for purely auto-regressive tasks like
  # language models where 'targets' are used as input_key.
  input_key = 'inputs' if 'inputs' in examples[0] else 'targets'
  supervised_keys = ([input_key], ['targets'])
  return train_dataset, eval_dataset, supervised_keys
# Tokenization.
def tokenize(stream,
             keys=None,
             vocab_type='subword',
             vocab_file=None,
             vocab_dir=None,
             n_reserved_ids=0,
             debug=False):
  """Tokenizes strings flowing through `stream`.

  `stream` yields strings, or tuples/dicts containing strings at `keys`.
  Each selected string is replaced by a numpy int array: the vocabulary
  encoding shifted up by `n_reserved_ids`.

  Args:
    stream: A python generator yielding strings, tuples or dicts.
    keys: which keys of the tuple/dict to tokenize (by default: all).
    vocab_type: one of 'subword', 'sentencepiece', 'char'.
    vocab_file: name of the vocabulary file.
    vocab_dir: directory which contains the vocabulary file.
    n_reserved_ids: int offset so that ids 0..n_reserved_ids-1 stay unused
      (e.g. for padding/EOS) when they are not already in the vocab file.
    debug: if True, logs one example at every power-of-two count.

  Yields:
    Examples from `stream` with the selected strings tokenized.
  """
  vocab = _get_vocab(vocab_type, vocab_file, vocab_dir)

  def encode(text):
    return np.array(vocab.encode(text)) + n_reserved_ids

  count = 0
  for example in stream:
    count += 1
    # (count & (count - 1)) == 0 <=> count is a power of two.
    log_now = debug and (count & (count - 1) == 0)
    if isinstance(example, (list, tuple)):
      result = tuple(
          encode(item) if keys is None or idx in keys else item
          for idx, item in enumerate(example))
    elif isinstance(example, dict):
      result = {
          k: (encode(v) if keys is None or k in keys else v)
          for k, v in example.items()}
    else:
      result = encode(example)
    if log_now:
      logging.info('Tokenize Example[%d] is %r', count, result)
    yield result
@gin.configurable()
def Tokenize(  # pylint: disable=invalid-name
    keys=None,
    vocab_type='subword',  # pylint: disable=invalid-name
    vocab_file=None,
    vocab_dir=None,
    n_reserved_ids=0,
    debug=False):
  """Returns a function that maps text to integer arrays; see `tokenize`."""

  def _tokenize_stream(generator):
    return tokenize(
        generator,
        keys=keys,
        vocab_type=vocab_type,
        vocab_file=vocab_file,
        vocab_dir=vocab_dir,
        n_reserved_ids=n_reserved_ids,
        debug=debug)

  return _tokenize_stream
def detokenize(x,
               vocab_type='subword',
               vocab_file=None,
               vocab_dir=None,
               n_reserved_ids=0):
  """Maps integer arrays back to text; the inverse of `tokenize`.

  For char- and subword-type vocabularies (and most sentencepiece ones)
  detokenize(tokenize(x)) == x; in rare cases some spacing may be lost, but
  the result is still a readable rendering of the token ids.

  Args:
    x: a list or numpy array of integers.
    vocab_type: one of 'subword', 'sentencepiece', 'char'.
    vocab_file: name of the vocabulary file.
    vocab_dir: directory which contains the vocabulary file.
    n_reserved_ids: int offset that was added during tokenization (ids
      0..n_reserved_ids-1 reserved for e.g. padding/EOS); subtracted here.

  Returns:
    A string: the de-tokenized version of `x`.
  """
  vocab = _get_vocab(vocab_type, vocab_file, vocab_dir)
  # Undo the reserved-id shift applied by `tokenize` before decoding.
  unshifted_ids = (np.array(x) - n_reserved_ids).tolist()
  return str(vocab.decode(unshifted_ids))
def _to_unicode(s):
# Errors of the casting are ignored (e.g. sequences not allowed by UTF-8),
# in order not to stay with incomplete examples (with empty values).
return str(s, encoding='utf-8', errors='ignore')
def ConvertToUnicode(keys=None, debug=False):  # pylint: disable=invalid-name
  """Converts to Unicode UTF-8 elements of an example.

  Useful for when TFDS outputs byte arrays. All of the errors of the conversion
  are ignored.

  Args:
    keys: tuple/list of example dimensions to convert.
    debug: boolean, If true, prints debug information every power of 2 steps.

  Returns:
    Function converting chosen elements of an example to UTF-8.
  """
  def _convert_stream(stream, keys=None):
    count = 0
    for example in stream:
      count += 1
      # Log (when debugging) only at example counts that are powers of two.
      log_this_one = debug and (count & (count - 1)) == 0
      if isinstance(example, (list, tuple)):
        converted = tuple(
            _to_unicode(x) if keys is None or i in keys else x
            for i, x in enumerate(example))
        if log_this_one:
          logging.info('Example[%d] is %r', count, converted)
        yield converted
      elif isinstance(example, dict):
        converted = {
            k: _to_unicode(v) if keys is None or k in keys else v
            for k, v in example.items()
        }
        if log_this_one:
          logging.info('Example[%d] is %r', count, converted)
        yield converted
      else:
        converted = _to_unicode(example)
        if log_this_one:
          logging.info('Example[%d] is %r', count, converted)
        yield converted

  return lambda g: _convert_stream(g, keys)
def vocab_size(vocab_type='subword',
               vocab_file=None,
               vocab_dir=None,
               n_reserved_ids=0):
  """Returns the size of the vocabulary (number of symbols used).

  Use this to size the final layers of a model that predicts symbols from the
  given vocabulary: if this returns N, the last layer must have at least N
  units. Reserved IDs are included in the count.

  Args:
    vocab_type: Type of vocabulary, one of: 'subword', 'sentencepiece', 'char'.
    vocab_file: Name of the vocabulary file.
    vocab_dir: Directory which contains the vocabulary file.
    n_reserved_ids: An int, offset added so 0, ..., n_reserved_ids-1 are unused.

  Returns:
    An integer, the number of symbols used (including reserved IDs).
  """
  vocabulary = _get_vocab(vocab_type, vocab_file, vocab_dir)
  return vocabulary.vocab_size + n_reserved_ids
def _get_vocab(vocab_type='subword', vocab_file=None, vocab_dir=None):
  """Gets the vocabulary object for tokenization; see tokenize for details."""
  known_types = ('char', 'subword', 'sentencepiece', 'bert', 'bert-lowercase')
  if vocab_type not in known_types:
    raise ValueError(
        'vocab_type must be "subword", "char", "sentencepiece", "bert" or "bert-lowercase" '
        f'but got {vocab_type}')

  if vocab_type == 'char':
    # num_reserved_ids=0 on purpose: `tokenize` adds n_reserved_ids itself,
    # which is exactly what ByteTextEncoder would otherwise do, so passing a
    # nonzero value here would double the offset.
    return text_encoder.ByteTextEncoder(num_reserved_ids=0)

  # All remaining vocab types read the vocabulary from a file.
  path = os.path.join(vocab_dir or 'gs://trax-ml/vocabs/', vocab_file)
  if vocab_type == 'subword':
    return text_encoder.SubwordTextEncoder(path)
  if vocab_type in ('bert', 'bert-lowercase'):
    return text_encoder.BertEncoder(
        path, do_lower_case=(vocab_type == 'bert-lowercase'))
  assert vocab_type == 'sentencepiece'
  return t5.data.SentencePieceVocabulary(sentencepiece_model_file=path,
                                         extra_ids=0)
# Makes the function accessible in gin configs, even with all args denylisted.
@gin.configurable(denylist=['dataset', 'training'])
def cifar10_no_augmentation_preprocess(dataset, training):
  """Rescales cifar10 images to [0, 1] floats; applies no augmentation."""
  del training

  def _rescale(features, targets):
    features['image'] = tf.cast(features['image'], tf.float32) / 255.0
    return features, targets

  return dataset.map(_rescale)
def _cifar_augment_image(image):
  """Image augmentation suitable for CIFAR-10/100.

  As described in https://arxiv.org/pdf/1608.06993v3.pdf (page 5): pad to
  40x40, take a random 32x32 crop, then randomly flip horizontally.

  Args:
    image: a Tensor.

  Returns:
    Tensor of the same shape as image.
  """
  padded = tf.image.resize_with_crop_or_pad(image, 40, 40)
  cropped = tf.image.random_crop(padded, [32, 32, 3])
  return tf.image.random_flip_left_right(cropped)
# Makes the function accessible in gin configs, even with all args denylisted.
@gin.configurable(denylist=['dataset', 'training'])
def cifar10_augmentation_preprocess(dataset, training):
  """Preprocessing for cifar10 with augmentation (see below)."""

  def _augment(features, targets):
    features['image'] = _cifar_augment_image(features['image'])
    return features, targets

  def _rescale(features, targets):
    features['image'] = tf.cast(features['image'], tf.float32) / 255.0
    return features, targets

  # Augmentation is applied only in training mode; rescaling always happens.
  if training:
    dataset = dataset.map(_augment)
  return dataset.map(_rescale)
@gin.configurable(denylist=['dataset', 'training'])
def cifar10_augmentation_flatten_preprocess(dataset,
                                            training,
                                            predict_image_train_weight=0.01):
  """Preprocessing for cifar10 that flattens it and appends targets."""

  def augment(features, targets):
    # Standard CIFAR augmentation (pad + random crop + flip), train-time only.
    features['image'] = _cifar_augment_image(features['image'])
    return features, targets

  def flatten_image(features, targets):
    """Flatten the image."""
    img = features['image']
    flat = tf.cast(tf.reshape(img, [-1]), tf.int64)
    tgt = tf.expand_dims(targets, axis=0)
    # Resulting sequence: all flattened pixel values followed by the label.
    flat_with_target = tf.concat([flat, tgt], axis=0)
    new_features = {}
    new_features['image'] = flat_with_target
    # Pixel positions get a small loss weight while training (0 at eval);
    # the final (label) position always gets weight 1.
    predict_image_weight = predict_image_train_weight if training else 0.0
    mask_begin = tf.ones_like(flat)
    mask_begin = tf.cast(mask_begin, tf.float32) * predict_image_weight
    mask_end = tf.cast(tf.ones_like(tgt), tf.float32)
    new_features['mask'] = tf.concat([mask_begin, mask_end], axis=0)
    return new_features, flat_with_target

  if training:
    dataset = dataset.map(augment)
  dataset = dataset.map(flatten_image)
  return dataset
@gin.configurable(denylist=['dataset', 'training'])
def concat_preprocess(dataset, training, pad_symbol=0):
  """Pre-processing function that concatenates input and target for LM."""
  del training

  def _concat(features, targets):
    inp = features['inputs']
    # A single pad symbol, with the same dtype/shape as one input element.
    pad = tf.expand_dims(tf.zeros_like(inp[0]) + pad_symbol, axis=0)
    joined = tf.concat([pad, inp, pad, targets], axis=0)
    # Note: we're updating the existing features dictionary here, so make sure
    # it is not re-used in some other ways outside of this function.
    features['inputs'] = joined
    return features, joined

  return dataset.map(_concat)
@gin.configurable(denylist=['dataset', 'training'])
def squeeze_targets_preprocess(dataset, training):
  """Pre-processing function that squeezes last axis of targets."""
  del training

  def _maybe_squeeze(features, targets):
    # Static-shape check: only squeeze when the trailing axis has length 1.
    if targets.shape[-1] == 1:
      targets = tf.squeeze(targets, axis=-1)
    return features, targets

  return dataset.map(_maybe_squeeze)
@gin.configurable(denylist=['dataset', 'training'])
def lm1b_preprocess(dataset,
                    training,
                    max_target_length=-1,
                    max_eval_target_length=-1):
  """Preprocessing for LM1B: filter out targets exceeding maximum length."""

  def _shorter_than(limit):
    # Predicate keeping examples whose target has at most `limit` tokens.
    return lambda _, target: tf.less(tf.shape(target)[0], limit + 1)

  if training and max_target_length > 0:
    dataset = dataset.filter(_shorter_than(max_target_length))
  if not training and max_eval_target_length > 0:
    dataset = dataset.filter(_shorter_than(max_eval_target_length))
  return dataset
# TODO(lukaszkaiser): find a single more abstract way of text pre-processing.
@gin.configurable(denylist=['dataset', 'training'])
def wmt_preprocess(dataset, training, max_length=-1, max_eval_length=-1):
  """Preprocessing for WMT: filter out examples exceeding maximum length."""

  def _within(limit):
    # Keeps examples where both inputs and target fit in `limit` tokens.
    def _pred(example, target):
      longest = tf.maximum(tf.shape(example['inputs'])[0], tf.shape(target)[0])
      return tf.less(longest, limit + 1)
    return _pred

  if training and max_length > 0:
    dataset = dataset.filter(_within(max_length))
  if not training and max_eval_length > 0:
    dataset = dataset.filter(_within(max_eval_length))
  return dataset
@gin.configurable(denylist=['dataset', 'training'])
def wmt_concat_preprocess(dataset, training, max_length=-1, max_eval_length=-1):
  """Preprocessing for WMT: filter exceeding maximum length and concatenate."""
  dataset = wmt_preprocess(dataset, training, max_length, max_eval_length)

  def _concat_with_mask(features, targets):
    inp = features['inputs']
    pad = tf.expand_dims(tf.zeros_like(inp[0]), axis=0)
    joined = tf.concat([inp, pad, targets], axis=0)
    # Loss mask: zeros over the source and the separator, ones over targets.
    mask = tf.concat([tf.zeros_like(inp), pad, tf.ones_like(targets)], axis=0)
    features['inputs'] = joined
    features['mask'] = mask
    return features, joined

  return dataset.map(_concat_with_mask)
@gin.configurable(denylist=['dataset', 'training'])
def lm_token_preprocessing(dataset, training):
  """Concatenates inputs, 0, targets, with masking only for targets."""
  del training

  def _make_lm_example(x):
    inp, tgt = x['inputs'], x['targets']
    # Single zero element used as separator between inputs and targets.
    sep = tf.expand_dims(tf.zeros_like(inp[0]), axis=0)
    joined = tf.concat([inp, sep, tgt], axis=0)
    x['inputs'] = joined
    x['targets'] = joined
    # Loss is computed on the target part only (inputs and separator get 0).
    x['mask'] = tf.concat([tf.zeros_like(inp), sep, tf.ones_like(tgt)], axis=0)
    return x

  return dataset.map(_make_lm_example)
@gin.configurable(denylist=['hparams'])
def bair_robot_pushing_hparams(hparams=None,
                               video_num_input_frames=1,
                               video_num_target_frames=15):
  """Sets video frame counts on `hparams`, or returns them when it is None."""
  if hparams is None:
    return video_num_input_frames, video_num_target_frames
  hparams.video_num_input_frames = video_num_input_frames
  hparams.video_num_target_frames = video_num_target_frames
@gin.configurable(denylist=['dataset', 'training'])
def bair_robot_pushing_preprocess(dataset, training):
  """Pre-processing function that concatenates input and target frames."""
  del training

  def _concat_frames(features, targets):
    """Concatenate input and output frames to form a language modeling setup."""
    inp = features['inputs']
    joined = tf.concat([inp, targets], axis=0)
    # Loss mask: zeros over the input frames, ones over the target frames.
    mask = tf.concat([tf.zeros_like(inp), tf.ones_like(targets)], axis=0)
    joined = tf.cast(tf.reshape(joined, (-1,)), tf.int32)
    mask = tf.cast(tf.reshape(mask, (-1,)), tf.float32)
    features['inputs'] = features['targets'] = joined
    features['mask'] = mask
    return features, joined

  return dataset.map(_concat_frames)
# Default SentencePiece model shipped with T5 (32k vocab), stored on GCS.
DEFAULT_SPM_PATH = 'gs://t5-data/vocabs/cc_all.32000/sentencepiece.model' # GCS
@gin.configurable(denylist=['dataset', 'training'])
def c4_preprocess(dataset,
                  training,
                  max_target_length=-1,
                  tokenization=None,
                  spm_path=None):
  """Pre-processing function for C4 dataset.

  Args:
    dataset: `tf.data.Dataset` with a 'text' feature.
    training: unused.
    max_target_length: if > 0, examples with longer targets are filtered out.
    tokenization: 'spc' to tokenize with SentencePiece; anything else falls
      back to character-level (unicode code point) targets.
    spm_path: optional path to a SentencePiece model; defaults to the T5 one.

  Returns:
    a `tf.data.Dataset` whose examples have integer 'inputs' and 'targets'.
  """
  del training

  def unicode_decode_chars(features, targets):
    # Character-level fallback: targets are the unicode code points of the raw
    # text; inputs mirror targets (language-modeling setup).
    targets = tf.strings.unicode_decode(features['text'], 'UTF-8')
    targets = tf.cast(targets, tf.int64)
    features['targets'] = targets
    features['inputs'] = targets
    return (features, targets)

  def spc_tokenize(tokenizer, features, targets):
    # SentencePiece path: both inputs and targets are the tokenized text.
    del targets
    tokenized_text = tokenizer.tokenize(features['text'])
    features['targets'] = tf.cast(tokenized_text, tf.int64)
    features['inputs'] = features['targets']
    return features, features['targets']

  if tokenization == 'spc':
    spm_path = spm_path or t5.data.DEFAULT_SPM_PATH
    # The model file is read once here, outside the traced map function.
    with tf.compat.v1.gfile.GFile(spm_path, 'rb') as f:
      spc_model = f.read()
    tokenizer = tf_text.SentencepieceTokenizer(model=spc_model)
    dataset = dataset.map(functools.partial(spc_tokenize, tokenizer))
  else:
    dataset = dataset.map(unicode_decode_chars)

  def target_right_length(_, target):
    return tf.less(tf.shape(target)[0], max_target_length + 1)

  if max_target_length > 0:
    dataset = dataset.filter(target_right_length)
  return dataset
@gin.configurable(denylist=['dataset', 'training'])
def c4_bare_preprocess_fn(dataset,
                          training=True,
                          spm_path=None,
                          copy_pretokenized=True,
                          sequence_length=None):
  """Returns a dataset that contains 'inputs' and 'targets' from C4."""
  # Set target key to be equal to the text content.
  dataset = t5.data.preprocessors.rekey(
      dataset, key_map={
          'targets': 'text',
          'inputs': None
      })

  # Vocabulary for tokenization.
  extra_ids = 0
  vocab = t5.data.SentencePieceVocabulary(
      sentencepiece_model_file=spm_path or t5.data.DEFAULT_SPM_PATH,
      extra_ids=extra_ids)
  feature = t5.data.Feature(vocab)
  output_features = {'targets': feature, 'inputs': feature}

  # Tokenize the targets.
  keys = output_features

  def encode_string_features_fn(features):
    """Encode all specified feature that are strings and return a dictionary.

    Args:
      features: a dictionary

    Returns:
      a dictionary
    """
    ret = {}
    for k, v in features.items():
      if k in keys and v.dtype == tf.string:
        if copy_pretokenized:
          # Keep the raw text next to its token ids under '<key>_pretokenized'.
          ret['%s_pretokenized' % k] = v
        v = tf.cast(output_features[k].vocabulary.encode_tf(v), tf.int64)
      ret[k] = v
    return ret

  dataset = dataset.map(
      encode_string_features_fn,
      num_parallel_calls=tf.data.experimental.AUTOTUNE)

  # Preprocess the tokens - the exact preprocessors are set via gin.
  dataset = t5.data.preprocessors.unsupervised(
      dataset, sequence_length=sequence_length, output_features=output_features)

  # Add EOS.
  dataset = add_eos_to_output_features(dataset, training)

  # Truncate and then pad the examples -- all examples have the same shape.
  dataset = truncate_dataset_on_len(dataset, training, sequence_length, True)
  dataset = pad_dataset_to_length(dataset, training, sequence_length)

  return dataset
@gin.configurable(denylist=['dataset', 'training'])
def filter_dataset_on_len(dataset,
                          training,
                          len_map=None,
                          filter_on_eval=False):
  """Filters a dataset of lengths given in `len_map`.

  Args:
    dataset: `tf.data.Dataset` the dataset to filter.
    training: bool, true if we are in training mode.
    len_map: optional dict of str to (int, int). We filter examples where a
      feature's size is beyond the specified bounds. Ex:
      {'inputs': (1, 512), 'targets': (64, 128)} will keep only those examples
      where 1 <= len(inputs) <= 512 and 64 <= len(targets) <= 128.
    filter_on_eval: bool if true, we will filter in eval mode also.

  Returns:
    a filtered `tf.data.Dataset`.
  """
  # No-op when there is nothing to filter on, or when we are in eval mode and
  # eval-time filtering was not requested.
  if (len_map is None) or (not training and not filter_on_eval):
    return dataset

  assert isinstance(len_map, dict)
  for k, bounds in len_map.items():
    # pylint: disable=cell-var-from-loop
    # TODO(afrozm): Investigate `cell-var-from-loop` - since this is WAI and
    # there is a test too.
    def within_bounds(x, key, len_bounds):
      # Dynamic length of the feature along its leading axis.
      size = tf.shape(x[key])[0]
      min_len, max_len = len_bounds
      # NOTE(review): Python `and` on boolean tensors relies on AutoGraph
      # converting logical operators inside `dataset.filter` -- confirm this
      # holds for the TF version in use.
      return (min_len <= size) and (size <= max_len)

    # Late binding of `k`/`bounds` appears safe because `filter` consumes the
    # lambda within this iteration (the TODO above notes this is WAI + tested).
    dataset = dataset.filter(lambda x: within_bounds(x, k, bounds))
    # pylint: enable=cell-var-from-loop

  return dataset
@gin.configurable(denylist=['dataset', 'training'])
def truncate_dataset_on_len(dataset,
                            training,
                            len_map=None,
                            truncate_on_eval=False):
  """Truncates features in an example to lengths given in `len_map`.

  Args:
    dataset: `tf.data.Dataset` the dataset to filter.
    training: bool, true if we are in training mode.
    len_map: optional dict of str to int, we truncate examples where a feature's
      size is beyond the max. Ex: {'inputs': 512, 'targets': 64} will truncate
      examples to be within those bounds.
    truncate_on_eval: bool if true, we will truncate in eval mode also.

  Returns:
    a filtered `tf.data.Dataset`.
  """
  # No-op when nothing to truncate, or in eval mode without truncate_on_eval.
  if (len_map is None) or (not training and not truncate_on_eval):
    return dataset

  assert isinstance(len_map, dict)
  def truncate_example(x):
    for key, max_len in len_map.items():
      x_len = tf.shape(x[key])[0]
      # `x_len` is a tensor: this `if` relies on AutoGraph turning it into a
      # graph conditional inside `dataset.map` -- NOTE(review): confirm on the
      # TF version in use.
      if x_len > max_len:
        # Truncates along the leading axis only; other axes are untouched.
        x[key] = x[key][:max_len, ...]
    return x
  return dataset.map(truncate_example)
@gin.configurable(denylist=['dataset', 'training'])
def pad_dataset_to_length(dataset, training, len_map=None):
  """Pad features less than specified length to specified length.

  Args:
    dataset: `tf.data.Dataset` to pad.
    training: unused.
    len_map: optional dict of str to int, mapping feature name to the target
      length; features already at or above the target length are unchanged.

  Returns:
    a `tf.data.Dataset` with zero-padded features.
  """
  del training
  if len_map is None:
    return dataset
  def pad_to_len(x):
    for key, max_len in len_map.items():
      x_shape = tf.shape(x[key])
      x_len = x_shape[0]
      # Tensor-valued condition: handled by AutoGraph inside `dataset.map`.
      if x_len < max_len:
        # pad_shape is rank-1, so this assumes 1-D features -- TODO confirm.
        pad_shape = [
            max_len - x_len,
        ]
        # Zero-pad at the end, keeping the feature's dtype.
        zeros = tf.zeros(pad_shape, dtype=x[key].dtype)
        x[key] = tf.concat([x[key], zeros], 0)
    return x
  return dataset.map(pad_to_len)
@gin.configurable(denylist=['dataset', 'training'])
def add_eos_to_output_features(dataset,
                               training,
                               output_features='targets',
                               eos=1):
  """Adds `EOS` to all features in `output_features`."""
  del training
  # Accept a single feature name as a convenience shorthand.
  if not isinstance(output_features, (list, tuple)):
    output_features = [output_features]

  def _append_eos(x):
    for name in output_features:
      x[name] = tf.concat([x[name], [eos]], axis=0)
    return x

  return dataset.map(_append_eos)
@gin.configurable(denylist=['dataset', 'training'])
def generic_text_dataset_preprocess_fn(dataset,
                                       training=True,
                                       text_preprocess_fns=None,
                                       token_preprocess_fns=None,
                                       spm_path=None,
                                       copy_pretokenized=False,
                                       debug_print_examples=False,
                                       debug_print_examples_rate=0.01):
  """Pre-processes, tokenizes and post-processes a `tf.data.Dataset`.

  Args:
    dataset: `tf.data.Dataset` to process.
    training: boolean, set to True if training, False otherwise.
    text_preprocess_fns: None or list of callables: `tf.data.Dataset`, bool ->
      `tf.data.Dataset` this operates before tokenization. Typically used to
      select which fields we want to learn over or change something into "text
      to text" form.
    token_preprocess_fns: None or list of callables: `tf.data.Dataset`, bool ->
      `tf.data.Dataset`, this operates after tokenization. Since this can view
      the tokenized fields, this can be used to filter on length etc.
    spm_path: None or str, path to a sentencepiece model to use for tokenization
      by default uses the 32k vocabulary from T5.
    copy_pretokenized: bool, if True retains the original fields after
      tokenization.
    debug_print_examples: bool, if True this prints examples to the logging
      stream for inspection, both before and after tokenization.
    debug_print_examples_rate: float, [0, 1.0], on average this fraction of
      dataset examples will be printed out in each phase i.e. pre and post
      tokenization.

  Returns:
    a `tf.data.Dataset` with all the preprocessing and tokenization performed.
  """

  # The assumption is that `text_preprocess_fns` finally gives us a dataset
  # which has `inputs` and `targets`.
  if text_preprocess_fns is not None:
    for text_preprocess_fn in text_preprocess_fns:
      dataset = text_preprocess_fn(dataset, training)

  # Print debugging examples if needed before tokenization.
  if debug_print_examples:

    def print_examples(x):
      # NOTE(review): np.random.uniform() runs in Python, so under graph
      # tracing this condition may be fixed at trace time rather than sampled
      # per example -- confirm on the TF version in use.
      if np.random.uniform() < debug_print_examples_rate:
        tf.print(x, output_stream=logging.info)
      return x

    dataset = dataset.map(print_examples)

  # Vocabulary for tokenization.
  extra_ids = 0
  vocab = t5.data.SentencePieceVocabulary(
      sentencepiece_model_file=spm_path or t5.data.DEFAULT_SPM_PATH,
      extra_ids=extra_ids)
  feature = t5.data.Feature(vocab)
  output_features = {'targets': feature, 'inputs': feature}

  # Tokenize the inputs and targets.
  dataset = t5.data.preprocessors.tokenize(
      dataset, output_features, copy_pretokenized=copy_pretokenized)

  # Apply the token-preprocessors.
  if token_preprocess_fns is not None:
    for token_preprocess_fn in token_preprocess_fns:
      dataset = token_preprocess_fn(dataset, training)

  # Print debugging examples if needed after tokenization.
  if debug_print_examples:

    def print_examples_and_shapes(x):
      if np.random.uniform() < debug_print_examples_rate:
        tf.print(
            {
                'inputs_shape': tf.size(x['inputs']),
                'targets_shape': tf.size(x['targets']),
                'inputs': x['inputs'],
                'targets': x['targets'],
            },
            output_stream=logging.info)
      return x

    dataset = dataset.map(print_examples_and_shapes)

  return dataset
@gin.configurable
def get_t5_preprocessor_by_name(name=None, fn_kwargs=None):
  """Returns a closure of any T5 preprocessor function with its arguments.

  The main use-case is to use this (with gin scopes) to make any preprocessor
  function available in a gin file to configure and use.

  See: `TFInputs.test_gin_configurable_preprocessors`

  Args:
    name: str, name of the preprocessor function to configure.
    fn_kwargs: optional dictionary, the arguments to configure, these will be
      partially applied to the function given by `name`.

  Returns:
    a closure of the preprocessor function along with its arguments, this
    function takes two arguments only, dataset and boolean training and ignores
    the training and calls the t5 processor with the dataset (and closed over
    arguments only).

  Raises:
    ValueError: if `name` is not provided.
  """
  # Explicit validation instead of `assert`, which is stripped under `-O`.
  if name is None:
    raise ValueError('get_t5_preprocessor_by_name requires `name` to be set.')
  f = getattr(t5.data.preprocessors, name)
  if fn_kwargs is not None:
    f = functools.partial(f, **fn_kwargs)
  return lambda ds, unused_training: f(ds)
def download_and_prepare(dataset_name, data_dir):
  """Downloads and prepares T2T or TFDS dataset.

  Args:
    dataset_name: tfds dataset or t2t problem name prefixed by 't2t_'.
    data_dir: location of existing dataset or None.

  Returns:
    data_dir: path string of downloaded data.
  """
  # When a directory is given, assume the data is already prepared there.
  if data_dir:
    return os.path.expanduser(data_dir)

  data_dir = os.path.expanduser('~/tensorflow_datasets/')
  dl_dir = os.path.join(data_dir, 'download')
  logging.info(
      'No dataset directory provided. '
      'Downloading and generating dataset for %s inside data directory %s '
      'For large datasets it is better to prepare datasets manually!',
      dataset_name, data_dir)
  if dataset_name.startswith('t2t_'):
    # Download and run dataset generator for T2T problem.
    data_dir = os.path.join(data_dir, dataset_name)
    tf.io.gfile.makedirs(data_dir)
    tf.io.gfile.makedirs(dl_dir)
    t2t_problems().problem(dataset_name[len('t2t_'):]).generate_data(
        data_dir, dl_dir)
  else:
    # Download and prepare TFDS dataset.
    tfds_builder = tfds.builder(dataset_name)
    tfds_builder.download_and_prepare(download_dir=dl_dir)
  return data_dir
def BertSingleSentenceInputs(batch,  # pylint: disable=invalid-name
                             labeled=True,
                             cls_id=101,
                             sep_id=102):
  """Prepares inputs for BERT: add [SEP], [CLS] and create embeddings."""
  if labeled:
    for sent, label in batch:
      tokens = np.concatenate(([cls_id], sent, [sep_id]))
      # Single-sentence input: every position belongs to segment 0.
      segments = np.zeros(sent.shape[0] + 2, dtype=np.int32)
      yield tokens, segments, segments, label, np.int32(1)
  else:
    for (sent,) in batch:  # each row is a 1-tuple
      tokens = np.concatenate(([cls_id], sent, [sep_id]))
      segments = np.zeros(sent.shape[0] + 2, dtype=np.int32)
      yield tokens, segments, segments
def BertDoubleSentenceInputs(batch,  # pylint: disable=invalid-name
                             labeled=True,
                             cls_id=101,
                             sep_id=102):
  """Prepares inputs for BERT models by adding [SEP] and [CLS] tokens and creating segment embeddings."""

  def _pair_inputs(sent1, sent2):
    """Builds (token ids, segment ids) for one [CLS] s1 [SEP] s2 [SEP] pair."""
    value_vector = np.concatenate(
        ([cls_id], sent1, [sep_id], sent2, [sep_id]))
    segment_embs = np.zeros(
        sent1.shape[0] + sent2.shape[0] + 3, dtype=np.int32)
    # The second sentence (and its trailing [SEP]) gets segment id 1.
    second_sent_start = sent1.shape[0] + 2
    segment_embs[second_sent_start:] = 1
    return value_vector, segment_embs

  # The two branches previously duplicated the construction code verbatim;
  # both now share `_pair_inputs` and differ only in the yielded tuple.
  if labeled:
    for sent1, sent2, label in batch:
      value_vector, segment_embs = _pair_inputs(sent1, sent2)
      yield value_vector, segment_embs, segment_embs, label, np.int32(1)
  else:
    for sent1, sent2 in batch:
      value_vector, segment_embs = _pair_inputs(sent1, sent2)
      yield value_vector, segment_embs, segment_embs
def CreateBertInputs(double_sentence=True,  # pylint: disable=invalid-name
                     labeled=True,
                     cls_id=101,
                     sep_id=102):
  """Returns a partial of the single- or double-sentence BERT input builder."""
  if double_sentence:
    bert_inputs_fn = BertDoubleSentenceInputs
  else:
    bert_inputs_fn = BertSingleSentenceInputs
  return functools.partial(
      bert_inputs_fn, labeled=labeled, cls_id=cls_id, sep_id=sep_id)
def mask_random_tokens(batch,
                       explicit_vocab_size=30522,
                       masking_prob=0.15,
                       cls_id=101,
                       sep_id=102,
                       mask_id=103,
                       vocab_start_id=999):
  """Prepares input for the masking task.

  Preparation consist in masking masking_prob percentage of non-special tokens
  at each input row; round(masking_prob * num_nonspecial_tokens) random tokens
  are selected out of which each token is either
  - replaced with [MASK] token with 80% probability,
  - replaced with random token with 10% probability,
  - or unchanged with 10%.

  The implementation is based on
  https://github.com/google-research/bert/blob/master/create_pretraining_data.py#L342

  Examples:
  - batch is a stream with each row having tuple (token_ids,). Function yields
    rows of form (modified_token_ids, original_tokens, token_weights), where
    modified_token_ids have [MASK] tokens or random tokens according to the
    procedure described above.
  - batch is a stream with each row having tuple (token_ids, segment_embeddings,
    nsp_label, nsp_weight).Function yields rows of form (modified_token_ids,
    segment_embeddings, nsp_label, nsp_weight, original_tokens, token_weights).

  Args:
    batch: stream of inputs. Each row in the stream is a tuple which first
      element is an array of tokens
    explicit_vocab_size: the total size of the vocabulary.
    masking_prob: Determines percent of non-special tokens to be selected for
      masking.
    cls_id: id of the special CLS token.
    sep_id: id of the special SEP token.
    mask_id: id of the special MASK token.
    vocab_start_id: id of first non-special token in the vocabulary.

  Yields:
    a stream with tokens masked for MLM training and 2 appended arrays:
    - original tokens: a copy of original tokens used as a label for mlm
    training
    - token_weights: weights distributed uniformly over selected tokens (sum
    is 1). Other tokens have 0 weight.
  """
  for token_ids, *row_rest in batch:
    # NOTE(review): token_ids is mutated in place below, so the caller's array
    # is modified; `original_tokens` keeps the pristine copy for the MLM label.
    original_tokens = token_ids.copy()

    # choose tokens for prediction. Chooses 0.15 of
    # all non-special tokens
    is_special_token = np.logical_or(token_ids == cls_id,
                                     token_ids == sep_id)  # CLS and SEP tokens
    is_special_token = np.logical_or(is_special_token,
                                     token_ids == 0)  # padding
    viable_ids = np.arange(token_ids.shape[0])[~is_special_token]
    num_to_sample = round(masking_prob * viable_ids.shape[0])
    if num_to_sample == 0:
      # sentence is too short to select given percentage of tokens to mask
      continue
    candidate_ids = np.random.choice(viable_ids, num_to_sample, replace=False)

    # create weights: uniform over the selected positions, summing to 1.
    token_weights = np.zeros(token_ids.shape)
    token_weights[candidate_ids] = 1 / candidate_ids.shape[0]

    # One uniform score per selected token decides its fate (80/10/10 split).
    prob_scores = np.random.random(candidate_ids.shape)
    # change 80 % of tokens to [MASK]
    mask_token_ids = candidate_ids[prob_scores < 0.8]
    token_ids[mask_token_ids] = mask_id

    # change 10% of tokens to random token
    random_token_ids = candidate_ids[(0.8 <= prob_scores) & (prob_scores < 0.9)]
    token_ids[random_token_ids] = np.random.randint(vocab_start_id,
                                                    explicit_vocab_size,
                                                    random_token_ids.shape[0])

    # rest (10%) is left unchanged
    yield (token_ids, *row_rest, original_tokens, token_weights)
def BertNextSentencePredictionInputs(dataset_name,  # pylint: disable=invalid-name
                                     data_dir=None,
                                     text_key='text',
                                     train=True,
                                     shuffle_size=50000):
  """Defines a stream for the next sentence prediction task."""
  # Upstream t5 preprocessor emits text of the form
  # 'sentence1: ... sentence2: ...' with a 'next'/'not_next' style target.
  stream = TFDS(
      dataset_name,
      data_dir=data_dir,
      tfds_preprocess_fn=functools.partial(
          t5.data.preprocessors.next_sentence_prediction,
          text_key=text_key,
          label_sentences=True,
          buffer_size=shuffle_size),
      keys=['inputs', 'targets'],
      train=train)

  def split_stream(generator=None):
    # split string with 'sentence1:' and 'sentence2:' into two separate strings
    for text, target in stream(generator):
      # NOTE(review): presumably `text` is a bytes scalar whose str() form
      # ends with a quote character that [:-1] strips -- confirm with the
      # TFDS output format.
      text_str = str(text)[:-1]  # removes last '"' which is always at the end
      sentences = text_str.split('sentence1: ')[1].split(' sentence2: ')
      if len(sentences) != 2:
        # 'sentence2:' appeared in the text and got mixed up with the label
        continue
      sent1, sent2 = sentences
      # Third element: boolean NSP label, True iff sent2 follows sent1.
      yield sent1, sent2, target == 'next'

  return split_stream
def CorpusToRandomChunks(dataset_name, num_tokens=512, train=True):  # pylint: disable=invalid-name
  """TFDS text corpus split into random chunks of up to `num_tokens` words."""
  split_text = functools.partial(
      t5.data.preprocessors.random_split_text,
      max_words_per_segment=num_tokens)
  return TFDS(
      dataset_name,
      tfds_preprocess_fn=split_text,
      train=train,
      keys=['text'])
@gin.configurable()
def get_glue_key(task_name=gin.REQUIRED):
  """Returns the example keys (plus 'label') for a GLUE task.

  Args:
    task_name: GLUE task, either bare ('mrpc') or prefixed ('glue/mrpc').

  Returns:
    Tuple of the task's text field names followed by 'label'.

  Raises:
    KeyError: if `task_name` is not a known GLUE task.
  """
  ext_task_name = task_name if task_name.startswith(
      'glue') else f'glue/{task_name}'
  glue_keys = {
      'glue/cola': ('sentence',),
      'glue/sst2': ('sentence',),
      'glue/mrpc': ('sentence1', 'sentence2'),
      'glue/qqp': ('question1', 'question2'),
      'glue/stsb': ('sentence1', 'sentence2'),
      'glue/mnli': ('premise', 'hypothesis'),
      'glue/qnli': ('question', 'sentence'),
      'glue/rte': ('sentence1', 'sentence2'),
      'glue/wnli': ('sentence1', 'sentence2'),
  }
  # Only the lookup can raise; keep the `try` body minimal.
  try:
    return (*glue_keys[ext_task_name], 'label')
  except KeyError:
    # `from None` (PEP 409): the bare lookup KeyError adds no information.
    raise KeyError(
        f'Wrong task name entered, available glue tasks: {list(glue_keys.keys())}. Entered: {task_name}'
    ) from None
def get_glue_t5_labels(dataset_name):
  """Returns the T5 text labels for a GLUE task.

  Args:
    dataset_name: GLUE task, either bare ('qnli') or prefixed ('glue/qnli').

  Returns:
    Tuple of label strings as used by T5.

  Raises:
    KeyError: if `dataset_name` is not a supported GLUE task.
  """
  ext_task_name = dataset_name if dataset_name.startswith(
      'glue') else f'glue/{dataset_name}'
  # Labels inferred from the T5 paper: https://arxiv.org/pdf/1910.10683.pdf
  glue_t5_labels = {
      'glue/cola': ('unacceptable', 'acceptable'),
      'glue/sst2': ('negative', 'positive'),
      'glue/mrpc': ('not_equivalent', 'equivalent'),
      'glue/qqp': ('not_duplicate', 'duplicate'),
      # Requires processing of floats
      # 'glue/stsb': ('sentence1', 'sentence2'),
      'glue/mnli': ('entailment', 'neutral', 'contradiction'),
      'glue/qnli': ('entailment', 'not_entailment'),
      'glue/rte': ('entailment', 'not_entailment'),
      # Used for evaluation and for training of T5.
      # As explained in Section 2.4 of https://arxiv.org/pdf/1910.10683.pdf
      # it has an overlap with WSC from Super-GLUE.
      # 'glue/wnli': ('sentence1', 'sentence2'),
  }
  # Only the lookup can raise; keep the `try` body minimal.
  try:
    return glue_t5_labels[ext_task_name]
  except KeyError:
    # `from None` (PEP 409): the bare lookup KeyError adds no information.
    raise KeyError(
        f'Wrong task name entered, available glue tasks: {list(glue_t5_labels.keys())}. Entered: {dataset_name}'
    ) from None
def get_t5_splits(dataset_name, train=True):
  """Get splits for glue tasks."""
  # Splits listed in https://www.tensorflow.org/datasets/catalog/glue
  splits_by_task = collections.defaultdict(lambda: ('train', 'validation'))
  splits_by_task['glue/mnli'] = ('train', 'validation_matched')
  return splits_by_task[dataset_name][0 if train else 1]
def CreateT5GlueInputs(  # pylint: disable=invalid-name
    dataset_name='glue/qnli',
    split=None,
    train=True,
    label_names=('entailment', 'not_entailment')):
  """Prepares glue inputs for T5 models using standard T5 preprocessor.

  NOTE(review): the `label_names` argument is unconditionally overwritten by
  `get_glue_t5_labels(dataset_name)` below, so any value passed by the caller
  is ignored -- confirm whether that is intended.
  """
  label_names = get_glue_t5_labels(dataset_name)
  if not split:
    split = get_t5_splits(dataset_name, train)
  # e.g. 'glue/qnli' -> 'qnli', as expected by the t5 glue preprocessor.
  benchmark_name = dataset_name.split('/')[1]
  dataset = tfds.load(name=dataset_name, split=split)
  proc_dataset = generic_text_dataset_preprocess_fn(
      dataset,
      spm_path=t5.data.DEFAULT_SPM_PATH,
      text_preprocess_fns=[
          lambda ds, training: t5.data.preprocessors.glue(  # pylint: disable=g-long-lambda
              ds,
              benchmark_name=benchmark_name,
              label_names=label_names)
      ],
      copy_pretokenized=True,
      debug_print_examples=True,
      debug_print_examples_rate=0.05)

  def t5_yield_examples(generator=None):
    # The generator argument is unused; the pre-processed dataset is iterated
    # directly and re-iterated endlessly by the outer while-loop.
    del generator
    while True:
      for example in proc_dataset:
        input_values = example['inputs']
        target_values = example['targets']
        # Mask of ones: every target position contributes to the loss.
        yield (fastmath.numpy.array(input_values),
               fastmath.numpy.array(target_values),
               fastmath.numpy.array([1] * len(target_values)))

  return t5_yield_examples
def compute_single_result(op_name, num_args):
  """An implementation of the most popular ops from the MathQA dataset.

  See https://gitlab.cs.washington.edu/amini91/mathqa-categorization/
  and specifically line 142 and following in new_DataStructure.py
  for an implementation which covers more details.

  Args:
    op_name: name of the MathQA operation, e.g. 'add' or 'circle_area'.
    num_args: list of numeric arguments consumed by the operation.

  Returns:
    The numeric result of the operation, or None for unknown op names.
  """
  if op_name == 'add':
    return num_args[0] + num_args[1]
  elif op_name == 'circle_arc':
    return num_args[0] / 360 * math.pi * 2 * num_args[1]
  elif op_name == 'circle_area':
    return math.pi * num_args[0]**2
  elif op_name == 'circle_sector_area':
    return num_args[1] / 360 * math.pi * (num_args[0]**2)
  elif op_name == 'circumface':
    return 2 * math.pi * num_args[0]
  elif op_name == 'choose':
    # scipy.misc.comb was removed from SciPy; the function lives in
    # scipy.special now.  Local import so the submodule is loaded on demand.
    import scipy.special  # pylint: disable=g-import-not-at-top
    return scipy.special.comb(num_args[0], num_args[1])
  elif op_name == 'cosine':
    return math.cos(num_args[0])
  elif op_name == 'cube_edge_by_volume':
    return num_args[0]**(1 / 3)
  elif op_name == 'combined_work':
    return 1 / (
        min(num_args[0], 1 / num_args[0]) + min(num_args[1], 1 / num_args[1]))
  elif op_name == 'count_interval':
    return num_args[0] - num_args[1] + 1
  elif op_name == 'diagonal':
    return math.sqrt(num_args[0]**2 + num_args[1]**2)
  elif op_name == 'divide':
    # MathQA convention: division by zero yields 0 instead of raising.
    if num_args[1] != 0:
      return num_args[0] / num_args[1]
    else:
      return 0
  elif op_name == 'factorial':
    # Cap at 15! to avoid huge intermediate values.
    return math.factorial(min(15, int(num_args[0])))
  elif op_name == 'floor':
    return math.floor(num_args[0])
  elif op_name == 'find_work':
    return 1 / (
        max(
            min(num_args[0], 1 / num_args[0]), min(
                num_args[1], 1 / num_args[1])) - min(
                    min(num_args[0], 1 / num_args[0]),
                    min(num_args[1], 1 / num_args[1])))
  elif op_name == 'from_percent':
    return num_args[0] / 100
  elif op_name == 'gain_percent':
    return 100 + num_args[0]
  elif op_name == 'gcd':
    # scipy.gcd was removed from SciPy; math.gcd is equivalent for ints.
    return math.gcd(int(num_args[0]), int(num_args[1]))
  elif op_name == 'inverse':
    if num_args[0] != 0:
      return 1 / num_args[0]
    else:
      return 0
  elif op_name == 'lcm':
    # scipy.lcm was removed from SciPy; lcm(a, b) = |a * b| / gcd(a, b),
    # and lcm is 0 when either argument is 0.
    a, b = int(num_args[0]), int(num_args[1])
    return abs(a * b) // math.gcd(a, b) if a and b else 0
  elif op_name == 'log':
    return math.log(max(1e-5, num_args[0]), 2)
  elif op_name == 'loss_percent':
    return 100 - num_args[0]
  elif op_name == 'max':
    return max(num_args[0], num_args[1])
  elif op_name == 'multiply':
    return num_args[0] * num_args[1]
  elif op_name == 'negate_percent':
    return 100 - num_args[0]
  elif op_name == 'negate':
    return -num_args[0]
  elif op_name == 'original_price_before_loss':
    # 1e-5 guards against division by zero for a 100% loss.
    return num_args[1] * 100 / (100 + 1e-5 - num_args[0])
  elif op_name == 'original_price_before_gain':
    return num_args[1] * 100 / (100 + num_args[0])
  elif op_name == 'permutation':
    n, m = min(num_args[0], num_args[1]), max(num_args[0], num_args[1])
    return math.factorial(int(m)) / math.factorial(int(m - n))
  elif op_name == 'power':
    # Exponent capped at 5 to avoid huge values.
    return num_args[0]**min(num_args[1], 5)
  elif op_name == 'percent':
    return num_args[0] / 100 * num_args[1]
  elif op_name == 'price_after_gain' or op_name == 'p_after_gain':
    return (1 + num_args[0] / 100) * num_args[1]
  elif op_name == 'price_after_loss' or op_name == 'p_after_loss':
    # The second alternative used to repeat 'price_after_loss'; the short
    # alias 'p_after_loss' mirrors 'p_after_gain' above.
    return (1 - num_args[0] / 100) * num_args[1]
  elif op_name == 'quadrilateral_area':
    return num_args[0] * (num_args[1] + num_args[2]) / 2
  elif op_name == 'reminder':
    return num_args[0] % num_args[1]
  elif op_name == 'rectangle_area':
    return num_args[0] * num_args[1]
  elif op_name == 'rectangle_perimeter':
    # Perimeter of a rectangle is 2 * (length + width); the previous
    # sqrt(a**2 + b**2) computed the diagonal instead.
    return 2 * (num_args[0] + num_args[1])
  elif op_name == 'rhombus_area':
    return num_args[0] * num_args[1] / 2
  elif op_name == 'sine':
    return math.sin(num_args[0])
  elif op_name == 'speed':
    return num_args[0] / num_args[1]
  elif op_name == 'sqrt':
    return math.sqrt(max(0, num_args[0]))
  elif op_name == 'subtract':
    return num_args[0] - num_args[1]
  elif op_name == 'square_edge_by_perimeter':
    return num_args[0] / 4
  elif op_name == 'square_edge_by_area':
    return math.sqrt(num_args[0])
  elif op_name == 'square_area':
    return num_args[0]**2
  elif op_name == 'surface_cube':
    return 6 * num_args[0]**2
  elif op_name == 'surface_rectangular_prism':
    return 2 * (
        num_args[0] * num_args[1] + num_args[0] * num_args[2] +
        num_args[1] * num_args[2])
  elif op_name == 'semi_circle_perimiter':  # op name spelling matches MathQA
    return math.pi * num_args[0] + 2 * num_args[0]
  elif op_name == 'square_perimeter' or op_name == 'rhombus_perimeter':
    return 4 * num_args[0]
  elif op_name == 'surface_sphere':
    return 4 * math.pi * num_args[0]**2
  elif op_name == 'speed_ratio_steel_to_stream':
    return (num_args[0] + num_args[1]) / (num_args[0] - num_args[1])
  elif op_name == 'speed_in_still_water':
    return (num_args[0] + num_args[1]) / 2
  elif op_name == 'stream_speed':
    return (num_args[0] - num_args[1]) / 2
  elif op_name == 'trapezium_area':
    return num_args[0] * (num_args[1] + num_args[2]) / 2
  elif op_name == 'triangle_area':
    return num_args[0] * num_args[1] / 2
  elif op_name == 'triangle_perimeter':
    return num_args[0] + num_args[1] + num_args[2]
  elif op_name == 'triangle_area_three_edges':
    # Heron's formula; max(0, ...) guards against negative values from
    # degenerate (non-triangle) edge lengths.
    s = (num_args[0] + num_args[1] + num_args[2]) / 2
    return math.sqrt(
        max(0,
            s * (s - num_args[0]) * (s - num_args[1]) * (s - num_args[2])))
  elif op_name == 'union_prob':
    # Inclusion-exclusion: P(A or B) = P(A) + P(B) - P(A and B).  The
    # previous code subtracted num_args[0] again, reducing the whole
    # expression to num_args[1].
    return num_args[0] + num_args[1] - num_args[2]
  elif op_name == 'negate_prob':
    return 1 - num_args[0]
  elif op_name == 'volume_cube':
    return num_args[0]**3
  elif op_name == 'volume_cone':
    return math.pi * num_args[0]**2 * num_args[1] / 3
  elif op_name == 'volume_cylinder':
    return math.pi * num_args[0]**2 * num_args[1]
  elif op_name == 'volume_rectangular_prism':
    return num_args[0] * num_args[1] * num_args[2]
  elif op_name == 'volume_sphere':
    return 4 / 3 * math.pi * num_args[0]**3
def compute_result(list_op, list_num):
  """Python execution of MathQA ops.

  Args:
    list_op: list of operation strings such as "add(n0,#1)"; '#k' arguments
      refer to previous temporary results and 'nk' arguments refer to numbers
      extracted from the problem text.
    list_num: numbers extracted from the problem text, indexed by 'nk' args.

  Returns:
    temporary_results: the result of every operation in order; the last
      element is the final answer.
  """
  # Compile the number-matching pattern once instead of re-creating the
  # pattern literal for every argument of every op.
  number_re = re.compile(
      r'[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?')
  temporary_results = []
  for op in list_op:
    op_name = op.split('(')[0]
    start_bracket = op.find('(')
    end_bracket = op.find(')')
    op_args = op[start_bracket + 1:end_bracket].split(',')
    num_args = []
    for arg in op_args:
      if arg[0] == '#':
        # The hash stands for a number stored in temporary_results.
        # For example #2 refers to the third temporary result.
        temp_index = int(number_re.findall(arg)[0])
        num_args.append(temporary_results[temp_index])
      elif arg[0] == 'n':
        # The n prefix stands for numbers which are listed in list_num -
        # originally they were contained in the text.
        n_index = int(number_re.findall(arg)[0])
        num_args.append(list_num[n_index])
      elif arg[0] == 'c':
        if arg == 'const_pi':
          constant = math.pi
        elif arg == 'const_deg_to_rad':
          constant = math.pi / 180
        else:
          consts = number_re.findall(arg)
          if len(consts) == 1:
            constant = float(consts[0])
          else:
            # e.g. 'const_3_6' encodes 3.6 as two integer pieces.
            constant1 = float(consts[0])
            constant2 = float('0.' + consts[1])
            constant = constant1 + constant2
        num_args.append(constant)
    temporary_results.append(compute_single_result(op_name, num_args))
  return temporary_results
def process_single_mathqa_example(example):
  """Execute a single example and verify coherence of a MathQA problem.

  Args:
    example: a dictionary with the following fields: Problem - a natural
      language formulation of the problem Rationale - a natural language
      solution of the problem options - five possible answers ( a) b) c) d) and
      e) ) correct - the letter representing the correct answer
      annotated_formula - formula representing the full solution linear_formula
      - a string of operations separated by the | character, e.g.
      multiply(n2,const_100)|multiply(n0,n1)|divide(#0,#1)|
      multiply(#2,const_100)|divide(#3,#1)| category - a natural language
      description of the category to which a given problem belongs.

  Returns:
    answer_num: numerical answer contained in the example
    python_result: numerical answers computed in Python, including intermediate
      results. The answer_num should be close python_result[-1]
    list_op: list of arithmetic operations
    list_num: list of identified numbers in the text
    (or None when the correct answer is non-numerical)
  """
  # Compile the number-matching pattern once; it was previously repeated
  # inline five times.
  number_re = re.compile(
      r'[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?')
  question = example['Problem']
  # The funny looking replace is needed to deal with numbers such as 4,000
  # TODO(henrykm) deal with numbers written as words "one", "two", ...
  list_num = [
      float(num.replace(',', '')) for num in number_re.findall(question)
  ]
  list_op = example['linear_formula'].split('|')
  answers = example['options']
  correct_answer = example['correct']
  index = answers.find('{} )'.format(correct_answer))
  answer_string = number_re.findall(answers[index:])
  # The if statement deals with empty lists - they are needed to treat
  # a correct non-numerical answer e) None of the above. Here we do not want
  # non-numerical answers, hence we return None.
  if answer_string:
    # Reuse the match from above instead of running findall a second time.
    answer_num = float(answer_string[0].replace(',', ''))
  else:
    return None
  # The if statements below deals with answers written as fractions e.g.
  # a ) 1 / 2 , b ) 1 / 3 , c ) 1 / 5 , d ) 10 / 30 , e ) 2 / 5 ?
  index_end_of_answer = index + len(str(answer_num)) + 3
  if index_end_of_answer < len(answers) and answers[index_end_of_answer] == '/':
    answer_denom = float(
        number_re.findall(answers[index_end_of_answer:])[0].replace(',', ''))
    answer_num /= answer_denom
  # In some cases the list of operations contains a superfluous last element,
  # namely an empty string.
  if not list_op[-1]:
    list_op = list_op[:-1]
  python_result = compute_result(list_op, list_num)
  return answer_num, python_result, list_op, list_num
def CreateMathQAInputs(  # pylint: disable=invalid-name
    dataset_path=None,
    train=True,
    tolerance=0.01,
    cumulative=True):
  """Prepares MathQA inputs.

  The generation procedure leaves a lot parameters to be set by the user.
  Currently we support only correct examples in the following sense:
  python execution agrees with the declared answer up to 1%.

  According to this criterion wrong examples such as
  problem: calculate 85184 ÷ ? = 352
  operations ['multiply(n0,n1)']
  are ignored (this should be divide(n0,n1) in this case).

  Args:
    dataset_path: a path with the MathQA dataset
    train: if True, then generate training examples, otherwhise generate
      validation examples (the dataset has also a test set)
    tolerance: if for a given example relative difference between Python result
      and the result declared in the dataset exceeds the level, then the example
      is dropped; tolerances ranging from 0.1 to 0.001 yield from 18K to 21K
      examples.
    cumulative: if set to True, then generate examples in the format input -
      problem + numbers + op1 + op2 + op3 target - op4 If set to False, then
      examples are in the format input - problem + numbers target - all
      operations

  Returns:
    mathqa_yield_examples: a generator of MathQA examples; the generator yields
      non-tokenized examples - they can be further processed using for example
      the tokenize function from this module
      tokenize(mathqa_yield_examples, keys = [0, 1], vocab_file='en_32k.subword')
  """
  if train:
    dataset_path = os.path.join(dataset_path, 'train.json')
  else:
    dataset_path = os.path.join(dataset_path, 'valid.json')
  # Opening with GFile allows to use remotely stored files, e.g.
  # in a gs bucket; the with-statement closes the handle after loading
  # (the handle previously leaked).
  with tf.io.gfile.GFile(dataset_path, 'r') as dataset_handle:
    dataset = json.load(dataset_handle)

  def mathqa_yield_examples(generator=None):
    del generator
    while True:  # loop forever over the dataset
      for example in dataset:
        # process_single_mathqa_example returns None for examples whose
        # correct answer is non-numerical; skip them instead of crashing
        # on tuple unpacking.
        result = process_single_mathqa_example(example)
        if result is None:
          continue
        answer_num, python_result, list_op, list_num = result
        if math.isclose(answer_num, python_result[-1], rel_tol=tolerance):
          # list_num holds floats, so stringify before joining (a plain
          # ' '.join(list_num) raises TypeError).
          input_prefix = example['Problem'] + ' '.join(
              str(num) for num in list_num)
          if cumulative:
            for op in list_op:
              input_values = input_prefix
              target_values = op
              input_prefix += ' ' + op
              yield input_values, target_values, [1] * len(target_values)
          else:
            input_values = input_prefix
            target_values = example['linear_formula']
            yield input_values, target_values, [1] * len(target_values)

  return mathqa_yield_examples
| [
"tensorflow.compat.v1.gfile.GFile",
"tensorflow.shape",
"math.floor",
"numpy.int32",
"math.sqrt",
"absl.logging.info",
"math.cos",
"numpy.array",
"gin.configurable",
"trax.data.text_encoder.ByteTextEncoder",
"tensorflow.ones_like",
"tensorflow_text.SentencepieceTokenizer",
"tensorflow.cast",... | [((1538, 1556), 'gin.configurable', 'gin.configurable', ([], {}), '()\n', (1554, 1556), False, 'import gin\n'), ((9460, 9478), 'gin.configurable', 'gin.configurable', ([], {}), '()\n', (9476, 9478), False, 'import gin\n'), ((14550, 14568), 'gin.configurable', 'gin.configurable', ([], {}), '()\n', (14566, 14568), False, 'import gin\n'), ((20506, 20556), 'gin.configurable', 'gin.configurable', ([], {'denylist': "['dataset', 'training']"}), "(denylist=['dataset', 'training'])\n", (20522, 20556), False, 'import gin\n'), ((21319, 21369), 'gin.configurable', 'gin.configurable', ([], {'denylist': "['dataset', 'training']"}), "(denylist=['dataset', 'training'])\n", (21335, 21369), False, 'import gin\n'), ((21864, 21914), 'gin.configurable', 'gin.configurable', ([], {'denylist': "['dataset', 'training']"}), "(denylist=['dataset', 'training'])\n", (21880, 21914), False, 'import gin\n'), ((23072, 23122), 'gin.configurable', 'gin.configurable', ([], {'denylist': "['dataset', 'training']"}), "(denylist=['dataset', 'training'])\n", (23088, 23122), False, 'import gin\n'), ((23715, 23765), 'gin.configurable', 'gin.configurable', ([], {'denylist': "['dataset', 'training']"}), "(denylist=['dataset', 'training'])\n", (23731, 23765), False, 'import gin\n'), ((24094, 24144), 'gin.configurable', 'gin.configurable', ([], {'denylist': "['dataset', 'training']"}), "(denylist=['dataset', 'training'])\n", (24110, 24144), False, 'import gin\n'), ((24882, 24932), 'gin.configurable', 'gin.configurable', ([], {'denylist': "['dataset', 'training']"}), "(denylist=['dataset', 'training'])\n", (24898, 24932), False, 'import gin\n'), ((25593, 25643), 'gin.configurable', 'gin.configurable', ([], {'denylist': "['dataset', 'training']"}), "(denylist=['dataset', 'training'])\n", (25609, 25643), False, 'import gin\n'), ((26297, 26347), 'gin.configurable', 'gin.configurable', ([], {'denylist': "['dataset', 'training']"}), "(denylist=['dataset', 'training'])\n", (26313, 26347), 
False, 'import gin\n'), ((26900, 26938), 'gin.configurable', 'gin.configurable', ([], {'denylist': "['hparams']"}), "(denylist=['hparams'])\n", (26916, 26938), False, 'import gin\n'), ((27319, 27369), 'gin.configurable', 'gin.configurable', ([], {'denylist': "['dataset', 'training']"}), "(denylist=['dataset', 'training'])\n", (27335, 27369), False, 'import gin\n'), ((28201, 28251), 'gin.configurable', 'gin.configurable', ([], {'denylist': "['dataset', 'training']"}), "(denylist=['dataset', 'training'])\n", (28217, 28251), False, 'import gin\n'), ((29562, 29612), 'gin.configurable', 'gin.configurable', ([], {'denylist': "['dataset', 'training']"}), "(denylist=['dataset', 'training'])\n", (29578, 29612), False, 'import gin\n'), ((31522, 31572), 'gin.configurable', 'gin.configurable', ([], {'denylist': "['dataset', 'training']"}), "(denylist=['dataset', 'training'])\n", (31538, 31572), False, 'import gin\n'), ((32900, 32950), 'gin.configurable', 'gin.configurable', ([], {'denylist': "['dataset', 'training']"}), "(denylist=['dataset', 'training'])\n", (32916, 32950), False, 'import gin\n'), ((33980, 34030), 'gin.configurable', 'gin.configurable', ([], {'denylist': "['dataset', 'training']"}), "(denylist=['dataset', 'training'])\n", (33996, 34030), False, 'import gin\n'), ((34577, 34627), 'gin.configurable', 'gin.configurable', ([], {'denylist': "['dataset', 'training']"}), "(denylist=['dataset', 'training'])\n", (34593, 34627), False, 'import gin\n'), ((35152, 35202), 'gin.configurable', 'gin.configurable', ([], {'denylist': "['dataset', 'training']"}), "(denylist=['dataset', 'training'])\n", (35168, 35202), False, 'import gin\n'), ((48160, 48178), 'gin.configurable', 'gin.configurable', ([], {}), '()\n', (48176, 48178), False, 'import gin\n'), ((3587, 3621), 'trax.fastmath.dataset_as_numpy', 'fastmath.dataset_as_numpy', (['dataset'], {}), '(dataset)\n', (3612, 3621), False, 'from trax import fastmath\n'), ((8161, 8206), 'tensorflow_datasets.builder', 'tfds.builder', 
(['dataset_name'], {'data_dir': 'data_dir'}), '(dataset_name, data_dir=data_dir)\n', (8173, 8206), True, 'import tensorflow_datasets as tfds\n'), ((9060, 9165), 'tensorflow_datasets.load', 'tfds.load', ([], {'name': 'dataset_name', 'split': 'train_split', 'data_dir': 'data_dir', 'shuffle_files': 'train_shuffle_files'}), '(name=dataset_name, split=train_split, data_dir=data_dir,\n shuffle_files=train_shuffle_files)\n', (9069, 9165), True, 'import tensorflow_datasets as tfds\n'), ((9197, 9300), 'tensorflow_datasets.load', 'tfds.load', ([], {'name': 'dataset_name', 'split': 'eval_split', 'data_dir': 'data_dir', 'shuffle_files': 'eval_shuffle_files'}), '(name=dataset_name, split=eval_split, data_dir=data_dir,\n shuffle_files=eval_shuffle_files)\n', (9206, 9300), True, 'import tensorflow_datasets as tfds\n'), ((19951, 19986), 'os.path.join', 'os.path.join', (['vocab_dir', 'vocab_file'], {}), '(vocab_dir, vocab_file)\n', (19963, 19986), False, 'import os\n'), ((21074, 21121), 'tensorflow.image.resize_with_crop_or_pad', 'tf.image.resize_with_crop_or_pad', (['image', '(40)', '(40)'], {}), '(image, 40, 40)\n', (21106, 21121), True, 'import tensorflow as tf\n'), ((21132, 21172), 'tensorflow.image.random_crop', 'tf.image.random_crop', (['image', '[32, 32, 3]'], {}), '(image, [32, 32, 3])\n', (21152, 21172), True, 'import tensorflow as tf\n'), ((21183, 21221), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['image'], {}), '(image)\n', (21214, 21221), True, 'import tensorflow as tf\n'), ((42996, 43081), 'functools.partial', 'functools.partial', (['bert_inputs_fn'], {'labeled': 'labeled', 'cls_id': 'cls_id', 'sep_id': 'sep_id'}), '(bert_inputs_fn, labeled=labeled, cls_id=cls_id, sep_id=sep_id\n )\n', (43013, 43081), False, 'import functools\n'), ((50378, 50435), 'collections.defaultdict', 'collections.defaultdict', (["(lambda : ('train', 'validation'))"], {}), "(lambda : ('train', 'validation'))\n", (50401, 50435), False, 'import collections\n'), 
((51022, 51063), 'tensorflow_datasets.load', 'tfds.load', ([], {'name': 'dataset_name', 'split': 'split'}), '(name=dataset_name, split=split)\n', (51031, 51063), True, 'import tensorflow_datasets as tfds\n'), ((61214, 61307), 're.findall', 're.findall', (['"""[-+]?[.]?[\\\\d]+(?:,\\\\d\\\\d\\\\d)*[\\\\.]?\\\\d*(?:[eE][-+]?\\\\d+)?"""', 'answers[index:]'], {}), "('[-+]?[.]?[\\\\d]+(?:,\\\\d\\\\d\\\\d)*[\\\\.]?\\\\d*(?:[eE][-+]?\\\\d+)?',\n answers[index:])\n", (61224, 61307), False, 'import re\n'), ((64229, 64265), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['dataset_path', '"""r"""'], {}), "(dataset_path, 'r')\n", (64246, 64265), True, 'import tensorflow as tf\n'), ((64278, 64303), 'json.load', 'json.load', (['dataset_handle'], {}), '(dataset_handle)\n', (64287, 64303), False, 'import json\n'), ((10267, 10301), 'trax.fastmath.dataset_as_numpy', 'fastmath.dataset_as_numpy', (['dataset'], {}), '(dataset)\n', (10292, 10301), False, 'from trax import fastmath\n'), ((10640, 10661), 'tensorflow.nest.flatten', 'tf.nest.flatten', (['item'], {}), '(item)\n', (10655, 10661), True, 'import tensorflow as tf\n'), ((10965, 10983), 'tensorflow.device', 'tf.device', (['"""cpu:0"""'], {}), "('cpu:0')\n", (10974, 10983), True, 'import tensorflow as tf\n'), ((16264, 16275), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (16272, 16275), True, 'import numpy as np\n'), ((19842, 19890), 'trax.data.text_encoder.ByteTextEncoder', 'text_encoder.ByteTextEncoder', ([], {'num_reserved_ids': '(0)'}), '(num_reserved_ids=0)\n', (19870, 19890), False, 'from trax.data import text_encoder\n'), ((20029, 20066), 'trax.data.text_encoder.SubwordTextEncoder', 'text_encoder.SubwordTextEncoder', (['path'], {}), '(path)\n', (20060, 20066), False, 'from trax.data import text_encoder\n'), ((20106, 20157), 'trax.data.text_encoder.BertEncoder', 'text_encoder.BertEncoder', (['path'], {'do_lower_case': '(False)'}), '(path, do_lower_case=False)\n', (20130, 20157), False, 'from trax.data import 
text_encoder\n'), ((20207, 20257), 'trax.data.text_encoder.BertEncoder', 'text_encoder.BertEncoder', (['path'], {'do_lower_case': '(True)'}), '(path, do_lower_case=True)\n', (20231, 20257), False, 'from trax.data import text_encoder\n'), ((22460, 22491), 'tensorflow.expand_dims', 'tf.expand_dims', (['targets'], {'axis': '(0)'}), '(targets, axis=0)\n', (22474, 22491), True, 'import tensorflow as tf\n'), ((22515, 22545), 'tensorflow.concat', 'tf.concat', (['[flat, tgt]'], {'axis': '(0)'}), '([flat, tgt], axis=0)\n', (22524, 22545), True, 'import tensorflow as tf\n'), ((22705, 22723), 'tensorflow.ones_like', 'tf.ones_like', (['flat'], {}), '(flat)\n', (22717, 22723), True, 'import tensorflow as tf\n'), ((22877, 22918), 'tensorflow.concat', 'tf.concat', (['[mask_begin, mask_end]'], {'axis': '(0)'}), '([mask_begin, mask_end], axis=0)\n', (22886, 22918), True, 'import tensorflow as tf\n'), ((23414, 23457), 'tensorflow.concat', 'tf.concat', (['[pad, inp, pad, targets]'], {'axis': '(0)'}), '([pad, inp, pad, targets], axis=0)\n', (23423, 23457), True, 'import tensorflow as tf\n'), ((25211, 25237), 'tensorflow.less', 'tf.less', (['l', '(max_length + 1)'], {}), '(l, max_length + 1)\n', (25218, 25237), True, 'import tensorflow as tf\n'), ((25364, 25395), 'tensorflow.less', 'tf.less', (['l', '(max_eval_length + 1)'], {}), '(l, max_eval_length + 1)\n', (25371, 25395), True, 'import tensorflow as tf\n'), ((26025, 26063), 'tensorflow.concat', 'tf.concat', (['[inp, pad, targets]'], {'axis': '(0)'}), '([inp, pad, targets], axis=0)\n', (26034, 26063), True, 'import tensorflow as tf\n'), ((26631, 26669), 'tensorflow.concat', 'tf.concat', (['[inp, pad, targets]'], {'axis': '(0)'}), '([inp, pad, targets], axis=0)\n', (26640, 26669), True, 'import tensorflow as tf\n'), ((27684, 27717), 'tensorflow.concat', 'tf.concat', (['[inp, targets]'], {'axis': '(0)'}), '([inp, targets], axis=0)\n', (27693, 27717), True, 'import tensorflow as tf\n'), ((27805, 27830), 'tensorflow.reshape', 
'tf.reshape', (['concat', '(-1,)'], {}), '(concat, (-1,))\n', (27815, 27830), True, 'import tensorflow as tf\n'), ((27842, 27865), 'tensorflow.reshape', 'tf.reshape', (['mask', '(-1,)'], {}), '(mask, (-1,))\n', (27852, 27865), True, 'import tensorflow as tf\n'), ((27879, 27904), 'tensorflow.cast', 'tf.cast', (['concat', 'tf.int32'], {}), '(concat, tf.int32)\n', (27886, 27904), True, 'import tensorflow as tf\n'), ((27916, 27941), 'tensorflow.cast', 'tf.cast', (['mask', 'tf.float32'], {}), '(mask, tf.float32)\n', (27923, 27941), True, 'import tensorflow as tf\n'), ((28543, 28595), 'tensorflow.strings.unicode_decode', 'tf.strings.unicode_decode', (["features['text']", '"""UTF-8"""'], {}), "(features['text'], 'UTF-8')\n", (28568, 28595), True, 'import tensorflow as tf\n'), ((28610, 28636), 'tensorflow.cast', 'tf.cast', (['targets', 'tf.int64'], {}), '(targets, tf.int64)\n', (28617, 28636), True, 'import tensorflow as tf\n'), ((28886, 28919), 'tensorflow.cast', 'tf.cast', (['tokenized_text', 'tf.int64'], {}), '(tokenized_text, tf.int64)\n', (28893, 28919), True, 'import tensorflow as tf\n'), ((29186, 29233), 'tensorflow_text.SentencepieceTokenizer', 'tf_text.SentencepieceTokenizer', ([], {'model': 'spc_model'}), '(model=spc_model)\n', (29216, 29233), True, 'import tensorflow_text as tf_text\n'), ((39571, 39604), 'functools.partial', 'functools.partial', (['f'], {}), '(f, **fn_kwargs)\n', (39588, 39604), False, 'import functools\n'), ((39979, 40023), 'os.path.expanduser', 'os.path.expanduser', (['"""~/tensorflow_datasets/"""'], {}), "('~/tensorflow_datasets/')\n", (39997, 40023), False, 'import os\n'), ((40037, 40071), 'os.path.join', 'os.path.join', (['data_dir', '"""download"""'], {}), "(data_dir, 'download')\n", (40049, 40071), False, 'import os\n'), ((40076, 40285), 'absl.logging.info', 'logging.info', (['"""No dataset directory provided. 
Downloading and generating dataset for %s inside data directory %s For large datasets it is better to prepare datasets manually!"""', 'dataset_name', 'data_dir'], {}), "(\n 'No dataset directory provided. Downloading and generating dataset for %s inside data directory %s For large datasets it is better to prepare datasets manually!'\n , dataset_name, data_dir)\n", (40088, 40285), False, 'from absl import logging\n'), ((40826, 40854), 'os.path.expanduser', 'os.path.expanduser', (['data_dir'], {}), '(data_dir)\n', (40844, 40854), False, 'import os\n'), ((45308, 45363), 'numpy.logical_or', 'np.logical_or', (['(token_ids == cls_id)', '(token_ids == sep_id)'], {}), '(token_ids == cls_id, token_ids == sep_id)\n', (45321, 45363), True, 'import numpy as np\n'), ((45446, 45493), 'numpy.logical_or', 'np.logical_or', (['is_special_token', '(token_ids == 0)'], {}), '(is_special_token, token_ids == 0)\n', (45459, 45493), True, 'import numpy as np\n'), ((45807, 45865), 'numpy.random.choice', 'np.random.choice', (['viable_ids', 'num_to_sample'], {'replace': '(False)'}), '(viable_ids, num_to_sample, replace=False)\n', (45823, 45865), True, 'import numpy as np\n'), ((45908, 45933), 'numpy.zeros', 'np.zeros', (['token_ids.shape'], {}), '(token_ids.shape)\n', (45916, 45933), True, 'import numpy as np\n'), ((46015, 46052), 'numpy.random.random', 'np.random.random', (['candidate_ids.shape'], {}), '(candidate_ids.shape)\n', (46031, 46052), True, 'import numpy as np\n'), ((46345, 46431), 'numpy.random.randint', 'np.random.randint', (['vocab_start_id', 'explicit_vocab_size', 'random_token_ids.shape[0]'], {}), '(vocab_start_id, explicit_vocab_size, random_token_ids.\n shape[0])\n', (46362, 46431), True, 'import numpy as np\n'), ((64016, 64056), 'os.path.join', 'os.path.join', (['dataset_path', '"""train.json"""'], {}), "(dataset_path, 'train.json')\n", (64028, 64056), False, 'import os\n'), ((64084, 64124), 'os.path.join', 'os.path.join', (['dataset_path', '"""valid.json"""'], {}), 
"(dataset_path, 'valid.json')\n", (64096, 64124), False, 'import os\n'), ((6364, 6401), 'random.randint', 'random.randint', (['(0)', '_MAX_SKIP_EXAMPLES'], {}), '(0, _MAX_SKIP_EXAMPLES)\n', (6378, 6401), False, 'import random\n'), ((10711, 10747), 'tensorflow.nest.pack_sequence_as', 'tf.nest.pack_sequence_as', (['item', 'flat'], {}), '(item, flat)\n', (10735, 10747), True, 'import tensorflow as tf\n'), ((20693, 20731), 'tensorflow.cast', 'tf.cast', (["features['image']", 'tf.float32'], {}), "(features['image'], tf.float32)\n", (20700, 20731), True, 'import tensorflow as tf\n'), ((21681, 21719), 'tensorflow.cast', 'tf.cast', (["features['image']", 'tf.float32'], {}), "(features['image'], tf.float32)\n", (21688, 21719), True, 'import tensorflow as tf\n'), ((22417, 22438), 'tensorflow.reshape', 'tf.reshape', (['img', '[-1]'], {}), '(img, [-1])\n', (22427, 22438), True, 'import tensorflow as tf\n'), ((22741, 22772), 'tensorflow.cast', 'tf.cast', (['mask_begin', 'tf.float32'], {}), '(mask_begin, tf.float32)\n', (22748, 22772), True, 'import tensorflow as tf\n'), ((22819, 22836), 'tensorflow.ones_like', 'tf.ones_like', (['tgt'], {}), '(tgt)\n', (22831, 22836), True, 'import tensorflow as tf\n'), ((23982, 24010), 'tensorflow.squeeze', 'tf.squeeze', (['targets'], {'axis': '(-1)'}), '(targets, axis=-1)\n', (23992, 24010), True, 'import tensorflow as tf\n'), ((25981, 26002), 'tensorflow.zeros_like', 'tf.zeros_like', (['inp[0]'], {}), '(inp[0])\n', (25994, 26002), True, 'import tensorflow as tf\n'), ((26587, 26608), 'tensorflow.zeros_like', 'tf.zeros_like', (['inp[0]'], {}), '(inp[0])\n', (26600, 26608), True, 'import tensorflow as tf\n'), ((29096, 29136), 'tensorflow.compat.v1.gfile.GFile', 'tf.compat.v1.gfile.GFile', (['spm_path', '"""rb"""'], {}), "(spm_path, 'rb')\n", (29120, 29136), True, 'import tensorflow as tf\n'), ((29260, 29302), 'functools.partial', 'functools.partial', (['spc_tokenize', 'tokenizer'], {}), '(spc_tokenize, tokenizer)\n', (29277, 29302), False, 
'import functools\n'), ((34295, 34311), 'tensorflow.shape', 'tf.shape', (['x[key]'], {}), '(x[key])\n', (34303, 34311), True, 'import tensorflow as tf\n'), ((35059, 35104), 'tensorflow.concat', 'tf.concat', (['[x[output_feature], [eos]]'], {'axis': '(0)'}), '([x[output_feature], [eos]], axis=0)\n', (35068, 35104), True, 'import tensorflow as tf\n'), ((40432, 40468), 'os.path.join', 'os.path.join', (['data_dir', 'dataset_name'], {}), '(data_dir, dataset_name)\n', (40444, 40468), False, 'import os\n'), ((40475, 40505), 'tensorflow.io.gfile.makedirs', 'tf.io.gfile.makedirs', (['data_dir'], {}), '(data_dir)\n', (40495, 40505), True, 'import tensorflow as tf\n'), ((40512, 40540), 'tensorflow.io.gfile.makedirs', 'tf.io.gfile.makedirs', (['dl_dir'], {}), '(dl_dir)\n', (40532, 40540), True, 'import tensorflow as tf\n'), ((40715, 40741), 'tensorflow_datasets.builder', 'tfds.builder', (['dataset_name'], {}), '(dataset_name)\n', (40727, 40741), True, 'import tensorflow_datasets as tfds\n'), ((41209, 41252), 'numpy.concatenate', 'np.concatenate', (['([cls_id], sent1, [sep_id])'], {}), '(([cls_id], sent1, [sep_id]))\n', (41223, 41252), True, 'import numpy as np\n'), ((41274, 41318), 'numpy.zeros', 'np.zeros', (['(sent1.shape[0] + 2)'], {'dtype': 'np.int32'}), '(sent1.shape[0] + 2, dtype=np.int32)\n', (41282, 41318), True, 'import numpy as np\n'), ((41481, 41524), 'numpy.concatenate', 'np.concatenate', (['([cls_id], sent1, [sep_id])'], {}), '(([cls_id], sent1, [sep_id]))\n', (41495, 41524), True, 'import numpy as np\n'), ((41546, 41590), 'numpy.zeros', 'np.zeros', (['(sent1.shape[0] + 2)'], {'dtype': 'np.int32'}), '(sent1.shape[0] + 2, dtype=np.int32)\n', (41554, 41590), True, 'import numpy as np\n'), ((42019, 42079), 'numpy.concatenate', 'np.concatenate', (['([cls_id], sent1, [sep_id], sent2, [sep_id])'], {}), '(([cls_id], sent1, [sep_id], sent2, [sep_id]))\n', (42033, 42079), True, 'import numpy as np\n'), ((42113, 42174), 'numpy.zeros', 'np.zeros', (['(sent1.shape[0] + 
sent2.shape[0] + 3)'], {'dtype': 'np.int32'}), '(sent1.shape[0] + sent2.shape[0] + 3, dtype=np.int32)\n', (42121, 42174), True, 'import numpy as np\n'), ((42407, 42467), 'numpy.concatenate', 'np.concatenate', (['([cls_id], sent1, [sep_id], sent2, [sep_id])'], {}), '(([cls_id], sent1, [sep_id], sent2, [sep_id]))\n', (42421, 42467), True, 'import numpy as np\n'), ((42501, 42562), 'numpy.zeros', 'np.zeros', (['(sent1.shape[0] + sent2.shape[0] + 3)'], {'dtype': 'np.int32'}), '(sent1.shape[0] + sent2.shape[0] + 3, dtype=np.int32)\n', (42509, 42562), True, 'import numpy as np\n'), ((45559, 45588), 'numpy.arange', 'np.arange', (['token_ids.shape[0]'], {}), '(token_ids.shape[0])\n', (45568, 45588), True, 'import numpy as np\n'), ((47080, 47217), 'functools.partial', 'functools.partial', (['t5.data.preprocessors.next_sentence_prediction'], {'text_key': 'text_key', 'label_sentences': '(True)', 'buffer_size': 'shuffle_size'}), '(t5.data.preprocessors.next_sentence_prediction, text_key=\n text_key, label_sentences=True, buffer_size=shuffle_size)\n', (47097, 47217), False, 'import functools\n'), ((48002, 48098), 'functools.partial', 'functools.partial', (['t5.data.preprocessors.random_split_text'], {'max_words_per_segment': 'num_tokens'}), '(t5.data.preprocessors.random_split_text,\n max_words_per_segment=num_tokens)\n', (48019, 48098), False, 'import functools\n'), ((60932, 61018), 're.findall', 're.findall', (['"""[-+]?[.]?[\\\\d]+(?:,\\\\d\\\\d\\\\d)*[\\\\.]?\\\\d*(?:[eE][-+]?\\\\d+)?"""', 'question'], {}), "('[-+]?[.]?[\\\\d]+(?:,\\\\d\\\\d\\\\d)*[\\\\.]?\\\\d*(?:[eE][-+]?\\\\d+)?',\n question)\n", (60942, 61018), False, 'import re\n'), ((13832, 13895), 'absl.logging.info', 'logging.info', (['"""Tokenize Example[%d] is %r"""', 'debug_count', 'output'], {}), "('Tokenize Example[%d] is %r', debug_count, output)\n", (13844, 13895), False, 'from absl import logging\n'), ((23357, 23378), 'tensorflow.zeros_like', 'tf.zeros_like', (['inp[0]'], {}), '(inp[0])\n', (23370, 23378), 
True, 'import tensorflow as tf\n'), ((24429, 24445), 'tensorflow.shape', 'tf.shape', (['target'], {}), '(target)\n', (24437, 24445), True, 'import tensorflow as tf\n'), ((24536, 24552), 'tensorflow.shape', 'tf.shape', (['target'], {}), '(target)\n', (24544, 24552), True, 'import tensorflow as tf\n'), ((25147, 25174), 'tensorflow.shape', 'tf.shape', (["example['inputs']"], {}), "(example['inputs'])\n", (25155, 25174), True, 'import tensorflow as tf\n'), ((25179, 25195), 'tensorflow.shape', 'tf.shape', (['target'], {}), '(target)\n', (25187, 25195), True, 'import tensorflow as tf\n'), ((25300, 25327), 'tensorflow.shape', 'tf.shape', (["example['inputs']"], {}), "(example['inputs'])\n", (25308, 25327), True, 'import tensorflow as tf\n'), ((25332, 25348), 'tensorflow.shape', 'tf.shape', (['target'], {}), '(target)\n', (25340, 25348), True, 'import tensorflow as tf\n'), ((26086, 26104), 'tensorflow.zeros_like', 'tf.zeros_like', (['inp'], {}), '(inp)\n', (26099, 26104), True, 'import tensorflow as tf\n'), ((26111, 26132), 'tensorflow.ones_like', 'tf.ones_like', (['targets'], {}), '(targets)\n', (26123, 26132), True, 'import tensorflow as tf\n'), ((26692, 26710), 'tensorflow.zeros_like', 'tf.zeros_like', (['inp'], {}), '(inp)\n', (26705, 26710), True, 'import tensorflow as tf\n'), ((26717, 26738), 'tensorflow.ones_like', 'tf.ones_like', (['targets'], {}), '(targets)\n', (26729, 26738), True, 'import tensorflow as tf\n'), ((27740, 27758), 'tensorflow.zeros_like', 'tf.zeros_like', (['inp'], {}), '(inp)\n', (27753, 27758), True, 'import tensorflow as tf\n'), ((27760, 27781), 'tensorflow.ones_like', 'tf.ones_like', (['targets'], {}), '(targets)\n', (27772, 27781), True, 'import tensorflow as tf\n'), ((29418, 29434), 'tensorflow.shape', 'tf.shape', (['target'], {}), '(target)\n', (29426, 29434), True, 'import tensorflow as tf\n'), ((32661, 32677), 'tensorflow.shape', 'tf.shape', (['x[key]'], {}), '(x[key])\n', (32669, 32677), True, 'import tensorflow as tf\n'), ((33839, 
33855), 'tensorflow.shape', 'tf.shape', (['x[key]'], {}), '(x[key])\n', (33847, 33855), True, 'import tensorflow as tf\n'), ((34440, 34479), 'tensorflow.zeros', 'tf.zeros', (['pad_shape'], {'dtype': 'x[key].dtype'}), '(pad_shape, dtype=x[key].dtype)\n', (34448, 34479), True, 'import tensorflow as tf\n'), ((34497, 34526), 'tensorflow.concat', 'tf.concat', (['[x[key], zeros]', '(0)'], {}), '([x[key], zeros], 0)\n', (34506, 34526), True, 'import tensorflow as tf\n'), ((37379, 37398), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (37396, 37398), True, 'import numpy as np\n'), ((37436, 37475), 'tensorflow.print', 'tf.print', (['x'], {'output_stream': 'logging.info'}), '(x, output_stream=logging.info)\n', (37444, 37475), True, 'import tensorflow as tf\n'), ((38228, 38247), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (38245, 38247), True, 'import numpy as np\n'), ((64532, 64594), 'math.isclose', 'math.isclose', (['answer_num', 'python_result[-1]'], {'rel_tol': 'tolerance'}), '(answer_num, python_result[-1], rel_tol=tolerance)\n', (64544, 64594), False, 'import math\n'), ((14232, 14300), 'absl.logging.info', 'logging.info', (['"""Tokenize Example[%d] is %r"""', 'debug_count', 'new_example'], {}), "('Tokenize Example[%d] is %r', debug_count, new_example)\n", (14244, 14300), False, 'from absl import logging\n'), ((14464, 14527), 'absl.logging.info', 'logging.info', (['"""Tokenize Example[%d] is %r"""', 'debug_count', 'output'], {}), "('Tokenize Example[%d] is %r', debug_count, output)\n", (14476, 14527), False, 'from absl import logging\n'), ((17492, 17546), 'absl.logging.info', 'logging.info', (['"""Example[%d] is %r"""', 'debug_count', 'output'], {}), "('Example[%d] is %r', debug_count, output)\n", (17504, 17546), False, 'from absl import logging\n'), ((41380, 41391), 'numpy.int32', 'np.int32', (['(1)'], {}), '(1)\n', (41388, 41391), True, 'import numpy as np\n'), ((42335, 42346), 'numpy.int32', 'np.int32', (['(1)'], {}), '(1)\n', 
(42343, 42346), True, 'import numpy as np\n'), ((17875, 17934), 'absl.logging.info', 'logging.info', (['"""Example[%d] is %r"""', 'debug_count', 'new_example'], {}), "('Example[%d] is %r', debug_count, new_example)\n", (17887, 17934), False, 'from absl import logging\n'), ((18080, 18134), 'absl.logging.info', 'logging.info', (['"""Example[%d] is %r"""', 'debug_count', 'output'], {}), "('Example[%d] is %r', debug_count, output)\n", (18092, 18134), False, 'from absl import logging\n'), ((38341, 38361), 'tensorflow.size', 'tf.size', (["x['inputs']"], {}), "(x['inputs'])\n", (38348, 38361), True, 'import tensorflow as tf\n'), ((38396, 38417), 'tensorflow.size', 'tf.size', (["x['targets']"], {}), "(x['targets'])\n", (38403, 38417), True, 'import tensorflow as tf\n'), ((51715, 51749), 'trax.fastmath.numpy.array', 'fastmath.numpy.array', (['input_values'], {}), '(input_values)\n', (51735, 51749), False, 'from trax import fastmath\n'), ((51766, 51801), 'trax.fastmath.numpy.array', 'fastmath.numpy.array', (['target_values'], {}), '(target_values)\n', (51786, 51801), False, 'from trax import fastmath\n'), ((58504, 58581), 're.findall', 're.findall', (['"""[-+]?[.]?[\\\\d]+(?:,\\\\d\\\\d\\\\d)*[\\\\.]?\\\\d*(?:[eE][-+]?\\\\d+)?"""', 'arg'], {}), "('[-+]?[.]?[\\\\d]+(?:,\\\\d\\\\d\\\\d)*[\\\\.]?\\\\d*(?:[eE][-+]?\\\\d+)?', arg)\n", (58514, 58581), False, 'import re\n'), ((61554, 61647), 're.findall', 're.findall', (['"""[-+]?[.]?[\\\\d]+(?:,\\\\d\\\\d\\\\d)*[\\\\.]?\\\\d*(?:[eE][-+]?\\\\d+)?"""', 'answers[index:]'], {}), "('[-+]?[.]?[\\\\d]+(?:,\\\\d\\\\d\\\\d)*[\\\\.]?\\\\d*(?:[eE][-+]?\\\\d+)?',\n answers[index:])\n", (61564, 61647), False, 'import re\n'), ((62013, 62120), 're.findall', 're.findall', (['"""[-+]?[.]?[\\\\d]+(?:,\\\\d\\\\d\\\\d)*[\\\\.]?\\\\d*(?:[eE][-+]?\\\\d+)?"""', 'answers[index_end_of_answer:]'], {}), "('[-+]?[.]?[\\\\d]+(?:,\\\\d\\\\d\\\\d)*[\\\\.]?\\\\d*(?:[eE][-+]?\\\\d+)?',\n answers[index_end_of_answer:])\n", (62023, 62120), False, 'import re\n'), 
((52633, 52674), 'scipy.misc.comb', 'scipy.misc.comb', (['num_args[0]', 'num_args[1]'], {}), '(num_args[0], num_args[1])\n', (52648, 52674), False, 'import scipy\n'), ((58838, 58915), 're.findall', 're.findall', (['"""[-+]?[.]?[\\\\d]+(?:,\\\\d\\\\d\\\\d)*[\\\\.]?\\\\d*(?:[eE][-+]?\\\\d+)?"""', 'arg'], {}), "('[-+]?[.]?[\\\\d]+(?:,\\\\d\\\\d\\\\d)*[\\\\.]?\\\\d*(?:[eE][-+]?\\\\d+)?', arg)\n", (58848, 58915), False, 'import re\n'), ((52714, 52735), 'math.cos', 'math.cos', (['num_args[0]'], {}), '(num_args[0])\n', (52722, 52735), False, 'import math\n'), ((59173, 59250), 're.findall', 're.findall', (['"""[-+]?[.]?[\\\\d]+(?:,\\\\d\\\\d\\\\d)*[\\\\.]?\\\\d*(?:[eE][-+]?\\\\d+)?"""', 'arg'], {}), "('[-+]?[.]?[\\\\d]+(?:,\\\\d\\\\d\\\\d)*[\\\\.]?\\\\d*(?:[eE][-+]?\\\\d+)?', arg)\n", (59183, 59250), False, 'import re\n'), ((53058, 53104), 'math.sqrt', 'math.sqrt', (['(num_args[0] ** 2 + num_args[1] ** 2)'], {}), '(num_args[0] ** 2 + num_args[1] ** 2)\n', (53067, 53104), False, 'import math\n'), ((53340, 53363), 'math.floor', 'math.floor', (['num_args[0]'], {}), '(num_args[0])\n', (53350, 53363), False, 'import math\n'), ((55466, 55512), 'math.sqrt', 'math.sqrt', (['(num_args[0] ** 2 + num_args[1] ** 2)'], {}), '(num_args[0] ** 2 + num_args[1] ** 2)\n', (55475, 55512), False, 'import math\n'), ((55621, 55642), 'math.sin', 'math.sin', (['num_args[0]'], {}), '(num_args[0])\n', (55629, 55642), False, 'import math\n'), ((55967, 55989), 'math.sqrt', 'math.sqrt', (['num_args[0]'], {}), '(num_args[0])\n', (55976, 55989), False, 'import math\n')] |
"""
Program to make an contour plot from a contour data file generated
by the Perple_X program WERAMI, for data file format see:
http://www.perplex.ethz.ch/faq/Perple_X_tab_file_format.txt
"""
# author: <NAME>
# website: petrol.natur.cuni.cz/~ondro
# last edited: April 16, 2014
import sys
import os
import pickle
import gzip
import argparse
from pkg_resources import resource_filename, get_distribution, DistributionNotFound
from PyQt5 import QtCore, QtGui, QtWidgets
import numpy as np
import matplotlib
from scipy import ndimage
from matplotlib import cm
# from matplotlib import ticker
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import (
FigureCanvasQTAgg as FigureCanvas,
NavigationToolbar2QT as NavigationToolbar)
from mpl_toolkits.mplot3d import Axes3D
from .ui_pywerami import Ui_MainWindow
from .api import GridData
# Resolve the package version: if this file runs from the installed
# distribution, report its version; otherwise mark it as not installed.
try:
    _dist = get_distribution('pywerami')
    # Normalize case for Windows systems
    dist_loc = os.path.normcase(_dist.location)
    here = os.path.normcase(__file__)
    if not here.startswith(os.path.join(dist_loc, 'foobar')):
        # not installed, but there is another version that *is*
        raise DistributionNotFound
except DistributionNotFound:
    __version__ = 'Not installed version'
else:
    __version__ = _dist.version
# Make sure that we are using QT5
matplotlib.use('Qt5Agg')
class PyWeramiWindow(QtWidgets.QMainWindow, Ui_MainWindow):
    """Main application window: imports WERAMI grid data, manages per-variable
    plot properties and renders 2D contour or 3D surface plots."""

    def __init__(self, filename=None, parent=None):
        """Build the UI, wire up signals and optionally import a data file.

        filename -- optional path to a .tab/.tci data file to import at startup
        parent -- optional parent widget
        """
        super(PyWeramiWindow, self).__init__(parent)
        self.settings = QtCore.QSettings("LX", "pywerami")
        self.setupUi(self)
        self._fig = Figure(facecolor="white")
        self._ax = self._fig.add_subplot(111)
        self._canvas = FigureCanvas(self._fig)
        self._canvas.setParent(self.widget)
        self._canvas.setFocusPolicy(QtCore.Qt.StrongFocus)
        self.matplot.addWidget(self._canvas)
        self.mpl_toolbar = NavigationToolbar(self._canvas, self.widget)
        self.mpl_toolbar.hide()
        self.matplot.addWidget(self.mpl_toolbar)
        self.setWindowTitle('PyWerami')
        window_icon = resource_filename(__name__, 'images/pywerami.png')
        self.setWindowIcon(QtGui.QIcon(window_icon))
        self.about_dialog = AboutDialog(__version__)
        # set combos
        self.cmaps = ['viridis', 'inferno', 'plasma', 'magma', 'Blues', 'BuGn', 'BuPu',
                      'GnBu', 'Greens', 'Greys', 'Oranges', 'OrRd', 'PuBu', 'PuBuGn',
                      'PuRd', 'Purples', 'RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr',
                      'YlOrRd', 'afmhot', 'autumn', 'bone', 'cool', 'copper', 'gist_heat',
                      'gray', 'hot', 'pink', 'spring', 'summer', 'winter', 'BrBG', 'bwr',
                      'coolwarm', 'PiYG', 'PRGn', 'PuOr', 'RdBu', 'RdGy', 'RdYlBu',
                      'RdYlGn', 'Spectral', 'seismic', 'gist_earth', 'terrain', 'ocean',
                      'gist_stern', 'brg', 'CMRmap', 'cubehelix', 'gnuplot', 'gnuplot2',
                      'gist_ncar', 'nipy_spectral', 'jet', 'rainbow', 'gist_rainbow',
                      'hsv', 'flag', 'prism']
        self.mapstyle.addItems(self.cmaps)
        # set validators
        self.levelmin.setValidator(QtGui.QDoubleValidator(self.levelmin))
        self.levelmax.setValidator(QtGui.QDoubleValidator(self.levelmax))
        self.levelnum.setValidator(QtGui.QIntValidator(self.levelnum))
        self.levelstep.setValidator(QtGui.QDoubleValidator(self.levelstep))
        self.clipmin.setValidator(QtGui.QDoubleValidator(self.clipmin))
        self.clipmax.setValidator(QtGui.QDoubleValidator(self.clipmax))
        # Set icons in toolbar
        self.actionOpen.setIcon(QtGui.QIcon.fromTheme('document-open'))
        self.actionSave.setIcon(QtGui.QIcon.fromTheme('document-save'))
        self.actionSaveas.setIcon(QtGui.QIcon.fromTheme('document-save-as'))
        self.actionImport.setIcon(QtGui.QIcon.fromTheme('x-office-spreadsheet'))
        self.actionHome.setIcon(self.mpl_toolbar._icon('home.png'))
        self.actionPan.setIcon(self.mpl_toolbar._icon('move.png'))
        self.actionZoom.setIcon(self.mpl_toolbar._icon('zoom_to_rect.png'))
        self.actionGrid.setIcon(QtGui.QIcon.fromTheme('format-justify-fill'))
        self.actionAxes.setIcon(self.mpl_toolbar._icon('qt4_editor_options.png'))
        self.actionSavefig.setIcon(self.mpl_toolbar._icon('filesave.png'))
        # self.action3D.setIcon(QtGui.QIcon.fromTheme(''))
        self.actionProperties.setIcon(QtGui.QIcon.fromTheme('preferences-other'))
        self.actionQuit.setIcon(QtGui.QIcon.fromTheme('application-exit'))
        self.actionAbout.setIcon(QtGui.QIcon.fromTheme('help-about'))
        # connect signals
        self.actionOpen.triggered.connect(self.openProject)
        self.actionSave.triggered.connect(self.saveProject)
        self.actionSaveas.triggered.connect(self.saveProjectAs)
        self.actionImport.triggered.connect(self.import_data)
        self.actionHome.triggered.connect(self.mpl_toolbar.home)
        self.actionPan.triggered.connect(self.plotpan)
        self.actionZoom.triggered.connect(self.plotzoom)
        self.actionGrid.triggered.connect(self.plotgrid)
        self.actionAxes.triggered.connect(self.mpl_toolbar.edit_parameters)
        self.actionSavefig.triggered.connect(self.mpl_toolbar.save_figure)
        self.actionProperties.triggered.connect(self.edit_options)
        self.actionQuit.triggered.connect(self.close)
        self.actionAbout.triggered.connect(self.about_dialog.exec)
        # buttons signals
        self.buttonBox.button(QtWidgets.QDialogButtonBox.Apply).clicked.connect(self.apply_props)
        self.buttonBox.button(QtWidgets.QDialogButtonBox.RestoreDefaults).clicked.connect(self.restore_props)
        self.contcolor.clicked.connect(self.contours_color)
        self.action3D.triggered.connect(self.switch3d)
        # signals to calculate step size
        self.levelmin.editingFinished.connect(self.step_from_levels)
        self.levelmax.editingFinished.connect(self.step_from_levels)
        self.levelnum.editingFinished.connect(self.step_from_levels)
        self.setlevels.toggled.connect(self.step_from_levels)
        # almost done
        self.ready = False
        self.changed = False
        self.project = None
        if filename:
            self.import_data(filename)
        # ready
        self.statusbar.showMessage("Ready", 5000)

    def closeEvent(self, event):
        """Ask to save unsaved changes before closing the window."""
        if self.changed:
            quit_msg = 'Project have been changed. Save ?'
            qb = QtWidgets.QMessageBox
            reply = qb.question(self, 'Message', quit_msg,
                                qb.Cancel | qb.Discard | qb.Save, qb.Save)
            if reply == qb.Save:
                self.do_save()
                # only close if the save actually produced a project file
                if self.project is not None:
                    event.accept()
                else:
                    event.ignore()
            elif reply == qb.Discard:
                event.accept()
            else:
                event.ignore()

    def import_data(self, filename=None):
        """Import a Perple_X .tab or TCInvestigator .tci data file and
        (re)initialize the variable list and per-variable properties."""
        if not filename:
            filename = QtWidgets.QFileDialog.getOpenFileName(self, "Import data file", ".", "Perple_X Table (*.tab *.TAB);;TCInvestigator (*.tci *.TCI)")[0]
        if filename:
            if filename.lower().endswith('.tab'):
                self.data = GridData.from_tab(filename)
            elif filename.lower().endswith('.tci'):
                self.data = GridData.from_tci(filename)
            else:
                raise Exception('Unsupported file format')
            # populate listview and setup properties
            self.datafilename = filename
            self.ready = True
            self.project = None
            self.changed = True
            self.props = {}
            self._model = QtGui.QStandardItemModel(self.listView)
            for var in self.data.dep:
                item = QtGui.QStandardItem(var)
                item.setCheckable(True)
                self._model.appendRow(item)
                self.default_var_props(var)
            self.listView.setModel(self._model)
            self.listView.show()
            # connect listview signals (disconnect first to avoid duplicates)
            self.varSel = self.listView.selectionModel()
            try:
                self.varSel.selectionChanged.disconnect()
            except Exception:
                pass
            self.varSel.selectionChanged.connect(self.on_var_changed)
            try:
                self._model.itemChanged.disconnect()
            except Exception:
                pass
            self._model.itemChanged.connect(self.plot)
            # all done focus
            self.action3D.setChecked(False)  # no 3d on import
            self.varSel.setCurrentIndex(self._model.index(0, 0), QtCore.QItemSelectionModel.ClearAndSelect | QtCore.QItemSelectionModel.Rows)
            self.listView.setFocus()
            self.plot()
            self.statusbar.showMessage("Data from {} imported".format(self.data.label), 5000)

    def openProject(self, checked, projfile=None):
        """Open pywerami project
        """
        if self.changed:
            quit_msg = 'Project have been changed. Save ?'
            qb = QtWidgets.QMessageBox
            reply = qb.question(self, 'Message', quit_msg,
                                qb.Discard | qb.Save,
                                qb.Save)
            if reply == qb.Save:
                self.do_save()
        if projfile is None:
            qd = QtWidgets.QFileDialog
            filt = 'pywermi project (*.pwp)'
            projfile = qd.getOpenFileName(self, 'Open project',
                                           os.path.expanduser('~'),
                                           filt)[0]
        if os.path.exists(projfile):
            stream = gzip.open(projfile, 'rb')
            data = pickle.load(stream)
            stream.close()
            # set actual working dir in case folder was moved
            self.datafilename = data['datafilename']
            self.import_data(self.datafilename)
            self.props = data['props']
            # all done
            self.ready = True
            self.project = projfile
            self.changed = False
            # all done focus
            self.action3D.setChecked(False)  # no 3d on import
            self.varSel.setCurrentIndex(self._model.index(0, 0), QtCore.QItemSelectionModel.ClearAndSelect | QtCore.QItemSelectionModel.Rows)
            self.listView.setFocus()
            self.plot()
            self.statusbar.showMessage("Project loaded.", 5000)

    def saveProject(self):
        """Save project, asking for a file name on first save
        """
        if self.ready:
            if self.project is None:
                filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Save current project',
                                                                 os.path.dirname(self.datafilename),
                                                                 'pywerami project (*.pwp)')[0]
                if filename:
                    if not filename.lower().endswith('.pwp'):
                        filename = filename + '.pwp'
                    self.project = filename
                    self.do_save()
            else:
                self.do_save()

    def saveProjectAs(self):
        """Save project under a new file name
        """
        if self.ready:
            filename = QtWidgets.QFileDialog.getSaveFileName(self, 'Save current project as',
                                                             os.path.dirname(self.datafilename),
                                                             'pywerami project (*.pwp)')[0]
            if filename:
                if not filename.lower().endswith('.pwp'):
                    filename = filename + '.pwp'
                self.project = filename
                self.do_save()

    def do_save(self):
        """Do saving of project as a gzipped pickle of the data file path
        and the per-variable plot properties.
        """
        if self.project:
            # put to dict
            data = {'datafilename': self.datafilename,
                    'props': self.props}
            # do save
            stream = gzip.open(self.project, 'wb')
            pickle.dump(data, stream)
            stream.close()
            self.changed = False
            self.statusBar().showMessage('Project saved.')

    def contours_color(self):
        """Let the user pick a contour color; store it on the button style."""
        if self.ready:
            col = QtWidgets.QColorDialog.getColor()
            if col.isValid():
                self.contcolor.setStyleSheet("background-color: {}".format(col.name()))

    def step_from_levels(self):
        """Sanitize level inputs and recompute the step from min/max/count."""
        if self.ready:
            if int(self.levelnum.text()) < 2:
                self.levelnum.setText('2')
            if float(self.levelmax.text()) < float(self.levelmin.text()):
                self.levelmin.setText(self.levelmax.text())
            if self.setlevels.isChecked():
                step = (float(self.levelmax.text()) - float(self.levelmin.text())) / (int(self.levelnum.text()) - 1)
                self.levelstep.setText(repr(step))
                self.props[self.var]['step'] = step
                self.changed = True

    def default_var_props(self, var):
        """Initialize default plotting/processing properties for variable var."""
        if self.ready:
            data = self.data.get_var(var)
            prop = {}
            # levels
            prop['min'] = data.min()
            prop['max'] = data.max()
            prop['num'] = 10
            prop['step'] = (prop['max'] - prop['min']) / (prop['num'] - 1)
            prop['levels'] = 'num'
            prop['type'] = 'linear'
            # style
            prop['fill'] = False
            prop['cbar'] = False
            prop['opacity'] = 100
            prop['cmap'] = 'viridis'
            prop['contours'] = 'color'
            prop['color'] = '#000000'
            prop['label'] = False
            prop['digits'] = 3
            # processing
            prop['resample'] = 1
            prop['median'] = 1
            prop['gauss'] = 0
            prop['clipmin'] = data.min()
            prop['clipmax'] = data.max()
            self.props[var] = prop

    def set_var_props(self, var):
        """Push the stored properties of variable var into the UI widgets."""
        if self.ready:
            # levels
            self.levelmin.setText(repr(self.props[var]['min']))
            self.levelmax.setText(repr(self.props[var]['max']))
            self.levelnum.setText(repr(self.props[var]['num']))
            self.levelstep.setText(repr(self.props[var]['step']))
            if self.props[var]['levels'] == 'num':
                self.setlevels.setChecked(True)
            else:
                self.setstep.setChecked(True)
            if self.props[var]['type'] == 'linear':
                self.linlevel.setChecked(True)
            else:
                self.cdflevel.setChecked(True)
            # style
            if self.props[var]['fill']:
                self.fillstyle.setChecked(True)
            else:
                self.fillstyle.setChecked(False)
            if self.props[var].get('cbar', False):
                self.checkCBar.setChecked(True)
            else:
                self.checkCBar.setChecked(False)
            self.opacity.setValue(self.props[var]['opacity'])
            self.mapstyle.setCurrentIndex(self.cmaps.index(self.props[var]['cmap']))
            self.contcolor.setStyleSheet("background-color: {}".format(self.props[var]['color']))
            self.labelDigits.setValue(self.props[var]['digits'])
            if self.props[var]['contours'] == 'map':
                self.contcheckmap.setChecked(True)
            elif self.props[var]['contours'] == 'color':
                self.contcheckcolor.setChecked(True)
            else:
                self.contchecknone.setChecked(True)
            if self.props[var]['label']:
                self.contlabel.setChecked(True)
            else:
                self.contlabel.setChecked(False)
            # processing
            self.resample.setValue(self.props[var]['resample'])
            self.filtersize.setValue(self.props[var]['median'])
            self.filtersigma.setValue(self.props[var]['gauss'])
            self.clipmin.setText(repr(self.props[var]['clipmin']))
            self.clipmax.setText(repr(self.props[var]['clipmax']))

    def on_var_changed(self, selected):
        """React to variable selection change: refresh the property panel
        and, in 3D mode, replot the surface for the new variable."""
        if self.ready:
            self.var = self.data.dep[selected.indexes()[0].row()]
            self.set_var_props(self.var)
            if self.action3D.isChecked():
                self.plot()

    def apply_props(self):
        """Read the property widgets back into self.props and replot."""
        if self.ready:
            # levels
            self.props[self.var]['min'] = float(self.levelmin.text())
            self.props[self.var]['max'] = float(self.levelmax.text())
            self.props[self.var]['num'] = int(self.levelnum.text())
            self.props[self.var]['step'] = float(self.levelstep.text())
            if self.setlevels.isChecked():
                self.props[self.var]['levels'] = 'num'
            else:
                self.props[self.var]['levels'] = 'step'
            if self.linlevel.isChecked():
                self.props[self.var]['type'] = 'linear'
            else:
                self.props[self.var]['type'] = 'cdf'
            # style
            if self.fillstyle.isChecked():
                self.props[self.var]['fill'] = True
            else:
                self.props[self.var]['fill'] = False
            if self.checkCBar.isChecked():
                self.props[self.var]['cbar'] = True
            else:
                self.props[self.var]['cbar'] = False
            self.props[self.var]['opacity'] = self.opacity.value()
            self.props[self.var]['cmap'] = str(self.mapstyle.currentText())
            self.props[self.var]['color'] = str(self.contcolor.palette().color(1).name())
            self.props[self.var]['digits'] = self.labelDigits.value()
            if self.contcheckmap.isChecked():
                self.props[self.var]['contours'] = 'map'
            elif self.contcheckcolor.isChecked():
                self.props[self.var]['contours'] = 'color'
            else:
                self.props[self.var]['contours'] = ''
            if self.contlabel.isChecked():
                self.props[self.var]['label'] = True
            else:
                self.props[self.var]['label'] = False
            # processing
            self.props[self.var]['resample'] = self.resample.value()
            self.props[self.var]['median'] = self.filtersize.value()
            self.props[self.var]['gauss'] = self.filtersigma.value()
            self.props[self.var]['clipmin'] = float(self.clipmin.text())
            self.props[self.var]['clipmax'] = float(self.clipmax.text())
            self.changed = True
            self.plot()

    def restore_props(self):
        """Reset the current variable's properties to defaults and replot."""
        if self.ready:
            self.default_var_props(self.var)
            self.set_var_props(self.var)
            self.plot()

    def edit_options(self):
        """Open the application options dialog."""
        dlg = OptionsForm(self)
        dlg.exec_()

    def plotpan(self):
        """Toggle matplotlib pan mode (mutually exclusive with zoom)."""
        self.actionZoom.setChecked(False)
        self.mpl_toolbar.pan()

    def plotzoom(self):
        """Toggle matplotlib zoom mode (mutually exclusive with pan)."""
        self.actionPan.setChecked(False)
        self.mpl_toolbar.zoom()

    def plotgrid(self):
        """Toggle the axes grid and redraw."""
        self._ax.grid()
        self._canvas.draw()

    def switch3d(self):
        """Replot after switching between 2D contour and 3D surface mode."""
        if self.ready:
            self.plot()

    def plot(self, item=None):
        """Redraw the plot: all checked variables as 2D contours, or the
        selected variable as a 3D surface when 3D mode is active.

        item -- optional QStandardItem whose checkbox change triggered
        the redraw; used to sync the list selection.
        """
        if self.ready:
            if not self.action3D.isChecked():
                self._fig.clear()
                self._ax = self._fig.add_subplot(111)
            else:
                self._fig.clear()
                self._ax = self._fig.add_subplot(111, projection='3d')
            if item:
                index = self._model.createIndex(item.row(), item.column())
                if index.isValid():
                    self.listView.setCurrentIndex(index)
            if not self.action3D.isChecked():
                extent = self.data.get_extent()
                i = 0
                while self._model.item(i):
                    if self._model.item(i).checkState():
                        CS = None
                        var = str(self._model.item(i).text())
                        # get data, smooth and clip
                        # NOTE: np.float was removed in NumPy 1.24; use builtin float
                        data = self.data.get_var(var, nan=float(self.settings.value("nan", "NaN", type=str)))
                        if self.props[var]['resample'] > 1:
                            data = np.ma.array(ndimage.zoom(data.filled(0), self.props[var]['resample']), mask=ndimage.zoom(data.mask, self.props[var]['resample'], order=0))
                        if self.props[var]['median'] > 1:
                            data = np.ma.array(ndimage.median_filter(data, size=self.props[var]['median'] * self.props[var]['resample']), mask=data.mask)
                        if self.props[var]['gauss'] > 0:
                            data = np.ma.array(ndimage.gaussian_filter(data, sigma=self.props[var]['gauss'] * self.props[var]['resample']), mask=data.mask)
                        data = np.ma.masked_outside(data, self.props[var]['clipmin'], self.props[var]['clipmax'])
                        if self.props[var]['fill']:
                            img = self._ax.imshow(data, interpolation='none', origin='lower', extent=extent, aspect='auto', cmap=cm.get_cmap(self.props[var]['cmap']), alpha=self.props[var]['opacity'] / 100.0)
                            if self.props[var]['cbar']:
                                cbar = self._fig.colorbar(img)
                                cbar.ax.set_ylabel(var)
                        if self.props[var]['min'] == self.props[var]['max']:
                            clevels = np.array([self.props[var]['min']])
                        else:
                            if self.props[var]['type'] == 'linear':
                                if self.props[var]['levels'] == 'num':
                                    clevels = np.linspace(self.props[var]['min'], self.props[var]['max'], self.props[var]['num'])
                                else:
                                    # trick to include max in levels
                                    clevels = np.arange(self.props[var]['min'], self.props[var]['max'] + np.finfo(np.float32).eps, self.props[var]['step'])
                            else:
                                # cdf based on histogram binned acording to the Freedman-Diaconis rule
                                data = np.ma.masked_outside(data, self.props[var]['min'], self.props[var]['max'])
                                v = np.sort(data.compressed())
                                IQR = v[int(round((v.size - 1) * float(0.75)))] - v[int(round((v.size - 1) * float(0.25)))]
                                bin_size = 2 * IQR * v.size**(-1.0 / 3)
                                nbins = int(round(max(self.props[var]['num'], (v[-1] - v[0]) / (bin_size + 0.001))))
                                hist, bin_edges = np.histogram(v, bins=nbins)
                                cdf = np.cumsum(hist)
                                cdfx = np.cumsum(np.diff(bin_edges)) + bin_edges[:2].sum() / 2
                                # clevels = np.interp(np.linspace(cdf[0],cdf[-1],self.props[var]['num'] + 2)[1:-1], cdf, cdfx)
                                clevels = np.interp(np.linspace(cdf[0], cdf[-1], self.props[var]['num']), cdf, cdfx)
                                clevels = np.round(10**self.props[var]['digits'] * clevels) / 10**self.props[var]['digits']
                                # Contour levels must be increasing
                                clevels = np.append(clevels[:1], clevels[1:][np.diff(clevels) > 0])
                        if self.props[var]['contours'] == 'map':
                            CS = self._ax.contour(self.data.get_xrange(self.props[var]['resample']), self.data.get_yrange(self.props[var]['resample']), data, clevels, cmap=cm.get_cmap(self.props[var]['cmap']))
                        elif self.props[var]['contours'] == 'color':
                            CS = self._ax.contour(self.data.get_xrange(self.props[var]['resample']), self.data.get_yrange(self.props[var]['resample']), data, clevels, colors=self.props[var]['color'])
                        if self.props[var]['label'] and CS:
                            self._ax.clabel(CS, fontsize=8, inline=1, fmt='%g')
                    i += 1
                self._ax.axis(extent)
                self._ax.set_title(self.data.label)
            else:
                # get data, smooth and clip
                data = self.data.get_var(self.var)
                if self.props[self.var]['resample'] > 1:
                    data = np.ma.array(ndimage.zoom(data.filled(0), self.props[self.var]['resample']), mask=ndimage.zoom(data.mask, self.props[self.var]['resample'], order=0))
                if self.props[self.var]['median'] > 1:
                    data = np.ma.array(ndimage.median_filter(data, size=self.props[self.var]['median'] * self.props[self.var]['resample']), mask=data.mask)
                if self.props[self.var]['gauss'] > 0:
                    data = np.ma.array(ndimage.gaussian_filter(data, sigma=self.props[self.var]['gauss'] * self.props[self.var]['resample']), mask=data.mask)
                data = np.ma.masked_outside(data, self.props[self.var]['clipmin'], self.props[self.var]['clipmax'])
                x, y = np.meshgrid(self.data.get_xrange(self.props[self.var]['resample']), self.data.get_yrange(self.props[self.var]['resample']))
                # NOTE: np.NaN was removed in NumPy 2.0; np.nan is the canonical spelling
                img = self._ax.plot_surface(x, y, data.filled(np.nan), vmin=data.min(), vmax=data.max(), cmap=cm.get_cmap(self.props[self.var]['cmap']), linewidth=0.5, alpha=self.props[self.var]['opacity'] / 100.0)
                self._ax.view_init(azim=235, elev=30)
                if self.props[self.var]['cbar']:
                    cbar = self._fig.colorbar(img)
                    cbar.ax.set_ylabel(self.var)
                self._ax.set_xlabel(self.data.ind[self.data.xvar]['name'])
                self._ax.set_ylabel(self.data.ind[self.data.yvar]['name'])
            self._fig.tight_layout()
            self._canvas.draw()
class OptionsForm(QtWidgets.QDialog):
    """Application options dialog (currently only the not-a-number marker)."""

    def __init__(self, parent=None):
        """Build the options form pre-filled from persistent settings."""
        super(OptionsForm, self).__init__(parent)
        settings = QtCore.QSettings("LX", "pywerami")
        layout = QtWidgets.QVBoxLayout(self)
        form = QtWidgets.QWidget()
        formlayout = QtWidgets.QFormLayout(form)
        # not-a-number marker used when importing data files
        self.nan = QtWidgets.QLineEdit(settings.value("nan", "NaN", type=str), self)
        formlayout.addRow('Not a number', self.nan)
        form.setLayout(formlayout)
        buttonBox = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel)
        layout.addWidget(form)
        layout.addWidget(buttonBox)
        self.setLayout(layout)
        buttonBox.accepted.connect(self.check)
        buttonBox.rejected.connect(self.reject)
        self.setWindowTitle("PyWerami options")

    def check(self):
        """Validate the NaN field parses as a float before accepting."""
        try:
            # np.float was removed in NumPy 1.24; builtin float is equivalent here
            float(self.nan.text())
        except ValueError:
            QtWidgets.QMessageBox.warning(self, "Warning", "Not a number must be float number or NaN")
        else:
            self.accept()

    def accept(self):
        """Persist the validated settings and close the dialog."""
        settings = QtCore.QSettings("LX", "pywerami")
        settings.setValue("nan", self.nan.text())
        QtWidgets.QDialog.accept(self)
class AboutDialog(QtWidgets.QDialog):
    """About dialog
    """
    def __init__(self, version, parent=None):
        """Display a dialog that shows application information.

        version -- application version string shown in the dialog
        parent -- optional parent widget
        """
        super(AboutDialog, self).__init__(parent)
        self.setWindowTitle('About')
        self.resize(300, 100)
        # fixed typo in the user-visible text ("an countour" -> "a contour")
        about = QtWidgets.QLabel('PyWerami {}\nstand-alone program to make a contour/3D plot from contour data'.format(version))
        about.setAlignment(QtCore.Qt.AlignCenter)
        author = QtWidgets.QLabel('<NAME>')
        author.setAlignment(QtCore.Qt.AlignCenter)
        github = QtWidgets.QLabel('GitHub: <a href="https://github.com/ondrolexa/pywerami">ondrolexa</a>')
        github.setAlignment(QtCore.Qt.AlignCenter)
        github.setOpenExternalLinks(True)
        self.layout = QtWidgets.QVBoxLayout()
        self.layout.setAlignment(QtCore.Qt.AlignVCenter)
        self.layout.addWidget(about)
        self.layout.addWidget(author)
        self.layout.addWidget(github)
        self.setLayout(self.layout)
def process_cl_args():
    """Parse the command line.

    Returns a (parsed_args, unparsed_args) pair: parsed_args holds the
    optional positional data-file path, unparsed_args keeps any arguments
    this program does not recognize (e.g. Qt options) for later consumption.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('filename', action='store', nargs='?', default=None, help="Data file")
    return parser.parse_known_args()
def main():
    """Entry point: create the Qt application and show the main window."""
    args, qt_args = process_cl_args()
    # hand the unrecognized arguments to Qt so it can honor its own flags
    app = QtWidgets.QApplication(qt_args)
    window = PyWeramiWindow(args.filename)
    window.show()
    sys.exit(app.exec_())
if __name__ == "__main__":
main()
| [
"matplotlib.backends.backend_qt5agg.NavigationToolbar2QT",
"PyQt5.QtGui.QIcon",
"gzip.open",
"scipy.ndimage.median_filter",
"numpy.array",
"numpy.cumsum",
"PyQt5.QtWidgets.QApplication",
"scipy.ndimage.gaussian_filter",
"PyQt5.QtWidgets.QVBoxLayout",
"PyQt5.QtWidgets.QFileDialog.getOpenFileName",
... | [((1352, 1376), 'matplotlib.use', 'matplotlib.use', (['"""Qt5Agg"""'], {}), "('Qt5Agg')\n", (1366, 1376), False, 'import matplotlib\n'), ((891, 919), 'pkg_resources.get_distribution', 'get_distribution', (['"""pywerami"""'], {}), "('pywerami')\n", (907, 919), False, 'from pkg_resources import resource_filename, get_distribution, DistributionNotFound\n'), ((976, 1008), 'os.path.normcase', 'os.path.normcase', (['_dist.location'], {}), '(_dist.location)\n', (992, 1008), False, 'import os\n'), ((1020, 1046), 'os.path.normcase', 'os.path.normcase', (['__file__'], {}), '(__file__)\n', (1036, 1046), False, 'import os\n'), ((28564, 28589), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (28587, 28589), False, 'import argparse\n'), ((28858, 28895), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['unparsed_args'], {}), '(unparsed_args)\n', (28880, 28895), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1569, 1603), 'PyQt5.QtCore.QSettings', 'QtCore.QSettings', (['"""LX"""', '"""pywerami"""'], {}), "('LX', 'pywerami')\n", (1585, 1603), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1651, 1676), 'matplotlib.figure.Figure', 'Figure', ([], {'facecolor': '"""white"""'}), "(facecolor='white')\n", (1657, 1676), False, 'from matplotlib.figure import Figure\n'), ((1747, 1770), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg', 'FigureCanvas', (['self._fig'], {}), '(self._fig)\n', (1759, 1770), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas, NavigationToolbar2QT as NavigationToolbar\n'), ((1946, 1990), 'matplotlib.backends.backend_qt5agg.NavigationToolbar2QT', 'NavigationToolbar', (['self._canvas', 'self.widget'], {}), '(self._canvas, self.widget)\n', (1963, 1990), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas, NavigationToolbar2QT as NavigationToolbar\n'), ((2134, 2184), 'pkg_resources.resource_filename', 'resource_filename', 
(['__name__', '"""images/pywerami.png"""'], {}), "(__name__, 'images/pywerami.png')\n", (2151, 2184), False, 'from pkg_resources import resource_filename, get_distribution, DistributionNotFound\n'), ((9791, 9815), 'os.path.exists', 'os.path.exists', (['projfile'], {}), '(projfile)\n', (9805, 9815), False, 'import os\n'), ((26101, 26135), 'PyQt5.QtCore.QSettings', 'QtCore.QSettings', (['"""LX"""', '"""pywerami"""'], {}), "('LX', 'pywerami')\n", (26117, 26135), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((26153, 26180), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', (['self'], {}), '(self)\n', (26174, 26180), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((26196, 26215), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (26213, 26215), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((26237, 26264), 'PyQt5.QtWidgets.QFormLayout', 'QtWidgets.QFormLayout', (['form'], {}), '(form)\n', (26258, 26264), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((26697, 26795), 'PyQt5.QtWidgets.QDialogButtonBox', 'QtWidgets.QDialogButtonBox', (['(QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel)'], {}), '(QtWidgets.QDialogButtonBox.Ok | QtWidgets.\n QDialogButtonBox.Cancel)\n', (26723, 26795), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((27304, 27338), 'PyQt5.QtCore.QSettings', 'QtCore.QSettings', (['"""LX"""', '"""pywerami"""'], {}), "('LX', 'pywerami')\n", (27320, 27338), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((27460, 27490), 'PyQt5.QtWidgets.QDialog.accept', 'QtWidgets.QDialog.accept', (['self'], {}), '(self)\n', (27484, 27490), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((27992, 28018), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['"""<NAME>"""'], {}), "('<NAME>')\n", (28008, 28018), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((28088, 28182), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['"""GitHub: <a 
href="https://github.com/ondrolexa/pywerami">ondrolexa</a>"""'], {}), '(\n \'GitHub: <a href="https://github.com/ondrolexa/pywerami">ondrolexa</a>\')\n', (28104, 28182), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((28294, 28317), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ([], {}), '()\n', (28315, 28317), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1074, 1106), 'os.path.join', 'os.path.join', (['dist_loc', '"""foobar"""'], {}), "(dist_loc, 'foobar')\n", (1086, 1106), False, 'import os\n'), ((2212, 2236), 'PyQt5.QtGui.QIcon', 'QtGui.QIcon', (['window_icon'], {}), '(window_icon)\n', (2223, 2236), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3251, 3288), 'PyQt5.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', (['self.levelmin'], {}), '(self.levelmin)\n', (3273, 3288), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3325, 3362), 'PyQt5.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', (['self.levelmax'], {}), '(self.levelmax)\n', (3347, 3362), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3399, 3433), 'PyQt5.QtGui.QIntValidator', 'QtGui.QIntValidator', (['self.levelnum'], {}), '(self.levelnum)\n', (3418, 3433), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3471, 3509), 'PyQt5.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', (['self.levelstep'], {}), '(self.levelstep)\n', (3493, 3509), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3545, 3581), 'PyQt5.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', (['self.clipmin'], {}), '(self.clipmin)\n', (3567, 3581), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3617, 3653), 'PyQt5.QtGui.QDoubleValidator', 'QtGui.QDoubleValidator', (['self.clipmax'], {}), '(self.clipmax)\n', (3639, 3653), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3719, 3757), 'PyQt5.QtGui.QIcon.fromTheme', 'QtGui.QIcon.fromTheme', (['"""document-open"""'], {}), "('document-open')\n", (3740, 3757), False, 'from PyQt5 import QtCore, 
QtGui, QtWidgets\n'), ((3791, 3829), 'PyQt5.QtGui.QIcon.fromTheme', 'QtGui.QIcon.fromTheme', (['"""document-save"""'], {}), "('document-save')\n", (3812, 3829), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3865, 3906), 'PyQt5.QtGui.QIcon.fromTheme', 'QtGui.QIcon.fromTheme', (['"""document-save-as"""'], {}), "('document-save-as')\n", (3886, 3906), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3942, 3987), 'PyQt5.QtGui.QIcon.fromTheme', 'QtGui.QIcon.fromTheme', (['"""x-office-spreadsheet"""'], {}), "('x-office-spreadsheet')\n", (3963, 3987), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4232, 4276), 'PyQt5.QtGui.QIcon.fromTheme', 'QtGui.QIcon.fromTheme', (['"""format-justify-fill"""'], {}), "('format-justify-fill')\n", (4253, 4276), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4532, 4574), 'PyQt5.QtGui.QIcon.fromTheme', 'QtGui.QIcon.fromTheme', (['"""preferences-other"""'], {}), "('preferences-other')\n", (4553, 4574), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4608, 4649), 'PyQt5.QtGui.QIcon.fromTheme', 'QtGui.QIcon.fromTheme', (['"""application-exit"""'], {}), "('application-exit')\n", (4629, 4649), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4684, 4719), 'PyQt5.QtGui.QIcon.fromTheme', 'QtGui.QIcon.fromTheme', (['"""help-about"""'], {}), "('help-about')\n", (4705, 4719), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7851, 7890), 'PyQt5.QtGui.QStandardItemModel', 'QtGui.QStandardItemModel', (['self.listView'], {}), '(self.listView)\n', (7875, 7890), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9838, 9863), 'gzip.open', 'gzip.open', (['projfile', '"""rb"""'], {}), "(projfile, 'rb')\n", (9847, 9863), False, 'import gzip\n'), ((9883, 9902), 'pickle.load', 'pickle.load', (['stream'], {}), '(stream)\n', (9894, 9902), False, 'import pickle\n'), ((12136, 12165), 'gzip.open', 'gzip.open', (['self.project', '"""wb"""'], {}), "(self.project, 'wb')\n", (12145, 12165), False, 
'import gzip\n'), ((12178, 12203), 'pickle.dump', 'pickle.dump', (['data', 'stream'], {}), '(data, stream)\n', (12189, 12203), False, 'import pickle\n'), ((12395, 12428), 'PyQt5.QtWidgets.QColorDialog.getColor', 'QtWidgets.QColorDialog.getColor', ([], {}), '()\n', (12426, 12428), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7163, 7297), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QtWidgets.QFileDialog.getOpenFileName', (['self', '"""Import data file"""', '"""."""', '"""Perple_X Table (*.tab *.TAB);;TCInvestigator (*.tci *.TCI)"""'], {}), "(self, 'Import data file', '.',\n 'Perple_X Table (*.tab *.TAB);;TCInvestigator (*.tci *.TCI)')\n", (7200, 7297), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((7952, 7976), 'PyQt5.QtGui.QStandardItem', 'QtGui.QStandardItem', (['var'], {}), '(var)\n', (7971, 7976), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((25085, 25182), 'numpy.ma.masked_outside', 'np.ma.masked_outside', (['data', "self.props[self.var]['clipmin']", "self.props[self.var]['clipmax']"], {}), "(data, self.props[self.var]['clipmin'], self.props[self\n .var]['clipmax'])\n", (25105, 25182), True, 'import numpy as np\n'), ((27171, 27265), 'PyQt5.QtWidgets.QMessageBox.warning', 'QtWidgets.QMessageBox.warning', (['self', '"""Warning"""', '"""Not a number must be float number or NaN"""'], {}), "(self, 'Warning',\n 'Not a number must be float number or NaN')\n", (27200, 27265), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((9704, 9727), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (9722, 9727), False, 'import os\n'), ((11548, 11582), 'os.path.dirname', 'os.path.dirname', (['self.datafilename'], {}), '(self.datafilename)\n', (11563, 11582), False, 'import os\n'), ((10897, 10931), 'os.path.dirname', 'os.path.dirname', (['self.datafilename'], {}), '(self.datafilename)\n', (10912, 10931), False, 'import os\n'), ((20894, 20981), 'numpy.ma.masked_outside', 'np.ma.masked_outside', (['data', 
"self.props[var]['clipmin']", "self.props[var]['clipmax']"], {}), "(data, self.props[var]['clipmin'], self.props[var][\n 'clipmax'])\n", (20914, 20981), True, 'import numpy as np\n'), ((24733, 24837), 'scipy.ndimage.median_filter', 'ndimage.median_filter', (['data'], {'size': "(self.props[self.var]['median'] * self.props[self.var]['resample'])"}), "(data, size=self.props[self.var]['median'] * self.\n props[self.var]['resample'])\n", (24754, 24837), False, 'from scipy import ndimage\n'), ((24943, 25049), 'scipy.ndimage.gaussian_filter', 'ndimage.gaussian_filter', (['data'], {'sigma': "(self.props[self.var]['gauss'] * self.props[self.var]['resample'])"}), "(data, sigma=self.props[self.var]['gauss'] * self.\n props[self.var]['resample'])\n", (24966, 25049), False, 'from scipy import ndimage\n'), ((25435, 25476), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (["self.props[self.var]['cmap']"], {}), "(self.props[self.var]['cmap'])\n", (25446, 25476), False, 'from matplotlib import cm\n'), ((21528, 21562), 'numpy.array', 'np.array', (["[self.props[var]['min']]"], {}), "([self.props[var]['min']])\n", (21536, 21562), True, 'import numpy as np\n'), ((23257, 23308), 'numpy.round', 'np.round', (["(10 ** self.props[var]['digits'] * clevels)"], {}), "(10 ** self.props[var]['digits'] * clevels)\n", (23265, 23308), True, 'import numpy as np\n'), ((24571, 24637), 'scipy.ndimage.zoom', 'ndimage.zoom', (['data.mask', "self.props[self.var]['resample']"], {'order': '(0)'}), "(data.mask, self.props[self.var]['resample'], order=0)\n", (24583, 24637), False, 'from scipy import ndimage\n'), ((20543, 20637), 'scipy.ndimage.median_filter', 'ndimage.median_filter', (['data'], {'size': "(self.props[var]['median'] * self.props[var]['resample'])"}), "(data, size=self.props[var]['median'] * self.props[var\n ]['resample'])\n", (20564, 20637), False, 'from scipy import ndimage\n'), ((20754, 20850), 'scipy.ndimage.gaussian_filter', 'ndimage.gaussian_filter', (['data'], {'sigma': 
"(self.props[var]['gauss'] * self.props[var]['resample'])"}), "(data, sigma=self.props[var]['gauss'] * self.props[\n var]['resample'])\n", (20777, 20850), False, 'from scipy import ndimage\n'), ((22301, 22375), 'numpy.ma.masked_outside', 'np.ma.masked_outside', (['data', "self.props[var]['min']", "self.props[var]['max']"], {}), "(data, self.props[var]['min'], self.props[var]['max'])\n", (22321, 22375), True, 'import numpy as np\n'), ((22802, 22829), 'numpy.histogram', 'np.histogram', (['v'], {'bins': 'nbins'}), '(v, bins=nbins)\n', (22814, 22829), True, 'import numpy as np\n'), ((22868, 22883), 'numpy.cumsum', 'np.cumsum', (['hist'], {}), '(hist)\n', (22877, 22883), True, 'import numpy as np\n'), ((20375, 20436), 'scipy.ndimage.zoom', 'ndimage.zoom', (['data.mask', "self.props[var]['resample']"], {'order': '(0)'}), "(data.mask, self.props[var]['resample'], order=0)\n", (20387, 20436), False, 'from scipy import ndimage\n'), ((21158, 21194), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (["self.props[var]['cmap']"], {}), "(self.props[var]['cmap'])\n", (21169, 21194), False, 'from matplotlib import cm\n'), ((21778, 21866), 'numpy.linspace', 'np.linspace', (["self.props[var]['min']", "self.props[var]['max']", "self.props[var]['num']"], {}), "(self.props[var]['min'], self.props[var]['max'], self.props[var]\n ['num'])\n", (21789, 21866), True, 'import numpy as np\n'), ((23158, 23210), 'numpy.linspace', 'np.linspace', (['cdf[0]', 'cdf[-1]', "self.props[var]['num']"], {}), "(cdf[0], cdf[-1], self.props[var]['num'])\n", (23169, 23210), True, 'import numpy as np\n'), ((23468, 23484), 'numpy.diff', 'np.diff', (['clevels'], {}), '(clevels)\n', (23475, 23484), True, 'import numpy as np\n'), ((23728, 23764), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (["self.props[var]['cmap']"], {}), "(self.props[var]['cmap'])\n", (23739, 23764), False, 'from matplotlib import cm\n'), ((22933, 22951), 'numpy.diff', 'np.diff', (['bin_edges'], {}), '(bin_edges)\n', (22940, 22951), True, 'import numpy as 
np\n'), ((22074, 22094), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (22082, 22094), True, 'import numpy as np\n')] |
from scipy.io import netcdf
import numpy as np
import numpy.matlib
# Run directory. NOTE(review): basedir is assigned three times in a row --
# only the last assignment takes effect; the first two are dead code.
basedir = '/marconi_scratch/userexternal/dstonge0/stella/rad_test/krook/krook1c_alt/'
basedir = '/marconi_work/FUA34_MULTEI/stonge0_FUA34/rad_test/2nd_deriv/rho_scan2/r0.001ky/'
basedir = '/marconi_work/FUA34_MULTEI/stonge0_FUA34/rad_test/fg_drive/0.001d/'
# NOTE(review): all three domains point at the *center* output file --
# confirm whether left.out.nc / right.out.nc were intended here.
right_file  = basedir + 'center.out.nc'
center_file = basedir + 'center.out.nc'
left_file   = basedir + 'center.out.nc'
# open the three radial-domain netcdf outputs read-only
right_nc  = netcdf.netcdf_file(right_file,'r')
center_nc = netcdf.netcdf_file(center_file,'r')
left_nc   = netcdf.netcdf_file(left_file,'r')
def read_stella_float(infile, var):
  """Fetch variable `var` from an open netcdf file.

  Parameters
  ----------
  infile : netcdf file object exposing a `.variables` mapping
  var : str
      Name of the variable to read.

  Returns
  -------
  arr : the variable's data, or a length-1 float array when absent
  flag : bool
      True when the variable was found, False otherwise.
  """
  import numpy as np
  try:
    arr = infile.variables[var][:]
    flag = True
  except KeyError:
    print('INFO: '+var+' not found in netcdf file')
    arr = np.arange(1, dtype=float)
    # BUG FIX: was `flag = FLAG` -- an undefined name that raised NameError
    # whenever the variable was missing; the intended sentinel is False.
    flag = False
  return arr, flag
def phi_vs_t_to_x(infile,var,ny,nx):
  """Read the potential (dims: t, tube, z, kx, ky, ri) and inverse-FFT kx -> x.

  Takes the first flux tube, combines the real/imaginary parts into a
  complex array, scales by ny*nx to undo the FFT normalization, and
  transforms the kx axis (axis 2) to real space.
  """
  raw, _found = read_stella_float(infile, var)
  spectral = ny*nx*(raw[:, 0, :, :, :, 0] + 1j*raw[:, 0, :, :, :, 1])
  return np.fft.ifft(spectral, axis=2)
def mom_vs_t_to_x(infile,var,ny,nx):
  """Read a moment (dims: t, spec, tube, z, kx, ky, ri) and inverse-FFT kx -> x.

  Uses the first species and first flux tube; returns a complex array with
  the kx axis (axis 2) transformed to real space, scaled by ny*nx.
  """
  raw, _found = read_stella_float(infile, var)
  spectral = ny*nx*(raw[:, 0, 0, :, :, :, 0] + 1j*raw[:, 0, 0, :, :, :, 1])
  return np.fft.ifft(spectral, axis=2)
print('0')
# Grid dimensions: number of ky modes and of kx modes for each radial domain.
naky  = center_nc.dimensions['ky']
nakxl = left_nc.dimensions['kx']
nakxc = center_nc.dimensions['kx']
nakxr = right_nc.dimensions['kx']
ky  = np.copy(center_nc.variables['ky'][:])
kxc = np.copy(center_nc.variables['kx'][:])
# real-space grid spacing: box length (2*pi/kx_min) over number of kx points
dx = 2*np.pi/kxc[1]/nakxc
t = np.copy(center_nc.variables['t'][:])
nt = t.size
zed  = np.copy(center_nc.variables['zed'][:])
nzed = zed.size
# BUG FIX: index of the middle of the zed grid (outboard midplane); was
# `((nzed+1)/2) - 1`, which is a float under Python-3 true division and
# later fails when used as an array index -- use integer division.
omp = ((nzed+1)//2) - 1
delzed = zed[1]-zed[0]
# ky-mode weighting: ky>0 modes count twice (reality condition), ky=0 once
fac = 2*np.ones(naky)
fac[0] = 1
jacobl = np.copy(  left_nc.variables['jacob'][:])
jacobc = np.copy(center_nc.variables['jacob'][:])
jacobr = np.copy( right_nc.variables['jacob'][:])
print('1')
# Field-line-average weights dl/B, normalized to sum to one; the repeated
# end point (last zed index) is zeroed so it is not double counted.
dl_over_bl = np.squeeze(delzed*jacobl)
dl_over_bc = np.squeeze(delzed*jacobc)
dl_over_br = np.squeeze(delzed*jacobr)
dl_over_bl[nzed-1] = 0.0
dl_over_bc[nzed-1] = 0.0
dl_over_br[nzed-1] = 0.0
dl_over_bl = dl_over_bl/sum(dl_over_bl)
dl_over_bc = dl_over_bc/sum(dl_over_bc)
dl_over_br = dl_over_br/sum(dl_over_br)
# Broadcast the z-weights over the (kx, ky) plane; transpose of the tiled
# (ky, kx, z) array yields shape (z, kx, ky).
dobl = np.transpose(np.matlib.tile(dl_over_bl,(naky,nakxl,1)))
dobc = np.transpose(np.matlib.tile(dl_over_bc,(naky,nakxc,1)))
dobr = np.transpose(np.matlib.tile(dl_over_br,(naky,nakxr,1)))
print('2')
# Electrostatic potential, transformed to real space in x, per radial domain.
phil_xky = phi_vs_t_to_x(left_nc  ,'phi_vs_t',naky,nakxl)
phic_xky = phi_vs_t_to_x(center_nc,'phi_vs_t',naky,nakxc)
phir_xky = phi_vs_t_to_x(right_nc ,'phi_vs_t',naky,nakxr)
print('3')
# Fluid moments: density, parallel flow and temperature fluctuations.
densl_xky = mom_vs_t_to_x(left_nc  ,'density',naky,nakxl)
densc_xky = mom_vs_t_to_x(center_nc,'density',naky,nakxc)
densr_xky = mom_vs_t_to_x(right_nc ,'density',naky,nakxr)
uparl_xky = mom_vs_t_to_x(left_nc  ,'upar',naky,nakxl)
uparc_xky = mom_vs_t_to_x(center_nc,'upar',naky,nakxc)
uparr_xky = mom_vs_t_to_x(right_nc ,'upar',naky,nakxr)
templ_xky = mom_vs_t_to_x(left_nc  ,'temperature',naky,nakxl)
tempc_xky = mom_vs_t_to_x(center_nc,'temperature',naky,nakxc)
tempr_xky = mom_vs_t_to_x(right_nc ,'temperature',naky,nakxr)
# Radial ExB drift velocity v_x ~ i*ky*phi (still spectral in ky).
vxl = 1j*ky*phil_xky
vxc = 1j*ky*phic_xky
vxr = 1j*ky*phir_xky
print('4')
# Zonal (ky=0) field-line-averaged profiles on the center domain.
phic_zf = np.real(np.sum(dobc[:,:,0]*phic_xky[:,:,:,0],1))
dens_zf = np.real(np.sum(dobc[:,:,0]*densc_xky[:,:,:,0],1))
upar_zf = np.real(np.sum(dobc[:,:,0]*uparc_xky[:,:,:,0],1))
temp_zf = np.real(np.sum(dobc[:,:,0]*tempc_xky[:,:,:,0],1))
# Midplane squared amplitudes, averaged over ky.
phic2 = np.real(np.mean(np.abs(phic_xky[:,omp,:,:])**2,2))
dens2 = np.real(np.mean(np.abs(densc_xky[:,omp,:,:])**2,2))
upar2 = np.real(np.mean(np.abs(uparc_xky[:,omp,:,:])**2,2))
temp2 = np.real(np.mean(np.abs(tempc_xky[:,omp,:,:])**2,2))
print('5')
# Radial fluxes of particles (d), parallel momentum (u) and heat (T) for
# the left domain: 0.5*sum_z[fac*Re(v_x*conj(moment))*dl/B], averaged over
# ky (axis 2).  NOTE(review): the result is divided by naky *in addition*
# to the mean over the ky axis -- confirm the intended normalization.
fluxl_d = 0.5*np.mean(np.sum(fac*np.real(vxl*np.conj(densl_xky)*dobl),1),2)/naky
fluxl_u = 0.5*np.mean(np.sum(fac*np.real(vxl*np.conj(uparl_xky)*dobl),1),2)/naky
fluxl_T = 0.5*np.mean(np.sum(fac*np.real(vxl*np.conj(templ_xky)*dobl),1),2)/naky
# Write the left-domain fluxes as a t-x table, one block per time step.
cout = open(basedir + 'left.fluxes_t','w')
cout.write('[1] t ')
cout.write('[2] x ')
cout.write('[3] flux_d')
cout.write('[4] flux_u')
cout.write('[5] flux_t')
cout.write('\n')
print('6')
for i in range (0, nt):
  for j in range (0, nakxl):
    cout.write('%f ' % t[i])
    cout.write('%f ' % (dx*j))
    cout.write('%f ' % fluxl_d[i,j])
    cout.write('%f ' % fluxl_u[i,j])
    cout.write('%f ' % fluxl_T[i,j])
    cout.write('\n')
  cout.write('\n')
cout.close()
# Same fluxes for the center domain.
fluxc_d = 0.5*np.mean(np.sum(fac*np.real(vxc*np.conj(densc_xky)*dobc),1),2)/naky
fluxc_u = 0.5*np.mean(np.sum(fac*np.real(vxc*np.conj(uparc_xky)*dobc),1),2)/naky
fluxc_T = 0.5*np.mean(np.sum(fac*np.real(vxc*np.conj(tempc_xky)*dobc),1),2)/naky
cout = open(basedir + 'center.fluxes_t','w')
cout.write('[1] t ')
cout.write('[2] x ')
cout.write('[3] flux_d')
cout.write('[4] flux_u')
cout.write('[5] flux_t')
cout.write('\n')
print('7')
for i in range (0, nt):
  for j in range (0, nakxc):
    cout.write('%f ' % t[i])
    cout.write('%f ' % (dx*j))
    cout.write('%f ' % fluxc_d[i,j])
    cout.write('%f ' % fluxc_u[i,j])
    cout.write('%f ' % fluxc_T[i,j])
    cout.write('\n')
  cout.write('\n')
cout.close()
# Same fluxes for the right domain.
fluxr_d = 0.5*np.mean(np.sum(fac*np.real(vxr*np.conj(densr_xky)*dobr),1),2)/naky
fluxr_u = 0.5*np.mean(np.sum(fac*np.real(vxr*np.conj(uparr_xky)*dobr),1),2)/naky
fluxr_T = 0.5*np.mean(np.sum(fac*np.real(vxr*np.conj(tempr_xky)*dobr),1),2)/naky
cout = open(basedir + 'right.fluxes_t','w')
cout.write('[1] t ')
cout.write('[2] x ')
cout.write('[3] flux_d')
cout.write('[4] flux_u')
cout.write('[5] flux_t')
cout.write('\n')
print('8')
for i in range (0, nt):
  for j in range (0, nakxr):
    cout.write('%f ' % t[i])
    cout.write('%f ' % (j*dx))
    cout.write('%f ' % fluxr_d[i,j])
    cout.write('%f ' % fluxr_u[i,j])
    cout.write('%f ' % fluxr_T[i,j])
    cout.write('\n')
  cout.write('\n')
cout.close()
# Time-average window: the last 30% of the run (assumed saturated phase).
tave = int(0.7*float(nt))
print('Average time ' + str(tave) + ' to ' + str(nt))
temp_ave = np.mean(temp_zf[tave:nt,:],0)
fdc_ave  = np.mean(fluxc_d[tave:nt,:],0)
fuc_ave  = np.mean(fluxc_u[tave:nt,:],0)
fTc_ave  = np.mean(fluxc_T[tave:nt,:],0)
# Final-time zonal profiles, plus time-averaged fluxes, vs radial position.
cout = open(basedir + 'center.stuff','w')
cout.write('[1] i ')
cout.write('[2] phi ')
cout.write('[3] dens ')
cout.write('[4] upar ')
cout.write('[5] temp ')
cout.write('[6] temp_ave')
cout.write('[7] flux_d ')
cout.write('[8] flux_u ')
cout.write('[9] flux_T ')
cout.write('[10] fd_ave ')
cout.write('[11] fu_ave ')
cout.write('[12] fT_ave ')
cout.write('\n')
print('9')
for i in range (0, nakxc):
  cout.write('%f ' % (i*dx))
  cout.write('%f ' % phic_zf[nt-1,i])
  cout.write('%f ' % dens_zf[nt-1,i])
  cout.write('%f ' % upar_zf[nt-1,i])
  cout.write('%f ' % temp_zf[nt-1,i])
  cout.write('%f ' % temp_ave[i])
  cout.write('%f ' % fluxc_d[nt-1,i])
  cout.write('%f ' % fluxc_u[nt-1,i])
  cout.write('%f ' % fluxc_T[nt-1,i])
  cout.write('%f ' % fdc_ave[i])
  cout.write('%f ' % fuc_ave[i])
  cout.write('%f ' % fTc_ave[i])
  cout.write('\n')
# print(("%d " % i), end='')
#  print( "%f " % dens_zf[nt-1,i], end='', file=cout )
#  print( "%f " % upar_zf[nt-1,i], end='', file=cout )
#  print( "%f " % temp_zf[nt-1,i], end='', file=cout )
#  print( "%f " % temp_ave[i]    , end='', file=cout )
#  print( "" ,file=cout )
cout.close()
# One profile snapshot file per time step: zonal profiles and midplane
# ky-averaged squared amplitudes vs radial position.
for j in range (0, nt):
  cout = open(basedir + 'prof_' + str(j),'w')
  for i in range (0, nakxc):
    cout.write('%e ' % (i*dx))
    cout.write('%f ' % phic_zf[j,i])
    cout.write('%f ' % dens_zf[j,i])
    cout.write('%f ' % upar_zf[j,i])
    cout.write('%f ' % temp_zf[j,i])
    cout.write('%f ' % phic2[j,i])
    cout.write('%f ' % dens2[j,i])
    cout.write('%f ' % upar2[j,i])
    cout.write('%f ' % temp2[j,i])
    cout.write('\n')
  cout.close()
# Radially averaged zonal temperature vs time.
# NOTE(review): the loop stops at nt-1, dropping the final time point --
# confirm whether that is intentional.
temp0 = np.mean(temp_zf,1)
cout = open(basedir + 'temp.prof','w')
for i in range (0, nt - 1):
  cout.write('%e ' % t[i])
  cout.write('%e ' % temp0[i])
  cout.write('\n')
cout.close()
exit()
| [
"numpy.copy",
"numpy.mean",
"numpy.matlib.tile",
"numpy.abs",
"numpy.ones",
"numpy.conj",
"numpy.squeeze",
"numpy.sum",
"scipy.io.netcdf.netcdf_file",
"numpy.fft.ifft",
"numpy.arange"
] | [((462, 497), 'scipy.io.netcdf.netcdf_file', 'netcdf.netcdf_file', (['right_file', '"""r"""'], {}), "(right_file, 'r')\n", (480, 497), False, 'from scipy.io import netcdf\n'), ((509, 545), 'scipy.io.netcdf.netcdf_file', 'netcdf.netcdf_file', (['center_file', '"""r"""'], {}), "(center_file, 'r')\n", (527, 545), False, 'from scipy.io import netcdf\n'), ((557, 591), 'scipy.io.netcdf.netcdf_file', 'netcdf.netcdf_file', (['left_file', '"""r"""'], {}), "(left_file, 'r')\n", (575, 591), False, 'from scipy.io import netcdf\n'), ((1598, 1635), 'numpy.copy', 'np.copy', (["center_nc.variables['ky'][:]"], {}), "(center_nc.variables['ky'][:])\n", (1605, 1635), True, 'import numpy as np\n'), ((1643, 1680), 'numpy.copy', 'np.copy', (["center_nc.variables['kx'][:]"], {}), "(center_nc.variables['kx'][:])\n", (1650, 1680), True, 'import numpy as np\n'), ((1714, 1750), 'numpy.copy', 'np.copy', (["center_nc.variables['t'][:]"], {}), "(center_nc.variables['t'][:])\n", (1721, 1750), True, 'import numpy as np\n'), ((1771, 1809), 'numpy.copy', 'np.copy', (["center_nc.variables['zed'][:]"], {}), "(center_nc.variables['zed'][:])\n", (1778, 1809), True, 'import numpy as np\n'), ((1917, 1955), 'numpy.copy', 'np.copy', (["left_nc.variables['jacob'][:]"], {}), "(left_nc.variables['jacob'][:])\n", (1924, 1955), True, 'import numpy as np\n'), ((1968, 2008), 'numpy.copy', 'np.copy', (["center_nc.variables['jacob'][:]"], {}), "(center_nc.variables['jacob'][:])\n", (1975, 2008), True, 'import numpy as np\n'), ((2019, 2058), 'numpy.copy', 'np.copy', (["right_nc.variables['jacob'][:]"], {}), "(right_nc.variables['jacob'][:])\n", (2026, 2058), True, 'import numpy as np\n'), ((2086, 2113), 'numpy.squeeze', 'np.squeeze', (['(delzed * jacobl)'], {}), '(delzed * jacobl)\n', (2096, 2113), True, 'import numpy as np\n'), ((2125, 2152), 'numpy.squeeze', 'np.squeeze', (['(delzed * jacobc)'], {}), '(delzed * jacobc)\n', (2135, 2152), True, 'import numpy as np\n'), ((2164, 2191), 'numpy.squeeze', 
'np.squeeze', (['(delzed * jacobr)'], {}), '(delzed * jacobr)\n', (2174, 2191), True, 'import numpy as np\n'), ((6117, 6148), 'numpy.mean', 'np.mean', (['temp_zf[tave:nt, :]', '(0)'], {}), '(temp_zf[tave:nt, :], 0)\n', (6124, 6148), True, 'import numpy as np\n'), ((6158, 6189), 'numpy.mean', 'np.mean', (['fluxc_d[tave:nt, :]', '(0)'], {}), '(fluxc_d[tave:nt, :], 0)\n', (6165, 6189), True, 'import numpy as np\n'), ((6199, 6230), 'numpy.mean', 'np.mean', (['fluxc_u[tave:nt, :]', '(0)'], {}), '(fluxc_u[tave:nt, :], 0)\n', (6206, 6230), True, 'import numpy as np\n'), ((6240, 6271), 'numpy.mean', 'np.mean', (['fluxc_T[tave:nt, :]', '(0)'], {}), '(fluxc_T[tave:nt, :], 0)\n', (6247, 6271), True, 'import numpy as np\n'), ((7893, 7912), 'numpy.mean', 'np.mean', (['temp_zf', '(1)'], {}), '(temp_zf, 1)\n', (7900, 7912), True, 'import numpy as np\n'), ((1131, 1160), 'numpy.fft.ifft', 'np.fft.ifft', (['avt_kxky'], {'axis': '(2)'}), '(avt_kxky, axis=2)\n', (1142, 1160), True, 'import numpy as np\n'), ((1397, 1426), 'numpy.fft.ifft', 'np.fft.ifft', (['avt_kxky'], {'axis': '(2)'}), '(avt_kxky, axis=2)\n', (1408, 1426), True, 'import numpy as np\n'), ((1881, 1894), 'numpy.ones', 'np.ones', (['naky'], {}), '(naky)\n', (1888, 1894), True, 'import numpy as np\n'), ((2406, 2450), 'numpy.matlib.tile', 'np.matlib.tile', (['dl_over_bl', '(naky, nakxl, 1)'], {}), '(dl_over_bl, (naky, nakxl, 1))\n', (2420, 2450), True, 'import numpy as np\n'), ((2469, 2513), 'numpy.matlib.tile', 'np.matlib.tile', (['dl_over_bc', '(naky, nakxc, 1)'], {}), '(dl_over_bc, (naky, nakxc, 1))\n', (2483, 2513), True, 'import numpy as np\n'), ((2532, 2576), 'numpy.matlib.tile', 'np.matlib.tile', (['dl_over_br', '(naky, nakxr, 1)'], {}), '(dl_over_br, (naky, nakxr, 1))\n', (2546, 2576), True, 'import numpy as np\n'), ((3398, 3445), 'numpy.sum', 'np.sum', (['(dobc[:, :, 0] * phic_xky[:, :, :, 0])', '(1)'], {}), '(dobc[:, :, 0] * phic_xky[:, :, :, 0], 1)\n', (3404, 3445), True, 'import numpy as np\n'), ((3457, 3505), 
'numpy.sum', 'np.sum', (['(dobc[:, :, 0] * densc_xky[:, :, :, 0])', '(1)'], {}), '(dobc[:, :, 0] * densc_xky[:, :, :, 0], 1)\n', (3463, 3505), True, 'import numpy as np\n'), ((3517, 3565), 'numpy.sum', 'np.sum', (['(dobc[:, :, 0] * uparc_xky[:, :, :, 0])', '(1)'], {}), '(dobc[:, :, 0] * uparc_xky[:, :, :, 0], 1)\n', (3523, 3565), True, 'import numpy as np\n'), ((3577, 3625), 'numpy.sum', 'np.sum', (['(dobc[:, :, 0] * tempc_xky[:, :, :, 0])', '(1)'], {}), '(dobc[:, :, 0] * tempc_xky[:, :, :, 0], 1)\n', (3583, 3625), True, 'import numpy as np\n'), ((867, 892), 'numpy.arange', 'np.arange', (['(1)'], {'dtype': 'float'}), '(1, dtype=float)\n', (876, 892), True, 'import numpy as np\n'), ((3644, 3674), 'numpy.abs', 'np.abs', (['phic_xky[:, omp, :, :]'], {}), '(phic_xky[:, omp, :, :])\n', (3650, 3674), True, 'import numpy as np\n'), ((3703, 3734), 'numpy.abs', 'np.abs', (['densc_xky[:, omp, :, :]'], {}), '(densc_xky[:, omp, :, :])\n', (3709, 3734), True, 'import numpy as np\n'), ((3763, 3794), 'numpy.abs', 'np.abs', (['uparc_xky[:, omp, :, :]'], {}), '(uparc_xky[:, omp, :, :])\n', (3769, 3794), True, 'import numpy as np\n'), ((3823, 3854), 'numpy.abs', 'np.abs', (['tempc_xky[:, omp, :, :]'], {}), '(tempc_xky[:, omp, :, :])\n', (3829, 3854), True, 'import numpy as np\n'), ((3916, 3934), 'numpy.conj', 'np.conj', (['densl_xky'], {}), '(densl_xky)\n', (3923, 3934), True, 'import numpy as np\n'), ((3997, 4015), 'numpy.conj', 'np.conj', (['uparl_xky'], {}), '(uparl_xky)\n', (4004, 4015), True, 'import numpy as np\n'), ((4078, 4096), 'numpy.conj', 'np.conj', (['templ_xky'], {}), '(templ_xky)\n', (4085, 4096), True, 'import numpy as np\n'), ((4633, 4651), 'numpy.conj', 'np.conj', (['densc_xky'], {}), '(densc_xky)\n', (4640, 4651), True, 'import numpy as np\n'), ((4714, 4732), 'numpy.conj', 'np.conj', (['uparc_xky'], {}), '(uparc_xky)\n', (4721, 4732), True, 'import numpy as np\n'), ((4795, 4813), 'numpy.conj', 'np.conj', (['tempc_xky'], {}), '(tempc_xky)\n', (4802, 4813), True, 
'import numpy as np\n'), ((5352, 5370), 'numpy.conj', 'np.conj', (['densr_xky'], {}), '(densr_xky)\n', (5359, 5370), True, 'import numpy as np\n'), ((5433, 5451), 'numpy.conj', 'np.conj', (['uparr_xky'], {}), '(uparr_xky)\n', (5440, 5451), True, 'import numpy as np\n'), ((5514, 5532), 'numpy.conj', 'np.conj', (['tempr_xky'], {}), '(tempr_xky)\n', (5521, 5532), True, 'import numpy as np\n')] |
import numpy as np
import Levenshtein # pip install python-Levenshtein
from cmd2.table_creator import Column, SimpleTable, HorizontalAlignment
from cmd2 import ansi
from typing import Any, List
import json
import os
import functools
import hashlib
# Shorthand for styling text bold + bright yellow via cmd2's ansi helper.
bold_yellow = functools.partial(ansi.style, fg=ansi.fg.bright_yellow, bold=True)
def shallow_dict_to_fixed_width(d):
    """Return a shallow copy of *d* with each float value rendered as a
    4-decimal string; all other values are passed through unchanged."""
    formatted = {}
    for key, value in d.items():
        if isinstance(value, float):
            formatted[key] = f"{value:.4f}"
        else:
            formatted[key] = value
    return formatted
def printable_numpy(batch):
    """Format each row of *batch* as a single-line string with compact floats.

    Floats print as ``%4.3f``; long rows are summarized (threshold=30) and
    embedded newlines are collapsed to spaces.

    Uses the ``np.printoptions`` context manager instead of a manual
    get/set pair so the global print options are restored even if
    formatting raises (the original left them modified on error).
    """
    with np.printoptions(
        threshold=30, precision=3, floatmode="maxprec_equal",
        formatter=dict(float=lambda x: f"{x:4.3f}"),
    ):
        return [str(np.array(row)).replace("\n", " ") for row in batch]
def get_printable_batch(target, samples):
    """Convert raw samples into printable representations by data type.

    text  -> returned unchanged
    image -> saved via target._save_image; filenames returned
    PE    -> saved via target._save_exe; filenames returned
    other -> formatted with printable_numpy

    BUG FIX: the "text" branch was a standalone ``if`` outside the
    ``if/elif/else`` chain, so text samples fell through to the ``else``
    branch and ``result`` was clobbered by ``printable_numpy``; the chain
    is now properly exclusive.
    """
    if target.model_data_type == "text":
        result = samples
    elif target.model_data_type == "image":
        result = []
        for image in samples:
            # short content hash keeps saved filenames unique per sample
            _id = hashlib.md5(target._key(image)).hexdigest()[:8]
            basename = f"{target.model_name}-sample-{_id}"
            filename = target._save_image(image, filename=basename)
            result.append(filename)
    elif target.model_data_type == "PE":
        result = []
        for exe in samples:
            _id = hashlib.md5(target._key(exe)).hexdigest()[:8]
            basename = f"{target.model_name}-sample-{_id}"
            filename = target._save_exe(exe, basename)
            result.append(filename)
    else:  # numpy
        result = printable_numpy(samples)
    return result
def get_run_summary(target, attack=None):
    """Gather statistics about an attack run and return them as a dict.

    Parameters
    ----------
    target : object exposing model_data_type, model_output_classes,
        check_attack_success() and (when attack is None) active_attack.
    attack : object with .results, .parameters, .sample_index,
        .target_class, .attack_name, .attack_id; defaults to
        target.active_attack.

    Returns
    -------
    dict with batch counts, per-sample labels/confidences, relative input
    change (metric depends on the data type) and bookkeeping fields.
    """
    if attack is None:
        attack = target.active_attack
    # count successes
    success_indicator = target.check_attack_success()
    batch_size = len(success_indicator)
    successes = sum(success_indicator)
    # initial scores/labels
    i_0 = np.array(attack.results["initial"]["input"])
    o_0 = np.array(attack.results["initial"]["output"])
    l_0 = np.array(attack.results["initial"]["label"])
    # final scores/labels
    i_f = np.array(attack.results['final']['input'])
    o_f = np.array(attack.results['final']['output'])
    l_f = np.array(attack.results['final']['label'])
    # handle degenerate cases in which target_class is the true class
    # NOTE(review): assumes success_indicator is a numpy array; with a plain
    # list, `success_indicator == True` collapses to a scalar False -- confirm.
    targeted = attack.parameters.get("targeted", False)
    degenerate = np.logical_and(l_0 == l_f, success_indicator == True)
    # compute distance, depending on target
    if target.model_data_type == "text":
        # Levenshtein distance, relative to the original input length
        metric = "% edit dist."
        distances = [Levenshtein.distance(iif, ii0) for iif, ii0 in zip(i_f, i_0)]
        rel_distance = [d / len(ii0) for d, ii0 in zip(distances, i_0)]
    elif target.model_data_type == "numpy" or target.model_data_type == "image":
        # l2 norm of the perturbation, relative to the original's norm
        # (eps guards against division by zero for an all-zero input)
        i_0 = i_0.reshape(batch_size, -1).astype(float)
        i_f = i_f.reshape(batch_size, -1).astype(float)
        metric = "% Eucl. dist."
        eps = np.finfo("float32").eps
        rel_distance = np.sqrt(np.nansum(np.square(i_f - i_0), axis=1)) / (np.linalg.norm(i_0, axis=1) + eps)
    elif target.model_data_type == "PE":
        # no distance metric implemented for PE files yet
        metric = "TODO"
        rel_distance = [0]
    else:
        raise ValueError("Unexpected model_data_type")
    # images are reported via their saved files; everything else is stringified
    result = (
        attack.results["final"]["images"]
        if target.model_data_type == "image"
        else get_printable_batch(target, samples=i_f)
    )
    # confidence assigned to each sample's own (initial/final) label
    conf_0 = np.array([o_0[i, target.model_output_classes.index(lab)] for i, lab in enumerate(l_0)])
    conf_f = np.array([o_f[i, target.model_output_classes.index(lab)] for i, lab in enumerate(l_f)])
    # copy so the attack's own parameter dict is not mutated
    params = attack.parameters.copy()
    params.update([("sample_index", attack.sample_index), ("target_class", attack.target_class)])
    return {
        'batch_size': batch_size,
        'successes': successes,
        'input_change': rel_distance,
        'input_change_metric': metric,
        'initial_confidence': conf_0,
        'final_confidence': conf_f,
        'initial_label': l_0,
        'final_label': l_f,
        'sample_index': np.atleast_1d(attack.sample_index),
        'type': target.model_data_type,
        'result': result,
        'elapsed_time': attack.results['elapsed_time'],
        'queries': attack.results['queries'],
        'attack_name': attack.attack_name,
        'attack_id': attack.attack_id,
        'parameters': params,
        'targeted': targeted,
        'target_class': attack.target_class,
        'degenerate': degenerate
    }
def get_printable_run_summary(summary):
    """Render a run-summary dict (see get_run_summary) as a printable table.

    Builds a cmd2 SimpleTable with one row per sample.  The raw attack
    input column is only included when the terminal is wide enough;
    degenerate rows (target class equals the original class) are flagged
    with a yellow '*'.
    """
    output = ""
    output += f"\n[+] {summary['successes']}/{summary['batch_size']} succeeded\n\n"
    # report whichever rate is >= 1 in its units, for readability
    if summary['elapsed_time'] > summary['queries']:
        query_rate = summary['elapsed_time'] / summary['queries']
        units = 'sec/query'
    else:
        query_rate = summary["queries"] / summary["elapsed_time"]
        units = "query/sec"
    metric = summary["input_change_metric"]
    terminal_cols = os.get_terminal_size().columns
    results_width = terminal_cols - 125 # default Windows is 120x30
    # fixed-width column definitions for the table
    columns: List[Column] = list()
    columns.append(Column("", width=3)) # number
    columns.append(
        Column(
            "Sample Index",
            width=13,
            header_horiz_align=HorizontalAlignment.CENTER,
            data_horiz_align=HorizontalAlignment.RIGHT,
        )
    )
    columns.append(
        Column(
            "Label (conf)",
            width=18,
            header_horiz_align=HorizontalAlignment.CENTER,
            data_horiz_align=HorizontalAlignment.RIGHT,
        )
    )
    columns.append(
        Column(
            "Attack Label (conf)",
            width=19,
            header_horiz_align=HorizontalAlignment.CENTER,
            data_horiz_align=HorizontalAlignment.RIGHT,
        )
    )
    columns.append(
        Column(
            metric,
            width=len(metric),
            header_horiz_align=HorizontalAlignment.CENTER,
            data_horiz_align=HorizontalAlignment.RIGHT,
        )
    )
    columns.append(
        Column(
            "Elapsed Time [sec]",
            width=18,
            header_horiz_align=HorizontalAlignment.CENTER,
            data_horiz_align=HorizontalAlignment.RIGHT,
        )
    )
    columns.append(
        Column(
            "Queries (rate)",
            width=18,
            header_horiz_align=HorizontalAlignment.CENTER,
            data_horiz_align=HorizontalAlignment.RIGHT,
        )
    )
    # optional wide column with the (stringified) adversarial input
    if results_width > 0:
        columns.append(
            Column(
                "Attack Input",
                width=results_width,
                header_horiz_align=HorizontalAlignment.CENTER,
                data_horiz_align=HorizontalAlignment.LEFT,
            )
        )
    data_list: List[List[Any]] = list()
    elapsed_time_str = f"{summary['elapsed_time']:.1f}"
    query_rate_str = f"{summary['queries']:.0f} ({query_rate:.1f} {units})"
    # one table row per sample in the batch
    for i, (si, li, conf_0, lf, conf_f, change, res, d) in enumerate(zip(
            summary["sample_index"],
            summary["initial_label"],
            summary["initial_confidence"],
            summary["final_label"],
            summary["final_confidence"],
            summary["input_change"],
            summary["result"],
            summary["degenerate"])):
        label_confidence = f"{li} ({conf_0:.4f})"
        final_confidence = f"{lf} ({conf_f:.4f})"
        # '*' marks degenerate rows (see get_run_summary)
        if d:
            label_confidence = f"{bold_yellow('*')} " + label_confidence
            final_confidence = f"{bold_yellow('*')} " + final_confidence
        change_str = f"{change:.5%}"
        if results_width > 0:
            data_list.append(
                [
                    f"{i+1}.",
                    si,
                    label_confidence,
                    final_confidence,
                    change_str,
                    elapsed_time_str,
                    query_rate_str,
                    str(np.array(res)),
                ]
            )
        else:
            data_list.append(
                [
                    f"{i+1}.",
                    si,
                    label_confidence,
                    final_confidence,
                    change_str,
                    elapsed_time_str,
                    query_rate_str,
                ]
            )
    if sum(summary["degenerate"]) > 0:
        output += bold_yellow("    * target_class is the same as the original class") + "\n\n"
    if results_width <= 0:
        output = bold_yellow("""\nIncrease terminal width to show results.\n""") + output
    # return table as output
    st = SimpleTable(columns)
    return output + '\n' + st.generate_table(data_list, row_spacing=0) + '\n'
def get_scan_summary(list_of_runs):
    """Aggregate a list of run summaries into one scan-level summary.

    Reports total runs/successes, min/avg/max of elapsed time and query
    counts, and identifies the single best successful attack: highest
    final confidence, ties broken by fewest queries.
    """
    total_successes = sum(run["successes"] for run in list_of_runs)
    total_runs = sum(run["batch_size"] for run in list_of_runs)
    times = [run["elapsed_time"] for run in list_of_runs]
    queries = [run["queries"] for run in list_of_runs]
    best_attack = best_id = best_score = best_params = best_queries = None
    for run in list_of_runs:
        per_sample = zip(run["final_confidence"], run["initial_label"], run["final_label"])
        for conf, initial_label, final_label in per_sample:
            # a sample counts as a hit when a targeted attack reached its
            # target class, or an untargeted attack flipped the label
            hit = (run["targeted"] and run["target_class"] == final_label) or (
                run["targeted"] == False and final_label != initial_label
            )
            if not hit:
                continue
            improves = (
                best_score is None
                or conf > best_score
                or (conf == best_score and run["queries"] < best_queries)
            )
            if improves:
                best_score = conf
                best_id = run["attack_id"]
                best_attack = run["attack_name"]
                best_params = run["parameters"]
                best_queries = run["queries"]
    return {
        "total_runs": total_runs,
        "total_successes": total_successes,
        "avg_time": np.mean(times),
        "min_time": np.min(times),
        "max_time": np.max(times),
        "avg_queries": int(np.mean(queries)),
        "min_queries": np.min(queries),
        "max_queries": np.max(queries),
        "best_attack_name": best_attack,
        "best_attack_id": best_id,
        "best_attack_score": best_score,
        "best_params": best_params,
    }
def _make_summary_column(title, width):
    """Build one table column: left-aligned header over right-aligned data."""
    return Column(
        title,
        width=width,
        header_horiz_align=HorizontalAlignment.LEFT,
        data_horiz_align=HorizontalAlignment.RIGHT,
    )
def _format_summary_row(summary, best_key):
    """Format the shared cells of one summary row.

    Returns the (successes, times, queries, best) cell strings, where
    ``best`` annotates the score with ``summary[best_key]`` (the attack id
    or the attack name depending on the table).
    """
    frac = summary["total_successes"] / summary["total_runs"]
    successes = f"{summary['total_successes']} ({frac:>.1%})"
    times = f"{summary['min_time']:>4.1f}/{summary['avg_time']:>4.1f}/{summary['max_time']:>4.1f}"
    queries = f"{summary['min_queries']:>5d}/{summary['avg_queries']:>5d}/{summary['max_queries']:>5d}"
    # BUGFIX: compare against None explicitly -- a legitimate best score of
    # 0.0 is falsy and used to be rendered as "N/A".
    best = (
        f"{summary['best_attack_score']:0.1f} ({summary[best_key]})"
        if summary["best_attack_score"] is not None
        else "N/A"
    )
    return successes, times, queries, best
def get_printable_scan_summary(summaries_by_attack, summaries_by_label=None):
    """Render scan summaries as printable tables.

    Parameters
    ----------
    summaries_by_attack : dict
        Maps attack name -> summary dict (shape produced by ``get_scan_summary``).
    summaries_by_label : dict, optional
        Maps class label -> summary dict; adds a second, per-label table.

    Returns
    -------
    str
        Multi-line report; the "Best Parameters" column is only shown when
        the terminal is wide enough.
    """
    output = "\n =============== \n SCAN SUMMARY \n ===============\n\n"
    terminal_cols = os.get_terminal_size().columns
    results_width = terminal_cols - 128  # default Windows is 120x30
    if results_width <= 0:
        output += bold_yellow("""\nIncrease terminal width to show parameters.\n\n""")
    columns: List[Column] = [
        _make_summary_column("Attack Name", 15),
        _make_summary_column("Total Runs", 10),
        _make_summary_column("Successes (%)", 13),
        _make_summary_column("Time[sec] (min/avg/max)", 15),
        _make_summary_column("Queries (min/avg/max)", 18),
        _make_summary_column("Best Score (attack_id)", 15),
    ]
    if results_width > 0:
        columns.append(
            Column(
                "Best Parameters",
                width=25,
                header_horiz_align=HorizontalAlignment.RIGHT,
                data_horiz_align=HorizontalAlignment.RIGHT,
            )
        )
    data_list: List[List[Any]] = []
    for name, summary in summaries_by_attack.items():
        successes, times, queries, best = _format_summary_row(summary, "best_attack_id")
        row = [name, summary["total_runs"], successes, times, queries, best]
        if results_width > 0:
            if summary["best_params"] is not None:
                trunc_params = shallow_dict_to_fixed_width(summary["best_params"])
                # json.dumps with "" / "=" separators yields "key=value" lines;
                # strip the surrounding braces and the quotes.
                param_str = json.dumps(trunc_params, indent=1, separators=("", "="))[2:-1].replace('"', "")
            else:
                param_str = "N/A"
            row.append(param_str)
        data_list.append(row)
    st = SimpleTable(columns)
    output += '\n' + st.generate_table(data_list, row_spacing=0) + '\n'
    if summaries_by_label is not None:
        output += "\n"
        # Second table: the same statistics grouped by original class label.
        columns = [
            _make_summary_column("Class Label", 15),
            _make_summary_column("Total Runs", 10),
            _make_summary_column("Successes (%)", 13),
            _make_summary_column("Time[sec] (min/avg/max)", 15),
            _make_summary_column("Queries (min/avg/max)", 18),
            _make_summary_column("Best Score (Attack)", 15),
        ]
        data_list = []
        for name, summary in sorted(summaries_by_label.items()):
            successes, times, queries, best = _format_summary_row(summary, "best_attack_name")
            data_list.append([name, summary["total_runs"], successes, times, queries, best])
        st = SimpleTable(columns)
        output += '\n' + st.generate_table(data_list, row_spacing=0) + '\n'
    return output
| [
"numpy.mean",
"cmd2.table_creator.Column",
"os.get_terminal_size",
"numpy.logical_and",
"numpy.get_printoptions",
"json.dumps",
"numpy.linalg.norm",
"numpy.max",
"Levenshtein.distance",
"numpy.array",
"numpy.square",
"functools.partial",
"cmd2.table_creator.SimpleTable",
"numpy.min",
"nu... | [((276, 342), 'functools.partial', 'functools.partial', (['ansi.style'], {'fg': 'ansi.fg.bright_yellow', 'bold': '(True)'}), '(ansi.style, fg=ansi.fg.bright_yellow, bold=True)\n', (293, 342), False, 'import functools\n'), ((507, 528), 'numpy.get_printoptions', 'np.get_printoptions', ([], {}), '()\n', (526, 528), True, 'import numpy as np\n'), ((745, 872), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': "o['threshold']", 'precision': "o['precision']", 'floatmode': "o['floatmode']", 'formatter': "o['formatter']"}), "(threshold=o['threshold'], precision=o['precision'],\n floatmode=o['floatmode'], formatter=o['formatter'])\n", (764, 872), True, 'import numpy as np\n'), ((2133, 2177), 'numpy.array', 'np.array', (["attack.results['initial']['input']"], {}), "(attack.results['initial']['input'])\n", (2141, 2177), True, 'import numpy as np\n'), ((2189, 2234), 'numpy.array', 'np.array', (["attack.results['initial']['output']"], {}), "(attack.results['initial']['output'])\n", (2197, 2234), True, 'import numpy as np\n'), ((2246, 2290), 'numpy.array', 'np.array', (["attack.results['initial']['label']"], {}), "(attack.results['initial']['label'])\n", (2254, 2290), True, 'import numpy as np\n'), ((2331, 2373), 'numpy.array', 'np.array', (["attack.results['final']['input']"], {}), "(attack.results['final']['input'])\n", (2339, 2373), True, 'import numpy as np\n'), ((2385, 2428), 'numpy.array', 'np.array', (["attack.results['final']['output']"], {}), "(attack.results['final']['output'])\n", (2393, 2428), True, 'import numpy as np\n'), ((2440, 2482), 'numpy.array', 'np.array', (["attack.results['final']['label']"], {}), "(attack.results['final']['label'])\n", (2448, 2482), True, 'import numpy as np\n'), ((2631, 2684), 'numpy.logical_and', 'np.logical_and', (['(l_0 == l_f)', '(success_indicator == True)'], {}), '(l_0 == l_f, success_indicator == True)\n', (2645, 2684), True, 'import numpy as np\n'), ((9115, 9135), 'cmd2.table_creator.SimpleTable', 
'SimpleTable', (['columns'], {}), '(columns)\n', (9126, 9135), False, 'from cmd2.table_creator import Column, SimpleTable, HorizontalAlignment\n'), ((14173, 14193), 'cmd2.table_creator.SimpleTable', 'SimpleTable', (['columns'], {}), '(columns)\n', (14184, 14193), False, 'from cmd2.table_creator import Column, SimpleTable, HorizontalAlignment\n'), ((4396, 4430), 'numpy.atleast_1d', 'np.atleast_1d', (['attack.sample_index'], {}), '(attack.sample_index)\n', (4409, 4430), True, 'import numpy as np\n'), ((5309, 5331), 'os.get_terminal_size', 'os.get_terminal_size', ([], {}), '()\n', (5329, 5331), False, 'import os\n'), ((5468, 5487), 'cmd2.table_creator.Column', 'Column', (['""""""'], {'width': '(3)'}), "('', width=3)\n", (5474, 5487), False, 'from cmd2.table_creator import Column, SimpleTable, HorizontalAlignment\n'), ((5529, 5657), 'cmd2.table_creator.Column', 'Column', (['"""Sample Index"""'], {'width': '(13)', 'header_horiz_align': 'HorizontalAlignment.CENTER', 'data_horiz_align': 'HorizontalAlignment.RIGHT'}), "('Sample Index', width=13, header_horiz_align=HorizontalAlignment.\n CENTER, data_horiz_align=HorizontalAlignment.RIGHT)\n", (5535, 5657), False, 'from cmd2.table_creator import Column, SimpleTable, HorizontalAlignment\n'), ((5754, 5882), 'cmd2.table_creator.Column', 'Column', (['"""Label (conf)"""'], {'width': '(18)', 'header_horiz_align': 'HorizontalAlignment.CENTER', 'data_horiz_align': 'HorizontalAlignment.RIGHT'}), "('Label (conf)', width=18, header_horiz_align=HorizontalAlignment.\n CENTER, data_horiz_align=HorizontalAlignment.RIGHT)\n", (5760, 5882), False, 'from cmd2.table_creator import Column, SimpleTable, HorizontalAlignment\n'), ((5979, 6114), 'cmd2.table_creator.Column', 'Column', (['"""Attack Label (conf)"""'], {'width': '(19)', 'header_horiz_align': 'HorizontalAlignment.CENTER', 'data_horiz_align': 'HorizontalAlignment.RIGHT'}), "('Attack Label (conf)', width=19, header_horiz_align=\n HorizontalAlignment.CENTER, 
data_horiz_align=HorizontalAlignment.RIGHT)\n", (5985, 6114), False, 'from cmd2.table_creator import Column, SimpleTable, HorizontalAlignment\n'), ((6437, 6571), 'cmd2.table_creator.Column', 'Column', (['"""Elapsed Time [sec]"""'], {'width': '(18)', 'header_horiz_align': 'HorizontalAlignment.CENTER', 'data_horiz_align': 'HorizontalAlignment.RIGHT'}), "('Elapsed Time [sec]', width=18, header_horiz_align=\n HorizontalAlignment.CENTER, data_horiz_align=HorizontalAlignment.RIGHT)\n", (6443, 6571), False, 'from cmd2.table_creator import Column, SimpleTable, HorizontalAlignment\n'), ((6668, 6798), 'cmd2.table_creator.Column', 'Column', (['"""Queries (rate)"""'], {'width': '(18)', 'header_horiz_align': 'HorizontalAlignment.CENTER', 'data_horiz_align': 'HorizontalAlignment.RIGHT'}), "('Queries (rate)', width=18, header_horiz_align=HorizontalAlignment.\n CENTER, data_horiz_align=HorizontalAlignment.RIGHT)\n", (6674, 6798), False, 'from cmd2.table_creator import Column, SimpleTable, HorizontalAlignment\n'), ((10520, 10534), 'numpy.mean', 'np.mean', (['times'], {}), '(times)\n', (10527, 10534), True, 'import numpy as np\n'), ((10557, 10570), 'numpy.min', 'np.min', (['times'], {}), '(times)\n', (10563, 10570), True, 'import numpy as np\n'), ((10593, 10606), 'numpy.max', 'np.max', (['times'], {}), '(times)\n', (10599, 10606), True, 'import numpy as np\n'), ((10679, 10694), 'numpy.min', 'np.min', (['queries'], {}), '(queries)\n', (10685, 10694), True, 'import numpy as np\n'), ((10720, 10735), 'numpy.max', 'np.max', (['queries'], {}), '(queries)\n', (10726, 10735), True, 'import numpy as np\n'), ((11081, 11103), 'os.get_terminal_size', 'os.get_terminal_size', ([], {}), '()\n', (11101, 11103), False, 'import os\n'), ((11368, 11492), 'cmd2.table_creator.Column', 'Column', (['"""Attack Name"""'], {'width': '(15)', 'header_horiz_align': 'HorizontalAlignment.LEFT', 'data_horiz_align': 'HorizontalAlignment.RIGHT'}), "('Attack Name', width=15, 
header_horiz_align=HorizontalAlignment.LEFT,\n data_horiz_align=HorizontalAlignment.RIGHT)\n", (11374, 11492), False, 'from cmd2.table_creator import Column, SimpleTable, HorizontalAlignment\n'), ((11590, 11713), 'cmd2.table_creator.Column', 'Column', (['"""Total Runs"""'], {'width': '(10)', 'header_horiz_align': 'HorizontalAlignment.LEFT', 'data_horiz_align': 'HorizontalAlignment.RIGHT'}), "('Total Runs', width=10, header_horiz_align=HorizontalAlignment.LEFT,\n data_horiz_align=HorizontalAlignment.RIGHT)\n", (11596, 11713), False, 'from cmd2.table_creator import Column, SimpleTable, HorizontalAlignment\n'), ((11811, 11938), 'cmd2.table_creator.Column', 'Column', (['"""Successes (%)"""'], {'width': '(13)', 'header_horiz_align': 'HorizontalAlignment.LEFT', 'data_horiz_align': 'HorizontalAlignment.RIGHT'}), "('Successes (%)', width=13, header_horiz_align=HorizontalAlignment.\n LEFT, data_horiz_align=HorizontalAlignment.RIGHT)\n", (11817, 11938), False, 'from cmd2.table_creator import Column, SimpleTable, HorizontalAlignment\n'), ((12035, 12172), 'cmd2.table_creator.Column', 'Column', (['"""Time[sec] (min/avg/max)"""'], {'width': '(15)', 'header_horiz_align': 'HorizontalAlignment.LEFT', 'data_horiz_align': 'HorizontalAlignment.RIGHT'}), "('Time[sec] (min/avg/max)', width=15, header_horiz_align=\n HorizontalAlignment.LEFT, data_horiz_align=HorizontalAlignment.RIGHT)\n", (12041, 12172), False, 'from cmd2.table_creator import Column, SimpleTable, HorizontalAlignment\n'), ((12269, 12404), 'cmd2.table_creator.Column', 'Column', (['"""Queries (min/avg/max)"""'], {'width': '(18)', 'header_horiz_align': 'HorizontalAlignment.LEFT', 'data_horiz_align': 'HorizontalAlignment.RIGHT'}), "('Queries (min/avg/max)', width=18, header_horiz_align=\n HorizontalAlignment.LEFT, data_horiz_align=HorizontalAlignment.RIGHT)\n", (12275, 12404), False, 'from cmd2.table_creator import Column, SimpleTable, HorizontalAlignment\n'), ((12501, 12637), 'cmd2.table_creator.Column', 'Column', (['"""Best 
Score (attack_id)"""'], {'width': '(15)', 'header_horiz_align': 'HorizontalAlignment.LEFT', 'data_horiz_align': 'HorizontalAlignment.RIGHT'}), "('Best Score (attack_id)', width=15, header_horiz_align=\n HorizontalAlignment.LEFT, data_horiz_align=HorizontalAlignment.RIGHT)\n", (12507, 12637), False, 'from cmd2.table_creator import Column, SimpleTable, HorizontalAlignment\n'), ((16753, 16773), 'cmd2.table_creator.SimpleTable', 'SimpleTable', (['columns'], {}), '(columns)\n', (16764, 16773), False, 'from cmd2.table_creator import Column, SimpleTable, HorizontalAlignment\n'), ((2861, 2891), 'Levenshtein.distance', 'Levenshtein.distance', (['iif', 'ii0'], {}), '(iif, ii0)\n', (2881, 2891), False, 'import Levenshtein\n'), ((6930, 7068), 'cmd2.table_creator.Column', 'Column', (['"""Attack Input"""'], {'width': 'results_width', 'header_horiz_align': 'HorizontalAlignment.CENTER', 'data_horiz_align': 'HorizontalAlignment.LEFT'}), "('Attack Input', width=results_width, header_horiz_align=\n HorizontalAlignment.CENTER, data_horiz_align=HorizontalAlignment.LEFT)\n", (6936, 7068), False, 'from cmd2.table_creator import Column, SimpleTable, HorizontalAlignment\n'), ((10636, 10652), 'numpy.mean', 'np.mean', (['queries'], {}), '(queries)\n', (10643, 10652), True, 'import numpy as np\n'), ((12769, 12899), 'cmd2.table_creator.Column', 'Column', (['"""Best Parameters"""'], {'width': '(25)', 'header_horiz_align': 'HorizontalAlignment.RIGHT', 'data_horiz_align': 'HorizontalAlignment.RIGHT'}), "('Best Parameters', width=25, header_horiz_align=HorizontalAlignment.\n RIGHT, data_horiz_align=HorizontalAlignment.RIGHT)\n", (12775, 12899), False, 'from cmd2.table_creator import Column, SimpleTable, HorizontalAlignment\n'), ((14446, 14570), 'cmd2.table_creator.Column', 'Column', (['"""Class Label"""'], {'width': '(15)', 'header_horiz_align': 'HorizontalAlignment.LEFT', 'data_horiz_align': 'HorizontalAlignment.RIGHT'}), "('Class Label', width=15, header_horiz_align=HorizontalAlignment.LEFT,\n 
data_horiz_align=HorizontalAlignment.RIGHT)\n", (14452, 14570), False, 'from cmd2.table_creator import Column, SimpleTable, HorizontalAlignment\n'), ((14700, 14823), 'cmd2.table_creator.Column', 'Column', (['"""Total Runs"""'], {'width': '(10)', 'header_horiz_align': 'HorizontalAlignment.LEFT', 'data_horiz_align': 'HorizontalAlignment.RIGHT'}), "('Total Runs', width=10, header_horiz_align=HorizontalAlignment.LEFT,\n data_horiz_align=HorizontalAlignment.RIGHT)\n", (14706, 14823), False, 'from cmd2.table_creator import Column, SimpleTable, HorizontalAlignment\n'), ((14953, 15080), 'cmd2.table_creator.Column', 'Column', (['"""Successes (%)"""'], {'width': '(13)', 'header_horiz_align': 'HorizontalAlignment.LEFT', 'data_horiz_align': 'HorizontalAlignment.RIGHT'}), "('Successes (%)', width=13, header_horiz_align=HorizontalAlignment.\n LEFT, data_horiz_align=HorizontalAlignment.RIGHT)\n", (14959, 15080), False, 'from cmd2.table_creator import Column, SimpleTable, HorizontalAlignment\n'), ((15209, 15346), 'cmd2.table_creator.Column', 'Column', (['"""Time[sec] (min/avg/max)"""'], {'width': '(15)', 'header_horiz_align': 'HorizontalAlignment.LEFT', 'data_horiz_align': 'HorizontalAlignment.RIGHT'}), "('Time[sec] (min/avg/max)', width=15, header_horiz_align=\n HorizontalAlignment.LEFT, data_horiz_align=HorizontalAlignment.RIGHT)\n", (15215, 15346), False, 'from cmd2.table_creator import Column, SimpleTable, HorizontalAlignment\n'), ((15475, 15610), 'cmd2.table_creator.Column', 'Column', (['"""Queries (min/avg/max)"""'], {'width': '(18)', 'header_horiz_align': 'HorizontalAlignment.LEFT', 'data_horiz_align': 'HorizontalAlignment.RIGHT'}), "('Queries (min/avg/max)', width=18, header_horiz_align=\n HorizontalAlignment.LEFT, data_horiz_align=HorizontalAlignment.RIGHT)\n", (15481, 15610), False, 'from cmd2.table_creator import Column, SimpleTable, HorizontalAlignment\n'), ((15739, 15872), 'cmd2.table_creator.Column', 'Column', (['"""Best Score (Attack)"""'], {'width': '(15)', 
'header_horiz_align': 'HorizontalAlignment.LEFT', 'data_horiz_align': 'HorizontalAlignment.RIGHT'}), "('Best Score (Attack)', width=15, header_horiz_align=\n HorizontalAlignment.LEFT, data_horiz_align=HorizontalAlignment.RIGHT)\n", (15745, 15872), False, 'from cmd2.table_creator import Column, SimpleTable, HorizontalAlignment\n'), ((3260, 3279), 'numpy.finfo', 'np.finfo', (['"""float32"""'], {}), "('float32')\n", (3268, 3279), True, 'import numpy as np\n'), ((688, 701), 'numpy.array', 'np.array', (['row'], {}), '(row)\n', (696, 701), True, 'import numpy as np\n'), ((3360, 3387), 'numpy.linalg.norm', 'np.linalg.norm', (['i_0'], {'axis': '(1)'}), '(i_0, axis=1)\n', (3374, 3387), True, 'import numpy as np\n'), ((3326, 3346), 'numpy.square', 'np.square', (['(i_f - i_0)'], {}), '(i_f - i_0)\n', (3335, 3346), True, 'import numpy as np\n'), ((8424, 8437), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (8432, 8437), True, 'import numpy as np\n'), ((13815, 13871), 'json.dumps', 'json.dumps', (['trunc_params'], {'indent': '(1)', 'separators': "('', '=')"}), "(trunc_params, indent=1, separators=('', '='))\n", (13825, 13871), False, 'import json\n')] |
"""Script to perform hierarchical inference with the trained Bayesian GNN
"""
import os
import sys
import numpy as np
from scipy import stats
from n2j.inference.inference_manager import InferenceManager
from n2j.config_utils import get_config
if __name__ == '__main__':
cfg = get_config()
infer_obj = InferenceManager(checkpoint_dir=cfg['trainer']['checkpoint_dir'],
**cfg['inference_manager'])
infer_obj.delete_previous()
# Load training stats (for normalizing data)
norm_obj = getattr(stats, cfg['data']['train_dist_name'])(**cfg['data']['train_dist_kwargs'])
train_raytracing = [os.path.join(cfg['data']['in_dir'],
f'cosmodc2_{hp}/Y_{hp}') for hp in cfg['data']['train_hp']]
infer_obj.load_dataset(
dict(features=cfg['data']['features'],
raytracing_out_dirs=train_raytracing,
healpixes=cfg['data']['train_hp'],
n_data=cfg['data']['n_train'],
aperture_size=1.0,
subsample_pdf_func=norm_obj.pdf,
n_subsample=cfg['data']['n_subsample_train'],
stop_mean_std_early=False,
in_dir=cfg['data']['in_dir']),
sub_features=cfg['data']['sub_features'],
sub_target=cfg['data']['sub_target'],
sub_target_local=cfg['data']['sub_target_local'],
is_train=True,
batch_size=cfg['data']['batch_size'],
num_workers=cfg['data']['num_workers'],
rebin=False,
noise_kwargs=cfg['data']['noise_kwargs'],
detection_kwargs=cfg['data'].get('detection_kwargs', {}),
)
# Load test set
norm_obj_test = getattr(stats, cfg['test_data']['dist_name'])(**cfg['test_data']['dist_kwargs'])
test_raytracing = [os.path.join(cfg['data']['in_dir'],
f'cosmodc2_{hp}/Y_{hp}') for hp in cfg['test_data']['test_hp']]
infer_obj.load_dataset(dict(features=cfg['data']['features'],
raytracing_out_dirs=test_raytracing,
healpixes=cfg['test_data']['test_hp'],
n_data=cfg['test_data']['n_test'],
aperture_size=1.0,
subsample_pdf_func=norm_obj_test.pdf,
n_subsample=cfg['test_data']['n_subsample_test'],
in_dir=cfg['data']['in_dir']),
sub_features=cfg['data']['sub_features'],
sub_target=cfg['data']['sub_target'],
sub_target_local=cfg['data']['sub_target_local'],
is_train=False,
batch_size=cfg['test_data']['batch_size'],
noise_kwargs=cfg['data']['noise_kwargs'],
detection_kwargs=cfg['data'].get('detection_kwargs', {}),
)
infer_obj.include_los = cfg['test_data'].get('idx', None)
# Define model
model_kwargs = dict(
dim_in=len(cfg['data']['sub_features']),
dim_out_local=len(cfg['data']['sub_target_local']),
dim_out_global=len(cfg['data']['sub_target']),
**cfg['model']
)
infer_obj.configure_model('N2JNet', model_kwargs)
# Load trained model
infer_obj.load_state(cfg['checkpoint_path'])
# Get summary stats baseline
infer_obj.get_summary_stats(cfg['summary_stats']['thresholds'],
norm_obj.pdf,
match=True,
min_matches=cfg['summary_stats']['min_matches'])
# Hierarchical reweighting
p0 = np.array([[0.01, np.log(0.04)]])
p0 = p0 + np.random.randn(cfg['extra_mcmc_kwargs']['n_walkers'],
2)*np.array([[0.01, 0.5]])
mcmc_kwargs = dict(p0=p0,
chain_path=os.path.join(infer_obj.out_dir, 'omega_chain.h5'),
**cfg['extra_mcmc_kwargs']
)
if cfg['run_mcmc']:
# MCMC over BNN posteriors
mcmc_kwargs = dict(p0=p0,
chain_path=os.path.join(infer_obj.out_dir, 'omega_chain.h5'),
**cfg['extra_mcmc_kwargs']
)
infer_obj.run_mcmc_for_omega_post(n_samples=1000,
n_mc_dropout=20,
mcmc_kwargs=mcmc_kwargs,
interim_pdf_func=norm_obj.pdf,
bounds_lower=np.array([-0.5, -6]),
bounds_upper=np.array([1.5, 0]),
)
# MCMC over unweighted N summary stats
mcmc_kwargs_N = dict(p0=p0,
chain_path=os.path.join(infer_obj.out_dir,
'omega_chain_N.h5'),
**cfg['extra_mcmc_kwargs']
)
infer_obj.run_mcmc_for_omega_post_summary_stats('N',
mcmc_kwargs=mcmc_kwargs_N,
interim_pdf_func=norm_obj.pdf,
bounds_lower=np.array([-0.5, -6]),
bounds_upper=np.array([1.5, 0])
)
# MCMC over inv-dist N summary stats
mcmc_kwargs_N_inv_dist = dict(p0=p0,
chain_path=os.path.join(infer_obj.out_dir,
'omega_chain_N_inv_dist.h5'),
**cfg['extra_mcmc_kwargs']
)
infer_obj.run_mcmc_for_omega_post_summary_stats('N_inv_dist',
mcmc_kwargs=mcmc_kwargs_N_inv_dist,
interim_pdf_func=norm_obj.pdf,
bounds_lower=np.array([-0.5, -6]),
bounds_upper=np.array([1.5, 0])
)
grid_k_kwargs = dict(grid=np.linspace(-0.2, 0.2, 1000),
n_samples=1000,
n_mc_dropout=20,
interim_pdf_func=norm_obj.pdf,
)
# Use the per-sample reweighted samples for calibration plot
k_bnn_analytic, k_bnn = infer_obj.get_reweighted_bnn_kappa(10000, grid_k_kwargs)
infer_obj.get_calibration_plot(k_bnn_analytic)
infer_obj.compute_metrics()
| [
"n2j.config_utils.get_config",
"os.path.join",
"numpy.log",
"numpy.array",
"numpy.linspace",
"n2j.inference.inference_manager.InferenceManager",
"numpy.random.randn"
] | [((282, 294), 'n2j.config_utils.get_config', 'get_config', ([], {}), '()\n', (292, 294), False, 'from n2j.config_utils import get_config\n'), ((311, 409), 'n2j.inference.inference_manager.InferenceManager', 'InferenceManager', ([], {'checkpoint_dir': "cfg['trainer']['checkpoint_dir']"}), "(checkpoint_dir=cfg['trainer']['checkpoint_dir'], **cfg[\n 'inference_manager'])\n", (327, 409), False, 'from n2j.inference.inference_manager import InferenceManager\n'), ((641, 701), 'os.path.join', 'os.path.join', (["cfg['data']['in_dir']", 'f"""cosmodc2_{hp}/Y_{hp}"""'], {}), "(cfg['data']['in_dir'], f'cosmodc2_{hp}/Y_{hp}')\n", (653, 701), False, 'import os\n'), ((2136, 2196), 'os.path.join', 'os.path.join', (["cfg['data']['in_dir']", 'f"""cosmodc2_{hp}/Y_{hp}"""'], {}), "(cfg['data']['in_dir'], f'cosmodc2_{hp}/Y_{hp}')\n", (2148, 2196), False, 'import os\n'), ((4190, 4247), 'numpy.random.randn', 'np.random.randn', (["cfg['extra_mcmc_kwargs']['n_walkers']", '(2)'], {}), "(cfg['extra_mcmc_kwargs']['n_walkers'], 2)\n", (4205, 4247), True, 'import numpy as np\n'), ((4278, 4301), 'numpy.array', 'np.array', (['[[0.01, 0.5]]'], {}), '([[0.01, 0.5]])\n', (4286, 4301), True, 'import numpy as np\n'), ((4366, 4415), 'os.path.join', 'os.path.join', (['infer_obj.out_dir', '"""omega_chain.h5"""'], {}), "(infer_obj.out_dir, 'omega_chain.h5')\n", (4378, 4415), False, 'import os\n'), ((6879, 6907), 'numpy.linspace', 'np.linspace', (['(-0.2)', '(0.2)', '(1000)'], {}), '(-0.2, 0.2, 1000)\n', (6890, 6907), True, 'import numpy as np\n'), ((4160, 4172), 'numpy.log', 'np.log', (['(0.04)'], {}), '(0.04)\n', (4166, 4172), True, 'import numpy as np\n'), ((4623, 4672), 'os.path.join', 'os.path.join', (['infer_obj.out_dir', '"""omega_chain.h5"""'], {}), "(infer_obj.out_dir, 'omega_chain.h5')\n", (4635, 4672), False, 'import os\n'), ((5069, 5089), 'numpy.array', 'np.array', (['[-0.5, -6]'], {}), '([-0.5, -6])\n', (5077, 5089), True, 'import numpy as np\n'), ((5146, 5164), 'numpy.array', 'np.array', 
(['[1.5, 0]'], {}), '([1.5, 0])\n', (5154, 5164), True, 'import numpy as np\n'), ((5333, 5384), 'os.path.join', 'os.path.join', (['infer_obj.out_dir', '"""omega_chain_N.h5"""'], {}), "(infer_obj.out_dir, 'omega_chain_N.h5')\n", (5345, 5384), False, 'import os\n'), ((5826, 5846), 'numpy.array', 'np.array', (['[-0.5, -6]'], {}), '([-0.5, -6])\n', (5834, 5846), True, 'import numpy as np\n'), ((5917, 5935), 'numpy.array', 'np.array', (['[1.5, 0]'], {}), '([1.5, 0])\n', (5925, 5935), True, 'import numpy as np\n'), ((6133, 6193), 'os.path.join', 'os.path.join', (['infer_obj.out_dir', '"""omega_chain_N_inv_dist.h5"""'], {}), "(infer_obj.out_dir, 'omega_chain_N_inv_dist.h5')\n", (6145, 6193), False, 'import os\n'), ((6680, 6700), 'numpy.array', 'np.array', (['[-0.5, -6]'], {}), '([-0.5, -6])\n', (6688, 6700), True, 'import numpy as np\n'), ((6771, 6789), 'numpy.array', 'np.array', (['[1.5, 0]'], {}), '([1.5, 0])\n', (6779, 6789), True, 'import numpy as np\n')] |
from typing import Dict
import pyarrow.parquet as pq
import pandas as pd
import numpy as np
import re
import json
from typing import List, Callable, Iterator, Union, Optional, Dict
from sportsdataverse.dl_utils import download, flatten_json_iterative, key_check
def espn_wnba_pbp(game_id: int, raw = False) -> Dict:
    """espn_wnba_pbp() - Pull the game by id. Data from API endpoints - `wnba/playbyplay`, `wnba/summary`

    Args:
        game_id (int): Unique game_id, can be obtained from wnba_schedule().
        raw (bool): If True, return the lightly-normalized API payload without
            further processing.

    Returns:
        Dict: Dictionary of game data with keys - "gameId", "plays", "winprobability", "boxscore", "header",
        "broadcasts", "videos", "playByPlaySource", "standings", "leaders", "seasonseries", "timeouts",
        "pickcenter", "againstTheSpread", "odds", "predictor", "espnWP", "gameInfo", "season"

    Example:
        `wnba_df = sportsdataverse.wnba.espn_wnba_pbp(game_id=401370395)`
    """
    # play by play; summary endpoint also carries the pickcenter array
    pbp_txt = {'timeouts': {}}
    summary_url = "http://site.api.espn.com/apis/site/v2/sports/basketball/wnba/summary?event={}".format(game_id)
    summary = json.loads(download(summary_url))
    # Sections that default to an empty array when absent from the payload
    array_sections = ['plays', 'seasonseries', 'videos', 'broadcasts', 'pickcenter',
                      'againstTheSpread', 'odds', 'winprobability', 'teamInfo',
                      'espnWP', 'leaders']
    for section in array_sections:
        pbp_txt[section] = key_check(obj=summary, key=section, replacement=np.array([]))
    # Sections that default to an empty dict when absent
    dict_sections = ['boxscore', 'format', 'gameInfo', 'predictor', 'article',
                     'header', 'season', 'standings']
    for section in dict_sections:
        pbp_txt[section] = key_check(obj=summary, key=section, replacement={})
    # Drop sections we never expose
    for section in ('news', 'shop'):
        pbp_txt.pop(section, None)
    incoming_keys_expected = ['boxscore', 'format', 'gameInfo', 'leaders', 'seasonseries', 'broadcasts',
                              'predictor', 'pickcenter', 'againstTheSpread', 'odds', 'winprobability',
                              'header', 'plays', 'article', 'videos', 'standings',
                              'teamInfo', 'espnWP', 'season', 'timeouts']
    if raw == True:
        # reorder keys in raw format, appending empty keys which are defined later to the end
        return {key: pbp_txt.get(key, {}) for key in incoming_keys_expected}
    return helper_wnba_pbp(game_id, pbp_txt)
def helper_wnba_pbp(game_id, pbp_txt):
    """Assemble the processed play-by-play payload for one WNBA game.

    Extracts spread information from pickcenter, identifies the home/away
    teams from the game header, runs play-level feature engineering when a
    play-by-play feed exists, and serializes array-like sections to lists.
    """
    gameSpread, homeFavorite, gameSpreadAvailable = helper_wnba_pickcenter(pbp_txt)
    pbp_txt['gameInfo'] = pbp_txt['header']['competitions'][0]
    pbp_txt['season'] = pbp_txt['header']['season']
    pbp_txt['playByPlaySource'] = pbp_txt['header']['competitions'][0]['playByPlaySource']
    # Home (competitor 0) and away (competitor 1) identification variables
    competitors = pbp_txt['header']['competitions'][0]['competitors']
    home_team = competitors[0]['team']
    away_team = competitors[1]['team']
    homeTeamId = int(home_team['id'])
    awayTeamId = int(away_team['id'])
    homeTeamMascot = str(home_team['name'])
    awayTeamMascot = str(away_team['name'])
    homeTeamName = str(home_team['location'])
    awayTeamName = str(away_team['location'])
    homeTeamAbbrev = str(home_team['abbreviation'])
    awayTeamAbbrev = str(away_team['abbreviation'])
    # Alternate name: collapse "Stat..." (e.g. "State") to "St"
    homeTeamNameAlt = re.sub("Stat(.+)", "St", str(homeTeamName))
    awayTeamNameAlt = re.sub("Stat(.+)", "St", str(awayTeamName))
    if (pbp_txt['playByPlaySource'] != "none") & (len(pbp_txt['plays'])>1):
        helper_wnba_pbp_features(game_id, pbp_txt, gameSpread, homeFavorite,
                                 gameSpreadAvailable, homeTeamId, awayTeamId,
                                 homeTeamMascot, awayTeamMascot,
                                 homeTeamName, awayTeamName,
                                 homeTeamAbbrev, awayTeamAbbrev,
                                 homeTeamNameAlt, awayTeamNameAlt)
    else:
        pbp_txt['plays'] = pd.DataFrame()
    pbp_txt['plays'] = pbp_txt['plays'].replace({np.nan: None})
    # Array-backed sections are round-tripped through numpy to plain lists.
    as_list = lambda section: np.array(pbp_txt[section]).tolist()
    pbp_json = {
        "gameId": game_id,
        "plays" : pbp_txt['plays'].to_dict(orient='records'),
        "winprobability" : as_list('winprobability'),
        "boxscore" : pbp_txt['boxscore'],
        "header" : pbp_txt['header'],
        "broadcasts" : as_list('broadcasts'),
        "videos" : as_list('videos'),
        "playByPlaySource": pbp_txt['playByPlaySource'],
        "standings" : pbp_txt['standings'],
        "leaders" : as_list('leaders'),
        "seasonseries" : as_list('seasonseries'),
        "timeouts" : pbp_txt['timeouts'],
        "pickcenter" : as_list('pickcenter'),
        "againstTheSpread" : as_list('againstTheSpread'),
        "odds" : as_list('odds'),
        "predictor" : pbp_txt['predictor'],
        "espnWP" : as_list('espnWP'),
        "gameInfo" : as_list('gameInfo'),
        "season" : as_list('season')
    }
    return pbp_json
def helper_wnba_pbp_features(game_id, pbp_txt, gameSpread, homeFavorite, gameSpreadAvailable, homeTeamId, awayTeamId, homeTeamMascot, awayTeamMascot, homeTeamName, awayTeamName, homeTeamAbbrev, awayTeamAbbrev, homeTeamNameAlt, awayTeamNameAlt):
    """Flatten ESPN WNBA play-by-play JSON and enrich it with game metadata.

    Mutates ``pbp_txt`` in place and returns ``None``: ``pbp_txt['plays']``
    ends up as a pandas DataFrame with one row per play, carrying team
    identifiers, spread information and derived clock/period columns.
    All non-``pbp_txt`` parameters are per-game metadata computed by the
    caller and copied verbatim onto every play row.
    """
    # Flatten each nested play dict, then normalize the flat dicts into a
    # DataFrame under pbp_txt['plays'].
    pbp_txt['plays_mod'] = []
    for play in pbp_txt['plays']:
        p = flatten_json_iterative(play)
        pbp_txt['plays_mod'].append(p)
    pbp_txt['plays'] = pd.json_normalize(pbp_txt,'plays_mod')
    # First pass of metadata columns (re-applied after the to_dict/DataFrame
    # round-trip below).
    pbp_txt['plays']['season'] = pbp_txt['season']['year']
    pbp_txt['plays']['seasonType'] = pbp_txt['season']['type']
    pbp_txt['plays']["awayTeamId"] = awayTeamId
    pbp_txt['plays']["awayTeamName"] = str(awayTeamName)
    pbp_txt['plays']["awayTeamMascot"] = str(awayTeamMascot)
    pbp_txt['plays']["awayTeamAbbrev"] = str(awayTeamAbbrev)
    pbp_txt['plays']["awayTeamNameAlt"] = str(awayTeamNameAlt)
    pbp_txt['plays']["homeTeamId"] = homeTeamId
    pbp_txt['plays']["homeTeamName"] = str(homeTeamName)
    pbp_txt['plays']["homeTeamMascot"] = str(homeTeamMascot)
    pbp_txt['plays']["homeTeamAbbrev"] = str(homeTeamAbbrev)
    pbp_txt['plays']["homeTeamNameAlt"] = str(homeTeamNameAlt)
    # Spread definition
    # NOTE(review): homeTeamSpread is assigned 2.5 and then immediately
    # overwritten by the signed np.where spread; likewise gameSpread is set
    # to abs(gameSpread) and later overwritten with the raw value. The dead
    # stores are kept as-is here.
    pbp_txt['plays']["homeTeamSpread"] = 2.5
    pbp_txt['plays']["gameSpread"] = abs(gameSpread)
    pbp_txt['plays']["homeTeamSpread"] = np.where(homeFavorite == True, abs(gameSpread), -1*abs(gameSpread))
    pbp_txt['homeTeamSpread'] = np.where(homeFavorite == True, abs(gameSpread), -1*abs(gameSpread))
    pbp_txt['plays']["homeFavorite"] = homeFavorite
    pbp_txt['plays']["gameSpread"] = gameSpread
    pbp_txt['plays']["gameSpreadAvailable"] = gameSpreadAvailable
    # Round-trip through records to drop the normalized index structure.
    pbp_txt['plays'] = pbp_txt['plays'].to_dict(orient='records')
    pbp_txt['plays'] = pd.DataFrame(pbp_txt['plays'])
    # Second pass: season now taken from the header payload, plus game id.
    pbp_txt['plays']['season'] = pbp_txt['header']['season']['year']
    pbp_txt['plays']['seasonType'] = pbp_txt['header']['season']['type']
    pbp_txt['plays']['game_id'] = int(game_id)
    pbp_txt['plays']["homeTeamId"] = homeTeamId
    pbp_txt['plays']["awayTeamId"] = awayTeamId
    pbp_txt['plays']["homeTeamName"] = str(homeTeamName)
    pbp_txt['plays']["awayTeamName"] = str(awayTeamName)
    pbp_txt['plays']["homeTeamMascot"] = str(homeTeamMascot)
    pbp_txt['plays']["awayTeamMascot"] = str(awayTeamMascot)
    pbp_txt['plays']["homeTeamAbbrev"] = str(homeTeamAbbrev)
    pbp_txt['plays']["awayTeamAbbrev"] = str(awayTeamAbbrev)
    pbp_txt['plays']["homeTeamNameAlt"] = str(homeTeamNameAlt)
    pbp_txt['plays']["awayTeamNameAlt"] = str(awayTeamNameAlt)
    # Quarter number as int, mirrored into a 'qtr' column.
    pbp_txt['plays']['period.number'] = pbp_txt['plays']['period.number'].apply(lambda x: int(x))
    pbp_txt['plays']['qtr'] = pbp_txt['plays']['period.number'].apply(lambda x: int(x))
    # NOTE(review): the spread assignments below repeat the block above,
    # including the dead stores; preserved verbatim.
    pbp_txt['plays']["homeTeamSpread"] = 2.5
    pbp_txt['plays']["gameSpread"] = abs(gameSpread)
    pbp_txt['plays']["gameSpreadAvailable"] = gameSpreadAvailable
    pbp_txt['plays']["homeTeamSpread"] = np.where(homeFavorite == True, abs(gameSpread), -1*abs(gameSpread))
    pbp_txt['homeTeamSpread'] = np.where(homeFavorite == True, abs(gameSpread), -1*abs(gameSpread))
    pbp_txt['plays']["homeFavorite"] = homeFavorite
    pbp_txt['plays']["gameSpread"] = gameSpread
    pbp_txt['plays']["homeFavorite"] = homeFavorite
    #----- Time ---------------
    # Clock strings without a colon (e.g. "43.9" seconds) get a "0:" prefix
    # so the split below always yields (minutes, seconds).
    pbp_txt['plays']['clock.displayValue'] = np.select(
        [
            pbp_txt['plays']['clock.displayValue'].str.contains(":") == False
        ],
        [
            "0:" + pbp_txt['plays']['clock.displayValue'].apply(lambda x: str(x))
        ], default = pbp_txt['plays']['clock.displayValue']
    )
    pbp_txt['plays']['time'] = pbp_txt['plays']['clock.displayValue']
    pbp_txt['plays']['clock.mm'] = pbp_txt['plays']['clock.displayValue'].str.split(pat=':')
    pbp_txt['plays'][['clock.minutes','clock.seconds']] = pbp_txt['plays']['clock.mm'].to_list()
    pbp_txt['plays']['clock.minutes'] = pbp_txt['plays']['clock.minutes'].apply(lambda x: int(x))
    pbp_txt['plays']['clock.seconds'] = pbp_txt['plays']['clock.seconds'].apply(lambda x: float(x))
    # pbp_txt['plays']['clock.mm'] = pbp_txt['plays']['clock.displayValue'].apply(lambda x: datetime.strptime(str(x),'%M:%S'))
    # Halves and lag/lead columns used for period-transition detection below.
    pbp_txt['plays']['half'] = np.where(pbp_txt['plays']['qtr'] <= 2, "1","2")
    pbp_txt['plays']['game_half'] = np.where(pbp_txt['plays']['qtr'] <= 2, "1","2")
    pbp_txt['plays']['lag_qtr'] = pbp_txt['plays']['qtr'].shift(1)
    pbp_txt['plays']['lead_qtr'] = pbp_txt['plays']['qtr'].shift(-1)
    pbp_txt['plays']['lag_game_half'] = pbp_txt['plays']['game_half'].shift(1)
    pbp_txt['plays']['lead_game_half'] = pbp_txt['plays']['game_half'].shift(-1)
    # Seconds remaining, assuming 10-minute (600 s) quarters.
    pbp_txt['plays']['start.quarter_seconds_remaining'] = 60*pbp_txt['plays']['clock.minutes'].astype(int) + pbp_txt['plays']['clock.seconds'].astype(int)
    pbp_txt['plays']['start.half_seconds_remaining'] = np.where(
        pbp_txt['plays']['qtr'].isin([1,3]),
        600 + 60*pbp_txt['plays']['clock.minutes'].astype(int) + pbp_txt['plays']['clock.seconds'].astype(int),
        60*pbp_txt['plays']['clock.minutes'].astype(int) + pbp_txt['plays']['clock.seconds'].astype(int)
    )
    pbp_txt['plays']['start.game_seconds_remaining'] = np.select(
        [
            pbp_txt['plays']['qtr'] == 1,
            pbp_txt['plays']['qtr'] == 2,
            pbp_txt['plays']['qtr'] == 3,
            pbp_txt['plays']['qtr'] == 4
        ],
        [
            1800 + 60*pbp_txt['plays']['clock.minutes'].astype(int) + pbp_txt['plays']['clock.seconds'].astype(int),
            1200 + 60*pbp_txt['plays']['clock.minutes'].astype(int) + pbp_txt['plays']['clock.seconds'].astype(int),
            600 + 60*pbp_txt['plays']['clock.minutes'].astype(int) + pbp_txt['plays']['clock.seconds'].astype(int),
            60*pbp_txt['plays']['clock.minutes'].astype(int) + pbp_txt['plays']['clock.seconds'].astype(int)
        ], default = 60*pbp_txt['plays']['clock.minutes'].astype(int) + pbp_txt['plays']['clock.seconds'].astype(int)
    )
    # Pos Team - Start and End Id
    pbp_txt['plays']['game_play_number'] = np.arange(len(pbp_txt['plays']))+1
    pbp_txt['plays']['text'] = pbp_txt['plays']['text'].astype(str)
    pbp_txt['plays']['id'] = pbp_txt['plays']['id'].apply(lambda x: int(x))
    # End times default to the previous play's start time; first plays of a
    # quarter/half/game are patched to the full-period values below.
    pbp_txt['plays']['end.quarter_seconds_remaining'] = pbp_txt['plays']['start.quarter_seconds_remaining'].shift(1)
    pbp_txt['plays']['end.half_seconds_remaining'] = pbp_txt['plays']['start.half_seconds_remaining'].shift(1)
    pbp_txt['plays']['end.game_seconds_remaining'] = pbp_txt['plays']['start.game_seconds_remaining'].shift(1)
    pbp_txt['plays']['end.quarter_seconds_remaining'] = np.select(
        [
            (pbp_txt['plays']['game_play_number'] == 1)|
            ((pbp_txt['plays']['qtr'] == 2) & (pbp_txt['plays']['lag_qtr'] == 1))|
            ((pbp_txt['plays']['qtr'] == 3) & (pbp_txt['plays']['lag_qtr'] == 2))|
            ((pbp_txt['plays']['qtr'] == 4) & (pbp_txt['plays']['lag_qtr'] == 3))
        ],
        [
            600
        ], default = pbp_txt['plays']['end.quarter_seconds_remaining']
    )
    pbp_txt['plays']['end.half_seconds_remaining'] = np.select(
        [
            (pbp_txt['plays']['game_play_number'] == 1)|
            ((pbp_txt['plays']['game_half'] == "2") & (pbp_txt['plays']['lag_game_half'] == "1"))
        ],
        [
            1200
        ], default = pbp_txt['plays']['end.half_seconds_remaining']
    )
    pbp_txt['plays']['end.game_seconds_remaining'] = np.select(
        [
            (pbp_txt['plays']['game_play_number'] == 1),
            ((pbp_txt['plays']['game_half'] == "2") & (pbp_txt['plays']['lag_game_half'] == "1"))
        ],
        [
            2400,
            1200
        ], default = pbp_txt['plays']['end.game_seconds_remaining']
    )
    pbp_txt['plays']['period'] = pbp_txt['plays']['qtr']
    # The intermediate split-list column is not part of the public output.
    del pbp_txt['plays']['clock.mm']
def helper_wnba_pickcenter(pbp_txt):
    """Extract betting-spread metadata from the pickcenter payload.

    Returns a ``(gameSpread, homeFavorite, gameSpreadAvailable)`` tuple.
    When fewer than two pickcenter entries exist, falls back to the
    defaults ``(2.5, True, False)``. Otherwise the second entry is used
    when it carries a ``spread`` key, else the first.
    """
    pickcenter = pbp_txt['pickcenter']
    if len(pickcenter) > 1:
        entry = pickcenter[1] if 'spread' in pickcenter[1] else pickcenter[0]
        return entry['spread'], entry['homeTeamOdds']['favorite'], True
    return 2.5, True, False
| [
"json.loads",
"pandas.json_normalize",
"numpy.select",
"numpy.where",
"sportsdataverse.dl_utils.key_check",
"sportsdataverse.dl_utils.flatten_json_iterative",
"numpy.array",
"pandas.DataFrame",
"sportsdataverse.dl_utils.download"
] | [((1204, 1225), 'sportsdataverse.dl_utils.download', 'download', (['summary_url'], {}), '(summary_url)\n', (1212, 1225), False, 'from sportsdataverse.dl_utils import download, flatten_json_iterative, key_check\n'), ((1240, 1264), 'json.loads', 'json.loads', (['summary_resp'], {}), '(summary_resp)\n', (1250, 1264), False, 'import json\n'), ((5766, 5805), 'pandas.json_normalize', 'pd.json_normalize', (['pbp_txt', '"""plays_mod"""'], {}), "(pbp_txt, 'plays_mod')\n", (5783, 5805), True, 'import pandas as pd\n'), ((7097, 7127), 'pandas.DataFrame', 'pd.DataFrame', (["pbp_txt['plays']"], {}), "(pbp_txt['plays'])\n", (7109, 7127), True, 'import pandas as pd\n'), ((9610, 9658), 'numpy.where', 'np.where', (["(pbp_txt['plays']['qtr'] <= 2)", '"""1"""', '"""2"""'], {}), "(pbp_txt['plays']['qtr'] <= 2, '1', '2')\n", (9618, 9658), True, 'import numpy as np\n'), ((9694, 9742), 'numpy.where', 'np.where', (["(pbp_txt['plays']['qtr'] <= 2)", '"""1"""', '"""2"""'], {}), "(pbp_txt['plays']['qtr'] <= 2, '1', '2')\n", (9702, 9742), True, 'import numpy as np\n'), ((12096, 12448), 'numpy.select', 'np.select', (["[(pbp_txt['plays']['game_play_number'] == 1) | (pbp_txt['plays']['qtr'] == \n 2) & (pbp_txt['plays']['lag_qtr'] == 1) | (pbp_txt['plays']['qtr'] == 3\n ) & (pbp_txt['plays']['lag_qtr'] == 2) | (pbp_txt['plays']['qtr'] == 4) &\n (pbp_txt['plays']['lag_qtr'] == 3)]", '[600]'], {'default': "pbp_txt['plays']['end.quarter_seconds_remaining']"}), "([(pbp_txt['plays']['game_play_number'] == 1) | (pbp_txt['plays'][\n 'qtr'] == 2) & (pbp_txt['plays']['lag_qtr'] == 1) | (pbp_txt['plays'][\n 'qtr'] == 3) & (pbp_txt['plays']['lag_qtr'] == 2) | (pbp_txt['plays'][\n 'qtr'] == 4) & (pbp_txt['plays']['lag_qtr'] == 3)], [600], default=\n pbp_txt['plays']['end.quarter_seconds_remaining'])\n", (12105, 12448), True, 'import numpy as np\n'), ((12629, 12845), 'numpy.select', 'np.select', (["[(pbp_txt['plays']['game_play_number'] == 1) | (pbp_txt['plays'][\n 'game_half'] == '2') & 
(pbp_txt['plays']['lag_game_half'] == '1')]", '[1200]'], {'default': "pbp_txt['plays']['end.half_seconds_remaining']"}), "([(pbp_txt['plays']['game_play_number'] == 1) | (pbp_txt['plays'][\n 'game_half'] == '2') & (pbp_txt['plays']['lag_game_half'] == '1')], [\n 1200], default=pbp_txt['plays']['end.half_seconds_remaining'])\n", (12638, 12845), True, 'import numpy as np\n'), ((13002, 13221), 'numpy.select', 'np.select', (["[pbp_txt['plays']['game_play_number'] == 1, (pbp_txt['plays']['game_half'] ==\n '2') & (pbp_txt['plays']['lag_game_half'] == '1')]", '[2400, 1200]'], {'default': "pbp_txt['plays']['end.game_seconds_remaining']"}), "([pbp_txt['plays']['game_play_number'] == 1, (pbp_txt['plays'][\n 'game_half'] == '2') & (pbp_txt['plays']['lag_game_half'] == '1')], [\n 2400, 1200], default=pbp_txt['plays']['end.game_seconds_remaining'])\n", (13011, 13221), True, 'import numpy as np\n'), ((1627, 1672), 'sportsdataverse.dl_utils.key_check', 'key_check', ([], {'obj': 'summary', 'key': 'k', 'replacement': '{}'}), '(obj=summary, key=k, replacement={})\n', (1636, 1672), False, 'from sportsdataverse.dl_utils import download, flatten_json_iterative, key_check\n'), ((4183, 4197), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4195, 4197), True, 'import pandas as pd\n'), ((5675, 5703), 'sportsdataverse.dl_utils.flatten_json_iterative', 'flatten_json_iterative', (['play'], {}), '(play)\n', (5697, 5703), False, 'from sportsdataverse.dl_utils import download, flatten_json_iterative, key_check\n'), ((1487, 1499), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1495, 1499), True, 'import numpy as np\n'), ((4395, 4430), 'numpy.array', 'np.array', (["pbp_txt['winprobability']"], {}), "(pbp_txt['winprobability'])\n", (4403, 4430), True, 'import numpy as np\n'), ((4544, 4575), 'numpy.array', 'np.array', (["pbp_txt['broadcasts']"], {}), "(pbp_txt['broadcasts'])\n", (4552, 4575), True, 'import numpy as np\n'), ((4605, 4632), 'numpy.array', 'np.array', (["pbp_txt['videos']"], 
{}), "(pbp_txt['videos'])\n", (4613, 4632), True, 'import numpy as np\n'), ((4764, 4792), 'numpy.array', 'np.array', (["pbp_txt['leaders']"], {}), "(pbp_txt['leaders'])\n", (4772, 4792), True, 'import numpy as np\n'), ((4828, 4861), 'numpy.array', 'np.array', (["pbp_txt['seasonseries']"], {}), "(pbp_txt['seasonseries'])\n", (4836, 4861), True, 'import numpy as np\n'), ((4937, 4968), 'numpy.array', 'np.array', (["pbp_txt['pickcenter']"], {}), "(pbp_txt['pickcenter'])\n", (4945, 4968), True, 'import numpy as np\n'), ((5008, 5045), 'numpy.array', 'np.array', (["pbp_txt['againstTheSpread']"], {}), "(pbp_txt['againstTheSpread'])\n", (5016, 5045), True, 'import numpy as np\n'), ((5073, 5098), 'numpy.array', 'np.array', (["pbp_txt['odds']"], {}), "(pbp_txt['odds'])\n", (5081, 5098), True, 'import numpy as np\n'), ((5172, 5199), 'numpy.array', 'np.array', (["pbp_txt['espnWP']"], {}), "(pbp_txt['espnWP'])\n", (5180, 5199), True, 'import numpy as np\n'), ((5231, 5260), 'numpy.array', 'np.array', (["pbp_txt['gameInfo']"], {}), "(pbp_txt['gameInfo'])\n", (5239, 5260), True, 'import numpy as np\n'), ((5290, 5317), 'numpy.array', 'np.array', (["pbp_txt['season']"], {}), "(pbp_txt['season'])\n", (5298, 5317), True, 'import numpy as np\n')] |
"""
Basic docstring explaining example
"""
from __future__ import print_function
#********************
#sf3dmodels libraries
#********************
from sf3dmodels.outflow import OutflowModel #Model functions
import sf3dmodels.utils.units as u #Units
import sf3dmodels.rt as rt #Writing functions for radiative transfer
import sf3dmodels.Plot_model as Pm #Plotting model
import sf3dmodels.Model as Model #Grid
from sf3dmodels.grid import Overlap #Overlap submodels
#********************
#Extra libraries
#********************
import numpy as np
import time
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
t0 = time.time()
#********
#GRIDDING
#********
sizex = 100 * u.au
sizey = sizez = 100 * u.au
Nx = Ny = Nz = 100
GRID = Model.grid([sizex, sizey, sizez], [Nx, Ny, Nz], rt_code='lime')
#********
#MERGING
#********
files = ['disc.dat', 'outflow.dat']
#Old style
#outflows = BGG.overlap(GRID, submodels = data2merge, rho_min = 1e6)
#New style
columns = ['id', 'x', 'y', 'z', 'dens_H2', 'dens_Hplus', 'temp_gas', 'vel_x', 'vel_y', 'vel_z', 'abundance', 'gtdratio']
overlap = Overlap(GRID)
finalprop = overlap.fromfiles(columns, submodels = files, rt_code = 'lime')
#**********
#WRITING
#**********
lime = rt.Lime(GRID)
lime.finalmodel(finalprop)
#********
#TIMING
#********
print ('Ellapsed time: %.3fs' % (time.time() - t0))
print ('-------------------------------------------------\n-------------------------------------------------\n')
#********
#PLOTTING
#********
density = finalprop['dens_H2'] / 1e6 #dens. in cm^-3
temperature = finalprop['temp_gas']
weight = 1.0#100 * np.mean(density)
"""
#-----------------
#Plot for DENSITY
#-----------------
Pm.scatter3D(GRID, density, weight, NRand = 4000, axisunit = u.au, colorscale = 'log', cmap = 'cool',
colorlabel = r'${\rm log}_{10}(n [cm^{-3}])$', output = 'global_grid_dens.png', vmin = 5)
#--------------------
#Plot for TEMPERATURE
#--------------------
Pm.scatter3D(GRID, density, weight, colordim = temperature, NRand = 4000, axisunit = u.au, colorscale = 'log',
cmap = 'brg', colorlabel = r'${\rm log}_{10}(T$ $[K])$', output = 'global_grid_temp.png', vmin = 2)
"""
#******************
#3D plotting
#******************
lims = np.array([-100,100])
weight = 1.0
ax_kw = {'projection': '3d'}#, 'xlim': lims, 'ylim': lims, 'zlim': lims, 'azim': -50, 'elev': 30}
canvas3d = Pm.Canvas3d(ax_kw=ax_kw)
sp = canvas3d.scatter_random(GRID, density, weight, GRID_unit=u.au, power=0, NRand=10000, prop_min=1.0, #function arguments
marker = '+', cmap = 'jet', s = 3, edgecolors = 'none', vmin=1, norm = colors.LogNorm()) #Scatter kwargs
cbar = plt.colorbar(sp)
cbar.ax.set_ylabel(r'H$_2$ density [cm$^{-3}$]')
canvas3d.ax.set_xlabel('au')
plt.savefig('grid_dens3d.png', bbox_inches='tight')
canvas3d = Pm.Canvas3d(ax_kw=ax_kw)
sp = canvas3d.scatter_random(GRID, density, weight, prop_color = temperature, GRID_unit=u.au, power=0, NRand=10000, prop_min=1.0, #function arguments
marker = '+', cmap = 'jet', s = 3, edgecolors = 'none', vmin=1, norm = colors.LogNorm()) #Scatter kwargs
cbar = plt.colorbar(sp)
cbar.ax.set_ylabel(r'T [K]')
canvas3d.ax.set_xlabel('au')
plt.savefig('grid_temp3d.png', bbox_inches='tight')
plt.show()
| [
"sf3dmodels.grid.Overlap",
"matplotlib.pyplot.savefig",
"sf3dmodels.Model.grid",
"sf3dmodels.rt.Lime",
"matplotlib.pyplot.colorbar",
"numpy.array",
"sf3dmodels.Plot_model.Canvas3d",
"matplotlib.colors.LogNorm",
"time.time",
"matplotlib.pyplot.show"
] | [((723, 734), 'time.time', 'time.time', ([], {}), '()\n', (732, 734), False, 'import time\n'), ((839, 902), 'sf3dmodels.Model.grid', 'Model.grid', (['[sizex, sizey, sizez]', '[Nx, Ny, Nz]'], {'rt_code': '"""lime"""'}), "([sizex, sizey, sizez], [Nx, Ny, Nz], rt_code='lime')\n", (849, 902), True, 'import sf3dmodels.Model as Model\n'), ((1192, 1205), 'sf3dmodels.grid.Overlap', 'Overlap', (['GRID'], {}), '(GRID)\n', (1199, 1205), False, 'from sf3dmodels.grid import Overlap\n'), ((1323, 1336), 'sf3dmodels.rt.Lime', 'rt.Lime', (['GRID'], {}), '(GRID)\n', (1330, 1336), True, 'import sf3dmodels.rt as rt\n'), ((2338, 2359), 'numpy.array', 'np.array', (['[-100, 100]'], {}), '([-100, 100])\n', (2346, 2359), True, 'import numpy as np\n'), ((2482, 2506), 'sf3dmodels.Plot_model.Canvas3d', 'Pm.Canvas3d', ([], {'ax_kw': 'ax_kw'}), '(ax_kw=ax_kw)\n', (2493, 2506), True, 'import sf3dmodels.Plot_model as Pm\n'), ((2772, 2788), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['sp'], {}), '(sp)\n', (2784, 2788), True, 'import matplotlib.pyplot as plt\n'), ((2867, 2918), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""grid_dens3d.png"""'], {'bbox_inches': '"""tight"""'}), "('grid_dens3d.png', bbox_inches='tight')\n", (2878, 2918), True, 'import matplotlib.pyplot as plt\n'), ((2932, 2956), 'sf3dmodels.Plot_model.Canvas3d', 'Pm.Canvas3d', ([], {'ax_kw': 'ax_kw'}), '(ax_kw=ax_kw)\n', (2943, 2956), True, 'import sf3dmodels.Plot_model as Pm\n'), ((3248, 3264), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['sp'], {}), '(sp)\n', (3260, 3264), True, 'import matplotlib.pyplot as plt\n'), ((3323, 3374), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""grid_temp3d.png"""'], {'bbox_inches': '"""tight"""'}), "('grid_temp3d.png', bbox_inches='tight')\n", (3334, 3374), True, 'import matplotlib.pyplot as plt\n'), ((3376, 3386), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3384, 3386), True, 'import matplotlib.pyplot as plt\n'), ((2731, 2747), 'matplotlib.colors.LogNorm', 
'colors.LogNorm', ([], {}), '()\n', (2745, 2747), True, 'import matplotlib.colors as colors\n'), ((3207, 3223), 'matplotlib.colors.LogNorm', 'colors.LogNorm', ([], {}), '()\n', (3221, 3223), True, 'import matplotlib.colors as colors\n'), ((1427, 1438), 'time.time', 'time.time', ([], {}), '()\n', (1436, 1438), False, 'import time\n')] |
from __future__ import print_function
import numpy as np
import nose.tools as nt
from ..randomization import randomization
def test_noise_dbns():
    """Smoke-test the randomization noise distributions.

    For each supported noise family, verifies that densities,
    smooth-objective evaluations (all three modes), gradients and samples
    can be computed, and that samples have the expected shape.
    """
    X = np.random.standard_normal((10, 5))
    Q = X.T.dot(X)  # symmetric PSD matrix for the general-Gaussian noise
    noises = [randomization.isotropic_gaussian((5,), 1.),
              randomization.laplace((5,), 1.),
              randomization.logistic((5,), 1.),
              randomization.gaussian(Q)]
    v1, v2 = [], []
    for noise in noises:
        x = np.random.standard_normal(5)
        # NOTE(review): v1/v2 collect exp(log_density) and _density but are
        # never compared; consider asserting their closeness if the two are
        # meant to agree.
        v1.append(np.exp(noise.log_density(x)))
        v2.append(noise._density(x))
        noise.smooth_objective(x, 'func')
        noise.smooth_objective(x, 'grad')
        noise.smooth_objective(x, 'both')
        noise.gradient(x)
        nt.assert_equal(noise.sample().shape, (5,))
        nt.assert_equal(noise.sample().shape, (5,))
        if noise.CGF is not None:
            u = np.zeros(5)
            u[:2] = 0.1
            noise.CGF.smooth_objective(u, 'both')
        if noise.CGF_conjugate is not None:
            noise.CGF_conjugate.smooth_objective(x, 'both')
| [
"numpy.random.standard_normal",
"numpy.zeros"
] | [((158, 192), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(10, 5)'], {}), '((10, 5))\n', (183, 192), True, 'import numpy as np\n'), ((480, 508), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(5)'], {}), '(5)\n', (505, 508), True, 'import numpy as np\n'), ((521, 549), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(5)'], {}), '(5)\n', (546, 549), True, 'import numpy as np\n'), ((944, 955), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (952, 955), True, 'import numpy as np\n')] |
import numpy as np
from .._base import SupervisedModel
class Bayes(SupervisedModel):
    """Bayes classifier with per-class Gaussian densities.

    Of the classical Bayes variants (Gaussian, Multinomial, Bernoulli),
    only the Gaussian model is implemented here. Prediction picks the
    class maximizing the (unnormalized) posterior; passing ``linear=True``
    to ``predict``/``evaluate`` makes every class share the pooled
    covariance, yielding linear decision boundaries.
    """

    def __init__(self):
        # All fitted state is keyed by class label and populated by fit().
        self._prior_dict = None   # class prior P(w)
        self._mean_dict = None    # class mean vector
        self._cov_dict = None     # per-class covariance matrix
        self._cov_all = None      # pooled covariance over all samples
        self._p = None            # feature dimensionality seen at fit time

    def fit(self, x: np.ndarray, label: np.ndarray, **kwargs) -> float:
        """Estimate priors, means and covariances; return training 0-1 loss."""
        assert x.shape[0] == label.shape[0]
        n_samples, n_features = x.shape
        stale = (self._mean_dict is None or self._cov_dict is None
                 or self._prior_dict is None or self._p != n_features)
        if stale:
            self._prior_dict, self._mean_dict, self._cov_dict = {}, {}, {}
            self._p = n_features
        # Per-class Gaussian parameters and empirical priors.
        for c in np.unique(label):
            members = x[label == c]
            self._prior_dict[c] = members.shape[0] / n_samples
            self._mean_dict[c], self._cov_dict[c] = self._param_gaussian(members)
        # Pooled covariance (used by the linear variant of predict()).
        self._cov_all = self._param_gaussian(x)[1]
        return self.evaluate(x, label)[1]

    def predict(self, x: np.ndarray, **kwargs) -> np.ndarray:
        """Predict class labels for the rows of ``x``.

        Keyword ``linear`` (bool, default False) selects the shared-covariance
        (linear boundary) variant.
        """
        assert self._cov_dict is not None and self._mean_dict is not None
        assert self._cov_all is not None
        assert self._p == x.shape[1]
        linear = False
        if 'linear' in kwargs:
            assert isinstance(kwargs['linear'], bool)
            linear = kwargs['linear']
        class_labels, posteriors = [], []
        for c, mean in self._mean_dict.items():
            cov = self._cov_all if linear else self._cov_dict[c]
            posteriors.append(
                self._posterior_gaussian(x, self._prior_dict[c], mean, cov))
            class_labels.append(c)
        # Row-wise winner across the stacked per-class posterior vectors.
        winner = np.argmax(posteriors, axis=0)
        return np.array(class_labels)[winner]

    def evaluate(self, x: np.ndarray, label: np.ndarray, **kwargs) -> tuple:
        """Return ``(precision, loss)`` where loss is the 0-1 error count."""
        assert x.shape[0] == label.shape[0]
        predicted = self.predict(x, **kwargs)
        loss = np.count_nonzero(predicted != label)
        return 1 - loss / x.shape[0], loss

    @staticmethod
    def _param_gaussian(x: np.ndarray) -> tuple:
        """MLE ``(mean, covariance)`` for the samples in the rows of ``x``."""
        mean = x.mean(axis=0)
        centered = x - mean
        return mean, np.matmul(centered.T, centered) / x.shape[0]

    @staticmethod
    def _posterior_gaussian(x: np.ndarray, prior: float,
                            mean: np.ndarray, cov: np.ndarray) -> np.ndarray:
        """Unnormalized posterior ``prior * N(x; mean, cov)`` per row.

        The evidence term (and the constant (2*pi)^(-p/2) factor) is
        omitted since only the argmax over classes matters.
        """
        centered = x - mean
        inv = np.linalg.pinv(cov)  # pseudo-inverse tolerates singular cov
        mahalanobis = np.apply_along_axis(
            lambda row: float(np.matmul(row, inv).dot(row)), 1, centered)
        likelihood = np.power(np.linalg.det(cov), -0.5) * np.exp(-0.5 * mahalanobis)
        return prior * likelihood
| [
"numpy.unique",
"numpy.linalg.pinv",
"numpy.argmax",
"numpy.linalg.det",
"numpy.count_nonzero",
"numpy.array",
"numpy.exp",
"numpy.matmul"
] | [((897, 913), 'numpy.unique', 'np.unique', (['label'], {}), '(label)\n', (906, 913), True, 'import numpy as np\n'), ((2366, 2389), 'numpy.argmax', 'np.argmax', (['prob'], {'axis': '(0)'}), '(prob, axis=0)\n', (2375, 2389), True, 'import numpy as np\n'), ((2411, 2431), 'numpy.array', 'np.array', (['label_list'], {}), '(label_list)\n', (2419, 2431), True, 'import numpy as np\n'), ((2713, 2750), 'numpy.count_nonzero', 'np.count_nonzero', (['(pred_label != label)'], {}), '(pred_label != label)\n', (2729, 2750), True, 'import numpy as np\n'), ((3555, 3574), 'numpy.linalg.pinv', 'np.linalg.pinv', (['cov'], {}), '(cov)\n', (3569, 3574), True, 'import numpy as np\n'), ((3044, 3067), 'numpy.matmul', 'np.matmul', (['diff.T', 'diff'], {}), '(diff.T, diff)\n', (3053, 3067), True, 'import numpy as np\n'), ((3515, 3533), 'numpy.linalg.det', 'np.linalg.det', (['cov'], {}), '(cov)\n', (3528, 3533), True, 'import numpy as np\n'), ((3756, 3780), 'numpy.exp', 'np.exp', (['(-0.5 * exponents)'], {}), '(-0.5 * exponents)\n', (3762, 3780), True, 'import numpy as np\n'), ((3688, 3707), 'numpy.matmul', 'np.matmul', (['row', 'inv'], {}), '(row, inv)\n', (3697, 3707), True, 'import numpy as np\n')] |
'''
Created on june 12, 2018
author: Edmond
'''
from __future__ import print_function, division, absolute_import, unicode_literals
import os
import shutil
import numpy as np
from collections import OrderedDict
import logging
from time import time
import tensorflow as tf
import util
from layers import (weight_variable, weight_variable_devonc, bias_variable,
conv2d, deconv2d, max_pool, crop_and_concat, pixel_wise_softmax_2,
crop_to_shape_v2,cross_entropy)
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
class SegNet(object):
    def __init__(self,cfg,learning_rate = 0.0017, channels=3, n_class=2,decay_step=2000,decay = 0.96, cost="cross_entropy",
                 cost_kwargs={}, **kwargs):
        """Build the full TF graph: placeholders, model, BCE loss, RMSProp
        training op with exponential LR decay, and summaries.

        NOTE(review): ``cost_kwargs={}`` is a mutable default argument, and
        ``cfg``/``cost``/``cost_kwargs`` appear unused beyond storage —
        confirm against the rest of the class.
        """
        print('begin initialize!')
        self.cfg=cfg
        self.n_class = n_class
        self.in_shape =(592,800)
        self.summaries = kwargs.get("summaries", True)
        # Fixed 592x800 input/label placeholders; labels are single-channel.
        self.x = tf.placeholder("float", shape=[None, 592, 800, channels])
        self.y = tf.placeholder("float", shape=[None, 592, 800, 1])
        self.Dropout_Rate = tf.placeholder(tf.float32) # dropout (keep probability)
        self.IsTraining = tf.placeholder(tf.bool)
        self.logits = self._creat_model(self.x,channels,n_class)
        # Binary cross-entropy on clipped probabilities (clipping avoids log(0)).
        self.loss = -tf.reduce_mean(self.y*tf.log(tf.clip_by_value(self.logits,1e-10,1.0)))+\
                    -tf.reduce_mean((1-self.y)*tf.log(tf.clip_by_value(1-self.logits,1e-10,1.0)))
        # NOTE(review): tf.sign yields -1/+1, and casting -1. to bool is True,
        # so correct_pred/accuracy may not behave as intended for logits<0.5;
        # a logits > 0.5 comparison would be the usual formulation — confirm.
        self.predicter =tf.sign(self.logits-0.5)
        self.correct_pred = tf.equal(tf.cast(self.predicter, tf.bool), tf.cast(self.y, tf.bool))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))
        self.cross_entropy = self.loss
        self.decay_step = decay_step
        self.decay = decay
        self.verification_batch_size =4
        with tf.name_scope('steps'):
            self.train_step = tf.Variable(0, name = 'global_step', trainable= False)
        with tf.name_scope('lr'):
            # Staircase exponential decay driven by the global step.
            self.lr = tf.train.exponential_decay(learning_rate, self.train_step, self.decay_step, self.decay, staircase= True, name= 'learning_rate')
        with tf.name_scope('rmsprop'):
            self.rmsprop = tf.train.RMSPropOptimizer(learning_rate= self.lr)
        with tf.name_scope('minimizer'):
            # Run batch-norm moving-average updates before each train step.
            self.update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(self.update_ops):
                self.train_rmsprop = self.rmsprop.minimize(self.loss, self.train_step)
        self.init = tf.global_variables_initializer()
        tf.summary.scalar('loss', self.loss)
        tf.summary.scalar('cross_entropy', self.cross_entropy)
        tf.summary.scalar('accuracy', self.accuracy)
        tf.summary.scalar('learning_rate', self.lr)
        self.summary_op = tf.summary.merge_all()
        print('end initialize!')
def _conv(self, inputs, filters, kernel_size = 1, strides = 1, pad = 'VALID', name = 'conv'):
""" Spatial Convolution (CONV2D)
Args:
inputs : Input Tensor (Data Type : NHWC)
filters : Number of filters (channels)
kernel_size : Size of kernel
strides : Stride
pad : Padding Type (VALID/SAME) # DO NOT USE 'SAME' NETWORK BUILT FOR VALID
name : Name of the block
Returns:
conv : Output Tensor (Convolved Input)
"""
with tf.name_scope(name):
# Kernel for convolution, Xavier Initialisation
kernel = tf.Variable(tf.contrib.layers.xavier_initializer(uniform=False)([kernel_size,kernel_size, inputs.get_shape().as_list()[3], filters]), name= 'weights')
conv = tf.nn.conv2d(inputs, kernel, [1,strides,strides,1], padding=pad, data_format='NHWC')
return conv
def _conv_bn_relu(self, inputs, filters, kernel_size = 1, strides = 1, pad = 'VALID', name = 'conv_bn_relu'):
""" Spatial Convolution (CONV2D) + BatchNormalization + ReLU Activation
Args:
inputs : Input Tensor (Data Type : NHWC)
filters : Number of filters (channels)
kernel_size : Size of kernel
strides : Stride
pad : Padding Type (VALID/SAME) # DO NOT USE 'SAME' NETWORK BUILT FOR VALID
name : Name of the block
Returns:
norm : Output Tensor
"""
with tf.name_scope(name):
kernel = tf.Variable(tf.contrib.layers.xavier_initializer(uniform=False)([kernel_size,kernel_size, inputs.get_shape().as_list()[3], filters]), name= 'weights')
conv = tf.nn.conv2d(inputs, kernel, [1,strides,strides,1], padding='VALID', data_format='NHWC')
norm = tf.contrib.layers.batch_norm(conv, 0.9, epsilon=1e-5, activation_fn = tf.nn.relu, is_training = self.IsTraining)
#if self.w_summary:
# with tf.device('/cpu:0'):
# tf.summary.histogram('weights_summary', kernel, collections = ['weight'])
return norm
def _conv_block(self, inputs, numOut, name = 'conv_block'):
""" Convolutional Block
Args:
inputs : Input Tensor
numOut : Desired output number of channel
name : Name of the block
Returns:
conv_3 : Output Tensor
"""
with tf.name_scope(name):
with tf.name_scope('norm_1'):
norm_1 = tf.contrib.layers.batch_norm(inputs, 0.9, epsilon=1e-5, activation_fn = tf.nn.relu, is_training = self.IsTraining)
conv_1 = self._conv(norm_1, int(numOut/2), kernel_size=1, strides=1, pad = 'VALID', name= 'conv')
with tf.name_scope('norm_2'):
norm_2 = tf.contrib.layers.batch_norm(conv_1, 0.9, epsilon=1e-5, activation_fn = tf.nn.relu, is_training = self.IsTraining)
pad = tf.pad(norm_2, np.array([[0,0],[1,1],[1,1],[0,0]]), name= 'pad')
conv_2 = self._conv(pad, int(numOut/2), kernel_size=3, strides=1, pad = 'VALID', name= 'conv')
with tf.name_scope('norm_3'):
norm_3 = tf.contrib.layers.batch_norm(conv_2, 0.9, epsilon=1e-5, activation_fn = tf.nn.relu, is_training = self.IsTraining)
conv_3 = self._conv(norm_3, int(numOut), kernel_size=1, strides=1, pad = 'VALID', name= 'conv')
return conv_3
def _skip_layer(self, inputs, numOut, name = 'skip_layer'):
""" Skip Layer
Args:
inputs : Input Tensor
numOut : Desired output number of channel
name : Name of the bloc
Returns:
Tensor of shape (None, inputs.height, inputs.width, numOut)
"""
with tf.name_scope(name):
if inputs.get_shape().as_list()[3] == numOut:
return inputs
else:
conv = self._conv(inputs, numOut, kernel_size=1, strides = 1, name = 'conv')
return conv
def _residual(self, inputs, numOut, modif = False, name = 'residual_block'):
""" Residual Unit
Args:
inputs : Input Tensor
numOut : Number of Output Features (channels)
name : Name of the block
"""
with tf.name_scope(name):
convb = self._conv_block(inputs, numOut)
skipl = self._skip_layer(inputs, numOut)
if modif:
return tf.nn.relu(tf.add_n([convb, skipl], name = 'res_block'))
else:
return tf.add_n([convb, skipl], name = 'res_block')
def _bn_relu(self, inputs):
norm = tf.contrib.layers.batch_norm(inputs, 0.9, epsilon=1e-5, activation_fn = tf.nn.relu, is_training = self.IsTraining)
return norm
def _pool_layer(self, inputs, numOut, name = 'pool_layer'):
with tf.name_scope(name):
bnr_1 = self._bn_relu(inputs)
pool = tf.contrib.layers.max_pool2d(bnr_1,[2,2],[2,2],padding='VALID')
pad_1 = tf.pad(pool, np.array([[0,0],[1,1],[1,1],[0,0]]))
conv_1 = self._conv(pad_1, numOut, kernel_size=3, strides=1, name='conv')
bnr_2 = self._bn_relu(conv_1)
pad_2 = tf.pad(bnr_2, np.array([[0,0],[1,1],[1,1],[0,0]]))
conv_2 = self._conv(pad_2, numOut, kernel_size=3, strides=1, name='conv')
upsample = tf.image.resize_nearest_neighbor(conv_2, tf.shape(conv_2)[1:3]*2, name = 'upsampling')
return upsample
def _attention_iter(self, inputs, lrnSize, itersize, name = 'attention_iter'):
    """Iterated attention refinement (CRF-like message passing).

    A single-channel map U is refined ``itersize`` times with a shared
    lrnSize x lrnSize kernel; the final sigmoid map gates every input
    channel.

    Args:
        inputs   : Input Tensor (NHWC)
        lrnSize  : spatial size of the shared refinement kernel
        itersize : number of refinement iterations
        name     : name of the scope
    Returns:
        inputs multiplied channel-wise by the final attention map.
    """
    with tf.name_scope(name):
        numIn = inputs.get_shape().as_list()[3]
        # Bug fix: np.floor returns a float, which made the paddings array
        # float64 -- tf.pad requires integer paddings. Cast to int.
        padding = int(np.floor(lrnSize / 2))
        pad = tf.pad(inputs, np.array([[0, 0], [1, 1], [1, 1], [0, 0]]))
        U = self._conv(pad, filters=1, kernel_size=3, strides=1)
        pad_2 = tf.pad(U, np.array([[0, 0], [padding, padding],
                                    [padding, padding], [0, 0]]))
        sharedK = tf.Variable(
            tf.contrib.layers.xavier_initializer(uniform=False)(
                [lrnSize, lrnSize, 1, 1]),
            name='shared_weights')
        Q = []
        C = []
        for i in range(itersize):
            if i == 0:
                # first pass consumes the explicitly padded map
                conv = tf.nn.conv2d(pad_2, sharedK, [1, 1, 1, 1],
                                    padding='VALID', data_format='NHWC')
            else:
                conv = tf.nn.conv2d(Q[i - 1], sharedK, [1, 1, 1, 1],
                                    padding='SAME', data_format='NHWC')
            C.append(conv)
            Q.append(tf.nn.sigmoid(tf.add_n([C[i], U])))
        # broadcast the final attention map across all input channels
        stacks = [Q[-1]] * numIn
        pfeat = tf.multiply(inputs, tf.concat(stacks, axis=3))
    return pfeat
def _residual_pool(self, inputs, numOut, name = 'residual_pool'):
    """Sum of the conv-block, skip-layer and pool-layer branches."""
    with tf.name_scope(name):
        branches = [
            self._conv_block(inputs, numOut),
            self._skip_layer(inputs, numOut),
            self._pool_layer(inputs, numOut),
        ]
        return tf.add_n(branches)
def _lin(self, inputs, numOut, name = 'lin'):
    """1x1 convolution to ``numOut`` channels followed by BN + ReLU."""
    projected = self._conv(inputs, filters=numOut, kernel_size=1, strides=1)
    return self._bn_relu(projected)
def _rep_residual(self, inputs, numOut, nRep, name = 'rep_residual'):
    """Chain of ``nRep`` residual units: a plain residual first, then
    residual-pool units feeding on the previous output."""
    with tf.name_scope(name):
        out = []
        for i in range(nRep):
            prev = inputs if i == 0 else out[i - 1]
            step = (self._residual(prev, numOut) if i == 0
                    else self._residual_pool(prev, numOut))
            out.append(step)
        return out[nRep - 1]
def up_sample(self, inputs, numOut, pool_size = 2, name = 'upsample'):
    """Learned upsampling via a transposed convolution.

    Args:
        inputs    : Input Tensor (NHWC)
        numOut    : number of output channels
        pool_size : upsampling factor / kernel size
        name      : scope name
    Returns:
        ReLU of the deconvolved tensor.
    """
    # Bug fix: the `name` argument was previously ignored (the scope was
    # hard-coded to 'upsample'); the default value keeps existing graph
    # names unchanged.
    with tf.name_scope(name):
        kernel = tf.Variable(
            tf.contrib.layers.xavier_initializer(uniform=False)(
                [pool_size, pool_size, numOut, inputs.get_shape().as_list()[3]]),
            name='weights')
        return tf.nn.relu(deconv2d(inputs, kernel, pool_size))
def _hg_mcam(self, inputs, n, numOut, nModual, name = 'mcam_hg'):
    """Recursive hourglass module (MCAM variant).

    Builds ``nModual`` parallel upper/lower residual stages, recurses on
    the pooled branch while ``n > 1``, and merges the two resolutions
    with a learned upsample + add.
    """
    with tf.name_scope(name):
        # ----- upper (full-res) and lower (pooled) chains
        pool = tf.contrib.layers.max_pool2d(inputs, [2, 2], [2, 2], padding='VALID')
        up = []
        low = []
        for i in range(nModual):
            up_src = inputs if i == 0 else up[i - 1]
            low_src = pool if i == 0 else low[i - 1]
            if n > 1:
                tmpup = self._rep_residual(up_src, numOut, n - 1)
            elif i == 0:
                tmpup = self._residual(up_src, numOut)
            else:
                tmpup = self._residual_pool(up_src, numOut)
            up.append(tmpup)
            low.append(self._residual(low_src, numOut * 2))
        # ----- recurse (or bottom out) on the low-resolution branch
        if n > 1:
            low2 = self._hg_mcam(low[-1], n - 1, numOut * 2, nModual)
        else:
            low2 = self._residual(low[-1], numOut * 2)
        low3 = self._residual(low2, numOut * 2)
        up_2 = self.up_sample(low3, numOut)
        return tf.add_n([up[-1], up_2], name='out_hg')
def _creat_model(self, x, channels, n_class, layers=3, features_root=16,
                 filter_size=3, pool_size=2, summaries=True):
    # Assembles the full network graph: padded preprocessing convolutions,
    # a residual-pool stage, a recursive hourglass ('unet' scope), then a
    # 1x1 linear head squashed through a sigmoid.
    # NOTE(review): layers, features_root, filter_size, pool_size and
    # summaries are accepted but never referenced below -- confirm intent.
    # Placeholder for the input image
    #nx = tf.shape(x)[1]
    #ny = tf.shape(x)[2]
    #x_image = tf.reshape(x, tf.stack([-1, nx, ny, channels]))
    in_node = x
    #batch_size = tf.shape(x_image)[0]
    with tf.name_scope('model'):
        with tf.name_scope('preprocessing'):
            pad1 = tf.pad(in_node, [[0,0],[1,1],[1,1],[0,0]], name='pad_1')
            conv1 = self._conv_bn_relu(pad1, filters= 8, kernel_size = 3, strides = 1, name = 'conv_channel_to_64')
            in_node = self._residual_pool(conv1, numOut = 16, name = 'r1')
            #in_node = self._skip_layer(in_node,16,name = "conv_channel_64_to_128")
            #pool = tf.contrib.layers.max_pool2d(in_node,[2,2],[2,2],padding='VALID')
        with tf.name_scope('unet'):
            # 3-level hourglass with 16 base channels, 2 modules per level
            in_node=self._hg_mcam(in_node,3,16,2)
        with tf.name_scope('attention'):
            # NOTE(review): `drop` is computed but never used -- the head is
            # built from `in_node`, so dropout is effectively disabled here.
            # Confirm whether `output = self._lin(drop, 1)` was intended.
            drop = tf.layers.dropout(in_node, rate=self.Dropout_Rate, training = self.IsTraining)
            output = self._lin(in_node,1)
            #att = self._attention_iter(ll,3,3)
            #upsample = tf.image.resize_nearest_neighbor(att, tf.shape(att)[1:3]*2, name = 'upsampling')
            # with tf.name_scope('output'):
            #     out = self._lin(att,2)
    return tf.nn.sigmoid(output)
def _get_cost(self, logits, cost_name, cost_kwargs):
    """
    Constructs the cost function, either cross_entropy, weighted cross_entropy or dice_coefficient.
    Optional arguments are:
    class_weights: weights for the different classes in case of multi-class imbalance
    regularizer: power of the L2 regularizers added to the loss function

    Raises:
        ValueError: if ``cost_name`` is not a supported cost function.
    """
    # Validate up front so an unknown name fails cleanly before any graph
    # ops are built. Bug fix: the original message was
    # "Unknown cost function: " % cost_name -- no %s placeholder, so the
    # raise itself crashed with a TypeError instead of reporting the name.
    if cost_name not in ("cross_entropy", "dice_coefficient"):
        raise ValueError("Unknown cost function: %s" % cost_name)
    flat_logits = tf.reshape(logits, [-1, self.n_class])
    flat_labels = tf.reshape(self.y, [-1, self.n_class])
    if cost_name == "cross_entropy":
        class_weights = cost_kwargs.pop("class_weights", None)
        if class_weights is not None:
            # per-class weighting for imbalanced label distributions
            class_weights = tf.constant(np.array(class_weights, dtype=np.float32))
            weight_map = tf.multiply(flat_labels, class_weights)
            weight_map = tf.reduce_sum(weight_map, axis=1)
            loss_map = tf.nn.softmax_cross_entropy_with_logits_v2(
                logits=flat_logits, labels=flat_labels)
            weighted_loss = tf.multiply(loss_map, weight_map)
            loss = tf.reduce_mean(weighted_loss)
        else:
            loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
                logits=flat_logits, labels=flat_labels))
    else:  # dice_coefficient
        eps = 1e-5
        prediction = pixel_wise_softmax_2(logits)
        intersection = tf.reduce_sum(prediction * self.y)
        union = eps + tf.reduce_sum(prediction) + tf.reduce_sum(self.y)
        loss = -(2 * intersection / (union))
    return loss
def predict(self, model_path, x_test):
    """
    Uses the model to create a prediction for the given data
    :param model_path: path to the model checkpoint to restore
    :param x_test: Data to predict on. Shape [n, nx, ny, channels]
    :returns prediction: The unet prediction Shape [n, px, py, labels] (px=nx-self.offset/2)
    """
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        # Initialize variables, then load the trained weights.
        sess.run(init)
        self.restore(sess, model_path)
        # NOTE(review): y_dummy is allocated but never fed -- confirm intent.
        y_dummy = np.empty((x_test.shape[0], x_test.shape[1], x_test.shape[2], self.n_class))
        begin = time()
        # Run inference 1000 times to report the average per-call latency.
        for _ in range(1000):
            prediction = sess.run(
                self.predicter,
                feed_dict={self.x: crop_to_shape_v2(x_test, self.in_shape),
                           self.Dropout_Rate: 0.,
                           self.IsTraining: False})
        print('time comsumed:', (time() - begin) / 1000.)
        return prediction
def save(self, sess, model_path):
    """
    Saves the current session to a checkpoint
    :param sess: current session
    :param model_path: path to file system location
    :returns: path the checkpoint was written to
    """
    checkpointer = tf.train.Saver()
    return checkpointer.save(sess, model_path)
def restore(self, sess, model_path):
    """
    Restores a session from a checkpoint
    :param sess: current session instance
    :param model_path: path to file system checkpoint location
    """
    restorer = tf.train.Saver()
    restorer.restore(sess, model_path)
    logging.info("Model restored from file: %s" % model_path)
def _initialize(self, output_path, prediction_path, restore=False):
    # Prepare the output/prediction directories for a training run:
    # wipe old contents when starting fresh, then (re)create both dirs.
    abs_prediction_path = os.path.abspath(prediction_path)
    output_path = os.path.abspath(output_path)
    if not restore:
        logging.info("Removing '{:}'".format(abs_prediction_path))
        shutil.rmtree(abs_prediction_path, ignore_errors=True)
        logging.info("Removing '{:}'".format(output_path))
        shutil.rmtree(output_path, ignore_errors=True)
    else:
        # NOTE(review): `sess` is not defined in this scope, so this branch
        # raises NameError if reached (train() calls _initialize without
        # restore=True, so it is currently dead). A session argument would
        # be needed to make it work -- confirm intent.
        self.restore(sess,output_path)
    if not os.path.exists(abs_prediction_path):
        logging.info("Allocating '{:}'".format(abs_prediction_path))
        os.makedirs(abs_prediction_path)
    if not os.path.exists(output_path):
        logging.info("Allocating '{:}'".format(output_path))
        os.makedirs(output_path)
def train(self, data_provider, output_path, training_iters=10, epochs=100, dropout=0.25,
          display_step=1, restore=False, write_graph=False, prediction_path='prediction'):
    """
    Lauches the training process
    :param data_provider: callable returning training and verification data
    :param output_path: path where to store checkpoints
    :param training_iters: number of training mini batch iteration
    :param epochs: number of epochs
    :param dropout: dropout probability
    :param display_step: number of steps till outputting stats
    :param restore: Flag if previous model should be restored
    :param write_graph: Flag if the computation graph should be written as protobuf file to the output path
    :param prediction_path: path where to save predictions on each epoch
    """
    print('begin training')
    save_path = os.path.join(output_path, "model.ckpt")
    if epochs == 0:
        return save_path
    self.prediction_path = prediction_path
    self.model_path = output_path
    self._initialize(output_path, prediction_path)
    #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.75)
    #config=tf.ConfigProto(gpu_options=gpu_options)
    with tf.Session() as sess:
        if write_graph:
            tf.train.write_graph(sess.graph_def, output_path, "graph.pb", False)
        sess.run(self.init)
        if restore:
            ckpt = tf.train.get_checkpoint_state(output_path)
            if ckpt and ckpt.model_checkpoint_path:
                self.restore(sess, ckpt.model_checkpoint_path)
        #############################
        self.verification_batch_size = 2
        self.batch_size = 1
        test_x, test_y = data_provider(self.verification_batch_size)
        pred_shape = self.store_prediction(sess, test_x, test_y, "_init")
        summary_writer = tf.summary.FileWriter(output_path, graph=sess.graph)
        logging.info("Start optimization")
        avg_gradients = None
        # Bug fix: was `seld.save_dict` -- a NameError at runtime. The
        # running loss/acc history lives on self: output_minibatch_stats
        # appends to it and np.savez dumps it every epoch.
        self.save_dict = {'loss': [], 'acc': []}
        for epoch in range(epochs):
            test_x, test_y = data_provider(self.verification_batch_size)
            total_loss = 0
            for step in range((epoch * training_iters), ((epoch + 1) * training_iters)):
                batch_x, batch_y = data_provider(self.batch_size)
                # Run optimization op (backprop)
                _, loss, lr = sess.run(
                    (self.train_rmsprop, self.loss, self.lr),
                    feed_dict={self.x: crop_to_shape_v2(batch_x, self.in_shape),
                               self.y: crop_to_shape_v2(batch_y, self.in_shape),
                               self.IsTraining: True,
                               self.Dropout_Rate: dropout})
                if step % display_step == 0:
                    self.output_minibatch_stats(sess, summary_writer, step, batch_x, batch_y)
                total_loss += loss
            np.savez('log_data.npz', **self.save_dict)
            self.output_epoch_stats(epoch, total_loss, training_iters, lr)
            self.store_prediction(sess, test_x, test_y, "epoch_%s" % epoch)
            save_path = self.save(sess, save_path)
        logging.info("Optimization Finished!")
        return save_path
def store_prediction(self, sess, batch_x, batch_y, name):
    """Run the net on a verification batch, log the verification error and
    loss, and save a combined input/label/prediction image."""
    x_cropped = crop_to_shape_v2(batch_x, self.in_shape)
    y = crop_to_shape_v2(batch_y, self.in_shape)
    prediction = sess.run(self.predicter,
                          feed_dict={self.x: x_cropped,
                                     self.y: y,
                                     self.Dropout_Rate: 0.,
                                     self.IsTraining: False})
    loss = sess.run(self.loss,
                    feed_dict={self.x: x_cropped,
                               self.y: y,
                               self.IsTraining: False,
                               self.Dropout_Rate: 0})
    logging.info("Verification error= {:.1f}%, loss= {:.4f}".format(
        error_rate(prediction, y), loss))
    img = util.combine_img_prediction(batch_x, batch_y, prediction)
    util.save_image(img, "%s/%s.jpg" % (self.prediction_path, name))
def output_epoch_stats(self, epoch, total_loss, training_iters, lr):
    """Log the average minibatch loss and learning rate for one epoch."""
    avg_loss = total_loss / training_iters
    logging.info(
        "Epoch {:}, Average loss: {:.4f}, learning rate: {:.4f}".format(epoch, avg_loss, lr))
def output_minibatch_stats(self, sess, summary_writer, step, batch_x, batch_y):
    """Evaluate summaries, loss and accuracy on one minibatch; record them
    to the summary writer and the in-memory history, then log the stats."""
    feed = {
        self.x: crop_to_shape_v2(batch_x, self.in_shape),
        self.y: crop_to_shape_v2(batch_y, self.in_shape),
        self.IsTraining: False,
        self.Dropout_Rate: 1.,
    }
    fetches = [self.summary_op, self.loss, self.accuracy, self.predicter]
    summary_str, loss, acc, predictions = sess.run(fetches, feed_dict=feed)
    self.save_dict['loss'].append(loss)
    self.save_dict['acc'].append(acc)
    summary_writer.add_summary(summary_str, step)
    summary_writer.flush()
    logging.info(
        "Iter {:}, Minibatch Loss= {:.4f}, Training Accuracy= {:.4f}".format(step, loss, acc))
def error_rate(predictions, labels):
    """
    Return the error rate based on dense predictions and 1-hot labels.
    """
    # sign(v - 0.5) maps a value to its 0/1 class; matching signs mean the
    # prediction agrees with the label at that pixel.
    correct = np.sum(np.sign(predictions - 0.5) == np.sign(labels - 0.5))
    # normalize over the spatial extent (first three dimensions)
    total = predictions.shape[0] * predictions.shape[1] * predictions.shape[2]
    return 100.0 - 100.0 * correct / total
def get_image_summary(img, idx=0):
    """
    Make an image summary for 4d tensor image with index idx
    """
    # take channel `idx` of the first image and min-max scale it to [0, 255]
    channel = tf.slice(img, (0, 0, 0, idx), (1, -1, -1, 1))
    channel -= tf.reduce_min(channel)
    channel /= tf.reduce_max(channel)
    channel *= 255
    img_w = tf.shape(img)[1]
    img_h = tf.shape(img)[2]
    # reorder to a batch of (1, H, W, 1) images for the summary writer
    flat = tf.reshape(channel, tf.stack((img_w, img_h, 1)))
    chw = tf.transpose(flat, (2, 0, 1))
    return tf.reshape(chw, tf.stack((-1, img_w, img_h, 1)))
| [
"tensorflow.shape",
"tensorflow.pad",
"tensorflow.transpose",
"tensorflow.reduce_sum",
"tensorflow.multiply",
"numpy.array",
"tensorflow.control_dependencies",
"tensorflow.train.write_graph",
"tensorflow.reduce_mean",
"tensorflow.cast",
"logging.info",
"tensorflow.slice",
"tensorflow.reduce_... | [((518, 591), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s %(message)s"""'}), "(level=logging.INFO, format='%(asctime)s %(message)s')\n", (537, 591), False, 'import logging\n'), ((21482, 21527), 'tensorflow.slice', 'tf.slice', (['img', '(0, 0, 0, idx)', '(1, -1, -1, 1)'], {}), '(img, (0, 0, 0, idx), (1, -1, -1, 1))\n', (21490, 21527), True, 'import tensorflow as tf\n'), ((21537, 21553), 'tensorflow.reduce_min', 'tf.reduce_min', (['V'], {}), '(V)\n', (21550, 21553), True, 'import tensorflow as tf\n'), ((21563, 21579), 'tensorflow.reduce_max', 'tf.reduce_max', (['V'], {}), '(V)\n', (21576, 21579), True, 'import tensorflow as tf\n'), ((21711, 21737), 'tensorflow.transpose', 'tf.transpose', (['V', '(2, 0, 1)'], {}), '(V, (2, 0, 1))\n', (21723, 21737), True, 'import tensorflow as tf\n'), ((926, 983), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""'], {'shape': '[None, 592, 800, channels]'}), "('float', shape=[None, 592, 800, channels])\n", (940, 983), True, 'import tensorflow as tf\n'), ((995, 1045), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""'], {'shape': '[None, 592, 800, 1]'}), "('float', shape=[None, 592, 800, 1])\n", (1009, 1045), True, 'import tensorflow as tf\n'), ((1068, 1094), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (1082, 1094), True, 'import tensorflow as tf\n'), ((1145, 1168), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {}), '(tf.bool)\n', (1159, 1168), True, 'import tensorflow as tf\n'), ((1417, 1443), 'tensorflow.sign', 'tf.sign', (['(self.logits - 0.5)'], {}), '(self.logits - 0.5)\n', (1424, 1443), True, 'import tensorflow as tf\n'), ((2340, 2373), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2371, 2373), True, 'import tensorflow as tf\n'), ((2376, 2412), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'self.loss'], {}), 
"('loss', self.loss)\n", (2393, 2412), True, 'import tensorflow as tf\n'), ((2415, 2469), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""cross_entropy"""', 'self.cross_entropy'], {}), "('cross_entropy', self.cross_entropy)\n", (2432, 2469), True, 'import tensorflow as tf\n'), ((2472, 2516), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""accuracy"""', 'self.accuracy'], {}), "('accuracy', self.accuracy)\n", (2489, 2516), True, 'import tensorflow as tf\n'), ((2519, 2562), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""learning_rate"""', 'self.lr'], {}), "('learning_rate', self.lr)\n", (2536, 2562), True, 'import tensorflow as tf\n'), ((2583, 2605), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (2603, 2605), True, 'import tensorflow as tf\n'), ((6608, 6724), 'tensorflow.contrib.layers.batch_norm', 'tf.contrib.layers.batch_norm', (['inputs', '(0.9)'], {'epsilon': '(1e-05)', 'activation_fn': 'tf.nn.relu', 'is_training': 'self.IsTraining'}), '(inputs, 0.9, epsilon=1e-05, activation_fn=tf.\n nn.relu, is_training=self.IsTraining)\n', (6636, 6724), True, 'import tensorflow as tf\n'), ((11806, 11827), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['output'], {}), '(output)\n', (11819, 11827), True, 'import tensorflow as tf\n'), ((12217, 12255), 'tensorflow.reshape', 'tf.reshape', (['logits', '[-1, self.n_class]'], {}), '(logits, [-1, self.n_class])\n', (12227, 12255), True, 'import tensorflow as tf\n'), ((12272, 12310), 'tensorflow.reshape', 'tf.reshape', (['self.y', '[-1, self.n_class]'], {}), '(self.y, [-1, self.n_class])\n', (12282, 12310), True, 'import tensorflow as tf\n'), ((13796, 13829), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (13827, 13829), True, 'import tensorflow as tf\n'), ((14569, 14585), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (14583, 14585), True, 'import tensorflow as tf\n'), ((14868, 14884), 'tensorflow.train.Saver', 
'tf.train.Saver', ([], {}), '()\n', (14882, 14884), True, 'import tensorflow as tf\n'), ((14921, 14978), 'logging.info', 'logging.info', (["('Model restored from file: %s' % model_path)"], {}), "('Model restored from file: %s' % model_path)\n", (14933, 14978), False, 'import logging\n'), ((15072, 15104), 'os.path.abspath', 'os.path.abspath', (['prediction_path'], {}), '(prediction_path)\n', (15087, 15104), False, 'import os\n'), ((15121, 15149), 'os.path.abspath', 'os.path.abspath', (['output_path'], {}), '(output_path)\n', (15136, 15149), False, 'import os\n'), ((16589, 16628), 'os.path.join', 'os.path.join', (['output_path', '"""model.ckpt"""'], {}), "(output_path, 'model.ckpt')\n", (16601, 16628), False, 'import os\n'), ((18828, 18868), 'layers.crop_to_shape_v2', 'crop_to_shape_v2', (['batch_y', 'self.in_shape'], {}), '(batch_y, self.in_shape)\n', (18844, 18868), False, 'from layers import weight_variable, weight_variable_devonc, bias_variable, conv2d, deconv2d, max_pool, crop_and_concat, pixel_wise_softmax_2, crop_to_shape_v2, cross_entropy\n'), ((19667, 19724), 'util.combine_img_prediction', 'util.combine_img_prediction', (['batch_x', 'batch_y', 'prediction'], {}), '(batch_x, batch_y, prediction)\n', (19694, 19724), False, 'import util\n'), ((19727, 19791), 'util.save_image', 'util.save_image', (['img', "('%s/%s.jpg' % (self.prediction_path, name))"], {}), "(img, '%s/%s.jpg' % (self.prediction_path, name))\n", (19742, 19791), False, 'import util\n'), ((21606, 21619), 'tensorflow.shape', 'tf.shape', (['img'], {}), '(img)\n', (21614, 21619), True, 'import tensorflow as tf\n'), ((21635, 21648), 'tensorflow.shape', 'tf.shape', (['img'], {}), '(img)\n', (21643, 21648), True, 'import tensorflow as tf\n'), ((21674, 21701), 'tensorflow.stack', 'tf.stack', (['(img_w, img_h, 1)'], {}), '((img_w, img_h, 1))\n', (21682, 21701), True, 'import tensorflow as tf\n'), ((21760, 21791), 'tensorflow.stack', 'tf.stack', (['(-1, img_w, img_h, 1)'], {}), '((-1, img_w, img_h, 1))\n', 
(21768, 21791), True, 'import tensorflow as tf\n'), ((1473, 1505), 'tensorflow.cast', 'tf.cast', (['self.predicter', 'tf.bool'], {}), '(self.predicter, tf.bool)\n', (1480, 1505), True, 'import tensorflow as tf\n'), ((1507, 1531), 'tensorflow.cast', 'tf.cast', (['self.y', 'tf.bool'], {}), '(self.y, tf.bool)\n', (1514, 1531), True, 'import tensorflow as tf\n'), ((1566, 1604), 'tensorflow.cast', 'tf.cast', (['self.correct_pred', 'tf.float32'], {}), '(self.correct_pred, tf.float32)\n', (1573, 1604), True, 'import tensorflow as tf\n'), ((1732, 1754), 'tensorflow.name_scope', 'tf.name_scope', (['"""steps"""'], {}), "('steps')\n", (1745, 1754), True, 'import tensorflow as tf\n'), ((1777, 1828), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'name': '"""global_step"""', 'trainable': '(False)'}), "(0, name='global_step', trainable=False)\n", (1788, 1828), True, 'import tensorflow as tf\n'), ((1839, 1858), 'tensorflow.name_scope', 'tf.name_scope', (['"""lr"""'], {}), "('lr')\n", (1852, 1858), True, 'import tensorflow as tf\n'), ((1873, 2002), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['learning_rate', 'self.train_step', 'self.decay_step', 'self.decay'], {'staircase': '(True)', 'name': '"""learning_rate"""'}), "(learning_rate, self.train_step, self.decay_step,\n self.decay, staircase=True, name='learning_rate')\n", (1899, 2002), True, 'import tensorflow as tf\n'), ((2008, 2032), 'tensorflow.name_scope', 'tf.name_scope', (['"""rmsprop"""'], {}), "('rmsprop')\n", (2021, 2032), True, 'import tensorflow as tf\n'), ((2052, 2100), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', ([], {'learning_rate': 'self.lr'}), '(learning_rate=self.lr)\n', (2077, 2100), True, 'import tensorflow as tf\n'), ((2109, 2135), 'tensorflow.name_scope', 'tf.name_scope', (['"""minimizer"""'], {}), "('minimizer')\n", (2122, 2135), True, 'import tensorflow as tf\n'), ((2158, 2200), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS'], 
{}), '(tf.GraphKeys.UPDATE_OPS)\n', (2175, 2200), True, 'import tensorflow as tf\n'), ((3093, 3112), 'tensorflow.name_scope', 'tf.name_scope', (['name'], {}), '(name)\n', (3106, 3112), True, 'import tensorflow as tf\n'), ((3338, 3429), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['inputs', 'kernel', '[1, strides, strides, 1]'], {'padding': 'pad', 'data_format': '"""NHWC"""'}), "(inputs, kernel, [1, strides, strides, 1], padding=pad,\n data_format='NHWC')\n", (3350, 3429), True, 'import tensorflow as tf\n'), ((3936, 3955), 'tensorflow.name_scope', 'tf.name_scope', (['name'], {}), '(name)\n', (3949, 3955), True, 'import tensorflow as tf\n'), ((4130, 4225), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['inputs', 'kernel', '[1, strides, strides, 1]'], {'padding': '"""VALID"""', 'data_format': '"""NHWC"""'}), "(inputs, kernel, [1, strides, strides, 1], padding='VALID',\n data_format='NHWC')\n", (4142, 4225), True, 'import tensorflow as tf\n'), ((4229, 4343), 'tensorflow.contrib.layers.batch_norm', 'tf.contrib.layers.batch_norm', (['conv', '(0.9)'], {'epsilon': '(1e-05)', 'activation_fn': 'tf.nn.relu', 'is_training': 'self.IsTraining'}), '(conv, 0.9, epsilon=1e-05, activation_fn=tf.nn.\n relu, is_training=self.IsTraining)\n', (4257, 4343), True, 'import tensorflow as tf\n'), ((4734, 4753), 'tensorflow.name_scope', 'tf.name_scope', (['name'], {}), '(name)\n', (4747, 4753), True, 'import tensorflow as tf\n'), ((5900, 5919), 'tensorflow.name_scope', 'tf.name_scope', (['name'], {}), '(name)\n', (5913, 5919), True, 'import tensorflow as tf\n'), ((6315, 6334), 'tensorflow.name_scope', 'tf.name_scope', (['name'], {}), '(name)\n', (6328, 6334), True, 'import tensorflow as tf\n'), ((6805, 6824), 'tensorflow.name_scope', 'tf.name_scope', (['name'], {}), '(name)\n', (6818, 6824), True, 'import tensorflow as tf\n'), ((6869, 6937), 'tensorflow.contrib.layers.max_pool2d', 'tf.contrib.layers.max_pool2d', (['bnr_1', '[2, 2]', '[2, 2]'], {'padding': '"""VALID"""'}), "(bnr_1, [2, 2], [2, 2], 
padding='VALID')\n", (6897, 6937), True, 'import tensorflow as tf\n'), ((7449, 7468), 'tensorflow.name_scope', 'tf.name_scope', (['name'], {}), '(name)\n', (7462, 7468), True, 'import tensorflow as tf\n'), ((7526, 7547), 'numpy.floor', 'np.floor', (['(lrnSize / 2)'], {}), '(lrnSize / 2)\n', (7534, 7547), True, 'import numpy as np\n'), ((8429, 8448), 'tensorflow.name_scope', 'tf.name_scope', (['name'], {}), '(name)\n', (8442, 8448), True, 'import tensorflow as tf\n'), ((8800, 8819), 'tensorflow.name_scope', 'tf.name_scope', (['name'], {}), '(name)\n', (8813, 8819), True, 'import tensorflow as tf\n'), ((9104, 9129), 'tensorflow.name_scope', 'tf.name_scope', (['"""upsample"""'], {}), "('upsample')\n", (9117, 9129), True, 'import tensorflow as tf\n'), ((9567, 9586), 'tensorflow.name_scope', 'tf.name_scope', (['name'], {}), '(name)\n', (9580, 9586), True, 'import tensorflow as tf\n'), ((9627, 9696), 'tensorflow.contrib.layers.max_pool2d', 'tf.contrib.layers.max_pool2d', (['inputs', '[2, 2]', '[2, 2]'], {'padding': '"""VALID"""'}), "(inputs, [2, 2], [2, 2], padding='VALID')\n", (9655, 9696), True, 'import tensorflow as tf\n'), ((10540, 10579), 'tensorflow.add_n', 'tf.add_n', (['[up[-1], up_2]'], {'name': '"""out_hg"""'}), "([up[-1], up_2], name='out_hg')\n", (10548, 10579), True, 'import tensorflow as tf\n'), ((10904, 10926), 'tensorflow.name_scope', 'tf.name_scope', (['"""model"""'], {}), "('model')\n", (10917, 10926), True, 'import tensorflow as tf\n'), ((13837, 13849), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (13847, 13849), True, 'import tensorflow as tf\n'), ((14025, 14100), 'numpy.empty', 'np.empty', (['(x_test.shape[0], x_test.shape[1], x_test.shape[2], self.n_class)'], {}), '((x_test.shape[0], x_test.shape[1], x_test.shape[2], self.n_class))\n', (14033, 14100), True, 'import numpy as np\n'), ((14112, 14118), 'time.time', 'time', ([], {}), '()\n', (14116, 14118), False, 'from time import time\n'), ((15233, 15287), 'shutil.rmtree', 'shutil.rmtree', 
(['abs_prediction_path'], {'ignore_errors': '(True)'}), '(abs_prediction_path, ignore_errors=True)\n', (15246, 15287), False, 'import shutil\n'), ((15345, 15391), 'shutil.rmtree', 'shutil.rmtree', (['output_path'], {'ignore_errors': '(True)'}), '(output_path, ignore_errors=True)\n', (15358, 15391), False, 'import shutil\n'), ((15443, 15478), 'os.path.exists', 'os.path.exists', (['abs_prediction_path'], {}), '(abs_prediction_path)\n', (15457, 15478), False, 'import os\n'), ((15547, 15579), 'os.makedirs', 'os.makedirs', (['abs_prediction_path'], {}), '(abs_prediction_path)\n', (15558, 15579), False, 'import os\n'), ((15590, 15617), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (15604, 15617), False, 'import os\n'), ((15678, 15702), 'os.makedirs', 'os.makedirs', (['output_path'], {}), '(output_path)\n', (15689, 15702), False, 'import os\n'), ((16914, 16926), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (16924, 16926), True, 'import tensorflow as tf\n'), ((17516, 17568), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['output_path'], {'graph': 'sess.graph'}), '(output_path, graph=sess.graph)\n', (17537, 17568), True, 'import tensorflow as tf\n'), ((17572, 17606), 'logging.info', 'logging.info', (['"""Start optimization"""'], {}), "('Start optimization')\n", (17584, 17606), False, 'import logging\n'), ((18702, 18740), 'logging.info', 'logging.info', (['"""Optimization Finished!"""'], {}), "('Optimization Finished!')\n", (18714, 18740), False, 'import logging\n'), ((2209, 2249), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['self.update_ops'], {}), '(self.update_ops)\n', (2232, 2249), True, 'import tensorflow as tf\n'), ((4763, 4786), 'tensorflow.name_scope', 'tf.name_scope', (['"""norm_1"""'], {}), "('norm_1')\n", (4776, 4786), True, 'import tensorflow as tf\n'), ((4801, 4917), 'tensorflow.contrib.layers.batch_norm', 'tf.contrib.layers.batch_norm', (['inputs', '(0.9)'], {'epsilon': '(1e-05)', 
'activation_fn': 'tf.nn.relu', 'is_training': 'self.IsTraining'}), '(inputs, 0.9, epsilon=1e-05, activation_fn=tf.\n nn.relu, is_training=self.IsTraining)\n', (4829, 4917), True, 'import tensorflow as tf\n'), ((5026, 5049), 'tensorflow.name_scope', 'tf.name_scope', (['"""norm_2"""'], {}), "('norm_2')\n", (5039, 5049), True, 'import tensorflow as tf\n'), ((5064, 5180), 'tensorflow.contrib.layers.batch_norm', 'tf.contrib.layers.batch_norm', (['conv_1', '(0.9)'], {'epsilon': '(1e-05)', 'activation_fn': 'tf.nn.relu', 'is_training': 'self.IsTraining'}), '(conv_1, 0.9, epsilon=1e-05, activation_fn=tf.\n nn.relu, is_training=self.IsTraining)\n', (5092, 5180), True, 'import tensorflow as tf\n'), ((5361, 5384), 'tensorflow.name_scope', 'tf.name_scope', (['"""norm_3"""'], {}), "('norm_3')\n", (5374, 5384), True, 'import tensorflow as tf\n'), ((5399, 5515), 'tensorflow.contrib.layers.batch_norm', 'tf.contrib.layers.batch_norm', (['conv_2', '(0.9)'], {'epsilon': '(1e-05)', 'activation_fn': 'tf.nn.relu', 'is_training': 'self.IsTraining'}), '(conv_2, 0.9, epsilon=1e-05, activation_fn=tf.\n nn.relu, is_training=self.IsTraining)\n', (5427, 5515), True, 'import tensorflow as tf\n'), ((6525, 6567), 'tensorflow.add_n', 'tf.add_n', (['[convb, skipl]'], {'name': '"""res_block"""'}), "([convb, skipl], name='res_block')\n", (6533, 6567), True, 'import tensorflow as tf\n'), ((6957, 6999), 'numpy.array', 'np.array', (['[[0, 0], [1, 1], [1, 1], [0, 0]]'], {}), '([[0, 0], [1, 1], [1, 1], [0, 0]])\n', (6965, 6999), True, 'import numpy as np\n'), ((7129, 7171), 'numpy.array', 'np.array', (['[[0, 0], [1, 1], [1, 1], [0, 0]]'], {}), '([[0, 0], [1, 1], [1, 1], [0, 0]])\n', (7137, 7171), True, 'import numpy as np\n'), ((7570, 7612), 'numpy.array', 'np.array', (['[[0, 0], [1, 1], [1, 1], [0, 0]]'], {}), '([[0, 0], [1, 1], [1, 1], [0, 0]])\n', (7578, 7612), True, 'import numpy as np\n'), ((7688, 7754), 'numpy.array', 'np.array', (['[[0, 0], [padding, padding], [padding, padding], [0, 0]]'], {}), 
'([[0, 0], [padding, padding], [padding, padding], [0, 0]])\n', (7696, 7754), True, 'import numpy as np\n'), ((8310, 8335), 'tensorflow.concat', 'tf.concat', (['stacks'], {'axis': '(3)'}), '(stacks, axis=3)\n', (8319, 8335), True, 'import tensorflow as tf\n'), ((9438, 9473), 'layers.deconv2d', 'deconv2d', (['inputs', 'kernel', 'pool_size'], {}), '(inputs, kernel, pool_size)\n', (9446, 9473), False, 'from layers import weight_variable, weight_variable_devonc, bias_variable, conv2d, deconv2d, max_pool, crop_and_concat, pixel_wise_softmax_2, crop_to_shape_v2, cross_entropy\n'), ((10936, 10966), 'tensorflow.name_scope', 'tf.name_scope', (['"""preprocessing"""'], {}), "('preprocessing')\n", (10949, 10966), True, 'import tensorflow as tf\n'), ((10979, 11042), 'tensorflow.pad', 'tf.pad', (['in_node', '[[0, 0], [1, 1], [1, 1], [0, 0]]'], {'name': '"""pad_1"""'}), "(in_node, [[0, 0], [1, 1], [1, 1], [0, 0]], name='pad_1')\n", (10985, 11042), True, 'import tensorflow as tf\n'), ((11372, 11393), 'tensorflow.name_scope', 'tf.name_scope', (['"""unet"""'], {}), "('unet')\n", (11385, 11393), True, 'import tensorflow as tf\n'), ((11445, 11471), 'tensorflow.name_scope', 'tf.name_scope', (['"""attention"""'], {}), "('attention')\n", (11458, 11471), True, 'import tensorflow as tf\n'), ((11484, 11560), 'tensorflow.layers.dropout', 'tf.layers.dropout', (['in_node'], {'rate': 'self.Dropout_Rate', 'training': 'self.IsTraining'}), '(in_node, rate=self.Dropout_Rate, training=self.IsTraining)\n', (11501, 11560), True, 'import tensorflow as tf\n'), ((12531, 12570), 'tensorflow.multiply', 'tf.multiply', (['flat_labels', 'class_weights'], {}), '(flat_labels, class_weights)\n', (12542, 12570), True, 'import tensorflow as tf\n'), ((12588, 12621), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['weight_map'], {'axis': '(1)'}), '(weight_map, axis=1)\n', (12601, 12621), True, 'import tensorflow as tf\n'), ((12638, 12725), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 
'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'logits': 'flat_logits', 'labels': 'flat_labels'}), '(logits=flat_logits, labels=\n flat_labels)\n', (12680, 12725), True, 'import tensorflow as tf\n'), ((12811, 12844), 'tensorflow.multiply', 'tf.multiply', (['loss_map', 'weight_map'], {}), '(loss_map, weight_map)\n', (12822, 12844), True, 'import tensorflow as tf\n'), ((12857, 12886), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['weighted_loss'], {}), '(weighted_loss)\n', (12871, 12886), True, 'import tensorflow as tf\n'), ((13158, 13186), 'layers.pixel_wise_softmax_2', 'pixel_wise_softmax_2', (['logits'], {}), '(logits)\n', (13178, 13186), False, 'from layers import weight_variable, weight_variable_devonc, bias_variable, conv2d, deconv2d, max_pool, crop_and_concat, pixel_wise_softmax_2, crop_to_shape_v2, cross_entropy\n'), ((13205, 13239), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(prediction * self.y)'], {}), '(prediction * self.y)\n', (13218, 13239), True, 'import tensorflow as tf\n'), ((16959, 17027), 'tensorflow.train.write_graph', 'tf.train.write_graph', (['sess.graph_def', 'output_path', '"""graph.pb"""', '(False)'], {}), "(sess.graph_def, output_path, 'graph.pb', False)\n", (16979, 17027), True, 'import tensorflow as tf\n'), ((17079, 17121), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['output_path'], {}), '(output_path)\n', (17108, 17121), True, 'import tensorflow as tf\n'), ((18478, 18520), 'numpy.savez', 'np.savez', (['"""log_data.npz"""'], {}), "('log_data.npz', **self.save_dict)\n", (18486, 18520), True, 'import numpy as np\n'), ((3189, 3240), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {'uniform': '(False)'}), '(uniform=False)\n', (3225, 3240), True, 'import tensorflow as tf\n'), ((3981, 4032), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {'uniform': '(False)'}), '(uniform=False)\n', (4017, 4032), True, 'import 
tensorflow as tf\n'), ((5204, 5246), 'numpy.array', 'np.array', (['[[0, 0], [1, 1], [1, 1], [0, 0]]'], {}), '([[0, 0], [1, 1], [1, 1], [0, 0]])\n', (5212, 5246), True, 'import numpy as np\n'), ((6459, 6501), 'tensorflow.add_n', 'tf.add_n', (['[convb, skipl]'], {'name': '"""res_block"""'}), "([convb, skipl], name='res_block')\n", (6467, 6501), True, 'import tensorflow as tf\n'), ((7774, 7825), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {'uniform': '(False)'}), '(uniform=False)\n', (7810, 7825), True, 'import tensorflow as tf\n'), ((7951, 8030), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['pad_2', 'sharedK', '[1, 1, 1, 1]'], {'padding': '"""VALID"""', 'data_format': '"""NHWC"""'}), "(pad_2, sharedK, [1, 1, 1, 1], padding='VALID', data_format='NHWC')\n", (7963, 8030), True, 'import tensorflow as tf\n'), ((8050, 8136), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['Q[i - 1]', 'sharedK', '[1, 1, 1, 1]'], {'padding': '"""SAME"""', 'data_format': '"""NHWC"""'}), "(Q[i - 1], sharedK, [1, 1, 1, 1], padding='SAME', data_format=\n 'NHWC')\n", (8062, 8136), True, 'import tensorflow as tf\n'), ((8172, 8191), 'tensorflow.add_n', 'tf.add_n', (['[C[i], U]'], {}), '([C[i], U])\n', (8180, 8191), True, 'import tensorflow as tf\n'), ((9155, 9206), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {'uniform': '(False)'}), '(uniform=False)\n', (9191, 9206), True, 'import tensorflow as tf\n'), ((12470, 12511), 'numpy.array', 'np.array', (['class_weights'], {'dtype': 'np.float32'}), '(class_weights, dtype=np.float32)\n', (12478, 12511), True, 'import numpy as np\n'), ((12923, 13010), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'logits': 'flat_logits', 'labels': 'flat_labels'}), '(logits=flat_logits, labels=\n flat_labels)\n', (12965, 13010), True, 'import tensorflow as tf\n'), ((13285, 13306), 'tensorflow.reduce_sum', 'tf.reduce_sum', 
(['self.y'], {}), '(self.y)\n', (13298, 13306), True, 'import tensorflow as tf\n'), ((18945, 18985), 'layers.crop_to_shape_v2', 'crop_to_shape_v2', (['batch_x', 'self.in_shape'], {}), '(batch_x, self.in_shape)\n', (18961, 18985), False, 'from layers import weight_variable, weight_variable_devonc, bias_variable, conv2d, deconv2d, max_pool, crop_and_concat, pixel_wise_softmax_2, crop_to_shape_v2, cross_entropy\n'), ((19233, 19273), 'layers.crop_to_shape_v2', 'crop_to_shape_v2', (['batch_x', 'self.in_shape'], {}), '(batch_x, self.in_shape)\n', (19249, 19273), False, 'from layers import weight_variable, weight_variable_devonc, bias_variable, conv2d, deconv2d, max_pool, crop_and_concat, pixel_wise_softmax_2, crop_to_shape_v2, cross_entropy\n'), ((19537, 19577), 'layers.crop_to_shape_v2', 'crop_to_shape_v2', (['batch_y', 'self.in_shape'], {}), '(batch_y, self.in_shape)\n', (19553, 19577), False, 'from layers import weight_variable, weight_variable_devonc, bias_variable, conv2d, deconv2d, max_pool, crop_and_concat, pixel_wise_softmax_2, crop_to_shape_v2, cross_entropy\n'), ((20453, 20493), 'layers.crop_to_shape_v2', 'crop_to_shape_v2', (['batch_x', 'self.in_shape'], {}), '(batch_x, self.in_shape)\n', (20469, 20493), False, 'from layers import weight_variable, weight_variable_devonc, bias_variable, conv2d, deconv2d, max_pool, crop_and_concat, pixel_wise_softmax_2, crop_to_shape_v2, cross_entropy\n'), ((20556, 20596), 'layers.crop_to_shape_v2', 'crop_to_shape_v2', (['batch_y', 'self.in_shape'], {}), '(batch_y, self.in_shape)\n', (20572, 20596), False, 'from layers import weight_variable, weight_variable_devonc, bias_variable, conv2d, deconv2d, max_pool, crop_and_concat, pixel_wise_softmax_2, crop_to_shape_v2, cross_entropy\n'), ((7298, 7314), 'tensorflow.shape', 'tf.shape', (['conv_2'], {}), '(conv_2)\n', (7306, 7314), True, 'import tensorflow as tf\n'), ((13257, 13282), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['prediction'], {}), '(prediction)\n', (13270, 13282), True, 
'import tensorflow as tf\n'), ((14326, 14332), 'time.time', 'time', ([], {}), '()\n', (14330, 14332), False, 'from time import time\n'), ((21226, 21252), 'numpy.sign', 'np.sign', (['(predictions - 0.5)'], {}), '(predictions - 0.5)\n', (21233, 21252), True, 'import numpy as np\n'), ((21254, 21275), 'numpy.sign', 'np.sign', (['(labels - 0.5)'], {}), '(labels - 0.5)\n', (21261, 21275), True, 'import numpy as np\n'), ((1272, 1313), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['self.logits', '(1e-10)', '(1.0)'], {}), '(self.logits, 1e-10, 1.0)\n', (1288, 1313), True, 'import tensorflow as tf\n'), ((1355, 1400), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['(1 - self.logits)', '(1e-10)', '(1.0)'], {}), '(1 - self.logits, 1e-10, 1.0)\n', (1371, 1400), True, 'import tensorflow as tf\n'), ((14204, 14243), 'layers.crop_to_shape_v2', 'crop_to_shape_v2', (['x_test', 'self.in_shape'], {}), '(x_test, self.in_shape)\n', (14220, 14243), False, 'from layers import weight_variable, weight_variable_devonc, bias_variable, conv2d, deconv2d, max_pool, crop_and_concat, pixel_wise_softmax_2, crop_to_shape_v2, cross_entropy\n'), ((18114, 18154), 'layers.crop_to_shape_v2', 'crop_to_shape_v2', (['batch_x', 'self.in_shape'], {}), '(batch_x, self.in_shape)\n', (18130, 18154), False, 'from layers import weight_variable, weight_variable_devonc, bias_variable, conv2d, deconv2d, max_pool, crop_and_concat, pixel_wise_softmax_2, crop_to_shape_v2, cross_entropy\n'), ((18198, 18238), 'layers.crop_to_shape_v2', 'crop_to_shape_v2', (['batch_y', 'self.in_shape'], {}), '(batch_y, self.in_shape)\n', (18214, 18238), False, 'from layers import weight_variable, weight_variable_devonc, bias_variable, conv2d, deconv2d, max_pool, crop_and_concat, pixel_wise_softmax_2, crop_to_shape_v2, cross_entropy\n')] |
# first to start the nameserver start: python -m Pyro4.naming
import time
from threading import Thread
import numpy as np
import Pyro4
from rlkit.launchers import conf as config
# Allow the (insecure) pickle serializer so numpy-laden state dicts can
# round-trip through Pyro4 — only appropriate on a trusted local network.
Pyro4.config.SERIALIZERS_ACCEPTED = set(["pickle", "json", "marshal", "serpent"])
Pyro4.config.SERIALIZER = "pickle"
# Module-level mirror of the most recent device state, shared between the
# Pyro server thread and the client-side expert.
device_state = None
@Pyro4.expose
class DeviceState(object):
    """Pyro4-exposed accessor for the latest SpaceMouse state.

    The actual state lives in the module-level ``device_state`` global so
    that every Pyro proxy/instance observes the same value; the class
    attribute ``state`` below appears to be unused.
    """

    state = None  # NOTE(review): unused — state is kept in the module global

    def get_state(self):
        # Returns the dict last passed to set_state (None before the first set).
        return device_state

    def set_state(self, state):
        global device_state
        device_state = state
class SpaceMouseExpert:
    """Expert policy driven by a 3D mouse (SpaceMouse).

    Spawns the Pyro4 ``start_server`` in a daemon thread and converts the
    published device state into a clipped, thresholded action vector of
    ``xyz_dims + rot_dims`` entries.
    """

    def __init__(
        self,
        xyz_dims=3,
        xyz_remap=[0, 1, 2],
        xyz_scale=[1, 1, 1],
        xyz_abs_threshold=0.0,
        rot_dims=3,
        rot_remap=[0, 1, 2],
        rot_scale=[1, 1, 1],
        rot_abs_threshold=0.0,
        rot_discrete=False,
        min_clip=-np.inf,
        max_clip=np.inf,
    ):
        """Configure translation/rotation processing.

        xyz_remap/xyz_scale reorder and scale the translation axes;
        *_abs_threshold zero out small readings (deadband); rot_discrete
        keeps only the dominant rotation axis; min_clip/max_clip bound every
        action component.

        NOTE(review): the list defaults are mutable default arguments — safe
        here only because they are never mutated (converted via np.array or
        used read-only).
        """
        self.xyz_dims = xyz_dims
        self.xyz_remap = np.array(xyz_remap)
        self.xyz_scale = np.array(xyz_scale)
        self.xyz_abs_threshold = xyz_abs_threshold
        self.rot_dims = rot_dims
        self.rot_remap = rot_remap
        self.rot_scale = rot_scale
        self.rot_abs_threshold = rot_abs_threshold
        self.rot_discrete = rot_discrete
        self.min_clip = min_clip
        self.max_clip = max_clip
        # Run the Pyro4 server loop in the background; daemon=True so it
        # does not keep the process alive on exit.
        self.thread = Thread(target=start_server)
        self.thread.daemon = True
        self.thread.start()
        self.device_state = DeviceState()

    def get_action(self, obs):
        """Must return (action, valid, reset, accept)"""
        state = self.device_state.get_state()
        # time.sleep(0.1)
        if state is None:
            # No device state published yet.
            return None, False, False, False
        dpos, rotation, roll, pitch, yaw, accept, reset = (
            state["dpos"],
            state["rotation"],
            state["roll"],
            state["pitch"],
            state["yaw"],
            state["grasp"],  # ["left_click"],
            state["reset"],  # ["right_click"],
        )
        # Translation: remap axes, apply deadband, scale, clip.
        xyz = dpos[self.xyz_remap]
        xyz[np.abs(xyz) < self.xyz_abs_threshold] = 0.0
        xyz = xyz * self.xyz_scale
        xyz = np.clip(xyz, self.min_clip, self.max_clip)
        # Rotation: deadband, optionally keep only the dominant axis.
        rot = np.array([roll, pitch, yaw])
        rot[np.abs(rot) < self.rot_abs_threshold] = 0.0
        if self.rot_discrete:
            max_i = np.argmax(np.abs(rot))
            for i in range(len(rot)):
                if i != max_i:
                    rot[i] = 0.0
        rot = rot * self.rot_scale
        rot = np.clip(rot, self.min_clip, self.max_clip)

        a = np.concatenate([xyz[: self.xyz_dims], rot[: self.rot_dims]])
        # The action is "valid" iff at least one component is non-zero.
        valid = not np.all(np.isclose(a, 0))
        # print(a, roll, pitch, yaw, valid)
        return (a, valid, reset, accept)
def start_server():
    """Run a Pyro4 daemon publishing ``DeviceState`` under "example.greeting".

    Blocks in ``daemon.requestLoop()``; intended to run inside the daemon
    thread created by ``SpaceMouseExpert.__init__``. Requires a Pyro4 name
    server to already be running (``python -m Pyro4.naming``).
    """
    daemon = Pyro4.Daemon(config.SPACEMOUSE_HOSTNAME)
    ns = Pyro4.locateNS()  # find the name server
    uri = daemon.register(DeviceState)  # register the greeting maker as a Pyro object
    ns.register(
        "example.greeting", uri
    )  # register the object with a name in the name server
    print("uri:", uri)
    print("Server ready.")
    daemon.requestLoop()  # start the event loop of the server to wait for calls
if __name__ == "__main__":
    # Smoke test: start the server thread and poll the device once per second.
    expert = SpaceMouseExpert()
    for i in range(100):
        time.sleep(1)
        print(expert.get_action(None))
| [
"numpy.clip",
"numpy.abs",
"numpy.isclose",
"Pyro4.locateNS",
"time.sleep",
"numpy.array",
"numpy.concatenate",
"Pyro4.Daemon",
"threading.Thread"
] | [((2846, 2886), 'Pyro4.Daemon', 'Pyro4.Daemon', (['config.SPACEMOUSE_HOSTNAME'], {}), '(config.SPACEMOUSE_HOSTNAME)\n', (2858, 2886), False, 'import Pyro4\n'), ((2896, 2912), 'Pyro4.locateNS', 'Pyro4.locateNS', ([], {}), '()\n', (2910, 2912), False, 'import Pyro4\n'), ((985, 1004), 'numpy.array', 'np.array', (['xyz_remap'], {}), '(xyz_remap)\n', (993, 1004), True, 'import numpy as np\n'), ((1030, 1049), 'numpy.array', 'np.array', (['xyz_scale'], {}), '(xyz_scale)\n', (1038, 1049), True, 'import numpy as np\n'), ((1384, 1411), 'threading.Thread', 'Thread', ([], {'target': 'start_server'}), '(target=start_server)\n', (1390, 1411), False, 'from threading import Thread\n'), ((2194, 2236), 'numpy.clip', 'np.clip', (['xyz', 'self.min_clip', 'self.max_clip'], {}), '(xyz, self.min_clip, self.max_clip)\n', (2201, 2236), True, 'import numpy as np\n'), ((2252, 2280), 'numpy.array', 'np.array', (['[roll, pitch, yaw]'], {}), '([roll, pitch, yaw])\n', (2260, 2280), True, 'import numpy as np\n'), ((2561, 2603), 'numpy.clip', 'np.clip', (['rot', 'self.min_clip', 'self.max_clip'], {}), '(rot, self.min_clip, self.max_clip)\n', (2568, 2603), True, 'import numpy as np\n'), ((2617, 2675), 'numpy.concatenate', 'np.concatenate', (['[xyz[:self.xyz_dims], rot[:self.rot_dims]]'], {}), '([xyz[:self.xyz_dims], rot[:self.rot_dims]])\n', (2631, 2675), True, 'import numpy as np\n'), ((3359, 3372), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3369, 3372), False, 'import time\n'), ((2101, 2112), 'numpy.abs', 'np.abs', (['xyz'], {}), '(xyz)\n', (2107, 2112), True, 'import numpy as np\n'), ((2293, 2304), 'numpy.abs', 'np.abs', (['rot'], {}), '(rot)\n', (2299, 2304), True, 'import numpy as np\n'), ((2397, 2408), 'numpy.abs', 'np.abs', (['rot'], {}), '(rot)\n', (2403, 2408), True, 'import numpy as np\n'), ((2706, 2722), 'numpy.isclose', 'np.isclose', (['a', '(0)'], {}), '(a, 0)\n', (2716, 2722), True, 'import numpy as np\n')] |
"""Functions used to manipulate pytorch tensors and numpy arrays."""
import numbers
import os
import tempfile
from collections import defaultdict
from typing import List, Dict, Optional, DefaultDict, Union, Any, cast
import PIL
import numpy as np
import torch
from PIL import Image
from moviepy import editor as mpy
from moviepy.editor import concatenate_videoclips
from tensorboardX import SummaryWriter as TBXSummaryWriter, summary as tbxsummary
from tensorboardX.proto.summary_pb2 import Summary as TBXSummary
# noinspection PyProtectedMember
from tensorboardX.utils import _prepare_video as tbx_prepare_video
from tensorboardX.x2num import make_np as tbxmake_np
from allenact.utils.system import get_logger
def to_device_recursively(
    input: Any, device: Union[str, torch.device, int], inplace: bool = True
):
    """Recursively places tensors on the appropriate device.

    # Parameters

    input : An arbitrarily nested structure of tensors, tuples, lists, dicts,
        and sets; leaves may also be numpy arrays, scalars, strings, or any
        object exposing a compatible ``to(device=..., inplace=...)`` method.
    device : The target device (e.g. ``"cuda:0"``, a ``torch.device``, or an int).
    inplace : If True, mutable containers (lists, dicts, sets) are modified
        in place and returned; otherwise new containers are constructed.

    # Returns

    The same structure with every tensor moved to ``device``.
    """
    if input is None:
        return input
    elif isinstance(input, torch.Tensor):
        return input.to(device)  # type: ignore
    elif isinstance(input, tuple):
        # Tuples are immutable, so a new tuple is always built.
        return tuple(
            to_device_recursively(input=subinput, device=device, inplace=inplace)
            for subinput in input
        )
    elif isinstance(input, list):
        if inplace:
            for i in range(len(input)):
                input[i] = to_device_recursively(
                    input=input[i], device=device, inplace=inplace
                )
            return input
        else:
            return [
                to_device_recursively(input=subpart, device=device, inplace=inplace)
                for subpart in input
            ]
    elif isinstance(input, dict):
        if inplace:
            for key in input:
                input[key] = to_device_recursively(
                    input=input[key], device=device, inplace=inplace
                )
            return input
        else:
            return {
                k: to_device_recursively(input=input[k], device=device, inplace=inplace)
                for k in input
            }
    elif isinstance(input, set):
        if inplace:
            # Iterate over a snapshot, since the set is mutated during the loop.
            for element in list(input):
                input.remove(element)
                input.add(
                    to_device_recursively(element, device=device, inplace=inplace)
                )
            # BUG FIX: this branch previously fell through and returned None.
            return input
        else:
            return set(
                to_device_recursively(k, device=device, inplace=inplace) for k in input
            )
    elif isinstance(input, np.ndarray) or np.isscalar(input) or isinstance(input, str):
        # Non-tensor leaves pass through unchanged.
        return input
    elif hasattr(input, "to"):
        # noinspection PyCallingNonCallable
        return input.to(device=device, inplace=inplace)
    else:
        raise NotImplementedError(
            "Sorry, value of type {} is not supported.".format(type(input))
        )
def detach_recursively(input: Any, inplace=True):
    """Recursively detaches tensors in some data structure from their
    computation graph.

    # Parameters

    input : An arbitrarily nested structure of tensors, tuples, lists, dicts,
        and sets; leaves may also be numpy arrays, scalars, strings, or any
        object exposing a ``detach_recursively(inplace=...)`` method.
    inplace : If True, mutable containers (lists, dicts, sets) are modified
        in place and returned; otherwise new containers are constructed.

    # Returns

    The same structure with every tensor detached.
    """
    if input is None:
        return input
    elif isinstance(input, torch.Tensor):
        return input.detach()
    elif isinstance(input, tuple):
        # Tuples are immutable, so a new tuple is always built.
        return tuple(
            detach_recursively(input=subinput, inplace=inplace) for subinput in input
        )
    elif isinstance(input, list):
        if inplace:
            for i in range(len(input)):
                input[i] = detach_recursively(input[i], inplace=inplace)
            return input
        else:
            return [
                detach_recursively(input=subinput, inplace=inplace)
                for subinput in input
            ]
    elif isinstance(input, dict):
        if inplace:
            for key in input:
                input[key] = detach_recursively(input[key], inplace=inplace)
            return input
        else:
            return {k: detach_recursively(input[k], inplace=inplace) for k in input}
    elif isinstance(input, set):
        if inplace:
            # Iterate over a snapshot, since the set is mutated during the loop.
            for element in list(input):
                input.remove(element)
                input.add(detach_recursively(element, inplace=inplace))
            # BUG FIX: this branch previously fell through and returned None.
            return input
        else:
            return set(detach_recursively(k, inplace=inplace) for k in input)
    elif isinstance(input, np.ndarray) or np.isscalar(input) or isinstance(input, str):
        # Non-tensor leaves pass through unchanged.
        return input
    elif hasattr(input, "detach_recursively"):
        # noinspection PyCallingNonCallable
        return input.detach_recursively(inplace=inplace)
    else:
        raise NotImplementedError(
            "Sorry, hidden state of type {} is not supported.".format(type(input))
        )
def batch_observations(
    observations: List[Dict], device: Optional[torch.device] = None
) -> Dict[str, Union[Dict, torch.Tensor]]:
    """Transpose a batch of observation dicts to a dict of batched
    observations.

    # Arguments

    observations : List of dicts of observations; every dict is assumed to
        share the same (possibly nested) set of sensor keys.
    device : The torch.device to put the resulting tensors on.
        Will not move the tensors if None.

    # Returns

    Dict with the same nested structure whose leaves are tensors stacked
    along a new leading batch dimension (or the input itself when empty).
    """

    def dict_from_observation(
        observation: Dict[str, Any]
    ) -> Dict[str, Union[Dict, List]]:
        # Build the batch skeleton from one observation: nested dicts are
        # recursed into; every leaf becomes a single-element list.
        batch_dict: DefaultDict = defaultdict(list)

        for sensor in observation:
            if isinstance(observation[sensor], Dict):
                batch_dict[sensor] = dict_from_observation(observation[sensor])
            else:
                batch_dict[sensor].append(to_tensor(observation[sensor]))

        return batch_dict

    def fill_dict_from_observations(
        input_batch: Any, observation: Dict[str, Any]
    ) -> None:
        # Append the leaves of each subsequent observation to the existing lists.
        for sensor in observation:
            if isinstance(observation[sensor], Dict):
                fill_dict_from_observations(input_batch[sensor], observation[sensor])
            else:
                input_batch[sensor].append(to_tensor(observation[sensor]))

    def dict_to_batch(input_batch: Any) -> None:
        # Stack every leaf list into a tensor along dim 0 (the batch dim),
        # moving each element to `device` first.
        for sensor in input_batch:
            if isinstance(input_batch[sensor], Dict):
                dict_to_batch(input_batch[sensor])
            else:
                input_batch[sensor] = torch.stack(
                    [batch.to(device=device) for batch in input_batch[sensor]], dim=0
                )

    if len(observations) == 0:
        return cast(Dict[str, Union[Dict, torch.Tensor]], observations)

    batch = dict_from_observation(observations[0])

    for obs in observations[1:]:
        fill_dict_from_observations(batch, obs)

    dict_to_batch(batch)

    return cast(Dict[str, Union[Dict, torch.Tensor]], batch)
def to_tensor(v) -> torch.Tensor:
    """Coerce the input into a ``torch.Tensor``.

    # Parameters

    v : A tensor (returned unchanged), a numpy array (wrapped without a
        copy via ``torch.from_numpy``), or any value ``torch.tensor`` accepts.

    # Returns

    A tensor version of the input; integral scalars become int64, everything
    else defaults to float.
    """
    if torch.is_tensor(v):
        return v
    if isinstance(v, np.ndarray):
        return torch.from_numpy(v)
    dtype = torch.int64 if isinstance(v, numbers.Integral) else torch.float
    return torch.tensor(v, dtype=dtype)
def tile_images(images: List[np.ndarray]) -> np.ndarray:
    """Tile multiple images into a single image.

    # Parameters

    images : list of images, each of shape (height, width, channels).

    # Returns

    One image of shape (rows * height, cols * width, channels) with the
    inputs laid out in a near-square grid, padded with blank images.
    """
    assert len(images) > 0, "empty list of images"
    stack = np.asarray(images)
    n_images, height, width, n_channels = stack.shape
    rows = int(np.ceil(np.sqrt(n_images)))
    cols = int(np.ceil(float(n_images) / rows))
    # Pad with zeroed copies of the first image so the grid is complete.
    padded = np.array(
        images + [images[0] * 0 for _ in range(n_images, rows * cols)]
    )
    # (rows, cols, H, W, C) -> (rows, H, cols, W, C) -> (rows*H, cols*W, C)
    grid = padded.reshape((rows, cols, height, width, n_channels))
    grid = grid.transpose(0, 2, 1, 3, 4)
    return grid.reshape((rows * height, cols * width, n_channels))
class SummaryWriter(TBXSummaryWriter):
    """tensorboardX SummaryWriter extended with video summaries and a
    channel-order-aware ``add_image`` based on the module-level helpers."""

    @staticmethod
    def _video(tag, vid):
        # Wrap an already-encoded video (a TBXSummary.Image, e.g. the output
        # of `clips_to_video`) into a Summary proto under the cleaned tag.
        # noinspection PyProtectedMember
        tag = tbxsummary._clean_tag(tag)
        return TBXSummary(value=[TBXSummary.Value(tag=tag, image=vid)])

    def add_vid(self, tag, vid, global_step=None, walltime=None):
        # `vid` is expected to be an encoded video summary image.
        self._get_file_writer().add_summary(
            self._video(tag, vid), global_step, walltime
        )

    def add_image(
        self, tag, img_tensor, global_step=None, walltime=None, dataformats="CHW"
    ):
        # Delegates to the module-level `image` helper so layouts such as
        # "NCHW"/"HWC"/"HW" are normalized before serialization.
        self._get_file_writer().add_summary(
            image(tag, img_tensor, dataformats=dataformats), global_step, walltime
        )
def image(tag, tensor, rescale=1, dataformats="CHW"):
    """Outputs a `Summary` protocol buffer with images. The summary has up to
    `max_images` summary values containing images. The images are built from
    `tensor` which must be 3-D with shape `[height, width, channels]` and where
    `channels` can be:
    *  1: `tensor` is interpreted as Grayscale.
    *  3: `tensor` is interpreted as RGB.
    *  4: `tensor` is interpreted as RGBA.

    # Parameters

    tag: A name for the generated node. Will also serve as a series name in
        TensorBoard.
    tensor: A 3-D `uint8` or `float32` `Tensor` of shape `[height, width,
        channels]` where `channels` is 1, 3, or 4.
        'tensor' can either have values in [0, 1] (float32) or [0, 255] (uint8).
        The image() function will scale the image values to [0, 255] by applying
        a scale factor of either 1 (uint8) or 255 (float32).
    rescale: The scale.
    dataformats: Input image shape format.

    # Returns

    A scalar `Tensor` of type `string`. The serialized `Summary` protocol
    buffer.
    """
    # noinspection PyProtectedMember
    tag = tbxsummary._clean_tag(tag)
    tensor = tbxmake_np(tensor)
    # Normalize the layout to HWC before encoding.
    tensor = convert_to_HWC(tensor, dataformats)
    # Do not assume that user passes in values in [0, 255], use data type to detect
    if tensor.dtype != np.uint8:
        tensor = (tensor * 255.0).astype(np.uint8)

    # NOTE(review): the local `image` shadows this function's own name; it is
    # only used after the last call to `convert_to_HWC`, so this is harmless.
    image = tbxsummary.make_image(tensor, rescale=rescale)
    return TBXSummary(value=[TBXSummary.Value(tag=tag, image=image)])
def convert_to_HWC(tensor, input_format):  # tensor: numpy array
    """Rearrange `tensor` into an H x W x C (3-channel) numpy array.

    # Parameters

    tensor : numpy array whose rank equals ``len(input_format)``.
    input_format : dimension shorthand such as "NCHW", "CHW", "HWC", or "HW"
        (case-insensitive, no repeated letters).

    # Returns

    An H x W x 3(or 4) array; 4-D batched input is first tiled into a single
    CHW image via `make_grid`, and 1-channel / 2-D input is replicated to 3
    channels.
    """
    # BUG FIX: "shordhand" -> "shorthand" in the assertion message.
    assert len(set(input_format)) == len(
        input_format
    ), "You can not use the same dimension shorthand twice. \
        input_format: {}".format(
        input_format
    )
    assert len(tensor.shape) == len(
        input_format
    ), "size of input tensor and input format are different. \
        tensor shape: {}, input_format: {}".format(
        tensor.shape, input_format
    )
    input_format = input_format.upper()

    if len(input_format) == 4:
        # Batched input: reorder to NCHW, tile into one CHW image, then HWC.
        index = [input_format.find(c) for c in "NCHW"]
        tensor_NCHW = tensor.transpose(index)
        tensor_CHW = make_grid(tensor_NCHW)
        # noinspection PyTypeChecker
        return tensor_CHW.transpose(1, 2, 0)

    if len(input_format) == 3:
        index = [input_format.find(c) for c in "HWC"]
        tensor_HWC = tensor.transpose(index)
        if tensor_HWC.shape[2] == 1:
            # Grayscale: replicate the single channel to RGB.
            tensor_HWC = np.concatenate([tensor_HWC, tensor_HWC, tensor_HWC], 2)
        return tensor_HWC

    if len(input_format) == 2:
        # 2-D input: transpose to HW, then replicate to 3 channels.
        index = [input_format.find(c) for c in "HW"]
        tensor = tensor.transpose(index)
        tensor = np.stack([tensor, tensor, tensor], 2)
        return tensor
def make_grid(I, ncols=8):
    """Tile a batch of CHW images into a single CHW grid image.

    # Parameters

    I : numpy array of shape (N, 1, H, W), (N, 3, H, W), or (N, 4, H, W);
        single-channel input is replicated to 3 channels.
    ncols : maximum number of grid columns (capped at N).

    # Returns

    numpy array of shape (C, H * nrows, W * ncols); unused cells stay zero.
    """
    assert isinstance(I, np.ndarray), "plugin error, should pass numpy array here"
    if I.shape[1] == 1:
        I = np.concatenate([I, I, I], 1)
    # BUG FIX: the original check `I.ndim == 4 and I.shape[1] == 3 or I.shape[1] == 4`
    # parsed as `(I.ndim == 4 and I.shape[1] == 3) or I.shape[1] == 4`, which
    # wrongly accepted non-4D arrays whose second dimension happened to be 4.
    assert I.ndim == 4 and I.shape[1] in (3, 4)
    nimg = I.shape[0]
    H = I.shape[2]
    W = I.shape[3]
    ncols = min(nimg, ncols)
    nrows = int(np.ceil(float(nimg) / ncols))
    canvas = np.zeros((I.shape[1], H * nrows, W * ncols), dtype=I.dtype)
    i = 0
    for y in range(nrows):
        for x in range(ncols):
            if i >= nimg:
                break
            canvas[:, y * H : (y + 1) * H, x * W : (x + 1) * W] = I[i]
            i = i + 1
    return canvas
def tensor_to_video(tensor, fps=4):
    """Encode a video tensor as a tensorboardX video summary.

    `tensor` is converted to numpy and reshaped by tensorboardX's
    `_prepare_video` before encoding; float inputs are assumed to lie in
    [0, 1] and are rescaled to uint8.
    """
    tensor = tbxmake_np(tensor)
    tensor = tbx_prepare_video(tensor)
    # If user passes in uint8, then we don't need to rescale by 255
    if tensor.dtype != np.uint8:
        tensor = (tensor * 255.0).astype(np.uint8)

    return tbxsummary.make_video(tensor, fps)
def tensor_to_clip(tensor, fps=4):
    """Convert a video tensor into a moviepy ImageSequenceClip.

    # Returns

    (clip, (h, w, c)) where (h, w, c) is the per-frame geometry after
    tensorboardX's `_prepare_video` reshaping to (T, H, W, C).
    """
    tensor = tbxmake_np(tensor)
    tensor = tbx_prepare_video(tensor)
    # If user passes in uint8, then we don't need to rescale by 255
    if tensor.dtype != np.uint8:
        tensor = (tensor * 255.0).astype(np.uint8)

    t, h, w, c = tensor.shape
    clip = mpy.ImageSequenceClip(list(tensor), fps=fps)

    return clip, (h, w, c)
def clips_to_video(clips, h, w, c):
    """Concatenate moviepy clips and package the encoded GIF as a
    TBXSummary.Image (tensorboardX's container for video summaries).

    # Parameters

    clips : list of moviepy clips (e.g. from `tensor_to_clip`).
    h, w, c : height, width and channel count shared by every clip.

    # Returns

    A TBXSummary.Image whose `encoded_image_string` holds the GIF bytes.
    """
    # encode sequence of images into gif string
    clip = concatenate_videoclips(clips)
    # delete=False: moviepy must reopen the path on all platforms.
    filename = tempfile.NamedTemporaryFile(suffix=".gif", delete=False).name

    # moviepy >= 1.0.0 use logger=None to suppress output.
    try:
        clip.write_gif(filename, verbose=False, logger=None)
    except TypeError:
        get_logger().warning(
            "Upgrade to moviepy >= 1.0.0 to suppress the progress bar."
        )
        clip.write_gif(filename, verbose=False)

    with open(filename, "rb") as f:
        tensor_string = f.read()

    try:
        os.remove(filename)
    except OSError:
        get_logger().warning("The temporary file used by moviepy cannot be deleted.")

    return TBXSummary.Image(
        height=h, width=w, colorspace=c, encoded_image_string=tensor_string
    )
def process_video(render, max_clip_len=500, max_video_len=-1, fps=4):
    """Convert a list of rendered frames into one encoded video summary.

    # Parameters

    render : list of per-frame numpy arrays of shape (H, W, C).
    max_clip_len : frames are encoded in chunks of at most this many frames.
    max_video_len : if > 0, the video is truncated to this many frames first.
    fps : frames per second of the resulting video.

    # Returns

    The output of `clips_to_video`, or None when there are no frames or a
    MemoryError occurs during encoding.
    """
    output = []
    hwc = None
    if len(render) > 0:
        if len(render) > max_video_len > 0:
            get_logger().warning(
                "Clipping video to first {} frames out of {} original frames".format(
                    max_video_len, len(render)
                )
            )
            render = render[:max_video_len]
        # Encode in chunks to bound peak memory usage.
        for clipstart in range(0, len(render), max_clip_len):
            clip = render[clipstart : clipstart + max_clip_len]
            try:
                current = np.stack(clip, axis=0)  # T, H, W, C
                current = current.transpose((0, 3, 1, 2))  # T, C, H, W
                current = np.expand_dims(current, axis=0)  # 1, T, C, H, W
                current, cur_hwc = tensor_to_clip(current, fps=fps)

                if hwc is None:
                    hwc = cur_hwc
                else:
                    # All chunks must share one frame geometry to concatenate.
                    assert (
                        hwc == cur_hwc
                    ), "Inconsistent clip shape: previous {} current {}".format(
                        hwc, cur_hwc
                    )

                output.append(current)
            except MemoryError:
                get_logger().error(
                    "Skipping video due to memory error with clip of length {}".format(
                        len(clip)
                    )
                )
                return None
    else:
        get_logger().warning("Calling process_video with 0 frames")
        return None

    assert len(output) > 0, "No clips to concatenate"
    assert hwc is not None, "No tensor dims assigned"

    try:
        result = clips_to_video(output, *hwc)
    except MemoryError:
        get_logger().error("Skipping video due to memory error calling clips_to_video")
        result = None

    return result
class ScaleBothSides(object):
    """Rescales the input PIL.Image to the given `width` and `height`.

    Unlike aspect-ratio-preserving resizes, both sides are forced to the
    requested size, so the image may be stretched.

    # Attributes

    width : new width
    height : new height
    interpolation : PIL resampling filter. Default: PIL.Image.BILINEAR
    """

    def __init__(self, width: int, height: int, interpolation=Image.BILINEAR):
        self.width = width
        self.height = height
        self.interpolation = interpolation

    def __call__(self, img: PIL.Image) -> PIL.Image:
        return img.resize((self.width, self.height), self.interpolation)
| [
"tensorboardX.utils._prepare_video",
"numpy.sqrt",
"torch.from_numpy",
"tensorboardX.proto.summary_pb2.Summary.Value",
"os.remove",
"numpy.isscalar",
"numpy.asarray",
"tensorboardX.summary.make_video",
"numpy.stack",
"tensorboardX.summary.make_image",
"tensorboardX.x2num.make_np",
"numpy.conca... | [((6482, 6531), 'typing.cast', 'cast', (['Dict[str, Union[Dict, torch.Tensor]]', 'batch'], {}), '(Dict[str, Union[Dict, torch.Tensor]], batch)\n', (6486, 6531), False, 'from typing import List, Dict, Optional, DefaultDict, Union, Any, cast\n'), ((6766, 6784), 'torch.is_tensor', 'torch.is_tensor', (['v'], {}), '(v)\n', (6781, 6784), False, 'import torch\n'), ((7368, 7386), 'numpy.asarray', 'np.asarray', (['images'], {}), '(images)\n', (7378, 7386), True, 'import numpy as np\n'), ((9821, 9847), 'tensorboardX.summary._clean_tag', 'tbxsummary._clean_tag', (['tag'], {}), '(tag)\n', (9842, 9847), True, 'from tensorboardX import SummaryWriter as TBXSummaryWriter, summary as tbxsummary\n'), ((9861, 9879), 'tensorboardX.x2num.make_np', 'tbxmake_np', (['tensor'], {}), '(tensor)\n', (9871, 9879), True, 'from tensorboardX.x2num import make_np as tbxmake_np\n'), ((10110, 10156), 'tensorboardX.summary.make_image', 'tbxsummary.make_image', (['tensor'], {'rescale': 'rescale'}), '(tensor, rescale=rescale)\n', (10131, 10156), True, 'from tensorboardX import SummaryWriter as TBXSummaryWriter, summary as tbxsummary\n'), ((11881, 11940), 'numpy.zeros', 'np.zeros', (['(I.shape[1], H * nrows, W * ncols)'], {'dtype': 'I.dtype'}), '((I.shape[1], H * nrows, W * ncols), dtype=I.dtype)\n', (11889, 11940), True, 'import numpy as np\n'), ((12219, 12237), 'tensorboardX.x2num.make_np', 'tbxmake_np', (['tensor'], {}), '(tensor)\n', (12229, 12237), True, 'from tensorboardX.x2num import make_np as tbxmake_np\n'), ((12251, 12276), 'tensorboardX.utils._prepare_video', 'tbx_prepare_video', (['tensor'], {}), '(tensor)\n', (12268, 12276), True, 'from tensorboardX.utils import _prepare_video as tbx_prepare_video\n'), ((12441, 12475), 'tensorboardX.summary.make_video', 'tbxsummary.make_video', (['tensor', 'fps'], {}), '(tensor, fps)\n', (12462, 12475), True, 'from tensorboardX import SummaryWriter as TBXSummaryWriter, summary as tbxsummary\n'), ((12526, 12544), 
'tensorboardX.x2num.make_np', 'tbxmake_np', (['tensor'], {}), '(tensor)\n', (12536, 12544), True, 'from tensorboardX.x2num import make_np as tbxmake_np\n'), ((12558, 12583), 'tensorboardX.utils._prepare_video', 'tbx_prepare_video', (['tensor'], {}), '(tensor)\n', (12575, 12583), True, 'from tensorboardX.utils import _prepare_video as tbx_prepare_video\n'), ((12949, 12978), 'moviepy.editor.concatenate_videoclips', 'concatenate_videoclips', (['clips'], {}), '(clips)\n', (12971, 12978), False, 'from moviepy.editor import concatenate_videoclips\n'), ((13595, 13685), 'tensorboardX.proto.summary_pb2.Summary.Image', 'TBXSummary.Image', ([], {'height': 'h', 'width': 'w', 'colorspace': 'c', 'encoded_image_string': 'tensor_string'}), '(height=h, width=w, colorspace=c, encoded_image_string=\n tensor_string)\n', (13611, 13685), True, 'from tensorboardX.proto.summary_pb2 import Summary as TBXSummary\n'), ((5161, 5178), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5172, 5178), False, 'from collections import defaultdict\n'), ((6253, 6309), 'typing.cast', 'cast', (['Dict[str, Union[Dict, torch.Tensor]]', 'observations'], {}), '(Dict[str, Union[Dict, torch.Tensor]], observations)\n', (6257, 6309), False, 'from typing import List, Dict, Optional, DefaultDict, Union, Any, cast\n'), ((8157, 8183), 'tensorboardX.summary._clean_tag', 'tbxsummary._clean_tag', (['tag'], {}), '(tag)\n', (8178, 8183), True, 'from tensorboardX import SummaryWriter as TBXSummaryWriter, summary as tbxsummary\n'), ((11411, 11448), 'numpy.stack', 'np.stack', (['[tensor, tensor, tensor]', '(2)'], {}), '([tensor, tensor, tensor], 2)\n', (11419, 11448), True, 'import numpy as np\n'), ((11642, 11670), 'numpy.concatenate', 'np.concatenate', (['[I, I, I]', '(1)'], {}), '([I, I, I], 1)\n', (11656, 11670), True, 'import numpy as np\n'), ((12995, 13051), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'suffix': '""".gif"""', 'delete': '(False)'}), "(suffix='.gif', 
delete=False)\n", (13022, 13051), False, 'import tempfile\n'), ((13457, 13476), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (13466, 13476), False, 'import os\n'), ((6854, 6873), 'torch.from_numpy', 'torch.from_numpy', (['v'], {}), '(v)\n', (6870, 6873), False, 'import torch\n'), ((7474, 7491), 'numpy.sqrt', 'np.sqrt', (['n_images'], {}), '(n_images)\n', (7481, 7491), True, 'import numpy as np\n'), ((11186, 11241), 'numpy.concatenate', 'np.concatenate', (['[tensor_HWC, tensor_HWC, tensor_HWC]', '(2)'], {}), '([tensor_HWC, tensor_HWC, tensor_HWC], 2)\n', (11200, 11241), True, 'import numpy as np\n'), ((10186, 10224), 'tensorboardX.proto.summary_pb2.Summary.Value', 'TBXSummary.Value', ([], {'tag': 'tag', 'image': 'image'}), '(tag=tag, image=image)\n', (10202, 10224), True, 'from tensorboardX.proto.summary_pb2 import Summary as TBXSummary\n'), ((14278, 14300), 'numpy.stack', 'np.stack', (['clip'], {'axis': '(0)'}), '(clip, axis=0)\n', (14286, 14300), True, 'import numpy as np\n'), ((14413, 14444), 'numpy.expand_dims', 'np.expand_dims', (['current'], {'axis': '(0)'}), '(current, axis=0)\n', (14427, 14444), True, 'import numpy as np\n'), ((15143, 15155), 'allenact.utils.system.get_logger', 'get_logger', ([], {}), '()\n', (15153, 15155), False, 'from allenact.utils.system import get_logger\n'), ((8217, 8253), 'tensorboardX.proto.summary_pb2.Summary.Value', 'TBXSummary.Value', ([], {'tag': 'tag', 'image': 'vid'}), '(tag=tag, image=vid)\n', (8233, 8253), True, 'from tensorboardX.proto.summary_pb2 import Summary as TBXSummary\n'), ((13217, 13229), 'allenact.utils.system.get_logger', 'get_logger', ([], {}), '()\n', (13227, 13229), False, 'from allenact.utils.system import get_logger\n'), ((13505, 13517), 'allenact.utils.system.get_logger', 'get_logger', ([], {}), '()\n', (13515, 13517), False, 'from allenact.utils.system import get_logger\n'), ((13878, 13890), 'allenact.utils.system.get_logger', 'get_logger', ([], {}), '()\n', (13888, 13890), False, 'from 
allenact.utils.system import get_logger\n'), ((15420, 15432), 'allenact.utils.system.get_logger', 'get_logger', ([], {}), '()\n', (15430, 15432), False, 'from allenact.utils.system import get_logger\n'), ((14915, 14927), 'allenact.utils.system.get_logger', 'get_logger', ([], {}), '()\n', (14925, 14927), False, 'from allenact.utils.system import get_logger\n'), ((2486, 2504), 'numpy.isscalar', 'np.isscalar', (['input'], {}), '(input)\n', (2497, 2504), True, 'import numpy as np\n'), ((4200, 4218), 'numpy.isscalar', 'np.isscalar', (['input'], {}), '(input)\n', (4211, 4218), True, 'import numpy as np\n')] |
import numpy as np
import math
from numpy.fft import fft, ifft
from ModulationPy import QAMModem
import matplotlib.pyplot as plt
import scipy
from scipy import special, interpolate, signal
import time
tic = time.time()

# Initial configuration
N = 131072  # Number of subcarriers
Mod = 16  # Modulation order
M = 1  # Number of subsymbols
Nset = np.arange(20, 65531, 1)  # Indices of the allocated subcarriers
Non = len(Nset)  # Number of allocated carriers
Np = 33  # Number of pilot carriers
pilotValue = 1  # The known value each pilot transmits
SNR = np.arange(5,45,3)  # SNR values in dB
snr = 10**(SNR/10)  # Linear SNR values
L = math.sqrt(Mod)  # Constellation side length (sqrt of the QAM order)
mu = 4*(L-1)/L  # Average number of neighbors
E = 3/(L**2-1)

# Pilots
allCarriers2 = np.arange(N)  # All carriers, including the unused ones
allCarriers = np.arange(Non)
pilotCarriers = allCarriers[::Non//Np]  # Pilots is every (K/P)th carrier.
print(len(pilotCarriers))

# For convenience of channel estimation, let's make the last carriers also be a pilot
pilotCarriers = np.hstack([pilotCarriers, np.array([allCarriers[-1]])])
Np = Np+1

# data carriers are all remaining carriers
dataCarriers = np.delete(allCarriers, pilotCarriers)

#print (f'Todas as portadoras: {allCarriers}')
#print (f'Portadoras pilotos: {pilotCarriers}')
#print (f'Portadoras de dados: {dataCarriers}')
plt.plot(pilotCarriers, np.zeros_like(pilotCarriers), 'bo', label='pilot')
plt.plot(dataCarriers, np.zeros_like(dataCarriers), 'ro', label='data')

# QAM modulator: random data symbols on the data carriers
c = np.random.randint(0, Mod, size=Non-len(pilotCarriers))
modem = QAMModem(Mod, bin_input=False, soft_decision=False, bin_output=False)
symbol = modem.modulate(c)  # modulation
s = np.zeros(Non, dtype=complex)  # Frequency-domain vector over the Non allocated carriers
s[dataCarriers] = symbol  # allocate the data subcarriers
P = np.sum(abs(s)**2)/len(dataCarriers)  # Average data-symbol power
s[pilotCarriers] = pilotValue  # Allocate the pilot carriers
# Mapeamento dos símbolos nas portadoras
def mapeamento(s, Nset, N):
    """Map the symbol vector ``s`` onto an N-point spectrum.

    The symbols are placed at carriers ``(Nset % N) + 1``.  Returns the
    full spectrum ``d``, the shifted carrier indices ``nset`` and the
    sub-symbol index ``mset`` (always 1 here).
    """
    indices = Nset % N
    assert len(indices) <= N
    indices = indices + 1  # one-based carrier placement (as in the original port)
    spectrum = np.zeros(N, dtype=complex)
    spectrum[indices] = s
    return spectrum, indices, 1
def demapeamento(rf, nset, mset):
    """Inverse of ``mapeamento``: extract the allocated carriers from ``rf``.

    ``mset`` is accepted for interface symmetry but is not used.
    """
    return rf[nset]
# Map the QAM symbols onto the N-point spectrum and transform to time domain
d, nset,mset = mapeamento(s,Nset,N)
st = np.sqrt(N)*np.fft.ifft(d) # OFDM time-domain signal (unitary IFFT scaling)
Pt = np.sum(abs(st)**2)/len(st) # mean power of the transmitted signal
# Result arrays, one entry per SNR point
pe_teor = np.zeros(len(snr)) # theoretical SER, AWGN channel
pe_sim = np.zeros(len(snr)) # simulated SER, selective channel + interpolated estimate
pe_simp = np.zeros(len(snr)) # simulated SER, selective channel + perfect estimate
pe_sim_awgn = np.zeros(len(snr)) # simulated SER, AWGN only
pe_teor_ep = np.zeros(len(snr)) # theoretical SER, selective channel
MSE = np.zeros(len(snr)) # channel-estimation mean squared error
erros = np.zeros(len(snr))
errosp = np.zeros(len(snr))
erros_awgn = np.zeros(len(snr))
# Channel response
channelResponse = np.array([1, 0.7]) # the impulse response of the wireless channel
H_exact = np.fft.fft(channelResponse, N)
#plt.figure(figsize= (5,5))
#plt.plot(allCarriers2, abs(H_exact), label='Real Channel Response')
for idx in range(0,len(snr)):
    # Complex AWGN scaled to the target SNR (P is the mean data-symbol power)
    n0 = P/snr[idx]
    noise = np.sqrt(n0/2)*(np.random.randn(len(d)) + 1j*np.random.randn(len(d)))
    y2 = st+noise # AWGN-only received signal
    # Selective channel: convolve with the impulse response
    convolved = np.convolve(st, channelResponse, mode = 'same')
    y = convolved+noise
    # Back to the frequency domain (unitary FFT scaling)
    rf = 1/(np.sqrt(N))*np.fft.fft(y)
    rf_awgn = 1/np.sqrt(N)*np.fft.fft(y2)
    # De-map the complex symbols from the allocated carriers
    rf_rx= demapeamento(rf,nset,mset)
    rf_rx_awgn= demapeamento(rf_awgn,nset,mset)
    # Channel estimation at the pilot carriers
    pilots = rf_rx[pilotCarriers]
    Hest_at_pilots = pilots / pilotValue # divide by the transmitted pilot values
    # Interpolate magnitude and phase separately over all allocated carriers
    Hest_abs = scipy.interpolate.interp1d(pilotCarriers, abs(Hest_at_pilots), kind='linear')(allCarriers)
    Hest_phase = scipy.interpolate.interp1d(pilotCarriers, np.angle(Hest_at_pilots), kind='linear')(allCarriers)
    Hest = Hest_abs * np.exp(1j*Hest_phase)
    # Equalisation
    rf_eq = rf_rx / Hest # with the interpolated estimate
    # NOTE(review): perfect-CSI equalisation indexes H_exact with allCarriers
    # (0..Non-1), while the theoretical SER below uses H_exact[Nset] -- confirm
    # the carrier indexing is intended to differ.
    rf_eqp = rf_rx / H_exact[allCarriers]
    # Drop the pilot carriers, keep only the data carriers
    rf1 =rf_eq[dataCarriers]
    rf2 = rf_eqp[dataCarriers]
    rf3 = rf_rx_awgn[dataCarriers]
    # Estimated symbols
    c_est = modem.demodulate(rf1)
    c_estp = modem.demodulate(rf2)
    c_est_awgn = modem.demodulate(rf3)
    # Symbol error counting
    erros[idx] = np.sum(c != c_est)
    errosp[idx] = np.sum(c != c_estp)
    erros_awgn[idx] = np.sum(c != c_est_awgn)
    # Theoretical OFDM symbol error probability over AWGN
    pe_teor[idx] = mu/2*special.erfc(np.sqrt(E*snr[idx])/np.sqrt(2))
    # Simulated OFDM symbol error probabilities
    pe_sim[idx] = erros[idx]/len(c_est)
    pe_simp[idx] = errosp[idx]/len(c_estp)
    pe_sim_awgn[idx] = erros_awgn[idx]/len(c_est_awgn)
    # Theoretical symbol error rate for the selective OFDM channel
    pe_teor_ep[idx] = (mu/(2*len(H_exact[Nset])))*sum(special.erfc(np.sqrt(abs(H_exact[Nset])**2*E*snr[idx])/np.sqrt(2)))
    MSE[idx] = 1/len(Hest)*sum(abs(H_exact[Nset]-Hest))**2
# Constellation scatter plots for the last simulated SNR point
plt.figure(figsize= (5,5))
plt.plot(rf1.real, rf1.imag, 'bo')
plt.title(f'Estimação Linear com SNR = {SNR[idx]} dB')
plt.figure(figsize= (5,5))
plt.plot(rf2.real, rf2.imag, 'bo')
plt.title('Estimação Perfeita')
plt.figure(figsize= (5,5))
plt.plot(rf3.real, rf3.imag, 'bo')
plt.title('AWGN')
# Real vs. estimated channel magnitude
plt.figure(figsize= (8,5))
plt.plot(Nset, abs(H_exact[Nset]), label='Real Channel Response')
plt.stem(pilotCarriers+min(Nset), abs(Hest_at_pilots), use_line_collection = True, label='Pilot estimates')
plt.plot(Nset, abs(Hest), label='Estimated channel via interpolation')
plt.grid(True); plt.xlabel('Carrier index'); plt.ylabel('$|H(f)|$'); plt.legend(fontsize=10)
plt.ylim(0,2)
# Symbol-error-rate curves (log scale) for all simulated configurations
fig = plt.figure(figsize=(8,5))
plt.plot(SNR, pe_teor_ep, label='OFDM-Seletive- Theoretical')
plt.scatter(SNR, pe_sim, facecolor='None', edgecolor='r', label='OFDM-Selective Channel- Linear Channel Estimation- Simulation')
plt.scatter(SNR, pe_sim_awgn, facecolor='None', edgecolor='b', label='OFDM-AWGN Channel- Simulation')
plt.scatter(SNR, pe_simp, facecolor='None', edgecolor='g', label='OFDM-Selective Channel- Perfect Channel Estimation- Simulation')
plt.plot(SNR, pe_teor, label='OFDM-AWGN Channel- Theoretical')
plt.yscale('log')
plt.xlabel('SNR (dB)')
plt.ylabel('Symbol Error Rate')
plt.grid()
plt.legend(fontsize=10)
plt.xlim([10, 30])
plt.ylim([1e-5, 1])
toc = time.time()
tempo = toc-tic
print(f'A simulação demorou {tempo} segundos')
plt.show() | [
"matplotlib.pyplot.grid",
"numpy.sqrt",
"numpy.convolve",
"matplotlib.pyplot.ylabel",
"math.sqrt",
"numpy.array",
"numpy.arange",
"numpy.delete",
"matplotlib.pyplot.xlabel",
"numpy.fft.fft",
"matplotlib.pyplot.plot",
"numpy.exp",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.ylim",
"ma... | [((207, 218), 'time.time', 'time.time', ([], {}), '()\n', (216, 218), False, 'import time\n'), ((422, 445), 'numpy.arange', 'np.arange', (['(20)', '(65531)', '(1)'], {}), '(20, 65531, 1)\n', (431, 445), True, 'import numpy as np\n'), ((689, 708), 'numpy.arange', 'np.arange', (['(5)', '(45)', '(3)'], {}), '(5, 45, 3)\n', (698, 708), True, 'import numpy as np\n'), ((800, 814), 'math.sqrt', 'math.sqrt', (['Mod'], {}), '(Mod)\n', (809, 814), False, 'import math\n'), ((916, 928), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (925, 928), True, 'import numpy as np\n'), ((1007, 1021), 'numpy.arange', 'np.arange', (['Non'], {}), '(Non)\n', (1016, 1021), True, 'import numpy as np\n'), ((1350, 1387), 'numpy.delete', 'np.delete', (['allCarriers', 'pilotCarriers'], {}), '(allCarriers, pilotCarriers)\n', (1359, 1387), True, 'import numpy as np\n'), ((1763, 1832), 'ModulationPy.QAMModem', 'QAMModem', (['Mod'], {'bin_input': '(False)', 'soft_decision': '(False)', 'bin_output': '(False)'}), '(Mod, bin_input=False, soft_decision=False, bin_output=False)\n', (1771, 1832), False, 'from ModulationPy import QAMModem\n'), ((1879, 1907), 'numpy.zeros', 'np.zeros', (['Non'], {'dtype': 'complex'}), '(Non, dtype=complex)\n', (1887, 1907), True, 'import numpy as np\n'), ((2825, 2843), 'numpy.array', 'np.array', (['[1, 0.7]'], {}), '([1, 0.7])\n', (2833, 2843), True, 'import numpy as np\n'), ((2902, 2932), 'numpy.fft.fft', 'np.fft.fft', (['channelResponse', 'N'], {}), '(channelResponse, N)\n', (2912, 2932), True, 'import numpy as np\n'), ((4960, 4986), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (4970, 4986), True, 'import matplotlib.pyplot as plt\n'), ((4987, 5021), 'matplotlib.pyplot.plot', 'plt.plot', (['rf1.real', 'rf1.imag', '"""bo"""'], {}), "(rf1.real, rf1.imag, 'bo')\n", (4995, 5021), True, 'import matplotlib.pyplot as plt\n'), ((5022, 5076), 'matplotlib.pyplot.title', 'plt.title', (['f"""Estimação Linear com SNR = 
{SNR[idx]} dB"""'], {}), "(f'Estimação Linear com SNR = {SNR[idx]} dB')\n", (5031, 5076), True, 'import matplotlib.pyplot as plt\n'), ((5078, 5104), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (5088, 5104), True, 'import matplotlib.pyplot as plt\n'), ((5105, 5139), 'matplotlib.pyplot.plot', 'plt.plot', (['rf2.real', 'rf2.imag', '"""bo"""'], {}), "(rf2.real, rf2.imag, 'bo')\n", (5113, 5139), True, 'import matplotlib.pyplot as plt\n'), ((5140, 5171), 'matplotlib.pyplot.title', 'plt.title', (['"""Estimação Perfeita"""'], {}), "('Estimação Perfeita')\n", (5149, 5171), True, 'import matplotlib.pyplot as plt\n'), ((5173, 5199), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (5183, 5199), True, 'import matplotlib.pyplot as plt\n'), ((5200, 5234), 'matplotlib.pyplot.plot', 'plt.plot', (['rf3.real', 'rf3.imag', '"""bo"""'], {}), "(rf3.real, rf3.imag, 'bo')\n", (5208, 5234), True, 'import matplotlib.pyplot as plt\n'), ((5235, 5252), 'matplotlib.pyplot.title', 'plt.title', (['"""AWGN"""'], {}), "('AWGN')\n", (5244, 5252), True, 'import matplotlib.pyplot as plt\n'), ((5254, 5280), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (5264, 5280), True, 'import matplotlib.pyplot as plt\n'), ((5527, 5541), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (5535, 5541), True, 'import matplotlib.pyplot as plt\n'), ((5543, 5570), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Carrier index"""'], {}), "('Carrier index')\n", (5553, 5570), True, 'import matplotlib.pyplot as plt\n'), ((5572, 5594), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$|H(f)|$"""'], {}), "('$|H(f)|$')\n", (5582, 5594), True, 'import matplotlib.pyplot as plt\n'), ((5596, 5619), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (5606, 5619), True, 'import matplotlib.pyplot as plt\n'), ((5620, 5634), 
'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(2)'], {}), '(0, 2)\n', (5628, 5634), True, 'import matplotlib.pyplot as plt\n'), ((5641, 5667), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 5)'}), '(figsize=(8, 5))\n', (5651, 5667), True, 'import matplotlib.pyplot as plt\n'), ((5667, 5728), 'matplotlib.pyplot.plot', 'plt.plot', (['SNR', 'pe_teor_ep'], {'label': '"""OFDM-Seletive- Theoretical"""'}), "(SNR, pe_teor_ep, label='OFDM-Seletive- Theoretical')\n", (5675, 5728), True, 'import matplotlib.pyplot as plt\n'), ((5729, 5862), 'matplotlib.pyplot.scatter', 'plt.scatter', (['SNR', 'pe_sim'], {'facecolor': '"""None"""', 'edgecolor': '"""r"""', 'label': '"""OFDM-Selective Channel- Linear Channel Estimation- Simulation"""'}), "(SNR, pe_sim, facecolor='None', edgecolor='r', label=\n 'OFDM-Selective Channel- Linear Channel Estimation- Simulation')\n", (5740, 5862), True, 'import matplotlib.pyplot as plt\n'), ((5858, 5964), 'matplotlib.pyplot.scatter', 'plt.scatter', (['SNR', 'pe_sim_awgn'], {'facecolor': '"""None"""', 'edgecolor': '"""b"""', 'label': '"""OFDM-AWGN Channel- Simulation"""'}), "(SNR, pe_sim_awgn, facecolor='None', edgecolor='b', label=\n 'OFDM-AWGN Channel- Simulation')\n", (5869, 5964), True, 'import matplotlib.pyplot as plt\n'), ((5960, 6095), 'matplotlib.pyplot.scatter', 'plt.scatter', (['SNR', 'pe_simp'], {'facecolor': '"""None"""', 'edgecolor': '"""g"""', 'label': '"""OFDM-Selective Channel- Perfect Channel Estimation- Simulation"""'}), "(SNR, pe_simp, facecolor='None', edgecolor='g', label=\n 'OFDM-Selective Channel- Perfect Channel Estimation- Simulation')\n", (5971, 6095), True, 'import matplotlib.pyplot as plt\n'), ((6091, 6153), 'matplotlib.pyplot.plot', 'plt.plot', (['SNR', 'pe_teor'], {'label': '"""OFDM-AWGN Channel- Theoretical"""'}), "(SNR, pe_teor, label='OFDM-AWGN Channel- Theoretical')\n", (6099, 6153), True, 'import matplotlib.pyplot as plt\n'), ((6154, 6171), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], 
{}), "('log')\n", (6164, 6171), True, 'import matplotlib.pyplot as plt\n'), ((6172, 6194), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""SNR (dB)"""'], {}), "('SNR (dB)')\n", (6182, 6194), True, 'import matplotlib.pyplot as plt\n'), ((6195, 6226), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Symbol Error Rate"""'], {}), "('Symbol Error Rate')\n", (6205, 6226), True, 'import matplotlib.pyplot as plt\n'), ((6227, 6237), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (6235, 6237), True, 'import matplotlib.pyplot as plt\n'), ((6238, 6261), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(10)'}), '(fontsize=10)\n', (6248, 6261), True, 'import matplotlib.pyplot as plt\n'), ((6262, 6280), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[10, 30]'], {}), '([10, 30])\n', (6270, 6280), True, 'import matplotlib.pyplot as plt\n'), ((6281, 6301), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[1e-05, 1]'], {}), '([1e-05, 1])\n', (6289, 6301), True, 'import matplotlib.pyplot as plt\n'), ((6308, 6319), 'time.time', 'time.time', ([], {}), '()\n', (6317, 6319), False, 'import time\n'), ((6383, 6393), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6391, 6393), True, 'import matplotlib.pyplot as plt\n'), ((1556, 1584), 'numpy.zeros_like', 'np.zeros_like', (['pilotCarriers'], {}), '(pilotCarriers)\n', (1569, 1584), True, 'import numpy as np\n'), ((1630, 1657), 'numpy.zeros_like', 'np.zeros_like', (['dataCarriers'], {}), '(dataCarriers)\n', (1643, 1657), True, 'import numpy as np\n'), ((2254, 2280), 'numpy.zeros', 'np.zeros', (['N'], {'dtype': 'complex'}), '(N, dtype=complex)\n', (2262, 2280), True, 'import numpy as np\n'), ((2448, 2458), 'numpy.sqrt', 'np.sqrt', (['N'], {}), '(N)\n', (2455, 2458), True, 'import numpy as np\n'), ((2459, 2473), 'numpy.fft.ifft', 'np.fft.ifft', (['d'], {}), '(d)\n', (2470, 2473), True, 'import numpy as np\n'), ((3223, 3268), 'numpy.convolve', 'np.convolve', (['st', 'channelResponse'], {'mode': '"""same"""'}), "(st, channelResponse, 
mode='same')\n", (3234, 3268), True, 'import numpy as np\n'), ((4306, 4324), 'numpy.sum', 'np.sum', (['(c != c_est)'], {}), '(c != c_est)\n', (4312, 4324), True, 'import numpy as np\n'), ((4343, 4362), 'numpy.sum', 'np.sum', (['(c != c_estp)'], {}), '(c != c_estp)\n', (4349, 4362), True, 'import numpy as np\n'), ((4385, 4408), 'numpy.sum', 'np.sum', (['(c != c_est_awgn)'], {}), '(c != c_est_awgn)\n', (4391, 4408), True, 'import numpy as np\n'), ((1251, 1278), 'numpy.array', 'np.array', (['[allCarriers[-1]]'], {}), '([allCarriers[-1]])\n', (1259, 1278), True, 'import numpy as np\n'), ((3094, 3109), 'numpy.sqrt', 'np.sqrt', (['(n0 / 2)'], {}), '(n0 / 2)\n', (3101, 3109), True, 'import numpy as np\n'), ((3320, 3333), 'numpy.fft.fft', 'np.fft.fft', (['y'], {}), '(y)\n', (3330, 3333), True, 'import numpy as np\n'), ((3361, 3375), 'numpy.fft.fft', 'np.fft.fft', (['y2'], {}), '(y2)\n', (3371, 3375), True, 'import numpy as np\n'), ((3884, 3909), 'numpy.exp', 'np.exp', (['(1.0j * Hest_phase)'], {}), '(1.0j * Hest_phase)\n', (3890, 3909), True, 'import numpy as np\n'), ((3308, 3318), 'numpy.sqrt', 'np.sqrt', (['N'], {}), '(N)\n', (3315, 3318), True, 'import numpy as np\n'), ((3350, 3360), 'numpy.sqrt', 'np.sqrt', (['N'], {}), '(N)\n', (3357, 3360), True, 'import numpy as np\n'), ((3808, 3832), 'numpy.angle', 'np.angle', (['Hest_at_pilots'], {}), '(Hest_at_pilots)\n', (3816, 3832), True, 'import numpy as np\n'), ((4501, 4522), 'numpy.sqrt', 'np.sqrt', (['(E * snr[idx])'], {}), '(E * snr[idx])\n', (4508, 4522), True, 'import numpy as np\n'), ((4521, 4531), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (4528, 4531), True, 'import numpy as np\n'), ((4886, 4896), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (4893, 4896), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2021 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import numpy as np
import pytest
import pandapower as pp
try:
import pplog as logging
except ImportError:
import logging
def test_cost_mixed():
    """ Testing a very simple network for the resulting cost value
    constraints with OPF """
    vm_max = 1.05
    vm_min = 0.95
    # create net: ext. grid (bus 0) -- line -- controllable gen + fixed load (bus 1)
    net = pp.create_empty_network()
    pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
    pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
    pp.create_gen(net, 1, p_mw=-0.1, controllable=True, min_p_mw=0.005, max_p_mw=0.15, max_q_mvar=.05,
                  min_q_mvar=-.05)
    pp.create_ext_grid(net, 0)
    pp.create_load(net, 1, p_mw=0.02, controllable=False, max_q_mvar=.05, max_p_mw=0.1, min_p_mw=0.0050,
                   min_q_mvar=-.05)
    pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876,
                                   c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
                                   max_loading_percent=100 * 690)
    # testing some combinations
    # 1) linear cost: objective must equal the generator's active power
    pp.create_poly_cost(net, 0, "gen", cp1_eur_per_mw=1)
    pp.runopp(net)
    assert net["OPF_converged"]
    assert np.isclose(net.res_cost, net.res_gen.p_mw.values[0])
    # 2) quadratic cost: objective must equal p_mw**2
    net.poly_cost.cp1_eur_per_mw.at[0] = 0
    net.poly_cost.cp2_eur_per_mw2.at[0] = 1
    pp.runopp(net)
    assert net["OPF_converged"]
    assert np.isclose(net.res_cost, net.res_gen.p_mw.values**2)
    # 3) quadratic cost with constant offset: objective must equal p_mw**2 + 1
    net.poly_cost.cp0_eur.at[0] = 1
    pp.runopp(net)
    assert net["OPF_converged"]
    assert np.isclose(net.res_cost, net.res_gen.p_mw.values**2 + 1)
    # 4) same cost function, but with the load controllable as well
    net.load.controllable.at[0] = True
    pp.runopp(net)
    assert np.isclose(net.res_cost, net.res_gen.p_mw.values ** 2 + 1)
    # 5) piecewise-linear cost on the ext grid combined with a linear gen cost:
    #    the ext grid should end up at ~0 MW, so the objective is gen p_mw * 1000
    net.load.controllable.at[0] = False
    net.pwl_cost.drop(net.pwl_cost.index, inplace=True)
    pp.create_pwl_cost(net, 0, "ext_grid", [[-1000, 0, -2000], [0, 1000, 2000]], power_type="p")
    net.poly_cost.cp1_eur_per_mw.at[0] = 1000
    net.poly_cost.cp2_eur_per_mw2.at[0] = 0
    pp.runopp(net)
    assert np.isclose(net.res_ext_grid.p_mw.values[0], 0, atol=1e-4)
    assert np.isclose(net.res_cost, net.res_gen.p_mw.values[0]*1000, atol=1e-3)
def test_mixed_p_q_pol():
    """OPF with a polynomial cost on both active and reactive generator power.

    The resulting objective must equal p_mw + q_mvar of the generator.
    """
    voltage_limits = dict(max_vm_pu=1.05, min_vm_pu=0.95)
    net = pp.create_empty_network()
    pp.create_bus(net, vn_kv=10.0, **voltage_limits)
    pp.create_bus(net, vn_kv=0.4, **voltage_limits)
    pp.create_gen(net, 1, p_mw=0.1, controllable=True,
                  min_p_mw=0.005, max_p_mw=0.15,
                  min_q_mvar=-0.05, max_q_mvar=0.05)
    pp.create_ext_grid(net, 0)
    pp.create_load(net, 1, p_mw=0.02, controllable=False,
                   min_p_mw=0.005, max_p_mw=0.1,
                   min_q_mvar=-0.05, max_q_mvar=0.05)
    pp.create_line_from_parameters(net, 0, 1, 50, name="line2",
                                   r_ohm_per_km=0.876, x_ohm_per_km=0.1159876,
                                   c_nf_per_km=260.0, max_i_ka=0.123,
                                   max_loading_percent=100 * 690)
    # unit linear cost on both P and Q of the generator
    pp.create_poly_cost(net, 0, "gen", cp1_eur_per_mw=1, cq1_eur_per_mvar=1)
    pp.runopp(net)
    assert net["OPF_converged"]
    assert np.isclose(net.res_cost, net.res_gen.p_mw.values + net.res_gen.q_mvar.values)
def test_mixed_p_q_pwl():
    """OPF with piecewise-linear costs on both P and Q of the generator.

    The resulting objective must equal p_mw + q_mvar of the generator.
    """
    voltage_limits = dict(max_vm_pu=1.05, min_vm_pu=0.95)
    net = pp.create_empty_network()
    pp.create_bus(net, vn_kv=10.0, **voltage_limits)
    pp.create_bus(net, vn_kv=0.4, **voltage_limits)
    pp.create_gen(net, 1, p_mw=0.1, controllable=True,
                  min_p_mw=0.005, max_p_mw=0.15,
                  min_q_mvar=-0.05, max_q_mvar=0.05)
    pp.create_ext_grid(net, 0)
    pp.create_load(net, 1, p_mw=0.02, controllable=False,
                   min_p_mw=0.005, max_p_mw=0.1,
                   min_q_mvar=-0.05, max_q_mvar=0.05)
    pp.create_line_from_parameters(net, 0, 1, 50, name="line2",
                                   r_ohm_per_km=0.876, x_ohm_per_km=0.1159876,
                                   c_nf_per_km=260.0, max_i_ka=0.123,
                                   max_loading_percent=100 * 690)
    # unit-slope piecewise-linear cost on active and reactive power
    pp.create_pwl_cost(net, 0, "gen", [[-150, 150, 1]])
    pp.create_pwl_cost(net, 0, "gen", [[-150, 150, 1]], power_type="q")
    pp.runopp(net)
    assert net["OPF_converged"]
    assert np.allclose(net.res_cost, net.res_gen.p_mw.values + net.res_gen.q_mvar.values)
if __name__ == "__main__":
pytest.main([__file__, "-xs"])
| [
"numpy.allclose",
"numpy.isclose",
"pandapower.create_ext_grid",
"pandapower.create_empty_network",
"pandapower.create_line_from_parameters",
"pandapower.create_pwl_cost",
"pandapower.create_poly_cost",
"pandapower.create_load",
"pandapower.runopp",
"pandapower.create_gen",
"pytest.main",
"pan... | [((506, 531), 'pandapower.create_empty_network', 'pp.create_empty_network', ([], {}), '()\n', (529, 531), True, 'import pandapower as pp\n'), ((536, 602), 'pandapower.create_bus', 'pp.create_bus', (['net'], {'max_vm_pu': 'vm_max', 'min_vm_pu': 'vm_min', 'vn_kv': '(10.0)'}), '(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.0)\n', (549, 602), True, 'import pandapower as pp\n'), ((606, 671), 'pandapower.create_bus', 'pp.create_bus', (['net'], {'max_vm_pu': 'vm_max', 'min_vm_pu': 'vm_min', 'vn_kv': '(0.4)'}), '(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=0.4)\n', (619, 671), True, 'import pandapower as pp\n'), ((675, 796), 'pandapower.create_gen', 'pp.create_gen', (['net', '(1)'], {'p_mw': '(-0.1)', 'controllable': '(True)', 'min_p_mw': '(0.005)', 'max_p_mw': '(0.15)', 'max_q_mvar': '(0.05)', 'min_q_mvar': '(-0.05)'}), '(net, 1, p_mw=-0.1, controllable=True, min_p_mw=0.005,\n max_p_mw=0.15, max_q_mvar=0.05, min_q_mvar=-0.05)\n', (688, 796), True, 'import pandapower as pp\n'), ((813, 839), 'pandapower.create_ext_grid', 'pp.create_ext_grid', (['net', '(0)'], {}), '(net, 0)\n', (831, 839), True, 'import pandapower as pp\n'), ((844, 966), 'pandapower.create_load', 'pp.create_load', (['net', '(1)'], {'p_mw': '(0.02)', 'controllable': '(False)', 'max_q_mvar': '(0.05)', 'max_p_mw': '(0.1)', 'min_p_mw': '(0.005)', 'min_q_mvar': '(-0.05)'}), '(net, 1, p_mw=0.02, controllable=False, max_q_mvar=0.05,\n max_p_mw=0.1, min_p_mw=0.005, min_q_mvar=-0.05)\n', (858, 966), True, 'import pandapower as pp\n'), ((985, 1163), 'pandapower.create_line_from_parameters', 'pp.create_line_from_parameters', (['net', '(0)', '(1)', '(50)'], {'name': '"""line2"""', 'r_ohm_per_km': '(0.876)', 'c_nf_per_km': '(260.0)', 'max_i_ka': '(0.123)', 'x_ohm_per_km': '(0.1159876)', 'max_loading_percent': '(100 * 690)'}), "(net, 0, 1, 50, name='line2', r_ohm_per_km=\n 0.876, c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,\n max_loading_percent=100 * 690)\n", (1015, 1163), True, 'import 
pandapower as pp\n'), ((1262, 1314), 'pandapower.create_poly_cost', 'pp.create_poly_cost', (['net', '(0)', '"""gen"""'], {'cp1_eur_per_mw': '(1)'}), "(net, 0, 'gen', cp1_eur_per_mw=1)\n", (1281, 1314), True, 'import pandapower as pp\n'), ((1319, 1333), 'pandapower.runopp', 'pp.runopp', (['net'], {}), '(net)\n', (1328, 1333), True, 'import pandapower as pp\n'), ((1377, 1429), 'numpy.isclose', 'np.isclose', (['net.res_cost', 'net.res_gen.p_mw.values[0]'], {}), '(net.res_cost, net.res_gen.p_mw.values[0])\n', (1387, 1429), True, 'import numpy as np\n'), ((1522, 1536), 'pandapower.runopp', 'pp.runopp', (['net'], {}), '(net)\n', (1531, 1536), True, 'import pandapower as pp\n'), ((1580, 1634), 'numpy.isclose', 'np.isclose', (['net.res_cost', '(net.res_gen.p_mw.values ** 2)'], {}), '(net.res_cost, net.res_gen.p_mw.values ** 2)\n', (1590, 1634), True, 'import numpy as np\n'), ((1674, 1688), 'pandapower.runopp', 'pp.runopp', (['net'], {}), '(net)\n', (1683, 1688), True, 'import pandapower as pp\n'), ((1732, 1790), 'numpy.isclose', 'np.isclose', (['net.res_cost', '(net.res_gen.p_mw.values ** 2 + 1)'], {}), '(net.res_cost, net.res_gen.p_mw.values ** 2 + 1)\n', (1742, 1790), True, 'import numpy as np\n'), ((1833, 1847), 'pandapower.runopp', 'pp.runopp', (['net'], {}), '(net)\n', (1842, 1847), True, 'import pandapower as pp\n'), ((1859, 1917), 'numpy.isclose', 'np.isclose', (['net.res_cost', '(net.res_gen.p_mw.values ** 2 + 1)'], {}), '(net.res_cost, net.res_gen.p_mw.values ** 2 + 1)\n', (1869, 1917), True, 'import numpy as np\n'), ((2019, 2115), 'pandapower.create_pwl_cost', 'pp.create_pwl_cost', (['net', '(0)', '"""ext_grid"""', '[[-1000, 0, -2000], [0, 1000, 2000]]'], {'power_type': '"""p"""'}), "(net, 0, 'ext_grid', [[-1000, 0, -2000], [0, 1000, 2000]],\n power_type='p')\n", (2037, 2115), True, 'import pandapower as pp\n'), ((2207, 2221), 'pandapower.runopp', 'pp.runopp', (['net'], {}), '(net)\n', (2216, 2221), True, 'import pandapower as pp\n'), ((2233, 2292), 
'numpy.isclose', 'np.isclose', (['net.res_ext_grid.p_mw.values[0]', '(0)'], {'atol': '(0.0001)'}), '(net.res_ext_grid.p_mw.values[0], 0, atol=0.0001)\n', (2243, 2292), True, 'import numpy as np\n'), ((2302, 2373), 'numpy.isclose', 'np.isclose', (['net.res_cost', '(net.res_gen.p_mw.values[0] * 1000)'], {'atol': '(0.001)'}), '(net.res_cost, net.res_gen.p_mw.values[0] * 1000, atol=0.001)\n', (2312, 2373), True, 'import numpy as np\n'), ((2463, 2488), 'pandapower.create_empty_network', 'pp.create_empty_network', ([], {}), '()\n', (2486, 2488), True, 'import pandapower as pp\n'), ((2493, 2559), 'pandapower.create_bus', 'pp.create_bus', (['net'], {'max_vm_pu': 'vm_max', 'min_vm_pu': 'vm_min', 'vn_kv': '(10.0)'}), '(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.0)\n', (2506, 2559), True, 'import pandapower as pp\n'), ((2563, 2628), 'pandapower.create_bus', 'pp.create_bus', (['net'], {'max_vm_pu': 'vm_max', 'min_vm_pu': 'vm_min', 'vn_kv': '(0.4)'}), '(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=0.4)\n', (2576, 2628), True, 'import pandapower as pp\n'), ((2632, 2753), 'pandapower.create_gen', 'pp.create_gen', (['net', '(1)'], {'p_mw': '(0.1)', 'controllable': '(True)', 'min_p_mw': '(0.005)', 'max_p_mw': '(0.15)', 'max_q_mvar': '(0.05)', 'min_q_mvar': '(-0.05)'}), '(net, 1, p_mw=0.1, controllable=True, min_p_mw=0.005, max_p_mw\n =0.15, max_q_mvar=0.05, min_q_mvar=-0.05)\n', (2645, 2753), True, 'import pandapower as pp\n'), ((2769, 2795), 'pandapower.create_ext_grid', 'pp.create_ext_grid', (['net', '(0)'], {}), '(net, 0)\n', (2787, 2795), True, 'import pandapower as pp\n'), ((2800, 2922), 'pandapower.create_load', 'pp.create_load', (['net', '(1)'], {'p_mw': '(0.02)', 'controllable': '(False)', 'max_q_mvar': '(0.05)', 'max_p_mw': '(0.1)', 'min_p_mw': '(0.005)', 'min_q_mvar': '(-0.05)'}), '(net, 1, p_mw=0.02, controllable=False, max_q_mvar=0.05,\n max_p_mw=0.1, min_p_mw=0.005, min_q_mvar=-0.05)\n', (2814, 2922), True, 'import pandapower as pp\n'), ((2941, 3119), 
'pandapower.create_line_from_parameters', 'pp.create_line_from_parameters', (['net', '(0)', '(1)', '(50)'], {'name': '"""line2"""', 'r_ohm_per_km': '(0.876)', 'c_nf_per_km': '(260.0)', 'max_i_ka': '(0.123)', 'x_ohm_per_km': '(0.1159876)', 'max_loading_percent': '(100 * 690)'}), "(net, 0, 1, 50, name='line2', r_ohm_per_km=\n 0.876, c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,\n max_loading_percent=100 * 690)\n", (2971, 3119), True, 'import pandapower as pp\n'), ((3218, 3290), 'pandapower.create_poly_cost', 'pp.create_poly_cost', (['net', '(0)', '"""gen"""'], {'cp1_eur_per_mw': '(1)', 'cq1_eur_per_mvar': '(1)'}), "(net, 0, 'gen', cp1_eur_per_mw=1, cq1_eur_per_mvar=1)\n", (3237, 3290), True, 'import pandapower as pp\n'), ((3295, 3309), 'pandapower.runopp', 'pp.runopp', (['net'], {}), '(net)\n', (3304, 3309), True, 'import pandapower as pp\n'), ((3353, 3430), 'numpy.isclose', 'np.isclose', (['net.res_cost', '(net.res_gen.p_mw.values + net.res_gen.q_mvar.values)'], {}), '(net.res_cost, net.res_gen.p_mw.values + net.res_gen.q_mvar.values)\n', (3363, 3430), True, 'import numpy as np\n'), ((3525, 3550), 'pandapower.create_empty_network', 'pp.create_empty_network', ([], {}), '()\n', (3548, 3550), True, 'import pandapower as pp\n'), ((3555, 3621), 'pandapower.create_bus', 'pp.create_bus', (['net'], {'max_vm_pu': 'vm_max', 'min_vm_pu': 'vm_min', 'vn_kv': '(10.0)'}), '(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.0)\n', (3568, 3621), True, 'import pandapower as pp\n'), ((3625, 3690), 'pandapower.create_bus', 'pp.create_bus', (['net'], {'max_vm_pu': 'vm_max', 'min_vm_pu': 'vm_min', 'vn_kv': '(0.4)'}), '(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=0.4)\n', (3638, 3690), True, 'import pandapower as pp\n'), ((3694, 3815), 'pandapower.create_gen', 'pp.create_gen', (['net', '(1)'], {'p_mw': '(0.1)', 'controllable': '(True)', 'min_p_mw': '(0.005)', 'max_p_mw': '(0.15)', 'max_q_mvar': '(0.05)', 'min_q_mvar': '(-0.05)'}), '(net, 1, p_mw=0.1, controllable=True, 
min_p_mw=0.005, max_p_mw\n =0.15, max_q_mvar=0.05, min_q_mvar=-0.05)\n', (3707, 3815), True, 'import pandapower as pp\n'), ((3831, 3857), 'pandapower.create_ext_grid', 'pp.create_ext_grid', (['net', '(0)'], {}), '(net, 0)\n', (3849, 3857), True, 'import pandapower as pp\n'), ((3862, 3984), 'pandapower.create_load', 'pp.create_load', (['net', '(1)'], {'p_mw': '(0.02)', 'controllable': '(False)', 'max_q_mvar': '(0.05)', 'max_p_mw': '(0.1)', 'min_p_mw': '(0.005)', 'min_q_mvar': '(-0.05)'}), '(net, 1, p_mw=0.02, controllable=False, max_q_mvar=0.05,\n max_p_mw=0.1, min_p_mw=0.005, min_q_mvar=-0.05)\n', (3876, 3984), True, 'import pandapower as pp\n'), ((4002, 4180), 'pandapower.create_line_from_parameters', 'pp.create_line_from_parameters', (['net', '(0)', '(1)', '(50)'], {'name': '"""line2"""', 'r_ohm_per_km': '(0.876)', 'c_nf_per_km': '(260.0)', 'max_i_ka': '(0.123)', 'x_ohm_per_km': '(0.1159876)', 'max_loading_percent': '(100 * 690)'}), "(net, 0, 1, 50, name='line2', r_ohm_per_km=\n 0.876, c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,\n max_loading_percent=100 * 690)\n", (4032, 4180), True, 'import pandapower as pp\n'), ((4279, 4330), 'pandapower.create_pwl_cost', 'pp.create_pwl_cost', (['net', '(0)', '"""gen"""', '[[-150, 150, 1]]'], {}), "(net, 0, 'gen', [[-150, 150, 1]])\n", (4297, 4330), True, 'import pandapower as pp\n'), ((4335, 4402), 'pandapower.create_pwl_cost', 'pp.create_pwl_cost', (['net', '(0)', '"""gen"""', '[[-150, 150, 1]]'], {'power_type': '"""q"""'}), "(net, 0, 'gen', [[-150, 150, 1]], power_type='q')\n", (4353, 4402), True, 'import pandapower as pp\n'), ((4407, 4421), 'pandapower.runopp', 'pp.runopp', (['net'], {}), '(net)\n', (4416, 4421), True, 'import pandapower as pp\n'), ((4465, 4543), 'numpy.allclose', 'np.allclose', (['net.res_cost', '(net.res_gen.p_mw.values + net.res_gen.q_mvar.values)'], {}), '(net.res_cost, net.res_gen.p_mw.values + net.res_gen.q_mvar.values)\n', (4476, 4543), True, 'import numpy as np\n'), ((4577, 4607), 
'pytest.main', 'pytest.main', (["[__file__, '-xs']"], {}), "([__file__, '-xs'])\n", (4588, 4607), False, 'import pytest\n')] |
import pyopenpose as op
import glob
from os.path import join
import cv2
import numpy as np
import tqdm
import json
from json import JSONEncoder
class NumpyArrayEncoder(JSONEncoder):
    """JSON encoder that serialises numpy arrays as plain Python lists."""

    def default(self, obj):
        # Convert ndarrays ourselves; defer everything else to the base
        # encoder (which raises TypeError for unserialisable objects).
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super().default(obj)
# Paths: input frame directory, output list file, and the OpenPose model folder.
source_dir = '/data/social_map/behave01'
output_file = '/data/social_map/list.txt'
model_folder = "/home/prota/Desktop/openpose/models"
# All PNG frames in the source directory, processed in sorted order.
filelist = glob.glob(join(source_dir, '*.png'))
filelist.sort()
### OPENPOSE PARAMS
params = dict()
params["model_folder"] = model_folder
params["face"] = False  # skip face keypoint detection
params["hand"] = False  # skip hand keypoint detection
params["num_gpu"] = 1
params["num_gpu_start"] = 0
# Configure and start the OpenPose processing pipeline.
opWrapper = op.WrapperPython()
opWrapper.configure(params)
opWrapper.start()
# For each frame: run OpenPose, locate each skeleton's heel position and
# write one JSON line per frame with the file name, skeleton count and the
# averaged heel coordinates.
with open(output_file, 'w') as of:
    for i, f in enumerate(tqdm.tqdm(filelist, desc='Files to check if skeleton is present:')):
        im = cv2.imread(f)
        im_size = (im.shape[1], im.shape[0])  # NOTE(review): unused below
        datum = op.Datum()
        datum.cvInputData = im
        opWrapper.emplaceAndPop([datum])
        # Keypoints rounded to 2 decimals; None when no person is detected,
        # which makes len() below raise TypeError (handled by the except).
        skeletal_coordinates = np.around(np.array(datum.poseKeypoints).tolist(), 2).tolist()
        d = dict()
        try:
            d['file'] = f
            d['n_skeletons'] = len(skeletal_coordinates)
            # of.write('{} {} '.format(f, len(skeletal_coordinates)))
            pos = list()
            for ske in skeletal_coordinates:
                heels = list()
                # Indices 21 and 24 are presumably the BODY_25 left/right
                # heel keypoints (variable names lh/rh) -- TODO confirm model.
                lh = np.asarray(ske[21][:2], dtype=np.int32)
                if lh.any() != 0:  # keep only detected (non-zero) keypoints
                    heels.append(lh)
                rh = np.asarray(ske[24][:2], dtype=np.int32)
                if rh.any() != 0:
                    heels.append(rh)
                av = [a.tolist() for a in heels]
                if len(av) > 0:
                    # Average the available heel coordinates (integer pixels).
                    av = np.mean(av, axis=0, dtype=np.int32)
                    # im = cv2.circle(im, av, 5, (255, 0, 0))
                    # cv2.imshow('image', im)
                    # cv2.waitKey(0)
                    pos.append(av.tolist())
            d['skels'] = pos
        except TypeError:
            # No skeleton in this frame: skip it entirely (no output line).
            continue
        j = json.dumps(d)
        of.write(j + '\n')
        of.flush()  # keep the list file usable while the run is in progress
| [
"numpy.mean",
"json.JSONEncoder.default",
"tqdm.tqdm",
"os.path.join",
"json.dumps",
"numpy.asarray",
"numpy.array",
"pyopenpose.Datum",
"pyopenpose.WrapperPython",
"cv2.imread"
] | [((717, 735), 'pyopenpose.WrapperPython', 'op.WrapperPython', ([], {}), '()\n', (733, 735), True, 'import pyopenpose as op\n'), ((490, 515), 'os.path.join', 'join', (['source_dir', '"""*.png"""'], {}), "(source_dir, '*.png')\n", (494, 515), False, 'from os.path import join\n'), ((299, 329), 'json.JSONEncoder.default', 'JSONEncoder.default', (['self', 'obj'], {}), '(self, obj)\n', (318, 329), False, 'from json import JSONEncoder\n'), ((844, 910), 'tqdm.tqdm', 'tqdm.tqdm', (['filelist'], {'desc': '"""Files to check if skeleton is present:"""'}), "(filelist, desc='Files to check if skeleton is present:')\n", (853, 910), False, 'import tqdm\n'), ((926, 939), 'cv2.imread', 'cv2.imread', (['f'], {}), '(f)\n', (936, 939), False, 'import cv2\n'), ((1001, 1011), 'pyopenpose.Datum', 'op.Datum', ([], {}), '()\n', (1009, 1011), True, 'import pyopenpose as op\n'), ((2147, 2160), 'json.dumps', 'json.dumps', (['d'], {}), '(d)\n', (2157, 2160), False, 'import json\n'), ((1485, 1524), 'numpy.asarray', 'np.asarray', (['ske[21][:2]'], {'dtype': 'np.int32'}), '(ske[21][:2], dtype=np.int32)\n', (1495, 1524), True, 'import numpy as np\n'), ((1617, 1656), 'numpy.asarray', 'np.asarray', (['ske[24][:2]'], {'dtype': 'np.int32'}), '(ske[24][:2], dtype=np.int32)\n', (1627, 1656), True, 'import numpy as np\n'), ((1834, 1869), 'numpy.mean', 'np.mean', (['av'], {'axis': '(0)', 'dtype': 'np.int32'}), '(av, axis=0, dtype=np.int32)\n', (1841, 1869), True, 'import numpy as np\n'), ((1126, 1155), 'numpy.array', 'np.array', (['datum.poseKeypoints'], {}), '(datum.poseKeypoints)\n', (1134, 1155), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 20 19:55:19 2019
@author: virati
Main script for forward modeling
"""
from DBSpace.control import proc_dEEG
import DBSpace as dbo
from DBSpace.visualizations import EEG_Viz
from DBSpace.control.TVB_DTI import DTI_support_model, plot_support_model, plot_EEG_masks
import scipy.stats as stats
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_context('paper')
sns.set(font_scale=3)
sns.set_style('white')
import mayavi
import mayavi.mlab as mlab
#assert(mayavi.__version__ == '4.7.1')
import pickle
import cmocean
plt.close('all')
mlab.close(all=True)
#%%
pt_list = ['906','907','908']
condit = 'OnT'
#%%
## Basic initialization methods, need to suppress figures from these and clean these up
eFrame = proc_dEEG.proc_dEEG(pts=pt_list,procsteps='conservative',condits=[condit])
eFrame.standard_pipeline()
#%%
eFrame.OnT_ctrl_dyn(condit=condit)
#%%
#The feature vector, in this case the frequencies
fvect = np.linspace(0,500,513)
do_coherence = False
#%%
# Here we do the forward modeling to do network dissection
#eFrame.pool_patients()
for band in ['Alpha']:
for pt in ['906']:
#30, 25 is good
EEG_support = DTI_support_model(pt,4,dti_parcel_thresh=20,eeg_thresh=50) #15,55 work
plot_support_model(EEG_support,pt,layers=[1,0,0])
plot_EEG_masks(EEG_support)
eFrame.support_analysis(support_struct=EEG_support,condit=condit,pt=pt,band=band,voltage=str(3)) | [
"seaborn.set",
"DBSpace.control.TVB_DTI.plot_support_model",
"seaborn.set_context",
"DBSpace.control.TVB_DTI.plot_EEG_masks",
"seaborn.set_style",
"matplotlib.pyplot.close",
"mayavi.mlab.close",
"numpy.linspace",
"DBSpace.control.TVB_DTI.DTI_support_model",
"DBSpace.control.proc_dEEG.proc_dEEG"
] | [((437, 461), 'seaborn.set_context', 'sns.set_context', (['"""paper"""'], {}), "('paper')\n", (452, 461), True, 'import seaborn as sns\n'), ((462, 483), 'seaborn.set', 'sns.set', ([], {'font_scale': '(3)'}), '(font_scale=3)\n', (469, 483), True, 'import seaborn as sns\n'), ((484, 506), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (497, 506), True, 'import seaborn as sns\n'), ((620, 636), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (629, 636), True, 'import matplotlib.pyplot as plt\n'), ((637, 657), 'mayavi.mlab.close', 'mlab.close', ([], {'all': '(True)'}), '(all=True)\n', (647, 657), True, 'import mayavi.mlab as mlab\n'), ((809, 885), 'DBSpace.control.proc_dEEG.proc_dEEG', 'proc_dEEG.proc_dEEG', ([], {'pts': 'pt_list', 'procsteps': '"""conservative"""', 'condits': '[condit]'}), "(pts=pt_list, procsteps='conservative', condits=[condit])\n", (828, 885), False, 'from DBSpace.control import proc_dEEG\n'), ((1012, 1036), 'numpy.linspace', 'np.linspace', (['(0)', '(500)', '(513)'], {}), '(0, 500, 513)\n', (1023, 1036), True, 'import numpy as np\n'), ((1236, 1297), 'DBSpace.control.TVB_DTI.DTI_support_model', 'DTI_support_model', (['pt', '(4)'], {'dti_parcel_thresh': '(20)', 'eeg_thresh': '(50)'}), '(pt, 4, dti_parcel_thresh=20, eeg_thresh=50)\n', (1253, 1297), False, 'from DBSpace.control.TVB_DTI import DTI_support_model, plot_support_model, plot_EEG_masks\n'), ((1315, 1368), 'DBSpace.control.TVB_DTI.plot_support_model', 'plot_support_model', (['EEG_support', 'pt'], {'layers': '[1, 0, 0]'}), '(EEG_support, pt, layers=[1, 0, 0])\n', (1333, 1368), False, 'from DBSpace.control.TVB_DTI import DTI_support_model, plot_support_model, plot_EEG_masks\n'), ((1375, 1402), 'DBSpace.control.TVB_DTI.plot_EEG_masks', 'plot_EEG_masks', (['EEG_support'], {}), '(EEG_support)\n', (1389, 1402), False, 'from DBSpace.control.TVB_DTI import DTI_support_model, plot_support_model, plot_EEG_masks\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.