code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import awkward as ak
import numpy as np
import pytest
from pytest_lazyfixture import lazy_fixture
from fast_carpenter.testing import FakeBEEvent
import fast_carpenter.tree_adapter as tree_adapter
from fast_carpenter.tree_adapter import ArrayMethods
###############################################################################
# Uproot3 tests
###############################################################################
@pytest.fixture
def uproot3_adapter(uproot3_tree):
    """Build a tree adapter wrapping the uproot3 test tree."""
    config = {"adapter": "uproot3", "tree": uproot3_tree}
    return tree_adapter.create(config)
def test_uproot3_num_entries(uproot3_tree, uproot3_adapter):
    """The adapter must report the same entry count as the raw uproot3 tree."""
    expected = uproot3_tree.numentries
    assert uproot3_adapter.num_entries == expected
def test_uproot3_getitem(uproot3_tree, uproot3_adapter):
    """Indexing the adapter must yield the branch contents unchanged."""
    direct = uproot3_tree["Muon_Py"].array()
    via_adapter = uproot3_adapter["Muon_Py"]
    assert ak.all(via_adapter == direct)
###############################################################################
# Uproot4 tests
###############################################################################
@pytest.fixture
def uproot4_adapter(uproot4_tree):
    """Build a tree adapter wrapping the uproot4 test tree."""
    config = {"adapter": "uproot4", "tree": uproot4_tree}
    return tree_adapter.create(config)
@pytest.fixture
def uproot4_ranged_adapter(uproot4_tree, event_range):
    """Adapter restricted to the [start_entry, stop_entry) event window."""
    config = {
        "adapter": "uproot4",
        "tree": uproot4_tree,
        "start": event_range.start_entry,
        "stop": event_range.stop_entry,
    }
    return tree_adapter.create_ranged(config)
@pytest.fixture
def uproot4_masked_adapter(uproot4_tree, event_range):
    """Ranged adapter that additionally masks out every odd-indexed event."""
    start = event_range.start_entry
    stop = event_range.stop_entry
    keep_even = [(i % 2) == 0 for i in range(start, stop)]
    config = {
        "adapter": "uproot4",
        "tree": uproot4_tree,
        "start": start,
        "stop": stop,
        "mask": keep_even,
    }
    return tree_adapter.create_masked(config)
def test_uproot4_num_entries(uproot4_tree, uproot4_adapter):
    """The adapter must report the same entry count as the raw uproot4 tree."""
    expected = uproot4_tree.num_entries
    assert uproot4_adapter.num_entries == expected
def test_uproot4_getitem(uproot4_tree, uproot4_adapter):
    """Indexing the adapter must yield the branch contents unchanged."""
    direct = uproot4_tree["Muon_Py"].array()
    via_adapter = uproot4_adapter["Muon_Py"]
    assert ak.all(via_adapter == direct)
def test_uproot4_evaluate(uproot4_tree, uproot4_adapter):
    """evaluate() must return one entry per event of the source branch."""
    product = uproot4_adapter.evaluate("Muon_Py * NMuon")
    expected_len = ak.num(uproot4_tree["Muon_Py"].array(), axis=0)
    assert ak.num(product, axis=0) == expected_len
def test_uproot4_range(uproot4_tree, uproot4_ranged_adapter, event_range):
    """A ranged adapter reports only the entries inside its window."""
    assert event_range.entries_in_block == uproot4_ranged_adapter.num_entries
def test_uproot4_add_retrieve(uproot4_tree, uproot4_ranged_adapter):
    """New variables can be stored on the adapter and read back intact."""
    px = uproot4_ranged_adapter["Muon_Px"]
    assert ArrayMethods.filtered_len(px) == len(uproot4_ranged_adapter)

    py, pz = uproot4_ranged_adapter.arrays(["Muon_Py", "Muon_Pz"], how=tuple)
    momentum = np.hypot(py, pz)
    uproot4_ranged_adapter.new_variable("Muon_momentum", momentum)

    # Round-trip: what we stored must come back with identical contents.
    round_trip = uproot4_ranged_adapter["Muon_momentum"]
    assert len(round_trip) == len(momentum)
    assert ak.all(ak.flatten(round_trip) == ak.flatten(momentum))
def test_overwrite(uproot4_ranged_adapter):
    """Redefining an existing branch must raise ValueError naming the branch."""
    muon_px = uproot4_ranged_adapter["Muon_Px"]
    assert ("Muon_Px" in uproot4_ranged_adapter)
    with pytest.raises(ValueError) as err:
        uproot4_ranged_adapter.new_variable("Muon_Px", muon_px / muon_px)
    # Inspect the exception itself, not the ExceptionInfo wrapper:
    # str(err) stringifies pytest's traceback entry, which is not
    # guaranteed to contain the exception message; str(err.value) is.
    assert "Muon_Px" in str(err.value)
def test_to_pandas(full_wrapped_tree):
    """to_pandas exposes exactly the requested branches as columns."""
    event = FakeBEEvent(full_wrapped_tree, "mc")
    branches = ['Electron_Px', 'Electron_Py', 'EventWeight']
    frame = ArrayMethods.to_pandas(event.tree, branches)
    assert list(frame.keys()) == branches
def test_arraydict_to_pandas_with_new_variable(uproot4_ranged_adapter):
    """A dict of arrays (including a derived one) converts to a DataFrame."""
    py, pz = uproot4_ranged_adapter.arrays(["Muon_Py", "Muon_Pz"], how=tuple)
    momentum = np.hypot(py, pz)
    uproot4_ranged_adapter.new_variable("Muon_momentum", momentum)

    columns = ['Muon_Py', 'Muon_Pz', 'Muon_momentum']
    frame = ArrayMethods.arraydict_to_pandas(
        {'Muon_Py': py, 'Muon_Pz': pz, 'Muon_momentum': momentum}
    )
    assert list(frame.keys()) == columns
    # The DataFrame is flattened: one row per (non-zero) muon entry.
    assert len(frame) == ak.count_nonzero(py)
@pytest.mark.parametrize(
    "tree_under_test",
    [
        lazy_fixture("uproot4_adapter"),
        lazy_fixture("uproot4_ranged_adapter"),
        lazy_fixture("uproot4_masked_adapter"),
    ]
)
def test_to_pandas_with_new_variable(tree_under_test):
    """to_pandas flattens both stored and newly added variables."""
    py, pz = tree_under_test.arrays(["Muon_Py", "Muon_Pz"], how=tuple)
    momentum = np.hypot(py, pz)
    assert len(py) == len(momentum)
    tree_under_test.new_variable("Muon_momentum", momentum)
    columns = ['Muon_Py', 'Muon_Pz', 'Muon_momentum']
    frame = ArrayMethods.to_pandas(tree_under_test, columns)
    assert list(frame.keys()) == columns
    assert len(frame) == ak.count_nonzero(py)
@pytest.mark.parametrize(
    "tree_under_test, how",
    [
        (lazy_fixture("uproot4_adapter"), tuple),
        (lazy_fixture("uproot4_adapter"), list),
        (lazy_fixture("uproot4_ranged_adapter"), tuple),
        (lazy_fixture("uproot4_ranged_adapter"), list),
        (lazy_fixture("uproot4_masked_adapter"), tuple),
        (lazy_fixture("uproot4_masked_adapter"), list),
    ]
)
def test_arrays_to_tuple_or_list(tree_under_test, how):
    """arrays() honours the requested container type for its output."""
    py, pz = tree_under_test.arrays(["Muon_Py", "Muon_Pz"], how=tuple)
    momentum = np.hypot(py, pz)
    tree_under_test.new_variable("Muon_momentum", momentum)
    returned = tree_under_test.arrays(["Muon_Py", "Muon_Pz", "Muon_momentum"], how=how)
    # The derived variable comes back last, matching the request order.
    assert ak.all(returned[2] == momentum)
@pytest.mark.parametrize(
    "tree_under_test",
    [
        lazy_fixture("uproot4_adapter"),
        lazy_fixture("uproot4_ranged_adapter"),
        lazy_fixture("uproot4_masked_adapter"),
    ]
)
def test_arrays_to_dict(tree_under_test):
    """arrays(..., how=dict) keys the output by branch name."""
    py, pz = tree_under_test.arrays(["Muon_Py", "Muon_Pz"], how=tuple)
    momentum = np.hypot(py, pz)
    tree_under_test.new_variable("Muon_momentum", momentum)
    by_name = tree_under_test.arrays(["Muon_Py", "Muon_Pz", "Muon_momentum"], how=dict)
    assert ak.all(by_name["Muon_momentum"] == momentum)
@pytest.mark.parametrize(
    "tree_under_test",
    [
        lazy_fixture("uproot4_adapter"),
        lazy_fixture("uproot4_ranged_adapter"),
        lazy_fixture("uproot4_masked_adapter"),
    ]
)
def test_arrays_as_np_lists(tree_under_test):
    """arrays_as_np_array returns the requested branches, new variable last."""
    py, pz = tree_under_test.arrays(["Muon_Py", "Muon_Pz"], how=tuple)
    momentum = np.hypot(py, pz)
    tree_under_test.new_variable("Muon_momentum", momentum)
    as_np = ArrayMethods.arrays_as_np_array(
        tree_under_test, ["Muon_Py", "Muon_Pz", "Muon_momentum"], how=dict)
    assert ak.all(as_np[-1] == momentum)
| [
"fast_carpenter.tree_adapter.ArrayMethods.to_pandas",
"awkward.count_nonzero",
"fast_carpenter.tree_adapter.ArrayMethods.filtered_len",
"pytest_lazyfixture.lazy_fixture",
"fast_carpenter.tree_adapter.ArrayMethods.arrays_as_np_array",
"fast_carpenter.tree_adapter.create",
"awkward.all",
"awkward.num",
... | [((492, 557), 'fast_carpenter.tree_adapter.create', 'tree_adapter.create', (["{'adapter': 'uproot3', 'tree': uproot3_tree}"], {}), "({'adapter': 'uproot3', 'tree': uproot3_tree})\n", (511, 557), True, 'import fast_carpenter.tree_adapter as tree_adapter\n'), ((1068, 1133), 'fast_carpenter.tree_adapter.create', 'tree_adapter.create', (["{'adapter': 'uproot4', 'tree': uproot4_tree}"], {}), "({'adapter': 'uproot4', 'tree': uproot4_tree})\n", (1087, 1133), True, 'import fast_carpenter.tree_adapter as tree_adapter\n'), ((1218, 1360), 'fast_carpenter.tree_adapter.create_ranged', 'tree_adapter.create_ranged', (["{'adapter': 'uproot4', 'tree': uproot4_tree, 'start': event_range.\n start_entry, 'stop': event_range.stop_entry}"], {}), "({'adapter': 'uproot4', 'tree': uproot4_tree,\n 'start': event_range.start_entry, 'stop': event_range.stop_entry})\n", (1244, 1360), True, 'import fast_carpenter.tree_adapter as tree_adapter\n'), ((2732, 2758), 'numpy.hypot', 'np.hypot', (['muon_py', 'muon_pz'], {}), '(muon_py, muon_pz)\n', (2740, 2758), True, 'import numpy as np\n'), ((3376, 3412), 'fast_carpenter.testing.FakeBEEvent', 'FakeBEEvent', (['full_wrapped_tree', '"""mc"""'], {}), "(full_wrapped_tree, 'mc')\n", (3387, 3412), False, 'from fast_carpenter.testing import FakeBEEvent\n'), ((3481, 3523), 'fast_carpenter.tree_adapter.ArrayMethods.to_pandas', 'ArrayMethods.to_pandas', (['chunk.tree', 'inputs'], {}), '(chunk.tree, inputs)\n', (3503, 3523), False, 'from fast_carpenter.tree_adapter import ArrayMethods\n'), ((3743, 3769), 'numpy.hypot', 'np.hypot', (['muon_py', 'muon_pz'], {}), '(muon_py, muon_pz)\n', (3751, 3769), True, 'import numpy as np\n'), ((4022, 4062), 'fast_carpenter.tree_adapter.ArrayMethods.arraydict_to_pandas', 'ArrayMethods.arraydict_to_pandas', (['arrays'], {}), '(arrays)\n', (4054, 4062), False, 'from fast_carpenter.tree_adapter import ArrayMethods\n'), ((4507, 4533), 'numpy.hypot', 'np.hypot', (['muon_py', 'muon_pz'], {}), '(muon_py, muon_pz)\n', (4515, 
4533), True, 'import numpy as np\n'), ((4708, 4755), 'fast_carpenter.tree_adapter.ArrayMethods.to_pandas', 'ArrayMethods.to_pandas', (['tree_under_test', 'inputs'], {}), '(tree_under_test, inputs)\n', (4730, 4755), False, 'from fast_carpenter.tree_adapter import ArrayMethods\n'), ((5394, 5420), 'numpy.hypot', 'np.hypot', (['muon_py', 'muon_pz'], {}), '(muon_py, muon_pz)\n', (5402, 5420), True, 'import numpy as np\n'), ((5600, 5642), 'awkward.all', 'ak.all', (['(muon_momentum_new == muon_momentum)'], {}), '(muon_momentum_new == muon_momentum)\n', (5606, 5642), True, 'import awkward as ak\n'), ((5988, 6014), 'numpy.hypot', 'np.hypot', (['muon_py', 'muon_pz'], {}), '(muon_py, muon_pz)\n', (5996, 6014), True, 'import numpy as np\n'), ((6182, 6234), 'awkward.all', 'ak.all', (["(array_dict['Muon_momentum'] == muon_momentum)"], {}), "(array_dict['Muon_momentum'] == muon_momentum)\n", (6188, 6234), True, 'import awkward as ak\n'), ((6584, 6610), 'numpy.hypot', 'np.hypot', (['muon_py', 'muon_pz'], {}), '(muon_py, muon_pz)\n', (6592, 6610), True, 'import numpy as np\n'), ((6691, 6794), 'fast_carpenter.tree_adapter.ArrayMethods.arrays_as_np_array', 'ArrayMethods.arrays_as_np_array', (['tree_under_test', "['Muon_Py', 'Muon_Pz', 'Muon_momentum']"], {'how': 'dict'}), "(tree_under_test, ['Muon_Py', 'Muon_Pz',\n 'Muon_momentum'], how=dict)\n", (6722, 6794), False, 'from fast_carpenter.tree_adapter import ArrayMethods\n'), ((6802, 6839), 'awkward.all', 'ak.all', (['(np_array[-1] == muon_momentum)'], {}), '(np_array[-1] == muon_momentum)\n', (6808, 6839), True, 'import awkward as ak\n'), ((2198, 2220), 'awkward.num', 'ak.num', (['result'], {'axis': '(0)'}), '(result, axis=0)\n', (2204, 2220), True, 'import awkward as ak\n'), ((2557, 2591), 'fast_carpenter.tree_adapter.ArrayMethods.filtered_len', 'ArrayMethods.filtered_len', (['muon_px'], {}), '(muon_px)\n', (2582, 2591), False, 'from fast_carpenter.tree_adapter import ArrayMethods\n'), ((3182, 3207), 'pytest.raises', 
'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3195, 3207), False, 'import pytest\n'), ((4123, 4148), 'awkward.count_nonzero', 'ak.count_nonzero', (['muon_py'], {}), '(muon_py)\n', (4139, 4148), True, 'import awkward as ak\n'), ((4816, 4841), 'awkward.count_nonzero', 'ak.count_nonzero', (['muon_py'], {}), '(muon_py)\n', (4832, 4841), True, 'import awkward as ak\n'), ((4214, 4245), 'pytest_lazyfixture.lazy_fixture', 'lazy_fixture', (['"""uproot4_adapter"""'], {}), "('uproot4_adapter')\n", (4226, 4245), False, 'from pytest_lazyfixture import lazy_fixture\n'), ((4255, 4293), 'pytest_lazyfixture.lazy_fixture', 'lazy_fixture', (['"""uproot4_ranged_adapter"""'], {}), "('uproot4_ranged_adapter')\n", (4267, 4293), False, 'from pytest_lazyfixture import lazy_fixture\n'), ((4303, 4341), 'pytest_lazyfixture.lazy_fixture', 'lazy_fixture', (['"""uproot4_masked_adapter"""'], {}), "('uproot4_masked_adapter')\n", (4315, 4341), False, 'from pytest_lazyfixture import lazy_fixture\n'), ((5708, 5739), 'pytest_lazyfixture.lazy_fixture', 'lazy_fixture', (['"""uproot4_adapter"""'], {}), "('uproot4_adapter')\n", (5720, 5739), False, 'from pytest_lazyfixture import lazy_fixture\n'), ((5749, 5787), 'pytest_lazyfixture.lazy_fixture', 'lazy_fixture', (['"""uproot4_ranged_adapter"""'], {}), "('uproot4_ranged_adapter')\n", (5761, 5787), False, 'from pytest_lazyfixture import lazy_fixture\n'), ((5797, 5835), 'pytest_lazyfixture.lazy_fixture', 'lazy_fixture', (['"""uproot4_masked_adapter"""'], {}), "('uproot4_masked_adapter')\n", (5809, 5835), False, 'from pytest_lazyfixture import lazy_fixture\n'), ((6300, 6331), 'pytest_lazyfixture.lazy_fixture', 'lazy_fixture', (['"""uproot4_adapter"""'], {}), "('uproot4_adapter')\n", (6312, 6331), False, 'from pytest_lazyfixture import lazy_fixture\n'), ((6341, 6379), 'pytest_lazyfixture.lazy_fixture', 'lazy_fixture', (['"""uproot4_ranged_adapter"""'], {}), "('uproot4_ranged_adapter')\n", (6353, 6379), False, 'from pytest_lazyfixture import 
lazy_fixture\n'), ((6389, 6427), 'pytest_lazyfixture.lazy_fixture', 'lazy_fixture', (['"""uproot4_masked_adapter"""'], {}), "('uproot4_masked_adapter')\n", (6401, 6427), False, 'from pytest_lazyfixture import lazy_fixture\n'), ((2970, 2999), 'awkward.flatten', 'ak.flatten', (['retrieve_momentum'], {}), '(retrieve_momentum)\n', (2980, 2999), True, 'import awkward as ak\n'), ((3003, 3028), 'awkward.flatten', 'ak.flatten', (['muon_momentum'], {}), '(muon_momentum)\n', (3013, 3028), True, 'import awkward as ak\n'), ((4913, 4944), 'pytest_lazyfixture.lazy_fixture', 'lazy_fixture', (['"""uproot4_adapter"""'], {}), "('uproot4_adapter')\n", (4925, 4944), False, 'from pytest_lazyfixture import lazy_fixture\n'), ((4963, 4994), 'pytest_lazyfixture.lazy_fixture', 'lazy_fixture', (['"""uproot4_adapter"""'], {}), "('uproot4_adapter')\n", (4975, 4994), False, 'from pytest_lazyfixture import lazy_fixture\n'), ((5012, 5050), 'pytest_lazyfixture.lazy_fixture', 'lazy_fixture', (['"""uproot4_ranged_adapter"""'], {}), "('uproot4_ranged_adapter')\n", (5024, 5050), False, 'from pytest_lazyfixture import lazy_fixture\n'), ((5069, 5107), 'pytest_lazyfixture.lazy_fixture', 'lazy_fixture', (['"""uproot4_ranged_adapter"""'], {}), "('uproot4_ranged_adapter')\n", (5081, 5107), False, 'from pytest_lazyfixture import lazy_fixture\n'), ((5125, 5163), 'pytest_lazyfixture.lazy_fixture', 'lazy_fixture', (['"""uproot4_masked_adapter"""'], {}), "('uproot4_masked_adapter')\n", (5137, 5163), False, 'from pytest_lazyfixture import lazy_fixture\n'), ((5182, 5220), 'pytest_lazyfixture.lazy_fixture', 'lazy_fixture', (['"""uproot4_masked_adapter"""'], {}), "('uproot4_masked_adapter')\n", (5194, 5220), False, 'from pytest_lazyfixture import lazy_fixture\n')] |
# coding: utf8
import numpy as np
class FootTrajectoryGenerator:
    """A foot trajectory generator that handles the generation of a 3D trajectory
    with a 5th order polynomial to lead each foot from its location at the start of
    its swing phase to its final location that has been decided by the FootstepPlanner

    :param dt: A float, time step of the contact sequence
    """

    def __init__(self, dt):
        # Position of shoulders in local frame (2 x 4: x row then y row,
        # one column per leg)
        self.shoulders = np.array(
            [[0.1946, 0.1946, -0.1946, -0.1946], [0.14695, -0.14695, 0.14695, -0.14695]])

        # Time step of the trajectory generator
        self.dt = dt

        # Desired (x, y) position of footsteps without lock mechanism before impact
        # Received from the FootstepPlanner
        # self.footsteps = self.shoulders.copy()

        # Desired (x, y) position of footsteps with lock mechanism before impact
        # NOTE(review): R is a 90-degree planar rotation applied to the shoulder
        # layout -- presumably a frame-convention change; confirm against callers.
        R = np.array([[0.0, -1.0], [1.0, 0.0]])
        self.footsteps_lock = R @ self.shoulders.copy()

        # Desired footsteps with lock in world frame for visualisation purpose
        self.footsteps_lock_world = self.footsteps_lock.copy()

        # Desired position, velocity and acceleration of feet in 3D, in local frame
        # (3 x 4 arrays: rows are x/y/z, columns are the four feet, z starts at 0)
        self.desired_pos = np.vstack((R @ self.shoulders, np.zeros((1, 4))))
        self.desired_vel = np.zeros(self.desired_pos.shape)
        self.desired_acc = np.zeros(self.desired_pos.shape)

        # Desired 3D position in world frame for visualisation purpose
        self.desired_pos_world = self.desired_pos.copy()

        # Maximum height at which the robot should lift its feet during swing phase
        self.max_height_feet = 0.02

        # Lock target positions of footholds before touchdown
        self.t_lock_before_touchdown = 0.01

        # Foot trajectory generator objects (one for each foot)
        self.ftgs = [Foot_trajectory_generator(
            self.max_height_feet, self.t_lock_before_touchdown) for i in range(4)]

        # Initialization of ftgs objects: seed each generator with the
        # foot's current (x, y) position as its target
        for i in range(4):
            self.ftgs[i].x1 = self.desired_pos[0, i]
            self.ftgs[i].y1 = self.desired_pos[1, i]

        # First call to update_desired_feet_pos only seeds the generators;
        # results are applied from the second call onwards
        self.flag_initialisation = False

    def update_desired_feet_pos(self, sequencer, fstep_planner, mpc):
        """Update desired position/velocity/acceleration of each foot.

        Runs each per-foot polynomial generator towards the target decided
        by the FootstepPlanner, then mirrors the results into world frame
        for visualisation.

        :param sequencer: provides the contact sequence S, gait period and time step
        :param fstep_planner: provides the target footsteps (local frame)
        :param mpc: provides the trunk state q_w (world frame)
        """
        # Initialisation of rotation from local frame to world frame
        # (yaw-only rotation built from the trunk yaw angle q_w[5])
        c, s = np.cos(mpc.q_w[5, 0]), np.sin(mpc.q_w[5, 0])
        R = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])

        # Initialisation of trajectory parameters
        x0 = 0.0
        dx0 = 0.0
        ddx0 = 0.0
        y0 = 0.0
        dy0 = 0.0
        ddy0 = 0.0
        z0 = 0.0
        dz0 = 0.0
        ddz0 = 0.0

        # The swing phase lasts T seconds
        t1 = sequencer.T_gait - sequencer.t_stance

        # For each foot
        for i in range(4):
            # Time remaining before touchdown: first index at which foot i
            # is scheduled to be in contact again
            index = (np.where(sequencer.S[:, i] == True))[0][0]
            t0 = t1 - index * sequencer.dt

            # Current position of the foot
            x0 = self.desired_pos[0, i]
            y0 = self.desired_pos[1, i]

            # Target position of the foot
            x1 = fstep_planner.footsteps[0, i]
            y1 = fstep_planner.footsteps[1, i]

            # Update if the foot is in swing phase or is going to leave the ground
            if ((sequencer.S[0, i] == True) and (sequencer.S[1, i] == False)):
                t0 = 0

            if (t0 != t1) and (t0 != (t1 - sequencer.dt)):
                # Get desired 3D position from the 5th-order polynomial generator
                [x0, dx0, ddx0, y0, dy0, ddy0, z0, dz0, ddz0, gx1, gy1] = (self.ftgs[i]).get_next_foot(
                    x0, self.desired_vel[0, i], self.desired_acc[0, i],
                    y0, self.desired_vel[1, i], self.desired_acc[1, i],
                    x1, y1, t0, t1, self.dt)

                if self.flag_initialisation:
                    # Retrieve result in terms of position, velocity and acceleration
                    self.desired_pos[:, i] = np.array([x0, y0, z0])
                    self.desired_vel[:, i] = np.array([dx0, dy0, dz0])
                    self.desired_acc[:, i] = np.array([ddx0, ddy0, ddz0])

                    # Update target position of the foot with lock
                    self.footsteps_lock[:, i] = np.array([gx1, gy1])

                    # Update variables in world frame (translate by the trunk
                    # position and rotate by its yaw)
                    self.desired_pos_world[:, i:(i+1)] = np.vstack((mpc.q_w[0:2, 0:1], np.zeros((1, 1)))) + \
                        np.dot(R, self.desired_pos[:, i:(i+1)])
                    self.footsteps_lock_world[:, i:(i+1)] = mpc.q_w[0:2, 0:1] + \
                        np.dot(R[0:2, 0:2], self.footsteps_lock[:, i:(i+1)])
            else:
                # Foot about to touch down (or already down): freeze it
                self.desired_vel[:, i] = np.array([0.0, 0.0, 0.0])
                self.desired_acc[:, i] = np.array([0.0, 0.0, 0.0])

        if not self.flag_initialisation:
            self.flag_initialisation = True

        return 0

    def update_frame(self, vel):
        """As we are working in local frame, the footsteps drift backwards
        if the trunk is moving forwards as footsteps are not supposed to move
        in the world frame

        Keyword arguments:
        vel -- Current velocity vector of the flying base (6 by 1, linear and angular stacked)
        """
        # Displacement along x and y over one time step, plus yaw rotation
        c, s = np.cos(- vel[5, 0] * self.dt), np.sin(- vel[5, 0] * self.dt)
        R = np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])

        # Update desired 3D position (translate then rotate into the new frame)
        self.desired_pos = np.dot(R, self.desired_pos -
                                  self.dt * np.vstack((np.tile(vel[0:2, 0:1], (1, 4)), np.zeros((1, 4)))))

        # Update desired 2D location of footsteps
        self.footsteps_lock = np.dot(R[0:2, 0:2], self.footsteps_lock
                                     - self.dt * np.tile(vel[0:2, 0:1], (1, 4)))

        return 0

    def update_viewer(self, viewer, initialisation):
        """Update display for visualization purpose

        Keyword arguments:
        :param viewer: A gepetto viewer object
        :param initialisation: A bool, is it the first iteration of the main loop
        """
        # Display locked target footholds with red spheres (gepetto gui)
        rgbt = [1.0, 0.0, 0.0, 0.5]
        for i in range(4):
            if initialisation:
                viewer.gui.addSphere("world/sphere"+str(i)+"_lock", .025, rgbt)  # sphere radius
            viewer.gui.applyConfiguration("world/sphere"+str(i)+"_lock",
                                          (self.footsteps_lock_world[0, i], self.footsteps_lock_world[1, i],
                                           0.0, 1., 0., 0., 0.))

        # Display desired 3D position of feet with magenta spheres (gepetto gui)
        rgbt = [1.0, 0.0, 1.0, 0.5]
        for i in range(4):
            if initialisation:
                viewer.gui.addSphere("world/sphere"+str(i)+"_des", .03, rgbt)  # sphere radius
            viewer.gui.applyConfiguration("world/sphere"+str(i)+"_des",
                                          (self.desired_pos_world[0, i], self.desired_pos_world[1, i],
                                           self.desired_pos_world[2, i], 1., 0., 0., 0.))

        return 0
# @thomasfla's trajectory generator
class Foot_trajectory_generator(object):
    '''This class provide adaptative 3d trajectory for a foot from (x0,y0) to (x1,y1) using polynoms

    A foot trajectory generator that handles the generation of a 3D trajectory
    with a 5th order polynomial to lead each foot from its location at the start of
    its swing phase to its final location that has been decided by the FootstepPlanner

    Args:
        - h (float): the height at which feet should be raised at the apex of the wing phase
        - time_adaptative_disabled (float): how much time before touchdown is the desired position locked
    '''

    def __init__(self, h=0.03, time_adaptative_disabled=0.200, x_init=0.0, y_init=0.0):
        # maximum heigth for the z coordonate
        self.h = h

        # when there is less than this time for the trajectory to finish, disable adaptative (using last computed coefficients)
        # this parameter should always be a positive number less than the durration of a step
        self.time_adaptative_disabled = time_adaptative_disabled

        # memory of the last coeffs (5th-order polynomial, highest degree first)
        self.lastCoeffs_x = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        self.lastCoeffs_y = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        # self.lastCoeffs = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        #                    0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]

        # Last/locked target position of the foot
        self.x1 = x_init
        self.y1 = y_init

        # express acceleration as: ddx0 = (coeff_acc_x_lin_a) * x1 + coeff_acc_x_lin_b
        #                          ddy0 = (coeff_acc_y_lin_a) * x1 + coeff_acc_y_lin_b
        # Remark : When the trajectory becomes non-adaptative coeff_acc_x_lin_a is = 0.0 and coeff_acc_y_lin_b contains the full information of the acceleration!
        #self.coeff_acc_x_lin_a = 0.0
        #self.coeff_acc_x_lin_b = 0.0
        #self.coeff_acc_y_lin_a = 0.0
        #self.coeff_acc_y_lin_b = 0.0

    def get_next_foot(self, x0, dx0, ddx0, y0, dy0, ddy0, x1, y1, t0, t1, dt):
        '''how to reach a foot position (here using polynomials profiles)

        Given the current state (position/velocity/acceleration) of the foot in
        x and y, the target (x1, y1), the elapsed swing time t0, the total
        swing duration t1 and the sampling period dt, returns the state one
        time step ahead plus the (possibly locked) target:
        [x, dx, ddx, y, dy, ddy, z, dz, ddz, x_target, y_target]
        '''
        epsilon = 0.00
        t2 = t1
        t3 = t0
        t1 -= 2*epsilon
        t0 -= epsilon
        h = self.h

        # Adaptive while enough time remains before touchdown; afterwards the
        # last computed coefficients are reused so the target stays locked.
        adaptative_mode = (t1 - t0) > self.time_adaptative_disabled
        if(adaptative_mode):
            # compute polynoms coefficients for x and y
            Ax5 = (ddx0*t0**2 - 2*ddx0*t0*t1 - 6*dx0*t0 + ddx0*t1**2 + 6*dx0*t1 + 12 *
                   x0 - 12*x1)/(2*(t0 - t1)**2*(t0**3 - 3*t0**2*t1 + 3*t0*t1**2 - t1**3))
            Ax4 = (30*t0*x1 - 30*t0*x0 - 30*t1*x0 + 30*t1*x1 - 2*t0**3*ddx0 - 3*t1**3*ddx0 + 14*t0**2*dx0 - 16*t1**2*dx0 +
                   2*t0*t1*dx0 + 4*t0*t1**2*ddx0 + t0**2*t1*ddx0)/(2*(t0 - t1)**2*(t0**3 - 3*t0**2*t1 + 3*t0*t1**2 - t1**3))
            Ax3 = (t0**4*ddx0 + 3*t1**4*ddx0 - 8*t0**3*dx0 + 12*t1**3*dx0 + 20*t0**2*x0 - 20*t0**2*x1 + 20*t1**2*x0 - 20*t1**2*x1 + 80*t0*t1*x0 - 80*t0 *
                   t1*x1 + 4*t0**3*t1*ddx0 + 28*t0*t1**2*dx0 - 32*t0**2*t1*dx0 - 8*t0**2*t1**2*ddx0)/(2*(t0 - t1)**2*(t0**3 - 3*t0**2*t1 + 3*t0*t1**2 - t1**3))
            Ax2 = -(t1**5*ddx0 + 4*t0*t1**4*ddx0 + 3*t0**4*t1*ddx0 + 36*t0*t1**3*dx0 - 24*t0**3*t1*dx0 + 60*t0*t1**2*x0 + 60*t0**2*t1*x0 - 60*t0*t1 **
                    2*x1 - 60*t0**2*t1*x1 - 8*t0**2*t1**3*ddx0 - 12*t0**2*t1**2*dx0)/(2*(t0**2 - 2*t0*t1 + t1**2)*(t0**3 - 3*t0**2*t1 + 3*t0*t1**2 - t1**3))
            Ax1 = -(2*t1**5*dx0 - 2*t0*t1**5*ddx0 - 10*t0*t1**4*dx0 + t0**2*t1**4*ddx0 + 4*t0**3*t1**3*ddx0 - 3*t0**4*t1**2*ddx0 - 16*t0**2 *
                    t1**3*dx0 + 24*t0**3*t1**2*dx0 - 60*t0**2*t1**2*x0 + 60*t0**2*t1**2*x1)/(2*(t0 - t1)**2*(t0**3 - 3*t0**2*t1 + 3*t0*t1**2 - t1**3))
            Ax0 = (2*x1*t0**5 - ddx0*t0**4*t1**3 - 10*x1*t0**4*t1 + 2*ddx0*t0**3*t1**4 + 8*dx0*t0**3*t1**3 + 20*x1*t0**3*t1**2 - ddx0*t0**2*t1**5 - 10*dx0*t0 **
                   2*t1**4 - 20*x0*t0**2*t1**3 + 2*dx0*t0*t1**5 + 10*x0*t0*t1**4 - 2*x0*t1**5)/(2*(t0**2 - 2*t0*t1 + t1**2)*(t0**3 - 3*t0**2*t1 + 3*t0*t1**2 - t1**3))

            Ay5 = (ddy0*t0**2 - 2*ddy0*t0*t1 - 6*dy0*t0 + ddy0*t1**2 + 6*dy0*t1 + 12 *
                   y0 - 12*y1)/(2*(t0 - t1)**2*(t0**3 - 3*t0**2*t1 + 3*t0*t1**2 - t1**3))
            Ay4 = (30*t0*y1 - 30*t0*y0 - 30*t1*y0 + 30*t1*y1 - 2*t0**3*ddy0 - 3*t1**3*ddy0 + 14*t0**2*dy0 - 16*t1**2*dy0 +
                   2*t0*t1*dy0 + 4*t0*t1**2*ddy0 + t0**2*t1*ddy0)/(2*(t0 - t1)**2*(t0**3 - 3*t0**2*t1 + 3*t0*t1**2 - t1**3))
            Ay3 = (t0**4*ddy0 + 3*t1**4*ddy0 - 8*t0**3*dy0 + 12*t1**3*dy0 + 20*t0**2*y0 - 20*t0**2*y1 + 20*t1**2*y0 - 20*t1**2*y1 + 80*t0*t1*y0 - 80*t0 *
                   t1*y1 + 4*t0**3*t1*ddy0 + 28*t0*t1**2*dy0 - 32*t0**2*t1*dy0 - 8*t0**2*t1**2*ddy0)/(2*(t0 - t1)**2*(t0**3 - 3*t0**2*t1 + 3*t0*t1**2 - t1**3))
            Ay2 = -(t1**5*ddy0 + 4*t0*t1**4*ddy0 + 3*t0**4*t1*ddy0 + 36*t0*t1**3*dy0 - 24*t0**3*t1*dy0 + 60*t0*t1**2*y0 + 60*t0**2*t1*y0 - 60*t0*t1 **
                    2*y1 - 60*t0**2*t1*y1 - 8*t0**2*t1**3*ddy0 - 12*t0**2*t1**2*dy0)/(2*(t0**2 - 2*t0*t1 + t1**2)*(t0**3 - 3*t0**2*t1 + 3*t0*t1**2 - t1**3))
            Ay1 = -(2*t1**5*dy0 - 2*t0*t1**5*ddy0 - 10*t0*t1**4*dy0 + t0**2*t1**4*ddy0 + 4*t0**3*t1**3*ddy0 - 3*t0**4*t1**2*ddy0 - 16*t0**2 *
                    t1**3*dy0 + 24*t0**3*t1**2*dy0 - 60*t0**2*t1**2*y0 + 60*t0**2*t1**2*y1)/(2*(t0 - t1)**2*(t0**3 - 3*t0**2*t1 + 3*t0*t1**2 - t1**3))
            Ay0 = (2*y1*t0**5 - ddy0*t0**4*t1**3 - 10*y1*t0**4*t1 + 2*ddy0*t0**3*t1**4 + 8*dy0*t0**3*t1**3 + 20*y1*t0**3*t1**2 - ddy0*t0**2*t1**5 - 10*dy0*t0 **
                   2*t1**4 - 20*y0*t0**2*t1**3 + 2*dy0*t0*t1**5 + 10*y0*t0*t1**4 - 2*y0*t1**5)/(2*(t0**2 - 2*t0*t1 + t1**2)*(t0**3 - 3*t0**2*t1 + 3*t0*t1**2 - t1**3))

            # den = (2*(t0 - t1)**2*(t0**3 - 3*t0**2*t1 + 3*t0*t1**2 - t1**3))
            # We are more interested in the expression of coefficients as linear fonction of the final position (x1,y1)
            # in fact: Ax5 = cx5*x1 + dx5
            #          Ax4 = cx4*x1 + dx4
            #          Ax3 = cx3*x1 + dx3
            #          Ax2 = cx2*x1 + dx2
            #          Ax1 = cx1*x1 + dx1
            #          Ax0 = cx0*x1 + dx0
            # Same for Ay5..Ay0
            """cx5 = (-12)/den
            dx5 = (ddx0*t0**2 - 2*ddx0*t0*t1 - 6*dx0*t0 + ddx0*t1**2 + 6*dx0*t1 + 12*x0)/den
            cx4 = (30*t0 + 30*t1)/den
            dx4 = (- 30*t0*x0 - 30*t1*x0 - 2*t0**3*ddx0 - 3*t1**3*ddx0 + 14*t0**2*dx0 -
                   16*t1**2*dx0 + 2*t0*t1*dx0 + 4*t0*t1**2*ddx0 + t0**2*t1*ddx0)/den
            cx3 = (-20*t0**2 - 20*t1**2 - 80*t0*t1)/den
            dx3 = (t0**4*ddx0 + 3*t1**4*ddx0 - 8*t0**3*dx0 + 12*t1**3*dx0 + 20*t0**2*x0 + 20*t1**2*x0 + 80 *
                   t0*t1*x0 + 4*t0**3*t1*ddx0 + 28*t0*t1**2*dx0 - 32*t0**2*t1*dx0 - 8*t0**2*t1**2*ddx0)/den
            cx2 = -(- 60*t0*t1**2 - 60*t0**2*t1)/den
            dx2 = -(t1**5*ddx0 + 4*t0*t1**4*ddx0 + 3*t0**4*t1*ddx0 + 36*t0*t1**3*dx0 - 24*t0**3*t1 *
                    dx0 + 60*t0*t1**2*x0 + 60*t0**2*t1*x0 - 8*t0**2*t1**3*ddx0 - 12*t0**2*t1**2*dx0)/den
            cx1 = -(60*t0**2*t1**2)/den
            dx1 = -(2*t1**5*dx0 - 2*t0*t1**5*ddx0 - 10*t0*t1**4*dx0 + t0**2*t1**4*ddx0 + 4*t0**3*t1**3*ddx0 -
                    3*t0**4*t1**2*ddx0 - 16*t0**2*t1**3*dx0 + 24*t0**3*t1**2*dx0 - 60*t0**2*t1**2*x0)/den
            cx0 = (20*t0**3*t1**2 + 2*t0**5 - 10*t0**4*t1) / den
            dx0 = (- ddx0*t0**4*t1**3 + 2*ddx0*t0**3*t1**4 + 8*dx0*t0**3*t1**3 - ddx0*t0**2*t1**5 - 10 *
                   dx0*t0**2*t1**4 - 20*x0*t0**2*t1**3 + 2*dx0*t0*t1**5 + 10*x0*t0*t1**4 - 2*x0*t1**5)/den

            cy5 = (-12)/den
            dy5 = (ddy0*t0**2 - 2*ddy0*t0*t1 - 6*dy0*t0 + ddy0*t1**2 + 6*dy0*t1 + 12*y0)/den
            cy4 = (30*t0 + 30*t1)/den
            dy4 = (- 30*t0*y0 - 30*t1*y0 - 2*t0**3*ddy0 - 3*t1**3*ddy0 + 14*t0**2*dy0 -
                   16*t1**2*dy0 + 2*t0*t1*dy0 + 4*t0*t1**2*ddy0 + t0**2*t1*ddy0)/den
            cy3 = (-20*t0**2 - 20*t1**2 - 80*t0*t1)/den
            dy3 = (t0**4*ddy0 + 3*t1**4*ddy0 - 8*t0**3*dy0 + 12*t1**3*dy0 + 20*t0**2*y0 + 20*t1**2*y0 + 80 *
                   t0*t1*y0 + 4*t0**3*t1*ddy0 + 28*t0*t1**2*dy0 - 32*t0**2*t1*dy0 - 8*t0**2*t1**2*ddy0)/den
            cy2 = -(- 60*t0*t1**2 - 60*t0**2*t1)/den
            dy2 = -(t1**5*ddy0 + 4*t0*t1**4*ddy0 + 3*t0**4*t1*ddy0 + 36*t0*t1**3*dy0 - 24*t0**3*t1 *
                    dy0 + 60*t0*t1**2*y0 + 60*t0**2*t1*y0 - 8*t0**2*t1**3*ddy0 - 12*t0**2*t1**2*dy0)/den
            cy1 = -(60*t0**2*t1**2)/den
            dy1 = -(2*t1**5*dy0 - 2*t0*t1**5*ddy0 - 10*t0*t1**4*dy0 + t0**2*t1**4*ddy0 + 4*t0**3*t1**3*ddy0 -
                    3*t0**4*t1**2*ddy0 - 16*t0**2*t1**3*dy0 + 24*t0**3*t1**2*dy0 - 60*t0**2*t1**2*y0)/den
            cy0 = (20*t0**3*t1**2 + 2*t0**5 - 10*t0**4*t1) / den
            dy0 = (- ddy0*t0**4*t1**3 + 2*ddy0*t0**3*t1**4 + 8*dy0*t0**3*t1**3 - ddy0*t0**2*t1**5 - 10 *
                   dy0*t0**2*t1**4 - 20*y0*t0**2*t1**3 + 2*dy0*t0*t1**5 + 10*y0*t0*t1**4 - 2*y0*t1**5)/den"""

            # test should be zero : ok
            # ~ print Ax0 - (cx0*x1 + dx0)
            # ~ print Ax1 - (cx1*x1 + dx1)
            # ~ print Ax2 - (cx2*x1 + dx2)
            # ~ print Ax3 - (cx3*x1 + dx3)
            # ~ print Ax4 - (cx4*x1 + dx4)
            # ~ print Ax5 - (cx5*x1 + dx5)

            self.lastCoeffs_x = [Ax5, Ax4, Ax3, Ax2, Ax1, Ax0]  # save coeffs
            self.lastCoeffs_y = [Ay5, Ay4, Ay3, Ay2, Ay1, Ay0]
            # self.lastCoeffs = [cx5, cx4, cx3, cx2, cx1, cx0, dx5, dx4, dx3, dx2,
            #                    dx1, dx0, cy5, cy4, cy3, cy2, cy1, cy0, dy5, dy4, dy3, dy2, dy1, dy0]
            self.x1 = x1  # save last x1 value
            self.y1 = y1  # save last y1 value
        else:
            # Too close to touchdown: keep the previously computed polynomial
            [Ax5, Ax4, Ax3, Ax2, Ax1, Ax0] = self.lastCoeffs_x  # use last coeffs
            [Ay5, Ay4, Ay3, Ay2, Ay1, Ay0] = self.lastCoeffs_y
            # [cx5, cx4, cx3, cx2, cx1, cx0, dx5, dx4, dx3, dx2, dx1, dx0, cy5, cy4,
            #  cy3, cy2, cy1, cy0, dy5, dy4, dy3, dy2, dy1, dy0] = self.lastCoeffs

        # coefficients for z (deterministic): 6th-order bell-shaped profile,
        # zero at lift-off and touchdown, apex h at mid-swing
        Az6 = -h/((t2/2)**3*(t2 - t2/2)**3)
        Az5 = (3*t2*h)/((t2/2)**3*(t2 - t2/2)**3)
        Az4 = -(3*t2**2*h)/((t2/2)**3*(t2 - t2/2)**3)
        Az3 = (t2**3*h)/((t2/2)**3*(t2 - t2/2)**3)

        # get the next point (evaluate polynomials one time step ahead)
        ev = t0+dt
        evz = t3+dt

        x1 = self.x1
        y1 = self.y1
        """x0 = x1 * (cx0 + cx1*ev + cx2*ev**2 + cx3*ev**3 + cx4*ev**4 + cx5*ev**5) + \
            dx0 + dx1*ev + dx2*ev**2 + dx3*ev**3 + dx4*ev**4 + dx5*ev**5
        dx0 = x1 * (cx1 + 2*cx2*ev + 3*cx3*ev**2 + 4*cx4*ev**3 + 5*cx5*ev**4) + \
            dx1 + 2*dx2*ev + 3*dx3*ev**2 + 4*dx4*ev**3 + 5*dx5*ev**4
        ddx0 = x1 * (2*cx2 + 3*2*cx3*ev + 4*3*cx4*ev**2 + 5*4*cx5*ev**3) + \
            2*dx2 + 3*2*dx3*ev + 4*3*dx4*ev**2 + 5*4*dx5*ev**3

        y0 = y1 * (cy0 + cy1*ev + cy2*ev**2 + cy3*ev**3 + cy4*ev**4 + cy5*ev**5) + \
            dy0 + dy1*ev + dy2*ev**2 + dy3*ev**3 + dy4*ev**4 + dy5*ev**5
        dy0 = y1 * (cy1 + 2*cy2*ev + 3*cy3*ev**2 + 4*cy4*ev**3 + 5*cy5*ev**4) + \
            dy1 + 2*dy2*ev + 3*dy3*ev**2 + 4*dy4*ev**3 + 5*dy5*ev**4
        ddy0 = y1 * (2*cy2 + 3*2*cy3*ev + 4*3*cy4*ev**2 + 5*4*cy5*ev**3) + \
            2*dy2 + 3*2*dy3*ev + 4*3*dy4*ev**2 + 5*4*dy5*ev**3"""

        z0 = Az3*evz**3 + Az4*evz**4 + Az5*evz**5 + Az6*evz**6
        dz0 = 3*Az3*evz**2 + 4*Az4*evz**3 + 5*Az5*evz**4 + 6*Az6*evz**5
        ddz0 = 2*3*Az3*evz + 3*4*Az4*evz**2 + 4*5*Az5*evz**3 + 5*6*Az6*evz**4

        # At the very start/end of swing, freeze the horizontal motion and
        # only follow the vertical profile
        if (t3 < epsilon) or (t3 > (t2-epsilon)):
            return [x0, 0.0, 0.0, y0, 0.0, 0.0, z0, dz0, ddz0, self.x1, self.y1]
        else:
            x0 = Ax0 + Ax1*ev + Ax2*ev**2 + Ax3*ev**3 + Ax4*ev**4 + Ax5*ev**5
            dx0 = Ax1 + 2*Ax2*ev + 3*Ax3*ev**2 + 4*Ax4*ev**3 + 5*Ax5*ev**4
            ddx0 = 2*Ax2 + 3*2*Ax3*ev + 4*3*Ax4*ev**2 + 5*4*Ax5*ev**3

            y0 = Ay0 + Ay1*ev + Ay2*ev**2 + Ay3*ev**3 + Ay4*ev**4 + Ay5*ev**5
            dy0 = Ay1 + 2*Ay2*ev + 3*Ay3*ev**2 + 4*Ay4*ev**3 + 5*Ay5*ev**4
            ddy0 = 2*Ay2 + 3*2*Ay3*ev + 4*3*Ay4*ev**2 + 5*4*Ay5*ev**3
            return [x0, dx0, ddx0, y0, dy0, ddy0, z0, dz0, ddz0, self.x1, self.y1]

        # expression de ddx0 comme une fonction lineaire de x1:
        """if(adaptative_mode):
            self.coeff_acc_x_lin_a = 2*cx2 + 3*2*cx3*ev + 4*3*cx4*ev**2 + 5*4*cx5*ev**3
            self.coeff_acc_x_lin_b = 2*dx2 + 3*2*dx3*ev + 4*3*dx4*ev**2 + 5*4*dx5*ev**3
            self.coeff_acc_y_lin_a = 2*cy2 + 3*2*cy3*ev + 4*3*cy4*ev**2 + 5*4*cy5*ev**3
            self.coeff_acc_y_lin_b = 2*dy2 + 3*2*dy3*ev + 4*3*dy4*ev**2 + 5*4*dy5*ev**3
        else:
            self.coeff_acc_x_lin_a = 0.0
            self.coeff_acc_x_lin_b = x1 * (2*cx2 + 3*2*cx3*ev + 4*3*cx4*ev**2 + 5*4 *
                                           cx5*ev**3) + 2*dx2 + 3*2*dx3*ev + 4*3*dx4*ev**2 + 5*4*dx5*ev**3
            self.coeff_acc_y_lin_a = 0.0
            self.coeff_acc_y_lin_b = y1 * (2*cy2 + 3*2*cy3*ev + 4*3*cy4*ev**2 + 5*4 *
                                           cy5*ev**3) + 2*dy2 + 3*2*dy3*ev + 4*3*dy4*ev**2 + 5*4*dy5*ev**3"""

        # get the target point (usefull for inform the MPC when we are not adaptative anymore.
        # ev = t1
        # ~ x1 =Ax0 + Ax1*ev + Ax2*ev**2 + Ax3*ev**3 + Ax4*ev**4 + Ax5*ev**5
        # ~ dx1 =Ax1 + 2*Ax2*ev + 3*Ax3*ev**2 + 4*Ax4*ev**3 + 5*Ax5*ev**4
        # ~ ddx1=2*Ax2 + 3*2*Ax3*ev + 4*3*Ax4*ev**2 + 5*4*Ax5*ev**3

        # ~ y1 =Ay0 + Ay1*ev + Ay2*ev**2 + Ay3*ev**3 + Ay4*ev**4 + Ay5*ev**5
        # ~ dy1 =Ay1 + 2*Ay2*ev + 3*Ay3*ev**2 + 4*Ay4*ev**3 + 5*Ay5*ev**4
        # ~ ddy1=2*Ay2 + 3*2*Ay3*ev + 4*3*Ay4*ev**2 + 5*4*Ay5*ev**3

        # return [x0, dx0, ddx0, y0, dy0, ddy0, z0, dz0, ddz0, self.x1, self.y1]
| [
"numpy.tile",
"numpy.where",
"numpy.array",
"numpy.zeros",
"numpy.dot",
"numpy.cos",
"numpy.sin"
] | [((575, 666), 'numpy.array', 'np.array', (['[[0.1946, 0.1946, -0.1946, -0.1946], [0.14695, -0.14695, 0.14695, -0.14695]]'], {}), '([[0.1946, 0.1946, -0.1946, -0.1946], [0.14695, -0.14695, 0.14695, \n -0.14695]])\n', (583, 666), True, 'import numpy as np\n'), ((1017, 1052), 'numpy.array', 'np.array', (['[[0.0, -1.0], [1.0, 0.0]]'], {}), '([[0.0, -1.0], [1.0, 0.0]])\n', (1025, 1052), True, 'import numpy as np\n'), ((1441, 1473), 'numpy.zeros', 'np.zeros', (['self.desired_pos.shape'], {}), '(self.desired_pos.shape)\n', (1449, 1473), True, 'import numpy as np\n'), ((1501, 1533), 'numpy.zeros', 'np.zeros', (['self.desired_pos.shape'], {}), '(self.desired_pos.shape)\n', (1509, 1533), True, 'import numpy as np\n'), ((2517, 2561), 'numpy.array', 'np.array', (['[[c, -s, 0], [s, c, 0], [0, 0, 1]]'], {}), '([[c, -s, 0], [s, c, 0], [0, 0, 1]])\n', (2525, 2561), True, 'import numpy as np\n'), ((5523, 5567), 'numpy.array', 'np.array', (['[[c, -s, 0], [s, c, 0], [0, 0, 1]]'], {}), '([[c, -s, 0], [s, c, 0], [0, 0, 1]])\n', (5531, 5567), True, 'import numpy as np\n'), ((2460, 2481), 'numpy.cos', 'np.cos', (['mpc.q_w[5, 0]'], {}), '(mpc.q_w[5, 0])\n', (2466, 2481), True, 'import numpy as np\n'), ((2483, 2504), 'numpy.sin', 'np.sin', (['mpc.q_w[5, 0]'], {}), '(mpc.q_w[5, 0])\n', (2489, 2504), True, 'import numpy as np\n'), ((5450, 5478), 'numpy.cos', 'np.cos', (['(-vel[5, 0] * self.dt)'], {}), '(-vel[5, 0] * self.dt)\n', (5456, 5478), True, 'import numpy as np\n'), ((5481, 5509), 'numpy.sin', 'np.sin', (['(-vel[5, 0] * self.dt)'], {}), '(-vel[5, 0] * self.dt)\n', (5487, 5509), True, 'import numpy as np\n'), ((1395, 1411), 'numpy.zeros', 'np.zeros', (['(1, 4)'], {}), '((1, 4))\n', (1403, 1411), True, 'import numpy as np\n'), ((4851, 4876), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (4859, 4876), True, 'import numpy as np\n'), ((4918, 4943), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (4926, 4943), True, 'import 
numpy as np\n'), ((2990, 3025), 'numpy.where', 'np.where', (['(sequencer.S[:, i] == True)'], {}), '(sequencer.S[:, i] == True)\n', (2998, 3025), True, 'import numpy as np\n'), ((4099, 4121), 'numpy.array', 'np.array', (['[x0, y0, z0]'], {}), '([x0, y0, z0])\n', (4107, 4121), True, 'import numpy as np\n'), ((4167, 4192), 'numpy.array', 'np.array', (['[dx0, dy0, dz0]'], {}), '([dx0, dy0, dz0])\n', (4175, 4192), True, 'import numpy as np\n'), ((4238, 4266), 'numpy.array', 'np.array', (['[ddx0, ddy0, ddz0]'], {}), '([ddx0, ddy0, ddz0])\n', (4246, 4266), True, 'import numpy as np\n'), ((4383, 4403), 'numpy.array', 'np.array', (['[gx1, gy1]'], {}), '([gx1, gy1])\n', (4391, 4403), True, 'import numpy as np\n'), ((5939, 5969), 'numpy.tile', 'np.tile', (['vel[0:2, 0:1]', '(1, 4)'], {}), '(vel[0:2, 0:1], (1, 4))\n', (5946, 5969), True, 'import numpy as np\n'), ((4593, 4632), 'numpy.dot', 'np.dot', (['R', 'self.desired_pos[:, i:i + 1]'], {}), '(R, self.desired_pos[:, i:i + 1])\n', (4599, 4632), True, 'import numpy as np\n'), ((4739, 4791), 'numpy.dot', 'np.dot', (['R[0:2, 0:2]', 'self.footsteps_lock[:, i:i + 1]'], {}), '(R[0:2, 0:2], self.footsteps_lock[:, i:i + 1])\n', (4745, 4791), True, 'import numpy as np\n'), ((5717, 5747), 'numpy.tile', 'np.tile', (['vel[0:2, 0:1]', '(1, 4)'], {}), '(vel[0:2, 0:1], (1, 4))\n', (5724, 5747), True, 'import numpy as np\n'), ((5749, 5765), 'numpy.zeros', 'np.zeros', (['(1, 4)'], {}), '((1, 4))\n', (5757, 5765), True, 'import numpy as np\n'), ((4546, 4562), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (4554, 4562), True, 'import numpy as np\n')] |
import numpy as np
class Trajectory:
    """Joint-space trajectory generators."""

    def __init__(self):
        pass

    @classmethod
    def LSPB(cls, q0, qf, tf, tb, t_step=0.07):
        """Linear Segment with Parabolic Blends (trapezoidal) trajectory.

        Args:
            q0, qf: start / goal joint configurations (array-like).
            tf: total motion time.
            tb: blend (acceleration) time at each end.
            t_step: sampling period.

        Returns:
            (q_pos, t): sampled joint positions and matching time stamps.
        """
        start_q = np.array(q0)
        goal_q = np.array(qf)
        # Degenerate case: start equals goal, report a single sample.
        if np.allclose(start_q, goal_q):
            return [goal_q], [0.0]
        # Blend-phase acceleration that satisfies the boundary conditions.
        acc = (start_q - goal_q) / (tb - tf) / tb
        # Linear system encoding continuity and boundary constraints for
        # the six free coefficients of the three polynomial segments.
        A = np.array([[tb, -tb, 0.0, 1.0, -1.0, 0.0],
                      [0.0, -(tf - tb), tf - tb, 0.0, -1.0, 1.0],
                      [1.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                      [0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
                      [0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
                      [0.0, 0.0, tf, 0.0, 0.0, 1.0]])
        b = np.block([[-acc * tb ** 2 / 2], [acc * (tf - tb) ** 2 / 2],
                      [np.zeros_like(start_q)], [start_q], [acc * tf],
                      [goal_q + acc * tf ** 2 / 2]])
        c1, c2, c3, c4, c5, c6 = np.linalg.inv(A).dot(b)
        # Sample the three phases on a common clock.
        t = np.arange(start=0.0, stop=tf, step=t_step)
        ramp_up = t[t < tb].reshape(-1, 1)
        cruise = t[(tb <= t) & (t < tf - tb)].reshape(-1, 1)
        ramp_down = t[tf - tb <= t].reshape(-1, 1)
        # Stitch the segments together: parabola, line, parabola.
        q_pos = np.concatenate((
            acc / 2 * ramp_up ** 2 + c1 * ramp_up + c4,
            c2 * cruise + c5,
            -acc / 2 * ramp_down ** 2 + c3 * ramp_down + c6,
        ))
        assert not np.isnan(t).any()
        assert not np.isnan(q_pos).any()
        return q_pos, t


if __name__ == '__main__':
    pass
| [
"numpy.allclose",
"numpy.array",
"numpy.linalg.inv",
"numpy.isnan",
"numpy.concatenate",
"numpy.zeros_like",
"numpy.arange"
] | [((155, 167), 'numpy.array', 'np.array', (['q0'], {}), '(q0)\n', (163, 167), True, 'import numpy as np\n'), ((181, 193), 'numpy.array', 'np.array', (['qf'], {}), '(qf)\n', (189, 193), True, 'import numpy as np\n'), ((205, 224), 'numpy.allclose', 'np.allclose', (['q0', 'qf'], {}), '(q0, qf)\n', (216, 224), True, 'import numpy as np\n'), ((384, 607), 'numpy.array', 'np.array', (['[[tb, -tb, 0.0, 1.0, -1.0, 0.0], [0.0, -(tf - tb), tf - tb, 0.0, -1.0, 1.0],\n [1.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0, 0.0, 0.0], [0.0, \n 0.0, 1.0, 0.0, 0.0, 0.0], [0.0, 0.0, tf, 0.0, 0.0, 1.0]]'], {}), '([[tb, -tb, 0.0, 1.0, -1.0, 0.0], [0.0, -(tf - tb), tf - tb, 0.0, -\n 1.0, 1.0], [1.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0, 0.0, \n 0.0], [0.0, 0.0, 1.0, 0.0, 0.0, 0.0], [0.0, 0.0, tf, 0.0, 0.0, 1.0]])\n', (392, 607), True, 'import numpy as np\n'), ((1082, 1124), 'numpy.arange', 'np.arange', ([], {'start': '(0.0)', 'stop': 'tf', 'step': 't_step'}), '(start=0.0, stop=tf, step=t_step)\n', (1091, 1124), True, 'import numpy as np\n'), ((1444, 1481), 'numpy.concatenate', 'np.concatenate', (['(traj1, traj2, traj3)'], {}), '((traj1, traj2, traj3))\n', (1458, 1481), True, 'import numpy as np\n'), ((880, 896), 'numpy.linalg.inv', 'np.linalg.inv', (['A'], {}), '(A)\n', (893, 896), True, 'import numpy as np\n'), ((779, 796), 'numpy.zeros_like', 'np.zeros_like', (['q0'], {}), '(q0)\n', (792, 796), True, 'import numpy as np\n'), ((1498, 1509), 'numpy.isnan', 'np.isnan', (['t'], {}), '(t)\n', (1506, 1509), True, 'import numpy as np\n'), ((1532, 1547), 'numpy.isnan', 'np.isnan', (['q_pos'], {}), '(q_pos)\n', (1540, 1547), True, 'import numpy as np\n')] |
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, Dataset, Subset
from torch.cuda import amp
import numpy as np
import logging
import time
import os
from os import mkdir, listdir, getcwd
from os.path import join, exists
from copy import deepcopy
class EarlyStopping(object):
    '''
    Performs early stopping if the validation loss has not
    improved for a specified amount of epochs.

    Args:
        patience (int): number of consecutive non-improving epochs
            tolerated before stopping triggers.
        logger (logging.Logger, optional): progress messages are emitted
            here when provided; the instance is silent otherwise.
    '''

    def __init__(self, patience, logger=None):
        self.patience = patience
        self.n_bad_epochs = 0
        self.running_best = np.inf
        self.logger = logger

    def __call__(self, loss):
        return self.step(loss)

    def step(self, loss):
        '''
        This function performs a step by checking if the new evaluated
        loss is lower than the running best stored in the instance.

        Args:
            loss (float): new criterion value

        Returns:
            (bool) if early stopping is triggered
        '''
        # update number of bad epochs
        if self.running_best > loss:
            self.running_best = loss
            self.n_bad_epochs = 0
        else:
            self.n_bad_epochs += 1
        # check if early stopping criterion is fulfilled
        if self.n_bad_epochs > self.patience:
            if self.logger:
                # BUG FIX: message previously rendered as "hasnot improved"
                # (missing space between the concatenated string parts).
                self.logger.info(
                    'Early Stopping: Criterion has '
                    f'not improved for {self.n_bad_epochs}.'
                )
            self._reset()
            return True
        # BUG FIX: the original called self.logger.info unconditionally on
        # this branch, raising AttributeError whenever no logger was set.
        if self.logger:
            self.logger.info(
                'Current Patience Level: '
                f'{self.n_bad_epochs}/{self.patience}'
            )
        return False

    def _reset(self):
        '''
        Reset the running best criterion and the number of bad epochs.
        '''
        self.n_bad_epochs = 0
        self.running_best = np.inf
class Trainer(object):
    '''
    Orchestrates model training: runs the epoch loop, performs validation,
    learning-rate scheduling, early stopping, automatic mixed precision
    (AMP), logging and checkpointing. Behaviour is driven by a config
    dictionary; see __DEFAULT_CONFIG for the recognised keys and defaults.
    '''

    __DEFAULT_CONFIG = {
        'device': 'cpu', # 'cpu'/'cuda'/'cuda:0'/'cuda:1' ...
        'epochs': 100,
        'batches_per_epoch': 100,
        'batch_size': 1,
        'num_workers': 0, # dataloading workers
        'output_folder': 'train_out',
        'validation_split': 0.2,
        'validation_indices': [],
        'prefetch_validation': False, # push validation data to GPU
        'early_stopping_patience': 0,
        'amp': False, # Automatic mixed precision
        'log_level': logging.INFO
    }

    def __init__(self, model, optimizer, criterion, dataset,
                 scheduler=None, train_config=None):
        # training config
        self._config = self._check_config_dict(train_config)
        # prepare output folder structure
        self._create_folder_structure()
        # set essentials
        self._model = model.to(device=self._config['device'])
        self._criterion = criterion #.to(device=self._config['device'])
        self._optimizer = optimizer
        self._dataset = dataset
        self._scheduler = scheduler
        self._early_stopper = None
        self._scaler = None
        # init automated mixed precision (amp) training
        if self._config['amp']:
            self._scaler = amp.GradScaler()
        # init logger
        self._logger = self._init_logger()
        # init current training status
        self._train_status = self._init_train_status()
        # validation and training subset init
        self._train_subset, self._val_subset = self._init_data_subsets()
        self._val_gpu_prefetch = None
        # initialize early stopping
        patience = self._config['early_stopping_patience']
        if (patience > 0) and self._val_subset:
            self._early_stopper = EarlyStopping(patience, logger=self._logger)
        # utilities
        # BUG FIX: for epochs < 10 the floor division yielded 0, which made
        # `e % self._save_step` in train() raise ZeroDivisionError.
        self._save_step = max(1, self._config['epochs'] // 10)
        # save training config
        self._save_config()
        self._log_init_info()

    def train(self):
        '''
        Implements the main training loop given the configuration of the
        training dictionary (_config). The training loop trains the model
        for the specified amount of epochs. In every iteration, the model
        is trained on the specified amount of batches and then validated
        on the validation set. The mean of both the training and validation
        loss is reported every iteration.
        '''
        # some variables
        device = self._config['device']
        non_blocking = True if device != 'cpu' else False
        batch_size = self._config['batch_size']
        batches_per_epoch = self._config['batches_per_epoch']
        # log device name
        if 'cuda' in device or 'gpu' in device:
            self._logger.info('Using GPU '
                              f'"{torch.cuda.get_device_name(device=device)}"'
                              ' for training.')
            # maybe prefetch validation data (only meaningful on GPU)
            if self._config['prefetch_validation']:
                self._prefetch_val_data()
        else:
            self._logger.info('Training on CPU.')
        # get starting and ending epoch
        st_epoch = self._train_status['epoch']
        max_epoch = self._config['epochs']
        if st_epoch > 0:
            self._logger.info(f'Resuming training from epoch {st_epoch} on.')
            st_epoch += 1
        else:
            self._logger.info('Starting training.')
        # print AMP info
        if self._config['amp'] and self._scaler._enabled:
            self._logger.info('Automatic Mixed Precision is ENABLED')
        else:
            self._logger.info('Automatic Mixed Precision is DISABLED')
        # TRAINING LOOP
        for e in range(st_epoch, max_epoch):
            # current epoch
            self._train_status['epoch'] = e
            self._logger.info(f'Starting epoch {e}')
            # measure time
            t_start = time.perf_counter()
            # run training iteration
            # differentiate between amp and normal training
            if self._config['amp']:
                self._train_iter_amp(device, non_blocking, batch_size,
                                     batches_per_epoch)
            else:
                self._train_iter(device, non_blocking, batch_size,
                                 batches_per_epoch)
            # maybe validate
            if self._val_subset:
                self._validate(device, non_blocking)
            elapsed_time = time.perf_counter() - t_start
            # print info
            self._log_progress(elapsed_time)
            # maybe adjust LR
            if self._scheduler:
                self._adjust_lr()
            # check early stopping
            # BUG FIX: guard on the stopper itself -- it is only created
            # when a validation subset exists (see __init__), so checking
            # the patience config alone could call None here.
            if self._early_stopper:
                val_loss = self._train_status['val_loss'][e].item()
                if self._early_stopper(val_loss):
                    self.save_checkpoint()
                    break
            # maybe save checkpoint
            if (e % self._save_step == 0) or (e == self._config["epochs"]-1):
                self.save_checkpoint()

    def set_validation_subset(self, subset):
        '''
        Sets the validation subset.

        Args:
            subset (sequence): `torch.utils.data.Subset` or indices
                               corresponding to the used dataset.
        '''
        if isinstance(subset, Subset) and subset.dataset == self._dataset:
            self._val_subset = subset
        elif hasattr(subset, '__iter__'):
            self._val_subset = Subset(self._dataset, subset)
        else:
            raise TypeError('Provide indices (sequence) or a'
                            'torch.utils.data.Subset instance!')

    def save_checkpoint(self):
        '''
        Saves the current training status including model and optimizer
        parameters to storage.
        '''
        self._logger.info('Saving checkpoint ...')
        if self._train_status['amp_state_dict']:
            self._train_status['amp_state_dict'] = self._scaler.state_dict()
        # save current training status in checkpoints folder
        path = join(self._config['output_folder'], 'checkpoints')
        filename = 'train_chkpt_' + str(self._train_status['epoch']) + '.tar'
        torch.save(self._train_status, join(path, filename))
        # save training config
        self._save_config()

    def _train_iter(self, device, non_blocking, b, bpe):
        '''
        Runs one iteration of the training loop.
        '''
        self._logger.debug('Starting training iteration.')
        iter_subset = self._sample_random_subset(self._train_subset, b * bpe)
        train_loader = self._get_dataloader(iter_subset)
        curr_epoch = self._train_status['epoch']
        losses = torch.zeros(size=(bpe,))
        # model to training mode
        self._model.train()
        # loop over batches
        for idx, (inputs, targets) in enumerate(train_loader):
            # zero gradients
            self._optimizer.zero_grad()
            # progress
            if (idx % np.ceil(bpe / 10) == 0):
                self._logger.debug(f"Processing training batch {idx}/{bpe}")
            # push data to device
            inputs = inputs.to(device=device, non_blocking=non_blocking)
            targets = targets.to(device=device, non_blocking=non_blocking)
            # forward pass
            predictions = self._model(inputs)
            del inputs
            # loss
            loss = self._criterion(predictions, targets)
            del targets
            # backprop
            loss.backward()
            self._optimizer.step()
            # keep track of loss
            losses[idx] = loss.item()
        self._train_status['train_loss'][curr_epoch] = losses.mean()

    def _train_iter_amp(self, device, non_blocking, b, bpe):
        '''
        Runs one iteration of the training loop using automatic mixed precision
        forward and backward passes.
        '''
        self._logger.debug('Starting training iteration (AMP).')
        iter_subset = self._sample_random_subset(self._train_subset, b * bpe)
        train_loader = self._get_dataloader(iter_subset)
        curr_epoch = self._train_status['epoch']
        losses = torch.zeros(size=(bpe,))
        # model to training mode
        self._model.train()
        # loop over batches
        for idx, (inputs, targets) in enumerate(train_loader):
            # zero gradients
            self._optimizer.zero_grad()
            # progress
            if (idx % np.ceil(bpe / 10) == 0):
                self._logger.debug(f"Processing training batch {idx}/{bpe}")
            # push data to device
            inputs = inputs.to(device=device, non_blocking=non_blocking)
            targets = targets.to(device=device, non_blocking=non_blocking)
            # autocast when forward passing
            with amp.autocast():
                predictions = self._model(inputs)
                del inputs
                # loss
                loss = self._criterion(predictions, targets)
                del targets
            # scale loss and backprop
            self._scaler.scale(loss).backward()
            self._scaler.step(self._optimizer)
            self._scaler.update()
            # keep track of loss
            losses[idx] = loss.item()
        self._train_status['train_loss'][curr_epoch] = losses.mean()

    def _validate(self, device, non_blocking):
        '''
        Perform validation on the validation set.
        '''
        self._logger.info('Validating ...')
        # epoch
        curr_epoch = self._train_status['epoch']
        # eval mode
        self._model.eval()
        # use prefetched data if available, else use dataloader
        if self._val_gpu_prefetch:
            bpe = len(self._val_gpu_prefetch)
            losses = torch.zeros(size=(bpe,))
            # loop over validation batches
            with torch.no_grad():
                for idx, (inputs, targets) in enumerate(self._val_gpu_prefetch):
                    # progress
                    if (idx % np.ceil(bpe / 10) == 0):
                        self._logger.debug(f"Processing validation batch {idx}/{bpe}")
                    if self._config['amp']:
                        with amp.autocast():
                            predictions = self._model(inputs)
                            loss = self._criterion(predictions, targets)
                    else:
                        predictions = self._model(inputs)
                        loss = self._criterion(predictions, targets)
                    # save val loss
                    losses[idx] = loss.item()
        else:
            loader = self._get_dataloader(self._val_subset)
            bpe = len(loader)
            losses = torch.zeros(size=(bpe,))
            with torch.no_grad():
                for idx, (inputs, targets) in enumerate(loader):
                    if (idx % np.ceil(bpe / 10) == 0):
                        self._logger.debug(f"Processing validation batch {idx}/{bpe}")
                    inputs = inputs.to(device=device, non_blocking=non_blocking)
                    targets = targets.to(device=device, non_blocking=non_blocking)
                    if self._config['amp']:
                        with amp.autocast():
                            predictions = self._model(inputs)
                            del inputs
                            loss = self._criterion(predictions, targets)
                            del targets
                    else:
                        predictions = self._model(inputs)
                        del inputs
                        loss = self._criterion(predictions, targets)
                        del targets
                    losses[idx] = loss.item()
        self._train_status['val_loss'][curr_epoch] = losses.mean()

    def _log_progress(self, time):
        # NOTE: the parameter name shadows the `time` module inside this
        # method; kept for interface compatibility.
        e = self._train_status['epoch']
        self._logger.info(f"Epoch {e} finished --> Elapsed Time: {time}s")
        self._logger.info(f"Avg. train loss: {self._train_status['train_loss'][e].item()}")
        self._logger.info(f"Avg. validation loss: {self._train_status['val_loss'][e].item()}")

    def _init_train_status(self):
        '''
        Initializes training status.
        Training status is a dict storing info about the current training
        progress like current epoch, losses etc.
        '''
        train_status = {
            'epoch': 0,
            'model_state_dict': self._model.state_dict(), # reference
            'optimizer_state_dict': self._optimizer.state_dict(), # reference
            'scheduler_state_dict': self._scheduler.state_dict() if self._scheduler else None, # reference
            'amp_state_dict': self._scaler.state_dict() if self._config['amp'] else None,
            'train_loss': torch.zeros(size=(self._config['epochs'],)),
            'val_loss': torch.zeros(size=(self._config['epochs'],))
        }
        return train_status

    def _init_logger(self):
        # date
        from datetime import datetime
        now = datetime.now()
        date_str = now.strftime("%d_%m_%Y_%H.%M.%S")
        # init logger
        logger = logging.getLogger(f"{self.__class__.__name__}")
        logger.setLevel(self._config['log_level'])
        # log to console and file
        fh = logging.FileHandler(join(getcwd(), self._config['output_folder'], 'logs', f"training_{date_str}.log"))
        ch = logging.StreamHandler()
        # formatter
        formatter = logging.Formatter('%(asctime)s | %(name)s-%(levelname)s: %(message)s')
        fh.setFormatter(formatter)
        ch.setFormatter(formatter)
        # add handler to logger
        logger.addHandler(fh)
        logger.addHandler(ch)
        return logger

    def _init_data_subsets(self):
        '''
        Initializes both the training and validation subset. Validation subset
        is based on training config parameters 'validation indices' and
        'validation_split'. Training set are all the data that is not the
        validation set.
        '''
        val_split = self._config['validation_split']
        val_indices = self._config['validation_indices']
        n_data = len(self._dataset)
        # check if proportion is specified
        if val_split == .0 and not val_indices:
            return self._dataset, None
        if val_indices:
            # given validation indices
            val_set = Subset(self._dataset, val_indices)
            # train set
            train_indices = [
                x for x in range(n_data) if x not in val_indices
            ]
            train_set = Subset(self._dataset, train_indices)
            return train_set, val_set
        else:
            # randomly split available data into validation and training set
            N = int(val_split * n_data)
            val_subset = self._sample_random_subset(self._dataset, N)
            sampled_val_indices = val_subset.indices
            self._config['validation_indices'] = sampled_val_indices
            # train set
            train_indices = [
                x for x in range(n_data) if x not in sampled_val_indices
            ]
            train_set = Subset(self._dataset, train_indices)
            return train_set, val_subset

    def _prefetch_val_data(self):
        '''
        Prefetch validation data: push validation tensors directly to the GPU or into pinned memory.
        '''
        self._logger.info('Prefetching validation data ...')
        self._logger.debug('Trying to push the validation set to the GPU ...')
        self._val_gpu_prefetch = []
        for idx, (inputs, targets) in enumerate(self._get_dataloader(self._val_subset)):
            inputs = inputs.to(device=self._config['device'])
            targets = targets.to(device=self._config['device'])
            self._val_gpu_prefetch.append((inputs, targets))
        self._logger.debug('Pushed validation data to GPU.')

    def _get_dataloader(self, data):
        return DataLoader(
            dataset = data,
            batch_size = self._config['batch_size'],
            shuffle = False,
            num_workers = self._config['num_workers'],
            pin_memory = False if self._config['device'] == 'cpu' else True,
            drop_last = False
        )

    def _create_folder_structure(self):
        '''
        Creates necessary folder structures.
        'logs' folder will hold training log files.
        'checkpoints' holds training checkpoints as dictionaries in .tar files.
        '''
        # dirs
        work_dir = getcwd()
        output_folder = self._config['output_folder']
        # top folder
        try:
            mkdir(join(work_dir, output_folder))
        except FileExistsError:
            print(f'Output folder {join(work_dir, output_folder)} already exists, checkpoints might get overwritten!')
        # subfolders
        try:
            os.mkdir(join(work_dir, output_folder, 'logs'))
            os.mkdir(join(work_dir, output_folder, 'checkpoints'))
        except FileExistsError:
            pass

    def _log_init_info(self):
        self._logger.info('Successfully initialized.')
        self._logger.info(f'Model: {repr(self._model)}')
        self._logger.info(f'Optimizer: {repr(self._optimizer)}')
        self._logger.info(f'Criterion: {repr(self._criterion)}')
        if (hasattr(self._dataset, '_normalize') and
                hasattr(self._dataset, '_norm')):
            if self._dataset._normalize:
                self._logger.info(
                    f"Normalization: {repr(self._dataset._norm)}"
                )
        self._logger.info(f'Number of total data: {len(self._dataset)}')
        self._logger.info('##### Training Configuration #####')
        for key, val in self._config.items():
            if key == 'validation_indices' and len(self._config['validation_indices']) > 25:
                self._logger.info(
                    f'{key} ---> [{val[0]}, {val[1]}, {val[2]} ... {val[-3]}, {val[-2]}, {val[-1]}]'
                )
            elif key == 'log_level':
                self._logger.info(f'{key} ---> {logging.getLevelName(val)}')
            else:
                self._logger.info(f'{key} ---> {val}')
        self._logger.info('##### Training Configuration #####')

    def _save_config(self):
        torch.save(self._config, join(self._config['output_folder'], 'train_config.pt'))

    def _adjust_lr(self):
        '''
        Performs learning rate adjustment according to the assigned
        scheduler '_scheduler', i.e. calls the scheduler's step
        function, which takes a loss value.
        If training is performed with validation, the validation loss will be
        the measure for adjustment, else training loss is used. The scheduler
        itself is configured by the user before initializing the Trainer
        class instance.
        '''
        epoch = self._train_status['epoch']
        # current lr
        before_lr = self._optimizer.param_groups[0]['lr']
        if isinstance(self._scheduler, torch.optim.lr_scheduler.StepLR):
            self._scheduler.step()
        else:
            #use val loss if validation is performed
            if self._val_subset:
                val_loss = self._train_status['val_loss'][epoch].item()
                self._scheduler.step(val_loss)
            else:
                tr_loss = self._train_status['train_loss'][epoch].item()
                self._scheduler.step(tr_loss)
        # lr after step
        after_lr = self._optimizer.param_groups[0]['lr']
        # log lr update
        if after_lr < before_lr:
            self._logger.info(f'{self._scheduler.__class__.__name__} reduced the learning rate from {before_lr} to {after_lr}')

    @staticmethod
    def _sample_random_subset(data, N):
        '''
        Sample a random subset of the dataset for one iteration of the
        training loop.
        '''
        # sample random subset w/o replacement
        shuffled_indices = torch.randperm(len(data))
        rand_subset = shuffled_indices[:N]
        return Subset(data, rand_subset.tolist())

    @staticmethod
    def _load_config(path):
        '''
        Load the configuration dict from storage.

        Args:
            path (str): path to the .pt file.
        '''
        if exists(path):
            train_config = torch.load(path)
            if not isinstance(train_config, dict):
                raise TypeError('The config file is not a python dictionary!')
            return train_config
        else:
            raise FileNotFoundError('No training config was found!')

    @classmethod
    def _check_config_dict(cls, config_dict):
        '''
        Checks validity of the training configuration dictionary and fills
        missing entries with default values.

        Args:
            config_dict (dict): Configuration dictionary.

        Returns:
            (dict): Checked and maybe modified training config dictionary.
        '''
        if config_dict is None:
            return deepcopy(cls.__DEFAULT_CONFIG)
        if isinstance(config_dict, dict):
            dict_copy = deepcopy(config_dict)
            for default_key, default_val in cls.__DEFAULT_CONFIG.items():
                # fill with default value if empty and check for type
                dict_copy.setdefault(default_key, default_val)
                if type(dict_copy[default_key]) != type(default_val):
                    raise TypeError(f'{default_key} needs to be of {type(default_val)} but is {type(dict_copy[default_key])}!')
            return dict_copy
        else:
            raise TypeError(f"Config needs to be of {type(cls.__DEFAULT_CONFIG)} but is {type(config_dict)}!")

    @classmethod
    def from_checkpoint(cls, model, optimizer, criterion, dataset, path,
                        scheduler=None, epoch=None):
        '''
        Resume training from checkpoint.

        Args:
            model, optimizer, criterion, dataset: essentials used in previous
                                                  trainings.
            path (str): path to top level folder of the training output folder.
            epoch (int, optional): epoch to resume from, appropriate
                                   checkpoint has to exist. Defaults to the
                                   most recent checkpoint.
        '''
        if exists(path):
            # load config dict
            config_dict = cls._load_config(join(path, 'train_config.pt'))
            # load train status
            chkpts = listdir(join(path, 'checkpoints'))
            if not chkpts:
                raise FileNotFoundError("No checkpoints were found!")
            if epoch:
                path_to_chkpt = join(
                    path, 'checkpoints', 'train_chkpt_' + str(epoch) + '.tar'
                )
            else:
                # infer most recent chkpt
                max_ep = 0
                for chkpt in chkpts:
                    epoch = int(chkpt[12:-4])
                    if epoch > max_ep:
                        max_ep = epoch
                path_to_chkpt = join(
                    path, 'checkpoints', 'train_chkpt_' + str(max_ep) + '.tar'
                )
            # checkpoint
            train_status = torch.load(path_to_chkpt)
            # init trainer
            trainer = cls(model, optimizer, criterion, dataset,
                          scheduler, config_dict)
            trainer._train_status = train_status
            # restore model and optimizer checkpoints
            trainer._model.load_state_dict(
                trainer._train_status['model_state_dict']
            )
            trainer._optimizer.load_state_dict(
                trainer._train_status['optimizer_state_dict']
            )
            # restore lr scheduler state if one exists
            if trainer._scheduler:
                trainer._scheduler.load_state_dict(
                    trainer._train_status['scheduler_state_dict']
                )
            # amp scaler state dict
            if trainer._config['amp']:
                trainer._scaler.load_state_dict(
                    trainer._train_status['amp_state_dict']
                )
            return trainer
        else:
            raise FileNotFoundError("The specified path does not exist!")
| [
"logging.getLogger",
"logging.StreamHandler",
"copy.deepcopy",
"os.path.exists",
"torch.cuda.amp.GradScaler",
"time.perf_counter",
"torch.cuda.amp.autocast",
"numpy.ceil",
"logging.getLevelName",
"torch.cuda.get_device_name",
"logging.Formatter",
"torch.load",
"os.path.join",
"os.getcwd",
... | [((8088, 8138), 'os.path.join', 'join', (["self._config['output_folder']", '"""checkpoints"""'], {}), "(self._config['output_folder'], 'checkpoints')\n", (8092, 8138), False, 'from os.path import join, exists\n'), ((8732, 8756), 'torch.zeros', 'torch.zeros', ([], {'size': '(bpe,)'}), '(size=(bpe,))\n', (8743, 8756), False, 'import torch\n'), ((10210, 10234), 'torch.zeros', 'torch.zeros', ([], {'size': '(bpe,)'}), '(size=(bpe,))\n', (10221, 10234), False, 'import torch\n'), ((15057, 15071), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (15069, 15071), False, 'from datetime import datetime\n'), ((15165, 15212), 'logging.getLogger', 'logging.getLogger', (['f"""{self.__class__.__name__}"""'], {}), "(f'{self.__class__.__name__}')\n", (15182, 15212), False, 'import logging\n'), ((15428, 15451), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (15449, 15451), False, 'import logging\n'), ((15493, 15563), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s | %(name)s-%(levelname)s: %(message)s"""'], {}), "('%(asctime)s | %(name)s-%(levelname)s: %(message)s')\n", (15510, 15563), False, 'import logging\n'), ((17988, 18197), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'data', 'batch_size': "self._config['batch_size']", 'shuffle': '(False)', 'num_workers': "self._config['num_workers']", 'pin_memory': "(False if self._config['device'] == 'cpu' else True)", 'drop_last': '(False)'}), "(dataset=data, batch_size=self._config['batch_size'], shuffle=\n False, num_workers=self._config['num_workers'], pin_memory=False if \n self._config['device'] == 'cpu' else True, drop_last=False)\n", (17998, 18197), False, 'from torch.utils.data import DataLoader, Dataset, Subset\n'), ((18559, 18567), 'os.getcwd', 'getcwd', ([], {}), '()\n', (18565, 18567), False, 'from os import mkdir, listdir, getcwd\n'), ((22308, 22320), 'os.path.exists', 'exists', (['path'], {}), '(path)\n', (22314, 22320), False, 'from os.path import join, 
exists\n'), ((24378, 24390), 'os.path.exists', 'exists', (['path'], {}), '(path)\n', (24384, 24390), False, 'from os.path import join, exists\n'), ((3168, 3184), 'torch.cuda.amp.GradScaler', 'amp.GradScaler', ([], {}), '()\n', (3182, 3184), False, 'from torch.cuda import amp\n'), ((5863, 5882), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (5880, 5882), False, 'import time\n'), ((8256, 8276), 'os.path.join', 'join', (['path', 'filename'], {}), '(path, filename)\n', (8260, 8276), False, 'from os.path import join, exists\n'), ((11815, 11839), 'torch.zeros', 'torch.zeros', ([], {'size': '(bpe,)'}), '(size=(bpe,))\n', (11826, 11839), False, 'import torch\n'), ((12757, 12781), 'torch.zeros', 'torch.zeros', ([], {'size': '(bpe,)'}), '(size=(bpe,))\n', (12768, 12781), False, 'import torch\n'), ((14809, 14852), 'torch.zeros', 'torch.zeros', ([], {'size': "(self._config['epochs'],)"}), "(size=(self._config['epochs'],))\n", (14820, 14852), False, 'import torch\n'), ((14878, 14921), 'torch.zeros', 'torch.zeros', ([], {'size': "(self._config['epochs'],)"}), "(size=(self._config['epochs'],))\n", (14889, 14921), False, 'import torch\n'), ((16422, 16456), 'torch.utils.data.Subset', 'Subset', (['self._dataset', 'val_indices'], {}), '(self._dataset, val_indices)\n', (16428, 16456), False, 'from torch.utils.data import DataLoader, Dataset, Subset\n'), ((16615, 16651), 'torch.utils.data.Subset', 'Subset', (['self._dataset', 'train_indices'], {}), '(self._dataset, train_indices)\n', (16621, 16651), False, 'from torch.utils.data import DataLoader, Dataset, Subset\n'), ((17180, 17216), 'torch.utils.data.Subset', 'Subset', (['self._dataset', 'train_indices'], {}), '(self._dataset, train_indices)\n', (17186, 17216), False, 'from torch.utils.data import DataLoader, Dataset, Subset\n'), ((20346, 20400), 'os.path.join', 'join', (["self._config['output_folder']", '"""train_config.pt"""'], {}), "(self._config['output_folder'], 'train_config.pt')\n", (20350, 20400), False, 'from 
os.path import join, exists\n'), ((22349, 22365), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (22359, 22365), False, 'import torch\n'), ((23037, 23067), 'copy.deepcopy', 'deepcopy', (['cls.__DEFAULT_CONFIG'], {}), '(cls.__DEFAULT_CONFIG)\n', (23045, 23067), False, 'from copy import deepcopy\n'), ((23136, 23157), 'copy.deepcopy', 'deepcopy', (['config_dict'], {}), '(config_dict)\n', (23144, 23157), False, 'from copy import deepcopy\n'), ((25277, 25302), 'torch.load', 'torch.load', (['path_to_chkpt'], {}), '(path_to_chkpt)\n', (25287, 25302), False, 'import torch\n'), ((6425, 6444), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (6442, 6444), False, 'import time\n'), ((7502, 7531), 'torch.utils.data.Subset', 'Subset', (['self._dataset', 'subset'], {}), '(self._dataset, subset)\n', (7508, 7531), False, 'from torch.utils.data import DataLoader, Dataset, Subset\n'), ((10851, 10865), 'torch.cuda.amp.autocast', 'amp.autocast', ([], {}), '()\n', (10863, 10865), False, 'from torch.cuda import amp\n'), ((11900, 11915), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11913, 11915), False, 'import torch\n'), ((12799, 12814), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12812, 12814), False, 'import torch\n'), ((15337, 15345), 'os.getcwd', 'getcwd', ([], {}), '()\n', (15343, 15345), False, 'from os import mkdir, listdir, getcwd\n'), ((18675, 18704), 'os.path.join', 'join', (['work_dir', 'output_folder'], {}), '(work_dir, output_folder)\n', (18679, 18704), False, 'from os.path import join, exists\n'), ((18913, 18950), 'os.path.join', 'join', (['work_dir', 'output_folder', '"""logs"""'], {}), "(work_dir, output_folder, 'logs')\n", (18917, 18950), False, 'from os.path import join, exists\n'), ((18973, 19017), 'os.path.join', 'join', (['work_dir', 'output_folder', '"""checkpoints"""'], {}), "(work_dir, output_folder, 'checkpoints')\n", (18977, 19017), False, 'from os.path import join, exists\n'), ((24466, 24495), 'os.path.join', 'join', 
(['path', '"""train_config.pt"""'], {}), "(path, 'train_config.pt')\n", (24470, 24495), False, 'from os.path import join, exists\n'), ((24559, 24584), 'os.path.join', 'join', (['path', '"""checkpoints"""'], {}), "(path, 'checkpoints')\n", (24563, 24584), False, 'from os.path import join, exists\n'), ((9026, 9043), 'numpy.ceil', 'np.ceil', (['(bpe / 10)'], {}), '(bpe / 10)\n', (9033, 9043), True, 'import numpy as np\n'), ((10504, 10521), 'numpy.ceil', 'np.ceil', (['(bpe / 10)'], {}), '(bpe / 10)\n', (10511, 10521), True, 'import numpy as np\n'), ((4738, 4779), 'torch.cuda.get_device_name', 'torch.cuda.get_device_name', ([], {'device': 'device'}), '(device=device)\n', (4764, 4779), False, 'import torch\n'), ((12059, 12076), 'numpy.ceil', 'np.ceil', (['(bpe / 10)'], {}), '(bpe / 10)\n', (12066, 12076), True, 'import numpy as np\n'), ((12245, 12259), 'torch.cuda.amp.autocast', 'amp.autocast', ([], {}), '()\n', (12257, 12259), False, 'from torch.cuda import amp\n'), ((12911, 12928), 'numpy.ceil', 'np.ceil', (['(bpe / 10)'], {}), '(bpe / 10)\n', (12918, 12928), True, 'import numpy as np\n'), ((13261, 13275), 'torch.cuda.amp.autocast', 'amp.autocast', ([], {}), '()\n', (13273, 13275), False, 'from torch.cuda import amp\n'), ((18773, 18802), 'os.path.join', 'join', (['work_dir', 'output_folder'], {}), '(work_dir, output_folder)\n', (18777, 18802), False, 'from os.path import join, exists\n'), ((20118, 20143), 'logging.getLevelName', 'logging.getLevelName', (['val'], {}), '(val)\n', (20138, 20143), False, 'import logging\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 10 15:49:49 2021
@author: <NAME>
"""
import matplotlib.pyplot as plt
import cv2
import tensorflow_hub as hub
import tensorflow as tf
import numpy as np
import file_path as f
# Fetch the pre-trained arbitrary image stylization network from TF Hub.
# It takes a content image and a style image and returns a stylized blend.
model = hub.load('https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2')
# Alternative backbone, kept for reference:
#model = hub.load('https://tfhub.dev/google/magenta/arbitrary-image-stylization-inceptionv3')
def load_image(img_path):
    """Read an image file and return it as a batched float32 tensor.

    Args:
        img_path: path to the image file on disk.

    Returns:
        A tensor of shape (1, H, W, 3) with values in [0, 1].
    """
    raw = tf.io.read_file(img_path)
    decoded = tf.image.decode_image(raw, channels=3)
    scaled = tf.image.convert_image_dtype(decoded, tf.float32)
    # Prepend the batch dimension expected by the stylization model.
    return scaled[tf.newaxis, :]
# Load the content and style images from the paths configured in file_path.
content_image = load_image(f.content_path)
style_image = load_image(f.style_path)
# Run the hub stylization model; it returns a sequence whose first element
# is the stylized image tensor (batch dimension still included).
stylized_image = model(tf.constant(content_image), tf.constant(style_image))[0]
# Display the result (squeeze drops the batch dimension).
plt.imshow(np.squeeze(stylized_image))
plt.show()
# Persist to disk. NOTE(review): the float image is scaled by 255 and then
# converted with COLOR_BGR2GRAY, so the saved JPEG is grayscale -- confirm
# that discarding the colour information is intended.
cv2.imwrite('image_generated.jpg', cv2.cvtColor(np.squeeze(stylized_image)*255, cv2.COLOR_BGR2GRAY))
"tensorflow.image.convert_image_dtype",
"tensorflow.io.read_file",
"tensorflow_hub.load",
"numpy.squeeze",
"tensorflow.constant",
"tensorflow.image.decode_image",
"matplotlib.pyplot.show"
] | [((269, 355), 'tensorflow_hub.load', 'hub.load', (['"""https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2"""'], {}), "(\n 'https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2')\n", (277, 355), True, 'import tensorflow_hub as hub\n'), ((1019, 1029), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1027, 1029), True, 'import matplotlib.pyplot as plt\n'), ((508, 533), 'tensorflow.io.read_file', 'tf.io.read_file', (['img_path'], {}), '(img_path)\n', (523, 533), True, 'import tensorflow as tf\n'), ((544, 582), 'tensorflow.image.decode_image', 'tf.image.decode_image', (['img'], {'channels': '(3)'}), '(img, channels=3)\n', (565, 582), True, 'import tensorflow as tf\n'), ((595, 640), 'tensorflow.image.convert_image_dtype', 'tf.image.convert_image_dtype', (['img', 'tf.float32'], {}), '(img, tf.float32)\n', (623, 640), True, 'import tensorflow as tf\n'), ((991, 1017), 'numpy.squeeze', 'np.squeeze', (['stylized_image'], {}), '(stylized_image)\n', (1001, 1017), True, 'import numpy as np\n'), ((905, 931), 'tensorflow.constant', 'tf.constant', (['content_image'], {}), '(content_image)\n', (916, 931), True, 'import tensorflow as tf\n'), ((933, 957), 'tensorflow.constant', 'tf.constant', (['style_image'], {}), '(style_image)\n', (944, 957), True, 'import tensorflow as tf\n'), ((1095, 1121), 'numpy.squeeze', 'np.squeeze', (['stylized_image'], {}), '(stylized_image)\n', (1105, 1121), True, 'import numpy as np\n')] |
import numpy as np
from harmonic_equation import harmonic_equation
from equation import equation
import low_level_tools as llt
################################################################################
def eq_11_bc(current):
    """Impose Dirichlet boundary data u = exp(x*y) on the edges of current[0]."""
    n = current[0].shape[0]
    pts = np.linspace(0, 1, n)
    xx, yy = np.meshgrid(pts, pts, indexing='ij')
    target = np.exp(xx * yy)
    field = current[0]
    field[0, :], field[-1, :] = target[0, :], target[-1, :]
    field[:, 0], field[:, -1] = target[:, 0], target[:, -1]
    return current
def eq_11_exact(current):
    """Exact solution u = exp(x*y) on the unit-square grid sized from current."""
    n = current[0].shape[0]
    pts = np.linspace(0, 1, n)
    xx, yy = np.meshgrid(pts, pts, indexing='ij')
    return np.array([np.exp(xx * yy)])
def eq_11_rhs(current):
    """Manufactured right-hand side for equation 11.

    The formula is the operator with the eq_11_coeff coefficients applied to
    the exact solution u = exp(x*y); boundary rows/columns are zeroed because
    Dirichlet data is imposed separately by eq_11_bc.
    """
    N, M = current[0].shape
    z = np.linspace(0, 1, N)
    x, y = np.meshgrid(z, z, indexing='ij')
    u_rhs = ((1 + np.exp(x * y)) * x ** 2 + (2 + np.cos(np.pi * x)) * y ** 2 + \
             (1 + x * y) * np.exp(-x * y) + y * np.exp(x) + x * np.exp(y) + np.sin(np.pi * x * y)) * np.exp(x * y)
    # Homogeneous values on the four edges of the square grid.
    u_rhs[0, :] = 0
    u_rhs[N - 1, :] = 0
    u_rhs[:, N - 1] = 0
    u_rhs[:, 0] = 0
    rhs = np.array([u_rhs])
    return rhs
def eq_11_coeff(current):
    """Variable coefficients [a, b, c, d, e, f] of the scalar operator 11."""
    n = current[0].shape[0]
    pts = np.linspace(0, 1, n)
    x, y = np.meshgrid(pts, pts, indexing='ij')
    coeffs = [1 + np.exp(x * y),
              2 + np.cos(np.pi * x),
              np.exp(-x * y),
              np.exp(x),
              np.exp(y),
              np.sin(np.pi * x * y)]
    return np.array([coeffs])
################################################################################
def eq_red_fox_bc(current):
    """Impose Dirichlet boundary data u = exp(x*y) for the red-fox problem."""
    n = current[0].shape[0]
    pts = np.linspace(0, 1, n)
    xx, yy = np.meshgrid(pts, pts, indexing='ij')
    target = np.exp(xx * yy)
    field = current[0]
    field[0, :], field[-1, :] = target[0, :], target[-1, :]
    field[:, 0], field[:, -1] = target[:, 0], target[:, -1]
    return current
def eq_red_fox_exact(current):
    """Exact solution u = exp(x*y) for the red-fox equation."""
    n = current[0].shape[0]
    pts = np.linspace(0, 1, n)
    xx, yy = np.meshgrid(pts, pts, indexing='ij')
    return np.array([np.exp(xx * yy)])
def eq_red_fox_rhs(current, a=1):
    """Manufactured RHS (x**2 + y**2 + a*y)*exp(x*y), zeroed on the boundary.

    *a* is the advection strength used by the matching eq_red_fox_coeff.
    """
    n = current[0].shape[0]
    pts = np.linspace(0, 1, n)
    xx, yy = np.meshgrid(pts, pts, indexing='ij')
    rhs_field = (xx ** 2 + yy ** 2 + a * yy) * np.exp(xx * yy)
    rhs_field[[0, -1], :] = 0
    rhs_field[:, [0, -1]] = 0
    return np.array([rhs_field])
def eq_red_fox_coeff(current, a=1):
    """Constant coefficients for the red-fox operator: a=b=1, d=a, rest zero."""
    n = current[0].shape[0]
    ones = np.ones((n, n))
    zeros = np.zeros((n, n))
    return np.array([[ones, ones, zeros, a * ones, zeros, zeros]])
################################################################################
def eq_00_bc(current):
    """Impose Dirichlet data u = sin(pi*x)*sin(pi*y)/2 on the grid boundary."""
    n = current[0].shape[0]
    pts = np.linspace(0, 1, n)
    xx, yy = np.meshgrid(pts, pts, indexing='ij')
    target = np.sin(np.pi * xx) * np.sin(np.pi * yy) / 2
    field = current[0]
    field[0, :], field[-1, :] = target[0, :], target[-1, :]
    field[:, 0], field[:, -1] = target[:, 0], target[:, -1]
    return current
def eq_00_exact(current):
    """Exact solution u = sin(pi*x)*sin(pi*y)/2 for equation 00."""
    n = current[0].shape[0]
    pts = np.linspace(0, 1, n)
    xx, yy = np.meshgrid(pts, pts, indexing='ij')
    return np.array([np.sin(np.pi * xx) * np.sin(np.pi * yy) / 2])
def eq_00_rhs(current):
    """Manufactured right-hand side for equation 00.

    Operator of eq_00_coeff applied to the exact solution
    u = sin(pi*x)*sin(pi*y)/2; boundary entries are zeroed because Dirichlet
    data is imposed separately by eq_00_bc.
    """
    N, M = current[0].shape
    z = np.linspace(0, 1, N)
    x, y = np.meshgrid(z, z, indexing='ij')
    # Term order mirrors the coefficients: (a + b)*u_xx-type part, then the
    # mixed, first-order and zeroth-order contributions.
    u_rhs = -np.pi ** 2 * np.sin(np.pi * x) * np.sin(np.pi * y) * (
            4 + y * np.cos(x * np.pi) + 4 + x * np.exp(-x * y)) / 2 + \
            np.pi ** 2 * np.cos(np.pi * x) * np.cos(np.pi * y) * np.exp(y * x) / 2 + \
            np.pi * np.cos(np.pi * x) * np.sin(np.pi * y) * x * y ** 3 / 2 + \
            np.pi * np.sin(np.pi * x) * np.cos(np.pi * y) * (y + x ** 2 + 0.2) / 2 + \
            np.sinh(x + 3 * y) * np.sin(np.pi * x) * np.sin(np.pi * y) / 2
    u_rhs[0, :] = 0;
    u_rhs[N - 1, :] = 0;
    u_rhs[:, N - 1] = 0;
    u_rhs[:, 0] = 0
    rhs = np.array([u_rhs])
    return rhs
def eq_00_coeff(current):
    """Variable coefficients [a, b, c, d, e, f] of the scalar operator 00."""
    n = current[0].shape[0]
    pts = np.linspace(0, 1, n)
    x, y = np.meshgrid(pts, pts, indexing='ij')
    coeffs = [4 + y * np.cos(x * np.pi),
              4 + x * np.exp(-x * y),
              np.exp(y * x),
              x * y ** 3,
              y + x ** 2 + 0.2,
              np.sinh(x + 3 * y)]
    return np.array([coeffs])
################################################################################
def eq_12_bc(current):
    """Impose Dirichlet boundary data u = exp(x + y) on the edges."""
    n = current[0].shape[0]
    pts = np.linspace(0, 1, n)
    xx, yy = np.meshgrid(pts, pts, indexing='ij')
    target = np.exp(xx + yy)
    field = current[0]
    field[0, :], field[-1, :] = target[0, :], target[-1, :]
    field[:, 0], field[:, -1] = target[:, 0], target[:, -1]
    return current
def eq_12_exact(current):
    """Exact solution u = exp(x + y) for equation 12."""
    n = current[0].shape[0]
    pts = np.linspace(0, 1, n)
    xx, yy = np.meshgrid(pts, pts, indexing='ij')
    return np.array([np.exp(xx + yy)])
def eq_12_rhs(current):
    """Manufactured right-hand side for equation 12.

    Operator of eq_12_coeff applied to the exact solution u = exp(x + y);
    every partial derivative of exp(x + y) equals exp(x + y), so the RHS is
    the sum of the coefficients times exp(x + y). Boundary entries zeroed.
    """
    N, M = current[0].shape
    z = np.linspace(0, 1, N)
    x, y = np.meshgrid(z, z, indexing='ij')
    u_rhs = (4 + np.cos(2 * np.pi * x * y) + 2 + np.sin(np.pi * x * y) + np.exp(-x * y) \
             + np.exp(x) + np.exp(y) + np.sin(np.pi * x * y) + 2) * np.exp(x + y)
    u_rhs[0, :] = 0;
    u_rhs[N - 1, :] = 0;
    u_rhs[:, N - 1] = 0;
    u_rhs[:, 0] = 0
    rhs = np.array([u_rhs])
    return rhs
def eq_12_coeff(current):
    """Variable coefficients [a, b, c, d, e, f] of the scalar operator 12."""
    n = current[0].shape[0]
    pts = np.linspace(0, 1, n)
    x, y = np.meshgrid(pts, pts, indexing='ij')
    coeffs = [4 + np.cos(2 * np.pi * x * y),
              2 + np.sin(np.pi * x * y),
              np.exp(-x * y),
              np.exp(x),
              np.exp(y),
              np.sin(np.pi * x * y) + 2]
    return np.array([coeffs])
################################################################################
def eq_13_bc(current):
    """Impose Dirichlet boundary data u = y*exp(x) on the edges."""
    n = current[0].shape[0]
    pts = np.linspace(0, 1, n)
    xx, yy = np.meshgrid(pts, pts, indexing='ij')
    target = yy * np.exp(xx)
    field = current[0]
    field[0, :], field[-1, :] = target[0, :], target[-1, :]
    field[:, 0], field[:, -1] = target[:, 0], target[:, -1]
    return current
def eq_13_exact(current):
    """Exact solution u = y*exp(x) for equation 13."""
    n = current[0].shape[0]
    pts = np.linspace(0, 1, n)
    xx, yy = np.meshgrid(pts, pts, indexing='ij')
    return np.array([yy * np.exp(xx)])
def eq_13_rhs(current):
    """Manufactured right-hand side for equation 13.

    Operator of eq_13_coeff applied to the exact solution u = y*exp(x)
    (u_yy = 0, so the a-coefficient contributes nothing); boundary entries
    are zeroed because Dirichlet data is imposed by eq_13_bc.
    """
    N, M = current[0].shape
    z = np.linspace(0, 1, N)
    x, y = np.meshgrid(z, z, indexing='ij')
    u_rhs = (2 + x * np.exp(x * y) + 6 + np.sin(np.pi * x * y)) * y * np.exp(x) + \
            x * np.exp(-x * y) * np.exp(x) + y ** 2 * np.exp(2 * x) + x * y ** 2 * np.exp(x) * np.exp(y)
    u_rhs[0, :] = 0;
    u_rhs[N - 1, :] = 0;
    u_rhs[:, N - 1] = 0;
    u_rhs[:, 0] = 0
    rhs = np.array([u_rhs])
    return rhs
def eq_13_coeff(current):
    """Variable coefficients [a, b, c, d, e, f] of the scalar operator 13."""
    n = current[0].shape[0]
    pts = np.linspace(0, 1, n)
    x, y = np.meshgrid(pts, pts, indexing='ij')
    coeffs = [4 + y * np.exp(-x * y),
              2 + x * np.exp(x * y),
              x * np.exp(-x * y),
              y * np.exp(x),
              x * y ** 2 * np.exp(y),
              6 + np.sin(np.pi * x * y)]
    return np.array([coeffs])
################################################################################
def eq_14_bc(current):
    """Impose Dirichlet boundary data u = exp(x + y) on the edges."""
    n = current[0].shape[0]
    pts = np.linspace(0, 1, n)
    xx, yy = np.meshgrid(pts, pts, indexing='ij')
    target = np.exp(xx + yy)
    field = current[0]
    field[0, :], field[-1, :] = target[0, :], target[-1, :]
    field[:, 0], field[:, -1] = target[:, 0], target[:, -1]
    return current
def eq_14_exact(current):
    """Exact solution u = exp(x + y) for equation 14."""
    n = current[0].shape[0]
    pts = np.linspace(0, 1, n)
    xx, yy = np.meshgrid(pts, pts, indexing='ij')
    return np.array([np.exp(xx + yy)])
def eq_14_rhs(current):
    """Manufactured right-hand side for equation 14.

    Operator of eq_14_coeff applied to u = exp(x + y): all derivatives of
    exp(x + y) equal exp(x + y), so the RHS is the coefficient sum times
    exp(x + y). Boundary entries are zeroed (Dirichlet data via eq_14_bc).
    """
    N, M = current[0].shape
    z = np.linspace(0, 1, N)
    x, y = np.meshgrid(z, z, indexing='ij')
    # b and a are the constant offsets also used in eq_14_coeff.
    b = 4
    a = 3
    u_rhs = (b + np.exp(x * y) + a + np.exp(-x * y) +
             np.cos(np.pi*(x + 2*y)) + np.sin(np.pi*(y + 2*x)))*np.exp(x + y)
    u_rhs[0, :] = 0;
    u_rhs[N - 1, :] = 0;
    u_rhs[:, N - 1] = 0;
    u_rhs[:, 0] = 0
    rhs = np.array([u_rhs])
    return rhs
def eq_14_coeff(current):
    """Coefficients [a, b, c, d, e, f] of operator 14 (c and f are zero)."""
    n = current[0].shape[0]
    pts = np.linspace(0, 1, n)
    x, y = np.meshgrid(pts, pts, indexing='ij')
    zeros = np.zeros((n, n))
    coeffs = [4 + np.exp(x * y),    # "b + exp(x*y)" in the original, b = 4
              3 + np.exp(-x * y),   # "a + exp(-x*y)" in the original, a = 3
              zeros,
              np.cos(np.pi * (x + 2 * y)),
              np.sin(np.pi * (y + 2 * x)),
              zeros]
    return np.array([coeffs])
################################################################################
def eq_21_bc(current):
    """Dirichlet data for system 21: u = exp(x*y), v = exp(2*x*y) on the edges."""
    n = current[0].shape[0]
    pts = np.linspace(0, 1, n)
    xx, yy = np.meshgrid(pts, pts, indexing='ij')
    for field, target in zip(current, (np.exp(xx * yy), np.exp(2 * xx * yy))):
        field[0, :], field[-1, :] = target[0, :], target[-1, :]
        field[:, 0], field[:, -1] = target[:, 0], target[:, -1]
    return current
def eq_21_exact(current):
    """Exact solution pair (exp(x*y), exp(2*x*y)) of system 21."""
    n = current[0].shape[0]
    pts = np.linspace(0, 1, n)
    xx, yy = np.meshgrid(pts, pts, indexing='ij')
    return np.array([np.exp(xx * yy), np.exp(2 * xx * yy)])
def eq_21_rhs(current):
    """Manufactured right-hand side pair for the coupled system 21.

    Obtained by applying the eq_21_coeff operator to the exact pair
    u = exp(x*y), v = exp(2*x*y); boundary entries of both components are
    zeroed (Dirichlet data via eq_21_bc).
    """
    N, M = current[0].shape
    z = np.linspace(0, 1, N)
    x, y = np.meshgrid(z, z, indexing='ij')
    u_rhs = 20 * np.exp(2 * x * y) * x ** 2 - x - np.exp(-x * y) * y
    v_rhs = np.exp(x * y) + 4 * (7 + (np.sin(np.pi * x * y)) ** 2) * np.exp(2 * x * y) * y ** 2 + 16 * np.exp(
        3 * x * y) * x ** 2 - \
            2 * x * np.exp(2 * x * y - x) - 2 * y * np.exp(2 * x * y - y) + (2 + 4 * x * y) * np.sin(
        np.pi * x * y) * np.exp(2 * x * y)
    v_rhs[0, :] = 0
    v_rhs[N - 1, :] = 0
    v_rhs[:, N - 1] = 0
    v_rhs[:, 0] = 0
    u_rhs[0, :] = 0
    u_rhs[N - 1, :] = 0
    u_rhs[:, N - 1] = 0
    u_rhs[:, 0] = 0
    rhs = np.array([u_rhs, v_rhs])
    return rhs
def eq_21_coeff(current):
    """Coefficient tensor (2, 12, N, N) of the coupled quasilinear system 21.

    Each row holds [a, b, c, d, e, f] for the self-block followed by
    [a, b, c, d, e, f] for the cross-coupling block.
    """
    n = current[0].shape[0]
    pts = np.linspace(0, 1, n)
    x, y = np.meshgrid(pts, pts, indexing='ij')
    zeros = np.zeros((n, n))
    cos_pxy = np.cos(np.pi * x * y)
    sin_pxy = np.sin(np.pi * x * y)
    # Equation 1: (11)-block, then the (12)-coupling (only its f-term is nonzero).
    row1 = [20 * np.exp(x * y), 7 + cos_pxy ** 2, cos_pxy,
            -np.exp(-2 * x * y), -np.exp(-x * y), zeros,
            zeros, zeros, zeros, zeros, zeros,
            -((7 + cos_pxy ** 2) * y ** 2 + cos_pxy * (1 + x * y)) * np.exp(-x * y)]
    # Equation 2: (21)-coupling (f = 1), then the (22)-block.
    row2 = [zeros, zeros, zeros, zeros, zeros, np.ones((n, n)),
            4 * np.exp(x * y), 7 + sin_pxy ** 2, sin_pxy,
            -np.exp(-y), -np.exp(-x), zeros]
    return np.array([row1, row2])
################################################################################
def eq_22_bc(current):
    """Dirichlet data for system 22: u = exp(x*y), v = exp(x + y) on the edges."""
    n = current[0].shape[0]
    pts = np.linspace(0, 1, n)
    xx, yy = np.meshgrid(pts, pts, indexing='ij')
    for field, target in zip(current, (np.exp(xx * yy), np.exp(xx + yy))):
        field[0, :], field[-1, :] = target[0, :], target[-1, :]
        field[:, 0], field[:, -1] = target[:, 0], target[:, -1]
    return current
def eq_22_exact(current):
    """Exact solution pair (exp(x*y), exp(x + y)) of system 22."""
    n = current[0].shape[0]
    pts = np.linspace(0, 1, n)
    xx, yy = np.meshgrid(pts, pts, indexing='ij')
    return np.array([np.exp(xx * yy), np.exp(xx + yy)])
def eq_22_rhs(current):
    """Manufactured right-hand side pair for the coupled system 22.

    Obtained by applying the eq_22_coeff operator to the exact pair
    u = exp(x*y), v = exp(x + y); boundary entries of both components are
    zeroed (Dirichlet data via eq_22_bc).
    """
    N, M = current[0].shape
    z = np.linspace(0, 1, N)
    x, y = np.meshgrid(z, z, indexing='ij')
    u_rhs = ((1 + np.exp(x * y)) * x ** 2 + (2 + np.cos(np.pi * x)) * y ** 2 +
             (1 + x * y) * np.exp(-x * y) + y * np.exp(x) + x * np.exp(y) + np.sin(np.pi * x * y)) * np.exp(x * y) + \
            (4 + np.cos(2 * np.pi * x * y) + 2 + np.sin(np.pi * x * y) + np.exp(-x * y)
             + np.exp(x) + np.exp(y) + np.sin(np.pi * x * y) + 2) * np.exp(x + y)
    v_rhs = (2 + np.log(1 + x) + 4 + np.exp(2 * x * y + 3) / 200 + np.log(1 + x * y) +
             (1 + np.cos(4 * np.pi * x * y)) / 3 + 16 * np.ones((N, N))) * np.exp(x + y) + \
            (20 * np.exp(x * y) * x ** 2 + (7 + (np.cos(np.pi * x * y)) ** 2) * y ** 2 +
             np.cos(np.pi * x * y) * (x * y + 1) - y * np.exp(-2 * x * y) - x * np.exp(-x * y)) * np.exp(x * y)
    v_rhs[0, :] = 0
    v_rhs[N - 1, :] = 0
    v_rhs[:, N - 1] = 0
    v_rhs[:, 0] = 0
    u_rhs[0, :] = 0
    u_rhs[N - 1, :] = 0
    u_rhs[:, N - 1] = 0
    u_rhs[:, 0] = 0
    rhs = np.array([u_rhs, v_rhs])
    return rhs
def eq_22_coeff(current):
    """Coefficient tensor (2, 12, N, N) of the coupled quasilinear system 22.

    Each row holds [a, b, c, d, e, f] for the self-block followed by
    [a, b, c, d, e, f] for the cross-coupling block.
    """
    n = current[0].shape[0]
    pts = np.linspace(0, 1, n)
    x, y = np.meshgrid(pts, pts, indexing='ij')
    zeros = np.zeros((n, n))
    ones = np.ones((n, n))
    pixy = np.pi * x * y
    # Equation 1: (11)-block (operator 11), then the (12)-block (operator 12).
    row1 = [1 + np.exp(x * y), 2 + np.cos(np.pi * x), np.exp(-x * y),
            np.exp(x), np.exp(y), np.sin(pixy),
            4 + np.cos(2 * pixy), 2 + np.sin(pixy), np.exp(-x * y),
            np.exp(x), np.exp(y), np.sin(pixy) + 2]
    # Equation 2: (21)-block, then the (22)-block.
    row2 = [20 * np.exp(x * y), 7 + np.cos(pixy) ** 2, np.cos(pixy),
            -np.exp(-2 * x * y), -np.exp(-x * y), zeros,
            2 + np.log(1 + x), 4 * ones, np.exp(2 * x * y + 3) / 200,
            np.log(1 + x * y), (1 + np.cos(4 * pixy)) / 3, 16 * ones]
    return np.array([row1, row2])
################################################################################
def get_quasilinear(dim, number, a=1):
    """
    Build a quasilinear test `equation`.

    -------------------
    dim = 1, number = 1
    [(1 + exp(x*y))d2y + (2 + cos(pi*x))d2x + exp(-x*y)dxdy + exp(x)dx +
    exp(y)dy + sin(pi*x*y)]u = rhs
    u_exact = exp(x*y), bc and rhs are taken from the operator and exact solution.
    -------------------
    dim = 1, number = 2
    [(4 + cos(2*pi*x*y))d2y + (2 + sin(pi*x*y))d2x + exp(-x*y)dxdy + exp(x)dx +
    exp(y)dy + sin(pi*x*y) + 2]u = rhs
    u_exact = exp(x+y), bc and rhs are taken from the operator and exact solution.
    -------------------
    dim = 2, number = 1 or 2: coupled 2x2 systems (see eq_21_* / eq_22_*).

    Parameters
    ----------
    dim: int
        Dimensionality: 2 for two equations, 1 for the one equation.
    number: int or str
        Which equation: 0-4 or 'red fox' for dim == 1; 1 or 2 for dim == 2.
    a: float, optional
        Advection parameter; used only by the 'red fox' equation.

    Raises
    ------
    ValueError
        If (dim, number) selects no known equation.  (The original code left
        `quasilinear` unassigned and crashed with UnboundLocalError instead.)
    """
    if dim == 1:
        scalar_equations = {
            0: (eq_00_coeff, eq_00_rhs, eq_00_bc, eq_00_exact),
            1: (eq_11_coeff, eq_11_rhs, eq_11_bc, eq_11_exact),
            2: (eq_12_coeff, eq_12_rhs, eq_12_bc, eq_12_exact),
            3: (eq_13_coeff, eq_13_rhs, eq_13_bc, eq_13_exact),
            4: (eq_14_coeff, eq_14_rhs, eq_14_bc, eq_14_exact),
        }
        if number in scalar_equations:
            coeff, rhs, bc, exact = scalar_equations[number]
            return equation(coeff, rhs, 1, bc, exact)
        if number == 'red fox':
            # Bind the advection parameter `a` into the coefficient/RHS callbacks.
            return equation(lambda c: eq_red_fox_coeff(c, a),
                            lambda c: eq_red_fox_rhs(c, a),
                            1, eq_red_fox_bc, eq_red_fox_exact)
    elif dim == 2:
        if number == 1:
            return equation(eq_21_coeff, eq_21_rhs, 2, eq_21_bc, eq_21_exact)
        if number == 2:
            return equation(eq_22_coeff, eq_22_rhs, 2, eq_22_bc, eq_22_exact)
    raise ValueError("no quasilinear equation for dim=%r, number=%r" % (dim, number))
################################################################################
def nleq_21_bc(current):
    """Dirichlet data for the nonlinear system: u = exp(-x*y), v = exp(-2*x*y)."""
    n = current[0].shape[0]
    pts = np.linspace(0, 1, n)
    xx, yy = np.meshgrid(pts, pts, indexing='ij')
    for field, target in zip(current, (np.exp(-xx * yy), np.exp(-2 * xx * yy))):
        field[0, :], field[-1, :] = target[0, :], target[-1, :]
        field[:, 0], field[:, -1] = target[:, 0], target[:, -1]
    return current
def nleq_21_exact(current):
    """Exact solution pair (exp(-x*y), exp(-2*x*y)) of the nonlinear system."""
    n = current[0].shape[0]
    pts = np.linspace(0, 1, n)
    xx, yy = np.meshgrid(pts, pts, indexing='ij')
    return np.array([np.exp(-xx * yy), np.exp(-2 * xx * yy)])
def nleq_21_rhs(current):
    """Manufactured right-hand side pair for the nonlinear system nleq_21.

    Derived from the exact pair u = exp(-x*y), v = exp(-2*x*y) and the
    coefficients of nleq_21_coeff; boundary entries are zeroed (Dirichlet
    data via nleq_21_bc).
    """
    N, M = current[0].shape
    z = np.linspace(0, 1, N)
    x, y = np.meshgrid(z, z, indexing='ij')
    u_rhs = y ** 2 + np.exp(x * y) * x ** 2 + np.exp(-5 * x * y)
    v_rhs = np.exp(-5 * x * y) - 2 * y * np.exp(-2 * x * y) / 7 + \
            4 * np.exp(-2 * x * y) * y ** 2 + 4 * (np.cos(np.pi * x) ** 2 + 1) * np.exp(-2 * x * y) * x ** 2
    v_rhs[0, :] = 0;
    v_rhs[-1, :] = 0;
    v_rhs[:, -1] = 0;
    v_rhs[:, 0] = 0
    u_rhs[0, :] = 0;
    u_rhs[-1, :] = 0;
    u_rhs[:, -1] = 0;
    u_rhs[:, 0] = 0
    rhs = np.array([u_rhs, v_rhs])
    return rhs
def nleq_21_coeff(current):
    """Nonlinear coefficients of the 2x2 system; the f-couplings depend on u*v.

    Returns a (2, 12, N, N) tensor: per equation, [a, b, c, d, e, f] of the
    self-block followed by [a, b, c, d, e, f] of the coupling block.
    """
    u, v = current
    n = u.shape[0]
    pts = np.linspace(0, 1, n)
    x, y = np.meshgrid(pts, pts, indexing='ij')
    zeros = np.zeros((n, n))
    # Equation 1: only a, b of the (11)-block and f of the (12)-coupling are nonzero.
    row1 = [np.exp(2 * x * y), np.exp(x * y), zeros, zeros, zeros, zeros,
            zeros, zeros, zeros, zeros, zeros, u * v]
    # Equation 2: f of the (21)-coupling, then the (22)-block.
    row2 = [zeros, zeros, zeros, zeros, zeros, np.exp(-x * y) * u * v,
            np.cos(np.pi * x) ** 2 + 1, np.ones((n, n)), zeros,
            np.ones((n, n)) / 7, zeros, zeros]
    return np.array([row1, row2])
def nleq_21_lcoeff(current):
    """Linearized (Newton) coefficients of nleq_21 around the current iterate.

    Same (2, 12, N, N) layout as nleq_21_coeff; the f-entries are the partial
    derivatives of the nonlinear terms with respect to u and v.
    """
    u, v = current
    n = u.shape[0]
    pts = np.linspace(0, 1, n)
    x, y = np.meshgrid(pts, pts, indexing='ij')
    zeros = np.zeros((n, n))
    row1 = [np.exp(2 * x * y), np.exp(x * y), zeros, zeros, zeros, v ** 2,
            zeros, zeros, zeros, zeros, zeros, 2 * u * v]
    row2 = [zeros, zeros, zeros, zeros, zeros, 2 * np.exp(-x * y) * u * v,
            np.cos(np.pi * x) ** 2 + 1, np.ones((n, n)), zeros,
            np.ones((n, n)) / 7, zeros, np.exp(-x * y) * u ** 2]
    return np.array([row1, row2])
################################################################################
def nleq1_bc(current):
    """Impose Dirichlet boundary data u = exp(x*y) for the nonlinear equation."""
    n = current[0].shape[0]
    pts = np.linspace(0, 1, n)
    xx, yy = np.meshgrid(pts, pts, indexing='ij')
    target = np.exp(xx * yy)
    field = current[0]
    field[0, :], field[-1, :] = target[0, :], target[-1, :]
    field[:, 0], field[:, -1] = target[:, 0], target[:, -1]
    return current
def nleq1_exact(current):
    """Exact solution u = exp(x*y) of the scalar nonlinear equation."""
    n = current[0].shape[0]
    pts = np.linspace(0, 1, n)
    xx, yy = np.meshgrid(pts, pts, indexing='ij')
    return np.array([np.exp(xx * yy)])
def nleq1_rhs(current):
    """Manufactured right-hand side for the scalar nonlinear equation nleq1.

    Derived from the exact solution u = exp(x*y); note the (4 + exp(x*y))
    factor matches the solution-dependent coefficient b = 4 + u in
    nleq1_coeff. Boundary entries are zeroed (Dirichlet data via nleq1_bc).
    """
    N, M = current[0].shape
    z = np.linspace(0, 1, N)
    x, y = np.meshgrid(z, z, indexing='ij')
    u_rhs = np.exp(x * y) * ((4 + np.exp(x * y)) * y ** 2 + (4 + np.exp(-x * y)) * x ** 2 + \
            (1 + x * y) * np.exp(-2 * x * y) + np.cos(np.pi * x * y) * y + np.sin(
        np.pi * x * y) * x + np.sinh(2 * x * y))
    u_rhs[0, :] = 0;
    u_rhs[N - 1, :] = 0;
    u_rhs[:, N - 1] = 0;
    u_rhs[:, 0] = 0
    rhs = np.array([u_rhs])
    return rhs
def nleq1_l_coeff(current):
    """Linearized coefficients of nleq1 around the current iterate.

    Identical to nleq1_coeff except the zeroth-order term f picks up
    llt.d2x(u, N) — presumably the derivative of the solution-dependent
    coefficient b = 4 + u with respect to u (TODO confirm against the
    Newton linearization used by the solver).
    """
    N, M = current[0].shape
    z = np.linspace(0, 1, N)
    x, y = np.meshgrid(z, z, indexing='ij')
    ###
    a11 = 4 + np.exp(-x * y)
    # b depends on the current iterate itself (quasilinear term).
    b11 = 4 + current[0]
    c11 = np.exp(-2 * x * y)
    d11 = np.cos(np.pi * x * y)
    e11 = np.sin(np.pi * x * y)
    f11 = np.sinh(2 * x * y) + llt.d2x(current[0], N)
    ###
    coeff1 = [a11, b11, c11, d11, e11, f11]
    coeff = np.array([coeff1])
    return coeff
def nleq1_coeff(current):
    """Coefficients [a, b, c, d, e, f] of the scalar nonlinear operator;
    b = 4 + u depends on the current iterate."""
    n = current[0].shape[0]
    pts = np.linspace(0, 1, n)
    x, y = np.meshgrid(pts, pts, indexing='ij')
    coeffs = [4 + np.exp(-x * y),
              4 + current[0],
              np.exp(-2 * x * y),
              np.cos(np.pi * x * y),
              np.sin(np.pi * x * y),
              np.sinh(2 * x * y)]
    return np.array([coeffs])
################################################################################
def get_nonlinear(dim):
    """Build the nonlinear test equation.

    Parameters
    ----------
    dim: int
        1 for the scalar equation (nleq1_*), 2 for the coupled system (nleq_21_*).

    Raises
    ------
    ValueError
        For any other *dim*.  (The original used a module-level
        ``global nonlinear``, so an invalid *dim* either crashed with
        NameError or silently returned a stale result from a previous call.)
    """
    if dim == 2:
        return equation(nleq_21_coeff, nleq_21_rhs, 2, nleq_21_bc,
                        nleq_21_exact, nleq_21_lcoeff)
    if dim == 1:
        return equation(nleq1_coeff, nleq1_rhs, 1, nleq1_bc, nleq1_exact,
                        l_coeff=nleq1_l_coeff)
    raise ValueError("dim must be 1 or 2, got %r" % (dim,))
################################################################################
def trivial_harmonic_bc(current):
    """Identity-map boundary data for grid generation: u = x, v = y on the edges."""
    n = current[0].shape[0]
    pts = np.linspace(0, 1, n)
    xx, yy = np.meshgrid(pts, pts, indexing='ij')
    for field, target in zip(current, (xx, yy)):
        field[0, :], field[-1, :] = target[0, :], target[-1, :]
        field[:, 0], field[:, -1] = target[:, 0], target[:, -1]
    return current
def trivial_harmonic_rhs(current):
    """Homogeneous right-hand side with the same shape/dtype as *current*."""
    return np.zeros_like(current)
def basic_harmonic_coeff(current):
    """Frozen-coefficient harmonic operator for grid generation.

    Both equations share one principal part built from the current mapping's
    central differences; all lower-order and cross-coupling entries are zero.
    Returns a (2, 12, N, N) tensor in the [a..f self | a..f coupling] layout.
    """
    u, v = current
    n = u.shape[0]
    u_x, v_x = llt.dx(u, n), llt.dx(v, n)
    u_y, v_y = llt.dy(u, n), llt.dy(v, n)
    zeros = np.zeros((n, n))
    a = u_x ** 2 + v_x ** 2
    b = u_y ** 2 + v_y ** 2
    c = -2 * (u_x * u_y + v_x * v_y)
    principal = [a, b, c, zeros, zeros, zeros]
    off_block = [zeros] * 6
    return np.array([principal + off_block, off_block + principal])
def diagonal_metrics(current):
    """Euclidean (identity) metric tensor of shape (2, 2, N, N)."""
    n = current[0].shape[0]
    g = np.zeros((2, 2, n, n))
    g[0, 0] = g[1, 1] = 1
    return g
def harmonic_coeff(current, metrics=diagonal_metrics):
    """Coefficients of the harmonic-map grid-generation system in a general
    metric *g* supplied by ``metrics(current)``.

    The principal parts (a, b, c) are the metric-weighted quadratic forms of
    the mapping's first derivatives; the d/e entries carry derivatives of the
    metric scaled by R = J_xi**2 (squared Jacobian of the mapping).
    Returns a (2, 12, N, N) tensor in the [a..f self | a..f coupling] layout.
    """
    N, M = current[0].shape
    u, v = current
    # NOTE(review): z, x, y are computed but never used in this function.
    z = np.linspace(0, 1, N)
    x, y = np.meshgrid(z, z, indexing='ij')
    u_x, v_x = llt.dx(u, N), llt.dx(v, N)
    u_y, v_y = llt.dy(u, N), llt.dy(v, N)
    g = metrics(current)
    det_g_x = g[0, 0]*g[1, 1] - g[0, 1]*g[1, 0]
    J_xi = u_x*v_y - u_y*v_x
    R = J_xi**2
    ###
    a11 = g[0, 0]*u_x**2 + g[1, 1]*v_x**2 + 2*g[0, 1]*u_x*v_x
    b11 = g[0, 0]*u_y**2 + g[1, 1]*v_y**2 + 2*g[0, 1]*u_y*v_y
    c11 = -2*(g[0, 0]*u_x*u_y + g[1, 1]*v_x*v_y + g[0, 1]*(v_x*u_y + u_x*v_y))
    d11 = R*llt.dy(g[0, 1]/np.sqrt(det_g_x), N)
    e11 = -R*llt.dx(g[0, 1]/np.sqrt(det_g_x), N)
    f11 = np.zeros((N, N))
    ###
    a12 = np.zeros((N, N))
    b12 = np.zeros((N, N))
    c12 = np.zeros((N, N))
    d12 = R*llt.dy(g[1, 1]/np.sqrt(det_g_x), N)
    e12 = -R*llt.dx(g[1, 1]/np.sqrt(det_g_x), N)
    f12 = np.zeros((N, N))
    ###
    # The second equation shares the principal part of the first.
    a22 = a11
    b22 = b11
    c22 = c11
    d22 = -R*llt.dy(g[1, 0]/np.sqrt(det_g_x), N)
    e22 = R*llt.dx(g[1, 0]/np.sqrt(det_g_x), N)
    f22 = np.zeros((N, N))
    ###
    a21 = np.zeros((N, N))
    b21 = np.zeros((N, N))
    c21 = np.zeros((N, N))
    d21 = -R*llt.dy(g[0, 0]/np.sqrt(det_g_x), N)
    e21 = R*llt.dx(g[0, 0]/np.sqrt(det_g_x), N)
    f21 = np.zeros((N, N))
    ###
    coeff1 = [a11, b11, c11, d11, e11, f11, a12, b12, c12, d12, e12, f12]
    coeff2 = [a21, b21, c21, d21, e21, f21, a22, b22, c22, d22, e22, f22]
    coeff = np.array([coeff1, coeff2])
    return coeff
def winslow_coeff(current, metrics=diagonal_metrics):
    """Winslow-variant coefficients of the grid-generation system.

    Same structure as harmonic_coeff, but the metric-derivative terms use
    R = sqrt(det g) * J_xi**2 and divide the metric entries by det(g) rather
    than sqrt(det g).  Returns a (2, 12, N, N) coefficient tensor.
    """
    N, M = current[0].shape
    u, v = current
    # NOTE(review): z, x, y are computed but never used in this function.
    z = np.linspace(0, 1, N)
    x, y = np.meshgrid(z, z, indexing='ij')
    u_x, v_x = llt.dx(u, N), llt.dx(v, N)
    u_y, v_y = llt.dy(u, N), llt.dy(v, N)
    g = metrics(current)
    det_g_x = g[0, 0]*g[1, 1] - g[0, 1]*g[1, 0]
    J_xi = u_x*v_y - u_y*v_x
    R = np.sqrt(det_g_x)*J_xi**2
    ###
    a11 = g[0, 0]*u_x**2 + g[1, 1]*v_x**2 + 2*g[0, 1]*u_x*v_x
    b11 = g[0, 0]*u_y**2 + g[1, 1]*v_y**2 + 2*g[0, 1]*u_y*v_y
    c11 = -2*(g[0, 0]*u_x*u_y + g[1, 1]*v_x*v_y + g[0, 1]*(v_x*u_y + u_x*v_y))
    d11 = R*llt.dy(g[0, 1]/det_g_x, N)
    e11 = -R*llt.dx(g[0, 1]/det_g_x, N)
    f11 = np.zeros((N, N))
    ###
    a12 = np.zeros((N, N))
    b12 = np.zeros((N, N))
    c12 = np.zeros((N, N))
    d12 = R*llt.dy(g[1, 1]/det_g_x, N)
    e12 = -R*llt.dx(g[1, 1]/det_g_x, N)
    f12 = np.zeros((N, N))
    ###
    # The second equation shares the principal part of the first.
    a22 = a11
    b22 = b11
    c22 = c11
    d22 = -R*llt.dy(g[1, 0]/det_g_x, N)
    e22 = R*llt.dx(g[1, 0]/det_g_x, N)
    f22 = np.zeros((N, N))
    ###
    a21 = np.zeros((N, N))
    b21 = np.zeros((N, N))
    c21 = np.zeros((N, N))
    d21 = -R*llt.dy(g[0, 0]/det_g_x, N)
    e21 = R*llt.dx(g[0, 0]/det_g_x, N)
    f21 = np.zeros((N, N))
    ###
    coeff1 = [a11, b11, c11, d11, e11, f11, a12, b12, c12, d12, e12, f12]
    coeff2 = [a21, b21, c21, d21, e21, f21, a22, b22, c22, d22, e22, f22]
    coeff = np.array([coeff1, coeff2])
    return coeff
def advection_harmonic_rhs(current, metrics=diagonal_metrics):
    """Advection terms of the harmonic-map system, moved to the right-hand side.

    Fixes two defects in the original:

    * ``metrics(N)`` passed the grid size where every metrics function in
      this module (e.g. diagonal_metrics) indexes ``current[0]``; now calls
      ``metrics(current)`` exactly as harmonic_coeff/winslow_coeff do.
    * The continuation lines of both assignments were written as separate
      statements (missing ``\\``), so they evaluated to no-op expressions and
      silently dropped half of each component; each four-term sum is now a
      single parenthesized statement.  The term signs mirror the d/e entries
      of harmonic_coeff.
    """
    rhs = np.zeros_like(current)
    N, M = current[0].shape
    u, v = current
    u_x, v_x = llt.dx(u, N), llt.dx(v, N)
    u_y, v_y = llt.dy(u, N), llt.dy(v, N)
    g = metrics(current)
    det_g_x = g[0, 0] * g[1, 1] - g[0, 1] * g[1, 0]
    sqrt_det = np.sqrt(det_g_x)
    J_xi = u_x * v_y - u_y * v_x
    R = J_xi ** 2
    rhs[0] = (-R * llt.dy(g[0, 1] / sqrt_det, N) * u_x
              + R * llt.dx(g[0, 1] / sqrt_det, N) * u_y
              - R * llt.dy(g[1, 1] / sqrt_det, N) * v_x
              + R * llt.dx(g[1, 1] / sqrt_det, N) * v_y)
    rhs[1] = (R * llt.dy(g[1, 0] / sqrt_det, N) * v_x
              - R * llt.dx(g[1, 0] / sqrt_det, N) * v_y
              + R * llt.dy(g[0, 0] / sqrt_det, N) * u_x
              - R * llt.dx(g[0, 0] / sqrt_det, N) * u_y)
    return rhs
def advection_free_harmonic_coeff(current, metrics=diagonal_metrics):
    """Second-order coefficients of the harmonic system with advection dropped.

    Fix: the original called ``metrics(N)``, passing the grid size where every
    metrics function in this module indexes ``current[0]`` (see
    diagonal_metrics); it now calls ``metrics(current)`` like
    harmonic_coeff/winslow_coeff.  The unused det/Jacobian locals were removed.

    NOTE(review): unlike harmonic_coeff, here the a-entry is built from the
    y-derivatives and b from the x-derivatives; that ordering is preserved
    from the original — confirm which convention the solver expects.
    Returns a (2, 12, N, N) tensor in the [a..f self | a..f coupling] layout.
    """
    N, M = current[0].shape
    u, v = current
    u_x, v_x = llt.dx(u, N), llt.dx(v, N)
    u_y, v_y = llt.dy(u, N), llt.dy(v, N)
    g = metrics(current)
    zeros = np.zeros((N, N))
    a = g[0, 0] * u_y ** 2 + g[1, 1] * v_y ** 2 + 2 * g[0, 1] * u_y * v_y
    b = g[0, 0] * u_x ** 2 + g[1, 1] * v_x ** 2 + 2 * g[0, 1] * u_x * v_x
    c = -2 * (g[0, 0] * u_x * u_y + g[1, 1] * v_x * v_y
              + g[0, 1] * (v_x * u_y + u_x * v_y))
    principal = [a, b, c, zeros, zeros, zeros]
    off_block = [zeros] * 6
    return np.array([principal + off_block, off_block + principal])
def basic_mixed_harmonic_coeff(current):
    """Upwind/downwind variant of the frozen harmonic coefficients.

    The a and b entries pair forward and backward differences (a mixed
    one-sided product), while c keeps the central-difference cross term.
    Both equations share the same principal part; all other entries are zero.
    Returns a (2, 12, N, N) tensor in the [a..f self | a..f coupling] layout.
    """
    u, v = current
    n = u.shape[0]
    u_x, v_x = llt.dx(u, n), llt.dx(v, n)
    u_y, v_y = llt.dy(u, n), llt.dy(v, n)
    u_x_f, v_x_f = llt.dx_forward(u, n), llt.dx_forward(v, n)
    u_y_f, v_y_f = llt.dy_forward(u, n), llt.dy_forward(v, n)
    u_x_b, v_x_b = llt.dx_backward(u, n), llt.dx_backward(v, n)
    u_y_b, v_y_b = llt.dy_backward(u, n), llt.dy_backward(v, n)
    zeros = np.zeros((n, n))
    a = u_x_f * u_x_b + v_x_f * v_x_b
    b = u_y_f * u_y_b + v_y_f * v_y_b
    c = -2 * (u_x * u_y + v_x * v_y)
    principal = [a, b, c, zeros, zeros, zeros]
    off_block = [zeros] * 6
    return np.array([principal + off_block, off_block + principal])
def basic_fair_newton_harmonic_linear_coeff(current):
    """Newton linearization of the harmonic grid-generation operator.

    In addition to the frozen principal parts (a, b, c as in
    basic_harmonic_coeff), the d/e entries carry the derivatives of the
    coefficients with respect to the mapping, built from second differences
    of the current iterate.  Returns a (2, 12, N, N) tensor in the
    [a..f self | a..f coupling] layout.

    Fix: removed the unused locals ``u0``/``v0`` from the original.
    """
    N, M = current[0].shape
    u, v = current
    u_x, v_x = llt.dx(u, N), llt.dx(v, N)
    u_y, v_y = llt.dy(u, N), llt.dy(v, N)
    u_xx, v_xx = llt.d2x(u, N), llt.d2x(v, N)
    u_yy, v_yy = llt.d2y(u, N), llt.d2y(v, N)
    u_xy, v_xy = llt.dxdy(u, N), llt.dxdy(v, N)
    ###
    a11 = u_x ** 2 + v_x ** 2
    b11 = u_y ** 2 + v_y ** 2
    c11 = -2 * (u_x * u_y + v_x * v_y)
    d11 = 2 * (u_yy * u_x - u_xy * u_y)
    e11 = 2 * (u_xx * u_y - u_xy * u_x)
    f11 = np.zeros((N, N))
    ###
    a12 = np.zeros((N, N))
    b12 = np.zeros((N, N))
    c12 = np.zeros((N, N))
    d12 = 2 * (u_yy * v_x - u_xy * v_y)
    e12 = 2 * (u_xx * v_y - u_xy * v_x)
    f12 = np.zeros((N, N))
    ###
    # Second equation: same frozen principal part, v-based Newton terms.
    a22 = u_x ** 2 + v_x ** 2
    b22 = u_y ** 2 + v_y ** 2
    c22 = -2 * (u_x * u_y + v_x * v_y)
    d22 = 2 * (v_yy * v_x - v_xy * v_y)
    e22 = 2 * (v_xx * v_y - v_xy * v_x)
    f22 = np.zeros((N, N))
    ###
    a21 = np.zeros((N, N))
    b21 = np.zeros((N, N))
    c21 = np.zeros((N, N))
    d21 = 2 * (v_yy * u_x - v_xy * u_y)
    e21 = 2 * (v_xx * u_y - v_xy * u_x)
    f21 = np.zeros((N, N))
    ###
    coeff1 = [a11, b11, c11, d11, e11, f11, a12, b12, c12, d12, e12, f12]
    coeff2 = [a21, b21, c21, d21, e21, f21, a22, b22, c22, d22, e22, f22]
    return np.array([coeff1, coeff2])
################################################################################
def get_harmonic(name):
    """Construct the harmonic_equation configured for the given scheme name.

    Parameters
    ----------
    name : str
        One of the recognized scheme names (e.g. ``'Frozen Metric'``,
        ``'Winslow Frozen Metric'``, ``'Upwind + Downwind Frozen Metric'``).

    Returns
    -------
    harmonic_equation
        The configured equation object.

    Raises
    ------
    ValueError
        If ``name`` is not a recognized scheme.  (Previously an unknown
        name silently returned a stale module-level ``res`` or raised
        ``NameError``.)
    """
    # ``global res`` is kept for backward compatibility: other code may
    # read the module-level ``res`` after this call.
    global res
    if name == '<NAME>':
        res = harmonic_equation(basic_harmonic_coeff, trivial_harmonic_rhs, 2, bc=trivial_harmonic_bc,
                                l_coeff=basic_fair_newton_harmonic_linear_coeff)
    elif name == 'Frozen Metric':
        res = harmonic_equation(basic_harmonic_coeff, trivial_harmonic_rhs, 2, bc=trivial_harmonic_bc)
    elif name in ('Harmonic Frozen Metric', 'Harmonic with Frozen Metric'):
        # Both names historically mapped to the same configuration.
        res = harmonic_equation(harmonic_coeff, trivial_harmonic_rhs, 2, bc=trivial_harmonic_bc)
    elif name == 'Winslow Frozen Metric':
        res = harmonic_equation(winslow_coeff, trivial_harmonic_rhs, 2, bc=trivial_harmonic_bc)
    elif name == 'Upwind + Downwind Frozen Metric':
        res = harmonic_equation(basic_mixed_harmonic_coeff, trivial_harmonic_rhs, 2, bc=trivial_harmonic_bc)
    else:
        raise ValueError(f'unknown harmonic equation name: {name!r}')
    return res
| [
"numpy.sqrt",
"numpy.log",
"numpy.sinh",
"numpy.array",
"low_level_tools.dx_forward",
"numpy.sin",
"harmonic_equation.harmonic_equation",
"low_level_tools.d2y",
"low_level_tools.dy_forward",
"low_level_tools.dxdy",
"numpy.exp",
"numpy.linspace",
"low_level_tools.dx",
"numpy.meshgrid",
"n... | [((269, 289), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (280, 289), True, 'import numpy as np\n'), ((301, 333), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (312, 333), True, 'import numpy as np\n'), ((348, 361), 'numpy.exp', 'np.exp', (['(x * y)'], {}), '(x * y)\n', (354, 361), True, 'import numpy as np\n'), ((597, 617), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (608, 617), True, 'import numpy as np\n'), ((629, 661), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (640, 661), True, 'import numpy as np\n'), ((676, 689), 'numpy.exp', 'np.exp', (['(x * y)'], {}), '(x * y)\n', (682, 689), True, 'import numpy as np\n'), ((701, 720), 'numpy.array', 'np.array', (['[u_exact]'], {}), '([u_exact])\n', (709, 720), True, 'import numpy as np\n'), ((783, 803), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (794, 803), True, 'import numpy as np\n'), ((815, 847), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (826, 847), True, 'import numpy as np\n'), ((1142, 1159), 'numpy.array', 'np.array', (['[u_rhs]'], {}), '([u_rhs])\n', (1150, 1159), True, 'import numpy as np\n'), ((1239, 1259), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (1250, 1259), True, 'import numpy as np\n'), ((1271, 1303), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (1282, 1303), True, 'import numpy as np\n'), ((1382, 1396), 'numpy.exp', 'np.exp', (['(-x * y)'], {}), '(-x * y)\n', (1388, 1396), True, 'import numpy as np\n'), ((1407, 1416), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (1413, 1416), True, 'import numpy as np\n'), ((1427, 1436), 'numpy.exp', 'np.exp', (['y'], {}), '(y)\n', (1433, 1436), True, 'import numpy as np\n'), ((1447, 1468), 'numpy.sin', 'np.sin', 
(['(np.pi * x * y)'], {}), '(np.pi * x * y)\n', (1453, 1468), True, 'import numpy as np\n'), ((1533, 1551), 'numpy.array', 'np.array', (['[coeff1]'], {}), '([coeff1])\n', (1541, 1551), True, 'import numpy as np\n'), ((1717, 1737), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (1728, 1737), True, 'import numpy as np\n'), ((1749, 1781), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (1760, 1781), True, 'import numpy as np\n'), ((1796, 1809), 'numpy.exp', 'np.exp', (['(x * y)'], {}), '(x * y)\n', (1802, 1809), True, 'import numpy as np\n'), ((2050, 2070), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (2061, 2070), True, 'import numpy as np\n'), ((2082, 2114), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (2093, 2114), True, 'import numpy as np\n'), ((2129, 2142), 'numpy.exp', 'np.exp', (['(x * y)'], {}), '(x * y)\n', (2135, 2142), True, 'import numpy as np\n'), ((2154, 2173), 'numpy.array', 'np.array', (['[u_exact]'], {}), '([u_exact])\n', (2162, 2173), True, 'import numpy as np\n'), ((2246, 2266), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (2257, 2266), True, 'import numpy as np\n'), ((2278, 2310), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (2289, 2310), True, 'import numpy as np\n'), ((2453, 2470), 'numpy.array', 'np.array', (['[u_rhs]'], {}), '([u_rhs])\n', (2461, 2470), True, 'import numpy as np\n'), ((2560, 2580), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (2571, 2580), True, 'import numpy as np\n'), ((2592, 2624), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (2603, 2624), True, 'import numpy as np\n'), ((2643, 2658), 'numpy.ones', 'np.ones', (['(N, N)'], {}), '((N, N))\n', (2650, 2658), True, 'import numpy as np\n'), ((2669, 
2684), 'numpy.ones', 'np.ones', (['(N, N)'], {}), '((N, N))\n', (2676, 2684), True, 'import numpy as np\n'), ((2695, 2711), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (2703, 2711), True, 'import numpy as np\n'), ((2750, 2766), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (2758, 2766), True, 'import numpy as np\n'), ((2777, 2793), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (2785, 2793), True, 'import numpy as np\n'), ((2858, 2876), 'numpy.array', 'np.array', (['[coeff1]'], {}), '([coeff1])\n', (2866, 2876), True, 'import numpy as np\n'), ((3037, 3057), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (3048, 3057), True, 'import numpy as np\n'), ((3069, 3101), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (3080, 3101), True, 'import numpy as np\n'), ((3393, 3413), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (3404, 3413), True, 'import numpy as np\n'), ((3425, 3457), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (3436, 3457), True, 'import numpy as np\n'), ((3525, 3544), 'numpy.array', 'np.array', (['[u_exact]'], {}), '([u_exact])\n', (3533, 3544), True, 'import numpy as np\n'), ((3607, 3627), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (3618, 3627), True, 'import numpy as np\n'), ((3639, 3671), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (3650, 3671), True, 'import numpy as np\n'), ((4241, 4258), 'numpy.array', 'np.array', (['[u_rhs]'], {}), '([u_rhs])\n', (4249, 4258), True, 'import numpy as np\n'), ((4338, 4358), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (4349, 4358), True, 'import numpy as np\n'), ((4370, 4402), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (4381, 4402), True, 'import 
numpy as np\n'), ((4490, 4503), 'numpy.exp', 'np.exp', (['(y * x)'], {}), '(y * x)\n', (4496, 4503), True, 'import numpy as np\n'), ((4562, 4580), 'numpy.sinh', 'np.sinh', (['(x + 3 * y)'], {}), '(x + 3 * y)\n', (4569, 4580), True, 'import numpy as np\n'), ((4645, 4663), 'numpy.array', 'np.array', (['[coeff1]'], {}), '([coeff1])\n', (4653, 4663), True, 'import numpy as np\n'), ((4824, 4844), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (4835, 4844), True, 'import numpy as np\n'), ((4856, 4888), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (4867, 4888), True, 'import numpy as np\n'), ((4903, 4916), 'numpy.exp', 'np.exp', (['(x + y)'], {}), '(x + y)\n', (4909, 4916), True, 'import numpy as np\n'), ((5152, 5172), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (5163, 5172), True, 'import numpy as np\n'), ((5184, 5216), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (5195, 5216), True, 'import numpy as np\n'), ((5231, 5244), 'numpy.exp', 'np.exp', (['(x + y)'], {}), '(x + y)\n', (5237, 5244), True, 'import numpy as np\n'), ((5256, 5275), 'numpy.array', 'np.array', (['[u_exact]'], {}), '([u_exact])\n', (5264, 5275), True, 'import numpy as np\n'), ((5338, 5358), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (5349, 5358), True, 'import numpy as np\n'), ((5370, 5402), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (5381, 5402), True, 'import numpy as np\n'), ((5676, 5693), 'numpy.array', 'np.array', (['[u_rhs]'], {}), '([u_rhs])\n', (5684, 5693), True, 'import numpy as np\n'), ((5773, 5793), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (5784, 5793), True, 'import numpy as np\n'), ((5805, 5837), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (5816, 
5837), True, 'import numpy as np\n'), ((5932, 5946), 'numpy.exp', 'np.exp', (['(-x * y)'], {}), '(-x * y)\n', (5938, 5946), True, 'import numpy as np\n'), ((5957, 5966), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (5963, 5966), True, 'import numpy as np\n'), ((5977, 5986), 'numpy.exp', 'np.exp', (['y'], {}), '(y)\n', (5983, 5986), True, 'import numpy as np\n'), ((6087, 6105), 'numpy.array', 'np.array', (['[coeff1]'], {}), '([coeff1])\n', (6095, 6105), True, 'import numpy as np\n'), ((6266, 6286), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (6277, 6286), True, 'import numpy as np\n'), ((6298, 6330), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (6309, 6330), True, 'import numpy as np\n'), ((6594, 6614), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (6605, 6614), True, 'import numpy as np\n'), ((6626, 6658), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (6637, 6658), True, 'import numpy as np\n'), ((6698, 6717), 'numpy.array', 'np.array', (['[u_exact]'], {}), '([u_exact])\n', (6706, 6717), True, 'import numpy as np\n'), ((6780, 6800), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (6791, 6800), True, 'import numpy as np\n'), ((6812, 6844), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (6823, 6844), True, 'import numpy as np\n'), ((7135, 7152), 'numpy.array', 'np.array', (['[u_rhs]'], {}), '([u_rhs])\n', (7143, 7152), True, 'import numpy as np\n'), ((7232, 7252), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (7243, 7252), True, 'import numpy as np\n'), ((7264, 7296), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (7275, 7296), True, 'import numpy as np\n'), ((7556, 7574), 'numpy.array', 'np.array', (['[coeff1]'], {}), '([coeff1])\n', (7564, 
7574), True, 'import numpy as np\n'), ((7735, 7755), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (7746, 7755), True, 'import numpy as np\n'), ((7767, 7799), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (7778, 7799), True, 'import numpy as np\n'), ((7814, 7827), 'numpy.exp', 'np.exp', (['(x + y)'], {}), '(x + y)\n', (7820, 7827), True, 'import numpy as np\n'), ((8063, 8083), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (8074, 8083), True, 'import numpy as np\n'), ((8095, 8127), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (8106, 8127), True, 'import numpy as np\n'), ((8142, 8155), 'numpy.exp', 'np.exp', (['(x + y)'], {}), '(x + y)\n', (8148, 8155), True, 'import numpy as np\n'), ((8167, 8186), 'numpy.array', 'np.array', (['[u_exact]'], {}), '([u_exact])\n', (8175, 8186), True, 'import numpy as np\n'), ((8249, 8269), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (8260, 8269), True, 'import numpy as np\n'), ((8281, 8313), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (8292, 8313), True, 'import numpy as np\n'), ((8566, 8583), 'numpy.array', 'np.array', (['[u_rhs]'], {}), '([u_rhs])\n', (8574, 8583), True, 'import numpy as np\n'), ((8663, 8683), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (8674, 8683), True, 'import numpy as np\n'), ((8695, 8727), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (8706, 8727), True, 'import numpy as np\n'), ((8823, 8839), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (8831, 8839), True, 'import numpy as np\n'), ((8850, 8877), 'numpy.cos', 'np.cos', (['(np.pi * (x + 2 * y))'], {}), '(np.pi * (x + 2 * y))\n', (8856, 8877), True, 'import numpy as np\n'), ((8884, 8911), 'numpy.sin', 'np.sin', 
(['(np.pi * (y + 2 * x))'], {}), '(np.pi * (y + 2 * x))\n', (8890, 8911), True, 'import numpy as np\n'), ((8918, 8934), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (8926, 8934), True, 'import numpy as np\n'), ((8999, 9017), 'numpy.array', 'np.array', (['[coeff1]'], {}), '([coeff1])\n', (9007, 9017), True, 'import numpy as np\n'), ((9177, 9197), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (9188, 9197), True, 'import numpy as np\n'), ((9209, 9241), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (9220, 9241), True, 'import numpy as np\n'), ((9256, 9269), 'numpy.exp', 'np.exp', (['(x * y)'], {}), '(x * y)\n', (9262, 9269), True, 'import numpy as np\n'), ((9284, 9301), 'numpy.exp', 'np.exp', (['(2 * x * y)'], {}), '(2 * x * y)\n', (9290, 9301), True, 'import numpy as np\n'), ((9713, 9733), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (9724, 9733), True, 'import numpy as np\n'), ((9745, 9777), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (9756, 9777), True, 'import numpy as np\n'), ((9792, 9805), 'numpy.exp', 'np.exp', (['(x * y)'], {}), '(x * y)\n', (9798, 9805), True, 'import numpy as np\n'), ((9820, 9837), 'numpy.exp', 'np.exp', (['(2 * x * y)'], {}), '(2 * x * y)\n', (9826, 9837), True, 'import numpy as np\n'), ((9850, 9878), 'numpy.array', 'np.array', (['[u_exact, v_exact]'], {}), '([u_exact, v_exact])\n', (9858, 9878), True, 'import numpy as np\n'), ((9958, 9978), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (9969, 9978), True, 'import numpy as np\n'), ((9990, 10022), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (10001, 10022), True, 'import numpy as np\n'), ((10566, 10590), 'numpy.array', 'np.array', (['[u_rhs, v_rhs]'], {}), '([u_rhs, v_rhs])\n', (10574, 10590), True, 'import numpy as np\n'), 
((10670, 10690), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (10681, 10690), True, 'import numpy as np\n'), ((10702, 10734), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (10713, 10734), True, 'import numpy as np\n'), ((10825, 10846), 'numpy.cos', 'np.cos', (['(np.pi * x * y)'], {}), '(np.pi * x * y)\n', (10831, 10846), True, 'import numpy as np\n'), ((10913, 10929), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (10921, 10929), True, 'import numpy as np\n'), ((10948, 10964), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (10956, 10964), True, 'import numpy as np\n'), ((10975, 10991), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (10983, 10991), True, 'import numpy as np\n'), ((11002, 11018), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (11010, 11018), True, 'import numpy as np\n'), ((11029, 11045), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (11037, 11045), True, 'import numpy as np\n'), ((11056, 11072), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (11064, 11072), True, 'import numpy as np\n'), ((11274, 11295), 'numpy.sin', 'np.sin', (['(np.pi * x * y)'], {}), '(np.pi * x * y)\n', (11280, 11295), True, 'import numpy as np\n'), ((11350, 11366), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (11358, 11366), True, 'import numpy as np\n'), ((11385, 11401), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (11393, 11401), True, 'import numpy as np\n'), ((11412, 11428), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (11420, 11428), True, 'import numpy as np\n'), ((11439, 11455), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (11447, 11455), True, 'import numpy as np\n'), ((11466, 11482), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (11474, 11482), True, 'import numpy as np\n'), ((11493, 11509), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, 
N))\n', (11501, 11509), True, 'import numpy as np\n'), ((11520, 11535), 'numpy.ones', 'np.ones', (['(N, N)'], {}), '((N, N))\n', (11527, 11535), True, 'import numpy as np\n'), ((11704, 11730), 'numpy.array', 'np.array', (['[coeff1, coeff2]'], {}), '([coeff1, coeff2])\n', (11712, 11730), True, 'import numpy as np\n'), ((11891, 11911), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (11902, 11911), True, 'import numpy as np\n'), ((11923, 11955), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (11934, 11955), True, 'import numpy as np\n'), ((11970, 11983), 'numpy.exp', 'np.exp', (['(x * y)'], {}), '(x * y)\n', (11976, 11983), True, 'import numpy as np\n'), ((11998, 12011), 'numpy.exp', 'np.exp', (['(x + y)'], {}), '(x + y)\n', (12004, 12011), True, 'import numpy as np\n'), ((12423, 12443), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (12434, 12443), True, 'import numpy as np\n'), ((12455, 12487), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (12466, 12487), True, 'import numpy as np\n'), ((12502, 12515), 'numpy.exp', 'np.exp', (['(x * y)'], {}), '(x * y)\n', (12508, 12515), True, 'import numpy as np\n'), ((12530, 12543), 'numpy.exp', 'np.exp', (['(x + y)'], {}), '(x + y)\n', (12536, 12543), True, 'import numpy as np\n'), ((12556, 12584), 'numpy.array', 'np.array', (['[u_exact, v_exact]'], {}), '([u_exact, v_exact])\n', (12564, 12584), True, 'import numpy as np\n'), ((12664, 12684), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (12675, 12684), True, 'import numpy as np\n'), ((12696, 12728), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (12707, 12728), True, 'import numpy as np\n'), ((13664, 13688), 'numpy.array', 'np.array', (['[u_rhs, v_rhs]'], {}), '([u_rhs, v_rhs])\n', (13672, 13688), True, 'import numpy as np\n'), ((13768, 
13788), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (13779, 13788), True, 'import numpy as np\n'), ((13800, 13832), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (13811, 13832), True, 'import numpy as np\n'), ((13911, 13925), 'numpy.exp', 'np.exp', (['(-x * y)'], {}), '(-x * y)\n', (13917, 13925), True, 'import numpy as np\n'), ((13936, 13945), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (13942, 13945), True, 'import numpy as np\n'), ((13956, 13965), 'numpy.exp', 'np.exp', (['y'], {}), '(y)\n', (13962, 13965), True, 'import numpy as np\n'), ((13976, 13997), 'numpy.sin', 'np.sin', (['(np.pi * x * y)'], {}), '(np.pi * x * y)\n', (13982, 13997), True, 'import numpy as np\n'), ((14092, 14106), 'numpy.exp', 'np.exp', (['(-x * y)'], {}), '(-x * y)\n', (14098, 14106), True, 'import numpy as np\n'), ((14117, 14126), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (14123, 14126), True, 'import numpy as np\n'), ((14137, 14146), 'numpy.exp', 'np.exp', (['y'], {}), '(y)\n', (14143, 14146), True, 'import numpy as np\n'), ((14297, 14314), 'numpy.log', 'np.log', (['(1 + x * y)'], {}), '(1 + x * y)\n', (14303, 14314), True, 'import numpy as np\n'), ((14482, 14503), 'numpy.cos', 'np.cos', (['(np.pi * x * y)'], {}), '(np.pi * x * y)\n', (14488, 14503), True, 'import numpy as np\n'), ((14570, 14586), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (14578, 14586), True, 'import numpy as np\n'), ((14755, 14781), 'numpy.array', 'np.array', (['[coeff1, coeff2]'], {}), '([coeff1, coeff2])\n', (14763, 14781), True, 'import numpy as np\n'), ((16978, 16998), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (16989, 16998), True, 'import numpy as np\n'), ((17010, 17042), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (17021, 17042), True, 'import numpy as np\n'), ((17057, 17071), 'numpy.exp', 'np.exp', (['(-x * y)'], {}), 
'(-x * y)\n', (17063, 17071), True, 'import numpy as np\n'), ((17086, 17104), 'numpy.exp', 'np.exp', (['(-2 * x * y)'], {}), '(-2 * x * y)\n', (17092, 17104), True, 'import numpy as np\n'), ((17518, 17538), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (17529, 17538), True, 'import numpy as np\n'), ((17550, 17582), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (17561, 17582), True, 'import numpy as np\n'), ((17597, 17611), 'numpy.exp', 'np.exp', (['(-x * y)'], {}), '(-x * y)\n', (17603, 17611), True, 'import numpy as np\n'), ((17626, 17644), 'numpy.exp', 'np.exp', (['(-2 * x * y)'], {}), '(-2 * x * y)\n', (17632, 17644), True, 'import numpy as np\n'), ((17657, 17685), 'numpy.array', 'np.array', (['[u_exact, v_exact]'], {}), '([u_exact, v_exact])\n', (17665, 17685), True, 'import numpy as np\n'), ((17767, 17787), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (17778, 17787), True, 'import numpy as np\n'), ((17799, 17831), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (17810, 17831), True, 'import numpy as np\n'), ((18254, 18278), 'numpy.array', 'np.array', (['[u_rhs, v_rhs]'], {}), '([u_rhs, v_rhs])\n', (18262, 18278), True, 'import numpy as np\n'), ((18360, 18380), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (18371, 18380), True, 'import numpy as np\n'), ((18411, 18443), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (18422, 18443), True, 'import numpy as np\n'), ((18462, 18479), 'numpy.exp', 'np.exp', (['(2 * x * y)'], {}), '(2 * x * y)\n', (18468, 18479), True, 'import numpy as np\n'), ((18490, 18503), 'numpy.exp', 'np.exp', (['(x * y)'], {}), '(x * y)\n', (18496, 18503), True, 'import numpy as np\n'), ((18514, 18530), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (18522, 18530), True, 'import numpy as 
np\n'), ((18541, 18557), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (18549, 18557), True, 'import numpy as np\n'), ((18568, 18584), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (18576, 18584), True, 'import numpy as np\n'), ((18595, 18611), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (18603, 18611), True, 'import numpy as np\n'), ((18630, 18646), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (18638, 18646), True, 'import numpy as np\n'), ((18657, 18673), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (18665, 18673), True, 'import numpy as np\n'), ((18684, 18700), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (18692, 18700), True, 'import numpy as np\n'), ((18711, 18727), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (18719, 18727), True, 'import numpy as np\n'), ((18738, 18754), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (18746, 18754), True, 'import numpy as np\n'), ((18826, 18841), 'numpy.ones', 'np.ones', (['(N, N)'], {}), '((N, N))\n', (18833, 18841), True, 'import numpy as np\n'), ((18852, 18868), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (18860, 18868), True, 'import numpy as np\n'), ((18909, 18925), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (18917, 18925), True, 'import numpy as np\n'), ((18936, 18952), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (18944, 18952), True, 'import numpy as np\n'), ((18971, 18987), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (18979, 18987), True, 'import numpy as np\n'), ((18998, 19014), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (19006, 19014), True, 'import numpy as np\n'), ((19025, 19041), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (19033, 19041), True, 'import numpy as np\n'), ((19052, 19068), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (19060, 19068), True, 'import numpy as np\n'), ((19079, 19095), 
'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (19087, 19095), True, 'import numpy as np\n'), ((19297, 19323), 'numpy.array', 'np.array', (['[coeff1, coeff2]'], {}), '([coeff1, coeff2])\n', (19305, 19323), True, 'import numpy as np\n'), ((19427, 19447), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (19438, 19447), True, 'import numpy as np\n'), ((19459, 19491), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (19470, 19491), True, 'import numpy as np\n'), ((19510, 19527), 'numpy.exp', 'np.exp', (['(2 * x * y)'], {}), '(2 * x * y)\n', (19516, 19527), True, 'import numpy as np\n'), ((19538, 19551), 'numpy.exp', 'np.exp', (['(x * y)'], {}), '(x * y)\n', (19544, 19551), True, 'import numpy as np\n'), ((19562, 19578), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (19570, 19578), True, 'import numpy as np\n'), ((19589, 19605), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (19597, 19605), True, 'import numpy as np\n'), ((19616, 19632), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (19624, 19632), True, 'import numpy as np\n'), ((19668, 19684), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (19676, 19684), True, 'import numpy as np\n'), ((19695, 19711), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (19703, 19711), True, 'import numpy as np\n'), ((19722, 19738), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (19730, 19738), True, 'import numpy as np\n'), ((19749, 19765), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (19757, 19765), True, 'import numpy as np\n'), ((19776, 19792), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (19784, 19792), True, 'import numpy as np\n'), ((19868, 19883), 'numpy.ones', 'np.ones', (['(N, N)'], {}), '((N, N))\n', (19875, 19883), True, 'import numpy as np\n'), ((19894, 19910), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (19902, 19910), 
True, 'import numpy as np\n'), ((19951, 19967), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (19959, 19967), True, 'import numpy as np\n'), ((20020, 20036), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (20028, 20036), True, 'import numpy as np\n'), ((20047, 20063), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (20055, 20063), True, 'import numpy as np\n'), ((20074, 20090), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (20082, 20090), True, 'import numpy as np\n'), ((20101, 20117), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (20109, 20117), True, 'import numpy as np\n'), ((20128, 20144), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (20136, 20144), True, 'import numpy as np\n'), ((20350, 20376), 'numpy.array', 'np.array', (['[coeff1, coeff2]'], {}), '([coeff1, coeff2])\n', (20358, 20376), True, 'import numpy as np\n'), ((20537, 20557), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (20548, 20557), True, 'import numpy as np\n'), ((20569, 20601), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (20580, 20601), True, 'import numpy as np\n'), ((20616, 20629), 'numpy.exp', 'np.exp', (['(x * y)'], {}), '(x * y)\n', (20622, 20629), True, 'import numpy as np\n'), ((20865, 20885), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (20876, 20885), True, 'import numpy as np\n'), ((20897, 20929), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (20908, 20929), True, 'import numpy as np\n'), ((20944, 20957), 'numpy.exp', 'np.exp', (['(x * y)'], {}), '(x * y)\n', (20950, 20957), True, 'import numpy as np\n'), ((20969, 20988), 'numpy.array', 'np.array', (['[u_exact]'], {}), '([u_exact])\n', (20977, 20988), True, 'import numpy as np\n'), ((21051, 21071), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (21062, 21071), True, 
'import numpy as np\n'), ((21083, 21115), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (21094, 21115), True, 'import numpy as np\n'), ((21468, 21485), 'numpy.array', 'np.array', (['[u_rhs]'], {}), '([u_rhs])\n', (21476, 21485), True, 'import numpy as np\n'), ((21567, 21587), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (21578, 21587), True, 'import numpy as np\n'), ((21599, 21631), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (21610, 21631), True, 'import numpy as np\n'), ((21704, 21722), 'numpy.exp', 'np.exp', (['(-2 * x * y)'], {}), '(-2 * x * y)\n', (21710, 21722), True, 'import numpy as np\n'), ((21733, 21754), 'numpy.cos', 'np.cos', (['(np.pi * x * y)'], {}), '(np.pi * x * y)\n', (21739, 21754), True, 'import numpy as np\n'), ((21765, 21786), 'numpy.sin', 'np.sin', (['(np.pi * x * y)'], {}), '(np.pi * x * y)\n', (21771, 21786), True, 'import numpy as np\n'), ((21905, 21923), 'numpy.array', 'np.array', (['[coeff1]'], {}), '([coeff1])\n', (21913, 21923), True, 'import numpy as np\n'), ((22005, 22025), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (22016, 22025), True, 'import numpy as np\n'), ((22037, 22069), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (22048, 22069), True, 'import numpy as np\n'), ((22142, 22160), 'numpy.exp', 'np.exp', (['(-2 * x * y)'], {}), '(-2 * x * y)\n', (22148, 22160), True, 'import numpy as np\n'), ((22171, 22192), 'numpy.cos', 'np.cos', (['(np.pi * x * y)'], {}), '(np.pi * x * y)\n', (22177, 22192), True, 'import numpy as np\n'), ((22203, 22224), 'numpy.sin', 'np.sin', (['(np.pi * x * y)'], {}), '(np.pi * x * y)\n', (22209, 22224), True, 'import numpy as np\n'), ((22235, 22253), 'numpy.sinh', 'np.sinh', (['(2 * x * y)'], {}), '(2 * x * y)\n', (22242, 22253), True, 'import numpy as np\n'), ((22318, 22336), 
'numpy.array', 'np.array', (['[coeff1]'], {}), '([coeff1])\n', (22326, 22336), True, 'import numpy as np\n'), ((22898, 22918), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (22909, 22918), True, 'import numpy as np\n'), ((22930, 22962), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (22941, 22962), True, 'import numpy as np\n'), ((23389, 23411), 'numpy.zeros_like', 'np.zeros_like', (['current'], {}), '(current)\n', (23402, 23411), True, 'import numpy as np\n'), ((23696, 23712), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (23704, 23712), True, 'import numpy as np\n'), ((23723, 23739), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (23731, 23739), True, 'import numpy as np\n'), ((23750, 23766), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (23758, 23766), True, 'import numpy as np\n'), ((23785, 23801), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (23793, 23801), True, 'import numpy as np\n'), ((23812, 23828), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (23820, 23828), True, 'import numpy as np\n'), ((23839, 23855), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (23847, 23855), True, 'import numpy as np\n'), ((23866, 23882), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (23874, 23882), True, 'import numpy as np\n'), ((23893, 23909), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (23901, 23909), True, 'import numpy as np\n'), ((23920, 23936), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (23928, 23936), True, 'import numpy as np\n'), ((24054, 24070), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (24062, 24070), True, 'import numpy as np\n'), ((24081, 24097), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (24089, 24097), True, 'import numpy as np\n'), ((24108, 24124), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (24116, 24124), True, 
'import numpy as np\n'), ((24143, 24159), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (24151, 24159), True, 'import numpy as np\n'), ((24170, 24186), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (24178, 24186), True, 'import numpy as np\n'), ((24197, 24213), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (24205, 24213), True, 'import numpy as np\n'), ((24224, 24240), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (24232, 24240), True, 'import numpy as np\n'), ((24251, 24267), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (24259, 24267), True, 'import numpy as np\n'), ((24278, 24294), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (24286, 24294), True, 'import numpy as np\n'), ((24463, 24489), 'numpy.array', 'np.array', (['[coeff1, coeff2]'], {}), '([coeff1, coeff2])\n', (24471, 24489), True, 'import numpy as np\n'), ((24575, 24597), 'numpy.zeros', 'np.zeros', (['(2, 2, N, N)'], {}), '((2, 2, N, N))\n', (24583, 24597), True, 'import numpy as np\n'), ((24750, 24770), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (24761, 24770), True, 'import numpy as np\n'), ((24782, 24814), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (24793, 24814), True, 'import numpy as np\n'), ((25335, 25351), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (25343, 25351), True, 'import numpy as np\n'), ((25370, 25386), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (25378, 25386), True, 'import numpy as np\n'), ((25397, 25413), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (25405, 25413), True, 'import numpy as np\n'), ((25424, 25440), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (25432, 25440), True, 'import numpy as np\n'), ((25548, 25564), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (25556, 25564), True, 'import numpy as np\n'), ((25722, 25738), 'numpy.zeros', 
'np.zeros', (['(N, N)'], {}), '((N, N))\n', (25730, 25738), True, 'import numpy as np\n'), ((25757, 25773), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (25765, 25773), True, 'import numpy as np\n'), ((25784, 25800), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (25792, 25800), True, 'import numpy as np\n'), ((25811, 25827), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (25819, 25827), True, 'import numpy as np\n'), ((25935, 25951), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (25943, 25951), True, 'import numpy as np\n'), ((26120, 26146), 'numpy.array', 'np.array', (['[coeff1, coeff2]'], {}), '([coeff1, coeff2])\n', (26128, 26146), True, 'import numpy as np\n'), ((26274, 26294), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (26285, 26294), True, 'import numpy as np\n'), ((26306, 26338), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (26317, 26338), True, 'import numpy as np\n'), ((26858, 26874), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (26866, 26874), True, 'import numpy as np\n'), ((26893, 26909), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (26901, 26909), True, 'import numpy as np\n'), ((26920, 26936), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (26928, 26936), True, 'import numpy as np\n'), ((26947, 26963), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (26955, 26963), True, 'import numpy as np\n'), ((27053, 27069), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (27061, 27069), True, 'import numpy as np\n'), ((27209, 27225), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (27217, 27225), True, 'import numpy as np\n'), ((27244, 27260), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (27252, 27260), True, 'import numpy as np\n'), ((27271, 27287), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (27279, 27287), True, 'import 
numpy as np\n'), ((27298, 27314), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (27306, 27314), True, 'import numpy as np\n'), ((27404, 27420), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (27412, 27420), True, 'import numpy as np\n'), ((27589, 27615), 'numpy.array', 'np.array', (['[coeff1, coeff2]'], {}), '([coeff1, coeff2])\n', (27597, 27615), True, 'import numpy as np\n'), ((27707, 27729), 'numpy.zeros_like', 'np.zeros_like', (['current'], {}), '(current)\n', (27720, 27729), True, 'import numpy as np\n'), ((27785, 27805), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (27796, 27805), True, 'import numpy as np\n'), ((27817, 27849), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (27828, 27849), True, 'import numpy as np\n'), ((28570, 28590), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (28581, 28590), True, 'import numpy as np\n'), ((28602, 28634), 'numpy.meshgrid', 'np.meshgrid', (['z', 'z'], {'indexing': '"""ij"""'}), "(z, z, indexing='ij')\n", (28613, 28634), True, 'import numpy as np\n'), ((29052, 29068), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (29060, 29068), True, 'import numpy as np\n'), ((29079, 29095), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (29087, 29095), True, 'import numpy as np\n'), ((29106, 29122), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (29114, 29122), True, 'import numpy as np\n'), ((29141, 29157), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (29149, 29157), True, 'import numpy as np\n'), ((29168, 29184), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (29176, 29184), True, 'import numpy as np\n'), ((29195, 29211), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (29203, 29211), True, 'import numpy as np\n'), ((29222, 29238), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (29230, 29238), True, 'import numpy as 
np\n'), ((29249, 29265), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (29257, 29265), True, 'import numpy as np\n'), ((29276, 29292), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (29284, 29292), True, 'import numpy as np\n'), ((29353, 29369), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (29361, 29369), True, 'import numpy as np\n'), ((29380, 29396), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (29388, 29396), True, 'import numpy as np\n'), ((29407, 29423), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (29415, 29423), True, 'import numpy as np\n'), ((29442, 29458), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (29450, 29458), True, 'import numpy as np\n'), ((29469, 29485), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (29477, 29485), True, 'import numpy as np\n'), ((29496, 29512), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (29504, 29512), True, 'import numpy as np\n'), ((29523, 29539), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (29531, 29539), True, 'import numpy as np\n'), ((29550, 29566), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (29558, 29566), True, 'import numpy as np\n'), ((29577, 29593), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (29585, 29593), True, 'import numpy as np\n'), ((29762, 29788), 'numpy.array', 'np.array', (['[coeff1, coeff2]'], {}), '([coeff1, coeff2])\n', (29770, 29788), True, 'import numpy as np\n'), ((30368, 30384), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (30376, 30384), True, 'import numpy as np\n'), ((30395, 30411), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (30403, 30411), True, 'import numpy as np\n'), ((30422, 30438), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (30430, 30438), True, 'import numpy as np\n'), ((30457, 30473), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (30465, 30473), True, 'import numpy as np\n'), 
((30484, 30500), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (30492, 30500), True, 'import numpy as np\n'), ((30511, 30527), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (30519, 30527), True, 'import numpy as np\n'), ((30538, 30554), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (30546, 30554), True, 'import numpy as np\n'), ((30565, 30581), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (30573, 30581), True, 'import numpy as np\n'), ((30592, 30608), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (30600, 30608), True, 'import numpy as np\n'), ((30746, 30762), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (30754, 30762), True, 'import numpy as np\n'), ((30773, 30789), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (30781, 30789), True, 'import numpy as np\n'), ((30800, 30816), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (30808, 30816), True, 'import numpy as np\n'), ((30835, 30851), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (30843, 30851), True, 'import numpy as np\n'), ((30862, 30878), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (30870, 30878), True, 'import numpy as np\n'), ((30889, 30905), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (30897, 30905), True, 'import numpy as np\n'), ((30916, 30932), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (30924, 30932), True, 'import numpy as np\n'), ((30943, 30959), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (30951, 30959), True, 'import numpy as np\n'), ((30970, 30986), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (30978, 30986), True, 'import numpy as np\n'), ((31155, 31181), 'numpy.array', 'np.array', (['[coeff1, coeff2]'], {}), '([coeff1, coeff2])\n', (31163, 31181), True, 'import numpy as np\n'), ((31771, 31787), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (31779, 31787), True, 'import numpy as np\n'), ((31806, 
31822), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (31814, 31822), True, 'import numpy as np\n'), ((31833, 31849), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (31841, 31849), True, 'import numpy as np\n'), ((31860, 31876), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (31868, 31876), True, 'import numpy as np\n'), ((31967, 31983), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (31975, 31983), True, 'import numpy as np\n'), ((32181, 32197), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (32189, 32197), True, 'import numpy as np\n'), ((32216, 32232), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (32224, 32232), True, 'import numpy as np\n'), ((32243, 32259), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (32251, 32259), True, 'import numpy as np\n'), ((32270, 32286), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (32278, 32286), True, 'import numpy as np\n'), ((32377, 32393), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (32385, 32393), True, 'import numpy as np\n'), ((32562, 32588), 'numpy.array', 'np.array', (['[coeff1, coeff2]'], {}), '([coeff1, coeff2])\n', (32570, 32588), True, 'import numpy as np\n'), ((1030, 1043), 'numpy.exp', 'np.exp', (['(x * y)'], {}), '(x * y)\n', (1036, 1043), True, 'import numpy as np\n'), ((1326, 1339), 'numpy.exp', 'np.exp', (['(x * y)'], {}), '(x * y)\n', (1332, 1339), True, 'import numpy as np\n'), ((1354, 1371), 'numpy.cos', 'np.cos', (['(np.pi * x)'], {}), '(np.pi * x)\n', (1360, 1371), True, 'import numpy as np\n'), ((2343, 2356), 'numpy.exp', 'np.exp', (['(x * y)'], {}), '(x * y)\n', (2349, 2356), True, 'import numpy as np\n'), ((2724, 2739), 'numpy.ones', 'np.ones', (['(N, N)'], {}), '((N, N))\n', (2731, 2739), True, 'import numpy as np\n'), ((5561, 5574), 'numpy.exp', 'np.exp', (['(x + y)'], {}), '(x + y)\n', (5567, 5574), True, 'import numpy as np\n'), ((5860, 5885), 'numpy.cos', 'np.cos', (['(2 * np.pi * x * 
y)'], {}), '(2 * np.pi * x * y)\n', (5866, 5885), True, 'import numpy as np\n'), ((5900, 5921), 'numpy.sin', 'np.sin', (['(np.pi * x * y)'], {}), '(np.pi * x * y)\n', (5906, 5921), True, 'import numpy as np\n'), ((5997, 6018), 'numpy.sin', 'np.sin', (['(np.pi * x * y)'], {}), '(np.pi * x * y)\n', (6003, 6018), True, 'import numpy as np\n'), ((6349, 6358), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (6355, 6358), True, 'import numpy as np\n'), ((6677, 6686), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (6683, 6686), True, 'import numpy as np\n'), ((7384, 7398), 'numpy.exp', 'np.exp', (['(-x * y)'], {}), '(-x * y)\n', (7390, 7398), True, 'import numpy as np\n'), ((7413, 7422), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (7419, 7422), True, 'import numpy as np\n'), ((7446, 7455), 'numpy.exp', 'np.exp', (['y'], {}), '(y)\n', (7452, 7455), True, 'import numpy as np\n'), ((7470, 7491), 'numpy.sin', 'np.sin', (['(np.pi * x * y)'], {}), '(np.pi * x * y)\n', (7476, 7491), True, 'import numpy as np\n'), ((8451, 8464), 'numpy.exp', 'np.exp', (['(x + y)'], {}), '(x + y)\n', (8457, 8464), True, 'import numpy as np\n'), ((8770, 8783), 'numpy.exp', 'np.exp', (['(x * y)'], {}), '(x * y)\n', (8776, 8783), True, 'import numpy as np\n'), ((8798, 8812), 'numpy.exp', 'np.exp', (['(-x * y)'], {}), '(-x * y)\n', (8804, 8812), True, 'import numpy as np\n'), ((10758, 10771), 'numpy.exp', 'np.exp', (['(x * y)'], {}), '(x * y)\n', (10764, 10771), True, 'import numpy as np\n'), ((10858, 10876), 'numpy.exp', 'np.exp', (['(-2 * x * y)'], {}), '(-2 * x * y)\n', (10864, 10876), True, 'import numpy as np\n'), ((10888, 10902), 'numpy.exp', 'np.exp', (['(-x * y)'], {}), '(-x * y)\n', (10894, 10902), True, 'import numpy as np\n'), ((11170, 11184), 'numpy.exp', 'np.exp', (['(-x * y)'], {}), '(-x * y)\n', (11176, 11184), True, 'import numpy as np\n'), ((11207, 11220), 'numpy.exp', 'np.exp', (['(x * y)'], {}), '(x * y)\n', (11213, 11220), True, 'import numpy as np\n'), ((11307, 11317), 'numpy.exp', 
'np.exp', (['(-y)'], {}), '(-y)\n', (11313, 11317), True, 'import numpy as np\n'), ((11329, 11339), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (11335, 11339), True, 'import numpy as np\n'), ((13855, 13868), 'numpy.exp', 'np.exp', (['(x * y)'], {}), '(x * y)\n', (13861, 13868), True, 'import numpy as np\n'), ((13883, 13900), 'numpy.cos', 'np.cos', (['(np.pi * x)'], {}), '(np.pi * x)\n', (13889, 13900), True, 'import numpy as np\n'), ((14020, 14045), 'numpy.cos', 'np.cos', (['(2 * np.pi * x * y)'], {}), '(2 * np.pi * x * y)\n', (14026, 14045), True, 'import numpy as np\n'), ((14060, 14081), 'numpy.sin', 'np.sin', (['(np.pi * x * y)'], {}), '(np.pi * x * y)\n', (14066, 14081), True, 'import numpy as np\n'), ((14157, 14178), 'numpy.sin', 'np.sin', (['(np.pi * x * y)'], {}), '(np.pi * x * y)\n', (14163, 14178), True, 'import numpy as np\n'), ((14205, 14218), 'numpy.log', 'np.log', (['(1 + x)'], {}), '(1 + x)\n', (14211, 14218), True, 'import numpy as np\n'), ((14233, 14248), 'numpy.ones', 'np.ones', (['(N, N)'], {}), '((N, N))\n', (14240, 14248), True, 'import numpy as np\n'), ((14259, 14280), 'numpy.exp', 'np.exp', (['(2 * x * y + 3)'], {}), '(2 * x * y + 3)\n', (14265, 14280), True, 'import numpy as np\n'), ((14376, 14391), 'numpy.ones', 'np.ones', (['(N, N)'], {}), '((N, N))\n', (14383, 14391), True, 'import numpy as np\n'), ((14415, 14428), 'numpy.exp', 'np.exp', (['(x * y)'], {}), '(x * y)\n', (14421, 14428), True, 'import numpy as np\n'), ((14515, 14533), 'numpy.exp', 'np.exp', (['(-2 * x * y)'], {}), '(-2 * x * y)\n', (14521, 14533), True, 'import numpy as np\n'), ((14545, 14559), 'numpy.exp', 'np.exp', (['(-x * y)'], {}), '(-x * y)\n', (14551, 14559), True, 'import numpy as np\n'), ((17878, 17896), 'numpy.exp', 'np.exp', (['(-5 * x * y)'], {}), '(-5 * x * y)\n', (17884, 17896), True, 'import numpy as np\n'), ((18879, 18894), 'numpy.ones', 'np.ones', (['(N, N)'], {}), '((N, N))\n', (18886, 18894), True, 'import numpy as np\n'), ((19921, 19936), 
'numpy.ones', 'np.ones', (['(N, N)'], {}), '((N, N))\n', (19928, 19936), True, 'import numpy as np\n'), ((19978, 19992), 'numpy.exp', 'np.exp', (['(-x * y)'], {}), '(-x * y)\n', (19984, 19992), True, 'import numpy as np\n'), ((21128, 21141), 'numpy.exp', 'np.exp', (['(x * y)'], {}), '(x * y)\n', (21134, 21141), True, 'import numpy as np\n'), ((21654, 21668), 'numpy.exp', 'np.exp', (['(-x * y)'], {}), '(-x * y)\n', (21660, 21668), True, 'import numpy as np\n'), ((21797, 21815), 'numpy.sinh', 'np.sinh', (['(2 * x * y)'], {}), '(2 * x * y)\n', (21804, 21815), True, 'import numpy as np\n'), ((21818, 21840), 'low_level_tools.d2x', 'llt.d2x', (['current[0]', 'N'], {}), '(current[0], N)\n', (21825, 21840), True, 'import low_level_tools as llt\n'), ((22092, 22106), 'numpy.exp', 'np.exp', (['(-x * y)'], {}), '(-x * y)\n', (22098, 22106), True, 'import numpy as np\n'), ((22520, 22606), 'equation.equation', 'equation', (['nleq_21_coeff', 'nleq_21_rhs', '(2)', 'nleq_21_bc', 'nleq_21_exact', 'nleq_21_lcoeff'], {}), '(nleq_21_coeff, nleq_21_rhs, 2, nleq_21_bc, nleq_21_exact,\n nleq_21_lcoeff)\n', (22528, 22606), False, 'from equation import equation\n'), ((22640, 22726), 'equation.equation', 'equation', (['nleq1_coeff', 'nleq1_rhs', '(1)', 'nleq1_bc', 'nleq1_exact'], {'l_coeff': 'nleq1_l_coeff'}), '(nleq1_coeff, nleq1_rhs, 1, nleq1_bc, nleq1_exact, l_coeff=\n nleq1_l_coeff)\n', (22648, 22726), False, 'from equation import equation\n'), ((23510, 23522), 'low_level_tools.dx', 'llt.dx', (['u', 'N'], {}), '(u, N)\n', (23516, 23522), True, 'import low_level_tools as llt\n'), ((23524, 23536), 'low_level_tools.dx', 'llt.dx', (['v', 'N'], {}), '(v, N)\n', (23530, 23536), True, 'import low_level_tools as llt\n'), ((23552, 23564), 'low_level_tools.dy', 'llt.dy', (['u', 'N'], {}), '(u, N)\n', (23558, 23564), True, 'import low_level_tools as llt\n'), ((23566, 23578), 'low_level_tools.dy', 'llt.dy', (['v', 'N'], {}), '(v, N)\n', (23572, 23578), True, 'import low_level_tools as llt\n'), 
((24830, 24842), 'low_level_tools.dx', 'llt.dx', (['u', 'N'], {}), '(u, N)\n', (24836, 24842), True, 'import low_level_tools as llt\n'), ((24844, 24856), 'low_level_tools.dx', 'llt.dx', (['v', 'N'], {}), '(v, N)\n', (24850, 24856), True, 'import low_level_tools as llt\n'), ((24872, 24884), 'low_level_tools.dy', 'llt.dy', (['u', 'N'], {}), '(u, N)\n', (24878, 24884), True, 'import low_level_tools as llt\n'), ((24886, 24898), 'low_level_tools.dy', 'llt.dy', (['v', 'N'], {}), '(v, N)\n', (24892, 24898), True, 'import low_level_tools as llt\n'), ((26354, 26366), 'low_level_tools.dx', 'llt.dx', (['u', 'N'], {}), '(u, N)\n', (26360, 26366), True, 'import low_level_tools as llt\n'), ((26368, 26380), 'low_level_tools.dx', 'llt.dx', (['v', 'N'], {}), '(v, N)\n', (26374, 26380), True, 'import low_level_tools as llt\n'), ((26396, 26408), 'low_level_tools.dy', 'llt.dy', (['u', 'N'], {}), '(u, N)\n', (26402, 26408), True, 'import low_level_tools as llt\n'), ((26410, 26422), 'low_level_tools.dy', 'llt.dy', (['v', 'N'], {}), '(v, N)\n', (26416, 26422), True, 'import low_level_tools as llt\n'), ((26533, 26549), 'numpy.sqrt', 'np.sqrt', (['det_g_x'], {}), '(det_g_x)\n', (26540, 26549), True, 'import numpy as np\n'), ((26781, 26809), 'low_level_tools.dy', 'llt.dy', (['(g[0, 1] / det_g_x)', 'N'], {}), '(g[0, 1] / det_g_x, N)\n', (26787, 26809), True, 'import low_level_tools as llt\n'), ((26821, 26849), 'low_level_tools.dx', 'llt.dx', (['(g[0, 1] / det_g_x)', 'N'], {}), '(g[0, 1] / det_g_x, N)\n', (26827, 26849), True, 'import low_level_tools as llt\n'), ((26976, 27004), 'low_level_tools.dy', 'llt.dy', (['(g[1, 1] / det_g_x)', 'N'], {}), '(g[1, 1] / det_g_x, N)\n', (26982, 27004), True, 'import low_level_tools as llt\n'), ((27016, 27044), 'low_level_tools.dx', 'llt.dx', (['(g[1, 1] / det_g_x)', 'N'], {}), '(g[1, 1] / det_g_x, N)\n', (27022, 27044), True, 'import low_level_tools as llt\n'), ((27133, 27161), 'low_level_tools.dy', 'llt.dy', (['(g[1, 0] / det_g_x)', 'N'], {}), '(g[1, 0] / 
det_g_x, N)\n', (27139, 27161), True, 'import low_level_tools as llt\n'), ((27172, 27200), 'low_level_tools.dx', 'llt.dx', (['(g[1, 0] / det_g_x)', 'N'], {}), '(g[1, 0] / det_g_x, N)\n', (27178, 27200), True, 'import low_level_tools as llt\n'), ((27328, 27356), 'low_level_tools.dy', 'llt.dy', (['(g[0, 0] / det_g_x)', 'N'], {}), '(g[0, 0] / det_g_x, N)\n', (27334, 27356), True, 'import low_level_tools as llt\n'), ((27367, 27395), 'low_level_tools.dx', 'llt.dx', (['(g[0, 0] / det_g_x)', 'N'], {}), '(g[0, 0] / det_g_x, N)\n', (27373, 27395), True, 'import low_level_tools as llt\n'), ((27865, 27877), 'low_level_tools.dx', 'llt.dx', (['u', 'N'], {}), '(u, N)\n', (27871, 27877), True, 'import low_level_tools as llt\n'), ((27879, 27891), 'low_level_tools.dx', 'llt.dx', (['v', 'N'], {}), '(v, N)\n', (27885, 27891), True, 'import low_level_tools as llt\n'), ((27907, 27919), 'low_level_tools.dy', 'llt.dy', (['u', 'N'], {}), '(u, N)\n', (27913, 27919), True, 'import low_level_tools as llt\n'), ((27921, 27933), 'low_level_tools.dy', 'llt.dy', (['v', 'N'], {}), '(v, N)\n', (27927, 27933), True, 'import low_level_tools as llt\n'), ((28650, 28662), 'low_level_tools.dx', 'llt.dx', (['u', 'N'], {}), '(u, N)\n', (28656, 28662), True, 'import low_level_tools as llt\n'), ((28664, 28676), 'low_level_tools.dx', 'llt.dx', (['v', 'N'], {}), '(v, N)\n', (28670, 28676), True, 'import low_level_tools as llt\n'), ((28692, 28704), 'low_level_tools.dy', 'llt.dy', (['u', 'N'], {}), '(u, N)\n', (28698, 28704), True, 'import low_level_tools as llt\n'), ((28706, 28718), 'low_level_tools.dy', 'llt.dy', (['v', 'N'], {}), '(v, N)\n', (28712, 28718), True, 'import low_level_tools as llt\n'), ((29910, 29922), 'low_level_tools.dx', 'llt.dx', (['u', 'N'], {}), '(u, N)\n', (29916, 29922), True, 'import low_level_tools as llt\n'), ((29924, 29936), 'low_level_tools.dx', 'llt.dx', (['v', 'N'], {}), '(v, N)\n', (29930, 29936), True, 'import low_level_tools as llt\n'), ((29952, 29964), 'low_level_tools.dy', 
'llt.dy', (['u', 'N'], {}), '(u, N)\n', (29958, 29964), True, 'import low_level_tools as llt\n'), ((29966, 29978), 'low_level_tools.dy', 'llt.dy', (['v', 'N'], {}), '(v, N)\n', (29972, 29978), True, 'import low_level_tools as llt\n'), ((29998, 30018), 'low_level_tools.dx_forward', 'llt.dx_forward', (['u', 'N'], {}), '(u, N)\n', (30012, 30018), True, 'import low_level_tools as llt\n'), ((30020, 30040), 'low_level_tools.dx_forward', 'llt.dx_forward', (['v', 'N'], {}), '(v, N)\n', (30034, 30040), True, 'import low_level_tools as llt\n'), ((30060, 30080), 'low_level_tools.dy_forward', 'llt.dy_forward', (['u', 'N'], {}), '(u, N)\n', (30074, 30080), True, 'import low_level_tools as llt\n'), ((30082, 30102), 'low_level_tools.dy_forward', 'llt.dy_forward', (['v', 'N'], {}), '(v, N)\n', (30096, 30102), True, 'import low_level_tools as llt\n'), ((30122, 30143), 'low_level_tools.dx_backward', 'llt.dx_backward', (['u', 'N'], {}), '(u, N)\n', (30137, 30143), True, 'import low_level_tools as llt\n'), ((30145, 30166), 'low_level_tools.dx_backward', 'llt.dx_backward', (['v', 'N'], {}), '(v, N)\n', (30160, 30166), True, 'import low_level_tools as llt\n'), ((30186, 30207), 'low_level_tools.dy_backward', 'llt.dy_backward', (['u', 'N'], {}), '(u, N)\n', (30201, 30207), True, 'import low_level_tools as llt\n'), ((30209, 30230), 'low_level_tools.dy_backward', 'llt.dy_backward', (['v', 'N'], {}), '(v, N)\n', (30224, 30230), True, 'import low_level_tools as llt\n'), ((31317, 31329), 'low_level_tools.dx', 'llt.dx', (['u', 'N'], {}), '(u, N)\n', (31323, 31329), True, 'import low_level_tools as llt\n'), ((31331, 31343), 'low_level_tools.dx', 'llt.dx', (['v', 'N'], {}), '(v, N)\n', (31337, 31343), True, 'import low_level_tools as llt\n'), ((31359, 31371), 'low_level_tools.dy', 'llt.dy', (['u', 'N'], {}), '(u, N)\n', (31365, 31371), True, 'import low_level_tools as llt\n'), ((31373, 31385), 'low_level_tools.dy', 'llt.dy', (['v', 'N'], {}), '(v, N)\n', (31379, 31385), True, 'import 
low_level_tools as llt\n'), ((31403, 31416), 'low_level_tools.d2x', 'llt.d2x', (['u', 'N'], {}), '(u, N)\n', (31410, 31416), True, 'import low_level_tools as llt\n'), ((31418, 31431), 'low_level_tools.d2x', 'llt.d2x', (['v', 'N'], {}), '(v, N)\n', (31425, 31431), True, 'import low_level_tools as llt\n'), ((31449, 31462), 'low_level_tools.d2y', 'llt.d2y', (['u', 'N'], {}), '(u, N)\n', (31456, 31462), True, 'import low_level_tools as llt\n'), ((31464, 31477), 'low_level_tools.d2y', 'llt.d2y', (['v', 'N'], {}), '(v, N)\n', (31471, 31477), True, 'import low_level_tools as llt\n'), ((31495, 31509), 'low_level_tools.dxdy', 'llt.dxdy', (['u', 'N'], {}), '(u, N)\n', (31503, 31509), True, 'import low_level_tools as llt\n'), ((31511, 31525), 'low_level_tools.dxdy', 'llt.dxdy', (['v', 'N'], {}), '(v, N)\n', (31519, 31525), True, 'import low_level_tools as llt\n'), ((31539, 31555), 'numpy.zeros_like', 'np.zeros_like', (['u'], {}), '(u)\n', (31552, 31555), True, 'import numpy as np\n'), ((31557, 31573), 'numpy.zeros_like', 'np.zeros_like', (['v'], {}), '(v)\n', (31570, 31573), True, 'import numpy as np\n'), ((32767, 32909), 'harmonic_equation.harmonic_equation', 'harmonic_equation', (['basic_harmonic_coeff', 'trivial_harmonic_rhs', '(2)'], {'bc': 'trivial_harmonic_bc', 'l_coeff': 'basic_fair_newton_harmonic_linear_coeff'}), '(basic_harmonic_coeff, trivial_harmonic_rhs, 2, bc=\n trivial_harmonic_bc, l_coeff=basic_fair_newton_harmonic_linear_coeff)\n', (32784, 32909), False, 'from harmonic_equation import harmonic_equation\n'), ((32983, 33076), 'harmonic_equation.harmonic_equation', 'harmonic_equation', (['basic_harmonic_coeff', 'trivial_harmonic_rhs', '(2)'], {'bc': 'trivial_harmonic_bc'}), '(basic_harmonic_coeff, trivial_harmonic_rhs, 2, bc=\n trivial_harmonic_bc)\n', (33000, 33076), False, 'from harmonic_equation import harmonic_equation\n'), ((33127, 33214), 'harmonic_equation.harmonic_equation', 'harmonic_equation', (['harmonic_coeff', 'trivial_harmonic_rhs', '(2)'], {'bc': 
'trivial_harmonic_bc'}), '(harmonic_coeff, trivial_harmonic_rhs, 2, bc=\n trivial_harmonic_bc)\n', (33144, 33214), False, 'from harmonic_equation import harmonic_equation\n'), ((33264, 33350), 'harmonic_equation.harmonic_equation', 'harmonic_equation', (['winslow_coeff', 'trivial_harmonic_rhs', '(2)'], {'bc': 'trivial_harmonic_bc'}), '(winslow_coeff, trivial_harmonic_rhs, 2, bc=\n trivial_harmonic_bc)\n', (33281, 33350), False, 'from harmonic_equation import harmonic_equation\n'), ((33406, 33493), 'harmonic_equation.harmonic_equation', 'harmonic_equation', (['harmonic_coeff', 'trivial_harmonic_rhs', '(2)'], {'bc': 'trivial_harmonic_bc'}), '(harmonic_coeff, trivial_harmonic_rhs, 2, bc=\n trivial_harmonic_bc)\n', (33423, 33493), False, 'from harmonic_equation import harmonic_equation\n'), ((33553, 33652), 'harmonic_equation.harmonic_equation', 'harmonic_equation', (['basic_mixed_harmonic_coeff', 'trivial_harmonic_rhs', '(2)'], {'bc': 'trivial_harmonic_bc'}), '(basic_mixed_harmonic_coeff, trivial_harmonic_rhs, 2, bc=\n trivial_harmonic_bc)\n', (33570, 33652), False, 'from harmonic_equation import harmonic_equation\n'), ((1005, 1026), 'numpy.sin', 'np.sin', (['(np.pi * x * y)'], {}), '(np.pi * x * y)\n', (1011, 1026), True, 'import numpy as np\n'), ((3116, 3133), 'numpy.sin', 'np.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (3122, 3133), True, 'import numpy as np\n'), ((3136, 3153), 'numpy.sin', 'np.sin', (['(np.pi * y)'], {}), '(np.pi * y)\n', (3142, 3153), True, 'import numpy as np\n'), ((3472, 3489), 'numpy.sin', 'np.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (3478, 3489), True, 'import numpy as np\n'), ((3492, 3509), 'numpy.sin', 'np.sin', (['(np.pi * y)'], {}), '(np.pi * y)\n', (3498, 3509), True, 'import numpy as np\n'), ((4429, 4446), 'numpy.cos', 'np.cos', (['(x * np.pi)'], {}), '(x * np.pi)\n', (4435, 4446), True, 'import numpy as np\n'), ((4465, 4479), 'numpy.exp', 'np.exp', (['(-x * y)'], {}), '(-x * y)\n', (4471, 4479), True, 'import numpy as np\n'), 
((7024, 7033), 'numpy.exp', 'np.exp', (['y'], {}), '(y)\n', (7030, 7033), True, 'import numpy as np\n'), ((7323, 7337), 'numpy.exp', 'np.exp', (['(-x * y)'], {}), '(-x * y)\n', (7329, 7337), True, 'import numpy as np\n'), ((7356, 7369), 'numpy.exp', 'np.exp', (['(x * y)'], {}), '(x * y)\n', (7362, 7369), True, 'import numpy as np\n'), ((8426, 8453), 'numpy.sin', 'np.sin', (['(np.pi * (y + 2 * x))'], {}), '(np.pi * (y + 2 * x))\n', (8432, 8453), True, 'import numpy as np\n'), ((10073, 10087), 'numpy.exp', 'np.exp', (['(-x * y)'], {}), '(-x * y)\n', (10079, 10087), True, 'import numpy as np\n'), ((10362, 10379), 'numpy.exp', 'np.exp', (['(2 * x * y)'], {}), '(2 * x * y)\n', (10368, 10379), True, 'import numpy as np\n'), ((10787, 10808), 'numpy.cos', 'np.cos', (['(np.pi * x * y)'], {}), '(np.pi * x * y)\n', (10793, 10808), True, 'import numpy as np\n'), ((11236, 11257), 'numpy.sin', 'np.sin', (['(np.pi * x * y)'], {}), '(np.pi * x * y)\n', (11242, 11257), True, 'import numpy as np\n'), ((12909, 12922), 'numpy.exp', 'np.exp', (['(x * y)'], {}), '(x * y)\n', (12915, 12922), True, 'import numpy as np\n'), ((13083, 13096), 'numpy.exp', 'np.exp', (['(x + y)'], {}), '(x + y)\n', (13089, 13096), True, 'import numpy as np\n'), ((13259, 13272), 'numpy.exp', 'np.exp', (['(x + y)'], {}), '(x + y)\n', (13265, 13272), True, 'import numpy as np\n'), ((13464, 13477), 'numpy.exp', 'np.exp', (['(x * y)'], {}), '(x * y)\n', (13470, 13477), True, 'import numpy as np\n'), ((14330, 14355), 'numpy.cos', 'np.cos', (['(4 * np.pi * x * y)'], {}), '(4 * np.pi * x * y)\n', (14336, 14355), True, 'import numpy as np\n'), ((14444, 14465), 'numpy.cos', 'np.cos', (['(np.pi * x * y)'], {}), '(np.pi * x * y)\n', (14450, 14465), True, 'import numpy as np\n'), ((15864, 15922), 'equation.equation', 'equation', (['eq_00_coeff', 'eq_00_rhs', '(1)', 'eq_00_bc', 'eq_00_exact'], {}), '(eq_00_coeff, eq_00_rhs, 1, eq_00_bc, eq_00_exact)\n', (15872, 15922), False, 'from equation import equation\n'), ((15973, 
16031), 'equation.equation', 'equation', (['eq_11_coeff', 'eq_11_rhs', '(1)', 'eq_11_bc', 'eq_11_exact'], {}), '(eq_11_coeff, eq_11_rhs, 1, eq_11_bc, eq_11_exact)\n', (15981, 16031), False, 'from equation import equation\n'), ((16082, 16140), 'equation.equation', 'equation', (['eq_12_coeff', 'eq_12_rhs', '(1)', 'eq_12_bc', 'eq_12_exact'], {}), '(eq_12_coeff, eq_12_rhs, 1, eq_12_bc, eq_12_exact)\n', (16090, 16140), False, 'from equation import equation\n'), ((16191, 16249), 'equation.equation', 'equation', (['eq_13_coeff', 'eq_13_rhs', '(1)', 'eq_13_bc', 'eq_13_exact'], {}), '(eq_13_coeff, eq_13_rhs, 1, eq_13_bc, eq_13_exact)\n', (16199, 16249), False, 'from equation import equation\n'), ((16300, 16358), 'equation.equation', 'equation', (['eq_14_coeff', 'eq_14_rhs', '(1)', 'eq_14_bc', 'eq_14_exact'], {}), '(eq_14_coeff, eq_14_rhs, 1, eq_14_bc, eq_14_exact)\n', (16308, 16358), False, 'from equation import equation\n'), ((16519, 16575), 'equation.equation', 'equation', (['coeff', 'rhs', '(1)', 'eq_red_fox_bc', 'eq_red_fox_exact'], {}), '(coeff, rhs, 1, eq_red_fox_bc, eq_red_fox_exact)\n', (16527, 16575), False, 'from equation import equation\n'), ((16643, 16701), 'equation.equation', 'equation', (['eq_21_coeff', 'eq_21_rhs', '(2)', 'eq_21_bc', 'eq_21_exact'], {}), '(eq_21_coeff, eq_21_rhs, 2, eq_21_bc, eq_21_exact)\n', (16651, 16701), False, 'from equation import equation\n'), ((16752, 16810), 'equation.equation', 'equation', (['eq_22_coeff', 'eq_22_rhs', '(2)', 'eq_22_bc', 'eq_22_exact'], {}), '(eq_22_coeff, eq_22_rhs, 2, eq_22_bc, eq_22_exact)\n', (16760, 16810), False, 'from equation import equation\n'), ((18789, 18806), 'numpy.cos', 'np.cos', (['(np.pi * x)'], {}), '(np.pi * x)\n', (18795, 18806), True, 'import numpy as np\n'), ((19106, 19120), 'numpy.exp', 'np.exp', (['(-x * y)'], {}), '(-x * y)\n', (19112, 19120), True, 'import numpy as np\n'), ((19831, 19848), 'numpy.cos', 'np.cos', (['(np.pi * x)'], {}), '(np.pi * x)\n', (19837, 19848), True, 'import numpy as 
np\n'), ((21347, 21365), 'numpy.sinh', 'np.sinh', (['(2 * x * y)'], {}), '(2 * x * y)\n', (21354, 21365), True, 'import numpy as np\n'), ((4118, 4135), 'numpy.sin', 'np.sin', (['(np.pi * y)'], {}), '(np.pi * y)\n', (4124, 4135), True, 'import numpy as np\n'), ((5532, 5553), 'numpy.sin', 'np.sin', (['(np.pi * x * y)'], {}), '(np.pi * x * y)\n', (5538, 5553), True, 'import numpy as np\n'), ((6983, 6996), 'numpy.exp', 'np.exp', (['(2 * x)'], {}), '(2 * x)\n', (6989, 6996), True, 'import numpy as np\n'), ((7012, 7021), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (7018, 7021), True, 'import numpy as np\n'), ((8400, 8427), 'numpy.cos', 'np.cos', (['(np.pi * (x + 2 * y))'], {}), '(np.pi * (x + 2 * y))\n', (8406, 8427), True, 'import numpy as np\n'), ((10287, 10308), 'numpy.exp', 'np.exp', (['(2 * x * y - y)'], {}), '(2 * x * y - y)\n', (10293, 10308), True, 'import numpy as np\n'), ((10329, 10350), 'numpy.sin', 'np.sin', (['(np.pi * x * y)'], {}), '(np.pi * x * y)\n', (10335, 10350), True, 'import numpy as np\n'), ((12884, 12905), 'numpy.sin', 'np.sin', (['(np.pi * x * y)'], {}), '(np.pi * x * y)\n', (12890, 12905), True, 'import numpy as np\n'), ((17853, 17866), 'numpy.exp', 'np.exp', (['(x * y)'], {}), '(x * y)\n', (17859, 17866), True, 'import numpy as np\n'), ((17909, 17927), 'numpy.exp', 'np.exp', (['(-5 * x * y)'], {}), '(-5 * x * y)\n', (17915, 17927), True, 'import numpy as np\n'), ((18046, 18064), 'numpy.exp', 'np.exp', (['(-2 * x * y)'], {}), '(-2 * x * y)\n', (18052, 18064), True, 'import numpy as np\n'), ((20159, 20173), 'numpy.exp', 'np.exp', (['(-x * y)'], {}), '(-x * y)\n', (20165, 20173), True, 'import numpy as np\n'), ((25255, 25271), 'numpy.sqrt', 'np.sqrt', (['det_g_x'], {}), '(det_g_x)\n', (25262, 25271), True, 'import numpy as np\n'), ((25304, 25320), 'numpy.sqrt', 'np.sqrt', (['det_g_x'], {}), '(det_g_x)\n', (25311, 25320), True, 'import numpy as np\n'), ((25468, 25484), 'numpy.sqrt', 'np.sqrt', (['det_g_x'], {}), '(det_g_x)\n', (25475, 25484), 
True, 'import numpy as np\n'), ((25517, 25533), 'numpy.sqrt', 'np.sqrt', (['det_g_x'], {}), '(det_g_x)\n', (25524, 25533), True, 'import numpy as np\n'), ((25643, 25659), 'numpy.sqrt', 'np.sqrt', (['det_g_x'], {}), '(det_g_x)\n', (25650, 25659), True, 'import numpy as np\n'), ((25691, 25707), 'numpy.sqrt', 'np.sqrt', (['det_g_x'], {}), '(det_g_x)\n', (25698, 25707), True, 'import numpy as np\n'), ((25856, 25872), 'numpy.sqrt', 'np.sqrt', (['det_g_x'], {}), '(det_g_x)\n', (25863, 25872), True, 'import numpy as np\n'), ((25904, 25920), 'numpy.sqrt', 'np.sqrt', (['det_g_x'], {}), '(det_g_x)\n', (25911, 25920), True, 'import numpy as np\n'), ((993, 1002), 'numpy.exp', 'np.exp', (['y'], {}), '(y)\n', (999, 1002), True, 'import numpy as np\n'), ((4077, 4095), 'numpy.sinh', 'np.sinh', (['(x + 3 * y)'], {}), '(x + 3 * y)\n', (4084, 4095), True, 'import numpy as np\n'), ((4098, 4115), 'numpy.sin', 'np.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (4104, 4115), True, 'import numpy as np\n'), ((5520, 5529), 'numpy.exp', 'np.exp', (['y'], {}), '(y)\n', (5526, 5529), True, 'import numpy as np\n'), ((6915, 6924), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (6921, 6924), True, 'import numpy as np\n'), ((6962, 6971), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (6968, 6971), True, 'import numpy as np\n'), ((8371, 8385), 'numpy.exp', 'np.exp', (['(-x * y)'], {}), '(-x * y)\n', (8377, 8385), True, 'import numpy as np\n'), ((10040, 10057), 'numpy.exp', 'np.exp', (['(2 * x * y)'], {}), '(2 * x * y)\n', (10046, 10057), True, 'import numpy as np\n'), ((10255, 10276), 'numpy.exp', 'np.exp', (['(2 * x * y - x)'], {}), '(2 * x * y - x)\n', (10261, 10276), True, 'import numpy as np\n'), ((11131, 11152), 'numpy.cos', 'np.cos', (['(np.pi * x * y)'], {}), '(np.pi * x * y)\n', (11137, 11152), True, 'import numpy as np\n'), ((13054, 13075), 'numpy.sin', 'np.sin', (['(np.pi * x * y)'], {}), '(np.pi * x * y)\n', (13060, 13075), True, 'import numpy as np\n'), ((13240, 13255), 'numpy.ones', 
'np.ones', (['(N, N)'], {}), '((N, N))\n', (13247, 13255), True, 'import numpy as np\n'), ((13446, 13460), 'numpy.exp', 'np.exp', (['(-x * y)'], {}), '(-x * y)\n', (13452, 13460), True, 'import numpy as np\n'), ((17981, 17999), 'numpy.exp', 'np.exp', (['(-2 * x * y)'], {}), '(-2 * x * y)\n', (17987, 17999), True, 'import numpy as np\n'), ((21302, 21323), 'numpy.sin', 'np.sin', (['(np.pi * x * y)'], {}), '(np.pi * x * y)\n', (21308, 21323), True, 'import numpy as np\n'), ((977, 986), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (983, 986), True, 'import numpy as np\n'), ((4018, 4035), 'numpy.cos', 'np.cos', (['(np.pi * y)'], {}), '(np.pi * y)\n', (4024, 4035), True, 'import numpy as np\n'), ((5508, 5517), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (5514, 5517), True, 'import numpy as np\n'), ((6945, 6959), 'numpy.exp', 'np.exp', (['(-x * y)'], {}), '(-x * y)\n', (6951, 6959), True, 'import numpy as np\n'), ((10104, 10117), 'numpy.exp', 'np.exp', (['(x * y)'], {}), '(x * y)\n', (10110, 10117), True, 'import numpy as np\n'), ((12872, 12881), 'numpy.exp', 'np.exp', (['y'], {}), '(y)\n', (12878, 12881), True, 'import numpy as np\n'), ((13042, 13051), 'numpy.exp', 'np.exp', (['y'], {}), '(y)\n', (13048, 13051), True, 'import numpy as np\n'), ((13164, 13181), 'numpy.log', 'np.log', (['(1 + x * y)'], {}), '(1 + x * y)\n', (13170, 13181), True, 'import numpy as np\n'), ((13421, 13439), 'numpy.exp', 'np.exp', (['(-2 * x * y)'], {}), '(-2 * x * y)\n', (13427, 13439), True, 'import numpy as np\n'), ((17938, 17956), 'numpy.exp', 'np.exp', (['(-2 * x * y)'], {}), '(-2 * x * y)\n', (17944, 17956), True, 'import numpy as np\n'), ((21274, 21295), 'numpy.cos', 'np.cos', (['(np.pi * x * y)'], {}), '(np.pi * x * y)\n', (21280, 21295), True, 'import numpy as np\n'), ((28077, 28093), 'numpy.sqrt', 'np.sqrt', (['det_g_x'], {}), '(det_g_x)\n', (28084, 28093), True, 'import numpy as np\n'), ((28121, 28137), 'numpy.sqrt', 'np.sqrt', (['det_g_x'], {}), '(det_g_x)\n', (28128, 28137), True, 
'import numpy as np\n'), ((28169, 28185), 'numpy.sqrt', 'np.sqrt', (['det_g_x'], {}), '(det_g_x)\n', (28176, 28185), True, 'import numpy as np\n'), ((28213, 28229), 'numpy.sqrt', 'np.sqrt', (['det_g_x'], {}), '(det_g_x)\n', (28220, 28229), True, 'import numpy as np\n'), ((28268, 28284), 'numpy.sqrt', 'np.sqrt', (['det_g_x'], {}), '(det_g_x)\n', (28275, 28284), True, 'import numpy as np\n'), ((28312, 28328), 'numpy.sqrt', 'np.sqrt', (['det_g_x'], {}), '(det_g_x)\n', (28319, 28328), True, 'import numpy as np\n'), ((28360, 28376), 'numpy.sqrt', 'np.sqrt', (['det_g_x'], {}), '(det_g_x)\n', (28367, 28376), True, 'import numpy as np\n'), ((28404, 28420), 'numpy.sqrt', 'np.sqrt', (['det_g_x'], {}), '(det_g_x)\n', (28411, 28420), True, 'import numpy as np\n'), ((956, 970), 'numpy.exp', 'np.exp', (['(-x * y)'], {}), '(-x * y)\n', (962, 970), True, 'import numpy as np\n'), ((3877, 3890), 'numpy.exp', 'np.exp', (['(y * x)'], {}), '(y * x)\n', (3883, 3890), True, 'import numpy as np\n'), ((3998, 4015), 'numpy.sin', 'np.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (4004, 4015), True, 'import numpy as np\n'), ((5476, 5490), 'numpy.exp', 'np.exp', (['(-x * y)'], {}), '(-x * y)\n', (5482, 5490), True, 'import numpy as np\n'), ((6886, 6907), 'numpy.sin', 'np.sin', (['(np.pi * x * y)'], {}), '(np.pi * x * y)\n', (6892, 6907), True, 'import numpy as np\n'), ((8351, 8364), 'numpy.exp', 'np.exp', (['(x * y)'], {}), '(x * y)\n', (8357, 8364), True, 'import numpy as np\n'), ((10195, 10212), 'numpy.exp', 'np.exp', (['(3 * x * y)'], {}), '(3 * x * y)\n', (10201, 10212), True, 'import numpy as np\n'), ((11091, 11112), 'numpy.cos', 'np.cos', (['(np.pi * x * y)'], {}), '(np.pi * x * y)\n', (11097, 11112), True, 'import numpy as np\n'), ((12856, 12865), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (12862, 12865), True, 'import numpy as np\n'), ((13030, 13039), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (13036, 13039), True, 'import numpy as np\n'), ((13202, 13227), 'numpy.cos', 'np.cos', 
(['(4 * np.pi * x * y)'], {}), '(4 * np.pi * x * y)\n', (13208, 13227), True, 'import numpy as np\n'), ((13379, 13400), 'numpy.cos', 'np.cos', (['(np.pi * x * y)'], {}), '(np.pi * x * y)\n', (13385, 13400), True, 'import numpy as np\n'), ((18016, 18033), 'numpy.cos', 'np.cos', (['(np.pi * x)'], {}), '(np.pi * x)\n', (18022, 18033), True, 'import numpy as np\n'), ((21253, 21271), 'numpy.exp', 'np.exp', (['(-2 * x * y)'], {}), '(-2 * x * y)\n', (21259, 21271), True, 'import numpy as np\n'), ((3718, 3735), 'numpy.sin', 'np.sin', (['(np.pi * y)'], {}), '(np.pi * y)\n', (3724, 3735), True, 'import numpy as np\n'), ((3857, 3874), 'numpy.cos', 'np.cos', (['(np.pi * y)'], {}), '(np.pi * y)\n', (3863, 3874), True, 'import numpy as np\n'), ((3939, 3956), 'numpy.sin', 'np.sin', (['(np.pi * y)'], {}), '(np.pi * y)\n', (3945, 3956), True, 'import numpy as np\n'), ((5452, 5473), 'numpy.sin', 'np.sin', (['(np.pi * x * y)'], {}), '(np.pi * x * y)\n', (5458, 5473), True, 'import numpy as np\n'), ((10161, 10178), 'numpy.exp', 'np.exp', (['(2 * x * y)'], {}), '(2 * x * y)\n', (10167, 10178), True, 'import numpy as np\n'), ((12835, 12849), 'numpy.exp', 'np.exp', (['(-x * y)'], {}), '(-x * y)\n', (12841, 12849), True, 'import numpy as np\n'), ((13000, 13014), 'numpy.exp', 'np.exp', (['(-x * y)'], {}), '(-x * y)\n', (13006, 13014), True, 'import numpy as np\n'), ((13134, 13155), 'numpy.exp', 'np.exp', (['(2 * x * y + 3)'], {}), '(2 * x * y + 3)\n', (13140, 13155), True, 'import numpy as np\n'), ((866, 879), 'numpy.exp', 'np.exp', (['(x * y)'], {}), '(x * y)\n', (872, 879), True, 'import numpy as np\n'), ((897, 914), 'numpy.cos', 'np.cos', (['(np.pi * x)'], {}), '(np.pi * x)\n', (903, 914), True, 'import numpy as np\n'), ((3698, 3715), 'numpy.sin', 'np.sin', (['(np.pi * x)'], {}), '(np.pi * x)\n', (3704, 3715), True, 'import numpy as np\n'), ((3788, 3802), 'numpy.exp', 'np.exp', (['(-x * y)'], {}), '(-x * y)\n', (3794, 3802), True, 'import numpy as np\n'), ((3837, 3854), 'numpy.cos', 
'np.cos', (['(np.pi * x)'], {}), '(np.pi * x)\n', (3843, 3854), True, 'import numpy as np\n'), ((3919, 3936), 'numpy.cos', 'np.cos', (['(np.pi * x)'], {}), '(np.pi * x)\n', (3925, 3936), True, 'import numpy as np\n'), ((12976, 12997), 'numpy.sin', 'np.sin', (['(np.pi * x * y)'], {}), '(np.pi * x * y)\n', (12982, 12997), True, 'import numpy as np\n'), ((13114, 13127), 'numpy.log', 'np.log', (['(1 + x)'], {}), '(1 + x)\n', (13120, 13127), True, 'import numpy as np\n'), ((13295, 13308), 'numpy.exp', 'np.exp', (['(x * y)'], {}), '(x * y)\n', (13301, 13308), True, 'import numpy as np\n'), ((21150, 21163), 'numpy.exp', 'np.exp', (['(x * y)'], {}), '(x * y)\n', (21156, 21163), True, 'import numpy as np\n'), ((21181, 21195), 'numpy.exp', 'np.exp', (['(-x * y)'], {}), '(-x * y)\n', (21187, 21195), True, 'import numpy as np\n'), ((5420, 5445), 'numpy.cos', 'np.cos', (['(2 * np.pi * x * y)'], {}), '(2 * np.pi * x * y)\n', (5426, 5445), True, 'import numpy as np\n'), ((6866, 6879), 'numpy.exp', 'np.exp', (['(x * y)'], {}), '(x * y)\n', (6872, 6879), True, 'import numpy as np\n'), ((12747, 12760), 'numpy.exp', 'np.exp', (['(x * y)'], {}), '(x * y)\n', (12753, 12760), True, 'import numpy as np\n'), ((12778, 12795), 'numpy.cos', 'np.cos', (['(np.pi * x)'], {}), '(np.pi * x)\n', (12784, 12795), True, 'import numpy as np\n'), ((13326, 13347), 'numpy.cos', 'np.cos', (['(np.pi * x * y)'], {}), '(np.pi * x * y)\n', (13332, 13347), True, 'import numpy as np\n'), ((3760, 3777), 'numpy.cos', 'np.cos', (['(x * np.pi)'], {}), '(x * np.pi)\n', (3766, 3777), True, 'import numpy as np\n'), ((10130, 10151), 'numpy.sin', 'np.sin', (['(np.pi * x * y)'], {}), '(np.pi * x * y)\n', (10136, 10151), True, 'import numpy as np\n'), ((12944, 12969), 'numpy.cos', 'np.cos', (['(2 * np.pi * x * y)'], {}), '(2 * np.pi * x * y)\n', (12950, 12969), True, 'import numpy as np\n')] |
import numpy as np
import torch
import utils
if __name__ == '__main__':
args = utils.parse_args()
print(f'Running baseline for ALRS testing...\nArgs:\n{utils.args_to_str(args)}\n')
displayed_rendering_error = False
best_config = None
best_info_list = None
best_val_loss = np.inf
initial_lrs = [1e-1, 1e-2, 1e-3, 1e-4]
discount_steps = [10, 20, 50, 100]
discount_factors = [.99, .9, .88]
for initial_lr in initial_lrs:
for discount_step in discount_steps:
for discount_factor in discount_factors:
print(f'Initial LR: {initial_lr}\nDiscount step: {discount_step}\nDiscount factor: {discount_factor}')
args.initial_lr = initial_lr
env = utils.make_alrs_env(args, test=True, baseline=True)
env.reset()
done = False
global_step = 0
current_lr = initial_lr
info_list = []
while not done:
action, new_lr = utils.step_decay_action(current_lr, global_step, discount_step, discount_factor)
_, _, done, info = env.step(action)
global_step += args.update_freq
current_lr = new_lr
info_list.append(info)
try:
env.render()
except:
if not displayed_rendering_error:
displayed_rendering_error = True
print('Warning: device does not support rendering.')
val_loss = env.venv.envs[0].env.latest_end_val
print('Final validation loss:', val_loss)
if val_loss < best_val_loss:
best_config = {
'dataset': args.dataset,
'architecture': args.architecture,
'initial_lr': initial_lr,
'discount_step': discount_step,
'discount_factor': discount_factor,
'val_loss': val_loss,
'log_val_loss': np.log(val_loss)
}
best_info_list = info_list
best_val_loss = val_loss
print(f'Found best configuration:\n{best_config}')
filename = args.dataset+'_'+args.architecture
utils.dict_to_file(best_config, filename, path='data/baselines/')
utils.save_baseline(best_info_list, filename)
| [
"utils.save_baseline",
"utils.step_decay_action",
"numpy.log",
"utils.make_alrs_env",
"utils.args_to_str",
"utils.parse_args",
"utils.dict_to_file"
] | [((86, 104), 'utils.parse_args', 'utils.parse_args', ([], {}), '()\n', (102, 104), False, 'import utils\n'), ((2439, 2504), 'utils.dict_to_file', 'utils.dict_to_file', (['best_config', 'filename'], {'path': '"""data/baselines/"""'}), "(best_config, filename, path='data/baselines/')\n", (2457, 2504), False, 'import utils\n'), ((2509, 2554), 'utils.save_baseline', 'utils.save_baseline', (['best_info_list', 'filename'], {}), '(best_info_list, filename)\n', (2528, 2554), False, 'import utils\n'), ((163, 186), 'utils.args_to_str', 'utils.args_to_str', (['args'], {}), '(args)\n', (180, 186), False, 'import utils\n'), ((771, 822), 'utils.make_alrs_env', 'utils.make_alrs_env', (['args'], {'test': '(True)', 'baseline': '(True)'}), '(args, test=True, baseline=True)\n', (790, 822), False, 'import utils\n'), ((1054, 1139), 'utils.step_decay_action', 'utils.step_decay_action', (['current_lr', 'global_step', 'discount_step', 'discount_factor'], {}), '(current_lr, global_step, discount_step, discount_factor\n )\n', (1077, 1139), False, 'import utils\n'), ((2198, 2214), 'numpy.log', 'np.log', (['val_loss'], {}), '(val_loss)\n', (2204, 2214), True, 'import numpy as np\n')] |
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
""" Implementation of the performance instrumentation report. """
import json
import numpy as np
import re
class InstrumentationReport(object):
@staticmethod
def get_event_uuid(event):
uuid = (-1, -1, -1)
if 'args' in event:
args = event['args']
if 'sdfg_id' in args and args['sdfg_id'] is not None:
uuid = (args['sdfg_id'], -1, -1)
if 'state_id' in args and args['state_id'] is not None:
uuid = (uuid[0], args['state_id'], -1)
if 'id' in args and args['id'] is not None:
uuid = (uuid[0], uuid[1], args['id'])
return uuid
def __init__(self, filename: str):
# Parse file
match = re.match(r'.*report-(\d+)\.json', filename)
self.name = match.groups()[0] if match is not None else 'N/A'
self.durations = {}
self.counters = {}
self._sortcat = None
self._sortdesc = False
with open(filename, 'r') as fp:
report = json.load(fp)
if 'traceEvents' not in report or 'sdfgHash' not in report:
print(filename, 'is not a valid SDFG instrumentation report!')
return
self.sdfg_hash = report['sdfgHash']
events = report['traceEvents']
for event in events:
if 'ph' in event:
phase = event['ph']
name = event['name']
if phase == 'X':
uuid = self.get_event_uuid(event)
if uuid not in self.durations:
self.durations[uuid] = {}
if name not in self.durations[uuid]:
self.durations[uuid][name] = []
self.durations[uuid][name].append(event['dur'] / 1000)
if phase == 'C':
if name not in self.counters:
self.counters[name] = 0
self.counters[name] += event['args'][name]
def __repr__(self):
return 'InstrumentationReport(name=%s)' % self.name
def sortby(self, column: str, ascending: bool = False):
if (column and column.lower()
not in ('counter', 'value', 'min', 'max', 'mean', 'median')):
raise ValueError('Only Counter, Value, Min, Max, Mean, Median are '
'supported')
self._sortcat = column if column is None else column.lower()
self._sortdesc = not ascending
def _get_runtimes_string(self,
label,
runtimes,
element,
sdfg,
state,
string,
row_format,
colw,
with_element_heading=True):
indent = ''
if len(runtimes) > 0:
element_label = ''
if element[0] > -1 and element[1] > -1 and element[2] > -1:
# This element is a node.
if sdfg != element[0]:
# No parent SDFG row present yet, print it.
string += row_format.format('SDFG (' + str(element[0]) +
')',
'',
'',
'',
'',
width=colw)
sdfg = element[0]
if state != element[1]:
# No parent state row present yet, print it.
string += row_format.format('|-State (' + str(element[1]) +
')',
'',
'',
'',
'',
width=colw)
state = element[1]
element_label = '| |-Node (' + str(element[2]) + ')'
indent = '| | |'
elif element[0] > -1 and element[1] > -1:
# This element is a state.
if sdfg != element[0]:
# No parent SDFG row present yet, print it.
string += row_format.format('SDFG (' + str(element[0]) +
')',
'',
'',
'',
'',
width=colw)
sdfg = element[0]
state = element[1]
element_label = '|-State (' + str(element[1]) + ')'
indent = '| |'
elif element[0] > -1:
# This element is an SDFG.
sdfg = element[0]
state = -1
element_label = 'SDFG (' + str(element[0]) + ')'
indent = '|'
else:
element_label = 'N/A'
if with_element_heading:
string += row_format.format(element_label,
'',
'',
'',
'',
width=colw)
string += row_format.format(indent + label + ':',
'',
'',
'',
'',
width=colw)
string += row_format.format(indent,
'%.3f' % np.min(runtimes),
'%.3f' % np.mean(runtimes),
'%.3f' % np.median(runtimes),
'%.3f' % np.max(runtimes),
width=colw)
return string, sdfg, state
def getkey(self, element):
events = self.durations[element]
result = []
for event in events.keys():
runtimes = events[event]
result.extend(runtimes)
result = np.array(result)
if self._sortcat == 'min':
return np.min(result)
elif self._sortcat == 'max':
return np.max(result)
elif self._sortcat == 'mean':
return np.mean(result)
else: # if self._sortcat == 'median':
return np.median(result)
def __str__(self):
COLW = 15
COUNTER_COLW = 39
element_list = list(self.durations.keys())
element_list.sort()
row_format = ('{:<{width}}' * 5) + '\n'
counter_format = ('{:<{width}}' * 2) + '\n'
string = 'Instrumentation report\n'
string += 'SDFG Hash: ' + self.sdfg_hash + '\n'
if len(self.durations) > 0:
string += ('-' * (COLW * 5)) + '\n'
string += ('{:<{width}}' * 2).format(
'Element', 'Runtime (ms)', width=COLW) + '\n'
string += row_format.format('',
'Min',
'Mean',
'Median',
'Max',
width=COLW)
string += ('-' * (COLW * 5)) + '\n'
sdfg = -1
state = -1
if self._sortcat in ('min', 'mean', 'median', 'max'):
element_list = sorted(element_list,
key=self.getkey,
reverse=self._sortdesc)
for element in element_list:
events = self.durations[element]
if len(events) > 0:
with_element_heading = True
for event in events.keys():
runtimes = events[event]
string, sdfg, state = self._get_runtimes_string(
event, runtimes, element, sdfg, state, string,
row_format, COLW, with_element_heading)
with_element_heading = False
string += ('-' * (COLW * 5)) + '\n'
if len(self.counters) > 0:
string += ('-' * (COUNTER_COLW * 2)) + '\n'
string += ('{:<{width}}' * 2).format(
'Counter', 'Value', width=COUNTER_COLW) + '\n'
string += ('-' * (COUNTER_COLW * 2)) + '\n'
if self._sortcat == 'value':
counter_list = sorted(self.counters,
key=lambda k: self.counters[k],
reverse=self._sortdesc)
elif self._sortcat == 'counter':
counter_list = sorted(self.counters.keys(),
reverse=self._sortdesc)
else:
counter_list = self.counters.keys()
for counter in counter_list:
string += counter_format.format(counter,
self.counters[counter],
width=COUNTER_COLW)
string += ('-' * (COUNTER_COLW * 2)) + '\n'
return string
| [
"numpy.mean",
"numpy.median",
"re.match",
"numpy.max",
"numpy.array",
"numpy.min",
"json.load"
] | [((830, 874), 're.match', 're.match', (['""".*report-(\\\\d+)\\\\.json"""', 'filename'], {}), "('.*report-(\\\\d+)\\\\.json', filename)\n", (838, 874), False, 'import re\n'), ((6740, 6756), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (6748, 6756), True, 'import numpy as np\n'), ((1122, 1135), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (1131, 1135), False, 'import json\n'), ((6811, 6825), 'numpy.min', 'np.min', (['result'], {}), '(result)\n', (6817, 6825), True, 'import numpy as np\n'), ((6882, 6896), 'numpy.max', 'np.max', (['result'], {}), '(result)\n', (6888, 6896), True, 'import numpy as np\n'), ((6209, 6225), 'numpy.min', 'np.min', (['runtimes'], {}), '(runtimes)\n', (6215, 6225), True, 'import numpy as np\n'), ((6276, 6293), 'numpy.mean', 'np.mean', (['runtimes'], {}), '(runtimes)\n', (6283, 6293), True, 'import numpy as np\n'), ((6344, 6363), 'numpy.median', 'np.median', (['runtimes'], {}), '(runtimes)\n', (6353, 6363), True, 'import numpy as np\n'), ((6414, 6430), 'numpy.max', 'np.max', (['runtimes'], {}), '(runtimes)\n', (6420, 6430), True, 'import numpy as np\n'), ((6954, 6969), 'numpy.mean', 'np.mean', (['result'], {}), '(result)\n', (6961, 6969), True, 'import numpy as np\n'), ((7036, 7053), 'numpy.median', 'np.median', (['result'], {}), '(result)\n', (7045, 7053), True, 'import numpy as np\n')] |
import numpy
import matplotlib.pylab as plt
class LinearHeadHead(object):
"""
Solves the system:
\div \frac{\rho}{\mu} k \grad (p + \rho g z) = 0 on the domain [x_0, x_1] \cross [z_0,z_1]
Boundary conditions are given by:
h(x_0,z,t) = h_0 [m] => p(x_0,z,t)=(h_0-z) \rho g
h(x_1,z,t) = h_L [m] => p(x_1,z,t)=(h_L-z) \rho g
Parameters are in units of:
\rho : density, [kg/m^3]
\mu : viscosity, [kg / m s^2]
K : absolute permeability, [ m^2 ]
g : gravity, used in converting head to pressure, [ m / s^2 ]
"""
def __init__(self, params=None):
if params is None:
params = dict()
params.setdefault("x_0",0)
params.setdefault("x_1",100)
params.setdefault("z_0",0)
params.setdefault("z_1",10)
params.setdefault("k",1.1847e-12)
params.setdefault("rho",998.2)
params.setdefault("mu",1.002e-3)
params.setdefault("h_0",20.0)
params.setdefault("h_1",19.0)
params.setdefault("g",9.80665)
params.setdefault("p_atm",101325.0)
self.__dict__.update(params)
def head(self, coords):
"""
Compute the head at the x-values given by coords[:]
h(x) = h_0 + (x/L)*(h_1-h_0)
"""
head = numpy.zeros(len(coords))
head[:] = self.h_0 + ((self.h_1-self.h_0 )/(self.x_1-self.x_0))*coords[:,0]
return head
def pressure(self, coords):
"""
Compute the pressure at (x,z)-coordinates in the coords[:,:] array.
Note: coords has dimension len(coords) x 2.
"""
pressure = numpy.zeros((len(coords),),'d')
head = self.head(coords)
pressure[:]=self.p_atm+(head[:]-coords[:,1])*self.rho*self.g
return pressure
def createFromXML(filename):
# grab params from input file
params = dict()
import amanzi_xml.utils.io
xml = amanzi_xml.utils.io.fromFile(filename)
import amanzi_xml.utils.search as search
#
# Domain Size
#
xyz = search.find_tag_path(xml, ["amanzi_input","mesh","generate","box",]).get("low_coordinates")
params["x_0"] = float(xyz.split(',')[0])
params["z_0"] = float(xyz.split(',')[2])
xyz = search.find_tag_path(xml, ["amanzi_input","mesh","generate","box",]).get("high_coordinates")
params["x_1"] = float(xyz.split(',')[0])
params["z_1"] = float(xyz.split(',')[2])
#
# Material Properties
#
strK = search.find_tag_path(xml, ["amanzi_input","materials","material","permeability",]).get('x')
params["k"] = float(strK)
strMu = search.find_tag_path(xml, ["amanzi_input","phases","liquid_phase","viscosity",]).text
params["mu"] = float(strMu)
strRho = search.find_tag_path(xml, ["amanzi_input","phases","liquid_phase","density",]).text
params["rho"] = float(strRho)
#
# Boundary Conditions
#
strh0 = search.find_tag_path(xml, ["amanzi_input","boundary_conditions","boundary_condition,LeftBC","liquid_phase","liquid_component","hydrostatic",]).get("value")
params["h_0"] = float(strh0)
strhL = search.find_tag_path(xml, ["amanzi_input","boundary_conditions","boundary_condition,RightBC","liquid_phase","liquid_component","hydrostatic",]).get("value")
params["h_L"] = float(strhL)
#
# Standard Gravity
#
params.setdefault("g",9.80665)
# instantiate the class
return LinearHeadHead(params)
if __name__ == "__main__":
# Instantiate the class
lhh = LinearHeadHead()
# Get 11 equally spaced points: dx=(x_1-x_0)/10
x = numpy.linspace(lhh.x_0,lhh.x_1,11)
# Create space for a set of (x,z) points
coords = numpy.zeros((11,2))
# set x
coords[:,0]=x
# set z
coords[:,1]=3
# compute heads and pressures
h1 = lhh.head(coords)
p1 = lhh.pressure(coords)
# reset z
coords[:,1]=7
# compute heads and pressures
h2 = lhh.head(coords)
p2 = lhh.pressure(coords)
# plot
plt.plot(x,p1)
plt.plot(x,p2)
plt.xlabel('x-coordinate [m]')
plt.ylabel('Pressure [Pa]')
# show the plot
# plt.show()
| [
"amanzi_xml.utils.search.find_tag_path",
"matplotlib.pylab.xlabel",
"numpy.linspace",
"numpy.zeros",
"matplotlib.pylab.plot",
"matplotlib.pylab.ylabel"
] | [((3622, 3658), 'numpy.linspace', 'numpy.linspace', (['lhh.x_0', 'lhh.x_1', '(11)'], {}), '(lhh.x_0, lhh.x_1, 11)\n', (3636, 3658), False, 'import numpy\n'), ((3716, 3736), 'numpy.zeros', 'numpy.zeros', (['(11, 2)'], {}), '((11, 2))\n', (3727, 3736), False, 'import numpy\n'), ((4033, 4048), 'matplotlib.pylab.plot', 'plt.plot', (['x', 'p1'], {}), '(x, p1)\n', (4041, 4048), True, 'import matplotlib.pylab as plt\n'), ((4052, 4067), 'matplotlib.pylab.plot', 'plt.plot', (['x', 'p2'], {}), '(x, p2)\n', (4060, 4067), True, 'import matplotlib.pylab as plt\n'), ((4071, 4101), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""x-coordinate [m]"""'], {}), "('x-coordinate [m]')\n", (4081, 4101), True, 'import matplotlib.pylab as plt\n'), ((4106, 4133), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""Pressure [Pa]"""'], {}), "('Pressure [Pa]')\n", (4116, 4133), True, 'import matplotlib.pylab as plt\n'), ((2646, 2732), 'amanzi_xml.utils.search.find_tag_path', 'search.find_tag_path', (['xml', "['amanzi_input', 'phases', 'liquid_phase', 'viscosity']"], {}), "(xml, ['amanzi_input', 'phases', 'liquid_phase',\n 'viscosity'])\n", (2666, 2732), True, 'import amanzi_xml.utils.search as search\n'), ((2777, 2862), 'amanzi_xml.utils.search.find_tag_path', 'search.find_tag_path', (['xml', "['amanzi_input', 'phases', 'liquid_phase', 'density']"], {}), "(xml, ['amanzi_input', 'phases', 'liquid_phase', 'density']\n )\n", (2797, 2862), True, 'import amanzi_xml.utils.search as search\n'), ((2086, 2156), 'amanzi_xml.utils.search.find_tag_path', 'search.find_tag_path', (['xml', "['amanzi_input', 'mesh', 'generate', 'box']"], {}), "(xml, ['amanzi_input', 'mesh', 'generate', 'box'])\n", (2106, 2156), True, 'import amanzi_xml.utils.search as search\n'), ((2278, 2348), 'amanzi_xml.utils.search.find_tag_path', 'search.find_tag_path', (['xml', "['amanzi_input', 'mesh', 'generate', 'box']"], {}), "(xml, ['amanzi_input', 'mesh', 'generate', 'box'])\n", (2298, 2348), True, 'import amanzi_xml.utils.search as 
search\n'), ((2512, 2600), 'amanzi_xml.utils.search.find_tag_path', 'search.find_tag_path', (['xml', "['amanzi_input', 'materials', 'material', 'permeability']"], {}), "(xml, ['amanzi_input', 'materials', 'material',\n 'permeability'])\n", (2532, 2600), True, 'import amanzi_xml.utils.search as search\n'), ((2947, 3101), 'amanzi_xml.utils.search.find_tag_path', 'search.find_tag_path', (['xml', "['amanzi_input', 'boundary_conditions', 'boundary_condition,LeftBC',\n 'liquid_phase', 'liquid_component', 'hydrostatic']"], {}), "(xml, ['amanzi_input', 'boundary_conditions',\n 'boundary_condition,LeftBC', 'liquid_phase', 'liquid_component',\n 'hydrostatic'])\n", (2967, 3101), True, 'import amanzi_xml.utils.search as search\n'), ((3148, 3303), 'amanzi_xml.utils.search.find_tag_path', 'search.find_tag_path', (['xml', "['amanzi_input', 'boundary_conditions', 'boundary_condition,RightBC',\n 'liquid_phase', 'liquid_component', 'hydrostatic']"], {}), "(xml, ['amanzi_input', 'boundary_conditions',\n 'boundary_condition,RightBC', 'liquid_phase', 'liquid_component',\n 'hydrostatic'])\n", (3168, 3303), True, 'import amanzi_xml.utils.search as search\n')] |
"""Test the build_featurizer code."""
import os
import random
import warnings
import logging
import keras.backend as K
import numpy as np
import pytest
from keras.layers import Dense, Activation, Input
from keras.layers.merge import add
from keras.models import Sequential, Model
from pic2vec.build_featurizer import (_decapitate_model, _find_pooling_constant,
_splice_layer, _downsample_model_features,
_initialize_model, _check_downsampling_mismatch,
build_featurizer)
from pic2vec.squeezenet import SqueezeNet
from pic2vec.enums import MODELS, ATOL
random.seed(5102020)
# Create tensor for splicing
SPLICING_TENSOR = K.constant(3, shape=(3, 12))
# Create featurization for finding the pooling constant
POOLING_FEATURES = K.constant(2, shape=(3, 60))
# Path to checking prediction arrays for each model in _initialize_model
INITIALIZED_MODEL_TEST_ARRAY = 'tests/build_featurizer_testing/{}_test_prediction.npy'
@pytest.fixture(scope='module')
def check_model():
# Building the checking model
input_layer = Input(shape=(100, ))
layer = Dense(40)(input_layer)
layer = Activation('relu')(layer)
layer = Dense(20)(layer)
layer = Activation('relu')(layer)
layer = Dense(10)(layer)
layer = Activation('relu')(layer)
layer = Dense(5)(layer)
output_layer = Activation('softmax')(layer)
check_model = Model(inputs=input_layer, outputs=output_layer)
return check_model
def test_decapitate_model_lazy_input():
"""Test an error is raised when the model has a lazy input layer initialization"""
# Raise warning when model has lazy input layer initialization
error_model = Sequential([
Dense(40, input_shape=(100,)),
Dense(20),
Activation('softmax')])
with warnings.catch_warnings(record=True) as warning_check:
_decapitate_model(error_model, 1)
assert len(warning_check) == 1
assert "depth issues" in str(warning_check[-1].message)
def test_decapitate_model_too_deep(check_model):
"""Test error raised when model is decapitated too deep"""
# Check for Value Error when passed a depth >= (# of layers in network) - 1
with pytest.raises(ValueError):
_decapitate_model(check_model, 8)
def test_decapitate_model(check_model):
"""
This test creates a toy network, and checks that it calls the right errors
and checks that it decapitates the network correctly:
"""
# Create test model
test_model = _decapitate_model(check_model, 5)
# Make checks for all of the necessary features: the model outputs, the
# last layer, the last layer's connections, and the last layer's shape
assert test_model.layers[-1] == test_model.layers[3]
assert test_model.layers[3].outbound_nodes == []
assert test_model.outputs == [test_model.layers[3].output]
assert test_model.layers[-1].output_shape == (None, 20)
def test_splice_layer_bad_split():
"""Check error with bad split on the tensor"""
with pytest.raises(ValueError):
_splice_layer(SPLICING_TENSOR, 5)
def test_splice_layer():
"""Test method splices tensors correctly"""
# Create spliced and added layers via splicing function
list_of_spliced_layers = _splice_layer(SPLICING_TENSOR, 3)
# Add each of the layers together
x = add(list_of_spliced_layers)
# Create the spliced and added layers by hand
check_layer = K.constant(9, shape=(3, 4))
# Check the math
assert np.allclose(K.eval(check_layer), K.eval(x), atol=ATOL)
def test_find_pooling_constant_upsample():
"""Test error when trying to upsample"""
with pytest.raises(ValueError):
_find_pooling_constant(POOLING_FEATURES, 120)
def test_find_pooling_constant_bad_divisor():
"""Test error when trying to downsample to a non-divisor of the features"""
with pytest.raises(ValueError):
_find_pooling_constant(POOLING_FEATURES, 40)
with pytest.raises(ValueError):
_find_pooling_constant(POOLING_FEATURES, 0)
def test_find_pooling_constant():
"""Test that pooling constant given correct answer with good inputs"""
assert _find_pooling_constant(POOLING_FEATURES, 6) == 10
def test_downsample_model_features():
"""
Test creates a toy numpy array, and checks that the method
correctly downsamples the array into a hand-checked tensor
"""
# Create the spliced and averaged tensor via downsampling function
array = np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
[11, 12, 13, 14, 15, 16, 17, 18, 19, 20],
[21, 22, 23, 24, 25, 26, 27, 28, 29, 30]
])
tensor = K.variable(array)
x = _downsample_model_features(tensor, 5)
# Create the spliced and averaged tensor by hand
check_array = np.array([[1.5, 3.5, 5.5, 7.5, 9.5],
[11.5, 13.5, 15.5, 17.5, 19.5],
[21.5, 23.5, 25.5, 27.5, 29.5]
])
check_tensor = K.variable(check_array)
# Check that they are equal: that it returns the correct tensor
assert np.allclose(K.eval(check_tensor), K.eval(x), atol=ATOL)
def test_check_downsampling_mismatch_bad_num_features():
"""Raises error with autodownsampling an odd number of features"""
with pytest.raises(ValueError):
_check_downsampling_mismatch(True, 0, 2049)
def test_check_downsampling_mismatch_autosample():
"""Test method correctly autosamples"""
# Testing automatic downsampling
assert _check_downsampling_mismatch(True, 0, 2048) == (True, 1024)
def test_check_downsampling_mismatch_no_sample():
"""Test method correctly returns with no sampling"""
# Testing no downsampling
assert _check_downsampling_mismatch(False, 0, 2048) == (False, 0)
def test_check_downsampling_mismatch_manual_sample():
    """An explicit sample size forces downsampling to that size."""
    result = _check_downsampling_mismatch(False, 512, 2048)
    assert result == (True, 512)
def check_model_equal(model1, model2):
    """Assert that two Keras models have the same structure and weights.

    Compares the layer counts, then every weight array of every
    corresponding layer pair within ATOL tolerance.

    Args:
        model1, model2: Keras models exposing ``.layers`` and per-layer
            ``get_weights()``.
    Raises:
        AssertionError: if the layer counts, weight counts, or weight
            values differ.
    """
    assert len(model1.layers) == len(model2.layers)
    # zip is safe here: the layer counts were just asserted equal.
    for layer1, layer2 in zip(model1.layers, model2.layers):
        # Fetch each layer's weights once instead of re-calling per array.
        weights1 = layer1.get_weights()
        weights2 = layer2.get_weights()
        assert len(weights1) == len(weights2)
        for array1, array2 in zip(weights1, weights2):
            assert np.allclose(array1, array2, atol=ATOL)
def test_initialize_model_weights_not_found():
    """Loading weights from a nonexistent path must raise IOError."""
    missing_path = 'htraenoytinutroppodnocesaevahtondideduti/losfosraeyderdnuhenootdenmednocsecar'
    # Sanity-check the fixture path really is absent before asserting the error.
    if os.path.isfile(missing_path):
        logging.error('Whoops, that mirage exists. '
                      'Change error_weight to a file path that does not exist.')
    with pytest.raises(IOError):
        _initialize_model('squeezenet', missing_path)
def test_initialize_model_bad_weights():
    """
    Loading a weights file that exists but is not a valid weights format
    must raise IOError.
    """
    error_weight = 'bad_weights_test'
    # Context manager guarantees the handle is closed before the load attempt
    # (the original opened/wrote/closed the file by hand).
    with open(error_weight, 'w') as bad_weights_file:
        bad_weights_file.write('this should fail')
    try:
        with pytest.raises(IOError):
            _initialize_model('squeezenet', error_weight)
    finally:
        # Always remove the scratch file, even if the assertion fails.
        os.remove(error_weight)
def test_initialize_model_wrong_weights():
    """Loading SqueezeNet weights into VGG16 must raise ValueError."""
    weights_path = 'pic2vec/saved_models/squeezenet_weights_tf_dim_ordering_tf_kernels.h5'
    assert os.path.isfile(weights_path)
    with pytest.raises(ValueError):
        _initialize_model('vgg16', weights_path)
# (model name, accepted layer counts, blank-input tensor shape) per model.
# Layer counts are lists because Keras versions disagree on how many
# layers some architectures report.
INITIALIZE_MODEL_CASES = [
    ('squeezenet', [67], (1, 227, 227, 3)),
    ('vgg16', [23], (1, 224, 224, 3)),
    ('vgg19', [26], (1, 224, 224, 3)),
    ('resnet50', [176, 177], (1, 224, 224, 3)),
    ('inceptionv3', [313], (1, 299, 299, 3)),
    ('xception', [134], (1, 299, 299, 3)),
]
@pytest.mark.parametrize('model_str, expected_layers, test_size',
                         INITIALIZE_MODEL_CASES, ids=MODELS)
def test_initialize_model(model_str, expected_layers, test_size):
    """Each architecture initializes with known layer counts and predictions."""
    model = _initialize_model(model_str)
    if model_str == 'squeezenet':
        try:
            model_downloaded_weights = SqueezeNet()
        except Exception:
            raise AssertionError('Problem loading SqueezeNet weights.')
        check_model_equal(model, model_downloaded_weights)
    # Keras 2.1.5+ sometimes reports different layer counts for the same
    # architecture without changing predictions, so accept any listed count.
    assert len(model.layers) in expected_layers
    # Predict on an all-zero image and compare against the stored reference
    # array to confirm the weights loaded correctly.
    reference = np.load(INITIALIZED_MODEL_TEST_ARRAY.format(model_str))
    prediction = model.predict_on_batch(np.zeros(test_size))
    assert np.allclose(prediction, reference, atol=ATOL)
    del model
# Cache of initialized base models, keyed by model name, so each network is
# loaded at most once across the parametrized test_build_featurizer runs.
FEATURIZER_MODEL_DICT = dict.fromkeys(MODELS)
# (depth, autosample, sample_size, expected_size, model_str) combinations
# covering every architecture at depths 1-4, with and without downsampling.
FEAT_CASES = [  # squeezenet
    (1, False, 128, 128, 'squeezenet'), (1, False, 0, 512, 'squeezenet'),
    (1, True, 0, 256, 'squeezenet'), (2, True, 0, 256, 'squeezenet'),
    (2, False, 128, 128, 'squeezenet'), (2, False, 0, 512, 'squeezenet'),
    (3, False, 96, 96, 'squeezenet'), (3, False, 0, 384, 'squeezenet'),
    (3, True, 0, 192, 'squeezenet'), (4, True, 0, 192, 'squeezenet'),
    (4, False, 96, 96, 'squeezenet'), (4, False, 0, 384, 'squeezenet'),
    # vgg16
    (1, False, 1024, 1024, 'vgg16'), (1, False, 0, 4096, 'vgg16'),
    (1, True, 0, 2048, 'vgg16'), (2, True, 0, 2048, 'vgg16'),
    (2, False, 1024, 1024, 'vgg16'), (2, False, 0, 4096, 'vgg16'),
    (3, False, 128, 128, 'vgg16'), (3, False, 0, 512, 'vgg16'),
    (3, True, 0, 256, 'vgg16'), (4, True, 0, 256, 'vgg16'),
    (4, False, 128, 128, 'vgg16'), (4, False, 0, 512, 'vgg16'),
    # vgg19
    (1, False, 1024, 1024, 'vgg19'), (1, False, 0, 4096, 'vgg19'),
    (1, True, 0, 2048, 'vgg19'), (2, True, 0, 2048, 'vgg19'),
    (2, False, 1024, 1024, 'vgg19'), (2, False, 0, 4096, 'vgg19'),
    (3, False, 128, 128, 'vgg19'), (3, False, 0, 512, 'vgg19'),
    (3, True, 0, 256, 'vgg19'), (4, True, 0, 256, 'vgg19'),
    (4, False, 128, 128, 'vgg19'), (4, False, 0, 512, 'vgg19'),
    # resnet50
    (1, False, 512, 512, 'resnet50'), (1, False, 0, 2048, 'resnet50'),
    (1, True, 0, 1024, 'resnet50'), (2, True, 0, 1024, 'resnet50'),
    (2, False, 512, 512, 'resnet50'), (2, False, 0, 2048, 'resnet50'),
    (3, False, 512, 512, 'resnet50'), (3, False, 0, 2048, 'resnet50'),
    (3, True, 0, 1024, 'resnet50'), (4, True, 0, 1024, 'resnet50'),
    (4, False, 512, 512, 'resnet50'), (4, False, 0, 2048, 'resnet50'),
    # inceptionv3
    (1, False, 512, 512, 'inceptionv3'), (1, False, 0, 2048, 'inceptionv3'),
    (1, True, 0, 1024, 'inceptionv3'), (2, True, 0, 1024, 'inceptionv3'),
    (2, False, 512, 512, 'inceptionv3'), (2, False, 0, 2048, 'inceptionv3'),
    (3, False, 512, 512, 'inceptionv3'), (3, False, 0, 2048, 'inceptionv3'),
    (3, True, 0, 1024, 'inceptionv3'), (4, True, 0, 640, 'inceptionv3'),
    (4, False, 320, 320, 'inceptionv3'), (4, False, 0, 1280, 'inceptionv3'),
    # xception
    (1, False, 512, 512, 'xception'), (1, False, 0, 2048, 'xception'),
    (1, True, 0, 1024, 'xception'), (2, True, 0, 512, 'xception'),
    (2, False, 256, 256, 'xception'), (2, False, 0, 1024, 'xception'),
    (3, False, 182, 182, 'xception'), (3, False, 0, 728, 'xception'),
    (3, True, 0, 364, 'xception'), (4, True, 0, 364, 'xception'),
    (4, False, 182, 182, 'xception'), (4, False, 0, 728, 'xception')
]
@pytest.mark.parametrize('depth, autosample, sample_size, expected_size, model_str', FEAT_CASES)
def test_build_featurizer(depth, autosample, sample_size, expected_size, model_str):
    """Every depth/sampling combination yields the expected output width."""
    # Lazily initialize and cache each base model so it is only loaded once.
    if FEATURIZER_MODEL_DICT[model_str] is None:
        FEATURIZER_MODEL_DICT[model_str] = _initialize_model(model_str)
    featurizer = build_featurizer(depth, autosample, sample_size,
                                 model_str=model_str,
                                 loaded_model=FEATURIZER_MODEL_DICT[model_str])
    assert featurizer.layers[-1].output_shape == (None, expected_size)
    del featurizer
if __name__ == '__main__':
    # test_initialize_model and test_build_featurizer are parametrized, so the
    # original direct calls without arguments raised TypeError.  Delegate to
    # pytest, which collects every test here and supplies the parameters
    # from the @pytest.mark.parametrize decorators.
    pytest.main([__file__])
| [
"pic2vec.build_featurizer._decapitate_model",
"numpy.array",
"keras.layers.Activation",
"pytest.fixture",
"keras.layers.Dense",
"logging.error",
"os.remove",
"keras.backend.constant",
"keras.layers.merge.add",
"keras.models.Model",
"pic2vec.build_featurizer._find_pooling_constant",
"pic2vec.bu... | [((670, 690), 'random.seed', 'random.seed', (['(5102020)'], {}), '(5102020)\n', (681, 690), False, 'import random\n'), ((739, 767), 'keras.backend.constant', 'K.constant', (['(3)'], {'shape': '(3, 12)'}), '(3, shape=(3, 12))\n', (749, 767), True, 'import keras.backend as K\n'), ((844, 872), 'keras.backend.constant', 'K.constant', (['(2)'], {'shape': '(3, 60)'}), '(2, shape=(3, 60))\n', (854, 872), True, 'import keras.backend as K\n'), ((1038, 1068), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (1052, 1068), False, 'import pytest\n'), ((8230, 8334), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""model_str, expected_layers, test_size"""', 'INITIALIZE_MODEL_CASES'], {'ids': 'MODELS'}), "('model_str, expected_layers, test_size',\n INITIALIZE_MODEL_CASES, ids=MODELS)\n", (8253, 8334), False, 'import pytest\n'), ((12146, 12246), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""depth, autosample, sample_size, expected_size, model_str"""', 'FEAT_CASES'], {}), "(\n 'depth, autosample, sample_size, expected_size, model_str', FEAT_CASES)\n", (12169, 12246), False, 'import pytest\n'), ((1140, 1159), 'keras.layers.Input', 'Input', ([], {'shape': '(100,)'}), '(shape=(100,))\n', (1145, 1159), False, 'from keras.layers import Dense, Activation, Input\n'), ((1463, 1510), 'keras.models.Model', 'Model', ([], {'inputs': 'input_layer', 'outputs': 'output_layer'}), '(inputs=input_layer, outputs=output_layer)\n', (1468, 1510), False, 'from keras.models import Sequential, Model\n'), ((2570, 2603), 'pic2vec.build_featurizer._decapitate_model', '_decapitate_model', (['check_model', '(5)'], {}), '(check_model, 5)\n', (2587, 2603), False, 'from pic2vec.build_featurizer import _decapitate_model, _find_pooling_constant, _splice_layer, _downsample_model_features, _initialize_model, _check_downsampling_mismatch, build_featurizer\n'), ((3319, 3352), 'pic2vec.build_featurizer._splice_layer', '_splice_layer', 
(['SPLICING_TENSOR', '(3)'], {}), '(SPLICING_TENSOR, 3)\n', (3332, 3352), False, 'from pic2vec.build_featurizer import _decapitate_model, _find_pooling_constant, _splice_layer, _downsample_model_features, _initialize_model, _check_downsampling_mismatch, build_featurizer\n'), ((3399, 3426), 'keras.layers.merge.add', 'add', (['list_of_spliced_layers'], {}), '(list_of_spliced_layers)\n', (3402, 3426), False, 'from keras.layers.merge import add\n'), ((3495, 3522), 'keras.backend.constant', 'K.constant', (['(9)'], {'shape': '(3, 4)'}), '(9, shape=(3, 4))\n', (3505, 3522), True, 'import keras.backend as K\n'), ((4533, 4664), 'numpy.array', 'np.array', (['[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [11, 12, 13, 14, 15, 16, 17, 18, 19, 20],\n [21, 22, 23, 24, 25, 26, 27, 28, 29, 30]]'], {}), '([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [11, 12, 13, 14, 15, 16, 17, 18,\n 19, 20], [21, 22, 23, 24, 25, 26, 27, 28, 29, 30]])\n', (4541, 4664), True, 'import numpy as np\n'), ((4741, 4758), 'keras.backend.variable', 'K.variable', (['array'], {}), '(array)\n', (4751, 4758), True, 'import keras.backend as K\n'), ((4768, 4805), 'pic2vec.build_featurizer._downsample_model_features', '_downsample_model_features', (['tensor', '(5)'], {}), '(tensor, 5)\n', (4794, 4805), False, 'from pic2vec.build_featurizer import _decapitate_model, _find_pooling_constant, _splice_layer, _downsample_model_features, _initialize_model, _check_downsampling_mismatch, build_featurizer\n'), ((4878, 4983), 'numpy.array', 'np.array', (['[[1.5, 3.5, 5.5, 7.5, 9.5], [11.5, 13.5, 15.5, 17.5, 19.5], [21.5, 23.5, \n 25.5, 27.5, 29.5]]'], {}), '([[1.5, 3.5, 5.5, 7.5, 9.5], [11.5, 13.5, 15.5, 17.5, 19.5], [21.5,\n 23.5, 25.5, 27.5, 29.5]])\n', (4886, 4983), True, 'import numpy as np\n'), ((5084, 5107), 'keras.backend.variable', 'K.variable', (['check_array'], {}), '(check_array)\n', (5094, 5107), True, 'import keras.backend as K\n'), ((7808, 7843), 'os.path.isfile', 'os.path.isfile', (['squeeze_weight_path'], {}), 
'(squeeze_weight_path)\n', (7822, 7843), False, 'import os\n'), ((8483, 8511), 'pic2vec.build_featurizer._initialize_model', '_initialize_model', (['model_str'], {}), '(model_str)\n', (8500, 8511), False, 'from pic2vec.build_featurizer import _decapitate_model, _find_pooling_constant, _splice_layer, _downsample_model_features, _initialize_model, _check_downsampling_mismatch, build_featurizer\n'), ((9123, 9142), 'numpy.zeros', 'np.zeros', (['test_size'], {}), '(test_size)\n', (9131, 9142), True, 'import numpy as np\n'), ((9418, 9483), 'numpy.allclose', 'np.allclose', (['generated_test_array', 'existing_test_array'], {'atol': 'ATOL'}), '(generated_test_array, existing_test_array, atol=ATOL)\n', (9429, 9483), True, 'import numpy as np\n'), ((12504, 12624), 'pic2vec.build_featurizer.build_featurizer', 'build_featurizer', (['depth', 'autosample', 'sample_size'], {'model_str': 'model_str', 'loaded_model': 'FEATURIZER_MODEL_DICT[model_str]'}), '(depth, autosample, sample_size, model_str=model_str,\n loaded_model=FEATURIZER_MODEL_DICT[model_str])\n', (12520, 12624), False, 'from pic2vec.build_featurizer import _decapitate_model, _find_pooling_constant, _splice_layer, _downsample_model_features, _initialize_model, _check_downsampling_mismatch, build_featurizer\n'), ((1173, 1182), 'keras.layers.Dense', 'Dense', (['(40)'], {}), '(40)\n', (1178, 1182), False, 'from keras.layers import Dense, Activation, Input\n'), ((1208, 1226), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1218, 1226), False, 'from keras.layers import Dense, Activation, Input\n'), ((1246, 1255), 'keras.layers.Dense', 'Dense', (['(20)'], {}), '(20)\n', (1251, 1255), False, 'from keras.layers import Dense, Activation, Input\n'), ((1275, 1293), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1285, 1293), False, 'from keras.layers import Dense, Activation, Input\n'), ((1313, 1322), 'keras.layers.Dense', 'Dense', (['(10)'], {}), '(10)\n', (1318, 1322), 
False, 'from keras.layers import Dense, Activation, Input\n'), ((1342, 1360), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1352, 1360), False, 'from keras.layers import Dense, Activation, Input\n'), ((1380, 1388), 'keras.layers.Dense', 'Dense', (['(5)'], {}), '(5)\n', (1385, 1388), False, 'from keras.layers import Dense, Activation, Input\n'), ((1415, 1436), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (1425, 1436), False, 'from keras.layers import Dense, Activation, Input\n'), ((1862, 1898), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (1885, 1898), False, 'import warnings\n'), ((1925, 1958), 'pic2vec.build_featurizer._decapitate_model', '_decapitate_model', (['error_model', '(1)'], {}), '(error_model, 1)\n', (1942, 1958), False, 'from pic2vec.build_featurizer import _decapitate_model, _find_pooling_constant, _splice_layer, _downsample_model_features, _initialize_model, _check_downsampling_mismatch, build_featurizer\n'), ((2265, 2290), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2278, 2290), False, 'import pytest\n'), ((2300, 2333), 'pic2vec.build_featurizer._decapitate_model', '_decapitate_model', (['check_model', '(8)'], {}), '(check_model, 8)\n', (2317, 2333), False, 'from pic2vec.build_featurizer import _decapitate_model, _find_pooling_constant, _splice_layer, _downsample_model_features, _initialize_model, _check_downsampling_mismatch, build_featurizer\n'), ((3086, 3111), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3099, 3111), False, 'import pytest\n'), ((3121, 3154), 'pic2vec.build_featurizer._splice_layer', '_splice_layer', (['SPLICING_TENSOR', '(5)'], {}), '(SPLICING_TENSOR, 5)\n', (3134, 3154), False, 'from pic2vec.build_featurizer import _decapitate_model, _find_pooling_constant, _splice_layer, _downsample_model_features, _initialize_model, 
_check_downsampling_mismatch, build_featurizer\n'), ((3567, 3586), 'keras.backend.eval', 'K.eval', (['check_layer'], {}), '(check_layer)\n', (3573, 3586), True, 'import keras.backend as K\n'), ((3588, 3597), 'keras.backend.eval', 'K.eval', (['x'], {}), '(x)\n', (3594, 3597), True, 'import keras.backend as K\n'), ((3709, 3734), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3722, 3734), False, 'import pytest\n'), ((3744, 3789), 'pic2vec.build_featurizer._find_pooling_constant', '_find_pooling_constant', (['POOLING_FEATURES', '(120)'], {}), '(POOLING_FEATURES, 120)\n', (3766, 3789), False, 'from pic2vec.build_featurizer import _decapitate_model, _find_pooling_constant, _splice_layer, _downsample_model_features, _initialize_model, _check_downsampling_mismatch, build_featurizer\n'), ((3927, 3952), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3940, 3952), False, 'import pytest\n'), ((3962, 4006), 'pic2vec.build_featurizer._find_pooling_constant', '_find_pooling_constant', (['POOLING_FEATURES', '(40)'], {}), '(POOLING_FEATURES, 40)\n', (3984, 4006), False, 'from pic2vec.build_featurizer import _decapitate_model, _find_pooling_constant, _splice_layer, _downsample_model_features, _initialize_model, _check_downsampling_mismatch, build_featurizer\n'), ((4017, 4042), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4030, 4042), False, 'import pytest\n'), ((4052, 4095), 'pic2vec.build_featurizer._find_pooling_constant', '_find_pooling_constant', (['POOLING_FEATURES', '(0)'], {}), '(POOLING_FEATURES, 0)\n', (4074, 4095), False, 'from pic2vec.build_featurizer import _decapitate_model, _find_pooling_constant, _splice_layer, _downsample_model_features, _initialize_model, _check_downsampling_mismatch, build_featurizer\n'), ((4218, 4261), 'pic2vec.build_featurizer._find_pooling_constant', '_find_pooling_constant', (['POOLING_FEATURES', '(6)'], {}), '(POOLING_FEATURES, 6)\n', (4240, 4261), False, 
'from pic2vec.build_featurizer import _decapitate_model, _find_pooling_constant, _splice_layer, _downsample_model_features, _initialize_model, _check_downsampling_mismatch, build_featurizer\n'), ((5199, 5219), 'keras.backend.eval', 'K.eval', (['check_tensor'], {}), '(check_tensor)\n', (5205, 5219), True, 'import keras.backend as K\n'), ((5221, 5230), 'keras.backend.eval', 'K.eval', (['x'], {}), '(x)\n', (5227, 5230), True, 'import keras.backend as K\n'), ((5382, 5407), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5395, 5407), False, 'import pytest\n'), ((5417, 5460), 'pic2vec.build_featurizer._check_downsampling_mismatch', '_check_downsampling_mismatch', (['(True)', '(0)', '(2049)'], {}), '(True, 0, 2049)\n', (5445, 5460), False, 'from pic2vec.build_featurizer import _decapitate_model, _find_pooling_constant, _splice_layer, _downsample_model_features, _initialize_model, _check_downsampling_mismatch, build_featurizer\n'), ((5606, 5649), 'pic2vec.build_featurizer._check_downsampling_mismatch', '_check_downsampling_mismatch', (['(True)', '(0)', '(2048)'], {}), '(True, 0, 2048)\n', (5634, 5649), False, 'from pic2vec.build_featurizer import _decapitate_model, _find_pooling_constant, _splice_layer, _downsample_model_features, _initialize_model, _check_downsampling_mismatch, build_featurizer\n'), ((5816, 5860), 'pic2vec.build_featurizer._check_downsampling_mismatch', '_check_downsampling_mismatch', (['(False)', '(0)', '(2048)'], {}), '(False, 0, 2048)\n', (5844, 5860), False, 'from pic2vec.build_featurizer import _decapitate_model, _find_pooling_constant, _splice_layer, _downsample_model_features, _initialize_model, _check_downsampling_mismatch, build_featurizer\n'), ((6037, 6083), 'pic2vec.build_featurizer._check_downsampling_mismatch', '_check_downsampling_mismatch', (['(False)', '(512)', '(2048)'], {}), '(False, 512, 2048)\n', (6065, 6083), False, 'from pic2vec.build_featurizer import _decapitate_model, _find_pooling_constant, 
_splice_layer, _downsample_model_features, _initialize_model, _check_downsampling_mismatch, build_featurizer\n'), ((7036, 7058), 'pytest.raises', 'pytest.raises', (['IOError'], {}), '(IOError)\n', (7049, 7058), False, 'import pytest\n'), ((7068, 7113), 'pic2vec.build_featurizer._initialize_model', '_initialize_model', (['"""squeezenet"""', 'error_weight'], {}), "('squeezenet', error_weight)\n", (7085, 7113), False, 'from pic2vec.build_featurizer import _decapitate_model, _find_pooling_constant, _splice_layer, _downsample_model_features, _initialize_model, _check_downsampling_mismatch, build_featurizer\n'), ((7561, 7584), 'os.remove', 'os.remove', (['error_weight'], {}), '(error_weight)\n', (7570, 7584), False, 'import os\n'), ((7854, 7879), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7867, 7879), False, 'import pytest\n'), ((7889, 7936), 'pic2vec.build_featurizer._initialize_model', '_initialize_model', (['"""vgg16"""', 'squeeze_weight_path'], {}), "('vgg16', squeeze_weight_path)\n", (7906, 7936), False, 'from pic2vec.build_featurizer import _decapitate_model, _find_pooling_constant, _splice_layer, _downsample_model_features, _initialize_model, _check_downsampling_mismatch, build_featurizer\n'), ((12462, 12490), 'pic2vec.build_featurizer._initialize_model', '_initialize_model', (['model_str'], {}), '(model_str)\n', (12479, 12490), False, 'from pic2vec.build_featurizer import _decapitate_model, _find_pooling_constant, _splice_layer, _downsample_model_features, _initialize_model, _check_downsampling_mismatch, build_featurizer\n'), ((1770, 1799), 'keras.layers.Dense', 'Dense', (['(40)'], {'input_shape': '(100,)'}), '(40, input_shape=(100,))\n', (1775, 1799), False, 'from keras.layers import Dense, Activation, Input\n'), ((1809, 1818), 'keras.layers.Dense', 'Dense', (['(20)'], {}), '(20)\n', (1814, 1818), False, 'from keras.layers import Dense, Activation, Input\n'), ((1828, 1849), 'keras.layers.Activation', 'Activation', 
(['"""softmax"""'], {}), "('softmax')\n", (1838, 1849), False, 'from keras.layers import Dense, Activation, Input\n'), ((6836, 6864), 'os.path.isfile', 'os.path.isfile', (['error_weight'], {}), '(error_weight)\n', (6850, 6864), False, 'import os\n'), ((6900, 7010), 'logging.error', 'logging.error', (['"""Whoops, that mirage exists. Change error_weight to a file path that does not exist."""'], {}), "(\n 'Whoops, that mirage exists. Change error_weight to a file path that does not exist.'\n )\n", (6913, 7010), False, 'import logging\n'), ((7458, 7480), 'pytest.raises', 'pytest.raises', (['IOError'], {}), '(IOError)\n', (7471, 7480), False, 'import pytest\n'), ((7494, 7539), 'pic2vec.build_featurizer._initialize_model', '_initialize_model', (['"""squeezenet"""', 'error_weight'], {}), "('squeezenet', error_weight)\n", (7511, 7539), False, 'from pic2vec.build_featurizer import _decapitate_model, _find_pooling_constant, _splice_layer, _downsample_model_features, _initialize_model, _check_downsampling_mismatch, build_featurizer\n'), ((8599, 8611), 'pic2vec.squeezenet.SqueezeNet', 'SqueezeNet', ([], {}), '()\n', (8609, 8611), False, 'from pic2vec.squeezenet import SqueezeNet\n')] |
# -*- coding: utf-8 -*-#
"""
-------------------------------------------------------------------
Copyright (c) 2019-2022 Snow Lake Inc. All rights reserved.
Description :
File Name: __init__.py.py
Author : <EMAIL>
create date: 2021/7/8
-------------------------------------------------------------------
"""
import cv2
import numpy as np
import numba
@numba.jit(nopython=True)
def resize_image(image, w, h):
    """
    Bilinearly resize an image to (h, w), channel by channel.

    Implemented as two separable 1-D interpolation passes (width first,
    then height) and compiled with numba nopython mode.

    Args:
        image: float array of shape (h, w, c), values as-is ("normal")
        w: target width
        h: target height
    Returns:
        float32 array of shape (h, w, c)
    """
    resized = np.zeros((h, w, image.shape[2]), dtype=np.float32)
    # Intermediate buffer: width already resized, height still original.
    part = np.zeros((image.shape[0], w, image.shape[2]), dtype=np.float32)
    # Scale factors map target pixel indices back onto source coordinates.
    w_scale = (image.shape[1] - 1.0) / (resized.shape[1] - 1.0)
    h_scale = (image.shape[0] - 1.0) / (resized.shape[0] - 1.0)
    # Pass 1: interpolate along the width for every source row.
    for k in range(image.shape[2]):
        for r in range(image.shape[0]):
            for c in range(w):
                if c == w - 1 or image.shape[1] == 1:
                    # Last column (or degenerate 1-px-wide source): clamp to edge.
                    val = image[r, image.shape[1] - 1, k]
                else:
                    sx = c * w_scale
                    ix = int(sx)
                    dx = sx - ix
                    # Linear blend of the two horizontal neighbours.
                    val = (1 - dx) * image[r, ix, k] + dx * image[r, ix + 1, k]
                part[r, c, k] = val
    # Pass 2: interpolate along the height on the width-resized buffer.
    for k in range(image.shape[2]):
        for r in range(h):
            sy = r * h_scale
            iy = int(sy)
            dy = sy - iy
            for c in range(w):
                val = (1 - dy) * part[iy, c, k]
                resized[r, c, k] = val
            if r == h - 1 or image.shape[0] == 1:
                # Bottom row (or 1-px-tall source): no lower neighbour to blend.
                continue
            for c in range(w):
                val = dy * part[iy + 1, c, k]
                resized[r, c, k] += val
    return resized
def letterbox_image(image, new_size, padding_value=0.5):
    """
    Resize an image with unchanged aspect ratio, padding the remainder.

    Args:
        image: uint8 array of shape (h, w, 3)
        new_size: (width, height) of the output canvas
        padding_value: fill value for the padded border.  NOTE(review): the
            default 0.5 truncates to 0 in the uint8 canvas created below —
            confirm whether callers expect gray (e.g. 128) padding instead.
    Returns:
        uint8 array of shape (new_size[1], new_size[0], 3)
    """
    ih = image.shape[0]
    iw = image.shape[1]
    w, h = new_size
    # Uniform scale so the whole image fits inside the target canvas.
    scale = min(w / iw, h / ih)
    nw = int(iw * scale)
    nh = int(ih * scale)
    # BUG FIX: cv2.resize's third positional argument is `dst`, not the
    # interpolation flag, so the original call passed cv2.INTER_LINEAR as an
    # output buffer.  Pass the flag by keyword instead.
    image = cv2.resize(image, (nw, nh), interpolation=cv2.INTER_LINEAR)
    # Center the resized image on the padded canvas.
    offset = ((w - nw) // 2, (h - nh) // 2)
    new_image = np.full((new_size[1], new_size[0], 3), padding_value, dtype=np.uint8)
    new_image[offset[1]:offset[1] + nh, offset[0]:offset[0] + nw, :] = image
    return new_image
def letterbox_image_resize(image, size):
    """Resize an image to `size` (w, h), skipping images already 416x416.

    NOTE(review): the skip condition is hard-coded to 416 rather than derived
    from `size`; confirm whether non-416 targets should also short-circuit
    when the image already matches the target.
    """
    h, w, _ = image.shape
    if image.shape[0] != 416 or image.shape[1] != 416:
        print('images resize:', w, h)
        # BUG FIX: cv2.resize's third positional argument is `dst`; the
        # interpolation flag must be passed by keyword.
        new_image = cv2.resize(image, (size[0], size[1]), interpolation=cv2.INTER_LINEAR)
    else:
        new_image = image
    return new_image
def _resize_image_test_(image_path, test_data_path, model_image_size):
    """Compare resize_image output against a reference binary dump.

    The reference file holds a CHW float32 tensor of the model input size;
    it is transposed to HWC and subtracted from our resized image, and the
    shapes of the mismatching elements are printed.
    """
    chw = np.fromfile(test_data_path, dtype=np.float32).reshape(
        (3, model_image_size[0], model_image_size[1]))
    # CHW -> HWC via two axis swaps.
    reference = np.swapaxes(np.swapaxes(chw, 0, 1), 1, 2)
    img = np.array(cv2.imread(image_path, flags=cv2.IMREAD_COLOR), dtype='float32')
    img /= 255.
    # OpenCV loads BGR; reorder channels to RGB.
    img = img[..., [2, 1, 0]]
    diff = resize_image(img, model_image_size[0], model_image_size[1]) - reference
    print(diff.shape)
    print(diff[diff > 0.001].shape)
    print(diff[diff < -0.001].shape)
    print("test done.")
def _resize_image_test_letterbox_(image_path, model_image_size):
    """Visual check: display the letterboxed image in a window."""
    source = cv2.imread(image_path, flags=cv2.IMREAD_COLOR)
    boxed = letterbox_image(source, model_image_size)
    cv2.imshow("fff", boxed)
    cv2.waitKey(1000000)
    print("test done.")
| [
"numpy.fromfile",
"cv2.imshow",
"numpy.swapaxes",
"numpy.zeros",
"numba.jit",
"cv2.waitKey",
"numpy.full",
"cv2.resize",
"cv2.imread"
] | [((382, 406), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (391, 406), False, 'import numba\n'), ((587, 637), 'numpy.zeros', 'np.zeros', (['(h, w, image.shape[2])'], {'dtype': 'np.float32'}), '((h, w, image.shape[2]), dtype=np.float32)\n', (595, 637), True, 'import numpy as np\n'), ((649, 712), 'numpy.zeros', 'np.zeros', (['(image.shape[0], w, image.shape[2])'], {'dtype': 'np.float32'}), '((image.shape[0], w, image.shape[2]), dtype=np.float32)\n', (657, 712), True, 'import numpy as np\n'), ((2197, 2242), 'cv2.resize', 'cv2.resize', (['image', '(nw, nh)', 'cv2.INTER_LINEAR'], {}), '(image, (nw, nh), cv2.INTER_LINEAR)\n', (2207, 2242), False, 'import cv2\n'), ((2303, 2372), 'numpy.full', 'np.full', (['(new_size[1], new_size[0], 3)', 'padding_value'], {'dtype': 'np.uint8'}), '((new_size[1], new_size[0], 3), padding_value, dtype=np.uint8)\n', (2310, 2372), True, 'import numpy as np\n'), ((3042, 3072), 'numpy.swapaxes', 'np.swapaxes', (['image_data2', '(0)', '(1)'], {}), '(image_data2, 0, 1)\n', (3053, 3072), True, 'import numpy as np\n'), ((3091, 3121), 'numpy.swapaxes', 'np.swapaxes', (['image_data4', '(1)', '(2)'], {}), '(image_data4, 1, 2)\n', (3102, 3121), True, 'import numpy as np\n'), ((3906, 3952), 'cv2.imread', 'cv2.imread', (['image_path'], {'flags': 'cv2.IMREAD_COLOR'}), '(image_path, flags=cv2.IMREAD_COLOR)\n', (3916, 3952), False, 'import cv2\n'), ((4008, 4031), 'cv2.imshow', 'cv2.imshow', (['"""fff"""', 'img2'], {}), "('fff', img2)\n", (4018, 4031), False, 'import cv2\n'), ((4036, 4056), 'cv2.waitKey', 'cv2.waitKey', (['(1000000)'], {}), '(1000000)\n', (4047, 4056), False, 'import cv2\n'), ((2719, 2774), 'cv2.resize', 'cv2.resize', (['image', '(size[0], size[1])', 'cv2.INTER_LINEAR'], {}), '(image, (size[0], size[1]), cv2.INTER_LINEAR)\n', (2729, 2774), False, 'import cv2\n'), ((3434, 3480), 'cv2.imread', 'cv2.imread', (['image_path'], {'flags': 'cv2.IMREAD_COLOR'}), '(image_path, flags=cv2.IMREAD_COLOR)\n', (3444, 
3480), False, 'import cv2\n'), ((2923, 2968), 'numpy.fromfile', 'np.fromfile', (['test_data_path'], {'dtype': 'np.float32'}), '(test_data_path, dtype=np.float32)\n', (2934, 2968), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon May 11 14:34:08 2020
@author: <NAME> (<EMAIL>),
Finnish Meteorological Institute)
Olli's python implementation of ESA SNAP s2toolbox biophysical processor and
computation of vegetation indices.
See ATBD at https://step.esa.int/docs/extra/ATBD_S2ToolBox_L2B_V1.1.pdf
And java source code at
https://github.com/senbox-org/s2tbx/tree/master/s2tbx-biophysical/src/main/java/org/esa/s2tbx/biophysical
Caveats
Currently changes out-of-bounds inputs and outputs to NaN (or to the min or max
value if the output is within tolerance). It might be better to also output
flagging information (i.e. different flags for input and output out of bounds).
Convex-hull input checking is currently disabled. It is computationally slow and
of uncertain benefit. It is better to filter out bad data based on the L2A
quality information/classification
and hope that averaging removes some bad pixels.
"""
import os
import numpy as np
import xarray as xr
# Read SNAP Biophysical processor neural network parameters
# Sentinel-2 bands fed to the SNAP biophysical neural networks, in order.
SNAP_BIO_BANDS = ["B3", "B4", "B5", "B6", "B7", "B8A", "B11", "B12"]
# Published RMSE of each retrieved variable (see the S2ToolBox ATBD).
SNAP_BIO_RMSE = {
    "fapar": 0.05,
    "fcover": 0.04,
    "lai": 0.89,
    "lai_cab": 56,
    "lai_cw": 0.03,
}
# path_to_s2tbx_biophysical
snap_bio_path = os.path.join(os.path.dirname(__file__), "snap-auxdata/biophysical/2_1/")
# Per-variable neural-network coefficients loaded from the SNAP aux-data CSVs.
nn_params = {}
for var in ["FAPAR", "FCOVER", "LAI", "LAI_Cab", "LAI_Cw"]:
    # Input normalisation / output denormalisation min-max pairs.
    norm_minmax = np.loadtxt(
        snap_bio_path + "%s/%s_Normalisation" % (var, var), delimiter=","
    )
    denorm_minmax = np.loadtxt(
        snap_bio_path + "%s/%s_Denormalisation" % (var, var), delimiter=","
    )
    # Two-layer perceptron weights; biases reshaped to column/row vectors.
    layer1_weights = np.loadtxt(
        snap_bio_path + "%s/%s_Weights_Layer1_Neurons" % (var, var), delimiter=","
    )
    layer1_bias = np.loadtxt(
        snap_bio_path + "%s/%s_Weights_Layer1_Bias" % (var, var), delimiter=","
    ).reshape(-1, 1)
    layer2_weights = np.loadtxt(
        snap_bio_path + "%s/%s_Weights_Layer2_Neurons" % (var, var), delimiter=","
    ).reshape(1, -1)
    layer2_bias = np.loadtxt(
        snap_bio_path + "%s/%s_Weights_Layer2_Bias" % (var, var), delimiter=","
    ).reshape(1, -1)
    # (tolerance, output_min, output_max) used for output range handling
    # in _output_ouf_of_range.
    extreme_cases = np.loadtxt(
        snap_bio_path + "%s/%s_ExtremeCases" % (var, var), delimiter=","
    )
    if var == "FCOVER":
        # FCOVER has no definition-domain files in the aux data.
        nn_params[var] = {
            "norm_minmax": norm_minmax,
            "denorm_minmax": denorm_minmax,
            "layer1_weights": layer1_weights,
            "layer1_bias": layer1_bias,
            "layer2_weights": layer2_weights,
            "layer2_bias": layer2_bias,
            "extreme_cases": extreme_cases,
        }
    else:
        # Per-band valid input range plus the training-domain grid used for
        # (currently disabled) convex-hull input checking.
        defdom_min = np.loadtxt(
            snap_bio_path + "%s/%s_DefinitionDomain_MinMax" % (var, var), delimiter=","
        )[0, :].reshape(-1, 1)
        defdom_max = np.loadtxt(
            snap_bio_path + "%s/%s_DefinitionDomain_MinMax" % (var, var), delimiter=","
        )[1, :].reshape(-1, 1)
        defdom_grid = np.loadtxt(
            snap_bio_path + "%s/%s_DefinitionDomain_Grid" % (var, var), delimiter=","
        )
        nn_params[var] = {
            "norm_minmax": norm_minmax,
            "denorm_minmax": denorm_minmax,
            "layer1_weights": layer1_weights,
            "layer1_bias": layer1_bias,
            "layer2_weights": layer2_weights,
            "layer2_bias": layer2_bias,
            "defdom_min": defdom_min,
            "defdom_max": defdom_max,
            "defdom_grid": defdom_grid,
            "extreme_cases": extreme_cases,
        }
def _normalization(x, x_min, x_max):
x_norm = 2 * (x - x_min) / (x_max - x_min) - 1
return x_norm
def _denormalization(y_norm, y_min, y_max):
y = 0.5 * (y_norm + 1) * (y_max - y_min)
return y
# LAI tests
# x = np.array([0.057979, 0.0078856, 0.093585, 0.2585, 0.28253, 0.30874, 0.1708, 0.069808, 0.98434, 0.40581, -0.55142]).reshape(-1,1)
# x1 = np.array([0.056024,0.012462 ,0.088543, 0.41626 ,0.49575, 0.51452 ,0.14425, 0.043583, 0.99367, 0.90957 ,-0.99999]).reshape(-1,1)
# =============================================================================
# def multidim_intersect(arr1, arr2):
# arr1_view = arr1.view([('',arr1.dtype)]*arr1.shape[1])
# arr2_view = arr2.view([('',arr2.dtype)]*arr2.shape[1])
# isin = np.isin(arr1_view, arr2_view)
# return isin
# =============================================================================
def _input_ouf_of_range(x, variable):
    """NaN out pixel columns with any band outside the definition domain.

    x is an 11 x N array (8 band rows followed by 3 angle rows); only the
    band rows are range-checked, but the whole column is invalidated.
    """
    checked = x.copy()
    bands = checked[:8, :]
    # Per-band valid range from the SNAP definition-domain aux data.
    band_min = nn_params[variable]["defdom_min"][:, 0].reshape(-1, 1)
    band_max = nn_params[variable]["defdom_max"][:, 0].reshape(-1, 1)
    bad_vector = np.any((bands < band_min) | (bands > band_max), axis=0)
    bands[:, bad_vector] = np.nan
    # Convex-hull membership check remains disabled: computationally slow
    # and of uncertain benefit versus L2A quality filtering.
    checked[:, bad_vector] = np.nan
    return checked
def _output_ouf_of_range(output, variable):
    """Clamp or invalidate network outputs against the variable's extreme cases.

    nn_params[variable]["extreme_cases"] holds (tolerance, output_min,
    output_max). NOTE(review): the comparisons below are only consistent if
    ``tolerance`` is negative (then output_min + tolerance < output_min and
    output_max - tolerance > output_max): values beyond the tolerance band are
    invalidated, values inside it are clamped to the bound. Confirm the sign
    convention against the *_ExtremeCases auxiliary files.
    """
    new_output = np.copy(output)
    tolerance = nn_params[variable]["extreme_cases"][0]
    output_min = nn_params[variable]["extreme_cases"][1]
    output_max = nn_params[variable]["extreme_cases"][2]
    # below the tolerance band around the minimum -> invalid
    new_output[output < (output_min + tolerance)] = np.nan
    # below the minimum but within tolerance -> clamp to the minimum
    new_output[(output > (output_min + tolerance)) & (output < output_min)] = output_min
    # above the maximum but within tolerance -> clamp to the maximum
    new_output[(output < (output_max - tolerance)) & (output > output_max)] = output_max
    # beyond the tolerance band above the maximum -> invalid
    new_output[output > (output_max - tolerance)] = np.nan
    return new_output
def _compute_variable(x, variable):
    """Run the two-layer SNAP neural network for ``variable`` on pixel matrix x.

    x is the 11 x N input (8 band reflectances + 3 angle cosines). Pixels
    outside the definition domain are NaN-masked before normalisation, and
    out-of-range outputs are clamped/invalidated after denormalisation.
    Returns a (1, N) array.

    Fix: removed the dead ``x_norm = np.zeros_like(x)`` assignment, which was
    immediately overwritten.
    """
    x = _input_ouf_of_range(x, variable)
    x_norm = _normalization(
        x,
        nn_params[variable]["norm_minmax"][:, 0].reshape(-1, 1),
        nn_params[variable]["norm_minmax"][:, 1].reshape(-1, 1),
    )
    out_layer1 = np.tanh(
        nn_params[variable]["layer1_weights"].dot(x_norm)
        + nn_params[variable]["layer1_bias"]
    )
    out_layer2 = (
        nn_params[variable]["layer2_weights"].dot(out_layer1)
        + nn_params[variable]["layer2_bias"]
    )
    output = _denormalization(
        out_layer2,
        nn_params[variable]["denorm_minmax"][0],
        nn_params[variable]["denorm_minmax"][1],
    )[0]
    output = _output_ouf_of_range(output, variable)
    # reshape to (1, n_pixels) so xr.apply_ufunc can map it back over "yx"
    output = output.reshape(1, np.shape(x)[1])
    return output
def _s2_lists_to_pixel_vectors(single_date_dict):
band_list = ["B3", "B4", "B5", "B6", "B7", "B8A", "B11", "B12"]
pixel_vector = np.zeros(shape=(11, len(single_date_dict[band_list[0]])))
for i, b in enumerate(band_list):
pixel_vector[i, :] = np.array(single_date_dict[b]) / 10000.0
pixel_vector[8, :] = np.cos(np.radians(single_date_dict["view_zenith"]))
pixel_vector[9, :] = np.cos(np.radians(single_date_dict["sun_zenith"]))
pixel_vector[10, :] = np.cos(
np.radians(single_date_dict["sun_azimuth"] - single_date_dict["view_azimuth"])
)
return pixel_vector
def compute_ndvi(dataset):
    """Compute NDVI, (B8A - B4) / (B8A + B4).

    Parameters
    ----------
    dataset : xarray dataset

    Returns
    -------
    xarray dataset
        Dataset with an added 'ndvi' array.
    """
    red = dataset.band_data.sel(band="B4")
    nir = dataset.band_data.sel(band="B8A")
    return dataset.assign({"ndvi": (nir - red) / (nir + red)})
def compute_ci_red_edge(dataset):
    """Compute the CI_Red_Edge vegetation index, B7 / B5 - 1.

    Parameters
    ----------
    dataset : xarray dataset

    Returns
    -------
    xarray dataset
        Dataset with an added 'ci_red_edge' array.
    """
    numerator_band = dataset.band_data.sel(band="B7")
    denominator_band = dataset.band_data.sel(band="B5")
    return dataset.assign({"ci_red_edge": (numerator_band / denominator_band) - 1})
def compute_gcc(dataset):
    """Compute the GCC vegetation index, B3 / (B2 + B3 + B4).

    Parameters
    ----------
    dataset : xarray dataset

    Returns
    -------
    xarray dataset
        Dataset with an added 'gcc' array.
    """
    blue = dataset.band_data.sel(band="B2")
    green = dataset.band_data.sel(band="B3")
    red = dataset.band_data.sel(band="B4")
    return dataset.assign({"gcc": green / (blue + green + red)})
def _angle_band_layer(dataset, band_data, angle_cosines, band_name):
    """Broadcast per-time cos(angle) values over the spatial grid as a band.

    The cosine values are multiplied onto a ones-array shaped like one band of
    ``band_data`` (transposed to x, y, time) and wrapped as a one-band
    DataArray so it can be concatenated with the reflectance bands.
    """
    layer = np.ones_like(band_data[:, 0, :, :]).T * angle_cosines
    layer = layer[..., np.newaxis]
    return xr.DataArray(
        layer,
        coords=[dataset.x, dataset.y, dataset.time, [band_name]],
        dims=["x", "y", "time", "band"],
    )
def run_snap_biophys(dataset, variable):
    """Compute specified variable using the SNAP algorithm.
    See ATBD at https://step.esa.int/docs/extra/ATBD_S2ToolBox_L2B_V1.1.pdf

    The three duplicated angle-layer constructions of the original were
    factored into _angle_band_layer (behavior unchanged).

    Parameters
    ----------
    dataset : xr dataset
        xarray dataset.
    variable : str
        Options 'FAPAR', 'FCOVER', 'LAI', 'LAI_Cab' or 'LAI_Cw'
    Returns
    -------
    xarray dataset
        Adds the specified variable array to dataset (variable name in
        lowercase).
    """
    band_data = dataset.band_data.sel(band=SNAP_BIO_BANDS)
    # generate view angle bands/layers
    vzarr = _angle_band_layer(
        dataset, band_data,
        np.cos(np.radians(dataset.view_zenith)).values, "view_zenith",
    )
    szarr = _angle_band_layer(
        dataset, band_data,
        np.cos(np.radians(dataset.sun_zenith)).values, "sun_zenith",
    )
    razarr = _angle_band_layer(
        dataset, band_data,
        np.cos(np.radians(dataset.sun_azimuth - dataset.view_azimuth)).values,
        "relative_azimuth",
    )
    newarr = xr.concat([band_data, vzarr, szarr, razarr], dim="band")
    # flatten the spatial grid so the network sees a (band, pixel) matrix
    newarr = newarr.stack(yx=("y", "x"))
    arr = xr.apply_ufunc(
        _compute_variable,
        newarr,
        input_core_dims=[["band", "yx"]],
        output_core_dims=[["yx"]],
        kwargs={"variable": variable},
        vectorize=True,
    ).unstack()
    return dataset.assign({variable.lower(): arr})
def estimate_gpp_vi_lue(vi, daily_par, model_name):
    """Estimate GPP from a vegetation index and daily PAR with an LUE model.

    This function has not been properly tested (i.e. used for a while).

    Parameters
    ----------
    vi : float
        Vegetation index values.
    daily_par : float
        Daily PAR as MJ/s/m².
    model_name : str
        Model key of the form "<vi_name>_<n>" (see GPP_LUE_MODELS).

    Returns
    -------
    gpp : float
        Estimated gross primary productivity.
    """
    # the VI family name is everything before the trailing "_<n>" suffix
    vi_name = "_".join(model_name.split("_")[:-1])
    model = GPP_LUE_MODELS[vi_name][model_name]["model"]
    return model(vi, daily_par)
# GPP estimation models
# Nested lookup: <vegetation index name> -> <model id> -> entry with
#   "model":     callable (vi, par) -> GPP estimate (empirical LUE regression)
#   "species":   vegetation type the regression was calibrated on
#   "reference": literature source of the coefficients
# Consumed by estimate_gpp_vi_lue(); model ids follow "<vi_name>_<n>".
GPP_LUE_MODELS = {
    "ci_red_edge": {
        "ci_red_edge_1": {
            "model": lambda vi, par: 4.80 * np.log(vi * par * 1e3) - 37.93,
            "species": "soybean",
            "reference": "Peng & Gitelson, 2012",
        },
        "ci_red_edge_2": {
            "model": lambda vi, par: 0.31 * (vi * par) - 0.1,
            "species": "grass",
            "reference": "Huang et al. 2019",
        },
    },
    "ci_green": {
        "ci_green_1": {
            "model": lambda vi, par: 5.13 * np.log(vi * par * 1e3) - 46.92,
            "species": "soybean",
            "reference": "Peng & Gitelson, 2012",
        },
        "ci_green_2": {
            "model": lambda vi, par: 14.7 * np.log(vi * par * 1e3 + 27900.61) - 154,
            "species": "maize",
            "reference": "Peng & Gitelson, 2012",
        },
    },
    "NDVI": {
        "NDVI_1": {
            "model": lambda vi, par: 2.07 * (vi * par) - 6.19,
            "species": "soybean",
            "reference": "Gitelson et al., 2012",
        },
        "NDVI_2": {
            "model": lambda vi, par: 3.11 * (vi * par) - 9.22,
            "species": "maize",
            "reference": "Gitelson et al., 2012",
        },
        "NDVI_3": {
            "model": lambda vi, par: (
                -3.26 * 1e-8 * (vi * par * 1e3) ** 2
                + 1.7 * 1e-3 * (vi * par * 1e3)
                - 2.17
            ),
            "species": "soybean",
            "reference": "Peng & Gitelson, 2012",
        },
        "NDVI_4": {
            "model": lambda vi, par: 1.94e-3 * (vi * par * 1e3) - 2.59,
            "species": "maize",
            "reference": "Peng & Gitelson, 2012",
        },
    },
    "gndvi": {
        "gndvi_1": {
            "model": lambda vi, par: 2.86 * (vi * par) - 11.9,
            "species": "soybean",
            "reference": "Gitelson et al., 2012",
        },
        "gndvi_2": {
            "model": lambda vi, par: 4 * (vi * par) - 15.4,
            "species": "maize",
            "reference": "Gitelson et al., 2012",
        },
    },
    "evi": {
        "evi_1": {
            "model": lambda vi, par: (2.26 * (vi * par) - 3.73),
            "species": "soybean",
            "reference": "Peng et al., 2013",
        },
        "evi_2": {
            "model": lambda vi, par: (3.49 * (vi * par) - 4.92),
            "species": "maize",
            "reference": "Peng et al., 2013",
        },
    },
    "reNDVI": {
        "reNDVI_1": {
            "model": lambda vi, par: 1.61 * (vi * par) - 1.75,
            "species": "mixed",
            "reference": "Wolanin et al., 2019",
        },
        "reNDVI_2": {
            "model": lambda vi, par: (
                -1.19 * 1e-7 * (vi * par * 1e3) ** 2
                + 3 * 1e-3 * (vi * par * 1e3)
                - 2.70
            ),
            "species": "soybean",
            "reference": "Peng & Gitelson, 2012",
        },
        "reNDVI_3": {
            "model": lambda vi, par: (
                -3.41 * 1e-8 * (vi * par * 1e3) ** 2
                + 2.77 * 1e-3 * (vi * par * 1e3)
                - 2.06
            ),
            "species": "maize",
            "reference": "Peng & Gitelson, 2012",
        },
    },
    "fapar": {
        "fapar_1": {
            "model": lambda vi, par: 1.10 * (vi * par),
            "species": "grass",
            "reference": "Olli Qvidja test fapar*x*PAR",
        }
    },
}
| [
"numpy.radians",
"numpy.copy",
"numpy.shape",
"numpy.ones_like",
"numpy.log",
"numpy.any",
"xarray.concat",
"os.path.dirname",
"numpy.array",
"xarray.DataArray",
"numpy.loadtxt",
"xarray.apply_ufunc",
"numpy.zeros_like"
] | [((1240, 1265), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1255, 1265), False, 'import os\n'), ((1393, 1470), 'numpy.loadtxt', 'np.loadtxt', (["(snap_bio_path + '%s/%s_Normalisation' % (var, var))"], {'delimiter': '""","""'}), "(snap_bio_path + '%s/%s_Normalisation' % (var, var), delimiter=',')\n", (1403, 1470), True, 'import numpy as np\n'), ((1505, 1584), 'numpy.loadtxt', 'np.loadtxt', (["(snap_bio_path + '%s/%s_Denormalisation' % (var, var))"], {'delimiter': '""","""'}), "(snap_bio_path + '%s/%s_Denormalisation' % (var, var), delimiter=',')\n", (1515, 1584), True, 'import numpy as np\n'), ((1620, 1710), 'numpy.loadtxt', 'np.loadtxt', (["(snap_bio_path + '%s/%s_Weights_Layer1_Neurons' % (var, var))"], {'delimiter': '""","""'}), "(snap_bio_path + '%s/%s_Weights_Layer1_Neurons' % (var, var),\n delimiter=',')\n", (1630, 1710), True, 'import numpy as np\n'), ((2140, 2216), 'numpy.loadtxt', 'np.loadtxt', (["(snap_bio_path + '%s/%s_ExtremeCases' % (var, var))"], {'delimiter': '""","""'}), "(snap_bio_path + '%s/%s_ExtremeCases' % (var, var), delimiter=',')\n", (2150, 2216), True, 'import numpy as np\n'), ((4717, 4747), 'numpy.any', 'np.any', (['bad_input_mask'], {'axis': '(0)'}), '(bad_input_mask, axis=0)\n', (4723, 4747), True, 'import numpy as np\n'), ((5321, 5336), 'numpy.copy', 'np.copy', (['output'], {}), '(output)\n', (5328, 5336), True, 'import numpy as np\n'), ((5878, 5894), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (5891, 5894), True, 'import numpy as np\n'), ((9211, 9327), 'xarray.DataArray', 'xr.DataArray', (['vz'], {'coords': "[dataset.x, dataset.y, dataset.time, ['view_zenith']]", 'dims': "['x', 'y', 'time', 'band']"}), "(vz, coords=[dataset.x, dataset.y, dataset.time, ['view_zenith'\n ]], dims=['x', 'y', 'time', 'band'])\n", (9223, 9327), True, 'import xarray as xr\n'), ((9515, 9630), 'xarray.DataArray', 'xr.DataArray', (['sz'], {'coords': "[dataset.x, dataset.y, dataset.time, ['sun_zenith']]", 'dims': 
"['x', 'y', 'time', 'band']"}), "(sz, coords=[dataset.x, dataset.y, dataset.time, ['sun_zenith']\n ], dims=['x', 'y', 'time', 'band'])\n", (9527, 9630), True, 'import xarray as xr\n'), ((9846, 9968), 'xarray.DataArray', 'xr.DataArray', (['raz'], {'coords': "[dataset.x, dataset.y, dataset.time, ['relative_azimuth']]", 'dims': "['x', 'y', 'time', 'band']"}), "(raz, coords=[dataset.x, dataset.y, dataset.time, [\n 'relative_azimuth']], dims=['x', 'y', 'time', 'band'])\n", (9858, 9968), True, 'import xarray as xr\n'), ((10009, 10065), 'xarray.concat', 'xr.concat', (['[band_data, vzarr, szarr, razarr]'], {'dim': '"""band"""'}), "([band_data, vzarr, szarr, razarr], dim='band')\n", (10018, 10065), True, 'import xarray as xr\n'), ((2929, 3018), 'numpy.loadtxt', 'np.loadtxt', (["(snap_bio_path + '%s/%s_DefinitionDomain_Grid' % (var, var))"], {'delimiter': '""","""'}), "(snap_bio_path + '%s/%s_DefinitionDomain_Grid' % (var, var),\n delimiter=',')\n", (2939, 3018), True, 'import numpy as np\n'), ((6993, 7036), 'numpy.radians', 'np.radians', (["single_date_dict['view_zenith']"], {}), "(single_date_dict['view_zenith'])\n", (7003, 7036), True, 'import numpy as np\n'), ((7070, 7112), 'numpy.radians', 'np.radians', (["single_date_dict['sun_zenith']"], {}), "(single_date_dict['sun_zenith'])\n", (7080, 7112), True, 'import numpy as np\n'), ((7156, 7234), 'numpy.radians', 'np.radians', (["(single_date_dict['sun_azimuth'] - single_date_dict['view_azimuth'])"], {}), "(single_date_dict['sun_azimuth'] - single_date_dict['view_azimuth'])\n", (7166, 7234), True, 'import numpy as np\n'), ((1739, 1826), 'numpy.loadtxt', 'np.loadtxt', (["(snap_bio_path + '%s/%s_Weights_Layer1_Bias' % (var, var))"], {'delimiter': '""","""'}), "(snap_bio_path + '%s/%s_Weights_Layer1_Bias' % (var, var),\n delimiter=',')\n", (1749, 1826), True, 'import numpy as np\n'), ((1873, 1963), 'numpy.loadtxt', 'np.loadtxt', (["(snap_bio_path + '%s/%s_Weights_Layer2_Neurons' % (var, var))"], {'delimiter': '""","""'}), 
"(snap_bio_path + '%s/%s_Weights_Layer2_Neurons' % (var, var),\n delimiter=',')\n", (1883, 1963), True, 'import numpy as np\n'), ((2007, 2094), 'numpy.loadtxt', 'np.loadtxt', (["(snap_bio_path + '%s/%s_Weights_Layer2_Bias' % (var, var))"], {'delimiter': '""","""'}), "(snap_bio_path + '%s/%s_Weights_Layer2_Bias' % (var, var),\n delimiter=',')\n", (2017, 2094), True, 'import numpy as np\n'), ((6621, 6632), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (6629, 6632), True, 'import numpy as np\n'), ((6920, 6949), 'numpy.array', 'np.array', (['single_date_dict[b]'], {}), '(single_date_dict[b])\n', (6928, 6949), True, 'import numpy as np\n'), ((9069, 9104), 'numpy.ones_like', 'np.ones_like', (['band_data[:, 0, :, :]'], {}), '(band_data[:, 0, :, :])\n', (9081, 9104), True, 'import numpy as np\n'), ((9374, 9409), 'numpy.ones_like', 'np.ones_like', (['band_data[:, 0, :, :]'], {}), '(band_data[:, 0, :, :])\n', (9386, 9409), True, 'import numpy as np\n'), ((9678, 9713), 'numpy.ones_like', 'np.ones_like', (['band_data[:, 0, :, :]'], {}), '(band_data[:, 0, :, :])\n', (9690, 9713), True, 'import numpy as np\n'), ((10117, 10270), 'xarray.apply_ufunc', 'xr.apply_ufunc', (['_compute_variable', 'newarr'], {'input_core_dims': "[['band', 'yx']]", 'output_core_dims': "[['yx']]", 'kwargs': "{'variable': variable}", 'vectorize': '(True)'}), "(_compute_variable, newarr, input_core_dims=[['band', 'yx']],\n output_core_dims=[['yx']], kwargs={'variable': variable}, vectorize=True)\n", (10131, 10270), True, 'import xarray as xr\n'), ((9124, 9155), 'numpy.radians', 'np.radians', (['dataset.view_zenith'], {}), '(dataset.view_zenith)\n', (9134, 9155), True, 'import numpy as np\n'), ((9429, 9459), 'numpy.radians', 'np.radians', (['dataset.sun_zenith'], {}), '(dataset.sun_zenith)\n', (9439, 9459), True, 'import numpy as np\n'), ((9733, 9787), 'numpy.radians', 'np.radians', (['(dataset.sun_azimuth - dataset.view_azimuth)'], {}), '(dataset.sun_azimuth - dataset.view_azimuth)\n', (9743, 9787), 
True, 'import numpy as np\n'), ((2624, 2715), 'numpy.loadtxt', 'np.loadtxt', (["(snap_bio_path + '%s/%s_DefinitionDomain_MinMax' % (var, var))"], {'delimiter': '""","""'}), "(snap_bio_path + '%s/%s_DefinitionDomain_MinMax' % (var, var),\n delimiter=',')\n", (2634, 2715), True, 'import numpy as np\n'), ((2776, 2867), 'numpy.loadtxt', 'np.loadtxt', (["(snap_bio_path + '%s/%s_DefinitionDomain_MinMax' % (var, var))"], {'delimiter': '""","""'}), "(snap_bio_path + '%s/%s_DefinitionDomain_MinMax' % (var, var),\n delimiter=',')\n", (2786, 2867), True, 'import numpy as np\n'), ((11174, 11199), 'numpy.log', 'np.log', (['(vi * par * 1000.0)'], {}), '(vi * par * 1000.0)\n', (11180, 11199), True, 'import numpy as np\n'), ((11572, 11597), 'numpy.log', 'np.log', (['(vi * par * 1000.0)'], {}), '(vi * par * 1000.0)\n', (11578, 11597), True, 'import numpy as np\n'), ((11767, 11803), 'numpy.log', 'np.log', (['(vi * par * 1000.0 + 27900.61)'], {}), '(vi * par * 1000.0 + 27900.61)\n', (11773, 11803), True, 'import numpy as np\n')] |
import extract as extract
import numpy as np
from nltk.tokenize import word_tokenize
from sklearn import svm
import matplotlib.pyplot as plt
from sklearn.metrics import f1_score
import seaborn as sns
from sklearn import metrics
def build_train(filename):
    """Build training features/labels from an extract file.

    Each example yields the feature pair [bleu, cosine similarity of
    reference vs candidate]; the label is 0 for "H", 1 otherwise.
    """
    features, labels = [], []
    for example in extract.extract(filename):
        similarity = cosine_similarity(example["reference"], example["candidate"])
        features.append([float(example["bleu"]), float(similarity)])
        labels.append(0 if example["label"] == "H" else 1)
    return np.array(features), np.array(labels)
def build_test(filename):
    """Build test features/labels from an extract file.

    Each example yields the feature pair [bleu, cosine similarity of
    reference vs candidate]; the label is 0 for "H", 1 otherwise.
    """
    features, labels = [], []
    for example in extract.extract(filename):
        similarity = cosine_similarity(example["reference"], example["candidate"])
        features.append([float(example["bleu"]), float(similarity)])
        labels.append(0 if example["label"] == "H" else 1)
    return np.array(features), np.array(labels)
def cosine_similarity(x, y):
    """Cosine similarity between the binary token-set vectors of x and y.

    Tokens come from nltk's word_tokenize; each string becomes a binary
    vector over the union of tokens, so the similarity reduces to
    |X ∩ Y| / sqrt(|X| * |Y|) — equivalent to the original element-wise
    dot product but computed with set operations.

    Fix: the original raised ZeroDivisionError when either string had no
    tokens; an empty token set now yields 0.0.
    """
    # tokenization
    X_set = set(word_tokenize(x))
    Y_set = set(word_tokenize(y))
    if not X_set or not Y_set:
        return 0.0
    shared = len(X_set & Y_set)
    # cosine formula on binary vectors
    return shared / float((len(X_set) * len(Y_set)) ** 0.5)
'''
Support Vector Machine:
- Input: (1) Bleu Score, (2) Cosine Similarity
'''
def svm_train(clf):
    """Fit ``clf`` on the training split ("train.txt") and return it."""
    features, labels = build_train("train.txt")
    clf.fit(features, labels)
    return clf
def predict(clf):
    """Predict labels for the test split ("test.txt").

    Returns (predictions, ground_truth). Fix: removed the leftover
    per-sample debug prints and replaced the one-sample-at-a-time loop
    with a single vectorized predict call.
    """
    test_data_x, test_data_y = build_test("test.txt")
    predicted = list(clf.predict(test_data_x))
    return predicted, test_data_y
def predict_train_data(clf):
    """Predict labels for the training split ("train.txt").

    Returns (predictions, ground_truth). Fix: removed the leftover
    per-sample debug prints and replaced the one-sample-at-a-time loop
    with a single vectorized predict call.
    """
    train_data_x, train_data_y = build_train("train.txt")
    predicted = list(clf.predict(train_data_x))
    return predicted, train_data_y
def accuracy(ground_truth, prediction):
    """Fraction of positions where prediction matches ground_truth.

    Fix: returns 0.0 for empty input instead of raising ZeroDivisionError;
    the index loop was replaced with zip.
    """
    total = len(ground_truth)
    if total == 0:
        return 0.0
    correct = sum(1 for gt, pred in zip(ground_truth, prediction) if gt == pred)
    return float(correct) / total
def f1score(ground_truth, prediction, average='macro'):
    """F1 score via sklearn.

    Fix: the ``average`` parameter was declared but never forwarded, so the
    call always used sklearn's default ('binary') regardless of the argument;
    it is now passed through.
    """
    return f1_score(ground_truth, prediction, average=average)
def classify():
    """Train an SVM on (bleu, cosine-similarity) features and report metrics.

    Prints test/train accuracy and F1, the confusion matrices, and renders a
    heatmap for each.
    """
    clf = svm_train(svm.SVC())
    pred, ground_truth = predict(clf)
    pred_train, ground_truth_train = predict_train_data(clf)
    print(pred)
    acc = accuracy(ground_truth, pred)
    acc_train = accuracy(ground_truth_train, pred_train)
    print(f"The Test % Accuracy is: {float(acc * 100)}%")
    print(f"The Test F1 Score computed using Sklearn is: {f1score(ground_truth, pred)}")
    print(f"The Train % Accuracy is: {float(acc_train * 100)}%")
    print(f"The Train F1 Score computed using Sklearn is: {f1score(ground_truth_train, pred_train)}")
    cm = metrics.confusion_matrix(ground_truth, pred)
    cm_train = metrics.confusion_matrix(ground_truth_train, pred_train)
    print(cm)
    print(cm_train)
    plot_cm(acc, cm)
    plot_cm(acc_train, cm_train)
def plot_cm(acc, cm):
    """Render a confusion-matrix heatmap annotated with the accuracy score."""
    plt.figure(figsize=(9, 9))
    sns.heatmap(cm, annot=True, fmt=".3f", linewidths=0.5, square=True, cmap='Blues_r')
    plt.ylabel('Actual label')
    plt.xlabel('Predicted label')
    plt.title('Accuracy Score: {0}'.format(acc), size=15)
    plt.show()
classify()
| [
"sklearn.metrics.f1_score",
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"seaborn.heatmap",
"nltk.tokenize.word_tokenize",
"numpy.array",
"extract.extract",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"sklearn.... | [((269, 294), 'extract.extract', 'extract.extract', (['filename'], {}), '(filename)\n', (284, 294), True, 'import extract as extract\n'), ((627, 652), 'extract.extract', 'extract.extract', (['filename'], {}), '(filename)\n', (642, 652), True, 'import extract as extract\n'), ((995, 1011), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['x'], {}), '(x)\n', (1008, 1011), False, 'from nltk.tokenize import word_tokenize\n'), ((1024, 1040), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['y'], {}), '(y)\n', (1037, 1040), False, 'from nltk.tokenize import word_tokenize\n'), ((2504, 2538), 'sklearn.metrics.f1_score', 'f1_score', (['ground_truth', 'prediction'], {}), '(ground_truth, prediction)\n', (2512, 2538), False, 'from sklearn.metrics import f1_score\n'), ((2563, 2572), 'sklearn.svm.SVC', 'svm.SVC', ([], {}), '()\n', (2570, 2572), False, 'from sklearn import svm\n'), ((3124, 3168), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['ground_truth', 'pred'], {}), '(ground_truth, pred)\n', (3148, 3168), False, 'from sklearn import metrics\n'), ((3181, 3237), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['ground_truth_train', 'pred_train'], {}), '(ground_truth_train, pred_train)\n', (3205, 3237), False, 'from sklearn import metrics\n'), ((3338, 3364), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 9)'}), '(figsize=(9, 9))\n', (3348, 3364), True, 'import matplotlib.pyplot as plt\n'), ((3365, 3453), 'seaborn.heatmap', 'sns.heatmap', (['cm'], {'annot': '(True)', 'fmt': '""".3f"""', 'linewidths': '(0.5)', 'square': '(True)', 'cmap': '"""Blues_r"""'}), "(cm, annot=True, fmt='.3f', linewidths=0.5, square=True, cmap=\n 'Blues_r')\n", (3376, 3453), True, 'import seaborn as sns\n'), ((3453, 3479), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Actual label"""'], {}), "('Actual label')\n", (3463, 3479), True, 'import matplotlib.pyplot as plt\n'), ((3481, 3510), 'matplotlib.pyplot.xlabel', 'plt.xlabel', 
(['"""Predicted label"""'], {}), "('Predicted label')\n", (3491, 3510), True, 'import matplotlib.pyplot as plt\n'), ((3566, 3602), 'matplotlib.pyplot.title', 'plt.title', (['all_sample_title'], {'size': '(15)'}), '(all_sample_title, size=15)\n', (3575, 3602), True, 'import matplotlib.pyplot as plt\n'), ((3606, 3616), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3614, 3616), True, 'import matplotlib.pyplot as plt\n'), ((550, 567), 'numpy.array', 'np.array', (['train_x'], {}), '(train_x)\n', (558, 567), True, 'import numpy as np\n'), ((569, 586), 'numpy.array', 'np.array', (['train_y'], {}), '(train_y)\n', (577, 586), True, 'import numpy as np\n'), ((903, 919), 'numpy.array', 'np.array', (['test_x'], {}), '(test_x)\n', (911, 919), True, 'import numpy as np\n'), ((921, 937), 'numpy.array', 'np.array', (['test_y'], {}), '(test_y)\n', (929, 937), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
#!/usr/bin/env python3
"""
Created on Tue Jan 8 20:03:12 2019
@author: yansl
Target:
    1. Due to the discrete nature of the image, the boundaries of the two circles need to be repaired.
    2. The repair is performed with OpenCV inpainting (Fast Marching Method) along the circle borders.
"""
import cv2
import numpy as np
#%%
"""
secondary functions
"""
def image_inpaint(mask, image, radius):
    """Repair masked pixels with OpenCV's Fast Marching (TELEA) inpainting.

    ``mask`` marks the ill-conditioned points (which might be caused by
    computational precision and computer graphics); ``radius`` is the
    inpainting neighbourhood radius. The input image is not modified.
    """
    work = image.copy()
    return cv2.inpaint(work, mask, radius, cv2.INPAINT_TELEA)
#%%
"""
primary functions
"""
def border_repair(cut_mask, image, ellipse, inpaintradius):
    """Repair the borders of the two circles derived from ``ellipse``.

    Two ring masks are built — one along the maximum inscribed circle
    (radius from the semi-minor axis) and one along the minimum circumscribed
    circle (radius from the semi-major axis, slightly expanded) — and the
    image is inpainted along each ring, outer ring first, then inner.

    Parameters
    ----------
    cut_mask : ndarray
        Used only for its height/width to size the ring masks.
    image : ndarray
        Image to repair (not modified).
    ellipse : tuple
        ((x, y), (axis1, axis2), ...) — OpenCV convention: the axes are the
        full major/minor axis lengths (2a, 2b).
    inpaintradius : int
        Inpainting neighbourhood radius passed on to cv2.inpaint.

    Fix: the circumscribed mask was created by copying the inscribed mask
    and zeroing every element; it is now allocated as a fresh zero array.
    """
    (x, y) = ellipse[0]
    (a2, b2) = ellipse[1]
    # semi-minor axis b from the smaller full axis
    b = min((a2, b2)) / 2
    # sp[0] height(rows) y, sp[1] width(colums) x
    sp = cut_mask.shape[0: 2]
    center = (round(x), round(y))
    # ring along the maximum inscribed circle; the radius is nudged and drawn
    # with a thick stroke to cover discretisation artifacts at the border
    min_mask = np.zeros(sp, dtype=np.uint8)
    cv2.circle(min_mask, center, round(b + 1) - 3, color=255, thickness=7)
    # ring along the minimum circumscribed circle (semi-major axis a)
    a = max((a2, b2)) / 2
    max_mask = np.zeros(sp, dtype=np.uint8)
    cv2.circle(max_mask, center, round(a + 8) - 4, color=255, thickness=9)
    # inpaint the outer ring first, then the inner ring on the result
    repaired_max = image_inpaint(max_mask, image, inpaintradius)
    repaired_min = image_inpaint(min_mask, repaired_max, inpaintradius)
    return repaired_min
"cv2.circle",
"numpy.zeros",
"cv2.inpaint"
] | [((630, 680), 'cv2.inpaint', 'cv2.inpaint', (['temp', 'mask', 'radius', 'cv2.INPAINT_TELEA'], {}), '(temp, mask, radius, cv2.INPAINT_TELEA)\n', (641, 680), False, 'import cv2\n'), ((1409, 1437), 'numpy.zeros', 'np.zeros', (['sp'], {'dtype': 'np.uint8'}), '(sp, dtype=np.uint8)\n', (1417, 1437), True, 'import numpy as np\n'), ((1670, 1734), 'cv2.circle', 'cv2.circle', (['min_mask', 'center', '(radius - 3)'], {'color': '(255)', 'thickness': '(7)'}), '(min_mask, center, radius - 3, color=255, thickness=7)\n', (1680, 1734), False, 'import cv2\n'), ((1995, 2059), 'cv2.circle', 'cv2.circle', (['max_mask', 'center', '(radius - 4)'], {'color': '(255)', 'thickness': '(9)'}), '(max_mask, center, radius - 4, color=255, thickness=9)\n', (2005, 2059), False, 'import cv2\n')] |
import pandas as pd
import os
from tqdm import tqdm
from model.bert_things.pytorch_pretrained_bert.tokenization import BertTokenizer
from model.bert_things.pytorch_pretrained_bert import BertConfig, BertModel, BertPreTrainedModel
from model.bert_text_model import BertTextModel
from data_loader.utils.vocab import Vocab
import pickle
import sys
import logging
import numpy as np
import argparse
import torch
# ignore warnings from tokenization (sequence length is very long)
# Fix: Logger.setLevel() returns None, so the original assignment
# (`logging = ...setLevel(...)`) rebound the module name `logging` to None
# and broke every later logging.warning(...) call in this file.
logging.getLogger("model.bert_things.pytorch_pretrained_bert.tokenization").setLevel(logging.CRITICAL)
def embed_text(text_codes, device, bert, bert_seq_length=512, max_seq_len_text=30):
    """
    Embed text using the pretrained bert model, this is done to speed up training later (we keep the bert model fixed, you can explore
    fine tuning bert model also for downstream tasks..)
    Args:
        text_codes: tokenized text
        device: device to run model on
        bert: the bert model
        bert_seq_length: maximum bert sequence length
        max_seq_len_text: maximum occuring text sequence length in the corpus in terms of bert_seq_length
    Output:
        pooled_output: embedding
    """
    # one column per bert_seq_length-sized chunk of the token stream;
    # x_mask marks which positions hold real tokens
    x_text = torch.zeros((bert_seq_length, max_seq_len_text), dtype=torch.long)
    x_mask = torch.zeros((bert_seq_length, max_seq_len_text,))
    # n = (number of full chunks) - 1
    n = len(text_codes) // bert_seq_length - 1
    # fill columns 0..n with the full chunks
    for i in range(len(text_codes) // bert_seq_length):
        x_text[:, i] = torch.Tensor(text_codes[i * bert_seq_length: (1 + i) * bert_seq_length])
        x_mask[:, i] = 1
    # trailing partial chunk; this condition is always true given
    # n = len(text_codes) // bert_seq_length - 1
    if (n * bert_seq_length <= len(text_codes)):
        # NOTE(review): the remainder is written into column n, which the loop
        # above already filled with full chunk n — column n + 1 looks intended.
        # Also only a single mask element is set (x_mask[rem, n]) rather than
        # a slice; confirm against the downstream training pipeline.
        x_mask[len(text_codes) - bert_seq_length * (n + 1), n] = 1
        x_text[:len(text_codes) - bert_seq_length * (n + 1), n] = torch.Tensor(text_codes[(n + 1) * bert_seq_length:])
    x_text = x_text.to(device)
    x_mask = x_mask.to(device)
    # run BERT without tracking gradients (the embeddings are precomputed
    # with a frozen model)
    with torch.no_grad():
        _, pooled_output = bert(x_text.t(), attention_mask=x_mask.t())
    return pooled_output
def compute_max_seq_len_text(df, col, tokenizer):
    """
    Compute the maximum occuring sequence length in the dataset
    Args:
        df: dataframe containing the complete dataset
        col: column name containing the text
        tokenizer: map used to convert tokens to ids (assumes tokenizer has a convert_tokens_to_ids function)
    Output:
        max_seq_len_text: (int)
    """
    longest = 0
    for _, row in df.iterrows():
        token_ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(row[col]))
        longest = max(longest, len(token_ids))
    return longest
def _prepare_device(n_gpu_use):
"""
setup GPU device if available, move model into configured device
if n_gpu_use = 0, use cpu
"""
n_gpu = torch.cuda.device_count()
if n_gpu_use > 0 and n_gpu == 0:
logging.warning("Warning: There\'s no GPU available on this machine, training will be performed on CPU.")
n_gpu_use = -1
if n_gpu_use > n_gpu:
logging.warning("Warning: The number of GPU\'s configured to use is {}, but only {} are available on this machine.".format(n_gpu_use, n_gpu))
n_gpu_use = n_gpu
device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')
list_ids = list(range(n_gpu_use))
return device, list_ids
def main():
"""
Will generate a dictionary as follows:
<key> patientid : <value> lsit of dicts, where each dict contains admission data
[
{<key> feature/label name : <value> feature/label value}
]
"""
parser = argparse.ArgumentParser(description='Generate Text+Code dataset')
parser.add_argument('-p', '--path', default=None, type=str, help='path to pandas dataframe where rows are admissions')
parser.add_argument('-vp', '--vocab_path', default='', type=str, help='path to where code vocabulary are stored assumes diagnoses vocab file named as diag.vocab and cpt vocab as cpt.vocab')
parser.add_argument('-s', '--save', default='./', type=str, help='path to save pkl files')
parser.add_argument('-et', '--embed_text', default=False, action='store_true', help='flag wether to embed text or not')
parser.add_argument('-cpb', '--bert_config_path', default=None, type=str, help='path to bert config')
parser.add_argument('-vpb', '--bert_vocab_path', default=None, type=str, help='path to bert vocab ')
parser.add_argument('-sdp', '--state_dict_path', default=None, type=str, help='path to bert state dict')
parser.add_argument('-gpu', '--gpu', default=0, type=int)
parser.add_argument('-bsl', '--max_bert_seq_len', default=512, type=int, help='maximum sequence length of bert model')
parser.add_argument('-tsld', '--text_seq_length_discharge', default=0, type=int, help='pass this if maximum text sequence length is known for discharge text to avoid long processing time')
parser.add_argument('-tslr', '--text_seq_length_rest', default=0, type=int, help='pass this if maximum text sequence length is known for rest of text (other than discharge) to avoid longer processing time')
parser.add_argument('-sc', '--short_code', default=False, action='store_true', help='flag for using short codes ')
parser.add_argument('-diag', '--diagnoses', default=False, action='store_true', help='flag for including diagnoses codes')
parser.add_argument('-proc', '--procedures', default=False, action='store_true', help='flag for including procedures codes')
parser.add_argument('-med', '--medications', default=False, action='store_true', help='flag for including medication codes')
parser.add_argument('-cpt', '--cpts', default=False, action='store_true', help='flag for including cpt codes')
parser.add_argument('-ma', '--min_adm', default=0, type=int)
args = parser.parse_args()
df = pd.read_pickle(args.path)
df_orig = df
# remove organ donor admissions
if ('DIAGNOSIS' in df.columns):
REMOVE_DIAGNOSIS = ~((df['DIAGNOSIS'] == 'ORGAN DONOR ACCOUNT') | (df['DIAGNOSIS'] == 'ORGAN DONOR') | \
(df['DIAGNOSIS'] == 'DONOR ACCOUNT'))
df = df[REMOVE_DIAGNOSIS]
df = df[~df['ICD9_CODE'].isna()] # drop patients with no icd9 code?
df = df[~(df['TEXT_REST'].isna() | df['TEXT_REST'].isna())]
if ('TIMEDELTA' in df.columns):
df['TIMEDELTA'] = df['TIMEDELTA'].fillna(pd.to_timedelta("0"))
df['TIMEDELTA'] = pd.to_timedelta(df['TIMEDELTA'])
df['TIMEDELTA'] = df['TIMEDELTA'].apply(lambda x: x.seconds)
pids = list(set(df['SUBJECT_ID'].tolist()))
# lambda
demographic_cols = {'AGE': [], 'GENDER': [], 'LAST_CAREUNIT': [],
'MARITAL_STATUS': [], 'ETHNICITY': [],
'DISCHARGE_LOCATION': []}
df.loc[:, 'MARITAL_STATUS'], demographic_cols['MARITAL_STATUS'] = pd.factorize(df['MARITAL_STATUS'])
df.loc[:, 'ETHNICITY'], demographic_cols['ETHNICITY'] = pd.factorize(df['ETHNICITY'])
df.loc[:, 'DISCHARGE_LOCATION'], demographic_cols['DISCHARGE_LOCATION'] = pd.factorize(df['DISCHARGE_LOCATION'])
df.loc[:, 'LAST_CAREUNIT'], demographic_cols['LAST_CAREUNIT'] = pd.factorize(df['LAST_CAREUNIT'])
df.loc[:, 'GENDER'], demographic_cols['GENDER'] = pd.factorize(df['GENDER'])
df.loc[:, 'AGE'] = df['AGE'].astype(int)
los_bins = [1, 2, 3, 4, 5, 6, 7, 8, 14, float('inf')]
los_labels = [1, 2, 3, 4, 5, 6, 7, 8, 9]
df.loc[:, 'LOS'] = pd.cut(df['LOS'], bins=los_bins, labels=los_labels)
temp_data = []
data = {}
diag_vocab = Vocab()
cpt_vocab = Vocab()
med_vocab = Vocab()
proc_vocab = Vocab()
if (args.vocab_path != ''):
#to use below checkout https://github.com/sajaddarabi/HCUP-US-EHR
if (args.diagnoses):
diag_vocab._build_from_file(os.path.join(args.vocab_path, 'diag.vocab'))
if (args.cpts):
cpt_vocab._build_from_file(os.path.join(args.vocab_path, 'cpt.vocab'))
#if (args.procedures):
# proc_vocab._build_from_file(os.path.join(args.vocab_path, 'proc.vocab'))
#if (args.med):
#med_vocab._build_from_file(os.path.join(args.vocab_path, 'med.vocab'))
if (os.path.exists(os.path.join(args.save, 'data.pkl'))):
temp_data = pickle.load(open(os.path.join(args.save, 'data.pkl'), 'rb'))
temp_data = temp_data['data']
t = list(temp_data.keys())
t = t[0]
d = 'text_embedding' in temp_data[t][0]
if (not d):
temp_data = []
else:
model = None
bert_config = None
torch.cuda.empty_cache()
if args.embed_text:
tokenizer = BertTokenizer(args.bert_vocab_path)
if args.embed_text and (len(temp_data) == 0):
bert_config = BertConfig(args.bert_config_path)
model = BertTextModel(bert_config)
state_dict = torch.load(args.state_dict_path)
model.init_bert_weights(state_dict)
device, _ = _prepare_device(args.gpu)
model = model.to(device)
max_seq_len_text_d = args.text_seq_length_discharge
max_seq_len_text_r = args.text_seq_length_rest
if max_seq_len_text_d == 0:
max_seq_len_text = compute_max_seq_len_text(df, 'TEXT_DISCHARGE', tokenizer)
max_seq_len_text = max_seq_len_text // args.max_bert_seq_len + 1
max_seq_len_text_d = max_seq_len_text
print("text sequence discharge length: {}".format(max_seq_len_text_d))
if max_seq_len_text_r == 0:
max_seq_len_text = compute_max_seq_len_text(df, 'TEXT_REST', tokenizer)
max_seq_len_text = max_seq_len_text // args.max_bert_seq_len + 1
max_seq_len_text_r = max_seq_len_text
print("text sequence rest length: {}".format(max_seq_len_text_r))
try:
for pid in tqdm(pids):
pid_df = df[df['SUBJECT_ID'] == pid]
pid_df = pid_df.sort_values('ADMITTIME').reset_index()
if (len(pid_df) < 1): # must atleast have two data points
continue
data[pid] = []
t = 0
hadm_ids = set(df['HADM_ID'])
for i, r in pid_df.iterrows():
#filt notes prior to n days and concatenate them
# leave discharge summary seperate
admit_data = {}
demographics = [r['AGE'], r['GENDER'], r['MARITAL_STATUS']]
icu_unit = np.zeros((demographic_cols['LAST_CAREUNIT'].size, ), dtype=int)
icu_unit[r['LAST_CAREUNIT']] = 1
demographics += list(icu_unit)
ethnicity = np.zeros((demographic_cols['ETHNICITY'].size, ), dtype=int)
ethnicity[r['ETHNICITY']] = 1
demographics += list(ethnicity)
ethnicity = np.zeros((demographic_cols['ETHNICITY'].size, ), dtype=int)
ethnicity[r['ETHNICITY']] = 1
demographics += list(ethnicity)
admit_data['demographics'] = demographics
dtok, ptok, mtok, ctok = [], [], [], []
diagnosis_codes, proc_codes, med_codes, cpt_codes = np.nan, np.nan, np.nan, np.nan
if args.diagnoses:
diagnosis_codes = r['ICD9_CODE']
if (diagnosis_codes == diagnosis_codes):
dtok = diag_vocab.convert_to_ids(diagnosis_codes , 'D', args.short_code)
if (args.procedures):
proc_codes = r['ICD9_CODE_PROCEDURE']
if (proc_codes == proc_codes):
ptok = proc_vocab.convert_to_ids(proc_codes, 'P', args.short_code)
if args.medications:
med_codes = r['NDC'] # issue with NDC what mapping version is being used..?
if (med_codes == med_codes):
mtok = med_vocab.convert_to_ids(med_codes, 'M')
if args.cpts:
cpt_codes = r['CPT_CD']
if (cpt_codes == cpt_codes):
ctok = cpt_vocab.convert_to_ids(cpt_codes, 'C')
admit_data['diagnoses'] = dtok
admit_data['procedures'] = ptok
admit_data['medications'] = mtok
admit_data['cptproc'] = ctok
if (r['TIMEDELTA'] == r['TIMEDELTA']):
t += r['TIMEDELTA']
admit_data['timedelta'] = t
text_discharge = r['TEXT_DISCHARGE']
text_rest = r['TEXT_REST']
ttokd = tokenizer.tokenize(text_discharge)
ttokd = tokenizer.convert_tokens_to_ids(ttokd)
ttokr = tokenizer.tokenize(text_rest)
ttokr = tokenizer.convert_tokens_to_ids(ttokr)
admit_data['text_discharge_raw'] = text_discharge
admit_data['text_rest_raw'] = text_rest
admit_data['text_discharge_len'] = len(ttokd)
admit_data['text_rest_len'] = len(ttokr)
admit_data['text_discharge_token'] = ttokd
admit_data['text_rest_token'] = ttokr
if len(temp_data) == 0:
if (args.embed_text):
ttokd = embed_text(ttokd, device, model, args.max_bert_seq_len, max_seq_len_text_d)
ttokd = ttokd.cpu().numpy()
ttokr = embed_text(ttokr, device, model, args.max_bert_seq_len, max_seq_len_text_r)
ttokr = ttokr.cpu().numpy()
else:
ttok = temp_data[pid][i]['text_embedding']
admit_data['text_embedding_discharge'] = ttokd
admit_data['text_embedding_rest'] = ttokr
admit_data['los'] = r['LOS']
admit_data['readmission'] = r['readmission_label']
admit_data['mortality'] = r['DEATHTIME'] == r['DEATHTIME']
data[pid].append(admit_data)
except Exception as error:
print(error)
import pdb; pdb.set_trace()
if (not os.path.exists(args.save)):
os.makedirs(args.save)
# temporarly save data incase something goes wrong ...
try:
with open(os.path.join(args.save, 'data.pkl'), 'wb') as handle:
data_dict = {}
data_dict['data'] = data
pickle.dump(data_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
except:
import pdb; pdb.set_trace()
pids = list(data.keys())
flatten = lambda x: [item for sublist in x for item in sublist]
data_info = {}
num_icd9_codes, num_proc_codes, num_med_codes = 0, 0, 0
data_info['num_patients'] = len(pids)
data_info['max_seq_len_text_d'] = max_seq_len_text_d
data_info['max_seq_len_text_r'] = max_seq_len_text_r
data_info['num_icd9_codes'] = 0
data_info['num_proc_codes'] = 0
data_info['num_med_codes'] = 0
if (args.diagnoses):
num_icd9_codes = len(set(flatten(df_orig['ICD9_CODE'].dropna())))
data_info['num_icd9_codes'] = num_icd9_codes
if (args.procedures):
num_proc_codes = len(set(flatten(df_orig['ICD9_CODE_PROCEDURE'].dropna())))
data_info['num_proc_codes'] = num_proc_codes
if (args.medications):
num_med_codes = len(set(flatten(df_orig['NDC'].dropna())))
data_info['num_med_codes'] = num_med_codes
data_info['demographics_shape'] = len(data[pids[0]][0]['demographics'])
data_info['demographic_cols'] = demographic_cols
data_info['total_codes'] = data_info['num_icd9_codes'] + data_info['num_proc_codes'] + data_info['num_med_codes']
if (not os.path.exists(args.save)):
os.makedirs(args.save)
with open(os.path.join(args.save, 'data.pkl'), 'wb') as handle:
data_dict = {}
data_dict['info'] = data_info
data_dict['data'] = data
pickle.dump(data_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(args.save, 'cpt_vocab.pkl'), 'wb') as handle:
pickle.dump(cpt_vocab, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(args.save, 'diag_vocab.pkl'), 'wb') as handle:
pickle.dump(diag_vocab, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(args.save, 'med_vocab.pkl'), 'wb') as handle:
pickle.dump(med_vocab, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(args.save, 'proc_vocab.pkl'), 'wb') as handle:
pickle.dump(proc_vocab, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == '__main__':
main()
| [
"logging.getLogger",
"pandas.to_timedelta",
"torch.cuda.device_count",
"pandas.read_pickle",
"os.path.exists",
"argparse.ArgumentParser",
"model.bert_text_model.BertTextModel",
"model.bert_things.pytorch_pretrained_bert.tokenization.BertTokenizer",
"model.bert_things.pytorch_pretrained_bert.BertConf... | [((1252, 1318), 'torch.zeros', 'torch.zeros', (['(bert_seq_length, max_seq_len_text)'], {'dtype': 'torch.long'}), '((bert_seq_length, max_seq_len_text), dtype=torch.long)\n', (1263, 1318), False, 'import torch\n'), ((1332, 1380), 'torch.zeros', 'torch.zeros', (['(bert_seq_length, max_seq_len_text)'], {}), '((bert_seq_length, max_seq_len_text))\n', (1343, 1380), False, 'import torch\n'), ((2912, 2937), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (2935, 2937), False, 'import torch\n'), ((3355, 3405), 'torch.device', 'torch.device', (["('cuda:0' if n_gpu_use > 0 else 'cpu')"], {}), "('cuda:0' if n_gpu_use > 0 else 'cpu')\n", (3367, 3405), False, 'import torch\n'), ((3819, 3884), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate Text+Code dataset"""'}), "(description='Generate Text+Code dataset')\n", (3842, 3884), False, 'import argparse\n'), ((6056, 6081), 'pandas.read_pickle', 'pd.read_pickle', (['args.path'], {}), '(args.path)\n', (6070, 6081), True, 'import pandas as pd\n'), ((7070, 7104), 'pandas.factorize', 'pd.factorize', (["df['MARITAL_STATUS']"], {}), "(df['MARITAL_STATUS'])\n", (7082, 7104), True, 'import pandas as pd\n'), ((7165, 7194), 'pandas.factorize', 'pd.factorize', (["df['ETHNICITY']"], {}), "(df['ETHNICITY'])\n", (7177, 7194), True, 'import pandas as pd\n'), ((7273, 7311), 'pandas.factorize', 'pd.factorize', (["df['DISCHARGE_LOCATION']"], {}), "(df['DISCHARGE_LOCATION'])\n", (7285, 7311), True, 'import pandas as pd\n'), ((7380, 7413), 'pandas.factorize', 'pd.factorize', (["df['LAST_CAREUNIT']"], {}), "(df['LAST_CAREUNIT'])\n", (7392, 7413), True, 'import pandas as pd\n'), ((7468, 7494), 'pandas.factorize', 'pd.factorize', (["df['GENDER']"], {}), "(df['GENDER'])\n", (7480, 7494), True, 'import pandas as pd\n'), ((7666, 7717), 'pandas.cut', 'pd.cut', (["df['LOS']"], {'bins': 'los_bins', 'labels': 'los_labels'}), "(df['LOS'], 
bins=los_bins, labels=los_labels)\n", (7672, 7717), True, 'import pandas as pd\n'), ((7772, 7779), 'data_loader.utils.vocab.Vocab', 'Vocab', ([], {}), '()\n', (7777, 7779), False, 'from data_loader.utils.vocab import Vocab\n'), ((7796, 7803), 'data_loader.utils.vocab.Vocab', 'Vocab', ([], {}), '()\n', (7801, 7803), False, 'from data_loader.utils.vocab import Vocab\n'), ((7820, 7827), 'data_loader.utils.vocab.Vocab', 'Vocab', ([], {}), '()\n', (7825, 7827), False, 'from data_loader.utils.vocab import Vocab\n'), ((7845, 7852), 'data_loader.utils.vocab.Vocab', 'Vocab', ([], {}), '()\n', (7850, 7852), False, 'from data_loader.utils.vocab import Vocab\n'), ((485, 560), 'logging.getLogger', 'logging.getLogger', (['"""model.bert_things.pytorch_pretrained_bert.tokenization"""'], {}), "('model.bert_things.pytorch_pretrained_bert.tokenization')\n", (502, 560), False, 'import logging\n'), ((1508, 1579), 'torch.Tensor', 'torch.Tensor', (['text_codes[i * bert_seq_length:(1 + i) * bert_seq_length]'], {}), '(text_codes[i * bert_seq_length:(1 + i) * bert_seq_length])\n', (1520, 1579), False, 'import torch\n'), ((1788, 1840), 'torch.Tensor', 'torch.Tensor', (['text_codes[(n + 1) * bert_seq_length:]'], {}), '(text_codes[(n + 1) * bert_seq_length:])\n', (1800, 1840), False, 'import torch\n'), ((1913, 1928), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1926, 1928), False, 'import torch\n'), ((2991, 3105), 'logging.warning', 'logging.warning', (['"""Warning: There\'s no GPU available on this machine, training will be performed on CPU."""'], {}), '(\n "Warning: There\'s no GPU available on this machine, training will be performed on CPU."\n )\n', (3006, 3105), False, 'import logging\n'), ((6651, 6683), 'pandas.to_timedelta', 'pd.to_timedelta', (["df['TIMEDELTA']"], {}), "(df['TIMEDELTA'])\n", (6666, 6683), True, 'import pandas as pd\n'), ((8433, 8468), 'os.path.join', 'os.path.join', (['args.save', '"""data.pkl"""'], {}), "(args.save, 'data.pkl')\n", (8445, 8468), False, 'import 
os\n'), ((8893, 8928), 'model.bert_things.pytorch_pretrained_bert.tokenization.BertTokenizer', 'BertTokenizer', (['args.bert_vocab_path'], {}), '(args.bert_vocab_path)\n', (8906, 8928), False, 'from model.bert_things.pytorch_pretrained_bert.tokenization import BertTokenizer\n'), ((9002, 9035), 'model.bert_things.pytorch_pretrained_bert.BertConfig', 'BertConfig', (['args.bert_config_path'], {}), '(args.bert_config_path)\n', (9012, 9035), False, 'from model.bert_things.pytorch_pretrained_bert import BertConfig, BertModel, BertPreTrainedModel\n'), ((9052, 9078), 'model.bert_text_model.BertTextModel', 'BertTextModel', (['bert_config'], {}), '(bert_config)\n', (9065, 9078), False, 'from model.bert_text_model import BertTextModel\n'), ((9100, 9132), 'torch.load', 'torch.load', (['args.state_dict_path'], {}), '(args.state_dict_path)\n', (9110, 9132), False, 'import torch\n'), ((10063, 10073), 'tqdm.tqdm', 'tqdm', (['pids'], {}), '(pids)\n', (10067, 10073), False, 'from tqdm import tqdm\n'), ((14289, 14314), 'os.path.exists', 'os.path.exists', (['args.save'], {}), '(args.save)\n', (14303, 14314), False, 'import os\n'), ((14325, 14347), 'os.makedirs', 'os.makedirs', (['args.save'], {}), '(args.save)\n', (14336, 14347), False, 'import os\n'), ((15835, 15860), 'os.path.exists', 'os.path.exists', (['args.save'], {}), '(args.save)\n', (15849, 15860), False, 'import os\n'), ((15871, 15893), 'os.makedirs', 'os.makedirs', (['args.save'], {}), '(args.save)\n', (15882, 15893), False, 'import os\n'), ((16065, 16129), 'pickle.dump', 'pickle.dump', (['data_dict', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(data_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (16076, 16129), False, 'import pickle\n'), ((16212, 16276), 'pickle.dump', 'pickle.dump', (['cpt_vocab', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(cpt_vocab, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (16223, 16276), False, 'import pickle\n'), ((16359, 16424), 'pickle.dump', 'pickle.dump', 
(['diag_vocab', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(diag_vocab, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (16370, 16424), False, 'import pickle\n'), ((16506, 16570), 'pickle.dump', 'pickle.dump', (['med_vocab', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(med_vocab, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (16517, 16570), False, 'import pickle\n'), ((16653, 16718), 'pickle.dump', 'pickle.dump', (['proc_vocab', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(proc_vocab, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (16664, 16718), False, 'import pickle\n'), ((6603, 6623), 'pandas.to_timedelta', 'pd.to_timedelta', (['"""0"""'], {}), "('0')\n", (6618, 6623), True, 'import pandas as pd\n'), ((8823, 8847), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (8845, 8847), False, 'import torch\n'), ((14260, 14275), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (14273, 14275), False, 'import pdb\n'), ((14566, 14630), 'pickle.dump', 'pickle.dump', (['data_dict', 'handle'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(data_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)\n', (14577, 14630), False, 'import pickle\n'), ((14663, 14678), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (14676, 14678), False, 'import pdb\n'), ((15909, 15944), 'os.path.join', 'os.path.join', (['args.save', '"""data.pkl"""'], {}), "(args.save, 'data.pkl')\n", (15921, 15944), False, 'import os\n'), ((16145, 16185), 'os.path.join', 'os.path.join', (['args.save', '"""cpt_vocab.pkl"""'], {}), "(args.save, 'cpt_vocab.pkl')\n", (16157, 16185), False, 'import os\n'), ((16291, 16332), 'os.path.join', 'os.path.join', (['args.save', '"""diag_vocab.pkl"""'], {}), "(args.save, 'diag_vocab.pkl')\n", (16303, 16332), False, 'import os\n'), ((16439, 16479), 'os.path.join', 'os.path.join', (['args.save', '"""med_vocab.pkl"""'], {}), "(args.save, 'med_vocab.pkl')\n", (16451, 16479), False, 'import os\n'), ((16585, 16626), 'os.path.join', 
'os.path.join', (['args.save', '"""proc_vocab.pkl"""'], {}), "(args.save, 'proc_vocab.pkl')\n", (16597, 16626), False, 'import os\n'), ((8030, 8073), 'os.path.join', 'os.path.join', (['args.vocab_path', '"""diag.vocab"""'], {}), "(args.vocab_path, 'diag.vocab')\n", (8042, 8073), False, 'import os\n'), ((8138, 8180), 'os.path.join', 'os.path.join', (['args.vocab_path', '"""cpt.vocab"""'], {}), "(args.vocab_path, 'cpt.vocab')\n", (8150, 8180), False, 'import os\n'), ((8509, 8544), 'os.path.join', 'os.path.join', (['args.save', '"""data.pkl"""'], {}), "(args.save, 'data.pkl')\n", (8521, 8544), False, 'import os\n'), ((10669, 10731), 'numpy.zeros', 'np.zeros', (["(demographic_cols['LAST_CAREUNIT'].size,)"], {'dtype': 'int'}), "((demographic_cols['LAST_CAREUNIT'].size,), dtype=int)\n", (10677, 10731), True, 'import numpy as np\n'), ((10858, 10916), 'numpy.zeros', 'np.zeros', (["(demographic_cols['ETHNICITY'].size,)"], {'dtype': 'int'}), "((demographic_cols['ETHNICITY'].size,), dtype=int)\n", (10866, 10916), True, 'import numpy as np\n'), ((11041, 11099), 'numpy.zeros', 'np.zeros', (["(demographic_cols['ETHNICITY'].size,)"], {'dtype': 'int'}), "((demographic_cols['ETHNICITY'].size,), dtype=int)\n", (11049, 11099), True, 'import numpy as np\n'), ((14436, 14471), 'os.path.join', 'os.path.join', (['args.save', '"""data.pkl"""'], {}), "(args.save, 'data.pkl')\n", (14448, 14471), False, 'import os\n')] |
import os
import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
VERBOSE = True
USE_CUDA = True
def model_summary(model):
for idx, m in enumerate(model.modules()):
print(idx, '->', m)
def save_checkpoint(state, loss, prefix, ckptpath):
filename_late = os.path.join(ckptpath, "%s_%.5f.tar" % (prefix, loss))
torch.save(state, filename_late)
def adjust_learning_rate(initial, optimizer, epoch, factor=0.1):
lr = max(initial * (factor ** (epoch // 2)), 0.0001)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def set_learning_rate(lr, optimizer):
for param_group in optimizer.param_groups:
param_group['lr'] = lr
# https://github.com/pytorch/pytorch/issues/2830
def optimizer_cuda(optimizer):
for state in optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = v.cuda()
class AdaptiveLR(object):
def __init__(self, opt, initial_lr, num_iterations=1000):
self._lr = initial_lr
self.opt = opt
self.losses = []
self.window = num_iterations
self.min_lr = 0.0001
self.factor = 0.5
def update(self, loss):
losses = self.losses
while len(losses) > self.window:
losses.pop(0)
losses.append(loss)
if len(losses) < self.window:
return
avg_old = np.mean(losses[:self.window//2])
avg_new = np.mean(losses[self.window//2:])
if avg_new < avg_old:
return
self.lr = max(self.lr * self.factor, self.min_lr)
self.losses = [] # restart loss count
@property
def lr(self):
return self._lr
@lr.setter
def lr(self, val):
if VERBOSE:
print("resetting LR: %s -> %s" % (self._lr, val))
set_learning_rate(val, self.opt)
self._lr = val
def shuffle(data, labels):
s = np.arange(data.shape[0])
np.random.shuffle(s)
return data[s], labels[s]
class CosineSimilarityRegressionLoss(nn.Module):
def __init__(self):
super(CosineSimilarityRegressionLoss, self).__init__()
def forward(self, vec1, vec2, y):
mse = nn.MSELoss()
y_hat = F.cosine_similarity(vec1, vec2)
return mse(y_hat, y)
class CosineSimilarityLossWithL2Regularization(nn.Module):
def __init__(self, cos_sim_margin=0.1, l2_margin=0.1, alpha=0.1):
super(CosineSimilarityLossWithL2Regularization, self).__init__()
self.cos_sim_margin = cos_sim_margin
self.l2_margin = l2_margin
self.alpha = alpha
def forward(self, vec1, vec2, y):
assert vec1.size(0) == vec2.size(0)
ones = Variable(torch.ones(vec1.size(0), 1))
if USE_CUDA:
ones = ones.cuda()
# l2_1 = torch.clamp(torch.abs(ones - vec1.norm(p=2, dim=1)), max=1.0)
# l2_2 = torch.clamp(torch.abs(ones - vec2.norm(p=2, dim=1)), max=1.0)
# l2_1 = l2_1.mean()
# l2_2 = l2_2.mean()
l2_1 = F.l1_loss(ones, vec1.norm(p=2, dim=1))
l2_2 = F.l1_loss(ones, vec2.norm(p=2, dim=1))
loss = F.cosine_embedding_loss(vec1, vec2, y)
return loss + self.alpha * (l2_1 + l2_2)
| [
"numpy.mean",
"torch.nn.functional.cosine_similarity",
"os.path.join",
"torch.nn.functional.cosine_embedding_loss",
"torch.nn.MSELoss",
"torch.is_tensor",
"torch.save",
"numpy.arange",
"numpy.random.shuffle"
] | [((344, 398), 'os.path.join', 'os.path.join', (['ckptpath', "('%s_%.5f.tar' % (prefix, loss))"], {}), "(ckptpath, '%s_%.5f.tar' % (prefix, loss))\n", (356, 398), False, 'import os\n'), ((403, 435), 'torch.save', 'torch.save', (['state', 'filename_late'], {}), '(state, filename_late)\n', (413, 435), False, 'import torch\n'), ((1996, 2020), 'numpy.arange', 'np.arange', (['data.shape[0]'], {}), '(data.shape[0])\n', (2005, 2020), True, 'import numpy as np\n'), ((2025, 2045), 'numpy.random.shuffle', 'np.random.shuffle', (['s'], {}), '(s)\n', (2042, 2045), True, 'import numpy as np\n'), ((1476, 1510), 'numpy.mean', 'np.mean', (['losses[:self.window // 2]'], {}), '(losses[:self.window // 2])\n', (1483, 1510), True, 'import numpy as np\n'), ((1527, 1561), 'numpy.mean', 'np.mean', (['losses[self.window // 2:]'], {}), '(losses[self.window // 2:])\n', (1534, 1561), True, 'import numpy as np\n'), ((2267, 2279), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (2277, 2279), False, 'from torch import nn\n'), ((2296, 2327), 'torch.nn.functional.cosine_similarity', 'F.cosine_similarity', (['vec1', 'vec2'], {}), '(vec1, vec2)\n', (2315, 2327), True, 'import torch.nn.functional as F\n'), ((3195, 3233), 'torch.nn.functional.cosine_embedding_loss', 'F.cosine_embedding_loss', (['vec1', 'vec2', 'y'], {}), '(vec1, vec2, y)\n', (3218, 3233), True, 'import torch.nn.functional as F\n'), ((931, 949), 'torch.is_tensor', 'torch.is_tensor', (['v'], {}), '(v)\n', (946, 949), False, 'import torch\n')] |
def generateTable(data,k=4):
T = {}
for i in range(len(data)-k):
X = data[i:i+k]
Y = data[i+k]
#print("X %s and Y %s "%(X,Y))
if T.get(X) is None:
T[X] = {}
T[X][Y] = 1
else:
if T[X].get(Y) is None:
T[X][Y] = 1
else:
T[X][Y] += 1
return T
def convertFreqIntoProb(T):
for kx in T.keys():
s = float(sum(T[kx].values()))
for k in T[kx].keys():
T[kx][k] = T[kx][k]/s
return T
text_path = "Apna_Time_Aayega.txt"
def load_text(filename):
with open(filename,encoding='utf8') as f:
return f.read().lower()
text = load_text(text_path)
print(len(text))
#Train our Markov Chain
def trainMarkovChain(text,k=4):
T = generateTable(text,k)
T = convertFreqIntoProb(T)
return T
model = trainMarkovChain(text)
#Generate Text at Text Time!
import numpy as np
from numpy import random
random.seed(11)
# sampling !
def sample_next(ctx,T,k):
ctx = ctx[-k:]
if T.get(ctx) is None:
return " "
possible_Chars = list(T[ctx].keys())
possible_values = list(T[ctx].values())
#print(possible_Chars)
#print(possible_values)
return np.random.choice(possible_Chars,p=possible_values)
def generateText(starting_sent,k=4,maxLen=1996):
sentence = starting_sent
ctx = starting_sent[-k:]
for ix in range(maxLen):
next_prediction = sample_next(ctx,model,k)
sentence += next_prediction
ctx = sentence[-k:]
return sentence
text = generateText("apna",k=4,maxLen=1996)
print(text)
print(len(text))
with open("lyrics.txt","w") as f:
f.write(text)
| [
"numpy.random.choice",
"numpy.random.seed"
] | [((1078, 1093), 'numpy.random.seed', 'random.seed', (['(11)'], {}), '(11)\n', (1089, 1093), False, 'from numpy import random\n'), ((1371, 1422), 'numpy.random.choice', 'np.random.choice', (['possible_Chars'], {'p': 'possible_values'}), '(possible_Chars, p=possible_values)\n', (1387, 1422), True, 'import numpy as np\n')] |
import cv2
from matplotlib import pyplot as plt
import numpy as np
import glob
import pickle
images = glob.glob('images/*.jpg')
keypoints = []
scale_percent = 5
width = 300
height = 200
dim = (width, height)
objectpoints = []
path_image_founds = []
for im_name in images:
im = cv2.cvtColor(cv2.imread(im_name), cv2.COLOR_BGR2RGB)
im = cv2.resize(im, dim).astype('uint8')
found, corners = cv2.findChessboardCorners(im, (7, 10))
if found:
path_image_founds.append(im_name)
print(f'found for {im_name}')
objp = np.zeros((10 * 7, 3), np.float32)
objp[:, :2] = np.mgrid[0:7, 0:10].T.reshape(-1, 2)
objectpoints.append(objp)
points = np.squeeze(corners)
keypoints.append(points)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objectpoints, keypoints, dim,
None, None)
plt.show()
# Testing on image 1
i = 1
im = cv2.cvtColor(cv2.imread(path_image_founds[i]), cv2.COLOR_BGR2RGB)
im = cv2.resize(im, dim).astype('uint8')
points, e = cv2.projectPoints(objectpoints[i], rvecs[i], tvecs[i], mtx, dist)
points = np.squeeze(points.astype('uint8'))
keys = np.squeeze(keypoints[i])
plt.imshow(im)
plt.plot(keys[:, 0], keys[:, 1], 'go', markersize=2)
plt.plot(points[:, 0], points[:, 1], 'ro', markersize=2)
plt.show()
| [
"matplotlib.pyplot.imshow",
"cv2.projectPoints",
"matplotlib.pyplot.plot",
"numpy.squeeze",
"numpy.zeros",
"cv2.calibrateCamera",
"cv2.findChessboardCorners",
"cv2.resize",
"cv2.imread",
"glob.glob",
"matplotlib.pyplot.show"
] | [((103, 128), 'glob.glob', 'glob.glob', (['"""images/*.jpg"""'], {}), "('images/*.jpg')\n", (112, 128), False, 'import glob\n'), ((780, 841), 'cv2.calibrateCamera', 'cv2.calibrateCamera', (['objectpoints', 'keypoints', 'dim', 'None', 'None'], {}), '(objectpoints, keypoints, dim, None, None)\n', (799, 841), False, 'import cv2\n'), ((893, 903), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (901, 903), True, 'from matplotlib import pyplot as plt\n'), ((1058, 1123), 'cv2.projectPoints', 'cv2.projectPoints', (['objectpoints[i]', 'rvecs[i]', 'tvecs[i]', 'mtx', 'dist'], {}), '(objectpoints[i], rvecs[i], tvecs[i], mtx, dist)\n', (1075, 1123), False, 'import cv2\n'), ((1176, 1200), 'numpy.squeeze', 'np.squeeze', (['keypoints[i]'], {}), '(keypoints[i])\n', (1186, 1200), True, 'import numpy as np\n'), ((1201, 1215), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im'], {}), '(im)\n', (1211, 1215), True, 'from matplotlib import pyplot as plt\n'), ((1216, 1268), 'matplotlib.pyplot.plot', 'plt.plot', (['keys[:, 0]', 'keys[:, 1]', '"""go"""'], {'markersize': '(2)'}), "(keys[:, 0], keys[:, 1], 'go', markersize=2)\n", (1224, 1268), True, 'from matplotlib import pyplot as plt\n'), ((1269, 1325), 'matplotlib.pyplot.plot', 'plt.plot', (['points[:, 0]', 'points[:, 1]', '"""ro"""'], {'markersize': '(2)'}), "(points[:, 0], points[:, 1], 'ro', markersize=2)\n", (1277, 1325), True, 'from matplotlib import pyplot as plt\n'), ((1327, 1337), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1335, 1337), True, 'from matplotlib import pyplot as plt\n'), ((403, 441), 'cv2.findChessboardCorners', 'cv2.findChessboardCorners', (['im', '(7, 10)'], {}), '(im, (7, 10))\n', (428, 441), False, 'import cv2\n'), ((951, 983), 'cv2.imread', 'cv2.imread', (['path_image_founds[i]'], {}), '(path_image_founds[i])\n', (961, 983), False, 'import cv2\n'), ((296, 315), 'cv2.imread', 'cv2.imread', (['im_name'], {}), '(im_name)\n', (306, 315), False, 'import cv2\n'), ((551, 584), 'numpy.zeros', 
'np.zeros', (['(10 * 7, 3)', 'np.float32'], {}), '((10 * 7, 3), np.float32)\n', (559, 584), True, 'import numpy as np\n'), ((695, 714), 'numpy.squeeze', 'np.squeeze', (['corners'], {}), '(corners)\n', (705, 714), True, 'import numpy as np\n'), ((1009, 1028), 'cv2.resize', 'cv2.resize', (['im', 'dim'], {}), '(im, dim)\n', (1019, 1028), False, 'import cv2\n'), ((346, 365), 'cv2.resize', 'cv2.resize', (['im', 'dim'], {}), '(im, dim)\n', (356, 365), False, 'import cv2\n')] |
#coding: UTF-8
# 信息利用率低:不同的机器学习算法和模型对数据中信息的利用是不同的,之前提到在线性模型中,使用对定性特征哑编码可以达到非线性的效果。类似地,对定量变量多项式化,或者进行其他的转换,都能达到非线性的效果。
from sklearn.preprocessing import PolynomialFeatures
import numpy as np
X = np.arange(6).reshape(3, 2)
print(X)
poly = PolynomialFeatures(2)
print(poly.fit_transform(X))
poly = PolynomialFeatures(interaction_only=True)
print(poly.fit_transform(X))
| [
"sklearn.preprocessing.PolynomialFeatures",
"numpy.arange"
] | [((239, 260), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', (['(2)'], {}), '(2)\n', (257, 260), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((298, 339), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', ([], {'interaction_only': '(True)'}), '(interaction_only=True)\n', (316, 339), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((195, 207), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (204, 207), True, 'import numpy as np\n')] |
import re
import torch
from torch import nn
import pandas as pd
import numpy as np
from torch._six import container_abcs, string_classes, int_classes
from torch.nn.utils import rnn as rnn_utils
from typing import TypeVar, Dict, List, Tuple
from ..typing import SocSeqList, SocSeqBatch, SocSeqPolicyBatch, SocSeqPolicyList
from . import soc_data
from . import java_utils as ju
DataTensor = TypeVar('DataTensor', np.ndarray, torch.Tensor)
np_str_obj_array_pattern = re.compile(r'[SaUO]')
def pad_seq_sas(inputs: SocSeqList) -> SocSeqBatch:
"""
Pad the different inputs
inputs is a list of (state_seq, actions_seq)
"""
xs_l = []
ys_l = []
mask_l = []
for tuple_seq in inputs:
x, y = tuple_seq
xs_l.append(x)
ys_l.append(y)
mask_l.append(torch.ones_like(y))
xs_t = rnn_utils.pad_sequence(xs_l, batch_first=True)
ys_t = rnn_utils.pad_sequence(ys_l, batch_first=True)
mask_t = rnn_utils.pad_sequence(mask_l, batch_first=True)
return xs_t, ys_t, mask_t
def pad_seq_policy(inputs: SocSeqPolicyList) -> SocSeqPolicyBatch:
"""
Pad the different inputs
inputs is a list of (state_seq, actions_seq)
"""
xs_l = []
ys_spatial_l = []
ys_linear_l = []
ys_action_l = []
mask_spatial_l = []
mask_linear_l = []
mask_action_l = []
for tuple_seq in inputs:
x, y = tuple_seq
y_spatial, y_linear, y_action = y
xs_l.append(x)
ys_spatial_l.append(y_spatial)
ys_linear_l.append(y_linear)
ys_action_l.append(y_action)
mask_spatial_l.append(torch.ones_like(y_spatial))
mask_linear_l.append(torch.ones_like(y_linear))
mask_action_l.append(torch.ones_like(y_action))
xs_t = rnn_utils.pad_sequence(xs_l, batch_first=True)
ys_spatial_t = rnn_utils.pad_sequence(ys_spatial_l, batch_first=True)
ys_linear_t = rnn_utils.pad_sequence(ys_linear_l, batch_first=True)
ys_action_t = rnn_utils.pad_sequence(ys_action_l, batch_first=True)
mask_spatial_t = rnn_utils.pad_sequence(mask_spatial_l, batch_first=True)
mask_linear_t = rnn_utils.pad_sequence(mask_linear_l, batch_first=True)
mask_action_t = rnn_utils.pad_sequence(mask_action_l, batch_first=True)
ys_t = (ys_spatial_t, ys_linear_t, ys_action_t)
mask_t = (mask_spatial_t, mask_linear_t, mask_action_t)
return xs_t, ys_t, mask_t
def pad_seq_text_policy(data_dict_l):
max_text_lengh = 0
for data_dict in data_dict_l:
max_text_lengh = max(max_text_lengh, data_dict['chat_history_t'].shape[1])
for i, data_dict in enumerate(data_dict_l):
if data_dict['chat_history_t'].shape[1] < max_text_lengh:
zeros_shape_history = [
data_dict['chat_history_t'].shape[0],
max_text_lengh - data_dict['chat_history_t'].shape[1],
data_dict['chat_history_t'].shape[2]
]
zeros = torch.zeros(
zeros_shape_history,
dtype=torch.float32,
device=data_dict['chat_mask_history_t'].device
)
data_dict_l[i]['chat_history_t'] = torch.cat([data_dict['chat_history_t'], zeros],
dim=1)
# yapf: disable
data_dict_l[i]['chat_mask_history_t'] = torch.cat(
[data_dict['chat_mask_history_t'], zeros[:, :, 0]],
dim=1
)
# yapf: enable
zeros_shape_future = [
data_dict['chat_future_t'].shape[0],
max_text_lengh - data_dict['chat_future_t'].shape[1],
data_dict['chat_future_t'].shape[2]
]
zeros_shape_future[1] = max_text_lengh - data_dict['chat_future_t'].shape[1]
zeros = torch.zeros(
zeros_shape_future, dtype=torch.float32, device=data_dict['chat_future_t'].device
)
data_dict_l[i]['chat_future_t'] = torch.cat([data_dict['chat_future_t'], zeros], dim=1)
# yapf: disable
data_dict_l[i]['chat_mask_future_t'] = torch.cat(
[data_dict['chat_mask_future_t'], zeros[:, :, 0]],
dim=1
)
# yapf: enable
return default_collate(data_dict_l)
def default_collate(batch):
r"""
Default Pytorch 1.6 collate function
Puts each data field into a tensor with outer dimension batch size
"""
elem = batch[0]
elem_type = type(elem)
if isinstance(elem, torch.Tensor):
out = None
if torch.utils.data.get_worker_info() is not None:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = sum([x.numel() for x in batch])
storage = elem.storage()._new_shared(numel)
out = elem.new(storage)
return torch.stack(batch, 0, out=out)
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
elem = batch[0]
if elem_type.__name__ == 'ndarray':
# array of string classes and object
if np_str_obj_array_pattern.search(elem.dtype.str) is not None:
default_collate_err_msg_format = (
"default_collate: batch must contain tensors, numpy arrays, numbers, "
"dicts or lists; found {}"
)
raise TypeError(default_collate_err_msg_format.format(elem.dtype))
return default_collate([torch.as_tensor(b) for b in batch])
elif elem.shape == (): # scalars
return torch.as_tensor(batch)
elif isinstance(elem, float):
return torch.tensor(batch, dtype=torch.float64)
elif isinstance(elem, int_classes):
return torch.tensor(batch)
elif isinstance(elem, string_classes):
return batch
elif isinstance(elem, container_abcs.Mapping):
return {key: default_collate([d[key] for d in batch]) for key in elem}
elif isinstance(elem, tuple) and hasattr(elem, '_fields'): # namedtuple
return elem_type(*(default_collate(samples) for samples in zip(*batch)))
elif isinstance(elem, container_abcs.Sequence):
# check to make sure that the elements in batch have consistent size
it = iter(batch)
elem_size = len(next(it))
if not all(len(elem) == elem_size for elem in it):
raise RuntimeError('each element in list of batch should be of equal size')
transposed = zip(*batch)
return [default_collate(samples) for samples in transposed]
raise TypeError(default_collate_err_msg_format.format(elem_type))
def preprocess_states(states_df: pd.DataFrame) -> pd.DataFrame:
    """
    Convert the raw observation columns into their spatial representation.

    The representation stacks, per time step, the planes:
        - plane 0: tile type (hexlayout)
        - plane 1: tile number
        - plane 2: robber position
        - plane 3: game phase id
        - plane 4: development cards left
        - plane 5: last dice result
        - plane 6: starting player id
        - plane 7: current player id
        - plane 8: current player played a development card this turn
        - planes 9-26 / 27-44 / 45-62 / 63-80: pieces of players 1-4
          (3 piece types x 6 positions around each hex)
        - planes 81-121 / 122-162 / 163-203 / 204-244: public info of
          players 1-4 (see java_utils.parse_player_infos)

    Final state shape: 245x7x7.
    """
    states_df = states_df.copy()
    # Bookkeeping columns that carry no game information.
    for dropped in ('touchingnumbers', 'name', 'id'):
        del states_df[dropped]
    # Columns that need a multi-step parse/normalize pipeline.
    states_df['gameturn'] = (states_df['gameturn']
                             .apply(ju.get_replicated_plan)
                             .apply(normalize_gameturn))
    states_df['hexlayout'] = (states_df['hexlayout']
                              .apply(ju.parse_layout)
                              .apply(ju.mapping_1d_2d)
                              .apply(normalize_hexlayout))
    states_df['numberlayout'] = (states_df['numberlayout']
                                 .apply(ju.parse_layout)
                                 .apply(ju.mapping_1d_2d)
                                 .apply(normalize_numberlayout))
    states_df['robberhex'] = (states_df['robberhex']
                              .apply(ju.get_1d_id_from_hex)
                              .apply(ju.get_2d_id)
                              .apply(ju.get_one_hot_plan))
    states_df['playersresources'] = (states_df['playersresources']
                                     .apply(ju.parse_player_resources)
                                     .apply(normalize_playersresources))
    # Columns that need a single parsing step.
    single_step = {
        'piecesonboard': ju.parse_pieces,
        'gamestate': ju.parse_game_phases,
        'devcardsleft': ju.parse_devcardsleft,
        'diceresult': ju.parse_dice_result,
        'startingplayer': ju.parse_starting_player,
        'currentplayer': ju.parse_current_player,
        'playeddevcard': ju.get_replicated_plan,
        'players': ju.parse_player_infos,
    }
    for col, parser in single_step.items():
        states_df[col] = states_df[col].apply(parser)
    return states_df
def preprocess_actions(actions_df: pd.DataFrame) -> pd.DataFrame:
    """Drop bookkeeping columns and encode the action type column."""
    actions_df = actions_df.copy()
    for dropped in ('id', 'beforestate', 'afterstate', 'value'):
        del actions_df[dropped]
    actions_df['type'] = actions_df['type'].apply(ju.parse_actions)
    return actions_df
def preprocess_chats(
    chats_df: pd.DataFrame, seq_length: int, first_state_idx: int = 1
) -> pd.DataFrame:
    """
    Group chat messages by the game state they were sent in.

    Output row ``i`` holds every message belonging to state
    ``first_state_idx + i``, each formatted "sender: message" and joined
    with newlines; states without messages yield an empty string.
    """
    per_state: List[List[str]] = [[] for _ in range(seq_length)]
    for _, row in chats_df.iterrows():
        state_pos = row['current_state'] - first_state_idx
        per_state[state_pos].append("{}: {}".format(row['sender'], row['message']))
    joined = ['\n'.join(msgs) if msgs else '' for msgs in per_state]
    return pd.DataFrame({'message': joined})
def stack_states_df(states_df: pd.DataFrame) -> torch.Tensor:
    """
    Stack the per-row state planes of ``states_df`` into one float32 tensor.

    For every row, the columns listed in ``soc_data.STATE_FIELDS`` are
    concatenated along the plane axis; the rows are then stacked along a
    new leading (sequence) dimension.
    """
    per_step = []
    for row_idx in range(len(states_df.index)):
        row = states_df.iloc[row_idx]
        planes = np.concatenate([row[col] for col in soc_data.STATE_FIELDS], axis=0)
        per_step.append(torch.tensor(planes, dtype=torch.float32))
    return torch.stack(per_step)
def stack_actions_df(actions_df: pd.DataFrame) -> torch.Tensor:
    """
    Stack the encoded action of every row into one float32 tensor.

    The 'type' column of each row is converted to a tensor; rows are
    stacked along a new leading (sequence) dimension.
    """
    per_step = [
        torch.tensor(actions_df.iloc[row_idx]['type'], dtype=torch.float32)
        for row_idx in range(len(actions_df.index))
    ]
    return torch.stack(per_step)
def replace_firstnames(text, lm='bert'):
    """
    Map bot account names to plain first names for the given language model.

    Raises NotImplementedError for any ``lm`` other than 'bert'.
    """
    if lm != 'bert':
        raise NotImplementedError('LM {} is not supported'.format(lm))
    renames = (
        ('BayesBetty', 'Betty'),
        ('BayesFranck', 'Peter'),
        ('BayesJake', 'Jake'),
        ('DRLSam', 'Sam'),
    )
    for account, firstname in renames:
        text = text.replace(account, firstname)
    return text
def compute_text_features(
    messages: List[str],
    tokenizer,
    text_model: nn.Module,
    set_empty_text_to_zero: bool = False
) -> List[torch.Tensor]:
    """
    Encode a batch of chat messages with a tokenizer + text model pair.

    Parameters
    ----------
    messages : List[str]
        Messages to encode; the empty string marks "no message" steps.
    tokenizer :
        Callable returning a dict with 'input_ids', 'token_type_ids' and
        'attention_mask' tensors (``return_tensors="pt"``).
    text_model : nn.Module
        Model returning ``(last_hidden_state, pooler_output)``.
    set_empty_text_to_zero : bool, optional
        If True, the representations (and attention mask) of empty
        messages are zeroed out, defaults to False.

    Returns
    -------
    list of torch.Tensor
        ``[last_hidden_state, pooler_output, attention_mask]`` with the
        mask cast to float32.
    """
    with torch.no_grad():
        encoded_inputs = tokenizer(messages, padding=True, truncation=True, return_tensors="pt")
        if next(text_model.parameters()).is_cuda:
            # GPU path: move inputs to the device and encode the whole
            # batch in a single forward pass.
            encoded_inputs['input_ids'] = encoded_inputs['input_ids'].cuda()
            encoded_inputs['token_type_ids'] = encoded_inputs['token_type_ids'].cuda()
            encoded_inputs['attention_mask'] = encoded_inputs['attention_mask'].cuda()
            last_hidden_state, pooler_output = text_model(**encoded_inputs)
            if set_empty_text_to_zero is True:
                # Zero out mask and representations of empty messages.
                for i in range(len(messages)):
                    if messages[i] == '':
                        encoded_inputs['attention_mask'][i] = 0
                        last_hidden_state[i] = 0
                        pooler_output[i] = 0
        else:
            # CPU path: encode message by message.
            empty_last_hidden_state = None
            empty_pooler_output = None
            last_hidden_state_list = []
            pooler_output_list = []
            # To speed things up, we compute only once the representation for the void sentences
            # This works because there are much more void sentences than actual sentences
            for i in range(len(messages)):
                if messages[i] == '':
                    if empty_last_hidden_state is None:
                        # First empty message: compute (and cache) its encoding.
                        empty_last_hidden_state, empty_pooler_output = text_model(
                            input_ids=encoded_inputs['input_ids'][i:i + 1],
                            token_type_ids=encoded_inputs['token_type_ids'][i:i + 1],
                            attention_mask=encoded_inputs['attention_mask'][i:i + 1],
                        )
                        if set_empty_text_to_zero is True:
                            empty_last_hidden_state = torch.zeros_like(empty_last_hidden_state)
                            empty_pooler_output = torch.zeros_like(empty_pooler_output)
                            # NOTE(review): this zeroes the mask only for the FIRST
                            # empty message (it sits inside the cache-miss branch);
                            # later empty messages reuse the cached zeros but keep
                            # their mask — confirm whether that is intended.
                            encoded_inputs['attention_mask'][i] = 0
                    last_hidden_state_list.append(empty_last_hidden_state)
                    pooler_output_list.append(empty_pooler_output)
                else:
                    # Non-empty message: encode its single-item slice.
                    last_hidden_state, pooler_output = text_model(
                        input_ids=encoded_inputs['input_ids'][i:i + 1],
                        token_type_ids=encoded_inputs['token_type_ids'][i:i + 1],
                        attention_mask=encoded_inputs['attention_mask'][i:i + 1],
                    )
                    last_hidden_state_list.append(last_hidden_state)
                    pooler_output_list.append(pooler_output)
            # Reassemble the per-message encodings into batch tensors.
            last_hidden_state = torch.cat(last_hidden_state_list, dim=0)
            pooler_output = torch.cat(pooler_output_list, dim=0)
        mask = encoded_inputs['attention_mask'].to(torch.float32)
    return [last_hidden_state, pooler_output, mask]
def normalize_hexlayout(data: "DataTensor") -> "DataTensor":
    """
    Square-root compress the hex-layout plane into (0, 1].

    The raw values (including -1 for "no tile") are shifted by +1 twice so
    every entry is at least 1, then square-rooted and divided by sqrt(257)
    so all values land between 0 and 1.  Works on torch tensors (cast to
    float32) and numpy arrays; the input is not modified in place.
    """
    if isinstance(data, torch.Tensor):
        shifted = data.clone().type(torch.float32) + 1  # type:ignore
        ceiling = torch.tensor(255 + 1 + 1, dtype=shifted.dtype)
        return torch.sqrt(shifted + 1) / torch.sqrt(ceiling)
    shifted = data.copy() + 1
    return np.sqrt(shifted + 1) / np.sqrt(255 + 1 + 1)
def unnormalize_hexlayout(data: "DataTensor") -> "DataTensor":
    """
    Invert ``normalize_hexlayout``, recovering the raw integer layout.

    ``normalize_hexlayout`` shifts the data by +1 twice before the
    square-root compression, so the inverse must subtract 2 in total:
    one -1 before rounding, one -1 after.  Bug fix: the torch branch was
    missing the second subtraction (the numpy branch already had it),
    leaving torch results off by one; both branches now agree.
    """
    if isinstance(data, torch.Tensor):
        data = data.clone()
        val = torch.tensor(255 + 1 + 1, dtype=data.dtype)
        data = torch.square(data * torch.sqrt(val)) - 1
        data = torch.round(data).type(torch.int64)  # type:ignore
        # Undo the second +1 applied by normalize_hexlayout (previously
        # only done in the numpy branch).
        data -= 1
    else:
        data = data.copy()
        data = np.square(data * np.sqrt(255 + 1 + 1)) - 1
        data = np.round(data).astype(np.int64)
        data -= 1
    return data
def normalize_numberlayout(data: "DataTensor") -> "DataTensor":
    """
    Scale the number-layout plane to [0, 1].

    -1 entries ("no number") are zeroed so they carry no signal, then all
    values are divided by 12.  Works on torch tensors (cast to float32)
    and numpy arrays; the input is not modified in place.
    """
    if isinstance(data, torch.Tensor):
        scaled = data.clone().type(torch.float32)  # type:ignore
        scaled[scaled == -1] = 0
        return scaled / 12.0
    scaled = data.copy()
    scaled[scaled == -1] = 0
    return scaled / 12.0
def unnormalize_numberlayout(data: "DataTensor") -> "DataTensor":
    """
    Invert ``normalize_numberlayout``.

    Values are scaled back by 12 and rounded to int64; zeros are mapped
    back to the -1 "no number" marker.  The input is not modified in place.
    """
    if isinstance(data, torch.Tensor):
        restored = torch.round(data.clone() * 12).type(torch.int64)  # type:ignore
        restored[restored == 0] = -1
        return restored
    restored = np.round(data.copy() * 12).astype(np.int64)
    restored[restored == 0] = -1
    return restored
def normalize_gameturn(data: "DataTensor") -> "DataTensor":
    """
    Scale the game-turn counter by 1/40 (games rarely exceed 40 turns).

    Torch tensors are cast to float32 first; the input is not modified
    in place.
    """
    if isinstance(data, torch.Tensor):
        return data.clone().type(torch.float32) / 40.0  # type:ignore
    return data.copy() / 40.0
def unnormalize_gameturn(data: "DataTensor") -> "DataTensor":
    """Invert ``normalize_gameturn``: scale by 40 and round to int64."""
    if isinstance(data, torch.Tensor):
        return torch.round(data.clone() * 40).type(torch.int64)  # type:ignore
    return np.round(data.copy() * 40).astype(np.int64)
def normalize_playersresources(data: "DataTensor") -> "DataTensor":
    """
    Scale player resource counts by 1/10.

    NOTE(review): the original comment mentioned 25 cards per resource
    while the code divides by 10 — presumably 10 is an empirical bound on
    what a player actually holds; confirm before changing the scale.
    Torch tensors are cast to float32; the input is not modified in place.
    """
    if isinstance(data, torch.Tensor):
        return data.clone().type(torch.float32) / 10.0  # type:ignore
    return data.copy() / 10.0
def unnormalize_playersresources(data: "DataTensor") -> "DataTensor":
    """Invert ``normalize_playersresources``: scale by 10, round to int64."""
    if isinstance(data, torch.Tensor):
        return torch.round(data.clone() * 10).type(torch.int64)  # type:ignore
    return np.round(data.copy() * 10).astype(np.int64)
def find_actions_idxs(batch_sa_seq_t: torch.Tensor, action_name: str) -> torch.Tensor:
    """
    Boolean mask of the sequence steps where ``action_name`` was taken.

    The last ``len(soc_data.ACTIONS)`` planes of the state-action tensor
    encode the action; argmax over them recovers the action id, which is
    compared against the id of ``action_name``.
    """
    target_id = soc_data.ACTIONS_NAMES.index(action_name)
    n_actions = len(soc_data.ACTIONS)
    taken_ids = torch.argmax(batch_sa_seq_t[:, :, -n_actions:, 0, 0], dim=2)
    return taken_ids == target_id
def separate_state_data(state: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Split a stacked state tensor into its spatial and linear parts.

    Fields whose type (``soc_data.STATE_FIELDS_TYPE``) is 3, 4 or 5 keep
    their full 2D planes; every other field is reduced to the scalar at
    position (0, 0) of each plane.  Returns ``(spatial, linear)``, each
    concatenated along dim 1.
    """
    spatial_parts = []
    linear_parts = []
    offset = 0
    for field in soc_data.STATE_FIELDS:
        size = soc_data.STATE_FIELDS_SIZE[field]
        chunk = state[:, offset:offset + size]
        if soc_data.STATE_FIELDS_TYPE[field] in [3, 4, 5]:
            spatial_parts.append(chunk)
        else:
            # Non-spatial fields replicate one value per plane; keep (0, 0).
            linear_parts.append(chunk[:, :, 0, 0])
        offset += size
    return torch.cat(spatial_parts, dim=1), torch.cat(linear_parts, dim=1)
| [
"torch.ones_like",
"torch.as_tensor",
"numpy.sqrt",
"re.compile",
"torch.utils.data.get_worker_info",
"numpy.round",
"torch.stack",
"torch.sqrt",
"torch.nn.utils.rnn.pad_sequence",
"torch.tensor",
"torch.round",
"numpy.concatenate",
"pandas.DataFrame",
"torch.no_grad",
"torch.zeros_like"... | [((390, 437), 'typing.TypeVar', 'TypeVar', (['"""DataTensor"""', 'np.ndarray', 'torch.Tensor'], {}), "('DataTensor', np.ndarray, torch.Tensor)\n", (397, 437), False, 'from typing import TypeVar, Dict, List, Tuple\n'), ((465, 485), 're.compile', 're.compile', (['"""[SaUO]"""'], {}), "('[SaUO]')\n", (475, 485), False, 'import re\n'), ((843, 889), 'torch.nn.utils.rnn.pad_sequence', 'rnn_utils.pad_sequence', (['xs_l'], {'batch_first': '(True)'}), '(xs_l, batch_first=True)\n', (865, 889), True, 'from torch.nn.utils import rnn as rnn_utils\n'), ((901, 947), 'torch.nn.utils.rnn.pad_sequence', 'rnn_utils.pad_sequence', (['ys_l'], {'batch_first': '(True)'}), '(ys_l, batch_first=True)\n', (923, 947), True, 'from torch.nn.utils import rnn as rnn_utils\n'), ((961, 1009), 'torch.nn.utils.rnn.pad_sequence', 'rnn_utils.pad_sequence', (['mask_l'], {'batch_first': '(True)'}), '(mask_l, batch_first=True)\n', (983, 1009), True, 'from torch.nn.utils import rnn as rnn_utils\n'), ((1778, 1824), 'torch.nn.utils.rnn.pad_sequence', 'rnn_utils.pad_sequence', (['xs_l'], {'batch_first': '(True)'}), '(xs_l, batch_first=True)\n', (1800, 1824), True, 'from torch.nn.utils import rnn as rnn_utils\n'), ((1845, 1899), 'torch.nn.utils.rnn.pad_sequence', 'rnn_utils.pad_sequence', (['ys_spatial_l'], {'batch_first': '(True)'}), '(ys_spatial_l, batch_first=True)\n', (1867, 1899), True, 'from torch.nn.utils import rnn as rnn_utils\n'), ((1918, 1971), 'torch.nn.utils.rnn.pad_sequence', 'rnn_utils.pad_sequence', (['ys_linear_l'], {'batch_first': '(True)'}), '(ys_linear_l, batch_first=True)\n', (1940, 1971), True, 'from torch.nn.utils import rnn as rnn_utils\n'), ((1990, 2043), 'torch.nn.utils.rnn.pad_sequence', 'rnn_utils.pad_sequence', (['ys_action_l'], {'batch_first': '(True)'}), '(ys_action_l, batch_first=True)\n', (2012, 2043), True, 'from torch.nn.utils import rnn as rnn_utils\n'), ((2066, 2122), 'torch.nn.utils.rnn.pad_sequence', 'rnn_utils.pad_sequence', (['mask_spatial_l'], 
{'batch_first': '(True)'}), '(mask_spatial_l, batch_first=True)\n', (2088, 2122), True, 'from torch.nn.utils import rnn as rnn_utils\n'), ((2143, 2198), 'torch.nn.utils.rnn.pad_sequence', 'rnn_utils.pad_sequence', (['mask_linear_l'], {'batch_first': '(True)'}), '(mask_linear_l, batch_first=True)\n', (2165, 2198), True, 'from torch.nn.utils import rnn as rnn_utils\n'), ((2219, 2274), 'torch.nn.utils.rnn.pad_sequence', 'rnn_utils.pad_sequence', (['mask_action_l'], {'batch_first': '(True)'}), '(mask_action_l, batch_first=True)\n', (2241, 2274), True, 'from torch.nn.utils import rnn as rnn_utils\n'), ((10691, 10709), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (10703, 10709), True, 'import pandas as pd\n'), ((11168, 11190), 'torch.stack', 'torch.stack', (['state_seq'], {}), '(state_seq)\n', (11179, 11190), False, 'import torch\n'), ((11545, 11568), 'torch.stack', 'torch.stack', (['action_seq'], {}), '(action_seq)\n', (11556, 11568), False, 'import torch\n'), ((19226, 19260), 'torch.cat', 'torch.cat', (['spatial_states_l'], {'dim': '(1)'}), '(spatial_states_l, dim=1)\n', (19235, 19260), False, 'import torch\n'), ((19283, 19316), 'torch.cat', 'torch.cat', (['linear_states_l'], {'dim': '(1)'}), '(linear_states_l, dim=1)\n', (19292, 19316), False, 'import torch\n'), ((4954, 4984), 'torch.stack', 'torch.stack', (['batch', '(0)'], {'out': 'out'}), '(batch, 0, out=out)\n', (4965, 4984), False, 'import torch\n'), ((10931, 11016), 'numpy.concatenate', 'np.concatenate', (['[current_state_df[col] for col in soc_data.STATE_FIELDS]'], {'axis': '(0)'}), '([current_state_df[col] for col in soc_data.STATE_FIELDS], axis=0\n )\n', (10945, 11016), True, 'import numpy as np\n'), ((12177, 12192), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12190, 12192), False, 'import torch\n'), ((15384, 15427), 'torch.tensor', 'torch.tensor', (['(255 + 1 + 1)'], {'dtype': 'data.dtype'}), '(255 + 1 + 1, dtype=data.dtype)\n', (15396, 15427), False, 'import torch\n'), ((15752, 
15795), 'torch.tensor', 'torch.tensor', (['(255 + 1 + 1)'], {'dtype': 'data.dtype'}), '(255 + 1 + 1, dtype=data.dtype)\n', (15764, 15795), False, 'import torch\n'), ((811, 829), 'torch.ones_like', 'torch.ones_like', (['y'], {}), '(y)\n', (826, 829), False, 'import torch\n'), ((1626, 1652), 'torch.ones_like', 'torch.ones_like', (['y_spatial'], {}), '(y_spatial)\n', (1641, 1652), False, 'import torch\n'), ((1683, 1708), 'torch.ones_like', 'torch.ones_like', (['y_linear'], {}), '(y_linear)\n', (1698, 1708), False, 'import torch\n'), ((1739, 1764), 'torch.ones_like', 'torch.ones_like', (['y_action'], {}), '(y_action)\n', (1754, 1764), False, 'import torch\n'), ((2961, 3067), 'torch.zeros', 'torch.zeros', (['zeros_shape_history'], {'dtype': 'torch.float32', 'device': "data_dict['chat_mask_history_t'].device"}), "(zeros_shape_history, dtype=torch.float32, device=data_dict[\n 'chat_mask_history_t'].device)\n", (2972, 3067), False, 'import torch\n'), ((3173, 3227), 'torch.cat', 'torch.cat', (["[data_dict['chat_history_t'], zeros]"], {'dim': '(1)'}), "([data_dict['chat_history_t'], zeros], dim=1)\n", (3182, 3227), False, 'import torch\n'), ((3365, 3433), 'torch.cat', 'torch.cat', (["[data_dict['chat_mask_history_t'], zeros[:, :, 0]]"], {'dim': '(1)'}), "([data_dict['chat_mask_history_t'], zeros[:, :, 0]], dim=1)\n", (3374, 3433), False, 'import torch\n'), ((3841, 3940), 'torch.zeros', 'torch.zeros', (['zeros_shape_future'], {'dtype': 'torch.float32', 'device': "data_dict['chat_future_t'].device"}), "(zeros_shape_future, dtype=torch.float32, device=data_dict[\n 'chat_future_t'].device)\n", (3852, 3940), False, 'import torch\n'), ((4013, 4066), 'torch.cat', 'torch.cat', (["[data_dict['chat_future_t'], zeros]"], {'dim': '(1)'}), "([data_dict['chat_future_t'], zeros], dim=1)\n", (4022, 4066), False, 'import torch\n'), ((4146, 4213), 'torch.cat', 'torch.cat', (["[data_dict['chat_mask_future_t'], zeros[:, :, 0]]"], {'dim': '(1)'}), "([data_dict['chat_mask_future_t'], zeros[:, :, 
0]], dim=1)\n", (4155, 4213), False, 'import torch\n'), ((4613, 4647), 'torch.utils.data.get_worker_info', 'torch.utils.data.get_worker_info', ([], {}), '()\n', (4645, 4647), False, 'import torch\n'), ((11096, 11147), 'torch.tensor', 'torch.tensor', (['current_state_np'], {'dtype': 'torch.float32'}), '(current_state_np, dtype=torch.float32)\n', (11108, 11147), False, 'import torch\n'), ((11471, 11523), 'torch.tensor', 'torch.tensor', (['current_action_np'], {'dtype': 'torch.float32'}), '(current_action_np, dtype=torch.float32)\n', (11483, 11523), False, 'import torch\n'), ((14761, 14801), 'torch.cat', 'torch.cat', (['last_hidden_state_list'], {'dim': '(0)'}), '(last_hidden_state_list, dim=0)\n', (14770, 14801), False, 'import torch\n'), ((14830, 14866), 'torch.cat', 'torch.cat', (['pooler_output_list'], {'dim': '(0)'}), '(pooler_output_list, dim=0)\n', (14839, 14866), False, 'import torch\n'), ((15443, 15463), 'torch.sqrt', 'torch.sqrt', (['(data + 1)'], {}), '(data + 1)\n', (15453, 15463), False, 'import torch\n'), ((15466, 15481), 'torch.sqrt', 'torch.sqrt', (['val'], {}), '(val)\n', (15476, 15481), False, 'import torch\n'), ((15552, 15569), 'numpy.sqrt', 'np.sqrt', (['(data + 1)'], {}), '(data + 1)\n', (15559, 15569), True, 'import numpy as np\n'), ((15572, 15592), 'numpy.sqrt', 'np.sqrt', (['(255 + 1 + 1)'], {}), '(255 + 1 + 1)\n', (15579, 15592), True, 'import numpy as np\n'), ((5799, 5839), 'torch.tensor', 'torch.tensor', (['batch'], {'dtype': 'torch.float64'}), '(batch, dtype=torch.float64)\n', (5811, 5839), False, 'import torch\n'), ((15867, 15884), 'torch.round', 'torch.round', (['data'], {}), '(data)\n', (15878, 15884), False, 'import torch\n'), ((16028, 16042), 'numpy.round', 'np.round', (['data'], {}), '(data)\n', (16036, 16042), True, 'import numpy as np\n'), ((16724, 16741), 'torch.round', 'torch.round', (['data'], {}), '(data)\n', (16735, 16741), False, 'import torch\n'), ((16881, 16895), 'numpy.round', 'np.round', (['data'], {}), '(data)\n', (16889, 
16895), True, 'import numpy as np\n'), ((17443, 17460), 'torch.round', 'torch.round', (['data'], {}), '(data)\n', (17454, 17460), False, 'import torch\n'), ((17571, 17585), 'numpy.round', 'np.round', (['data'], {}), '(data)\n', (17579, 17585), True, 'import numpy as np\n'), ((18116, 18133), 'torch.round', 'torch.round', (['data'], {}), '(data)\n', (18127, 18133), False, 'import torch\n'), ((18244, 18258), 'numpy.round', 'np.round', (['data'], {}), '(data)\n', (18252, 18258), True, 'import numpy as np\n'), ((5727, 5749), 'torch.as_tensor', 'torch.as_tensor', (['batch'], {}), '(batch)\n', (5742, 5749), False, 'import torch\n'), ((5895, 5914), 'torch.tensor', 'torch.tensor', (['batch'], {}), '(batch)\n', (5907, 5914), False, 'import torch\n'), ((15831, 15846), 'torch.sqrt', 'torch.sqrt', (['val'], {}), '(val)\n', (15841, 15846), False, 'import torch\n'), ((15987, 16007), 'numpy.sqrt', 'np.sqrt', (['(255 + 1 + 1)'], {}), '(255 + 1 + 1)\n', (15994, 16007), True, 'import numpy as np\n'), ((5630, 5648), 'torch.as_tensor', 'torch.as_tensor', (['b'], {}), '(b)\n', (5645, 5648), False, 'import torch\n'), ((13920, 13961), 'torch.zeros_like', 'torch.zeros_like', (['empty_last_hidden_state'], {}), '(empty_last_hidden_state)\n', (13936, 13961), False, 'import torch\n'), ((14008, 14045), 'torch.zeros_like', 'torch.zeros_like', (['empty_pooler_output'], {}), '(empty_pooler_output)\n', (14024, 14045), False, 'import torch\n')] |
# model.py
import torch
from torch import nn
from torch import Tensor
from torch.autograd import Variable
import numpy as np
from sklearn.metrics import accuracy_score
class CNNText(nn.Module):
    """
    Kim (2014) style CNN for sentence classification.

    Three parallel 2D convolutions slide kernels of heights
    ``config.kernel_size`` (e.g. 3/4/5) over the full embedding width,
    each followed by ReLU and max-over-time pooling; the pooled features
    are concatenated, passed through dropout and a linear classifier,
    and returned as softmax probabilities.

    ``config`` must provide: ``in_channels``, ``num_channels``,
    ``kernel_size`` (list of 3 ints), ``embed_size``, ``max_sen_len``,
    ``dropout_keep``, ``output_size`` and (for ``run_epoch``)
    ``batch_size``.
    """
    def __init__(self, config):
        super(CNNText, self).__init__()
        self.config = config
        # Convolutional layers: 3 kernels as in the original paper, each
        # spanning the full embedding width, e.g. (3,300), (4,300), (5,300).
        self.conv1 = nn.Conv2d(in_channels=self.config.in_channels, out_channels=self.config.num_channels,
                               kernel_size=(self.config.kernel_size[0], self.config.embed_size),
                               stride=1, padding=0)
        self.activation1 = nn.ReLU()
        # Max-over-time pooling: the window covers every valid conv position.
        self.max_out1 = nn.MaxPool1d(self.config.max_sen_len - self.config.kernel_size[0] + 1)
        self.conv2 = nn.Conv2d(in_channels=self.config.in_channels, out_channels=self.config.num_channels,
                               kernel_size=(self.config.kernel_size[1], self.config.embed_size),
                               stride=1, padding=0)
        self.activation2 = nn.ReLU()
        self.max_out2 = nn.MaxPool1d(self.config.max_sen_len - self.config.kernel_size[1] + 1)
        self.conv3 = nn.Conv2d(in_channels=self.config.in_channels, out_channels=self.config.num_channels,
                               kernel_size=(self.config.kernel_size[2], self.config.embed_size),
                               stride=1, padding=0)
        self.activation3 = nn.ReLU()
        self.max_out3 = nn.MaxPool1d(self.config.max_sen_len - self.config.kernel_size[2] + 1)
        # NOTE(review): nn.Dropout expects the probability of DROPPING a
        # unit; the config name "dropout_keep" suggests a keep-probability —
        # confirm which one the config actually stores.
        self.dropout = nn.Dropout(self.config.dropout_keep)
        # Fully-connected classifier over the concatenated pooled features.
        self.fc = nn.Linear(self.config.num_channels * len(self.config.kernel_size), self.config.output_size)
        # Fix: give Softmax an explicit dim. nn.Softmax() without dim is
        # deprecated; for the 2D (batch, classes) input used here it
        # resolved to dim=1 anyway, so behaviour is unchanged.
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        """Return class probabilities for a (batch, max_sen_len, embed_size) input."""
        x = x.unsqueeze(1)  # (batch, max_seq_len, embed_size) -> (batch, 1, max_seq_len, embed_size)
        conv_out1 = self.conv1(x).squeeze(3)                  # (batch, channels, L1)
        activation_out1 = self.activation1(conv_out1)
        max_out1 = self.max_out1(activation_out1).squeeze(2)  # (batch, channels)
        conv_out2 = self.conv2(x).squeeze(3)
        activation_out2 = self.activation2(conv_out2)
        max_out2 = self.max_out2(activation_out2).squeeze(2)
        conv_out3 = self.conv3(x).squeeze(3)
        activation_out3 = self.activation3(conv_out3)
        max_out3 = self.max_out3(activation_out3).squeeze(2)
        all_out = torch.cat((max_out1, max_out2, max_out3), 1)
        final_feature_map = self.dropout(all_out)
        final_out = self.fc(final_feature_map)
        # NOTE(review): this returns softmax probabilities; make sure
        # loss_op is not CrossEntropyLoss (which applies softmax itself).
        return self.softmax(final_out)

    def add_optimizer(self, optimizer):
        """Attach the optimizer used by run_epoch."""
        self.optimizer = optimizer

    def add_loss_op(self, loss_op):
        """Attach the loss function used by run_epoch."""
        self.loss_op = loss_op

    def run_epoch(self, train_data, val_data):
        """
        Train for one epoch, evaluating on the validation set every 50 iters.

        ``train_data``/``val_data`` are (inputs, labels) pairs; labels are
        1-based and shifted to 0-based for the loss.  Requires CUDA and the
        module-level ``data_iterator`` helper.

        Returns (train_losses, val_accuracies), one entry per evaluation.
        """
        train_x, train_y = train_data[0], train_data[1]
        val_x, val_y = val_data[0], val_data[1]
        iterator = data_iterator(train_x, train_y, self.config.batch_size)
        train_losses = []
        val_accuracies = []
        losses = []
        for i, (x, y) in enumerate(iterator):
            self.optimizer.zero_grad()
            x = Tensor(x).cuda()
            y_pred = self.__call__(x)
            # Labels arrive 1-based; shift to 0-based class indices.
            loss = self.loss_op(y_pred, torch.cuda.LongTensor(y - 1))
            loss.backward()
            losses.append(loss.data.cpu().numpy())
            self.optimizer.step()
            if (i + 1) % 50 == 0:
                print("Iter: {}".format(i + 1))
                avg_train_loss = np.mean(losses)
                train_losses.append(avg_train_loss)
                print("\tAverage training loss: {:.5f}".format(avg_train_loss))
                losses = []
                # Evaluate accuracy on the validation set.
                self.eval()
                all_preds = []
                val_iterator = data_iterator(val_x, val_y, self.config.batch_size)
                for j, (x, y) in enumerate(val_iterator):
                    x = Variable(Tensor(x))
                    y_pred = self.__call__(x.cuda())
                    # argmax over classes, shifted back to 1-based labels.
                    predicted = torch.max(y_pred.cpu().data, 1)[1] + 1
                    all_preds.extend(predicted.numpy())
                score = accuracy_score(val_y, np.array(all_preds).flatten())
                val_accuracies.append(score)
                print("\tVal Accuracy: {:.4f}".format(score))
                self.train()
        return train_losses, val_accuracies
| [
"torch.nn.MaxPool1d",
"torch.cuda.LongTensor",
"torch.nn.ReLU",
"torch.nn.Dropout",
"numpy.mean",
"torch.nn.Softmax",
"torch.Tensor",
"torch.nn.Conv2d",
"numpy.array",
"torch.cat"
] | [((465, 647), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'self.config.in_channels', 'out_channels': 'self.config.num_channels', 'kernel_size': '(self.config.kernel_size[0], self.config.embed_size)', 'stride': '(1)', 'padding': '(0)'}), '(in_channels=self.config.in_channels, out_channels=self.config.\n num_channels, kernel_size=(self.config.kernel_size[0], self.config.\n embed_size), stride=1, padding=0)\n', (474, 647), False, 'from torch import nn\n'), ((726, 735), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (733, 735), False, 'from torch import nn\n'), ((760, 830), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', (['(self.config.max_sen_len - self.config.kernel_size[0] + 1)'], {}), '(self.config.max_sen_len - self.config.kernel_size[0] + 1)\n', (772, 830), False, 'from torch import nn\n'), ((851, 1033), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'self.config.in_channels', 'out_channels': 'self.config.num_channels', 'kernel_size': '(self.config.kernel_size[1], self.config.embed_size)', 'stride': '(1)', 'padding': '(0)'}), '(in_channels=self.config.in_channels, out_channels=self.config.\n num_channels, kernel_size=(self.config.kernel_size[1], self.config.\n embed_size), stride=1, padding=0)\n', (860, 1033), False, 'from torch import nn\n'), ((1112, 1121), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1119, 1121), False, 'from torch import nn\n'), ((1146, 1216), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', (['(self.config.max_sen_len - self.config.kernel_size[1] + 1)'], {}), '(self.config.max_sen_len - self.config.kernel_size[1] + 1)\n', (1158, 1216), False, 'from torch import nn\n'), ((1245, 1427), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'self.config.in_channels', 'out_channels': 'self.config.num_channels', 'kernel_size': '(self.config.kernel_size[2], self.config.embed_size)', 'stride': '(1)', 'padding': '(0)'}), '(in_channels=self.config.in_channels, out_channels=self.config.\n num_channels, kernel_size=(self.config.kernel_size[2], 
self.config.\n embed_size), stride=1, padding=0)\n', (1254, 1427), False, 'from torch import nn\n'), ((1506, 1515), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1513, 1515), False, 'from torch import nn\n'), ((1540, 1610), 'torch.nn.MaxPool1d', 'nn.MaxPool1d', (['(self.config.max_sen_len - self.config.kernel_size[2] + 1)'], {}), '(self.config.max_sen_len - self.config.kernel_size[2] + 1)\n', (1552, 1610), False, 'from torch import nn\n'), ((1641, 1677), 'torch.nn.Dropout', 'nn.Dropout', (['self.config.dropout_keep'], {}), '(self.config.dropout_keep)\n', (1651, 1677), False, 'from torch import nn\n'), ((1891, 1903), 'torch.nn.Softmax', 'nn.Softmax', ([], {}), '()\n', (1901, 1903), False, 'from torch import nn\n'), ((2579, 2623), 'torch.cat', 'torch.cat', (['(max_out1, max_out2, max_out3)', '(1)'], {}), '((max_out1, max_out2, max_out3), 1)\n', (2588, 2623), False, 'import torch\n'), ((3435, 3463), 'torch.cuda.LongTensor', 'torch.cuda.LongTensor', (['(y - 1)'], {}), '(y - 1)\n', (3456, 3463), False, 'import torch\n'), ((3699, 3714), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (3706, 3714), True, 'import numpy as np\n'), ((3340, 3349), 'torch.Tensor', 'Tensor', (['x'], {}), '(x)\n', (3346, 3349), False, 'from torch import Tensor\n'), ((4177, 4186), 'torch.Tensor', 'Tensor', (['x'], {}), '(x)\n', (4183, 4186), False, 'from torch import Tensor\n'), ((4414, 4433), 'numpy.array', 'np.array', (['all_preds'], {}), '(all_preds)\n', (4422, 4433), True, 'import numpy as np\n')] |
# Copyright (c) 2014, Vienna University of Technology (TU Wien), Department
# of Geodesy and Geoinformation (GEO).
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the Vienna University of Technology - Department of
# Geodesy and Geoinformation nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL VIENNA UNIVERSITY OF TECHNOLOGY,
# DEPARTMENT OF GEODESY AND GEOINFORMATION BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Author: <NAME> <EMAIL>
# Creation date: 2014-08-04
"""
Description of module.
"""
import pandas as pd
import numpy as np
from poets.timedate.dekad import get_dekad_period
def calc_CDI(data, refparam=None, lags=[0, 10]):
    """
    Calculates a weighted average over all columns of a pandas DataFrame.

    Parameters
    ----------
    data : pandas.DataFrame
        Pandas DataFrame containing data to be averaged.
    refparam : str, optional
        Reference parameter. If not set, parameters will be weighted
        equally.
    lags : list of int, optional
        Time periods to shift parameter against refparam, defaults to [0, 10].

    Returns
    -------
    df : pandas.DataFrame
        Single-column ('CDI') DataFrame holding the (weighted) average,
        indexed like ``data``.
    """
    cols = data.keys()
    # Mask NaN/inf values so they are excluded from the average.
    dat = np.ma.masked_invalid(np.array(data[cols]))
    if refparam is None:
        # Equal weighting. Bug fix: calc_weights used to be called
        # unconditionally, which raised a KeyError (lookup of data[None])
        # before this branch could ever be taken.
        avg = np.ma.average(dat, axis=1)
    else:
        weights = calc_weights(data, refparam, lags)
        avg = np.ma.average(dat, axis=1, weights=weights)
    df = pd.DataFrame(avg, columns=['CDI'], index=data.index)
    return df
def calc_weights(data, refparam, lags=[0, 10], exclude=None):
    """
    Derive averaging weights from each parameter's correlation with, and
    best time shift against, the reference parameter.

    Each weight is the mean of two percentage shares: the parameter's
    share of the summed best lags and its share of the summed best
    correlations.  Parameters must be directly proportional to the
    reference parameter!

    Parameters
    ----------
    data : pandas.DataFrame
        DataFrame containing data in columns.
    refparam : str
        Reference parameter.
    lags : list of int, optional
        Time periods to shift parameter against refparam,
        defaults to [0, 10].
    exclude : str, optional
        Variable which should not be used for calculation of the weights.

    Returns
    -------
    list of float
        Weights, ordered like the (non-excluded) columns of ``data``.
    """
    correlations = calc_correlation(data, refparam, lags, exclude)
    kept = [p for p in data.keys()
            if exclude is None or exclude not in p]
    lag_of = {p: correlations[p]['lag'] for p in kept}
    corr_of = {p: correlations[p]['corr'] for p in kept}
    lag_total = sum(lag_of.values())
    corr_total = sum(corr_of.values())
    weights = {}
    for p in kept:
        lag_share = float(lag_of[p]) / lag_total * 100
        corr_share = float(corr_of[p]) / corr_total * 100
        weights[p] = (lag_share + corr_share) / 2
    return [weights[p] for p in kept]
def calc_correlation(data, refparam, lags=[0, 10], exclude=None):
    """
    Correlate every column against a shifted reference column.

    For each (non-excluded) column, the reference column is shifted by
    ``lags[0]+abs(lags[0])+1 .. lags[1]+abs(lags[0])`` periods and the
    absolute Pearson correlation is computed for each shift.  The shift
    with the highest absolute correlation wins; ties are broken in
    favour of the smaller absolute shift.

    Parameters
    ----------
    data : pandas.DataFrame
        DataFrame containing data in columns.
    refparam : str
        Reference parameter.
    lags : list of int, optional
        Time periods to shift parameter against refparam,
        defaults to [0, 10].
    exclude : str, optional
        Variable which should not be used for calculation.

    Returns
    -------
    dict
        Maps each parameter to {'corr': best |correlation|, 'lag': shift}.
    """
    results = {}
    offset = abs(lags[0]) + 1
    for param in data.keys():
        if exclude is not None and exclude in param:
            continue
        best_corr = None
        best_lag = None
        for step in range(lags[0], lags[1]):
            shift = step + offset
            corr = abs(data[param].corr(data[refparam].shift(periods=shift),
                                        method='pearson'))
            if best_corr is None:
                best_corr, best_lag = corr, shift
            elif corr > best_corr:
                best_corr, best_lag = corr, shift
            elif corr == best_corr and abs(shift) < abs(best_lag):
                best_corr, best_lag = corr, shift
        results[param] = {'corr': best_corr, 'lag': best_lag}
    return results
def calc_DI(data, inverse=False, interest_period=(6, 12, 24), scaled=False,
            scale_zero=False, modf_all=False):
    """
    Calculates a Drought Index based on an algorithm developed by
    <NAME>.

    Parameters
    ----------
    data : pandas.DataFrame
        Input data as Pandas DataFrame, must come with column names.
    inverse : bool
        Inverts the input time series; set True if time series is indirect
        proportional to the expected output, e.g. Temperature with output
        Temperature Drought Index.
    interest_period : sequence of int, optional
        Interest periods used to calculate the drought index,
        defaults to (6, 12, 24).  (A tuple is used instead of a list to
        avoid a mutable default argument; lists still work when passed in.)
    scaled : boolean, optional
        If True values will be scaled between 0 and 1.
    scale_zero : boolean, optional
        If True values will be shifted around zero, defaults to False.
    modf_all : boolean, optional
        If True values will be modified, independent of their min.

    Returns
    -------
    data : pandas.DataFrame
        The input frame (modified in place) with one ``<var>_DI_<ip>``
        column per input variable and interest period.
    """
    ts_date = data.index
    variables = data.keys()
    # Dekad (10-day) period index used as the climatology grouping key.
    data['period'] = get_dekad_period(ts_date)
    for var in variables:
        if inverse is True:
            # Mirror the series so the index direction matches the output.
            data[var] = ((data[var].max() + 1) - data[var])
        # 'modf' is the working copy, shifted by +1 when zeros are present
        # (or unconditionally with modf_all) to avoid division by zero.
        if modf_all is True:
            data['modf'] = data[var] + 1
            del data[var]
        elif data[var].min() == 0:
            data['modf'] = data[var] + 1
            del data[var]
        else:
            data['modf'] = data[var]
            del data[var]
        # Long-term average of each dekad period.
        data['modf_avg'] = (data.groupby('period').modf
                            .transform(lambda x: x.mean()))
        # Excess
        # Dekads below long term average. If the statement is true the
        # program return 1
        data['exc'] = np.choose((data['modf_avg'] / data['modf']) >= 1,
                                [0, 1])
        # Run length
        # Maximum number of successive dekads below long term average
        for ip in interest_period:
            # NOTE(review): pd.rolling_apply is the legacy (pandas < 0.18)
            # rolling API, removed in modern pandas; the modern equivalent
            # is Series.rolling(ip, min_periods=...).apply(...).
            data['rlen'] = pd.rolling_apply(data['exc'], ip,
                                             (lambda x:
                                              len(max((''.join(str(j)
                                                               for j in map(int,
                                                                            x)))
                                                      .split('0')))),
                                             ip)
            # get modified run length
            max_rlen = data['rlen'].max()
            data['rlen'] = (max_rlen + 1) - data['rlen']
            # average run lenghts
            rlen_avg = (data.groupby('period').modf
                        .transform(lambda x: x.mean()))
            data['form'] = data['rlen'] / rlen_avg
            # sumip matrix
            # calculates sum of the values for each interest period
            data['sumip'] = pd.rolling_apply(data['modf'], ip,
                                              lambda x: np.nansum(x),
                                              round(ip * 0.6))
            # average values for each interest period over all years
            sumip_avg = (data.groupby('period')['sumip']
                         .transform(lambda x: x.mean()))
            data['nrl'] = data['sumip'] / sumip_avg
            # calculating PDI/TDI
            data['val'] = data['nrl'] * np.sqrt(data['form'])
            # scaled index
            dkey = var + '_DI_' + str(ip)
            if scaled:
                data[dkey] = ((data['val'] - data['val'].min()) /
                              (data['val'].max() - data['val'].min()))
            else:
                data[dkey] = data['val']
            if scale_zero:
                data[dkey] = data[dkey] - data[dkey].mean()
            del (data['val'], data['nrl'], data['sumip'], data['rlen'],
                 data['form'])
        # deletes not further relevant columns
        del data['modf'], data['modf_avg'], data['exc']
    del data['period']
    return data
# Entry-point guard: the module currently does nothing when run directly.
if __name__ == "__main__":
    pass
| [
"numpy.ma.average",
"numpy.sqrt",
"pandas.DataFrame",
"numpy.array",
"poets.timedate.dekad.get_dekad_period",
"numpy.ma.masked_invalid",
"numpy.nansum",
"numpy.choose"
] | [((2499, 2519), 'numpy.array', 'np.array', (['data[cols]'], {}), '(data[cols])\n', (2507, 2519), True, 'import numpy as np\n'), ((2530, 2555), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['dat'], {}), '(dat)\n', (2550, 2555), True, 'import numpy as np\n'), ((2750, 2802), 'pandas.DataFrame', 'pd.DataFrame', (['avg'], {'columns': "['CDI']", 'index': 'data.index'}), "(avg, columns=['CDI'], index=data.index)\n", (2762, 2802), True, 'import pandas as pd\n'), ((7159, 7184), 'poets.timedate.dekad.get_dekad_period', 'get_dekad_period', (['ts_date'], {}), '(ts_date)\n', (7175, 7184), False, 'from poets.timedate.dekad import get_dekad_period\n'), ((2646, 2672), 'numpy.ma.average', 'np.ma.average', (['dat'], {'axis': '(1)'}), '(dat, axis=1)\n', (2659, 2672), True, 'import numpy as np\n'), ((2697, 2740), 'numpy.ma.average', 'np.ma.average', (['dat'], {'axis': '(1)', 'weights': 'weights'}), '(dat, axis=1, weights=weights)\n', (2710, 2740), True, 'import numpy as np\n'), ((7832, 7887), 'numpy.choose', 'np.choose', (["(data['modf_avg'] / data['modf'] >= 1)", '[0, 1]'], {}), "(data['modf_avg'] / data['modf'] >= 1, [0, 1])\n", (7841, 7887), True, 'import numpy as np\n'), ((9444, 9465), 'numpy.sqrt', 'np.sqrt', (["data['form']"], {}), "(data['form'])\n", (9451, 9465), True, 'import numpy as np\n'), ((9057, 9069), 'numpy.nansum', 'np.nansum', (['x'], {}), '(x)\n', (9066, 9069), True, 'import numpy as np\n')] |
import numpy as np
from scipy import fft
from scipy.io import wavfile
from sklearn.linear_model import LogisticRegression
import pickle
import pprint
# Prepare the music data
def create_fft(g, n, genre_dir="d:/tmp/genres", out_dir="d:/tmp/trainset"):
    """Compute and store the first 1000 FFT magnitudes of one audio track.

    Args:
        g: Genre name (also the sub-directory and file-name prefix).
        n: Track number; zero-padded to five digits in the file name.
        genre_dir: Root directory containing ``<genre>/converted/*.wav``
            files (default keeps the original hard-coded location).
        out_dir: Directory where the ``.fft`` feature file is written
            (``np.save`` appends ``.npy`` to the name).
    """
    track = str(n).zfill(5)
    rad = genre_dir + "/" + g + "/converted/" + g + "." + track + ".au.wav"
    (sample_rate, X) = wavfile.read(rad)
    # scipy.fft is a module on modern SciPy, so call fft.fft explicitly
    # (the old callable-module form `fft(X)` no longer works).
    fft_features = abs(fft.fft(X)[:1000])
    sad = out_dir + "/" + g + "." + track + ".fft"
    np.save(sad, fft_features)
# The six genres in the training corpus; for each, extract FFT features
# from the first 100 converted tracks on disk (see create_fft above).
genre_list = ["classical", "jazz", "country", "pop", "rock", "metal"]
for g in genre_list:
    for n in range(100):
        create_fft(g, n)
| [
"scipy.fft",
"scipy.io.wavfile.read",
"numpy.save"
] | [((293, 310), 'scipy.io.wavfile.read', 'wavfile.read', (['rad'], {}), '(rad)\n', (305, 310), False, 'from scipy.io import wavfile\n'), ((419, 445), 'numpy.save', 'np.save', (['sad', 'fft_features'], {}), '(sad, fft_features)\n', (426, 445), True, 'import numpy as np\n'), ((334, 340), 'scipy.fft', 'fft', (['X'], {}), '(X)\n', (337, 340), False, 'from scipy import fft\n')] |
import torch
import numpy as np
import time
import tempfile
import random
from itertools import count
import gc
import os
import glob
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import utils
# Shared display/logging settings pulled from the project-wide defaults
# (provided by the local utils module).
default_variables_dict = utils.get_default_variable_dict()
LEAVE_PRINT_EVERY_N_SECS = default_variables_dict["LEAVE_PRINT_EVERY_N_SECS"]
ERASE_LINE = default_variables_dict["ERASE_LINE"]
class REINFORCE:
    """Monte-Carlo policy-gradient (REINFORCE) agent.

    Collects a complete episode rollout, then performs a single policy
    update weighting each action's log-probability by its discounted
    return.
    """

    def __init__(self):
        # Factories so that the network/optimizer are built lazily in
        # train(), once the environment dimensions are known.
        self.policy_model_fn = lambda nS, nA: PolicyNet(nS, nA, hidden_dims=(128, 64))
        self.policy_optimizer_fn = lambda net, lr: optim.Adam(net.parameters(), lr=lr)
        self.policy_optimizer_lr = 0.0005

    def optimize_model(self):
        """Run one policy-gradient update from the stored episode rollout."""
        T = len(self.rewards)
        discounts = np.logspace(0, T, num=T, base=self.gamma, endpoint=False)
        returns = np.array([np.sum(discounts[:T - t] * self.rewards[t:]) for t in range(T)])
        discounts = torch.FloatTensor(discounts).unsqueeze(1)
        returns = torch.FloatTensor(returns).unsqueeze(1)
        self.logpas = torch.cat(self.logpas)  # log probability of the actions selected
        # pytorch does gradient descent by default --> minimizing negative performance is like max. performance
        # log probability of actions selected is weighted by their discounted returns
        policy_loss = -(discounts * returns * self.logpas).mean()
        self.policy_optimizer.zero_grad()
        policy_loss.backward()
        self.policy_optimizer.step()

    def interaction_step(self, state, env):
        """Sample one action, step the environment, and record statistics.

        Returns:
            (new_state, is_terminal) from the environment step.
        """
        action, is_exploratory, logpa, _ = self.policy_model.full_pass(state)
        new_state, reward, is_terminal, _ = env.step(action)
        self.logpas.append(logpa)
        self.rewards.append(reward)
        self.episode_reward[-1] += reward
        self.episode_timestep[-1] += 1
        self.episode_exploration[-1] += int(is_exploratory)
        return new_state, is_terminal

    # this function is the entry point for training the agent
    def train(self, make_env_fn, make_env_kargs, seed, gamma,
              max_minutes, max_episodes, goal_mean_100_reward):
        """Train until a time, episode, or score budget is exhausted.

        Args:
            make_env_fn: factory returning a new (gym-style) environment.
            make_env_kargs: keyword arguments for ``make_env_fn``.
            seed: RNG seed applied to torch, numpy, random and the env.
            gamma: discount factor for the returns.
            max_minutes: wall-clock training budget in minutes.
            max_episodes: maximum number of training episodes.
            goal_mean_100_reward: stop once the mean evaluation score over
                the last 100 episodes reaches this value.

        Returns:
            (result, final_eval_score, training_time, wallclock_time)
        """
        training_start, last_debug_time = time.time(), float('-inf')
        self.checkpoint_dir = tempfile.mkdtemp()
        self.make_env_fn = make_env_fn
        self.make_env_kargs = make_env_kargs
        self.seed = seed
        self.gamma = gamma
        env = self.make_env_fn(**self.make_env_kargs, seed=self.seed)
        # Seed every RNG in play for reproducibility.
        torch.manual_seed(self.seed)
        np.random.seed(self.seed)
        random.seed(self.seed)
        state_dim, acts_dim = env.observation_space.shape[0], env.action_space.n
        self.episode_timestep = []
        self.episode_reward = []
        self.episode_seconds = []
        self.episode_exploration = []
        self.evaluation_scores = []
        self.policy_model = self.policy_model_fn(state_dim, acts_dim)
        self.policy_optimizer = self.policy_optimizer_fn(self.policy_model,
                                                         self.policy_optimizer_lr)
        # One row per episode: steps, mean-100 reward, mean-100 eval score,
        # training time, wall-clock time.
        result = np.empty((max_episodes, 5))
        result[:] = np.nan
        training_time = 0
        for episode in range(1, max_episodes + 1):
            episode_start = time.time()
            state, is_terminal = env.reset(), False
            self.episode_reward.append(0.0)
            self.episode_timestep.append(0.0)
            self.episode_exploration.append(0.0)
            # collect rollout
            self.logpas, self.rewards = [], []
            for step in count():
                state, is_terminal = self.interaction_step(state, env)
                if is_terminal:
                    gc.collect()
                    break
            self.optimize_model()
            # stats
            episode_elapsed = time.time() - episode_start
            self.episode_seconds.append(episode_elapsed)
            training_time += episode_elapsed
            evaluation_score, _ = self.evaluate(self.policy_model, env)
            utils.save_checkpoint(self.checkpoint_dir, episode - 1, self.policy_model)
            total_step = int(np.sum(self.episode_timestep))
            self.evaluation_scores.append(evaluation_score)
            mean_10_reward = np.mean(self.episode_reward[-10:])
            std_10_reward = np.std(self.episode_reward[-10:])
            mean_100_reward = np.mean(self.episode_reward[-100:])
            std_100_reward = np.std(self.episode_reward[-100:])
            mean_100_eval_score = np.mean(self.evaluation_scores[-100:])
            std_100_eval_score = np.std(self.evaluation_scores[-100:])
            lst_100_exp_rat = np.array(
                self.episode_exploration[-100:]) / np.array(self.episode_timestep[-100:])
            mean_100_exp_rat = np.mean(lst_100_exp_rat)
            std_100_exp_rat = np.std(lst_100_exp_rat)
            wallclock_elapsed = time.time() - training_start
            result[episode - 1] = total_step, mean_100_reward, \
                mean_100_eval_score, training_time, wallclock_elapsed
            # Termination conditions and periodic debug printing.
            reached_debug_time = time.time() - last_debug_time >= LEAVE_PRINT_EVERY_N_SECS
            reached_max_minutes = wallclock_elapsed >= max_minutes * 60
            reached_max_episodes = episode >= max_episodes
            reached_goal_mean_reward = mean_100_eval_score >= goal_mean_100_reward
            training_is_over = reached_max_minutes or \
                               reached_max_episodes or \
                               reached_goal_mean_reward
            elapsed_str = time.strftime("%H:%M:%S", time.gmtime(time.time() - training_start))
            debug_message = 'el {}, ep {:04}, ts {:06}, '
            debug_message += 'ar 10 {:05.1f}\u00B1{:05.1f}, '
            debug_message += '100 {:05.1f}\u00B1{:05.1f}, '
            debug_message += 'ex 100 {:02.1f}\u00B1{:02.1f}, '
            debug_message += 'ev {:05.1f}\u00B1{:05.1f}'
            debug_message = debug_message.format(
                elapsed_str, episode - 1, total_step, mean_10_reward, std_10_reward,
                mean_100_reward, std_100_reward, mean_100_exp_rat, std_100_exp_rat,
                mean_100_eval_score, std_100_eval_score)
            print(debug_message, end='\r', flush=True)
            if reached_debug_time or training_is_over:
                print(ERASE_LINE + debug_message, flush=True)
                last_debug_time = time.time()
            if training_is_over:
                if reached_max_minutes: print(u'--> reached_max_minutes \u2715')
                if reached_max_episodes: print(u'--> reached_max_episodes \u2715')
                if reached_goal_mean_reward: print(u'--> reached_goal_mean_reward \u2713')
                break
        final_eval_score, score_std = self.evaluate(self.policy_model, env, n_episodes=100)
        wallclock_time = time.time() - training_start
        print('Training complete.')
        print('Final evaluation score {:.2f}\u00B1{:.2f} in {:.2f}s training time,'
              ' {:.2f}s wall-clock time.\n'.format(
                  final_eval_score, score_std, training_time, wallclock_time))
        env.close()
        del env
        self.get_cleaned_checkpoints()
        return result, final_eval_score, training_time, wallclock_time

    def evaluate(self, eval_policy_model, eval_env, n_episodes=1, greedy=True):
        """Return (mean, std) of episode rewards over ``n_episodes`` runs.

        With ``greedy=True`` the argmax action is used; otherwise actions
        are sampled from the policy.
        """
        rs = []
        for _ in range(n_episodes):
            s, d = eval_env.reset(), False
            rs.append(0)
            for _ in count():
                if greedy:
                    a = eval_policy_model.select_greedy_action(s)
                else:
                    a = eval_policy_model.select_action(s)
                s, r, d, _ = eval_env.step(a)
                rs[-1] += r
                if d:
                    break
        return np.mean(rs), np.std(rs)

    def get_cleaned_checkpoints(self, n_checkpoints=5):
        """Keep only ``n_checkpoints`` evenly spaced checkpoint files.

        Returns a dict mapping episode index -> checkpoint path; every
        other checkpoint file in the checkpoint directory is deleted.
        The result is cached on the instance after the first call.
        """
        try:
            return self.checkpoint_paths
        except AttributeError:
            self.checkpoint_paths = {}
        paths = glob.glob(os.path.join(self.checkpoint_dir, '*.tar'))
        paths_dic = {int(path.split('.')[-2]): path for path in paths}
        last_ep = max(paths_dic.keys())
        # checkpoint_idxs = np.geomspace(1, last_ep+1, n_checkpoints, endpoint=True, dtype=np.int)-1
        # np.int was removed in NumPy 1.24; the builtin int is equivalent.
        checkpoint_idxs = np.linspace(1, last_ep + 1, n_checkpoints,
                                      endpoint=True, dtype=int) - 1
        for idx, path in paths_dic.items():
            if idx in checkpoint_idxs:
                self.checkpoint_paths[idx] = path
            else:
                os.unlink(path)
        return self.checkpoint_paths

    def demo_last(self, title='{} Agent - Fully Trained ', n_episodes=3, max_n_videos=3):
        """Record evaluation episodes using the last saved checkpoint."""
        env = self.make_env_fn(**self.make_env_kargs, monitor_mode='evaluation', render=True, record=True)
        title = title.format(self.__class__.__name__)
        checkpoint_paths = self.get_cleaned_checkpoints()
        last_ep = max(checkpoint_paths.keys())
        self.policy_model.load_state_dict(torch.load(checkpoint_paths[last_ep]))
        self.evaluate(self.policy_model, env, n_episodes=n_episodes)
        env.close()
        html_data = utils.get_gif_html(env_videos=env.videos,
                                       title=title,
                                       max_n_videos=max_n_videos)
        del env
        return html_data, title

    def demo_progression(self, title='{} Agent - Progression', max_n_videos=5):
        """Record one evaluation episode per kept checkpoint to show progress."""
        env = self.make_env_fn(**self.make_env_kargs, monitor_mode='evaluation', render=True, record=True)
        title = title.format(self.__class__.__name__)
        checkpoint_paths = self.get_cleaned_checkpoints()
        for i in sorted(checkpoint_paths.keys()):
            self.policy_model.load_state_dict(torch.load(checkpoint_paths[i]))
            self.evaluate(self.policy_model, env, n_episodes=1)
        env.close()
        html_data = utils.get_gif_html(env_videos=env.videos,
                                       title=title,
                                       subtitle_eps=sorted(checkpoint_paths.keys()),
                                       max_n_videos=max_n_videos)
        del env
        return html_data, title
class PolicyNet(nn.Module):
    """Fully connected policy network producing action logits.

    Maps a state vector to unnormalized log-probabilities over the
    discrete action space.
    """

    def __init__(self,
                 input_dim,
                 output_dim,
                 hidden_dims=(32, 32),
                 activation_fc=F.relu):
        super(PolicyNet, self).__init__()
        self.activation_fc = activation_fc
        # Input layer followed by a chain of hidden layers of the
        # requested widths, ending in a logits layer.
        self.input_layer = nn.Linear(input_dim, hidden_dims[0])
        self.hidden_layers = nn.ModuleList()
        for fan_in, fan_out in zip(hidden_dims[:-1], hidden_dims[1:]):
            self.hidden_layers.append(nn.Linear(fan_in, fan_out))
        self.output_layer = nn.Linear(hidden_dims[-1], output_dim)

    def _format(self, state):
        """Coerce a raw state into a batched float32 tensor."""
        if isinstance(state, torch.Tensor):
            return state
        return torch.tensor(state,
                            dtype=torch.float32).unsqueeze(0)

    def forward(self, state):
        """Return action logits for ``state``."""
        activations = self.activation_fc(self.input_layer(self._format(state)))
        for layer in self.hidden_layers:
            activations = self.activation_fc(layer(activations))
        return self.output_layer(activations)

    def full_pass(self, state):
        """Sample an action and return (action, is_exploratory, logpa, entropy).

        ``is_exploratory`` is True when the sampled action differs from
        the greedy (argmax) action.
        """
        logits = self.forward(state)
        categorical = torch.distributions.Categorical(logits=logits)
        sampled = categorical.sample()
        log_prob = categorical.log_prob(sampled).unsqueeze(-1)
        entropy = categorical.entropy().unsqueeze(-1)
        explored = sampled != np.argmax(logits.detach().numpy())
        return sampled.item(), explored.item(), log_prob, entropy

    def select_action(self, state):
        """Sample an action from the current policy distribution."""
        categorical = torch.distributions.Categorical(logits=self.forward(state))
        return categorical.sample().item()
    def select_greedy_action(self, state):
        # Deterministic policy: return the action with the highest logit
        # (used during evaluation instead of sampling).
        logits = self.forward(state)
        return np.argmax(logits.detach().numpy()) | [
"torch.distributions.Categorical",
"numpy.array",
"numpy.mean",
"torch.nn.ModuleList",
"numpy.linspace",
"numpy.empty",
"numpy.random.seed",
"os.unlink",
"numpy.logspace",
"utils.get_gif_html",
"tempfile.mkdtemp",
"gc.collect",
"numpy.std",
"utils.get_default_variable_dict",
"time.time",... | [((258, 291), 'utils.get_default_variable_dict', 'utils.get_default_variable_dict', ([], {}), '()\n', (289, 291), False, 'import utils\n'), ((761, 818), 'numpy.logspace', 'np.logspace', (['(0)', 'T'], {'num': 'T', 'base': 'self.gamma', 'endpoint': '(False)'}), '(0, T, num=T, base=self.gamma, endpoint=False)\n', (772, 818), True, 'import numpy as np\n'), ((1055, 1077), 'torch.cat', 'torch.cat', (['self.logpas'], {}), '(self.logpas)\n', (1064, 1077), False, 'import torch\n'), ((2221, 2239), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (2237, 2239), False, 'import tempfile\n'), ((2455, 2483), 'torch.manual_seed', 'torch.manual_seed', (['self.seed'], {}), '(self.seed)\n', (2472, 2483), False, 'import torch\n'), ((2486, 2511), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (2500, 2511), True, 'import numpy as np\n'), ((2514, 2536), 'random.seed', 'random.seed', (['self.seed'], {}), '(self.seed)\n', (2525, 2536), False, 'import random\n'), ((3043, 3070), 'numpy.empty', 'np.empty', (['(max_episodes, 5)'], {}), '((max_episodes, 5))\n', (3051, 3070), True, 'import numpy as np\n'), ((9113, 9199), 'utils.get_gif_html', 'utils.get_gif_html', ([], {'env_videos': 'env.videos', 'title': 'title', 'max_n_videos': 'max_n_videos'}), '(env_videos=env.videos, title=title, max_n_videos=\n max_n_videos)\n', (9131, 9199), False, 'import utils\n'), ((10451, 10487), 'torch.nn.Linear', 'nn.Linear', (['input_dim', 'hidden_dims[0]'], {}), '(input_dim, hidden_dims[0])\n', (10460, 10487), True, 'import torch.nn as nn\n'), ((10517, 10532), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (10530, 10532), True, 'import torch.nn as nn\n'), ((10732, 10770), 'torch.nn.Linear', 'nn.Linear', (['hidden_dims[-1]', 'output_dim'], {}), '(hidden_dims[-1], output_dim)\n', (10741, 10770), True, 'import torch.nn as nn\n'), ((11330, 11376), 'torch.distributions.Categorical', 'torch.distributions.Categorical', ([], {'logits': 'logits'}), 
'(logits=logits)\n', (11361, 11376), False, 'import torch\n'), ((11734, 11780), 'torch.distributions.Categorical', 'torch.distributions.Categorical', ([], {'logits': 'logits'}), '(logits=logits)\n', (11765, 11780), False, 'import torch\n'), ((2163, 2174), 'time.time', 'time.time', ([], {}), '()\n', (2172, 2174), False, 'import time\n'), ((3203, 3214), 'time.time', 'time.time', ([], {}), '()\n', (3212, 3214), False, 'import time\n'), ((3509, 3516), 'itertools.count', 'count', ([], {}), '()\n', (3514, 3516), False, 'from itertools import count\n'), ((3980, 4054), 'utils.save_checkpoint', 'utils.save_checkpoint', (['self.checkpoint_dir', '(episode - 1)', 'self.policy_model'], {}), '(self.checkpoint_dir, episode - 1, self.policy_model)\n', (4001, 4054), False, 'import utils\n'), ((4204, 4238), 'numpy.mean', 'np.mean', (['self.episode_reward[-10:]'], {}), '(self.episode_reward[-10:])\n', (4211, 4238), True, 'import numpy as np\n'), ((4267, 4300), 'numpy.std', 'np.std', (['self.episode_reward[-10:]'], {}), '(self.episode_reward[-10:])\n', (4273, 4300), True, 'import numpy as np\n'), ((4331, 4366), 'numpy.mean', 'np.mean', (['self.episode_reward[-100:]'], {}), '(self.episode_reward[-100:])\n', (4338, 4366), True, 'import numpy as np\n'), ((4396, 4430), 'numpy.std', 'np.std', (['self.episode_reward[-100:]'], {}), '(self.episode_reward[-100:])\n', (4402, 4430), True, 'import numpy as np\n'), ((4465, 4503), 'numpy.mean', 'np.mean', (['self.evaluation_scores[-100:]'], {}), '(self.evaluation_scores[-100:])\n', (4472, 4503), True, 'import numpy as np\n'), ((4537, 4574), 'numpy.std', 'np.std', (['self.evaluation_scores[-100:]'], {}), '(self.evaluation_scores[-100:])\n', (4543, 4574), True, 'import numpy as np\n'), ((4736, 4760), 'numpy.mean', 'np.mean', (['lst_100_exp_rat'], {}), '(lst_100_exp_rat)\n', (4743, 4760), True, 'import numpy as np\n'), ((4791, 4814), 'numpy.std', 'np.std', (['lst_100_exp_rat'], {}), '(lst_100_exp_rat)\n', (4797, 4814), True, 'import numpy as np\n'), 
((6819, 6830), 'time.time', 'time.time', ([], {}), '()\n', (6828, 6830), False, 'import time\n'), ((7455, 7462), 'itertools.count', 'count', ([], {}), '()\n', (7460, 7462), False, 'from itertools import count\n'), ((7755, 7766), 'numpy.mean', 'np.mean', (['rs'], {}), '(rs)\n', (7762, 7766), True, 'import numpy as np\n'), ((7768, 7778), 'numpy.std', 'np.std', (['rs'], {}), '(rs)\n', (7774, 7778), True, 'import numpy as np\n'), ((7987, 8029), 'os.path.join', 'os.path.join', (['self.checkpoint_dir', '"""*.tar"""'], {}), "(self.checkpoint_dir, '*.tar')\n", (7999, 8029), False, 'import os\n'), ((8269, 8340), 'numpy.linspace', 'np.linspace', (['(1)', '(last_ep + 1)', 'n_checkpoints'], {'endpoint': '(True)', 'dtype': 'np.int'}), '(1, last_ep + 1, n_checkpoints, endpoint=True, dtype=np.int)\n', (8280, 8340), True, 'import numpy as np\n'), ((8964, 9001), 'torch.load', 'torch.load', (['checkpoint_paths[last_ep]'], {}), '(checkpoint_paths[last_ep])\n', (8974, 9001), False, 'import torch\n'), ((10606, 10651), 'torch.nn.Linear', 'nn.Linear', (['hidden_dims[i]', 'hidden_dims[i + 1]'], {}), '(hidden_dims[i], hidden_dims[i + 1])\n', (10615, 10651), True, 'import torch.nn as nn\n'), ((10880, 10916), 'torch.tensor', 'torch.tensor', (['x'], {'dtype': 'torch.float32'}), '(x, dtype=torch.float32)\n', (10892, 10916), False, 'import torch\n'), ((847, 891), 'numpy.sum', 'np.sum', (['(discounts[:T - t] * self.rewards[t:])'], {}), '(discounts[:T - t] * self.rewards[t:])\n', (853, 891), True, 'import numpy as np\n'), ((933, 961), 'torch.FloatTensor', 'torch.FloatTensor', (['discounts'], {}), '(discounts)\n', (950, 961), False, 'import torch\n'), ((993, 1019), 'torch.FloatTensor', 'torch.FloatTensor', (['returns'], {}), '(returns)\n', (1010, 1019), False, 'import torch\n'), ((3766, 3777), 'time.time', 'time.time', ([], {}), '()\n', (3775, 3777), False, 'import time\n'), ((4083, 4112), 'numpy.sum', 'np.sum', (['self.episode_timestep'], {}), '(self.episode_timestep)\n', (4089, 4112), True, 
'import numpy as np\n'), ((4605, 4646), 'numpy.array', 'np.array', (['self.episode_exploration[-100:]'], {}), '(self.episode_exploration[-100:])\n', (4613, 4646), True, 'import numpy as np\n'), ((4666, 4704), 'numpy.array', 'np.array', (['self.episode_timestep[-100:]'], {}), '(self.episode_timestep[-100:])\n', (4674, 4704), True, 'import numpy as np\n'), ((4848, 4859), 'time.time', 'time.time', ([], {}), '()\n', (4857, 4859), False, 'import time\n'), ((6379, 6390), 'time.time', 'time.time', ([], {}), '()\n', (6388, 6390), False, 'import time\n'), ((8510, 8525), 'os.unlink', 'os.unlink', (['path'], {}), '(path)\n', (8519, 8525), False, 'import os\n'), ((9718, 9749), 'torch.load', 'torch.load', (['checkpoint_paths[i]'], {}), '(checkpoint_paths[i])\n', (9728, 9749), False, 'import torch\n'), ((3641, 3653), 'gc.collect', 'gc.collect', ([], {}), '()\n', (3651, 3653), False, 'import gc\n'), ((5061, 5072), 'time.time', 'time.time', ([], {}), '()\n', (5070, 5072), False, 'import time\n'), ((5567, 5578), 'time.time', 'time.time', ([], {}), '()\n', (5576, 5578), False, 'import time\n')] |
# *****************************************************************************
# Copyright (c) 2019, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
import numpy as np
import time
import pandas as pd
import sdc
@sdc.jit  # compiled by the sdc module imported above
def accel_infer(n):
    # Benchmark kernel: generate n random 3-axis samples, compute the net
    # acceleration magnitude relative to g, and flag samples whose
    # 10-sample rolling mean exceeds mean + 5*std ("braking" events).
    # Prints the elapsed wall-clock time and returns the flag count.
    t1 = time.time()
    X = np.random.ranf(n)
    Y = np.random.ranf(n)
    Z = np.random.ranf(n)
    df = pd.DataFrame({'X': X, 'Y': Y, 'Z': Z})
    g = 9.81  # standard gravity
    df['accel'] = np.sqrt(df.X**2 + df.Y**2 + (df.Z - g)**2)
    threshold = df.accel.mean() + 5 * df.accel.std()
    df['is_brake'] = (df.rolling(10)['accel'].mean() > threshold)
    df.is_brake.fillna(False, inplace=True)
    checksum = df.is_brake.sum()
    t2 = time.time()
    print("exec time:", t2 - t1)
    return checksum
# Run the benchmark on 10^8 random samples.
n = 10**8
accel_infer(n)
| [
"numpy.random.ranf",
"numpy.sqrt",
"time.time",
"pandas.DataFrame"
] | [((1617, 1628), 'time.time', 'time.time', ([], {}), '()\n', (1626, 1628), False, 'import time\n'), ((1637, 1654), 'numpy.random.ranf', 'np.random.ranf', (['n'], {}), '(n)\n', (1651, 1654), True, 'import numpy as np\n'), ((1663, 1680), 'numpy.random.ranf', 'np.random.ranf', (['n'], {}), '(n)\n', (1677, 1680), True, 'import numpy as np\n'), ((1689, 1706), 'numpy.random.ranf', 'np.random.ranf', (['n'], {}), '(n)\n', (1703, 1706), True, 'import numpy as np\n'), ((1717, 1755), 'pandas.DataFrame', 'pd.DataFrame', (["{'X': X, 'Y': Y, 'Z': Z}"], {}), "({'X': X, 'Y': Y, 'Z': Z})\n", (1729, 1755), True, 'import pandas as pd\n'), ((1788, 1836), 'numpy.sqrt', 'np.sqrt', (['(df.X ** 2 + df.Y ** 2 + (df.Z - g) ** 2)'], {}), '(df.X ** 2 + df.Y ** 2 + (df.Z - g) ** 2)\n', (1795, 1836), True, 'import numpy as np\n'), ((2037, 2048), 'time.time', 'time.time', ([], {}), '()\n', (2046, 2048), False, 'import time\n')] |
# -*- coding: utf-8 -*-
'''
Module for defining the classes related to the :math:`\\mathrm{A}^\\ast`
algorithm.
:math:`\\mathrm{A}^\\ast` algorithm (pronounced as "A star") is a search
algorithm proposed in 1968 by
`Hart, Nilsson & Raphael (1968) <https://doi.org/10.1109/TSSC.1968.300136>`_.
It optimally finds the minimum cost path in a graph from a start node
:math:`s` to a goal node :math:`g`; in other words, the shortest path if the
cost is given by its length. In addition, the algorithm does not expand as many
nodes as other algorithms do; then, for a graph with a huge quantity of nodes,
the computational performance is higher with respect to other search algorithms
The :math:`\\mathrm{A}^\\ast` algorithm works constantly evaluating an
evaluation function :math:`f(n)` composed of two parts: one is the actual cost
of the optimum path traveled from :math:`s` to the current node :math:`n`
given by the expression :math:`g(n)`, and the second is the cost of the optimum
path from the current node :math:`n` to :math:`g` given by the expression
:math:`h(n)`, which is the heuristic component of the algorithm and it could be
for example either the
`euclidean <https://en.wikipedia.org/wiki/euclidean_distance>`_ or
`manhattan <https://en.wikipedia.org/wiki/Taxicab_geometry>`_ distance.
Thereby, the evaluation function to measure the path cost is
:math:`f(n) = g(n) + h(n)`.
'''
# %%
class PreferredPath:
    '''Stores the indexes of a polyline, in the space of a grid-graph,
    that represents a preferential path to follow when the
    :math:`\\mathrm{A}^\\ast` algorithm is applied. ::

        PreferredPath(coordsIdx, factor)

    The ``factor`` attribute is a coefficient :math:`k` that multiplies
    the distance :math:`d` between the current node :math:`n` and the
    polyline, so the node-evaluation function becomes
    :math:`f(n) = g(n) + h(n) + kd`.

    Attributes:
        coordsIdx (`numpy.ndarray`): (2, n) array with the indexes of the
            polyline; first row holds the rows and second row the columns
            of the grid-graph.
        factor (`int` or `float`): Multiplier of the shortest distance
            between the current node and the polyline.

    Examples:
        >>> from pybimstab.astar import PreferredPath
        >>> preferredPath = PreferredPath([[0, 1, 2], [0, 0, 0]], factor=1)
        >>> preferredPath.factor
        1
    '''

    def __init__(self, coordsIdx, factor):
        '''
        PreferredPath(coordsIdx, factor)
        '''
        import numpy as np
        # Store the polyline as an ndarray regardless of the input type.
        self.coordsIdx = np.array(coordsIdx)
        self.factor = factor
# %%
class Node:
    '''A single cell of the grid-graph explored by the
    :math:`\\mathrm{A}^\\ast` search. ::

        Node(pos=None, father=None, gCost=None, hCost=None, val=0)

    Attributes:
        pos (`tuple`): Indexes of this node in the grid-graph.
        father (`tuple`): Indexes of the node this one was reached from.
            Default value is ``None``.
        gCost (`int` or `float`): Length of the traveled path from the
            start node to this node. Default value is ``None``.
        hCost (`int` or `float`): Heuristic length of the path from this
            node to the goal node. Default value is ``None``.
        val (`int`): Value stored at this cell in the matrix that defines
            the grid-graph. Default value is ``0``.

    Note:
        The class ``Node`` requires `numpy <http://www.numpy.org/>`_ and
        `shapely <https://pypi.python.org/pypi/Shapely>`_

    Examples:
        >>> node = Node(pos=(6, 7), father=(5, 7), gCost=5, hCost=10, val=1)
        >>> node.__dict__
        {'father': (5, 7), 'gCost': 5, 'hCost': 10, 'pos': (6, 7), 'val': 1}
    '''

    def __init__(self, pos, father=None, gCost=None, hCost=None, val=0):
        '''
        Node(pos=None, father=None, gCost=None, hCost=None, val=0)
        '''
        self.pos = pos
        self.father = father
        self.gCost = gCost
        self.hCost = hCost
        self.val = val

    def getHcost(self, goalNode, heuristic='manhattan', preferredPath=None):
        '''Estimate :math:`h(n)`: the cost of the cheapest path from this
        node to the goal node.

        Either ``manhattan`` (sum of the cathetus of the right triangle
        defined by the two nodes) or ``euclidean`` (its hypotenuse) may be
        chosen. When a ``PreferredPath`` is given, the node's distance to
        that polyline (times its factor) is added to the estimate.

        Args:
            goalNode (`Node` object): object with the structure of the
                goal node.
            heuristic (`str`): Either ``manhattan`` or ``euclidean``;
                ``manhattan`` is the default (and the fallback when an
                invalid name is given).
            preferredPath (`PreferredPath` object): Optional polyline to
                force the path. ``None`` is the default value.

        Returns:
            (`int` or `float`): estimated heuristic distance of the\
                optimum path; also stored in ``self.hCost``.

        Examples:
            >>> from pybimstab.astar import Node
            >>> goalNode = Node(pos=(9, 9))
            >>> node = Node(pos=(0, 0))
            >>> node.getHcost(goalNode, heuristic='manhattan')
            18
        '''
        from shapely.geometry import LineString, Point
        # Optional penalty: distance from this node to the preferred path.
        extraCost = 0
        if preferredPath is not None:
            polyline = LineString(preferredPath.coordsIdx.T)
            extraCost = Point(self.pos).distance(polyline) * preferredPath.factor
        (row0, col0), (row1, col1) = self.pos, goalNode.pos
        if heuristic == 'euclidean':
            cost = ((row0 - row1)**2 + (col0 - col1)**2)**0.5
        else:
            if heuristic != 'manhattan':
                # Invalid name: warn and fall back to manhattan.
                print('Invalid heuristic, You must have selected manhattan ' +
                      'or euclidean. manhattan selected by default')
            cost = abs(row0 - row1) + abs(col0 - col1)
        cost += extraCost
        self.hCost = cost
        return cost

    def getGcost(self, fatherNode):
        '''Compute :math:`g(n)`: the cumulated length of the traveled path
        from the start node to this node.

        Horizontal or vertical steps cost 1; diagonal steps cost 1.4142.

        Args:
            fatherNode (``Node`` object): object with the structure\
                of the current node's father.

        Returns:
            (`int` or `float`): traveled-path length from the start node\
                to this node; also stored in ``self.gCost``.

        Examples:
            >>> from pybimstab.astar import Node
            >>> node = Node(pos=(9, 9))
            >>> fatherNode = Node(pos=(9, 8), gCost=15)
            >>> node.getGcost(fatherNode)
            16
        '''
        from numpy import array
        # A shared row or column index means a straight (non-diagonal) step.
        straightStep = any(array(self.pos) == array(fatherNode.pos))
        cost = fatherNode.gCost + (1 if straightStep else 1.4142)
        self.gCost = cost
        return cost
# %%
class Astar:
'''Creates an instance of an object that defines the optimum path into a
grid-graph maze, from a start node to a goal node given. ::
Astar(grid, startNode, goalNode, heuristic='manhattan',
reverseLeft=True, reverseUp=True, preferredPath=None)
Attributes:
gridGraph (`MazeStructure` object): object with the structure\
of maze where is wanted to find the optimum path.
startNode (`tuple`, `list` or `numpy.ndarray`): indexes of \
the ``gridGraph.matrix`` where the initial node is located. It has\
to be a matrix cell, *ie*, ``gridGraph.matrix[startNode]==0``.
goalNode (`tuple`, `list` or `numpy.ndarray`): indexes of the\
``gridGraph.matrix`` where the ending node is located. It has to\
be a matrix cell, *ie*, ``gridGraph.matrix[goalNode]==0``.
heuristic (`str`): Name of the geometric model to determine the\
heuristic distance. It must be selected either ``manhattan``\
or ``euclidean``. The first one is the default value.
reverseLeft (`bool`): Logical variable to allow or not reverses\
movements to the left. Default value is ``True``.
reverseUp (`bool`): Logical variable to allow or not reverses\
movements to upward. Default value is ``True``.
*forcedPath (`PreferredPath` object): Optional aguments to force the\
optimum path close to a specific polyline.
Note:
The class ``Astar`` requires `NumPy <http://www.numpy.org/>`_\
and `Matplotlib <https://matplotlib.org/>`_.
Examples:
>>> from numpy import array
>>> from pybimstab.astar import PreferredPath, Astar
>>> grid = array([[ 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
>>> [ 0, 0, 0, 0, 1, 1, 0, 0, 0, 0],
>>> [ 0, 0, 0, 1, 1, 0, 1, 0, 1, 0],
>>> [ 0, 1, 1, 0, 0, 0, 0, 1, 0, 0],
>>> [ 0, 0, 0, 1, 0, 0, 0, 1, 1, 1],
>>> [ 0, 0, 1, 0, 1, 1, 0, 0, 0, 0],
>>> [ 0, 0, 1, 0, 1, 0, 1, 0, 0, 0],
>>> [ 1, 1, 0, 1, 0, 0, 0, 0, 1, 1],
>>> [ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
>>> [ 0, 0, 0, 0, 1, 0, 1, 0, 0, 1],
>>> [ 0, 1, 0, 0, 0, 1, 0, 0, 0, 0],
>>> [ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
>>> [ 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
>>> [ 1, 0, 0, 0, 1, 0, 0, 0, 0, 0]
>>> ])
>>> coordsIdx = array([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
>>> 13, 13, 13, 13, 13, 13, 13, 13, 13, 13],
>>> [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
>>> 2, 3, 4, 5, 6, 7, 8, 9]])
>>> preferredPath = PreferredPath(coordsIdx, factor=1)
>>> astar = Astar(grid, startNode=(0, 0), goalNode=(13, 9),
>>> heuristic='manhattan', reverseLeft=True,
>>> reverseUp=True, preferredPath=preferredPath)
>>> astar.__dict__.keys()
dict_keys(['grid', 'startNode', 'goalNode', 'heuristic', 'reverseLeft',
'reverseUp', 'preferredPath', 'mazeStr', 'optimumPath'])
'''
def __init__(self, grid, startNode, goalNode, heuristic='manhattan',
reverseLeft=True, reverseUp=True, preferredPath=None):
'''
Astar(grid, startNode, goalNode, heuristic='manhattan',
reverseLeft=True, reverseUp=True, preferredPath=None)
'''
self.grid = grid
self.startNode = startNode
self.goalNode = goalNode
self.heuristic = heuristic
self.reverseLeft = reverseLeft
self.reverseUp = reverseUp
self.preferredPath = preferredPath
# for defining the maze structure
self.defineMazeStr()
# Get the optimum path via A star algortihm
self.getPath()
def defineMazeStr(self):
'''Defines the clean structure of the grid-graph using objects
instaced from the class ``Node`` for each node (or cell of the grid).
Those cells with value equal to one or minus one will have a g-value
equal to infinite because over those cells, a path is impossible to be
traced.
Returns:
(`numpy.ndarray`): array with the same shape of the input grid\
where each cell is an object from the class ``Node``\
with the structure of its respective node.
Examples:
>>> # after executing the example of the class
>>> astar.mazeStr[0, 0].__dict__
{'father': (0, 0), 'gCost': 0, 'hCost': 22.0, 'pos': (0, 0),
'val': 0}
'''
import numpy as np
numberRows, numberCols = self.grid.shape
mazeStr = np.empty((numberRows, numberCols), dtype=object)
for m in range(numberRows):
for n in range(numberCols):
mazeStr[m, n] = Node(pos=(m, n), val=self.grid[m, n])
# blocks or outside cells have infinite value
if abs(self.grid[m, n]):
mazeStr[m, n].gCost = np.inf
setattr(self, 'mazeStr', mazeStr)
# setting the structure to the start node
mazeStr[self.startNode].father = self.startNode
mazeStr[self.startNode].getHcost(goalNode=mazeStr[self.goalNode],
heuristic=self.heuristic,
preferredPath=self.preferredPath)
mazeStr[self.startNode].gCost = 0
return mazeStr
def __checkerrors(self):
'''Function to check if there is any problem with either the start
or goal nodes. It includes either the existence of a block or an
unallowed cell in those nodes.'''
if self.mazeStr[self.startNode].val == -1:
raise ValueError('Start node: not in the allowed cells')
elif self.mazeStr[self.startNode].val == 1:
raise ValueError('Start node: is a block-cell')
elif self.mazeStr[self.goalNode].val == -1:
raise ValueError('Goal node: not in the allowed cells')
elif self.mazeStr[self.goalNode].val == 1:
raise ValueError('Goal node: is a block-cell')
return
def getNeighbours(self, node):
'''Method for obtaining the possible neighbours of an specific node
that belongs to the grid given.
Each neighbour is given as a tuple with the indexes of the grid.
Args:
node (``Node`` object): object with the structure of the node which
is wanted to know its possible neighbours.
Returns:
(`list`): Tuples with the indexes of possible neighbours of the\
node in question.
Examples:
>>> # after executing the example of the class
>>> from pybimstab.astar import Node
>>> astar.getNeighbours(Node(pos=(1, 1)))
[(0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0), (1, 0), (0, 0)]
>>> astar.getNeighbours(Node(pos=(0, 0)))
[(0, 1), (1, 1), (1, 0)]
>>> astar.getNeighbours(Node(pos=(0, 1)))
[(0, 2), (1, 2), (1, 1), (1, 0), (0, 0)]
>>> astar.getNeighbours(Node(pos=(0, 2)))
[(1, 2), (1, 1), (0, 1)]
>>> astar.getNeighbours(Node(pos=(1, 2)))
[(0, 2), (2, 2), (2, 1), (1, 1), (0, 1)]
>>> astar.getNeighbours(Node(pos=(2, 2)))
[(1, 2), (2, 1), (1, 1)]
>>> astar.getNeighbours(Node(pos=(2, 1)))
[(1, 1), (1, 2), (2, 2), (2, 0), (1, 0)]
>>> astar.getNeighbours(Node(pos=(2, 0)))
[(1, 0), (1, 1), (2, 1)]
>>> astar.getNeighbours(Node(pos=(1, 0)))
[(0, 0), (0, 1), (1, 1), (2, 1), (2, 0)]
'''
i, j = node.pos
numberRows, numberCols = self.grid.shape
allNeighbours = [(i-1, j), (i-1, j+1), (i, j+1), (i+1, j+1), (i+1, j),
(i+1, j-1), (i, j-1), (i-1, j-1)]
neighbours = list()
for neighbour in allNeighbours:
if neighbour[0] < 0 or neighbour[1] < 0 or \
neighbour[0] == numberRows or neighbour[1] == numberCols:
continue
if not self.reverseLeft and neighbour[1] < j:
continue
if not self.reverseUp and neighbour[0] > i:
continue
neighbours.append(neighbour)
return neighbours
def getWayBack(self, node):
'''Method for obtaining the whole way back of an specific node which
has been opened by the *A star* algorithm.
Args:
node (``Node`` object): object with the structure of the node which
is wanted to know its way back.
Returns:
(`numpy.ndarray`): :math:`\\left(2 \\times n \\right)` array\
where :math:`n` is the number of nodes where the path has\
crossed; the first row of the array contains the abscisses and\
the second one contains the ordinates of the nodes into the\
grid-graph
Examples:
>>> import numpy as np
>>> from pybimstab.astar import PreferredPath, Node, Astar
>>> grid = np.zeros((3,3))
>>> grid[1:3, 0:2] = 1
>>> astar = Astar(grid, startNode=(0, 0), goalNode=(2, 2),
>>> heuristic='manhattan', reverseLeft=True,
>>> reverseUp=True, preferredPath=None)
>>> # returning the way back
>>> astar.getWayBack(astar.mazeStr[2, 2])
array([[2, 1, 0, 0],
[2, 1, 1, 0]])
>>> astar.getWayBack(astar.mazeStr[1, 2])
array([[1, 0, 0],
[2, 1, 0]])
>>> astar.getWayBack(astar.mazeStr[1, 1])
ValueError: Input node is a block. It doesn't have a wayback
'''
import numpy as np
if node.val == -1:
raise ValueError("Input node is outside the allowed cells")
elif node.val == 1:
raise ValueError(
"Input node is a block. It doesn't have a wayback")
pathX = list()
pathY = list()
if node.father is not None:
while node.father != node.pos:
pathX.append(node.pos[1])
pathY.append(node.pos[0])
node = self.mazeStr[node.father]
pathX.append(self.startNode[1])
pathY.append(self.startNode[0])
else:
pathX = []
pathY = []
wayBack = np.array([pathY, pathX])
return wayBack
    def getPath(self):
        '''Runs the :math:`\\mathrm{A}^\\ast` search (Hart, Nilsson & Raphael
        (1968) `<https://doi.org/10.1109/TSSC.1968.300136>`_) over the
        grid-graph and stores the result in the ``optimumPath`` attribute.

        Returns:
            (`numpy.ndarray`): :math:`\\left(2 \\times n \\right)` array\
                where :math:`n` is the number of nodes where the path has\
                crossed; the first row of the array contains the ordinates\
                and the second one contains the abscissas of the nodes into\
                the grid-graph.

        Raises:
            ValueError: When no path exists between the start and goal nodes,
                or when either of them lies on a blocked or not-allowed cell.

        Examples:
            >>> import numpy as np
            >>> from pybimstab.astar import Astar
            >>> grid = np.zeros((3, 3))
            >>> grid[1:3, 0:2] = 1
            >>> astar = Astar(grid, startNode=(0, 0), goalNode=(2, 2),
            >>>               heuristic='manhattan', reverseLeft=True,
            >>>               reverseUp=True, preferredPath=None)
            >>> astar.getPath()
            array([[2, 1, 0, 0],
                   [2, 1, 1, 0]])
        '''
        import numpy as np
        # Check possible errors in the goal and start cells.
        self.__checkerrors()
        # Required lists
        openList = list()  # open nodes for potential paths
        closeList = list()  # open nodes that have been ruled out
        openList.append(self.mazeStr[self.startNode])
        continueWhile = True
        # Main loop
        while continueWhile:
            # If there is not a solution, stop the program.
            if len(openList) == 0:
                raise ValueError('There is not a path between the start ' +
                                 'and goal nodes')
            # f(n) = g(n) + h(n) for every open node.
            # NOTE(review): getGcost/getHcost also mutate each node's cached
            # gCost/hCost attributes on every iteration.
            costList = [node.getGcost(fatherNode=self.mazeStr[node.father]) +
                        node.getHcost(goalNode=self.mazeStr[self.goalNode],
                                      heuristic=self.heuristic,
                                      preferredPath=self.preferredPath)
                        for node in openList]
            # Extract tail-node of current optimum path (currentNode)
            minCostIdx = costList.index(min(costList))  # Index in the list
            currentNode = openList[minCostIdx]  # index of the current node
            openList.pop(minCostIdx)  # delete currentNode from openList
            closeList.append(currentNode)  # add currentNode to closeList
            neighbours = self.getNeighbours(currentNode)  # get the neighbours
            # working on the neighbours of the current node
            for neighbour in neighbours:
                neighbourStr = self.mazeStr[neighbour]
                # Skip impassable cells (infinite g-cost) and closed nodes.
                if (neighbourStr.gCost is not None and
                        np.isinf(neighbourStr.gCost)) or \
                        neighbourStr in closeList:
                    continue
                # verifying if some neighbour is the goal point
                if neighbour == self.goalNode:
                    self.mazeStr[self.goalNode].father = currentNode.pos
                    self.mazeStr[neighbour].getGcost(
                        fatherNode=self.mazeStr[currentNode.pos])
                    self.mazeStr[neighbour].getHcost(
                        goalNode=self.mazeStr[self.goalNode],
                        heuristic=self.heuristic,
                        preferredPath=self.preferredPath)
                    continueWhile = False
                    break
                # If no neighbour is the goal node, A Star continues.
                else:
                    # If the neighbour is in openList: if true, compare the
                    # old gCost (from the previous father) and new gCost (from
                    # the current point which could be the new father) if new
                    # cost is bigger than the old one, leave the original
                    # father, else, change it to the new one.
                    if neighbourStr in openList:
                        oldGcost = neighbourStr.gCost
                        oldFather = neighbourStr.father
                        # changing the attributes: father is current node.
                        self.mazeStr[neighbour].father = currentNode.pos
                        newGcost = self.mazeStr[neighbour].getGcost(
                            fatherNode=self.mazeStr[currentNode.pos])
                        if newGcost > oldGcost:
                            # Coming back to the previous attributes.
                            self.mazeStr[neighbour].father = oldFather
                            self.mazeStr[neighbour].getGcost(
                                fatherNode=self.mazeStr[oldFather])
                    # If the neighbour is not in the closeList, put it in the
                    # openList.
                    elif neighbourStr not in closeList:
                        self.mazeStr[neighbour].father = currentNode.pos
                        self.mazeStr[neighbour].getGcost(
                            fatherNode=self.mazeStr[currentNode.pos])
                        self.mazeStr[neighbour].getHcost(
                            goalNode=self.mazeStr[self.goalNode],
                            heuristic=self.heuristic,
                            preferredPath=self.preferredPath)
                        openList.append(self.mazeStr[neighbour])
        # Reconstruct the path from the goal node back to the start node.
        optimumPath = self.getWayBack(self.mazeStr[self.goalNode])
        setattr(self, 'optimumPath', optimumPath)
        return optimumPath
    def plot(self, plotPreferredPath=False):
        '''Generates a graphic of the grid and the optimum path returned
        from the ``getPath`` method.

        Args:
            plotPreferredPath (`bool`): whether the ``preferredPath``
                polyline (when one was given) is also drawn over the grid.
                ``False`` is the default value.

        Returns:
            (`matplotlib.figure.Figure`): object with the matplotlib structure\
                of the plot. You might use it to save the figure for example.

        Examples:
            >>> import numpy as np
            >>> from pybimstab.astar import Astar
            >>> grid = np.zeros((3, 3))
            >>> grid[1:3, 0:2] = 1
            >>> astar = Astar(grid, startNode=(0, 0), goalNode=(2, 2))
            >>> fig = astar.plot()
        '''
        from matplotlib import pyplot as plt
        from matplotlib.colors import LinearSegmentedColormap as newcmap
        import numpy as np
        # Variables to control the color map and its legend
        m, n = self.grid.shape  # dimension of the grid
        yCells, xCells = np.mgrid[slice(-0.5, m-0.5+1, 1),  # grid coordinates
                                  slice(-0.5, n-0.5+1, 1)]
        # Three-color map when the grid also marks not-allowed (-1) cells.
        if np.any(self.grid == -1):  # colormap
            cmap = newcmap.from_list(
                'BIMcmap', ['white', 'lightgray', 'black'], 3)
            ticks = [-1+0.333, 0, 1-0.333]
            ticksLabels = ['Not allowed cells', 'Allowed cells',
                           'Hindered cells']
        else:
            cmap = newcmap.from_list('BIMcmap', ['lightgray', 'black'], 2)
            ticks = [0.25, 0.75]
            ticksLabels = ['Allowed cells', 'Hindered cells']
        # Cell edges are hidden for big grids to keep the plot readable.
        if m > 50 or n > 50:
            edgecolor = 'None'
        else:
            edgecolor = 'k'
        # Plot body
        fig = plt.figure()
        ax = fig.add_subplot(111)
        bar = ax.pcolor(xCells, yCells, self.grid, cmap=cmap,
                        edgecolor=edgecolor)
        ax.plot(self.startNode[1], self.startNode[0], '*r', label='Start node')
        ax.plot(self.goalNode[1], self.goalNode[0], '.r', label='Goal node')
        if plotPreferredPath and self.preferredPath is not None:
            ax.plot(self.preferredPath.coordsIdx[1],
                    self.preferredPath.coordsIdx[0],
                    ':r', lw=1.5, label='Preferred path')
        ax.plot(self.optimumPath[1], self.optimumPath[0], '-r', lw=1.5,
                label='Optimum path')
        # Configuring the colorbar
        bar = plt.colorbar(bar, ax=ax, ticks=ticks, pad=0.01,
                           shrink=0.15, aspect=3)
        bar.ax.set_yticklabels(ticksLabels, fontsize='small')
        # Plot settings
        ax.set_aspect(1)
        ax.legend(fontsize='small', bbox_to_anchor=(1.005, 1), loc=2)
        plt.gca().invert_yaxis()  # invert y-axis according to the grid notation
        fig.tight_layout()
        return fig
# %%
'''
BSD 2 license.
Copyright (c) 2018, Universidad Nacional de Colombia, <NAME>
Araque and <NAME>.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
| [
"matplotlib.pyplot.gca",
"matplotlib.pyplot.colorbar",
"numpy.any",
"shapely.geometry.Point",
"numpy.array",
"matplotlib.pyplot.figure",
"shapely.geometry.LineString",
"numpy.empty",
"numpy.isinf",
"matplotlib.colors.LinearSegmentedColormap.from_list"
] | [((3406, 3422), 'numpy.array', 'array', (['coordsIdx'], {}), '(coordsIdx)\n', (3411, 3422), False, 'from numpy import array\n'), ((14228, 14276), 'numpy.empty', 'np.empty', (['(numberRows, numberCols)'], {'dtype': 'object'}), '((numberRows, numberCols), dtype=object)\n', (14236, 14276), True, 'import numpy as np\n'), ((20102, 20126), 'numpy.array', 'np.array', (['[pathY, pathX]'], {}), '([pathY, pathX])\n', (20110, 20126), True, 'import numpy as np\n'), ((34630, 34653), 'numpy.any', 'np.any', (['(self.grid == -1)'], {}), '(self.grid == -1)\n', (34636, 34653), True, 'import numpy as np\n'), ((35245, 35257), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (35255, 35257), True, 'from matplotlib import pyplot as plt\n'), ((35944, 36014), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['bar'], {'ax': 'ax', 'ticks': 'ticks', 'pad': '(0.01)', 'shrink': '(0.15)', 'aspect': '(3)'}), '(bar, ax=ax, ticks=ticks, pad=0.01, shrink=0.15, aspect=3)\n', (35956, 36014), True, 'from matplotlib import pyplot as plt\n'), ((7196, 7233), 'shapely.geometry.LineString', 'LineString', (['preferredPath.coordsIdx.T'], {}), '(preferredPath.coordsIdx.T)\n', (7206, 7233), False, 'from shapely.geometry import LineString, Point\n'), ((7254, 7269), 'shapely.geometry.Point', 'Point', (['self.pos'], {}), '(self.pos)\n', (7259, 7269), False, 'from shapely.geometry import LineString, Point\n'), ((34686, 34750), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'newcmap.from_list', (['"""BIMcmap"""', "['white', 'lightgray', 'black']", '(3)'], {}), "('BIMcmap', ['white', 'lightgray', 'black'], 3)\n", (34703, 34750), True, 'from matplotlib.colors import LinearSegmentedColormap as newcmap\n'), ((34958, 35013), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'newcmap.from_list', (['"""BIMcmap"""', "['lightgray', 'black']", '(2)'], {}), "('BIMcmap', ['lightgray', 'black'], 2)\n", (34975, 35013), True, 'from matplotlib.colors import LinearSegmentedColormap as newcmap\n'), ((8883, 
8898), 'numpy.array', 'array', (['self.pos'], {}), '(self.pos)\n', (8888, 8898), False, 'from numpy import array\n'), ((8902, 8923), 'numpy.array', 'array', (['fatherNode.pos'], {}), '(fatherNode.pos)\n', (8907, 8923), False, 'from numpy import array\n'), ((36231, 36240), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (36238, 36240), True, 'from matplotlib import pyplot as plt\n'), ((26350, 26378), 'numpy.isinf', 'np.isinf', (['neighbourStr.gCost'], {}), '(neighbourStr.gCost)\n', (26358, 26378), True, 'import numpy as np\n')] |
import json
import numpy as np
import pandas as pd
from pyplan_engine.classes.evaluators.BaseEvaluator import BaseEvaluator
from pyplan_engine.common.classes.filterChoices import filterChoices
from pyplan_engine.common.classes.indexValuesReq import IndexValuesReq
from cubepy.cube import kindToString
class PandasEvaluator(BaseEvaluator):
PAGESIZE = 100
    def evaluateNode(self, result, nodeDic, nodeId, dims=None, rows=None, columns=None, summaryBy="sum", bottomTotal=False, rightTotal=False, fromRow=0, toRow=0):
        """Evaluates a pandas node result as a paginated pivot table.

        Args:
            result: raw node result; normalized through
                ``prepareDataframeForTable`` before use.
            nodeDic: dict mapping node id -> node object.
            nodeId: id of the evaluated node (attached to the result).
            dims: dimensions used only as filters (not pivoted).
            rows/columns: pivot row/column definitions; each item carries a
                ``field`` shaped like "index.nodeId" plus optional selected
                ``values`` that become index filters.
            summaryBy: aggregation name - "sum" (default), "avg", "max" or
                "min"; mapped to NaN-safe numpy reducers.
            bottomTotal/rightTotal: when True, append a "Total" row/column.
            fromRow/toRow: 1-based pagination bounds (defaults to 1..100).

        Returns:
            The object produced by ``createResult``: JSON in pandas 'split'
            orientation, plus paging info when the frame exceeds PAGESIZE
            rows.
        """
        # Map the aggregation name to its NaN-safe numpy reducer.
        sby = np.nansum
        if summaryBy == 'avg':
            sby = np.nanmean
        elif summaryBy == 'max':
            sby = np.nanmax
        elif summaryBy == 'min':
            sby = np.nanmin
        # Normalize pagination bounds (1-based, inclusive).
        if (fromRow is None) or int(fromRow) <= 0:
            fromRow = 1
        if (toRow is None) or int(toRow) < 1:
            toRow = 100
        fromRow = int(fromRow)
        toRow = int(toRow)
        _filters = {}
        _rows = []
        _columns = []
        theResult = self.prepareDataframeForTable(result)
        # Collect pivot axes and selected-value filters; field names come in
        # as "index.nodeId", only the plain index name is kept.
        if not rows is None:
            for row in rows:
                if self.hasDim(theResult, str(row["field"]).split(".")[0]):
                    _rows.append(str(row["field"]).split(".")[0])
                    self.addToFilter(row, _filters)
        if not columns is None:
            for column in columns:
                if self.hasDim(theResult, str(column["field"]).split(".")[0]):
                    _columns.append(str(column["field"]).split(".")[0])
                    self.addToFilter(column, _filters)
        if not dims is None:
            for dim in dims:
                if self.hasDim(theResult, str(dim["field"]).split(".")[0]):
                    self.addToFilter(dim, _filters)
        res = None
        pageInfo = None
        dfResult = None
        if len(_rows) == 0 and len(_columns) == 0:
            # No pivot axes requested: aggregate everything to one row.
            dfResult = self.applyFilter(theResult, _filters)
            # if have indexes sum all
            if not dfResult.index is None and not dfResult.index.names is None and len(dfResult.index.names) > 0 and not dfResult.index.names[0] is None:
                serieResult = dfResult.agg(sby)
                dfResult = pd.DataFrame({"total": serieResult}).T
        else:
            # pivot_table needs a non-empty index; when only columns were
            # given, pivot with the axes swapped and transpose afterwards.
            needT = False
            if len(_rows) == 0:
                needT = True
                aux = _rows
                _rows = _columns
                _columns = aux
            _filteredDataFrame = self.applyFilter(theResult, _filters)
            # Don't use margins = True to obtain totals. This have a bug for dataframes with more than 5 level indexes
            dfResult = pd.DataFrame.pivot_table(
                _filteredDataFrame, index=_rows, columns=_columns, aggfunc=sby, margins=False, margins_name="Total")
            if needT:
                dfResult = dfResult.T
                aux = _rows
                _rows = _columns
                _columns = aux
        # Totals are appended manually (see margins note above).
        if bottomTotal and dfResult.shape[0] > 1:
            row_total = sby(dfResult.values, axis=0)
            new_values = np.concatenate(
                [dfResult.values, [row_total]], axis=0)
            new_index = pd.Index(np.concatenate(
                [dfResult.index.values, ["Total"]]))
            _df = pd.DataFrame(
                data=new_values, columns=dfResult.columns, index=new_index)
            dfResult = _df
        if rightTotal and dfResult.shape[1] > 1:
            row_total = sby(dfResult.values, axis=1)
            new_values = np.concatenate(
                [dfResult.values, row_total.reshape(row_total.size, 1)], axis=1)
            new_columns = np.concatenate([dfResult.columns, ["Total"]])
            _df = pd.DataFrame(
                data=new_values, columns=new_columns, index=dfResult.index)
            dfResult = _df
        # Paginate big frames; small frames are capped at 300 rows.
        if (dfResult.shape[0] > self.PAGESIZE):
            if int(toRow) > dfResult.shape[0]:
                toRow = dfResult.shape[0]
            pageInfo = {
                "fromRow": int(fromRow),
                "toRow": int(toRow),
                "totalRows": dfResult.shape[0]
            }
            #res = dfResult[fromRow-1:toRow].to_json(orient='split')
            _range = list(range(fromRow-1, toRow))
            # The "Total" row (last one) is kept on every page.
            if bottomTotal:
                _range = _range + [len(dfResult)-1]
            res = dfResult.iloc[_range].to_json(
                orient='split', date_format='iso')
        else:
            res = dfResult[:300].to_json(orient='split', date_format='iso')
        return self.createResult(res, type(theResult), resultIsJson=True, pageInfo=pageInfo, node=nodeDic[nodeId], onRow=(_rows[0] if len(_rows) > 0 else None), onColumn=(_columns[0] if len(_columns) > 0 else None))
def addToFilter(self, dim, filters):
if "values" in dim and dim["values"] is not None and len(dim["values"]) > 0:
for itemValue in dim["values"]:
field = str(dim["field"]).split(".")[0]
# if (field in filters):
# filters[field] += " or " + field + "==" + "'" + itemValue["value"] + "'"
# else:
# filters[field] = "( " + field + "==" + "'" + itemValue["value"] + "'"
if (field in filters):
filters[field].append(itemValue["value"])
else:
filters[field] = [itemValue["value"]]
def applyFilter(self, result, filters):
if not result is None:
if len(filters) > 0:
res = result
for key in filters:
res = res[res.index.get_level_values(
key).isin(filters[key])]
return res
else:
return result
def hasDim(self, result, dim):
if dim in result.index.names:
return True
elif dim in result.dtypes.index:
return True
elif dim in result.columns:
return True
else:
return False
def isIndexed(self, result):
if not result is None:
result = self.prepareDataframeForTable(result)
obj = result
if isinstance(obj, pd.DataFrame):
return self._isIndexedDataframe(obj)
return False
def getIndexes(self, node, result=None):
res = []
if not node._result is None:
obj = self.prepareDataframeForTable(node._result)
if isinstance(obj, pd.DataFrame):
if self.isIndexed(obj):
res = list(obj.index.names)
res = [x + "." + node.identifier for x in res]
return res
    def getIndexesWithLevels(self, node, result=None):
        """Describes every index level of the node result for the UI.

        Args:
            node: node object; supplies the identifier, and the result when
                *result* is not given.
            result: optional pre-computed result; defaults to
                ``node._result``.

        Returns:
            list: one dict per index level with ``field`` ("index.nodeId"),
                ``name``, ``description``, ``levels`` and, when available,
                ``numberFormat``.
        """
        res = []
        if result is None:
            result = node._result
        if not result is None:
            result = self.prepareDataframeForTable(result)
            if self.isIndexed(result):
                for indexItem in result.index.names:
                    # Keep only the part before a comma in the level name.
                    itemDim = indexItem.split(",")[0]
                    item = {"field": itemDim+"."+node.identifier,
                            "name": itemDim, "description": "", "levels": []}
                    # When a model node with the same name exists, borrow its
                    # title, description and number format.
                    if node.model.existNode(itemDim):
                        levelNode = node.model.getNode(itemDim)
                        if levelNode.title:
                            item["name"] = levelNode.title
                        item["description"] = levelNode.description
                        if levelNode.numberFormat:
                            item["numberFormat"] = levelNode.numberFormat
                    else:
                        # try generate default formatter
                        if "datetime" in result.index.get_level_values(itemDim).dtype.name:
                            item["numberFormat"] = "2,DD,0,,0,0,4,0,$,5,FULL,0"
                    res.append(item)
        return res
    def getIndexValues(self, nodeDic, data: IndexValuesReq, result=None):
        """Returns the values of an index, optionally filtered by a text.

        Args:
            nodeDic: dict mapping node id -> node object.
            data: request carrying ``node_id``, ``index_id`` and the optional
                text filter (``text1`` / ``filter``).
            result: optional pre-computed result of the node.

        Returns:
            list: the (possibly text-filtered) index values.
        """
        res = []
        if data.node_id:
            if (not data.node_id is None) & (data.node_id in nodeDic):
                node = nodeDic[data.node_id]
                if result is None:
                    result = node.result
                if (f"{data.index_id}.{data.node_id}") in self.getIndexes(node):
                    if self.isIndexed(result):
                        # Indexed dataframe: read the matching index level.
                        prepared_result = self.prepareDataframeForTable(
                            node.result)
                        for index in prepared_result.index.levels:
                            if index.name == data.index_id:
                                res = self.checkDateFormat(
                                    index.values).tolist()
                                break
                    else:
                        # Plain dataframe: unique values of the column.
                        res = result[data.index_id].unique().tolist()
        elif data.index_id:
            # No node given: the index id names a node whose result is the
            # list of values itself.
            if result is None:
                result = nodeDic[data.index_id].result
            res = list(result)
        # Optional case-insensitive contains / not-contains text filter.
        if data.text1:
            text1 = data.text1.lower()
            if data.filter == filterChoices.CONTAINS.value:
                res = list(
                    filter(lambda item: text1 in str(item).lower(), res))
            elif data.filter == filterChoices.NOT_CONTAINS.value:
                res = list(
                    filter(lambda item: not text1 in str(item).lower(), res))
        return res
def getIndexType(self, nodeDic, nodeId, indexId):
numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
res = "S"
if nodeId:
if (not nodeId is None) & (nodeId in nodeDic):
node = nodeDic[nodeId]
nodeIndexes = self.getIndexes(node)
if (indexId+"."+nodeId) in nodeIndexes:
if self.isIndexed(node.result):
prepared_result = self.prepareDataframeForTable(
node.result)
for index in prepared_result.index.levels:
if index.name == indexId:
res = "S"
break
else:
#res = list(node.result[indexId].unique())[:1000]
res = "S"
elif indexId in nodeIndexes and isinstance(node.result, cubepy.Cube):
if str(node.result.axis(indexId).values.dtype) in numerics:
res = "N"
else:
res = "S"
return res
def getCubeMetadata(self, result, nodeDic, nodeId):
res = None
_result = self.prepareDataframeForPivot(result)
if isinstance(_result, pd.DataFrame):
res = {
"dims": [],
"measures": [],
"aggregator": "sum",
"isEditable": False,
"nodeProperties": {
"title": nodeDic[nodeId].title if not nodeDic[nodeId].title is None else nodeDic[nodeId].identifier,
"numberFormat": nodeDic[nodeId].numberFormat
}
}
for dim in self.getCubeIndexes(_result, nodeDic, nodeId):
field = dim.split(".")[0]
itemDim = {
"field": dim,
"name": field
}
if field in nodeDic:
if nodeDic[field].numberFormat:
itemDim["numberFormat"] = nodeDic[field].numberFormat
else:
if "datetime" in _result.index.get_level_values(field).dtype.name:
itemDim["numberFormat"] = "2,DD,0,,0,0,4,0,$,5,FULL,0"
res["dims"].append(itemDim)
res["dims"].append({
"field": "data_index",
"name": "Data Index"
})
numerics = ['int16', 'int32', 'int64',
'float16', 'float32', 'float64']
for col in _result.columns:
res["measures"].append({
"field": str(col),
"name": str(col)
})
_result = None
return res
def getCubeIndexes(self, result, nodeDic, nodeId):
res = list(result.index.names)
res = [x + "." + nodeDic[nodeId].identifier for x in res]
return res
    def getCubeValues(self, result, nodeDic, nodeId, query):
        """Evaluate a pivot-cube query (filters, columns, measures) over a node result.

        Returns a list-of-lists table whose first row is the header
        (query columns + "data_index" + "data_value"); the remaining rows are
        the grouped/melted values, capped at 1,000,000 rows.
        """
        _result = self.prepareDataframeForPivot(result)
        if isinstance(_result, pd.DataFrame):
            cube_indexes = self.getCubeIndexes(_result, nodeDic, nodeId)
            # collect requested filter values per index level (field name is
            # the part of "level.node" before the dot)
            _filters = {}
            if not query["filters"] is None:
                for dim in query["filters"]:
                    if "values" in dim and dim["values"] is not None and len(dim["values"]) > 0:
                        for itemValue in dim["values"]:
                            field = str(dim["field"]).split(".")[0]
                            if (field in _filters):
                                _filters[field].append(itemValue)
                            else:
                                _filters[field] = [itemValue]
            _filteredResult = self.applyFilter(_result, _filters)
            # NOTE(review): `item` is rebuilt per column but never stored or
            # returned — this loop appears to be vestigial; confirm before removal.
            for col in query["columns"]:
                if col in cube_indexes:
                    item = {
                        "field": col,
                        "count": 0,
                        "values": _filteredResult.index.get_level_values(col.split(".")[0]).unique().tolist()
                    }
                    # "values": _filteredResult[col.split(".")[0]].unique().tolist()
                    item["count"] = len(item["values"])
            _cols = [x.split(".")[0] for x in query["columns"]]
            if len(_cols) == 0:
                # no grouping columns: return the plain sum of the measures
                listResult = _filteredResult[query["measures"]].sum(
                ).reset_index().values.tolist()
                if len(listResult) > 0 and len(listResult[0]) > 1:
                    if np.isinf(listResult[0][1]):
                        listResult[0][1] = None
                return [["data_index", "data_value"]] + listResult
            else:
                """
                # cambiado por lo de abajo para tome columnas con string
                dfValues = pd.DataFrame.pivot_table(_filteredResult, index=_cols, aggfunc=np.sum)
                firstCol = query["columns"] + ["data_index","data_value"]
                res = [firstCol] + dfValues.reset_index().melt(id_vars=_cols, value_vars=query["measures"]).values.tolist()
                return res
                """
                """
                @cambiado por lo de abajo para permitir multiples agrupaciones por medida...muy picante
                t1= _filteredResult.stack()
                t1.index.set_names("data_index",level=t1.index.nlevels-1,inplace=True)
                t2 = t1.iloc[t1.index.get_level_values("data_index").isin(query["measures"]) ].reset_index()[_cols + ["data_index",0]]
                firstCol = query["columns"] + ["data_index","data_value"]
                t3 = t2.groupby( _cols + ["data_index"]).aggregate({0:"sum"}).reset_index()
                res = [firstCol] + t3.values[:10000].tolist()
                t1=None
                t2=None
                t3=None
                _result = None
                return res
                @cambiado por lo de abajo para permitir custom measures ... extra picante
                """
                _measures = list(query["measures"])
                # custom fill measures: the node may attach a `groupMeasures`
                # dict of callables that recompute measures after grouping
                useCustomFillMeasures = False
                try:
                    # test if have groupMeasures property
                    _aa = _result.groupMeasures
                    useCustomFillMeasures = True
                except AttributeError as ex:
                    pass
                _groupedDF = None
                if useCustomFillMeasures:
                    _groupedDF = _filteredResult.groupby(
                        _cols, sort=False).sum()
                else:
                    _agg = dict()
                    for measure in _measures:
                        # TODO: POR AHORA sum, Mas adelante tomar de la query el tipo de agrupamiento
                        _agg[measure] = "sum"
                    _groupedDF = _filteredResult.groupby(
                        _cols, sort=False).agg(_agg)
                if useCustomFillMeasures:
                    for key in _result.groupMeasures:
                        _groupedDF[key] = _result.groupMeasures[key](
                            _groupedDF)
                # wide -> long: one row per (group, measure) pair
                finalDF = _groupedDF.reset_index().melt(id_vars=_cols,
                                                        value_vars=query["measures"], var_name="data_index", value_name="data_value")
                # fill inf values only if is numeric
                _kind = finalDF["data_value"].dtype.kind
                if _kind in {'i', 'u', 'f', 'c'}:
                    if np.isinf(finalDF["data_value"]).any():
                        finalDF["data_value"][np.isinf(
                            finalDF["data_value"])] = 0
                # fill nan values
                finalDF["data_value"].fillna(0, inplace=True)
                firstCol = query["columns"] + ["data_index", "data_value"]
                sortedColumns = [
                    x.split(".")[0] for x in query["columns"]] + ["data_index", "data_value"]
                res = [firstCol] + \
                    finalDF[sortedColumns].values[:1000000].tolist()
                _result = None
                return res
def getCubeDimensionValues(self, result, nodeDic, nodeId, query):
_result = self.prepareDataframeForPivot(result)
if isinstance(_result, pd.DataFrame):
if len(query["columns"]) > 0:
dimension = query["columns"][-1]
if dimension in nodeDic[nodeId].indexes:
#uniquelist = _result[dimension.split(".")[0]].unique()
uniquelist = _result.index.get_level_values(
dimension.split(".")[0]).unique()
# uniquelist.sort()
return uniquelist.sort_values().tolist()[:1000]
return []
def previewNode(self, nodeDic, nodeId):
from pyplan_engine.classes.Helpers import Helpers
from sys import getsizeof
res = {
"resultType": str(type(nodeDic[nodeId].result)),
"dims": [],
"columns": [],
"console": nodeDic[nodeId].lastEvaluationConsole,
"preview": ""
}
if isinstance(nodeDic[nodeId].result, pd.DataFrame):
cube = nodeDic[nodeId].result
if self.isIndexed(cube):
res["dims"] = list(cube.index.names)
for idx, col in enumerate(cube.columns.values[:500]):
res["columns"].append(
str(col) + " (" + kindToString(cube.dtypes[idx].kind) + ")")
res["preview"] += "Rows: " + str(len(cube.index))
#res += "\nColumns: " + ', '.join([''.join(row) for row in cube.columns.values[:500]])
res["preview"] += "\nShape: " + str(cube.shape)
res["preview"] += "\nMemory: " + \
str(round(cube.memory_usage(deep=True).sum() / 1024/1024, 2)) + " Mb"
#res["preview"] += "\nValues: \n" + str(cube.head(20))
res["preview"] += "\nValues: \n" + cube.head(20).to_string()
elif isinstance(nodeDic[nodeId].result, pd.Series):
serie = nodeDic[nodeId].result
if self.isIndexed(serie):
res["dims"] = list(serie.index.names)
res["preview"] += "Rows: " + str(len(serie.index))
res["preview"] += "\nMemory: " + \
str(round(serie.memory_usage(deep=True) / 1024/1024, 2)) + " Mb"
#res["preview"] += "\nValues: \n" + str(serie.head(20))
res["preview"] += "\nValues: \n" + serie.head(20).to_string()
elif isinstance(nodeDic[nodeId].result, pd.Index):
res["preview"] = str(nodeDic[nodeId].result)[:1000]
return json.dumps(res)
def ensureDataFrame(self, result):
res = result
if isinstance(res, pd.Series):
res = pd.DataFrame({"values": res})
return res
def exportFlatNode(self, nodeDic, nodeId, numberFormat, columnFormat, fileName):
if columnFormat == "tab":
columnFormat = "\t"
decimalSep = "."
if numberFormat == 'TSPDSC':
decimalSep = ","
_result = self.ensureDataFrame(nodeDic[nodeId].result)
if isinstance(_result, pd.DataFrame):
_result.to_csv(fileName, sep=columnFormat, encoding="iso-8859-1")
return True
return False
def postCalculate(self, node, result):
"""Method executed after calculate node
"""
if node.nodeClass == "index":
if isinstance(result, pd.Index) and result.name is None:
result.name = node.identifier
def copyAsValues(self, result, nodeDic, nodeId):
""" Copy node as values """
newDef = ""
if isinstance(result, pd.Index):
np.set_printoptions(threshold=np.prod(result.values.shape))
values = np.array2string(result.values, separator=",", precision=20, formatter={
'float_kind': lambda x: repr(x)}).replace('\n', '')
newDef = f"result = pd.Index({values})"
else:
return False
nodeDic[nodeId].definition = newDef
return True
def _isIndexedDataframe(self, dataframe):
"""Return True if dataframe is an indexed dataframe"""
return len(dataframe.index.names) > 1 or not dataframe.index.names[0] is None
    def prepareDataframeForTable(self, result):
        """Prepare a dataframe for use in tables and charts.

        A Series is wrapped in a single-column "values" frame. An indexed
        frame with several columns is stacked so the columns become one
        extra index level named "Measures".
        """
        df = result
        if isinstance(df, pd.Series):
            df = pd.DataFrame({"values": df})
        if self._isIndexedDataframe(df):
            if df.size == 0:
                # empty indexed frame: expose a NaN "values" column
                df["values"] = np.nan
            elif len(df.columns) > 1:
                if isinstance(df.columns, pd.MultiIndex):
                    # flatten multi-level column labels into "a | b" strings
                    df.columns = df.columns.map(' | '.join)
                # move the columns into the row index as a new last level
                df = df.stack()
                if isinstance(df, pd.Series):
                    df = pd.DataFrame({"values": df})
                current_columns_name = list(df.index.names)
                # the freshly stacked level becomes the "Measures" dimension
                current_columns_name[len(current_columns_name)-1] = "Measures"
                df.index.names = current_columns_name
        return df
def prepareDataframeForPivot(self, result):
""" Prepare dataframe for use in pivot cube"""
df = result
if isinstance(df, pd.Series):
df = pd.DataFrame({"values": df})
if self._isIndexedDataframe(df):
if isinstance(df.columns, pd.MultiIndex):
df.columns = df.columns.map(' | '.join)
df = df.select_dtypes(include=['float64', 'int64'])
if df.size == 0:
df["values"] = np.nan
# try to keep group measures
try:
df.groupMeasures = result.groupMeasures
except:
pass
return df
| [
"numpy.prod",
"cubepy.cube.kindToString",
"json.dumps",
"numpy.concatenate",
"pandas.DataFrame",
"numpy.isinf",
"pandas.DataFrame.pivot_table"
] | [((20206, 20221), 'json.dumps', 'json.dumps', (['res'], {}), '(res)\n', (20216, 20221), False, 'import json\n'), ((2670, 2799), 'pandas.DataFrame.pivot_table', 'pd.DataFrame.pivot_table', (['_filteredDataFrame'], {'index': '_rows', 'columns': '_columns', 'aggfunc': 'sby', 'margins': '(False)', 'margins_name': '"""Total"""'}), "(_filteredDataFrame, index=_rows, columns=_columns,\n aggfunc=sby, margins=False, margins_name='Total')\n", (2694, 2799), True, 'import pandas as pd\n'), ((20340, 20369), 'pandas.DataFrame', 'pd.DataFrame', (["{'values': res}"], {}), "({'values': res})\n", (20352, 20369), True, 'import pandas as pd\n'), ((22071, 22099), 'pandas.DataFrame', 'pd.DataFrame', (["{'values': df}"], {}), "({'values': df})\n", (22083, 22099), True, 'import pandas as pd\n'), ((22888, 22916), 'pandas.DataFrame', 'pd.DataFrame', (["{'values': df}"], {}), "({'values': df})\n", (22900, 22916), True, 'import pandas as pd\n'), ((3107, 3161), 'numpy.concatenate', 'np.concatenate', (['[dfResult.values, [row_total]]'], {'axis': '(0)'}), '([dfResult.values, [row_total]], axis=0)\n', (3121, 3161), True, 'import numpy as np\n'), ((3315, 3387), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'new_values', 'columns': 'dfResult.columns', 'index': 'new_index'}), '(data=new_values, columns=dfResult.columns, index=new_index)\n', (3327, 3387), True, 'import pandas as pd\n'), ((3711, 3756), 'numpy.concatenate', 'np.concatenate', (["[dfResult.columns, ['Total']]"], {}), "([dfResult.columns, ['Total']])\n", (3725, 3756), True, 'import numpy as np\n'), ((3779, 3851), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'new_values', 'columns': 'new_columns', 'index': 'dfResult.index'}), '(data=new_values, columns=new_columns, index=dfResult.index)\n', (3791, 3851), True, 'import pandas as pd\n'), ((2222, 2258), 'pandas.DataFrame', 'pd.DataFrame', (["{'total': serieResult}"], {}), "({'total': serieResult})\n", (2234, 2258), True, 'import pandas as pd\n'), ((3220, 3270), 
'numpy.concatenate', 'np.concatenate', (["[dfResult.index.values, ['Total']]"], {}), "([dfResult.index.values, ['Total']])\n", (3234, 3270), True, 'import numpy as np\n'), ((14111, 14137), 'numpy.isinf', 'np.isinf', (['listResult[0][1]'], {}), '(listResult[0][1])\n', (14119, 14137), True, 'import numpy as np\n'), ((21320, 21348), 'numpy.prod', 'np.prod', (['result.values.shape'], {}), '(result.values.shape)\n', (21327, 21348), True, 'import numpy as np\n'), ((22468, 22496), 'pandas.DataFrame', 'pd.DataFrame', (["{'values': df}"], {}), "({'values': df})\n", (22480, 22496), True, 'import pandas as pd\n'), ((17059, 17090), 'numpy.isinf', 'np.isinf', (["finalDF['data_value']"], {}), "(finalDF['data_value'])\n", (17067, 17090), True, 'import numpy as np\n'), ((17144, 17175), 'numpy.isinf', 'np.isinf', (["finalDF['data_value']"], {}), "(finalDF['data_value'])\n", (17152, 17175), True, 'import numpy as np\n'), ((19001, 19036), 'cubepy.cube.kindToString', 'kindToString', (['cube.dtypes[idx].kind'], {}), '(cube.dtypes[idx].kind)\n', (19013, 19036), False, 'from cubepy.cube import kindToString\n')] |
from Hw.H5_CNN_Explaination.data import ImgDataset
from Hw.H5_CNN_Explaination.utils import *
from Hw.H5_CNN_Explaination.model import Classifier
from Hw.H3_CNN.model import CNN5
from Hw.H3_CNN.utils import get_best_checkpoint_path, test, evaluate
import copy
import matplotlib.pyplot as plt
import numpy as np
from lime import lime_image
import os
import torch
from torch import nn
import torch.optim as optim
import argparse
import ast
# Command-line interface for the CNN-explanation experiments.
parser = argparse.ArgumentParser(usage="it's usage tip.", description="--h help info.")
parser.add_argument("--mode", choices=['saliency', 'explaination', 'lime'], required=True, type=str,
                    help="saliency - saliency map; explaination - filter explaination; lime - Local Interpretable ModelAgnostic Explanations",
                    dest="mode")
parser.add_argument("--visible_device", default=0, type=int, help="visible device",
                    dest="visible_device")
parser.add_argument("--data_dir", required=True, type=str, help="the dataset root dir", dest="data_dir")
parser.add_argument("--checkpoint_dir", default="./checkpoints", type=str, help="the output checkpoints dir",
                    dest="checkpoint_dir")
parser.add_argument("--checkpoint_path", default="", type=str, help="the output checkpoints path",
                    dest="checkpoint_path")
args = parser.parse_args()
# Echo all parsed arguments for reproducibility of the run.
print("arguments:")
for arg in vars(args):
    print(arg, ":", getattr(args, arg))
print("-" * 100)
mode = args.mode
data_dir = args.data_dir
checkpoint_dir = args.checkpoint_dir
checkpoint_path = args.checkpoint_path
if not os.path.isdir(checkpoint_dir):
    os.mkdir(checkpoint_dir)
# Use the return value of torch.cuda.is_available() to decide whether a GPU
# environment exists; if so set device to "cuda", otherwise to "cpu".
if torch.cuda.is_available():
    visible_device = args.visible_device
    torch.cuda.set_device(visible_device)
    device = torch.device("cuda")
else:
    device = torch.device("cpu")
print("device: {}".format(device))
########################################################################################################################
# Build the classifier, restore the best (or explicitly given) checkpoint and
# load the evaluation dataset.
model = Classifier().to(device)
loss = nn.CrossEntropyLoss()  # classification task, so CrossEntropyLoss is used
checkpoint_path = checkpoint_path if len(checkpoint_path) != 0 else get_best_checkpoint_path(checkpoint_dir)
print("load checkpoint_path:{}".format(checkpoint_path))
checkpoint = torch.load(checkpoint_path, map_location=device)  # load the checkpoint
model.load_state_dict(checkpoint['model_state_dict'])  # restore the model's learnable parameters
train_set = ImgDataset(data_dir, ["training", "validation"], "eval")
# Indices of the images we want to visualize together
img_indices = [830, 4218, 4707, 8598]
images, labels = train_set.get_batch(img_indices)
########################################################################################################################
# Here we differentiate with respect to the *input* image; the network weights
# must not be updated, hence model.eval() — the same principle as in adversarial
# attacks. Unlike an attack, here we visualize the gradient itself, whereas an
# attack applies sign(gradient) * eps to the input.
if mode == "saliency":
    saliencies = compute_saliency_maps(images, labels, model, device)
    # Draw the images and their saliency maps with matplotlib
    fig, axs = plt.subplots(2, len(img_indices), figsize=(15, 8))
    for row, target in enumerate([images, saliencies]):
        for column, img in enumerate(target):
            axs[row][column].imshow(img.permute(1, 2, 0).detach().numpy())
            # Why permute?
            # In pytorch an image tensor's dimensions are (channels, height, width),
            # but matplotlib expects (height, width, channels) to plot a tensor.
            # permute is a convenient pytorch tool for such dimension reordering.
            # Here img.permute(1, 2, 0) means the resulting tensor's
            # - dimension 0 is the original dimension 1, i.e. height
            # - dimension 1 is the original dimension 2, i.e. width
            # - dimension 2 is the original dimension 0, i.e. channels
    plt.show()
    plt.close()
    # From the 2nd image's saliency the model clearly located the egg yolk.
    # For the 3rd/4th images it is unclear which exact food parts the model
    # uses, but it evidently found the rough outline of the food.
########################################################################################################################
# Visualize one cnn layer of the model (filter activations + the input pattern
# that maximally activates the filter).
if mode == "explaination":
    filter_activations, filter_visualizations = filter_explaination(copy.deepcopy(images), model, device=device,
                                                                        cnnid=34,
                                                                        filterid=0,
                                                                        iteration=10, lr=0.1)
    # Plot: row 0 = original images, row 1 = filter activations,
    # row 2 = gradient-ascent filter visualizations.
    fig, axs = plt.subplots(3, len(img_indices), figsize=(15, 8))
    for i, img in enumerate(images):
        axs[0][i].imshow(img.permute(1, 2, 0).detach().numpy())
    for i, img in enumerate(filter_activations):
        axs[1][i].imshow(normalize(img))
    for i, img in enumerate(filter_visualizations):
        axs[2][i].imshow(normalize(img.permute(1, 2, 0).numpy()))
    plt.show()
    plt.close()
    # Judging by the lines in the images, this filter probably detects lines or
    # even object boundaries; fed images with high-contrast lines, it finds
    # many boundaries to activate on.
########################################################################################################################
# Core idea: segment the image, randomly occlude segments, get the nn model's
# predictions for each occluded variant, then fit a linear model on the
# *segments* so it reproduces the nn model's outputs.
# This embodies "approximate a complex model with a simple one": the linear
# model's weights reveal which parts the complex model focuses on.
if mode == "lime":
    fig, axs = plt.subplots(1, 4, figsize=(15, 8))
    np.random.seed(16)
    # make the experiment reproducible
    for idx, (image, label) in enumerate(zip(images.permute(0, 2, 3, 1).numpy(), labels)):
        x = image.astype(np.double)
        # the lime package expects a numpy array
        explainer = lime_image.LimeImageExplainer()
        def classifier_fn(input):
            return predict(input, model, device)
        explaination = explainer.explain_instance(image=x, classifier_fn=classifier_fn, segmentation_fn=segmentation)
        # Basically the lime explainer only needs two key functions:
        # classifier_fn defines how an image is run through the model to get predictions
        # segmentation_fn defines how the image is segmented
        # doc: https://lime-ml.readthedocs.io/en/latest/lime.html?highlight=explain_instance#lime.lime_image.LimeImageExplainer.explain_instance
        lime_img, mask = explaination.get_image_and_mask(
            label=label.item(),
            positive_only=False,
            hide_rest=False,
            num_features=11,
            min_weight=0.05
        )
        # Turn the explainer's result into an image
        # doc: https://lime-ml.readthedocs.io/en/latest/lime.html?highlight=get_image_and_mask#lime.lime_image.ImageExplanation.get_image_and_mask
        axs[idx].imshow(lime_img)
    plt.show()
    plt.close()
    # In the first three images the model located the food and used its position
    # as the main cue. The exception is the fourth image, where the model seems
    # to prefer recognizing the shape of the "bowl" to decide on the soup class.
    # The bowl's contents are marked red, meaning that looking only *inside*
    # the bowl actually hinders recognition: a round yellow blob without the
    # bowl could be mistaken for other yellow round foods.
# lime 的笔记主要是每个图片输出对一个不同遮挡程度图预测的不同概率值,找一个线性model拟合这些输出概率,就会有一个权重值。 | [
"torch.nn.CrossEntropyLoss",
"argparse.ArgumentParser",
"matplotlib.pyplot.show",
"torch.load",
"lime.lime_image.LimeImageExplainer",
"matplotlib.pyplot.close",
"Hw.H5_CNN_Explaination.data.ImgDataset",
"Hw.H3_CNN.utils.get_best_checkpoint_path",
"torch.cuda.is_available",
"os.path.isdir",
"os.m... | [((449, 527), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'usage': '"""it\'s usage tip."""', 'description': '"""--h help info."""'}), '(usage="it\'s usage tip.", description=\'--h help info.\')\n', (472, 527), False, 'import argparse\n'), ((1744, 1769), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1767, 1769), False, 'import torch\n'), ((2125, 2146), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (2144, 2146), False, 'from torch import nn\n'), ((2382, 2430), 'torch.load', 'torch.load', (['checkpoint_path'], {'map_location': 'device'}), '(checkpoint_path, map_location=device)\n', (2392, 2430), False, 'import torch\n'), ((2519, 2575), 'Hw.H5_CNN_Explaination.data.ImgDataset', 'ImgDataset', (['data_dir', "['training', 'validation']", '"""eval"""'], {}), "(data_dir, ['training', 'validation'], 'eval')\n", (2529, 2575), False, 'from Hw.H5_CNN_Explaination.data import ImgDataset\n'), ((1589, 1618), 'os.path.isdir', 'os.path.isdir', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (1602, 1618), False, 'import os\n'), ((1624, 1648), 'os.mkdir', 'os.mkdir', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (1632, 1648), False, 'import os\n'), ((1816, 1853), 'torch.cuda.set_device', 'torch.cuda.set_device', (['visible_device'], {}), '(visible_device)\n', (1837, 1853), False, 'import torch\n'), ((1867, 1887), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1879, 1887), False, 'import torch\n'), ((1907, 1926), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1919, 1926), False, 'import torch\n'), ((2271, 2311), 'Hw.H3_CNN.utils.get_best_checkpoint_path', 'get_best_checkpoint_path', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (2295, 2311), False, 'from Hw.H3_CNN.utils import get_best_checkpoint_path, test, evaluate\n'), ((3828, 3838), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3836, 3838), True, 'import matplotlib.pyplot as plt\n'), ((3843, 3854), 
'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3852, 3854), True, 'import matplotlib.pyplot as plt\n'), ((4929, 4939), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4937, 4939), True, 'import matplotlib.pyplot as plt\n'), ((4944, 4955), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4953, 4955), True, 'import matplotlib.pyplot as plt\n'), ((5401, 5436), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(4)'], {'figsize': '(15, 8)'}), '(1, 4, figsize=(15, 8))\n', (5413, 5436), True, 'import matplotlib.pyplot as plt\n'), ((5441, 5459), 'numpy.random.seed', 'np.random.seed', (['(16)'], {}), '(16)\n', (5455, 5459), True, 'import numpy as np\n'), ((6640, 6650), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6648, 6650), True, 'import matplotlib.pyplot as plt\n'), ((6655, 6666), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6664, 6666), True, 'import matplotlib.pyplot as plt\n'), ((2094, 2106), 'Hw.H5_CNN_Explaination.model.Classifier', 'Classifier', ([], {}), '()\n', (2104, 2106), False, 'from Hw.H5_CNN_Explaination.model import Classifier\n'), ((4227, 4248), 'copy.deepcopy', 'copy.deepcopy', (['images'], {}), '(images)\n', (4240, 4248), False, 'import copy\n'), ((5665, 5696), 'lime.lime_image.LimeImageExplainer', 'lime_image.LimeImageExplainer', ([], {}), '()\n', (5694, 5696), False, 'from lime import lime_image\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 9/13/20 4:53 PM
# @Author : joshon
# @Site : hsae
# @File : test.py
# @Software: PyCharm
from tensorflow.keras.layers import Lambda ,Input
import tensorflow as tf
import numpy as np
import keras.backend as K
import cv2
# @tf.function
def f():
    """Demo: matmul of two constant matrices plus a variable bias.

    Contrasts Python's print with tf.print; with the @tf.function decorator
    enabled (currently commented out) the two would behave differently
    during tracing vs. execution.
    """
    a = tf.constant([[10,10],[11.,1.]])
    x = tf.constant([[1.,0.],[0.,1.]])
    b = tf.Variable(12.)
    y = tf.matmul(a, x) + b
    print("PRINT: ", y)
    tf.print("TF-PRINT: ", y)
    return y
# Build sample 3-D Keras variables and concatenate two of them along axis 0.
# NOTE(review): t1/t2 are created but unused below — presumably leftovers from
# an earlier experiment.
tt1 = K.variable(np.array([[[0, 22], [29, 38]], [[49, 33], [5, 3]], [[8, 8], [9, 9]]]))
tt2 = K.variable(np.array([[[55, 47], [88, 48]], [[28, 10], [15, 51]], [[5, 5], [6, 6]]]))
t1 = K.variable(np.array([[[1, 2], [2, 3]], [[4, 4], [5, 3]]]))
t2 = K.variable(np.array([[[7, 4], [8, 4]], [[2, 10], [15, 11]]]))
dd3 = K.concatenate([tt1 , tt2] , axis=0)
print("111")
# Earlier experiment kept for reference: slicing an input tensor via Lambda layers.
# def slice(x,index):
#     return x[:,:,index]
#
# a = Input(shape=(4,2))
# x1=Lambda(slice,output_shape=(4,1),arguments={'index':0})(a)
# x2 = Lambda(slice,output_shape=(4,1),arguments={'index':1})(a)
# Lambda(slice,output_shape=(4,1))([a,0])
#
# x1 = tf.reshape(x1,(4,1,1))
# x2 = tf.reshape(x2,(4,1,1)) | [
"tensorflow.Variable",
"numpy.array",
"tensorflow.constant",
"tensorflow.matmul",
"tensorflow.print",
"keras.backend.concatenate"
] | [((829, 862), 'keras.backend.concatenate', 'K.concatenate', (['[tt1, tt2]'], {'axis': '(0)'}), '([tt1, tt2], axis=0)\n', (842, 862), True, 'import keras.backend as K\n'), ((318, 354), 'tensorflow.constant', 'tf.constant', (['[[10, 10], [11.0, 1.0]]'], {}), '([[10, 10], [11.0, 1.0]])\n', (329, 354), True, 'import tensorflow as tf\n'), ((358, 395), 'tensorflow.constant', 'tf.constant', (['[[1.0, 0.0], [0.0, 1.0]]'], {}), '([[1.0, 0.0], [0.0, 1.0]])\n', (369, 395), True, 'import tensorflow as tf\n'), ((397, 414), 'tensorflow.Variable', 'tf.Variable', (['(12.0)'], {}), '(12.0)\n', (408, 414), True, 'import tensorflow as tf\n'), ((470, 495), 'tensorflow.print', 'tf.print', (['"""TF-PRINT: """', 'y'], {}), "('TF-PRINT: ', y)\n", (478, 495), True, 'import tensorflow as tf\n'), ((528, 597), 'numpy.array', 'np.array', (['[[[0, 22], [29, 38]], [[49, 33], [5, 3]], [[8, 8], [9, 9]]]'], {}), '([[[0, 22], [29, 38]], [[49, 33], [5, 3]], [[8, 8], [9, 9]]])\n', (536, 597), True, 'import numpy as np\n'), ((616, 688), 'numpy.array', 'np.array', (['[[[55, 47], [88, 48]], [[28, 10], [15, 51]], [[5, 5], [6, 6]]]'], {}), '([[[55, 47], [88, 48]], [[28, 10], [15, 51]], [[5, 5], [6, 6]]])\n', (624, 688), True, 'import numpy as np\n'), ((707, 753), 'numpy.array', 'np.array', (['[[[1, 2], [2, 3]], [[4, 4], [5, 3]]]'], {}), '([[[1, 2], [2, 3]], [[4, 4], [5, 3]]])\n', (715, 753), True, 'import numpy as np\n'), ((771, 820), 'numpy.array', 'np.array', (['[[[7, 4], [8, 4]], [[2, 10], [15, 11]]]'], {}), '([[[7, 4], [8, 4]], [[2, 10], [15, 11]]])\n', (779, 820), True, 'import numpy as np\n'), ((422, 437), 'tensorflow.matmul', 'tf.matmul', (['a', 'x'], {}), '(a, x)\n', (431, 437), True, 'import tensorflow as tf\n')] |
import io
import json
import requests
from pathlib import Path
import joblib
import numpy as np
import zipfile
def format_neurovault_metadata_url(collection_id):
    """Return the NeuroVault REST endpoint serving a collection's metadata."""
    return "https://neurovault.org/api/collections/{0}".format(collection_id)
def format_neurovault_download_url(collection_id):
    """Return the NeuroVault URL that downloads a collection's images as a ZIP."""
    return "https://neurovault.org/collections/{0}/download".format(collection_id)
def _download_collection(collection_id, target_dir):
    """Download one NeuroVault collection (metadata JSON + images) into target_dir.

    Creates target_dir/<collection_id>/images and extracts the downloaded ZIP
    archive there. Raises requests.HTTPError on a non-success HTTP status.
    """
    collection_dir = target_dir / str(collection_id)
    images_dir = collection_dir / "images"
    images_dir.mkdir(exist_ok=True, parents=True)
    # Download metadata
    metadata_url = format_neurovault_metadata_url(collection_id)
    response = requests.get(metadata_url)
    # raise_for_status() instead of `assert`: asserts vanish under `python -O`
    response.raise_for_status()
    # BUGFIX: response.content is bytes; the old `file.write(str(metadata))`
    # wrote a "b'...'" literal and corrupted the JSON file. Decode instead.
    metadata_path = collection_dir / "collection_metadata.json"
    with open(metadata_path, "w", encoding="utf-8") as file:
        file.write(response.content.decode("utf-8"))
    # Download the image archive
    data_url = format_neurovault_download_url(collection_id)
    response = requests.get(data_url)
    response.raise_for_status()
    # Unzip the images into the collection's images folder
    zipfile.ZipFile(io.BytesIO(response.content)).extractall(images_dir)
def _download_collections(collection_ids, target_dir):
    """Download every collection in collection_ids into target_dir, sequentially."""
    # Plain loop: the previous list comprehension was used only for its side
    # effects and built a throwaway list of None values.
    for collection_id in collection_ids:
        _download_collection(collection_id, target_dir)
def collection_page_download(collection_ids):
    """Sequentially download the given NeuroVault collections.

    Files land in "collection_page_dl" next to this module.
    """
    download_root = Path(__file__).parent / "collection_page_dl"
    download_root.mkdir(exist_ok=True)
    _download_collections(collection_ids=collection_ids, target_dir=download_root)
def parallel_collection_page_download_neurovault(collection_ids, n_jobs=1):
    """Download NeuroVault collections in parallel with n_jobs joblib workers.

    The id list is split into n_jobs chunks and each worker downloads one
    chunk into "collection_page_dl" next to this module.
    """
    download_root = Path(__file__).parent / "collection_page_dl"
    download_root.mkdir(exist_ok=True)
    chunks = np.array_split(collection_ids, n_jobs)
    tasks = (
        joblib.delayed(_download_collections)(chunk, target_dir=download_root)
        for chunk in chunks
    )
    joblib.Parallel(n_jobs=n_jobs)(tasks)
| [
"pathlib.Path",
"io.BytesIO",
"requests.get",
"numpy.array_split",
"joblib.Parallel",
"joblib.delayed"
] | [((667, 693), 'requests.get', 'requests.get', (['metadata_url'], {}), '(metadata_url)\n', (679, 693), False, 'import requests\n'), ((987, 1009), 'requests.get', 'requests.get', (['data_url'], {}), '(data_url)\n', (999, 1009), False, 'import requests\n'), ((1736, 1774), 'numpy.array_split', 'np.array_split', (['collection_ids', 'n_jobs'], {}), '(collection_ids, n_jobs)\n', (1750, 1774), True, 'import numpy as np\n'), ((1780, 1810), 'joblib.Parallel', 'joblib.Parallel', ([], {'n_jobs': 'n_jobs'}), '(n_jobs=n_jobs)\n', (1795, 1810), False, 'import joblib\n'), ((1384, 1398), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1388, 1398), False, 'from pathlib import Path\n'), ((1641, 1655), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1645, 1655), False, 'from pathlib import Path\n'), ((1131, 1147), 'io.BytesIO', 'io.BytesIO', (['data'], {}), '(data)\n', (1141, 1147), False, 'import io\n'), ((1820, 1857), 'joblib.delayed', 'joblib.delayed', (['_download_collections'], {}), '(_download_collections)\n', (1834, 1857), False, 'import joblib\n')] |
# import the necessary packages
import sys
sys.path.append("../")
from pyimagesearch.utils.captchahelper import preprocess
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
from imutils.contours import sort_contours
from imutils import grab_contours
from imutils import paths
import numpy as np
import argparse
import cv2
# construct argument parser and parse arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", required=True, help="path to input directory of images")
ap.add_argument("-m", "--model", required=True, help="path to pre-trained model")
args = vars(ap.parse_args())
print("[INFO] loading pre-trained model...")
model = load_model(args["model"])
print("[INFO] loading input images...")
# sample 10 random captcha images from the input directory
imagePaths = list(paths.list_images(args["input"]))
imagePaths = np.random.choice(imagePaths, size=(10,), replace=False)
for imagePath in imagePaths:
    # pad the grayscale image so digits near the border survive thresholding
    image = cv2.imread(imagePath)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.copyMakeBorder(gray, 20, 20, 20, 20, cv2.BORDER_REPLICATE)
    # binarize with Otsu's method, then find the digit contours
    thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = grab_contours(cnts)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:4]  # keep the 4 largest contours by area
    cnts = sort_contours(cnts)[0]  # sort contours from left-to-right
    output = cv2.merge([gray]*3)
    predictions = []
    for c in cnts:
        # extract the digit ROI with a 5px margin, normalize, and classify it
        (x, y, w, h) = cv2.boundingRect(c)
        roi = gray[y - 5:y + h + 5, x - 5:x + w + 5]
        roi = preprocess(roi, 28, 28)
        roi = img_to_array(roi)
        roi = np.expand_dims(roi, axis=0)
        roi = roi / 255.0
        # +1 shifts the 0-based class index back to the digit label —
        # presumably labels were shifted down at training time; TODO confirm
        pred = model.predict(roi).argmax(axis=1)[0] + 1
        cv2.rectangle(output, (x - 3, y - 3), (x + w + 3, y + h + 3), (0, 255, 0), 2)
        cv2.putText(output, str(pred), (x - 5, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
        predictions.append(str(pred))
    print("[INFO] captcha: {}".format("".join(predictions)))
    cv2.imshow("Captcha", output)
cv2.waitKey(0) | [
"cv2.rectangle",
"cv2.imshow",
"tensorflow.keras.models.load_model",
"imutils.paths.list_images",
"sys.path.append",
"argparse.ArgumentParser",
"cv2.threshold",
"imutils.grab_contours",
"tensorflow.keras.preprocessing.image.img_to_array",
"cv2.waitKey",
"pyimagesearch.utils.captchahelper.preproc... | [((43, 65), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (58, 65), False, 'import sys\n'), ((436, 461), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (459, 461), False, 'import argparse\n'), ((717, 742), 'tensorflow.keras.models.load_model', 'load_model', (["args['model']"], {}), "(args['model'])\n", (727, 742), False, 'from tensorflow.keras.models import load_model\n'), ((849, 904), 'numpy.random.choice', 'np.random.choice', (['imagePaths'], {'size': '(10,)', 'replace': '(False)'}), '(imagePaths, size=(10,), replace=False)\n', (865, 904), True, 'import numpy as np\n'), ((802, 834), 'imutils.paths.list_images', 'paths.list_images', (["args['input']"], {}), "(args['input'])\n", (819, 834), False, 'from imutils import paths\n'), ((947, 968), 'cv2.imread', 'cv2.imread', (['imagePath'], {}), '(imagePath)\n', (957, 968), False, 'import cv2\n'), ((980, 1019), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (992, 1019), False, 'import cv2\n'), ((1031, 1093), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['gray', '(20)', '(20)', '(20)', '(20)', 'cv2.BORDER_REPLICATE'], {}), '(gray, 20, 20, 20, 20, cv2.BORDER_REPLICATE)\n', (1049, 1093), False, 'import cv2\n'), ((1278, 1297), 'imutils.grab_contours', 'grab_contours', (['cnts'], {}), '(cnts)\n', (1291, 1297), False, 'from imutils import grab_contours\n'), ((1476, 1497), 'cv2.merge', 'cv2.merge', (['([gray] * 3)'], {}), '([gray] * 3)\n', (1485, 1497), False, 'import cv2\n'), ((2120, 2149), 'cv2.imshow', 'cv2.imshow', (['"""Captcha"""', 'output'], {}), "('Captcha', output)\n", (2130, 2149), False, 'import cv2\n'), ((2154, 2168), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2165, 2168), False, 'import cv2\n'), ((1108, 1176), 'cv2.threshold', 'cv2.threshold', (['gray', '(0)', '(255)', '(cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)'], {}), '(gray, 0, 255, 
cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)\n', (1121, 1176), False, 'import cv2\n'), ((1404, 1423), 'imutils.contours.sort_contours', 'sort_contours', (['cnts'], {}), '(cnts)\n', (1417, 1423), False, 'from imutils.contours import sort_contours\n'), ((1559, 1578), 'cv2.boundingRect', 'cv2.boundingRect', (['c'], {}), '(c)\n', (1575, 1578), False, 'import cv2\n'), ((1647, 1670), 'pyimagesearch.utils.captchahelper.preprocess', 'preprocess', (['roi', '(28)', '(28)'], {}), '(roi, 28, 28)\n', (1657, 1670), False, 'from pyimagesearch.utils.captchahelper import preprocess\n'), ((1685, 1702), 'tensorflow.keras.preprocessing.image.img_to_array', 'img_to_array', (['roi'], {}), '(roi)\n', (1697, 1702), False, 'from tensorflow.keras.preprocessing.image import img_to_array\n'), ((1717, 1744), 'numpy.expand_dims', 'np.expand_dims', (['roi'], {'axis': '(0)'}), '(roi, axis=0)\n', (1731, 1744), True, 'import numpy as np\n'), ((1836, 1913), 'cv2.rectangle', 'cv2.rectangle', (['output', '(x - 3, y - 3)', '(x + w + 3, y + h + 3)', '(0, 255, 0)', '(2)'], {}), '(output, (x - 3, y - 3), (x + w + 3, y + h + 3), (0, 255, 0), 2)\n', (1849, 1913), False, 'import cv2\n')] |
import numpy as np
from scipy.stats import linregress
import json
import sys
import getopt
try:
from matplotlib import pyplot as plt
except Exception:
import matplotlib
matplotlib.use('pdf')
from matplotlib import pyplot as plt
def save_plot(alg_name, file_suffix, y_label, legend_list, title_prefix,
              is_log=False, axes=None, x_label='vertices', const_ve=0):
    """Finalize the current matplotlib figure and save it under out/pdf/.

    Parameters:
        alg_name: algorithm family name; used in the title and file name.
        file_suffix: appended to the saved file name.
        y_label: y-axis label.
        legend_list: legend entries, one per plotted series.
        title_prefix: leading part of the plot title.
        is_log: when True, label x as a log axis and remove the legend
            from ``axes`` (log-log plots are drawn without a legend).
        axes: axes whose legend is removed when ``is_log`` is set.
        x_label: which quantity varies ('vertices' or 'edges'); the other
            one is reported as held constant at ``const_ve`` in the title.
        const_ve: value of the quantity held constant.
    """
    plt.title('{0} for {1} for {2} {3}'.format(
        title_prefix, alg_name, const_ve,
        # '==' value comparison -- the original used 'is' on string literals,
        # which relies on CPython interning and is not guaranteed.
        'edges' if x_label == 'vertices' else 'vertices'
        )
    )
    plt.legend(legend_list, loc='upper left')
    if is_log:
        plt.xlabel('Log of no. of {0}'.format(x_label))
        axes.get_legend().remove()
    else:
        plt.xlabel('No. of {0}'.format(x_label))
    plt.ylabel(y_label)
    plt.grid()
    plt.ticklabel_format(axis='both',
                         style='sci',
                         scilimits=(-3, 3),
                         useOffset=False)
    plt.savefig('out/pdf/' + alg_name + '_' + file_suffix + '.pdf')
    plt.clf()
def plot_mst():
    """Plot MST benchmark results from out/mst.jsonl.

    Each input line is a JSON object with 'verts', 'edges' and per-algorithm
    'times' (Boruvka, Kruskal).  When the vertex count is constant across
    samples, times are plotted against edge count, otherwise against vertex
    count.  Saves a linear plot and a log-log plot annotated with linregress
    slopes via save_plot().
    """
    mst_data = []
    with open('out/mst.jsonl', 'r') as mst_file:
        for line in mst_file:
            mst_data.append(json.loads(line))
    verts = [item['verts'] for item in mst_data]
    edges = [item['edges'] for item in mst_data]
    times_dicts = [item['times'] for item in mst_data]
    times = {}
    times['Boruvka'] = [time['Boruvka'] for time in times_dicts]
    times['Kruskal'] = [time['Kruskal'] for time in times_dicts]
    if all(vert == verts[0] for vert in verts):
        # constant vertex count -> edge count is the independent variable
        x_label = 'edges'
        const_ve = verts[0]
        plt.plot(edges, times['Boruvka'], 'o-')
        plt.plot(edges, times['Kruskal'], 'o-')
    else:
        x_label = 'vertices'
        const_ve = edges[0]
        plt.plot(verts, times['Boruvka'], 'o-')
        plt.plot(verts, times['Kruskal'], 'o-')
    legend = [
        'Boruvka',
        'Kruskal'
    ]
    save_plot('mst', x_label, 'Time in microseconds',
              legend,
              'Exec time',
              x_label=x_label, const_ve=const_ve)
    linreg_text = ''
    ax = plt.axes()
    for alg in legend:
        # '==' value comparison (the original compared strings with 'is')
        slope, err = plot_log(times[alg],
                              verts if x_label == 'vertices' else edges)
        linreg_text += '{0}: slope={1}, err={2}\n'.format(
            alg, np.around(slope, 3), np.around(err, 3)
        )
    plt.text(0.25, 0.05,  # position of the text relative to axes
             ' Linregress:\n{0}'.format(linreg_text),
             horizontalalignment='left',
             verticalalignment='baseline',
             transform=ax.transAxes,
             fontdict=dict(
                 family='monospace',
                 color='darkred',
                 weight='bold',
                 size=12)
             )
    save_plot('mst',
              'log_log_{0}'.format(x_label),
              'Log of exec time',
              [''],
              'Log of exec time',
              is_log=True,
              axes=ax,
              x_label=x_label,
              const_ve=const_ve)
def plot_minpath():
    """Plot shortest-path benchmark results from out/minpath.jsonl.

    Each line is a JSON array: [verts, edges, dij_mat_tab, dij_mat_heap,
    dij_tab_tab, dij_tab_heap, floyd, (optional) dfs].  When the vertex
    count is constant across samples, times are plotted against edge count,
    otherwise against vertex count.  Saves a linear plot and a log-log plot
    (slopes fitted for the Dijkstra/Floyd variants only) via save_plot().
    """
    minpath_data = []
    plot_dfs = True
    with open('out/minpath.jsonl', 'r') as mst_file:
        for line in mst_file:
            minpath_data.append(json.loads(line))
    verts = [item[0] for item in minpath_data]
    edges = [item[1] for item in minpath_data]
    times = {}
    times['dij_mat_tab'] = [item[2] for item in minpath_data]
    times['dij_mat_heap'] = [item[3] for item in minpath_data]
    times['dij_tab_tab'] = [item[4] for item in minpath_data]
    times['dij_tab_heap'] = [item[5] for item in minpath_data]
    times['floyd'] = [item[6] for item in minpath_data]
    try:
        times['dfs'] = [item[7] for item in minpath_data]
    except Exception:
        # older result files carry no DFS column
        times['dfs'] = []
        plot_dfs = False
    if all(vert == verts[0] for vert in verts):
        # If all verts are the same, make plot for edge number
        x_label = 'edges'
        const_ve = verts[0]
        plt.plot(edges, times['dij_mat_tab'], 'o-')
        plt.plot(edges, times['dij_mat_heap'], 'o-')
        plt.plot(edges, times['dij_tab_tab'], 'o-')
        plt.plot(edges, times['dij_tab_heap'], 'o-')
        plt.plot(edges, times['floyd'], 'o-')
        if plot_dfs:
            plt.plot(edges, times['dfs'], 'o-')
    else:
        x_label = 'vertices'
        const_ve = edges[0]
        plt.plot(verts, times['dij_mat_tab'], 'o-')
        plt.plot(verts, times['dij_mat_heap'], 'o-')
        plt.plot(verts, times['dij_tab_tab'], 'o-')
        plt.plot(verts, times['dij_tab_heap'], 'o-')
        plt.plot(verts, times['floyd'], 'o-')
        if plot_dfs:
            plt.plot(verts, times['dfs'], 'o-')
    legend = [
        'dij_mat_tab',
        'dij_mat_heap',
        'dij_tab_tab',
        'dij_tab_heap',
        'floyd',
        'dfs'
    ]
    save_plot('minpath', x_label, 'Time in microseconds',
              legend if plot_dfs else legend[:5],
              'Exec time',
              x_label=x_label, const_ve=const_ve)
    linreg_text = ''
    ax = plt.axes()
    # log-log fit only for the first five algorithms (dfs excluded)
    for alg in legend[:5]:
        # '==' value comparison (the original compared strings with 'is')
        slope, err = plot_log(times[alg],
                              verts if x_label == 'vertices' else edges)
        linreg_text += '{0}: slope={1}, err={2}\n'.format(
            alg, np.around(slope, 3), np.around(err, 3)
        )
    plt.text(0.25, 0.05,  # position of the text relative to axes
             ' Linregress:\n{0}'.format(linreg_text),
             horizontalalignment='left',
             verticalalignment='baseline',
             transform=ax.transAxes,
             fontdict=dict(
                 family='monospace',
                 color='darkred',
                 weight='bold',
                 size=12)
             )
    save_plot('minpath',
              'log_log_{0}'.format(x_label),
              'Log of exec time',
              [''],
              'Log of exec time',
              is_log=True,
              axes=ax,
              x_label=x_label,
              const_ve=const_ve)
def plot_log(execution_time_array, ve_number_arr):
    """Plot log2(exec time) vs log2(problem size) and fit a line.

    Zero execution times are dropped (log of 0 is minus infinity), together
    with the corresponding leading entries of ``ve_number_arr``.

    Returns:
        (slope, err): slope and standard error from scipy's linregress.
    """
    data_big_val = ve_number_arr
    if 0 not in execution_time_array:
        exec_time_log_arr = np.log2(execution_time_array)
        data_big_val_log = np.log2(data_big_val)
    else:
        print('Some of the values in exec_time are 0')
        print('and logarithm of 0 is minus infinity.')
        print('Discarding those values for this plot')
        # Value comparison, not identity: the original 'x is not 0' let
        # float zeros (0.0 is not the int 0 object) slip through to log2.
        exec_time_arr = [x for x in execution_time_array if x != 0]
        exec_time_log_arr = np.log2(exec_time_arr)
        arr_start = len(data_big_val) - len(exec_time_arr)
        data_big_val_log = np.log2(data_big_val[arr_start:])
    slope, _, _, _, err = linregress(data_big_val_log, exec_time_log_arr)
    plt.plot(
        data_big_val_log, exec_time_log_arr
    )
    return slope, err
def main(argv):
    """Parse the command-line flags and run the requested plots.

    Supported flags: -m (MST plots), -p (shortest-path plots).
    Prints 'error' and exits on an unknown option.
    """
    try:
        parsed, _ = getopt.getopt(argv, 'mp')
    except getopt.GetoptError:
        print('error')
        exit()
    for flag, _ in parsed:
        if flag == '-m':
            plot_mst()
        elif flag == '-p':
            plot_minpath()
# Script entry point: forward only the user-supplied arguments
# (sys.argv[0] is the script name) to the option parser.
if __name__ == '__main__':
    main(sys.argv[1:])
| [
"scipy.stats.linregress",
"getopt.getopt",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"json.loads",
"matplotlib.pyplot.ylabel",
"matplotlib.use",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.ticklabel_format",
"numpy.around",
"nu... | [((559, 600), 'matplotlib.pyplot.legend', 'plt.legend', (['legend_list'], {'loc': '"""upper left"""'}), "(legend_list, loc='upper left')\n", (569, 600), True, 'from matplotlib import pyplot as plt\n'), ((770, 789), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_label'], {}), '(y_label)\n', (780, 789), True, 'from matplotlib import pyplot as plt\n'), ((794, 804), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (802, 804), True, 'from matplotlib import pyplot as plt\n'), ((809, 896), 'matplotlib.pyplot.ticklabel_format', 'plt.ticklabel_format', ([], {'axis': '"""both"""', 'style': '"""sci"""', 'scilimits': '(-3, 3)', 'useOffset': '(False)'}), "(axis='both', style='sci', scilimits=(-3, 3), useOffset\n =False)\n", (829, 896), True, 'from matplotlib import pyplot as plt\n'), ((971, 1034), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('out/pdf/' + alg_name + '_' + file_suffix + '.pdf')"], {}), "('out/pdf/' + alg_name + '_' + file_suffix + '.pdf')\n", (982, 1034), True, 'from matplotlib import pyplot as plt\n'), ((1039, 1048), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1046, 1048), True, 'from matplotlib import pyplot as plt\n'), ((2113, 2123), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (2121, 2123), True, 'from matplotlib import pyplot as plt\n'), ((5111, 5121), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (5119, 5121), True, 'from matplotlib import pyplot as plt\n'), ((6887, 6934), 'scipy.stats.linregress', 'linregress', (['data_big_val_log', 'exec_time_log_arr'], {}), '(data_big_val_log, exec_time_log_arr)\n', (6897, 6934), False, 'from scipy.stats import linregress\n'), ((6939, 6984), 'matplotlib.pyplot.plot', 'plt.plot', (['data_big_val_log', 'exec_time_log_arr'], {}), '(data_big_val_log, exec_time_log_arr)\n', (6947, 6984), True, 'from matplotlib import pyplot as plt\n'), ((181, 202), 'matplotlib.use', 'matplotlib.use', (['"""pdf"""'], {}), "('pdf')\n", (195, 202), False, 'import matplotlib\n'), ((1619, 1658), 
'matplotlib.pyplot.plot', 'plt.plot', (['edges', "times['Boruvka']", '"""o-"""'], {}), "(edges, times['Boruvka'], 'o-')\n", (1627, 1658), True, 'from matplotlib import pyplot as plt\n'), ((1667, 1706), 'matplotlib.pyplot.plot', 'plt.plot', (['edges', "times['Kruskal']", '"""o-"""'], {}), "(edges, times['Kruskal'], 'o-')\n", (1675, 1706), True, 'from matplotlib import pyplot as plt\n'), ((1782, 1821), 'matplotlib.pyplot.plot', 'plt.plot', (['verts', "times['Boruvka']", '"""o-"""'], {}), "(verts, times['Boruvka'], 'o-')\n", (1790, 1821), True, 'from matplotlib import pyplot as plt\n'), ((1830, 1869), 'matplotlib.pyplot.plot', 'plt.plot', (['verts', "times['Kruskal']", '"""o-"""'], {}), "(verts, times['Kruskal'], 'o-')\n", (1838, 1869), True, 'from matplotlib import pyplot as plt\n'), ((4039, 4082), 'matplotlib.pyplot.plot', 'plt.plot', (['edges', "times['dij_mat_tab']", '"""o-"""'], {}), "(edges, times['dij_mat_tab'], 'o-')\n", (4047, 4082), True, 'from matplotlib import pyplot as plt\n'), ((4091, 4135), 'matplotlib.pyplot.plot', 'plt.plot', (['edges', "times['dij_mat_heap']", '"""o-"""'], {}), "(edges, times['dij_mat_heap'], 'o-')\n", (4099, 4135), True, 'from matplotlib import pyplot as plt\n'), ((4144, 4187), 'matplotlib.pyplot.plot', 'plt.plot', (['edges', "times['dij_tab_tab']", '"""o-"""'], {}), "(edges, times['dij_tab_tab'], 'o-')\n", (4152, 4187), True, 'from matplotlib import pyplot as plt\n'), ((4196, 4240), 'matplotlib.pyplot.plot', 'plt.plot', (['edges', "times['dij_tab_heap']", '"""o-"""'], {}), "(edges, times['dij_tab_heap'], 'o-')\n", (4204, 4240), True, 'from matplotlib import pyplot as plt\n'), ((4249, 4286), 'matplotlib.pyplot.plot', 'plt.plot', (['edges', "times['floyd']", '"""o-"""'], {}), "(edges, times['floyd'], 'o-')\n", (4257, 4286), True, 'from matplotlib import pyplot as plt\n'), ((4431, 4474), 'matplotlib.pyplot.plot', 'plt.plot', (['verts', "times['dij_mat_tab']", '"""o-"""'], {}), "(verts, times['dij_mat_tab'], 'o-')\n", (4439, 4474), 
True, 'from matplotlib import pyplot as plt\n'), ((4483, 4527), 'matplotlib.pyplot.plot', 'plt.plot', (['verts', "times['dij_mat_heap']", '"""o-"""'], {}), "(verts, times['dij_mat_heap'], 'o-')\n", (4491, 4527), True, 'from matplotlib import pyplot as plt\n'), ((4536, 4579), 'matplotlib.pyplot.plot', 'plt.plot', (['verts', "times['dij_tab_tab']", '"""o-"""'], {}), "(verts, times['dij_tab_tab'], 'o-')\n", (4544, 4579), True, 'from matplotlib import pyplot as plt\n'), ((4588, 4632), 'matplotlib.pyplot.plot', 'plt.plot', (['verts', "times['dij_tab_heap']", '"""o-"""'], {}), "(verts, times['dij_tab_heap'], 'o-')\n", (4596, 4632), True, 'from matplotlib import pyplot as plt\n'), ((4641, 4678), 'matplotlib.pyplot.plot', 'plt.plot', (['verts', "times['floyd']", '"""o-"""'], {}), "(verts, times['floyd'], 'o-')\n", (4649, 4678), True, 'from matplotlib import pyplot as plt\n'), ((6363, 6392), 'numpy.log2', 'np.log2', (['execution_time_array'], {}), '(execution_time_array)\n', (6370, 6392), True, 'import numpy as np\n'), ((6420, 6441), 'numpy.log2', 'np.log2', (['data_big_val'], {}), '(data_big_val)\n', (6427, 6441), True, 'import numpy as np\n'), ((6717, 6739), 'numpy.log2', 'np.log2', (['exec_time_arr'], {}), '(exec_time_arr)\n', (6724, 6739), True, 'import numpy as np\n'), ((6826, 6859), 'numpy.log2', 'np.log2', (['data_big_val[arr_start:]'], {}), '(data_big_val[arr_start:])\n', (6833, 6859), True, 'import numpy as np\n'), ((7066, 7091), 'getopt.getopt', 'getopt.getopt', (['argv', '"""mp"""'], {}), "(argv, 'mp')\n", (7079, 7091), False, 'import getopt\n'), ((2342, 2361), 'numpy.around', 'np.around', (['slope', '(3)'], {}), '(slope, 3)\n', (2351, 2361), True, 'import numpy as np\n'), ((2363, 2380), 'numpy.around', 'np.around', (['err', '(3)'], {}), '(err, 3)\n', (2372, 2380), True, 'import numpy as np\n'), ((4320, 4355), 'matplotlib.pyplot.plot', 'plt.plot', (['edges', "times['dfs']", '"""o-"""'], {}), "(edges, times['dfs'], 'o-')\n", (4328, 4355), True, 'from matplotlib 
import pyplot as plt\n'), ((4712, 4747), 'matplotlib.pyplot.plot', 'plt.plot', (['verts', "times['dfs']", '"""o-"""'], {}), "(verts, times['dfs'], 'o-')\n", (4720, 4747), True, 'from matplotlib import pyplot as plt\n'), ((5340, 5359), 'numpy.around', 'np.around', (['slope', '(3)'], {}), '(slope, 3)\n', (5349, 5359), True, 'import numpy as np\n'), ((5361, 5378), 'numpy.around', 'np.around', (['err', '(3)'], {}), '(err, 3)\n', (5370, 5378), True, 'import numpy as np\n'), ((1192, 1208), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (1202, 1208), False, 'import json\n'), ((3292, 3308), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (3302, 3308), False, 'import json\n')] |
import pytest
def test_import_vdjtools_beta_w_validation():
    """Beta-chain VDJtools import with gene/CDR3 validation enabled.

    Checks the expected tcrdist3 column layout and that the resulting
    DataFrame can seed a TCRrep instance directly.
    """
    import numpy as np
    import os
    from tcrdist.paths import path_to_base
    from tcrdist.vdjtools_funcs import import_vdjtools
    from tcrdist.repertoire import TCRrep
    # Reformat vdj_tools input format for tcrdist3
    vdj_tools_file_beta = os.path.join(path_to_base, 'tcrdist','data','formats','vdj.M_15_CD8_beta.clonotypes.TRB.txt.gz')
    df_beta = import_vdjtools( vdj_tools_file = vdj_tools_file_beta ,
                    chain = 'beta',
                    organism = 'human',
                    db_file = 'alphabeta_gammadelta_db.tsv',
                    validate = True)
    assert np.all(df_beta.columns == ['count', 'freq', 'cdr3_b_aa', 'v_b_gene', 'j_b_gene', 'cdr3_b_nucseq','valid_v', 'valid_j', 'valid_cdr3'])
    # Can be directly imported into a TCRrep instance; constructing without
    # raising is the check (distance computation skipped for speed).
    TCRrep(
        cell_df = df_beta[['count', 'freq', 'cdr3_b_aa', 'v_b_gene', 'j_b_gene']],
        chains = ['beta'],
        organism = 'human',
        compute_distances = False)
def test_import_vdjtools_beta_no_validation():
    """Beta-chain VDJtools import with validation disabled.

    Column layout is unchanged, but invalid genes/CDR3s must survive the
    import, so each valid_* column is expected to contain at least one False.
    """
    import numpy as np
    import os
    from tcrdist.paths import path_to_base
    from tcrdist.vdjtools_funcs import import_vdjtools
    vdj_tools_file_beta = os.path.join(path_to_base, 'tcrdist','data','formats','vdj.M_15_CD8_beta.clonotypes.TRB.txt.gz')
    df_beta = import_vdjtools( vdj_tools_file = vdj_tools_file_beta ,
                    chain = 'beta',
                    organism = 'human',
                    db_file = 'alphabeta_gammadelta_db.tsv',
                    validate = False)
    assert np.all(df_beta.columns == ['count', 'freq', 'cdr3_b_aa', 'v_b_gene', 'j_b_gene', 'cdr3_b_nucseq','valid_v', 'valid_j', 'valid_cdr3'])
    assert False in df_beta.valid_cdr3
    assert False in df_beta.valid_v
    assert False in df_beta.valid_j
def test_import_vdjtools_alpha_w_validation():
    """Alpha-chain VDJtools import with gene/CDR3 validation enabled.

    Checks the expected tcrdist3 alpha-chain column layout.
    """
    import numpy as np
    import os
    from tcrdist.paths import path_to_base
    from tcrdist.vdjtools_funcs import import_vdjtools
    vdj_tools_file_alpha = os.path.join(path_to_base, 'tcrdist','data','formats','vdj.M_15_CD8_alpha.clonotypes.TRA.txt.gz')
    df_alpha = import_vdjtools( vdj_tools_file = vdj_tools_file_alpha,
                    chain = 'alpha',
                    organism = 'human',
                    db_file = 'alphabeta_gammadelta_db.tsv',
                    validate = True)
    assert np.all(df_alpha.columns == ['count', 'freq', 'cdr3_a_aa', 'v_a_gene', 'j_a_gene', 'cdr3_a_nucseq','valid_v', 'valid_j', 'valid_cdr3'])
def test_import_vdjtools_alpha_no_validation():
    """Alpha-chain VDJtools import with validation disabled.

    Column layout is unchanged, but invalid genes/CDR3s must survive the
    import, so each valid_* column is expected to contain at least one False.
    """
    import numpy as np
    import os
    from tcrdist.paths import path_to_base
    from tcrdist.vdjtools_funcs import import_vdjtools
    vdj_tools_file_alpha = os.path.join(path_to_base, 'tcrdist','data','formats','vdj.M_15_CD8_alpha.clonotypes.TRA.txt.gz')
    df_alpha = import_vdjtools( vdj_tools_file = vdj_tools_file_alpha,
                    chain = 'alpha',
                    organism = 'human',
                    db_file = 'alphabeta_gammadelta_db.tsv',
                    validate = False)
    assert np.all(df_alpha.columns == ['count', 'freq', 'cdr3_a_aa', 'v_a_gene', 'j_a_gene', 'cdr3_a_nucseq','valid_v', 'valid_j', 'valid_cdr3'])
    assert False in df_alpha.valid_cdr3
    assert False in df_alpha.valid_v
    assert False in df_alpha.valid_j
| [
"tcrdist.repertoire.TCRrep",
"tcrdist.vdjtools_funcs.import_vdjtools",
"numpy.all",
"os.path.join"
] | [((342, 445), 'os.path.join', 'os.path.join', (['path_to_base', '"""tcrdist"""', '"""data"""', '"""formats"""', '"""vdj.M_15_CD8_beta.clonotypes.TRB.txt.gz"""'], {}), "(path_to_base, 'tcrdist', 'data', 'formats',\n 'vdj.M_15_CD8_beta.clonotypes.TRB.txt.gz')\n", (354, 445), False, 'import os\n'), ((453, 595), 'tcrdist.vdjtools_funcs.import_vdjtools', 'import_vdjtools', ([], {'vdj_tools_file': 'vdj_tools_file_beta', 'chain': '"""beta"""', 'organism': '"""human"""', 'db_file': '"""alphabeta_gammadelta_db.tsv"""', 'validate': '(True)'}), "(vdj_tools_file=vdj_tools_file_beta, chain='beta', organism=\n 'human', db_file='alphabeta_gammadelta_db.tsv', validate=True)\n", (468, 595), False, 'from tcrdist.vdjtools_funcs import import_vdjtools\n'), ((712, 850), 'numpy.all', 'np.all', (["(df_beta.columns == ['count', 'freq', 'cdr3_b_aa', 'v_b_gene', 'j_b_gene',\n 'cdr3_b_nucseq', 'valid_v', 'valid_j', 'valid_cdr3'])"], {}), "(df_beta.columns == ['count', 'freq', 'cdr3_b_aa', 'v_b_gene',\n 'j_b_gene', 'cdr3_b_nucseq', 'valid_v', 'valid_j', 'valid_cdr3'])\n", (718, 850), True, 'import numpy as np\n'), ((915, 1058), 'tcrdist.repertoire.TCRrep', 'TCRrep', ([], {'cell_df': "df_beta[['count', 'freq', 'cdr3_b_aa', 'v_b_gene', 'j_b_gene']]", 'chains': "['beta']", 'organism': '"""human"""', 'compute_distances': '(False)'}), "(cell_df=df_beta[['count', 'freq', 'cdr3_b_aa', 'v_b_gene',\n 'j_b_gene']], chains=['beta'], organism='human', compute_distances=False)\n", (921, 1058), False, 'from tcrdist.repertoire import TCRrep\n'), ((1333, 1436), 'os.path.join', 'os.path.join', (['path_to_base', '"""tcrdist"""', '"""data"""', '"""formats"""', '"""vdj.M_15_CD8_beta.clonotypes.TRB.txt.gz"""'], {}), "(path_to_base, 'tcrdist', 'data', 'formats',\n 'vdj.M_15_CD8_beta.clonotypes.TRB.txt.gz')\n", (1345, 1436), False, 'import os\n'), ((1444, 1587), 'tcrdist.vdjtools_funcs.import_vdjtools', 'import_vdjtools', ([], {'vdj_tools_file': 'vdj_tools_file_beta', 'chain': '"""beta"""', 'organism': 
'"""human"""', 'db_file': '"""alphabeta_gammadelta_db.tsv"""', 'validate': '(False)'}), "(vdj_tools_file=vdj_tools_file_beta, chain='beta', organism=\n 'human', db_file='alphabeta_gammadelta_db.tsv', validate=False)\n", (1459, 1587), False, 'from tcrdist.vdjtools_funcs import import_vdjtools\n'), ((1704, 1842), 'numpy.all', 'np.all', (["(df_beta.columns == ['count', 'freq', 'cdr3_b_aa', 'v_b_gene', 'j_b_gene',\n 'cdr3_b_nucseq', 'valid_v', 'valid_j', 'valid_cdr3'])"], {}), "(df_beta.columns == ['count', 'freq', 'cdr3_b_aa', 'v_b_gene',\n 'j_b_gene', 'cdr3_b_nucseq', 'valid_v', 'valid_j', 'valid_cdr3'])\n", (1710, 1842), True, 'import numpy as np\n'), ((2184, 2288), 'os.path.join', 'os.path.join', (['path_to_base', '"""tcrdist"""', '"""data"""', '"""formats"""', '"""vdj.M_15_CD8_alpha.clonotypes.TRA.txt.gz"""'], {}), "(path_to_base, 'tcrdist', 'data', 'formats',\n 'vdj.M_15_CD8_alpha.clonotypes.TRA.txt.gz')\n", (2196, 2288), False, 'import os\n'), ((2297, 2440), 'tcrdist.vdjtools_funcs.import_vdjtools', 'import_vdjtools', ([], {'vdj_tools_file': 'vdj_tools_file_alpha', 'chain': '"""alpha"""', 'organism': '"""human"""', 'db_file': '"""alphabeta_gammadelta_db.tsv"""', 'validate': '(True)'}), "(vdj_tools_file=vdj_tools_file_alpha, chain='alpha',\n organism='human', db_file='alphabeta_gammadelta_db.tsv', validate=True)\n", (2312, 2440), False, 'from tcrdist.vdjtools_funcs import import_vdjtools\n'), ((2555, 2694), 'numpy.all', 'np.all', (["(df_alpha.columns == ['count', 'freq', 'cdr3_a_aa', 'v_a_gene', 'j_a_gene',\n 'cdr3_a_nucseq', 'valid_v', 'valid_j', 'valid_cdr3'])"], {}), "(df_alpha.columns == ['count', 'freq', 'cdr3_a_aa', 'v_a_gene',\n 'j_a_gene', 'cdr3_a_nucseq', 'valid_v', 'valid_j', 'valid_cdr3'])\n", (2561, 2694), True, 'import numpy as np\n'), ((2925, 3029), 'os.path.join', 'os.path.join', (['path_to_base', '"""tcrdist"""', '"""data"""', '"""formats"""', '"""vdj.M_15_CD8_alpha.clonotypes.TRA.txt.gz"""'], {}), "(path_to_base, 'tcrdist', 'data', 'formats',\n 
'vdj.M_15_CD8_alpha.clonotypes.TRA.txt.gz')\n", (2937, 3029), False, 'import os\n'), ((3038, 3182), 'tcrdist.vdjtools_funcs.import_vdjtools', 'import_vdjtools', ([], {'vdj_tools_file': 'vdj_tools_file_alpha', 'chain': '"""alpha"""', 'organism': '"""human"""', 'db_file': '"""alphabeta_gammadelta_db.tsv"""', 'validate': '(False)'}), "(vdj_tools_file=vdj_tools_file_alpha, chain='alpha',\n organism='human', db_file='alphabeta_gammadelta_db.tsv', validate=False)\n", (3053, 3182), False, 'from tcrdist.vdjtools_funcs import import_vdjtools\n'), ((3297, 3436), 'numpy.all', 'np.all', (["(df_alpha.columns == ['count', 'freq', 'cdr3_a_aa', 'v_a_gene', 'j_a_gene',\n 'cdr3_a_nucseq', 'valid_v', 'valid_j', 'valid_cdr3'])"], {}), "(df_alpha.columns == ['count', 'freq', 'cdr3_a_aa', 'v_a_gene',\n 'j_a_gene', 'cdr3_a_nucseq', 'valid_v', 'valid_j', 'valid_cdr3'])\n", (3303, 3436), True, 'import numpy as np\n')] |
import rdkit.Chem as Chem
import numpy as np
import os
'''
This script is meant to split the Tox21 train dataset into the
individual target datasets for training single-task models.
'''
if __name__ == '__main__':
    # Read the raw Tox21 SDF (sanitize=False because some entries would
    # otherwise fail RDKit sanitization).
    suppl = Chem.SDMolSupplier(
        os.path.join(
            os.path.dirname(os.path.dirname(__file__)),
            'data', 'tox21_10k_data_all.sdf'
        ),
        sanitize=False
    )
    mols = []
    smiles = []
    # One (1, n_targets) row per molecule; stacked once after the loop
    # instead of the original O(n^2) incremental np.concatenate.
    y_rows = []
    targets = [
        'NR-AhR',
        'NR-AR',
        'NR-AR-LBD',
        'NR-Aromatase',
        'NR-ER',
        'NR-ER-LBD',
        'NR-PPAR-gamma',
        'SR-ARE',
        'SR-ATAD5',
        'SR-HSE',
        'SR-MMP',
        'SR-p53'
    ]
    for j, mol in enumerate(suppl, start=1):
        mols.append(mol)
        smiles.append(Chem.MolToSmiles(mol))
        # NaN marks "no measurement" for a target; 0/1 are inactive/active.
        y = np.nan * np.ones((1, len(targets)))
        for i, target in enumerate(targets):
            try:
                y[0, i] = bool(float(mol.GetProp(target)))
            except Exception:
                # property missing or unparsable for this target -> keep NaN
                pass
        y_rows.append(y)
        if j % 500 == 0:
            print('completed {} entries'.format(j))
    # Single stack; guard against an empty SDF (the original crashed with
    # 'NoneType has no attribute shape' in that case).
    ys = np.concatenate(y_rows) if y_rows else np.empty((0, len(targets)))
    print(ys)
    print(ys.shape)
    for i, target in enumerate(targets):
        print('Target {} has {} entries; {} active'.format(
            target, sum(~np.isnan(ys[:, i])), np.sum(ys[~np.isnan(ys[:, i]), i])
        ))
    with open(os.path.join(
        os.path.dirname(os.path.dirname(__file__)),
        'data', 'tox21.smiles'
    ), 'w') as fid:
        for j, smile in enumerate(smiles):
            fid.write('{}\t{}\t{}\n'.format(smile, '??', '\t'.join(str(x) for x in ys[j, :])))
| [
"os.path.dirname",
"numpy.isnan",
"rdkit.Chem.MolToSmiles",
"numpy.concatenate"
] | [((661, 682), 'rdkit.Chem.MolToSmiles', 'Chem.MolToSmiles', (['mol'], {}), '(mol)\n', (677, 682), True, 'import rdkit.Chem as Chem\n'), ((911, 934), 'numpy.concatenate', 'np.concatenate', (['(ys, y)'], {}), '((ys, y))\n', (925, 934), True, 'import numpy as np\n'), ((295, 320), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (310, 320), False, 'import os\n'), ((1252, 1277), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1267, 1277), False, 'import os\n'), ((1144, 1162), 'numpy.isnan', 'np.isnan', (['ys[:, i]'], {}), '(ys[:, i])\n', (1152, 1162), True, 'import numpy as np\n'), ((1176, 1194), 'numpy.isnan', 'np.isnan', (['ys[:, i]'], {}), '(ys[:, i])\n', (1184, 1194), True, 'import numpy as np\n')] |
# imports
import pickle
import cv2
import glob
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Module-level constants for camera calibration.
# List of chessboard calibration image paths (glob evaluated at import time).
image_dir = glob.glob('camera_cal/calibration*.jpg')
# Pickle file caching the computed camera matrix and distortion coefficients.
camera_cal_fname = './camera_cal/output_images/camera_cal.p'
def calibration():
    """Compute the camera calibration from chessboard images and cache it.

    Detects 9x6 inner chessboard corners in every image listed in
    ``image_dir``, runs cv2.calibrateCamera and pickles the resulting camera
    matrix ('mtx') and distortion coefficients ('dist') to
    ``camera_cal_fname``.  Annotated corner images are written to
    ./camera_cal/output_images/ for visual inspection.

    Raises:
        RuntimeError: if no chessboard corners were found in any image.
    """
    # images in which a full corner grid was detected (counted at the end)
    images_calibrated = []
    # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ...., (6,5,0)
    objp = np.zeros((6 * 9, 3), np.float32)
    objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)
    # Arrays to store object points and image points from all the images.
    objpoints = []  # 3d points in real world space
    imgpoints = []  # 2d points in image plane
    # Step through the list and search for chessboard corners
    for idx, fname in enumerate(image_dir):
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Find the chessboard corners
        ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)
        # If found, add object points, image points
        if ret:
            objpoints.append(objp)
            imgpoints.append(corners)
            images_calibrated.append(img)
            # Draw and save the detected corners for inspection
            cv2.drawChessboardCorners(img, (9, 6), corners, ret)
            write_name = 'Corners_found' + str(idx) + '.jpg'
            cv2.imwrite('./camera_cal/output_images/' + write_name, img)
    cv2.destroyAllWindows()
    if not objpoints:
        # without this guard 'gray' below would raise a confusing NameError
        raise RuntimeError('No chessboard corners found in any calibration image')
    # Do camera calibration given object points and image points.
    # NOTE(review): uses the last image's grayscale shape -- assumes all
    # calibration images share one resolution.
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
        objpoints, imgpoints, gray.shape[::-1], None, None)
    # save to pickle (with-statement so the file handle is closed)
    with open(camera_cal_fname, 'wb') as cal_file:
        pickle.dump({'mtx': mtx, 'dist': dist}, cal_file)
    print('Camera calibrated using {0} images'.format(len(images_calibrated)))
def get_calibration_matrix():
    """Load the cached calibration, computing it first if the cache is missing.

    Returns:
        tuple: (camera matrix 'mtx', distortion coefficients 'dist').
    """
    try:
        # with-statements close the pickle file (the original leaked handles)
        with open(camera_cal_fname, 'rb') as cal_file:
            dist_pickle = pickle.load(cal_file)
    except FileNotFoundError:
        calibration()
        with open(camera_cal_fname, 'rb') as cal_file:
            dist_pickle = pickle.load(cal_file)
    mtx = dist_pickle["mtx"]
    dist = dist_pickle["dist"]
    return mtx, dist
| [
"cv2.imwrite",
"numpy.array",
"numpy.zeros",
"cv2.destroyAllWindows",
"cv2.cvtColor",
"cv2.calibrateCamera",
"cv2.findChessboardCorners",
"cv2.drawChessboardCorners",
"cv2.imread",
"glob.glob"
] | [((224, 264), 'glob.glob', 'glob.glob', (['"""camera_cal/calibration*.jpg"""'], {}), "('camera_cal/calibration*.jpg')\n", (233, 264), False, 'import glob\n'), ((897, 929), 'numpy.zeros', 'np.zeros', (['(6 * 9, 3)', 'np.float32'], {}), '((6 * 9, 3), np.float32)\n', (905, 929), True, 'import numpy as np\n'), ((1980, 2003), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2001, 2003), False, 'import cv2\n'), ((2109, 2180), 'cv2.calibrateCamera', 'cv2.calibrateCamera', (['objpoints', 'imgpoints', 'gray.shape[::-1]', 'None', 'None'], {}), '(objpoints, imgpoints, gray.shape[::-1], None, None)\n', (2128, 2180), False, 'import cv2\n'), ((1346, 1363), 'cv2.imread', 'cv2.imread', (['fname'], {}), '(fname)\n', (1356, 1363), False, 'import cv2\n'), ((1379, 1416), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1391, 1416), False, 'import cv2\n'), ((1479, 1524), 'cv2.findChessboardCorners', 'cv2.findChessboardCorners', (['gray', '(9, 6)', 'None'], {}), '(gray, (9, 6), None)\n', (1504, 1524), False, 'import cv2\n'), ((1769, 1821), 'cv2.drawChessboardCorners', 'cv2.drawChessboardCorners', (['img', '(9, 6)', 'corners', 'ret'], {}), '(img, (9, 6), corners, ret)\n', (1794, 1821), False, 'import cv2\n'), ((1890, 1950), 'cv2.imwrite', 'cv2.imwrite', (["('./camera_cal/output_images/' + write_name)", 'img'], {}), "('./camera_cal/output_images/' + write_name, img)\n", (1901, 1950), False, 'import cv2\n'), ((2376, 2403), 'numpy.array', 'np.array', (['images_calibrated'], {}), '(images_calibrated)\n', (2384, 2403), True, 'import numpy as np\n')] |
### code from the notebook pycocoEvalDemo.ipynb
from pycocotools.coco import COCO
import cocoeval_modif
import numpy as np
import pandas as pd
from os.path import join
import logging
logging.basicConfig(format='%(levelname)s: %(filename)s L.%(lineno)d - %(message)s',
level=logging.INFO)
### Validation File:
# validFile = '/home/javier/CLEAR_IMAGE_AI/coco_validation_jsons/instances_val2014.json'
# validFile = '/home/javier/CLEAR_IMAGE_AI/coco_validation_jsons/person_keypoints_val2014.json'
### Predictions File:
# predictsFile = '/home/javier/cocoapi/results/instances_val2014_fakebbox100_results.json'
# predictsFile = '/home/javier/cocoapi/results/instances_val2014_fakesegm100_results.json'
# predictsFile = '/home/javier/cocoapi/results/person_keypoints_val2014_fakekeypoints100_results.json'
def main(predictsFile, validFile, annType, iouThrs=(0.5, 0.75, 0.05)):
    """Evaluate COCO-format predictions against COCO-format ground truth.

    Both inputs are json files; evaluating from files is the main mode since
    evaluation should be done over a batch of images at once.  Works for any
    annType in ['segm', 'bbox', 'keypoints'].  The step (third entry of
    ``iouThrs``) should be 0.05, otherwise some APs may come out as -1; the
    mean is computed over the good values only, so stray entries are merely
    dropped from the mean.

    Returns:
        (stats, stats_dicto, df_AP, df_AR): the same results as a list, a
        dict and two pandas DataFrames (AP and AR).
    """
    cocoGt = COCO(validFile)
    cocoDt = cocoGt.loadRes(predictsFile)
    imgIds = sorted(cocoGt.getImgIds())
    print("len imgs Ids", len(imgIds))
    # cap at 100 images ... to avoid: IndexError: list index out of range
    # on small datasets
    min_imgs = min(len(imgIds), 100)
    imgIds = imgIds[0:min_imgs]
    imgId = imgIds[np.random.randint(min_imgs)]  # kept for parity; currently unused
    # running evaluation
    cocoEval = cocoeval_modif.COCOeval(cocoGt, cocoDt, annType, iouThrs)
    cocoEval.params.imgIds = imgIds
    cocoEval.evaluate()
    cocoEval.accumulate()
    stats, stats_dicto = cocoEval.summarize()
    print("\nstats: ", stats)
    # ... returning a list & dict would be enough, but add DataFrames as sugar
    df_AP = pd.DataFrame(stats_dicto['AP']).T
    df_AR = pd.DataFrame(stats_dicto['AR']).T
    # Show data frames in nice format (plain pandas fallback when tabulate
    # is not installed).
    try:
        from tabulate import tabulate
        df_AP['result'] = df_AP['result'].astype(float).round(3)
        print("\nAP:")
        print(tabulate(df_AP, headers='keys', tablefmt='psql'))
        df_AR['result'] = df_AR['result'].astype(float).round(3)
        print("\nAR:")
        print(tabulate(df_AR, headers='keys', tablefmt='psql'))
    except Exception as e:
        print("error: \n", e)
        print("\n We strongly recommend to pip install tabulate for visualizing pandas DataFrames in your linux terminal")
        print("...")
        print("\nAP DataFrame: \n", df_AP.T)
        print("\nAR DataFrame: \n", df_AR.T)
    print("\n[INFO]: For the moment we use 100 Max. Detects. for bbox and segmentation and 20 Max. Detects. for Keypoints Detection.")
    print("          If you need something else you might have to change the code in 'evaluate.py' or in 'cocoeval_modified.py'\n")
    # return the same results in 4 different formats
    return stats, stats_dicto, df_AP, df_AR
if __name__ == "__main__":
root_dir = "/media/javier/JaviHD/coco_dataset_2017/person_dog_coco/dataset"
predictsFile = join(root_dir, "output/coco_instances_results.json")
validFile = join(root_dir, "annotations/test.json")
# predictsFile = "/home/ubuntu/dataset/output/coco_instances_results.json"
#predictsFile = "/home/javier/cocoapi/results/person_keypoints_val2014_fakekeypoints100_results.json"
# validFile = "/home/ubuntu/dataset/annotations/test_mini.json"
#validFile = "/home/javier/CLEAR_IMAGE_AI/coco_validation_jsons/person_keypoints_val2014.json"
annType = 'bbox' # "keypoints"
#annType = "keypoints"
iouThrs = (0.5, 0.75, 0.05)
stats, stats_dicto, df_AP, df_AR = main(predictsFile, validFile, annType, iouThrs)
# print()
# logging.info(f"stats: {stats}\n")
# logging.info(f"stats_dicto: {stats_dicto}\n")
# logging.info(f"df_AP: {df_AP}\n")
# logging.info(f"df_AR: {df_AR}\n")
#########################################################################
# TO DO: #
# ------------ #
# #
# *** Return AP per Class! #
# /home/ubuntu/AutoTrainingPipeline/evaluation/cocoapi_ClearImageAI/ #
# ...cocoapi/PythonAPI/pycocotools/ #
# - cocoeval_modif.py #
# - evaluate.py #
# also check ~/anaconda3/envs/pytorch_p36/lib/python3.6/site-packages/ #
# ... detectron2/evaluation in detectron2 machine, #
# for more info and inspiration #
#########################################################################
| [
"logging.basicConfig",
"tabulate.tabulate",
"pycocotools.coco.COCO",
"os.path.join",
"numpy.random.randint",
"pandas.DataFrame",
"cocoeval_modif.COCOeval"
] | [((185, 301), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s: %(filename)s L.%(lineno)d - %(message)s"""', 'level': 'logging.INFO'}), "(format=\n '%(levelname)s: %(filename)s L.%(lineno)d - %(message)s', level=\n logging.INFO)\n", (204, 301), False, 'import logging\n'), ((1572, 1587), 'pycocotools.coco.COCO', 'COCO', (['validFile'], {}), '(validFile)\n', (1576, 1587), False, 'from pycocotools.coco import COCO\n'), ((2024, 2081), 'cocoeval_modif.COCOeval', 'cocoeval_modif.COCOeval', (['cocoGt', 'cocoDt', 'annType', 'iouThrs'], {}), '(cocoGt, cocoDt, annType, iouThrs)\n', (2047, 2081), False, 'import cocoeval_modif\n'), ((3841, 3893), 'os.path.join', 'join', (['root_dir', '"""output/coco_instances_results.json"""'], {}), "(root_dir, 'output/coco_instances_results.json')\n", (3845, 3893), False, 'from os.path import join\n'), ((3910, 3949), 'os.path.join', 'join', (['root_dir', '"""annotations/test.json"""'], {}), "(root_dir, 'annotations/test.json')\n", (3914, 3949), False, 'from os.path import join\n'), ((1881, 1908), 'numpy.random.randint', 'np.random.randint', (['min_imgs'], {}), '(min_imgs)\n', (1898, 1908), True, 'import numpy as np\n'), ((2334, 2365), 'pandas.DataFrame', 'pd.DataFrame', (["stats_dicto['AP']"], {}), "(stats_dicto['AP'])\n", (2346, 2365), True, 'import pandas as pd\n'), ((2380, 2411), 'pandas.DataFrame', 'pd.DataFrame', (["stats_dicto['AR']"], {}), "(stats_dicto['AR'])\n", (2392, 2411), True, 'import pandas as pd\n'), ((2627, 2675), 'tabulate.tabulate', 'tabulate', (['df_AP'], {'headers': '"""keys"""', 'tablefmt': '"""psql"""'}), "(df_AP, headers='keys', tablefmt='psql')\n", (2635, 2675), False, 'from tabulate import tabulate\n'), ((2876, 2924), 'tabulate.tabulate', 'tabulate', (['df_AR'], {'headers': '"""keys"""', 'tablefmt': '"""psql"""'}), "(df_AR, headers='keys', tablefmt='psql')\n", (2884, 2924), False, 'from tabulate import tabulate\n')] |
"""
datasets used for integration testing
This is just a collection of useful datasets which seemed useful for testing
purposes. The classes in here should not be relied upon in production mode.
"""
import numpy as np
import vigra
from tsdl.tools import OpArrayPiperWithAccessCount
from tsdl.tools import OpReorderAxes
from tsdl.tools import OutputSlot
from tsdl.tools import Classification
from tsdl.tools import Regression
from .rk4 import default_mackey_glass_series
TAU = 2*np.pi
MAX_SEED = 4294967295
# pylint seems to be somewhat broken regarding mixins
# pylint: disable=C0103
# pylint: disable=C0111
class _BaseDataset(OpArrayPiperWithAccessCount):
    """
    Common scaffolding shared by all integration datasets.

    Subclasses synthesise their data in ``create_dataset``; ``build`` seeds
    a per-class RNG and wires the generated array into the operator.
    """
    @classmethod
    def build(cls, d, graph=None, parent=None, workingdir=None):
        # Seed from the subclass name so each dataset class is
        # deterministic but draws from a distinct random stream.
        rng = np.random.RandomState(hash(cls.__name__) % MAX_SEED)
        op = super(_BaseDataset, cls).build(d, graph=graph, parent=parent,
                                           workingdir=workingdir)
        op.Input.setValue(op.create_dataset(d, rng))
        return op
    @classmethod
    def get_default_config(cls):
        config = super(_BaseDataset, cls).get_default_config()
        config["shape"] = (10000,)
        return config
    def create_dataset(self, config, rng):
        """
        Synthesise the dataset for the given config and RNG.

        Must be overridden by subclasses.
        """
        raise NotImplementedError()
class OpNoisySine(_BaseDataset):
    """
    A sine wave rescaled to roughly [0, 1] with Gaussian noise added.
    """
    def create_dataset(self, config, rng):
        n = self._shape[0]
        num_periods = 99.9
        # Base signal in [0, 1] spanning ~100 periods.
        signal = (np.sin(np.linspace(0, num_periods * TAU, n)) + 1) / 2
        # Small additive Gaussian noise.
        signal += rng.normal(loc=0, scale=.02, size=(n,))
        return vigra.taggedView(signal, axistags="t")
class OpShuffledLinspace(_BaseDataset):
    """
    Equally spaced values from [0, 1] in a random order (float32).
    """
    def create_dataset(self, config, rng):
        values = np.linspace(0, 1, self._shape[0])
        # Shuffle via a random permutation drawn from the seeded RNG.
        values = values[rng.permutation(len(values))]
        values = vigra.taggedView(values, axistags='t')
        return values.astype(np.float32)
class OpFeatures(OpReorderAxes):
    """
    Forwards features in 'tc' order and exposes a Valid slot that marks
    every time step as usable.
    """
    Valid = OutputSlot()
    @classmethod
    def build(cls, *args, **kwargs):
        op = super(OpFeatures, cls).build(*args, **kwargs)
        op.AxisOrder.setValue('tc')
        return op
    def setupOutputs(self):
        super(OpFeatures, self).setupOutputs()
        # Valid has one entry per time step of the output.
        valid_meta = self.Valid.meta
        valid_meta.shape = self.Output.meta.shape[:1]
        valid_meta.axistags = vigra.defaultAxistags('t')
        valid_meta.dtype = np.uint8
    def execute(self, slot, subindex, roi, result):
        if slot is self.Valid:
            # Every sample is considered valid.
            result[:] = 1
        else:
            super(OpFeatures, self).execute(slot, subindex, roi, result)
class OpTarget(Classification, OpArrayPiperWithAccessCount):
    """
    Two-channel classification target derived from a 1D input by
    thresholding at ~0.5.
    """
    Valid = OutputSlot()
    def setupOutputs(self):
        assert len(self.Input.meta.shape) == 1
        self.Output.meta.shape = (self.Input.meta.shape[0], 2)
        self.Output.meta.dtype = np.float32
        self.Output.meta.axistags = vigra.defaultAxistags('tc')
        self.Valid.meta.shape = self.Output.meta.shape[:1]
        self.Valid.meta.axistags = vigra.defaultAxistags('t')
        self.Valid.meta.dtype = np.uint8
    def execute(self, slot, subindex, roi, result):
        if slot is self.Valid:
            # Every sample is considered valid.
            result[:] = 1
            return
        data = self.Input[roi.start[0]:roi.stop[0]].wait()
        # One-hot style encoding across the requested channels.
        for col, channel in enumerate(range(roi.start[1], roi.stop[1])):
            result[:, col] = np.where(data > .499, channel, 1 - channel)
class OpRegTarget(Regression, OpArrayPiperWithAccessCount):
    """
    Single-channel regression target: 1 minus the input value.
    """
    Valid = OutputSlot()
    def setupOutputs(self):
        self.Output.meta.shape = (self.Input.meta.shape[0], 1)
        self.Output.meta.dtype = np.float32
        self.Output.meta.axistags = vigra.defaultAxistags('tc')
        self.Valid.meta.shape = self.Output.meta.shape[:1]
        self.Valid.meta.axistags = vigra.defaultAxistags('t')
        self.Valid.meta.dtype = np.uint8
    def execute(self, slot, subindex, roi, result):
        if slot is not self.Valid:
            data = self.Input[roi.start[0]:roi.stop[0]].wait()
            result[:, 0] = 1 - data
        else:
            # Every sample is considered valid.
            result[:] = 1
class OpRandomUnitSquare(_BaseDataset):
    """
    Uniform random samples from the (2D) unit square.
    """
    @classmethod
    def get_default_config(cls):
        config = _BaseDataset.get_default_config()
        config["shape"] = (10000, 2)
        return config
    def create_dataset(self, config, rng):
        samples = rng.rand(*self._shape)
        return vigra.taggedView(samples, axistags="tc")
class OpRandomCorners(OpRandomUnitSquare):
    """
    Random data clustered around the corners of the unit square.

    Each coordinate is drawn from {0.1, 0.9} with uniform jitter of
    +/- 0.1 added.
    """
    def create_dataset(self, config, rng):
        # Uniform jitter in [-0.1, 0.1) around each corner coordinate.
        noise = (rng.rand(*self._shape) - .5) * .2
        # BUG FIX: the original drew the corner coordinates from the global
        # ``np.random`` stream instead of the per-class seeded ``rng``
        # provided by _BaseDataset.build, which broke reproducibility.
        data = rng.choice([.1, .9], size=self._shape)
        data += noise
        data = vigra.taggedView(data, axistags="tc")
        return data
class OpXORTarget(OpRegTarget):
    """
    The result of (kinda) XORing channel 0 and 1:
    xor_cont(a, b) := 1 - (1 - a - b)^2
    """
    def execute(self, slot, subindex, roi, result):
        if slot is not self.Valid:
            data = self.Input[roi.start[0]:roi.stop[0], :].wait()
            result[:, 0] = 1 - np.square(1 - data.sum(axis=1))
        else:
            # Every sample is considered valid.
            result[:] = 1
class OpNormTarget(OpRegTarget):
    """
    Normalised Euclidean norm of the feature channels.
    """
    def execute(self, slot, subindex, roi, result):
        if slot is not self.Valid:
            data = self.Input[roi.start[0]:roi.stop[0]].wait()
            result[:, 0] = np.sqrt(np.square(data).sum(axis=1) / 2.0)
        else:
            # Every sample is considered valid.
            result[:] = 1
class OpMackeyGlass(_BaseDataset):
    """
    Mackey-Glass time series rescaled to [0, 1].
    """
    def create_dataset(self, config, rng):
        series = default_mackey_glass_series()
        # Min-max normalise into [0, 1].
        lo, hi = series.min(), series.max()
        series = (series - lo) / (hi - lo)
        return vigra.taggedView(series, axistags="tc").withAxes('t')
| [
"numpy.random.choice",
"numpy.where",
"vigra.defaultAxistags",
"tsdl.tools.OutputSlot",
"numpy.square",
"numpy.linspace",
"numpy.sin",
"vigra.taggedView"
] | [((2385, 2397), 'tsdl.tools.OutputSlot', 'OutputSlot', ([], {}), '()\n', (2395, 2397), False, 'from tsdl.tools import OutputSlot\n'), ((3185, 3197), 'tsdl.tools.OutputSlot', 'OutputSlot', ([], {}), '()\n', (3195, 3197), False, 'from tsdl.tools import OutputSlot\n'), ((4064, 4076), 'tsdl.tools.OutputSlot', 'OutputSlot', ([], {}), '()\n', (4074, 4076), False, 'from tsdl.tools import OutputSlot\n'), ((1678, 1725), 'numpy.linspace', 'np.linspace', (['(0)', '(num_periods * TAU)', 'num_examples'], {}), '(0, num_periods * TAU, num_examples)\n', (1689, 1725), True, 'import numpy as np\n'), ((1866, 1902), 'vigra.taggedView', 'vigra.taggedView', (['data'], {'axistags': '"""t"""'}), "(data, axistags='t')\n", (1882, 1902), False, 'import vigra\n'), ((2086, 2119), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'self._shape[0]'], {}), '(0, 1, self._shape[0])\n', (2097, 2119), True, 'import numpy as np\n'), ((2183, 2219), 'vigra.taggedView', 'vigra.taggedView', (['data'], {'axistags': '"""t"""'}), "(data, axistags='t')\n", (2199, 2219), False, 'import vigra\n'), ((2793, 2819), 'vigra.defaultAxistags', 'vigra.defaultAxistags', (['"""t"""'], {}), "('t')\n", (2814, 2819), False, 'import vigra\n'), ((3417, 3444), 'vigra.defaultAxistags', 'vigra.defaultAxistags', (['"""tc"""'], {}), "('tc')\n", (3438, 3444), False, 'import vigra\n'), ((3539, 3565), 'vigra.defaultAxistags', 'vigra.defaultAxistags', (['"""t"""'], {}), "('t')\n", (3560, 3565), False, 'import vigra\n'), ((4298, 4325), 'vigra.defaultAxistags', 'vigra.defaultAxistags', (['"""tc"""'], {}), "('tc')\n", (4319, 4325), False, 'import vigra\n'), ((4420, 4446), 'vigra.defaultAxistags', 'vigra.defaultAxistags', (['"""t"""'], {}), "('t')\n", (4441, 4446), False, 'import vigra\n'), ((5065, 5102), 'vigra.taggedView', 'vigra.taggedView', (['data'], {'axistags': '"""tc"""'}), "(data, axistags='tc')\n", (5081, 5102), False, 'import vigra\n'), ((5343, 5389), 'numpy.random.choice', 'np.random.choice', (['[0.1, 0.9]'], {'size': 
'self._shape'}), '([0.1, 0.9], size=self._shape)\n', (5359, 5389), True, 'import numpy as np\n'), ((5425, 5462), 'vigra.taggedView', 'vigra.taggedView', (['data'], {'axistags': '"""tc"""'}), "(data, axistags='tc')\n", (5441, 5462), False, 'import vigra\n'), ((1740, 1752), 'numpy.sin', 'np.sin', (['data'], {}), '(data)\n', (1746, 1752), True, 'import numpy as np\n'), ((3864, 3908), 'numpy.where', 'np.where', (['(data > 0.499)', 'channel', '(1 - channel)'], {}), '(data > 0.499, channel, 1 - channel)\n', (3872, 3908), True, 'import numpy as np\n'), ((6503, 6540), 'vigra.taggedView', 'vigra.taggedView', (['data'], {'axistags': '"""tc"""'}), "(data, axistags='tc')\n", (6519, 6540), False, 'import vigra\n'), ((6174, 6189), 'numpy.square', 'np.square', (['data'], {}), '(data)\n', (6183, 6189), True, 'import numpy as np\n')] |
import lsh_partition
import models
import torch
import pandas as pd
import numpy as np
import math
from distributed_rep import embeding
from models import core
from lsh_partition import lsh
from torch import nn
import time
class MatchingModel():
    """
    End-to-end entity-matching pipeline built on fastText embeddings.

    Records are embedded attribute-by-attribute with a fastText model,
    pairs of embeddings are fed to an ER network (classifier or regressor),
    and candidate pairs are generated either exhaustively or via LSH
    blocking.
    """
    def __init__(self, embeding_style, embeding_src, schema, train_src=None, eval_src=None, prediction_src=None, args=None, model_pt=None, gt_src=None):
        self.args = args
        self.loss_reg = nn.SmoothL1Loss()
        self.loss_cls = nn.CrossEntropyLoss()
        self.prediction_src = prediction_src
        self.model_pt = model_pt
        # BUG FIX: the original referenced the module-level global
        # `embeding_source` here instead of the `embeding_src` parameter,
        # which raises NameError whenever the class is used outside the
        # __main__ script below.
        self.eb_model = embeding.FastTextEmbeding(source_pt=embeding_src)
        self.schema = schema
        self.gt_src = gt_src
        self.gt = pd.read_csv(gt_src)
        # NOTE(review): model/optimizer/datasets are only created for the
        # "fasttext-avg" style; any other style leaves them undefined.
        if embeding_style == "fasttext-avg":
            self.model = core.ERModel()
            # optimize all parameters
            self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args['LR'])
            self.dm_data_set_train = core.DMFormatDataset(train_src, self.eb_model, 'avg', schema=schema)
            self.dm_data_set_eval = core.DMFormatDataset(eval_src, self.eb_model, 'avg', schema=schema)
    def run_train_reg(self):
        """Train with the smooth-L1 regression loss, then save a TorchScript copy."""
        for epoch in range(self.args['EPOCH']):
            self.optimizer.zero_grad()
            for step, (x, y) in enumerate(self.dm_data_set_train):
                out = self.model.forward(x)
                y = torch.reshape(y, (-1, 1))
                loss = self.loss_reg(out, y)
                if step % self.args['BATCH_SIZE'] == 0:
                    loss.backward()        # backpropagation, compute gradients
                    self.optimizer.step()  # apply gradients
                    if step % 100 == 0:
                        print("loss: ", loss)
                    self.optimizer.zero_grad()  # clear gradients for this training step
            self.run_eval(0.1)
        sm = torch.jit.script(self.model)
        print("save: ", self.model_pt)
        sm.save(self.model_pt)
        return
    def run_train_cls(self):
        """Train with cross-entropy loss, then save a TorchScript copy."""
        for epoch in range(self.args['EPOCH']):
            self.optimizer.zero_grad()
            for step, (x, y) in enumerate(self.dm_data_set_train):
                out = self.model.forward(x)
                loss = self.loss_cls(out, y)
                if step % self.args['BATCH_SIZE'] == 0:
                    loss.backward()        # backpropagation, compute gradients
                    self.optimizer.step()  # apply gradients
                    if step % 256 == 0:
                        print("Epoch: ", epoch, " - loss: ", loss)
                    self.optimizer.zero_grad()  # clear gradients for this training step
            self.run_eval(eva_model='cls')
        sm = torch.jit.script(self.model)
        print("save: ", self.model_pt)
        sm.save(self.model_pt)
        return
    def run_eval(self, tau=None, eva_model='reg'):
        """
        Evaluate on the eval set.

        'reg' counts predictions whose |prediction - target| < tau;
        'cls' counts exact label matches.
        """
        tp = 0
        if eva_model == 'reg':
            for step, (x, y) in enumerate(self.dm_data_set_eval):
                out = self.model.forward(x)
                # BUG FIX: the original contained a corrupted line
                # (`torch.reshprediction_l_src.numpy()`) and compared the
                # *input* x against y; compare the model output instead.
                out = out.detach().numpy()
                y = y.numpy()
                dis_ = np.reshape(out - y, -1)[0]
                dis_ = abs(dis_)
                if dis_ < tau:
                    tp += 1
            print("prec: ", tp / self.dm_data_set_eval.length)
        elif eva_model == 'cls':
            for step, (x, y) in enumerate(self.dm_data_set_eval):
                out = self.model.forward(x)
                pred = torch.max(out, 1)[1].data.numpy()
                print(pred)
                if pred[0] == y[0]:
                    tp += 1
            print("prec: ", tp / self.dm_data_set_eval.length)
        return
    def run_test(self):
        """Block candidate pairs with LSH, then classify pairs within buckets."""
        model = torch.jit.load(self.model_pt)
        # BUG FIX: the original tested `type(self.prediction_src == tuple)`,
        # which is always truthy (it is the bool class); use isinstance.
        if isinstance(self.prediction_src, tuple):
            print(type(self.prediction_src))
            print(self.prediction_src)
            left_ = self.prediction_src[0]
            right_ = self.prediction_src[1]
            self.left_ = pd.read_csv(left_)
            self.right_ = pd.read_csv(right_)
        lsh_ = lsh.LSH(num_hash_func=1, num_hash_table=1, data_l=self.left_, data_r=self.right_, schema=self.schema, embeding_model=self.eb_model)
        lsh_.index()
        lsh_.show_hash_table()
        result = []
        n_attrs = len(self.schema)  # one 100-dim fastText vector per attribute
        for table_id in range(lsh_.num_hash_table):
            table_ = lsh_.get_table(table_id)
            for bucket_id in table_:
                for key_l in table_[bucket_id]:
                    eb_l = np.reshape(table_[bucket_id][key_l], (-1, n_attrs, 100))
                    for key_r in table_[bucket_id]:
                        # Only score (left, right) pairs; skip L-L, R-R and R-L.
                        if key_l[0] == 'R' and key_r[0] == 'L':
                            continue
                        if key_l[0] == key_r[0]:
                            continue
                        eb_r = np.reshape(table_[bucket_id][key_r], (-1, n_attrs, 100))
                        eb_ = np.concatenate((eb_l, eb_r), axis=0)
                        eb_ = torch.tensor(eb_, dtype=torch.float32)
                        out = model(eb_)
                        pred_y = torch.max(out, 1)[1].data.numpy()
                        if pred_y == [0]:
                            continue
                        print("pred: ", pred_y[0], key_l[2:], key_r[2:])
                        result.append([key_l[2:], key_r[2:]])
        print(len(result))
        self.get_f1(result)
        return
    def run_test_full_data(self):
        """Score the full cross product of left x right records (no blocking)."""
        model = torch.jit.load(self.model_pt)
        # BUG FIX: same always-true `type(... == tuple)` check as run_test.
        if isinstance(self.prediction_src, tuple):
            print(self.prediction_src)
            self.left_ = pd.read_csv(self.prediction_src[0])
            self.right_ = pd.read_csv(self.prediction_src[1])
        n_attrs = len(self.schema)
        for index_l, row_l in self.left_.iterrows():
            for index_r, row_r in self.right_.iterrows():
                eb_l = [self.eb_model.avg_embeding(str(row_l[attr])) for attr in self.schema]
                eb_r = [self.eb_model.avg_embeding(str(row_r[attr])) for attr in self.schema]
                eb_l = np.reshape(eb_l, (-1, n_attrs, 100))
                eb_r = np.reshape(eb_r, (-1, n_attrs, 100))
                eb_ = np.concatenate((eb_l, eb_r), axis=0)
                eb_ = torch.tensor(eb_, dtype=torch.float32)
                out = model(eb_)
                pred_y = torch.max(out, 1)[1].data.numpy()
                print("FULL:", "pred", pred_y, row_l["id"], row_r["id"], eb_)
        return
    def run_prediction(self, tuple_l, tuple_r, schema):
        """Classify a single (left, right) record pair; True means "match"."""
        model = torch.jit.load(self.model_pt)
        eb_l = [self.eb_model.avg_embeding(str(tuple_l["left_" + attr])) for attr in schema]
        eb_r = [self.eb_model.avg_embeding(str(tuple_r["right_" + attr])) for attr in schema]
        n_attrs = len(schema)
        eb_l = np.reshape(eb_l, (-1, n_attrs, 100))
        eb_r = np.reshape(eb_r, (-1, n_attrs, 100))
        eb_ = np.concatenate((eb_l, eb_r), axis=0)
        eb_ = torch.tensor(eb_, dtype=torch.float32)
        out = model(eb_)
        pred_y = torch.max(out, 1)[1].data.numpy()
        print("pred: ", pred_y[0])
        if pred_y == [0]:
            return False
        return True
    def get_f1(self, result):
        """
        Report the fraction of ground-truth pairs recovered by `result`
        (i.e. recall against the perfect mapping, despite the name).
        """
        tp = 0
        self.gt = pd.read_csv(self.gt_src)
        print(self.gt)
        for row in result:
            data_slice = self.gt.loc[self.gt['idDBLP'] == row[0]]
            data_slice = data_slice.loc[data_slice['idACM'] == int(row[1])]
            if not data_slice.empty:
                tp += 1
        print(tp / len(self.gt))
        return
if __name__ == '__main__':
    # Attribute schema shared by both input tables (DBLP and ACM).
    schema = ['title', 'authors', 'venue', 'year']
    # NOTE(review): hard-coded, machine-specific paths -- parameterise
    # before running anywhere else.
    embeding_source = '/home/LAB/zhuxk/project/REENet/models/embeding/dblp_acm.bin'
    train_src = "/home/LAB/zhuxk/project/data/ER-dataset-benchmark/ER/DBLP-ACM/train_balance.csv"
    eval_src = "/home/LAB/zhuxk/project/data/ER-dataset-benchmark/ER/DBLP-ACM/train_balance.csv"
    model_pt = "/home/LAB/zhuxk/project/DeepER/models/DBLP_ACM_classification.py"
    prediction_l_src = "/home/LAB/zhuxk/project/data/ER-dataset-benchmark/ER/DBLP-ACM/DBLP2.csv"
    prediction_r_src = "/home/LAB/zhuxk/project/data/ER-dataset-benchmark/ER/DBLP-ACM/ACM.csv"
    gt_src = "/home/LAB/zhuxk/project/data/ER-dataset-benchmark/ER/DBLP-ACM/DBLP-ACM_perfectMapping.csv"
    # Training hyper-parameters consumed through MatchingModel.args.
    args = {
        "EPOCH":10,
        "BATCH_SIZE":16,
        "LR":0.0003
    }
    model = MatchingModel("fasttext-avg",
                          embeding_source,
                          schema,
                          train_src = train_src,
                          eval_src = eval_src,
                          args=args,
                          model_pt=model_pt,
                          prediction_src = (prediction_l_src, prediction_r_src),
                          gt_src = gt_src
                          )
    # Time the classification training run (elapsed time printed below).
    time_start=time.time()
    model.run_train_cls()
    # Alternative entry points kept for experimentation:
    # data = pd.read_csv(eval_src)
    # tuple_l = data[['left_title','left_authors','left_venue', 'left_year']]
    # tuple_r = data[['right_title','right_authors','right_venue', 'right_year']]
    # tuple_l = tuple_l.iloc[3100]
    # tuple_r = tuple_r.iloc[3100]
    #print("TTTT:", tuple_l, "||", tuple_r)
    #model.run_test_full_data()
    #model.run_test()
    #model.run_prediction(tuple_l, tuple_r, schema)
    time_end=time.time()
print('time cost',time_end-time_start,'s') | [
"torch.jit.script",
"models.core.DMFormatDataset",
"numpy.reshape",
"torch.nn.CrossEntropyLoss",
"pandas.read_csv",
"torch.jit.load",
"torch.max",
"lsh_partition.lsh.LSH",
"torch.reshape",
"torch.reshprediction_l_src.numpy",
"torch.tensor",
"models.core.ERModel",
"numpy.concatenate",
"dist... | [((9655, 9666), 'time.time', 'time.time', ([], {}), '()\n', (9664, 9666), False, 'import time\n'), ((10127, 10138), 'time.time', 'time.time', ([], {}), '()\n', (10136, 10138), False, 'import time\n'), ((449, 466), 'torch.nn.SmoothL1Loss', 'nn.SmoothL1Loss', ([], {}), '()\n', (464, 466), False, 'from torch import nn\n'), ((491, 512), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (510, 512), False, 'from torch import nn\n'), ((616, 668), 'distributed_rep.embeding.FastTextEmbeding', 'embeding.FastTextEmbeding', ([], {'source_pt': 'embeding_source'}), '(source_pt=embeding_source)\n', (641, 668), False, 'from distributed_rep import embeding\n'), ((745, 764), 'pandas.read_csv', 'pd.read_csv', (['gt_src'], {}), '(gt_src)\n', (756, 764), True, 'import pandas as pd\n'), ((2077, 2105), 'torch.jit.script', 'torch.jit.script', (['self.model'], {}), '(self.model)\n', (2093, 2105), False, 'import torch\n'), ((3034, 3062), 'torch.jit.script', 'torch.jit.script', (['self.model'], {}), '(self.model)\n', (3050, 3062), False, 'import torch\n'), ((4071, 4100), 'torch.jit.load', 'torch.jit.load', (['self.model_pt'], {}), '(self.model_pt)\n', (4085, 4100), False, 'import torch\n'), ((4343, 4361), 'pandas.read_csv', 'pd.read_csv', (['left_'], {}), '(left_)\n', (4354, 4361), True, 'import pandas as pd\n'), ((4385, 4404), 'pandas.read_csv', 'pd.read_csv', (['right_'], {}), '(right_)\n', (4396, 4404), True, 'import pandas as pd\n'), ((4420, 4556), 'lsh_partition.lsh.LSH', 'lsh.LSH', ([], {'num_hash_func': '(1)', 'num_hash_table': '(1)', 'data_l': 'self.left_', 'data_r': 'self.right_', 'schema': 'self.schema', 'embeding_model': 'self.eb_model'}), '(num_hash_func=1, num_hash_table=1, data_l=self.left_, data_r=self.\n right_, schema=self.schema, embeding_model=self.eb_model)\n', (4427, 4556), False, 'from lsh_partition import lsh\n'), ((5974, 6003), 'torch.jit.load', 'torch.jit.load', (['self.model_pt'], {}), '(self.model_pt)\n', (5988, 6003), False, 'import 
torch\n'), ((6201, 6219), 'pandas.read_csv', 'pd.read_csv', (['left_'], {}), '(left_)\n', (6212, 6219), True, 'import pandas as pd\n'), ((6243, 6262), 'pandas.read_csv', 'pd.read_csv', (['right_'], {}), '(right_)\n', (6254, 6262), True, 'import pandas as pd\n'), ((7183, 7212), 'torch.jit.load', 'torch.jit.load', (['self.model_pt'], {}), '(self.model_pt)\n', (7197, 7212), False, 'import torch\n'), ((7563, 7593), 'numpy.reshape', 'np.reshape', (['eb_l', '(-1, 4, 100)'], {}), '(eb_l, (-1, 4, 100))\n', (7573, 7593), True, 'import numpy as np\n'), ((7609, 7639), 'numpy.reshape', 'np.reshape', (['eb_r', '(-1, 4, 100)'], {}), '(eb_r, (-1, 4, 100))\n', (7619, 7639), True, 'import numpy as np\n'), ((7655, 7691), 'numpy.concatenate', 'np.concatenate', (['(eb_l, eb_r)'], {'axis': '(0)'}), '((eb_l, eb_r), axis=0)\n', (7669, 7691), True, 'import numpy as np\n'), ((7706, 7744), 'torch.tensor', 'torch.tensor', (['eb_'], {'dtype': 'torch.float32'}), '(eb_, dtype=torch.float32)\n', (7718, 7744), False, 'import torch\n'), ((8009, 8033), 'pandas.read_csv', 'pd.read_csv', (['self.gt_src'], {}), '(self.gt_src)\n', (8020, 8033), True, 'import pandas as pd\n'), ((834, 848), 'models.core.ERModel', 'core.ERModel', ([], {}), '()\n', (846, 848), False, 'from models import core\n'), ((1005, 1073), 'models.core.DMFormatDataset', 'core.DMFormatDataset', (['train_src', 'self.eb_model', '"""avg"""'], {'schema': 'schema'}), "(train_src, self.eb_model, 'avg', schema=schema)\n", (1025, 1073), False, 'from models import core\n'), ((1110, 1177), 'models.core.DMFormatDataset', 'core.DMFormatDataset', (['eval_src', 'self.eb_model', '"""avg"""'], {'schema': 'schema'}), "(eval_src, self.eb_model, 'avg', schema=schema)\n", (1130, 1177), False, 'from models import core\n'), ((1479, 1504), 'torch.reshape', 'torch.reshape', (['y', '(-1, 1)'], {}), '(y, (-1, 1))\n', (1492, 1504), False, 'import torch\n'), ((3375, 3409), 'torch.reshprediction_l_src.numpy', 'torch.reshprediction_l_src.numpy', ([], {}), '()\n', 
(3407, 3409), False, 'import torch\n'), ((6726, 6756), 'numpy.reshape', 'np.reshape', (['eb_l', '(-1, 4, 100)'], {}), '(eb_l, (-1, 4, 100))\n', (6736, 6756), True, 'import numpy as np\n'), ((6778, 6808), 'numpy.reshape', 'np.reshape', (['eb_r', '(-1, 4, 100)'], {}), '(eb_r, (-1, 4, 100))\n', (6788, 6808), True, 'import numpy as np\n'), ((6829, 6865), 'numpy.concatenate', 'np.concatenate', (['(eb_l, eb_r)'], {'axis': '(0)'}), '((eb_l, eb_r), axis=0)\n', (6843, 6865), True, 'import numpy as np\n'), ((6888, 6926), 'torch.tensor', 'torch.tensor', (['eb_'], {'dtype': 'torch.float32'}), '(eb_, dtype=torch.float32)\n', (6900, 6926), False, 'import torch\n'), ((3490, 3510), 'numpy.reshape', 'np.reshape', (['dis_', '(-1)'], {}), '(dis_, -1)\n', (3500, 3510), True, 'import numpy as np\n'), ((4835, 4885), 'numpy.reshape', 'np.reshape', (['table_[bucket_id][key_l]', '(-1, 4, 100)'], {}), '(table_[bucket_id][key_l], (-1, 4, 100))\n', (4845, 4885), True, 'import numpy as np\n'), ((7788, 7805), 'torch.max', 'torch.max', (['out', '(1)'], {}), '(out, 1)\n', (7797, 7805), False, 'import torch\n'), ((5191, 5241), 'numpy.reshape', 'np.reshape', (['table_[bucket_id][key_r]', '(-1, 4, 100)'], {}), '(table_[bucket_id][key_r], (-1, 4, 100))\n', (5201, 5241), True, 'import numpy as np\n'), ((5274, 5310), 'numpy.concatenate', 'np.concatenate', (['(eb_l, eb_r)'], {'axis': '(0)'}), '((eb_l, eb_r), axis=0)\n', (5288, 5310), True, 'import numpy as np\n'), ((5345, 5383), 'torch.tensor', 'torch.tensor', (['eb_'], {'dtype': 'torch.float32'}), '(eb_, dtype=torch.float32)\n', (5357, 5383), False, 'import torch\n'), ((6985, 7002), 'torch.max', 'torch.max', (['out', '(1)'], {}), '(out, 1)\n', (6994, 7002), False, 'import torch\n'), ((3833, 3850), 'torch.max', 'torch.max', (['out', '(1)'], {}), '(out, 1)\n', (3842, 3850), False, 'import torch\n'), ((5466, 5483), 'torch.max', 'torch.max', (['out', '(1)'], {}), '(out, 1)\n', (5475, 5483), False, 'import torch\n')] |
"""
This module handles the selection of image regions, the second stage of
processing.
"""
import re
from typing import Callable, Any, Sequence, Tuple
import numpy as np
import cv2
import matplotlib.pyplot as plt
import pytesseract
from fuzzywuzzy import process
from .text_row_extractors import RowExtraction
def pad_array(
    image: "npt.NDArray",
    pad_amount: int = 50,
    whitespace_element: Any = 255
):
    """
    Stack `pad_amount` rows of `whitespace_element` above and below
    `image` and return the padded array.
    """
    blank_row = np.array(
        np.repeat(whitespace_element, image.shape[1]),
        dtype=np.uint8
    )
    padding = [blank_row] * pad_amount
    return np.concatenate((padding, image, padding))
def pad_then_extract(
    image: "npt.NDArray",
    image_to_text_func: """Callable[
        [npt.NDArray], str
    ]""" = pytesseract.image_to_string,
    pad_amount: int = 50,
    whitespace_element: Any = 255
):
    """
    Pad `image` with whitespace (see `pad_array`) and run the given
    image-to-text function over the padded result.
    """
    padded = pad_array(image, pad_amount, whitespace_element)
    return image_to_text_func(padded)
class RowFilter:
    """
    Keeps only the rows of a RowExtraction whose image content satisfies a
    user-supplied predicate.
    """
    def __init__(
            self,
            region_predicate: """Callable[
                [npt.NDArray], bool
            ]"""
    ):
        # Predicate is invoked as predicate(row_image, row_index) -> bool.
        self._region_predicate = region_predicate

    @property
    def region_predicate(self) -> "Callable[[npt.NDArray], str]":
        return self._region_predicate

    @region_predicate.setter
    def region_predicate(
            self,
            region_predicate: "Callable[[npt.NDArray], str]"
    ):
        self._region_predicate = region_predicate

    def filter_extraction(
            self, extraction_obj: "RowExtraction") -> "RowExtraction":
        """
        Return a new RowExtraction containing only the rows accepted by
        the predicate (called with each row and its index).
        """
        keep = [
            idx for idx, row in enumerate(extraction_obj.rows)
            if self._region_predicate(row, idx)
        ]
        return RowExtraction(
            extraction_obj.rows[keep],
            extraction_obj.row_indices[keep]
        )
class TextMatcher:
    """
    Matches the text extracted from an image region (a numpy array) against
    a fixed set of known lines via fuzzy (Levenshtein) matching, then tests
    the best match against a set of regular expressions.
    """
    def __init__(
            self,
            known_lines: Sequence[str],
            regexes: Sequence[str],
            image_to_text_func: """Callable[
                [npt.NDArray], str
            ]""" = pad_then_extract
    ):
        self._known_lines = known_lines
        self._regexes = regexes
        # (extracted_text, best_match) pairs, recorded for inspection.
        self._matches = []
        self._image_to_text_func = image_to_text_func  # injectable for testing

    def __call__(self, row: "[npt.NDArray]", index: int) -> bool:
        """
        OCR the region and report whether the closest known line (by
        Levenshtein distance) matches any of the configured regexes.
        """
        extracted = self._image_to_text_func(row)
        if not extracted:  # '' gives annoying fuzzywuzzy warnings
            self._matches.append((extracted, "<NO MATCH FOUND>"))
            return False
        best = process.extractOne(extracted, self._known_lines)[0]
        self._matches.append((extracted, best))
        return any(re.match(pattern, best) for pattern in self._regexes)

    @property
    def regexes(self) -> str:
        return self._regexes

    @regexes.setter
    def regexes(self, regexes: str):
        self._regexes = regexes

    @property
    def image_to_text_func(self) -> "Callable[[npt.NDArray], str]":
        return self._image_to_text_func

    @image_to_text_func.setter
    def image_to_text_func(
            self,
            image_to_text_func: "Callable[[npt.NDArray], str]",
    ):
        self._image_to_text_func = image_to_text_func

    @property
    def known_lines(self) -> Sequence[str]:
        return self._known_lines

    @known_lines.setter
    def known_lines(self, known_lines: Sequence[str]):
        self._known_lines = known_lines

    @property
    def matches(self) -> Sequence[Tuple[str, str]]:
        return self._matches

    @classmethod
    def from_array(
            cls,
            image_as_array: "npt.NDArray",
            regexes,
            image_to_text_func: """Callable[
                [npt.NDArray], str
            ]""" = pad_then_extract
    ) -> "TextMatcher":
        """Build a matcher whose known lines come from OCR of the whole image."""
        text = pytesseract.image_to_string(image_as_array)
        return cls(str(text).splitlines(), regexes, image_to_text_func)
class InteractiveMatcher:
    """
    Lets a user interactively accept or reject extracted regions by viewing
    them in a matplotlib figure and pressing 'y' or 'n'.
    """
    def __init__(
            self,
            all_rows: "RowExtraction",
            previous_regions_preview: int = 5,
            show_previous_regions: bool = True
    ):
        self._all_rows = all_rows
        self._previous_regions_preview = previous_regions_preview
        self._show_previous_regions = show_previous_regions
        # Last key recorded by the matplotlib key_press_event callback.
        self._button_press = None

    def __call__(self, row: "[npt.NDArray]", index: int) -> bool:
        """
        Show the region in a matplotlib figure and let the user accept
        ('y') or reject ('n') it via a keypress.
        """
        # Show the preview of earlier regions when appropriate.
        show_preview = self._show_previous_regions and index != 0
        if (show_preview):
            self.show_preview(index)
        plt.subplot(2, 1, 2 if show_preview else 1)
        plt.title("Current Region", fontweight="bold")
        plt.xlabel("Input (y/n) to add whitespace before the region")
        plt.connect('key_press_event', self.key_pressed)
        plt.imshow(row, cmap="gray")
        # Block until the user presses 'y' or 'n'.
        while self._button_press not in ("y", "n"):
            plt.waitforbuttonpress()
        press, self._button_press = self._button_press, None
        plt.close()
        return press == "y"

    def show_preview(self, row_index: int):
        """Render up to `previous_regions_preview` earlier regions above."""
        preview_length = min(self._previous_regions_preview, row_index)
        preview_slice = slice(row_index - preview_length, row_index)
        preview = cv2.vconcat([
            pad_array(region, pad_amount=20)
            for region in self._all_rows.rows[preview_slice]
        ])
        plt.subplot(2, 1, 1)
        plt.title("Previous Regions", fontweight="bold")
        plt.imshow(preview, cmap="gray")

    def key_pressed(self, event):
        # matplotlib key_press_event callback: records the pressed key.
        self._button_press = event.key

    @property
    def previous_regions_preview(self) -> int:
        return self._previous_regions_preview

    @previous_regions_preview.setter
    def previous_regions_preview(self, previous_regions_preview: int):
        # BUG FIX: this setter was named `regexes` (copy-paste from
        # TextMatcher), which left `previous_regions_preview` read-only
        # and created a bogus settable `regexes` attribute.
        self._previous_regions_preview = previous_regions_preview

    @property
    def all_rows(self) -> "RowExtraction":
        return self._all_rows

    @property
    def show_previous_regions(self) -> bool:
        return self._show_previous_regions
| [
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.waitforbuttonpress",
"numpy.repeat",
"matplotlib.pyplot.connect",
"matplotlib.pyplot.xlabel",
"re.match",
"matplotlib.pyplot.close",
"numpy.array",
"fuzzywuzzy.process.extractOne",
"numpy.concatenate",
"pytesseract.image_to_string",
"matplotlib.py... | [((454, 499), 'numpy.repeat', 'np.repeat', (['whitespace_element', 'image.shape[1]'], {}), '(whitespace_element, image.shape[1])\n', (463, 499), True, 'import numpy as np\n'), ((522, 563), 'numpy.array', 'np.array', (['whitespace_line'], {'dtype': 'np.uint8'}), '(whitespace_line, dtype=np.uint8)\n', (530, 563), True, 'import numpy as np\n'), ((575, 666), 'numpy.concatenate', 'np.concatenate', (['([whitespace_line] * pad_amount, image, [whitespace_line] * pad_amount)'], {}), '(([whitespace_line] * pad_amount, image, [whitespace_line] *\n pad_amount))\n', (589, 666), True, 'import numpy as np\n'), ((4723, 4766), 'pytesseract.image_to_string', 'pytesseract.image_to_string', (['image_as_array'], {}), '(image_as_array)\n', (4750, 4766), False, 'import pytesseract\n'), ((5821, 5864), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2 if show_preview else 1)'], {}), '(2, 1, 2 if show_preview else 1)\n', (5832, 5864), True, 'import matplotlib.pyplot as plt\n'), ((5873, 5919), 'matplotlib.pyplot.title', 'plt.title', (['"""Current Region"""'], {'fontweight': '"""bold"""'}), "('Current Region', fontweight='bold')\n", (5882, 5919), True, 'import matplotlib.pyplot as plt\n'), ((5928, 5989), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Input (y/n) to add whitespace before the region"""'], {}), "('Input (y/n) to add whitespace before the region')\n", (5938, 5989), True, 'import matplotlib.pyplot as plt\n'), ((5998, 6046), 'matplotlib.pyplot.connect', 'plt.connect', (['"""key_press_event"""', 'self.key_pressed'], {}), "('key_press_event', self.key_pressed)\n", (6009, 6046), True, 'import matplotlib.pyplot as plt\n'), ((6055, 6083), 'matplotlib.pyplot.imshow', 'plt.imshow', (['row'], {'cmap': '"""gray"""'}), "(row, cmap='gray')\n", (6065, 6083), True, 'import matplotlib.pyplot as plt\n'), ((6271, 6282), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6280, 6282), True, 'import matplotlib.pyplot as plt\n'), ((6655, 6675), 
'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (6666, 6675), True, 'import matplotlib.pyplot as plt\n'), ((6684, 6732), 'matplotlib.pyplot.title', 'plt.title', (['"""Previous Regions"""'], {'fontweight': '"""bold"""'}), "('Previous Regions', fontweight='bold')\n", (6693, 6732), True, 'import matplotlib.pyplot as plt\n'), ((6741, 6773), 'matplotlib.pyplot.imshow', 'plt.imshow', (['preview'], {'cmap': '"""gray"""'}), "(preview, cmap='gray')\n", (6751, 6773), True, 'import matplotlib.pyplot as plt\n'), ((3323, 3370), 'fuzzywuzzy.process.extractOne', 'process.extractOne', (['row_text', 'self._known_lines'], {}), '(row_text, self._known_lines)\n', (3341, 3370), False, 'from fuzzywuzzy import process\n'), ((6177, 6201), 'matplotlib.pyplot.waitforbuttonpress', 'plt.waitforbuttonpress', ([], {}), '()\n', (6199, 6201), True, 'import matplotlib.pyplot as plt\n'), ((3484, 3509), 're.match', 're.match', (['regex', 'match[0]'], {}), '(regex, match[0])\n', (3492, 3509), False, 'import re\n')] |
import numpy as np
class Stencil:
    '''
    A one-dimensional stencil of relative cell indices, where -1 and 0 are
    the adjacent cells upwind and downwind of the target face respectively.
    Centres that land on the wrong side of the origin (wrap-around) are
    shifted by one unit back onto the expected side.
    '''
    def __init__(self, indices):
        self.indices = np.array(indices)

    @staticmethod
    def _unwrap(value, offset, origin):
        # Shift a wrapped-around coordinate back to the expected side of
        # the origin: upwind entries must lie below it, downwind above.
        if offset < 0 and value > origin:
            return value - 1
        if offset > 0 and value < origin:
            return value + 1
        return value

    def cellCentres(self, mesh, index):
        """Unwrapped cell centres of the stencil around cell `index`."""
        origin = mesh.cellCentre(index)
        return np.array([
            self._unwrap(mesh.cellCentre(index + i), i, origin)
            for i in self.indices
        ])

    def relativeCellCentres(self, mesh, index):
        """Cell centres relative to the centre of face `index`."""
        origin = mesh.faceCentre(index)
        return np.array([
            self._unwrap(mesh.cellCentre(index + i), i, origin)
            for i in self.indices
        ]) - origin

    def relativeFaceCentres(self, mesh, index):
        """Face centres (one more than the cells) relative to face `index`."""
        origin = mesh.faceCentre(index)
        offsets = np.append(self.indices, self.indices[-1] + 1)
        return np.array([
            self._unwrap(mesh.faceCentre(index + i), i, origin)
            for i in offsets
        ]) - origin

    def __len__(self):
        return len(self.indices)

    def __radd__(self, other):
        return other + self.indices

    def __repr__(self):
        return str(self.indices)
| [
"numpy.append",
"numpy.array"
] | [((267, 284), 'numpy.array', 'np.array', (['indices'], {}), '(indices)\n', (275, 284), True, 'import numpy as np\n'), ((618, 630), 'numpy.array', 'np.array', (['Cs'], {}), '(Cs)\n', (626, 630), True, 'import numpy as np\n'), ((1119, 1164), 'numpy.append', 'np.append', (['self.indices', '(self.indices[-1] + 1)'], {}), '(self.indices, self.indices[-1] + 1)\n', (1128, 1164), True, 'import numpy as np\n'), ((972, 984), 'numpy.array', 'np.array', (['Cs'], {}), '(Cs)\n', (980, 984), True, 'import numpy as np\n'), ((1374, 1387), 'numpy.array', 'np.array', (['Cfs'], {}), '(Cfs)\n', (1382, 1387), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import gzip
import numpy
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
class DataSet(object):
  """In-memory dataset of images and labels served in shuffled mini-batches.

  Maintains an epoch counter and an intra-epoch cursor so that successive
  ``next_batch`` calls walk through the data, reshuffling between epochs.
  """
  def __init__(self,
               images,
               labels,
               fake_data=False,
               one_hot=False,
               dtype=dtypes.float32,
               reshape=False,
               seed=None):
    """Construct a DataSet.
    one_hot arg is used only if fake_data is true. `dtype` can be either
    `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
    `[0, 1]`. Seed arg provides for convenient deterministic testing.
    """
    seed1, seed2 = random_seed.get_seed(seed)
    # If op level seed is not set, use whatever graph level seed is returned
    numpy.random.seed(seed1 if seed is None else seed2)
    dtype = dtypes.as_dtype(dtype).base_dtype
    if dtype not in (dtypes.uint8, dtypes.float32):
      raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
                      dtype)
    if fake_data:
      self._num_examples = 10000
      # NOTE: one_hot is only stored on the fake-data path; it is read by
      # next_batch(fake_data=True) below.
      self.one_hot = one_hot
    else:
      assert images.shape[0] == labels.shape[0], (
          'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
      self._num_examples = images.shape[0]
      # Convert shape from [num examples, rows, columns, depth]
      # to [num examples, rows*columns] (assuming depth == 1)
      if reshape:
        assert images.shape[3] == 1
        images = images.reshape(images.shape[0],
                                images.shape[1] * images.shape[2])
      if dtype == dtypes.float32:
        # Convert from [0, 255] -> [0.0, 1.0].
        images = images.astype(numpy.float32)
        images = numpy.multiply(images, 1.0 / 255.0)
    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0
  @property
  def images(self):
    return self._images
  @property
  def labels(self):
    return self._labels
  @property
  def num_examples(self):
    return self._num_examples
  @property
  def epochs_completed(self):
    return self._epochs_completed
  def next_batch(self, batch_size, fake_data=False, shuffle=True):
    """Return the next `batch_size` examples from this data set."""
    if fake_data:
      # Synthetic 28x28 (=784) image of ones with a constant label.
      fake_image = [1] * 784
      if self.one_hot:
        fake_label = [1] + [0] * 9
      else:
        fake_label = 0
      return [fake_image for _ in xrange(batch_size)], [
          fake_label for _ in xrange(batch_size)
      ]
    start = self._index_in_epoch
    # Shuffle for the first epoch
    if self._epochs_completed == 0 and start == 0 and shuffle:
      perm0 = numpy.arange(self._num_examples)
      numpy.random.shuffle(perm0)
      self._images = self.images[perm0]
      self._labels = self.labels[perm0]
    # Go to the next epoch
    if start + batch_size > self._num_examples:
      # Finished epoch
      self._epochs_completed += 1
      # Get the rest examples in this epoch
      rest_num_examples = self._num_examples - start
      images_rest_part = self._images[start:self._num_examples]
      labels_rest_part = self._labels[start:self._num_examples]
      # Shuffle the data
      if shuffle:
        perm = numpy.arange(self._num_examples)
        numpy.random.shuffle(perm)
        self._images = self.images[perm]
        self._labels = self.labels[perm]
      # Start next epoch: the returned batch is the tail of the old epoch
      # concatenated with the head of the freshly shuffled new epoch.
      start = 0
      self._index_in_epoch = batch_size - rest_num_examples
      end = self._index_in_epoch
      images_new_part = self._images[start:end]
      labels_new_part = self._labels[start:end]
      return numpy.concatenate((images_rest_part, images_new_part), axis=0) , numpy.concatenate((labels_rest_part, labels_new_part), axis=0)
    else:
      self._index_in_epoch += batch_size
      end = self._index_in_epoch
      return self._images[start:end], self._labels[start:end]
def read_data_sets(train_dir,
                   fake_data=False,
                   one_hot=False,
                   dtype=dtypes.float32,
                   reshape=False,
                   validation_size=5000,
                   seed=None):
  """Load train/validation/test DataSets from a numpy archive.

  The archive at `train_dir` must contain the arrays 'train_images',
  'train_labels', 'test_images' and 'test_labels'. The first
  `validation_size` training examples are carved off into the validation
  set; the remainder becomes the training set.

  Raises:
    ValueError: if `validation_size` is negative or exceeds the number of
      training examples.
  """
  if fake_data:
    def fake():
      return DataSet(
          [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)
    train = fake()
    validation = fake()
    test = fake()
    return base.Datasets(train=train, validation=validation, test=test)
  data = np.load(train_dir)
  test_images = data['test_images']
  test_labels = data['test_labels']
  train_images = data['train_images']
  train_labels = data['train_labels']
  if not 0 <= validation_size <= len(train_images):
    raise ValueError(
        'Validation size should be between 0 and {}. Received: {}.'.format(
            len(train_images), validation_size))
  options = dict(dtype=dtype, reshape=reshape, seed=seed)
  # BUG FIX: the validation set previously paired training images with
  # *test-set* labels (validation_labels[:5000]) and ignored the
  # validation_size parameter. Split train data consistently instead.
  train = DataSet(train_images[validation_size:],
                  train_labels[validation_size:], **options)
  validation = DataSet(train_images[:validation_size],
                       train_labels[:validation_size], **options)
  test = DataSet(test_images, test_labels, **options)
  return base.Datasets(train=train, validation=validation, test=test)
| [
"numpy.multiply",
"tensorflow.python.framework.dtypes.as_dtype",
"six.moves.xrange",
"tensorflow.contrib.learn.python.learn.datasets.base.Datasets",
"numpy.random.seed",
"numpy.concatenate",
"tensorflow.python.framework.random_seed.get_seed",
"numpy.load",
"numpy.arange",
"numpy.random.shuffle"
] | [((4669, 4687), 'numpy.load', 'np.load', (['train_dir'], {}), '(train_dir)\n', (4676, 4687), True, 'import numpy as np\n'), ((5143, 5203), 'tensorflow.contrib.learn.python.learn.datasets.base.Datasets', 'base.Datasets', ([], {'train': 'train', 'validation': 'validation', 'test': 'test'}), '(train=train, validation=validation, test=test)\n', (5156, 5203), False, 'from tensorflow.contrib.learn.python.learn.datasets import base\n'), ((909, 935), 'tensorflow.python.framework.random_seed.get_seed', 'random_seed.get_seed', (['seed'], {}), '(seed)\n', (929, 935), False, 'from tensorflow.python.framework import random_seed\n'), ((1017, 1068), 'numpy.random.seed', 'numpy.random.seed', (['(seed1 if seed is None else seed2)'], {}), '(seed1 if seed is None else seed2)\n', (1034, 1068), False, 'import numpy\n'), ((4598, 4658), 'tensorflow.contrib.learn.python.learn.datasets.base.Datasets', 'base.Datasets', ([], {'train': 'train', 'validation': 'validation', 'test': 'test'}), '(train=train, validation=validation, test=test)\n', (4611, 4658), False, 'from tensorflow.contrib.learn.python.learn.datasets import base\n'), ((1081, 1103), 'tensorflow.python.framework.dtypes.as_dtype', 'dtypes.as_dtype', (['dtype'], {}), '(dtype)\n', (1096, 1103), False, 'from tensorflow.python.framework import dtypes\n'), ((2917, 2949), 'numpy.arange', 'numpy.arange', (['self._num_examples'], {}), '(self._num_examples)\n', (2929, 2949), False, 'import numpy\n'), ((2956, 2983), 'numpy.random.shuffle', 'numpy.random.shuffle', (['perm0'], {}), '(perm0)\n', (2976, 2983), False, 'import numpy\n'), ((1975, 2010), 'numpy.multiply', 'numpy.multiply', (['images', '(1.0 / 255.0)'], {}), '(images, 1.0 / 255.0)\n', (1989, 2010), False, 'import numpy\n'), ((3479, 3511), 'numpy.arange', 'numpy.arange', (['self._num_examples'], {}), '(self._num_examples)\n', (3491, 3511), False, 'import numpy\n'), ((3520, 3546), 'numpy.random.shuffle', 'numpy.random.shuffle', (['perm'], {}), '(perm)\n', (3540, 3546), False, 
'import numpy\n'), ((3872, 3934), 'numpy.concatenate', 'numpy.concatenate', (['(images_rest_part, images_new_part)'], {'axis': '(0)'}), '((images_rest_part, images_new_part), axis=0)\n', (3889, 3934), False, 'import numpy\n'), ((3937, 3999), 'numpy.concatenate', 'numpy.concatenate', (['(labels_rest_part, labels_new_part)'], {'axis': '(0)'}), '((labels_rest_part, labels_new_part), axis=0)\n', (3954, 3999), False, 'import numpy\n'), ((2693, 2711), 'six.moves.xrange', 'xrange', (['batch_size'], {}), '(batch_size)\n', (2699, 2711), False, 'from six.moves import xrange\n'), ((2746, 2764), 'six.moves.xrange', 'xrange', (['batch_size'], {}), '(batch_size)\n', (2752, 2764), False, 'from six.moves import xrange\n')] |
import utils
from os import path
import numpy as np
from scipy import stats, sparse
from paris_cluster import ParisClusterer
from sklearn.linear_model import LogisticRegression
from tqdm import tqdm
import pandas as pd
##Set a random seed to make it reproducible!
np.random.seed(utils.getSeed())
#load up data:
x, y = utils.load_feature_and_label_matrices(type='morgan')
##select a subset of columns of 'y' to use as a test matrix:
#this is the same each time thanks to setting the random.seed.
col_indices = np.random.choice(226, 100, replace=False)
x_, y_ = utils.get_subset(x, y, indices=col_indices)
#load CATS features as well:
catsMatrix, _ = utils.load_feature_and_label_matrices(type='cats')
catsMatrix_, __ = utils.get_subset(catsMatrix, y, indices=col_indices)
#Open a memory mapped distance matrix.
#We do this because the pairwise distance matrix for 100 targets does not fit in memory.
#It is nearly 100% dense and has 117747*117747 = 13864356009 elements. This is also
#Why it uses float16 (reducing the required storage space to ~26GB, c.f. 52GB for float32).
morgan_distance_matrix = np.memmap('./processed_data/distance_matrices/morgan_distance_matrix.dat', dtype=np.float16,
                      shape=(x_.shape[0], x_.shape[0]))
cats_distance_matrix = np.memmap('./processed_data/distance_matrices/cats_distance_matrix.dat', dtype=np.float16,
                      shape=(x_.shape[0], x_.shape[0]))
#Hierarchical (Paris) clustering of the ligands using a precomputed
#weighted adjacency matrix built from the ECFP fingerprints.
clusterer = ParisClusterer(x_.toarray())
clusterer.loadAdjacency('./processed_data/distance_matrices/wadj_ecfp.npz')
clusterer.fit()
#Store all the results in these:
#(AVE bias plus AP/MCC/EF performance metrics for both fingerprint types,
# before trimming, after trimming wrt Morgan, and after trimming wrt CATS)
df_before_trim = pd.DataFrame(columns = ['ave_cats', 'ave_morgan',
                             'ap_cats', 'ap_morgan',
                             'mcc_cats', 'mcc_morgan',
                             'ef_cats', 'ef_morgan'])
df_after_morgan_trim = pd.DataFrame(columns = ['ave_cats', 'ave_morgan',
                             'ap_cats', 'ap_morgan',
                             'mcc_cats', 'mcc_morgan',
                             'ef_cats', 'ef_morgan'])
df_after_cats_trim = pd.DataFrame(columns = ['ave_cats', 'ave_morgan',
                             'ap_cats', 'ap_morgan',
                             'mcc_cats', 'mcc_morgan',
                             'ef_cats', 'ef_morgan'])
# NOTE(review): these four accumulators are never written or read in this
# chunk -- presumably leftovers from an earlier version; confirm before removing.
targets = list()
cutoffs = list()
aves = list()
sizes = list()
loc_counter = 0
for _ in tqdm(range(1500), smoothing=0):
    #choose a random target:
    idx = np.random.choice(y_.shape[1])
    #choose a random cluster size upper limit and cluster:
    clusterSize = np.random.randint(300,10000)
    clusterer.labels_ = utils.cut_balanced(clusterer.paris.dendrogram_, clusterSize)
    clabels = np.unique(clusterer.labels_)
    #clusters containing at least one active ligand for this target:
    pos_labels = np.unique(clusterer.labels_[y_[:,idx]==1])
    neg_labels = clabels[~np.isin(clabels, pos_labels)]
    if min(len(pos_labels), len(neg_labels))<2:
        print('Not enough positive clusters to split')
        continue
    test_clusters, train_clusters = utils.split_clusters(pos_labels, neg_labels, 0.2, [0.1,0.3], shuffle=True)
    actives_test_idx, actives_train_idx, inactives_test_idx, inactives_train_idx = utils.get_four_matrices(y_,idx,clusterer,test_clusters,train_clusters)
    print(actives_test_idx.shape[0], actives_train_idx.shape[0], inactives_test_idx.shape[0], inactives_train_idx.shape[0])
    print(min([actives_test_idx.shape[0], actives_train_idx.shape[0], inactives_test_idx.shape[0], inactives_train_idx.shape[0]]))
    if min([actives_test_idx.shape[0], actives_train_idx.shape[0], inactives_test_idx.shape[0], inactives_train_idx.shape[0]])<20:
        print('Not enough ligands to train and test')
        continue
    ave_morgan_before= utils.calc_AVE_quick(morgan_distance_matrix, actives_train_idx, actives_test_idx,inactives_train_idx, inactives_test_idx)
    if ave_morgan_before>0.2: #debiasing is unlikely to get a low AVE from this point. So this evaluation is not useful to us.
        continue
    ######
    ###Trim wrt ECFP
    #####
    #trim from the inactives/train matrix first:
    inactive_dmat = morgan_distance_matrix[inactives_test_idx]
    print('New inactives train_idx', inactive_dmat.shape, inactives_train_idx.shape, inactives_test_idx.shape)
    new_inactives_train_idx = utils.trim(inactive_dmat,
                               inactives_train_idx,
                               inactives_test_idx,
                               fraction_to_trim=0.2)
    #then trim from the actives/train matrix:
    active_dmat = morgan_distance_matrix[actives_test_idx]
    print('New actives train_idx', active_dmat.shape, actives_train_idx.shape, actives_test_idx.shape)
    new_actives_train_idx = utils.trim(active_dmat,
                             actives_train_idx,
                             actives_test_idx,
                             fraction_to_trim=0.2)
    ave_morgan_after= utils.calc_AVE_quick(morgan_distance_matrix, new_actives_train_idx, actives_test_idx,new_inactives_train_idx, inactives_test_idx)
    ######
    ###Evaluate the untrimmed data:
    ######
    ave_cats_before= utils.calc_AVE_quick(cats_distance_matrix, actives_train_idx, actives_test_idx,inactives_train_idx, inactives_test_idx)
    results_morgan = utils.evaluate_split(x_, y_, idx, actives_train_idx, actives_test_idx, inactives_train_idx, inactives_test_idx, auroc=False, ap=True, mcc=True, ef=True)
    results_cats = utils.evaluate_split(catsMatrix_, y_, idx, actives_train_idx, actives_test_idx, inactives_train_idx, inactives_test_idx, auroc=False, ap=True, mcc=True, ef=True)
    df_before_trim.loc[loc_counter] = [ave_cats_before, ave_morgan_before,
                            results_cats['ap'], results_morgan['ap'],
                            results_cats['mcc'], results_morgan['mcc'],
                            results_cats['ef'], results_morgan['ef']]
    ######
    ###Evaluate the data trimmed wrt ECFP
    ######
    results_morgan = utils.evaluate_split(x_, y_, idx, new_actives_train_idx, actives_test_idx, new_inactives_train_idx, inactives_test_idx, auroc=False, ap=True, mcc=True, ef=True)
    results_cats = utils.evaluate_split(catsMatrix_, y_, idx, new_actives_train_idx, actives_test_idx, new_inactives_train_idx, inactives_test_idx, auroc=False, ap=True, mcc=True, ef=True)
    ave_cats_after= utils.calc_AVE_quick(cats_distance_matrix, new_actives_train_idx, actives_test_idx,new_inactives_train_idx, inactives_test_idx)
    df_after_morgan_trim.loc[loc_counter] = [ave_cats_after, ave_morgan_after,
                            results_cats['ap'], results_morgan['ap'],
                            results_cats['mcc'], results_morgan['mcc'],
                            results_cats['ef'], results_morgan['ef']]
    ######
    ###Trim wrt CATS
    #####
    #trim from the inactives/train matrix first:
    inactive_dmat = cats_distance_matrix[inactives_test_idx]
    print('New inactives train_idx', inactive_dmat.shape, inactives_train_idx.shape, inactives_test_idx.shape)
    new_inactives_train_idx = utils.trim(inactive_dmat,
                               inactives_train_idx,
                               inactives_test_idx,
                               fraction_to_trim=0.2)
    #then trim from the actives/train matrix:
    active_dmat = cats_distance_matrix[actives_test_idx]
    print('New actives train_idx', active_dmat.shape, actives_train_idx.shape, actives_test_idx.shape)
    new_actives_train_idx = utils.trim(active_dmat,
                             actives_train_idx,
                             actives_test_idx,
                             fraction_to_trim=0.2)
    ######
    ###Evaluate the data trimmed wrt CATS
    ######
    results_morgan = utils.evaluate_split(x_, y_, idx, new_actives_train_idx, actives_test_idx, new_inactives_train_idx, inactives_test_idx, auroc=False, ap=True, mcc=True, ef=True)
    results_cats = utils.evaluate_split(catsMatrix_, y_, idx, new_actives_train_idx, actives_test_idx, new_inactives_train_idx, inactives_test_idx, auroc=False, ap=True, mcc=True, ef=True)
    ave_morgan= utils.calc_AVE_quick(morgan_distance_matrix, new_actives_train_idx, actives_test_idx,new_inactives_train_idx, inactives_test_idx)
    ave_cats= utils.calc_AVE_quick(cats_distance_matrix, new_actives_train_idx, actives_test_idx,new_inactives_train_idx, inactives_test_idx)
    df_after_cats_trim.loc[loc_counter] = [ave_cats, ave_morgan,
                            results_cats['ap'], results_morgan['ap'],
                            results_cats['mcc'], results_morgan['mcc'],
                            results_cats['ef'], results_morgan['ef']]
    #save data:
    #(written every iteration so results survive an interrupted run)
    df_before_trim.to_csv('./processed_data/graph_cluster_both/df_before_trim.csv')
    df_after_morgan_trim.to_csv('./processed_data/graph_cluster_both/df_after_morgan_trim.csv')
    df_after_cats_trim.to_csv('./processed_data/graph_cluster_both/df_after_cats_trim.csv')
    loc_counter += 1
| [
"numpy.unique",
"utils.getSeed",
"utils.get_subset",
"numpy.random.choice",
"utils.calc_AVE_quick",
"utils.trim",
"numpy.memmap",
"utils.evaluate_split",
"utils.split_clusters",
"numpy.isin",
"numpy.random.randint",
"utils.get_four_matrices",
"pandas.DataFrame",
"utils.cut_balanced",
"ut... | [((320, 372), 'utils.load_feature_and_label_matrices', 'utils.load_feature_and_label_matrices', ([], {'type': '"""morgan"""'}), "(type='morgan')\n", (357, 372), False, 'import utils\n'), ((511, 552), 'numpy.random.choice', 'np.random.choice', (['(226)', '(100)'], {'replace': '(False)'}), '(226, 100, replace=False)\n', (527, 552), True, 'import numpy as np\n'), ((562, 605), 'utils.get_subset', 'utils.get_subset', (['x', 'y'], {'indices': 'col_indices'}), '(x, y, indices=col_indices)\n', (578, 605), False, 'import utils\n'), ((652, 702), 'utils.load_feature_and_label_matrices', 'utils.load_feature_and_label_matrices', ([], {'type': '"""cats"""'}), "(type='cats')\n", (689, 702), False, 'import utils\n'), ((721, 773), 'utils.get_subset', 'utils.get_subset', (['catsMatrix', 'y'], {'indices': 'col_indices'}), '(catsMatrix, y, indices=col_indices)\n', (737, 773), False, 'import utils\n'), ((1105, 1235), 'numpy.memmap', 'np.memmap', (['"""./processed_data/distance_matrices/morgan_distance_matrix.dat"""'], {'dtype': 'np.float16', 'shape': '(x_.shape[0], x_.shape[0])'}), "('./processed_data/distance_matrices/morgan_distance_matrix.dat',\n dtype=np.float16, shape=(x_.shape[0], x_.shape[0]))\n", (1114, 1235), True, 'import numpy as np\n'), ((1269, 1397), 'numpy.memmap', 'np.memmap', (['"""./processed_data/distance_matrices/cats_distance_matrix.dat"""'], {'dtype': 'np.float16', 'shape': '(x_.shape[0], x_.shape[0])'}), "('./processed_data/distance_matrices/cats_distance_matrix.dat',\n dtype=np.float16, shape=(x_.shape[0], x_.shape[0]))\n", (1278, 1397), True, 'import numpy as np\n'), ((1595, 1721), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['ave_cats', 'ave_morgan', 'ap_cats', 'ap_morgan', 'mcc_cats', 'mcc_morgan',\n 'ef_cats', 'ef_morgan']"}), "(columns=['ave_cats', 'ave_morgan', 'ap_cats', 'ap_morgan',\n 'mcc_cats', 'mcc_morgan', 'ef_cats', 'ef_morgan'])\n", (1607, 1721), True, 'import pandas as pd\n'), ((1866, 1992), 'pandas.DataFrame', 'pd.DataFrame', 
([], {'columns': "['ave_cats', 'ave_morgan', 'ap_cats', 'ap_morgan', 'mcc_cats', 'mcc_morgan',\n 'ef_cats', 'ef_morgan']"}), "(columns=['ave_cats', 'ave_morgan', 'ap_cats', 'ap_morgan',\n 'mcc_cats', 'mcc_morgan', 'ef_cats', 'ef_morgan'])\n", (1878, 1992), True, 'import pandas as pd\n'), ((2153, 2279), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['ave_cats', 'ave_morgan', 'ap_cats', 'ap_morgan', 'mcc_cats', 'mcc_morgan',\n 'ef_cats', 'ef_morgan']"}), "(columns=['ave_cats', 'ave_morgan', 'ap_cats', 'ap_morgan',\n 'mcc_cats', 'mcc_morgan', 'ef_cats', 'ef_morgan'])\n", (2165, 2279), True, 'import pandas as pd\n'), ((280, 295), 'utils.getSeed', 'utils.getSeed', ([], {}), '()\n', (293, 295), False, 'import utils\n'), ((2576, 2605), 'numpy.random.choice', 'np.random.choice', (['y_.shape[1]'], {}), '(y_.shape[1])\n', (2592, 2605), True, 'import numpy as np\n'), ((2684, 2713), 'numpy.random.randint', 'np.random.randint', (['(300)', '(10000)'], {}), '(300, 10000)\n', (2701, 2713), True, 'import numpy as np\n'), ((2737, 2797), 'utils.cut_balanced', 'utils.cut_balanced', (['clusterer.paris.dendrogram_', 'clusterSize'], {}), '(clusterer.paris.dendrogram_, clusterSize)\n', (2755, 2797), False, 'import utils\n'), ((2813, 2841), 'numpy.unique', 'np.unique', (['clusterer.labels_'], {}), '(clusterer.labels_)\n', (2822, 2841), True, 'import numpy as np\n'), ((2859, 2904), 'numpy.unique', 'np.unique', (['clusterer.labels_[y_[:, idx] == 1]'], {}), '(clusterer.labels_[y_[:, idx] == 1])\n', (2868, 2904), True, 'import numpy as np\n'), ((3115, 3190), 'utils.split_clusters', 'utils.split_clusters', (['pos_labels', 'neg_labels', '(0.2)', '[0.1, 0.3]'], {'shuffle': '(True)'}), '(pos_labels, neg_labels, 0.2, [0.1, 0.3], shuffle=True)\n', (3135, 3190), False, 'import utils\n'), ((3274, 3348), 'utils.get_four_matrices', 'utils.get_four_matrices', (['y_', 'idx', 'clusterer', 'test_clusters', 'train_clusters'], {}), '(y_, idx, clusterer, test_clusters, train_clusters)\n', (3297, 3348), 
False, 'import utils\n'), ((3849, 3975), 'utils.calc_AVE_quick', 'utils.calc_AVE_quick', (['morgan_distance_matrix', 'actives_train_idx', 'actives_test_idx', 'inactives_train_idx', 'inactives_test_idx'], {}), '(morgan_distance_matrix, actives_train_idx,\n actives_test_idx, inactives_train_idx, inactives_test_idx)\n', (3869, 3975), False, 'import utils\n'), ((4418, 4510), 'utils.trim', 'utils.trim', (['inactive_dmat', 'inactives_train_idx', 'inactives_test_idx'], {'fraction_to_trim': '(0.2)'}), '(inactive_dmat, inactives_train_idx, inactives_test_idx,\n fraction_to_trim=0.2)\n', (4428, 4510), False, 'import utils\n'), ((4869, 4955), 'utils.trim', 'utils.trim', (['active_dmat', 'actives_train_idx', 'actives_test_idx'], {'fraction_to_trim': '(0.2)'}), '(active_dmat, actives_train_idx, actives_test_idx,\n fraction_to_trim=0.2)\n', (4879, 4955), False, 'import utils\n'), ((5086, 5220), 'utils.calc_AVE_quick', 'utils.calc_AVE_quick', (['morgan_distance_matrix', 'new_actives_train_idx', 'actives_test_idx', 'new_inactives_train_idx', 'inactives_test_idx'], {}), '(morgan_distance_matrix, new_actives_train_idx,\n actives_test_idx, new_inactives_train_idx, inactives_test_idx)\n', (5106, 5220), False, 'import utils\n'), ((5301, 5425), 'utils.calc_AVE_quick', 'utils.calc_AVE_quick', (['cats_distance_matrix', 'actives_train_idx', 'actives_test_idx', 'inactives_train_idx', 'inactives_test_idx'], {}), '(cats_distance_matrix, actives_train_idx,\n actives_test_idx, inactives_train_idx, inactives_test_idx)\n', (5321, 5425), False, 'import utils\n'), ((5442, 5602), 'utils.evaluate_split', 'utils.evaluate_split', (['x_', 'y_', 'idx', 'actives_train_idx', 'actives_test_idx', 'inactives_train_idx', 'inactives_test_idx'], {'auroc': '(False)', 'ap': '(True)', 'mcc': '(True)', 'ef': '(True)'}), '(x_, y_, idx, actives_train_idx, actives_test_idx,\n inactives_train_idx, inactives_test_idx, auroc=False, ap=True, mcc=True,\n ef=True)\n', (5462, 5602), False, 'import utils\n'), ((5614, 5783), 
'utils.evaluate_split', 'utils.evaluate_split', (['catsMatrix_', 'y_', 'idx', 'actives_train_idx', 'actives_test_idx', 'inactives_train_idx', 'inactives_test_idx'], {'auroc': '(False)', 'ap': '(True)', 'mcc': '(True)', 'ef': '(True)'}), '(catsMatrix_, y_, idx, actives_train_idx,\n actives_test_idx, inactives_train_idx, inactives_test_idx, auroc=False,\n ap=True, mcc=True, ef=True)\n', (5634, 5783), False, 'import utils\n'), ((6183, 6352), 'utils.evaluate_split', 'utils.evaluate_split', (['x_', 'y_', 'idx', 'new_actives_train_idx', 'actives_test_idx', 'new_inactives_train_idx', 'inactives_test_idx'], {'auroc': '(False)', 'ap': '(True)', 'mcc': '(True)', 'ef': '(True)'}), '(x_, y_, idx, new_actives_train_idx, actives_test_idx,\n new_inactives_train_idx, inactives_test_idx, auroc=False, ap=True, mcc=\n True, ef=True)\n', (6203, 6352), False, 'import utils\n'), ((6363, 6541), 'utils.evaluate_split', 'utils.evaluate_split', (['catsMatrix_', 'y_', 'idx', 'new_actives_train_idx', 'actives_test_idx', 'new_inactives_train_idx', 'inactives_test_idx'], {'auroc': '(False)', 'ap': '(True)', 'mcc': '(True)', 'ef': '(True)'}), '(catsMatrix_, y_, idx, new_actives_train_idx,\n actives_test_idx, new_inactives_train_idx, inactives_test_idx, auroc=\n False, ap=True, mcc=True, ef=True)\n', (6383, 6541), False, 'import utils\n'), ((6553, 6685), 'utils.calc_AVE_quick', 'utils.calc_AVE_quick', (['cats_distance_matrix', 'new_actives_train_idx', 'actives_test_idx', 'new_inactives_train_idx', 'inactives_test_idx'], {}), '(cats_distance_matrix, new_actives_train_idx,\n actives_test_idx, new_inactives_train_idx, inactives_test_idx)\n', (6573, 6685), False, 'import utils\n'), ((7300, 7392), 'utils.trim', 'utils.trim', (['inactive_dmat', 'inactives_train_idx', 'inactives_test_idx'], {'fraction_to_trim': '(0.2)'}), '(inactive_dmat, inactives_train_idx, inactives_test_idx,\n fraction_to_trim=0.2)\n', (7310, 7392), False, 'import utils\n'), ((7749, 7835), 'utils.trim', 'utils.trim', 
(['active_dmat', 'actives_train_idx', 'actives_test_idx'], {'fraction_to_trim': '(0.2)'}), '(active_dmat, actives_train_idx, actives_test_idx,\n fraction_to_trim=0.2)\n', (7759, 7835), False, 'import utils\n'), ((8029, 8198), 'utils.evaluate_split', 'utils.evaluate_split', (['x_', 'y_', 'idx', 'new_actives_train_idx', 'actives_test_idx', 'new_inactives_train_idx', 'inactives_test_idx'], {'auroc': '(False)', 'ap': '(True)', 'mcc': '(True)', 'ef': '(True)'}), '(x_, y_, idx, new_actives_train_idx, actives_test_idx,\n new_inactives_train_idx, inactives_test_idx, auroc=False, ap=True, mcc=\n True, ef=True)\n', (8049, 8198), False, 'import utils\n'), ((8209, 8387), 'utils.evaluate_split', 'utils.evaluate_split', (['catsMatrix_', 'y_', 'idx', 'new_actives_train_idx', 'actives_test_idx', 'new_inactives_train_idx', 'inactives_test_idx'], {'auroc': '(False)', 'ap': '(True)', 'mcc': '(True)', 'ef': '(True)'}), '(catsMatrix_, y_, idx, new_actives_train_idx,\n actives_test_idx, new_inactives_train_idx, inactives_test_idx, auroc=\n False, ap=True, mcc=True, ef=True)\n', (8229, 8387), False, 'import utils\n'), ((8395, 8529), 'utils.calc_AVE_quick', 'utils.calc_AVE_quick', (['morgan_distance_matrix', 'new_actives_train_idx', 'actives_test_idx', 'new_inactives_train_idx', 'inactives_test_idx'], {}), '(morgan_distance_matrix, new_actives_train_idx,\n actives_test_idx, new_inactives_train_idx, inactives_test_idx)\n', (8415, 8529), False, 'import utils\n'), ((8539, 8671), 'utils.calc_AVE_quick', 'utils.calc_AVE_quick', (['cats_distance_matrix', 'new_actives_train_idx', 'actives_test_idx', 'new_inactives_train_idx', 'inactives_test_idx'], {}), '(cats_distance_matrix, new_actives_train_idx,\n actives_test_idx, new_inactives_train_idx, inactives_test_idx)\n', (8559, 8671), False, 'import utils\n'), ((2928, 2956), 'numpy.isin', 'np.isin', (['clabels', 'pos_labels'], {}), '(clabels, pos_labels)\n', (2935, 2956), True, 'import numpy as np\n')] |
# import sys
# sys.path.append('..')
# sys.path.append('../..')
import numpy as np
from pulse2percept import electrode2currentmap as e2cm
from pulse2percept import effectivecurrent2brightness as ec2b
from pulse2percept import utils
from pulse2percept import files as n2sf
# import npy2savedformats as n2sf
import matplotlib.pyplot as plt
import importlib as imp
#imp.reload(n2sf)
# Recreation of the Dorn 2013 paper, where subjects had to guess the direction of motion of a moving bar
# Surgeons were instructed to place the array centered over the macula (0, 0).
# Each of the 60 electrodes (in a 6 × 10 grid) were 200 μm in diameter
# The array (along the diagonal) covered an area of retina corresponding to
#about 20° in visual angle assuming 293 μm on the retina equates to 1° of
#visual angle. a=1.72, sqrt((a*6)^2+(a*10)^2)=20 so the 10 side is 17.2 degrees,
#the 6 side is 10.32 degrees
# Create electrode array for the Argus 2
# 293 μm equals 1 degree, electrode spacing is done in microns
# when you include the radius of the electrode the electrode centers span +/- 2362 and +/- 1312
# based on Ahuja et al 2013. Factors affecting perceptual thresholds in Argus ii retinal prosthesis subjects
# (figure 4, pdf is in retina folder) the mean height from the array should be 179.6 μm
# with a range of ~50-750μm
modelver='Nanduri' # this is the standard model based on the Nanduri 2012 paper.
# Alternative model is currently the 'Krishnan' model which assumes that charge accumulation
# occurs at the electrode, not neurally. The models are in fact metamers of each other,

# Build the electrode layout on a regular grid (all positions in microns).
xlist=[]
ylist=[]
rlist=[] #electrode radius, microns
hlist=[] # lift of electrode from retinal surface, microns
e_spacing=525 # spacing in microns

for x in np.arange(-2362, 2364, e_spacing):
    for y in np.arange(-1312, 1314, e_spacing):
        xlist.append(x)
        ylist.append(y)
        rlist.append(100) # electrode radius
        hlist.append(179.6)
        # electrode lift from retinal surface,
        # epiretinal array - distance to the ganglion layer
        # subretinal array - distance to the bipolar layer

layers=['INL', 'NFL']
e_all = e2cm.ElectrodeArray(rlist,xlist,ylist,hlist, ptype='subretinal')
del xlist, ylist, rlist, hlist

# create retina, input variables include the sampling and how much of the retina is simulated, in microns
# (0,0 represents the fovea)
retinaname='1700by2900L80S150'
r = e2cm.Retina(axon_map=None,
                sampling=150, ylo=-1700, yhi=1700, xlo=-2900, xhi=2900, axon_lambda=8)

# Receptive field of each electrode on the retinal grid.
e_rf=[]
for e in e_all.electrodes:
    e_rf.append(e2cm.receptive_field(e, r.gridx, r.gridy,e_spacing))

[ecs, cs] = r.electrode_ecs(e_all, integrationtype='maxrule')
tm = ec2b.TemporalModel()

# create movie
# original screen was [52.74, 63.32] visual angle, res=[768 ,1024] # resolution of screen
# pixperdeg=degscreen/res
# no need to simulate the whole movie, just match it to the electrode array, xhi+xlo/294 (microns per degree)
degscreen=[10.32+5, 17.2+5] # match to array visual angle,
res=[e_rf[0].shape[0],e_rf[1].shape[1]] # resolution of screen
# NOTE(review): res mixes shape[0] of e_rf[0] with shape[1] of e_rf[1];
# correct only if all receptive fields share one shape -- confirm.
fps=30
# the bar is 1.4 inches in width at 12 inches,
# corresponds to 6.67 degrees visual angle
bar_width=6.77
[X,Y]=np.meshgrid(np.linspace(-degscreen[1]/2, degscreen[1]/2, res[1]),
          np.linspace(-degscreen[0]/2, degscreen[0]/2, res[0]))

for o in np.arange(0, 2*np.pi,2): #DEBUG 2*np.pi/4): # each orientation
    # Project the grid onto the motion direction: the bar sweeps along M.
    M=np.cos(o)*X +np.sin(o)*Y
    # for sp in range (32:32): # DEBUG each speed, eventually 8:32
    for sp in np.arange(32, 33, 1): #(7.9, 31.6, 3):
        # Pre-allocate enough frames for the slowest sweep considered.
        movie=np.zeros((res[0],res[1], int(np.ceil((70/5)*30))))
        st=np.min(M)
        fm_ct=1
        while (st<np.max(M)):
            img=np.zeros(M.shape)
            ind=np.where((M>st) & (M<st+bar_width))
            img[ind]=1
            movie[:,:, fm_ct]=img
            fm_ct=fm_ct+1
            st=st+(sp/fps)
        # NOTE(review): frames are written from index 1 and trimmed to
        # 0:fm_ct-1, which keeps the all-zero frame 0 and drops the last
        # written frame -- looks like an off-by-one; confirm before changing.
        movie=movie[:,:, 0:fm_ct-1]
        moviedur=movie.shape[2]/fps
        # NOTE(review): deleting M here is only safe because the sp loop
        # currently runs a single iteration; with more speeds the second
        # iteration would raise NameError.
        del M, img
        # create pulsetrain corresponding to the movie
        pt=[]
        for rf in e_rf:
            rflum= e2cm.retinalmovie2electrodtimeseries(rf, movie)
            ptrain=e2cm.Movie2Pulsetrain(rflum)
            # plt.plot(rflum) plt.plot(pt[ct].data) plt.plot(ptrain.data)
            pt.append(ptrain)
        del movie
        rsample=(1/fps)*pt[0].tsample # factor by which movies resampled for presentation
        # (removed stray 'boom' statement here -- a leftover debug halt that
        # raised NameError and stopped the script before simulation)
        brightness_movie = ec2b.pulse2percept(tm, ecs, r, pt, rsample)
        # FILTERING BY ON OFF CELLS
        # foveal vision is ~60 cpd. 293um on the retina corresponds to 1 degree, so the smallest receptive field probably covers 293/60 ~=5um,
        # we set the largest as being 10 times bigger than that numbers roughly based on
        # Field GD & <NAME> (2007) Information processing in the primate retina: circuitry and coding. Annual Review of Neuroscience 30:1-30
        # Chose 30 different sizes fairly randomly
        retinasizes=np.unique(np.ceil(np.array(np.linspace(5, 50, 15))/r.sampling))
        retinasizes = np.array([i for i in retinasizes if i >= 2])
        [onmovie, offmovie] = ec2b.onoffFiltering(brightness_movie.data, retinasizes)
        [normalmovie, prostheticmovie] =ec2b.onoffRecombine(onmovie, offmovie)
        # save the various movies
        filename='Bar_S' + str(sp) + '_O' + str(o) + '_' + retinaname
        n2sf.savemoviefiles(filename + '_orig', brightness_movie)
        n2sf.savemoviefiles(filename + 'on', onmovie)
        n2sf.savemoviefiles(filename + 'off', offmovie)
        n2sf.savemoviefiles(filename + 'normal', normalmovie)
        n2sf.savemoviefiles(filename + 'prosthetic', prostheticmovie)
| [
"pulse2percept.electrode2currentmap.Retina",
"pulse2percept.effectivecurrent2brightness.onoffRecombine",
"pulse2percept.effectivecurrent2brightness.TemporalModel",
"numpy.array",
"pulse2percept.electrode2currentmap.retinalmovie2electrodtimeseries",
"numpy.sin",
"pulse2percept.electrode2currentmap.Movie2... | [((1756, 1789), 'numpy.arange', 'np.arange', (['(-2362)', '(2364)', 'e_spacing'], {}), '(-2362, 2364, e_spacing)\n', (1765, 1789), True, 'import numpy as np\n'), ((2176, 2243), 'pulse2percept.electrode2currentmap.ElectrodeArray', 'e2cm.ElectrodeArray', (['rlist', 'xlist', 'ylist', 'hlist'], {'ptype': '"""subretinal"""'}), "(rlist, xlist, ylist, hlist, ptype='subretinal')\n", (2195, 2243), True, 'from pulse2percept import electrode2currentmap as e2cm\n'), ((2456, 2557), 'pulse2percept.electrode2currentmap.Retina', 'e2cm.Retina', ([], {'axon_map': 'None', 'sampling': '(150)', 'ylo': '(-1700)', 'yhi': '(1700)', 'xlo': '(-2900)', 'xhi': '(2900)', 'axon_lambda': '(8)'}), '(axon_map=None, sampling=150, ylo=-1700, yhi=1700, xlo=-2900,\n xhi=2900, axon_lambda=8)\n', (2467, 2557), True, 'from pulse2percept import electrode2currentmap as e2cm\n'), ((2766, 2786), 'pulse2percept.effectivecurrent2brightness.TemporalModel', 'ec2b.TemporalModel', ([], {}), '()\n', (2784, 2786), True, 'from pulse2percept import effectivecurrent2brightness as ec2b\n'), ((3421, 3447), 'numpy.arange', 'np.arange', (['(0)', '(2 * np.pi)', '(2)'], {}), '(0, 2 * np.pi, 2)\n', (3430, 3447), True, 'import numpy as np\n'), ((1806, 1839), 'numpy.arange', 'np.arange', (['(-1312)', '(1314)', 'e_spacing'], {}), '(-1312, 1314, e_spacing)\n', (1815, 1839), True, 'import numpy as np\n'), ((3301, 3357), 'numpy.linspace', 'np.linspace', (['(-degscreen[1] / 2)', '(degscreen[1] / 2)', 'res[1]'], {}), '(-degscreen[1] / 2, degscreen[1] / 2, res[1])\n', (3312, 3357), True, 'import numpy as np\n'), ((3356, 3412), 'numpy.linspace', 'np.linspace', (['(-degscreen[0] / 2)', '(degscreen[0] / 2)', 'res[0]'], {}), '(-degscreen[0] / 2, degscreen[0] / 2, res[0])\n', (3367, 3412), True, 'import numpy as np\n'), ((3597, 3617), 'numpy.arange', 'np.arange', (['(32)', '(33)', '(1)'], {}), '(32, 33, 1)\n', (3606, 3617), True, 'import numpy as np\n'), ((2631, 2683), 
'pulse2percept.electrode2currentmap.receptive_field', 'e2cm.receptive_field', (['e', 'r.gridx', 'r.gridy', 'e_spacing'], {}), '(e, r.gridx, r.gridy, e_spacing)\n', (2651, 2683), True, 'from pulse2percept import electrode2currentmap as e2cm\n'), ((3712, 3721), 'numpy.min', 'np.min', (['M'], {}), '(M)\n', (3718, 3721), True, 'import numpy as np\n'), ((4555, 4598), 'pulse2percept.effectivecurrent2brightness.pulse2percept', 'ec2b.pulse2percept', (['tm', 'ecs', 'r', 'pt', 'rsample'], {}), '(tm, ecs, r, pt, rsample)\n', (4573, 4598), True, 'from pulse2percept import effectivecurrent2brightness as ec2b\n'), ((5196, 5240), 'numpy.array', 'np.array', (['[i for i in retinasizes if i >= 2]'], {}), '([i for i in retinasizes if i >= 2])\n', (5204, 5240), True, 'import numpy as np\n'), ((5280, 5335), 'pulse2percept.effectivecurrent2brightness.onoffFiltering', 'ec2b.onoffFiltering', (['brightness_movie.data', 'retinasizes'], {}), '(brightness_movie.data, retinasizes)\n', (5299, 5335), True, 'from pulse2percept import effectivecurrent2brightness as ec2b\n'), ((5376, 5414), 'pulse2percept.effectivecurrent2brightness.onoffRecombine', 'ec2b.onoffRecombine', (['onmovie', 'offmovie'], {}), '(onmovie, offmovie)\n', (5395, 5414), True, 'from pulse2percept import effectivecurrent2brightness as ec2b\n'), ((5534, 5591), 'pulse2percept.files.savemoviefiles', 'n2sf.savemoviefiles', (["(filename + '_orig')", 'brightness_movie'], {}), "(filename + '_orig', brightness_movie)\n", (5553, 5591), True, 'from pulse2percept import files as n2sf\n'), ((5600, 5645), 'pulse2percept.files.savemoviefiles', 'n2sf.savemoviefiles', (["(filename + 'on')", 'onmovie'], {}), "(filename + 'on', onmovie)\n", (5619, 5645), True, 'from pulse2percept import files as n2sf\n'), ((5654, 5701), 'pulse2percept.files.savemoviefiles', 'n2sf.savemoviefiles', (["(filename + 'off')", 'offmovie'], {}), "(filename + 'off', offmovie)\n", (5673, 5701), True, 'from pulse2percept import files as n2sf\n'), ((5710, 5763), 
'pulse2percept.files.savemoviefiles', 'n2sf.savemoviefiles', (["(filename + 'normal')", 'normalmovie'], {}), "(filename + 'normal', normalmovie)\n", (5729, 5763), True, 'from pulse2percept import files as n2sf\n'), ((5773, 5834), 'pulse2percept.files.savemoviefiles', 'n2sf.savemoviefiles', (["(filename + 'prosthetic')", 'prostheticmovie'], {}), "(filename + 'prosthetic', prostheticmovie)\n", (5792, 5834), True, 'from pulse2percept import files as n2sf\n'), ((3490, 3499), 'numpy.cos', 'np.cos', (['o'], {}), '(o)\n', (3496, 3499), True, 'import numpy as np\n'), ((3503, 3512), 'numpy.sin', 'np.sin', (['o'], {}), '(o)\n', (3509, 3512), True, 'import numpy as np\n'), ((3756, 3765), 'numpy.max', 'np.max', (['M'], {}), '(M)\n', (3762, 3765), True, 'import numpy as np\n'), ((3784, 3801), 'numpy.zeros', 'np.zeros', (['M.shape'], {}), '(M.shape)\n', (3792, 3801), True, 'import numpy as np\n'), ((3818, 3859), 'numpy.where', 'np.where', (['((M > st) & (M < st + bar_width))'], {}), '((M > st) & (M < st + bar_width))\n', (3826, 3859), True, 'import numpy as np\n'), ((4188, 4235), 'pulse2percept.electrode2currentmap.retinalmovie2electrodtimeseries', 'e2cm.retinalmovie2electrodtimeseries', (['rf', 'movie'], {}), '(rf, movie)\n', (4224, 4235), True, 'from pulse2percept import electrode2currentmap as e2cm\n'), ((4257, 4285), 'pulse2percept.electrode2currentmap.Movie2Pulsetrain', 'e2cm.Movie2Pulsetrain', (['rflum'], {}), '(rflum)\n', (4278, 4285), True, 'from pulse2percept import electrode2currentmap as e2cm\n'), ((3679, 3699), 'numpy.ceil', 'np.ceil', (['(70 / 5 * 30)'], {}), '(70 / 5 * 30)\n', (3686, 3699), True, 'import numpy as np\n'), ((5137, 5159), 'numpy.linspace', 'np.linspace', (['(5)', '(50)', '(15)'], {}), '(5, 50, 15)\n', (5148, 5159), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
@package ion.agents.data.test.test_external_dataset_agent_slocum
@file ion/agents/data/test/test_external_dataset_agent_slocum.py
@author <NAME>
@brief
"""
# Import pyon first for monkey patching.
from pyon.public import log, IonObject
from pyon.ion.resource import PRED, RT
from interface.services.dm.idataset_management_service import DatasetManagementServiceClient
from interface.services.sa.idata_product_management_service import DataProductManagementServiceClient
from interface.services.sa.idata_acquisition_management_service import DataAcquisitionManagementServiceClient
from interface.services.coi.iresource_registry_service import ResourceRegistryServiceClient
from interface.objects import ExternalDatasetAgent, ExternalDatasetAgentInstance, ExternalDataProvider, DataSourceModel, ContactInformation, UpdateDescription, DatasetDescription, ExternalDataset, Institution, DataSource
from ion.services.dm.utility.granule_utils import time_series_domain
from ion.agents.data.test.test_external_dataset_agent import ExternalDatasetAgentTestBase, IonIntegrationTestCase
from nose.plugins.attrib import attr
#temp until stream defs are completed
from interface.services.dm.ipubsub_management_service import\
PubsubManagementServiceClient
from coverage_model.parameter import ParameterDictionary, ParameterContext
from coverage_model.parameter_types import QuantityType
import numpy
#DISABLED: attr('INT_LONG', group='eoi')
# these tests rely on the original handler mechanism which had several shortcomings leading to the poller/parser rewrite
class TestExternalDatasetAgent_Slocum(ExternalDatasetAgentTestBase,
                                      IonIntegrationTestCase):
    """External dataset agent integration test for Slocum glider data.

    Builds the full resource chain (data provider -> data source -> source
    model -> external dataset -> agent/agent instance -> data product and
    stream) required by the base-class tests, pointing the Slocum data
    handler at a local sample file.
    """
    DVR_CONFIG = {
        'dvr_mod': 'ion.agents.data.handlers.slocum_data_handler',
        'dvr_cls': 'SlocumDataHandler', }
    HIST_CONSTRAINTS_1 = {}
    HIST_CONSTRAINTS_2 = {}
    # Slocum glider variables exposed by the test dataset.  The same names,
    # in the same order, drive both the ExternalDataset description built in
    # _setup_resources() and the parameter dictionary built in
    # _create_parameter_dictionary() -- keep them in one place so the two
    # can never drift apart.
    _VARIABLE_NAMES = (
        'c_wpt_y_lmc',
        'sci_water_cond',
        'm_y_lmc',
        'u_hd_fin_ap_inflection_holdoff',
        'sci_m_present_time',
        'm_leakdetect_voltage_forward',
        'sci_bb3slo_b660_scaled',
        'c_science_send_all',
        'm_gps_status',
        'm_water_vx',
        'm_water_vy',
        'c_heading',
        'sci_fl3slo_chlor_units',
        'u_hd_fin_ap_gain',
        'm_vacuum',
        'u_min_water_depth',
        'm_gps_lat',
        'm_veh_temp',
        'f_fin_offset',
        'u_hd_fin_ap_hardover_holdoff',
        'c_alt_time',
        'm_present_time',
        'm_heading',
        'sci_bb3slo_b532_scaled',
        'sci_fl3slo_cdom_units',
        'm_fin',
        'x_cycle_overrun_in_ms',
        'sci_water_pressure',
        'u_hd_fin_ap_igain',
        'sci_fl3slo_phyco_units',
        'm_battpos',
        'sci_bb3slo_b470_scaled',
        'm_lat',
        'm_gps_lon',
        'sci_ctd41cp_timestamp',
        'm_pressure',
        'c_wpt_x_lmc',
        'c_ballast_pumped',
        'x_lmc_xy_source',
        'm_lon',
        'm_avg_speed',
        'sci_water_temp',
        'u_pitch_ap_gain',
        'm_roll',
        'm_tot_num_inflections',
        'm_x_lmc',
        'u_pitch_ap_deadband',
        'm_final_water_vy',
        'm_final_water_vx',
        'm_water_depth',
        'm_leakdetect_voltage',
        'u_pitch_max_delta_battpos',
        'm_coulomb_amphr',
        'm_pitch',
    )

    def _setup_resources(self):
        """Create and register every resource the dataset agent tests need.

        Populates ``self.EDA_RESOURCE_ID``, ``self.EDA_NAME`` and
        ``self.DVR_CONFIG['dh_cfg']`` for the base-class test machinery.
        """
        # TODO: some or all of this (or some variation) should move to DAMS'
        # Build the test resources for the dataset
        dms_cli = DatasetManagementServiceClient()
        dams_cli = DataAcquisitionManagementServiceClient()
        dpms_cli = DataProductManagementServiceClient()
        rr_cli = ResourceRegistryServiceClient()
        pubsub_cli = PubsubManagementServiceClient()
        # Agent + agent instance for the Slocum data handler
        eda = ExternalDatasetAgent(name='example dataset agent', handler_module=self.DVR_CONFIG['dvr_mod'],
                                   handler_class=self.DVR_CONFIG['dvr_cls'])
        eda_id = dams_cli.create_external_dataset_agent(eda)
        eda_inst = ExternalDatasetAgentInstance(name='example dataset agent instance')
        eda_inst_id = dams_cli.create_external_dataset_agent_instance(eda_inst, external_dataset_agent_id=eda_id)
        # Create and register the necessary resources/objects
        # Create DataProvider
        dprov = ExternalDataProvider(name='example data provider', institution=Institution(), contact=ContactInformation())
        dprov.contact.individual_names_given = '<NAME>'
        dprov.contact.email = '<EMAIL>'
        # Create DataSource
        dsrc = DataSource(name='example datasource', protocol_type='FILE', institution=Institution(), contact=ContactInformation())
        dsrc.connection_params['base_data_url'] = ''
        dsrc.contact.individual_names_given = '<NAME>'
        dsrc.contact.email = '<EMAIL>'
        # Create ExternalDataset describing the local sample file and the
        # variables the handler should expose
        ds_name = 'slocum_test_dataset'
        dset = ExternalDataset(name=ds_name, dataset_description=DatasetDescription(), update_description=UpdateDescription(), contact=ContactInformation())
        dset.dataset_description.parameters['base_url'] = 'test_data/slocum/'
        dset.dataset_description.parameters['list_pattern'] = 'ru05-2012-021-0-0-sbd.dat'
        dset.dataset_description.parameters['date_pattern'] = '%Y %j'
        dset.dataset_description.parameters['date_extraction_pattern'] = r'ru05-([\d]{4})-([\d]{3})-\d-\d-sbd.dat'
        dset.dataset_description.parameters['temporal_dimension'] = None
        dset.dataset_description.parameters['zonal_dimension'] = None
        dset.dataset_description.parameters['meridional_dimension'] = None
        dset.dataset_description.parameters['vertical_dimension'] = None
        # Shared with _create_parameter_dictionary() -- see _VARIABLE_NAMES
        dset.dataset_description.parameters['variables'] = list(self._VARIABLE_NAMES)
        # Create DataSourceModel
        dsrc_model = DataSourceModel(name='slocum_model')
        # dsrc_model.model = 'SLOCUM'
        dsrc_model.data_handler_module = 'N/A'
        dsrc_model.data_handler_class = 'N/A'
        ## Run everything through DAMS
        ds_id = dams_cli.create_external_dataset(external_dataset=dset)
        ext_dprov_id = dams_cli.create_external_data_provider(external_data_provider=dprov)
        ext_dsrc_id = dams_cli.create_data_source(data_source=dsrc)
        ext_dsrc_model_id = dams_cli.create_data_source_model(dsrc_model)
        # Register the ExternalDataset
        dproducer_id = dams_cli.register_external_data_set(external_dataset_id=ds_id)
        # Or using each method
        dams_cli.assign_data_source_to_external_data_provider(data_source_id=ext_dsrc_id, external_data_provider_id=ext_dprov_id)
        dams_cli.assign_data_source_to_data_model(data_source_id=ext_dsrc_id, data_source_model_id=ext_dsrc_model_id)
        dams_cli.assign_external_dataset_to_data_source(external_dataset_id=ds_id, data_source_id=ext_dsrc_id)
        dams_cli.assign_external_dataset_to_agent_instance(external_dataset_id=ds_id, agent_instance_id=eda_inst_id)
        # dams_cli.assign_external_data_agent_to_agent_instance(external_data_agent_id=self.eda_id, agent_instance_id=self.eda_inst_id)
        #create temp streamdef so the data product can create the stream
        pc_list = []
        for pc_k, pc in self._create_parameter_dictionary().iteritems():
            pc_list.append(dms_cli.create_parameter_context(pc_k, pc[1].dump()))
        pdict_id = dms_cli.create_parameter_dictionary('slocum_param_dict', pc_list)
        streamdef_id = pubsub_cli.create_stream_definition(name="slocum_stream_def", description="stream def for slocum testing", parameter_dictionary_id=pdict_id)
        # dpms_cli.create_data_product()
        # Generate the data product and associate it to the ExternalDataset
        dprod = IonObject(RT.DataProduct,
                          name='slocum_parsed_product',
                          description='parsed slocum product')
        dproduct_id = dpms_cli.create_data_product(data_product=dprod,
                                                   stream_definition_id=streamdef_id)
        dams_cli.assign_data_product(input_resource_id=ds_id, data_product_id=dproduct_id)
        stream_id, assn = rr_cli.find_objects(subject=dproduct_id, predicate=PRED.hasStream, object_type=RT.Stream, id_only=True)
        stream_id = stream_id[0]
        log.info('Created resources: {0}'.format({'ExternalDataset': ds_id, 'ExternalDataProvider': ext_dprov_id, 'DataSource': ext_dsrc_id, 'DataSourceModel': ext_dsrc_model_id, 'DataProducer': dproducer_id, 'DataProduct': dproduct_id, 'Stream': stream_id}))
        # Create the logger for receiving publications
        _, stream_route, _ = self.create_stream_and_logger(name='slocum', stream_id=stream_id)
        self.EDA_RESOURCE_ID = ds_id
        self.EDA_NAME = ds_name
        self.DVR_CONFIG['dh_cfg'] = {
            'TESTING': True,
            'stream_id': stream_id,
            'stream_route': stream_route,
            'stream_def': streamdef_id,
            'external_dataset_res': dset,
            'data_producer_id': dproducer_id,  # CBM: Should this be put in the main body of the config - with mod & cls?
            'max_records': 20,
        }

    def _create_parameter_dictionary(self):
        """Build a ParameterDictionary with one float32 context per variable.

        Every Slocum variable gets an identical QuantityType(float32)
        context with an unknown unit of measure, so the 54 repeated stanzas
        collapse to a single loop over ``_VARIABLE_NAMES``.
        """
        pdict = ParameterDictionary()
        for var_name in self._VARIABLE_NAMES:
            t_ctxt = ParameterContext(var_name, param_type=QuantityType(value_encoding=numpy.dtype('float32')))
            t_ctxt.uom = 'unknown'
            pdict.add_context(t_ctxt)
        return pdict
| [
"numpy.dtype",
"interface.objects.ExternalDatasetAgentInstance",
"interface.services.sa.idata_product_management_service.DataProductManagementServiceClient",
"interface.objects.ContactInformation",
"interface.services.dm.idataset_management_service.DatasetManagementServiceClient",
"interface.services.sa.i... | [((2049, 2081), 'interface.services.dm.idataset_management_service.DatasetManagementServiceClient', 'DatasetManagementServiceClient', ([], {}), '()\n', (2079, 2081), False, 'from interface.services.dm.idataset_management_service import DatasetManagementServiceClient\n'), ((2101, 2141), 'interface.services.sa.idata_acquisition_management_service.DataAcquisitionManagementServiceClient', 'DataAcquisitionManagementServiceClient', ([], {}), '()\n', (2139, 2141), False, 'from interface.services.sa.idata_acquisition_management_service import DataAcquisitionManagementServiceClient\n'), ((2161, 2197), 'interface.services.sa.idata_product_management_service.DataProductManagementServiceClient', 'DataProductManagementServiceClient', ([], {}), '()\n', (2195, 2197), False, 'from interface.services.sa.idata_product_management_service import DataProductManagementServiceClient\n'), ((2215, 2246), 'interface.services.coi.iresource_registry_service.ResourceRegistryServiceClient', 'ResourceRegistryServiceClient', ([], {}), '()\n', (2244, 2246), False, 'from interface.services.coi.iresource_registry_service import ResourceRegistryServiceClient\n'), ((2268, 2299), 'interface.services.dm.ipubsub_management_service.PubsubManagementServiceClient', 'PubsubManagementServiceClient', ([], {}), '()\n', (2297, 2299), False, 'from interface.services.dm.ipubsub_management_service import PubsubManagementServiceClient\n'), ((2315, 2455), 'interface.objects.ExternalDatasetAgent', 'ExternalDatasetAgent', ([], {'name': '"""example dataset agent"""', 'handler_module': "self.DVR_CONFIG['dvr_mod']", 'handler_class': "self.DVR_CONFIG['dvr_cls']"}), "(name='example dataset agent', handler_module=self.\n DVR_CONFIG['dvr_mod'], handler_class=self.DVR_CONFIG['dvr_cls'])\n", (2335, 2455), False, 'from interface.objects import ExternalDatasetAgent, ExternalDatasetAgentInstance, ExternalDataProvider, DataSourceModel, ContactInformation, UpdateDescription, DatasetDescription, 
ExternalDataset, Institution, DataSource\n'), ((2544, 2611), 'interface.objects.ExternalDatasetAgentInstance', 'ExternalDatasetAgentInstance', ([], {'name': '"""example dataset agent instance"""'}), "(name='example dataset agent instance')\n", (2572, 2611), False, 'from interface.objects import ExternalDatasetAgent, ExternalDatasetAgentInstance, ExternalDataProvider, DataSourceModel, ContactInformation, UpdateDescription, DatasetDescription, ExternalDataset, Institution, DataSource\n'), ((6003, 6039), 'interface.objects.DataSourceModel', 'DataSourceModel', ([], {'name': '"""slocum_model"""'}), "(name='slocum_model')\n", (6018, 6039), False, 'from interface.objects import ExternalDatasetAgent, ExternalDatasetAgentInstance, ExternalDataProvider, DataSourceModel, ContactInformation, UpdateDescription, DatasetDescription, ExternalDataset, Institution, DataSource\n'), ((7945, 8042), 'pyon.public.IonObject', 'IonObject', (['RT.DataProduct'], {'name': '"""slocum_parsed_product"""', 'description': '"""parsed slocum product"""'}), "(RT.DataProduct, name='slocum_parsed_product', description=\n 'parsed slocum product')\n", (7954, 8042), False, 'from pyon.public import log, IonObject\n'), ((9374, 9395), 'coverage_model.parameter.ParameterDictionary', 'ParameterDictionary', ([], {}), '()\n', (9393, 9395), False, 'from coverage_model.parameter import ParameterDictionary, ParameterContext\n'), ((2899, 2912), 'interface.objects.Institution', 'Institution', ([], {}), '()\n', (2910, 2912), False, 'from interface.objects import ExternalDatasetAgent, ExternalDatasetAgentInstance, ExternalDataProvider, DataSourceModel, ContactInformation, UpdateDescription, DatasetDescription, ExternalDataset, Institution, DataSource\n'), ((2922, 2942), 'interface.objects.ContactInformation', 'ContactInformation', ([], {}), '()\n', (2940, 2942), False, 'from interface.objects import ExternalDatasetAgent, ExternalDatasetAgentInstance, ExternalDataProvider, DataSourceModel, ContactInformation, 
UpdateDescription, DatasetDescription, ExternalDataset, Institution, DataSource\n'), ((3156, 3169), 'interface.objects.Institution', 'Institution', ([], {}), '()\n', (3167, 3169), False, 'from interface.objects import ExternalDatasetAgent, ExternalDatasetAgentInstance, ExternalDataProvider, DataSourceModel, ContactInformation, UpdateDescription, DatasetDescription, ExternalDataset, Institution, DataSource\n'), ((3179, 3199), 'interface.objects.ContactInformation', 'ContactInformation', ([], {}), '()\n', (3197, 3199), False, 'from interface.objects import ExternalDatasetAgent, ExternalDatasetAgentInstance, ExternalDataProvider, DataSourceModel, ContactInformation, UpdateDescription, DatasetDescription, ExternalDataset, Institution, DataSource\n'), ((3487, 3507), 'interface.objects.DatasetDescription', 'DatasetDescription', ([], {}), '()\n', (3505, 3507), False, 'from interface.objects import ExternalDatasetAgent, ExternalDatasetAgentInstance, ExternalDataProvider, DataSourceModel, ContactInformation, UpdateDescription, DatasetDescription, ExternalDataset, Institution, DataSource\n'), ((3528, 3547), 'interface.objects.UpdateDescription', 'UpdateDescription', ([], {}), '()\n', (3545, 3547), False, 'from interface.objects import ExternalDatasetAgent, ExternalDatasetAgentInstance, ExternalDataProvider, DataSourceModel, ContactInformation, UpdateDescription, DatasetDescription, ExternalDataset, Institution, DataSource\n'), ((3557, 3577), 'interface.objects.ContactInformation', 'ContactInformation', ([], {}), '()\n', (3575, 3577), False, 'from interface.objects import ExternalDatasetAgent, ExternalDatasetAgentInstance, ExternalDataProvider, DataSourceModel, ContactInformation, UpdateDescription, DatasetDescription, ExternalDataset, Institution, DataSource\n'), ((9485, 9507), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (9496, 9507), False, 'import numpy\n'), ((9667, 9689), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", 
(9678, 9689), False, 'import numpy\n'), ((9842, 9864), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (9853, 9864), False, 'import numpy\n'), ((10040, 10062), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (10051, 10062), False, 'import numpy\n'), ((10226, 10248), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (10237, 10248), False, 'import numpy\n'), ((10422, 10444), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (10433, 10444), False, 'import numpy\n'), ((10612, 10634), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (10623, 10634), False, 'import numpy\n'), ((10798, 10820), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (10809, 10820), False, 'import numpy\n'), ((10978, 11000), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (10989, 11000), False, 'import numpy\n'), ((11156, 11178), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (11167, 11178), False, 'import numpy\n'), ((11334, 11356), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (11345, 11356), False, 'import numpy\n'), ((11511, 11533), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (11522, 11533), False, 'import numpy\n'), ((11701, 11723), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (11712, 11723), False, 'import numpy\n'), ((11885, 11907), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (11896, 11907), False, 'import numpy\n'), ((12061, 12083), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (12072, 12083), False, 'import numpy\n'), ((12246, 12268), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (12257, 12268), False, 'import numpy\n'), ((12423, 12445), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (12434, 12445), False, 'import 
numpy\n'), ((12601, 12623), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (12612, 12623), False, 'import numpy\n'), ((12781, 12803), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (12792, 12803), False, 'import numpy\n'), ((12977, 12999), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (12988, 12999), False, 'import numpy\n'), ((13155, 13177), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (13166, 13177), False, 'import numpy\n'), ((13337, 13359), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (13348, 13359), False, 'import numpy\n'), ((13514, 13536), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (13525, 13536), False, 'import numpy\n'), ((13704, 13726), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (13715, 13726), False, 'import numpy\n'), ((13893, 13915), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (13904, 13915), False, 'import numpy\n'), ((14066, 14088), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (14077, 14088), False, 'import numpy\n'), ((14255, 14277), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (14266, 14277), False, 'import numpy\n'), ((14441, 14463), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (14452, 14463), False, 'import numpy\n'), ((14626, 14648), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (14637, 14648), False, 'import numpy\n'), ((14816, 14838), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (14827, 14838), False, 'import numpy\n'), ((14993, 15015), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (15004, 15015), False, 'import numpy\n'), ((15183, 15205), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (15194, 15205), False, 'import numpy\n'), ((15356, 15378), 
'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (15367, 15378), False, 'import numpy\n'), ((15533, 15555), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (15544, 15555), False, 'import numpy\n'), ((15722, 15744), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (15733, 15744), False, 'import numpy\n'), ((15900, 15922), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (15911, 15922), False, 'import numpy\n'), ((16079, 16101), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (16090, 16101), False, 'import numpy\n'), ((16263, 16285), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (16274, 16285), False, 'import numpy\n'), ((16446, 16468), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (16457, 16468), False, 'import numpy\n'), ((16619, 16641), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (16630, 16641), False, 'import numpy\n'), ((16798, 16820), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (16809, 16820), False, 'import numpy\n'), ((16980, 17002), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (16991, 17002), False, 'import numpy\n'), ((17163, 17185), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (17174, 17185), False, 'import numpy\n'), ((17337, 17359), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (17348, 17359), False, 'import numpy\n'), ((17526, 17548), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (17537, 17548), False, 'import numpy\n'), ((17701, 17723), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (17712, 17723), False, 'import numpy\n'), ((17888, 17910), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (17899, 17910), False, 'import numpy\n'), ((18072, 18094), 'numpy.dtype', 'numpy.dtype', 
(['"""float32"""'], {}), "('float32')\n", (18083, 18094), False, 'import numpy\n'), ((18256, 18278), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (18267, 18278), False, 'import numpy\n'), ((18437, 18459), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (18448, 18459), False, 'import numpy\n'), ((18625, 18647), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (18636, 18647), False, 'import numpy\n'), ((18818, 18840), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (18829, 18840), False, 'import numpy\n'), ((19001, 19023), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (19012, 19023), False, 'import numpy\n'), ((19176, 19198), 'numpy.dtype', 'numpy.dtype', (['"""float32"""'], {}), "('float32')\n", (19187, 19198), False, 'import numpy\n')] |
from typing import Dict, Optional, Union
import numpy as np
from werkzeug import ImmutableMultiDict
from .endpoint import Endpoint
def predict(model, input_data: Union[Dict, ImmutableMultiDict], config: Endpoint):
# new model
if hasattr(model, "public_inputs"):
sample = {}
for k, v in dict(input_data).items():
try:
# GET request arguments are strings. If they should in fact be number, we try to convert them here
sample[k] = float(v)
except ValueError:
# Some arguments are in fact strings. So we let them.
sample[k] = v
res = model.predict(sample, "raw")
return res.to_dict("records")[0]
sample = config.process_input(input_data)
vec = np.array(sample).reshape(1, -1)
res = model.predict(vec)
return config.process_output(res)
| [
"numpy.array"
] | [((780, 796), 'numpy.array', 'np.array', (['sample'], {}), '(sample)\n', (788, 796), True, 'import numpy as np\n')] |
import numpy as np
import math
from scipy import special
import matplotlib.pyplot as plt
import sys
def pdf(x, a, b):
return math.gamma(a + b) / (math.gamma(a) * math.gamma(b)) * x**(a - 1) * (1 - x)**(b-1)
args = sys.argv[1:]
if len(args) != 2:
print("Invalid arguments. Example: main.py 2 5")
sys.exit(1)
a = int(args[0])
b = int(args[1])
print('a={:d}, b={:d}'.format(a, b))
x = np.linspace(0, 1.0, 100)
vpmf = np.vectorize(pdf)
y = vpmf(x, a, b)
plt.plot(x, y, color='g')
plt.xlabel('N')
plt.ylabel('P')
plt.show()
| [
"math.gamma",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.linspace",
"sys.exit",
"numpy.vectorize",
"matplotlib.pyplot.show"
] | [((393, 417), 'numpy.linspace', 'np.linspace', (['(0)', '(1.0)', '(100)'], {}), '(0, 1.0, 100)\n', (404, 417), True, 'import numpy as np\n'), ((425, 442), 'numpy.vectorize', 'np.vectorize', (['pdf'], {}), '(pdf)\n', (437, 442), True, 'import numpy as np\n'), ((462, 487), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'color': '"""g"""'}), "(x, y, color='g')\n", (470, 487), True, 'import matplotlib.pyplot as plt\n'), ((488, 503), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""N"""'], {}), "('N')\n", (498, 503), True, 'import matplotlib.pyplot as plt\n'), ((504, 519), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""P"""'], {}), "('P')\n", (514, 519), True, 'import matplotlib.pyplot as plt\n'), ((520, 530), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (528, 530), True, 'import matplotlib.pyplot as plt\n'), ((303, 314), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (311, 314), False, 'import sys\n'), ((128, 145), 'math.gamma', 'math.gamma', (['(a + b)'], {}), '(a + b)\n', (138, 145), False, 'import math\n'), ((149, 162), 'math.gamma', 'math.gamma', (['a'], {}), '(a)\n', (159, 162), False, 'import math\n'), ((165, 178), 'math.gamma', 'math.gamma', (['b'], {}), '(b)\n', (175, 178), False, 'import math\n')] |
import numpy as np
from ....constants import DATA
from .normalization_transform import NormalizationTransform
from .histogram_standardization import normalize
class HistogramRandomChange(NormalizationTransform):
"""
Same thins as HistogramStandardization but the landmarks is a random curve
"""
def __init__(self, masking_method=None,
nb_point_ini = 50, nb_smooth=5, verbose=False, **kwargs):
super().__init__(masking_method=masking_method, verbose=verbose, **kwargs)
self.nb_point_ini = nb_point_ini
self.nb_smooth = nb_smooth
def apply_normalization(self, sample, image_name, mask):
image_dict = sample[image_name]
landmarks = self.__get_random_landmarks()
image_dict[DATA] = normalize(
image_dict[DATA],
landmarks,
mask=mask,
)
def __get_random_landmarks(self):
y = np.squeeze(np.sort(np.random.rand(1, self.nb_point_ini)))
x1 = np.linspace(0, 1, self.nb_point_ini)
x2 = np.linspace(0, 1, 100)
y2 = np.interp(x2, x1, y)
y2 = self.smooth(y2, self.nb_smooth)
y2 = np.sort(y2)
y2 = y2 - np.min(y2)
y2 = (y2 / np.max(y2)) * 100
return y2
def smooth(self, y, box_pts):
box = np.ones(box_pts) / box_pts
y_smooth = np.convolve(y, box, mode='same')
return y_smooth
def get_curve_for_sample(yall):
"""
not use but other alternative to get a random landmarks from a set of landmarks
:return:
"""
i = np.random.randint(yall.shape[0])
y = np.squeeze(yall[i,:])
i = np.random.randint(y.shape)
print(i)
if i<yall.shape[1]/2:
y = y[i[0]:]
else:
y = y[0:i[0]]
x1 = np.linspace(0, 1, y.shape[0])
x2 = np.linspace(0, 1, 100)
y2 = np.interp(x2, x1, y)
y2 = y2 - np.min(y2)
y2 = (y2 / np.max(y2)) * 100
plt.plot(y2)
return y2
| [
"numpy.convolve",
"numpy.ones",
"numpy.random.rand",
"numpy.sort",
"numpy.squeeze",
"numpy.max",
"numpy.random.randint",
"numpy.linspace",
"numpy.interp",
"numpy.min"
] | [((985, 1021), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'self.nb_point_ini'], {}), '(0, 1, self.nb_point_ini)\n', (996, 1021), True, 'import numpy as np\n'), ((1035, 1057), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (1046, 1057), True, 'import numpy as np\n'), ((1071, 1091), 'numpy.interp', 'np.interp', (['x2', 'x1', 'y'], {}), '(x2, x1, y)\n', (1080, 1091), True, 'import numpy as np\n'), ((1150, 1161), 'numpy.sort', 'np.sort', (['y2'], {}), '(y2)\n', (1157, 1161), True, 'import numpy as np\n'), ((1343, 1375), 'numpy.convolve', 'np.convolve', (['y', 'box'], {'mode': '"""same"""'}), "(y, box, mode='same')\n", (1354, 1375), True, 'import numpy as np\n'), ((1580, 1612), 'numpy.random.randint', 'np.random.randint', (['yall.shape[0]'], {}), '(yall.shape[0])\n', (1597, 1612), True, 'import numpy as np\n'), ((1625, 1647), 'numpy.squeeze', 'np.squeeze', (['yall[i, :]'], {}), '(yall[i, :])\n', (1635, 1647), True, 'import numpy as np\n'), ((1660, 1686), 'numpy.random.randint', 'np.random.randint', (['y.shape'], {}), '(y.shape)\n', (1677, 1686), True, 'import numpy as np\n'), ((1813, 1842), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'y.shape[0]'], {}), '(0, 1, y.shape[0])\n', (1824, 1842), True, 'import numpy as np\n'), ((1856, 1878), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (1867, 1878), True, 'import numpy as np\n'), ((1892, 1912), 'numpy.interp', 'np.interp', (['x2', 'x1', 'y'], {}), '(x2, x1, y)\n', (1901, 1912), True, 'import numpy as np\n'), ((1180, 1190), 'numpy.min', 'np.min', (['y2'], {}), '(y2)\n', (1186, 1190), True, 'import numpy as np\n'), ((1297, 1313), 'numpy.ones', 'np.ones', (['box_pts'], {}), '(box_pts)\n', (1304, 1313), True, 'import numpy as np\n'), ((1932, 1942), 'numpy.min', 'np.min', (['y2'], {}), '(y2)\n', (1938, 1942), True, 'import numpy as np\n'), ((933, 969), 'numpy.random.rand', 'np.random.rand', (['(1)', 'self.nb_point_ini'], {}), '(1, 
self.nb_point_ini)\n', (947, 969), True, 'import numpy as np\n'), ((1210, 1220), 'numpy.max', 'np.max', (['y2'], {}), '(y2)\n', (1216, 1220), True, 'import numpy as np\n'), ((1962, 1972), 'numpy.max', 'np.max', (['y2'], {}), '(y2)\n', (1968, 1972), True, 'import numpy as np\n')] |
# By: <NAME>, 2018
# Ported to Keras from the official Tensorflow implementation by Magenta
# Most utilities in 'utils' remained the same as in the official implementation
""" SketchRNN data loading, callbacks and image manipulation utilities. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
import requests
import six
from six.moves import cStringIO as StringIO
import copy
import os
import sys
from keras.callbacks import Callback
import keras.backend as K
from keras.callbacks import LearningRateScheduler, TensorBoard
""" My Utilities """
def batch_generator(dataset, train):
""" Generator to feed into Keras' fit_generator for loading of data"""
count = 0 # batch counter for validation\test data
while True:
if train:
_, batch, s = dataset.random_batch()
else: # validation\test data
count = 0 if count == dataset.num_batches else count
_, batch, s = dataset.get_batch(count)
count += 1
encoder_input = batch[:, 1:dataset.max_seq_length + 1, :]
# The target/expected vectors of strokes
target_output = encoder_input
# Vectors of strokes to be fed to decoder (same as above, but lagged behind
# one step to include initial dummy value of (0, 0, 1, 0, 0))
decoder_input = batch[:, :dataset.max_seq_length, :]
yield ({'encoder_input': encoder_input, 'decoder_input': decoder_input}, {'output': target_output})
# load_dataset is the original implementation function, modified to fit Keras
def load_dataset(data_dir, model_params):
"""Loads the .npz file, and splits the set into train/valid/test."""
# normalizes the x and y columns using the training set.
# applies same scaling factor to valid and test set.
if isinstance(model_params.data_set, list):
datasets = model_params.data_set
else:
datasets = [model_params.data_set]
train_strokes = None
valid_strokes = None
test_strokes = None
for dataset in datasets:
data_filepath = os.path.join(data_dir, dataset)
if data_dir.startswith('http://') or data_dir.startswith('https://'):
print('Downloading %s', data_filepath)
response = requests.get(data_filepath)
data = np.load(StringIO(response.content))
else:
if six.PY3:
data = np.load(data_filepath, encoding='latin1')
else:
data = np.load(data_filepath)
print('Loaded {}/{}/{} from {}'.format(
len(data['train']), len(data['valid']), len(data['test']),
dataset))
if train_strokes is None:
train_strokes = data['train']
valid_strokes = data['valid']
test_strokes = data['test']
else:
train_strokes = np.concatenate((train_strokes, data['train']))
valid_strokes = np.concatenate((valid_strokes, data['valid']))
test_strokes = np.concatenate((test_strokes, data['test']))
all_strokes = np.concatenate((train_strokes, valid_strokes, test_strokes))
num_points = 0
for stroke in all_strokes:
num_points += len(stroke)
avg_len = num_points / len(all_strokes)
print('Dataset combined: {} ({}/{}/{}), avg len {}'.format(
len(all_strokes), len(train_strokes), len(valid_strokes),
len(test_strokes), int(avg_len)))
# calculate the max strokes we need.
max_seq_len = get_max_len(all_strokes)
# overwrite the hps with this calculation.
model_params.max_seq_len = max_seq_len
print('model_params.max_seq_len:', int(model_params.max_seq_len))
train_set = DataLoader(
train_strokes,
model_params.batch_size,
max_seq_length=model_params.max_seq_len,
random_scale_factor=model_params.random_scale_factor,
augment_stroke_prob=model_params.augment_stroke_prob)
normalizing_scale_factor = train_set.calculate_normalizing_scale_factor()
train_set.normalize(normalizing_scale_factor)
valid_set = DataLoader(
valid_strokes,
model_params.batch_size,
max_seq_length=model_params.max_seq_len,
random_scale_factor=0.0,
augment_stroke_prob=0.0)
valid_set.normalize(normalizing_scale_factor)
test_set = DataLoader(
test_strokes,
model_params.batch_size,
max_seq_length=model_params.max_seq_len,
random_scale_factor=0.0,
augment_stroke_prob=0.0)
test_set.normalize(normalizing_scale_factor)
print('normalizing_scale_factor ', normalizing_scale_factor)
result = [train_set, valid_set, test_set, model_params]
return result
class Logger(object):
""" Logger class to enable logging to file and terminal together """
def __init__(self, logsdir):
self.terminal = sys.stdout
self.log = open(os.path.join(logsdir, 'log.txt'), "a")
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
pass
class DotDict(dict):
"""dot.notation access to dictionary attributes"""
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def __deepcopy__(self, memo):
return DotDict([(copy.deepcopy(k, memo), copy.deepcopy(v, memo)) for k, v in self.items()])
class LearningRateSchedulerPerBatch(LearningRateScheduler):
""" Callback class to modify the default learning rate scheduler to operate each batch"""
def __init__(self, schedule, verbose=0):
super(LearningRateSchedulerPerBatch, self).__init__(schedule, verbose)
self.count = 0 # Global batch index (the regular batch argument refers to the batch index within the epoch)
def on_epoch_begin(self, epoch, logs=None):
pass
def on_epoch_end(self, epoch, logs=None):
pass
def on_batch_begin(self, batch, logs=None):
super(LearningRateSchedulerPerBatch, self).on_epoch_begin(self.count, logs)
def on_batch_end(self, batch, logs=None):
super(LearningRateSchedulerPerBatch, self).on_epoch_end(self.count, logs)
self.count += 1
class KLWeightScheduler(Callback):
"""KL weight scheduler.
# Arguments
kl_weight: The tensor withholding the current KL weight term
schedule: a function that takes a batch index as input
(integer, indexed from 0) and returns a new learning rate as output (float).
verbose: int. 0: quiet, 1: update messages.
"""
def __init__(self, kl_weight, schedule, verbose=0):
super(KLWeightScheduler, self).__init__()
self.schedule = schedule
self.verbose = verbose
self.kl_weight = kl_weight
self.count = 0 # Global batch index (the regular batch argument refers to the batch index within the epoch)
def on_batch_begin(self, batch, logs=None):
new_kl_weight = self.schedule(self.count)
if not isinstance(new_kl_weight, (float, np.float32, np.float64)):
raise ValueError('The output of the "schedule" function '
'should be float.')
# Set new value
K.set_value(self.kl_weight, new_kl_weight)
if self.verbose > 0 and self.count % 20 == 0:
print('\nBatch %05d: KLWeightScheduler setting KL weight '
' to %s.' % (self.count + 1, new_kl_weight))
self.count += 1
class TensorBoardLR(TensorBoard):
""" A modification to the Tensorboard callback to also include the scalars of learning rate and KL weight"""
def __init__(self, *args, **kwargs):
self.kl_weight = kwargs.pop('kl_weight')
super().__init__(*args, **kwargs)
self.count = 0
def on_batch_end(self, batch, logs=None):
logs.update({'lr': K.eval(self.model.optimizer.lr),
'kl_weight': K.eval(self.kl_weight)})
super().on_batch_end(batch, logs)
# TODO: add automatic startup of TB on train start (and termination on train end?)
# def on_train_begin(self, logs=None):
# call(["tensorboard", "--logdir="+self.log_dir])
""" Original Implementation (Magenta) Utilities"""
def get_bounds(data, factor=10):
"""Return bounds of data."""
min_x = 0
max_x = 0
min_y = 0
max_y = 0
abs_x = 0
abs_y = 0
for i in range(len(data)):
x = float(data[i, 0]) / factor
y = float(data[i, 1]) / factor
abs_x += x
abs_y += y
min_x = min(min_x, abs_x)
min_y = min(min_y, abs_y)
max_x = max(max_x, abs_x)
max_y = max(max_y, abs_y)
return (min_x, max_x, min_y, max_y)
def slerp(p0, p1, t):
"""Spherical interpolation."""
omega = np.arccos(np.dot(p0 / np.linalg.norm(p0), p1 / np.linalg.norm(p1)))
so = np.sin(omega)
return np.sin((1.0 - t) * omega) / so * p0 + np.sin(t * omega) / so * p1
def lerp(p0, p1, t):
"""Linear interpolation."""
return (1.0 - t) * p0 + t * p1
# A note on formats:
# Sketches are encoded as a sequence of strokes. stroke-3 and stroke-5 are
# different stroke encodings.
# stroke-3 uses 3-tuples, consisting of x-offset, y-offset, and a binary
# variable which is 1 if the pen is lifted between this position and
# the next, and 0 otherwise.
# stroke-5 consists of x-offset, y-offset, and p_1, p_2, p_3, a binary
# one-hot vector of 3 possible pen states: pen down, pen up, end of sketch.
# See section 3.1 of https://arxiv.org/abs/1704.03477 for more detail.
# Sketch-RNN takes input in stroke-5 format, with sketches padded to a common
# maximum length and prefixed by the special start token [0, 0, 1, 0, 0]
# The QuickDraw dataset is stored using stroke-3.
def strokes_to_lines(strokes):
"""Convert stroke-3 format to polyline format."""
x = 0
y = 0
lines = []
line = []
for i in range(len(strokes)):
if strokes[i, 2] == 1:
x += float(strokes[i, 0])
y += float(strokes[i, 1])
line.append([x, y])
lines.append(line)
line = []
else:
x += float(strokes[i, 0])
y += float(strokes[i, 1])
line.append([x, y])
return lines
def lines_to_strokes(lines):
"""Convert polyline format to stroke-3 format."""
eos = 0
strokes = [[0, 0, 0]]
for line in lines:
linelen = len(line)
for i in range(linelen):
eos = 0 if i < linelen - 1 else 1
strokes.append([line[i][0], line[i][1], eos])
strokes = np.array(strokes)
strokes[1:, 0:2] -= strokes[:-1, 0:2]
return strokes[1:, :]
def augment_strokes(strokes, prob=0.0):
"""Perform data augmentation by randomly dropping out strokes."""
# drop each point within a line segments with a probability of prob
# note that the logic in the loop prevents points at the ends to be dropped.
result = []
prev_stroke = [0, 0, 1]
count = 0
stroke = [0, 0, 1] # Added to be safe.
for i in range(len(strokes)):
candidate = [strokes[i][0], strokes[i][1], strokes[i][2]]
if candidate[2] == 1 or prev_stroke[2] == 1:
count = 0
else:
count += 1
urnd = np.random.rand() # uniform random variable
if candidate[2] == 0 and prev_stroke[2] == 0 and count > 2 and urnd < prob:
stroke[0] += candidate[0]
stroke[1] += candidate[1]
else:
stroke = candidate
prev_stroke = stroke
result.append(stroke)
return np.array(result)
def scale_bound(stroke, average_dimension=10.0):
"""Scale an entire image to be less than a certain size."""
# stroke is a numpy array of [dx, dy, pstate], average_dimension is a float.
# modifies stroke directly.
bounds = get_bounds(stroke, 1)
max_dimension = max(bounds[1] - bounds[0], bounds[3] - bounds[2])
stroke[:, 0:2] /= (max_dimension / average_dimension)
def to_normal_strokes(big_stroke):
"""Convert from stroke-5 format (from sketch-rnn paper) back to stroke-3."""
l = 0
for i in range(len(big_stroke)):
if big_stroke[i, 4] > 0:
l = i
break
if l == 0:
l = len(big_stroke)
result = np.zeros((l, 3))
result[:, 0:2] = big_stroke[0:l, 0:2]
result[:, 2] = big_stroke[0:l, 3]
return result
def clean_strokes(sample_strokes, factor=100):
"""Cut irrelevant end points, scale to pixel space and store as integer."""
# Useful function for exporting data to .json format.
copy_stroke = []
added_final = False
for j in range(len(sample_strokes)):
finish_flag = int(sample_strokes[j][4])
if finish_flag == 0:
copy_stroke.append([
int(round(sample_strokes[j][0] * factor)),
int(round(sample_strokes[j][1] * factor)),
int(sample_strokes[j][2]),
int(sample_strokes[j][3]), finish_flag
])
else:
copy_stroke.append([0, 0, 0, 0, 1])
added_final = True
break
if not added_final:
copy_stroke.append([0, 0, 0, 0, 1])
return copy_stroke
def to_big_strokes(stroke, max_len=250):
"""Converts from stroke-3 to stroke-5 format and pads to given length."""
# (But does not insert special start token).
result = np.zeros((max_len, 5), dtype=float)
l = len(stroke)
assert l <= max_len
result[0:l, 0:2] = stroke[:, 0:2]
result[0:l, 3] = stroke[:, 2]
result[0:l, 2] = 1 - result[0:l, 3]
result[l:, 4] = 1
return result
def get_max_len(strokes):
"""Return the maximum length of an array of strokes."""
max_len = 0
for stroke in strokes:
ml = len(stroke)
if ml > max_len:
max_len = ml
return max_len
class DataLoader(object):
"""Class for loading data."""
def __init__(self,
strokes,
batch_size=100,
max_seq_length=250,
scale_factor=1.0,
random_scale_factor=0.0,
augment_stroke_prob=0.0,
limit=1000):
self.batch_size = batch_size # minibatch size
self.max_seq_length = max_seq_length # N_max in sketch-rnn paper
self.scale_factor = scale_factor # divide offsets by this factor
self.random_scale_factor = random_scale_factor # data augmentation method
# Removes large gaps in the data. x and y offsets are clamped to have
# absolute value no greater than this limit.
self.limit = limit
self.augment_stroke_prob = augment_stroke_prob # data augmentation method
self.start_stroke_token = [0, 0, 1, 0, 0] # S_0 in sketch-rnn paper
# sets self.strokes (list of ndarrays, one per sketch, in stroke-3 format,
# sorted by size)
self.preprocess(strokes)
def preprocess(self, strokes):
"""Remove entries from strokes having > max_seq_length points."""
raw_data = []
seq_len = []
count_data = 0
for i in range(len(strokes)):
data = strokes[i]
if len(data) <= (self.max_seq_length):
count_data += 1
# removes large gaps from the data
data = np.minimum(data, self.limit)
data = np.maximum(data, -self.limit)
data = np.array(data, dtype=np.float32)
data[:, 0:2] /= self.scale_factor
raw_data.append(data)
seq_len.append(len(data))
seq_len = np.array(seq_len) # nstrokes for each sketch
idx = np.argsort(seq_len)
self.strokes = []
for i in range(len(seq_len)):
self.strokes.append(raw_data[idx[i]])
print("total images <= max_seq_len is %d" % count_data)
self.num_batches = int(count_data / self.batch_size)
def random_sample(self):
"""Return a random sample, in stroke-3 format as used by draw_strokes."""
sample = np.copy(random.choice(self.strokes))
return sample
def random_scale(self, data):
"""Augment data by stretching x and y axis randomly [1-e, 1+e]."""
x_scale_factor = (
np.random.random() - 0.5) * 2 * self.random_scale_factor + 1.0
y_scale_factor = (
np.random.random() - 0.5) * 2 * self.random_scale_factor + 1.0
result = np.copy(data)
result[:, 0] *= x_scale_factor
result[:, 1] *= y_scale_factor
return result
def calculate_normalizing_scale_factor(self):
"""Calculate the normalizing factor explained in appendix of sketch-rnn."""
data = []
for i in range(len(self.strokes)):
if len(self.strokes[i]) > self.max_seq_length:
continue
for j in range(len(self.strokes[i])):
data.append(self.strokes[i][j, 0])
data.append(self.strokes[i][j, 1])
data = np.array(data)
return np.std(data)
def normalize(self, scale_factor=None):
"""Normalize entire dataset (delta_x, delta_y) by the scaling factor."""
if scale_factor is None:
scale_factor = self.calculate_normalizing_scale_factor()
self.scale_factor = scale_factor
for i in range(len(self.strokes)):
self.strokes[i][:, 0:2] /= self.scale_factor
def _get_batch_from_indices(self, indices):
"""Given a list of indices, return the potentially augmented batch."""
x_batch = []
seq_len = []
for idx in range(len(indices)):
i = indices[idx]
data = self.random_scale(self.strokes[i])
data_copy = np.copy(data)
if self.augment_stroke_prob > 0:
data_copy = augment_strokes(data_copy, self.augment_stroke_prob)
x_batch.append(data_copy)
length = len(data_copy)
seq_len.append(length)
seq_len = np.array(seq_len, dtype=int)
# We return three things: stroke-3 format, stroke-5 format, list of seq_len.
return x_batch, self.pad_batch(x_batch, self.max_seq_length), seq_len
def random_batch(self):
"""Return a randomised portion of the training data."""
idx = np.random.permutation(range(0, len(self.strokes)))[0:self.batch_size]
return self._get_batch_from_indices(idx)
def get_batch(self, idx):
"""Get the idx'th batch from the dataset."""
# print('DBG'+str(idx))
assert idx >= 0, "idx must be non negative"
assert idx < self.num_batches, "idx must be less than the number of batches:"
start_idx = idx * self.batch_size
indices = range(start_idx, start_idx + self.batch_size)
return self._get_batch_from_indices(indices)
def pad_batch(self, batch, max_len):
"""Pad the batch to be stroke-5 bigger format as described in paper."""
result = np.zeros((self.batch_size, max_len + 1, 5), dtype=float)
assert len(batch) == self.batch_size
for i in range(self.batch_size):
l = len(batch[i])
assert l <= max_len
result[i, 0:l, 0:2] = batch[i][:, 0:2]
result[i, 0:l, 3] = batch[i][:, 2]
result[i, 0:l, 2] = 1 - result[i, 0:l, 3]
result[i, l:, 4] = 1
# put in the first token, as described in sketch-rnn methodology
result[i, 1:, :] = result[i, :-1, :]
result[i, 0, :] = 0
result[i, 0, 2] = self.start_stroke_token[2] # setting S_0 from paper.
result[i, 0, 3] = self.start_stroke_token[3]
result[i, 0, 4] = self.start_stroke_token[4]
return result
| [
"numpy.random.rand",
"numpy.argsort",
"numpy.array",
"copy.deepcopy",
"numpy.linalg.norm",
"numpy.sin",
"numpy.random.random",
"numpy.concatenate",
"numpy.maximum",
"six.moves.cStringIO",
"random.choice",
"keras.backend.set_value",
"requests.get",
"numpy.std",
"numpy.copy",
"numpy.mini... | [((3132, 3192), 'numpy.concatenate', 'np.concatenate', (['(train_strokes, valid_strokes, test_strokes)'], {}), '((train_strokes, valid_strokes, test_strokes))\n', (3146, 3192), True, 'import numpy as np\n'), ((8890, 8903), 'numpy.sin', 'np.sin', (['omega'], {}), '(omega)\n', (8896, 8903), True, 'import numpy as np\n'), ((10633, 10650), 'numpy.array', 'np.array', (['strokes'], {}), '(strokes)\n', (10641, 10650), True, 'import numpy as np\n'), ((11640, 11656), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (11648, 11656), True, 'import numpy as np\n'), ((12338, 12354), 'numpy.zeros', 'np.zeros', (['(l, 3)'], {}), '((l, 3))\n', (12346, 12354), True, 'import numpy as np\n'), ((13453, 13488), 'numpy.zeros', 'np.zeros', (['(max_len, 5)'], {'dtype': 'float'}), '((max_len, 5), dtype=float)\n', (13461, 13488), True, 'import numpy as np\n'), ((2143, 2174), 'os.path.join', 'os.path.join', (['data_dir', 'dataset'], {}), '(data_dir, dataset)\n', (2155, 2174), False, 'import os\n'), ((7258, 7300), 'keras.backend.set_value', 'K.set_value', (['self.kl_weight', 'new_kl_weight'], {}), '(self.kl_weight, new_kl_weight)\n', (7269, 7300), True, 'import keras.backend as K\n'), ((11313, 11329), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (11327, 11329), True, 'import numpy as np\n'), ((15675, 15692), 'numpy.array', 'np.array', (['seq_len'], {}), '(seq_len)\n', (15683, 15692), True, 'import numpy as np\n'), ((15735, 15754), 'numpy.argsort', 'np.argsort', (['seq_len'], {}), '(seq_len)\n', (15745, 15754), True, 'import numpy as np\n'), ((16513, 16526), 'numpy.copy', 'np.copy', (['data'], {}), '(data)\n', (16520, 16526), True, 'import numpy as np\n'), ((17074, 17088), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (17082, 17088), True, 'import numpy as np\n'), ((17104, 17116), 'numpy.std', 'np.std', (['data'], {}), '(data)\n', (17110, 17116), True, 'import numpy as np\n'), ((18070, 18098), 'numpy.array', 'np.array', (['seq_len'], {'dtype': 
'int'}), '(seq_len, dtype=int)\n', (18078, 18098), True, 'import numpy as np\n'), ((19040, 19096), 'numpy.zeros', 'np.zeros', (['(self.batch_size, max_len + 1, 5)'], {'dtype': 'float'}), '((self.batch_size, max_len + 1, 5), dtype=float)\n', (19048, 19096), True, 'import numpy as np\n'), ((2327, 2354), 'requests.get', 'requests.get', (['data_filepath'], {}), '(data_filepath)\n', (2339, 2354), False, 'import requests\n'), ((2919, 2965), 'numpy.concatenate', 'np.concatenate', (["(train_strokes, data['train'])"], {}), "((train_strokes, data['train']))\n", (2933, 2965), True, 'import numpy as np\n'), ((2994, 3040), 'numpy.concatenate', 'np.concatenate', (["(valid_strokes, data['valid'])"], {}), "((valid_strokes, data['valid']))\n", (3008, 3040), True, 'import numpy as np\n'), ((3068, 3112), 'numpy.concatenate', 'np.concatenate', (["(test_strokes, data['test'])"], {}), "((test_strokes, data['test']))\n", (3082, 3112), True, 'import numpy as np\n'), ((4957, 4989), 'os.path.join', 'os.path.join', (['logsdir', '"""log.txt"""'], {}), "(logsdir, 'log.txt')\n", (4969, 4989), False, 'import os\n'), ((16131, 16158), 'random.choice', 'random.choice', (['self.strokes'], {}), '(self.strokes)\n', (16144, 16158), False, 'import random\n'), ((17803, 17816), 'numpy.copy', 'np.copy', (['data'], {}), '(data)\n', (17810, 17816), True, 'import numpy as np\n'), ((2382, 2408), 'six.moves.cStringIO', 'StringIO', (['response.content'], {}), '(response.content)\n', (2390, 2408), True, 'from six.moves import cStringIO as StringIO\n'), ((2471, 2512), 'numpy.load', 'np.load', (['data_filepath'], {'encoding': '"""latin1"""'}), "(data_filepath, encoding='latin1')\n", (2478, 2512), True, 'import numpy as np\n'), ((2554, 2576), 'numpy.load', 'np.load', (['data_filepath'], {}), '(data_filepath)\n', (2561, 2576), True, 'import numpy as np\n'), ((7891, 7922), 'keras.backend.eval', 'K.eval', (['self.model.optimizer.lr'], {}), '(self.model.optimizer.lr)\n', (7897, 7922), True, 'import keras.backend as 
K\n'), ((7958, 7980), 'keras.backend.eval', 'K.eval', (['self.kl_weight'], {}), '(self.kl_weight)\n', (7964, 7980), True, 'import keras.backend as K\n'), ((8835, 8853), 'numpy.linalg.norm', 'np.linalg.norm', (['p0'], {}), '(p0)\n', (8849, 8853), True, 'import numpy as np\n'), ((8860, 8878), 'numpy.linalg.norm', 'np.linalg.norm', (['p1'], {}), '(p1)\n', (8874, 8878), True, 'import numpy as np\n'), ((8915, 8940), 'numpy.sin', 'np.sin', (['((1.0 - t) * omega)'], {}), '((1.0 - t) * omega)\n', (8921, 8940), True, 'import numpy as np\n'), ((8953, 8970), 'numpy.sin', 'np.sin', (['(t * omega)'], {}), '(t * omega)\n', (8959, 8970), True, 'import numpy as np\n'), ((15389, 15417), 'numpy.minimum', 'np.minimum', (['data', 'self.limit'], {}), '(data, self.limit)\n', (15399, 15417), True, 'import numpy as np\n'), ((15441, 15470), 'numpy.maximum', 'np.maximum', (['data', '(-self.limit)'], {}), '(data, -self.limit)\n', (15451, 15470), True, 'import numpy as np\n'), ((15494, 15526), 'numpy.array', 'np.array', (['data'], {'dtype': 'np.float32'}), '(data, dtype=np.float32)\n', (15502, 15526), True, 'import numpy as np\n'), ((5366, 5388), 'copy.deepcopy', 'copy.deepcopy', (['k', 'memo'], {}), '(k, memo)\n', (5379, 5388), False, 'import copy\n'), ((5390, 5412), 'copy.deepcopy', 'copy.deepcopy', (['v', 'memo'], {}), '(v, memo)\n', (5403, 5412), False, 'import copy\n'), ((16331, 16349), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (16347, 16349), True, 'import numpy as np\n'), ((16433, 16451), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (16449, 16451), True, 'import numpy as np\n')] |
import numpy as np
def load_swirls():
with np.load('a1_data.npz') as data:
x = data['swirls_x']
y = data['swirls_y']
return x, y
def load_noisy_circles():
with np.load('a1_data.npz') as data:
x = data['circles_x']
y = data['circles_y']
return x, y
def load_noisy_moons():
with np.load('a1_data.npz') as data:
x = data['moons_x']
y = data['moons_y']
return x, y
def load_partitioned_circles():
with np.load('a1_data.npz') as data:
x = data['partitioned_circles_x']
y = data['partitioned_circles_y']
return x, y
def sigmoid(x):
s = 1 / (1 + np.exp(-x))
return s
def compute_accuracy(X, Y, W1, B1, W2, B2):
""" Compute the accuracy of the model
Inputs:
X: NumPy array of feature data of shape (n, m)
Y: NumPy array of labels of shape (m,)
W1: NumPy array of first layer of parameters with shape (n_h, n_y)
B1: NumPy array of second layer bias parameters with shape (n_h, 1)
W2: NumPy array of second layer of parameters with shape (1, n_h)
B2: NumPy array of second layer bias parameters with shape (1, 1)
Returns:
NumPy array (m, ) of predictions. Values are 0 or 1.
"""
Y_predicted = predict(X, W1, B1, W2, B2)
accuracy = np.mean(Y_predicted == Y)
return accuracy
def load_data_and_hyperparams(data_set_name):
if data_set_name == 'noisy_circles':
X, Y = load_noisy_circles()
n_iters = 1400
learning_rate = 1.5
elif data_set_name == 'noisy_moons':
X, Y = load_noisy_moons()
n_iters = 1000
learning_rate = 1.8
elif data_set_name == 'swirls':
X, Y = load_swirls()
n_iters = 700
learning_rate = 1.2
elif data_set_name == 'flower':
X, Y = load_flower()
n_iters = 500
learning_rate = 1.2
else:
raise ValueError("Unexpected value '{0}' for data_set_name".format(data_set_name))
return X, Y, n_iters, learning_rate
| [
"numpy.exp",
"numpy.mean",
"numpy.load"
] | [((1325, 1350), 'numpy.mean', 'np.mean', (['(Y_predicted == Y)'], {}), '(Y_predicted == Y)\n', (1332, 1350), True, 'import numpy as np\n'), ((49, 71), 'numpy.load', 'np.load', (['"""a1_data.npz"""'], {}), "('a1_data.npz')\n", (56, 71), True, 'import numpy as np\n'), ((192, 214), 'numpy.load', 'np.load', (['"""a1_data.npz"""'], {}), "('a1_data.npz')\n", (199, 214), True, 'import numpy as np\n'), ((335, 357), 'numpy.load', 'np.load', (['"""a1_data.npz"""'], {}), "('a1_data.npz')\n", (342, 357), True, 'import numpy as np\n'), ((481, 503), 'numpy.load', 'np.load', (['"""a1_data.npz"""'], {}), "('a1_data.npz')\n", (488, 503), True, 'import numpy as np\n'), ((647, 657), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (653, 657), True, 'import numpy as np\n')] |
import numpy as np
n = int(input())
t, a = map(int, input().split())
h = list(map(int, input().split()))
h = np.array(h)
dift = abs((t - h * 0.006) - a)
ans = np.argmin(dift) + 1
print(ans)
| [
"numpy.argmin",
"numpy.array"
] | [((115, 126), 'numpy.array', 'np.array', (['h'], {}), '(h)\n', (123, 126), True, 'import numpy as np\n'), ((171, 186), 'numpy.argmin', 'np.argmin', (['dift'], {}), '(dift)\n', (180, 186), True, 'import numpy as np\n')] |
import torch
from timm.models import vision_transformer
from PIL import Image
from torch import nn
import os
import tarfile
import numpy as np
import random
import io
import torch
from torchvision import transforms
STRIDE = 1
EXTRACTION_FPS = 25
NUM_FRAMES = 4
def _sample_video_idx(vlen):
frame_stride = STRIDE * EXTRACTION_FPS
target_frames = min((vlen // frame_stride) + 1, NUM_FRAMES)
sample_range = (target_frames - 1) * frame_stride
possible_start_idx = range(0, vlen - sample_range + 1)
if len(possible_start_idx) == 0:
print(vlen, sample_range)
start_idx = random.choice(possible_start_idx)
# center crop
# start_idx = possible_start_idx[len(possible_start_idx) // 2]
res = np.linspace(start=start_idx, stop=start_idx + sample_range - 1, num=target_frames,
endpoint=False).astype(int)
return res
### instantiate model
# TimeSformer backbone; head and pre_logits are replaced with identities so
# the forward pass yields pooled features instead of class logits.
model = vision_transformer.timesformer_base_patch16_224(num_frames=NUM_FRAMES)
model.head = nn.Identity()
model.pre_logits = nn.Identity()
model = model.cuda()
# load pretrained
# NOTE(review): load_state_dict is called on the raw loaded object, so this
# assumes the file stores a plain state dict (not a {'state_dict': ...}
# wrapper) -- confirm against how the checkpoint was saved.
checkpoint = torch.load(
    '/work/maxbain/Libs/Alignment/saved/models/msrvtt_jsfusion_baseline__timesformer_ccep1__distilbert_base_uncased__NormSoftMax/0305_160357/model_best.pth')
model.load_state_dict(checkpoint) # , strict=False)
### get example video.
frame_dir = '/scratch/shared/beegfs/albanie/shared-datasets/MSRVTT/high-quality/frames-25fps-256px/tars/all'
target_video = 'video9991'
tar_fp = os.path.join(frame_dir, target_video + '.tar')
tf = tarfile.open(tar_fp)
contents = tf.getmembers()
# Sorted names of the per-frame .jpg members inside the tar archive.
target_names = np.array(sorted([x.name for x in contents if x.name.endswith('.jpg')]))
vid_len = len(target_names)
frame_idxs = _sample_video_idx(vid_len)
# Decode each sampled frame from the tar into a CHW float tensor.
imgs = []
for fidx in frame_idxs:
    if fidx < len(target_names):
        fp = target_names[fidx]
        image = tf.extractfile(fp)
        image = image.read()
        image = Image.open(io.BytesIO(image))
        image = transforms.ToTensor()(image)
        imgs.append(image)
# Fall back to a single blank white frame if nothing could be decoded.
if imgs == []:
    imgs = [transforms.ToTensor()(Image.new('RGB', (300, 300), (255, 255, 255)))]
imgs = torch.stack(imgs)
input_res = 224
# Standard ImageNet channel statistics for normalisation.
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
tsfm = transforms.Compose([
    transforms.CenterCrop(256),
    transforms.Resize(input_res),
    normalize
])
imgs = tsfm(imgs)
import pdb; pdb.set_trace() | [
"torchvision.transforms.CenterCrop",
"tarfile.open",
"timm.models.vision_transformer.timesformer_base_patch16_224",
"random.choice",
"PIL.Image.new",
"torch.load",
"torch.stack",
"os.path.join",
"io.BytesIO",
"numpy.linspace",
"torchvision.transforms.Normalize",
"pdb.set_trace",
"torchvision... | [((907, 977), 'timm.models.vision_transformer.timesformer_base_patch16_224', 'vision_transformer.timesformer_base_patch16_224', ([], {'num_frames': 'NUM_FRAMES'}), '(num_frames=NUM_FRAMES)\n', (954, 977), False, 'from timm.models import vision_transformer\n'), ((991, 1004), 'torch.nn.Identity', 'nn.Identity', ([], {}), '()\n', (1002, 1004), False, 'from torch import nn\n'), ((1024, 1037), 'torch.nn.Identity', 'nn.Identity', ([], {}), '()\n', (1035, 1037), False, 'from torch import nn\n'), ((1090, 1264), 'torch.load', 'torch.load', (['"""/work/maxbain/Libs/Alignment/saved/models/msrvtt_jsfusion_baseline__timesformer_ccep1__distilbert_base_uncased__NormSoftMax/0305_160357/model_best.pth"""'], {}), "(\n '/work/maxbain/Libs/Alignment/saved/models/msrvtt_jsfusion_baseline__timesformer_ccep1__distilbert_base_uncased__NormSoftMax/0305_160357/model_best.pth'\n )\n", (1100, 1264), False, 'import torch\n'), ((1482, 1528), 'os.path.join', 'os.path.join', (['frame_dir', "(target_video + '.tar')"], {}), "(frame_dir, target_video + '.tar')\n", (1494, 1528), False, 'import os\n'), ((1535, 1555), 'tarfile.open', 'tarfile.open', (['tar_fp'], {}), '(tar_fp)\n', (1547, 1555), False, 'import tarfile\n'), ((2126, 2143), 'torch.stack', 'torch.stack', (['imgs'], {}), '(imgs)\n', (2137, 2143), False, 'import torch\n'), ((2172, 2247), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (2192, 2247), False, 'from torchvision import transforms\n'), ((2472, 2487), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (2485, 2487), False, 'import pdb\n'), ((599, 632), 'random.choice', 'random.choice', (['possible_start_idx'], {}), '(possible_start_idx)\n', (612, 632), False, 'import random\n'), ((2333, 2359), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(256)'], {}), '(256)\n', (2354, 2359), False, 'from torchvision 
import transforms\n'), ((2377, 2405), 'torchvision.transforms.Resize', 'transforms.Resize', (['input_res'], {}), '(input_res)\n', (2394, 2405), False, 'from torchvision import transforms\n'), ((728, 831), 'numpy.linspace', 'np.linspace', ([], {'start': 'start_idx', 'stop': '(start_idx + sample_range - 1)', 'num': 'target_frames', 'endpoint': '(False)'}), '(start=start_idx, stop=start_idx + sample_range - 1, num=\n target_frames, endpoint=False)\n', (739, 831), True, 'import numpy as np\n'), ((1929, 1946), 'io.BytesIO', 'io.BytesIO', (['image'], {}), '(image)\n', (1939, 1946), False, 'import io\n'), ((1964, 1985), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1983, 1985), False, 'from torchvision import transforms\n'), ((2048, 2069), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2067, 2069), False, 'from torchvision import transforms\n'), ((2070, 2115), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(300, 300)', '(255, 255, 255)'], {}), "('RGB', (300, 300), (255, 255, 255))\n", (2079, 2115), False, 'from PIL import Image\n')] |
# coding: utf-8
# DMUtils.py
# Dark Matter rate calculator as part of WIMpy_NREFT
#
# Author: <NAME>
# Email: <EMAIL>
# Last updated: 02/03/2018
import numpy as np
from numpy import pi, cos, sin
from scipy.integrate import trapz, cumtrapz, quad
from scipy.interpolate import interp1d
from numpy.random import rand
from scipy.special import erf
import os
#Nuclear structure functions
# import WD, WM, WMP2, WP1, WP2, WS1, WS2, WS1D
import WIMpy.WD as WD
import WIMpy.WM as WM
import WIMpy.WMP2 as WMP2
import WIMpy.WP1 as WP1
import WIMpy.WP2 as WP2
import WIMpy.WS1 as WS1
import WIMpy.WS2 as WS2
import WIMpy.WS1D as WS1D
# Load in the list of nuclear spins and atomic masses.
# Resolve the path once, relative to this module, so the table is found
# regardless of the caller's working directory (the original rebuilt the
# path three times via string concatenation).
_nuclei_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), "Nuclei.txt")
# Column 0: target labels; column 1: mass numbers A; column 2: nuclear spins J.
target_list = np.loadtxt(_nuclei_file, usecols=(0,), dtype=bytes).astype(str)
A_list = np.loadtxt(_nuclei_file, usecols=(1,))
J_list = np.loadtxt(_nuclei_file, usecols=(2,))
# Lookup tables keyed by target name.
Jvals = dict(zip(target_list, J_list))
Avals = dict(zip(target_list, A_list))
#----------------------------------------------------
#---- Velocity Integrals (and helper functions) -----
#----------------------------------------------------
rho0 = 0.3 #GeV/cm^3
#---------------------------------------------------------
# Velocity integral eta
def calcEta(vmin, vlag=230.0, sigmav=156.0,vesc=544.0):
    """Velocity integral eta(vmin) for a truncated Maxwell-Boltzmann halo.

    All speeds are in km/s: vmin is the minimum DM speed, vlag the lab's
    lag speed, sigmav the velocity dispersion and vesc the Galactic
    escape speed. Negative round-off is clipped to zero.
    """
    v0 = np.sqrt(2) * sigmav
    x_esc = vesc / v0
    # Integration limits, capped at the escape speed.
    x_plus = np.minimum(vmin + vlag, vmin * 0.0 + vesc) / v0
    x_minus = np.minimum(vmin - vlag, vmin * 0.0 + vesc) / v0
    z = np.exp(-0.5 * (vesc / sigmav) ** 2)
    # Normalisation of the escape-speed-truncated Maxwellian.
    norm = 1.0 / (erf(x_esc) - np.sqrt(2.0 / np.pi) * (vesc / sigmav) * z)
    eta = (0.5 / vlag) * (erf(x_plus) - erf(x_minus))
    eta -= (1.0 / (np.sqrt(np.pi) * vlag)) * (x_plus - x_minus) * z
    return norm * np.clip(eta, 0, 1e30)
#---------------------------------------------------------
# Modified velocity integral
def calcMEta(vmin, vlag=230.0, sigmav=156.0,vesc=544.0):
    """Modified velocity integral for velocity-dependent NREFT operators.

    Same arguments as calcEta (all speeds in km/s). The final division by
    (3e5)^2 converts to units of 1/c^2, with c = 3e5 km/s.
    """
    # Most probable speed of the Maxwellian.
    v0 = np.sqrt(2.0)*sigmav
    # Dimensionless speeds in units of v0.
    amin = vmin/v0
    aplus = np.minimum((vmin+vlag), vmin*0.0 + vesc)/v0
    aminus = np.minimum((vmin-vlag), vmin*0.0 + vesc)/v0
    aesc = vesc/v0
    aE = vlag/v0
    # Analytic pieces of the integral over the truncated Maxwellian.
    A = v0*((aminus/(2*np.sqrt(pi)*aE) + pi**-0.5)*np.exp(-aminus**2) - (aplus/(2*np.sqrt(pi)*aE) - pi**-0.5)*np.exp(-aplus**2))
    B = (v0/(4.0*aE))*(1+2.0*aE**2)*(erf(aplus) - erf(aminus))
    # Correction from the escape-speed truncation.
    C = -(v0*pi**-0.5)*(2 + (1/(3.0*aE))*((amin + aesc - aminus)**3 - (amin + aesc - aplus)**3))*np.exp(-aesc**2)
    # Clip negative round-off, then convert with 1/c^2.
    return np.clip(A+B+C, 0, 1e10)/((3e5**2))
#-----------------------------------------------------------
# Minimum velocity
def vmin(E, A, m_x):
    """Minimum DM speed (km/s) that can produce a recoil of energy E.

    E is the recoil energy in keV, A the nucleon number of the target and
    m_x the Dark Matter mass in GeV.
    """
    nuc_mass = A*0.9315                     # nuclear mass in GeV
    mu = (nuc_mass*m_x)/(nuc_mass + m_x)    # reduced mass in GeV
    # E/1e6 converts keV -> GeV; 3e5 km/s converts from units of c.
    return 3e5*np.sqrt((E/1e6)*(nuc_mass)/(2*mu*mu))
#-----------------------------------------------------------
# Reduced mass - input A as nucleon number and m_x in GeV
def reduced_m(A, m_x):
    """DM-nucleus reduced mass in GeV (A = nucleon number, m_x in GeV)."""
    nuc_mass = 0.9315*A
    return nuc_mass*m_x/(nuc_mass + m_x)

# Helper for the overall prefactor of dRdE (uses the local density rho0).
def rate_prefactor(m_x):
    """Rate normalisation for dRdE, for DM mass m_x in GeV."""
    mu_n = reduced_m(1.0, m_x)
    return 4.34e41*rho0/(2.0*m_x*mu_n*mu_n)

# 0.197 GeV = 1e13/cm  ->  GeV^-1 = 1.97e-14 cm
def coupling_to_xsec(c, m_x):
    """Convert an NREFT coupling c into a DM-proton cross section in cm^2."""
    return (1.97e-14)**2*c**2*reduced_m(1.0, m_x)**2/np.pi
#----------------------------------------------------
#-------------------- Form Factors ------------------
#----------------------------------------------------
#-----------------------------------------------------------
# Standard Helm Form Factor for SI scattering
def calcSIFormFactor(E, m_N, old=False):
    """Squared Helm form factor for spin-independent scattering.

    E is the recoil energy in keV, m_N the nuclear mass number; ``old``
    selects the legacy radius parameterisation.
    """
    amu = 931.5*1e3  # amu -> keV
    # Momentum transfer: first in keV, then converted to fm^-1.
    q_keV = np.sqrt(2*m_N*amu*E)
    q_fm = q_keV*(1e-12/1.97e-7)
    # Helm parameterisation of the effective nuclear radius.
    s = 0.9
    a = 0.52
    c = 1.23*(m_N**(1.0/3.0)) - 0.60
    R1 = np.sqrt(c*c + 7*pi*pi*a*a/3.0 - 5*s*s)
    if old:
        # Legacy radius parameterisation.
        R1 = np.sqrt((1.2**2)*m_N**(2.0/3.0) - 5)
    x = q_fm*R1
    # Spherical Bessel function j1(x); F is the Helm form factor.
    bessel_j1 = np.sin(x)/x**2 - np.cos(x)/x
    F = 3*bessel_j1/x
    # NOTE(review): diverges (0/0 -> nan) at E == 0; the q->0 limit is 1.
    return (F**2)*(np.exp(-(q_fm*s)**2))
#----------------------------------------------
#-------- RECOIL RATES ------------------------
#----------------------------------------------
#--------------------------------------------------------
# Standard Spin-Independent recoil rate
# for a particle with (N_p,N_n) protons and neutrons
def dRdE_standard(E, N_p, N_n, m_x, sig, vlag=232.0, sigmav=156.0, vesc=544.0):
    """Standard spin-independent recoil spectrum dR/dE.

    E is the recoil energy in keV; N_p and N_n are the proton and neutron
    numbers; m_x the DM mass in GeV; sig the DM-nucleon cross section;
    vlag, sigmav and vesc are the halo parameters in km/s.
    """
    A = N_p + N_n  # mass number
    # Coherent A^2 enhancement times the Helm form factor.
    int_factor = sig*calcSIFormFactor(E, A)*(A**2)
    eta = calcEta(vmin(E, A, m_x), vlag, sigmav, vesc)
    return rate_prefactor(m_x)*int_factor*eta
#--------------------------------------------------------
# Total number of events for standard SI DM
def Nevents_standard(E_min, E_max, N_p, N_n, m_x, sig, eff=None, vlag=232.0, sigmav=156.0, vesc=544.0):
    """Total number of standard SI events between E_min and E_max (keV).

    eff is an optional detector-efficiency function of the recoil energy;
    when omitted, 100% efficiency is assumed. vlag, sigmav and vesc are
    the halo parameters in km/s.
    """
    if eff is None:
        # Was `eff == None`: identity comparison is the correct idiom here.
        print(" No efficiency!")
        eff = lambda x: 1.0
    # BUGFIX: the halo parameters are now forwarded to dRdE_standard
    # (previously vlag/sigmav/vesc were accepted but silently ignored).
    integ = lambda x: eff(x)*dRdE_standard(x, N_p, N_n, m_x, sig, vlag, sigmav, vesc)
    return quad(integ, E_min, E_max)[0]
#--------------------------------------------------------
# Differential recoil rate in NREFT framework
# Calculates the contribution from the interference of operators
# i and j (with couplings cp and cn to protons and neutrons)
def dRdE_NREFT(E, m_x, cp, cn, target, vlag=232.0, sigmav=156.0, vesc=544.0):
    """Differential recoil rate dR/dE in the NREFT framework.

    Sums, over both isospin combinations, the nuclear response functions
    tabulated in the WM/WP2/WMP2/WS2/WS1/WD/WS1D modules, each weighted by
    the appropriate bilinear of the operator coefficients.

    Parameters
    ----------
    * `E` [array]: Recoil energies in keV.
    * `m_x` [float]: Dark Matter mass in GeV.
    * `cp`, `cn` [list]: NREFT operator coefficients (length 11),
        coupling to protons and neutrons respectively.
    * `target` [string]: Recoil target (key into Avals/Jvals).
    * `vlag`, `sigmav`, `vesc` [float] (optional): halo parameters in km/s.

    Returns
    -------
    * `rate` [array like]: Recoil rate in events/keV/kg/day.
    """
    A = Avals[target]
    # Standard and modified velocity integrals.
    eta = calcEta(vmin(E, A, m_x),vlag=vlag, sigmav=sigmav, vesc=vesc)
    meta = calcMEta(vmin(E, A, m_x),vlag=vlag, sigmav=sigmav, vesc=vesc)
    amu = 931.5e3 # keV
    q1 = np.sqrt(2*A*amu*E)
    #Recoil momentum over nucleon mass
    qr = q1/amu
    # Required for form factors
    q2 = q1*(1e-12/1.97e-7)
    # Harmonic-oscillator parameter b (fm) and dimensionless variable y.
    b = np.sqrt(41.467/(45*A**(-1.0/3.0) - 25*A**(-2.0/3.0)))
    y = (q2*b/2)**2
    #Dark matter spin factor (assumes spin-1/2 DM)
    jx = 0.5
    jfac = jx*(jx+1.0)
    rate = E*0.0
    # Isoscalar (tau=0) and isovector (tau=1) coefficient combinations.
    c_sum = [cp[i] + cn[i] for i in range(11)]
    c_diff = [cp[i] - cn[i] for i in range(11)]
    c = [c_sum, c_diff]
    for tau1 in [0,1]:
        for tau2 in [0,1]:
            c1 = c[tau1]
            c2 = c[tau2]
            R_M = c1[0]*c2[0]*eta + jfac/3.0*(qr**2*meta*c1[4]*c2[4] \
                + meta*c1[7]*c2[7] + qr**2*eta*c1[10]*c2[10])
            rate += R_M*np.vectorize(WM.calcwm)(tau1, tau2, y, target)
            R_P2 = 0.25*qr**2*c1[2]*c2[2]*eta
            rate += R_P2*np.vectorize(WP2.calcwp2)(tau1, tau2, y, target)
            #Watch out, this one is the wrong way round...
            R_P2M = eta*c1[2]*c2[0]
            rate += R_P2M*np.vectorize(WMP2.calcwmp2)(tau1, tau2, y, target)
            R_S2 = eta*c1[9]*c2[9]*0.25*qr**2 + eta*jfac/12.0*(c1[3]*c2[3] + \
                qr**2*(c1[3]*c2[5] + c1[5]*c2[3]) + qr**4*c1[5]*c2[5])
            rate += R_S2*np.vectorize(WS2.calcws2)(tau1, tau2, y, target)
            R_S1 = (1.0/8.0)*meta*(qr**2*c1[2]*c2[2] + c1[6]*c2[6]) +\
                jfac/12.0*eta*(c1[3]*c2[3] + qr**2*c1[8]*c2[8])
            rate += R_S1*np.vectorize(WS1.calcws1)(tau1, tau2, y, target)
            R_D = jfac/3.0*eta*(qr**2*c1[4]*c2[4] + c1[7]*c2[7])
            rate += R_D*np.vectorize(WD.calcwd)(tau1, tau2, y, target)
            #This one might be flipped too
            R_S1D = jfac/3.0*eta*(c1[4]*c2[3] - c1[7]*c2[8])
            rate += R_S1D*np.vectorize(WS1D.calcws1d)(tau1, tau2, y, target)
    conv = (rho0/2./np.pi/m_x)*1.69612985e14 # 1 GeV^-4 * cm^-3 * km^-1 * s * c^6 * hbar^2 to keV^-1 kg^-1 day^-1
    # Clip negative round-off before applying the spin-averaging prefactor.
    rate = np.clip(rate, 0, 1e30)
    return (4*np.pi/(2*Jvals[target]+1))*rate*conv
def dRdE_anapole(E, m_x, c_A, target, vlag=232.0, sigmav=156.0, vesc=544.0):
    """Recoil spectrum for anapole Dark Matter.

    Maps the anapole moment onto NREFT operators O8 and O9 (proton
    couplings only) and delegates to dRdE_NREFT.
    See https://arxiv.org/pdf/1401.4508.pdf.

    Parameters
    ----------
    * `E` [array]:
        Recoil energies.
    * `m_x` [float]:
        Dark Matter mass in GeV.
    * `c_A` [float]:
        Dark Matter anapole moment (in GeV^-2).
    * `target` [string]:
        Recoil target.
    * `vlag`, `sigmav`, `vesc` [float] (optional):
        Halo parameters in km/s (defaults: 232, 156, 544).

    Returns
    -------
    * `rate` [array like]:
        Recoil rate in units of events/keV/kg/day.
    """
    # Electromagnetic coupling e = sqrt(4 pi alpha).
    alpha = 0.007297
    e_charge = np.sqrt(4*np.pi*alpha)
    proton_c = np.zeros(11)
    neutron_c = np.zeros(11)
    # Anapole DM switches on operators 8 and 9 with equal strength -2 e c_A.
    proton_c[7] = -2.0*e_charge*c_A
    proton_c[8] = -2.0*e_charge*c_A
    return dRdE_NREFT(E, m_x, proton_c, neutron_c, target, vlag, sigmav, vesc)
def dRdE_magnetic(E, m_x, mu_x, target, vlag=232.0, sigmav=156.0, vesc=544.0):
    """Recoil spectrum for magnetic-dipole Dark Matter.

    Maps the dipole moment onto NREFT operators O1, O4, O5 and O6 and
    delegates to dRdE_NREFT. See Eq. 62 of
    https://arxiv.org/pdf/1307.5955.pdf; the operator normalisations used
    here differ, hence the extra factors of m_x and m_p.

    Parameters
    ----------
    * `E` [array]:
        Recoil energies.
    * `m_x` [float]:
        Dark Matter mass in GeV.
    * `mu_x` [float]:
        Dark Matter magnetic dipole (in units of the Bohr Magneton).
    * `target` [string]:
        Recoil target.
    * `vlag`, `sigmav`, `vesc` [float] (optional):
        Halo parameters in km/s (defaults: 232, 156, 544).

    Returns
    -------
    * `rate` [array like]:
        Recoil rate in units of events/keV/kg/day.
    """
    A = Avals[target]
    amu = 931.5e3  # keV
    # Momentum transfer in keV (array-valued if E is an array).
    q1 = np.sqrt(2*A*amu*E)
    alpha = 0.007297
    e = np.sqrt(4*np.pi*alpha)
    m_p = 0.9315  # proton mass in GeV
    # Proton and neutron g-factors.
    gp = 5.59
    gn = -3.83
    # Bohr magneton in GeV^-1 (natural units with e = sqrt(4 pi alpha)).
    mu_B = 297.45
    dipole = mu_x*mu_B
    q_GeV_sq = (q1*1e-6)**2  # momentum transfer squared in GeV^2
    cp = [E*0.0 for _ in range(11)]
    cn = [E*0.0 for _ in range(11)]
    cp[0] = e*dipole/(2.0*m_x)            # Operator 1
    cp[4] = 2*e*dipole*m_p/q_GeV_sq       # Operator 5
    cp[3] = gp*e*dipole/m_p               # Operator 4 (proton)
    cn[3] = gn*e*dipole/m_p               # Operator 4 (neutron)
    cp[5] = -gp*e*dipole*m_p/q_GeV_sq     # Operator 6 (proton)
    cn[5] = -gn*e*dipole*m_p/q_GeV_sq     # Operator 6 (neutron)
    return dRdE_NREFT(E, m_x, cp, cn, target, vlag, sigmav, vesc)
def dRdE_millicharge(E, m_x, epsilon, target, vlag=232.0, sigmav=156.0, vesc=544.0):
    """Return recoil rate for millicharged Dark Matter.

    Parameters
    ----------
    * `E` [array]:
        Recoil energies.
    * `m_x` [float]:
        Dark Matter mass in GeV.
    * `epsilon` [float]:
        Dark Matter charge (in units of the electron charge).
    * `target` [string]:
        Recoil target.
    * `vlag` [float] (optional):
        Average lag speed of the lab in km/s. Default is 232.
    * `sigmav` [float] (optional):
        Velocity dispersion of the DM halo in km/s. Default is 156.
    * `vesc` [float] (optional):
        Escape speed in the Galactic frame in km/s. Default is 544.

    Returns
    -------
    * `rate` [array like]:
        Recoil rate in units of events/keV/kg/day.
    """
    A = Avals[target]
    eta = calcEta(vmin(E, A, m_x),vlag=vlag, sigmav=sigmav, vesc=vesc)
    amu = 931.5e3 # keV
    q1 = np.sqrt(2*A*amu*E)
    #Recoil momentum over nucleon mass
    # NOTE(review): qr is computed but not used below.
    qr = q1/amu
    # Required for form factors
    q2 = q1*(1e-12/1.97e-7)
    # Harmonic-oscillator parameter b (fm) and dimensionless variable y.
    b = np.sqrt(41.467/(45*A**(-1.0/3.0) - 25*A**(-2.0/3.0)))
    y = (q2*b/2)**2
    rate = E*0.0
    #Calculate the coupling to protons
    alpha = 0.007297
    e = np.sqrt(4*np.pi*alpha)
    # NOTE(review): m_p is computed but not used below.
    m_p = 0.9315
    cn = 0
    cp = epsilon*e**2
    # Isoscalar/isovector combinations (neutrons are uncharged).
    c = [cp + cn, cp - cn]
    for tau1 in [0,1]:
        for tau2 in [0,1]:
            c1 = c[tau1]
            c2 = c[tau2]
            # Photon-exchange propagator gives the 1/q^4 enhancement.
            R_M = c1*c2*eta/(q1*1e-6)**4
            rate += R_M*np.vectorize(WM.calcwm)(tau1, tau2, y, target)
    conv = (rho0/2./np.pi/m_x)*1.69612985e14 # 1 GeV^-4 * cm^-3 * km^-1 * s * c^6 * hbar^2 to keV^-1 kg^-1 day^-1
    # Clip negative round-off before applying the spin-averaging prefactor.
    rate = np.clip(rate, 0, 1e30)
    return (4*np.pi/(2*Jvals[target]+1))*rate*conv
#--------------------------------------------------------
# Number of events in NREFT
# See also dRdE_NREFT for more details
# Optionally, you can pass a function 'eff' defining the detector efficiency
def Nevents_NREFT(E_min, E_max, m_x, cp, cn, target, eff=None, vlag=232.0, sigmav=156.0, vesc=544.0):
    """Total number of NREFT events between E_min and E_max (keV).

    cp and cn are the length-11 NREFT operator coefficients; eff is an
    optional detector-efficiency function of the recoil energy (100%
    efficiency if omitted). See dRdE_NREFT for the remaining parameters.
    """
    if eff is None:
        # Was `eff == None`: identity comparison is the correct idiom here.
        eff = lambda x: 1
    integ = lambda x: eff(x)*dRdE_NREFT(x, m_x, cp, cn, target, vlag, sigmav, vesc)
    return quad(integ, E_min, E_max)[0]
#---------------------------------------------------------
#Code for the long range interactions (which we're not using...)
"""
#Long-range interactions
elif (i == 101):
rate = (qr**-4)*eta*FF_M(y)
elif (i == 104):
#rate = (qr**-4)*(1.0/16.0)*eta*FF_SD(E)
rate = 0 #ZERO BY DEFINITION!
elif (i == 105):
A = meta*FF_M(y)
B = eta*(qr**2)*FF_Delta(y)
rate = 0.25*(qr**-2.0)*(A+B)
elif (i == 106):
rate = (1.0/16.0)*eta*FF_Sigma2(y)
elif (i == 111):
rate = 0.25*eta*(qr**-2)*FF_M(y)
#Interference terms
else:
if ((i == 1 and j == 3) or (i == 3 and j == 1)):
rate = (1.0/2.0)*(qr**2)*eta*FF_MPhi2(y)
elif ((i == 4 and j == 5) or (i == 5 and j == 4)):
rate = -(1.0/8.0)*(qr**2)*eta*FF_Sigma1Delta(y)
elif ((i == 4 and j == 6) or (i == 6 and j == 4)):
rate = (1.0/16.0)*(qr**2)*eta*FF_Sigma2(y)
elif ((i == 8 and j == 9) or (i == 9 and j ==8)):
rate = (1.0/8.0)*(qr**2)*eta*FF_Sigma1Delta(y)
elif ((i == 104 and j == 105) or (i == 105 and j == 104)):
rate = -(1.0/8.0)*eta*FF_Sigma1Delta(y)
elif ((i == 104) and (j == 106) or (i == 106 and j == 104)):
rate = (1.0/16.0)*eta*FF_Sigma2(y)
"""
| [
"numpy.clip",
"numpy.sqrt",
"numpy.minimum",
"scipy.integrate.quad",
"numpy.exp",
"os.path.realpath",
"numpy.zeros",
"scipy.special.erf",
"numpy.cos",
"numpy.sin",
"numpy.vectorize"
] | [((1890, 1921), 'numpy.clip', 'np.clip', (['vel_integral', '(0)', '(1e+30)'], {}), '(vel_integral, 0, 1e+30)\n', (1897, 1921), True, 'import numpy as np\n'), ((3912, 3938), 'numpy.sqrt', 'np.sqrt', (['(2 * m_N * amu * E)'], {}), '(2 * m_N * amu * E)\n', (3919, 3938), True, 'import numpy as np\n'), ((4098, 4152), 'numpy.sqrt', 'np.sqrt', (['(c * c + 7 * pi * pi * a * a / 3.0 - 5 * s * s)'], {}), '(c * c + 7 * pi * pi * a * a / 3.0 - 5 * s * s)\n', (4105, 4152), True, 'import numpy as np\n'), ((5905, 5929), 'numpy.sqrt', 'np.sqrt', (['(2 * A * amu * E)'], {}), '(2 * A * amu * E)\n', (5912, 5929), True, 'import numpy as np\n'), ((6053, 6120), 'numpy.sqrt', 'np.sqrt', (['(41.467 / (45 * A ** (-1.0 / 3.0) - 25 * A ** (-2.0 / 3.0)))'], {}), '(41.467 / (45 * A ** (-1.0 / 3.0) - 25 * A ** (-2.0 / 3.0)))\n', (6060, 6120), True, 'import numpy as np\n'), ((7898, 7921), 'numpy.clip', 'np.clip', (['rate', '(0)', '(1e+30)'], {}), '(rate, 0, 1e+30)\n', (7905, 7921), True, 'import numpy as np\n'), ((8824, 8850), 'numpy.sqrt', 'np.sqrt', (['(4 * np.pi * alpha)'], {}), '(4 * np.pi * alpha)\n', (8831, 8850), True, 'import numpy as np\n'), ((8861, 8873), 'numpy.zeros', 'np.zeros', (['(11)'], {}), '(11)\n', (8869, 8873), True, 'import numpy as np\n'), ((8883, 8895), 'numpy.zeros', 'np.zeros', (['(11)'], {}), '(11)\n', (8891, 8895), True, 'import numpy as np\n'), ((10134, 10158), 'numpy.sqrt', 'np.sqrt', (['(2 * A * amu * E)'], {}), '(2 * A * amu * E)\n', (10141, 10158), True, 'import numpy as np\n'), ((10211, 10237), 'numpy.sqrt', 'np.sqrt', (['(4 * np.pi * alpha)'], {}), '(4 * np.pi * alpha)\n', (10218, 10237), True, 'import numpy as np\n'), ((11959, 11983), 'numpy.sqrt', 'np.sqrt', (['(2 * A * amu * E)'], {}), '(2 * A * amu * E)\n', (11966, 11983), True, 'import numpy as np\n'), ((12107, 12174), 'numpy.sqrt', 'np.sqrt', (['(41.467 / (45 * A ** (-1.0 / 3.0) - 25 * A ** (-2.0 / 3.0)))'], {}), '(41.467 / (45 * A ** (-1.0 / 3.0) - 25 * A ** (-2.0 / 3.0)))\n', (12114, 12174), True, 
'import numpy as np\n'), ((12276, 12302), 'numpy.sqrt', 'np.sqrt', (['(4 * np.pi * alpha)'], {}), '(4 * np.pi * alpha)\n', (12283, 12302), True, 'import numpy as np\n'), ((12752, 12775), 'numpy.clip', 'np.clip', (['rate', '(0)', '(1e+30)'], {}), '(rate, 0, 1e+30)\n', (12759, 12775), True, 'import numpy as np\n'), ((1418, 1460), 'numpy.minimum', 'np.minimum', (['(vmin + vlag)', '(vmin * 0.0 + vesc)'], {}), '(vmin + vlag, vmin * 0.0 + vesc)\n', (1428, 1460), True, 'import numpy as np\n'), ((1492, 1534), 'numpy.minimum', 'np.minimum', (['(vmin - vlag)', '(vmin * 0.0 + vesc)'], {}), '(vmin - vlag, vmin * 0.0 + vesc)\n', (1502, 1534), True, 'import numpy as np\n'), ((1836, 1871), 'numpy.exp', 'np.exp', (['(-0.5 * (vesc / sigmav) ** 2)'], {}), '(-0.5 * (vesc / sigmav) ** 2)\n', (1842, 1871), True, 'import numpy as np\n'), ((2107, 2119), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (2114, 2119), True, 'import numpy as np\n'), ((2158, 2200), 'numpy.minimum', 'np.minimum', (['(vmin + vlag)', '(vmin * 0.0 + vesc)'], {}), '(vmin + vlag, vmin * 0.0 + vesc)\n', (2168, 2200), True, 'import numpy as np\n'), ((2215, 2257), 'numpy.minimum', 'np.minimum', (['(vmin - vlag)', '(vmin * 0.0 + vesc)'], {}), '(vmin - vlag, vmin * 0.0 + vesc)\n', (2225, 2257), True, 'import numpy as np\n'), ((2592, 2610), 'numpy.exp', 'np.exp', (['(-aesc ** 2)'], {}), '(-aesc ** 2)\n', (2598, 2610), True, 'import numpy as np\n'), ((2625, 2661), 'numpy.clip', 'np.clip', (['(A + B + C)', '(0)', '(10000000000.0)'], {}), '(A + B + C, 0, 10000000000.0)\n', (2632, 2661), True, 'import numpy as np\n'), ((2824, 2868), 'numpy.sqrt', 'np.sqrt', (['(E / 1000000.0 * m_A / (2 * mu * mu))'], {}), '(E / 1000000.0 * m_A / (2 * mu * mu))\n', (2831, 2868), True, 'import numpy as np\n'), ((4169, 4211), 'numpy.sqrt', 'np.sqrt', (['(1.2 ** 2 * m_N ** (2.0 / 3.0) - 5)'], {}), '(1.2 ** 2 * m_N ** (2.0 / 3.0) - 5)\n', (4176, 4211), True, 'import numpy as np\n'), ((4305, 4327), 'numpy.exp', 'np.exp', (['(-(q2 * s) ** 2)'], 
{}), '(-(q2 * s) ** 2)\n', (4311, 4327), True, 'import numpy as np\n'), ((5357, 5382), 'scipy.integrate.quad', 'quad', (['integ', 'E_min', 'E_max'], {}), '(integ, E_min, E_max)\n', (5361, 5382), False, 'from scipy.integrate import trapz, cumtrapz, quad\n'), ((13284, 13309), 'scipy.integrate.quad', 'quad', (['integ', 'E_min', 'E_max'], {}), '(integ, E_min, E_max)\n', (13288, 13309), False, 'from scipy.integrate import trapz, cumtrapz, quad\n'), ((842, 868), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (858, 868), False, 'import os\n'), ((937, 963), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (953, 963), False, 'import os\n'), ((1460, 1470), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1467, 1470), True, 'import numpy as np\n'), ((1534, 1544), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1541, 1544), True, 'import numpy as np\n'), ((1570, 1580), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1577, 1580), True, 'import numpy as np\n'), ((1633, 1642), 'scipy.special.erf', 'erf', (['aesc'], {}), '(aesc)\n', (1636, 1642), False, 'from scipy.special import erf\n'), ((1745, 1755), 'scipy.special.erf', 'erf', (['aplus'], {}), '(aplus)\n', (1748, 1755), False, 'from scipy.special import erf\n'), ((1758, 1769), 'scipy.special.erf', 'erf', (['aminus'], {}), '(aminus)\n', (1761, 1769), False, 'from scipy.special import erf\n'), ((2469, 2479), 'scipy.special.erf', 'erf', (['aplus'], {}), '(aplus)\n', (2472, 2479), False, 'from scipy.special import erf\n'), ((2482, 2493), 'scipy.special.erf', 'erf', (['aminus'], {}), '(aminus)\n', (2485, 2493), False, 'from scipy.special import erf\n'), ((4231, 4240), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (4237, 4240), True, 'import numpy as np\n'), ((4248, 4257), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (4254, 4257), True, 'import numpy as np\n'), ((1678, 1713), 'numpy.exp', 'np.exp', (['(-0.5 * (vesc / sigmav) ** 2)'], {}), '(-0.5 * (vesc / sigmav) ** 2)\n', 
(1684, 1713), True, 'import numpy as np\n'), ((2351, 2371), 'numpy.exp', 'np.exp', (['(-aminus ** 2)'], {}), '(-aminus ** 2)\n', (2357, 2371), True, 'import numpy as np\n'), ((2410, 2429), 'numpy.exp', 'np.exp', (['(-aplus ** 2)'], {}), '(-aplus ** 2)\n', (2416, 2429), True, 'import numpy as np\n'), ((722, 748), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (738, 748), False, 'import os\n'), ((1645, 1665), 'numpy.sqrt', 'np.sqrt', (['(2.0 / np.pi)'], {}), '(2.0 / np.pi)\n', (1652, 1665), True, 'import numpy as np\n'), ((1797, 1811), 'numpy.sqrt', 'np.sqrt', (['np.pi'], {}), '(np.pi)\n', (1804, 1811), True, 'import numpy as np\n'), ((6636, 6659), 'numpy.vectorize', 'np.vectorize', (['WM.calcwm'], {}), '(WM.calcwm)\n', (6648, 6659), True, 'import numpy as np\n'), ((6759, 6784), 'numpy.vectorize', 'np.vectorize', (['WP2.calcwp2'], {}), '(WP2.calcwp2)\n', (6771, 6784), True, 'import numpy as np\n'), ((6934, 6961), 'numpy.vectorize', 'np.vectorize', (['WMP2.calcwmp2'], {}), '(WMP2.calcwmp2)\n', (6946, 6961), True, 'import numpy as np\n'), ((7173, 7198), 'numpy.vectorize', 'np.vectorize', (['WS2.calcws2'], {}), '(WS2.calcws2)\n', (7185, 7198), True, 'import numpy as np\n'), ((7395, 7420), 'numpy.vectorize', 'np.vectorize', (['WS1.calcws1'], {}), '(WS1.calcws1)\n', (7407, 7420), True, 'import numpy as np\n'), ((7538, 7561), 'numpy.vectorize', 'np.vectorize', (['WD.calcwd'], {}), '(WD.calcwd)\n', (7550, 7561), True, 'import numpy as np\n'), ((7720, 7747), 'numpy.vectorize', 'np.vectorize', (['WS1D.calcws1d'], {}), '(WS1D.calcws1d)\n', (7732, 7747), True, 'import numpy as np\n'), ((12574, 12597), 'numpy.vectorize', 'np.vectorize', (['WM.calcwm'], {}), '(WM.calcwm)\n', (12586, 12597), True, 'import numpy as np\n'), ((2323, 2334), 'numpy.sqrt', 'np.sqrt', (['pi'], {}), '(pi)\n', (2330, 2334), True, 'import numpy as np\n'), ((2382, 2393), 'numpy.sqrt', 'np.sqrt', (['pi'], {}), '(pi)\n', (2389, 2393), True, 'import numpy as np\n')] |
import numpy as np
from cgn import LinearConstraint, Parameter
from cgn.translator.get_sub_matrix import get_sub_matrix
def test_get_sub_matrix():
    """get_sub_matrix recovers each parameter's column block of a stacked matrix."""
    sizes = (13, 1, 3)
    n_constraints = 10
    params = [Parameter(start=np.zeros(size), name="x{}".format(idx + 1))
              for idx, size in enumerate(sizes)]
    blocks = [np.random.randn(n_constraints, size) for size in sizes]
    rhs = np.random.randn(n_constraints)
    stacked = np.concatenate(blocks, axis=1)
    constraint = LinearConstraint(parameters=params, a=stacked, b=rhs, ctype="ineq")
    for position, expected in enumerate(blocks):
        recovered = get_sub_matrix(stacked, constraint, position)
        assert np.isclose(expected, recovered).all()
def test_get_sub_matrix2():
    """get_sub_matrix also works when the constraint holds a single parameter."""
    n = 10
    c = 5
    param = Parameter(start=np.zeros(n), name="x")
    mat = np.random.randn(c, n)
    rhs = np.random.randn(c)
    constraint = LinearConstraint(parameters=[param], a=mat, b=rhs, ctype="eq")
    recovered = get_sub_matrix(mat, constraint, 0)
    assert np.isclose(mat, recovered).all()
| [
"cgn.LinearConstraint",
"numpy.isclose",
"cgn.translator.get_sub_matrix.get_sub_matrix",
"numpy.zeros",
"numpy.concatenate",
"numpy.random.randn"
] | [((355, 377), 'numpy.random.randn', 'np.random.randn', (['c', 'n1'], {}), '(c, n1)\n', (370, 377), True, 'import numpy as np\n'), ((387, 409), 'numpy.random.randn', 'np.random.randn', (['c', 'n2'], {}), '(c, n2)\n', (402, 409), True, 'import numpy as np\n'), ((419, 441), 'numpy.random.randn', 'np.random.randn', (['c', 'n3'], {}), '(c, n3)\n', (434, 441), True, 'import numpy as np\n'), ((450, 468), 'numpy.random.randn', 'np.random.randn', (['c'], {}), '(c)\n', (465, 468), True, 'import numpy as np\n'), ((477, 513), 'numpy.concatenate', 'np.concatenate', (['[a1, a2, a3]'], {'axis': '(1)'}), '([a1, a2, a3], axis=1)\n', (491, 513), True, 'import numpy as np\n'), ((531, 596), 'cgn.LinearConstraint', 'LinearConstraint', ([], {'parameters': '[x1, x2, x3]', 'a': 'a', 'b': 'b', 'ctype': '"""ineq"""'}), "(parameters=[x1, x2, x3], a=a, b=b, ctype='ineq')\n", (547, 596), False, 'from cgn import LinearConstraint, Parameter\n'), ((607, 639), 'cgn.translator.get_sub_matrix.get_sub_matrix', 'get_sub_matrix', (['a', 'constraint', '(0)'], {}), '(a, constraint, 0)\n', (621, 639), False, 'from cgn.translator.get_sub_matrix import get_sub_matrix\n'), ((650, 682), 'cgn.translator.get_sub_matrix.get_sub_matrix', 'get_sub_matrix', (['a', 'constraint', '(1)'], {}), '(a, constraint, 1)\n', (664, 682), False, 'from cgn.translator.get_sub_matrix import get_sub_matrix\n'), ((693, 725), 'cgn.translator.get_sub_matrix.get_sub_matrix', 'get_sub_matrix', (['a', 'constraint', '(2)'], {}), '(a, constraint, 2)\n', (707, 725), False, 'from cgn.translator.get_sub_matrix import get_sub_matrix\n'), ((986, 1007), 'numpy.random.randn', 'np.random.randn', (['c', 'n'], {}), '(c, n)\n', (1001, 1007), True, 'import numpy as np\n'), ((1016, 1034), 'numpy.random.randn', 'np.random.randn', (['c'], {}), '(c)\n', (1031, 1034), True, 'import numpy as np\n'), ((1052, 1106), 'cgn.LinearConstraint', 'LinearConstraint', ([], {'parameters': '[x]', 'a': 'a', 'b': 'b', 'ctype': '"""eq"""'}), "(parameters=[x], a=a, b=b, 
ctype='eq')\n", (1068, 1106), False, 'from cgn import LinearConstraint, Parameter\n'), ((1116, 1148), 'cgn.translator.get_sub_matrix.get_sub_matrix', 'get_sub_matrix', (['a', 'constraint', '(0)'], {}), '(a, constraint, 0)\n', (1130, 1148), False, 'from cgn.translator.get_sub_matrix import get_sub_matrix\n'), ((221, 233), 'numpy.zeros', 'np.zeros', (['n1'], {}), '(n1)\n', (229, 233), True, 'import numpy as np\n'), ((271, 283), 'numpy.zeros', 'np.zeros', (['n2'], {}), '(n2)\n', (279, 283), True, 'import numpy as np\n'), ((321, 333), 'numpy.zeros', 'np.zeros', (['n3'], {}), '(n3)\n', (329, 333), True, 'import numpy as np\n'), ((955, 966), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (963, 966), True, 'import numpy as np\n'), ((1160, 1177), 'numpy.isclose', 'np.isclose', (['a', 'at'], {}), '(a, at)\n', (1170, 1177), True, 'import numpy as np\n'), ((794, 811), 'numpy.isclose', 'np.isclose', (['a', 'at'], {}), '(a, at)\n', (804, 811), True, 'import numpy as np\n')] |
from mechanism import Vector, get_joints, Mechanism
import numpy as np
import matplotlib.pyplot as plt
O2, O4, O6, A, B, C, D, E, F, G = get_joints('O2 O4 O6 A B C D E F G')
# Vectors of the first linkage; r is the link length, theta a fixed angle,
# style controls how the link is drawn.
a = Vector((O4, B), r=2.5)
b = Vector((B, A), r=8.4)
c = Vector((O4, O2), r=12.5, theta=0, style='ground')
d = Vector((O2, A), r=5)
e = Vector((C, A), r=2.4, show=False)
f = Vector((C, E), r=8.9)
g = Vector((O6, E), r=3.2)
h = Vector((O2, O6), r=10.5, theta=np.pi/2, style='ground')
i = Vector((D, E), r=3, show=False)
j = Vector((D, F), r=6.4)
k = Vector((G, F), theta=np.deg2rad(150), style='dotted')
l = Vector((O6, G), r=1.2, theta=np.pi/2, style='dotted')
# Initial guesses for the position, velocity and acceleration solves.
guess1 = np.concatenate((np.deg2rad([120, 20, 70, 170, 120]), np.array([7])))
guess2 = np.array([15, 15, 30, 12, 30, 3])
guess3 = np.array([10, 10, 30, -30, 20, 10])
def loops(x, inp):
    """Vector-loop closure equations for the three loops of the mechanism.

    x holds the unknowns being solved for and inp is the driver input;
    each row of temp is one loop's residual, returned flattened.
    """
    temp = np.zeros((3, 2))
    temp[0] = a(inp) + b(x[1]) - d(x[0]) - c()
    temp[1] = f(x[2]) - g(x[3]) - h() + d(x[0]) - e(x[1])
    temp[2] = j(x[4]) - k(x[5]) - l() + g(x[3]) - i(x[2])
    return temp.flatten()
mechanism = Mechanism(vectors=(a, b, c, d, e, f, g, h, i, j, k, l), origin=O4, loops=loops,
                      pos=np.deg2rad(52.92024014972946), vel=-30, acc=0, guess=(guess1, guess2, guess3))
mechanism.calculate()
mechanism.tables(acceleration=True, velocity=True, position=True)
fig1, ax1 = mechanism.plot(cushion=2, show_joints=True)
fig2, ax2 = mechanism.plot(cushion=2, velocity=True, acceleration=True)
ax2.set_title('Showing Velocity and Acceleration')
# Second, smaller example: a four-bar linkage with a coupler point P.
# NOTE: the vector names a-f below intentionally shadow those used above.
O, A, B, C, P = get_joints('O A B C P')
a = Vector((O, A), r=2)
b = Vector((A, B), r=4.1)
c = Vector((C, B), r=3)
d = Vector((O, C), r=4, theta=0, style='ground')
e = Vector((A, P), r=2.5)
f = Vector((O, P), show=False)
def loops(x, i_):
    """Loop-closure equations for the four-bar linkage with coupler point P."""
    temp = np.zeros((2, 2))
    temp[0] = a(i_) + b(x[0]) - c(x[1]) - d()
    # The coupler point P sits at a fixed 30-degree offset from link b.
    temp[1] = a(i_) + e(x[0] + np.deg2rad(30)) - f(x[2], x[3])
    return temp.flatten()
guess1 = np.concatenate((np.deg2rad([20, 60]), np.array([4]), np.deg2rad([48])))
# NOTE(review): vectors are passed in the order (a, c, b, ...) here, unlike
# the declaration order above -- confirm this ordering is intentional.
mechanism = Mechanism(vectors=(a, c, b, d, e, f), origin=O, pos=np.deg2rad(45), guess=(guess1, ),
                      loops=loops)
mechanism.calculate()
mechanism.tables(position=True)
fig3, ax3 = mechanism.plot()
plt.show()
| [
"numpy.array",
"mechanism.Vector",
"numpy.zeros",
"numpy.deg2rad",
"mechanism.get_joints",
"matplotlib.pyplot.show"
] | [((138, 174), 'mechanism.get_joints', 'get_joints', (['"""O2 O4 O6 A B C D E F G"""'], {}), "('O2 O4 O6 A B C D E F G')\n", (148, 174), False, 'from mechanism import Vector, get_joints, Mechanism\n'), ((179, 201), 'mechanism.Vector', 'Vector', (['(O4, B)'], {'r': '(2.5)'}), '((O4, B), r=2.5)\n', (185, 201), False, 'from mechanism import Vector, get_joints, Mechanism\n'), ((206, 227), 'mechanism.Vector', 'Vector', (['(B, A)'], {'r': '(8.4)'}), '((B, A), r=8.4)\n', (212, 227), False, 'from mechanism import Vector, get_joints, Mechanism\n'), ((232, 281), 'mechanism.Vector', 'Vector', (['(O4, O2)'], {'r': '(12.5)', 'theta': '(0)', 'style': '"""ground"""'}), "((O4, O2), r=12.5, theta=0, style='ground')\n", (238, 281), False, 'from mechanism import Vector, get_joints, Mechanism\n'), ((286, 306), 'mechanism.Vector', 'Vector', (['(O2, A)'], {'r': '(5)'}), '((O2, A), r=5)\n', (292, 306), False, 'from mechanism import Vector, get_joints, Mechanism\n'), ((311, 344), 'mechanism.Vector', 'Vector', (['(C, A)'], {'r': '(2.4)', 'show': '(False)'}), '((C, A), r=2.4, show=False)\n', (317, 344), False, 'from mechanism import Vector, get_joints, Mechanism\n'), ((349, 370), 'mechanism.Vector', 'Vector', (['(C, E)'], {'r': '(8.9)'}), '((C, E), r=8.9)\n', (355, 370), False, 'from mechanism import Vector, get_joints, Mechanism\n'), ((375, 397), 'mechanism.Vector', 'Vector', (['(O6, E)'], {'r': '(3.2)'}), '((O6, E), r=3.2)\n', (381, 397), False, 'from mechanism import Vector, get_joints, Mechanism\n'), ((402, 459), 'mechanism.Vector', 'Vector', (['(O2, O6)'], {'r': '(10.5)', 'theta': '(np.pi / 2)', 'style': '"""ground"""'}), "((O2, O6), r=10.5, theta=np.pi / 2, style='ground')\n", (408, 459), False, 'from mechanism import Vector, get_joints, Mechanism\n'), ((462, 493), 'mechanism.Vector', 'Vector', (['(D, E)'], {'r': '(3)', 'show': '(False)'}), '((D, E), r=3, show=False)\n', (468, 493), False, 'from mechanism import Vector, get_joints, Mechanism\n'), ((498, 519), 'mechanism.Vector', 
'Vector', (['(D, F)'], {'r': '(6.4)'}), '((D, F), r=6.4)\n', (504, 519), False, 'from mechanism import Vector, get_joints, Mechanism\n'), ((582, 637), 'mechanism.Vector', 'Vector', (['(O6, G)'], {'r': '(1.2)', 'theta': '(np.pi / 2)', 'style': '"""dotted"""'}), "((O6, G), r=1.2, theta=np.pi / 2, style='dotted')\n", (588, 637), False, 'from mechanism import Vector, get_joints, Mechanism\n'), ((724, 757), 'numpy.array', 'np.array', (['[15, 15, 30, 12, 30, 3]'], {}), '([15, 15, 30, 12, 30, 3])\n', (732, 757), True, 'import numpy as np\n'), ((767, 802), 'numpy.array', 'np.array', (['[10, 10, 30, -30, 20, 10]'], {}), '([10, 10, 30, -30, 20, 10])\n', (775, 802), True, 'import numpy as np\n'), ((1525, 1548), 'mechanism.get_joints', 'get_joints', (['"""O A B C P"""'], {}), "('O A B C P')\n", (1535, 1548), False, 'from mechanism import Vector, get_joints, Mechanism\n'), ((1553, 1572), 'mechanism.Vector', 'Vector', (['(O, A)'], {'r': '(2)'}), '((O, A), r=2)\n', (1559, 1572), False, 'from mechanism import Vector, get_joints, Mechanism\n'), ((1577, 1598), 'mechanism.Vector', 'Vector', (['(A, B)'], {'r': '(4.1)'}), '((A, B), r=4.1)\n', (1583, 1598), False, 'from mechanism import Vector, get_joints, Mechanism\n'), ((1603, 1622), 'mechanism.Vector', 'Vector', (['(C, B)'], {'r': '(3)'}), '((C, B), r=3)\n', (1609, 1622), False, 'from mechanism import Vector, get_joints, Mechanism\n'), ((1627, 1671), 'mechanism.Vector', 'Vector', (['(O, C)'], {'r': '(4)', 'theta': '(0)', 'style': '"""ground"""'}), "((O, C), r=4, theta=0, style='ground')\n", (1633, 1671), False, 'from mechanism import Vector, get_joints, Mechanism\n'), ((1676, 1697), 'mechanism.Vector', 'Vector', (['(A, P)'], {'r': '(2.5)'}), '((A, P), r=2.5)\n', (1682, 1697), False, 'from mechanism import Vector, get_joints, Mechanism\n'), ((1702, 1728), 'mechanism.Vector', 'Vector', (['(O, P)'], {'show': '(False)'}), '((O, P), show=False)\n', (1708, 1728), False, 'from mechanism import Vector, get_joints, Mechanism\n'), ((2212, 
2222), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2220, 2222), True, 'import matplotlib.pyplot as plt\n'), ((835, 851), 'numpy.zeros', 'np.zeros', (['(3, 2)'], {}), '((3, 2))\n', (843, 851), True, 'import numpy as np\n'), ((1760, 1776), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (1768, 1776), True, 'import numpy as np\n'), ((545, 560), 'numpy.deg2rad', 'np.deg2rad', (['(150)'], {}), '(150)\n', (555, 560), True, 'import numpy as np\n'), ((662, 697), 'numpy.deg2rad', 'np.deg2rad', (['[120, 20, 70, 170, 120]'], {}), '([120, 20, 70, 170, 120])\n', (672, 697), True, 'import numpy as np\n'), ((699, 712), 'numpy.array', 'np.array', (['[7]'], {}), '([7])\n', (707, 712), True, 'import numpy as np\n'), ((1161, 1190), 'numpy.deg2rad', 'np.deg2rad', (['(52.92024014972946)'], {}), '(52.92024014972946)\n', (1171, 1190), True, 'import numpy as np\n'), ((1939, 1959), 'numpy.deg2rad', 'np.deg2rad', (['[20, 60]'], {}), '([20, 60])\n', (1949, 1959), True, 'import numpy as np\n'), ((1961, 1974), 'numpy.array', 'np.array', (['[4]'], {}), '([4])\n', (1969, 1974), True, 'import numpy as np\n'), ((1976, 1992), 'numpy.deg2rad', 'np.deg2rad', (['[48]'], {}), '([48])\n', (1986, 1992), True, 'import numpy as np\n'), ((2059, 2073), 'numpy.deg2rad', 'np.deg2rad', (['(45)'], {}), '(45)\n', (2069, 2073), True, 'import numpy as np\n'), ((1854, 1868), 'numpy.deg2rad', 'np.deg2rad', (['(30)'], {}), '(30)\n', (1864, 1868), True, 'import numpy as np\n')] |
# coding=utf-8
from sklearn import preprocessing
import numpy as np
import os
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# import tensorflow as tf
from helper import linear_regression as lr # my own module
from helper import general as general
# Load the ex1 dataset: one feature (city population) and one target (profit).
data = pd.read_csv('ex1data1.txt', names=['population', 'profit'])
print(data.head())
# print (df.info())
# Scatter plot of the raw data; fit_reg=False suppresses the regression line.
# NOTE(review): positional x/y and the ``size=`` kwarg are deprecated in newer
# seaborn releases (use height=) — confirm the pinned seaborn version.
sns.lmplot('population', 'profit', data, size=10, fit_reg=False)
# plt.show()
X = general.get_X(data)
print(X.shape, type(X))
y = general.get_y(data)
print(y.shape, type(y))
# Start gradient descent from an all-zero parameter vector.
theta = np.zeros(X.shape[1])
print(theta.shape, type(theta))
print (lr.cost(theta, X, y))
epoch = 100000
# Scale features to [-1, 1] so gradient descent converges with lr=0.01.
# NOTE(review): X is scaled *after* the initial cost printout above, so the
# two cost values are not directly comparable — confirm this is intended.
min_max_scaler = preprocessing.MaxAbsScaler()
X= min_max_scaler.fit_transform(X)
final_theta, cost_data = lr.batch_gradient_decent(theta, X, y, epoch, 0.01)
print (cost_data[-1])
# theta_ne = lr.normal_equations(X, y)
# print (theta_ne)
# print (lr.cost(theta_ne, X, y))
# print (lr.cost(theta_ne, X, y)) | [
"seaborn.lmplot",
"pandas.read_csv",
"helper.general.get_y",
"numpy.zeros",
"helper.linear_regression.cost",
"helper.linear_regression.batch_gradient_decent",
"sklearn.preprocessing.MaxAbsScaler",
"helper.general.get_X"
] | [((284, 343), 'pandas.read_csv', 'pd.read_csv', (['"""ex1data1.txt"""'], {'names': "['population', 'profit']"}), "('ex1data1.txt', names=['population', 'profit'])\n", (295, 343), True, 'import pandas as pd\n'), ((385, 449), 'seaborn.lmplot', 'sns.lmplot', (['"""population"""', '"""profit"""', 'data'], {'size': '(10)', 'fit_reg': '(False)'}), "('population', 'profit', data, size=10, fit_reg=False)\n", (395, 449), True, 'import seaborn as sns\n'), ((467, 486), 'helper.general.get_X', 'general.get_X', (['data'], {}), '(data)\n', (480, 486), True, 'from helper import general as general\n'), ((516, 535), 'helper.general.get_y', 'general.get_y', (['data'], {}), '(data)\n', (529, 535), True, 'from helper import general as general\n'), ((569, 589), 'numpy.zeros', 'np.zeros', (['X.shape[1]'], {}), '(X.shape[1])\n', (577, 589), True, 'import numpy as np\n'), ((688, 716), 'sklearn.preprocessing.MaxAbsScaler', 'preprocessing.MaxAbsScaler', ([], {}), '()\n', (714, 716), False, 'from sklearn import preprocessing\n'), ((778, 828), 'helper.linear_regression.batch_gradient_decent', 'lr.batch_gradient_decent', (['theta', 'X', 'y', 'epoch', '(0.01)'], {}), '(theta, X, y, epoch, 0.01)\n', (802, 828), True, 'from helper import linear_regression as lr\n'), ((630, 650), 'helper.linear_regression.cost', 'lr.cost', (['theta', 'X', 'y'], {}), '(theta, X, y)\n', (637, 650), True, 'from helper import linear_regression as lr\n')] |
#Simple script which crawls a folder containing several sequence roots
#and loads all background frames in sequence names containing "empty"
#then reports scatterplot graphs of Hue vs. Value, Hue vs. Saturation,
#and Saturation vs. Value
import sys
import numpy as np
from FrameManager import *
from RGBTrainingTFWriter import *
import matplotlib
from glob import glob
import matplotlib.pyplot as plt
def randomRows(A, num_rows):
    """Return ``num_rows`` rows of ``A`` sampled uniformly without replacement.

    Raises ValueError (from numpy) if ``num_rows`` exceeds ``A.shape[0]``.
    """
    chosen = np.random.choice(A.shape[0], num_rows, replace=False)
    return A[chosen]
if __name__ == '__main__':
    # Root folder containing several sequence roots, given on the CLI.
    sequenceRootRoot = sys.argv[1]
    # Collect every sequence directory whose name contains "empty"
    # (background-only sequences).
    # NOTE(review): ``os`` is never imported in this file — presumably it
    # arrives via ``from FrameManager import *``; confirm.
    emptySequenceDirs = [y for x in os.walk(sequenceRootRoot) for y in glob(os.path.join(x[0], '*empty*/'))]
    cameraLabels = ["0", "1", "2"]
    rgbFrames = []
    for sequenceDir in emptySequenceDirs:
        for cam in cameraLabels:
            fullPath = os.path.join(sequenceDir, cam)
            if (os.path.isdir(fullPath)):
                frameManager = FrameManager(fullPath)
                # Advance LIMIT frames into the sequence and keep exactly
                # one RGB frame per camera (or stop early at sequence end).
                LIMIT = 20
                i = 0
                while True:
                    frame = frameManager.getFrame()
                    # Count this frame; grab its RGB once we reach LIMIT.
                    i += 1
                    if (i == LIMIT):
                        rgbFrames.append(frame.getRGB())
                        break
                    advanced = frameManager.tryAdvance(1)
                    if (not advanced):
                        break
    # Flatten all collected frames into an (N, 3) float array in [0, 1].
    rgbFrames = np.reshape(np.array(rgbFrames), (-1, 3)).astype(np.float32) / 255.0
    NUM_TO_PLOT = 5000
    rgbFrames = randomRows(rgbFrames, NUM_TO_PLOT)
    # Convert the sampled pixels to HSV for the scatter plots below.
    hsvArray = matplotlib.colors.rgb_to_hsv(rgbFrames)
    # Rescale to OpenCV-style ranges: H in [0, 179], S and V in [0, 255].
    hsvArray *= np.array([[179.0, 255.0, 255.0]], dtype=np.float32)
    # Pairwise scatter plots: H-S, S-V and H-V.
    hues = hsvArray[:, 0]
    sats = hsvArray[:, 1]
    values = hsvArray[:, 2]
    plt.subplot(131)
    plt.scatter(hues, sats)
    plt.subplot(132)
    plt.scatter(sats, values)
    plt.subplot(133)
    plt.scatter(hues, values)
    plt.show()
| [
"matplotlib.colors.rgb_to_hsv",
"numpy.random.choice",
"numpy.array",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
] | [((1637, 1676), 'matplotlib.colors.rgb_to_hsv', 'matplotlib.colors.rgb_to_hsv', (['rgbFrames'], {}), '(rgbFrames)\n', (1665, 1676), False, 'import matplotlib\n'), ((1693, 1744), 'numpy.array', 'np.array', (['[[179.0, 255.0, 255.0]]'], {'dtype': 'np.float32'}), '([[179.0, 255.0, 255.0]], dtype=np.float32)\n', (1701, 1744), True, 'import numpy as np\n'), ((1910, 1926), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(131)'], {}), '(131)\n', (1921, 1926), True, 'import matplotlib.pyplot as plt\n'), ((1935, 1958), 'matplotlib.pyplot.scatter', 'plt.scatter', (['hues', 'sats'], {}), '(hues, sats)\n', (1946, 1958), True, 'import matplotlib.pyplot as plt\n'), ((1964, 1980), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(132)'], {}), '(132)\n', (1975, 1980), True, 'import matplotlib.pyplot as plt\n'), ((1985, 2010), 'matplotlib.pyplot.scatter', 'plt.scatter', (['sats', 'values'], {}), '(sats, values)\n', (1996, 2010), True, 'import matplotlib.pyplot as plt\n'), ((2016, 2032), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(133)'], {}), '(133)\n', (2027, 2032), True, 'import matplotlib.pyplot as plt\n'), ((2037, 2062), 'matplotlib.pyplot.scatter', 'plt.scatter', (['hues', 'values'], {}), '(hues, values)\n', (2048, 2062), True, 'import matplotlib.pyplot as plt\n'), ((2067, 2077), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2075, 2077), True, 'import matplotlib.pyplot as plt\n'), ((445, 498), 'numpy.random.choice', 'np.random.choice', (['A.shape[0]', 'num_rows'], {'replace': '(False)'}), '(A.shape[0], num_rows, replace=False)\n', (461, 498), True, 'import numpy as np\n'), ((1437, 1456), 'numpy.array', 'np.array', (['rgbFrames'], {}), '(rgbFrames)\n', (1445, 1456), True, 'import numpy as np\n')] |
import numpy as np
from skimage import util, exposure, io, color
# from matplotlib import pyplot as plt
import cv2
def view_histogram_bw(image):
    """Compute the 256-bin intensity histogram of a grayscale image.

    Args:
        image: array of pixel intensities in [0, 256).

    Returns:
        A pair ``([counts], edges)``: ``counts`` is a 256-entry list of int
        bin counts (wrapped in an outer list for symmetry with the color
        variant) and ``edges`` is the 257-entry list of integer bin edges.
    """
    counts, edges = np.histogram(image.ravel(), 256, [0, 256])
    return [list(map(int, counts))], list(map(int, edges))
def view_color_histogram(image):
    """Compute per-channel 256-bin histograms of a color image.

    Args:
        image: H x W x C array of pixel intensities in [0, 256).

    Returns:
        A pair ``(hists, edges)``: one 256-entry list of int counts per
        channel, plus the shared 257-entry list of integer bin edges.
    """
    hist_all = []
    for channel in range(image.shape[2]):
        counts, edges = np.histogram(image[:, :, channel].ravel(), 256, [0, 256])
        hist_all.append(list(map(int, counts)))
        bins = list(map(int, edges))
    return hist_all, bins
def histogram_eq(image):
    """Equalize the intensity histogram of a grayscale or color image.

    Contrast adjustment via histogram equalization; color images are
    equalized on the luma (Y) channel only, so hues are preserved.

    Args:
        image: 2-D (grayscale) or 3-D (color) uint8 array.

    Returns:
        An RGB-shaped, histogram-equalized version of the image.
    """
    if len(image.shape) == 2:
        # Equalize directly, then replicate to three channels.
        return color.gray2rgb(cv2.equalizeHist(image))
    # Color path: equalize only the luma channel in YCrCb space.
    ycrcb = cv2.cvtColor(image, cv2.COLOR_BGR2YCrCb)
    luma, cr, cb = cv2.split(ycrcb)
    equalized = cv2.merge((cv2.equalizeHist(luma), cr, cb))
    return cv2.cvtColor(equalized, cv2.COLOR_YCR_CB2BGR)
def contrast_stretching(image):
    """Stretch contrast so the 2nd-98th intensity percentiles span the range.

    Contrast stretching (often called normalization) linearly rescales the
    intensities between the 2nd and 98th percentile to the full output
    range. Grayscale input is first promoted to RGB.

    Args:
        image: 2-D (grayscale) or 3-D (color) intensity array.

    Returns:
        Contrast-stretched version of the image.
    """
    rgb = color.gray2rgb(image) if len(image.shape) == 2 else image
    lo, hi = np.percentile(rgb, (2, 98))
    return exposure.rescale_intensity(rgb, in_range=(lo, hi))
def log_compression(image):
    """Apply logarithmic dynamic-range compression to an image.

    Each pixel value is replaced with a scaled logarithm of itself, which
    compresses the dynamic range; the logarithm base affects only output
    scaling, not the curve shape. Grayscale input is promoted to RGB first.

    Args:
        image: 2-D (grayscale) or 3-D (color) intensity array.

    Returns:
        Log-compressed version of the image.
    """
    rgb = color.gray2rgb(image) if len(image.shape) == 2 else image
    return exposure.adjust_log(rgb, 1)
def reverse_video(image):
    """Invert the image, swapping minimum and maximum intensity per pixel.

    Grayscale input is promoted to RGB before inversion.

    Args:
        image: 2-D (grayscale) or 3-D (color) intensity array.

    Returns:
        Inverted (negative) version of the image.
    """
    rgb = color.gray2rgb(image) if len(image.shape) == 2 else image
    return util.invert(rgb)
def gamma_correction(image):
    """Apply power-law (gamma) correction with a fixed gamma of 2.

    Gamma correction is the nonlinear power-law mapping used to encode and
    decode luminance. Grayscale input is promoted to RGB first.

    Args:
        image: 2-D (grayscale) or 3-D (color) intensity array.

    Returns:
        Gamma-corrected version of the image.
    """
    rgb = color.gray2rgb(image) if len(image.shape) == 2 else image
    return exposure.adjust_gamma(rgb, 2)
if __name__ == '__main__':
    # Smoke test: load an image, force it to RGB, and equalize it.
    filename = "../TestImages/circles.png"
    img = io.imread(filename)
    img = color.gray2rgb(img)
    img2 = histogram_eq(img)
    # Display is disabled (pyplot import at the top of the file is also
    # commented out); uncomment both to view the result.
    # plt.imshow(img2)
    # plt.show()
| [
"cv2.merge",
"skimage.util.invert",
"skimage.exposure.adjust_log",
"skimage.exposure.adjust_gamma",
"skimage.io.imread",
"cv2.equalizeHist",
"skimage.exposure.rescale_intensity",
"cv2.cvtColor",
"cv2.split",
"numpy.percentile",
"skimage.color.gray2rgb"
] | [((2188, 2217), 'numpy.percentile', 'np.percentile', (['image', '(2, 98)'], {}), '(image, (2, 98))\n', (2201, 2217), True, 'import numpy as np\n'), ((2236, 2289), 'skimage.exposure.rescale_intensity', 'exposure.rescale_intensity', (['image'], {'in_range': '(p2, p98)'}), '(image, in_range=(p2, p98))\n', (2262, 2289), False, 'from skimage import util, exposure, io, color\n'), ((3070, 3099), 'skimage.exposure.adjust_log', 'exposure.adjust_log', (['image', '(1)'], {}), '(image, 1)\n', (3089, 3099), False, 'from skimage import util, exposure, io, color\n'), ((3443, 3461), 'skimage.util.invert', 'util.invert', (['image'], {}), '(image)\n', (3454, 3461), False, 'from skimage import util, exposure, io, color\n'), ((3977, 4008), 'skimage.exposure.adjust_gamma', 'exposure.adjust_gamma', (['image', '(2)'], {}), '(image, 2)\n', (3998, 4008), False, 'from skimage import util, exposure, io, color\n'), ((4118, 4137), 'skimage.io.imread', 'io.imread', (['filename'], {}), '(filename)\n', (4127, 4137), False, 'from skimage import util, exposure, io, color\n'), ((4148, 4167), 'skimage.color.gray2rgb', 'color.gray2rgb', (['img'], {}), '(img)\n', (4162, 4167), False, 'from skimage import util, exposure, io, color\n'), ((1298, 1321), 'cv2.equalizeHist', 'cv2.equalizeHist', (['image'], {}), '(image)\n', (1314, 1321), False, 'import cv2\n'), ((1339, 1361), 'skimage.color.gray2rgb', 'color.gray2rgb', (['img_bw'], {}), '(img_bw)\n', (1353, 1361), False, 'from skimage import util, exposure, io, color\n'), ((1395, 1435), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2YCrCb'], {}), '(image, cv2.COLOR_BGR2YCrCb)\n', (1407, 1435), False, 'import cv2\n'), ((1456, 1478), 'cv2.split', 'cv2.split', (['img_y_cr_cb'], {}), '(img_y_cr_cb)\n', (1465, 1478), False, 'import cv2\n'), ((1552, 1571), 'cv2.equalizeHist', 'cv2.equalizeHist', (['y'], {}), '(y)\n', (1568, 1571), False, 'import cv2\n'), ((1598, 1623), 'cv2.merge', 'cv2.merge', (['(y_eq, cr, cb)'], {}), '((y_eq, cr, cb))\n', (1607, 
1623), False, 'import cv2\n'), ((1641, 1691), 'cv2.cvtColor', 'cv2.cvtColor', (['img_y_cr_cb_eq', 'cv2.COLOR_YCR_CB2BGR'], {}), '(img_y_cr_cb_eq, cv2.COLOR_YCR_CB2BGR)\n', (1653, 1691), False, 'import cv2\n'), ((2151, 2172), 'skimage.color.gray2rgb', 'color.gray2rgb', (['image'], {}), '(image)\n', (2165, 2172), False, 'from skimage import util, exposure, io, color\n'), ((3020, 3041), 'skimage.color.gray2rgb', 'color.gray2rgb', (['image'], {}), '(image)\n', (3034, 3041), False, 'from skimage import util, exposure, io, color\n'), ((3401, 3422), 'skimage.color.gray2rgb', 'color.gray2rgb', (['image'], {}), '(image)\n', (3415, 3422), False, 'from skimage import util, exposure, io, color\n'), ((3932, 3953), 'skimage.color.gray2rgb', 'color.gray2rgb', (['image'], {}), '(image)\n', (3946, 3953), False, 'from skimage import util, exposure, io, color\n')] |
import numpy as np
import pytest
from skimage.measure import approximate_polygon, subdivide_polygon
from skimage.measure._polygon import _SUBDIVISION_MASKS
# Closed 13-point outline of a 3x3 axis-aligned square: vertices sampled at
# unit intervals along each edge, with the first point repeated at the end.
square = np.array([
    [0, 0], [0, 1], [0, 2], [0, 3],
    [1, 3], [2, 3], [3, 3],
    [3, 2], [3, 1], [3, 0],
    [2, 0], [1, 0], [0, 0]
])
def test_approximate_polygon():
    """Douglas-Peucker simplification keeps the expected vertices."""
    # tolerance -> indices of the vertices that must survive simplification
    cases = [
        (0.1, (0, 3, 6, 9, 12)),
        (2.2, (0, 6, 12)),
    ]
    for tolerance, kept in cases:
        out = approximate_polygon(square, tolerance)
        np.testing.assert_array_equal(out, square[kept, :])

    # A pre-decimated input still collapses to the corner vertices.
    out = approximate_polygon(square[(0, 1, 3, 4, 5, 6, 7, 9, 11, 12), :], 0.1)
    np.testing.assert_array_equal(out, square[(0, 3, 6, 9, 12), :])

    # Non-positive tolerance is a no-op.
    for tolerance in (-1, 0):
        out = approximate_polygon(square, tolerance)
        np.testing.assert_array_equal(out, square)
def test_subdivide_polygon():
    """Check B-spline subdivision for circular and non-circular polygons.

    Each outer iteration feeds the previous iteration's subdivided polygons
    back in, so the polygons grow across the 10 rounds while the size
    invariants below must hold at every step.
    """
    new_square1 = square          # circular: first point == last point
    new_square2 = square[:-1]     # non-circular (open) polygon
    new_square3 = square[:-1]     # non-circular, ends preserved
    # test iterative subdivision
    for _ in range(10):
        square1, square2, square3 = new_square1, new_square2, new_square3
        # test different B-Spline degrees
        for degree in range(1, 7):
            mask_len = len(_SUBDIVISION_MASKS[degree][0])
            # test circular: result stays closed and doubles (minus one).
            new_square1 = subdivide_polygon(square1, degree)
            np.testing.assert_array_equal(new_square1[-1], new_square1[0])
            np.testing.assert_equal(new_square1.shape[0],
                                    2 * square1.shape[0] - 1)
            # test non-circular: size grows by the mask-dependent amount.
            new_square2 = subdivide_polygon(square2, degree)
            np.testing.assert_equal(new_square2.shape[0],
                                    2 * (square2.shape[0] - mask_len + 1))
            # test non-circular, preserve_ends: endpoints stay fixed.
            new_square3 = subdivide_polygon(square3, degree, True)
            np.testing.assert_equal(new_square3[0], square3[0])
            np.testing.assert_equal(new_square3[-1], square3[-1])
            np.testing.assert_equal(new_square3.shape[0],
                                    2 * (square3.shape[0] - mask_len + 2))
    # Unsupported B-Spline degrees (outside 1..7) must raise.
    with pytest.raises(ValueError):
        subdivide_polygon(square, 0)
    with pytest.raises(ValueError):
        subdivide_polygon(square, 8)
if __name__ == "__main__":
np.testing.run_module_suite()
| [
"numpy.testing.assert_equal",
"skimage.measure.approximate_polygon",
"skimage.measure.subdivide_polygon",
"numpy.array",
"pytest.raises",
"numpy.testing.run_module_suite",
"numpy.testing.assert_array_equal"
] | [((166, 285), 'numpy.array', 'np.array', (['[[0, 0], [0, 1], [0, 2], [0, 3], [1, 3], [2, 3], [3, 3], [3, 2], [3, 1], [3,\n 0], [2, 0], [1, 0], [0, 0]]'], {}), '([[0, 0], [0, 1], [0, 2], [0, 3], [1, 3], [2, 3], [3, 3], [3, 2], [\n 3, 1], [3, 0], [2, 0], [1, 0], [0, 0]])\n', (174, 285), True, 'import numpy as np\n'), ((343, 375), 'skimage.measure.approximate_polygon', 'approximate_polygon', (['square', '(0.1)'], {}), '(square, 0.1)\n', (362, 375), False, 'from skimage.measure import approximate_polygon, subdivide_polygon\n'), ((380, 443), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['out', 'square[(0, 3, 6, 9, 12), :]'], {}), '(out, square[(0, 3, 6, 9, 12), :])\n', (409, 443), True, 'import numpy as np\n'), ((455, 487), 'skimage.measure.approximate_polygon', 'approximate_polygon', (['square', '(2.2)'], {}), '(square, 2.2)\n', (474, 487), False, 'from skimage.measure import approximate_polygon, subdivide_polygon\n'), ((492, 549), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['out', 'square[(0, 6, 12), :]'], {}), '(out, square[(0, 6, 12), :])\n', (521, 549), True, 'import numpy as np\n'), ((561, 630), 'skimage.measure.approximate_polygon', 'approximate_polygon', (['square[(0, 1, 3, 4, 5, 6, 7, 9, 11, 12), :]', '(0.1)'], {}), '(square[(0, 1, 3, 4, 5, 6, 7, 9, 11, 12), :], 0.1)\n', (580, 630), False, 'from skimage.measure import approximate_polygon, subdivide_polygon\n'), ((635, 698), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['out', 'square[(0, 3, 6, 9, 12), :]'], {}), '(out, square[(0, 3, 6, 9, 12), :])\n', (664, 698), True, 'import numpy as np\n'), ((710, 741), 'skimage.measure.approximate_polygon', 'approximate_polygon', (['square', '(-1)'], {}), '(square, -1)\n', (729, 741), False, 'from skimage.measure import approximate_polygon, subdivide_polygon\n'), ((746, 788), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['out', 'square'], {}), '(out, square)\n', (775, 
788), True, 'import numpy as np\n'), ((799, 829), 'skimage.measure.approximate_polygon', 'approximate_polygon', (['square', '(0)'], {}), '(square, 0)\n', (818, 829), False, 'from skimage.measure import approximate_polygon, subdivide_polygon\n'), ((834, 876), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['out', 'square'], {}), '(out, square)\n', (863, 876), True, 'import numpy as np\n'), ((2363, 2392), 'numpy.testing.run_module_suite', 'np.testing.run_module_suite', ([], {}), '()\n', (2390, 2392), True, 'import numpy as np\n'), ((2193, 2218), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2206, 2218), False, 'import pytest\n'), ((2228, 2256), 'skimage.measure.subdivide_polygon', 'subdivide_polygon', (['square', '(0)'], {}), '(square, 0)\n', (2245, 2256), False, 'from skimage.measure import approximate_polygon, subdivide_polygon\n'), ((2266, 2291), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2279, 2291), False, 'import pytest\n'), ((2301, 2329), 'skimage.measure.subdivide_polygon', 'subdivide_polygon', (['square', '(8)'], {}), '(square, 8)\n', (2318, 2329), False, 'from skimage.measure import approximate_polygon, subdivide_polygon\n'), ((1313, 1347), 'skimage.measure.subdivide_polygon', 'subdivide_polygon', (['square1', 'degree'], {}), '(square1, degree)\n', (1330, 1347), False, 'from skimage.measure import approximate_polygon, subdivide_polygon\n'), ((1360, 1422), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['new_square1[-1]', 'new_square1[0]'], {}), '(new_square1[-1], new_square1[0])\n', (1389, 1422), True, 'import numpy as np\n'), ((1435, 1506), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['new_square1.shape[0]', '(2 * square1.shape[0] - 1)'], {}), '(new_square1.shape[0], 2 * square1.shape[0] - 1)\n', (1458, 1506), True, 'import numpy as np\n'), ((1601, 1635), 'skimage.measure.subdivide_polygon', 'subdivide_polygon', (['square2', 'degree'], 
{}), '(square2, degree)\n', (1618, 1635), False, 'from skimage.measure import approximate_polygon, subdivide_polygon\n'), ((1648, 1736), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['new_square2.shape[0]', '(2 * (square2.shape[0] - mask_len + 1))'], {}), '(new_square2.shape[0], 2 * (square2.shape[0] -\n mask_len + 1))\n', (1671, 1736), True, 'import numpy as np\n'), ((1842, 1882), 'skimage.measure.subdivide_polygon', 'subdivide_polygon', (['square3', 'degree', '(True)'], {}), '(square3, degree, True)\n', (1859, 1882), False, 'from skimage.measure import approximate_polygon, subdivide_polygon\n'), ((1895, 1946), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['new_square3[0]', 'square3[0]'], {}), '(new_square3[0], square3[0])\n', (1918, 1946), True, 'import numpy as np\n'), ((1959, 2012), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['new_square3[-1]', 'square3[-1]'], {}), '(new_square3[-1], square3[-1])\n', (1982, 2012), True, 'import numpy as np\n'), ((2026, 2114), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['new_square3.shape[0]', '(2 * (square3.shape[0] - mask_len + 2))'], {}), '(new_square3.shape[0], 2 * (square3.shape[0] -\n mask_len + 2))\n', (2049, 2114), True, 'import numpy as np\n')] |
import numpy as np
# the type of float to use throughout the session.
_FLOATX = 'float32'
# fuzz factor added to denominators/logs to avoid numerical issues.
_EPSILON = 10e-8
# per-prefix counters backing get_uid().
_UID_PREFIXES = {}


def epsilon():
    '''Returns the value of the fuzz factor used in numeric expressions.'''
    return _EPSILON


def set_epsilon(e):
    '''Sets the value of the fuzz factor used in numeric expressions.'''
    global _EPSILON
    _EPSILON = e


def floatx():
    '''Returns the default float type, as a string
    (e.g. 'float16', 'float32', 'float64').
    '''
    return _FLOATX


def set_floatx(floatx):
    '''Sets the default float type.

    Raises:
        ValueError: if `floatx` is not one of 'float16', 'float32', 'float64'.
    '''
    global _FLOATX
    if floatx not in {'float16', 'float32', 'float64'}:
        # ValueError is more precise than the bare Exception raised before,
        # and stays backward compatible with callers catching Exception.
        raise ValueError('Unknown floatx type: ' + str(floatx))
    _FLOATX = str(floatx)


def cast_to_floatx(x):
    '''Cast a Numpy array to the default float type (see `floatx`).'''
    return np.asarray(x, dtype=_FLOATX)


def get_uid(prefix=''):
    '''Returns a unique integer id for `prefix`.

    The first call for a given prefix returns 1; each subsequent call
    with the same prefix increments and returns the counter.
    '''
    _UID_PREFIXES[prefix] = _UID_PREFIXES.get(prefix, 0) + 1
    return _UID_PREFIXES[prefix]
| [
"numpy.asarray"
] | [((652, 680), 'numpy.asarray', 'np.asarray', (['x'], {'dtype': '_FLOATX'}), '(x, dtype=_FLOATX)\n', (662, 680), True, 'import numpy as np\n')] |
import logging
import os
import signal
import subprocess
import tempfile
from typing import List, Sequence, Optional, Tuple, Union
import numpy as np
import pandas as pd
import prctl
import soundfile as sf
from d3m import container, utils
from d3m.base import utils as base_utils
from d3m.metadata import base as metadata_base, hyperparams
from d3m.primitive_interfaces import base, transformer
from distil.utils import CYTHON_DEP
import version
from joblib import Parallel, delayed
from tqdm import tqdm
__all__ = ("AudioDatasetLoaderPrimitive",)
logger = logging.getLogger(__name__)
class Hyperparams(hyperparams.Hyperparams):
    # Fraction (0.0-1.0] of the source dataset rows to load.
    sample = hyperparams.Hyperparameter[float](
        default=1.0,
        semantic_types=[
            "https://metadata.datadrivendiscovery.org/types/ControlParameter"
        ],
        description="a value ranging from 0.0 to 1.0 indicating how much of the source data to load",
    )
    # Name of the dataset resource to treat as the main dataframe; None means
    # auto-select. NOTE(review): produce() currently passes None directly to
    # get_tabular_resource rather than this value — confirm it is wired up.
    dataframe_resource = hyperparams.Hyperparameter[Union[str, None]](
        default=None,
        semantic_types=[
            "https://metadata.datadrivendiscovery.org/types/ControlParameter"
        ],
        description=".",
    )
    # joblib n_jobs value controlling parallelism of the audio loading.
    n_jobs = hyperparams.Hyperparameter[int](
        default=64,
        semantic_types=[
            "https://metadata.datadrivendiscovery.org/types/ControlParameter"
        ],
        description="The value of the n_jobs parameter for the joblib library",
    )
class WavInput:
    """Lightweight container pairing decoded audio samples with their rate.

    Attributes:
        data: array of PCM samples; convert_load_file always produces an
            int16 array shaped (num_samples, num_channels).
        sample_rate: sampling rate of ``data`` in Hz.
    """

    def __init__(self, data, sample_rate):
        self.data = data
        self.sample_rate = sample_rate

    def __repr__(self):
        # The sample buffer may be large, so only show its rate here.
        return f"{type(self).__name__}(sample_rate={self.sample_rate})"
def convert_load_file(fileuri, start, end):
    """Decode an audio file to 16-bit PCM samples via the ffmpeg CLI.

    The file at ``fileuri`` is transcoded to a temporary WAV file with
    ffmpeg, then read back with soundfile. When both ``start`` and ``end``
    are given (and the clip has positive duration), they are mapped
    proportionally from duration units (seconds, per ``sf.info``) onto
    frame indices so only that slice is loaded.

    Args:
        fileuri: path/URI of the source audio file (any format ffmpeg reads).
        start: optional clip start, in the same units as ``info.duration``;
            None loads the whole file.
        end: optional clip end, same units as ``start``.

    Returns:
        WavInput wrapping an int16 array of shape (samples, channels) and
        the sample rate; mono audio is reshaped to (samples, 1) and clips
        shorter than one second are zero-padded to ``sample_rate`` samples.

    Raises:
        subprocess.CalledProcessError: if ffmpeg exits non-zero (logged
            first, then re-raised).
    """
    with tempfile.NamedTemporaryFile(mode="rb") as output_file:
        # We use ffmpeg to convert all audio files to same format.
        args = [
            "ffmpeg",
            "-y", # Always overwrite existing files.
            "-nostdin", # No interaction.
            "-i",
            fileuri, # Input file.
            "-vn", # There is no video.
            #'-acodec', 'pcm_f32le', # We want everything in float32 dtype.
            "-f",
            "wav", # This will give us sample rate available in metadata.
            output_file.name, # Output file.
        ]
        try:
            # NOTE(review): ``result`` is unused; kept for parity/debugging.
            result = subprocess.run(
                args,
                stdin=subprocess.DEVNULL,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                # Setting "pdeathsig" kills the ffmpeg child if our process
                # dies for any reason (prctl — Linux only).
                encoding="utf8",
                check=True,
                preexec_fn=lambda: prctl.set_pdeathsig(signal.SIGKILL),
            )
        except subprocess.CalledProcessError as error:
            logger.error("Error running ffmpeg: %(stderr)s", {"stderr": error.stderr})
            raise
        info = sf.info(output_file.name)
        if start is not None and end is not None and info.duration > 0:
            # Map start/end proportionally from duration units to frame
            # indices, then read only that window.
            start = int(info.frames * (start / info.duration))
            end = int(info.frames * (end / info.duration))
            audio_array, sample_rate = sf.read(
                output_file.name, start=start, stop=end, dtype="int16"
            )
        else:
            audio_array, sample_rate = sf.read(output_file.name, dtype="int16")
        if len(audio_array.shape) == 1:
            # Mono: normalize to a 2-D (samples, 1) layout.
            audio_array = audio_array.reshape(-1, 1)
        if audio_array.shape[0] < sample_rate:
            # Pad clips shorter than one second with trailing silence so
            # every returned clip spans at least sample_rate samples.
            audio_array = np.vstack(
                [
                    audio_array,
                    np.zeros(
                        (sample_rate - audio_array.shape[0], audio_array.shape[1]),
                        dtype="int16",
                    ),
                ]
            )
    return WavInput(audio_array, sample_rate)
class AudioDatasetLoaderPrimitive(
    transformer.TransformerPrimitiveBase[container.Dataset, container.List, Hyperparams]
):
    """
    A primitive which reads columns referencing audio files.
    Each column which has ``https://metadata.datadrivendiscovery.org/types/FileName`` semantic type
    and a valid media type (``audio/aiff``, ``audio/flac``, ``audio/ogg``, ``audio/wav``, ``audio/mpeg``)
    has every filename read into an audio represented as a numpy array. By default the resulting column
    with read arrays is appended to existing columns.
    The shape of numpy arrays is S x C. S is the number of samples, C is the number of
    channels in an audio (e.g., C = 1 for mono, C = 2 for stereo). dtype is float32.
    """

    # D3M primitive metadata: identity, source repository and installation info.
    metadata = metadata_base.PrimitiveMetadata(
        {
            "id": "f2a0cf71-0f61-41a7-a0ad-b907083ae56c",
            "version": version.__version__,
            "name": "Load audio collection from dataset into a single dataframe",
            "python_path": "d3m.primitives.data_transformation.audio_reader.DistilAudioDatasetLoader",
            "source": {
                "name": "Distil",
                "contact": "mailto:<EMAIL>",
                "uris": [
                    "https://github.com/uncharted-distil/distil-primitives/blob/main/distil/primitives/audio_reader.py",
                    "https://github.com/uncharted-distil/distil-primitives",
                ],
            },
            "installation": [
                CYTHON_DEP,
                {
                    "type": metadata_base.PrimitiveInstallationType.PIP,
                    "package_uri": "git+https://github.com/uncharted-distil/distil-primitives.git@{git_commit}#egg=distil-primitives".format(
                        git_commit=utils.current_git_commit(os.path.dirname(__file__)),
                    ),
                },
            ],
            "algorithm_types": [
                metadata_base.PrimitiveAlgorithmType.FILE_MANIPULATION,
            ],
            "primitive_family": metadata_base.PrimitiveFamily.DATA_TRANSFORMATION,
        },
    )

    def produce(
        self,
        *,
        inputs: container.Dataset,
        timeout: float = None,
        iterations: int = None,
    ) -> base.CallResult[container.DataFrame]:
        """Return the dataset's entry-point table (optionally subsampled) with refreshed metadata."""
        logger.debug(f"Running {__name__}")
        # get the learning data (the dataset entry point)
        learning_id, learning_df = base_utils.get_tabular_resource(
            inputs, None, pick_entry_point=True
        )
        # Keep only the first fraction of rows, controlled by the "sample" hyperparam.
        learning_df = learning_df.head(
            int(learning_df.shape[0] * self.hyperparams["sample"])
        )
        learning_df.metadata = self._update_metadata(
            inputs.metadata, learning_id, learning_df
        )
        logger.debug(f"\n{learning_df}")
        return base.CallResult(learning_df)

    def produce_collection(
        self,
        *,
        inputs: container.Dataset,
        timeout: float = None,
        iterations: int = None,
    ) -> base.CallResult[container.DataFrame]:
        """Load the audio files referenced by the entry-point table into a one-column DataFrame."""
        logger.debug(f"Running {__name__}")
        # get the learning data (the dataset entry point)
        learning_id, learning_df = base_utils.get_tabular_resource(
            inputs, None, pick_entry_point=True
        )
        learning_df = learning_df.head(
            int(learning_df.shape[0] * self.hyperparams["sample"])
        )
        learning_df.metadata = self._update_metadata(
            inputs.metadata, learning_id, learning_df
        )
        # find the column that is acting as the foreign key and extract the resource + column it references
        for i in range(
            learning_df.metadata.query((metadata_base.ALL_ELEMENTS,))["dimension"][
                "length"
            ]
        ):
            column_metadata = learning_df.metadata.query_column(i)
            if (
                "foreign_key" in column_metadata
                and column_metadata["foreign_key"]["type"] == "COLUMN"
            ):
                # NOTE(review): if several foreign-key columns exist, the last one wins — confirm intended.
                resource_id = column_metadata["foreign_key"]["resource_id"]
                file_column_idx = column_metadata["foreign_key"]["column_index"]
        # get the learning data (the dataset entry point)
        collection_id, collection_df = base_utils.get_tabular_resource(
            inputs, resource_id
        )
        collection_df = collection_df.head(learning_df.shape[0])
        collection_df.metadata = self._update_metadata(
            inputs.metadata, collection_id, collection_df
        )
        # get the base path
        base_path = collection_df.metadata.query(
            (metadata_base.ALL_ELEMENTS, file_column_idx)
        )["location_base_uris"][0]
        # create fully resolved paths and load
        paths = learning_df.iloc[:, file_column_idx]  # TODO: remove, unused?
        file_paths = []
        for i, row in learning_df.iterrows():
            if i % 100 == 0:
                logger.debug(f"Loaded {i} / {len(learning_df.index)} files")
            try:
                # Rows may carry an optional "start,end" time slice (seconds).
                start_end = row["start-end-time-slice-of-recording"]
                start, end = [float(x) for x in start_end.split(",")]
                file_paths.append(
                    (os.path.join(base_path, row["filename"]), start, end)
                )
            except AttributeError as e:
                # Missing/NaN slice value has no .split -> load the whole file.
                logger.warning("no start/end ts for {}".format(row))
                file_paths.append(
                    (os.path.join(base_path, row["filename"]), None, None)
                )
        outputs = self._audio_load(self.hyperparams["n_jobs"], file_paths)
        logger.debug(f"\n{outputs}")
        result_df = pd.DataFrame({"audio": outputs})  # d3m container takes for_ever_
        return base.CallResult(container.DataFrame(result_df, generate_metadata=False))

    @classmethod
    def _update_metadata(
        cls,
        metadata: metadata_base.DataMetadata,
        resource_id: metadata_base.SelectorSegment,
        for_value: Optional[container.DataFrame],
    ) -> metadata_base.DataMetadata:
        """Copy a resource's metadata and strip the DatasetEntryPoint semantic type.

        Raises TypeError when the referenced resource is not a DataFrame.
        """
        resource_metadata = dict(metadata.query((resource_id,)))
        if "structural_type" not in resource_metadata or not issubclass(
            resource_metadata["structural_type"], container.DataFrame
        ):
            raise TypeError(
                'The Dataset resource is not a DataFrame, but "{type}".'.format(
                    type=resource_metadata.get("structural_type", None),
                )
            )
        # NOTE(review): this update mutates the local dict only; the returned
        # metadata is built from `metadata.copy_to` below, so the "schema" key
        # set here appears unused — confirm whether this is intentional.
        resource_metadata.update(
            {
                "schema": metadata_base.CONTAINER_SCHEMA_VERSION,
            }
        )
        new_metadata = metadata_base.DataMetadata()
        new_metadata = metadata.copy_to(new_metadata, (resource_id,))
        new_metadata = new_metadata.remove_semantic_type(
            (), "https://metadata.datadrivendiscovery.org/types/DatasetEntryPoint"
        )
        return new_metadata

    @classmethod
    def _audio_load(cls, n_jobs: int, files_in: Sequence[Tuple]) -> List:
        """Load all (path, start, end) entries in parallel with joblib/loky.

        NOTE(review): float(f[1]) raises TypeError when the slice is None, so
        entries appended as (path, None, None) upstream would fail here —
        confirm callers always provide numeric slices.
        """
        jobs = [
            delayed(convert_load_file)(f[0], float(f[1]), float(f[2]))
            for f in tqdm(files_in, total=len(files_in))
        ]
        files_out = Parallel(n_jobs=n_jobs, backend="loky", verbose=10)(jobs)
        return files_out
| [
"logging.getLogger",
"soundfile.info",
"d3m.primitive_interfaces.base.CallResult",
"d3m.metadata.base.DataMetadata",
"d3m.container.DataFrame",
"d3m.base.utils.get_tabular_resource",
"os.path.join",
"joblib.Parallel",
"os.path.dirname",
"numpy.zeros",
"prctl.set_pdeathsig",
"tempfile.NamedTemp... | [((560, 587), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (577, 587), False, 'import logging\n'), ((1610, 1648), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""rb"""'}), "(mode='rb')\n", (1637, 1648), False, 'import tempfile\n'), ((2820, 2845), 'soundfile.info', 'sf.info', (['output_file.name'], {}), '(output_file.name)\n', (2827, 2845), True, 'import soundfile as sf\n'), ((6139, 6207), 'd3m.base.utils.get_tabular_resource', 'base_utils.get_tabular_resource', (['inputs', 'None'], {'pick_entry_point': '(True)'}), '(inputs, None, pick_entry_point=True)\n', (6170, 6207), True, 'from d3m.base import utils as base_utils\n'), ((6523, 6551), 'd3m.primitive_interfaces.base.CallResult', 'base.CallResult', (['learning_df'], {}), '(learning_df)\n', (6538, 6551), False, 'from d3m.primitive_interfaces import base, transformer\n'), ((6889, 6957), 'd3m.base.utils.get_tabular_resource', 'base_utils.get_tabular_resource', (['inputs', 'None'], {'pick_entry_point': '(True)'}), '(inputs, None, pick_entry_point=True)\n', (6920, 6957), True, 'from d3m.base import utils as base_utils\n'), ((7957, 8009), 'd3m.base.utils.get_tabular_resource', 'base_utils.get_tabular_resource', (['inputs', 'resource_id'], {}), '(inputs, resource_id)\n', (7988, 8009), True, 'from d3m.base import utils as base_utils\n'), ((9353, 9385), 'pandas.DataFrame', 'pd.DataFrame', (["{'audio': outputs}"], {}), "({'audio': outputs})\n", (9365, 9385), True, 'import pandas as pd\n'), ((10346, 10374), 'd3m.metadata.base.DataMetadata', 'metadata_base.DataMetadata', ([], {}), '()\n', (10372, 10374), True, 'from d3m.metadata import base as metadata_base, hyperparams\n'), ((3080, 3143), 'soundfile.read', 'sf.read', (['output_file.name'], {'start': 'start', 'stop': 'end', 'dtype': '"""int16"""'}), "(output_file.name, start=start, stop=end, dtype='int16')\n", (3087, 3143), True, 'import soundfile as sf\n'), ((3227, 3267), 
'soundfile.read', 'sf.read', (['output_file.name'], {'dtype': '"""int16"""'}), "(output_file.name, dtype='int16')\n", (3234, 3267), True, 'import soundfile as sf\n'), ((9450, 9505), 'd3m.container.DataFrame', 'container.DataFrame', (['result_df'], {'generate_metadata': '(False)'}), '(result_df, generate_metadata=False)\n', (9469, 9505), False, 'from d3m import container, utils\n'), ((10892, 10943), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'n_jobs', 'backend': '"""loky"""', 'verbose': '(10)'}), "(n_jobs=n_jobs, backend='loky', verbose=10)\n", (10900, 10943), False, 'from joblib import Parallel, delayed\n'), ((3490, 3578), 'numpy.zeros', 'np.zeros', (['(sample_rate - audio_array.shape[0], audio_array.shape[1])'], {'dtype': '"""int16"""'}), "((sample_rate - audio_array.shape[0], audio_array.shape[1]), dtype=\n 'int16')\n", (3498, 3578), True, 'import numpy as np\n'), ((10746, 10772), 'joblib.delayed', 'delayed', (['convert_load_file'], {}), '(convert_load_file)\n', (10753, 10772), False, 'from joblib import Parallel, delayed\n'), ((2593, 2628), 'prctl.set_pdeathsig', 'prctl.set_pdeathsig', (['signal.SIGKILL'], {}), '(signal.SIGKILL)\n', (2612, 2628), False, 'import prctl\n'), ((8909, 8949), 'os.path.join', 'os.path.join', (['base_path', "row['filename']"], {}), "(base_path, row['filename'])\n", (8921, 8949), False, 'import os\n'), ((9146, 9186), 'os.path.join', 'os.path.join', (['base_path', "row['filename']"], {}), "(base_path, row['filename'])\n", (9158, 9186), False, 'import os\n'), ((5508, 5533), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (5523, 5533), False, 'import os\n')] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as tick
from statistics import mean
from tqdm import tqdm
import multiprocessing as mp
from . import model as dymod
class Filter:
    """Count and filter instantaneous-data files by their number of erroneous vectors.

    (Original Japanese docstring: 誤ベクトル数の確認,誤ベクトル数によるフィルタリング処理)
    """

    @classmethod
    def get_incorrect_vector_example(cls, file_list, example_number):
        """Return the erroneous-vector count of the first ``example_number`` files."""
        try:
            file_list = file_list[:example_number]
        except TypeError:
            # Keep the whole list when example_number is not a valid slice bound
            # (the original code silently ignored slicing errors as well).
            pass
        return cls.get_incorrect_vector_all(file_list)

    @classmethod
    def get_incorrect_vector_all(cls, file_list):
        """Return the erroneous-vector count of every file in ``file_list``."""
        return [cls.get_total_incorrect_vector(file) for file in tqdm(file_list)]

    @classmethod
    def show_incorrect_vector_example(cls, file_list, example_number):
        """Plot the erroneous-vector counts of the first ``example_number`` files."""
        try:
            file_list = file_list[:example_number]
        except TypeError:
            pass
        counts = cls.get_incorrect_vector_all(file_list)
        cls._plot_incorrect_vectors(
            counts,
            'incorrect vector NO. of first {} data'.format(example_number),
            minor_grid=True,
        )

    @classmethod
    def show_incorrect_vector_all(cls, file_list):
        """Plot the erroneous-vector count of every file in ``file_list``."""
        counts = cls.get_incorrect_vector_all(file_list)
        cls._plot_incorrect_vectors(
            counts, 'incorrect vector NO. of all data', minor_grid=False
        )

    @staticmethod
    def _plot_incorrect_vectors(incorrect_vector_list, title, minor_grid):
        """Scatter-plot per-file counts with a horizontal line at the mean."""
        incorrect_vector_mean = mean(incorrect_vector_list)
        plt.title(title)
        plt.scatter(range(len(incorrect_vector_list)), incorrect_vector_list)
        plt.axhline(incorrect_vector_mean, color='black')
        plt.text(0, incorrect_vector_mean + 50, 'mean value = ' + str(incorrect_vector_mean))
        if minor_grid:
            # Minor y ticks every 100 make dense count plots easier to read.
            plt.gca().yaxis.set_minor_locator(tick.MultipleLocator(100))
            plt.grid(which='minor')
        else:
            plt.grid()
        plt.show()

    @staticmethod
    def filter_incorrect_vector(file_list, filter_value):
        """Remove files whose erroneous-vector count is >= ``filter_value``.

        The check runs in parallel on all CPU cores. ``file_list`` is modified
        in place and also returned.
        """
        before = len(file_list)
        print('Filtering...')
        total_core = mp.cpu_count()
        args = [(file_list, total_core, i, filter_value) for i in range(total_core)]
        # Context manager guarantees worker processes are cleaned up
        # (the original never closed the pool).
        with mp.Pool(total_core) as pool:
            callback = pool.map(parallel_task, args)
        # Flatten the per-worker index lists and delete from the end so the
        # remaining indices stay valid while deleting.
        error_index_list = []
        for each_error_index_list in callback:
            error_index_list.extend(each_error_index_list)
        error_index_list.sort(reverse=True)
        for error_index in error_index_list:
            del file_list[error_index]
        after = len(file_list)
        print('Finish!\nFiltered data:', str(before - after) + '/' + str(before))
        return file_list

    @staticmethod
    def get_total_incorrect_vector(file):
        """Return the number of erroneous vectors (Status 1 or 17) in one file."""
        data = dymod.InstantData(file)
        status = data.get_data('Status')
        return np.sum((status == 1) | (status == 17))
def parallel_task(args):
    """Parallel filtering worker.

    ``args`` is ``(file_list, total_core, current_core, filter_value)``.
    Returns the indices (into ``file_list``) of files in this worker's slice
    whose erroneous-vector count reaches ``filter_value``.

    BUGFIX: the original computed ``end = ... - 1`` and then iterated the
    exclusive ``range(start, end)``, so every worker silently skipped the last
    file of its slice and those files were never checked.
    """
    file_list, total_core, current_core, filter_value = args
    file_count = len(file_list)
    # Evenly partition [0, file_count) across workers; range() is exclusive
    # at the top, so no extra -1 is needed.
    start = int(file_count * current_core // total_core)
    end = int(file_count * (current_core + 1) // total_core)
    header = dymod.InstantData.get_header_row(file_list[0])
    error_file_index_list = []
    text = 'filtering task ' + str(current_core + 1) + '/' + str(total_core)
    for i in tqdm(range(start, end), desc=text):
        status = pd.read_csv(file_list[i], header=header)['Status']
        # Status 1 and 17 mark erroneous vectors (same criterion as
        # Filter.get_total_incorrect_vector).
        if np.sum((status == 1) | (status == 17)) >= filter_value:
            error_file_index_list.append(i)
    return error_file_index_list
# Module-level convenience instance so callers can use ``filtering.<method>`` directly.
filtering = Filter()
| [
"statistics.mean",
"matplotlib.pyplot.grid",
"pandas.read_csv",
"matplotlib.ticker.MultipleLocator",
"matplotlib.pyplot.gca",
"tqdm.tqdm",
"multiprocessing.cpu_count",
"matplotlib.pyplot.axhline",
"numpy.sum",
"multiprocessing.Pool",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((1637, 1664), 'statistics.mean', 'mean', (['incorrect_vector_list'], {}), '(incorrect_vector_list)\n', (1641, 1664), False, 'from statistics import mean\n'), ((1849, 1898), 'matplotlib.pyplot.axhline', 'plt.axhline', (['incorrect_vector_mean'], {'color': '"""black"""'}), "(incorrect_vector_mean, color='black')\n", (1860, 1898), True, 'import matplotlib.pyplot as plt\n'), ((2070, 2093), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""minor"""'}), "(which='minor')\n", (2078, 2093), True, 'import matplotlib.pyplot as plt\n'), ((2102, 2112), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2110, 2112), True, 'import matplotlib.pyplot as plt\n'), ((2483, 2510), 'statistics.mean', 'mean', (['incorrect_vector_list'], {}), '(incorrect_vector_list)\n', (2487, 2510), False, 'from statistics import mean\n'), ((2535, 2580), 'matplotlib.pyplot.title', 'plt.title', (['"""incorrect vector NO. of all data"""'], {}), "('incorrect vector NO. of all data')\n", (2544, 2580), True, 'import matplotlib.pyplot as plt\n'), ((2667, 2716), 'matplotlib.pyplot.axhline', 'plt.axhline', (['incorrect_vector_mean'], {'color': '"""black"""'}), "(incorrect_vector_mean, color='black')\n", (2678, 2716), True, 'import matplotlib.pyplot as plt\n'), ((2819, 2829), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2827, 2829), True, 'import matplotlib.pyplot as plt\n'), ((2838, 2848), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2846, 2848), True, 'import matplotlib.pyplot as plt\n'), ((3070, 3084), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (3082, 3084), True, 'import multiprocessing as mp\n'), ((3100, 3119), 'multiprocessing.Pool', 'mp.Pool', (['total_core'], {}), '(total_core)\n', (3107, 3119), True, 'import multiprocessing as mp\n'), ((3895, 3933), 'numpy.sum', 'np.sum', (['((status == 1) | (status == 17))'], {}), '((status == 1) | (status == 17))\n', (3901, 3933), True, 'import numpy as np\n'), ((571, 586), 'tqdm.tqdm', 'tqdm', 
(['file_list'], {}), '(file_list)\n', (575, 586), False, 'from tqdm import tqdm\n'), ((945, 960), 'tqdm.tqdm', 'tqdm', (['file_list'], {}), '(file_list)\n', (949, 960), False, 'from tqdm import tqdm\n'), ((1448, 1463), 'tqdm.tqdm', 'tqdm', (['file_list'], {}), '(file_list)\n', (1452, 1463), False, 'from tqdm import tqdm\n'), ((2035, 2060), 'matplotlib.ticker.MultipleLocator', 'tick.MultipleLocator', (['(100)'], {}), '(100)\n', (2055, 2060), True, 'import matplotlib.ticker as tick\n'), ((2294, 2309), 'tqdm.tqdm', 'tqdm', (['file_list'], {}), '(file_list)\n', (2298, 2309), False, 'from tqdm import tqdm\n'), ((4426, 4466), 'pandas.read_csv', 'pd.read_csv', (['file_list[i]'], {'header': 'header'}), '(file_list[i], header=header)\n', (4437, 4466), True, 'import pandas as pd\n'), ((4488, 4526), 'numpy.sum', 'np.sum', (['((status == 1) | (status == 17))'], {}), '((status == 1) | (status == 17))\n', (4494, 4526), True, 'import numpy as np\n'), ((2001, 2010), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2008, 2010), True, 'import matplotlib.pyplot as plt\n')] |
from scannerpy import Database, Job, ColumnType, DeviceType
import os
import sys
import math
import numpy as np
from tqdm import tqdm
import six.moves.urllib as urllib
import kernels
# What model to download.
MODEL_TEMPLATE_URL = 'http://download.tensorflow.org/models/object_detection/{:s}.tar.gz'
if __name__ == '__main__':
    # BUGFIX: pickle is used below (pickle.dumps) but was never imported,
    # which crashed the script with a NameError at the drawing stage.
    import pickle

    if len(sys.argv) <= 1:
        print('Usage: {:s} path/to/your/video/file.mp4'.format(sys.argv[0]))
        sys.exit(1)
    movie_path = sys.argv[1]
    print('Detecting objects in movie {}'.format(movie_path))
    movie_name = os.path.splitext(os.path.basename(movie_path))[0]

    db = Database()
    [input_table], failed = db.ingest_videos([('example', movie_path)], force=True)
    stride = 1
    frame = db.sources.FrameColumn()
    strided_frame = db.streams.Stride(frame, stride)
    model_name = 'ssd_mobilenet_v1_coco_2017_11_17'
    model_url = MODEL_TEMPLATE_URL.format(model_name)

    # Call the newly created object detect op
    objdet_frame = db.ops.ObjDetect(
        frame=strided_frame,
        dnn_url=model_url,
        device=DeviceType.GPU if db.has_gpu() else DeviceType.CPU,
        batch=2)
    output_op = db.sinks.Column(columns={'bundled_data': objdet_frame})
    job = Job(
        op_args={
            frame: db.table('example').column('frame'),
            output_op: 'example_obj_detect',
        })
    [out_table] = db.run(output=output_op, jobs=[job], force=True,
                         pipeline_instances_per_node=1)
    out_table.profiler().write_trace('obj.trace')

    print('Extracting data from Scanner output...')
    # bundled_data_list is a list of bundled_data
    # bundled data format: [box position(x1 y1 x2 y2), box class, box score]
    # np.frombuffer replaces the deprecated binary-mode np.fromstring; .copy()
    # keeps the array writable, matching fromstring's behaviour.
    bundled_data_list = [
        np.frombuffer(box, dtype=np.float32).copy()
        for box in tqdm(out_table.column('bundled_data').load())
    ]
    print('Successfully extracted data from Scanner output!')

    # run non-maximum suppression
    bundled_np_list = kernels.nms_bulk(bundled_data_list)
    bundled_np_list = kernels.smooth_box(bundled_np_list, min_score_thresh=0.5)

    print('Writing frames to {:s}_obj_detect.mp4'.format(movie_name))
    frame = db.sources.FrameColumn()
    bundled_data = db.sources.Python()
    strided_frame = db.streams.Stride(frame, stride)
    drawn_frame = db.ops.TFDrawBoxes(frame=strided_frame,
                                      bundled_data=bundled_data,
                                      min_score_thresh=0.5)
    output_op = db.sinks.Column(columns={'frame': drawn_frame})
    job = Job(
        op_args={
            frame: db.table('example').column('frame'),
            bundled_data: {'data': pickle.dumps(bundled_np_list)},
            output_op: 'example_drawn_frames',
        })
    [out_table] = db.run(output=output_op, jobs=[job], force=True,
                         pipeline_instances_per_node=1)
    out_table.column('frame').save_mp4(movie_name + '_obj_detect')
    print('Successfully generated {:s}_obj_detect.mp4'.format(movie_name))
| [
"scannerpy.Database",
"os.path.basename",
"sys.exit",
"kernels.smooth_box",
"numpy.fromstring",
"kernels.nms_bulk"
] | [((622, 632), 'scannerpy.Database', 'Database', ([], {}), '()\n', (630, 632), False, 'from scannerpy import Database, Job, ColumnType, DeviceType\n'), ((1991, 2026), 'kernels.nms_bulk', 'kernels.nms_bulk', (['bundled_data_list'], {}), '(bundled_data_list)\n', (2007, 2026), False, 'import kernels\n'), ((2049, 2106), 'kernels.smooth_box', 'kernels.smooth_box', (['bundled_np_list'], {'min_score_thresh': '(0.5)'}), '(bundled_np_list, min_score_thresh=0.5)\n', (2067, 2106), False, 'import kernels\n'), ((441, 452), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (449, 452), False, 'import sys\n'), ((1764, 1800), 'numpy.fromstring', 'np.fromstring', (['box'], {'dtype': 'np.float32'}), '(box, dtype=np.float32)\n', (1777, 1800), True, 'import numpy as np\n'), ((579, 607), 'os.path.basename', 'os.path.basename', (['movie_path'], {}), '(movie_path)\n', (595, 607), False, 'import os\n')] |
import fiona
import numpy as np
import os
import pytest
import rasterio
import mapchete
from mapchete.index import zoom_index_gen
from mapchete.io import get_boto3_bucket
@pytest.mark.remote
def test_remote_indexes(mp_s3_tmpdir, gtiff_s3):
    """GeoJSON, TXT and VRT indexes can be written (and re-written) on S3 output."""
    zoom = 7
    gtiff_s3.dict.update(zoom_levels=zoom)

    def gen_indexes_and_check():
        """Generate all three index types and verify each one exists on S3.

        Closes over ``mp`` from the enclosing scope; only called after the
        ``with mapchete.open(...)`` block below binds it.
        """
        # generate indexes
        list(
            zoom_index_gen(
                mp=mp,
                zoom=zoom,
                out_dir=mp.config.output.path,
                geojson=True,
                txt=True,
                vrt=True,
            )
        )
        # assert GeoJSON exists
        with fiona.open(
            os.path.join(mp.config.output.path, "%s.geojson" % zoom)
        ) as src:
            assert len(src) == 2
        # assert TXT exists
        txt_index = os.path.join(mp.config.output.path, "%s.txt" % zoom)
        # path layout: s3://<bucket>/<key...> -> split out bucket and key
        bucket = get_boto3_bucket(txt_index.split("/")[2])
        key = "/".join(txt_index.split("/")[3:])
        for obj in bucket.objects.filter(Prefix=key):
            if obj.key == key:
                content = obj.get()["Body"].read().decode()
                # two output tiles -> two non-empty lines in the TXT index
                assert len([l + "\n" for l in content.split("\n") if l]) == 2
        # assert VRT exists
        with rasterio.open(os.path.join(mp.config.output.path, "%s.vrt" % zoom)) as src:
            assert src.read().any()

    with mapchete.open(gtiff_s3.dict) as mp:
        # write output data
        mp.batch_process(zoom=zoom)
        # generate indexes and check
        gen_indexes_and_check()
        # generate indexes again and assert nothing has changed
        gen_indexes_and_check()
def test_vrt(mp_tmpdir, cleantopo_br):
    """A generated VRT index matches an equivalent VRT built with gdalbuildvrt."""
    zoom = 8
    with mapchete.open(
        dict(cleantopo_br.dict, zoom_levels=dict(min=0, max=zoom))
    ) as mp:
        # generate output
        mp.batch_process(zoom=zoom)
        # generate index
        list(
            zoom_index_gen(
                mp=mp,
                zoom=zoom,
                out_dir=mp.config.output.path,
                vrt=True,
            )
        )
        output_tiles = list(
            mp.config.output_pyramid.tiles_from_bounds(
                mp.config.bounds_at_zoom(zoom=zoom), zoom=zoom
            )
        )
        # union of all output tile extents
        bounds = (
            min([t.left for t in output_tiles]),
            min([t.bottom for t in output_tiles]),
            max([t.right for t in output_tiles]),
            max([t.top for t in output_tiles]),
        )
        # bounds = mp.config.effective_bounds
        vrt_index = os.path.join(mp.config.output.path, "%s.vrt" % zoom)
        with rasterio.open(vrt_index) as vrt:
            assert vrt.driver == "VRT"
            assert vrt.dtypes[0] == "uint16"
            assert vrt.meta["dtype"] == "uint16"
            assert vrt.count == 1
            assert vrt.nodata == 0
            assert vrt.bounds == bounds
            vrt_data = vrt.read()
            assert vrt_data.any()
        # generate a VRT using GDAL and compare
        out_dir = os.path.join(mp_tmpdir, "cleantopo_br")
        temp_vrt = os.path.join(out_dir, str(zoom) + "_gdal.vrt")
        gdalbuildvrt = "gdalbuildvrt %s %s/%s/*/*.tif > /dev/null" % (
            temp_vrt,
            out_dir,
            zoom,
        )
        os.system(gdalbuildvrt)
        with rasterio.open(temp_vrt, "r") as gdal_vrt:
            assert gdal_vrt.dtypes[0] == "uint16"
            assert gdal_vrt.meta["dtype"] == "uint16"
            assert gdal_vrt.count == 1
            assert gdal_vrt.nodata == 0
            assert gdal_vrt.bounds == bounds
            gdal_vrt_data = gdal_vrt.read()
            # pixel-for-pixel identical to the mapchete-generated VRT
            assert np.array_equal(vrt_data, gdal_vrt_data)
    # make sure handling an existing VRT works
    with mapchete.open(
        dict(cleantopo_br.dict, zoom_levels=dict(min=0, max=zoom))
    ) as mp:
        # generate output
        mp.batch_process(zoom=zoom)
        # generate index
        list(
            zoom_index_gen(
                mp=mp,
                zoom=zoom,
                out_dir=mp.config.output.path,
                vrt=True,
            )
        )
def test_vrt_mercator(mp_tmpdir, cleantopo_br_mercator):
    """VRT index generation works for a mercator pyramid and matches a GDAL-built VRT."""
    zoom = 8
    with mapchete.open(
        dict(cleantopo_br_mercator.dict, zoom_levels=dict(min=0, max=zoom))
    ) as mp:
        # generate output
        mp.batch_process(zoom=zoom)
        # generate index
        list(
            zoom_index_gen(
                mp=mp,
                zoom=zoom,
                out_dir=mp.config.output.path,
                vrt=True,
            )
        )
        output_tiles = list(
            mp.config.output_pyramid.tiles_from_bounds(
                mp.config.bounds_at_zoom(zoom=zoom), zoom=zoom
            )
        )
        # union of all output tile extents
        bounds = (
            min([t.left for t in output_tiles]),
            min([t.bottom for t in output_tiles]),
            max([t.right for t in output_tiles]),
            max([t.top for t in output_tiles]),
        )
        # bounds = mp.config.effective_bounds
        vrt_index = os.path.join(mp.config.output.path, "%s.vrt" % zoom)
        with rasterio.open(vrt_index) as vrt:
            assert vrt.driver == "VRT"
            assert vrt.dtypes[0] == "uint16"
            assert vrt.meta["dtype"] == "uint16"
            assert vrt.count == 1
            assert vrt.nodata == 0
            # mercator coordinates are floats, so compare with rounding
            for vrt_b, b in zip(vrt.bounds, bounds):
                assert round(vrt_b, 6) == round(b, 6)
            vrt_data = vrt.read()
            assert vrt_data.any()
        # generate a VRT using GDAL and compare
        out_dir = os.path.join(mp_tmpdir, "cleantopo_br_mercator")
        temp_vrt = os.path.join(out_dir, str(zoom) + "_gdal.vrt")
        gdalbuildvrt = "gdalbuildvrt %s %s/%s/*/*.tif > /dev/null" % (
            temp_vrt,
            out_dir,
            zoom,
        )
        os.system(gdalbuildvrt)
        with rasterio.open(temp_vrt, "r") as gdal_vrt:
            assert gdal_vrt.dtypes[0] == "uint16"
            assert gdal_vrt.meta["dtype"] == "uint16"
            assert gdal_vrt.count == 1
            assert gdal_vrt.nodata == 0
            # BUGFIX: compare the GDAL-built VRT's bounds; the original
            # mistakenly re-read `vrt.bounds` from the already closed dataset
            # (copy-paste from the block above), so the GDAL VRT's bounds were
            # never actually checked.
            for vrt_b, b in zip(gdal_vrt.bounds, bounds):
                assert round(vrt_b, 6) == round(b, 6)
            gdal_vrt_data = gdal_vrt.read()
            assert np.array_equal(vrt_data, gdal_vrt_data)
            assert gdal_vrt_data.any()
    # make sure handling an existing VRT works
    with mapchete.open(
        dict(cleantopo_br_mercator.dict, zoom_levels=dict(min=0, max=zoom))
    ) as mp:
        # generate output
        mp.batch_process(zoom=zoom)
        # generate index
        list(
            zoom_index_gen(
                mp=mp,
                zoom=zoom,
                out_dir=mp.config.output.path,
                vrt=True,
            )
        )
| [
"mapchete.index.zoom_index_gen",
"rasterio.open",
"os.path.join",
"numpy.array_equal",
"os.system",
"mapchete.open"
] | [((2545, 2597), 'os.path.join', 'os.path.join', (['mp.config.output.path', "('%s.vrt' % zoom)"], {}), "(mp.config.output.path, '%s.vrt' % zoom)\n", (2557, 2597), False, 'import os\n'), ((2978, 3017), 'os.path.join', 'os.path.join', (['mp_tmpdir', '"""cleantopo_br"""'], {}), "(mp_tmpdir, 'cleantopo_br')\n", (2990, 3017), False, 'import os\n'), ((3206, 3229), 'os.system', 'os.system', (['gdalbuildvrt'], {}), '(gdalbuildvrt)\n', (3215, 3229), False, 'import os\n'), ((4937, 4989), 'os.path.join', 'os.path.join', (['mp.config.output.path', "('%s.vrt' % zoom)"], {}), "(mp.config.output.path, '%s.vrt' % zoom)\n", (4949, 4989), False, 'import os\n'), ((5433, 5481), 'os.path.join', 'os.path.join', (['mp_tmpdir', '"""cleantopo_br_mercator"""'], {}), "(mp_tmpdir, 'cleantopo_br_mercator')\n", (5445, 5481), False, 'import os\n'), ((5670, 5693), 'os.system', 'os.system', (['gdalbuildvrt'], {}), '(gdalbuildvrt)\n', (5679, 5693), False, 'import os\n'), ((833, 885), 'os.path.join', 'os.path.join', (['mp.config.output.path', "('%s.txt' % zoom)"], {}), "(mp.config.output.path, '%s.txt' % zoom)\n", (845, 885), False, 'import os\n'), ((1381, 1409), 'mapchete.open', 'mapchete.open', (['gtiff_s3.dict'], {}), '(gtiff_s3.dict)\n', (1394, 1409), False, 'import mapchete\n'), ((2608, 2632), 'rasterio.open', 'rasterio.open', (['vrt_index'], {}), '(vrt_index)\n', (2621, 2632), False, 'import rasterio\n'), ((3239, 3267), 'rasterio.open', 'rasterio.open', (['temp_vrt', '"""r"""'], {}), "(temp_vrt, 'r')\n", (3252, 3267), False, 'import rasterio\n'), ((3544, 3583), 'numpy.array_equal', 'np.array_equal', (['vrt_data', 'gdal_vrt_data'], {}), '(vrt_data, gdal_vrt_data)\n', (3558, 3583), True, 'import numpy as np\n'), ((5000, 5024), 'rasterio.open', 'rasterio.open', (['vrt_index'], {}), '(vrt_index)\n', (5013, 5024), False, 'import rasterio\n'), ((5703, 5731), 'rasterio.open', 'rasterio.open', (['temp_vrt', '"""r"""'], {}), "(temp_vrt, 'r')\n", (5716, 5731), False, 'import rasterio\n'), ((6066, 
6105), 'numpy.array_equal', 'np.array_equal', (['vrt_data', 'gdal_vrt_data'], {}), '(vrt_data, gdal_vrt_data)\n', (6080, 6105), True, 'import numpy as np\n'), ((387, 489), 'mapchete.index.zoom_index_gen', 'zoom_index_gen', ([], {'mp': 'mp', 'zoom': 'zoom', 'out_dir': 'mp.config.output.path', 'geojson': '(True)', 'txt': '(True)', 'vrt': '(True)'}), '(mp=mp, zoom=zoom, out_dir=mp.config.output.path, geojson=\n True, txt=True, vrt=True)\n', (401, 489), False, 'from mapchete.index import zoom_index_gen\n'), ((1920, 1993), 'mapchete.index.zoom_index_gen', 'zoom_index_gen', ([], {'mp': 'mp', 'zoom': 'zoom', 'out_dir': 'mp.config.output.path', 'vrt': '(True)'}), '(mp=mp, zoom=zoom, out_dir=mp.config.output.path, vrt=True)\n', (1934, 1993), False, 'from mapchete.index import zoom_index_gen\n'), ((3850, 3923), 'mapchete.index.zoom_index_gen', 'zoom_index_gen', ([], {'mp': 'mp', 'zoom': 'zoom', 'out_dir': 'mp.config.output.path', 'vrt': '(True)'}), '(mp=mp, zoom=zoom, out_dir=mp.config.output.path, vrt=True)\n', (3864, 3923), False, 'from mapchete.index import zoom_index_gen\n'), ((4312, 4385), 'mapchete.index.zoom_index_gen', 'zoom_index_gen', ([], {'mp': 'mp', 'zoom': 'zoom', 'out_dir': 'mp.config.output.path', 'vrt': '(True)'}), '(mp=mp, zoom=zoom, out_dir=mp.config.output.path, vrt=True)\n', (4326, 4385), False, 'from mapchete.index import zoom_index_gen\n'), ((6416, 6489), 'mapchete.index.zoom_index_gen', 'zoom_index_gen', ([], {'mp': 'mp', 'zoom': 'zoom', 'out_dir': 'mp.config.output.path', 'vrt': '(True)'}), '(mp=mp, zoom=zoom, out_dir=mp.config.output.path, vrt=True)\n', (6430, 6489), False, 'from mapchete.index import zoom_index_gen\n'), ((676, 732), 'os.path.join', 'os.path.join', (['mp.config.output.path', "('%s.geojson' % zoom)"], {}), "(mp.config.output.path, '%s.geojson' % zoom)\n", (688, 732), False, 'import os\n'), ((1273, 1325), 'os.path.join', 'os.path.join', (['mp.config.output.path', "('%s.vrt' % zoom)"], {}), "(mp.config.output.path, '%s.vrt' % zoom)\n", 
(1285, 1325), False, 'import os\n')] |
# %%
"""
This module contains copies of the classes SOMToolBox_Parse and SomViz provided by the lecturers.
"""
import pandas as pd
import numpy as np
import gzip
from scipy.spatial import distance_matrix, distance
from ipywidgets import Layout, HBox, Box, widgets, interact
import plotly.graph_objects as go
class SOMToolBox_Parse:
    """Parser for SOMToolbox weight/vector files (plain or gzip-compressed)."""

    def __init__(self, filename):
        self.filename = filename

    def read_weight_file(self):
        """Parse the weight file.

        Returns:
            Tuple ``(df, vec_dim, xdim, ydim)`` where ``df`` is a float64
            DataFrame with one row per map unit (``ydim * xdim`` rows,
            ``vec_dim`` columns).
        """
        df = pd.DataFrame()
        # gzip-compressed files are recognised by their extension
        # (idiomatic endswith() instead of the original [-3:len(...)] slice).
        if self.filename.endswith('.gz'):
            with gzip.open(self.filename, 'rb') as file:
                df, vec_dim, xdim, ydim = self._read_vector_file_to_df(df, file)
        else:
            # the `with` block closes the file; no explicit close() needed
            with open(self.filename, 'rb') as file:
                df, vec_dim, xdim, ydim = self._read_vector_file_to_df(df, file)
        return df.astype('float64'), vec_dim, xdim, ydim

    def _read_vector_file_to_df(self, df, file):
        """Read metadata ($XDIM/$YDIM/$VEC_DIM) and weight rows into ``df``."""
        xdim, ydim, vec_dim, position = 0, 0, 0, 0
        for byte in file:
            line = byte.decode('UTF-8')
            if line.startswith('$'):
                xdim, ydim, vec_dim = self._parse_vector_file_metadata(line, xdim, ydim, vec_dim)
                # (Re)allocate the frame once both map dimensions are known;
                # an earlier allocation with vec_dim == 0 has no columns and
                # is replaced when $VEC_DIM arrives.
                if xdim > 0 and ydim > 0 and len(df.columns) == 0:
                    df = pd.DataFrame(index=range(0, ydim * xdim), columns=range(0, vec_dim))
            else:
                if len(df.columns) == 0 or vec_dim == 0:
                    raise ValueError('Weight file has no correct Dimensional information.')
                position = self._parse_weight_file_data(line, position, vec_dim, df)
        return df, vec_dim, xdim, ydim

    def _parse_weight_file_data(self, line, position, vec_dim, df):
        """Store one data line's first ``vec_dim`` floats at row ``position``."""
        splitted = line.split(' ')
        try:
            df.values[position] = list(np.array(splitted[0:vec_dim]).astype(float))
            position += 1
        except ValueError:
            # narrow except instead of the original bare `except:` so that
            # unrelated errors (e.g. KeyboardInterrupt) are not swallowed
            raise ValueError('The input-vector file does not match its unit-dimension.')
        return position

    def _parse_vector_file_metadata(self, line, xdim, ydim, vec_dim):
        """Update dimensions from a ``$XDIM`` / ``$YDIM`` / ``$VEC_DIM`` line."""
        splitted = line.split(' ')
        if splitted[0] == '$XDIM':
            xdim = int(splitted[1])
        elif splitted[0] == '$YDIM':
            ydim = int(splitted[1])
        elif splitted[0] == '$VEC_DIM':
            vec_dim = int(splitted[1])
        return xdim, ydim, vec_dim
# %%
class SomViz:
    def __init__(self, weights, m, n):
        # weights: 2-D array of SOM codebook vectors, one row per map unit;
        # rows are laid out row-major over the m x n grid (later methods use
        # position % n / position // n and reshape(m, n)).
        # m: number of map rows, n: number of map columns.
        self.weights = weights
        self.m = m
        self.n = n
def umatrix(self, som_map=None, color="Viridis", interp="best", title=""):
um = np.zeros((self.m * self.n, 1))
neuron_locs = list()
for i in range(self.m):
for j in range(self.n):
neuron_locs.append(np.array([i, j]))
neuron_distmat = distance_matrix(neuron_locs, neuron_locs)
for i in range(self.m * self.n):
neighbor_idxs = neuron_distmat[i] <= 1
neighbor_weights = self.weights[neighbor_idxs]
um[i] = distance_matrix(np.expand_dims(self.weights[i], 0), neighbor_weights).mean()
if som_map == None:
return self.plot(um.reshape(self.m, self.n), color=color, interp=interp, title=title)
else:
som_map.data[0].z = um.reshape(self.m, self.n)
def hithist(self, som_map=None, idata=[], color='RdBu', interp="best", title=""):
hist = [0] * self.n * self.m
for v in idata:
position = np.argmin(np.sqrt(np.sum(np.power(self.weights - v, 2), axis=1)))
hist[position] += 1
if som_map == None:
return self.plot(np.array(hist).reshape(self.m, self.n), color=color, interp=interp, title=title)
else:
som_map.data[0].z = np.array(hist).reshape(self.m, self.n)
def component_plane(self, som_map=None, component=0, color="Viridis", interp="best", title=""):
if som_map == None:
return self.plot(self.weights[:, component].reshape(-1, self.n), color=color, interp=interp, title=title)
else:
som_map.data[0].z = self.weights[:, component].reshape(-1, self.n)
def sdh(self, som_map=None, idata=[], sdh_type=1, factor=1, draw=True, color="Cividis", interp="best", title=""):
import heapq
sdh_m = [0] * self.m * self.n
cs = 0
for i in range(0, factor): cs += factor - i
for vector in idata:
dist = np.sqrt(np.sum(np.power(self.weights - vector, 2), axis=1))
c = heapq.nsmallest(factor, range(len(dist)), key=dist.__getitem__)
if (sdh_type == 1):
for j in range(0, factor): sdh_m[c[j]] += (factor - j) / cs # normalized
if (sdh_type == 2):
for j in range(0, factor): sdh_m[c[j]] += 1.0 / dist[c[j]] # based on distance
if (sdh_type == 3):
dmin = min(dist)
for j in range(0, factor): sdh_m[c[j]] += 1.0 - (dist[c[j]] - dmin) / (max(dist) - dmin)
if som_map == None:
return self.plot(np.array(sdh_m).reshape(-1, self.n), color=color, interp=interp, title=title)
else:
som_map.data[0].z = np.array(sdh_m).reshape(-1, self.n)
def project_data(self, som_m=None, idata=[], title=""):
data_y = []
data_x = []
for v in idata:
position = np.argmin(np.sqrt(np.sum(np.power(self.weights - v, 2), axis=1)))
x, y = position % self.n, position // self.n
data_x.extend([x])
data_y.extend([y])
if som_m != None: som_m.add_trace(
go.Scatter(x=data_x, y=data_y, mode="markers", marker_color='rgba(255, 255, 255, 0.8)', ))
def time_series(self, som_m=None, idata=[], wsize=50, title=""): # not tested
data_y = []
data_x = [i for i in range(0, len(idata))]
data_x2 = []
data_y2 = []
qmin = np.Inf
qmax = 0
step = 1
ps = []
for v in idata:
matrix = np.sqrt(np.sum(np.power(self.weights - v, 2), axis=1))
position = np.argmin(matrix)
qerror = matrix[position]
if qmin > qerror: qmin = qerror
if qmax < qerror: qmax = qerror
ps.append((position, qerror))
markerc = []
for v in ps:
data_y.extend([v[0]])
rez = v[1] / qmax
markerc.append('rgba(0, 0, 0, ' + str(rez) + ')')
x, y = v[0] % self.n, v[0] // self.n
if x == 0:
y = np.random.uniform(low=y, high=y + .1)
elif x == self.m - 1:
y = np.random.uniform(low=y - .1, high=y)
elif y == 0:
x = np.random.uniform(low=x, high=x + .1)
elif y == self.n - 1:
x = np.random.uniform(low=x - .1, high=x)
else:
x, y = np.random.uniform(low=x - .1, high=x + .1), np.random.uniform(low=y - .1, high=y + .1)
data_x2.extend([x])
data_y2.extend([y])
ts_plot = go.FigureWidget(go.Scatter(x=[], y=[], mode="markers", marker_color=markerc,
marker=dict(colorscale='Viridis', showscale=True,
color=np.random.randn(500))))
ts_plot.update_xaxes(range=[0, wsize])
ts_plot.data[0].x, ts_plot.data[0].y = data_x, data_y
som_m.add_trace(go.Scatter(x=data_x2, y=data_y2, mode="markers", ))
som_m.layout.height = 500
ts_plot.layout.height = 500
som_m.layout.width = 500
ts_plot.layout.width = 1300
return HBox([go.FigureWidget(som_m), go.FigureWidget(ts_plot)])
def plot(self, matrix, color="Viridis", interp="best", title=""):
return go.FigureWidget(go.Heatmap(z=matrix, zsmooth=interp, showscale=False, colorscale=color),
layout=go.Layout(width=700, height=700, title=title, title_x=0.5, ))
if __name__ == "__main__":
    from sklearn import datasets, preprocessing
    from src.NeighbourhoodGraph import NeighbourhoodGraph
    # Demo: visualise a SOM trained on the Iris data set, overlaying
    # neighbourhood graphs (k-NN and radius based) on the U-matrix.
    iris = datasets.load_iris().data
    # min_max_scaler = preprocessing.MinMaxScaler()
    # iris = min_max_scaler.fit_transform(iris)
    # Load a pre-trained SOM codebook from the SOMToolBox weight file.
    smap = SOMToolBox_Parse('../input/iris/iris.wgt.gz')
    s_weights, sdim, smap_x_dim, smap_y_dim = smap.read_weight_file()
    s_weights = s_weights.to_numpy()
    ng_iris = NeighbourhoodGraph(s_weights, smap_x_dim, smap_y_dim, input_data=iris)
    ng_iris_trace_3nn = ng_iris.get_trace(knn=3)
    # Stand-alone neighbourhood-graph figure.
    go.FigureWidget(data=ng_iris_trace_3nn,
                    layout=go.Layout(width=700, height=700, title="Iris: NeighbourhoodGraph (3-NN)")).show()
    # U-matrix with the 3-NN graph overlaid.
    vis_iris = SomViz(s_weights, smap_x_dim, smap_y_dim)
    um_iris = vis_iris.umatrix(title="Iris: Umatrix + NeighbourhoodGraph (3-NN)")
    um_iris.add_trace(ng_iris_trace_3nn)
    um_iris.show()
    # We can reuse all traces
    ng_iris_trace_05r = ng_iris.get_trace(radius=0.5)
    um_iris.data = [um_iris.data[0]]  # drop the previous overlay, keep only the U-matrix heatmap
    um_iris.add_trace(ng_iris_trace_05r)
    um_iris.layout = go.Layout(width=700, height=700, title="Iris: Umatrix + NeighbourhoodGraph (0.5 Radius)",
                              title_x=0.5, )
    um_iris.show()
    # Hit histogram with the 3-NN graph overlaid.
    hithist_iris = vis_iris.hithist(idata=iris, title="Iris: HisHist + NeighbourhoodGraph (3-NN)")
    hithist_iris.add_trace(ng_iris_trace_3nn)
hithist_iris.show() | [
"sklearn.datasets.load_iris",
"plotly.graph_objects.Heatmap",
"plotly.graph_objects.Layout",
"gzip.open",
"numpy.power",
"scipy.spatial.distance_matrix",
"src.NeighbourhoodGraph.NeighbourhoodGraph",
"numpy.zeros",
"plotly.graph_objects.Scatter",
"plotly.graph_objects.FigureWidget",
"numpy.array"... | [((8420, 8490), 'src.NeighbourhoodGraph.NeighbourhoodGraph', 'NeighbourhoodGraph', (['s_weights', 'smap_x_dim', 'smap_y_dim'], {'input_data': 'iris'}), '(s_weights, smap_x_dim, smap_y_dim, input_data=iris)\n', (8438, 8490), False, 'from src.NeighbourhoodGraph import NeighbourhoodGraph\n'), ((9079, 9186), 'plotly.graph_objects.Layout', 'go.Layout', ([], {'width': '(700)', 'height': '(700)', 'title': '"""Iris: Umatrix + NeighbourhoodGraph (0.5 Radius)"""', 'title_x': '(0.5)'}), "(width=700, height=700, title=\n 'Iris: Umatrix + NeighbourhoodGraph (0.5 Radius)', title_x=0.5)\n", (9088, 9186), True, 'import plotly.graph_objects as go\n'), ((451, 465), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (463, 465), True, 'import pandas as pd\n'), ((2585, 2615), 'numpy.zeros', 'np.zeros', (['(self.m * self.n, 1)'], {}), '((self.m * self.n, 1))\n', (2593, 2615), True, 'import numpy as np\n'), ((2791, 2832), 'scipy.spatial.distance_matrix', 'distance_matrix', (['neuron_locs', 'neuron_locs'], {}), '(neuron_locs, neuron_locs)\n', (2806, 2832), False, 'from scipy.spatial import distance_matrix, distance\n'), ((8114, 8134), 'sklearn.datasets.load_iris', 'datasets.load_iris', ([], {}), '()\n', (8132, 8134), False, 'from sklearn import datasets, preprocessing\n'), ((6068, 6085), 'numpy.argmin', 'np.argmin', (['matrix'], {}), '(matrix)\n', (6077, 6085), True, 'import numpy as np\n'), ((7427, 7475), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'data_x2', 'y': 'data_y2', 'mode': '"""markers"""'}), "(x=data_x2, y=data_y2, mode='markers')\n", (7437, 7475), True, 'import plotly.graph_objects as go\n'), ((7794, 7865), 'plotly.graph_objects.Heatmap', 'go.Heatmap', ([], {'z': 'matrix', 'zsmooth': 'interp', 'showscale': '(False)', 'colorscale': 'color'}), '(z=matrix, zsmooth=interp, showscale=False, colorscale=color)\n', (7804, 7865), True, 'import plotly.graph_objects as go\n'), ((541, 571), 'gzip.open', 'gzip.open', (['self.filename', '"""rb"""'], {}), 
"(self.filename, 'rb')\n", (550, 571), False, 'import gzip\n'), ((5580, 5672), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'data_x', 'y': 'data_y', 'mode': '"""markers"""', 'marker_color': '"""rgba(255, 255, 255, 0.8)"""'}), "(x=data_x, y=data_y, mode='markers', marker_color=\n 'rgba(255, 255, 255, 0.8)')\n", (5590, 5672), True, 'import plotly.graph_objects as go\n'), ((6517, 6555), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'y', 'high': '(y + 0.1)'}), '(low=y, high=y + 0.1)\n', (6534, 6555), True, 'import numpy as np\n'), ((7641, 7663), 'plotly.graph_objects.FigureWidget', 'go.FigureWidget', (['som_m'], {}), '(som_m)\n', (7656, 7663), True, 'import plotly.graph_objects as go\n'), ((7665, 7689), 'plotly.graph_objects.FigureWidget', 'go.FigureWidget', (['ts_plot'], {}), '(ts_plot)\n', (7680, 7689), True, 'import plotly.graph_objects as go\n'), ((7905, 7963), 'plotly.graph_objects.Layout', 'go.Layout', ([], {'width': '(700)', 'height': '(700)', 'title': 'title', 'title_x': '(0.5)'}), '(width=700, height=700, title=title, title_x=0.5)\n', (7914, 7963), True, 'import plotly.graph_objects as go\n'), ((2748, 2764), 'numpy.array', 'np.array', (['[i, j]'], {}), '([i, j])\n', (2756, 2764), True, 'import numpy as np\n'), ((3736, 3750), 'numpy.array', 'np.array', (['hist'], {}), '(hist)\n', (3744, 3750), True, 'import numpy as np\n'), ((4426, 4460), 'numpy.power', 'np.power', (['(self.weights - vector)', '(2)'], {}), '(self.weights - vector, 2)\n', (4434, 4460), True, 'import numpy as np\n'), ((5154, 5169), 'numpy.array', 'np.array', (['sdh_m'], {}), '(sdh_m)\n', (5162, 5169), True, 'import numpy as np\n'), ((6005, 6034), 'numpy.power', 'np.power', (['(self.weights - v)', '(2)'], {}), '(self.weights - v, 2)\n', (6013, 6034), True, 'import numpy as np\n'), ((6609, 6647), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(y - 0.1)', 'high': 'y'}), '(low=y - 0.1, high=y)\n', (6626, 6647), True, 'import numpy as np\n'), ((8612, 8685), 
'plotly.graph_objects.Layout', 'go.Layout', ([], {'width': '(700)', 'height': '(700)', 'title': '"""Iris: NeighbourhoodGraph (3-NN)"""'}), "(width=700, height=700, title='Iris: NeighbourhoodGraph (3-NN)')\n", (8621, 8685), True, 'import plotly.graph_objects as go\n'), ((1798, 1827), 'numpy.array', 'np.array', (['splitted[0:vec_dim]'], {}), '(splitted[0:vec_dim])\n', (1806, 1827), True, 'import numpy as np\n'), ((3021, 3055), 'numpy.expand_dims', 'np.expand_dims', (['self.weights[i]', '(0)'], {}), '(self.weights[i], 0)\n', (3035, 3055), True, 'import numpy as np\n'), ((3478, 3507), 'numpy.power', 'np.power', (['(self.weights - v)', '(2)'], {}), '(self.weights - v, 2)\n', (3486, 3507), True, 'import numpy as np\n'), ((3609, 3623), 'numpy.array', 'np.array', (['hist'], {}), '(hist)\n', (3617, 3623), True, 'import numpy as np\n'), ((5030, 5045), 'numpy.array', 'np.array', (['sdh_m'], {}), '(sdh_m)\n', (5038, 5045), True, 'import numpy as np\n'), ((5364, 5393), 'numpy.power', 'np.power', (['(self.weights - v)', '(2)'], {}), '(self.weights - v, 2)\n', (5372, 5393), True, 'import numpy as np\n'), ((6692, 6730), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'x', 'high': '(x + 0.1)'}), '(low=x, high=x + 0.1)\n', (6709, 6730), True, 'import numpy as np\n'), ((6784, 6822), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(x - 0.1)', 'high': 'x'}), '(low=x - 0.1, high=x)\n', (6801, 6822), True, 'import numpy as np\n'), ((7269, 7289), 'numpy.random.randn', 'np.random.randn', (['(500)'], {}), '(500)\n', (7284, 7289), True, 'import numpy as np\n'), ((6863, 6907), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(x - 0.1)', 'high': '(x + 0.1)'}), '(low=x - 0.1, high=x + 0.1)\n', (6880, 6907), True, 'import numpy as np\n'), ((6907, 6951), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(y - 0.1)', 'high': '(y + 0.1)'}), '(low=y - 0.1, high=y + 0.1)\n', (6924, 6951), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# pylint: disable=W0201
import sys
import argparse
import yaml
import numpy as np
import random
import os.path as osp
# torch
import torch
import torch.nn as nn
import torch.optim as optim
# torchlight
import torchlight
from torchlight import str2bool
from torchlight import DictAction
from torchlight import import_class
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.metrics import confusion_matrix
from sklearn.metrics import recall_score
from .io import IO
import subprocess
class Processor(IO):
    """
        Base Processor

        Wires together argument parsing, environment/seed setup, model and
        weight loading, data loaders, and the train/test driver loop in
        start().  load_optimizer(), train() and test() are stubs meant to
        be overridden by subclasses.
    """
    def __init__(self, argv=None):
        # Build every component in dependency order; each load_* helper
        # stores its result on self.
        self.load_arg(argv)
        self.init_environment()
        self.load_model()
        self.load_weights()
        self.gpu()
        self.load_data()
        self.load_optimizer()
        self.label = []
    def init_environment(self):
        """Reset bookkeeping dicts and seed all RNGs for reproducibility."""
        super().init_environment()
        self.result = dict()
        self.iter_info = dict()
        self.epoch_info = dict()
        self.meta_info = dict(epoch=0, iter=0)
        self.set_seed(self.arg.seed)
    def get_gpu_memory_map(self):
        """Get the current gpu usage.
        Returns
        -------
        usage: dict
            Keys are device ids as integers.
            Values are memory usage as integers in MB.
        """
        # Queries nvidia-smi; one output line per visible GPU.
        result = subprocess.check_output(
            [
                'nvidia-smi', '--query-gpu=memory.used',
                '--format=csv,nounits,noheader'
            ], encoding='utf-8')
        # Convert lines into a dictionary
        gpu_memory = [int(x) for x in result.strip().split('\n')]
        gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))
        return gpu_memory_map
    def load_optimizer(self):
        """Hook for subclasses to construct self.optimizer; no-op here."""
        pass
    def test_conf(self, evaluation=True):
        """Evaluate on the test loader and save a confusion-matrix plot.

        Stores logits in self.result (and labels in self.label when
        ``evaluation`` is True), prints epoch statistics and top-k
        accuracy, then writes 'confusion.jpg' into the work directory.
        NOTE(review): the matrix is normalised by the row-0 count only —
        confirm that all classes are balanced in the test set.
        """
        self.model.eval()
        loader = self.data_loader['test']
        loss_value = []
        result_frag = []
        label_frag = []
        for data, label, index, _ in loader:
            # get data
            data = data.float().to(self.dev)
            label = label.long().to(self.dev)
            # inference
            with torch.no_grad():
                output = self.model(data)
            result_frag.append(output.data.cpu().numpy())
            # get loss
            if evaluation:
                loss = self.loss(output, label)
                loss_value.append(loss.mean().item())
                label_frag.append(label.data.cpu().numpy())
        self.result = np.concatenate(result_frag)
        if evaluation:
            self.label = np.concatenate(label_frag)
            self.epoch_info['mean_loss']= np.mean(loss_value)
            self.show_epoch_info()
            # show top-k accuracy
            for k in self.arg.show_topk:
                self.show_topk(k)
        # Top-1 prediction: index of the largest logit per sample.
        rank = self.result.argsort()
        rank = rank[:, -1]
        plt.figure(figsize=(5,5))
        confusion = confusion_matrix(self.label, rank)
        print(confusion[0, :].sum())
        confusion = confusion / confusion[0, :].sum()
        confusion = 100 * confusion
        plt.matshow(confusion, cmap=plt.cm.Greens)
        plt.colorbar()
        # for i in range(len(confusion)):
        #     for j in range(len(confusion)):
        #         string = str(round(confusion[i,j],1))
        #         plt.annotate(string, xy=(i, j), horizontalalignment='center', verticalalignment='center', fontsize=8)
        plt.title('Ours', fontsize=18)
        plt.ylabel('True label', fontsize=15)
        plt.xlabel('Predicted label', fontsize=15)
        plt.savefig(osp.join(self.arg.work_dir, 'confusion.jpg'), bbox_inches='tight')
    def save_model(self, model, name):
        """Checkpoint model, sensinet and optimizer state to work_dir/name.

        NOTE(review): relies on self.work_dir, self.sensinet and
        self.optimizer_sensinet being set by a subclass / the IO base —
        confirm before calling from new subclasses.
        """
        model_path = '{}/{}'.format(self.work_dir, name)
        torch.save({
            'model_state_dict': self.model.state_dict(),
            'sensinet_state_dict': self.sensinet.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'optimizer_sensinet_state_dict': self.optimizer_sensinet.state_dict(),
            'meta_epoch': self.meta_info['epoch'],
            'meta_iter': self.meta_info['iter']
        }, model_path)
        self.print_log('The model has been saved as {}.'.format(model_path))
    def load_weights(self):
        """Load a plain state_dict from --weights into the model, if given."""
        # self.arg.phase = 'test'
        # self.arg.weights = osp.join(self.arg.work_dir, 'best_model.pt')
        if self.arg.weights:
            checkpoint = torch.load(self.arg.weights)
            self.model.load_state_dict(checkpoint)
            # self.model.load_state_dict(checkpoint['model_state_dict'])
            # self.sensinet.load_state_dict(checkpoint['sensinet_state_dict'])
            # self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            # self.optimizer_sensinet.load_state_dict(checkpoint['optimizer_sensinet_state_dict'])
            # self.arg.start_epoch = checkpoint['meta_epoch']
            # self.meta_info['meta_iter'] = checkpoint['meta_iter']
    def show_topk(self, k):
        """Log top-k accuracy (overall and per class) and track the best model.

        NOTE(review): reads self.best_acc, which is not initialised in this
        class — it must be set elsewhere (subclass / IO base); confirm.
        """
        rank = self.result.argsort()
        hit_top_k = [l in rank[i, -k:] for i, l in enumerate(self.label)]
        hit_top_k_cls = []
        hit_top_k_cls_num = []
        for cls in range(self.arg.model_args['num_classes']):
            hit_top_k_cls.append([(l in rank[i, -k:]) * (l == cls) for i, l in enumerate(self.label)])
            hit_top_k_cls_num.append([l == cls for i, l in enumerate(self.label)])
        accuracy = sum(hit_top_k) * 1.0 / len(hit_top_k)
        accuracy_cls = [sum(hit_top_k_cls[i]) * 1.0 / sum(hit_top_k_cls_num[i]) for i in range(self.arg.model_args['num_classes'])]
        if accuracy > self.best_acc:
            self.best_acc = accuracy
            filename = 'best_model.pt'
            self.io.save_model(self.model, filename)
        self.train_writer.add_scalar('accuracy/test_acc', 100 * accuracy, self.meta_info['epoch'])
        for i in range(self.arg.model_args['num_classes']):
            self.train_writer.add_scalar('accuracy/test_acc_cls_' + str(i), 100 * accuracy_cls[i], self.meta_info['epoch'])
        self.io.print_log('\tTop{}: {:.2f}%'.format(k, 100 * accuracy))
        self.io.print_log('\tBest accuracy Top{}: {:.2f}%'.format(k, 100 * self.best_acc))
    def load_data(self):
        """Build train / meta_train / test DataLoaders from the feeder class."""
        Feeder = import_class(self.arg.feeder)
        if 'debug' not in self.arg.train_feeder_args:
            self.arg.train_feeder_args['debug'] = self.arg.debug
        self.data_loader = dict()
        if self.arg.phase == 'train':
            self.data_loader['train'] = torch.utils.data.DataLoader(
                dataset=Feeder(**self.arg.train_feeder_args),
                batch_size=self.arg.batch_size,
                shuffle=True,
                num_workers=self.arg.num_worker * torchlight.ngpu(
                    self.arg.device),
                drop_last=True, pin_memory=True)
            # Separate loader over the same data for meta-learning batches.
            self.data_loader['meta_train'] = torch.utils.data.DataLoader(
                dataset=Feeder(**self.arg.train_feeder_args),
                batch_size=self.arg.batch_size,
                shuffle=True,
                num_workers=self.arg.num_worker * torchlight.ngpu(
                    self.arg.device),
                drop_last=True, pin_memory=True)
        if self.arg.test_feeder_args:
            self.data_loader['test'] = torch.utils.data.DataLoader(
                dataset=Feeder(**self.arg.test_feeder_args),
                batch_size=self.arg.test_batch_size,
                shuffle=False,
                num_workers=self.arg.num_worker * torchlight.ngpu(
                    self.arg.device), pin_memory=True)
    def show_epoch_info(self):
        """Print self.epoch_info key/value pairs (and mirror to pavi if enabled)."""
        for k, v in self.epoch_info.items():
            self.io.print_log('\t{}: {}'.format(k, v))
        if self.arg.pavi_log:
            self.io.log('train', self.meta_info['iter'], self.epoch_info)
    def show_iter_info(self):
        """Print self.iter_info every --log_interval iterations."""
        if self.meta_info['iter'] % self.arg.log_interval == 0:
            info ='\tIter {} Done.'.format(self.meta_info['iter'])
            for k, v in self.iter_info.items():
                if isinstance(v, float):
                    info = info + ' | {}: {:.4f}'.format(k, v)
                else:
                    info = info + ' | {}: {}'.format(k, v)
            self.io.print_log(info)
            if self.arg.pavi_log:
                self.io.log('train', self.meta_info['iter'], self.iter_info)
    def train(self):
        """Dummy training epoch (fixed zero loss); override in subclasses."""
        for _ in range(100):
            self.iter_info['loss'] = 0
            self.show_iter_info()
            self.meta_info['iter'] += 1
        self.epoch_info['mean loss'] = 0
        self.show_epoch_info()
    def test(self):
        """Dummy evaluation pass (fixed unit loss); override in subclasses."""
        for _ in range(100):
            self.iter_info['loss'] = 1
            self.show_iter_info()
        self.epoch_info['mean loss'] = 1
        self.show_epoch_info()
    def set_seed(self, seed):
        """Seed torch (CPU + all GPUs), numpy and random; make cuDNN deterministic."""
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        np.random.seed(seed)
        random.seed(seed)
        torch.backends.cudnn.deterministic = True
    def start(self):
        """Drive the configured phase: epoch loop for 'train', one eval for 'test'."""
        self.io.print_log('Parameters:\n{}\n'.format(str(vars(self.arg))))
        # training phase
        if self.arg.phase == 'train':
            for epoch in range(self.arg.start_epoch, self.arg.num_epoch):
                self.meta_info['epoch'] = epoch
                # training
                self.io.print_log('Training epoch: {}'.format(epoch))
                self.train()
                self.io.print_log('Done.')
                # save model
                if ((epoch + 1) % self.arg.save_interval == 0) or (
                        epoch + 1 == self.arg.num_epoch):
                    filename = 'epoch{}_model.pt'.format(epoch + 1)
                    self.io.save_model(self.model, filename)
                # evaluation
                if ((epoch + 1) % self.arg.eval_interval == 0) or (
                        epoch + 1 == self.arg.num_epoch):
                    self.io.print_log('Eval epoch: {}'.format(epoch))
                    self.test()
                    self.io.print_log('Done.')
        # test phase
        elif self.arg.phase == 'test':
            # the path of weights must be appointed
            if self.arg.weights is None:
                raise ValueError('Please appoint --weights.')
            self.io.print_log('Model: {}.'.format(self.arg.model))
            self.io.print_log('Weights: {}.'.format(self.arg.weights))
            # evaluation
            self.io.print_log('Evaluation Start:')
            self.test()
            self.io.print_log('Done.\n')
            # save the output of model
            if self.arg.save_result:
                result_dict = dict(
                    zip(self.data_loader['test'].dataset.sample_name,
                        self.result))
                self.io.save_pkl(result_dict, 'test_result.pkl')
    @staticmethod
    def get_parser(add_help=False):
        """Build the base argparse parser shared by all processors."""
        #region arguments yapf: disable
        # parameter priority: command line > config > default
        parser = argparse.ArgumentParser( add_help=add_help, description='Base Processor')
        parser.add_argument('-w', '--work_dir', default='./work_dir/tmp', help='the work folder for storing results')
        parser.add_argument('-c', '--config', default=None, help='path to the configuration file')
        # processor
        parser.add_argument('--phase', default='train', help='must be train or test')
        parser.add_argument('--save_result', type=str2bool, default=False, help='if ture, the output of the model will be stored')
        parser.add_argument('--start_epoch', type=int, default=0, help='start training from which epoch')
        parser.add_argument('--num_epoch', type=int, default=80, help='stop training in which epoch')
        parser.add_argument('--use_gpu', type=str2bool, default=True, help='use GPUs or not')
        parser.add_argument('--device', type=int, default=0, nargs='+', help='the indexes of GPUs for training or testing')
        # visulize and debug
        parser.add_argument('--log_interval', type=int, default=100, help='the interval for printing messages (#iteration)')
        parser.add_argument('--save_interval', type=int, default=10, help='the interval for storing models (#iteration)')
        parser.add_argument('--eval_interval', type=int, default=5, help='the interval for evaluating models (#iteration)')
        parser.add_argument('--save_log', type=str2bool, default=True, help='save logging or not')
        parser.add_argument('--print_log', type=str2bool, default=True, help='print logging or not')
        parser.add_argument('--pavi_log', type=str2bool, default=False, help='logging on pavi or not')
        # feeder
        parser.add_argument('--feeder', default='feeder.feeder', help='data loader will be used')
        parser.add_argument('--num_worker', type=int, default=4, help='the number of worker per gpu for data loader')
        parser.add_argument('--train_feeder_args', action=DictAction, default=dict(), help='the arguments of data loader for training')
        parser.add_argument('--train_meta_feeder_args', action=DictAction, default=dict(), help='the arguments of meta data loader for training')
        parser.add_argument('--test_feeder_args', action=DictAction, default=dict(), help='the arguments of data loader for test')
        parser.add_argument('--batch_size', type=int, default=256, help='training batch size')
        parser.add_argument('--test_batch_size', type=int, default=256, help='test batch size')
        parser.add_argument('--debug', action="store_true", help='less data, faster loading')
        # model
        parser.add_argument('--model', default=None, help='the model will be used')
        parser.add_argument('--model_args', action=DictAction, default=dict(), help='the arguments of model')
        parser.add_argument('--weights', default=None, help='the weights for network initialization')
        parser.add_argument('--ignore_weights', type=str, default=[], nargs='+', help='the name of weights which will be ignored in the initialization')
        parser.add_argument('--warmup_epoch', type=int, default=0, help='the name of weights which will be ignored in the initialization')
        parser.add_argument('--alpha_factor', type=float, default=0.1, help='initial learning rate')
        parser.add_argument('--seed', type=int, default=1, help='the model will be used')
        #endregion yapf: enable
        return parser
| [
"matplotlib.pyplot.ylabel",
"torchlight.import_class",
"numpy.mean",
"argparse.ArgumentParser",
"matplotlib.pyplot.xlabel",
"numpy.random.seed",
"numpy.concatenate",
"sklearn.metrics.confusion_matrix",
"subprocess.check_output",
"matplotlib.use",
"matplotlib.pyplot.title",
"matplotlib.pyplot.m... | [((365, 386), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (379, 386), False, 'import matplotlib\n'), ((1391, 1512), 'subprocess.check_output', 'subprocess.check_output', (["['nvidia-smi', '--query-gpu=memory.used', '--format=csv,nounits,noheader']"], {'encoding': '"""utf-8"""'}), "(['nvidia-smi', '--query-gpu=memory.used',\n '--format=csv,nounits,noheader'], encoding='utf-8')\n", (1414, 1512), False, 'import subprocess\n'), ((2574, 2601), 'numpy.concatenate', 'np.concatenate', (['result_frag'], {}), '(result_frag)\n', (2588, 2601), True, 'import numpy as np\n'), ((2967, 2993), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (2977, 2993), True, 'import matplotlib.pyplot as plt\n'), ((3013, 3047), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['self.label', 'rank'], {}), '(self.label, rank)\n', (3029, 3047), False, 'from sklearn.metrics import confusion_matrix\n'), ((3183, 3225), 'matplotlib.pyplot.matshow', 'plt.matshow', (['confusion'], {'cmap': 'plt.cm.Greens'}), '(confusion, cmap=plt.cm.Greens)\n', (3194, 3225), True, 'import matplotlib.pyplot as plt\n'), ((3235, 3249), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3247, 3249), True, 'import matplotlib.pyplot as plt\n'), ((3523, 3553), 'matplotlib.pyplot.title', 'plt.title', (['"""Ours"""'], {'fontsize': '(18)'}), "('Ours', fontsize=18)\n", (3532, 3553), True, 'import matplotlib.pyplot as plt\n'), ((3562, 3599), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {'fontsize': '(15)'}), "('True label', fontsize=15)\n", (3572, 3599), True, 'import matplotlib.pyplot as plt\n'), ((3608, 3650), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {'fontsize': '(15)'}), "('Predicted label', fontsize=15)\n", (3618, 3650), True, 'import matplotlib.pyplot as plt\n'), ((6361, 6390), 'torchlight.import_class', 'import_class', (['self.arg.feeder'], {}), 
'(self.arg.feeder)\n', (6373, 6390), False, 'from torchlight import import_class\n'), ((8936, 8959), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (8953, 8959), False, 'import torch\n'), ((8968, 9000), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (8994, 9000), False, 'import torch\n'), ((9009, 9029), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (9023, 9029), True, 'import numpy as np\n'), ((9038, 9055), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (9049, 9055), False, 'import random\n'), ((11108, 11180), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'add_help': 'add_help', 'description': '"""Base Processor"""'}), "(add_help=add_help, description='Base Processor')\n", (11131, 11180), False, 'import argparse\n'), ((2661, 2687), 'numpy.concatenate', 'np.concatenate', (['label_frag'], {}), '(label_frag)\n', (2675, 2687), True, 'import numpy as np\n'), ((2730, 2749), 'numpy.mean', 'np.mean', (['loss_value'], {}), '(loss_value)\n', (2737, 2749), True, 'import numpy as np\n'), ((3680, 3724), 'os.path.join', 'osp.join', (['self.arg.work_dir', '"""confusion.jpg"""'], {}), "(self.arg.work_dir, 'confusion.jpg')\n", (3688, 3724), True, 'import os.path as osp\n'), ((4527, 4555), 'torch.load', 'torch.load', (['self.arg.weights'], {}), '(self.arg.weights)\n', (4537, 4555), False, 'import torch\n'), ((2221, 2236), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2234, 2236), False, 'import torch\n'), ((6841, 6873), 'torchlight.ngpu', 'torchlight.ngpu', (['self.arg.device'], {}), '(self.arg.device)\n', (6856, 6873), False, 'import torchlight\n'), ((7209, 7241), 'torchlight.ngpu', 'torchlight.ngpu', (['self.arg.device'], {}), '(self.arg.device)\n', (7224, 7241), False, 'import torchlight\n'), ((7614, 7646), 'torchlight.ngpu', 'torchlight.ngpu', (['self.arg.device'], {}), '(self.arg.device)\n', (7629, 7646), False, 'import torchlight\n')] |
from pyridge.generic.scaler import Scaler
import numpy as np
class LogScaler(Scaler):
    """
    Scaler that maps values onto a logarithmic scale.

    The transform shifts values by ``1 - min`` so the smallest fitted value
    maps to ``log(1) = 0`` and the argument of ``log`` stays >= 1.

    Fix: the previous ``self.min_: np.float`` was a bare annotation — it
    never assigned the attribute (so get_params() before fit() raised
    AttributeError) and used the ``np.float`` alias removed from NumPy.
    """
    def __init__(self):
        # Column-wise minimum of the fitted data; None until fit() is called.
        self.min_ = None

    def get_params(self):
        """Return the fitted parameters as a dict."""
        return {'min_': self.min_}

    def fit(self, values):
        """Store the column-wise minimum of ``values``."""
        self.min_ = np.min(values, axis=0)

    def transform(self, values):
        """Map ``values`` to log scale using the fitted minimum."""
        return np.log(values + (1.0 - self.min_))

    def fit_transform(self, values):
        """Fit on ``values`` and return their log-scaled version."""
        self.fit(values)
        return self.transform(values)

    def inverse_transform(self, values):
        """Undo transform(): exponentiate and shift back to the original scale."""
        return np.exp(values) - (1.0 - self.min_)
| [
"numpy.exp",
"numpy.log",
"numpy.min"
] | [((335, 357), 'numpy.min', 'np.min', (['values'], {'axis': '(0)'}), '(values, axis=0)\n', (341, 357), True, 'import numpy as np\n'), ((407, 441), 'numpy.log', 'np.log', (['(values + (1.0 - self.min_))'], {}), '(values + (1.0 - self.min_))\n', (413, 441), True, 'import numpy as np\n'), ((600, 614), 'numpy.exp', 'np.exp', (['values'], {}), '(values)\n', (606, 614), True, 'import numpy as np\n')] |
#! /usr/bin/env python
# title : TurboTest.py
# description : This script tests the turbo decoding for parallel concatenated convolutional codes
# author : <NAME>
# python_version : 3.5.2
import numpy as np
from numpy.random import rand, randn
from scipy.stats import norm
import matplotlib.pyplot as plt
from Trellis import Trellis
from ConvTrellisDef import ConvTrellisDef
from ConvEncoder import TurboEncoder
from SisoDecoder import SisoDecoder
from Interleaver import Interleaver
from TurboDecoder import TurboDecoder
def main(n_data=512, n_blocks=10, verbose=True, do_plot=True):
    """Simulate BER of a parallel-concatenated (turbo) code over BPSK/AWGN.

    Parameters
    ----------
    n_data : int
        Number of information bits per code block.
    n_blocks : int
        Maximum number of blocks simulated per Eb/N0 point.
    verbose : bool
        Print per-block error counts and progress.
    do_plot : bool
        Draw BER-vs-Eb/N0 curves (one per decoder iteration) plus the
        uncoded BPSK reference curve.
    """
    # parameters
    EbNodB_range = [-1, 0, 0.8, 1, 1.2, 1.3]
    gp_forward = [[1, 1, 0, 1]]
    gp_feedback = [0, 0, 1, 1]
    # create interleaver instances
    il = Interleaver()
    il.gen_qpp_perm(n_data)
    # create trellises, encoders and decoders instances
    trellis_p = Trellis(ConvTrellisDef(gp_forward, gp_feedback))
    trellis_identity = Trellis(ConvTrellisDef([[1]]))
    csiso = SisoDecoder(trellis_p)
    csiso.backward_init = False
    trellises = [trellis_identity, trellis_p, trellis_p]
    turboenc = TurboEncoder(trellises, il)
    td = TurboDecoder(il, csiso, csiso)
    # loop over all SNRs
    error_vec = []
    blocks_vec = []
    for EbNodB in EbNodB_range:
        if verbose:
            print("----- Simulating EbN0 = " + str(EbNodB) + " -----")
        # loop over several code blocks
        errors_acc = [0] * td.iterations
        blocks = 0
        for k in range(n_blocks):
            blocks += 1
            # generate data
            data_u = list((rand(n_data) >= 0.5).astype(int))
            # turbo encoding
            encoded_streams = turboenc.encode(data_u)
            encoded = np.array(turboenc.flatten(encoded_streams))
            # additive noise
            EbNo = 10.0 ** (EbNodB / 10.0)
            # NOTE(review): the sqrt(2 + 1) factor presumably accounts for
            # the rate-1/3 energy normalisation — confirm against the
            # encoder's actual output rate.
            noise_std = np.sqrt(2 + 1) / np.sqrt(2 * EbNo)
            encoded_rx = 2 * encoded - 1 + noise_std * randn(len(encoded))
            # turbo decoding
            [ys, yp1, yp2] = turboenc.extract(encoded_rx) # extract streams
            d, errors = td.decode(ys, yp1, yp2, data_u)
            if verbose:
                print('block : ' + str(k))
                print('errors per iteration: ' + str(errors))
            errors_acc = list(np.array(errors_acc) + np.array(errors))
            # NOTE(review): index 3 = error count of the 4th decoder
            # iteration; assumes td.iterations > 3 — confirm.
            if errors_acc[3] > 3 * n_data: # simulation stopping criteria
                break
        error_vec.append(errors_acc)
        blocks_vec.append(blocks)
    # summary output and plot
    error_vec_t = list(np.transpose(np.array(error_vec)))
    if verbose:
        print("Simulated errors: " + str(error_vec_t))
    if do_plot:
        for i in range(0, td.iterations):
            ber = list(np.array(error_vec_t[i]) / (np.array(blocks_vec) * n_data))
            plt.plot(EbNodB_range, ber, 'x-')
        # Theoretical uncoded BPSK BER: Q(sqrt(2*Eb/N0)).
        ber_uncoded = norm.sf(np.sqrt(2 * np.array(10 ** (np.array(EbNodB_range) / 10))))
        plt.plot(EbNodB_range, ber_uncoded, 'k:')
        plt.xscale('linear')
        # NOTE(review): 'nonposy' was renamed 'nonpositive' in Matplotlib
        # 3.3 — confirm the pinned Matplotlib version still accepts it.
        plt.yscale('log', nonposy='mask')
        plt.xlabel('EbNo(dB)')
        plt.ylabel('BER')
        plt.grid(True, which="both")
        plt.title('BPSK modulation, Turbo Code, max log MAP decoding')
        plt.show()
if __name__ == "__main__":
    # Script entry point: run the BER simulation with default parameters.
    main()
| [
"ConvEncoder.TurboEncoder",
"matplotlib.pyplot.grid",
"numpy.sqrt",
"numpy.random.rand",
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"ConvTrellisDef.ConvTrellisDef",
"Interleaver.Interleaver",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.yscale",
"numpy.... | [((782, 795), 'Interleaver.Interleaver', 'Interleaver', ([], {}), '()\n', (793, 795), False, 'from Interleaver import Interleaver\n'), ((1012, 1034), 'SisoDecoder.SisoDecoder', 'SisoDecoder', (['trellis_p'], {}), '(trellis_p)\n', (1023, 1034), False, 'from SisoDecoder import SisoDecoder\n'), ((1139, 1166), 'ConvEncoder.TurboEncoder', 'TurboEncoder', (['trellises', 'il'], {}), '(trellises, il)\n', (1151, 1166), False, 'from ConvEncoder import TurboEncoder\n'), ((1176, 1206), 'TurboDecoder.TurboDecoder', 'TurboDecoder', (['il', 'csiso', 'csiso'], {}), '(il, csiso, csiso)\n', (1188, 1206), False, 'from TurboDecoder import TurboDecoder\n'), ((905, 944), 'ConvTrellisDef.ConvTrellisDef', 'ConvTrellisDef', (['gp_forward', 'gp_feedback'], {}), '(gp_forward, gp_feedback)\n', (919, 944), False, 'from ConvTrellisDef import ConvTrellisDef\n'), ((977, 998), 'ConvTrellisDef.ConvTrellisDef', 'ConvTrellisDef', (['[[1]]'], {}), '([[1]])\n', (991, 998), False, 'from ConvTrellisDef import ConvTrellisDef\n'), ((2995, 3036), 'matplotlib.pyplot.plot', 'plt.plot', (['EbNodB_range', 'ber_uncoded', '"""k:"""'], {}), "(EbNodB_range, ber_uncoded, 'k:')\n", (3003, 3036), True, 'import matplotlib.pyplot as plt\n'), ((3045, 3065), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""linear"""'], {}), "('linear')\n", (3055, 3065), True, 'import matplotlib.pyplot as plt\n'), ((3074, 3107), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {'nonposy': '"""mask"""'}), "('log', nonposy='mask')\n", (3084, 3107), True, 'import matplotlib.pyplot as plt\n'), ((3116, 3138), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""EbNo(dB)"""'], {}), "('EbNo(dB)')\n", (3126, 3138), True, 'import matplotlib.pyplot as plt\n'), ((3147, 3164), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""BER"""'], {}), "('BER')\n", (3157, 3164), True, 'import matplotlib.pyplot as plt\n'), ((3173, 3201), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {'which': '"""both"""'}), "(True, which='both')\n", (3181, 
3201), True, 'import matplotlib.pyplot as plt\n'), ((3210, 3272), 'matplotlib.pyplot.title', 'plt.title', (['"""BPSK modulation, Turbo Code, max log MAP decoding"""'], {}), "('BPSK modulation, Turbo Code, max log MAP decoding')\n", (3219, 3272), True, 'import matplotlib.pyplot as plt\n'), ((3281, 3291), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3289, 3291), True, 'import matplotlib.pyplot as plt\n'), ((2617, 2636), 'numpy.array', 'np.array', (['error_vec'], {}), '(error_vec)\n', (2625, 2636), True, 'import numpy as np\n'), ((2863, 2896), 'matplotlib.pyplot.plot', 'plt.plot', (['EbNodB_range', 'ber', '"""x-"""'], {}), "(EbNodB_range, ber, 'x-')\n", (2871, 2896), True, 'import matplotlib.pyplot as plt\n'), ((1893, 1907), 'numpy.sqrt', 'np.sqrt', (['(2 + 1)'], {}), '(2 + 1)\n', (1900, 1907), True, 'import numpy as np\n'), ((1910, 1927), 'numpy.sqrt', 'np.sqrt', (['(2 * EbNo)'], {}), '(2 * EbNo)\n', (1917, 1927), True, 'import numpy as np\n'), ((2339, 2359), 'numpy.array', 'np.array', (['errors_acc'], {}), '(errors_acc)\n', (2347, 2359), True, 'import numpy as np\n'), ((2362, 2378), 'numpy.array', 'np.array', (['errors'], {}), '(errors)\n', (2370, 2378), True, 'import numpy as np\n'), ((2791, 2815), 'numpy.array', 'np.array', (['error_vec_t[i]'], {}), '(error_vec_t[i])\n', (2799, 2815), True, 'import numpy as np\n'), ((2819, 2839), 'numpy.array', 'np.array', (['blocks_vec'], {}), '(blocks_vec)\n', (2827, 2839), True, 'import numpy as np\n'), ((1612, 1624), 'numpy.random.rand', 'rand', (['n_data'], {}), '(n_data)\n', (1616, 1624), False, 'from numpy.random import rand, randn\n'), ((2955, 2977), 'numpy.array', 'np.array', (['EbNodB_range'], {}), '(EbNodB_range)\n', (2963, 2977), True, 'import numpy as np\n')] |
## GROUP
import numpy as np
import cv2
from PIL import Image
import os
from options.test_options import TestOptions
from options.train_options import TrainOptions
from data import create_dataset
from models import create_model
from util.visualizer import save_images
from util import html
import torch
import torchvision
import torchvision.transforms as transforms
#### GROUP5 code ####
# Webcam capture on device 0; a 1-frame buffer keeps reads close to live.
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_BUFFERSIZE, 1);
framesize = 600 # side length of the square frame cut from the camera image
grayscale = False  # optionally convert captured frames to grayscale first
# setup cyclegan model:
opt = TrainOptions().parse() # get test options
# hard-code some parameters for test
opt.num_threads = 0 # test code only supports num_threads = 1
opt.batch_size = 1 # test code only supports batch_size = 1
opt.serial_batches = True # disable data shuffling; comment this line if results on randomly chosen images are needed.
opt.no_flip = True # no flip; comment this line if results on flipped images are needed.
opt.display_id = -1 # no visdom display; the test code saves the results to a HTML file.
opt.phase = 'test'
opt.epoch = 'latest'  # load the most recent checkpoint
#opt.name="cartoonfaces"
opt.name="cartoonfaces-ld0_0001"
#opt.name="cartoonfaces-ld0_00001"
# hack a bit to get it up and running: reuse a TrainOptions config in eval mode
opt.isTrain = False
model = create_model(opt) # create a model given opt.model and other options
model.isTrain = False # fix this
model.setup(opt) # regular setup: load and print networks; create schedulers
# setup dataset
dataset = create_dataset(opt)
brightness = 0.5
# Preprocessing: mirror, centre-crop to 256x256, then normalize each channel
# with mean/std 0.5 (maps pixel values to roughly [-1, 1]).
transform = transforms.Compose([torchvision.transforms.functional.hflip,
                        transforms.CenterCrop(256),
                        transforms.ToTensor(),
                        transforms.Normalize((brightness, brightness, brightness),
                                             (0.5, 0.5, 0.5))])
def to_image(tensor, nrow=8, padding=2,
               normalize=False, range=None, scale_each=False, pad_value=0):
    """Convert a tensor (or mini-batch of tensors) into a PIL image.

    Adapted from torchvision utils: the tensor is laid out as an image grid
    via ``make_grid`` and then converted to a ``PIL.Image``.

    Args:
        tensor (Tensor or list): Image to convert. A mini-batch tensor is
            rendered as a grid of images by calling ``make_grid``.
        nrow, padding, normalize, range, scale_each, pad_value: forwarded
            unchanged to ``torchvision.utils.make_grid``.
            NOTE(review): the ``range`` parameter shadows the builtin
            ``range`` and was renamed ``value_range`` in newer torchvision —
            confirm the installed torchvision version before upgrading.

    Returns:
        PIL.Image.Image built from the grid, as uint8 HWC data.
    """
    grid = torchvision.utils.make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,
                                    normalize=normalize, range=range, scale_each=scale_each)
    # Add 0.5 after unnormalizing to [0, 255] to round to nearest integer
    ndarr = grid.mul_(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()
    im = Image.fromarray(ndarr)
    return im
def to_image2(tensor):
    """Convert a tensor to a PIL image by way of ``tensor2im``."""
    return Image.fromarray(tensor2im(tensor))
def tensor2im(input_image, imtype=np.uint8):
    """Convert a torch Tensor (or numpy array) into a numpy image array.

    Parameters:
        input_image (tensor/ndarray) -- the image to convert; any value that
            is neither a ``torch.Tensor`` nor a numpy array is returned
            unchanged
        imtype (type) -- numpy dtype of the returned array

    Returns:
        numpy array of dtype ``imtype``. Tensor input is taken from the first
        batch element, moved to HWC layout and rescaled from [-1, 1] to
        [0, 255]; numpy input is only cast.
    """
    if isinstance(input_image, np.ndarray):
        # Already a numpy array: nothing to do beyond the dtype cast.
        return input_image.astype(imtype)
    if not isinstance(input_image, torch.Tensor):
        # Unknown type: hand it back untouched.
        return input_image
    # First element of the batch, as a float array on the CPU.
    array = input_image.data[0].cpu().float().numpy()
    if array.shape[0] == 1:
        # Replicate a single-channel image across three RGB channels.
        array = np.tile(array, (3, 1, 1))
    # CHW -> HWC, then map [-1, 1] onto [0, 255].
    array = (np.transpose(array, (1, 2, 0)) + 1) / 2.0 * 255.0
    return array.astype(imtype)
def concatenate(images):
    """Paste the given PIL images side by side onto a single RGB canvas.

    The canvas is as wide as the sum of the image widths and as tall as the
    tallest image; each image is pasted flush with the top edge.
    """
    sizes = [img.size for img in images]
    canvas_width = sum(w for w, _ in sizes)
    canvas_height = max(h for _, h in sizes)
    canvas = Image.new('RGB', (canvas_width, canvas_height))
    offset = 0
    for img, (width, _) in zip(images, sizes):
        canvas.paste(img, (offset, 0))
        offset += width
    return canvas
# When True, run one pass over the dataset translating cartoons to faces
# instead of the live webcam loop below.
show_cartoon_to_face = False
if (show_cartoon_to_face):
    for i, data in enumerate(dataset):
        theB = data['B']
        real_B = theB['img']
        #img_B = to_image2(real_B[0,:,:,:])
        img_B = to_image2(real_B)
        img_A = model.gen_A(real_B)  # B -> A generator (cartoon to face)
        img_A = to_image2(img_A)
        img_BA = concatenate([img_B, img_A])
        img_BA.save('real.jpg')
        print('real generated')
# Live loop: grab a frame, square-crop it, translate face -> cartoon, save.
while(True):
    # Read three times so stale buffered frames are discarded before use.
    ret, frame = cap.read()
    ret, frame = cap.read()
    ret, frame = cap.read()
    # Our operations on the frame come here
    if grayscale:
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    hight, width, depth = frame.shape
    crop_pixel = int((width - hight)/2) # crop square
    cropped_frame = frame[:, crop_pixel:width-crop_pixel]
    resized_frame = cv2.resize(cropped_frame, (framesize, framesize))
    # OpenCV delivers BGR; PIL expects RGB.
    cvframe = cv2.cvtColor(resized_frame, cv2.COLOR_BGR2RGB)
    pil_img = Image.fromarray(cvframe)
    img = transform(pil_img)
    img = img.view(1, 3, 256, 256)  # add a batch dimension
    img_A = to_image2(img)
    img_B = model.gen_B(img)  # A -> B generator (face to cartoon)
    #torchvision.utils.save_image(img_B[0, :, :, :], 'comic.png')
    img_B = to_image2(img_B)
    img_AB = concatenate([img_A, img_B])
    img_AB.save('comic.jpg')
    print('real generated')
    #print(frame.shape, " ", cropped_frame.shape, " ", resized_frame.shape)
    # Display the resulting frame
    #cv2.imshow('frame',resized_frame)
    #if cv2.waitKey(10) == 27:
    #break # esc to quit
# NOTE(review): the loop above never breaks (the esc handler is commented
# out), so this cleanup is unreachable until the process is killed.
cap.release()
cv2.destroyAllWindows()
| [
"torchvision.transforms.CenterCrop",
"numpy.tile",
"PIL.Image.fromarray",
"data.create_dataset",
"PIL.Image.new",
"options.train_options.TrainOptions",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"torchvision.transforms.Normalize",
"torchvision.utils.make_grid",
"cv2.resize",
... | [((419, 438), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (435, 438), False, 'import cv2\n'), ((1283, 1300), 'models.create_model', 'create_model', (['opt'], {}), '(opt)\n', (1295, 1300), False, 'from models import create_model\n'), ((1513, 1532), 'data.create_dataset', 'create_dataset', (['opt'], {}), '(opt)\n', (1527, 1532), False, 'from data import create_dataset\n'), ((5784, 5807), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5805, 5807), False, 'import cv2\n'), ((2363, 2509), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['tensor'], {'nrow': 'nrow', 'padding': 'padding', 'pad_value': 'pad_value', 'normalize': 'normalize', 'range': 'range', 'scale_each': 'scale_each'}), '(tensor, nrow=nrow, padding=padding, pad_value=\n pad_value, normalize=normalize, range=range, scale_each=scale_each)\n', (2390, 2509), False, 'import torchvision\n'), ((2713, 2735), 'PIL.Image.fromarray', 'Image.fromarray', (['ndarr'], {}), '(ndarr)\n', (2728, 2735), False, 'from PIL import Image\n'), ((2820, 2844), 'PIL.Image.fromarray', 'Image.fromarray', (['nparray'], {}), '(nparray)\n', (2835, 2844), False, 'from PIL import Image\n'), ((3976, 4019), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(total_width, max_height)'], {}), "('RGB', (total_width, max_height))\n", (3985, 4019), False, 'from PIL import Image\n'), ((5016, 5065), 'cv2.resize', 'cv2.resize', (['cropped_frame', '(framesize, framesize)'], {}), '(cropped_frame, (framesize, framesize))\n', (5026, 5065), False, 'import cv2\n'), ((5081, 5127), 'cv2.cvtColor', 'cv2.cvtColor', (['resized_frame', 'cv2.COLOR_BGR2RGB'], {}), '(resized_frame, cv2.COLOR_BGR2RGB)\n', (5093, 5127), False, 'import cv2\n'), ((5150, 5174), 'PIL.Image.fromarray', 'Image.fromarray', (['cvframe'], {}), '(cvframe)\n', (5165, 5174), False, 'from PIL import Image\n'), ((557, 571), 'options.train_options.TrainOptions', 'TrainOptions', ([], {}), '()\n', (569, 571), False, 'from options.train_options 
import TrainOptions\n'), ((1662, 1688), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(256)'], {}), '(256)\n', (1683, 1688), True, 'import torchvision.transforms as transforms\n'), ((1723, 1744), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1742, 1744), True, 'import torchvision.transforms as transforms\n'), ((1779, 1854), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(brightness, brightness, brightness)', '(0.5, 0.5, 0.5)'], {}), '((brightness, brightness, brightness), (0.5, 0.5, 0.5))\n', (1799, 1854), True, 'import torchvision.transforms as transforms\n'), ((4800, 4839), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (4812, 4839), False, 'import cv2\n'), ((3542, 3573), 'numpy.tile', 'np.tile', (['image_numpy', '(3, 1, 1)'], {}), '(image_numpy, (3, 1, 1))\n', (3549, 3573), True, 'import numpy as np\n'), ((3598, 3634), 'numpy.transpose', 'np.transpose', (['image_numpy', '(1, 2, 0)'], {}), '(image_numpy, (1, 2, 0))\n', (3610, 3634), True, 'import numpy as np\n')] |
# --------------
import pandas as pd
import scipy.stats as stats
import math
import numpy as np
import warnings
# Suppress library warnings so the console output stays readable.
warnings.filterwarnings('ignore')

# Number of loans to draw for the sampling experiment.
sample_size = 2000

# One-sided z critical value for a 95% confidence level.
z_critical = stats.norm.ppf(q = 0.95)

# `path` is supplied by the execution environment (location of the loan CSV).
data = pd.read_csv(path)

# Draw a reproducible sample and build a confidence interval for the mean
# installment amount.
data_sample = data.sample(n=sample_size, random_state=0)
sample_mean = data_sample['installment'].mean()
sample_std = data_sample['installment'].std()
margin_of_error = z_critical * (sample_std / math.sqrt(sample_size))
confidence_interval = (sample_mean - margin_of_error, sample_mean + margin_of_error)

true_mean = data['installment'].mean()
# BUG FIX: `true_mean in confidence_interval` tested tuple *membership*
# (equality with one of the two endpoints), not whether the population mean
# actually lies inside the interval.
if confidence_interval[0] <= true_mean <= confidence_interval[1]:
    print('Yes')
else:
    print('No')
# --------------
import matplotlib.pyplot as plt
import numpy as np

# Sample sizes used to illustrate the Central Limit Theorem.
sample_size = np.array([20, 50, 100])

# One subplot per sample size.
fig, axes = plt.subplots(3, 1, figsize=(10, 20))

for i in range(len(sample_size)):
    # Collect 1000 sample means of 'installment' for this sample size.
    m = []
    for j in range(1000):
        mean = data['installment'].sample(sample_size[i]).mean()
        m.append(mean)
    mean_series = pd.Series(m)
    # BUG FIX: `normed=True` was removed in matplotlib 3.x; `density=True`
    # is the supported equivalent (normalises the histogram so its area is 1).
    axes[i].hist(mean_series, density=True)

# Displaying the plot
plt.show()
# Code ends here
#Code ends here
# --------------
# Task: one-sample z-test on the interest rate of small-business loans.
from statsmodels.stats.weightstats import ztest
# Convert 'int.rate' from a percentage string (e.g. '11.2%') to a fraction.
data['int.rate']= data['int.rate'].map(lambda x: x.strip('%')).astype('float64')
data['int.rate'] = data['int.rate'] / 100
# H1: small-business loans carry a *larger* mean rate than the overall mean.
z_statistic, p_value = ztest(x1=data[data['purpose']=='small_business']['int.rate'], value=data['int.rate'].mean(), alternative='larger')
print("z_statistic", z_statistic)
print("p_value", p_value)
# --------------
# Task: two-sample z-test comparing installments of defaulted vs repaid loans.
from statsmodels.stats.weightstats import ztest
# H1: the mean installment differs between the two groups (two-sided test).
z_statistic, p_value = ztest(x1=data[data['paid.back.loan']=='No']['installment'], x2=data[data['paid.back.loan']=='Yes']['installment'])
print(p_value)
# --------------
# Task: chi-square test of independence between loan purpose and repayment.
from scipy.stats import chi2_contingency
#Critical value
critical_value = stats.chi2.ppf(q = 0.95, # critical value for 95% confidence
                      df = 6)   # df = number of purpose categories - 1
# Contingency table: purpose counts split by repayment outcome.
yes = data[data['paid.back.loan'] == 'Yes']['purpose'].value_counts()
no = data[data['paid.back.loan'] == 'No']['purpose'].value_counts()
observed = pd.concat([yes.T, no.T], axis=1, keys=['Yes', 'No'])
chi2, p, dof, ex = chi2_contingency(observed)
print('Chi-2 Statistic: ',chi2)
print('Critical Value: ',critical_value)
print('p-value: ',p)
# Reject independence when the statistic exceeds the critical value.
if(chi2>critical_value):
    print('Null Hypothesis Rejected!')
else:
    print('Null Hypothesis Accepted!')
| [
"warnings.filterwarnings",
"pandas.Series",
"pandas.read_csv",
"scipy.stats.chi2_contingency",
"scipy.stats.norm.ppf",
"numpy.array",
"scipy.stats.chi2.ppf",
"statsmodels.stats.weightstats.ztest",
"pandas.concat",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((119, 152), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (142, 152), False, 'import warnings\n'), ((220, 242), 'scipy.stats.norm.ppf', 'stats.norm.ppf', ([], {'q': '(0.95)'}), '(q=0.95)\n', (234, 242), True, 'import scipy.stats as stats\n'), ((297, 314), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (308, 314), True, 'import pandas as pd\n'), ((868, 891), 'numpy.array', 'np.array', (['[20, 50, 100]'], {}), '([20, 50, 100])\n', (876, 891), True, 'import numpy as np\n'), ((953, 989), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'figsize': '(10, 20)'}), '(3, 1, figsize=(10, 20))\n', (965, 989), True, 'import matplotlib.pyplot as plt\n'), ((1585, 1595), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1593, 1595), True, 'import matplotlib.pyplot as plt\n'), ((2207, 2330), 'statsmodels.stats.weightstats.ztest', 'ztest', ([], {'x1': "data[data['paid.back.loan'] == 'No']['installment']", 'x2': "data[data['paid.back.loan'] == 'Yes']['installment']"}), "(x1=data[data['paid.back.loan'] == 'No']['installment'], x2=data[data[\n 'paid.back.loan'] == 'Yes']['installment'])\n", (2212, 2330), False, 'from statsmodels.stats.weightstats import ztest\n'), ((2465, 2493), 'scipy.stats.chi2.ppf', 'stats.chi2.ppf', ([], {'q': '(0.95)', 'df': '(6)'}), '(q=0.95, df=6)\n', (2479, 2493), True, 'import scipy.stats as stats\n'), ((2795, 2847), 'pandas.concat', 'pd.concat', (['[yes.T, no.T]'], {'axis': '(1)', 'keys': "['Yes', 'No']"}), "([yes.T, no.T], axis=1, keys=['Yes', 'No'])\n", (2804, 2847), True, 'import pandas as pd\n'), ((2868, 2894), 'scipy.stats.chi2_contingency', 'chi2_contingency', (['observed'], {}), '(observed)\n', (2884, 2894), False, 'from scipy.stats import chi2_contingency\n'), ((1446, 1458), 'pandas.Series', 'pd.Series', (['m'], {}), '(m)\n', (1455, 1458), True, 'import pandas as pd\n')] |
import numpy as np
from pysal.lib.common import requires
@requires('matplotlib')
def shift_colormap(cmap, start=0, midpoint=0.5, stop=1.0, name='shiftedcmap'):
    '''
    Build a copy of *cmap* whose "center" is moved to *midpoint*.

    Useful for data with a negative min and positive max when the middle of
    the colormap's dynamic range should sit at zero.

    Parameters
    ----------
    cmap : The matplotlib colormap to be altered
    start : Offset from lowest point in the colormap's range.
            Defaults to 0.0 (no lower ofset). Should be between
            0.0 and `midpoint`.
    midpoint : The new center of the colormap. Defaults to
               0.5 (no shift). Should be between 0.0 and 1.0. In
               general, this should be 1 - vmax/(vmax + abs(vmin)).
               For example if your data range from -15.0 to +5.0 and
               you want the center of the colormap at 0.0, `midpoint`
               should be set to 1 - 5/(5 + 15) or 0.75
    stop : Offset from highets point in the colormap's range.
           Defaults to 1.0 (no upper ofset). Should be between
           `midpoint` and 1.0.

    Returns
    -------
    new_cmap : A new colormap that has been shifted (also registered with
               matplotlib under *name*).
    '''
    import matplotlib as mpl
    import matplotlib.pyplot as plt

    channels = {'red': [], 'green': [], 'blue': [], 'alpha': []}

    # Sample the source colormap uniformly over [start, stop] ...
    source_index = np.linspace(start, stop, 257)
    # ... and place the samples on a warped output axis so the source's
    # middle lands at `midpoint` (128 points below it, 129 at/above it).
    target_index = np.hstack([
        np.linspace(0.0, midpoint, 128, endpoint=False),
        np.linspace(midpoint, 1.0, 129, endpoint=True)
    ])

    for src, dst in zip(source_index, target_index):
        r, g, b, a = cmap(src)
        for key, value in zip(('red', 'green', 'blue', 'alpha'), (r, g, b, a)):
            channels[key].append((dst, value, value))

    new_cmap = mpl.colors.LinearSegmentedColormap(name, channels)
    plt.register_cmap(cmap=new_cmap)
    return new_cmap
@requires('matplotlib')
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
    '''
    Build a new colormap from the slice [minval, maxval] of an existing one.

    Parameters
    ----------
    cmap : Mmatplotlib colormap to be altered
    minval : Minimum value of the original colormap to include in the truncated colormap
    maxval : Maximum value of the original colormap to include in the truncated colormap
    n : Number of intervals between the min and max values for the gradient of the truncated colormap

    Returns
    -------
    new_cmap : A new colormap restricted to the requested range.
    '''
    import matplotlib as mpl
    label = 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval)
    samples = cmap(np.linspace(minval, maxval, n))
    return mpl.colors.LinearSegmentedColormap.from_list(label, samples)
@requires('matplotlib')
@requires('geopandas')
def compare_surfaces(data, var1, var2, gwr_t, gwr_bw, mgwr_t, mgwr_bw, name,
                     kwargs1, kwargs2, savefig=None):
    '''
    Function that creates comparative visualization of GWR and MGWR surfaces.

    Parameters
    ----------
    data   : pandas or geopandas Dataframe
             gwr/mgwr results
    var1   : string
             name of gwr parameter estimate column in frame
    var2   : string
             name of mgwr parameter estimate column in frame
    gwr_t  : boolean/numeric mask aligned with `data`, zero where the gwr
             t-value for var1 is not significant
             NOTE(review): the original docstring said "string", but the code
             below uses this as an array mask (`gwr_t == 0`) — confirm callers.
    gwr_bw : float
             bandwidth for gwr model for var1
    mgwr_t : mask aligned with `data`, zero where the mgwr t-value for var2 is
             not significant (same caveat as gwr_t)
    mgwr_bw: float
             bandwidth for mgwr model for var2
    name   : string
             common variable name to use for title
    kwargs1:
             additional plotting arguments for gwr surface
    kwargs2:
             additional plotting arguments for mgwr surface
    savefig: string, optional
             path to save the figure. Default is None (figure not saved).
    '''
    import matplotlib.pyplot as plt
    import geopandas as gp
    # Side-by-side panels: GWR surface on the left, MGWR on the right.
    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(45,20))
    ax0 = axes[0]
    ax0.set_title('GWR ' + name + ' Surface (BW: ' + str(gwr_bw) +')', fontsize=40)
    ax1 = axes[1]
    ax1.set_title('MGWR ' + name + ' Surface (BW: ' + str(mgwr_bw) +')', fontsize=40)
    #Set color map
    cmap = plt.cm.seismic
    # Shared color scale: min/max over both parameter surfaces so the two
    # panels are directly comparable.
    gwr_min = data[var1].min()
    gwr_max = data[var1].max()
    mgwr_min = data[var2].min()
    mgwr_max = data[var2].max()
    vmin = np.min([gwr_min, mgwr_min])
    vmax = np.max([gwr_max, mgwr_max])
    #If all values are negative use the negative half of the colormap
    if (vmin < 0) & (vmax < 0):
        cmap = truncate_colormap(cmap, 0.0, 0.5)
    #If all values are positive use the positive half of the colormap
    elif (vmin > 0) & (vmax > 0):
        cmap = truncate_colormap(cmap, 0.5, 1.0)
    #Otherwise values straddle zero: shift the colormap so zero is the midpoint
    else:
        cmap = shift_colormap(cmap, start=0.0, midpoint=1 - vmax/(vmax + abs(vmin)), stop=1.)
    #Create scalar mappable for colorbar and stretch colormap across range of data values
    sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=vmin, vmax=vmax))
    #Plot GWR parameters; grey out locations with non-significant t-values
    data.plot(var1, cmap=sm.cmap, ax=ax0, vmin=vmin, vmax=vmax, **kwargs1)
    if (gwr_t == 0).any():
        data[gwr_t == 0].plot(color='lightgrey', ax=ax0, **kwargs2)
    #Plot MGWR parameters; grey out locations with non-significant t-values
    data.plot(var2, cmap=sm.cmap, ax=ax1, vmin=vmin, vmax=vmax, **kwargs1)
    if (mgwr_t == 0).any():
        data[mgwr_t == 0].plot(color='lightgrey', ax=ax1, **kwargs2)
    #Shared colorbar on the right edge of the figure
    fig.tight_layout()
    fig.subplots_adjust(right=0.9)
    cax = fig.add_axes([0.92, 0.14, 0.03, 0.75])
    sm._A = []
    cbar = fig.colorbar(sm, cax=cax)
    cbar.ax.tick_params(labelsize=50)
    ax0.get_xaxis().set_visible(False)
    ax0.get_yaxis().set_visible(False)
    ax1.get_xaxis().set_visible(False)
    ax1.get_yaxis().set_visible(False)
    if savefig is not None:
        plt.savefig(savefig)
    plt.show()
| [
"matplotlib.pyplot.savefig",
"matplotlib.colors.LinearSegmentedColormap",
"matplotlib.pyplot.Normalize",
"numpy.max",
"numpy.linspace",
"pysal.lib.common.requires",
"numpy.min",
"matplotlib.pyplot.register_cmap",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((59, 81), 'pysal.lib.common.requires', 'requires', (['"""matplotlib"""'], {}), "('matplotlib')\n", (67, 81), False, 'from pysal.lib.common import requires\n'), ((1974, 1996), 'pysal.lib.common.requires', 'requires', (['"""matplotlib"""'], {}), "('matplotlib')\n", (1982, 1996), False, 'from pysal.lib.common import requires\n'), ((2853, 2875), 'pysal.lib.common.requires', 'requires', (['"""matplotlib"""'], {}), "('matplotlib')\n", (2861, 2875), False, 'from pysal.lib.common import requires\n'), ((2877, 2898), 'pysal.lib.common.requires', 'requires', (['"""geopandas"""'], {}), "('geopandas')\n", (2885, 2898), False, 'from pysal.lib.common import requires\n'), ((1387, 1416), 'numpy.linspace', 'np.linspace', (['start', 'stop', '(257)'], {}), '(start, stop, 257)\n', (1398, 1416), True, 'import numpy as np\n'), ((1866, 1913), 'matplotlib.colors.LinearSegmentedColormap', 'mpl.colors.LinearSegmentedColormap', (['name', 'cdict'], {}), '(name, cdict)\n', (1900, 1913), True, 'import matplotlib as mpl\n'), ((1918, 1950), 'matplotlib.pyplot.register_cmap', 'plt.register_cmap', ([], {'cmap': 'new_cmap'}), '(cmap=new_cmap)\n', (1935, 1950), True, 'import matplotlib.pyplot as plt\n'), ((4091, 4139), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)', 'figsize': '(45, 20)'}), '(nrows=1, ncols=2, figsize=(45, 20))\n', (4103, 4139), True, 'import matplotlib.pyplot as plt\n'), ((4587, 4614), 'numpy.min', 'np.min', (['[gwr_min, mgwr_min]'], {}), '([gwr_min, mgwr_min])\n', (4593, 4614), True, 'import numpy as np\n'), ((4626, 4653), 'numpy.max', 'np.max', (['[gwr_max, mgwr_max]'], {}), '([gwr_max, mgwr_max])\n', (4632, 4653), True, 'import numpy as np\n'), ((6179, 6189), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6187, 6189), True, 'import matplotlib.pyplot as plt\n'), ((6154, 6174), 'matplotlib.pyplot.savefig', 'plt.savefig', (['savefig'], {}), '(savefig)\n', (6165, 6174), True, 'import matplotlib.pyplot as plt\n'), ((1494, 1541), 
'numpy.linspace', 'np.linspace', (['(0.0)', 'midpoint', '(128)'], {'endpoint': '(False)'}), '(0.0, midpoint, 128, endpoint=False)\n', (1505, 1541), True, 'import numpy as np\n'), ((1552, 1598), 'numpy.linspace', 'np.linspace', (['midpoint', '(1.0)', '(129)'], {'endpoint': '(True)'}), '(midpoint, 1.0, 129, endpoint=True)\n', (1563, 1598), True, 'import numpy as np\n'), ((2798, 2828), 'numpy.linspace', 'np.linspace', (['minval', 'maxval', 'n'], {}), '(minval, maxval, n)\n', (2809, 2828), True, 'import numpy as np\n'), ((5296, 5331), 'matplotlib.pyplot.Normalize', 'plt.Normalize', ([], {'vmin': 'vmin', 'vmax': 'vmax'}), '(vmin=vmin, vmax=vmax)\n', (5309, 5331), True, 'import matplotlib.pyplot as plt\n')] |
import scipy
import numpy as np
import unittest as ut
from qfactor import get_distance
from qfactor.gates import RxGate
class TestRxGateConstructor(ut.TestCase):
    """Unit tests for the RxGate constructor."""

    def test_rxgate_constructor_invalid(self):
        """Bad angle/location arguments must raise the appropriate errors."""
        bad_type_cases = [
            (1, 0),                 # integer angle
            ("a", 0),               # string angle
            ([0, 1], 0),            # list angle
            (np.pi / 2, [0, 1]),    # list location
            (np.pi / 2, (0, 1)),    # tuple location
            (np.pi / 2, "a"),       # string location
            (np.pi / 2, 0, 0),      # extra positional argument
        ]
        for args in bad_type_cases:
            self.assertRaises(TypeError, RxGate, *args)
        # A negative qubit index is the one ValueError case.
        self.assertRaises(ValueError, RxGate, np.pi / 2, -1)

    def test_rxgate_constructor_valid(self):
        """A fixed pi-rotation on qubit 0 matches the analytic Rx(pi) unitary."""
        gate = RxGate(np.pi, 0, True)
        pauli_x = np.array([[0, 1], [1, 0]])
        expected = scipy.linalg.expm(-1j * np.pi / 2 * pauli_x)
        self.assertTrue(get_distance([gate], expected) < 1e-15)
        self.assertTrue(np.array_equal(gate.location, (0,)))
        self.assertEqual(gate.gate_size, 1)
        self.assertTrue(gate.fixed)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    ut.main()
| [
"qfactor.get_distance",
"numpy.array",
"qfactor.gates.RxGate",
"scipy.linalg.expm",
"numpy.array_equal",
"unittest.main"
] | [((1137, 1146), 'unittest.main', 'ut.main', ([], {}), '()\n', (1144, 1146), True, 'import unittest as ut\n'), ((767, 789), 'qfactor.gates.RxGate', 'RxGate', (['np.pi', '(0)', '(True)'], {}), '(np.pi, 0, True)\n', (773, 789), False, 'from qfactor.gates import RxGate\n'), ((804, 830), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (812, 830), True, 'import numpy as np\n'), ((852, 892), 'scipy.linalg.expm', 'scipy.linalg.expm', (['(-1.0j * np.pi / 2 * X)'], {}), '(-1.0j * np.pi / 2 * X)\n', (869, 892), False, 'import scipy\n'), ((980, 1015), 'numpy.array_equal', 'np.array_equal', (['gate.location', '(0,)'], {}), '(gate.location, (0,))\n', (994, 1015), True, 'import numpy as np\n'), ((916, 940), 'qfactor.get_distance', 'get_distance', (['[gate]', 'Rx'], {}), '([gate], Rx)\n', (928, 940), False, 'from qfactor import get_distance\n')] |
from __future__ import absolute_import
from __future__ import print_function
import pylab as plt
import sys
sys.path.insert(0, r'c:\work\dist\git\camb')
import camb
from cosmomc_to_camb import get_camb_params
import planckStyle as s
import numpy as np
from planck import SN
import os
g = s.getSinglePlotter()

# Pantheon (full) and JLA supernova likelihoods; paths are relative to this file.
like = SN.SN_likelihood(os.path.join(os.path.dirname(__file__), r'../../data/Pantheon/full_long18.dataset'))
JLA = SN.SN_likelihood(os.path.join(os.path.dirname(__file__), r'../../data/jla.dataset'), marginalize=False)

# Flag the Pantheon supernovae that also appear in JLA (JLA sometimes carries
# an 'SDSS' or 'sn' prefix on the name).
common = [name in JLA.names or 'SDSS' + name in JLA.names or 'sn' + name in JLA.names
          for name in like.names]
# BUG FIX: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin `bool` is the supported spelling.
common = np.array(common, dtype=bool)
print(like.nsn, np.sum(common), like.nsn - np.sum(common))

# Distance-modulus curves for a set of random posterior samples.
redshifts = np.logspace(-2, 1, 1000)
samples = g.sampleAnalyser.samplesForRoot('base_plikHM_TTTEEE_lowl_lowE_lensing')
ixs = samples.randomSingleSamples_indices()
dists = np.zeros((len(ixs), len(redshifts)))
sndists = np.zeros((len(ixs), like.nsn))
for i, ix in enumerate(ixs):
    dic = samples.getParamSampleDict(ix)
    camb_pars = get_camb_params(dic)
    results = camb.get_background(camb_pars)
    # Distance modulus (up to a constant): mu = 5 log10[(1+z)^2 D_A(z)].
    dists[i, :] = 5 * np.log10((1 + redshifts) ** 2 * results.angular_diameter_distance(redshifts))
    sndists[i, :] = 5 * np.log10((1 + like.zcmb) ** 2 * results.angular_diameter_distance(like.zcmb))
# Best-fit Planck background cosmology for the reference distance curve.
paramdic = g.bestfit('base_plikHM_TTTEEE_lowl_lowE_lensing').getParamDict()
camb_pars = get_camb_params(paramdic)
results = camb.get_background(camb_pars)
# Inverse-variance weights over the SN magnitudes.
invvars = 1.0 / like.pre_vars
wtval = np.sum(invvars)
offset = 5 * np.log10(1e-5)
# Best-fit distance moduli at the SN redshifts (heliocentric factor included).
lumdists = 5 * np.log10((1 + like.zcmb) * (1+like.zhel) * results.angular_diameter_distance(like.zcmb))
redshifts = np.logspace(-2, 1, 1000)
d = results.angular_diameter_distance(redshifts)
theory = 5 * np.log10((1 + redshifts) ** 2 * d)
planck_means = np.zeros(redshifts.shape)
planck_err = planck_means.copy()
plotdists = dists.copy()
for i in range(dists.shape[0]):
    # normalize optimally as though SN points
    estimated_scriptm = np.sum((sndists[i, :] - lumdists) * invvars) / wtval
    plotdists[i, :] -= estimated_scriptm
# Mean and scatter of the posterior-sample curves relative to the best fit.
for i in range(len(planck_means)):
    planck_means[i] = np.mean(plotdists[:, i]) - theory[i]
    planck_err[i] = np.std(plotdists[:, i])
estimated_scriptm = np.sum((like.mag - lumdists) * invvars) / wtval
m = like.mag - estimated_scriptm - offset
pred = lumdists - offset
cov = np.linalg.inv(like.invcov)
diagerrs = np.sqrt(np.diag(cov))
## Binned residuals: compress the SN into redshift-ordered bands.
ix = np.argsort(like.zcmb)
mins = []
maxs = []
Cinvd = like.invcov.dot(m - pred)
zinv = like.invcov.dot(np.log(like.zcmb))
nbin = 20
di = like.nsn // nbin + 1
bins = [ix[i:i + di] for i in range(0, like.nsn, di)]
# Split the last bin so the final 10 highest-z SN get their own band.
bins = bins[:-1] + [bins[-1][:-10], bins[-1][-10:]]
nbin = len(bins)
Csmall = np.zeros((nbin, nbin))
x = np.zeros(nbin)
zx = np.zeros(nbin)
for i, ixs in enumerate(bins):
    for j, ixs2 in enumerate(bins):
        Csmall[i, j] = np.sum(like.invcov[np.ix_(ixs, ixs2)])
    x[i] = np.sum(Cinvd[ixs])
    zx[i] = np.sum(zinv[ixs])
# Inverse-covariance-weighted band powers and their errors.
smallcov = np.linalg.inv(Csmall)
bandpowers = smallcov.dot(x)
zbands = np.exp(smallcov.dot(zx))  # effective (log-weighted) redshift per band
errs = np.sqrt(np.diag(smallcov))
# Redshift extent of each band, for the horizontal error bars.
for ixs in bins:
    mins += [np.min(like.zcmb[ixs])]
    maxs += [np.max(like.zcmb[ixs])]
mins = np.array(mins)
maxs = np.array(maxs)
# Two stacked panels sharing the redshift axis: per-SN residuals on top,
# binned band powers below.
fig, axs = plt.subplots(2, 1, figsize=(5, 4), sharex='col', gridspec_kw={'height_ratios': [3, 2]})
ax = axs[0]
# Planck posterior band for the distance-modulus residual.
s.plotBands(redshifts, planck_means, planck_err)
ax.errorbar(like.zcmb[common], m[common] - pred[common], diagerrs[common], fmt='.', lw=0.3, markersize=2,
            label='JLA and Pantheon')
ax.set_xscale('log')
# Invert the mask to plot the Pantheon-only supernovae.
common = np.array([not x for x in common])
ax.errorbar(like.zcmb[common], m[common] - pred[common], diagerrs[common], fmt='.', lw=0.3, markersize=2,
            label='Pantheon only')
ax.axhline(0, color='k', lw=1)
ax.set_xlim(0.01, 3)
ax.legend(loc='lower right');
ax.set_yticks([-0.8, -0.4, 0, 0.4, 0.8])
ax.tick_params(axis='both', which='major', labelsize=10)
# Bottom panel: binned residuals, horizontal bars spanning each bin's z range.
ax = axs[1]
ax.errorbar(zbands, bandpowers, errs, [zbands - mins, maxs - zbands], fmt='.', marker='o', color='C2', markersize=4)
ax.set_xscale('log')
ax.axhline(0, color='k', lw=1)
ax.set_xlabel('$z$', fontsize=13)
#fig.text(0.01, 0.5, r'$\mu -\mathcal{M} - \mu_{\rm{Planck}}$', va='center', rotation='vertical', fontsize=12)
fig.text(0.01, 0.5, r'$\mu - \mu_{\rm{Planck}}$', va='center', rotation='vertical', fontsize=13)
ax.set_xticks([0.01, 0.1, 1])
ax.set_xticklabels(['0.01', '0.1', '1'])
ax.set_yticks([-0.1, -0.05, 0, 0.05])
ax.tick_params(axis='both', which='major', labelsize=10)
plt.subplots_adjust(hspace=0)
plt.savefig('../../outputs/Pantheon.pdf', bbox_inches='tight')
| [
"sys.path.insert",
"pylab.subplots_adjust",
"numpy.log10",
"pylab.savefig",
"planckStyle.plotBands",
"numpy.log",
"numpy.argsort",
"numpy.array",
"camb.get_background",
"numpy.mean",
"numpy.ix_",
"numpy.max",
"cosmomc_to_camb.get_camb_params",
"numpy.min",
"pylab.subplots",
"numpy.logs... | [((109, 156), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""c:\\\\work\\\\dist\\\\git\\\\camb"""'], {}), "(0, 'c:\\\\work\\\\dist\\\\git\\\\camb')\n", (124, 156), False, 'import sys\n'), ((290, 310), 'planckStyle.getSinglePlotter', 's.getSinglePlotter', ([], {}), '()\n', (308, 310), True, 'import planckStyle as s\n'), ((672, 703), 'numpy.array', 'np.array', (['common'], {'dtype': 'np.bool'}), '(common, dtype=np.bool)\n', (680, 703), True, 'import numpy as np\n'), ((776, 800), 'numpy.logspace', 'np.logspace', (['(-2)', '(1)', '(1000)'], {}), '(-2, 1, 1000)\n', (787, 800), True, 'import numpy as np\n'), ((1456, 1481), 'cosmomc_to_camb.get_camb_params', 'get_camb_params', (['paramdic'], {}), '(paramdic)\n', (1471, 1481), False, 'from cosmomc_to_camb import get_camb_params\n'), ((1492, 1522), 'camb.get_background', 'camb.get_background', (['camb_pars'], {}), '(camb_pars)\n', (1511, 1522), False, 'import camb\n'), ((1562, 1577), 'numpy.sum', 'np.sum', (['invvars'], {}), '(invvars)\n', (1568, 1577), True, 'import numpy as np\n'), ((1724, 1748), 'numpy.logspace', 'np.logspace', (['(-2)', '(1)', '(1000)'], {}), '(-2, 1, 1000)\n', (1735, 1748), True, 'import numpy as np\n'), ((1861, 1886), 'numpy.zeros', 'np.zeros', (['redshifts.shape'], {}), '(redshifts.shape)\n', (1869, 1886), True, 'import numpy as np\n'), ((2423, 2449), 'numpy.linalg.inv', 'np.linalg.inv', (['like.invcov'], {}), '(like.invcov)\n', (2436, 2449), True, 'import numpy as np\n'), ((2498, 2519), 'numpy.argsort', 'np.argsort', (['like.zcmb'], {}), '(like.zcmb)\n', (2508, 2519), True, 'import numpy as np\n'), ((2786, 2808), 'numpy.zeros', 'np.zeros', (['(nbin, nbin)'], {}), '((nbin, nbin))\n', (2794, 2808), True, 'import numpy as np\n'), ((2813, 2827), 'numpy.zeros', 'np.zeros', (['nbin'], {}), '(nbin)\n', (2821, 2827), True, 'import numpy as np\n'), ((2833, 2847), 'numpy.zeros', 'np.zeros', (['nbin'], {}), '(nbin)\n', (2841, 2847), True, 'import numpy as np\n'), ((3050, 3071), 
'numpy.linalg.inv', 'np.linalg.inv', (['Csmall'], {}), '(Csmall)\n', (3063, 3071), True, 'import numpy as np\n'), ((3268, 3282), 'numpy.array', 'np.array', (['mins'], {}), '(mins)\n', (3276, 3282), True, 'import numpy as np\n'), ((3290, 3304), 'numpy.array', 'np.array', (['maxs'], {}), '(maxs)\n', (3298, 3304), True, 'import numpy as np\n'), ((3317, 3409), 'pylab.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(5, 4)', 'sharex': '"""col"""', 'gridspec_kw': "{'height_ratios': [3, 2]}"}), "(2, 1, figsize=(5, 4), sharex='col', gridspec_kw={\n 'height_ratios': [3, 2]})\n", (3329, 3409), True, 'import pylab as plt\n'), ((3417, 3465), 'planckStyle.plotBands', 's.plotBands', (['redshifts', 'planck_means', 'planck_err'], {}), '(redshifts, planck_means, planck_err)\n', (3428, 3465), True, 'import planckStyle as s\n'), ((3640, 3675), 'numpy.array', 'np.array', (['[(not x) for x in common]'], {}), '([(not x) for x in common])\n', (3648, 3675), True, 'import numpy as np\n'), ((4585, 4614), 'pylab.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0)'}), '(hspace=0)\n', (4604, 4614), True, 'import pylab as plt\n'), ((4616, 4678), 'pylab.savefig', 'plt.savefig', (['"""../../outputs/Pantheon.pdf"""'], {'bbox_inches': '"""tight"""'}), "('../../outputs/Pantheon.pdf', bbox_inches='tight')\n", (4627, 4678), True, 'import pylab as plt\n'), ((720, 734), 'numpy.sum', 'np.sum', (['common'], {}), '(common)\n', (726, 734), True, 'import numpy as np\n'), ((1099, 1119), 'cosmomc_to_camb.get_camb_params', 'get_camb_params', (['dic'], {}), '(dic)\n', (1114, 1119), False, 'from cosmomc_to_camb import get_camb_params\n'), ((1134, 1164), 'camb.get_background', 'camb.get_background', (['camb_pars'], {}), '(camb_pars)\n', (1153, 1164), False, 'import camb\n'), ((1592, 1607), 'numpy.log10', 'np.log10', (['(1e-05)'], {}), '(1e-05)\n', (1600, 1607), True, 'import numpy as np\n'), ((1811, 1845), 'numpy.log10', 'np.log10', (['((1 + redshifts) ** 2 * d)'], {}), '((1 + redshifts) ** 2 * 
d)\n', (1819, 1845), True, 'import numpy as np\n'), ((2256, 2279), 'numpy.std', 'np.std', (['plotdists[:, i]'], {}), '(plotdists[:, i])\n', (2262, 2279), True, 'import numpy as np\n'), ((2301, 2340), 'numpy.sum', 'np.sum', (['((like.mag - lumdists) * invvars)'], {}), '((like.mag - lumdists) * invvars)\n', (2307, 2340), True, 'import numpy as np\n'), ((2469, 2481), 'numpy.diag', 'np.diag', (['cov'], {}), '(cov)\n', (2476, 2481), True, 'import numpy as np\n'), ((2598, 2615), 'numpy.log', 'np.log', (['like.zcmb'], {}), '(like.zcmb)\n', (2604, 2615), True, 'import numpy as np\n'), ((2989, 3007), 'numpy.sum', 'np.sum', (['Cinvd[ixs]'], {}), '(Cinvd[ixs])\n', (2995, 3007), True, 'import numpy as np\n'), ((3020, 3037), 'numpy.sum', 'np.sum', (['zinv[ixs]'], {}), '(zinv[ixs])\n', (3026, 3037), True, 'import numpy as np\n'), ((3150, 3167), 'numpy.diag', 'np.diag', (['smallcov'], {}), '(smallcov)\n', (3157, 3167), True, 'import numpy as np\n'), ((349, 374), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (364, 374), False, 'import os\n'), ((457, 482), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (472, 482), False, 'import os\n'), ((747, 761), 'numpy.sum', 'np.sum', (['common'], {}), '(common)\n', (753, 761), True, 'import numpy as np\n'), ((2047, 2091), 'numpy.sum', 'np.sum', (['((sndists[i, :] - lumdists) * invvars)'], {}), '((sndists[i, :] - lumdists) * invvars)\n', (2053, 2091), True, 'import numpy as np\n'), ((2199, 2223), 'numpy.mean', 'np.mean', (['plotdists[:, i]'], {}), '(plotdists[:, i])\n', (2206, 2223), True, 'import numpy as np\n'), ((3200, 3222), 'numpy.min', 'np.min', (['like.zcmb[ixs]'], {}), '(like.zcmb[ixs])\n', (3206, 3222), True, 'import numpy as np\n'), ((3237, 3259), 'numpy.max', 'np.max', (['like.zcmb[ixs]'], {}), '(like.zcmb[ixs])\n', (3243, 3259), True, 'import numpy as np\n'), ((2958, 2975), 'numpy.ix_', 'np.ix_', (['ixs', 'ixs2'], {}), '(ixs, ixs2)\n', (2964, 2975), True, 'import numpy as 
np\n')] |
import math
import numpy as np
from scipy.signal import convolve2d
from skimage.morphology import dilation, disk
def generate_cross(size, lw):
    """Build a square binary cross mask with odd side length `size`.

    The mask has ones along its central row and central column and zeros
    elsewhere.  When `lw` is nonzero, the cross is thickened by morphological
    dilation with a disk of radius `lw`.
    """
    assert size % 2 == 1
    mid = size // 2
    mask = np.zeros((size, size))
    mask[mid, :] = 1.0
    mask[:, mid] = 1.0
    return dilation(mask, disk(lw)) if lw != 0 else mask
def kernel_gaussian(d, sigma):
    """Radial weight exp(-d / sigma**2), decaying with distance `d`.

    NOTE(review): the exponent is linear in `d`, not quadratic, so this is
    not a true Gaussian profile — presumably intentional; confirm with users.
    """
    denom = sigma ** 2
    return math.exp(-(d / denom))
def generate_cross_kernel(kernel_size, f_kernel, lw, angle, **kwargs):
    """Weight a cross-shaped mask by a radial function of distance.

    Each pixel of the cross mask (see `generate_cross`) is multiplied by
    `f_kernel(distance, **kwargs)`, where `distance` is measured between
    pixel centres (half-integer offsets) and the kernel centre.  `angle` is
    accepted for interface compatibility but is currently unused.
    """
    kernel = generate_cross(kernel_size, lw)
    half = np.array([0.5, 0.5])
    centre = np.array([kernel_size // 2] * 2, dtype=float) + half
    for row in range(kernel_size):
        for col in range(kernel_size):
            offset = np.array([row, col]) + half - centre
            kernel[row, col] *= f_kernel(np.linalg.norm(offset), **kwargs)
    return kernel
def hough_cross_convolve(edges, kernel_size=5, kernel_sigma=2., lw=1, angles=None):
    """Convolve `edges` with Gaussian-weighted cross kernels, one per angle.

    Returns an array of shape (len(angles), H, W) holding one same-size 2-D
    convolution response per requested angle.  When `angles` is None a single
    angle of 0 is used.
    """
    angle_list = [0.] if angles is None else angles
    hough_space = np.zeros([len(angle_list), edges.shape[0], edges.shape[1]])
    for idx, angle in enumerate(angle_list):
        kernel = generate_cross_kernel(
            kernel_size, kernel_gaussian, lw, angle, sigma=kernel_sigma)
        hough_space[idx] = convolve2d(edges, kernel, mode='same')
    return hough_space
from skimage.util import crop
from skimage.measure import label
import matplotlib.pyplot as plt
def hough_cross_connectivity(edges, kernel_size, lw=0., angles=None):
    """Score each pixel by the size of its connected, cross-masked edge component.

    For every interior pixel, a `kernel_size` x `kernel_size` cross mask is
    applied to the surrounding neighbourhood; the masked neighbourhood is
    labelled into connected components and the pixel's score is the pixel
    count of the component containing the centre pixel (0 when the centre is
    not part of an edge).  Returns an array of shape (len(angles), H, W);
    `angles` currently only controls how many identical kernels are built.
    """
    def _hough(edges, kernel):
        # Slide the cross mask over every fully-contained window of `edges`.
        hough_space = np.zeros(edges.shape)
        kernel_radius = math.floor(kernel_size / 2.)
        for i in range(edges.shape[0]):
            # BUG FIX: the window was previously taken as
            # edges[i - kernel_radius - 1 : i + kernel_radius], which centres
            # it on row i - 1, while labeled[kernel_radius, kernel_radius]
            # below is read as the value at row i.  Centre the window on i.
            xmin = i - kernel_radius
            xmax = i + kernel_radius + 1
            if xmin < 0 or xmax > edges.shape[0]:
                continue
            for j in range(edges.shape[1]):
                # Same off-by-one fix for the column window.
                ymin = j - kernel_radius
                ymax = j + kernel_radius + 1
                if ymin < 0 or ymax > edges.shape[1]:
                    continue
                neighborhood = edges[xmin:xmax, ymin:ymax]
                labeled = label(kernel * neighborhood)
                value = labeled[kernel_radius, kernel_radius]
                if value == 0:
                    # Centre pixel is background: leave score at 0.
                    continue
                hough_space[i, j] = np.sum(labeled == value)
        return hough_space

    if angles is None:
        angles = [0]
    kernels = [generate_cross(kernel_size, lw) for angle in angles]
    hough_space = np.zeros([len(angles), edges.shape[0], edges.shape[1]])
    for kernel, i in zip(kernels, range(len(angles))):
        hough_space[i] = _hough(edges, kernel)
    return hough_space
| [
"scipy.signal.convolve2d",
"numpy.ones",
"math.floor",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.linalg.norm",
"math.exp",
"skimage.morphology.disk",
"skimage.measure.label"
] | [((187, 209), 'math.floor', 'math.floor', (['(size / 2.0)'], {}), '(size / 2.0)\n', (197, 209), False, 'import math\n'), ((219, 241), 'numpy.zeros', 'np.zeros', (['[size, size]'], {}), '([size, size])\n', (227, 241), True, 'import numpy as np\n'), ((265, 278), 'numpy.ones', 'np.ones', (['size'], {}), '(size)\n', (272, 278), True, 'import numpy as np\n'), ((302, 315), 'numpy.ones', 'np.ones', (['size'], {}), '(size)\n', (309, 315), True, 'import numpy as np\n'), ((440, 465), 'math.exp', 'math.exp', (['(-d / sigma ** 2)'], {}), '(-d / sigma ** 2)\n', (448, 465), False, 'import math\n'), ((589, 609), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (597, 609), True, 'import numpy as np\n'), ((1296, 1334), 'scipy.signal.convolve2d', 'convolve2d', (['edges', 'kernel'], {'mode': '"""same"""'}), "(edges, kernel, mode='same')\n", (1306, 1334), False, 'from scipy.signal import convolve2d\n'), ((1580, 1601), 'numpy.zeros', 'np.zeros', (['edges.shape'], {}), '(edges.shape)\n', (1588, 1601), True, 'import numpy as np\n'), ((1626, 1655), 'math.floor', 'math.floor', (['(kernel_size / 2.0)'], {}), '(kernel_size / 2.0)\n', (1636, 1655), False, 'import math\n'), ((365, 373), 'skimage.morphology.disk', 'disk', (['lw'], {}), '(lw)\n', (369, 373), False, 'from skimage.morphology import dilation, disk\n'), ((759, 775), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (767, 775), True, 'import numpy as np\n'), ((2146, 2174), 'skimage.measure.label', 'label', (['(kernel * neighborhood)'], {}), '(kernel * neighborhood)\n', (2151, 2174), False, 'from skimage.measure import label\n'), ((633, 662), 'math.floor', 'math.floor', (['(kernel_size / 2.0)'], {}), '(kernel_size / 2.0)\n', (643, 662), False, 'import math\n'), ((830, 860), 'numpy.linalg.norm', 'np.linalg.norm', (['(point - center)'], {}), '(point - center)\n', (844, 860), True, 'import numpy as np\n'), ((2358, 2382), 'numpy.sum', 'np.sum', (['(labeled == value)'], {}), '(labeled == value)\n', (2364, 
2382), True, 'import numpy as np\n')] |
"""Unit tests for echo_classification.py."""
import copy
import unittest
import numpy
from gewittergefahr.gg_utils import grids
from gewittergefahr.gg_utils import radar_utils
from gewittergefahr.gg_utils import echo_classification as echo_classifn
TOLERANCE = 1e-6

# The following constants are used to test _estimate_melting_levels.
MELTING_LEVEL_LATITUDES_DEG = numpy.linspace(-90., 90., num=19)
MELTING_LEVEL_TIME_UNIX_SEC = 1541823287  # 041447 UTC 10 Nov 2018

# Expected output: November (month 10... TODO confirm indexing convention)
# intercept plus slope times absolute latitude.
MELTING_LEVELS_M_ASL = (
    echo_classifn.MELT_LEVEL_INTERCEPT_BY_MONTH_M_ASL[10] +
    echo_classifn.MELT_LEVEL_SLOPE_BY_MONTH_M_DEG01[10] *
    numpy.absolute(MELTING_LEVEL_LATITUDES_DEG)
)

# The following constants are used to test _neigh_metres_to_rowcol.
LARGE_GRID_HEIGHTS_M_ASL = radar_utils.get_valid_heights(
    data_source=radar_utils.MYRORSS_SOURCE_ID, field_name=radar_utils.REFL_NAME)

LARGE_GRID_METADATA_DICT = {
    echo_classifn.MIN_LATITUDE_KEY: 20.,
    echo_classifn.LATITUDE_SPACING_KEY: 0.01,
    echo_classifn.MIN_LONGITUDE_KEY: 230.,
    echo_classifn.LONGITUDE_SPACING_KEY: 0.01,
    echo_classifn.HEIGHTS_KEY: LARGE_GRID_HEIGHTS_M_ASL
}

THESE_LATITUDES_DEG, THESE_LONGITUDES_DEG = grids.get_latlng_grid_points(
    min_latitude_deg=LARGE_GRID_METADATA_DICT[echo_classifn.MIN_LATITUDE_KEY],
    min_longitude_deg=LARGE_GRID_METADATA_DICT[echo_classifn.MIN_LONGITUDE_KEY],
    lat_spacing_deg=LARGE_GRID_METADATA_DICT[
        echo_classifn.LATITUDE_SPACING_KEY],
    lng_spacing_deg=LARGE_GRID_METADATA_DICT[
        echo_classifn.LONGITUDE_SPACING_KEY],
    num_rows=7001, num_columns=3501)

LARGE_GRID_METADATA_DICT[echo_classifn.LATITUDES_KEY] = THESE_LATITUDES_DEG
LARGE_GRID_METADATA_DICT[echo_classifn.LONGITUDES_KEY] = THESE_LONGITUDES_DEG

# Expected neighbourhood dimensions for the large grid.
LARGE_RADIUS_METRES = 12000.
NUM_ROWS_IN_LARGE_NEIGH = 23
NUM_COLUMNS_IN_LARGE_NEIGH = 29

# Small (5 x 7) grid used by most of the remaining tests.
GRID_METADATA_DICT = {
    echo_classifn.MIN_LATITUDE_KEY: 35.1,
    echo_classifn.LATITUDE_SPACING_KEY: 0.2,
    echo_classifn.MIN_LONGITUDE_KEY: 262.1,
    echo_classifn.LONGITUDE_SPACING_KEY: 0.2,
    echo_classifn.HEIGHTS_KEY: numpy.array([1000, 4000, 7000])
}

THESE_LATITUDES_DEG, THESE_LONGITUDES_DEG = grids.get_latlng_grid_points(
    min_latitude_deg=GRID_METADATA_DICT[echo_classifn.MIN_LATITUDE_KEY],
    min_longitude_deg=GRID_METADATA_DICT[echo_classifn.MIN_LONGITUDE_KEY],
    lat_spacing_deg=GRID_METADATA_DICT[echo_classifn.LATITUDE_SPACING_KEY],
    lng_spacing_deg=GRID_METADATA_DICT[echo_classifn.LONGITUDE_SPACING_KEY],
    num_rows=5, num_columns=7)

GRID_METADATA_DICT[echo_classifn.LATITUDES_KEY] = THESE_LATITUDES_DEG
GRID_METADATA_DICT[echo_classifn.LONGITUDES_KEY] = THESE_LONGITUDES_DEG

NEIGH_RADIUS_METRES = 12000.
NUM_ROWS_IN_NEIGH = 3
NUM_COLUMNS_IN_NEIGH = 3

# The following constants are used to test _get_peakedness.
# One 5 x 7 reflectivity field per height (3 heights), stacked along axis -1.
THIS_FIRST_MATRIX = numpy.array([[0, 1, 2, 3, 4, 5, 6],
                                 [0, 1, 2, 3, 4, 5, 6],
                                 [0, 1, 2, 3, 4, 5, 6],
                                 [0, 1, 2, 3, 4, 5, 6],
                                 [0, 1, 2, 3, 4, 5, 20]])

THIS_SECOND_MATRIX = numpy.array([[0, 0, 0, 0, 0, 0, 0],
                                  [2, 2, 2, 2, 2, 2, 2],
                                  [4, 4, 4, 4, 4, 4, 4],
                                  [6, 6, 6, 6, 6, 6, 6],
                                  [8, 8, 8, 8, 8, 8, 20]])

THIS_THIRD_MATRIX = numpy.array([[0, 1, 2, 3, 4, 5, 6],
                                 [3, 4, 5, 6, 7, 8, 9],
                                 [6, 7, 8, 9, 10, 11, 12],
                                 [9, 10, 11, 12, 13, 14, 15],
                                 [12, 13, 14, 15, 16, 17, 20]])

REFLECTIVITY_MATRIX_DBZ = numpy.stack(
    (THIS_FIRST_MATRIX, THIS_SECOND_MATRIX, THIS_THIRD_MATRIX), axis=-1
).astype(float)

# Expected neighbourhood maxima (per height) used to form peakedness.
THIS_FIRST_MATRIX = numpy.array([[0, 0, 1, 2, 3, 4, 0],
                                 [0, 1, 2, 3, 4, 5, 5],
                                 [0, 1, 2, 3, 4, 5, 5],
                                 [0, 1, 2, 3, 4, 5, 5],
                                 [0, 0, 1, 2, 3, 4, 0]])

THIS_SECOND_MATRIX = numpy.array([[0, 0, 0, 0, 0, 0, 0],
                                  [0, 2, 2, 2, 2, 2, 0],
                                  [2, 4, 4, 4, 4, 4, 2],
                                  [4, 6, 6, 6, 6, 6, 4],
                                  [0, 6, 6, 6, 6, 6, 0]])

THIS_THIRD_MATRIX = numpy.array([[0, 1, 2, 3, 4, 5, 0],
                                 [1, 4, 5, 6, 7, 8, 6],
                                 [4, 7, 8, 9, 10, 11, 9],
                                 [7, 10, 11, 12, 13, 14, 12],
                                 [0, 10, 11, 12, 13, 14, 0]])

THIS_MATRIX = numpy.stack(
    (THIS_FIRST_MATRIX, THIS_SECOND_MATRIX, THIS_THIRD_MATRIX), axis=-1
).astype(float)

PEAKEDNESS_MATRIX_DBZ = REFLECTIVITY_MATRIX_DBZ - THIS_MATRIX

# The following constants are used to test _apply_convective_criterion1.
MAX_PEAKEDNESS_HEIGHT_M_ASL = 9000.

CRITERION1_FLAG_MATRIX = numpy.array([[0, 0, 0, 0, 0, 0, 0],
                                     [0, 0, 0, 0, 0, 0, 0],
                                     [0, 0, 0, 0, 0, 0, 0],
                                     [0, 0, 0, 0, 0, 0, 0],
                                     [1, 0, 0, 0, 0, 0, 1]], dtype=bool)

# The following constants are used to test _apply_convective_criterion2.
VALID_TIME_UNIX_SEC = 1541823287  # 041447 UTC 10 Nov 2018
MIN_COMPOSITE_REFL_AML_DBZ = 15.

CRITERION2_FLAG_MATRIX = numpy.array([[0, 0, 0, 0, 0, 0, 0],
                                     [0, 0, 0, 0, 0, 0, 0],
                                     [0, 0, 0, 0, 0, 0, 0],
                                     [0, 0, 0, 0, 0, 0, 1],
                                     [1, 0, 0, 1, 1, 1, 1]], dtype=bool)

# The following constants are used to test _apply_convective_criterion3.
MIN_ECHO_TOP_M_ASL = 6000.
ECHO_TOP_LEVEL_DBZ = 13.

CRITERION3_FLAG_MATRIX = numpy.array([[0, 0, 0, 0, 0, 0, 0],
                                     [0, 0, 0, 0, 0, 0, 0],
                                     [0, 0, 0, 0, 0, 0, 0],
                                     [0, 0, 0, 0, 1, 1, 1],
                                     [1, 1, 1, 1, 1, 1, 1]], dtype=bool)

# The following constants are used to test _apply_convective_criterion4.
# Criterion 4 (min region size of 2 px) leaves the "main" matrix unchanged.
CRITERION4_FLAG_MATRIX = copy.deepcopy(CRITERION3_FLAG_MATRIX)

DUMMY_CRITERION3_FLAG_MATRIX = numpy.array([[1, 0, 0, 1, 1, 0, 1],
                                           [0, 0, 0, 0, 0, 0, 0],
                                           [0, 0, 1, 0, 0, 0, 1],
                                           [1, 0, 0, 1, 0, 1, 0],
                                           [1, 0, 0, 0, 1, 0, 0]], dtype=bool)

DUMMY_CRITERION4_FLAG_MATRIX = numpy.array([[0, 0, 0, 1, 1, 0, 0],
                                           [0, 0, 0, 0, 0, 0, 0],
                                           [0, 0, 1, 0, 0, 0, 1],
                                           [1, 0, 0, 1, 0, 1, 0],
                                           [1, 0, 0, 0, 1, 0, 0]], dtype=bool)

# The following constants are used to test _apply_convective_criterion5.
MIN_COMPOSITE_REFL_DBZ = 6.

CRITERION5_FLAG_MATRIX = numpy.array([[0, 0, 0, 0, 0, 0, 0],
                                     [0, 0, 0, 0, 0, 0, 0],
                                     [0, 0, 0, 1, 1, 1, 1],
                                     [1, 1, 1, 1, 1, 1, 1],
                                     [1, 1, 1, 1, 1, 1, 1]], dtype=bool)

# The following constants are used to test find_classification_file.
TOP_DIRECTORY_NAME = 'foo'
CLASSIFN_FILE_NAME_UNZIPPED = (
    'foo/2018/20181109/echo_classification_2018-11-10-041447.nc')
CLASSIFN_FILE_NAME_ZIPPED = (
    'foo/2018/20181109/echo_classification_2018-11-10-041447.nc.gz')
class EchoClassificationTests(unittest.TestCase):
    """Each method is a unit test for echo_classification.py."""

    # NOTE: scalar comparisons below use assertEqual rather than
    # assertTrue(a == b), so failures report both values instead of the
    # uninformative "False is not true".

    def test_estimate_melting_levels(self):
        """Ensures correct output from _estimate_melting_levels."""

        these_heights_m_asl = echo_classifn._estimate_melting_levels(
            latitudes_deg=MELTING_LEVEL_LATITUDES_DEG,
            valid_time_unix_sec=MELTING_LEVEL_TIME_UNIX_SEC)

        self.assertTrue(numpy.allclose(
            these_heights_m_asl, MELTING_LEVELS_M_ASL, atol=TOLERANCE))

    def test_neigh_metres_to_rowcol_large(self):
        """Ensures correct output from _neigh_metres_to_rowcol.

        In this case the grid is very large (3501 x 7001).
        """

        this_num_rows, this_num_columns = echo_classifn._neigh_metres_to_rowcol(
            neigh_radius_metres=LARGE_RADIUS_METRES,
            grid_metadata_dict=LARGE_GRID_METADATA_DICT)

        self.assertEqual(this_num_rows, NUM_ROWS_IN_LARGE_NEIGH)
        self.assertEqual(this_num_columns, NUM_COLUMNS_IN_LARGE_NEIGH)

    def test_neigh_metres_to_rowcol_small(self):
        """Ensures correct output from _neigh_metres_to_rowcol.

        In this case the grid is small (5 x 7).
        """

        this_num_rows, this_num_columns = echo_classifn._neigh_metres_to_rowcol(
            neigh_radius_metres=NEIGH_RADIUS_METRES,
            grid_metadata_dict=GRID_METADATA_DICT)

        self.assertEqual(this_num_rows, NUM_ROWS_IN_NEIGH)
        self.assertEqual(this_num_columns, NUM_COLUMNS_IN_NEIGH)

    def test_get_peakedness(self):
        """Ensures correct output from _get_peakedness."""

        this_matrix_dbz = echo_classifn._get_peakedness(
            reflectivity_matrix_dbz=REFLECTIVITY_MATRIX_DBZ,
            num_rows_in_neigh=NUM_ROWS_IN_NEIGH,
            num_columns_in_neigh=NUM_COLUMNS_IN_NEIGH)

        self.assertTrue(numpy.allclose(
            this_matrix_dbz, PEAKEDNESS_MATRIX_DBZ, atol=TOLERANCE))

    def test_apply_convective_criterion1(self):
        """Ensures correct output from _apply_convective_criterion1."""

        this_flag_matrix = echo_classifn._apply_convective_criterion1(
            reflectivity_matrix_dbz=REFLECTIVITY_MATRIX_DBZ,
            peakedness_neigh_metres=NEIGH_RADIUS_METRES,
            max_peakedness_height_m_asl=MAX_PEAKEDNESS_HEIGHT_M_ASL,
            halve_resolution_for_peakedness=False,
            min_composite_refl_dbz=None,
            grid_metadata_dict=GRID_METADATA_DICT)

        self.assertTrue(numpy.array_equal(
            this_flag_matrix, CRITERION1_FLAG_MATRIX))

    def test_apply_convective_criterion2(self):
        """Ensures correct output from _apply_convective_criterion2."""

        this_flag_matrix = echo_classifn._apply_convective_criterion2(
            reflectivity_matrix_dbz=REFLECTIVITY_MATRIX_DBZ,
            convective_flag_matrix=CRITERION1_FLAG_MATRIX,
            grid_metadata_dict=GRID_METADATA_DICT,
            valid_time_unix_sec=VALID_TIME_UNIX_SEC,
            min_composite_refl_aml_dbz=MIN_COMPOSITE_REFL_AML_DBZ)

        self.assertTrue(numpy.array_equal(
            this_flag_matrix, CRITERION2_FLAG_MATRIX))

    def test_apply_convective_criterion3(self):
        """Ensures correct output from _apply_convective_criterion3."""

        this_flag_matrix = echo_classifn._apply_convective_criterion3(
            reflectivity_matrix_dbz=REFLECTIVITY_MATRIX_DBZ,
            convective_flag_matrix=CRITERION2_FLAG_MATRIX,
            grid_metadata_dict=GRID_METADATA_DICT,
            min_echo_top_m_asl=MIN_ECHO_TOP_M_ASL,
            echo_top_level_dbz=ECHO_TOP_LEVEL_DBZ)

        self.assertTrue(numpy.array_equal(
            this_flag_matrix, CRITERION3_FLAG_MATRIX))

    def test_apply_convective_criterion4_main(self):
        """Ensures correct output from _apply_convective_criterion4.

        In this case the input is the "main" flag matrix (the criterion-3
        matrix created by actually running `_apply_convective_criterion3`).
        """

        this_flag_matrix = echo_classifn._apply_convective_criterion4(
            convective_flag_matrix=CRITERION3_FLAG_MATRIX, min_size_pixels=2
        )

        self.assertTrue(numpy.array_equal(
            this_flag_matrix, CRITERION4_FLAG_MATRIX
        ))

    def test_apply_convective_criterion4_dummy(self):
        """Ensures correct output from _apply_convective_criterion4.

        In this case the input is a "dummy" matrix (*not* created by running
        `_apply_convective_criterion3`).
        """

        this_flag_matrix = echo_classifn._apply_convective_criterion4(
            convective_flag_matrix=DUMMY_CRITERION3_FLAG_MATRIX,
            min_size_pixels=2
        )

        self.assertTrue(numpy.array_equal(
            this_flag_matrix, DUMMY_CRITERION4_FLAG_MATRIX
        ))

    def test_apply_convective_criterion5(self):
        """Ensures correct output from _apply_convective_criterion5."""

        this_flag_matrix = echo_classifn._apply_convective_criterion5(
            reflectivity_matrix_dbz=REFLECTIVITY_MATRIX_DBZ,
            convective_flag_matrix=CRITERION4_FLAG_MATRIX,
            min_composite_refl_dbz=MIN_COMPOSITE_REFL_DBZ)

        self.assertTrue(numpy.array_equal(
            this_flag_matrix, CRITERION5_FLAG_MATRIX))

    def test_find_convective_pixels(self):
        """Ensures correct output from find_convective_pixels."""

        option_dict = {
            echo_classifn.PEAKEDNESS_NEIGH_KEY: NEIGH_RADIUS_METRES,
            echo_classifn.MAX_PEAKEDNESS_HEIGHT_KEY:
                MAX_PEAKEDNESS_HEIGHT_M_ASL,
            echo_classifn.MIN_ECHO_TOP_KEY: MIN_ECHO_TOP_M_ASL,
            echo_classifn.ECHO_TOP_LEVEL_KEY: ECHO_TOP_LEVEL_DBZ,
            echo_classifn.MIN_COMPOSITE_REFL_CRITERION1_KEY: None,
            echo_classifn.MIN_COMPOSITE_REFL_CRITERION5_KEY:
                MIN_COMPOSITE_REFL_DBZ,
            echo_classifn.MIN_COMPOSITE_REFL_AML_KEY: MIN_COMPOSITE_REFL_AML_DBZ
        }

        this_flag_matrix = echo_classifn.find_convective_pixels(
            reflectivity_matrix_dbz=REFLECTIVITY_MATRIX_DBZ,
            grid_metadata_dict=GRID_METADATA_DICT,
            valid_time_unix_sec=VALID_TIME_UNIX_SEC, option_dict=option_dict
        )[0]

        self.assertTrue(numpy.array_equal(
            this_flag_matrix, CRITERION5_FLAG_MATRIX))

    def test_find_file_desire_zipped_allow_either_no_raise(self):
        """Ensures correct output from find_classification_file.

        In this case, desire_zipped = True; allow_zipped_or_unzipped = True;
        and raise_error_if_missing = False.
        """

        this_file_name = echo_classifn.find_classification_file(
            top_directory_name=TOP_DIRECTORY_NAME,
            valid_time_unix_sec=VALID_TIME_UNIX_SEC, desire_zipped=True,
            allow_zipped_or_unzipped=True, raise_error_if_missing=False)

        self.assertEqual(this_file_name, CLASSIFN_FILE_NAME_UNZIPPED)

    def test_find_file_desire_zipped_allow_zipped_no_raise(self):
        """Ensures correct output from find_classification_file.

        In this case, desire_zipped = True; allow_zipped_or_unzipped = False;
        and raise_error_if_missing = False.
        """

        this_file_name = echo_classifn.find_classification_file(
            top_directory_name=TOP_DIRECTORY_NAME,
            valid_time_unix_sec=VALID_TIME_UNIX_SEC, desire_zipped=True,
            allow_zipped_or_unzipped=False, raise_error_if_missing=False)

        self.assertEqual(this_file_name, CLASSIFN_FILE_NAME_ZIPPED)

    def test_find_file_desire_unzipped_allow_either_no_raise(self):
        """Ensures correct output from find_classification_file.

        In this case, desire_zipped = False; allow_zipped_or_unzipped = True;
        and raise_error_if_missing = False.
        """

        this_file_name = echo_classifn.find_classification_file(
            top_directory_name=TOP_DIRECTORY_NAME,
            valid_time_unix_sec=VALID_TIME_UNIX_SEC, desire_zipped=False,
            allow_zipped_or_unzipped=True, raise_error_if_missing=False)

        self.assertEqual(this_file_name, CLASSIFN_FILE_NAME_ZIPPED)

    def test_find_file_desire_unzipped_allow_unzipped_no_raise(self):
        """Ensures correct output from find_classification_file.

        In this case, desire_zipped = False; allow_zipped_or_unzipped = True;
        and raise_error_if_missing = False.
        """

        this_file_name = echo_classifn.find_classification_file(
            top_directory_name=TOP_DIRECTORY_NAME,
            valid_time_unix_sec=VALID_TIME_UNIX_SEC, desire_zipped=False,
            allow_zipped_or_unzipped=False, raise_error_if_missing=False)

        self.assertEqual(this_file_name, CLASSIFN_FILE_NAME_UNZIPPED)

    def test_find_file_desire_zipped_allow_either_raise(self):
        """Ensures correct output from find_classification_file.

        In this case, desire_zipped = True; allow_zipped_or_unzipped = True;
        and raise_error_if_missing = True.
        """

        with self.assertRaises(ValueError):
            echo_classifn.find_classification_file(
                top_directory_name=TOP_DIRECTORY_NAME,
                valid_time_unix_sec=VALID_TIME_UNIX_SEC, desire_zipped=True,
                allow_zipped_or_unzipped=True, raise_error_if_missing=True)

    def test_find_file_desire_zipped_allow_zipped_raise(self):
        """Ensures correct output from find_classification_file.

        In this case, desire_zipped = True; allow_zipped_or_unzipped = False;
        and raise_error_if_missing = True.
        """

        with self.assertRaises(ValueError):
            echo_classifn.find_classification_file(
                top_directory_name=TOP_DIRECTORY_NAME,
                valid_time_unix_sec=VALID_TIME_UNIX_SEC, desire_zipped=True,
                allow_zipped_or_unzipped=False, raise_error_if_missing=True)

    def test_find_file_desire_unzipped_allow_either_raise(self):
        """Ensures correct output from find_classification_file.

        In this case, desire_zipped = False; allow_zipped_or_unzipped = True;
        and raise_error_if_missing = True.
        """

        with self.assertRaises(ValueError):
            echo_classifn.find_classification_file(
                top_directory_name=TOP_DIRECTORY_NAME,
                valid_time_unix_sec=VALID_TIME_UNIX_SEC, desire_zipped=False,
                allow_zipped_or_unzipped=True, raise_error_if_missing=True)

    def test_find_file_desire_unzipped_allow_unzipped_raise(self):
        """Ensures correct output from find_classification_file.

        In this case, desire_zipped = False; allow_zipped_or_unzipped = True;
        and raise_error_if_missing = True.
        """

        with self.assertRaises(ValueError):
            echo_classifn.find_classification_file(
                top_directory_name=TOP_DIRECTORY_NAME,
                valid_time_unix_sec=VALID_TIME_UNIX_SEC, desire_zipped=False,
                allow_zipped_or_unzipped=False, raise_error_if_missing=True)
# Allow running this test module directly (python <file>) as well as via a
# test runner.
if __name__ == '__main__':
    unittest.main()
| [
"gewittergefahr.gg_utils.echo_classification._apply_convective_criterion1",
"gewittergefahr.gg_utils.echo_classification._apply_convective_criterion5",
"gewittergefahr.gg_utils.echo_classification._get_peakedness",
"gewittergefahr.gg_utils.echo_classification._apply_convective_criterion4",
"numpy.array",
... | [((368, 403), 'numpy.linspace', 'numpy.linspace', (['(-90.0)', '(90.0)'], {'num': '(19)'}), '(-90.0, 90.0, num=19)\n', (382, 403), False, 'import numpy\n'), ((758, 868), 'gewittergefahr.gg_utils.radar_utils.get_valid_heights', 'radar_utils.get_valid_heights', ([], {'data_source': 'radar_utils.MYRORSS_SOURCE_ID', 'field_name': 'radar_utils.REFL_NAME'}), '(data_source=radar_utils.MYRORSS_SOURCE_ID,\n field_name=radar_utils.REFL_NAME)\n', (787, 868), False, 'from gewittergefahr.gg_utils import radar_utils\n'), ((1180, 1574), 'gewittergefahr.gg_utils.grids.get_latlng_grid_points', 'grids.get_latlng_grid_points', ([], {'min_latitude_deg': 'LARGE_GRID_METADATA_DICT[echo_classifn.MIN_LATITUDE_KEY]', 'min_longitude_deg': 'LARGE_GRID_METADATA_DICT[echo_classifn.MIN_LONGITUDE_KEY]', 'lat_spacing_deg': 'LARGE_GRID_METADATA_DICT[echo_classifn.LATITUDE_SPACING_KEY]', 'lng_spacing_deg': 'LARGE_GRID_METADATA_DICT[echo_classifn.LONGITUDE_SPACING_KEY]', 'num_rows': '(7001)', 'num_columns': '(3501)'}), '(min_latitude_deg=LARGE_GRID_METADATA_DICT[\n echo_classifn.MIN_LATITUDE_KEY], min_longitude_deg=\n LARGE_GRID_METADATA_DICT[echo_classifn.MIN_LONGITUDE_KEY],\n lat_spacing_deg=LARGE_GRID_METADATA_DICT[echo_classifn.\n LATITUDE_SPACING_KEY], lng_spacing_deg=LARGE_GRID_METADATA_DICT[\n echo_classifn.LONGITUDE_SPACING_KEY], num_rows=7001, num_columns=3501)\n', (1208, 1574), False, 'from gewittergefahr.gg_utils import grids\n'), ((2147, 2507), 'gewittergefahr.gg_utils.grids.get_latlng_grid_points', 'grids.get_latlng_grid_points', ([], {'min_latitude_deg': 'GRID_METADATA_DICT[echo_classifn.MIN_LATITUDE_KEY]', 'min_longitude_deg': 'GRID_METADATA_DICT[echo_classifn.MIN_LONGITUDE_KEY]', 'lat_spacing_deg': 'GRID_METADATA_DICT[echo_classifn.LATITUDE_SPACING_KEY]', 'lng_spacing_deg': 'GRID_METADATA_DICT[echo_classifn.LONGITUDE_SPACING_KEY]', 'num_rows': '(5)', 'num_columns': '(7)'}), '(min_latitude_deg=GRID_METADATA_DICT[\n echo_classifn.MIN_LATITUDE_KEY], 
min_longitude_deg=GRID_METADATA_DICT[\n echo_classifn.MIN_LONGITUDE_KEY], lat_spacing_deg=GRID_METADATA_DICT[\n echo_classifn.LATITUDE_SPACING_KEY], lng_spacing_deg=GRID_METADATA_DICT\n [echo_classifn.LONGITUDE_SPACING_KEY], num_rows=5, num_columns=7)\n', (2175, 2507), False, 'from gewittergefahr.gg_utils import grids\n'), ((2810, 2944), 'numpy.array', 'numpy.array', (['[[0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1,\n 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 20]]'], {}), '([[0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, \n 5, 6], [0, 1, 2, 3, 4, 5, 6], [0, 1, 2, 3, 4, 5, 20]])\n', (2821, 2944), False, 'import numpy\n'), ((3094, 3228), 'numpy.array', 'numpy.array', (['[[0, 0, 0, 0, 0, 0, 0], [2, 2, 2, 2, 2, 2, 2], [4, 4, 4, 4, 4, 4, 4], [6, 6,\n 6, 6, 6, 6, 6], [8, 8, 8, 8, 8, 8, 20]]'], {}), '([[0, 0, 0, 0, 0, 0, 0], [2, 2, 2, 2, 2, 2, 2], [4, 4, 4, 4, 4, \n 4, 4], [6, 6, 6, 6, 6, 6, 6], [8, 8, 8, 8, 8, 8, 20]])\n', (3105, 3228), False, 'import numpy\n'), ((3381, 3529), 'numpy.array', 'numpy.array', (['[[0, 1, 2, 3, 4, 5, 6], [3, 4, 5, 6, 7, 8, 9], [6, 7, 8, 9, 10, 11, 12], [9,\n 10, 11, 12, 13, 14, 15], [12, 13, 14, 15, 16, 17, 20]]'], {}), '([[0, 1, 2, 3, 4, 5, 6], [3, 4, 5, 6, 7, 8, 9], [6, 7, 8, 9, 10,\n 11, 12], [9, 10, 11, 12, 13, 14, 15], [12, 13, 14, 15, 16, 17, 20]])\n', (3392, 3529), False, 'import numpy\n'), ((3807, 3940), 'numpy.array', 'numpy.array', (['[[0, 0, 1, 2, 3, 4, 0], [0, 1, 2, 3, 4, 5, 5], [0, 1, 2, 3, 4, 5, 5], [0, 1,\n 2, 3, 4, 5, 5], [0, 0, 1, 2, 3, 4, 0]]'], {}), '([[0, 0, 1, 2, 3, 4, 0], [0, 1, 2, 3, 4, 5, 5], [0, 1, 2, 3, 4, \n 5, 5], [0, 1, 2, 3, 4, 5, 5], [0, 0, 1, 2, 3, 4, 0]])\n', (3818, 3940), False, 'import numpy\n'), ((4090, 4223), 'numpy.array', 'numpy.array', (['[[0, 0, 0, 0, 0, 0, 0], [0, 2, 2, 2, 2, 2, 0], [2, 4, 4, 4, 4, 4, 2], [4, 6,\n 6, 6, 6, 6, 4], [0, 6, 6, 6, 6, 6, 0]]'], {}), '([[0, 0, 0, 0, 0, 0, 0], [0, 2, 2, 2, 2, 2, 0], [2, 4, 4, 4, 4, \n 4, 2], [4, 6, 6, 6, 6, 6, 4], [0, 6, 
6, 6, 6, 6, 0]])\n', (4101, 4223), False, 'import numpy\n'), ((4376, 4521), 'numpy.array', 'numpy.array', (['[[0, 1, 2, 3, 4, 5, 0], [1, 4, 5, 6, 7, 8, 6], [4, 7, 8, 9, 10, 11, 9], [7,\n 10, 11, 12, 13, 14, 12], [0, 10, 11, 12, 13, 14, 0]]'], {}), '([[0, 1, 2, 3, 4, 5, 0], [1, 4, 5, 6, 7, 8, 6], [4, 7, 8, 9, 10,\n 11, 9], [7, 10, 11, 12, 13, 14, 12], [0, 10, 11, 12, 13, 14, 0]])\n', (4387, 4521), False, 'import numpy\n'), ((4964, 5109), 'numpy.array', 'numpy.array', (['[[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0,\n 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 1]]'], {'dtype': 'bool'}), '([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, \n 0, 0], [0, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 1]], dtype=bool)\n', (4975, 5109), False, 'import numpy\n'), ((5449, 5594), 'numpy.array', 'numpy.array', (['[[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0,\n 0, 0, 0, 0, 1], [1, 0, 0, 1, 1, 1, 1]]'], {'dtype': 'bool'}), '([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, \n 0, 0], [0, 0, 0, 0, 0, 0, 1], [1, 0, 0, 1, 1, 1, 1]], dtype=bool)\n', (5460, 5594), False, 'import numpy\n'), ((5894, 6039), 'numpy.array', 'numpy.array', (['[[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0,\n 0, 0, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1]]'], {'dtype': 'bool'}), '([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, \n 0, 0], [0, 0, 0, 0, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1]], dtype=bool)\n', (5905, 6039), False, 'import numpy\n'), ((6286, 6323), 'copy.deepcopy', 'copy.deepcopy', (['CRITERION3_FLAG_MATRIX'], {}), '(CRITERION3_FLAG_MATRIX)\n', (6299, 6323), False, 'import copy\n'), ((6356, 6501), 'numpy.array', 'numpy.array', (['[[1, 0, 0, 1, 1, 0, 1], [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 1], [1, 0,\n 0, 1, 0, 1, 0], [1, 0, 0, 0, 1, 0, 0]]'], {'dtype': 'bool'}), '([[1, 0, 0, 1, 1, 0, 1], [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, \n 0, 1], [1, 0, 0, 1, 0, 1, 0], [1, 0, 0, 0, 1, 0, 0]], 
dtype=bool)\n', (6367, 6501), False, 'import numpy\n'), ((6705, 6850), 'numpy.array', 'numpy.array', (['[[0, 0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 1], [1, 0,\n 0, 1, 0, 1, 0], [1, 0, 0, 0, 1, 0, 0]]'], {'dtype': 'bool'}), '([[0, 0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, \n 0, 1], [1, 0, 0, 1, 0, 1, 0], [1, 0, 0, 0, 1, 0, 0]], dtype=bool)\n', (6716, 6850), False, 'import numpy\n'), ((7150, 7295), 'numpy.array', 'numpy.array', (['[[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, 1, 1], [1, 1,\n 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1]]'], {'dtype': 'bool'}), '([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 1, 1, \n 1, 1], [1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1]], dtype=bool)\n', (7161, 7295), False, 'import numpy\n'), ((2068, 2099), 'numpy.array', 'numpy.array', (['[1000, 4000, 7000]'], {}), '([1000, 4000, 7000])\n', (2079, 2099), False, 'import numpy\n'), ((18825, 18840), 'unittest.main', 'unittest.main', ([], {}), '()\n', (18838, 18840), False, 'import unittest\n'), ((616, 659), 'numpy.absolute', 'numpy.absolute', (['MELTING_LEVEL_LATITUDES_DEG'], {}), '(MELTING_LEVEL_LATITUDES_DEG)\n', (630, 659), False, 'import numpy\n'), ((3685, 3770), 'numpy.stack', 'numpy.stack', (['(THIS_FIRST_MATRIX, THIS_SECOND_MATRIX, THIS_THIRD_MATRIX)'], {'axis': '(-1)'}), '((THIS_FIRST_MATRIX, THIS_SECOND_MATRIX, THIS_THIRD_MATRIX), axis=-1\n )\n', (3696, 3770), False, 'import numpy\n'), ((4665, 4750), 'numpy.stack', 'numpy.stack', (['(THIS_FIRST_MATRIX, THIS_SECOND_MATRIX, THIS_THIRD_MATRIX)'], {'axis': '(-1)'}), '((THIS_FIRST_MATRIX, THIS_SECOND_MATRIX, THIS_THIRD_MATRIX), axis=-1\n )\n', (4676, 4750), False, 'import numpy\n'), ((7998, 8138), 'gewittergefahr.gg_utils.echo_classification._estimate_melting_levels', 'echo_classifn._estimate_melting_levels', ([], {'latitudes_deg': 'MELTING_LEVEL_LATITUDES_DEG', 'valid_time_unix_sec': 'MELTING_LEVEL_TIME_UNIX_SEC'}), '(latitudes_deg=\n MELTING_LEVEL_LATITUDES_DEG, 
valid_time_unix_sec=\n MELTING_LEVEL_TIME_UNIX_SEC)\n', (8036, 8138), True, 'from gewittergefahr.gg_utils import echo_classification as echo_classifn\n'), ((8496, 8624), 'gewittergefahr.gg_utils.echo_classification._neigh_metres_to_rowcol', 'echo_classifn._neigh_metres_to_rowcol', ([], {'neigh_radius_metres': 'LARGE_RADIUS_METRES', 'grid_metadata_dict': 'LARGE_GRID_METADATA_DICT'}), '(neigh_radius_metres=\n LARGE_RADIUS_METRES, grid_metadata_dict=LARGE_GRID_METADATA_DICT)\n', (8533, 8624), True, 'from gewittergefahr.gg_utils import echo_classification as echo_classifn\n'), ((9002, 9124), 'gewittergefahr.gg_utils.echo_classification._neigh_metres_to_rowcol', 'echo_classifn._neigh_metres_to_rowcol', ([], {'neigh_radius_metres': 'NEIGH_RADIUS_METRES', 'grid_metadata_dict': 'GRID_METADATA_DICT'}), '(neigh_radius_metres=\n NEIGH_RADIUS_METRES, grid_metadata_dict=GRID_METADATA_DICT)\n', (9039, 9124), True, 'from gewittergefahr.gg_utils import echo_classification as echo_classifn\n'), ((9394, 9561), 'gewittergefahr.gg_utils.echo_classification._get_peakedness', 'echo_classifn._get_peakedness', ([], {'reflectivity_matrix_dbz': 'REFLECTIVITY_MATRIX_DBZ', 'num_rows_in_neigh': 'NUM_ROWS_IN_NEIGH', 'num_columns_in_neigh': 'NUM_COLUMNS_IN_NEIGH'}), '(reflectivity_matrix_dbz=\n REFLECTIVITY_MATRIX_DBZ, num_rows_in_neigh=NUM_ROWS_IN_NEIGH,\n num_columns_in_neigh=NUM_COLUMNS_IN_NEIGH)\n', (9423, 9561), True, 'from gewittergefahr.gg_utils import echo_classification as echo_classifn\n'), ((9849, 10166), 'gewittergefahr.gg_utils.echo_classification._apply_convective_criterion1', 'echo_classifn._apply_convective_criterion1', ([], {'reflectivity_matrix_dbz': 'REFLECTIVITY_MATRIX_DBZ', 'peakedness_neigh_metres': 'NEIGH_RADIUS_METRES', 'max_peakedness_height_m_asl': 'MAX_PEAKEDNESS_HEIGHT_M_ASL', 'halve_resolution_for_peakedness': '(False)', 'min_composite_refl_dbz': 'None', 'grid_metadata_dict': 'GRID_METADATA_DICT'}), '(reflectivity_matrix_dbz=\n REFLECTIVITY_MATRIX_DBZ, 
peakedness_neigh_metres=NEIGH_RADIUS_METRES,\n max_peakedness_height_m_asl=MAX_PEAKEDNESS_HEIGHT_M_ASL,\n halve_resolution_for_peakedness=False, min_composite_refl_dbz=None,\n grid_metadata_dict=GRID_METADATA_DICT)\n', (9891, 10166), True, 'from gewittergefahr.gg_utils import echo_classification as echo_classifn\n'), ((10471, 10758), 'gewittergefahr.gg_utils.echo_classification._apply_convective_criterion2', 'echo_classifn._apply_convective_criterion2', ([], {'reflectivity_matrix_dbz': 'REFLECTIVITY_MATRIX_DBZ', 'convective_flag_matrix': 'CRITERION1_FLAG_MATRIX', 'grid_metadata_dict': 'GRID_METADATA_DICT', 'valid_time_unix_sec': 'VALID_TIME_UNIX_SEC', 'min_composite_refl_aml_dbz': 'MIN_COMPOSITE_REFL_AML_DBZ'}), '(reflectivity_matrix_dbz=\n REFLECTIVITY_MATRIX_DBZ, convective_flag_matrix=CRITERION1_FLAG_MATRIX,\n grid_metadata_dict=GRID_METADATA_DICT, valid_time_unix_sec=\n VALID_TIME_UNIX_SEC, min_composite_refl_aml_dbz=MIN_COMPOSITE_REFL_AML_DBZ)\n', (10513, 10758), True, 'from gewittergefahr.gg_utils import echo_classification as echo_classifn\n'), ((11054, 11323), 'gewittergefahr.gg_utils.echo_classification._apply_convective_criterion3', 'echo_classifn._apply_convective_criterion3', ([], {'reflectivity_matrix_dbz': 'REFLECTIVITY_MATRIX_DBZ', 'convective_flag_matrix': 'CRITERION2_FLAG_MATRIX', 'grid_metadata_dict': 'GRID_METADATA_DICT', 'min_echo_top_m_asl': 'MIN_ECHO_TOP_M_ASL', 'echo_top_level_dbz': 'ECHO_TOP_LEVEL_DBZ'}), '(reflectivity_matrix_dbz=\n REFLECTIVITY_MATRIX_DBZ, convective_flag_matrix=CRITERION2_FLAG_MATRIX,\n grid_metadata_dict=GRID_METADATA_DICT, min_echo_top_m_asl=\n MIN_ECHO_TOP_M_ASL, echo_top_level_dbz=ECHO_TOP_LEVEL_DBZ)\n', (11096, 11323), True, 'from gewittergefahr.gg_utils import echo_classification as echo_classifn\n'), ((11784, 11897), 'gewittergefahr.gg_utils.echo_classification._apply_convective_criterion4', 'echo_classifn._apply_convective_criterion4', ([], {'convective_flag_matrix': 'CRITERION3_FLAG_MATRIX', 'min_size_pixels': 
'(2)'}), '(convective_flag_matrix=\n CRITERION3_FLAG_MATRIX, min_size_pixels=2)\n', (11826, 11897), True, 'from gewittergefahr.gg_utils import echo_classification as echo_classifn\n'), ((12306, 12425), 'gewittergefahr.gg_utils.echo_classification._apply_convective_criterion4', 'echo_classifn._apply_convective_criterion4', ([], {'convective_flag_matrix': 'DUMMY_CRITERION3_FLAG_MATRIX', 'min_size_pixels': '(2)'}), '(convective_flag_matrix=\n DUMMY_CRITERION3_FLAG_MATRIX, min_size_pixels=2)\n', (12348, 12425), True, 'from gewittergefahr.gg_utils import echo_classification as echo_classifn\n'), ((12718, 12912), 'gewittergefahr.gg_utils.echo_classification._apply_convective_criterion5', 'echo_classifn._apply_convective_criterion5', ([], {'reflectivity_matrix_dbz': 'REFLECTIVITY_MATRIX_DBZ', 'convective_flag_matrix': 'CRITERION4_FLAG_MATRIX', 'min_composite_refl_dbz': 'MIN_COMPOSITE_REFL_DBZ'}), '(reflectivity_matrix_dbz=\n REFLECTIVITY_MATRIX_DBZ, convective_flag_matrix=CRITERION4_FLAG_MATRIX,\n min_composite_refl_dbz=MIN_COMPOSITE_REFL_DBZ)\n', (12760, 12912), True, 'from gewittergefahr.gg_utils import echo_classification as echo_classifn\n'), ((14390, 14602), 'gewittergefahr.gg_utils.echo_classification.find_classification_file', 'echo_classifn.find_classification_file', ([], {'top_directory_name': 'TOP_DIRECTORY_NAME', 'valid_time_unix_sec': 'VALID_TIME_UNIX_SEC', 'desire_zipped': '(True)', 'allow_zipped_or_unzipped': '(True)', 'raise_error_if_missing': '(False)'}), '(top_directory_name=\n TOP_DIRECTORY_NAME, valid_time_unix_sec=VALID_TIME_UNIX_SEC,\n desire_zipped=True, allow_zipped_or_unzipped=True,\n raise_error_if_missing=False)\n', (14428, 14602), True, 'from gewittergefahr.gg_utils import echo_classification as echo_classifn\n'), ((14992, 15205), 'gewittergefahr.gg_utils.echo_classification.find_classification_file', 'echo_classifn.find_classification_file', ([], {'top_directory_name': 'TOP_DIRECTORY_NAME', 'valid_time_unix_sec': 'VALID_TIME_UNIX_SEC', 
'desire_zipped': '(True)', 'allow_zipped_or_unzipped': '(False)', 'raise_error_if_missing': '(False)'}), '(top_directory_name=\n TOP_DIRECTORY_NAME, valid_time_unix_sec=VALID_TIME_UNIX_SEC,\n desire_zipped=True, allow_zipped_or_unzipped=False,\n raise_error_if_missing=False)\n', (15030, 15205), True, 'from gewittergefahr.gg_utils import echo_classification as echo_classifn\n'), ((15595, 15808), 'gewittergefahr.gg_utils.echo_classification.find_classification_file', 'echo_classifn.find_classification_file', ([], {'top_directory_name': 'TOP_DIRECTORY_NAME', 'valid_time_unix_sec': 'VALID_TIME_UNIX_SEC', 'desire_zipped': '(False)', 'allow_zipped_or_unzipped': '(True)', 'raise_error_if_missing': '(False)'}), '(top_directory_name=\n TOP_DIRECTORY_NAME, valid_time_unix_sec=VALID_TIME_UNIX_SEC,\n desire_zipped=False, allow_zipped_or_unzipped=True,\n raise_error_if_missing=False)\n', (15633, 15808), True, 'from gewittergefahr.gg_utils import echo_classification as echo_classifn\n'), ((16200, 16414), 'gewittergefahr.gg_utils.echo_classification.find_classification_file', 'echo_classifn.find_classification_file', ([], {'top_directory_name': 'TOP_DIRECTORY_NAME', 'valid_time_unix_sec': 'VALID_TIME_UNIX_SEC', 'desire_zipped': '(False)', 'allow_zipped_or_unzipped': '(False)', 'raise_error_if_missing': '(False)'}), '(top_directory_name=\n TOP_DIRECTORY_NAME, valid_time_unix_sec=VALID_TIME_UNIX_SEC,\n desire_zipped=False, allow_zipped_or_unzipped=False,\n raise_error_if_missing=False)\n', (16238, 16414), True, 'from gewittergefahr.gg_utils import echo_classification as echo_classifn\n'), ((8179, 8252), 'numpy.allclose', 'numpy.allclose', (['these_heights_m_asl', 'MELTING_LEVELS_M_ASL'], {'atol': 'TOLERANCE'}), '(these_heights_m_asl, MELTING_LEVELS_M_ASL, atol=TOLERANCE)\n', (8193, 8252), False, 'import numpy\n'), ((9615, 9685), 'numpy.allclose', 'numpy.allclose', (['this_matrix_dbz', 'PEAKEDNESS_MATRIX_DBZ'], {'atol': 'TOLERANCE'}), '(this_matrix_dbz, PEAKEDNESS_MATRIX_DBZ, 
atol=TOLERANCE)\n', (9629, 9685), False, 'import numpy\n'), ((10248, 10307), 'numpy.array_equal', 'numpy.array_equal', (['this_flag_matrix', 'CRITERION1_FLAG_MATRIX'], {}), '(this_flag_matrix, CRITERION1_FLAG_MATRIX)\n', (10265, 10307), False, 'import numpy\n'), ((10831, 10890), 'numpy.array_equal', 'numpy.array_equal', (['this_flag_matrix', 'CRITERION2_FLAG_MATRIX'], {}), '(this_flag_matrix, CRITERION2_FLAG_MATRIX)\n', (10848, 10890), False, 'import numpy\n'), ((11396, 11455), 'numpy.array_equal', 'numpy.array_equal', (['this_flag_matrix', 'CRITERION3_FLAG_MATRIX'], {}), '(this_flag_matrix, CRITERION3_FLAG_MATRIX)\n', (11413, 11455), False, 'import numpy\n'), ((11940, 11999), 'numpy.array_equal', 'numpy.array_equal', (['this_flag_matrix', 'CRITERION4_FLAG_MATRIX'], {}), '(this_flag_matrix, CRITERION4_FLAG_MATRIX)\n', (11957, 11999), False, 'import numpy\n'), ((12480, 12545), 'numpy.array_equal', 'numpy.array_equal', (['this_flag_matrix', 'DUMMY_CRITERION4_FLAG_MATRIX'], {}), '(this_flag_matrix, DUMMY_CRITERION4_FLAG_MATRIX)\n', (12497, 12545), False, 'import numpy\n'), ((12966, 13025), 'numpy.array_equal', 'numpy.array_equal', (['this_flag_matrix', 'CRITERION5_FLAG_MATRIX'], {}), '(this_flag_matrix, CRITERION5_FLAG_MATRIX)\n', (12983, 13025), False, 'import numpy\n'), ((13759, 13958), 'gewittergefahr.gg_utils.echo_classification.find_convective_pixels', 'echo_classifn.find_convective_pixels', ([], {'reflectivity_matrix_dbz': 'REFLECTIVITY_MATRIX_DBZ', 'grid_metadata_dict': 'GRID_METADATA_DICT', 'valid_time_unix_sec': 'VALID_TIME_UNIX_SEC', 'option_dict': 'option_dict'}), '(reflectivity_matrix_dbz=\n REFLECTIVITY_MATRIX_DBZ, grid_metadata_dict=GRID_METADATA_DICT,\n valid_time_unix_sec=VALID_TIME_UNIX_SEC, option_dict=option_dict)\n', (13795, 13958), True, 'from gewittergefahr.gg_utils import echo_classification as echo_classifn\n'), ((14024, 14083), 'numpy.array_equal', 'numpy.array_equal', (['this_flag_matrix', 'CRITERION5_FLAG_MATRIX'], {}), '(this_flag_matrix, 
CRITERION5_FLAG_MATRIX)\n', (14041, 14083), False, 'import numpy\n'), ((16830, 17041), 'gewittergefahr.gg_utils.echo_classification.find_classification_file', 'echo_classifn.find_classification_file', ([], {'top_directory_name': 'TOP_DIRECTORY_NAME', 'valid_time_unix_sec': 'VALID_TIME_UNIX_SEC', 'desire_zipped': '(True)', 'allow_zipped_or_unzipped': '(True)', 'raise_error_if_missing': '(True)'}), '(top_directory_name=\n TOP_DIRECTORY_NAME, valid_time_unix_sec=VALID_TIME_UNIX_SEC,\n desire_zipped=True, allow_zipped_or_unzipped=True,\n raise_error_if_missing=True)\n', (16868, 17041), True, 'from gewittergefahr.gg_utils import echo_classification as echo_classifn\n'), ((17398, 17610), 'gewittergefahr.gg_utils.echo_classification.find_classification_file', 'echo_classifn.find_classification_file', ([], {'top_directory_name': 'TOP_DIRECTORY_NAME', 'valid_time_unix_sec': 'VALID_TIME_UNIX_SEC', 'desire_zipped': '(True)', 'allow_zipped_or_unzipped': '(False)', 'raise_error_if_missing': '(True)'}), '(top_directory_name=\n TOP_DIRECTORY_NAME, valid_time_unix_sec=VALID_TIME_UNIX_SEC,\n desire_zipped=True, allow_zipped_or_unzipped=False,\n raise_error_if_missing=True)\n', (17436, 17610), True, 'from gewittergefahr.gg_utils import echo_classification as echo_classifn\n'), ((17969, 18181), 'gewittergefahr.gg_utils.echo_classification.find_classification_file', 'echo_classifn.find_classification_file', ([], {'top_directory_name': 'TOP_DIRECTORY_NAME', 'valid_time_unix_sec': 'VALID_TIME_UNIX_SEC', 'desire_zipped': '(False)', 'allow_zipped_or_unzipped': '(True)', 'raise_error_if_missing': '(True)'}), '(top_directory_name=\n TOP_DIRECTORY_NAME, valid_time_unix_sec=VALID_TIME_UNIX_SEC,\n desire_zipped=False, allow_zipped_or_unzipped=True,\n raise_error_if_missing=True)\n', (18007, 18181), True, 'from gewittergefahr.gg_utils import echo_classification as echo_classifn\n'), ((18542, 18755), 'gewittergefahr.gg_utils.echo_classification.find_classification_file', 
'echo_classifn.find_classification_file', ([], {'top_directory_name': 'TOP_DIRECTORY_NAME', 'valid_time_unix_sec': 'VALID_TIME_UNIX_SEC', 'desire_zipped': '(False)', 'allow_zipped_or_unzipped': '(False)', 'raise_error_if_missing': '(True)'}), '(top_directory_name=\n TOP_DIRECTORY_NAME, valid_time_unix_sec=VALID_TIME_UNIX_SEC,\n desire_zipped=False, allow_zipped_or_unzipped=False,\n raise_error_if_missing=True)\n', (18580, 18755), True, 'from gewittergefahr.gg_utils import echo_classification as echo_classifn\n')] |
"""
Sets ligth-related constants
"""
import yaml
import numpy as np
LUT_VOX_DIV = np.zeros(0)
N_OP_CHANNEL = 0
LIGHT_SIMULATED = True
OP_CHANNEL_EFFICIENCY = np.zeros(0)
#: Prescale factor analogous to ScintPreScale in LArSoft FIXME
SCINT_PRESCALE = 1
#: Ion + excitation work function in `MeV`
W_PH = 19.5e-6 # MeV
def set_light_properties(detprop_file):
"""
The function loads the detector properties YAML file
and stores the light-related constants as global variables
Args:
detprop_file (str): detector properties YAML filename
"""
global LUT_VOX_DIV
global N_OP_CHANNEL
global LIGHT_SIMULATED
global OP_CHANNEL_EFFICIENCY
with open(detprop_file) as df:
detprop = yaml.load(df, Loader=yaml.FullLoader)
try:
LUT_VOX_DIV = np.array(detprop['lut_vox_div'])
N_OP_CHANNEL = detprop['n_op_channel']
OP_CHANNEL_EFFICIENCY = np.array(detprop['op_channel_efficiency'])
except KeyError:
LIGHT_SIMULATED = False | [
"numpy.array",
"numpy.zeros",
"yaml.load"
] | [((83, 94), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (91, 94), True, 'import numpy as np\n'), ((159, 170), 'numpy.zeros', 'np.zeros', (['(0)'], {}), '(0)\n', (167, 170), True, 'import numpy as np\n'), ((729, 766), 'yaml.load', 'yaml.load', (['df'], {'Loader': 'yaml.FullLoader'}), '(df, Loader=yaml.FullLoader)\n', (738, 766), False, 'import yaml\n'), ((799, 831), 'numpy.array', 'np.array', (["detprop['lut_vox_div']"], {}), "(detprop['lut_vox_div'])\n", (807, 831), True, 'import numpy as np\n'), ((911, 953), 'numpy.array', 'np.array', (["detprop['op_channel_efficiency']"], {}), "(detprop['op_channel_efficiency'])\n", (919, 953), True, 'import numpy as np\n')] |
import numpy as np
import random
import cv2
import os
def Draw(image, result):
# output 저장을 위한 경로 설정
output_path = os.path.dirname(image) + "/ocred_" + os.path.basename(image)
# Draw TextBox with opencv
img = cv2.imread(image)
np.random.seed(42)
COLORS = np.random.randint(0, 255, size=(255, 3), dtype="uint8")
for i in result:
x = i[0][0][0]
y = i[0][0][1]
w = i[0][1][0] - i[0][0][0]
h = i[0][2][1] - i[0][1][1]
color_idx = random.randint(0, 255)
color = [int(c) for c in COLORS[color_idx]]
img = cv2.rectangle(img, (x, y), (x + w, y + h), color, 2)
# save image
cv2.imwrite(output_path, img)
| [
"cv2.rectangle",
"cv2.imwrite",
"os.path.dirname",
"numpy.random.randint",
"os.path.basename",
"numpy.random.seed",
"cv2.imread",
"random.randint"
] | [((230, 247), 'cv2.imread', 'cv2.imread', (['image'], {}), '(image)\n', (240, 247), False, 'import cv2\n'), ((252, 270), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (266, 270), True, 'import numpy as np\n'), ((284, 339), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)'], {'size': '(255, 3)', 'dtype': '"""uint8"""'}), "(0, 255, size=(255, 3), dtype='uint8')\n", (301, 339), True, 'import numpy as np\n'), ((665, 694), 'cv2.imwrite', 'cv2.imwrite', (['output_path', 'img'], {}), '(output_path, img)\n', (676, 694), False, 'import cv2\n'), ((163, 186), 'os.path.basename', 'os.path.basename', (['image'], {}), '(image)\n', (179, 186), False, 'import os\n'), ((501, 523), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (515, 523), False, 'import random\n'), ((590, 642), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + w, y + h)', 'color', '(2)'], {}), '(img, (x, y), (x + w, y + h), color, 2)\n', (603, 642), False, 'import cv2\n'), ((126, 148), 'os.path.dirname', 'os.path.dirname', (['image'], {}), '(image)\n', (141, 148), False, 'import os\n')] |
import matplotlib.pyplot as plt
import numpy as np
import json
if __name__ == '__main__':
x = np.arange(100)
y = x*x
z = x*x + 10*x
with open("example.json") as json_file:
s = json.load(json_file)
plt.rcParams.update(s)
plt.plot(x,y,label='Y=x*x');
plt.plot(x,z,label='Y=x*x+10*x');
plt.title('Nice JSON example');
plt.legend();
plt.plot();
plt.savefig('json_example.png') | [
"matplotlib.pyplot.savefig",
"numpy.arange",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.rcParams.update",
"json.load",
"matplotlib.pyplot.title",
"matplotlib.pyplot.legend"
] | [((98, 112), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (107, 112), True, 'import numpy as np\n'), ((210, 232), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (['s'], {}), '(s)\n', (229, 232), True, 'import matplotlib.pyplot as plt\n'), ((235, 264), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'label': '"""Y=x*x"""'}), "(x, y, label='Y=x*x')\n", (243, 264), True, 'import matplotlib.pyplot as plt\n'), ((265, 299), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'z'], {'label': '"""Y=x*x+10*x"""'}), "(x, z, label='Y=x*x+10*x')\n", (273, 299), True, 'import matplotlib.pyplot as plt\n'), ((300, 330), 'matplotlib.pyplot.title', 'plt.title', (['"""Nice JSON example"""'], {}), "('Nice JSON example')\n", (309, 330), True, 'import matplotlib.pyplot as plt\n'), ((333, 345), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (343, 345), True, 'import matplotlib.pyplot as plt\n'), ((348, 358), 'matplotlib.pyplot.plot', 'plt.plot', ([], {}), '()\n', (356, 358), True, 'import matplotlib.pyplot as plt\n'), ((361, 392), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""json_example.png"""'], {}), "('json_example.png')\n", (372, 392), True, 'import matplotlib.pyplot as plt\n'), ((186, 206), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (195, 206), False, 'import json\n')] |
import ROOT as root
import numpy as np
import uncertainties.unumpy as unp
from uncertainties import ufloat
from uncertainties.unumpy import nominal_values as noms
from uncertainties.unumpy import std_devs as stds
from array import array
import sys
############### Readout command line argument
try:
name_of_folder = sys.argv[1]
try:
plot_style = sys.argv[2]
except IndexError:
plot_style = None
except IndexError:
print('No Argument given Or other Index out of Range Er')
sys.path.insert(0, './' + name_of_folder + '/')
########################## import pyData.py ######################################
from pyData import *
##################################################### Set Cnavas Style #############################
root.gStyle.SetOptTitle(0)
root.gStyle.SetOptFit(1)
root.gStyle.SetLabelSize(.05, "XY");
root.gStyle.SetTitleSize(.05, "XY");
root.gStyle.SetTitleOffset(1, "XY");
root.gStyle.SetStatFontSize(.08)
########################### Def Gaus function ######################
personal_gaus = root.TF1("personal_gaus", " [0] * exp( -0.5 * ( (x - [1]) / [2] ) * ( (x - [1]) / [2] ) ) ")
name_params = [ "amplitude/[MeanVcal]", "mean/[Col]", "sigma/[Col]"]
personal_gaus.SetParName(0,'Amplitude')
personal_gaus.SetParName(2,'Sigma')
if plot_style == 'thesis':
personal_gaus.SetParName(1,'Mittelwert')
else :
personal_gaus.SetParName(1,'Mean')
############################### Save Data in list #######################################
mean_value_col_list = []
mean_error_col_list = []
x_value = []
x_error = []
##############################################################################################################################
################################### Getting the mean hit value of all columns near the laserspot #############################
###############################################################################################################################
################################## Set sum area, size of sensetive area ###############################
xmin = 20
xmax = 26
ymin = 62
ymax = 72
#################################### calculating mean of each coloum ################################
for i in range(xmin,xmax): # going thru all col
content = []
error = []
x_value.append(i)
x_error.append(0.5)
test_error = []
for j in range(ymin,ymax): # going thru all rows
if qMap_Ag_C0_V0.GetBinContent(i,j) != 0:
content.append( qMap_Ag_C0_V0.GetBinContent(i,j)) # Is this the real error
N = qMap_Ag_C0_V0.GetBinEntries( qMap_Ag_C0_V0.GetBin(i,j))
if N == 1:
new_error = np.sqrt( ( qMap_Ag_C0_V0.GetBinContent(i,j) - qMap_Ag_C0_V0.GetBinContent(i,j)/N) **2)
else:
new_error = np.sqrt( 1/(N-1) * ( qMap_Ag_C0_V0.GetBinContent(i,j) - qMap_Ag_C0_V0.GetBinContent(i,j)/N) **2)
#error.append( 1/N * np.sqrt(qMap_Ag_C0_V0.GetBinContent(i,j) *N ) ) # Is this the real error
error.append( new_error ) # Is this the real error
else:
pass
content_bin = unp.uarray( content, error)
mean_content_col = content_bin.sum() # mean value of each bin in the col
# Saving values in lists
mean_value_col_list.append( noms(mean_content_col))
mean_error_col_list.append( stds(mean_content_col) )
########################### Create errorbar plot #####################################
errorbar_plot_col = root.TGraphErrors( len(x_value), array( 'f', x_value- np.ones(len(x_value))), array( 'f', mean_value_col_list), array( 'f', x_error), array( 'f', mean_error_col_list) )
x_value -= np.ones(len(x_value))
############################## Set axis label and range of errobar plot ##################################
if plot_style == 'thesis':
errorbar_plot_col.GetXaxis().SetTitle("Spalte")
errorbar_plot_col.GetYaxis().SetTitle("Summe Hits / Vcal")
else:
errorbar_plot_col.GetXaxis().SetTitle("Col")
errorbar_plot_col.GetYaxis().SetTitle("Mean Hit / Vcal")
errorbar_plot_col.SetMinimum(0)
errorbar_plot_col.SetMaximum( max( mean_value_col_list) + 0.3 * max(mean_value_col_list) )
####################### create Canvas and FIT ##########################################
c1 = root.TCanvas("c1", "c1", 1980, 1080)
c1.SetGrid()
if name_of_folder == '7_mm':
personal_gaus.SetParLimits(0, max(mean_value_col_list) * .2, max(mean_value_col_list) * 1.5 )
personal_gaus.SetParLimits(1, np.mean(x_value) * .7, np.mean(x_value) * 1.2 )
personal_gaus.SetParLimits(2, np.std(np.array(x_value)) * 0.03, np.std(np.array(x_value)) * 1.4 )
elif name_of_folder == '14_mm':
personal_gaus.SetParLimits(0, max(mean_value_col_list) * .4, max(mean_value_col_list) * 1.5 )
personal_gaus.SetParLimits(1, np.mean(x_value) * .8, np.mean(x_value) * 1.1 )
personal_gaus.SetParLimits(2, np.std(np.array(x_value)) * 0.03, np.std(np.array(x_value))*1.1 )
else:
personal_gaus.SetParLimits(0, max(mean_value_col_list) * .5, max(mean_value_col_list) * 1.8 )
personal_gaus.SetParLimits(1, np.mean(x_value) * .7, np.mean(x_value) * 1.2 )
personal_gaus.SetParLimits(2, np.std(np.array(x_value)) * 0.03, np.std(np.array(x_value)) * 1.2 )
errorbar_plot_col.Fit(personal_gaus, "", "", min(x_value) -0.5 , max( x_value) +0.5 )
#errorbar_plot_col.Fit("gaus", "", "", min(x_value) -0.5 , max( x_value) +0.5 )
errorbar_plot_col.Draw("ap*")
############################### Create legend ####################################
if plot_style == 'thesis':
legend = root.TLegend(0.15,0.71,0.37,0.93)
legend.SetTextSize(0.055)
legend.AddEntry(errorbar_plot_col,"Summe Hits","lep")
legend.AddEntry( personal_gaus,"Fit","l")
legend.Draw()
else:
legend = root.TLegend(0.65,0.47,0.98,0.7)
legend.SetTextSize(0.04)
legend.AddEntry(errorbar_plot_col,"Row sum hit value","lep")
legend.AddEntry( personal_gaus,"Gaussian Fit","l")
legend.Draw()
######## Transfer Sigma from Bin to mumeter ############################
sigma_mu_meter_col = ufloat(personal_gaus.GetParameter(2), personal_gaus.GetParError(2)) * 150 # 150 is pixel size in y direction
#############################################################################
############################### Save parameter and plot ###########################################
with open( f'./fit_params/{name_of_folder}_fit_parameters_col_xaxis.txt', 'w') as file:
for i in range(0,3):
file.write( name_params[i] + ' ' + str( personal_gaus.GetParameter(i) ) + ' ' + str(personal_gaus.GetParError(i)) + '\n')
with open( f'./fit_parameters_col_xaxis.txt', 'a') as file:
file.write( name_of_folder + 'Amplitude/Sigma/Mean:' + ' ' + str( personal_gaus.GetParameter(0) ) + ' ' + str(personal_gaus.GetParError(0)) + ' ' + str( personal_gaus.GetParameter(1) ) + ' ' + str(personal_gaus.GetParError(1)) + ' ' + str( personal_gaus.GetParameter(2) ) + ' ' + str(personal_gaus.GetParError(2)) + '\n')
with open( f'./sigma_col_xaxis.txt', 'a') as file:
file.write( name_params[i] + '_' + name_of_folder + ' ' + str( personal_gaus.GetParameter(2) ) + ' ' + str(personal_gaus.GetParError(2)) + '\n')
with open( f'./sigma_col_in_mumeter_xaxis.txt', 'a') as file:
file.write( name_params[i] +'_' + name_of_folder + ' ' + str( noms(sigma_mu_meter_col) ) + ' ' + str( stds(sigma_mu_meter_col) ) + '\n')
c1.SaveAs(f'./plots/{name_of_folder}_erorbar_plot_col.pdf')
##############################################################################################################################
################################### Getting the mean hit value of all rows near the laserspot #############################
###############################################################################################################################
############################Reset lists###########################################
mean_value_row_list = []
mean_error_row_list = []
x_value = []
x_error = []
row_with_hits = []
#################################### calculating mean of each row #####################################
for i in range(ymin,ymax): # going thru all rows
content = []
error = []
x_value.append(i)
x_error.append(0.5)
for j in range(xmin,xmax): # going thru all col
if qMap_Ag_C0_V0.GetBinContent(j,i) != 0:
content.append( qMap_Ag_C0_V0.GetBinContent(j,i))
N = qMap_Ag_C0_V0.GetBinEntries( qMap_Ag_C0_V0.GetBin(j,i))
if N == 1:
new_error = np.sqrt( ( qMap_Ag_C0_V0.GetBinContent(i,j) - qMap_Ag_C0_V0.GetBinContent(i,j)/N )**2)
else:
new_error = np.sqrt( 1/(N-1) * ( qMap_Ag_C0_V0.GetBinContent(i,j) - qMap_Ag_C0_V0.GetBinContent(i,j)/N) **2)
#error.append( 1/N * np.sqrt(qMap_Ag_C0_V0.GetBinContent(j,i) * N ) )
error.append( new_error)
else:
pass
content_bin = unp.uarray( content, error)
mean_content_row = content_bin.sum() # mean value of each bin in the col
# Saving values in lists
mean_value_row_list.append( noms(mean_content_row))
mean_error_row_list.append( stds(mean_content_row))
############################# Create new errorbar plot ####################################
errorbar_plot_rows = root.TGraphErrors( len(x_value), array( 'f', x_value - np.ones(len(x_value))), array( 'f', mean_value_row_list), array( 'f', x_error), array( 'f', mean_error_row_list) )
x_value -= np.ones(len(x_value))
errorbar_plot_rows.GetXaxis().SetNdivisions(20)
############################### create Canvas ########################################
c2 = root.TCanvas("c2", "c2", 1980, 1080);
c2.SetGrid()
############################## Set axis label of errobar plot ##################################
if plot_style == 'thesis':
errorbar_plot_rows.GetXaxis().SetTitle("Zeile")
errorbar_plot_rows.GetYaxis().SetTitle("Summe Hits / Vcal")
else:
errorbar_plot_rows.GetXaxis().SetTitle("Row")
errorbar_plot_rows.GetYaxis().SetTitle("Mean Hit / Vcal")
errorbar_plot_rows.SetMinimum(0)
if name_of_folder == '10-5_mm':
errorbar_plot_rows.SetMaximum( max(mean_value_row_list) + 0.15 * max(mean_value_row_list) )
elif name_of_folder == '11_mm':
errorbar_plot_rows.SetMaximum( max(mean_value_row_list) + 0.9 * max(mean_value_row_list) )
elif name_of_folder == '9_mm':
errorbar_plot_rows.SetMaximum( max(mean_value_row_list) + 0.4 * max(mean_value_row_list) )
else:
errorbar_plot_rows.SetMaximum( max(mean_value_row_list) + 0.3 * max(mean_value_row_list) )
############################### Plot fucntion and fit #############################################
if name_of_folder == '10-5_mm':
print(np.std(np.array(x_value)))
personal_gaus.SetParLimits(0, max(mean_value_row_list) * .5, max(mean_value_row_list) * 1.5 )
personal_gaus.SetParLimits(1, np.mean(x_value) * .9, np.mean(x_value) * 1.12)
personal_gaus.SetParLimits(2, np.std(np.array(x_value)) * .1, np.std(np.array(x_value))*0.6 )
elif name_of_folder == '11_mm':
#personal_gaus.SetParameter(1, 66 )
personal_gaus.SetParLimits(0, max(mean_value_row_list) * .5, max(mean_value_row_list) * 1.8)
personal_gaus.SetParLimits(1, np.mean(x_value) * .9, np.mean(x_value) * 1.12 )
personal_gaus.SetParLimits(2, np.std(np.array(x_value)) * .05, np.std(np.array(x_value))*0.8 )
elif name_of_folder == '7_mm':
personal_gaus.SetParLimits(0, max(mean_value_row_list) * .2, max(mean_value_row_list)*1.2 )
personal_gaus.SetParLimits(1, np.mean(x_value) * .7, np.mean(x_value) * 1.3)
personal_gaus.SetParLimits(2, np.std(np.array(x_value)) * .1, np.std(np.array(x_value)) * 1.05 )
elif name_of_folder == '6_mm':
personal_gaus.SetParLimits(0, max(mean_value_row_list) * .2, max(mean_value_row_list) * 1.31 )
personal_gaus.SetParLimits(1, np.mean(x_value) -3, np.mean(x_value)+3 )
personal_gaus.SetParLimits(2, np.std(np.array(x_value)) * .1, np.std(np.array(x_value)) *1.05 )
elif name_of_folder == '9-5_mm':
personal_gaus.SetParLimits(0, max(mean_value_row_list) * .4, max(mean_value_row_list) * 1.3 )
personal_gaus.SetParLimits(1, np.mean(x_value) -1/2, np.mean(x_value)+1 )
personal_gaus.SetParLimits(2, np.std(np.array(x_value)) * .2, np.std(np.array(x_value)) )
elif name_of_folder == '9_mm':
personal_gaus.SetParLimits(0, max(mean_value_row_list) * .4, max(mean_value_row_list) * 1.3 )
personal_gaus.SetParLimits(1, np.mean(x_value) -1/2, np.mean(x_value)+1 )
personal_gaus.SetParLimits(2, np.std(np.array(x_value)) * .1, np.std(np.array(x_value)) )
elif name_of_folder == '12_mm':
personal_gaus.SetParLimits(0, max(mean_value_row_list) * .4, max(mean_value_row_list) * 1.3 )
personal_gaus.SetParLimits(1, np.mean(x_value) -1/2, np.mean(x_value)+1 )
personal_gaus.SetParLimits(2, np.std(np.array(x_value)) * .1, np.std(np.array(x_value)) )
elif name_of_folder == '13_mm':
personal_gaus.SetParLimits(0, max(mean_value_row_list) * .4, max(mean_value_row_list) * 1.3 )
personal_gaus.SetParLimits(1, np.mean(x_value) -1/2, np.mean(x_value)+1 )
personal_gaus.SetParLimits(2, np.std(np.array(x_value)) * .1, np.std(np.array(x_value)) )
elif name_of_folder == '14_mm':
personal_gaus.SetParLimits(0, max(mean_value_row_list) * .4, max(mean_value_row_list) * 1.3 )
personal_gaus.SetParLimits(1, np.mean(x_value) -1/2, np.mean(x_value)+1 )
personal_gaus.SetParLimits(2, np.std(np.array(x_value)) * .1, np.std(np.array(x_value)) )
elif name_of_folder == '5_mm':
personal_gaus.SetParLimits(0, max(mean_value_row_list) * .4, max(mean_value_row_list) * 1.3 )
personal_gaus.SetParLimits(1, np.mean(x_value) -1/2, np.mean(x_value)+1 )
personal_gaus.SetParLimits(2, np.std(np.array(x_value)) * .1, np.std(np.array(x_value)) )
#
#
elif name_of_folder == '10_mm':
personal_gaus.SetParameter(0, max(mean_value_row_list) )
personal_gaus.SetParLimits(0, max(mean_value_row_list) * .4, max(mean_value_row_list) * 1.8)
personal_gaus.SetParLimits(1, np.mean(x_value) * .7, np.mean(x_value) * 1.3 )
personal_gaus.SetParLimits(2, np.std(np.array(x_value)) * .1, np.std(np.array(x_value))*1.4 )
#
#elif name_of_folder == '15_mm':
# #personal_gaus.SetParameter(0, 743 )
# #personal_gaus.SetParameter(1, 66 )
# #personal_gaus.SetParameter(2, 3.05)
#
# personal_gaus.SetParLimits(0,max(mean_value_row_list) * .4, max(mean_value_row_list) * 1.2 )
# personal_gaus.SetParLimits(1, np.mean(x_value) * .3, np.mean(x_value) * 1.4 )
# personal_gaus.SetParLimits(2,np.std(np.array(x_value)) * .2, np.std(np.array(x_value)) * 1.6 )
#
#
#
else:
personal_gaus.SetParLimits(0, max(mean_value_row_list) * .4, max(mean_value_row_list) * 1.5 )
personal_gaus.SetParLimits(1, np.mean(x_value) * .8, np.mean(x_value) * 1.2 )
personal_gaus.SetParLimits(2, np.std(np.array(x_value)) * .2, np.std(np.array(x_value)) * 1.1 )
errorbar_plot_rows.Fit( personal_gaus, "", "", min(x_value) -0.5 , max( x_value) +0.5 )
errorbar_plot_rows.Draw("ap*")
##################################### create legend ################################################
if plot_style == 'thesis':
legend = root.TLegend(0.15,0.71,0.37,0.93)
legend.SetTextSize(0.055)
legend.AddEntry(errorbar_plot_rows,"Summe Hits","lep")
legend.AddEntry( personal_gaus,"Fit","l")
legend.Draw()
else:
legend = root.TLegend(0.65,0.47,0.98,0.7)
legend.SetTextSize(0.04)
legend.AddEntry(errorbar_plot_col,"Row sum hit value","lep")
legend.AddEntry( personal_gaus,"Gaussian Fit","l")
legend.Draw()
######## Transfer Sigma from Bin to mumeter ############################
sigma_mu_meter_row = ufloat(personal_gaus.GetParameter(2), personal_gaus.GetParError(2)) * 100 # 100 is pixel size in y direction
#############################################################################
########################################### saveplot and fit params ########################################
with open( f'./fit_params/{name_of_folder}_fit_parameters_row_yaxis.txt', 'w') as file:
for i in range(0,3):
file.write( name_params[i] + ' ' + str( personal_gaus.GetParameter(i) ) + ' ' + str(personal_gaus.GetParError(i)) + '\n')
with open( f'./sigma_row_yaxis.txt', 'a') as file:
file.write( name_params[i] +'_' + name_of_folder + ' ' + str( personal_gaus.GetParameter(2) ) + ' ' + str(personal_gaus.GetParError(2)) + '\n')
with open( f'./sigma_row_in_mumeter_yaxis.txt', 'a') as file:
file.write( name_params[i] +'_' + name_of_folder + ' ' + str( noms(sigma_mu_meter_row) ) + ' ' + str( stds(sigma_mu_meter_row) ) + '\n')
with open( f'./fit_parameters_row_yaxis.txt', 'a') as file:
file.write( name_of_folder + 'Amplitude/Sigma/Mean:' + ' ' + str( personal_gaus.GetParameter(0) ) + ' ' + str(personal_gaus.GetParError(0)) + ' ' + str( personal_gaus.GetParameter(1) ) + ' ' + str(personal_gaus.GetParError(1)) + ' ' + str( personal_gaus.GetParameter(2) ) + ' ' + str(personal_gaus.GetParError(2)) + '\n')
c2.SaveAs(f'./plots/{name_of_folder}_erorbar_plot_row.pdf')
| [
"numpy.mean",
"sys.path.insert",
"ROOT.gStyle.SetStatFontSize",
"array.array",
"ROOT.TCanvas",
"ROOT.gStyle.SetLabelSize",
"ROOT.TLegend",
"ROOT.gStyle.SetOptTitle",
"ROOT.gStyle.SetTitleOffset",
"numpy.array",
"uncertainties.unumpy.nominal_values",
"uncertainties.unumpy.std_devs",
"ROOT.gSt... | [((511, 558), 'sys.path.insert', 'sys.path.insert', (['(0)', "('./' + name_of_folder + '/')"], {}), "(0, './' + name_of_folder + '/')\n", (526, 558), False, 'import sys\n'), ((771, 797), 'ROOT.gStyle.SetOptTitle', 'root.gStyle.SetOptTitle', (['(0)'], {}), '(0)\n', (794, 797), True, 'import ROOT as root\n'), ((798, 822), 'ROOT.gStyle.SetOptFit', 'root.gStyle.SetOptFit', (['(1)'], {}), '(1)\n', (819, 822), True, 'import ROOT as root\n'), ((823, 859), 'ROOT.gStyle.SetLabelSize', 'root.gStyle.SetLabelSize', (['(0.05)', '"""XY"""'], {}), "(0.05, 'XY')\n", (847, 859), True, 'import ROOT as root\n'), ((860, 896), 'ROOT.gStyle.SetTitleSize', 'root.gStyle.SetTitleSize', (['(0.05)', '"""XY"""'], {}), "(0.05, 'XY')\n", (884, 896), True, 'import ROOT as root\n'), ((897, 932), 'ROOT.gStyle.SetTitleOffset', 'root.gStyle.SetTitleOffset', (['(1)', '"""XY"""'], {}), "(1, 'XY')\n", (923, 932), True, 'import ROOT as root\n'), ((934, 967), 'ROOT.gStyle.SetStatFontSize', 'root.gStyle.SetStatFontSize', (['(0.08)'], {}), '(0.08)\n', (961, 967), True, 'import ROOT as root\n'), ((1056, 1157), 'ROOT.TF1', 'root.TF1', (['"""personal_gaus"""', '""" [0] * exp( -0.5 * ( (x - [1]) / [2] ) * ( (x - [1]) / [2] ) ) """'], {}), "('personal_gaus',\n ' [0] * exp( -0.5 * ( (x - [1]) / [2] ) * ( (x - [1]) / [2] ) ) ')\n", (1064, 1157), True, 'import ROOT as root\n'), ((4294, 4330), 'ROOT.TCanvas', 'root.TCanvas', (['"""c1"""', '"""c1"""', '(1980)', '(1080)'], {}), "('c1', 'c1', 1980, 1080)\n", (4306, 4330), True, 'import ROOT as root\n'), ((9686, 9722), 'ROOT.TCanvas', 'root.TCanvas', (['"""c2"""', '"""c2"""', '(1980)', '(1080)'], {}), "('c2', 'c2', 1980, 1080)\n", (9698, 9722), True, 'import ROOT as root\n'), ((3145, 3171), 'uncertainties.unumpy.uarray', 'unp.uarray', (['content', 'error'], {}), '(content, error)\n', (3155, 3171), True, 'import uncertainties.unumpy as unp\n'), ((3581, 3612), 'array.array', 'array', (['"""f"""', 'mean_value_col_list'], {}), "('f', mean_value_col_list)\n", 
(3586, 3612), False, 'from array import array\n'), ((3615, 3634), 'array.array', 'array', (['"""f"""', 'x_error'], {}), "('f', x_error)\n", (3620, 3634), False, 'from array import array\n'), ((3637, 3668), 'array.array', 'array', (['"""f"""', 'mean_error_col_list'], {}), "('f', mean_error_col_list)\n", (3642, 3668), False, 'from array import array\n'), ((5584, 5620), 'ROOT.TLegend', 'root.TLegend', (['(0.15)', '(0.71)', '(0.37)', '(0.93)'], {}), '(0.15, 0.71, 0.37, 0.93)\n', (5596, 5620), True, 'import ROOT as root\n'), ((5791, 5826), 'ROOT.TLegend', 'root.TLegend', (['(0.65)', '(0.47)', '(0.98)', '(0.7)'], {}), '(0.65, 0.47, 0.98, 0.7)\n', (5803, 5826), True, 'import ROOT as root\n'), ((8982, 9008), 'uncertainties.unumpy.uarray', 'unp.uarray', (['content', 'error'], {}), '(content, error)\n', (8992, 9008), True, 'import uncertainties.unumpy as unp\n'), ((9421, 9452), 'array.array', 'array', (['"""f"""', 'mean_value_row_list'], {}), "('f', mean_value_row_list)\n", (9426, 9452), False, 'from array import array\n'), ((9455, 9474), 'array.array', 'array', (['"""f"""', 'x_error'], {}), "('f', x_error)\n", (9460, 9474), False, 'from array import array\n'), ((9477, 9508), 'array.array', 'array', (['"""f"""', 'mean_error_row_list'], {}), "('f', mean_error_row_list)\n", (9482, 9508), False, 'from array import array\n'), ((15232, 15268), 'ROOT.TLegend', 'root.TLegend', (['(0.15)', '(0.71)', '(0.37)', '(0.93)'], {}), '(0.15, 0.71, 0.37, 0.93)\n', (15244, 15268), True, 'import ROOT as root\n'), ((15440, 15475), 'ROOT.TLegend', 'root.TLegend', (['(0.65)', '(0.47)', '(0.98)', '(0.7)'], {}), '(0.65, 0.47, 0.98, 0.7)\n', (15452, 15475), True, 'import ROOT as root\n'), ((3312, 3334), 'uncertainties.unumpy.nominal_values', 'noms', (['mean_content_col'], {}), '(mean_content_col)\n', (3316, 3334), True, 'from uncertainties.unumpy import nominal_values as noms\n'), ((3368, 3390), 'uncertainties.unumpy.std_devs', 'stds', (['mean_content_col'], {}), '(mean_content_col)\n', (3372, 3390), 
True, 'from uncertainties.unumpy import std_devs as stds\n'), ((9148, 9170), 'uncertainties.unumpy.nominal_values', 'noms', (['mean_content_row'], {}), '(mean_content_row)\n', (9152, 9170), True, 'from uncertainties.unumpy import nominal_values as noms\n'), ((9204, 9226), 'uncertainties.unumpy.std_devs', 'stds', (['mean_content_row'], {}), '(mean_content_row)\n', (9208, 9226), True, 'from uncertainties.unumpy import std_devs as stds\n'), ((4508, 4524), 'numpy.mean', 'np.mean', (['x_value'], {}), '(x_value)\n', (4515, 4524), True, 'import numpy as np\n'), ((4531, 4547), 'numpy.mean', 'np.mean', (['x_value'], {}), '(x_value)\n', (4538, 4547), True, 'import numpy as np\n'), ((10766, 10783), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (10774, 10783), True, 'import numpy as np\n'), ((10918, 10934), 'numpy.mean', 'np.mean', (['x_value'], {}), '(x_value)\n', (10925, 10934), True, 'import numpy as np\n'), ((10941, 10957), 'numpy.mean', 'np.mean', (['x_value'], {}), '(x_value)\n', (10948, 10957), True, 'import numpy as np\n'), ((4597, 4614), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (4605, 4614), True, 'import numpy as np\n'), ((4631, 4648), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (4639, 4648), True, 'import numpy as np\n'), ((4823, 4839), 'numpy.mean', 'np.mean', (['x_value'], {}), '(x_value)\n', (4830, 4839), True, 'import numpy as np\n'), ((4846, 4862), 'numpy.mean', 'np.mean', (['x_value'], {}), '(x_value)\n', (4853, 4862), True, 'import numpy as np\n'), ((5111, 5127), 'numpy.mean', 'np.mean', (['x_value'], {}), '(x_value)\n', (5118, 5127), True, 'import numpy as np\n'), ((5134, 5150), 'numpy.mean', 'np.mean', (['x_value'], {}), '(x_value)\n', (5141, 5150), True, 'import numpy as np\n'), ((11007, 11024), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (11015, 11024), True, 'import numpy as np\n'), ((11039, 11056), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (11047, 11056), True, 
'import numpy as np\n'), ((11269, 11285), 'numpy.mean', 'np.mean', (['x_value'], {}), '(x_value)\n', (11276, 11285), True, 'import numpy as np\n'), ((11292, 11308), 'numpy.mean', 'np.mean', (['x_value'], {}), '(x_value)\n', (11299, 11308), True, 'import numpy as np\n'), ((4912, 4929), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (4920, 4929), True, 'import numpy as np\n'), ((4946, 4963), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (4954, 4963), True, 'import numpy as np\n'), ((5200, 5217), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (5208, 5217), True, 'import numpy as np\n'), ((5234, 5251), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (5242, 5251), True, 'import numpy as np\n'), ((7393, 7417), 'uncertainties.unumpy.std_devs', 'stds', (['sigma_mu_meter_col'], {}), '(sigma_mu_meter_col)\n', (7397, 7417), True, 'from uncertainties.unumpy import std_devs as stds\n'), ((11359, 11376), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (11367, 11376), True, 'import numpy as np\n'), ((11392, 11409), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (11400, 11409), True, 'import numpy as np\n'), ((11579, 11595), 'numpy.mean', 'np.mean', (['x_value'], {}), '(x_value)\n', (11586, 11595), True, 'import numpy as np\n'), ((11602, 11618), 'numpy.mean', 'np.mean', (['x_value'], {}), '(x_value)\n', (11609, 11618), True, 'import numpy as np\n'), ((16657, 16681), 'uncertainties.unumpy.std_devs', 'stds', (['sigma_mu_meter_row'], {}), '(sigma_mu_meter_row)\n', (16661, 16681), True, 'from uncertainties.unumpy import std_devs as stds\n'), ((11667, 11684), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (11675, 11684), True, 'import numpy as np\n'), ((11699, 11716), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (11707, 11716), True, 'import numpy as np\n'), ((11892, 11908), 'numpy.mean', 'np.mean', (['x_value'], {}), '(x_value)\n', (11899, 11908), True, 'import numpy as 
np\n'), ((11913, 11929), 'numpy.mean', 'np.mean', (['x_value'], {}), '(x_value)\n', (11920, 11929), True, 'import numpy as np\n'), ((7353, 7377), 'uncertainties.unumpy.nominal_values', 'noms', (['sigma_mu_meter_col'], {}), '(sigma_mu_meter_col)\n', (7357, 7377), True, 'from uncertainties.unumpy import nominal_values as noms\n'), ((11975, 11992), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (11983, 11992), True, 'import numpy as np\n'), ((12007, 12024), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (12015, 12024), True, 'import numpy as np\n'), ((12201, 12217), 'numpy.mean', 'np.mean', (['x_value'], {}), '(x_value)\n', (12208, 12217), True, 'import numpy as np\n'), ((12224, 12240), 'numpy.mean', 'np.mean', (['x_value'], {}), '(x_value)\n', (12231, 12240), True, 'import numpy as np\n'), ((12318, 12335), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (12326, 12335), True, 'import numpy as np\n'), ((16617, 16641), 'uncertainties.unumpy.nominal_values', 'noms', (['sigma_mu_meter_row'], {}), '(sigma_mu_meter_row)\n', (16621, 16641), True, 'from uncertainties.unumpy import nominal_values as noms\n'), ((12286, 12303), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (12294, 12303), True, 'import numpy as np\n'), ((12504, 12520), 'numpy.mean', 'np.mean', (['x_value'], {}), '(x_value)\n', (12511, 12520), True, 'import numpy as np\n'), ((12527, 12543), 'numpy.mean', 'np.mean', (['x_value'], {}), '(x_value)\n', (12534, 12543), True, 'import numpy as np\n'), ((12621, 12638), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (12629, 12638), True, 'import numpy as np\n'), ((12589, 12606), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (12597, 12606), True, 'import numpy as np\n'), ((12808, 12824), 'numpy.mean', 'np.mean', (['x_value'], {}), '(x_value)\n', (12815, 12824), True, 'import numpy as np\n'), ((12831, 12847), 'numpy.mean', 'np.mean', (['x_value'], {}), '(x_value)\n', (12838, 12847), True, 
'import numpy as np\n'), ((12925, 12942), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (12933, 12942), True, 'import numpy as np\n'), ((12893, 12910), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (12901, 12910), True, 'import numpy as np\n'), ((13112, 13128), 'numpy.mean', 'np.mean', (['x_value'], {}), '(x_value)\n', (13119, 13128), True, 'import numpy as np\n'), ((13135, 13151), 'numpy.mean', 'np.mean', (['x_value'], {}), '(x_value)\n', (13142, 13151), True, 'import numpy as np\n'), ((13229, 13246), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (13237, 13246), True, 'import numpy as np\n'), ((13197, 13214), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (13205, 13214), True, 'import numpy as np\n'), ((13416, 13432), 'numpy.mean', 'np.mean', (['x_value'], {}), '(x_value)\n', (13423, 13432), True, 'import numpy as np\n'), ((13439, 13455), 'numpy.mean', 'np.mean', (['x_value'], {}), '(x_value)\n', (13446, 13455), True, 'import numpy as np\n'), ((13533, 13550), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (13541, 13550), True, 'import numpy as np\n'), ((13501, 13518), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (13509, 13518), True, 'import numpy as np\n'), ((13719, 13735), 'numpy.mean', 'np.mean', (['x_value'], {}), '(x_value)\n', (13726, 13735), True, 'import numpy as np\n'), ((13742, 13758), 'numpy.mean', 'np.mean', (['x_value'], {}), '(x_value)\n', (13749, 13758), True, 'import numpy as np\n'), ((13836, 13853), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (13844, 13853), True, 'import numpy as np\n'), ((13804, 13821), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (13812, 13821), True, 'import numpy as np\n'), ((14087, 14103), 'numpy.mean', 'np.mean', (['x_value'], {}), '(x_value)\n', (14094, 14103), True, 'import numpy as np\n'), ((14110, 14126), 'numpy.mean', 'np.mean', (['x_value'], {}), '(x_value)\n', (14117, 14126), True, 'import numpy 
as np\n'), ((14821, 14837), 'numpy.mean', 'np.mean', (['x_value'], {}), '(x_value)\n', (14828, 14837), True, 'import numpy as np\n'), ((14844, 14860), 'numpy.mean', 'np.mean', (['x_value'], {}), '(x_value)\n', (14851, 14860), True, 'import numpy as np\n'), ((14176, 14193), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (14184, 14193), True, 'import numpy as np\n'), ((14208, 14225), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (14216, 14225), True, 'import numpy as np\n'), ((14910, 14927), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (14918, 14927), True, 'import numpy as np\n'), ((14942, 14959), 'numpy.array', 'np.array', (['x_value'], {}), '(x_value)\n', (14950, 14959), True, 'import numpy as np\n')] |
import torch as th
import math
import numpy as np
from video_loader import VideoLoader
from torch.utils.data import DataLoader
import argparse
from preprocessing import Preprocessing
from random_sequence_shuffler import RandomSequenceSampler
import torch.nn.functional as F
from tqdm import tqdm
import os
import clip
parser = argparse.ArgumentParser(description='Easy video feature extractor')
parser.add_argument(
'--csv',
type=str,
help='input csv with video input path')
parser.add_argument('--batch_size', type=int, default=64,
help='batch size')
parser.add_argument(
'--clip_len', type=float, default=3/2,
help='decoding length of clip (in seconds)')
parser.add_argument(
'--overwrite', action='store_true',
help='allow overwrite output files')
parser.add_argument('--half_precision', type=int, default=1,
help='output half precision float')
parser.add_argument('--num_decoding_thread', type=int, default=4,
help='Num parallel thread for video decoding')
parser.add_argument('--model_version', type=str, default="ViT-B/32",
choices=["ViT-B/32", "RN50x4"],
help='Num parallel thread for video decoding')
args = parser.parse_args()
# model_version = "RN50x4" # "RN50x4" # "ViT-B/32"
output_feat_size = 512 if args.model_version == "ViT-B/32" else 640
dataset = VideoLoader(
args.csv,
framerate=1/args.clip_len,
size=224 if args.model_version == "ViT-B/32" else 288,
centercrop=True,
overwrite=args.overwrite,
model_version=args.model_version
)
n_dataset = len(dataset)
sampler = RandomSequenceSampler(n_dataset, 10)
loader = DataLoader(
dataset,
batch_size=1,
shuffle=False,
num_workers=args.num_decoding_thread,
sampler=sampler if n_dataset > 10 else None,
)
preprocess = Preprocessing()
model, _ = clip.load(args.model_version, device="cuda")
totatl_num_frames = 0
with th.no_grad():
for k, data in enumerate(tqdm(loader)):
if data == {}:
print("problematic video file")
continue
input_file = data['input'][0]
output_file = data['output'][0]
if args.model_version == "RN50x4":
output_file = output_file.replace(
"clip-vit_features", "clip-rn50x4_features")
if os.path.isfile(output_file):
# print(f'Video {input_file} already processed.')
continue
elif not os.path.isfile(input_file):
print(f'{input_file}, does not exist.\n')
elif len(data['video'].shape) > 4:
video = data['video'].squeeze(0)
if len(video.shape) == 4:
video = preprocess(video)
n_chunk = len(video)
features = th.cuda.FloatTensor(
n_chunk, output_feat_size).fill_(0)
n_iter = int(math.ceil(n_chunk / float(args.batch_size)))
for i in range(n_iter):
min_ind = i * args.batch_size
max_ind = (i + 1) * args.batch_size
video_batch = video[min_ind:max_ind].cuda()
batch_features = model.encode_image(video_batch)
features[min_ind:max_ind] = batch_features
features = features.cpu().numpy()
if args.half_precision:
features = features.astype('float16')
totatl_num_frames += features.shape[0]
# safeguard output path before saving
dirname = os.path.dirname(output_file)
if not os.path.exists(dirname):
print(f"Output directory {dirname} does not exists, creating...")
os.makedirs(dirname)
np.savez(output_file, features=features)
else:
print(f'{input_file}, failed at ffprobe.\n')
print(f"Total number of frames: {totatl_num_frames}")
| [
"os.path.exists",
"random_sequence_shuffler.RandomSequenceSampler",
"numpy.savez",
"torch.cuda.FloatTensor",
"argparse.ArgumentParser",
"preprocessing.Preprocessing",
"os.makedirs",
"tqdm.tqdm",
"video_loader.VideoLoader",
"os.path.isfile",
"os.path.dirname",
"torch.utils.data.DataLoader",
"... | [((329, 396), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Easy video feature extractor"""'}), "(description='Easy video feature extractor')\n", (352, 396), False, 'import argparse\n'), ((1414, 1606), 'video_loader.VideoLoader', 'VideoLoader', (['args.csv'], {'framerate': '(1 / args.clip_len)', 'size': "(224 if args.model_version == 'ViT-B/32' else 288)", 'centercrop': '(True)', 'overwrite': 'args.overwrite', 'model_version': 'args.model_version'}), "(args.csv, framerate=1 / args.clip_len, size=224 if args.\n model_version == 'ViT-B/32' else 288, centercrop=True, overwrite=args.\n overwrite, model_version=args.model_version)\n", (1425, 1606), False, 'from video_loader import VideoLoader\n'), ((1656, 1692), 'random_sequence_shuffler.RandomSequenceSampler', 'RandomSequenceSampler', (['n_dataset', '(10)'], {}), '(n_dataset, 10)\n', (1677, 1692), False, 'from random_sequence_shuffler import RandomSequenceSampler\n'), ((1702, 1838), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': 'args.num_decoding_thread', 'sampler': '(sampler if n_dataset > 10 else None)'}), '(dataset, batch_size=1, shuffle=False, num_workers=args.\n num_decoding_thread, sampler=sampler if n_dataset > 10 else None)\n', (1712, 1838), False, 'from torch.utils.data import DataLoader\n'), ((1870, 1885), 'preprocessing.Preprocessing', 'Preprocessing', ([], {}), '()\n', (1883, 1885), False, 'from preprocessing import Preprocessing\n'), ((1897, 1941), 'clip.load', 'clip.load', (['args.model_version'], {'device': '"""cuda"""'}), "(args.model_version, device='cuda')\n", (1906, 1941), False, 'import clip\n'), ((1970, 1982), 'torch.no_grad', 'th.no_grad', ([], {}), '()\n', (1980, 1982), True, 'import torch as th\n'), ((2013, 2025), 'tqdm.tqdm', 'tqdm', (['loader'], {}), '(loader)\n', (2017, 2025), False, 'from tqdm import tqdm\n'), ((2356, 2383), 'os.path.isfile', 'os.path.isfile', (['output_file'], 
{}), '(output_file)\n', (2370, 2383), False, 'import os\n'), ((2485, 2511), 'os.path.isfile', 'os.path.isfile', (['input_file'], {}), '(input_file)\n', (2499, 2511), False, 'import os\n'), ((3575, 3603), 'os.path.dirname', 'os.path.dirname', (['output_file'], {}), '(output_file)\n', (3590, 3603), False, 'import os\n'), ((3795, 3835), 'numpy.savez', 'np.savez', (['output_file'], {'features': 'features'}), '(output_file, features=features)\n', (3803, 3835), True, 'import numpy as np\n'), ((3627, 3650), 'os.path.exists', 'os.path.exists', (['dirname'], {}), '(dirname)\n', (3641, 3650), False, 'import os\n'), ((3758, 3778), 'os.makedirs', 'os.makedirs', (['dirname'], {}), '(dirname)\n', (3769, 3778), False, 'import os\n'), ((2799, 2845), 'torch.cuda.FloatTensor', 'th.cuda.FloatTensor', (['n_chunk', 'output_feat_size'], {}), '(n_chunk, output_feat_size)\n', (2818, 2845), True, 'import torch as th\n')] |
# Copyright 2017 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Registers the Z-Y decomposition for an arbitrary one qubit gate.
See paper "Elementary gates for quantum computing" by <NAME> et al.,
arXiv:quant-ph/9503016v1. (Note: They use different gate definitions!)
Or see theorem 4.1 in Nielsen and Chuang.
Decompose an arbitrary one qubit gate U into
U = e^(i alpha) Rz(beta) Ry(gamma) Rz(delta). If a gate V is element of SU(2),
i.e., determinant == 1, then
V = Rz(beta) Ry(gamma) Rz(delta)
"""
import cmath
import itertools
import math
import numpy
from projectq.cengines import DecompositionRule
from projectq.meta import Control, get_control_count
from projectq.ops import BasicGate, Ph, Ry, Rz
TOLERANCE = 1e-12
def _recognize_arb1qubit(cmd):
"""
Recognize an arbitrary one qubit gate which has a matrix property.
It does not allow gates which have control qubits as otherwise the
AutoReplacer might go into an infinite loop. Use
carb1qubit2cnotrzandry instead.
"""
try:
m = cmd.gate.matrix
if len(m) == 2 and get_control_count(cmd) == 0:
return True
else:
return False
except:
return False
def _test_parameters(matrix, a, b_half, c_half, d_half):
"""
It builds matrix U with parameters (a, b/2, c/2, d/2) and compares against
matrix.
U = [[exp(j*(a-b/2-d/2))*cos(c/2), -exp(j*(a-b/2+d/2))*sin(c/2)],
[exp(j*(a+b/2-d/2))*sin(c/2), exp(j*(a+b/2+d/2))*cos(c/2)]]
Args:
matrix(list): 2x2 matrix
a: parameter of U
b_half: b/2. parameter of U
c_half: c/2. parameter of U
d_half: d/2. parameter of U
Returns:
True if matrix elements of U and `matrix` are TOLERANCE close.
"""
U = [[cmath.exp(1j*(a-b_half-d_half))*math.cos(c_half),
-cmath.exp(1j*(a-b_half+d_half))*math.sin(c_half)],
[cmath.exp(1j*(a+b_half-d_half))*math.sin(c_half),
cmath.exp(1j*(a+b_half+d_half))*math.cos(c_half)]]
return numpy.allclose(U, matrix, rtol=10*TOLERANCE, atol=TOLERANCE)
def _find_parameters(matrix):
"""
Given a 2x2 unitary matrix, find the parameters
a, b/2, c/2, and d/2 such that
matrix == [[exp(j*(a-b/2-d/2))*cos(c/2), -exp(j*(a-b/2+d/2))*sin(c/2)],
[exp(j*(a+b/2-d/2))*sin(c/2), exp(j*(a+b/2+d/2))*cos(c/2)]]
Note:
If the matrix is element of SU(2) (determinant == 1), then
we can choose a = 0.
Args:
matrix(list): 2x2 unitary matrix
Returns:
parameters of the matrix: (a, b/2, c/2, d/2)
"""
# Determine a, b/2, c/2 and d/2 (3 different cases).
# Note: everything is modulo 2pi.
# Case 1: sin(c/2) == 0:
if abs(matrix[0][1]) < TOLERANCE:
two_a = cmath.phase(matrix[0][0]*matrix[1][1]) % (2*math.pi)
if abs(two_a) < TOLERANCE or abs(two_a) > 2*math.pi-TOLERANCE:
# from 2a==0 (mod 2pi), it follows that a==0 or a==pi,
# w.l.g. we can choose a==0 because (see U above)
# c/2 -> c/2 + pi would have the same effect as as a==0 -> a==pi.
a = 0
else:
a = two_a/2.
d_half = 0 # w.l.g
b = cmath.phase(matrix[1][1])-cmath.phase(matrix[0][0])
possible_b_half = [(b/2.) % (2*math.pi), (b/2.+math.pi) % (2*math.pi)]
# As we have fixed a, we need to find correct sign for cos(c/2)
possible_c_half = [0.0, math.pi]
found = False
for b_half, c_half in itertools.product(possible_b_half,
possible_c_half):
if _test_parameters(matrix, a, b_half, c_half, d_half):
found = True
break
if not found:
raise Exception("Couldn't find parameters for matrix ", matrix,
"This shouldn't happen. Maybe the matrix is " +
"not unitary?")
# Case 2: cos(c/2) == 0:
elif abs(matrix[0][0]) < TOLERANCE:
two_a = cmath.phase(-matrix[0][1]*matrix[1][0]) % (2*math.pi)
if abs(two_a) < TOLERANCE or abs(two_a) > 2*math.pi-TOLERANCE:
# from 2a==0 (mod 2pi), it follows that a==0 or a==pi,
# w.l.g. we can choose a==0 because (see U above)
# c/2 -> c/2 + pi would have the same effect as as a==0 -> a==pi.
a = 0
else:
a = two_a/2.
d_half = 0 # w.l.g
b = cmath.phase(matrix[1][0])-cmath.phase(matrix[0][1]) + math.pi
possible_b_half = [(b/2.) % (2*math.pi), (b/2.+math.pi) % (2*math.pi)]
# As we have fixed a, we need to find correct sign for sin(c/2)
possible_c_half = [math.pi/2., 3./2.*math.pi]
found = False
for b_half, c_half in itertools.product(possible_b_half,
possible_c_half):
if _test_parameters(matrix, a, b_half, c_half, d_half):
found = True
break
if not found:
raise Exception("Couldn't find parameters for matrix ", matrix,
"This shouldn't happen. Maybe the matrix is " +
"not unitary?")
# Case 3: sin(c/2) != 0 and cos(c/2) !=0:
else:
two_a = cmath.phase(matrix[0][0]*matrix[1][1]) % (2*math.pi)
if abs(two_a) < TOLERANCE or abs(two_a) > 2*math.pi-TOLERANCE:
# from 2a==0 (mod 2pi), it follows that a==0 or a==pi,
# w.l.g. we can choose a==0 because (see U above)
# c/2 -> c/2 + pi would have the same effect as as a==0 -> a==pi.
a = 0
else:
a = two_a/2.
two_d = 2.*cmath.phase(matrix[0][1])-2.*cmath.phase(matrix[0][0])
possible_d_half = [two_d/4. % (2*math.pi),
(two_d/4.+math.pi/2.) % (2*math.pi),
(two_d/4.+math.pi) % (2*math.pi),
(two_d/4.+3./2.*math.pi) % (2*math.pi)]
two_b = 2.*cmath.phase(matrix[1][0])-2.*cmath.phase(matrix[0][0])
possible_b_half = [two_b/4. % (2*math.pi),
(two_b/4.+math.pi/2.) % (2*math.pi),
(two_b/4.+math.pi) % (2*math.pi),
(two_b/4.+3./2.*math.pi) % (2*math.pi)]
tmp = math.acos(abs(matrix[1][1]))
possible_c_half = [tmp % (2*math.pi),
(tmp+math.pi) % (2*math.pi),
(-1.*tmp) % (2*math.pi),
(-1.*tmp+math.pi) % (2*math.pi)]
found = False
for b_half, c_half, d_half in itertools.product(possible_b_half,
possible_c_half,
possible_d_half):
if _test_parameters(matrix, a, b_half, c_half, d_half):
found = True
break
if not found:
raise Exception("Couldn't find parameters for matrix ", matrix,
"This shouldn't happen. Maybe the matrix is " +
"not unitary?")
return (a, b_half, c_half, d_half)
def _decompose_arb1qubit(cmd):
"""
Use Z-Y decomposition of Nielsen and Chuang (Theorem 4.1).
An arbitrary one qubit gate matrix can be writen as
U = [[exp(j*(a-b/2-d/2))*cos(c/2), -exp(j*(a-b/2+d/2))*sin(c/2)],
[exp(j*(a+b/2-d/2))*sin(c/2), exp(j*(a+b/2+d/2))*cos(c/2)]]
where a,b,c,d are real numbers.
Then U = exp(j*a) Rz(b) Ry(c) Rz(d).
If the matrix is element of SU(2) (determinant == 1), then
we can choose a = 0.
"""
matrix = cmd.gate.matrix.tolist()
a, b_half, c_half, d_half = _find_parameters(matrix)
qb = cmd.qubits
eng = cmd.engine
with Control(eng, cmd.control_qubits):
if Rz(2*d_half) != Rz(0):
Rz(2*d_half) | qb
if Ry(2*c_half) != Ry(0):
Ry(2*c_half) | qb
if Rz(2*b_half) != Rz(0):
Rz(2*b_half) | qb
if a != 0:
Ph(a) | qb
all_defined_decomposition_rules = [
DecompositionRule(BasicGate, _decompose_arb1qubit, _recognize_arb1qubit)
]
| [
"numpy.allclose",
"projectq.cengines.DecompositionRule",
"projectq.ops.Ph",
"projectq.ops.Ry",
"itertools.product",
"math.cos",
"cmath.exp",
"cmath.phase",
"projectq.meta.Control",
"math.sin",
"projectq.ops.Rz",
"projectq.meta.get_control_count"
] | [((2586, 2648), 'numpy.allclose', 'numpy.allclose', (['U', 'matrix'], {'rtol': '(10 * TOLERANCE)', 'atol': 'TOLERANCE'}), '(U, matrix, rtol=10 * TOLERANCE, atol=TOLERANCE)\n', (2600, 2648), False, 'import numpy\n'), ((8659, 8731), 'projectq.cengines.DecompositionRule', 'DecompositionRule', (['BasicGate', '_decompose_arb1qubit', '_recognize_arb1qubit'], {}), '(BasicGate, _decompose_arb1qubit, _recognize_arb1qubit)\n', (8676, 8731), False, 'from projectq.cengines import DecompositionRule\n'), ((4053, 4104), 'itertools.product', 'itertools.product', (['possible_b_half', 'possible_c_half'], {}), '(possible_b_half, possible_c_half)\n', (4070, 4104), False, 'import itertools\n'), ((8349, 8381), 'projectq.meta.Control', 'Control', (['eng', 'cmd.control_qubits'], {}), '(eng, cmd.control_qubits)\n', (8356, 8381), False, 'from projectq.meta import Control, get_control_count\n'), ((3329, 3369), 'cmath.phase', 'cmath.phase', (['(matrix[0][0] * matrix[1][1])'], {}), '(matrix[0][0] * matrix[1][1])\n', (3340, 3369), False, 'import cmath\n'), ((3757, 3782), 'cmath.phase', 'cmath.phase', (['matrix[1][1]'], {}), '(matrix[1][1])\n', (3768, 3782), False, 'import cmath\n'), ((3783, 3808), 'cmath.phase', 'cmath.phase', (['matrix[0][0]'], {}), '(matrix[0][0])\n', (3794, 3808), False, 'import cmath\n'), ((5324, 5375), 'itertools.product', 'itertools.product', (['possible_b_half', 'possible_c_half'], {}), '(possible_b_half, possible_c_half)\n', (5341, 5375), False, 'import itertools\n'), ((7173, 7241), 'itertools.product', 'itertools.product', (['possible_b_half', 'possible_c_half', 'possible_d_half'], {}), '(possible_b_half, possible_c_half, possible_d_half)\n', (7190, 7241), False, 'import itertools\n'), ((8394, 8408), 'projectq.ops.Rz', 'Rz', (['(2 * d_half)'], {}), '(2 * d_half)\n', (8396, 8408), False, 'from projectq.ops import BasicGate, Ph, Ry, Rz\n'), ((8410, 8415), 'projectq.ops.Rz', 'Rz', (['(0)'], {}), '(0)\n', (8412, 8415), False, 'from projectq.ops import BasicGate, Ph, 
Ry, Rz\n'), ((8458, 8472), 'projectq.ops.Ry', 'Ry', (['(2 * c_half)'], {}), '(2 * c_half)\n', (8460, 8472), False, 'from projectq.ops import BasicGate, Ph, Ry, Rz\n'), ((8474, 8479), 'projectq.ops.Ry', 'Ry', (['(0)'], {}), '(0)\n', (8476, 8479), False, 'from projectq.ops import BasicGate, Ph, Ry, Rz\n'), ((8522, 8536), 'projectq.ops.Rz', 'Rz', (['(2 * b_half)'], {}), '(2 * b_half)\n', (8524, 8536), False, 'from projectq.ops import BasicGate, Ph, Ry, Rz\n'), ((8538, 8543), 'projectq.ops.Rz', 'Rz', (['(0)'], {}), '(0)\n', (8540, 8543), False, 'from projectq.ops import BasicGate, Ph, Ry, Rz\n'), ((1638, 1660), 'projectq.meta.get_control_count', 'get_control_count', (['cmd'], {}), '(cmd)\n', (1655, 1660), False, 'from projectq.meta import Control, get_control_count\n'), ((2342, 2381), 'cmath.exp', 'cmath.exp', (['(1.0j * (a - b_half - d_half))'], {}), '(1.0j * (a - b_half - d_half))\n', (2351, 2381), False, 'import cmath\n'), ((2374, 2390), 'math.cos', 'math.cos', (['c_half'], {}), '(c_half)\n', (2382, 2390), False, 'import math\n'), ((2435, 2451), 'math.sin', 'math.sin', (['c_half'], {}), '(c_half)\n', (2443, 2451), False, 'import math\n'), ((2464, 2503), 'cmath.exp', 'cmath.exp', (['(1.0j * (a + b_half - d_half))'], {}), '(1.0j * (a + b_half - d_half))\n', (2473, 2503), False, 'import cmath\n'), ((2496, 2512), 'math.sin', 'math.sin', (['c_half'], {}), '(c_half)\n', (2504, 2512), False, 'import math\n'), ((2524, 2563), 'cmath.exp', 'cmath.exp', (['(1.0j * (a + b_half + d_half))'], {}), '(1.0j * (a + b_half + d_half))\n', (2533, 2563), False, 'import cmath\n'), ((2556, 2572), 'math.cos', 'math.cos', (['c_half'], {}), '(c_half)\n', (2564, 2572), False, 'import math\n'), ((4576, 4617), 'cmath.phase', 'cmath.phase', (['(-matrix[0][1] * matrix[1][0])'], {}), '(-matrix[0][1] * matrix[1][0])\n', (4587, 4617), False, 'import cmath\n'), ((5834, 5874), 'cmath.phase', 'cmath.phase', (['(matrix[0][0] * matrix[1][1])'], {}), '(matrix[0][0] * matrix[1][1])\n', (5845, 5874), False, 
'import cmath\n'), ((8429, 8443), 'projectq.ops.Rz', 'Rz', (['(2 * d_half)'], {}), '(2 * d_half)\n', (8431, 8443), False, 'from projectq.ops import BasicGate, Ph, Ry, Rz\n'), ((8493, 8507), 'projectq.ops.Ry', 'Ry', (['(2 * c_half)'], {}), '(2 * c_half)\n', (8495, 8507), False, 'from projectq.ops import BasicGate, Ph, Ry, Rz\n'), ((8557, 8571), 'projectq.ops.Rz', 'Rz', (['(2 * b_half)'], {}), '(2 * b_half)\n', (8559, 8571), False, 'from projectq.ops import BasicGate, Ph, Ry, Rz\n'), ((8606, 8611), 'projectq.ops.Ph', 'Ph', (['a'], {}), '(a)\n', (8608, 8611), False, 'from projectq.ops import BasicGate, Ph, Ry, Rz\n'), ((2403, 2442), 'cmath.exp', 'cmath.exp', (['(1.0j * (a - b_half + d_half))'], {}), '(1.0j * (a - b_half + d_half))\n', (2412, 2442), False, 'import cmath\n'), ((5005, 5030), 'cmath.phase', 'cmath.phase', (['matrix[1][0]'], {}), '(matrix[1][0])\n', (5016, 5030), False, 'import cmath\n'), ((5031, 5056), 'cmath.phase', 'cmath.phase', (['matrix[0][1]'], {}), '(matrix[0][1])\n', (5042, 5056), False, 'import cmath\n'), ((6241, 6266), 'cmath.phase', 'cmath.phase', (['matrix[0][1]'], {}), '(matrix[0][1])\n', (6252, 6266), False, 'import cmath\n'), ((6270, 6295), 'cmath.phase', 'cmath.phase', (['matrix[0][0]'], {}), '(matrix[0][0])\n', (6281, 6295), False, 'import cmath\n'), ((6558, 6583), 'cmath.phase', 'cmath.phase', (['matrix[1][0]'], {}), '(matrix[1][0])\n', (6569, 6583), False, 'import cmath\n'), ((6587, 6612), 'cmath.phase', 'cmath.phase', (['matrix[0][0]'], {}), '(matrix[0][0])\n', (6598, 6612), False, 'import cmath\n')] |
import minifemlib
from minifemlib import Elements
import triangulation
from triangulation import Triangulation
import dmsh
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import Delaunay
from scipy.sparse import csc_matrix, linalg as sla
import scipy.linalg
from matplotlib import rcParams
from matplotlib import animation
import matplotlib.cm as cm
def condIniciales(n):
if (n == 1):
def g1(x,y):
return(x<y)
return g1
elif (n == 2):
def g2(x,y):
return(x<0)
return g2
elif (n == 3):
def g3(x,y):
return(x<(3/4))*(x>(1/4))
return g3
elif (n == 4):
def g4(x,y):
return((x**2)+(y**2)<(1/4))
return g4
else:
def g5(x,y):
return((x**2)+(y**2)>(1/3))*((x**2)+(y**2)<(2/3))
return g5
#Terminos de acople:
def F(u,v):
return(0.1*np.ones(len(u))-u+(u**2)*v)
def G(u,v):
return(1*np.ones(len(u))-(u**2)*v)
#StiffnessLaplacian de <NAME>
def StiffnessLaplacian2(T,Egeom,order):
# Matriz de rigidez del laplaciano.
# Reservamos espacio
n_nodes = len(T.points)
n_elem = len(T.simplices)
A = np.zeros([n_nodes,n_nodes])
B = np.zeros([n_nodes,n_nodes])
# Construimos bases de Lagrange y nodos de cuadratura
phi, gradphi, Xnodes, quadw = Elements(Egeom,order)
# Pre-calcular matriz local: int_T0 gradphi_i gradphi_j dx
S = np.zeros([3,3])
R = np.zeros([3,3])
for i in range(3):
for j in range(3):
S[i,j] = np.sum(np.sum(gradphi(i,Xnodes)*gradphi(j,Xnodes),1)*quadw)
R[i,j] = np.sum(np.array(phi(i,Xnodes)*phi(j,Xnodes))*quadw)
# Matriz global, recorriendo los elementos
for i in range(n_elem):
# Índices de los vértices del triángulo i-ésimo (T_i)
vertex_index = T.simplices[i]
# Contribución a la matriz de rigidez
A[np.ix_(vertex_index,vertex_index)] = A[np.ix_(vertex_index,vertex_index)] + R
B[np.ix_(vertex_index,vertex_index)] = B[np.ix_(vertex_index,vertex_index)] + S
return A, B
def matrizTuring(T,Egeom,order,u0,tiempo,dt,d,gamma,f,g):
n_nodes = len(T.points)
n_elem = len(T.simplices)
phi, gradphi, Xnodes, quadw = Elements(Egeom,order)
A, B = StiffnessLaplacian2(T,Egeom,order)
t=np.arange(0,dt+tiempo,dt)
Lt=len(t)
u=np.zeros((n_nodes,Lt))
v=np.zeros((n_nodes,Lt))
for ni in range(0,n_nodes):
ci=condIniciales(u0)
u[ni,0]=ci(T.points[ni,0],T.points[ni,1]);
v[ni,0]=1-u[ni,0];
#Ciclo temporal
M=scipy.linalg.block_diag(A+dt*B,A+dt*d*B)
M=csc_matrix(M)
print('LU')
M_lu = sla.splu(M)
vector=np.zeros(2*n_nodes)
print('iteraciones')
for l in range(0,Lt-1):
vector[0:n_nodes]=np.dot(A,u[:,l])+dt*gamma*np.dot(A,f(u[:,l],v[:,l]))
vector[n_nodes:2*n_nodes]=np.dot(A,v[:,l])+dt*gamma*np.dot(A,g(u[:,l],v[:,l]))
aux=M_lu.solve(vector)
u[:,l+1]=aux[0:n_nodes]
v[:,l+1]=aux[n_nodes:2*n_nodes]
print('Preparando plot')
frames = []
fig = plt.figure()
for n in range(0,Lt):
im=plt.tripcolor(T.points[:,0], T.points[:,1],T.simplices, u[:,n],shading='flat')
frames.append([im])
ani = animation.ArtistAnimation(fig, frames, interval=100, blit=True,repeat_delay=1000)
# ani.save('movie.mp4')
plt.show()
print('Dmsh')
c = dmsh.Circle([0, 0], 1)
points, cells = dmsh.generate(c, 0.05)
T=Triangulation(points, cells)
Tf = 0.5
dt = 0.0001
d = 10
gamma = 200
u0 = 3
print('Llamando solver')
matrizTuring(T,'triangular',1,u0,Tf,dt,d,gamma,F,G)
| [
"triangulation.Triangulation",
"scipy.sparse.linalg.splu",
"dmsh.Circle",
"numpy.ix_",
"matplotlib.animation.ArtistAnimation",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.dot",
"dmsh.generate",
"minifemlib.Elements",
"scipy.sparse.csc_matrix",
"matplotlib.pyplot.tripcolor",
"numpy.aran... | [((3475, 3497), 'dmsh.Circle', 'dmsh.Circle', (['[0, 0]', '(1)'], {}), '([0, 0], 1)\n', (3486, 3497), False, 'import dmsh\n'), ((3514, 3536), 'dmsh.generate', 'dmsh.generate', (['c', '(0.05)'], {}), '(c, 0.05)\n', (3527, 3536), False, 'import dmsh\n'), ((3539, 3567), 'triangulation.Triangulation', 'Triangulation', (['points', 'cells'], {}), '(points, cells)\n', (3552, 3567), False, 'from triangulation import Triangulation\n'), ((1205, 1233), 'numpy.zeros', 'np.zeros', (['[n_nodes, n_nodes]'], {}), '([n_nodes, n_nodes])\n', (1213, 1233), True, 'import numpy as np\n'), ((1241, 1269), 'numpy.zeros', 'np.zeros', (['[n_nodes, n_nodes]'], {}), '([n_nodes, n_nodes])\n', (1249, 1269), True, 'import numpy as np\n'), ((1366, 1388), 'minifemlib.Elements', 'Elements', (['Egeom', 'order'], {}), '(Egeom, order)\n', (1374, 1388), False, 'from minifemlib import Elements\n'), ((1464, 1480), 'numpy.zeros', 'np.zeros', (['[3, 3]'], {}), '([3, 3])\n', (1472, 1480), True, 'import numpy as np\n'), ((1488, 1504), 'numpy.zeros', 'np.zeros', (['[3, 3]'], {}), '([3, 3])\n', (1496, 1504), True, 'import numpy as np\n'), ((2301, 2323), 'minifemlib.Elements', 'Elements', (['Egeom', 'order'], {}), '(Egeom, order)\n', (2309, 2323), False, 'from minifemlib import Elements\n'), ((2375, 2404), 'numpy.arange', 'np.arange', (['(0)', '(dt + tiempo)', 'dt'], {}), '(0, dt + tiempo, dt)\n', (2384, 2404), True, 'import numpy as np\n'), ((2421, 2444), 'numpy.zeros', 'np.zeros', (['(n_nodes, Lt)'], {}), '((n_nodes, Lt))\n', (2429, 2444), True, 'import numpy as np\n'), ((2450, 2473), 'numpy.zeros', 'np.zeros', (['(n_nodes, Lt)'], {}), '((n_nodes, Lt))\n', (2458, 2473), True, 'import numpy as np\n'), ((2691, 2704), 'scipy.sparse.csc_matrix', 'csc_matrix', (['M'], {}), '(M)\n', (2701, 2704), False, 'from scipy.sparse import csc_matrix, linalg as sla\n'), ((2732, 2743), 'scipy.sparse.linalg.splu', 'sla.splu', (['M'], {}), '(M)\n', (2740, 2743), True, 'from scipy.sparse import csc_matrix, linalg 
as sla\n'), ((2755, 2776), 'numpy.zeros', 'np.zeros', (['(2 * n_nodes)'], {}), '(2 * n_nodes)\n', (2763, 2776), True, 'import numpy as np\n'), ((3162, 3174), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3172, 3174), True, 'import matplotlib.pyplot as plt\n'), ((3330, 3416), 'matplotlib.animation.ArtistAnimation', 'animation.ArtistAnimation', (['fig', 'frames'], {'interval': '(100)', 'blit': '(True)', 'repeat_delay': '(1000)'}), '(fig, frames, interval=100, blit=True,\n repeat_delay=1000)\n', (3355, 3416), False, 'from matplotlib import animation\n'), ((3444, 3454), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3452, 3454), True, 'import matplotlib.pyplot as plt\n'), ((3212, 3300), 'matplotlib.pyplot.tripcolor', 'plt.tripcolor', (['T.points[:, 0]', 'T.points[:, 1]', 'T.simplices', 'u[:, n]'], {'shading': '"""flat"""'}), "(T.points[:, 0], T.points[:, 1], T.simplices, u[:, n], shading\n ='flat')\n", (3225, 3300), True, 'import matplotlib.pyplot as plt\n'), ((1962, 1996), 'numpy.ix_', 'np.ix_', (['vertex_index', 'vertex_index'], {}), '(vertex_index, vertex_index)\n', (1968, 1996), True, 'import numpy as np\n'), ((2050, 2084), 'numpy.ix_', 'np.ix_', (['vertex_index', 'vertex_index'], {}), '(vertex_index, vertex_index)\n', (2056, 2084), True, 'import numpy as np\n'), ((2859, 2877), 'numpy.dot', 'np.dot', (['A', 'u[:, l]'], {}), '(A, u[:, l])\n', (2865, 2877), True, 'import numpy as np\n'), ((2946, 2964), 'numpy.dot', 'np.dot', (['A', 'v[:, l]'], {}), '(A, v[:, l])\n', (2952, 2964), True, 'import numpy as np\n'), ((2001, 2035), 'numpy.ix_', 'np.ix_', (['vertex_index', 'vertex_index'], {}), '(vertex_index, vertex_index)\n', (2007, 2035), True, 'import numpy as np\n'), ((2089, 2123), 'numpy.ix_', 'np.ix_', (['vertex_index', 'vertex_index'], {}), '(vertex_index, vertex_index)\n', (2095, 2123), True, 'import numpy as np\n')] |
import numpy as np
from patterns.pattern import Pattern
class ThreeBlackCrows(Pattern):
    """Detector for the bearish 'three black crows' candlestick pattern."""
    def __init__(self, data, lower_shadow_threshold: float = 0.5):
        """Constructor of ThreeBlackCrows class

        Parameters
        ----------
        data : pandas dataframe
            A pandas dataframe, expected to have at least the Open, High, Low, Close, Volume columns
        lower_shadow_threshold : float
            The ratio (lower shadow / |real body|) above which the lower shadow
            is no longer considered small (default 0.5)
        """
        super().__init__(data)
        self.lower_shadow_threshold = lower_shadow_threshold
    def compute_pattern(self):
        """
        Computes if a candlestick is a three black crows pattern.
        Conditions are the following:
            - three negative candles in a row, with tall real body
            - short or no lower shadow (the threshold is 50% of the real body by
              default, adjustable via `lower_shadow_threshold`)
            - ideally, the open of candles 2 and 3 is inside the real body of the
              previous one, but this won't happen much in crypto (not enforced here)
        Returns
        -------
        self.data : pandas.DataFrame
            the input dataframe, with one new column:
                - 'three_black_crows' with bool
        """
        # Three negative candles in a row (current bar and the two before it).
        three_negative = np.all([self.real_body < 0,
                                 self.real_body.shift() < 0,
                                 self.real_body.shift(2) < 0],
                                axis=0)
        # Three short lower shadows in a row, measured relative to |real body|.
        lower_shadows = np.all([self.lower_shadow / np.abs(self.real_body) < self.lower_shadow_threshold,
                                self.lower_shadow.shift() / np.abs(self.real_body.shift()) < self.lower_shadow_threshold,
                                self.lower_shadow.shift(2) / np.abs(self.real_body.shift(2)) < self.lower_shadow_threshold],
                               axis=0)
        self.data['three_black_crows'] = np.logical_and(three_negative, lower_shadows)
        return self.data
| [
"numpy.abs",
"numpy.logical_and"
] | [((1973, 2018), 'numpy.logical_and', 'np.logical_and', (['three_negative', 'lower_shadows'], {}), '(three_negative, lower_shadows)\n', (1987, 2018), True, 'import numpy as np\n'), ((1591, 1613), 'numpy.abs', 'np.abs', (['self.real_body'], {}), '(self.real_body)\n', (1597, 1613), True, 'import numpy as np\n')] |
import numpy as np
def ornstein_uhlenbeck(input, theta=0.1, sigma=0.2):
    """One Euler step of an Ornstein-Uhlenbeck perturbation.

    Applies a mean-reverting drift (-theta * input) plus Gaussian noise drawn
    from a standard normal scaled by sigma, and returns the perturbed value.

    :param input: current value of the process
    :param theta: mean-reversion rate (pull back towards zero)
    :param sigma: scale of the Gaussian (Wiener-process) noise term
    :return: input + (-theta * input + sigma * N(0, 1))
    """
    noise_perturb = -theta*input + sigma*np.random.normal()
    return input + noise_perturb
# Drive the OU process for 20 steps starting from 0, printing each value in
# units of pi.
noise = 0
for _ in range(20):
    noise = ornstein_uhlenbeck(noise)
    print(noise/(np.pi))
"numpy.random.normal"
] | [((192, 210), 'numpy.random.normal', 'np.random.normal', ([], {}), '()\n', (208, 210), True, 'import numpy as np\n')] |
# Copyright (C) 2020 NumS Development Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import numpy as np
from nums.core.array.application import ArrayApplication
def test_quantile_percentile(app_inst: ArrayApplication):
    """Check quantile/percentile against NumPy on a dense and a duplicate-valued input."""
    # see https://github.com/dask/dask/blob/main/dask/array/tests/test_percentiles.py
    qs = [0, 50, 100]
    methods = ["tdigest"]
    interpolations = ["linear"]

    def check(np_x, ba_x):
        # Every (q, method, interpolation) combination must match NumPy exactly.
        for q, method, interpolation in itertools.product(qs, methods, interpolations):
            got_q = app_inst.quantile(
                ba_x, q / 100, method=method, interpolation=interpolation
            ).get()
            assert got_q == np.quantile(np_x, q / 100)
            got_p = app_inst.percentile(
                ba_x, q, method=method, interpolation=interpolation
            ).get()
            assert got_p == np.percentile(np_x, q)

    check(np.ones((10,)), app_inst.ones(shape=(10,), block_shape=(2,)))
    np_x = np.array([0, 0, 5, 5, 5, 5, 20, 20])
    check(np_x, app_inst.array(np_x, block_shape=(3,)))
def test_quickselect(app_inst: ArrayApplication):
    """Verify _quickselect returns order statistics consistent with np.partition."""
    # pylint: disable=protected-access
    def select(arr_oids, idx):
        # Run the distributed quickselect and fetch the scalar result.
        return app_inst.cm.get(app_inst._quickselect(arr_oids, idx))

    # Deterministic case: descending order statistics of a small array.
    ba_x = app_inst.array(np.array([3, 7, 2, 4, 5, 1, 5, 6]), block_shape=(3,))
    oids = ba_x.flattened_oids()
    expected = [7, 6, 5, 5, 4, 3, 2, 1]
    for idx in range(-8, 8):
        assert select(oids, idx) == expected[idx]

    # Randomized cases across shapes, block shapes and k values.
    cases = itertools.product(
        [(50,), (437,), (1000,)],
        [(10,), (23,), (50,)],
        [-50, -42, -25, -13, 0, 8, 25, 36, 49],
    )
    for shape, block_shape, k in cases:
        ba_x = app_inst.random.random(shape=shape, block_shape=block_shape)
        got = select(ba_x.flattened_oids(), k)
        assert got == np.partition(ba_x.get(), -k - 1)[-k - 1]
def test_median(app_inst: ArrayApplication):
    """Compare app_inst.median with np.median on fixed and random arrays."""
    # Odd- and even-length deterministic inputs.
    for values in ([7, 2, 4, 5, 1, 5, 6], [3, 7, 2, 4, 5, 1, 5, 6]):
        np_x = np.array(values)
        ba_x = app_inst.array(np_x, block_shape=(3,))
        assert app_inst.median(ba_x).get() == np.median(np_x)
    # Random arrays across several shape / block-shape combinations.
    for shape, block_shape in itertools.product([(50,), (437,), (1000,)],
                                                [(10,), (23,), (50,)]):
        ba_x = app_inst.random.random(shape=shape, block_shape=block_shape)
        assert app_inst.median(ba_x).get() == np.median(ba_x.get())
def _check_top_k(app_inst, ba_x, np_x, k, largest):
    """Assert top_k returns k value/index pairs consistent with np.partition."""
    ba_v, ba_i = app_inst.top_k(ba_x, k, largest=largest)
    if largest:
        np_v = np.partition(np_x, -k)[-k:]
    else:
        np_v = np.partition(np_x, k - 1)[:k]
    assert len(ba_v.get()) == k and len(ba_i.get()) == k
    # Each returned value must be among the true top-k, and each index must
    # point back to that value in the source array.
    for v, i in zip(ba_v.get(), ba_i.get()):
        assert v in np_v
        assert np_x[i] == v


def test_top_k(app_inst: ArrayApplication):
    """Check top_k (largest and smallest) on a fixed array and random arrays.

    The original body repeated the same verification four times; it is now
    factored into _check_top_k, with identical assertions.
    """
    # Simple tests
    np_x = np.array([3, 7, 2, 4, 5, 1, 5, 6])
    ba_x = app_inst.array(np_x, block_shape=(3,))
    for k in range(1, len(np_x) + 1):
        _check_top_k(app_inst, ba_x, np_x, k, largest=True)
        _check_top_k(app_inst, ba_x, np_x, k, largest=False)
    # Randomized tests
    shapes = [(50,), (437,), (1000,)]
    block_shapes = [(10,), (23,), (50,)]
    ks = range(1, 51, 15)
    for shape, block_shape, k in itertools.product(shapes, block_shapes, ks):
        ba_x = app_inst.random.random(shape=shape, block_shape=block_shape)
        np_x = ba_x.get()
        _check_top_k(app_inst, ba_x, np_x, k, largest=True)
        _check_top_k(app_inst, ba_x, np_x, k, largest=False)
def test_cov(app_inst):
    """Compare app_inst.cov with np.cov over all rowvar/bias combinations and dtype."""
    np_x = np.arange(30).reshape(10, 3)
    ba_x = app_inst.array(np_x, block_shape=(3, 2))
    # Same four combinations as spelling them out one by one.
    for rowvar, bias in itertools.product([False, True], [False, True]):
        expected = np.cov(np_x, rowvar=rowvar, bias=bias)
        actual = app_inst.cov(ba_x, rowvar=rowvar, bias=bias).get()
        assert np.allclose(expected, actual)
    # The dtype argument must propagate to the result.
    assert (
        np.cov(np_x, dtype=np.float16).dtype
        == app_inst.cov(ba_x, dtype=np.float16).get().dtype
    )
if __name__ == "__main__":
# pylint: disable=import-error
from nums.core import application_manager
from nums.core import settings
settings.system_name = "serial"
app_inst = application_manager.instance()
test_quantile_percentile(app_inst)
test_cov(app_inst)
# test_quickselect(app_inst)
# test_median(app_inst)
# test_top_k(app_inst)
| [
"numpy.median",
"numpy.ones",
"nums.core.application_manager.instance",
"itertools.product",
"numpy.partition",
"numpy.array",
"numpy.quantile",
"numpy.percentile",
"numpy.cov",
"numpy.arange"
] | [((925, 939), 'numpy.ones', 'np.ones', (['(10,)'], {}), '((10,))\n', (932, 939), True, 'import numpy as np\n'), ((1032, 1078), 'itertools.product', 'itertools.product', (['qs', 'methods', 'interpolations'], {}), '(qs, methods, interpolations)\n', (1049, 1078), False, 'import itertools\n'), ((1384, 1420), 'numpy.array', 'np.array', (['[0, 0, 5, 5, 5, 5, 20, 20]'], {}), '([0, 0, 5, 5, 5, 5, 20, 20])\n', (1392, 1420), True, 'import numpy as np\n'), ((1507, 1553), 'itertools.product', 'itertools.product', (['qs', 'methods', 'interpolations'], {}), '(qs, methods, interpolations)\n', (1524, 1553), False, 'import itertools\n'), ((1968, 2002), 'numpy.array', 'np.array', (['[3, 7, 2, 4, 5, 1, 5, 6]'], {}), '([3, 7, 2, 4, 5, 1, 5, 6])\n', (1976, 2002), True, 'import numpy as np\n'), ((2472, 2516), 'itertools.product', 'itertools.product', (['shapes', 'block_shapes', 'kth'], {}), '(shapes, block_shapes, kth)\n', (2489, 2516), False, 'import itertools\n'), ((2873, 2904), 'numpy.array', 'np.array', (['[7, 2, 4, 5, 1, 5, 6]'], {}), '([7, 2, 4, 5, 1, 5, 6])\n', (2881, 2904), True, 'import numpy as np\n'), ((3025, 3059), 'numpy.array', 'np.array', (['[3, 7, 2, 4, 5, 1, 5, 6]'], {}), '([3, 7, 2, 4, 5, 1, 5, 6])\n', (3033, 3059), True, 'import numpy as np\n'), ((3301, 3340), 'itertools.product', 'itertools.product', (['shapes', 'block_shapes'], {}), '(shapes, block_shapes)\n', (3318, 3340), False, 'import itertools\n'), ((3562, 3596), 'numpy.array', 'np.array', (['[3, 7, 2, 4, 5, 1, 5, 6]'], {}), '([3, 7, 2, 4, 5, 1, 5, 6])\n', (3570, 3596), True, 'import numpy as np\n'), ((4419, 4462), 'itertools.product', 'itertools.product', (['shapes', 'block_shapes', 'ks'], {}), '(shapes, block_shapes, ks)\n', (4436, 4462), False, 'import itertools\n'), ((6122, 6152), 'nums.core.application_manager.instance', 'application_manager.instance', ([], {}), '()\n', (6150, 6152), False, 'from nums.core import application_manager\n'), ((2997, 3012), 'numpy.median', 'np.median', (['np_x'], {}), 
'(np_x)\n', (3006, 3012), True, 'import numpy as np\n'), ((3152, 3167), 'numpy.median', 'np.median', (['np_x'], {}), '(np_x)\n', (3161, 3167), True, 'import numpy as np\n'), ((5289, 5327), 'numpy.cov', 'np.cov', (['np_x'], {'rowvar': '(False)', 'bias': '(False)'}), '(np_x, rowvar=False, bias=False)\n', (5295, 5327), True, 'import numpy as np\n'), ((5427, 5464), 'numpy.cov', 'np.cov', (['np_x'], {'rowvar': '(False)', 'bias': '(True)'}), '(np_x, rowvar=False, bias=True)\n', (5433, 5464), True, 'import numpy as np\n'), ((5563, 5600), 'numpy.cov', 'np.cov', (['np_x'], {'rowvar': '(True)', 'bias': '(False)'}), '(np_x, rowvar=True, bias=False)\n', (5569, 5600), True, 'import numpy as np\n'), ((5699, 5735), 'numpy.cov', 'np.cov', (['np_x'], {'rowvar': '(True)', 'bias': '(True)'}), '(np_x, rowvar=True, bias=True)\n', (5705, 5735), True, 'import numpy as np\n'), ((1203, 1229), 'numpy.quantile', 'np.quantile', (['np_x', '(q / 100)'], {}), '(np_x, q / 100)\n', (1214, 1229), True, 'import numpy as np\n'), ((1349, 1371), 'numpy.percentile', 'np.percentile', (['np_x', 'q'], {}), '(np_x, q)\n', (1362, 1371), True, 'import numpy as np\n'), ((1678, 1704), 'numpy.quantile', 'np.quantile', (['np_x', '(q / 100)'], {}), '(np_x, q / 100)\n', (1689, 1704), True, 'import numpy as np\n'), ((1824, 1846), 'numpy.percentile', 'np.percentile', (['np_x', 'q'], {}), '(np_x, q)\n', (1837, 1846), True, 'import numpy as np\n'), ((3763, 3785), 'numpy.partition', 'np.partition', (['np_x', '(-k)'], {}), '(np_x, -k)\n', (3775, 3785), True, 'import numpy as np\n'), ((4056, 4081), 'numpy.partition', 'np.partition', (['np_x', '(k - 1)'], {}), '(np_x, k - 1)\n', (4068, 4081), True, 'import numpy as np\n'), ((4644, 4666), 'numpy.partition', 'np.partition', (['np_x', '(-k)'], {}), '(np_x, -k)\n', (4656, 4666), True, 'import numpy as np\n'), ((4937, 4962), 'numpy.partition', 'np.partition', (['np_x', '(k - 1)'], {}), '(np_x, k - 1)\n', (4949, 4962), True, 'import numpy as np\n'), ((5176, 5189), 
'numpy.arange', 'np.arange', (['(30)'], {}), '(30)\n', (5185, 5189), True, 'import numpy as np\n'), ((5822, 5852), 'numpy.cov', 'np.cov', (['np_x'], {'dtype': 'np.float16'}), '(np_x, dtype=np.float16)\n', (5828, 5852), True, 'import numpy as np\n')] |
# -- coding: utf-8 --
# -- coding: utf-8 --
import tensorflow as tf
import numpy as np
import argparse
from model.hyparameter import parameter
import pandas as pd
class DataIterator():
    """Sliding-window iterator over a pollutant time series.

    Loads train/test CSVs, optionally min-max normalizes them in place, and
    exposes the data as a TensorFlow (TF1) one-shot dataset iterator yielding
    (input window, prediction target) pairs.
    """
    def __init__(self,
                 site_id=0,
                 pollutant_id=4,
                 is_training=True,
                 time_size=3,
                 prediction_size=1,
                 data_divide=0.9,
                 window_step=1,
                 normalize=False):
        '''
        :param site_id: station (zone) identifier
        :param pollutant_id: column index of the pollutant used as the label
        :param is_training: while is_training is True, the model is in training state
        :param time_size: time-series length of the input window
        :param prediction_size: the length of the prediction
        :param data_divide: ratio dividing the training set and the test set
        :param window_step: stride of the sliding window during training
        :param normalize: whether to min-max normalize the loaded data
        '''
        self.min_value=0.000000000001  # epsilon used in normalization
        self.site_id=site_id  # station (zone) ID
        self.pollutant_id=pollutant_id
        self.time_size=time_size  # time series length of input
        self.prediction_size=prediction_size  # the length of prediction
        self.is_training=is_training  # true or false
        self.data_divide=data_divide  # training/test split ratio
        self.window_step=window_step  # window step
        # NOTE(review): absolute, machine-specific data paths — consider making
        # them configurable.
        self.train_data=self.get_source_data('/Users/guojianzou/pollutant-prediction/data/train.csv').values
        self.test_data=self.get_source_data('/Users/guojianzou/pollutant-prediction/data/test.csv').values
        # self.data=self.source_data.loc[self.source_data['ZoneID']==self.site_id]
        print(self.train_data.shape, self.test_data.shape)
        self.total_data=np.concatenate([self.train_data,self.test_data],axis=0)
        # Per-feature statistics over the full (train + test) series.
        self.max,self.min=self.get_max_min(self.total_data)  # max and min are list type, used for the later normalization
        self.normalize=normalize
        if self.normalize:
            self.normalization(self.train_data)  # normalization (in place)
            self.normalization(self.test_data)  # normalization (in place)
    def get_source_data(self,file_path):
        '''
        Read a CSV file with pandas.
        :param file_path: path of the CSV file to load
        :return: the pandas DataFrame, or None if the read failed
        '''
        data=None
        try:
            data = pd.read_csv(file_path, encoding='utf-8')
        except IOError:
            print("Error: do not to find or failed to read the file")
        else:
            print("successful to read the data")
        return data
    def get_max_min(self, data):
        '''
        Compute the per-column maxima and minima of the data.
        :return: the max and min value of input features
        '''
        self.min_list=[]
        self.max_list=[]
        # print('the shape of features is :',data.shape[1])
        for i in range(data.shape[1]):
            self.min_list.append(min(data[:,i]))
            self.max_list.append(max(data[:,i]))
        # print('the max feature list is :',self.max_list)
        # print('the min feature list is :', self.min_list)
        return self.max_list,self.min_list
    def normalization(self,data):
        # Min-max normalize each column in place.
        # NOTE(review): the epsilon is added to the minimum, so the denominator
        # is (max - min) - eps; if divide-by-zero protection was intended,
        # (max - min) + eps may have been meant — confirm.
        for i in range(data.shape[1]):
            data[:,i]=(data[:,i] - np.array(self.min[i])) / (np.array(self.max[i]) - np.array(self.min[i]+self.min_value))
    def generator(self):
        '''
        Slide a window over the train (stride window_step) or test (stride
        prediction_size) series.
        :return: yield the data of every time,
                 shape:input_series:[time_size,field_size]
                       label:[predict_size]
        '''
        if self.is_training: data=self.train_data
        else: data=self.test_data
        shape=data.shape
        low,high=0,shape[0]
        while (low+self.time_size+self.prediction_size) <= high:
            # Label: the pollutant column over the prediction horizon that
            # immediately follows the input window.
            label=data[low + self.time_size: low + self.time_size + self.prediction_size, self.pollutant_id]
            yield (np.array(data[low:low+self.time_size]), label)
            if self.is_training: low += self.window_step
            else: low += self.prediction_size
    def next_batch(self, batch_size=32, epochs=1, is_training=True):
        '''
        Build a batched TF1 one-shot iterator over self.generator.
        :param batch_size: number of windows per batch
        :param epochs: number of passes over the data
        :param is_training: selects train (shuffled) vs test data
        :return: the iterator's get_next() op
        '''
        self.is_training=is_training
        dataset=tf.data.Dataset.from_generator(self.generator,output_types=(tf.float32,tf.float32))
        if self.is_training:
            dataset=dataset.shuffle(buffer_size=int(self.train_data.shape[0]-self.time_size-self.prediction_size)//self.window_step)
        dataset=dataset.repeat(count=epochs)
        dataset=dataset.batch(batch_size=batch_size)
        iterator=dataset.make_one_shot_iterator()
        return iterator.get_next()
#
if __name__=='__main__':
    # Parse CLI hyper-parameters.
    para = parameter(argparse.ArgumentParser())
    para = para.get_para()
    # Build the data iterator. Renamed locals so the builtins `iter` and
    # `next` are no longer shadowed (original code bound both).
    data_iter = DataIterator(site_id=0, normalize=True, time_size=48,
                             prediction_size=24, window_step=para.step)
    # print(data_iter.data.loc[data_iter.data['ZoneID']==0])
    next_batch_op = data_iter.next_batch(32, 10, is_training=True)
    with tf.Session() as sess:
        # Pull batches and print their shapes as a smoke test.
        for i in range(4000):
            print(i)
            x, y = sess.run(next_batch_op)
            print(x.shape)
            print(y.shape)
"argparse.ArgumentParser",
"pandas.read_csv",
"tensorflow.Session",
"tensorflow.data.Dataset.from_generator",
"numpy.array",
"numpy.concatenate"
] | [((1590, 1647), 'numpy.concatenate', 'np.concatenate', (['[self.train_data, self.test_data]'], {'axis': '(0)'}), '([self.train_data, self.test_data], axis=0)\n', (1604, 1647), True, 'import numpy as np\n'), ((3958, 4048), 'tensorflow.data.Dataset.from_generator', 'tf.data.Dataset.from_generator', (['self.generator'], {'output_types': '(tf.float32, tf.float32)'}), '(self.generator, output_types=(tf.float32, tf\n .float32))\n', (3988, 4048), True, 'import tensorflow as tf\n'), ((4441, 4466), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4464, 4466), False, 'import argparse\n'), ((4708, 4720), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4718, 4720), True, 'import tensorflow as tf\n'), ((2112, 2152), 'pandas.read_csv', 'pd.read_csv', (['file_path'], {'encoding': '"""utf-8"""'}), "(file_path, encoding='utf-8')\n", (2123, 2152), True, 'import pandas as pd\n'), ((2963, 2984), 'numpy.array', 'np.array', (['self.min[i]'], {}), '(self.min[i])\n', (2971, 2984), True, 'import numpy as np\n'), ((2989, 3010), 'numpy.array', 'np.array', (['self.max[i]'], {}), '(self.max[i])\n', (2997, 3010), True, 'import numpy as np\n'), ((3013, 3051), 'numpy.array', 'np.array', (['(self.min[i] + self.min_value)'], {}), '(self.min[i] + self.min_value)\n', (3021, 3051), True, 'import numpy as np\n'), ((3561, 3601), 'numpy.array', 'np.array', (['data[low:low + self.time_size]'], {}), '(data[low:low + self.time_size])\n', (3569, 3601), True, 'import numpy as np\n')] |
'''BernoulliNB gave slightly better results than MultinomialNB on just TF-IDF feature vector.'''
import numpy as np
#Load the binary files of sarcastic and non-sarcastic tweets
# Load the preprocessed tweet arrays (one file per class).
sarcasm=np.load("posproc.npy")
neutral=np.load("negproc.npy")
# Print sample data for a quick sanity check.
print ("10 sample sarcastic lines:")
print (sarcasm[:10])
print ("10 sample non-sarcastic lines:")
print (neutral[:10])
#Import necessary libraries
import gensim
from gensim import corpora
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score
from scipy.sparse import csr_matrix
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.preprocessing import FunctionTransformer
from textblob import TextBlob
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import RFE
def get_text_length(x):
    """Return a column vector (n, 1) of character counts, one per text in x."""
    return np.array(list(map(len, x))).reshape(-1, 1)
def get_sentiment_score(x):
    """Sentiment-contrast feature: for each tweet, score the TextBlob polarity
    of the first half against the second half.

    Returns an (n, 1) numpy array. With halves' polarities s1 and s2:
    opposite signs give s1-s2 / s2-s1, both non-positive gives -(s1+s2),
    otherwise s1+s2.
    """
    scores=[]
    for tweet in x:
        temp=tweet.split()
        l=len(temp)
        first=temp[0:int(l/2)]
        # NOTE(review): the second half starts at int(l/2)+1, so the token at
        # index int(l/2) is always skipped. Dropping the middle word may be
        # intentional for odd lengths, but for even lengths this silently
        # loses a word — confirm whether temp[int(l/2):] was meant.
        second=temp[1+int(l/2):]
        blob = TextBlob(' '.join(first))
        s1=blob.sentiment.polarity
        blob=TextBlob(' '.join(second))
        s2=blob.sentiment.polarity
        score=0
        # Combine the two half-polarities depending on their signs.
        if s1<0 and s2>=0:
            score=s1-s2
        elif s1>=0 and s2<0:
            score=s2-s1
        elif s1<=0 and s2<=0:
            score=-1*(s1+s2)
        else:
            score=s1+s2
        scores.append(score)
    return np.array(scores).reshape(-1, 1)
'''
def get_topic(x):
l=len(x)
dictionary = corpora.Dictionary([i.split() for i in x])
doc_term_matrix = [dictionary.doc2bow(doc.split()) for doc in x]
lda = gensim.models.ldamodel.LdaModel
ldamodel = lda(doc_term_matrix, num_topics=l, id2word = dictionary, passes=50)
topics=ldamodel.get_topics()
topics=topics[:,0]
return (topics)
#print (ldamodel.print_topics(num_topics=100, num_words=1))
'''
# Build labels: 1 for sarcastic tweets, 0 for neutral ones.
labels=[]
sarcasm_size=len(sarcasm)
print ("Total sarcastic lines = "+str(sarcasm_size))
neutral_size=len(neutral)
print ("Total non-sarcastic lines = "+str(neutral_size))
for i in range(0,sarcasm_size):
    labels.append(1)
for i in range(0,neutral_size):
    labels.append(0)
print (len(labels))
# Dataset is the concatenation of both classes, aligned with `labels`.
dataset=np.concatenate([sarcasm,neutral])
print ("Total length of dataset = "+str(len(dataset)))
#get_topic(dataset)
#Classify using Naive Bayes:
from sklearn.naive_bayes import BernoulliNB
#vec, clf = TfidfVectorizer(min_df=5), BernoulliNB()
#td_matrix = vec.fit_transform(dataset)
# Feature union: TF-IDF unigrams+bigrams, tweet length, and the half/half
# sentiment-contrast score, fed into a linear SVM.
clf = Pipeline([
    ('features', FeatureUnion([
        ('text', Pipeline([
            ('vec', TfidfVectorizer(min_df=5,ngram_range=(1,2))),
        ])),
        ('length', Pipeline([
            ('count', FunctionTransformer(get_text_length, validate=False)),
        ])),
        ('sentiment', Pipeline([
            ('senti', FunctionTransformer(get_sentiment_score, validate=False)),
        ]))
        #('topics', Pipeline([('topic', FunctionTransformer(get_topic, validate=False)),]))
    ])),
    ('clf', LinearSVC())])
print ("Length of dataset = "+str(len(dataset)))
print ("Length of the labels = "+str(len(labels)))
# Hold out 20% for evaluation (fixed seed for reproducibility).
X_train, X_test, y_train, y_test = train_test_split(dataset, labels,test_size=0.2, random_state=0)
'''
rfe = RFE(clf, 500)
fit = rfe.fit(X_train, y_train)
print("Num Features: %d") % fit.n_features_
print("Selected Features: %s") % fit.support_
print("Feature Ranking: %s") % fit.ranking_
'''
clf.fit(X_train, y_train)
y_out = clf.predict(X_test)
print("Accuracy on held-out data: ", str(100*accuracy_score(y_out, y_test))[0:5], "%\n")
# Precision / recall / F1 on the held-out set.
from sklearn.metrics import precision_score, recall_score, f1_score
print (precision_score(y_out, y_test))
print (recall_score(y_out, y_test))
print (f1_score(y_out, y_test))
#Accuracy on held-out data: MultinomialNB 83.79 %, BernoulliNB 84.49%, DecisionTree=84.40%, RandomForest=82.39%
# After removing stopwords and lemmatizing.. accuracy changed to 84.89% in BernoulliNB.
# After adding length of the tweet as a feature, nothing changed.
# Accuracy after using unigrams and bigrams both is 85.35%
| [
"sklearn.metrics.f1_score",
"sklearn.model_selection.train_test_split",
"sklearn.svm.LinearSVC",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"numpy.array",
"sklearn.feature_extraction.text.TfidfVectorizer",
"numpy.concatenate",
"sklearn.preprocessing.FunctionTransformer",
"n... | [((187, 209), 'numpy.load', 'np.load', (['"""posproc.npy"""'], {}), "('posproc.npy')\n", (194, 209), True, 'import numpy as np\n'), ((218, 240), 'numpy.load', 'np.load', (['"""negproc.npy"""'], {}), "('negproc.npy')\n", (225, 240), True, 'import numpy as np\n'), ((2439, 2473), 'numpy.concatenate', 'np.concatenate', (['[sarcasm, neutral]'], {}), '([sarcasm, neutral])\n', (2453, 2473), True, 'import numpy as np\n'), ((3399, 3463), 'sklearn.model_selection.train_test_split', 'train_test_split', (['dataset', 'labels'], {'test_size': '(0.2)', 'random_state': '(0)'}), '(dataset, labels, test_size=0.2, random_state=0)\n', (3415, 3463), False, 'from sklearn.model_selection import train_test_split\n'), ((3885, 3915), 'sklearn.metrics.precision_score', 'precision_score', (['y_out', 'y_test'], {}), '(y_out, y_test)\n', (3900, 3915), False, 'from sklearn.metrics import precision_score, recall_score, f1_score\n'), ((3924, 3951), 'sklearn.metrics.recall_score', 'recall_score', (['y_out', 'y_test'], {}), '(y_out, y_test)\n', (3936, 3951), False, 'from sklearn.metrics import precision_score, recall_score, f1_score\n'), ((3960, 3983), 'sklearn.metrics.f1_score', 'f1_score', (['y_out', 'y_test'], {}), '(y_out, y_test)\n', (3968, 3983), False, 'from sklearn.metrics import precision_score, recall_score, f1_score\n'), ((1659, 1675), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (1667, 1675), True, 'import numpy as np\n'), ((3247, 3258), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {}), '()\n', (3256, 3258), False, 'from sklearn.svm import LinearSVC\n'), ((3765, 3794), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_out', 'y_test'], {}), '(y_out, y_test)\n', (3779, 3794), False, 'from sklearn.metrics import accuracy_score\n'), ((2815, 2860), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'min_df': '(5)', 'ngram_range': '(1, 2)'}), '(min_df=5, ngram_range=(1, 2))\n', (2830, 2860), False, 'from sklearn.feature_extraction.text import 
TfidfVectorizer\n'), ((2926, 2978), 'sklearn.preprocessing.FunctionTransformer', 'FunctionTransformer', (['get_text_length'], {'validate': '(False)'}), '(get_text_length, validate=False)\n', (2945, 2978), False, 'from sklearn.preprocessing import FunctionTransformer\n'), ((3049, 3105), 'sklearn.preprocessing.FunctionTransformer', 'FunctionTransformer', (['get_sentiment_score'], {'validate': '(False)'}), '(get_sentiment_score, validate=False)\n', (3068, 3105), False, 'from sklearn.preprocessing import FunctionTransformer\n')] |
from enn import *
import numpy as np
from grid_LSTM import netLSTM, netLSTM_full
from grid_data_v2 import TextDataset
import grid_data_v2 as grid_data
from grid_configuration import config
from util import Record, save_var, get_file_list, Regeneralize, list_to_csv
from torch.autograd import Variable
from torch.utils.data import DataLoader
import torch
import time
import matplotlib.pyplot as plt
import pickle
import pandas as pd
import os
# Interactive matplotlib mode so figures refresh without blocking.
plt.ion()
# Seed every RNG involved (torch CPU, numpy, all CUDA devices) for reproducibility.
seed = config.seed
torch.random.manual_seed(seed)
np.random.seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.set_device(config.deviceID)
print(config.test_ID)
# Output directory for saved parameters, losses and figures.
PATH = config.path
if not os.path.exists(PATH):
    os.mkdir(PATH)
'''Parmaters used in the net.'''
ERROR_PER = config.ERROR_PER
NE = config.ne  # number of ensemble members
GAMMA = config.GAMMA  # damping scale applied to lamuda in train()
T = config.T  # number of outer training iterations in train()
''' load data and initialize enn net'''
text = TextDataset(config)
textLoader = DataLoader(text, batch_size=config.batch_size, shuffle=True,
                       num_workers=config.num_workers, drop_last=config.drop_last)
criterion = torch.nn.MSELoss()
criterion = torch.nn.MSELoss()
def train(net_enn, input_, target):
    """Train the ensemble network with EnRML-style iterative parameter updates.

    For T outer iterations, proposes a parameter update from the current
    ensemble prediction; if the MSE loss does not improve, the damping factor
    (lamuda) is increased by GAMMA and the update retried, abandoning the
    iteration once lamuda exceeds GAMMA ** 5. Parameters are saved to PATH
    after each iteration and the running loss is appended to PATH/loss.txt.

    :param net_enn: ensemble network exposing get/set/update_parameter and output
    :param input_: network input batch
    :param target: ground-truth tensor compared via the module-level MSE criterion
    :return: (net_enn, latest mean train loss, latest ensemble prediction)
    """
    dstb_y = lamuda.Lamuda(target, NE, ERROR_PER)
    train_losses = Record()
    losses = Record()  # NOTE(review): only written once below and never read again
    lamuda_history = Record()
    std_history = Record()
    pred_history = Record()
    initial_parameters = net_enn.initial_parameters
    initial_pred = net_enn.output(input_)
    # Seed all histories with the state before any update.
    train_losses.update(criterion(initial_pred.mean(0), target).tolist())
    losses.update(criterion(initial_pred.mean(0), target).tolist())
    std_history.update(dstb_y.std(initial_pred))
    pred_history.update(initial_pred)
    lamuda_history.update(dstb_y.lamuda(initial_pred))
    for j in range(T):
        torch.cuda.empty_cache()
        params = net_enn.get_parameter()
        dstb_y.update()
        time_ = time.strftime('%Y%m%d_%H_%M_%S')
        # Propose an EnRML update from the latest prediction ensemble.
        delta = enrml.EnRML(pred_history.get_latest(mean=False), params, initial_parameters,
                            lamuda_history.get_latest(mean=False), dstb_y.dstb, ERROR_PER)
        params_raw = net_enn.update_parameter(delta)
        torch.cuda.empty_cache()
        pred = net_enn.output(input_)
        loss_new = criterion(pred.mean(0), target).tolist()
        bigger = train_losses.check(loss_new)
        record_while = 0
        # Retry loop: if the loss got worse, increase damping and re-propose.
        while bigger:
            record_while += 1
            lamuda_history.update(lamuda_history.get_latest(mean=False) * GAMMA)
            if lamuda_history.get_latest(mean=False) > GAMMA ** 5:
                # Damping exhausted: roll back to the pre-update parameters.
                lamuda_history.update(lamuda_history.data[0])
                print('abandon current iteration')
                net_enn.set_parameter(params)
                loss_new = train_losses.get_latest()
                dstb_y.update()
                params_raw = params
                break
            dstb_y.update()
            net_enn.set_parameter(params)
            delta = enrml.EnRML(pred_history.get_latest(mean=False), params, initial_parameters,
                                lamuda_history.get_latest(mean=False), dstb_y.dstb, ERROR_PER)
            params_raw = net_enn.update_parameter(delta)
            torch.cuda.empty_cache()
            pred = net_enn.output(input_)
            loss_new = criterion(pred.mean(0), target).tolist()
            print('update losses, new loss:{}'.format(loss_new))
            bigger = train_losses.check(loss_new)
        train_losses.update(loss_new)
        # Persist the accepted parameters and log the running loss.
        save_var(params_raw, '{}/{}_params'.format(PATH, time_))
        print("iteration:{} \t current train losses:{}".format(j, train_losses.get_latest(mean=True)))
        with open('{}/loss.txt'.format(PATH), 'a') as f:
            f.write(time.strftime('%Y%m%d_%H_%M_%S')+','+str(train_losses.get_latest(mean=True))+',\n')
            f.close()
        pred_history.update(pred)
        std_history.update(dstb_y.std(pred))
        # Relax the damping when the prediction spread stops growing.
        if std_history.bigger():
            lamuda_history.update(lamuda_history.get_latest(mean=False))
        else:
            lamuda_tmp = lamuda_history.get_latest(mean=False) / GAMMA
            if lamuda_tmp < 0.005:
                lamuda_tmp = 0.005
            lamuda_history.update(lamuda_tmp)
    return net_enn, train_losses.get_latest(mean=True), pred_history.get_latest(mean=False)
# def predict(data, params=None, model_predict=None): # 每次预测24h,然后拼接在一起获得预测结果
# result = []
# input_ = torch.tensor(data)
# input_ = Variable(input_.view(1, len(data), config.input_dim).float()).cuda()
# if params is not None:
# model_predict.set_parameter(params)
# i = 0
# while i <= len(data) - config.train_len:
# pred = model_predict.output(input_[:, i:i+config.train_len, :])
# result.append(pred[:, -24:, :])
# print('predicting: {} to {}'.format(i, i + config.train_len))
# i += 24
# #save_var(result, 'result')
# return torch.cat(result, dim=1)
def predict_full(data, params=None, model_predict=None):
    """Predict the whole target sequence in a single pass (no window stitching).

    :param data: input sequence of shape (len(data), config.input_dim)
    :param params: optional parameter set loaded into the model before inference
    :param model_predict: the ensemble model used for inference
    :return: model output over the full sequence
    """
    if params is not None:
        model_predict.set_parameter(params)
    batch = torch.tensor(data).view(1, len(data), config.input_dim).float()
    return model_predict.output(Variable(batch).cuda())
def draw_result(enn_net):
    """Evaluate the trained net on each test zone, log several losses and plot.

    Loads the most recently saved parameter file, predicts each test zone's
    full sequence, optionally adds back the mean trend, computes test/ratio/
    relative/real losses, appends them to PATH/test_loss_*.csv, saves the
    predictions to CSV, and draws a target-vs-prediction error-bar figure.

    :param enn_net: trained ensemble network (parameters are overwritten here)
    """
    param_list = get_file_list('params', config.path)
    params = pickle.load(open(param_list[-1], 'rb'))
    print("use parameter file: {}".format(param_list[-1]))
    enn_net.set_parameter(params)
    for i, k in enumerate(config.test_ID):
        input_ = text.test_data_input_list[i]
        target = text.test_data_output_list[i]
        raw_data = pd.read_csv("data/{}.csv".format(config.data_list[k-1]))
        real_std = raw_data.LOAD.std()
        real_mean = raw_data.LOAD.mean()
        # Trim the raw series to align with the prediction window.
        raw = np.array(raw_data.LOAD)[config.predict_len:-config.predict_len]
        pred = predict_full(input_, params=params, model_predict=enn_net)
        # Shift baselines (disabled, kept for reference):
        #pred = np.zeros((config.ne, np.shape(raw)[0], 1))  # trend-shift baseline
        #pred = np.ones((config.ne, np.shape(raw)[0], 1))  # pure-shift baseline
        # save the result right from the enn net
        np.savetxt('result/e{}-p{}-pred_w{}.csv'.format(config.experiment_ID, PATH, k),
                   np.array(pred)[:, :, 0].T, delimiter=',')
        # Add back the mean trend to recover the true ratio.
        if grid_data.use_mean_ratio == True:
            if grid_data.use_different_mean_ratio == True:
                mean_ratio = text.mean_ratio_all_pred[k-1,:]
            elif grid_data.use_CV_ratio == True:
                mean_ratio = text.mean_ratio_group_pred[config.test_set_ID-1,:]
            else:
                mean_ratio = text.mean_ratio_all_ave_pred
            print('mean_ratio:', mean_ratio)
            pred = np.array(pred) + mean_ratio.reshape(-1,1)  # add the averaged-over-all-zones trend back
            #test1 = mean_ratio.reshape(-1,1)
            #pred=np.array([list(test1) for i in range(100)])
            np.savetxt('result/e{}-p{}-pred_mean_ratio_w{}.csv'.format(config.experiment_ID, PATH, k),
                       np.array(pred)[:, :, 0].T, delimiter=',')
            print('test_pred:', np.shape(pred))
            target = target + mean_ratio.reshape(-1,1)
            loss = criterion(torch.tensor(pred.mean(0)[:, 0]).float().cpu(), torch.tensor(target[:, 0]).float())
        else:
            loss = criterion(torch.tensor(pred.mean(0)[:, 0]).float().cpu(), torch.tensor(target[:, 0]).float())
        print("ID{}\t test loss: {}".format(k, loss))
        # De-normalize predictions/targets back to ratio space.
        mean = grid_data.load_mean[k - 1][0]
        std = grid_data.load_std[k - 1][0]
        pred_ratio = Regeneralize(np.array(pred[:, :, 0]), mean, std)
        pred_real = pred_ratio * raw
        #pred_real = pred_ratio
        target_ratio = Regeneralize(target, mean, std).reshape(-1,1)
        target_real = target_ratio * raw.reshape(-1,1)
        #target_real = target_ratio
        loss_ratio = criterion(torch.tensor(pred_ratio.mean(0)).float(), torch.tensor(target_ratio[:, 0]).float()) #new
        print("ID{}\t ratio loss: {}".format(k, loss_ratio))
        #target_real = np.array(raw_data.LOAD)[config.predict_len*2:]
        # make a normalization of real load value:
        loss_relative = np.mean(np.abs(pred_real.mean(0) - target_real.reshape(-1))/target_real.reshape(-1))
        std = 1 * pred_real.std(0)
        pred_normalized = (pred_real.mean(0) - real_mean) / real_std
        target_normalized = (target_real.reshape(-1) - real_mean) / real_std
        print('pred_normalized shape:', np.shape(pred_normalized))
        print('target_normalized shape:', np.shape(target_normalized))
        loss_real = criterion(Variable(torch.tensor(pred_normalized).float()),
                              Variable(torch.tensor(target_normalized).float()))
        print("ID{}\t relative loss: {}".format(k, loss_relative))
        print("ID{}\t real loss: {}".format(k, loss_real))
        # Append all loss flavors for this zone to the experiment CSV.
        with open('{}/test_loss_{}.csv'.format(PATH, config.experiment_ID), 'a') as f:
            f.write('{},{},{},{},{},{}\n'.format(k, loss, loss_ratio, loss_real, loss_relative, std.mean()))
            f.close()
        print('std:', std.mean())
        x = np.arange(len(target))
        np.savetxt('result/e{}-p{}-pred_w{}_real.csv'.format(config.experiment_ID, PATH, k),
                   np.array(pred_real).T, delimiter=',')
        print('flag1')
        np.savetxt('result/e{}-p{}-target_w{}_real.csv'.format(config.experiment_ID, PATH, k),
                   target, delimiter=',')
        print('Plotting')
        # Target vs. ensemble-mean prediction with one-sigma error bars.
        plt.figure(figsize=(100, 5))
        plt.plot(target_real, label='target', color='black', alpha=0.4)
        plt.errorbar(x, pred_real.mean(0), yerr=std, color='red', alpha=0.7)
        plt.title(str(k) + '-' + config.info)
        plt.legend()
        plt.savefig('{}/ID{}.png'.format(PATH, k))
        plt.show()
        print('flag2')
def evaluate(enn_net, epoch):
    """Evaluate the ENN on every test series using parameters saved near *epoch*.

    Loads a pickled parameter snapshot, runs the full-series prediction,
    optionally re-adds the mean trend ratio, de-normalizes, logs several
    loss flavours to a CSV, and saves plots/CSV dumps of the results.

    Relies on module-level globals: ``config``, ``text``, ``grid_data``,
    ``criterion``, ``PATH``, ``predict_full``, ``Regeneralize``.
    """
    #params = os.path.join(config.path, 'parameters_epoch%d' % epoch)
    param_list = get_file_list('params', config.path)
    #param_index = int(len(param_list)*epoch/config.epoch)-1
    # Map the requested epoch onto the list of saved snapshots (epoch is 0-based).
    param_index = int(len(param_list)*(epoch+1)/config.epoch)-1
    print('total number of saved parameters: %d, using no %d' % (len(param_list), param_index))
    params = param_list[param_index]
    print("use parameter file: {}".format(params))
    params = pickle.load(open(params, 'rb'))
    enn_net.set_parameter(params)
    for i, k in enumerate(config.test_ID):
        input_ = text.test_data_input_list[i] # ->array(data_len * in_dim)
        target = text.test_data_output_list[i]# ->array(data_len * 1)
        raw_data = pd.read_csv("data/{}.csv".format(config.data_list[k-1]))
        real_std = raw_data.LOAD.std()
        real_mean = raw_data.LOAD.mean()
        # Trim the prediction horizon off both ends of the raw load series.
        raw = np.array(raw_data.LOAD)[config.predict_len:-config.predict_len]
        pred = predict_full(input_, params=params, model_predict=enn_net)# ->tensor(ensemble_size*data_len*1)
        # Shift / baseline alternatives (kept for reference):
        #pred = np.zeros((config.ne, np.shape(raw)[0], 1))# trend-shift baseline
        #pred = np.ones((config.ne, np.shape(raw)[0], 1)) # pure-shift baseline
        # save the result right from the enn net
        np.savetxt('result/e{}-epoch{}-pred_w{}.csv'.format(config.experiment_ID, epoch, k),
                   np.array(pred)[:, :, 0].T, delimiter=',')
        # Add the mean trend back to obtain the true ratio.
        if grid_data.use_mean_ratio == True:
            if grid_data.use_different_mean_ratio == True:
                mean_ratio = text.mean_ratio_all_pred[k-1,:]
            elif grid_data.use_CV_ratio == True:
                mean_ratio = text.mean_ratio_group_pred[config.test_set_ID-1,:]
            else:
                mean_ratio = text.mean_ratio_all_ave_pred
            print('mean_ratio:', mean_ratio)
            pred = np.array(pred) + mean_ratio.reshape(-1,1) # re-add the mean trend; "ave" = trend averaged over all districts
            #test1 = mean_ratio.reshape(-1,1)
            #pred=np.array([list(test1) for i in range(100)])
            np.savetxt('result/e{}-epoch{}-pred_mean_ratio_w{}.csv'.format(config.experiment_ID, epoch, k),
                       np.array(pred)[:, :, 0].T, delimiter=',')
            print('test_pred:', np.shape(pred))
            target = target + mean_ratio.reshape(-1,1)
            loss = criterion(torch.tensor(pred.mean(0)[:, 0]).float().cpu(), torch.tensor(target[:, 0]).float())
        else:
            loss = criterion(torch.tensor(pred.mean(0)[:, 0]).float().cpu(), torch.tensor(target[:, 0]).float())
        print("ID{}\t test loss: {}".format(k, loss))
        mean = grid_data.load_mean[k - 1][0]
        std = grid_data.load_std[k - 1][0]
        # Undo normalization to get ratios, then scale by the raw load to get real values.
        pred_ratio = Regeneralize(np.array(pred[:, :, 0]), mean, std)
        pred_real = pred_ratio * raw
        #pred_real = pred_ratio
        target_ratio = Regeneralize(target, mean, std).reshape(-1,1)
        target_real = target_ratio * raw.reshape(-1,1)
        #target_real = target_ratio
        loss_ratio = criterion(torch.tensor(pred_ratio.mean(0)).float(), torch.tensor(target_ratio[:, 0]).float()) #new
        print("ID{}\t ratio loss: {}".format(k, loss_ratio))
        #target_real = np.array(raw_data.LOAD)[config.predict_len*2:]
        # make a normalization of real load value:
        loss_relative = np.mean(np.abs(pred_real.mean(0) - target_real.reshape(-1))/target_real.reshape(-1))
        # NOTE: `std` is reused here — it now holds the ensemble spread, not the load std above.
        std = 1 * pred_real.std(0)
        pred_normalized = (pred_real.mean(0) - real_mean) / real_std
        target_normalized = (target_real.reshape(-1) - real_mean) / real_std
        print('pred_normalized shape:', np.shape(pred_normalized))
        print('target_normalized shape:', np.shape(target_normalized))
        loss_real = criterion(Variable(torch.tensor(pred_normalized).float()),
                              Variable(torch.tensor(target_normalized).float()))
        print("ID{}\t relative loss: {}".format(k, loss_relative))
        print("ID{}\t real loss: {}".format(k, loss_real))
        # Append all loss flavours for this series to the per-epoch CSV log.
        f = open(r'{}/epoch{}_test_loss_{}.csv'.format(PATH, epoch, config.experiment_ID), 'a')
        f.write('{},{},{},{},{},{}\n'.format(k, loss, loss_ratio, loss_real, loss_relative, std.mean()))
        f.close()
        print('std:', std.mean())
        x = np.arange(len(target))
        np.savetxt('result/e{}-epoch{}-pred_w{}_real.csv'.format(config.experiment_ID, epoch, k),
                   np.array(pred_real).T, delimiter=',')
        print('flag1')
        np.savetxt('result/e{}-epoch{}-target_w{}_real.csv'.format(config.experiment_ID, epoch, k),
                   target, delimiter=',')
        print('Plotting')
        plt.figure(figsize=(100, 5))
        plt.plot(target_real, label='target', color='black', alpha=0.4)
        plt.errorbar(x, pred_real.mean(0), yerr=std, color='red', alpha=0.7)
        plt.title(str(k) + '-' + config.info)
        plt.legend()
        plt.savefig('{}/ID{}_epoch{}.png'.format(PATH, k, epoch))
        plt.show()
        print('flag2')
def save_result(enn_net):
    """Run *enn_net* over every configured test series and append the losses
    to ``<PATH>/test_loss.txt`` with a timestamp.

    Relies on module-level globals: ``config``, ``text``, ``criterion``,
    ``predict_full``, ``PATH``.
    """
    test_loss = []
    for i, k in enumerate(config.test_ID):
        input_ = text.test_data_input_list[i]
        target = text.test_data_output_list[i]
        # FIX: both other call sites pass the network as ``model_predict=``;
        # the previous ``model=`` keyword did not match that signature.
        pred = predict_full(input_, params=None, model_predict=enn_net)
        loss = criterion(pred.cpu(), torch.tensor(target).float())
        test_loss.append(loss)
    # The context manager closes the file; no explicit close() needed.
    with open(PATH + '/test_loss.txt', 'a') as f:
        f.write(time.strftime('%Y%m%d_%H_%M_%S') + ',' + str(test_loss) + ',\n')
def run():
    """Train the ENN-wrapped LSTM over ``config.epoch`` epochs.

    Per batch: forward/ENN-update via ``train``, then dump prediction and
    target CSVs under ``<PATH>/predict_history`` and append the loss to
    ``<PATH>/time.txt``.  After each epoch the ensemble parameters are
    pickled to ``<PATH>/parameters_epoch<N>``.

    Relies on module-level globals: ``PATH``, ``config``, ``textLoader``,
    ``train``, ``enn``, ``NE``, ``netLSTM``, ``save_var``.
    """
    with open('{}/time.txt'.format(PATH), 'a') as f:
        f.write('{},\n'.format(time.strftime('%Y%m%d_%H_%M_%S')))
        f.close()
    model = netLSTM()
    with torch.no_grad():
        model = model.cuda()
    net_enn_train = enn.ENN(model, NE)
    for epoch in range(config.epoch):
        for i, data in enumerate(textLoader):
            print('#'*30)
            print("{}: batch{}".format(time.strftime('%Y%m%d_%H_%M_%S'), i))
            input_, target = data
            #input_ = torch.from_numpy(np.stack(list(shrink(input_, 5)), axis=1))
            #target = torch.from_numpy(np.stack(list(shrink(target, 5)), axis=1))
            # Preprocessing needs no autograd graph: keep only the last
            # predict_len steps of the target and move tensors to the GPU.
            with torch.no_grad():
                input_, target = map(Variable, (input_.float(), target.float()))
                target = target[:, -config.predict_len:, :]
                print(target.shape)
                target = target.reshape(-1, config.output_dim)
                input_ = input_.cuda()
                target = target.cuda()
            net_enn_train, loss, pred_data = train(net_enn_train, input_, target)
            # save pred and target while training
            save_dir = os.path.join(PATH, 'predict_history')
            if not os.path.exists(save_dir):
                os.mkdir(save_dir)
            save_data = {}
            save_data['pred'] = np.array(pred_data.mean(0)[:, 0])
            save_data['target'] = np.array(np.array(target[:, 0]))
            save_data = pd.DataFrame.from_dict(save_data)
            save_filename = '{}_{}.csv'.format(epoch, i)
            save_data.to_csv(os.path.join(save_dir, save_filename))
            """
            with open('predict_history'+'/pred.txt', 'a') as f:
                f.write(list_to_csv(np.array(pred_data.mean(0)[:, 0])) + '\n')
                f.close()
            with open('predict_history'+'/target.txt', 'a') as f:
                f.write(list_to_csv(np.array(target[:, 0])) + '\n')
                f.close()
            """
            with open(PATH+'/time.txt', 'a') as f:
                f.write(time.strftime('%Y%m%d_%H_%M_%S') + ',' + str(loss) + ',\n')
                f.close()
        # Snapshot the ensemble parameters at the end of every epoch.
        with torch.no_grad():
            params = net_enn_train.get_parameter()
            filename = PATH+"/parameters_epoch{}".format(epoch)
            save_var(params, filename)
            del params
if __name__ == '__main__':
    run() # only include the training process based on the netLSTM model.
    # Rebuild the model with the full-length output head for evaluation.
    model = netLSTM_full() # netLSTM and netLSTM_full share the weights and bias. The only difference between these models is the output length.
    with torch.no_grad():
        model = model.cuda()
    net_enn = enn.ENN(model, NE)
    #draw_result(net_enn)#
    evaluate(net_enn, 4)# count from 0
    print(config.test_ID)
| [
"grid_LSTM.netLSTM",
"util.Record",
"torch.nn.MSELoss",
"numpy.array",
"grid_LSTM.netLSTM_full",
"torch.random.manual_seed",
"os.path.exists",
"matplotlib.pyplot.plot",
"pandas.DataFrame.from_dict",
"numpy.random.seed",
"os.mkdir",
"util.get_file_list",
"matplotlib.pyplot.ion",
"numpy.shap... | [((442, 451), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (449, 451), True, 'import matplotlib.pyplot as plt\n'), ((472, 502), 'torch.random.manual_seed', 'torch.random.manual_seed', (['seed'], {}), '(seed)\n', (496, 502), False, 'import torch\n'), ((503, 523), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (517, 523), True, 'import numpy as np\n'), ((524, 556), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (550, 556), False, 'import torch\n'), ((562, 600), 'torch.cuda.set_device', 'torch.cuda.set_device', (['config.deviceID'], {}), '(config.deviceID)\n', (583, 600), False, 'import torch\n'), ((871, 890), 'grid_data_v2.TextDataset', 'TextDataset', (['config'], {}), '(config)\n', (882, 890), False, 'from grid_data_v2 import TextDataset\n'), ((904, 1029), 'torch.utils.data.DataLoader', 'DataLoader', (['text'], {'batch_size': 'config.batch_size', 'shuffle': '(True)', 'num_workers': 'config.num_workers', 'drop_last': 'config.drop_last'}), '(text, batch_size=config.batch_size, shuffle=True, num_workers=\n config.num_workers, drop_last=config.drop_last)\n', (914, 1029), False, 'from torch.utils.data import DataLoader\n'), ((1061, 1079), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (1077, 1079), False, 'import torch\n'), ((649, 669), 'os.path.exists', 'os.path.exists', (['PATH'], {}), '(PATH)\n', (663, 669), False, 'import os\n'), ((675, 689), 'os.mkdir', 'os.mkdir', (['PATH'], {}), '(PATH)\n', (683, 689), False, 'import os\n'), ((1187, 1195), 'util.Record', 'Record', ([], {}), '()\n', (1193, 1195), False, 'from util import Record, save_var, get_file_list, Regeneralize, list_to_csv\n'), ((1209, 1217), 'util.Record', 'Record', ([], {}), '()\n', (1215, 1217), False, 'from util import Record, save_var, get_file_list, Regeneralize, list_to_csv\n'), ((1239, 1247), 'util.Record', 'Record', ([], {}), '()\n', (1245, 1247), False, 'from util import Record, save_var, get_file_list, 
Regeneralize, list_to_csv\n'), ((1266, 1274), 'util.Record', 'Record', ([], {}), '()\n', (1272, 1274), False, 'from util import Record, save_var, get_file_list, Regeneralize, list_to_csv\n'), ((1294, 1302), 'util.Record', 'Record', ([], {}), '()\n', (1300, 1302), False, 'from util import Record, save_var, get_file_list, Regeneralize, list_to_csv\n'), ((4963, 4981), 'torch.tensor', 'torch.tensor', (['data'], {}), '(data)\n', (4975, 4981), False, 'import torch\n'), ((5236, 5272), 'util.get_file_list', 'get_file_list', (['"""params"""', 'config.path'], {}), "('params', config.path)\n", (5249, 5272), False, 'from util import Record, save_var, get_file_list, Regeneralize, list_to_csv\n'), ((9956, 9992), 'util.get_file_list', 'get_file_list', (['"""params"""', 'config.path'], {}), "('params', config.path)\n", (9969, 9992), False, 'from util import Record, save_var, get_file_list, Regeneralize, list_to_csv\n'), ((15566, 15575), 'grid_LSTM.netLSTM', 'netLSTM', ([], {}), '()\n', (15573, 15575), False, 'from grid_LSTM import netLSTM, netLSTM_full\n'), ((17868, 17882), 'grid_LSTM.netLSTM_full', 'netLSTM_full', ([], {}), '()\n', (17880, 17882), False, 'from grid_LSTM import netLSTM, netLSTM_full\n'), ((1714, 1738), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (1736, 1738), False, 'import torch\n'), ((1820, 1852), 'time.strftime', 'time.strftime', (['"""%Y%m%d_%H_%M_%S"""'], {}), "('%Y%m%d_%H_%M_%S')\n", (1833, 1852), False, 'import time\n'), ((2103, 2127), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (2125, 2127), False, 'import torch\n'), ((9469, 9497), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(100, 5)'}), '(figsize=(100, 5))\n', (9479, 9497), True, 'import matplotlib.pyplot as plt\n'), ((9506, 9569), 'matplotlib.pyplot.plot', 'plt.plot', (['target_real'], {'label': '"""target"""', 'color': '"""black"""', 'alpha': '(0.4)'}), "(target_real, label='target', color='black', alpha=0.4)\n", (9514, 9569), True, 
'import matplotlib.pyplot as plt\n'), ((9701, 9713), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (9711, 9713), True, 'import matplotlib.pyplot as plt\n'), ((9773, 9783), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9781, 9783), True, 'import matplotlib.pyplot as plt\n'), ((14561, 14589), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(100, 5)'}), '(figsize=(100, 5))\n', (14571, 14589), True, 'import matplotlib.pyplot as plt\n'), ((14598, 14661), 'matplotlib.pyplot.plot', 'plt.plot', (['target_real'], {'label': '"""target"""', 'color': '"""black"""', 'alpha': '(0.4)'}), "(target_real, label='target', color='black', alpha=0.4)\n", (14606, 14661), True, 'import matplotlib.pyplot as plt\n'), ((14793, 14805), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (14803, 14805), True, 'import matplotlib.pyplot as plt\n'), ((14880, 14890), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14888, 14890), True, 'import matplotlib.pyplot as plt\n'), ((15585, 15600), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15598, 15600), False, 'import torch\n'), ((18010, 18025), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (18023, 18025), False, 'import torch\n'), ((3130, 3154), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (3152, 3154), False, 'import torch\n'), ((5725, 5748), 'numpy.array', 'np.array', (['raw_data.LOAD'], {}), '(raw_data.LOAD)\n', (5733, 5748), True, 'import numpy as np\n'), ((7560, 7583), 'numpy.array', 'np.array', (['pred[:, :, 0]'], {}), '(pred[:, :, 0])\n', (7568, 7583), True, 'import numpy as np\n'), ((8457, 8482), 'numpy.shape', 'np.shape', (['pred_normalized'], {}), '(pred_normalized)\n', (8465, 8482), True, 'import numpy as np\n'), ((8526, 8553), 'numpy.shape', 'np.shape', (['target_normalized'], {}), '(target_normalized)\n', (8534, 8553), True, 'import numpy as np\n'), ((10739, 10762), 'numpy.array', 'np.array', (['raw_data.LOAD'], {}), '(raw_data.LOAD)\n', (10747, 
10762), True, 'import numpy as np\n'), ((12636, 12659), 'numpy.array', 'np.array', (['pred[:, :, 0]'], {}), '(pred[:, :, 0])\n', (12644, 12659), True, 'import numpy as np\n'), ((13533, 13558), 'numpy.shape', 'np.shape', (['pred_normalized'], {}), '(pred_normalized)\n', (13541, 13558), True, 'import numpy as np\n'), ((13602, 13629), 'numpy.shape', 'np.shape', (['target_normalized'], {}), '(target_normalized)\n', (13610, 13629), True, 'import numpy as np\n'), ((16575, 16612), 'os.path.join', 'os.path.join', (['PATH', '"""predict_history"""'], {}), "(PATH, 'predict_history')\n", (16587, 16612), False, 'import os\n'), ((16877, 16910), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['save_data'], {}), '(save_data)\n', (16899, 16910), True, 'import pandas as pd\n'), ((17559, 17574), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (17572, 17574), False, 'import torch\n'), ((17703, 17729), 'util.save_var', 'save_var', (['params', 'filename'], {}), '(params, filename)\n', (17711, 17729), False, 'from util import Record, save_var, get_file_list, Regeneralize, list_to_csv\n'), ((6691, 6705), 'numpy.array', 'np.array', (['pred'], {}), '(pred)\n', (6699, 6705), True, 'import numpy as np\n'), ((7067, 7081), 'numpy.shape', 'np.shape', (['pred'], {}), '(pred)\n', (7075, 7081), True, 'import numpy as np\n'), ((7688, 7719), 'util.Regeneralize', 'Regeneralize', (['target', 'mean', 'std'], {}), '(target, mean, std)\n', (7700, 7719), False, 'from util import Record, save_var, get_file_list, Regeneralize, list_to_csv\n'), ((9236, 9255), 'numpy.array', 'np.array', (['pred_real'], {}), '(pred_real)\n', (9244, 9255), True, 'import numpy as np\n'), ((11762, 11776), 'numpy.array', 'np.array', (['pred'], {}), '(pred)\n', (11770, 11776), True, 'import numpy as np\n'), ((12143, 12157), 'numpy.shape', 'np.shape', (['pred'], {}), '(pred)\n', (12151, 12157), True, 'import numpy as np\n'), ((12764, 12795), 'util.Regeneralize', 'Regeneralize', (['target', 'mean', 'std'], {}), '(target, 
mean, std)\n', (12776, 12795), False, 'from util import Record, save_var, get_file_list, Regeneralize, list_to_csv\n'), ((14323, 14342), 'numpy.array', 'np.array', (['pred_real'], {}), '(pred_real)\n', (14331, 14342), True, 'import numpy as np\n'), ((15505, 15537), 'time.strftime', 'time.strftime', (['"""%Y%m%d_%H_%M_%S"""'], {}), "('%Y%m%d_%H_%M_%S')\n", (15518, 15537), False, 'import time\n'), ((16072, 16087), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (16085, 16087), False, 'import torch\n'), ((16632, 16656), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (16646, 16656), False, 'import os\n'), ((16674, 16692), 'os.mkdir', 'os.mkdir', (['save_dir'], {}), '(save_dir)\n', (16682, 16692), False, 'import os\n'), ((16829, 16851), 'numpy.array', 'np.array', (['target[:, 0]'], {}), '(target[:, 0])\n', (16837, 16851), True, 'import numpy as np\n'), ((16997, 17034), 'os.path.join', 'os.path.join', (['save_dir', 'save_filename'], {}), '(save_dir, save_filename)\n', (17009, 17034), False, 'import os\n'), ((6179, 6193), 'numpy.array', 'np.array', (['pred'], {}), '(pred)\n', (6187, 6193), True, 'import numpy as np\n'), ((7898, 7930), 'torch.tensor', 'torch.tensor', (['target_ratio[:, 0]'], {}), '(target_ratio[:, 0])\n', (7910, 7930), False, 'import torch\n'), ((11234, 11248), 'numpy.array', 'np.array', (['pred'], {}), '(pred)\n', (11242, 11248), True, 'import numpy as np\n'), ((12974, 13006), 'torch.tensor', 'torch.tensor', (['target_ratio[:, 0]'], {}), '(target_ratio[:, 0])\n', (12986, 13006), False, 'import torch\n'), ((15202, 15222), 'torch.tensor', 'torch.tensor', (['target'], {}), '(target)\n', (15214, 15222), False, 'import torch\n'), ((15819, 15851), 'time.strftime', 'time.strftime', (['"""%Y%m%d_%H_%M_%S"""'], {}), "('%Y%m%d_%H_%M_%S')\n", (15832, 15851), False, 'import time\n'), ((6991, 7005), 'numpy.array', 'np.array', (['pred'], {}), '(pred)\n', (6999, 7005), True, 'import numpy as np\n'), ((7221, 7247), 'torch.tensor', 
'torch.tensor', (['target[:, 0]'], {}), '(target[:, 0])\n', (7233, 7247), False, 'import torch\n'), ((7348, 7374), 'torch.tensor', 'torch.tensor', (['target[:, 0]'], {}), '(target[:, 0])\n', (7360, 7374), False, 'import torch\n'), ((8594, 8623), 'torch.tensor', 'torch.tensor', (['pred_normalized'], {}), '(pred_normalized)\n', (8606, 8623), False, 'import torch\n'), ((8673, 8704), 'torch.tensor', 'torch.tensor', (['target_normalized'], {}), '(target_normalized)\n', (8685, 8704), False, 'import torch\n'), ((12067, 12081), 'numpy.array', 'np.array', (['pred'], {}), '(pred)\n', (12075, 12081), True, 'import numpy as np\n'), ((12297, 12323), 'torch.tensor', 'torch.tensor', (['target[:, 0]'], {}), '(target[:, 0])\n', (12309, 12323), False, 'import torch\n'), ((12424, 12450), 'torch.tensor', 'torch.tensor', (['target[:, 0]'], {}), '(target[:, 0])\n', (12436, 12450), False, 'import torch\n'), ((13670, 13699), 'torch.tensor', 'torch.tensor', (['pred_normalized'], {}), '(pred_normalized)\n', (13682, 13699), False, 'import torch\n'), ((13749, 13780), 'torch.tensor', 'torch.tensor', (['target_normalized'], {}), '(target_normalized)\n', (13761, 13780), False, 'import torch\n'), ((15329, 15361), 'time.strftime', 'time.strftime', (['"""%Y%m%d_%H_%M_%S"""'], {}), "('%Y%m%d_%H_%M_%S')\n", (15342, 15361), False, 'import time\n'), ((3659, 3691), 'time.strftime', 'time.strftime', (['"""%Y%m%d_%H_%M_%S"""'], {}), "('%Y%m%d_%H_%M_%S')\n", (3672, 3691), False, 'import time\n'), ((17464, 17496), 'time.strftime', 'time.strftime', (['"""%Y%m%d_%H_%M_%S"""'], {}), "('%Y%m%d_%H_%M_%S')\n", (17477, 17496), False, 'import time\n')] |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Nipype translation of ANTs' workflows."""
import numpy as np
# general purpose
from pkg_resources import resource_filename as pkgr_fn
# nipype
from nipype.pipeline import engine as pe
from nipype.interfaces import utility as niu
from nipype.interfaces.ants import N4BiasFieldCorrection
from nipype.interfaces.ants.utils import AI
# niworkflows
from niworkflows.anat.ants import init_atropos_wf, ATROPOS_MODELS
from niworkflows.interfaces.ants import ImageMath
from niworkflows.interfaces.images import RegridToZooms, ValidateImage
from niworkflows.interfaces.nibabel import ApplyMask, Binarize
from niworkflows.interfaces.fixes import (
FixHeaderRegistration as Registration,
FixHeaderApplyTransforms as ApplyTransforms,
)
from niworkflows.interfaces.registration import (
SimpleBeforeAfterRPT as SimpleBeforeAfter
)
from templateflow.api import get as get_template
from ..utils.filtering import (
gaussian_filter as _gauss_filter,
truncation as _trunc
)
HIRES_ZOOMS = (1, 1, 1)
LOWRES_ZOOMS = (2, 2, 2)
def init_infant_brain_extraction_wf(
    ants_affine_init=False,
    bspline_fitting_distance=200,
    debug=False,
    in_template="MNIInfant",
    template_specs=None,
    interim_checkpoints=True,
    mem_gb=3.0,
    mri_scheme="T2w",
    name="infant_brain_extraction_wf",
    atropos_model=None,
    omp_nthreads=None,
    output_dir=None,
    use_float=True,
):
    """
    Build an atlas-based brain extraction pipeline for infant T2w MRI data.

    Parameters
    ----------
    ants_affine_init : :obj:`bool`, optional
        Set-up a pre-initialization step with ``antsAI`` to account for mis-oriented images.
    bspline_fitting_distance : :obj:`float`, optional
        Knot distance (mm) of the B-Spline mesh for the initial N4 correction.
    debug : :obj:`bool`, optional
        Use lighter-weight (``testing``) registration parameters.
    in_template : :obj:`str`, optional
        TemplateFlow identifier of the target template.
    template_specs : :obj:`dict`, optional
        Additional TemplateFlow queries (e.g., cohort, resolution).
    interim_checkpoints : :obj:`bool`, optional
        Generate before/after SVG reportlets of the registration steps.
    mem_gb : :obj:`float`, optional
        Memory estimate for the registration node.
    mri_scheme : :obj:`str`, optional
        MR scheme of the input data (used to query the template and pick
        the default Atropos model).
    name : :obj:`str`, optional
        Workflow name.
    atropos_model : :obj:`tuple`, optional
        Atropos segmentation model; defaults to the scheme-specific model.
    omp_nthreads : :obj:`int`, optional
        Number of threads for multi-threaded nodes.
    output_dir : :obj:`os.PathLike`, optional
        If set, sink the final INU-corrected image, brain mask and reports.
    use_float : :obj:`bool`, optional
        Run the registration in single precision.

    """
    inputnode = pe.Node(
        niu.IdentityInterface(fields=["in_files", "in_mask"]), name="inputnode"
    )
    outputnode = pe.Node(
        niu.IdentityInterface(fields=["out_corrected", "out_brain", "out_mask"]),
        name="outputnode"
    )

    template_specs = template_specs or {}
    # Find a suitable target template in TemplateFlow
    tpl_target_path = get_template(
        in_template,
        suffix=mri_scheme,
        **template_specs
    )
    if not tpl_target_path:
        raise RuntimeError(
            f"An instance of template <tpl-{in_template}> with MR scheme '{mri_scheme}'"
            " could not be found.")

    # tpl_brainmask_path = get_template(
    #     in_template, desc="brain", suffix="probseg", **template_specs
    # )
    # if not tpl_brainmask_path:

    # ignore probseg for the time being
    tpl_brainmask_path = get_template(
        in_template, desc="brain", suffix="mask", **template_specs
    )

    tpl_regmask_path = get_template(
        in_template, desc="BrainCerebellumExtraction", suffix="mask", **template_specs
    )

    # validate images
    val_tmpl = pe.Node(ValidateImage(), name='val_tmpl')
    val_tmpl.inputs.in_file = _pop(tpl_target_path)

    val_target = pe.Node(ValidateImage(), name='val_target')

    # Resample both target and template to a controlled, isotropic resolution
    res_tmpl = pe.Node(RegridToZooms(zooms=HIRES_ZOOMS), name="res_tmpl")  # testing
    res_target = pe.Node(RegridToZooms(zooms=HIRES_ZOOMS), name="res_target")  # testing
    gauss_tmpl = pe.Node(niu.Function(function=_gauss_filter), name="gauss_tmpl")

    # Spatial normalization step
    lap_tmpl = pe.Node(ImageMath(operation="Laplacian", op2="0.4 1"), name="lap_tmpl")
    lap_target = pe.Node(ImageMath(operation="Laplacian", op2="0.4 1"), name="lap_target")

    # Merge image nodes
    mrg_target = pe.Node(niu.Merge(2), name="mrg_target")
    mrg_tmpl = pe.Node(niu.Merge(2), name="mrg_tmpl")

    norm_lap_tmpl = pe.Node(niu.Function(function=_trunc), name="norm_lap_tmpl")
    norm_lap_tmpl.inputs.dtype = "float32"
    norm_lap_tmpl.inputs.out_max = 1.0
    norm_lap_tmpl.inputs.percentile = (0.01, 99.99)
    norm_lap_tmpl.inputs.clip_max = None

    norm_lap_target = pe.Node(niu.Function(function=_trunc), name="norm_lap_target")
    norm_lap_target.inputs.dtype = "float32"
    norm_lap_target.inputs.out_max = 1.0
    norm_lap_target.inputs.percentile = (0.01, 99.99)
    norm_lap_target.inputs.clip_max = None

    # Set up initial spatial normalization
    ants_params = "testing" if debug else "precise"
    norm = pe.Node(
        Registration(from_file=pkgr_fn(
            "niworkflows.data",
            f"antsBrainExtraction_{ants_params}.json")
        ),
        name="norm",
        n_procs=omp_nthreads,
        mem_gb=mem_gb,
    )
    norm.inputs.float = use_float

    # main workflow
    wf = pe.Workflow(name)
    # Create a buffer interface as a cache for the actual inputs to registration
    buffernode = pe.Node(niu.IdentityInterface(
        fields=["hires_target", "smooth_target"]), name="buffernode")

    # truncate target intensity for N4 correction
    clip_target = pe.Node(
        niu.Function(function=_trunc),
        name="clip_target",
    )
    clip_tmpl = pe.Node(
        niu.Function(function=_trunc),
        name="clip_tmpl",
    )
    # clip_tmpl.inputs.in_file = _pop(tpl_target_path)

    # INU correction of the target image
    init_n4 = pe.Node(
        N4BiasFieldCorrection(
            dimension=3,
            save_bias=False,
            copy_header=True,
            n_iterations=[50] * (4 - debug),
            convergence_threshold=1e-7,
            shrink_factor=4,
            bspline_fitting_distance=bspline_fitting_distance,
        ),
        n_procs=omp_nthreads,
        name="init_n4",
    )
    clip_inu = pe.Node(
        niu.Function(function=_trunc),
        name="clip_inu",
    )
    gauss_target = pe.Node(niu.Function(function=_gauss_filter), name="gauss_target")
    wf.connect([
        # truncation, resampling, and initial N4
        (inputnode, val_target, [(("in_files", _pop), "in_file")]),
        # (inputnode, res_target, [(("in_files", _pop), "in_file")]),
        (val_target, res_target, [("out_file", "in_file")]),
        (res_target, clip_target, [("out_file", "in_file")]),
        (val_tmpl, clip_tmpl, [("out_file", "in_file")]),
        (clip_tmpl, res_tmpl, [("out", "in_file")]),
        (clip_target, init_n4, [("out", "input_image")]),
        (init_n4, clip_inu, [("output_image", "in_file")]),
        (clip_inu, gauss_target, [("out", "in_file")]),
        (clip_inu, buffernode, [("out", "hires_target")]),
        (gauss_target, buffernode, [("out", "smooth_target")]),
        (res_tmpl, gauss_tmpl, [("out_file", "in_file")]),
        # (clip_tmpl, gauss_tmpl, [("out", "in_file")]),
    ])

    # Graft a template registration-mask if present
    if tpl_regmask_path:
        hires_mask = pe.Node(
            ApplyTransforms(
                input_image=_pop(tpl_regmask_path),
                transforms="identity",
                interpolation="NearestNeighbor",
                float=True),
            name="hires_mask",
            mem_gb=1
        )
        wf.connect([
            (res_tmpl, hires_mask, [("out_file", "reference_image")]),
        ])

    map_brainmask = pe.Node(
        ApplyTransforms(interpolation="Gaussian", float=True),
        name="map_brainmask",
        mem_gb=1
    )
    map_brainmask.inputs.input_image = str(tpl_brainmask_path)

    thr_brainmask = pe.Node(Binarize(thresh_low=0.80),
                            name="thr_brainmask")

    bspline_grid = pe.Node(niu.Function(function=_bspline_distance),
                           name="bspline_grid")

    # Refine INU correction
    final_n4 = pe.Node(
        N4BiasFieldCorrection(
            dimension=3,
            save_bias=True,
            copy_header=True,
            n_iterations=[50] * 5,
            convergence_threshold=1e-7,
            rescale_intensities=True,
            shrink_factor=4,
        ),
        n_procs=omp_nthreads,
        name="final_n4",
    )
    final_mask = pe.Node(ApplyMask(), name="final_mask")

    if atropos_model is None:
        atropos_model = tuple(ATROPOS_MODELS[mri_scheme].values())

    # NOTE: atropos_wf/sel_wm are built but their wiring is currently disabled
    # (see the commented wf.connect blocks below).
    atropos_wf = init_atropos_wf(
        use_random_seed=False,
        omp_nthreads=omp_nthreads,
        mem_gb=mem_gb,
        in_segmentation_model=atropos_model,
    )
    # if tpl_regmask_path:
    #     atropos_wf.get_node('inputnode').inputs.in_mask_dilated = tpl_regmask_path

    sel_wm = pe.Node(niu.Select(index=atropos_model[-1] - 1), name='sel_wm',
                     run_without_submitting=True)

    wf.connect([
        (inputnode, map_brainmask, [(("in_files", _pop), "reference_image")]),
        (inputnode, final_n4, [(("in_files", _pop), "input_image")]),
        (inputnode, bspline_grid, [(("in_files", _pop), "in_file")]),
        # (bspline_grid, final_n4, [("out", "bspline_fitting_distance")]),
        (bspline_grid, final_n4, [("out", "args")]),
        # merge laplacian and original images
        (buffernode, lap_target, [("smooth_target", "op1")]),
        (buffernode, mrg_target, [("hires_target", "in1")]),
        (lap_target, norm_lap_target, [("output_image", "in_file")]),
        (norm_lap_target, mrg_target, [("out", "in2")]),
        # Template massaging
        (res_tmpl, lap_tmpl, [("out_file", "op1")]),
        (res_tmpl, mrg_tmpl, [("out_file", "in1")]),
        (lap_tmpl, norm_lap_tmpl, [("output_image", "in_file")]),
        (norm_lap_tmpl, mrg_tmpl, [("out", "in2")]),
        # spatial normalization
        (mrg_target, norm, [("out", "moving_image")]),
        (mrg_tmpl, norm, [("out", "fixed_image")]),
        (norm, map_brainmask, [
            ("reverse_transforms", "transforms"),
            ("reverse_invert_flags", "invert_transform_flags")]),
        (map_brainmask, thr_brainmask, [("output_image", "in_file")]),
        # take a second pass of N4
        (map_brainmask, final_n4, [("output_image", "weight_image")]),
        (final_n4, final_mask, [("output_image", "in_file")]),
        (thr_brainmask, final_mask, [("out_mask", "in_mask")]),
        (final_n4, outputnode, [("output_image", "out_corrected")]),
        (thr_brainmask, outputnode, [("out_mask", "out_mask")]),
        (final_mask, outputnode, [("out_file", "out_brain")]),
    ])

    # wf.disconnect([
    #     (get_brainmask, apply_mask, [('output_image', 'mask_file')]),
    #     (copy_xform, outputnode, [('out_mask', 'out_mask')]),
    # ])
    # wf.connect([
    #     (init_n4, atropos_wf, [
    #         ('output_image', 'inputnode.in_files')]),  # intensity image
    #     (thr_brainmask, atropos_wf, [
    #         ('out_mask', 'inputnode.in_mask')]),
    #     (atropos_wf, sel_wm, [('outputnode.out_tpms', 'inlist')]),
    #     (sel_wm, final_n4, [('out', 'weight_image')]),
    # ])
    # wf.connect([
    #     (atropos_wf, outputnode, [
    #         ('outputnode.out_mask', 'out_mask'),
    #         ('outputnode.out_segm', 'out_segm'),
    #         ('outputnode.out_tpms', 'out_tpms')]),
    # ])

    if tpl_regmask_path:
        wf.connect([
            (hires_mask, norm, [
                ("output_image", "fixed_image_masks")]),
            # (hires_mask, atropos_wf, [
            #     ("output_image", "inputnode.in_mask_dilated")]),
        ])

    if interim_checkpoints:
        final_apply = pe.Node(
            ApplyTransforms(
                interpolation="BSpline",
                float=True),
            name="final_apply",
            mem_gb=1
        )
        final_report = pe.Node(SimpleBeforeAfter(
            before_label=f"tpl-{in_template}",
            after_label="target",
            out_report="final_report.svg"),
            name="final_report"
        )
        wf.connect([
            (inputnode, final_apply, [(("in_files", _pop), "reference_image")]),
            (res_tmpl, final_apply, [("out_file", "input_image")]),
            (norm, final_apply, [
                ("reverse_transforms", "transforms"),
                ("reverse_invert_flags", "invert_transform_flags")]),
            (final_apply, final_report, [("output_image", "before")]),
            (outputnode, final_report, [("out_corrected", "after"),
                                        ("out_mask", "wm_seg")]),
        ])

    if output_dir:
        from nipype.interfaces.io import DataSink
        ds_final_inu = pe.Node(DataSink(base_directory=str(output_dir.parent)),
                               name="ds_final_inu")
        ds_final_msk = pe.Node(DataSink(base_directory=str(output_dir.parent)),
                               name="ds_final_msk")

        wf.connect([
            (outputnode, ds_final_inu, [
                ("out_corrected", f"{output_dir.name}.@inu_corrected")]),
            (outputnode, ds_final_msk, [
                ("out_mask", f"{output_dir.name}.@brainmask")]),
        ])
        # FIX: ``final_report`` only exists when interim_checkpoints is on;
        # previously this sink was wired unconditionally and raised a
        # NameError when output_dir was set with interim_checkpoints=False.
        if interim_checkpoints:
            ds_report = pe.Node(DataSink(base_directory=str(output_dir.parent)),
                                name="ds_report")
            wf.connect([
                (final_report, ds_report, [
                    ("out_report", f"{output_dir.name}.@report")]),
            ])

    if not ants_affine_init:
        return wf

    # Initialize transforms with antsAI
    lowres_tmpl = pe.Node(RegridToZooms(zooms=LOWRES_ZOOMS), name="lowres_tmpl")
    lowres_target = pe.Node(RegridToZooms(zooms=LOWRES_ZOOMS), name="lowres_target")

    init_aff = pe.Node(
        AI(
            metric=("Mattes", 32, "Regular", 0.25),
            transform=("Affine", 0.1),
            search_factor=(15, 0.1),
            principal_axes=False,
            convergence=(10, 1e-6, 10),
            search_grid=(40, (0, 40, 40)),
            verbose=True,
        ),
        name="init_aff",
        n_procs=omp_nthreads,
    )
    wf.connect([
        (gauss_tmpl, lowres_tmpl, [("out", "in_file")]),
        (lowres_tmpl, init_aff, [("out_file", "fixed_image")]),
        (gauss_target, lowres_target, [("out", "in_file")]),
        (lowres_target, init_aff, [("out_file", "moving_image")]),
        (init_aff, norm, [("output_transform", "initial_moving_transform")]),
    ])

    if tpl_regmask_path:
        lowres_mask = pe.Node(
            ApplyTransforms(
                input_image=_pop(tpl_regmask_path),
                transforms="identity",
                interpolation="MultiLabel",
                float=True),
            name="lowres_mask",
            mem_gb=1
        )
        wf.connect([
            (lowres_tmpl, lowres_mask, [("out_file", "reference_image")]),
            (lowres_mask, init_aff, [("output_image", "fixed_image_mask")]),
        ])

    if interim_checkpoints:
        init_apply = pe.Node(
            ApplyTransforms(
                interpolation="BSpline",
                float=True),
            name="init_apply",
            mem_gb=1
        )
        init_report = pe.Node(SimpleBeforeAfter(
            before_label=f"tpl-{in_template}",
            after_label="target",
            out_report="init_report.svg"),
            name="init_report"
        )
        wf.connect([
            (lowres_target, init_apply, [("out_file", "input_image")]),
            (res_tmpl, init_apply, [("out_file", "reference_image")]),
            (init_aff, init_apply, [("output_transform", "transforms")]),
            (init_apply, init_report, [("output_image", "after")]),
            (res_tmpl, init_report, [("out_file", "before")]),
        ])

        if output_dir:
            # DataSink was imported in the output_dir branch above, which is
            # guaranteed to have executed when output_dir is truthy.
            ds_init_report = pe.Node(DataSink(base_directory=str(output_dir.parent)),
                                     name="ds_init_report")
            wf.connect(
                init_report, "out_report",
                ds_init_report, f"{output_dir.name}.@init_report"
            )
    return wf
def _pop(in_files):
if isinstance(in_files, (list, tuple)):
return in_files[0]
return in_files
def _bspline_distance(in_file, spacings=(20, 20, 20)):
    """Build the ``-b`` argument string (B-spline mesh size per axis) for N4.

    The mesh count per axis is ceil(field-of-view / spacing), where the
    field-of-view is (shape - 1) * voxel size along the first three axes.
    """
    # Imports stay inside the body so the function is self-contained
    # (presumably run through a nipype Function interface -- TODO confirm).
    import numpy as np
    import nibabel as nb

    img = nb.load(in_file)
    fov = (np.array(img.shape[:3]) - 1) * img.header.get_zooms()[:3]
    mesh_counts = np.ceil(fov / np.array(spacings)).astype(int)
    return "-b {}".format("x".join(str(c) for c in mesh_counts))
| [
"niworkflows.interfaces.fixes.FixHeaderApplyTransforms",
"nibabel.load",
"numpy.array",
"nipype.interfaces.utility.IdentityInterface",
"niworkflows.interfaces.nibabel.Binarize",
"nipype.pipeline.engine.Workflow",
"nipype.interfaces.utility.Select",
"nipype.interfaces.ants.N4BiasFieldCorrection",
"te... | [((2147, 2209), 'templateflow.api.get', 'get_template', (['in_template'], {'suffix': 'mri_scheme'}), '(in_template, suffix=mri_scheme, **template_specs)\n', (2159, 2209), True, 'from templateflow.api import get as get_template\n'), ((2642, 2714), 'templateflow.api.get', 'get_template', (['in_template'], {'desc': '"""brain"""', 'suffix': '"""mask"""'}), "(in_template, desc='brain', suffix='mask', **template_specs)\n", (2654, 2714), True, 'from templateflow.api import get as get_template\n'), ((2753, 2849), 'templateflow.api.get', 'get_template', (['in_template'], {'desc': '"""BrainCerebellumExtraction"""', 'suffix': '"""mask"""'}), "(in_template, desc='BrainCerebellumExtraction', suffix='mask',\n **template_specs)\n", (2765, 2849), True, 'from templateflow.api import get as get_template\n'), ((4662, 4679), 'nipype.pipeline.engine.Workflow', 'pe.Workflow', (['name'], {}), '(name)\n', (4673, 4679), True, 'from nipype.pipeline import engine as pe\n'), ((8103, 8225), 'niworkflows.anat.ants.init_atropos_wf', 'init_atropos_wf', ([], {'use_random_seed': '(False)', 'omp_nthreads': 'omp_nthreads', 'mem_gb': 'mem_gb', 'in_segmentation_model': 'atropos_model'}), '(use_random_seed=False, omp_nthreads=omp_nthreads, mem_gb=\n mem_gb, in_segmentation_model=atropos_model)\n', (8118, 8225), False, 'from niworkflows.anat.ants import init_atropos_wf, ATROPOS_MODELS\n'), ((15847, 15863), 'nibabel.load', 'nb.load', (['in_file'], {}), '(in_file)\n', (15854, 15863), True, 'import nibabel as nb\n'), ((1810, 1863), 'nipype.interfaces.utility.IdentityInterface', 'niu.IdentityInterface', ([], {'fields': "['in_files', 'in_mask']"}), "(fields=['in_files', 'in_mask'])\n", (1831, 1863), True, 'from nipype.interfaces import utility as niu\n'), ((1922, 1994), 'nipype.interfaces.utility.IdentityInterface', 'niu.IdentityInterface', ([], {'fields': "['out_corrected', 'out_brain', 'out_mask']"}), "(fields=['out_corrected', 'out_brain', 'out_mask'])\n", (1943, 1994), True, 'from 
nipype.interfaces import utility as niu\n'), ((2906, 2921), 'niworkflows.interfaces.images.ValidateImage', 'ValidateImage', ([], {}), '()\n', (2919, 2921), False, 'from niworkflows.interfaces.images import RegridToZooms, ValidateImage\n'), ((3018, 3033), 'niworkflows.interfaces.images.ValidateImage', 'ValidateImage', ([], {}), '()\n', (3031, 3033), False, 'from niworkflows.interfaces.images import RegridToZooms, ValidateImage\n'), ((3156, 3188), 'niworkflows.interfaces.images.RegridToZooms', 'RegridToZooms', ([], {'zooms': 'HIRES_ZOOMS'}), '(zooms=HIRES_ZOOMS)\n', (3169, 3188), False, 'from niworkflows.interfaces.images import RegridToZooms, ValidateImage\n'), ((3243, 3275), 'niworkflows.interfaces.images.RegridToZooms', 'RegridToZooms', ([], {'zooms': 'HIRES_ZOOMS'}), '(zooms=HIRES_ZOOMS)\n', (3256, 3275), False, 'from niworkflows.interfaces.images import RegridToZooms, ValidateImage\n'), ((3332, 3368), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'function': '_gauss_filter'}), '(function=_gauss_filter)\n', (3344, 3368), True, 'from nipype.interfaces import utility as niu\n'), ((3446, 3491), 'niworkflows.interfaces.ants.ImageMath', 'ImageMath', ([], {'operation': '"""Laplacian"""', 'op2': '"""0.4 1"""'}), "(operation='Laplacian', op2='0.4 1')\n", (3455, 3491), False, 'from niworkflows.interfaces.ants import ImageMath\n'), ((3535, 3580), 'niworkflows.interfaces.ants.ImageMath', 'ImageMath', ([], {'operation': '"""Laplacian"""', 'op2': '"""0.4 1"""'}), "(operation='Laplacian', op2='0.4 1')\n", (3544, 3580), False, 'from niworkflows.interfaces.ants import ImageMath\n'), ((3651, 3663), 'nipype.interfaces.utility.Merge', 'niu.Merge', (['(2)'], {}), '(2)\n', (3660, 3663), True, 'from nipype.interfaces import utility as niu\n'), ((3707, 3719), 'nipype.interfaces.utility.Merge', 'niu.Merge', (['(2)'], {}), '(2)\n', (3716, 3719), True, 'from nipype.interfaces import utility as niu\n'), ((3767, 3796), 'nipype.interfaces.utility.Function', 'niu.Function', ([], 
{'function': '_trunc'}), '(function=_trunc)\n', (3779, 3796), True, 'from nipype.interfaces import utility as niu\n'), ((4026, 4055), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'function': '_trunc'}), '(function=_trunc)\n', (4038, 4055), True, 'from nipype.interfaces import utility as niu\n'), ((4786, 4849), 'nipype.interfaces.utility.IdentityInterface', 'niu.IdentityInterface', ([], {'fields': "['hires_target', 'smooth_target']"}), "(fields=['hires_target', 'smooth_target'])\n", (4807, 4849), True, 'from nipype.interfaces import utility as niu\n'), ((4965, 4994), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'function': '_trunc'}), '(function=_trunc)\n', (4977, 4994), True, 'from nipype.interfaces import utility as niu\n'), ((5063, 5092), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'function': '_trunc'}), '(function=_trunc)\n', (5075, 5092), True, 'from nipype.interfaces import utility as niu\n'), ((5253, 5460), 'nipype.interfaces.ants.N4BiasFieldCorrection', 'N4BiasFieldCorrection', ([], {'dimension': '(3)', 'save_bias': '(False)', 'copy_header': '(True)', 'n_iterations': '([50] * (4 - debug))', 'convergence_threshold': '(1e-07)', 'shrink_factor': '(4)', 'bspline_fitting_distance': 'bspline_fitting_distance'}), '(dimension=3, save_bias=False, copy_header=True,\n n_iterations=[50] * (4 - debug), convergence_threshold=1e-07,\n shrink_factor=4, bspline_fitting_distance=bspline_fitting_distance)\n', (5274, 5460), False, 'from nipype.interfaces.ants import N4BiasFieldCorrection\n'), ((5640, 5669), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'function': '_trunc'}), '(function=_trunc)\n', (5652, 5669), True, 'from nipype.interfaces import utility as niu\n'), ((5729, 5765), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'function': '_gauss_filter'}), '(function=_gauss_filter)\n', (5741, 5765), True, 'from nipype.interfaces import utility as niu\n'), ((7155, 7208), 
'niworkflows.interfaces.fixes.FixHeaderApplyTransforms', 'ApplyTransforms', ([], {'interpolation': '"""Gaussian"""', 'float': '(True)'}), "(interpolation='Gaussian', float=True)\n", (7170, 7208), True, 'from niworkflows.interfaces.fixes import FixHeaderRegistration as Registration, FixHeaderApplyTransforms as ApplyTransforms\n'), ((7355, 7379), 'niworkflows.interfaces.nibabel.Binarize', 'Binarize', ([], {'thresh_low': '(0.8)'}), '(thresh_low=0.8)\n', (7363, 7379), False, 'from niworkflows.interfaces.nibabel import ApplyMask, Binarize\n'), ((7459, 7499), 'nipype.interfaces.utility.Function', 'niu.Function', ([], {'function': '_bspline_distance'}), '(function=_bspline_distance)\n', (7471, 7499), True, 'from nipype.interfaces import utility as niu\n'), ((7610, 7782), 'nipype.interfaces.ants.N4BiasFieldCorrection', 'N4BiasFieldCorrection', ([], {'dimension': '(3)', 'save_bias': '(True)', 'copy_header': '(True)', 'n_iterations': '([50] * 5)', 'convergence_threshold': '(1e-07)', 'rescale_intensities': '(True)', 'shrink_factor': '(4)'}), '(dimension=3, save_bias=True, copy_header=True,\n n_iterations=[50] * 5, convergence_threshold=1e-07, rescale_intensities\n =True, shrink_factor=4)\n', (7631, 7782), False, 'from nipype.interfaces.ants import N4BiasFieldCorrection\n'), ((7955, 7966), 'niworkflows.interfaces.nibabel.ApplyMask', 'ApplyMask', ([], {}), '()\n', (7964, 7966), False, 'from niworkflows.interfaces.nibabel import ApplyMask, Binarize\n'), ((8394, 8433), 'nipype.interfaces.utility.Select', 'niu.Select', ([], {'index': '(atropos_model[-1] - 1)'}), '(index=atropos_model[-1] - 1)\n', (8404, 8433), True, 'from nipype.interfaces import utility as niu\n'), ((13122, 13155), 'niworkflows.interfaces.images.RegridToZooms', 'RegridToZooms', ([], {'zooms': 'LOWRES_ZOOMS'}), '(zooms=LOWRES_ZOOMS)\n', (13135, 13155), False, 'from niworkflows.interfaces.images import RegridToZooms, ValidateImage\n'), ((13205, 13238), 'niworkflows.interfaces.images.RegridToZooms', 'RegridToZooms', 
([], {'zooms': 'LOWRES_ZOOMS'}), '(zooms=LOWRES_ZOOMS)\n', (13218, 13238), False, 'from niworkflows.interfaces.images import RegridToZooms, ValidateImage\n'), ((13295, 13494), 'nipype.interfaces.ants.utils.AI', 'AI', ([], {'metric': "('Mattes', 32, 'Regular', 0.25)", 'transform': "('Affine', 0.1)", 'search_factor': '(15, 0.1)', 'principal_axes': '(False)', 'convergence': '(10, 1e-06, 10)', 'search_grid': '(40, (0, 40, 40))', 'verbose': '(True)'}), "(metric=('Mattes', 32, 'Regular', 0.25), transform=('Affine', 0.1),\n search_factor=(15, 0.1), principal_axes=False, convergence=(10, 1e-06, \n 10), search_grid=(40, (0, 40, 40)), verbose=True)\n", (13297, 13494), False, 'from nipype.interfaces.ants.utils import AI\n'), ((11281, 11333), 'niworkflows.interfaces.fixes.FixHeaderApplyTransforms', 'ApplyTransforms', ([], {'interpolation': '"""BSpline"""', 'float': '(True)'}), "(interpolation='BSpline', float=True)\n", (11296, 11333), True, 'from niworkflows.interfaces.fixes import FixHeaderRegistration as Registration, FixHeaderApplyTransforms as ApplyTransforms\n'), ((11462, 11571), 'niworkflows.interfaces.registration.SimpleBeforeAfterRPT', 'SimpleBeforeAfter', ([], {'before_label': 'f"""tpl-{in_template}"""', 'after_label': '"""target"""', 'out_report': '"""final_report.svg"""'}), "(before_label=f'tpl-{in_template}', after_label='target',\n out_report='final_report.svg')\n", (11479, 11571), True, 'from niworkflows.interfaces.registration import SimpleBeforeAfterRPT as SimpleBeforeAfter\n'), ((14561, 14613), 'niworkflows.interfaces.fixes.FixHeaderApplyTransforms', 'ApplyTransforms', ([], {'interpolation': '"""BSpline"""', 'float': '(True)'}), "(interpolation='BSpline', float=True)\n", (14576, 14613), True, 'from niworkflows.interfaces.fixes import FixHeaderRegistration as Registration, FixHeaderApplyTransforms as ApplyTransforms\n'), ((14740, 14848), 'niworkflows.interfaces.registration.SimpleBeforeAfterRPT', 'SimpleBeforeAfter', ([], {'before_label': 
'f"""tpl-{in_template}"""', 'after_label': '"""target"""', 'out_report': '"""init_report.svg"""'}), "(before_label=f'tpl-{in_template}', after_label='target',\n out_report='init_report.svg')\n", (14757, 14848), True, 'from niworkflows.interfaces.registration import SimpleBeforeAfterRPT as SimpleBeforeAfter\n'), ((15878, 15901), 'numpy.array', 'np.array', (['img.shape[:3]'], {}), '(img.shape[:3])\n', (15886, 15901), True, 'import numpy as np\n'), ((4411, 4481), 'pkg_resources.resource_filename', 'pkgr_fn', (['"""niworkflows.data"""', 'f"""antsBrainExtraction_{ants_params}.json"""'], {}), "('niworkflows.data', f'antsBrainExtraction_{ants_params}.json')\n", (4418, 4481), True, 'from pkg_resources import resource_filename as pkgr_fn\n'), ((15983, 16001), 'numpy.array', 'np.array', (['spacings'], {}), '(spacings)\n', (15991, 16001), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class SqueezeTest(hu.HypothesisTestCase):
    """Device checks for the Squeeze operator under the MKLDNN (ideep) backend."""

    @given(
        squeeze_dims=st.lists(st.integers(0, 3), min_size=1, max_size=3),
        inplace=st.booleans(),
        **mu.gcs
    )
    def test_squeeze(self, squeeze_dims, inplace, gc, dc):
        # Axes named in squeeze_dims get extent 1; the others are random in [1, 5).
        dims = []
        for axis in range(4):
            dims.append(1 if axis in squeeze_dims else np.random.randint(1, 5))
        tensor = np.random.rand(*dims).astype(np.float32)
        output_blob = "X" if inplace else "Y"
        squeeze_op = core.CreateOperator(
            "Squeeze", "X", output_blob, dims=squeeze_dims
        )
        self.assertDeviceChecks(dc, squeeze_op, [tensor], [0])
# Entry point: run the hypothesis-driven device checks above.
if __name__ == "__main__":
    unittest.main()
| [
"numpy.random.rand",
"hypothesis.strategies.integers",
"unittest.skipIf",
"numpy.random.randint",
"hypothesis.strategies.booleans",
"caffe2.python.core.CreateOperator",
"unittest.main"
] | [((399, 464), 'unittest.skipIf', 'unittest.skipIf', (['(not workspace.C.use_mkldnn)', '"""No MKLDNN support."""'], {}), "(not workspace.C.use_mkldnn, 'No MKLDNN support.')\n", (414, 464), False, 'import unittest\n'), ((1104, 1119), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1117, 1119), False, 'import unittest\n'), ((912, 991), 'caffe2.python.core.CreateOperator', 'core.CreateOperator', (['"""Squeeze"""', '"""X"""', "('X' if inplace else 'Y')"], {'dims': 'squeeze_dims'}), "('Squeeze', 'X', 'X' if inplace else 'Y', dims=squeeze_dims)\n", (931, 991), False, 'from caffe2.python import core, workspace\n'), ((613, 626), 'hypothesis.strategies.booleans', 'st.booleans', ([], {}), '()\n', (624, 626), True, 'import hypothesis.strategies as st\n'), ((775, 798), 'numpy.random.randint', 'np.random.randint', (['(1)', '(5)'], {}), '(1, 5)\n', (792, 798), True, 'import numpy as np\n'), ((856, 878), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (870, 878), True, 'import numpy as np\n'), ((552, 569), 'hypothesis.strategies.integers', 'st.integers', (['(0)', '(3)'], {}), '(0, 3)\n', (563, 569), True, 'import hypothesis.strategies as st\n')] |
from __future__ import print_function
# Input/output locations (absolute cluster paths): control runs, backscatter
# runs, and where the paper figures go.
path = '/network/aopp/cirrus/pred/kloewer/swm_bf_cntrl/data/'
dpath = '/network/aopp/cirrus/pred/kloewer/swm_back_ronew/'
outpath = '/network/home/aopp/kloewer/swm/paperplot/'
import os; os.chdir(path) # change working directory
import numpy as np
from scipy import sparse
import matplotlib.pyplot as plt
import time as tictoc
from netCDF4 import Dataset
import glob
from matplotlib.colors import BoundaryNorm,LogNorm
import cmocean
# Use Computer Modern for mathtext so labels match the paper's typesetting.
plt.rcParams['mathtext.fontset'] = 'cm'
plt.rcParams['mathtext.rm'] = 'serif'
# OPTIONS
# Run ids: index 0 -> low-resolution control, 1 -> high-resolution control,
# 2 -> backscatter run (matches the row labels set on the axes below).
runfolder = [0,6,3]
print('Plots for run ' + str(runfolder))
# read data
# The .npy files hold pickled dicts inside 0-d object arrays; calling .all()
# on the 0-d array returns the stored object (here a dict).
runpath1 = path+'run%04i' % runfolder[0]
D1 = np.load(runpath1+'/analysis/power_map.npy').all()
param1 = np.load(runpath1+'/param.npy').all()
runpath2 = path+'run%04i' % runfolder[1]
D2 = np.load(runpath2+'/analysis/power_map.npy').all()
param2 = np.load(runpath2+'/param.npy').all()
# Backscatter run lives under dpath, not path.
runpath3 = dpath+'run%04i' % runfolder[2]
D3 = np.load(runpath3+'/analysis/power_map.npy').all()
param3 = np.load(runpath3+'/param.npy').all()
print(param3['n_diss'])
def h2mat(h,param):
    """Reshape a flat T-grid (cell-centre) field to a (ny, nx) matrix."""
    return np.reshape(h, (param['ny'], param['nx']))
def u2mat(u,param):
    """Reshape a flat u-grid field to a (ny, nx-1) matrix."""
    return np.reshape(u, (param['ny'], param['nx'] - 1))
def v2mat(v,param):
    """Reshape a flat v-grid field to a (ny-1, nx) matrix."""
    return np.reshape(v, (param['ny'] - 1, param['nx']))
def q2mat(q,param):
    """Reshape a flat q-grid (cell-corner) field to a (ny+1, nx+1) matrix."""
    return np.reshape(q, (param['ny'] + 1, param['nx'] + 1))
#
# m collects the nine area-mean powers (3 runs x 3 terms), annotated on the
# panels later and reshaped into budget_closed at the end of this section.
m = [0]*(len(runfolder)*3)
s = 1e3     # scale W/m^2 -> 10^-3 W/m^2 (matches the colorbar label below)
expo = 1.   # exponent of the signed power transform sign(x)*|x|**expo (1. = identity)
# LOW RESOLUTION RUN
in1 = h2mat(D1['InPower_T'],param1)*s
m[0] = in1.mean()
in1 = np.sign(in1)*abs(in1)**expo
bf1 = h2mat(D1['BfricPower_T'],param1)*s
m[1] = bf1.mean()
bf1 = np.sign(bf1)*abs(bf1)**expo
ex1 = h2mat(D1['ExPower_T'],param1)*s
m[2] = ex1.mean()
ex1 = np.sign(ex1)*abs(ex1)**expo
# BACKSCATTER RUN
in3 = h2mat(D3['InPower_T'],param3)*s
m[3] = in3.mean()
in3 = np.sign(in3)*abs(in3)**expo
bf3 = h2mat(D3['BfricPower_T'],param3)*s
m[4] = bf3.mean()
bf3 = np.sign(bf3)*abs(bf3)**expo
# Fold the backscatter power into the viscosity term (the panel title below
# reads "Biharmonic viscosity + backscatter power").
D3['ExPower_T'] += D3['BackPower_T']
ex3 = h2mat(D3['ExPower_T'],param3)*s
m[5] = ex3.mean()
ex3 = np.sign(ex3)*abs(ex3)**expo
# bs = h2mat(D3['BackPower_T'],param3)*s
# m[6] = bs.mean()
# bs = np.sign(bs)*np.sqrt(abs(bs))
# HIGH RESOLUTION RUN
in2 = h2mat(D2['InPower_T'],param2)*s
m[6] = in2.mean()
in2 = np.sign(in2)*abs(in2)**expo
bf2 = h2mat(D2['BfricPower_T'],param2)*s
m[7] = bf2.mean()
bf2 = np.sign(bf2)*abs(bf2)**expo
ex2 = h2mat(D2['ExPower_T'],param2)*s
m[8] = ex2.mean()
ex2 = np.sign(ex2)*abs(ex2)**expo
# Rounded means go onto the panels; budget_closed arranges them as
# (run, term) -- presumably for checking the energy budget closes (not used
# further in this script).
mround = [np.round(mi,2) for mi in m]
budget_closed = np.array(m).reshape((3,-1))
## PLOTTING
# 3x3 grid: rows = (low res, backscatter, high res), columns = (wind forcing,
# bottom friction, viscosity+backscatter); one shared horizontal colorbar.
fig,axs = plt.subplots(3,3,figsize=(8.8,9),sharex=True,sharey=True)
plt.tight_layout(rect=[0,.07,1,0.97])
fig.subplots_adjust(wspace=0.03,hspace=0.03)
# Place the colorbar axis spanning the full width of the bottom row.
pos = axs[-1,0].get_position()
pos2 = axs[-1,-1].get_position()
cax = fig.add_axes([pos.x0,0.06,pos2.x1-pos.x0,0.02])
# Contour levels and tick positions in the transformed (sign*|x|**expo) space.
levs = np.linspace(-75**expo,75**expo,31)
tik = np.array([-75,-50,-25,0,25,50,75])
tik = np.sign(tik)*abs(tik)**expo
q1 = axs[0,0].contourf(param1['x_T'],param1['y_T'],in1,levs,cmap=cmocean.cm.balance,extend='both')
cbar = fig.colorbar(q1,cax=cax,orientation='horizontal',ticks=tik)
cbar.set_label(r'Power [Wm$^{-2} \cdot 10^{-3}$]')
# Tick labels are mapped back to physical (untransformed) values.
cbar.set_ticklabels(np.round(abs(tik)**expo*np.sign(tik)).astype(int))
axs[1,0].contourf(param3['x_T'],param3['y_T'],in3,levs,cmap=cmocean.cm.balance,extend='both')
axs[2,0].contourf(param2['x_T'],param2['y_T'],in2,levs,cmap=cmocean.cm.balance,extend='both')
axs[0,2].contourf(param1['x_T'],param1['y_T'],ex1,levs,cmap=cmocean.cm.balance,extend='both')
axs[1,2].contourf(param3['x_T'],param3['y_T'],ex3,levs,cmap=cmocean.cm.balance,extend='both')
axs[2,2].contourf(param2['x_T'],param2['y_T'],ex2,levs,cmap=cmocean.cm.balance,extend='both')
axs[0,1].contourf(param1['x_T'],param1['y_T'],bf1,levs,cmap=cmocean.cm.balance,extend='both')
axs[1,1].contourf(param3['x_T'],param3['y_T'],bf3,levs,cmap=cmocean.cm.balance,extend='both')
axs[2,1].contourf(param2['x_T'],param2['y_T'],bf2,levs,cmap=cmocean.cm.balance,extend='both')
#axs[1,3].contourf(param3['x_T'],param3['y_T'],bs,levs,cmap=cmocean.cm.balance,extend='both')
axs[0,0].set_title('Wind forcing power',loc='left')
axs[0,1].set_title('Bottom friction power',loc='left')
axs[0,2].set_title('Biharmonic viscosity\n+ backscatter power',loc='left')
#axs[1,3].set_title('Backscatter power',loc='left')
# Annotate each panel with a letter (a, b, c, ...) and its area-mean power.
abc = 'abcdefghijkl'
abci = 0
for i,axcol in enumerate(axs):
    for j,ax in enumerate(axcol):
        # if not(i in [0,2] and j == 3):
        plt.text(0.93,0.93,abc[abci],transform=ax.transAxes,fontweight='bold')
        plt.text(0.965,0.87,"%.2f" % mround[abci],transform=ax.transAxes,ha='right')
        abci += 1
# Trim 15 km off every boundary and drop ticks (shared axes propagate this).
axs[0,0].set_xticks([])
axs[0,0].set_yticks([])
axs[0,0].set_xlim(15e3,param1['Lx']-15e3)
axs[0,0].set_ylim(15e3,param1['Ly']-15e3)
axs[0,0].set_ylabel(r'Low resolution, $\Delta x = $30km',fontsize=11)
axs[1,0].set_ylabel(r'LR + strong backscatter',fontsize=11)
axs[2,0].set_ylabel(r'High resolution, $\Delta x = $7.5km',fontsize=11)
axs[-1,0].set_xlabel(r'$x$')
axs[-1,1].set_xlabel(r'$x$')
axs[-1,2].set_xlabel(r'$x$')
#axs[-1,3].set_xlabel(r'$x$')
#axs[0,3].set_frame_on(False)
#axs[2,3].set_frame_on(False)
axs[0,2].set_ylabel(r'$y$')
axs[0,2].yaxis.set_label_position('right')
axs[1,2].set_ylabel(r'$y$')
axs[1,2].yaxis.set_label_position('right')
axs[2,2].set_ylabel(r'$y$')
axs[2,2].yaxis.set_label_position('right')
plt.savefig(outpath+'plots/power_maps_bs.png',dpi=300)
plt.close(fig)
| [
"matplotlib.pyplot.text",
"matplotlib.pyplot.savefig",
"os.chdir",
"numpy.array",
"numpy.linspace",
"matplotlib.pyplot.close",
"numpy.sign",
"matplotlib.pyplot.tight_layout",
"numpy.load",
"matplotlib.pyplot.subplots",
"numpy.round"
] | [((227, 241), 'os.chdir', 'os.chdir', (['path'], {}), '(path)\n', (235, 241), False, 'import os\n'), ((2573, 2635), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(3)'], {'figsize': '(8.8, 9)', 'sharex': '(True)', 'sharey': '(True)'}), '(3, 3, figsize=(8.8, 9), sharex=True, sharey=True)\n', (2585, 2635), True, 'import matplotlib.pyplot as plt\n'), ((2632, 2673), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'rect': '[0, 0.07, 1, 0.97]'}), '(rect=[0, 0.07, 1, 0.97])\n', (2648, 2673), True, 'import matplotlib.pyplot as plt\n'), ((2842, 2882), 'numpy.linspace', 'np.linspace', (['(-75 ** expo)', '(75 ** expo)', '(31)'], {}), '(-75 ** expo, 75 ** expo, 31)\n', (2853, 2882), True, 'import numpy as np\n'), ((2883, 2923), 'numpy.array', 'np.array', (['[-75, -50, -25, 0, 25, 50, 75]'], {}), '([-75, -50, -25, 0, 25, 50, 75])\n', (2891, 2923), True, 'import numpy as np\n'), ((5391, 5448), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(outpath + 'plots/power_maps_bs.png')"], {'dpi': '(300)'}), "(outpath + 'plots/power_maps_bs.png', dpi=300)\n", (5402, 5448), True, 'import matplotlib.pyplot as plt\n'), ((5446, 5460), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (5455, 5460), True, 'import matplotlib.pyplot as plt\n'), ((1521, 1533), 'numpy.sign', 'np.sign', (['in1'], {}), '(in1)\n', (1528, 1533), True, 'import numpy as np\n'), ((1615, 1627), 'numpy.sign', 'np.sign', (['bf1'], {}), '(bf1)\n', (1622, 1627), True, 'import numpy as np\n'), ((1706, 1718), 'numpy.sign', 'np.sign', (['ex1'], {}), '(ex1)\n', (1713, 1718), True, 'import numpy as np\n'), ((1815, 1827), 'numpy.sign', 'np.sign', (['in3'], {}), '(in3)\n', (1822, 1827), True, 'import numpy as np\n'), ((1909, 1921), 'numpy.sign', 'np.sign', (['bf3'], {}), '(bf3)\n', (1916, 1921), True, 'import numpy as np\n'), ((2038, 2050), 'numpy.sign', 'np.sign', (['ex3'], {}), '(ex3)\n', (2045, 2050), True, 'import numpy as np\n'), ((2248, 2260), 'numpy.sign', 'np.sign', (['in2'], {}), 
'(in2)\n', (2255, 2260), True, 'import numpy as np\n'), ((2342, 2354), 'numpy.sign', 'np.sign', (['bf2'], {}), '(bf2)\n', (2349, 2354), True, 'import numpy as np\n'), ((2433, 2445), 'numpy.sign', 'np.sign', (['ex2'], {}), '(ex2)\n', (2440, 2445), True, 'import numpy as np\n'), ((2472, 2487), 'numpy.round', 'np.round', (['mi', '(2)'], {}), '(mi, 2)\n', (2480, 2487), True, 'import numpy as np\n'), ((2924, 2936), 'numpy.sign', 'np.sign', (['tik'], {}), '(tik)\n', (2931, 2936), True, 'import numpy as np\n'), ((684, 729), 'numpy.load', 'np.load', (["(runpath1 + '/analysis/power_map.npy')"], {}), "(runpath1 + '/analysis/power_map.npy')\n", (691, 729), True, 'import numpy as np\n'), ((743, 775), 'numpy.load', 'np.load', (["(runpath1 + '/param.npy')"], {}), "(runpath1 + '/param.npy')\n", (750, 775), True, 'import numpy as np\n'), ((827, 872), 'numpy.load', 'np.load', (["(runpath2 + '/analysis/power_map.npy')"], {}), "(runpath2 + '/analysis/power_map.npy')\n", (834, 872), True, 'import numpy as np\n'), ((886, 918), 'numpy.load', 'np.load', (["(runpath2 + '/param.npy')"], {}), "(runpath2 + '/param.npy')\n", (893, 918), True, 'import numpy as np\n'), ((971, 1016), 'numpy.load', 'np.load', (["(runpath3 + '/analysis/power_map.npy')"], {}), "(runpath3 + '/analysis/power_map.npy')\n", (978, 1016), True, 'import numpy as np\n'), ((1030, 1062), 'numpy.load', 'np.load', (["(runpath3 + '/param.npy')"], {}), "(runpath3 + '/param.npy')\n", (1037, 1062), True, 'import numpy as np\n'), ((2517, 2528), 'numpy.array', 'np.array', (['m'], {}), '(m)\n', (2525, 2528), True, 'import numpy as np\n'), ((4485, 4559), 'matplotlib.pyplot.text', 'plt.text', (['(0.93)', '(0.93)', 'abc[abci]'], {'transform': 'ax.transAxes', 'fontweight': '"""bold"""'}), "(0.93, 0.93, abc[abci], transform=ax.transAxes, fontweight='bold')\n", (4493, 4559), True, 'import matplotlib.pyplot as plt\n'), ((4564, 4649), 'matplotlib.pyplot.text', 'plt.text', (['(0.965)', '(0.87)', "('%.2f' % mround[abci])"], {'transform': 
'ax.transAxes', 'ha': '"""right"""'}), "(0.965, 0.87, '%.2f' % mround[abci], transform=ax.transAxes, ha='right'\n )\n", (4572, 4649), True, 'import matplotlib.pyplot as plt\n'), ((3214, 3226), 'numpy.sign', 'np.sign', (['tik'], {}), '(tik)\n', (3221, 3226), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
# @Author: dbirman
# @Date: 2017-06-14 16:51:24
import sys
import csv
import random
import webbrowser
import os
import numpy as np
# other functions
def num_rows(data):
    """Return the index (1-3) of the first empty rating column of a row, or 4 if all are filled."""
    for col in (1, 2, 3):
        if not data[col]:
            return col
    return 4
def main():
    """Interactively review quality ratings stored in sinfo.csv.

    Reads sinfo.csv (one row per subject: id, three rating columns, a comment
    column), prints completion statistics, then walks the rows in random
    order.  For each unrated row it opens the subject's report in a browser
    and prompts for a rating (-1/0/1), 'e' to stop, or 'c' to edit the
    comment first.  The updated table is written back to sinfo.csv.

    NOTE(review): Python-2 style code -- csv files are opened in binary mode,
    and random.shuffle() is applied to a bare range(); both would fail on
    Python 3.  Confirm the intended interpreter before porting.
    """
    print('Reading file sinfo.csv')
    csvfile = open('sinfo.csv', 'rb')
    csvreader = csv.reader(csvfile)
    file = list(csvreader)
    # display statistics
    finished = [0., 0., 0.]
    # hold collects the numeric ratings per column; filled but never read
    # afterwards -- NOTE(review): possibly leftover from earlier analysis.
    hold = np.zeros((3, len(file) - 1))
    hold[:] = np.nan
    # Hard-coded total number of subjects used as the percentage denominator.
    total = 601
    for i in range(1, len(file)):
        for j in range(1, 4):
            if len(file[i][j]) > 0:
                finished[j - 1] = finished[j - 1] + 1
                hold[j - 1, i - 1] = int(file[i][j])
    # Convert counts to percentages with one decimal place.
    finished = np.divide(np.round(np.divide(finished, total) * 1000), 10)
    print('Completed: ' + str(finished[0]) + '% ' +
          str(finished[1]) + '% ' + str(finished[2]) + '%')
    print('Total: ' + str(np.round(np.divide(np.sum(finished), 3))) + '%')
    input("Waiting: [enter]")
    # file[1:] are all the rows
    order = range(1, len(file))
    random.shuffle(order)
    # pick a random row
    for row in order:
        # check how many entries it has
        curEnt = num_rows(file[row])
        if curEnt <= 1:
            # if less than 1, run the row
            print('Check participant #' + file[row][0])
            fname = os.getcwd() + '/abide/' + file[row][0]
            if os.path.isfile(fname):
                webbrowser.open('file://' + fname)
                quality = input("Quality? [-1/0/1/e/c] ")
                if quality == 'e':
                    break
                if quality == 'c':
                    print('Current comment: ' + file[row][4])
                    comment = input("Comment: ")
                    if len(comment) > 0:
                        file[row][4] = comment
                    quality = input("Quality? [-1/0/1/e] ")
                    if quality == 'e':
                        break
                # Store the rating in the first empty rating column.
                file[row][curEnt] = quality
            else:
                print('File does not exist')
    print('Writing file sinfo.csv')
    outfile = open('sinfo.csv', 'wb')
    csvwriter = csv.writer(outfile)
    csvwriter.writerows(file)
    print('Ending')
# Script entry point; exit with status 0 after the interactive review loop.
if __name__ == '__main__':
    main()
    sys.exit(0)
| [
"random.shuffle",
"csv.writer",
"webbrowser.open",
"os.getcwd",
"os.path.isfile",
"numpy.sum",
"sys.exit",
"csv.reader",
"numpy.divide"
] | [((559, 578), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (569, 578), False, 'import csv\n'), ((1304, 1325), 'random.shuffle', 'random.shuffle', (['order'], {}), '(order)\n', (1318, 1325), False, 'import random\n'), ((2391, 2410), 'csv.writer', 'csv.writer', (['outfile'], {}), '(outfile)\n', (2401, 2410), False, 'import csv\n'), ((2505, 2516), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2513, 2516), False, 'import sys\n'), ((1645, 1666), 'os.path.isfile', 'os.path.isfile', (['fname'], {}), '(fname)\n', (1659, 1666), False, 'import os\n'), ((978, 1004), 'numpy.divide', 'np.divide', (['finished', 'total'], {}), '(finished, total)\n', (987, 1004), True, 'import numpy as np\n'), ((1684, 1718), 'webbrowser.open', 'webbrowser.open', (["('file://' + fname)"], {}), "('file://' + fname)\n", (1699, 1718), False, 'import webbrowser\n'), ((1591, 1602), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1600, 1602), False, 'import os\n'), ((1175, 1191), 'numpy.sum', 'np.sum', (['finished'], {}), '(finished)\n', (1181, 1191), True, 'import numpy as np\n')] |
"""
Logic for wavefunction evaluation.
"""
import logging
import time
import jax
import functools
import numpy as np
from jax import numpy as jnp
from deeperwin.configuration import EvaluationConfig
from deeperwin.hamiltonian import get_local_energy, calculate_forces
from deeperwin.loggers import DataLogger
from deeperwin.mcmc import MCMCState, MetropolisHastingsMonteCarlo, calculate_metrics
LOGGER = logging.getLogger("dpe")
# static_argnums=(0, 1): log_psi_squared and mcmc are treated as compile-time
# constants by jax.jit (they are Python objects, not traceable arrays).
@functools.partial(jax.jit, static_argnums=(0, 1))
def _evaluation_step(log_psi_squared, mcmc, mcmc_state, params):
    """Advance the MCMC walkers one evaluation epoch and compute local energies.

    NOTE(review): appears superseded by the step built in
    build_evaluation_step() below -- confirm before removing.
    """
    mcmc_state = mcmc.run_inter_steps(log_psi_squared, params, mcmc_state, "eval")
    E_loc = get_local_energy(log_psi_squared, *mcmc_state.model_args, *params)
    return mcmc_state, E_loc
def _build_force_polynomial_coefficients(R_core, polynomial_degree):
j = np.arange(1, polynomial_degree + 1)
A = R_core ** 2 / (2 + j[np.newaxis, :] + j[:, np.newaxis] + 1)
b = 1 / (j + 1)
coeff = np.linalg.solve(A, b)
coeff = np.reshape(coeff, [-1, 1, 1, 1, 1])
return coeff
def build_evaluation_step(log_psi_sqr_func, mcmc, eval_config: EvaluationConfig):
    """Build a jit-compiled function that performs one evaluation epoch.

    The returned callable maps (mcmc_state, params) to
    (new_mcmc_state, local_energies_or_None, forces_or_None), depending on
    which quantities *eval_config* enables.
    """
    def _step(state: MCMCState, params):
        state = mcmc.run_inter_steps(log_psi_sqr_func, params, state)

        energies = None
        if eval_config.calculate_energies:
            energies = get_local_energy(log_psi_sqr_func, *state.model_args, *params)

        forces = None
        force_config = eval_config.forces
        if force_config is not None:
            coeffs = _build_force_polynomial_coefficients(
                force_config.R_core, force_config.polynomial_degree
            )
            forces = calculate_forces(
                *state.model_args,
                state.log_psi_sqr,
                log_psi_sqr_func,
                params,
                force_config,
                coeffs,
            )
        return state, energies, forces

    return jax.jit(_step)
def evaluate_wavefunction(
        log_psi_squared,
        trainable_params,
        fixed_params,
        mcmc: MetropolisHastingsMonteCarlo,
        mcmc_state: MCMCState,
        config: EvaluationConfig,
        logger: DataLogger = None,
        evaluation_step_func=None
):
    """Evaluate a wavefunction: MCMC burn-in, then config.n_epochs epochs.

    Returns a tuple of (per-epoch mean energies as a jnp array, stacked
    per-epoch forces or None, final MCMC state).
    """
    params = (trainable_params, fixed_params)
    LOGGER.debug("Starting burn-in for evaluation...")
    mcmc_state = mcmc.run_burn_in_eval(log_psi_squared, params, mcmc_state)
    if evaluation_step_func is None:
        evaluation_step_func = build_evaluation_step(log_psi_squared, mcmc, config)
    t_start = time.time()
    E_eval_mean = []
    forces_mean = []
    for n_epoch in range(config.n_epochs):
        mcmc_state, E_epoch, forces = evaluation_step_func(mcmc_state, (trainable_params, fixed_params))
        t_end = time.time()
        if E_epoch is not None:
            # NaN-safe mean over walkers for this epoch.
            E_eval_mean.append(jnp.nanmean(E_epoch))
        if logger is not None:
            if E_epoch is not None:
                logger.log_metrics(*calculate_metrics(n_epoch, E_epoch, mcmc_state, (t_end - t_start), "eval"))
            if forces is not None:
                logger.log_metric("forces", forces, n_epoch, "eval")
                # NOTE(review): forces are only accumulated when a logger is
                # provided -- without a logger the returned forces are None
                # even if force evaluation is enabled.  Confirm intended.
                forces_mean.append(forces)
        # Epoch timing spans sampling + energy/force evaluation of this epoch.
        t_start = t_end
    forces_mean = jnp.array(forces_mean) if (len(forces_mean) > 0) else None
    return jnp.array(E_eval_mean), forces_mean, mcmc_state
| [
"logging.getLogger",
"deeperwin.hamiltonian.get_local_energy",
"numpy.linalg.solve",
"numpy.reshape",
"jax.numpy.array",
"functools.partial",
"jax.jit",
"jax.numpy.nanmean",
"deeperwin.mcmc.calculate_metrics",
"deeperwin.hamiltonian.calculate_forces",
"time.time",
"numpy.arange"
] | [((408, 432), 'logging.getLogger', 'logging.getLogger', (['"""dpe"""'], {}), "('dpe')\n", (425, 432), False, 'import logging\n'), ((436, 485), 'functools.partial', 'functools.partial', (['jax.jit'], {'static_argnums': '(0, 1)'}), '(jax.jit, static_argnums=(0, 1))\n', (453, 485), False, 'import functools\n'), ((646, 712), 'deeperwin.hamiltonian.get_local_energy', 'get_local_energy', (['log_psi_squared', '*mcmc_state.model_args', '*params'], {}), '(log_psi_squared, *mcmc_state.model_args, *params)\n', (662, 712), False, 'from deeperwin.hamiltonian import get_local_energy, calculate_forces\n'), ((821, 856), 'numpy.arange', 'np.arange', (['(1)', '(polynomial_degree + 1)'], {}), '(1, polynomial_degree + 1)\n', (830, 856), True, 'import numpy as np\n'), ((957, 978), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (972, 978), True, 'import numpy as np\n'), ((991, 1026), 'numpy.reshape', 'np.reshape', (['coeff', '[-1, 1, 1, 1, 1]'], {}), '(coeff, [-1, 1, 1, 1, 1])\n', (1001, 1026), True, 'import numpy as np\n'), ((1943, 1968), 'jax.jit', 'jax.jit', (['_evaluation_step'], {}), '(_evaluation_step)\n', (1950, 1968), False, 'import jax\n'), ((2566, 2577), 'time.time', 'time.time', ([], {}), '()\n', (2575, 2577), False, 'import time\n'), ((2784, 2795), 'time.time', 'time.time', ([], {}), '()\n', (2793, 2795), False, 'import time\n'), ((3249, 3271), 'jax.numpy.array', 'jnp.array', (['forces_mean'], {}), '(forces_mean)\n', (3258, 3271), True, 'from jax import numpy as jnp\n'), ((3319, 3341), 'jax.numpy.array', 'jnp.array', (['E_eval_mean'], {}), '(E_eval_mean)\n', (3328, 3341), True, 'from jax import numpy as jnp\n'), ((1327, 1394), 'deeperwin.hamiltonian.get_local_energy', 'get_local_energy', (['log_psi_sqr_func', '*mcmc_state.model_args', '*params'], {}), '(log_psi_sqr_func, *mcmc_state.model_args, *params)\n', (1343, 1394), False, 'from deeperwin.hamiltonian import get_local_energy, calculate_forces\n'), ((1689, 1816), 
'deeperwin.hamiltonian.calculate_forces', 'calculate_forces', (['*mcmc_state.model_args', 'mcmc_state.log_psi_sqr', 'log_psi_sqr_func', 'params', 'eval_config.forces', 'poly_coeffs'], {}), '(*mcmc_state.model_args, mcmc_state.log_psi_sqr,\n log_psi_sqr_func, params, eval_config.forces, poly_coeffs)\n', (1705, 1816), False, 'from deeperwin.hamiltonian import get_local_energy, calculate_forces\n'), ((2859, 2879), 'jax.numpy.nanmean', 'jnp.nanmean', (['E_epoch'], {}), '(E_epoch)\n', (2870, 2879), True, 'from jax import numpy as jnp\n'), ((2984, 3056), 'deeperwin.mcmc.calculate_metrics', 'calculate_metrics', (['n_epoch', 'E_epoch', 'mcmc_state', '(t_end - t_start)', '"""eval"""'], {}), "(n_epoch, E_epoch, mcmc_state, t_end - t_start, 'eval')\n", (3001, 3056), False, 'from deeperwin.mcmc import MCMCState, MetropolisHastingsMonteCarlo, calculate_metrics\n')] |
import numpy as np
import matplotlib.pyplot as plt
import barnes as bn
def gr_to_bv(g, r):
    """Transform SDSS g-r colour to Johnson B-V.

    Relation from
    https://www.sdss3.org/dr8/algorithms/sdssUBVRITransform.php
    Returns the B-V value and the RMS error of the fit.
    """
    b_minus_v = 0.62 * (g - r) + 0.15
    return b_minus_v, 0.07
def random_stars(fname, N):
    """Draw stellar properties at random from a TRILEGAL field.

    fname: str, name of the TRILEGAL output file, e.g.
        "output574523944248.dat"
    N: int, number of stars to draw.
    Returns arrays of log10(age), B-V colour, log10(Teff) and r magnitude.
    """
    # load the 17-column TRILEGAL table, one array per column
    (Gc, logAge, m_h, m_ini, logL, logTeff, logg, m_M0, Av, m2_m1, mbol,
     u, g, r, i, z, Mact) = np.genfromtxt(fname).T
    # magnitude / surface-gravity cut
    # NOTE(review): the original comment said "fainter than 25th", but the
    # code cuts at r < 28 -- kept as coded; confirm the intended faint limit.
    m = (16 < r) * (r < 28) * logg > 4
    logAge, g, r, logTeff = logAge[m], g[m], r[m], logTeff[m]
    # draw N stars (with replacement) from the surviving sample
    picks = np.random.choice(np.arange(len(logAge)), N)
    logAges, gs, rs = logAge[picks], g[picks], r[picks]
    logTeff = logTeff[picks]
    # convert SDSS g-r colour to Johnson B-V
    bvs, bverrs = gr_to_bv(gs, rs)
    return logAges, bvs, logTeff, rs
if __name__ == "__main__":
    fname = "output574523944248.dat"
    nstars = 1000
    # bug fix: random_stars returns four arrays (logAges, bvs, logTeff, rs);
    # the original two-name unpacking raised a ValueError at runtime.
    logAges, bvs, logTeff, rs = random_stars(fname, nstars)
    # convert log10(age [yr]) to Myr for the gyrochronology relation
    age = 10**logAges * 1e-6
    ps = bn.period(age, bvs)
    plt.clf()
    plt.hist(ps)
    plt.savefig("period_hist")
| [
"matplotlib.pyplot.hist",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.clf",
"barnes.period",
"numpy.genfromtxt"
] | [((1313, 1332), 'barnes.period', 'bn.period', (['age', 'bvs'], {}), '(age, bvs)\n', (1322, 1332), True, 'import barnes as bn\n'), ((1338, 1347), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1345, 1347), True, 'import matplotlib.pyplot as plt\n'), ((1352, 1364), 'matplotlib.pyplot.hist', 'plt.hist', (['ps'], {}), '(ps)\n', (1360, 1364), True, 'import matplotlib.pyplot as plt\n'), ((1369, 1395), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""period_hist"""'], {}), "('period_hist')\n", (1380, 1395), True, 'import matplotlib.pyplot as plt\n'), ((679, 699), 'numpy.genfromtxt', 'np.genfromtxt', (['fname'], {}), '(fname)\n', (692, 699), True, 'import numpy as np\n')] |
from subprocess import STDOUT
import sys
from tf.transformations import rotation_matrix
sys.path.insert(0, './yolov5')
from yolov5.utils.datasets import LoadImages, LoadStreams,LoadWebcam,LoadRealsense
from yolov5.utils.general import check_img_size, non_max_suppression, scale_coords
from yolov5.utils.torch_utils import select_device, time_synchronized
from deep_sort_pytorch.utils.parser import get_config
from deep_sort_pytorch.deep_sort import DeepSort
import argparse
import os
import platform
import shutil
import time
from pathlib import Path
import cv2
import torch
import torch.backends.cudnn as cudnn
'''
直接再track.py改watchout,在v2.0的基础上加上了
- goodenbox 黄金分割法裁剪框(其实kcf利用了六个点的深度来判断效果更加)
并且和抗的算法匹配
version:3.0
'''
import numpy as np
from visualization_msgs.msg import Marker,MarkerArray
import rospy
from numba import jit
from tf import TransformListener
from APF_BASE_utils import BASE_TOOLS_for_car as To_1
from APF_FOLLOW_utils import FOLLOW_TOOLS_for_car as To_2
# colour seed triple used to derive a distinct, reproducible colour per id
palette = (2 ** 11 - 1, 2 ** 15 - 1, 2 ** 20 - 1)


def bbox_rel(*xyxy):
    """Convert absolute corner tensors (x1, y1, x2, y2) to a
    (center_x, center_y, width, height) tuple of floats."""
    xa, ya = xyxy[0].item(), xyxy[1].item()
    xb, yb = xyxy[2].item(), xyxy[3].item()
    width = abs(xa - xb)
    height = abs(ya - yb)
    left = min(xa, xb)
    top = min(ya, yb)
    return left + width / 2, top + height / 2, width, height


def compute_color_for_labels(label):
    """Map an integer class/track label to a fixed colour tuple."""
    seed = label ** 2 - label + 1
    return tuple(int(p * seed % 255) for p in palette)
def showdepth(boxes, depth):
    """Print the depth (in metres) of every pixel inside each box.

    boxes: iterable of (x1, y1, x2, y2) pixel corners;
    depth: raw depth image, assumed to be in millimetres.
    """
    for box in boxes:
        left, top, right, bottom = (int(c) for c in box)
        for col in range(left, right):
            for row in range(top, bottom):
                print(depth[row, col] * 0.001)
# note: offset is the principal-point shift applied to every box
def draw_boxes(img, bbox, identities=None, offset=(0, 0)):
    """Draw a golden-ratio-shrunk rectangle for every track on *img*.

    Each box is shifted by *offset*, shrunk to its central golden-ratio
    region and outlined in the track's colour. Returns the annotated image.
    """
    import math

    def _golden_crop(lo, hi):
        # Shrink [lo, hi] to its central golden-ratio section. The second
        # bound is intentionally computed from the already-updated first
        # bound, matching the original drawing behaviour.
        lo = lo + math.ceil((hi - lo) * 0.382)
        hi = lo + math.ceil((hi - lo) * 0.618)
        return lo, hi

    for idx, box in enumerate(bbox):
        x1, y1, x2, y2 = (int(v) for v in box)
        x1, x2 = x1 + offset[0], x2 + offset[0]
        y1, y2 = y1 + offset[1], y2 + offset[1]
        x1, x2 = _golden_crop(x1, x2)
        y1, y2 = _golden_crop(y1, y2)
        # box text and bar
        track_id = int(identities[idx]) if identities is not None else 0
        color = compute_color_for_labels(track_id)
        label = '{}{:d}'.format("", track_id)
        t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 2, 2)[0]
        cv2.rectangle(img, (x1, y1), (x2, y2), color, 3)
        # cv2.rectangle(
        #     img, (x1, y1), (x1 + t_size[0] + 3, y1 + t_size[1] + 4), color, -1)
        # cv2.putText(img, label, (x1, y1 +
        #                          t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 2, [255, 255, 255], 2)
    return img
class Watchout:
    """Person tracker / obstacle publisher.

    Runs YOLOv5 detection + DeepSORT tracking on a RealSense colour/depth
    stream, estimates a per-track 3-D position and velocity from the depth
    image (golden-ratio crop of each box), and hands the resulting obstacle
    list to the APF planner (``To_1``).
    """
    def __init__(self):
        # timestamps of the previous / current processed frame (used for
        # finite-difference velocity estimates)
        self.lasttime = rospy.Time.now()
        self.thistime = rospy.Time.now()
        self.scale = 0.001  # depth image is in millimetres; scale to metres
        self.idcenvel = []  # per track: [id, cx, cy, vx, vy]
        self.depth_thres = 10.0  # depth threshold [m]: farther pixels are discarded
        # camera intrinsics
        fx = 609.2713012695312
        cx = 316.67022705078125
        fy = 608.010498046875
        cy = 244.8178253173828
        # inverse-intrinsics matrix: maps (u*d, v*d, d) to camera coordinates
        self.K = np.array([[1.0/fx,0,-cx/fx],
                            [0,1.0/fy,-cy/fy],
                            [0.0 , 0.0, 1.0]])
        # the 12 edges of a box, as index pairs into its 8 corner points
        self.lines = [[0,1],[1,3],[3,2],[2,0],
                        [0,4],[2,6],[1,5],[3,7],
                        [4,5],[5,7],[7,6],[6,4]]
        self.pub = rospy.Publisher('Personbox',MarkerArray,queue_size=1)
        self.rate = rospy.Rate(10)
        self.listener = TransformListener()
    def watch(self,opt, save_img=False):
        """Main loop: detect, track and publish obstacles until 'q'/shutdown.

        opt: argparse namespace produced by the __main__ block below.
        save_img: unused here; kept for interface compatibility.
        """
        out, source,weights, view_img, save_txt, imgsz = \
            opt.output, opt.source ,opt.weights, opt.view_img, opt.save_txt, opt.img_size
        # initialize deepsort
        cfg = get_config()
        cfg.merge_from_file(opt.config_deepsort)
        deepsort = DeepSort(cfg.DEEPSORT.REID_CKPT,
                            max_dist=cfg.DEEPSORT.MAX_DIST, min_confidence=cfg.DEEPSORT.MIN_CONFIDENCE,
                            nms_max_overlap=cfg.DEEPSORT.NMS_MAX_OVERLAP, max_iou_distance=cfg.DEEPSORT.MAX_IOU_DISTANCE,
                            max_age=cfg.DEEPSORT.MAX_AGE, n_init=cfg.DEEPSORT.N_INIT, nn_budget=cfg.DEEPSORT.NN_BUDGET,
                            use_cuda=True)
        # Initialize
        device = select_device(opt.device)
        half = device.type != 'cpu'  # half precision only supported on CUDA
        # Load model
        model = torch.load(weights, map_location=device)['model'].float()  # load to FP32
        model.to(device).eval()
        if half:
            model.half()  # to FP16
        # Set Dataloader
        vid_path, vid_writer = None, None
        if source=='0':
            dataset = LoadWebcam(source,imgsz)
            view_img = True
            cudnn.benchmark = True  # set True to speed up constant image size inference
        else:
            dataset = LoadRealsense('0',img_size=imgsz)
            view_img = True
            cudnn.benchmark = True  # set True to speed up constant image size inference
        # Get names and colors
        names = model.module.names if hasattr(model, 'module') else model.names
        # Run inference
        t0 = time.time()
        img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
        # run once (warm-up pass on GPU)
        _ = model(img.half() if half else img) if device.type != 'cpu' else None
        vis, pos_end = To_1.init(mapsize=150, scale=15)
        # vis, pos_end, id_ = To_2.init(mapsize=150, scale=15)
        for frame_idx, (path, img, im0, depth) in enumerate(dataset):
            self.thistime = rospy.Time.now()
            img = torch.from_numpy(img).to(device)
            img = img.half() if half else img.float()  # uint8 to fp16/32
            img /= 255.0  # 0 - 255 to 0.0 - 1.0
            if img.ndimension() == 3:
                img = img.unsqueeze(0)
            # Inference
            t1 = time_synchronized()
            pred = model(img, augment=opt.augment)[0]
            # Apply NMS
            # [xyxy, conf, cls] n*6
            pred = non_max_suppression(
                pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
            t2 = time_synchronized()
            # Print time (inference + NMS)
            print('Done. (%.3fs)' % ( t2 - t1))
            # Process detections
            for i, det in enumerate(pred):  # detections per image
                im0 = im0.copy()
                if det is not None and len(det):
                    # Rescale boxes from img_size to im0 size (fixes up xyxy)
                    det[:, :4] = scale_coords(
                        img.shape[2:], det[:, :4], im0.shape).round()
                    bbox_xywh = []
                    confs = []
                    # Adapt detections to deep sort input format:
                    # center_x, center_y, w, h, confidence
                    for *xyxy, conf, cls in det:
                        x_c, y_c, bbox_w, bbox_h = bbox_rel(*xyxy)
                        obj = [x_c, y_c, bbox_w, bbox_h]
                        bbox_xywh.append(obj)
                        confs.append([conf.item()])
                    xywhs = torch.Tensor(bbox_xywh)
                    confss = torch.Tensor(confs)
                    # Pass detections to deepsort
                    # outputs : x1 y1 x2 y2 id
                    outputs = deepsort.update(xywhs, confss, im0)
                    # draw boxes for visualization
                    if len(outputs) > 0:
                        bbox_xyxy = outputs[:, :4]
                        identities = outputs[:, -1]
                        draw_boxes(im0, bbox_xyxy, identities)
                        t3 = rospy.Time.now()
                        # self.publish3dbox(depth,bbox_xyxy,identities)
                        # if not self.init:
                        #     import threading
                        #     thread = threading.Thread(target=self.publish3dbox,args=(depth,bbox_xyxy,identities))
                        #     thread.start()
                        #     self.init = 1
                        #     print('thread started')
                        blocklist = self.twodbox(depth,bbox_xyxy,identities)
                        pos_now = (0, 0, 0, 0, 0)
                        vx, vy, w, f = To_1.Vis_and_deside(vis=vis, pos_now=pos_now,
                                                            pos_end=pos_end, blocklist=blocklist)
                        # vx, vy, w, f, id_ = To_2.Vis_and_deside(vis=vis, pos_now=pos_now,
                        #                                     pos_end=pos_end, blocklist=blocklist,id_=id_)
                        print(f'Creating markderarrary use {(rospy.Time.now()-t3).to_sec()} s ')
                        print(self.idcenvel)
                else:
                    deepsort.increment_ages()
            # Stream results
            if view_img:
                cv2.imshow('watchout', im0)
                if cv2.waitKey(1) == ord('q') or rospy.is_shutdown():  # q to quit
                    # thread.join()
                    print('Done. (%.3fs)' % (time.time() - t0))
                    raise StopIteration
            self.lasttime = self.thistime
    def goodenbox(self,bbox_xyxy):
        # placeholder: intended golden-ratio box refinement; not implemented
        pass
    # @jit
    def create_box(self,depth_img,box,offset=(0,0)):
        """Build a 3-D LINE_LIST Marker for one 2-D detection box.

        Returns (cx, cy, marker); (0, 0, None) if no valid depth pixel
        survives the golden-ratio crop / depth threshold.
        """
        # formula: x = (u*depth - cx*depth)/fx, y = (v*depth - cy*depth)/fy
        # first multiply the pixel coordinates (u, v, 1) by depth
        x1,y1,x2,y2 = [int(i) for i in box]
        w = x2 - x1
        h = y2 - y1
        # golden-ratio crop to suppress background pixels
        import math
        u1 = math.ceil(x1+0.382*w)
        u2 = math.ceil(x1+0.618*w)
        v1 = math.ceil(y1+0.382*h)
        v2 = math.ceil(y1+0.618*h)
        uv1 = []
        for u in range(u1,u2):
            for v in range(v1,v2):
                depth = float(depth_img[v,u])*self.scale
                if depth > self.depth_thres:
                    continue
                else:
                    uv1.append([u*depth,v*depth,depth])
        if(len(uv1)<1):
            print("create_error")
            return 0,0,None
        # 3*n
        uvd = np.array(uv1).T
        # multiply uvd by the (inverse) intrinsic matrix K to get
        # camera-frame coordinates; camera xyz maps onto world yzx
        # n*3
        yzx = self.K.dot(uvd).T
        # use the mean as a stand-in for the centroid
        cx = yzx[:,2].mean()
        cy = yzx[:,0].mean()
        # find the eight corner vertices
        xmax = yzx[:,2].max()
        xmin = yzx[:,2].min()
        ymax = yzx[:,0].max()
        ymin = yzx[:,0].min()
        zmax = yzx[:,1].max()
        zmin = yzx[:,1].min()
        from sensor_msgs.msg import PointCloud
        pcl = PointCloud()
        pcl.header.frame_id = '/camera'
        # NOTE(review): this overwrites frame_id with a timestamp; it looks
        # like it should be pcl.header.stamp -- confirm.
        pcl.header.frame_id = self.thistime
        pcl.points.append((cx,cy,0))
        pcl.points.append((xmax,ymax,zmax))
        pcl.points.append((xmin,ymin,zmin))
        # tranform point in camera to gobal
        import tf
        try:
            # NOTE(review): rospy.Time(3) is an absolute stamp, not "latest"
            # (rospy.Time(0)) -- confirm this is intended.
            self.listener.lookupTransform('/map','/camera',rospy.Time(3))
        except:
            # NOTE(review): bare name 'exit' is never called (missing
            # parentheses), so failures fall through silently.
            exit
        self.listener.transformPointCloud('/map',pcl)
        from geometry_msgs.msg import Point
        points = [Point(xmin,ymin,zmin),Point(xmax,ymin,zmin),
                    Point(xmin,ymax,zmin),Point(xmax,ymax,zmin),
                    Point(xmin,ymin,zmax),Point(xmax,ymin,zmax),
                    Point(xmin,ymax,zmax),Point(xmax,ymax,zmax)]
        # create the bbox marker: 12 edges drawn as a LINE_LIST
        marker = Marker()
        marker.header.frame_id = 'map'
        marker.header.stamp = rospy.Time.now()
        marker.action = Marker.ADD
        marker.type = Marker.LINE_LIST
        # marker.lifetime = rospy.Duration(0)
        marker.color.r = 1
        marker.color.g = 0
        marker.color.b = 0
        marker.color.a = 1
        marker.scale.x = 0.2
        marker.points = []
        for line in self.lines:
            marker.points.append(points[line[0]])
            marker.points.append(points[line[1]])
        return cx , cy , marker
    # @jit
    def publish3dbox(self,depth_img,bbox,identities=None,offset=(0,0)):
        """Publish one 3-D box Marker per track and refresh idcenvel."""
        markerarray = MarkerArray()
        dt = (self.thistime - self.lasttime).to_sec()
        idcentvel_tmp = []
        # build the MarkerArray and match tracks to update idcenvel
        for i,id_ in enumerate(identities):
            marker = Marker()
            cx,cy,marker = self.create_box(depth_img,bbox[i],offset)
            marker.id = id_
            markerarray.markers.append(marker)
            flag = 0
            # trick: idcenvel starts empty, so the very first frame also
            # seeds the per-track state implicitly
            for idcv in self.idcenvel:
                if id_ == idcv[0]:
                    vx = (cx - idcv[1])/dt
                    vy = (cy - idcv[2])/dt
                    idcentvel_tmp.append([id_,cx,cy,vx,vy])
                    flag = 1
                    break
            if not flag:
                vx=vy=0.0
                idcentvel_tmp.append([id_,cx,cy,vx,vy])
        self.idcenvel = idcentvel_tmp
        print('idcenvel',self.idcenvel)
        self.pub.publish(markerarray)
    def drawsquare(self,xyxy,depth):
        """Estimate one track's map-frame centre and radius from depth.

        Returns (x, y, radius) in the /map frame.
        """
        # formula: x = (u*depth - cx*depth)/fx, y = (v*depth - cy*depth)/fy
        # first multiply the pixel coordinates (u, v, 1) by depth
        x1,y1,x2,y2 = [int(i) for i in xyxy]
        w = x2 - x1
        h = y2 - y1
        # golden-ratio crop to suppress background pixels
        import math
        u1 = math.ceil(x1+0.382*w)
        u2 = math.ceil(x1+0.618*w)
        v1 = math.ceil(y1+0.382*h)
        v2 = math.ceil(y1+0.618*h)
        uvd = []
        for u in range(u1,u2):
            for v in range(v1,v2):
                depth_ = float(depth[v,u])*self.scale
                if depth_ > 10: continue
                else: uvd.append([u*depth_,v*depth_,depth_])
        yzx = self.K.dot(np.array(uvd).T).T
        # use the mean as a stand-in for the centroid
        cx = yzx[:,2].mean()
        cy = yzx[:,0].mean()
        # find the eight corner vertices
        xmax = yzx[:,2].max()
        xmin = yzx[:,2].min()
        ymax = yzx[:,0].max()
        ymin = yzx[:,0].min()
        zmax = yzx[:,1].max()
        zmin = yzx[:,1].min()
        # from sensor_msgs.msg import PointCloud
        # pcl = PointCloud()
        # pcl.header.frame_id = '/camera_frame'
        # pcl.header.frame_id = self.thistime
        # pcl.points.append((cx,cy,0))
        # pcl.points.append((xmax,ymax,zmax))
        # pcl.points.append((xmin,ymin,zmin))
        # print(pcl)
        # tranform point in camera to gobal
        import tf
        # try:
        (trans,rot) = self.listener.lookupTransform(target_frame='/map',source_frame='/camera_frame',time=rospy.Time(0))
        print('transform yes')
        # print(type(trans))
        # print(rot)
        from scipy.spatial.transform import Rotation as R
        r = R.from_quat(rot)
        rostate_matrix = r.as_matrix()
        vector = np.array((cx,cy,0))
        print('firstvector=',vector)
        vector = vector+trans
        vector = rostate_matrix.dot(vector)
        # cx = pcl.points[0].x
        # cy = pcl.points[0].y
        # r1 = abs(pcl.points[2].x-pcl.points[1].x)
        # r2 = abs(pcl.points[2].y-pcl.points[1].y)
        # r = min([r1,r2])
        print('trans=',trans)
        print('rot=',rostate_matrix)
        print('second=',vector)
        return vector[0] , vector[1] , (xmax-xmin)/2
    def twodbox(self,depth,bbox,identities=None,offset=(0,0)):
        """Refresh idcenvel with (id, x, y, vx, vy, radius) per track.

        Velocities are finite differences against the previous frame;
        values below 1 cm/s are clamped to zero. Returns the updated list.
        """
        dt = (self.thistime - self.lasttime).to_sec()
        print('dt=',dt)
        idcentvel_tmp = []
        for i,id in enumerate(identities):
            cx,cy,r = self.drawsquare(bbox[i],depth)
            # trick: idcenvel starts empty, so the very first frame also
            # seeds the per-track state implicitly
            flag = 0
            for idcv in self.idcenvel:
                if id == idcv[0]:
                    vx = (cx - idcv[1])/dt
                    vy = (cy - idcv[2])/dt
                    if abs(vx) < 0.01: vx=0.0
                    if abs(vy) < 0.01: vy=0.0
                    idcentvel_tmp.append((id,cx,cy,vx,vy,0.5))
                    flag = 1
                    break
            if not flag:
                vx = vy = 0.0
                idcentvel_tmp.append((id,cx,cy,vx,vy,0.5))
        ## update idcenvel
        self.idcenvel = idcentvel_tmp
        print(idcentvel_tmp)
        return self.idcenvel
if __name__ == '__main__':
    # command-line interface for the tracker node
    cli = argparse.ArgumentParser()
    cli.add_argument('--weights', type=str,
                     default='yolov5/weights/yolov5s.pt', help='model.pt path')
    # file/folder, 0 for webcam
    cli.add_argument('--source', type=str,
                     default='inference/images', help='source')
    cli.add_argument('--output', type=str, default='inference/output',
                     help='output folder')  # output folder
    cli.add_argument('--img-size', type=int, default=640,
                     help='inference size (pixels)')
    cli.add_argument('--conf-thres', type=float,
                     default=0.4, help='object confidence threshold')
    cli.add_argument('--iou-thres', type=float,
                     default=0.5, help='IOU threshold for NMS')
    cli.add_argument('--fourcc', type=str, default='mp4v',
                     help='output video codec (verify ffmpeg support)')
    cli.add_argument('--device', default='',
                     help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    cli.add_argument('--view-img', action='store_true',
                     help='display results')
    cli.add_argument('--save-txt', action='store_true',
                     help='save results to *.txt')
    # class 0 is person
    cli.add_argument('--classes', nargs='+', type=int,
                     default=[0], help='filter by class')
    cli.add_argument('--agnostic-nms', action='store_true',
                     help='class-agnostic NMS')
    cli.add_argument('--augment', action='store_true',
                     help='augmented inference')
    cli.add_argument("--config_deepsort", type=str,
                     default="deep_sort_pytorch/configs/deep_sort.yaml")
    opts = cli.parse_args()
    opts.img_size = check_img_size(opts.img_size)
    print(opts)
    rospy.init_node('watchout')
    tracker = Watchout()
    with torch.no_grad():
        tracker.watch(opts)
| [
"cv2.rectangle",
"sys.path.insert",
"visualization_msgs.msg.Marker",
"APF_BASE_utils.BASE_TOOLS_for_car.Vis_and_deside",
"visualization_msgs.msg.MarkerArray",
"rospy.init_node",
"torch.from_numpy",
"cv2.imshow",
"numpy.array",
"rospy.Rate",
"tf.TransformListener",
"yolov5.utils.general.check_i... | [((88, 118), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""./yolov5"""'], {}), "(0, './yolov5')\n", (103, 118), False, 'import sys\n'), ((16647, 16672), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (16670, 16672), False, 'import argparse\n'), ((18469, 18498), 'yolov5.utils.general.check_img_size', 'check_img_size', (['args.img_size'], {}), '(args.img_size)\n', (18483, 18498), False, 'from yolov5.utils.general import check_img_size, non_max_suppression, scale_coords\n'), ((18520, 18547), 'rospy.init_node', 'rospy.init_node', (['"""watchout"""'], {}), "('watchout')\n", (18535, 18547), False, 'import rospy\n'), ((2678, 2726), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x1, y1)', '(x2, y2)', 'color', '(3)'], {}), '(img, (x1, y1), (x2, y2), color, 3)\n', (2691, 2726), False, 'import cv2\n'), ((3056, 3072), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (3070, 3072), False, 'import rospy\n'), ((3097, 3113), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (3111, 3113), False, 'import rospy\n'), ((3400, 3477), 'numpy.array', 'np.array', (['[[1.0 / fx, 0, -cx / fx], [0, 1.0 / fy, -cy / fy], [0.0, 0.0, 1.0]]'], {}), '([[1.0 / fx, 0, -cx / fx], [0, 1.0 / fy, -cy / fy], [0.0, 0.0, 1.0]])\n', (3408, 3477), True, 'import numpy as np\n'), ((3684, 3739), 'rospy.Publisher', 'rospy.Publisher', (['"""Personbox"""', 'MarkerArray'], {'queue_size': '(1)'}), "('Personbox', MarkerArray, queue_size=1)\n", (3699, 3739), False, 'import rospy\n'), ((3758, 3772), 'rospy.Rate', 'rospy.Rate', (['(10)'], {}), '(10)\n', (3768, 3772), False, 'import rospy\n'), ((3797, 3816), 'tf.TransformListener', 'TransformListener', ([], {}), '()\n', (3814, 3816), False, 'from tf import TransformListener\n'), ((4053, 4065), 'deep_sort_pytorch.utils.parser.get_config', 'get_config', ([], {}), '()\n', (4063, 4065), False, 'from deep_sort_pytorch.utils.parser import get_config\n'), ((4134, 4462), 'deep_sort_pytorch.deep_sort.DeepSort', 
'DeepSort', (['cfg.DEEPSORT.REID_CKPT'], {'max_dist': 'cfg.DEEPSORT.MAX_DIST', 'min_confidence': 'cfg.DEEPSORT.MIN_CONFIDENCE', 'nms_max_overlap': 'cfg.DEEPSORT.NMS_MAX_OVERLAP', 'max_iou_distance': 'cfg.DEEPSORT.MAX_IOU_DISTANCE', 'max_age': 'cfg.DEEPSORT.MAX_AGE', 'n_init': 'cfg.DEEPSORT.N_INIT', 'nn_budget': 'cfg.DEEPSORT.NN_BUDGET', 'use_cuda': '(True)'}), '(cfg.DEEPSORT.REID_CKPT, max_dist=cfg.DEEPSORT.MAX_DIST,\n min_confidence=cfg.DEEPSORT.MIN_CONFIDENCE, nms_max_overlap=cfg.\n DEEPSORT.NMS_MAX_OVERLAP, max_iou_distance=cfg.DEEPSORT.\n MAX_IOU_DISTANCE, max_age=cfg.DEEPSORT.MAX_AGE, n_init=cfg.DEEPSORT.\n N_INIT, nn_budget=cfg.DEEPSORT.NN_BUDGET, use_cuda=True)\n', (4142, 4462), False, 'from deep_sort_pytorch.deep_sort import DeepSort\n'), ((4595, 4620), 'yolov5.utils.torch_utils.select_device', 'select_device', (['opt.device'], {}), '(opt.device)\n', (4608, 4620), False, 'from yolov5.utils.torch_utils import select_device, time_synchronized\n'), ((5502, 5513), 'time.time', 'time.time', ([], {}), '()\n', (5511, 5513), False, 'import time\n'), ((5528, 5576), 'torch.zeros', 'torch.zeros', (['(1, 3, imgsz, imgsz)'], {'device': 'device'}), '((1, 3, imgsz, imgsz), device=device)\n', (5539, 5576), False, 'import torch\n'), ((5716, 5748), 'APF_BASE_utils.BASE_TOOLS_for_car.init', 'To_1.init', ([], {'mapsize': '(150)', 'scale': '(15)'}), '(mapsize=150, scale=15)\n', (5725, 5748), True, 'from APF_BASE_utils import BASE_TOOLS_for_car as To_1\n'), ((10017, 10042), 'math.ceil', 'math.ceil', (['(x1 + 0.382 * w)'], {}), '(x1 + 0.382 * w)\n', (10026, 10042), False, 'import math\n'), ((10052, 10077), 'math.ceil', 'math.ceil', (['(x1 + 0.618 * w)'], {}), '(x1 + 0.618 * w)\n', (10061, 10077), False, 'import math\n'), ((10087, 10112), 'math.ceil', 'math.ceil', (['(y1 + 0.382 * h)'], {}), '(y1 + 0.382 * h)\n', (10096, 10112), False, 'import math\n'), ((10122, 10147), 'math.ceil', 'math.ceil', (['(y1 + 0.618 * h)'], {}), '(y1 + 0.618 * h)\n', (10131, 10147), False, 'import 
math\n'), ((11033, 11045), 'sensor_msgs.msg.PointCloud', 'PointCloud', ([], {}), '()\n', (11043, 11045), False, 'from sensor_msgs.msg import PointCloud\n'), ((11842, 11850), 'visualization_msgs.msg.Marker', 'Marker', ([], {}), '()\n', (11848, 11850), False, 'from visualization_msgs.msg import Marker, MarkerArray\n'), ((11920, 11936), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (11934, 11936), False, 'import rospy\n'), ((12504, 12517), 'visualization_msgs.msg.MarkerArray', 'MarkerArray', ([], {}), '()\n', (12515, 12517), False, 'from visualization_msgs.msg import Marker, MarkerArray\n'), ((13714, 13739), 'math.ceil', 'math.ceil', (['(x1 + 0.382 * w)'], {}), '(x1 + 0.382 * w)\n', (13723, 13739), False, 'import math\n'), ((13749, 13774), 'math.ceil', 'math.ceil', (['(x1 + 0.618 * w)'], {}), '(x1 + 0.618 * w)\n', (13758, 13774), False, 'import math\n'), ((13784, 13809), 'math.ceil', 'math.ceil', (['(y1 + 0.382 * h)'], {}), '(y1 + 0.382 * h)\n', (13793, 13809), False, 'import math\n'), ((13819, 13844), 'math.ceil', 'math.ceil', (['(y1 + 0.618 * h)'], {}), '(y1 + 0.618 * h)\n', (13828, 13844), False, 'import math\n'), ((15091, 15107), 'scipy.spatial.transform.Rotation.from_quat', 'R.from_quat', (['rot'], {}), '(rot)\n', (15102, 15107), True, 'from scipy.spatial.transform import Rotation as R\n'), ((15164, 15185), 'numpy.array', 'np.array', (['(cx, cy, 0)'], {}), '((cx, cy, 0))\n', (15172, 15185), True, 'import numpy as np\n'), ((18584, 18599), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (18597, 18599), False, 'import torch\n'), ((2210, 2238), 'math.ceil', 'math.ceil', (['((x2 - x1) * 0.382)'], {}), '((x2 - x1) * 0.382)\n', (2219, 2238), False, 'import math\n'), ((2253, 2281), 'math.ceil', 'math.ceil', (['((x2 - x1) * 0.618)'], {}), '((x2 - x1) * 0.618)\n', (2262, 2281), False, 'import math\n'), ((2296, 2324), 'math.ceil', 'math.ceil', (['((y2 - y1) * 0.382)'], {}), '((y2 - y1) * 0.382)\n', (2305, 2324), False, 'import math\n'), ((2339, 2367), 
'math.ceil', 'math.ceil', (['((y2 - y1) * 0.618)'], {}), '((y2 - y1) * 0.618)\n', (2348, 2367), False, 'import math\n'), ((2614, 2666), 'cv2.getTextSize', 'cv2.getTextSize', (['label', 'cv2.FONT_HERSHEY_PLAIN', '(2)', '(2)'], {}), '(label, cv2.FONT_HERSHEY_PLAIN, 2, 2)\n', (2629, 2666), False, 'import cv2\n'), ((5015, 5040), 'yolov5.utils.datasets.LoadWebcam', 'LoadWebcam', (['source', 'imgsz'], {}), '(source, imgsz)\n', (5025, 5040), False, 'from yolov5.utils.datasets import LoadImages, LoadStreams, LoadWebcam, LoadRealsense\n'), ((5193, 5227), 'yolov5.utils.datasets.LoadRealsense', 'LoadRealsense', (['"""0"""'], {'img_size': 'imgsz'}), "('0', img_size=imgsz)\n", (5206, 5227), False, 'from yolov5.utils.datasets import LoadImages, LoadStreams, LoadWebcam, LoadRealsense\n'), ((5911, 5927), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (5925, 5927), False, 'import rospy\n'), ((6221, 6240), 'yolov5.utils.torch_utils.time_synchronized', 'time_synchronized', ([], {}), '()\n', (6238, 6240), False, 'from yolov5.utils.torch_utils import select_device, time_synchronized\n'), ((6375, 6484), 'yolov5.utils.general.non_max_suppression', 'non_max_suppression', (['pred', 'opt.conf_thres', 'opt.iou_thres'], {'classes': 'opt.classes', 'agnostic': 'opt.agnostic_nms'}), '(pred, opt.conf_thres, opt.iou_thres, classes=opt.\n classes, agnostic=opt.agnostic_nms)\n', (6394, 6484), False, 'from yolov5.utils.general import check_img_size, non_max_suppression, scale_coords\n'), ((6527, 6546), 'yolov5.utils.torch_utils.time_synchronized', 'time_synchronized', ([], {}), '()\n', (6544, 6546), False, 'from yolov5.utils.torch_utils import select_device, time_synchronized\n'), ((10562, 10575), 'numpy.array', 'np.array', (['uv1'], {}), '(uv1)\n', (10570, 10575), True, 'import numpy as np\n'), ((11565, 11588), 'geometry_msgs.msg.Point', 'Point', (['xmin', 'ymin', 'zmin'], {}), '(xmin, ymin, zmin)\n', (11570, 11588), False, 'from geometry_msgs.msg import Point\n'), ((11587, 11610), 
'geometry_msgs.msg.Point', 'Point', (['xmax', 'ymin', 'zmin'], {}), '(xmax, ymin, zmin)\n', (11592, 11610), False, 'from geometry_msgs.msg import Point\n'), ((11630, 11653), 'geometry_msgs.msg.Point', 'Point', (['xmin', 'ymax', 'zmin'], {}), '(xmin, ymax, zmin)\n', (11635, 11653), False, 'from geometry_msgs.msg import Point\n'), ((11652, 11675), 'geometry_msgs.msg.Point', 'Point', (['xmax', 'ymax', 'zmin'], {}), '(xmax, ymax, zmin)\n', (11657, 11675), False, 'from geometry_msgs.msg import Point\n'), ((11695, 11718), 'geometry_msgs.msg.Point', 'Point', (['xmin', 'ymin', 'zmax'], {}), '(xmin, ymin, zmax)\n', (11700, 11718), False, 'from geometry_msgs.msg import Point\n'), ((11717, 11740), 'geometry_msgs.msg.Point', 'Point', (['xmax', 'ymin', 'zmax'], {}), '(xmax, ymin, zmax)\n', (11722, 11740), False, 'from geometry_msgs.msg import Point\n'), ((11760, 11783), 'geometry_msgs.msg.Point', 'Point', (['xmin', 'ymax', 'zmax'], {}), '(xmin, ymax, zmax)\n', (11765, 11783), False, 'from geometry_msgs.msg import Point\n'), ((11782, 11805), 'geometry_msgs.msg.Point', 'Point', (['xmax', 'ymax', 'zmax'], {}), '(xmax, ymax, zmax)\n', (11787, 11805), False, 'from geometry_msgs.msg import Point\n'), ((12707, 12715), 'visualization_msgs.msg.Marker', 'Marker', ([], {}), '()\n', (12713, 12715), False, 'from visualization_msgs.msg import Marker, MarkerArray\n'), ((11392, 11405), 'rospy.Time', 'rospy.Time', (['(3)'], {}), '(3)\n', (11402, 11405), False, 'import rospy\n'), ((14923, 14936), 'rospy.Time', 'rospy.Time', (['(0)'], {}), '(0)\n', (14933, 14936), False, 'import rospy\n'), ((4741, 4781), 'torch.load', 'torch.load', (['weights'], {'map_location': 'device'}), '(weights, map_location=device)\n', (4751, 4781), False, 'import torch\n'), ((5946, 5967), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (5962, 5967), False, 'import torch\n'), ((7536, 7559), 'torch.Tensor', 'torch.Tensor', (['bbox_xywh'], {}), '(bbox_xywh)\n', (7548, 7559), False, 'import torch\n'), 
((7589, 7608), 'torch.Tensor', 'torch.Tensor', (['confs'], {}), '(confs)\n', (7601, 7608), False, 'import torch\n'), ((9325, 9352), 'cv2.imshow', 'cv2.imshow', (['"""watchout"""', 'im0'], {}), "('watchout', im0)\n", (9335, 9352), False, 'import cv2\n'), ((14116, 14129), 'numpy.array', 'np.array', (['uvd'], {}), '(uvd)\n', (14124, 14129), True, 'import numpy as np\n'), ((8061, 8077), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (8075, 8077), False, 'import rospy\n'), ((8685, 8773), 'APF_BASE_utils.BASE_TOOLS_for_car.Vis_and_deside', 'To_1.Vis_and_deside', ([], {'vis': 'vis', 'pos_now': 'pos_now', 'pos_end': 'pos_end', 'blocklist': 'blocklist'}), '(vis=vis, pos_now=pos_now, pos_end=pos_end, blocklist=\n blocklist)\n', (8704, 8773), True, 'from APF_BASE_utils import BASE_TOOLS_for_car as To_1\n'), ((9406, 9425), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (9423, 9425), False, 'import rospy\n'), ((6948, 6998), 'yolov5.utils.general.scale_coords', 'scale_coords', (['img.shape[2:]', 'det[:, :4]', 'im0.shape'], {}), '(img.shape[2:], det[:, :4], im0.shape)\n', (6960, 6998), False, 'from yolov5.utils.general import check_img_size, non_max_suppression, scale_coords\n'), ((9376, 9390), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (9387, 9390), False, 'import cv2\n'), ((9529, 9540), 'time.time', 'time.time', ([], {}), '()\n', (9538, 9540), False, 'import time\n'), ((9093, 9109), 'rospy.Time.now', 'rospy.Time.now', ([], {}), '()\n', (9107, 9109), False, 'import rospy\n')] |
#! /usr/bin/env python
"""
This node publishes the joint states to make a square trajectory with the SCARA's end-effector
@author: <NAME> (<EMAIL>)
"""
import rospy
import rospkg
rospack = rospkg.RosPack()
import sys
sys.path.insert(0, rospack.get_path('first_assignment')+"/scripts")
from sensor_msgs.msg import JointState
from geometry_msgs.msg import Pose, PoseArray
from IK_function import scara_IK
import numpy as np
class SquareTrajectoryPublisher():
    """Publishes joint states driving the SCARA end-effector along a square.

    Waypoints are precomputed along the four edges of a square (two vertices
    carry a small z-lift); each call to execute_step() publishes the IK
    solution for the next waypoint.
    """
    def __init__(self):
        self._publisher = None
        self._path_publisher = rospy.Publisher('desired_path', PoseArray, queue_size=10)
        self._path = PoseArray()
        self._path.header.frame_id = 'base'
        self._dt = 0.1   # time step between waypoints [s]
        self._v = 0.05   # linear speed along an edge [m/s]
        # the 4 vertices of the square (x, y, z)
        self._vertices = [ [0.27, -0.15, 0],
                           [0.57, -0.15, 0.1],
                           [0.57, 0.15, 0.1],
                           [0.27, 0.15, 0] ]
        self._joint_names = ['rotational1', 'rotational2', 'translation']
        self._current_segment = 0
        self._current_idx = 0
        self._waypoints = None
        self.compute_waypoints()

    def set_topic_name(self, name):
        """Create the JointState publisher on the given topic."""
        self._publisher = rospy.Publisher(name, JointState, queue_size=10)

    def next_segment(self):
        """Advance to the next edge of the square, wrapping after the 4th."""
        # bug fix: the original used 'is 3' (identity comparison on an int,
        # a SyntaxWarning since Python 3.8); '==' is the correct value test
        if self._current_segment == 3:
            self._current_segment = 0
        else:
            self._current_segment += 1
        self._current_idx = 0

    def return_list_of_waypoints(self, p1, p2, dp, it):
        """Interpolate from p1 to p2 in `it` steps of `dp`, appending each
        intermediate point to the visualization path.

        Returns the list of waypoints (p1, intermediates, p2)."""
        waypoints = list()
        current_p = p1
        waypoints.append(current_p)
        self._append_pose(current_p)
        for i in range(1, int(abs(it))):
            current_p = current_p + dp
            waypoints.append(current_p)
            self._append_pose(current_p)
        waypoints.append(p2)
        return waypoints

    def _append_pose(self, point):
        """Append *point* to the published path with the fixed tool orientation."""
        p = Pose()
        p.position.x = point[0]
        p.position.y = point[1]
        p.position.z = point[2]
        p.orientation.w = 0.707
        p.orientation.y = 0.707
        self._path.poses.append(p)

    def _segment_waypoints(self, start, end, ds):
        """Waypoints for one edge, stepping `ds` along its longest axis."""
        v = end - start
        v_ds = v / ds
        it = abs(v_ds[np.absolute(v_ds).argmax()])
        dv = v / float(it)
        return self.return_list_of_waypoints(start, end, dv, it)

    def compute_waypoints(self):
        """Precompute the waypoint lists for the four edges of the square."""
        ds = self._v * self._dt  # distance covered per time step
        # deduplicated: the original repeated the per-edge computation 4 times
        v1, v2, v3, v4 = (np.array(v) for v in self._vertices)
        self._waypoints = [self._segment_waypoints(v1, v2, ds),
                           self._segment_waypoints(v2, v3, ds),
                           self._segment_waypoints(v3, v4, ds),
                           self._segment_waypoints(v4, v1, ds)]

    def send_joint_position(self, position):
        """Publish *position* as a JointState message."""
        m = JointState()
        m.name = self._joint_names
        m.position = position
        self._publisher.publish(m)

    def publish_path(self):
        """Publish the precomputed desired path for visualization."""
        self._path_publisher.publish(self._path)

    def execute_step(self):
        """Publish the IK joint solution for the next waypoint."""
        l = len(self._waypoints[self._current_segment])
        if(self._current_idx >= l):
            self.next_segment()
        desired_point = (self._waypoints[self._current_segment])[self._current_idx]
        self._current_idx += 1
        q = scara_IK(desired_point)
        self.send_joint_position(q)
self.send_joint_position(q)
def main():
    """Spin the square-trajectory publisher at 10 Hz until ROS shuts down."""
    rospy.init_node('trajectory_publisher')
    topic = rospy.get_param('topic_name', 'controller/joint_states')
    loop_rate = rospy.Rate(10)
    node = SquareTrajectoryPublisher()
    node.set_topic_name(topic)
    while not rospy.is_shutdown():
        node.execute_step()
        # node.publish_path()
        loop_rate.sleep()


if __name__ == '__main__':
    main()
| [
"rospy.is_shutdown",
"rospy.init_node",
"rospy.get_param",
"sensor_msgs.msg.JointState",
"numpy.absolute",
"IK_function.scara_IK",
"numpy.array",
"rospkg.RosPack",
"rospy.Rate",
"geometry_msgs.msg.Pose",
"rospy.Publisher",
"geometry_msgs.msg.PoseArray"
] | [((200, 216), 'rospkg.RosPack', 'rospkg.RosPack', ([], {}), '()\n', (214, 216), False, 'import rospkg\n'), ((3964, 4003), 'rospy.init_node', 'rospy.init_node', (['"""trajectory_publisher"""'], {}), "('trajectory_publisher')\n", (3979, 4003), False, 'import rospy\n'), ((4021, 4077), 'rospy.get_param', 'rospy.get_param', (['"""topic_name"""', '"""controller/joint_states"""'], {}), "('topic_name', 'controller/joint_states')\n", (4036, 4077), False, 'import rospy\n'), ((4089, 4103), 'rospy.Rate', 'rospy.Rate', (['(10)'], {}), '(10)\n', (4099, 4103), False, 'import rospy\n'), ((605, 662), 'rospy.Publisher', 'rospy.Publisher', (['"""desired_path"""', 'PoseArray'], {'queue_size': '(10)'}), "('desired_path', PoseArray, queue_size=10)\n", (620, 662), False, 'import rospy\n'), ((684, 695), 'geometry_msgs.msg.PoseArray', 'PoseArray', ([], {}), '()\n', (693, 695), False, 'from geometry_msgs.msg import Pose, PoseArray\n'), ((1279, 1327), 'rospy.Publisher', 'rospy.Publisher', (['name', 'JointState'], {'queue_size': '(10)'}), '(name, JointState, queue_size=10)\n', (1294, 1327), False, 'import rospy\n'), ((1673, 1679), 'geometry_msgs.msg.Pose', 'Pose', ([], {}), '()\n', (1677, 1679), False, 'from geometry_msgs.msg import Pose, PoseArray\n'), ((2395, 2422), 'numpy.array', 'np.array', (['self._vertices[0]'], {}), '(self._vertices[0])\n', (2403, 2422), True, 'import numpy as np\n'), ((2436, 2463), 'numpy.array', 'np.array', (['self._vertices[1]'], {}), '(self._vertices[1])\n', (2444, 2463), True, 'import numpy as np\n'), ((2477, 2504), 'numpy.array', 'np.array', (['self._vertices[2]'], {}), '(self._vertices[2])\n', (2485, 2504), True, 'import numpy as np\n'), ((2518, 2545), 'numpy.array', 'np.array', (['self._vertices[3]'], {}), '(self._vertices[3])\n', (2526, 2545), True, 'import numpy as np\n'), ((3378, 3390), 'sensor_msgs.msg.JointState', 'JointState', ([], {}), '()\n', (3388, 3390), False, 'from sensor_msgs.msg import JointState\n'), ((3871, 3894), 'IK_function.scara_IK', 
'scara_IK', (['desired_point'], {}), '(desired_point)\n', (3879, 3894), False, 'from IK_function import scara_IK\n'), ((4225, 4244), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (4242, 4244), False, 'import rospy\n'), ((2023, 2029), 'geometry_msgs.msg.Pose', 'Pose', ([], {}), '()\n', (2027, 2029), False, 'from geometry_msgs.msg import Pose, PoseArray\n'), ((2610, 2627), 'numpy.absolute', 'np.absolute', (['v_ds'], {}), '(v_ds)\n', (2621, 2627), True, 'import numpy as np\n'), ((2790, 2807), 'numpy.absolute', 'np.absolute', (['v_ds'], {}), '(v_ds)\n', (2801, 2807), True, 'import numpy as np\n'), ((2971, 2988), 'numpy.absolute', 'np.absolute', (['v_ds'], {}), '(v_ds)\n', (2982, 2988), True, 'import numpy as np\n'), ((3152, 3169), 'numpy.absolute', 'np.absolute', (['v_ds'], {}), '(v_ds)\n', (3163, 3169), True, 'import numpy as np\n')] |
import numpy
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QMessageBox, QApplication
from PyQt5.QtCore import QRect
from PyQt5.QtGui import QTextCursor
from orangewidget import gui
from orangewidget.settings import Setting
from oasys.widgets import gui as oasysgui
from oasys.widgets.widget import AutomaticWidget
from silx.gui.plot import Plot2D
from orangecontrib.wofry.util.wofry_util import ImageViewWithFWHM
from orangecontrib.wofry.widgets.gui.python_script import PythonScript
class WofryWidget(AutomaticWidget):
    """Base Oasys/Orange widget for WOFRY tools.

    Builds the common GUI (Results/Output and optional Script tabs, plus a
    view-type selector) and provides shared 1D/2D plotting helpers.
    Subclasses must implement ``initializeTabs`` and ``do_plot_results``.
    """

    maintainer = "<NAME>"
    maintainer_email = "<EMAIL>(<EMAIL>"

    # Fixed layout constants (pixels).
    IMAGE_WIDTH = 760
    IMAGE_HEIGHT = 545
    MAX_WIDTH = 1320
    MAX_HEIGHT = 705
    CONTROL_AREA_WIDTH = 410
    TABS_AREA_HEIGHT = 545

    want_main_area = 1

    # Persisted setting: 0 = no plots, 1 = image only, 2 = image + histograms
    # (see the combo-box items built in __init__).
    view_type=Setting(1)

    def __init__(self, is_automatic=True, show_view_options=True, show_script_tab=True):
        """Build the widget chrome.

        Parameters
        ----------
        is_automatic : bool
            Forwarded to ``AutomaticWidget.__init__``.
        show_view_options : bool
            When True, add the "View Results" combo box; otherwise force
            ``view_type`` to 1.
        show_script_tab : bool
            When True, add the "Script" tab holding a ``PythonScript`` editor.
        """
        super().__init__(is_automatic)

        geom = QApplication.desktop().availableGeometry()
        # Place the window at 5% of the available screen and clamp its size
        # to MAX_WIDTH/MAX_HEIGHT.
        self.setGeometry(QRect(round(geom.width()*0.05),
                               round(geom.height()*0.05),
                               round(min(geom.width()*0.98, self.MAX_WIDTH)),
                               round(min(geom.height()*0.95, self.MAX_HEIGHT))))
        self.setMaximumHeight(self.geometry().height())
        self.setMaximumWidth(self.geometry().width())

        self.controlArea.setFixedWidth(self.CONTROL_AREA_WIDTH)

        self.main_tabs = oasysgui.tabWidget(self.mainArea)
        plot_tab = oasysgui.createTabPage(self.main_tabs, "Results")
        out_tab = oasysgui.createTabPage(self.main_tabs, "Output")

        #
        # add script tab to tabs panel
        #
        if show_script_tab:
            script_tab = oasysgui.createTabPage(self.main_tabs, "Script")
            self.wofry_script = PythonScript()
            self.wofry_script.code_area.setFixedHeight(400)
            script_box = gui.widgetBox(script_tab, "Python script", addSpace=True, orientation="horizontal")
            script_box.layout().addWidget(self.wofry_script)

        if show_view_options == True:
            view_box = oasysgui.widgetBox(plot_tab, "Results Options", addSpace=False, orientation="horizontal")
            view_box_1 = oasysgui.widgetBox(view_box, "", addSpace=False, orientation="vertical", width=350)

            self.view_type_combo = gui.comboBox(view_box_1, self, "view_type", label="View Results",
                                                labelWidth=220,
                                                items=["No", "Yes (image)","Yes (image + hist.)"],
                                                callback=self.set_ViewType, sendSelectedValue=False, orientation="horizontal")
        else:
            self.view_type = 1

        # Per-result plot tabs and canvases; populated by initializeTabs().
        self.tab = []
        self.tabs = oasysgui.tabWidget(plot_tab)
        self.initializeTabs()
        self.set_ViewType()

        self.wofry_output = oasysgui.textArea(height=600, width=600)
        out_box = gui.widgetBox(out_tab, "System Output", addSpace=True, orientation="horizontal")
        out_box.layout().addWidget(self.wofry_output)

        gui.rubber(self.mainArea)

    def initializeTabs(self):
        """Create the per-result plot tabs; must be overridden by subclasses."""
        raise NotImplementedError()

    def set_ViewType(self):
        """Rebuild the plot tabs and re-plot after the view type changed.

        Errors are shown in a message box; they are re-raised only when
        ``IS_DEVELOP`` is set.
        """
        self.progressBarInit()
        try:
            self.initializeTabs()
            self.plot_results()
        except Exception as exception:
            QtWidgets.QMessageBox.critical(self, "Error",
                                           str(exception),
                                           QtWidgets.QMessageBox.Ok)
            if self.IS_DEVELOP: raise exception
        self.progressBarFinished()

    def plot_results(self, progressBarValue=80):
        """Delegate to ``do_plot_results`` unless plotting is disabled
        (``view_type == 0``)."""
        if not self.view_type == 0:
            self.do_plot_results(progressBarValue)

    def do_plot_results(self, progressBarValue):
        """Actual plotting; must be overridden by subclasses."""
        raise NotImplementedError()

    def plot_data1D(self, x, y, progressBarValue, tabs_canvas_index, plot_canvas_index, title="", xtitle="", ytitle="",
                    log_x=False, log_y=False, color='blue', replace=True, control=False, calculate_fwhm=True,
                    xrange=None, yrange=None, symbol=''):
        """Plot the curve (x, y) into tab ``tabs_canvas_index`` on a fresh silx
        plot window stored at ``self.plot_canvas[plot_canvas_index]``.

        When ``calculate_fwhm`` is True two markers annotate the FWHM
        interval of the curve.
        """
        if tabs_canvas_index is None: tabs_canvas_index = 0 #back compatibility?

        # Replace whatever widget currently occupies the tab.
        self.tab[tabs_canvas_index].layout().removeItem(self.tab[tabs_canvas_index].layout().itemAt(0))

        self.plot_canvas[plot_canvas_index] = oasysgui.plotWindow(parent=None,
                                                                backend=None,
                                                                resetzoom=True,
                                                                autoScale=False,
                                                                logScale=True,
                                                                grid=True,
                                                                curveStyle=True,
                                                                colormap=False,
                                                                aspectRatio=False,
                                                                yInverted=False,
                                                                copy=True,
                                                                save=True,
                                                                print_=True,
                                                                control=control,
                                                                position=True,
                                                                roi=False,
                                                                mask=False,
                                                                fit=False)

        self.plot_canvas[plot_canvas_index].setDefaultPlotLines(True)
        self.plot_canvas[plot_canvas_index].setActiveCurveColor(color='blue')
        self.plot_canvas[plot_canvas_index].setGraphXLabel(xtitle)
        self.plot_canvas[plot_canvas_index].setGraphYLabel(ytitle)

        # ALLOW FIT BUTTON HERE
        self.plot_canvas[plot_canvas_index].fitAction.setVisible(True)

        # overwrite FWHM and peak values
        if calculate_fwhm:
            try:
                # Indices where the curve is at or above half its maximum;
                # the first/last such x positions delimit the FWHM interval.
                t = numpy.where(y>=max(y)*0.5)
                x_left,x_right = x[t[0][0]], x[t[0][-1]]

                self.plot_canvas[plot_canvas_index].addMarker(x_left, 0.5*y.max(), legend="G1",
                                                              text="FWHM=%5.2f"%(numpy.abs(x_right-x_left)),
                                                              color="pink",selectable=False, draggable=False,
                                                              symbol="+", constraint=None)
                self.plot_canvas[plot_canvas_index].addMarker(x_right, 0.5*y.max(), legend="G2", text=None, color="pink",
                                                              selectable=False, draggable=False, symbol="+", constraint=None)
            except:
                # Best effort: FWHM annotation is silently skipped when the
                # curve has no half-maximum crossing (e.g. empty data).
                pass

        self.tab[tabs_canvas_index].layout().addWidget(self.plot_canvas[plot_canvas_index])

        WofryWidget.plot_histo(self.plot_canvas[plot_canvas_index], x, y, title, xtitle, ytitle, color, replace, symbol=symbol)

        self.plot_canvas[plot_canvas_index].setXAxisLogarithmic(log_x)
        self.plot_canvas[plot_canvas_index].setYAxisLogarithmic(log_y)

        if xrange is not None:
            self.plot_canvas[plot_canvas_index].setGraphXLimits(xrange[0],xrange[1])
        if yrange is not None:
            self.plot_canvas[plot_canvas_index].setGraphYLimits(yrange[0],yrange[1])

        # Pad the y-range slightly so the curve does not touch the frame.
        # NOTE(review): this runs after the explicit yrange above and thus
        # overrides it — confirm that is intended.
        if min(y) < 0:
            if log_y:
                self.plot_canvas[plot_canvas_index].setGraphYLimits(min(y)*1.2, max(y)*1.2)
            else:
                self.plot_canvas[plot_canvas_index].setGraphYLimits(min(y)*1.01, max(y)*1.01)
        else:
            if log_y:
                self.plot_canvas[plot_canvas_index].setGraphYLimits(min(y), max(y)*1.2)
            else:
                self.plot_canvas[plot_canvas_index].setGraphYLimits(min(y)*0.99, max(y)*1.01)

        self.progressBarSet(progressBarValue)

    def plot_data2D(self, data2D, dataX, dataY, progressBarValue, tabs_canvas_index, plot_canvas_index,
                    title="",xtitle="", ytitle=""):
        """Dispatch 2D plotting according to ``view_type``:
        0 = nothing, 1 = image only, 2 = image plus marginal histograms."""
        if self.view_type == 0:
            pass
        elif self.view_type == 1:
            self.plot_data2D_only_image(data2D, dataX, dataY, progressBarValue, tabs_canvas_index,plot_canvas_index,
                                        title=title, xtitle=xtitle, ytitle=ytitle)
        elif self.view_type == 2:
            self.plot_data2D_with_histograms(data2D, dataX, dataY, progressBarValue, tabs_canvas_index,plot_canvas_index,
                                             title=title, xtitle=xtitle, ytitle=ytitle)

    def plot_data2D_only_image(self, data2D, dataX, dataY, progressBarValue, tabs_canvas_index, plot_canvas_index,
                               title="", xtitle="", ytitle=""):
        """Render ``data2D`` as a single silx ``Plot2D`` image in the tab."""
        self.tab[tabs_canvas_index].layout().removeItem(self.tab[tabs_canvas_index].layout().itemAt(0))

        # Axes are taken from the first/last coordinate values, i.e. the
        # arrays are assumed to be ordered.
        xmin = dataX[0] # numpy.min(dataX)
        xmax = dataX[-1] # numpy.max(dataX)
        ymin = dataY[0] # numpy.min(dataY)
        ymax = dataY[-1] # numpy.max(dataY)

        origin = (xmin, ymin)
        scale = (abs((xmax-xmin)/len(dataX)), abs((ymax-ymin)/len(dataY)))

        # Transpose so the first array axis maps onto the horizontal axis.
        data_to_plot = data2D.T

        colormap = {"name":"temperature", "normalization":"linear", "autoscale":True, "vmin":0, "vmax":0, "colors":256}

        self.plot_canvas[plot_canvas_index] = Plot2D()
        self.plot_canvas[plot_canvas_index].resetZoom()
        self.plot_canvas[plot_canvas_index].setXAxisAutoScale(True)
        self.plot_canvas[plot_canvas_index].setYAxisAutoScale(True)
        self.plot_canvas[plot_canvas_index].setGraphGrid(False)
        self.plot_canvas[plot_canvas_index].setKeepDataAspectRatio(True)
        self.plot_canvas[plot_canvas_index].yAxisInvertedAction.setVisible(False)
        self.plot_canvas[plot_canvas_index].setXAxisLogarithmic(False)
        self.plot_canvas[plot_canvas_index].setYAxisLogarithmic(False)
        #silx 0.4.0
        self.plot_canvas[plot_canvas_index].getMaskAction().setVisible(False)
        self.plot_canvas[plot_canvas_index].getRoiAction().setVisible(False)
        self.plot_canvas[plot_canvas_index].getColormapAction().setVisible(True)
        self.plot_canvas[plot_canvas_index].setKeepDataAspectRatio(False)

        self.plot_canvas[plot_canvas_index].addImage(numpy.array(data_to_plot),
                                                     legend="None",
                                                     scale=scale,
                                                     origin=origin,
                                                     colormap=colormap,
                                                     replace=True)

        self.plot_canvas[plot_canvas_index].setGraphXLabel(xtitle)
        self.plot_canvas[plot_canvas_index].setGraphYLabel(ytitle)
        self.plot_canvas[plot_canvas_index].setGraphTitle(title)

        self.tab[tabs_canvas_index].layout().addWidget(self.plot_canvas[plot_canvas_index])

        self.progressBarSet(progressBarValue)

    def plot_data2D_with_histograms(self, data2D, dataX, dataY, progressBarValue, tabs_canvas_index, plot_canvas_index,
                                    title="", xtitle="", ytitle=""):
        """Render ``data2D`` in an ``ImageViewWithFWHM`` (image plus marginal
        histograms and FWHM readout)."""
        # Axis labels in micrometers (\u03BC is the Greek letter mu).
        xum = "H [\u03BCm]"
        yum = "V [\u03BCm]"

        self.tab[tabs_canvas_index].layout().removeItem(self.tab[tabs_canvas_index].layout().itemAt(0))

        data_to_plot = data2D

        self.plot_canvas[plot_canvas_index] = ImageViewWithFWHM() #Plot2D()
        colormap = {"name":"temperature", "normalization":"linear", "autoscale":True, "vmin":0, "vmax":0, "colors":256}
        self.plot_canvas[plot_canvas_index].plot_2D(numpy.array(data_to_plot),dataX,dataY,factor1=1e0,factor2=1e0,
                                                    title=title,xtitle=xtitle, ytitle=ytitle,xum=xum,yum=yum,colormap=colormap)

        self.tab[tabs_canvas_index].layout().addWidget(self.plot_canvas[plot_canvas_index])
        self.progressBarSet(progressBarValue)

    @classmethod
    def plot_histo(cls, plot_window, x, y, title, xtitle, ytitle, color='blue', replace=True, symbol=''):
        """Add/replace the curve (x, y) on ``plot_window`` and refresh the
        labels, zoom and active curve."""
        import matplotlib
        # NOTE(review): the value is the *string* 'False'; matplotlib's
        # boolean validator accepts it, but a plain False would be clearer.
        matplotlib.rcParams['axes.formatter.useoffset']='False'

        plot_window.addCurve(x, y, title, symbol=symbol, color=color, xlabel=xtitle, ylabel=ytitle, replace=replace) #'+', '^', ','

        if not xtitle is None: plot_window.setGraphXLabel(xtitle)
        if not ytitle is None: plot_window.setGraphYLabel(ytitle)
        if not title is None: plot_window.setGraphTitle(title)

        plot_window.resetZoom()
        plot_window.replot()
        plot_window.setActiveCurve(title)

    def writeStdOut(self, text):
        """Append ``text`` to the System Output area, keeping it scrolled to
        the end (used to redirect stdout into the widget)."""
        cursor = self.wofry_output.textCursor()
        cursor.movePosition(QTextCursor.End)
        cursor.insertText(text)
        self.wofry_output.setTextCursor(cursor)
        self.wofry_output.ensureCursorVisible()
| [
"oasys.widgets.gui.widgetBox",
"numpy.abs",
"orangecontrib.wofry.widgets.gui.python_script.PythonScript",
"oasys.widgets.gui.plotWindow",
"orangewidget.gui.comboBox",
"oasys.widgets.gui.createTabPage",
"orangewidget.gui.widgetBox",
"orangewidget.settings.Setting",
"numpy.array",
"oasys.widgets.gui... | [((787, 797), 'orangewidget.settings.Setting', 'Setting', (['(1)'], {}), '(1)\n', (794, 797), False, 'from orangewidget.settings import Setting\n'), ((1462, 1495), 'oasys.widgets.gui.tabWidget', 'oasysgui.tabWidget', (['self.mainArea'], {}), '(self.mainArea)\n', (1480, 1495), True, 'from oasys.widgets import gui as oasysgui\n'), ((1515, 1564), 'oasys.widgets.gui.createTabPage', 'oasysgui.createTabPage', (['self.main_tabs', '"""Results"""'], {}), "(self.main_tabs, 'Results')\n", (1537, 1564), True, 'from oasys.widgets import gui as oasysgui\n'), ((1583, 1631), 'oasys.widgets.gui.createTabPage', 'oasysgui.createTabPage', (['self.main_tabs', '"""Output"""'], {}), "(self.main_tabs, 'Output')\n", (1605, 1631), True, 'from oasys.widgets import gui as oasysgui\n'), ((2811, 2839), 'oasys.widgets.gui.tabWidget', 'oasysgui.tabWidget', (['plot_tab'], {}), '(plot_tab)\n', (2829, 2839), True, 'from oasys.widgets import gui as oasysgui\n'), ((2929, 2969), 'oasys.widgets.gui.textArea', 'oasysgui.textArea', ([], {'height': '(600)', 'width': '(600)'}), '(height=600, width=600)\n', (2946, 2969), True, 'from oasys.widgets import gui as oasysgui\n'), ((2989, 3074), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['out_tab', '"""System Output"""'], {'addSpace': '(True)', 'orientation': '"""horizontal"""'}), "(out_tab, 'System Output', addSpace=True, orientation='horizontal'\n )\n", (3002, 3074), False, 'from orangewidget import gui\n'), ((3133, 3158), 'orangewidget.gui.rubber', 'gui.rubber', (['self.mainArea'], {}), '(self.mainArea)\n', (3143, 3158), False, 'from orangewidget import gui\n'), ((4394, 4682), 'oasys.widgets.gui.plotWindow', 'oasysgui.plotWindow', ([], {'parent': 'None', 'backend': 'None', 'resetzoom': '(True)', 'autoScale': '(False)', 'logScale': '(True)', 'grid': '(True)', 'curveStyle': '(True)', 'colormap': '(False)', 'aspectRatio': '(False)', 'yInverted': '(False)', 'copy': '(True)', 'save': '(True)', 'print_': '(True)', 'control': 'control', 
'position': '(True)', 'roi': '(False)', 'mask': '(False)', 'fit': '(False)'}), '(parent=None, backend=None, resetzoom=True, autoScale=\n False, logScale=True, grid=True, curveStyle=True, colormap=False,\n aspectRatio=False, yInverted=False, copy=True, save=True, print_=True,\n control=control, position=True, roi=False, mask=False, fit=False)\n', (4413, 4682), True, 'from oasys.widgets import gui as oasysgui\n'), ((9605, 9613), 'silx.gui.plot.Plot2D', 'Plot2D', ([], {}), '()\n', (9611, 9613), False, 'from silx.gui.plot import Plot2D\n'), ((11692, 11711), 'orangecontrib.wofry.util.wofry_util.ImageViewWithFWHM', 'ImageViewWithFWHM', ([], {}), '()\n', (11709, 11711), False, 'from orangecontrib.wofry.util.wofry_util import ImageViewWithFWHM\n'), ((1744, 1792), 'oasys.widgets.gui.createTabPage', 'oasysgui.createTabPage', (['self.main_tabs', '"""Script"""'], {}), "(self.main_tabs, 'Script')\n", (1766, 1792), True, 'from oasys.widgets import gui as oasysgui\n'), ((1825, 1839), 'orangecontrib.wofry.widgets.gui.python_script.PythonScript', 'PythonScript', ([], {}), '()\n', (1837, 1839), False, 'from orangecontrib.wofry.widgets.gui.python_script import PythonScript\n'), ((1925, 2013), 'orangewidget.gui.widgetBox', 'gui.widgetBox', (['script_tab', '"""Python script"""'], {'addSpace': '(True)', 'orientation': '"""horizontal"""'}), "(script_tab, 'Python script', addSpace=True, orientation=\n 'horizontal')\n", (1938, 2013), False, 'from orangewidget import gui\n'), ((2132, 2226), 'oasys.widgets.gui.widgetBox', 'oasysgui.widgetBox', (['plot_tab', '"""Results Options"""'], {'addSpace': '(False)', 'orientation': '"""horizontal"""'}), "(plot_tab, 'Results Options', addSpace=False, orientation\n ='horizontal')\n", (2150, 2226), True, 'from oasys.widgets import gui as oasysgui\n'), ((2247, 2334), 'oasys.widgets.gui.widgetBox', 'oasysgui.widgetBox', (['view_box', '""""""'], {'addSpace': '(False)', 'orientation': '"""vertical"""', 'width': '(350)'}), "(view_box, '', addSpace=False, 
orientation='vertical',\n width=350)\n", (2265, 2334), True, 'from oasys.widgets import gui as oasysgui\n'), ((2367, 2592), 'orangewidget.gui.comboBox', 'gui.comboBox', (['view_box_1', 'self', '"""view_type"""'], {'label': '"""View Results"""', 'labelWidth': '(220)', 'items': "['No', 'Yes (image)', 'Yes (image + hist.)']", 'callback': 'self.set_ViewType', 'sendSelectedValue': '(False)', 'orientation': '"""horizontal"""'}), "(view_box_1, self, 'view_type', label='View Results',\n labelWidth=220, items=['No', 'Yes (image)', 'Yes (image + hist.)'],\n callback=self.set_ViewType, sendSelectedValue=False, orientation=\n 'horizontal')\n", (2379, 2592), False, 'from orangewidget import gui\n'), ((10553, 10578), 'numpy.array', 'numpy.array', (['data_to_plot'], {}), '(data_to_plot)\n', (10564, 10578), False, 'import numpy\n'), ((11896, 11921), 'numpy.array', 'numpy.array', (['data_to_plot'], {}), '(data_to_plot)\n', (11907, 11921), False, 'import numpy\n'), ((943, 965), 'PyQt5.QtWidgets.QApplication.desktop', 'QApplication.desktop', ([], {}), '()\n', (963, 965), False, 'from PyQt5.QtWidgets import QMessageBox, QApplication\n'), ((6549, 6576), 'numpy.abs', 'numpy.abs', (['(x_right - x_left)'], {}), '(x_right - x_left)\n', (6558, 6576), False, 'import numpy\n')] |
import numpy
# scipy.ndimage.filters was deprecated and removed (SciPy >= 1.10);
# gaussian_filter has always been available at the package top level.
from scipy.ndimage import gaussian_filter as gf
from scipy.special import expit
import sys
import os
import matplotlib.pyplot as plt
import imageio.core

# --- configuration ---------------------------------------------------------
rootDir = 'd:/projects/astronomy/tgas/'

# --- build a 3D star-density cube from the TGAS catalogue ------------------
number_of_bins = 2
w = 800//number_of_bins          # half-width of the cube, in bins

argv = sys.argv
hotSlices = False
brightSlices = False
# Fail fast on a missing/unknown mode: previously an unrecognized argument
# left `sigma`/`spread` undefined and the script crashed later with a
# NameError.
if len(argv) < 2 or argv[1] not in ('hot', 'bright'):
    raise SystemExit('usage: python <script> hot|bright')
if argv[1] == 'hot':
    sigma = 15/number_of_bins    # Gaussian smoothing width (bins)
    spread = 2000                # contrast factor for the sigmoid mapping
    hotSlices = True
else:                            # argv[1] == 'bright'
    sigma = 5/number_of_bins
    spread = 150
    brightSlices = True

a = numpy.zeros(shape=(w*2,w*2,w*2))
# Stream the catalogue line by line; `with` guarantees the file is closed
# even if a row fails to parse. (The unused `tgas` dict was removed.)
with open(rootDir+'output/star_list/stars.csv','r') as fp:
    for line in fp:
        bits = line.strip().split(',')
        if len(bits) > 1:
            name, colourIndex, xg, yg, zg, glon, glat, m, plx, pmra, pmdec, isHot, isBright = bits
            if (hotSlices and isHot == '1') or (brightSlices and isBright == '1'):
                # Galactic xyz coordinates -> cube indices, centred at +w.
                x = int(round(float(xg)/number_of_bins))+w
                y = int(round(float(yg)/number_of_bins))+w
                z = int(round(float(zg)/number_of_bins))+w
                if 0 <= x < 2*w and 0 <= y < 2*w and 0 <= z < 2*w:
                    a[x, y, z] += 1

# Smooth the raw counts, then squash them into [0, 1) with a scaled sigmoid
# so dense regions saturate and sparse regions stay visible.
gaussian = gf(a, sigma=sigma, truncate=3)
b = 2*(expit(spread*gaussian)-0.5)

if hotSlices:
    sliceDir = rootDir+'output/slices/hot/'
else:
    sliceDir = rootDir+'output/slices/bright/'
# makedirs(..., exist_ok=True) creates the whole path and replaces the
# race-prone isdir/mkdir pairs.
os.makedirs(sliceDir+'cm', exist_ok=True)
os.makedirs(sliceDir+'16bit', exist_ok=True)

count = 0
# Write each z-slice of the cube twice: 16-bit greyscale PGM for further
# processing and a colour-mapped PNG for visual inspection.
for tgasSlice in numpy.dsplit(b,2*w):
    print('slice', count)
    filename=sliceDir+'16bit/slice_'+str(count).zfill(4)+'.pgm'
    b2 = imageio.core.image_as_uint(tgasSlice, bitdepth=16)
    imageio.imwrite(filename,b2)
    filename=sliceDir+'cm/slice_'+str(count).zfill(4)+'.png'
    plt.imsave(filename,tgasSlice.squeeze(),cmap='inferno')
count += 1 | [
"scipy.ndimage.filters.gaussian_filter",
"scipy.special.expit",
"numpy.dsplit",
"numpy.zeros",
"os.path.isdir",
"os.mkdir"
] | [((568, 608), 'numpy.zeros', 'numpy.zeros', ([], {'shape': '(w * 2, w * 2, w * 2)'}), '(shape=(w * 2, w * 2, w * 2))\n', (579, 608), False, 'import numpy\n'), ((1273, 1303), 'scipy.ndimage.filters.gaussian_filter', 'gf', (['a'], {'sigma': 'sigma', 'truncate': '(3)'}), '(a, sigma=sigma, truncate=3)\n', (1275, 1303), True, 'from scipy.ndimage.filters import gaussian_filter as gf\n'), ((1793, 1815), 'numpy.dsplit', 'numpy.dsplit', (['b', '(2 * w)'], {}), '(b, 2 * w)\n', (1805, 1815), False, 'import numpy\n'), ((1350, 1390), 'os.path.isdir', 'os.path.isdir', (["(rootDir + 'output/slices')"], {}), "(rootDir + 'output/slices')\n", (1363, 1390), False, 'import os\n'), ((1395, 1430), 'os.mkdir', 'os.mkdir', (["(rootDir + 'output/slices')"], {}), "(rootDir + 'output/slices')\n", (1403, 1430), False, 'import os\n'), ((1569, 1592), 'os.path.isdir', 'os.path.isdir', (['sliceDir'], {}), '(sliceDir)\n', (1582, 1592), False, 'import os\n'), ((1599, 1617), 'os.mkdir', 'os.mkdir', (['sliceDir'], {}), '(sliceDir)\n', (1607, 1617), False, 'import os\n'), ((1628, 1658), 'os.path.isdir', 'os.path.isdir', (["(sliceDir + 'cm')"], {}), "(sliceDir + 'cm')\n", (1641, 1658), False, 'import os\n'), ((1663, 1688), 'os.mkdir', 'os.mkdir', (["(sliceDir + 'cm')"], {}), "(sliceDir + 'cm')\n", (1671, 1688), False, 'import os\n'), ((1697, 1730), 'os.path.isdir', 'os.path.isdir', (["(sliceDir + '16bit')"], {}), "(sliceDir + '16bit')\n", (1710, 1730), False, 'import os\n'), ((1735, 1763), 'os.mkdir', 'os.mkdir', (["(sliceDir + '16bit')"], {}), "(sliceDir + '16bit')\n", (1743, 1763), False, 'import os\n'), ((1312, 1336), 'scipy.special.expit', 'expit', (['(spread * gaussian)'], {}), '(spread * gaussian)\n', (1317, 1336), False, 'from scipy.special import expit\n')] |
from functools import reduce
from operator import mul
from quantecon import cartesian
import numpy as np
from numpy import zeros
def prod(l):
    """Return the product of the elements of *l* (1.0 for an empty sequence)."""
    result = 1.0
    for factor in l:
        result = result * factor
    return result
from dolo.numeric.misc import mlinspace
class Grid:
    """Common node accessors for all grid types.

    Concrete grids store their nodes in ``self.__nodes__`` as a 2-D array
    with one node per row.
    """

    def nodes(self):
        """Return the full array of nodes."""
        return self.__nodes__

    def n_nodes(self):
        """Return the number of nodes (rows) in the grid."""
        all_nodes = self.__nodes__
        return all_nodes.shape[0]

    def node(self, i):
        """Return the *i*-th node as a 1-D row."""
        return self.__nodes__[i, :]
class EmptyGrid(Grid):
    """Placeholder grid containing no nodes at all."""

    type = 'empty'

    def nodes(self):
        # Nothing to return for an empty grid.
        return None

    def n_nodes(self):
        return 0

    def node(self, i):
        # No node exists at any index.
        return None
class PointGrid(Grid):
    """Degenerate grid consisting of a single point.

    Note: ``nodes``/``node`` deliberately return None (as in EmptyGrid);
    only ``n_nodes`` distinguishes the two.
    """

    type = 'point'

    def __init__(self, point):
        # Keep a private array copy of the point coordinates.
        self.point = np.array(point)

    def nodes(self):
        return None

    def n_nodes(self):
        return 1

    def node(self, i):
        return None
class UnstructuredGrid(Grid):
    """Grid defined by an explicit array of nodes (one node per row)."""

    type = 'unstructured'

    def __init__(self, nodes):
        node_array = np.array(nodes, dtype=float)
        self.__nodes__ = node_array
        # Componentwise bounding box of the scattered nodes.
        self.min = node_array.min(axis=0)
        self.max = node_array.max(axis=0)
class CartesianGrid(Grid):
    """Regular cartesian grid on the box [min, max].

    Parameters
    ----------
    min, max : array-like
        Lower/upper bounds, one entry per dimension.
    n : array-like of int, optional
        Number of nodes along each dimension. When empty (the default),
        20 nodes per dimension are used.
    """

    type = 'cartesian'

    # Default changed from a mutable [] to an immutable (); it is only
    # inspected with len(), so this is behaviorally equivalent and safer.
    def __init__(self, min, max, n=()):
        self.min = np.array(min, dtype=float)
        self.max = np.array(max, dtype=float)
        if len(n) == 0:
            # BUG FIX: np.zeros(n, dtype=int) + 20 with n == [] produced a
            # 0-d array instead of "20 nodes per *each* dimension"; build a
            # vector with one entry per dimension instead.
            self.n = np.full(self.min.shape, 20, dtype=int)
        else:
            self.n = np.array(n, dtype=int)
        self.__nodes__ = mlinspace(self.min, self.max, self.n)
class NonUniformCartesianGrid(Grid):
    """Product grid built from explicit (possibly non-uniform) per-dimension
    node lists."""

    type = "NonUniformCartesian"

    def __init__(self, list_of_nodes):
        arrays = [np.array(nodes_1d) for nodes_1d in list_of_nodes]
        # Per-dimension bounds, kept as plain lists of scalars.
        self.min = [min(arr) for arr in arrays]
        self.max = [max(arr) for arr in arrays]
        # Full tensor product of the one-dimensional node sets.
        self.__nodes__ = cartesian(arrays)
class SmolyakGrid(Grid):
    """Sparse (Smolyak) grid on the box [min, max].

    Parameters
    ----------
    min, max : array-like
        Lower/upper bounds, one entry per dimension.
    mu : int, optional
        Smolyak density parameter (default 2).
    """

    type = "Smolyak"

    def __init__(self, min, max, mu=2):
        # Local import keeps `interpolation` an optional dependency.
        from interpolation.smolyak import SmolyakGrid as ISmolyakGrid
        # FIX: removed leftover debug print() statements that polluted stdout
        # on every construction.
        min = np.array(min)
        max = np.array(max)
        self.min = min
        self.max = max
        self.mu = mu
        d = len(min)
        sg = ISmolyakGrid(d, mu, lb=min, ub=max)
        self.sg = sg
        self.__nodes__ = sg.grid
def cat_grids(grid_1, grid_2):
    """Concatenate two grids along the dimension axis.

    An ``EmptyGrid`` acts as the neutral element on either side. Two
    ``CartesianGrid`` instances are merged by concatenating their bounds
    and per-dimension node counts. Any other combination raises.
    """
    if isinstance(grid_1, EmptyGrid):
        return grid_2
    # FIX: the symmetric case was missing — concatenating with an empty
    # grid on the right is also a no-op.
    if isinstance(grid_2, EmptyGrid):
        return grid_1
    if isinstance(grid_1, CartesianGrid) and isinstance(grid_2, CartesianGrid):
        min = np.concatenate([grid_1.min, grid_2.min])
        max = np.concatenate([grid_1.max, grid_2.max])
        n = np.concatenate([grid_1.n, grid_2.n])
        return CartesianGrid(min, max, n)
    else:
        raise Exception("Not Implemented.")
# compat: free-function versions of the Grid accessors
def node(grid, i):
    """Return node *i* of *grid*."""
    return grid.node(i)


def nodes(grid):
    """Return all nodes of *grid*."""
    return grid.nodes()


def n_nodes(grid):
    """Return the number of nodes of *grid*."""
    return grid.n_nodes()
if __name__ == "__main__":
    # Smoke-test / demo: build each grid type and print its nodes.
    # (The "Cartsian" typo below is in the original output string and is
    # left untouched here.)
    print("Cartsian Grid")
    grid = CartesianGrid([0.1, 0.3], [9, 0.4], [50, 10])
    print(grid.nodes())
    print(nodes(grid))

    print("UnstructuredGrid")
    ugrid = UnstructuredGrid([[0.1, 0.3], [9, 0.4], [50, 10]])
    print(nodes(ugrid))
    print(node(ugrid,0))
    print(n_nodes(ugrid))

    print("Non Uniform CartesianGrid")
    ugrid = NonUniformCartesianGrid([[0.1, 0.3], [9, 0.4], [50, 10]])
    print(nodes(ugrid))
    print(node(ugrid,0))
    print(n_nodes(ugrid))

    print("Smolyak Grid")
    sg = SmolyakGrid([0.1, 0.2], [1.0, 2.0], 2)
    print(nodes(sg))
    print(node(sg, 1))
    print(n_nodes(sg))
| [
"functools.reduce",
"dolo.numeric.misc.mlinspace",
"interpolation.smolyak.SmolyakGrid",
"numpy.array",
"numpy.zeros",
"numpy.concatenate",
"quantecon.cartesian"
] | [((151, 170), 'functools.reduce', 'reduce', (['mul', 'l', '(1.0)'], {}), '(mul, l, 1.0)\n', (157, 170), False, 'from functools import reduce\n'), ((665, 680), 'numpy.array', 'np.array', (['point'], {}), '(point)\n', (673, 680), True, 'import numpy as np\n'), ((912, 940), 'numpy.array', 'np.array', (['nodes'], {'dtype': 'float'}), '(nodes, dtype=float)\n', (920, 940), True, 'import numpy as np\n'), ((1159, 1185), 'numpy.array', 'np.array', (['min'], {'dtype': 'float'}), '(min, dtype=float)\n', (1167, 1185), True, 'import numpy as np\n'), ((1205, 1231), 'numpy.array', 'np.array', (['max'], {'dtype': 'float'}), '(max, dtype=float)\n', (1213, 1231), True, 'import numpy as np\n'), ((1388, 1425), 'dolo.numeric.misc.mlinspace', 'mlinspace', (['self.min', 'self.max', 'self.n'], {}), '(self.min, self.max, self.n)\n', (1397, 1425), False, 'from dolo.numeric.misc import mlinspace\n'), ((1727, 1751), 'quantecon.cartesian', 'cartesian', (['list_of_nodes'], {}), '(list_of_nodes)\n', (1736, 1751), False, 'from quantecon import cartesian\n'), ((1956, 1969), 'numpy.array', 'np.array', (['min'], {}), '(min)\n', (1964, 1969), True, 'import numpy as np\n'), ((1984, 1997), 'numpy.array', 'np.array', (['max'], {}), '(max)\n', (1992, 1997), True, 'import numpy as np\n'), ((2117, 2152), 'interpolation.smolyak.SmolyakGrid', 'ISmolyakGrid', (['d', 'mu'], {'lb': 'min', 'ub': 'max'}), '(d, mu, lb=min, ub=max)\n', (2129, 2152), True, 'from interpolation.smolyak import SmolyakGrid as ISmolyakGrid\n'), ((2394, 2434), 'numpy.concatenate', 'np.concatenate', (['[grid_1.min, grid_2.min]'], {}), '([grid_1.min, grid_2.min])\n', (2408, 2434), True, 'import numpy as np\n'), ((2449, 2489), 'numpy.concatenate', 'np.concatenate', (['[grid_1.max, grid_2.max]'], {}), '([grid_1.max, grid_2.max])\n', (2463, 2489), True, 'import numpy as np\n'), ((2502, 2538), 'numpy.concatenate', 'np.concatenate', (['[grid_1.n, grid_2.n]'], {}), '([grid_1.n, grid_2.n])\n', (2516, 2538), True, 'import numpy as np\n'), 
((1340, 1362), 'numpy.array', 'np.array', (['n'], {'dtype': 'int'}), '(n, dtype=int)\n', (1348, 1362), True, 'import numpy as np\n'), ((1564, 1575), 'numpy.array', 'np.array', (['l'], {}), '(l)\n', (1572, 1575), True, 'import numpy as np\n'), ((1277, 1299), 'numpy.zeros', 'np.zeros', (['n'], {'dtype': 'int'}), '(n, dtype=int)\n', (1285, 1299), True, 'import numpy as np\n')] |
import numpy as np
from gym import utils, spaces
from gym.envs.mujoco import mujoco_env
from gym.envs.robotics.rotations import quat2euler, euler2quat, mat2euler
import os
# import random
from random import uniform, randint, randrange
from mjremote import mjremote
import time
from doorenv2.envs.doorenv import DoorEnv
class DoorEnvBlueV1(DoorEnv, utils.EzPickle):
    """Door-opening MuJoCo environment for the Blue arm (v1).

    Extends ``DoorEnv`` with Blue-specific gripper action generation,
    dynamics randomization and episode reset logic. Branches on substrings
    of ``self.xml_path`` ("float", "mobile", "hook", "gripper", ...) to
    adapt to the world/end-effector variant being loaded.
    """

    def __init__(self,
                 port=1050,
                 unity=False,visionnet_input=False,
                 world_path='/home/demo/DoorGym/world_generator/world/pull_floatinghook',
                 pos_control=False,
                 ik_control=False
                 ):
        # NOTE(review): ik_control is accepted but not forwarded to
        # DoorEnv.__init__ here (unlike DoorEnvBlueV2) — confirm intended.
        super().__init__(
            port=port,
            unity=unity,
            visionnet_input=visionnet_input,
            world_path=world_path,
            pos_control=pos_control,
            )
        utils.EzPickle.__init__(self)

    def gripper_action_gen(self, a):
        """Append the four mirrored finger commands (all driven by the last
        element of ``a``) to the arm action vector."""
        self.gripper_action = np.array([a[-1],-a[-1],a[-1],-a[-1]])
        return np.concatenate((a,self.gripper_action))

    def randomized_property(self):
        """Domain-randomize body masses, joint damping and actuator gains
        around their original (XML) values via gaussian sampling."""
        self.model.body_mass[10:16] = self.sample_gaussiannormal(self.model_origin.body_mass[10:16], 0.2) # gaussiannormal x original_mass
        self.model.dof_damping[0:10] = self.sample_gaussiannormal(self.model_origin.dof_damping[0:10], 0.2) # gaussiannormal x original_damping
        self.model.actuator_gainprm[:,0] = self.sample_gaussiannormal(self.model_origin.actuator_gainprm[:,0], 0.1) # gaussiannormal x original_damping

    def _reset_model(self, gg=2, hooked=False, untucked=False):
        """Reset joint positions and the door-knob goal for a new episode.

        Parameters
        ----------
        gg : int
            Number of goal coordinates stored at the tail of ``qpos``.
        hooked : bool
            When True (floating variants only), start with the end effector
            already placed at the door knob.
        untucked : bool
            Accepted for interface compatibility; not used in this method.

        Returns the initial observation from ``self._get_obs()``.
        """
        qpos = self.init_qpos
        if self.xml_path.find("float")>-1:
            # Floating end effector: randomize every DoF, plus the wrist
            # roll for hook/gripper variants.
            qpos = self.np_random.uniform(low=-0.3, high=0.3, size=self.model.nq) + self.init_qpos
            if self.xml_path.find("hook")>-1:
                qpos[self.nn-1] = np.random.uniform(0.0,3.13)
            if self.xml_path.find("gripper")>-1:
                qpos[self.nn-2] = np.random.uniform(0.0,3.13)
        elif self.xml_path.find("mobile")>-1:
            # Mobile base: two slider DoFs then the arm joints.
            qpos[0] = 0.0 + uniform(-0.0, 0.0) # x_slider
            qpos[1] = 0.0 + uniform(-0.0, -0.0) # y_slider
            qpos[2] = 0.0 + uniform(-2.3412, 3.3999) # base_roll_joint
            qpos[3] = 0.0 + uniform(-2.2944, 0) # shoulder_lift_joint
            qpos[4] = 0.0 + uniform(-2.6761, 2.6761) # shoulder_roll_joint
            qpos[5] = 1.0 + uniform(-2.2944, 0) # elbow_lift_joint
            qpos[6] = 0.0 + uniform(-2.6761, 2.6761) # elbow_roll_joint
            qpos[7] = 1.0 + uniform(-2.2944, 0) # wrist_lift_joint
            qpos[8] = 0.0 + uniform(-2.6761, 2.6761) # wrist_roll_joint
        else:
            # Fixed-base arm: small perturbations around the home pose.
            qpos = self.init_qpos
            qpos[0] = 0.0 + uniform(-0.1, 0.1) # base_roll_joint
            qpos[1] = 0.0 + uniform(-0.1, 0.1) # shoulder_lift_joint
            qpos[2] = 0.0 + uniform(-0.1, 0.1) # shoulder_roll_joint
            qpos[3] = 0.0 + uniform(-0.1, 0.1) # elbow_lift_joint
            qpos[4] = 0.0 + uniform(-0.1, 0.1) # elbow_roll_joint
            qpos[5] = 0.0 + uniform(-0.1, 0.1) # wrist_lift_joint
            qpos[6] = 0.0 + uniform(-0.1, 0.1) # wrist_roll_joint

        # Randomize the goal (door-knob position offset); pull doors get a
        # hinge-side dependent x range.
        if self.xml_path.find("pull")>-1:
            self.goal = self.np_random.uniform(low=-.15, high=.15, size=gg)
            if self.xml_path.find("lefthinge")>-1:
                self.goal[0] = np.random.uniform(-0.15,0.05)
                self.goal[1] = np.random.uniform(-0.15,0.15)
            else:
                self.goal[0] = np.random.uniform(-0.05,0.15)
                self.goal[1] = np.random.uniform(-0.15,0.15)
        else:
            self.goal = np.zeros(gg)
            self.goal[0] = np.random.uniform(-0.15,0.15)

        # Door joints start at rest; the last gg entries carry the goal.
        qpos[self.nn:-gg] = 0
        qpos[-gg:] = self.goal
        # qvel = self.init_qvel
        # self.set_state(qpos, qvel)

        if hooked:
            if self.xml_path.find("float")>-1:
                # Place the floating end effector directly at the knob,
                # expressed relative to the robot origin.
                robot_origin = np.array([1.0, 0, 1.2])
                if self.xml_path.find("lever")>-1:
                    goal_in_xyz = self.sim.data.get_geom_xpos("door_knob_4") - robot_origin
                    offset_to_hook = np.array([0.13,0.0,0.0])
                elif self.xml_path.find("round")>-1:
                    goal_in_xyz = self.sim.data.get_geom_xpos("door_knob_2") - robot_origin
                    offset_to_hook = np.array([0.0,0.0,0.0])
                elif self.xml_path.find("pull")>-1:
                    goal_in_xyz = self.sim.data.get_geom_xpos("door_knob_7") - robot_origin
                    offset_to_hook = np.array([0.13,0.0,0.0])
                else:
                    # NOTE(review): asserting a non-empty string is always
                    # true — this guard can never fire as written.
                    assert "not sure about the door knob type"
                if self.xml_path.find("hook")>-1:
                    offset_to_hook_randomness = np.array([np.random.uniform(-0.01,0.01), np.random.uniform(-0.005,0.005), np.random.uniform(-0.06,0.06)])
                    hand_init_pos_3D = goal_in_xyz + offset_to_hook + offset_to_hook_randomness
                    hand_ori_random = self.np_random.uniform(low=-0.05, high=0.05, size=3)
                    # 50/50 chance of approaching with the wrist flipped.
                    wrist_dir_chance = np.random.randint(100)
                    if wrist_dir_chance>=50:
                        hand_ori_random[-1] = np.random.uniform(0.0,0.4)
                    else:
                        hand_ori_random[-1] = np.random.uniform(2.74,3.14)
                    qpos[:self.nn] = np.concatenate((hand_init_pos_3D,hand_ori_random))
                if self.xml_path.find("gripper")>-1:
                    offset_to_hook_randomness = np.array([0.0, 0.0, np.random.uniform(-0.06,0.06)])
                    hand_init_pos_3D = goal_in_xyz + offset_to_hook + offset_to_hook_randomness
                    hand_ori_random = self.np_random.uniform(low=-0.01, high=0.01, size=3)
                    wrist_dir_chance = np.random.randint(100)
                    if wrist_dir_chance>=50:
                        hand_ori_random[-1] = np.random.uniform(0.0,0.01)
                    else:
                        hand_ori_random[-1] = np.random.uniform(3.13,3.14)
                    qpos[:self.nn-1] = np.concatenate((hand_init_pos_3D,hand_ori_random))
                    qpos[0] -= 0.02
                    # Open the four finger joints.
                    qpos[self.nn: self.nn+4] = np.array([1.0,-1.0,1.0,-1.0])

        qvel = self.init_qvel
        self.set_state(qpos, qvel)
        if self.unity:
            # Mirror the new pose to the Unity renderer.
            self.remote.setqpos(self.sim.data.qpos)
        return self._get_obs()

    def get_robot_joints(self):
        """Return the concatenated robot joint positions and velocities
        (first ``self.nn`` entries of qpos/qvel)."""
        return np.concatenate([
            self.sim.data.qpos.flat[:self.nn],
            self.sim.data.qvel.flat[:self.nn]])

    def get_finger_target(self):
        """Return the xyz position of the end-effector target point
        (hook fingertip, or the midpoint between the two gripper fingers)."""
        if self.xml_path.find("hook")>-1:
            return self.sim.data.get_geom_xpos("hookfinger_2")
        elif self.xml_path.find("gripper")>-1:
            return (self.sim.data.get_geom_xpos("fingerleft2") \
                + self.sim.data.get_geom_xpos("fingerright2"))/2.0
        else:
            # NOTE(review): this assert on a non-empty string never fires.
            assert "not sure about the end-effector type"

    def get_finger_ori(self):
        """Return the end-effector orientation as Euler angles."""
        if self.xml_path.find("hook")>-1:
            return quat2euler(self.sim.data.get_body_xquat("robotfinger_hook_target"))
        elif self.xml_path.find("gripper")>-1:
            return quat2euler(self.sim.data.get_body_xquat("robotwrist_rolllink"))
        else:
            # NOTE(review): this assert on a non-empty string never fires.
            assert "not sure about the end-effector type"

    def get_finger_quat(self):
        """Return the end-effector orientation as a quaternion."""
        if self.xml_path.find("hook")>-1:
            return self.sim.data.get_body_xquat("robotfinger_hook_target")
        elif self.xml_path.find("gripper")>-1:
            return self.sim.data.get_body_xquat("robotwrist_rolllink")
        else:
            # NOTE(review): this assert on a non-empty string never fires.
            assert "not sure about the end-effector type"
class DoorEnvBlueV2(DoorEnv, utils.EzPickle):
    """Door-opening environment for the Blue (v2) robot arm.

    Thin specialization of ``DoorEnv`` that adds a four-joint mimic gripper,
    physics-parameter randomization, and a collision-free randomized reset
    for the 7-DoF arm.
    """

    def __init__(self,
                 port=1050,
                 unity=False,
                 visionnet_input=False,
                 vision_obs=False,
                 world_path='/home/demo/DoorGym/world_generator/world/pull_floatinghook',
                 pos_control=False,
                 ik_control=False,
                 imgsize_h=640,
                 imgsize_w=640
                 ):
        """Forward all configuration to ``DoorEnv`` and register EzPickle.

        Args:
            port: TCP port used to talk to the Unity visualization server.
            unity: whether a Unity visualization server is attached.
            visionnet_input: feed vision-network output into the observation.
            vision_obs: include camera images in the observation.
            world_path: directory of generated MuJoCo world files.
            pos_control: use position control instead of torque control.
            ik_control: command the arm in end-effector (IK) space.
            imgsize_h: rendered camera image height in pixels.
            imgsize_w: rendered camera image width in pixels.
        """
        super().__init__(
            port=port,
            unity=unity,
            visionnet_input=visionnet_input,
            vision_obs=vision_obs,
            world_path=world_path,
            pos_control=pos_control,
            ik_control=ik_control,
            imgsize_h=imgsize_h,
            imgsize_w=imgsize_w
            )
        utils.EzPickle.__init__(self)

    def gripper_action_gen(self, a):
        """Append mirrored gripper commands to the arm action ``a``.

        The last element of ``a`` drives all four gripper joints with
        alternating signs so the fingers move symmetrically.
        """
        self.gripper_action = np.array([a[-1], -a[-1], a[-1], -a[-1]])
        return np.concatenate((a, self.gripper_action))

    def physics_randomization(self):
        """Domain-randomize body masses, joint damping and actuator gains.

        Nominal values are kept in ``self.model_origin``; each call rescales
        them by a Gaussian-distributed factor (the second argument is
        presumably the spread — confirm in ``sample_gaussiannormal``).
        """
        self.model.body_mass[1:18] = self.sample_gaussiannormal(self.model_origin.body_mass[1:18], 0.2)
        self.model.dof_damping[0:12] = self.sample_gaussiannormal(self.model_origin.dof_damping[0:12], 0.2)
        self.model.actuator_gainprm[:, 0] = self.sample_gaussiannormal(self.model_origin.actuator_gainprm[:, 0], 0.1)

    def set_base_pos(self, pos_list=(0.6, 0.35, 0.7)):
        """Place the robot base at ``pos_list`` (x, y, z).

        The default is an immutable tuple (rather than the original list) to
        avoid the shared mutable-default-argument pitfall; any 3-element
        sequence of floats is accepted, so callers are unaffected.
        """
        for i, x in enumerate(pos_list):
            self.model.body_pos[1, i] = x

    def _reset_model(self, gg=2, hooked=False, untucked=False):
        """Reset the arm to a randomized, collision-free pose.

        Args:
            gg: number of goal coordinates stored in the trailing qpos slots.
            hooked: accepted for interface compatibility; unused here.
            untucked: accepted for interface compatibility; unused here.

        Returns:
            The observation from ``self._get_obs()`` after the reset.
        """
        def randomize():
            # Perturb the home pose of the 7-DoF arm (joint order: base_roll,
            # shoulder_lift, shoulder_roll, elbow_lift, elbow_roll,
            # wrist_lift, wrist_roll).
            qpos = self.init_qpos
            qpos[0] = 0.0 + uniform(-0.1, 0.1)      # base_roll_joint
            qpos[1] = -2.310 + uniform(-0.0, 0.1)   # shoulder_lift_joint
            qpos[2] = 1.571 + uniform(-0.1, 0.1)    # shoulder_roll_joint
            qpos[3] = -0.750 + uniform(-0.1, 0.1)   # elbow_lift_joint
            qpos[4] = -1.571 + uniform(-0.1, 0.1)   # elbow_roll_joint
            qpos[5] = 0.0 + uniform(-0.1, 0.1)      # wrist_lift_joint
            qpos[6] = 0.0 + uniform(-0.1, 0.1)      # wrist_roll_joint
            if self.xml_path.find("pull") > -1:
                # Pull doors: sample a 2-D goal offset; the hinge side
                # decides which x-range is reachable.
                self.goal = self.np_random.uniform(low=-0.15, high=0.15, size=gg)
                if self.xml_path.find("lefthinge") > -1:
                    self.goal[0] = np.random.uniform(-0.15, 0.05)
                    self.goal[1] = np.random.uniform(-0.15, 0.15)
                else:
                    self.goal[0] = np.random.uniform(-0.05, 0.15)
                    self.goal[1] = np.random.uniform(-0.15, 0.15)
            else:
                self.goal = np.zeros(gg)
                self.goal[0] = np.random.uniform(-0.15, 0.15)
            # Zero the non-robot joints and store the goal in the trailing
            # qpos slots, then push the state into the simulator.
            qpos[self.nn:-gg] = 0
            qpos[-gg:] = self.goal
            qvel = self.init_qvel
            self.set_state(qpos, qvel)

        # Resample until the pose produces no contacts in the simulator.
        collision = True
        while collision:
            randomize()
            collision = self.sim.data.ncon > 0
        if self.unity:
            # Mirror the new pose into the Unity renderer.
            self.remote.setqpos(self.sim.data.qpos)
        return self._get_obs()

    def get_robot_joints(self):
        """Return the proprioceptive robot state as a flat array.

        In IK mode: fingertip target position, wrist quaternion, gripper
        joint position, and linear/angular wrist velocities. Otherwise: the
        first ``self.nn`` joint positions followed by their velocities.
        """
        if self.ik_control:
            return np.concatenate([
                self.get_finger_target(),
                self.get_finger_quat(),
                self.get_gripper_pos(),
                self.get_finger_vel(),
                self.get_finger_angvel(),
            ])
        return np.concatenate([
            self.sim.data.qpos.flat[:self.nn],
            self.sim.data.qvel.flat[:self.nn]
        ])

    def get_finger_target(self):
        """Midpoint of the two fingertip geoms (the grasp point)."""
        return (self.sim.data.get_geom_xpos("fingerleft2")
                + self.sim.data.get_geom_xpos("fingerright2")) / 2.0

    def get_base_pos(self):
        """World position of the robot base link."""
        return self.sim.data.get_body_xpos("robotbase_link")

    def get_finger_ori(self):
        """Wrist orientation as Euler angles."""
        return quat2euler(self.sim.data.get_body_xquat("robotwrist_rolllink"))

    def get_finger_quat(self):
        """Wrist orientation as a quaternion."""
        return self.sim.data.get_body_xquat("robotwrist_rolllink")

    def get_finger_vel(self):
        """Linear velocity of the wrist body."""
        return self.sim.data.get_body_xvelp("robotwrist_rolllink")

    def get_finger_angvel(self):
        """Angular velocity of the wrist body."""
        return self.sim.data.get_body_xvelr("robotwrist_rolllink")
    def get_gripper_pos(self):
        # Right-gripper joint position wrapped in a 1-element array
        # (trailing "| [" is the dataset column separator, not Python).
        return np.array([self.sim.data.get_joint_qpos("right_gripper_joint")]) | [
"random.uniform",
"numpy.array",
"numpy.zeros",
"gym.utils.EzPickle.__init__",
"numpy.random.randint",
"numpy.concatenate",
"numpy.random.uniform"
] | [((845, 874), 'gym.utils.EzPickle.__init__', 'utils.EzPickle.__init__', (['self'], {}), '(self)\n', (868, 874), False, 'from gym import utils, spaces\n'), ((947, 987), 'numpy.array', 'np.array', (['[a[-1], -a[-1], a[-1], -a[-1]]'], {}), '([a[-1], -a[-1], a[-1], -a[-1]])\n', (955, 987), True, 'import numpy as np\n'), ((1004, 1044), 'numpy.concatenate', 'np.concatenate', (['(a, self.gripper_action)'], {}), '((a, self.gripper_action))\n', (1018, 1044), True, 'import numpy as np\n'), ((6588, 6679), 'numpy.concatenate', 'np.concatenate', (['[self.sim.data.qpos.flat[:self.nn], self.sim.data.qvel.flat[:self.nn]]'], {}), '([self.sim.data.qpos.flat[:self.nn], self.sim.data.qvel.flat[\n :self.nn]])\n', (6602, 6679), True, 'import numpy as np\n'), ((8612, 8641), 'gym.utils.EzPickle.__init__', 'utils.EzPickle.__init__', (['self'], {}), '(self)\n', (8635, 8641), False, 'from gym import utils, spaces\n'), ((8714, 8754), 'numpy.array', 'np.array', (['[a[-1], -a[-1], a[-1], -a[-1]]'], {}), '([a[-1], -a[-1], a[-1], -a[-1]])\n', (8722, 8754), True, 'import numpy as np\n'), ((8771, 8811), 'numpy.concatenate', 'np.concatenate', (['(a, self.gripper_action)'], {}), '((a, self.gripper_action))\n', (8785, 8811), True, 'import numpy as np\n'), ((3704, 3716), 'numpy.zeros', 'np.zeros', (['gg'], {}), '(gg)\n', (3712, 3716), True, 'import numpy as np\n'), ((3744, 3774), 'numpy.random.uniform', 'np.random.uniform', (['(-0.15)', '(0.15)'], {}), '(-0.15, 0.15)\n', (3761, 3774), True, 'import numpy as np\n'), ((12867, 12958), 'numpy.concatenate', 'np.concatenate', (['[self.sim.data.qpos.flat[:self.nn], self.sim.data.qvel.flat[:self.nn]]'], {}), '([self.sim.data.qpos.flat[:self.nn], self.sim.data.qvel.flat[\n :self.nn]])\n', (12881, 12958), True, 'import numpy as np\n'), ((1833, 1861), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(3.13)'], {}), '(0.0, 3.13)\n', (1850, 1861), True, 'import numpy as np\n'), ((1944, 1972), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', 
'(3.13)'], {}), '(0.0, 3.13)\n', (1961, 1972), True, 'import numpy as np\n'), ((3435, 3465), 'numpy.random.uniform', 'np.random.uniform', (['(-0.15)', '(0.05)'], {}), '(-0.15, 0.05)\n', (3452, 3465), True, 'import numpy as np\n'), ((3496, 3526), 'numpy.random.uniform', 'np.random.uniform', (['(-0.15)', '(0.15)'], {}), '(-0.15, 0.15)\n', (3513, 3526), True, 'import numpy as np\n'), ((3575, 3605), 'numpy.random.uniform', 'np.random.uniform', (['(-0.05)', '(0.15)'], {}), '(-0.05, 0.15)\n', (3592, 3605), True, 'import numpy as np\n'), ((3636, 3666), 'numpy.random.uniform', 'np.random.uniform', (['(-0.15)', '(0.15)'], {}), '(-0.15, 0.15)\n', (3653, 3666), True, 'import numpy as np\n'), ((4003, 4026), 'numpy.array', 'np.array', (['[1.0, 0, 1.2]'], {}), '([1.0, 0, 1.2])\n', (4011, 4026), True, 'import numpy as np\n'), ((10835, 10853), 'random.uniform', 'uniform', (['(-0.1)', '(0.1)'], {}), '(-0.1, 0.1)\n', (10842, 10853), False, 'from random import uniform, randint, randrange\n'), ((10907, 10925), 'random.uniform', 'uniform', (['(-0.0)', '(0.1)'], {}), '(-0.0, 0.1)\n', (10914, 10925), False, 'from random import uniform, randint, randrange\n'), ((10983, 11001), 'random.uniform', 'uniform', (['(-0.1)', '(0.1)'], {}), '(-0.1, 0.1)\n', (10990, 11001), False, 'from random import uniform, randint, randrange\n'), ((11059, 11077), 'random.uniform', 'uniform', (['(-0.1)', '(0.1)'], {}), '(-0.1, 0.1)\n', (11066, 11077), False, 'from random import uniform, randint, randrange\n'), ((11132, 11150), 'random.uniform', 'uniform', (['(-0.1)', '(0.1)'], {}), '(-0.1, 0.1)\n', (11139, 11150), False, 'from random import uniform, randint, randrange\n'), ((11203, 11221), 'random.uniform', 'uniform', (['(-0.1)', '(0.1)'], {}), '(-0.1, 0.1)\n', (11210, 11221), False, 'from random import uniform, randint, randrange\n'), ((11274, 11292), 'random.uniform', 'uniform', (['(-0.1)', '(0.1)'], {}), '(-0.1, 0.1)\n', (11281, 11292), False, 'from random import uniform, randint, randrange\n'), ((11825, 
11837), 'numpy.zeros', 'np.zeros', (['gg'], {}), '(gg)\n', (11833, 11837), True, 'import numpy as np\n'), ((11869, 11899), 'numpy.random.uniform', 'np.random.uniform', (['(-0.15)', '(0.15)'], {}), '(-0.15, 0.15)\n', (11886, 11899), True, 'import numpy as np\n'), ((2046, 2064), 'random.uniform', 'uniform', (['(-0.0)', '(0.0)'], {}), '(-0.0, 0.0)\n', (2053, 2064), False, 'from random import uniform, randint, randrange\n'), ((2114, 2133), 'random.uniform', 'uniform', (['(-0.0)', '(-0.0)'], {}), '(-0.0, -0.0)\n', (2121, 2133), False, 'from random import uniform, randint, randrange\n'), ((2182, 2206), 'random.uniform', 'uniform', (['(-2.3412)', '(3.3999)'], {}), '(-2.3412, 3.3999)\n', (2189, 2206), False, 'from random import uniform, randint, randrange\n'), ((2257, 2276), 'random.uniform', 'uniform', (['(-2.2944)', '(0)'], {}), '(-2.2944, 0)\n', (2264, 2276), False, 'from random import uniform, randint, randrange\n'), ((2336, 2360), 'random.uniform', 'uniform', (['(-2.6761)', '(2.6761)'], {}), '(-2.6761, 2.6761)\n', (2343, 2360), False, 'from random import uniform, randint, randrange\n'), ((2415, 2434), 'random.uniform', 'uniform', (['(-2.2944)', '(0)'], {}), '(-2.2944, 0)\n', (2422, 2434), False, 'from random import uniform, randint, randrange\n'), ((2491, 2515), 'random.uniform', 'uniform', (['(-2.6761)', '(2.6761)'], {}), '(-2.6761, 2.6761)\n', (2498, 2515), False, 'from random import uniform, randint, randrange\n'), ((2567, 2586), 'random.uniform', 'uniform', (['(-2.2944)', '(0)'], {}), '(-2.2944, 0)\n', (2574, 2586), False, 'from random import uniform, randint, randrange\n'), ((2643, 2667), 'random.uniform', 'uniform', (['(-2.6761)', '(2.6761)'], {}), '(-2.6761, 2.6761)\n', (2650, 2667), False, 'from random import uniform, randint, randrange\n'), ((2767, 2785), 'random.uniform', 'uniform', (['(-0.1)', '(0.1)'], {}), '(-0.1, 0.1)\n', (2774, 2785), False, 'from random import uniform, randint, randrange\n'), ((2836, 2854), 'random.uniform', 'uniform', (['(-0.1)', 
'(0.1)'], {}), '(-0.1, 0.1)\n', (2843, 2854), False, 'from random import uniform, randint, randrange\n'), ((2909, 2927), 'random.uniform', 'uniform', (['(-0.1)', '(0.1)'], {}), '(-0.1, 0.1)\n', (2916, 2927), False, 'from random import uniform, randint, randrange\n'), ((2982, 3000), 'random.uniform', 'uniform', (['(-0.1)', '(0.1)'], {}), '(-0.1, 0.1)\n', (2989, 3000), False, 'from random import uniform, randint, randrange\n'), ((3052, 3070), 'random.uniform', 'uniform', (['(-0.1)', '(0.1)'], {}), '(-0.1, 0.1)\n', (3059, 3070), False, 'from random import uniform, randint, randrange\n'), ((3122, 3140), 'random.uniform', 'uniform', (['(-0.1)', '(0.1)'], {}), '(-0.1, 0.1)\n', (3129, 3140), False, 'from random import uniform, randint, randrange\n'), ((3192, 3210), 'random.uniform', 'uniform', (['(-0.1)', '(0.1)'], {}), '(-0.1, 0.1)\n', (3199, 3210), False, 'from random import uniform, randint, randrange\n'), ((4207, 4233), 'numpy.array', 'np.array', (['[0.13, 0.0, 0.0]'], {}), '([0.13, 0.0, 0.0])\n', (4215, 4233), True, 'import numpy as np\n'), ((5179, 5201), 'numpy.random.randint', 'np.random.randint', (['(100)'], {}), '(100)\n', (5196, 5201), True, 'import numpy as np\n'), ((5458, 5509), 'numpy.concatenate', 'np.concatenate', (['(hand_init_pos_3D, hand_ori_random)'], {}), '((hand_init_pos_3D, hand_ori_random))\n', (5472, 5509), True, 'import numpy as np\n'), ((5906, 5928), 'numpy.random.randint', 'np.random.randint', (['(100)'], {}), '(100)\n', (5923, 5928), True, 'import numpy as np\n'), ((6188, 6239), 'numpy.concatenate', 'np.concatenate', (['(hand_init_pos_3D, hand_ori_random)'], {}), '((hand_init_pos_3D, hand_ori_random))\n', (6202, 6239), True, 'import numpy as np\n'), ((6322, 6354), 'numpy.array', 'np.array', (['[1.0, -1.0, 1.0, -1.0]'], {}), '([1.0, -1.0, 1.0, -1.0])\n', (6330, 6354), True, 'import numpy as np\n'), ((11532, 11562), 'numpy.random.uniform', 'np.random.uniform', (['(-0.15)', '(0.05)'], {}), '(-0.15, 0.05)\n', (11549, 11562), True, 'import numpy as 
np\n'), ((11597, 11627), 'numpy.random.uniform', 'np.random.uniform', (['(-0.15)', '(0.15)'], {}), '(-0.15, 0.15)\n', (11614, 11627), True, 'import numpy as np\n'), ((11684, 11714), 'numpy.random.uniform', 'np.random.uniform', (['(-0.05)', '(0.15)'], {}), '(-0.05, 0.15)\n', (11701, 11714), True, 'import numpy as np\n'), ((11749, 11779), 'numpy.random.uniform', 'np.random.uniform', (['(-0.15)', '(0.15)'], {}), '(-0.15, 0.15)\n', (11766, 11779), True, 'import numpy as np\n'), ((4414, 4439), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (4422, 4439), True, 'import numpy as np\n'), ((5293, 5320), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(0.4)'], {}), '(0.0, 0.4)\n', (5310, 5320), True, 'import numpy as np\n'), ((5392, 5421), 'numpy.random.uniform', 'np.random.uniform', (['(2.74)', '(3.14)'], {}), '(2.74, 3.14)\n', (5409, 5421), True, 'import numpy as np\n'), ((6020, 6048), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(0.01)'], {}), '(0.0, 0.01)\n', (6037, 6048), True, 'import numpy as np\n'), ((6120, 6149), 'numpy.random.uniform', 'np.random.uniform', (['(3.13)', '(3.14)'], {}), '(3.13, 3.14)\n', (6137, 6149), True, 'import numpy as np\n'), ((4620, 4646), 'numpy.array', 'np.array', (['[0.13, 0.0, 0.0]'], {}), '([0.13, 0.0, 0.0])\n', (4628, 4646), True, 'import numpy as np\n'), ((4840, 4870), 'numpy.random.uniform', 'np.random.uniform', (['(-0.01)', '(0.01)'], {}), '(-0.01, 0.01)\n', (4857, 4870), True, 'import numpy as np\n'), ((4871, 4903), 'numpy.random.uniform', 'np.random.uniform', (['(-0.005)', '(0.005)'], {}), '(-0.005, 0.005)\n', (4888, 4903), True, 'import numpy as np\n'), ((4904, 4934), 'numpy.random.uniform', 'np.random.uniform', (['(-0.06)', '(0.06)'], {}), '(-0.06, 0.06)\n', (4921, 4934), True, 'import numpy as np\n'), ((5631, 5661), 'numpy.random.uniform', 'np.random.uniform', (['(-0.06)', '(0.06)'], {}), '(-0.06, 0.06)\n', (5648, 5661), True, 'import numpy as np\n')] |
import numpy as np
from BDMesh.Mesh1D import Mesh1D

# Demo: build a 1-D mesh on [0, 10] with zero boundary conditions
# and inspect / mutate its attributes.
mesh_a = Mesh1D(0.0, 10.0, 0.0, 0.0)
print(mesh_a)
print(mesh_a.physical_boundary_1)

# The left physical boundary can be moved after construction.
mesh_a.physical_boundary_1 = 2
print(mesh_a.physical_boundary_1)

# A second mesh on [2, 10] to exercise equality comparisons.
mesh_b = Mesh1D(2.0, 10.0, 0.0, 0.0)
print(mesh_a)
print(mesh_b)
print(mesh_a == mesh_b)   # mesh-to-mesh comparison
print(mesh_a == 2)        # comparison with a non-mesh object

# Replace the local node grid with 11 evenly spaced points on [0, 1],
# then inspect the derived node count and physical node positions.
print(mesh_a.local_nodes)
mesh_a.local_nodes = np.linspace(0.0, 1.0, num=11, endpoint=True, dtype=np.float64)
print(mesh_a.local_nodes)
print(mesh_a.num)
print(mesh_a.physical_nodes)
| [
"BDMesh.Mesh1D.Mesh1D",
"numpy.linspace"
] | [((63, 90), 'BDMesh.Mesh1D.Mesh1D', 'Mesh1D', (['(0.0)', '(10.0)', '(0.0)', '(0.0)'], {}), '(0.0, 10.0, 0.0, 0.0)\n', (69, 90), False, 'from BDMesh.Mesh1D import Mesh1D\n'), ((217, 244), 'BDMesh.Mesh1D.Mesh1D', 'Mesh1D', (['(2.0)', '(10.0)', '(0.0)', '(0.0)'], {}), '(2.0, 10.0, 0.0, 0.0)\n', (223, 244), False, 'from BDMesh.Mesh1D import Mesh1D\n'), ((368, 430), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)'], {'num': '(11)', 'endpoint': '(True)', 'dtype': 'np.float64'}), '(0.0, 1.0, num=11, endpoint=True, dtype=np.float64)\n', (379, 430), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.