code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import numpy.testing as npt
import pytest
import freud
matplotlib.use("agg")
class TestGaussianDensity:
    """Tests for freud.density.GaussianDensity (Gaussian-smeared density grids)."""

    def test_random_point_with_cell_list(self):
        """Check grid shape and that the density autocorrelation peaks at the centre bin."""
        # scipy is optional for this test; skip cleanly if absent.
        fftpack = pytest.importorskip("scipy.fftpack")
        fft = fftpack.fft
        fftshift = fftpack.fftshift
        width = 20
        r_max = 10.0
        sigma = 0.1
        num_points = 10000
        box_size = r_max * 3.1
        box, points = freud.data.make_random_system(box_size, num_points, is2D=True)
        # The width argument accepts an int, a tuple, or a list; all three
        # forms must produce the same (width, width) grid.
        for w in (width, (width, width), [width, width]):
            gd = freud.density.GaussianDensity(w, r_max, sigma)
            # Test access: properties must raise before compute() is called.
            with pytest.raises(AttributeError):
                gd.box
            with pytest.raises(AttributeError):
                gd.density
            gd.compute((box, points))
            # Test access: properties must be readable after compute().
            gd.box
            gd.density
            # Verify the output dimensions are correct
            assert gd.density.shape == (width, width)
            assert np.prod(gd.density.shape) == np.prod(gd.width)
            # Autocorrelation via FFT: |F|^2 back-shifted should peak at the
            # centre bin (width // 2, width // 2) for a random system.
            myDiff = gd.density
            myFFT = fft(fft(myDiff[:, :], axis=1), axis=0)
            myDiff = (myFFT * np.conj(myFFT)).real
            myDiff = fftshift(myDiff)[:, :]
            npt.assert_equal(
                np.where(myDiff == np.max(myDiff)),
                (np.array([width // 2]), np.array([width // 2])),
            )

    def test_change_box_dimension(self):
        """Recomputing with a box of different dimensionality must raise ValueError."""
        width = 20
        r_max = 9.9
        sigma = 0.01
        num_points = 100
        box_size = r_max * 3.1
        # test that a 3D system computed after computing a 2D system will fail
        box, points = freud.data.make_random_system(box_size, num_points, is2D=True)
        gd = freud.density.GaussianDensity(width, r_max, sigma)
        gd.compute((box, points))
        test_box, test_points = freud.data.make_random_system(
            box_size, num_points, is2D=False
        )
        with pytest.raises(ValueError):
            gd.compute((test_box, test_points))
        # test that a 2D system computed after computing a 3D system will fail
        box, points = freud.data.make_random_system(box_size, num_points, is2D=False)
        gd = freud.density.GaussianDensity(width, r_max, sigma)
        gd.compute((box, points))
        test_box, test_points = freud.data.make_random_system(
            box_size, num_points, is2D=True
        )
        with pytest.raises(ValueError):
            gd.compute((test_box, test_points))

    def test_sum_2d(self):
        # Ensure that each point's Gaussian sums to 1
        width = 20
        r_max = 9.9
        sigma = 2
        box_size = width
        gd = freud.density.GaussianDensity(width, r_max, sigma)
        for num_points in [1, 10, 100]:
            box, points = freud.data.make_random_system(box_size, num_points, is2D=True)
            gd.compute(system=(box, points))
            # This has discretization error as well as single-precision error
            assert np.isclose(np.sum(gd.density), num_points, rtol=1e-4)

    def test_sum_3d(self):
        # Ensure that each point's Gaussian sums to 1
        width = 20
        r_max = 9.9
        sigma = 2
        box_size = width
        gd = freud.density.GaussianDensity(width, r_max, sigma)
        for num_points in [1, 10, 100]:
            box, points = freud.data.make_random_system(
                box_size, num_points, is2D=False
            )
            gd.compute(system=(box, points))
            # This has discretization error as well as single-precision error
            assert np.isclose(np.sum(gd.density), num_points, rtol=1e-4)

    def test_sum_values_2d(self):
        # Ensure that the Gaussian convolution sums to the sum of the values
        width = 20
        r_max = 9.9
        sigma = 2
        box_size = width
        gd = freud.density.GaussianDensity(width, r_max, sigma)
        for num_points in [1, 10, 100]:
            system = freud.data.make_random_system(box_size, num_points, is2D=True)
            values = np.random.rand(num_points)
            gd.compute(system, values)
            # This has discretization error as well as single-precision error
            assert np.isclose(np.sum(gd.density), np.sum(values), rtol=1e-4)

    def test_sum_values_3d(self):
        # Ensure that the Gaussian convolution sums to the sum of the values
        width = 20
        r_max = 9.9
        sigma = 2
        box_size = width
        gd = freud.density.GaussianDensity(width, r_max, sigma)
        for num_points in [1, 10, 100]:
            system = freud.data.make_random_system(box_size, num_points, is2D=False)
            values = np.random.rand(num_points)
            gd.compute(system, values)
            # This has discretization error as well as single-precision error
            assert np.isclose(np.sum(gd.density), np.sum(values), rtol=1e-4)

    def test_repr(self):
        """repr() must round-trip through eval() for both width signatures."""
        gd = freud.density.GaussianDensity(100, 10.0, 0.1)
        assert str(gd) == str(eval(repr(gd)))
        # Use both signatures
        gd3 = freud.density.GaussianDensity((98, 99, 100), 10.0, 0.1)
        assert str(gd3) == str(eval(repr(gd3)))

    def test_repr_png(self):
        """Plotting must fail before compute() and succeed after, in 2D and 3D."""
        width = 20
        r_max = 2.0
        sigma = 0.01
        num_points = 100
        box_size = r_max * 3.1
        box, points = freud.data.make_random_system(box_size, num_points, is2D=True)
        gd = freud.density.GaussianDensity(width, r_max, sigma)
        with pytest.raises(AttributeError):
            gd.plot()
        assert gd._repr_png_() is None
        gd.compute((box, points))
        gd.plot()
        gd = freud.density.GaussianDensity(width, r_max, sigma)
        test_box = freud.box.Box.cube(box_size)
        gd.compute((test_box, points))
        gd.plot()
        # NOTE(review): _repr_png_ is asserted to be None even after a
        # successful compute()+plot() here — confirm against the freud API
        # whether None is really expected for the 3D case.
        assert gd._repr_png_() is None
        plt.close("all")
| [
"numpy.prod",
"numpy.random.rand",
"freud.data.make_random_system",
"matplotlib.use",
"numpy.conj",
"numpy.max",
"freud.box.Box.cube",
"matplotlib.pyplot.close",
"numpy.sum",
"pytest.importorskip",
"pytest.raises",
"numpy.array",
"freud.density.GaussianDensity"
] | [((126, 147), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (140, 147), False, 'import matplotlib\n'), ((243, 279), 'pytest.importorskip', 'pytest.importorskip', (['"""scipy.fftpack"""'], {}), "('scipy.fftpack')\n", (262, 279), False, 'import pytest\n'), ((483, 545), 'freud.data.make_random_system', 'freud.data.make_random_system', (['box_size', 'num_points'], {'is2D': '(True)'}), '(box_size, num_points, is2D=True)\n', (512, 545), False, 'import freud\n'), ((1734, 1796), 'freud.data.make_random_system', 'freud.data.make_random_system', (['box_size', 'num_points'], {'is2D': '(True)'}), '(box_size, num_points, is2D=True)\n', (1763, 1796), False, 'import freud\n'), ((1810, 1860), 'freud.density.GaussianDensity', 'freud.density.GaussianDensity', (['width', 'r_max', 'sigma'], {}), '(width, r_max, sigma)\n', (1839, 1860), False, 'import freud\n'), ((1928, 1991), 'freud.data.make_random_system', 'freud.data.make_random_system', (['box_size', 'num_points'], {'is2D': '(False)'}), '(box_size, num_points, is2D=False)\n', (1957, 1991), False, 'import freud\n'), ((2204, 2267), 'freud.data.make_random_system', 'freud.data.make_random_system', (['box_size', 'num_points'], {'is2D': '(False)'}), '(box_size, num_points, is2D=False)\n', (2233, 2267), False, 'import freud\n'), ((2281, 2331), 'freud.density.GaussianDensity', 'freud.density.GaussianDensity', (['width', 'r_max', 'sigma'], {}), '(width, r_max, sigma)\n', (2310, 2331), False, 'import freud\n'), ((2399, 2461), 'freud.data.make_random_system', 'freud.data.make_random_system', (['box_size', 'num_points'], {'is2D': '(True)'}), '(box_size, num_points, is2D=True)\n', (2428, 2461), False, 'import freud\n'), ((2749, 2799), 'freud.density.GaussianDensity', 'freud.density.GaussianDensity', (['width', 'r_max', 'sigma'], {}), '(width, r_max, sigma)\n', (2778, 2799), False, 'import freud\n'), ((3302, 3352), 'freud.density.GaussianDensity', 'freud.density.GaussianDensity', (['width', 'r_max', 'sigma'], {}), 
'(width, r_max, sigma)\n', (3331, 3352), False, 'import freud\n'), ((3916, 3966), 'freud.density.GaussianDensity', 'freud.density.GaussianDensity', (['width', 'r_max', 'sigma'], {}), '(width, r_max, sigma)\n', (3945, 3966), False, 'import freud\n'), ((4540, 4590), 'freud.density.GaussianDensity', 'freud.density.GaussianDensity', (['width', 'r_max', 'sigma'], {}), '(width, r_max, sigma)\n', (4569, 4590), False, 'import freud\n'), ((4997, 5042), 'freud.density.GaussianDensity', 'freud.density.GaussianDensity', (['(100)', '(10.0)', '(0.1)'], {}), '(100, 10.0, 0.1)\n', (5026, 5042), False, 'import freud\n'), ((5134, 5189), 'freud.density.GaussianDensity', 'freud.density.GaussianDensity', (['(98, 99, 100)', '(10.0)', '(0.1)'], {}), '((98, 99, 100), 10.0, 0.1)\n', (5163, 5189), False, 'import freud\n'), ((5406, 5468), 'freud.data.make_random_system', 'freud.data.make_random_system', (['box_size', 'num_points'], {'is2D': '(True)'}), '(box_size, num_points, is2D=True)\n', (5435, 5468), False, 'import freud\n'), ((5482, 5532), 'freud.density.GaussianDensity', 'freud.density.GaussianDensity', (['width', 'r_max', 'sigma'], {}), '(width, r_max, sigma)\n', (5511, 5532), False, 'import freud\n'), ((5706, 5756), 'freud.density.GaussianDensity', 'freud.density.GaussianDensity', (['width', 'r_max', 'sigma'], {}), '(width, r_max, sigma)\n', (5735, 5756), False, 'import freud\n'), ((5776, 5804), 'freud.box.Box.cube', 'freud.box.Box.cube', (['box_size'], {}), '(box_size)\n', (5794, 5804), False, 'import freud\n'), ((5909, 5925), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (5918, 5925), True, 'import matplotlib.pyplot as plt\n'), ((621, 667), 'freud.density.GaussianDensity', 'freud.density.GaussianDensity', (['w', 'r_max', 'sigma'], {}), '(w, r_max, sigma)\n', (650, 667), False, 'import freud\n'), ((2027, 2052), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2040, 2052), False, 'import pytest\n'), ((2497, 2522), 'pytest.raises', 
'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2510, 2522), False, 'import pytest\n'), ((2866, 2928), 'freud.data.make_random_system', 'freud.data.make_random_system', (['box_size', 'num_points'], {'is2D': '(True)'}), '(box_size, num_points, is2D=True)\n', (2895, 2928), False, 'import freud\n'), ((3419, 3482), 'freud.data.make_random_system', 'freud.data.make_random_system', (['box_size', 'num_points'], {'is2D': '(False)'}), '(box_size, num_points, is2D=False)\n', (3448, 3482), False, 'import freud\n'), ((4028, 4090), 'freud.data.make_random_system', 'freud.data.make_random_system', (['box_size', 'num_points'], {'is2D': '(True)'}), '(box_size, num_points, is2D=True)\n', (4057, 4090), False, 'import freud\n'), ((4112, 4138), 'numpy.random.rand', 'np.random.rand', (['num_points'], {}), '(num_points)\n', (4126, 4138), True, 'import numpy as np\n'), ((4652, 4715), 'freud.data.make_random_system', 'freud.data.make_random_system', (['box_size', 'num_points'], {'is2D': '(False)'}), '(box_size, num_points, is2D=False)\n', (4681, 4715), False, 'import freud\n'), ((4737, 4763), 'numpy.random.rand', 'np.random.rand', (['num_points'], {}), '(num_points)\n', (4751, 4763), True, 'import numpy as np\n'), ((5547, 5576), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (5560, 5576), False, 'import pytest\n'), ((712, 741), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (725, 741), False, 'import pytest\n'), ((783, 812), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (796, 812), False, 'import pytest\n'), ((1078, 1103), 'numpy.prod', 'np.prod', (['gd.density.shape'], {}), '(gd.density.shape)\n', (1085, 1103), True, 'import numpy as np\n'), ((1107, 1124), 'numpy.prod', 'np.prod', (['gd.width'], {}), '(gd.width)\n', (1114, 1124), True, 'import numpy as np\n'), ((3082, 3100), 'numpy.sum', 'np.sum', (['gd.density'], {}), '(gd.density)\n', (3088, 3100), True, 'import numpy as 
np\n'), ((3666, 3684), 'numpy.sum', 'np.sum', (['gd.density'], {}), '(gd.density)\n', (3672, 3684), True, 'import numpy as np\n'), ((4286, 4304), 'numpy.sum', 'np.sum', (['gd.density'], {}), '(gd.density)\n', (4292, 4304), True, 'import numpy as np\n'), ((4306, 4320), 'numpy.sum', 'np.sum', (['values'], {}), '(values)\n', (4312, 4320), True, 'import numpy as np\n'), ((4911, 4929), 'numpy.sum', 'np.sum', (['gd.density'], {}), '(gd.density)\n', (4917, 4929), True, 'import numpy as np\n'), ((4931, 4945), 'numpy.sum', 'np.sum', (['values'], {}), '(values)\n', (4937, 4945), True, 'import numpy as np\n'), ((1247, 1261), 'numpy.conj', 'np.conj', (['myFFT'], {}), '(myFFT)\n', (1254, 1261), True, 'import numpy as np\n'), ((1411, 1433), 'numpy.array', 'np.array', (['[width // 2]'], {}), '([width // 2])\n', (1419, 1433), True, 'import numpy as np\n'), ((1435, 1457), 'numpy.array', 'np.array', (['[width // 2]'], {}), '([width // 2])\n', (1443, 1457), True, 'import numpy as np\n'), ((1377, 1391), 'numpy.max', 'np.max', (['myDiff'], {}), '(myDiff)\n', (1383, 1391), True, 'import numpy as np\n')] |
import numpy as np
import os
import warnings
from Input import Input
class InputFromData(Input):
    """
    Draws random samples sequentially from a numeric data file.
    """

    def __init__(self, input_filename, delimiter=" ", skip_header=0,
                 shuffle_data=True):
        """
        :param input_filename: path of file containing data to be sampled.
        :type input_filename: string
        :param delimiter: Character used to separate data in data file.
            Can also be an integer to specify width of each entry.
        :type delimiter: str or int
        :param skip_header: Number of header rows to skip in data file.
        :type skip_header: int
        :param shuffle_data: Whether or not to randomly shuffle data during
            initialization.
        :type shuffle_data: bool
        """
        if not os.path.isfile(input_filename):
            raise IOError("input_filename must refer to a file.")
        self._data = np.genfromtxt(input_filename,
                                   delimiter=delimiter,
                                   skip_header=skip_header)
        # Reject files containing any unparseable (NaN) entries.
        if np.isnan(self._data).any():
            raise ValueError("Input data file contains invalid (NaN) entries.")
        # Normalize 1-D input to a 2-D array of single-column samples so the
        # output of draw_samples is always (num_samples, sample_size).
        if self._data.ndim == 1:
            self._data = self._data.reshape(self._data.shape[0], -1)
        if shuffle_data:
            np.random.shuffle(self._data)
        # Cursor marking where the next draw starts.
        self._index = 0

    def draw_samples(self, num_samples):
        """
        Returns an array of samples from the previously loaded file data.

        :param num_samples: Number of samples to be returned.
        :type num_samples: int
        :return: 2d ndarray of samples, each row being one sample.
            For one dimensional input data, this will have
            shape (num_samples, 1)
        """
        if not isinstance(num_samples, int):
            raise TypeError("num_samples must be an integer.")
        if num_samples <= 0:
            raise ValueError("num_samples must be a positive integer.")
        # Slice the requested window and advance the cursor past it.
        start = self._index
        self._index = start + num_samples
        sample = self._data[start: start + num_samples]
        available = sample.shape[0]
        # Warn (rather than fail) when the data set is exhausted early.
        if num_samples > available:
            message = "Only " + str(available) + " of the " + \
                      str(num_samples) + " requested samples are " + \
                      "available.\nEither provide more sample data " + \
                      "or increase epsilon to reduce sample size needed."
            warnings.warn(UserWarning(message))
        # Copy so callers cannot mutate the internal data buffer.
        return np.copy(sample)

    def reset_sampling(self):
        """
        Used to restart sampling from beginning of data set.
        """
        self._index = 0
| [
"numpy.copy",
"os.path.isfile",
"numpy.isnan",
"warnings.warn",
"numpy.genfromtxt",
"numpy.random.shuffle"
] | [((975, 1050), 'numpy.genfromtxt', 'np.genfromtxt', (['input_filename'], {'delimiter': 'delimiter', 'skip_header': 'skip_header'}), '(input_filename, delimiter=delimiter, skip_header=skip_header)\n', (988, 1050), True, 'import numpy as np\n'), ((2896, 2911), 'numpy.copy', 'np.copy', (['sample'], {}), '(sample)\n', (2903, 2911), True, 'import numpy as np\n'), ((855, 885), 'os.path.isfile', 'os.path.isfile', (['input_filename'], {}), '(input_filename)\n', (869, 885), False, 'import os\n'), ((1562, 1591), 'numpy.random.shuffle', 'np.random.shuffle', (['self._data'], {}), '(self._data)\n', (1579, 1591), True, 'import numpy as np\n'), ((2857, 2879), 'warnings.warn', 'warnings.warn', (['warning'], {}), '(warning)\n', (2870, 2879), False, 'import warnings\n'), ((1172, 1192), 'numpy.isnan', 'np.isnan', (['self._data'], {}), '(self._data)\n', (1180, 1192), True, 'import numpy as np\n')] |
'''
Created on Nov 29, 2020
@author: manik
'''
import numpy as np
import src.person_properties_util as idx
class Movement():
    """
    Class providing abstraction into each movement of the population
    """

    def update_persons(self, persons: np.ndarray, size: int,
                       speed: float = 0.1,
                       heading_update_chance: float = 0.02) -> np.ndarray:
        """
        Randomly updates/initializes the destination each person is headed to
        and corresponding speed randomly.

        Parameters
        ----------
        persons : np.ndarray
            The NumPy array containing the details of the persons to be
            updated.
        size : int
            The size of the array of the persons to be updated to.
        speed : float, optional
            Mean of the speed to be generated randomly, by default 0.1.
        heading_update_chance : float, optional
            The odds of updating the destination of each person, by default
            0.02.

        Returns
        -------
        np.ndarray
            The updated NumPy array with updated values
        """
        # For updating the x position
        # Generate a random array with update chance for each person in
        # the population
        update = np.random.random(size=(size,))
        # Get the persons in the population who have a lower or equal to
        # chance of getting updated in this epoch
        shp = update[update <= heading_update_chance].shape
        # Update the position for the direction in which they are heading
        persons[:, idx.x_dir][update <= heading_update_chance] = np.random \
            .normal(loc=0, scale=1/3, size=shp)
        # For updating the y position, do the same (fresh random draw, so the
        # x and y updates select independent subsets of the population)
        update = np.random.random(size=(size,))
        shp = update[update <= heading_update_chance].shape
        persons[:, idx.y_dir][update <= heading_update_chance] = np.random \
            .normal(loc=0, scale=1/3, size=shp)
        # Update the speed by generating a random normal distribution using
        # the argument speed as the parameter
        update = np.random.random(size=(size,))
        shp = update[update <= heading_update_chance].shape
        persons[:, idx.speed][update <= heading_update_chance] = np.random \
            .normal(loc=speed, scale=speed / 3, size=shp)
        # Clamp speeds to a sane range so nobody stalls or teleports.
        persons[:, idx.speed] = np.clip(persons[:, idx.speed], a_min=0.0005,
                                        a_max=0.01)
        # Return the updated array
        return persons

    def out_of_bounds(self, persons: np.ndarray, xbounds: list,
                      ybounds: list) -> np.ndarray:
        """
        Check if the individual is heading out of bounds of the specified
        bounds, and if so turn their direction back towards the interior.

        Parameters
        ----------
        persons : np.ndarray
            The NumPy array containing the details of the individuals
        xbounds : np.ndarray
            Per-person X-axis bounds; indexed as ``xbounds[:, 0]`` /
            ``xbounds[:, 1]``, so an (n, 2) array is expected even though
            the annotation says list.  NOTE(review): confirm callers pass
            an ndarray here.
        ybounds : np.ndarray
            Per-person Y-axis bounds, same (n, 2) layout as ``xbounds``.

        Returns
        -------
        np.ndarray
            The updated NumPy array with updated values
        """
        # NOTE(review): columns are hard-coded here (2=x pos, 3=y pos,
        # 4=x dir, 5=y dir) instead of using the idx constants used by
        # update_persons/update_pop — confirm against
        # src.person_properties_util and unify.
        # Store shape of list of people who are heading out of bounds based
        # on X bound [0]
        shp = persons[:, 4][(persons[:, 2] <= xbounds[:, 0]) &
                            (persons[:, 4] < 0)].shape
        # Update them randomly using a normal distribution
        persons[:, 4][(persons[:, 2] <= xbounds[:, 0]) &
                      (persons[:, 4] < 0)] = \
            np.clip(np.random.normal(loc=0.5, scale=0.5/3, size=shp),
                    a_min=0.05, a_max=1)
        # Store shape of list of people who are heading out of bounds based
        # on X bound [1]
        shp = persons[:, 4][(persons[:, 2] >= xbounds[:, 1]) &
                            (persons[:, 4] > 0)].shape
        # Update them randomly using a normal distribution
        persons[:, 4][(persons[:, 2] >= xbounds[:, 1]) &
                      (persons[:, 4] > 0)] = \
            np.clip(-np.random.normal(loc=0.5, scale=0.5/3, size=shp),
                    a_min=-1, a_max=-0.05)
        # Store shape of list of people who are heading out of bounds based
        # on Y bound [0]
        shp = persons[:, 5][(persons[:, 3] <= ybounds[:, 0]) &
                            (persons[:, 5] < 0)].shape
        # Update them randomly using a normal distribution
        persons[:, 5][(persons[:, 3] <= ybounds[:, 0]) &
                      (persons[:, 5] < 0)] = \
            np.clip(np.random.normal(loc=0.5, scale=0.5/3, size=shp),
                    a_min=0.05, a_max=1)
        # Store shape of list of people who are heading out of bounds based
        # on Y bound [1]
        shp = persons[:, 5][(persons[:, 3] >= ybounds[:, 1]) &
                            (persons[:, 5] > 0)].shape
        # Update them randomly using a normal distribution
        persons[:, 5][(persons[:, 3] >= ybounds[:, 1]) &
                      (persons[:, 5] > 0)] = \
            np.clip(-np.random.normal(loc=0.5, scale=0.5/3, size=shp),
                    a_min=-1, a_max=-0.05)
        return persons

    def update_pop(self, persons: np.ndarray):
        """
        Update function to move people physically in the graph.
        This function adds the X and Y direction value to the current postion
        of the individual to move them.

        Parameters
        ----------
        persons : np.ndarray
            The NumPy array containing the details of the persons to be updated

        Returns
        -------
        np.ndarray
            The updated NumPy array with updated values
        """
        # Only move people who are not in state 3 and are not social
        # distancing (social_distance == 0).
        filter = (persons[:, idx.current_state] != 3) & \
                 (persons[:, idx.social_distance] == 0)
        # x: position += direction * speed
        persons[:, 2][filter] = persons[:, 2][filter] + \
            (persons[:, 4][filter] * persons[:, 6][filter])
        # y: position += direction * speed
        persons[:, 3][filter] = persons[:, 3][filter] + \
            (persons[:, 5][filter] * persons[:, 6][filter])
        return persons
| [
"numpy.random.random",
"numpy.clip",
"numpy.random.normal"
] | [((1288, 1318), 'numpy.random.random', 'np.random.random', ([], {'size': '(size,)'}), '(size=(size,))\n', (1304, 1318), True, 'import numpy as np\n'), ((1643, 1689), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': '(1 / 3)', 'size': 'shp'}), '(loc=0, scale=1 / 3, size=shp)\n', (1659, 1689), True, 'import numpy as np\n'), ((1772, 1802), 'numpy.random.random', 'np.random.random', ([], {'size': '(size,)'}), '(size=(size,))\n', (1788, 1802), True, 'import numpy as np\n'), ((1928, 1974), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0)', 'scale': '(1 / 3)', 'size': 'shp'}), '(loc=0, scale=1 / 3, size=shp)\n', (1944, 1974), True, 'import numpy as np\n'), ((2128, 2158), 'numpy.random.random', 'np.random.random', ([], {'size': '(size,)'}), '(size=(size,))\n', (2144, 2158), True, 'import numpy as np\n'), ((2284, 2338), 'numpy.random.normal', 'np.random.normal', ([], {'loc': 'speed', 'scale': '(speed / 3)', 'size': 'shp'}), '(loc=speed, scale=speed / 3, size=shp)\n', (2300, 2338), True, 'import numpy as np\n'), ((2386, 2442), 'numpy.clip', 'np.clip', (['persons[:, idx.speed]'], {'a_min': '(0.0005)', 'a_max': '(0.01)'}), '(persons[:, idx.speed], a_min=0.0005, a_max=0.01)\n', (2393, 2442), True, 'import numpy as np\n'), ((3560, 3610), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.5)', 'scale': '(0.5 / 3)', 'size': 'shp'}), '(loc=0.5, scale=0.5 / 3, size=shp)\n', (3576, 3610), True, 'import numpy as np\n'), ((4552, 4602), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.5)', 'scale': '(0.5 / 3)', 'size': 'shp'}), '(loc=0.5, scale=0.5 / 3, size=shp)\n', (4568, 4602), True, 'import numpy as np\n'), ((4055, 4105), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.5)', 'scale': '(0.5 / 3)', 'size': 'shp'}), '(loc=0.5, scale=0.5 / 3, size=shp)\n', (4071, 4105), True, 'import numpy as np\n'), ((5047, 5097), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.5)', 'scale': '(0.5 / 3)', 'size': 'shp'}), 
'(loc=0.5, scale=0.5 / 3, size=shp)\n', (5063, 5097), True, 'import numpy as np\n')] |
import numpy as np
import sympy as sp
from devitoboundary.symbolics.symbols import a, n, n_max
def standard_stencil(deriv, space_order, offset=0., as_float=True):
    """
    Generate a stencil expression with standard weightings. Offset can be
    applied to this stencil to evaluate at non-node positions.

    Parameters
    ----------
    deriv : int
        The derivative order for the stencil
    space_order : int
        The space order of the discretization
    offset : float
        The offset at which the derivative is to be evaluated. In grid
        increments. Default is 0.
    as_float : bool
        Convert stencil to np.float32. Default is True.

    Returns
    -------
    stencil_expr : numpy.ndarray
        The stencil coefficients
    """
    # Sample points are centred on the evaluation position, shifted by the
    # requested offset, spanning space_order + 1 nodes.
    start = -offset - space_order / 2
    nodes = [start + step for step in range(space_order + 1)]
    # Last row of the last table holds the full-order weights for `deriv`.
    weights = sp.finite_diff_weights(deriv, nodes, 0)[-1][-1]
    dtype = np.float32 if as_float else object
    return np.array(weights, dtype=dtype)
def generic_function(val, deriv=0):
    """
    Returns specified derivative of a polynomial series. To be used in the
    place of functions for specification of boundary conditions.

    Parameters
    ----------
    val : Sympy symbol
        The variable of the function: x_b should be used.
    deriv : int
        The order of the derivative. Default is zero.
    """
    # Build a generic power series sum(a[n] * x**n, n=0..n_max) in a dummy
    # variable, differentiate it, then substitute the requested variable.
    x_poly = sp.symbols('x_poly')
    series = sp.Sum(a[n] * x_poly**n, (n, 0, n_max))
    return series.diff(x_poly, deriv).subs(x_poly, val)
def get_grid_offset(function, axis):
    """
    For a function, return the grid offset for a specified axis.

    Parameters
    ----------
    function : devito Function
        The function whose staggering should be inspected
    axis : int
        The axis for which the offset should be recovered

    Returns
    -------
    float
        0.5 if the function is staggered forward along ``axis``, -0.5 if
        staggered backward, and 0. otherwise (including unstaggered).
    """
    if function.is_Staggered:
        dim = function.space_dimensions[axis]
        stagger = function.staggered
        # Normalise single-dimension staggering to a tuple so both forms
        # share one membership test (x in (s,) is equivalent to x == s).
        candidates = stagger if isinstance(stagger, tuple) else (stagger,)
        if dim in candidates:
            return 0.5
        if -dim in candidates:
            return -0.5
    return 0.
| [
"sympy.Sum",
"sympy.symbols",
"numpy.array",
"sympy.finite_diff_weights",
"sympy.diff"
] | [((1516, 1536), 'sympy.symbols', 'sp.symbols', (['"""x_poly"""'], {}), "('x_poly')\n", (1526, 1536), True, 'import sympy as sp\n'), ((1554, 1595), 'sympy.Sum', 'sp.Sum', (['(a[n] * x_poly ** n)', '(n, 0, n_max)'], {}), '(a[n] * x_poly ** n, (n, 0, n_max))\n', (1560, 1595), True, 'import sympy as sp\n'), ((1026, 1065), 'numpy.array', 'np.array', (['base_coeffs'], {'dtype': 'np.float32'}), '(base_coeffs, dtype=np.float32)\n', (1034, 1065), True, 'import numpy as np\n'), ((1091, 1126), 'numpy.array', 'np.array', (['base_coeffs'], {'dtype': 'object'}), '(base_coeffs, dtype=object)\n', (1099, 1126), True, 'import numpy as np\n'), ((944, 984), 'sympy.finite_diff_weights', 'sp.finite_diff_weights', (['deriv', 'x_list', '(0)'], {}), '(deriv, x_list, 0)\n', (966, 984), True, 'import sympy as sp\n'), ((1627, 1661), 'sympy.diff', 'sp.diff', (['polynomial', 'x_poly', 'deriv'], {}), '(polynomial, x_poly, deriv)\n', (1634, 1661), True, 'import sympy as sp\n')] |
#!/usr/bin/env python
# coding: utf-8
# Notebook-exported analysis script: trains a random-forest classifier on
# single-cell gene expression to separate SLE patients from healthy donors.
# NOTE(review): the exported cells are out of execution order — several
# names (`data`, `label`, `skf`, `df_n`) are used before the lines that
# define them.  The script only runs if the cells are executed in notebook
# order, not top-to-bottom.  Flagged inline below.

# In[11]:

import pandas as pd
import numpy as np
import glob,os
from glob import iglob
#import scanpy as sc
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import RocCurveDisplay
from sklearn.datasets import load_wine
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
import os
import matplotlib as mpl
#os.environ["KMP_DUPLICATE_LIB_OK"] = "FALSE"
import time
import random
# Embed editable (TrueType) fonts in exported PDF/PS figures.
mpl.rcParams['pdf.fonttype']=42
mpl.rcParams['ps.fonttype']=42

# # single cell sle part

# In[2]:

# Load the curated gene list; keep gene names plus a 'patient' label column,
# dropping a fixed set of excluded entries by position.
features=pd.read_csv('./combined_gene_for_machine_learning.csv',index_col=1)
features=np.append(features.index.values,'patient')
features=np.delete(features,[3,7,16,17,18,76,78,79])

# In[3]:

# Adult healthy donors (aHD): one expression matrix per file, subset to the
# selected genes, transposed to cells x genes, labelled patient=0.
path = '../GSE135779_SLE/test/aHD/'
file = glob.glob(os.path.join(path, "*.csv.gz"))
hd = []
for f in file:
    hd.append(pd.read_csv(f,index_col=0).loc[features[1:80],:].T)
for i in range(len(hd)):
    hd[i]['patient']=0

# In[4]:

# Child healthy donors (cHD), same processing, labelled patient=0.
path = '../GSE135779_SLE/test/cHD/'
file = glob.glob(os.path.join(path, "*.csv.gz"))
chd = []
for f in file:
    chd.append(pd.read_csv(f,index_col=0).loc[features[1:80],:].T)
for i in range(len(chd)):
    chd[i]['patient']=0

# In[5]:

# Stack all aHD cells into one frame.
hd_m=hd[0]
for i in range(1,len(hd)):
    hd_m=pd.concat([hd_m,hd[i]],axis=0)

# In[6]:

chd_m=chd[0]
for i in range(1,len(chd)):
    chd_m=pd.concat([chd_m,chd[i]],axis=0)

# In[7]:

# Adult SLE patients (aSLE), labelled patient=1.
path = '../GSE135779_SLE/test/aSLE/'
file = glob.glob(os.path.join(path, "*.csv.gz"))
asle = []
for f in file:
    asle.append(pd.read_csv(f,index_col=0).loc[features[1:80],:].T)
for i in range(len(asle)):
    asle[i]['patient']=1
asle_m=asle[0]
for i in range(1,len(asle)):
    asle_m=pd.concat([asle_m,asle[i]],axis=0)

# In[8]:

# Child SLE patients (cSLE), labelled patient=1.
path = '../GSE135779_SLE/test/cSLE/'
file = glob.glob(os.path.join(path, "*.csv.gz"))
csle = []
for f in file:
    csle.append(pd.read_csv(f,index_col=0).loc[features[1:80],:].T)
for i in range(len(csle)):
    csle[i]['patient']=1
csle_m=csle[0]
for i in range(1,len(csle)):
    csle_m=pd.concat([csle_m,csle[i]],axis=0)

# In[9]:

# Combined dataset: all cells, 'patient' column is the binary class label.
df=pd.concat([hd_m,chd_m,asle_m,csle_m],axis=0)

# In[20]:

# Fine scan of n_estimators around the coarse optimum.
# NOTE(review): `data` and `label` are defined in later cells (In[19] and
# In[14]) — this cell fails if run top-to-bottom.
# NOTE(review): the loop runs over range(185,205) but the argmax is mapped
# through [*range(185,200)], which raises IndexError if the best score
# falls in the last five iterations.
scorel = []
for i in range(185,205):
    rfc = RandomForestClassifier(n_estimators=i+1,n_jobs=-1,random_state=0)
    score = cross_val_score(rfc,data,label,cv=10).mean()
    scorel.append(score)
print(max(scorel),([*range(185,200)][scorel.index(max(scorel))]))
plt.figure(figsize=[20,5])
plt.plot(range(185,205),scorel)
plt.show()

# In[19]:

data=df.drop(columns=['patient'])
start=time.time()
scorel = []
for i in range(0,200,10): # coarse scan: RF models with 1..191 trees, step 10
    rfc = RandomForestClassifier(n_estimators=i+1,n_jobs=-1,random_state=0)
    score = cross_val_score(rfc,data,label,cv=10).mean()
    scorel.append(score)
print(max(scorel),(scorel.index(max(scorel))*10)+1)
end=time.time()
print('Running time: %s Seconds'%(end-start))
plt.figure(figsize=[20,5])
plt.plot(range(1,201,10),scorel)
plt.show()

# In[14]:

# Train/test split fit with the tuned tree count; plot the ROC curve.
label=df.patient.values
Xtrain, Xtest, Ytrain, Ytest = train_test_split(df.drop(columns=['patient']),label,test_size=0.3)
rfc = RandomForestClassifier(random_state=0,class_weight='balanced',n_estimators=199)
rfc = rfc.fit(Xtrain,Ytrain)
score_r = rfc.score(Xtest,Ytest)
c=pd.DataFrame(rfc.feature_importances_)
#a.index=df.columns.values
print("Random Forest:{}".format(score_r))
fig = plt.figure(figsize=(8, 8))
ax = plt.gca()
rfc_disp = RocCurveDisplay.from_estimator(rfc, Xtest, Ytest, ax=ax, alpha=0.8)
plt.legend(loc=4,prop={'size': 10})
plt.xlabel('False Positive Rate', fontsize=18)
plt.ylabel('True Positive Rate', fontsize=16)
ax.plot([0, 1], [0, 1], ls="--", c=".3")
#plt.savefig('./figure6_and_7/auc_result_of_sc_sle_model.pdf',width=4,height=5)

# # reproduction of Figure6A

# In[16]:

# NOTE(review): `skf` and `df_n` are defined in the next cell — this cell
# also fails if run top-to-bottom.
pre_auc=pd.DataFrame(skf.split(df_n,label))
test_index=pre_auc.iloc[1,1]
train_index=pre_auc.iloc[1,0]
fig = plt.figure(figsize=(8, 8))
ax = plt.gca()
rfc = rfc.fit(df_n.iloc[train_index],label[train_index])
rfc_disp = RocCurveDisplay.from_estimator(rfc,df_n.iloc[test_index] ,label[test_index],
                                ax=ax, alpha=0.8)
plt.legend(loc=4,prop={'size': 10})
plt.xlabel('False Positive Rate', fontsize=18)
plt.ylabel('True Positive Rate', fontsize=16)
ax.plot([0, 1], [0, 1], ls="--", c=".3")
#plt.savefig('./figure6_and_7/auc_result_of_sc_sle_model.pdf',width=4,height=5)

# In[ ]:

# 10-fold stratified CV: collect per-fold AUC and accuracy.
df_n=df.drop(columns=['patient'])
rfc_l = []
fpr_l=[]
tpr_l=[]
acc_l=[]
skf =StratifiedKFold(n_splits=10)
#for i in range(1000):
for train_index, test_index in skf.split(df_n,label):
    rfc = RandomForestClassifier(random_state=0,class_weight="balanced",oob_score=True)
    rfc = rfc.fit(df_n.iloc[train_index],label[train_index])
    rfc_l.append(roc_auc_score(label[test_index], rfc.predict_proba(df_n.iloc[test_index])[:, 1]))
    #fpr_l.append(metrics.roc_curve(label[test_index],rfc.predict(df_n.iloc[test_index]), pos_label=1))
    acc_l.append(accuracy_score(label[test_index], rfc.predict(df_n.iloc[test_index])))
print(np.mean(acc_l))
print(np.std(acc_l))

# In[ ]:

print(cross_val_score(rfc,df.drop(columns=['patient']),label,cv=10).mean())
print(cross_val_score(rfc,df.drop(columns=['patient']),label,cv=10).var())
print(np.mean(rfc_l))
print(np.std(rfc_l))

# In[13]:

print(np.mean(rfc_l))
print(np.std(rfc_l))

# In[43]:

# Feature-importance bar chart for the fitted model.
c['feature_importance']=features[1:80]
fig, ax = plt.subplots(figsize=(15, 5))
ax.bar(x=c['feature_importance'], height=c[0])
ax.set_title("Feature importance for SLE single cell model", fontsize=15)
plt.xticks(rotation = 90)
plt.savefig('./figure6_and_7/sc_model_sle.pdf',width=15,height=5)
| [
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"sklearn.model_selection.StratifiedKFold",
"numpy.mean",
"numpy.delete",
"matplotlib.pyplot.xlabel",
"pandas.DataFrame",
"sklearn.model_selection.cross_val_score",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.gca",
"... | [((1084, 1152), 'pandas.read_csv', 'pd.read_csv', (['"""./combined_gene_for_machine_learning.csv"""'], {'index_col': '(1)'}), "('./combined_gene_for_machine_learning.csv', index_col=1)\n", (1095, 1152), True, 'import pandas as pd\n'), ((1161, 1204), 'numpy.append', 'np.append', (['features.index.values', '"""patient"""'], {}), "(features.index.values, 'patient')\n", (1170, 1204), True, 'import numpy as np\n'), ((1213, 1264), 'numpy.delete', 'np.delete', (['features', '[3, 7, 16, 17, 18, 76, 78, 79]'], {}), '(features, [3, 7, 16, 17, 18, 76, 78, 79])\n', (1222, 1264), True, 'import numpy as np\n'), ((2605, 2653), 'pandas.concat', 'pd.concat', (['[hd_m, chd_m, asle_m, csle_m]'], {'axis': '(0)'}), '([hd_m, chd_m, asle_m, csle_m], axis=0)\n', (2614, 2653), True, 'import pandas as pd\n'), ((2919, 2946), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[20, 5]'}), '(figsize=[20, 5])\n', (2929, 2946), True, 'import matplotlib.pyplot as plt\n'), ((2978, 2988), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2986, 2988), True, 'import matplotlib.pyplot as plt\n'), ((3043, 3054), 'time.time', 'time.time', ([], {}), '()\n', (3052, 3054), False, 'import time\n'), ((3328, 3339), 'time.time', 'time.time', ([], {}), '()\n', (3337, 3339), False, 'import time\n'), ((3386, 3413), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[20, 5]'}), '(figsize=[20, 5])\n', (3396, 3413), True, 'import matplotlib.pyplot as plt\n'), ((3446, 3456), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3454, 3456), True, 'import matplotlib.pyplot as plt\n'), ((3599, 3684), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(0)', 'class_weight': '"""balanced"""', 'n_estimators': '(199)'}), "(random_state=0, class_weight='balanced',\n n_estimators=199)\n", (3621, 3684), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((3743, 3781), 'pandas.DataFrame', 'pd.DataFrame', (['rfc.feature_importances_'], {}), 
'(rfc.feature_importances_)\n', (3755, 3781), True, 'import pandas as pd\n'), ((3857, 3883), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (3867, 3883), True, 'import matplotlib.pyplot as plt\n'), ((3889, 3898), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3896, 3898), True, 'import matplotlib.pyplot as plt\n'), ((3910, 3977), 'sklearn.metrics.RocCurveDisplay.from_estimator', 'RocCurveDisplay.from_estimator', (['rfc', 'Xtest', 'Ytest'], {'ax': 'ax', 'alpha': '(0.8)'}), '(rfc, Xtest, Ytest, ax=ax, alpha=0.8)\n', (3940, 3977), False, 'from sklearn.metrics import RocCurveDisplay\n'), ((3978, 4014), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(4)', 'prop': "{'size': 10}"}), "(loc=4, prop={'size': 10})\n", (3988, 4014), True, 'import matplotlib.pyplot as plt\n'), ((4014, 4060), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {'fontsize': '(18)'}), "('False Positive Rate', fontsize=18)\n", (4024, 4060), True, 'import matplotlib.pyplot as plt\n'), ((4061, 4106), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {'fontsize': '(16)'}), "('True Positive Rate', fontsize=16)\n", (4071, 4106), True, 'import matplotlib.pyplot as plt\n'), ((4381, 4407), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (4391, 4407), True, 'import matplotlib.pyplot as plt\n'), ((4413, 4422), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4420, 4422), True, 'import matplotlib.pyplot as plt\n'), ((4491, 4591), 'sklearn.metrics.RocCurveDisplay.from_estimator', 'RocCurveDisplay.from_estimator', (['rfc', 'df_n.iloc[test_index]', 'label[test_index]'], {'ax': 'ax', 'alpha': '(0.8)'}), '(rfc, df_n.iloc[test_index], label[test_index\n ], ax=ax, alpha=0.8)\n', (4521, 4591), False, 'from sklearn.metrics import RocCurveDisplay\n'), ((4587, 4623), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(4)', 'prop': "{'size': 10}"}), "(loc=4, 
prop={'size': 10})\n", (4597, 4623), True, 'import matplotlib.pyplot as plt\n'), ((4623, 4669), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {'fontsize': '(18)'}), "('False Positive Rate', fontsize=18)\n", (4633, 4669), True, 'import matplotlib.pyplot as plt\n'), ((4670, 4715), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {'fontsize': '(16)'}), "('True Positive Rate', fontsize=16)\n", (4680, 4715), True, 'import matplotlib.pyplot as plt\n'), ((4927, 4955), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(10)'}), '(n_splits=10)\n', (4942, 4955), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((5858, 5887), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(15, 5)'}), '(figsize=(15, 5))\n', (5870, 5887), True, 'import matplotlib.pyplot as plt\n'), ((6009, 6032), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (6019, 6032), True, 'import matplotlib.pyplot as plt\n'), ((6035, 6102), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./figure6_and_7/sc_model_sle.pdf"""'], {'width': '(15)', 'height': '(5)'}), "('./figure6_and_7/sc_model_sle.pdf', width=15, height=5)\n", (6046, 6102), True, 'import matplotlib.pyplot as plt\n'), ((1323, 1353), 'os.path.join', 'os.path.join', (['path', '"""*.csv.gz"""'], {}), "(path, '*.csv.gz')\n", (1335, 1353), False, 'import os\n'), ((1558, 1588), 'os.path.join', 'os.path.join', (['path', '"""*.csv.gz"""'], {}), "(path, '*.csv.gz')\n", (1570, 1588), False, 'import os\n'), ((1792, 1824), 'pandas.concat', 'pd.concat', (['[hd_m, hd[i]]'], {'axis': '(0)'}), '([hd_m, hd[i]], axis=0)\n', (1801, 1824), True, 'import pandas as pd\n'), ((1888, 1922), 'pandas.concat', 'pd.concat', (['[chd_m, chd[i]]'], {'axis': '(0)'}), '([chd_m, chd[i]], axis=0)\n', (1897, 1922), True, 'import pandas as pd\n'), ((1988, 2018), 'os.path.join', 'os.path.join', (['path', '"""*.csv.gz"""'], {}), "(path, 
'*.csv.gz')\n", (2000, 2018), False, 'import os\n'), ((2220, 2256), 'pandas.concat', 'pd.concat', (['[asle_m, asle[i]]'], {'axis': '(0)'}), '([asle_m, asle[i]], axis=0)\n', (2229, 2256), True, 'import pandas as pd\n'), ((2322, 2352), 'os.path.join', 'os.path.join', (['path', '"""*.csv.gz"""'], {}), "(path, '*.csv.gz')\n", (2334, 2352), False, 'import os\n'), ((2554, 2590), 'pandas.concat', 'pd.concat', (['[csle_m, csle[i]]'], {'axis': '(0)'}), '([csle_m, csle[i]], axis=0)\n', (2563, 2590), True, 'import pandas as pd\n'), ((2709, 2778), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(i + 1)', 'n_jobs': '(-1)', 'random_state': '(0)'}), '(n_estimators=i + 1, n_jobs=-1, random_state=0)\n', (2731, 2778), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((3128, 3197), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(i + 1)', 'n_jobs': '(-1)', 'random_state': '(0)'}), '(n_estimators=i + 1, n_jobs=-1, random_state=0)\n', (3150, 3197), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((5043, 5122), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'random_state': '(0)', 'class_weight': '"""balanced"""', 'oob_score': '(True)'}), "(random_state=0, class_weight='balanced', oob_score=True)\n", (5065, 5122), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((5488, 5502), 'numpy.mean', 'np.mean', (['acc_l'], {}), '(acc_l)\n', (5495, 5502), True, 'import numpy as np\n'), ((5510, 5523), 'numpy.std', 'np.std', (['acc_l'], {}), '(acc_l)\n', (5516, 5523), True, 'import numpy as np\n'), ((5699, 5713), 'numpy.mean', 'np.mean', (['rfc_l'], {}), '(rfc_l)\n', (5706, 5713), True, 'import numpy as np\n'), ((5721, 5734), 'numpy.std', 'np.std', (['rfc_l'], {}), '(rfc_l)\n', (5727, 5734), True, 'import numpy as np\n'), ((5756, 5770), 'numpy.mean', 'np.mean', (['rfc_l'], {}), '(rfc_l)\n', (5763, 5770), True, 'import numpy as np\n'), 
((5778, 5791), 'numpy.std', 'np.std', (['rfc_l'], {}), '(rfc_l)\n', (5784, 5791), True, 'import numpy as np\n'), ((2785, 2825), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['rfc', 'data', 'label'], {'cv': '(10)'}), '(rfc, data, label, cv=10)\n', (2800, 2825), False, 'from sklearn.model_selection import cross_val_score\n'), ((3204, 3244), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['rfc', 'data', 'label'], {'cv': '(10)'}), '(rfc, data, label, cv=10)\n', (3219, 3244), False, 'from sklearn.model_selection import cross_val_score\n'), ((1392, 1419), 'pandas.read_csv', 'pd.read_csv', (['f'], {'index_col': '(0)'}), '(f, index_col=0)\n', (1403, 1419), True, 'import pandas as pd\n'), ((1629, 1656), 'pandas.read_csv', 'pd.read_csv', (['f'], {'index_col': '(0)'}), '(f, index_col=0)\n', (1640, 1656), True, 'import pandas as pd\n'), ((2061, 2088), 'pandas.read_csv', 'pd.read_csv', (['f'], {'index_col': '(0)'}), '(f, index_col=0)\n', (2072, 2088), True, 'import pandas as pd\n'), ((2395, 2422), 'pandas.read_csv', 'pd.read_csv', (['f'], {'index_col': '(0)'}), '(f, index_col=0)\n', (2406, 2422), True, 'import pandas as pd\n')] |
import os
import cv2
import torch
import numpy as np
import torchvision
from tqdm import tqdm
# Select the first CUDA GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Args:
    """Static configuration: dataset paths, model name and run options.

    The BDD100k block is active; the CelebAMask-HQ block below it is kept
    commented out so the script can be switched by (un)commenting lines.
    """
    checkpoints_dir = '/path/to/checkpoints/dir'
    # FOR BDD
    dataroot = '/path/to/data/BDD100k/'
    segmentation_network_name = 'deeplabv3_bdd'
    dataset_mode = 'bdd'
    save_dir_masks = '/path/to/dataset/BDD/bdd100k/seg/predicted_masks/val'
    n_classes = 20
    ## FOR CELEBAMASK-HQ
    #dataroot = '/path/to/CelebAMask-HQ/'
    #segmentation_network_name = 'deeplabv3_celebamhq'
    #dataset_mode = 'celebamhq'
    #save_dir_masks = '/path/to/dataset/CelebAMask-HQ/test/predicted_masks'
    #n_classes = 19
    seed = 42
    batch_size = 8
    no_flip = False
    phase='train'
    ckpt_iter='best'
    num_epochs=50
    pretrained = False
opt=Args()  # single module-level configuration instance used below
def get_dataset_name(mode):
    """Map a dataset mode string to its dataset class/module name.

    Args:
        mode: either ``"bdd"`` or ``"celebamhq"``.

    Returns:
        The dataset class name used by :func:`get_dataloaders`.

    Raises:
        ValueError: if *mode* is not a known dataset mode.
    """
    names = {
        "bdd": "BDDDataset_for_deeplab",
        "celebamhq": "CelebAMaskHQDataset_for_deeplab",
    }
    try:
        return names[mode]
    except KeyError:
        # BUG FIX: the original built `ValueError(...)` but never raised it,
        # so unknown modes silently returned None; additionally its `else`
        # was attached to the second `if`, so the (no-op) error branch also
        # executed for mode == "bdd".
        raise ValueError("There is no such dataset regime as %s" % mode)
def get_dataloaders(opt):
    """Build the train and validation DataLoaders for ``opt.dataset_mode``.

    Dynamically imports ``data.<DatasetName>`` and instantiates the dataset
    class of the same name twice (train and metrics/validation variants).
    """
    dataset_name = get_dataset_name(opt.dataset_mode)
    module = __import__("data." + dataset_name)
    dataset_cls = module.__dict__[dataset_name].__dict__[dataset_name]
    dataset_train = dataset_cls(opt, for_metrics=False)
    dataset_val = dataset_cls(opt, for_metrics=True)
    print("Created %s, size train: %d, size val: %d" % (dataset_name, len(dataset_train), len(dataset_val)))
    loader_train = torch.utils.data.DataLoader(
        dataset_train, batch_size=opt.batch_size, shuffle=True, drop_last=True)
    loader_val = torch.utils.data.DataLoader(
        dataset_val, batch_size=opt.batch_size, shuffle=False, drop_last=False)
    return loader_train, loader_val
# Load validation data
_, dataloader_val = get_dataloaders(opt)
# Load trained deeplab model
deeplabv3 = torchvision.models.segmentation.deeplabv3_resnet101(pretrained=False, num_classes=opt.n_classes)
checkpoint = torch.load(os.path.join(opt.checkpoints_dir, opt.segmentation_network_name, 'checkpoint.tar'))
deeplabv3.load_state_dict(checkpoint['model_state_dict'])
start_epoch = checkpoint['epoch']
lowest_loss = checkpoint['loss']
print("Checkpoint has been correctly loaded. Starting from epoch", start_epoch, "with last val loss", lowest_loss)
deeplabv3.eval().cuda()
if not os.path.exists(opt.save_dir_masks):
    # BUG FIX: the original called `os.mkdir(save_dir_masks)` — an undefined
    # name (NameError); makedirs also creates missing parent directories.
    os.makedirs(opt.save_dir_masks)
# Forward validation data through deeplabv3; inference only, so skip autograd.
with torch.no_grad():
    for data in tqdm(dataloader_val):
        inputs = data['image'].to(device)
        pred = deeplabv3(inputs)['out']
        pred_labels = pred.argmax(1)
        paths = data['name']
        # BUG FIX: iterate the actual batch size rather than opt.batch_size —
        # the val loader uses drop_last=False, so the last batch can be smaller.
        for j in range(pred_labels.shape[0]):
            mask = np.asarray(pred_labels[j].cpu())
            # Map class 0 to 255 ("ignore") and shift the rest down by one.
            mask = np.where(mask == 0, 256, mask)
            mask -= 1
            cv2.imwrite(os.path.join(opt.save_dir_masks, paths[j].replace('jpg', 'png')), mask)
        break  # NOTE(review): original stops after the first batch; kept — confirm intent
| [
"torchvision.models.segmentation.deeplabv3_resnet101",
"os.path.exists",
"numpy.where",
"tqdm.tqdm",
"os.path.join",
"torch.cuda.is_available",
"os.mkdir",
"torch.utils.data.DataLoader"
] | [((1929, 2029), 'torchvision.models.segmentation.deeplabv3_resnet101', 'torchvision.models.segmentation.deeplabv3_resnet101', ([], {'pretrained': '(False)', 'num_classes': 'opt.n_classes'}), '(pretrained=False,\n num_classes=opt.n_classes)\n', (1980, 2029), False, 'import torchvision\n'), ((2527, 2547), 'tqdm.tqdm', 'tqdm', (['dataloader_val'], {}), '(dataloader_val)\n', (2531, 2547), False, 'from tqdm import tqdm\n'), ((1548, 1651), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset_train'], {'batch_size': 'opt.batch_size', 'shuffle': '(True)', 'drop_last': '(True)'}), '(dataset_train, batch_size=opt.batch_size,\n shuffle=True, drop_last=True)\n', (1575, 1651), False, 'import torch\n'), ((1673, 1777), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset_val'], {'batch_size': 'opt.batch_size', 'shuffle': '(False)', 'drop_last': '(False)'}), '(dataset_val, batch_size=opt.batch_size, shuffle\n =False, drop_last=False)\n', (1700, 1777), False, 'import torch\n'), ((2050, 2136), 'os.path.join', 'os.path.join', (['opt.checkpoints_dir', 'opt.segmentation_network_name', '"""checkpoint.tar"""'], {}), "(opt.checkpoints_dir, opt.segmentation_network_name,\n 'checkpoint.tar')\n", (2062, 2136), False, 'import os\n'), ((2406, 2440), 'os.path.exists', 'os.path.exists', (['opt.save_dir_masks'], {}), '(opt.save_dir_masks)\n', (2420, 2440), False, 'import os\n'), ((2446, 2470), 'os.mkdir', 'os.mkdir', (['save_dir_masks'], {}), '(save_dir_masks)\n', (2454, 2470), False, 'import os\n'), ((131, 156), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (154, 156), False, 'import torch\n'), ((2784, 2814), 'numpy.where', 'np.where', (['(mask == 0)', '(256)', 'mask'], {}), '(mask == 0, 256, mask)\n', (2792, 2814), True, 'import numpy as np\n')] |
from numpy.core.fromnumeric import take
from gi.repository import Gtk, GLib, Gio
from matplotlib.backends.backend_gtk3agg import (
FigureCanvasGTK3Agg as FigureCanvas)
from matplotlib.figure import Figure
import numpy as np
import time
import threading
import serial
# from pyfirmata import Arduino, util
from stepper import StepperMotor
import glob
import sys
import gi
gi.require_version('Gtk', '3.0')
# TODO: Take constants into a separate file.
# Display names for the MPU sensor combobox; compared against
# `current_sensor` throughout the app.
MAGNETOMETER = "Magnetometer"
ACCELEROMETER = "Accelerometer"
GYROSCOPE = "Gyroscope"
class AppWindow(Gtk.ApplicationWindow):
    """Main window of the single-point-measurement tool.

    Lets the user pick a serial port, a sample count and an MPU sensor
    (accelerometer / gyroscope / magnetometer), streams readings from a
    micro-controller on a background thread, plots them with matplotlib
    and optionally saves them to CSV or runs a magnetometer calibration.
    """

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.timer = None
        self.set_border_width(10)
        hpaned = Gtk.Paned.new(Gtk.Orientation.HORIZONTAL)
        vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=5)
        # Label Serial Port
        serial_port_label = Gtk.Label.new("Serial Port:")
        vbox.pack_start(serial_port_label, False, True, 0)
        # Combobox Serial Port
        ports = self.getSerialPorts()
        port_combobox = Gtk.ComboBoxText()
        port_combobox.set_entry_text_column(0)
        port_combobox.connect("changed", self.on_port_change)
        for port in ports:
            port_combobox.append_text(str(port))
        port_combobox.set_active(0)
        self.port = str(port_combobox.get_active_text())
        vbox.pack_start(port_combobox, False, False, 0)
        # Label Samples
        samples_label = Gtk.Label.new("Samples: ")
        vbox.pack_start(samples_label, False, False, 0)
        # Spinbox samples
        samples_spin = Gtk.SpinButton.new_with_range(1, 1000, 10)
        samples_spin.set_digits(0)
        samples_spin.connect("value-changed", self.on_samples_changed)
        vbox.pack_start(samples_spin, False, False, 0)
        # Label Sensor Reading
        serial_port_label = Gtk.Label.new("MPU sensor to be read:")
        vbox.pack_start(serial_port_label, False, True, 0)
        # Combobox sensor selection
        sensor_options = [ACCELEROMETER, GYROSCOPE, MAGNETOMETER]  # MPU options
        sensor_combobox = Gtk.ComboBoxText()
        sensor_combobox.set_entry_text_column(0)
        sensor_combobox.connect("changed", self.on_sensor_option_change)
        for option in sensor_options:
            sensor_combobox.append_text(str(option))
        sensor_combobox.set_active(2)
        vbox.pack_start(sensor_combobox, False, False, 0)
        # Button Start
        self.start_button = Gtk.Button.new_with_label("Start")
        self.start_button.connect("clicked", self.on_button_start)
        vbox.pack_start(self.start_button, False, False, 0)
        # Button Stop
        self.stop_button = Gtk.Button.new_with_label("Stop")
        self.stop_button.connect("clicked", self.on_button_stop)
        vbox.pack_start(self.stop_button, False, False, 0)
        # Button Save
        self.save_button = Gtk.Button.new_with_label("Save")
        self.save_button.connect("clicked", self.on_button_save)
        vbox.pack_start(self.save_button, False, False, 0)
        # NOTE(review): this button is labelled "Stepper Routine" but is wired
        # to on_button_calibrate, same as the Calibrate button — confirm intent.
        self.stepper_motor_button = Gtk.Button.new_with_label("Stepper Routine")
        self.stepper_motor_button.connect("clicked", self.on_button_calibrate)
        vbox.pack_start(self.stepper_motor_button, False, False, 0)
        # Button Calibration
        self.calibrate_button = Gtk.Button.new_with_label("Calibrate")
        self.calibrate_button.connect("clicked", self.on_button_calibrate)
        vbox.pack_start(self.calibrate_button, False, False, 0)
        hpaned.add1(vbox)
        # App vars initialization
        self.current_sensor = str(sensor_combobox.get_active_text())
        self.logic_level = 5.0
        # self.baud_rate = 9600
        self.baud_rate = 115200
        self.board_resolution = 1023
        self.samples = 0
        self.micro_board = None
        self.time_interval = 0.050  # seconds (s)
        self.values = []
        # Example sine wave plot on init
        self.fig = Figure(figsize=(5, 4), dpi=100)
        self.ax = self.fig.add_subplot(111)
        self.x = np.arange(0.0, 3.0, 0.015)
        self.y = ((self.logic_level / 2) + (self.logic_level/2)) * \
            np.sin(2*np.pi*self.x)
        self.ax.plot(self.x, self.y, 'C1o--')
        self.ax.set_xlabel("Time (s)")
        self.ax.set_ylabel("Voltage (V)")
        self.ax.grid(visible=True)
        self.ax.set_title(f"Sample Graph")
        # Add Graph to Canvas
        self.canvas = FigureCanvas(self.fig)
        self.canvas.set_size_request(300, 250)
        hpaned.add2(self.canvas)
        self.add(hpaned)
        self.set_size_request(800, 600)
        self.show_all()

    def draw(self, x, y):
        """Plot an x/y series (accelerometer or gyroscope reading)."""
        self.ax.clear()
        self.ax.plot(x, y, 'C1o--')
        self.ax.set_xlabel("x")
        self.ax.set_ylabel("y")
        self.ax.grid(visible=True)
        self.ax.set_title(f"{self.current_sensor} reading.")
        self.canvas.draw()

    def draw_magnetometer(self, x, y):
        """Plot the ellipse read by the MPU magnetometer on the X/Y axes.

        Each point is annotated with its sample index.
        """
        self.ax.clear()
        self.ax.plot(x, y, 'C1o--')
        self.ax.set_xlabel("x")
        self.ax.set_ylabel("y")
        self.ax.grid(visible=True)
        self.ax.set_title(f"Magnetometer reading.")
        for i in range(x.size):
            xitem = x[i]
            yitem = y[i]
            etiqueta = str(i)  # label each point with its sample index
            self.ax.annotate(etiqueta, (xitem, yitem), textcoords="offset points", xytext=(0, 10), ha="center")
        # self.ax.set_xlim([-50, 50])
        # self.ax.set_ylim([-50, 50])
        self.canvas.draw()

    def draw_calibrated_magnetometer(self, x, y, mx, my):
        """Plot the raw (x, y) and calibrated (mx, my) magnetometer ellipses."""
        self.ax.clear()
        self.ax.plot(x, y, 'C1o--')
        self.ax.plot(mx, my, 'C2o--')
        self.ax.set_xlabel("x")
        self.ax.set_ylabel("y")
        self.ax.grid(visible=True)
        self.ax.set_title("Magnetometer calibration")
        self.canvas.draw()

    def getSerialPorts(self) -> list:
        """Return the serial ports that can actually be opened.

        Works on Windows, Linux/Cygwin and macOS.

        Raises:
            EnvironmentError: on an unsupported platform.
        """
        if sys.platform.startswith('win'):
            ports = ['COM%s' % (i + 1) for i in range(256)]
        elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
            # this excludes your current terminal "/dev/tty"
            ports = glob.glob('/dev/tty[A-Za-z]*')
        elif sys.platform.startswith('darwin'):
            ports = glob.glob('/dev/tty.*')
        else:
            raise EnvironmentError('Unsupported platform')
        result = []
        for port in ports:
            try:
                s = serial.Serial(port, 9600)
                s.close()
                result.append(port)
            except Exception:
                # Port exists but cannot be opened (busy / no permission).
                pass
        return result

    def on_port_change(self, combo):
        """Update the active serial port when the combobox changes."""
        available_port = str(combo.get_active_text())
        if available_port != None:
            self.port = available_port
        else:
            self.port = None
            self.on_no_port_available(self)

    def on_no_port_available(self, widget):
        """Show an error dialog when no serial port is found."""
        port_dialog = Gtk.MessageDialog(transient_for=self,
                                        flags=0,
                                        message_type=Gtk.MessageType.ERROR,
                                        buttons=Gtk.ButtonsType.OK,
                                        text="No serial port available",
                                        title="Serial Port")
        port_dialog.run()
        port_dialog.destroy()

    def on_samples_changed(self, samples_spin):
        """Update the requested number of samples from the spin button."""
        self.samples = samples_spin.get_value_as_int()

    def on_sensor_option_change(self, combo):
        """Update the selected MPU sensor from the combobox."""
        self.current_sensor = str(combo.get_active_text())

    def on_button_start(self, widget):
        """Launch the stepper routine and the serial-reading worker threads."""
        print("Start")
        self.stepper_routine_thread = threading.Thread(target=self.stepper_routine)
        self.stepper_routine_thread.daemon = True
        self.timer = threading.Thread(target=self.get_time)
        self.event = threading.Event()
        self.timer.daemon = True
        self.stepper_routine_thread.start()
        self.timer.start()

    def stepper_routine(self):
        """Run the stepper-motor movement routine (blocking; own thread)."""
        stepper = StepperMotor()
        stepper.routine()

    def get_time(self):
        """Background worker: stream ``self.samples`` readings from the board.

        Opens the serial port, reads comma-separated MPU values every
        ``self.time_interval`` seconds into ``self.x``/``self.y``, then
        redraws the plot and restores the buttons.
        """
        time_value = value = count = 0
        self.x = np.array([])
        self.y = np.array([])
        self.start_button.hide()
        self.save_button.hide()
        self.stop_button.show()
        self.calibrate_button.hide()
        take_data = False
        if self.micro_board != None:
            print("Closing board before init")
            self.micro_board.close()
        # Initialize the serial connection if a valid port is selected.
        if self.port != None:
            try:
                print("Opening Serial Comm on port:", self.port)
                # Serial initialization
                self.micro_board = serial.Serial(
                    str(self.port), self.baud_rate, timeout=1)
                time.sleep(1)
                # Reset Buffer
                self.micro_board.reset_input_buffer()
                # Reading flag set to true
                take_data = True
            except Exception:  # narrowed from a bare `except:`
                if not self.event.is_set():
                    print("Stop")
                    # Stop thread
                    self.event.set()
                    self.timer = None
                # BUG FIX: this targeted `self.on_failed_connection`, but the
                # method was defined as `on_faild_connection` (typo), raising
                # AttributeError instead of showing the error dialog.
                GLib.idle_add(self.on_failed_connection)
                take_data = False
        else:
            print("No serial port available. Restart.")
        # Serial port reading when reading flag is true.
        if take_data:
            if time_value == 0:
                if self.current_sensor == MAGNETOMETER:
                    print("X (mT) \t Y (mT) \t Magnetometer")
                elif self.current_sensor == ACCELEROMETER:
                    print("X (mss) \t Y (mss) \t Accelerometer")
                elif self.current_sensor == GYROSCOPE:
                    print("X (rad) \t Y (rad) \t Gyroscope")
                else:
                    print("X () \t Y ()")
            while not self.event.is_set():
                # Stop when we get to the samples amount limit.
                if count >= self.samples:
                    print("Sampling completed - Stoping...")
                    # Stop thread
                    self.event.set()
                    # Reset timer
                    self.timer = None
                    # Close Serial connection
                    if self.micro_board != None:
                        self.micro_board.reset_input_buffer()
                        self.micro_board.close()
                    break
                try:
                    # Read serial port and decode.
                    temp = str(self.micro_board.readline().decode('cp437'))
                    temp = temp.replace("\n", "")
                    mpu_reading = temp.split(",")
                    # Append reading into app graph vars
                    if self.current_sensor == MAGNETOMETER:
                        # XY Plane
                        print(mpu_reading[6], mpu_reading[7])
                        self.x = np.append(self.x, float(mpu_reading[6]))
                        self.y = np.append(self.y, float(mpu_reading[7]))
                    elif self.current_sensor == GYROSCOPE:
                        # XY Plane (X takes field 4, Y takes field 3)
                        print(mpu_reading[3], mpu_reading[4])
                        self.x = np.append(self.x, float(mpu_reading[4]))
                        self.y = np.append(self.y, float(mpu_reading[3]))
                    elif self.current_sensor == ACCELEROMETER:
                        # XY Plane
                        print(mpu_reading[0], mpu_reading[1])
                        self.x = np.append(self.x, float(mpu_reading[0]))
                        self.y = np.append(self.y, float(mpu_reading[1]))
                except Exception as e:
                    print("Cannot make reading. //", e)
                # Reading delay
                time.sleep(self.time_interval)
                # Current sample count increase
                count += 1
                # Update time by our time interval
                time_value += self.time_interval
        time.sleep(0.5)
        # Draw reading after completed sampling.
        if self.current_sensor == MAGNETOMETER:
            self.draw_magnetometer(self.x, self.y)
        elif self.current_sensor == ACCELEROMETER:
            self.draw(self.x, self.y)
        elif self.current_sensor == GYROSCOPE:
            self.draw(self.x, self.y)
        # Show buttons after sampling is completed.
        self.start_button.show()
        self.save_button.show()
        self.stop_button.hide()
        if self.current_sensor == MAGNETOMETER:
            self.calibrate_button.show()
        else:
            self.calibrate_button.hide()

    def on_failed_connection(self):
        """Show an error dialog when the board connection failed.

        Renamed from ``on_faild_connection`` (typo) so the
        ``GLib.idle_add`` call in ``get_time`` resolves; an OK button was
        added so the dialog can actually be dismissed.
        """
        print("Failed Connection")
        failed_connection_dialog = Gtk.MessageDialog(transient_for=self,
                                                    flags=0,
                                                    message_type=Gtk.MessageType.ERROR,
                                                    buttons=Gtk.ButtonsType.OK,
                                                    text="Board communication error. No data will be taken",
                                                    title="Serial Error")
        failed_connection_dialog.run()
        failed_connection_dialog.destroy()

    def on_button_stop(self, widget):
        """Stop the sampling thread and close the serial port."""
        print("Stop Button")
        self.event.set()
        self.timer = None
        if self.micro_board != None:
            self.micro_board.reset_input_buffer()
            self.micro_board.close()

    def on_button_save(self, widget):
        """Save the captured x/y samples to a user-chosen CSV file."""
        print("Save Button")
        self.save_button.hide()
        self.start_button.hide()
        save_dialog = Gtk.FileChooserDialog(
            title="Save file as...", parent=self, action=Gtk.FileChooserAction.SAVE)
        save_dialog.add_buttons(Gtk.STOCK_CANCEL,
                                Gtk.ResponseType.CANCEL,
                                Gtk.STOCK_SAVE,
                                Gtk.ResponseType.OK)
        filter_csv = Gtk.FileFilter()
        filter_csv.add_pattern("*.CSV")
        filter_csv.set_name("CSV")
        save_dialog.add_filter(filter_csv)
        response = save_dialog.run()
        if response == Gtk.ResponseType.OK:
            filename = save_dialog.get_filename()
            if not filename.endswith(".csv"):
                filename += ".csv"
            # Context manager guarantees the file is closed even on error.
            with open(filename, 'w') as new_file:
                new_file.write("Time(s),Voltage(V)" + "\n")
                for i in range(self.x.size):
                    # Write sensor reading from memory.
                    new_file.write("{0:.4f}".format(self.x[i]) + "," + "{0:.4f}".format(self.y[i]) + "\n")
        save_dialog.destroy()
        self.start_button.show()
        self.save_button.show()

    def on_button_calibrate(self, widget):
        """Compute and draw the calibrated magnetometer ellipse."""
        print("Calibrate button")
        # BUG FIX: `not self.x[0]` raised IndexError on empty arrays and
        # wrongly rejected data whose first sample is exactly 0.0.
        if self.x.size == 0 or self.y.size == 0:
            print("Unable to make calibration. No data or data corrupted.")
            return
        mx, my = self.getMagnetometerCalibrationValues(self.x, self.y)
        self.draw_calibrated_magnetometer(self.x, self.y, mx, my)

    def getMagnetometerCalibrationValues(self, x, y):
        """Return (mx, my): x/y corrected by scale factor and offset."""
        x_sf, y_sf, x_off, y_off = self.getMagnetometerCalibrationParameters(x, y)
        print(f"x_sf = {x_sf}, y_sf = {y_sf}, x_off = {x_off}, y_off = {y_off}")
        mx = np.array([])
        my = np.array([])
        for x_i, y_i in np.nditer([x, y]):
            mx_i = x_sf * x_i + x_off
            my_i = y_sf * y_i + y_off
            mx = np.append(mx, mx_i)
            my = np.append(my, my_i)
        return mx, my

    def getMagnetometerCalibrationParameters(self, x, y):
        """Derive hard/soft-iron correction parameters from the raw ellipse.

        Returns:
            (x_sf, y_sf, x_off, y_off): per-axis scale factors (clamped to
            at least 1) and offsets that re-center the ellipse.
        """
        x_min = x.min()
        x_max = x.max()
        y_min = y.min()
        y_max = y.max()
        # Scale Factor
        x_sf = (y_max - y_min) / (x_max - x_min)
        y_sf = (x_max - x_min) / (y_max - y_min)
        if x_sf <= 1:
            x_sf = 1
        if y_sf <= 1:
            y_sf = 1
        # Offset
        x_off = ((x_max - x_min) / 2 - x_max) * x_sf
        y_off = ((y_max - y_min) / 2 - y_max) * y_sf
        return x_sf, y_sf, x_off, y_off
class Application(Gtk.Application):
    """GTK application wrapper that owns the single AppWindow instance."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.window = None

    def do_activate(self):
        """Create the main window on first activation and present it."""
        if not self.window:
            self.window = AppWindow(
                application=self, title="Single Point Measurement - PyGtk")
            self.window.show_all()
            self.window.save_button.hide()
            self.window.stop_button.hide()
            self.window.calibrate_button.hide()
        self.window.present()

    def do_shutdown(self):
        """Close the serial port (if open) and tear the window down."""
        # BUG FIX: the original closed `self.micro_board`, which does not
        # exist on Application (the board lives on the window); the resulting
        # AttributeError was silently swallowed by a bare `except`, so the
        # port was never released. Also guard against the window never having
        # been created.
        if self.window is not None and self.window.micro_board is not None:
            try:
                self.window.micro_board.close()
            except Exception:
                pass  # best-effort cleanup on shutdown
        print("Byeee")
        Gtk.Application.do_shutdown(self)
        if self.window:
            self.window.destroy()
# Script entry point: build the GTK application and hand control to its
# main loop, forwarding the command-line arguments to Gtk.
if __name__ == "__main__":
    app = Application()
    app.run(sys.argv)
| [
"gi.repository.Gtk.FileFilter",
"stepper.StepperMotor",
"sys.platform.startswith",
"time.sleep",
"numpy.array",
"numpy.sin",
"gi.repository.Gtk.Button.new_with_label",
"numpy.arange",
"gi.repository.GLib.idle_add",
"gi.repository.Gtk.Paned.new",
"gi.repository.Gtk.Label.new",
"glob.glob",
"g... | [((375, 407), 'gi.require_version', 'gi.require_version', (['"""Gtk"""', '"""3.0"""'], {}), "('Gtk', '3.0')\n", (393, 407), False, 'import gi\n'), ((750, 791), 'gi.repository.Gtk.Paned.new', 'Gtk.Paned.new', (['Gtk.Orientation.HORIZONTAL'], {}), '(Gtk.Orientation.HORIZONTAL)\n', (763, 791), False, 'from gi.repository import Gtk, GLib, Gio\n'), ((807, 863), 'gi.repository.Gtk.Box', 'Gtk.Box', ([], {'orientation': 'Gtk.Orientation.VERTICAL', 'spacing': '(5)'}), '(orientation=Gtk.Orientation.VERTICAL, spacing=5)\n', (814, 863), False, 'from gi.repository import Gtk, GLib, Gio\n'), ((921, 950), 'gi.repository.Gtk.Label.new', 'Gtk.Label.new', (['"""Serial Port:"""'], {}), "('Serial Port:')\n", (934, 950), False, 'from gi.repository import Gtk, GLib, Gio\n'), ((1103, 1121), 'gi.repository.Gtk.ComboBoxText', 'Gtk.ComboBoxText', ([], {}), '()\n', (1119, 1121), False, 'from gi.repository import Gtk, GLib, Gio\n'), ((1505, 1531), 'gi.repository.Gtk.Label.new', 'Gtk.Label.new', (['"""Samples: """'], {}), "('Samples: ')\n", (1518, 1531), False, 'from gi.repository import Gtk, GLib, Gio\n'), ((1638, 1680), 'gi.repository.Gtk.SpinButton.new_with_range', 'Gtk.SpinButton.new_with_range', (['(1)', '(1000)', '(10)'], {}), '(1, 1000, 10)\n', (1667, 1680), False, 'from gi.repository import Gtk, GLib, Gio\n'), ((1902, 1941), 'gi.repository.Gtk.Label.new', 'Gtk.Label.new', (['"""MPU sensor to be read:"""'], {}), "('MPU sensor to be read:')\n", (1915, 1941), False, 'from gi.repository import Gtk, GLib, Gio\n'), ((2138, 2156), 'gi.repository.Gtk.ComboBoxText', 'Gtk.ComboBoxText', ([], {}), '()\n', (2154, 2156), False, 'from gi.repository import Gtk, GLib, Gio\n'), ((2517, 2551), 'gi.repository.Gtk.Button.new_with_label', 'Gtk.Button.new_with_label', (['"""Start"""'], {}), "('Start')\n", (2542, 2551), False, 'from gi.repository import Gtk, GLib, Gio\n'), ((2728, 2761), 'gi.repository.Gtk.Button.new_with_label', 'Gtk.Button.new_with_label', (['"""Stop"""'], {}), "('Stop')\n", (2753, 
2761), False, 'from gi.repository import Gtk, GLib, Gio\n'), ((2935, 2968), 'gi.repository.Gtk.Button.new_with_label', 'Gtk.Button.new_with_label', (['"""Save"""'], {}), "('Save')\n", (2960, 2968), False, 'from gi.repository import Gtk, GLib, Gio\n'), ((3158, 3202), 'gi.repository.Gtk.Button.new_with_label', 'Gtk.Button.new_with_label', (['"""Stepper Routine"""'], {}), "('Stepper Routine')\n", (3183, 3202), False, 'from gi.repository import Gtk, GLib, Gio\n'), ((3411, 3449), 'gi.repository.Gtk.Button.new_with_label', 'Gtk.Button.new_with_label', (['"""Calibrate"""'], {}), "('Calibrate')\n", (3436, 3449), False, 'from gi.repository import Gtk, GLib, Gio\n'), ((4058, 4089), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(5, 4)', 'dpi': '(100)'}), '(figsize=(5, 4), dpi=100)\n', (4064, 4089), False, 'from matplotlib.figure import Figure\n'), ((4151, 4177), 'numpy.arange', 'np.arange', (['(0.0)', '(3.0)', '(0.015)'], {}), '(0.0, 3.0, 0.015)\n', (4160, 4177), True, 'import numpy as np\n'), ((4541, 4563), 'matplotlib.backends.backend_gtk3agg.FigureCanvasGTK3Agg', 'FigureCanvas', (['self.fig'], {}), '(self.fig)\n', (4553, 4563), True, 'from matplotlib.backends.backend_gtk3agg import FigureCanvasGTK3Agg as FigureCanvas\n'), ((6477, 6507), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (6500, 6507), False, 'import sys\n'), ((7694, 7867), 'gi.repository.Gtk.MessageDialog', 'Gtk.MessageDialog', ([], {'transient_for': 'self', 'flags': '(0)', 'message_type': 'Gtk.MessageType.ERROR', 'buttons': 'Gtk.ButtonsType.OK', 'text': '"""No serial port available"""', 'title': '"""Serial Port"""'}), "(transient_for=self, flags=0, message_type=Gtk.MessageType\n .ERROR, buttons=Gtk.ButtonsType.OK, text='No serial port available',\n title='Serial Port')\n", (7711, 7867), False, 'from gi.repository import Gtk, GLib, Gio\n'), ((8646, 8691), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.stepper_routine'}), 
'(target=self.stepper_routine)\n', (8662, 8691), False, 'import threading\n'), ((8763, 8801), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.get_time'}), '(target=self.get_time)\n', (8779, 8801), False, 'import threading\n'), ((8823, 8840), 'threading.Event', 'threading.Event', ([], {}), '()\n', (8838, 8840), False, 'import threading\n'), ((8996, 9010), 'stepper.StepperMotor', 'StepperMotor', ([], {}), '()\n', (9008, 9010), False, 'from stepper import StepperMotor\n'), ((9272, 9284), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (9280, 9284), True, 'import numpy as np\n'), ((9302, 9314), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (9310, 9314), True, 'import numpy as np\n'), ((14276, 14447), 'gi.repository.Gtk.MessageDialog', 'Gtk.MessageDialog', ([], {'transient_for': 'self', 'flags': '(0)', 'message_type': 'Gtk.MessageType.ERROR', 'text': '"""Board communication error. No data will be taken"""', 'title': '"""Serial Error"""'}), "(transient_for=self, flags=0, message_type=Gtk.MessageType\n .ERROR, text='Board communication error. 
No data will be taken', title=\n 'Serial Error')\n", (14293, 14447), False, 'from gi.repository import Gtk, GLib, Gio\n'), ((15134, 15233), 'gi.repository.Gtk.FileChooserDialog', 'Gtk.FileChooserDialog', ([], {'title': '"""Save file as..."""', 'parent': 'self', 'action': 'Gtk.FileChooserAction.SAVE'}), "(title='Save file as...', parent=self, action=Gtk.\n FileChooserAction.SAVE)\n", (15155, 15233), False, 'from gi.repository import Gtk, GLib, Gio\n'), ((15471, 15487), 'gi.repository.Gtk.FileFilter', 'Gtk.FileFilter', ([], {}), '()\n', (15485, 15487), False, 'from gi.repository import Gtk, GLib, Gio\n'), ((17010, 17022), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (17018, 17022), True, 'import numpy as np\n'), ((17036, 17048), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (17044, 17048), True, 'import numpy as np\n'), ((17073, 17090), 'numpy.nditer', 'np.nditer', (['[x, y]'], {}), '([x, y])\n', (17082, 17090), True, 'import numpy as np\n'), ((18528, 18561), 'gi.repository.Gtk.Application.do_shutdown', 'Gtk.Application.do_shutdown', (['self'], {}), '(self)\n', (18555, 18561), False, 'from gi.repository import Gtk, GLib, Gio\n'), ((4259, 4285), 'numpy.sin', 'np.sin', (['(2 * np.pi * self.x)'], {}), '(2 * np.pi * self.x)\n', (4265, 4285), True, 'import numpy as np\n'), ((13335, 13350), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (13345, 13350), False, 'import time\n'), ((17185, 17204), 'numpy.append', 'np.append', (['mx', 'mx_i'], {}), '(mx, mx_i)\n', (17194, 17204), True, 'import numpy as np\n'), ((17222, 17241), 'numpy.append', 'np.append', (['my', 'my_i'], {}), '(my, my_i)\n', (17231, 17241), True, 'import numpy as np\n'), ((6582, 6614), 'sys.platform.startswith', 'sys.platform.startswith', (['"""linux"""'], {}), "('linux')\n", (6605, 6614), False, 'import sys\n'), ((6618, 6651), 'sys.platform.startswith', 'sys.platform.startswith', (['"""cygwin"""'], {}), "('cygwin')\n", (6641, 6651), False, 'import sys\n'), ((6734, 6764), 'glob.glob', 
'glob.glob', (['"""/dev/tty[A-Za-z]*"""'], {}), "('/dev/tty[A-Za-z]*')\n", (6743, 6764), False, 'import glob\n'), ((6778, 6811), 'sys.platform.startswith', 'sys.platform.startswith', (['"""darwin"""'], {}), "('darwin')\n", (6801, 6811), False, 'import sys\n'), ((7015, 7040), 'serial.Serial', 'serial.Serial', (['port', '(9600)'], {}), '(port, 9600)\n', (7028, 7040), False, 'import serial\n'), ((9958, 9971), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (9968, 9971), False, 'import time\n'), ((13116, 13146), 'time.sleep', 'time.sleep', (['self.time_interval'], {}), '(self.time_interval)\n', (13126, 13146), False, 'import time\n'), ((6833, 6856), 'glob.glob', 'glob.glob', (['"""/dev/tty.*"""'], {}), "('/dev/tty.*')\n", (6842, 6856), False, 'import glob\n'), ((10355, 10395), 'gi.repository.GLib.idle_add', 'GLib.idle_add', (['self.on_failed_connection'], {}), '(self.on_failed_connection)\n', (10368, 10395), False, 'from gi.repository import Gtk, GLib, Gio\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import os
import io
import time
import numpy as np
import scipy.io as sio
from scipy import sparse as sp
from scipy import spatial
import cPickle as pkl
import networkx as nx
import random
import math
from collections import Counter
import graph
def cost(a, b):
    """Return the relative gap between a and b: (max/min) - 1, eps-smoothed.

    The 0.001 smoothing term keeps the ratio finite when either value is 0;
    identical inputs give exactly 0.
    """
    eps = 0.001
    larger, smaller = (a, b) if a >= b else (b, a)
    return (larger + eps) / (smaller + eps) - 1
def cos_sim(node_vec, neb_vec):
    """Cosine similarity between two vectors (1 minus SciPy's cosine distance)."""
    return 1 - spatial.distance.cosine(node_vec, neb_vec)
def create_degree(G):
    """Build per-degree buckets and a per-vertex degree vector for graph G.

    G maps vertex ids (assumed to be 0..n-1, since they index the output
    vector) to neighbour lists. Returns ``(degrees, degree_permuted)`` where
    ``degrees[d]`` holds the vertices of degree ``d`` plus ``before``/``after``
    links to the adjacent degree values, and ``degree_permuted[v]`` is the
    degree of vertex ``v``.
    """
    print (" - Creating degree vectors...")
    degree_permuted = np.zeros((len(G.keys()), ))
    degrees = {}
    seen = set()
    for v in G.keys():
        d = len(G[v])
        seen.add(d)
        degree_permuted[v] = d
        bucket = degrees.setdefault(d, {'vertices': []})
        bucket['vertices'].append(v)
    # Link each observed degree to its sorted neighbours.
    ordered = np.sort(np.array(list(seen), dtype='int'))
    last = len(ordered) - 1
    for i, d in enumerate(ordered):
        if i > 0:
            degrees[d]['before'] = ordered[i - 1]
        if i < last:
            degrees[d]['after'] = ordered[i + 1]
    print ("- Degree vectors created.")
    return degrees, degree_permuted
def verifyDegrees(degree_v_root, degree_a, degree_b):
    """Pick whichever of degree_a / degree_b lies closer to degree_v_root.

    A value of -1 marks an exhausted direction and disqualifies that side;
    on a tie, degree_a wins.
    """
    if degree_b == -1:
        return degree_a
    if degree_a == -1:
        return degree_b
    if abs(degree_b - degree_v_root) < abs(degree_a - degree_v_root):
        return degree_b
    return degree_a
def dump_to_disk(f, file_name):
    """Serialize object ``f`` to ``<file_name>.pkl`` at the highest pickle protocol."""
    handle = open(file_name + '.pkl', 'wb')
    try:
        pkl.dump(f, handle, protocol=pkl.HIGHEST_PROTOCOL)
    finally:
        handle.close()
def load_pkl(file_name):
    """Deserialize and return the object stored at ``<file_name>.pkl``."""
    handle = open(file_name + '.pkl', 'rb')
    try:
        return pkl.load(handle)
    finally:
        handle.close()
def sparse_to_tuple(sparse_mx):
    """Decompose a scipy sparse matrix into ``(coords, values, shape)``.

    ``coords`` is an (nnz, 2) array of [row, col] pairs, ``values`` the
    matching nonzero entries, ``shape`` the dense shape. Non-COO input is
    converted first.
    """
    coo = sparse_mx if sp.isspmatrix_coo(sparse_mx) else sparse_mx.tocoo()
    coords = np.vstack((coo.row, coo.col)).T
    return coords, coo.data, coo.shape
def parse_index_file(filename):
    """Read one integer per line from *filename* and return them as a list.

    Fix: the original iterated over a bare ``open(filename)`` handle that was
    never closed, leaking a file descriptor per call; a ``with`` block closes
    it deterministically. Order and values are unchanged.
    """
    index = []
    with open(filename) as fh:
        for line in fh:
            index.append(int(line.strip()))
    return index
def sample_mask(idx, l):
    """Create a boolean mask of length *l* that is True at positions *idx*.

    Fix: ``np.bool`` was deprecated in NumPy 1.20 and removed in 1.24, so the
    original raised ``AttributeError`` on current NumPy. The builtin ``bool``
    produces the identical dtype on every NumPy version.
    """
    mask = np.zeros(l)
    mask[idx] = 1
    return np.array(mask, dtype=bool)
def load_pdata(dataset_str):
    """Load a Planetoid-format citation dataset ('cora'/'citeseer'/'pubmed').

    Reads the pickled ``./data/ind.<dataset>.*`` files and returns
    ``(graph, features, train_out, test_out)``, where train_out/test_out are
    arrays of ``[node_index, 1-based_label]`` rows.

    NOTE(review): Python 2 idioms throughout (``xrange``, text-mode pickle
    handles that are never closed) -- needs attention when porting to py3.
    """
    if dataset_str != 'cora' and dataset_str != 'citeseer' and dataset_str != 'pubmed':
        # Only warns; execution continues and will fail later on missing files.
        print ('Use datasets other than Planetoid, change load functions')
        pass
    names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
    objects = []
    for i in xrange(len(names)):
        objects.append(pkl.load(open("./data/ind.{}.{}".format(dataset_str, names[i]))))
    # NOTE: the local 'graph' unpacked here shadows the module-level `import graph`.
    x, y, tx, ty, allx, ally, graph = tuple(objects)
    test_idx_reorder = parse_index_file("./data/ind.{}.test.index".format(dataset_str))
    test_idx_range = np.sort(test_idx_reorder)
    if dataset_str == 'citeseer':
        # Citeseer has isolated test nodes: pad tx/ty so the test index range
        # is contiguous, leaving zero rows for the missing nodes.
        test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
        tx_extended[test_idx_range-min(test_idx_range), :] = tx
        tx = tx_extended
        ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
        ty_extended[test_idx_range-min(test_idx_range), :] = ty
        ty = ty_extended
    # Reorder the test rows back into their original node positions.
    features = sp.vstack((allx, tx)).tolil()
    features[test_idx_reorder, :] = features[test_idx_range, :]
    labels = np.vstack((ally, ty))
    labels[test_idx_reorder, :] = labels[test_idx_range, :]
    idx_test = test_idx_range.tolist()
    idx_train = range(len(y))
    train_mask = sample_mask(idx_train, labels.shape[0])
    test_mask = sample_mask(idx_test, labels.shape[0])
    y_train = np.zeros(labels.shape)
    y_test = np.zeros(labels.shape)
    y_train[train_mask, :] = labels[train_mask, :]
    y_test[test_mask, :] = labels[test_mask, :]
    # Convert one-hot label rows to [index, 1-based class id] pairs.
    train_out = []
    for i in idx_train:
        ll = y_train[i].tolist()
        ll = ll.index(1) + 1
        train_out.append([i, ll])
    train_out = np.array(train_out)
    test_out = []
    for i in idx_test:
        ll = y_test[i].tolist()
        ll = ll.index(1) + 1
        test_out.append([i, ll])
    test_out = np.array(test_out)
    # Build the adjacency matrix and strip any self-loops from its diagonal.
    adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
    adj = adj - sp.dia_matrix((adj.diagonal()[np.newaxis, :], [0]), shape=adj.shape)
    adj.eliminate_zeros()
    # Check that diag is zero:
    assert np.diag(adj.todense()).sum() == 0
    # NOTE(review): adj_triu, edges, edges_all and num_mask are computed but
    # never returned or used below -- apparently dead code.
    adj_triu = sp.triu(adj)
    adj_tuple = sparse_to_tuple(adj_triu)
    edges = adj_tuple[0]
    edges_all = sparse_to_tuple(adj)[0]
    num_mask = int(np.floor(edges.shape[0] / 10.))
    return graph, features, train_out, test_out
| [
"networkx.from_dict_of_lists",
"scipy.spatial.distance.cosine",
"cPickle.dump",
"scipy.sparse.vstack",
"numpy.sort",
"scipy.sparse.triu",
"numpy.floor",
"numpy.array",
"numpy.zeros",
"numpy.vstack",
"scipy.sparse.isspmatrix_coo",
"cPickle.load"
] | [((1112, 1135), 'numpy.sort', 'np.sort', (['degrees_sorted'], {}), '(degrees_sorted)\n', (1119, 1135), True, 'import numpy as np\n'), ((2524, 2535), 'numpy.zeros', 'np.zeros', (['l'], {}), '(l)\n', (2532, 2535), True, 'import numpy as np\n'), ((2567, 2596), 'numpy.array', 'np.array', (['mask'], {'dtype': 'np.bool'}), '(mask, dtype=np.bool)\n', (2575, 2596), True, 'import numpy as np\n'), ((3176, 3201), 'numpy.sort', 'np.sort', (['test_idx_reorder'], {}), '(test_idx_reorder)\n', (3183, 3201), True, 'import numpy as np\n'), ((3780, 3801), 'numpy.vstack', 'np.vstack', (['(ally, ty)'], {}), '((ally, ty))\n', (3789, 3801), True, 'import numpy as np\n'), ((4073, 4095), 'numpy.zeros', 'np.zeros', (['labels.shape'], {}), '(labels.shape)\n', (4081, 4095), True, 'import numpy as np\n'), ((4110, 4132), 'numpy.zeros', 'np.zeros', (['labels.shape'], {}), '(labels.shape)\n', (4118, 4132), True, 'import numpy as np\n'), ((4397, 4416), 'numpy.array', 'np.array', (['train_out'], {}), '(train_out)\n', (4405, 4416), True, 'import numpy as np\n'), ((4575, 4593), 'numpy.array', 'np.array', (['test_out'], {}), '(test_out)\n', (4583, 4593), True, 'import numpy as np\n'), ((4864, 4876), 'scipy.sparse.triu', 'sp.triu', (['adj'], {}), '(adj)\n', (4871, 4876), True, 'from scipy import sparse as sp\n'), ((543, 585), 'scipy.spatial.distance.cosine', 'spatial.distance.cosine', (['node_vec', 'neb_vec'], {}), '(node_vec, neb_vec)\n', (566, 585), False, 'from scipy import spatial\n'), ((1872, 1922), 'cPickle.dump', 'pkl.dump', (['f', 'handle'], {'protocol': 'pkl.HIGHEST_PROTOCOL'}), '(f, handle, protocol=pkl.HIGHEST_PROTOCOL)\n', (1880, 1922), True, 'import cPickle as pkl\n'), ((2011, 2027), 'cPickle.load', 'pkl.load', (['handle'], {}), '(handle)\n', (2019, 2027), True, 'import cPickle as pkl\n'), ((2088, 2116), 'scipy.sparse.isspmatrix_coo', 'sp.isspmatrix_coo', (['sparse_mx'], {}), '(sparse_mx)\n', (2105, 2116), True, 'from scipy import sparse as sp\n'), ((4625, 4653), 
'networkx.from_dict_of_lists', 'nx.from_dict_of_lists', (['graph'], {}), '(graph)\n', (4646, 4653), True, 'import networkx as nx\n'), ((5009, 5040), 'numpy.floor', 'np.floor', (['(edges.shape[0] / 10.0)'], {}), '(edges.shape[0] / 10.0)\n', (5017, 5040), True, 'import numpy as np\n'), ((2171, 2212), 'numpy.vstack', 'np.vstack', (['(sparse_mx.row, sparse_mx.col)'], {}), '((sparse_mx.row, sparse_mx.col))\n', (2180, 2212), True, 'import numpy as np\n'), ((3669, 3690), 'scipy.sparse.vstack', 'sp.vstack', (['(allx, tx)'], {}), '((allx, tx))\n', (3678, 3690), True, 'from scipy import sparse as sp\n')] |
import unittest
import sys
import os.path
from os.path import exists, join
import json
import functools
import inspect
from utils_for_tests import SimpleCase, WS_DIR
try:
import tensorflow
TF_INSTALLED=True
if tensorflow.__version__.startswith('2.'):
TF_VERSION=2
else:
TF_VERSION=1
except ImportError:
TF_INSTALLED=False
try:
import numpy
NUMPY_INSTALLED=True
except ImportError:
NUMPY_INSTALLED=False
try:
import pandas
PANDAS_INSTALLED=True
except ImportError:
PANDAS_INSTALLED=False
from dataworkspaces.kits.wrapper_utils import NotSupportedError
def generator_from_arrays(x, y):
    """Yield (sample, label) pairs with a leading batch axis of size 1.

    Keras expects batched input of a fixed rank, so each 2-D sample is
    reshaped to (1, rows, cols) and each scalar label to (1, 1).
    """
    assert len(x) == len(y)
    rows, cols = x[0].shape
    for sample, label in zip(x, y):
        yield sample.reshape((1, rows, cols)), label.reshape((1, 1))
class TestTensorflowKit(SimpleCase):
    """Integration tests for the dataworkspaces tensorflow kit.

    Each test monkey-patches ``keras.Sequential`` with
    ``add_lineage_to_keras_model_class`` and then checks that the metrics
    reported by ``model.evaluate`` were also written to
    ``results/results.json`` inside the workspace.
    """
    def setUp(self):
        super().setUp()
        if TF_INSTALLED:
            import tensorflow as tf
            # Remember the unwrapped class so tearDown can restore it.
            self.sequential = tf.keras.Sequential
    def tearDown(self):
        super().tearDown()
        if TF_INSTALLED:
            import tensorflow as tf
            # Undo the monkey-patching performed by the individual tests.
            tf.keras.Sequential = self.sequential
    def _take_snapshot(self):
        # Record a workspace snapshot named 'S1' via the dws command line.
        self._run_dws(['snapshot', 'S1'], cwd=WS_DIR)
    @unittest.skipUnless(TF_INSTALLED, "SKIP: Tensorflow not available")
    def test_wrapper_for_numpy(self):
        """This test follows the basic classification tutorial.
        """
        import tensorflow as tf
        import tensorflow.keras as keras
        self._setup_initial_repo(git_resources='results', api_resources='fashion-mnist-data')
        from dataworkspaces.kits.tensorflow import add_lineage_to_keras_model_class, CheckpointConfig
        keras.Sequential = add_lineage_to_keras_model_class(keras.Sequential,
                                                         input_resource='fashion-mnist-data',
                                                         verbose=True,
                                                         workspace_dir=WS_DIR,
                                                         checkpoint_config=CheckpointConfig('fashion',
                                                                                           monitor='loss',
                                                                                           save_best_only=True))
        fashion_mnist = keras.datasets.fashion_mnist
        (train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
        # Scale pixel values into [0, 1].
        train_images = train_images / 255.0
        test_images = test_images / 255.0
        model = keras.Sequential([
            keras.layers.Flatten(input_shape=(28, 28)),
            keras.layers.Dense(128, activation='relu'),
            keras.layers.Dense(10, activation='softmax')
        ])
        model.compile(optimizer='adam',
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        model.fit(train_images, train_labels, epochs=5)
        test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
        print("test accuracy: %s" % test_acc)
        # The lineage wrapper should have mirrored the evaluate() metrics
        # into the results resource.
        results_file = join(WS_DIR, 'results/results.json')
        self.assertTrue(exists(results_file), "missing file %s" % results_file)
        with open(results_file, 'r') as f:
            data = json.load(f)
        # TF 1.x reports the metric under the key 'acc', TF 2.x under 'accuracy'.
        self.assertAlmostEqual(test_acc, data['metrics']['accuracy' if TF_VERSION==2 else 'acc'])
        self.assertAlmostEqual(test_loss, data['metrics']['loss'])
        self._take_snapshot()
    @unittest.skipUnless(TF_INSTALLED, "SKIP: Tensorflow not available")
    @unittest.skipUnless(NUMPY_INSTALLED, "SKIP: numpy not installed")
    @unittest.skipUnless(PANDAS_INSTALLED, 'SKIP: pandas not available')
    def test_wrapper_for_dataset(self):
        """This follows the csv tutorial (titanic data set)
        """
        import tensorflow as tf
        import pandas as pd
        import numpy as np
        self._setup_initial_repo(git_resources='results', api_resources='titanic-data')
        TRAIN_DATA_URL = "https://storage.googleapis.com/tf-datasets/titanic/train.csv"
        TEST_DATA_URL = "https://storage.googleapis.com/tf-datasets/titanic/eval.csv"
        train_file_path = tf.keras.utils.get_file("train.csv", TRAIN_DATA_URL)
        test_file_path = tf.keras.utils.get_file("eval.csv", TEST_DATA_URL)
        LABEL_COLUMN = 'survived'
        LABELS = [0, 1]
        def get_dataset(file_path, **kwargs):
            # Stream the CSV as a tf.data dataset of (features, label) batches.
            dataset = tf.data.experimental.make_csv_dataset(
                file_path,
                batch_size=5, # Artificially small to make examples easier to show.
                label_name=LABEL_COLUMN,
                na_value="?",
                num_epochs=1,
                ignore_errors=True,
                **kwargs)
            return dataset
        raw_train_data = get_dataset(train_file_path)
        raw_test_data = get_dataset(test_file_path)
        SELECT_COLUMNS = ['survived', 'age', 'n_siblings_spouses', 'parch', 'fare']
        DEFAULTS = [0, 0.0, 0.0, 0.0, 0.0]
        temp_dataset = get_dataset(train_file_path,
                                   select_columns=SELECT_COLUMNS,
                                   column_defaults = DEFAULTS)
        def pack(features, label):
            # Collapse the per-column feature dict into one dense tensor.
            return tf.stack(list(features.values()), axis=-1), label
        packed_dataset = temp_dataset.map(pack)
        class PackNumericFeatures(object):
            # Moves the named numeric columns into a single 'numeric' tensor.
            def __init__(self, names):
                self.names = names
            def __call__(self, features, labels):
                # NOTE(review): 'numeric_freatures' is a typo but harmless --
                # it is only read on the next line.
                numeric_freatures = [features.pop(name) for name in self.names]
                numeric_features = [tf.cast(feat, tf.float32) for feat in numeric_freatures]
                numeric_features = tf.stack(numeric_features, axis=-1)
                features['numeric'] = numeric_features
                #print('features type: %s, labels type: %s' % (type(features), type(labels)))
                return features, labels
        NUMERIC_FEATURES = ['age','n_siblings_spouses','parch', 'fare']
        packed_train_data = raw_train_data.map(PackNumericFeatures(NUMERIC_FEATURES))
        packed_test_data = raw_test_data.map(
            PackNumericFeatures(NUMERIC_FEATURES))
        # Normalize numeric columns with the training set's mean/std.
        desc = pd.read_csv(train_file_path)[NUMERIC_FEATURES].describe()
        MEAN = np.array(desc.T['mean'])
        STD = np.array(desc.T['std'])
        def normalize_numeric_data(data, mean, std):
            # Center the data
            return (data-mean)/std
        normalizer = functools.partial(normalize_numeric_data, mean=MEAN, std=STD)
        numeric_column = tf.feature_column.numeric_column('numeric', normalizer_fn=normalizer, shape=[len(NUMERIC_FEATURES)])
        numeric_columns = [numeric_column]
        numeric_layer = tf.keras.layers.DenseFeatures(numeric_columns)
        CATEGORIES = {
            'sex': ['male', 'female'],
            'class' : ['First', 'Second', 'Third'],
            'deck' : ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J'],
            'embark_town' : ['Cherbourg', 'Southhampton', 'Queenstown'],
            'alone' : ['y', 'n']
        }
        # One-hot encode each categorical column from its fixed vocabulary.
        categorical_columns = []
        for feature, vocab in CATEGORIES.items():
            cat_col = tf.feature_column.categorical_column_with_vocabulary_list(
                key=feature, vocabulary_list=vocab)
            categorical_columns.append(tf.feature_column.indicator_column(cat_col))
        categorical_layer = tf.keras.layers.DenseFeatures(categorical_columns)
        preprocessing_layer = tf.keras.layers.DenseFeatures(categorical_columns+numeric_columns)
        from dataworkspaces.kits.tensorflow import add_lineage_to_keras_model_class, CheckpointConfig
        tf.keras.Sequential = add_lineage_to_keras_model_class(tf.keras.Sequential, input_resource='titanic-data',
                                                            workspace_dir=WS_DIR,
                                                            checkpoint_config=CheckpointConfig('fashion',
                                                                                              monitor='loss',
                                                                                              save_best_only=True),
                                                            verbose=True)
        model = tf.keras.Sequential([
            preprocessing_layer,
            tf.keras.layers.Dense(128, activation='relu'),
            tf.keras.layers.Dense(128, activation='relu'),
            tf.keras.layers.Dense(1, activation='sigmoid'),
        ])
        model.compile(
            loss='binary_crossentropy',
            optimizer='adam',
            metrics=['accuracy'])
        train_data = packed_train_data.shuffle(500)
        test_data = packed_test_data
        if TF_VERSION==1:
            # Datasets are not supported by the wrapper under TF 1.x.
            with self.assertRaises(NotSupportedError):
                model.fit(train_data, epochs=20)
            return # stop early, not supported in 1.x
        else:
            model.fit(train_data, epochs=20)
        test_loss, test_accuracy = model.evaluate(test_data)
        print('\n\nTest Loss {}, Test Accuracy {}'.format(test_loss, test_accuracy))
        self.assertAlmostEqual(test_accuracy, 0.88, delta=0.2)
        self.assertAlmostEqual(test_loss, 0.31, delta=0.3)
        predictions = model.predict(test_data)
        results_file = join(WS_DIR, 'results/results.json')
        self.assertTrue(exists(results_file), "missing file %s" % results_file)
        with open(results_file, 'r') as f:
            data = json.load(f)
        self.assertAlmostEqual(test_accuracy, data['metrics']['accuracy' if TF_VERSION==2 else 'acc'])
        self.assertAlmostEqual(test_loss, data['metrics']['loss'])
        self._take_snapshot()
    @unittest.skipUnless(TF_INSTALLED, "SKIP: Tensorflow not available")
    def test_wrapper_for_generators(self):
        """This test follows the basic classification tutorial, modified for using
        the fit_generator() and eval_generator() methods.
        """
        import tensorflow as tf
        import tensorflow.keras as keras
        self._setup_initial_repo(git_resources='results', api_resources='fashion-mnist-data')
        from dataworkspaces.kits.tensorflow import add_lineage_to_keras_model_class, CheckpointConfig
        keras.Sequential = add_lineage_to_keras_model_class(keras.Sequential,
                                                         input_resource='fashion-mnist-data',
                                                         verbose=True,
                                                         workspace_dir=WS_DIR,
                                                         checkpoint_config=CheckpointConfig('fashion',
                                                                                           monitor='loss',
                                                                                           save_best_only=True))
        fashion_mnist = keras.datasets.fashion_mnist
        (train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
        train_images = train_images / 255.0
        test_images = test_images / 255.0
        model = keras.Sequential([
            keras.layers.Flatten(input_shape=(28, 28)),
            keras.layers.Dense(128, activation='relu'),
            keras.layers.Dense(10, activation='softmax')
        ])
        model.compile(optimizer='adam',
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        # Feed training data through a plain Python generator instead of arrays.
        g = generator_from_arrays(train_images, train_labels)
        self.assertTrue(inspect.isgenerator(g))
        model.fit(g, epochs=5, steps_per_epoch=2)
        g2 = generator_from_arrays(test_images, test_labels)
        test_loss, test_acc = model.evaluate(g2, steps=len(test_labels), verbose=2)
        print("test accuracy: %s" % test_acc)
        results_file = join(WS_DIR, 'results/results.json')
        self.assertTrue(exists(results_file), "missing file %s" % results_file)
        with open(results_file, 'r') as f:
            data = json.load(f)
        self.assertAlmostEqual(test_acc, data['metrics']['accuracy' if TF_VERSION==2 else 'acc'])
        self.assertAlmostEqual(test_loss, data['metrics']['loss'])
        self._take_snapshot()
    @unittest.skipUnless(TF_INSTALLED, "SKIP: Tensorflow not available")
    def test_wrapper_for_keras_sequence(self):
        """This test follows the basic classification tutorial, modified for using
        the fit_generator() and eval_generator() methods.
        """
        import tensorflow as tf
        import tensorflow.keras as keras
        import tensorflow.keras.utils as kerasutils
        class KSequence(kerasutils.Sequence):
            # Minimal keras.utils.Sequence over in-memory arrays, yielding
            # one (1, h, w) sample and (1, 1) label per index.
            def __init__(self, x, y):
                assert len(x)==len(y)
                self.x = x
                self.y = y
                old_shape = x[0].shape
                self.new_shape = (1, old_shape[0], old_shape[1])
            def __iter__(self):
                return generator_from_arrays(self.x, self.y)
            def __getitem__(self, idx):
                return (self.x[idx].reshape(self.new_shape), self.y[idx].reshape((1,1)))
            def __len__(self):
                return len(self.y)
        self._setup_initial_repo(git_resources='results', api_resources='fashion-mnist-data')
        from dataworkspaces.kits.tensorflow import add_lineage_to_keras_model_class, CheckpointConfig
        keras.Sequential = add_lineage_to_keras_model_class(keras.Sequential,
                                                         input_resource='fashion-mnist-data',
                                                         verbose=True,
                                                         workspace_dir=WS_DIR,
                                                         checkpoint_config=CheckpointConfig('fashion',
                                                                                           monitor='loss',
                                                                                           save_best_only=True))
        fashion_mnist = keras.datasets.fashion_mnist
        (train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
        train_images = train_images / 255.0
        test_images = test_images / 255.0
        model = keras.Sequential([
            keras.layers.Flatten(input_shape=(28, 28)),
            keras.layers.Dense(128, activation='relu'),
            keras.layers.Dense(10, activation='softmax')
        ])
        model.compile(optimizer='adam',
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        g = KSequence(train_images, train_labels)
        model.fit(g, epochs=5, steps_per_epoch=2)
        g2 = KSequence(test_images, test_labels)
        test_loss, test_acc = model.evaluate(g2, steps=len(test_labels), verbose=2)
        print("test accuracy: %s" % test_acc)
        results_file = join(WS_DIR, 'results/results.json')
        self.assertTrue(exists(results_file), "missing file %s" % results_file)
        with open(results_file, 'r') as f:
            data = json.load(f)
        self.assertAlmostEqual(test_acc, data['metrics']['accuracy' if TF_VERSION==2 else 'acc'])
        self.assertAlmostEqual(test_loss, data['metrics']['loss'])
        self._take_snapshot()
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| [
"pandas.read_csv",
"tensorflow.feature_column.indicator_column",
"numpy.array",
"tensorflow.keras.layers.Dense",
"unittest.main",
"tensorflow.cast",
"os.path.exists",
"tensorflow.keras.layers.DenseFeatures",
"inspect.isgenerator",
"tensorflow.stack",
"dataworkspaces.kits.tensorflow.CheckpointCon... | [((224, 263), 'tensorflow.__version__.startswith', 'tensorflow.__version__.startswith', (['"""2."""'], {}), "('2.')\n", (257, 263), False, 'import tensorflow\n'), ((1370, 1437), 'unittest.skipUnless', 'unittest.skipUnless', (['TF_INSTALLED', '"""SKIP: Tensorflow not available"""'], {}), "(TF_INSTALLED, 'SKIP: Tensorflow not available')\n", (1389, 1437), False, 'import unittest\n'), ((3680, 3747), 'unittest.skipUnless', 'unittest.skipUnless', (['TF_INSTALLED', '"""SKIP: Tensorflow not available"""'], {}), "(TF_INSTALLED, 'SKIP: Tensorflow not available')\n", (3699, 3747), False, 'import unittest\n'), ((3753, 3818), 'unittest.skipUnless', 'unittest.skipUnless', (['NUMPY_INSTALLED', '"""SKIP: numpy not installed"""'], {}), "(NUMPY_INSTALLED, 'SKIP: numpy not installed')\n", (3772, 3818), False, 'import unittest\n'), ((3824, 3891), 'unittest.skipUnless', 'unittest.skipUnless', (['PANDAS_INSTALLED', '"""SKIP: pandas not available"""'], {}), "(PANDAS_INSTALLED, 'SKIP: pandas not available')\n", (3843, 3891), False, 'import unittest\n'), ((9948, 10015), 'unittest.skipUnless', 'unittest.skipUnless', (['TF_INSTALLED', '"""SKIP: Tensorflow not available"""'], {}), "(TF_INSTALLED, 'SKIP: Tensorflow not available')\n", (9967, 10015), False, 'import unittest\n'), ((12507, 12574), 'unittest.skipUnless', 'unittest.skipUnless', (['TF_INSTALLED', '"""SKIP: Tensorflow not available"""'], {}), "(TF_INSTALLED, 'SKIP: Tensorflow not available')\n", (12526, 12574), False, 'import unittest\n'), ((15648, 15663), 'unittest.main', 'unittest.main', ([], {}), '()\n', (15661, 15663), False, 'import unittest\n'), ((3287, 3323), 'os.path.join', 'join', (['WS_DIR', '"""results/results.json"""'], {}), "(WS_DIR, 'results/results.json')\n", (3291, 3323), False, 'from os.path import exists, join\n'), ((4379, 4431), 'tensorflow.keras.utils.get_file', 'tf.keras.utils.get_file', (['"""train.csv"""', 'TRAIN_DATA_URL'], {}), "('train.csv', 
TRAIN_DATA_URL)\n", (4402, 4431), True, 'import tensorflow as tf\n'), ((4457, 4507), 'tensorflow.keras.utils.get_file', 'tf.keras.utils.get_file', (['"""eval.csv"""', 'TEST_DATA_URL'], {}), "('eval.csv', TEST_DATA_URL)\n", (4480, 4507), True, 'import tensorflow as tf\n'), ((6491, 6515), 'numpy.array', 'np.array', (["desc.T['mean']"], {}), "(desc.T['mean'])\n", (6499, 6515), True, 'import numpy as np\n'), ((6530, 6553), 'numpy.array', 'np.array', (["desc.T['std']"], {}), "(desc.T['std'])\n", (6538, 6553), True, 'import numpy as np\n'), ((6693, 6754), 'functools.partial', 'functools.partial', (['normalize_numeric_data'], {'mean': 'MEAN', 'std': 'STD'}), '(normalize_numeric_data, mean=MEAN, std=STD)\n', (6710, 6754), False, 'import functools\n'), ((6948, 6994), 'tensorflow.keras.layers.DenseFeatures', 'tf.keras.layers.DenseFeatures', (['numeric_columns'], {}), '(numeric_columns)\n', (6977, 6994), True, 'import tensorflow as tf\n'), ((7626, 7676), 'tensorflow.keras.layers.DenseFeatures', 'tf.keras.layers.DenseFeatures', (['categorical_columns'], {}), '(categorical_columns)\n', (7655, 7676), True, 'import tensorflow as tf\n'), ((7707, 7775), 'tensorflow.keras.layers.DenseFeatures', 'tf.keras.layers.DenseFeatures', (['(categorical_columns + numeric_columns)'], {}), '(categorical_columns + numeric_columns)\n', (7736, 7775), True, 'import tensorflow as tf\n'), ((9550, 9586), 'os.path.join', 'join', (['WS_DIR', '"""results/results.json"""'], {}), "(WS_DIR, 'results/results.json')\n", (9554, 9586), False, 'from os.path import exists, join\n'), ((12114, 12150), 'os.path.join', 'join', (['WS_DIR', '"""results/results.json"""'], {}), "(WS_DIR, 'results/results.json')\n", (12118, 12150), False, 'from os.path import exists, join\n'), ((15229, 15265), 'os.path.join', 'join', (['WS_DIR', '"""results/results.json"""'], {}), "(WS_DIR, 'results/results.json')\n", (15233, 15265), False, 'from os.path import exists, join\n'), ((3348, 3368), 'os.path.exists', 'exists', (['results_file'], 
{}), '(results_file)\n', (3354, 3368), False, 'from os.path import exists, join\n'), ((3466, 3478), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3475, 3478), False, 'import json\n'), ((4634, 4784), 'tensorflow.data.experimental.make_csv_dataset', 'tf.data.experimental.make_csv_dataset', (['file_path'], {'batch_size': '(5)', 'label_name': 'LABEL_COLUMN', 'na_value': '"""?"""', 'num_epochs': '(1)', 'ignore_errors': '(True)'}), "(file_path, batch_size=5, label_name=\n LABEL_COLUMN, na_value='?', num_epochs=1, ignore_errors=True, **kwargs)\n", (4671, 4784), True, 'import tensorflow as tf\n'), ((7403, 7500), 'tensorflow.feature_column.categorical_column_with_vocabulary_list', 'tf.feature_column.categorical_column_with_vocabulary_list', ([], {'key': 'feature', 'vocabulary_list': 'vocab'}), '(key=feature,\n vocabulary_list=vocab)\n', (7460, 7500), True, 'import tensorflow as tf\n'), ((9611, 9631), 'os.path.exists', 'exists', (['results_file'], {}), '(results_file)\n', (9617, 9631), False, 'from os.path import exists, join\n'), ((9729, 9741), 'json.load', 'json.load', (['f'], {}), '(f)\n', (9738, 9741), False, 'import json\n'), ((11826, 11848), 'inspect.isgenerator', 'inspect.isgenerator', (['g'], {}), '(g)\n', (11845, 11848), False, 'import inspect\n'), ((12175, 12195), 'os.path.exists', 'exists', (['results_file'], {}), '(results_file)\n', (12181, 12195), False, 'from os.path import exists, join\n'), ((12293, 12305), 'json.load', 'json.load', (['f'], {}), '(f)\n', (12302, 12305), False, 'import json\n'), ((15290, 15310), 'os.path.exists', 'exists', (['results_file'], {}), '(results_file)\n', (15296, 15310), False, 'from os.path import exists, join\n'), ((15408, 15420), 'json.load', 'json.load', (['f'], {}), '(f)\n', (15417, 15420), False, 'import json\n'), ((2230, 2294), 'dataworkspaces.kits.tensorflow.CheckpointConfig', 'CheckpointConfig', (['"""fashion"""'], {'monitor': '"""loss"""', 'save_best_only': '(True)'}), "('fashion', monitor='loss', 
save_best_only=True)\n", (2246, 2294), False, 'from dataworkspaces.kits.tensorflow import add_lineage_to_keras_model_class, CheckpointConfig\n'), ((2765, 2807), 'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {'input_shape': '(28, 28)'}), '(input_shape=(28, 28))\n', (2785, 2807), True, 'import tensorflow.keras as keras\n'), ((2821, 2863), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (2839, 2863), True, 'import tensorflow.keras as keras\n'), ((2877, 2921), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(10)'], {'activation': '"""softmax"""'}), "(10, activation='softmax')\n", (2895, 2921), True, 'import tensorflow.keras as keras\n'), ((5920, 5955), 'tensorflow.stack', 'tf.stack', (['numeric_features'], {'axis': '(-1)'}), '(numeric_features, axis=-1)\n', (5928, 5955), True, 'import tensorflow as tf\n'), ((7553, 7596), 'tensorflow.feature_column.indicator_column', 'tf.feature_column.indicator_column', (['cat_col'], {}), '(cat_col)\n', (7587, 7596), True, 'import tensorflow as tf\n'), ((8157, 8221), 'dataworkspaces.kits.tensorflow.CheckpointConfig', 'CheckpointConfig', (['"""fashion"""'], {'monitor': '"""loss"""', 'save_best_only': '(True)'}), "('fashion', monitor='loss', save_best_only=True)\n", (8173, 8221), False, 'from dataworkspaces.kits.tensorflow import add_lineage_to_keras_model_class, CheckpointConfig\n'), ((8576, 8621), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (8597, 8621), True, 'import tensorflow as tf\n'), ((8635, 8680), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (8656, 8680), True, 'import tensorflow as tf\n'), ((8694, 8740), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (8715, 8740), 
True, 'import tensorflow as tf\n'), ((10890, 10954), 'dataworkspaces.kits.tensorflow.CheckpointConfig', 'CheckpointConfig', (['"""fashion"""'], {'monitor': '"""loss"""', 'save_best_only': '(True)'}), "('fashion', monitor='loss', save_best_only=True)\n", (10906, 10954), False, 'from dataworkspaces.kits.tensorflow import add_lineage_to_keras_model_class, CheckpointConfig\n'), ((11426, 11468), 'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {'input_shape': '(28, 28)'}), '(input_shape=(28, 28))\n', (11446, 11468), True, 'import tensorflow.keras as keras\n'), ((11482, 11524), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (11500, 11524), True, 'import tensorflow.keras as keras\n'), ((11538, 11582), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(10)'], {'activation': '"""softmax"""'}), "(10, activation='softmax')\n", (11556, 11582), True, 'import tensorflow.keras as keras\n'), ((14077, 14141), 'dataworkspaces.kits.tensorflow.CheckpointConfig', 'CheckpointConfig', (['"""fashion"""'], {'monitor': '"""loss"""', 'save_best_only': '(True)'}), "('fashion', monitor='loss', save_best_only=True)\n", (14093, 14141), False, 'from dataworkspaces.kits.tensorflow import add_lineage_to_keras_model_class, CheckpointConfig\n'), ((14613, 14655), 'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {'input_shape': '(28, 28)'}), '(input_shape=(28, 28))\n', (14633, 14655), True, 'import tensorflow.keras as keras\n'), ((14669, 14711), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (14687, 14711), True, 'import tensorflow.keras as keras\n'), ((14725, 14769), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(10)'], {'activation': '"""softmax"""'}), "(10, activation='softmax')\n", (14743, 14769), True, 'import tensorflow.keras as keras\n'), ((5828, 5853), 'tensorflow.cast', 'tf.cast', 
(['feat', 'tf.float32'], {}), '(feat, tf.float32)\n', (5835, 5853), True, 'import tensorflow as tf\n'), ((6418, 6446), 'pandas.read_csv', 'pd.read_csv', (['train_file_path'], {}), '(train_file_path)\n', (6429, 6446), True, 'import pandas as pd\n')] |
import gensim
from gensim.utils import simple_preprocess
from gensim.parsing.preprocessing import STOPWORDS
from nltk.stem import WordNetLemmatizer, SnowballStemmer
from nltk.stem.porter import *
import numpy as np
# Fix the RNG seed so the LDA training below is reproducible.
np.random.seed(400)
import nltk
nltk.download('wordnet')
import pandas as pd
# ABC-news headlines corpus; error_bad_lines=False skips malformed rows.
data = pd.read_csv('abcnews-date-text.csv', error_bad_lines=False);
# We only need the Headlines text column from the data
data_text = data[:300000][['headline_text']];
data_text['index'] = data_text.index
documents = data_text
# Pre-tokenised documents cached on disk -- presumably produced by an
# earlier preprocessing run of this pipeline (TODO confirm).
processed_docs=np.load("processed_docs.npy",allow_pickle=True)
'''
Create a dictionary from 'processed_docs' containing the number of times a word appears
in the training set using gensim.corpora.Dictionary and call it 'dictionary'
'''
dictionary = gensim.corpora.Dictionary(processed_docs)
# apply dictionary.filter_extremes() with the parameters mentioned above
dictionary.filter_extremes(no_below=5, no_above=0.5, keep_n=100000)
'''
Create the Bag-of-words model for each document i.e for each document we create a dictionary reporting how many
words and how many times those words appear. Save this to 'bow_corpus'
'''
bow_corpus=[dictionary.doc2bow(doc) for doc in processed_docs]
'''
Create tf-idf model object using models.TfidfModel on 'bow_corpus' and save it to 'tfidf'
'''
from gensim import corpora, models
tfidf = models.TfidfModel(bow_corpus)
'''
Apply transformation to the entire corpus
'''
corpus_tfidf = tfidf[bow_corpus]
# Train one LDA model on raw bag-of-words counts...
lda_model = gensim.models.LdaMulticore(bow_corpus,
                                   num_topics = 10,
                                   id2word = dictionary,
                                   passes = 5)
# ...and a second on the tf-idf-weighted corpus for comparison.
lda_model_tfidf = gensim.models.LdaMulticore(corpus_tfidf,
                                   num_topics = 10,
                                   id2word = dictionary,
                                   passes = 5)
document_num = 4310
# Our test document is document number 4310
# Print each model's topics for the test document, highest score first.
for index, score in sorted(lda_model[bow_corpus[document_num]], key=lambda tup: -1*tup[1]):
    print("\nScore: {}\t \nTopic: {}".format(score, lda_model.print_topic(index, 10)))
for index, score in sorted(lda_model_tfidf[bow_corpus[document_num]], key=lambda tup: -1*tup[1]):
    print("\nScore: {}\t \nTopic: {}".format(score, lda_model_tfidf.print_topic(index, 10)))
"gensim.corpora.Dictionary",
"nltk.download",
"pandas.read_csv",
"gensim.models.LdaMulticore",
"numpy.random.seed",
"numpy.load",
"gensim.models.TfidfModel"
] | [((215, 234), 'numpy.random.seed', 'np.random.seed', (['(400)'], {}), '(400)\n', (229, 234), True, 'import numpy as np\n'), ((247, 271), 'nltk.download', 'nltk.download', (['"""wordnet"""'], {}), "('wordnet')\n", (260, 271), False, 'import nltk\n'), ((301, 360), 'pandas.read_csv', 'pd.read_csv', (['"""abcnews-date-text.csv"""'], {'error_bad_lines': '(False)'}), "('abcnews-date-text.csv', error_bad_lines=False)\n", (312, 360), True, 'import pandas as pd\n'), ((539, 587), 'numpy.load', 'np.load', (['"""processed_docs.npy"""'], {'allow_pickle': '(True)'}), "('processed_docs.npy', allow_pickle=True)\n", (546, 587), True, 'import numpy as np\n'), ((775, 816), 'gensim.corpora.Dictionary', 'gensim.corpora.Dictionary', (['processed_docs'], {}), '(processed_docs)\n', (800, 816), False, 'import gensim\n'), ((1357, 1386), 'gensim.models.TfidfModel', 'models.TfidfModel', (['bow_corpus'], {}), '(bow_corpus)\n', (1374, 1386), False, 'from gensim import corpora, models\n'), ((1485, 1572), 'gensim.models.LdaMulticore', 'gensim.models.LdaMulticore', (['bow_corpus'], {'num_topics': '(10)', 'id2word': 'dictionary', 'passes': '(5)'}), '(bow_corpus, num_topics=10, id2word=dictionary,\n passes=5)\n', (1511, 1572), False, 'import gensim\n'), ((1741, 1830), 'gensim.models.LdaMulticore', 'gensim.models.LdaMulticore', (['corpus_tfidf'], {'num_topics': '(10)', 'id2word': 'dictionary', 'passes': '(5)'}), '(corpus_tfidf, num_topics=10, id2word=dictionary,\n passes=5)\n', (1767, 1830), False, 'import gensim\n')] |
#!/usr/bin/env python3
#####################################
#
# Filename : test_Superimposer.py
#
# Projectname : diSTruct
#
# Author : <NAME>
#
# Creation Date : Fri 18 May 2018 06:28:53 PM CEST
#
# Last Modified : Fri 29 Mar 2019 04:12:22 PM CET
#
#####################################
import numpy as np
from pytest import approx
from distruct import config
from distruct import Superimposer
from distruct import Distructure
from distruct.tools.pdb import cull_atoms
testFilePath = config.data_path + "tests/"
def test_superimposer():
    """Superimposing one rigid four-point fragment onto another gives ~zero RMS."""
    fixed_coords = np.array([[51.65, -1.90, 50.07],
                             [50.40, -1.23, 50.65],
                             [50.68, -0.04, 51.54],
                             [50.22, -0.02, 52.85]], 'f')
    moving_coords = np.array([[51.30, -2.99, 46.54],
                              [51.09, -1.88, 47.58],
                              [52.36, -1.20, 48.03],
                              [52.71, -1.18, 49.38]], 'f')
    aligner = Superimposer()
    aligner.set_coords(fixed_coords, moving_coords)
    # TODO is this really that bad??
    assert aligner.rms == approx(0., abs=1e-2)
    return
def test_superimposer_atoms():
    """Superimposing a structure's atoms onto an identical copy yields zero RMS."""
    from Bio.PDB.PDBParser import PDBParser

    pdb_code = '1ptq'
    pdb_path = testFilePath + pdb_code + '.pdb'

    def parse():
        # Parse a fresh structure object from the same file each time.
        return PDBParser().get_structure(pdb_code, pdb_path)

    reference, mobile = parse(), parse()
    # TODO transform the mobile copy before superimposing
    aligner = Superimposer()
    aligner.set_atoms(list(reference.get_atoms()), list(mobile.get_atoms()))
    assert aligner.rms == approx(0.)
    return
def test_superimposer_structure():
    """Exercise Superimposer.set_structures with a reference PDB and a Distructure."""
    from Bio import SeqIO
    from Bio.PDB import PDBParser

    pdb_code = '1ptq'
    pdb_path = testFilePath + pdb_code + '.pdb'
    reference = PDBParser().get_structure(pdb_code, pdb_path)

    # Chain sequences come from the SEQRES records of the same file.
    with open(pdb_path, 'r') as handle:
        chain_seqs = [record.seq for record in SeqIO.parse(handle, "pdb-seqres")]

    # Keep only standard residues (hetero flag ' ').
    residue_ids = [r.get_id() for r in reference.get_residues() if r.get_id()[0] == ' ']
    ds = Distructure('test', chain_seqs, [residue_ids])
    ds.generate_primary_contacts()
    ds.run()

    # Parse a fresh copy of the reference before superimposing.
    reference = PDBParser().get_structure(pdb_code, pdb_path)
    sup = Superimposer()
    sup.set_structures(reference, ds)
    return
def test_compare():
    """
    Compare the result of the diSTruct superimposer to the biopython one.
    """
    from Bio import SeqIO
    from Bio.PDB import Superimposer as BPSuperimposer
    from Bio.PDB import PDBParser
    from distruct.tools.pdb import get_contacts
    code = '1ptq'
    fileName = testFilePath + code + '.pdb'
    refStructure = PDBParser().get_structure(code, fileName)
    # Tertiary contacts extracted from the reference model.
    contacts = get_contacts(refStructure[0], cutOff=5., minSeqDist=0)
    sequences = []
    with open(fileName, 'r') as f:
        sequences = [r.seq for r in SeqIO.parse(f, "pdb-seqres")]
        pass
    # Build a Distructure from the sequences, keeping only standard residues
    # (hetero flag ' ') of each chain.
    ds = Distructure(
        'test',
        sequences,
        [
            [r.get_id() for r in c if r.get_id()[0] == ' ']
            for c in refStructure[0]
        ]
    )
    ds.generate_primary_contacts()
    ds.set_tertiary_contacts(contacts)
    ds.run()
    # Re-parse the reference and cull both atom lists down to the atoms the
    # two structures have in common.
    refStructure = PDBParser().get_structure(code, fileName)
    tempStructure = ds.copy()
    refAtoms = list(cull_atoms(refStructure.get_atoms(), ds))
    resAtoms = list(cull_atoms(tempStructure.get_atoms(), refStructure))
    assert len(refAtoms) > 3
    assert len(refAtoms) == len(resAtoms)
    # RMSD according to the diSTruct superimposer ...
    dssup = Superimposer()
    dssup.set_atoms(refAtoms, resAtoms)
    dsRMSD = dssup.rms
    # ... and according to Biopython's rotation-based superimposer.
    bpsup = BPSuperimposer()
    bpsup.set_atoms(refAtoms, resAtoms)
    bpRMSD = bpsup.rms
    # NOTE(review): presumably the reconstruction can come out as the mirror
    # image; inverting the coordinates and keeping the better Biopython RMSD
    # accounts for that enantiomer — confirm against diSTruct's docs.
    for atom in resAtoms:
        atom.set_coord(-1 * atom.get_coord())
        pass
    bpsup.set_atoms(refAtoms, resAtoms)
    if bpsup.rms < bpRMSD:
        bpRMSD = bpsup.rms
        pass
    # Both superimposers should agree on the optimal RMSD.
    assert dsRMSD == approx(bpRMSD)
    return
| [
"pytest.approx",
"distruct.tools.pdb.get_contacts",
"Bio.PDB.PDBParser",
"distruct.Superimposer",
"numpy.array",
"Bio.SeqIO.parse",
"Bio.PDB.Superimposer"
] | [((554, 663), 'numpy.array', 'np.array', (['[[51.65, -1.9, 50.07], [50.4, -1.23, 50.65], [50.68, -0.04, 51.54], [50.22,\n -0.02, 52.85]]', '"""f"""'], {}), "([[51.65, -1.9, 50.07], [50.4, -1.23, 50.65], [50.68, -0.04, 51.54],\n [50.22, -0.02, 52.85]], 'f')\n", (562, 663), True, 'import numpy as np\n'), ((698, 807), 'numpy.array', 'np.array', (['[[51.3, -2.99, 46.54], [51.09, -1.88, 47.58], [52.36, -1.2, 48.03], [52.71,\n -1.18, 49.38]]', '"""f"""'], {}), "([[51.3, -2.99, 46.54], [51.09, -1.88, 47.58], [52.36, -1.2, 48.03],\n [52.71, -1.18, 49.38]], 'f')\n", (706, 807), True, 'import numpy as np\n'), ((992, 1006), 'distruct.Superimposer', 'Superimposer', ([], {}), '()\n', (1004, 1006), False, 'from distruct import Superimposer\n'), ((1416, 1430), 'distruct.Superimposer', 'Superimposer', ([], {}), '()\n', (1428, 1430), False, 'from distruct import Superimposer\n'), ((2143, 2157), 'distruct.Superimposer', 'Superimposer', ([], {}), '()\n', (2155, 2157), False, 'from distruct import Superimposer\n'), ((2627, 2682), 'distruct.tools.pdb.get_contacts', 'get_contacts', (['refStructure[0]'], {'cutOff': '(5.0)', 'minSeqDist': '(0)'}), '(refStructure[0], cutOff=5.0, minSeqDist=0)\n', (2639, 2682), False, 'from distruct.tools.pdb import get_contacts\n'), ((3421, 3435), 'distruct.Superimposer', 'Superimposer', ([], {}), '()\n', (3433, 3435), False, 'from distruct import Superimposer\n'), ((3512, 3528), 'Bio.PDB.Superimposer', 'BPSuperimposer', ([], {}), '()\n', (3526, 3528), True, 'from Bio.PDB import Superimposer as BPSuperimposer\n'), ((1091, 1112), 'pytest.approx', 'approx', (['(0.0)'], {'abs': '(0.01)'}), '(0.0, abs=0.01)\n', (1097, 1112), False, 'from pytest import approx\n'), ((1525, 1536), 'pytest.approx', 'approx', (['(0.0)'], {}), '(0.0)\n', (1531, 1536), False, 'from pytest import approx\n'), ((3808, 3822), 'pytest.approx', 'approx', (['bpRMSD'], {}), '(bpRMSD)\n', (3814, 3822), False, 'from pytest import approx\n'), ((1278, 1289), 'Bio.PDB.PDBParser', 'PDBParser', 
([], {}), '()\n', (1287, 1289), False, 'from Bio.PDB import PDBParser\n'), ((1334, 1345), 'Bio.PDB.PDBParser', 'PDBParser', ([], {}), '()\n', (1343, 1345), False, 'from Bio.PDB import PDBParser\n'), ((1728, 1739), 'Bio.PDB.PDBParser', 'PDBParser', ([], {}), '()\n', (1737, 1739), False, 'from Bio.PDB import PDBParser\n'), ((2090, 2101), 'Bio.PDB.PDBParser', 'PDBParser', ([], {}), '()\n', (2099, 2101), False, 'from Bio.PDB import PDBParser\n'), ((2570, 2581), 'Bio.PDB.PDBParser', 'PDBParser', ([], {}), '()\n', (2579, 2581), False, 'from Bio.PDB import PDBParser\n'), ((3128, 3139), 'Bio.PDB.PDBParser', 'PDBParser', ([], {}), '()\n', (3137, 3139), False, 'from Bio.PDB import PDBParser\n'), ((1861, 1889), 'Bio.SeqIO.parse', 'SeqIO.parse', (['f', '"""pdb-seqres"""'], {}), "(f, 'pdb-seqres')\n", (1872, 1889), False, 'from Bio import SeqIO\n'), ((2773, 2801), 'Bio.SeqIO.parse', 'SeqIO.parse', (['f', '"""pdb-seqres"""'], {}), "(f, 'pdb-seqres')\n", (2784, 2801), False, 'from Bio import SeqIO\n')] |
import argparse
import logging
import os
import numpy as np
import torch.cuda
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
cudnn.benchmark = True
from utils.iam_loader import IAMLoader
from config import *
from models import HTRNetC
from utils.auxilary_functions import affine_transformation
from valid_deforms import local_deform, morphological, uncertainty_reduction
import torch.nn.functional as F
# Configure logging once for the whole training script.
logging.basicConfig(format='[%(asctime)s, %(levelname)s, %(name)s] %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S',
                    level=logging.INFO)
logger = logging.getLogger('HTR-Experiment::train')
logger.info('--- Running HTR Training ---')
# argument parsing
parser = argparse.ArgumentParser()
# - train arguments
parser.add_argument('--learning_rate', '-lr', type=float, default=1e-3,
                    help='lr')
parser.add_argument('--solver_type', '-st', choices=['SGD', 'Adam'], default='Adam',
                    help='Which solver type to use. Possible: SGD, Adam. Default: Adam')
parser.add_argument('--display', action='store', type=int, default=100,
                    help='The number of iterations after which to display the loss values. Default: 100')
# NOTE(review): default='0' is a string, but argparse runs string defaults
# through type=int, so args.gpu_id still ends up an int.
parser.add_argument('--gpu_id', '-gpu', action='store', type=int, default='0',
                    help='The ID of the GPU to use. If not specified, training is run in CPU mode.')
parser.add_argument('--df', action='store_true')
parser.add_argument('--batch_size', action='store', type=int, default=20)
parser.add_argument('--epochs', action='store', type=int, default=120)
parser.add_argument('--restart_epochs', action='store', type=int, default=40)
args = parser.parse_args()
print(args.df)
# Select the requested GPU when CUDA is available, otherwise fall back to CPU.
device = torch.device("cuda:"+str(args.gpu_id) if torch.cuda.is_available() else "cpu")
logger.info('###########################################')
# prepare dataset loader
logger.info('Loading dataset.')
# Random affine jitter used as train-time augmentation only.
aug_transforms = [lambda x: affine_transformation(x, s=.1)]
train_set = IAMLoader('train', level=level, fixed_size=fixed_size, transforms=aug_transforms)
test_set = IAMLoader('test', level=level, fixed_size=fixed_size)
batch_size = args.batch_size
# augmentation using data sampler
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=8)
test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=8)
# load CNN
logger.info('Preparing Net...')
# Checkpoint filename encodes whether deformable layers (--df) are enabled.
save_name = 'iam_cnn'
if args.df:
    save_name += '_df'
save_name += "_asd2.pt"
net = HTRNetC(cnn_cfg, cnn_top, len(classes), stn=False, df=args.df)
#net.load_state_dict(torch.load(save_name))
net.to(device)
nlr = args.learning_rate
max_epochs = args.epochs
restart_epochs = args.restart_epochs
# AdamW with cosine-annealing schedule, restarted every restart_epochs epochs
# (see the warm-restart block in the main loop below).
parameters = list(net.parameters())
optimizer = torch.optim.AdamW(parameters, nlr, weight_decay=0.00005)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, restart_epochs)
def train(epoch):
    """Run one training epoch of CTC loss over train_loader.

    Uses the module-level globals: net, optimizer, train_loader, test_set,
    device, cdict/icdict and args. Every `args.display` iterations it logs
    the running loss and prints a greedy decode of one random test sample.
    """
    net.train()
    closs = []  # running list of per-batch losses since the last log line
    for iter_idx, (img, transcr) in enumerate(train_loader):
        optimizer.zero_grad()
        img = Variable(img.to(device))
        # geometrical and morphological deformations
        '''
        rids = torch.BoolTensor(torch.bernoulli(.1 * torch.ones(img.size(0))).bool())
        if sum(rids) > 1:
            u = 4 # 2 ** np.random.randint(2, 5)
            img[rids] = local_deform(img[rids], rm=None, dscale=u, a=np.random.uniform(2.0, 4.0))
        rids = torch.BoolTensor(torch.bernoulli(.1 * torch.ones(img.size(0))).bool())
        if sum(rids) > 1:
            u = 4 # 2 ** np.random.randint(2, 5)
            img[rids] = morphological(img[rids], rm=None, dscale=u)
        '''
        output = net(img.detach()) #.permute(2, 0, 1)
        # CTC bookkeeping: every sample in the batch has the same output
        # length; labels are the concatenated, integer-encoded transcripts.
        act_lens = torch.IntTensor(img.size(0)*[output.size(0)])
        labels = torch.IntTensor([cdict[c] for c in ''.join(transcr)])
        label_lens = torch.IntTensor([len(t) for t in transcr])
        ls_output = F.log_softmax(output, dim=2).cpu()
        loss_val = F.ctc_loss(ls_output, labels, act_lens, label_lens, zero_infinity=True, reduction='sum') / img.size(0)
        closs += [loss_val.data]
        loss_val.backward()
        optimizer.step()
        # mean running errors??
        if iter_idx % args.display == args.display-1:
            logger.info('Epoch %d, Iteration %d: %f', epoch, iter_idx+1, sum(closs)/len(closs))
            closs = []
            # Quick qualitative check: greedy-decode one random test sample.
            net.eval()
            tst_img, tst_transcr = test_set.__getitem__(np.random.randint(test_set.__len__()))
            print('orig:: ' + tst_transcr)
            with torch.no_grad():
                tst_o = net(Variable(tst_img.to(device)).unsqueeze(0)) #.permute(2, 0, 1)
            # Greedy CTC decode: collapse repeats, then drop the blank symbol
            # (presumably '_' is the CTC blank here — confirm against cdict).
            tdec = tst_o.argmax(2).permute(1, 0).cpu().numpy().squeeze()
            tt = [v for j, v in enumerate(tdec) if j == 0 or v != tdec[j - 1]]
            print('gdec:: ' + ''.join([icdict[t] for t in tt]).replace('_', ''))
            net.train()
import editdistance
def test(epoch, ur=False, k=5):
    """Evaluate the net on test_loader and log CER/WER.

    Args:
        epoch: epoch number used only for the log messages (-1 for ad-hoc runs).
        ur: if True, apply uncertainty_reduction to each batch before decoding.
        k: number of uncertainty-reduction samples (N) when ur is True.
    """
    net.eval()
    logger.info('Testing at epoch %d', epoch)
    tdecs = []
    transcrs = []
    for (img, transcr) in test_loader:
        img = Variable(img.to(device))
        if ur:
            img = uncertainty_reduction(net, img, dscale=4, a=0.01, lr=1e-3, N=k, nc=25).detach()
        with torch.no_grad():
            o = net(img) #.permute(2, 0, 1)
        # Greedy per-frame argmax, one row of symbol indices per sample.
        tdec = o.argmax(2).permute(1, 0).view(img.size(0), -1).cpu().numpy()
        tdecs += [tdec]
        transcrs += list(transcr)
    tdecs = np.concatenate(tdecs)
    cer, wer = [], []
    cntc, cntw = 0, 0
    for tdec, transcr in zip(tdecs, transcrs):
        transcr = transcr.strip()
        # Collapse repeated symbols, then strip the '_' blank (CTC decode).
        tt = [v for j, v in enumerate(tdec) if j == 0 or v != tdec[j - 1]]
        dec_transcr = ''.join([icdict[t] for t in tt]).replace('_', '')
        dec_transcr = dec_transcr.strip()
        # calculate CER and WER
        cc = float(editdistance.eval(dec_transcr, transcr))
        ww = float(editdistance.eval(dec_transcr.split(' '), transcr.split(' ')))
        cntc += len(transcr)
        cntw += len(transcr.split(' '))
        cer += [cc]
        wer += [ww]
    # Normalize edit distances by the total number of reference chars/words.
    cer = sum(cer) / cntc
    wer = sum(wer) / cntw
    logger.info('CER at epoch %d: %f', epoch, cer)
    logger.info('WER at epoch %d: %f', epoch, wer)
    net.train()
cnt = 0
logger.info('Training:')
for epoch in range(1, max_epochs + 1):
    train(epoch)
    scheduler.step()
    # Evaluate every other epoch; checkpoint every 10 epochs.
    if epoch % 2 == 0:
        test(epoch)
    if epoch % 10 == 0:
        logger.info('Saving net after %d epochs', epoch)
        torch.save(net.cpu().state_dict(), save_name)
        net.to(device)
    # Cosine-annealing warm restart: rebuild optimizer and scheduler so the
    # learning rate jumps back to nlr and decays again.
    if epoch % restart_epochs == 0:
        parameters = list(net.parameters())
        optimizer = torch.optim.AdamW(parameters, nlr, weight_decay=0.00005)
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, restart_epochs)
# Final checkpoint and evaluation, then a sweep over the number of
# uncertainty-reduction samples k.
torch.save(net.cpu().state_dict(), save_name)
net.to(device)
test(-1)
for k in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]:
    print('-------------------- :: ' + str(k) + ' :: --------------------')
    #for _ in range(3):
    test(-1, ur=True, k=k)
| [
"logging.basicConfig",
"logging.getLogger",
"utils.iam_loader.IAMLoader",
"torch.nn.functional.ctc_loss",
"argparse.ArgumentParser",
"numpy.concatenate",
"torch.utils.data.DataLoader",
"torch.nn.functional.log_softmax",
"utils.auxilary_functions.affine_transformation",
"valid_deforms.uncertainty_r... | [((519, 658), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""[%(asctime)s, %(levelname)s, %(name)s] %(message)s"""', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""', 'level': 'logging.INFO'}), "(format=\n '[%(asctime)s, %(levelname)s, %(name)s] %(message)s', datefmt=\n '%Y-%m-%d %H:%M:%S', level=logging.INFO)\n", (538, 658), False, 'import logging\n'), ((701, 743), 'logging.getLogger', 'logging.getLogger', (['"""HTR-Experiment::train"""'], {}), "('HTR-Experiment::train')\n", (718, 743), False, 'import logging\n'), ((819, 844), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (842, 844), False, 'import argparse\n'), ((2126, 2212), 'utils.iam_loader.IAMLoader', 'IAMLoader', (['"""train"""'], {'level': 'level', 'fixed_size': 'fixed_size', 'transforms': 'aug_transforms'}), "('train', level=level, fixed_size=fixed_size, transforms=\n aug_transforms)\n", (2135, 2212), False, 'from utils.iam_loader import IAMLoader\n'), ((2220, 2273), 'utils.iam_loader.IAMLoader', 'IAMLoader', (['"""test"""'], {'level': 'level', 'fixed_size': 'fixed_size'}), "('test', level=level, fixed_size=fixed_size)\n", (2229, 2273), False, 'from utils.iam_loader import IAMLoader\n'), ((2357, 2430), 'torch.utils.data.DataLoader', 'DataLoader', (['train_set'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': '(8)'}), '(train_set, batch_size=batch_size, shuffle=True, num_workers=8)\n', (2367, 2430), False, 'from torch.utils.data import DataLoader\n'), ((2446, 2519), 'torch.utils.data.DataLoader', 'DataLoader', (['test_set'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(8)'}), '(test_set, batch_size=batch_size, shuffle=False, num_workers=8)\n', (2456, 2519), False, 'from torch.utils.data import DataLoader\n'), ((5760, 5781), 'numpy.concatenate', 'np.concatenate', (['tdecs'], {}), '(tdecs)\n', (5774, 5781), True, 'import numpy as np\n'), ((2079, 2110), 'utils.auxilary_functions.affine_transformation', 
'affine_transformation', (['x'], {'s': '(0.1)'}), '(x, s=0.1)\n', (2100, 2110), False, 'from utils.auxilary_functions import affine_transformation\n'), ((4193, 4285), 'torch.nn.functional.ctc_loss', 'F.ctc_loss', (['ls_output', 'labels', 'act_lens', 'label_lens'], {'zero_infinity': '(True)', 'reduction': '"""sum"""'}), "(ls_output, labels, act_lens, label_lens, zero_infinity=True,\n reduction='sum')\n", (4203, 4285), True, 'import torch.nn.functional as F\n'), ((6160, 6199), 'editdistance.eval', 'editdistance.eval', (['dec_transcr', 'transcr'], {}), '(dec_transcr, transcr)\n', (6177, 6199), False, 'import editdistance\n'), ((4138, 4166), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['output'], {'dim': '(2)'}), '(output, dim=2)\n', (4151, 4166), True, 'import torch.nn.functional as F\n'), ((5449, 5520), 'valid_deforms.uncertainty_reduction', 'uncertainty_reduction', (['net', 'img'], {'dscale': '(4)', 'a': '(0.01)', 'lr': '(0.001)', 'N': 'k', 'nc': '(25)'}), '(net, img, dscale=4, a=0.01, lr=0.001, N=k, nc=25)\n', (5470, 5520), False, 'from valid_deforms import local_deform, morphological, uncertainty_reduction\n')] |
#!/usr/bin/env python
import functools
import math
import numpy
import hypothesis
import hypothesis.extra.numpy
from hypothesis.strategies import complex_numbers, floats
import libnu.array
from test import eq
# Strategy for fixed-size (10) arrays of unique elements; the dtype and
# element strategy are supplied per test.
arrays = functools.partial(
    hypothesis.extra.numpy.arrays,
    shape=10,
    unique=True,
)
# HACK: globally monkey-patch numpy constructors to default to float32 so the
# tests compare like dtypes with libnu. This mutates the numpy module for
# anything imported after this file — confined to the test suite on purpose.
numpy.ones = functools.partial(numpy.ones, dtype=numpy.float32)
numpy.zeros = functools.partial(numpy.zeros, dtype=numpy.float32)
numpy.linspace = functools.partial(numpy.linspace, dtype=numpy.float32)
@hypothesis.given(arrays(dtype=numpy.float32, elements=floats(-1.0, 1.0)))
def test_maxmin(x):
    # max/min must bracket each other and agree with the argmax/argmin indices.
    assert libnu.array.max(x) >= libnu.array.min(x)
    assert libnu.array.min(x) <= libnu.array.max(x)
    assert libnu.array.max(x) == x[libnu.array.argmax(x)]
    assert libnu.array.min(x) == x[libnu.array.argmin(x)]
    # sin maps any input into [-1, 1], so the extrema must respect those bounds.
    assert libnu.array.max(numpy.sin(x)) <= 1.0
    assert libnu.array.min(numpy.sin(x)) >= -1.0
@hypothesis.given(
    arrays(dtype=numpy.float32, elements=floats(-1.0, 1.0)),
    arrays(dtype=numpy.float32, elements=floats(-1.0, 1.0)),
)
def test_add(x, y):
    """Elementwise add: zero is the identity, the op commutes, numpy agrees."""
    identity = numpy.zeros(x.size)
    assert all(libnu.array.add(x, identity) == x)
    forward = libnu.array.add(x, y)
    backward = libnu.array.add(y, x)
    assert all(forward == backward)
    assert eq(forward, x + y, 1e-8)
@hypothesis.given(
    arrays(dtype=numpy.float32, elements=floats(-1.0, 1.0)),
    arrays(dtype=numpy.float32, elements=floats(-1.0, 1.0)),
)
def test_multiply(x, y):
    """Elementwise multiply: one is the identity, the op commutes, numpy agrees."""
    identity = numpy.ones(x.size)
    assert all(libnu.array.multiply(x, identity) == x)
    forward = libnu.array.multiply(x, y)
    backward = libnu.array.multiply(y, x)
    assert all(forward == backward)
    assert eq(forward, x * y, 1e-8)
@hypothesis.given(
    arrays(dtype=numpy.complex64, elements=complex_numbers(0.0, 1.0)),
    arrays(dtype=numpy.complex64, elements=complex_numbers(0.0, 1.0)),
)
def test_cadd(x, y):
    # Complex addition: zero is the identity and the operation commutes.
    z = numpy.zeros(x.size, dtype=numpy.complex64)
    assert all(libnu.array.cadd(x, z) == x)
    assert all(libnu.array.cadd(x, y) == libnu.array.cadd(y, x))
@hypothesis.given(
    arrays(dtype=numpy.complex64, elements=complex_numbers(0.0, 1.0)),
    arrays(dtype=numpy.complex64, elements=complex_numbers(0.0, 1.0)),
)
def test_cmul(x, y):
    # Complex multiplication: one is the identity and the operation commutes.
    z = numpy.ones(x.size, dtype=numpy.complex64)
    assert all(libnu.array.cmul(x, z) == x)
    assert all(libnu.array.cmul(x, y) == libnu.array.cmul(y, x))
@hypothesis.given(
    arrays(dtype=numpy.complex64, elements=complex_numbers(0.0, 1.0))
)
def test_conj(x):
    """Conjugation is an involution: conj(conj(x)) == x."""
    twice_conjugated = libnu.array.conj(libnu.array.conj(x))
    assert all(twice_conjugated == x)
@hypothesis.given(
    arrays(dtype=numpy.float32, elements=floats(0.0, 2.0 * math.pi))
)
def test_cossin(x):
    cosx = libnu.array.cos(x)
    sinx = libnu.array.sin(x)
    # Pythagorean identity, 2*pi periodicity, and the cos/sin phase relation.
    assert eq(cosx ** 2 + sinx ** 2, 1.0, 1e-4)
    assert eq(cosx, libnu.array.cos(x + 2.0 * math.pi), 1e-4)
    assert eq(sinx, libnu.array.sin(x + 2.0 * math.pi), 1e-4)
    assert eq(cosx, libnu.array.sin(x + math.pi / 2.0), 1e-4)
@hypothesis.given(
    arrays(dtype=numpy.float32, elements=floats(1e-8, math.e))
)
def test_explog(x):
    logx = libnu.array.log(x)
    expx = libnu.array.exp(x)
    # exp and log are mutual inverses on (0, e].
    assert eq(libnu.array.exp(logx), x, 1e-4)
    assert eq(libnu.array.log(expx), x, 1e-4)
    # exp turns sums into products; log turns products into sums.
    assert eq(libnu.array.exp(x + 2.0), expx * math.exp(2.0), 1e-4)
    assert eq(libnu.array.log(x * 2.0), logx + math.log(2.0), 1e-4)
@hypothesis.given(floats(0.0, 10.0), floats(20.0, 100.0))
def test_linspace(a, b):
    """linspace is strictly increasing with a mean step close to (b-a)/n."""
    n = 10000
    grid = libnu.array.linspace(a, b, n)
    steps = grid[1:] - grid[:-1]
    assert all(steps > 0.0)
    mean_step = numpy.mean(steps)
    assert mean_step <= (b - a) / (n - 1)
    assert mean_step >= (b - a) / (n + 1)
if __name__ == '__main__':
    # Allow running the property tests directly, without a test runner.
    test_maxmin()
    test_add()
    test_multiply()
    test_cadd()
    test_cmul()
    test_conj()
    test_cossin()
    test_explog()
    test_linspace()
| [
"numpy.mean",
"numpy.ones",
"test.eq",
"hypothesis.strategies.floats",
"math.log",
"numpy.zeros",
"functools.partial",
"hypothesis.strategies.complex_numbers",
"numpy.sin",
"math.exp"
] | [((223, 294), 'functools.partial', 'functools.partial', (['hypothesis.extra.numpy.arrays'], {'shape': '(10)', 'unique': '(True)'}), '(hypothesis.extra.numpy.arrays, shape=10, unique=True)\n', (240, 294), False, 'import functools\n'), ((323, 373), 'functools.partial', 'functools.partial', (['numpy.ones'], {'dtype': 'numpy.float32'}), '(numpy.ones, dtype=numpy.float32)\n', (340, 373), False, 'import functools\n'), ((388, 439), 'functools.partial', 'functools.partial', (['numpy.zeros'], {'dtype': 'numpy.float32'}), '(numpy.zeros, dtype=numpy.float32)\n', (405, 439), False, 'import functools\n'), ((457, 511), 'functools.partial', 'functools.partial', (['numpy.linspace'], {'dtype': 'numpy.float32'}), '(numpy.linspace, dtype=numpy.float32)\n', (474, 511), False, 'import functools\n'), ((1099, 1118), 'numpy.zeros', 'numpy.zeros', (['x.size'], {}), '(x.size)\n', (1110, 1118), False, 'import numpy\n'), ((1453, 1471), 'numpy.ones', 'numpy.ones', (['x.size'], {}), '(x.size)\n', (1463, 1471), False, 'import numpy\n'), ((1842, 1884), 'numpy.zeros', 'numpy.zeros', (['x.size'], {'dtype': 'numpy.complex64'}), '(x.size, dtype=numpy.complex64)\n', (1853, 1884), False, 'import numpy\n'), ((2188, 2229), 'numpy.ones', 'numpy.ones', (['x.size'], {'dtype': 'numpy.complex64'}), '(x.size, dtype=numpy.complex64)\n', (2198, 2229), False, 'import numpy\n'), ((2692, 2730), 'test.eq', 'eq', (['(cosx ** 2 + sinx ** 2)', '(1.0)', '(0.0001)'], {}), '(cosx ** 2 + sinx ** 2, 1.0, 0.0001)\n', (2694, 2730), False, 'from test import eq\n'), ((3329, 3346), 'hypothesis.strategies.floats', 'floats', (['(0.0)', '(10.0)'], {}), '(0.0, 10.0)\n', (3335, 3346), False, 'from hypothesis.strategies import complex_numbers, floats\n'), ((3348, 3367), 'hypothesis.strategies.floats', 'floats', (['(20.0)', '(100.0)'], {}), '(20.0, 100.0)\n', (3354, 3367), False, 'from hypothesis.strategies import complex_numbers, floats\n'), ((3504, 3517), 'numpy.mean', 'numpy.mean', (['d'], {}), '(d)\n', (3514, 3517), False, 
'import numpy\n'), ((3550, 3563), 'numpy.mean', 'numpy.mean', (['d'], {}), '(d)\n', (3560, 3563), False, 'import numpy\n'), ((856, 868), 'numpy.sin', 'numpy.sin', (['x'], {}), '(x)\n', (865, 868), False, 'import numpy\n'), ((904, 916), 'numpy.sin', 'numpy.sin', (['x'], {}), '(x)\n', (913, 916), False, 'import numpy\n'), ((569, 586), 'hypothesis.strategies.floats', 'floats', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (575, 586), False, 'from hypothesis.strategies import complex_numbers, floats\n'), ((988, 1005), 'hypothesis.strategies.floats', 'floats', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (994, 1005), False, 'from hypothesis.strategies import complex_numbers, floats\n'), ((1049, 1066), 'hypothesis.strategies.floats', 'floats', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (1055, 1066), False, 'from hypothesis.strategies import complex_numbers, floats\n'), ((1337, 1354), 'hypothesis.strategies.floats', 'floats', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (1343, 1354), False, 'from hypothesis.strategies import complex_numbers, floats\n'), ((1398, 1415), 'hypothesis.strategies.floats', 'floats', (['(-1.0)', '(1.0)'], {}), '(-1.0, 1.0)\n', (1404, 1415), False, 'from hypothesis.strategies import complex_numbers, floats\n'), ((1712, 1737), 'hypothesis.strategies.complex_numbers', 'complex_numbers', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (1727, 1737), False, 'from hypothesis.strategies import complex_numbers, floats\n'), ((1783, 1808), 'hypothesis.strategies.complex_numbers', 'complex_numbers', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (1798, 1808), False, 'from hypothesis.strategies import complex_numbers, floats\n'), ((2058, 2083), 'hypothesis.strategies.complex_numbers', 'complex_numbers', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (2073, 2083), False, 'from hypothesis.strategies import complex_numbers, floats\n'), ((2129, 2154), 'hypothesis.strategies.complex_numbers', 'complex_numbers', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (2144, 2154), False, 'from 
hypothesis.strategies import complex_numbers, floats\n'), ((2403, 2428), 'hypothesis.strategies.complex_numbers', 'complex_numbers', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (2418, 2428), False, 'from hypothesis.strategies import complex_numbers, floats\n'), ((2571, 2597), 'hypothesis.strategies.floats', 'floats', (['(0.0)', '(2.0 * math.pi)'], {}), '(0.0, 2.0 * math.pi)\n', (2577, 2597), False, 'from hypothesis.strategies import complex_numbers, floats\n'), ((3220, 3233), 'math.exp', 'math.exp', (['(2.0)'], {}), '(2.0)\n', (3228, 3233), False, 'import math\n'), ((3288, 3301), 'math.log', 'math.log', (['(2.0)'], {}), '(2.0)\n', (3296, 3301), False, 'import math\n'), ((2977, 2998), 'hypothesis.strategies.floats', 'floats', (['(1e-08)', 'math.e'], {}), '(1e-08, math.e)\n', (2983, 2998), False, 'from hypothesis.strategies import complex_numbers, floats\n')] |
''' Spline2.py: wrapper for B. Thijsse et al.'s hyper-spline routines.
Yet another spline interpolation routine. The problem: given a set of
experimental data with noise, find the spline with the optimal number of
knots.
Solution:
They use the usual kind of routines to determine least-squares
splines from a given set of knot points. The problem REALLY
boils down to: how many knots do you use? There are two
extremes: put a knot point on each data point to get an
interpolating spline (which sucks for experimental data with
noise). The other extreme is to have the minimal set of knots
to define a polynomial of order k (e.g., a cubic). This also
sucks. Somewhere between the two extremes is a number of
knots that optimally recovers the information in the data and
smooths out the noise.
spline2 starts with a large number of knots (interpolating
spline) and iteratively removes knots until a figure of merit
reaches some prescribed value. In this case, this figure of
merit is the Durbin-Watson statistic, which measures the auto-
correlation between the residuals of the spline fit.
For more details, see:
* <NAME> et al., "A Practical Algorithm for Least-Squares
spline Approximation of Data Containing Noise", Computers in Physics,
vol 12 no. 4 July 1998
* http://structureandchange.3me.tudelft.nl/
'''
from . import spline2c
import numpy as num
acffuncs = ['exp','gauss','linear','sinc']
def spline2(x, y, w=None, sigma=None, rsigma=None, xrange=None, degree=3,
      acfsearch=0, acffunc='exp', ksi=None, n=None,
      allownonopt=1, lopt=None, rejlev=0.05, xlog=0,
      interactive=0, verbose=0, full_output=0):
   '''Solve for the optimal spline given a set of (x,y) data.
   
   Args:
      w (float array): specify weights (1/sigma) for each data point
      sigma (float): specify an absolute sigma for all data points:
         w[i] = 1/sigma
         this will then force a regular chi-square fit instead of DW
      rsigma (float): specify a relative sigma for all data points:
         w[i] = 1/(y*rsigma)
      xrange (2-tuple): Only use data in interval (xrange[0], xrange[1])
      degree (int): degree of the spline (default: 3 for cubic spline)
      acfsearch (bool): perform an automated search for autocorrelation.
      acffunc (str): Use acffunc as the autocorrelation function.
         can be one of 'exp','gauss','linear', or 'sinc'. Default:
         'exp'
      ksi (float): Use a specific autocorrelation length equal to ksi
      n (int): Only search for autocorrelation on index interval n
      allownonopt (bool): Allow splines with non-optimized breakpoints
         (default True)
      lopt (int): Force knot optimization to start with lopt knots.
      rejlev (float): Use rejection level on statistical tests of rejlev
         (default 0.05)
      xlog (bool): Take the log10(x) before spline fitting. Default: False
      verbose (bool): Lots of output to stdout. Default: False
      interactive (bool): Allows the user to choose the optimal spline
         manually.
      full_output (bool): along with tck, return the following statistics:
            rms, dws (Durbin-Watson statistic), lfin (final number
            of knot points), ksi (computed auto-correlation length),
            acffit (indicator of how well the assumed auto-
            correlation function represents the data),
   
   Returns:
      (tuple):  (t, c, k) if full_output=0 ((t,c,k), rms, dws, lfin, ksi,
         acffit) if full_output=1

         - t: array of lfin+1 knot points
         - c: array of lfin+k-1 spline coefficients
         - k: order of the spline (note:  order = degree+1, so this is 4
           for a cubic spline!)
   
         The tuple (t,c,k) can be input to routines such as evalsp().

         - rms: dispersion of the fitted spline
         - dws: Durbin-Watson statistic
         - lfin: final number of knot points
         - ksi: computed auto-correlation length
         - acffit: how well the correlations agree with assumed function.
   '''
   # Normalize inputs to float64 arrays for the C routine.
   x = num.asarray(x).astype(num.float64)
   y = num.asarray(y).astype(num.float64)
   xin = x.astype(num.float64)
   yin = y.astype(num.float64)
   if w is not None:
      w = num.asarray(w).astype(num.float64)
      win = w.astype(num.float64)
   else:
      win = x*0.0 + 1.0   # assume no weight info.
   if not (len(xin) == len(yin) == len(win)):
      raise IndexError("Arrays x,y, and w must have same length")
   # Translate sigma/rsigma into the (rel, fixed_sigma, fixval) flag triple
   # the C routine expects; fixed sigma forces chi-square instead of DW.
   rel=0
   fixed_sigma=0
   fixval=0
   if sigma is not None or rsigma is not None:
      fixed_sigma = 1
      if sigma is not None:
         fixval = sigma
         rel = 0
      else:
         fixval = rsigma
         rel = 1
   # The C side indexes autocorrelation functions from 1.
   acf_ind = acffuncs.index(acffunc) + 1
   # Optional x-interval restriction.
   if xrange is not None:
      xbegin = xrange[0]
      xend = xrange[1]
      xflag = 1
   else:
      xbegin = 0
      xend = 0
      xflag = 0
   # Optional fixed autocorrelation search interval n.
   if n is not None:
      nset = 1
      n_max = n_min = n
   else:
      nset = 0
      n_max = n_min = 1
   # Optional fixed autocorrelation length ksi.
   if ksi is not None:
      ksiset = 1
      ksibegin = ksiend = ksi
   else:
      ksiset = 0
      ksibegin = ksiend = 0.0
   # Optional starting number of knots for the optimization.
   if lopt is not None:
      lind = 1
   else:
      lind = 0
      lopt = 0
   result = spline2c.spline2(xin, yin, win, degree, acfsearch, acf_ind, xflag,
         xbegin,xend, 0, 0, 0, xlog, nset, ksiset, n_min, n_max, ksibegin,
         ksiend, rejlev, rel, fixed_sigma, fixval, lind, lopt, allownonopt,
         interactive, verbose)
   if full_output:
      return((result[0:3]), result[3:])
   else:
      return(result[0:3])
def evalsp(x, tck, deriv=0):
   '''Evaluate (a derivative of) a spline computed by spline2.

   Args:
      x (float array or scalar): point(s) at which the spline is evaluated.
      tck (3-tuple): (knots, coefficients, k) as returned by spline2.
      deriv (int): if > 0, evaluate the deriv-th derivative of the spline.

   Returns:
      float array: the evaluated spline (or derivative) at each point of x.
   '''
   # Accept scalars as well as sequences; the C routine wants a float64 array.
   xin = num.atleast_1d(num.asarray(x)).astype(num.float64)
   knots, coeffs, order = tck[0], tck[1], tck[2]
   nintervals = len(knots) - 1
   return spline2c.evalsp(xin, order, nintervals, knots, coeffs, deriv)
def eval_extrema(tck):
   '''Locate the extrema of the spline, i.e. the points where S'(x) == 0.

   Args:
      tck (3-tuple): (knots, coefficients, k) as returned by spline2.

   Returns:
      3-tuple: (xextr, yextr, signs) where xextr are the x-positions of the
         extrema, yextr = S(xextr), and signs carries the sign of S''(xextr):
         negative -> maximum, positive -> minimum, close to zero ->
         inflection point.

   Raises:
      ValueError: if the spline order k is 2 or less.
   '''
   knots, coeffs, order = tck[0], tck[1], tck[2]
   if order <= 2:
      raise ValueError("Spline order must be at least 2 for finding extrema")
   return spline2c.eval_extrema(order, len(knots) - 1, knots, coeffs)
def eval_inflect(tck):
   '''Locate the inflection points of the spline, where S''(x) == 0.

   Args:
      tck (3-tuple): (knots, coefficients, k) as returned by spline2.

   Returns:
      3-tuple: (xinflect, yinflect, dyinflect) where xinflect are the
         x-positions of the inflection points, yinflect = S(xinflect), and
         dyinflect = S'(xinflect).

   Raises:
      ValueError: if the spline order k is 3 or less.
   '''
   knots, coeffs, order = tck[0], tck[1], tck[2]
   if order <= 3:
      raise ValueError("Spline order must be at least 3 for finding inflections")
   return spline2c.eval_inflect(order, len(knots) - 1, knots, coeffs)
def eval_integ(x0, x1, tck):
   '''Evaluates the integral from x0 to x1 of the spline defined by tck.

   Args:
      x0 (float): lower limit of integration
      x1 (float): upper limit of integration
      tck (3-tuple): tuple of (knots, coefficients, k) that are returned as
         the first output of spline2.

   Returns:
      float: the integration.

   Raises:
      ValueError: if [x0, x1] extends beyond the spline's knot range.
   '''
   t = tck[0]
   c = tck[1]
   l = len(t) - 1
   k = tck[2]
   # Bug fix: the spline's domain runs from the first knot t[0] to the LAST
   # knot t[l]; the original compared x1 against t[1] (the second knot),
   # rejecting perfectly legal upper limits.
   if x0 < t[0] or x1 > t[l]:
      raise ValueError("integration limits beyond spline definition")
   result = spline2c.eval_integ(x0, x1, k, l, t, c)
   return(result)
def eval_x(value, tck):
   '''Solve S(x) = value for x on the spline defined by tck.

   Args:
      value (float): right-hand side of the equation S(x) = value.
      tck (3-tuple): (knots, coefficients, k) as returned by spline2.

   Returns:
      float array: the roots x of the equation S(x) = value.
   '''
   knots, coeffs, order = tck[0], tck[1], tck[2]
   return spline2c.eval_x(value, order, len(knots) - 1, knots, coeffs)
| [
"numpy.array",
"numpy.shape",
"numpy.asarray"
] | [((4296, 4310), 'numpy.asarray', 'num.asarray', (['x'], {}), '(x)\n', (4307, 4310), True, 'import numpy as num\n'), ((4338, 4352), 'numpy.asarray', 'num.asarray', (['y'], {}), '(y)\n', (4349, 4352), True, 'import numpy as num\n'), ((6280, 6292), 'numpy.shape', 'num.shape', (['x'], {}), '(x)\n', (6289, 6292), True, 'import numpy as num\n'), ((4466, 4480), 'numpy.asarray', 'num.asarray', (['w'], {}), '(w)\n', (4477, 4480), True, 'import numpy as num\n'), ((6310, 6324), 'numpy.array', 'num.array', (['[x]'], {}), '([x])\n', (6319, 6324), True, 'import numpy as num\n'), ((6364, 6378), 'numpy.asarray', 'num.asarray', (['x'], {}), '(x)\n', (6375, 6378), True, 'import numpy as num\n')] |
"""
Copyright (C) 2012 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import numpy as np
class RnnEvaluator(object):
    """A compiled RNN as a computable object. Takes a network in a form that
    is efficient to activate so that the network can be evaluated quickly.
    Produced by :class:`LayeredRnnGenotype`'s ``compile`` method.
    :param numNeurons: The number of neurons in the network, incl. bias, input,
                       output, and hidden
    :type numNeurons: ``int``
    :param inputs: A list of slices or indexes that can be applied to the
                   current state to set input values, one item per input layer.
    :type inputs: A ``list`` of ``slice``s or ``int``s, potentially mixed
    :param outputs: A list of slices or indexes that can be applied to the
                    current state to extract output values.
    :type outputs: A ``list`` of ``slice``s or ``int``s, potentially mixed
    :param weightStack: A list of lists of (weight matrix, source slice,
                        target slice) entries. Entries within a nested list may
                        be applied concurrently; the outer list is serial.
    :type weightStack: A ``list`` of ``list``s each with a `tuple` containing
                       (``numpy.ndarray``, ``slice``, ``slice``)
    :param activationStack: A list of (``slice``, function) tuples giving the
                            activation function applied to each layer's span of
                            the state array.
    """
    def __init__(self, numNeurons, inputs, outputs, weightStack, activationStack):
        self.numNeurons = numNeurons
        self.inputs = inputs
        self.outputs = outputs
        self.weightStack = weightStack
        self.activationStack = activationStack
        self.clear()
    def clear(self):
        """Reset the state of the network to all zeros."""
        self.state = np.zeros(self.numNeurons)
    def setInputs(self, inputs):
        """Takes an array of arrays of floats and writes
        it into the state at the inputs
        :param inputs: a list of arrays of floats, with each nested array matching
                       the size of the input layer as specified in ``self.inputs``
        :type inputs: a list of arrays/lists of floats
        """
        # don't check match for efficiency
        for idxs, vals in zip(self.inputs, inputs):
            self.state[idxs] = vals
    def getOutputs(self):
        """Produces an array of floats corresponding to the outputs.
        :returns: a list of arrays of floats, with each nested array matching
                  the size of the output layer as specified in ``self.outputs``
        """
        return [self.state[idxs] for idxs in self.outputs]
    def activate(self):
        """Advance the network to the next state based on the current state."""
        # Renamed from `next` to avoid shadowing the builtin.
        new_state = np.zeros(self.numNeurons)
        for step in self.weightStack:
            for w, frmIdxs, toIdxs in step:
                new_state[toIdxs] += np.dot(w, self.state[frmIdxs])
        for idxs, act in self.activationStack:
            new_state[idxs] = act(new_state[idxs])
        self.state = new_state
    def call(self, inputs, times=5):
        """Set the inputs, activate the network ``times`` steps, and return
        the outputs (see :meth:`getOutputs`)."""
        self.setInputs(inputs)
        # Bug fix: `xrange` is Python 2 only (NameError on Python 3).
        for _ in range(times):
            self.activate()
        return self.getOutputs()
def __call__(self, inputs, times=5):
return self.call(inputs, times) | [
"numpy.dot",
"numpy.zeros"
] | [((4026, 4051), 'numpy.zeros', 'np.zeros', (['self.numNeurons'], {}), '(self.numNeurons)\n', (4034, 4051), True, 'import numpy as np\n'), ((5043, 5068), 'numpy.zeros', 'np.zeros', (['self.numNeurons'], {}), '(self.numNeurons)\n', (5051, 5068), True, 'import numpy as np\n'), ((5183, 5213), 'numpy.dot', 'np.dot', (['w', 'self.state[frmIdxs]'], {}), '(w, self.state[frmIdxs])\n', (5189, 5213), True, 'import numpy as np\n')] |
""" Computes the American option price using the deep optimal stopping (DOS).
It is the implementation of the deep optimal stopping (DOS) introduced in
(deep optimal stopping, Becker, Cheridito and Jentzen, 2020).
"""
import numpy as np
import torch
import torch.optim as optim
import torch.utils.data as tdata
from optimal_stopping.algorithms.backward_induction import \
backward_induction_pricer
from optimal_stopping.algorithms.utils import neural_networks
def init_weights(m):
  """Deterministically initialize a linear layer: Xavier-uniform weights
  (after seeding the RNG with 42) and a constant 0.01 bias.
  No-op for any module that is not ``torch.nn.Linear``."""
  if not isinstance(m, torch.nn.Linear):
    return
  torch.manual_seed(42)
  torch.nn.init.xavier_uniform_(m.weight)
  m.bias.data.fill_(0.01)
class DeepOptimalStopping(backward_induction_pricer.AmericanOptionPricer):
  """Computes the American option price using the deep optimal stopping (DOS)

  Backward-induction pricer whose per-date stopping decision is produced by a
  neural network trained on the simulated paths (Becker, Cheridito & Jentzen).
  """
  def __init__(self, model, payoff, nb_epochs=20, nb_batches=None,
               hidden_size=10, use_path=False):
    # nb_batches is accepted for interface compatibility but unused here.
    del nb_batches
    super().__init__(model, payoff, use_path=use_path)
    if self.use_path:
      # One network input per stock and per date (incl. t=0) when the
      # decision is conditioned on the whole path so far.
      state_size = model.nb_stocks * (model.nb_dates+1)
    else:
      state_size = model.nb_stocks
    self.neural_stopping = OptimalStoppingOptimization(
        state_size, model.nb_paths, hidden_size=hidden_size,
        nb_iters=nb_epochs)
  def stop(self, stock_values, immediate_exercise_values,
           discounted_next_values, h=None):
    """ see base class """
    if self.use_path:
      # shape [paths, stocks, dates up to now]
      stock_values = np.flip(stock_values, axis=2)
      # add zeros to get shape [paths, stocks, dates+1]
      stock_values = np.concatenate(
        [stock_values, np.zeros(
          (stock_values.shape[0], stock_values.shape[1],
           self.model.nb_dates + 1 - stock_values.shape[2]))], axis=-1)
      # Flatten to [paths, stocks*(dates+1)] as the network input.
      stock_values = stock_values.reshape((stock_values.shape[0], -1))
    # Train on the first self.split paths (split defined by the base class —
    # presumably a train/eval partition of the simulated paths), then evaluate
    # the stopping decision on all paths.
    self.neural_stopping.train_network(
        stock_values[:self.split],
        immediate_exercise_values.reshape(-1, 1)[:self.split],
        discounted_next_values[:self.split])
    inputs = stock_values
    stopping_rule = self.neural_stopping.evaluate_network(inputs)
    return stopping_rule
class OptimalStoppingOptimization(object):
  """Train/evaluation of the neural network used for the stopping decision"""
  def __init__(self, nb_stocks, nb_paths, hidden_size=10, nb_iters=20,
               batch_size=2000):
    # nb_stocks: input dimension of the stopping network (state size).
    # nb_paths: number of simulated paths (stored but not read here).
    self.nb_stocks = nb_stocks
    self.nb_paths = nb_paths
    self.nb_iters = nb_iters
    self.batch_size = batch_size
    # Double precision to match the numpy inputs converted below.
    self.network = neural_networks.NetworkDOS(
        self.nb_stocks, hidden_size=hidden_size).double()
    self.network.apply(init_weights)
  def _Loss(self, X):
    # Maximize the expected (stopped) value by minimizing its negation.
    return -torch.mean(X)
  def train_network(self, stock_values, immediate_exercise_value,
                    discounted_next_values):
    """Train the stopping network on one backward-induction step.

    The per-path objective interpolates between exercising now
    (immediate_exercise_value) and continuing (discounted_next_values)
    with the network output as the soft stopping probability.
    """
    optimizer = optim.Adam(self.network.parameters())
    discounted_next_values = torch.from_numpy(discounted_next_values).double()
    immediate_exercise_value = torch.from_numpy(immediate_exercise_value).double()
    inputs = stock_values
    X_inputs = torch.from_numpy(inputs).double()
    self.network.train(True)
    ones = torch.ones(len(discounted_next_values))
    for iteration in range(self.nb_iters):
      # Shuffle paths into mini-batches each iteration.
      for batch in tdata.BatchSampler(
          tdata.RandomSampler(range(len(X_inputs)), replacement=False),
          batch_size=self.batch_size, drop_last=False):
        optimizer.zero_grad()
        with torch.set_grad_enabled(True):
          outputs = self.network(X_inputs[batch]).reshape(-1)
          # Soft stopping decision: outputs ~ probability of stopping now.
          values = (immediate_exercise_value[batch].reshape(-1) * outputs +
                    discounted_next_values[batch] * (ones[batch] - outputs))
          loss = self._Loss(values)
          loss.backward()
          optimizer.step()
  def evaluate_network(self, X_inputs):
    """Evaluate the trained network; returns a 1-D numpy array of stopping
    probabilities, one per input row."""
    self.network.train(False)
    X_inputs = torch.from_numpy(X_inputs).double()
    outputs = self.network(X_inputs)
    return outputs.view(len(X_inputs)).detach().numpy()
| [
"torch.manual_seed",
"numpy.flip",
"torch.nn.init.xavier_uniform_",
"torch.mean",
"torch.from_numpy",
"numpy.zeros",
"optimal_stopping.algorithms.utils.neural_networks.NetworkDOS",
"torch.set_grad_enabled"
] | [((529, 550), 'torch.manual_seed', 'torch.manual_seed', (['(42)'], {}), '(42)\n', (546, 550), False, 'import torch\n'), ((592, 631), 'torch.nn.init.xavier_uniform_', 'torch.nn.init.xavier_uniform_', (['m.weight'], {}), '(m.weight)\n', (621, 631), False, 'import torch\n'), ((1496, 1525), 'numpy.flip', 'np.flip', (['stock_values'], {'axis': '(2)'}), '(stock_values, axis=2)\n', (1503, 1525), True, 'import numpy as np\n'), ((2674, 2687), 'torch.mean', 'torch.mean', (['X'], {}), '(X)\n', (2684, 2687), False, 'import torch\n'), ((2518, 2585), 'optimal_stopping.algorithms.utils.neural_networks.NetworkDOS', 'neural_networks.NetworkDOS', (['self.nb_stocks'], {'hidden_size': 'hidden_size'}), '(self.nb_stocks, hidden_size=hidden_size)\n', (2544, 2585), False, 'from optimal_stopping.algorithms.utils import neural_networks\n'), ((2883, 2923), 'torch.from_numpy', 'torch.from_numpy', (['discounted_next_values'], {}), '(discounted_next_values)\n', (2899, 2923), False, 'import torch\n'), ((2964, 3006), 'torch.from_numpy', 'torch.from_numpy', (['immediate_exercise_value'], {}), '(immediate_exercise_value)\n', (2980, 3006), False, 'import torch\n'), ((3057, 3081), 'torch.from_numpy', 'torch.from_numpy', (['inputs'], {}), '(inputs)\n', (3073, 3081), False, 'import torch\n'), ((3854, 3880), 'torch.from_numpy', 'torch.from_numpy', (['X_inputs'], {}), '(X_inputs)\n', (3870, 3880), False, 'import torch\n'), ((1642, 1751), 'numpy.zeros', 'np.zeros', (['(stock_values.shape[0], stock_values.shape[1], self.model.nb_dates + 1 -\n stock_values.shape[2])'], {}), '((stock_values.shape[0], stock_values.shape[1], self.model.nb_dates +\n 1 - stock_values.shape[2]))\n', (1650, 1751), True, 'import numpy as np\n'), ((3434, 3462), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(True)'], {}), '(True)\n', (3456, 3462), False, 'import torch\n')] |
import numpy as np
from typing import Sequence
import pyrado
from pyrado.utils.data_types import EnvSpec
from pyrado.tasks.base import Task
from pyrado.tasks.utils import never_succeeded
from pyrado.tasks.reward_functions import RewFcn
class EndlessFlippingTask(Task):
    """
    Task class for flipping an object around one axis about a desired angle. Once the new angle is equal to the
    old angle plus/minus a given angle delta, the new angle becomes the old one and the flipping continues.
    """
    def __init__(self,
                 env_spec: EnvSpec,
                 rew_fcn: RewFcn,
                 init_angle: float,
                 des_angle_delta: float = np.pi/2.,
                 angle_tol: float = 1/180.*np.pi):
        """
        Constructor
        :param env_spec: environment specification of a simulated or real environment
        :param rew_fcn: reward function, an instance of a subclass of RewFcn
        :param init_angle: initial angle
        :param des_angle_delta: desired angle that counts as a flip
        :param angle_tol: tolerance
        """
        if not isinstance(env_spec, EnvSpec):
            raise pyrado.TypeErr(given=env_spec, expected_type=EnvSpec)
        if not isinstance(rew_fcn, RewFcn):
            raise pyrado.TypeErr(given=rew_fcn, expected_type=RewFcn)
        self._env_spec = env_spec
        self._rew_fcn = rew_fcn
        self._init_angle = init_angle
        self._last_angle = init_angle  # reference angle the next flip is measured from
        self.des_angle_delta = des_angle_delta
        self.angle_tol = angle_tol
        self._held_rew = 0.  # permanent bonus accumulated from completed flips
    @property
    def env_spec(self) -> EnvSpec:
        return self._env_spec
    @property
    def rew_fcn(self) -> RewFcn:
        return self._rew_fcn
    @rew_fcn.setter
    def rew_fcn(self, rew_fcn: RewFcn):
        if not isinstance(rew_fcn, RewFcn):
            raise pyrado.TypeErr(given=rew_fcn, expected_type=RewFcn)
        self._rew_fcn = rew_fcn
    def reset(self, env_spec: EnvSpec, init_angle: float = None, **kwargs):
        """
        Reset the task.
        :param env_spec: environment specification
        :param init_angle: override initial angle
        :param kwargs: keyword arguments forwarded to the reward function, e.g. the initial state
        """
        # Update the environment specification at every reset of the environment since the spaces could change
        self._env_spec = env_spec
        # Reset the internal quantities to recognize the flips
        self._last_angle = init_angle if init_angle is not None else self._init_angle
        self._held_rew = 0.
        # Some reward functions scale with the state and action bounds
        self._rew_fcn.reset(state_space=env_spec.state_space, act_space=env_spec.act_space, **kwargs)
    def step_rew(self, state: np.ndarray, act: np.ndarray, remaining_steps: int = None) -> float:
        # We don't care about the flip direction or the number of revolutions.
        # Both candidate goal angles (one per flip direction) are considered.
        des_angles_both = np.array([[self._last_angle + self.des_angle_delta],
                                   [self._last_angle - self.des_angle_delta]])
        err_state = des_angles_both - state
        err_state = np.fmod(err_state, 2*np.pi)  # map to [-2pi, 2pi]
        # Choose the closer angle for the reward. Operate on state and action errors
        rew = self._held_rew + self._rew_fcn(np.min(err_state, axis=0), -act, remaining_steps)  # act_des = 0
        # Check if the flip was successful
        succ_idx = abs(err_state) <= self.angle_tol
        if any(succ_idx):
            # If successful, increase the permanent reward and memorize the achieved goal angle
            self._last_angle = float(des_angles_both[succ_idx])
            self._held_rew += self._rew_fcn(np.min(err_state, axis=0), -act, remaining_steps)
        return rew
    def has_succeeded(self, state: np.ndarray) -> bool:
        # The task is endless by design, so it never reports success.
        return never_succeeded()
| [
"pyrado.tasks.utils.never_succeeded",
"numpy.array",
"numpy.fmod",
"numpy.min",
"pyrado.TypeErr"
] | [((2969, 3069), 'numpy.array', 'np.array', (['[[self._last_angle + self.des_angle_delta], [self._last_angle - self.\n des_angle_delta]]'], {}), '([[self._last_angle + self.des_angle_delta], [self._last_angle -\n self.des_angle_delta]])\n', (2977, 3069), True, 'import numpy as np\n'), ((3166, 3195), 'numpy.fmod', 'np.fmod', (['err_state', '(2 * np.pi)'], {}), '(err_state, 2 * np.pi)\n', (3173, 3195), True, 'import numpy as np\n'), ((3880, 3897), 'pyrado.tasks.utils.never_succeeded', 'never_succeeded', ([], {}), '()\n', (3895, 3897), False, 'from pyrado.tasks.utils import never_succeeded\n'), ((1158, 1211), 'pyrado.TypeErr', 'pyrado.TypeErr', ([], {'given': 'env_spec', 'expected_type': 'EnvSpec'}), '(given=env_spec, expected_type=EnvSpec)\n', (1172, 1211), False, 'import pyrado\n'), ((1274, 1325), 'pyrado.TypeErr', 'pyrado.TypeErr', ([], {'given': 'rew_fcn', 'expected_type': 'RewFcn'}), '(given=rew_fcn, expected_type=RewFcn)\n', (1288, 1325), False, 'import pyrado\n'), ((1859, 1910), 'pyrado.TypeErr', 'pyrado.TypeErr', ([], {'given': 'rew_fcn', 'expected_type': 'RewFcn'}), '(given=rew_fcn, expected_type=RewFcn)\n', (1873, 1910), False, 'import pyrado\n'), ((3347, 3372), 'numpy.min', 'np.min', (['err_state'], {'axis': '(0)'}), '(err_state, axis=0)\n', (3353, 3372), True, 'import numpy as np\n'), ((3738, 3763), 'numpy.min', 'np.min', (['err_state'], {'axis': '(0)'}), '(err_state, axis=0)\n', (3744, 3763), True, 'import numpy as np\n')] |
import os
import sys
import argparse
import numpy as np
from .validate_ic import validate_ic
__all__ = ["validation_pipeline"]
def validation_pipeline(cat_folder, visit_num, sne_SED_path):
    """
    Run the instance-catalog validation tests for one pointing.

    Parameters
    ----------
    cat_folder is a string; the path to the directory containing the
    phosim_NNNNN.txt catalog
    visit_num is an int; the obsHistID of the pointing
    sne_SED_path is a string; the path to the parent directory of
    the Dynamic/ dir containing SNe SEDs

    Raises RuntimeError when un-sprinkled galaxies or invalid point-source
    magNorms are found.  Requires the TWINKLES_DIR environment variable.
    """
    filter_list = ['u', 'g', 'r', 'i', 'z', 'y']
    print("Loading visit info")
    # Parse the phosim header for the visit's MJD, band, and exposure time.
    with open(os.path.join(cat_folder, 'phosim_cat_%i.txt' % visit_num)) as f:
        for line in f:
            line_info = line.split(' ')
            if line_info[0] == 'mjd':
                visit_mjd = float(line_info[1])
            elif line_info[0] == 'filter':
                visit_band = filter_list[int(line_info[1])]
            elif line_info[0] == 'vistime':
                delta_t = float(line_info[1])/2.
    # This converts the way phosim wants the time to opsim time
    visit_mjd -= delta_t/86400.0
    sne_SED_file_dir = 'Dynamic'
    twinkles_data_dir = os.path.join(os.environ['TWINKLES_DIR'], 'data')
    agn_cache_file_name = os.path.join(twinkles_data_dir,
                                       'cosmoDC2_v1.1.4_agn_cache.csv')
    sne_cache_file_name = os.path.join(twinkles_data_dir,
                                       'cosmoDC2_v1.1.4_sne_cache.csv')
    sprinkled_agn_data_name = os.path.join(twinkles_data_dir,
                                           'cosmoDC2_v1.1.4_matched_AGN.fits')
    sprinkled_sne_data_name = os.path.join(twinkles_data_dir,
                                           'cosmoDC2_v1.1.4_sne_cat.csv')
    print("Running tests")
    val_cat = validate_ic(agn_cache_file=agn_cache_file_name,
                          sne_cache_file=sne_cache_file_name,
                          sprinkled_agn_data=sprinkled_agn_data_name,
                          sprinkled_sne_data=sprinkled_sne_data_name)
    df_gal, df_pt_src = val_cat.load_cat(cat_folder, visit_num)
    # Verify that the InstanceCatalog pipline ignored
    # the Sersic components of rows corresponding to
    # duplicate images of AGN (those rows would have
    # galaxy_id == (galaxy_id+1.5e10)*100000 as per
    # the uniqueId mangling scheme in the sprinkler)
    df_gal_galaxy_id = df_gal['uniqueId'].values//1024
    large_galaxy_id = df_gal_galaxy_id>1.0e11
    if large_galaxy_id.any():
        raise RuntimeError("Some galaxies that should have been "
                           "replaced by the sprinkler were not.")
    # Make sure that none of the point sources have magNorm
    # placeholders (999 or None) by the time they reach the
    # InstanceCatalog
    pt_src_magnorm = df_pt_src['phosimMagNorm'].values
    invalid_magnorm = (pt_src_magnorm>900.0) | np.isnan(pt_src_magnorm)
    if invalid_magnorm.any():
        raise RuntimeError("Some point sources have invalid magNorms")
    # run AGN tests (only when this visit contains sprinkled AGN)
    spr_agn = val_cat.process_sprinkled_agn(df_pt_src)
    if len(spr_agn)>0:
        agn_lens_gals = val_cat.process_agn_lenses(spr_agn, df_gal)
        agn_location_test = val_cat.compare_agn_location(spr_agn, agn_lens_gals)
        test_agn_inputs = val_cat.compare_agn_inputs(spr_agn, agn_lens_gals)
        test_agn_lens_mags = val_cat.compare_agn_lens_mags(spr_agn,
                                                           agn_lens_gals,
                                                           visit_band)
        test_agn_image_mags = val_cat.compare_agn_image_mags(spr_agn,
                                                             agn_lens_gals,
                                                             visit_mjd,
                                                             visit_band)
    # run SNe tests (only when this visit contains sprinkled SNe lenses)
    sne_lens_gals = val_cat.process_sne_lenses(df_gal)
    if len(sne_lens_gals)>0:
        spr_sne = val_cat.process_sprinkled_sne(df_pt_src, sne_SED_file_dir)
        test_sne_image_mags = val_cat.compare_sne_image_mags(spr_sne,
                                                             sne_lens_gals,
                                                             visit_mjd,
                                                             visit_band)
        sne_location_test = val_cat.compare_sne_location(spr_sne, sne_lens_gals)
        test_sne_lens_inputs = val_cat.compare_sne_lens_inputs(sne_lens_gals)
        test_sne_image_inputs = val_cat.compare_sne_image_inputs(spr_sne,
                                                                 sne_lens_gals,
                                                                 visit_mjd,
                                                                 sne_SED_file_dir,
                                                                 sne_SED_path)
        test_sne_lens_mags = val_cat.compare_sne_lens_mags(sne_lens_gals,
                                                           visit_band)
| [
"os.path.join",
"numpy.isnan"
] | [((1163, 1211), 'os.path.join', 'os.path.join', (["os.environ['TWINKLES_DIR']", '"""data"""'], {}), "(os.environ['TWINKLES_DIR'], 'data')\n", (1175, 1211), False, 'import os\n'), ((1238, 1302), 'os.path.join', 'os.path.join', (['twinkles_data_dir', '"""cosmoDC2_v1.1.4_agn_cache.csv"""'], {}), "(twinkles_data_dir, 'cosmoDC2_v1.1.4_agn_cache.csv')\n", (1250, 1302), False, 'import os\n'), ((1368, 1432), 'os.path.join', 'os.path.join', (['twinkles_data_dir', '"""cosmoDC2_v1.1.4_sne_cache.csv"""'], {}), "(twinkles_data_dir, 'cosmoDC2_v1.1.4_sne_cache.csv')\n", (1380, 1432), False, 'import os\n'), ((1502, 1569), 'os.path.join', 'os.path.join', (['twinkles_data_dir', '"""cosmoDC2_v1.1.4_matched_AGN.fits"""'], {}), "(twinkles_data_dir, 'cosmoDC2_v1.1.4_matched_AGN.fits')\n", (1514, 1569), False, 'import os\n'), ((1643, 1705), 'os.path.join', 'os.path.join', (['twinkles_data_dir', '"""cosmoDC2_v1.1.4_sne_cat.csv"""'], {}), "(twinkles_data_dir, 'cosmoDC2_v1.1.4_sne_cat.csv')\n", (1655, 1705), False, 'import os\n'), ((2880, 2904), 'numpy.isnan', 'np.isnan', (['pt_src_magnorm'], {}), '(pt_src_magnorm)\n', (2888, 2904), True, 'import numpy as np\n'), ((596, 653), 'os.path.join', 'os.path.join', (['cat_folder', "('phosim_cat_%i.txt' % visit_num)"], {}), "(cat_folder, 'phosim_cat_%i.txt' % visit_num)\n", (608, 653), False, 'import os\n')] |
####################################################################
# Copyright (c) 2020 <NAME> #
# #
# This source code is licensed under the MIT license found in the #
# LICENSE file in the root directory of this source tree. #
####################################################################
import numpy, os
from typing import Tuple, List
from .c_core import CppCad2D, CppMeshDynTri2D, CppMesher_Cad2D, CppVoxelGrid, CppMapper, AABB3
from .c_core import CAD_EDGE_GEOM_LINE, CAD_EDGE_GEOM_BEZIER_CUBIC, CAD_EDGE_GEOM_BEZIER_QUADRATIC
from .c_core import cppCad2D_ImportSVG, cppSVG_Polyline
from .c_core import cad_getPointsEdge
from .c_core import TRI, QUAD, HEX, TET, LINE
'''
from .c_core import \
meshquad3d_voxelgrid, \
meshquad3d_subdiv, \
meshhex3d_voxelgrid, \
meshhex3d_subdiv,\
meshdyntri2d_initialize
from .c_core import meshtri3d_read_ply, meshtri3d_read_obj, meshtri3d_read_nastran, meshtri3d_write_obj
from .c_core import setXY_MeshDynTri2D
from .c_core import cad_getPointsEdge, cppJArray_MeshPsup, quality_meshTri2D
from .c_core import copyMeshDynTri2D
from .c_core import setTopology_ExtrudeTri2Tet
from .c_core import cppNormalVtx_Mesh, cppEdge_Mesh
'''
from .c_core import cppMvc
from .c_core import numpyXYTri_MeshDynTri2D
from .msh import MeshDynTri2D
####################
class Cad2D():
    """Thin Python wrapper around the C++ 2D CAD object (CppCad2D)."""
    def __init__(self):
        self.ccad = CppCad2D()
    def draw(self) -> None:
        self.ccad.draw()
    def mouse(self,btn,action,mods,src,dir,view_height) -> None:
        """Mouse-button callback: left-button press picks a CAD element."""
        if btn == 0:
            if action == 1:
                self.ccad.pick(src[0],src[1],view_height)
    def motion(self,src0,src1,dir) -> None:
        """Mouse-drag callback: drag the currently picked element."""
        self.ccad.drag_picked(src1[0],src1[1], src0[0],src0[1])
    def minmax_xyz(self):
        return self.ccad.minmax_xyz()  # removed stray trailing semicolon
    ######
    def clear(self) -> None:
        """clear all the cad elements"""
        self.ccad.clear()
    def pick(self, x, y, view_height) -> None:
        self.ccad.pick(x,y,view_height)
    def add_polygon(self,list_xy) -> None:
        """Add a polygonal face from a flat [x0,y0,x1,y1,...] list."""
        self.ccad.add_polygon(list_xy)
        self.ccad.check()
    def add_vtx_edge(self, iedge, pos:List[float]) -> None:
        """Insert a vertex at pos=(x,y) on edge iedge."""
        self.ccad.add_vtx_edge(pos[0],pos[1],iedge)
        self.ccad.check()
    def add_vtx_face(self, iface, pos:List[float]) -> None:
        """Insert a vertex at pos=(x,y) inside face iface."""
        self.ccad.add_vtx_face(pos[0],pos[1],iface)
        self.ccad.check()
    def set_edge_type(self, iedge:int, type:int, param:List[float]):
        self.ccad.set_edge_type(iedge,type,param)
    def edge_type(self, iedge:int) -> int:
        return self.ccad.edge_type(iedge)
    def iedge_picked(self) -> int:
        return self.ccad.iedge_picked
    def ivtx_picked(self) -> int:
        return self.ccad.ivtx_picked
    def iface_picked(self) -> int:
        return self.ccad.iface_picked
    def clean_picked(self) -> None:
        """Deselect any currently picked vertex/edge/face."""
        self.ccad.ivtx_picked = -1
        self.ccad.iedge_picked = -1
        self.ccad.iface_picked = -1
    def points_edge(self, list_edge_index, np_xy, tolerance=0.01):
        """Indices of the points in np_xy lying on the given edges."""
        return cad_getPointsEdge(self.ccad,list_edge_index, np_xy, tolerance=tolerance)
    def import_svg(self,path0:str,scale=(1.0,1.0)):
        """Import geometry from an SVG file; returns False if the file is missing."""
        if not os.path.isfile(path0):
            # Bug fix: was `return false`, a NameError in Python.
            return False
        cppCad2D_ImportSVG(self.ccad,path0,scale[0],scale[1])
        self.ccad.check()
    def export_svg(self,path0:str,scale=1.0):
        """Write the control polygon of face 0 to path0 as an SVG polyline."""
        list_xy = self.ccad.xy_vtxctrl_face(0)
        str0 = cppSVG_Polyline(list_xy,scale)
        with open(path0, mode='w') as f:
            f.write(str0)
########################################################################################
class CadMesh2D(Cad2D):
    """A Cad2D that keeps a triangle mesh (MeshDynTri2D) synchronized with the
    CAD geometry via mean-value-coordinate (MVC) weights per face."""
    def __init__(self,edge_length:float):
        super().__init__()
        self.ccad.is_draw_face = False
        self.edge_length = edge_length
        self.dmsh = MeshDynTri2D() # this object does not reallocate
        self.map_cad2msh = None # this object reallocate
        self.listW = list()  # per-face [point indices, MVC weight matrix]
        self.is_sync_mesh = True  # when False, dragging does not deform the mesh
        self.mesher = Mesher_Cad2D(edge_length=edge_length)
    def draw(self):
        self.ccad.draw()
        self.dmsh.draw()
    def motion(self,src0,src1,dir):
        self.drag_picked(src1[0],src1[1], src0[0],src0[1])
    def minmax_xyz(self):
        return self.dmsh.minmax_xyz()
    #####
    def drag_picked(self, s1x,s1y, s0x,s0y):
        """Drag the picked CAD element and deform the mesh with the cached
        per-face MVC weights (mesh positions = W @ face control points)."""
        self.ccad.drag_picked(s1x,s1y, s0x,s0y)
        if not self.is_sync_mesh:
            return
        assert len(self.listW) == self.ccad.nface()
        for iface in range(self.ccad.nface()):
            list_xy_bound = self.ccad.xy_vtxctrl_face(iface)
            np_xy_bound = numpy.array(list_xy_bound).reshape([-1, 2])
            np_pos_face = numpy.dot(self.listW[iface][1],np_xy_bound)
            self.dmsh.np_pos[self.listW[iface][0]] = np_pos_face
        self.dmsh.syncXY_from_npPos()
        '''
        max_asp,min_area = quality_meshTri2D(self.dmsh.np_pos,self.dmsh.np_elm)
        if max_asp > 5.0 or min_area < 0.0:
            self.remesh()
        '''
    def remesh(self):
        """Regenerate the mesh from the CAD and rebuild the MVC weights."""
        self.mesher.meshing(self,self.dmsh)
        ####
        self.listW.clear()
        for iface in range(self.ccad.nface()):
            npIndPoint_face = self.mesher.points_on_faces([iface],self)
            npPosPoint_face = self.dmsh.np_pos[npIndPoint_face]
            np_xy_bound = numpy.array(self.ccad.xy_vtxctrl_face(iface)).reshape([-1, 2])
            # MVC weights of the mesh points w.r.t. the face control polygon.
            W = cppMvc(npPosPoint_face, np_xy_bound)
            assert W.shape[0] == npPosPoint_face.shape[0]
            assert W.shape[1] == np_xy_bound.shape[0]
            self.listW.append( [npIndPoint_face,W] )
        assert len(self.listW) == self.ccad.nface()
    def add_vtx_edge(self, iedge:int, pos:List[float]):
        super().add_vtx_edge(iedge,[pos[0],pos[1]])
        self.remesh()
    # def add_polygon(self,list_xy):
    #   self.ccad.add_polygon(list_xy)
    # def set_edge_type(self, iedge:int, type:int, param:List[float]):
    #   super().set_edge_type(iedge,type,param)
#####################################################
class Mesher_Cad2D():
    """Thin wrapper around CppMesher_Cad2D that meshes a Cad2D geometry."""
    def __init__(self, edge_length=0.01):
        self.cmshr = CppMesher_Cad2D()
        self.cmshr.edge_length = edge_length
    def points_on_faces(self, list_iface: List[int], cad: Cad2D) -> numpy.ndarray:
        """Indices of the mesh points lying on the given CAD faces."""
        return numpy.array(self.cmshr.points_on_faces(list_iface, cad.ccad),
                           dtype=numpy.int32)
    def points_on_edges(self, list_iedge: List[int], cad: Cad2D) -> numpy.ndarray:
        """Indices of the mesh points lying on the given CAD edges."""
        return numpy.array(self.cmshr.points_on_edges(list_iedge, cad.ccad),
                           dtype=numpy.int32)
    def points_on_one_edge(self, iedge: int, is_endpoints: bool, cad: Cad2D) -> numpy.ndarray:
        """Indices of mesh points on one CAD edge, optionally incl. endpoints."""
        return numpy.array(
            self.cmshr.points_on_one_edge(iedge, is_endpoints, cad.ccad),
            dtype=numpy.int32)
    def meshing(self, cad: Cad2D, dmesh=None):
        """Mesh the CAD model; fill (or newly create) and return a MeshDynTri2D."""
        cdmsh = CppMeshDynTri2D()
        self.cmshr.meshing(cdmsh, cad.ccad)
        np_pos, np_elm = numpyXYTri_MeshDynTri2D(cdmsh)
        if dmesh is None:
            dmesh = MeshDynTri2D()
        dmesh.cdmsh = cdmsh
        dmesh.np_pos = np_pos
        dmesh.np_elm = np_elm
        dmesh.elem_type = TRI
        return dmesh
| [
"os.path.isfile",
"numpy.array",
"numpy.dot"
] | [((6030, 6073), 'numpy.array', 'numpy.array', (['list_points'], {'dtype': 'numpy.int32'}), '(list_points, dtype=numpy.int32)\n', (6041, 6073), False, 'import numpy, os\n'), ((6228, 6271), 'numpy.array', 'numpy.array', (['list_points'], {'dtype': 'numpy.int32'}), '(list_points, dtype=numpy.int32)\n', (6239, 6271), False, 'import numpy, os\n'), ((6448, 6491), 'numpy.array', 'numpy.array', (['list_points'], {'dtype': 'numpy.int32'}), '(list_points, dtype=numpy.int32)\n', (6459, 6491), False, 'import numpy, os\n'), ((3124, 3145), 'os.path.isfile', 'os.path.isfile', (['path0'], {}), '(path0)\n', (3138, 3145), False, 'import numpy, os\n'), ((4489, 4533), 'numpy.dot', 'numpy.dot', (['self.listW[iface][1]', 'np_xy_bound'], {}), '(self.listW[iface][1], np_xy_bound)\n', (4498, 4533), False, 'import numpy, os\n'), ((4425, 4451), 'numpy.array', 'numpy.array', (['list_xy_bound'], {}), '(list_xy_bound)\n', (4436, 4451), False, 'import numpy, os\n')] |
import shapefile
# import finoa
import shapely
# import matplotlib
import numpy as np
import matplotlib.pyplot as plt
# import matplotlib.pyplot as plt
# import pandas as pd
from pyproj import Proj, transform
# import stateplane
__author__ = '<NAME>'
def distPL(Point, lineseg):
    """Minimum Euclidean distance from a point to a polyline.

    Args:
        Point: (x, y) coordinates of the query point.
        lineseg: sequence of (x, y) vertices [(x,y),(x,y),...] forming the polyline.

    Returns:
        float: the smallest distance from Point to any segment of the polyline.
    """
    xp, yp = Point[0], Point[1]  # lat lon
    distlist = []
    for i in range(len(lineseg) - 1):
        x1, y1 = lineseg[i][0], lineseg[i][1]
        x2, y2 = lineseg[i + 1][0], lineseg[i + 1][1]
        # Distances from the query point to the two segment endpoints.
        dist12 = [np.sqrt((xp - x1) ** 2 + (yp - y1) ** 2),
                  np.sqrt((xp - x2) ** 2 + (yp - y2) ** 2)]
        if x1 == x2 and y1 == y2:
            # Bug fix: a degenerate (zero-length) segment used to raise
            # ZeroDivisionError in the line-distance formula; fall back to
            # the point-to-point distance instead.
            distlist.append(dist12[0])
            continue
        # Perpendicular distance from the point to the infinite line.
        dist3 = (abs((y2 - y1) * xp - (x2 - x1) * yp + x2 * y1 - y2 * x1)) / \
                (np.sqrt((y2 - y1) ** 2 + (x2 - x1) ** 2))
        if x1 == x2:
            # Vertical segment: perpendicular foot lies on the segment iff
            # yp is strictly between the endpoint ordinates.
            dist = dist3 if min(y1, y2) < yp < max(y1, y2) else np.min(dist12)
        elif y1 == y2:
            # Horizontal segment: analogous check on the abscissa.
            dist = dist3 if min(x1, x2) < xp < max(x1, x2) else np.min(dist12)
        else:
            # General segment: project the point onto the line and test whether
            # the foot (x0, y0) lies within the segment's bounding box.
            k = (y2 - y1) / (x2 - x1)
            x0 = (k ** 2 * x1 + k * (yp - y1) + xp) / (k ** 2 + 1)
            y0 = k * (x0 - x1) + y1
            if min(x1, x2) < x0 < max(x1, x2) and min(y1, y2) < y0 < max(y1, y2):
                dist = dist3
            else:
                dist = np.min(dist12)
        distlist.append(dist)
    return np.min(distlist)
def distPP(P1, P2):
    """Euclidean distance between two 2-D points P1=(x,y) and P2=(x,y)."""
    dx = P1[0] - P2[0]
    dy = P1[1] - P2[1]
    return np.sqrt(dx ** 2 + dy ** 2)
def LineInPointBox(i, startkeyuni, endkeyuni, bufferlat, bufferlon, road):
    """True when either corner of the road's bounding box lies strictly inside
    the buffered rectangle spanned by event i's matched start/end points."""
    sx, sy = startkeyuni[i][0][0], startkeyuni[i][0][1]
    ex, ey = endkeyuni[i][0][0], endkeyuni[i][0][1]
    x_lo = min(ex, sx) - bufferlon
    x_hi = max(ex, sx) + bufferlon
    y_lo = min(ey, sy) - bufferlat
    y_hi = max(ey, sy) + bufferlat
    bbox = road.shape.bbox
    corners = ((bbox[0], bbox[1]), (bbox[2], bbox[3]))
    return any(x_lo < cx < x_hi and y_lo < cy < y_hi for cx, cy in corners)
def PointBox(i, startkeyuni, endkeyuni, bufferlat, bufferlon, road):
    """True when event i's matched start or end point lies strictly inside the
    buffered road bounding box, or a bbox corner lies strictly inside the
    buffered rectangle spanned by the two matched points."""
    sx, sy = startkeyuni[i][0][0], startkeyuni[i][0][1]
    ex, ey = endkeyuni[i][0][0], endkeyuni[i][0][1]
    bx0, by0, bx1, by1 = (road.shape.bbox[0], road.shape.bbox[1],
                          road.shape.bbox[2], road.shape.bbox[3])

    def in_road_box(px, py):
        # strictly inside the road bbox grown by the buffers
        return (bx0 - bufferlon < px < bx1 + bufferlon
                and by0 - bufferlat < py < by1 + bufferlat)

    def in_point_span(cx, cy):
        # strictly inside the start/end rectangle grown by the buffers
        return (min(sx, ex) - bufferlon < cx < max(sx, ex) + bufferlon
                and min(sy, ey) - bufferlat < cy < max(sy, ey) + bufferlat)

    return (in_road_box(sx, sy) or in_road_box(ex, ey)
            or in_point_span(bx0, by0) or in_point_span(bx1, by1))
def PointInLineBox(i, startkeyuni, bufferlat, bufferlon, roadshapebbox):
    """Return True when the i-th key point lies strictly inside *roadshapebbox*
    after the box is expanded by bufferlon (x) and bufferlat (y)."""
    px, py = startkeyuni[i][0][0], startkeyuni[i][0][1]
    within_x = roadshapebbox[0] - bufferlon < px < roadshapebbox[2] + bufferlon
    within_y = roadshapebbox[1] - bufferlat < py < roadshapebbox[3] + bufferlat
    return within_x and within_y
def DrawKeyPoint(i, road_shaperecords, endkeyuni, startkeyuni, wz_start, wz_end, wz_route, bufferlat, bufferlon):
    """Render and save two maps (big + detailed) for work zone *i*.

    Figure 1 ("big map") walks every shape record whose route id matches
    wz_route[i] zero-padded to 4 digits; figure 2 ("detailed map")
    additionally filters records with LineInPointBox / PointInLineBox so
    only segments near the matched start/end key points remain.  Both
    figures are saved as PNGs under ./CMU_rcrs_all_events_08-2015_04-2017/
    Figures/ and plt.show() is called at the end.

    NOTE(review): nearly all plt.plot calls are commented out, so the saved
    figures currently contain only legend/title; confirm whether the plots
    should be re-enabled.
    NOTE(review): the LineInPointBox call hard-codes 1e-2 buffers instead of
    using the bufferlat/bufferlon parameters -- confirm this is intentional.

    Parameters:
        i: index of the work zone to draw.
        road_shaperecords: iterable of shapefile ShapeRecord-like objects
            (each exposing .record, .shape.points and .shape.bbox).
        endkeyuni, startkeyuni: matched end/start key points; entry [i][0]
            is an (x, y) pair (titles suggest State Plane coordinates).
        wz_start, wz_end: table-like objects with "x_bgn"/"y_bgn" and
            "x_end"/"y_end" columns holding the original coordinates.
        wz_route: route identifiers, zero-padded to 4 digits for matching.
        bufferlat, bufferlon: search buffers passed to PointInLineBox.
    """
    plt.figure(1)
    # plt.subplot(211)
    for road in road_shaperecords:
        if road.record[0] == str(wz_route[i]).zfill(4):
            shape3 = road
            shape_ex = shape3.shape
            x_lon = []
            y_lat = []
            # collect the polyline vertices (kept for the commented-out plot)
            for ip in range(len(shape_ex.points)):
                lat1 = shape_ex.points[ip][1]
                lon1 = shape_ex.points[ip][0]
                x_lon.append(lon1)
                y_lat.append(lat1)
            # print(x_lon,y_lat)
            # plt.plot(x_lon,y_lat)
    # original and matched key-point coordinates (used only by the
    # commented-out plt.plot calls below)
    ori_wz_end_lat = wz_end["y_end"][i]
    ori_wz_end_lon = wz_end["x_end"][i]
    # print(case_wz_end_lat, case_wz_end_lon)
    # plt.plot(ori_wz_end_lon,ori_wz_end_lat,'og',label ='Original End')
    ori_wz_start_lat = wz_start["y_bgn"][i]
    ori_wz_start_lon = wz_start["x_bgn"][i]
    # print(case_wz_end_lat, case_wz_end_lon)
    # plt.plot(ori_wz_start_lon,ori_wz_start_lat,'ok',label ='Original Start')
    case_wz_end_lat = endkeyuni[i][0][1]
    case_wz_end_lon = endkeyuni[i][0][0]
    # print(case_wz_end_lat, case_wz_end_lon)
    # plt.plot(case_wz_end_lon,case_wz_end_lat,'*b',label ='Matched End')
    case_wz_start_lat = startkeyuni[i][0][1]
    case_wz_start_lon = startkeyuni[i][0][0]
    # print(case_wz_start_lat, case_wz_start_lon)
    # plt.plot(case_wz_start_lon,case_wz_start_lat,'*r',label ='Matched Start')
    plt.legend(loc='upper right')
    plt.title('Big Map of Work Zone-' + str(i) + " on State Plane")
    plt.savefig("./CMU_rcrs_all_events_08-2015_04-2017/Figures/BigMapStatePlane_" + str(i) + ".png")
    plt.figure(2)
    # plt.subplot(212)
    for road in road_shaperecords:
        # detailed map keeps only segments near the matched key points
        if road.record[0] == str(wz_route[i]).zfill(4) and \
                (LineInPointBox(i, startkeyuni, endkeyuni, 1e-2, 1e-2, road) or \
                 PointInLineBox(i, startkeyuni, bufferlat, bufferlon, road.shape.bbox) or \
                 PointInLineBox(i, endkeyuni, bufferlat, bufferlon, road.shape.bbox)):
            shape3 = road
            shape_ex = shape3.shape
            x_lon = []
            y_lat = []
            for ip in range(len(shape_ex.points)):
                lat1 = shape_ex.points[ip][1]
                lon1 = shape_ex.points[ip][0]
                x_lon.append(lon1)
                y_lat.append(lat1)
            # print(x_lon,y_lat)
            # plt.plot(x_lon,y_lat)
    ori_wz_end_lat = wz_end["y_end"][i]
    ori_wz_end_lon = wz_end["x_end"][i]
    # print(case_wz_end_lat, case_wz_end_lon)
    # plt.plot(ori_wz_end_lon,ori_wz_end_lat,'og',label ='Original End')
    ori_wz_start_lat = wz_start["y_bgn"][i]
    ori_wz_start_lon = wz_start["x_bgn"][i]
    # print(case_wz_end_lat, case_wz_end_lon)
    # plt.plot(ori_wz_start_lon,ori_wz_start_lat,'ok',label ='Original Start')
    case_wz_end_lat = endkeyuni[i][0][1]
    case_wz_end_lon = endkeyuni[i][0][0]
    # print(case_wz_end_lat, case_wz_end_lon)
    # plt.plot(case_wz_end_lon,case_wz_end_lat,'*b',label ='Matched End')
    case_wz_start_lat = startkeyuni[i][0][1]
    case_wz_start_lon = startkeyuni[i][0][0]
    # print(case_wz_start_lat, case_wz_start_lon)
    # plt.plot(case_wz_start_lon,case_wz_start_lat,'*r',label ='Matched Start')
    plt.legend(loc='upper right')
    plt.title('Detailed Map of Work Zone-' + str(i) + " on State Plane")
    plt.savefig("./CMU_rcrs_all_events_08-2015_04-2017/Figures/DetailedMapStatePlane_" + str(i) + ".png")
    plt.show()
def road_direction(wz_direction, startkeyuni_i, road_point_j1):
    """Tell whether a road vertex lies on the *wz_direction* side of the start key point.

    Returns True/False for the four known direction strings, and None
    (implicitly, via dict .get) for any other value -- matching the
    original fall-through behavior.

    NOTE(review): NORTH/SOUTH compare the x coordinate while EAST/WEST
    compare the y coordinate; this mirrors the source exactly -- confirm it
    matches the State Plane axis convention before changing it.
    """
    wz_x, wz_y = startkeyuni_i[0][0], startkeyuni_i[0][1]
    road_x, road_y = road_point_j1[0], road_point_j1[1]
    outcome = {
        "NORTH": road_x > wz_x,
        "SOUTH": road_x < wz_x,
        "EAST": road_y < wz_y,
        "WEST": road_y > wz_y,
    }
    # .get preserves the implicit None for unrecognised direction strings
    return outcome.get(wz_direction)
# WARNING: the original author flagged a significant error here (likely in drawline below); investigate before relying on it.
def drawline(i):
    """Plot the splitted line shapefile for work zone *i* and save it as a PNG.

    Reads ./CMU_rcrs_all_events_08-2015_04-2017/shapefile/SplittedLine_WZ<i>.shp,
    gathers each record's vertices (the actual plt.plot calls are commented
    out), then saves figure 1 to Figures/lineMapStateSouth_<i>.png.

    NOTE(review): this function reads module-level globals `endkeyuni` and
    `startkeyuni` instead of taking them as parameters (unlike its sibling
    functions) -- possibly the "significant error" flagged in the source;
    confirm before reuse.
    """
    shapetest = shapefile.Reader("./CMU_rcrs_all_events_08-2015_04-2017/shapefile/SplittedLine_WZ" + str(i) + ".shp")
    shapetest_sr = shapetest.shapeRecords()
    # print(len(shapetest_sr))
    plt.figure(1)
    for road in shapetest_sr:
        x_lon = []
        y_lat = []
        # collect vertices (kept for the commented-out plot below)
        for ip in range(len(road.shape.points)):
            # lat1, lon1 =transform(inProj,outProj,shape_ex.points[ip][0],shape_ex.points[ip][1])
            x_lon.append(road.shape.points[ip][0])
            y_lat.append(road.shape.points[ip][1])
        # print(x_lon)
        # plt.plot(x_lon,y_lat)
    # matched key points -- read from module-level globals, not parameters
    case_wz_end_lon, case_wz_end_lat = endkeyuni[i][0][0], endkeyuni[i][0][1]
    # print(case_wz_end_lat, case_wz_end_lon)
    # plt.plot(case_wz_end_lon,case_wz_end_lat,'*b',label ='Matched End')
    case_wz_start_lon, case_wz_start_lat = startkeyuni[i][0][0], startkeyuni[i][0][1]
    # print(case_wz_start_lat, case_wz_start_lon)
    # plt.plot(case_wz_start_lon,case_wz_start_lat,'*r',label ='Matched Start')
    plt.legend(loc='upper right')
    plt.title('Splitted of Work Zone-' + str(i) + " on State South")
    plt.savefig("./CMU_rcrs_all_events_08-2015_04-2017/Figures/lineMapStateSouth_" + str(i) + ".png")
def callinelength5(i):
    """Return the total polyline length of the splitted line in shapefile5 for work zone *i*.

    Sums the Euclidean distance between consecutive vertices of every record
    in SplittedLine_WZ<i>.shp (units are whatever the shapefile uses).
    """
    reader = shapefile.Reader("./CMU_rcrs_all_events_08-2015_04-2017/shapefile5/SplittedLine_WZ" + str(i) + ".shp")
    total = 0
    for record in reader.shapeRecords():
        pts = record.shape.points
        # pair each vertex with its successor and accumulate segment lengths
        for p0, p1 in zip(pts, pts[1:]):
            total += np.sqrt((p0[0] - p1[0]) ** 2 + (p0[1] - p1[1]) ** 2)
    return total
def callinelength2019(i, loc="./CMU_rcrs_all_events_08-2015_04-2017/shapefile5/SplittedLine_WZ"):
    """Return the total polyline length of the shapefile at loc + str(i) + ".shp".

    Generalized variant of callinelength5: the path prefix is a parameter.
    Sums the Euclidean distance between consecutive vertices of every record.
    """
    reader = shapefile.Reader(loc + str(i) + ".shp")
    total = 0
    for record in reader.shapeRecords():
        pts = record.shape.points
        # pair each vertex with its successor and accumulate segment lengths
        for p0, p1 in zip(pts, pts[1:]):
            total += np.sqrt((p0[0] - p1[0]) ** 2 + (p0[1] - p1[1]) ** 2)
    return total
def direction_match(wzdiri, roaddiri):
    """Return True when the work-zone direction is compatible with the road direction code.

    Compatible when: the first character of str(wzdiri) equals roaddiri;
    or the work zone is 'B' (both) and the road is 'O'; or the road itself
    is 'B'.  False otherwise.
    """
    wz_initial = str(wzdiri)[0:1]
    return (
        roaddiri == wz_initial
        or (wz_initial == 'B' and roaddiri == 'O')
        or roaddiri == 'B'
    )
def FindSplittedLine5_bi(i, roads, road_shaperecords, endkeyuni, startkeyuni, wz_route, bufferlat, bufferlon,
workzone_DIRECTION):
w = shapefile.Writer()
w.fields = roads.fields[1:]
records = []
pointsparts = []
k1 = []
k2 = []
collection = set()
smark = 0
emark = 0
scountlist = 0
ecountlist = 0
for road in road_shaperecords:
if (road.record[0] == str(wz_route[i]).zfill(4)) and (direction_match(workzone_DIRECTION[i], road.record[10])):
if PointInLineBox(i, startkeyuni, bufferlat, bufferlon, road.shape.bbox):
for j in range(len(road.shape.points) - 1):
vetice = [[road.shape.points[j][0], road.shape.points[j][1]],
[road.shape.points[j + 1][0], road.shape.points[j + 1][1]]]
scount = distPL(startkeyuni[i][0], vetice) < 10 and distPL(endkeyuni[i][0], vetice) < distPP(
endkeyuni[i][0], startkeyuni[i][0])
scountlist = scountlist + scount
if PointInLineBox(i, endkeyuni, bufferlat, bufferlon, road.shape.bbox):
for j in range(len(road.shape.points) - 1):
vetice = [[road.shape.points[j][0], road.shape.points[j][1]],
[road.shape.points[j + 1][0], road.shape.points[j + 1][1]]]
ecount = distPL(endkeyuni[i][0], vetice) < 10 and distPL(startkeyuni[i][0], vetice) < distPP(
endkeyuni[i][0], startkeyuni[i][0])
ecountlist = ecountlist + ecount
for road in road_shaperecords:
if (road.record[0] == str(wz_route[i]).zfill(4)) and (direction_match(workzone_DIRECTION[i], road.record[10])):
collection.add(road)
if PointInLineBox(i, startkeyuni, bufferlat, bufferlon, road.shape.bbox):
for j in range(len(road.shape.points) - 1):
subpart = []
vetice = [[road.shape.points[j][0], road.shape.points[j][1]],
[road.shape.points[j + 1][0], road.shape.points[j + 1][1]]]
if distPL(startkeyuni[i][0], vetice) < 10 and distPL(endkeyuni[i][0], vetice) < distPP(
endkeyuni[i][0], startkeyuni[i][0]):
smark = smark + 1
cosup = distPP(endkeyuni[i][0], [road.shape.points[j][0], \
road.shape.points[j][1]]) - distPP(startkeyuni[i][0], [
road.shape.points[j + 1][0], \
road.shape.points[j + 1][1]])
if cosup > 0:
if smark <= 1:
srecord = [road.record]
spoints = []
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k1 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(startkeyuni[i][0], k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove((road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
spoints.remove((road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
spoints.remove((road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
spoints.append(k1)
break
# print("k1 ori = "+str(k1))
collection.remove(road)
else:
if scountlist < 3:
if distPP(endkeyuni[i][0], [road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][
1]]) < distPP(endkeyuni[i][0], k1):
srecord = [road.record]
spoints = []
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k1 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(startkeyuni[i][0],
k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
spoints.append(k1)
break
# print("scountlist ==1,k1="+str(k1))
collection.remove(road)
else:
if road_direction(workzone_DIRECTION[i], startkeyuni[i], road.shape.points[j + 1]):
srecord = [road.record]
spoints = []
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k1 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(startkeyuni[i][0],
k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
spoints.append(k1)
break
# print(scountlist,smark)
# print("road_direction,k1="+str(k1))
# print(road.record)
collection.remove(road)
else:
if smark <= 1:
srecord = [road.record]
spoints = []
for jm in range(j + 1):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
k1 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(startkeyuni[i][0], k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
spoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
spoints.insert(0, k1)
break
collection.remove(road)
else:
if scountlist == 1:
if distPP(endkeyuni[i][0], [road.shape.points[0][0], \
road.shape.points[0][1]]) < distPP(endkeyuni[i][0], k1):
srecord = [road.record]
spoints = []
for jm in range(j):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
k1 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(startkeyuni[i][0],
k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(j + 1):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
spoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
spoints.insert(0, k1)
break
collection.remove(road)
else:
if road_direction(workzone_DIRECTION[i], startkeyuni[i], road.shape.points[j]):
srecord = [road.record]
spoints = []
k1 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
for jm in range(j):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(startkeyuni[i][0],
k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
spoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
spoints.insert(0, k1)
break
collection.remove(road)
else:
continue
elif PointInLineBox(i, endkeyuni, bufferlat, bufferlon, road.shape.bbox):
# print('a')
for j in range(len(road.shape.points) - 1):
subpart = []
vetice = [[road.shape.points[j][0], road.shape.points[j][1]],
[road.shape.points[j + 1][0], road.shape.points[j + 1][1]]]
if distPL(endkeyuni[i][0], vetice) < 10 and distPL(startkeyuni[i][0], vetice) < distPP(
endkeyuni[i][0], startkeyuni[i][0]):
emark = emark + 1
cosup = distPP(startkeyuni[i][0], [road.shape.points[j][0], \
road.shape.points[j][1]]) - distPP(startkeyuni[i][0], [
road.shape.points[j + 1][0], \
road.shape.points[j + 1][1]])
if cosup > 0:
# print('d')
if emark <= 1:
erecord = [road.record]
epoints = []
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k2 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0], k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove((road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
epoints.remove((road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
epoints.remove((road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
epoints.append(k2)
break
# print("cosup>0emark<=1")
# print(k2)
try:
collection.remove(road)
except:
pass
else:
if ecountlist == 1:
if distPP(startkeyuni[i][0], [road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][
1]]) < distPP(startkeyuni[i][0], k2):
erecord = [road.record]
epoints = []
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k2 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0], k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
epoints.append(k2)
break
try:
collection.remove(road)
except:
pass
else:
if road_direction(workzone_DIRECTION[i], endkeyuni[i], road.shape.points[j + 1]):
erecord = [road.record]
epoints = []
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k2 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0], k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
epoints.append(k2)
break
# print(k2)
try:
collection.remove(road)
except:
pass
else:
# print("e")
# print(cosup)
# print(j)
if emark <= 1:
# print(emark)
# print("emark<=1")
erecord = [road.record]
epoints = []
for jm in range(j + 1):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
k2 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0], k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
epoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
epoints.insert(0, k2)
break
# print(k2)
try:
collection.remove(road)
except:
pass
else:
if ecountlist == 1:
if distPP(startkeyuni[i][0], [road.shape.points[0][0], \
road.shape.points[0][1]]) < distPP(startkeyuni[i][0],
k2):
erecord = [road.record]
epoints = []
k2 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
for jm in range(j + 1):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0], k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
epoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
epoints.insert(0, k2)
break
# print("ecountlist=2")
# print(k2)
try:
collection.remove(road)
except:
pass
else:
if road_direction(workzone_DIRECTION[i], endkeyuni[i], road.shape.points[j]):
# print("f")
erecord = [road.record]
epoints = []
for jm in range(j + 1):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
k2 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0], k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
epoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
epoints.insert(0, k2)
break
# print(k2)
try:
collection.remove(road)
except:
pass
else:
continue
# handle collection, delete the odd/even segnumber that is different from
# print('k2='+str(k2))
# print('k1='+str(k1))
# print("scountlist,ecountlist,smark,emark")
# print(scountlist,ecountlist,smark,emark)
records.append(srecord)
pointsparts.append([spoints])
try:
records.append(erecord)
pointsparts.append([epoints])
except:
pass
itercount = 0
while (k1 == []) and itercount < 10:
bufferlat, bufferlon = bufferlat * 5, bufferlon * 5
records = []
pointsparts = []
k1 = []
k2 = []
collection = set()
smark = 0
emark = 0
scountlist = 0
ecountlist = 0
for road in road_shaperecords:
if (road.record[0] == str(wz_route[i]).zfill(4)) and (
direction_match(workzone_DIRECTION[i], road.record[10])):
if PointInLineBox(i, startkeyuni, bufferlat, bufferlon, road.shape.bbox):
for j in range(len(road.shape.points) - 1):
vetice = [[road.shape.points[j][0], road.shape.points[j][1]],
[road.shape.points[j + 1][0], road.shape.points[j + 1][1]]]
scount = distPL(startkeyuni[i][0], vetice) < 10 and distPL(endkeyuni[i][0], vetice) < distPP(
endkeyuni[i][0], startkeyuni[i][0])
scountlist = scountlist + scount
if PointInLineBox(i, endkeyuni, bufferlat, bufferlon, road.shape.bbox):
for j in range(len(road.shape.points) - 1):
vetice = [[road.shape.points[j][0], road.shape.points[j][1]],
[road.shape.points[j + 1][0], road.shape.points[j + 1][1]]]
ecount = distPL(endkeyuni[i][0], vetice) < 10 and distPL(startkeyuni[i][0], vetice) < distPP(
endkeyuni[i][0], startkeyuni[i][0])
ecountlist = ecountlist + ecount
for road in road_shaperecords:
if (road.record[0] == str(wz_route[i]).zfill(4)) and (
direction_match(workzone_DIRECTION[i], road.record[10])):
collection.add(road)
if PointInLineBox(i, startkeyuni, bufferlat, bufferlon, road.shape.bbox):
for j in range(len(road.shape.points) - 1):
subpart = []
vetice = [[road.shape.points[j][0], road.shape.points[j][1]],
[road.shape.points[j + 1][0], road.shape.points[j + 1][1]]]
if distPL(startkeyuni[i][0], vetice) < 10:
smark = smark + 1
cosup = distPP(endkeyuni[i][0], [road.shape.points[j][0], \
road.shape.points[j][1]]) - distPP(startkeyuni[i][0], [
road.shape.points[j + 1][0], \
road.shape.points[j + 1][1]])
if cosup > 0:
if smark <= 1:
srecord = [road.record]
spoints = []
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k1 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(startkeyuni[i][0], k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove((road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
spoints.remove((road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
spoints.remove((road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
spoints.append(k1)
break
collection.remove(road)
else:
if scountlist == 1:
if distPP(endkeyuni[i][0], [road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][
1]]) < distPP(endkeyuni[i][0], k1):
srecord = [road.record]
spoints = []
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k1 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(
startkeyuni[i][0], k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][
1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][
1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
spoints.append(k1)
break
collection.remove(road)
else:
if road_direction(workzone_DIRECTION[i], startkeyuni[i],
road.shape.points[j + 1]):
srecord = [road.record]
spoints = []
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k1 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(
startkeyuni[i][0], k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][
1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][
1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
spoints.append(k1)
break
collection.remove(road)
else:
if smark <= 1:
srecord = [road.record]
spoints = []
for jm in range(j + 1):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
k1 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(startkeyuni[i][0], k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
spoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
spoints.insert(0, k1)
break
collection.remove(road)
else:
if scountlist == 1:
if distPP(endkeyuni[i][0], [road.shape.points[0][0], \
road.shape.points[0][1]]) < distPP(endkeyuni[i][0],
k1):
srecord = [road.record]
spoints = []
for jm in range(j):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
k1 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(
startkeyuni[i][0], k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(j + 1):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
spoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
spoints.insert(0, k1)
break
collection.remove(road)
else:
if road_direction(workzone_DIRECTION[i], startkeyuni[i], road.shape.points[j]):
srecord = [road.record]
spoints = []
k1 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
for jm in range(j):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(
startkeyuni[i][0], k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
spoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
spoints.insert(0, k1)
break
collection.remove(road)
else:
continue
elif PointInLineBox(i, endkeyuni, bufferlat, bufferlon, road.shape.bbox):
# print('a')
for j in range(len(road.shape.points) - 1):
subpart = []
vetice = [[road.shape.points[j][0], road.shape.points[j][1]],
[road.shape.points[j + 1][0], road.shape.points[j + 1][1]]]
if distPL(endkeyuni[i][0], vetice) < 10:
emark = emark + 1
cosup = distPP(startkeyuni[i][0], [road.shape.points[j][0], \
road.shape.points[j][1]]) - distPP(startkeyuni[i][0], [
road.shape.points[j + 1][0], \
road.shape.points[j + 1][1]])
if cosup > 0:
# print('d')
if emark <= 1:
erecord = [road.record]
epoints = []
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k2 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0], k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove((road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
epoints.remove((road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
epoints.remove((road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
epoints.append(k2)
break
# print("cosup>0emark<=1")
# print(k2)
try:
collection.remove(road)
except:
pass
else:
if ecountlist == 1:
if distPP(startkeyuni[i][0], [road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][
1]]) < distPP(startkeyuni[i][0], k2):
erecord = [road.record]
epoints = []
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k2 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0],
k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][
1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][
1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
epoints.append(k2)
break
try:
collection.remove(road)
except:
pass
else:
if road_direction(workzone_DIRECTION[i], endkeyuni[i],
road.shape.points[j + 1]):
erecord = [road.record]
epoints = []
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k2 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0],
k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][
1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][
1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
epoints.append(k2)
break
# print(k2)
try:
collection.remove(road)
except:
pass
else:
# print("e")
# print(cosup)
# print(j)
if emark <= 1:
# print(emark)
# print("emark<=1")
erecord = [road.record]
epoints = []
for jm in range(j + 1):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
k2 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0], k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
epoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
epoints.insert(0, k2)
break
# print(k2)
try:
collection.remove(road)
except:
pass
else:
if ecountlist == 1:
if distPP(startkeyuni[i][0], [road.shape.points[0][0], \
road.shape.points[0][1]]) < distPP(
startkeyuni[i][0], k2):
erecord = [road.record]
epoints = []
k2 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
for jm in range(j + 1):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0],
k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
epoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
epoints.insert(0, k2)
break
# print("ecountlist=2")
# print(k2)
try:
collection.remove(road)
except:
pass
else:
if road_direction(workzone_DIRECTION[i], endkeyuni[i], road.shape.points[j]):
# print("f")
erecord = [road.record]
epoints = []
for jm in range(j + 1):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
k2 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0],
k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
epoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
epoints.insert(0, k2)
break
# print(k2)
try:
collection.remove(road)
except:
pass
else:
continue
itercount = itercount + 1
# print(itercount)
# print(k1,k2)
k1_latlon = k1[1], k1[0]
try:
k2_latlon = k2[1], k2[0]
except:
k2_latlon = startkeyuni[i][0][1], startkeyuni[i][0][0]
case_wz_end_lat, case_wz_end_lon = endkeyuni[i][0][1], endkeyuni[i][0][0]
# print(case_wz_end_lat, case_wz_end_lon)
# plt.figure(i)
# plt.plot(case_wz_end_lon,case_wz_end_lat,'*b',label ='Matched End')
case_wz_start_lat, case_wz_start_lon = startkeyuni[i][0][1], startkeyuni[i][0][0]
# print(case_wz_start_lat, case_wz_start_lon)
# plt.plot(case_wz_start_lon,case_wz_start_lat,'*r',label ='Matched Start')
# plt.plot(k1_latlon[1],k1_latlon[0],'ob',label ='k1')
# plt.plot(k2_latlon[1],k2_latlon[0],'og',label ='k2')
iter_count = 1
k12_j = distPP(k1_latlon, k2_latlon) > 5 and distPP(k1, endkeyuni[i][0]) > 1 and distPP(k2, startkeyuni[i][0]) > 1
while k12_j and iter_count < 100:
collection2 = collection.copy()
for road in collection:
if distPP(k1_latlon, [road.shape.points[0][1], road.shape.points[0][0]]) < 1:
records.append([road.record])
pointsparts.append([road.shape.points])
k1_latlon = road.shape.points[len(road.shape.points) - 1][1], \
road.shape.points[len(road.shape.points) - 1][0]
# plt.plot(k1_latlon[1],k1_latlon[0],'ob',label ='k1')
collection2.remove(road)
# print('l1')
break
k1 = k1_latlon[1], k1_latlon[0]
k2 = k2_latlon[1], k2_latlon[0]
# print("k1="+str(k1)+", k2="+str(k2))
k12_j = distPP(k1_latlon, k2_latlon) > 5 and distPP(k1, endkeyuni[i][0]) > 1 and distPP(k2,
startkeyuni[i][0]) > 1
if k12_j == False:
break
collection3 = collection2.copy()
for road in collection2:
if distPP(k2_latlon, [road.shape.points[0][1], road.shape.points[0][0]]) < 1:
records.append([road.record])
pointsparts.append([road.shape.points])
k2_latlon = road.shape.points[len(road.shape.points) - 1][1], \
road.shape.points[len(road.shape.points) - 1][0]
# plt.plot(k2_latlon[1],k2_latlon[0],'or',label ='k2')
collection3.remove(road)
# print('l2')
break
k1 = k1_latlon[1], k1_latlon[0]
k2 = k2_latlon[1], k2_latlon[0]
# print("k1="+str(k1)+", k2="+str(k2))
k12_j = distPP(k1_latlon, k2_latlon) > 5 and distPP(k1, endkeyuni[i][0]) > 1 and distPP(k2,
startkeyuni[i][0]) > 1
if k12_j == False:
break
collection4 = collection3.copy()
for road in collection3:
if distPP(k1_latlon, [road.shape.points[len(road.shape.points) - 1][1],
road.shape.points[len(road.shape.points) - 1][0]]) < 1:
records.append([road.record])
pointsparts.append([road.shape.points])
k1_latlon = road.shape.points[0][1], road.shape.points[0][0]
# plt.plot(k1_latlon[1],k1_latlon[0],'ob',label ='k1')
collection4.remove(road)
# print('l3')
break
k1 = k1_latlon[1], k1_latlon[0]
k2 = k2_latlon[1], k2_latlon[0]
# print("k1="+str(k1)+", k2="+str(k2))
k12_j = distPP(k1_latlon, k2_latlon) > 5 and distPP(k1, endkeyuni[i][0]) > 1 and distPP(k2,
startkeyuni[i][0]) > 1
if k12_j == False:
break
collection5 = collection4.copy()
for road in collection4:
if distPP(k2_latlon, [road.shape.points[len(road.shape.points) - 1][1],
road.shape.points[len(road.shape.points) - 1][0]]) < 1:
records.append([road.record])
pointsparts.append([road.shape.points])
k2_latlon = road.shape.points[0][1], road.shape.points[0][0]
# plt.plot(k2_latlon[1],k2_latlon[0],'or',label ='k2')
collection5.remove(road)
# print('l4')
break
iter_count = iter_count + 1
k1 = k1_latlon[1], k1_latlon[0]
k2 = k2_latlon[1], k2_latlon[0]
# print("k1="+str(k1)+", k2="+str(k2))
k12_j = distPP(k1_latlon, k2_latlon) > 5 and distPP(k1, endkeyuni[i][0]) > 5 and distPP(k2,
startkeyuni[i][0]) > 5
if k12_j == False:
break
collection = collection5.copy()
# print(iter_count)
# plt.legend(loc='upper right')
# elif LineInPointBox(i,startkeyuni,endkeyuni,1e-2,1e-2,road):
# pointsparts.append(road.shape.points)
# records.append(road.record)
# print(len(pointsparts))
# print(len(records))
for idex in range(len(pointsparts)):
# print(pointsparts[idex])
# print(records[idex])
# print(len(records[idex][0]))
w.line(parts=pointsparts[idex])
w.record(*records[idex][0])
w.null()
w.save('CMU_rcrs_all_events_08-2015_04-2017/shapefile5_bi/SplittedLine_WZ' + str(i))
def FindSplittedLine6_5(i, roads, road_shaperecords, endkeyuni, startkeyuni, wz_route, bufferlat, bufferlon,
workzone_DIRECTION):
w = shapefile.Writer()
w.fields = roads.fields[1:]
records = []
pointsparts = []
k1 = []
k2 = []
collection = set()
smark = 0
emark = 0
scountlist = 0
ecountlist = 0
for road in road_shaperecords:
if road.record[0] == str(wz_route[i]).zfill(4):
if PointInLineBox(i, startkeyuni, bufferlat, bufferlon, road.shape.bbox):
for j in range(len(road.shape.points) - 1):
vetice = [[road.shape.points[j][0], road.shape.points[j][1]],
[road.shape.points[j + 1][0], road.shape.points[j + 1][1]]]
scount = distPL(startkeyuni[i][0], vetice) < 1 and distPL(endkeyuni[i][0], vetice) < distPP(
endkeyuni[i][0], startkeyuni[i][0])
scountlist = scountlist + scount
if PointInLineBox(i, endkeyuni, bufferlat, bufferlon, road.shape.bbox):
for j in range(len(road.shape.points) - 1):
vetice = [[road.shape.points[j][0], road.shape.points[j][1]],
[road.shape.points[j + 1][0], road.shape.points[j + 1][1]]]
ecount = distPL(endkeyuni[i][0], vetice) < 1 and distPL(startkeyuni[i][0], vetice) < distPP(
endkeyuni[i][0], startkeyuni[i][0])
ecountlist = ecountlist + ecount
for road in road_shaperecords:
if road.record[0] == str(wz_route[i]).zfill(4):
collection.add(road)
if PointInLineBox(i, startkeyuni, bufferlat, bufferlon, road.shape.bbox):
for j in range(len(road.shape.points) - 1):
subpart = []
vetice = [[road.shape.points[j][0], road.shape.points[j][1]],
[road.shape.points[j + 1][0], road.shape.points[j + 1][1]]]
if distPL(startkeyuni[i][0], vetice) < 1 and distPL(endkeyuni[i][0], vetice) < distPP(
endkeyuni[i][0], startkeyuni[i][0]):
smark = smark + 1
cosup = distPP(endkeyuni[i][0], [road.shape.points[j][0], \
road.shape.points[j][1]]) - distPP(startkeyuni[i][0], [
road.shape.points[j + 1][0], \
road.shape.points[j + 1][1]])
if cosup > 0:
if smark <= 1:
srecord = [road.record]
spoints = []
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k1 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(startkeyuni[i][0], k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove((road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
spoints.remove((road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
spoints.remove((road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
spoints.append(k1)
break
# print("k1 ori = "+str(k1))
collection.remove(road)
else:
if scountlist < 3:
if distPP(endkeyuni[i][0], [road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][
1]]) < distPP(endkeyuni[i][0], k1):
srecord = [road.record]
spoints = []
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k1 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(startkeyuni[i][0],
k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
spoints.append(k1)
break
# print("scountlist ==1,k1="+str(k1))
collection.remove(road)
else:
if road_direction(workzone_DIRECTION[i], startkeyuni[i], road.shape.points[j + 1]):
srecord = [road.record]
spoints = []
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k1 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(startkeyuni[i][0],
k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
spoints.append(k1)
break
print(scountlist)
print(smark)
print("road_direction,k1=" + str(k1))
collection.remove(road)
else:
if smark <= 1:
srecord = [road.record]
spoints = []
for jm in range(j + 1):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
k1 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(startkeyuni[i][0], k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
spoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
spoints.insert(0, k1)
break
collection.remove(road)
else:
if scountlist == 1:
if distPP(endkeyuni[i][0], [road.shape.points[0][0], \
road.shape.points[0][1]]) < distPP(endkeyuni[i][0], k1):
srecord = [road.record]
spoints = []
for jm in range(j):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
k1 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(startkeyuni[i][0],
k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(j + 1):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
spoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
spoints.insert(0, k1)
break
collection.remove(road)
else:
if road_direction(workzone_DIRECTION[i], startkeyuni[i], road.shape.points[j]):
srecord = [road.record]
spoints = []
k1 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
for jm in range(j):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(startkeyuni[i][0],
k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
spoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
spoints.insert(0, k1)
break
collection.remove(road)
else:
continue
elif PointInLineBox(i, endkeyuni, bufferlat, bufferlon, road.shape.bbox):
# print('a')
for j in range(len(road.shape.points) - 1):
subpart = []
vetice = [[road.shape.points[j][0], road.shape.points[j][1]],
[road.shape.points[j + 1][0], road.shape.points[j + 1][1]]]
if distPL(endkeyuni[i][0], vetice) < 1 and distPL(startkeyuni[i][0], vetice) < distPP(
endkeyuni[i][0], startkeyuni[i][0]):
emark = emark + 1
cosup = distPP(startkeyuni[i][0], [road.shape.points[j][0], \
road.shape.points[j][1]]) - distPP(startkeyuni[i][0], [
road.shape.points[j + 1][0], \
road.shape.points[j + 1][1]])
if cosup > 0:
# print('d')
if emark <= 1:
erecord = [road.record]
epoints = []
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k2 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0], k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove((road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
epoints.remove((road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
epoints.remove((road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
epoints.append(k2)
break
# print("cosup>0emark<=1")
# print(k2)
try:
collection.remove(road)
except:
pass
else:
if ecountlist == 1:
if distPP(startkeyuni[i][0], [road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][
1]]) < distPP(startkeyuni[i][0], k2):
erecord = [road.record]
epoints = []
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k2 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0], k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
epoints.append(k2)
break
try:
collection.remove(road)
except:
pass
else:
if road_direction(workzone_DIRECTION[i], endkeyuni[i], road.shape.points[j + 1]):
erecord = [road.record]
epoints = []
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k2 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0], k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
epoints.append(k2)
break
# print(k2)
try:
collection.remove(road)
except:
pass
else:
# print("e")
# print(cosup)
# print(j)
if emark <= 1:
# print(emark)
# print("emark<=1")
erecord = [road.record]
epoints = []
for jm in range(j + 1):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
k2 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0], k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
epoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
epoints.insert(0, k2)
break
# print(k2)
try:
collection.remove(road)
except:
pass
else:
if ecountlist == 1:
if distPP(startkeyuni[i][0], [road.shape.points[0][0], \
road.shape.points[0][1]]) < distPP(startkeyuni[i][0],
k2):
erecord = [road.record]
epoints = []
k2 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
for jm in range(j + 1):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0], k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
epoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
epoints.insert(0, k2)
break
# print("ecountlist=2")
# print(k2)
try:
collection.remove(road)
except:
pass
else:
if road_direction(workzone_DIRECTION[i], endkeyuni[i], road.shape.points[j]):
# print("f")
erecord = [road.record]
epoints = []
for jm in range(j + 1):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
k2 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0], k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
epoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
epoints.insert(0, k2)
break
# print(k2)
try:
collection.remove(road)
except:
pass
else:
continue
# handle collection, delete the odd/even segnumber that is different from
# print('k2='+str(k2))
# print('k1='+str(k1))
# print("scountlist,ecountlist,smark,emark")
# print(scountlist,ecountlist,smark,emark)
records.append(srecord)
pointsparts.append([spoints])
try:
records.append(erecord)
pointsparts.append([epoints])
except:
pass
itercount = 0
while (k1 == []) and itercount < 10:
bufferlat, bufferlon = bufferlat * 5, bufferlon * 5
records = []
pointsparts = []
k1 = []
k2 = []
collection = set()
smark = 0
emark = 0
scountlist = 0
ecountlist = 0
for road in road_shaperecords:
if road.record[0] == str(wz_route[i]).zfill(4):
if PointInLineBox(i, startkeyuni, bufferlat, bufferlon, road.shape.bbox):
for j in range(len(road.shape.points) - 1):
vetice = [[road.shape.points[j][0], road.shape.points[j][1]],
[road.shape.points[j + 1][0], road.shape.points[j + 1][1]]]
scount = distPL(startkeyuni[i][0], vetice) < 1 and distPL(endkeyuni[i][0], vetice) < distPP(
endkeyuni[i][0], startkeyuni[i][0])
scountlist = scountlist + scount
if PointInLineBox(i, endkeyuni, bufferlat, bufferlon, road.shape.bbox):
for j in range(len(road.shape.points) - 1):
vetice = [[road.shape.points[j][0], road.shape.points[j][1]],
[road.shape.points[j + 1][0], road.shape.points[j + 1][1]]]
ecount = distPL(endkeyuni[i][0], vetice) < 1 and distPL(startkeyuni[i][0], vetice) < distPP(
endkeyuni[i][0], startkeyuni[i][0])
ecountlist = ecountlist + ecount
for road in road_shaperecords:
if road.record[0] == str(wz_route[i]).zfill(4):
collection.add(road)
if PointInLineBox(i, startkeyuni, bufferlat, bufferlon, road.shape.bbox):
for j in range(len(road.shape.points) - 1):
subpart = []
vetice = [[road.shape.points[j][0], road.shape.points[j][1]],
[road.shape.points[j + 1][0], road.shape.points[j + 1][1]]]
if distPL(startkeyuni[i][0], vetice) < 1:
smark = smark + 1
cosup = distPP(endkeyuni[i][0], [road.shape.points[j][0], \
road.shape.points[j][1]]) - distPP(startkeyuni[i][0], [
road.shape.points[j + 1][0], \
road.shape.points[j + 1][1]])
if cosup > 0:
if smark <= 1:
srecord = [road.record]
spoints = []
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k1 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(startkeyuni[i][0], k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove((road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
spoints.remove((road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
spoints.remove((road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
spoints.append(k1)
break
collection.remove(road)
else:
if scountlist == 1:
if distPP(endkeyuni[i][0], [road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][
1]]) < distPP(endkeyuni[i][0], k1):
srecord = [road.record]
spoints = []
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k1 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(
startkeyuni[i][0], k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][
1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][
1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
spoints.append(k1)
break
collection.remove(road)
else:
if road_direction(workzone_DIRECTION[i], startkeyuni[i],
road.shape.points[j + 1]):
srecord = [road.record]
spoints = []
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k1 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(
startkeyuni[i][0], k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][
1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][
1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
spoints.append(k1)
break
collection.remove(road)
else:
if smark <= 1:
srecord = [road.record]
spoints = []
for jm in range(j + 1):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
k1 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(startkeyuni[i][0], k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
spoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
spoints.insert(0, k1)
break
collection.remove(road)
else:
if scountlist == 1:
if distPP(endkeyuni[i][0], [road.shape.points[0][0], \
road.shape.points[0][1]]) < distPP(endkeyuni[i][0],
k1):
srecord = [road.record]
spoints = []
for jm in range(j):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
k1 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(
startkeyuni[i][0], k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(j + 1):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
spoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
spoints.insert(0, k1)
break
collection.remove(road)
else:
if road_direction(workzone_DIRECTION[i], startkeyuni[i], road.shape.points[j]):
srecord = [road.record]
spoints = []
k1 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
for jm in range(j):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(
startkeyuni[i][0], k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
spoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
spoints.insert(0, k1)
break
collection.remove(road)
else:
continue
elif PointInLineBox(i, endkeyuni, bufferlat, bufferlon, road.shape.bbox):
# print('a')
for j in range(len(road.shape.points) - 1):
subpart = []
vetice = [[road.shape.points[j][0], road.shape.points[j][1]],
[road.shape.points[j + 1][0], road.shape.points[j + 1][1]]]
if distPL(endkeyuni[i][0], vetice) < 1:
emark = emark + 1
cosup = distPP(startkeyuni[i][0], [road.shape.points[j][0], \
road.shape.points[j][1]]) - distPP(startkeyuni[i][0], [
road.shape.points[j + 1][0], \
road.shape.points[j + 1][1]])
if cosup > 0:
# print('d')
if emark <= 1:
erecord = [road.record]
epoints = []
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k2 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0], k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove((road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
epoints.remove((road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
epoints.remove((road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
epoints.append(k2)
break
# print("cosup>0emark<=1")
# print(k2)
try:
collection.remove(road)
except:
pass
else:
if ecountlist == 1:
if distPP(startkeyuni[i][0], [road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][
1]]) < distPP(startkeyuni[i][0], k2):
erecord = [road.record]
epoints = []
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k2 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0],
k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][
1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][
1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
epoints.append(k2)
break
try:
collection.remove(road)
except:
pass
else:
if road_direction(workzone_DIRECTION[i], endkeyuni[i],
road.shape.points[j + 1]):
erecord = [road.record]
epoints = []
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k2 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0],
k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][
1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][
1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
epoints.append(k2)
break
# print(k2)
try:
collection.remove(road)
except:
pass
else:
# print("e")
# print(cosup)
# print(j)
if emark <= 1:
# print(emark)
# print("emark<=1")
erecord = [road.record]
epoints = []
for jm in range(j + 1):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
k2 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0], k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
epoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
epoints.insert(0, k2)
break
# print(k2)
try:
collection.remove(road)
except:
pass
else:
if ecountlist == 1:
if distPP(startkeyuni[i][0], [road.shape.points[0][0], \
road.shape.points[0][1]]) < distPP(
startkeyuni[i][0], k2):
erecord = [road.record]
epoints = []
k2 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
for jm in range(j + 1):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0],
k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
epoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
epoints.insert(0, k2)
break
# print("ecountlist=2")
# print(k2)
try:
collection.remove(road)
except:
pass
else:
if road_direction(workzone_DIRECTION[i], endkeyuni[i], road.shape.points[j]):
# print("f")
erecord = [road.record]
epoints = []
for jm in range(j + 1):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
k2 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0],
k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
epoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
epoints.insert(0, k2)
break
# print(k2)
try:
collection.remove(road)
except:
pass
else:
continue
itercount = itercount + 1
# print(itercount)
# print(k1,k2)
k1_latlon = k1[1], k1[0]
try:
k2_latlon = k2[1], k2[0]
except:
k2_latlon = startkeyuni[i][0][1], startkeyuni[i][0][0]
case_wz_end_lat, case_wz_end_lon = endkeyuni[i][0][1], endkeyuni[i][0][0]
# print(case_wz_end_lat, case_wz_end_lon)
# plt.figure(i)
# plt.plot(case_wz_end_lon,case_wz_end_lat,'*b',label ='Matched End')
case_wz_start_lat, case_wz_start_lon = startkeyuni[i][0][1], startkeyuni[i][0][0]
# print(case_wz_start_lat, case_wz_start_lon)
# plt.plot(case_wz_start_lon,case_wz_start_lat,'*r',label ='Matched Start')
# plt.plot(k1_latlon[1],k1_latlon[0],'ob',label ='k1')
# plt.plot(k2_latlon[1],k2_latlon[0],'og',label ='k2')
iter_count = 1
k12_j = distPP(k1_latlon, k2_latlon) > 5 and distPP(k1, endkeyuni[i][0]) > 1 and distPP(k2, startkeyuni[i][0]) > 1
while k12_j and iter_count < 100:
collection2 = collection.copy()
for road in collection:
if distPP(k1_latlon, [road.shape.points[0][1], road.shape.points[0][0]]) < 1:
records.append([road.record])
pointsparts.append([road.shape.points])
k1_latlon = road.shape.points[len(road.shape.points) - 1][1], \
road.shape.points[len(road.shape.points) - 1][0]
# plt.plot(k1_latlon[1],k1_latlon[0],'ob',label ='k1')
collection2.remove(road)
# print('l1')
break
k1 = k1_latlon[1], k1_latlon[0]
k2 = k2_latlon[1], k2_latlon[0]
# print("k1="+str(k1)+", k2="+str(k2))
k12_j = distPP(k1_latlon, k2_latlon) > 5 and distPP(k1, endkeyuni[i][0]) > 1 and distPP(k2,
startkeyuni[i][0]) > 1
if k12_j == False:
break
collection3 = collection2.copy()
for road in collection2:
if distPP(k2_latlon, [road.shape.points[0][1], road.shape.points[0][0]]) < 1:
records.append([road.record])
pointsparts.append([road.shape.points])
k2_latlon = road.shape.points[len(road.shape.points) - 1][1], \
road.shape.points[len(road.shape.points) - 1][0]
# plt.plot(k2_latlon[1],k2_latlon[0],'or',label ='k2')
collection3.remove(road)
# print('l2')
break
k1 = k1_latlon[1], k1_latlon[0]
k2 = k2_latlon[1], k2_latlon[0]
# print("k1="+str(k1)+", k2="+str(k2))
k12_j = distPP(k1_latlon, k2_latlon) > 5 and distPP(k1, endkeyuni[i][0]) > 1 and distPP(k2,
startkeyuni[i][0]) > 1
if k12_j == False:
break
collection4 = collection3.copy()
for road in collection3:
if distPP(k1_latlon, [road.shape.points[len(road.shape.points) - 1][1],
road.shape.points[len(road.shape.points) - 1][0]]) < 1:
records.append([road.record])
pointsparts.append([road.shape.points])
k1_latlon = road.shape.points[0][1], road.shape.points[0][0]
# plt.plot(k1_latlon[1],k1_latlon[0],'ob',label ='k1')
collection4.remove(road)
# print('l3')
break
k1 = k1_latlon[1], k1_latlon[0]
k2 = k2_latlon[1], k2_latlon[0]
# print("k1="+str(k1)+", k2="+str(k2))
k12_j = distPP(k1_latlon, k2_latlon) > 5 and distPP(k1, endkeyuni[i][0]) > 1 and distPP(k2,
startkeyuni[i][0]) > 1
if k12_j == False:
break
collection5 = collection4.copy()
for road in collection4:
if distPP(k2_latlon, [road.shape.points[len(road.shape.points) - 1][1],
road.shape.points[len(road.shape.points) - 1][0]]) < 1:
records.append([road.record])
pointsparts.append([road.shape.points])
k2_latlon = road.shape.points[0][1], road.shape.points[0][0]
# plt.plot(k2_latlon[1],k2_latlon[0],'or',label ='k2')
collection5.remove(road)
# print('l4')
break
iter_count = iter_count + 1
k1 = k1_latlon[1], k1_latlon[0]
k2 = k2_latlon[1], k2_latlon[0]
# print("k1="+str(k1)+", k2="+str(k2))
k12_j = distPP(k1_latlon, k2_latlon) > 5 and distPP(k1, endkeyuni[i][0]) > 5 and distPP(k2,
startkeyuni[i][0]) > 5
if k12_j == False:
break
collection = collection5.copy()
# print(iter_count)
# plt.legend(loc='upper right')
# elif LineInPointBox(i,startkeyuni,endkeyuni,1e-2,1e-2,road):
# pointsparts.append(road.shape.points)
# records.append(road.record)
# print(len(pointsparts))
# print(len(records))
for idex in range(len(pointsparts)):
# print(pointsparts[idex])
# print(records[idex])
# print(len(records[idex][0]))
w.line(parts=pointsparts[idex])
w.record(*records[idex][0])
w.null()
w.save('CMU_rcrs_all_events_08-2015_04-2017/shapefile5/SplittedLine_WZ' + str(i))
def FindSplittedLine7(i, roads, road_shaperecords, endkeyuni, startkeyuni, wz_route, bufferlat, bufferlon,
workzone_DIRECTION, fileloc='CMU_rcrs_all_events_08-2015_04-2017/shapefile5/SplittedLine_WZ'):
w = shapefile.Writer()
w.fields = roads.fields[1:]
records = []
pointsparts = []
k1 = []
k2 = []
collection = set()
smark = 0
emark = 0
scountlist = 0
ecountlist = 0
for road in road_shaperecords:
if road.record[0] == str(wz_route[i]).zfill(4):
if PointInLineBox(i, startkeyuni, bufferlat, bufferlon, road.shape.bbox):
for j in range(len(road.shape.points) - 1):
vetice = [[road.shape.points[j][0], road.shape.points[j][1]],
[road.shape.points[j + 1][0], road.shape.points[j + 1][1]]]
scount = distPL(startkeyuni[i][0], vetice) < 1 and distPL(endkeyuni[i][0], vetice) < distPP(
endkeyuni[i][0], startkeyuni[i][0])
scountlist = scountlist + scount
if PointInLineBox(i, endkeyuni, bufferlat, bufferlon, road.shape.bbox):
for j in range(len(road.shape.points) - 1):
vetice = [[road.shape.points[j][0], road.shape.points[j][1]],
[road.shape.points[j + 1][0], road.shape.points[j + 1][1]]]
ecount = distPL(endkeyuni[i][0], vetice) < 1 and distPL(startkeyuni[i][0], vetice) < distPP(
endkeyuni[i][0], startkeyuni[i][0])
ecountlist = ecountlist + ecount
for road in road_shaperecords:
if road.record[0] == str(wz_route[i]).zfill(4):
collection.add(road)
if PointInLineBox(i, startkeyuni, bufferlat, bufferlon, road.shape.bbox):
for j in range(len(road.shape.points) - 1):
subpart = []
vetice = [[road.shape.points[j][0], road.shape.points[j][1]],
[road.shape.points[j + 1][0], road.shape.points[j + 1][1]]]
if distPL(startkeyuni[i][0], vetice) < 1 and distPL(endkeyuni[i][0], vetice) < distPP(
endkeyuni[i][0], startkeyuni[i][0]):
smark = smark + 1
cosup = distPP(endkeyuni[i][0], [road.shape.points[j][0], \
road.shape.points[j][1]]) - distPP(startkeyuni[i][0], [
road.shape.points[j + 1][0], \
road.shape.points[j + 1][1]])
if cosup > 0:
if smark <= 1:
srecord = [road.record]
spoints = []
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k1 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(startkeyuni[i][0], k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove((road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
spoints.remove((road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
spoints.remove((road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
spoints.append(k1)
break
# print("k1 ori = "+str(k1))
collection.remove(road)
else:
if scountlist < 3:
if distPP(endkeyuni[i][0], [road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][
1]]) < distPP(endkeyuni[i][0], k1):
srecord = [road.record]
spoints = []
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k1 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(startkeyuni[i][0],
k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
spoints.append(k1)
break
# print("scountlist ==1,k1="+str(k1))
collection.remove(road)
else:
if road_direction(workzone_DIRECTION[i], startkeyuni[i], road.shape.points[j + 1]):
srecord = [road.record]
spoints = []
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k1 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(startkeyuni[i][0],
k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
spoints.append(k1)
break
print(scountlist)
print(smark)
print("road_direction,k1=" + str(k1))
collection.remove(road)
else:
if smark <= 1:
srecord = [road.record]
spoints = []
for jm in range(j + 1):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
k1 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(startkeyuni[i][0], k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
spoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
spoints.insert(0, k1)
break
collection.remove(road)
else:
if scountlist == 1:
if distPP(endkeyuni[i][0], [road.shape.points[0][0], \
road.shape.points[0][1]]) < distPP(endkeyuni[i][0], k1):
srecord = [road.record]
spoints = []
for jm in range(j):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
k1 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(startkeyuni[i][0],
k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(j + 1):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
spoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
spoints.insert(0, k1)
break
collection.remove(road)
else:
if road_direction(workzone_DIRECTION[i], startkeyuni[i], road.shape.points[j]):
srecord = [road.record]
spoints = []
k1 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
for jm in range(j):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(startkeyuni[i][0],
k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
spoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
spoints.insert(0, k1)
break
collection.remove(road)
else:
continue
elif PointInLineBox(i, endkeyuni, bufferlat, bufferlon, road.shape.bbox):
# print('a')
for j in range(len(road.shape.points) - 1):
subpart = []
vetice = [[road.shape.points[j][0], road.shape.points[j][1]],
[road.shape.points[j + 1][0], road.shape.points[j + 1][1]]]
if distPL(endkeyuni[i][0], vetice) < 1 and distPL(startkeyuni[i][0], vetice) < distPP(
endkeyuni[i][0], startkeyuni[i][0]):
emark = emark + 1
cosup = distPP(startkeyuni[i][0], [road.shape.points[j][0], \
road.shape.points[j][1]]) - distPP(startkeyuni[i][0], [
road.shape.points[j + 1][0], \
road.shape.points[j + 1][1]])
if cosup > 0:
# print('d')
if emark <= 1:
erecord = [road.record]
epoints = []
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k2 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0], k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove((road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
epoints.remove((road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
epoints.remove((road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
epoints.append(k2)
break
# print("cosup>0emark<=1")
# print(k2)
try:
collection.remove(road)
except:
pass
else:
if ecountlist == 1:
if distPP(startkeyuni[i][0], [road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][
1]]) < distPP(startkeyuni[i][0], k2):
erecord = [road.record]
epoints = []
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k2 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0], k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
epoints.append(k2)
break
try:
collection.remove(road)
except:
pass
else:
if road_direction(workzone_DIRECTION[i], endkeyuni[i], road.shape.points[j + 1]):
erecord = [road.record]
epoints = []
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k2 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0], k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
epoints.append(k2)
break
# print(k2)
try:
collection.remove(road)
except:
pass
else:
# print("e")
# print(cosup)
# print(j)
if emark <= 1:
# print(emark)
# print("emark<=1")
erecord = [road.record]
epoints = []
for jm in range(j + 1):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
k2 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0], k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
epoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
epoints.insert(0, k2)
break
# print(k2)
try:
collection.remove(road)
except:
pass
else:
if ecountlist == 1:
if distPP(startkeyuni[i][0], [road.shape.points[0][0], \
road.shape.points[0][1]]) < distPP(startkeyuni[i][0],
k2):
erecord = [road.record]
epoints = []
k2 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
for jm in range(j + 1):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0], k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
epoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
epoints.insert(0, k2)
break
# print("ecountlist=2")
# print(k2)
try:
collection.remove(road)
except:
pass
else:
if road_direction(workzone_DIRECTION[i], endkeyuni[i], road.shape.points[j]):
# print("f")
erecord = [road.record]
epoints = []
for jm in range(j + 1):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
k2 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0], k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
epoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
epoints.insert(0, k2)
break
# print(k2)
try:
collection.remove(road)
except:
pass
else:
continue
# handle collection, delete the odd/even segnumber that is different from
# print('k2='+str(k2))
# print('k1='+str(k1))
# print("scountlist,ecountlist,smark,emark")
# print(scountlist,ecountlist,smark,emark)
records.append(srecord)
pointsparts.append([spoints])
try:
records.append(erecord)
pointsparts.append([epoints])
except:
pass
itercount = 0
while (k1 == []) and itercount < 10:
bufferlat, bufferlon = bufferlat * 5, bufferlon * 5
records = []
pointsparts = []
k1 = []
k2 = []
collection = set()
smark = 0
emark = 0
scountlist = 0
ecountlist = 0
for road in road_shaperecords:
if road.record[0] == str(wz_route[i]).zfill(4):
if PointInLineBox(i, startkeyuni, bufferlat, bufferlon, road.shape.bbox):
for j in range(len(road.shape.points) - 1):
vetice = [[road.shape.points[j][0], road.shape.points[j][1]],
[road.shape.points[j + 1][0], road.shape.points[j + 1][1]]]
scount = distPL(startkeyuni[i][0], vetice) < 1 and distPL(endkeyuni[i][0], vetice) < distPP(
endkeyuni[i][0], startkeyuni[i][0])
scountlist = scountlist + scount
if PointInLineBox(i, endkeyuni, bufferlat, bufferlon, road.shape.bbox):
for j in range(len(road.shape.points) - 1):
vetice = [[road.shape.points[j][0], road.shape.points[j][1]],
[road.shape.points[j + 1][0], road.shape.points[j + 1][1]]]
ecount = distPL(endkeyuni[i][0], vetice) < 1 and distPL(startkeyuni[i][0], vetice) < distPP(
endkeyuni[i][0], startkeyuni[i][0])
ecountlist = ecountlist + ecount
for road in road_shaperecords:
if road.record[0] == str(wz_route[i]).zfill(4):
collection.add(road)
if PointInLineBox(i, startkeyuni, bufferlat, bufferlon, road.shape.bbox):
for j in range(len(road.shape.points) - 1):
subpart = []
vetice = [[road.shape.points[j][0], road.shape.points[j][1]],
[road.shape.points[j + 1][0], road.shape.points[j + 1][1]]]
if distPL(startkeyuni[i][0], vetice) < 1:
smark = smark + 1
cosup = distPP(endkeyuni[i][0], [road.shape.points[j][0], \
road.shape.points[j][1]]) - distPP(startkeyuni[i][0], [
road.shape.points[j + 1][0], \
road.shape.points[j + 1][1]])
if cosup > 0:
if smark <= 1:
srecord = [road.record]
spoints = []
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k1 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(startkeyuni[i][0], k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove((road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
spoints.remove((road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
spoints.remove((road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
spoints.append(k1)
break
collection.remove(road)
else:
if scountlist == 1:
if distPP(endkeyuni[i][0], [road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][
1]]) < distPP(endkeyuni[i][0], k1):
srecord = [road.record]
spoints = []
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k1 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(
startkeyuni[i][0], k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][
1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][
1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
spoints.append(k1)
break
collection.remove(road)
else:
if road_direction(workzone_DIRECTION[i], startkeyuni[i],
road.shape.points[j + 1]):
srecord = [road.record]
spoints = []
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k1 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(
startkeyuni[i][0], k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][
1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][
1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
spoints.append(k1)
break
collection.remove(road)
else:
if smark <= 1:
srecord = [road.record]
spoints = []
for jm in range(j + 1):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
k1 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(startkeyuni[i][0], k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
spoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
spoints.insert(0, k1)
break
collection.remove(road)
else:
if scountlist == 1:
if distPP(endkeyuni[i][0], [road.shape.points[0][0], \
road.shape.points[0][1]]) < distPP(endkeyuni[i][0],
k1):
srecord = [road.record]
spoints = []
for jm in range(j):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
k1 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(
startkeyuni[i][0], k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(j + 1):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
spoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
spoints.insert(0, k1)
break
collection.remove(road)
else:
if road_direction(workzone_DIRECTION[i], startkeyuni[i], road.shape.points[j]):
srecord = [road.record]
spoints = []
k1 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
for jm in range(j):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(
startkeyuni[i][0], k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
spoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
spoints.insert(0, k1)
break
collection.remove(road)
else:
continue
elif PointInLineBox(i, endkeyuni, bufferlat, bufferlon, road.shape.bbox):
# print('a')
for j in range(len(road.shape.points) - 1):
subpart = []
vetice = [[road.shape.points[j][0], road.shape.points[j][1]],
[road.shape.points[j + 1][0], road.shape.points[j + 1][1]]]
if distPL(endkeyuni[i][0], vetice) < 1:
emark = emark + 1
cosup = distPP(startkeyuni[i][0], [road.shape.points[j][0], \
road.shape.points[j][1]]) - distPP(startkeyuni[i][0], [
road.shape.points[j + 1][0], \
road.shape.points[j + 1][1]])
if cosup > 0:
# print('d')
if emark <= 1:
erecord = [road.record]
epoints = []
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k2 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0], k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove((road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
epoints.remove((road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
epoints.remove((road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
epoints.append(k2)
break
# print("cosup>0emark<=1")
# print(k2)
try:
collection.remove(road)
except:
pass
else:
if ecountlist == 1:
if distPP(startkeyuni[i][0], [road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][
1]]) < distPP(startkeyuni[i][0], k2):
erecord = [road.record]
epoints = []
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k2 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0],
k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][
1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][
1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
epoints.append(k2)
break
try:
collection.remove(road)
except:
pass
else:
if road_direction(workzone_DIRECTION[i], endkeyuni[i],
road.shape.points[j + 1]):
erecord = [road.record]
epoints = []
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k2 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0],
k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][
1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][
1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
epoints.append(k2)
break
# print(k2)
try:
collection.remove(road)
except:
pass
else:
# print("e")
# print(cosup)
# print(j)
if emark <= 1:
# print(emark)
# print("emark<=1")
erecord = [road.record]
epoints = []
for jm in range(j + 1):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
k2 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0], k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
epoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
epoints.insert(0, k2)
break
# print(k2)
try:
collection.remove(road)
except:
pass
else:
if ecountlist == 1:
if distPP(startkeyuni[i][0], [road.shape.points[0][0], \
road.shape.points[0][1]]) < distPP(
startkeyuni[i][0], k2):
erecord = [road.record]
epoints = []
k2 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
for jm in range(j + 1):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0],
k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
epoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
epoints.insert(0, k2)
break
# print("ecountlist=2")
# print(k2)
try:
collection.remove(road)
except:
pass
else:
if road_direction(workzone_DIRECTION[i], endkeyuni[i], road.shape.points[j]):
# print("f")
erecord = [road.record]
epoints = []
for jm in range(j + 1):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
k2 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0],
k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
epoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
epoints.insert(0, k2)
break
# print(k2)
try:
collection.remove(road)
except:
pass
else:
continue
itercount = itercount + 1
# print(itercount)
# print(k1,k2)
k1_latlon = k1[1], k1[0]
try:
k2_latlon = k2[1], k2[0]
except:
k2_latlon = startkeyuni[i][0][1], startkeyuni[i][0][0]
case_wz_end_lat, case_wz_end_lon = endkeyuni[i][0][1], endkeyuni[i][0][0]
# print(case_wz_end_lat, case_wz_end_lon)
# plt.figure(i)
# plt.plot(case_wz_end_lon,case_wz_end_lat,'*b',label ='Matched End')
case_wz_start_lat, case_wz_start_lon = startkeyuni[i][0][1], startkeyuni[i][0][0]
# print(case_wz_start_lat, case_wz_start_lon)
# plt.plot(case_wz_start_lon,case_wz_start_lat,'*r',label ='Matched Start')
# plt.plot(k1_latlon[1],k1_latlon[0],'ob',label ='k1')
# plt.plot(k2_latlon[1],k2_latlon[0],'og',label ='k2')
iter_count = 1
k12_j = distPP(k1_latlon, k2_latlon) > 5 and distPP(k1, endkeyuni[i][0]) > 1 and distPP(k2, startkeyuni[i][0]) > 1
while k12_j and iter_count < 100:
collection2 = collection.copy()
for road in collection:
if distPP(k1_latlon, [road.shape.points[0][1], road.shape.points[0][0]]) < 1:
records.append([road.record])
pointsparts.append([road.shape.points])
k1_latlon = road.shape.points[len(road.shape.points) - 1][1], \
road.shape.points[len(road.shape.points) - 1][0]
# plt.plot(k1_latlon[1],k1_latlon[0],'ob',label ='k1')
collection2.remove(road)
# print('l1')
break
k1 = k1_latlon[1], k1_latlon[0]
k2 = k2_latlon[1], k2_latlon[0]
# print("k1="+str(k1)+", k2="+str(k2))
k12_j = distPP(k1_latlon, k2_latlon) > 5 and distPP(k1, endkeyuni[i][0]) > 1 and distPP(k2,
startkeyuni[i][0]) > 1
if k12_j == False:
break
collection3 = collection2.copy()
for road in collection2:
if distPP(k2_latlon, [road.shape.points[0][1], road.shape.points[0][0]]) < 1:
records.append([road.record])
pointsparts.append([road.shape.points])
k2_latlon = road.shape.points[len(road.shape.points) - 1][1], \
road.shape.points[len(road.shape.points) - 1][0]
# plt.plot(k2_latlon[1],k2_latlon[0],'or',label ='k2')
collection3.remove(road)
# print('l2')
break
k1 = k1_latlon[1], k1_latlon[0]
k2 = k2_latlon[1], k2_latlon[0]
# print("k1="+str(k1)+", k2="+str(k2))
k12_j = distPP(k1_latlon, k2_latlon) > 5 and distPP(k1, endkeyuni[i][0]) > 1 and distPP(k2,
startkeyuni[i][0]) > 1
if k12_j == False:
break
collection4 = collection3.copy()
for road in collection3:
if distPP(k1_latlon, [road.shape.points[len(road.shape.points) - 1][1],
road.shape.points[len(road.shape.points) - 1][0]]) < 1:
records.append([road.record])
pointsparts.append([road.shape.points])
k1_latlon = road.shape.points[0][1], road.shape.points[0][0]
# plt.plot(k1_latlon[1],k1_latlon[0],'ob',label ='k1')
collection4.remove(road)
# print('l3')
break
k1 = k1_latlon[1], k1_latlon[0]
k2 = k2_latlon[1], k2_latlon[0]
# print("k1="+str(k1)+", k2="+str(k2))
k12_j = distPP(k1_latlon, k2_latlon) > 5 and distPP(k1, endkeyuni[i][0]) > 1 and distPP(k2,
startkeyuni[i][0]) > 1
if k12_j == False:
break
collection5 = collection4.copy()
for road in collection4:
if distPP(k2_latlon, [road.shape.points[len(road.shape.points) - 1][1],
road.shape.points[len(road.shape.points) - 1][0]]) < 1:
records.append([road.record])
pointsparts.append([road.shape.points])
k2_latlon = road.shape.points[0][1], road.shape.points[0][0]
# plt.plot(k2_latlon[1],k2_latlon[0],'or',label ='k2')
collection5.remove(road)
# print('l4')
break
iter_count = iter_count + 1
k1 = k1_latlon[1], k1_latlon[0]
k2 = k2_latlon[1], k2_latlon[0]
# print("k1="+str(k1)+", k2="+str(k2))
k12_j = distPP(k1_latlon, k2_latlon) > 5 and distPP(k1, endkeyuni[i][0]) > 5 and distPP(k2,
startkeyuni[i][0]) > 5
if k12_j == False:
break
collection = collection5.copy()
# print(iter_count)
# plt.legend(loc='upper right')
# elif LineInPointBox(i,startkeyuni,endkeyuni,1e-2,1e-2,road):
# pointsparts.append(road.shape.points)
# records.append(road.record)
# print(len(pointsparts))
# print(len(records))
for idex in range(len(pointsparts)):
# print(pointsparts[idex])
# print(records[idex])
# print(len(records[idex][0]))
w.line(parts=pointsparts[idex])
# w.line( pointsparts[idex])
w.record(*records[idex][0])
w.null()
w.save(fileloc + str(i))
# "bi" (direction-aware) variant: unlike the function above, the road filter below
# additionally requires direction_match(workzone_DIRECTION[i], road.record[10]),
# and uses looser distPL thresholds (< 10 instead of < 1) when matching segments.
def FindSplittedLine7_bi(i, roads, road_shaperecords, endkeyuni, startkeyuni, wz_route, bufferlat, bufferlon,
workzone_DIRECTION, fileloc):
w = shapefile.Writer()
w.fields = roads.fields[1:]
records = []
pointsparts = []
k1 = []
k2 = []
collection = set()
smark = 0
emark = 0
scountlist = 0
ecountlist = 0
for road in road_shaperecords:
if (road.record[0] == str(wz_route[i]).zfill(4)) and (direction_match(workzone_DIRECTION[i], road.record[10])):
if PointInLineBox(i, startkeyuni, bufferlat, bufferlon, road.shape.bbox):
for j in range(len(road.shape.points) - 1):
vetice = [[road.shape.points[j][0], road.shape.points[j][1]],
[road.shape.points[j + 1][0], road.shape.points[j + 1][1]]]
scount = distPL(startkeyuni[i][0], vetice) < 10 and distPL(endkeyuni[i][0], vetice) < distPP(
endkeyuni[i][0], startkeyuni[i][0])
scountlist = scountlist + scount
if PointInLineBox(i, endkeyuni, bufferlat, bufferlon, road.shape.bbox):
for j in range(len(road.shape.points) - 1):
vetice = [[road.shape.points[j][0], road.shape.points[j][1]],
[road.shape.points[j + 1][0], road.shape.points[j + 1][1]]]
ecount = distPL(endkeyuni[i][0], vetice) < 10 and distPL(startkeyuni[i][0], vetice) < distPP(
endkeyuni[i][0], startkeyuni[i][0])
ecountlist = ecountlist + ecount
for road in road_shaperecords:
if (road.record[0] == str(wz_route[i]).zfill(4)) and (direction_match(workzone_DIRECTION[i], road.record[10])):
collection.add(road)
if PointInLineBox(i, startkeyuni, bufferlat, bufferlon, road.shape.bbox):
for j in range(len(road.shape.points) - 1):
subpart = []
vetice = [[road.shape.points[j][0], road.shape.points[j][1]],
[road.shape.points[j + 1][0], road.shape.points[j + 1][1]]]
if distPL(startkeyuni[i][0], vetice) < 10 and distPL(endkeyuni[i][0], vetice) < distPP(
endkeyuni[i][0], startkeyuni[i][0]):
smark = smark + 1
cosup = distPP(endkeyuni[i][0], [road.shape.points[j][0], \
road.shape.points[j][1]]) - distPP(startkeyuni[i][0], [
road.shape.points[j + 1][0], \
road.shape.points[j + 1][1]])
if cosup > 0:
if smark <= 1:
srecord = [road.record]
spoints = []
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k1 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(startkeyuni[i][0], k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove((road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
spoints.remove((road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
spoints.remove((road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
spoints.append(k1)
break
# print("k1 ori = "+str(k1))
collection.remove(road)
else:
if scountlist < 3:
if distPP(endkeyuni[i][0], [road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][
1]]) < distPP(endkeyuni[i][0], k1):
srecord = [road.record]
spoints = []
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k1 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(startkeyuni[i][0],
k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
spoints.append(k1)
break
# print("scountlist ==1,k1="+str(k1))
collection.remove(road)
else:
if road_direction(workzone_DIRECTION[i], startkeyuni[i], road.shape.points[j + 1]):
srecord = [road.record]
spoints = []
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k1 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(startkeyuni[i][0],
k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
spoints.append(k1)
break
# print(scountlist,smark)
# print("road_direction,k1="+str(k1))
# print(road.record)
collection.remove(road)
else:
if smark <= 1:
srecord = [road.record]
spoints = []
for jm in range(j + 1):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
k1 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(startkeyuni[i][0], k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
spoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
spoints.insert(0, k1)
break
collection.remove(road)
else:
if scountlist == 1:
if distPP(endkeyuni[i][0], [road.shape.points[0][0], \
road.shape.points[0][1]]) < distPP(endkeyuni[i][0], k1):
srecord = [road.record]
spoints = []
for jm in range(j):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
k1 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(startkeyuni[i][0],
k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(j + 1):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
spoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
spoints.insert(0, k1)
break
collection.remove(road)
else:
if road_direction(workzone_DIRECTION[i], startkeyuni[i], road.shape.points[j]):
srecord = [road.record]
spoints = []
k1 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
for jm in range(j):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(startkeyuni[i][0],
k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
spoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
spoints.insert(0, k1)
break
collection.remove(road)
else:
continue
elif PointInLineBox(i, endkeyuni, bufferlat, bufferlon, road.shape.bbox):
# print('a')
for j in range(len(road.shape.points) - 1):
subpart = []
vetice = [[road.shape.points[j][0], road.shape.points[j][1]],
[road.shape.points[j + 1][0], road.shape.points[j + 1][1]]]
if distPL(endkeyuni[i][0], vetice) < 10 and distPL(startkeyuni[i][0], vetice) < distPP(
endkeyuni[i][0], startkeyuni[i][0]):
emark = emark + 1
cosup = distPP(startkeyuni[i][0], [road.shape.points[j][0], \
road.shape.points[j][1]]) - distPP(startkeyuni[i][0], [
road.shape.points[j + 1][0], \
road.shape.points[j + 1][1]])
if cosup > 0:
# print('d')
if emark <= 1:
erecord = [road.record]
epoints = []
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k2 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0], k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove((road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
epoints.remove((road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
epoints.remove((road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
epoints.append(k2)
break
# print("cosup>0emark<=1")
# print(k2)
try:
collection.remove(road)
except:
pass
else:
if ecountlist == 1:
if distPP(startkeyuni[i][0], [road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][
1]]) < distPP(startkeyuni[i][0], k2):
erecord = [road.record]
epoints = []
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k2 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0], k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
epoints.append(k2)
break
try:
collection.remove(road)
except:
pass
else:
if road_direction(workzone_DIRECTION[i], endkeyuni[i], road.shape.points[j + 1]):
erecord = [road.record]
epoints = []
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k2 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0], k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
epoints.append(k2)
break
# print(k2)
try:
collection.remove(road)
except:
pass
else:
# print("e")
# print(cosup)
# print(j)
if emark <= 1:
# print(emark)
# print("emark<=1")
erecord = [road.record]
epoints = []
for jm in range(j + 1):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
k2 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0], k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
epoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
epoints.insert(0, k2)
break
# print(k2)
try:
collection.remove(road)
except:
pass
else:
if ecountlist == 1:
if distPP(startkeyuni[i][0], [road.shape.points[0][0], \
road.shape.points[0][1]]) < distPP(startkeyuni[i][0],
k2):
erecord = [road.record]
epoints = []
k2 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
for jm in range(j + 1):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0], k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
epoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
epoints.insert(0, k2)
break
# print("ecountlist=2")
# print(k2)
try:
collection.remove(road)
except:
pass
else:
if road_direction(workzone_DIRECTION[i], endkeyuni[i], road.shape.points[j]):
# print("f")
erecord = [road.record]
epoints = []
for jm in range(j + 1):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
k2 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0], k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
epoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
epoints.insert(0, k2)
break
# print(k2)
try:
collection.remove(road)
except:
pass
else:
continue
# handle collection, delete the odd/even segnumber that is different from
# print('k2='+str(k2))
# print('k1='+str(k1))
# print("scountlist,ecountlist,smark,emark")
# print(scountlist,ecountlist,smark,emark)
records.append(srecord)
pointsparts.append([spoints])
try:
records.append(erecord)
pointsparts.append([epoints])
except:
pass
itercount = 0
while (k1 == []) and itercount < 10:
bufferlat, bufferlon = bufferlat * 5, bufferlon * 5
records = []
pointsparts = []
k1 = []
k2 = []
collection = set()
smark = 0
emark = 0
scountlist = 0
ecountlist = 0
for road in road_shaperecords:
if (road.record[0] == str(wz_route[i]).zfill(4)) and (
direction_match(workzone_DIRECTION[i], road.record[10])):
if PointInLineBox(i, startkeyuni, bufferlat, bufferlon, road.shape.bbox):
for j in range(len(road.shape.points) - 1):
vetice = [[road.shape.points[j][0], road.shape.points[j][1]],
[road.shape.points[j + 1][0], road.shape.points[j + 1][1]]]
scount = distPL(startkeyuni[i][0], vetice) < 10 and distPL(endkeyuni[i][0], vetice) < distPP(
endkeyuni[i][0], startkeyuni[i][0])
scountlist = scountlist + scount
if PointInLineBox(i, endkeyuni, bufferlat, bufferlon, road.shape.bbox):
for j in range(len(road.shape.points) - 1):
vetice = [[road.shape.points[j][0], road.shape.points[j][1]],
[road.shape.points[j + 1][0], road.shape.points[j + 1][1]]]
ecount = distPL(endkeyuni[i][0], vetice) < 10 and distPL(startkeyuni[i][0], vetice) < distPP(
endkeyuni[i][0], startkeyuni[i][0])
ecountlist = ecountlist + ecount
for road in road_shaperecords:
if (road.record[0] == str(wz_route[i]).zfill(4)) and (
direction_match(workzone_DIRECTION[i], road.record[10])):
collection.add(road)
if PointInLineBox(i, startkeyuni, bufferlat, bufferlon, road.shape.bbox):
for j in range(len(road.shape.points) - 1):
subpart = []
vetice = [[road.shape.points[j][0], road.shape.points[j][1]],
[road.shape.points[j + 1][0], road.shape.points[j + 1][1]]]
if distPL(startkeyuni[i][0], vetice) < 10:
smark = smark + 1
cosup = distPP(endkeyuni[i][0], [road.shape.points[j][0], \
road.shape.points[j][1]]) - distPP(startkeyuni[i][0], [
road.shape.points[j + 1][0], \
road.shape.points[j + 1][1]])
if cosup > 0:
if smark <= 1:
srecord = [road.record]
spoints = []
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k1 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(startkeyuni[i][0], k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove((road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
spoints.remove((road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
spoints.remove((road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
spoints.append(k1)
break
collection.remove(road)
else:
if scountlist == 1:
if distPP(endkeyuni[i][0], [road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][
1]]) < distPP(endkeyuni[i][0], k1):
srecord = [road.record]
spoints = []
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k1 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(
startkeyuni[i][0], k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][
1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][
1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
spoints.append(k1)
break
collection.remove(road)
else:
if road_direction(workzone_DIRECTION[i], startkeyuni[i],
road.shape.points[j + 1]):
srecord = [road.record]
spoints = []
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k1 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(
startkeyuni[i][0], k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][
1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][
1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
spoints.remove(
(road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
spoints.append(k1)
break
collection.remove(road)
else:
if smark <= 1:
srecord = [road.record]
spoints = []
for jm in range(j + 1):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
k1 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(startkeyuni[i][0], k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
spoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
spoints.insert(0, k1)
break
collection.remove(road)
else:
if scountlist == 1:
if distPP(endkeyuni[i][0], [road.shape.points[0][0], \
road.shape.points[0][1]]) < distPP(endkeyuni[i][0],
k1):
srecord = [road.record]
spoints = []
for jm in range(j):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
k1 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(
startkeyuni[i][0], k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(j + 1):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
spoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
spoints.insert(0, k1)
break
collection.remove(road)
else:
if road_direction(workzone_DIRECTION[i], startkeyuni[i], road.shape.points[j]):
srecord = [road.record]
spoints = []
k1 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
for jm in range(j):
spoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
spoints.append((startkeyuni[i][0][0], startkeyuni[i][0][1]))
cosk1 = distPP(startkeyuni[i][0], endkeyuni[i][0]) - distPP(
startkeyuni[i][0], k1)
if cosk1 < 0:
k1 = ((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(endkeyuni[i][0], vetice_mm) > 1:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
spoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
spoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
spoints.insert(0, k1)
break
collection.remove(road)
else:
continue
elif PointInLineBox(i, endkeyuni, bufferlat, bufferlon, road.shape.bbox):
# print('a')
for j in range(len(road.shape.points) - 1):
subpart = []
vetice = [[road.shape.points[j][0], road.shape.points[j][1]],
[road.shape.points[j + 1][0], road.shape.points[j + 1][1]]]
if distPL(endkeyuni[i][0], vetice) < 10:
emark = emark + 1
cosup = distPP(startkeyuni[i][0], [road.shape.points[j][0], \
road.shape.points[j][1]]) - distPP(startkeyuni[i][0], [
road.shape.points[j + 1][0], \
road.shape.points[j + 1][1]])
if cosup > 0:
# print('d')
if emark <= 1:
erecord = [road.record]
epoints = []
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k2 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0], k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove((road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
epoints.remove((road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
epoints.remove((road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
epoints.append(k2)
break
# print("cosup>0emark<=1")
# print(k2)
try:
collection.remove(road)
except:
pass
else:
if ecountlist == 1:
if distPP(startkeyuni[i][0], [road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][
1]]) < distPP(startkeyuni[i][0], k2):
erecord = [road.record]
epoints = []
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k2 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0],
k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][
1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][
1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
epoints.append(k2)
break
try:
collection.remove(road)
except:
pass
else:
if road_direction(workzone_DIRECTION[i], endkeyuni[i],
road.shape.points[j + 1]):
erecord = [road.record]
epoints = []
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
for jm in range(j + 1, len(road.shape.points)):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
k2 = ((road.shape.points[len(road.shape.points) - 1][0], \
road.shape.points[len(road.shape.points) - 1][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0],
k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(len(road.shape.points) - j - 2):
vetice_mm = [[road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][
1]], \
[road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][
1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
else:
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 1][0], \
road.shape.points[len(road.shape.points) - js - 1][1]))
epoints.remove(
(road.shape.points[len(road.shape.points) - js - 2][0], \
road.shape.points[len(road.shape.points) - js - 2][1]))
epoints.append(k2)
break
# print(k2)
try:
collection.remove(road)
except:
pass
else:
# print("e")
# print(cosup)
# print(j)
if emark <= 1:
# print(emark)
# print("emark<=1")
erecord = [road.record]
epoints = []
for jm in range(j + 1):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
k2 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0], k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
epoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
epoints.insert(0, k2)
break
# print(k2)
try:
collection.remove(road)
except:
pass
else:
if ecountlist == 1:
if distPP(startkeyuni[i][0], [road.shape.points[0][0], \
road.shape.points[0][1]]) < distPP(
startkeyuni[i][0], k2):
erecord = [road.record]
epoints = []
k2 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
for jm in range(j + 1):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0],
k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
epoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
epoints.insert(0, k2)
break
# print("ecountlist=2")
# print(k2)
try:
collection.remove(road)
except:
pass
else:
if road_direction(workzone_DIRECTION[i], endkeyuni[i], road.shape.points[j]):
# print("f")
erecord = [road.record]
epoints = []
for jm in range(j + 1):
epoints.append((road.shape.points[jm][0], road.shape.points[jm][1]))
epoints.append((endkeyuni[i][0][0], endkeyuni[i][0][1]))
k2 = ((road.shape.points[0][0], \
road.shape.points[0][1]))
cosk2 = distPP(endkeyuni[i][0], startkeyuni[i][0]) - distPP(endkeyuni[i][0],
k2)
if cosk2 < 0:
k2 = ((startkeyuni[i][0][0], startkeyuni[i][0][1]))
for js in range(j):
vetice_mm = [[road.shape.points[js][0], \
road.shape.points[js][1]], \
[road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]]]
if distPL(startkeyuni[i][0], vetice_mm) > 1:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
else:
epoints.remove((road.shape.points[js][0], \
road.shape.points[js][1]))
epoints.remove((road.shape.points[js + 1][0], \
road.shape.points[js + 1][1]))
epoints.insert(0, k2)
break
# print(k2)
try:
collection.remove(road)
except:
pass
else:
continue
itercount = itercount + 1
# print(itercount)
# print(k1,k2)
k1_latlon = k1[1], k1[0]
try:
k2_latlon = k2[1], k2[0]
except:
k2_latlon = startkeyuni[i][0][1], startkeyuni[i][0][0]
case_wz_end_lat, case_wz_end_lon = endkeyuni[i][0][1], endkeyuni[i][0][0]
# print(case_wz_end_lat, case_wz_end_lon)
# plt.figure(i)
# plt.plot(case_wz_end_lon,case_wz_end_lat,'*b',label ='Matched End')
case_wz_start_lat, case_wz_start_lon = startkeyuni[i][0][1], startkeyuni[i][0][0]
# print(case_wz_start_lat, case_wz_start_lon)
# plt.plot(case_wz_start_lon,case_wz_start_lat,'*r',label ='Matched Start')
# plt.plot(k1_latlon[1],k1_latlon[0],'ob',label ='k1')
# plt.plot(k2_latlon[1],k2_latlon[0],'og',label ='k2')
iter_count = 1
k12_j = distPP(k1_latlon, k2_latlon) > 5 and distPP(k1, endkeyuni[i][0]) > 1 and distPP(k2, startkeyuni[i][0]) > 1
while k12_j and iter_count < 100:
collection2 = collection.copy()
for road in collection:
if distPP(k1_latlon, [road.shape.points[0][1], road.shape.points[0][0]]) < 1:
records.append([road.record])
pointsparts.append([road.shape.points])
k1_latlon = road.shape.points[len(road.shape.points) - 1][1], \
road.shape.points[len(road.shape.points) - 1][0]
# plt.plot(k1_latlon[1],k1_latlon[0],'ob',label ='k1')
collection2.remove(road)
# print('l1')
break
k1 = k1_latlon[1], k1_latlon[0]
k2 = k2_latlon[1], k2_latlon[0]
# print("k1="+str(k1)+", k2="+str(k2))
k12_j = distPP(k1_latlon, k2_latlon) > 5 and distPP(k1, endkeyuni[i][0]) > 1 and distPP(k2,
startkeyuni[i][0]) > 1
if k12_j == False:
break
collection3 = collection2.copy()
for road in collection2:
if distPP(k2_latlon, [road.shape.points[0][1], road.shape.points[0][0]]) < 1:
records.append([road.record])
pointsparts.append([road.shape.points])
k2_latlon = road.shape.points[len(road.shape.points) - 1][1], \
road.shape.points[len(road.shape.points) - 1][0]
# plt.plot(k2_latlon[1],k2_latlon[0],'or',label ='k2')
collection3.remove(road)
# print('l2')
break
k1 = k1_latlon[1], k1_latlon[0]
k2 = k2_latlon[1], k2_latlon[0]
# print("k1="+str(k1)+", k2="+str(k2))
k12_j = distPP(k1_latlon, k2_latlon) > 5 and distPP(k1, endkeyuni[i][0]) > 1 and distPP(k2,
startkeyuni[i][0]) > 1
if k12_j == False:
break
collection4 = collection3.copy()
for road in collection3:
if distPP(k1_latlon, [road.shape.points[len(road.shape.points) - 1][1],
road.shape.points[len(road.shape.points) - 1][0]]) < 1:
records.append([road.record])
pointsparts.append([road.shape.points])
k1_latlon = road.shape.points[0][1], road.shape.points[0][0]
# plt.plot(k1_latlon[1],k1_latlon[0],'ob',label ='k1')
collection4.remove(road)
# print('l3')
break
k1 = k1_latlon[1], k1_latlon[0]
k2 = k2_latlon[1], k2_latlon[0]
# print("k1="+str(k1)+", k2="+str(k2))
k12_j = distPP(k1_latlon, k2_latlon) > 5 and distPP(k1, endkeyuni[i][0]) > 1 and distPP(k2,
startkeyuni[i][0]) > 1
if k12_j == False:
break
collection5 = collection4.copy()
for road in collection4:
if distPP(k2_latlon, [road.shape.points[len(road.shape.points) - 1][1],
road.shape.points[len(road.shape.points) - 1][0]]) < 1:
records.append([road.record])
pointsparts.append([road.shape.points])
k2_latlon = road.shape.points[0][1], road.shape.points[0][0]
# plt.plot(k2_latlon[1],k2_latlon[0],'or',label ='k2')
collection5.remove(road)
# print('l4')
break
iter_count = iter_count + 1
k1 = k1_latlon[1], k1_latlon[0]
k2 = k2_latlon[1], k2_latlon[0]
# print("k1="+str(k1)+", k2="+str(k2))
k12_j = distPP(k1_latlon, k2_latlon) > 5 and distPP(k1, endkeyuni[i][0]) > 5 and distPP(k2,
startkeyuni[i][0]) > 5
if k12_j == False:
break
collection = collection5.copy()
# print(iter_count)
# plt.legend(loc='upper right')
# elif LineInPointBox(i,startkeyuni,endkeyuni,1e-2,1e-2,road):
# pointsparts.append(road.shape.points)
# records.append(road.record)
# print(len(pointsparts))
# print(len(records))
for idex in range(len(pointsparts)):
# print(pointsparts[idex])
# print(records[idex])
# print(len(records[idex][0]))
w.line(parts=pointsparts[idex])
w.record(*records[idex][0])
w.null()
w.save(fileloc + str(i))
| [
"numpy.sqrt",
"shapefile.Writer",
"matplotlib.pyplot.figure",
"numpy.min",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((1512, 1528), 'numpy.min', 'np.min', (['distlist'], {}), '(distlist)\n', (1518, 1528), True, 'import numpy as np\n'), ((1563, 1615), 'numpy.sqrt', 'np.sqrt', (['((P1[0] - P2[0]) ** 2 + (P1[1] - P2[1]) ** 2)'], {}), '((P1[0] - P2[0]) ** 2 + (P1[1] - P2[1]) ** 2)\n', (1570, 1615), True, 'import numpy as np\n'), ((4507, 4520), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (4517, 4520), True, 'import matplotlib.pyplot as plt\n'), ((5863, 5892), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (5873, 5892), True, 'import matplotlib.pyplot as plt\n'), ((6067, 6080), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (6077, 6080), True, 'import matplotlib.pyplot as plt\n'), ((7689, 7718), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (7699, 7718), True, 'import matplotlib.pyplot as plt\n'), ((7903, 7913), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7911, 7913), True, 'import matplotlib.pyplot as plt\n'), ((8836, 8849), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (8846, 8849), True, 'import matplotlib.pyplot as plt\n'), ((9637, 9666), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (9647, 9666), True, 'import matplotlib.pyplot as plt\n'), ((11356, 11374), 'shapefile.Writer', 'shapefile.Writer', ([], {}), '()\n', (11372, 11374), False, 'import shapefile\n'), ((92228, 92246), 'shapefile.Writer', 'shapefile.Writer', ([], {}), '()\n', (92244, 92246), False, 'import shapefile\n'), ((172861, 172879), 'shapefile.Writer', 'shapefile.Writer', ([], {}), '()\n', (172877, 172879), False, 'import shapefile\n'), ((253420, 253438), 'shapefile.Writer', 'shapefile.Writer', ([], {}), '()\n', (253436, 253438), False, 'import shapefile\n'), ((609, 649), 'numpy.sqrt', 'np.sqrt', (['((xp - x1) ** 2 + (yp - y1) ** 2)'], {}), '((xp - x1) ** 2 + (yp - y1) ** 
2)\n', (616, 649), True, 'import numpy as np\n'), ((651, 691), 'numpy.sqrt', 'np.sqrt', (['((xp - x2) ** 2 + (yp - y2) ** 2)'], {}), '((xp - x2) ** 2 + (yp - y2) ** 2)\n', (658, 691), True, 'import numpy as np\n'), ((771, 811), 'numpy.sqrt', 'np.sqrt', (['((y2 - y1) ** 2 + (x2 - x1) ** 2)'], {}), '((y2 - y1) ** 2 + (x2 - x1) ** 2)\n', (778, 811), True, 'import numpy as np\n'), ((958, 972), 'numpy.min', 'np.min', (['dist12'], {}), '(dist12)\n', (964, 972), True, 'import numpy as np\n'), ((10315, 10363), 'numpy.sqrt', 'np.sqrt', (['((lon1 - lon2) ** 2 + (lat1 - lat2) ** 2)'], {}), '((lon1 - lon2) ** 2 + (lat1 - lat2) ** 2)\n', (10322, 10363), True, 'import numpy as np\n'), ((10870, 10918), 'numpy.sqrt', 'np.sqrt', (['((lon1 - lon2) ** 2 + (lat1 - lat2) ** 2)'], {}), '((lon1 - lon2) ** 2 + (lat1 - lat2) ** 2)\n', (10877, 10918), True, 'import numpy as np\n'), ((1120, 1134), 'numpy.min', 'np.min', (['dist12'], {}), '(dist12)\n', (1126, 1134), True, 'import numpy as np\n'), ((1456, 1470), 'numpy.min', 'np.min', (['dist12'], {}), '(dist12)\n', (1462, 1470), True, 'import numpy as np\n')] |
#! /usr/bin/env python3
# Copyright(c) 2019 Intel Corporation.
# License: MIT See LICENSE file in root directory.
from argparse import ArgumentParser, SUPPRESS
from openvino.inference_engine import IENetwork, IEPlugin, IECore
import cv2
import logging as log
import numpy as np
import os
import sys
import time
# Camera frame dimensions and overlay color constants (3-tuples passed to
# OpenCV drawing calls, which interpret them in BGR channel order).
# NOTE(review): RED_COLOR and DARK_GREEN_COLOR are unused in this file.
FRAME_WIDTH = 640
FRAME_HEIGHT = 480
RED_COLOR = (255, 0, 0)
GREEN_COLOR = (50, 255, 50)
DARK_GREEN_COLOR = (10, 150, 50)
YELLOW_COLOR = (50, 255, 255)
def build_argparser():
    """Create the command-line parser for the demo.

    Returns:
        ArgumentParser: parser exposing camera options and the paths of the
        two OpenVINO IR model files, grouped under 'Options'.
    """
    parser = ArgumentParser(add_help=False)
    options = parser.add_argument_group('Options')
    options.add_argument(
        '-h', '--help', action='help', default=SUPPRESS,
        help='Show this help message and exit.')
    options.add_argument(
        "-m", "--mirror", action="store_true", help="Flip camera")
    options.add_argument(
        "-fps", "--show_fps", action="store_true",
        help="Show fps information on top of camera view")
    options.add_argument(
        "--face_ir", metavar="FACE_DETECTION_IR_File", type=str,
        default="/face-detection-retail-0004.xml",
        help="Absolute path to the face detection neural network IR file.")
    options.add_argument(
        "-emotion_ir", metavar="EMOTION_RECOGNITION_IR_File", type=str,
        default="/emotions-recognition-retail-0003.xml",
        help="Absolute path to the emotion detection neural network IR file.")
    return parser
def main():
    """Run the live emotion-recognition demo.

    Loads the face-detection and emotion-recognition IR networks onto the
    MYRIAD device, then loops over webcam frames: detects faces, classifies
    the emotion of each detected face crop, draws the results, and exits
    when ESC or 'q' is pressed.
    """
    log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
    args = build_argparser().parse_args()
    # IR paths are resolved relative to the current working directory; the
    # .bin weights file is assumed to sit next to the .xml topology file.
    face_model_xml = os.getcwd() + args.face_ir
    face_model_bin = os.path.splitext(face_model_xml)[0] + ".bin"
    emotions_model_xml = os.getcwd() + args.emotion_ir
    emotions_model_bin = os.path.splitext(emotions_model_xml)[0] + ".bin"
    device = "MYRIAD"
    fps = ""
    camera_id = 0
    # Label list indexed by the argmax of the emotion network's 5-way output.
    emotionLabel = ['Neutral', 'Happy', 'Sad', 'Surprise', 'Anger']
    cap = cv2.VideoCapture(camera_id)
    log.info("Loading Camera id {}".format(camera_id))
    # Read IR - face detection
    face_net = IENetwork(model=face_model_xml, weights=face_model_bin)
    log.info("Face-Detection network has been loaded:\n\t{}\n\t{}".format(face_model_xml, face_model_bin))
    # Read IR - emotions recognition
    emotion_net = IENetwork(model=emotions_model_xml, weights=emotions_model_bin)
    log.info("Emotions-Recognition network has been loaded:\n\t{}\n\t{}".format(emotions_model_xml, emotions_model_bin))
    log.info("Setting device: {}".format(device))
    plugin = IEPlugin(device=device)
    log.info("Loading Face-Detection model to the plugin")
    face_exec_net = plugin.load(network=face_net)
    # Set configurations for face detection (first input/output blob names)
    face_input_blob = next(iter(face_net.inputs))
    face_out_blob = next(iter(face_net.outputs))
    log.info("Loading Emotions-Recognition model to the plugin")
    emotion_exec_net = plugin.load(network=emotion_net)
    # Set configurations for emotion detection (first input/output blob names)
    emotion_input_blob = next(iter(emotion_net.inputs))
    emotion_out_blob = next(iter(emotion_net.outputs))
    if args.mirror:
        log.info("Using camera mirror")
    log.info("emotions-recognition-retail sample is starting...")
    while cap.isOpened():
        t1 = time.time()
        ret_val, img = cap.read()
        if not ret_val:
            break
        if args.mirror:
            img = cv2.flip(img, 1)
        # Face detector input: resize to 300x300, then HWC -> NCHW.
        prepimg = cv2.resize(img, (300, 300))
        prepimg = prepimg[np.newaxis, :, :, :]
        prepimg = prepimg.transpose((0, 3, 1, 2))
        face_outputs = face_exec_net.infer(inputs={face_input_blob: prepimg})
        res = face_exec_net.requests[0].outputs[face_out_blob]
        for detection in res[0][0]:
            # detection[2] is the confidence; detection[3:7] are normalized
            # box corners, scaled here to pixel coordinates.
            confidence = float(detection[2])
            xmin = int(detection[3] * img.shape[1])
            ymin = int(detection[4] * img.shape[0])
            xmax = int(detection[5] * img.shape[1])
            ymax = int(detection[6] * img.shape[0])
            if confidence > 0.7:
                cv2.rectangle(img, (xmin, ymin), (xmax, ymax), color=GREEN_COLOR)
                # NOTE(review): this guard tests absolute y coordinates, not
                # the crop size; presumably it should ensure the face crop is
                # at least 64 px tall -- confirm intent.
                if ymin >= 64 and ymax >= 64:
                    # Emotion net input: 1x3x64x64 NCHW crop of the face.
                    emoimg = img[ymin:ymax, xmin:xmax]
                    emoimg = cv2.resize(emoimg, (64, 64))
                    emoimg = emoimg.transpose((2, 0, 1))
                    emoimg = emoimg.reshape(1, 3, 64, 64)
                    emotion_outputs = emotion_exec_net.infer(inputs={emotion_input_blob: emoimg})
                    res = emotion_exec_net.requests[0].outputs[emotion_out_blob]
                    out_emotion_reshape = res.reshape(-1, 5)
                    emotion_text = emotionLabel[np.argmax(out_emotion_reshape)]
                    cv2.putText(img, emotion_text, (abs(xmin), abs(ymin - 10)), cv2.FONT_HERSHEY_DUPLEX, 0.7, (50, 255, 255), 1, 1)
        if args.show_fps:
            elapsed_time = time.time() - t1
            fps = "(Playback) {:.1f} FPS".format(1 / elapsed_time)
        cv2.putText(img, fps, (15, FRAME_HEIGHT - 15), cv2.FONT_HERSHEY_SIMPLEX, 0.4, YELLOW_COLOR, 1, cv2.LINE_AA)
        cv2.putText(img, "Hit 'ESC' or 'q' to Exit", (FRAME_WIDTH - 150, FRAME_HEIGHT - 15), cv2.FONT_HERSHEY_SIMPLEX, 0.4, YELLOW_COLOR, 1, cv2.LINE_AA)
        cv2.imshow('emotions-recognition-retail sample', img)
        waitkey = cv2.waitKey(1)
        if waitkey & 0xFF == ord('q') or waitkey == 27:
            break # esc or 'q' to quit
    cv2.destroyAllWindows()
if __name__ == '__main__':
    # main() returns None on normal termination, so exit with status 0.
    sys.exit(main() or 0)
| [
"logging.basicConfig",
"cv2.rectangle",
"openvino.inference_engine.IEPlugin",
"cv2.flip",
"argparse.ArgumentParser",
"os.path.splitext",
"logging.info",
"numpy.argmax",
"os.getcwd",
"cv2.putText",
"cv2.imshow",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"time.time",
"c... | [((529, 559), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'add_help': '(False)'}), '(add_help=False)\n', (543, 559), False, 'from argparse import ArgumentParser, SUPPRESS\n'), ((1437, 1531), 'logging.basicConfig', 'log.basicConfig', ([], {'format': '"""[ %(levelname)s ] %(message)s"""', 'level': 'log.INFO', 'stream': 'sys.stdout'}), "(format='[ %(levelname)s ] %(message)s', level=log.INFO,\n stream=sys.stdout)\n", (1452, 1531), True, 'import logging as log\n'), ((1948, 1975), 'cv2.VideoCapture', 'cv2.VideoCapture', (['camera_id'], {}), '(camera_id)\n', (1964, 1975), False, 'import cv2\n'), ((2078, 2133), 'openvino.inference_engine.IENetwork', 'IENetwork', ([], {'model': 'face_model_xml', 'weights': 'face_model_bin'}), '(model=face_model_xml, weights=face_model_bin)\n', (2087, 2133), False, 'from openvino.inference_engine import IENetwork, IEPlugin, IECore\n'), ((2297, 2360), 'openvino.inference_engine.IENetwork', 'IENetwork', ([], {'model': 'emotions_model_xml', 'weights': 'emotions_model_bin'}), '(model=emotions_model_xml, weights=emotions_model_bin)\n', (2306, 2360), False, 'from openvino.inference_engine import IENetwork, IEPlugin, IECore\n'), ((2546, 2569), 'openvino.inference_engine.IEPlugin', 'IEPlugin', ([], {'device': 'device'}), '(device=device)\n', (2554, 2569), False, 'from openvino.inference_engine import IENetwork, IEPlugin, IECore\n'), ((2575, 2629), 'logging.info', 'log.info', (['"""Loading Face-Detection model to the plugin"""'], {}), "('Loading Face-Detection model to the plugin')\n", (2583, 2629), True, 'import logging as log\n'), ((2828, 2888), 'logging.info', 'log.info', (['"""Loading Emotions-Recognition model to the plugin"""'], {}), "('Loading Emotions-Recognition model to the plugin')\n", (2836, 2888), True, 'import logging as log\n'), ((3169, 3230), 'logging.info', 'log.info', (['"""emotions-recognition-retail sample is starting..."""'], {}), "('emotions-recognition-retail sample is starting...')\n", (3177, 3230), True, 
'import logging as log\n'), ((5449, 5472), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5470, 5472), False, 'import cv2\n'), ((1592, 1603), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1601, 1603), False, 'import os\n'), ((1711, 1722), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1720, 1722), False, 'import os\n'), ((3132, 3163), 'logging.info', 'log.info', (['"""Using camera mirror"""'], {}), "('Using camera mirror')\n", (3140, 3163), True, 'import logging as log\n'), ((3270, 3281), 'time.time', 'time.time', ([], {}), '()\n', (3279, 3281), False, 'import time\n'), ((3438, 3465), 'cv2.resize', 'cv2.resize', (['img', '(300, 300)'], {}), '(img, (300, 300))\n', (3448, 3465), False, 'import cv2\n'), ((5105, 5260), 'cv2.putText', 'cv2.putText', (['img', '"""Hit \'ESC\' or \'q\' to Exit"""', '(FRAME_WIDTH - 150, FRAME_HEIGHT - 15)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.4)', 'YELLOW_COLOR', '(1)', 'cv2.LINE_AA'], {}), '(img, "Hit \'ESC\' or \'q\' to Exit", (FRAME_WIDTH - 150, \n FRAME_HEIGHT - 15), cv2.FONT_HERSHEY_SIMPLEX, 0.4, YELLOW_COLOR, 1, cv2\n .LINE_AA)\n', (5116, 5260), False, 'import cv2\n'), ((5260, 5313), 'cv2.imshow', 'cv2.imshow', (['"""emotions-recognition-retail sample"""', 'img'], {}), "('emotions-recognition-retail sample', img)\n", (5270, 5313), False, 'import cv2\n'), ((5333, 5347), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5344, 5347), False, 'import cv2\n'), ((1640, 1672), 'os.path.splitext', 'os.path.splitext', (['face_model_xml'], {}), '(face_model_xml)\n', (1656, 1672), False, 'import os\n'), ((1766, 1802), 'os.path.splitext', 'os.path.splitext', (['emotions_model_xml'], {}), '(emotions_model_xml)\n', (1782, 1802), False, 'import os\n'), ((3402, 3418), 'cv2.flip', 'cv2.flip', (['img', '(1)'], {}), '(img, 1)\n', (3410, 3418), False, 'import cv2\n'), ((4988, 5100), 'cv2.putText', 'cv2.putText', (['img', 'fps', '(15, FRAME_HEIGHT - 15)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.4)', 'YELLOW_COLOR', '(1)', 'cv2.LINE_AA'], {}), 
'(img, fps, (15, FRAME_HEIGHT - 15), cv2.FONT_HERSHEY_SIMPLEX, \n 0.4, YELLOW_COLOR, 1, cv2.LINE_AA)\n', (4999, 5100), False, 'import cv2\n'), ((4044, 4109), 'cv2.rectangle', 'cv2.rectangle', (['img', '(xmin, ymin)', '(xmax, ymax)'], {'color': 'GREEN_COLOR'}), '(img, (xmin, ymin), (xmax, ymax), color=GREEN_COLOR)\n', (4057, 4109), False, 'import cv2\n'), ((4892, 4903), 'time.time', 'time.time', ([], {}), '()\n', (4901, 4903), False, 'import time\n'), ((4241, 4269), 'cv2.resize', 'cv2.resize', (['emoimg', '(64, 64)'], {}), '(emoimg, (64, 64))\n', (4251, 4269), False, 'import cv2\n'), ((4673, 4703), 'numpy.argmax', 'np.argmax', (['out_emotion_reshape'], {}), '(out_emotion_reshape)\n', (4682, 4703), True, 'import numpy as np\n')] |
"""
dealing with sampling things
"""
import os
import sys
import copy
import random
import numpy as np
import global_settings
import update_settings as us
import job_drivers
def get_uniform_uncertainties(n_size=3, value=3.0, exclude=None):
    """Build a vector of uniform uncertainty factors.

    Parameters
    ----------
    n_size : int
        Length of the returned vector (number of uncertain dimensions).
    value : float
        Uncertainty factor assigned to every entry.
    exclude : iterable of int, optional
        Indices reset to 1.0 (i.e. excluded from perturbation).
        Out-of-range indices are silently ignored.

    Returns
    -------
    numpy.ndarray
        Float array of shape (n_size,) filled with ``value`` except at
        the excluded indices, which hold 1.0.
    """
    # Vectorized fill replaces the original per-element assignment loop.
    result = np.full(n_size, float(value), dtype=float)
    if exclude is not None:
        for idx in exclude:
            idx_valid = int(idx)
            # Ignore indices outside [0, n_size) rather than raising.
            if 0 <= idx_valid < n_size:
                result[idx_valid] = 1.0
    return result
def get_random_coef(uniform_uncertainties=None):
    """Draw one random coefficient per uncertainty factor.

    For every entry ``u`` of ``uniform_uncertainties`` a value is drawn
    uniformly from ``[1/u, u]``.

    Returns ``None`` when no input is given or when the input holds
    fewer than two entries; otherwise a numpy array of the same length.
    """
    if uniform_uncertainties is None:
        return None
    if len(uniform_uncertainties) <= 1:
        return None
    coefs = np.ones(len(uniform_uncertainties))
    for pos, bound in enumerate(uniform_uncertainties):
        coefs[pos] = random.uniform(1.0 / bound, bound)
    return coefs
def run_a_sample(data_dir):
    """
    Run a Monte Carlo sensitivity-analysis campaign under ``data_dir``.

    For each of ``n_run`` iterations: draw random coefficients for the
    uncertain dimensions, scale the configured species concentrations,
    rewrite the settings, launch the simulation job, and append the
    coefficients and the ignition delay time to global CSV files
    (which are cleared at the start of the campaign).
    """
    # sensitivity analysis settings
    s_a_s = global_settings.get_s_a_setting(data_dir)
    # global k file name, all together; remove stale results from earlier runs
    g_k_f_n = os.path.join(data_dir, "output", "k_global.csv")
    if os.path.isfile(g_k_f_n):
        os.remove(g_k_f_n)
    # global target file name, all together, target is ignition delay time (ign) here
    g_t_f_n = os.path.join(data_dir, "output", "ign_global.csv")
    if os.path.isfile(g_t_f_n):
        os.remove(g_t_f_n)
    # local target file name (presumably written by each simulation job -- confirm)
    l_t_f_n = os.path.join(data_dir, "output", "ign_local.csv")
    u_u = get_uniform_uncertainties(
        s_a_s['n_dim'], s_a_s['default_uncertainty'], s_a_s['exclude'])
    # save constant uncertainty to file
    f_n_u_const = os.path.join(data_dir, "output", "uncertainties_const.csv")
    np.savetxt(f_n_u_const, u_u, fmt='%.18e', delimiter=',', newline='\n')
    for _ in range(s_a_s['n_run']):
        # NOTE(review): get_random_coef returns None when n_dim <= 1; that
        # case would crash below -- confirm n_dim is always >= 2.
        r_c = get_random_coef(uniform_uncertainties=u_u)
        spe_idx_conc = copy.deepcopy(s_a_s['spe_idx_conc'])
        print(spe_idx_conc)
        # scale each configured species concentration by its random coefficient
        for s_i in spe_idx_conc:
            if int(s_i) >= 0 and int(s_i) < len(r_c):
                spe_idx_conc[s_i] *= r_c[int(s_i)]
        us.update_s_a_setting(data_dir,
                              init_temp=s_a_s['init_temp'],
                              critical_temp=s_a_s['critical_temp'],
                              target_temp=s_a_s['target_temp'],
                              end_temp=s_a_s['end_temp'],
                              spe_idx_conc=spe_idx_conc)
        flag = job_drivers.make_run_timeout(data_dir, timeout=s_a_s['timeout'])
        # local target time
        local_t_t = np.loadtxt(l_t_f_n, dtype=float, delimiter=',')
        local_t_t = [local_t_t]
        # if the job ran successfully (did not time out), append this
        # sample's coefficients and target to the global files
        if flag is True:
            r_c = r_c.reshape((1, len(r_c)))
            with open(g_k_f_n, 'ab') as f_handler:
                np.savetxt(f_handler, r_c, fmt='%.18e',
                           delimiter=',', newline='\n')
            with open(g_t_f_n, 'ab') as f_handler:
                np.savetxt(f_handler, local_t_t, fmt='%.18e',
                           delimiter=',', newline='\n')
if __name__ == '__main__':
    # Resolve the SOHR_DATA directory four levels above this script.
    DATA_DIR = os.path.abspath(os.path.join(os.path.realpath(
        sys.argv[0]), os.pardir, os.pardir, os.pardir, os.pardir, "SOHR_DATA"))
    print(DATA_DIR)
    run_a_sample(DATA_DIR)
| [
"random.uniform",
"numpy.ones",
"global_settings.get_s_a_setting",
"os.path.join",
"update_settings.update_s_a_setting",
"os.path.isfile",
"os.path.realpath",
"job_drivers.make_run_timeout",
"numpy.savetxt",
"copy.deepcopy",
"numpy.loadtxt",
"os.remove"
] | [((305, 320), 'numpy.ones', 'np.ones', (['n_size'], {}), '(n_size)\n', (312, 320), True, 'import numpy as np\n'), ((891, 906), 'numpy.ones', 'np.ones', (['n_size'], {}), '(n_size)\n', (898, 906), True, 'import numpy as np\n'), ((1204, 1245), 'global_settings.get_s_a_setting', 'global_settings.get_s_a_setting', (['data_dir'], {}), '(data_dir)\n', (1235, 1245), False, 'import global_settings\n'), ((1300, 1348), 'os.path.join', 'os.path.join', (['data_dir', '"""output"""', '"""k_global.csv"""'], {}), "(data_dir, 'output', 'k_global.csv')\n", (1312, 1348), False, 'import os\n'), ((1356, 1379), 'os.path.isfile', 'os.path.isfile', (['g_k_f_n'], {}), '(g_k_f_n)\n', (1370, 1379), False, 'import os\n'), ((1508, 1558), 'os.path.join', 'os.path.join', (['data_dir', '"""output"""', '"""ign_global.csv"""'], {}), "(data_dir, 'output', 'ign_global.csv')\n", (1520, 1558), False, 'import os\n'), ((1566, 1589), 'os.path.isfile', 'os.path.isfile', (['g_t_f_n'], {}), '(g_t_f_n)\n', (1580, 1589), False, 'import os\n'), ((1661, 1710), 'os.path.join', 'os.path.join', (['data_dir', '"""output"""', '"""ign_local.csv"""'], {}), "(data_dir, 'output', 'ign_local.csv')\n", (1673, 1710), False, 'import os\n'), ((1879, 1938), 'os.path.join', 'os.path.join', (['data_dir', '"""output"""', '"""uncertainties_const.csv"""'], {}), "(data_dir, 'output', 'uncertainties_const.csv')\n", (1891, 1938), False, 'import os\n'), ((1943, 2013), 'numpy.savetxt', 'np.savetxt', (['f_n_u_const', 'u_u'], {'fmt': '"""%.18e"""', 'delimiter': '""","""', 'newline': '"""\n"""'}), "(f_n_u_const, u_u, fmt='%.18e', delimiter=',', newline='\\n')\n", (1953, 2013), True, 'import numpy as np\n'), ((962, 1034), 'random.uniform', 'random.uniform', (['(1.0 / uniform_uncertainties[i])', 'uniform_uncertainties[i]'], {}), '(1.0 / uniform_uncertainties[i], uniform_uncertainties[i])\n', (976, 1034), False, 'import random\n'), ((1389, 1407), 'os.remove', 'os.remove', (['g_k_f_n'], {}), '(g_k_f_n)\n', (1398, 1407), False, 'import 
os\n'), ((1599, 1617), 'os.remove', 'os.remove', (['g_t_f_n'], {}), '(g_t_f_n)\n', (1608, 1617), False, 'import os\n'), ((2133, 2169), 'copy.deepcopy', 'copy.deepcopy', (["s_a_s['spe_idx_conc']"], {}), "(s_a_s['spe_idx_conc'])\n", (2146, 2169), False, 'import copy\n'), ((2345, 2543), 'update_settings.update_s_a_setting', 'us.update_s_a_setting', (['data_dir'], {'init_temp': "s_a_s['init_temp']", 'critical_temp': "s_a_s['critical_temp']", 'target_temp': "s_a_s['target_temp']", 'end_temp': "s_a_s['end_temp']", 'spe_idx_conc': 'spe_idx_conc'}), "(data_dir, init_temp=s_a_s['init_temp'], critical_temp\n =s_a_s['critical_temp'], target_temp=s_a_s['target_temp'], end_temp=\n s_a_s['end_temp'], spe_idx_conc=spe_idx_conc)\n", (2366, 2543), True, 'import update_settings as us\n'), ((2700, 2764), 'job_drivers.make_run_timeout', 'job_drivers.make_run_timeout', (['data_dir'], {'timeout': "s_a_s['timeout']"}), "(data_dir, timeout=s_a_s['timeout'])\n", (2728, 2764), False, 'import job_drivers\n'), ((2814, 2861), 'numpy.loadtxt', 'np.loadtxt', (['l_t_f_n'], {'dtype': 'float', 'delimiter': '""","""'}), "(l_t_f_n, dtype=float, delimiter=',')\n", (2824, 2861), True, 'import numpy as np\n'), ((3423, 3452), 'os.path.realpath', 'os.path.realpath', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (3439, 3452), False, 'import os\n'), ((3085, 3153), 'numpy.savetxt', 'np.savetxt', (['f_handler', 'r_c'], {'fmt': '"""%.18e"""', 'delimiter': '""","""', 'newline': '"""\n"""'}), "(f_handler, r_c, fmt='%.18e', delimiter=',', newline='\\n')\n", (3095, 3153), True, 'import numpy as np\n'), ((3248, 3322), 'numpy.savetxt', 'np.savetxt', (['f_handler', 'local_t_t'], {'fmt': '"""%.18e"""', 'delimiter': '""","""', 'newline': '"""\n"""'}), "(f_handler, local_t_t, fmt='%.18e', delimiter=',', newline='\\n')\n", (3258, 3322), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Functions to evaluate a trained model
Note: The file was more or less taken from Spotlight
"""
import numpy as np
import scipy.stats as st
FLOAT_MAX = np.finfo(np.float32).max
def mrr_score(model, test, train=None):
    """Compute the mean reciprocal rank (MRR) for every test user.

    Parameters
    ----------
    model: fitted instance of a recommender model
        The model to evaluate; must expose ``predict(user_id)``.
    test: :class:`spotlight.interactions.Interactions`
        Test interactions (anything with ``tocsr()``).
    train: :class:`spotlight.interactions.Interactions`, optional
        If given, known train interactions are pushed to the bottom of
        the ranking so they do not affect the MRR.

    Returns
    -------
    numpy array of shape (num_users,)
        One MRR value per test user that has at least one interaction.
    """
    test = test.tocsr()
    train = train.tocsr() if train is not None else None
    scores = []
    for user_id, row in enumerate(test):
        if len(row.indices) == 0:
            continue
        # Negate so that ascending ranks correspond to descending scores.
        neg_scores = -model.predict(user_id)
        if train is not None:
            # Mask known training interactions so they rank last.
            neg_scores[train[user_id].indices] = FLOAT_MAX
        ranks = st.rankdata(neg_scores)
        scores.append(np.mean(1.0 / ranks[row.indices]))
    return np.array(scores)
def _get_precision_recall(predictions, targets, k):
predictions = predictions[:k]
n_hit = len(set(predictions).intersection(set(targets)))
return float(n_hit) / len(predictions), float(n_hit) / len(targets)
def precision_recall_score(model, test, train=None, k=10):
    """Compute Precision@k and Recall@k for every test user.

    Parameters
    ----------
    model: fitted instance of a recommender model
        The model to evaluate; must expose ``predict(user_id)``.
    test: :class:`spotlight.interactions.Interactions`
        Test interactions (anything with ``tocsr()``).
    train: :class:`spotlight.interactions.Interactions`, optional
        If given, already-rated items are pushed to the bottom of the
        ranking so they do not affect the metrics.
    k: int or array of int
        Cutoff(s) for the number of predicted items.

    Returns
    -------
    (Precision@k, Recall@k): numpy array of shape (num_users, len(k))
        A scalar-per-user pair when ``k`` is scalar; otherwise one row
        per user and one column per cutoff (squeezed).
    """
    test = test.tocsr()
    train = train.tocsr() if train is not None else None
    if np.isscalar(k):
        k = np.array([k])
    all_precisions = []
    all_recalls = []
    for user_id, row in enumerate(test):
        if len(row.indices) == 0:
            continue
        scores = -model.predict(user_id)
        if train is not None:
            # Push already-rated items to the bottom of the ranking.
            scores[train[user_id].indices] = FLOAT_MAX
        ranked_items = scores.argsort()
        per_k = [_get_precision_recall(ranked_items, row.indices, cutoff)
                 for cutoff in k]
        user_precision, user_recall = zip(*per_k)
        all_precisions.append(user_precision)
        all_recalls.append(user_recall)
    return np.array(all_precisions).squeeze(), np.array(all_recalls).squeeze()
def auc_score(model, test, train=None, auc_selection_seed=42):
    """Approximate per-user AUC (see https://arxiv.org/pdf/1508.06091.pdf).

    For each test user, every positive item's prediction is compared
    against the prediction of one randomly sampled unobserved item; the
    fraction of correctly ordered pairs is that user's AUC estimate.

    NOTE(review): ``train`` is accepted but currently unused -- known
    positive removal is not implemented (not needed for Movielens).

    Args:
        model: fitted recommender exposing ``predict(user_id)``.
        test: interactions object with ``tocsr()``.
        train: unused (see note above).
        auc_selection_seed: seed for the negative-item sampling.

    Returns:
        numpy array with one AUC value per test user that has at least
        one interaction.
    """
    test = test.tocsr()
    np.random.seed(auc_selection_seed)
    scores = []
    for user_id, row in enumerate(test):
        positives = row.indices
        if len(positives) == 0:
            continue
        # Predictions for all items of this user.
        predictions = model.predict(user_id)
        # Sample as many unobserved items as there are positives.
        candidates = np.setdiff1d(np.arange(len(predictions)), positives)
        negatives = np.random.choice(candidates, size=len(positives), replace=False)
        # Fraction of (positive, negative) pairs ranked correctly.
        correctly_ordered = (predictions[positives] > predictions[negatives]).sum()
        scores.append(correctly_ordered / len(positives))
    return np.array(scores)
def rmse_score(model, test):
    """Root-mean-square error between predicted and observed ratings.

    Parameters
    ----------
    model: fitted instance of a recommender model
        Must expose ``predict(user_ids, item_ids)``.
    test: :class:`spotlight.interactions.Interactions`
        Test interactions with ``user_ids``, ``item_ids`` and ``ratings``.

    Returns
    -------
    float
        The RMSE over all test interactions.
    """
    observed = np.clip(test.ratings, 0, 1)  # map -1 labels onto 0
    predicted = model.predict(test.user_ids, test.item_ids)
    squared_errors = (observed - predicted) ** 2
    return np.sqrt(squared_errors.mean())
| [
"numpy.clip",
"numpy.isscalar",
"scipy.stats.rankdata",
"numpy.random.choice",
"numpy.array",
"numpy.random.seed",
"numpy.finfo"
] | [((184, 204), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (192, 204), True, 'import numpy as np\n'), ((1428, 1442), 'numpy.array', 'np.array', (['mrrs'], {}), '(mrrs)\n', (1436, 1442), True, 'import numpy as np\n'), ((2846, 2860), 'numpy.isscalar', 'np.isscalar', (['k'], {}), '(k)\n', (2857, 2860), True, 'import numpy as np\n'), ((3936, 3970), 'numpy.random.seed', 'np.random.seed', (['auc_selection_seed'], {}), '(auc_selection_seed)\n', (3950, 3970), True, 'import numpy as np\n'), ((4875, 4894), 'numpy.array', 'np.array', (['auc_score'], {}), '(auc_score)\n', (4883, 4894), True, 'import numpy as np\n'), ((5332, 5359), 'numpy.clip', 'np.clip', (['test.ratings', '(0)', '(1)'], {}), '(test.ratings, 0, 1)\n', (5339, 5359), True, 'import numpy as np\n'), ((2874, 2887), 'numpy.array', 'np.array', (['[k]'], {}), '([k])\n', (2882, 2887), True, 'import numpy as np\n'), ((4344, 4402), 'numpy.random.choice', 'np.random.choice', (['neg_targets'], {'size': 'n_preds', 'replace': '(False)'}), '(neg_targets, size=n_preds, replace=False)\n', (4360, 4402), True, 'import numpy as np\n'), ((3494, 3513), 'numpy.array', 'np.array', (['precision'], {}), '(precision)\n', (3502, 3513), True, 'import numpy as np\n'), ((3537, 3553), 'numpy.array', 'np.array', (['recall'], {}), '(recall)\n', (3545, 3553), True, 'import numpy as np\n'), ((1344, 1368), 'scipy.stats.rankdata', 'st.rankdata', (['predictions'], {}), '(predictions)\n', (1355, 1368), True, 'import scipy.stats as st\n')] |
"""
Contains unit tests for the functions in the package.
Author: <NAME>
Year: 2021
"""
import time
import numpy as np
import pandas as pd
from .classifier_comparisons import _rank_single_dataset
# -----------------------------------------
# Main function
# -----------------------------------------
def main():
    """Run every unit test defined in this module."""
    # Tests for src.classifier_comparisons._rank_single_dataset.
    _test_rank_single_dataset()
# -----------------------------------------
# Unit tests
# -----------------------------------------
def _test_rank_single_dataset():
    """Unit tests for src.classifier_comparisons._rank_single_dataset.

    Each case is (scores, expected_ranks, tolerance).  Results are
    printed for visual comparison (as in the original, nothing is
    asserted).  The table-driven loop replaces eight copy-pasted
    test stanzas while producing identical output.
    """
    print("\nTesting: src.classifier_comparisons._rank_single_dataset:")
    cases = [
        # (scores, expected ranks, tolerance)
        (np.array([0.3, 0.1, 0.04, 0.6, 1.0]), np.array([3, 2, 1, 4, 5]), 0.01),
        (np.array([0.0, 0.0, 0.0, 0.0, 0.0]), np.array([3, 3, 3, 3, 3]), 0.01),
        (np.array([0.0, 0.0, 0.0, 0.0, 0.1]), np.array([2.5, 2.5, 2.5, 2.5, 5]), 0.01),
        (np.array([0.0, 0.5]), np.array([1, 2]), 0.01),
        (np.array([0.0, 0.5]), np.array([1.5, 1.5]), 0.500001),
        (np.array([0.0, 0.1, 0.22, 0.2, 0.33, 0.3301, 1.0, 1.0, 1.01]),
         np.array([1, 2, 4, 3, 5.5, 5.5, 8, 8, 8]), 0.0101),
        (np.array([0.0, 0.1, 0.22, 0.2, 0.33, 0.3301, 1.0, 1.0, 1.01]),
         np.array([1, 2, 4, 3, 5.5, 5.5, 7.5, 7.5, 9]), 0.01),
        (np.array([0.002, 0.002, 0.100, 0.101, 0.500, 0.500, 0.500]),
         np.array([1.5, 1.5, 3, 4, 6, 6, 6]), 0.0),
    ]
    for scores, expected, tolerance in cases:
        print("Expected:", expected, "-- Method:",
              _rank_single_dataset(scores, tolerance))
if __name__ == "__main__":
main() | [
"numpy.array"
] | [((755, 791), 'numpy.array', 'np.array', (['[0.3, 0.1, 0.04, 0.6, 1.0]'], {}), '([0.3, 0.1, 0.04, 0.6, 1.0])\n', (763, 791), True, 'import numpy as np\n'), ((807, 832), 'numpy.array', 'np.array', (['[3, 2, 1, 4, 5]'], {}), '([3, 2, 1, 4, 5])\n', (815, 832), True, 'import numpy as np\n'), ((939, 974), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0])\n', (947, 974), True, 'import numpy as np\n'), ((990, 1015), 'numpy.array', 'np.array', (['[3, 3, 3, 3, 3]'], {}), '([3, 3, 3, 3, 3])\n', (998, 1015), True, 'import numpy as np\n'), ((1122, 1157), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.1]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.1])\n', (1130, 1157), True, 'import numpy as np\n'), ((1173, 1206), 'numpy.array', 'np.array', (['[2.5, 2.5, 2.5, 2.5, 5]'], {}), '([2.5, 2.5, 2.5, 2.5, 5])\n', (1181, 1206), True, 'import numpy as np\n'), ((1313, 1333), 'numpy.array', 'np.array', (['[0.0, 0.5]'], {}), '([0.0, 0.5])\n', (1321, 1333), True, 'import numpy as np\n'), ((1349, 1365), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (1357, 1365), True, 'import numpy as np\n'), ((1472, 1492), 'numpy.array', 'np.array', (['[0.0, 0.5]'], {}), '([0.0, 0.5])\n', (1480, 1492), True, 'import numpy as np\n'), ((1508, 1528), 'numpy.array', 'np.array', (['[1.5, 1.5]'], {}), '([1.5, 1.5])\n', (1516, 1528), True, 'import numpy as np\n'), ((1639, 1700), 'numpy.array', 'np.array', (['[0.0, 0.1, 0.22, 0.2, 0.33, 0.3301, 1.0, 1.0, 1.01]'], {}), '([0.0, 0.1, 0.22, 0.2, 0.33, 0.3301, 1.0, 1.0, 1.01])\n', (1647, 1700), True, 'import numpy as np\n'), ((1716, 1757), 'numpy.array', 'np.array', (['[1, 2, 4, 3, 5.5, 5.5, 8, 8, 8]'], {}), '([1, 2, 4, 3, 5.5, 5.5, 8, 8, 8])\n', (1724, 1757), True, 'import numpy as np\n'), ((1866, 1927), 'numpy.array', 'np.array', (['[0.0, 0.1, 0.22, 0.2, 0.33, 0.3301, 1.0, 1.0, 1.01]'], {}), '([0.0, 0.1, 0.22, 0.2, 0.33, 0.3301, 1.0, 1.0, 1.01])\n', (1874, 1927), True, 'import numpy as np\n'), ((1943, 1988), 
'numpy.array', 'np.array', (['[1, 2, 4, 3, 5.5, 5.5, 7.5, 7.5, 9]'], {}), '([1, 2, 4, 3, 5.5, 5.5, 7.5, 7.5, 9])\n', (1951, 1988), True, 'import numpy as np\n'), ((2095, 2146), 'numpy.array', 'np.array', (['[0.002, 0.002, 0.1, 0.101, 0.5, 0.5, 0.5]'], {}), '([0.002, 0.002, 0.1, 0.101, 0.5, 0.5, 0.5])\n', (2103, 2146), True, 'import numpy as np\n'), ((2170, 2205), 'numpy.array', 'np.array', (['[1.5, 1.5, 3, 4, 6, 6, 6]'], {}), '([1.5, 1.5, 3, 4, 6, 6, 6])\n', (2178, 2205), True, 'import numpy as np\n')] |
#!/usr/local/bin/python
# coding=utf-8
import numpy as np
import matplotlib.pylab as plt
N = 10
def generate_1D(N):
    """Build the (N-1) x (N-1) 1D Laplace matrix for Dirichlet boundaries.

    The matrix is tridiagonal: 2 on the main diagonal and -1 on the
    first super- and sub-diagonals.

    Parameters
    ----------
    N : int
        Number of grid intervals; the matrix acts on the N-1 interior
        points.

    Returns
    -------
    numpy.ndarray
        Dense float array of shape (N - 1, N - 1).
    """
    n = N - 1
    # Vectorized tridiagonal construction; replaces the original O(N^2)
    # Python loop (which also wrote redundant zeros into a zero matrix).
    return 2.0 * np.eye(n) - np.eye(n, k=1) - np.eye(n, k=-1)
# Assemble the Laplace system and solve it against a random right-hand side.
L = generate_1D(N)
b = np.random.randn(N - 1)
x = np.linalg.solve(L, b)
# Solution at the interior points, then append 0 for the right boundary.
# NOTE(review): the comprehension is a plain copy of x (list(x)); the left
# boundary value is not included -- confirm this matches the intended plot.
points = [x[i - 1] for i in range(1, (len(x) + 1))]
points.append(0)
# N equally spaced plot positions on [0, 1]; len(points) is also N.
interval = np.linspace(0.0, 1.0, num=N)
fig = plt.Figure()  # NOTE(review): immediately overwritten by subplots() below.
fig, ax = plt.subplots()
ax.plot(interval, points, "bo-")
plt.show()
"matplotlib.pylab.subplots",
"numpy.linalg.solve",
"matplotlib.pylab.Figure",
"numpy.linspace",
"numpy.zeros",
"matplotlib.pylab.show",
"numpy.random.randn"
] | [((504, 526), 'numpy.random.randn', 'np.random.randn', (['(N - 1)'], {}), '(N - 1)\n', (519, 526), True, 'import numpy as np\n'), ((531, 552), 'numpy.linalg.solve', 'np.linalg.solve', (['L', 'b'], {}), '(L, b)\n', (546, 552), True, 'import numpy as np\n'), ((634, 662), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)'], {'num': 'N'}), '(0.0, 1.0, num=N)\n', (645, 662), True, 'import numpy as np\n'), ((670, 682), 'matplotlib.pylab.Figure', 'plt.Figure', ([], {}), '()\n', (680, 682), True, 'import matplotlib.pylab as plt\n'), ((693, 707), 'matplotlib.pylab.subplots', 'plt.subplots', ([], {}), '()\n', (705, 707), True, 'import matplotlib.pylab as plt\n'), ((741, 751), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (749, 751), True, 'import matplotlib.pylab as plt\n'), ((193, 223), 'numpy.zeros', 'np.zeros', ([], {'shape': '(N - 1, N - 1)'}), '(shape=(N - 1, N - 1))\n', (201, 223), True, 'import numpy as np\n')] |
# Copyright (C) 2021 poypoyan
from setuptools import setup
from Cython.Build import cythonize
import numpy
setup(
    name="edhsmm",
    version="0.1.2",
    description="An(other) implementation of Explicit Duration HMM/HSMM in Python 3",
    long_description=open("README.md", encoding="utf-8").read(),
    long_description_content_type="text/markdown",
    author="poypoyan",
    author_email="<EMAIL>",
    url="https://github.com/poypoyan/edhsmm",
    license="MIT",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "License :: OSI Approved",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "Topic :: Software Development",
        "Topic :: Scientific/Engineering",
        "Programming Language :: Cython",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
    ],
    packages=["edhsmm"],
    # Compile the Cython core extension.
    # NOTE(review): cythonize's include_path is searched for .pxd files;
    # NumPy's C headers are usually passed via Extension(include_dirs=...)
    # instead -- confirm this builds as intended.
    ext_modules=cythonize("edhsmm/hsmm_core_x.pyx",
        include_path = [numpy.get_include()]),
    python_requires=">=3.5", # compatibility with hmmlearn
    install_requires=[
        "numpy>=1.10", # compatibility with hmmlearn
        "scikit-learn>=0.16", # sklearn.utils.check_array
        "scipy>=0.19", # scipy.special.logsumexp
    ],
)
) | [
"numpy.get_include"
] | [((993, 1012), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (1010, 1012), False, 'import numpy\n')] |
from __future__ import annotations
from dataclasses import dataclass
from typing import Sequence, Tuple, Optional, List, Dict, Any, Iterable
import logging
from pathlib import Path
import os
from concurrent.futures import ThreadPoolExecutor
from requests import HTTPError
from catpy.applications import CatmaidClientApplication
from catpy.applications.morphology import lol_to_df
import pandas as pd
import numpy as np
from .bbox import Bbox
from .utils import CoordZYX
logger = logging.getLogger(__name__)
DEFAULT_WORKERS = 10
def treenode_table(response):
    """Convert a CATMAID treenode list-of-lists response into a DataFrame.

    parent_id uses the pandas nullable UInt64 extension dtype; edit_time
    is left untyped.
    """
    columns = [
        "treenode_id",
        "parent_id",
        "x",
        "y",
        "z",
        "confidence",
        "radius",
        "skeleton_id",
        "edit_time",
        "user_id",
    ]
    dtypes = [
        np.uint64,
        pd.UInt64Dtype(),
        np.float64,
        np.float64,
        np.float64,
        np.int8,
        np.float64,
        np.uint64,
        None,  # edit_time: no dtype coercion
        np.uint64,
    ]
    return lol_to_df(response, columns, dtypes)
def connector_node_table(response):
    """Convert a CATMAID connector list-of-lists response into a DataFrame.

    edit_time is left untyped.
    """
    columns = ["connector_id", "x", "y", "z", "confidence", "edit_time", "user_id"]
    dtypes = [
        np.uint64,
        np.float64,
        np.float64,
        np.float64,
        np.int8,
        None,  # edit_time: no dtype coercion
        np.uint64,
    ]
    return lol_to_df(response, columns, dtypes)
def merge_node_tables(dfs: Sequence[pd.DataFrame], drop_subset=None):
    """Concatenate node tables and drop duplicate rows.

    Args:
        dfs: tables sharing the same columns.
        drop_subset: columns used to identify duplicates; ``None``
            compares all columns.

    Returns:
        A single DataFrame with a fresh RangeIndex from concatenation;
        duplicate removal keeps the first occurrence.
    """
    combined = pd.concat(dfs, ignore_index=True)
    return combined.drop_duplicates(subset=drop_subset)
def merge_treenode_tables(dfs: Sequence[pd.DataFrame]):
    """Merge treenode tables, de-duplicating on (treenode_id, skeleton_id)."""
    return merge_node_tables(dfs, ["treenode_id", "skeleton_id"])
def merge_connector_tables(dfs: Sequence[pd.DataFrame]):
    """Concatenate connector tables, deduplicating on connector_id."""
    return merge_node_tables(dfs, ["connector_id"])
@dataclass
class ConnectorPartner:
    """One partner node linked to a connector, as reported by CATMAID."""
    link_id: int
    partner_id: int  # used as "treenode_id" in ConnectorDetail.to_partners_df
    confidence: int
    skeleton_id: int
    relation_id: int
    relation_name: str  # names starting with "pre" mark presynaptic links
@dataclass
class ConnectorDetail:
    """Parsed detail record for a single connector node.

    Mirrors the CATMAID connector-detail response: connector id,
    location (z/y/x), confidence, and the linked treenode partners.
    """
    connector_id: int
    location: CoordZYX
    confidence: int
    partners: List[ConnectorPartner]
    @classmethod
    def from_response(cls, response):
        """Build a ConnectorDetail from a CATMAID JSON response dict."""
        return cls(
            response["connector_id"],
            CoordZYX(response["z"], response["y"], response["x"]),
            response["confidence"],
            [ConnectorPartner(**p) for p in response["partners"]],
        )
    @staticmethod
    def to_connector_partners_df(details: Iterable[ConnectorDetail]):
        """Convert many details into a (connectors_df, partners_df) pair.

        connectors_df has one row per connector (id plus x/y/z columns);
        partners_df stacks every connector's partner table.
        """
        dims = ["x", "y", "z"]
        conn_ids = []
        locs = []
        partners_dfs = []
        for det in details:
            conn_ids.append(det.connector_id)
            locs.append([det.location[d] for d in dims])
            partners_dfs.append(det.to_partners_df())
        connectors = pd.DataFrame(
            np.array(conn_ids, dtype=np.uint64), columns=["connector_id"]
        )
        connectors[dims] = pd.DataFrame(np.array(locs), columns=dims)
        # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
        # pd.concat (no ignore_index) reproduces the original index-keeping
        # behaviour of first.append(list(others)).
        partners = pd.concat(partners_dfs)
        return connectors, partners
    def to_partners_df(self):
        """Return one row per partner: skeleton/treenode/connector ids plus an
        is_presynaptic flag derived from the relation name."""
        headers = ("skeleton_id", "treenode_id", "connector_id", "is_presynaptic")
        is_presyn = []
        ids = []
        for p in self.partners:
            is_presyn.append(p.relation_name.startswith("pre"))
            ids.append([p.skeleton_id, p.partner_id, self.connector_id])
        df = pd.DataFrame(np.array(ids, dtype=np.uint64), columns=headers[:-1])
        df[headers[-1]] = np.array(is_presyn, bool)
        return df
class Catmaid(CatmaidClientApplication):
    """CATMAID client with bbox node queries and connector-detail lookups."""
    def nodes_in_bbox(
        self,
        bbox: Bbox,
        treenodes=True,
        connectors=True,
        splits: Sequence[int] = (2, 2, 2),
    ) -> Tuple[Optional[pd.DataFrame], Optional[pd.DataFrame]]:
        """Fetch treenode and connector tables inside ``bbox``.

        When the server reports that the node limit was reached (or times
        out), the bbox is recursively split per ``splits`` and the
        sub-results are merged.

        Returns:
            (treenode_df, connector_df); each element is None when the
            corresponding flag is False.
        """
        logger.debug("Getting nodes in bbox %s", bbox)
        data = bbox.to_catmaid()
        try:
            response = self.post((self.project_id, "/node/list"), data)
        except HTTPError as e:
            # requests.HTTPError reports the status on e.response; e.errno is
            # inherited from OSError and is typically None for HTTP errors,
            # so the original `e.errno == 504` check never matched.
            status = e.response.status_code if e.response is not None else None
            if status == 504:
                logger.warning("Server timeout; splitting Bbox")
                # Fake "node limit reached" response to trigger the split path.
                response = {3: True}
            else:
                raise
        if not response[3]:
            tn_df = treenode_table(response[0]) if treenodes else None
            conn_df = connector_node_table(response[1]) if connectors else None
            # Guard len() against the None placeholders.
            logger.debug(
                "Got %s treenodes, %s connectors",
                None if tn_df is None else len(tn_df),
                None if conn_df is None else len(conn_df),
            )
            return tn_df, conn_df
        # node limit reached
        logger.info("Splitting bbox into %s", splits)
        tn_dfs = []
        conn_dfs: List[pd.DataFrame] = []
        for sub_bb in bbox.split(*splits):
            tn_df, conn_df = self.nodes_in_bbox(sub_bb, treenodes, connectors, splits)
            if treenodes and tn_df is not None:
                tn_dfs.append(tn_df)
            if connectors and conn_df is not None:
                conn_dfs.append(conn_df)
        return (
            merge_treenode_tables(tn_dfs) if treenodes else None,
            merge_connector_tables(conn_dfs) if connectors else None,
        )
    def connector_detail(self, conn_id: int):
        """Fetch and parse the detail record for one connector."""
        return ConnectorDetail.from_response(
            self.get(f"{self.project_id}/connectors/{conn_id}")
        )
    def connector_detail_many(self, conn_ids, threads=DEFAULT_WORKERS):
        """Yield ConnectorDetail objects for many connector IDs concurrently."""
        yield from batch(
            self.connector_detail, [to_args_kwargs(c) for c in conn_ids], threads
        )
# (positional_args, keyword_args) pair, as produced by to_args_kwargs.
ArgsKwargs = Tuple[Sequence, Dict[str, Any]]
def to_args_kwargs(*args, **kwargs):
    """Pack a call's positional and keyword arguments into an ArgsKwargs pair."""
    return args, kwargs
def batch(fn, args_kwargs: Iterable[ArgsKwargs], workers=DEFAULT_WORKERS):
    """Apply ``fn`` to each (args, kwargs) pair on a thread pool.

    Results are yielded in submission order.
    """
    with ThreadPoolExecutor(workers) as pool:
        pending = [
            pool.submit(fn, *call_args, **call_kwargs)
            for call_args, call_kwargs in args_kwargs
        ]
        for fut in pending:
            yield fut.result()
def get_creds() -> Path:
    """Return the credentials path named by the CATMAID_CREDENTIALS env var.

    Raises:
        RuntimeError: when the environment variable is not set.
    """
    location = os.environ.get("CATMAID_CREDENTIALS")
    if location is None:
        raise RuntimeError(
            "Use CATMAID_CREDENTIALS env var to give location of catmaid credentials file"
        )
    return Path(location)
def get_catmaid() -> Catmaid:
    """Construct a Catmaid client from the credentials file named in the env."""
    return Catmaid.from_json(get_creds())
| [
"logging.getLogger",
"pandas.UInt64Dtype",
"pathlib.Path",
"concurrent.futures.ThreadPoolExecutor",
"numpy.array",
"pandas.concat",
"catpy.applications.morphology.lol_to_df"
] | [((482, 509), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (499, 509), False, 'import logging\n'), ((1269, 1453), 'catpy.applications.morphology.lol_to_df', 'lol_to_df', (['response', "['connector_id', 'x', 'y', 'z', 'confidence', 'edit_time', 'user_id']", '[np.uint64, np.float64, np.float64, np.float64, np.int8, edit_time_dtype,\n np.uint64]'], {}), "(response, ['connector_id', 'x', 'y', 'z', 'confidence',\n 'edit_time', 'user_id'], [np.uint64, np.float64, np.float64, np.float64,\n np.int8, edit_time_dtype, np.uint64])\n", (1278, 1453), False, 'from catpy.applications.morphology import lol_to_df\n'), ((1671, 1704), 'pandas.concat', 'pd.concat', (['dfs'], {'ignore_index': '(True)'}), '(dfs, ignore_index=True)\n', (1680, 1704), True, 'import pandas as pd\n'), ((4094, 4119), 'numpy.array', 'np.array', (['is_presyn', 'bool'], {}), '(is_presyn, bool)\n', (4102, 4119), True, 'import numpy as np\n'), ((6244, 6271), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', (['workers'], {}), '(workers)\n', (6262, 6271), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((6464, 6503), 'pathlib.Path', 'Path', (["os.environ['CATMAID_CREDENTIALS']"], {}), "(os.environ['CATMAID_CREDENTIALS'])\n", (6468, 6503), False, 'from pathlib import Path\n'), ((922, 938), 'pandas.UInt64Dtype', 'pd.UInt64Dtype', ([], {}), '()\n', (936, 938), True, 'import pandas as pd\n'), ((3402, 3437), 'numpy.array', 'np.array', (['conn_ids'], {'dtype': 'np.uint64'}), '(conn_ids, dtype=np.uint64)\n', (3410, 3437), True, 'import numpy as np\n'), ((3514, 3528), 'numpy.array', 'np.array', (['locs'], {}), '(locs)\n', (3522, 3528), True, 'import numpy as np\n'), ((4014, 4044), 'numpy.array', 'np.array', (['ids'], {'dtype': 'np.uint64'}), '(ids, dtype=np.uint64)\n', (4022, 4044), True, 'import numpy as np\n')] |
"""Code used to create the examples in the overflow paper.
This module contains the code used to create the examples in the paper at
https://arxiv.org/pdf/2001.09611v1, referred to as the overflow paper.
Execute this module as a script to reproduce the examples. The code has been
tested using Python 3.5.2, NumPy 1.11.1, and SciPy 0.18.1 via the
Anaconda 4.2.0 (64-bit) installation.
"""
import matplotlib.pyplot as plt
import numpy as np
from create_overflow_networks import square_network_example
from create_overflow_networks import worst_case_example
from overflow_algorithm import solve_overflow_traffic_equation
def progress_bar(i, n, width=20):
    """Print a simple in-place progress bar.

    Visualizes how many iterations of a loop have completed. Does not take
    actual or estimated remaining computation time into account.

    Args:
        i: Number of completed iterations.
        n: Total number of iterations.
        width: Number of characters used to draw the bar.
    """
    done_char = "X"
    todo_char = "."
    n_done = int(np.floor((i / n) * width))
    bar = done_char * n_done + todo_char * (width - n_done)
    print("\rSolving: [" + bar + "]", end="")
def square_network_overflow_plot(m, grid_nr, show_progress=False):
    """Creates a heat map as in Example 1 of the overflow paper.

    First defines a grid of parameter values for the delta and epsilon
    parameters of the family of networks presented in Example 1. Computes the
    solution to the overflow equation for each pair of parameters delta and
    epsilon and determines the fraction of overflowing nodes for that pair.
    Plots a heat map of these fractions.

    Args:
        m: Number of cells for the network of Example 1.
        grid_nr: Number of parameter values used for each parameter not
            including 0.0. The grid consists of grid_nr + 1 evenly spaced
            points in [0, 1] with end points 0.0 and 1.0.
        show_progress: Indicates whether a progress bar is shown.
    """
    # Total number of nodes: each of the m*m blocks contributes 4 cells.
    # (The old dead initialization of alpha/mu was removed; both are
    # reassigned by square_network_example before first use.)
    n = 4*(m**2)
    grid_nr = grid_nr + 1
    delta_space = np.linspace(0, 1.0, grid_nr)
    epsilon_space = np.linspace(0, 1.0, grid_nr)
    overflow_proportion = np.zeros([grid_nr, grid_nr])
    if show_progress:
        print("")
    for i in range(grid_nr):
        for j in range(grid_nr):
            delta = delta_space[i]
            epsilon = epsilon_space[j]
            alpha, mu, P, Q = square_network_example(m, delta, epsilon)
            sol = solve_overflow_traffic_equation(alpha, mu, P, Q)
            # A node overflows when its solution reaches its capacity mu.
            overflow_proportion[i, j] = sum(sol >= mu) / n
            if show_progress:
                progress_bar(i * grid_nr + (j + 1), grid_nr**2)
    if show_progress:
        print("")
    # Plot results.
    # Set font sizes for plots.
    SMALL_FONT = 8
    MEDIUM_FONT = 18
    LARGE_FONT = 22
    plt.rc('font', size=SMALL_FONT)          # Default text font size
    plt.rc('axes', titlesize=MEDIUM_FONT)    # Axes titles font size
    plt.rc('axes', labelsize=LARGE_FONT)     # Axes labels font size
    plt.rc('xtick', labelsize=MEDIUM_FONT)   # xtick labels font size
    plt.rc('ytick', labelsize=MEDIUM_FONT)   # ytick labels font size
    plt.rc('legend', fontsize=MEDIUM_FONT)   # legend font size
    plt.rc('figure', titlesize=MEDIUM_FONT)  # Figure title font size
    # Flip the matrix for better plotting of the vertical axis.
    overflow_proportion = np.flipud(overflow_proportion)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    im = ax.matshow(overflow_proportion)
    # Set ticks at horizontal and vertical axes;
    # the yticks are flipped to correspond with the flipped matrix.
    ticks_max = 6
    ticks_step = np.floor((grid_nr - 1) / (ticks_max - 1))
    ticks_index = np.arange(start=0, stop=grid_nr, step=ticks_step, dtype=int)
    ax.set_xticks(ticks_index)
    ax.set_yticks(np.flipud(ticks_index))
    # Label ticks with the corresponding values.
    delta_labels = [delta_space[i] for i in ticks_index]
    epsilon_labels = [epsilon_space[i] for i in ticks_index]
    ax.set_xticklabels(delta_labels)
    ax.set_yticklabels(epsilon_labels)
    # Show ticks on horizontal axis at the bottom.
    ax.tick_params(axis="x", bottom=True, top=False,
                   labelbottom=True, labeltop=False)
    # Set x label and y label. Raw strings avoid the invalid "\d"/"\e"
    # escape sequences (value-identical, but "\d" is a DeprecationWarning
    # and a future SyntaxError).
    plt.xlabel(r"$\delta$")
    plt.ylabel(r"$\epsilon$")
    # Plot the figure.
    fig.colorbar(im)
    plt.show()
if __name__ == "__main__":
    show_progress = True  # Progress bar will be shown if True
    # Number of grid points used for the parameter space of the square network.
    # Choose grid_nr >= 1.
    # Step size is 1 / grid_nr for a total number of 1 + grid_nr grid points.
    grid_nr = 100
    # Number of blocks in the horizontal direction of the square network.
    # Choose m >= 2.
    m = 3
    # Run the square network example with m = 3.
    square_network_overflow_plot(m, grid_nr, show_progress)
    # Run the square network example with m = 5.
    m = 5
    square_network_overflow_plot(m, grid_nr, show_progress)
    # Check that the worst case examples indeed require the maximum number of
    # iterations of the overflow algorithm.
    n = 75
    # results[i-1] records whether problem size i hit the iteration cap.
    results = np.zeros(n)
    if show_progress:
        print("")
    for i in range(1, n + 1):
        alpha, mu, P, Q = worst_case_example(i)
        x = solve_overflow_traffic_equation(alpha, mu, P, Q, count=True)
        labda, solve_count, max_count = x
        results[i - 1] = (solve_count == max_count)
        if show_progress:
            progress_bar(i, n)
    if show_progress:
        print("")
    if results.all():  # Every solve_count == max_count
        print("All solutions required the maximum number of iterations.")
    else:
        print("At least one solution required less than the maximum number " +
              "of iterations.")
| [
"numpy.flipud",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.floor",
"overflow_algorithm.solve_overflow_traffic_equation",
"create_overflow_networks.square_network_example",
"numpy.zeros",
"numpy.linspace",
"matplotlib.pyplot.figure",
"create_overflow_networks.w... | [((2160, 2171), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (2168, 2171), True, 'import numpy as np\n'), ((2264, 2292), 'numpy.linspace', 'np.linspace', (['(0)', '(1.0)', 'grid_nr'], {}), '(0, 1.0, grid_nr)\n', (2275, 2292), True, 'import numpy as np\n'), ((2314, 2342), 'numpy.linspace', 'np.linspace', (['(0)', '(1.0)', 'grid_nr'], {}), '(0, 1.0, grid_nr)\n', (2325, 2342), True, 'import numpy as np\n'), ((2370, 2398), 'numpy.zeros', 'np.zeros', (['[grid_nr, grid_nr]'], {}), '([grid_nr, grid_nr])\n', (2378, 2398), True, 'import numpy as np\n'), ((3044, 3075), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': 'SMALL_FONT'}), "('font', size=SMALL_FONT)\n", (3050, 3075), True, 'import matplotlib.pyplot as plt\n'), ((3107, 3144), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'titlesize': 'MEDIUM_FONT'}), "('axes', titlesize=MEDIUM_FONT)\n", (3113, 3144), True, 'import matplotlib.pyplot as plt\n'), ((3175, 3211), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'labelsize': 'LARGE_FONT'}), "('axes', labelsize=LARGE_FONT)\n", (3181, 3211), True, 'import matplotlib.pyplot as plt\n'), ((3242, 3280), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': 'MEDIUM_FONT'}), "('xtick', labelsize=MEDIUM_FONT)\n", (3248, 3280), True, 'import matplotlib.pyplot as plt\n'), ((3312, 3350), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': 'MEDIUM_FONT'}), "('ytick', labelsize=MEDIUM_FONT)\n", (3318, 3350), True, 'import matplotlib.pyplot as plt\n'), ((3382, 3420), 'matplotlib.pyplot.rc', 'plt.rc', (['"""legend"""'], {'fontsize': 'MEDIUM_FONT'}), "('legend', fontsize=MEDIUM_FONT)\n", (3388, 3420), True, 'import matplotlib.pyplot as plt\n'), ((3446, 3485), 'matplotlib.pyplot.rc', 'plt.rc', (['"""figure"""'], {'titlesize': 'MEDIUM_FONT'}), "('figure', titlesize=MEDIUM_FONT)\n", (3452, 3485), True, 'import matplotlib.pyplot as plt\n'), ((3606, 3636), 'numpy.flipud', 'np.flipud', (['overflow_proportion'], 
{}), '(overflow_proportion)\n', (3615, 3636), True, 'import numpy as np\n'), ((3650, 3662), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3660, 3662), True, 'import matplotlib.pyplot as plt\n'), ((3894, 3935), 'numpy.floor', 'np.floor', (['((grid_nr - 1) / (ticks_max - 1))'], {}), '((grid_nr - 1) / (ticks_max - 1))\n', (3902, 3935), True, 'import numpy as np\n'), ((3955, 4015), 'numpy.arange', 'np.arange', ([], {'start': '(0)', 'stop': 'grid_nr', 'step': 'ticks_step', 'dtype': 'int'}), '(start=0, stop=grid_nr, step=ticks_step, dtype=int)\n', (3964, 4015), True, 'import numpy as np\n'), ((4536, 4559), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\delta$"""'], {}), "('$\\\\delta$')\n", (4546, 4559), True, 'import matplotlib.pyplot as plt\n'), ((4564, 4589), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\epsilon$"""'], {}), "('$\\\\epsilon$')\n", (4574, 4589), True, 'import matplotlib.pyplot as plt\n'), ((4640, 4650), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4648, 4650), True, 'import matplotlib.pyplot as plt\n'), ((5452, 5463), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (5460, 5463), True, 'import numpy as np\n'), ((1136, 1159), 'numpy.floor', 'np.floor', (['(i / n * width)'], {}), '(i / n * width)\n', (1144, 1159), True, 'import numpy as np\n'), ((2200, 2211), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (2208, 2211), True, 'import numpy as np\n'), ((4067, 4089), 'numpy.flipud', 'np.flipud', (['ticks_index'], {}), '(ticks_index)\n', (4076, 4089), True, 'import numpy as np\n'), ((5564, 5585), 'create_overflow_networks.worst_case_example', 'worst_case_example', (['i'], {}), '(i)\n', (5582, 5585), False, 'from create_overflow_networks import worst_case_example\n'), ((5599, 5659), 'overflow_algorithm.solve_overflow_traffic_equation', 'solve_overflow_traffic_equation', (['alpha', 'mu', 'P', 'Q'], {'count': '(True)'}), '(alpha, mu, P, Q, count=True)\n', (5630, 5659), False, 'from overflow_algorithm import 
solve_overflow_traffic_equation\n'), ((2612, 2653), 'create_overflow_networks.square_network_example', 'square_network_example', (['m', 'delta', 'epsilon'], {}), '(m, delta, epsilon)\n', (2634, 2653), False, 'from create_overflow_networks import square_network_example\n'), ((2673, 2721), 'overflow_algorithm.solve_overflow_traffic_equation', 'solve_overflow_traffic_equation', (['alpha', 'mu', 'P', 'Q'], {}), '(alpha, mu, P, Q)\n', (2704, 2721), False, 'from overflow_algorithm import solve_overflow_traffic_equation\n')] |
import numpy as np
import pytest
from keras.utils.test_utils import layer_test
from keras import layers
from keras.models import Sequential
@pytest.mark.parametrize(
    'padding,stride,data_format',
    [(padding, stride, data_format)
     for padding in ['valid', 'same']
     for stride in [1, 2]
     for data_format in ['channels_first', 'channels_last']]
)
def test_maxpooling_1d(padding, stride, data_format):
    """Smoke-test MaxPooling1D over all padding/stride/data_format combos."""
    layer_test(layers.MaxPooling1D,
               kwargs={'strides': stride,
                       'padding': padding,
                       'data_format': data_format},
               input_shape=(3, 5, 4))
@pytest.mark.parametrize(
    'strides',
    [(1, 1), (2, 3)]
)
def test_maxpooling_2d(strides):
    """Smoke-test MaxPooling2D with valid padding for both stride settings."""
    pool_size = (3, 3)
    layer_test(layers.MaxPooling2D,
               kwargs={'strides': strides,
                       'padding': 'valid',
                       'pool_size': pool_size},
               input_shape=(3, 5, 6, 4))
@pytest.mark.parametrize(
    'strides,data_format,input_shape',
    [(2, None, (3, 11, 12, 10, 4)),
     (3, 'channels_first', (3, 4, 11, 12, 10))]
)
def test_maxpooling_3d(strides, data_format, input_shape):
    """Smoke-test MaxPooling3D for default and channels_first layouts."""
    pool_size = (3, 3, 3)
    layer_test(layers.MaxPooling3D,
               kwargs={'strides': strides,
                       'padding': 'valid',
                       'data_format': data_format,
                       'pool_size': pool_size},
               input_shape=input_shape)
@pytest.mark.parametrize(
    'padding,stride,data_format',
    [(padding, stride, data_format)
     for padding in ['valid', 'same']
     for stride in [1, 2]
     for data_format in ['channels_first', 'channels_last']]
)
def test_averagepooling_1d(padding, stride, data_format):
    """Smoke-test AveragePooling1D over all padding/stride/data_format combos."""
    layer_test(layers.AveragePooling1D,
               kwargs={'strides': stride,
                       'padding': padding,
                       'data_format': data_format},
               input_shape=(3, 5, 4))
@pytest.mark.parametrize(
    'strides,padding,data_format,input_shape',
    [((2, 2), 'same', None, (3, 5, 6, 4)),
     ((2, 2), 'valid', None, (3, 5, 6, 4))]
)
def test_averagepooling_2d(strides, padding, data_format, input_shape):
    """Smoke-test AveragePooling2D with same and valid padding."""
    layer_test(layers.AveragePooling2D,
               kwargs={'strides': strides,
                       'padding': padding,
                       'pool_size': (2, 2),
                       'data_format': data_format},
               input_shape=input_shape)
@pytest.mark.parametrize(
    'strides,data_format,input_shape',
    [(2, None, (3, 11, 12, 10, 4)),
     (3, 'channels_first', (3, 4, 11, 12, 10))]
)
def test_averagepooling_3d(strides, data_format, input_shape):
    """Smoke-test AveragePooling3D for default and channels_first layouts."""
    pool_size = (3, 3, 3)
    layer_test(layers.AveragePooling3D,
               kwargs={'strides': strides,
                       'padding': 'valid',
                       'data_format': data_format,
                       'pool_size': pool_size},
               input_shape=input_shape)
@pytest.mark.parametrize(
    'data_format,pooling_class',
    [(data_format, pooling_class)
     for data_format in ['channels_first', 'channels_last']
     for pooling_class in [layers.GlobalMaxPooling1D,
                           layers.GlobalAveragePooling1D]]
)
def test_globalpooling_1d(data_format, pooling_class):
    """Smoke-test 1D global max/average pooling for both data formats."""
    layer_test(pooling_class,
               kwargs={'data_format': data_format},
               input_shape=(3, 4, 5))
def test_globalpooling_1d_supports_masking():
    """GlobalAveragePooling1D must average only over unmasked timesteps."""
    # Test GlobalAveragePooling1D supports masking
    model = Sequential()
    model.add(layers.Masking(mask_value=0., input_shape=(3, 4)))
    model.add(layers.GlobalAveragePooling1D())
    model.compile(loss='mae', optimizer='adam')
    model_input = np.random.randint(low=1, high=5, size=(2, 3, 4))
    # Zero out (and thus mask) every timestep after the first in sample 0.
    model_input[0, 1:, :] = 0
    output = model.predict(model_input)
    # With masking, sample 0's average equals its single unmasked timestep.
    assert np.array_equal(output[0], model_input[0, 0, :])
@pytest.mark.parametrize(
    'data_format,pooling_class',
    [(data_format, pooling_class)
     for data_format in ['channels_first', 'channels_last']
     for pooling_class in [layers.GlobalMaxPooling2D,
                           layers.GlobalAveragePooling2D]]
)
def test_globalpooling_2d(data_format, pooling_class):
    """Smoke-test 2D global max/average pooling for both data formats."""
    layer_test(pooling_class,
               kwargs={'data_format': data_format},
               input_shape=(3, 4, 5, 6))
@pytest.mark.parametrize(
    'data_format,pooling_class',
    [(data_format, pooling_class)
     for data_format in ['channels_first', 'channels_last']
     for pooling_class in [layers.GlobalMaxPooling3D,
                           layers.GlobalAveragePooling3D]]
)
def test_globalpooling_3d(data_format, pooling_class):
    """Smoke-test 3D global max/average pooling for both data formats."""
    layer_test(pooling_class,
               kwargs={'data_format': data_format},
               input_shape=(3, 4, 3, 4, 3))
if __name__ == '__main__':
    # Run this module's tests directly via pytest.
    pytest.main([__file__])
| [
"keras.layers.Masking",
"keras.layers.GlobalAveragePooling1D",
"keras.utils.test_utils.layer_test",
"keras.models.Sequential",
"pytest.main",
"pytest.mark.parametrize",
"numpy.random.randint",
"numpy.array_equal"
] | [((144, 348), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""padding,stride,data_format"""', "[(padding, stride, data_format) for padding in ['valid', 'same'] for stride in\n [1, 2] for data_format in ['channels_first', 'channels_last']]"], {}), "('padding,stride,data_format', [(padding, stride,\n data_format) for padding in ['valid', 'same'] for stride in [1, 2] for\n data_format in ['channels_first', 'channels_last']])\n", (167, 348), False, 'import pytest\n'), ((634, 686), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""strides"""', '[(1, 1), (2, 3)]'], {}), "('strides', [(1, 1), (2, 3)])\n", (657, 686), False, 'import pytest\n'), ((967, 1106), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""strides,data_format,input_shape"""', "[(2, None, (3, 11, 12, 10, 4)), (3, 'channels_first', (3, 4, 11, 12, 10))]"], {}), "('strides,data_format,input_shape', [(2, None, (3, \n 11, 12, 10, 4)), (3, 'channels_first', (3, 4, 11, 12, 10))])\n", (990, 1106), False, 'import pytest\n'), ((1466, 1670), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""padding,stride,data_format"""', "[(padding, stride, data_format) for padding in ['valid', 'same'] for stride in\n [1, 2] for data_format in ['channels_first', 'channels_last']]"], {}), "('padding,stride,data_format', [(padding, stride,\n data_format) for padding in ['valid', 'same'] for stride in [1, 2] for\n data_format in ['channels_first', 'channels_last']])\n", (1489, 1670), False, 'import pytest\n'), ((1964, 2113), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""strides,padding,data_format,input_shape"""', "[((2, 2), 'same', None, (3, 5, 6, 4)), ((2, 2), 'valid', None, (3, 5, 6, 4))]"], {}), "('strides,padding,data_format,input_shape', [((2, 2),\n 'same', None, (3, 5, 6, 4)), ((2, 2), 'valid', None, (3, 5, 6, 4))])\n", (1987, 2113), False, 'import pytest\n'), ((2462, 2601), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""strides,data_format,input_shape"""', 
"[(2, None, (3, 11, 12, 10, 4)), (3, 'channels_first', (3, 4, 11, 12, 10))]"], {}), "('strides,data_format,input_shape', [(2, None, (3, \n 11, 12, 10, 4)), (3, 'channels_first', (3, 4, 11, 12, 10))])\n", (2485, 2601), False, 'import pytest\n'), ((2970, 3202), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data_format,pooling_class"""', "[(data_format, pooling_class) for data_format in ['channels_first',\n 'channels_last'] for pooling_class in [layers.GlobalMaxPooling1D,\n layers.GlobalAveragePooling1D]]"], {}), "('data_format,pooling_class', [(data_format,\n pooling_class) for data_format in ['channels_first', 'channels_last'] for\n pooling_class in [layers.GlobalMaxPooling1D, layers.\n GlobalAveragePooling1D]])\n", (2993, 3202), False, 'import pytest\n'), ((3896, 4128), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data_format,pooling_class"""', "[(data_format, pooling_class) for data_format in ['channels_first',\n 'channels_last'] for pooling_class in [layers.GlobalMaxPooling2D,\n layers.GlobalAveragePooling2D]]"], {}), "('data_format,pooling_class', [(data_format,\n pooling_class) for data_format in ['channels_first', 'channels_last'] for\n pooling_class in [layers.GlobalMaxPooling2D, layers.\n GlobalAveragePooling2D]])\n", (3919, 4128), False, 'import pytest\n'), ((4344, 4576), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data_format,pooling_class"""', "[(data_format, pooling_class) for data_format in ['channels_first',\n 'channels_last'] for pooling_class in [layers.GlobalMaxPooling3D,\n layers.GlobalAveragePooling3D]]"], {}), "('data_format,pooling_class', [(data_format,\n pooling_class) for data_format in ['channels_first', 'channels_last'] for\n pooling_class in [layers.GlobalMaxPooling3D, layers.\n GlobalAveragePooling3D]])\n", (4367, 4576), False, 'import pytest\n'), ((424, 558), 'keras.utils.test_utils.layer_test', 'layer_test', (['layers.MaxPooling1D'], {'kwargs': "{'strides': stride, 'padding': padding, 
'data_format': data_format}", 'input_shape': '(3, 5, 4)'}), "(layers.MaxPooling1D, kwargs={'strides': stride, 'padding':\n padding, 'data_format': data_format}, input_shape=(3, 5, 4))\n", (434, 558), False, 'from keras.utils.test_utils import layer_test\n'), ((757, 891), 'keras.utils.test_utils.layer_test', 'layer_test', (['layers.MaxPooling2D'], {'kwargs': "{'strides': strides, 'padding': 'valid', 'pool_size': pool_size}", 'input_shape': '(3, 5, 6, 4)'}), "(layers.MaxPooling2D, kwargs={'strides': strides, 'padding':\n 'valid', 'pool_size': pool_size}, input_shape=(3, 5, 6, 4))\n", (767, 891), False, 'from keras.utils.test_utils import layer_test\n'), ((1206, 1371), 'keras.utils.test_utils.layer_test', 'layer_test', (['layers.MaxPooling3D'], {'kwargs': "{'strides': strides, 'padding': 'valid', 'data_format': data_format,\n 'pool_size': pool_size}", 'input_shape': 'input_shape'}), "(layers.MaxPooling3D, kwargs={'strides': strides, 'padding':\n 'valid', 'data_format': data_format, 'pool_size': pool_size},\n input_shape=input_shape)\n", (1216, 1371), False, 'from keras.utils.test_utils import layer_test\n'), ((1750, 1888), 'keras.utils.test_utils.layer_test', 'layer_test', (['layers.AveragePooling1D'], {'kwargs': "{'strides': stride, 'padding': padding, 'data_format': data_format}", 'input_shape': '(3, 5, 4)'}), "(layers.AveragePooling1D, kwargs={'strides': stride, 'padding':\n padding, 'data_format': data_format}, input_shape=(3, 5, 4))\n", (1760, 1888), False, 'from keras.utils.test_utils import layer_test\n'), ((2201, 2368), 'keras.utils.test_utils.layer_test', 'layer_test', (['layers.AveragePooling2D'], {'kwargs': "{'strides': strides, 'padding': padding, 'pool_size': (2, 2), 'data_format':\n data_format}", 'input_shape': 'input_shape'}), "(layers.AveragePooling2D, kwargs={'strides': strides, 'padding':\n padding, 'pool_size': (2, 2), 'data_format': data_format}, input_shape=\n input_shape)\n", (2211, 2368), False, 'from keras.utils.test_utils import 
layer_test\n'), ((2706, 2875), 'keras.utils.test_utils.layer_test', 'layer_test', (['layers.AveragePooling3D'], {'kwargs': "{'strides': strides, 'padding': 'valid', 'data_format': data_format,\n 'pool_size': pool_size}", 'input_shape': 'input_shape'}), "(layers.AveragePooling3D, kwargs={'strides': strides, 'padding':\n 'valid', 'data_format': data_format, 'pool_size': pool_size},\n input_shape=input_shape)\n", (2716, 2875), False, 'from keras.utils.test_utils import layer_test\n'), ((3296, 3386), 'keras.utils.test_utils.layer_test', 'layer_test', (['pooling_class'], {'kwargs': "{'data_format': data_format}", 'input_shape': '(3, 4, 5)'}), "(pooling_class, kwargs={'data_format': data_format}, input_shape=\n (3, 4, 5))\n", (3306, 3386), False, 'from keras.utils.test_utils import layer_test\n'), ((3523, 3535), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3533, 3535), False, 'from keras.models import Sequential\n'), ((3715, 3763), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(5)', 'size': '(2, 3, 4)'}), '(low=1, high=5, size=(2, 3, 4))\n', (3732, 3763), True, 'import numpy as np\n'), ((3845, 3892), 'numpy.array_equal', 'np.array_equal', (['output[0]', 'model_input[0, 0, :]'], {}), '(output[0], model_input[0, 0, :])\n', (3859, 3892), True, 'import numpy as np\n'), ((4222, 4315), 'keras.utils.test_utils.layer_test', 'layer_test', (['pooling_class'], {'kwargs': "{'data_format': data_format}", 'input_shape': '(3, 4, 5, 6)'}), "(pooling_class, kwargs={'data_format': data_format}, input_shape=\n (3, 4, 5, 6))\n", (4232, 4315), False, 'from keras.utils.test_utils import layer_test\n'), ((4670, 4766), 'keras.utils.test_utils.layer_test', 'layer_test', (['pooling_class'], {'kwargs': "{'data_format': data_format}", 'input_shape': '(3, 4, 3, 4, 3)'}), "(pooling_class, kwargs={'data_format': data_format}, input_shape=\n (3, 4, 3, 4, 3))\n", (4680, 4766), False, 'from keras.utils.test_utils import layer_test\n'), ((4825, 4848), 
'pytest.main', 'pytest.main', (['[__file__]'], {}), '([__file__])\n', (4836, 4848), False, 'import pytest\n'), ((3550, 3600), 'keras.layers.Masking', 'layers.Masking', ([], {'mask_value': '(0.0)', 'input_shape': '(3, 4)'}), '(mask_value=0.0, input_shape=(3, 4))\n', (3564, 3600), False, 'from keras import layers\n'), ((3615, 3646), 'keras.layers.GlobalAveragePooling1D', 'layers.GlobalAveragePooling1D', ([], {}), '()\n', (3644, 3646), False, 'from keras import layers\n')] |
from dataclasses import astuple, dataclass
import numpy as np
import torch
from copy import deepcopy
class Binarizer:
    """Callable that thresholds a tensor according to a comparison spec.

    ``binarization`` is either None (identity) or a pair (op, value) with
    op in {"neq", "eq", "gt", "lt"}.
    """
    def __init__(self, binarization):
        self.bin = binarization
    def __call__(self, tens):
        spec = self.bin
        if spec is None:
            return tens
        op, value = spec[0], spec[1]
        if op == "neq":
            return tens != value
        if op == "eq":
            return tens == value
        if op == "gt":
            return tens > value
        if op == "lt":
            return tens < value
class PartialSpecification:
    """Wrap a callable together with preset keyword arguments.

    Calling the instance invokes ``f`` with the preset kwargs plus any
    extra kwargs; preset values are also exposed as attributes and via
    subscripting.
    """
    def __init__(self, f, **kwargs):
        self.f = f
        self.constr_kwargs = kwargs
        for name, value in kwargs.items():
            setattr(self, name, value)
    def __call__(self, **kwargs):
        # Duplicate keys raise TypeError, matching plain double-splat calls.
        return self.f(**self.constr_kwargs, **kwargs)
    def __getitem__(self, k):
        return self.constr_kwargs[k]
@dataclass
class Translation:
    """2D translation (x, y) supporting vector arithmetic and MIP snapping."""
    x: float
    y: float
    def __iter__(self):
        # Enables unpacking / tuple(t): yields (x, y).
        return iter(astuple(self))
    def __add__(self, T):
        return Translation(self.x + T.x, self.y + T.y)
    def __sub__(self, T):
        return Translation(self.x - T.x, self.y - T.y)
    def __mul__(self, scalar):
        return Translation(self.x * scalar, self.y * scalar)
    def __rmul__(self, scalar):
        return self.__mul__(scalar)
    def __floordiv__(self, scalar):
        return Translation(self.x // scalar, self.y // scalar)
    def __truediv__(self, scalar):
        return Translation(self.x / scalar, self.y / scalar)
    def to_tensor(self, **kwargs):
        # Shape (1, 2, 1, 1): batch, (x, y) components, 1x1 spatial extent.
        return torch.tensor([[[[self.x]], [[self.y]]]], **kwargs)
    def round(self, ndigits=None):
        return Translation(round(self.x, ndigits), round(self.y, ndigits))
    def copy(self):
        return deepcopy(self)
    def round_to_mip(self, src_mip, tgt_mip):
        # Snap each component to the pixel grid of a coarser MIP level.
        if tgt_mip is None:
            return self.copy()
        elif tgt_mip <= src_mip:
            return Translation(self.x, self.y)  # Return copy
        else:
            # Round down to a multiple of 2**(tgt_mip - src_mip).
            snap_factor = 2 ** (tgt_mip - src_mip)
            return (self // snap_factor) * snap_factor
def percentile_trans_adjuster(field, h=25, l=75, unaligned_img=None):
    """Estimate a representative integer translation from a displacement field.

    Takes the midpoint of the h-th and l-th percentile displacement over
    pixels where both field components are nonzero, optionally also
    excluding pixels with no tissue in ``unaligned_img``.
    """
    if field is None:
        result = Translation(0, 0)
    else:
        # Keep only pixels where both displacement components are nonzero.
        nonzero_field_mask = (field[:, 0] != 0) & (field[:, 1] != 0)
        if unaligned_img is not None:
            # NOTE(review): assumes ``field`` is a torchfields-style tensor
            # (exposes .field()/.from_pixels()) -- confirm against callers.
            no_tissue = field.field().from_pixels()(unaligned_img) == 0
            nonzero_field_mask[..., no_tissue.squeeze()] = False
        nonzero_field = field[..., nonzero_field_mask.squeeze()].squeeze()
        if nonzero_field.sum() == 0:
            result = Translation(0, 0)
        else:
            low_l = percentile(nonzero_field, l)
            high_l = percentile(nonzero_field, h)
            # Midpoint of the two percentile vectors, truncated to ints.
            mid = 0.5 * (low_l + high_l)
            result = Translation(x=int(mid[0]), y=int(mid[1]))
    return result
def percentile(field, q):
    # https://gist.github.com/spezold/42a451682422beb42bc43ad0c0967a30
    """Return the ``q``-th percentile along dim 1 of the input tensor.

    CAUTION:
    * Needs PyTorch >= 1.1.0, as ``torch.kthvalue()`` is used.
    * Values are not interpolated, matching
      ``numpy.percentile(..., interpolation="nearest")``.

    :param field: Input tensor.
    :param q: Percentile to compute, between 0 and 100 inclusive.
    :return: Resulting value (scalar).
    """
    # kthvalue() is one-based (k=1 is the smallest value), hence the
    # leading 1. float(q) keeps round() returning an int even for
    # np.float32 inputs.
    rank = 1 + round(0.01 * float(q) * (field.shape[1] - 1))
    return field.kthvalue(rank, dim=1).values
def crop(**kwargs):
    # NOTE(review): dead code -- this placeholder is shadowed by the
    # crop(data, c) definition later in this module.
    raise NotImplementedError
def expand_to_dims(tens, dims):
    """Left-pad ``tens`` with singleton dimensions until it has ``dims`` dims."""
    current_dims = len(tens.shape)
    assert current_dims <= dims
    n_missing = dims - current_dims
    return tens[(None,) * n_missing]
def cast_tensor_type(tens, dtype):
    """Cast ``tens`` via its named conversion method.

    Args:
        tens: pytorch tensor.
        dtype: name of a tensor conversion method, e.g. 'float', 'int',
            'byte'; None is a no-op.
    """
    if dtype is None:
        return tens
    assert hasattr(tens, dtype)
    converter = getattr(tens, dtype)
    return converter()
def read_mask_list(mask_list, bcube, mip):
    """OR together the boolean masks read from every layer in ``mask_list``.

    Each entry's ``read(bcube=..., mip=...)`` result is cast to bool and
    accumulated with logical OR. Returns None for an empty list.
    """
    combined = None
    for mask_layer in mask_list:
        data = mask_layer.read(bcube=bcube, mip=mip).to(torch.bool)
        combined = data if combined is None else (combined | data)
    return combined
def crop(data, c):
    """Symmetrically crop ``c`` pixels from each spatial edge of ``data``.

    Supports two layouts:
      * image-like: the last two dims are equal spatial dims
      * displacement field: spatial dims at [-3, -2] with a trailing
        2-component axis
    ``c == 0`` returns ``data`` unchanged.

    :raises ValueError: for tensors matching neither layout.
        (BUGFIX: the original fell off the end and silently returned
        ``None`` in that case.)
    """
    if c == 0:
        return data
    if data.shape[-1] == data.shape[-2]:
        # Image-like tensor: crop the last two (spatial) dims.
        return data[..., c:-c, c:-c]
    if data.shape[-2] == data.shape[-3] and data.shape[-1] == 2:  # field
        # Displacement field: crop spatial dims, keep the component axis.
        return data[..., c:-c, c:-c, :]
    raise ValueError(
        "crop: unrecognized tensor layout with shape %s" % (tuple(data.shape),)
    )
def coarsen_mask(mask, n=1, flip=False):
    """Coarsen (grow) a mask by ``n`` passes of a 3x3 neighbor count.

    Each pass keeps a pixel on when its 3x3 neighborhood contains more
    than one set pixel (conv result > 1); note this also removes fully
    isolated pixels. With ``flip=True`` the mask is inverted before and
    after each pass, so the complement is coarsened instead.

    Assumes ``mask`` is shaped for ``conv2d`` (N, C, H, W) — TODO confirm
    against callers.

    BUGFIX/cleanup: removed the unused ``torch.nn.Parameter`` wrapper and
    the no-op ``mask = mask`` statement; behavior is unchanged.
    """
    # All-ones 3x3 kernel on the mask's device (conv2d needs matching devices).
    kernel_var = torch.ones((1, 1, 3, 3), dtype=torch.float32, device=mask.device)
    for _ in range(n):
        if flip:
            mask = mask.logical_not()
        mask = torch.nn.functional.conv2d(mask.float(), kernel_var, padding=1) > 1
        if flip:
            mask = mask.logical_not()
    return mask
def zeros(*args, **kwargs):
    """Thin proxy for :func:`torch.zeros`; forwards all arguments unchanged."""
    return torch.zeros(*args, **kwargs)
| [
"torch.FloatTensor",
"numpy.array",
"torch.tensor",
"torch.nn.Parameter",
"dataclasses.astuple",
"copy.deepcopy",
"torch.zeros"
] | [((4869, 4912), 'numpy.array', 'np.array', (['[[1, 1, 1], [1, 1, 1], [1, 1, 1]]'], {}), '([[1, 1, 1], [1, 1, 1], [1, 1, 1]])\n', (4877, 4912), True, 'import numpy as np\n'), ((5030, 5086), 'torch.nn.Parameter', 'torch.nn.Parameter', ([], {'data': 'kernel_var', 'requires_grad': '(False)'}), '(data=kernel_var, requires_grad=False)\n', (5048, 5086), False, 'import torch\n'), ((5381, 5409), 'torch.zeros', 'torch.zeros', (['*args'], {}), '(*args, **kwargs)\n', (5392, 5409), False, 'import torch\n'), ((1605, 1655), 'torch.tensor', 'torch.tensor', (['[[[[self.x]], [[self.y]]]]'], {}), '([[[[self.x]], [[self.y]]]], **kwargs)\n', (1617, 1655), False, 'import torch\n'), ((1803, 1817), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (1811, 1817), False, 'from copy import deepcopy\n'), ((1016, 1029), 'dataclasses.astuple', 'astuple', (['self'], {}), '(self)\n', (1023, 1029), False, 'from dataclasses import astuple, dataclass\n'), ((4940, 4965), 'torch.FloatTensor', 'torch.FloatTensor', (['kernel'], {}), '(kernel)\n', (4957, 4965), False, 'import torch\n')] |
import torch
import numpy as np
from utils import get_2d_joints, get_all_32joints
from utils.data import un_normalize_data, H36M_NAMES
# Flattened-coordinate indices of the H36M joints actually used by the model.
# 2D layout is 32 joints x 2 coords; 3D layout is 32 joints x 3 coords.
dim_to_use_2d = [0, 1, 2, 3, 4, 5, 6, 7, 12, 13, 14, 15, 16, 17, 24, 25, 26, 27, 30, 31, 34, 35, 36, 37,
                 38, 39, 50, 51, 52, 53, 54, 55]
dim_to_use_3d = [3, 4, 5, 6, 7, 8, 9, 10, 11, 18, 19, 20, 21, 22, 23, 24, 25, 26, 36, 37, 38, 39, 40, 41,
                 42, 43, 44, 45, 46, 47, 51, 52, 53, 54, 55, 56, 57, 58, 59, 75, 76, 77, 78, 79, 80, 81, 82, 83]
# Complements of the above within the full flattened coordinate range.
dim_to_ignore_2d = np.delete(np.arange(len(H36M_NAMES) * 2), dim_to_use_2d)
dim_to_ignore_3d = np.delete(np.arange(len(H36M_NAMES) * 3), dim_to_use_3d)
# Precomputed normalization statistics for 3D poses, loaded at import time
# (requires models/*.npy to exist relative to the working directory).
data_mean_3d = np.load('models/mean_3d.npy')
data_std_3d = np.load('models/std_3d.npy')
def get_data_sequence(batch, device, hg_model, model, images, joints_2d, joints_3d, conf):
    """Run the 2D->3D pose pipeline on one image batch, appending results.

    :param batch: image batch tensor, moved onto ``device`` here.
    :param device: torch device for both networks.
    :param hg_model: hourglass network producing 2D joint estimates.
    :param model: 2D-to-3D lifting network.
    :param images: output list; the raw batch (as numpy) is appended.
    :param joints_2d: output list; predicted 2D poses are appended.
    :param joints_3d: output list; un-normalized 3D poses are appended.
    :param conf: unused in this function (kept for a uniform signature
        with get_data_human) — NOTE(review): confirm before removing.
    """
    batch = batch.to(device)
    images.append(batch.detach().cpu().numpy())
    predicted_2d_poses = get_all_32joints(get_2d_joints(hg_model, batch), 2, dim_to_ignore_2d)  # batch x 16 x 2
    joints_2d.append(predicted_2d_poses)
    # Normalize
    # data_mean = np.mean(predicted_2d_poses, axis=0)
    # data_std = np.std(predicted_2d_poses, axis=0)
    # Keep only the coordinate columns the lifting network was trained on.
    predicted_2d_poses = predicted_2d_poses[:, dim_to_use_2d]
    # mu = data_mean[dim_to_use_2d]
    # stddev = data_std[dim_to_use_2d]
    # predicted_2d_poses = np.divide((predicted_2d_poses - mu), stddev)
    # Apply our model
    poses_2d = torch.tensor(predicted_2d_poses).to(device, torch.float)
    poses_3d = model(poses_2d).detach().cpu().numpy()
    # poses_3d = get_all_32joints(poses_3d, 3, dim_to_ignore_3d)
    # Restore physical units/offsets using the precomputed 3D statistics.
    poses_3d = un_normalize_data(poses_3d, data_mean_3d,
                                 data_std_3d, dim_to_ignore_3d)
    # poses_3d = poses_3d.reshape(poses_3d.shape[0], 16, 3)
    joints_3d.append(poses_3d)
def get_data_human(batch, device, human_dataset, model, images, joints_2d, joints_3d, conf):
    """Lift a Human3.6M batch to 3D and append un-normalized results.

    :param batch: tuple (data_2d, data_3d, root_position, keys); data_3d,
        root_position and keys are unpacked but unused here.
    :param device: torch device for the lifting network.
    :param human_dataset: dataset object carrying the normalization
        statistics (data_mean_2d/3d, data_std_2d/3d, dim_to_ignore_2d/3d).
    :param model: 2D-to-3D lifting network.
    :param images: unused output list (kept for signature symmetry with
        get_data_sequence) — NOTE(review): confirm before removing.
    :param joints_2d: output list; un-normalized 2D poses are appended.
    :param joints_3d: output list; un-normalized predicted 3D poses appended.
    :param conf: config; conf.eval.video_constraints selects whether a
        temporal window of frames is flattened into the model input.
    """
    data_2d, data_3d, root_position, keys = batch
    if conf.eval.video_constraints.use:
        # Pick the reference frame for reporting, but feed the whole
        # flattened temporal window to the model.
        data_2d_cur = data_2d[:, 0, conf.eval.video_constraints.frames_before]
        data_2d = data_2d[:, 0].reshape(data_2d.size(0), -1)
    else:
        data_2d_cur = data_2d
    predicted_3d = model(data_2d.to(device, torch.float)).detach().cpu().numpy()
    data_2d_un = un_normalize_data(data_2d_cur, human_dataset.data_mean_2d,
                                   human_dataset.data_std_2d, human_dataset.dim_to_ignore_2d)
    data_3d_pred_un = un_normalize_data(predicted_3d, human_dataset.data_mean_3d,
                                       human_dataset.data_std_3d, human_dataset.dim_to_ignore_3d)
    joints_2d.append(data_2d_un)
    joints_3d.append(data_3d_pred_un)
| [
"torch.tensor",
"utils.data.un_normalize_data",
"numpy.load",
"utils.get_2d_joints"
] | [((679, 708), 'numpy.load', 'np.load', (['"""models/mean_3d.npy"""'], {}), "('models/mean_3d.npy')\n", (686, 708), True, 'import numpy as np\n'), ((723, 751), 'numpy.load', 'np.load', (['"""models/std_3d.npy"""'], {}), "('models/std_3d.npy')\n", (730, 751), True, 'import numpy as np\n'), ((1636, 1708), 'utils.data.un_normalize_data', 'un_normalize_data', (['poses_3d', 'data_mean_3d', 'data_std_3d', 'dim_to_ignore_3d'], {}), '(poses_3d, data_mean_3d, data_std_3d, dim_to_ignore_3d)\n', (1653, 1708), False, 'from utils.data import un_normalize_data, H36M_NAMES\n'), ((2296, 2418), 'utils.data.un_normalize_data', 'un_normalize_data', (['data_2d_cur', 'human_dataset.data_mean_2d', 'human_dataset.data_std_2d', 'human_dataset.dim_to_ignore_2d'], {}), '(data_2d_cur, human_dataset.data_mean_2d, human_dataset.\n data_std_2d, human_dataset.dim_to_ignore_2d)\n', (2313, 2418), False, 'from utils.data import un_normalize_data, H36M_NAMES\n'), ((2471, 2594), 'utils.data.un_normalize_data', 'un_normalize_data', (['predicted_3d', 'human_dataset.data_mean_3d', 'human_dataset.data_std_3d', 'human_dataset.dim_to_ignore_3d'], {}), '(predicted_3d, human_dataset.data_mean_3d, human_dataset.\n data_std_3d, human_dataset.dim_to_ignore_3d)\n', (2488, 2594), False, 'from utils.data import un_normalize_data, H36M_NAMES\n'), ((964, 994), 'utils.get_2d_joints', 'get_2d_joints', (['hg_model', 'batch'], {}), '(hg_model, batch)\n', (977, 994), False, 'from utils import get_2d_joints, get_all_32joints\n'), ((1445, 1477), 'torch.tensor', 'torch.tensor', (['predicted_2d_poses'], {}), '(predicted_2d_poses)\n', (1457, 1477), False, 'import torch\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cv2, os, sys
import numpy as np
from abc import ABC, abstractmethod
def extractImage(path):
    """Load the image at ``path`` as a grayscale numpy array.

    :param path: filesystem path of the image.
    :return: grayscale image array.
    :raises FileNotFoundError: when the file is missing or unreadable.
        (BUGFIX: the original comment promised an error handler, but
        cv2.imread returns None silently on failure, which previously
        surfaced later as a confusing AttributeError.)
    """
    image = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    if image is None:
        raise FileNotFoundError("could not read image: " + str(path))
    return image
def checkImage(image):
    """
    Args:
        image: input image to be checked
    Returns:
        True when the image is strictly binary (every pixel 0 or 255)
    Raises:
        SystemExit (via sys.exit) for RGB, grayscale, all-black,
        or all-white images
    """
    if len(image.shape) > 2:
        print("ERROR: non-binary image (RGB)");
        sys.exit();
    smallest = image.min(axis=0).min(axis=0);  # lowest pixel value; should be 0 (black)
    largest = image.max(axis=0).max(axis=0);   # highest pixel value; should be 255 (white)
    if (smallest == 0 and largest == 0):
        print("ERROR: non-binary image (all black)");
        sys.exit();
    elif (smallest == 255 and largest == 255):
        print("ERROR: non-binary image (all white)");
        sys.exit();
    elif not np.isin(image, (0, 255)).all():
        # BUGFIX: the original only tested min/max, so an image containing
        # 0, 255 AND intermediate gray levels passed as "binary". Checking
        # every pixel also covers min>0 / max<255.
        print("ERROR: non-binary image (grayscale)");
        sys.exit();
    else:
        return True
class FGScale(ABC):
    """
    Abstract base class for foreground scaling (erosion or dilation)
    applied to a binary image before trimap generation.

    Attribute: binary image
    Method: scaling(image, iteration) — implemented by subclasses.
    """

    def __init__(self, image):
        # Binary image the subclass operates on.
        self.image = image

    @abstractmethod
    def scaling(self, image, iteration):
        """Apply the morphological operation ``iteration`` times."""
        ...
class Erosion(FGScale):
    """Shrink the white foreground of a binary image via morphological erosion."""

    def __init__(self, image):
        self.image = image

    def scaling(self, image, erosion):
        """Erode ``image`` ``erosion`` times with a 3x3 all-ones kernel.

        Gray pixels produced by smoothing are promoted back to white.
        Exits the process if the foreground is eroded away completely.
        """
        passes = int(erosion)
        kernel = np.ones((3, 3), np.uint8)  # odd-sized erosion kernel
        image = cv2.erode(image, kernel, iterations=passes)
        image = np.where(image > 0, 255, image)  # any gray-colored pixel becomes white (smoothing)
        # Error-handler to prevent entire foreground annihilation.
        if cv2.countNonZero(image) == 0:
            print("ERROR: foreground has been entirely eroded")
            sys.exit()
        return image
class Dilation(FGScale):
    """Grow the white foreground of a binary image via morphological dilation."""

    def __init__(self, image):
        self.image = image

    def scaling(self, image, dilation):
        """Dilate ``image`` ``dilation`` times with a 3x3 all-ones kernel.

        Gray pixels produced by smoothing are promoted back to white.
        Exits the process if the foreground swallows the whole frame.
        """
        passes = int(dilation)
        kernel = np.ones((3, 3), np.uint8)  # odd-sized dilation kernel
        image = cv2.dilate(image, kernel, iterations=passes)
        image = np.where(image > 0, 255, image)  # any gray-colored pixel becomes white (smoothing)
        # Error-handler to prevent entire foreground domination.
        total_pixels = image.shape[0] * image.shape[1]
        n_white_pix = np.sum(image == 255)
        if n_white_pix == total_pixels:
            print("ERROR: foreground has been entirely expanded")
            sys.exit()
        return image
#############################################
###            TESTING SECTION            ###
#############################################
# Manual smoke test: load a sample image, erode it twice, and display
# the result in an OpenCV window (blocks until a key is pressed).
if __name__ == '__main__':
    path = "./images/test_images/test_image_12.png"
    image = extractImage(path)
    unit01 = Erosion(image)
    new_image = unit01.scaling(image, 2)
    cv2.imshow('Displayed Image', new_image)
    cv2.waitKey(0)  # wait indefinitely for a keypress
    cv2.destroyAllWindows()
| [
"cv2.countNonZero",
"numpy.ones",
"numpy.where",
"cv2.erode",
"cv2.imshow",
"numpy.sum",
"cv2.destroyAllWindows",
"sys.exit",
"cv2.dilate",
"cv2.waitKey",
"cv2.imread"
] | [((213, 251), 'cv2.imread', 'cv2.imread', (['path', 'cv2.IMREAD_GRAYSCALE'], {}), '(path, cv2.IMREAD_GRAYSCALE)\n', (223, 251), False, 'import cv2, os, sys\n'), ((3314, 3354), 'cv2.imshow', 'cv2.imshow', (['"""Displayed Image"""', 'new_image'], {}), "('Displayed Image', new_image)\n", (3324, 3354), False, 'import cv2, os, sys\n'), ((3359, 3373), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3370, 3373), False, 'import cv2, os, sys\n'), ((3378, 3401), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3399, 3401), False, 'import cv2, os, sys\n'), ((560, 570), 'sys.exit', 'sys.exit', ([], {}), '()\n', (568, 570), False, 'import cv2, os, sys\n'), ((854, 864), 'sys.exit', 'sys.exit', ([], {}), '()\n', (862, 864), False, 'import cv2, os, sys\n'), ((1648, 1673), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (1655, 1673), True, 'import numpy as np\n'), ((1747, 1791), 'cv2.erode', 'cv2.erode', (['image', 'kernel'], {'iterations': 'erosion'}), '(image, kernel, iterations=erosion)\n', (1756, 1791), False, 'import cv2, os, sys\n'), ((1835, 1866), 'numpy.where', 'np.where', (['(image > 0)', '(255)', 'image'], {}), '(image > 0, 255, image)\n', (1843, 1866), True, 'import numpy as np\n'), ((2325, 2350), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (2332, 2350), True, 'import numpy as np\n'), ((2427, 2473), 'cv2.dilate', 'cv2.dilate', (['image', 'kernel'], {'iterations': 'dilation'}), '(image, kernel, iterations=dilation)\n', (2437, 2473), False, 'import cv2, os, sys\n'), ((2518, 2549), 'numpy.where', 'np.where', (['(image > 0)', '(255)', 'image'], {}), '(image > 0, 255, image)\n', (2526, 2549), True, 'import numpy as np\n'), ((2804, 2824), 'numpy.sum', 'np.sum', (['(image == 255)'], {}), '(image == 255)\n', (2810, 2824), True, 'import numpy as np\n'), ((975, 985), 'sys.exit', 'sys.exit', ([], {}), '()\n', (983, 985), False, 'import cv2, os, sys\n'), ((2009, 2032), 
'cv2.countNonZero', 'cv2.countNonZero', (['image'], {}), '(image)\n', (2025, 2032), False, 'import cv2, os, sys\n'), ((2116, 2126), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2124, 2126), False, 'import cv2, os, sys\n'), ((2952, 2962), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2960, 2962), False, 'import cv2, os, sys\n'), ((1092, 1102), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1100, 1102), False, 'import cv2, os, sys\n')] |
import os, glob
import subprocess
from os import listdir
from os.path import isfile, join
import time
import traceback
import shutil
import numpy as np
import csv
import time
def checkForUpdate(count, lastUpdate):
    """Block until a newer per-server KNN dataset appears on HDFS, then fetch it.

    Polls hdfs://$HDFS:9000/server_$SERVERNUM/run_<i>/knndata for runs
    i in (lastUpdate, count), sleeping 10s between polls. When a run is
    found, its knndata is copied over the local 'knndatasetGI' file.

    :param count: exclusive upper bound on run numbers to check.
    :param lastUpdate: last run number already fetched.
    :return: the run number fetched (always > -1; loops forever otherwise).
    """
    sn = os.environ['SERVERNUM']
    hdfs = os.environ['HDFS']
    updateCount = -1
    print("UPDATE CHECK: SERVER")
    print([count, lastUpdate])
    while(updateCount == -1):
        time.sleep(10)
        print('Checking')
        for i in range(lastUpdate+1, count):
            print([i,count])
            print("In Check "+str(i))
            # `hadoop fs -test -e` prints 0 via `echo $?` only when the path exists.
            out = os.popen('/opt/hadoop/bin/hadoop fs -test -e hdfs://'+hdfs+':9000/server_'+
                    sn+'/run_'+str(i)+'/knndata && echo $?').read()
            try:
                if(int(out) == 0):
                    updateCount = i
                    print("Update Ready: "+str(i))
            except:
                # int('') fails when the test printed nothing (path missing);
                # stop scanning this round and sleep again.
                traceback.print_exc()
                print("Update Not Ready")
                break;
        if(updateCount > -1):
            try:
                os.remove('knndatasetGI')
            except:
                pass  # first fetch: no local copy to remove yet
            out = os.popen('/opt/hadoop/bin/hadoop fs -copyToLocal hdfs://'+hdfs+':9000/server_'+
                    sn+'/run_'+str(updateCount)+'/knndata knndatasetGI').read()
    return updateCount
def updateLocal():
    """Append per-image feature rows from tmp/*.csv to the local KNN dataset.

    Reads 'knndatasetGI' (CSV) from the current directory, then for every
    JPG in ./tmp/ loads the matching <name>.csv feature row (17 floats —
    TODO confirm width against the feature extractor), stacks it onto the
    dataset, and rewrites 'knndatasetGI'.

    Side effects: changes into ./tmp/ and back; overwrites 'knndatasetGI'.
    """
    pwd = os.getcwd()
    print(pwd)
    knndata = np.genfromtxt('knndatasetGI', delimiter=',')
    if knndata.size and knndata.ndim == 1:
        # BUGFIX: a single-row dataset loads as 1-D, which would make the
        # later concatenate with a (1, 17) row fail. Lift it to 2-D.
        knndata = knndata.reshape(1, -1)
    os.chdir(pwd + '/tmp/')
    for f in glob.glob('*'):
        print(f)
    for f in glob.glob('*.JPG'):
        fname = f.split('.')[0]
        # Feature row was precomputed by the parser into <image>.csv.
        with open(pwd + '/tmp/' + fname + '.csv') as csvf:
            data = list(csv.reader(csvf))[0]
        line = [float(d) for d in data]
        npl = np.asarray(line).reshape((1, 17))
        if knndata.size == 0:
            # BUGFIX: the original tested `knndata == []`, which compares an
            # ndarray elementwise and never takes this branch reliably.
            knndata = npl
        else:
            knndata = np.concatenate((knndata, npl))
        print(npl.shape)
        print(knndata.shape)
    os.chdir(pwd)
    np.savetxt("knndatasetGI", knndata, delimiter=',')
def getCount():
    """Count consecutive completed runs for this server on HDFS.

    Probes hdfs://$HDFS:9000/server_$SERVERNUM/run_<k>/knndata for
    k = 0, 1, 2, ... and returns the highest k that exists, or -1 when
    run_0 is absent.
    """
    sn = os.environ['SERVERNUM']
    hdfs = os.environ['HDFS']
    updateCount = -1
    check = 0
    while(True):
        print("Looking for update: "+str(check))
        # `hadoop fs -test -e` prints 0 via `echo $?` only when the path exists.
        out = os.popen('/opt/hadoop/bin/hadoop fs -test -e hdfs://'+hdfs+':9000/server_'+
                sn+'/run_'+str(check)+'/knndata && echo $?').read()
        try:
            if(int(out) == 0):
                updateCount = check
                check += 1
            else:
                print('No Update Ready: '+str(check))
                return updateCount
        except:
            # int('') fails when nothing was echoed (path missing).
            print('No Update Found: '+str(check))
            traceback.print_exc()
            return updateCount
    return -1
def getCountGlobal():
    """Count consecutive completed global runs on HDFS.

    Probes hdfs://$HDFS:9000/global/run_<k>/knndata for k = 0, 1, 2, ...
    and returns the highest k that exists, or -1 when run_0 is absent.
    """
    updateCount = -1
    check = 0
    hdfs = os.environ['HDFS']
    while(True):
        print("Looking for update: "+str(check))
        # `hadoop fs -test -e` prints 0 via `echo $?` only when the path exists.
        out = os.popen('/opt/hadoop/bin/hadoop fs -test -e hdfs://'+hdfs+':9000/global/run_'+str(check)+'/knndata && echo $?').read()
        try:
            if(int(out) == 0):
                updateCount = check
                check += 1
            else:
                print('No Update Ready: '+str(check))
                return updateCount
        except:
            # int('') fails when nothing was echoed (path missing).
            print('No Update Found: '+str(check))
            traceback.print_exc()
            return updateCount
    return -1
def checkForGlobalUpdate(count, lastUpdate):
    """Block until a newer global KNN dataset appears on HDFS, then fetch it.

    Polls hdfs://$HDFS:9000/global/run_<i>/knndata for runs i in
    (lastUpdate, count), sleeping 10s between polls. When found, the
    knndata file is copied over the local 'knndatasetGI'.

    :param count: exclusive upper bound on run numbers to check.
    :param lastUpdate: last run number already fetched.
    :return: the run number fetched (always > -1; loops forever otherwise).
    """
    updateCount = -1
    print([count, lastUpdate])
    print("UPDATE CHECK: GLOBAL")
    hdfs = os.environ['HDFS']
    while(updateCount == -1):
        time.sleep(10)
        for i in range(lastUpdate+1, count):
            print("In Check GLOBAL"+str(i))
            # `hadoop fs -test -e` prints 0 via `echo $?` only when the path exists.
            out = os.popen('/opt/hadoop/bin/hadoop fs -test -e hdfs://'+hdfs+':9000/global/run_'+str(i)+'/knndata && echo $?').read()
            try:
                if(int(out) == 0):
                    updateCount = i
                    print("Update Ready: "+str(i))
            except:
                # int('') fails when nothing was echoed (path missing).
                print('Update Not Ready')
                break;
    if(updateCount > -1):
        try:
            os.remove('knndatasetGI')
        except:
            pass  # first fetch: no local copy to remove yet
        out = os.popen('/opt/hadoop/bin/hadoop fs -copyToLocal hdfs://'+hdfs+':9000/global'+
                '/run_'+str(updateCount)+'/knndata knndatasetGI').read()
    return updateCount
def get_size(start_path):
    """Total size in bytes of all regular files under ``start_path``.

    Walks the tree recursively and skips symbolic links so their
    targets are not double-counted.
    """
    total = 0
    for dirpath, _dirnames, filenames in os.walk(start_path):
        for name in filenames:
            full_path = os.path.join(dirpath, name)
            if os.path.islink(full_path):
                continue  # skip symbolic links
            total += os.path.getsize(full_path)
    return total
# --- Worker driver: run the net per CSV and push each run's results to HDFS ---
# BUGFIX: sn/wn (and hdfs) were previously read from the environment AFTER
# being used to build the worker data path below, which raised NameError at
# startup. They are now defined first; no other behavior was changed.
sn=os.environ["SERVERNUM"]
wn=os.environ["WORKERNUM"]
hdfs = os.environ['HDFS']
cwd = os.getcwd()
# Collect the input CSVs staged for this worker.
csvs = []
os.chdir('/home/mydata/Worker'+sn+'_'+wn+'/')
for f in glob.glob("*.csv"):
    csvs.append(f)
os.chdir(cwd)
try:
    subprocess.call(['/opt/hadoop/bin/hadoop','fs','-mkdir', 'hdfs://'+hdfs+':9000/worker'+sn+'_'+wn])
except Exception as e:
    print(e)  # directory may already exist; keep going
#lastUpdate = getCount()
#lastUpdate = getCountGlobal()
lastUpdate = -1
count = 0
print([lastUpdate, count])
start = time.time()
DSVal = 1500
# Seed the local KNN dataset from the pre-built knn<DSVal> snapshot.
shutil.copyfile('/home/mydata/Worker'+sn+'_'+wn+'/knn'+str(DSVal),'/home/sim/knndatasetGI')
for rangeCounter in range(1,2):
    hdfs = os.environ['HDFS']
    #DSVal = rangeCounter*500
    shutil.copyfile('/home/mydata/Worker'+sn+'_'+wn+'/knn'+str(DSVal),'/home/sim/knndatasetGI')
    for f in csvs:
        if(count != 0):
            print("Checking for Server Update")
            #time.sleep(300)
            # Wait for (and fetch) the server-side dataset for the previous run.
            lastUpdate = checkForUpdate(count, lastUpdate)
            #lastUpdate = checkForGlobalUpdate(count, lastUpdate)
            #updateLocal()
        subprocess.call(['bash','runNet.bash','/home/mydata/Worker'+sn+'_'+wn+'/'+f, '80', str(100),'5'])
        os.chdir('tmp/')
        #subprocess.call(['/opt/hadoop/bin/hadoop','fs','-mkdir','hdfs://127.0.0.1:9000/worker'+sn+'_'+wn+'/run_'+str(count)])
        nImgs = len(glob.glob("*.JPG"))
        # NOTE: this rebinds the loop variable `f` to a file handle; the loop
        # itself is unaffected because `f` is reassigned each iteration.
        f = open('/home/mydata/Worker'+sn+'_'+wn+'/run_'+str(count), 'w+')
        f.truncate(0)
        f.write('len = '+str(nImgs)+'\n')
        # Estimate transfer time from the measured throughput in the energy log
        # (field 7, in MB/s — TODO confirm units against the energy logger).
        energy = open('/home/sim/tmp/energy','r+')
        contents = energy.read()
        tput = float(contents.split()[7]) * (1024*1024)
        transferSize = get_size('/home/sim/tmp')
        transferTime = transferSize/tput
        print(transferTime)
        #time.sleep(transferTime)
        contents += 'Transfer Time: '+str(transferTime)+'\n'
        f.write(contents+'\n')
        print('Transfer Size: '+str(transferSize))
        energy.close()
        f.close()
        os.remove('/home/sim/tmp/energy')
        #for x in glob.glob("*.*"):
        # Upload this run's tmp/ output, rename it to run_<count>, and mark done.
        subprocess.call(['/opt/hadoop/bin/hadoop','fs','-put','/home/sim/tmp/','hdfs://'+hdfs+':9000/worker'+sn+'_'+wn+'/'])
        subprocess.call(['/opt/hadoop/bin/hadoop','fs','-mv','hdfs://'+hdfs+':9000/worker'+sn+'_'+wn+'/tmp','hdfs://127.0.0.1:9000/worker'+sn+'_'+wn+'/run_'+str(count)])
        subprocess.call(['/opt/hadoop/bin/hadoop','fs','-touchz','hdfs://'+hdfs+':9000/worker'+sn+'_'+wn+'/run_'+str(count)+"/done"])
        os.chdir(cwd)
        end = time.time()
        print("Time so far: "+str(end-start))
        count += 1
end = time.time()
print(end-start)
# Record total runtime and upload it alongside the run outputs.
f = open('/home/sim/tmp/runtime','w+')
f.write('Total Time: '+str(end-start)+'\n')
f.close()
subprocess.call(['/opt/hadoop/bin/hadoop','fs','-put','/home/sim/tmp/runtime','hdfs://'+hdfs+':9000/worker'+sn+'_'+wn+'/'])
| [
"os.path.getsize",
"numpy.genfromtxt",
"os.walk",
"os.path.join",
"numpy.asarray",
"time.sleep",
"os.getcwd",
"os.chdir",
"subprocess.call",
"numpy.savetxt",
"csv.reader",
"numpy.concatenate",
"os.path.islink",
"traceback.print_exc",
"time.time",
"glob.glob",
"os.remove"
] | [((5282, 5293), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (5291, 5293), False, 'import os, glob\n'), ((5304, 5357), 'os.chdir', 'os.chdir', (["('/home/mydata/Worker' + sn + '_' + wn + '/')"], {}), "('/home/mydata/Worker' + sn + '_' + wn + '/')\n", (5312, 5357), False, 'import os, glob\n'), ((5359, 5377), 'glob.glob', 'glob.glob', (['"""*.csv"""'], {}), "('*.csv')\n", (5368, 5377), False, 'import os, glob\n'), ((5399, 5412), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (5407, 5412), False, 'import os, glob\n'), ((5759, 5770), 'time.time', 'time.time', ([], {}), '()\n', (5768, 5770), False, 'import time\n'), ((7934, 7945), 'time.time', 'time.time', ([], {}), '()\n', (7943, 7945), False, 'import time\n'), ((8058, 8205), 'subprocess.call', 'subprocess.call', (["['/opt/hadoop/bin/hadoop', 'fs', '-put', '/home/sim/tmp/runtime', 'hdfs://' +\n hdfs + ':9000/worker' + sn + '_' + wn + '/']"], {}), "(['/opt/hadoop/bin/hadoop', 'fs', '-put',\n '/home/sim/tmp/runtime', 'hdfs://' + hdfs + ':9000/worker' + sn + '_' +\n wn + '/'])\n", (8073, 8205), False, 'import subprocess\n'), ((1313, 1324), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1322, 1324), False, 'import os, glob\n'), ((1371, 1415), 'numpy.genfromtxt', 'np.genfromtxt', (['"""knndatasetGI"""'], {'delimiter': '""","""'}), "('knndatasetGI', delimiter=',')\n", (1384, 1415), True, 'import numpy as np\n'), ((1424, 1447), 'os.chdir', 'os.chdir', (["(pwd + '/tmp/')"], {}), "(pwd + '/tmp/')\n", (1432, 1447), False, 'import os, glob\n'), ((1463, 1477), 'glob.glob', 'glob.glob', (['"""*"""'], {}), "('*')\n", (1472, 1477), False, 'import os, glob\n'), ((1517, 1535), 'glob.glob', 'glob.glob', (['"""*.JPG"""'], {}), "('*.JPG')\n", (1526, 1535), False, 'import os, glob\n'), ((2560, 2573), 'os.chdir', 'os.chdir', (['pwd'], {}), '(pwd)\n', (2568, 2573), False, 'import os, glob\n'), ((2583, 2633), 'numpy.savetxt', 'np.savetxt', (['"""knndatasetGI"""', 'knndata'], {'delimiter': '""","""'}), "('knndatasetGI', knndata, 
delimiter=',')\n", (2593, 2633), True, 'import numpy as np\n'), ((5031, 5050), 'os.walk', 'os.walk', (['start_path'], {}), '(start_path)\n', (5038, 5050), False, 'import os, glob\n'), ((5504, 5618), 'subprocess.call', 'subprocess.call', (["['/opt/hadoop/bin/hadoop', 'fs', '-mkdir', 'hdfs://' + hdfs +\n ':9000/worker' + sn + '_' + wn]"], {}), "(['/opt/hadoop/bin/hadoop', 'fs', '-mkdir', 'hdfs://' + hdfs +\n ':9000/worker' + sn + '_' + wn])\n", (5519, 5618), False, 'import subprocess\n'), ((402, 416), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (412, 416), False, 'import time\n'), ((4181, 4195), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (4191, 4195), False, 'import time\n'), ((6455, 6471), 'os.chdir', 'os.chdir', (['"""tmp/"""'], {}), "('tmp/')\n", (6463, 6471), False, 'import os, glob\n'), ((7299, 7332), 'os.remove', 'os.remove', (['"""/home/sim/tmp/energy"""'], {}), "('/home/sim/tmp/energy')\n", (7308, 7332), False, 'import os, glob\n'), ((7378, 7515), 'subprocess.call', 'subprocess.call', (["['/opt/hadoop/bin/hadoop', 'fs', '-put', '/home/sim/tmp/', 'hdfs://' + hdfs +\n ':9000/worker' + sn + '_' + wn + '/']"], {}), "(['/opt/hadoop/bin/hadoop', 'fs', '-put', '/home/sim/tmp/', \n 'hdfs://' + hdfs + ':9000/worker' + sn + '_' + wn + '/'])\n", (7393, 7515), False, 'import subprocess\n'), ((7808, 7821), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (7816, 7821), False, 'import os, glob\n'), ((7850, 7861), 'time.time', 'time.time', ([], {}), '()\n', (7859, 7861), False, 'import time\n'), ((1028, 1053), 'os.remove', 'os.remove', (['"""knndatasetGI"""'], {}), "('knndatasetGI')\n", (1037, 1053), False, 'import os, glob\n'), ((1958, 1974), 'csv.reader', 'csv.reader', (['csvf'], {}), '(csvf)\n', (1968, 1974), False, 'import csv\n'), ((2457, 2487), 'numpy.concatenate', 'np.concatenate', (['(knndata, npl)'], {}), '((knndata, npl))\n', (2471, 2487), True, 'import numpy as np\n'), ((4699, 4724), 'os.remove', 'os.remove', (['"""knndatasetGI"""'], {}), 
"('knndatasetGI')\n", (4708, 4724), False, 'import os, glob\n'), ((5097, 5121), 'os.path.join', 'os.path.join', (['dirpath', 'f'], {}), '(dirpath, f)\n', (5109, 5121), False, 'import os, glob\n'), ((6621, 6639), 'glob.glob', 'glob.glob', (['"""*.JPG"""'], {}), "('*.JPG')\n", (6630, 6639), False, 'import os, glob\n'), ((2147, 2163), 'numpy.asarray', 'np.asarray', (['line'], {}), '(line)\n', (2157, 2163), True, 'import numpy as np\n'), ((3265, 3286), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3284, 3286), False, 'import traceback\n'), ((3913, 3934), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3932, 3934), False, 'import traceback\n'), ((5183, 5201), 'os.path.islink', 'os.path.islink', (['fp'], {}), '(fp)\n', (5197, 5201), False, 'import os, glob\n'), ((5233, 5252), 'os.path.getsize', 'os.path.getsize', (['fp'], {}), '(fp)\n', (5248, 5252), False, 'import os, glob\n'), ((890, 911), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (909, 911), False, 'import traceback\n')] |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
from pandas import DataFrame
from scipy.interpolate import griddata
import glob
import os
#Package Imports
from .read import load_mol_abund,\
load_rates, get_reac_str, total_rates,\
load_radfield,\
read_levels,read_trans
from .misc import contour_points, get_contour_arr, remove_nan, sigfig, iterable, nint
from . import __path__ as pkg_path
#Functions for setting and getting global directory path
# where chemical code is located, base_dir
def set_base_dir(direc):
if direc[-1]!='/':
direc = direc+'/'
fpath = pkg_path[0]+'/pkg_files/base_dir.txt'
f = open(fpath,'w')
f.write(direc)
f.close()
def get_base_dir():
    """Return the stored base directory for the chemical code.

    Reads pkg_files/base_dir.txt (written by set_base_dir). If the file
    is missing/unreadable or the stored path no longer exists, falls
    back to the bundled test_files/ directory.

    :return: string directory path (with trailing '/' as stored).
    """
    fpath = pkg_path[0]+'/pkg_files/base_dir.txt'
    try:
        # with-statement closes the handle even when the later check fails.
        with open(fpath) as f:
            direc = f.read()
        assert os.path.exists(direc)
    except (OSError, AssertionError):
        direc = pkg_path[0]+'/test_files/'
    return direc
#Some physical constants used throughout this module (CGS/SI mix).
mp = 1.67e-24 #Mass of proton in g
mau = 1.496e11 #Meters per astronomical unit (1 AU = 1.496e11 m).
class chem_mod:
'''
A class to handle loading, viewing, and manipulating output from
the disk chemical modeling code presented in Fogel et al. 2011.
For more in-depth documentation, visit
https://github.com/richardseifert/Chemvene
To create an instance, the following three paths must be provided.
outdir - string path to the runs/ directory where model output is
stored.
environ - string path to the environ/ directory used to run your
chemical model. (Must given it outdir/environ doesn't exist)
inp - string filename of the input file used to run your model.
(Must be given if outdir/0io* doesn't exits)
'''
################################################################################
################################ Initialization ################################
################################################################################
def __init__(self,outdir,environ=None,inp=None,base_dir=None):
self.outdir = outdir
if self.outdir[-1] != '/':
self.outdir += '/'
if base_dir is None:
self.bsd = get_base_dir()
else:
self.bsd = base_dir
if not environ is None:
self.set_environ(environ)
elif os.path.exists(self.outdir+'environ/'):
self.set_environ(self.outdir+'environ/')
else:
raise FileNotFoundError("Could not determine environ/ directory to use for this model.")
if not inp is None:
self.set_environ(environ)
else:
outdir_0io_paths = glob.glob(self.outdir+'0io*')
if len(outdir_0io_paths) > 0:
self.set_inp(outdir_0io_paths[0].split('/')[-1])
else:
raise FileNotFoundError("Could not determine 0io file to use for this model.")
self.phys = DataFrame()
self.radfields = {}
self.abunds = {}
self.rates = {}
self.load_physical()
self.load_times()
def set_environ(self,environ):
self.environ = environ
if self.environ[-1] != '/':
self.environ += '/'
    def set_inp(self,inp):
        """Record the 0io input file and resolve the paths it references.

        ``inp`` is a filename relative to self.environ. The file's first
        six whitespace-separated entries are taken as paths (relative to
        self.bsd) for the input types below; entries whose file does not
        exist are left as None in self.inp_paths.
        """
        self.inp = self.environ+inp
        inp_types = ['spec','reac','uv','xray','isrf','rn']
        self.inp_paths = {k:None for k in inp_types}
        # One path per line/entry in the 0io file, in the order above.
        d = np.genfromtxt(self.inp,dtype=str)
        for i,k in enumerate(inp_types):
            if os.path.exists(self.bsd+d[i]):
                self.inp_paths[k] = self.bsd+d[i]
def copy(self):
'''
Make a hard copy of a chem_mod instance.
'''
#Initialize
new_inst = chem_mod(outdir=self.outdir,environ=self.environ,inp=self.inp)
#Hard copy physical quantities
new_inst.phys = self.phys.copy()
#for q in self.phys.columns:
# new_inst.set_quant(q,self.phys[q])
#Hard copy abundances
for mol in self.abunds.keys():
new_inst.abunds[mol] = self.abunds[mol].copy()
#Hard copy rates
for rid in self.rates.keys():
new_inst.rates[rid] = self.rates[rid].copy()
return new_inst
################################################################################
############################### General Loading ################################
################################################################################
    def merge(self,tbl):
        '''
        Prepare a given table to be merged according to position, R and zAU.
        ARGUMENTS:
            tbl - A pandas table containing the two columns 'R' and either 'shell' or 'zAU'.
        RETURNS:
            merged - A tbl with the same number of rows as phys. The returned table
                     has values ordered according to phys['R'] and phys['shell']
        '''
        #Match R values to their nearest R values in phys['R'].
        #This is necessary for the relational merge to work.
        phys_R = np.array(list(set(self.phys['R'])))
        # diffs[i, j] = squared distance between phys radius i and tbl row j.
        diffs = np.vstack([(pr-tbl['R'])**2 for pr in phys_R])
        inds = np.argmin(diffs,axis=0)
        tbl['R'] = phys_R[inds]
        #Merge according to columns of phys.
        if 'shell' in tbl.columns:
            # Exact relational join on (R, shell).
            merged = self.phys.merge(tbl,'left',on=['R','shell'])
        elif 'zAU' in tbl.columns:
            #Match by nearest R and zAU.
            # pandas.DataFrame.merge has failed me in this reguard.. :(
            # So I just had to do it myself, huney, using griddata.
            # Nearest-neighbor interpolation in the (R, zAU) plane for every
            # column tbl adds beyond what phys already has.
            merge_cols = [col for col in tbl.columns if not col in self.phys.columns]
            points = np.vstack([tbl['R'],tbl['zAU']]).T
            values = np.array(tbl[merge_cols])
            phys_points = np.array([self.phys['R'],self.phys['zAU']]).T
            matched = griddata(points,values,phys_points,method='nearest')
            merged = self.phys.copy()
            for i,col in enumerate(merge_cols):
                merged[col] = matched[:,i]
        return merged
def set_times(self,tbl):
'''
Method that takes a table with times as column headers and changes the headers
to match the nearest model timesteps.
ARGUMENTS:
tbl - A pandas.DataFrame object with times (in years) as columns header.
RETURNS:
The same table, but times have been corrected to the nearest model times.
'''
ctimes = tbl.columns
mtimes = self.nearest_times(ctimes,itr=True)
return tbl.rename(columns=dict(zip(ctimes,mtimes)))
################################################################################
############################# Handling Timesteps ###############################
################################################################################
    def load_times(self):
        '''
        Method that reads the 2times.inp file for the model and produces an array of
        the time at each model timestep.
        No arguments or returns; times are stored in self.times variable.
        '''
        f = open(self.outdir+'2times.inp')
        f.readline()  # skip header line
        # Fortran-style 'D' exponents must be converted before float().
        t_end = float(f.readline().split()[0].replace('D','E'))
        t_start = float(f.readline().split()[0].replace('D','E'))
        nsteps = int(f.readline().split()[0])
        # Timesteps are logarithmically spaced, rounded to 4 significant figures.
        self.times = sigfig(np.logspace(np.log10(t_start),np.log10(t_end),nsteps), 4)
def nearest_times(self,times,itr=False):
'''
Function for finding nearest timesteps to a given time or list of times.
ARGUMENTS:
times - Time or list of times. Must be values that can be cast to floats.
itr - Boolean whether or not to return a scalar if possible. Default False.
If a single time is given, itr=False will return a scalar value.
itr=True will return a list of length one.
'''
#If None was given, do nothing. Return None.
if times is None:
return times
#Otherwise, check if time is iterable. If it's not, make it a single-valued array.
try:
iter(times)
times = np.array(times).astype(float)
except TypeError:
times = np.asarray([times]).astype(float)
#Find the nearest times in self.times.
nearest = self.times[ np.argmin([ (self.times - t)**2 for t in times ], axis=1) ]
#Depending on the number of times given, return an array or a scalar.
if len(nearest) == 1 and not itr:
return nearest[0]
else:
return nearest
def nearest_time_i(self,time):
return np.argmin( (self.times - time)**2 )
################################################################################
######################## Read/Write Model Quantities ###########################
################################################################################
    def load_physical(self):
        '''
        Method that loads the disk physical model from 1environ files.

        Reads every environ/1environ* file, stacks their data tables, and
        stores each column in self.phys (plus a 1-based 'shell' column).
        '''
        env_paths = glob.glob(self.environ+'1environ*')
        #Determine number of shells in model.
        f1 = open(env_paths[0])
        for i,line in enumerate(f1):
            if i==2:
                # The shell count sits on the third line of each file.
                nshells = int(line.strip())
                f1.close()
                break
        dat = np.array([])
        # Column of shell indices 1..nshells appended to every file's data.
        shells = np.array([np.arange(nshells)+1]).T
        for path in env_paths:
            d = np.loadtxt(path,skiprows=3)
            d = np.hstack([d,shells])
            if len(dat) != 0:
                dat = np.vstack([dat,d])
            else:
                dat = d
        #Get header from test file.
        f = open(env_paths[0])
        header = f.readline()
        f.close()
        # One phys column per header token, plus the synthetic 'shell' column.
        for i,k in enumerate(header.split()+['shell']):
            self.phys[k] = dat[:,i]
    def load_field(self,field,path=None):
        """Load a radiation field ('uv', 'xray', ...) into self.radfields.

        ARGUMENTS:
            field - key of the field type; also used to look up the default
                    file path in self.inp_paths.
            path  - optional explicit file path overriding the default.

        The file yields columns (R, zAU, spectral value, flux). One
        self.radfields[field] column is stored per unique spectral value,
        each merged onto the phys (R, zAU) grid.
        """
        if path is None:
            path = self.inp_paths[field]
        print("Loading %s field from: %s"%(field,path))
        dat = load_radfield(path)
        R = dat[:,0]
        zAU = dat[:,1]
        spec = dat[:,2]
        flux = dat[:,3]
        self.radfields[field] = DataFrame()
        spec_vals = np.unique(spec)
        for sv in spec_vals:
            # Build an (R, zAU, flux) table for this spectral value and
            # align it with the physical grid via merge().
            mask = spec==sv
            tbl = DataFrame()
            tbl['R'] = R[mask]
            tbl['zAU'] = zAU[mask]
            tbl['flux'] = flux[mask]
            self.radfields[field][sv] = self.merge(tbl)['flux']
def limedir(self,strmol):
'''
Function that produces string limefg path for a given species.
It's a pretty pointless method, because I only need the limefg path
twice, when loading and writing species abundances. But, I figured
if I ever want to change where I save limefg or what I want to rename
the directory, I can just change it once in this method.
ARGUMENTS:
strmol - String name of the species.
RETURNS:
string path of a directory where limefg should go.
'''
return self.outdir+'e1/limefg_'+strmol+'/'
def grab_mol(self,strmol,*args,**kwargs):
if not strmol in self.abunds:
self.load_mol(strmol,*args,**kwargs)
def load_mol(self,strmol,times=None,write=True):
'''
Method that loads abundances of a given species,
potentially at a given time or times.
If limefg exists for this species (it has previously been loaded and saved),
then species is loaded from this (quicker). Otherwise, species is loaded
directly from r*.out files.
ARGUMENTS:
strmol - string name of the species to load.
times - Time steps to load species at. Only works if species is saved
in limefg format. Optional; default times=None -> load all times.
RETURNS:
Nothing, abundances are stored in self.abunds[strmol]. Column headers are
model times. Use self.get_quant to get strmol at a specific time (See below).
'''
#Look for strmol in limefg format.
limedir = self.limedir(strmol)
if not os.path.exists(limedir):
#If not in limefg, load from scratch (and write to limefg).
self.read_mol(strmol,write=write)
return
#Load from limefg
print("Loading from limefg.")
self.abunds[strmol] = DataFrame()
outpaths = glob.glob(self.outdir+'e1/r*.out')
limepaths = glob.glob(limedir+'*time*.dat')
tnum = [int(lp.split('time')[-1].split('.')[0]) for lp in limepaths]
limepaths = np.array(limepaths)[np.argsort(tnum)]
#Only load files for times requested.
all_times = self.times
if times is None:
times = all_times
else:
times = self.nearest_times(times,itr=True)
limepaths = [ lp for t,lp in zip(all_times,limepaths) if t in times ]
abunds = np.array([])
columns = ['R','zAU','rho','Tgas','Tdust','abund','fg']
for time,path in zip(times,limepaths):
dat = np.loadtxt(path)
tbl = DataFrame(dat,columns=columns)
tbl['R'] /= mau
tbl['zAU'] /= mau
merged = self.merge(tbl)
self.abunds[strmol][time] = merged['abund']/2
# ^ factor of 2 because LIME wants abundance per H2 instead of per H
#Tweak times to be exact values from self.times.
self.abunds[strmol] = self.set_times(self.abunds[strmol])
    def read_mol(self,strmol,write=False):
        '''
        Method that reads abundances of a given species from r*.out files.
        ARGUMENTS:
            strmol - string name of the species to load.
            write  - If True, also save the loaded abundances in limefg format.
        RETURNS:
            Nothing, abundances are stored in self.abunds[strmol]. Column headers are
            model times. Use self.get_quant to get strmol at a specific time (See below).
        '''
        #Load from e1 files.
        dat = load_mol_abund(self.outdir+'e1/',strmol)
        #Columns of the raw table: time, radius, shell index, abundance.
        times = list(set(dat[:,0]))
        t = dat[:,0]
        R = dat[:,1]
        shell = dat[:,2]
        abunds = dat[:,3]
        #Construct table with abundances at each timestep.
        mol_abund = DataFrame({time:abunds[t==time] for time in sorted(times)})
        #Positions are taken from the first timestep; assumed identical at all
        #timesteps — TODO confirm against the r*.out writer.
        mol_abund['shell'] = shell[t==times[0]]
        mol_abund['R'] = R[t==times[0]]
        #Merge table with existing self.phys physical table.
        self.abunds[strmol] = self.merge(mol_abund)[times]
        #Tweak times to be exact values from self.times.
        self.abunds[strmol] = self.set_times(self.abunds[strmol])
        if write:
            #Write abundances in limefg format.
            self.write_mol(strmol)
    def write_mol(self,strmol,times=None,label=None,savedir=None,tag=''):
        '''
        Method that writes abundances for a species in the limefg format
        used by LIME radiative transfer.
        ARGUMENTS:
            strmol  - string name of the species to load.
            times   - Timesteps to write. Default, all loaded times.
            label   - Optional suffix appended to strmol when naming the
                      output limefg directory.
            savedir - Directory to write into. Default, self.limedir(label).
            tag     - Optional prefix prepended to each output filename.
        '''
        if not strmol in self.abunds.keys():
            self.read_mol(strmol)
        if label is None:
            label = strmol
        else:
            label = strmol+'_'+label
        savetbl = self.phys[['R','zAU','rho','Tgas','Tdust']]
        #Presumably converts mass density to H2 number density
        #(factor 0.8/(2 m_p)); the 1e6 looks like a cm^-3 -> m^-3
        #conversion for LIME — TODO confirm.
        savetbl.loc[:,'rho'] *= 0.8/(2.0*mp) * 1e6
        savetbl.loc[:,'abund'] = np.zeros_like(savetbl['R']) #Place holder.
        # Match tmp table and physical table by positions.
        tmp = np.genfromtxt(pkg_path[0]+'/pkg_files/imlup_gaia_v2_abrig_model_Tgas_SB_G04.txt')
        inds = [np.argmin(( tmp[:,0]-R)**2 + (tmp[:,1]-z)**2 ) for R,z in zip(self.phys['R'],self.phys['zAU'])]
        tmp_sort = tmp[inds]
        #fg column; 1e20 acts as a sentinel where the grain columns vanish.
        fghere = np.array(tmp_sort[:,2]/(tmp_sort[:,3]*tmp_sort[:,7]))
        fghere[(tmp_sort[:,3] <= 1e-30) | (tmp_sort[:,7] <= 1e-30)] = 1e20
        fghere[savetbl['R'] > 313.] = 1e20 # this is for IM LUP SPECIFICALLY!! no large grains beyond this radius
        savetbl.loc[:,'fg'] = fghere
        #Scale positions by mau (AU -> LIME length units; load_mol divides back).
        savetbl.loc[:,'R'] *= mau
        savetbl.loc[:,'zAU'] *= mau
        if savedir is None:
            #NOTE: this local name shadows the limedir() method in this scope.
            limedir = self.limedir(label)
        else:
            limedir = savedir
        if limedir[-1]!='/': limedir=limedir+'/'
        if not os.path.exists(limedir):
            os.makedirs(limedir)
        if not times is None:
            times = self.nearest_times(times,itr=True)
        else:
            times = np.sort(np.unique(self.abunds[strmol].columns))
        for time in times:
            #Filenames carry the global time index, not the time value.
            i = self.nearest_time_i(time)
            fname=limedir+tag+strmol+'_time'+str(i)+'.dat'
            abu = 2*np.array(self.abunds[strmol][time])
            # ^ factor of 2 because LIME wants abundance per H2, not per H.
            #Zero out abundances in very low density cells or below 1e-32.
            abu[(savetbl['rho'] <= 1e2) | (abu < 1e-32)] = 0.0
            savetbl.loc[:,'abund'] = abu
            no_nan = remove_nan(self.phys['R'],abu)
            savearr = np.array(savetbl)[no_nan]
            np.savetxt(fname,savearr,fmt='%15.7E')
################################################################################
######################### Handling Chemical Reactions ##########################
################################################################################
    def get_reac_str(self,reac_id,fmt='ascii'):
        '''
        Method that obtains a string representation of a given reaction in the
        chemical network.
        ARGUMENTS:
            reac_id - Integer ID for the reaction.
            fmt     - Desired format of the reaction string.
                      Options:
                        ascii - Plain text, no subscript or superscript.
                        latex - Formatted to include subscripts and superscripts
                                when interpreted by LaTeX.
        RETURNS:
            Reaction string.
        '''
        #The bare name resolves to the module-level get_reac_str function
        #(not this method); we delegate to it with this model's network file.
        return get_reac_str(self.inp_paths['reac'], reac_id, fmt)
def load_reac(self,strmol,reacs,times=None,radii=None,zones=None):
'''
Method for loading reaction rates for a specific reaction or reactions
involving a specific species, optionally at specific times or radii.
ARGUMENTS:
strmol - Species involved in the reaction(s). This is used as the prefix
for the *.rout files that contain reaction rates.
reacs - Scalar or array of integer reaction IDs.
times - Model timesteps at which to load reaction rates. Default is all times.
radii - Model radii at which to load reaction rates. Default is all radii.
RETURNS:
Nothing, rates are stored in self.rates[reac_id]. Column headers are
model times. Use self.get_quant to get rates at a specific time (See below).
'''
#Check that this molecule has reaction rates collated.
reac_files = glob.glob(self.outdir+'e1/rates/'+strmol+'_*.rout')
if len(reac_files) == 0:
print("Warning: This molecule has no reaction rates stored for %s. \
Doing nothing and continuing.")
#Find nearest times to those given.
if not times is None:
times = self.nearest_times(times)
#Load from e1 files.
dat = load_rates(self.outdir+'e1/rates/',strmol,reacs,times,radii,zones)
times = list(set(dat[:,0]))
t = dat[:,0]
R = dat[:,1]
shell = dat[:,2]
reac_ids = dat[:,3]
reac_rates = dat[:,4]
try:
iter(reacs)
except TypeError:
reacs = [ reacs ]
for reac in reacs:
self.rates[reac] = DataFrame()
for time in times:
#Construct table with abundances at each timestep.
mask = (reac_ids==reac) & (t==time)
tbl = DataFrame()
tbl[time] = reac_rates[mask]
tbl['shell'] = shell[mask]
tbl['R'] = R[mask]
self.rates[reac][time] = self.merge(tbl)[time]
    def rank_reacs(self,strmol,time=None,R=None,zone=None):
        '''
        Method for ranking reactions involving a particular species
        according to the reaction rates, optionally at a specific time
        and/or radius in the model.
        ARGUMENTS:
            strmol - The species whose reactions will be ranked.
            time   - Timestep at which to rank reactions.
                     Default, sum over all timesteps.
            R      - Radius at which to rank reactions.
                     Default, sum over all radii.
            zone   - Zone at which to rank reactions.
                     Default, sum over all zones.
        RETURNS:
            The array returned by total_rates; callers (e.g. plot_best_reacs)
            treat rows as (reaction id, rate) pairs with the strongest first.
        '''
        if not time is None:
            #Snap the requested time onto the model time grid.
            time = self.nearest_times(time)
        rates = total_rates(self.outdir+'e1/rates/',strmol,times=time,radii=R,zones=zone)
        return rates
################################################################################
############################# Requesting Model Data ############################
################################################################################
    def get_quant(self,quant,time=0,mask=None,fmt='numpy'):
        '''
        Method for obtaining model quantity at all locations of the disk,
        at a specific time.
        ARGUMENTS:
            quant - Name of quantity. String for physical quantities and species
                    abundances. Integer for reaction IDs.
                    For convenience of other methods that use get_quant, if an array
                    of values is passed, get_quant will do nothing and return the
                    array passed to it.
            time  - Float value of the time at which to get the quantity.
            mask  - Boolean array selecting a subset of grid points.
                    Default, all points. Incompatible with fmt='contour'.
            fmt   - Output format: 'numpy' (ndarray), 'pandas' (no conversion),
                    or 'contour' (2-D array shaped for contour plotting).
        RETURNS:
            1D array of quant values corresponding to R and shell/zAU columns of self.phys
        '''
        ### Retrieve 2-D quant values ###
        quant = self._retrieve_quant(quant,time=time)
        if mask is None:
            #No mask given; select every grid point.
            mask = np.ones_like(quant).astype(bool)
        elif fmt == 'contour':
            raise ValueError("Cannot return contour-formatted arrays with mask")
        if fmt == 'numpy':
            return np.array(quant[mask])
        elif fmt == 'pandas':
            return quant[mask]
        elif fmt == 'contour':
            #Reshape onto the (R, shell) grid for contour plotting.
            nx = len(list(set(self.phys['R'])))
            ny = len(list(set(self.phys['shell'])))
            return get_contour_arr(quant,nx,ny,sortx=self.phys['R'])
        else:
            raise ValueError("Unrecognized format: %s"%(fmt))
    def _retrieve_quant(self,quant,time=0):
        '''
        Resolve *quant* to its values on the model grid, checking in order:
        already-an-array, physical quantity, species abundance, loaded
        reaction rate, radiation field. Raises ValueError if nothing matches.
        '''
        if iterable(quant):
            return quant #quant passed is already 2-D values.
        elif self._validate_phys(quant):
            return self.phys[quant]
        elif self._validate_abun(quant,time=time):
            return self._get_abun(quant,time=time)
        elif quant in self.rates.keys():
            #Pick the loaded rate column nearest the requested time.
            times = np.array(self.rates[quant].columns)
            nearest = times[np.argmin((times-time)**2)]
            quant = self.rates[quant][nearest]
            #Rates with negative mean (destruction) are flipped to positive.
            if np.nanmean(quant) < 0:
                quant = -quant
            return quant
        elif self._validate_radf(quant):
            return self._get_radf(quant)
        else:
            raise ValueError("The quantity %s was not found for this model."%(quant))
## _validate functions are used to determine if the quantity name
## provided is a valid (i.e. loadable) quantity of the given type.
## Quantities can be Physical Model Quantities (phys), Abundances (abun),
## Radiation Fields (radf), or Reactions (reac).
## Each funtion returns True if the given quant is loadable for the given
## quantity type, and they're used to determine how to load different
## quantities in get_quant.
def _validate_phys(self,quant):
return quant in self.phys.columns
    def _validate_abun(self,quant,time=0):
        '''Return True if *quant* (optionally prefixed with 'n' for number
        density) names a species whose abundances can be loaded.'''
        if quant[0] == 'n':
            #Strip the number-density prefix to get the species name.
            quant = quant[1:]
        try:
            self.grab_mol(quant,times=time)
            return True
        except IndexError:
            #grab_mol/load_mol raise IndexError when no files exist for quant.
            return False
    def _validate_radf(self,quant):
        '''Return True if *quant* names a radiation field ('field' or
        'field_option') that is already loaded or can be loaded.'''
        #Try extracting field name from quant string
        try:
            field = quant.split('_')[0]
        except AttributeError:
            return False #Not a valid radiation field name.
        #If field is already loaded, return True
        if field in self.radfields.keys():
            return True
        try:
            self.load_field(field)
            return True
        except (TypeError, KeyError) as e:
            #No known input path for this field, or quant is not a field name.
            return False
    def _validate_reac(self,quant):
        #Not implemented placeholder; _retrieve_quant checks self.rates directly.
        pass
## _get functions are used to load quantites of each type delineated above.
## They are used by get_quant to load/retrieve different types of model quantities.
    def _get_abun(self,quant,time=0):
        '''
        Return the abundance of a loaded species at the loaded time nearest
        *time*; with an 'n' prefix, return its number density instead.
        '''
        if quant[0] == 'n': #Return species density
            times = np.array(self.abunds[quant[1:]].columns)
            nearest = times[np.argmin((times-time)**2)]
            ab = self.abunds[quant[1:]][nearest]
            #Convert abundance to number density via the H nucleus density.
            rho = self.get_quant('rho')
            nH = rho / mp
            return ab*nH
        else: #Return species abundance
            times = np.array(self.abunds[quant].columns)
            nearest = times[np.argmin((times-time)**2)]
            return self.abunds[quant][nearest]
    def _get_radf(self,quant):
        '''
        Return radiation field values for *quant* = 'field' or 'field_option'.
        Options: a spectral column of the field table, 'intphot' (photon flux
        integrated over the spectral axis), or 'interg' (energy flux
        integrated over the spectral axis).
        '''
        if len(quant.split('_')) == 1: #If no option is provided, return full field
            field,opt = quant,None
        else: #Otherwise, evaluate option and return
            field,opt = quant.split('_')[:2]
        #Evaluate option and return.
        if opt is None:
            return self.radfields[field]
        if opt in self.radfields[field].columns:
            return self.radfields[field][opt]
        elif opt == 'intphot':
            #Trapezoidal integration over the sorted spectral axis.
            sarr = np.sort(self.radfields[field].columns)
            farrs = [self.radfields[field][s] for s in sarr]
            fint = np.sum([(f1+f2)*(s2-s1)/2. for s1,s2,f1,f2 in zip(sarr[:-1],sarr[1:],farrs[:-1],farrs[1:])],axis=0)
            return fint
        elif opt == 'interg':
            #Weight photon fluxes by energy per photon before integrating.
            #Assumes the xray axis is in keV and the uv/isrf axes are in
            #Angstroms (hc/lambda conversion) — TODO confirm with load_field.
            erg_per_phot = {'xray':lambda s: s*1.6022e-9, 'uv':lambda s:6.626e-27*3e10/(s/1e8),'isrf':lambda s:6.626e-27*3e10/(s/1e8)}
            sarr = np.sort(self.radfields[field].columns)
            farrs = [erg_per_phot[field](s)*self.radfields[field][s] for s in sarr]
            fint = np.sum([(f1+f2)*(s2-s1)/2. for s1,s2,f1,f2 in zip(sarr[:-1],sarr[1:],farrs[:-1],farrs[1:])],axis=0)
            return fint
        else:
            raise ValueError("Invalid radiation field option, %s"%(opt))
def get_spatial(self,yaxis='z',fmt='numpy'):
R = self.get_quant('R',fmt=fmt)
Y = self.get_quant('zAU',fmt=fmt)
if yaxis == 'z/r':
Y = Y/R
elif yaxis == 'zone':
Y = self.phys['shell']
elif not yaxis=='z':
raise ValueError("Unrecognized yaxis: %s"%(yaxis))
return R,Y
def z_quant(self,quant,R=100,time=0):
'''
Method for obtaining quant as a function of Z at a particular radius and time.
ARGUMENTS:
quant - The quantity you're interested in. Could be physical quantity,
chemical species for abundances, or reaction ID for rates.
R - Radius at which to return quant. Default is R = 100 AU.
time - Time at which to return quant. Defaults to first timestep.
Returns
z - 1D heights in AU.
quant - 1D quant values corresponding to z.
'''
#Copy quant string before it's overwritten.
quant_str = (str(quant)+'.')[:-1]
#Find nearest R value in grid.
radii = np.array(list(set(self.phys['R'])))
R = radii[np.argmin(np.abs(radii-R))]
#Get 1-D arrays of z and quant at specified R value.
R_mask = self.phys['R'] == R
z = np.array(self.phys['zAU'][ R_mask ])
quant = self.get_quant(quant,time)
quant = np.array(quant[ R_mask ])
#Sort by z
sort = np.argsort(z)
z = z[sort]
quant = quant[sort]
return z,quant
def R_quant(self,quant,zone=0,time=0):
'''
Method for obtaining quant as a function of radius at a particular zone and time.
ARGUMENTS:
quant - The quantity you're interested in. Could be physical quantity,
chemical species for abundances, or reaction ID for rates.
zone - Shell at which to return quant. Default is zone = 0, the
outer layer of the disk.
time - Time at which to return quant. Defaults to first timestep.
Returns
R - 1D radii in AU.
quant - 1D quant values corresponding to R.
'''
#Copy quant string before it's overwritten.
quant_str = (str(quant)+'.')[:-1] #This weirdness is to force python to hardcopy the string.
#Find nearest R value in grid.
zones = np.array(list(set(self.phys['shell'])))
zone = zones[np.argmin(np.abs(zones-zone))]
#Get 1-D arrays of z and quant at specified R value.
zone_mask = self.phys['shell'] == zone
R = np.array(self.phys['R'][ zone_mask ])
quant = self.get_quant(quant,time)
quant = np.array(quant[ zone_mask ])
#Sort by z
sort = np.argsort(R)
R = R[sort]
quant = quant[sort]
return R,quant
def column_density(self,strmol,time=0):
'''
Method for producing columnd density profile for a given species.
ARGUMENTS:
strmol - string of the molecule you want to get column density of.
time - timestep you want columnd density at.
RETURNS:
R_vals - Radius values.
cd - Corresponding column densities at those radii.
'''
#Load number density of strmol (cm^-3).
try:
nX = self.get_quant('n'+strmol,time=time)
except:
nX = self.get_quant(strmol,time=time)
#Load corresponding disk locations.
R = np.array(self.get_quant('R'))
R_vals = np.unique(R) #Get unique values of R.
R_vals = R_vals[np.argsort(R_vals)]
Z = np.array(self.get_quant('zAU'))
#At each radius, numerically integrate number density over the disk height
# to get column density in cm^-2
cd = np.zeros_like(R_vals)
for i,r in enumerate(R_vals):
at_R = R == r
n = nX[at_R]
z = Z[at_R]
z = z*mau * 100 #Convert from AU to cm
cd[i] = 2*nint(z,n) #The 2 is to account for both halves of the disk.
return R_vals,cd
def optical_depth(self,strmol,trans,lambdafile=None,time=0):
'''
Method for producing column density profile for a given species.
ARGUMENTS:
strmol - string of the molecule you want to get column density of.
time - timestep you want columnd density at.
RETURNS:
R_vals - Radius values.
cd - Corresponding column densities at those radii.
'''
#Define some relevant constants
h = 6.6260755e-27 #erg s
c = 2.99792458e10 #cm s^-1
kb = 1.380658e-16 #erg K^-1
#Load number density of strmol (cm^-3).
try:
nX = self.get_quant('n'+strmol,time=time)
except:
nX = self.get_quant(strmol,time=time)
#Load corresponding disk locations.
zone = np.array(self.get_quant('shell'))
zone_vals = np.unique(zone)
zone_vals = zone_vals[np.argsort(zone_vals)]
Rarr = np.array(self.get_quant('R'))
Zarr = np.array(self.get_quant('zAU'))
# and temperatures!
Tarr = np.array(self.get_quant('Tgas'))
#Read lambda file
levels = read_levels(lambdafile)
transitions = read_trans(lambdafile)
# Get list of energies and statistical weights for each level.
En = levels[:,1]*h*c
gn = levels[:,2]
# Get constants for this transition.
A = transitions[trans,3] #Aul for this transition.
print('Einstein A (s^-1)',A)
freq = transitions[trans,4]*1e9
print('Frequency:',freq/1e9,'GHz')
lam = c/freq
print('Wavelength:',lam,'cm')
# get upper- and lower-state energies and statistical weights
Elower=En[0]
glower=gn[0]
Eupper=En[1]
gupper=gn[1]
for lid,E,g in zip(levels[:,0],En,gn):
if lid==transitions[trans,1]:
Eupper = E
gupper = g
if lid==transitions[trans,2]:
Elower = E
glower = g
grat = gupper/glower
#Functions so evaluate at each location.
partition_func = lambda T,E=En: np.array([np.sum(np.exp(-E/(kb*temp))) for temp in T])
integrand = lambda n,T: n/partition_func(T)*glower*np.exp(-Elower/(kb*T))*(1-np.exp(-h*freq/(kb*T)))
tau = np.zeros_like(zone)
for i,zn1,zn2 in zip(np.arange(len(zone_vals)-1),zone_vals[:-1],zone_vals[1:]):
in_zn1 = zone == zn1
in_zn2 = zone == zn2
n1 = nX[in_zn1]
n2 = nX[in_zn2]
Z1 = Zarr[in_zn1]
Z2 = Zarr[in_zn2]
T1 = Tarr[in_zn1]
T2 = Tarr[in_zn2]
tau[in_zn2] = tau[in_zn1] + grat*A/(8*np.pi) * lam**2 * 0.5*(Z1-Z2)*(integrand(n1,T1)+integrand(n2,T2))
return tau
def get_spec(self,field,r,z):
R = self.get_quant('R')
R_vals = np.unique(R)
nearest_R = R_vals[np.argmin(np.abs(R_vals-r))]
Z = self.get_quant('zAU')
Z_vals = Z_vals[R == nearest_R]
nearest_Z = Z_vals[np.argmin(np.abs(Z_vals-z))]
mask = (R == nearest_R) & (Z == nearest_Z)
spec_all = self.radfields[field]
spec_vals = np.sort(spec_all.columns)
intensity_vals = np.zeros_like(spec_vals)
for i,sv in enumerate(spec_vals):
intensity_vals[i] = spec_all[sv][mask]
return spec_vals,intensity_vals
################################################################################
############################# Altering Model Data ##############################
################################################################################
    def set_quant(self,quant,val,mask=None,time=None):
        '''
        Method for setting a model quantity (e.g. Tgas, CO abudance, etc.) to a new
        value or set of values, optionally within a masked region only.
        ARGUMENTS:
            quant - The quantity to be changed
                    Must be found in either self.phys, self.abunds, or self.rates
            val   - The new value or array of values for this quantity
                    May also be another chem_mod instance, in which case the
                    masked values of quant are copied from that model.
            mask  - A pre-generated mask for where the quantity should be changed.
                    Default, no masking.
            time  - Timestep at which to set the quantity. Default, all loaded times.
            Ex.) #Enhancing model CO abundance within 50 AU
                 cmod = chem_mod(someoutdir)
                 cmod.grab_mol('CO')
                 R = cmod.get_quant('R')
                 mask = R < 50   # Returns array of Trues and Falses.
                 cmod.set_quant('CO',1e-4,mask=mask)
        '''
        #If mask not given, make an all-True mask (equiv to not masking).
        if type(mask) is type(None):
            mask = np.ones_like(self.phys['R']).astype(bool)
        #If val is another chem_mod instance, take the masked quant from
        # that chem_mod instance
        if isinstance(val,chem_mod):
            val = val.get_quant(quant,time=time if not time is None else 0)
        if quant in self.phys.columns:
            #Physical quantities have no time axis; set directly.
            self.phys[quant][mask] = val
        elif quant in self.abunds.keys():
            times = np.array(self.abunds[quant].columns)
            if time is None:
                #No time given; set at every loaded timestep.
                for t in times:
                    self.abunds[quant][t][mask] = val
            else:
                #Set only at the loaded timestep nearest the requested time.
                nearest = times[np.argmin((times-time)**2)]
                self.abunds[quant][nearest][mask] = val
        elif quant in self.rates.keys():
            times = np.array(self.rates[quant].columns)
            if time is None:
                for t in times:
                    self.rates[quant][t][mask] = val
            else:
                nearest = times[np.argmin((times-time)**2)]
                self.rates[quant][nearest][mask] = val
        elif quant[0]=='n' and quant[1:] in self.abunds.keys():
            #Compute abundances corresponding to given density, val.
            rho = self.get_quant('rho')
            nH = rho/mp
            ab_val = val/nH
            times = np.array(self.abunds[quant[1:]].columns)
            if time is None:
                for t in times:
                    self.abunds[quant[1:]][t][mask] = ab_val
            else:
                nearest = times[np.argmin((times-time)**2)]
                self.abunds[quant[1:]][nearest][mask] = ab_val
        else:
            raise ValueError("The quantity %s was not found for this model."%(quant))
    def set_all(self,other,mask):
        '''
        Replace model quantities with those of another chem_mod instance within a
        specified mask.
        ARGUMENTS:
            other - A chem_mod instance.
            mask  - An array of Trues and Falses to be used when setting model
                    quantities to those of the given cmod, other.
        '''
        #Set physical quantities
        for q in self.phys.columns:
            try:
                self.set_quant(q,other.get_quant(q)[mask],mask=mask)
            except ValueError:
                #Quantity missing from other; warn and keep going.
                print("Warning: Quantity %s was not found for this model and is not \
                       being set."%(q))
        #Set species abundances, timestep by timestep.
        for mol in self.abunds.keys():
            times = np.array(self.abunds[mol].columns)
            for t in times:
                try:
                    self.set_quant(mol,other.get_quant(mol,time=t)[mask],mask=mask,time=t)
                except ValueError:
                    print("Warning: Quantity %s at time %s was not found for this model and is not \
                           being set."%(mol,t))
        #Set loaded reaction rates, timestep by timestep.
        for rid in self.rates.keys():
            times = np.array(self.rates[rid].columns)
            for t in times:
                try:
                    self.set_quant(rid,other.get_quant(rid,time=t)[mask],mask=mask,time=t)
                except ValueError:
                    print("Warning: Quantity %s at time %s was not found for this model and is not \
                           being set."%(rid,t))
################################################################################
################################## Plotting ####################################
################################################################################
    def profile_quant(self,quant,time=0,vmin=None,vmax=None,plot_grid=False,yaxis='z',xscale='linear',yscale='linear',return_artist=False,**kwargs):
        '''
        Method for plotting disk profile in a specified quantity (e.g. Dust temperature, HCO+ abundance, etc.).
        ARGUMENTS:
            quant     - The quantity you want to see a disk profile of.
            time      - The timestep at which to produce the profile. Defaults to first timestep.
            vmin,vmax - Colormap upper and lower bounds. By default, they are determined from the
                        minimum and maximum positive values of the quantity you're plotting.
            plot_grid - Boolean whether or not to plot gridpoints on top of contours. Defaults to False.
            yaxis     - Vertical coordinate: 'z', 'z/r', or 'zone' (see get_spatial).
            xscale,yscale - Axis scales, passed to set_xscale/set_yscale.
            return_artist - If True, also return the contour artist.
            kwargs    - Forwarded to contour_points (e.g. log, ax, levels, cmap).
        RETURNS:
            ax - The axes object with the contours plotted.
                 (ax, contour artist) when return_artist=True.
        '''
        quant = self.get_quant(quant,time)
        R,Y = self.get_spatial(yaxis=yaxis)
        if yaxis == 'z/r':
            ylabel = 'Z/R'
        elif yaxis == 'zone':
            ylabel = 'Zone'
        else:
            ylabel = 'Z (AU)'
        #Default color limits come from the positive values only.
        if vmin is None:
            vmin = np.nanmin(quant[quant>0])
        if vmax is None:
            vmax = np.nanmax(quant[quant>0])
        nx = len(list(set(self.phys['R'])))
        ny = len(list(set(self.phys['shell'])))
        if return_artist:
            ax,cont = contour_points(R,Y,quant,nx=nx,ny=ny,vmin=vmin,vmax=vmax,return_artist=True,**kwargs)
        else:
            ax = contour_points(R,Y,quant,nx=nx,ny=ny,vmin=vmin,vmax=vmax,**kwargs)
        ax.set_xscale(xscale)
        ax.set_yscale(yscale)
        if plot_grid:
            ax.scatter(R,Y,s=1,color='black')
        ax.set_xlabel('R (AU)')
        ax.set_ylabel(ylabel)
        if return_artist:
            return ax,cont
        return ax
def profile_reac(self,reac,time=0,**kwargs):
'''
Method for plotting disk profile in the rate of a specific reaction.
Same as profile_reac above, but it grabs the reaction string to use
as a title.
'''
ax = self.profile_quant(reac,time=time,**kwargs)
ax.set_title( self.get_reac_str(reac,fmt='latex') )
    def profile_best_reacs(self,strmol,n,time=None,rank_R=None,**kwargs):
        '''
        Plot disk profiles for the n strongest reactions involving *strmol*.
        ARGUMENTS:
            strmol - Species whose reactions are ranked and plotted.
            n      - Number of top-ranked reactions to plot.
            time   - Timestep used for ranking and plotting. Default, all times.
            rank_R - Radius at which to rank reactions. Default, all radii.
            kwargs - Forwarded to profile_reac / profile_quant.
        '''
        rates = self.rank_reacs(strmol,time,rank_R)
        rates = rates[:n]
        for rid,rate in rates:
            print("Loading "+self.get_reac_str(rid))
            self.load_reac(strmol,rid,times=time)
            if not 'cmap' in kwargs:
                #Default colormaps: blue for production (positive rate),
                #red for destruction (negative rate).
                if rate > 0:
                    cmap = 'Blues'
                else:
                    cmap = 'Reds'
                self.profile_reac(rid,time=time,cmap=cmap,**kwargs)
            else:
                self.profile_reac(rid,time=time,**kwargs)
    def plot_best_reacs(self,strmol,n,R=None,zone=None,time=None,plot_mols=None,\
                        total=True,cmap_pro='Blues',cmap_des='Reds',load_n=None,\
                        ls_pro='--',ls_des='-.',ax=None):
        '''
        Plot 1-D rate curves for the n strongest reactions involving *strmol*,
        along a vertical cut (R given) or a radial cut (zone given).
        ARGUMENTS:
            strmol    - Species whose reactions are ranked and plotted.
            n         - Number of reactions to plot.
            R         - Radius of the vertical cut. Mutually exclusive with zone.
            zone      - Shell index of the radial cut. Mutually exclusive with R.
            time      - Timestep used for ranking/loading. Default, all times.
            plot_mols - Species name(s) to overplot abundances for, on a twin axis.
            total     - If True, also plot summed production/destruction rates
                        over the load_n loaded reactions.
            cmap_pro,cmap_des - Colormaps (names or objects) for production and
                        destruction curves.
            load_n    - Number of reactions to load (>= n allows totals over
                        more reactions than are drawn). Default, n.
            ls_pro,ls_des - Line styles for production/destruction curves.
            ax        - Axes to plot onto. Default, a new figure.
        RETURNS:
            ax - The axes object with the curves plotted.
        '''
        if not R is None and not zone is None:
            raise ValueError("Both R and zone cannot be given; give one or the other.")
        #Create axes.
        if ax is None:
            fig,ax = plt.subplots()
            ax.set_ylabel('Rate')
            ax.set_yscale('log',nonposy='clip')
            if not R is None:
                ax.set_xlabel('Z (AU)')
            if not zone is None:
                ax.set_xlabel('R (AU)')
        #Handle colormap nonsense.
        if type(cmap_pro) == str:
            cmap_pro = get_cmap(cmap_pro)
        if type(cmap_des) == str:
            cmap_des = get_cmap(cmap_des)
        #Figure out how many reactions to load.
        n = int(n)
        if load_n is None:
            load_n = n
        #Rank rates. Take strongest n reactions.
        rates = self.rank_reacs(strmol,time,R=R,zone=zone)
        rates = rates[:load_n]
        #Count number of reactions producing and destroying strmol.
        n_pro = len(rates[:n][rates[:n][:,1] >= 0])
        n_des = len(rates[:n][rates[:n][:,1] < 0])
        pro = 0
        des = 0
        for rid,rate in rates:
            #Color each curve by its rank within its (production/destruction)
            #group, fading toward the weak end of the colormap.
            if rate >= 0:
                c = cmap_pro(1-pro/n_pro)
                ls = ls_pro
                pro += 1
            else:
                c = cmap_des(1-des/n_des)
                ls = ls_des
                des += 1
            print("Loading %d: %s, %15.7E"%(int(rid),self.get_reac_str(rid),rate))
            self.load_reac(strmol,rid,times=time,radii=R,zones=zone)
            if not R is None:
                x,rt = self.z_quant(rid,R=R,time=time)
            if not zone is None:
                x,rt = self.R_quant(rid,zone=zone,time=time)
            if pro+des <= n:
                #Only plot n rates.
                ax.plot(x,rt,color=c,ls=ls,label="%d: %s"%(rid,self.get_reac_str(rid,fmt='latex')))
            if total:
                #Lazily initialize the accumulators on the first iteration;
                #the NameError probe detects that they don't exist yet.
                try:
                    rt_pro
                    rt_des
                except NameError:
                    rt_pro = np.zeros_like(x)
                    rt_des = np.zeros_like(x)
                rt[np.isnan(rt)] = 0
                if rate >= 0:
                    rt_pro += rt
                else:
                    rt_des += rt
        if total:
            ax.plot(x,rt_des,color='red',label='Destruction Rate')
            ax.plot(x,rt_pro,color='blue',label='Prodution Rate')
        if not plot_mols is None:
            if type(plot_mols) == str:
                plot_mols = [plot_mols]
            #Overplot abundances on a twin y-axis.
            sax = ax.twinx()
            sax.set_yscale('log',nonposy='clip')
            for mol in plot_mols:
                self.load_mol(mol,times=time)
                z,ab = self.z_quant(mol,R=R,time=time) #BUG for zone cuts this breaks, R=None.
                sax.plot(z,ab,label=mol)
        ax.legend(loc=0)
        return ax
| [
"numpy.log10",
"numpy.hstack",
"numpy.argsort",
"numpy.array",
"numpy.nanmean",
"numpy.nanmin",
"numpy.genfromtxt",
"numpy.arange",
"os.path.exists",
"numpy.sort",
"numpy.asarray",
"numpy.exp",
"numpy.vstack",
"numpy.nanmax",
"numpy.argmin",
"pandas.DataFrame",
"matplotlib.cm.get_cma... | [((926, 947), 'os.path.exists', 'os.path.exists', (['direc'], {}), '(direc)\n', (940, 947), False, 'import os\n'), ((3161, 3172), 'pandas.DataFrame', 'DataFrame', ([], {}), '()\n', (3170, 3172), False, 'from pandas import DataFrame\n'), ((3630, 3664), 'numpy.genfromtxt', 'np.genfromtxt', (['self.inp'], {'dtype': 'str'}), '(self.inp, dtype=str)\n', (3643, 3664), True, 'import numpy as np\n'), ((5337, 5389), 'numpy.vstack', 'np.vstack', (["[((pr - tbl['R']) ** 2) for pr in phys_R]"], {}), "([((pr - tbl['R']) ** 2) for pr in phys_R])\n", (5346, 5389), True, 'import numpy as np\n'), ((5399, 5423), 'numpy.argmin', 'np.argmin', (['diffs'], {'axis': '(0)'}), '(diffs, axis=0)\n', (5408, 5423), True, 'import numpy as np\n'), ((8963, 8998), 'numpy.argmin', 'np.argmin', (['((self.times - time) ** 2)'], {}), '((self.times - time) ** 2)\n', (8972, 8998), True, 'import numpy as np\n'), ((9399, 9436), 'glob.glob', 'glob.glob', (["(self.environ + '1environ*')"], {}), "(self.environ + '1environ*')\n", (9408, 9436), False, 'import glob\n'), ((9680, 9692), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (9688, 9692), True, 'import numpy as np\n'), ((10521, 10532), 'pandas.DataFrame', 'DataFrame', ([], {}), '()\n', (10530, 10532), False, 'from pandas import DataFrame\n'), ((10553, 10568), 'numpy.unique', 'np.unique', (['spec'], {}), '(spec)\n', (10562, 10568), True, 'import numpy as np\n'), ((12760, 12771), 'pandas.DataFrame', 'DataFrame', ([], {}), '()\n', (12769, 12771), False, 'from pandas import DataFrame\n'), ((12791, 12827), 'glob.glob', 'glob.glob', (["(self.outdir + 'e1/r*.out')"], {}), "(self.outdir + 'e1/r*.out')\n", (12800, 12827), False, 'import glob\n'), ((12855, 12888), 'glob.glob', 'glob.glob', (["(limedir + '*time*.dat')"], {}), "(limedir + '*time*.dat')\n", (12864, 12888), False, 'import glob\n'), ((13329, 13341), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (13337, 13341), True, 'import numpy as np\n'), ((15715, 15742), 
'numpy.zeros_like', 'np.zeros_like', (["savetbl['R']"], {}), "(savetbl['R'])\n", (15728, 15742), True, 'import numpy as np\n'), ((15832, 15919), 'numpy.genfromtxt', 'np.genfromtxt', (["(pkg_path[0] + '/pkg_files/imlup_gaia_v2_abrig_model_Tgas_SB_G04.txt')"], {}), "(pkg_path[0] +\n '/pkg_files/imlup_gaia_v2_abrig_model_Tgas_SB_G04.txt')\n", (15845, 15919), True, 'import numpy as np\n'), ((16077, 16137), 'numpy.array', 'np.array', (['(tmp_sort[:, 2] / (tmp_sort[:, 3] * tmp_sort[:, 7]))'], {}), '(tmp_sort[:, 2] / (tmp_sort[:, 3] * tmp_sort[:, 7]))\n', (16085, 16137), True, 'import numpy as np\n'), ((19233, 19290), 'glob.glob', 'glob.glob', (["(self.outdir + 'e1/rates/' + strmol + '_*.rout')"], {}), "(self.outdir + 'e1/rates/' + strmol + '_*.rout')\n", (19242, 19290), False, 'import glob\n'), ((28275, 28309), 'numpy.array', 'np.array', (["self.phys['zAU'][R_mask]"], {}), "(self.phys['zAU'][R_mask])\n", (28283, 28309), True, 'import numpy as np\n'), ((28371, 28394), 'numpy.array', 'np.array', (['quant[R_mask]'], {}), '(quant[R_mask])\n', (28379, 28394), True, 'import numpy as np\n'), ((28432, 28445), 'numpy.argsort', 'np.argsort', (['z'], {}), '(z)\n', (28442, 28445), True, 'import numpy as np\n'), ((29593, 29628), 'numpy.array', 'np.array', (["self.phys['R'][zone_mask]"], {}), "(self.phys['R'][zone_mask])\n", (29601, 29628), True, 'import numpy as np\n'), ((29690, 29716), 'numpy.array', 'np.array', (['quant[zone_mask]'], {}), '(quant[zone_mask])\n', (29698, 29716), True, 'import numpy as np\n'), ((29754, 29767), 'numpy.argsort', 'np.argsort', (['R'], {}), '(R)\n', (29764, 29767), True, 'import numpy as np\n'), ((30564, 30576), 'numpy.unique', 'np.unique', (['R'], {}), '(R)\n', (30573, 30576), True, 'import numpy as np\n'), ((30828, 30849), 'numpy.zeros_like', 'np.zeros_like', (['R_vals'], {}), '(R_vals)\n', (30841, 30849), True, 'import numpy as np\n'), ((32024, 32039), 'numpy.unique', 'np.unique', (['zone'], {}), '(zone)\n', (32033, 32039), True, 'import numpy as 
np\n'), ((33477, 33496), 'numpy.zeros_like', 'np.zeros_like', (['zone'], {}), '(zone)\n', (33490, 33496), True, 'import numpy as np\n'), ((34059, 34071), 'numpy.unique', 'np.unique', (['R'], {}), '(R)\n', (34068, 34071), True, 'import numpy as np\n'), ((34379, 34404), 'numpy.sort', 'np.sort', (['spec_all.columns'], {}), '(spec_all.columns)\n', (34386, 34404), True, 'import numpy as np\n'), ((34430, 34454), 'numpy.zeros_like', 'np.zeros_like', (['spec_vals'], {}), '(spec_vals)\n', (34443, 34454), True, 'import numpy as np\n'), ((2571, 2611), 'os.path.exists', 'os.path.exists', (["(self.outdir + 'environ/')"], {}), "(self.outdir + 'environ/')\n", (2585, 2611), False, 'import os\n'), ((2891, 2922), 'glob.glob', 'glob.glob', (["(self.outdir + '0io*')"], {}), "(self.outdir + '0io*')\n", (2900, 2922), False, 'import glob\n'), ((3720, 3751), 'os.path.exists', 'os.path.exists', (['(self.bsd + d[i])'], {}), '(self.bsd + d[i])\n', (3734, 3751), False, 'import os\n'), ((8661, 8720), 'numpy.argmin', 'np.argmin', (['[((self.times - t) ** 2) for t in times]'], {'axis': '(1)'}), '([((self.times - t) ** 2) for t in times], axis=1)\n', (8670, 8720), True, 'import numpy as np\n'), ((9792, 9820), 'numpy.loadtxt', 'np.loadtxt', (['path'], {'skiprows': '(3)'}), '(path, skiprows=3)\n', (9802, 9820), True, 'import numpy as np\n'), ((9836, 9858), 'numpy.hstack', 'np.hstack', (['[d, shells]'], {}), '([d, shells])\n', (9845, 9858), True, 'import numpy as np\n'), ((10644, 10655), 'pandas.DataFrame', 'DataFrame', ([], {}), '()\n', (10653, 10655), False, 'from pandas import DataFrame\n'), ((12503, 12526), 'os.path.exists', 'os.path.exists', (['limedir'], {}), '(limedir)\n', (12517, 12526), False, 'import os\n'), ((12984, 13003), 'numpy.array', 'np.array', (['limepaths'], {}), '(limepaths)\n', (12992, 13003), True, 'import numpy as np\n'), ((13004, 13020), 'numpy.argsort', 'np.argsort', (['tnum'], {}), '(tnum)\n', (13014, 13020), True, 'import numpy as np\n'), ((13471, 13487), 'numpy.loadtxt', 
'np.loadtxt', (['path'], {}), '(path)\n', (13481, 13487), True, 'import numpy as np\n'), ((13506, 13537), 'pandas.DataFrame', 'DataFrame', (['dat'], {'columns': 'columns'}), '(dat, columns=columns)\n', (13515, 13537), False, 'from pandas import DataFrame\n'), ((15935, 15989), 'numpy.argmin', 'np.argmin', (['((tmp[:, 0] - R) ** 2 + (tmp[:, 1] - z) ** 2)'], {}), '((tmp[:, 0] - R) ** 2 + (tmp[:, 1] - z) ** 2)\n', (15944, 15989), True, 'import numpy as np\n'), ((16612, 16635), 'os.path.exists', 'os.path.exists', (['limedir'], {}), '(limedir)\n', (16626, 16635), False, 'import os\n'), ((16649, 16669), 'os.makedirs', 'os.makedirs', (['limedir'], {}), '(limedir)\n', (16660, 16669), False, 'import os\n'), ((17327, 17367), 'numpy.savetxt', 'np.savetxt', (['fname', 'savearr'], {'fmt': '"""%15.7E"""'}), "(fname, savearr, fmt='%15.7E')\n", (17337, 17367), True, 'import numpy as np\n'), ((19994, 20005), 'pandas.DataFrame', 'DataFrame', ([], {}), '()\n', (20003, 20005), False, 'from pandas import DataFrame\n'), ((22456, 22477), 'numpy.array', 'np.array', (['quant[mask]'], {}), '(quant[mask])\n', (22464, 22477), True, 'import numpy as np\n'), ((25226, 25266), 'numpy.array', 'np.array', (['self.abunds[quant[1:]].columns'], {}), '(self.abunds[quant[1:]].columns)\n', (25234, 25266), True, 'import numpy as np\n'), ((25523, 25559), 'numpy.array', 'np.array', (['self.abunds[quant].columns'], {}), '(self.abunds[quant].columns)\n', (25531, 25559), True, 'import numpy as np\n'), ((30626, 30644), 'numpy.argsort', 'np.argsort', (['R_vals'], {}), '(R_vals)\n', (30636, 30644), True, 'import numpy as np\n'), ((32070, 32091), 'numpy.argsort', 'np.argsort', (['zone_vals'], {}), '(zone_vals)\n', (32080, 32091), True, 'import numpy as np\n'), ((38315, 38349), 'numpy.array', 'np.array', (['self.abunds[mol].columns'], {}), '(self.abunds[mol].columns)\n', (38323, 38349), True, 'import numpy as np\n'), ((38732, 38765), 'numpy.array', 'np.array', (['self.rates[rid].columns'], {}), 
'(self.rates[rid].columns)\n', (38740, 38765), True, 'import numpy as np\n'), ((40756, 40783), 'numpy.nanmin', 'np.nanmin', (['quant[quant > 0]'], {}), '(quant[quant > 0])\n', (40765, 40783), True, 'import numpy as np\n'), ((40826, 40853), 'numpy.nanmax', 'np.nanmax', (['quant[quant > 0]'], {}), '(quant[quant > 0])\n', (40835, 40853), True, 'import numpy as np\n'), ((42822, 42836), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (42834, 42836), True, 'import matplotlib.pyplot as plt\n'), ((43131, 43149), 'matplotlib.cm.get_cmap', 'get_cmap', (['cmap_pro'], {}), '(cmap_pro)\n', (43139, 43149), False, 'from matplotlib.cm import get_cmap\n'), ((43207, 43225), 'matplotlib.cm.get_cmap', 'get_cmap', (['cmap_des'], {}), '(cmap_des)\n', (43215, 43225), False, 'from matplotlib.cm import get_cmap\n'), ((5983, 6008), 'numpy.array', 'np.array', (['tbl[merge_cols]'], {}), '(tbl[merge_cols])\n', (5991, 6008), True, 'import numpy as np\n'), ((6103, 6158), 'scipy.interpolate.griddata', 'griddata', (['points', 'values', 'phys_points'], {'method': '"""nearest"""'}), "(points, values, phys_points, method='nearest')\n", (6111, 6158), False, 'from scipy.interpolate import griddata\n'), ((7635, 7652), 'numpy.log10', 'np.log10', (['t_start'], {}), '(t_start)\n', (7643, 7652), True, 'import numpy as np\n'), ((7653, 7668), 'numpy.log10', 'np.log10', (['t_end'], {}), '(t_end)\n', (7661, 7668), True, 'import numpy as np\n'), ((9910, 9929), 'numpy.vstack', 'np.vstack', (['[dat, d]'], {}), '([dat, d])\n', (9919, 9929), True, 'import numpy as np\n'), ((16797, 16835), 'numpy.unique', 'np.unique', (['self.abunds[strmol].columns'], {}), '(self.abunds[strmol].columns)\n', (16806, 16835), True, 'import numpy as np\n'), ((16985, 17020), 'numpy.array', 'np.array', (['self.abunds[strmol][time]'], {}), '(self.abunds[strmol][time])\n', (16993, 17020), True, 'import numpy as np\n'), ((17276, 17293), 'numpy.array', 'np.array', (['savetbl'], {}), '(savetbl)\n', (17284, 17293), True, 'import 
numpy as np\n'), ((20178, 20189), 'pandas.DataFrame', 'DataFrame', ([], {}), '()\n', (20187, 20189), False, 'from pandas import DataFrame\n'), ((25295, 25325), 'numpy.argmin', 'np.argmin', (['((times - time) ** 2)'], {}), '((times - time) ** 2)\n', (25304, 25325), True, 'import numpy as np\n'), ((25588, 25618), 'numpy.argmin', 'np.argmin', (['((times - time) ** 2)'], {}), '((times - time) ** 2)\n', (25597, 25618), True, 'import numpy as np\n'), ((26192, 26230), 'numpy.sort', 'np.sort', (['self.radfields[field].columns'], {}), '(self.radfields[field].columns)\n', (26199, 26230), True, 'import numpy as np\n'), ((28146, 28163), 'numpy.abs', 'np.abs', (['(radii - R)'], {}), '(radii - R)\n', (28152, 28163), True, 'import numpy as np\n'), ((29451, 29471), 'numpy.abs', 'np.abs', (['(zones - zone)'], {}), '(zones - zone)\n', (29457, 29471), True, 'import numpy as np\n'), ((34109, 34127), 'numpy.abs', 'np.abs', (['(R_vals - r)'], {}), '(R_vals - r)\n', (34115, 34127), True, 'import numpy as np\n'), ((34239, 34257), 'numpy.abs', 'np.abs', (['(Z_vals - z)'], {}), '(Z_vals - z)\n', (34245, 34257), True, 'import numpy as np\n'), ((36278, 36314), 'numpy.array', 'np.array', (['self.abunds[quant].columns'], {}), '(self.abunds[quant].columns)\n', (36286, 36314), True, 'import numpy as np\n'), ((5927, 5960), 'numpy.vstack', 'np.vstack', (["[tbl['R'], tbl['zAU']]"], {}), "([tbl['R'], tbl['zAU']])\n", (5936, 5960), True, 'import numpy as np\n'), ((6035, 6079), 'numpy.array', 'np.array', (["[self.phys['R'], self.phys['zAU']]"], {}), "([self.phys['R'], self.phys['zAU']])\n", (6043, 6079), True, 'import numpy as np\n'), ((8465, 8480), 'numpy.array', 'np.array', (['times'], {}), '(times)\n', (8473, 8480), True, 'import numpy as np\n'), ((22264, 22283), 'numpy.ones_like', 'np.ones_like', (['quant'], {}), '(quant)\n', (22276, 22283), True, 'import numpy as np\n'), ((26619, 26657), 'numpy.sort', 'np.sort', (['self.radfields[field].columns'], {}), '(self.radfields[field].columns)\n', (26626, 
26657), True, 'import numpy as np\n'), ((33412, 33438), 'numpy.exp', 'np.exp', (['(-Elower / (kb * T))'], {}), '(-Elower / (kb * T))\n', (33418, 33438), True, 'import numpy as np\n'), ((33438, 33466), 'numpy.exp', 'np.exp', (['(-h * freq / (kb * T))'], {}), '(-h * freq / (kb * T))\n', (33444, 33466), True, 'import numpy as np\n'), ((35873, 35901), 'numpy.ones_like', 'np.ones_like', (["self.phys['R']"], {}), "(self.phys['R'])\n", (35885, 35901), True, 'import numpy as np\n'), ((36625, 36660), 'numpy.array', 'np.array', (['self.rates[quant].columns'], {}), '(self.rates[quant].columns)\n', (36633, 36660), True, 'import numpy as np\n'), ((44693, 44705), 'numpy.isnan', 'np.isnan', (['rt'], {}), '(rt)\n', (44701, 44705), True, 'import numpy as np\n'), ((8541, 8560), 'numpy.asarray', 'np.asarray', (['[times]'], {}), '([times])\n', (8551, 8560), True, 'import numpy as np\n'), ((9720, 9738), 'numpy.arange', 'np.arange', (['nshells'], {}), '(nshells)\n', (9729, 9738), True, 'import numpy as np\n'), ((23191, 23226), 'numpy.array', 'np.array', (['self.rates[quant].columns'], {}), '(self.rates[quant].columns)\n', (23199, 23226), True, 'import numpy as np\n'), ((33315, 33339), 'numpy.exp', 'np.exp', (['(-E / (kb * temp))'], {}), '(-E / (kb * temp))\n', (33321, 33339), True, 'import numpy as np\n'), ((36480, 36510), 'numpy.argmin', 'np.argmin', (['((times - time) ** 2)'], {}), '((times - time) ** 2)\n', (36489, 36510), True, 'import numpy as np\n'), ((37154, 37194), 'numpy.array', 'np.array', (['self.abunds[quant[1:]].columns'], {}), '(self.abunds[quant[1:]].columns)\n', (37162, 37194), True, 'import numpy as np\n'), ((44611, 44627), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (44624, 44627), True, 'import numpy as np\n'), ((44657, 44673), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (44670, 44673), True, 'import numpy as np\n'), ((23255, 23285), 'numpy.argmin', 'np.argmin', (['((times - time) ** 2)'], {}), '((times - time) ** 2)\n', (23264, 23285), 
True, 'import numpy as np\n'), ((23345, 23362), 'numpy.nanmean', 'np.nanmean', (['quant'], {}), '(quant)\n', (23355, 23362), True, 'import numpy as np\n'), ((36825, 36855), 'numpy.argmin', 'np.argmin', (['((times - time) ** 2)'], {}), '((times - time) ** 2)\n', (36834, 36855), True, 'import numpy as np\n'), ((37368, 37398), 'numpy.argmin', 'np.argmin', (['((times - time) ** 2)'], {}), '((times - time) ** 2)\n', (37377, 37398), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt

# Velocity field u = x, v = y(1 - y) sampled on a regular 100x100 grid.
x = np.linspace(0, 5, 100)
y = np.linspace(0, 2, 100)
X, Y = np.meshgrid(x, y)
U = X
V = Y * (1 - Y)
speed = np.sqrt(U * U + V * V)  # flow magnitude (not used by the plots below)
# Seed points for the highlighted streamlines.
start = [[0.3, 0.15], [0.3, 1], [0.3, 1.5], [3, 1.5]]
fig, ax = plt.subplots()
# Background field in a light blue, then emphasized lines from the seeds.
ax.streamplot(x, y, U, V, color=(0.75, 0.90, 0.93))
ax.streamplot(x, y, U, V, start_points=start, color="crimson", linewidth=2)
plt.show() | [
"numpy.sqrt",
"numpy.linspace",
"numpy.meshgrid",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((106, 123), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (117, 123), True, 'import numpy as np\n'), ((149, 171), 'numpy.sqrt', 'np.sqrt', (['(U * U + V * V)'], {}), '(U * U + V * V)\n', (156, 171), True, 'import numpy as np\n'), ((228, 242), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (240, 242), True, 'import matplotlib.pyplot as plt\n'), ((383, 393), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (391, 393), True, 'import matplotlib.pyplot as plt\n'), ((58, 80), 'numpy.linspace', 'np.linspace', (['(0)', '(5)', '(100)'], {}), '(0, 5, 100)\n', (69, 80), True, 'import numpy as np\n'), ((79, 101), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', '(100)'], {}), '(0, 2, 100)\n', (90, 101), True, 'import numpy as np\n')] |
# !pip install datasets transformers[sentencepiece]
# !apt install git-lfs
# !git config --global user.email "<EMAIL>"
# !git config --global user.name "<NAME>"
import torch
import seqeval
import numpy as np
from datasets import load_dataset
from datasets import load_metric
from transformers import AutoTokenizer
from transformers import DataCollatorForTokenClassification
from transformers import AutoModelForTokenClassification
from transformers import TrainingArguments
from transformers import Trainer
def main():
    """Fine-tune ``bert-base-cased`` for NER on the CoNLL-2003 dataset.

    Loads the dataset, tokenizes it with word/sub-word label alignment,
    fine-tunes with the Hugging Face ``Trainer`` and pushes checkpoints
    to the Hub (``push_to_hub=True``).
    """
    def check_cuda():
        # Report GPU availability; the torch.cuda calls only query the
        # device and their return values are intentionally discarded.
        if torch.cuda.is_available():
            torch.cuda.current_device()
            torch.cuda.device(0)
            torch.cuda.device_count()
            torch.cuda.get_device_name(0)
        else:
            print("No GPU Available.")
    check_cuda()
    # Load the raw CoNLL-2003 dataset (train/validation/test splits).
    raw_datasets = load_dataset("conll2003")
    # Feature object describing the NER tag column.
    ner_feature = raw_datasets["train"].features["ner_tags"]
    # Human-readable tag names (e.g. O, B-PER, I-PER, ...).
    label_names = ner_feature.feature.names
    # Pretrained checkpoint to fine-tune.
    model_checkpoint = "bert-base-cased"
    # Load the tokenizer matching the checkpoint.
    tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
    # Map word-level labels onto the sub-word tokens produced by the
    # tokenizer; positions labelled -100 are ignored by the loss.
    def align_labels_with_tokens(labels, word_ids):
        new_labels = []
        current_word = None
        for word_id in word_ids:
            if word_id != current_word:
                # Start of a new word!
                current_word = word_id
                label = -100 if word_id is None else labels[word_id]
                new_labels.append(label)
            elif word_id is None:
                # Special token
                new_labels.append(-100)
            else:
                # Same word as previous token
                label = labels[word_id]
                # If the label is B-XXX we change it to I-XXX
                if label % 2 == 1:
                    label += 1
                new_labels.append(label)
        return new_labels
    # Tokenize a batch of examples and attach the aligned labels.
    def tokenize_and_align_labels(examples):
        tokenized_inputs = tokenizer(
            examples["tokens"], truncation=True, is_split_into_words=True
        )
        all_labels = examples["ner_tags"]
        new_labels = []
        for i, labels in enumerate(all_labels):
            word_ids = tokenized_inputs.word_ids(i)
            new_labels.append(align_labels_with_tokens(labels, word_ids))
        tokenized_inputs["labels"] = new_labels
        return tokenized_inputs
    # Tokenize every split; drop the original columns so that only model
    # inputs remain in the mapped dataset.
    tokenized_datasets = raw_datasets.map(
        tokenize_and_align_labels,
        batched=True,
        remove_columns=raw_datasets["train"].column_names,
    )
    # Dynamically pads inputs AND labels to the longest sequence per batch.
    data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer)
    # seqeval scores at the entity level (precision/recall/F1).
    metric = load_metric("seqeval")
    # Convert logits to label names, strip ignored positions and score.
    def compute_metrics(eval_preds):
        logits, labels = eval_preds
        predictions = np.argmax(logits, axis=-1)
        # Remove ignored index (special tokens) and convert to labels
        true_labels = [[label_names[l] for l in label if l != -100] for label in labels]
        true_predictions = [
            [label_names[p] for (p, l) in zip(prediction, label) if l != -100]
            for prediction, label in zip(predictions, labels)
        ]
        all_metrics = metric.compute(predictions=true_predictions, references=true_labels)
        return {
            "precision": all_metrics["overall_precision"],
            "recall": all_metrics["overall_recall"],
            "f1": all_metrics["overall_f1"],
            "accuracy": all_metrics["overall_accuracy"],
        }
    # Label-id mappings stored in the model config (both directions).
    id2label = {str(i): label for i, label in enumerate(label_names)}
    label2id = {v: k for k, v in id2label.items()}
    # Token-classification head on top of the pretrained encoder.
    model = AutoModelForTokenClassification.from_pretrained(
        model_checkpoint,
        id2label=id2label,
        label2id=label2id,
    )
    # Training configuration: evaluate/save every epoch, upload to Hub.
    args = TrainingArguments(
        "bert-finetuned-ner",
        evaluation_strategy="epoch",
        save_strategy="epoch",
        learning_rate=2e-5,
        num_train_epochs=3,
        weight_decay=0.01,
        push_to_hub=True,
    )
    trainer = Trainer(
        model=model,
        args=args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["validation"],
        data_collator=data_collator,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
    )
    trainer.train()
# Run the full fine-tuning pipeline only when executed as a script.
if __name__ == '__main__':
    main()
| [
"torch.cuda.get_device_name",
"datasets.load_metric",
"transformers.TrainingArguments",
"torch.cuda.device",
"numpy.argmax",
"torch.cuda.device_count",
"transformers.AutoModelForTokenClassification.from_pretrained",
"torch.cuda.is_available",
"datasets.load_dataset",
"transformers.AutoTokenizer.fr... | [((849, 874), 'datasets.load_dataset', 'load_dataset', (['"""conll2003"""'], {}), "('conll2003')\n", (861, 874), False, 'from datasets import load_dataset\n'), ((1141, 1188), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_checkpoint'], {}), '(model_checkpoint)\n', (1170, 1188), False, 'from transformers import AutoTokenizer\n'), ((2740, 2795), 'transformers.DataCollatorForTokenClassification', 'DataCollatorForTokenClassification', ([], {'tokenizer': 'tokenizer'}), '(tokenizer=tokenizer)\n', (2774, 2795), False, 'from transformers import DataCollatorForTokenClassification\n'), ((2823, 2845), 'datasets.load_metric', 'load_metric', (['"""seqeval"""'], {}), "('seqeval')\n", (2834, 2845), False, 'from datasets import load_metric\n'), ((3838, 3946), 'transformers.AutoModelForTokenClassification.from_pretrained', 'AutoModelForTokenClassification.from_pretrained', (['model_checkpoint'], {'id2label': 'id2label', 'label2id': 'label2id'}), '(model_checkpoint, id2label=\n id2label, label2id=label2id)\n', (3885, 3946), False, 'from transformers import AutoModelForTokenClassification\n'), ((4016, 4193), 'transformers.TrainingArguments', 'TrainingArguments', (['"""bert-finetuned-ner"""'], {'evaluation_strategy': '"""epoch"""', 'save_strategy': '"""epoch"""', 'learning_rate': '(2e-05)', 'num_train_epochs': '(3)', 'weight_decay': '(0.01)', 'push_to_hub': '(True)'}), "('bert-finetuned-ner', evaluation_strategy='epoch',\n save_strategy='epoch', learning_rate=2e-05, num_train_epochs=3,\n weight_decay=0.01, push_to_hub=True)\n", (4033, 4193), False, 'from transformers import TrainingArguments\n'), ((4263, 4476), 'transformers.Trainer', 'Trainer', ([], {'model': 'model', 'args': 'args', 'train_dataset': "tokenized_datasets['train']", 'eval_dataset': "tokenized_datasets['validation']", 'data_collator': 'data_collator', 'compute_metrics': 'compute_metrics', 'tokenizer': 'tokenizer'}), "(model=model, args=args, 
train_dataset=tokenized_datasets['train'],\n eval_dataset=tokenized_datasets['validation'], data_collator=\n data_collator, compute_metrics=compute_metrics, tokenizer=tokenizer)\n", (4270, 4476), False, 'from transformers import Trainer\n'), ((556, 581), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (579, 581), False, 'import torch\n'), ((2963, 2989), 'numpy.argmax', 'np.argmax', (['logits'], {'axis': '(-1)'}), '(logits, axis=-1)\n', (2972, 2989), True, 'import numpy as np\n'), ((595, 622), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (620, 622), False, 'import torch\n'), ((635, 655), 'torch.cuda.device', 'torch.cuda.device', (['(0)'], {}), '(0)\n', (652, 655), False, 'import torch\n'), ((668, 693), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (691, 693), False, 'import torch\n'), ((706, 735), 'torch.cuda.get_device_name', 'torch.cuda.get_device_name', (['(0)'], {}), '(0)\n', (732, 735), False, 'import torch\n')] |
from os.path import join
import numpy as np
import matplotlib as mpl
# For headless environments
mpl.use('Agg') # NOQA
import matplotlib.pyplot as plt
# Identifier for this task; presumably used as a command/registry key by
# callers outside this file -- confirm against the dispatching code.
PLOT_CURVES = 'plot_curves'
def plot_curves(run_path):
    """Plot the training and validation accuracy over epochs.

    Reads ``log.txt`` (CSV with a header row) from the run directory and
    saves the resulting figure as ``accuracy.pdf`` next to it.

    # Arguments
        run_path: the path to the files for a run
    """
    log = np.genfromtxt(join(run_path, 'log.txt'), delimiter=',', skip_header=1)
    epochs = log[:, 0]
    train_acc = log[:, 1]
    val_acc = log[:, 3]
    plt.figure()
    plt.title('Training Log')
    plt.xlabel('Epochs')
    plt.ylabel('Accuracy')
    plt.grid()
    plt.plot(epochs, train_acc, '-', label='Training')
    plt.plot(epochs, val_acc, '--', label='Validation')
    plt.legend(loc='best')
    plt.savefig(join(run_path, 'accuracy.pdf'), format='pdf', dpi=300)
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.use",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"os.path.join",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.genfromtxt",
"matplotlib.pyplot.legend"
] | [((98, 112), 'matplotlib.use', 'mpl.use', (['"""Agg"""'], {}), "('Agg')\n", (105, 112), True, 'import matplotlib as mpl\n'), ((362, 387), 'os.path.join', 'join', (['run_path', '"""log.txt"""'], {}), "(run_path, 'log.txt')\n", (366, 387), False, 'from os.path import join\n'), ((399, 452), 'numpy.genfromtxt', 'np.genfromtxt', (['log_path'], {'delimiter': '""","""', 'skip_header': '(1)'}), "(log_path, delimiter=',', skip_header=1)\n", (412, 452), True, 'import numpy as np\n'), ((525, 537), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (535, 537), True, 'import matplotlib.pyplot as plt\n'), ((542, 567), 'matplotlib.pyplot.title', 'plt.title', (['"""Training Log"""'], {}), "('Training Log')\n", (551, 567), True, 'import matplotlib.pyplot as plt\n'), ((572, 592), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (582, 592), True, 'import matplotlib.pyplot as plt\n'), ((597, 619), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (607, 619), True, 'import matplotlib.pyplot as plt\n'), ((625, 635), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (633, 635), True, 'import matplotlib.pyplot as plt\n'), ((640, 684), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'acc', '"""-"""'], {'label': '"""Training"""'}), "(epochs, acc, '-', label='Training')\n", (648, 684), True, 'import matplotlib.pyplot as plt\n'), ((689, 740), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'val_acc', '"""--"""'], {'label': '"""Validation"""'}), "(epochs, val_acc, '--', label='Validation')\n", (697, 740), True, 'import matplotlib.pyplot as plt\n'), ((746, 768), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (756, 768), True, 'import matplotlib.pyplot as plt\n'), ((789, 819), 'os.path.join', 'join', (['run_path', '"""accuracy.pdf"""'], {}), "(run_path, 'accuracy.pdf')\n", (793, 819), False, 'from os.path import join\n'), ((824, 873), 'matplotlib.pyplot.savefig', 
'plt.savefig', (['accuracy_path'], {'format': '"""pdf"""', 'dpi': '(300)'}), "(accuracy_path, format='pdf', dpi=300)\n", (835, 873), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
from PIL import Image
import skimage
import skimage.transform
import scipy.io as io
import matplotlib.pyplot as plt
import utils.common_utils as cu
import scipy.io
def load_data(path, f):
    """Load an image, downsample by integer factor ``f``, scale to max 1."""
    raw = np.array(Image.open(path))
    resized = skimage.transform.resize(
        raw, (raw.shape[0] // f, raw.shape[1] // f), anti_aliasing=True)
    return resized / np.max(resized)
def load_mask(path, target_shape):
    """Load the shutter-indicator mask, center-crop it and resize it."""
    mask = io.loadmat(path)['shutter_indicator']
    # Crop away a quarter on each side in both spatial dimensions
    # (``-n // 4`` reproduces the original floor-division slicing).
    row_lo, row_hi = mask.shape[0] // 4, -mask.shape[0] // 4
    col_lo, col_hi = mask.shape[1] // 4, -mask.shape[1] // 4
    mask = mask[row_lo:row_hi, col_lo:col_hi, :]
    return skimage.transform.resize(
        mask, (target_shape[0], target_shape[1]), anti_aliasing=True)
def load_simulated():
    """Load the simulated measurement, PSF and shutter mask from disk."""
    ds_factor = 16  # PSF downsampling factor
    mask = scipy.io.loadmat('data/single_shot_video/shutter_ds.mat')['shutter_indicator'][1:-2, ...]
    meas = scipy.io.loadmat('data/single_shot_video/meas_simulated.mat')['im']
    # Center-crop the mask to the measurement footprint.
    mask = mask[meas.shape[0] // 2:-meas.shape[0] // 2,
                meas.shape[1] // 2:-meas.shape[1] // 2]
    # Drop the first row and keep the second channel of the loaded PSF.
    psf = load_data('data/single_shot_video/psf.tif', ds_factor)[1:][..., 1]
    return meas, psf, mask
def preplot(recons):
    """Convert a tensor to channel-last numpy, normalise, center-crop."""
    arr = cu.ts_to_np(recons).transpose(1, 2, 0)
    arr /= np.max(arr)
    rows, cols = arr.shape[0], arr.shape[1]
    # ``-n // 4`` matches the original floor-division crop indices.
    return arr[rows // 4:-rows // 4, cols // 4:-cols // 4]
def preplot2(recons):
    """Move the leading two axes last, normalise by max and clip to [0, 1]."""
    arr = cu.ts_to_np(recons).transpose(2, 3, 0, 1)
    arr /= np.max(arr)
    return np.clip(arr, 0, 1)
def plot(channel, recons):
    """Show the mean projection of one channel's reconstruction."""
    frames = preplot(recons)
    plt.imshow(np.mean(frames, -1), cmap='gray')
    plt.title('Reconstruction: channel %d mean projection' % channel)
    plt.show()
def plot3d(recons):
    """Show the mean projection of the first reconstruction in the batch."""
    arr = recons[0].detach().cpu().numpy().transpose(2, 3, 0, 1)
    plt.imshow(np.mean(arr, -1))
    plt.title('Reconstruction: mean projection')
    plt.show()
def plot_slider(x, frames=None):
    """Display frame ``x`` of a 4-D video array.

    Args:
        x (int): index of the frame to show (indexed on the last axis).
        frames (ndarray, optional): video data indexed as
            ``frames[:, :, :, x]``. When omitted, falls back to the
            module-level ``video`` global for backward compatibility.
    """
    if frames is None:
        # NOTE(review): the original relied on a global ``video`` that is
        # not defined anywhere in this module -- the caller must create it.
        frames = video
    plt.title('Reconstruction: frame %d' % (x))
    plt.axis('off')
    plt.imshow(frames[:, :, :, x])
return x | [
"numpy.clip",
"matplotlib.pyplot.imshow",
"numpy.mean",
"PIL.Image.open",
"utils.common_utils.ts_to_np",
"scipy.io.loadmat",
"numpy.max",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.title",
"skimage.transform.resize",
"matplotlib.pyplot.show"
] | [((255, 348), 'skimage.transform.resize', 'skimage.transform.resize', (['img', '(img.shape[0] // f, img.shape[1] // f)'], {'anti_aliasing': '(True)'}), '(img, (img.shape[0] // f, img.shape[1] // f),\n anti_aliasing=True)\n', (279, 348), False, 'import skimage\n'), ((353, 364), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (359, 364), True, 'import numpy as np\n'), ((588, 681), 'skimage.transform.resize', 'skimage.transform.resize', (['shutter', '(target_shape[0], target_shape[1])'], {'anti_aliasing': '(True)'}), '(shutter, (target_shape[0], target_shape[1]),\n anti_aliasing=True)\n', (612, 681), False, 'import skimage\n'), ((1251, 1265), 'numpy.max', 'np.max', (['recons'], {}), '(recons)\n', (1257, 1265), True, 'import numpy as np\n'), ((1452, 1466), 'numpy.max', 'np.max', (['recons'], {}), '(recons)\n', (1458, 1466), True, 'import numpy as np\n'), ((1480, 1501), 'numpy.clip', 'np.clip', (['recons', '(0)', '(1)'], {}), '(recons, 0, 1)\n', (1487, 1501), True, 'import numpy as np\n'), ((1701, 1766), 'matplotlib.pyplot.title', 'plt.title', (["('Reconstruction: channel %d mean projection' % channel)"], {}), "('Reconstruction: channel %d mean projection' % channel)\n", (1710, 1766), True, 'import matplotlib.pyplot as plt\n'), ((1771, 1781), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1779, 1781), True, 'import matplotlib.pyplot as plt\n'), ((1979, 2023), 'matplotlib.pyplot.title', 'plt.title', (['"""Reconstruction: mean projection"""'], {}), "('Reconstruction: mean projection')\n", (1988, 2023), True, 'import matplotlib.pyplot as plt\n'), ((2028, 2038), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2036, 2038), True, 'import matplotlib.pyplot as plt\n'), ((2063, 2104), 'matplotlib.pyplot.title', 'plt.title', (["('Reconstruction: frame %d' % x)"], {}), "('Reconstruction: frame %d' % x)\n", (2072, 2104), True, 'import matplotlib.pyplot as plt\n'), ((2109, 2124), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2117, 
2124), True, 'import matplotlib.pyplot as plt\n'), ((2129, 2158), 'matplotlib.pyplot.imshow', 'plt.imshow', (['video[:, :, :, x]'], {}), '(video[:, :, :, x])\n', (2139, 2158), True, 'import matplotlib.pyplot as plt\n'), ((227, 243), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (237, 243), False, 'from PIL import Image\n'), ((429, 445), 'scipy.io.loadmat', 'io.loadmat', (['path'], {}), '(path)\n', (439, 445), True, 'import scipy.io as io\n'), ((1664, 1683), 'numpy.mean', 'np.mean', (['recons', '(-1)'], {}), '(recons, -1)\n', (1671, 1683), True, 'import numpy as np\n'), ((1955, 1974), 'numpy.mean', 'np.mean', (['recons', '(-1)'], {}), '(recons, -1)\n', (1962, 1974), True, 'import numpy as np\n'), ((1200, 1219), 'utils.common_utils.ts_to_np', 'cu.ts_to_np', (['recons'], {}), '(recons)\n', (1211, 1219), True, 'import utils.common_utils as cu\n'), ((1399, 1418), 'utils.common_utils.ts_to_np', 'cu.ts_to_np', (['recons'], {}), '(recons)\n', (1410, 1418), True, 'import utils.common_utils as cu\n')] |
"""
Packing module
==============
:synopsis: Prepares packed spheres for tessellation.
.. moduleauthor:: <NAME> <<EMAIL>>
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from __future__ import division, print_function
import struct
import os
import time
import random
import subprocess
import numpy as np
import pandas as pd
from scipy.stats import lognorm
import matplotlib.pyplot as plt
import spack
def simple_packing(diam):
    """Simple and fast algorithm for packing.

    Spheres are placed smallest-first at uniformly random positions inside
    a cubic domain; a draw is rejected (and redrawn) whenever the sphere
    would cross the domain boundary or overlap an already placed sphere.
    Can lead to an infinite loop, thus it raises Exception after 10 s.
    Use of this algorithm at this stage is discouraged.

    Args:
        diam (array_like): sphere diameters (ndarray or plain sequence)
    Returns:
        DataFrame: center positions and diameters of spheres
    Raises:
        Exception: when running for more than 10 s
    """
    number_of_cells = len(diam)
    # Accept plain lists too -- make_csd returns a list when shape == 0.
    rads = np.asarray(diam, dtype=float) / 2
    rads.sort()
    # Domain edge length: total bounding-box volume with 40 % headroom.
    vol = 1.40 * np.sum((2 * rads)**3)
    lch = vol**(1.00 / 3.00)
    centers = np.zeros((number_of_cells, 3))
    finished = False
    while not finished:
        j = -1
        timeout = time.time() + 10
        while number_of_cells >= j:
            if time.time() > timeout:
                raise Exception('Timed out!')
            j = j + 1
            if j == number_of_cells:
                finished = True
                break
            # Draw positions until the whole sphere lies inside the
            # domain (the previous condition was inverted and accepted
            # only out-of-bounds positions).
            while True:
                pick_x = lch * random.random()
                pick_y = lch * random.random()
                pick_z = lch * random.random()
                if (rads[j] < pick_x <= lch - rads[j] and
                        rads[j] < pick_y <= lch - rads[j] and
                        rads[j] < pick_z <= lch - rads[j]):
                    break
                if time.time() > timeout:
                    raise Exception('Timed out!')
            # Assign the whole row (the original typo wrote the x slot
            # three times, leaving y and z stale).
            centers[j] = (pick_x, pick_y, pick_z)
            # The new sphere must not overlap any already placed sphere;
            # on overlap, reset this row and retry the same sphere.
            for i in range(j):
                dist = ((pick_x - centers[i][0])**2 +
                        (pick_y - centers[i][1])**2 +
                        (pick_z - centers[i][2])**2)**0.5
                if dist - (rads[j] + rads[i]) < 0:
                    centers[j] = (0.0, 0.0, 0.0)
                    j = j - 1
                    break
    dtf = pd.DataFrame(centers, columns=('x', 'y', 'z'))
    dtf['d'] = 2 * rads
    return dtf
def create_input(npart, domain=1.0):
    """Create input file for packing-generation program.

    Writes a ``generation.conf`` file with default generation settings
    for the requested number of spheres and (cubic) domain size.

    Args:
        npart (int): number of spheres
        domain (float, optional): size of domain
    """
    lines = [
        'Particles count: {0}'.format(npart),
        'Packing size: {0} {0} {0}'.format(domain),
        'Generation start: 1',
        'Seed: 341',
        'Steps to write: 1000',
        'Boundaries mode: 1',
        'Contraction rate: 1.328910e-005',
    ]
    with open('generation.conf', 'w') as fout:
        fout.write('\n'.join(lines) + '\n')
def make_csd(shape, scale, npart):
    """Create cell size distribution and save it to file.

    Log-normal distribution from scipy is used. Creates ``diameters.txt``
    file with sphere diameters. A ``shape`` of zero yields a monodisperse
    distribution (every diameter equal to ``scale``).

    Args:
        shape (float): shape size parameter of log-normal distribution
        scale (float): scale size parameter of log-normal distribution
        npart (int): number of spheres

    Returns:
        ndarray: array of sphere diameters
    """
    if shape == 0:
        # Degenerate (monodisperse) case: lognorm.rvs rejects shape == 0.
        # Return an ndarray (not a list) as the docstring promises, so that
        # downstream array arithmetic on the result works.
        diam = np.full(npart, float(scale))
    else:
        diam = lognorm.rvs(shape, scale=scale, size=npart)
    with open('diameters.txt', 'w') as fout:
        for rad in diam:
            fout.write('{0}\n'.format(rad))
    return diam
def save_csd(fname, diam, shape, scale, show_plot=False):
    """Save cell size distribution plot.

    Writes ``*Packing_histogram.png`` and ``*Packing_histogram.pdf`` with a
    histogram of the realised diameters and the continuous log-normal
    probability density they were drawn from.

    Args:
        fname (str): base filename
        diam (ndarray): array of sphere diameters
        shape (float): shape size parameter of log-normal distribution
        scale (float): scale size parameter of log-normal distribution
        show_plot (bool, optional): create window with plot
    """
    if shape == 0:
        # Monodisperse case: pick a symmetric window around the scale.
        grid = np.linspace(scale / 2, scale * 2, 100)
    else:
        lo = lognorm.ppf(0.01, shape, scale=scale)
        hi = lognorm.ppf(0.99, shape, scale=scale)
        grid = np.linspace(lo, hi, 100)
    plt.figure(figsize=(12, 8))
    plt.rcParams.update({'font.size': 16})
    plt.plot(grid, lognorm.pdf(grid, shape, scale=scale), lw=3, label='input')
    plt.hist(diam, density=True, label='spheres')
    plt.grid()
    plt.xlabel('Size')
    plt.ylabel('Probability density function')
    plt.legend()
    plt.savefig(fname + 'Packing_histogram.png', dpi=300)
    plt.savefig(fname + 'Packing_histogram.pdf')
    if show_plot:
        plt.show()
def read_results():
    """Read results of packing algorithm.

    Packing results are read from ``packing.nfo`` (porosities) and
    ``packing.xyzd`` (binary little-endian doubles: x, y, z, d per sphere).
    Diameters are rescaled so that the packing attains the final porosity.

    Returns:
        DataFrame: center positions and diameters of spheres
    """
    with open("packing.nfo", "r") as fin:
        # Skip the two header lines; porosities sit in the third token.
        fin.readline()
        fin.readline()
        por_theory = float(fin.readline().split()[2])
        por_final = float(fin.readline().split()[2])
    print('Theoretical porosity:', por_theory)
    print('Final porosity:', por_final)
    with open("packing.xyzd", "rb") as fin:
        btxt = fin.read()
    vals = list(struct.unpack("<" + "d" * (len(btxt) // 8), btxt))
    data = pd.DataFrame(np.reshape(vals, (-1, 4)),
                        columns=('x', 'y', 'z', 'd'))
    # Shrink/grow diameters to convert theoretical porosity into final.
    data['d'] = data['d'] * ((1 - por_final) / (1 - por_theory))**(1 / 3)
    return data
def render_packing(fname, data, domain=1.0, pixels=1000):
    """Save picture of packed domain.

    Uses `spack <https://pyspack.readthedocs.io/en/latest/>`_. Creates
    ``*Packing.png`` file.

    Args:
        fname (str): base filename
        data (DataFrame): center positions and diameters of spheres
        domain (float, optional): size of domain
        pixels (int, optional): picture resolution
    """
    packing = spack.Packing(data[['x', 'y', 'z']], data['d'], L=domain)
    print(packing.contacts())
    view = packing.scene(
        rot=np.pi / 4,
        camera_height=0.5,
        camera_dist=2.5e1,
        angle=4,
        cmap='autumn',
        floater_color=None,
    )
    outfile = fname + 'Packing.png'
    view.render(outfile, width=pixels, height=pixels, antialiasing=0.0001)
def generate_structure(flag):
    """Run the packing-generation program and wait for it to finish.

    ``PackingGeneration.exe`` must exist. ``generation.conf`` must exist.

    Args:
        flag (str): argument to be passed to packing-generation program
    """
    # A stale info file from a previous run would make callers believe
    # this run succeeded, so remove it before starting.
    if os.path.isfile("packing.nfo"):
        os.remove("packing.nfo")
    proc = subprocess.Popen(['PackingGeneration.exe', flag])
    proc.wait()
def clean_files():
    """Delete intermediate files left behind by the packing workflow."""
    leftovers = (
        'contraction_energies.txt',
        'diameters.txt',
        'generation.conf',
        'packing_init.xyzd',
        'packing.nfo',
        'packing_prev.xyzd',
        'packing.xyzd',
    )
    for name in leftovers:
        if os.path.exists(name):
            os.remove(name)
def pack_spheres(fname, shape, scale, number_of_cells, algorithm, maxit,
                 render, clean):
    """Pack spheres into a periodic domain.

    Creates a file ending ``Packing.csv`` with sphere centers and radii. The
    simple model is implemented directly; other algorithms use Vasili
    Baranov's `code <https://github.com/VasiliBaranov/packing-generation>`_.

    Args:
        fname (str): base filename
        shape (float): shape size parameter of log-normal distribution
        scale (float): scale size parameter of log-normal distribution
        number_of_cells (int): number of spheres
        algorithm (str): name of packing algorithm
        maxit (int): number of tries for packing algorithm
        render (bool): save picture of packing if True
        clean (bool): delete redundant files if True

    Raises:
        Exception: when maximum number of iterations was reached
    """
    if algorithm == 'simple':
        diam = make_csd(shape, scale, number_of_cells)
        data = simple_packing(diam)
    else:
        create_input(number_of_cells)
        # Retry the external generator until packing.nfo appears (= success).
        for attempt in range(maxit):
            print('Iteration: {}'.format(attempt + 1))
            diam = make_csd(shape, scale, number_of_cells)
            generate_structure('-' + algorithm)
            if os.path.isfile("packing.nfo"):
                break
        else:
            # No attempt produced a result file.
            raise Exception(
                'Packing algorithm failed. ' +
                'Try to change number of particles or size distribution.')
        data = read_results()
    save_csd(fname, diam, shape, scale)
    data.to_csv(fname + 'Packing.csv', index=None)
    if render:
        render_packing(fname, data)
    if clean:
        clean_files()
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.ylabel",
"scipy.stats.lognorm.rvs",
"os.remove",
"os.path.exists",
"numpy.reshape",
"subprocess.Popen",
"matplotlib.pyplot.xlabel",
"scipy.stats.lognorm.ppf",
"numpy.linspace",
"pandas.DataFrame",
"matplotlib.pyplot.savef... | [((1045, 1075), 'numpy.zeros', 'np.zeros', (['(number_of_cells, 3)'], {}), '((number_of_cells, 3))\n', (1053, 1075), True, 'import numpy as np\n'), ((2548, 2594), 'pandas.DataFrame', 'pd.DataFrame', (['centers'], {'columns': "('x', 'y', 'z')"}), "(centers, columns=('x', 'y', 'z'))\n", (2560, 2594), True, 'import pandas as pd\n'), ((4772, 4799), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (4782, 4799), True, 'import matplotlib.pyplot as plt\n'), ((4805, 4843), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 16}"], {}), "({'font.size': 16})\n", (4824, 4843), True, 'import matplotlib.pyplot as plt\n'), ((4929, 4974), 'matplotlib.pyplot.hist', 'plt.hist', (['diam'], {'density': '(True)', 'label': '"""spheres"""'}), "(diam, density=True, label='spheres')\n", (4937, 4974), True, 'import matplotlib.pyplot as plt\n'), ((4980, 4990), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (4988, 4990), True, 'import matplotlib.pyplot as plt\n'), ((4996, 5014), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Size"""'], {}), "('Size')\n", (5006, 5014), True, 'import matplotlib.pyplot as plt\n'), ((5020, 5062), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability density function"""'], {}), "('Probability density function')\n", (5030, 5062), True, 'import matplotlib.pyplot as plt\n'), ((5068, 5080), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5078, 5080), True, 'import matplotlib.pyplot as plt\n'), ((5086, 5139), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fname + 'Packing_histogram.png')"], {'dpi': '(300)'}), "(fname + 'Packing_histogram.png', dpi=300)\n", (5097, 5139), True, 'import matplotlib.pyplot as plt\n'), ((5145, 5189), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(fname + 'Packing_histogram.pdf')"], {}), "(fname + 'Packing_histogram.pdf')\n", (5156, 5189), True, 'import matplotlib.pyplot as plt\n'), ((5775, 5817), 
'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "('x', 'y', 'z', 'd')"}), "(columns=('x', 'y', 'z', 'd'))\n", (5787, 5817), True, 'import pandas as pd\n'), ((6612, 6669), 'spack.Packing', 'spack.Packing', (["data[['x', 'y', 'z']]", "data['d']"], {'L': 'domain'}), "(data[['x', 'y', 'z']], data['d'], L=domain)\n", (6625, 6669), False, 'import spack\n'), ((7227, 7256), 'os.path.isfile', 'os.path.isfile', (['"""packing.nfo"""'], {}), "('packing.nfo')\n", (7241, 7256), False, 'import os\n'), ((3772, 3815), 'scipy.stats.lognorm.rvs', 'lognorm.rvs', (['shape'], {'scale': 'scale', 'size': 'npart'}), '(shape, scale=scale, size=npart)\n', (3783, 3815), False, 'from scipy.stats import lognorm\n'), ((4578, 4616), 'numpy.linspace', 'np.linspace', (['(scale / 2)', '(scale * 2)', '(100)'], {}), '(scale / 2, scale * 2, 100)\n', (4589, 4616), True, 'import numpy as np\n'), ((4864, 4901), 'scipy.stats.lognorm.pdf', 'lognorm.pdf', (['xpos', 'shape'], {'scale': 'scale'}), '(xpos, shape, scale=scale)\n', (4875, 4901), False, 'from scipy.stats import lognorm\n'), ((5218, 5228), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5226, 5228), True, 'import matplotlib.pyplot as plt\n'), ((7267, 7291), 'os.remove', 'os.remove', (['"""packing.nfo"""'], {}), "('packing.nfo')\n", (7276, 7291), False, 'import os\n'), ((7672, 7691), 'os.path.exists', 'os.path.exists', (['fil'], {}), '(fil)\n', (7686, 7691), False, 'import os\n'), ((1158, 1169), 'time.time', 'time.time', ([], {}), '()\n', (1167, 1169), False, 'import time\n'), ((4656, 4693), 'scipy.stats.lognorm.ppf', 'lognorm.ppf', (['(0.01)', 'shape'], {'scale': 'scale'}), '(0.01, shape, scale=scale)\n', (4667, 4693), False, 'from scipy.stats import lognorm\n'), ((4723, 4760), 'scipy.stats.lognorm.ppf', 'lognorm.ppf', (['(0.99)', 'shape'], {'scale': 'scale'}), '(0.99, shape, scale=scale)\n', (4734, 4760), False, 'from scipy.stats import lognorm\n'), ((5990, 6014), 'numpy.reshape', 'np.reshape', (['txt', '(-1, 4)'], {}), '(txt, (-1, 
4))\n', (6000, 6014), True, 'import numpy as np\n'), ((7297, 7346), 'subprocess.Popen', 'subprocess.Popen', (["['PackingGeneration.exe', flag]"], {}), "(['PackingGeneration.exe', flag])\n", (7313, 7346), False, 'import subprocess\n'), ((7706, 7720), 'os.remove', 'os.remove', (['fil'], {}), '(fil)\n', (7715, 7720), False, 'import os\n'), ((9036, 9065), 'os.path.isfile', 'os.path.isfile', (['"""packing.nfo"""'], {}), "('packing.nfo')\n", (9050, 9065), False, 'import os\n'), ((9106, 9135), 'os.path.isfile', 'os.path.isfile', (['"""packing.nfo"""'], {}), "('packing.nfo')\n", (9120, 9135), False, 'import os\n'), ((1228, 1239), 'time.time', 'time.time', ([], {}), '()\n', (1237, 1239), False, 'import time\n'), ((1479, 1494), 'random.random', 'random.random', ([], {}), '()\n', (1492, 1494), False, 'import random\n'), ((1523, 1538), 'random.random', 'random.random', ([], {}), '()\n', (1536, 1538), False, 'import random\n'), ((1567, 1582), 'random.random', 'random.random', ([], {}), '()\n', (1580, 1582), False, 'import random\n'), ((1787, 1802), 'random.random', 'random.random', ([], {}), '()\n', (1800, 1802), False, 'import random\n'), ((1835, 1850), 'random.random', 'random.random', ([], {}), '()\n', (1848, 1850), False, 'import random\n'), ((1883, 1898), 'random.random', 'random.random', ([], {}), '()\n', (1896, 1898), False, 'import random\n')] |
import torch
import torch.nn as nn
import numpy as np
import torch.distributions as TD
import scipy
import scipy.linalg
from copy import deepcopy
from multipledispatch import dispatch
from collections import Iterable
import sdepy
from .em import batchItoEuler
from .em_proxrec import torchBatchItoEulerProxrec
class OU_distrib_modeler:
    r"""Models the marginal distribution X(t) of the Ornstein-Uhlenbeck process

        dX(t) = -\grad \frac{1}{2}(x - b)^T A (x - b) dt + \sqrt{2 \beta^{-1}} dW(t)

    where ``b`` is an n-dim vector, ``A`` an invertible symmetric (n x n)
    matrix, ``beta`` a positive scalar and ``W(t)`` a standard n-dim Wiener
    process.
    """

    def _U_rotate(self, M):
        # Rotate back from the Schur basis: U M U^H. A 1-D input is treated
        # as the diagonal of a matrix.
        mat = np.diag(M) if len(M.shape) == 1 else M
        return self.U @ mat @ self.U.conj().T

    def __init__(self, A, b, beta):
        # Accept torch tensors as well as array-likes.
        if isinstance(A, torch.Tensor):
            A = A.detach().cpu()
        if isinstance(b, torch.Tensor):
            b = b.detach().cpu()
        self.A = np.asarray(A)
        self.b = np.asarray(b)
        self.beta = beta
        assert self.A.shape[0] == self.A.shape[1], 'matrix A must be square'
        # Symmetrize to wash out numerical asymmetry.
        self.A = 0.5 * (self.A + self.A.T)
        assert self.b.shape[0] == self.A.shape[0], 'b an A dimensions must coincide'
        assert np.linalg.matrix_rank(self.A, tol=1e-6) == self.A.shape[0], 'matrix A must have full rank'
        self.theta = self.A
        # Schur decomposition of the (symmetric) drift matrix: theta = U T U^H
        # with T diagonal; only the diagonal is kept.
        T, U = scipy.linalg.schur(self.theta)
        self.T = np.diagonal(T)
        self.U = U

    def _get_add_params(self, t):
        # X(t) | X(0) = x0 has mean  scale @ x0 + add.
        scale_param = self._U_rotate(np.exp(-self.T * t))
        add_param = (np.eye(self.b.shape[0]) - scale_param).dot(self.b)
        return scale_param, add_param

    def _get_var_param(self, t):
        # Covariance of X(t) | X(0): (2/beta) U diag((1 - e^{-2Tt}) / (2T)) U^H.
        decay = (1. - np.exp(-2. * self.T * t)) / (2. * self.T)
        return 2. * (1. / self.beta) * self._U_rotate(decay)

    def get_distrib_params(self, X_0, t, dtype=torch.float32, device='cpu'):
        """Return (mean, covariance) of X(t) | X(0)=X_0 as torch tensors."""
        X_0 = np.asarray(X_0)
        scale_param, add_param = self._get_add_params(t)
        mean = scale_param.dot(X_0) + add_param
        var = self._get_var_param(t)
        trc_mean = torch.tensor(mean, dtype=dtype).to(device)
        trc_var = torch.tensor(var, dtype=dtype).to(device)
        return trc_mean, trc_var

    def get_distrib(self, X_0, t, dtype=torch.float32, device='cpu'):
        """Return X(t) | X(0)=X_0 as a torch MultivariateNormal."""
        mean, var = self.get_distrib_params(X_0, t, dtype=dtype, device=device)
        return TD.MultivariateNormal(mean, var)
def get_normal_distrib_params(mvnormal_distribution):
    """Return (mean, covariance matrix) of a torch Normal / MultivariateNormal.

    A univariate Normal yields a 1x1 covariance; a multivariate (diagonal)
    Normal yields the corresponding diagonal covariance matrix.
    """
    assert isinstance(mvnormal_distribution, (TD.Normal, TD.MultivariateNormal))
    mean = mvnormal_distribution.mean
    if isinstance(mvnormal_distribution, TD.MultivariateNormal):
        return mean, mvnormal_distribution.covariance_matrix
    variances = mvnormal_distribution.scale ** 2
    if variances.size(0) == 1:
        return mean, variances.view(1, 1)
    return mean, torch.diag(variances)
def create_ou_distrib_modeler(mvnormal_distribution, beta=1.0):
    """Build an OU_distrib_modeler whose stationary law is the given Gaussian.

    The drift matrix is A = (beta * Var)^{-1}, so the stationary covariance
    (1/beta) A^{-1} reproduces the distribution's covariance.

    Args:
        mvnormal_distribution: torch Normal / MultivariateNormal target.
        beta: positive diffusion magnitude.

    Returns:
        OU_distrib_modeler centered at the distribution's mean.
    """
    mean, var = get_normal_distrib_params(mvnormal_distribution)
    # Out-of-place multiply: the old `var *= beta` mutated the tensor in
    # place, which is unsafe if it ever aliases the distribution's buffers.
    scaled_var = var * beta
    return OU_distrib_modeler(torch.inverse(scaled_var), mean, beta)
class OU_tDeterministic(TD.MultivariateNormal):
    """Distribution of X(t) for an OU process started at the fixed point ``X_0``."""

    @staticmethod
    def _get_params(X_0, ou_distrib_modeler, t):
        # Ask the modeler for (mean, covariance), matching X_0's dtype/device.
        return ou_distrib_modeler.get_distrib_params(
            X_0, t, dtype=X_0.dtype, device=X_0.device)

    def __init__(self, X_0, ou_distrib_modeler, t):
        mean, covariance = self._get_params(X_0, ou_distrib_modeler, t)
        super().__init__(mean, covariance)
class OU_tNormal(TD.MultivariateNormal):
    """Marginal distribution of X(t) when X(0) is itself (multivariate) normal.

    With X(0) ~ N(b, A) and the transition law X(t) | X(0)=x ~ N(F x + g, Sigma)
    supplied by the OU modeler, the time-t marginal is again Gaussian; its
    parameters are assembled below from the Gaussian marginalization
    identities in information form.
    """

    @staticmethod
    def _get_params(init_distrib, ou_distrib_modeler, t):
        assert isinstance(init_distrib, (TD.Normal, TD.MultivariateNormal))
        b, A = get_normal_distrib_params(init_distrib)
        i_A = torch.inverse(A)
        dtype, device = b.dtype, b.device
        # Transition parameters come back as numpy arrays; move them to torch.
        F_np, g_np = ou_distrib_modeler._get_add_params(t)
        F = torch.tensor(F_np, dtype=dtype).to(device)
        g = torch.tensor(g_np, dtype=dtype).to(device)
        Sigma = torch.tensor(ou_distrib_modeler._get_var_param(t), dtype=dtype).to(device)
        i_Sigma = torch.inverse(Sigma)
        # Information-form combination of prior N(b, A) and transition noise.
        i_Xi = F.T @ i_Sigma @ F + i_A
        Xi = torch.inverse(i_Xi)
        Psi = i_Sigma - i_Sigma @ F @ Xi @ F.T @ i_Sigma
        i_Psi = torch.inverse(Psi)
        phi = i_Sigma @ F @ Xi @ i_A @ b
        _mean = g + i_Psi @ phi
        return _mean, i_Psi

    def __init__(self, init_distrib, ou_distrib_modeler, t):
        params = self._get_params(init_distrib, ou_distrib_modeler, t)
        super().__init__(*params)
class OU_tMixtureNormal(TD.MixtureSameFamily):
    # Marginal distribution of X(t) when X(0) is a mixture of Gaussians: each
    # component is propagated through the OU dynamics independently while the
    # mixture weights stay fixed. The two __init__ overloads are selected at
    # runtime by multipledispatch based on argument types.

    @dispatch(TD.Distribution, OU_distrib_modeler, object)
    def __init__(self, init_distrib, ou_distrib_modeler, t):
        # Overload 1: unpack a MixtureSameFamily of MultivariateNormals and
        # delegate to the (mixture, means, vars) overload below.
        assert isinstance(init_distrib, TD.MixtureSameFamily)
        mixture = init_distrib.mixture_distribution
        comp = init_distrib.component_distribution
        assert isinstance(comp, TD.MultivariateNormal)
        means = comp.loc
        vars = comp.covariance_matrix
        return self.__init__(mixture, means, vars, ou_distrib_modeler, t)

    @dispatch(TD.Distribution, Iterable, Iterable, OU_distrib_modeler, object)
    def __init__(self, mixture_distrib, means, vars, ou_distrib_modeler, t):
        # Overload 2: push every Gaussian component to time t via
        # OU_tNormal._get_params and rebuild the mixture with the original
        # component weights.
        assert len(means) == len(vars)
        f_means = []
        f_vars = []
        for i in range(len(means)):
            distrib = TD.MultivariateNormal(means[i], vars[i])
            f_mean, f_var = OU_tNormal._get_params(distrib, ou_distrib_modeler, t)
            f_means.append(f_mean)
            f_vars.append(f_var)
        f_distrib = TD.MultivariateNormal(torch.stack(f_means), torch.stack(f_vars))
        super().__init__(mixture_distrib, f_distrib)
def create_em_proxrec_samples(
        x0, pdf0, final_distrib, t_fin, t_stp,
        beta=1., verbose=False, **proxrec_params):
    '''
    Creates diffusion samples along with a pdf estimate using the
    proximal-recursion scheme of https://arxiv.org/pdf/1809.10844.pdf
    '''
    assert isinstance(final_distrib, (TD.Normal, TD.MultivariateNormal))
    fin_mean, fin_var = get_normal_distrib_params(final_distrib)
    device = x0.device
    dtype = 'float32' if x0.dtype == torch.float32 else 'float64'
    # Both the potential and its gradient share the same configuration.
    common = dict(dim_first=False, beta=beta, _type='torch',
                  dtype=dtype, device=device)
    targ_grad_potential = get_ou_potential_func(fin_mean, fin_var, grad=True, **common)
    targ_potential = get_ou_potential_func(fin_mean, fin_var, grad=False, **common)
    assert len(x0.shape) == 2
    x_fin, pdf_fin = torchBatchItoEulerProxrec(
        targ_potential, x0, pdf0, t_stp, t_fin, beta=beta, verbose=verbose,
        grad_pot_func=targ_grad_potential, **proxrec_params)
    return x_fin, pdf_fin
@dispatch(np.ndarray, TD.Distribution, float, float, int)
def create_em_samples(
        x0, final_distrib,
        t_fin, t_stp, n_samples,
        beta=1., return_init_spls=False):
    '''
    creates diffusion samples using Euler-Maruyama iterations
    :Parameters:
    x0 : np.ndarray : particles distributed according to initial distribution
    final_distrib: torch.Distribution like : final MultivariateNormal distribution
    t_fin : float : particles observation time (start time is 0)
    t_stp : float : time step of EM iterations
    n_samples : int : count of particles to propagate
    beta : float : diffusion magnitude
    return_init_spls : bool : additionally return the initial samples
    '''
    assert isinstance(final_distrib, (TD.Normal, TD.MultivariateNormal))
    fin_mean, fin_var = get_normal_distrib_params(final_distrib)
    targ_grad_potential = get_ou_potential_func(
        fin_mean, fin_var, dim_first=False, beta=beta, grad=True)
    # NOTE: the unused `np_fin_var_inv` / `np_fin_mean` conversions computed
    # here previously were dead code and have been removed.

    def minus_targ_grad_potential(x):
        # EM drift is the negative gradient of the target potential.
        return - targ_grad_potential(x)

    assert x0.shape[0] == n_samples
    assert len(x0.shape) == 2
    x_fin = batchItoEuler(minus_targ_grad_potential, x0, t_stp, t_fin, beta=beta)
    if not return_init_spls:
        return x_fin
    return x0, x_fin
@dispatch(TD.Distribution, TD.Distribution, float, float, int)
def create_em_samples(
        init_distrib, final_distrib,
        t_fin, t_stp, n_samples, beta=1.,
        return_init_spls=False):
    '''
    creates diffusion samples using Euler-Maruyama iterations
    :Parameters:
    init_distrib: torch.Distribution like : particles initial distribution
    (remaining parameters as in the np.ndarray overload)
    '''
    # Draw the initial particle cloud and delegate to the ndarray overload.
    initial_samples = init_distrib.sample((n_samples,)).cpu().numpy()
    return create_em_samples(
        initial_samples, final_distrib, t_fin, t_stp, n_samples,
        beta=beta, return_init_spls=return_init_spls)
def generate_ou_target(dim, mean_scale=1., dtype=torch.float32, device='cpu'):
    """Draw a random Gaussian target for an OU process.

    Args:
        dim: dimensionality of the target distribution.
        mean_scale: scale applied to the randomly drawn mean vector.
        dtype, device: placement of the returned torch tensors.

    Returns:
        (targ_distrib, mean, var): a torch MultivariateNormal together with
        the underlying numpy mean vector and SPD covariance matrix.
    """
    var = make_spd_matrix(dim)
    mean = np.random.randn(dim) * mean_scale
    trc_var = torch.tensor(var, dtype=dtype).to(device)
    trc_mean = torch.tensor(mean, dtype=dtype).to(device)
    targ_distrib = TD.MultivariateNormal(trc_mean, trc_var)
    # The unused `init = np.random.randn(dim) * mean_scale` draw was removed
    # (dead code); note this shifts subsequent np.random draws relative to
    # the old version under a fixed seed.
    return targ_distrib, mean, var
def get_ou_potential_func(
        mean, var, dim_first=True, beta=1., grad=False,
        _type='numpy', device='cpu', dtype='float32'):
    """Build the quadratic OU potential w(x) = (x-mean)^T var^{-1} (x-mean) / (2 beta)
    — or its gradient — as a numpy or torch callable.

    Args:
        mean, var: Gaussian parameters; tensors, arrays, lists or floats.
        dim_first: if True the callable expects inputs shaped (dim, n),
            otherwise (n, dim).
        beta: scalar scaling of the potential.
        grad: return the gradient function instead of the potential.
        _type: 'numpy' or 'torch' backend for the returned callable.
        device, dtype: placement used by the torch backend.
    """
    assert _type in ['numpy', 'torch']
    assert dtype in ['float32', 'float64']

    def _to_numpy(value):
        # Normalize tensors / lists to numpy arrays; other types pass through.
        if isinstance(value, torch.Tensor):
            return value.detach().cpu().numpy()
        if isinstance(value, list):
            return np.array(value)
        return value

    var = _to_numpy(var)
    mean = _to_numpy(mean)
    if isinstance(var, float):
        var = np.array(var).reshape(1, 1)
    if isinstance(mean, float):
        mean = np.array(mean).reshape(1)
    assert len(var.shape) == 2
    assert len(mean.shape) == 1
    assert var.shape[0] == var.shape[1]
    assert var.shape[0] == mean.shape[0]
    var_inv = np.linalg.inv(var)
    dim = mean.shape[0]

    if _type == 'numpy':
        def potential_dim_first(x):
            assert x.shape[0] == dim
            centered = x - mean.reshape((-1, 1))
            return (1. / (2. * beta)) * np.sum(centered * np.dot(var_inv, centered), axis=0)

        def grad_potential_dim_first(x):
            centered = x - mean.reshape((-1, 1))
            return np.dot(var_inv, centered) / beta
    else:
        torch_dtype = torch.float32 if dtype == 'float32' else torch.float64
        t_mean = torch.tensor(mean, dtype=torch_dtype, device=device)
        t_var_inv = torch.tensor(var_inv, dtype=torch_dtype, device=device)

        def potential_dim_first(x):
            assert x.size(0) == dim
            centered = x - t_mean.view((-1, 1))
            return (1. / (2. * beta)) * torch.sum(centered * torch.matmul(t_var_inv, centered), dim=0)

        def grad_potential_dim_first(x):
            centered = x - t_mean.view((-1, 1))
            return torch.matmul(t_var_inv, centered) / beta

    chosen = grad_potential_dim_first if grad else potential_dim_first
    if dim_first:
        return chosen
    # Transpose in and out so callers may pass (n, dim) batches instead.
    return lambda x: chosen(x.T).T
if __name__ == "__main__":
    ##############
    # Sanity check: at large t the OU marginal relaxes to the target N(b, A).
    A = np.array([[1., 0.5], [0.5, 2.]])
    b = np.array([0.1, 0.7])
    nm = TD.MultivariateNormal(torch.tensor(b), torch.tensor(A))
    beta = 2.0
    ou_d_m = create_ou_distrib_modeler(nm, beta)
    X_0 = np.array([0.4, 2.3])
    mean, var = ou_d_m.get_distrib_params(X_0, 70.)
    assert np.allclose(mean, b, 1e-5)
    assert np.allclose(var, A, 1e-5)
    ##############
    # Short-time distribution started from a deterministic point.
    d = OU_tDeterministic(X_0, ou_d_m, 0.01)
    # BUG FIX: OU_tDeterministic *is* a MultivariateNormal — it has no
    # `.distrib` attribute, so `d.distrib.mean` raised AttributeError.
    print(d.mean)
    print(d.covariance_matrix)
| [
"numpy.linalg.matrix_rank",
"numpy.array",
"multipledispatch.dispatch",
"numpy.asarray",
"numpy.exp",
"numpy.dot",
"torch.matmul",
"scipy.linalg.schur",
"numpy.diagonal",
"numpy.eye",
"numpy.allclose",
"numpy.random.randn",
"torch.stack",
"numpy.diag",
"torch.tensor",
"numpy.linalg.inv... | [((7132, 7188), 'multipledispatch.dispatch', 'dispatch', (['np.ndarray', 'TD.Distribution', 'float', 'float', 'int'], {}), '(np.ndarray, TD.Distribution, float, float, int)\n', (7140, 7188), False, 'from multipledispatch import dispatch\n'), ((8496, 8557), 'multipledispatch.dispatch', 'dispatch', (['TD.Distribution', 'TD.Distribution', 'float', 'float', 'int'], {}), '(TD.Distribution, TD.Distribution, float, float, int)\n', (8504, 8557), False, 'from multipledispatch import dispatch\n'), ((4989, 5042), 'multipledispatch.dispatch', 'dispatch', (['TD.Distribution', 'OU_distrib_modeler', 'object'], {}), '(TD.Distribution, OU_distrib_modeler, object)\n', (4997, 5042), False, 'from multipledispatch import dispatch\n'), ((5471, 5544), 'multipledispatch.dispatch', 'dispatch', (['TD.Distribution', 'Iterable', 'Iterable', 'OU_distrib_modeler', 'object'], {}), '(TD.Distribution, Iterable, Iterable, OU_distrib_modeler, object)\n', (5479, 5544), False, 'from multipledispatch import dispatch\n'), ((9336, 9376), 'torch.distributions.MultivariateNormal', 'TD.MultivariateNormal', (['trc_mean', 'trc_var'], {}), '(trc_mean, trc_var)\n', (9357, 9376), True, 'import torch.distributions as TD\n'), ((10257, 10275), 'numpy.linalg.inv', 'np.linalg.inv', (['var'], {}), '(var)\n', (10270, 10275), True, 'import numpy as np\n'), ((11684, 11718), 'numpy.array', 'np.array', (['[[1.0, 0.5], [0.5, 2.0]]'], {}), '([[1.0, 0.5], [0.5, 2.0]])\n', (11692, 11718), True, 'import numpy as np\n'), ((11725, 11745), 'numpy.array', 'np.array', (['[0.1, 0.7]'], {}), '([0.1, 0.7])\n', (11733, 11745), True, 'import numpy as np\n'), ((11883, 11903), 'numpy.array', 'np.array', (['[0.4, 2.3]'], {}), '([0.4, 2.3])\n', (11891, 11903), True, 'import numpy as np\n'), ((11967, 11994), 'numpy.allclose', 'np.allclose', (['mean', 'b', '(1e-05)'], {}), '(mean, b, 1e-05)\n', (11978, 11994), True, 'import numpy as np\n'), ((12005, 12031), 'numpy.allclose', 'np.allclose', (['var', 'A', '(1e-05)'], {}), 
'(var, A, 1e-05)\n', (12016, 12031), True, 'import numpy as np\n'), ((1021, 1034), 'numpy.asarray', 'np.asarray', (['A'], {}), '(A)\n', (1031, 1034), True, 'import numpy as np\n'), ((1052, 1065), 'numpy.asarray', 'np.asarray', (['b'], {}), '(b)\n', (1062, 1065), True, 'import numpy as np\n'), ((1528, 1558), 'scipy.linalg.schur', 'scipy.linalg.schur', (['self.theta'], {}), '(self.theta)\n', (1546, 1558), False, 'import scipy\n'), ((1640, 1654), 'numpy.diagonal', 'np.diagonal', (['T'], {}), '(T)\n', (1651, 1654), True, 'import numpy as np\n'), ((2118, 2133), 'numpy.asarray', 'np.asarray', (['X_0'], {}), '(X_0)\n', (2128, 2133), True, 'import numpy as np\n'), ((2829, 2861), 'torch.distributions.MultivariateNormal', 'TD.MultivariateNormal', (['mean', 'var'], {}), '(mean, var)\n', (2850, 2861), True, 'import torch.distributions as TD\n'), ((3532, 3550), 'torch.inverse', 'torch.inverse', (['var'], {}), '(var)\n', (3545, 3550), False, 'import torch\n'), ((4181, 4197), 'torch.inverse', 'torch.inverse', (['A'], {}), '(A)\n', (4194, 4197), False, 'import torch\n'), ((4506, 4526), 'torch.inverse', 'torch.inverse', (['Sigma'], {}), '(Sigma)\n', (4519, 4526), False, 'import torch\n'), ((4579, 4598), 'torch.inverse', 'torch.inverse', (['i_Xi'], {}), '(i_Xi)\n', (4592, 4598), False, 'import torch\n'), ((4672, 4690), 'torch.inverse', 'torch.inverse', (['Psi'], {}), '(Psi)\n', (4685, 4690), False, 'import torch\n'), ((9169, 9189), 'numpy.random.randn', 'np.random.randn', (['dim'], {}), '(dim)\n', (9184, 9189), True, 'import numpy as np\n'), ((9388, 9408), 'numpy.random.randn', 'np.random.randn', (['dim'], {}), '(dim)\n', (9403, 9408), True, 'import numpy as np\n'), ((9878, 9891), 'numpy.array', 'np.array', (['var'], {}), '(var)\n', (9886, 9891), True, 'import numpy as np\n'), ((9938, 9952), 'numpy.array', 'np.array', (['mean'], {}), '(mean)\n', (9946, 9952), True, 'import numpy as np\n'), ((10812, 10858), 'torch.tensor', 'torch.tensor', (['mean'], {'dtype': 'dtype', 'device': 
'device'}), '(mean, dtype=dtype, device=device)\n', (10824, 10858), False, 'import torch\n'), ((10877, 10926), 'torch.tensor', 'torch.tensor', (['var_inv'], {'dtype': 'dtype', 'device': 'device'}), '(var_inv, dtype=dtype, device=device)\n', (10889, 10926), False, 'import torch\n'), ((11777, 11792), 'torch.tensor', 'torch.tensor', (['b'], {}), '(b)\n', (11789, 11792), False, 'import torch\n'), ((11794, 11809), 'torch.tensor', 'torch.tensor', (['A'], {}), '(A)\n', (11806, 11809), False, 'import torch\n'), ((1394, 1434), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['self.A'], {'tol': '(1e-06)'}), '(self.A, tol=1e-06)\n', (1415, 1434), True, 'import numpy as np\n'), ((1751, 1770), 'numpy.exp', 'np.exp', (['(-self.T * t)'], {}), '(-self.T * t)\n', (1757, 1770), True, 'import numpy as np\n'), ((3319, 3334), 'torch.diag', 'torch.diag', (['var'], {}), '(var)\n', (3329, 3334), False, 'import torch\n'), ((5760, 5800), 'torch.distributions.MultivariateNormal', 'TD.MultivariateNormal', (['means[i]', 'vars[i]'], {}), '(means[i], vars[i])\n', (5781, 5800), True, 'import torch.distributions as TD\n'), ((5994, 6014), 'torch.stack', 'torch.stack', (['f_means'], {}), '(f_means)\n', (6005, 6014), False, 'import torch\n'), ((6016, 6035), 'torch.stack', 'torch.stack', (['f_vars'], {}), '(f_vars)\n', (6027, 6035), False, 'import torch\n'), ((9217, 9247), 'torch.tensor', 'torch.tensor', (['var'], {'dtype': 'dtype'}), '(var, dtype=dtype)\n', (9229, 9247), False, 'import torch\n'), ((9274, 9305), 'torch.tensor', 'torch.tensor', (['mean'], {'dtype': 'dtype'}), '(mean, dtype=dtype)\n', (9286, 9305), False, 'import torch\n'), ((2523, 2554), 'torch.tensor', 'torch.tensor', (['mean'], {'dtype': 'dtype'}), '(mean, dtype=dtype)\n', (2535, 2554), False, 'import torch\n'), ((2584, 2614), 'torch.tensor', 'torch.tensor', (['var'], {'dtype': 'dtype'}), '(var, dtype=dtype)\n', (2596, 2614), False, 'import torch\n'), ((9998, 10011), 'numpy.array', 'np.array', (['var'], {}), '(var)\n', (10006, 
10011), True, 'import numpy as np\n'), ((10073, 10087), 'numpy.array', 'np.array', (['mean'], {}), '(mean)\n', (10081, 10087), True, 'import numpy as np\n'), ((10685, 10708), 'numpy.dot', 'np.dot', (['var_inv', 'x_norm'], {}), '(var_inv, x_norm)\n', (10691, 10708), True, 'import numpy as np\n'), ((11287, 11316), 'torch.matmul', 'torch.matmul', (['var_inv', 'x_norm'], {}), '(var_inv, x_norm)\n', (11299, 11316), False, 'import torch\n'), ((748, 758), 'numpy.diag', 'np.diag', (['M'], {}), '(M)\n', (755, 758), True, 'import numpy as np\n'), ((1794, 1817), 'numpy.eye', 'np.eye', (['self.b.shape[0]'], {}), '(self.b.shape[0])\n', (1800, 1817), True, 'import numpy as np\n'), ((4316, 4344), 'torch.tensor', 'torch.tensor', (['F'], {'dtype': 'dtype'}), '(F, dtype=dtype)\n', (4328, 4344), False, 'import torch\n'), ((4357, 4385), 'torch.tensor', 'torch.tensor', (['g'], {'dtype': 'dtype'}), '(g, dtype=dtype)\n', (4369, 4385), False, 'import torch\n'), ((8113, 8135), 'torch.inverse', 'torch.inverse', (['fin_var'], {}), '(fin_var)\n', (8126, 8135), False, 'import torch\n'), ((1982, 2007), 'numpy.exp', 'np.exp', (['(-2.0 * self.T * t)'], {}), '(-2.0 * self.T * t)\n', (1988, 2007), True, 'import numpy as np\n'), ((10507, 10530), 'numpy.dot', 'np.dot', (['var_inv', 'x_norm'], {}), '(var_inv, x_norm)\n', (10513, 10530), True, 'import numpy as np\n'), ((11107, 11136), 'torch.matmul', 'torch.matmul', (['var_inv', 'x_norm'], {}), '(var_inv, x_norm)\n', (11119, 11136), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 10 13:01:49 2020
@author: saksh
"""
import numpy as np
np.random.seed(1337)
import tensorflow as tf
import pandas as pd
from statsmodels.tsa.api import VAR
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.svm import SVR
from sklearn.preprocessing import MinMaxScaler
from matplotlib.pyplot import *
from datetime import datetime
"""
Calculate VAR residuals. Information criteria for optimal lag = AIC
"""
def var_resids(label1, label2, data_cache):
    """Fit a bivariate VAR on the two named columns and return the residuals
    of the first one. Lag order (up to 10) is selected by the AIC criterion.
    """
    var_model = VAR(data_cache[[label1, label2]])
    fitted = var_model.fit(maxlags=10, ic='aic', trend='c')
    return fitted.resid[label1]
"""
Data split = 80-10-10
MinMaxScaler applied to both input and output, range = [-1, 1]
LSTM model uses windowing of 3 input steps
"""
def make_datasets(df, target_column = True, train_size = 0.9, model_name = 'dense', input_steps = 3):
    """Split a dataframe into train/test sets and scale both inputs and
    targets to [-1, 1]; scalers are fit on the training split only. For the
    LSTM model the data is additionally windowed into `input_steps`-long
    input sequences via `split_sequences`.

    Returns:
        (X_train, X_test, y_train, y_test, output_scaler) for 'dense'/'lstm'
        with a target column; (X_train, X_test) when target_column is False;
        None for any other model name (matching the original behavior).
    """
    if not target_column:
        # No target: plain (shuffled) split of the raw values.
        values = np.array(df, dtype=np.float32)
        return train_test_split(values, train_size=train_size)

    features = np.array(df.iloc[:, :-1], dtype=np.float32)
    targets = np.array(df.iloc[:, -1], dtype=np.float32)
    # Chronological (unshuffled) split — this is time-series data.
    X_train, X_test, y_train, y_test = train_test_split(
        features, targets, train_size=train_size, shuffle=False)

    input_scaler = MinMaxScaler(feature_range=(-1, 1))
    input_scaler.fit(X_train)
    X_train = input_scaler.transform(X_train)
    X_test = input_scaler.transform(X_test)

    y_train = y_train.reshape(len(y_train), 1)
    y_test = y_test.reshape(len(y_test), 1)
    output_scaler = MinMaxScaler(feature_range=(-1, 1))
    output_scaler.fit(y_train)
    y_train = output_scaler.transform(y_train)
    y_test = output_scaler.transform(y_test)

    if model_name == 'dense':
        return X_train, X_test, y_train, y_test, output_scaler
    if model_name == 'lstm':
        # Window scaled features + target together, then re-split them.
        X_train, y_train = split_sequences(np.hstack((X_train, y_train)), input_steps)
        X_test, y_test = split_sequences(np.hstack((X_test, y_test)), input_steps)
        return X_train, X_test, y_train, y_test, output_scaler
    # Any other model name falls through and returns None (original behavior).
"""
Early stopping is defined, can be enabled by adding early_stopping to callback
Inputs are batched: batch size = 32
Provides tensorboard accessibility
"""
def nn_model_compile(model, X_train_data, y_train_data, patience = 2, MAX_EPOCHS = 20):
    """Compile and fit a Keras model (SGD optimizer, MSE loss, RMSE metric)
    with batch size 32, a 10% validation split and TensorBoard logging.

    An EarlyStopping callback is constructed but intentionally NOT passed to
    `fit`; add `early_stopping` to the callbacks list to enable it.
    """
    tf.keras.backend.clear_session()
    model.compile(
        optimizer=tf.optimizers.SGD(),
        loss=tf.losses.MeanSquaredError(),
        metrics=[tf.metrics.RootMeanSquaredError()])
    # Timestamped log directory for TensorBoard.
    logdir = "logs\\fit\\" + datetime.now().strftime("%Y%m%d-%H%M%S")
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir)
    early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience, mode='min')
    history = model.fit(
        x=X_train_data, y=y_train_data, validation_split=0.1,
        epochs=MAX_EPOCHS, batch_size=32, callbacks=[tensorboard_callback])
    return history
"""
epsilon = 0.0001
"""
def svr_model(X_train, y_train, param_grid):
    """Grid-search an epsilon-SVR (epsilon = 0.0001) over `param_grid` and
    return the fitted GridSearchCV object.
    """
    search = GridSearchCV(SVR(epsilon = 0.0001), param_grid, return_train_score=True)
    search.fit(X_train, y_train)
    return search
def split_sequences(input_arr, n_steps):
    """Turn a 2-D array whose last column is the target into LSTM windows.

    Returns:
        (X, y) with X[i] = input_arr[i:i+n_steps, :-1] and
        y[i] = input_arr[i+n_steps-1, -1].
    """
    windows, labels = [], []
    # The last valid window starts at len(input_arr) - n_steps.
    for start in range(len(input_arr) - n_steps + 1):
        stop = start + n_steps
        windows.append(input_arr[start:stop, :-1])
        labels.append(input_arr[stop - 1, -1])
    return np.array(windows), np.array(labels)
def make_save_plot(index, y_test, y_pred, figsize = (6, 6), xlabel = "Date", ylabel = "Market Volatility (Normalized Data)", y_lim = (0.0000, 0.0015), filepath = "default.svg"):
    """Plot test targets against |predictions| over the tail of `index` and
    save the figure to `filepath`.

    Args:
        index: datetime-like index; its last len(y_test) entries are used.
        y_test: true target values.
        y_pred: model predictions (plotted as absolute values).
        figsize, xlabel, ylabel, y_lim: plot appearance options.
        filepath: output file for the saved figure.
    """
    # BUG FIX: y_lim default was a mutable list ([]), now an immutable tuple.
    df_plot = pd.DataFrame(index=index[-len(y_test):])
    df_plot['target_variable'] = y_test
    # Predictions are plotted in magnitude to match the volatility scale.
    df_plot['predictions'] = np.abs(y_pred)
    fig, ax = subplots()
    df_plot.plot(figsize=figsize, ax=ax, ylabel=ylabel, xlabel=xlabel)
    ax.legend()
    ax.set_ylim(y_lim)
    savefig(filepath, transparent=True, bbox_inches='tight')
"numpy.abs",
"statsmodels.tsa.api.VAR",
"tensorflow.keras.callbacks.TensorBoard",
"tensorflow.losses.MeanSquaredError",
"numpy.hstack",
"sklearn.model_selection.train_test_split",
"tensorflow.optimizers.SGD",
"tensorflow.metrics.RootMeanSquaredError",
"tensorflow.keras.callbacks.EarlyStopping",
"n... | [((110, 130), 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), '(1337)\n', (124, 130), True, 'import numpy as np\n'), ((564, 597), 'statsmodels.tsa.api.VAR', 'VAR', (['data_cache[[label1, label2]]'], {}), '(data_cache[[label1, label2]])\n', (567, 597), False, 'from statsmodels.tsa.api import VAR\n'), ((2768, 2800), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (2798, 2800), True, 'import tensorflow as tf\n'), ((3036, 3082), 'tensorflow.keras.callbacks.TensorBoard', 'tf.keras.callbacks.TensorBoard', ([], {'log_dir': 'logdir'}), '(log_dir=logdir)\n', (3066, 3082), True, 'import tensorflow as tf\n'), ((3105, 3192), 'tensorflow.keras.callbacks.EarlyStopping', 'tf.keras.callbacks.EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': 'patience', 'mode': '"""min"""'}), "(monitor='val_loss', patience=patience,\n mode='min')\n", (3137, 3192), True, 'import tensorflow as tf\n'), ((4344, 4358), 'numpy.abs', 'np.abs', (['y_pred'], {}), '(y_pred)\n', (4350, 4358), True, 'import numpy as np\n'), ((1022, 1054), 'numpy.array', 'np.array', (['data'], {'dtype': 'np.float32'}), '(data, dtype=np.float32)\n', (1030, 1054), True, 'import numpy as np\n'), ((1076, 1118), 'numpy.array', 'np.array', (['df.iloc[:, -1]'], {'dtype': 'np.float32'}), '(df.iloc[:, -1], dtype=np.float32)\n', (1084, 1118), True, 'import numpy as np\n'), ((1164, 1233), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'targets'], {'train_size': 'train_size', 'shuffle': '(False)'}), '(data, targets, train_size=train_size, shuffle=False)\n', (1180, 1233), False, 'from sklearn.model_selection import train_test_split, GridSearchCV\n'), ((1261, 1296), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(-1, 1)'}), '(feature_range=(-1, 1))\n', (1273, 1296), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((1569, 1604), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], 
{'feature_range': '(-1, 1)'}), '(feature_range=(-1, 1))\n', (1581, 1604), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((2369, 2399), 'numpy.array', 'np.array', (['df'], {'dtype': 'np.float32'}), '(df, dtype=np.float32)\n', (2377, 2399), True, 'import numpy as np\n'), ((2429, 2474), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data'], {'train_size': 'train_size'}), '(data, train_size=train_size)\n', (2445, 2474), False, 'from sklearn.model_selection import train_test_split, GridSearchCV\n'), ((3478, 3497), 'sklearn.svm.SVR', 'SVR', ([], {'epsilon': '(0.0001)'}), '(epsilon=0.0001)\n', (3481, 3497), False, 'from sklearn.svm import SVR\n'), ((4009, 4020), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (4017, 4020), True, 'import numpy as np\n'), ((4022, 4033), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (4030, 4033), True, 'import numpy as np\n'), ((2832, 2851), 'tensorflow.optimizers.SGD', 'tf.optimizers.SGD', ([], {}), '()\n', (2849, 2851), True, 'import tensorflow as tf\n'), ((2860, 2888), 'tensorflow.losses.MeanSquaredError', 'tf.losses.MeanSquaredError', ([], {}), '()\n', (2886, 2888), True, 'import tensorflow as tf\n'), ((1969, 1998), 'numpy.hstack', 'np.hstack', (['(X_train, y_train)'], {}), '((X_train, y_train))\n', (1978, 1998), True, 'import numpy as np\n'), ((2172, 2199), 'numpy.hstack', 'np.hstack', (['(X_test, y_test)'], {}), '((X_test, y_test))\n', (2181, 2199), True, 'import numpy as np\n'), ((2901, 2934), 'tensorflow.metrics.RootMeanSquaredError', 'tf.metrics.RootMeanSquaredError', ([], {}), '()\n', (2932, 2934), True, 'import tensorflow as tf\n'), ((2967, 2981), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2979, 2981), False, 'from datetime import datetime\n')] |
import numpy as np
from PIL import Image, ImageDraw, ImageFont
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
import torch.autograd as autograd
from torch.autograd import Variable
# Use CUDA tensors when a GPU is available; plain CPU FloatTensors otherwise.
cuda = True if torch.cuda.is_available() else False
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
# -------------------------------
# WGAN
# -------------------------------
def conv_norm_relu_module(
    norm_type, norm_layer, input_nc, ngf, kernel_size, padding, stride=1, relu="relu"
):
    """Build a [Conv2d, optional norm, optional activation] module list.

    Returns a plain list so callers can concatenate several of these into
    one ``nn.Sequential``.  ``norm_type`` is accepted for interface
    compatibility with sibling builders but is not used here.
    """
    layers = [
        nn.Conv2d(
            input_nc, ngf, kernel_size=kernel_size, padding=padding, stride=stride
        )
    ]
    if norm_layer:
        layers.append(norm_layer(ngf))
    # Activation: "relu" -> ReLU, "Lrelu" -> LeakyReLU(0.2); anything else
    # appends no activation at all.
    if relu == "relu":
        layers.append(nn.ReLU(True))
    elif relu == "Lrelu":
        layers.append(nn.LeakyReLU(0.2, True))
    return layers
class ResnetBlock(nn.Module):
    """Residual block: forward computes ``x + conv_block(x)``.

    The convolutional path is conv+norm+ReLU (via conv_norm_relu_module),
    dropout, then a second conv+norm.
    """

    def __init__(self, dim, padding_type, norm_layer, use_dropout, norm_type="batch"):
        super(ResnetBlock, self).__init__()
        self.conv_block = self.build_conv_block(
            dim, padding_type, norm_layer, use_dropout, norm_type
        )

    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, norm_type):
        """Assemble the residual path as an ``nn.Sequential``."""
        # Only zero padding is supported (TODO in the original).
        assert padding_type == "zero"
        pad = 1
        layers = conv_norm_relu_module(norm_type, norm_layer, dim, dim, 3, pad)
        # Dropout(0.0) keeps the module structure identical when disabled.
        layers += [nn.Dropout(0.5 if use_dropout else 0.0)]
        if norm_type in ("batch", "instance"):
            layers += [
                nn.Conv2d(dim, dim, kernel_size=3, padding=pad),
                norm_layer(dim),
            ]
        else:
            # NOTE(review): asserting a non-empty string is always truthy,
            # so this branch never actually fails (kept for compatibility).
            assert "norm not defined"
        return nn.Sequential(*layers)

    def forward(self, x):
        return x + self.conv_block(x)
class ResnetGenerator(nn.Module):
    """ResNet-style image-to-image generator.

    Structure: 7x7 conv stem -> 2 strided downsampling convolutions (channel
    growth factor 3) -> ``n_blocks`` residual blocks -> 2 transposed
    convolutions back up -> 7x7 conv to ``output_nc`` -> Tanh.
    """

    def __init__(
        self,
        input_nc,
        output_nc,
        ngf=64,
        norm_layer=nn.InstanceNorm2d,
        use_dropout=False,
        n_blocks=6,
        norm_type="batch",
        gpu_ids=[],  # NOTE(review): mutable default argument; shared if mutated
    ):
        assert n_blocks >= 0
        super(ResnetGenerator, self).__init__()
        self.input_nc = input_nc
        self.output_nc = output_nc
        self.ngf = ngf
        self.gpu_ids = gpu_ids
        # 7x7 stem convolution
        model = conv_norm_relu_module(norm_type, norm_layer, input_nc, ngf, 7, 3)
        n_downsampling = 2
        for i in range(n_downsampling):
            factor_ch = 3  # 2**i : 3**i is a more complicated filter
            mult = factor_ch ** i
            # strided conv: halves spatial size, multiplies channels by 3
            model += conv_norm_relu_module(
                norm_type,
                norm_layer,
                ngf * mult,
                ngf * mult * factor_ch,
                3,
                1,
                stride=2,
            )
        mult = factor_ch ** n_downsampling
        for i in range(n_blocks):
            model += [
                ResnetBlock(
                    ngf * mult,
                    "zero",
                    norm_layer=norm_layer,
                    use_dropout=use_dropout,
                    norm_type=norm_type,
                )
            ]
        for i in range(n_downsampling):
            mult = factor_ch ** (n_downsampling - i)
            # convTranspose_norm_relu_module is defined elsewhere in this
            # module -- presumably the upsampling mirror of
            # conv_norm_relu_module (TODO confirm).
            model += convTranspose_norm_relu_module(
                norm_type,
                norm_layer,
                ngf * mult,
                int(ngf * mult / factor_ch),
                3,
                1,
                stride=2,
                output_padding=1,
            )
        if norm_type == "batch" or norm_type == "instance":
            model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=3)]
        else:
            # NOTE(review): truthy-string assert -- never fails
            assert "norm not defined"
        model += [nn.Tanh()]
        self.model = nn.Sequential(*model)

    def forward(self, input, encoder=False):
        # ``encoder`` is accepted for interface compatibility but unused here.
        if self.gpu_ids and isinstance(input.data, torch.cuda.FloatTensor):
            # split the batch across the configured GPUs
            return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
        else:
            return self.model(input)
class Generator(nn.Module):
    """MLP generator: latent vector -> image in [0, 1] of ``img_shape``."""

    def __init__(self, img_shape, in_dim=100):
        super(Generator, self).__init__()
        self.img_shape = img_shape

        def block(in_feat, out_feat, normalize=True):
            # Linear -> (optional BatchNorm1d) -> LeakyReLU
            parts = [nn.Linear(in_feat, out_feat)]
            if normalize:
                parts.append(nn.BatchNorm1d(out_feat, 0.8))
            parts.append(nn.LeakyReLU(0.2, inplace=True))
            return parts

        # Three plain Linear layers, then widening blocks (no normalization),
        # then a projection to the flattened image and a Sigmoid.
        layers = [
            nn.Linear(in_dim, 128),
            nn.Linear(128, 128),
            nn.Linear(128, 128),
        ]
        for w_in, w_out in ((128, 128), (128, 256), (256, 512), (512, 1024)):
            layers.extend(block(w_in, w_out, normalize=False))
        layers.append(nn.Linear(1024, int(np.prod(img_shape))))
        layers.append(nn.Sigmoid())
        self.model = nn.Sequential(*layers)

    def forward(self, z):
        flat = self.model(z)
        # reshape the flat output back into image form, one per batch element
        return flat.view(z.shape[0], *self.img_shape)
# class Discriminator(nn.Module):
# def __init__(self, img_shape):
# super(Discriminator, self).__init__()
# self.model = nn.Sequential(
# nn.Linear(int(np.prod(img_shape)), 512),
# nn.LeakyReLU(0.2, inplace=True),
# nn.Linear(512, 256),
# nn.LeakyReLU(0.2, inplace=True),
# nn.Linear(256, 1),
# )
# def forward(self, img):
# img_flat = img.view(img.shape[0], -1)
# validity = self.model(img_flat)
# return validity
class Discriminator(nn.Module):
    """Convolutional discriminator producing one validity score in [0, 1]."""

    def __init__(self, img_shape):
        super(Discriminator, self).__init__()

        def discriminator_block(in_filters, out_filters, bn=True):
            # stride-2 conv halves the spatial resolution each block
            stage = [
                nn.Conv2d(in_filters, out_filters, 3, 2, 1),
                nn.LeakyReLU(0.2, inplace=True),
                nn.Dropout2d(0.25),
            ]
            if bn:
                stage.append(nn.BatchNorm2d(out_filters, 0.8))
            return stage

        stages = []
        stages += discriminator_block(img_shape[0], 16, bn=False)
        stages += discriminator_block(16, 32)
        stages += discriminator_block(32, 64)
        stages += discriminator_block(64, 128)
        self.model = nn.Sequential(*stages)
        # Four stride-2 convolutions shrink each spatial side by 2**4.
        ds_size = img_shape[1] // 2 ** 4
        self.adv_layer = nn.Sequential(nn.Linear(128 * ds_size ** 2, 1), nn.Sigmoid())

    def forward(self, img):
        features = self.model(img)
        features = features.view(features.shape[0], -1)
        return self.adv_layer(features)
def compute_gradient_penalty(D, real_samples, fake_samples):
    """Calculates the gradient penalty loss for WGAN GP"""
    batch = real_samples.size(0)
    # Random per-sample weight for interpolating real and fake samples.
    eps = Tensor(np.random.random((batch, 1, 1, 1)))
    # Random interpolation between real and fake, tracked for autograd.
    mixed = (eps * real_samples + (1 - eps) * fake_samples).requires_grad_(True)
    critic_out = D(mixed)
    grad_target = Variable(Tensor(batch, 1).fill_(1.0), requires_grad=False)
    # d(critic_out)/d(mixed); keep the graph so the penalty is differentiable
    grads = autograd.grad(
        outputs=critic_out,
        inputs=mixed,
        grad_outputs=grad_target,
        create_graph=True,
        retain_graph=True,
        only_inputs=True,
    )[0]
    flat = grads.view(batch, -1)
    # Penalize deviation of the per-sample gradient norm from 1.
    return ((flat.norm(2, dim=1) - 1) ** 2).mean()
| [
"torch.nn.Sigmoid",
"torch.nn.ReLU",
"torch.nn.parallel.data_parallel",
"torch.nn.Dropout",
"torch.nn.Tanh",
"torch.nn.LeakyReLU",
"numpy.prod",
"torch.nn.Sequential",
"torch.nn.BatchNorm2d",
"torch.nn.Dropout2d",
"torch.nn.Conv2d",
"torch.nn.BatchNorm1d",
"torch.cuda.is_available",
"torch... | [((260, 285), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (283, 285), False, 'import torch\n'), ((589, 675), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_nc', 'ngf'], {'kernel_size': 'kernel_size', 'padding': 'padding', 'stride': 'stride'}), '(input_nc, ngf, kernel_size=kernel_size, padding=padding, stride=\n stride)\n', (598, 675), True, 'import torch.nn as nn\n'), ((1908, 1934), 'torch.nn.Sequential', 'nn.Sequential', (['*conv_block'], {}), '(*conv_block)\n', (1921, 1934), True, 'import torch.nn as nn\n'), ((3956, 3977), 'torch.nn.Sequential', 'nn.Sequential', (['*model'], {}), '(*model)\n', (3969, 3977), True, 'import torch.nn as nn\n'), ((7463, 7601), 'torch.autograd.grad', 'autograd.grad', ([], {'outputs': 'd_interpolates', 'inputs': 'interpolates', 'grad_outputs': 'fake', 'create_graph': '(True)', 'retain_graph': '(True)', 'only_inputs': '(True)'}), '(outputs=d_interpolates, inputs=interpolates, grad_outputs=\n fake, create_graph=True, retain_graph=True, only_inputs=True)\n', (7476, 7601), True, 'import torch.autograd as autograd\n'), ((795, 808), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (802, 808), True, 'import torch.nn as nn\n'), ((3923, 3932), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (3930, 3932), True, 'import torch.nn as nn\n'), ((4120, 4178), 'torch.nn.parallel.data_parallel', 'nn.parallel.data_parallel', (['self.model', 'input', 'self.gpu_ids'], {}), '(self.model, input, self.gpu_ids)\n', (4145, 4178), True, 'import torch.nn as nn\n'), ((4764, 4786), 'torch.nn.Linear', 'nn.Linear', (['in_dim', '(128)'], {}), '(in_dim, 128)\n', (4773, 4786), True, 'import torch.nn as nn\n'), ((4800, 4819), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(128)'], {}), '(128, 128)\n', (4809, 4819), True, 'import torch.nn as nn\n'), ((4833, 4852), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(128)'], {}), '(128, 128)\n', (4842, 4852), True, 'import torch.nn as nn\n'), ((5134, 5146), 'torch.nn.Sigmoid', 'nn.Sigmoid', 
([], {}), '()\n', (5144, 5146), True, 'import torch.nn as nn\n'), ((6608, 6640), 'torch.nn.Linear', 'nn.Linear', (['(128 * ds_size ** 2)', '(1)'], {}), '(128 * ds_size ** 2, 1)\n', (6617, 6640), True, 'import torch.nn as nn\n'), ((6642, 6654), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (6652, 6654), True, 'import torch.nn as nn\n'), ((854, 877), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)', '(True)'], {}), '(0.2, True)\n', (866, 877), True, 'import torch.nn as nn\n'), ((1566, 1581), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (1576, 1581), True, 'import torch.nn as nn\n'), ((1624, 1639), 'torch.nn.Dropout', 'nn.Dropout', (['(0.0)'], {}), '(0.0)\n', (1634, 1639), True, 'import torch.nn as nn\n'), ((1746, 1791), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim', 'dim'], {'kernel_size': '(3)', 'padding': 'p'}), '(dim, dim, kernel_size=3, padding=p)\n', (1755, 1791), True, 'import torch.nn as nn\n'), ((3799, 3850), 'torch.nn.Conv2d', 'nn.Conv2d', (['ngf', 'output_nc'], {'kernel_size': '(7)', 'padding': '(3)'}), '(ngf, output_nc, kernel_size=7, padding=3)\n', (3808, 3850), True, 'import torch.nn as nn\n'), ((4461, 4489), 'torch.nn.Linear', 'nn.Linear', (['in_feat', 'out_feat'], {}), '(in_feat, out_feat)\n', (4470, 4489), True, 'import torch.nn as nn\n'), ((4604, 4635), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (4616, 4635), True, 'import torch.nn as nn\n'), ((6040, 6083), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_filters', 'out_filters', '(3)', '(2)', '(1)'], {}), '(in_filters, out_filters, 3, 2, 1)\n', (6049, 6083), True, 'import torch.nn as nn\n'), ((6101, 6132), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (6113, 6132), True, 'import torch.nn as nn\n'), ((6150, 6168), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['(0.25)'], {}), '(0.25)\n', (6162, 6168), True, 'import torch.nn as nn\n'), ((4547, 4576), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', 
(['out_feat', '(0.8)'], {}), '(out_feat, 0.8)\n', (4561, 4576), True, 'import torch.nn as nn\n'), ((5075, 5093), 'numpy.prod', 'np.prod', (['img_shape'], {}), '(img_shape)\n', (5082, 5093), True, 'import numpy as np\n'), ((6232, 6264), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_filters', '(0.8)'], {}), '(out_filters, 0.8)\n', (6246, 6264), True, 'import torch.nn as nn\n')] |
import numpy as np
import cv2
import torch
import torch.backends.cudnn as cudnn
from numpy import random
from utils.datasets import letterbox
from models.experimental import attempt_load
from utils.general import non_max_suppression, scale_coords
from utils.torch_utils import select_device
# Lazily-initialised globals: set once by init() and then reused by xywh().
model = None
device = None
def init(gpu):
    """Load the YOLO face-detection model and warm it up.

    gpu: device specifier forwarded to ``select_device`` after ``str()``
    conversion (accepted values depend on utils.torch_utils.select_device --
    TODO confirm).
    """
    # initialise model
    global model, device
    device = select_device(str(gpu))
    model = attempt_load("checkpoints/face/yolo.pt", map_location=device)
    # half precision inference (NOTE(review): FP16 generally assumes a CUDA
    # device -- confirm before running on CPU)
    model.half()
    # warm-up pass so the first real inference is not slowed by lazy init
    model(torch.zeros(1, 3, 512, 512).to(device).type_as(next(model.parameters())))
def xywh(im0):
    """
    Yield (x, y, w, h) -- box centre and size -- for each face detected in
    ``im0``.  ``init()`` must have been called first.
    """
    # letterbox-resize to the model's input size, keeping stride alignment
    stride = int(model.stride.max())
    img = letterbox(im0, 512, stride=stride)[0]
    # BGR -> RGB and HWC -> CHW, made contiguous for torch
    img = np.ascontiguousarray(img[:, :, ::-1].transpose(2, 0, 1))
    img = torch.from_numpy(img).to(device).half()
    img /= 255.0
    if img.ndimension() == 3:
        img = img.unsqueeze(0)
    # run inference, then drop overlapping boxes
    pred = non_max_suppression(model(img, augment=False)[0], 0.5, 0.45, agnostic=True)
    if len(pred):
        det = pred[0]
        # map box coordinates back to the original image size
        det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
        for *box, conf, cls in reversed(det):
            x1, y1, x2, y2 = box
            # yield centre point plus width/height for each detection
            yield int((x1 + x2) / 2), int((y1 + y2) / 2), int(x2 - x1), int(y2 - y1)
def crop(image, opt):
    """Centre-crop ``image`` to an ``opt.crop_size`` square.

    Assumes the image is ``opt.load_size`` pixels on each side, so the crop
    window is centred by offsetting half the size difference.
    """
    size = opt.crop_size
    start = int(opt.load_size / 2 - size / 2)
    return image[start:start + size, start:start + size]
def resize(image,opt):
    """Yield one centre-cropped image per sufficiently large detected face.

    Faces come from ``xywh(image)``; each is rescaled so its area matches a
    reference face (86x119 px, taken from the initial dataset) and its
    centre is translated to the image centre before cropping.
    """
    shape = [opt.load_size] * 2
    # values taken from initial dataset
    width = 86
    height = 119
    for x,y,w,h in xywh(image):
        if w < opt.min_face_size:
            # ignore small faces
            continue
        # scale factor mapping the detected face area to the reference area
        ratio = np.sqrt(float(width*height)/float(w*h))
        # resize image using linear algebra: affine matrix scales by ``ratio``
        # and translates the face centre (x, y) to the image centre
        yield crop(cv2.warpAffine(image,np.float32([[ratio,0,int(shape[0]/2-ratio*x)],[0,ratio,int(shape[1]/2-ratio*y)]]),(shape[0],shape[1])),opt)
| [
"models.experimental.attempt_load",
"torch.from_numpy",
"numpy.ascontiguousarray",
"utils.general.non_max_suppression",
"utils.general.scale_coords",
"utils.datasets.letterbox",
"torch.zeros"
] | [((432, 493), 'models.experimental.attempt_load', 'attempt_load', (['"""checkpoints/face/yolo.pt"""'], {'map_location': 'device'}), "('checkpoints/face/yolo.pt', map_location=device)\n", (444, 493), False, 'from models.experimental import attempt_load\n'), ((805, 830), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['img'], {}), '(img)\n', (825, 830), True, 'import numpy as np\n'), ((1043, 1094), 'utils.general.non_max_suppression', 'non_max_suppression', (['pred', '(0.5)', '(0.45)'], {'agnostic': '(True)'}), '(pred, 0.5, 0.45, agnostic=True)\n', (1062, 1094), False, 'from utils.general import non_max_suppression, scale_coords\n'), ((713, 747), 'utils.datasets.letterbox', 'letterbox', (['im0', '(512)'], {'stride': 'stride'}), '(im0, 512, stride=stride)\n', (722, 747), False, 'from utils.datasets import letterbox\n'), ((841, 862), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (857, 862), False, 'import torch\n'), ((1185, 1235), 'utils.general.scale_coords', 'scale_coords', (['img.shape[2:]', 'det[:, :4]', 'im0.shape'], {}), '(img.shape[2:], det[:, :4], im0.shape)\n', (1197, 1235), False, 'from utils.general import non_max_suppression, scale_coords\n'), ((521, 548), 'torch.zeros', 'torch.zeros', (['(1)', '(3)', '(512)', '(512)'], {}), '(1, 3, 512, 512)\n', (532, 548), False, 'import torch\n')] |
import pandas as pd
import numpy as np
import os
from collections import Counter
from scipy.cluster.hierarchy import linkage, fcluster
from scipy.spatial.distance import squareform
def find_correlation_clusters(corr, corr_thresh):
    """Cluster metrics whose pairwise correlation exceeds ``corr_thresh``.

    corr: square, symmetric correlation matrix (e.g. ``DataFrame.corr()``).
    Returns a flat cluster label for every metric, from single-linkage
    hierarchical clustering on ``1 - corr``.
    """
    # Turn correlation into dissimilarity so correlated metrics are "close";
    # the threshold translates the same way.
    distance_matrix = 1.0 - corr
    tree = linkage(squareform(distance_matrix), method='single')
    return fcluster(tree, 1.0 - corr_thresh, criterion='distance')
def relabel_clusters(labels, metric_columns):
    """Renumber cluster labels by size: 0 = largest cluster, 1 = next, ...

    Returns a tuple of (DataFrame with 'group'/'column' sorted by group then
    column name, Counter of members per new label).
    """
    by_size = Counter(labels).most_common()
    new_label = {old: rank for rank, (old, _) in enumerate(by_size)}
    relabeled = [new_label[l] for l in labels]
    labeled_column_df = (
        pd.DataFrame({'group': relabeled, 'column': metric_columns})
        .sort_values(['group', 'column'], ascending=[True, True])
    )
    return labeled_column_df, Counter(relabeled)
def make_load_matrix(labeled_column_df, metric_columns, relabled_count, corr):
    """Build the loading matrix mapping original metrics to metric groups.

    labeled_column_df: DataFrame with 'group' and 'column' columns.
    metric_columns: ordered list of the original metric names.
    relabled_count: Counter mapping group label -> member count.
    corr: the correlation threshold used for grouping; multi-member group
        loadings are weighted 1 / (sqrt(corr) * group_size).

    Returns a DataFrame indexed by metric name whose columns are the groups;
    multi-metric groups are named 'metric_group_<n>', singleton groups keep
    their single metric's own name.  Rows are sorted by loading (descending)
    with name as the final tie-breaker.
    """
    load_mat = np.zeros((len(metric_columns), len(relabled_count)))
    # BUGFIX: the original read row values positionally (row[1][0] /
    # row[1][1]), which relies on deprecated integer-position fallback of
    # Series.__getitem__ and breaks under label-based lookup in newer
    # pandas; use the column labels instead.
    for _, row in labeled_column_df.iterrows():
        orig_col = metric_columns.index(row['column'])
        group = row['group']
        if relabled_count[group] > 1:
            # spread the loading evenly across the group's members
            load_mat[orig_col, group] = 1.0 / (
                np.sqrt(corr) * float(relabled_count[group]))
        else:
            load_mat[orig_col, group] = 1.0
    # A column with more than one non-zero loading is a real metric group.
    is_group = load_mat.astype(bool).sum(axis=0) > 1
    column_names = ['metric_group_{}'.format(d + 1) if is_group[d]
                    else labeled_column_df.loc[labeled_column_df['group'] == d, 'column'].iloc[0]
                    for d in range(0, load_mat.shape[1])]
    loadmat_df = pd.DataFrame(load_mat, index=metric_columns, columns=column_names)
    # Sort rows by their loadings (descending), breaking ties by name (asc).
    loadmat_df['name'] = loadmat_df.index
    sort_cols = list(loadmat_df.columns.values)
    sort_order = [False] * loadmat_df.shape[1]
    sort_order[-1] = True
    loadmat_df = loadmat_df.sort_values(sort_cols, ascending=sort_order)
    return loadmat_df.drop('name', axis=1)
def save_load_matrix(data_set_path, loadmat_df, labeled_column_df):
    """Write the loading matrix and per-group metric lists next to the
    data set, deriving both output paths from ``data_set_path``."""
    loadmat_path = data_set_path.replace('.csv', '_load_mat.csv')
    print('saving loadings to ' + loadmat_path)
    loadmat_df.to_csv(loadmat_path)
    groupmets_path = data_set_path.replace('.csv', '_groupmets.csv')
    print('saving metric groups to ' + groupmets_path)
    # one row per group: pipe-separated names of the group's member metrics
    membership = [
        '|'.join(labeled_column_df.loc[labeled_column_df['group'] == g, 'column'])
        for g in set(labeled_column_df['group'])
    ]
    pd.DataFrame(membership, index=loadmat_df.columns.values,
                 columns=['metrics']).to_csv(groupmets_path)
def find_metric_groups(data_set_path,group_corr_thresh=0.5):
    """Cluster correlated metric scores into groups and save the results.

    Reads ``<data_set_path minus .csv>_scores.csv`` (produced by an earlier
    listing), clusters metrics whose correlation exceeds
    ``group_corr_thresh``, then writes the loading matrix and the group
    membership file next to the data set.
    """
    score_save_path=data_set_path.replace('.csv','_scores.csv')
    assert os.path.isfile(score_save_path),'You must run listing 5.3 or 7.5 to save metric scores first'
    score_data = pd.read_csv(score_save_path,index_col=[0,1])
    # the churn outcome must not influence the metric grouping
    score_data.drop('is_churn',axis=1,inplace=True)
    metric_columns = list(score_data.columns.values)
    labels = find_correlation_clusters(score_data.corr(),group_corr_thresh)
    labeled_column_df, relabled_count = relabel_clusters(labels,metric_columns)
    loadmat_df = make_load_matrix(labeled_column_df, metric_columns, relabled_count,group_corr_thresh)
    save_load_matrix(data_set_path,loadmat_df,labeled_column_df)
| [
"scipy.spatial.distance.squareform",
"numpy.sqrt",
"pandas.read_csv",
"collections.Counter",
"os.path.isfile",
"pandas.DataFrame",
"scipy.cluster.hierarchy.fcluster"
] | [((380, 434), 'scipy.cluster.hierarchy.fcluster', 'fcluster', (['hierarchy', 'diss_thresh'], {'criterion': '"""distance"""'}), "(hierarchy, diss_thresh, criterion='distance')\n", (388, 434), False, 'from scipy.cluster.hierarchy import linkage, fcluster\n'), ((519, 534), 'collections.Counter', 'Counter', (['labels'], {}), '(labels)\n', (526, 534), False, 'from collections import Counter\n'), ((713, 740), 'collections.Counter', 'Counter', (['relabeled_clusters'], {}), '(relabeled_clusters)\n', (720, 740), False, 'from collections import Counter\n'), ((1685, 1751), 'pandas.DataFrame', 'pd.DataFrame', (['load_mat'], {'index': 'metric_columns', 'columns': 'column_names'}), '(load_mat, index=metric_columns, columns=column_names)\n', (1697, 1751), True, 'import pandas as pd\n'), ((2768, 2799), 'os.path.isfile', 'os.path.isfile', (['score_save_path'], {}), '(score_save_path)\n', (2782, 2799), False, 'import os\n'), ((2879, 2925), 'pandas.read_csv', 'pd.read_csv', (['score_save_path'], {'index_col': '[0, 1]'}), '(score_save_path, index_col=[0, 1])\n', (2890, 2925), True, 'import pandas as pd\n'), ((287, 312), 'scipy.spatial.distance.squareform', 'squareform', (['dissimilarity'], {}), '(dissimilarity)\n', (297, 312), False, 'from scipy.spatial.distance import squareform\n'), ((765, 834), 'pandas.DataFrame', 'pd.DataFrame', (["{'group': relabeled_clusters, 'column': metric_columns}"], {}), "({'group': relabeled_clusters, 'column': metric_columns})\n", (777, 834), True, 'import pandas as pd\n'), ((2534, 2613), 'pandas.DataFrame', 'pd.DataFrame', (['group_lists'], {'index': 'loadmat_df.columns.values', 'columns': "['metrics']"}), "(group_lists, index=loadmat_df.columns.values, columns=['metrics'])\n", (2546, 2613), True, 'import pandas as pd\n'), ((1278, 1291), 'numpy.sqrt', 'np.sqrt', (['corr'], {}), '(corr)\n', (1285, 1291), True, 'import numpy as np\n')] |
import random
from typing import Tuple
import numpy as np
from numpy.ma.core import where
import numpy.ma as ma
TOTALLAYERS = 2
BOARDLAYER = 0  # per-square snack/sauce bits
SNAKELAYER = 1  # per-square packed snake state; sign = owning player

# Snake-layer bit layout (applied to abs(value); the sign marks the owner):
#   bits 0-7  : moves remaining on this square (how long the body stays)
#   bits 8-14 : health (health only goes up to 100, so 7 bits suffice)
#   bit  15   : dead flag
MAXHEALTH = 100
MAXHEALTHENCODED = MAXHEALTH << 8
SNAKEMASKMOVESREMAINING = 0b11111111
SNAKEMASKHEALTH = 0b111111100000000
SNAKEMASKDEAD = 0b1000000000000000
SNAKEOFFSETMOVES = 0
SNAKEOFFSETHEALTH = 8
SNAKEOFFSETDEAD = 15

# Board-layer bit layout:
BOARDMASKSNACK = 0b1
BOARDOFFSETSNACK = 0
BOARDMASKSAUCE = 0b10
BOARDOFFSETSAUCE = 1

NUMBERSNAKES = 2
PROBABILITYSNACK = 0.5


class Board():
    """Two-snake board with bit-packed square state.

    ``pieces[x, y, BOARDLAYER]`` holds the snack/sauce bits and
    ``pieces[x, y, SNAKELAYER]`` holds the packed snake value described by
    the SNAKEMASK*/SNAKEOFFSET* constants, positive for player 1 and
    negated for player -1.
    """

    def __init__(self, x=7, y=7) -> None:
        "Set up initial board configuration."
        self.x = x
        self.y = y
        self.prob_snack = PROBABILITYSNACK
        self.pieces: np.ndarray = np.zeros(
            (self.x, self.y, TOTALLAYERS), dtype=np.int32)

    def __getitem__(self, index: int) -> np.ndarray:
        return self.pieces[index]

    def legal_moves(self, player: int) -> list:
        """Return the (x, y) squares ``player`` may move its head to.

        Unoccupied neighbours are preferred; occupied neighbours are only
        returned when no free square exists (the snake is forced to
        neck-break).
        """
        board = self[:, :, SNAKELAYER]
        if player == -1:
            board = -board
        # The head carries the largest packed value (most moves remaining).
        head = np.where(board == np.amax(board))
        x, y = head[0][0], head[1][0]
        legal_points = []
        good_points = []
        for dx, dy in ((0, 1), (0, -1), (1, 0), (-1, 0)):
            nx, ny = x + dx, y + dy
            if 0 <= nx < self.x and 0 <= ny < self.y:
                legal_points.append((nx, ny))
                if board[nx, ny] == 0:
                    good_points.append((nx, ny))
        return good_points if good_points else legal_points

    def add_snack(self) -> None:
        """With probability ``self.prob_snack``, place a snack on a random
        square occupied by neither a snake nor an existing snack."""
        if random.uniform(0, 1) > self.prob_snack:
            return
        occupied = self.pieces[:, :, SNAKELAYER] | np.bitwise_and(
            self.pieces[:, :, BOARDLAYER], BOARDMASKSNACK)
        blank_spaces = np.argwhere(occupied == 0)
        if len(blank_spaces) == 0:
            return
        x, y = blank_spaces[random.randint(0, len(blank_spaces) - 1), :]
        # BUGFIX: the original wrote ``value + 1 << BOARDOFFSETSNACK`` which
        # parses as ``(value + 1) << offset`` (harmless only because the
        # offset is 0); set the snack bit explicitly instead.
        self.pieces[x, y, BOARDLAYER] |= 1 << BOARDOFFSETSNACK

    def get_result(self, player: int) -> float:
        """Game outcome from ``player``'s perspective.

        0 = ongoing, ``player`` = win, ``-player`` = loss, 1e-4 = both dead
        (near-draw).  Assumes each snake still occupies at least one square.
        """
        snake_layer = np.copy(self.pieces[:, :, SNAKELAYER])
        if player == -1:
            snake_layer = -snake_layer
        # Read the dead bit from the first square of each snake; the dead
        # bit is applied to every square of a snake, so any square will do.
        player_dead = snake_layer[snake_layer > 0][0] >> SNAKEOFFSETDEAD
        opponent_dead = (-1 * snake_layer[snake_layer < 0][0]) >> SNAKEOFFSETDEAD
        if player_dead and opponent_dead:
            return 1e-4
        if opponent_dead:
            return player
        if player_dead:
            return -player
        return 0

    def execute_move(self, move: Tuple[int, int], player: int) -> np.ndarray:
        """Return a new ``pieces`` array after ``player`` moves to ``move``.

        ``self.pieces`` is left unmodified.  Handles body advancement,
        collisions, starvation and snack pickup.
        """
        (x, y) = move
        pieces = np.copy(self.pieces)
        board = np.copy(self.pieces[:, :, BOARDLAYER])
        # Work with the mover's squares as positive values.
        snakes = np.copy(player * self.pieces[:, :, SNAKELAYER])
        # The head is the square with the most moves remaining.
        head_value = np.amax(snakes)
        # Advance the body: every mover square loses one move and one health.
        # BUGFIX: the original used np.subtract/np.add with ``where=`` but no
        # ``out=``, which leaves the unselected (opponent / empty) squares
        # uninitialized; np.where preserves them instead.
        step_cost = (1 << SNAKEOFFSETMOVES) + (1 << SNAKEOFFSETHEALTH)
        snakes = np.where(snakes > 0, snakes - step_cost, snakes)
        # Clear squares whose moves-remaining reached zero (the tail leaves).
        snakes = np.where(np.bitwise_and(snakes,
                                         SNAKEMASKMOVESREMAINING) != 0, snakes, 0)
        # Collision: moving onto any occupied square kills the mover.
        # TODO: crude sequential approximation of simultaneous movement --
        # should account for the opponent's possible moves this turn.
        if snakes[x, y] != 0:
            snakes = np.where(snakes > 0, snakes + SNAKEMASKDEAD, snakes)
            pieces[:, :, BOARDLAYER] = board
            pieces[:, :, SNAKELAYER] = player * snakes
            return pieces
        # Place the new head (it also pays the one-health step cost).
        snakes[x, y] = head_value - (1 << SNAKEOFFSETHEALTH)
        got_snack = (board[x, y] & BOARDMASKSNACK) == 1
        health = snakes[snakes > 0][0] >> SNAKEOFFSETHEALTH
        if not got_snack:
            # Starvation: health reached zero with nothing to eat.
            if health == 0:
                snakes = np.where(snakes > 0,
                                  snakes + (1 << SNAKEOFFSETDEAD), snakes)
            pieces[:, :, BOARDLAYER] = board
            pieces[:, :, SNAKELAYER] = player * snakes
            return pieces
        # Eat the snack: clear its bit, restore health to MAXHEALTH on every
        # segment, and extend every segment's stay by one move (grow by one).
        board[x, y] = board[x, y] - (1 << BOARDOFFSETSNACK)
        health_delta = MAXHEALTH - health
        occupied = np.where(np.bitwise_and(snakes,
                                           SNAKEMASKMOVESREMAINING) != 0, snakes, 0)
        bonus = np.where(occupied > 0, (health_delta << SNAKEOFFSETHEALTH) + 1, 0)
        snakes = snakes + bonus
        pieces[:, :, BOARDLAYER] = board
        pieces[:, :, SNAKELAYER] = player * snakes
        return pieces

    def to_string(self):
        """Serialize the board state to a string.

        BUGFIX: the original referenced ``self.snacks`` / ``self.hazards`` /
        ``self.snakes`` -- attributes from an older object model that no
        longer exist -- so it always raised AttributeError.  Serialize the
        packed layers instead.
        """
        return "{}:{}:{}:{}".format(
            self.x, self.y,
            self.pieces[:, :, BOARDLAYER].tolist(),
            self.pieces[:, :, SNAKELAYER].tolist(),
        )

    def pretty(self):
        """Print a human-readable dump of the packed layers (debugging)."""
        pieces = np.copy(self.pieces)
        signs = np.where(pieces[:, :, SNAKELAYER] > 0, 1, -1)
        abs_snakes = np.absolute(pieces[:, :, SNAKELAYER])
        print("raw snake")
        print(pieces[:, :, SNAKELAYER])
        print("raw board")
        print(pieces[:, :, BOARDLAYER])
        print("snacks")
        print(np.bitwise_and(pieces[:, :, BOARDLAYER], BOARDMASKSNACK))
        print("snakes")
        print(np.bitwise_and(abs_snakes, SNAKEMASKMOVESREMAINING) * signs)
        print("health")
        print(np.right_shift(np.bitwise_and(
            abs_snakes, SNAKEMASKHEALTH), SNAKEOFFSETHEALTH) * signs)
        print("dead")
        print(np.right_shift(np.bitwise_and(
            abs_snakes, SNAKEMASKDEAD), SNAKEOFFSETDEAD) * signs)
| [
"numpy.copy",
"random.uniform",
"numpy.add",
"numpy.where",
"numpy.absolute",
"numpy.subtract",
"numpy.bitwise_and",
"numpy.zeros",
"numpy.amax"
] | [((1681, 1736), 'numpy.zeros', 'np.zeros', (['(self.x, self.y, TOTALLAYERS)'], {'dtype': 'np.int32'}), '((self.x, self.y, TOTALLAYERS), dtype=np.int32)\n', (1689, 1736), True, 'import numpy as np\n'), ((2913, 2933), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2927, 2933), False, 'import random\n'), ((3678, 3716), 'numpy.copy', 'np.copy', (['self.pieces[:, :, SNAKELAYER]'], {}), '(self.pieces[:, :, SNAKELAYER])\n', (3685, 3716), True, 'import numpy as np\n'), ((5087, 5107), 'numpy.copy', 'np.copy', (['self.pieces'], {}), '(self.pieces)\n', (5094, 5107), True, 'import numpy as np\n'), ((5124, 5162), 'numpy.copy', 'np.copy', (['self.pieces[:, :, BOARDLAYER]'], {}), '(self.pieces[:, :, BOARDLAYER])\n', (5131, 5162), True, 'import numpy as np\n'), ((5180, 5227), 'numpy.copy', 'np.copy', (['(player * self.pieces[:, :, SNAKELAYER])'], {}), '(player * self.pieces[:, :, SNAKELAYER])\n', (5187, 5227), True, 'import numpy as np\n'), ((5335, 5350), 'numpy.amax', 'np.amax', (['snakes'], {}), '(snakes)\n', (5342, 5350), True, 'import numpy as np\n'), ((5435, 5528), 'numpy.subtract', 'np.subtract', (['snakes', '((1 << SNAKEOFFSETMOVES) + (1 << SNAKEOFFSETHEALTH))'], {'where': '(snakes > 0)'}), '(snakes, (1 << SNAKEOFFSETMOVES) + (1 << SNAKEOFFSETHEALTH),\n where=snakes > 0)\n', (5446, 5528), True, 'import numpy as np\n'), ((7982, 8042), 'numpy.where', 'np.where', (['(me > 0)', '((health_delta << SNAKEOFFSETHEALTH) + 1)', '(0)'], {}), '(me > 0, (health_delta << SNAKEOFFSETHEALTH) + 1, 0)\n', (7990, 8042), True, 'import numpy as np\n'), ((10125, 10145), 'numpy.copy', 'np.copy', (['self.pieces'], {}), '(self.pieces)\n', (10132, 10145), True, 'import numpy as np\n'), ((6343, 6390), 'numpy.add', 'np.add', (['snakes', 'SNAKEMASKDEAD'], {'where': '(snakes > 0)'}), '(snakes, SNAKEMASKDEAD, where=snakes > 0)\n', (6349, 6390), True, 'import numpy as np\n'), ((10319, 10375), 'numpy.bitwise_and', 'np.bitwise_and', (['pieces[:, :, BOARDLAYER]', 
'BOARDMASKSNACK'], {}), '(pieces[:, :, BOARDLAYER], BOARDMASKSNACK)\n', (10333, 10375), True, 'import numpy as np\n'), ((2240, 2254), 'numpy.amax', 'np.amax', (['board'], {}), '(board)\n', (2247, 2254), True, 'import numpy as np\n'), ((5687, 5734), 'numpy.bitwise_and', 'np.bitwise_and', (['snakes', 'SNAKEMASKMOVESREMAINING'], {}), '(snakes, SNAKEMASKMOVESREMAINING)\n', (5701, 5734), True, 'import numpy as np\n'), ((7123, 7177), 'numpy.add', 'np.add', (['snakes', '(1 << SNAKEOFFSETDEAD)'], {'where': '(snakes > 0)'}), '(snakes, 1 << SNAKEOFFSETDEAD, where=snakes > 0)\n', (7129, 7177), True, 'import numpy as np\n'), ((7867, 7914), 'numpy.bitwise_and', 'np.bitwise_and', (['snakes', 'SNAKEMASKMOVESREMAINING'], {}), '(snakes, SNAKEMASKMOVESREMAINING)\n', (7881, 7914), True, 'import numpy as np\n'), ((10509, 10554), 'numpy.where', 'np.where', (['(pieces[:, :, SNAKELAYER] > 0)', '(1)', '(-1)'], {}), '(pieces[:, :, SNAKELAYER] > 0, 1, -1)\n', (10517, 10554), True, 'import numpy as np\n'), ((10716, 10761), 'numpy.where', 'np.where', (['(pieces[:, :, SNAKELAYER] > 0)', '(1)', '(-1)'], {}), '(pieces[:, :, SNAKELAYER] > 0, 1, -1)\n', (10724, 10761), True, 'import numpy as np\n'), ((10917, 10962), 'numpy.where', 'np.where', (['(pieces[:, :, SNAKELAYER] > 0)', '(1)', '(-1)'], {}), '(pieces[:, :, SNAKELAYER] > 0, 1, -1)\n', (10925, 10962), True, 'import numpy as np\n'), ((3094, 3155), 'numpy.bitwise_and', 'np.bitwise_and', (['self.pieces[:, :, BOARDLAYER]', 'BOARDMASKSNACK'], {}), '(self.pieces[:, :, BOARDLAYER], BOARDMASKSNACK)\n', (3108, 3155), True, 'import numpy as np\n'), ((10443, 10480), 'numpy.absolute', 'np.absolute', (['pieces[:, :, SNAKELAYER]'], {}), '(pieces[:, :, SNAKELAYER])\n', (10454, 10480), True, 'import numpy as np\n'), ((10638, 10675), 'numpy.absolute', 'np.absolute', (['pieces[:, :, SNAKELAYER]'], {}), '(pieces[:, :, SNAKELAYER])\n', (10649, 10675), True, 'import numpy as np\n'), ((10843, 10880), 'numpy.absolute', 'np.absolute', (['pieces[:, :, SNAKELAYER]'], 
{}), '(pieces[:, :, SNAKELAYER])\n', (10854, 10880), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.lines as lines
import matplotlib.text as text
import matplotlib.patches as patches
import argparse
import sys
from matplotlib.backends.backend_pdf import PdfPages
from emma.processing.dsp import butter_filter
from common import *
def delete_border(axis):
axis.spines['top'].set_visible(False)
axis.spines['right'].set_visible(False)
axis.spines['bottom'].set_visible(False)
axis.spines['left'].set_visible(False)
def print_des_indices(meta):
for i, m in enumerate(meta):
if m["op"] == "des_openssl":
print(i)
def draw_box(axis, start, end, message):
fontprop = matplotlib.font_manager.FontProperties(family="Linux Libertine", size=10)
# Create a Rectangle patch
offset = 20
box = patches.Rectangle((start, offset), end-start, 512-(2*offset), linewidth=2, edgecolor='black', facecolor='none')
axis.add_patch(box)
description = text.Text(end - (end-start)//2, 90, message, ha='center', va='bottom', axes=axis, fontproperties=fontprop)
axis.add_artist(description)
def generate_graph(spike_path, des_path, spike_index, des_index):
# Setup
plt.rcParams.update({
'font.size': 14,
'font.sans-serif': ["Linux Libertine"],
})
fs = 56.0e6
bins_to_draw = 512
vmin = 0.000000000000001
vmax = 0.000000001
fft_size = 512
overlap = 0.90
colormap = 'plasma'
samples_per_bin = fft_size - int(fft_size * overlap)
fig, (spike_ax, des_ax) = plt.subplots(1, 2, figsize=(8, 3), dpi=300)
# DES with spike plot
spike_arch = np.load(spike_path, allow_pickle=True)[spike_index]
spike_offset = 14
print(len(spike_arch))
print(get_stft(spike_arch, show_plot=False).shape[1]*samples_per_bin)
spike_arch = get_stft(spike_arch, show_plot=False, fft_size=fft_size, overlap_rate=overlap)[:, spike_offset:spike_offset+bins_to_draw]
spike_len = spike_arch.shape[1]
spike_arch = np.fft.fftshift(spike_arch, axes=0)
n = matplotlib.colors.LogNorm(vmin=vmin, vmax=vmax, clip=False)
spike_ax.imshow(spike_arch, norm=n, interpolation='bicubic', aspect='auto', cmap=colormap)
# Fix y axis
freqs = np.fft.fftshift(np.fft.fftfreq(512, d=1.0/fs))
spike_ax.set_yticks([0, 128, 256, 384, 512])
freqs = list(freqs)
freqs.append(fs/2)
spike_ax.set_yticklabels([int(freqs[x] / 1e6) for x in spike_ax.get_yticks()])
# Fix x axis
spike_ax.set_xlim(0, spike_len)
spike_ax.set_xticks([x / samples_per_bin for x in [0, 5000, 10000, 15000, 20000, 25000]])
spike_ax.set_xticklabels([f"{int(x*samples_per_bin):,d}" for x in spike_ax.get_xticks()])
spike_ax.set_xlabel("Time (samples)")
spike_ax.set_ylabel("Frequency (MHz)")
spike_ax.set_title("OpenSSL DES (occluded)")
# Draw box
draw_box(spike_ax, 102, 232, "")
# -----------
# DES plot
des_arch = np.load(des_path, allow_pickle=True)[des_index]
des_arch = get_stft(des_arch, show_plot=False, fft_size=fft_size, overlap_rate=overlap)[:, 0:bins_to_draw]
des_len = des_arch.shape[1]
des_arch = np.fft.fftshift(des_arch, axes=0)
n = matplotlib.colors.LogNorm(vmin=vmin, vmax=vmax, clip=False)
des_ax.imshow(des_arch, norm=n, interpolation='bicubic', aspect='auto', cmap=colormap)
# Fix y axis
freqs = np.fft.fftshift(np.fft.fftfreq(512, d=1.0/fs))
des_ax.set_yticks([0, 128, 256, 384, 512])
freqs = list(freqs)
freqs.append(fs/2)
des_ax.set_yticklabels([int(freqs[x] / 1e6) for x in des_ax.get_yticks()])
# Fix x axis
des_ax.set_xlim(0, des_len)
des_ax.set_xticks([x / samples_per_bin for x in [0, 5000, 10000, 15000, 20000, 25000]])
des_ax.set_xticklabels([f"{int(x*samples_per_bin):,d}" for x in des_ax.get_xticks()])
des_ax.set_xlabel("Time (samples)")
des_ax.set_ylabel("Frequency (MHz)")
des_ax.set_title("OpenSSL DES")
# Draw box
draw_box(des_ax, 96, 174, "")
# Render
plt.tight_layout()
with PdfPages('desspikeexample.pdf') as pdf:
pdf.savefig()
parser = argparse.ArgumentParser(description="Draw DES problem figure.")
args = parser.parse_args()
args.spike_path = "./datasets/nodemcu-random-train2/2020-02-17_11-21-00_296506_traces.npy"
args.des_path = "./datasets/nodemcu-random-train2/2020-02-17_11-21-00_296506_traces.npy"
spike_meta = load_meta(args.spike_path.replace("_traces.npy", "_meta.p"))
print_des_indices(spike_meta)
generate_graph(args.spike_path, args.des_path, spike_index=10, des_index=14)
| [
"matplotlib.patches.Rectangle",
"argparse.ArgumentParser",
"matplotlib.font_manager.FontProperties",
"numpy.fft.fftfreq",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.tight_layout",
"matplotlib.backends.backend_pdf.PdfPages",
"numpy.fft.fftshift",
"numpy.lo... | [((4107, 4170), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Draw DES problem figure."""'}), "(description='Draw DES problem figure.')\n", (4130, 4170), False, 'import argparse\n'), ((698, 771), 'matplotlib.font_manager.FontProperties', 'matplotlib.font_manager.FontProperties', ([], {'family': '"""Linux Libertine"""', 'size': '(10)'}), "(family='Linux Libertine', size=10)\n", (736, 771), False, 'import matplotlib\n'), ((830, 950), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(start, offset)', '(end - start)', '(512 - 2 * offset)'], {'linewidth': '(2)', 'edgecolor': '"""black"""', 'facecolor': '"""none"""'}), "((start, offset), end - start, 512 - 2 * offset, linewidth\n =2, edgecolor='black', facecolor='none')\n", (847, 950), True, 'import matplotlib.patches as patches\n'), ((985, 1099), 'matplotlib.text.Text', 'text.Text', (['(end - (end - start) // 2)', '(90)', 'message'], {'ha': '"""center"""', 'va': '"""bottom"""', 'axes': 'axis', 'fontproperties': 'fontprop'}), "(end - (end - start) // 2, 90, message, ha='center', va='bottom',\n axes=axis, fontproperties=fontprop)\n", (994, 1099), True, 'import matplotlib.text as text\n'), ((1209, 1287), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 14, 'font.sans-serif': ['Linux Libertine']}"], {}), "({'font.size': 14, 'font.sans-serif': ['Linux Libertine']})\n", (1228, 1287), True, 'import matplotlib.pyplot as plt\n'), ((1551, 1594), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(8, 3)', 'dpi': '(300)'}), '(1, 2, figsize=(8, 3), dpi=300)\n', (1563, 1594), True, 'import matplotlib.pyplot as plt\n'), ((2006, 2041), 'numpy.fft.fftshift', 'np.fft.fftshift', (['spike_arch'], {'axes': '(0)'}), '(spike_arch, axes=0)\n', (2021, 2041), True, 'import numpy as np\n'), ((2050, 2109), 'matplotlib.colors.LogNorm', 'matplotlib.colors.LogNorm', ([], {'vmin': 'vmin', 'vmax': 'vmax', 'clip': '(False)'}), '(vmin=vmin, 
vmax=vmax, clip=False)\n', (2075, 2109), False, 'import matplotlib\n'), ((3146, 3179), 'numpy.fft.fftshift', 'np.fft.fftshift', (['des_arch'], {'axes': '(0)'}), '(des_arch, axes=0)\n', (3161, 3179), True, 'import numpy as np\n'), ((3188, 3247), 'matplotlib.colors.LogNorm', 'matplotlib.colors.LogNorm', ([], {'vmin': 'vmin', 'vmax': 'vmax', 'clip': '(False)'}), '(vmin=vmin, vmax=vmax, clip=False)\n', (3213, 3247), False, 'import matplotlib\n'), ((4006, 4024), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4022, 4024), True, 'import matplotlib.pyplot as plt\n'), ((1639, 1677), 'numpy.load', 'np.load', (['spike_path'], {'allow_pickle': '(True)'}), '(spike_path, allow_pickle=True)\n', (1646, 1677), True, 'import numpy as np\n'), ((2251, 2282), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['(512)'], {'d': '(1.0 / fs)'}), '(512, d=1.0 / fs)\n', (2265, 2282), True, 'import numpy as np\n'), ((2940, 2976), 'numpy.load', 'np.load', (['des_path'], {'allow_pickle': '(True)'}), '(des_path, allow_pickle=True)\n', (2947, 2976), True, 'import numpy as np\n'), ((3385, 3416), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['(512)'], {'d': '(1.0 / fs)'}), '(512, d=1.0 / fs)\n', (3399, 3416), True, 'import numpy as np\n'), ((4034, 4065), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['"""desspikeexample.pdf"""'], {}), "('desspikeexample.pdf')\n", (4042, 4065), False, 'from matplotlib.backends.backend_pdf import PdfPages\n')] |
from PIL import ImageDraw, Image
import numpy as np
import hashlib
import random
# array_list = [1]
background_color = '#F2F1F2'
colors = ['#CD00CD', 'Red', 'Orange', "#66FF00", "#2A52BE"]
def generate_array(bytes):
## Generate array
for i in range(100):
# Array 6 * 12
need_array = np.array([bit == '1' for byte in bytes[3:3 + 9] for bit in bin(byte)[2:].zfill(8)]).reshape(6, 12)
# Get full array 12 * 12
need_array = np.concatenate((need_array, need_array[::-1]), axis=0)
for i in range(12):
need_array[0, i] = 0
need_array[11, i] = 0
need_array[i, 0] = 0
need_array[i, 11] = 0
return need_array
def generate_pyxies(pyxie_size: int, s: str) -> None:
bytes = hashlib.md5(s.encode('utf-8')).digest()
need_color = generate_array(bytes)
## Draw image
img_size = (pyxie_size, pyxie_size)
block_size = pyxie_size // 12 # Size
img = Image.new('RGB', img_size, background_color)
draw = ImageDraw.Draw(img)
for x in range(pyxie_size):
for y in range(pyxie_size):
need_to_paint = need_color[x // block_size, y // block_size]
if need_to_paint:
draw.point((x, y), random.choice(colors))
format = 'jpeg'
path = f'CryptoPyxie_{s}.{format}'
img.save(path, format)
if __name__ == "__main__":
# argv[1] - size of pictures (pixels)
# argv[2] - int - hash generate
from sys import argv
cryptopyxie_size = int(argv[1])
cryptopyxie_name = argv[2]
cycle = generate_pyxies(int(cryptopyxie_size // 12 * 12), cryptopyxie_name) | [
"PIL.Image.new",
"PIL.ImageDraw.Draw",
"random.choice",
"numpy.concatenate"
] | [((974, 1018), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'img_size', 'background_color'], {}), "('RGB', img_size, background_color)\n", (983, 1018), False, 'from PIL import ImageDraw, Image\n'), ((1030, 1049), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img'], {}), '(img)\n', (1044, 1049), False, 'from PIL import ImageDraw, Image\n'), ((468, 522), 'numpy.concatenate', 'np.concatenate', (['(need_array, need_array[::-1])'], {'axis': '(0)'}), '((need_array, need_array[::-1]), axis=0)\n', (482, 522), True, 'import numpy as np\n'), ((1257, 1278), 'random.choice', 'random.choice', (['colors'], {}), '(colors)\n', (1270, 1278), False, 'import random\n')] |
import numpy as np
import scipy.signal
__all__ = ['instant_parameters']
#-----------------------------------
def instant_parameters(signal, fs = None):
'''
Instant parameters estimation:
..math::
analitc_signal = hilbert(signal)
envelope = |analitc_signal|
phase = unwrap(angle(analitc_signal))
frequency = diff(phase)
Paramteres
-------------
signal: 1d ndarray,
input signal (can be real or complex);
fs: float or None,
sampling frequecny, fs = signal.size, if None
Return
-------------
frequency: 1d ndarray,
instant frequency to time relation.
envelope: 1d ndarray,
instant envelope to time relation.
phase: 1d ndarray,
instant pahse to time relation.
'''
if fs is None:
fs = len(signal)
signal = np.asarray(signal)
if signal.dtype != complex:
analytic = scipy.signal.hilbert(signal)
else:
analytic = signal
envelope = np.abs(analytic)
angles = np.angle(analytic)
phase = np.unwrap(angles)
frequency = np.concatenate((np.diff(angles),
[angles[-2] - angles[-1]]))
for i in range(frequency.size):
if frequency[i]< 0:
if i>0: frequency[i] = frequency[i-1]
else: frequency[i] = frequency[i+1]
frequency = frequency*fs/(2*np.pi)
return frequency, envelope, phase
| [
"numpy.abs",
"numpy.unwrap",
"numpy.asarray",
"numpy.diff",
"numpy.angle"
] | [((867, 885), 'numpy.asarray', 'np.asarray', (['signal'], {}), '(signal)\n', (877, 885), True, 'import numpy as np\n'), ((1023, 1039), 'numpy.abs', 'np.abs', (['analytic'], {}), '(analytic)\n', (1029, 1039), True, 'import numpy as np\n'), ((1055, 1073), 'numpy.angle', 'np.angle', (['analytic'], {}), '(analytic)\n', (1063, 1073), True, 'import numpy as np\n'), ((1089, 1106), 'numpy.unwrap', 'np.unwrap', (['angles'], {}), '(angles)\n', (1098, 1106), True, 'import numpy as np\n'), ((1140, 1155), 'numpy.diff', 'np.diff', (['angles'], {}), '(angles)\n', (1147, 1155), True, 'import numpy as np\n')] |
#Start up torch dist package
import torch
import torch.distributed as dist
dist.init_process_group(backend='mpi')
#Load classes for simulations and controls
from brownian_fts import BrownianParticle
import numpy as np
#Starting and ending configuration.
start = torch.tensor([[-1.0]])
end = torch.tensor([[1.0]])
def initializer(s):
return (1-s)*start+s*end
alphas = torch.linspace(0.0,1,dist.get_world_size()+2)[1:-1]
bp_simulator = BrownianParticle(dt=2e-3,gamma=1.0, kT = 0.4, initial=initializer(alphas[dist.get_rank()]),prefix='test',save_config=False)
#Generate data for validation test
#For this 1D brownian example case, the TSE is generated by the middle replica.
#Note that This is assuming that you run an odd number of MPI processes
data = np.loadtxt('test_bp_{}.txt'.format(int((dist.get_world_size()+1)/2)))
#If I run 10 processes, that's 10*10 = 100 initial configurations!
num_configs = 10
trials = 500
validation_io = open("test_validation_{}.txt".format(dist.get_rank()+1),"w")
import tqdm
#For loop over initial states
if dist.get_rank() == 0:
print("Ready to generate validation test!".format(dist.get_rank()+1))
for i in range(num_configs):
counts = []
initial_config = torch.from_numpy(np.array([[np.random.choice(data[:,0])]]).astype(np.float32)).detach().clone()
for j in tqdm.tqdm(range(trials)):
hitting = False
bp_simulator.qt = initial_config.detach().clone()
#Run simulation and stop until it falls into the product or reactant state
while hitting is False:
bp_simulator.runUnbiased()
if np.abs(bp_simulator.qt.item()) >= 1.0:
if bp_simulator.qt.item() < 0:
counts.append(0.0)
elif bp_simulator.qt.item() > 0:
counts.append(1.0)
hitting = True
#Compute the committor after a certain number of trials
counts = np.array(counts)
mean_count = np.mean(counts)
conf_count = 1.96*np.std(counts)/len(counts)**0.5 #do 95 % confidence interval
#Save into io
if validation_io is not None:
validation_io.write('{} {} {} \n'.format(mean_count, conf_count,initial_config.item()))
validation_io.flush()
| [
"numpy.mean",
"numpy.random.choice",
"numpy.std",
"torch.tensor",
"numpy.array",
"torch.distributed.get_rank",
"torch.distributed.init_process_group",
"torch.distributed.get_world_size"
] | [((75, 113), 'torch.distributed.init_process_group', 'dist.init_process_group', ([], {'backend': '"""mpi"""'}), "(backend='mpi')\n", (98, 113), True, 'import torch.distributed as dist\n'), ((264, 286), 'torch.tensor', 'torch.tensor', (['[[-1.0]]'], {}), '([[-1.0]])\n', (276, 286), False, 'import torch\n'), ((293, 314), 'torch.tensor', 'torch.tensor', (['[[1.0]]'], {}), '([[1.0]])\n', (305, 314), False, 'import torch\n'), ((1050, 1065), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (1063, 1065), True, 'import torch.distributed as dist\n'), ((1919, 1935), 'numpy.array', 'np.array', (['counts'], {}), '(counts)\n', (1927, 1935), True, 'import numpy as np\n'), ((1953, 1968), 'numpy.mean', 'np.mean', (['counts'], {}), '(counts)\n', (1960, 1968), True, 'import numpy as np\n'), ((394, 415), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (413, 415), True, 'import torch.distributed as dist\n'), ((981, 996), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (994, 996), True, 'import torch.distributed as dist\n'), ((1992, 2006), 'numpy.std', 'np.std', (['counts'], {}), '(counts)\n', (1998, 2006), True, 'import numpy as np\n'), ((514, 529), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (527, 529), True, 'import torch.distributed as dist\n'), ((1126, 1141), 'torch.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (1139, 1141), True, 'import torch.distributed as dist\n'), ((800, 821), 'torch.distributed.get_world_size', 'dist.get_world_size', ([], {}), '()\n', (819, 821), True, 'import torch.distributed as dist\n'), ((1240, 1268), 'numpy.random.choice', 'np.random.choice', (['data[:, 0]'], {}), '(data[:, 0])\n', (1256, 1268), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Imports
import os
import pickle
import pandas as pd
from warnings import simplefilter
from model_funs import fasta_frame, ohe_fun, flatten_sequence
import numpy as np
from numpy import array
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow.python.util import deprecation
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv1D, Dense, MaxPooling1D, Flatten, Dropout
from keras.utils import to_categorical
from keras.callbacks import ModelCheckpoint
# Suppress warnings
deprecation._PRINT_DEPRECATION_WARNINGS = False
simplefilter(action = 'ignore', category = FutureWarning)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Files
dataset = 'nadd_ml_df_train_set_00.01.csv'
model_wb = "model_nadd_cnn_wb.hdf5"
model_final = "model_nadd_cnn.hdf5"
model_history = "model_nadd_cnn_saved_history"
model_plot = 'model_nadd_cnn.png'
# Set seed for model reproducibility
SEED = 13
tf.compat.v1.random.set_random_seed(SEED)
np.random.seed(SEED)
# Load saved dataframe
seq_df = pd.read_csv(dataset)
# Transform sequences and labels
## Sequences
x_sequence_arrays = ohe_fun(seq_df)
x_flat_2d = flatten_sequence(x_sequence_arrays)
## Labels
y_str = seq_df['label']
lbenc = preprocessing.LabelBinarizer()
ynn = lbenc.fit_transform(y_str)
encoded = to_categorical(ynn)
# Split dataset in training and test sets
x_train, x_test, ynn_train, ynn_test = train_test_split(x_flat_2d,
encoded,
test_size = 0.20,
random_state = SEED,
stratify = y_str)
# Expand dimensions for deep learning model
x_train_3d = np.expand_dims(x_train, axis=2)
x_test_3d = np.expand_dims(x_test, axis=2)
# CNN model
model_cnn = Sequential()
# CNN layers
## Input and first hidden layer
model_cnn.add(Conv1D(filters=32,
kernel_size=8,
input_shape=(x_train.shape[1], 1),
activation='relu'))
model_cnn.add(MaxPooling1D(pool_size=4))
## Second hidden layer
model_cnn.add(Conv1D(filters=24,
kernel_size=8,
activation='relu'))
model_cnn.add(MaxPooling1D(pool_size=4))
## Third hidden layer
model_cnn.add(Conv1D(filters=24,
kernel_size=8,
activation='relu'))
model_cnn.add(MaxPooling1D(pool_size=4))
# Flatten data for dense layers
model_cnn.add(Flatten())
# Dense layers
## Fourth hidden layer
model_cnn.add(Dense(24, activation='relu'))
model_cnn.add(Dropout(0.4))
## Output layer
model_cnn.add(Dense(2, activation='softmax'))
# Compile model
model_cnn.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
# Checkpoint best weights
filepath = model_wb
checkpoint = ModelCheckpoint(filepath,
monitor='val_acc',
verbose=1,
save_best_only=True,
mode='max')
callbacks_list = [checkpoint]
# Fit the model
history_cnn = model_cnn.fit(x_train_3d, ynn_train,
epochs = 15,
batch_size = 10,
verbose = 1,
callbacks = callbacks_list,
validation_split = 0.0,
validation_data = (x_test_3d,ynn_test),
shuffle = True)
# Save model
model_cnn.save(model_final)
# Save history
with open(model_history,"wb") as file_pi:
pickle.dump(history_cnn.history, file_pi)
# Plot model
tf.keras.utils.plot_model(model_cnn,
show_shapes = True,
show_layer_names = True,
to_file = model_plot)
| [
"pandas.read_csv",
"keras.utils.to_categorical",
"tensorflow.keras.utils.plot_model",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.MaxPooling1D",
"sklearn.preprocessing.LabelBinarizer",
"tensorflow.compat.v1.random.set_random_seed",
"numpy.random.seed",
"warnings.simplefilter",
"model... | [((679, 732), 'warnings.simplefilter', 'simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), "(action='ignore', category=FutureWarning)\n", (691, 732), False, 'from warnings import simplefilter\n'), ((1030, 1071), 'tensorflow.compat.v1.random.set_random_seed', 'tf.compat.v1.random.set_random_seed', (['SEED'], {}), '(SEED)\n', (1065, 1071), True, 'import tensorflow as tf\n'), ((1072, 1092), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (1086, 1092), True, 'import numpy as np\n'), ((1125, 1145), 'pandas.read_csv', 'pd.read_csv', (['dataset'], {}), '(dataset)\n', (1136, 1145), True, 'import pandas as pd\n'), ((1212, 1227), 'model_funs.ohe_fun', 'ohe_fun', (['seq_df'], {}), '(seq_df)\n', (1219, 1227), False, 'from model_funs import fasta_frame, ohe_fun, flatten_sequence\n'), ((1240, 1275), 'model_funs.flatten_sequence', 'flatten_sequence', (['x_sequence_arrays'], {}), '(x_sequence_arrays)\n', (1256, 1275), False, 'from model_funs import fasta_frame, ohe_fun, flatten_sequence\n'), ((1318, 1348), 'sklearn.preprocessing.LabelBinarizer', 'preprocessing.LabelBinarizer', ([], {}), '()\n', (1346, 1348), False, 'from sklearn import preprocessing\n'), ((1392, 1411), 'keras.utils.to_categorical', 'to_categorical', (['ynn'], {}), '(ynn)\n', (1406, 1411), False, 'from keras.utils import to_categorical\n'), ((1493, 1583), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x_flat_2d', 'encoded'], {'test_size': '(0.2)', 'random_state': 'SEED', 'stratify': 'y_str'}), '(x_flat_2d, encoded, test_size=0.2, random_state=SEED,\n stratify=y_str)\n', (1509, 1583), False, 'from sklearn.model_selection import train_test_split\n'), ((1868, 1899), 'numpy.expand_dims', 'np.expand_dims', (['x_train'], {'axis': '(2)'}), '(x_train, axis=2)\n', (1882, 1899), True, 'import numpy as np\n'), ((1912, 1942), 'numpy.expand_dims', 'np.expand_dims', (['x_test'], {'axis': '(2)'}), '(x_test, axis=2)\n', (1926, 1942), True, 'import numpy 
as np\n'), ((1967, 1979), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1977, 1979), False, 'from tensorflow.keras.models import Sequential\n'), ((3003, 3095), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['filepath'], {'monitor': '"""val_acc"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""max"""'}), "(filepath, monitor='val_acc', verbose=1, save_best_only=True,\n mode='max')\n", (3018, 3095), False, 'from keras.callbacks import ModelCheckpoint\n'), ((3803, 3905), 'tensorflow.keras.utils.plot_model', 'tf.keras.utils.plot_model', (['model_cnn'], {'show_shapes': '(True)', 'show_layer_names': '(True)', 'to_file': 'model_plot'}), '(model_cnn, show_shapes=True, show_layer_names=\n True, to_file=model_plot)\n', (3828, 3905), True, 'import tensorflow as tf\n'), ((2039, 2130), 'tensorflow.keras.layers.Conv1D', 'Conv1D', ([], {'filters': '(32)', 'kernel_size': '(8)', 'input_shape': '(x_train.shape[1], 1)', 'activation': '"""relu"""'}), "(filters=32, kernel_size=8, input_shape=(x_train.shape[1], 1),\n activation='relu')\n", (2045, 2130), False, 'from tensorflow.keras.layers import Conv1D, Dense, MaxPooling1D, Flatten, Dropout\n'), ((2202, 2227), 'tensorflow.keras.layers.MaxPooling1D', 'MaxPooling1D', ([], {'pool_size': '(4)'}), '(pool_size=4)\n', (2214, 2227), False, 'from tensorflow.keras.layers import Conv1D, Dense, MaxPooling1D, Flatten, Dropout\n'), ((2266, 2318), 'tensorflow.keras.layers.Conv1D', 'Conv1D', ([], {'filters': '(24)', 'kernel_size': '(8)', 'activation': '"""relu"""'}), "(filters=24, kernel_size=8, activation='relu')\n", (2272, 2318), False, 'from tensorflow.keras.layers import Conv1D, Dense, MaxPooling1D, Flatten, Dropout\n'), ((2374, 2399), 'tensorflow.keras.layers.MaxPooling1D', 'MaxPooling1D', ([], {'pool_size': '(4)'}), '(pool_size=4)\n', (2386, 2399), False, 'from tensorflow.keras.layers import Conv1D, Dense, MaxPooling1D, Flatten, Dropout\n'), ((2437, 2489), 'tensorflow.keras.layers.Conv1D', 'Conv1D', 
([], {'filters': '(24)', 'kernel_size': '(8)', 'activation': '"""relu"""'}), "(filters=24, kernel_size=8, activation='relu')\n", (2443, 2489), False, 'from tensorflow.keras.layers import Conv1D, Dense, MaxPooling1D, Flatten, Dropout\n'), ((2545, 2570), 'tensorflow.keras.layers.MaxPooling1D', 'MaxPooling1D', ([], {'pool_size': '(4)'}), '(pool_size=4)\n', (2557, 2570), False, 'from tensorflow.keras.layers import Conv1D, Dense, MaxPooling1D, Flatten, Dropout\n'), ((2618, 2627), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2625, 2627), False, 'from tensorflow.keras.layers import Conv1D, Dense, MaxPooling1D, Flatten, Dropout\n'), ((2681, 2709), 'tensorflow.keras.layers.Dense', 'Dense', (['(24)'], {'activation': '"""relu"""'}), "(24, activation='relu')\n", (2686, 2709), False, 'from tensorflow.keras.layers import Conv1D, Dense, MaxPooling1D, Flatten, Dropout\n'), ((2725, 2737), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.4)'], {}), '(0.4)\n', (2732, 2737), False, 'from tensorflow.keras.layers import Conv1D, Dense, MaxPooling1D, Flatten, Dropout\n'), ((2769, 2799), 'tensorflow.keras.layers.Dense', 'Dense', (['(2)'], {'activation': '"""softmax"""'}), "(2, activation='softmax')\n", (2774, 2799), False, 'from tensorflow.keras.layers import Conv1D, Dense, MaxPooling1D, Flatten, Dropout\n'), ((3748, 3789), 'pickle.dump', 'pickle.dump', (['history_cnn.history', 'file_pi'], {}), '(history_cnn.history, file_pi)\n', (3759, 3789), False, 'import pickle\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Program: Fit peaks with Lorentzian distribution
Version: 20201123
@author: <NAME> (GitHub: @pranabdas)
data = suv.fit_lorentz(x, y, a='', x0='', gamma='', xmin='', xmax='')
"""
def fit_lorentz(x, y, a='', x0='', gamma='', xmin='', xmax='', num=1000):
import numpy as np
from scipy import optimize
def lorentz(x, a, x0, gamma):
return a*gamma**2/(np.pi*gamma*((x - x0)**2 + gamma**2))
if xmin:
xmin = float(xmin)
xmin_id = np.where(abs(x-xmin)==min(abs(x-xmin)))[0][0]
x = x[xmin_id:]
y = y[xmin_id:]
else:
xmin = x[0]
if xmax:
xmax = float(xmax)
xmax_id = np.where(abs(x-xmax)==min(abs(x-xmax)))[0][0]
x = x[:xmax_id]
y = y[:xmax_id]
else:
xmax = x[-1]
if not a:
a = max(y)
if not x0:
x0 = x[np.where(y==max(y))][0]
if not gamma:
gamma = abs(x[-1] - x[0])/10
params, params_covariance = optimize.curve_fit(lorentz, x, y, p0=[a, x0, gamma])
print("a =", params[0],"\nx0 =", params[1], "\ngamma =", params[2], \
"\nFWHM =", 2 * params[2])
fit_x = np.linspace(xmin, xmax, num)
fit_y = lorentz(fit_x, params[0], params[1], params[2])
return fit_x, fit_y
| [
"scipy.optimize.curve_fit",
"numpy.linspace"
] | [((1003, 1055), 'scipy.optimize.curve_fit', 'optimize.curve_fit', (['lorentz', 'x', 'y'], {'p0': '[a, x0, gamma]'}), '(lorentz, x, y, p0=[a, x0, gamma])\n', (1021, 1055), False, 'from scipy import optimize\n'), ((1179, 1207), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'num'], {}), '(xmin, xmax, num)\n', (1190, 1207), True, 'import numpy as np\n')] |
from rdkit import Chem
from smdt.descriptors import AtomProperty
import numpy
import pandas as pd
def _CalculateGearyAutocorrelation(mol, lag=1, propertylabel='m'):
"""
**Internal used only**
Calculation of Geary autocorrelation descriptors based on
different property weights.
"""
Natom = mol.GetNumAtoms()
prolist = []
for i in mol.GetAtoms():
temp = AtomProperty.GetRelativeAtomicProperty(i.GetSymbol(), propertyname=propertylabel)
prolist.append(temp)
aveweight = sum(prolist) / Natom
tempp = [numpy.square(x - aveweight) for x in prolist]
GetDistanceMatrix = Chem.GetDistanceMatrix(mol)
res = 0.0
index = 0
for i in range(Natom):
for j in range(Natom):
if GetDistanceMatrix[i, j] == lag:
atom1 = mol.GetAtomWithIdx(i)
atom2 = mol.GetAtomWithIdx(j)
temp1 = AtomProperty.GetRelativeAtomicProperty(element=atom1.GetSymbol(), propertyname=propertylabel)
temp2 = AtomProperty.GetRelativeAtomicProperty(element=atom2.GetSymbol(), propertyname=propertylabel)
res = res + numpy.square(temp1 - temp2)
index = index + 1
else:
res = res + 0.0
if sum(tempp) == 0 or index == 0:
result = 0
else:
result = (res / index / 2) / (sum(tempp) / (Natom - 1))
return round(result, 3)
def CalculateGearyAutoMass(mol):
"""
Calculation of Geary autocorrelation descriptors based on
carbon-scaled atomic mass.
"""
res = {}
for i in range(8):
res['GATSm' + str(i + 1)] = _CalculateGearyAutocorrelation(mol, lag=i + 1, propertylabel='m')
return res
def CalculateGearyAutoVolume(mol):
"""
Calculation of Geary autocorrelation descriptors based on
carbon-scaled atomic van <NAME>als volume.
"""
res = {}
for i in range(8):
res['GATSv' + str(i + 1)] = _CalculateGearyAutocorrelation(mol, lag=i + 1, propertylabel='V')
return res
def CalculateGearyAutoElectronegativity(mol):
"""
Calculation of Geary autocorrelation descriptors based on
carbon-scaled atomic Sanderson electronegativity.
"""
res = {}
for i in range(8):
res['GATSe' + str(i + 1)] = _CalculateGearyAutocorrelation(mol, lag=i + 1, propertylabel='En')
return res
def CalculateGearyAutoPolarizability(mol):
"""
Calculation of Geary autocorrelation descriptors based on
carbon-scaled atomic polarizability.
"""
res = {}
for i in range(8):
res['GATSp' + str(i + 1)] = _CalculateGearyAutocorrelation(mol, lag=i + 1, propertylabel='alapha')
return res
def GetGearyAutoofMol(mol):
"""
Calcualate all Geary autocorrelation descriptors.
"""
res = {}
res.update(CalculateGearyAutoMass(mol))
res.update(CalculateGearyAutoVolume(mol))
res.update(CalculateGearyAutoElectronegativity(mol))
res.update(CalculateGearyAutoPolarizability(mol))
return res
def getGearyAuto(df_x):
"""
Calculates all Geary Auto-correlation descriptors for the dataset
Parameters:
df_x: pandas.DataFrame
SMILES DataFrame
Returns:
geary_descriptors: pandas.DataFrame
Geary Auto-correlation Descriptors DataFrame
"""
labels = []
for i in range(8):
labels.append('GATSm' + str(i + 1))
labels.append('GATSv' + str(i + 1))
labels.append('GATSe' + str(i + 1))
labels.append('GATSp' + str(i + 1))
r = {}
for key in labels:
r[key] = []
for m in df_x['SMILES']:
mol = Chem.MolFromSmiles(m)
res = GetGearyAutoofMol(mol)
for key in labels:
r[key].append(res[key])
geary_descriptors = pd.DataFrame(r).round(3)
return pd.DataFrame(geary_descriptors) | [
"rdkit.Chem.GetDistanceMatrix",
"rdkit.Chem.MolFromSmiles",
"pandas.DataFrame",
"numpy.square"
] | [((655, 682), 'rdkit.Chem.GetDistanceMatrix', 'Chem.GetDistanceMatrix', (['mol'], {}), '(mol)\n', (677, 682), False, 'from rdkit import Chem\n'), ((3986, 4017), 'pandas.DataFrame', 'pd.DataFrame', (['geary_descriptors'], {}), '(geary_descriptors)\n', (3998, 4017), True, 'import pandas as pd\n'), ((582, 609), 'numpy.square', 'numpy.square', (['(x - aveweight)'], {}), '(x - aveweight)\n', (594, 609), False, 'import numpy\n'), ((3799, 3820), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['m'], {}), '(m)\n', (3817, 3820), False, 'from rdkit import Chem\n'), ((3949, 3964), 'pandas.DataFrame', 'pd.DataFrame', (['r'], {}), '(r)\n', (3961, 3964), True, 'import pandas as pd\n'), ((1182, 1209), 'numpy.square', 'numpy.square', (['(temp1 - temp2)'], {}), '(temp1 - temp2)\n', (1194, 1209), False, 'import numpy\n')] |
import os
import random
import numpy as np
import argparse
import logging
import pickle
from pprint import pformat
from exps.data import get_modelnet40_data_fps
from settree.set_data import SetDataset, OPERATIONS, flatten_datasets
import exps.eval_utils as eval
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--exp_name", type=str, default='test')
parser.add_argument("--log", action='store_true')
parser.add_argument("--seed", type=int, default=45)
parser.add_argument('--save', action='store_true')
args = parser.parse_args()
log_dir = os.path.join(os.path.abspath('__file__' + '/../'), 'outputs', 'fps')
eval.create_logger(log_dir=log_dir,
log_name=args.exp_name,
dump=args.log)
logging.info('Args:\n' + pformat(vars(args)))
np.random.seed(args.seed)
random.seed(args.seed)
# x_train, y_train, x_test, y_test = get_modelnet40_data(down_sample=10,
# do_standardize=True,
# flip=False,
# seed=args.seed)
x_train, y_train, x_test, y_test = get_modelnet40_data_fps()
ds_train = SetDataset(records=x_train, is_init=True)
ds_test = SetDataset(records=x_test, is_init=True)
logging.info(args)
shared_gbdt_params = {'n_estimators': 150,
'learning_rate': 0.1,
'max_depth': 6,
'max_features': None,
'subsample': 1,
'random_state': args.seed}
set_params = {'n_estimators': shared_gbdt_params['n_estimators'],
'operations': OPERATIONS,
'splitter': 'sklearn',
'use_attention_set': True,
'use_attention_set_comp': False,
'attention_set_limit': 5,
'max_depth': shared_gbdt_params['max_depth'],
'max_features': shared_gbdt_params['max_features'],
'subsample': shared_gbdt_params['subsample'],
'random_state': shared_gbdt_params['random_state'],
'save_path': os.path.join(log_dir, '{}_checkpoint.pkl'.format(args.exp_name)),
'validation_fraction': 0.1,
'tol': 1e-3,
'n_iter_no_change': 5,
'verbose': 3}
sklearn_params = {'n_estimators': shared_gbdt_params['n_estimators'],
'criterion': 'mse',
'learning_rate': shared_gbdt_params['learning_rate'],
'max_depth': shared_gbdt_params['max_depth'],
'max_features': shared_gbdt_params['max_features'],
'subsample': shared_gbdt_params['subsample'],
'validation_fraction': 0.1,
'tol': 1e-3,
'n_iter_no_change': 5,
'verbose': 3,
'random_state': shared_gbdt_params['random_state']}
xgboost_params = {#'objective': 'binary:logistic', # 'multi:softmax', binary:logistic
'max_depth': shared_gbdt_params['max_depth'],
'n_jobs': 10,
'learning_rate': shared_gbdt_params['learning_rate'],
'n_estimators': shared_gbdt_params['n_estimators'],
'colsample_bytree': shared_gbdt_params['max_features'],
'subsample': shared_gbdt_params['subsample'],
'reg_lambda': 0,
'reg_alpha': 0,
'verbosity': 0,
'random_state': shared_gbdt_params['random_state'],
'seed': shared_gbdt_params['random_state']}
x_train, x_test = flatten_datasets(ds_train, ds_test,
operations_list=set_params['operations'],
ds_val=None)
xgboost_gbtd = eval.train_and_predict_xgboost(xgboost_params,
x_train, y_train,
x_test, y_test,
val_x=None, val_y=None,
early_stopping_rounds=None,
mode='multi_cls')
set_gbtd = eval.train_and_predict_set_gbdt(set_params,
ds_train, y_train,
ds_test, y_test,
eval_train=False,
mode='multi_cls')
if args.save:
pkl_filename = os.path.join(log_dir, '{}_model.pkl'.format(args.exp_name))
with open(pkl_filename, 'wb') as file:
pickle.dump(set_gbtd, file)
| [
"pickle.dump",
"settree.set_data.SetDataset",
"argparse.ArgumentParser",
"exps.data.get_modelnet40_data_fps",
"random.seed",
"exps.eval_utils.create_logger",
"numpy.random.seed",
"os.path.abspath",
"exps.eval_utils.train_and_predict_xgboost",
"logging.info",
"exps.eval_utils.train_and_predict_se... | [((307, 332), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (330, 332), False, 'import argparse\n'), ((681, 755), 'exps.eval_utils.create_logger', 'eval.create_logger', ([], {'log_dir': 'log_dir', 'log_name': 'args.exp_name', 'dump': 'args.log'}), '(log_dir=log_dir, log_name=args.exp_name, dump=args.log)\n', (699, 755), True, 'import exps.eval_utils as eval\n'), ((857, 882), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (871, 882), True, 'import numpy as np\n'), ((887, 909), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (898, 909), False, 'import random\n'), ((1259, 1284), 'exps.data.get_modelnet40_data_fps', 'get_modelnet40_data_fps', ([], {}), '()\n', (1282, 1284), False, 'from exps.data import get_modelnet40_data_fps\n'), ((1301, 1342), 'settree.set_data.SetDataset', 'SetDataset', ([], {'records': 'x_train', 'is_init': '(True)'}), '(records=x_train, is_init=True)\n', (1311, 1342), False, 'from settree.set_data import SetDataset, OPERATIONS, flatten_datasets\n'), ((1357, 1397), 'settree.set_data.SetDataset', 'SetDataset', ([], {'records': 'x_test', 'is_init': '(True)'}), '(records=x_test, is_init=True)\n', (1367, 1397), False, 'from settree.set_data import SetDataset, OPERATIONS, flatten_datasets\n'), ((1403, 1421), 'logging.info', 'logging.info', (['args'], {}), '(args)\n', (1415, 1421), False, 'import logging\n'), ((3914, 4009), 'settree.set_data.flatten_datasets', 'flatten_datasets', (['ds_train', 'ds_test'], {'operations_list': "set_params['operations']", 'ds_val': 'None'}), "(ds_train, ds_test, operations_list=set_params['operations'\n ], ds_val=None)\n", (3930, 4009), False, 'from settree.set_data import SetDataset, OPERATIONS, flatten_datasets\n'), ((4103, 4262), 'exps.eval_utils.train_and_predict_xgboost', 'eval.train_and_predict_xgboost', (['xgboost_params', 'x_train', 'y_train', 'x_test', 'y_test'], {'val_x': 'None', 'val_y': 'None', 
'early_stopping_rounds': 'None', 'mode': '"""multi_cls"""'}), "(xgboost_params, x_train, y_train, x_test,\n y_test, val_x=None, val_y=None, early_stopping_rounds=None, mode=\n 'multi_cls')\n", (4133, 4262), True, 'import exps.eval_utils as eval\n'), ((4520, 4639), 'exps.eval_utils.train_and_predict_set_gbdt', 'eval.train_and_predict_set_gbdt', (['set_params', 'ds_train', 'y_train', 'ds_test', 'y_test'], {'eval_train': '(False)', 'mode': '"""multi_cls"""'}), "(set_params, ds_train, y_train, ds_test,\n y_test, eval_train=False, mode='multi_cls')\n", (4551, 4639), True, 'import exps.eval_utils as eval\n'), ((621, 657), 'os.path.abspath', 'os.path.abspath', (["('__file__' + '/../')"], {}), "('__file__' + '/../')\n", (636, 657), False, 'import os\n'), ((4985, 5012), 'pickle.dump', 'pickle.dump', (['set_gbtd', 'file'], {}), '(set_gbtd, file)\n', (4996, 5012), False, 'import pickle\n')] |
import numpy
import h5py
import scipy.sparse
from pyscf import gto, scf, mcscf, fci, ao2mo, lib
from pauxy.systems.generic import Generic
from pauxy.utils.from_pyscf import generate_integrals
from pauxy.utils.io import (
write_qmcpack_wfn,
write_qmcpack_dense,
write_input
)
mol = gto.M(atom=[('N', 0, 0, 0), ('N', (0,0,3.0))], basis='sto-3g', verbose=3,
unit='Bohr')
mf = scf.RHF(mol)
mf.chkfile = 'scf.chk'
ehf = mf.kernel()
M = 6
N = 6
mc = mcscf.CASSCF(mf, M, N)
mc.chkfile = 'scf.chk'
mc.kernel()
e_tot, e_cas, fcivec, mo, mo_energy = mc.kernel()
print(ehf, e_tot)
# Rotate by casscf mo coeffs.
h1e, chol, nelec, enuc = generate_integrals(mol, mf.get_hcore(), mo,
chol_cut=1e-5, verbose=True)
write_qmcpack_dense(h1e, chol.T.copy(), nelec,
h1e.shape[-1], enuc=enuc, filename='afqmc.h5')
coeff, occa, occb = zip(*fci.addons.large_ci(fcivec, M, (3,3),
tol=0.1, return_strs=False))
core = [i for i in range(mc.ncore)]
occa = [numpy.array(core + [o + mc.ncore for o in oa]) for oa in occa]
occb = [numpy.array(core + [o + mc.ncore for o in ob]) for ob in occb]
coeff = numpy.array(coeff,dtype=numpy.complex128)
# Sort in ascending order.
ixs = numpy.argsort(numpy.abs(coeff))[::-1]
coeff = coeff[ixs]
occa = numpy.array(occa)[ixs]
occb = numpy.array(occb)[ixs]
nmo = mf.mo_coeff.shape[-1]
rdm = mc.make_rdm1()
eigs, eigv = numpy.linalg.eigh(rdm)
psi0a = eigv[::-1,:mol.nelec[0]].copy()
psi0b = eigv[::-1,:mol.nelec[1]].copy()
psi0 = [psi0a, psi0b]
write_qmcpack_wfn('afqmc.h5', (coeff,occa,occb), 'uhf',
mol.nelec, nmo, init=psi0, mode='a')
write_input('input.json', 'afqmc.h5', 'afqmc.h5')
| [
"numpy.abs",
"pyscf.gto.M",
"pyscf.fci.addons.large_ci",
"numpy.array",
"pyscf.mcscf.CASSCF",
"pauxy.utils.io.write_qmcpack_wfn",
"numpy.linalg.eigh",
"pyscf.scf.RHF",
"pauxy.utils.io.write_input"
] | [((314, 406), 'pyscf.gto.M', 'gto.M', ([], {'atom': "[('N', 0, 0, 0), ('N', (0, 0, 3.0))]", 'basis': '"""sto-3g"""', 'verbose': '(3)', 'unit': '"""Bohr"""'}), "(atom=[('N', 0, 0, 0), ('N', (0, 0, 3.0))], basis='sto-3g', verbose=3,\n unit='Bohr')\n", (319, 406), False, 'from pyscf import gto, scf, mcscf, fci, ao2mo, lib\n'), ((418, 430), 'pyscf.scf.RHF', 'scf.RHF', (['mol'], {}), '(mol)\n', (425, 430), False, 'from pyscf import gto, scf, mcscf, fci, ao2mo, lib\n'), ((489, 511), 'pyscf.mcscf.CASSCF', 'mcscf.CASSCF', (['mf', 'M', 'N'], {}), '(mf, M, N)\n', (501, 511), False, 'from pyscf import gto, scf, mcscf, fci, ao2mo, lib\n'), ((1220, 1262), 'numpy.array', 'numpy.array', (['coeff'], {'dtype': 'numpy.complex128'}), '(coeff, dtype=numpy.complex128)\n', (1231, 1262), False, 'import numpy\n'), ((1475, 1497), 'numpy.linalg.eigh', 'numpy.linalg.eigh', (['rdm'], {}), '(rdm)\n', (1492, 1497), False, 'import numpy\n'), ((1600, 1698), 'pauxy.utils.io.write_qmcpack_wfn', 'write_qmcpack_wfn', (['"""afqmc.h5"""', '(coeff, occa, occb)', '"""uhf"""', 'mol.nelec', 'nmo'], {'init': 'psi0', 'mode': '"""a"""'}), "('afqmc.h5', (coeff, occa, occb), 'uhf', mol.nelec, nmo,\n init=psi0, mode='a')\n", (1617, 1698), False, 'from pauxy.utils.io import write_qmcpack_wfn, write_qmcpack_dense, write_input\n'), ((1711, 1760), 'pauxy.utils.io.write_input', 'write_input', (['"""input.json"""', '"""afqmc.h5"""', '"""afqmc.h5"""'], {}), "('input.json', 'afqmc.h5', 'afqmc.h5')\n", (1722, 1760), False, 'from pauxy.utils.io import write_qmcpack_wfn, write_qmcpack_dense, write_input\n'), ((1078, 1126), 'numpy.array', 'numpy.array', (['(core + [(o + mc.ncore) for o in oa])'], {}), '(core + [(o + mc.ncore) for o in oa])\n', (1089, 1126), False, 'import numpy\n'), ((1149, 1197), 'numpy.array', 'numpy.array', (['(core + [(o + mc.ncore) for o in ob])'], {}), '(core + [(o + mc.ncore) for o in ob])\n', (1160, 1197), False, 'import numpy\n'), ((1359, 1376), 'numpy.array', 'numpy.array', (['occa'], {}), 
'(occa)\n', (1370, 1376), False, 'import numpy\n'), ((1389, 1406), 'numpy.array', 'numpy.array', (['occb'], {}), '(occb)\n', (1400, 1406), False, 'import numpy\n'), ((926, 992), 'pyscf.fci.addons.large_ci', 'fci.addons.large_ci', (['fcivec', 'M', '(3, 3)'], {'tol': '(0.1)', 'return_strs': '(False)'}), '(fcivec, M, (3, 3), tol=0.1, return_strs=False)\n', (945, 992), False, 'from pyscf import gto, scf, mcscf, fci, ao2mo, lib\n'), ((1309, 1325), 'numpy.abs', 'numpy.abs', (['coeff'], {}), '(coeff)\n', (1318, 1325), False, 'import numpy\n')] |
import json
import numpy as np
from collections import OrderedDict
from src.evaluation.summary_loader import load_processed_dataset
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
sns.set()
sns.set_style("darkgrid")
n_videos = 50
videos = {}
n_splits = 5
x_axis = []
y_axis = []
df = pd.DataFrame(columns=['Videos', 'F1-scores', 'Split Type'])
# original splits
for split in range(n_splits):
path = '../results/TVSum/video_scores/original splits/video_scores{}.txt'.format(split)
print(path)
with open(path, 'r') as infile:
videos = json.load(infile)
print(videos.keys())
for key in videos.keys():
# d = {'Videos': key, 'F1-scores': videos[key]}
d = pd.Series({'Videos': key, 'F1-scores': videos[key], 'Split Type': 'Original splits'})
df = df.append(d, ignore_index=True)
# non-overlapping splits
for split in range(n_splits):
path = '../results/TVSum/video_scores/non overlapping splits/video_scores{}.txt'.format(split)
print(path)
with open(path, 'r') as infile:
videos = json.load(infile)
print(videos.keys())
for key in videos.keys():
# d = {'Videos': key, 'F1-scores': videos[key]}
d = pd.Series({'Videos': key, 'F1-scores': videos[key], 'Split Type': 'non-overlapping splits'})
df = df.append(d, ignore_index=True)
# y_axis = list(videos.values())
# x_axis = list(videos.keys())
# print(list(x_axis))
# d = {'Videos': x_axis, 'F1-scores': y_axis, 'u':[True,True,True,True,True]}
df['Videos'] = df['Videos'].astype(int)
df = df.sort_values(by=['Videos'])
print(df)
sns.relplot(x="Videos", y="F1-scores", dashes=True, style='Split Type', hue='Split Type', markers=True, kind="line",
data=df)
plt.xticks(np.arange(1, n_videos + 1))
plt.show()
# print(x_axis)
# print(y_axis)
| [
"pandas.Series",
"seaborn.set",
"numpy.arange",
"seaborn.set_style",
"json.load",
"pandas.DataFrame",
"seaborn.relplot",
"matplotlib.pyplot.show"
] | [((208, 217), 'seaborn.set', 'sns.set', ([], {}), '()\n', (215, 217), True, 'import seaborn as sns\n'), ((218, 243), 'seaborn.set_style', 'sns.set_style', (['"""darkgrid"""'], {}), "('darkgrid')\n", (231, 243), True, 'import seaborn as sns\n'), ((315, 374), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Videos', 'F1-scores', 'Split Type']"}), "(columns=['Videos', 'F1-scores', 'Split Type'])\n", (327, 374), True, 'import pandas as pd\n'), ((1656, 1786), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""Videos"""', 'y': '"""F1-scores"""', 'dashes': '(True)', 'style': '"""Split Type"""', 'hue': '"""Split Type"""', 'markers': '(True)', 'kind': '"""line"""', 'data': 'df'}), "(x='Videos', y='F1-scores', dashes=True, style='Split Type', hue\n ='Split Type', markers=True, kind='line', data=df)\n", (1667, 1786), True, 'import seaborn as sns\n'), ((1833, 1843), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1841, 1843), True, 'import matplotlib.pyplot as plt\n'), ((1805, 1831), 'numpy.arange', 'np.arange', (['(1)', '(n_videos + 1)'], {}), '(1, n_videos + 1)\n', (1814, 1831), True, 'import numpy as np\n'), ((586, 603), 'json.load', 'json.load', (['infile'], {}), '(infile)\n', (595, 603), False, 'import json\n'), ((1103, 1120), 'json.load', 'json.load', (['infile'], {}), '(infile)\n', (1112, 1120), False, 'import json\n'), ((743, 832), 'pandas.Series', 'pd.Series', (["{'Videos': key, 'F1-scores': videos[key], 'Split Type': 'Original splits'}"], {}), "({'Videos': key, 'F1-scores': videos[key], 'Split Type':\n 'Original splits'})\n", (752, 832), True, 'import pandas as pd\n'), ((1260, 1356), 'pandas.Series', 'pd.Series', (["{'Videos': key, 'F1-scores': videos[key], 'Split Type':\n 'non-overlapping splits'}"], {}), "({'Videos': key, 'F1-scores': videos[key], 'Split Type':\n 'non-overlapping splits'})\n", (1269, 1356), True, 'import pandas as pd\n')] |
#!/usr/bin/env python3
import argparse
import random
import json
import logging
import pandas as pd
from scipy import stats
import spacy
import time
from tqdm import tqdm, trange
import torch
import torch.nn.functional as F
import numpy as np
import os
from pytorch_pretrained_bert import GPT2LMHeadModel, GPT2Tokenizer, OpenAIGPTTokenizer
DATA_DIR = '../data'.format(os.getenv('HOME'))
q_sep = 'Q'
a_sep = 'A'
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
# Copied from examples/retrieve_and_edit.py (7/18)
def clean_up_tokenization_spaces(out_string):
"""Converts an output string (de-BPE-ed) using de-tokenization algorithm from OpenAI GPT."""
out_string = out_string.replace('<unk>', '')
out_string = out_string.replace(' .', '.').replace(' ?', '?').replace(' !', '!').replace(' ,', ','
).replace(" ' ", "'").replace(" n't", "n't").replace(" 'm", "'m").replace(" do not", " don't"
).replace(" 's", "'s").replace(" 've", "'ve").replace(" 're", "'re")
return out_string
# Copied from examples/run_lm.py (7/21)
def load_dataset(split, task_name, debug=False, seed=42):
""" Output a list of tuples(story, 1st continuation, 2nd continuation, label) """
assert split in {'train', 'dev'}, 'Split "{}" not yet supported'.format(split)
examples = [] # Fill examples based on task_name
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
def format_text(text):
"""Standardizes text using OpenAI GPT's tokenizer (includes lowercasing)"""
return tokenizer.decode(tokenizer.encode(text.strip())).strip()
if task_name == 'rocstories':
file_version = 'test' if split == 'dev' else 'val'
dataset_path = '{0}/rocstories/cloze_test_{1}__spring2016 - cloze_test_ALL_{1}.csv'.format(
DATA_DIR, file_version)
with open(dataset_path, encoding='utf_8') as f:
f = csv.reader(f)
next(f) # Skip the first line
for line in tqdm(f):
examples.append((' '.join(line[1:5]), line[5], line[6], int(line[-1])-1))
elif task_name == 'sqa.q-subqs':
sqa_split = 'test' if split == 'dev' else 'train'
wtq_split = 'pristine-unseen-tables' if split == 'test' else 'training'
df_subq = pd.read_csv('{}/sqa/{}.tsv'.format(DATA_DIR, sqa_split),
delimiter='\t', encoding='utf-8')
df_q = pd.read_csv('{}/WikiTableQuestions/data/{}.tsv'.format(DATA_DIR, wtq_split),
delimiter='\t', encoding='utf-8')
qids_with_subqs = list(set(df_subq['id']))
qids_with_subqs.sort()
for qid in tqdm(qids_with_subqs):
qid = qid.replace('ns', 'nt')
assert len(df_q[df_q.id == qid].utterance.values) > 0, 'Invalid QID: {}'.format(qid)
q = df_q[df_q.id == qid].utterance.values[0]
df_subq_qid = df_subq[df_subq.id == qid]
annotators = list(set(df_subq_qid.annotator))
annotators.sort()
for annotator in annotators:
df_subq_qid_annotator = df_subq_qid[df_subq_qid.annotator == annotator]
positions = list(set(df_subq_qid_annotator.position))
positions.sort()
subqs = [df_subq_qid_annotator[df_subq_qid_annotator.position == position].question.values[0].strip()
for position in positions]
example_tokens = [q] + subqs
if '?' not in example_tokens:
print(example_tokens)
example = ' '.join(example_tokens).strip()
examples.append(example)
elif task_name in {'squad.q', 'squad.q-q'}:
file_path = '{}/squad/{}-v2.0.json'.format(DATA_DIR, split)
with open(file_path, 'r') as f:
data = json.load(f)
shuffler = random.Random(seed)
for article in tqdm(data['data']):
for para in article['paragraphs']:
qs = [format_text(qa['question']) for qa in para['qas']]
if task_name == 'squad.q':
examples += qs
elif task_name == 'squad.q-q':
shuffler.shuffle(qs)
if (len(qs) % 2) == 1:
# qs.append('') # Use for generative model? Doesn't encourage repeating the original Q.
qs.append(qs[-1]) # Use for ranking model. Pair last Q with itself if it would be unpaired.
for q1, q2 in zip(qs[::2], qs[1::2]):
examples.append((q1 + ' ' + q2).strip())
else:
raise NotImplementedError(task_name)
if debug and (len(examples) > 100):
break
elif task_name == 'squad.sf-q':
with open('{}/squad/{}-v2.0.json'.format(DATA_DIR, split), 'r') as f:
data = json.load(f)
# Sentence tokenization
nlp = spacy.load("en_core_web_sm")
for article in tqdm(data['data']):
for para in article['paragraphs']:
nlp_para = nlp(para['context'])
para_sents = list(nlp_para.sents)
for qa in para['qas']:
if qa['is_impossible']:
continue # Only generate answerable Qs
# Find sentence containing answer
ans_dict = qa['answers'][0] # Find supporting fact based on first answer only
ans_start = ans_dict['answer_start']
ans_end = ans_start + len(ans_dict['text']) - 1 # Inclusive
sf_start, sf_end = None, None # Supporting facts sometimes span multiple sentences
for sent in para_sents:
if (sent.start_char <= ans_start) and (ans_start < sent.end_char):
sf_start = sent.start_char
if (sent.start_char <= ans_end) and (ans_end < sent.end_char):
sf_end = sent.end_char
if (sf_start is not None) and (sf_end is not None):
break
assert (sf_start is not None) and (sf_end is not None), 'Answer sent not found'
answer_sf = para['context'][sf_start: sf_end]
assert ans_dict['text'] in answer_sf, \
'Answer text "{}" not in answer sentence "{}"'.format(ans_dict['text'], answer_sf)
examples.append(format_text(answer_sf) + ' ' + q_sep + ' ' +
format_text(qa['question']) + ' ' + a_sep)
elif task_name in {'hotpot.q-subqs', 'hotpot.q-subqs.comparison'}:
if task_name == 'hotpot.q-subqs.comparison':
decomposition_types = ['comparison']
elif task_name == 'hotpot.q-subqs':
decomposition_types = ['intersec', 'bridge', 'comparison']
else:
raise NotImplementedError(task_name)
# Load types of each question
with open('{}/hotpot-all/{}.json'.format(DATA_DIR, split)) as f:
full_data = json.load(f)
id_to_qtype = {}
for example in full_data['data']:
qa = example['paragraphs'][0]['qas'][0]
id_to_qtype[qa['id']] = qa['type']
# Load sub-Qs
data = {}
for decomposition_type in decomposition_types:
filepath = '{}/decomposition-data-nq-version/decomposition-{}-{}-nq.json'.format(
DATA_DIR, decomposition_type, split)
with open(filepath, 'r') as f:
data.update({qid: qinfo for qid, qinfo in json.load(f).items() if id_to_qtype[qid] == 'comparison'})
for qid, q in tqdm(data.items()):
if 'subquestions' in q:
q['subquestion1'], q['subquestion2'] = q['subquestions']
example_text = ''
for qtype in ['question', 'subquestion1', 'subquestion2']:
example_text += q_sep + ' ' + format_text(q[qtype]) + ' '
if 'op' in q:
op = q['op'].lower().replace('_', ' ').capitalize()
example_text += q_sep + ' ' + format_text(op) + ' '
examples.append(example_text.replace('[ answer ]', 'ANSWER') + q_sep)
elif task_name == 'hotpot.q-sfs-a':
with open('{}/hotpot-orig/hotpot_{}_v1.json'.format(
DATA_DIR, 'train' if split == 'train' else 'dev_distractor'), 'r') as f:
hotpot_orig = json.load(f)
missing_sfs = 0
for example in tqdm(hotpot_orig):
example_text = example['question'].strip()
prev_sf_title = ''
prev_sf_sent_index = -1
for sf_title, sf_sent_index in example['supporting_facts']:
for context_title, context_sents in example['context']:
if context_title == sf_title:
if sf_sent_index >= len(context_sents):
missing_sfs += 1
continue
if sf_title == prev_sf_title:
if sf_sent_index == (prev_sf_sent_index + 1):
join_text = ' '
else:
join_text = ' ... '
else:
join_text = ' [' + sf_title.strip() + '] '
example_text += join_text + context_sents[sf_sent_index].strip()
prev_sf_title = sf_title
prev_sf_sent_index = sf_sent_index
example_text = format_text(example_text) + ' ' + a_sep + ' ' + format_text(example['answer']) + ' ' + q_sep
examples.append(example_text)
print('missing_sfs', missing_sfs)
assert missing_sfs < 100, 'Too many missing_sfs ({})'.format(missing_sfs)
elif task_name == 'hotpot.subqs-subas-q-a':
num_shards = 100 if split == 'train' else 10
data = {'data': []}
data_subas = {}
for shard_no in range(num_shards):
file_prefix = 'comparison_decomposed_{}_generations.num_shards={}.shard_no={}'.format(
split, num_shards, shard_no)
with open('{}/decomposed-predictions/{}.json'.format(DATA_DIR, file_prefix), 'r') as f:
data['data'] += json.load(f)['data']
with open('../DecompRC/DecompRC/out/hotpot/{}.nbest_predictions.json'.format(file_prefix)) as f:
data_subas.update(json.load(f))
print('Loading recomposition QA examples...')
for example in tqdm(data['data']):
qid = example['paragraphs'][0]['qas_orig'][0]['id']
question = example['paragraphs'][0]['qas_orig'][0]['question']
answer = example['paragraphs'][0]['qas_orig'][0]['final_answers'][0]
subqs = [qa['question'] for qa in example['paragraphs'][0]['qas']]
subas = []
if len(subqs) != 2:
continue # TODO: Make this fail gracefully: ~6 bad splits, ~12 repetition in sub-question (in train)
for i in range(len(subqs)):
subqid = qid + '-' + str(i)
if subqid in data_subas:
subas.append(data_subas[subqid][0]['text'])
if len(subqs) == len(subas):
example_text = ''
for q, a in zip(subqs + [question], subas + [answer]):
example_text += q_sep + ' ' + format_text(q) + ' ' + a_sep + ' ' + format_text(a) + ' '
examples.append(example_text + q_sep)
else:
raise NotImplementedError(task_name)
print('Read {} examples.'.format(len(examples)))
assert len(examples) > 0, 'Error: Read 0 examples.'
return examples
def top_k_logits(logits, k):
"""
Masks everything but the k top entries as -infinity (1e10).
Used to mask logits such that e^-infinity -> 0 won't contribute to the
sum of the denominator.
"""
if k == 0:
return logits
else:
values = torch.topk(logits, k)[0]
batch_mins = values[:, -1].view(-1, 1).expand_as(logits)
return torch.where(logits < batch_mins, torch.ones_like(logits) * -1e10, logits)
def sample_sequence(model, length, task_name=None, start_token=None, batch_size=None, context=None, temperature=1,
top_k=0, device='cuda', sample=True, end_token=None):
if start_token is None:
assert context is not None, 'Specify exactly one of start_token and context!'
context = torch.tensor(context, device=device, dtype=torch.long).unsqueeze(0).repeat(batch_size, 1)
else:
assert context is None, 'Specify exactly one of start_token and context!'
context = torch.full((batch_size, 1), start_token, device=device, dtype=torch.long)
prev = context
output = context
past = None
with torch.no_grad():
for i in range(length):
logits, past = model(prev, past=past)
logits = logits[:, -1, :] / temperature
logits = top_k_logits(logits, k=top_k)
log_probs = F.softmax(logits, dim=-1)
if sample:
prev = torch.multinomial(log_probs, num_samples=1)
else:
_, prev = torch.topk(log_probs, k=1, dim=-1)
output = torch.cat((output, prev), dim=1)
if end_token is not None: # Decode until final token generated
if task_name == 'hotpot.q-subqs.comparison':
num_req_end_tokens = 4
elif task_name == 'hotpot.subqs-subas-q-a':
num_req_end_tokens = 3
elif task_name in {'hotpot.q-sfs-a', 'squad.sf-q'}:
num_req_end_tokens = 1
else:
raise NotImplementedError(task_name)
if ((output == end_token).sum(1) >= num_req_end_tokens).all():
break
return output
def run_model():
    """Generate (and optionally evaluate) sequences with a fine-tuned GPT-2 model.

    The task stored in the checkpoint's training args selects the mode:
    - 'hotpot.q-subqs.comparison': decompose comparison questions into
      sub-questions and save them as SQuAD-formatted JSON.
    - 'hotpot.subqs-subas-q-a' / 'hotpot.q-sfs-a' / 'squad.sf-q': generate
      answers/questions and report EM / F1.
    - no task: interactive prompt loop.
    """
    start_time = time.time()
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name_or_path', type=str, default='gpt2',
                        help='pretrained model name or path to local checkpoint')
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--nsamples", type=int, default=1)
    parser.add_argument("--batch_size", type=int, default=-1)
    parser.add_argument("--length", type=int, default=-1)
    parser.add_argument("--temperature", type=float, default=1.0)
    parser.add_argument("--top_k", type=int, default=0)
    parser.add_argument('--unconditional', action='store_true', help='If true, unconditional generation.')
    parser.add_argument('--context_path', type=str, default=None,
                        help='path to contexts to condition on (SQuAD-formatted json).')
    parser.add_argument("--num_shards", default=1, type=int, required=False,
                        help="# of total data splits for distributed eval")
    parser.add_argument("--shard_no", default=0, type=int, required=False, help="Distributed eval data split index.")
    parser.add_argument("--no_task", action='store_true', help="No decoding task (use interactive).")
    args = parser.parse_args()
    print(args)

    if args.batch_size == -1:
        args.batch_size = 1
    assert args.nsamples % args.batch_size == 0

    # Seed all RNGs for reproducible sampling.
    np.random.seed(args.seed)
    torch.random.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    enc = GPT2Tokenizer.from_pretrained(args.model_name_or_path)
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
    args_path = os.path.join(args.model_name_or_path, 'training_args.bin')
    model_args = torch.load(args_path) if os.path.exists(args_path) else None
    model.half()
    model.to(device)
    model.eval()

    if args.length == -1:
        args.length = model.config.n_ctx // 2
    elif args.length > model.config.n_ctx:
        raise ValueError("Can't get samples longer than window size: %s" % model.config.n_ctx)

    # Fine-tuned SQuAD/HotpotQA checkpoints were trained on OpenAI-GPT
    # tokenized text, so prompts are round-tripped through that tokenizer.
    format_tokenizer = None
    if ('squad' in args.model_name_or_path) or ('hotpot' in args.model_name_or_path):
        format_tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')

    task_name = None if ((model_args is None) or args.no_task) else model_args.task_name
    if task_name is not None:
        # Backward-compatibility for task_name
        if task_name == 'hotpotqa-recomposition-supporting-fact':
            task_name = 'hotpot.q-sfs-a'
        elif task_name == 'hotpotqa-recomposition':
            task_name = 'hotpot.subqs-subas-q-a'
        elif task_name == 'hotpot-comparison-questions-cond-lm':
            task_name = 'hotpot.q-subqs.comparison'
        filename = args.context_path.split('/')[-1]
        split = None
        for possible_split in ['train', 'dev', 'test']:
            if possible_split in filename:
                split = possible_split
                break
        assert split is not None, 'Unable to determine split for context_path {}'.format(args.context_path)
        with open(args.context_path, 'r') as f:
            all_data = json.load(f)
        # Filter for comparison questions only
        examples = []  # Need to fill in this variable to add prompts
        if task_name == 'hotpot.q-subqs.comparison':
            # BUG FIX: eos_sep was never assigned on this path, so the
            # end_token lookup in the generation loop below raised
            # UnboundLocalError. Generation for this task stops on question
            # separators (the decoded text is stripped/split on q_sep).
            eos_sep = q_sep
            save_data = {'data': []}
            for i, example in enumerate(all_data['data']):
                if (i % args.num_shards) != args.shard_no:
                    continue
                assert len(example['paragraphs']) == 1, 'Unexpected # paragraphs: {}'.format(len(example['paragraphs']))
                assert len(example['paragraphs'][0]['qas']) == 1, 'Unexpected # paragraphs: {}'.format(
                    len(example['paragraphs'][0]['qas']))
                if example['paragraphs'][0]['qas'][0]['type'] == 'comparison':
                    example['paragraphs'][0]['qas_orig'] = example['paragraphs'][0]['qas']
                    example['paragraphs'][0]['qas'] = []
                    # BUG FIX: this line indexed the literal list ['paragraphs']
                    # (TypeError); the question lives on `example`.
                    example['prompt'] = example['paragraphs'][0]['qas_orig'][0]['question']
                    if format_tokenizer:
                        example['prompt'] = format_tokenizer.decode(
                            format_tokenizer.encode(example['prompt'].strip())).strip()
                    example['prompt'] = q_sep + ' ' + example['prompt'] + ' ' + q_sep
                    save_data['data'].append(example)
                    examples.append(example['prompt'])
        elif task_name in {'hotpot.subqs-subas-q-a', 'hotpot.q-sfs-a', 'squad.sf-q'}:
            # eos_sep ends the span we score; eoi_sep ends the input prompt.
            if task_name == 'squad.sf-q':
                eos_sep = a_sep
                eoi_sep = q_sep
            else:
                eos_sep = q_sep
                eoi_sep = a_sep
            examples_with_answers = load_dataset(split, task_name)
            answers = []
            stats_sum = {'em': 0, 'f1': 0}
            for example_with_answer in examples_with_answers:
                example, answer = example_with_answer.rsplit(eoi_sep, 1)
                examples.append(example + eoi_sep)
                answers.append(answer.strip(eos_sep).strip())
        else:
            raise NotImplementedError('task_name {}'.format(task_name))

    tqdm_bar = trange(1000 if task_name is None else len(examples), desc='Evaluating')
    for d in tqdm_bar:
        context_tokens = None
        start_token = enc.encoder['<|endoftext|>']
        out_start_index = 1
        if not args.unconditional:
            if task_name is None:
                raw_text = input("Model prompt >>> ")
                while not raw_text:
                    print('Prompt should not be empty!')
                    raw_text = input("Model prompt >>> ")
                if format_tokenizer:
                    raw_text = format_tokenizer.decode(format_tokenizer.encode(raw_text.strip())).strip()
            else:
                raw_text = examples[d]
                if task_name == 'hotpot.q-subqs.comparison':
                    assert save_data['data'][d]['prompt'] == examples[d]
            context_tokens = enc.encode(raw_text)
            start_token = None
            out_start_index = len(context_tokens)
        for _ in range(args.nsamples // args.batch_size):
            out = sample_sequence(
                model=model, length=args.length, task_name=task_name, context=context_tokens, start_token=start_token,
                batch_size=args.batch_size, temperature=args.temperature, top_k=args.top_k, device=device,
                end_token=None if task_name is None else enc.encode(' ' + eos_sep)[0]
            )
            # Drop the prompt tokens; keep only what the model generated.
            out = out[:, out_start_index:].tolist()
            for i in range(args.batch_size):
                text = enc.decode(out[i])
                print(text)
                if task_name == 'hotpot.q-subqs.comparison':
                    text = text.strip(q_sep)  # Remove EOS token
                    subqs = text.split(q_sep)  # Split apart generated sub-Qs TODO: + operator (if applicable)
                    # Last piece is the comparison operator; the rest are sub-Qs.
                    for subq_no, subq_text in enumerate(subqs[:-1]):
                        subq_text = subq_text.strip()
                        save_data['data'][d]['paragraphs'][0]['qas'].append({
                            'question': subq_text,
                            'level': 'subquestion',
                            'type': 'subquestion' + str(subq_no),
                            'id': save_data['data'][d]['paragraphs'][0]['qas_orig'][0]['id'] + '-' + str(subq_no),
                            'answers': [[] for _ in range(len(save_data['data'][d]['paragraphs'][0]['context']))],
                        })
                    save_data['data'][d]['paragraphs'][0]['op'] = subqs[-1].replace(' ', '_').upper()
                elif task_name in {'hotpot.subqs-subas-q-a', 'hotpot.q-sfs-a', 'squad.sf-q'}:
                    # EM
                    pred_answer = clean_up_tokenization_spaces(text.split(eos_sep)[0].strip())
                    answer = clean_up_tokenization_spaces(answers[d])
                    em = pred_answer == answer
                    stats_sum['em'] += em
                    # F1 (bag-of-words precision/recall, harmonic mean)
                    pred_answer_bow = set(pred_answer.split())
                    answer_bow = set(answer.split())
                    precision = len(pred_answer_bow.intersection(answer_bow)) / len(pred_answer_bow)
                    recall = len(pred_answer_bow.intersection(answer_bow)) / len(answer_bow)
                    f1 = stats.hmean([precision, recall]) if ((precision != 0) and (recall != 0)) else 0
                    stats_sum['f1'] += f1
                    print(raw_text)
                    print(int(em), round(100 * f1), pred_answer, '/', answers[d])
                    tqdm_bar.desc = "Evaluation EM: {:.1%} F1: {:.1%}".format(stats_sum['em'] / (d + 1),
                                                                              stats_sum['f1'] / (d + 1))
    # Print or save results
    if task_name == 'hotpot.q-subqs.comparison':
        with open('data/decomposed-predictions/comparison_decomposed_{}_generations.num_shards={}.shard_no={}.json'.format(split, args.num_shards, args.shard_no), 'w') as f:
            json.dump(save_data, f)
    elif task_name in {'hotpot.subqs-subas-q-a', 'hotpot.q-sfs-a'}:
        print("Evaluation EM: {:.1%} F1: {:.1%}".format(stats_sum['em'] / len(examples),
                                                        stats_sum['f1'] / len(examples)))
    print('Completed in {:.0f}s'.format(time.time() - start_time))
# Script entry point: run generation/evaluation when executed directly.
if __name__ == '__main__':
    run_model()
| [
"logging.getLogger",
"pytorch_pretrained_bert.GPT2LMHeadModel.from_pretrained",
"torch.cuda.is_available",
"pytorch_pretrained_bert.GPT2Tokenizer.from_pretrained",
"torch.nn.functional.softmax",
"torch.random.manual_seed",
"os.path.exists",
"argparse.ArgumentParser",
"random.Random",
"spacy.load",... | [((416, 559), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt=\n '%m/%d/%Y %H:%M:%S', level=logging.INFO)\n", (435, 559), False, 'import logging\n'), ((605, 632), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (622, 632), False, 'import logging\n'), ((372, 389), 'os.getenv', 'os.getenv', (['"""HOME"""'], {}), "('HOME')\n", (381, 389), False, 'import os\n'), ((1529, 1577), 'pytorch_pretrained_bert.OpenAIGPTTokenizer.from_pretrained', 'OpenAIGPTTokenizer.from_pretrained', (['"""openai-gpt"""'], {}), "('openai-gpt')\n", (1563, 1577), False, 'from pytorch_pretrained_bert import GPT2LMHeadModel, GPT2Tokenizer, OpenAIGPTTokenizer\n'), ((14158, 14169), 'time.time', 'time.time', ([], {}), '()\n', (14167, 14169), False, 'import time\n'), ((14183, 14208), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (14206, 14208), False, 'import argparse\n'), ((15515, 15540), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (15529, 15540), True, 'import numpy as np\n'), ((15545, 15580), 'torch.random.manual_seed', 'torch.random.manual_seed', (['args.seed'], {}), '(args.seed)\n', (15569, 15580), False, 'import torch\n'), ((15585, 15618), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (15607, 15618), False, 'import torch\n'), ((15704, 15758), 'pytorch_pretrained_bert.GPT2Tokenizer.from_pretrained', 'GPT2Tokenizer.from_pretrained', (['args.model_name_or_path'], {}), '(args.model_name_or_path)\n', (15733, 15758), False, 'from pytorch_pretrained_bert import GPT2LMHeadModel, GPT2Tokenizer, OpenAIGPTTokenizer\n'), ((15771, 15827), 'pytorch_pretrained_bert.GPT2LMHeadModel.from_pretrained', 'GPT2LMHeadModel.from_pretrained', 
(['args.model_name_or_path'], {}), '(args.model_name_or_path)\n', (15802, 15827), False, 'from pytorch_pretrained_bert import GPT2LMHeadModel, GPT2Tokenizer, OpenAIGPTTokenizer\n'), ((15844, 15902), 'os.path.join', 'os.path.join', (['args.model_name_or_path', '"""training_args.bin"""'], {}), "(args.model_name_or_path, 'training_args.bin')\n", (15856, 15902), False, 'import os\n'), ((12912, 12985), 'torch.full', 'torch.full', (['(batch_size, 1)', 'start_token'], {'device': 'device', 'dtype': 'torch.long'}), '((batch_size, 1), start_token, device=device, dtype=torch.long)\n', (12922, 12985), False, 'import torch\n'), ((13051, 13066), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13064, 13066), False, 'import torch\n'), ((15945, 15970), 'os.path.exists', 'os.path.exists', (['args_path'], {}), '(args_path)\n', (15959, 15970), False, 'import os\n'), ((15920, 15941), 'torch.load', 'torch.load', (['args_path'], {}), '(args_path)\n', (15930, 15941), False, 'import torch\n'), ((16389, 16437), 'pytorch_pretrained_bert.OpenAIGPTTokenizer.from_pretrained', 'OpenAIGPTTokenizer.from_pretrained', (['"""openai-gpt"""'], {}), "('openai-gpt')\n", (16423, 16437), False, 'from pytorch_pretrained_bert import GPT2LMHeadModel, GPT2Tokenizer, OpenAIGPTTokenizer\n'), ((2145, 2152), 'tqdm.tqdm', 'tqdm', (['f'], {}), '(f)\n', (2149, 2152), False, 'from tqdm import tqdm, trange\n'), ((2816, 2837), 'tqdm.tqdm', 'tqdm', (['qids_with_subqs'], {}), '(qids_with_subqs)\n', (2820, 2837), False, 'from tqdm import tqdm, trange\n'), ((12210, 12231), 'torch.topk', 'torch.topk', (['logits', 'k'], {}), '(logits, k)\n', (12220, 12231), False, 'import torch\n'), ((13277, 13302), 'torch.nn.functional.softmax', 'F.softmax', (['logits'], {'dim': '(-1)'}), '(logits, dim=-1)\n', (13286, 13302), True, 'import torch.nn.functional as F\n'), ((13493, 13525), 'torch.cat', 'torch.cat', (['(output, prev)'], {'dim': '(1)'}), '((output, prev), dim=1)\n', (13502, 13525), False, 'import torch\n'), ((15655, 15680), 
'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (15678, 15680), False, 'import torch\n'), ((17344, 17356), 'json.load', 'json.load', (['f'], {}), '(f)\n', (17353, 17356), False, 'import json\n'), ((23488, 23511), 'json.dump', 'json.dump', (['save_data', 'f'], {}), '(save_data, f)\n', (23497, 23511), False, 'import json\n'), ((4020, 4039), 'random.Random', 'random.Random', (['seed'], {}), '(seed)\n', (4033, 4039), False, 'import random\n'), ((4063, 4081), 'tqdm.tqdm', 'tqdm', (["data['data']"], {}), "(data['data'])\n", (4067, 4081), False, 'from tqdm import tqdm, trange\n'), ((12348, 12371), 'torch.ones_like', 'torch.ones_like', (['logits'], {}), '(logits)\n', (12363, 12371), False, 'import torch\n'), ((13349, 13392), 'torch.multinomial', 'torch.multinomial', (['log_probs'], {'num_samples': '(1)'}), '(log_probs, num_samples=1)\n', (13366, 13392), False, 'import torch\n'), ((13437, 13471), 'torch.topk', 'torch.topk', (['log_probs'], {'k': '(1)', 'dim': '(-1)'}), '(log_probs, k=1, dim=-1)\n', (13447, 13471), False, 'import torch\n'), ((23799, 23810), 'time.time', 'time.time', ([], {}), '()\n', (23808, 23810), False, 'import time\n'), ((3987, 3999), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3996, 3999), False, 'import json\n'), ((5108, 5136), 'spacy.load', 'spacy.load', (['"""en_core_web_sm"""'], {}), "('en_core_web_sm')\n", (5118, 5136), False, 'import spacy\n'), ((5160, 5178), 'tqdm.tqdm', 'tqdm', (["data['data']"], {}), "(data['data'])\n", (5164, 5178), False, 'from tqdm import tqdm, trange\n'), ((5048, 5060), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5057, 5060), False, 'import json\n'), ((12712, 12766), 'torch.tensor', 'torch.tensor', (['context'], {'device': 'device', 'dtype': 'torch.long'}), '(context, device=device, dtype=torch.long)\n', (12724, 12766), False, 'import torch\n'), ((22793, 22825), 'scipy.stats.hmean', 'stats.hmean', (['[precision, recall]'], {}), '([precision, recall])\n', (22804, 22825), False, 'from scipy 
import stats\n'), ((7286, 7298), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7295, 7298), False, 'import json\n'), ((8717, 8734), 'tqdm.tqdm', 'tqdm', (['hotpot_orig'], {}), '(hotpot_orig)\n', (8721, 8734), False, 'from tqdm import tqdm, trange\n'), ((8656, 8668), 'json.load', 'json.load', (['f'], {}), '(f)\n', (8665, 8668), False, 'import json\n'), ((10757, 10775), 'tqdm.tqdm', 'tqdm', (["data['data']"], {}), "(data['data'])\n", (10761, 10775), False, 'from tqdm import tqdm, trange\n'), ((10501, 10513), 'json.load', 'json.load', (['f'], {}), '(f)\n', (10510, 10513), False, 'import json\n'), ((10665, 10677), 'json.load', 'json.load', (['f'], {}), '(f)\n', (10674, 10677), False, 'import json\n'), ((7809, 7821), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7818, 7821), False, 'import json\n')] |
import numpy as np
from scipy.stats import entropy
from scipy.spatial.distance import cosine
def jsd(p1, p2) -> float:
    '''Return the Jensen-Shannon Divergence between two distributions.

    Inputs may be any array-likes of non-negative weights; they are
    normalized to probability distributions before the computation.
    '''
    # BUG FIX: the original used in-place `p1 /= p1.sum()`, which raises a
    # TypeError for integer array input and silently mutates a
    # caller-supplied float ndarray. Cast to float and divide out-of-place.
    p1 = np.asarray(p1, dtype=float)
    p2 = np.asarray(p2, dtype=float)
    p1 = p1 / p1.sum()
    p2 = p2 / p2.sum()
    m = (p1 + p2) / 2
    # JSD is the mean of the two KL divergences against the midpoint m.
    return (entropy(p1, m) + entropy(p2, m)) / 2
def cosine_similarity(a, b) -> float:
    '''Cosine similarity between two vectors (1 minus the cosine distance).'''
    vec_a = np.asarray(a)
    vec_b = np.asarray(b)
    return 1 - cosine(vec_a, vec_b)
def cosine_distance(a, b) -> float:
    '''Cosine distance between two vectors.'''
    return cosine(np.asarray(a), np.asarray(b))
def div(a, b) -> float:
    '''Difference Between Diversities (DIV): absolute difference between the
    mean cosine distance to the centroid within each embedding set.'''
    mat_a = np.asarray(a)
    mat_b = np.asarray(b)
    # Centroid of each embedding matrix (mean over rows).
    centroid_a = mat_a.mean(axis=0)
    centroid_b = mat_b.mean(axis=0)
    # Average spread of each set around its own centroid.
    spread_a = np.array([cosine_distance(row, centroid_a) for row in mat_a]).mean(axis=0)
    spread_b = np.array([cosine_distance(row, centroid_b) for row in mat_b]).mean(axis=0)
    return abs(spread_a - spread_b)
def pdis(a, b, label_a, label_b) -> float:
    '''Cosine distance between prototype embeddings of the two labeled sets.

    Each class contributes one centroid; the prototype of a set is the mean
    over its class centroids.'''
    classes_a = np.unique(label_a)
    classes_b = np.unique(label_b)
    # One centroid per class, stacked row-wise.
    proto_a = np.array([a[label_a == cls, :].mean(axis=0) for cls in classes_a])
    proto_b = np.array([b[label_b == cls, :].mean(axis=0) for cls in classes_b])
    return cosine_distance(proto_a.mean(axis=0), proto_b.mean(axis=0))
def pdiv(a, b, label_a, label_b) -> float:
    '''Difference between diversities (DIV) of the per-class prototype
    embeddings of the two labeled sets.'''
    classes_a = np.unique(label_a)
    classes_b = np.unique(label_b)
    # One centroid per class, stacked row-wise; DIV is taken over these.
    proto_a = np.array([a[label_a == cls, :].mean(axis=0) for cls in classes_a])
    proto_b = np.array([b[label_b == cls, :].mean(axis=0) for cls in classes_b])
    return div(proto_a, proto_b)
| [
"scipy.spatial.distance.cosine",
"numpy.asarray",
"numpy.unique",
"scipy.stats.entropy"
] | [((185, 199), 'numpy.asarray', 'np.asarray', (['p1'], {}), '(p1)\n', (195, 199), True, 'import numpy as np\n'), ((210, 224), 'numpy.asarray', 'np.asarray', (['p2'], {}), '(p2)\n', (220, 224), True, 'import numpy as np\n'), ((431, 444), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (441, 444), True, 'import numpy as np\n'), ((454, 467), 'numpy.asarray', 'np.asarray', (['b'], {}), '(b)\n', (464, 467), True, 'import numpy as np\n'), ((586, 599), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (596, 599), True, 'import numpy as np\n'), ((609, 622), 'numpy.asarray', 'np.asarray', (['b'], {}), '(b)\n', (619, 622), True, 'import numpy as np\n'), ((635, 647), 'scipy.spatial.distance.cosine', 'cosine', (['a', 'b'], {}), '(a, b)\n', (641, 647), False, 'from scipy.spatial.distance import cosine\n'), ((746, 759), 'numpy.asarray', 'np.asarray', (['a'], {}), '(a)\n', (756, 759), True, 'import numpy as np\n'), ((769, 782), 'numpy.asarray', 'np.asarray', (['b'], {}), '(b)\n', (779, 782), True, 'import numpy as np\n'), ((1218, 1256), 'numpy.unique', 'np.unique', (['label_a'], {'return_counts': '(True)'}), '(label_a, return_counts=True)\n', (1227, 1256), True, 'import numpy as np\n'), ((1283, 1321), 'numpy.unique', 'np.unique', (['label_b'], {'return_counts': '(True)'}), '(label_b, return_counts=True)\n', (1292, 1321), True, 'import numpy as np\n'), ((1749, 1787), 'numpy.unique', 'np.unique', (['label_a'], {'return_counts': '(True)'}), '(label_a, return_counts=True)\n', (1758, 1787), True, 'import numpy as np\n'), ((1814, 1852), 'numpy.unique', 'np.unique', (['label_b'], {'return_counts': '(True)'}), '(label_b, return_counts=True)\n', (1823, 1852), True, 'import numpy as np\n'), ((484, 496), 'scipy.spatial.distance.cosine', 'cosine', (['a', 'b'], {}), '(a, b)\n', (490, 496), False, 'from scipy.spatial.distance import cosine\n'), ((301, 315), 'scipy.stats.entropy', 'entropy', (['p1', 'm'], {}), '(p1, m)\n', (308, 315), False, 'from scipy.stats import entropy\n'), 
((318, 332), 'scipy.stats.entropy', 'entropy', (['p2', 'm'], {}), '(p2, m)\n', (325, 332), False, 'from scipy.stats import entropy\n')] |
"""
Mesh class containing geometry information
"""
from pyrr import matrix44
import numpy
class Mesh:
    """Geometry container: VAO, material, attribute metadata and bounds."""

    def __init__(self, name, vao=None, material=None, attributes=None, bbox_min=None, bbox_max=None):
        """
        :param name: Name of the mesh
        :param vao: VAO
        :param material: Material
        :param attributes: Per-attribute metadata (dict), e.g.
            {
                "NORMAL": {"name": "in_normal", "components": 3, "type": GL_FLOAT},
                "POSITION": {"name": "in_position", "components": 3, "type": GL_FLOAT}
            }
        """
        self.name = name
        self.vao = vao
        self.material = material
        self.attributes = attributes if attributes else {}
        self.bbox_min = bbox_min
        self.bbox_max = bbox_max
        self.mesh_program = None

    def draw(self, projection_matrix=None, view_matrix=None, camera_matrix=None, time=0):
        """
        Render the mesh through its assigned mesh program (no-op if unset).

        :param projection_matrix: projection_matrix (bytes)
        :param view_matrix: view_matrix (bytes)
        :param camera_matrix: camera_matrix (bytes)
        """
        if not self.mesh_program:
            return
        self.mesh_program.draw(
            self,
            projection_matrix=projection_matrix,
            view_matrix=view_matrix,
            camera_matrix=camera_matrix,
            time=time,
        )

    def draw_bbox(self, proj_matrix, view_matrix, cam_matrix, program, vao):
        """Render this mesh's bounding box with the supplied program and vao."""
        uniform_values = (
            ("m_proj", proj_matrix),
            ("m_view", view_matrix),
            ("m_cam", cam_matrix),
            ("bb_min", self.bbox_min.astype('f4').tobytes()),
            ("bb_max", self.bbox_max.astype('f4').tobytes()),
        )
        for uniform_name, value in uniform_values:
            program[uniform_name].write(value)
        program["color"].value = (0.75, 0.75, 0.75)
        vao.render(program)

    def add_attribute(self, attr_type, name, components):
        """
        Register metadata about one mesh attribute.

        :param attr_type: POSITION, NORMAL etc
        :param name: The attribute name used in the program
        :param components: Number of floats
        """
        self.attributes[attr_type] = {"name": name, "components": components}

    def calc_global_bbox(self, view_matrix, bbox_min, bbox_max):
        """Transform this mesh's bbox by view_matrix and fold it into the
        running global bounds, returning the (possibly new) min/max pair."""
        # Promote corners to vec4 so they can go through the 4x4 transform.
        low = numpy.asarray(matrix44.apply_to_vector(view_matrix, numpy.append(self.bbox_min[:], 1.0)))
        high = numpy.asarray(matrix44.apply_to_vector(view_matrix, numpy.append(self.bbox_max[:], 1.0)))
        # A rotation may flip axes; keep low <= high per component.
        for axis in range(3):
            if high[axis] < low[axis]:
                low[axis], high[axis] = high[axis], low[axis]
        if bbox_min is None or bbox_max is None:
            return low[0:3], high[0:3]
        for axis in range(3):
            bbox_min[axis] = min(bbox_min[axis], low[axis])
        for axis in range(3):
            bbox_max[axis] = max(bbox_max[axis], high[axis])
        return bbox_min, bbox_max

    def has_normals(self):
        """True when a NORMAL attribute has been registered."""
        return "NORMAL" in self.attributes

    def has_uvs(self, layer=0):
        """True when a TEXCOORD_<layer> attribute has been registered."""
        return "TEXCOORD_{}".format(layer) in self.attributes
| [
"numpy.append",
"numpy.asarray",
"pyrr.matrix44.apply_to_vector"
] | [((2345, 2380), 'numpy.append', 'numpy.append', (['self.bbox_min[:]', '(1.0)'], {}), '(self.bbox_min[:], 1.0)\n', (2357, 2380), False, 'import numpy\n'), ((2395, 2430), 'numpy.append', 'numpy.append', (['self.bbox_max[:]', '(1.0)'], {}), '(self.bbox_max[:], 1.0)\n', (2407, 2430), False, 'import numpy\n'), ((2483, 2525), 'pyrr.matrix44.apply_to_vector', 'matrix44.apply_to_vector', (['view_matrix', 'bb1'], {}), '(view_matrix, bb1)\n', (2507, 2525), False, 'from pyrr import matrix44\n'), ((2542, 2584), 'pyrr.matrix44.apply_to_vector', 'matrix44.apply_to_vector', (['view_matrix', 'bb2'], {}), '(view_matrix, bb2)\n', (2566, 2584), False, 'from pyrr import matrix44\n'), ((2601, 2620), 'numpy.asarray', 'numpy.asarray', (['bmin'], {}), '(bmin)\n', (2614, 2620), False, 'import numpy\n'), ((2639, 2658), 'numpy.asarray', 'numpy.asarray', (['bmax'], {}), '(bmax)\n', (2652, 2658), False, 'import numpy\n')] |
# Zero-shot learning baseline on AwA2 (ResNet-101 features): train a
# logistic-regression classifier on the 40 seen classes, factor its weight
# matrix through the binary class-attribute signatures, then rebuild
# classifier weights for the 10 unseen classes from their signatures.
from sklearn import datasets, linear_model, preprocessing, decomposition, manifold, svm
from sklearn.metrics import make_scorer, accuracy_score
import numpy as np
from sklearn.model_selection import cross_validate, cross_val_score, train_test_split
import matplotlib.pyplot as plt
import time

#######################################################################
# PREPROCESSING DATA
#######################################################################

# Loading the AwA dataset (one feature vector per image, one label per row)
X = np.loadtxt('/home/cristianopatricio/Documents/Datasets/Animals_with_Attributes2/Features/ResNet101/AwA2-features.txt')
y = np.loadtxt('/home/cristianopatricio/Documents/Datasets/Animals_with_Attributes2/Features/ResNet101/AwA2-labels.txt')

print('The shape of X is: ' + str(X.shape))
print('The shape of Y is: ' + str(y.shape))
print('Number of classes: ' + str(len(np.unique(y))))

# Split into train and test sets (40 classes for training and 10 classes for test)
lbl = preprocessing.LabelEncoder()
y_train = lbl.fit_transform(y[np.where((y > 0) & (y < 41))])
X_train = X[np.where((y > 0) & (y < 41))]

#######################################################################
# TRAIN
#######################################################################

model = linear_model.LogisticRegression(solver='lbfgs', multi_class='auto', max_iter=5000)

tic = time.time()
model.fit(X_train, y_train)
toc = time.time()

# Weight matrix W, shape (features d) x (seen classes z)
W = model.coef_.T
np.savetxt("weights.txt", W)
print("W shape: ", W.shape)

# Binary predicate (attribute) signatures; first 40 rows are the seen classes
S = np.loadtxt('/home/cristianopatricio/Documents/Datasets/AwA2-base/Animals_with_Attributes2/predicate-matrix-binary.txt')
S_train = S[0:40].T
print("S_train shape: ", S_train.shape)

# Least-squares solve of S_train.T @ V.T ~= W.T, i.e. W ~= (S_train.T @ V.T).T
V = np.linalg.lstsq(S_train.T, W.T, rcond=None)[0].T
print("V shape", V.shape)

# Sanity check: reconstruction error of the seen-class weights
W_new = np.dot(S_train.T, V.T).T
print("W_new shape: ", W_new.shape)
print("%f" % (np.sum(np.sqrt((W_new - W) ** 2))))

#################################################################
# INFERENCE
#################################################################

lbl = preprocessing.LabelEncoder()
y_test = lbl.fit_transform(y[np.where((y > 40) & (y < 51))])
X_test = X[np.where((y > 40) & (y < 51))]

S_test = S[40:].T
print("S_test shape: ", S_test.shape)

# Rebuild classifier weights for the unseen classes from their signatures
W_new = np.dot(S_test.T, V.T).T
print("W_new shape: ", W_new.shape)

# Check performance: predicted class = argmax over the rebuilt class scores
correct = 0
i = 0
for i, (ys, x) in enumerate(zip(y_test, X_test)):
    if np.argmax(np.dot(x.T, W_new)) == ys:
        correct += 1
# BUG FIX: the original divided by i (the last 0-based index); the number of
# evaluated samples is i + 1, so accuracy was slightly over-estimated.
num_samples = i + 1
accuracy = correct / float(num_samples)
print("Results: ", correct, num_samples, accuracy)
print("Training time: %.2f min." % ((toc - tic) / 60.0))

###########################################################
# SAVE RESULTS TXT
###########################################################

# Context manager ensures the results file is closed even on error
# (the original kept a raw handle named `file`, shadowing the builtin).
with open("results_eszsl_AwA2.txt", "a") as results_file:
    results_file.write("No. samples: " + str(num_samples) + "\n" +
                       "No. correct samples: " + str(correct) + "\n" +
                       "Percentage: " + str(accuracy) + "\n" +
                       "Exec. time: " + str((toc - tic) / 60.0) + "min.")
| [
"sklearn.preprocessing.LabelEncoder",
"numpy.sqrt",
"numpy.unique",
"numpy.where",
"sklearn.linear_model.LogisticRegression",
"numpy.dot",
"numpy.savetxt",
"numpy.linalg.lstsq",
"numpy.loadtxt",
"time.time"
] | [((492, 620), 'numpy.loadtxt', 'np.loadtxt', (['"""/home/cristianopatricio/Documents/Datasets/Animals_with_Attributes2/Features/ResNet101/AwA2-features.txt"""'], {}), "(\n '/home/cristianopatricio/Documents/Datasets/Animals_with_Attributes2/Features/ResNet101/AwA2-features.txt'\n )\n", (502, 620), True, 'import numpy as np\n'), ((615, 741), 'numpy.loadtxt', 'np.loadtxt', (['"""/home/cristianopatricio/Documents/Datasets/Animals_with_Attributes2/Features/ResNet101/AwA2-labels.txt"""'], {}), "(\n '/home/cristianopatricio/Documents/Datasets/Animals_with_Attributes2/Features/ResNet101/AwA2-labels.txt'\n )\n", (625, 741), True, 'import numpy as np\n'), ((964, 992), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (990, 992), False, 'from sklearn import datasets, linear_model, preprocessing, decomposition, manifold, svm\n'), ((1378, 1464), 'sklearn.linear_model.LogisticRegression', 'linear_model.LogisticRegression', ([], {'solver': '"""lbfgs"""', 'multi_class': '"""auto"""', 'max_iter': '(5000)'}), "(solver='lbfgs', multi_class='auto',\n max_iter=5000)\n", (1409, 1464), False, 'from sklearn import datasets, linear_model, preprocessing, decomposition, manifold, svm\n'), ((1468, 1479), 'time.time', 'time.time', ([], {}), '()\n', (1477, 1479), False, 'import time\n'), ((1555, 1566), 'time.time', 'time.time', ([], {}), '()\n', (1564, 1566), False, 'import time\n'), ((1682, 1710), 'numpy.savetxt', 'np.savetxt', (['"""weights.txt"""', 'W'], {}), "('weights.txt', W)\n", (1692, 1710), True, 'import numpy as np\n'), ((1784, 1913), 'numpy.loadtxt', 'np.loadtxt', (['"""/home/cristianopatricio/Documents/Datasets/AwA2-base/Animals_with_Attributes2/predicate-matrix-binary.txt"""'], {}), "(\n '/home/cristianopatricio/Documents/Datasets/AwA2-base/Animals_with_Attributes2/predicate-matrix-binary.txt'\n )\n", (1794, 1913), True, 'import numpy as np\n'), ((2528, 2556), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), 
'()\n', (2554, 2556), False, 'from sklearn import datasets, linear_model, preprocessing, decomposition, manifold, svm\n'), ((1066, 1094), 'numpy.where', 'np.where', (['((y > 0) & (y < 41))'], {}), '((y > 0) & (y < 41))\n', (1074, 1094), True, 'import numpy as np\n'), ((2117, 2139), 'numpy.dot', 'np.dot', (['S_train.T', 'V.T'], {}), '(S_train.T, V.T)\n', (2123, 2139), True, 'import numpy as np\n'), ((2629, 2658), 'numpy.where', 'np.where', (['((y > 40) & (y < 51))'], {}), '((y > 40) & (y < 51))\n', (2637, 2658), True, 'import numpy as np\n'), ((2775, 2796), 'numpy.dot', 'np.dot', (['S_test.T', 'V.T'], {}), '(S_test.T, V.T)\n', (2781, 2796), True, 'import numpy as np\n'), ((1023, 1051), 'numpy.where', 'np.where', (['((y > 0) & (y < 41))'], {}), '((y > 0) & (y < 41))\n', (1031, 1051), True, 'import numpy as np\n'), ((2033, 2076), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['S_train.T', 'W.T'], {'rcond': 'None'}), '(S_train.T, W.T, rcond=None)\n', (2048, 2076), True, 'import numpy as np\n'), ((2586, 2615), 'numpy.where', 'np.where', (['((y > 40) & (y < 51))'], {}), '((y > 40) & (y < 51))\n', (2594, 2615), True, 'import numpy as np\n'), ((2200, 2225), 'numpy.sqrt', 'np.sqrt', (['((W_new - W) ** 2)'], {}), '((W_new - W) ** 2)\n', (2207, 2225), True, 'import numpy as np\n'), ((2938, 2956), 'numpy.dot', 'np.dot', (['x.T', 'W_new'], {}), '(x.T, W_new)\n', (2944, 2956), True, 'import numpy as np\n'), ((859, 871), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (868, 871), True, 'import numpy as np\n')] |
#Functions used for gibbs sampling
# <NAME>
#02 April 2019
import pandas as pd
import numpy as np
class gibbs:
    """Gibbs samplers for simple normal-model posteriors.

    Both samplers are stateless, so they are exposed as static methods.
    (BUG FIX: the original definitions had neither ``self`` nor
    ``@staticmethod``, which made them uncallable on an instance.)
    """

    @staticmethod
    def gibbs_difference(y, ind, mu0=50, tau0=1/625, del0=0, gamma0=1/625, a0=0.5, b0=50, maxiter=5000):
        """Gibbs sampler for a two-group normal model y ~ N(mu +/- delta, 1/tau).

        :param y: observations (indexable by a boolean mask, e.g. pandas Series)
        :param ind: group indicator per observation — assumes values 1 and 2
        :param mu0, tau0: prior mean / precision for the grand mean mu
        :param del0, gamma0: prior mean / precision for the half-difference delta
        :param a0, b0: gamma-prior shape / rate-like parameters for tau
        :param maxiter: number of Gibbs iterations
        :return: DataFrame with one row per iteration: mu, del, tau, theta=1/sqrt(tau)
        """
        y1 = y[ind == 1]
        y2 = y[ind == 2]
        n1 = len(y1)
        n2 = len(y2)
        # Initialize at the method-of-moments estimates.
        mu = (y1.mean() + y2.mean()) / 2
        delta = (y1.mean() - y2.mean()) / 2
        df_samples = pd.DataFrame(columns=["mu", "del", "tau", 'theta'])
        an = a0 + (n1 + n2) / 2  # gamma shape is constant across iterations
        for i in range(maxiter):
            # Update tau | mu, delta
            bn = b0 + 0.5 * (sum((y1 - mu - delta) ** 2) + sum((y2 - mu + delta) ** 2))
            tau = np.random.gamma(an, 1 / bn)
            # Update mu | tau, delta
            taun = tau0 + tau * (n1 + n2)
            mun = (tau0 * mu0 + tau * (sum(y1 - delta) + sum(y2 + delta))) / taun
            mu = np.random.normal(mun, np.sqrt(1 / taun))
            # Update delta | tau, mu
            gamman = gamma0 + tau * (n1 + n2)
            deln = (del0 * gamma0 + tau * (sum(y1 - mu) - sum(y2 - mu))) / gamman
            delta = np.random.normal(deln, np.sqrt(1 / gamman))
            df_samples.loc[i] = [mu, delta, tau, 1 / np.sqrt(tau)]
        return df_samples

    @staticmethod
    def gibbs_m(y, ind, mu0=50, gamma0=1/25, eta0=1/2, t0=50, a0=1/2, b0=50, maxiter=5000):
        """Gibbs sampler for a hierarchical (one-way) normal model with m groups.

        :param y: observations (indexable by a boolean mask)
        :param ind: group label per observation (pandas Series)
        :param mu0, gamma0: prior mean / precision for the overall mean mu
        :param eta0, t0: gamma-prior parameters for the between-group precision
        :param a0, b0: gamma-prior parameters for the within-group precision
        :param maxiter: number of Gibbs iterations
        :return: (per-group theta samples DataFrame, DataFrame of mu/tau_w/tau_b/...)
        """
        m = ind.nunique()
        labels = list(ind.unique())  # fixed group ordering used everywhere below
        # Starting values
        ybar = [np.mean(y[ind == lab]) for lab in labels]
        theta = list(ybar)
        tau_w = np.mean([1 / np.var(y[ind == lab]) for lab in labels])  # within-group precision
        mu = np.mean(theta)
        tau_b = 1 / np.var(theta)  # between-group precision
        n_m = [len(y[ind == lab]) for lab in labels]
        an = a0 + sum(n_m) / 2
        # Setup MCMC storage
        theta_mat = pd.DataFrame(columns=labels)
        mat_store = pd.DataFrame(columns=["mu", "tau_w", "tau_b", "theta_w", "theta_b"])
        for i in range(maxiter):
            # Sample new values of the group means theta_j
            theta = []
            for j in range(m):
                taun = n_m[j] * tau_w + tau_b
                thetan = (ybar[j] * n_m[j] * tau_w + mu * tau_b) / taun
                theta.append(np.random.normal(thetan, np.sqrt(1 / taun)))
            # Sample tau_w. BUG FIX: the original indexed groups as
            # `ind == j+1` here, which only agrees with ybar/theta when the
            # labels are exactly 1..m in appearance order; use the same
            # label ordering everywhere.
            ss = 0
            for j in range(m):
                ss = ss + sum([(x - theta[j]) ** 2 for x in y[ind == labels[j]]])
            bn = b0 + ss / 2
            tau_w = np.random.gamma(an, 1 / bn)
            # Sample a new value of mu
            gammam = m * tau_b + gamma0
            mum = (np.mean(theta) * m * tau_b + mu0 * gamma0) / gammam
            mu = np.random.normal(mum, np.sqrt(1 / gammam))
            # Sample a new value of tau_b
            etam = eta0 + m / 2
            tm = t0 + sum([(t - mu) ** 2 for t in theta]) / 2
            tau_b = np.random.gamma(etam, 1 / tm)
            # Store results
            theta_mat.loc[i] = theta
            mat_store.loc[i] = [mu, tau_w, tau_b, 1 / np.sqrt(tau_w), 1 / np.sqrt(tau_b)]
            if i % 500 == 0:
                print("{}/{}".format(i, maxiter))
        return (theta_mat, mat_store)
| [
"numpy.mean",
"numpy.sqrt",
"numpy.random.gamma",
"pandas.DataFrame",
"numpy.var"
] | [((457, 508), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['mu', 'del', 'tau', 'theta']"}), "(columns=['mu', 'del', 'tau', 'theta'])\n", (469, 508), True, 'import pandas as pd\n'), ((1639, 1653), 'numpy.mean', 'np.mean', (['theta'], {}), '(theta)\n', (1646, 1653), True, 'import numpy as np\n'), ((1904, 1972), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['mu', 'tau_w', 'tau_b', 'theta_w', 'theta_b']"}), "(columns=['mu', 'tau_w', 'tau_b', 'theta_w', 'theta_b'])\n", (1916, 1972), True, 'import pandas as pd\n'), ((729, 756), 'numpy.random.gamma', 'np.random.gamma', (['an', '(1 / bn)'], {}), '(an, 1 / bn)\n', (744, 756), True, 'import numpy as np\n'), ((1486, 1506), 'numpy.mean', 'np.mean', (['y[ind == i]'], {}), '(y[ind == i])\n', (1493, 1506), True, 'import numpy as np\n'), ((1672, 1685), 'numpy.var', 'np.var', (['theta'], {}), '(theta)\n', (1678, 1685), True, 'import numpy as np\n'), ((2512, 2539), 'numpy.random.gamma', 'np.random.gamma', (['an', '(1 / bn)'], {}), '(an, 1 / bn)\n', (2527, 2539), True, 'import numpy as np\n'), ((2895, 2924), 'numpy.random.gamma', 'np.random.gamma', (['etam', '(1 / tm)'], {}), '(etam, 1 / tm)\n', (2910, 2924), True, 'import numpy as np\n'), ((959, 976), 'numpy.sqrt', 'np.sqrt', (['(1 / taun)'], {}), '(1 / taun)\n', (966, 976), True, 'import numpy as np\n'), ((1190, 1209), 'numpy.sqrt', 'np.sqrt', (['(1 / gamman)'], {}), '(1 / gamman)\n', (1197, 1209), True, 'import numpy as np\n'), ((2727, 2746), 'numpy.sqrt', 'np.sqrt', (['(1 / gammam)'], {}), '(1 / gammam)\n', (2734, 2746), True, 'import numpy as np\n'), ((1260, 1272), 'numpy.sqrt', 'np.sqrt', (['tau'], {}), '(tau)\n', (1267, 1272), True, 'import numpy as np\n'), ((1557, 1576), 'numpy.var', 'np.var', (['y[ind == i]'], {}), '(y[ind == i])\n', (1563, 1576), True, 'import numpy as np\n'), ((3039, 3053), 'numpy.sqrt', 'np.sqrt', (['tau_w'], {}), '(tau_w)\n', (3046, 3053), True, 'import numpy as np\n'), ((3056, 3070), 'numpy.sqrt', 'np.sqrt', (['tau_b'], {}), 
'(tau_b)\n', (3063, 3070), True, 'import numpy as np\n'), ((2280, 2297), 'numpy.sqrt', 'np.sqrt', (['(1 / taun)'], {}), '(1 / taun)\n', (2287, 2297), True, 'import numpy as np\n'), ((2636, 2650), 'numpy.mean', 'np.mean', (['theta'], {}), '(theta)\n', (2643, 2650), True, 'import numpy as np\n')] |
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import cv2
# Note origin is top-left corner of the image
# Formula for bx,by,bh,w
# bx=sigmoid(tx)+cx , by=sigmoid(ty)+cy
# where tx,ty is prediction of NN and cx,cy is the origin
# i.e top left corner of present grid cell
#bw=pw*exp(tw), bh=ph*exp(th)
# where tw,th is prediction of NN
# pw,ph is height and width of predefined bounding box
def letterbox_image(img, inp_dim):
    """Resize `img` to fit inside `inp_dim` with unchanged aspect ratio,
    padding the remainder with gray (128).

    Args:
        img: H x W x C image array (as read by cv2).
        inp_dim: (width, height) of the network input.

    Returns:
        Array of shape (inp_dim[1], inp_dim[0], 3) with the resized image
        centered on a constant-gray canvas.
    """
    img_w, img_h = img.shape[1], img.shape[0]
    w, h = inp_dim
    # The limiting scale factor was computed twice in the original; compute it once.
    scale = min(torch.true_divide(w, img_w), torch.true_divide(h, img_h))
    new_w = int(img_w * scale)
    new_h = int(img_h * scale)
    resized_image = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_CUBIC)
    # Gray canvas; paste the resized image centered on it.
    canvas = np.full((inp_dim[1], inp_dim[0], 3), 128)
    top, left = (h - new_h) // 2, (w - new_w) // 2
    canvas[top:top + new_h, left:left + new_w, :] = resized_image
    return canvas
def prep_image(img, inp_dim):
    """
    Prepare a single image for input to the neural network.

    Letterboxes the image to `inp_dim`, reverses the channel order
    (BGR -> RGB), reorders HWC -> CHW, scales to [0, 1] and adds a
    batch dimension.

    Returns a Variable-compatible float tensor of shape (1, 3, H, W).
    """
    padded = letterbox_image(img, inp_dim)
    # BGR -> RGB and HWC -> CHW; .copy() makes the strided view contiguous
    # so torch.from_numpy can wrap it.
    chw = padded[:, :, ::-1].transpose((2, 0, 1)).copy()
    tensor = torch.from_numpy(chw).float()
    return tensor.div(255.0).unsqueeze(0)
def load_classes(namesfile):
    """Load class names from a newline-separated file.

    Args:
        namesfile: path to a text file with one class name per line,
            terminated by a trailing newline.

    Returns:
        List of class-name strings (the empty entry produced by the final
        newline is dropped).
    """
    # `with` guarantees the file handle is closed; the original leaked it.
    with open(namesfile, "r") as fp:
        return fp.read().split("\n")[:-1]
def bbox_iou(box1, box2):
    """
    Return the IoU between one bounding box and a set of bounding boxes.

    Boxes are corner coordinates (x1, y1, x2, y2) in the first four
    columns; `box1` holds a single box, `box2` holds the others (their
    rows broadcast against `box1`). Widths/heights use the inclusive
    pixel convention (+1).
    """
    # Corner coordinates of both operands.
    x1a, y1a, x2a, y2a = (box1[:, k] for k in range(4))
    x1b, y1b, x2b, y2b = (box2[:, k] for k in range(4))
    # Intersection rectangle; empty overlaps clamp to zero area.
    ix1 = torch.max(x1a, x1b)
    iy1 = torch.max(y1a, y1b)
    ix2 = torch.min(x2a, x2b)
    iy2 = torch.min(y2a, y2b)
    inter_area = torch.clamp(ix2 - ix1 + 1, min=0) * torch.clamp(iy2 - iy1 + 1, min=0)
    # IoU = intersection / union.
    area_a = (x2a - x1a + 1) * (y2a - y1a + 1)
    area_b = (x2b - x1b + 1) * (y2b - y1b + 1)
    return inter_area / (area_a + area_b - inter_area)
def predict_transform(prediction, inp_dim, anchors, num_classes, CUDA = True):
    """Decode one YOLO detection layer into absolute box predictions.

    Args:
        prediction: raw layer output of shape
            (batch, (5 + num_classes) * num_anchors, g, g).
        inp_dim: network input resolution (square), e.g. 416.
        anchors: list of (w, h) anchor sizes in input-image pixels.
        num_classes: number of object classes (e.g. 80 for COCO).
        CUDA: if True, build the offset/anchor tensors on the GPU.

    Returns:
        Tensor of shape (batch, g*g*num_anchors, 5 + num_classes): the
        first 4 attributes are center-x, center-y, w, h in input-image
        pixels, attribute 4 is the objectness score, the rest are the
        sigmoided class scores.
    """
    batch_size = prediction.size(0)
    # Stride maps a grid cell back to input pixels (e.g. 416 // 13 = 32).
    stride = inp_dim // prediction.size(2)
    grid_size = inp_dim // stride
    bbox_attrs = 5 + num_classes  # x, y, w, h, objectness + class scores
    num_anchors = len(anchors)
    # Flatten the g x g grid and move the attribute axis last; e.g. for
    # g=13, 3 anchors, 80 classes:
    # (1, 255, 13, 13) -> (1, 255, 169) -> (1, 169, 255) -> (1, 507, 85)
    prediction = prediction.view(batch_size, bbox_attrs * num_anchors, grid_size * grid_size)
    prediction = prediction.transpose(1, 2).contiguous()
    prediction = prediction.view(batch_size, grid_size * grid_size * num_anchors, bbox_attrs)
    # Anchors were given in input pixels; rescale them to grid units.
    anchors = [(a[0] / stride, a[1] / stride) for a in anchors]
    # Sigmoid the center_x, center_y and objectness score:
    # bx = sigmoid(tx) + cx, by = sigmoid(ty) + cy (cell offsets added below).
    prediction[:,:,0] = torch.sigmoid(prediction[:,:,0])
    prediction[:,:,1] = torch.sigmoid(prediction[:,:,1])
    prediction[:,:,4] = torch.sigmoid(prediction[:,:,4])
    # Add the per-cell center offsets (cx, cy).
    grid = np.arange(grid_size)
    a,b = np.meshgrid(grid, grid)  # each (g, g)
    x_offset = torch.FloatTensor(a).view(-1,1)  # (g*g, 1)
    y_offset = torch.FloatTensor(b).view(-1,1)  # (g*g, 1)
    if CUDA:
        x_offset = x_offset.cuda()
        y_offset = y_offset.cuda()
    # (g*g,1)x2 --cat--> (g*g,2) --repeat--> one copy per anchor
    # --view--> (g*g*A, 2) --unsqueeze--> (1, g*g*A, 2)
    x_y_offset = torch.cat((x_offset, y_offset), 1).repeat(1,num_anchors).view(-1,2).unsqueeze(0)
    prediction[:,:,:2] += x_y_offset
    # Log-space transform of width/height: bw = pw*exp(tw), bh = ph*exp(th),
    # where (pw, ph) are the anchor dimensions.
    anchors = torch.FloatTensor(anchors)
    if CUDA:
        anchors = anchors.cuda()
    # (A, 2) --repeat--> (g*g*A, 2) --unsqueeze--> (1, g*g*A, 2)
    anchors = anchors.repeat(grid_size*grid_size, 1).unsqueeze(0)
    prediction[:,:,2:4] = torch.exp(prediction[:,:,2:4])*anchors
    # Apply sigmoid activation to the class scores.
    prediction[:, :, 5: 5 + num_classes] = torch.sigmoid((prediction[:, :, 5: 5 + num_classes]))
    # Rescale the box coordinates from grid units to input-image pixels.
    prediction[:, :, :4] *= stride
    return prediction
def write_results(prediction, confidence, num_classes, nms_conf = 0.4):
    """Threshold and NMS-filter the decoded detections from `predict_transform`.

    Args:
        prediction: (batch, num_boxes, 5 + num_classes) tensor of decoded
            boxes (center-x, center-y, w, h, objectness, class scores).
        confidence: objectness threshold; boxes below it are zeroed out.
        num_classes: number of object classes.
        nms_conf: IoU threshold for non-maximum suppression.

    Returns:
        (D, 8) tensor with one row per surviving detection:
        [batch index, x1, y1, x2, y2, objectness, class score, class index];
        the int 0 if nothing survived in the whole batch.
    """
    # Objectness thresholding: zero out every attribute of low-confidence boxes.
    conf_mask = (prediction[:, :, 4] > confidence).float().unsqueeze(2)
    prediction = prediction * conf_mask
    # Convert (cx, cy, w, h) to corner coordinates (x1, y1, x2, y2), which
    # makes the IoU computation in NMS straightforward.
    box_corner = prediction.new(prediction.shape)
    box_corner[:,:,0] = (prediction[:,:,0] - prediction[:,:,2]/2)
    box_corner[:,:,1] = (prediction[:,:,1] - prediction[:,:,3]/2)
    box_corner[:,:,2] = (prediction[:,:,0] + prediction[:,:,2]/2)
    box_corner[:,:,3] = (prediction[:,:,1] + prediction[:,:,3]/2)
    prediction[:,:,:4] = box_corner[:,:,:4]
    batch_size = prediction.size(0)
    # `write` tracks whether the `output` tensor has been initialized yet.
    write = False
    for ind in range(batch_size):
        image_pred = prediction[ind]
        # image_pred is now a 2-D tensor for one image.
        # Keep only the best class per box: replace the num_classes scores
        # with (max score, argmax index) so each row becomes 7 columns.
        max_conf, max_conf_score = torch.max(image_pred[:, 5:5 + num_classes], 1)
        # torch.max() returns the max value and the index where it occurs.
        max_conf = max_conf.float().unsqueeze(1)
        max_conf_score = max_conf_score.float().unsqueeze(1)
        # Concatenate box coordinates + objectness with the class columns.
        seq = (image_pred[:, :5], max_conf, max_conf_score)
        image_pred = torch.cat(seq, 1)
        # Drop rows whose objectness was zeroed by the confidence mask.
        non_zero_ind = (torch.nonzero(image_pred[:, 4]))
        try:
            image_pred_ = image_pred[non_zero_ind.squeeze(), :].view(-1, 7)
        except:
            # NOTE(review): bare except kept for PyTorch 0.4 compatibility;
            # it skips images with no detections.
            continue
        # For PyTorch 0.4 compatibility: the code above does not raise for
        # zero detections (scalars are supported), so check the shape too.
        if image_pred_.shape[0] == 0:
            continue
        # Get the distinct classes detected in this image.
        try:
            img_classes = torch.unique(image_pred_[:, -1])
            # -1 index holds the class index
        except:
            continue
        for cls in img_classes:
            # Perform NMS per class: select the detections of this class.
            cls_mask = image_pred_ * (image_pred_[:, -1] == cls).float().unsqueeze(1)
            # unsqueeze(1) makes the boolean vector broadcastable against the
            # (num_boxes, 7) matrix; rows of other classes become all-zero.
            class_mask_ind = torch.nonzero(cls_mask[:, -2]).squeeze()
            image_pred_class = image_pred_[class_mask_ind].view(-1, 7)
            # Sort so the entry with maximum objectness confidence is on top.
            sorted_, conf_sort_index = torch.sort(image_pred_class[:, 4], descending=True)
            image_pred_class = image_pred_class[conf_sort_index]
            idx = image_pred_class.size(0)  # Number of detections
            for i in range(idx):
                # IoU of box i against every box ranked after it.
                try:
                    ious = bbox_iou(image_pred_class[i].unsqueeze(0), image_pred_class[i + 1:])
                except ValueError:
                    break
                except IndexError:
                    # Earlier removals can shrink the tensor below index i.
                    break
                # Zero out all later detections that overlap box i too much.
                iou_mask = (ious < nms_conf).float().unsqueeze(1)
                image_pred_class[i + 1:] *= iou_mask
                # Remove the rows that were just zeroed.
                non_zero_ind = torch.nonzero(image_pred_class[:, 4]).squeeze()
                image_pred_class = image_pred_class[non_zero_ind].view(-1, 7)
            # Prefix each surviving row with the image's batch index so the
            # flat output can be mapped back to its source image.
            batch_ind = image_pred_class.new(image_pred_class.size(0), 1).fill_(ind)
            seq = batch_ind, image_pred_class
            if not write:
                output = torch.cat(seq, 1)
                write = True
            else:
                out = torch.cat(seq, 1)
                # Append this class's detections along the row dimension.
                output = torch.cat((output, out))
    # If `output` was never initialized there was not a single detection in
    # any image of the batch; return 0 in that case.
    try:
        return output
    except:
        return 0
| [
"torch.sort",
"torch.unique",
"torch.max",
"torch.sigmoid",
"torch.clamp",
"torch.min",
"torch.exp",
"torch.cat",
"torch.nonzero",
"numpy.meshgrid",
"torch.from_numpy",
"numpy.full",
"cv2.resize",
"torch.FloatTensor",
"numpy.arange",
"torch.true_divide"
] | [((887, 949), 'cv2.resize', 'cv2.resize', (['img', '(new_w, new_h)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(img, (new_w, new_h), interpolation=cv2.INTER_CUBIC)\n', (897, 949), False, 'import cv2\n'), ((966, 1007), 'numpy.full', 'np.full', (['(inp_dim[1], inp_dim[0], 3)', '(128)'], {}), '((inp_dim[1], inp_dim[0], 3), 128)\n', (973, 1007), True, 'import numpy as np\n'), ((2182, 2205), 'torch.max', 'torch.max', (['b1_x1', 'b2_x1'], {}), '(b1_x1, b2_x1)\n', (2191, 2205), False, 'import torch\n'), ((2227, 2250), 'torch.max', 'torch.max', (['b1_y1', 'b2_y1'], {}), '(b1_y1, b2_y1)\n', (2236, 2250), False, 'import torch\n'), ((2272, 2295), 'torch.min', 'torch.min', (['b1_x2', 'b2_x2'], {}), '(b1_x2, b2_x2)\n', (2281, 2295), False, 'import torch\n'), ((2317, 2340), 'torch.min', 'torch.min', (['b1_y2', 'b2_y2'], {}), '(b1_y2, b2_y2)\n', (2326, 2340), False, 'import torch\n'), ((3917, 3951), 'torch.sigmoid', 'torch.sigmoid', (['prediction[:, :, 0]'], {}), '(prediction[:, :, 0])\n', (3930, 3951), False, 'import torch\n'), ((3975, 4009), 'torch.sigmoid', 'torch.sigmoid', (['prediction[:, :, 1]'], {}), '(prediction[:, :, 1])\n', (3988, 4009), False, 'import torch\n'), ((4033, 4067), 'torch.sigmoid', 'torch.sigmoid', (['prediction[:, :, 4]'], {}), '(prediction[:, :, 4])\n', (4046, 4067), False, 'import torch\n'), ((4109, 4129), 'numpy.arange', 'np.arange', (['grid_size'], {}), '(grid_size)\n', (4118, 4129), True, 'import numpy as np\n'), ((4141, 4164), 'numpy.meshgrid', 'np.meshgrid', (['grid', 'grid'], {}), '(grid, grid)\n', (4152, 4164), True, 'import numpy as np\n'), ((4791, 4817), 'torch.FloatTensor', 'torch.FloatTensor', (['anchors'], {}), '(anchors)\n', (4808, 4817), False, 'import torch\n'), ((5192, 5242), 'torch.sigmoid', 'torch.sigmoid', (['prediction[:, :, 5:5 + num_classes]'], {}), '(prediction[:, :, 5:5 + num_classes])\n', (5205, 5242), False, 'import torch\n'), ((2386, 2439), 'torch.clamp', 'torch.clamp', (['(inter_rect_x2 - inter_rect_x1 + 1)'], {'min': 
'(0)'}), '(inter_rect_x2 - inter_rect_x1 + 1, min=0)\n', (2397, 2439), False, 'import torch\n'), ((2442, 2495), 'torch.clamp', 'torch.clamp', (['(inter_rect_y2 - inter_rect_y1 + 1)'], {'min': '(0)'}), '(inter_rect_y2 - inter_rect_y1 + 1, min=0)\n', (2453, 2495), False, 'import torch\n'), ((5051, 5083), 'torch.exp', 'torch.exp', (['prediction[:, :, 2:4]'], {}), '(prediction[:, :, 2:4])\n', (5060, 5083), False, 'import torch\n'), ((6555, 6601), 'torch.max', 'torch.max', (['image_pred[:, 5:5 + num_classes]', '(1)'], {}), '(image_pred[:, 5:5 + num_classes], 1)\n', (6564, 6601), False, 'import torch\n'), ((6959, 6976), 'torch.cat', 'torch.cat', (['seq', '(1)'], {}), '(seq, 1)\n', (6968, 6976), False, 'import torch\n'), ((7203, 7234), 'torch.nonzero', 'torch.nonzero', (['image_pred[:, 4]'], {}), '(image_pred[:, 4])\n', (7216, 7234), False, 'import torch\n'), ((4194, 4214), 'torch.FloatTensor', 'torch.FloatTensor', (['a'], {}), '(a)\n', (4211, 4214), False, 'import torch\n'), ((4252, 4272), 'torch.FloatTensor', 'torch.FloatTensor', (['b'], {}), '(b)\n', (4269, 4272), False, 'import torch\n'), ((7888, 7920), 'torch.unique', 'torch.unique', (['image_pred_[:, -1]'], {}), '(image_pred_[:, -1])\n', (7900, 7920), False, 'import torch\n'), ((9088, 9139), 'torch.sort', 'torch.sort', (['image_pred_class[:, 4]'], {'descending': '(True)'}), '(image_pred_class[:, 4], descending=True)\n', (9098, 9139), False, 'import torch\n'), ((719, 746), 'torch.true_divide', 'torch.true_divide', (['w', 'img_w'], {}), '(w, img_w)\n', (736, 746), False, 'import torch\n'), ((748, 775), 'torch.true_divide', 'torch.true_divide', (['h', 'img_h'], {}), '(h, img_h)\n', (765, 775), False, 'import torch\n'), ((807, 834), 'torch.true_divide', 'torch.true_divide', (['w', 'img_w'], {}), '(w, img_w)\n', (824, 834), False, 'import torch\n'), ((836, 863), 'torch.true_divide', 'torch.true_divide', (['h', 'img_h'], {}), '(h, img_h)\n', (853, 863), False, 'import torch\n'), ((11025, 11042), 'torch.cat', 'torch.cat', 
(['seq', '(1)'], {}), '(seq, 1)\n', (11034, 11042), False, 'import torch\n'), ((11115, 11132), 'torch.cat', 'torch.cat', (['seq', '(1)'], {}), '(seq, 1)\n', (11124, 11132), False, 'import torch\n'), ((11159, 11183), 'torch.cat', 'torch.cat', (['(output, out)'], {}), '((output, out))\n', (11168, 11183), False, 'import torch\n'), ((8810, 8840), 'torch.nonzero', 'torch.nonzero', (['cls_mask[:, -2]'], {}), '(cls_mask[:, -2])\n', (8823, 8840), False, 'import torch\n'), ((9951, 9988), 'torch.nonzero', 'torch.nonzero', (['image_pred_class[:, 4]'], {}), '(image_pred_class[:, 4])\n', (9964, 9988), False, 'import torch\n'), ((1497, 1518), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (1513, 1518), False, 'import torch\n'), ((4541, 4575), 'torch.cat', 'torch.cat', (['(x_offset, y_offset)', '(1)'], {}), '((x_offset, y_offset), 1)\n', (4550, 4575), False, 'import torch\n')] |
import numpy as np
from envs.EnvWrapper import EnvWrapper
class LunarLanderWithNoise(EnvWrapper):
    """LunarLander-v2 wrapper that pads observations with uniform noise.

    The raw observation is extended with 248 uniform-random values,
    giving a 256-dimensional state vector.
    """

    def __init__(self, random_state):
        super(LunarLanderWithNoise, self).__init__("LunarLander-v2", random_state)
        self.state_sz = 256  # raw observation dims + 248 noise dims

    def transform_obs(self, obs):
        # Append 248 iid U[0, 1) samples to the raw observation.
        noise = np.random.uniform(size=248)
        return np.concatenate((obs, noise))
| [
"numpy.random.uniform"
] | [((320, 347), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(248)'}), '(size=248)\n', (337, 347), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/18/2019 1:38 PM
# @Author : chinshin
# @FileName: ggnn_preprocessor.py
from __future__ import unicode_literals
from collections import defaultdict
import numpy as np
from rdkit import Chem
from chainer_chemistry.dataset.preprocessors.common \
import construct_atomic_number_array, construct_discrete_edge_matrix, MolFeatureExtractionError
from chainer_chemistry.dataset.preprocessors.common import type_check_num_atoms
from chainer_chemistry.dataset.preprocessors.mol_preprocessor \
import MolPreprocessor
class MyGGNNPreprocessor(MolPreprocessor):
    """GGNN preprocessor returning WL-subgraph IDs and a discrete edge matrix.

    `get_input_features` encodes every atom by the integer ID of its
    r-radius subgraph (Weisfeiler-Lehman style relabeling) and also
    returns the bond-type-resolved adjacency matrix of the molecule.

    Args:
        max_atoms (int): Max number of atoms for each molecule; if the
            number of atoms is more than this value, the data is simply
            ignored. A negative value indicates no limit.
        out_size (int): Size of the arrays returned by
            `get_input_features`. If the number of atoms in the molecule
            is less than this value, the returned arrays are padded to
            this fixed size. A negative value indicates no padding.
        add_Hs (bool): If True, implicit Hs are added.
        kekulize (bool): If True, Kekulizes the molecule.
        radius (int): Radius of the extracted subgraphs (WL iterations).
    """

    def construct_dict(self):
        """(Re)create the stateful encoders used for index-based encoding.

        Each defaultdict assigns the next free integer ID to a key the
        first time it is looked up, so IDs depend on insertion order and
        are shared across all molecules processed by this instance.
        """
        self.atom_dict = defaultdict(lambda: len(self.atom_dict))
        self.bond_dict = defaultdict(lambda: len(self.bond_dict))
        self.fingerprint_dict = defaultdict(lambda: len(self.fingerprint_dict))
        self.edge_dict = defaultdict(lambda: len(self.edge_dict))

    def __init__(self, max_atoms=-1, out_size=-1, add_Hs=False,
                 kekulize=False, radius=0):
        super(MyGGNNPreprocessor, self).__init__(
            add_Hs=add_Hs, kekulize=kekulize)
        # BUGFIX: the original condition `0 <= max_atoms < out_size` raised
        # for the VALID configuration (max_atoms below out_size, i.e. the
        # padding case) and let the invalid one pass. The error message has
        # always stated the intended rule: max_atoms must be <= out_size.
        if 0 <= out_size < max_atoms:
            raise ValueError('max_atoms {} must be less or equal to '
                             'out_size {}'.format(max_atoms, out_size))
        self.max_atoms = max_atoms
        self.out_size = out_size
        self.radius = radius
        self.construct_dict()

    def get_input_features(self, mol):
        """Return the (subgraph_array, adj_array) input features for `mol`.

        Args:
            mol (Mol): rdkit molecule.

        Returns:
            subgraph_array: int32 array of per-atom subgraph IDs.
            adj_array: discrete (bond-type-resolved) edge matrix.
        """
        type_check_num_atoms(mol, self.max_atoms)
        atoms = self.create_atoms(mol)
        i_jbond_dict = self.create_ijbonddict(mol)
        subgraph_array = self.extract_subgraph(atoms, i_jbond_dict, self.radius)
        adj_array = construct_discrete_edge_matrix(mol)
        return subgraph_array, adj_array

    def create_atoms(self, mol):
        """Encode each atom (symbol plus an aromaticity tag) as an integer ID.

        :param mol: rdkit.Chem.Mol object
        :return: int32 array of atom IDs, indexed by atom position
        """
        atoms = [a.GetSymbol() for a in mol.GetAtoms()]
        for a in mol.GetAromaticAtoms():
            i = a.GetIdx()
            # Aromatic atoms get a distinct key so they encode differently.
            atoms[i] = (atoms[i], "aromatic")
        atoms = [self.atom_dict[a] for a in atoms]
        return np.array(atoms, dtype=np.int32)

    def create_ijbonddict(self, mol):
        """Map each atom index to a list of (neighbor index, bond-type ID).

        Both directions of every bond are recorded.

        :param mol: rdkit.Chem.Mol object
        :return: dict atom index -> list of (neighbor, bond ID) tuples
        """
        i_jbond_dict = defaultdict(lambda: [])
        for b in mol.GetBonds():
            i, j = b.GetBeginAtomIdx(), b.GetEndAtomIdx()
            bond = self.bond_dict[str(b.GetBondType())]
            i_jbond_dict[i].append((j, bond))
            i_jbond_dict[j].append((i, bond))
        return i_jbond_dict

    def extract_subgraph(self, atoms, i_jbond_dict, radius):
        """Assign each atom the ID of its r-radius subgraph (WL algorithm).

        :param atoms: int array of initial atom IDs
        :param i_jbond_dict: neighbor dict from `create_ijbonddict`
        :param radius: number of relabeling iterations
        :return: int32 array of fingerprint IDs, one per atom
        """
        if (len(atoms) == 1) or (radius == 0):
            fingerprints = [self.fingerprint_dict[a] for a in atoms]
        else:
            nodes = atoms
            i_jedge_dict = i_jbond_dict
            for _ in range(radius):
                # Relabel each node from its own ID plus the sorted multiset
                # of (neighbor ID, edge ID) pairs — i.e. its r-radius
                # subgraph / fingerprint.
                fingerprints = []
                for i, j_edge in i_jedge_dict.items():
                    neighbors = [(nodes[j], edge) for j, edge in j_edge]
                    fingerprint = (nodes[i], tuple(sorted(neighbors)))
                    fingerprints.append(self.fingerprint_dict[fingerprint])
                nodes = fingerprints
                # Also relabel each edge from the (sorted) IDs of the two
                # nodes on its both sides.
                _i_jedge_dict = defaultdict(lambda: [])
                for i, j_edge in i_jedge_dict.items():
                    for j, edge in j_edge:
                        both_side = tuple(sorted((nodes[i], nodes[j])))
                        edge = self.edge_dict[(both_side, edge)]
                        _i_jedge_dict[i].append((j, edge))
                i_jedge_dict = _i_jedge_dict
        return np.array(fingerprints, dtype=np.int32)

    @staticmethod
    def create_adjacency(mol):
        """Return the plain 0/1 adjacency matrix of `mol` as int32.

        :param mol: rdkit.Chem.Mol object
        """
        adjacency = Chem.GetAdjacencyMatrix(mol)
        return np.array(adjacency, dtype=np.int32)
| [
"chainer_chemistry.dataset.preprocessors.common.construct_discrete_edge_matrix",
"chainer_chemistry.dataset.preprocessors.common.type_check_num_atoms",
"numpy.array",
"collections.defaultdict",
"rdkit.Chem.GetAdjacencyMatrix"
] | [((2821, 2862), 'chainer_chemistry.dataset.preprocessors.common.type_check_num_atoms', 'type_check_num_atoms', (['mol', 'self.max_atoms'], {}), '(mol, self.max_atoms)\n', (2841, 2862), False, 'from chainer_chemistry.dataset.preprocessors.common import type_check_num_atoms\n'), ((3062, 3097), 'chainer_chemistry.dataset.preprocessors.common.construct_discrete_edge_matrix', 'construct_discrete_edge_matrix', (['mol'], {}), '(mol)\n', (3092, 3097), False, 'from chainer_chemistry.dataset.preprocessors.common import construct_atomic_number_array, construct_discrete_edge_matrix, MolFeatureExtractionError\n'), ((3570, 3601), 'numpy.array', 'np.array', (['atoms'], {'dtype': 'np.int32'}), '(atoms, dtype=np.int32)\n', (3578, 3601), True, 'import numpy as np\n'), ((3910, 3934), 'collections.defaultdict', 'defaultdict', (['(lambda : [])'], {}), '(lambda : [])\n', (3921, 3934), False, 'from collections import defaultdict\n'), ((5746, 5784), 'numpy.array', 'np.array', (['fingerprints'], {'dtype': 'np.int32'}), '(fingerprints, dtype=np.int32)\n', (5754, 5784), True, 'import numpy as np\n'), ((5946, 5974), 'rdkit.Chem.GetAdjacencyMatrix', 'Chem.GetAdjacencyMatrix', (['mol'], {}), '(mol)\n', (5969, 5974), False, 'from rdkit import Chem\n'), ((5991, 6026), 'numpy.array', 'np.array', (['adjacency'], {'dtype': 'np.int32'}), '(adjacency, dtype=np.int32)\n', (5999, 6026), True, 'import numpy as np\n'), ((5359, 5383), 'collections.defaultdict', 'defaultdict', (['(lambda : [])'], {}), '(lambda : [])\n', (5370, 5383), False, 'from collections import defaultdict\n')] |
#!/usr/bin/env python
"""
Convert text data to embeddings
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, <NAME>"
__license__ = "The MIT License"
__email__ = "<EMAIL>"
"""
import os
import logging
import re
import numpy as np
import keras
from gensim.models.word2vec import Word2Vec
from project.text_to_id import map_text_to_word_list
# Module-level logger; verbosity is controlled via the LOGLEVEL env var below.
log = logging.getLogger(__name__)
logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))  # e.g. LOGLEVEL=DEBUG for verbose output
# Special vocabulary tokens reserved for unknown words, end-of-sequence and padding.
RESERVED_WORD_LIST = ["<UNK>", "<EOS>", "<PAD>"]
# Path to the pre-trained gensim Word2Vec model (must exist on disk before use).
MODEL_PATH = "/tmp/tp/word2vec_example.model"
def map_text_list_to_embedding(text_list, label_for_text_list, num_labels, label_to_id):
    """
    Convert each text to the mean of its word embeddings.

    Parameters
    ----------
    text_list: list of str
        List of text
    label_for_text_list: list of str
        List of labels, which is the ground truth for each text on the text_list
    num_labels: int
        Number of labels
    label_to_id: dict
        Label to integer id mapping

    Returns
    -------
    x: ndarray
        Numpy array of mean word embeddings for each text.
    y: ndarray
        Numpy array of one-hot label rows, aligned with x.
    missing_words: set
        Set of words not in the Word2Vec model's dictionary.
    """
    model = Word2Vec.load(MODEL_PATH)
    missing_words = set()
    x_list = []
    y_list = []
    total_found_in_dict = 0
    total_not_in_dict = 0
    for i, text in enumerate(text_list):
        log.debug("Processing post: [%d]", i + 1)
        word_v_list = []
        for w in map_text_to_word_list(text):
            try:
                # NOTE(review): `model[w]` is the gensim<4 API; gensim 4 uses
                # `model.wv[w]` — confirm the pinned gensim version.
                v = model[w]
            except KeyError:
                missing_words.add(w)
                total_not_in_dict += 1
                continue
            word_v_list.append(v)
            total_found_in_dict += 1
        if not word_v_list:
            # No in-vocabulary words: the text cannot be embedded, skip it.
            continue
        label_id = label_to_id[label_for_text_list[i]]
        label_row = keras.utils.to_categorical(label_id, num_labels).astype(np.float32)
        y_list.append(label_row.reshape(1, num_labels))
        # Mean-pool the word vectors into one fixed-size embedding.
        # (The original also computed an unused sum vector each iteration.)
        word_v_np = np.array(word_v_list)
        x_list.append(np.sum(word_v_np, axis=0) / word_v_np.shape[0])
    x = np.array(x_list)
    y = np.concatenate(y_list)
    assert x.shape[0] == y.shape[0]
    log.info("Number of words found in dict: %d", total_found_in_dict)
    log.info("Number of words not found in dict: %d", total_not_in_dict)
    return x, y, missing_words
"logging.getLogger",
"gensim.models.word2vec.Word2Vec.load",
"project.text_to_id.map_text_to_word_list",
"os.environ.get",
"keras.utils.to_categorical",
"numpy.array",
"numpy.sum",
"numpy.concatenate"
] | [((354, 381), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (371, 381), False, 'import logging\n'), ((1259, 1284), 'gensim.models.word2vec.Word2Vec.load', 'Word2Vec.load', (['MODEL_PATH'], {}), '(MODEL_PATH)\n', (1272, 1284), False, 'from gensim.models.word2vec import Word2Vec\n'), ((2714, 2730), 'numpy.array', 'np.array', (['x_list'], {}), '(x_list)\n', (2722, 2730), True, 'import numpy as np\n'), ((2758, 2780), 'numpy.concatenate', 'np.concatenate', (['y_list'], {}), '(y_list)\n', (2772, 2780), True, 'import numpy as np\n'), ((408, 442), 'os.environ.get', 'os.environ.get', (['"""LOGLEVEL"""', '"""INFO"""'], {}), "('LOGLEVEL', 'INFO')\n", (422, 442), False, 'import os\n'), ((1524, 1551), 'project.text_to_id.map_text_to_word_list', 'map_text_to_word_list', (['text'], {}), '(text)\n', (1545, 1551), False, 'from project.text_to_id import map_text_to_word_list\n'), ((2356, 2377), 'numpy.array', 'np.array', (['word_v_list'], {}), '(word_v_list)\n', (2364, 2377), True, 'import numpy as np\n'), ((2498, 2523), 'numpy.sum', 'np.sum', (['word_v_np'], {'axis': '(0)'}), '(word_v_np, axis=0)\n', (2504, 2523), True, 'import numpy as np\n'), ((2440, 2465), 'numpy.sum', 'np.sum', (['word_v_np'], {'axis': '(0)'}), '(word_v_np, axis=0)\n', (2446, 2465), True, 'import numpy as np\n'), ((2186, 2234), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['label_id', 'num_labels'], {}), '(label_id, num_labels)\n', (2212, 2234), False, 'import keras\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 28 09:08:29 2021
@author: <NAME>
"""
from . import multiasset as ma
from . import opt_abc as opt
import numpy as np
import scipy.stats as spst
class BsmBasketAsianJu2002(ma.NormBasket):
    def __init__(self, sigma, cor=None, weight=None, intr=0.0, divr=0.0, is_fwd=False):
        """
        Args:
            sigma: model volatilities of `n_asset` assets. (n_asset, ) array
            cor: correlation. If matrix, used as it is. (n_asset, n_asset)
                If scalar, correlation matrix is constructed with all same off-diagonal values.
            weight: asset weights, If None, equally weighted as 1/n_asset
                If scalar, equal weights of the value
                If 1-D array, uses as it is. (n_asset, )
            intr: interest rate (domestic interest rate)
            divr: vector of dividend/convenience yield (foreign interest rate) 0-D or (n_asset, ) array
            is_fwd: if True, treat `spot` as forward price. False by default.
        """
        super().__init__(
            sigma, cor=cor, weight=weight, intr=intr, divr=divr, is_fwd=is_fwd
        )
        # NOTE(review): `num_asset` is a module-level global shared by all
        # methods of this class; constructing a second instance with a
        # different number of assets clobbers it. Consider an instance
        # attribute instead.
        global num_asset
        num_asset = len(self.weight)
def average_s(self, spot, texp, basket=True):
# cal the forward price of asset num in the basket
if basket:
if np.isscalar(spot):
spot = np.full(num_asset, spot)
if np.isscalar(self.divr):
self.divr = np.full(num_asset, self.divr)
av_s = np.zeros(num_asset)
for num in range(num_asset):
av_s[num] = (
self.weight[num]
* spot[num]
* np.exp((self.intr - self.divr[num]) * texp)
)
else:
if np.isscalar(spot):
spot = np.full(num_asset, spot)
if np.isscalar(self.divr):
self.divr = np.full(num_asset, self.divr)
av_s = np.zeros(num_asset)
for num in range(num_asset):
av_s[num] = (
self.weight[num]
* spot[num]
* np.exp(
(self.intr - self.divr[num]) * texp / (num_asset - 1) * num
)
)
self.av_s = av_s
def average_rho(self, texp, basket=True):
# cal the rho between asset i and j
if basket:
av_rho = np.zeros((num_asset, num_asset))
for i in range(num_asset):
for j in range(num_asset):
av_rho[i, j] = (
self.cor_m[i, j] * self.sigma[i] * self.sigma[j] * texp
)
else:
av_rho = np.zeros((num_asset, num_asset))
for i in range(num_asset):
for j in range(i, num_asset):
av_rho[i, j] = self.sigma[0] ** 2 * texp / (num_asset - 1) * i
av_rho[j, i] = av_rho[i, j]
self.av_rho = av_rho
def u1(self, spot, texp):
# the first momentum of log normal distribution#
u1_value = self.av_s.sum()
# u1_value = self.weight @ (spot * np.exp((self.intr-self.divr)*texp))
return u1_value
def u2(self, z):
# the second momentum of log normal distribution#
u2_value = 0
for i in range(num_asset):
for j in range(num_asset):
u2_value += (
self.av_s[i] * self.av_s[j] * np.exp(z * z * self.av_rho[i, j])
)
return u2_value
def u2_1st_der(self):
u2_1st_value = 0
for i in range(num_asset):
for j in range(num_asset):
u2_1st_value += self.av_s[i] * self.av_s[j] * self.av_rho[i, j]
return u2_1st_value
def u2_2nd_der(self):
u2_2nd_value = 0
for i in range(num_asset):
for j in range(num_asset):
u2_2nd_value += self.av_s[i] * self.av_s[j] * pow(self.av_rho[i, j], 2)
return u2_2nd_value
def u2_3rd_der(self):
u2_3rd_value = 0
for i in range(num_asset):
for j in range(num_asset):
u2_3rd_value += self.av_s[i] * self.av_s[j] * pow(self.av_rho[i, j], 3)
return u2_3rd_value
def ak_bar(self):
# calculate the average a, save to self#
av_a = self.av_rho @ self.av_s
self.av_a = av_a
def e_a12_a2(self):
return 2 * self.av_s @ pow(self.av_a, 2)
def e_a12_a22(self):
value = 0
for i in range(num_asset):
for j in range(num_asset):
value += (
self.av_a[i]
* self.av_s[i]
* self.av_rho[i, j]
* self.av_a[j]
* self.av_s[j]
)
value *= 8
value += 2 * self.u2_1st_der() * self.u2_2nd_der()
return value
def e_a13_a3(self):
return 6 * self.av_s @ pow(self.av_a, 3)
def e_a1_a2_a3(self):
value = 0
for i in range(num_asset):
for j in range(num_asset):
value += (
self.av_s[i]
* pow(self.av_rho[i, j], 2)
* self.av_a[j]
* self.av_s[j]
)
value *= 6
return value
def e_a23(self):
value = 0
temp = np.zeros((num_asset, num_asset))
for i in range(num_asset):
for j in range(num_asset):
temp[i, j] = (
pow(self.av_s[i], 0.5) * self.av_rho[i, j] * pow(self.av_s[j], 0.5)
)
for i in range(num_asset):
for j in range(num_asset):
for k in range(num_asset):
value += temp[i, j] * temp[j, k] * temp[k, i]
value *= 8
return value
def func_a1(self, z):
return -pow(z, 2) * self.u2_1st_der() / 2 / self.u2(0)
def func_a2(self, z):
return 2 * pow(self.func_a1(z), 2) - pow(
z, 4
) * self.u2_2nd_der() / 2 / self.u2(0)
def func_a3(self, z):
return (
6 * self.func_a1(z) * self.func_a2(z)
- 4 * pow(self.func_a1(z), 3)
- pow(z, 6) * self.u2_3rd_der() / 2 / self.u2(0)
)
def func_b1(self, spot, texp, z):
return pow(z, 4) * self.e_a12_a2() / 4 / pow(self.u1(spot, texp), 3)
def func_b2(self, z):
return pow(self.func_a1(z), 2) - self.func_a2(z) / 2
def func_c1(self, spot, texp, z):
return -self.func_a1(z) * self.func_b1(spot, texp, z)
def func_c2(self, spot, texp, z):
return (
pow(z, 6)
* (9 * self.e_a12_a22() + 4 * self.e_a13_a3())
/ 144
/ pow(self.u1(spot, texp), 4)
)
def func_c3(self, spot, texp, z):
return (
pow(z, 6)
* (4 * self.e_a1_a2_a3() + self.e_a23())
/ 48
/ pow(self.u1(spot, texp), 3)
)
def func_c4(self, z):
return (
self.func_a1(z) * self.func_a2(z)
- 2 * pow(self.func_a1(z), 3) / 3
- self.func_a3(z) / 6
)
def func_d1(self, spot, texp, z):
return 0.5 * (
6 * pow(self.func_a1(z), 2)
+ self.func_a2(z)
- 4 * self.func_b1(spot, texp, z)
+ 2 * self.func_b2(z)
) - 1 / 6 * (
120 * pow(self.func_a1(z), 3)
- self.func_a3(z)
+ 6
* (
24 * self.func_c1(spot, texp, z)
- 6 * self.func_c2(spot, texp, z)
+ 2 * self.func_c3(spot, texp, z)
- self.func_c4(z)
)
)
def func_d2(self, spot, texp, z):
return 0.5 * (
10 * pow(self.func_a1(z), 2)
+ self.func_a2(z)
- 6 * self.func_b1(spot, texp, z)
+ 2 * self.func_b2(z)
) - (
128 * pow(self.func_a1(z), 3) / 3
- self.func_a3(z) / 6
+ 2 * self.func_a1(z) * self.func_b1(spot, texp, z)
- self.func_a1(z) * self.func_b2(z)
+ 50 * self.func_c1(spot, texp, z)
- 11 * self.func_c2(spot, texp, z)
+ 3 * self.func_c3(spot, texp, z)
- self.func_c4(z)
)
def func_d3(self, spot, texp, z):
return (
2 * pow(self.func_a1(z), 2)
- self.func_b1(spot, texp, z)
- 1
/ 3
* (
88 * pow(self.func_a1(z), 3)
+ 3
* self.func_a1(z)
* (5 * self.func_b1(spot, texp, z) - 2 * self.func_b2(z))
+ 3
* (
35 * self.func_c1(spot, texp, z)
- 6 * self.func_c2(spot, texp, z)
+ self.func_c3(spot, texp, z)
)
)
)
def func_d4(self, spot, texp, z):
return (
-20 * pow(self.func_a1(z), 3) / 3
+ self.func_a1(z) * (-4 * self.func_b1(spot, texp, z) + self.func_b2(z))
- 10 * self.func_c1(spot, texp, z)
+ self.func_c2(spot, texp, z)
)
def price(self, strike, spot, texp, cp=1, basket=True):
if np.isscalar(spot):
spot = np.full(num_asset, spot)
if np.isscalar(self.divr):
self.divr = np.full(num_asset, self.divr)
if basket:
self.average_s(spot, texp)
self.average_rho(texp)
else:
self.average_s(spot, texp, False)
self.average_rho(texp, False)
self.ak_bar()
m1 = 2 * np.log(self.u1(spot, texp)) - 0.5 * np.log(self.u2(1))
v1 = np.log(self.u2(1)) - 2 * np.log(self.u1(spot, texp))
sqrtv1 = np.sqrt(v1)
y = np.log(strike)
y1 = (m1 - y) / np.sqrt(v1) + sqrtv1
y2 = y1 - sqrtv1
z1 = (
self.func_d2(spot, texp, 1)
- self.func_d3(spot, texp, 1)
+ self.func_d4(spot, texp, 1)
)
z2 = self.func_d3(spot, texp, 1) - self.func_d4(spot, texp, 1)
z3 = self.func_d4(spot, texp, 1)
bc = (
self.u1(spot, texp)
* np.exp(-self.intr * texp)
* spst.norm.cdf(y1, loc=0, scale=1)
- strike * np.exp(-self.intr * texp) * spst.norm.cdf(y2, loc=0, scale=1)
+ np.exp(-self.intr * texp)
* strike
* (
z1 * spst.norm.pdf(y, loc=m1, scale=sqrtv1)
+ z2 * spst.norm.pdf(y, loc=m1, scale=sqrtv1) * (m1 - y) / v1
+ z3
* ((y - m1) * (y - m1) / v1 / v1 - 1 / v1)
* spst.norm.pdf(y, loc=m1, scale=sqrtv1)
)
)
if cp == 1:
return bc
elif cp == -1:
return np.exp(-self.intr * texp) * (strike - self.u1(spot, texp)) + bc
else:
return -1
class BsmContinuousAsianJu2002(opt.OptABC):
def price(self, strike, spot, texp, cp=1):
if np.isscalar(spot) == False:
print("spot should not be array")
return 0
elif np.isscalar(self.divr) == False:
print("dividend should not be array")
return 0
else:
g = self.intr - self.divr
gt = g * texp
u1 = spot * (np.exp(gt) - 1) / g / texp
u2 = (
2
* spot ** 2
* (
(np.exp((2 * g + self.sigma ** 2) * texp) - 1)
/ (2 * g + self.sigma ** 2)
- (np.exp(gt) - 1) / g
)
/ texp
/ texp
/ (g + self.sigma ** 2)
)
z1 = -pow(self.sigma, 4) * texp ** 2 * (
1 / 45
+ gt / 180
- 11 * gt ** 2 / 15120
- pow(gt, 3) / 2520
+ pow(gt, 4) / 113400
) - pow(self.sigma, 6) * pow(texp, 3) * (
1 / 11340
- 13 * gt / 30240
- 17 * gt ** 2 / 226800
+ 23 * pow(gt, 3) / 453600
+ 59 * pow(gt, 4) / 5987520
)
z2 = -pow(self.sigma, 4) * texp ** 2 * (
1 / 90
+ gt / 360
- 11 * gt ** 2 / 30240
- pow(gt, 3) / 5040
+ pow(gt, 4) / 226800
) - pow(self.sigma, 6) * pow(texp, 3) * (
31 / 22680
- 11 * gt / 60480
- 37 * gt ** 2 / 151200
- 19 * pow(gt, 3) / 302400
+ 953 * pow(gt, 4) / 59875200
)
z3 = (
pow(self.sigma, 6)
* pow(texp, 3)
* (
2 / 2835
- gt / 60480
- 2 * gt ** 2 / 14175
- 17 * pow(gt, 3) / 907200
+ 13 * pow(gt, 4) / 1247400
)
)
m1 = 2 * np.log(u1) - 0.5 * np.log(u2)
v1 = np.log(u2) - 2 * np.log(u1)
sqrtv1 = np.sqrt(v1)
y = np.log(strike)
y1 = (m1 - y) / np.sqrt(v1) + sqrtv1
y2 = y1 - sqrtv1
bc = (
u1 * np.exp(-self.intr * texp) * spst.norm.cdf(y1, loc=0, scale=1)
- strike * np.exp(-self.intr * texp) * spst.norm.cdf(y2, loc=0, scale=1)
+ np.exp(-self.intr * texp)
* strike
* (
z1 * spst.norm.pdf(y, loc=m1, scale=sqrtv1)
+ z2 * spst.norm.pdf(y, loc=m1, scale=sqrtv1) * (m1 - y) / v1
+ z3
* ((y - m1) * (y - m1) / v1 / v1 - 1 / v1)
* spst.norm.pdf(y, loc=m1, scale=sqrtv1)
)
)
if cp == 1:
return bc
elif cp == -1:
return np.exp(-self.intr * texp) * (strike - u1) + bc
else:
return -1
| [
"numpy.sqrt",
"numpy.isscalar",
"numpy.log",
"numpy.exp",
"numpy.zeros",
"scipy.stats.norm.pdf",
"numpy.full",
"scipy.stats.norm.cdf"
] | [((5440, 5472), 'numpy.zeros', 'np.zeros', (['(num_asset, num_asset)'], {}), '((num_asset, num_asset))\n', (5448, 5472), True, 'import numpy as np\n'), ((9337, 9354), 'numpy.isscalar', 'np.isscalar', (['spot'], {}), '(spot)\n', (9348, 9354), True, 'import numpy as np\n'), ((9411, 9433), 'numpy.isscalar', 'np.isscalar', (['self.divr'], {}), '(self.divr)\n', (9422, 9433), True, 'import numpy as np\n'), ((9861, 9872), 'numpy.sqrt', 'np.sqrt', (['v1'], {}), '(v1)\n', (9868, 9872), True, 'import numpy as np\n'), ((9885, 9899), 'numpy.log', 'np.log', (['strike'], {}), '(strike)\n', (9891, 9899), True, 'import numpy as np\n'), ((1355, 1372), 'numpy.isscalar', 'np.isscalar', (['spot'], {}), '(spot)\n', (1366, 1372), True, 'import numpy as np\n'), ((1437, 1459), 'numpy.isscalar', 'np.isscalar', (['self.divr'], {}), '(self.divr)\n', (1448, 1459), True, 'import numpy as np\n'), ((1538, 1557), 'numpy.zeros', 'np.zeros', (['num_asset'], {}), '(num_asset)\n', (1546, 1557), True, 'import numpy as np\n'), ((1811, 1828), 'numpy.isscalar', 'np.isscalar', (['spot'], {}), '(spot)\n', (1822, 1828), True, 'import numpy as np\n'), ((1893, 1915), 'numpy.isscalar', 'np.isscalar', (['self.divr'], {}), '(self.divr)\n', (1904, 1915), True, 'import numpy as np\n'), ((1994, 2013), 'numpy.zeros', 'np.zeros', (['num_asset'], {}), '(num_asset)\n', (2002, 2013), True, 'import numpy as np\n'), ((2464, 2496), 'numpy.zeros', 'np.zeros', (['(num_asset, num_asset)'], {}), '((num_asset, num_asset))\n', (2472, 2496), True, 'import numpy as np\n'), ((2753, 2785), 'numpy.zeros', 'np.zeros', (['(num_asset, num_asset)'], {}), '((num_asset, num_asset))\n', (2761, 2785), True, 'import numpy as np\n'), ((9375, 9399), 'numpy.full', 'np.full', (['num_asset', 'spot'], {}), '(num_asset, spot)\n', (9382, 9399), True, 'import numpy as np\n'), ((9459, 9488), 'numpy.full', 'np.full', (['num_asset', 'self.divr'], {}), '(num_asset, self.divr)\n', (9466, 9488), True, 'import numpy as np\n'), ((11116, 11133), 
'numpy.isscalar', 'np.isscalar', (['spot'], {}), '(spot)\n', (11127, 11133), True, 'import numpy as np\n'), ((1397, 1421), 'numpy.full', 'np.full', (['num_asset', 'spot'], {}), '(num_asset, spot)\n', (1404, 1421), True, 'import numpy as np\n'), ((1489, 1518), 'numpy.full', 'np.full', (['num_asset', 'self.divr'], {}), '(num_asset, self.divr)\n', (1496, 1518), True, 'import numpy as np\n'), ((1853, 1877), 'numpy.full', 'np.full', (['num_asset', 'spot'], {}), '(num_asset, spot)\n', (1860, 1877), True, 'import numpy as np\n'), ((1945, 1974), 'numpy.full', 'np.full', (['num_asset', 'self.divr'], {}), '(num_asset, self.divr)\n', (1952, 1974), True, 'import numpy as np\n'), ((9924, 9935), 'numpy.sqrt', 'np.sqrt', (['v1'], {}), '(v1)\n', (9931, 9935), True, 'import numpy as np\n'), ((11224, 11246), 'numpy.isscalar', 'np.isscalar', (['self.divr'], {}), '(self.divr)\n', (11235, 11246), True, 'import numpy as np\n'), ((13217, 13228), 'numpy.sqrt', 'np.sqrt', (['v1'], {}), '(v1)\n', (13224, 13228), True, 'import numpy as np\n'), ((13245, 13259), 'numpy.log', 'np.log', (['strike'], {}), '(strike)\n', (13251, 13259), True, 'import numpy as np\n'), ((1720, 1763), 'numpy.exp', 'np.exp', (['((self.intr - self.divr[num]) * texp)'], {}), '((self.intr - self.divr[num]) * texp)\n', (1726, 1763), True, 'import numpy as np\n'), ((2176, 2243), 'numpy.exp', 'np.exp', (['((self.intr - self.divr[num]) * texp / (num_asset - 1) * num)'], {}), '((self.intr - self.divr[num]) * texp / (num_asset - 1) * num)\n', (2182, 2243), True, 'import numpy as np\n'), ((3512, 3545), 'numpy.exp', 'np.exp', (['(z * z * self.av_rho[i, j])'], {}), '(z * z * self.av_rho[i, j])\n', (3518, 3545), True, 'import numpy as np\n'), ((10332, 10365), 'scipy.stats.norm.cdf', 'spst.norm.cdf', (['y1'], {'loc': '(0)', 'scale': '(1)'}), '(y1, loc=0, scale=1)\n', (10345, 10365), True, 'import scipy.stats as spst\n'), ((10417, 10450), 'scipy.stats.norm.cdf', 'spst.norm.cdf', (['y2'], {'loc': '(0)', 'scale': '(1)'}), '(y2, loc=0, 
scale=1)\n', (10430, 10450), True, 'import scipy.stats as spst\n'), ((10465, 10490), 'numpy.exp', 'np.exp', (['(-self.intr * texp)'], {}), '(-self.intr * texp)\n', (10471, 10490), True, 'import numpy as np\n'), ((13168, 13178), 'numpy.log', 'np.log', (['u2'], {}), '(u2)\n', (13174, 13178), True, 'import numpy as np\n'), ((10292, 10317), 'numpy.exp', 'np.exp', (['(-self.intr * texp)'], {}), '(-self.intr * texp)\n', (10298, 10317), True, 'import numpy as np\n'), ((10389, 10414), 'numpy.exp', 'np.exp', (['(-self.intr * texp)'], {}), '(-self.intr * texp)\n', (10395, 10414), True, 'import numpy as np\n'), ((10764, 10802), 'scipy.stats.norm.pdf', 'spst.norm.pdf', (['y'], {'loc': 'm1', 'scale': 'sqrtv1'}), '(y, loc=m1, scale=sqrtv1)\n', (10777, 10802), True, 'import scipy.stats as spst\n'), ((10911, 10936), 'numpy.exp', 'np.exp', (['(-self.intr * texp)'], {}), '(-self.intr * texp)\n', (10917, 10936), True, 'import numpy as np\n'), ((13121, 13131), 'numpy.log', 'np.log', (['u1'], {}), '(u1)\n', (13127, 13131), True, 'import numpy as np\n'), ((13140, 13150), 'numpy.log', 'np.log', (['u2'], {}), '(u2)\n', (13146, 13150), True, 'import numpy as np\n'), ((13185, 13195), 'numpy.log', 'np.log', (['u1'], {}), '(u1)\n', (13191, 13195), True, 'import numpy as np\n'), ((13288, 13299), 'numpy.sqrt', 'np.sqrt', (['v1'], {}), '(v1)\n', (13295, 13299), True, 'import numpy as np\n'), ((14029, 14054), 'numpy.exp', 'np.exp', (['(-self.intr * texp)'], {}), '(-self.intr * texp)\n', (14035, 14054), True, 'import numpy as np\n'), ((10549, 10587), 'scipy.stats.norm.pdf', 'spst.norm.pdf', (['y'], {'loc': 'm1', 'scale': 'sqrtv1'}), '(y, loc=m1, scale=sqrtv1)\n', (10562, 10587), True, 'import scipy.stats as spst\n'), ((13406, 13439), 'scipy.stats.norm.cdf', 'spst.norm.cdf', (['y1'], {'loc': '(0)', 'scale': '(1)'}), '(y1, loc=0, scale=1)\n', (13419, 13439), True, 'import scipy.stats as spst\n'), ((13495, 13528), 'scipy.stats.norm.cdf', 'spst.norm.cdf', (['y2'], {'loc': '(0)', 'scale': '(1)'}), 
'(y2, loc=0, scale=1)\n', (13508, 13528), True, 'import scipy.stats as spst\n'), ((13547, 13572), 'numpy.exp', 'np.exp', (['(-self.intr * texp)'], {}), '(-self.intr * texp)\n', (13553, 13572), True, 'import numpy as np\n'), ((11431, 11441), 'numpy.exp', 'np.exp', (['gt'], {}), '(gt)\n', (11437, 11441), True, 'import numpy as np\n'), ((13378, 13403), 'numpy.exp', 'np.exp', (['(-self.intr * texp)'], {}), '(-self.intr * texp)\n', (13384, 13403), True, 'import numpy as np\n'), ((13467, 13492), 'numpy.exp', 'np.exp', (['(-self.intr * texp)'], {}), '(-self.intr * texp)\n', (13473, 13492), True, 'import numpy as np\n'), ((13874, 13912), 'scipy.stats.norm.pdf', 'spst.norm.pdf', (['y'], {'loc': 'm1', 'scale': 'sqrtv1'}), '(y, loc=m1, scale=sqrtv1)\n', (13887, 13912), True, 'import scipy.stats as spst\n'), ((10611, 10649), 'scipy.stats.norm.pdf', 'spst.norm.pdf', (['y'], {'loc': 'm1', 'scale': 'sqrtv1'}), '(y, loc=m1, scale=sqrtv1)\n', (10624, 10649), True, 'import scipy.stats as spst\n'), ((13643, 13681), 'scipy.stats.norm.pdf', 'spst.norm.pdf', (['y'], {'loc': 'm1', 'scale': 'sqrtv1'}), '(y, loc=m1, scale=sqrtv1)\n', (13656, 13681), True, 'import scipy.stats as spst\n'), ((11564, 11604), 'numpy.exp', 'np.exp', (['((2 * g + self.sigma ** 2) * texp)'], {}), '((2 * g + self.sigma ** 2) * texp)\n', (11570, 11604), True, 'import numpy as np\n'), ((11681, 11691), 'numpy.exp', 'np.exp', (['gt'], {}), '(gt)\n', (11687, 11691), True, 'import numpy as np\n'), ((13709, 13747), 'scipy.stats.norm.pdf', 'spst.norm.pdf', (['y'], {'loc': 'm1', 'scale': 'sqrtv1'}), '(y, loc=m1, scale=sqrtv1)\n', (13722, 13747), True, 'import scipy.stats as spst\n')] |
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np
import os
import datetime
'''
se_txt_to_npz
'''
# # spatial embedding
# f = open("sz/SE(sz).txt", mode='r')
# lines = f.readlines()
# temp = lines[0].split(' ')
# N, dims = int(temp[0]), int(temp[1])
# SE = np.zeros(shape=(N, dims), dtype=np.float32)
# for line in lines[1:]:
# temp = line.split(' ')
# index = int(temp[0])
# SE[index] = temp[1:]
# today = str(datetime.date.today().strftime("%Y%m%d"))
# SE_save_file_name="sz/SE(sz)-{}".format(today)
# np.savez_compressed(SE_save_file_name,SE=SE)
# log string
def log_string(log, string):
log.write(string + '\n')
log.flush()
print(string)
def print_parameters(log):
parameters = 0
for variable in tf.trainable_variables():
parameters += np.product([x.value for x in variable.get_shape()])
log_string(log, 'trainable parameters: {:,}'.format(parameters))
'''
给出一个路径(单级目录或多级别目录),若它不存在,则创建;若存在,则跳过
'''
def create_path(dir):
if not os.path.exists(dir):
os.makedirs(dir)
'''
功能:产生输入模式的字符串,如 P0D3W0
'''
def input_name(args):
num = len(args.input_types)
name = ''
for i in range(num):
name += args.input_types[i] + str(args.input_steps[i])
return name
'''
数据集产生序列后,存放有关的数据/日志/模型
'''
def create_dir_PDW(args):
dir_name = input_name(args) # 文件夹名,按输入模式命名
PDW_dir = os.path.join(args.dataset_dir, dir_name) # 总路径
log_dir = os.path.join(PDW_dir, 'log') # args.dataset_dir/PDW/log,日志路径
save_dir = os.path.join(PDW_dir, 'save', args.model) # args.dataset_dir/PDW/save/model,模型保存路径
data_dir = os.path.join(PDW_dir, 'data', args.model) # args.dataset_dir/PDW/data/model, 数据保存路径
create_path(save_dir)
create_path(log_dir)
create_path(data_dir)
return log_dir, save_dir, data_dir, dir_name
'''
打开日志文件(追加模式),提示开始,打印参数
'''
def create_log(args, type):
dir_name = input_name(args) # 文件夹名,按输入模式命名
log_file = os.path.join(args.dataset_dir, dir_name, 'log', args.model+'_'+type) # args.dataset_dir/PDW/log/ANN_data_log,日志路径
log = open(log_file, 'a') # append the log
return log
def path(args,data_mode):
dir_name = input_name(args) # 文件夹名,以输入模式命名
data_path = os.path.join(args.dataset_dir, args.model, data_mode, dir_name) # 路径
create_path(data_path) # 创建
log_file = os.path.join(data_path, 'data_log')
log = open(log_file, 'a') # append the log
return dir_name, data_path, log
# mat; 矩阵(D,T,N,N),nd.array类型,元素数据类型是int or float32
# start_val: 区间的起始值,区间是闭区间[start,end]
# end_val: 区间的结束值
# 返回值: 这个区间在整个矩阵中占得比例
def get_pro(mat, start_val, end_val):
mat = np.around(mat)
days, T, N, _ = mat.shape
sum = 0
for i in range(start_val, end_val+1):
sum += np.sum(mat == i)
total = days*T*N*N
proportion = sum/total
return proportion
# 检查多维数组中是否存在nan,inf
def check_inf_nan(Ms):
nan_num = np.sum(np.isnan(Ms).astype(np.float32))
inf_num = np.sum(np.isinf(Ms).astype(np.float32))
print("Number of nan",nan_num,"Number of inf",inf_num) | [
"os.path.exists",
"tensorflow.compat.v1.disable_v2_behavior",
"os.makedirs",
"os.path.join",
"numpy.sum",
"numpy.isnan",
"numpy.around",
"tensorflow.compat.v1.trainable_variables",
"numpy.isinf"
] | [((34, 58), 'tensorflow.compat.v1.disable_v2_behavior', 'tf.disable_v2_behavior', ([], {}), '()\n', (56, 58), True, 'import tensorflow.compat.v1 as tf\n'), ((763, 787), 'tensorflow.compat.v1.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (785, 787), True, 'import tensorflow.compat.v1 as tf\n'), ((1383, 1423), 'os.path.join', 'os.path.join', (['args.dataset_dir', 'dir_name'], {}), '(args.dataset_dir, dir_name)\n', (1395, 1423), False, 'import os\n'), ((1444, 1472), 'os.path.join', 'os.path.join', (['PDW_dir', '"""log"""'], {}), "(PDW_dir, 'log')\n", (1456, 1472), False, 'import os\n'), ((1521, 1562), 'os.path.join', 'os.path.join', (['PDW_dir', '"""save"""', 'args.model'], {}), "(PDW_dir, 'save', args.model)\n", (1533, 1562), False, 'import os\n'), ((1620, 1661), 'os.path.join', 'os.path.join', (['PDW_dir', '"""data"""', 'args.model'], {}), "(PDW_dir, 'data', args.model)\n", (1632, 1661), False, 'import os\n'), ((1954, 2026), 'os.path.join', 'os.path.join', (['args.dataset_dir', 'dir_name', '"""log"""', "(args.model + '_' + type)"], {}), "(args.dataset_dir, dir_name, 'log', args.model + '_' + type)\n", (1966, 2026), False, 'import os\n'), ((2221, 2284), 'os.path.join', 'os.path.join', (['args.dataset_dir', 'args.model', 'data_mode', 'dir_name'], {}), '(args.dataset_dir, args.model, data_mode, dir_name)\n', (2233, 2284), False, 'import os\n'), ((2337, 2372), 'os.path.join', 'os.path.join', (['data_path', '"""data_log"""'], {}), "(data_path, 'data_log')\n", (2349, 2372), False, 'import os\n'), ((2637, 2651), 'numpy.around', 'np.around', (['mat'], {}), '(mat)\n', (2646, 2651), True, 'import numpy as np\n'), ((1011, 1030), 'os.path.exists', 'os.path.exists', (['dir'], {}), '(dir)\n', (1025, 1030), False, 'import os\n'), ((1040, 1056), 'os.makedirs', 'os.makedirs', (['dir'], {}), '(dir)\n', (1051, 1056), False, 'import os\n'), ((2751, 2767), 'numpy.sum', 'np.sum', (['(mat == i)'], {}), '(mat == i)\n', (2757, 2767), True, 'import numpy as np\n'), 
((2906, 2918), 'numpy.isnan', 'np.isnan', (['Ms'], {}), '(Ms)\n', (2914, 2918), True, 'import numpy as np\n'), ((2960, 2972), 'numpy.isinf', 'np.isinf', (['Ms'], {}), '(Ms)\n', (2968, 2972), True, 'import numpy as np\n')] |
# iris/train.py
import json
from argparse import Namespace
from typing import Dict, Tuple
import numpy as np
import optuna
import pandas as pd
import torch
import torch.nn as nn
from numpyencoder import NumpyEncoder
from sklearn.preprocessing import LabelEncoder
# from config import config
from config.config import logger
from iris import data, eval, models, utils
lr = 1e-2
class Trainer:
"""Object used to facilitate training"""
def __init__(
self,
model: nn.Module,
device: torch.device = torch.device("cpu"),
loss_fn=None,
optimizer=None,
scheduler=None,
trial: optuna.trial._trial.Trial = None,
):
# set params
self.model = model
self.device = device
self.loss_fn = loss_fn
self.optimizer = optimizer
self.scheduler = scheduler
self.trial = trial
def train_step(self, dataloader: torch.utils.data.DataLoader):
"""Train step
Args:
dataloader: torch dataloader to load batches from
"""
# set model to train mode
self.model.train()
loss = 0.0
# iterate over train batches
for i, batch in enumerate(dataloader):
# step
batch = [item.to(self.device) for item in batch] # set device
inputs, targets = batch[:-1][0], batch[-1]
self.optimizer.zero_grad() # reset gradients
z = self.model(inputs) # Forward pass
J = self.loss_fn(z, targets) # Define loss
J.backward() # backward pass
self.optimizer.step() # update weights
# Cumulative Metrics
loss += (J.detach().item() - loss) / (i + 1)
return loss
def eval_step(self, dataloader: torch.utils.data.DataLoader):
"""Evaluation (val/test) step
Args:
dataloader : torch.dataloader to load batches from
"""
# set model to eval mode
self.model.eval()
loss = 0.0
y_trues, y_probs = [], []
# Iterate over val batches
with torch.inference_mode():
for i, batch in enumerate(dataloader):
# Step
batch = [item.to(self.device) for item in batch] # set device
inputs, y_true = batch[:-1][0], batch[-1]
z = self.model(inputs) # forward pass
J = self.loss_fn(z, y_true).item()
# Cumelative metrics
loss += (J - loss) / (i + 1)
# store outputs
y_prob = torch.sigmoid(z).cpu().numpy()
y_probs.extend(y_prob)
y_trues.extend(y_true.cpu().numpy())
return loss, np.vstack(y_trues), np.vstack(y_probs)
def predict_step(self, dataloader: torch.utils.data.DataLoader):
"""Predictin function ( inference step)
Note:
Loss is not calculated for this loop
Args:
dataloader : torch dataloader to load batches from
"""
# set model to eval mode
self.model.eval()
y_trues, y_probs = [], []
# Iterate over batchs
with torch.inference_mode():
for i, batch in enumerate(dataloader):
# Forward pass
batch = [item.to(self.device) for item in batch]
inputs, y_true = batch[:-1][0], batch[-1]
z = self.model(inputs)
# Store outputs
y_prob = torch.sigmoid(z).cpu().numpy()
y_probs.extend(y_prob)
y_trues.extend(y_true.cpu().numpy())
return np.vstack(y_trues), np.vstack(y_probs)
def train(
self,
num_epochs: int,
patience: int,
train_dataloader: torch.utils.data.DataLoader,
val_dataloader: torch.utils.data.DataLoader,
) -> Tuple:
"""Training loop
Args:
num epochs (int): max num of epochs to train for 9 can stop early if not model not imporving
patience (int): Number of acceptable epochs for continuous degrading performance.
train_dataloader: dataloader object with trainig data split
val_dataloader: dataloader object with validation data split
Raises:
optuna.TrialPruned: Early stopping of the optimization trial if poor performance.
Returns:
The best validation loss and the trained model from that point.
"""
best_val_loss = np.inf
best_model = None
_patience = patience
for epoch in range(num_epochs):
# steps
train_loss = self.train_step(dataloader=train_dataloader)
val_loss, _, _ = self.eval_step(dataloader=val_dataloader)
self.scheduler.step(val_loss)
# Pruning based on the intemediate valus
if self.trial:
self.trial.report(val_loss, epoch)
if self.trial.should_prune():
logger.info("failure trials pruned!")
raise optuna.TrialPruned()
# Early stoping
if val_loss < best_val_loss:
best_val_loss = val_loss
best_model = self.model
_patience = patience # reset _patience
else:
_patience -= 1
if not _patience:
logger.info("Stopping early!")
break
# logging
logger.info(
f"Epoch: {epoch+1} | "
f"train_loss: {train_loss:.5f}, "
f"val_loss: {val_loss:.5f}, "
f"lr: {self.optimizer.param_groups[0]['lr']:.2E}, "
f"_patience: {_patience}"
)
return best_val_loss, best_model
def train(params: Namespace = None, trial: optuna.trial._trial.Trial = None) -> Dict:
"""Operations for training
ARGS:
params (Namespace): Iput params for operations.
trial ( optuna.trial._trial.Trail,optional): Optuna optimization trial, defaults to None
Returns:
Artifacts to save and load for later"""
utils.set_seed(seed=params.seed)
device = utils.set_device(cuda=params.cuda)
# Get data this data clensing can be done seperately later
path = "https://raw.githubusercontent.com/jbrownlee/Datasets/master/iris.csv"
df = pd.read_csv(path, header=None, names=["f1", "f2", "f3", "f4", "class"])
df = df.sample(frac=1).reset_index(drop=True)
df["class"] = LabelEncoder().fit_transform(df["class"])
train_df = df[: int(len(df) * 0.8)].reset_index(drop=True)
test_df = df[int(len(df) * 0.8) : int(len(df) * 0.9)].reset_index(drop=True)
val_df = df[int(len(df) * 0.9) :].reset_index(drop=True)
X_train, y_train = train_df.values[:, :-1], train_df.values[:, -1]
X_val, y_val = val_df.values[:, :-1], val_df.values[:, -1]
train_dataset = data.CSVDataset(X=X_train, y=y_train)
val_dataset = data.CSVDataset(X=X_val, y=y_val)
train_dataloader = train_dataset.get_dataloader(batch_size=params.batch_size)
val_dataloader = val_dataset.get_dataloader(batch_size=params.batch_size)
model = models.initialize_model(device=torch.device("cpu"))
# Trainer module
logger.info(
f"Parameters: {json.dumps(params.__dict__, indent=2, cls=NumpyEncoder)}"
)
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer, mode="min", factor=0.1, patience=5
)
trainer = Trainer(
model=model,
device=device,
loss_fn=nn.CrossEntropyLoss(),
optimizer=optimizer,
scheduler=scheduler,
trial=trial,
)
# Train
best_val_loss, best_model = trainer.train(100, 10, train_dataloader, val_dataloader)
# Find best threshold
# y_true, y_prob = trainer.eval_step(dataloader=train_dl)
# params.threshold = find_best_threshold(y_true=y_true, y_prob=y_prob)
# Evaluate model
artifacts = {
"params": params,
"model": best_model,
"loss": best_val_loss,
}
device = torch.device("cpu")
y_true, y_pred, performance = eval.evaluate(df=test_df, artifacts=artifacts)
artifacts["performance"] = performance
return artifacts
def objective(params: Namespace, trial: optuna.trial._trial.Trial) -> float:
"""Objective function for optimization trials.
Args:
params (Namespace): Input parameters for each trial (see `config/params.json`).
trial (optuna.trial._trial.Trial): Optuna optimization trial.
Returns:
F1 score from evaluating the trained model on the test data split.
"""
# Paramters (to tune)
params.hidden_dim = trial.suggest_int("hidden_dim", 16, 32)
params.dropout_p = trial.suggest_uniform("dropout_p", 0.3, 0.8)
params.lr = trial.suggest_loguniform("lr", 5e-5, 5e-4)
# Train (can move some of these outside for efficiency)
logger.info(f"\nTrial {trial.number}:")
logger.info(json.dumps(trial.params, indent=2))
artifacts = train(params=params, trial=trial)
# Set additional attributes
params = artifacts["params"]
performance = artifacts["performance"]
logger.info(json.dumps(performance["overall"], indent=2))
trial.set_user_attr("precision", performance["overall"]["precision"])
trial.set_user_attr("recall", performance["overall"]["recall"])
trial.set_user_attr("f1", performance["overall"]["f1"])
return performance["overall"]["f1"]
| [
"sklearn.preprocessing.LabelEncoder",
"optuna.TrialPruned",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.nn.CrossEntropyLoss",
"pandas.read_csv",
"iris.eval.evaluate",
"json.dumps",
"torch.sigmoid",
"config.config.logger.info",
"iris.utils.set_seed",
"torch.inference_mode",
"numpy.vsta... | [((6145, 6177), 'iris.utils.set_seed', 'utils.set_seed', ([], {'seed': 'params.seed'}), '(seed=params.seed)\n', (6159, 6177), False, 'from iris import data, eval, models, utils\n'), ((6191, 6225), 'iris.utils.set_device', 'utils.set_device', ([], {'cuda': 'params.cuda'}), '(cuda=params.cuda)\n', (6207, 6225), False, 'from iris import data, eval, models, utils\n'), ((6381, 6452), 'pandas.read_csv', 'pd.read_csv', (['path'], {'header': 'None', 'names': "['f1', 'f2', 'f3', 'f4', 'class']"}), "(path, header=None, names=['f1', 'f2', 'f3', 'f4', 'class'])\n", (6392, 6452), True, 'import pandas as pd\n'), ((6923, 6960), 'iris.data.CSVDataset', 'data.CSVDataset', ([], {'X': 'X_train', 'y': 'y_train'}), '(X=X_train, y=y_train)\n', (6938, 6960), False, 'from iris import data, eval, models, utils\n'), ((6979, 7012), 'iris.data.CSVDataset', 'data.CSVDataset', ([], {'X': 'X_val', 'y': 'y_val'}), '(X=X_val, y=y_val)\n', (6994, 7012), False, 'from iris import data, eval, models, utils\n'), ((7440, 7534), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'torch.optim.lr_scheduler.ReduceLROnPlateau', (['optimizer'], {'mode': '"""min"""', 'factor': '(0.1)', 'patience': '(5)'}), "(optimizer, mode='min', factor=\n 0.1, patience=5)\n", (7482, 7534), False, 'import torch\n'), ((8146, 8165), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (8158, 8165), False, 'import torch\n'), ((8200, 8246), 'iris.eval.evaluate', 'eval.evaluate', ([], {'df': 'test_df', 'artifacts': 'artifacts'}), '(df=test_df, artifacts=artifacts)\n', (8213, 8246), False, 'from iris import data, eval, models, utils\n'), ((8990, 9032), 'config.config.logger.info', 'logger.info', (['f"""\nTrial {trial.number}:"""'], {}), '(f"""\nTrial {trial.number}:""")\n', (9001, 9032), False, 'from config.config import logger\n'), ((532, 551), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (544, 551), False, 'import torch\n'), ((9046, 9080), 'json.dumps', 'json.dumps', 
(['trial.params'], {'indent': '(2)'}), '(trial.params, indent=2)\n', (9056, 9080), False, 'import json\n'), ((9257, 9301), 'json.dumps', 'json.dumps', (["performance['overall']"], {'indent': '(2)'}), "(performance['overall'], indent=2)\n", (9267, 9301), False, 'import json\n'), ((2102, 2124), 'torch.inference_mode', 'torch.inference_mode', ([], {}), '()\n', (2122, 2124), False, 'import torch\n'), ((2729, 2747), 'numpy.vstack', 'np.vstack', (['y_trues'], {}), '(y_trues)\n', (2738, 2747), True, 'import numpy as np\n'), ((2749, 2767), 'numpy.vstack', 'np.vstack', (['y_probs'], {}), '(y_probs)\n', (2758, 2767), True, 'import numpy as np\n'), ((3176, 3198), 'torch.inference_mode', 'torch.inference_mode', ([], {}), '()\n', (3196, 3198), False, 'import torch\n'), ((3641, 3659), 'numpy.vstack', 'np.vstack', (['y_trues'], {}), '(y_trues)\n', (3650, 3659), True, 'import numpy as np\n'), ((3661, 3679), 'numpy.vstack', 'np.vstack', (['y_probs'], {}), '(y_probs)\n', (3670, 3679), True, 'import numpy as np\n'), ((5484, 5657), 'config.config.logger.info', 'logger.info', (['f"""Epoch: {epoch + 1} | train_loss: {train_loss:.5f}, val_loss: {val_loss:.5f}, lr: {self.optimizer.param_groups[0][\'lr\']:.2E}, _patience: {_patience}"""'], {}), '(\n f"Epoch: {epoch + 1} | train_loss: {train_loss:.5f}, val_loss: {val_loss:.5f}, lr: {self.optimizer.param_groups[0][\'lr\']:.2E}, _patience: {_patience}"\n )\n', (5495, 5657), False, 'from config.config import logger\n'), ((6521, 6535), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (6533, 6535), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((7217, 7236), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (7229, 7236), False, 'import torch\n'), ((7627, 7648), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (7646, 7648), True, 'import torch.nn as nn\n'), ((5396, 5426), 'config.config.logger.info', 'logger.info', (['"""Stopping early!"""'], {}), "('Stopping early!')\n", 
(5407, 5426), False, 'from config.config import logger\n'), ((7300, 7355), 'json.dumps', 'json.dumps', (['params.__dict__'], {'indent': '(2)', 'cls': 'NumpyEncoder'}), '(params.__dict__, indent=2, cls=NumpyEncoder)\n', (7310, 7355), False, 'import json\n'), ((5009, 5046), 'config.config.logger.info', 'logger.info', (['"""failure trials pruned!"""'], {}), "('failure trials pruned!')\n", (5020, 5046), False, 'from config.config import logger\n'), ((5073, 5093), 'optuna.TrialPruned', 'optuna.TrialPruned', ([], {}), '()\n', (5091, 5093), False, 'import optuna\n'), ((2584, 2600), 'torch.sigmoid', 'torch.sigmoid', (['z'], {}), '(z)\n', (2597, 2600), False, 'import torch\n'), ((3502, 3518), 'torch.sigmoid', 'torch.sigmoid', (['z'], {}), '(z)\n', (3515, 3518), False, 'import torch\n')] |
import pdb
import logging
from pathlib import Path
import PIL # type: ignore
import click
from typing import Tuple
from guppy import hpy # type: ignore
import numpy as np # type: ignore
import matplotlib.pyplot as plt # type: ignore
import torch
from sklearn.metrics import pairwise_distances # type: ignore
from tqdm import tqdm # type: ignore
from utils import slice_image, glue_images, load_target_img, load_cifar_imgs # type: ignore
from perceptual_repr import extract_features # type: ignore
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
logger = logging.getLogger('Image gluing genetic algorithm')
def generate_random_individuals(
block_array_size: Tuple[int, int, int],
img_database_size: int
) -> np.ndarray:
return np.random.randint(
low = 0,
high = img_database_size,
size = block_array_size
)
def save_individuals(
individuals: np.ndarray,
scores: np.ndarray,
img_database: np.ndarray,
generation_idx: int,
output_folder: Path
) -> None:
imgs_to_glue = img_database[individuals]
glued_imgs = glue_images(imgs_to_glue)
for img_idx, (img, score) in enumerate(zip(glued_imgs, scores)):
pil_img = PIL.Image.fromarray(img)
img_fn = output_folder / f'gen_{generation_idx:04d}_img_{img_idx}_score_{score}.png'
pil_img.save(img_fn)
def score_individuals(
individuals: np.ndarray,
img_database: np.ndarray,
target_img_repr: np.ndarray,
batch_size: int
) -> np.ndarray:
imgs_to_glue = img_database[individuals]
glued_imgs = glue_images(imgs_to_glue)
glued_imgs_repr = extract_features(
images = glued_imgs,
layer_idx = 7,
batch_size = batch_size
)
scores = pairwise_distances(target_img_repr, glued_imgs_repr)
return scores[0]
def score_and_sort_population(
population: np.ndarray,
img_database: np.ndarray,
target_img_repr: np.ndarray,
batch_size: int
) -> Tuple[np.ndarray, np.ndarray]:
population_scores = score_individuals(
individuals = population,
img_database = img_database,
target_img_repr = target_img_repr,
batch_size = batch_size
)
sorted_indices = np.argsort(population_scores)
population_scores = population_scores[sorted_indices]
population = population[sorted_indices]
return population, population_scores
def reproductions(
population: np.ndarray,
first_parent_indices: np.ndarray,
second_parent_indices: np.ndarray
) -> np.ndarray:
first_parents = population[first_parent_indices]
second_parents = population[second_parent_indices]
mask = np.random.random(first_parents.shape) > .5
return mask * first_parents + (1 - mask) * second_parents
def mutation(
population: np.ndarray,
img_database_size: int,
n_mutation: int,
proba: float
) -> np.ndarray:
individuals_to_mutate = population[
np.random.permutation(len(population))[:n_mutation]
].copy()
mutation_mask = np.random.random(individuals_to_mutate.shape) < proba
img_to_mutate = individuals_to_mutate[mutation_mask]
img_to_mutate = np.random.randint(
low = 0,
high = img_database_size,
size = len(img_to_mutate)
)
return individuals_to_mutate
def remove_duplicate_individuals(population: np.ndarray) -> np.ndarray:
population = np.unique(population, axis = 0)
return population
def run_generation(
pop_size: int,
n_gen: int,
n_mutation: int,
mutation_proba: float,
n_reprod: int,
n_select: int,
n_new_ind: int,
block_size:int,
target_img: np.ndarray,
img_database: np.ndarray,
batch_size: int,
output_folder: Path,
) -> None:
target_img_slices = slice_image(
img = target_img,
block_size = block_size
)
target_img_repr = extract_features(
images = target_img[None, ...],
layer_idx = 7,
batch_size = 1
)
population = generate_random_individuals(
block_array_size = (
pop_size,
target_img_slices.shape[0],
target_img_slices.shape[1]
),
img_database_size = len(img_database)
)
population, population_scores = score_and_sort_population(
population = population,
img_database = img_database,
target_img_repr = target_img_repr,
batch_size = batch_size
)
for generation_idx in tqdm(range(n_gen)):
# h = hpy()
# print(h.heap())
logger.info(f'Start of generation {generation_idx}')
logger.info(f'\nTop scores {population_scores[:5]}')
logger.info(f'Performing reproductions')
reprod_parent_1 = np.random.randint(
low = 0,
high = n_select,
size = (n_reprod, )
)
reprod_parent_2 = np.random.randint(
low = 0,
high = len(population),
size = (n_reprod, )
)
reprod_children = reproductions(
population,
reprod_parent_1,
reprod_parent_2
)
intermediate_population = np.concatenate((
population,
reprod_children
))
logger.info(f'Performing mutations')
mutated_population = mutation(
population = intermediate_population,
img_database_size = len(img_database),
n_mutation = n_mutation,
proba = mutation_proba
)
logger.info('Generating random individuals')
new_individuals = generate_random_individuals(
block_array_size = (
n_new_ind,
target_img_slices.shape[0],
target_img_slices.shape[1]
),
img_database_size = len(img_database)
)
logger.info(f'Scoring population')
new_population = np.concatenate((
intermediate_population,
mutated_population,
new_individuals
))
new_population, new_population_scores = score_and_sort_population(
population = new_population,
img_database = img_database,
target_img_repr = target_img_repr,
batch_size = batch_size
)
population = new_population[:pop_size]
population_scores = population_scores[:pop_size]
# Duplication removal
population = remove_duplicate_individuals(population)
if len(population) < pop_size:
logger.info(f'{pop_size - len(population)} duplicates removed, '
'filling back the population')
new_individuals = generate_random_individuals(
block_array_size = (
pop_size - len(population),
target_img_slices.shape[0],
target_img_slices.shape[1]
),
img_database_size = len(img_database)
)
population = np.concatenate((
population,
new_individuals
))
population, population_scores = score_and_sort_population(
population = population,
img_database = img_database,
target_img_repr = target_img_repr,
batch_size = batch_size
)
if generation_idx % 5 == 0:
save_individuals(
population[:3],
population_scores[:3],
img_database,
generation_idx,
output_folder
)
@click.command()
@click.argument('target_img_fn', type = click.Path(exists = True))
@click.argument('output_folder', type = click.Path(exists = True))
@click.argument('target_img_height', type = int)
@click.argument('target_img_width', type = int)
@click.argument('pop_size', type = int)
@click.argument('n_gen', type = int)
@click.argument('n_reprod', type = int)
@click.argument('n_select', type = int)
@click.argument('n_mutation', type = int)
@click.argument('mutation_proba', type = float)
@click.argument('n_new_ind', type = int)
def main(
target_img_fn: str,
output_folder: str,
target_img_height: int,
target_img_width: int,
pop_size: int,
n_gen: int,
n_reprod: int,
n_select: int,
n_mutation: int,
mutation_proba: float,
n_new_ind: int
) -> None:
target_img_path = Path(target_img_fn)
output_folder_path = Path(output_folder)
logger.info(f'Target image path: {target_img_path}')
logger.info(f'Target image height: {target_img_height}')
logger.info(f'Target image width: {target_img_width}')
logger.info(f'Output folder path: {output_folder_path}')
logger.info(f'Population size: {pop_size}')
logger.info(f'Number of generation: {n_gen}')
logger.info(f'Number of reproductions: {n_reprod}')
logger.info(f'First parent selection range: {n_select}')
logger.info(f'Number of mutated individuals: {n_mutation}')
logger.info(f'Mutation image change probability: {100 * mutation_proba:5.3f}%')
logger.info(f'Number of new random individuals at each generation: {n_new_ind}')
target_img = load_target_img(
target_img_path,
target_img_height,
target_img_width
)
img_database = load_cifar_imgs('data', 16)
logger.info(f'Image database shape: {img_database.shape}')
run_generation(
pop_size = pop_size,
n_mutation = n_mutation,
mutation_proba = mutation_proba,
n_gen = n_gen,
n_reprod = n_reprod,
n_select = n_select,
n_new_ind = n_new_ind,
block_size = 16,
target_img = target_img,
img_database = img_database,
batch_size = 1,
output_folder = output_folder_path
)
if __name__ == '__main__':
main()
| [
"logging.basicConfig",
"logging.getLogger",
"click.argument",
"PIL.Image.fromarray",
"numpy.unique",
"pathlib.Path",
"numpy.random.random",
"utils.load_cifar_imgs",
"sklearn.metrics.pairwise_distances",
"utils.slice_image",
"numpy.argsort",
"numpy.random.randint",
"utils.glue_images",
"cli... | [((565, 620), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': 'log_fmt'}), '(level=logging.INFO, format=log_fmt)\n', (584, 620), False, 'import logging\n'), ((630, 681), 'logging.getLogger', 'logging.getLogger', (['"""Image gluing genetic algorithm"""'], {}), "('Image gluing genetic algorithm')\n", (647, 681), False, 'import logging\n'), ((7620, 7635), 'click.command', 'click.command', ([], {}), '()\n', (7633, 7635), False, 'import click\n'), ((7771, 7816), 'click.argument', 'click.argument', (['"""target_img_height"""'], {'type': 'int'}), "('target_img_height', type=int)\n", (7785, 7816), False, 'import click\n'), ((7820, 7864), 'click.argument', 'click.argument', (['"""target_img_width"""'], {'type': 'int'}), "('target_img_width', type=int)\n", (7834, 7864), False, 'import click\n'), ((7868, 7904), 'click.argument', 'click.argument', (['"""pop_size"""'], {'type': 'int'}), "('pop_size', type=int)\n", (7882, 7904), False, 'import click\n'), ((7908, 7941), 'click.argument', 'click.argument', (['"""n_gen"""'], {'type': 'int'}), "('n_gen', type=int)\n", (7922, 7941), False, 'import click\n'), ((7945, 7981), 'click.argument', 'click.argument', (['"""n_reprod"""'], {'type': 'int'}), "('n_reprod', type=int)\n", (7959, 7981), False, 'import click\n'), ((7985, 8021), 'click.argument', 'click.argument', (['"""n_select"""'], {'type': 'int'}), "('n_select', type=int)\n", (7999, 8021), False, 'import click\n'), ((8025, 8063), 'click.argument', 'click.argument', (['"""n_mutation"""'], {'type': 'int'}), "('n_mutation', type=int)\n", (8039, 8063), False, 'import click\n'), ((8067, 8111), 'click.argument', 'click.argument', (['"""mutation_proba"""'], {'type': 'float'}), "('mutation_proba', type=float)\n", (8081, 8111), False, 'import click\n'), ((8115, 8152), 'click.argument', 'click.argument', (['"""n_new_ind"""'], {'type': 'int'}), "('n_new_ind', type=int)\n", (8129, 8152), False, 'import click\n'), ((815, 886), 
'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'img_database_size', 'size': 'block_array_size'}), '(low=0, high=img_database_size, size=block_array_size)\n', (832, 886), True, 'import numpy as np\n'), ((1151, 1176), 'utils.glue_images', 'glue_images', (['imgs_to_glue'], {}), '(imgs_to_glue)\n', (1162, 1176), False, 'from utils import slice_image, glue_images, load_target_img, load_cifar_imgs\n'), ((1626, 1651), 'utils.glue_images', 'glue_images', (['imgs_to_glue'], {}), '(imgs_to_glue)\n', (1637, 1651), False, 'from utils import slice_image, glue_images, load_target_img, load_cifar_imgs\n'), ((1674, 1745), 'perceptual_repr.extract_features', 'extract_features', ([], {'images': 'glued_imgs', 'layer_idx': '(7)', 'batch_size': 'batch_size'}), '(images=glued_imgs, layer_idx=7, batch_size=batch_size)\n', (1690, 1745), False, 'from perceptual_repr import extract_features\n'), ((1795, 1847), 'sklearn.metrics.pairwise_distances', 'pairwise_distances', (['target_img_repr', 'glued_imgs_repr'], {}), '(target_img_repr, glued_imgs_repr)\n', (1813, 1847), False, 'from sklearn.metrics import pairwise_distances\n'), ((2265, 2294), 'numpy.argsort', 'np.argsort', (['population_scores'], {}), '(population_scores)\n', (2275, 2294), True, 'import numpy as np\n'), ((3429, 3458), 'numpy.unique', 'np.unique', (['population'], {'axis': '(0)'}), '(population, axis=0)\n', (3438, 3458), True, 'import numpy as np\n'), ((3805, 3855), 'utils.slice_image', 'slice_image', ([], {'img': 'target_img', 'block_size': 'block_size'}), '(img=target_img, block_size=block_size)\n', (3816, 3855), False, 'from utils import slice_image, glue_images, load_target_img, load_cifar_imgs\n'), ((3904, 3977), 'perceptual_repr.extract_features', 'extract_features', ([], {'images': 'target_img[None, ...]', 'layer_idx': '(7)', 'batch_size': '(1)'}), '(images=target_img[None, ...], layer_idx=7, batch_size=1)\n', (3920, 3977), False, 'from perceptual_repr import extract_features\n'), ((8441, 
8460), 'pathlib.Path', 'Path', (['target_img_fn'], {}), '(target_img_fn)\n', (8445, 8460), False, 'from pathlib import Path\n'), ((8486, 8505), 'pathlib.Path', 'Path', (['output_folder'], {}), '(output_folder)\n', (8490, 8505), False, 'from pathlib import Path\n'), ((9210, 9279), 'utils.load_target_img', 'load_target_img', (['target_img_path', 'target_img_height', 'target_img_width'], {}), '(target_img_path, target_img_height, target_img_width)\n', (9225, 9279), False, 'from utils import slice_image, glue_images, load_target_img, load_cifar_imgs\n'), ((9330, 9357), 'utils.load_cifar_imgs', 'load_cifar_imgs', (['"""data"""', '(16)'], {}), "('data', 16)\n", (9345, 9357), False, 'from utils import slice_image, glue_images, load_target_img, load_cifar_imgs\n'), ((1264, 1288), 'PIL.Image.fromarray', 'PIL.Image.fromarray', (['img'], {}), '(img)\n', (1283, 1288), False, 'import PIL\n'), ((2699, 2736), 'numpy.random.random', 'np.random.random', (['first_parents.shape'], {}), '(first_parents.shape)\n', (2715, 2736), True, 'import numpy as np\n'), ((3064, 3109), 'numpy.random.random', 'np.random.random', (['individuals_to_mutate.shape'], {}), '(individuals_to_mutate.shape)\n', (3080, 3109), True, 'import numpy as np\n'), ((4756, 4813), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'n_select', 'size': '(n_reprod,)'}), '(low=0, high=n_select, size=(n_reprod,))\n', (4773, 4813), True, 'import numpy as np\n'), ((5178, 5223), 'numpy.concatenate', 'np.concatenate', (['(population, reprod_children)'], {}), '((population, reprod_children))\n', (5192, 5223), True, 'import numpy as np\n'), ((5925, 6003), 'numpy.concatenate', 'np.concatenate', (['(intermediate_population, mutated_population, new_individuals)'], {}), '((intermediate_population, mutated_population, new_individuals))\n', (5939, 6003), True, 'import numpy as np\n'), ((7676, 7699), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (7686, 7699), False, 'import click\n'), 
((7743, 7766), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (7753, 7766), False, 'import click\n'), ((7019, 7064), 'numpy.concatenate', 'np.concatenate', (['(population, new_individuals)'], {}), '((population, new_individuals))\n', (7033, 7064), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
from dgl.data.utils import load_graphs
import numpy as np
from sklearn import linear_model
plt.rcParams.update({'font.size': 14})
def design_size(design_file):
g, _ = load_graphs('data/dgl/' + design_file + '.def.dgl')
return g[0].num_nodes(), g[0].num_edges()
def analyze():
train_file = 'data/train.csv'
test_file = 'data/test.csv'
num_nodes = []
num_edges = []
runtimes = []
with open(train_file, 'r') as f:
f.readline() # to skip header
for line in f:
design_file, runtime = line.strip().split(',')
nodes, edges = design_size(design_file)
num_nodes.append(nodes)
num_edges.append(edges)
runtimes.append(float(runtime))
with open(test_file, 'r') as f:
f.readline() # to skip header
for line in f:
design_file, runtime = line.strip().split(',')
nodes, edges = design_size(design_file)
num_nodes.append(nodes)
num_edges.append(edges)
runtimes.append(float(runtime))
s = [x for x in zip(num_nodes, runtimes) if x[0] >= 5000 and x[0] <= 20000]
x, y = list(zip(*s))
plt.scatter(x, y)
plt.xlabel('Design Size (# cells)')
plt.ylabel('Runtime (seconds)')
plt.tight_layout()
plt.show()
def train():
train_file = 'data/train.csv'
test_file = 'data/test.csv'
num_nodes = []
num_edges = []
runtimes = []
with open(train_file, 'r') as f:
f.readline() # to skip header
for line in f:
design_file, runtime = line.strip().split(',')
nodes, edges = design_size(design_file)
num_nodes.append(nodes)
num_edges.append(edges)
runtimes.append(float(runtime))
regr = linear_model.LinearRegression()
regr.fit(np.array(num_nodes).reshape(-1, 1), np.array(runtimes).reshape(-1, 1))
num_nodes = []
num_edges = []
runtimes = []
with open(test_file, 'r') as f:
f.readline() # to skip header
for line in f:
design_file, runtime = line.strip().split(',')
nodes, edges = design_size(design_file)
num_nodes.append(nodes)
num_edges.append(edges)
runtimes.append(float(runtime))
pred = regr.predict(np.array(num_nodes).reshape(-1, 1))
# calculate avg error
errors = []
for i in range(len(pred)):
e = abs(pred[i].item() - runtimes[i]) / min(pred[i].item(), runtimes[i])
errors.append(e)
print(sum(errors) / len(errors))
if __name__ == '__main__':
analyze()
train()
| [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.rcParams.update",
"numpy.array",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.tight_layout",
"dgl.data.utils.load_graphs",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.show"
] | [((124, 162), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 14}"], {}), "({'font.size': 14})\n", (143, 162), True, 'import matplotlib.pyplot as plt\n'), ((205, 256), 'dgl.data.utils.load_graphs', 'load_graphs', (["('data/dgl/' + design_file + '.def.dgl')"], {}), "('data/dgl/' + design_file + '.def.dgl')\n", (216, 256), False, 'from dgl.data.utils import load_graphs\n'), ((1209, 1226), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {}), '(x, y)\n', (1220, 1226), True, 'import matplotlib.pyplot as plt\n'), ((1231, 1266), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Design Size (# cells)"""'], {}), "('Design Size (# cells)')\n", (1241, 1266), True, 'import matplotlib.pyplot as plt\n'), ((1271, 1302), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Runtime (seconds)"""'], {}), "('Runtime (seconds)')\n", (1281, 1302), True, 'import matplotlib.pyplot as plt\n'), ((1307, 1325), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1323, 1325), True, 'import matplotlib.pyplot as plt\n'), ((1330, 1340), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1338, 1340), True, 'import matplotlib.pyplot as plt\n'), ((1830, 1861), 'sklearn.linear_model.LinearRegression', 'linear_model.LinearRegression', ([], {}), '()\n', (1859, 1861), False, 'from sklearn import linear_model\n'), ((1875, 1894), 'numpy.array', 'np.array', (['num_nodes'], {}), '(num_nodes)\n', (1883, 1894), True, 'import numpy as np\n'), ((1911, 1929), 'numpy.array', 'np.array', (['runtimes'], {}), '(runtimes)\n', (1919, 1929), True, 'import numpy as np\n'), ((2363, 2382), 'numpy.array', 'np.array', (['num_nodes'], {}), '(num_nodes)\n', (2371, 2382), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from pylab import *
import pickle
import tensorflow as tf
import random
import os
from sklearn.model_selection import train_test_split
import matplotlib.lines as mlines
from random import randint
from sklearn import preprocessing
from sklearn.model_selection import KFold
import keras
from keras.models import Sequential
import itertools
from itertools import product
import glob
import os.path
from os import path
def find_best(network, K):
# For every file saved during the cross vallidation, it picks the one that returns the lowest loss in the test set
# and returns the best parameters for the network and the corresponding loss associated with it
# The argument "network" is 1 for the outcome mechanism and 2 for the treatment mechanism
all_filenames = glob.glob("*network{}.csv".format(network))
losses = dict()
keywords = []
for f in all_filenames:
df = pd.read_csv(f)
loss = np.array(df["1"])
key = f.split("/")[-1]
key = key[:-4]
key = "-".join(key.split("-")[1:])
if key not in losses:
losses[key] = []
losses[key].append(loss[~np.isnan(loss)][-1])
best = list(losses.keys())[0]
current = np.inf
for key in losses.keys():
if np.mean(losses[key]) < current:
current = np.mean(losses[key])
best = key
f = open("K0-" + best + ".pkl", "rb")
parameters = pickle.load(f)
return parameters, current
def divide_data(M, k, seed):
# The argument M is the data corresponding to matrix M in the main file and k is the number of folds
# This splits the data into k random folds, as the nuisance parameters have to be learnt with one part of the data
# and the ATE/ATT coefficients have to be learnt with the other part. The part indexed by "train" is used to
# learn the nuisances parameters and the part "test" is used to learn the parameters of interest (ATE/ATT)
# This data is used later in the neural_net function
X_test = []
Y_test = []
X_train = []
Y_train = []
kf = KFold(n_splits=k, random_state=seed, shuffle=True)
for train_index, test_index in kf.split(M):
x_train = M[train_index][:, :-1]
y_train = M[train_index][:, -1]
x_test = M[test_index][:, :-1]
y_test = M[test_index][:, -1]
X_train.append(x_train)
Y_train.append(y_train)
X_test.append(x_test)
Y_test.append(y_test)
return X_train, Y_train, X_test, Y_test
def weights_biases(perm):
# Returns the weights given the dimensions specified in the argument
# These weights are then used in the MLP function where they weight
# each input
initializer = tf.compat.v1.keras.initializers.glorot_normal()
weights = {}
for i in range(len(perm) - 1):
weights["h" + str(i)] = tf.Variable(
initializer([perm[i], perm[i + 1]]), trainable=True
)
weights["b" + str(i)] = tf.Variable(tf.zeros([1, perm[i + 1]]), trainable=True)
return weights
def train(
X_train, y_train, X_test, y_test, epoch, batchSize, optimizer, cost, x, y, sess
):
# Trains the neural network given the train and test data and specifications
# in the arguments
# For every batch computes the loss and gives the overall loss in both, the
# train set and the test set. The cost function is defined in the neural_net
# function below.
L = []
L_test = []
for e in range(epoch):
K = []
for k in range(len(X_test) // batchSize):
batchX_test = X_test[k * batchSize : (k + 1) * batchSize]
batchY_test = y_test[k * batchSize : (k + 1) * batchSize]
K.append(sess.run(cost, feed_dict={x: batchX_test, y: batchY_test}))
L_test.append(np.mean(K))
permutation = np.random.permutation(len(X_train))
for i in range(len(X_train) // batchSize):
I = permutation[i * batchSize : (i + 1) * batchSize]
sess.run(optimizer, feed_dict={x: X_train[I], y: y_train[I]})
L.append(sess.run(cost, feed_dict={x: X_train[I], y: y_train[I]}))
if i % 10 == 0:
print("Step " + str(i) + ", Minibatch Loss= " + "{:.6f}".format(L[-1]))
return L, L_test
def predict(X, batchSize, x, pred, sess):
# Gives the predictions of the output given the input X
P = []
print(len(X))
for i in range(len(X) // batchSize):
P.append(sess.run(pred, feed_dict={x: X[i * batchSize : (i + 1) * batchSize]}))
return np.concatenate(P)
def MLP(x, weights):
# Gives the output from the network. In each layer of the network, the input is
# multiplied by the corresponding weight and trasformed with the ReLu non linearity.
# It also returns the regularized l2 loss. The non linearity can be changed to
# "leaky_relu" or "sigmoid"
layer = tf.matmul(x, weights["h0"]) + weights["b0"]
reg_loss = tf.nn.l2_loss(weights["h0"])
for i in range(1, len(weights) // 2):
layer = (
tf.matmul(tf.nn.relu(layer), weights["h" + str(i)]) + weights["b" + str(i)]
)
reg_loss = reg_loss + tf.nn.l2_loss(weights["h" + str(i)])
return tf.squeeze(layer), reg_loss
def save_data(
q,
nr_layers,
perm,
batch_size,
lr,
reg_constant,
loss,
network,
L,
L_test,
y_test1,
pred_y_test,
):
# This function saves the data in files with the name indicating the k fold,
# the set of parameters used, and the network (the network is 1 for the
# outcome network or 2 for the treatment network)
filename = (
"K{}-Nr_Layers{}-perm{}-batch_size{}-lr{}-reg_constant{}-loss{}-network{}"
)
description = filename.format(
q, nr_layers, perm, batch_size, lr, reg_constant, loss, network
)
# In each csv file, it saves the train and test loss, the actual values of the
# output and the predicted ones
df1 = pd.DataFrame({"Loss_Train": L})
df2 = pd.DataFrame({"Loss_test": L_test})
df3 = pd.DataFrame({"Actual_values": y_test1})
df4 = pd.DataFrame({"Predicted_Values": pred_y_test})
df5 = pd.DataFrame({"Description": description}, index=[0])
df = pd.concat([df1, df2, df3, df4, df5], ignore_index=True, axis=1)
df.to_csv(description + ".csv")
# Creates pickle files for each of the csv files.
f = open(description + ".pkl", "wb")
pickle.dump(
{
"Nr_Layers": nr_layers,
"neurons": perm,
"batch_sizes": batch_size,
"lrs": lr,
"reg_constants": reg_constant,
"losses": loss,
},
f,
)
f.close()
def do_i_exist(q, nr_layers, perm, batch_size, lr, reg_constant, loss, network):
# Checks if the file is already saved so that it does not repeat the training
# for the same hyperparameters during the cross validation procedure later
filename = (
"K{}-Nr_Layers{}-perm{}-batch_size{}-lr{}-reg_constant{}-loss{}-network{}"
)
description = filename.format(
q, nr_layers, perm, batch_size, lr, reg_constant, loss, network
)
file_name = description + ".pkl"
return path.exists(file_name)
def neural_net(
Y_max,
Y_min,
k,
X_neural,
Y_neural,
X_theta,
Y_theta,
network,
cross_validate,
batch_sizes,
Nr_Layers,
neurons,
lrs,
reg_constants,
losses,
):
# The main neural network function, which given the input data and the
# hyperparameters returns the output from both, the first and the second
# network. This output is then to be used in the main file for the
# computation of the ATE/ATT and their standard errors.
# The data indexed by "neural" is used to learn the nuisance parameters
# and the part indexed by "theta" is used to compute the ATE/ATT
config = tf.ConfigProto(
intra_op_parallelism_threads=20,
inter_op_parallelism_threads=20,
allow_soft_placement=True,
device_count={"CPU": 20},
)
# Set the number of epochs
epochs = 50
# G0 are the predicted values of the first network (for the outcome mechanism)
# with the treatment D set to 0
# G1 are the predicted values of the first network (for the outcome mechanism)
# with the treatment D set to 1
# G are the predicted values for the first network (for the outcome mechanism)
# without changing the original input
# D is the treatment variable
# Y is the outcome variable
# M is the predicted outcome for the second netwrok (for the treatment mechanism)
G_0 = []
G_1 = []
G = []
D = []
Y = []
M = []
if cross_validate:
# Takes all possbile combinations of the hyperparameters set by the user and
# cross validates to find the best combination
possibilities = product(
batch_sizes, neurons, lrs, reg_constants, losses, Nr_Layers
)
else:
# Uses the best combinations of the hyperparameters after the cross validation
possibilities = product(
[batch_sizes], [neurons], [lrs], [reg_constants], [losses], [Nr_Layers]
)
for batch_size, neuron, lr, reg_constant, loss, nr_layers in possibilities:
for q in range(k):
perm = (neuron) * nr_layers
# For every fold q, check if for that particular combination of hyperparameters
# the file exists with the do_i_ exist function defined before. If it exists it
# tries the next combination, if not it performs the training below
if (
do_i_exist(
q, nr_layers, perm, batch_size, lr, reg_constant, loss, network
)
and cross_validate
):
continue
x_neural, x_theta = X_neural[q], X_theta[q]
y_theta = Y_theta[q]
y_neural = Y_neural[q]
X_train = x_neural
X_test = x_theta
y_train = y_neural
y_test = y_theta
if network == 2:
# If network is 1 you use the whole input X (which includes treatment D as
# the last column) to predict the outcome Y.
# But if network is 2 we are dealing with the treatment mechanism, thus we
# try to predict the treatment D which is in the last row in X. Thus we use
# that as "y" and the rest of the variables in X as the input
y_theta = x_theta[:, -1]
x_theta = x_theta[:, :-1]
y_train = X_train[:, -1]
y_test = X_test[:, -1]
X_train = X_train[:, :-1]
X_test = X_test[:, :-1]
tf.compat.v1.reset_default_graph()
# Construct the boundaries for the piecewise constant learning rate
boundary_a = (epochs * (len(X_train) // batch_size)) // 2
boundary_b = boundary_a + boundary_a // 2
boundaries = [boundary_a, boundary_b]
n_input = np.shape(X_test)[1]
# Create the tensorflow placeholders for the input and the output with the
# corresponding shapes
x = tf.placeholder("float", [batch_size, np.shape(X_test)[1]])
y = tf.placeholder("float", [batch_size])
# Use the function "weights_biases" defined before to generate the weights with the
# dimensions specified to be then used in MLP function, multiplying the input and
# giving the output and the reg_loss to be used in the cost function below to
# penalize very big or very large weights
weights = weights_biases((n_input,) + perm + (1,))
output, reg_loss = MLP(x, weights)
# Given a type of loss function defined by the user, it computes the cost accordingly
if loss == "MSE":
pred = output
cost = tf.keras.losses.MSE(y, output)
elif loss == "Cross Entropy":
pred = tf.nn.sigmoid(output)
cost = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=output)
)
# Add the regularization term to the loss function, set the piecewise constant learning
# rate using the boundaries created earlier and with these, use the adam optimizer to
# find the weights that minimize the cost.
cost = cost + reg_constant * reg_loss
global_step = tf.Variable(0)
learningRate = tf.train.piecewise_constant(global_step, boundaries, lr)
optimizer = tf.train.AdamOptimizer(learning_rate=learningRate).minimize(
cost
)
init = tf.initialize_all_variables()
with tf.Session(config=config) as sess:
sess.run(init)
# Lastly, train the network with the optimized weights and get the loss in both the
# train and test set
L, L_test = train(
X_train,
y_train,
X_test,
y_test,
epochs,
batch_size,
optimizer,
cost,
x,
y,
sess,
)
print("Optimization finished")
print("Mean squared error:", L_test[-1])
pred_y_test = predict(X_test, batch_size, x, pred, sess)
y_test1 = y_test[: len(pred_y_test)]
if cross_validate:
# If this training is part of the cross validation, it saves the losses, the
# actual values and the predictions in the csv and pickle files as described
# in the save_data function
save_data(
q,
nr_layers,
perm,
batch_size,
lr,
reg_constant,
loss,
network,
L,
L_test,
y_test1,
pred_y_test,
)
continue
# For each network selected, the function returnes the actual values and the predicted
# ones. For network 1, it also returns the predictions with the input D set to 0 (G0)
# and the output D set to 1 (G1) which is needed to construct the scores for obtaining
# the ATE and ATT in the main file.
if network == 1:
x_theta_1 = np.copy(x_theta)
x_theta_1[:, -1] = 1
x_theta_0 = np.copy(x_theta)
x_theta_0[:, -1] = 0
G.append(
predict(x_theta, batch_size, x, pred, sess) * (Y_max - Y_min)
+ Y_min
)
G_0.append(
predict(x_theta_0, batch_size, x, pred, sess) * (Y_max - Y_min)
+ Y_min
)
G_1.append(
predict(x_theta_1, batch_size, x, pred, sess) * (Y_max - Y_min)
+ Y_min
)
D.append(x_theta[: len(G_0[-1]), -1])
Y.append((y_theta[: len(G_0[-1])]) * (Y_max - Y_min) + Y_min)
else:
M.append(predict(x_theta, batch_size, x, pred, sess))
D.append(y_theta[: len(M[-1])])
if cross_validate:
return None
if network == 1:
return G, G_1, G_0, D, Y, L_test
else:
return M, D
| [
"tensorflow.keras.losses.MSE",
"pandas.read_csv",
"numpy.array",
"tensorflow.compat.v1.keras.initializers.glorot_normal",
"sklearn.model_selection.KFold",
"os.path.exists",
"numpy.mean",
"tensorflow.placeholder",
"itertools.product",
"tensorflow.Session",
"tensorflow.nn.sigmoid",
"tensorflow.m... | [((1498, 1512), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1509, 1512), False, 'import pickle\n'), ((2169, 2219), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': 'k', 'random_state': 'seed', 'shuffle': '(True)'}), '(n_splits=k, random_state=seed, shuffle=True)\n', (2174, 2219), False, 'from sklearn.model_selection import KFold\n'), ((2823, 2870), 'tensorflow.compat.v1.keras.initializers.glorot_normal', 'tf.compat.v1.keras.initializers.glorot_normal', ([], {}), '()\n', (2868, 2870), True, 'import tensorflow as tf\n'), ((4706, 4723), 'numpy.concatenate', 'np.concatenate', (['P'], {}), '(P)\n', (4720, 4723), True, 'import numpy as np\n'), ((5115, 5143), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (["weights['h0']"], {}), "(weights['h0'])\n", (5128, 5143), True, 'import tensorflow as tf\n'), ((6171, 6202), 'pandas.DataFrame', 'pd.DataFrame', (["{'Loss_Train': L}"], {}), "({'Loss_Train': L})\n", (6183, 6202), True, 'import pandas as pd\n'), ((6214, 6249), 'pandas.DataFrame', 'pd.DataFrame', (["{'Loss_test': L_test}"], {}), "({'Loss_test': L_test})\n", (6226, 6249), True, 'import pandas as pd\n'), ((6261, 6301), 'pandas.DataFrame', 'pd.DataFrame', (["{'Actual_values': y_test1}"], {}), "({'Actual_values': y_test1})\n", (6273, 6301), True, 'import pandas as pd\n'), ((6313, 6360), 'pandas.DataFrame', 'pd.DataFrame', (["{'Predicted_Values': pred_y_test}"], {}), "({'Predicted_Values': pred_y_test})\n", (6325, 6360), True, 'import pandas as pd\n'), ((6372, 6425), 'pandas.DataFrame', 'pd.DataFrame', (["{'Description': description}"], {'index': '[0]'}), "({'Description': description}, index=[0])\n", (6384, 6425), True, 'import pandas as pd\n'), ((6436, 6499), 'pandas.concat', 'pd.concat', (['[df1, df2, df3, df4, df5]'], {'ignore_index': '(True)', 'axis': '(1)'}), '([df1, df2, df3, df4, df5], ignore_index=True, axis=1)\n', (6445, 6499), True, 'import pandas as pd\n'), ((6639, 6785), 'pickle.dump', 'pickle.dump', (["{'Nr_Layers': nr_layers, 
'neurons': perm, 'batch_sizes': batch_size, 'lrs':\n lr, 'reg_constants': reg_constant, 'losses': loss}", 'f'], {}), "({'Nr_Layers': nr_layers, 'neurons': perm, 'batch_sizes':\n batch_size, 'lrs': lr, 'reg_constants': reg_constant, 'losses': loss}, f)\n", (6650, 6785), False, 'import pickle\n'), ((7439, 7461), 'os.path.exists', 'path.exists', (['file_name'], {}), '(file_name)\n', (7450, 7461), False, 'from os import path\n'), ((8155, 8296), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'intra_op_parallelism_threads': '(20)', 'inter_op_parallelism_threads': '(20)', 'allow_soft_placement': '(True)', 'device_count': "{'CPU': 20}"}), "(intra_op_parallelism_threads=20,\n inter_op_parallelism_threads=20, allow_soft_placement=True,\n device_count={'CPU': 20})\n", (8169, 8296), True, 'import tensorflow as tf\n'), ((970, 984), 'pandas.read_csv', 'pd.read_csv', (['f'], {}), '(f)\n', (981, 984), True, 'import pandas as pd\n'), ((1001, 1018), 'numpy.array', 'np.array', (["df['1']"], {}), "(df['1'])\n", (1009, 1018), True, 'import numpy as np\n'), ((5055, 5082), 'tensorflow.matmul', 'tf.matmul', (['x', "weights['h0']"], {}), "(x, weights['h0'])\n", (5064, 5082), True, 'import tensorflow as tf\n'), ((5390, 5407), 'tensorflow.squeeze', 'tf.squeeze', (['layer'], {}), '(layer)\n', (5400, 5407), True, 'import tensorflow as tf\n'), ((9173, 9241), 'itertools.product', 'product', (['batch_sizes', 'neurons', 'lrs', 'reg_constants', 'losses', 'Nr_Layers'], {}), '(batch_sizes, neurons, lrs, reg_constants, losses, Nr_Layers)\n', (9180, 9241), False, 'from itertools import product\n'), ((9390, 9475), 'itertools.product', 'product', (['[batch_sizes]', '[neurons]', '[lrs]', '[reg_constants]', '[losses]', '[Nr_Layers]'], {}), '([batch_sizes], [neurons], [lrs], [reg_constants], [losses], [Nr_Layers]\n )\n', (9397, 9475), False, 'from itertools import product\n'), ((1337, 1357), 'numpy.mean', 'np.mean', (['losses[key]'], {}), '(losses[key])\n', (1344, 1357), True, 'import numpy as np\n'), 
((1392, 1412), 'numpy.mean', 'np.mean', (['losses[key]'], {}), '(losses[key])\n', (1399, 1412), True, 'import numpy as np\n'), ((3094, 3120), 'tensorflow.zeros', 'tf.zeros', (['[1, perm[i + 1]]'], {}), '([1, perm[i + 1]])\n', (3102, 3120), True, 'import tensorflow as tf\n'), ((3934, 3944), 'numpy.mean', 'np.mean', (['K'], {}), '(K)\n', (3941, 3944), True, 'import numpy as np\n'), ((11112, 11146), 'tensorflow.compat.v1.reset_default_graph', 'tf.compat.v1.reset_default_graph', ([], {}), '()\n', (11144, 11146), True, 'import tensorflow as tf\n'), ((11667, 11704), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[batch_size]'], {}), "('float', [batch_size])\n", (11681, 11704), True, 'import tensorflow as tf\n'), ((12939, 12953), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {}), '(0)\n', (12950, 12953), True, 'import tensorflow as tf\n'), ((12982, 13038), 'tensorflow.train.piecewise_constant', 'tf.train.piecewise_constant', (['global_step', 'boundaries', 'lr'], {}), '(global_step, boundaries, lr)\n', (13009, 13038), True, 'import tensorflow as tf\n'), ((13184, 13213), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (13211, 13213), True, 'import tensorflow as tf\n'), ((5231, 5248), 'tensorflow.nn.relu', 'tf.nn.relu', (['layer'], {}), '(layer)\n', (5241, 5248), True, 'import tensorflow as tf\n'), ((11430, 11446), 'numpy.shape', 'np.shape', (['X_test'], {}), '(X_test)\n', (11438, 11446), True, 'import numpy as np\n'), ((12340, 12370), 'tensorflow.keras.losses.MSE', 'tf.keras.losses.MSE', (['y', 'output'], {}), '(y, output)\n', (12359, 12370), True, 'import tensorflow as tf\n'), ((13234, 13259), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (13244, 13259), True, 'import tensorflow as tf\n'), ((12438, 12459), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['output'], {}), '(output)\n', (12451, 12459), True, 'import tensorflow as tf\n'), ((13064, 13114), 
'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learningRate'}), '(learning_rate=learningRate)\n', (13086, 13114), True, 'import tensorflow as tf\n'), ((15244, 15260), 'numpy.copy', 'np.copy', (['x_theta'], {}), '(x_theta)\n', (15251, 15260), True, 'import numpy as np\n'), ((15338, 15354), 'numpy.copy', 'np.copy', (['x_theta'], {}), '(x_theta)\n', (15345, 15354), True, 'import numpy as np\n'), ((1214, 1228), 'numpy.isnan', 'np.isnan', (['loss'], {}), '(loss)\n', (1222, 1228), True, 'import numpy as np\n'), ((11628, 11644), 'numpy.shape', 'np.shape', (['X_test'], {}), '(X_test)\n', (11636, 11644), True, 'import numpy as np\n'), ((12521, 12585), 'tensorflow.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', ([], {'labels': 'y', 'logits': 'output'}), '(labels=y, logits=output)\n', (12560, 12585), True, 'import tensorflow as tf\n')] |
from __future__ import division, print_function, absolute_import
import os
import numpy as np
from dipy.direction.peaks import (PeaksAndMetrics,
reshape_peaks_for_visualization)
from dipy.core.sphere import Sphere
from dipy.io.image import save_nifti
import h5py
def _safe_save(group, array, name):
    """ Store an array as a chunked dataset inside an HDF5 group.
    Nothing is written when ``array`` is ``None``, so optional fields can
    be passed through unconditionally.
    Parameters
    ----------
    group : HDF5 group
        Destination group that receives the new dataset.
    array : array or None
        Data to store; skipped entirely when ``None``.
    name : string
        Name given to the dataset inside ``group``.
    """
    if array is None:
        return
    dataset = group.create_dataset(name, shape=array.shape,
                                   dtype=array.dtype, chunks=True)
    dataset[:] = array
def load_peaks(fname, verbose=False):
    """ Load a PeaksAndMetrics HDF5 file (PAM5)
    Parameters
    ----------
    fname : string
        Filename of PAM5 file.
    verbose : bool
        Print summary information about the loaded file.
    Returns
    -------
    pam : PeaksAndMetrics object
    Raises
    ------
    IOError
        If ``fname`` does not have a ``.pam5`` extension or the stored
        file-format version is unsupported.
    """
    if os.path.splitext(fname)[1].lower() != '.pam5':
        raise IOError('This function supports only PAM5 (HDF5) files')
    pam = PeaksAndMetrics()
    # Use a context manager so the HDF5 handle is released even when the
    # version check or a missing mandatory dataset raises below (the
    # previous implementation leaked the open file on those error paths).
    with h5py.File(fname, 'r') as f:
        pamh = f['pam']
        version = f.attrs['version']
        if version != '0.0.1':
            raise IOError('Incorrect PAM5 file version {0}'.format(version,))
        # 'affine', 'shm_coeff' and 'odf' are optional datasets; a missing
        # key simply maps to None on the returned object.
        try:
            affine = pamh['affine'][:]
        except KeyError:
            affine = None
        peak_dirs = pamh['peak_dirs'][:]
        peak_values = pamh['peak_values'][:]
        peak_indices = pamh['peak_indices'][:]
        try:
            shm_coeff = pamh['shm_coeff'][:]
        except KeyError:
            shm_coeff = None
        sphere_vertices = pamh['sphere_vertices'][:]
        try:
            odf = pamh['odf'][:]
        except KeyError:
            odf = None
        pam.affine = affine
        pam.peak_dirs = peak_dirs
        pam.peak_values = peak_values
        pam.peak_indices = peak_indices
        pam.shm_coeff = shm_coeff
        pam.sphere = Sphere(xyz=sphere_vertices)
        pam.B = pamh['B'][:]
        # total_weight / ang_thr are stored as length-1 datasets; unwrap
        # them back to scalars.
        pam.total_weight = pamh['total_weight'][:][0]
        pam.ang_thr = pamh['ang_thr'][:][0]
        pam.gfa = pamh['gfa'][:]
        pam.qa = pamh['qa'][:]
        pam.odf = odf
    if verbose:
        print('PAM5 version')
        print(version)
        print('Affine')
        print(pam.affine)
        print('Dirs shape')
        print(pam.peak_dirs.shape)
        print('SH shape')
        if pam.shm_coeff is not None:
            print(pam.shm_coeff.shape)
        else:
            print('None')
        print('ODF shape')
        if pam.odf is not None:
            print(pam.odf.shape)
        else:
            print('None')
        print('Total weight')
        print(pam.total_weight)
        print('Angular threshold')
        print(pam.ang_thr)
        print('Sphere vertices shape')
        print(pam.sphere.vertices.shape)
    return pam
def save_peaks(fname, pam, affine=None, verbose=False):
    """ Save all important attributes of object PeaksAndMetrics in a PAM5 file
    (HDF5).
    Parameters
    ----------
    fname : string
        Filename of PAM5 file
    pam : PeaksAndMetrics
        Object holding peak_dirs, shm_coeffs and other attributes
    affine : array
        The 4x4 matrix transforming the date from native to world coordinates.
        PeaksAndMetrics should have that attribute but if not it can be
        provided here. Default None.
    verbose : bool
        Print summary information about the saved file.
    Returns
    -------
    pam : PeaksAndMetrics
        The object that was saved (unchanged), allowing call chaining.
    Raises
    ------
    IOError
        If ``fname`` does not have a ``.pam5`` extension.
    ValueError
        If ``pam`` lacks peak_dirs, peak_values or peak_indices.
    """
    if os.path.splitext(fname)[1] != '.pam5':
        raise IOError('This function saves only PAM5 (HDF5) files')
    if not (hasattr(pam, 'peak_dirs') and hasattr(pam, 'peak_values') and
            hasattr(pam, 'peak_indices')):
        msg = 'Cannot save object without peak_dirs, peak_values'
        msg += ' and peak_indices'
        raise ValueError(msg)
    # Context manager guarantees the HDF5 handle is closed (and buffers
    # flushed) even if one of the dataset writes raises; the previous
    # implementation leaked the open file on error paths.
    with h5py.File(fname, 'w') as f:
        group = f.create_group('pam')
        f.attrs['version'] = u'0.0.1'
        version_string = f.attrs['version']
        # Prefer the affine carried by the object; fall back to the
        # explicit argument otherwise.
        affine = pam.affine if hasattr(pam, 'affine') else affine
        shm_coeff = pam.shm_coeff if hasattr(pam, 'shm_coeff') else None
        odf = pam.odf if hasattr(pam, 'odf') else None
        _safe_save(group, affine, 'affine')
        _safe_save(group, pam.peak_dirs, 'peak_dirs')
        _safe_save(group, pam.peak_values, 'peak_values')
        _safe_save(group, pam.peak_indices, 'peak_indices')
        _safe_save(group, shm_coeff, 'shm_coeff')
        _safe_save(group, pam.sphere.vertices, 'sphere_vertices')
        _safe_save(group, pam.B, 'B')
        # Scalars are wrapped in length-1 arrays so they round-trip as
        # datasets (load_peaks unwraps them with [:][0]).
        _safe_save(group, np.array([pam.total_weight]), 'total_weight')
        _safe_save(group, np.array([pam.ang_thr]), 'ang_thr')
        _safe_save(group, pam.gfa, 'gfa')
        _safe_save(group, pam.qa, 'qa')
        _safe_save(group, odf, 'odf')
    if verbose:
        print('PAM5 version')
        print(version_string)
        print('Affine')
        print(affine)
        print('Dirs shape')
        print(pam.peak_dirs.shape)
        print('SH shape')
        if shm_coeff is not None:
            print(shm_coeff.shape)
        else:
            print('None')
        print('ODF shape')
        if odf is not None:
            print(pam.odf.shape)
        else:
            print('None')
        print('Total weight')
        print(pam.total_weight)
        print('Angular threshold')
        print(pam.ang_thr)
        print('Sphere vertices shape')
        print(pam.sphere.vertices.shape)
    return pam
def peaks_to_niftis(pam,
                    fname_shm,
                    fname_dirs,
                    fname_values,
                    fname_indices,
                    fname_gfa,
                    reshape_dirs=False):
    """ Write the fields of a PeaksAndMetrics object as NIfTI volumes.
    Saves the spherical-harmonic coefficients, peak directions, peak
    values, peak indices and GFA to the given filenames, all sharing
    ``pam.affine``. When ``reshape_dirs`` is True, the directions are
    reshaped for visualization before saving.
    """
    affine = pam.affine
    save_nifti(fname_shm, pam.shm_coeff.astype(np.float32), affine)
    pam_dirs = (reshape_peaks_for_visualization(pam) if reshape_dirs
                else pam.peak_dirs.astype(np.float32))
    save_nifti(fname_dirs, pam_dirs, affine)
    save_nifti(fname_values, pam.peak_values.astype(np.float32), affine)
    save_nifti(fname_indices, pam.peak_indices, affine)
    save_nifti(fname_gfa, pam.gfa, affine)
| [
"dipy.core.sphere.Sphere",
"os.path.splitext",
"dipy.io.image.save_nifti",
"h5py.File",
"numpy.array",
"dipy.direction.peaks.PeaksAndMetrics",
"dipy.direction.peaks.reshape_peaks_for_visualization"
] | [((1105, 1126), 'h5py.File', 'h5py.File', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (1114, 1126), False, 'import h5py\n'), ((1138, 1155), 'dipy.direction.peaks.PeaksAndMetrics', 'PeaksAndMetrics', ([], {}), '()\n', (1153, 1155), False, 'from dipy.direction.peaks import PeaksAndMetrics, reshape_peaks_for_visualization\n'), ((1921, 1948), 'dipy.core.sphere.Sphere', 'Sphere', ([], {'xyz': 'sphere_vertices'}), '(xyz=sphere_vertices)\n', (1927, 1948), False, 'from dipy.core.sphere import Sphere\n'), ((3816, 3837), 'h5py.File', 'h5py.File', (['fname', '"""w"""'], {}), "(fname, 'w')\n", (3825, 3837), False, 'import h5py\n'), ((5948, 5992), 'dipy.io.image.save_nifti', 'save_nifti', (['fname_dirs', 'pam_dirs', 'pam.affine'], {}), '(fname_dirs, pam_dirs, pam.affine)\n', (5958, 5992), False, 'from dipy.io.image import save_nifti\n'), ((6103, 6158), 'dipy.io.image.save_nifti', 'save_nifti', (['fname_indices', 'pam.peak_indices', 'pam.affine'], {}), '(fname_indices, pam.peak_indices, pam.affine)\n', (6113, 6158), False, 'from dipy.io.image import save_nifti\n'), ((6168, 6210), 'dipy.io.image.save_nifti', 'save_nifti', (['fname_gfa', 'pam.gfa', 'pam.affine'], {}), '(fname_gfa, pam.gfa, pam.affine)\n', (6178, 6210), False, 'from dipy.io.image import save_nifti\n'), ((4496, 4524), 'numpy.array', 'np.array', (['[pam.total_weight]'], {}), '([pam.total_weight])\n', (4504, 4524), True, 'import numpy as np\n'), ((4564, 4587), 'numpy.array', 'np.array', (['[pam.ang_thr]'], {}), '([pam.ang_thr])\n', (4572, 4587), True, 'import numpy as np\n'), ((5832, 5868), 'dipy.direction.peaks.reshape_peaks_for_visualization', 'reshape_peaks_for_visualization', (['pam'], {}), '(pam)\n', (5863, 5868), False, 'from dipy.direction.peaks import PeaksAndMetrics, reshape_peaks_for_visualization\n'), ((3450, 3473), 'os.path.splitext', 'os.path.splitext', (['fname'], {}), '(fname)\n', (3466, 3473), False, 'import os\n'), ((978, 1001), 'os.path.splitext', 'os.path.splitext', (['fname'], {}), 
'(fname)\n', (994, 1001), False, 'import os\n')] |
import json
import os
from typing import List, Tuple, Callable, Any
import numpy as np
from piepline.data_producer import BasicDataset, DataProducer
from pietoolbelt.pipeline.abstract_step import AbstractStepDirResult
from pietoolbelt.pipeline.predict.common import AbstractPredictResult
class ThresholdsSearchResult(AbstractStepDirResult):
    """Accumulates (threshold, error) pairs produced by a threshold search.

    All values are kept in memory; ``_dump_data`` writes them as pretty
    JSON to ``threshold.json`` in the step's output directory.
    """

    def __init__(self, path: str):
        super().__init__(path)
        self._meta_file_path = os.path.join(path, 'threshold.json')
        # 'values' collects every evaluated threshold; 'result' holds the
        # single final pair once the search finishes.
        self._thresholds = {'result': None, 'values': []}

    def add_cmb(self, thresh: float, err: float):
        """Record one evaluated (threshold, error) combination."""
        self._thresholds['values'].append({'thresh': thresh, 'err': err})

    def set_res(self, thresh: float, err: float):
        """Store the final best (threshold, error) pair.

        Bug fix: the original called ``.append`` on
        ``self._thresholds['result']``, which is initialised to ``None``
        and therefore always raised AttributeError; assign the pair
        instead.
        """
        self._thresholds['result'] = {'thresh': thresh, 'err': err}

    def _dump_data(self):
        # Persist the accumulated thresholds as pretty-printed JSON.
        with open(self._meta_file_path, 'w') as meta_file:
            json.dump(self._thresholds, meta_file, indent=4)
class ThresholdsSearch:
    """Grid search over binarisation thresholds for a prediction result.

    Each candidate threshold turns the raw predictions into 0/1 masks,
    which are scored against the dataset targets via ``calc_error`` and
    collapsed to a single accuracy by ``reduce``.
    """

    def __init__(self, predict_result: AbstractPredictResult, dataset: BasicDataset, calc_error: Callable[[Any, Any], List[float]],
                 reduce: Callable[[List[float]], float]):
        self._predict_result = predict_result
        self._dataset = dataset
        self._calc_error = calc_error
        self._reduce = reduce

    def calc_accuracy_on_thresh(self, data_producer: DataProducer, threshold: float) -> float:
        """Binarise every prediction at ``threshold`` and reduce the errors."""
        per_item_errors = []
        for batch in data_producer.get_loader():
            binarised = [
                np.where(self._predict_result.get_predict(index) >= threshold, 1, 0).astype(np.float32)
                for index in batch['data_idx']
            ]
            batch_errors = self._calc_error(np.concatenate(binarised, axis=0), batch['target'].numpy())
            per_item_errors.extend(batch_errors)
        return float(self._reduce(per_item_errors))

    def run(self, thresholds: List[float], batch_size: int, workers_num: int) -> Tuple[float, float]:
        """Return ``(best_threshold, best_accuracy)`` over all candidates."""
        producer = DataProducer(self._dataset, batch_size=batch_size, num_workers=workers_num).pass_indices(need_pass=True)
        best_idx, best_accuracy = None, None
        for position, candidate in enumerate(thresholds):
            accuracy = self.calc_accuracy_on_thresh(producer, candidate)
            if best_accuracy is None or accuracy > best_accuracy:
                best_idx, best_accuracy = position, accuracy
        return thresholds[best_idx], best_accuracy
| [
"numpy.where",
"os.path.join",
"numpy.concatenate",
"piepline.data_producer.DataProducer",
"json.dump"
] | [((442, 478), 'os.path.join', 'os.path.join', (['path', '"""threshold.json"""'], {}), "(path, 'threshold.json')\n", (454, 478), False, 'import os\n'), ((886, 934), 'json.dump', 'json.dump', (['self._thresholds', 'meta_file'], {'indent': '(4)'}), '(self._thresholds, meta_file, indent=4)\n', (895, 934), False, 'import json\n'), ((1998, 2073), 'piepline.data_producer.DataProducer', 'DataProducer', (['self._dataset'], {'batch_size': 'batch_size', 'num_workers': 'workers_num'}), '(self._dataset, batch_size=batch_size, num_workers=workers_num)\n', (2010, 2073), False, 'from piepline.data_producer import BasicDataset, DataProducer\n'), ((1767, 1803), 'numpy.concatenate', 'np.concatenate', (['cur_predicts'], {'axis': '(0)'}), '(cur_predicts, axis=0)\n', (1781, 1803), True, 'import numpy as np\n'), ((1662, 1695), 'numpy.where', 'np.where', (['(pred >= threshold)', '(1)', '(0)'], {}), '(pred >= threshold, 1, 0)\n', (1670, 1695), True, 'import numpy as np\n')] |
import os
import os.path as osp
import json
import pydicom
import imageio
import argparse
import numpy as np
from glob import glob
from tqdm import tqdm
from sklearn.metrics import cohen_kappa_score
import sys
sys.path.append("..")
from GLD.utils import AverageMeter, cal_dice, Logger
from data_processing import calc_interval
# Map DICOM PatientSex codes to the integer labels used in the results table.
SEX = {"M": 0, "F": 1}
def get_age_sex(path):
    """Return ``(age, sex)`` extracted from a DICOM series directory.

    Reads the header of the second ``I*`` file under ``path`` (index 1,
    kept from the original implementation). Age is approximated as study
    year minus birth year; sex is mapped through the module-level ``SEX``
    table.
    """
    dicom_files = glob(f"{path}/I*")
    header = pydicom.dcmread(dicom_files[1], force=True)
    study_year = int(header.StudyDate[:4])
    birth_year = int(header.PatientBirthDate[:4])
    return study_year - birth_year, SEX[header.PatientSex]
def area_change_ratio(pred_arr, cur_arr, tar_arr):
    """Absolute relative area change between prediction and target masks.

    For the whole lesion (label > 0) and each lesion class (1 and 2),
    computes |pred_area - target_area| / max(target_area, 1), clipped to
    [0, 1]. ``cur_arr`` is unused but kept for signature compatibility
    with the other metric functions.
    """
    def _ratio(pred_mask, tar_mask):
        # Clip the denominator to 1 so empty targets do not divide by zero.
        delta = (pred_mask.sum() - tar_mask.sum()) / tar_mask.sum().clip(1, None)
        return np.abs(delta).clip(0, 1)

    all_delta_c = _ratio(pred_arr > 0, tar_arr > 0)
    les1_delta_c = _ratio(pred_arr == 1, tar_arr == 1)
    les2_delta_c = _ratio(pred_arr == 2, tar_arr == 2)
    return all_delta_c, les1_delta_c, les2_delta_c
def location_change_ratio(pred_arr, tar_arr):
    """Distance between mask "mass centres" of prediction and target.

    For the whole lesion (label > 0) and each class (1 and 2), the per-axis
    mean of ``mask * index`` is taken as a (scaled) centre of mass and the
    Euclidean distance between prediction and target centres is returned
    as ``(all, lesion1, lesion2)``.
    """
    axes = np.meshgrid(*(np.arange(n) for n in pred_arr.shape), indexing="ij")

    def _centre(mask):
        return np.array([(mask * axis).mean() for axis in axes])

    distances = []
    for select in (lambda a: a > 0, lambda a: a == 1, lambda a: a == 2):
        gt_centre = _centre(select(tar_arr))
        pred_centre = _centre(select(pred_arr))
        distances.append(np.linalg.norm(pred_centre - gt_centre))
    return tuple(distances)
def coef_FoM(pred_arr, cur_arr, tar_arr):
    """Figure-of-merit for predicted change between two stages.

    Ground-truth change is ``cur_arr != tar_arr``; predicted change is
    ``pred_arr != cur_arr``. Returns the fraction of correctly predicted
    changes over the combined count of missed, predicted, mislabelled and
    spurious changes (denominator clipped to at least 1).
    """
    changed_gt = cur_arr != tar_arr
    changed_pred = pred_arr != cur_arr
    hit = changed_pred & changed_gt                # predicted a real change
    spurious = changed_pred & ~changed_gt          # predicted change where none occurred
    missed = ~changed_pred & changed_gt            # real change left unpredicted
    wrong_hit = hit & (pred_arr != tar_arr)        # change predicted with the wrong label
    denom = (missed.sum() + hit.sum() + wrong_hit.sum() + spurious.sum()).clip(1, None)
    return hit.sum() / denom
def flat_to_single(input_arr):
    """Collapse per-class channel scores into a single label map.

    A background channel equal to ``1 - sum(channels)`` is prepended
    along the last axis, so the returned argmax yields 0 for background
    and ``i + 1`` for input channel ``i``.
    """
    background = 1 - input_arr.sum(axis=-1, keepdims=True)
    stacked = np.concatenate((background, input_arr), axis=-1)
    return np.argmax(stacked, axis=-1)
if __name__ == "__main__":
    # Evaluation script: compares predicted lesion-development masks
    # against manual labels and reports Dice, Cohen's kappa, FoM and
    # location-change (LDR) metrics for both the current and the next
    # scan stage, writing per-case rows and summary averages to disk.
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--data", default="manual_same15")
    parser.add_argument("-r", "--radius", default=3)
    parser.add_argument("-p", "--phase", default="valid")
    parser.add_argument("-c", "--ckp", default="cald_23")
    args = parser.parse_args()
    args.ckp = f"{args.ckp}/{args.phase}"
    prefix = "/home/dyj/disk1/covid/for-github/LesionDevelopment"
    res_dir = osp.join(prefix, "output", args.ckp)
    # "multilesions" holds, per case, a list of per-stage pairs; keep only
    # the second element (the image path) of each pair.
    with open(f"{prefix}/data/{args.data}_{args.radius}/{args.phase}_slices.json", "r") as fp:
        les_names = json.load(fp)["multilesions"]
    les_names = [[y[1] for y in x] for x in les_names]
    logger = Logger(osp.join(res_dir, "0-results.txt"))
    # Column names are intentionally duplicated: the first metric group is
    # for the current stage, the second for the predicted next stage.
    logger.set_names(
        [
            "Name",
            "Age",
            "Gender",
            "Interval",
            "Dice all",
            "Dice l1",
            "Dice l2",
            "kappa",
            "FoM",
            "LDR all",
            "LDR l1",
            "LDR l2",
            "Dice all",
            "Dice l1",
            "Dice l2",
            "kappa",
            "FoM",
            "LDR all",
            "LDR l1",
            "LDR l2",
        ]
    )
    # Two meters per metric: index 0 = current stage, index 1 = next stage.
    all_dice = [AverageMeter() for _ in range(2)]
    les1_dice = [AverageMeter() for _ in range(2)]
    les2_dice = [AverageMeter() for _ in range(2)]
    kappa = [AverageMeter() for _ in range(2)]
    FoM = [AverageMeter() for _ in range(2)]
    all_ldr = [AverageMeter() for _ in range(2)]
    les1_ldr = [AverageMeter() for _ in range(2)]
    les2_ldr = [AverageMeter() for _ in range(2)]
    idx = 0
    for phase_lst in les_names:
        pred_name = phase_lst[2].replace("data", f"output/{args.ckp}")
        try:
            pred_res = imageio.imread(pred_name).astype(np.float32) / 255
            print(f"{idx} => {'/'.join(pred_name.split('/')[-7:])}")
            idx += 1
        except:
            # NOTE(review): bare except silently skips any case whose
            # prediction image is missing or unreadable.
            continue
        # The prediction image appears to be a 2x5 grid of panels; the
        # bottom row is sliced below into (prev label, cur label, next
        # label, cur pred, next pred) -- TODO confirm against the renderer.
        # manual label and prediction
        height, width = pred_res.shape[:2]
        height = height // 2
        width = width // 5
        pre_label = pred_res[-height:, :width, 1:]
        cur_label = pred_res[-height:, width : 2 * width, 1:]
        tar_label = pred_res[-height:, 2 * width : 3 * width, 1:]
        # Collapse channel scores into single-label maps (0 = background).
        pre_label = flat_to_single(pre_label)
        cur_label = flat_to_single(cur_label)
        tar_label = flat_to_single(tar_label)
        cur_pred = pred_res[-height:, 3 * width : 4 * width, 1:]
        tar_pred = pred_res[-height:, 4 * width : 5 * width, 1:]
        cur_pred = flat_to_single(cur_pred)
        tar_pred = flat_to_single(tar_pred)
        # dice for current stage
        all_dice[0].update(cal_dice(cur_pred, cur_label))
        les1_dice[0].update(cal_dice(cur_pred == 1, cur_label == 1))
        les2_dice[0].update(cal_dice(cur_pred == 2, cur_label == 2))
        # Kappa
        kappa[0].update(cohen_kappa_score(cur_label.reshape(-1), cur_pred.reshape(-1)))
        # FoM
        FoM[0].update(coef_FoM(cur_pred, pre_label, cur_label))
        lcr = location_change_ratio(cur_pred, cur_label)
        all_ldr[0].update(lcr[0])
        les1_ldr[0].update(lcr[1])
        les2_ldr[0].update(lcr[2])
        # dice for next stage
        all_dice[1].update(cal_dice(tar_pred, tar_label))
        les1_dice[1].update(cal_dice(tar_pred == 1, tar_label == 1))
        les2_dice[1].update(cal_dice(tar_pred == 2, tar_label == 2))
        # Kappa
        kappa[1].update(cohen_kappa_score(tar_label.reshape(-1), tar_pred.reshape(-1)))
        # FoM
        FoM[1].update(coef_FoM(tar_pred, cur_label, tar_label))
        lcr = location_change_ratio(tar_pred, tar_label)
        all_ldr[1].update(lcr[0])
        les1_ldr[1].update(lcr[1])
        les2_ldr[1].update(lcr[2])
        cur_name = osp.join(*(pred_name.split("/")[-6:]))
        # The sidecar .txt file stores the DICOM directory for this case.
        with open(
            osp.join(prefix, "data", "original", "images", f"{osp.join(*(cur_name.split('/')[2:5]))}.txt"), "r"
        ) as fp:
            dcm_path = fp.readline()
        # NOTE(review): ``old_pre`` and ``new_pre`` are not defined anywhere
        # in this file -- this line raises NameError when reached. Define
        # the old/new path prefixes (or remove the replace) before running.
        dcm_path = dcm_path.replace(old_pre, new_pre)
        age, sex = get_age_sex(dcm_path)
        # Interval (in days?) between consecutive scan dates -- TODO confirm
        # units against data_processing.calc_interval.
        interval = calc_interval([x.split("/")[-2] for x in phase_lst])
        logger.append(
            [
                cur_name,
                age,
                sex,
                int(interval[-1]),
                all_dice[0].val,
                les1_dice[0].val,
                les2_dice[0].val,
                kappa[0].val,
                FoM[0].val,
                all_ldr[0].val,
                les1_ldr[0].val,
                les2_ldr[0].val,
                all_dice[1].val,
                les1_dice[1].val,
                les2_dice[1].val,
                kappa[1].val,
                FoM[1].val,
                all_ldr[1].val,
                les1_ldr[1].val,
                les2_ldr[1].val,
            ]
        )
    logger.close()
    # Summary: each line reports "<current stage avg> <next stage avg>".
    print_str = [f"the average dice for the whole is {all_dice[0].avg} {all_dice[1].avg}"]
    print_str.append(f"the average dice for lesion 1 is {les1_dice[0].avg} {les1_dice[1].avg}")
    print_str.append(f"the average dice for lesion 2 is {les2_dice[0].avg} {les2_dice[1].avg}")
    print_str.append(f"the average kappa for the whole is {kappa[0].avg} {kappa[1].avg}")
    print_str.append(f"the average FoM for the whole is {FoM[0].avg} {FoM[1].avg}")
    print_str.append(f"the average LDR for the whole is {all_ldr[0].avg} {all_ldr[1].avg}")
    print_str.append(f"the average LDR for lesion 1 is {les1_ldr[0].avg} {les1_ldr[1].avg}")
    print_str.append(f"the average LDR for lesion 2 is {les2_ldr[0].avg} {les2_ldr[1].avg}")
    print("\n".join(print_str))
    with open(osp.join(res_dir, "metrics.txt"), "w") as fp:
        fp.write("\n".join(print_str) + "\n")
| [
"numpy.abs",
"imageio.imread",
"pydicom.dcmread",
"argparse.ArgumentParser",
"numpy.arange",
"GLD.utils.cal_dice",
"os.path.join",
"numpy.argmax",
"GLD.utils.AverageMeter",
"numpy.array",
"json.load",
"numpy.meshgrid",
"sys.path.append",
"glob.glob"
] | [((211, 232), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (226, 232), False, 'import sys\n'), ((423, 460), 'pydicom.dcmread', 'pydicom.dcmread', (['dcm_name'], {'force': '(True)'}), '(dcm_name, force=True)\n', (438, 460), False, 'import pydicom\n'), ((1262, 1302), 'numpy.meshgrid', 'np.meshgrid', (['*sub_indices'], {'indexing': '"""ij"""'}), "(*sub_indices, indexing='ij')\n", (1273, 1302), True, 'import numpy as np\n'), ((2502, 2525), 'numpy.argmax', 'np.argmax', (['arr'], {'axis': '(-1)'}), '(arr, axis=-1)\n', (2511, 2525), True, 'import numpy as np\n'), ((2568, 2593), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2591, 2593), False, 'import argparse\n'), ((2983, 3019), 'os.path.join', 'osp.join', (['prefix', '"""output"""', 'args.ckp'], {}), "(prefix, 'output', args.ckp)\n", (2991, 3019), True, 'import os.path as osp\n'), ((392, 410), 'glob.glob', 'glob', (['f"""{path}/I*"""'], {}), "(f'{path}/I*')\n", (396, 410), False, 'from glob import glob\n'), ((1210, 1222), 'numpy.arange', 'np.arange', (['x'], {}), '(x)\n', (1219, 1222), True, 'import numpy as np\n'), ((3242, 3276), 'os.path.join', 'osp.join', (['res_dir', '"""0-results.txt"""'], {}), "(res_dir, '0-results.txt')\n", (3250, 3276), True, 'import os.path as osp\n'), ((3782, 3796), 'GLD.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (3794, 3796), False, 'from GLD.utils import AverageMeter, cal_dice, Logger\n'), ((3833, 3847), 'GLD.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (3845, 3847), False, 'from GLD.utils import AverageMeter, cal_dice, Logger\n'), ((3884, 3898), 'GLD.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (3896, 3898), False, 'from GLD.utils import AverageMeter, cal_dice, Logger\n'), ((3931, 3945), 'GLD.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (3943, 3945), False, 'from GLD.utils import AverageMeter, cal_dice, Logger\n'), ((3976, 3990), 'GLD.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (3988, 
3990), False, 'from GLD.utils import AverageMeter, cal_dice, Logger\n'), ((4025, 4039), 'GLD.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (4037, 4039), False, 'from GLD.utils import AverageMeter, cal_dice, Logger\n'), ((4075, 4089), 'GLD.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (4087, 4089), False, 'from GLD.utils import AverageMeter, cal_dice, Logger\n'), ((4125, 4139), 'GLD.utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (4137, 4139), False, 'from GLD.utils import AverageMeter, cal_dice, Logger\n'), ((959, 978), 'numpy.abs', 'np.abs', (['all_delta_c'], {}), '(all_delta_c)\n', (965, 978), True, 'import numpy as np\n'), ((1009, 1029), 'numpy.abs', 'np.abs', (['les1_delta_c'], {}), '(les1_delta_c)\n', (1015, 1029), True, 'import numpy as np\n'), ((1060, 1080), 'numpy.abs', 'np.abs', (['les2_delta_c'], {}), '(les2_delta_c)\n', (1066, 1080), True, 'import numpy as np\n'), ((1749, 1769), 'numpy.array', 'np.array', (['all_p_mass'], {}), '(all_p_mass)\n', (1757, 1769), True, 'import numpy as np\n'), ((1772, 1793), 'numpy.array', 'np.array', (['all_gt_mass'], {}), '(all_gt_mass)\n', (1780, 1793), True, 'import numpy as np\n'), ((1821, 1840), 'numpy.array', 'np.array', (['l1_p_mass'], {}), '(l1_p_mass)\n', (1829, 1840), True, 'import numpy as np\n'), ((1843, 1863), 'numpy.array', 'np.array', (['l1_gt_mass'], {}), '(l1_gt_mass)\n', (1851, 1863), True, 'import numpy as np\n'), ((1891, 1910), 'numpy.array', 'np.array', (['l2_p_mass'], {}), '(l2_p_mass)\n', (1899, 1910), True, 'import numpy as np\n'), ((1913, 1933), 'numpy.array', 'np.array', (['l2_gt_mass'], {}), '(l2_gt_mass)\n', (1921, 1933), True, 'import numpy as np\n'), ((3136, 3149), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (3145, 3149), False, 'import json\n'), ((5225, 5254), 'GLD.utils.cal_dice', 'cal_dice', (['cur_pred', 'cur_label'], {}), '(cur_pred, cur_label)\n', (5233, 5254), False, 'from GLD.utils import AverageMeter, cal_dice, Logger\n'), ((5284, 5323), 
'GLD.utils.cal_dice', 'cal_dice', (['(cur_pred == 1)', '(cur_label == 1)'], {}), '(cur_pred == 1, cur_label == 1)\n', (5292, 5323), False, 'from GLD.utils import AverageMeter, cal_dice, Logger\n'), ((5353, 5392), 'GLD.utils.cal_dice', 'cal_dice', (['(cur_pred == 2)', '(cur_label == 2)'], {}), '(cur_pred == 2, cur_label == 2)\n', (5361, 5392), False, 'from GLD.utils import AverageMeter, cal_dice, Logger\n'), ((5795, 5824), 'GLD.utils.cal_dice', 'cal_dice', (['tar_pred', 'tar_label'], {}), '(tar_pred, tar_label)\n', (5803, 5824), False, 'from GLD.utils import AverageMeter, cal_dice, Logger\n'), ((5854, 5893), 'GLD.utils.cal_dice', 'cal_dice', (['(tar_pred == 1)', '(tar_label == 1)'], {}), '(tar_pred == 1, tar_label == 1)\n', (5862, 5893), False, 'from GLD.utils import AverageMeter, cal_dice, Logger\n'), ((5923, 5962), 'GLD.utils.cal_dice', 'cal_dice', (['(tar_pred == 2)', '(tar_label == 2)'], {}), '(tar_pred == 2, tar_label == 2)\n', (5931, 5962), False, 'from GLD.utils import AverageMeter, cal_dice, Logger\n'), ((8199, 8231), 'os.path.join', 'osp.join', (['res_dir', '"""metrics.txt"""'], {}), "(res_dir, 'metrics.txt')\n", (8207, 8231), True, 'import os.path as osp\n'), ((4311, 4336), 'imageio.imread', 'imageio.imread', (['pred_name'], {}), '(pred_name)\n', (4325, 4336), False, 'import imageio\n')] |
# ParaView programmable-filter metadata consumed by the plugin generator.
Name = 'ReshapeTable'
Label = 'Reshape Table'
FilterCategory = 'CSM Geophysics Filters'
Help = 'This filter will take a vtkTable object and reshape it. This filter essentially treats vtkTables as 2D matrices and reshapes them using numpy.reshape in a C contiguous manner. Unfortunately, data fields will be renamed arbitrarily because VTK data arrays require a name.'
NumberOfInputs = 1
InputDataType = 'vtkTable'
OutputDataType = 'vtkTable'
ExtraXml = ''
# UI-exposed properties: target reshape dimensions, injected as globals
# (``ncols``/``nrows``) into RequestData by ParaView.
Properties = dict(
    ncols=6,
    nrows=126,
    #Fortran_Ordering=False # TODO: Fortran_Ordering
)
def RequestData():
    """Reshape the input vtkTable into an ``nrows`` x ``ncols`` table.

    Runs inside a ParaView programmable filter: ``self`` and the
    ``nrows``/``ncols`` properties are injected by ParaView at run time.
    The total element count must match the input table exactly.
    """
    import numpy as np
    from vtk.util import numpy_support as nps
    table_in = self.GetInput()    # vtkTable
    table_out = self.GetOutput()  # vtkTable
    n_cols = table_in.GetNumberOfColumns()
    n_rows = table_in.GetColumn(0).GetNumberOfTuples()
    # Copy every input column into one 2D numpy array.
    values = np.empty((n_cols, n_rows))
    for col_idx in range(n_cols):
        values[col_idx] = nps.vtk_to_numpy(table_in.GetColumn(col_idx))
    # Only C ordering is supported: numpy_to_vtk requires contiguous
    # arrays, so Fortran ordering cannot be offered as an option.
    order = 'C'
    if (ncols * nrows) != (n_cols * n_rows):
        raise Exception('Total number of elements must remain %d. Check reshape dimensions.' % (n_cols * n_rows))
    # 2D reshape only, since the data model is a table.
    values = np.reshape(values, (nrows, ncols), order=order)
    table_out.SetNumberOfRows(nrows)
    # Rebuild output columns; VTK arrays need names, assigned as Field<i>.
    for col_idx in range(ncols):
        column = np.array(values[:, col_idx])  # force a contiguous copy
        vtk_col = nps.numpy_to_vtk(num_array=column, deep=True)
        vtk_col.SetName('Field%d' % col_idx)
        # AddColumn() did not propagate to the output table; add through
        # the row FieldData instead (work-around kept from the original).
        table_out.GetRowData().AddArray(vtk_col)
| [
"numpy.reshape",
"numpy.array",
"numpy.empty",
"vtk.util.numpy_support.numpy_to_vtk",
"vtk.util.numpy_support.vtk_to_numpy"
] | [((965, 987), 'numpy.empty', 'np.empty', (['(cols, rows)'], {}), '((cols, rows))\n', (973, 987), True, 'import numpy as np\n'), ((1513, 1558), 'numpy.reshape', 'np.reshape', (['data', '(nrows, ncols)'], {'order': 'order'}), '(data, (nrows, ncols), order=order)\n', (1523, 1558), True, 'import numpy as np\n'), ((1060, 1079), 'vtk.util.numpy_support.vtk_to_numpy', 'nps.vtk_to_numpy', (['c'], {}), '(c)\n', (1076, 1079), True, 'from vtk.util import numpy_support as nps\n'), ((1767, 1787), 'numpy.array', 'np.array', (['data[:, i]'], {}), '(data[:, i])\n', (1775, 1787), True, 'import numpy as np\n'), ((1851, 1893), 'vtk.util.numpy_support.numpy_to_vtk', 'nps.numpy_to_vtk', ([], {'num_array': 'col', 'deep': '(True)'}), '(num_array=col, deep=True)\n', (1867, 1893), True, 'from vtk.util import numpy_support as nps\n')] |
"""Pull gSSURGO data based on mukeys."""
# https://gdal.org/python/
# https://gis.stackexchange.com/a/200477/32531
import os
import sys
import sqlite3
import gdal
import pandas as pd
import numpy as np
from pyproj import Proj, transform
from .aoi import state_by_bbox
def query_gpkg(src_tif, gpkg_path, sql_query, out_raster):
    r"""Pull gSSURGO data based on mukeys.

    Rasterises the result of an SQL query against one or two state gSSURGO
    GeoPackage databases onto the grid of `src_tif`: each pixel's mukey is
    looked up in the query result and replaced by the queried attribute.

    :param str src_tif: location of an AOI tif file (pixel values are mukeys)
    :param str gpkg_path: location of folder containing state gpkg databases
    :param str sql_query: an SQL query string or location of an SQL query file
    :param str out_raster: location of the output raster (Float32, same grid
        and projection as `src_tif`)

    Examples
    --------
    gssurgo.query_gpkg(src_tif = "tests/aoi.tif", gpkg_path = "gpkgs", \
        sql_query = 'SELECT mukey, nonirryield_r \
                     FROM mucropyld \
                     WHERE (cropname = "Corn")', \
        out_raster = "tests/nonirryield_r.tif")
    """
    # Read the AOI raster and derive its bounding box from the affine
    # geotransform: (xmin, pixel width, 0, ymax, 0, pixel height).
    ds = gdal.Open(src_tif)
    gtransform = ds.GetGeoTransform()
    pixelWidth = gtransform[1]
    pixelHeight = gtransform[5]
    nrow = ds.RasterYSize
    ncol = ds.RasterXSize
    xmin = gtransform[0]
    ymax = gtransform[3]
    xmax = xmin + ncol * pixelWidth
    # NOTE(review): for north-up rasters GDAL's gtransform[5] is
    # conventionally NEGATIVE, in which case subtracting it here places
    # ymin above ymax; `ymax + nrow * pixelHeight` looks intended -- verify.
    ymin = ymax - nrow * pixelHeight
    # https://gis.stackexchange.com/a/78944/32531
    # Re-project the bounding-box corners from the raster's CONUS Albers
    # Equal Area CRS to WGS84 lat/long for the state lookup below.
    outProj = Proj(init='epsg:4326')
    inProj = Proj('+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23 +lon_0=-96 \
        +x_0=0 +y_0=0 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m \
        +no_defs')
    xmin, ymin = transform(inProj, outProj, xmin, ymin)
    xmax, ymax = transform(inProj, outProj, xmax, ymax)
    # One DataFrame row per pixel, keyed by that pixel's map-unit key (mukey).
    raw_values = ds.ReadAsArray()
    pixel_values = raw_values.flatten()
    pixel_values = pd.DataFrame(pixel_values, columns = ['mukey'])
    pixel_values.mukey = pixel_values.mukey.astype(int)
    # print(table.mukey.describe())
    # print(table[table.mukey.isin([186365, 1455241])])
    # find src gpkgs (state databases whose extent overlaps the bbox)
    src_gpkg = state_by_bbox(fpath = gpkg_path, ext = "gpkg", xmin = xmin,
                              xmax = xmax, ymin = ymin, ymax = ymax)
    # read data and join to raster index
    if(os.path.isfile(sql_query)):
        sql_query = open(sql_query, 'r').read()
    if(len(src_gpkg) == 1):
        db = sqlite3.connect(''.join(src_gpkg))
        table = pd.read_sql_query(sql_query, db)
        table.mukey = table.mukey.astype(int)
    else:
        # AOI spans two states: run the query on both and stack the results.
        db = sqlite3.connect(''.join(src_gpkg[0]))
        table1 = pd.read_sql_query(sql_query, db)
        table1.mukey = table1.mukey.astype(int)
        db = sqlite3.connect(''.join(src_gpkg[1]))
        table2 = pd.read_sql_query(sql_query, db)
        table2.mukey = table2.mukey.astype(int)
        # NOTE(review): DataFrame.append is deprecated/removed in modern
        # pandas; pd.concat([table1, table2]) is the drop-in replacement.
        table = table1.append(table2)
    # Left join keeps every pixel; pixels whose mukey is absent from the
    # query result get NaN in the queried attribute column.
    pixel_values = pd.merge(left = pixel_values, right = table,
                            how = 'left', on = 'mukey')
    pixel_values = pixel_values.iloc[:, 1].values
    pixel_values = np.reshape(pixel_values, (nrow, ncol))
    # print(pixel_values)
    # create output raster on the same grid/projection as the input
    driver = ds.GetDriver()
    out_data = driver.Create(out_raster, ncol, nrow, 1, gdal.GDT_Float32)
    out_data.SetGeoTransform(ds.GetGeoTransform())
    out_data.SetProjection(ds.GetProjection())
    out_band = out_data.GetRasterBand(1)
    out_band.WriteArray(pixel_values)
    out_band.FlushCache()
    out_data = None  # releasing the dataset flushes it to disk
    # os.remove("temp.tif")
| [
"pandas.read_sql_query",
"gdal.Open",
"numpy.reshape",
"pandas.merge",
"pyproj.transform",
"os.path.isfile",
"pyproj.Proj",
"pandas.DataFrame"
] | [((1019, 1037), 'gdal.Open', 'gdal.Open', (['src_tif'], {}), '(src_tif)\n', (1028, 1037), False, 'import gdal\n'), ((1381, 1403), 'pyproj.Proj', 'Proj', ([], {'init': '"""epsg:4326"""'}), "(init='epsg:4326')\n", (1385, 1403), False, 'from pyproj import Proj, transform\n'), ((1417, 1595), 'pyproj.Proj', 'Proj', (['"""+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"""'], {}), "(\n '+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs'\n )\n", (1421, 1595), False, 'from pyproj import Proj, transform\n'), ((1607, 1645), 'pyproj.transform', 'transform', (['inProj', 'outProj', 'xmin', 'ymin'], {}), '(inProj, outProj, xmin, ymin)\n', (1616, 1645), False, 'from pyproj import Proj, transform\n'), ((1663, 1701), 'pyproj.transform', 'transform', (['inProj', 'outProj', 'xmax', 'ymax'], {}), '(inProj, outProj, xmax, ymax)\n', (1672, 1701), False, 'from pyproj import Proj, transform\n'), ((1796, 1841), 'pandas.DataFrame', 'pd.DataFrame', (['pixel_values'], {'columns': "['mukey']"}), "(pixel_values, columns=['mukey'])\n", (1808, 1841), True, 'import pandas as pd\n'), ((2207, 2232), 'os.path.isfile', 'os.path.isfile', (['sql_query'], {}), '(sql_query)\n', (2221, 2232), False, 'import os\n'), ((2823, 2887), 'pandas.merge', 'pd.merge', ([], {'left': 'pixel_values', 'right': 'table', 'how': '"""left"""', 'on': '"""mukey"""'}), "(left=pixel_values, right=table, how='left', on='mukey')\n", (2831, 2887), True, 'import pandas as pd\n'), ((2993, 3031), 'numpy.reshape', 'np.reshape', (['pixel_values', '(nrow, ncol)'], {}), '(pixel_values, (nrow, ncol))\n', (3003, 3031), True, 'import numpy as np\n'), ((2376, 2408), 'pandas.read_sql_query', 'pd.read_sql_query', (['sql_query', 'db'], {}), '(sql_query, db)\n', (2393, 2408), True, 'import pandas as pd\n'), ((2533, 2565), 'pandas.read_sql_query', 'pd.read_sql_query', (['sql_query', 'db'], {}), 
'(sql_query, db)\n', (2550, 2565), True, 'import pandas as pd\n'), ((2683, 2715), 'pandas.read_sql_query', 'pd.read_sql_query', (['sql_query', 'db'], {}), '(sql_query, db)\n', (2700, 2715), True, 'import pandas as pd\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright [2020] [Indian Institute of Science, Bangalore]
SPDX-License-Identifier: Apache-2.0
"""
import pandas as pd
import numpy as np
import geopandas as gpd
from shapely.geometry import Point, MultiPolygon
def _day_number(date_str):
    """Convert a 'dd.mm.yyyy' date string to a day count.

    December 2019 maps to 1..31 and January 2020 to 32..62 so days are
    comparable across the two months. Returns None for dates outside this
    window (the seed data is not expected to contain any).
    """
    day, month, year = date_str.split('.')
    if year == '2019' and month == '12':
        return int(day)
    if year == '2020' and month == '01':
        return int(day) + 31
    return None


def _seed_wards(cityDF, seed_file):
    """Map each seed case to the ward whose polygon contains its LatLong.

    Returns a numpy array of ward numbers, one entry per matched seed row
    (a point falling in no ward contributes nothing, as in the original).
    """
    wards = []
    for i in range(0, len(seed_file)):
        lat, lon = (float(part) for part in seed_file.loc[i, 'LatLong'].split(', '))
        point = Point(lon, lat)
        for j in range(0, len(cityDF)):
            if MultiPolygon(cityDF.loc[j, 'geometry']).contains(point):
                wards.append(cityDF.loc[j, 'wardNo'])
                break
    return np.array(wards)


def _eligible_ids(individuals, seed, age, ward, exact):
    """Return ids of not-yet-infected individuals matching `age` and `ward`.

    With exact=False the age match is relaxed to the band [age-3, age+1],
    mirroring the original fallback search.
    """
    if exact:
        age_match = individuals['age'].values == age
    else:
        age_match = np.isin(individuals['age'].values, np.arange(age - 3, age + 2))
    ids = np.where(np.logical_and(age_match, individuals['wardNo'].values == ward))[0]
    infected = np.where(seed.loc[ids, 'infection_status'].values == 1)[0]
    return np.setdiff1d(ids, ids[infected])


def seedIndividuals(city):
    """Seed the initial infections of `city`'s synthetic population.

    Reads the city ward map, the synthetic individuals and the seed-case
    file, assigns each seed case to a matching individual (exact age+ward
    first, then a small age band) and writes infection status and timing
    back into the individuals file.

    :param str city: city name used to locate the data files
    :return: DataFrame with per-individual infection_status,
        time_of_infection and time_of_hospitalisation columns
    """
    cityDF = gpd.read_file("./data/base/"+city+"/city.geojson")
    individuals = pd.read_json('./data/'+city+'-100K-300students/individuals.json')
    N = len(individuals)
    seed_file = pd.read_csv('./data/base/'+city+'/seed_file.csv')
    # One row per individual, everything initialised to "not infected".
    seed = pd.DataFrame({"infection_status": np.full(N, 0),
                         "time_of_infection": np.full(N, 0),
                         "time_of_hospitalisation": np.full(N, 0)})

    seed_file.insert(seed_file.shape[1], "ward", _seed_wards(cityDF, seed_file))

    # Day 1 is the day the earliest seed case developed symptoms.
    first_symptoms_day_offset = min(
        d for d in (_day_number(s) for s in seed_file['date_onset_symptoms'])
        if d is not None)

    for i in range(0, len(seed_file)):
        age = min(seed_file.loc[i, 'age'], 80)  # ages are capped at 80
        ward = seed_file.loc[i, 'ward']
        # Prefer an individual with the exact age; fall back to an age band.
        ids = _eligible_ids(individuals, seed, age, ward, exact=True)
        if len(ids) == 0:
            ids = _eligible_ids(individuals, seed, age, ward, exact=False)
        if len(ids) == 0:
            continue  # no suitable individual found: ignore this seed case
        onset = _day_number(seed_file.loc[i, 'date_onset_symptoms'])
        admission = _day_number(seed_file.loc[i, 'date_admission_hospital'])
        if onset is None or admission is None:
            # Out-of-range date: the original code silently reused stale
            # values from a previous row here; skipping is the safe choice.
            continue
        chosen = ids[0]
        seed.at[chosen, 'infection_status'] = 1
        seed.at[chosen, 'time_of_infection'] = onset - first_symptoms_day_offset
        seed.at[chosen, 'time_of_hospitalisation'] = admission - first_symptoms_day_offset

    # Write the seeded state back into the individuals file.
    individuals = individuals.set_index("id")
    individuals['infection_status'] = seed['infection_status'].values
    individuals['time_of_infection'] = seed['time_of_infection'].values
    individuals['time_of_hospitalisation'] = seed['time_of_hospitalisation'].values
    individuals = individuals.reset_index()
    individuals.to_json('./data/'+city+'-100K-300students/individuals.json', orient='records')
    return seed
"geopandas.read_file",
"pandas.read_csv",
"numpy.logical_and",
"numpy.where",
"numpy.arange",
"shapely.geometry.Point",
"numpy.array",
"numpy.full",
"pandas.read_json",
"shapely.geometry.MultiPolygon"
] | [((304, 358), 'geopandas.read_file', 'gpd.read_file', (["('./data/base/' + city + '/city.geojson')"], {}), "('./data/base/' + city + '/city.geojson')\n", (317, 358), True, 'import geopandas as gpd\n'), ((378, 447), 'pandas.read_json', 'pd.read_json', (["('./data/' + city + '-100K-300students/individuals.json')"], {}), "('./data/' + city + '-100K-300students/individuals.json')\n", (390, 447), True, 'import pandas as pd\n'), ((485, 538), 'pandas.read_csv', 'pd.read_csv', (["('./data/base/' + city + '/seed_file.csv')"], {}), "('./data/base/' + city + '/seed_file.csv')\n", (496, 538), True, 'import pandas as pd\n'), ((1107, 1122), 'numpy.array', 'np.array', (['wards'], {}), '(wards)\n', (1115, 1122), True, 'import numpy as np\n'), ((886, 911), 'shapely.geometry.Point', 'Point', (['cases_ln', 'cases_lt'], {}), '(cases_ln, cases_lt)\n', (891, 911), False, 'from shapely.geometry import Point, MultiPolygon\n'), ((586, 599), 'numpy.full', 'np.full', (['N', '(0)'], {}), '(N, 0)\n', (593, 599), True, 'import numpy as np\n'), ((621, 634), 'numpy.full', 'np.full', (['N', '(0)'], {}), '(N, 0)\n', (628, 634), True, 'import numpy as np\n'), ((661, 674), 'numpy.full', 'np.full', (['N', '(0)'], {}), '(N, 0)\n', (668, 674), True, 'import numpy as np\n'), ((1813, 1904), 'numpy.logical_and', 'np.logical_and', (["(individuals['age'].values == age)", "(individuals['wardNo'].values == ward)"], {}), "(individuals['age'].values == age, individuals['wardNo'].\n values == ward)\n", (1827, 1904), True, 'import numpy as np\n'), ((966, 1005), 'shapely.geometry.MultiPolygon', 'MultiPolygon', (["cityDF.loc[j, 'geometry']"], {}), "(cityDF.loc[j, 'geometry'])\n", (978, 1005), False, 'from shapely.geometry import Point, MultiPolygon\n'), ((1995, 2070), 'numpy.where', 'np.where', (["(seed.loc[possible_individual_ids, 'infection_status'].values == 1)"], {}), "(seed.loc[possible_individual_ids, 'infection_status'].values == 1)\n", (2003, 2070), True, 'import numpy as np\n'), ((3447, 3522), 
'numpy.where', 'np.where', (["(seed.loc[possible_individual_ids, 'infection_status'].values == 1)"], {}), "(seed.loc[possible_individual_ids, 'infection_status'].values == 1)\n", (3455, 3522), True, 'import numpy as np\n'), ((3283, 3310), 'numpy.arange', 'np.arange', (['(age - 3)', '(age + 2)'], {}), '(age - 3, age + 2)\n', (3292, 3310), True, 'import numpy as np\n')] |
import numpy as np
def value_iteration(env, theta=0.0001, discount_factor=1.0):
    """Solve an MDP with (deterministic) value iteration.

    Repeatedly sweeps the states of ``env``, replacing each state's value
    with its best one-step lookahead value, until no state changes by
    ``theta`` or more; then extracts the greedy one-hot policy.

    Note: only the first entry of each transition list is used and its
    probability is assumed to be 1 (deterministic transitions).

    Args:
        env: OpenAI-style environment. ``env.P`` maps state -> action ->
            list of (prob, next_state, reward, ...) tuples; ``env.nS`` and
            ``env.nA`` give the number of states and actions.
        theta: Stopping threshold on the per-state value change.
        discount_factor: Discount applied to the successor state's value.

    Returns:
        A tuple (policy, V): ``policy`` is an (nS, nA) one-hot array of the
        greedy action per state and ``V`` is the converged value function.
    """
    V = np.zeros(env.nS)
    Policy = np.zeros([env.nS, env.nA])

    def lookahead(state_info):
        # Q-value of every action from one state, assuming the single
        # listed transition fires with probability 1.
        q = np.zeros(env.nA)
        for action_idx, action_name in enumerate(state_info):
            transition = state_info[action_name][0]
            q[action_idx] = transition[2] + discount_factor * V[transition[1]]
        return q

    # In-place (Gauss-Seidel style) sweeps until every state is stable.
    converged = False
    while not converged:
        converged = True
        for state_idx, state_name in enumerate(env.P):
            best_value = lookahead(env.P[state_name]).max()
            if abs(V[state_idx] - best_value) >= theta:
                converged = False
            V[state_idx] = best_value

    # Greedy policy extraction: one-hot encode the argmax action per state.
    for state_idx, state_name in enumerate(env.P):
        Policy[state_idx, :] = 0
        Policy[state_idx, np.argmax(lookahead(env.P[state_name]))] = 1
    return Policy, V
"numpy.abs",
"numpy.zeros",
"numpy.argmax",
"numpy.max"
] | [((545, 561), 'numpy.zeros', 'np.zeros', (['env.nS'], {}), '(env.nS)\n', (553, 561), True, 'import numpy as np\n'), ((575, 601), 'numpy.zeros', 'np.zeros', (['[env.nS, env.nA]'], {}), '([env.nS, env.nA])\n', (583, 601), True, 'import numpy as np\n'), ((1589, 1605), 'numpy.zeros', 'np.zeros', (['env.nA'], {}), '(env.nA)\n', (1597, 1605), True, 'import numpy as np\n'), ((2031, 2054), 'numpy.argmax', 'np.argmax', (['ActionValues'], {}), '(ActionValues)\n', (2040, 2054), True, 'import numpy as np\n'), ((741, 757), 'numpy.zeros', 'np.zeros', (['env.nA'], {}), '(env.nA)\n', (749, 757), True, 'import numpy as np\n'), ((1294, 1314), 'numpy.max', 'np.max', (['ActionValues'], {}), '(ActionValues)\n', (1300, 1314), True, 'import numpy as np\n'), ((1358, 1392), 'numpy.abs', 'np.abs', (['(V[StateIdx] - UpdatedValue)'], {}), '(V[StateIdx] - UpdatedValue)\n', (1364, 1392), True, 'import numpy as np\n')] |
# Author: <NAME>
# github.com/kaylani2
# kaylani AT gta DOT ufrj DOT br
## Load dataset, describe, hadle categorical attributes
## CICIDS used as an example
import pandas as pd
import numpy as np
import sys
# Random state for reproducibility
STATE = 0
## Hard to not go over 80 columns
CICIDS_DIRECTORY = '../../datasets/cicids/MachineLearningCVE/'
CICIDS_MONDAY_FILENAME = 'Monday-WorkingHours.pcap_ISCX.csv'
CICIDS_WEDNESDAY_FILENAME = 'Wednesday-workingHours.pcap_ISCX.csv'
CICIDS_MONDAY = CICIDS_DIRECTORY + CICIDS_MONDAY_FILENAME
CICIDS_WEDNESDAY = CICIDS_DIRECTORY + CICIDS_WEDNESDAY_FILENAME
###############################################################################
## Load dataset
###############################################################################
df = pd.read_csv (CICIDS_WEDNESDAY)
## Fraction dataframe for quicker testing (copying code is hard)
## NOTE(review): replace = True samples WITH replacement (duplicates
## possible) and random_state is hard-coded to 0 instead of using STATE.
df = df.sample (frac = 0.1, replace = True, random_state = 0)
print ('Using fractured dataframe.')
###############################################################################
## Display generic (dataset independent) information
###############################################################################
print ('Dataframe shape (lines, collumns):', df.shape, '\n')
print ('First 5 entries:\n', df [:5], '\n')
print ('Dataframe attributes:\n', df.keys (), '\n')
## Note the pesky spaces before ALMOST all attributes
## This is annoying and could be removed, but we'll try to operate on the
## dataset "as is"
df.info (verbose = False) # Make it true to find individual atribute types
print (df.describe ()) # Brief statistical description on NUMERICAL atributes
print ('Dataframe contains NaN values:', df.isnull ().values.any ())
nanColumns = [i for i in df.columns if df [i].isnull ().any ()]
print ('NaN columns:', nanColumns)
## Reminder: pearson only considers numerical atributes (ignores catgorical)
#correlationMatrix = df.corr (method = 'pearson')
#print ('Pearson:', correlationMatrix)
## You may want to plot the correlation matrix, but it gets hard to read
## when you have too many attributes. It's probably better to get the values
## you want with a set threshold directly from the matrix.
#import matplotlib.pyplot as plt
#import seaborn as sns
#plt.figure (figsize = (12,10))
#cor = df.corr ()
#sns.heatmap (cor, annot = True, cmap = plt.cm.Reds)
#plt.show ()
###############################################################################
## Display specific (dataset dependent) information, we're using CICIDS
###############################################################################
## Remember the pesky spaces?
print ('Label types:', df [' Label'].unique ())
print ('Label distribution:\n', df [' Label'].value_counts ())
## Note that we may want to group the attacks together when handling the
## target as a categorical attribute, since there are so few samples of some
## of them.
###############################################################################
## Perform some form of basic preprocessing
###############################################################################
## For basic feature selection the correlation matrix can help identify
## highly correlated features (which are bad/cursed). In order to find
## which features have the highest predictive power, it's necessary
## to convert the target to a numeric value. After that it's possible to use
## a simple filter approach or a wrapper method (backward elimination, forward
## selection...) to select features.
## You may also choose to convert the dataframe to a numpy array and continue.
## Remove NaN and inf values
df.replace ('Infinity', np.nan, inplace = True) ## Or other text values
df.replace (np.inf, np.nan, inplace = True) ## Remove infinity
df.replace (np.nan, 0, inplace = True)
## We can also use scikit-learn to use other strategies for substitution
print ('Dataframe contains NaN values:', df.isnull ().values.any ())
nanColumns = [i for i in df.columns if df [i].isnull ().any ()]
print ('NaN columns:', nanColumns)
###############################################################################
## Encode categorical attributes (this may be done before finding pearson)
###############################################################################
## Binary encoding of the target: BENIGN -> 0, every attack type -> 1.
print ('Label types before conversion:', df [' Label'].unique ())
df [' Label'] = df [' Label'].replace ('BENIGN', 0)
df [' Label'] = df [' Label'].replace ('DoS slowloris', 1)
df [' Label'] = df [' Label'].replace ('DoS Slowhttptest', 1)
df [' Label'] = df [' Label'].replace ('DoS Hulk', 1)
df [' Label'] = df [' Label'].replace ('DoS GoldenEye', 1)
df [' Label'] = df [' Label'].replace ('Heartbleed', 1)
print ('Label types after conversion:', df [' Label'].unique ())
df.info (verbose = False)
###############################################################################
## Convert dataframe to a numpy array (usually the last column is the target)
###############################################################################
X = df.iloc [:, :-1].values
y = df.iloc [:, -1].values
###############################################################################
## Split dataset into train and test sets
###############################################################################
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split (X, y, test_size = 1/5,
                                                     random_state = STATE)
print ('X_train shape:', X_train.shape)
print ('y_train shape:', y_train.shape)
print ('X_test shape:', X_test.shape)
print ('y_test shape:', y_test.shape)
###############################################################################
## Create learning model (LR)
###############################################################################
## Important: The regression model adjusts the attributes' scales internally
## NOTE(review): LinearRegression on a 0/1 target is a regression baseline;
## LogisticRegression would be the usual choice for classification.
from sklearn.linear_model import LinearRegression
regressor = LinearRegression ()
###############################################################################
## Fit the model
###############################################################################
regressor.fit (X_train, y_train)
y_pred_train = regressor.predict (X_train)
y_pred_test = regressor.predict (X_test)
###############################################################################
## Analyze results
###############################################################################
import math
from sklearn.metrics import mean_squared_error, r2_score
print ('\nTraining performance: ')
print ('MSE = %.3f' % mean_squared_error (y_train, y_pred_train) )
print ('RMSE = %.3f' % math.sqrt (mean_squared_error (y_train, y_pred_train)))
print ('R2 = %.3f' % r2_score (y_train, y_pred_train) )
print ('\nTesting performance: ')
print ('MSE = %.3f' % mean_squared_error (y_test , y_pred_test) )
print ('RMSE = %.3f' % math.sqrt (mean_squared_error (y_test , y_pred_test)))
print ('R2 = %.3f' % r2_score (y_test , y_pred_test) )
print ('\nCoefficients:\n',
       np.append (regressor.intercept_ , regressor.coef_ ))
sys.exit ()
| [
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.mean_squared_error",
"numpy.append",
"sys.exit",
"sklearn.metrics.r2_score",
"sklearn.linear_model.LinearRegression"
] | [((784, 813), 'pandas.read_csv', 'pd.read_csv', (['CICIDS_WEDNESDAY'], {}), '(CICIDS_WEDNESDAY)\n', (795, 813), True, 'import pandas as pd\n'), ((5358, 5417), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(1 / 5)', 'random_state': 'STATE'}), '(X, y, test_size=1 / 5, random_state=STATE)\n', (5374, 5417), False, 'from sklearn.model_selection import train_test_split\n'), ((5960, 5978), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (5976, 5978), False, 'from sklearn.linear_model import LinearRegression\n'), ((7152, 7162), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7160, 7162), False, 'import sys\n'), ((7098, 7146), 'numpy.append', 'np.append', (['regressor.intercept_', 'regressor.coef_'], {}), '(regressor.intercept_, regressor.coef_)\n', (7107, 7146), True, 'import numpy as np\n'), ((6594, 6635), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_train', 'y_pred_train'], {}), '(y_train, y_pred_train)\n', (6612, 6635), False, 'from sklearn.metrics import mean_squared_error, r2_score\n'), ((6761, 6792), 'sklearn.metrics.r2_score', 'r2_score', (['y_train', 'y_pred_train'], {}), '(y_train, y_pred_train)\n', (6769, 6792), False, 'from sklearn.metrics import mean_squared_error, r2_score\n'), ((6864, 6903), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'y_pred_test'], {}), '(y_test, y_pred_test)\n', (6882, 6903), False, 'from sklearn.metrics import mean_squared_error, r2_score\n'), ((7029, 7058), 'sklearn.metrics.r2_score', 'r2_score', (['y_test', 'y_pred_test'], {}), '(y_test, y_pred_test)\n', (7037, 7058), False, 'from sklearn.metrics import mean_squared_error, r2_score\n'), ((6673, 6714), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y_train', 'y_pred_train'], {}), '(y_train, y_pred_train)\n', (6691, 6714), False, 'from sklearn.metrics import mean_squared_error, r2_score\n'), ((6942, 6981), 'sklearn.metrics.mean_squared_error', 
'mean_squared_error', (['y_test', 'y_pred_test'], {}), '(y_test, y_pred_test)\n', (6960, 6981), False, 'from sklearn.metrics import mean_squared_error, r2_score\n')] |
import numpy as np
class GA(object):
    def __init__(self, nInd, nCrom, probCruz, probMut, nGer, fCusto, tipoSel='roleta', tipoCruz='ponto', tipoMut='bit-a-bit', elit=True, verbose=True):
        '''
        Genetic algorithm for cost-minimisation problems.
        nInd - Number of individuals
        nCrom - Number of chromosomes (problem dimension)
        probCruz - Crossover probability
        probMut - Mutation probability
        tipoSel - Selection type: 'roleta' (roulette) or 'torneio' (tournament). Roulette is the default
        tipoCruz - Crossover type: 'ponto' (single point) or 'uniforme' (uniform). Single point is the default
        tipoMut - Mutation type: 'bit-a-bit' (per bit) or 'aleatBit' (single random bit). Per bit is the default
        elit - Whether elitism is used. True by default.
        nGer - Number of generations
        fCusto - Cost function
        verbose - Whether the optimisation should log the generation and current best cost.
        '''
        self.nInd = nInd
        self.nCrom = nCrom
        self.probCruz = probCruz
        self.probMut = probMut
        self.nGer = nGer
        self.fCusto = fCusto
        self.tipoSel = tipoSel
        self.tipoCruz = tipoCruz
        self.tipoMut = tipoMut
        self.elit = elit
        self.verbose = verbose
        self.bestSol = None
        self.bestCusto = np.inf # Starts as infinity so the next cost is always lower
        # Create the initial population of random bit strings
        self.pop = np.random.randint(0, 2, (self.nInd, self.nCrom))
        self.custos = np.ones(self.nInd)
        # Start the optimisation (runs immediately on construction)
        self.inicia_otim()
    def avalia_pop(self):
        '''
        Evaluates the population and updates the cost vector.
        '''
        for i in range(self.nInd):
            self.custos[i] = self.fCusto(self.pop[i])
    def selecao_roleta(self):
        '''
        Biased-roulette (fitness-proportionate) selection.
        '''
        # Transform the cost vector so negative values are supported and the
        # largest costs become the smallest (and vice versa).
        # The constant 2 makes the largest normalised cost equal to one, so
        # it still keeps some chance of being selected.
        # If no costs are negative, the smallest cost maps to 2 and the
        # largest to 1.
        novo_custo = 2 - self.custos/np.abs(self.custos.max())
        soma_custo = novo_custo.sum()
        custo_acum = np.zeros(self.nInd)
        pais = self.pop.copy()
        for i in range(self.nInd):
            custo_acum[i] = np.sum(novo_custo[:i+1])
        for j in range(self.nInd):
            val_selected = soma_custo*np.random.random()
            pos_pai = np.where(custo_acum>=val_selected)[0][0]
            pais[j] = self.pop[pos_pai]
        return pais
    def selecao_torneio(self):
        '''
        Two-way tournament selection: the cheaper of two random
        individuals becomes a parent.
        '''
        pais = self.pop.copy()
        for i in range(self.nInd):
            pos_indv1 = np.random.randint(0, self.nInd)
            pos_indv2 = np.random.randint(0, self.nInd)
            if self.custos[pos_indv1]<=self.custos[pos_indv2]:
                pais[i] = self.pop[pos_indv1]
            else:
                pais[i] = self.pop[pos_indv2]
        return pais
    def cruzamento_ponto(self, pais):
        '''
        Single-point crossover of consecutive parent pairs.
        '''
        filhos = pais.copy()
        for i in range(0, self.nInd, 2):
            pai1 = pais[i]
            pai2 = pais[i+1]
            filho1 = pai1.copy()
            filho2 = pai2.copy()
            if np.random.random() <= self.probCruz:
                ponto_cruz = np.random.randint(0, self.nCrom)
                filho1[:ponto_cruz] = pai2[:ponto_cruz]
                filho2[:ponto_cruz] = pai1[:ponto_cruz]
            filhos[i] = filho1
            filhos[i+1] = filho2
        return filhos
    def cruzamento_uniforme(self, pais):
        '''
        Uniform crossover: each chromosome is swapped with 50% probability.
        '''
        filhos = pais.copy()
        for i in range(0, self.nInd, 2):
            pai1 = pais[i]
            pai2 = pais[i+1]
            filho1 = pai1.copy()
            filho2 = pai2.copy()
            if np.random.random() <= self.probCruz:
                for c in range(self.nCrom):
                    if np.random.randint(0,2)==1:
                        filho1[c] = pai2[c]
                        filho2[c] = pai1[c]
            filhos[i] = filho1
            filhos[i+1] = filho2
        return filhos
    def mutacao_bit(self, filhos):
        '''
        Per-bit mutation: every bit flips independently with probMut.
        '''
        filhos_m = filhos.copy()
        for i, f in enumerate(filhos):
            for j in range(self.nCrom):
                if np.random.random() <= self.probMut:
                    f[j] = 1-f[j] # Flips 1 to 0 and 0 to 1
            filhos_m[i] = f
        return filhos_m
    def mutacao_aleatbit(self, filhos):
        '''
        Random-bit mutation: with probMut a single random bit is flipped.
        '''
        filhos_m = filhos.copy()
        for i, f in enumerate(filhos):
            if np.random.random() <= self.probMut:
                bit_mut = np.random.randint(0, self.nCrom)
                f[bit_mut] = 1 - f[bit_mut] # Flips 1 to 0 and 0 to 1 at the random bit
            filhos_m[i] = f
        return filhos_m
    def set_best(self):
        '''
        Records the cheapest individual seen so far (elitism bookkeeping).
        '''
        best_pos = np.argmin(self.custos)
        custo = self.custos[best_pos]
        if custo < self.bestCusto:
            self.bestCusto = custo
            self.bestSol = self.pop[best_pos]
    def inicia_otim(self):
        '''
        Runs the generational loop: evaluate, select, cross over, mutate.
        '''
        print("=== Iniciando otimização ===\n")
        for g in range(self.nGer):
            self.avalia_pop()
            if self.elit: self.set_best()
            if self.verbose:
                if self.elit:
                    print("{:.0f}º geração - Melhor custo: {:.3f}".format(g+1, self.bestCusto))
                else:
                    print("{:.0f}º geração - Melhor custo: {:.3f}".format(g+1, self.custos.min()))
            # Parent selection
            if self.tipoSel == 'roleta':
                pais = self.selecao_roleta()
            else:
                pais = self.selecao_torneio()
            # Crossover to create the children
            if self.tipoCruz == 'ponto':
                filhos = self.cruzamento_ponto(pais)
            else:
                filhos = self.cruzamento_uniforme(pais)
            # Mutation of the children
            if self.tipoMut == 'bit-a-bit':
                filhos_m = self.mutacao_bit(filhos)
            else:
                filhos_m = self.mutacao_aleatbit(filhos)
            self.pop = filhos_m
            self.set_best()
            if self.verbose:
                if self.elit:
                    print("{:.0f}º geração - Melhor custo: {:.3f}".format(g+1, self.bestCusto))
                else:
                    print("{:.0f}º geração - Melhor custo: {:.3f}".format(g+1, self.custos.min()))
        print("\n=== Fim da otimização ===")
# ============================================
| [
"numpy.ones",
"numpy.random.random",
"numpy.where",
"numpy.sum",
"numpy.random.randint",
"numpy.zeros",
"numpy.argmin"
] | [((1474, 1522), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', '(self.nInd, self.nCrom)'], {}), '(0, 2, (self.nInd, self.nCrom))\n', (1491, 1522), True, 'import numpy as np\n'), ((1548, 1566), 'numpy.ones', 'np.ones', (['self.nInd'], {}), '(self.nInd)\n', (1555, 1566), True, 'import numpy as np\n'), ((2471, 2490), 'numpy.zeros', 'np.zeros', (['self.nInd'], {}), '(self.nInd)\n', (2479, 2490), True, 'import numpy as np\n'), ((5319, 5341), 'numpy.argmin', 'np.argmin', (['self.custos'], {}), '(self.custos)\n', (5328, 5341), True, 'import numpy as np\n'), ((2592, 2618), 'numpy.sum', 'np.sum', (['novo_custo[:i + 1]'], {}), '(novo_custo[:i + 1])\n', (2598, 2618), True, 'import numpy as np\n'), ((2980, 3011), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.nInd'], {}), '(0, self.nInd)\n', (2997, 3011), True, 'import numpy as np\n'), ((3037, 3068), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.nInd'], {}), '(0, self.nInd)\n', (3054, 3068), True, 'import numpy as np\n'), ((2694, 2712), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2710, 2712), True, 'import numpy as np\n'), ((3566, 3584), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3582, 3584), True, 'import numpy as np\n'), ((3635, 3667), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.nCrom'], {}), '(0, self.nCrom)\n', (3652, 3667), True, 'import numpy as np\n'), ((4159, 4177), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (4175, 4177), True, 'import numpy as np\n'), ((4998, 5016), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5014, 5016), True, 'import numpy as np\n'), ((5061, 5093), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.nCrom'], {}), '(0, self.nCrom)\n', (5078, 5093), True, 'import numpy as np\n'), ((2736, 2772), 'numpy.where', 'np.where', (['(custo_acum >= val_selected)'], {}), '(custo_acum >= val_selected)\n', (2744, 2772), True, 'import numpy as np\n'), ((4674, 4692), 
'numpy.random.random', 'np.random.random', ([], {}), '()\n', (4690, 4692), True, 'import numpy as np\n'), ((4265, 4288), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (4282, 4288), True, 'import numpy as np\n')] |
import re
from collections import defaultdict, Counter
from numbers import Number
from typing import Optional, List, Dict
import numpy as np
import torch
from allennlp.common import FromParams, Registrable
from dataclasses import dataclass, replace
from pycocoevalcap.bleu.bleu import Bleu
from pycocoevalcap.cider.cider import Cider
import third_party.detection_metrics.lib.Evaluator as det_evaluator
from gpv2.data.dataset import VqaExample, ClsExample, WebQaExample, LocalizationExample, \
CaptioningExample
from gpv2.data.gpv_datasets import COCO_CATEGORIES
from gpv2.data.synonyms import SYNONYMS
from gpv2.model.model import GPVExampleOutput
from gpv2.train.vqa2_eval_data import punct, periodStrip, commaStrip, manualMap, articles, \
contractions
from gpv2.utils import py_utils
from gpv2.utils.image_utils import get_image_size
from gpv2.utils.quiet_ptbtokenizer import QuitePTBTokenizer
def vqa_score(answer, ground_truth_answer_counts):
  """Standard VQA consensus accuracy: min(#agreeing annotators / 3, 1).

  Ground-truth keys are lower-cased before matching; `answer` is expected
  to already be lower-cased by the caller.
  """
  count = 0
  for gt_answer, n_annotators in ground_truth_answer_counts.items():
    if gt_answer.lower() == answer:
      count = n_annotators
  return min(count / 3, 1)
@dataclass(frozen=True)
class ResultKey(FromParams):
  """Immutable identifier for one metric value produced by a model."""
  metric_name: str
  subset_name: Optional[str] = None
  dataset_name: Optional[str] = None

  def __str__(self):
    parts = (self.dataset_name, self.subset_name, self.metric_name)
    return "/".join(filter(lambda part: part is not None, parts))

  def __repr__(self):
    return self.__str__()
class Evaluator(Registrable):
  """Base class for objects that compute corpus-wide evaluation metrics."""

  def evaluate(
      self, examples: List, predictions: Dict[str, GPVExampleOutput],
      allow_partial=False, subset_mapping=None
  ) -> Dict[ResultKey, Number]:
    """Compute metrics over a corpus of examples.

    :param examples: List of source examples
    :param predictions: example key -> model output
    :param allow_partial: Allow `predictions` to cover only a subset of
      `examples`, in which case only those predictions are evaluated
    :param subset_mapping: Function mapping an example to the names of the
      subsets it belongs to
    """
    raise NotImplementedError()
class PerExampleEvaluator(Evaluator):
  """Evaluator that scores every example on its own and then aggregates."""

  def evaluate_examples(self, examples: List, predictions: Dict[str, GPVExampleOutput])-> List[Dict[str, Number]]:
    raise NotImplementedError()

  def evaluate(
      self,
      examples: List,
      predictions: Dict[str, GPVExampleOutput],
      allow_partial=False,
      mean=True,
      subset_mapping=None
  ) -> Dict[ResultKey, Number]:
    """Aggregate per-example scores into corpus-level metrics.

    If `mean` is True each metric is averaged over its subset, otherwise a
    (sum, count) pair is reported. The subset named None always covers
    every evaluated example.
    """
    covered = [ex for ex in examples if ex.get_gpv_id() in predictions]
    if not allow_partial and len(covered) != len(examples):
      raise ValueError(f"Only {len(covered)}/{len(examples)} "
                       f"of examples have predictions")
    examples = covered

    per_example_scores = self.evaluate_examples(examples, predictions)
    per_metric_scores = py_utils.transpose_list_of_dicts(per_example_scores)

    # Map each subset name to the indices of its member examples
    id_to_ix = {ex.get_gpv_id(): ix for ix, ex in enumerate(examples)}
    subsets = defaultdict(list)
    subsets[None] = list(range(len(examples)))
    if subset_mapping is not None:
      for example in examples:
        example_ix = id_to_ix[example.get_gpv_id()]
        for subset in subset_mapping(example):
          subsets[subset].append(example_ix)

    results = {}
    for metric_name, metric_scores in per_metric_scores.items():
      metric_scores = np.array(metric_scores)
      for subset_name, ixs in subsets.items():
        key = ResultKey(metric_name, subset_name)
        if mean:
          results[key] = float(np.mean(metric_scores[ixs]))
        else:
          results[key] = (float(np.sum(metric_scores[ixs])), len(ixs))
    return results
@Evaluator.register("vqa-evaluator")
class VqaEvaluator(PerExampleEvaluator):
def evaluate_examples(self, examples: List[VqaExample],
predictions: Dict[str, GPVExampleOutput], add_scores=False):
out = []
for example in examples:
answer = predictions[example.gpv_id].text[0]
score = vqa_score(answer.lower(), example.answers)
out.append(dict(score=score))
return out
@Evaluator.register("cls-evaluator")
class ClsEvaluator(PerExampleEvaluator):
def evaluate_examples(self, examples: List[ClsExample], predictions: Dict[str, GPVExampleOutput]):
out = []
for example in examples:
answer = predictions[example.gpv_id].text[0].lower()
gt_answer = SYNONYMS[example.category]
out.append(dict(accuracy=answer in gt_answer))
return out
@Evaluator.register("webqa-evaluator")
class WebQaEvaluator(PerExampleEvaluator):
def evaluate_examples(self, examples: List[WebQaExample], predictions: Dict[str, GPVExampleOutput]):
out = []
for example in examples:
answer = predictions[example.get_gpv_id()].text[0].lower()
gt_answer = SYNONYMS[example.answer] if example.answer in SYNONYMS else [example.answer]
out.append(dict(accuracy=answer in gt_answer))
return out
@Evaluator.register("dce-cls")
class DceClsEvaluator(PerExampleEvaluator):
def __init__(self, top_k: Optional[List[int]]=(5,)):
self.top_k = top_k
def evaluate_examples(self, examples: List[ClsExample], predictions: Dict[str, GPVExampleOutput]):
out = []
for example in examples:
answers = [x.lower() for x in predictions[example.get_gpv_id()].text]
gt = [example.category]
vals = {"accuracy": answers[0] in gt}
if self.top_k is not None:
for k in self.top_k:
assert len(answers) >= k
vals[f"top{k}-acc"] = any(a in gt for a in answers[:k])
out.append(vals)
return out
def compute_vqa_accuracy(
    gt_answers: List[str],
    pred_answers: List[str]) -> List[float]:
  """Return the VQA consensus accuracy of each prediction against `gt_answers`.

  Both ground truth and predictions are normalized with `preprocess_answer`
  before the consensus counts are built.
  """
  consensus = Counter(preprocess_answer(ans) for ans in gt_answers)
  return [vqa_accuracy(preprocess_answer(ans), consensus) for ans in pred_answers]
def vqa_accuracy(npred_answer: str, gt_consensus: Counter):
  """VQA accuracy of one normalized answer: capped fraction of agreeing annotators."""
  agreement = gt_consensus[npred_answer] / 3
  return min(agreement, 1)
def processPunctuation(inText):
  """Strip punctuation from a VQA answer following the official eval rules."""
  # The comma-pattern check depends only on the raw input, which is never
  # mutated inside the loop, so it can be decided once up front.
  has_comma = re.search(commaStrip, inText) is not None
  outText = inText
  for p in punct:
    attached = p + ' ' in inText or ' ' + p in inText
    if attached or has_comma:
      outText = outText.replace(p, '')
    else:
      outText = outText.replace(p, ' ')
  return periodStrip.sub("", outText, re.UNICODE)
def processDigitArticle(inText):
  """Normalize digits and drop articles in a VQA answer (official eval rules)."""
  words = []
  for word in inText.lower().split():
    # setdefault deliberately mirrors the reference implementation: it also
    # inserts unknown words into the shared manualMap dict as a side effect.
    word = manualMap.setdefault(word, word)
    if word not in articles:
      words.append(word)
  for ix, word in enumerate(words):
    if word in contractions:
      words[ix] = contractions[word]
  return ' '.join(words)
def preprocess_answer(ans):
  """Lower-case, strip whitespace, and apply the VQA answer normalizers."""
  cleaned = ans.replace('\n', ' ').replace('\t', ' ').lower().strip()
  return processDigitArticle(processPunctuation(cleaned))
@Evaluator.register("opensce-vqa")
class DceVqaEvaluator(PerExampleEvaluator):
def __init__(self, top_k: Optional[List[int]]=(5,)):
self.top_k = top_k
if top_k is not None:
assert all(x > 0 for x in top_k)
def evaluate_examples(self, examples: List[VqaExample], predictions: Dict[str, GPVExampleOutput]):
out = []
for example in examples:
max_k = 1 if self.top_k is None else max(self.top_k)
answers = predictions[example.get_gpv_id()].text[:max_k]
gt = example.answers
scores = compute_vqa_accuracy(gt, answers)
vals = dict(acc=scores[0])
if self.top_k:
for k in self.top_k:
if k > len(scores):
raise ValueError(f"Cannot evaluate top-{k}, but only have top-{len(scores)} predictions")
vals[f"top{k}-acc"] = max(scores[:k])
out.append(vals)
return out
@Evaluator.register("localization-evaluator")
@Evaluator.register("detect-evaluator")
class LocalizationEvaluator(PerExampleEvaluator):
def __init__(self, iou_thresh=0.5):
self.iou_thresh = iou_thresh
def evaluate_examples(self, examples: List[LocalizationExample], predictions: Dict[str, GPVExampleOutput],
return_pr=False):
eval_engine = det_evaluator.Evaluator()
out = []
for i, ex in enumerate(examples):
pred = predictions[ex.gpv_id]
scores = pred.relevance
pred_boxes = pred.boxes.copy()
gt_boxes = np.array(ex.bboxes)
# Convert cx cy, w, h -> x1, y1, w, h
pred_boxes[:, 0] = pred_boxes[:, 0] - 0.5 * pred_boxes[:, 2]
pred_boxes[:, 1] = pred_boxes[:, 1] - 0.5 * pred_boxes[:, 3]
B = pred_boxes.shape[0]
all_boxes = det_evaluator.BoundingBoxes()
W, H = get_image_size(ex.image_id)
for b in range(B):
x, y, w, h = pred_boxes[b]
all_boxes.addBoundingBox(det_evaluator.BoundingBox(
imageName=ex.image_id,
classId=ex.category,
x=x,
y=y,
w=w,
h=h,
typeCoordinates=det_evaluator.CoordinatesType.Relative,
imgSize=(W, H),
bbType=det_evaluator.BBType.Detected,
classConfidence=scores[b],
format=det_evaluator.BBFormat.XYWH))
normalized_gt = all(all(val <= 1.0 for val in b) for b in gt_boxes)
if not normalized_gt:
# convert to relative coordinates
# TODO its a bit of hack to check this by looking coordinates > 1.0
# but we need this check atm since DCE stores relative scaling
# coco uses absolute
W, H = get_image_size(ex.image_id)
gt_boxes[:, 0] = gt_boxes[:, 0] / W
gt_boxes[:, 1] = gt_boxes[:, 1] / H
gt_boxes[:, 2] = gt_boxes[:, 2] / W
gt_boxes[:, 3] = gt_boxes[:, 3] / H
B = gt_boxes.shape[0]
for b in range(B):
x, y, w, h = gt_boxes[b]
all_boxes.addBoundingBox(det_evaluator.BoundingBox(
imageName=ex.image_id,
classId=ex.category,
x=x,
y=y,
w=w,
h=h,
typeCoordinates=det_evaluator.CoordinatesType.Relative,
imgSize=(W, H),
bbType=det_evaluator.BBType.GroundTruth,
format=det_evaluator.BBFormat.XYWH))
det_metrics = eval_engine.GetPascalVOCMetrics(all_boxes, self.iou_thresh)
if return_pr:
out.append(det_metrics[0])
else:
out.append({"AP": det_metrics[0]['AP']})
return out
def get_per_caption_data(examples: List[CaptioningExample], predictions):
  """Expand each example into one example per ground-truth caption.

  In per-caption evaluation the model output for an image is scored once
  for every ground-truth caption, while each expanded example keeps the
  full caption set as its references.
  """
  per_caption_examples = []
  per_caption_predictions = {}
  for example in examples:
    prediction = predictions[example.gpv_id]
    for caption in example.captions:
      expanded = CaptioningExample(caption.gpv_id, example.image_id, example.captions)
      per_caption_examples.append(expanded)
      per_caption_predictions[caption.gpv_id] = prediction
  return per_caption_examples, per_caption_predictions
@Evaluator.register("cap-evaluator")
class CaptionEvaluator(Evaluator):
def __init__(self, cider=True, bleu=4, per_caption=False):
self.cider = cider
self.bleu = bleu
self.per_caption = per_caption
scorers = {}
if cider:
# from exp.ours.eval.fast_cider import FastCider
scorers["cider"] = Cider()
if bleu:
scorers["bleu"] = Bleu(bleu)
self.scorers = scorers
self.tokenizer = QuitePTBTokenizer()
def evaluate(
self,
examples: List,
predictions: Dict[str, GPVExampleOutput],
allow_partial=False,
subset_mapping=None,
):
examples_with_predictions = [x for x in examples if x.get_gpv_id() in predictions]
if not allow_partial and (len(examples) != len(examples_with_predictions)):
raise ValueError(f"Only {len(examples_with_predictions)}/{len(examples)} "
f"of examples have predictions")
examples = examples_with_predictions
if self.per_caption:
examples, predictions = get_per_caption_data(examples, predictions)
subsets = defaultdict(list)
subsets[None] = examples
if subset_mapping is not None:
for example in examples:
example_subsets = subset_mapping(example)
for subset in example_subsets:
subsets[subset].append(example)
out = {}
for subset_name, examples in subsets.items():
all_scores = self._get_scores(examples, predictions)
results = {}
for name, scorer in self.scorers.items():
corpus_scores, _ = all_scores[name]
if isinstance(scorer, Cider):
results["cider"] = corpus_scores
elif isinstance(scorer, Bleu):
scores, _ = all_scores[name]
for i, score in enumerate(corpus_scores):
results[f"bleu{i+1}"] = score
if subset_name is not None:
results["n"] = len(examples)
out.update({ResultKey(metric_name=k, subset_name=subset_name): v for k, v in results.items()})
return out
def evaluate_examples(self, examples: List[CaptioningExample], predictions: Dict[str, GPVExampleOutput]):
all_scores = self._get_scores(examples, predictions)
per_examples_scores = [{} for _ in examples]
for name, scorer in self.scorers.items():
score, scores = all_scores[name]
if isinstance(scorer, Cider):
for score, ex_scores in zip(scores, per_examples_scores):
ex_scores["cider"] = score
elif isinstance(scorer, Bleu):
scores = py_utils.transpose_lists(scores)
for score, ex_scores in zip(scores, per_examples_scores):
for i, s in enumerate(score):
ex_scores[f"bleu{i+1}"] = s
return per_examples_scores
def _get_scores(self, examples: List[CaptioningExample], predictions: Dict[str, GPVExampleOutput]):
gts = {}
res = {}
for ix, instance in enumerate(examples):
key = instance.get_gpv_id()
assert key not in res
res[key] = [predictions[instance.get_gpv_id()].text[0]]
gts[key] = [x.caption.lower() for x in instance.captions]
res = self.tokenizer.tokenize(res)
gts = self.tokenizer.tokenize(gts)
scores = {}
for name, scorer in self.scorers.items():
if isinstance(scorer, Bleu):
scores[name] = scorer.compute_score(gts, res, verbose=0)
else:
scores[name] = scorer.compute_score(gts, res)
return scores
| [
"third_party.detection_metrics.lib.Evaluator.BoundingBoxes",
"pycocoevalcap.bleu.bleu.Bleu",
"dataclasses.dataclass",
"numpy.array",
"third_party.detection_metrics.lib.Evaluator.Evaluator",
"gpv2.data.dataset.CaptioningExample",
"re.search",
"gpv2.utils.image_utils.get_image_size",
"numpy.mean",
"... | [((1083, 1105), 'dataclasses.dataclass', 'dataclass', ([], {'frozen': '(True)'}), '(frozen=True)\n', (1092, 1105), False, 'from dataclasses import dataclass, replace\n'), ((6042, 6062), 'collections.Counter', 'Counter', (['ngt_answers'], {}), '(ngt_answers)\n', (6049, 6062), False, 'from collections import defaultdict, Counter\n'), ((6507, 6547), 'gpv2.train.vqa2_eval_data.periodStrip.sub', 'periodStrip.sub', (['""""""', 'outText', 're.UNICODE'], {}), "('', outText, re.UNICODE)\n", (6522, 6547), False, 'from gpv2.train.vqa2_eval_data import punct, periodStrip, commaStrip, manualMap, articles, contractions\n'), ((3049, 3101), 'gpv2.utils.py_utils.transpose_list_of_dicts', 'py_utils.transpose_list_of_dicts', (['per_example_scores'], {}), '(per_example_scores)\n', (3081, 3101), False, 'from gpv2.utils import py_utils\n'), ((3117, 3134), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (3128, 3134), False, 'from collections import defaultdict, Counter\n'), ((6684, 6716), 'gpv2.train.vqa2_eval_data.manualMap.setdefault', 'manualMap.setdefault', (['word', 'word'], {}), '(word, word)\n', (6704, 6716), False, 'from gpv2.train.vqa2_eval_data import punct, periodStrip, commaStrip, manualMap, articles, contractions\n'), ((8376, 8401), 'third_party.detection_metrics.lib.Evaluator.Evaluator', 'det_evaluator.Evaluator', ([], {}), '()\n', (8399, 8401), True, 'import third_party.detection_metrics.lib.Evaluator as det_evaluator\n'), ((11551, 11570), 'gpv2.utils.quiet_ptbtokenizer.QuitePTBTokenizer', 'QuitePTBTokenizer', ([], {}), '()\n', (11568, 11570), False, 'from gpv2.utils.quiet_ptbtokenizer import QuitePTBTokenizer\n'), ((12189, 12206), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (12200, 12206), False, 'from collections import defaultdict, Counter\n'), ((3580, 3595), 'numpy.array', 'np.array', (['score'], {}), '(score)\n', (3588, 3595), True, 'import numpy as np\n'), ((8574, 8593), 'numpy.array', 'np.array', (['ex.bboxes'], 
{}), '(ex.bboxes)\n', (8582, 8593), True, 'import numpy as np\n'), ((8822, 8851), 'third_party.detection_metrics.lib.Evaluator.BoundingBoxes', 'det_evaluator.BoundingBoxes', ([], {}), '()\n', (8849, 8851), True, 'import third_party.detection_metrics.lib.Evaluator as det_evaluator\n'), ((8865, 8892), 'gpv2.utils.image_utils.get_image_size', 'get_image_size', (['ex.image_id'], {}), '(ex.image_id)\n', (8879, 8892), False, 'from gpv2.utils.image_utils import get_image_size\n'), ((11447, 11454), 'pycocoevalcap.cider.cider.Cider', 'Cider', ([], {}), '()\n', (11452, 11454), False, 'from pycocoevalcap.cider.cider import Cider\n'), ((11492, 11502), 'pycocoevalcap.bleu.bleu.Bleu', 'Bleu', (['bleu'], {}), '(bleu)\n', (11496, 11502), False, 'from pycocoevalcap.bleu.bleu import Bleu\n'), ((6366, 6395), 're.search', 're.search', (['commaStrip', 'inText'], {}), '(commaStrip, inText)\n', (6375, 6395), False, 'import re\n'), ((9698, 9725), 'gpv2.utils.image_utils.get_image_size', 'get_image_size', (['ex.image_id'], {}), '(ex.image_id)\n', (9712, 9725), False, 'from gpv2.utils.image_utils import get_image_size\n'), ((10973, 11028), 'gpv2.data.dataset.CaptioningExample', 'CaptioningExample', (['cap.gpv_id', 'ex.image_id', 'ex.captions'], {}), '(cap.gpv_id, ex.image_id, ex.captions)\n', (10990, 11028), False, 'from gpv2.data.dataset import VqaExample, ClsExample, WebQaExample, LocalizationExample, CaptioningExample\n'), ((8987, 9262), 'third_party.detection_metrics.lib.Evaluator.BoundingBox', 'det_evaluator.BoundingBox', ([], {'imageName': 'ex.image_id', 'classId': 'ex.category', 'x': 'x', 'y': 'y', 'w': 'w', 'h': 'h', 'typeCoordinates': 'det_evaluator.CoordinatesType.Relative', 'imgSize': '(W, H)', 'bbType': 'det_evaluator.BBType.Detected', 'classConfidence': 'scores[b]', 'format': 'det_evaluator.BBFormat.XYWH'}), '(imageName=ex.image_id, classId=ex.category, x=x,\n y=y, w=w, h=h, typeCoordinates=det_evaluator.CoordinatesType.Relative,\n imgSize=(W, H), 
bbType=det_evaluator.BBType.Detected, classConfidence=\n scores[b], format=det_evaluator.BBFormat.XYWH)\n', (9012, 9262), True, 'import third_party.detection_metrics.lib.Evaluator as det_evaluator\n'), ((10023, 10274), 'third_party.detection_metrics.lib.Evaluator.BoundingBox', 'det_evaluator.BoundingBox', ([], {'imageName': 'ex.image_id', 'classId': 'ex.category', 'x': 'x', 'y': 'y', 'w': 'w', 'h': 'h', 'typeCoordinates': 'det_evaluator.CoordinatesType.Relative', 'imgSize': '(W, H)', 'bbType': 'det_evaluator.BBType.GroundTruth', 'format': 'det_evaluator.BBFormat.XYWH'}), '(imageName=ex.image_id, classId=ex.category, x=x,\n y=y, w=w, h=h, typeCoordinates=det_evaluator.CoordinatesType.Relative,\n imgSize=(W, H), bbType=det_evaluator.BBType.GroundTruth, format=\n det_evaluator.BBFormat.XYWH)\n', (10048, 10274), True, 'import third_party.detection_metrics.lib.Evaluator as det_evaluator\n'), ((13602, 13634), 'gpv2.utils.py_utils.transpose_lists', 'py_utils.transpose_lists', (['scores'], {}), '(scores)\n', (13626, 13634), False, 'from gpv2.utils import py_utils\n'), ((3719, 3738), 'numpy.mean', 'np.mean', (['score[ixs]'], {}), '(score[ixs])\n', (3726, 3738), True, 'import numpy as np\n'), ((3814, 3832), 'numpy.sum', 'np.sum', (['score[ixs]'], {}), '(score[ixs])\n', (3820, 3832), True, 'import numpy as np\n')] |
# Copyright (c) 2015-2018 by the parties listed in the AUTHORS file.
# All rights reserved. Use of this source code is governed by
# a BSD-style license that can be found in the LICENSE file.
# from memory_profiler import profile
from collections import OrderedDict
import os
from toast_planck.preproc_modules import Pnt2Planeter, MapSampler
import toast.healpix
import toast.qarray
import astropy.io.fits as pf
import healpy as hp
import numpy as np
PLANETS = ["mars", "jupiter", "saturn", "uranus", "neptune"]
class Target(object):
    """A sky target (point source or other coordinate of interest).

    Longitude/latitude are given in degrees; the spherical angles and the
    unit vector are precomputed for fast angular-distance tests.
    """

    def __init__(self, name, lon, lat, radius, info):
        self.name = name
        self.lon = lon
        self.lat = lat
        # Co-latitude and longitude in radians
        self.theta = np.radians(90 - lat)
        self.phi = np.radians(lon)
        self.vec = toast.healpix.ang2vec(
            np.array([self.theta]), np.array([self.phi]))
        self.radius = radius
        self.info = info
# Cartesian unit basis vectors (rows of the 3x3 identity), used as rotation
# axes for the quaternion operations in OpExtractPlanck.
XAXIS, YAXIS, ZAXIS = np.eye(3)

# import warnings
# warnings.filterwarnings("error")
class OpExtractPlanck(toast.Operator):
    """
    Extract TOD in the vicinity of point sources and other coordinates
    of interest.

    Args:
        RIMO -- Reduced instrument model, used for noise and position angle
        catalog -- catalog of targets
        radius -- search radius around the target [arc min]
    """

    def __init__(
            self, rimo, catalog, radius, comm, out=".", common_flag_mask=255,
            flag_mask=255, pnt_mask=2, sso_mask=2, maskfile=None, bg=None,
            full_rings=False, recalibrate_bg=False):
        self.comm = comm
        if comm is None:
            self.rank = 0
        else:
            self.rank = self.comm.rank
        self.rimo = rimo
        self.out = out
        self.catalog = catalog
        # parse the catalog
        self.parse_catalog()
        self.radius = radius
        self.common_flag_mask = common_flag_mask
        self.flag_mask = flag_mask
        self.pnt_mask = pnt_mask
        self.sso_mask = sso_mask
        self.masksampler = None
        if maskfile is not None:
            self.masksampler = MapSampler(maskfile, pol=False, comm=self.comm)
        self.mapsampler = None
        if bg is not None:
            self.mapsampler = MapSampler(bg, pol=True, comm=self.comm)
        self.full_rings = full_rings
        self.recalibrate_bg = recalibrate_bg

    def parse_catalog(self):
        """Read the target catalog on the root process and broadcast it.

        The first non-comment line is a header: "<coordinate system>, <ntarget>".
        Every other line is "name, lon, lat, radius, info".
        """
        if self.rank == 0:
            header = True
            self.targets = OrderedDict()
            for line in open(self.catalog, "r"):
                if line.startswith("#"):
                    continue
                parts = line.split(",")
                if header:
                    self.target_coord = parts[0].strip()
                    # np.int / np.float were deprecated in NumPy 1.20 and
                    # removed in 1.24; the builtins are exact replacements.
                    self.ntarget = int(parts[1])
                    header = False
                else:
                    name = parts[0].strip()
                    lon = float(parts[1])
                    lat = float(parts[2])
                    radius = float(parts[3])
                    info = parts[4].strip()
                    self.targets[name] = Target(name, lon, lat, radius, info)
        else:
            self.target_coord = None
            self.targets = None
        if self.comm is not None:
            self.target_coord = self.comm.bcast(self.target_coord, root=0)
            self.targets = self.comm.bcast(self.targets, root=0)

    def collect_detector_data(
            self, target, det, timevec, signalvec, thetavec, phivec, psivec,
            dthetavec, dphivec, ringnumbervec, pntflagvec, qwvec, uwvec,
            phasevec):
        """Gather the per-process sample vectors for this detector onto the
        root process, sort them by time and write one FITS binary table.
        """
        if self.rank == 0:
            print("    gathering {} data".format(det), flush=True)
        cols = []
        column_specs = [
            ("time", timevec, np.float64, "second"),
            ("signal", signalvec, np.float32, "K_CMB"),
            ("theta", thetavec, np.float32, "radians"),
            ("phi", phivec, np.float32, "radians"),
            ("psi", psivec, np.float32, "radians"),
            ("dtheta", dthetavec, np.float32, "arc min"),
            ("dphi", dphivec, np.float32, "arc min"),
            ("ring", ringnumbervec, np.int32, "ring number"),
            ("pntflag", pntflagvec, np.int8, "pointing flag"),
            ("qweight", qwvec, np.float32, "Stokes Q weight"),
            ("uweight", uwvec, np.float32, "Stokes U weight"),
            ("phase", phasevec, np.float32, "radian"),
        ]
        is_sorted = None
        ind = None
        for name, vec, dtype, unit in column_specs:
            if len(vec) == 0:
                vec = np.array([], dtype=dtype)
            if self.comm is not None:
                self.comm.Barrier()
                vec = self.comm.gather(vec)
            else:
                vec = [vec]
            if self.rank != 0:
                continue
            vec = np.hstack(vec).astype(dtype)
            if is_sorted is None:
                # First vector is time
                is_sorted = np.all(np.diff(vec) >= 0)
                if not is_sorted:
                    # The times are not sorted if multiple ring
                    # ranges were given; derive a sort order from the time
                    # column and apply it to every subsequent column.
                    ind = np.argsort(vec)
            if ind is not None:
                vec = vec[ind]
            vec = vec.reshape([1, -1])
            if dtype == np.float32:
                form = "{}E".format(vec.size)
            elif dtype == np.float64:
                form = "{}D".format(vec.size)
            elif dtype == np.int32:
                # NOTE(review): FITS TFORM 'I' is a 16-bit integer while the
                # data are int32 ('J' would be 32-bit) -- ring numbers above
                # 32767 would overflow; confirm the intended column width.
                form = "{}I".format(vec.size)
            elif dtype == np.int8:
                form = "{}B".format(vec.size)
            else:
                raise RuntimeError(
                    "Unknown datatype {}".format(dtype))
            cols.append(
                pf.Column(name=name, format=form, array=vec, unit=unit))
        if self.rank == 0 and vec.size > 0:
            hdulist = [pf.PrimaryHDU()]
            hdu = pf.BinTableHDU.from_columns(pf.ColDefs(cols))
            hdu.header["extname"] = det
            hdu.header["detector"] = det
            hdu.header["fsample"] = (self.rimo[det].fsample, "sampling rate")
            hdu.header["psi"] = (
                self.rimo[det].psi_uv + self.rimo[det].psi_pol,
                "polarization angle")
            eps = self.rimo[det].epsilon
            eta = (1 - eps) / (1 + eps)
            hdu.header["eta"] = (eta, "polarization efficiency")
            hdu.header["target"] = (target.name, "target name")
            hdu.header["info"] = (target.info, "target info")
            hdu.header["lon"] = (target.lon, "target longitude")
            hdu.header["lat"] = (target.lat, "target latitude")
            hdu.header["coord"] = (self.target_coord, "Coordinate system")
            hdu.header["radius"] = (target.radius + self.radius,
                                    "search radius")
            hdulist.append(hdu)
            print("    gathered {} samples".format(vec.size), flush=True)
            filename = os.path.join(
                self.out, "small_dataset_{}_{}.fits".format(target.name, det))
            pf.HDUList(hdulist).writeto(filename, overwrite=True)
            print("small dataset saved to {}".format(filename), flush=True)
        if self.comm is not None:
            self.comm.barrier()
        return

    def process_ring(
            self, istart, istop, ring_offset, iring, tod, det, planetmode,
            planeter, target, cos_lim, psidet, timevec, signalvec, thetavec,
            phivec, psivec, dthetavec, dphivec, ringnumbervec, pntflagvec,
            qwvec, uwvec, phasevec):
        """ Collect samples on one ring that fall within the search radius,
        appending them to the provided accumulator lists.
        """
        ind = slice(istart, istop)
        # np.int was removed in NumPy >= 1.24; plain `int` is equivalent.
        ring_number = (np.zeros(istop - istart, dtype=int) +
                       ring_offset + iring)
        times = tod.local_times()[ind]
        common_flags = (tod.local_common_flags()[ind] &
                        self.common_flag_mask)
        pnt_flags = (tod.local_common_flags()[ind] &
                     self.pnt_mask)
        phase = tod.local_phase()[ind]
        signal = tod.local_signal(det)[ind]
        flags = tod.local_flags(det)[ind] & self.flag_mask
        flags |= common_flags
        quat = tod.local_pointing(det)[ind]
        iquweights = tod.local_weights(det)[ind]
        # Which samples are within the search radius?
        vec = toast.qarray.rotate(quat, ZAXIS)
        # Check if the TOD coordinate system matches the catalog
        if self.target_coord.upper() != tod.coord.upper():
            coord_matrix = hp.rotator.get_coordconv_matrix(
                [tod.coord, self.target_coord])[0]
            coord_quat = toast.qarray.from_rotmat(coord_matrix)
            vec = toast.qarray.rotate(coord_quat, vec)
        if planetmode:
            # Moving target: get per-sample cosine distances and positions
            dp, planetvec = planeter.cosdist_vec(
                vec.T, times, full_output=True)
        else:
            dp = np.dot(vec, target.vec.T).ravel()
        good = dp > cos_lim
        good[flags != 0] = False
        ngood = np.sum(good)
        if ngood > 0:
            if self.full_rings:
                # Keep every unflagged sample on a ring that hits the target
                good = flags == 0
            theta, phi, psi = toast.qarray.to_angles(
                quat[good])
            if self.masksampler is not None:
                # Drop samples that fall inside the processing mask
                not_masked = self.masksampler.at(theta, phi) > .5
                theta = theta[not_masked]
                phi = phi[not_masked]
                psi = psi[not_masked]
                good[good] = not_masked
            bg = 0
            if self.mapsampler is not None:
                bg = self.mapsampler.atpol(theta, phi,
                                           iquweights[good])
                if self.recalibrate_bg:
                    # Fit gain and offset of the background template against
                    # the signal, excluding SSO-flagged samples.
                    ind_fit = (
                        tod.local_flags(det)[ind][good] & self.sso_mask == 0)
                    templates = np.vstack([
                        np.ones(np.sum(ind_fit)),
                        bg[ind_fit]])
                    invcov = np.dot(templates, templates.T)
                    cov = np.linalg.inv(invcov)
                    proj = np.dot(templates, signal[good])
                    offset, gain = np.dot(cov, proj)
                    bg = gain * bg + offset
            # Rotate these samples into a frame where the
            # detector looks along the X axis.
            psirot = toast.qarray.rotation(vec[good], -psi + psidet)
            thetarot = toast.qarray.rotation(YAXIS, np.pi / 2 - theta)
            phirot = toast.qarray.rotation(ZAXIS, -phi)
            rot = toast.qarray.mult(
                thetarot, toast.qarray.mult(phirot, psirot))
            if planetmode:
                tvec = toast.qarray.rotate(rot, planetvec[:, good].T)
            else:
                tvec = toast.qarray.rotate(rot, target.vec)
            ttheta, tphi = toast.healpix.vec2ang(tvec)
            thetavec.append(theta)
            phivec.append(phi)
            psivec.append(psi)
            # invert the source position into PSF
            dthetavec.append(ttheta - np.pi / 2)
            dphivec.append(-tphi)
            timevec.append(times[good])
            signalvec.append(signal[good] - bg)
            ringnumbervec.append(ring_number[good])
            pntflagvec.append(pnt_flags[good])
            _, qw, uw = iquweights[good].T
            qwvec.append(qw)
            uwvec.append(uw)
            phasevec.append(phase[good])
        return

    # @profile
    def exec(self, data):
        """Loop over all targets and detectors, collect the samples near
        each target and write them out as small per-detector FITS files.
        """
        dets = data.obs[0]["tod"].local_dets
        for target in self.targets.values():
            planetmode = target.name.lower() in PLANETS
            # Collect samples around the target on every process
            if self.rank == 0:
                print("Collecting data for {}. planetmode = {}".format(
                    target.name, planetmode), flush=True)
            if planetmode:
                planeter = Pnt2Planeter(target.name.lower())
            else:
                planeter = None
            # Avoid taking arc cosines. Measure the radius in dot
            # product instead.
            cos_lim = np.cos(np.radians((self.radius + target.radius) / 60))
            for det in dets:
                # We only want the position angle without the extra
                # polarization angle
                psidet = np.radians(
                    self.rimo[det].psi_uv + self.rimo[det].psi_pol - 90)
                timevec = []
                thetavec = []
                phivec = []
                psivec = []
                dthetavec = []
                dphivec = []
                signalvec = []
                ringnumbervec = []
                pntflagvec = []
                qwvec = []
                uwvec = []
                phasevec = []
                for obs in data.obs:
                    tod = obs["tod"]
                    if "intervals" not in obs:
                        raise RuntimeError(
                            "observation must specify intervals")
                    intervals = tod.local_intervals(obs["intervals"])
                    local_starts = [ival.first for ival in intervals]
                    local_stops = [ival.last + 1 for ival in intervals]
                    ring_offset = tod.globalfirst_ring
                    # Count rings that end before our local data begin so
                    # ring numbering stays globally consistent.
                    for interval in obs["intervals"]:
                        if interval.last < tod.local_samples[0]:
                            ring_offset += 1
                    for iring, (istart, istop) in enumerate(zip(local_starts,
                                                                local_stops)):
                        self.process_ring(
                            istart, istop, ring_offset, iring, tod, det,
                            planetmode, planeter, target, cos_lim, psidet,
                            timevec, signalvec, thetavec, phivec, psivec,
                            dthetavec, dphivec, ringnumbervec, pntflagvec,
                            qwvec, uwvec, phasevec)
                if len(timevec) > 0:
                    timevec = np.hstack(timevec)
                    signalvec = np.hstack(signalvec)
                    thetavec = np.hstack(thetavec)
                    phivec = np.hstack(phivec)
                    psivec = np.hstack(psivec)
                    dthetavec = np.hstack(dthetavec)
                    dphivec = np.hstack(dphivec)
                    # Wrap the longitude offsets into [-pi, pi] and convert
                    # the offsets to arc minutes.
                    dphivec[dphivec < -np.pi] += 2 * np.pi
                    dphivec[dphivec > np.pi] -= 2 * np.pi
                    dthetavec *= 180 / np.pi * 60.
                    dphivec *= 180 / np.pi * 60.
                    ringnumbervec = np.hstack(ringnumbervec)
                    pntflagvec = np.hstack(pntflagvec)
                    qwvec = np.hstack(qwvec)
                    uwvec = np.hstack(uwvec)
                    phasevec = np.hstack(phasevec)
                self.collect_detector_data(
                    target, det, timevec, signalvec, thetavec,
                    phivec, psivec, dthetavec, dphivec, ringnumbervec,
                    pntflagvec, qwvec, uwvec, phasevec)
        return
| [
"numpy.radians",
"astropy.io.fits.ColDefs",
"numpy.hstack",
"healpy.rotator.get_coordconv_matrix",
"numpy.argsort",
"numpy.array",
"astropy.io.fits.Column",
"numpy.diff",
"numpy.dot",
"numpy.eye",
"collections.OrderedDict",
"astropy.io.fits.PrimaryHDU",
"toast_planck.preproc_modules.MapSampl... | [((933, 942), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (939, 942), True, 'import numpy as np\n'), ((689, 714), 'numpy.radians', 'np.radians', (['(90 - self.lat)'], {}), '(90 - self.lat)\n', (699, 714), True, 'import numpy as np\n'), ((734, 754), 'numpy.radians', 'np.radians', (['self.lon'], {}), '(self.lon)\n', (744, 754), True, 'import numpy as np\n'), ((9100, 9112), 'numpy.sum', 'np.sum', (['good'], {}), '(good)\n', (9106, 9112), True, 'import numpy as np\n'), ((809, 831), 'numpy.array', 'np.array', (['[self.theta]'], {}), '([self.theta])\n', (817, 831), True, 'import numpy as np\n'), ((833, 853), 'numpy.array', 'np.array', (['[self.phi]'], {}), '([self.phi])\n', (841, 853), True, 'import numpy as np\n'), ((2091, 2138), 'toast_planck.preproc_modules.MapSampler', 'MapSampler', (['maskfile'], {'pol': '(False)', 'comm': 'self.comm'}), '(maskfile, pol=False, comm=self.comm)\n', (2101, 2138), False, 'from toast_planck.preproc_modules import Pnt2Planeter, MapSampler\n'), ((2227, 2267), 'toast_planck.preproc_modules.MapSampler', 'MapSampler', (['bg'], {'pol': '(True)', 'comm': 'self.comm'}), '(bg, pol=True, comm=self.comm)\n', (2237, 2267), False, 'from toast_planck.preproc_modules import Pnt2Planeter, MapSampler\n'), ((2460, 2473), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2471, 2473), False, 'from collections import OrderedDict\n'), ((4646, 4671), 'numpy.array', 'np.array', (['[]'], {'dtype': 'dtype'}), '([], dtype=dtype)\n', (4654, 4671), True, 'import numpy as np\n'), ((5829, 5884), 'astropy.io.fits.Column', 'pf.Column', ([], {'name': 'name', 'format': 'form', 'array': 'vec', 'unit': 'unit'}), '(name=name, format=form, array=vec, unit=unit)\n', (5838, 5884), True, 'import astropy.io.fits as pf\n'), ((5966, 5981), 'astropy.io.fits.PrimaryHDU', 'pf.PrimaryHDU', ([], {}), '()\n', (5979, 5981), True, 'import astropy.io.fits as pf\n'), ((6029, 6045), 'astropy.io.fits.ColDefs', 'pf.ColDefs', (['cols'], 
{}), '(cols)\n', (6039, 6045), True, 'import astropy.io.fits as pf\n'), ((7801, 7839), 'numpy.zeros', 'np.zeros', (['(istop - istart)'], {'dtype': 'np.int'}), '(istop - istart, dtype=np.int)\n', (7809, 7839), True, 'import numpy as np\n'), ((8634, 8697), 'healpy.rotator.get_coordconv_matrix', 'hp.rotator.get_coordconv_matrix', (['[tod.coord, self.target_coord]'], {}), '([tod.coord, self.target_coord])\n', (8665, 8697), True, 'import healpy as hp\n'), ((12251, 12297), 'numpy.radians', 'np.radians', (['((self.radius + target.radius) / 60)'], {}), '((self.radius + target.radius) / 60)\n', (12261, 12297), True, 'import numpy as np\n'), ((12458, 12521), 'numpy.radians', 'np.radians', (['(self.rimo[det].psi_uv + self.rimo[det].psi_pol - 90)'], {}), '(self.rimo[det].psi_uv + self.rimo[det].psi_pol - 90)\n', (12468, 12521), True, 'import numpy as np\n'), ((2752, 2768), 'numpy.int', 'np.int', (['parts[1]'], {}), '(parts[1])\n', (2758, 2768), True, 'import numpy as np\n'), ((2896, 2914), 'numpy.float', 'np.float', (['parts[1]'], {}), '(parts[1])\n', (2904, 2914), True, 'import numpy as np\n'), ((2941, 2959), 'numpy.float', 'np.float', (['parts[2]'], {}), '(parts[2])\n', (2949, 2959), True, 'import numpy as np\n'), ((2989, 3007), 'numpy.float', 'np.float', (['parts[3]'], {}), '(parts[3])\n', (2997, 3007), True, 'import numpy as np\n'), ((4910, 4924), 'numpy.hstack', 'np.hstack', (['vec'], {}), '(vec)\n', (4919, 4924), True, 'import numpy as np\n'), ((5230, 5245), 'numpy.argsort', 'np.argsort', (['vec'], {}), '(vec)\n', (5240, 5245), True, 'import numpy as np\n'), ((7168, 7187), 'astropy.io.fits.HDUList', 'pf.HDUList', (['hdulist'], {}), '(hdulist)\n', (7178, 7187), True, 'import astropy.io.fits as pf\n'), ((8989, 9014), 'numpy.dot', 'np.dot', (['vec', 'target.vec.T'], {}), '(vec, target.vec.T)\n', (8995, 9014), True, 'import numpy as np\n'), ((10240, 10270), 'numpy.dot', 'np.dot', (['templates', 'templates.T'], {}), '(templates, templates.T)\n', (10246, 10270), True, 'import 
numpy as np\n'), ((10297, 10318), 'numpy.linalg.inv', 'np.linalg.inv', (['invcov'], {}), '(invcov)\n', (10310, 10318), True, 'import numpy as np\n'), ((10346, 10377), 'numpy.dot', 'np.dot', (['templates', 'signal[good]'], {}), '(templates, signal[good])\n', (10352, 10377), True, 'import numpy as np\n'), ((10413, 10430), 'numpy.dot', 'np.dot', (['cov', 'proj'], {}), '(cov, proj)\n', (10419, 10430), True, 'import numpy as np\n'), ((14180, 14198), 'numpy.hstack', 'np.hstack', (['timevec'], {}), '(timevec)\n', (14189, 14198), True, 'import numpy as np\n'), ((14231, 14251), 'numpy.hstack', 'np.hstack', (['signalvec'], {}), '(signalvec)\n', (14240, 14251), True, 'import numpy as np\n'), ((14283, 14302), 'numpy.hstack', 'np.hstack', (['thetavec'], {}), '(thetavec)\n', (14292, 14302), True, 'import numpy as np\n'), ((14332, 14349), 'numpy.hstack', 'np.hstack', (['phivec'], {}), '(phivec)\n', (14341, 14349), True, 'import numpy as np\n'), ((14379, 14396), 'numpy.hstack', 'np.hstack', (['psivec'], {}), '(psivec)\n', (14388, 14396), True, 'import numpy as np\n'), ((14429, 14449), 'numpy.hstack', 'np.hstack', (['dthetavec'], {}), '(dthetavec)\n', (14438, 14449), True, 'import numpy as np\n'), ((14480, 14498), 'numpy.hstack', 'np.hstack', (['dphivec'], {}), '(dphivec)\n', (14489, 14498), True, 'import numpy as np\n'), ((14752, 14776), 'numpy.hstack', 'np.hstack', (['ringnumbervec'], {}), '(ringnumbervec)\n', (14761, 14776), True, 'import numpy as np\n'), ((14810, 14831), 'numpy.hstack', 'np.hstack', (['pntflagvec'], {}), '(pntflagvec)\n', (14819, 14831), True, 'import numpy as np\n'), ((14860, 14876), 'numpy.hstack', 'np.hstack', (['qwvec'], {}), '(qwvec)\n', (14869, 14876), True, 'import numpy as np\n'), ((14905, 14921), 'numpy.hstack', 'np.hstack', (['uwvec'], {}), '(uwvec)\n', (14914, 14921), True, 'import numpy as np\n'), ((14953, 14972), 'numpy.hstack', 'np.hstack', (['phasevec'], {}), '(phasevec)\n', (14962, 14972), True, 'import numpy as np\n'), ((5047, 5059), 
'numpy.diff', 'np.diff', (['vec'], {}), '(vec)\n', (5054, 5059), True, 'import numpy as np\n'), ((10155, 10170), 'numpy.sum', 'np.sum', (['ind_fit'], {}), '(ind_fit)\n', (10161, 10170), True, 'import numpy as np\n')] |
import torch
import numpy
import environment as env
from environment import get_valid_directions, move, prettyprint
class Policy(nn.Module):
def __init__(self):
super(Policy, self).__init__()
self.input_dim = 4 # onehot of possible paths
self.output_dim = 4 # action probs
self.hidden_dim = 32
self.layers = 2
self.temperature = 1.2
self.gru = nn.GRU(self.input_dim, self.hidden_dim, self.layers, batch_first=True)
self.lin = nn.Linear(self.hidden_dim, self.output_dim)
self.relu = nn.ReLU()
self.softmax = nn.Softmax(dim=1)
def forward(self, x, h=None):
if h is not None:
out, h = self.gru(x, h)
else:
out, h = self.gru(x)
out = out[:, -1]
out = self.lin(out)
out = self.relu(out)
out = self.softmax(out / self.temperature)
return out, h
def save(self, file):
torch.save(self.state_dict(), file)
def load(self, file, device):
self.load_state_dict(torch.load(file, map_location=torch.device(device)))
def set_parameters_to(self, policy):
self.load_state_dict(policy.state_dict())
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
batch_size = 16
discount = 0.8
learnrate = 0.02
epochs = 100
simulations = 10
max_steps = 50
with_baseline = True
# An dieser Stelle nicht genannt und etwas mit dem man rumspielen kann ist ein temperature Parameter im GRU Netz. Der smoothed ein
# wenig die Policy und soll verhindern, dass die Funktion nicht auf ungewünschte Modalwerte kollabiert. Das kann gerade am Anfang schnell
# passieren.
# Außerdem wird im environment ein negativer Reward von 0.5 für das gegen die Wand laufen gegeben.
# Der Prozess hängt extrem stark vom Zufall ab! Es kann durchaus Runs geben, bei denen mit den gegebenen Epochen und Parametern kein
# nennenswerter Erfolg erzielt wird. Es sollte aber abgesehen von Ausreißern recht zuverlässig funktionieren. Man muss das zum Testen
# auch nicht komplett durchlaufen lassen.
direction_indices = {
'left': 0,
'right': 1,
'up': 2,
'down': 3
}
direction_strings = {
v: k for (k, v) in direction_indices.items()
}
def to_onehot(directions):
state = torch.zeros((4,), device=device)
for direction in directions:
state[direction_indices[direction]] = 1
return state
"""
Gibt eine Matrix der Größe (batchsize, 1, 4) zurück, wobei jedes Element in Dimension 0 ein one hot kodierter Zustand ist. Die 1 in
der Mitte kann man ignorieren. Das ist die Eingabe in das Netz.
"""
def to_batch(directions):
batch = torch.zeros((batch_size, 1, 4), device=device)
for i, dirs in enumerate(directions):
batch[i] = to_onehot(dirs)
return batch
cache = []
"""
Ein Policy-Gradient Update.
param probs: Ein Tensor [trajectory_length, batch_size] von Logprobabilities der ausgeführten Aktionen
param rewards: Ein Tensor [trajectory_length, batch_size] von Belohnungen, die für die jeweils ausgeführten Aktionen erhalten wurden.
"""
def policy_gradient(optimizer, probs, rewards):
total = torch.zeros((batch_size,), device=device)
optimizer.zero_grad()
if with_baseline:
baseline = 0
if len(cache) > 10:
history = torch.stack(cache, dim=0)
baseline = torch.mean(history)
#DEBUG
print('BASELINE ', baseline.item())
#/DEBUG
cache.append(torch.stack(rewards, dim=0))
if len(cache) > 20:
cache.pop(0)
for step, (prob, reward) in enumerate(zip(probs, rewards)): # Jeweils ein Schritt für alle Trajektorien im Batch
if with_baseline:
reward = reward - baseline
total = total + discount**step * reward * prob
total = torch.sum(total) / batch_size
loss = -total
loss.backward()
optimizer.step()
#DEBUG
print('LOSS ', loss.item())
#/DEBUG
"""
Gegeben ein Batch von Positionen und einer Policy werden Aktionen ausgewählt und im Environment ausgeführt.
param policy: Das Policy Netzwerk
param positions: Ein Batch (als Liste) von Feld-IDs. (Nummer des Feldes, wo sich der Agent befindet)
param hidden: Der hidden state des Policy-RNNs
"""
def step(policy, positions, hidden=None):
directions = [get_valid_directions(p) for p in positions]
batch = to_batch(directions)
if hidden is not None:
policies, hidden = policy(batch, hidden)
else:
policies, hidden = policy(batch)
# Sample Aktionen (Indizes) aus der aktuellen Policy
distributions = torch.distributions.Categorical(policies)
actions = distributions.sample()
probs = distributions.log_prob(actions)
# Transformation der Aktionen in Strings (left, up ...)
actions = [direction_strings[index.item()] for index in actions]
rewards = torch.zeros((batch_size,), device=device)
next_positions = []
# Ausführen der Aktionen und Feedback speichern
for i, (action, position) in enumerate(zip(actions, positions)):
next_position, reward = move(action, position)
rewards[i] = reward
next_positions.append(next_position)
return next_positions, probs, rewards, hidden
"""
Eine Monte-Carlo Simulation, die den Wert eines Zustandes approximieren soll.
param policy: Die Policy der während der Simulation gefolgt werden soll.
param hidden: Der hidden state des Policy Netzes.
param positions: Ein Batch von Positionen (Feld_IDs), für die wir den Wert approximieren wollen.
param simulations: Anzahl der Simulationen, die wir machen. Am Ende wird über alle Simulationen gemittelt.
param steps: Anzahl der Schritte, die wir pro Simulation machen
param current_reward: Die Belohnung für den Schritt der uns in die aktuelle Position gebracht hat.
"""
def montecarlo(policy, hidden, positions, simulations, steps, current_reward):
with torch.no_grad():
rewards = torch.zeros((simulations, batch_size), device=device)
for s in range(simulations):
simulated_rewards = torch.zeros((0, batch_size), device=device)
for i in range(steps): # steps
positions, _, reward, hidden = step(policy, positions, hidden)
simulated_rewards = torch.cat((simulated_rewards, reward[None, :]), dim=0)
rewards[s] = torch.sum(simulated_rewards, dim=0) + current_reward
rewards = torch.mean(rewards, dim=0)
return rewards
"""
Diese Methode geht nun einen Schritt weiter: Die Umgebung gibt uns nicht mehr nach jeder Aktion einen
Reward, wie in der basic.py:train_with_policy_gradient() Funktion. Stattdessen müssen wir diesen über
Simulationen ermitteln.
Außerdem weiß der Agent nun nicht mehr, auf welchem Feld er sich befindet. In den Methoden zuvor haben
wir für die Zustandskodierung einen Onehot Vektor der Länge 36 für 36 Felder verwendet. Nun geben wir
dem Netz nur noch einen Onehot Vektor der Länge 4, für den gilt, dass Index i = 1, gdw. i frei und
i = 0, wenn sich in Richtung i eine Mauer befindet.
Wir verwenden deshalb statt eines einfachen Feed-Forward Netzes ein rekurrentes Netz, mit der Idee,
dass die Policy gegeben einen Zustand von der bisherigen Trajektorie abhängt (sonst ließen sich zwei
Felder mit identischen "Ausgängen" ja auch nicht unterscheiden).
Die Funktionen unterscheiden sich im Wesentlichen nicht: Dazugekommen ist der Aufruf der montecarlo()
Funktion, statt ein Abruf des Feld-Values.
"""
def train():
policy = Policy().to(device)
rollout = Policy().to(device)
optimizer = torch.optim.Adam(policy.parameters(), lr=learnrate)
for epoch in range(epochs):
rollout.set_parameters_to(policy) # Kopie des Netzes für die MC-Simulation
policy.train()
rollout.eval() # Sehr wichtig in PyTorch, wenn ihr ein Netz nutzt, dass man nicht trainieren will!
position = [env.entry_id for _ in range(batch_size)] # Die Startpositionen für einen Batch
hidden = None
probs = []
rewards = []
#DEBUG
render_positions = {i: [] for i in range(36)}
#/DEBUG
for current_step in range(max_steps):
position, prob, reward, hidden = step(policy, position, hidden)
#missing_steps = max_steps - (current_step + 1)
simulation = montecarlo(rollout, hidden, position, simulations, 20, reward)
#DEBUG
for sample in range(batch_size):
pos = position[sample]
val = simulation[sample].item()
render_positions[pos].append(val)
#/DEBUG
rewards.append(simulation)
probs.append(prob)
policy_gradient(optimizer, probs, rewards)
#DEBUG
prettyprint([len(item) for item in render_positions.values()])
prettyprint([numpy.mean(item) for item in render_positions.values()])
print('SUCESS CASES ', position.count(env.exit_id), ' of ', batch_size)
print('=========== FINISHED RUN ', epoch, ' ===========\n')
#/DEBUG
if __name__ == '__main__':
train()
| [
"numpy.mean",
"torch.distributions.Categorical",
"environment.get_valid_directions",
"torch.mean",
"torch.stack",
"torch.cuda.is_available",
"torch.sum",
"environment.move",
"torch.no_grad",
"torch.zeros",
"torch.cat",
"torch.device"
] | [((1234, 1259), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1257, 1259), False, 'import torch\n'), ((1210, 1230), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1222, 1230), False, 'import torch\n'), ((1265, 1284), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1277, 1284), False, 'import torch\n'), ((2294, 2326), 'torch.zeros', 'torch.zeros', (['(4,)'], {'device': 'device'}), '((4,), device=device)\n', (2305, 2326), False, 'import torch\n'), ((2671, 2717), 'torch.zeros', 'torch.zeros', (['(batch_size, 1, 4)'], {'device': 'device'}), '((batch_size, 1, 4), device=device)\n', (2682, 2717), False, 'import torch\n'), ((3162, 3203), 'torch.zeros', 'torch.zeros', (['(batch_size,)'], {'device': 'device'}), '((batch_size,), device=device)\n', (3173, 3203), False, 'import torch\n'), ((4631, 4672), 'torch.distributions.Categorical', 'torch.distributions.Categorical', (['policies'], {}), '(policies)\n', (4662, 4672), False, 'import torch\n'), ((4900, 4941), 'torch.zeros', 'torch.zeros', (['(batch_size,)'], {'device': 'device'}), '((batch_size,), device=device)\n', (4911, 4941), False, 'import torch\n'), ((3840, 3856), 'torch.sum', 'torch.sum', (['total'], {}), '(total)\n', (3849, 3856), False, 'import torch\n'), ((4346, 4369), 'environment.get_valid_directions', 'get_valid_directions', (['p'], {}), '(p)\n', (4366, 4369), False, 'from environment import get_valid_directions, move, prettyprint\n'), ((5121, 5143), 'environment.move', 'move', (['action', 'position'], {}), '(action, position)\n', (5125, 5143), False, 'from environment import get_valid_directions, move, prettyprint\n'), ((5935, 5950), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5948, 5950), False, 'import torch\n'), ((5970, 6023), 'torch.zeros', 'torch.zeros', (['(simulations, batch_size)'], {'device': 'device'}), '((simulations, batch_size), device=device)\n', (5981, 6023), False, 'import torch\n'), ((6451, 6477), 'torch.mean', 
'torch.mean', (['rewards'], {'dim': '(0)'}), '(rewards, dim=0)\n', (6461, 6477), False, 'import torch\n'), ((3325, 3350), 'torch.stack', 'torch.stack', (['cache'], {'dim': '(0)'}), '(cache, dim=0)\n', (3336, 3350), False, 'import torch\n'), ((3374, 3393), 'torch.mean', 'torch.mean', (['history'], {}), '(history)\n', (3384, 3393), False, 'import torch\n'), ((3504, 3531), 'torch.stack', 'torch.stack', (['rewards'], {'dim': '(0)'}), '(rewards, dim=0)\n', (3515, 3531), False, 'import torch\n'), ((6094, 6137), 'torch.zeros', 'torch.zeros', (['(0, batch_size)'], {'device': 'device'}), '((0, batch_size), device=device)\n', (6105, 6137), False, 'import torch\n'), ((6298, 6352), 'torch.cat', 'torch.cat', (['(simulated_rewards, reward[None, :])'], {'dim': '(0)'}), '((simulated_rewards, reward[None, :]), dim=0)\n', (6307, 6352), False, 'import torch\n'), ((6379, 6414), 'torch.sum', 'torch.sum', (['simulated_rewards'], {'dim': '(0)'}), '(simulated_rewards, dim=0)\n', (6388, 6414), False, 'import torch\n'), ((8888, 8904), 'numpy.mean', 'numpy.mean', (['item'], {}), '(item)\n', (8898, 8904), False, 'import numpy\n'), ((1084, 1104), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (1096, 1104), False, 'import torch\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# LICENSE
#
# Copyright (C) 2010-2018 GEM Foundation, <NAME>, <NAME>,
# <NAME>.
#
# The Hazard Modeller's Toolkit is free software: you can redistribute
# it and/or modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>
#
# DISCLAIMER
#
# The software Hazard Modeller's Toolkit (openquake.hmtk) provided herein
# is released as a prototype implementation on behalf of
# scientists and engineers working within the GEM Foundation (Global
# Earthquake Model).
#
# It is distributed for the purpose of open collaboration and in the
# hope that it will be useful to the scientific, engineering, disaster
# risk and software design communities.
#
# The software is NOT distributed as part of GEM's OpenQuake suite
# (https://www.globalquakemodel.org/tools-products) and must be considered as a
# separate entity. The software provided herein is designed and implemented
# by scientific staff. It is not developed to the design standards, nor
# subject to same level of critical review by professional software
# developers, as GEM's OpenQuake software suite.
#
# Feedback and contribution to the software is welcome, and can be
# directed to the hazard scientific staff of the GEM Model Facility
# (<EMAIL>).
#
# The Hazard Modeller's Toolkit (openquake.hmtk) is therefore distributed WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# The GEM Foundation, and the authors of the software, assume no
# liability for use of the software.
# -*- coding: utf-8 -*-
'''
Tests the construction and methods within the :class:
openquake.hmtk.sources.point_source.mtkPointSource
'''
import unittest
import warnings
import numpy as np
from copy import deepcopy
from openquake.hazardlib.geo.point import Point
from openquake.hazardlib.source.point import PointSource
from openquake.hazardlib.tom import PoissonTOM
from openquake.hazardlib.pmf import PMF
from openquake.hazardlib.mfd.truncated_gr import TruncatedGRMFD
from openquake.hazardlib.scalerel.wc1994 import WC1994
from openquake.hmtk.sources.point_source import mtkPointSource
from openquake.hmtk.seismicity.catalogue import Catalogue
from openquake.hmtk.seismicity.selector import CatalogueSelector
TOM = PoissonTOM(50.0)
SOURCE_ATTRIBUTES = ['mfd', 'name', 'geometry', 'nodal_plane_dist', 'typology',
'upper_depth', 'catalogue', 'rupt_aspect_ratio',
'lower_depth', 'id', 'hypo_depth_dist', 'mag_scale_rel',
'trt']
class TestPointSource(unittest.TestCase):
'''
Tester class for openquake.hmtk.sources.point_source.mtkAreaSource
'''
def setUp(self):
warnings.simplefilter("ignore") # Suppress warnings during test
self.catalogue = Catalogue()
self.point_source = mtkPointSource('101', 'A Point Source')
def test_point_source_instantiation(self):
# Tests the core (minimal) instantiation of the class
# Check source has all required attributes
self.assertListEqual(sorted(self.point_source.__dict__),
sorted(SOURCE_ATTRIBUTES))
self.assertEqual(self.point_source.id, '101')
self.assertEqual(self.point_source.name, 'A Point Source')
self.assertEqual(self.point_source.typology, 'Point')
def test_depth_checker(self):
# Tests the checker to ensure correct depth values
# Bad Case - Negative upper depths
with self.assertRaises(ValueError) as ver:
self.point_source._check_seismogenic_depths(-1.0, 20.)
self.assertEqual(str(ver.exception),
'Upper seismogenic depth must be greater than or '
'equal to 0.0!')
# Bad Case - Lower depth smaller than upper depth
with self.assertRaises(ValueError) as ver:
self.point_source._check_seismogenic_depths(30., 20.)
self.assertEqual(str(ver.exception),
'Lower seismogenic depth must take a greater value '
'than upper seismogenic depth')
# Good Case
self.point_source._check_seismogenic_depths(0.0, 20.)
self.assertAlmostEqual(0.0, self.point_source.upper_depth)
self.assertAlmostEqual(20.0, self.point_source.lower_depth)
def test_geometry_inputs(self):
# Tests the geometry definitions
simple_point = Point(2.0, 3.0)
simple_point_array = np.array([2.0, 3.0])
# Using nhlib.geo.polygon.Polygon class as input
self.point_source.create_geometry(simple_point, 0.0, 30.0)
# Check that geometry is an instance of nhlib.geo.polygon.Polygon
self.assertTrue(isinstance(self.point_source.geometry, Point))
self.assertAlmostEqual(self.point_source.geometry.longitude, 2.0)
self.assertAlmostEqual(self.point_source.geometry.latitude, 3.0)
self.assertAlmostEqual(self.point_source.geometry.depth, 0.0)
self.assertAlmostEqual(0.0, self.point_source.upper_depth)
self.assertAlmostEqual(30.0, self.point_source.lower_depth)
self.point_source = mtkPointSource('101', 'A Point Source')
# Using numpy array as input
self.point_source.create_geometry(simple_point_array, 0.0, 30.0)
self.assertTrue(isinstance(self.point_source.geometry, Point))
self.assertAlmostEqual(self.point_source.geometry.longitude, 2.0)
self.assertAlmostEqual(self.point_source.geometry.latitude, 3.0)
self.assertAlmostEqual(self.point_source.geometry.depth, 0.0)
self.assertAlmostEqual(0.0, self.point_source.upper_depth)
self.assertAlmostEqual(30.0, self.point_source.lower_depth)
# For any other input type - check ValueError is raised
self.point_source = mtkPointSource('101', 'A Point Source')
with self.assertRaises(ValueError) as ver:
self.point_source.create_geometry('a bad input', 0.0, 30.0)
self.assertEqual(str(ver.exception),
'Unrecognised or unsupported geometry definition')
def test_select_events_in_circular_distance(self):
# Basic test of method to select events within a distance of the point
self.point_source = mtkPointSource('101', 'A Point Source')
simple_point = Point(4.5, 4.5)
self.catalogue.data['eventID'] = np.arange(0, 7, 1)
self.catalogue.data['longitude'] = np.arange(4.0, 7.5, 0.5)
self.catalogue.data['latitude'] = np.arange(4.0, 7.5, 0.5)
self.catalogue.data['depth'] = np.ones(7, dtype=float)
# Simple Case - 100 km epicentral distance
selector0 = CatalogueSelector(self.catalogue)
self.point_source.create_geometry(simple_point, 0., 30.)
self.point_source.select_catalogue_within_distance(selector0, 100.,
'epicentral')
np.testing.assert_array_almost_equal(
np.array([0, 1, 2]),
self.point_source.catalogue.data['eventID'])
np.testing.assert_array_almost_equal(
np.array([4., 4.5, 5.]),
self.point_source.catalogue.data['longitude'])
np.testing.assert_array_almost_equal(
np.array([4., 4.5, 5.]),
self.point_source.catalogue.data['latitude'])
np.testing.assert_array_almost_equal(
np.array([1., 1., 1.]),
self.point_source.catalogue.data['depth'])
# Simple case - 100 km hypocentral distance (hypocentre at 70 km)
self.point_source.select_catalogue_within_distance(
selector0, 100., 'hypocentral', 70.)
np.testing.assert_array_almost_equal(
np.array([1]),
self.point_source.catalogue.data['eventID'])
np.testing.assert_array_almost_equal(
np.array([4.5]),
self.point_source.catalogue.data['longitude'])
np.testing.assert_array_almost_equal(
np.array([4.5]),
self.point_source.catalogue.data['latitude'])
np.testing.assert_array_almost_equal(
np.array([1.]),
self.point_source.catalogue.data['depth'])
def test_select_events_within_cell(self):
# Tests the selection of events within a cell centred on the point
self.point_source = mtkPointSource('101', 'A Point Source')
simple_point = Point(4.5, 4.5)
self.point_source.create_geometry(simple_point, 0., 30.)
self.catalogue = Catalogue()
self.catalogue.data['eventID'] = np.arange(0, 7, 1)
self.catalogue.data['longitude'] = np.arange(4.0, 7.5, 0.5)
self.catalogue.data['latitude'] = np.arange(4.0, 7.5, 0.5)
self.catalogue.data['depth'] = np.ones(7, dtype=float)
selector0 = CatalogueSelector(self.catalogue)
# Simple case - 200 km by 200 km cell centred on point
self.point_source.select_catalogue_within_cell(selector0, 100.)
np.testing.assert_array_almost_equal(
np.array([4., 4.5, 5.]),
self.point_source.catalogue.data['longitude'])
np.testing.assert_array_almost_equal(
np.array([4., 4.5, 5.]),
self.point_source.catalogue.data['latitude'])
np.testing.assert_array_almost_equal(
np.array([1., 1., 1.]),
self.point_source.catalogue.data['depth'])
def test_select_catalogue(self):
# Tests the select_catalogue function - essentially a wrapper to the
# two selection functions
self.point_source = mtkPointSource('101', 'A Point Source')
simple_point = Point(4.5, 4.5)
self.point_source.create_geometry(simple_point, 0., 30.)
# Bad case - no events in catalogue
self.catalogue = Catalogue()
selector0 = CatalogueSelector(self.catalogue)
with self.assertRaises(ValueError) as ver:
self.point_source.select_catalogue(selector0, 100.)
self.assertEqual(str(ver.exception),
'No events found in catalogue!')
# Create a catalogue
self.catalogue = Catalogue()
self.catalogue.data['eventID'] = np.arange(0, 7, 1)
self.catalogue.data['longitude'] = np.arange(4.0, 7.5, 0.5)
self.catalogue.data['latitude'] = np.arange(4.0, 7.5, 0.5)
self.catalogue.data['depth'] = np.ones(7, dtype=float)
selector0 = CatalogueSelector(self.catalogue)
# To ensure that square function is called - compare against direct
# instance
# First implementation - compare select within distance
self.point_source.select_catalogue_within_distance(selector0,
100.,
'epicentral')
expected_catalogue = deepcopy(self.point_source.catalogue)
self.point_source.catalogue = None # Reset catalogue
self.point_source.select_catalogue(selector0, 100., 'circle')
np.testing.assert_array_equal(
self.point_source.catalogue.data['eventID'],
expected_catalogue.data['eventID'])
# Second implementation - compare select within cell
expected_catalogue = None
self.point_source.select_catalogue_within_cell(selector0, 150.)
expected_catalogue = deepcopy(self.point_source.catalogue)
self.point_source.catalogue = None # Reset catalogue
self.point_source.select_catalogue(selector0, 150., 'square')
np.testing.assert_array_equal(
self.point_source.catalogue.data['eventID'],
expected_catalogue.data['eventID'])
# Finally ensure error is raised when input is neither
# 'circle' nor 'square'
with self.assertRaises(ValueError) as ver:
self.point_source.select_catalogue(selector0, 100., 'bad input')
self.assertEqual(str(ver.exception),
'Unrecognised selection type for point source!')
def test_create_oq_hazardlib_point_source(self):
# Tests the function to create a point source model
mfd1 = TruncatedGRMFD(5.0, 8.0, 0.1, 3.0, 1.0)
self.point_source = mtkPointSource(
'001',
'A Point Source',
trt='Active Shallow Crust',
geometry=Point(10., 10.),
upper_depth=0.,
lower_depth=20.,
mag_scale_rel=None,
rupt_aspect_ratio=1.0,
mfd=mfd1,
nodal_plane_dist=None,
hypo_depth_dist=None)
test_source = self.point_source.create_oqhazardlib_source(
TOM, 2.0, True)
self.assertIsInstance(test_source, PointSource)
self.assertIsInstance(test_source.mfd, TruncatedGRMFD)
self.assertAlmostEqual(test_source.mfd.b_val, 1.0)
self.assertIsInstance(test_source.nodal_plane_distribution, PMF)
self.assertIsInstance(test_source.hypocenter_distribution, PMF)
self.assertIsInstance(test_source.magnitude_scaling_relationship,
WC1994)
def tearDown(self):
warnings.resetwarnings()
| [
"openquake.hazardlib.mfd.truncated_gr.TruncatedGRMFD",
"numpy.ones",
"openquake.hazardlib.tom.PoissonTOM",
"openquake.hmtk.seismicity.catalogue.Catalogue",
"numpy.testing.assert_array_equal",
"warnings.resetwarnings",
"openquake.hmtk.sources.point_source.mtkPointSource",
"openquake.hazardlib.geo.point... | [((2647, 2663), 'openquake.hazardlib.tom.PoissonTOM', 'PoissonTOM', (['(50.0)'], {}), '(50.0)\n', (2657, 2663), False, 'from openquake.hazardlib.tom import PoissonTOM\n'), ((3081, 3112), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (3102, 3112), False, 'import warnings\n'), ((3171, 3182), 'openquake.hmtk.seismicity.catalogue.Catalogue', 'Catalogue', ([], {}), '()\n', (3180, 3182), False, 'from openquake.hmtk.seismicity.catalogue import Catalogue\n'), ((3211, 3250), 'openquake.hmtk.sources.point_source.mtkPointSource', 'mtkPointSource', (['"""101"""', '"""A Point Source"""'], {}), "('101', 'A Point Source')\n", (3225, 3250), False, 'from openquake.hmtk.sources.point_source import mtkPointSource\n'), ((4809, 4824), 'openquake.hazardlib.geo.point.Point', 'Point', (['(2.0)', '(3.0)'], {}), '(2.0, 3.0)\n', (4814, 4824), False, 'from openquake.hazardlib.geo.point import Point\n'), ((4854, 4874), 'numpy.array', 'np.array', (['[2.0, 3.0]'], {}), '([2.0, 3.0])\n', (4862, 4874), True, 'import numpy as np\n'), ((5526, 5565), 'openquake.hmtk.sources.point_source.mtkPointSource', 'mtkPointSource', (['"""101"""', '"""A Point Source"""'], {}), "('101', 'A Point Source')\n", (5540, 5565), False, 'from openquake.hmtk.sources.point_source import mtkPointSource\n'), ((6192, 6231), 'openquake.hmtk.sources.point_source.mtkPointSource', 'mtkPointSource', (['"""101"""', '"""A Point Source"""'], {}), "('101', 'A Point Source')\n", (6206, 6231), False, 'from openquake.hmtk.sources.point_source import mtkPointSource\n'), ((6639, 6678), 'openquake.hmtk.sources.point_source.mtkPointSource', 'mtkPointSource', (['"""101"""', '"""A Point Source"""'], {}), "('101', 'A Point Source')\n", (6653, 6678), False, 'from openquake.hmtk.sources.point_source import mtkPointSource\n'), ((6702, 6717), 'openquake.hazardlib.geo.point.Point', 'Point', (['(4.5)', '(4.5)'], {}), '(4.5, 4.5)\n', (6707, 6717), False, 'from 
openquake.hazardlib.geo.point import Point\n'), ((6760, 6778), 'numpy.arange', 'np.arange', (['(0)', '(7)', '(1)'], {}), '(0, 7, 1)\n', (6769, 6778), True, 'import numpy as np\n'), ((6822, 6846), 'numpy.arange', 'np.arange', (['(4.0)', '(7.5)', '(0.5)'], {}), '(4.0, 7.5, 0.5)\n', (6831, 6846), True, 'import numpy as np\n'), ((6889, 6913), 'numpy.arange', 'np.arange', (['(4.0)', '(7.5)', '(0.5)'], {}), '(4.0, 7.5, 0.5)\n', (6898, 6913), True, 'import numpy as np\n'), ((6953, 6976), 'numpy.ones', 'np.ones', (['(7)'], {'dtype': 'float'}), '(7, dtype=float)\n', (6960, 6976), True, 'import numpy as np\n'), ((7048, 7081), 'openquake.hmtk.seismicity.selector.CatalogueSelector', 'CatalogueSelector', (['self.catalogue'], {}), '(self.catalogue)\n', (7065, 7081), False, 'from openquake.hmtk.seismicity.selector import CatalogueSelector\n'), ((8718, 8757), 'openquake.hmtk.sources.point_source.mtkPointSource', 'mtkPointSource', (['"""101"""', '"""A Point Source"""'], {}), "('101', 'A Point Source')\n", (8732, 8757), False, 'from openquake.hmtk.sources.point_source import mtkPointSource\n'), ((8781, 8796), 'openquake.hazardlib.geo.point.Point', 'Point', (['(4.5)', '(4.5)'], {}), '(4.5, 4.5)\n', (8786, 8796), False, 'from openquake.hazardlib.geo.point import Point\n'), ((8887, 8898), 'openquake.hmtk.seismicity.catalogue.Catalogue', 'Catalogue', ([], {}), '()\n', (8896, 8898), False, 'from openquake.hmtk.seismicity.catalogue import Catalogue\n'), ((8940, 8958), 'numpy.arange', 'np.arange', (['(0)', '(7)', '(1)'], {}), '(0, 7, 1)\n', (8949, 8958), True, 'import numpy as np\n'), ((9002, 9026), 'numpy.arange', 'np.arange', (['(4.0)', '(7.5)', '(0.5)'], {}), '(4.0, 7.5, 0.5)\n', (9011, 9026), True, 'import numpy as np\n'), ((9069, 9093), 'numpy.arange', 'np.arange', (['(4.0)', '(7.5)', '(0.5)'], {}), '(4.0, 7.5, 0.5)\n', (9078, 9093), True, 'import numpy as np\n'), ((9133, 9156), 'numpy.ones', 'np.ones', (['(7)'], {'dtype': 'float'}), '(7, dtype=float)\n', (9140, 9156), True, 'import 
numpy as np\n'), ((9177, 9210), 'openquake.hmtk.seismicity.selector.CatalogueSelector', 'CatalogueSelector', (['self.catalogue'], {}), '(self.catalogue)\n', (9194, 9210), False, 'from openquake.hmtk.seismicity.selector import CatalogueSelector\n'), ((9946, 9985), 'openquake.hmtk.sources.point_source.mtkPointSource', 'mtkPointSource', (['"""101"""', '"""A Point Source"""'], {}), "('101', 'A Point Source')\n", (9960, 9985), False, 'from openquake.hmtk.sources.point_source import mtkPointSource\n'), ((10009, 10024), 'openquake.hazardlib.geo.point.Point', 'Point', (['(4.5)', '(4.5)'], {}), '(4.5, 4.5)\n', (10014, 10024), False, 'from openquake.hazardlib.geo.point import Point\n'), ((10160, 10171), 'openquake.hmtk.seismicity.catalogue.Catalogue', 'Catalogue', ([], {}), '()\n', (10169, 10171), False, 'from openquake.hmtk.seismicity.catalogue import Catalogue\n'), ((10192, 10225), 'openquake.hmtk.seismicity.selector.CatalogueSelector', 'CatalogueSelector', (['self.catalogue'], {}), '(self.catalogue)\n', (10209, 10225), False, 'from openquake.hmtk.seismicity.selector import CatalogueSelector\n'), ((10508, 10519), 'openquake.hmtk.seismicity.catalogue.Catalogue', 'Catalogue', ([], {}), '()\n', (10517, 10519), False, 'from openquake.hmtk.seismicity.catalogue import Catalogue\n'), ((10561, 10579), 'numpy.arange', 'np.arange', (['(0)', '(7)', '(1)'], {}), '(0, 7, 1)\n', (10570, 10579), True, 'import numpy as np\n'), ((10623, 10647), 'numpy.arange', 'np.arange', (['(4.0)', '(7.5)', '(0.5)'], {}), '(4.0, 7.5, 0.5)\n', (10632, 10647), True, 'import numpy as np\n'), ((10690, 10714), 'numpy.arange', 'np.arange', (['(4.0)', '(7.5)', '(0.5)'], {}), '(4.0, 7.5, 0.5)\n', (10699, 10714), True, 'import numpy as np\n'), ((10754, 10777), 'numpy.ones', 'np.ones', (['(7)'], {'dtype': 'float'}), '(7, dtype=float)\n', (10761, 10777), True, 'import numpy as np\n'), ((10798, 10831), 'openquake.hmtk.seismicity.selector.CatalogueSelector', 'CatalogueSelector', (['self.catalogue'], {}), 
'(self.catalogue)\n', (10815, 10831), False, 'from openquake.hmtk.seismicity.selector import CatalogueSelector\n'), ((11229, 11266), 'copy.deepcopy', 'deepcopy', (['self.point_source.catalogue'], {}), '(self.point_source.catalogue)\n', (11237, 11266), False, 'from copy import deepcopy\n'), ((11407, 11521), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["self.point_source.catalogue.data['eventID']", "expected_catalogue.data['eventID']"], {}), "(self.point_source.catalogue.data['eventID'],\n expected_catalogue.data['eventID'])\n", (11436, 11521), True, 'import numpy as np\n'), ((11741, 11778), 'copy.deepcopy', 'deepcopy', (['self.point_source.catalogue'], {}), '(self.point_source.catalogue)\n', (11749, 11778), False, 'from copy import deepcopy\n'), ((11919, 12033), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (["self.point_source.catalogue.data['eventID']", "expected_catalogue.data['eventID']"], {}), "(self.point_source.catalogue.data['eventID'],\n expected_catalogue.data['eventID'])\n", (11948, 12033), True, 'import numpy as np\n'), ((12527, 12566), 'openquake.hazardlib.mfd.truncated_gr.TruncatedGRMFD', 'TruncatedGRMFD', (['(5.0)', '(8.0)', '(0.1)', '(3.0)', '(1.0)'], {}), '(5.0, 8.0, 0.1, 3.0, 1.0)\n', (12541, 12566), False, 'from openquake.hazardlib.mfd.truncated_gr import TruncatedGRMFD\n'), ((13516, 13540), 'warnings.resetwarnings', 'warnings.resetwarnings', ([], {}), '()\n', (13538, 13540), False, 'import warnings\n'), ((7354, 7373), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (7362, 7373), True, 'import numpy as np\n'), ((7490, 7515), 'numpy.array', 'np.array', (['[4.0, 4.5, 5.0]'], {}), '([4.0, 4.5, 5.0])\n', (7498, 7515), True, 'import numpy as np\n'), ((7633, 7658), 'numpy.array', 'np.array', (['[4.0, 4.5, 5.0]'], {}), '([4.0, 4.5, 5.0])\n', (7641, 7658), True, 'import numpy as np\n'), ((7775, 7800), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (7783, 7800), 
True, 'import numpy as np\n'), ((8097, 8110), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (8105, 8110), True, 'import numpy as np\n'), ((8228, 8243), 'numpy.array', 'np.array', (['[4.5]'], {}), '([4.5])\n', (8236, 8243), True, 'import numpy as np\n'), ((8363, 8378), 'numpy.array', 'np.array', (['[4.5]'], {}), '([4.5])\n', (8371, 8378), True, 'import numpy as np\n'), ((8497, 8512), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (8505, 8512), True, 'import numpy as np\n'), ((9405, 9430), 'numpy.array', 'np.array', (['[4.0, 4.5, 5.0]'], {}), '([4.0, 4.5, 5.0])\n', (9413, 9430), True, 'import numpy as np\n'), ((9548, 9573), 'numpy.array', 'np.array', (['[4.0, 4.5, 5.0]'], {}), '([4.0, 4.5, 5.0])\n', (9556, 9573), True, 'import numpy as np\n'), ((9690, 9715), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0])\n', (9698, 9715), True, 'import numpy as np\n'), ((12721, 12738), 'openquake.hazardlib.geo.point.Point', 'Point', (['(10.0)', '(10.0)'], {}), '(10.0, 10.0)\n', (12726, 12738), False, 'from openquake.hazardlib.geo.point import Point\n')] |
import unittest
import numpy as np
from pax import core, plugin
from pax.datastructure import Event, Peak
class TestPosRecMaxPMT(unittest.TestCase):
def setUp(self):
self.pax = core.Processor(config_names='XENON100', just_testing=True, config_dict={'pax': {
'plugin_group_names': ['test'],
'test': 'MaxPMT.PosRecMaxPMT'}})
self.plugin = self.pax.get_plugin_by_name('PosRecMaxPMT')
def tearDown(self):
delattr(self, 'pax')
delattr(self, 'plugin')
@staticmethod
def example_event(channels_with_something, area_per_channel=1):
bla = np.zeros(243)
bla[np.array(channels_with_something)] = area_per_channel
e = Event.empty_event()
e.peaks.append(Peak({'left': 5,
'right': 9,
'type': 'S2',
'detector': 'tpc',
'area_per_channel': bla}))
return e
def test_get_maxpmt_plugin(self):
self.assertIsInstance(self.plugin, plugin.TransformPlugin)
self.assertEqual(self.plugin.__class__.__name__, 'PosRecMaxPMT')
def test_posrec(self):
"""Test a hitpattern of all ones and one 2 (at PMT 42)"""
ch = 42 # Could test more locations, little point
hitp = np.ones(len(self.plugin.config['channels_top']))
hitp[ch] = 2
e = self.example_event(channels_with_something=self.plugin.config['channels_top'],
area_per_channel=hitp)
e = self.plugin.transform_event(e)
self.assertIsInstance(e, Event)
self.assertEqual(len(e.peaks), 1)
self.assertEqual(len(e.S2s()), 1)
self.assertEqual(len(e.peaks[0].reconstructed_positions), 1)
rp = e.peaks[0].reconstructed_positions[0]
self.assertEqual(rp.algorithm, self.plugin.name)
self.assertEqual(rp.x, self.plugin.config['pmts'][ch]['position']['x'])
self.assertEqual(rp.y, self.plugin.config['pmts'][ch]['position']['y'])
if __name__ == '__main__':
unittest.main()
| [
"pax.core.Processor",
"numpy.array",
"numpy.zeros",
"pax.datastructure.Event.empty_event",
"unittest.main",
"pax.datastructure.Peak"
] | [((2092, 2107), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2105, 2107), False, 'import unittest\n'), ((193, 342), 'pax.core.Processor', 'core.Processor', ([], {'config_names': '"""XENON100"""', 'just_testing': '(True)', 'config_dict': "{'pax': {'plugin_group_names': ['test'], 'test': 'MaxPMT.PosRecMaxPMT'}}"}), "(config_names='XENON100', just_testing=True, config_dict={\n 'pax': {'plugin_group_names': ['test'], 'test': 'MaxPMT.PosRecMaxPMT'}})\n", (207, 342), False, 'from pax import core, plugin\n'), ((630, 643), 'numpy.zeros', 'np.zeros', (['(243)'], {}), '(243)\n', (638, 643), True, 'import numpy as np\n'), ((722, 741), 'pax.datastructure.Event.empty_event', 'Event.empty_event', ([], {}), '()\n', (739, 741), False, 'from pax.datastructure import Event, Peak\n'), ((656, 689), 'numpy.array', 'np.array', (['channels_with_something'], {}), '(channels_with_something)\n', (664, 689), True, 'import numpy as np\n'), ((765, 856), 'pax.datastructure.Peak', 'Peak', (["{'left': 5, 'right': 9, 'type': 'S2', 'detector': 'tpc', 'area_per_channel':\n bla}"], {}), "({'left': 5, 'right': 9, 'type': 'S2', 'detector': 'tpc',\n 'area_per_channel': bla})\n", (769, 856), False, 'from pax.datastructure import Event, Peak\n')] |
"""
Provides infilling support for stitched IFCB rev 1 images
Based on pyifcb API
"""
import numpy as np
from scipy.interpolate import Rbf
from functools32 import lru_cache
from ifcb.data.utils import BaseDictlike
from ifcb.data.stitching import Stitcher
_LEGACY_EPS = 0.000001
def normz(a):
m = np.max(a) + _LEGACY_EPS
return a / m
def avg(l):
return sum(l) / len(l)
def mv(eh):
eps = _LEGACY_EPS # no dividing by zero
colors = np.arange(256)
n = np.sum(eh) + eps
s = np.sum(eh * colors)
mean = s / n
variance = np.sum((colors - mean) ** 2 * eh) / n
return (mean, variance)
# FIXME this is still too sensitive to lower modes
def bright_mv(image, mask=None):
b = np.arange(257)
if mask is not None:
eh, _ = np.histogram(image[mask].ravel(), bins=b)
else:
eh, _ = np.histogram(image.ravel(), bins=b)
return bright_mv_hist(eh)
_BMH_KERNEL = np.array([2, 2, 2, 2, 2, 4, 8, 2, 1, 1, 1, 1, 1])
def bright_mv_hist(histogram, exclude=[0, 255]):
histogram = np.array(histogram)
histogram[np.array(exclude)] = 0
# smooth the filter, preferring peaks with sharp declines on the higher luminance end
peak = np.convolve(histogram, _BMH_KERNEL, "same")
# now smooth that to eliminate noise
peak = np.convolve(peak, np.ones(9), "same")
# scale original signal to the normalized smoothed signal;
# that will tend to deattenuate secondary peaks, and reduce variance of bimodal distros
scaled = normz(peak) ** 20 * histogram
# now compute mean and variance
return mv(scaled)
def extract_background(image, estimated_background):
bg_mask = image - estimated_background
# now compute threshold from histogram
h, _ = np.histogram(bg_mask, bins=np.arange(257))
# reject dark part with threshold
total = np.sum(h)
running = np.cumsum(h)
threshold = np.argmax(running > total * 0.95)
table = np.zeros(256, dtype=np.uint8)
table[threshold:] = 255
bg_mask = np.take(table, bg_mask)
m = np.logical_not(bg_mask)
bg_mask[m] = image[m]
return bg_mask
def edges_mask(b, target_number, raw_stitch):
r1, r2 = target_number, target_number + 1
ns = b.schema
x = min(b[r1][ns.ROI_X], b[r2][ns.ROI_X])
y = min(b[r1][ns.ROI_Y], b[r2][ns.ROI_Y])
inset_factor = 25
insets = []
for r in [r1, r2]:
insets += [
b[r][ns.ROI_WIDTH] / inset_factor,
b[r][ns.ROI_HEIGHT] / inset_factor,
]
inset = np.sum(insets) / len(insets) # integer division
edges = raw_stitch.mask == False
for r in [r1, r2]:
rx = b[r][ns.ROI_X] - x
ry = b[r][ns.ROI_Y] - y
edges[
ry + inset : ry + b[r][ns.ROI_HEIGHT] - inset - 1,
rx + inset : rx + b[r][ns.ROI_WIDTH] - inset - 1,
] = False
return edges
def infill(b, target_number, raw_stitch):
# step 1: compute masks
s = raw_stitch.filled(fill_value=0)
rois_mask = raw_stitch.mask == False
gaps_mask = raw_stitch.mask # where the missing data is
edges = edges_mask(b, target_number, raw_stitch)
# step 2: estimate background from edges
# compute the mean and variance of the edges
mean, variance = bright_mv(s, edges)
# now use that as an estimated background
s[gaps_mask] = mean
# step 3: compute "probable background": low luminance delta from estimated bg
w, h = s.shape
flat_bg = np.full((w, h), mean, dtype=np.uint8)
bg = extract_background(s, flat_bg)
# also mask out the gaps, which are not "probable background"
bg[gaps_mask] = 255
# step 3a: improve mean/variance estimate
mean, variance = bright_mv(bg)
std_dev = np.sqrt(variance)
# step 4: sample probable background to compute RBF for illumination gradient
# grid
div = 6
means, nodes = [], []
rad = avg([h, w]) / div
rad_step = int(rad / 2) + 1
for x in range(0, h + rad, rad):
for y in range(0, w + rad, rad):
for r in range(rad, max(h, w), int(rad / 3) + 1):
x1, y1, x2, y2 = (
max(0, x - r),
max(0, y - r),
min(h - 1, x + r),
min(w - 1, y + r),
)
region = bg[y1:y2, x1:x2]
nabe, _ = np.histogram(region, bins=np.arange(257))
(m, v) = bright_mv_hist(nabe)
if m > 0 and m < 255: # reject outliers
nodes.append((x, y))
means.append(m)
break
# now construct radial basis functions for mean, based on the samples
mean_rbf = Rbf([x for x, y in nodes], [y for x, y in nodes], means, epsilon=rad)
# step 5: fill gaps with mean based on RBF and variance from bright_mv(edges)
np.random.seed(0)
noise = np.full((w, h), mean)
mx, my = np.where(gaps_mask)
noise[mx, my] = mean_rbf(my, mx)
std_dev *= 0.66 # err on the side of smoother rather than noisier
gaussian = np.random.normal(0, std_dev, size=mx.shape[0])
noise[mx, my] += gaussian
# step 6: final composite
s[gaps_mask] = noise[gaps_mask]
return s, rois_mask
class InfilledImages(BaseDictlike):
"""
Wraps ``Bin`` to perform infilling of stitched images.
Provides dict-like interface; keys are target numbers,
each value is a pair of the infilled image and a mask
indicating which pixels contain real (non-infill) data.
Images that do not need to be infilled are also returned,
but with None as the second (mask) pair of the tuple
"""
def __init__(self, the_bin):
"""
:param the_bin: the bin to delegate to
:type the_bin: Bin
"""
self.bin = the_bin
self.stitcher = Stitcher(the_bin)
@lru_cache()
def excluded_targets(self):
"""
Returns the target numbers of the targets that should
be ignored in the original raw bin, because those targets
are the second of a pair of stitched ROIs.
This is just each included key + 1.
"""
return map(lambda x: x + 1, self.stitcher.keys())
def iterkeys(self):
for k in self.bin:
if k not in self.excluded_targets():
yield k
def has_key(self, target_number):
in_bin = target_number in self.bin.images
excluded = target_number in self.excluded_targets()
return in_bin and not excluded
@lru_cache(maxsize=2)
def __getitem__(self, target_number):
if target_number in self.stitcher:
raw_stitch = self.stitcher[target_number]
im, mask = infill(self.bin, target_number, raw_stitch)
return (im, mask)
else:
im = self.bin.images[target_number]
return (im, None)
| [
"numpy.convolve",
"numpy.sqrt",
"numpy.logical_not",
"numpy.array",
"numpy.cumsum",
"numpy.arange",
"numpy.where",
"numpy.max",
"numpy.take",
"numpy.random.seed",
"numpy.random.normal",
"numpy.ones",
"numpy.argmax",
"ifcb.data.stitching.Stitcher",
"functools32.lru_cache",
"numpy.sum",
... | [((925, 974), 'numpy.array', 'np.array', (['[2, 2, 2, 2, 2, 4, 8, 2, 1, 1, 1, 1, 1]'], {}), '([2, 2, 2, 2, 2, 4, 8, 2, 1, 1, 1, 1, 1])\n', (933, 974), True, 'import numpy as np\n'), ((459, 473), 'numpy.arange', 'np.arange', (['(256)'], {}), '(256)\n', (468, 473), True, 'import numpy as np\n'), ((507, 526), 'numpy.sum', 'np.sum', (['(eh * colors)'], {}), '(eh * colors)\n', (513, 526), True, 'import numpy as np\n'), ((719, 733), 'numpy.arange', 'np.arange', (['(257)'], {}), '(257)\n', (728, 733), True, 'import numpy as np\n'), ((1042, 1061), 'numpy.array', 'np.array', (['histogram'], {}), '(histogram)\n', (1050, 1061), True, 'import numpy as np\n'), ((1200, 1243), 'numpy.convolve', 'np.convolve', (['histogram', '_BMH_KERNEL', '"""same"""'], {}), "(histogram, _BMH_KERNEL, 'same')\n", (1211, 1243), True, 'import numpy as np\n'), ((1835, 1844), 'numpy.sum', 'np.sum', (['h'], {}), '(h)\n', (1841, 1844), True, 'import numpy as np\n'), ((1859, 1871), 'numpy.cumsum', 'np.cumsum', (['h'], {}), '(h)\n', (1868, 1871), True, 'import numpy as np\n'), ((1888, 1921), 'numpy.argmax', 'np.argmax', (['(running > total * 0.95)'], {}), '(running > total * 0.95)\n', (1897, 1921), True, 'import numpy as np\n'), ((1934, 1963), 'numpy.zeros', 'np.zeros', (['(256)'], {'dtype': 'np.uint8'}), '(256, dtype=np.uint8)\n', (1942, 1963), True, 'import numpy as np\n'), ((2006, 2029), 'numpy.take', 'np.take', (['table', 'bg_mask'], {}), '(table, bg_mask)\n', (2013, 2029), True, 'import numpy as np\n'), ((2038, 2061), 'numpy.logical_not', 'np.logical_not', (['bg_mask'], {}), '(bg_mask)\n', (2052, 2061), True, 'import numpy as np\n'), ((3450, 3487), 'numpy.full', 'np.full', (['(w, h)', 'mean'], {'dtype': 'np.uint8'}), '((w, h), mean, dtype=np.uint8)\n', (3457, 3487), True, 'import numpy as np\n'), ((3713, 3730), 'numpy.sqrt', 'np.sqrt', (['variance'], {}), '(variance)\n', (3720, 3730), True, 'import numpy as np\n'), ((4668, 4737), 'scipy.interpolate.Rbf', 'Rbf', (['[x for x, y in nodes]', '[y 
for x, y in nodes]', 'means'], {'epsilon': 'rad'}), '([x for x, y in nodes], [y for x, y in nodes], means, epsilon=rad)\n', (4671, 4737), False, 'from scipy.interpolate import Rbf\n'), ((4824, 4841), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (4838, 4841), True, 'import numpy as np\n'), ((4854, 4875), 'numpy.full', 'np.full', (['(w, h)', 'mean'], {}), '((w, h), mean)\n', (4861, 4875), True, 'import numpy as np\n'), ((4889, 4908), 'numpy.where', 'np.where', (['gaps_mask'], {}), '(gaps_mask)\n', (4897, 4908), True, 'import numpy as np\n'), ((5032, 5078), 'numpy.random.normal', 'np.random.normal', (['(0)', 'std_dev'], {'size': 'mx.shape[0]'}), '(0, std_dev, size=mx.shape[0])\n', (5048, 5078), True, 'import numpy as np\n'), ((5817, 5828), 'functools32.lru_cache', 'lru_cache', ([], {}), '()\n', (5826, 5828), False, 'from functools32 import lru_cache\n'), ((6486, 6506), 'functools32.lru_cache', 'lru_cache', ([], {'maxsize': '(2)'}), '(maxsize=2)\n', (6495, 6506), False, 'from functools32 import lru_cache\n'), ((305, 314), 'numpy.max', 'np.max', (['a'], {}), '(a)\n', (311, 314), True, 'import numpy as np\n'), ((482, 492), 'numpy.sum', 'np.sum', (['eh'], {}), '(eh)\n', (488, 492), True, 'import numpy as np\n'), ((559, 592), 'numpy.sum', 'np.sum', (['((colors - mean) ** 2 * eh)'], {}), '((colors - mean) ** 2 * eh)\n', (565, 592), True, 'import numpy as np\n'), ((1076, 1093), 'numpy.array', 'np.array', (['exclude'], {}), '(exclude)\n', (1084, 1093), True, 'import numpy as np\n'), ((1314, 1324), 'numpy.ones', 'np.ones', (['(9)'], {}), '(9)\n', (1321, 1324), True, 'import numpy as np\n'), ((2511, 2525), 'numpy.sum', 'np.sum', (['insets'], {}), '(insets)\n', (2517, 2525), True, 'import numpy as np\n'), ((5793, 5810), 'ifcb.data.stitching.Stitcher', 'Stitcher', (['the_bin'], {}), '(the_bin)\n', (5801, 5810), False, 'from ifcb.data.stitching import Stitcher\n'), ((1769, 1783), 'numpy.arange', 'np.arange', (['(257)'], {}), '(257)\n', (1778, 1783), True, 'import 
numpy as np\n'), ((4357, 4371), 'numpy.arange', 'np.arange', (['(257)'], {}), '(257)\n', (4366, 4371), True, 'import numpy as np\n')] |
from math import sqrt
from baseClasses.Template import *
import os, numpy as np, random
from helper.functions import outputObj, loadOBJ, scaleValues
from PIL import Image as im
class EurecomTemplate(Template):
folderTemplate = None
faceMarks = []
layersChar = None
overFlow = None
underFlow = None
def loadImage(self):
if self.rawRepr[-3:] == 'bmp':
imageFace = im.open(self.rawRepr).convert('L')
self.layersChar = np.zeros((imageFace.size[0],imageFace.size[1],4))
else:
a, b, imageFace, y = loadOBJ(os.path.join('temporaryTemplate',str(self.itemClass) + '_' + self.folderTemplate + '_' + self.typeTemplate + '.obj'))
self.image = imageFace
def loadSymFilledImage(self):
if self.lazyLoading:
self.rawRepr = self.rawRepr[0:-4] + '_symmetricfilled.obj'
else:
a, b, imageFace, y = loadOBJ(self.rawRepr[0:-4] + '_symmetricfilled.obj')
self.image = imageFace
def save(self,saveOnPath=False):
if (not saveOnPath):
if (not os.path.exists('temporaryTemplate')):
os.makedirs('temporaryTemplate')
outputObj(self.image,os.path.join('temporaryTemplate',str(self.itemClass) + '_' + self.folderTemplate + '_' + self.typeTemplate + '.obj'))
self.outputMarks()
else:
pathCImg = self.rawRepr.split(os.path.sep)
if pathCImg.index('EURECOM_Kinect_Face_Dataset') >= 0:
fileName = pathCImg[-1]
pathCImg = os.path.sep.join(pathCImg[0:-2])
self.image.save(os.path.join(pathCImg, 'Depth', 'DepthBMP',
fileName[0:-4] + '_newdepth.bmp'))
else:
self.image.save(self.rawRepr[0:-4] + '_newdepth.bmp')
def existsPreProcessingFile(self):
fullImgPath = ''
pathCImg = self.rawRepr.split(os.path.sep)
if pathCImg.index('EURECOM_Kinect_Face_Dataset') >= 0:
fileName = pathCImg[-1]
pathCImg = os.path.sep.join(pathCImg[0:-2])
fullImgPath = os.path.join(pathCImg, 'Depth', 'DepthBMP',
fileName[0:-4] + '_newdepth.bmp')
else:
fullImgPath = self.rawRepr[0:-4] + '_newdepth.bmp'
return os.path.exists(fullImgPath)
def outputMarks(self,saveOnPath=False,typeTemplate='Depth'):
if (not saveOnPath):
if (not os.path.exists('temporaryTemplate')):
os.makedirs('temporaryTemplate')
f = open(os.path.join('temporaryTemplate',str(self.itemClass) + '_' + self.folderTemplate + '_' + self.typeTemplate + '.txt'),'w')
f.write('\n'.join([ '\t'.join(map(str,x)) for x in self.faceMarks]))
f.close()
else:
filesPath = self.rawRepr.split('/')
fileName = filesPath[-1].split('.')
fileName = fileName[0].split('_')
if typeTemplate == 'Depth':
filesPath = os.path.join('/'.join(filesPath[:-3]),'Mark','MarkRGB','rgb_'+fileName[1]+'_'+filesPath[:-3][len(filesPath) - 4]+'_'+fileName[3]+'_Points_newdepth.txt')
elif typeTemplate == '3DObj':
filesPath = os.path.join('/'.join(filesPath[:-3]),'Mark','Mark3DObj','depth_'+fileName[1]+'_'+filesPath[:-3][len(filesPath) - 4]+'_'+fileName[3]+'_Points_OBJ_newdepth.txt')
f = open(filesPath,'w')
f.write('\n'.join([ '\t'.join(map(str,x)) for x in self.faceMarks]))
f.close()
def loadMarks(self,typeTemplate='Depth'):
if os.path.sep in typeTemplate:
typeTemplate = typeTemplate.split(os.path.sep)
typeTemplate = typeTemplate[0]
filesPath = self.rawRepr.split(os.path.sep)
fileName = filesPath[-1].split('.')
fileName = fileName[0].split('_')
if typeTemplate.lower() == 'depth':
filesPath = os.path.join(os.path.sep.join(filesPath[:-3]),'Mark','MarkRGB','rgb_'+fileName[1]+'_'+filesPath[:-3][len(filesPath) - 4]+'_'+fileName[3]+'_Points.txt')
elif typeTemplate.lower() == '3dobj':
filesPath = os.path.join(os.path.sep.join(filesPath[:-3]),'Mark','Mark3DObj','depth_'+fileName[1]+'_'+filesPath[:-3][len(filesPath) - 4]+'_'+fileName[3]+'_Points_OBJ.txt')
elif typeTemplate.lower() == 'newdepth':
filesPath = os.path.join(os.path.sep.join(filesPath[:-3]),'Mark','MarkRGB','rgb_'+fileName[1]+'_'+filesPath[:-3][len(filesPath) - 4]+'_'+fileName[3]+'_Points_newdepth.txt')
self.faceMarks = []
if (os.path.exists(filesPath)):
fileMark = open(filesPath,'r')
for p in fileMark:
self.faceMarks.append(list(map(float,p.split('\t'))))
else:
filesPath = self.rawRepr.split(os.path.sep)
filesPath = os.path.join(os.path.sep.join(filesPath[:-2]),'Mark','Mark3DObj','depth_'+fileName[1]+'_'+self.folderTemplate+'_'+fileName[3]+'_Points_OBJ.txt')
fileMark = open(filesPath,'r')
for p in fileMark:
self.faceMarks.append(list(map(float,p.split('\t'))))
def saveTXTChars(self):
f = open('teste.txt','w')
f.write(' '.join(map(str,self.features)) + '\n')
f.close()
def loadNewDepthImage(self):
self.image = im.open(self.rawRepr[0:-4] + '_newdepth.bmp')
self.loadMarks('newdepth')
def saveImageTraining(self,avgImageSave=True,pathImage='generated_images_lbp'):
if (avgImageSave):
avImage = np.zeros((self.layersChar.shape[0],self.layersChar.shape[1]))
for i in range(self.layersChar.shape[0]):
for j in range(self.layersChar.shape[1]):
avImage[i,j] = self.layersChar[i,j,0] + self.layersChar[i,j,1] + self.layersChar[i,j,2] + self.layersChar[i,j,3]
avImage[i,j] = avImage[i,j] / 4
avImage = im.fromarray(np.uint8(avImage))
fullPath = self.rawRepr.split(os.path.sep)
fullPath = fullPath[-1].split('.')
fullPath = fullPath[0]
self.layersChar = scaleValues(0,255,self.layersChar)
imageSaveDLP = im.fromarray(np.uint8(self.layersChar))
pathNImage = pathImage+'/'+str(self.itemClass) + '_' + self.folderTemplate + '_' + fullPath +'.png'
imageSaveDLP.save(pathNImage)
def saveHistogramImage(self,imageSave=None,folder='generated_images_wld'):
if (imageSave is None):
imageSave = self.features
fullPath = self.rawRepr.split(os.path.sep)
fullPath = fullPath[-1].split('.')
fullPath = fullPath[0]
imageSaveDLP = im.fromarray(imageSave)
pathNImage = folder + '/'+str(self.itemClass) + '_' + self.folderTemplate + '_' + fullPath +'.jpg'
while (os.path.exists(pathNImage)):
idxRandomIm = random.randint(1,255)
pathNImage = folder+'/'+str(self.itemClass) + '_' + self.folderTemplate + '_' + fullPath +'_'+str(idxRandomIm)+'.png'
imageSaveDLP.convert('RGB').save(pathNImage)
def saveMasks(self,folder,filetype):
if self.overFlow is None or self.underFlow is None:
return None
fullPath = self.rawRepr.split(os.path.sep)
fullPath = fullPath[-1].split('.')
fullPath = fullPath[0]
imageSaveDLP = None
if not os.path.exists(folder):
os.makedirs(folder)
pathNImage = folder+'/'+str(self.itemClass) + '_' + self.folderTemplate + '_' + fullPath + '_' + filetype + '.bmp'
if filetype == 'overflow':
self.overFlow = scaleValues(0,255,self.overFlow)
imageSaveDLP = im.fromarray(self.overFlow)
else:
self.underFlow = scaleValues(0, 255, self.underFlow)
imageSaveDLP = im.fromarray(self.underFlow)
imageSaveDLP.convert('RGB').save(pathNImage)
def isFileExists(self,pathImage,filetype='png'):
fullPath = self.rawRepr.split(os.path.sep)
fullPath = fullPath[-1].split('.')
fullPath = fullPath[0]
pathNImage = pathImage + '/' + str(self.itemClass) + '_' + self.folderTemplate + '_' + fullPath + '.' +filetype
return os.path.exists(pathNImage) | [
"helper.functions.scaleValues",
"os.path.exists",
"PIL.Image.fromarray",
"PIL.Image.open",
"numpy.uint8",
"os.makedirs",
"os.path.join",
"numpy.zeros",
"os.path.sep.join",
"random.randint",
"helper.functions.loadOBJ"
] | [((2344, 2371), 'os.path.exists', 'os.path.exists', (['fullImgPath'], {}), '(fullImgPath)\n', (2358, 2371), False, 'import os, numpy as np, random\n'), ((4618, 4643), 'os.path.exists', 'os.path.exists', (['filesPath'], {}), '(filesPath)\n', (4632, 4643), False, 'import os, numpy as np, random\n'), ((5367, 5412), 'PIL.Image.open', 'im.open', (["(self.rawRepr[0:-4] + '_newdepth.bmp')"], {}), "(self.rawRepr[0:-4] + '_newdepth.bmp')\n", (5374, 5412), True, 'from PIL import Image as im\n'), ((6163, 6199), 'helper.functions.scaleValues', 'scaleValues', (['(0)', '(255)', 'self.layersChar'], {}), '(0, 255, self.layersChar)\n', (6174, 6199), False, 'from helper.functions import outputObj, loadOBJ, scaleValues\n'), ((6706, 6729), 'PIL.Image.fromarray', 'im.fromarray', (['imageSave'], {}), '(imageSave)\n', (6718, 6729), True, 'from PIL import Image as im\n'), ((6852, 6878), 'os.path.exists', 'os.path.exists', (['pathNImage'], {}), '(pathNImage)\n', (6866, 6878), False, 'import os, numpy as np, random\n'), ((8240, 8266), 'os.path.exists', 'os.path.exists', (['pathNImage'], {}), '(pathNImage)\n', (8254, 8266), False, 'import os, numpy as np, random\n'), ((474, 525), 'numpy.zeros', 'np.zeros', (['(imageFace.size[0], imageFace.size[1], 4)'], {}), '((imageFace.size[0], imageFace.size[1], 4))\n', (482, 525), True, 'import os, numpy as np, random\n'), ((910, 962), 'helper.functions.loadOBJ', 'loadOBJ', (["(self.rawRepr[0:-4] + '_symmetricfilled.obj')"], {}), "(self.rawRepr[0:-4] + '_symmetricfilled.obj')\n", (917, 962), False, 'from helper.functions import outputObj, loadOBJ, scaleValues\n'), ((2073, 2105), 'os.path.sep.join', 'os.path.sep.join', (['pathCImg[0:-2]'], {}), '(pathCImg[0:-2])\n', (2089, 2105), False, 'import os, numpy as np, random\n'), ((2132, 2209), 'os.path.join', 'os.path.join', (['pathCImg', '"""Depth"""', '"""DepthBMP"""', "(fileName[0:-4] + '_newdepth.bmp')"], {}), "(pathCImg, 'Depth', 'DepthBMP', fileName[0:-4] + '_newdepth.bmp')\n", (2144, 2209), False, 
'import os, numpy as np, random\n'), ((5582, 5644), 'numpy.zeros', 'np.zeros', (['(self.layersChar.shape[0], self.layersChar.shape[1])'], {}), '((self.layersChar.shape[0], self.layersChar.shape[1]))\n', (5590, 5644), True, 'import os, numpy as np, random\n'), ((6234, 6259), 'numpy.uint8', 'np.uint8', (['self.layersChar'], {}), '(self.layersChar)\n', (6242, 6259), True, 'import os, numpy as np, random\n'), ((6907, 6929), 'random.randint', 'random.randint', (['(1)', '(255)'], {}), '(1, 255)\n', (6921, 6929), False, 'import os, numpy as np, random\n'), ((7407, 7429), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (7421, 7429), False, 'import os, numpy as np, random\n'), ((7443, 7462), 'os.makedirs', 'os.makedirs', (['folder'], {}), '(folder)\n', (7454, 7462), False, 'import os, numpy as np, random\n'), ((7649, 7683), 'helper.functions.scaleValues', 'scaleValues', (['(0)', '(255)', 'self.overFlow'], {}), '(0, 255, self.overFlow)\n', (7660, 7683), False, 'from helper.functions import outputObj, loadOBJ, scaleValues\n'), ((7709, 7736), 'PIL.Image.fromarray', 'im.fromarray', (['self.overFlow'], {}), '(self.overFlow)\n', (7721, 7736), True, 'from PIL import Image as im\n'), ((7780, 7815), 'helper.functions.scaleValues', 'scaleValues', (['(0)', '(255)', 'self.underFlow'], {}), '(0, 255, self.underFlow)\n', (7791, 7815), False, 'from helper.functions import outputObj, loadOBJ, scaleValues\n'), ((7843, 7871), 'PIL.Image.fromarray', 'im.fromarray', (['self.underFlow'], {}), '(self.underFlow)\n', (7855, 7871), True, 'from PIL import Image as im\n'), ((1085, 1120), 'os.path.exists', 'os.path.exists', (['"""temporaryTemplate"""'], {}), "('temporaryTemplate')\n", (1099, 1120), False, 'import os, numpy as np, random\n'), ((1139, 1171), 'os.makedirs', 'os.makedirs', (['"""temporaryTemplate"""'], {}), "('temporaryTemplate')\n", (1150, 1171), False, 'import os, numpy as np, random\n'), ((1558, 1590), 'os.path.sep.join', 'os.path.sep.join', (['pathCImg[0:-2]'], 
{}), '(pathCImg[0:-2])\n', (1574, 1590), False, 'import os, numpy as np, random\n'), ((2487, 2522), 'os.path.exists', 'os.path.exists', (['"""temporaryTemplate"""'], {}), "('temporaryTemplate')\n", (2501, 2522), False, 'import os, numpy as np, random\n'), ((2541, 2573), 'os.makedirs', 'os.makedirs', (['"""temporaryTemplate"""'], {}), "('temporaryTemplate')\n", (2552, 2573), False, 'import os, numpy as np, random\n'), ((3975, 4007), 'os.path.sep.join', 'os.path.sep.join', (['filesPath[:-3]'], {}), '(filesPath[:-3])\n', (3991, 4007), False, 'import os, numpy as np, random\n'), ((4897, 4929), 'os.path.sep.join', 'os.path.sep.join', (['filesPath[:-2]'], {}), '(filesPath[:-2])\n', (4913, 4929), False, 'import os, numpy as np, random\n'), ((5984, 6001), 'numpy.uint8', 'np.uint8', (['avImage'], {}), '(avImage)\n', (5992, 6001), True, 'import os, numpy as np, random\n'), ((409, 430), 'PIL.Image.open', 'im.open', (['self.rawRepr'], {}), '(self.rawRepr)\n', (416, 430), True, 'from PIL import Image as im\n'), ((1623, 1700), 'os.path.join', 'os.path.join', (['pathCImg', '"""Depth"""', '"""DepthBMP"""', "(fileName[0:-4] + '_newdepth.bmp')"], {}), "(pathCImg, 'Depth', 'DepthBMP', fileName[0:-4] + '_newdepth.bmp')\n", (1635, 1700), False, 'import os, numpy as np, random\n'), ((4197, 4229), 'os.path.sep.join', 'os.path.sep.join', (['filesPath[:-3]'], {}), '(filesPath[:-3])\n', (4213, 4229), False, 'import os, numpy as np, random\n'), ((4430, 4462), 'os.path.sep.join', 'os.path.sep.join', (['filesPath[:-3]'], {}), '(filesPath[:-3])\n', (4446, 4462), False, 'import os, numpy as np, random\n')] |
import numpy as np
import cv2
#def maxpoolGlobal(res):
def MaxPoolingDos(Img):
fr=len(Img)//2
cr=len(Img[0])//2
Resultado=np.zeros((fr,cr),np.uint8)
#Proceso del maxPooling
a=0
for i in range(0,len(Img),2):
b=0
for j in range(0,len(Img),2):
Resultado[a][b]=np.amax(Img[i:i+2,j:j+2])
b+=1
a+=1
return Resultado
| [
"numpy.zeros",
"numpy.amax"
] | [((135, 163), 'numpy.zeros', 'np.zeros', (['(fr, cr)', 'np.uint8'], {}), '((fr, cr), np.uint8)\n', (143, 163), True, 'import numpy as np\n'), ((316, 346), 'numpy.amax', 'np.amax', (['Img[i:i + 2, j:j + 2]'], {}), '(Img[i:i + 2, j:j + 2])\n', (323, 346), True, 'import numpy as np\n')] |
import numpy as np
from data import io
'''
flatten(imageset)
- converts an image to a column vector
'''
def flatten(imageset):
flat = imageset.reshape(imageset.shape[ 0 ], -1).T
return flat
'''
weight(U, dataset, shi)
- calculate weight of each train sample
- using first k pca
'''
def weight(U, dataset, shi):
return np.dot(U.T, dataset - shi)
'''
pca(k, trainset):
- returns the first k pca
'''
def pca(k, trainset):
shi = np.asmatrix(trainset.mean(axis = 1)).T
phi = trainset - shi
covariance = np.dot(phi, phi.T)
eig_vals, eig_vecs = np.linalg.eig(covariance)
i = eig_vals.argsort()[-k:][::-1]
eig_vals = eig_vals[i]
component = eig_vecs[:, i]
return component, shi
'''
train_pca(n, k, image_size, directory)
- calculate pca, weight
'''
def train_pca(n, k, image_size, directory):
trainset = io.load(n, image_size, directory)
print("trainset shape:", trainset.shape)
trainset = flatten(trainset)
print("trainset shape after flattening:", trainset.shape)
U, shi = pca(k, trainset)
print("Principle components:", U.shape)
print("Average Column Vector:", shi.shape)
train_weight = weight(U, trainset, shi)
print("train weight data shape:", train_weight.shape)
return U, shi, train_weight
'''
test_pca(n, image_size, directory, U, shi, train_weight)
- maps the testset with pca
- calculates test weights
'''
def test_pca(n, image_size, directory, U, shi, train_weight):
testset = io.load(n, image_size, directory)
print("testset shape:", testset.shape)
testset = flatten(testset)
print("testset shape after flattening::", testset.shape)
test_weight = weight(U, testset, shi)
return test_weight
| [
"numpy.dot",
"data.io.load",
"numpy.linalg.eig"
] | [((345, 371), 'numpy.dot', 'np.dot', (['U.T', '(dataset - shi)'], {}), '(U.T, dataset - shi)\n', (351, 371), True, 'import numpy as np\n'), ((542, 560), 'numpy.dot', 'np.dot', (['phi', 'phi.T'], {}), '(phi, phi.T)\n', (548, 560), True, 'import numpy as np\n'), ((586, 611), 'numpy.linalg.eig', 'np.linalg.eig', (['covariance'], {}), '(covariance)\n', (599, 611), True, 'import numpy as np\n'), ((879, 912), 'data.io.load', 'io.load', (['n', 'image_size', 'directory'], {}), '(n, image_size, directory)\n', (886, 912), False, 'from data import io\n'), ((1533, 1566), 'data.io.load', 'io.load', (['n', 'image_size', 'directory'], {}), '(n, image_size, directory)\n', (1540, 1566), False, 'from data import io\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 12 14:16:29 2018
Script gathering functions related to the psd and fft calculations.
@author: misiak
"""
import numpy as np
def psd(fft, fs, weight=None):
"""
Computes the Power Spectral Density (PSD) from the Fast Fourier Transform
(FFT given by numpy.fft.fft).
Parameters
==========
fft : nd.array
FFT array whose frequency are ordered as the numpy.fft.fft function
result (i.e. [0, positive, negative]).
fs : float
Sampling frequency.
weight : None or array_like
Weights of the frequencies in the psd calculation. If None, the
weight are all 1 which correponds to the boxcar window.
Returns
=======
freq : nd.array
Frequency array containing only the positive frequencies (and the 0th).
psd : nd.array
PSD array.
"""
nfft = fft.shape[0]
if weight == None:
s1 = nfft
s2 = nfft
else :
s1 = np.sum(weight)
s2 = np.sum(np.array(weight)**2)
# Nyquist frequency
#fny = float(fs) / 2
# Frequency resolution
#fres = float(fs) / nfft
# Equivalent Noise BandWidth
enbw = float(fs) * s2 / s1**2
freq = np.fft.fftfreq(nfft, fs**-1)
if nfft % 2:
num_freqs = (nfft + 1)//2
else:
num_freqs = nfft//2 + 1
# Correcting the sign of the last point
freq[num_freqs-1] *= -1
freq = freq[1:num_freqs]
fft = fft[..., 1:num_freqs]
psd_array = np.abs(fft)**2 / (enbw * s1**2)
if nfft % 2:
psd_array[..., :] *= 2
else:
# Last point is unpaired Nyquist freq point, don't double
psd_array[..., :-1] *= 2
return freq, psd_array
def psd_freq(time_array):
fs = (time_array[1] - time_array[0])**-1
nfft = time_array.shape[0]
freq = np.fft.fftfreq(nfft, fs**-1)
if nfft % 2:
num_freqs = (nfft + 1)//2
else:
num_freqs = nfft//2 + 1
# Correcting the sign of the last point
freq[num_freqs-1] *= -1
freq = freq[1:num_freqs]
return freq
def angle_psd(fft):
"""
Complex phase of the fft term, on the positive frequencies only.
See also: inv_psd
"""
nfft = fft.shape[0]
if nfft % 2:
num_freqs = (nfft + 1)//2
else:
num_freqs = nfft//2 + 1
return np.angle(fft[..., 1:num_freqs])
def inv_psd(psd, fs, angle=None, mean=0):
"""
Create a temporal array from the psd and the complex phase of the fft.
If angle is set to None (by default), the phase are randomized, which
gives a noise array.
"""
psd_array = np.array(psd)
nfft = len(psd_array)*2
#if weight == None:
s1 = nfft
s2 = nfft
# Equivalent Noise BandWidth
enbw = float(fs) * s2 / s1**2
# normalization
psd_array[:-1] /= 2
fft_all_freq = np.sqrt( enbw * s1**2 * psd_array)
# phases
phi_array = np.random.uniform(0, 2*np.pi, nfft//2-1)
if angle is None:
phi_array = np.random.uniform(0, 2*np.pi, nfft//2-1)
else:
assert len(psd) == len(angle)
phi_array = angle[:-1]
# mean at zero
fft_0 = np.array([mean*nfft,])
# negative frequency with last frequency
fft_neg = np.array(fft_all_freq, dtype='complex')
fft_neg[:-1] *= np.exp(-1j*phi_array)
# positive frequency without last frequency
fft_pos = np.conjugate(fft_neg)[:-1]
# concatenating into fft_array
fft_neg = fft_neg[::-1]
fft_array = np.concatenate((fft_0, fft_pos, fft_neg))
return np.fft.ifft(fft_array).real
def psd_from_fft2(fft2, fs, weight=None):
    """Same as psd, except the input is already ``|fft|**2``.

    Parameters
    ----------
    fft2 : numpy.ndarray
        Squared magnitude of the fft, frequency axis last.
    fs : float
        Sampling rate.
    weight : array-like, optional
        Window applied before the fft; None means a rectangular window.

    Returns
    -------
    (freq, psd_array)
        Positive-frequency axis and the one-sided PSD.
    """
    nfft = fft2.shape[0]
    # BUG FIX: `weight == None` is an element-wise comparison for array
    # weights and made the `if` raise ValueError; identity test intended.
    if weight is None:
        # Rectangular window: S1 = sum(w), S2 = sum(w**2) with w == 1.
        s1 = nfft
        s2 = nfft
    else:
        s1 = np.sum(weight)
        s2 = np.sum(np.array(weight)**2)
    # Equivalent Noise BandWidth
    enbw = float(fs) * s2 / s1**2
    freq = np.fft.fftfreq(nfft, fs**-1)
    if nfft % 2:
        num_freqs = (nfft + 1)//2
    else:
        num_freqs = nfft//2 + 1
    # Correcting the sign of the last point
    freq[num_freqs-1] *= -1
    freq = freq[1:num_freqs]
    fft2 = fft2[..., 1:num_freqs]
    psd_array = fft2 / (enbw * s1**2)
    if nfft % 2:
        psd_array[..., :] *= 2
    else:
        # Last point is unpaired Nyquist freq point, don't double
        psd_array[..., :-1] *= 2
    return freq, psd_array
| [
"numpy.abs",
"numpy.sqrt",
"numpy.fft.fftfreq",
"numpy.conjugate",
"numpy.angle",
"numpy.exp",
"numpy.array",
"numpy.sum",
"numpy.concatenate",
"numpy.random.uniform",
"numpy.fft.ifft"
] | [((1254, 1284), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['nfft', '(fs ** -1)'], {}), '(nfft, fs ** -1)\n', (1268, 1284), True, 'import numpy as np\n'), ((1879, 1909), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['nfft', '(fs ** -1)'], {}), '(nfft, fs ** -1)\n', (1893, 1909), True, 'import numpy as np\n'), ((2402, 2433), 'numpy.angle', 'np.angle', (['fft[..., 1:num_freqs]'], {}), '(fft[..., 1:num_freqs])\n', (2410, 2433), True, 'import numpy as np\n'), ((2689, 2702), 'numpy.array', 'np.array', (['psd'], {}), '(psd)\n', (2697, 2702), True, 'import numpy as np\n'), ((2919, 2954), 'numpy.sqrt', 'np.sqrt', (['(enbw * s1 ** 2 * psd_array)'], {}), '(enbw * s1 ** 2 * psd_array)\n', (2926, 2954), True, 'import numpy as np\n'), ((2989, 3035), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2 * np.pi)', '(nfft // 2 - 1)'], {}), '(0, 2 * np.pi, nfft // 2 - 1)\n', (3006, 3035), True, 'import numpy as np\n'), ((3229, 3252), 'numpy.array', 'np.array', (['[mean * nfft]'], {}), '([mean * nfft])\n', (3237, 3252), True, 'import numpy as np\n'), ((3316, 3355), 'numpy.array', 'np.array', (['fft_all_freq'], {'dtype': '"""complex"""'}), "(fft_all_freq, dtype='complex')\n", (3324, 3355), True, 'import numpy as np\n'), ((3381, 3406), 'numpy.exp', 'np.exp', (['(-1.0j * phi_array)'], {}), '(-1.0j * phi_array)\n', (3387, 3406), True, 'import numpy as np\n'), ((3576, 3617), 'numpy.concatenate', 'np.concatenate', (['(fft_0, fft_pos, fft_neg)'], {}), '((fft_0, fft_pos, fft_neg))\n', (3590, 3617), True, 'import numpy as np\n'), ((4152, 4182), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['nfft', '(fs ** -1)'], {}), '(nfft, fs ** -1)\n', (4166, 4182), True, 'import numpy as np\n'), ((1011, 1025), 'numpy.sum', 'np.sum', (['weight'], {}), '(weight)\n', (1017, 1025), True, 'import numpy as np\n'), ((3073, 3119), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2 * np.pi)', '(nfft // 2 - 1)'], {}), '(0, 2 * np.pi, nfft // 2 - 1)\n', (3090, 3119), True, 'import numpy as np\n'), ((3465, 3486), 
'numpy.conjugate', 'np.conjugate', (['fft_neg'], {}), '(fft_neg)\n', (3477, 3486), True, 'import numpy as np\n'), ((3634, 3656), 'numpy.fft.ifft', 'np.fft.ifft', (['fft_array'], {}), '(fft_array)\n', (3645, 3656), True, 'import numpy as np\n'), ((3909, 3923), 'numpy.sum', 'np.sum', (['weight'], {}), '(weight)\n', (3915, 3923), True, 'import numpy as np\n'), ((1536, 1547), 'numpy.abs', 'np.abs', (['fft'], {}), '(fft)\n', (1542, 1547), True, 'import numpy as np\n'), ((1046, 1062), 'numpy.array', 'np.array', (['weight'], {}), '(weight)\n', (1054, 1062), True, 'import numpy as np\n'), ((3944, 3960), 'numpy.array', 'np.array', (['weight'], {}), '(weight)\n', (3952, 3960), True, 'import numpy as np\n')] |
################################################################################
# Copyright (c) 2019. ContinualAI. All rights reserved. #
# Copyrights licensed under the MIT License. #
# See the accompanying LICENSE file for terms. #
# #
# Date: 15-07-2019 #
# Author: ContinualAI #
# E-mail: <EMAIL> #
# Website: continualai.org #
################################################################################
""" Tensorboard test """
# Python 2-3 compatible
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import json
import keyword
import torch
import tensorflow as tf
import tensorboard as tb
# Work around a tensorboard/tensorflow gfile incompatibility — presumably
# needed so add_embedding works with TF2 installed; verify against setup.
tf.io.gfile = tb.compat.tensorflow_stub.io.gfile
# Tensorboard setup
exp_name = "test"
log_dir = '/home/vincenzo/avalanche-dev/logs/' + exp_name
writer = SummaryWriter(log_dir)
# Log one hyper-parameter / metric pair.
writer.add_hparams(
    {'lr': 0.1, 'bsize': 1},
    {'hparam/accuracy': 10, 'hparam/loss': 10}
)
# Render the hyper-parameter dict as an HTML-ish text card.
hyper = json.dumps({
    "mb_size": 12, "inc_train_ep": 10})
for c in ["{", "}", '"']:
    hyper = hyper.replace(c, "")
hyper = hyper.replace(",", "<br>")
writer.add_text('hyper', hyper, 0)
# We only need to specify the layout once (instead of per experience).
for n_iter in range(100):
    writer.add_scalar('Loss/train', np.random.random(), n_iter)
    writer.add_scalar('Loss/test', np.random.random(), n_iter)
    writer.add_scalar('Accuracy/train', np.random.random(), n_iter)
    writer.add_scalar('Accuracy/test', np.random.random(), n_iter)
    writer.add_scalar('Efficiency/ram', np.random.random(), n_iter)
    writer.add_scalar('Efficiency/disk', np.random.random(), n_iter)
# Batch of synthetic gradient images logged as "confusion matrices".
img_batch = np.zeros((16, 3, 100, 100))
for i in range(16):
    img_batch[i, 0] = np.arange(0, 10000).reshape(100, 100) / 10000 / 16 * i
    img_batch[i, 1] = (1 - np.arange(0, 10000).reshape(100, 100) / 10000) / 16 * i
writer.add_images('confusion matrices', img_batch, 0)
# One image per step, to exercise the step slider in the UI.
for i in range(10):
    img = np.zeros((3, 100, 100))
    img[0] = np.arange(0, 10000).reshape(100, 100) / 10000 / 16 * i
    img[1] = 1 - np.arange(0, 10000).reshape(100, 100) / 10000 / 16 * i
    writer.add_image('evolving cm', img, i)
# Histograms with shifting centers.
for i in range(10):
    x = np.random.random(1000)
    writer.add_histogram('distribution centers', x + i, i)
# Embedding projector: 100 points with string metadata and label images.
meta = []
while len(meta) < 100:
    meta = meta + keyword.kwlist  # get some strings
meta = meta[:100]
for i, v in enumerate(meta):
    meta[i] = v + str(i)
label_img = torch.rand(100, 3, 10, 32)
for i in range(100):
    label_img[i] *= i / 100.0
writer.add_embedding(torch.randn(100, 5), metadata=meta, label_img=label_img)
writer.add_embedding(torch.randn(100, 5), label_img=label_img)
writer.add_embedding(torch.randn(100, 5), metadata=meta)
# Custom scalar layout grouping related charts.
layout = {'Taiwan': {'twse': ['Multiline', ['twse/0050', 'twse/2330']]},
          'USA': {'dow': ['Margin', ['dow/aaa', 'dow/bbb', 'dow/ccc']],
                  'nasdaq': ['Margin', ['nasdaq/aaa', 'nasdaq/bbb', 'nasdaq/ccc']]}}
writer.add_custom_scalars(layout)
for n_iter in range(1000):
    writer.add_scalar('nasdaq/aaa', np.random.random(), n_iter)
    writer.add_scalar('nasdaq/bbb', np.random.random(), n_iter)
    writer.add_scalar('nasdaq/ccc', np.random.random(), n_iter)
    writer.add_scalar('dow/aaa', np.random.random(), n_iter)
    writer.add_scalar('dow/bbb', np.random.random(), n_iter)
    writer.add_scalar('dow/ccc', np.random.random(), n_iter)
    writer.add_scalar('twse/0050', np.random.random(), n_iter)
    writer.add_scalar('twse/2330', np.random.random(), n_iter)
# A small textured mesh.
vertices_tensor = torch.as_tensor([
    [1, 1, 1],
    [-1, -1, 1],
    [1, -1, -1],
    [-1, 1, -1],
], dtype=torch.float).unsqueeze(0)
colors_tensor = torch.as_tensor([
    [255, 0, 0],
    [0, 255, 0],
    [0, 0, 255],
    [255, 0, 255],
], dtype=torch.int).unsqueeze(0)
faces_tensor = torch.as_tensor([
    [0, 2, 3],
    [0, 3, 1],
    [0, 1, 2],
    [1, 3, 2],
], dtype=torch.int).unsqueeze(0)
writer.add_mesh('my_mesh', vertices=vertices_tensor, colors=colors_tensor, faces=faces_tensor)
r = 5
for i in range(100):
    writer.add_scalars('run_14h', {'xsinx': i * np.sin(i / r),
                                   'xcosx': i * np.cos(i / r),
                                   'tanx': np.tan(i / r)}, i)
# BUG FIX: the original called close() first, then flush() (a no-op on a
# closed writer) and closed a second time.  Flush pending events, then
# close exactly once.
writer.flush()
writer.close()
writer.close() | [
"torch.utils.tensorboard.SummaryWriter",
"torch.as_tensor",
"numpy.tan",
"numpy.arange",
"numpy.random.random",
"json.dumps",
"numpy.zeros",
"numpy.cos",
"numpy.sin",
"torch.randn",
"torch.rand"
] | [((1300, 1322), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['log_dir'], {}), '(log_dir)\n', (1313, 1322), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((1436, 1483), 'json.dumps', 'json.dumps', (["{'mb_size': 12, 'inc_train_ep': 10}"], {}), "({'mb_size': 12, 'inc_train_ep': 10})\n", (1446, 1483), False, 'import json\n'), ((2155, 2182), 'numpy.zeros', 'np.zeros', (['(16, 3, 100, 100)'], {}), '((16, 3, 100, 100))\n', (2163, 2182), True, 'import numpy as np\n'), ((2961, 2987), 'torch.rand', 'torch.rand', (['(100)', '(3)', '(10)', '(32)'], {}), '(100, 3, 10, 32)\n', (2971, 2987), False, 'import torch\n'), ((2457, 2480), 'numpy.zeros', 'np.zeros', (['(3, 100, 100)'], {}), '((3, 100, 100))\n', (2465, 2480), True, 'import numpy as np\n'), ((2702, 2724), 'numpy.random.random', 'np.random.random', (['(1000)'], {}), '(1000)\n', (2718, 2724), True, 'import numpy as np\n'), ((3061, 3080), 'torch.randn', 'torch.randn', (['(100)', '(5)'], {}), '(100, 5)\n', (3072, 3080), False, 'import torch\n'), ((3140, 3159), 'torch.randn', 'torch.randn', (['(100)', '(5)'], {}), '(100, 5)\n', (3151, 3159), False, 'import torch\n'), ((3204, 3223), 'torch.randn', 'torch.randn', (['(100)', '(5)'], {}), '(100, 5)\n', (3215, 3223), False, 'import torch\n'), ((1766, 1784), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1782, 1784), True, 'import numpy as np\n'), ((1830, 1848), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1846, 1848), True, 'import numpy as np\n'), ((1901, 1919), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1917, 1919), True, 'import numpy as np\n'), ((1969, 1987), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1985, 1987), True, 'import numpy as np\n'), ((2040, 2058), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2056, 2058), True, 'import numpy as np\n'), ((2110, 2128), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2126, 2128), True, 'import numpy as 
np\n'), ((3583, 3601), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3599, 3601), True, 'import numpy as np\n'), ((3648, 3666), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3664, 3666), True, 'import numpy as np\n'), ((3713, 3731), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3729, 3731), True, 'import numpy as np\n'), ((3775, 3793), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3791, 3793), True, 'import numpy as np\n'), ((3837, 3855), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3853, 3855), True, 'import numpy as np\n'), ((3899, 3917), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3915, 3917), True, 'import numpy as np\n'), ((3963, 3981), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3979, 3981), True, 'import numpy as np\n'), ((4027, 4045), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (4043, 4045), True, 'import numpy as np\n'), ((4076, 4167), 'torch.as_tensor', 'torch.as_tensor', (['[[1, 1, 1], [-1, -1, 1], [1, -1, -1], [-1, 1, -1]]'], {'dtype': 'torch.float'}), '([[1, 1, 1], [-1, -1, 1], [1, -1, -1], [-1, 1, -1]], dtype=\n torch.float)\n', (4091, 4167), False, 'import torch\n'), ((4217, 4309), 'torch.as_tensor', 'torch.as_tensor', (['[[255, 0, 0], [0, 255, 0], [0, 0, 255], [255, 0, 255]]'], {'dtype': 'torch.int'}), '([[255, 0, 0], [0, 255, 0], [0, 0, 255], [255, 0, 255]],\n dtype=torch.int)\n', (4232, 4309), False, 'import torch\n'), ((4359, 4437), 'torch.as_tensor', 'torch.as_tensor', (['[[0, 2, 3], [0, 3, 1], [0, 1, 2], [1, 3, 2]]'], {'dtype': 'torch.int'}), '([[0, 2, 3], [0, 3, 1], [0, 1, 2], [1, 3, 2]], dtype=torch.int)\n', (4374, 4437), False, 'import torch\n'), ((4768, 4781), 'numpy.tan', 'np.tan', (['(i / r)'], {}), '(i / r)\n', (4774, 4781), True, 'import numpy as np\n'), ((4650, 4663), 'numpy.sin', 'np.sin', (['(i / r)'], {}), '(i / r)\n', (4656, 4663), True, 'import numpy as np\n'), ((4710, 4723), 'numpy.cos', 'np.cos', 
(['(i / r)'], {}), '(i / r)\n', (4716, 4723), True, 'import numpy as np\n'), ((2227, 2246), 'numpy.arange', 'np.arange', (['(0)', '(10000)'], {}), '(0, 10000)\n', (2236, 2246), True, 'import numpy as np\n'), ((2495, 2514), 'numpy.arange', 'np.arange', (['(0)', '(10000)'], {}), '(0, 10000)\n', (2504, 2514), True, 'import numpy as np\n'), ((2310, 2329), 'numpy.arange', 'np.arange', (['(0)', '(10000)'], {}), '(0, 10000)\n', (2319, 2329), True, 'import numpy as np\n'), ((2568, 2587), 'numpy.arange', 'np.arange', (['(0)', '(10000)'], {}), '(0, 10000)\n', (2577, 2587), True, 'import numpy as np\n')] |
#! /usr/bin/env python3
"""
Copyright 2021 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#
# Classify applications into 104 classes given their raw code.
#
# The representation (graph) is created from IR.
#
import os
import sys
import numpy as np
from absl import app, flags, logging
from yacos.info.ncc import Inst2Vec
def execute(argv):
    """Extract an inst2vec representation for every benchmark in the dataset.

    Walks each sub-directory of ``FLAGS.dataset_directory``, extracts the
    inst2vec indexes per benchmark, optionally pads them to the longest
    sequence seen, and writes one compressed ``.npz`` per benchmark into a
    sibling ``<dataset>_inst2vec`` tree.
    """
    del argv
    FLAGS = flags.FLAGS

    # The dataset directory must exist.
    if not os.path.isdir(FLAGS.dataset_directory):
        logging.error('Dataset directory {} does not exist.'.format(
            FLAGS.dataset_directory)
        )
        sys.exit(1)

    # Every sub-directory is one benchmark suite.
    folders = []
    for subdir in os.listdir(FLAGS.dataset_directory):
        candidate = os.path.join(FLAGS.dataset_directory, subdir)
        if os.path.isdir(candidate):
            folders.append(candidate)

    last_folder = FLAGS.dataset_directory[FLAGS.dataset_directory.rfind('/') + 1:]

    # Extract inst2vec indexes benchmark by benchmark.
    inst2vec = {}
    lengths = []
    for folder in folders:
        inst2vec[folder] = {}
        # Extract "inst2vec" from the file
        Inst2Vec.prepare_benchmark(folder)
        rep = Inst2Vec.extract(data_type="index")
        for bench, indexes in rep.items():
            inst2vec[folder][bench] = indexes
            lengths.append(len(indexes))
        Inst2Vec.remove_data_directory()

    # Pad every sequence up to the longest one observed.
    max_length = max(lengths)
    unk_idx, _ = Inst2Vec.unknown
    embeddings = Inst2Vec.embeddings

    for folder, data in inst2vec.items():
        # Mirror the input layout under a "<dataset>_inst2vec" tree.
        outdir = os.path.join(folder.replace(
            last_folder, '{}_inst2vec'.format(last_folder)))
        os.makedirs(outdir, exist_ok=True)
        for bench, indexes in data.items():
            if FLAGS.index:
                # Store raw vocabulary indexes, padded with the
                # unknown-token index when requested.
                padded = [idx for idx in indexes]
                if FLAGS.padding:
                    for _ in range(len(indexes), max_length):
                        padded.append(unk_idx)
            else:
                # Store the embedding vectors themselves, padded with
                # the unknown-token embedding when requested.
                padded = [list(embeddings[idx]) for idx in indexes]
                if FLAGS.padding:
                    for _ in range(len(indexes), max_length):
                        padded += list(embeddings[unk_idx])
            np.savez_compressed(os.path.join(outdir, bench), values=padded)

    del embeddings
# Execute
if __name__ == '__main__':
    # app
    # Command-line flags consumed by execute() through flags.FLAGS.
    flags.DEFINE_string('dataset_directory',
                        None,
                        'Dataset directory')
    # When set, store only the inst2vec vocabulary indexes.
    flags.DEFINE_boolean('index',
                         False,
                         'Extract only the indexes.')
    # When set, pad every sequence to the longest one in the dataset.
    flags.DEFINE_boolean('padding',
                         False,
                         'Padding the representation.')
    flags.mark_flag_as_required('dataset_directory')
    app.run(execute)
| [
"os.listdir",
"numpy.savez_compressed",
"os.makedirs",
"os.path.join",
"absl.app.run",
"absl.flags.DEFINE_boolean",
"absl.flags.mark_flag_as_required",
"os.path.isdir",
"sys.exit",
"yacos.info.ncc.Inst2Vec.remove_data_directory",
"yacos.info.ncc.Inst2Vec.extract",
"yacos.info.ncc.Inst2Vec.prep... | [((2962, 3029), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""dataset_directory"""', 'None', '"""Dataset directory"""'], {}), "('dataset_directory', None, 'Dataset directory')\n", (2981, 3029), False, 'from absl import app, flags, logging\n'), ((3082, 3147), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""index"""', '(False)', '"""Extract only the indexes."""'], {}), "('index', False, 'Extract only the indexes.')\n", (3102, 3147), False, 'from absl import app, flags, logging\n'), ((3202, 3271), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""padding"""', '(False)', '"""Padding the representation."""'], {}), "('padding', False, 'Padding the representation.')\n", (3222, 3271), False, 'from absl import app, flags, logging\n'), ((3326, 3374), 'absl.flags.mark_flag_as_required', 'flags.mark_flag_as_required', (['"""dataset_directory"""'], {}), "('dataset_directory')\n", (3353, 3374), False, 'from absl import app, flags, logging\n'), ((3380, 3396), 'absl.app.run', 'app.run', (['execute'], {}), '(execute)\n', (3387, 3396), False, 'from absl import app, flags, logging\n'), ((956, 994), 'os.path.isdir', 'os.path.isdir', (['FLAGS.dataset_directory'], {}), '(FLAGS.dataset_directory)\n', (969, 994), False, 'import os\n'), ((1120, 1131), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1128, 1131), False, 'import sys\n'), ((1165, 1210), 'os.path.join', 'os.path.join', (['FLAGS.dataset_directory', 'subdir'], {}), '(FLAGS.dataset_directory, subdir)\n', (1177, 1210), False, 'import os\n'), ((1639, 1673), 'yacos.info.ncc.Inst2Vec.prepare_benchmark', 'Inst2Vec.prepare_benchmark', (['folder'], {}), '(folder)\n', (1665, 1673), False, 'from yacos.info.ncc import Inst2Vec\n'), ((1688, 1723), 'yacos.info.ncc.Inst2Vec.extract', 'Inst2Vec.extract', ([], {'data_type': '"""index"""'}), "(data_type='index')\n", (1704, 1723), False, 'from yacos.info.ncc import Inst2Vec\n'), ((1865, 1897), 
'yacos.info.ncc.Inst2Vec.remove_data_directory', 'Inst2Vec.remove_data_directory', ([], {}), '()\n', (1895, 1897), False, 'from yacos.info.ncc import Inst2Vec\n'), ((2232, 2266), 'os.makedirs', 'os.makedirs', (['outdir'], {'exist_ok': '(True)'}), '(outdir, exist_ok=True)\n', (2243, 2266), False, 'import os\n'), ((1241, 1276), 'os.listdir', 'os.listdir', (['FLAGS.dataset_directory'], {}), '(FLAGS.dataset_directory)\n', (1251, 1276), False, 'import os\n'), ((2803, 2830), 'os.path.join', 'os.path.join', (['outdir', 'bench'], {}), '(outdir, bench)\n', (2815, 2830), False, 'import os\n'), ((2843, 2888), 'numpy.savez_compressed', 'np.savez_compressed', (['filename'], {'values': 'padding'}), '(filename, values=padding)\n', (2862, 2888), True, 'import numpy as np\n'), ((1310, 1355), 'os.path.join', 'os.path.join', (['FLAGS.dataset_directory', 'subdir'], {}), '(FLAGS.dataset_directory, subdir)\n', (1322, 1355), False, 'import os\n')] |
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.figure import Figure
from numpy.random.mtrand import RandomState
from torch.utils.data import Dataset
def draw_samples(dataset: Dataset,
                 cols: int, rows: int,
                 width: float = 3, height: float = 3, fontsize: int = 8,
                 random_state: RandomState = None) -> Figure:
    """Plot a ``rows x cols`` grid of randomly chosen dataset samples.

    Each cell shows one ``(image, cls, name)`` triple drawn without
    replacement from *dataset*; returns the matplotlib figure.
    """
    rng = random_state if random_state else np.random.RandomState()
    n_samples = cols * rows
    chosen = rng.choice(np.arange(len(dataset)), n_samples, replace=False)
    fig = plt.figure(figsize=(cols * width, rows * height))
    # subplot cells are 1-based, hence start=1.
    for cell, sample_idx in enumerate(chosen, start=1):
        image, cls, name = dataset[sample_idx]
        plt.subplot(rows, cols, cell)
        plt.title(f'#{sample_idx} - [{cls}] {name}', fontsize=fontsize)
        plt.imshow(image)
        plt.grid(False)
        plt.axis('off')
    return fig
| [
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"numpy.random.RandomState"
] | [((556, 605), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(cols * width, rows * height)'}), '(figsize=(cols * width, rows * height))\n', (566, 605), True, 'import matplotlib.pyplot as plt\n'), ((415, 438), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (436, 438), True, 'import numpy as np\n'), ((698, 728), 'matplotlib.pyplot.subplot', 'plt.subplot', (['rows', 'cols', '(i + 1)'], {}), '(rows, cols, i + 1)\n', (709, 728), True, 'import matplotlib.pyplot as plt\n'), ((737, 795), 'matplotlib.pyplot.title', 'plt.title', (['f"""#{index} - [{cls}] {name}"""'], {'fontsize': 'fontsize'}), "(f'#{index} - [{cls}] {name}', fontsize=fontsize)\n", (746, 795), True, 'import matplotlib.pyplot as plt\n'), ((804, 821), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (814, 821), True, 'import matplotlib.pyplot as plt\n'), ((830, 845), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (838, 845), True, 'import matplotlib.pyplot as plt\n'), ((854, 869), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (862, 869), True, 'import matplotlib.pyplot as plt\n')] |
from sklearn.svm import SVC
from imblearn.over_sampling import SMOTE
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
import time
import argparse
import util
import numpy as np
import pandas as pd
def main(args):
    """Train a linear SVM on SMOTE-balanced data and write test predictions.

    Prints the confusion matrix and classification report for a held-out
    split, then predicts ``args.test_set`` and stores the result in
    ``predicted_svm.csv``.
    """
    features = pd.read_csv(args.data)
    labels = pd.read_csv(args.labels)
    # Balance the classes by oversampling the minority class.
    features, labels = SMOTE().fit_resample(features, labels)
    X_train, X_test, y_train, y_test = train_test_split(features, labels, test_size=0.20)
    model = SVC(kernel='linear')
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    print(confusion_matrix(y_test, y_pred))
    print(classification_report(y_test, y_pred))
    # Predict the unlabeled test set and persist the result.
    X_test = pd.read_csv(args.test_set)
    y_pred = model.predict(X_test)
    util.write_csv('predicted_svm.csv', np.transpose(
        np.array(y_pred, ndmin=2)))
if __name__ == '__main__':
    # CLI: required paths for features, labels, and the unlabeled test set.
    parser = argparse.ArgumentParser(description='Script for running ANN')
    parser.add_argument(
        '--data',
        dest='data',
        metavar='<path-to-data>',
        required=True,
        help='Path to the data file'
    )
    parser.add_argument(
        '--labels',
        dest='labels',
        metavar='<path-to-labels>',
        required=True,
        help='Path to the labels file'
    )
    parser.add_argument(
        '--test-set',
        dest='test_set',
        metavar='<path-to-test-set>',
        required=True,
        help='Path to the test_set file'
    )
    args = parser.parse_args()
    # Time the whole training + prediction run.
    start_time = time.time()
    main(args)
    print("--- %s seconds ---" % (time.time() - start_time))
| [
"sklearn.metrics.confusion_matrix",
"argparse.ArgumentParser",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.classification_report",
"imblearn.over_sampling.SMOTE",
"numpy.array",
"time.time",
"sklearn.svm.SVC"
] | [((296, 318), 'pandas.read_csv', 'pd.read_csv', (['args.data'], {}), '(args.data)\n', (307, 318), True, 'import pandas as pd\n'), ((327, 351), 'pandas.read_csv', 'pd.read_csv', (['args.labels'], {}), '(args.labels)\n', (338, 351), True, 'import pandas as pd\n'), ((431, 468), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)'}), '(X, y, test_size=0.2)\n', (447, 468), False, 'from sklearn.model_selection import train_test_split\n'), ((489, 509), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""linear"""'}), "(kernel='linear')\n", (492, 509), False, 'from sklearn.svm import SVC\n'), ((700, 726), 'pandas.read_csv', 'pd.read_csv', (['args.test_set'], {}), '(args.test_set)\n', (711, 726), True, 'import pandas as pd\n'), ((900, 961), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Script for running ANN"""'}), "(description='Script for running ANN')\n", (923, 961), False, 'import argparse\n'), ((1531, 1542), 'time.time', 'time.time', ([], {}), '()\n', (1540, 1542), False, 'import time\n'), ((603, 635), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (619, 635), False, 'from sklearn.metrics import classification_report, confusion_matrix\n'), ((647, 684), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (668, 684), False, 'from sklearn.metrics import classification_report, confusion_matrix\n'), ((364, 371), 'imblearn.over_sampling.SMOTE', 'SMOTE', ([], {}), '()\n', (369, 371), False, 'from imblearn.over_sampling import SMOTE\n'), ((831, 856), 'numpy.array', 'np.array', (['y_pred'], {'ndmin': '(2)'}), '(y_pred, ndmin=2)\n', (839, 856), True, 'import numpy as np\n'), ((1592, 1603), 'time.time', 'time.time', ([], {}), '()\n', (1601, 1603), False, 'import time\n')] |
#!/usr/bin/env python
#vim:fileencoding=UTF-8
import sys
import math
from numpy import histogram
if len(sys.argv) != 8:
print('Usage: SCRIPT [angle data] [polar data] [theta from] [theta to] [phi from] [phi to] [output prefix]')
print(' (theta and phi is in the unit of degree)')
sys.exit(2)
file_in = open(sys.argv[1],'r')
file_pfx = sys.argv[-1]
file_pol = open(sys.argv[2])
theta_from = float(sys.argv[3])
theta_to = float(sys.argv[4])
phi_from = float(sys.argv[5])
phi_to = float(sys.argv[6])
file_out = open(file_pfx+"_hist.out",'w')
file_out.write('#PROGRAM: mapk_angle_hist_by_position.py\n')
file_out.write('#angle data: %s\n' % (sys.argv[1]))
file_out.write('#polar data: %s\n' % (sys.argv[2]))
file_out.write('#theta from %f to %f\n' % (theta_from,theta_to))
file_out.write('#phi from %f to %f\n' % (phi_from,phi_to))
COL_THETA = 2 - 1
COL_POL_THETA = 4 - 1
COL_POL_PHI = 5 - 1
#theta_bins = [x*5.0 for x in xrange(0,37)] # 5度
theta_bins = [x*10.0 for x in range(0,19)] # 10度
#theta_bins = [x*15.0 for x in xrange(0,13)] # 15度
theta = []
weight = []
num_ang = 0 # for check number of lines are consistent
num_pol = 0
''' loop for file reading'''
for l in file_in:
if l.find('#') != -1:
continue
num_ang += 1
'''read next polar position'''
l_pol = file_pol.readline()
while l_pol.find('#') != -1:
l_pol = file_pol.readline()
num_pol += 1
l_pol_sp = l_pol.split()
pol_t = float(l_pol_sp[COL_POL_THETA])
pol_p = float(l_pol_sp[COL_POL_PHI])
''' judge'''
if pol_t < theta_from or theta_to < pol_t:
continue
if pol_p < phi_from or phi_to < pol_p:
continue
'''(accepted) => add angle data '''
lsp = l.split()
t = float(lsp[COL_THETA])
theta.append(t)
weight.append(1.0/math.sin(math.radians(t)))
if num_ang != num_pol:
print('Error: angle data and polar coordinate data are inconsistent!')
print('ABORT')
sys.exit(2)
h, theta_edge = histogram(theta,bins=theta_bins)
hw, theta_edge = histogram(theta,weights=weight, bins=theta_bins)
#hd, theta_edge = histogram(theta,bins=theta_bins,density=True)
hsum = float(sum(h))
hwsum = sum(hw)
for i,x in enumerate(h):
ang = (theta_edge[i]+theta_edge[i+1])*0.5
jcb = math.sin(math.radians(ang))
file_out.write('%8.3f %10.6f %10.6f %10i\n'
% (ang, x/hsum/jcb, hw[i]/hwsum, x))
file_out.close() | [
"numpy.histogram",
"math.radians",
"sys.exit"
] | [((2015, 2048), 'numpy.histogram', 'histogram', (['theta'], {'bins': 'theta_bins'}), '(theta, bins=theta_bins)\n', (2024, 2048), False, 'from numpy import histogram\n'), ((2065, 2114), 'numpy.histogram', 'histogram', (['theta'], {'weights': 'weight', 'bins': 'theta_bins'}), '(theta, weights=weight, bins=theta_bins)\n', (2074, 2114), False, 'from numpy import histogram\n'), ((296, 307), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (304, 307), False, 'import sys\n'), ((1969, 1980), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (1977, 1980), False, 'import sys\n'), ((2312, 2329), 'math.radians', 'math.radians', (['ang'], {}), '(ang)\n', (2324, 2329), False, 'import math\n'), ((1829, 1844), 'math.radians', 'math.radians', (['t'], {}), '(t)\n', (1841, 1844), False, 'import math\n')] |
import numpy as np
def skew(x: np.ndarray) -> np.ndarray:
    """Return the 3x3 skew-symmetric (cross-product) matrix of *x*.

    Args:
        x: An array of shape (3,).

    Returns:
        The skew symmetric array of shape (3, 3), i.e. the matrix S with
        S @ v == np.cross(x, v) for any 3-vector v.
    """
    a, b, c = x[0], x[1], x[2]
    return np.array([
        [0, -c, b],
        [c, 0, -a],
        [-b, a, 0],
    ])
| [
"numpy.array"
] | [((196, 260), 'numpy.array', 'np.array', (['[[0, -x[2], x[1]], [x[2], 0, -x[0]], [-x[1], x[0], 0]]'], {}), '([[0, -x[2], x[1]], [x[2], 0, -x[0]], [-x[1], x[0], 0]])\n', (204, 260), True, 'import numpy as np\n')] |
import sys
import cv2
import numpy as np
import imutils
from imutils import paths
import argparse
class Orthomosaic:
    """Stitch a directory of overlapping images into one mosaic.

    Pipeline per pair: ORB features -> brute-force kNN matching with
    Lowe's ratio test -> RANSAC homography -> perspective warp onto a
    common canvas.  The whole dataset is folded left-to-right in mixer().
    """
    def __init__(self, debug):
        # Resizable preview window used for the final mosaic.
        cv2.namedWindow("output", cv2.WINDOW_NORMAL)
        self.no_raw_images = []
        self.temp_image = []
        self.final_image = []
        self.debug = debug
        pass
    def load_dataset(self):
        # Parse CLI arguments and load the (optionally resized) images.
        self.ap = argparse.ArgumentParser()
        self.ap.add_argument("-i", "--images", type=str, required=True,
                             help="path to input directory of images to stitch")
        self.ap.add_argument("-o", "--output", type=str, required=True,
                             help="path to the output image")
        self.args = vars(self.ap.parse_args())
        # grab the paths to the input images and initialize our images list
        if self.debug:
            print("[INFO] Importing Images...")
        self.imagePaths = sorted(list(paths.list_images(self.args["images"])))
        self.images = []
        for imagePath in self.imagePaths:
            self.image_temp = cv2.imread(imagePath)
            scale_percent = 100 # percent of original size
            width = int(self.image_temp.shape[1] * scale_percent / 100)
            height = int(self.image_temp.shape[0] * scale_percent / 100)
            dim = (width, height)
            # resize image
            self.image = cv2.resize(self.image_temp, dim)
            # self.image = imutils.resize(self.image_temp, width=500)
            self.images.append(self.image)
        if self.debug:
            print("[INFO] Importing Complete")
        # cv2.imshow("output",self.images[1])
        # cv2.waitKey(0)
        # cv2.destroyAllWindows()
    def mixer(self):
        # Fold the image list: stitch the running mosaic with the next
        # image; the last iteration only publishes the accumulated result.
        self.no_raw_images = len(self.images)
        if self.debug:
            print(f"[INFO] {self.no_raw_images} Images have been loaded")
        for x in range(self.no_raw_images):
            if x == 0:
                self.temp_image = self.sticher(self.images[x],self.images[x+1])
            elif x < self.no_raw_images-1 :
                self.temp_image = self.sticher(self.temp_image,self.images[x+1])
            else:
                self.final_image = self.temp_image
        # self.final_image = self.sticher(self.images[0], self.images[1])
        cv2.imshow("output", self.final_image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        pass
    def sticher(self, image1, image2):
        # Stitch image2 onto image1; returns the merged canvas, or None
        # (after printing "Error") when no good matches survive.
        # image1_grayscale = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
        # image2_grayscale = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
        self.image1 = image1
        self.image2 = image2
        orb = cv2.ORB_create(nfeatures=1000)
        print(self.image1.shape)
        # Find the key points and descriptors with ORB
        keypoints1, descriptors1 = orb.detectAndCompute(self.image1, None)
        keypoints2, descriptors2 = orb.detectAndCompute(self.image2, None)
        bf = cv2.BFMatcher_create(cv2.NORM_HAMMING)
        matches = bf.knnMatch(descriptors1, descriptors2, k=2)
        all_matches = []
        for m, n in matches:
            all_matches.append(m)
        # Lowe's ratio test: keep only distinctive matches.
        good = []
        for m, n in matches:
            if m.distance < 0.6 * n.distance:
                good.append(m)
        # Set minimum match condition
        MIN_MATCH_COUNT = 0
        if len(good) > MIN_MATCH_COUNT:
            # Convert keypoints to an argument for findHomography
            src_pts = np.float32(
                [keypoints1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
            dst_pts = np.float32(
                [keypoints2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
            # Establish a homography
            M, _ = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
            result = self.wrap_images(image2, image1, M)
            # cv2.imwrite('test4.jpg',result)
            # cv2.imshow("output_image",result)
            # cv2.waitKey(0)
            # cv2.destroyAllWindows()
            return result
        else:
            print("Error")
            pass
    def wrap_images(self, image1, image2, H):
        # Warp image2 by homography H onto a canvas large enough to also
        # hold image1, then paste image1 on top at its translated spot.
        rows1, cols1 = image1.shape[:2]
        rows2, cols2 = image2.shape[:2]
        H = H
        # Corners of both images, in the shape perspectiveTransform wants.
        list_of_points_1 = np.float32(
            [[0, 0], [0, rows1], [cols1, rows1], [cols1, 0]]).reshape(-1, 1, 2)
        temp_points = np.float32(
            [[0, 0], [0, rows2], [cols2, rows2], [cols2, 0]]).reshape(-1, 1, 2)
        # When we have established a homography we need to warp perspective
        # Change field of view
        list_of_points_2 = cv2.perspectiveTransform(temp_points, H)
        list_of_points = np.concatenate(
            (list_of_points_1, list_of_points_2), axis=0)
        # Bounding box of both images on the common canvas.
        [x_min, y_min] = np.int32(list_of_points.min(axis=0).ravel() - 0.5)
        [x_max, y_max] = np.int32(list_of_points.max(axis=0).ravel() + 0.5)
        # Translate so the canvas origin lands at (x_min, y_min).
        translation_dist = [-x_min, -y_min]
        H_translation = np.array([[1, 0, translation_dist[0]], [
            0, 1, translation_dist[1]], [0, 0, 1]])
        output_img = cv2.warpPerspective(
            image2, H_translation.dot(H), (x_max-x_min, y_max-y_min))
        output_img[translation_dist[1]:rows1+translation_dist[1],
                   translation_dist[0]:cols1+translation_dist[0]] = image1
        return output_img
# initialize OpenCV's image stitcher object and then perform the image
# stitching
if __name__ == "__main__":
    # Full pipeline: parse CLI args, load images, stitch, display result.
    tester = Orthomosaic(debug=True)
    tester.load_dataset()
    tester.mixer()
# (the dead `else: pass` branch carried no behavior and was removed)
| [
"cv2.resize",
"argparse.ArgumentParser",
"cv2.findHomography",
"numpy.float32",
"cv2.imshow",
"cv2.BFMatcher_create",
"numpy.array",
"cv2.ORB_create",
"cv2.destroyAllWindows",
"imutils.paths.list_images",
"numpy.concatenate",
"cv2.perspectiveTransform",
"cv2.waitKey",
"cv2.namedWindow",
... | [((158, 202), 'cv2.namedWindow', 'cv2.namedWindow', (['"""output"""', 'cv2.WINDOW_NORMAL'], {}), "('output', cv2.WINDOW_NORMAL)\n", (173, 202), False, 'import cv2\n'), ((381, 406), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (404, 406), False, 'import argparse\n'), ((2304, 2342), 'cv2.imshow', 'cv2.imshow', (['"""output"""', 'self.final_image'], {}), "('output', self.final_image)\n", (2314, 2342), False, 'import cv2\n'), ((2351, 2365), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2362, 2365), False, 'import cv2\n'), ((2374, 2397), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2395, 2397), False, 'import cv2\n'), ((2663, 2693), 'cv2.ORB_create', 'cv2.ORB_create', ([], {'nfeatures': '(1000)'}), '(nfeatures=1000)\n', (2677, 2693), False, 'import cv2\n'), ((2947, 2985), 'cv2.BFMatcher_create', 'cv2.BFMatcher_create', (['cv2.NORM_HAMMING'], {}), '(cv2.NORM_HAMMING)\n', (2967, 2985), False, 'import cv2\n'), ((4584, 4624), 'cv2.perspectiveTransform', 'cv2.perspectiveTransform', (['temp_points', 'H'], {}), '(temp_points, H)\n', (4608, 4624), False, 'import cv2\n'), ((4650, 4710), 'numpy.concatenate', 'np.concatenate', (['(list_of_points_1, list_of_points_2)'], {'axis': '(0)'}), '((list_of_points_1, list_of_points_2), axis=0)\n', (4664, 4710), True, 'import numpy as np\n'), ((4946, 5025), 'numpy.array', 'np.array', (['[[1, 0, translation_dist[0]], [0, 1, translation_dist[1]], [0, 0, 1]]'], {}), '([[1, 0, translation_dist[0]], [0, 1, translation_dist[1]], [0, 0, 1]])\n', (4954, 5025), True, 'import numpy as np\n'), ((1065, 1086), 'cv2.imread', 'cv2.imread', (['imagePath'], {}), '(imagePath)\n', (1075, 1086), False, 'import cv2\n'), ((1377, 1409), 'cv2.resize', 'cv2.resize', (['self.image_temp', 'dim'], {}), '(self.image_temp, dim)\n', (1387, 1409), False, 'import cv2\n'), ((3717, 3770), 'cv2.findHomography', 'cv2.findHomography', (['src_pts', 'dst_pts', 'cv2.RANSAC', '(5.0)'], {}), '(src_pts, dst_pts, 
cv2.RANSAC, 5.0)\n', (3735, 3770), False, 'import cv2\n'), ((927, 965), 'imutils.paths.list_images', 'paths.list_images', (["self.args['images']"], {}), "(self.args['images'])\n", (944, 965), False, 'from imutils import paths\n'), ((4244, 4304), 'numpy.float32', 'np.float32', (['[[0, 0], [0, rows1], [cols1, rows1], [cols1, 0]]'], {}), '([[0, 0], [0, rows1], [cols1, rows1], [cols1, 0]])\n', (4254, 4304), True, 'import numpy as np\n'), ((4358, 4418), 'numpy.float32', 'np.float32', (['[[0, 0], [0, rows2], [cols2, rows2], [cols2, 0]]'], {}), '([[0, 0], [0, rows2], [cols2, rows2], [cols2, 0]])\n', (4368, 4418), True, 'import numpy as np\n'), ((3460, 3513), 'numpy.float32', 'np.float32', (['[keypoints1[m.queryIdx].pt for m in good]'], {}), '([keypoints1[m.queryIdx].pt for m in good])\n', (3470, 3513), True, 'import numpy as np\n'), ((3571, 3624), 'numpy.float32', 'np.float32', (['[keypoints2[m.trainIdx].pt for m in good]'], {}), '([keypoints2[m.trainIdx].pt for m in good])\n', (3581, 3624), True, 'import numpy as np\n')] |
import unittest
from ParamSklearn.components.classification.multinomial_nb import \
MultinomialNB
from ParamSklearn.util import _test_classifier, _test_classifier_iterative_fit, \
get_dataset
import numpy as np
import sklearn.metrics
class MultinomialNBComponentTest(unittest.TestCase):
    """Unit tests for the MultinomialNB classification component."""

    def test_default_configuration(self):
        """The default configuration reaches the expected accuracy."""
        for i in range(10):
            predictions, targets = \
                _test_classifier(MultinomialNB)
            self.assertAlmostEqual(0.97999999999999998,
                sklearn.metrics.accuracy_score(predictions,
                                               targets))

    def test_default_configuration_iterative_fit(self):
        """Iterative fitting must reach the same accuracy as a plain fit."""
        for i in range(10):
            predictions, targets = \
                _test_classifier_iterative_fit(MultinomialNB)
            self.assertAlmostEqual(0.97999999999999998,
                sklearn.metrics.accuracy_score(predictions,
                                               targets))

    def test_default_configuration_negative_values(self):
        """MultinomialNB must clip negative (standardized) features to zero."""
        # Custom preprocessing test to check if clipping to zero works.
        # NOTE: the module-level `import sklearn.metrics` does not make the
        # `preprocessing` submodule importable; import it explicitly here so
        # StandardScaler is guaranteed to be available.
        import sklearn.preprocessing

        X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
        # Standardization produces negative feature values on purpose.
        ss = sklearn.preprocessing.StandardScaler()
        X_train = ss.fit_transform(X_train)
        configuration_space = MultinomialNB.get_hyperparameter_search_space()
        default = configuration_space.get_default_configuration()
        cls = MultinomialNB(random_state=1, **{hp_name: default[hp_name]
                                          for hp_name in default
                                          if default[hp_name] is not None})
        cls = cls.fit(X_train, Y_train)
        prediction = cls.predict(X_test)
        self.assertAlmostEqual(np.nanmean(prediction == Y_test),
                               0.88888888888888884)
"ParamSklearn.util.get_dataset",
"ParamSklearn.components.classification.multinomial_nb.MultinomialNB.get_hyperparameter_search_space",
"ParamSklearn.util._test_classifier_iterative_fit",
"ParamSklearn.util._test_classifier",
"numpy.nanmean",
"ParamSklearn.components.classification.multinomial_nb.Multinom... | [((1233, 1262), 'ParamSklearn.util.get_dataset', 'get_dataset', ([], {'dataset': '"""digits"""'}), "(dataset='digits')\n", (1244, 1262), False, 'from ParamSklearn.util import _test_classifier, _test_classifier_iterative_fit, get_dataset\n'), ((1431, 1478), 'ParamSklearn.components.classification.multinomial_nb.MultinomialNB.get_hyperparameter_search_space', 'MultinomialNB.get_hyperparameter_search_space', ([], {}), '()\n', (1476, 1478), False, 'from ParamSklearn.components.classification.multinomial_nb import MultinomialNB\n'), ((1560, 1679), 'ParamSklearn.components.classification.multinomial_nb.MultinomialNB', 'MultinomialNB', ([], {'random_state': '(1)'}), '(random_state=1, **{hp_name: default[hp_name] for hp_name in\n default if default[hp_name] is not None})\n', (1573, 1679), False, 'from ParamSklearn.components.classification.multinomial_nb import MultinomialNB\n'), ((422, 453), 'ParamSklearn.util._test_classifier', '_test_classifier', (['MultinomialNB'], {}), '(MultinomialNB)\n', (438, 453), False, 'from ParamSklearn.util import _test_classifier, _test_classifier_iterative_fit, get_dataset\n'), ((803, 848), 'ParamSklearn.util._test_classifier_iterative_fit', '_test_classifier_iterative_fit', (['MultinomialNB'], {}), '(MultinomialNB)\n', (833, 848), False, 'from ParamSklearn.util import _test_classifier, _test_classifier_iterative_fit, get_dataset\n'), ((1883, 1915), 'numpy.nanmean', 'np.nanmean', (['(prediction == Y_test)'], {}), '(prediction == Y_test)\n', (1893, 1915), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
# County / Unitary Authorities Apr-2021
# NOMIS API - Population estimates - local authority based by five year age band
url = "https://www.nomisweb.co.uk/api/v01/dataset/NM_31_1.data.csv?geography=1807745025...1807745028,1807745030...1807745032,1807745034...1807745083,1807745085,1807745282,1807745283,1807745086...1807745155,1807745157...1807745164,1807745166...1807745170,1807745172...1807745177,1807745179...1807745194,1807745196,1807745197,1807745199,1807745201...1807745218,1807745221,1807745222,1807745224,1807745226...1807745231,1807745233,1807745234,1807745236...1807745244,1807745271...1807745281&date=latest&sex=7&age=0,24,22,25&measures=20100,20301&signature=NPK-be81606366125733ff591b:0x55c28d4f3b15b3d94ea6f86d2ba90f4e761c43c3"
# Keep only the identifying columns plus the observation value.
population_cols = [
    "DATE",
    "GEOGRAPHY_NAME",
    "GEOGRAPHY_CODE",
    "GEOGRAPHY_TYPE",
    "AGE_NAME",
    "MEASURES_NAME",
    "OBS_VALUE",
]
df_population = pd.read_csv(url)[population_cols]
# Restrict to English local authorities (codes starting with "E").
england_mask = df_population["GEOGRAPHY_CODE"].str.contains("E")
df_population = df_population[england_mask]
# One row per geography, one column per (age band, measure) pair.
df_population_pivot = df_population.pivot_table(
    index=["GEOGRAPHY_CODE"],
    columns=["AGE_NAME", "MEASURES_NAME"],
    values="OBS_VALUE",
).reset_index()
# Flatten the two-level column index into "age|measure" labels.
df_population_pivot.columns = df_population_pivot.columns.map("|".join).str.strip("|")
# NOMIS - annual population survey
url = "https://www.nomisweb.co.uk/api/v01/dataset/NM_17_5.data.csv?geography=1807745025...1807745028,1807745030...1807745032,1807745034...1807745083,1807745085,1807745282,1807745283,1807745086...1807745155,1807745157...1807745164,1807745166...1807745170,1807745172...1807745177,1807745179...1807745194,1807745196,1807745197,1807745199,1807745201...1807745218,1807745221,1807745222,1807745224,1807745226...1807745231,1807745233,1807745234,1807745236...1807745244&date=latestMINUS4&variable=18,45,248,249,111,1487,1488,1537,290,720...722,344,84,72,74,1463,1464,1558,885...888,416,418,1349,602...605,434...437,197,202&measures=20599,21001,21002,21003&signature=NPK-be81606366125733ff591b:0xf38669505a2fa883628ec2471d9566bbd3dd3563"
# Keep the identifying columns plus the observation value.
survey_cols = [
    "DATE",
    "GEOGRAPHY_NAME",
    "GEOGRAPHY_CODE",
    "GEOGRAPHY_TYPE",
    "MEASURES_NAME",
    "VARIABLE_NAME",
    "OBS_VALUE",
]
df_survey = pd.read_csv(url)[survey_cols]
# Restrict to English local authorities (codes starting with "E").
df_survey = df_survey[df_survey["GEOGRAPHY_CODE"].str.contains("E")]
# One row per geography, one column per (variable, measure) pair.
df_survey_pivot = df_survey.pivot_table(
    index=["GEOGRAPHY_CODE"],
    columns=["VARIABLE_NAME", "MEASURES_NAME"],
    values="OBS_VALUE",
).reset_index()
# Flatten the two-level column index into "variable|measure" labels.
df_survey_pivot.columns = df_survey_pivot.columns.map("|".join).str.strip("|")
# NOMIS - annual survey of hours and earnings - workplace analysis
# FIX: the original URL started with "/api/v01/..." (no scheme/host), so
# pd.read_csv treated it as a local file path and the request failed.
# Prepend the nomisweb.co.uk host used by every other query in this script.
url = "https://www.nomisweb.co.uk/api/v01/dataset/NM_99_1.data.csv?geography=1807745025...1807745028,1807745030...1807745032,1807745034...1807745083,1807745085,1807745282,1807745283,1807745086...1807745155,1807745157...1807745164,1807745166...1807745170,1807745172...1807745177,1807745179...1807745194,1807745196,1807745197,1807745199,1807745201...1807745218,1807745221,1807745222,1807745224,1807745226...1807745231,1807745233,1807745234,1807745236...1807745244&date=latestMINUS1&sex=8,9&item=2&pay=1,7&measures=20100,20701&signature=NPK-be81606366125733ff591b:0xf6642f02ae17b8d9fb60ef7229a0b28010dbfb3b"
df_workplace = pd.read_csv(url)
# Keep the identifying columns plus the observation value.
df_workplace = df_workplace[
    [
        "DATE",
        "GEOGRAPHY_NAME",
        "GEOGRAPHY_CODE",
        "GEOGRAPHY_TYPE",
        "SEX_NAME",
        "PAY_NAME",
        "MEASURES_NAME",
        "OBS_VALUE",
    ]
]
# Restrict to English local authorities (codes starting with "E").
df_workplace = df_workplace[df_workplace["GEOGRAPHY_CODE"].str.contains("E")]
# One row per geography, one column per (sex, pay, measure) triple.
df_workplace_pivot = df_workplace.pivot_table(
    index=["GEOGRAPHY_CODE"],
    columns=["SEX_NAME", "PAY_NAME", "MEASURES_NAME"],
    values="OBS_VALUE",
).reset_index()
# Flatten the three-level column index into "sex|pay|measure" labels.
df_workplace_pivot.columns = df_workplace_pivot.columns.map("|".join).str.strip("|")
# NOMIS - jobs density
url = "https://www.nomisweb.co.uk/api/v01/dataset/NM_57_1.data.csv?geography=1807745025...1807745028,1807745030...1807745032,1807745034...1807745083,1807745085,1807745282,1807745283,1807745086...1807745155,1807745157...1807745164,1807745166...1807745170,1807745172...1807745177,1807745179...1807745194,1807745196,1807745197,1807745199,1807745201...1807745218,1807745221,1807745222,1807745224,1807745226...1807745231,1807745233,1807745234,1807745236...1807745244,1807745271...1807745281&date=latest&item=1,3&measures=20100&signature=NPK-be81606366125733ff591b:0xc789ee7ace7b897a1eed8f6e2ca5e3ad42ac8540"
# Keep the identifying columns plus the observation value.
density_cols = [
    "DATE",
    "GEOGRAPHY_NAME",
    "GEOGRAPHY_CODE",
    "GEOGRAPHY_TYPE",
    "ITEM_NAME",
    "MEASURES_NAME",
    "OBS_VALUE",
]
df_density = pd.read_csv(url)[density_cols]
# Restrict to English local authorities (codes starting with "E").
df_density = df_density[df_density["GEOGRAPHY_CODE"].str.contains("E")]
# One row per geography, one column per (item, measure) pair.
df_density_pivot = df_density.pivot_table(
    index=["GEOGRAPHY_CODE"],
    columns=["ITEM_NAME", "MEASURES_NAME"],
    values="OBS_VALUE",
).reset_index()
# Flatten the two-level column index into "item|measure" labels.
df_density_pivot.columns = df_density_pivot.columns.map("|".join).str.strip("|")
# NOMIS - Claimant count
url = "https://www.nomisweb.co.uk/api/v01/dataset/NM_162_1.data.csv?geography=1807745025...1807745028,1807745030...1807745032,1807745034...1807745083,1807745085,1807745282,1807745283,1807745086...1807745155,1807745157...1807745164,1807745166...1807745170,1807745172...1807745177,1807745179...1807745194,1807745196,1807745197,1807745199,1807745201...1807745218,1807745221,1807745222,1807745224,1807745226...1807745231,1807745233,1807745234,1807745236...1807745244,1807745271...1807745281&date=latestMINUS24&gender=0&age=0&measure=1...4&measures=20100&signature=NPK-be81606366125733ff591b:0xb2f5e7659af9ab5b972c8eacf5e12aa1c7aef5bf"
# Keep the identifying columns plus the observation value.
claim_cols = [
    "DATE",
    "GEOGRAPHY_NAME",
    "GEOGRAPHY_CODE",
    "GEOGRAPHY_TYPE",
    "MEASURE_NAME",
    "OBS_VALUE",
]
df_claim = pd.read_csv(url)[claim_cols]
# Restrict to English local authorities (codes starting with "E").
df_claim = df_claim[df_claim["GEOGRAPHY_CODE"].str.contains("E")]
# One row per geography, one column per claimant-count measure.
df_claim_pivot = df_claim.pivot_table(
    index=["GEOGRAPHY_CODE"],
    columns=["MEASURE_NAME"],
    values="OBS_VALUE",
).reset_index()
# London Min Wage
url = "https://opendata.arcgis.com/datasets/3ba3daf9278f47daba0f561889c3521a_0.csv"
df_london = pd.read_csv(url)
# Flag local authorities inside the London region.
df_london["london_min_wage"] = np.where(df_london["RGN19NM"] == "London", True, False)
# Drop unneeded identifier columns and align the join key name.
df_london.drop(list(df_london.filter(["FID", "LAD19NM"])), axis=1, inplace=True)
df_london.rename(columns={"LAD19CD": "GEOGRAPHY_CODE"}, inplace=True)
# Merge all pivoted datasets onto the population frame, one left-join at a time.
merged_master = df_population_pivot
for extra_frame in (
    df_survey_pivot,
    df_density_pivot,
    df_workplace_pivot,
    df_claim_pivot,
    df_london,
):
    merged_master = pd.merge(
        merged_master, extra_frame, on=["GEOGRAPHY_CODE"], how="left"
    )
merged_master.reset_index().to_csv("data/master_file.csv", index=False)
# Also persist the list of column names for reference.
col_list = list(merged_master.columns)
df = pd.DataFrame(col_list)
# saving the dataframe
df.to_csv("data/metrics.csv")
| [
"numpy.where",
"pandas.merge",
"pandas.DataFrame",
"pandas.read_csv"
] | [((800, 816), 'pandas.read_csv', 'pd.read_csv', (['url'], {}), '(url)\n', (811, 816), True, 'import pandas as pd\n'), ((2121, 2137), 'pandas.read_csv', 'pd.read_csv', (['url'], {}), '(url)\n', (2132, 2137), True, 'import pandas as pd\n'), ((3310, 3326), 'pandas.read_csv', 'pd.read_csv', (['url'], {}), '(url)\n', (3321, 3326), True, 'import pandas as pd\n'), ((4526, 4542), 'pandas.read_csv', 'pd.read_csv', (['url'], {}), '(url)\n', (4537, 4542), True, 'import pandas as pd\n'), ((5713, 5729), 'pandas.read_csv', 'pd.read_csv', (['url'], {}), '(url)\n', (5724, 5729), True, 'import pandas as pd\n'), ((6216, 6232), 'pandas.read_csv', 'pd.read_csv', (['url'], {}), '(url)\n', (6227, 6232), True, 'import pandas as pd\n'), ((6264, 6319), 'numpy.where', 'np.where', (["(df_london['RGN19NM'] == 'London')", '(True)', '(False)'], {}), "(df_london['RGN19NM'] == 'London', True, False)\n", (6272, 6319), True, 'import numpy as np\n'), ((6511, 6597), 'pandas.merge', 'pd.merge', (['df_population_pivot', 'df_survey_pivot'], {'on': "['GEOGRAPHY_CODE']", 'how': '"""left"""'}), "(df_population_pivot, df_survey_pivot, on=['GEOGRAPHY_CODE'], how=\n 'left')\n", (6519, 6597), True, 'import pandas as pd\n'), ((6615, 6691), 'pandas.merge', 'pd.merge', (['merged_master', 'df_density_pivot'], {'on': "['GEOGRAPHY_CODE']", 'how': '"""left"""'}), "(merged_master, df_density_pivot, on=['GEOGRAPHY_CODE'], how='left')\n", (6623, 6691), True, 'import pandas as pd\n'), ((6714, 6792), 'pandas.merge', 'pd.merge', (['merged_master', 'df_workplace_pivot'], {'on': "['GEOGRAPHY_CODE']", 'how': '"""left"""'}), "(merged_master, df_workplace_pivot, on=['GEOGRAPHY_CODE'], how='left')\n", (6722, 6792), True, 'import pandas as pd\n'), ((6815, 6889), 'pandas.merge', 'pd.merge', (['merged_master', 'df_claim_pivot'], {'on': "['GEOGRAPHY_CODE']", 'how': '"""left"""'}), "(merged_master, df_claim_pivot, on=['GEOGRAPHY_CODE'], how='left')\n", (6823, 6889), True, 'import pandas as pd\n'), ((6912, 6981), 'pandas.merge', 
'pd.merge', (['merged_master', 'df_london'], {'on': "['GEOGRAPHY_CODE']", 'how': '"""left"""'}), "(merged_master, df_london, on=['GEOGRAPHY_CODE'], how='left')\n", (6920, 6981), True, 'import pandas as pd\n'), ((7099, 7121), 'pandas.DataFrame', 'pd.DataFrame', (['col_list'], {}), '(col_list)\n', (7111, 7121), True, 'import pandas as pd\n')] |
#-*-coding:utf8-*-#
import os
from cv2 import cv2
from PIL import Image,ImageDraw
from datetime import datetime
import time
import tensorflow as tf
import numpy as np
import gender_train_data as train_data
from gender_train_data import labels_text
import matplotlib.pyplot as plt
# 人脸检测
class DetectFaces():
    """Haar-cascade based detection of faces, eyes and smiles in an image,
    plus a TensorFlow-1.x gender classifier applied to a detected face region.

    Relies on cascade XML files under ./haar/haarcascades/ and a saved
    TF checkpoint under ./model/ (paths are hard-coded below).
    """

    def __init__(self):
        pass

    # detectFaces() returns the rectangle coordinates of every face in the
    # image (top-left and bottom-right corners of each rectangle).
    # Uses the Haar-feature cascade classifier
    # haarcascade_frontalface_default.xml; other pre-trained XML files are
    # available in the haarcascades directory.
    # NOTE: the pre-trained classifiers require a grayscale image as input.
    def detectFaces(self,image_name):
        """Return a list of (x1, y1, x2, y2) face rectangles found in the file."""
        img = cv2.imread(image_name)
        face_cascade = cv2.CascadeClassifier("./haar/haarcascades/haarcascade_frontalface_default.xml")
        if img.ndim == 3:
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        else:
            gray = img #if ndim is 3 the image is color, so convert to grayscale; if it is 2 the image is already grayscale
        faces = face_cascade.detectMultiScale(gray, 1.2, 5)#1.2 is the scale factor, 5 the min neighbours; changing them changes the detections
        result = []
        for (x,y,width,height) in faces:
            result.append((x,y,x+width,y+height))
        return result

    # Save each detected face as its own image file.
    def saveFaces(self,image_name):
        """Crop every detected face and save it under <name>_faces/<i>.jpg."""
        faces = self.detectFaces(image_name)
        if faces:
            # Faces are saved in save_dir.
            # Image module: Image.open gets an image handle, crop cuts the
            # region returned by detectFaces, save writes it out.
            save_dir = image_name.split('.')[0]+"_faces"
            os.mkdir(save_dir)
            count = 0
            for (x1,y1,x2,y2) in faces:
                file_name = os.path.join(save_dir,str(count)+".jpg")
                Image.open(image_name).crop((x1,y1,x2,y2)).save(file_name)
                count+=1

    # Draw rectangles on the original image framing every face.
    # Uses PIL: Image.open gets the handle, ImageDraw.Draw gives a draw
    # instance, and rectangle() draws the box at the detectFaces coordinates;
    # outline is the box colour.
    # NOTE: drop the outline argument for grayscale originals, since a
    # grayscale image has no RGB channels. Same for drawEyes / drawSmiles.
    def drawFaces(self,image_name):
        """Draw a red rectangle around every detected face, overwriting the file."""
        faces = self.detectFaces(image_name)
        if faces:
            img = Image.open(image_name)
            draw_instance = ImageDraw.Draw(img)
            for (x1,y1,x2,y2) in faces:
                draw_instance.rectangle((x1,y1,x2,y2), outline=(255, 0,0))
            img.save(image_name)

    # Detect eyes and return their coordinates.
    # Since eyes sit on faces, faces are detected first and eyes searched
    # within them, so detectEyes builds on detectFaces; note the coordinates
    # below are relative to each face region.
    # The classifier could also be run on the whole image directly, which
    # would look exactly like detectFaces.
    def detectEyes(self,image_name):
        """Return a list of (x1, y1, x2, y2) eye rectangles in image coordinates."""
        eye_cascade = cv2.CascadeClassifier('./haar/haarcascades/haarcascade_eye.xml')
        faces = self.detectFaces(image_name)
        img = cv2.imread(image_name)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        result = []
        for (x1,y1,x2,y2) in faces:
            roi_gray = gray[y1:y2, x1:x2]
            eyes = eye_cascade.detectMultiScale(roi_gray,1.3,2)
            for (ex,ey,ew,eh) in eyes:
                result.append((x1+ex,y1+ey,x1+ex+ew,y1+ey+eh))
        return result

    # Frame the eyes on the original image.
    def drawEyes(self,image_name):
        """Draw a blue rectangle around every detected eye, overwriting the file."""
        eyes = self.detectEyes(image_name)
        if eyes:
            img = Image.open(image_name)
            draw_instance = ImageDraw.Draw(img)
            for (x1,y1,x2,y2) in eyes:
                draw_instance.rectangle((x1,y1,x2,y2), outline=(0, 0,255))
            img.save(image_name)

    # Detect smiles.
    def detectSmiles(self,image_name):
        """Return a list of (x1, y1, x2, y2) smile rectangles found in the file."""
        img = cv2.imread(image_name)
        smiles_cascade = cv2.CascadeClassifier("./haar/haarcascades/haarcascade_smile.xml")
        if img.ndim == 3:
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        else:
            gray = img #if ndim is 3 the image is color, so convert to grayscale; if it is 2 the image is already grayscale
        smiles = smiles_cascade.detectMultiScale(gray,4,5)
        result = []
        for (x,y,width,height) in smiles:
            result.append((x,y,x+width,y+height))
        return result

    # Frame the smiles on the original image.
    def drawSmiles(self,image_name):
        """Draw a rectangle around every detected smile, overwriting the file."""
        smiles = self.detectSmiles(image_name)
        if smiles:
            img = Image.open(image_name)
            draw_instance = ImageDraw.Draw(img)
            for (x1,y1,x2,y2) in smiles:
                draw_instance.rectangle((x1,y1,x2,y2), outline=(100, 100,0))
            img.save(image_name)

    def gender_classify(self,image_name,area):
        """Classify the gender of the face inside `area` of image `image_name`.

        `area` is an (x1, y1, x2, y2) rectangle as returned by detectFaces.
        Restores the saved TF-1.x graph from ./model/, runs the face crop
        through it, plots per-layer feature maps, and prints the predicted
        label. NOTE(review): a new tf.Session is created per call and never
        closed — acceptable for a demo script, but a leak if called often.
        """
        img = cv2.imread(image_name)
        # Crop the face region; `area` is (x1, y1, x2, y2), numpy indexing is [rows, cols].
        img = img[area[1]:area[3],area[0]:area[2]]
        # Resize to the 92x112 input size the network was trained on, then flatten.
        img = cv2.resize(img, (92,112),interpolation = cv2.INTER_AREA).flatten()
        ball=np.array([img])
        #
        np.set_printoptions(suppress=True)
        # Take a single image as the batch.
        input_image = ball#
        #input_image = train_data.images[0:1]
        labels = train_data.labels[0:1]
        fig2,ax2 = plt.subplots(figsize=(2,2))
        #input_image=cv2.resize(input_image,( 92,112),interpolation=cv2.INTER_CUBIC)
        ax2.imshow(np.reshape(input_image, (112, 92,3)))
        #plt.show()
        sess = tf.Session()
        graph_path=os.path.abspath('./model/my-gender-v1.0.meta')
        model=os.path.abspath('./model/')
        server = tf.train.import_meta_graph(graph_path)
        server.restore(sess,tf.train.latest_checkpoint(model))
        graph = tf.get_default_graph()
        # Populate feed_dict with the input placeholders from the restored graph.
        x = graph.get_tensor_by_name('input_images:0')
        y = graph.get_tensor_by_name('input_labels:0')
        feed_dict={x:input_image,y:labels}
        # First convolution + pooling layer.
        relu_1 = graph.get_tensor_by_name('relu_1:0')
        max_pool_1 = graph.get_tensor_by_name('max_pool_1:0')
        # Second convolution + pooling layer.
        relu_2 = graph.get_tensor_by_name('relu_2:0')
        max_pool_2 = graph.get_tensor_by_name('max_pool_2:0')
        # Third convolution + pooling layer.
        relu_3 = graph.get_tensor_by_name('relu_3:0')
        max_pool_3 = graph.get_tensor_by_name('max_pool_3:0')
        # Output of the final fully-connected layer.
        f_softmax = graph.get_tensor_by_name('f_softmax:0')
        #relu_1_r,max_pool_1_,relu_2,max_pool_2,relu_3,max_pool_3,f_softmax=sess.run([relu_1,max_pool_1,relu_2,max_pool_2,relu_3,max_pool_3,f_softmax],feed_dict)
        #---------------------------- per-layer feature visualization -------------------------------
        # conv1 features
        r1_relu = sess.run(relu_1,feed_dict)
        r1_tranpose = sess.run(tf.transpose(r1_relu,[3,0,1,2]))
        fig,ax = plt.subplots(nrows=1,ncols=16,figsize=(16,1))
        for i in range(16):
            ax[i].imshow(r1_tranpose[i][0])
        plt.title('Conv1 16*112*92')
        #plt.show()
        # pool1 features
        max_pool_1 = sess.run(max_pool_1,feed_dict)
        r1_tranpose = sess.run(tf.transpose(max_pool_1,[3,0,1,2]))
        fig,ax = plt.subplots(nrows=1,ncols=16,figsize=(16,1))
        for i in range(16):
            ax[i].imshow(r1_tranpose[i][0])
        plt.title('Pool1 16*56*46')
        #plt.show()
        # conv2 features
        r2_relu = sess.run(relu_2,feed_dict)
        r2_tranpose = sess.run(tf.transpose(r2_relu,[3,0,1,2]))
        fig,ax = plt.subplots(nrows=1,ncols=32,figsize=(32,1))
        for i in range(32):
            ax[i].imshow(r2_tranpose[i][0])
        plt.title('Conv2 32*56*46')
        #plt.show()
        # pool2 features
        max_pool_2 = sess.run(max_pool_2,feed_dict)
        tranpose = sess.run(tf.transpose(max_pool_2,[3,0,1,2]))
        fig,ax = plt.subplots(nrows=1,ncols=32,figsize=(32,1))
        for i in range(32):
            ax[i].imshow(tranpose[i][0])
        plt.title('Pool2 32*28*23')
        #plt.show()
        # conv3 features
        r3_relu = sess.run(relu_3,feed_dict)
        tranpose = sess.run(tf.transpose(r3_relu,[3,0,1,2]))
        fig,ax = plt.subplots(nrows=1,ncols=64,figsize=(32,1))
        for i in range(64):
            ax[i].imshow(tranpose[i][0])
        plt.title('Conv3 64*28*23')
        #plt.show()
        # pool3 features
        max_pool_3 = sess.run(max_pool_3,feed_dict)
        tranpose = sess.run(tf.transpose(max_pool_3,[3,0,1,2]))
        fig,ax = plt.subplots(nrows=1,ncols=64,figsize=(32,1))
        for i in range(64):
            ax[i].imshow(tranpose[i][0])
        plt.title('Pool3 64*14*12')
        #plt.show()
        result=sess.run(f_softmax,feed_dict)
        print(result)
        print(labels_text[np.argmax(result)])
if __name__ == '__main__':
    # Time the face-detection pass on the sample image.
    started = datetime.now()
    detector = DetectFaces()
    faces = detector.detectFaces('heat.jpg')
    finished = datetime.now()
    print("耗时:" + str(finished - started))
    if faces:
        print("有人存在!!---》人数为:" + str(len(faces)))
    else:
        print('视频图像中无人!!')
    # Run the gender classifier on each detected face region.
    for face_area in faces:
        detector.gender_classify('heat.jpg', face_area)
    #detect_obj.drawFaces('./resources/pic/slx.jpg')
    #detect_obj.drawSmiles('./resources/pic/slx.jpg')
    #detect_obj.saveFaces('./resources/pic/slx.jpg')
"""
上面的代码将眼睛、人脸、笑脸在不同的图像上框出,如果需要在同一张图像上框出,改一下代码就可以了。
总之,利用opencv里训练好的haar特征的xml文件,在图片上检测出人脸的坐标,利用这个坐标,我们可以将人脸区域剪切保存,也可以在原图上将人脸框出。剪切保存人脸以及用矩形工具框出人脸,本程序使用的是PIL里的Image、ImageDraw模块。
此外,opencv里面也有画矩形的模块,同样可以用来框出人脸。
"""
| [
"tensorflow.transpose",
"numpy.array",
"PIL.ImageDraw.Draw",
"numpy.reshape",
"tensorflow.Session",
"cv2.cv2.resize",
"os.mkdir",
"cv2.cv2.cvtColor",
"tensorflow.get_default_graph",
"numpy.argmax",
"tensorflow.train.import_meta_graph",
"tensorflow.train.latest_checkpoint",
"matplotlib.pyplot... | [((8159, 8173), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (8171, 8173), False, 'from datetime import datetime\n'), ((8264, 8278), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (8276, 8278), False, 'from datetime import datetime\n'), ((576, 598), 'cv2.cv2.imread', 'cv2.imread', (['image_name'], {}), '(image_name)\n', (586, 598), False, 'from cv2 import cv2\n'), ((622, 707), 'cv2.cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""./haar/haarcascades/haarcascade_frontalface_default.xml"""'], {}), "('./haar/haarcascades/haarcascade_frontalface_default.xml'\n )\n", (643, 707), False, 'from cv2 import cv2\n'), ((2454, 2518), 'cv2.cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""./haar/haarcascades/haarcascade_eye.xml"""'], {}), "('./haar/haarcascades/haarcascade_eye.xml')\n", (2475, 2518), False, 'from cv2 import cv2\n'), ((2583, 2605), 'cv2.cv2.imread', 'cv2.imread', (['image_name'], {}), '(image_name)\n', (2593, 2605), False, 'from cv2 import cv2\n'), ((2621, 2658), 'cv2.cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (2633, 2658), False, 'from cv2 import cv2\n'), ((3372, 3394), 'cv2.cv2.imread', 'cv2.imread', (['image_name'], {}), '(image_name)\n', (3382, 3394), False, 'from cv2 import cv2\n'), ((3420, 3486), 'cv2.cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""./haar/haarcascades/haarcascade_smile.xml"""'], {}), "('./haar/haarcascades/haarcascade_smile.xml')\n", (3441, 3486), False, 'from cv2 import cv2\n'), ((4290, 4312), 'cv2.cv2.imread', 'cv2.imread', (['image_name'], {}), '(image_name)\n', (4300, 4312), False, 'from cv2 import cv2\n'), ((4459, 4474), 'numpy.array', 'np.array', (['[img]'], {}), '([img])\n', (4467, 4474), True, 'import numpy as np\n'), ((4493, 4527), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (4512, 4527), True, 'import numpy as np\n'), ((4677, 4705), 
'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(2, 2)'}), '(figsize=(2, 2))\n', (4689, 4705), True, 'import matplotlib.pyplot as plt\n'), ((4883, 4895), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4893, 4895), True, 'import tensorflow as tf\n'), ((4915, 4961), 'os.path.abspath', 'os.path.abspath', (['"""./model/my-gender-v1.0.meta"""'], {}), "('./model/my-gender-v1.0.meta')\n", (4930, 4961), False, 'import os\n'), ((4976, 5003), 'os.path.abspath', 'os.path.abspath', (['"""./model/"""'], {}), "('./model/')\n", (4991, 5003), False, 'import os\n'), ((5022, 5060), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (['graph_path'], {}), '(graph_path)\n', (5048, 5060), True, 'import tensorflow as tf\n'), ((5141, 5163), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (5161, 5163), True, 'import tensorflow as tf\n'), ((6223, 6271), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(16)', 'figsize': '(16, 1)'}), '(nrows=1, ncols=16, figsize=(16, 1))\n', (6235, 6271), True, 'import matplotlib.pyplot as plt\n'), ((6349, 6377), 'matplotlib.pyplot.title', 'plt.title', (['"""Conv1 16*112*92"""'], {}), "('Conv1 16*112*92')\n", (6358, 6377), True, 'import matplotlib.pyplot as plt\n'), ((6552, 6600), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(16)', 'figsize': '(16, 1)'}), '(nrows=1, ncols=16, figsize=(16, 1))\n', (6564, 6600), True, 'import matplotlib.pyplot as plt\n'), ((6678, 6705), 'matplotlib.pyplot.title', 'plt.title', (['"""Pool1 16*56*46"""'], {}), "('Pool1 16*56*46')\n", (6687, 6705), True, 'import matplotlib.pyplot as plt\n'), ((6872, 6920), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(32)', 'figsize': '(32, 1)'}), '(nrows=1, ncols=32, figsize=(32, 1))\n', (6884, 6920), True, 'import matplotlib.pyplot as plt\n'), ((6998, 7025), 'matplotlib.pyplot.title', 'plt.title', (['"""Conv2 32*56*46"""'], {}), "('Conv2 
32*56*46')\n", (7007, 7025), True, 'import matplotlib.pyplot as plt\n'), ((7198, 7246), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(32)', 'figsize': '(32, 1)'}), '(nrows=1, ncols=32, figsize=(32, 1))\n', (7210, 7246), True, 'import matplotlib.pyplot as plt\n'), ((7321, 7348), 'matplotlib.pyplot.title', 'plt.title', (['"""Pool2 32*28*23"""'], {}), "('Pool2 32*28*23')\n", (7330, 7348), True, 'import matplotlib.pyplot as plt\n'), ((7512, 7560), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(64)', 'figsize': '(32, 1)'}), '(nrows=1, ncols=64, figsize=(32, 1))\n', (7524, 7560), True, 'import matplotlib.pyplot as plt\n'), ((7635, 7662), 'matplotlib.pyplot.title', 'plt.title', (['"""Conv3 64*28*23"""'], {}), "('Conv3 64*28*23')\n", (7644, 7662), True, 'import matplotlib.pyplot as plt\n'), ((7835, 7883), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(64)', 'figsize': '(32, 1)'}), '(nrows=1, ncols=64, figsize=(32, 1))\n', (7847, 7883), True, 'import matplotlib.pyplot as plt\n'), ((7958, 7985), 'matplotlib.pyplot.title', 'plt.title', (['"""Pool3 64*14*12"""'], {}), "('Pool3 64*14*12')\n", (7967, 7985), True, 'import matplotlib.pyplot as plt\n'), ((748, 785), 'cv2.cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (760, 785), False, 'from cv2 import cv2\n'), ((1408, 1426), 'os.mkdir', 'os.mkdir', (['save_dir'], {}), '(save_dir)\n', (1416, 1426), False, 'import os\n'), ((2021, 2043), 'PIL.Image.open', 'Image.open', (['image_name'], {}), '(image_name)\n', (2031, 2043), False, 'from PIL import Image, ImageDraw\n'), ((2072, 2091), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img'], {}), '(img)\n', (2086, 2091), False, 'from PIL import Image, ImageDraw\n'), ((3084, 3106), 'PIL.Image.open', 'Image.open', (['image_name'], {}), '(image_name)\n', (3094, 3106), False, 'from PIL import Image, ImageDraw\n'), ((3135, 3154), 'PIL.ImageDraw.Draw', 
'ImageDraw.Draw', (['img'], {}), '(img)\n', (3149, 3154), False, 'from PIL import Image, ImageDraw\n'), ((3532, 3569), 'cv2.cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (3544, 3569), False, 'from cv2 import cv2\n'), ((4006, 4028), 'PIL.Image.open', 'Image.open', (['image_name'], {}), '(image_name)\n', (4016, 4028), False, 'from PIL import Image, ImageDraw\n'), ((4057, 4076), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img'], {}), '(img)\n', (4071, 4076), False, 'from PIL import Image, ImageDraw\n'), ((4809, 4846), 'numpy.reshape', 'np.reshape', (['input_image', '(112, 92, 3)'], {}), '(input_image, (112, 92, 3))\n', (4819, 4846), True, 'import numpy as np\n'), ((5089, 5122), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['model'], {}), '(model)\n', (5115, 5122), True, 'import tensorflow as tf\n'), ((6173, 6208), 'tensorflow.transpose', 'tf.transpose', (['r1_relu', '[3, 0, 1, 2]'], {}), '(r1_relu, [3, 0, 1, 2])\n', (6185, 6208), True, 'import tensorflow as tf\n'), ((6499, 6537), 'tensorflow.transpose', 'tf.transpose', (['max_pool_1', '[3, 0, 1, 2]'], {}), '(max_pool_1, [3, 0, 1, 2])\n', (6511, 6537), True, 'import tensorflow as tf\n'), ((6822, 6857), 'tensorflow.transpose', 'tf.transpose', (['r2_relu', '[3, 0, 1, 2]'], {}), '(r2_relu, [3, 0, 1, 2])\n', (6834, 6857), True, 'import tensorflow as tf\n'), ((7145, 7183), 'tensorflow.transpose', 'tf.transpose', (['max_pool_2', '[3, 0, 1, 2]'], {}), '(max_pool_2, [3, 0, 1, 2])\n', (7157, 7183), True, 'import tensorflow as tf\n'), ((7462, 7497), 'tensorflow.transpose', 'tf.transpose', (['r3_relu', '[3, 0, 1, 2]'], {}), '(r3_relu, [3, 0, 1, 2])\n', (7474, 7497), True, 'import tensorflow as tf\n'), ((7782, 7820), 'tensorflow.transpose', 'tf.transpose', (['max_pool_3', '[3, 0, 1, 2]'], {}), '(max_pool_3, [3, 0, 1, 2])\n', (7794, 7820), True, 'import tensorflow as tf\n'), ((4378, 4434), 'cv2.cv2.resize', 'cv2.resize', (['img', '(92, 112)'], 
{'interpolation': 'cv2.INTER_AREA'}), '(img, (92, 112), interpolation=cv2.INTER_AREA)\n', (4388, 4434), False, 'from cv2 import cv2\n'), ((8100, 8117), 'numpy.argmax', 'np.argmax', (['result'], {}), '(result)\n', (8109, 8117), True, 'import numpy as np\n'), ((1574, 1596), 'PIL.Image.open', 'Image.open', (['image_name'], {}), '(image_name)\n', (1584, 1596), False, 'from PIL import Image, ImageDraw\n')] |
from typing import List, Tuple
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as scs
from scipy.optimize import minimize
class DistrManager:
    """Builds noisy samples of the linear relation y = 2 + 2x and compares
    an ordinary least squares fit (MNK) against a least absolute deviations
    fit (MNM), plotting both against the ideal line.
    """

    def __init__(
        self, left_border: float = -1.8, right_border: float = 2, step: float = 0.2
    ) -> None:
        # FIX: the borders and step were annotated `int`, but their defaults
        # (-1.8 and 0.2) are floats — the annotations now match the values.
        self._left_border = left_border
        self._right_border = right_border
        self._step = step

    def get_range(self) -> np.ndarray:
        """Return the x grid [left_border, right_border) with the configured step."""
        return np.arange(self._left_border, self._right_border, self._step)

    def eval_x(self, x: float) -> float:
        """Noise-free reference relation: y = 2 + 2x."""
        return 2 + 2 * x

    def get_relation(self, x: List[float]) -> List[float]:
        """Return y = 2 + 2x + eps, eps drawn from N(0, 1) per point."""
        return [self.eval_x(x_i) + scs.norm.rvs(0, 1) for x_i in x]

    def mess_relation(self, y: List[float]) -> List[float]:
        """Turn the first and last observations into outliers (mutates y in place)."""
        print("\nAdd error to relation\n")
        y[0] += 10
        y[-1] -= 10
        return y

    def _get_mnk_params(self, x: np.ndarray, y: np.ndarray) -> Tuple[float, float]:
        """Closed-form least-squares estimates (intercept, slope)."""
        beta_1 = (np.mean(x * y) - np.mean(x) * np.mean(y)) / (
            np.mean(x ** 2) - np.mean(x) ** 2
        )
        beta_0 = np.mean(y) - beta_1 * np.mean(x)
        return beta_0, beta_1

    def mnk(self, x: np.ndarray, y: np.ndarray) -> List[float]:
        """Fit by least squares and return the fitted values over x."""
        beta_0, beta_1 = self._get_mnk_params(x, y)
        print(f"MNK:\t beta_0 = {beta_0}\t beta_1 = {beta_1}")
        return [beta_0 + beta_1 * element for element in x]

    def _minimize_mnm(self, x_0: Tuple[float, float], x: np.ndarray, y: np.ndarray) -> float:
        """Objective for MNM: sum of absolute residuals for params x_0."""
        return sum(abs(y[idx] - x_0[0] - x_0[1] * x_val) for idx, x_val in enumerate(x))

    def _get_mnm_params(self, x: np.ndarray, y: np.ndarray) -> Tuple[float, float]:
        """Least-absolute-deviations estimates, seeded with the MNK solution."""
        beta_0, beta_1 = self._get_mnk_params(x, y)
        minimized = minimize(
            self._minimize_mnm, [beta_0, beta_1], args=(x, y), method="SLSQP"
        )
        return minimized.x[0], minimized.x[1]

    def mnm(self, x: np.ndarray, y: np.ndarray) -> List[float]:
        """Fit by least absolute deviations and return the fitted values over x."""
        beta_0, beta_1 = self._get_mnm_params(x, y)
        print(f"MNM:\t beta_0 = {beta_0}\t beta_1 = {beta_1}")
        return [beta_0 + beta_1 * element for element in x]

    def draw(self, x: np.ndarray, y: np.ndarray, name: str) -> None:
        """Plot the ideal line, both fits, and the sample; report fit distances."""
        y_mnk = self.mnk(x, y)
        y_mnm = self.mnm(x, y)
        # MNK quality is measured with squared error, MNM with absolute error,
        # matching the criterion each estimator optimizes.
        mnk_dist = sum((self.eval_x(x)[i] - y_mnk[i]) ** 2 for i in range(len(y)))
        mnm_dist = sum(abs(self.eval_x(x)[i] - y_mnm[i]) for i in range(len(y)))
        print(f"MNK distance = {mnk_dist}\t MNM distance = {mnm_dist}")
        plt.plot(x, self.eval_x(x), color="red", label="Ideal")
        plt.plot(x, y_mnk, color="green", label="MNK")
        plt.plot(x, y_mnm, color="orange", label="MNM")
        plt.scatter(x, y, c="blue", label="Sample")
        plt.xlim([self._left_border, self._right_border])
        plt.grid()
        plt.legend()
        plt.title(name)
        plt.show()
| [
"numpy.mean",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.legend",
"scipy.optimize.minimize",
"matplotlib.pyplot.plot",
"scipy.stats.norm.rvs",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((441, 501), 'numpy.arange', 'np.arange', (['self._left_border', 'self._right_border', 'self._step'], {}), '(self._left_border, self._right_border, self._step)\n', (450, 501), True, 'import numpy as np\n'), ((1683, 1758), 'scipy.optimize.minimize', 'minimize', (['self._minimize_mnm', '[beta_0, beta_1]'], {'args': '(x, y)', 'method': '"""SLSQP"""'}), "(self._minimize_mnm, [beta_0, beta_1], args=(x, y), method='SLSQP')\n", (1691, 1758), False, 'from scipy.optimize import minimize\n'), ((2489, 2535), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_mnk'], {'color': '"""green"""', 'label': '"""MNK"""'}), "(x, y_mnk, color='green', label='MNK')\n", (2497, 2535), True, 'import matplotlib.pyplot as plt\n'), ((2544, 2591), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_mnm'], {'color': '"""orange"""', 'label': '"""MNM"""'}), "(x, y_mnm, color='orange', label='MNM')\n", (2552, 2591), True, 'import matplotlib.pyplot as plt\n'), ((2600, 2643), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'c': '"""blue"""', 'label': '"""Sample"""'}), "(x, y, c='blue', label='Sample')\n", (2611, 2643), True, 'import matplotlib.pyplot as plt\n'), ((2652, 2701), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[self._left_border, self._right_border]'], {}), '([self._left_border, self._right_border])\n', (2660, 2701), True, 'import matplotlib.pyplot as plt\n'), ((2710, 2720), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2718, 2720), True, 'import matplotlib.pyplot as plt\n'), ((2729, 2741), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2739, 2741), True, 'import matplotlib.pyplot as plt\n'), ((2750, 2765), 'matplotlib.pyplot.title', 'plt.title', (['name'], {}), '(name)\n', (2759, 2765), True, 'import matplotlib.pyplot as plt\n'), ((2774, 2784), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2782, 2784), True, 'import matplotlib.pyplot as plt\n'), ((1069, 1079), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (1076, 1079), True, 'import numpy as np\n'), 
((664, 682), 'scipy.stats.norm.rvs', 'scs.norm.rvs', (['(0)', '(1)'], {}), '(0, 1)\n', (676, 682), True, 'import scipy.stats as scs\n'), ((950, 964), 'numpy.mean', 'np.mean', (['(x * y)'], {}), '(x * y)\n', (957, 964), True, 'import numpy as np\n'), ((1008, 1023), 'numpy.mean', 'np.mean', (['(x ** 2)'], {}), '(x ** 2)\n', (1015, 1023), True, 'import numpy as np\n'), ((1091, 1101), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (1098, 1101), True, 'import numpy as np\n'), ((967, 977), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (974, 977), True, 'import numpy as np\n'), ((980, 990), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (987, 990), True, 'import numpy as np\n'), ((1026, 1036), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (1033, 1036), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.