code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: © 2021 Massachusetts Institute of Technology.
# SPDX-FileCopyrightText: © 2021 <NAME> <<EMAIL>>
# NOTICE: authors should document their contributions in concisely in NOTICE
# with details inline in source files, comments, and docstrings.
"""
"""
import numpy as np
def percolate_inplace(A, B, C, D, E, which, ranges, keep=True):
    """
    Reorder rows or columns of the matrices A..E in place.

    The half-open index spans in `ranges` (a list of (start, stop) pairs) are
    moved out of their positions: the rows/columns between and after the spans
    are compacted toward the front, and -- when `keep` is True -- copies of the
    extracted spans are appended after them, in the order in which the caller
    listed them in `ranges`.

    Which matrices are touched, and along which axis, is selected by `which`:

    - "inputs": columns of B and D
    - "output": rows of C and D
    - "states": columns of C, A and E
    - "constr": rows of B, A and E

    NOTE(review): the names A..E suggest state-space system matrices -- confirm
    against the caller.  When `keep` is False the trailing rows/columns are left
    stale rather than truncated; presumably the caller slices them off.

    :param ranges: list of (start, stop) half-open index pairs to percolate
    :param which: one of "inputs", "output", "states", "constr"
    :param keep: if True, re-append the extracted spans at the end
    :raises RuntimeError: on an unrecognized `which` value
    """
    if keep:
        # make the inverse array through argsorts: argranges[i] is the rank of
        # the caller's i-th range in sorted order, so the extracted segments can
        # later be re-appended in the caller's original listing order.
        argranges = np.argsort([r[0] for r in ranges])
        argranges = np.argsort(argranges)
        ranges = sorted(ranges)
        def percolate1(mat):
            # Row-wise percolation: first copy out each range of rows ...
            segments = []
            for r in ranges:
                A1, A2 = r
                Sl1 = slice(A1, A2)
                segments.append(np.copy(mat[Sl1, :]))
            # ... then shift the rows between/after the ranges toward the front ...
            past_r = ranges[0]
            past_idx = past_r[0]
            for r in ranges[1:]:
                Sl1 = slice(past_r[1], r[0])
                next_idx = past_idx + r[0] - past_r[1]
                Sl2 = slice(past_idx, next_idx)
                mat[Sl2, :] = mat[Sl1, :]
                past_idx = next_idx
                past_r = r
            Sl1 = slice(past_r[1], mat.shape[0])
            next_idx = past_idx + mat.shape[0] - past_r[1]
            Sl2 = slice(past_idx, next_idx)
            mat[Sl2, :] = mat[Sl1, :]
            past_idx = next_idx
            # ... finally write the saved segments back at the end, restoring
            # the caller's original range order via argranges.
            for idx_range in argranges:
                seg = segments[idx_range]
                next_idx = past_idx + seg.shape[0]
                Sl2 = slice(past_idx, next_idx)
                mat[Sl2, :] = seg
                past_idx = next_idx
        def percolate2(mat):
            # Column-wise percolation: same algorithm as percolate1, acting on
            # axis 1 instead of axis 0.
            segments = []
            for r in ranges:
                A1, A2 = r
                Sl1 = slice(A1, A2)
                segments.append(np.copy(mat[:, Sl1]))
            past_r = ranges[0]
            past_idx = past_r[0]
            for r in ranges[1:]:
                Sl1 = slice(past_r[1], r[0])
                next_idx = past_idx + r[0] - past_r[1]
                Sl2 = slice(past_idx, next_idx)
                mat[:, Sl2] = mat[:, Sl1]
                past_idx = next_idx
                past_r = r
            Sl1 = slice(past_r[1], mat.shape[1])
            next_idx = past_idx + mat.shape[1] - past_r[1]
            Sl2 = slice(past_idx, next_idx)
            mat[:, Sl2] = mat[:, Sl1]
            past_idx = next_idx
            for idx_range in argranges:
                seg = segments[idx_range]
                next_idx = past_idx + seg.shape[1]
                Sl2 = slice(past_idx, next_idx)
                mat[:, Sl2] = seg
                past_idx = next_idx
    else:
        ranges = sorted(ranges)
        def percolate1(mat):
            # Row-wise compaction only: shift rows outside the ranges forward;
            # the extracted rows are dropped (trailing rows are left stale).
            past_r = ranges[0]
            past_idx = past_r[0]
            for r in ranges[1:]:
                Sl1 = slice(past_r[1], r[0])
                next_idx = past_idx + r[0] - past_r[1]
                Sl2 = slice(past_idx, next_idx)
                mat[Sl2, :] = mat[Sl1, :]
                past_idx = next_idx
                past_r = r
            Sl1 = slice(past_r[1], mat.shape[0])
            next_idx = past_idx + mat.shape[0] - past_r[1]
            Sl2 = slice(past_idx, next_idx)
            mat[Sl2, :] = mat[Sl1, :]
        def percolate2(mat):
            # Column-wise compaction only (axis-1 counterpart of percolate1).
            past_r = ranges[0]
            past_idx = past_r[0]
            for r in ranges[1:]:
                Sl1 = slice(past_r[1], r[0])
                next_idx = past_idx + r[0] - past_r[1]
                Sl2 = slice(past_idx, next_idx)
                mat[:, Sl2] = mat[:, Sl1]
                past_idx = next_idx
                past_r = r
            Sl1 = slice(past_r[1], mat.shape[1])
            next_idx = past_idx + mat.shape[1] - past_r[1]
            Sl2 = slice(past_idx, next_idx)
            mat[:, Sl2] = mat[:, Sl1]
    # Dispatch to the row- or column-wise variant per matrix.
    if which == "inputs":
        percolate2(B)
        percolate2(D)
    elif which == "output":
        percolate1(C)
        percolate1(D)
    elif which == "states":
        percolate2(C)
        percolate2(A)
        percolate2(E)
    elif which == "constr":
        percolate1(B)
        percolate1(A)
        percolate1(E)
    else:
        raise RuntimeError("Unrecognized 'which' argument")
    return
| [
"numpy.argsort",
"numpy.copy"
] | [((526, 560), 'numpy.argsort', 'np.argsort', (['[r[0] for r in ranges]'], {}), '([r[0] for r in ranges])\n', (536, 560), True, 'import numpy as np\n'), ((581, 602), 'numpy.argsort', 'np.argsort', (['argranges'], {}), '(argranges)\n', (591, 602), True, 'import numpy as np\n'), ((815, 835), 'numpy.copy', 'np.copy', (['mat[Sl1, :]'], {}), '(mat[Sl1, :])\n', (822, 835), True, 'import numpy as np\n'), ((1840, 1860), 'numpy.copy', 'np.copy', (['mat[:, Sl1]'], {}), '(mat[:, Sl1])\n', (1847, 1860), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# 2020, <NAME>
import numpy as np
import pandas as pd
from unittest import TestCase, mock, main
from ..core.io import load_image, ImageBatchGenerator
def _mock_imread(image_file_path):
"""
Mocks `cv2.imread`.
:param image_file_path: mock path to the image,
image name must be a numeric value
:return: mock image, pixel values equal to the
numeric image name
"""
image = np.zeros(shape=(16, 16, 3))
return image + float(image_file_path.split('/')[-1])
class LoadImageTest(TestCase):
    """Unit tests for `io.load_image`."""

    @mock.patch('cv2.imread', lambda _: None)
    def test_image_not_found(self):
        """A missing image must raise `FileNotFoundError`."""
        with self.assertRaises(FileNotFoundError):
            load_image('test')

    @mock.patch('cv2.imread', lambda _: np.zeros(shape=(20, 20)))
    def test_load_resize_expand(self):
        """
        Loading must rescale the image to the requested size and append
        a trailing channel axis.
        """
        loaded = load_image('test', size=(10, 10))
        np.testing.assert_array_equal(loaded, np.zeros(shape=(10, 10, 1)))

    @mock.patch('cv2.imread', lambda _: np.zeros(shape=(19, 20)))
    def test_load_not_square(self):
        """A non-square input image must raise `ValueError`."""
        with self.assertRaises(ValueError):
            load_image('test')
class ImageBatchGeneratorTest(TestCase):
    """
    Tests `io.ImageBatchGenerator` class. Tests that
    batches are correctly defined and that images with
    labels can be retrieved.
    """

    def setUp(self):
        """
        Sets up the tests: six images (ids '1'..'6') alternating between an
        empty mask (np.nan) and the RLE mask '1 4 7 2', batched four at a time.
        """
        image_ids = map(str, range(1, 7))
        self._image_rle_masks = {k: v for k, v in zip(image_ids, [np.nan, '1 4 7 2'] * 3)}
        self._gen = ImageBatchGenerator(
            image_directory='test',
            image_rle_masks=self._image_rle_masks,
            batch_size=4,
            shuffle=False,
            image_size=None,
            mask_size=(4, 4))

    def test_len(self):
        """
        Tests that generator length (number of batches)
        is correct: six images in batches of four give two batches.
        """
        # Bug fix: the original used `assertTrue(len(self._gen), 2)`, where 2
        # is the *message* argument, so the length was never actually checked.
        self.assertEqual(len(self._gen), 2)

    def test_index_slice(self):
        """
        Tests `IndexError` is raised when using
        slice indexing.
        """
        with self.assertRaises(IndexError):
            self._gen[0:2]

    def test_index_out_of_bounds(self):
        """
        Tests `IndexError` is raised when
        accessing out of bounds index.
        """
        with self.assertRaises(IndexError):
            self._gen[2]

    def test_get_image_file_paths(self):
        """
        Tests that `io.ImageBatchGenerator._get_image_file_paths`
        correctly assembles file paths from image ids.
        """
        result = self._gen._get_image_file_paths(['1', '2'])
        expected = ['test/1', 'test/2']
        self.assertListEqual(result, expected)

    @mock.patch('cv2.imread', _mock_imread)
    def test_get_mean(self):
        """
        Tests that `io.ImageBatchGenerator._get_mean`
        correctly computes mean pixel value.
        """
        result = self._gen._get_mean()
        expected = np.mean([i / 255 for i in range(1, 7)])
        self.assertAlmostEqual(result, expected, places=6)

    def test_get_image_mask(self):
        """
        Tests that `io.ImageBatchGenerator._get_image_mask`
        produces the correct target image mask.
        """
        for test_case_rle in (np.nan, '2 42'):
            # A null RLE yields an all-zero mask; '2 42' marks one pixel.
            expected = np.zeros(shape=(4, 4))
            if not pd.isnull(test_case_rle):
                expected[0, 0] = 1
            result = self._gen._get_image_mask(test_case_rle)
            np.testing.assert_array_equal(result, expected)

    @mock.patch('cv2.imread', _mock_imread)
    def test_getitem(self):
        """
        Tests that correct batches are successfully
        retrieved.
        """
        def get_expected_x_y(n, id_offset):
            """
            Creates expected results.
            :param n: number of expected images
            :param id_offset: start of the id range
                of the expected images
            :return: expected x and y arrays
            """
            expected_x = np.zeros(shape=(n, 16, 16, 3))
            for i in range(n):
                expected_x[i, :, :, :] = (id_offset + i + 1) / 255
            expected_y_0 = np.zeros(shape=(4, 4))
            expected_y_1 = np.zeros(shape=(4, 4))
            expected_y_1[0, 0] = 1
            expected_y = np.array([expected_y_0.ravel(), expected_y_1.ravel()] * (n // 2))
            return expected_x, expected_y

        # First batch holds images 1-4, second batch the remaining 2.
        for i, n, offset in ((0, 4, 0), (1, 2, 4)):
            result_x, result_y = self._gen[i]
            expected_x, expected_y = get_expected_x_y(n, offset)
            np.testing.assert_array_equal(result_y, expected_y)
            np.testing.assert_array_almost_equal(result_x, expected_x, decimal=6)

    @mock.patch('cv2.imread', _mock_imread)
    def test_conjure(self):
        """
        Tests that `io.ImageBatchGenerator` correctly materializes
        all the data.
        """
        result_x, result_y = self._gen.conjure()
        expected_shape_x = (6, 16, 16, 3)
        expected_shape_y = (6, 16)
        tests = ((result_x.shape, expected_shape_x),
                 (result_y.shape, expected_shape_y))
        for result, expected in tests:
            self.assertTupleEqual(result, expected)
class ImageBatchGeneratorShuffleTest(TestCase):
    """
    Tests image shuffling in `io.ImageBatchGenerator`.
    """

    def test_shuffle(self):
        """
        Tests image id indexes are in different
        order after epoch end (with a fixed RNG seed).
        """
        np.random.seed(0)
        generator = ImageBatchGenerator(
            image_directory=None,
            image_rle_masks={'1': np.nan, '2': np.nan, '3': '1 3'},
            mask_size=(4, 4))
        order_before = generator._indexes
        generator.on_epoch_end()
        order_after = generator._indexes
        np.testing.assert_array_equal(order_before, np.array([2, 1, 0]))
        np.testing.assert_array_equal(order_after, np.array([2, 0, 1]))
if __name__ == '__main__':
    # Run this module's unittest suite when executed directly.
    main()
| [
"unittest.main",
"numpy.random.seed",
"numpy.testing.assert_array_equal",
"numpy.zeros",
"pandas.isnull",
"unittest.mock.patch",
"numpy.array",
"numpy.testing.assert_array_almost_equal"
] | [((486, 513), 'numpy.zeros', 'np.zeros', ([], {'shape': '(16, 16, 3)'}), '(shape=(16, 16, 3))\n', (494, 513), True, 'import numpy as np\n'), ((653, 693), 'unittest.mock.patch', 'mock.patch', (['"""cv2.imread"""', '(lambda _: None)'], {}), "('cv2.imread', lambda _: None)\n", (663, 693), False, 'from unittest import TestCase, mock, main\n'), ((3158, 3196), 'unittest.mock.patch', 'mock.patch', (['"""cv2.imread"""', '_mock_imread'], {}), "('cv2.imread', _mock_imread)\n", (3168, 3196), False, 'from unittest import TestCase, mock, main\n'), ((3979, 4017), 'unittest.mock.patch', 'mock.patch', (['"""cv2.imread"""', '_mock_imread'], {}), "('cv2.imread', _mock_imread)\n", (3989, 4017), False, 'from unittest import TestCase, mock, main\n'), ((5197, 5235), 'unittest.mock.patch', 'mock.patch', (['"""cv2.imread"""', '_mock_imread'], {}), "('cv2.imread', _mock_imread)\n", (5207, 5235), False, 'from unittest import TestCase, mock, main\n'), ((6521, 6527), 'unittest.main', 'main', ([], {}), '()\n', (6525, 6527), False, 'from unittest import TestCase, mock, main\n'), ((1211, 1238), 'numpy.zeros', 'np.zeros', ([], {'shape': '(10, 10, 1)'}), '(shape=(10, 10, 1))\n', (1219, 1238), True, 'import numpy as np\n'), ((1247, 1294), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'expected'], {}), '(result, expected)\n', (1276, 1294), True, 'import numpy as np\n'), ((5966, 5983), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (5980, 5983), True, 'import numpy as np\n'), ((6282, 6301), 'numpy.array', 'np.array', (['[2, 1, 0]'], {}), '([2, 1, 0])\n', (6290, 6301), True, 'import numpy as np\n'), ((6327, 6346), 'numpy.array', 'np.array', (['[2, 0, 1]'], {}), '([2, 0, 1])\n', (6335, 6346), True, 'import numpy as np\n'), ((6356, 6418), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['indexes_before', 'expected_before'], {}), '(indexes_before, expected_before)\n', (6385, 6418), True, 'import numpy as np\n'), ((6427, 6487), 
'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['indexes_after', 'expected_after'], {}), '(indexes_after, expected_after)\n', (6456, 6487), True, 'import numpy as np\n'), ((946, 970), 'numpy.zeros', 'np.zeros', ([], {'shape': '(20, 20)'}), '(shape=(20, 20))\n', (954, 970), True, 'import numpy as np\n'), ((1336, 1360), 'numpy.zeros', 'np.zeros', ([], {'shape': '(19, 20)'}), '(shape=(19, 20))\n', (1344, 1360), True, 'import numpy as np\n'), ((3747, 3769), 'numpy.zeros', 'np.zeros', ([], {'shape': '(4, 4)'}), '(shape=(4, 4))\n', (3755, 3769), True, 'import numpy as np\n'), ((3925, 3972), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result', 'expected'], {}), '(result, expected)\n', (3954, 3972), True, 'import numpy as np\n'), ((4481, 4511), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n, 16, 16, 3)'}), '(shape=(n, 16, 16, 3))\n', (4489, 4511), True, 'import numpy as np\n'), ((4638, 4660), 'numpy.zeros', 'np.zeros', ([], {'shape': '(4, 4)'}), '(shape=(4, 4))\n', (4646, 4660), True, 'import numpy as np\n'), ((4688, 4710), 'numpy.zeros', 'np.zeros', ([], {'shape': '(4, 4)'}), '(shape=(4, 4))\n', (4696, 4710), True, 'import numpy as np\n'), ((5057, 5108), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['result_y', 'expected_y'], {}), '(result_y, expected_y)\n', (5086, 5108), True, 'import numpy as np\n'), ((5121, 5190), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['result_x', 'expected_x'], {'decimal': '(6)'}), '(result_x, expected_x, decimal=6)\n', (5157, 5190), True, 'import numpy as np\n'), ((3789, 3813), 'pandas.isnull', 'pd.isnull', (['test_case_rle'], {}), '(test_case_rle)\n', (3798, 3813), True, 'import pandas as pd\n')] |
# q learning
from pyqlearning.qlearning.boltzmann_q_learning import BoltzmannQLearning
from pyqlearning.qlearning.greedy_q_learning import GreedyQLearning
# testing
import unittest
# data
import numpy as np
# package
from utils import FIFOQueue, EPSILON
class AdaptiveBetaQLearning(BoltzmannQLearning):
    """
    Boltzmann Q-learning whose exploration parameter adapts to the magnitude
    of recent temporal-difference errors (TDE), in the spirit of VDBE-Softmax.
    """

    def __init__(self, buffer_size=5, sigma=1., delta=1, inverse_temp=True):
        """
        :param buffer_size: buffer size for historical TDE
        :param sigma: sigma parameter for beta function
        :param delta: delta parameter for beta update, default 1: fastest adaptation of exploration
        :param inverse_temp: whether optimizes for beta (inverse temperature) or for sigmoid (temperature
            parameter)
        """
        # NOTE(review): passing `self` on to the parent initializer looks wrong
        # (it becomes the parent's first positional argument); kept as-is since
        # the parent signature is not visible here -- verify against pyqlearning.
        super().__init__(self)
        self.TDE = FIFOQueue(capacity=buffer_size)
        self.sigma = sigma
        if inverse_temp:
            self.__to_sigmoid = lambda x: 1 / x
            self.update_exploration = self.__update_beta
            self.explore = EPSILON
        else:
            # Guard against a zero temperature when converting to the sigmoid.
            self.__to_sigmoid = lambda x: EPSILON if x == 0 else x
            self.update_exploration = self.__update_temp
            self.explore = 1
        self.delta = delta

    def __update_temp(self):
        # Temperature tracks exp(sigma * mean|TDE|) - 1 at adaptation rate delta.
        self.explore += self.delta * (np.exp(self.sigma * self.TDE.abs_avg()) - 1 - self.explore)

    def __update_beta(self):
        # Inverse temperature tracks 1 / (exp(sigma * mean|TDE|) - 1 + EPSILON).
        self.explore += self.delta * (1 / (np.exp(self.sigma * self.TDE.abs_avg()) - 1 + EPSILON) - self.explore)

    def __calculate_sigmoid(self):
        r"""
        Function of temperature in BoltzmannQLearning. Modified here to
        SOFTMAX VBDE: f(s, \sigma) = \frac{e^{-\sigma |TDE|}}{1-e^{-\sigma |TDE|}}

        NOTE(review): because of Python's name mangling, this double-underscore
        method does NOT override the parent's private `__calculate_sigmoid`;
        the parent keeps calling its own version. Confirm against pyqlearning
        before relying on this override.
        """
        return self.__to_sigmoid(self.explore)

    def update_q(self, state_key, action_key, reward_value, next_max_q):
        '''
        Update Q-Value while at the mean time update the exploration
        parameter from the newly observed temporal-difference error.
        Args:
            state_key:      The key of state.
            action_key:     The key of action.
            reward_value:   R-Value(Reward).
            next_max_q:     Maximum Q-Value.
        '''
        # Now Q-Value.
        q = self.extract_q_df(state_key, action_key)
        self.TDE.add(reward_value + self.gamma_value * next_max_q - q)
        self.update_exploration()
        # Bug fix: `super(BoltzmannQLearning).update_q(...)` built an *unbound*
        # super object (one-argument form) and raised AttributeError at runtime;
        # the intent is to delegate to the parent class's update_q.
        super().update_q(state_key, action_key, reward_value, next_max_q)
class AdaptiveEpsilonGreedy(GreedyQLearning):
    """
    Epsilon-greedy Q-learning whose epsilon rate adapts to the magnitude of
    the most recent temporal-difference error (TDE).
    """

    def __init__(self, sigma=1.):
        """
        :param sigma: scale parameter of the adaptive epsilon rate
        """
        # NOTE(review): passing `self` on to the parent initializer looks wrong;
        # kept as-is since the parent signature is not visible here.
        super().__init__(self)
        self.sigma = sigma
        # Robustness fix: initialize TDE so get_epsilon_greedy_rate() can be
        # called before the first update_q() without raising AttributeError.
        self.TDE = 0.

    def update_q(self, state_key, action_key, reward_value, next_max_q):
        '''
        Update Q-Value and record the temporal-difference error.
        Args:
            state_key:      The key of state.
            action_key:     The key of action.
            reward_value:   R-Value(Reward).
            next_max_q:     Maximum Q-Value.
        '''
        # Now Q-Value.
        self.TDE = reward_value + self.gamma_value * next_max_q - self.extract_q_df(state_key, action_key)
        # Bug fix: the original called `super(BoltzmannQLearning).update_q(...)`,
        # which names the wrong base class *and* builds an unbound super object;
        # delegate to this class's actual parent (GreedyQLearning) instead.
        super().update_q(state_key, action_key, reward_value, next_max_q)

    def get_epsilon_greedy_rate(self):
        ''' getter: (1 - t) / (1 + t) with t = exp(-|TDE| / sigma). '''
        temp = np.exp(-np.abs(self.TDE) / self.sigma)
        return (1 - temp) / (1 + temp)
class sigmoid_tests(unittest.TestCase):
    """Smoke tests for AdaptiveBetaQLearning construction."""

    def test_trivial(self):
        """A default-constructed learner must keep sigma at its default of 1."""
        learner = AdaptiveBetaQLearning()
        self.assertEqual(learner.sigma, 1)
if __name__ == '__main__':
    # Run this module's unittest suite when executed directly.
    unittest.main()
| [
"unittest.main",
"numpy.abs",
"utils.FIFOQueue"
] | [((3456, 3471), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3469, 3471), False, 'import unittest\n'), ((790, 821), 'utils.FIFOQueue', 'FIFOQueue', ([], {'capacity': 'buffer_size'}), '(capacity=buffer_size)\n', (799, 821), False, 'from utils import FIFOQueue, EPSILON\n'), ((3206, 3222), 'numpy.abs', 'np.abs', (['self.TDE'], {}), '(self.TDE)\n', (3212, 3222), True, 'import numpy as np\n')] |
import numpy as np
import pytest
import pymodaq.daq_utils
from pymodaq.daq_utils import scanner
from pymodaq.daq_utils import exceptions as exceptions
class TestScanInfo:
    """Tests for scanner.ScanInfo construction and representation."""

    def test_ScanInfo(self):
        """Every constructor argument must be stored untouched on the instance."""
        n_steps = 10
        empty_indexes = np.array([])
        empty_unique = np.array([])
        empty_positions = np.array([])
        extra_kwargs = ['test1', 'test2']
        info = scanner.ScanInfo(n_steps, empty_positions, empty_indexes, empty_unique, kwargs=extra_kwargs)
        assert info.Nsteps is n_steps
        assert info.axes_indexes is empty_indexes
        assert info.axes_unique is empty_unique
        assert info.positions is empty_positions
        assert info.kwargs is extra_kwargs

    def test__repr__(self):
        """__repr__ must work for populated and default-constructed infos alike."""
        populated = scanner.ScanInfo(10, np.array([]), np.array([]), np.array([]))
        assert populated.__repr__()
        default_info = scanner.ScanInfo()
        assert default_info.__repr__()
class TestScanParameters:
    # Exercises scanner.ScanParameters construction, attribute access, and the
    # module-level scan-building helpers (linear / spiral / random).
    def test_ScanParameters(self):
        starts = [1, 2]
        stops = [10, 20]
        steps = [1, 2]
        scan_param = scanner.ScanParameters(starts=starts, stops=stops, steps=steps)
        scan_param.vectors = None
        assert scan_param.Naxes == 1
        assert scan_param.scan_type == 'Scan1D'
        assert scan_param.scan_subtype == 'Linear'
        assert scan_param.starts == starts
        assert scan_param.stops == stops
        assert scan_param.steps == steps
        # Unknown scan (sub)types must be rejected at construction time.
        with pytest.raises(ValueError):
            scanner.ScanParameters(scan_type='test', starts=starts, stops=stops, steps=steps)
        with pytest.raises(ValueError):
            scanner.ScanParameters(scan_subtype='test', starts=starts, stops=stops, steps=steps)
    def test_getattr(self):
        # Attribute access is routed through __getattr__; unknown names raise.
        starts = [1, 2]
        stops = [10, 20]
        steps = [1, 2]
        positions = np.array([[1], [2], [3], [4], [5], [6], [7], [8], [9], [10]])
        axes_indexes = np.array([[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]])
        axes_unique = [np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])]
        scan_param = scanner.ScanParameters(starts=starts, stops=stops, steps=steps)
        assert scan_param.__getattr__('Nsteps') == 10
        assert np.array_equal(scan_param.__getattr__('positions'), positions)
        assert np.array_equal(scan_param.__getattr__('axes_indexes'), axes_indexes)
        assert np.array_equal(scan_param.__getattr__('axes_unique'), axes_unique)
        assert scan_param.__getattr__('adaptive_loss') == None
        with pytest.raises(ValueError):
            scan_param.__getattr__('test')
    def test_get_info_from_positions(self):
        starts = [1, 2]
        stops = [10, 20]
        steps = [1, 2]
        positions = np.array([1, 2, 3, 4])
        scan_param = scanner.ScanParameters(starts=starts, stops=stops, steps=steps, positions=positions)
        result = scan_param.get_info_from_positions(positions)
        # A 1D position vector is expanded to a column (N, 1) array.
        assert np.array_equal(result.positions, np.expand_dims(positions, 1))
        assert np.array_equal(result.axes_unique, [positions])
        assert scan_param.get_info_from_positions(None)
    def test_set_scan(self):
        # Scan1D
        starts = [1, 2]
        stops = [10, 20]
        steps = [1, 2]
        positions = np.array([[1], [2], [3], [4]])
        scan_param = scanner.ScanParameters(starts=starts, stops=stops, steps=steps, positions=positions)
        result = scan_param.set_scan()
        assert np.array_equal(result.positions, scan_param.get_info_from_positions(positions).positions)
        scan_param = scanner.ScanParameters(starts=starts, stops=stops, steps=steps,
                                            positions=positions, scan_subtype='Random')
        result = scan_param.set_scan()
        # A random scan must contain the same positions, in any order.
        for value in positions:
            assert value in result.positions
        # Scan2D
        np.array([[1, 2], [2, 3], [3, 4], [4, 5]])
    def test_set_scan_spiral(self):
        nsteps = 10
        starts = np.array([10.1, -5.87])
        steps = np.array([0.12, 1])
        rmaxs = np.rint(nsteps / 2) * steps
        # Sizing the spiral via rmaxs or via nsteps must be equivalent.
        positions = scanner.set_scan_spiral(starts, rmaxs, steps, nsteps=None)
        positions2 = scanner.set_scan_spiral(starts, [], steps, nsteps=nsteps)
        assert isinstance(positions, np.ndarray)
        assert positions.shape == (121, 2)
        assert np.all(positions == pytest.approx(positions2))
        # An oversized request is clipped (observed cap: 16384 positions).
        positions = scanner.set_scan_spiral(starts, np.rint(10000 / 2) * steps, steps)
        assert positions.shape[0] == 16384
    def test_set_scan_linear(self):
        positions = scanner.set_scan_linear(np.array([0, 0]), np.array([1, -21]), np.array([0.1, -0.3]))
        assert positions.shape == (781, 2)
        positions = scanner.set_scan_linear(np.array([0, 0]), np.array([1, -21]), np.array([0.01, -0.03]))
        assert positions.shape == (10032, 2)
        # With the step clipping enabled (last two args), the grid is coarser.
        positions = scanner.set_scan_linear(np.array([0, 0]), np.array([1, -21]),
                                            np.array([0.01, -0.03]), False, 1000)
        assert positions.shape == (1092, 2)
        # test back and forth: every second column is traversed in reverse.
        positionsbf = scanner.set_scan_linear(np.array([0, 0]), np.array([1, 21]), np.array([0.1, 0.3]), True, 1000)
        assert positionsbf.shape == (781, 2)
        nx = len(np.unique(positionsbf[:, 0]))
        ny = len(np.unique(positionsbf[:, 1]))
        assert np.all(positionsbf[:ny - 1, 1] == positionsbf[2 * ny - 1:ny:-1, 1])
        # Degenerate axes (zero step, step/stop sign mismatch, zero span)
        # collapse the scan to a single position.
        positions = scanner.set_scan_linear(np.array([0, 0]), np.array([1, 21]), np.array([0., 0.3]))
        assert positions.shape == (1, 2)
        positions = scanner.set_scan_linear(np.array([0, 0]), np.array([1, -21]), np.array([0.1, 0.3]))
        assert positions.shape == (1, 2)
        positions = scanner.set_scan_linear(np.array([0, 0]), np.array([0, 21]), np.array([0.1, 0.3]))
        assert positions.shape == (1, 2)
    def test_set_scan_random(self):
        # A random scan must visit exactly the linear scan's positions, reordered.
        positions = scanner.set_scan_linear(np.array([0, 0]), np.array([1, -21]), np.array([0.1, -0.3]))
        positions_r = scanner.set_scan_random(np.array([0, 0]), np.array([1, -21]), np.array([0.1, -0.3]))
        assert positions_r.shape == positions.shape
        for pos in positions_r:
            assert pos in positions
| [
"numpy.unique",
"numpy.expand_dims",
"pytest.approx",
"pymodaq.daq_utils.scanner.ScanInfo",
"pytest.raises",
"numpy.rint",
"pymodaq.daq_utils.scanner.ScanParameters",
"numpy.array",
"numpy.array_equal",
"pymodaq.daq_utils.scanner.set_scan_spiral",
"numpy.all"
] | [((246, 258), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (254, 258), True, 'import numpy as np\n'), ((281, 293), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (289, 293), True, 'import numpy as np\n'), ((314, 326), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (322, 326), True, 'import numpy as np\n'), ((384, 461), 'pymodaq.daq_utils.scanner.ScanInfo', 'scanner.ScanInfo', (['Nsteps', 'positions', 'axes_indexes', 'axes_unique'], {'kwargs': 'kwargs'}), '(Nsteps, positions, axes_indexes, axes_unique, kwargs=kwargs)\n', (400, 461), False, 'from pymodaq.daq_utils import scanner\n'), ((777, 789), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (785, 789), True, 'import numpy as np\n'), ((812, 824), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (820, 824), True, 'import numpy as np\n'), ((845, 857), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (853, 857), True, 'import numpy as np\n'), ((879, 941), 'pymodaq.daq_utils.scanner.ScanInfo', 'scanner.ScanInfo', (['Nsteps', 'positions', 'axes_indexes', 'axes_unique'], {}), '(Nsteps, positions, axes_indexes, axes_unique)\n', (895, 941), False, 'from pymodaq.daq_utils import scanner\n'), ((1001, 1019), 'pymodaq.daq_utils.scanner.ScanInfo', 'scanner.ScanInfo', ([], {}), '()\n', (1017, 1019), False, 'from pymodaq.daq_utils import scanner\n'), ((1212, 1275), 'pymodaq.daq_utils.scanner.ScanParameters', 'scanner.ScanParameters', ([], {'starts': 'starts', 'stops': 'stops', 'steps': 'steps'}), '(starts=starts, stops=stops, steps=steps)\n', (1234, 1275), False, 'from pymodaq.daq_utils import scanner\n'), ((1965, 2026), 'numpy.array', 'np.array', (['[[1], [2], [3], [4], [5], [6], [7], [8], [9], [10]]'], {}), '([[1], [2], [3], [4], [5], [6], [7], [8], [9], [10]])\n', (1973, 2026), True, 'import numpy as np\n'), ((2050, 2110), 'numpy.array', 'np.array', (['[[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]]'], {}), '([[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]])\n', (2058, 2110), True, 'import numpy 
as np\n'), ((2198, 2261), 'pymodaq.daq_utils.scanner.ScanParameters', 'scanner.ScanParameters', ([], {'starts': 'starts', 'stops': 'stops', 'steps': 'steps'}), '(starts=starts, stops=stops, steps=steps)\n', (2220, 2261), False, 'from pymodaq.daq_utils import scanner\n'), ((2844, 2866), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (2852, 2866), True, 'import numpy as np\n'), ((2888, 2977), 'pymodaq.daq_utils.scanner.ScanParameters', 'scanner.ScanParameters', ([], {'starts': 'starts', 'stops': 'stops', 'steps': 'steps', 'positions': 'positions'}), '(starts=starts, stops=stops, steps=steps, positions=\n positions)\n', (2910, 2977), False, 'from pymodaq.daq_utils import scanner\n'), ((3129, 3176), 'numpy.array_equal', 'np.array_equal', (['result.axes_unique', '[positions]'], {}), '(result.axes_unique, [positions])\n', (3143, 3176), True, 'import numpy as np\n'), ((3372, 3402), 'numpy.array', 'np.array', (['[[1], [2], [3], [4]]'], {}), '([[1], [2], [3], [4]])\n', (3380, 3402), True, 'import numpy as np\n'), ((3424, 3513), 'pymodaq.daq_utils.scanner.ScanParameters', 'scanner.ScanParameters', ([], {'starts': 'starts', 'stops': 'stops', 'steps': 'steps', 'positions': 'positions'}), '(starts=starts, stops=stops, steps=steps, positions=\n positions)\n', (3446, 3513), False, 'from pymodaq.daq_utils import scanner\n'), ((3675, 3787), 'pymodaq.daq_utils.scanner.ScanParameters', 'scanner.ScanParameters', ([], {'starts': 'starts', 'stops': 'stops', 'steps': 'steps', 'positions': 'positions', 'scan_subtype': '"""Random"""'}), "(starts=starts, stops=stops, steps=steps, positions=\n positions, scan_subtype='Random')\n", (3697, 3787), False, 'from pymodaq.daq_utils import scanner\n'), ((3969, 4011), 'numpy.array', 'np.array', (['[[1, 2], [2, 3], [3, 4], [4, 5]]'], {}), '([[1, 2], [2, 3], [3, 4], [4, 5]])\n', (3977, 4011), True, 'import numpy as np\n'), ((4086, 4109), 'numpy.array', 'np.array', (['[10.1, -5.87]'], {}), '([10.1, -5.87])\n', (4094, 4109), True, 
'import numpy as np\n'), ((4126, 4145), 'numpy.array', 'np.array', (['[0.12, 1]'], {}), '([0.12, 1])\n', (4134, 4145), True, 'import numpy as np\n'), ((4211, 4269), 'pymodaq.daq_utils.scanner.set_scan_spiral', 'scanner.set_scan_spiral', (['starts', 'rmaxs', 'steps'], {'nsteps': 'None'}), '(starts, rmaxs, steps, nsteps=None)\n', (4234, 4269), False, 'from pymodaq.daq_utils import scanner\n'), ((4292, 4349), 'pymodaq.daq_utils.scanner.set_scan_spiral', 'scanner.set_scan_spiral', (['starts', '[]', 'steps'], {'nsteps': 'nsteps'}), '(starts, [], steps, nsteps=nsteps)\n', (4315, 4349), False, 'from pymodaq.daq_utils import scanner\n'), ((5485, 5552), 'numpy.all', 'np.all', (['(positionsbf[:ny - 1, 1] == positionsbf[2 * ny - 1:ny:-1, 1])'], {}), '(positionsbf[:ny - 1, 1] == positionsbf[2 * ny - 1:ny:-1, 1])\n', (5491, 5552), True, 'import numpy as np\n'), ((1585, 1610), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1598, 1610), False, 'import pytest\n'), ((1624, 1710), 'pymodaq.daq_utils.scanner.ScanParameters', 'scanner.ScanParameters', ([], {'scan_type': '"""test"""', 'starts': 'starts', 'stops': 'stops', 'steps': 'steps'}), "(scan_type='test', starts=starts, stops=stops, steps=\n steps)\n", (1646, 1710), False, 'from pymodaq.daq_utils import scanner\n'), ((1720, 1745), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1733, 1745), False, 'import pytest\n'), ((1759, 1847), 'pymodaq.daq_utils.scanner.ScanParameters', 'scanner.ScanParameters', ([], {'scan_subtype': '"""test"""', 'starts': 'starts', 'stops': 'stops', 'steps': 'steps'}), "(scan_subtype='test', starts=starts, stops=stops,\n steps=steps)\n", (1781, 1847), False, 'from pymodaq.daq_utils import scanner\n'), ((2134, 2175), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n', (2142, 2175), True, 'import numpy as np\n'), ((2637, 2662), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), 
'(ValueError)\n', (2650, 2662), False, 'import pytest\n'), ((3084, 3112), 'numpy.expand_dims', 'np.expand_dims', (['positions', '(1)'], {}), '(positions, 1)\n', (3098, 3112), True, 'import numpy as np\n'), ((4162, 4181), 'numpy.rint', 'np.rint', (['(nsteps / 2)'], {}), '(nsteps / 2)\n', (4169, 4181), True, 'import numpy as np\n'), ((4716, 4732), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (4724, 4732), True, 'import numpy as np\n'), ((4734, 4752), 'numpy.array', 'np.array', (['[1, -21]'], {}), '([1, -21])\n', (4742, 4752), True, 'import numpy as np\n'), ((4754, 4775), 'numpy.array', 'np.array', (['[0.1, -0.3]'], {}), '([0.1, -0.3])\n', (4762, 4775), True, 'import numpy as np\n'), ((4866, 4882), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (4874, 4882), True, 'import numpy as np\n'), ((4884, 4902), 'numpy.array', 'np.array', (['[1, -21]'], {}), '([1, -21])\n', (4892, 4902), True, 'import numpy as np\n'), ((4904, 4927), 'numpy.array', 'np.array', (['[0.01, -0.03]'], {}), '([0.01, -0.03])\n', (4912, 4927), True, 'import numpy as np\n'), ((5018, 5034), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (5026, 5034), True, 'import numpy as np\n'), ((5036, 5054), 'numpy.array', 'np.array', (['[1, -21]'], {}), '([1, -21])\n', (5044, 5054), True, 'import numpy as np\n'), ((5100, 5123), 'numpy.array', 'np.array', (['[0.01, -0.03]'], {}), '([0.01, -0.03])\n', (5108, 5123), True, 'import numpy as np\n'), ((5260, 5276), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (5268, 5276), True, 'import numpy as np\n'), ((5278, 5295), 'numpy.array', 'np.array', (['[1, 21]'], {}), '([1, 21])\n', (5286, 5295), True, 'import numpy as np\n'), ((5297, 5317), 'numpy.array', 'np.array', (['[0.1, 0.3]'], {}), '([0.1, 0.3])\n', (5305, 5317), True, 'import numpy as np\n'), ((5393, 5421), 'numpy.unique', 'np.unique', (['positionsbf[:, 0]'], {}), '(positionsbf[:, 0])\n', (5402, 5421), True, 'import numpy as np\n'), ((5440, 5468), 'numpy.unique', 
'np.unique', (['positionsbf[:, 1]'], {}), '(positionsbf[:, 1])\n', (5449, 5468), True, 'import numpy as np\n'), ((5598, 5614), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (5606, 5614), True, 'import numpy as np\n'), ((5616, 5633), 'numpy.array', 'np.array', (['[1, 21]'], {}), '([1, 21])\n', (5624, 5633), True, 'import numpy as np\n'), ((5635, 5655), 'numpy.array', 'np.array', (['[0.0, 0.3]'], {}), '([0.0, 0.3])\n', (5643, 5655), True, 'import numpy as np\n'), ((5741, 5757), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (5749, 5757), True, 'import numpy as np\n'), ((5759, 5777), 'numpy.array', 'np.array', (['[1, -21]'], {}), '([1, -21])\n', (5767, 5777), True, 'import numpy as np\n'), ((5779, 5799), 'numpy.array', 'np.array', (['[0.1, 0.3]'], {}), '([0.1, 0.3])\n', (5787, 5799), True, 'import numpy as np\n'), ((5886, 5902), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (5894, 5902), True, 'import numpy as np\n'), ((5904, 5921), 'numpy.array', 'np.array', (['[0, 21]'], {}), '([0, 21])\n', (5912, 5921), True, 'import numpy as np\n'), ((5923, 5943), 'numpy.array', 'np.array', (['[0.1, 0.3]'], {}), '([0.1, 0.3])\n', (5931, 5943), True, 'import numpy as np\n'), ((6067, 6083), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (6075, 6083), True, 'import numpy as np\n'), ((6085, 6103), 'numpy.array', 'np.array', (['[1, -21]'], {}), '([1, -21])\n', (6093, 6103), True, 'import numpy as np\n'), ((6105, 6126), 'numpy.array', 'np.array', (['[0.1, -0.3]'], {}), '([0.1, -0.3])\n', (6113, 6126), True, 'import numpy as np\n'), ((6174, 6190), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (6182, 6190), True, 'import numpy as np\n'), ((6192, 6210), 'numpy.array', 'np.array', (['[1, -21]'], {}), '([1, -21])\n', (6200, 6210), True, 'import numpy as np\n'), ((6212, 6233), 'numpy.array', 'np.array', (['[0.1, -0.3]'], {}), '([0.1, -0.3])\n', (6220, 6233), True, 'import numpy as np\n'), ((4477, 4502), 'pytest.approx', 
'pytest.approx', (['positions2'], {}), '(positions2)\n', (4490, 4502), False, 'import pytest\n'), ((4557, 4575), 'numpy.rint', 'np.rint', (['(10000 / 2)'], {}), '(10000 / 2)\n', (4564, 4575), True, 'import numpy as np\n')] |
import torch
from torch.autograd import Variable
import numpy as np
import time, math
import matplotlib.pyplot as plt
import time, math
import matplotlib.pyplot as plt
import os
import torch.nn as nn
from skimage import data,io
import cv2
torch.backends.cudnn.enabled = True
def PSNR(pred, gt):#this function(tested) can be used in 2D or 3D
imdff = pred - gt
rmse = math.sqrt(np.mean(imdff ** 2))
if rmse == 0:
return 100
return 20 * math.log10(255.0 / rmse)
def generate_2Dimage(array_like,save_mode='3D_VDSR_/',image_format='bmp'):
if not isinstance(array_like,np.ndarray):
array_like=np.asarray(array_like)
# shape=array_like.shape()
if not os.path.exists('3D_VDSR_/'):
os.mkdir(save_mode)
for count,every_image in enumerate(array_like):
cv2.imwrite(save_mode+str(count+1)+'.'+image_format,every_image)
print ('Successfully save'+str(count)+image_format+'image!')
def display_2Dimage(image_array,mode='gray'):
plt.imshow(image_array,cmap=mode)
plt.show()
#opt = parser.parse_args()
CUDA_ENABLE = 0
#if CUDA_ENABLE and not torch.cuda.is_available():
# raise Exception("No GPU found, please run without --cuda")
model_path="model/0809-1245L__model/model_epoch_2.pkl"#current filepath
model = torch.load(model_path)['model']
model=model.cpu()
params=model.state_dict()
#for item in params:
# print (item)
# print(params['module.residual_layer.0.conv.weight'])
if CUDA_ENABLE:
model = model.cuda()
print ('Using GPU acceleration!')
else :
model=model.cpu()
print ('Using CPU to compute')
def read_imagecollection(file_path):
imageset_path=os.path.join(os.getcwd(),file_path)
imgs=io.imread_collection(imageset_path)
imgs_arrayset=[]
for img in imgs:
imgs_arrayset.append(img)
imgs_arrayset=np.asarray(imgs_arrayset).astype(np.float)
print ('Shape of imageset is:',imgs_arrayset.shape)
return imgs_arrayset
dataset_interp=read_imagecollection('images/LR/*.bmp')
dataset_interp=dataset_interp/255#normlize the gray rank to 0-1
dataset_interp=dataset_interp[:40,:2580,:2580]
num,h,w=dataset_interp.shape
batch_generate_size=20
reconstruction_output=np.zeros((num,h,w))
for count_d in range((num//batch_generate_size)):
for count_h in range((h//batch_generate_size)):
for count_w in range((w//batch_generate_size)):
pixel_start_d=count_d*batch_generate_size
pixel_end_d=(count_d+1)*batch_generate_size
pixel_start_h=count_h*batch_generate_size
pixel_end_h=(count_h+1)*batch_generate_size
pixel_start_w=count_w*batch_generate_size
pixel_end_w=(count_w+1)*batch_generate_size
testdata=dataset_interp[pixel_start_d:pixel_end_d,pixel_start_h:pixel_end_h,pixel_start_w:pixel_end_w]
print ('input data from interplation:',testdata.shape)
testdata=testdata.reshape(1,1,batch_generate_size,batch_generate_size,batch_generate_size)
# testdata=torch.cuda.FloatTensor(testdata)
testdata=torch.Tensor(testdata)
print (testdata)
if CUDA_ENABLE:
testdata_variable=Variable(testdata).cuda()
testdata_output=model(testdata_variable)
output=testdata_output.data.cpu().numpy().squeeze()
print ('Using GPU to accelerate....')
else :
testdata_variable=Variable(testdata).cpu()
print (type(testdata_variable))
testdata_output=model(testdata_variable)
output=testdata_output.data.numpy().squeeze()
print ('Using cpu to accelerate....')
torch._C._cuda_emptyCache()
output=output*255#restore to the gray rank0-255
reconstruction_output[pixel_start_d:pixel_end_d,pixel_start_h:pixel_end_h,pixel_start_w:pixel_end_w]=output#
del testdata_variable
dataset_ori=read_imagecollection('images/HR/*.bmp')
dataset_interp=dataset_interp*255
print ('PSNR of interp:',PSNR(dataset_interp,dataset_ori[:40,:2580,:2580]))
print ('PSNR of reconstructor:',PSNR(reconstruction_output,dataset_ori[:40,:2580,:2580]))
generate_2Dimage(reconstruction_output)
| [
"os.mkdir",
"matplotlib.pyplot.show",
"torch._C._cuda_emptyCache",
"os.getcwd",
"matplotlib.pyplot.imshow",
"torch.load",
"numpy.asarray",
"numpy.zeros",
"os.path.exists",
"torch.autograd.Variable",
"math.log10",
"numpy.mean",
"torch.Tensor",
"skimage.io.imread_collection"
] | [((2297, 2318), 'numpy.zeros', 'np.zeros', (['(num, h, w)'], {}), '((num, h, w))\n', (2305, 2318), True, 'import numpy as np\n'), ((1028, 1062), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image_array'], {'cmap': 'mode'}), '(image_array, cmap=mode)\n', (1038, 1062), True, 'import matplotlib.pyplot as plt\n'), ((1067, 1077), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1075, 1077), True, 'import matplotlib.pyplot as plt\n'), ((1336, 1358), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (1346, 1358), False, 'import torch\n'), ((1779, 1814), 'skimage.io.imread_collection', 'io.imread_collection', (['imageset_path'], {}), '(imageset_path)\n', (1799, 1814), False, 'from skimage import data, io\n'), ((404, 423), 'numpy.mean', 'np.mean', (['(imdff ** 2)'], {}), '(imdff ** 2)\n', (411, 423), True, 'import numpy as np\n'), ((481, 505), 'math.log10', 'math.log10', (['(255.0 / rmse)'], {}), '(255.0 / rmse)\n', (491, 505), False, 'import time, math\n'), ((657, 679), 'numpy.asarray', 'np.asarray', (['array_like'], {}), '(array_like)\n', (667, 679), True, 'import numpy as np\n'), ((723, 750), 'os.path.exists', 'os.path.exists', (['"""3D_VDSR_/"""'], {}), "('3D_VDSR_/')\n", (737, 750), False, 'import os\n'), ((761, 780), 'os.mkdir', 'os.mkdir', (['save_mode'], {}), '(save_mode)\n', (769, 780), False, 'import os\n'), ((1746, 1757), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1755, 1757), False, 'import os\n'), ((1913, 1938), 'numpy.asarray', 'np.asarray', (['imgs_arrayset'], {}), '(imgs_arrayset)\n', (1923, 1938), True, 'import numpy as np\n'), ((3180, 3202), 'torch.Tensor', 'torch.Tensor', (['testdata'], {}), '(testdata)\n', (3192, 3202), False, 'import torch\n'), ((3824, 3851), 'torch._C._cuda_emptyCache', 'torch._C._cuda_emptyCache', ([], {}), '()\n', (3849, 3851), False, 'import torch\n'), ((3298, 3316), 'torch.autograd.Variable', 'Variable', (['testdata'], {}), '(testdata)\n', (3306, 3316), False, 'from torch.autograd import 
Variable\n'), ((3561, 3579), 'torch.autograd.Variable', 'Variable', (['testdata'], {}), '(testdata)\n', (3569, 3579), False, 'from torch.autograd import Variable\n')] |
# Author: <NAME>
# Warsaw University of Technology
# Modified by: <NAME>, <NAME>, <NAME> (Poznan University of Technology 2021)
import torch
from torch.utils.data import DataLoader
import MinkowskiEngine as ME
from numba import njit, jit
import numpy as np
from datasets.oxford import OxfordDataset, IntensityDataset, TrainTransform, TrainSetTransform
from datasets.samplers import BatchSampler
from misc.utils import MinkLocParams
def make_datasets(params: MinkLocParams, debug=False):
# Create training and validation datasets
datasets = {}
train_transform = TrainTransform(params.aug_mode)
train_set_transform = TrainSetTransform(params.aug_mode)
if debug:
max_elems = 1000
else:
max_elems = None
use_intensity = params.model_params.version in ['MinkLoc3D-I', 'MinkLoc3D-SI']
if params.dataset_name in ['USyd', 'IntensityOxford']:
datasets['train'] = IntensityDataset(params.dataset_folder, params.train_file, params.num_points,
params.max_distance, use_intensity, params.dataset_name, train_transform,
set_transform=train_set_transform, max_elems=max_elems)
else:
datasets['train'] = OxfordDataset(params.dataset_folder, params.train_file, params.num_points,
params.max_distance, train_transform,
set_transform=train_set_transform, max_elems=max_elems)
val_transform = None
if params.val_file is not None:
if params.dataset_name in ['USyd', 'IntensityOxford']:
datasets['train'] = IntensityDataset(params.dataset_folder, params.val_file, params.num_points,
params.max_distance, use_intensity, params.dataset_name, val_transform)
else:
datasets['val'] = OxfordDataset(params.dataset_folder, params.val_file,
params.num_points, params.max_distance, val_transform)
return datasets
def make_eval_dataset(params: MinkLocParams):
# Create evaluation datasets
use_intensity = params.model_params.version in ['MinkLoc3D-I', 'MinkLoc3D-SI']
if params.dataset_name in ['USyd', 'IntensityOxford']:
dataset = IntensityDataset(params.dataset_folder, params.test_file, params.num_points,
params.max_distance, use_intensity, params.dataset_name, transform=None)
else:
dataset = OxfordDataset(params.dataset_folder, params.test_file, params.num_points, params.max_distance,
transform=None)
return dataset
def make_collate_fn(dataset: OxfordDataset, version, dataset_name, mink_quantization_size=None):
# set_transform: the transform to be applied to all batch elements
def collate_fn(data_list):
# Constructs a batch object
clouds = [e[0] for e in data_list]
labels = [e[1] for e in data_list]
batch = torch.stack(clouds, dim=0) # Produces (batch_size, n_points, point_dim) tensor
if dataset.set_transform is not None:
# Apply the same transformation on all dataset elements
batch = dataset.set_transform(batch)
if mink_quantization_size is None:
# Not a MinkowskiEngine based model
batch = {'cloud': batch}
else:
if version == 'MinkLoc3D':
coords = [ME.utils.sparse_quantize(coordinates=e, quantization_size=mink_quantization_size)
for e in batch]
coords = ME.utils.batched_coordinates(coords)
# Assign a dummy feature equal to 1 to each point
# Coords must be on CPU, features can be on GPU - see MinkowskiEngine documentation
feats = torch.ones((coords.shape[0], 1), dtype=torch.float32)
elif version == 'MinkLoc3D-I':
coords = []
feats = []
for e in batch:
c, f = ME.utils.sparse_quantize(coordinates=e[:, :3], features=e[:, 3].reshape([-1, 1]),
quantization_size=mink_quantization_size)
coords.append(c)
feats.append(f)
coords = ME.utils.batched_coordinates(coords)
feats = torch.cat(feats, dim=0)
elif version == 'MinkLoc3D-S':
coords = []
for e in batch:
# Convert coordinates to spherical
spherical_e = torch.tensor(to_spherical(e.numpy(), dataset_name), dtype=torch.float)
c = ME.utils.sparse_quantize(coordinates=spherical_e[:, :3], quantization_size=mink_quantization_size)
coords.append(c)
coords = ME.utils.batched_coordinates(coords)
feats = torch.ones((coords.shape[0], 1), dtype=torch.float32)
elif version == 'MinkLoc3D-SI':
coords = []
feats = []
for e in batch:
# Convert coordinates to spherical
spherical_e = torch.tensor(to_spherical(e.numpy(), dataset_name), dtype=torch.float)
c, f = ME.utils.sparse_quantize(coordinates=spherical_e[:, :3], features=spherical_e[:, 3].reshape([-1, 1]),
quantization_size=mink_quantization_size)
coords.append(c)
feats.append(f)
coords = ME.utils.batched_coordinates(coords)
feats = torch.cat(feats, dim=0)
batch = {'coords': coords, 'features': feats}
# Compute positives and negatives mask
# dataset.queries[label]['positives'] is bitarray
positives_mask = [[dataset.queries[label]['positives'][e] for e in labels] for label in labels]
negatives_mask = [[dataset.queries[label]['negatives'][e] for e in labels] for label in labels]
positives_mask = torch.tensor(positives_mask)
negatives_mask = torch.tensor(negatives_mask)
# Returns (batch_size, n_points, 3) tensor and positives_mask and
# negatives_mask which are batch_size x batch_size boolean tensors
return batch, positives_mask, negatives_mask
return collate_fn
def make_dataloaders(params: MinkLocParams, debug=False):
"""
Create training and validation dataloaders that return groups of k=2 similar elements
:param train_params:
:param model_params:
:return:
"""
datasets = make_datasets(params, debug=debug)
dataloders = {}
train_sampler = BatchSampler(datasets['train'], batch_size=params.batch_size,
batch_size_limit=params.batch_size_limit,
batch_expansion_rate=params.batch_expansion_rate)
# Collate function collates items into a batch and applies a 'set transform' on the entire batch
train_collate_fn = make_collate_fn(datasets['train'], params.model_params.version, params.dataset_name,
params.model_params.mink_quantization_size)
dataloders['train'] = DataLoader(datasets['train'], batch_sampler=train_sampler, collate_fn=train_collate_fn,
num_workers=params.num_workers, pin_memory=True)
if 'val' in datasets:
val_sampler = BatchSampler(datasets['val'], batch_size=params.batch_size)
# Collate function collates items into a batch and applies a 'set transform' on the entire batch
# Currently validation dataset has empty set_transform function, but it may change in the future
val_collate_fn = make_collate_fn(datasets['val'], params.model_params.version, params.dataset_name,
params.model_params.mink_quantization_size)
dataloders['val'] = DataLoader(datasets['val'], batch_sampler=val_sampler, collate_fn=val_collate_fn,
num_workers=params.num_workers, pin_memory=True)
return dataloders
@njit
def to_spherical(points, dataset_name):
spherical_points = []
for point in points:
if (np.abs(point[:3]) < 1e-4).all():
continue
r = np.linalg.norm(point[:3])
# Theta is calculated as an angle measured from the y-axis towards the x-axis
# Shifted to range (0, 360)
theta = np.arctan2(point[1], point[0]) * 180 / np.pi
if theta < 0:
theta += 360
if dataset_name == "USyd":
# VLP-16 has 2 deg VRes and (+15, -15 VFoV).
# Phi calculated from the vertical axis, so (75, 105)
# Shifted to (0, 30)
phi = (np.arccos(point[2] / r) * 180 / np.pi) - 75
elif dataset_name in ['IntensityOxford', 'Oxford']:
# Oxford scans are built from a 2D scanner.
# Phi calculated from the vertical axis, so (0, 180)
phi = np.arccos(point[2] / r) * 180 / np.pi
elif dataset_name == 'KITTI':
# HDL-64 has 0.4 deg VRes and (+2, -24.8 VFoV).
# Phi calculated from the vertical axis, so (88, 114.8)
# Shifted to (0, 26.8)
phi = (np.arccos(point[2] / r) * 180 / np.pi) - 88
if point.shape[-1] == 4:
spherical_points.append([r, theta, phi, point[3]])
else:
spherical_points.append([r, theta, phi])
return spherical_points
| [
"torch.ones",
"datasets.oxford.IntensityDataset",
"datasets.oxford.OxfordDataset",
"torch.tensor",
"torch.stack",
"torch.utils.data.DataLoader",
"numpy.arctan2",
"numpy.abs",
"datasets.oxford.TrainSetTransform",
"MinkowskiEngine.utils.sparse_quantize",
"torch.cat",
"numpy.linalg.norm",
"Mink... | [((577, 608), 'datasets.oxford.TrainTransform', 'TrainTransform', (['params.aug_mode'], {}), '(params.aug_mode)\n', (591, 608), False, 'from datasets.oxford import OxfordDataset, IntensityDataset, TrainTransform, TrainSetTransform\n'), ((635, 669), 'datasets.oxford.TrainSetTransform', 'TrainSetTransform', (['params.aug_mode'], {}), '(params.aug_mode)\n', (652, 669), False, 'from datasets.oxford import OxfordDataset, IntensityDataset, TrainTransform, TrainSetTransform\n'), ((6728, 6890), 'datasets.samplers.BatchSampler', 'BatchSampler', (["datasets['train']"], {'batch_size': 'params.batch_size', 'batch_size_limit': 'params.batch_size_limit', 'batch_expansion_rate': 'params.batch_expansion_rate'}), "(datasets['train'], batch_size=params.batch_size,\n batch_size_limit=params.batch_size_limit, batch_expansion_rate=params.\n batch_expansion_rate)\n", (6740, 6890), False, 'from datasets.samplers import BatchSampler\n'), ((7267, 7408), 'torch.utils.data.DataLoader', 'DataLoader', (["datasets['train']"], {'batch_sampler': 'train_sampler', 'collate_fn': 'train_collate_fn', 'num_workers': 'params.num_workers', 'pin_memory': '(True)'}), "(datasets['train'], batch_sampler=train_sampler, collate_fn=\n train_collate_fn, num_workers=params.num_workers, pin_memory=True)\n", (7277, 7408), False, 'from torch.utils.data import DataLoader\n'), ((916, 1132), 'datasets.oxford.IntensityDataset', 'IntensityDataset', (['params.dataset_folder', 'params.train_file', 'params.num_points', 'params.max_distance', 'use_intensity', 'params.dataset_name', 'train_transform'], {'set_transform': 'train_set_transform', 'max_elems': 'max_elems'}), '(params.dataset_folder, params.train_file, params.\n num_points, params.max_distance, use_intensity, params.dataset_name,\n train_transform, set_transform=train_set_transform, max_elems=max_elems)\n', (932, 1132), False, 'from datasets.oxford import OxfordDataset, IntensityDataset, TrainTransform, TrainSetTransform\n'), ((1252, 1428), 
'datasets.oxford.OxfordDataset', 'OxfordDataset', (['params.dataset_folder', 'params.train_file', 'params.num_points', 'params.max_distance', 'train_transform'], {'set_transform': 'train_set_transform', 'max_elems': 'max_elems'}), '(params.dataset_folder, params.train_file, params.num_points,\n params.max_distance, train_transform, set_transform=train_set_transform,\n max_elems=max_elems)\n', (1265, 1428), False, 'from datasets.oxford import OxfordDataset, IntensityDataset, TrainTransform, TrainSetTransform\n'), ((2319, 2472), 'datasets.oxford.IntensityDataset', 'IntensityDataset', (['params.dataset_folder', 'params.test_file', 'params.num_points', 'params.max_distance', 'use_intensity', 'params.dataset_name'], {'transform': 'None'}), '(params.dataset_folder, params.test_file, params.num_points,\n params.max_distance, use_intensity, params.dataset_name, transform=None)\n', (2335, 2472), False, 'from datasets.oxford import OxfordDataset, IntensityDataset, TrainTransform, TrainSetTransform\n'), ((2532, 2646), 'datasets.oxford.OxfordDataset', 'OxfordDataset', (['params.dataset_folder', 'params.test_file', 'params.num_points', 'params.max_distance'], {'transform': 'None'}), '(params.dataset_folder, params.test_file, params.num_points,\n params.max_distance, transform=None)\n', (2545, 2646), False, 'from datasets.oxford import OxfordDataset, IntensityDataset, TrainTransform, TrainSetTransform\n'), ((3034, 3060), 'torch.stack', 'torch.stack', (['clouds'], {'dim': '(0)'}), '(clouds, dim=0)\n', (3045, 3060), False, 'import torch\n'), ((6099, 6127), 'torch.tensor', 'torch.tensor', (['positives_mask'], {}), '(positives_mask)\n', (6111, 6127), False, 'import torch\n'), ((6153, 6181), 'torch.tensor', 'torch.tensor', (['negatives_mask'], {}), '(negatives_mask)\n', (6165, 6181), False, 'import torch\n'), ((7490, 7549), 'datasets.samplers.BatchSampler', 'BatchSampler', (["datasets['val']"], {'batch_size': 'params.batch_size'}), "(datasets['val'], batch_size=params.batch_size)\n", 
(7502, 7549), False, 'from datasets.samplers import BatchSampler\n'), ((7982, 8117), 'torch.utils.data.DataLoader', 'DataLoader', (["datasets['val']"], {'batch_sampler': 'val_sampler', 'collate_fn': 'val_collate_fn', 'num_workers': 'params.num_workers', 'pin_memory': '(True)'}), "(datasets['val'], batch_sampler=val_sampler, collate_fn=\n val_collate_fn, num_workers=params.num_workers, pin_memory=True)\n", (7992, 8117), False, 'from torch.utils.data import DataLoader\n'), ((8353, 8378), 'numpy.linalg.norm', 'np.linalg.norm', (['point[:3]'], {}), '(point[:3])\n', (8367, 8378), True, 'import numpy as np\n'), ((1662, 1813), 'datasets.oxford.IntensityDataset', 'IntensityDataset', (['params.dataset_folder', 'params.val_file', 'params.num_points', 'params.max_distance', 'use_intensity', 'params.dataset_name', 'val_transform'], {}), '(params.dataset_folder, params.val_file, params.num_points,\n params.max_distance, use_intensity, params.dataset_name, val_transform)\n', (1678, 1813), False, 'from datasets.oxford import OxfordDataset, IntensityDataset, TrainTransform, TrainSetTransform\n'), ((1903, 2015), 'datasets.oxford.OxfordDataset', 'OxfordDataset', (['params.dataset_folder', 'params.val_file', 'params.num_points', 'params.max_distance', 'val_transform'], {}), '(params.dataset_folder, params.val_file, params.num_points,\n params.max_distance, val_transform)\n', (1916, 2015), False, 'from datasets.oxford import OxfordDataset, IntensityDataset, TrainTransform, TrainSetTransform\n'), ((3639, 3675), 'MinkowskiEngine.utils.batched_coordinates', 'ME.utils.batched_coordinates', (['coords'], {}), '(coords)\n', (3667, 3675), True, 'import MinkowskiEngine as ME\n'), ((3866, 3919), 'torch.ones', 'torch.ones', (['(coords.shape[0], 1)'], {'dtype': 'torch.float32'}), '((coords.shape[0], 1), dtype=torch.float32)\n', (3876, 3919), False, 'import torch\n'), ((8518, 8548), 'numpy.arctan2', 'np.arctan2', (['point[1]', 'point[0]'], {}), '(point[1], point[0])\n', (8528, 8548), True, 'import 
numpy as np\n'), ((3490, 3576), 'MinkowskiEngine.utils.sparse_quantize', 'ME.utils.sparse_quantize', ([], {'coordinates': 'e', 'quantization_size': 'mink_quantization_size'}), '(coordinates=e, quantization_size=\n mink_quantization_size)\n', (3514, 3576), True, 'import MinkowskiEngine as ME\n'), ((4352, 4388), 'MinkowskiEngine.utils.batched_coordinates', 'ME.utils.batched_coordinates', (['coords'], {}), '(coords)\n', (4380, 4388), True, 'import MinkowskiEngine as ME\n'), ((4413, 4436), 'torch.cat', 'torch.cat', (['feats'], {'dim': '(0)'}), '(feats, dim=0)\n', (4422, 4436), False, 'import torch\n'), ((8286, 8303), 'numpy.abs', 'np.abs', (['point[:3]'], {}), '(point[:3])\n', (8292, 8303), True, 'import numpy as np\n'), ((4887, 4923), 'MinkowskiEngine.utils.batched_coordinates', 'ME.utils.batched_coordinates', (['coords'], {}), '(coords)\n', (4915, 4923), True, 'import MinkowskiEngine as ME\n'), ((4948, 5001), 'torch.ones', 'torch.ones', (['(coords.shape[0], 1)'], {'dtype': 'torch.float32'}), '((coords.shape[0], 1), dtype=torch.float32)\n', (4958, 5001), False, 'import torch\n'), ((8821, 8844), 'numpy.arccos', 'np.arccos', (['(point[2] / r)'], {}), '(point[2] / r)\n', (8830, 8844), True, 'import numpy as np\n'), ((9065, 9088), 'numpy.arccos', 'np.arccos', (['(point[2] / r)'], {}), '(point[2] / r)\n', (9074, 9088), True, 'import numpy as np\n'), ((4725, 4828), 'MinkowskiEngine.utils.sparse_quantize', 'ME.utils.sparse_quantize', ([], {'coordinates': 'spherical_e[:, :3]', 'quantization_size': 'mink_quantization_size'}), '(coordinates=spherical_e[:, :3], quantization_size=\n mink_quantization_size)\n', (4749, 4828), True, 'import MinkowskiEngine as ME\n'), ((5615, 5651), 'MinkowskiEngine.utils.batched_coordinates', 'ME.utils.batched_coordinates', (['coords'], {}), '(coords)\n', (5643, 5651), True, 'import MinkowskiEngine as ME\n'), ((5676, 5699), 'torch.cat', 'torch.cat', (['feats'], {'dim': '(0)'}), '(feats, dim=0)\n', (5685, 5699), False, 'import torch\n'), ((9324, 
9347), 'numpy.arccos', 'np.arccos', (['(point[2] / r)'], {}), '(point[2] / r)\n', (9333, 9347), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
quick_plot
9 Oct 2013
<NAME> (dent.earl a gmail.com)
A quick plotting program for creating fast sketches of data.
"""
##############################
# Copyright (C) 2013-2014 by
# <NAME> (<EMAIL>, <EMAIL>)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
##############################
# plotting boilerplate / cargo cult
import matplotlib
matplotlib.use('Agg')
#####
# the param pdf.fonttype allows for text to be editable in Illustrator.
# Use either Output Type 3 (Type3) or Type 42 (TrueType)
matplotlib.rcParams['pdf.fonttype'] = 42
import matplotlib.backends.backend_pdf as pltBack
import matplotlib.lines as lines
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
from matplotlib.ticker import ScalarFormatter, FormatStrFormatter
import numpy
##############################
from argparse import ArgumentParser
import os
from scipy.stats import scoreatpercentile, linregress, gaussian_kde
import sys
import random
COLOR_MAPS = [m for m in plt.cm.datad if not m.endswith("_r")]
class BadInput(Exception):
pass
class Row(object):
""" Class Row holds a single line of a file in split format.
"""
def __init__(self):
self.columns = []
self.line_number = None
class Data(object):
""" Class Data holds data from one file for plotting.
"""
def __init__(self):
self.rows = None # this will be a list of lists.
self.x = None # this will be a numpy array
self.y = None
self.xtick_labels = None
self.label = ''
def process_data(self, args):
if args.mode == 'matrix':
self._create_matrix(args)
i = 1
for r in self.rows:
x = None
y = None
label = None
if len(args.columns) > 1:
# get 2D data
x = self._get_element(r, args.columns[0])
y = self._get_element(r, args.columns[1])
if numpy.isnan(x):
continue
if args.xmin > x:
args.xmin = x
if args.xmax < x:
args.xmax = x
else:
# get just 1D data
y = self._get_element(r, args.columns[0])
# xtick_labels
if args.xtick_label_column is not None:
label = self._get_element(r, args.xtick_label_column, label_mode=True)
if numpy.isnan(y):
continue
if args.ymin > y:
args.ymin = y
if args.ymax < y:
args.ymax = y
if x is None:
if args.mode in ('scatter', 'line'):
x = i
if self.x is None:
self.x = []
self.x.append(x)
i += 1
else:
if self.x is None:
self.x = []
self.x.append(x)
if self.y is None:
self.y = []
self.y.append(y)
if label is not None:
if self.xtick_labels is None:
self.xtick_labels = []
self.xtick_labels.append(label)
# finally, turn the lists into arrays
if x is not None:
self.x = numpy.array(self.x)
self.y = numpy.array(self.y)
def reverse_matrix_rows(self):
""" reverse the matrix row order for matrix plotting.
"""
self.matrix = self.matrix[::-1, :]
def reverse_matrix_cols(self):
""" reverse the matrix column order for matrix plotting.
"""
self.matrix = self.matrix[:, ::-1]
def _get_element(self, row, i, label_mode=False):
""" internal method, retrieve value in row located at index.
if label_mode is True, just return the element at position i.
"""
if label_mode:
return row.columns[i]
try:
x = float(row.columns[i])
except ValueError:
sys.stderr.write(
'Bad input when trying to process file %s at column %d on line %d: %s\n'
% (self.label, i, row.line_number, ' '.join(row.columns)))
raise
return x
def _create_matrix(self, args):
""" inspect self.rows and try to create a single data matrix from all input.
"""
num_rows = len(self.rows)
num_cols = len(self.rows[0].columns)
for i in xrange(0, num_rows):
if len(self.rows[i].columns) > num_cols:
self.matrix = None
# this is an error state, all rows need to have same number of columns
return
self.matrix = numpy.zeros((num_rows, num_cols), dtype=float)
for i in xrange(0, len(self.rows)):
for j in xrange(0, len(self.rows[i].columns)):
try:
self.matrix[i][j] = self._get_element(self.rows[i], j)
except ValueError:
pass
def InitArguments(parser):
""" Initialize arguments for the program.
Args:
parser: an argparse parser object
"""
parser.add_argument('files', nargs='+', help='files to plot')
parser.add_argument('--out', dest='out', default='my_plot',
type=str,
help=('path/filename where figure will be created. No '
'extension needed. default=%(default)s'))
parser.add_argument('--mode', dest='mode', default='line', type=str,
help=('plotting mode. may be in (line, scatter, '
'column, bar, hist, tick, barcode, point, contour, '
'density, matrix) default=%(default)s'))
parser.add_argument('--columns', dest='columns', default=None, type=str,
help=('two numbers, comma separated, can be reverse '
'order, indicates x,y for plotting. 1-based.'))
parser.add_argument('--xtick_label_column', type=int,
help=('for plot modes bar and column, using this will '
'allow for the xtick labels to be shown. 1-based.'))
parser.add_argument('--downsample', default=None, type=int,
help=('Randomly sample only n values from each input. '
'Can help cutdown on runtime and output size '
'for pdfs.'))
parser.add_argument('--colors', dest='colors', default='brewer', type=str,
help=('color palatte mode. may be in (bostock, brewer, '
'mono, hcl_ggplot2) '
'default=%(default)s'))
parser.add_argument('--color_index_offset', dest='color_index_offset',
type=int, default=0,
help=('index offset value to shift the starting point '
'of the selected color map. default=%(default)s'))
parser.add_argument('--alpha', default=1.0, type=float,
help='alpha value for markers in --mode scatter')
parser.add_argument('--dot_size', '--markersize', dest='markersize',
default=2.0, type=float,
help='value for markers in --mode scatter')
parser.add_argument('--marker', dest='marker', default=None, type=str,
help='Marker to use.')
parser.add_argument('--linewidth', dest='linewidth', default=2.0,
type=float,
help='Line width for the plot. default=%(default)s')
parser.add_argument('--logy', dest='is_log_y', default=False,
action='store_true',
help='Put the y-axis into log. default=%(default)s')
parser.add_argument('--logx', dest='is_log_x', default=False,
action='store_true',
help='Put the x-axis into log. default=%(default)s')
parser.add_argument('--title', dest='title', type=str,
default='sentinel_value',
help='Plot title.')
parser.add_argument('--xlabel', dest='xlabel', type=str,
default='sentinel_value',
help='X-axis label.')
parser.add_argument('--ylabel', dest='ylabel', type=str,
default='sentinel_value',
help='Y-axis label.')
parser.add_argument(
'--xmin', dest='user_xmin', default=sys.maxint, type=float,
help='xmin value.')
parser.add_argument(
'--xmax', dest='user_xmax', default=-sys.maxint, type=float,
help='xmax value.')
parser.add_argument(
'--ymin', dest='user_ymin', default=sys.maxint, type=float,
help='ymin value.')
parser.add_argument(
'--ymax', dest='user_ymax', default=-sys.maxint, type=float,
help='ymax value.')
parser.add_argument('--height', dest='height', default=4.0, type=float,
help='height of image, in inches. default=%(default)s')
parser.add_argument('--width', dest='width', default=9.0, type=float,
help='width of image, in inches. default=%(default)s')
parser.add_argument('--dpi', dest='dpi', default=300,
type=int,
help=('dots per inch of raster outputs, i.e. '
'if --outFormat is all or png. '
'default=%(default)s'))
parser.add_argument('--out_format', dest='out_format', default='pdf',
type=str,
help=('output format [pdf|png|eps|all]. '
'default=%(default)s'))
parser.add_argument('--no_legend', dest='is_legend', default=True,
action='store_false',
help=('Turns off the filename / color legend. '
'Helpful for large numbers of files.'))
parser.add_argument('--regression', dest='regression', default=False,
action='store_true',
help='turn on a simple linear regression line')
parser.add_argument('--jitter', dest='jitter', default=False,
action='store_true',
help='turn on jitter for certain plotting modes')
parser.add_argument('--random_seed', dest='random_seed', default=None,
type=int,
help=('Random seed for use with --jitter and '
'--downsample flags.'))
parser.add_argument('--aspect_equal', dest='aspect_equal', default=False,
action='store_true',
help='Turn on equal aspect ratio for the plot')
contour = parser.add_argument_group('contour mode')
contour.add_argument('--contour_bin', dest='contour_bin', default=10,
type=int,
help=('Bin size of the contour plot. Smaller integers '
'lead to smoother curves.'))
contour.add_argument('--contour_logspace', dest='contour_logspace',
default=False, action='store_true',
help=('Switch the contour lines from linear spacing '
'to log spacing'))
contour.add_argument('--contour_num_levels', dest='contour_num_levels',
default=6, type=int,
help=('The number of levels in the contour plot, '
'default=%(default)s'))
density = parser.add_argument_group('density mode')
density.add_argument('--density_covariance', dest='density_covariance',
type=float,
help=('Gaussian kernel density estimate covariance, '
'raising the value leads to smoother curves. '
'This roughly corresponds to bandwidth in R. '
'Default is to discover the value automatically.'))
density.add_argument('--density_num_bins', dest='density_num_bins', type=int,
default=200,
help=('Number of "bins" for the density curve. '
'default=%(default)s'))
matrix = parser.add_argument_group('matrix mode')
matrix.add_argument('--matrix_matshow', default=False, action='store_true',
help=('Switches the drawing call from pcolor() to '
'matshow(). matshow() uses rasters, pcolor() uses '
'vectors. For very large matrices matshow() may be '
'desirable.'))
matrix.add_argument('--matrix_cmap', type=str, default='binary',
help=('The colormap to be used. default=%(default)s. '
'Possible values: ' + '%s' % ', '.join(COLOR_MAPS)))
matrix.add_argument('--matrix_no_colorbar', default=False,
action='store_true',
help='turn off the colorbar.')
matrix.add_argument('--matrix_discritize_colormap', type=int, default=0,
help='Number of bins to discritize colormap')
matrix.add_argument('--matrix_colormap_min', type=float,
help='Lower bound of colormap')
matrix.add_argument('--matrix_colormap_max', type=float,
help='Upper bound of colormap')
def CheckArguments(args, parser):
  """ Verify that input arguments are correct and sufficient.

  Validation order matters: the first failing check calls parser.error(),
  which prints the message and exits the process, so later checks never run.
  Also normalizes several fields on args in place (mode aliasing, output
  name stripping, axis-extent sentinels).

  Args:
    args: an argparse arguments object
    parser: an argparse parser object
  """
  args.recognized_modes = ['line', 'scatter', 'bar', 'column', 'hist',
                           'histogram', 'tick', 'barcode', 'point', 'contour',
                           'density', 'matrix']
  # At least one input file is required and every one must exist.
  if len(args.files) > 0:
    for f in args.files:
      if not os.path.exists(f):
        parser.error('File %s does not exist.\n' % f)
  else:
    parser.error('File paths must be passed in on command line!')
  if args.dpi < 72:
    parser.error('--dpi %d less than screen res, 72. Must be >= 72.'
                 % args.dpi)
  if args.out_format not in ('pdf', 'png', 'eps', 'all'):
    parser.error('Unrecognized --out_format %s. Choose one from: '
                 'pdf png eps all.' % args.out_format)
  if args.mode not in args.recognized_modes:
    parser.error('Unrecognized --mode %s. Choose one from: %s'
                 % (args.mode, str(args.recognized_modes)))
  # 'histogram' is an accepted alias; canonicalize it to 'hist'.
  if args.mode == 'histogram':
    args.mode = 'hist'
  if args.mode == 'contour':
    if len(args.files) > 1:
      parser.error('--mode=contour does not permit more than one file '
                   'to be plotted at a time.')
  if args.colors not in ('bostock', 'brewer', 'mono'):
    parser.error('Unrecognized --colors %s palette. Choose one from: '
                 'bostock brewer mono.' % args.colors)
  # Strip a recognized image extension; the writer re-appends the right one.
  if (args.out.endswith('.png') or args.out.endswith('.pdf') or
      args.out.endswith('.eps')):
    args.out = args.out[:-4]
  # Sentinel extents so the first real data point always replaces them.
  # NOTE(review): sys.maxint is Python-2-only (removed in Python 3); these
  # sentinels pair with the comparisons in HandleLimits().
  args.xmax = -sys.maxint
  args.xmin = sys.maxint
  args.ymax = -sys.maxint
  args.ymin = sys.maxint
  if args.contour_bin < 3:
    parser.error('--contour_bin must be greater than 3.')
  DefineColors(parser, args)
  DefineColumns(parser, args)
  if args.xtick_label_column is not None:
    # Convert the user's 1-based column number to a 0-based index.
    args.xtick_label_column -= 1
    if args.xtick_label_column in args.columns:
      parser.error('--xtick_label_column %d appears in --columns %s.' %
                   (args.xtick_label_column, str(args.columns)))
  # Seed the relevant RNG only when a feature that consumes it is active.
  if args.random_seed is not None and args.jitter:
    numpy.random.seed(seed=args.random_seed)
  if args.random_seed is not None and args.downsample:
    random.seed(args.random_seed)
  # 0 means "continuous colormap"; 1 or negative bin counts are nonsense.
  if (args.matrix_discritize_colormap == 1 or
      args.matrix_discritize_colormap < 0):
    parser.error('--matrix_discritize_colormap must be either 0, '
      'or greater than 1')
def DefineColumns(parser, args):
  """ Based on --columns, define columns to use for plotting.

  Parses the comma-separated --columns string into a list of zero-based
  column indexes stored back on args.columns. Exits via parser.error()
  on invalid input.

  Args:
    parser: an argparse parser object (used for error reporting)
    args: an argparse arguments object
  """
  if args.columns is None:
    # Two dimensional modes need an x and a y column by default.
    if args.mode in ('scatter', 'line', 'contour'):
      args.columns = '1,2'
    else:
      args.columns = '1'
  columns = args.columns.split(',')
  if len(columns) > 2:
    parser.error('Too many --columns specified, can only take 2.')
  if len(columns) < 1:
    parser.error('Too few --columns specified, needs at least 1.')
  # enumerate() replaces the Python-2-only xrange() index loop; works on
  # both Python 2 and 3.
  for i, value in enumerate(columns):
    try:
      x = int(value)
    except ValueError:
      parser.error('--columns input must be an integer')
    if x < 1:
      parser.error('--columns input must be a positive non-zero integer')
    columns[i] = x
  # Convert the user's 1-based column numbers to 0-based indexes.
  args.columns = [c - 1 for c in columns]
def DefineColors(parser, args):
  """ Based on --colors, define the set of colors to use in the plot.

  Each palette provides light / medium / dark variants on args;
  ColorPicker() later chooses among them based on the plot mode.
  Hex strings and 0-255 RGB tuples are both used; tuples get rescaled
  to matplotlib's 0.0-1.0 range at the bottom of this function.

  Args:
    args: an argparse arguments object
  """
  # TODO: allow for a way to override the color list
  if args.colors == 'bostock':
    # Hex strings are valid matplotlib colors as-is (no rescaling needed).
    args.colors_light = ['#aec7e8', # l blue
                         '#ffbb78', # l orange
                         '#98df8a', # l green
                         '#ff9896', # l red
                         '#c5b0d5', # l purple
                         '#c49c94', # l brown
                         '#f7b6d2', # l lavender
                         '#c7c7c7', # l gray
                         '#dbdb8d', # l olive
                         '#9edae5', # l aqua
                         ]
    args.colors_medium = ['#1f77b4', # d blue
                          '#ff7f0e', # d orange
                          '#2ca02c', # d green
                          '#d62728', # d red
                          '#9467bd', # d purple
                          '#8c564b', # d brown
                          '#e377c2', # d lavender
                          '#7f7f7f', # d gray
                          '#bcbd22', # d olive
                          '#17becf', # d aqua
                          ]
    args.colors_dark = []
  elif args.colors == 'brewer':
    args.colors_light = [(136, 189, 230), # l blue
                         (251, 178, 88), # l orange
                         (144, 205, 151), # l green
                         (246, 170, 201), # l red
                         (191, 165, 84), # l brown
                         (188, 153, 199), # l purple
                         (240, 126, 110), # l magenta
                         (140, 140, 140), # l grey
                         (237, 221, 70), # l yellow
                         ]
    args.colors_medium = [( 93, 165, 218), # m blue
                          (250, 164, 58), # m orange
                          ( 96, 189, 104), # m green
                          (241, 124, 167), # m red
                          (178, 145, 47), # m brown
                          (178, 118, 178), # m purple
                          (241, 88, 84), # m magenta
                          ( 77, 77, 77), # m grey
                          (222, 207, 63), # m yellow
                          ]
    args.colors_dark = [( 38, 93, 171), # d blue
                        (223, 92, 36), # d orange
                        ( 5, 151, 72), # d green
                        (229, 18, 111), # d red
                        (157, 114, 42), # d brown
                        (123, 58, 150), # d purple
                        (203, 32, 39), # d magenta
                        ( 0, 0, 0), # black
                        (199, 180, 46), # d yellow
                        ]
  elif args.colors == 'mono':
    args.colors_light = [(140, 140, 140), # l grey
                         ]
    args.colors_medium = [( 77, 77, 77), # m grey
                          ]
    args.colors_dark = [( 0, 0, 0), # black
                        ]
  elif args.colors == 'hcl_ggplot2':
    # NOTE(review): CheckArguments() only accepts bostock/brewer/mono for
    # --colors, so this branch appears unreachable from the CLI — confirm.
    args.colors_light = [(158, 217, 255), # l blue
                         (246, 209, 146), # l mustard
                         ( 93, 237, 189), # l green
                         (255, 189, 187), # l pink
                         (182, 228, 149), # l olive
                         ( 51, 235, 236), # l teal
                         (241, 194, 255), # l purple
                         (255, 179, 234), # l magenta
                         ]
    args.colors_medium = [( 98, 162, 209), # m blue
                          (190, 154, 87), # m mustard
                          (223, 133, 131), # m pink
                          ( 0, 183, 134), # m green
                          (126, 173, 90), # m olive
                          ( 0, 180, 181), # m teal
                          (187, 134, 209), # m purple
                          (225, 122, 179), # m magenta
                          ]
    args.colors_dark = [( 0, 163, 255), # d blue
                        (213, 151, 0), # d mustard
                        ( 0, 201, 106), # d green
                        (254, 102, 97), # d pink
                        ( 98, 183, 0), # d olive
                        ( 1, 196, 200), # d teal
                        (219, 95, 255), # d purple
                        (255, 40, 201), # d magenta
                        ]
  # RGB tuple palettes are 0-255; rescale them to matplotlib's 0.0-1.0.
  if isinstance(args.colors_light[0], tuple):
    CorrectColorTuples(args)
  if args.matrix_cmap not in COLOR_MAPS:
    parser.error('--cmap %s not a valid option. Pick from %s'
                 % (args.matrix_cmap, ', '.join(COLOR_MAPS)))
def CorrectColorTuples(args):
  """ Corrects 0-255 RGB values in the color palettes to 0.0 - 1.0.

  Rescales colors_light, colors_medium and colors_dark in place so the
  tuples are valid matplotlib colors. (The old docstring only mentioned
  two of the three palettes; all three were and are converted.)

  Args:
    args: an argparse arguments object
  """
  # enumerate() replaces the Python-2-only xrange() index loops; the three
  # identical loops are collapsed into one over the palette names.
  for name in ('colors_light', 'colors_medium', 'colors_dark'):
    palette = getattr(args, name)
    for i, (r, g, b) in enumerate(palette):
      # Normalize 8-bit channel values into matplotlib's 0.0-1.0 range.
      palette[i] = (r / 255.0, g / 255.0, b / 255.0,)
def InitImage(args):
  """ Initialize a new image.

  Creates the matplotlib figure and, when the output format calls for
  one, the pdf backend object the figure will later be written into.

  Args:
    args: an argparse arguments object
  Returns:
    fig: a matplotlib figure object
    pdf: a matplotlib pdf drawing (backend) object, or None when no pdf
      output was requested
  """
  if args.out_format in ('pdf', 'all'):
    pdf = pltBack.PdfPages(args.out + '.pdf')
  else:
    pdf = None
  fig = plt.figure(figsize=(args.width, args.height), dpi=args.dpi,
                   facecolor='w')
  return fig, pdf
def EstablishAxes(fig, args):
  """ Create a single axis on the figure object.
  Args:
    fig: a matplotlib figure object
    args: an argparse arguments object
  Returns:
    ax: a matplotlib axis object
  Raises:
    ValueError: If an unknown spine location is passed.
  """
  # Fixed margins expressed as fractions of the figure size:
  # left 0.99 inches, right 0.54 inches, width 7.47 inches
  # bottom 0.68 inches, top 0.28 inches, height 3.04 inches
  args.axLeft = 0.99 / args.width
  args.axRight = 1.0 - (0.54 / args.width)
  args.axWidth = args.axRight - args.axLeft
  args.axBottom = 0.68 / args.height
  args.axTop = 1.0 - (0.28 / args.height)
  args.axHeight = args.axTop - args.axBottom
  ax = fig.add_axes([args.axLeft, args.axBottom,
                    args.axWidth, args.axHeight])
  # Tick locators are configured per-mode later; start with none.
  ax.yaxis.set_major_locator(pylab.NullLocator())
  ax.xaxis.set_major_locator(pylab.NullLocator())
  # BUG FIX: dict.iteritems() is Python-2-only (removed in Python 3);
  # .items() behaves identically here on both versions.
  for loc, spine in ax.spines.items():
    if loc in ['left', 'bottom']:
      # Offset the visible spines outward for a cleaner look.
      spine.set_position(('outward', 10))
    elif loc in ['right', 'top']:
      spine.set_color('none')
    else:
      raise ValueError('unknown spine location: %s' % loc)
  ax.xaxis.set_ticks_position('bottom')
  ax.yaxis.set_ticks_position('left')
  return ax
def WriteImage(fig, pdf, args):
  """ Write the image to disk.

  Depending on args.out_format this emits a pdf, a png, an eps, or (for
  'all') every one of them. The pdf backend object is closed once its
  page has been saved.

  Args:
    fig: a matplotlib figure object
    pdf: a matplotlib pdf drawing (backend) object
    args: an argparse arguments object
  """
  fmt = args.out_format
  if fmt in ('pdf', 'all'):
    fig.savefig(pdf, format='pdf')
    pdf.close()
  if fmt in ('png', 'all'):
    fig.savefig(args.out + '.png', format='png', dpi=args.dpi)
  if fmt in ('eps', 'all'):
    fig.savefig(args.out + '.eps', format='eps')
def ColorPicker(i, args):
  """ Returns a valid matplotlib color based on the index, plot mode and palette.
  Args:
    i: index, integer
    args: an argparse arguments object
  Returns:
    color: a valid matplotlib color, or a list of colors if mode is hist,
      or the contour line color ('k') if the mode is contour
  """
  i += args.color_index_offset
  if args.mode in ('column', 'bar'):
    return args.colors_light[i % len(args.colors_light)]
  elif args.mode == 'hist':
    # BUG FIX: the original tested `args.mode in ('hist')` — the
    # parentheses do not make a tuple, so this was a substring test
    # against the string 'hist' (e.g. mode 'is' matched). An exact
    # comparison is intended. Same fix for 'contour' below.
    # hist requires a list of colors be returned, one per file; the loop
    # variable no longer shadows i (and range replaces Python-2 xrange).
    colors = []
    for j in range(i):
      colors.append(args.colors_light[j % len(args.colors_light)])
    return colors
  elif args.mode == 'contour':
    return 'k'
  elif args.mode in ('line', 'scatter', 'tick', 'point', 'barcode', 'density'):
    return args.colors_medium[i % len(args.colors_medium)]
def PlotDensity(data_list, ax, args):
  """ Plot one dimensional data as density curves.

  Builds a gaussian kernel density estimate for every input Data object,
  samples it over the data range, and hands the resulting curves to
  PlotLineScatter() for drawing.

  Args:
    data_list: a list of Data objects
    ax: a matplotlib axis object
    args: an argparse arguments object
  """
  curves = []
  for source in data_list:
    kde = gaussian_kde(source.y)
    if args.density_covariance is not None:
      # Override the automatically discovered bandwidth.
      kde.covariance_factor = lambda: args.density_covariance
      kde._compute_covariance()  # bad mojo calling privates like this
    sample_points = numpy.linspace(numpy.min(source.y), numpy.max(source.y),
                                   args.density_num_bins)
    curve = Data()
    curve.label = source.label
    curve.x = numpy.array(sample_points)
    curve.y = numpy.array(kde(sample_points))
    curves.append(curve)
  PlotLineScatter(curves, ax, args)
def HandleColormapLimits(data, args):
  """ Return appropriate values for upper and lower bound colorbar limits.

  User supplied --matrix_colormap_min / --matrix_colormap_max win;
  otherwise each bound falls back to the extreme value found in the
  matrix itself.

  Args:
    data: a Data object
    args: an argparse arguments object
  Returns:
    lb: a lower bound value for the map
    ub: an upper bound value for the map
  """
  lb = args.matrix_colormap_min
  if lb is None:
    lb = numpy.min(numpy.min(data.matrix))
  ub = args.matrix_colormap_max
  if ub is None:
    ub = numpy.max(numpy.max(data.matrix))
  return lb, ub
def PlotMatrix(data_list, ax, args):
  """ Plot a matrix as a 2D matrix.
  Args:
    data_list: a list of Data objects
    ax: a matplotlib axis object
    args: an argparse arguments object
  Raises:
    BadInput: if more than one input file was supplied.
  """
  if len(data_list) > 1:
    # BUG FIX: the message previously said "contour plot" (copied from
    # PlotContour); this is matrix mode.
    raise BadInput('You cannot create a matrix plot with more '
                   'than one input file')
  data = data_list[0]
  # NOTE(review): presumably flips the matrix into its natural display
  # orientation for pcolor/matshow — confirm against the Data class.
  data.reverse_matrix_rows()
  data.reverse_matrix_cols()
  cmap = plt.get_cmap(args.matrix_cmap)
  cmap_lb, cmap_ub = HandleColormapLimits(data, args)
  if args.matrix_discritize_colormap:
    # Rebuild the colormap from discrete samples and bin the value range
    # into the requested number of levels.
    cmap_list = [cmap(i) for i in range(cmap.N)]
    cmap = cmap.from_list('Discritized', cmap_list, cmap.N)
    bounds = numpy.linspace(cmap_lb, cmap_ub,
                            args.matrix_discritize_colormap + 1)
    norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N)
  else:
    norm = matplotlib.colors.Normalize(vmin=cmap_lb, vmax=cmap_ub)
  if args.matrix_matshow:
    # matshow() rasters; preferable for very large matrices.
    plt.matshow(data.matrix, fignum=False, origin='upper',
                cmap=cmap, norm=norm)
  else:
    plt.pcolor(data.matrix, cmap=cmap, norm=norm)
  if not args.matrix_no_colorbar:
    cb = plt.colorbar()
    cb.outline.set_linewidth(0)
    if not args.matrix_discritize_colormap:
      cb.set_clim(cmap_lb, cmap_ub)  # colormap limits for continuous
  # A matrix has no meaningful axis ticks; hide them all.
  ax.xaxis.set_ticks_position('none')
  ax.xaxis.set_ticks([])
  ax.yaxis.set_ticks_position('none')
  ax.yaxis.set_ticks([])
  plt.box(on=False)
def PlotTwoDimension(data_list, ax, args):
  """ Plot two dimensional data.

  Routes to the plotting routine matching args.mode; unknown modes are
  silently ignored (CheckArguments has already validated the mode).

  Args:
    data_list: a list of Data objects
    ax: a matplotlib axis object
    args: an argparse arguments object
  """
  if args.mode in ('scatter', 'line'):
    plotter = PlotLineScatter
  elif args.mode == 'contour':
    plotter = PlotContour
  elif args.mode == 'density':
    plotter = PlotDensity
  elif args.mode == 'matrix':
    plotter = PlotMatrix
  else:
    return
  plotter(data_list, ax, args)
def PlotContour(data_list, ax, args):
  """ Plot two dimensional density contour.

  Bins the (x, y) points into a 2d histogram, draws it as a smoothed
  grayscale image, and overlays labeled contour lines at either linearly
  or logarithmically spaced levels.

  Args:
    data_list: a list of Data objects
    ax: a matplotlib axis object
    args: an argparse arguments object
  Raises:
    BadInput: if more than one input file was supplied.
  """
  if len(data_list) > 1:
    raise BadInput('You cannot create a contour plot with more '
                   'than one input file')
  xs = data_list[0].x
  ys = data_list[0].y
  counts, xedges, yedges = numpy.histogram2d(
    xs, ys, range=[[min(xs), max(xs)], [min(ys), max(ys)]],
    bins=(args.contour_bin, args.contour_bin))
  extent = [yedges[0], yedges[-1], xedges[0], xedges[-1]]
  level_lo = min(map(min, counts))
  level_hi = max(map(max, counts))
  spacer = numpy.logspace if args.contour_logspace else numpy.linspace
  levels = spacer(level_lo, level_hi, args.contour_num_levels)
  plt.imshow(counts, interpolation='bilinear', origin='lower',
             cmap=matplotlib.cm.binary, extent=extent)
  labeled = plt.contour(counts, extent=extent, origin='lower',
                        levels=levels, colors=ColorPicker(0, args))
  plt.clabel(labeled, colors='red', inline=True, fmt='%1.0i',
             rightside_up=True)
def PlotLineScatter(data_list, ax, args):
  """ Plot two dimensional line or scatter data.

  Also records the global data extents on args (xmin/xmax/ymin/ymax) for
  CleanAxis() to use later, and in scatter mode clobbers args.linewidth.

  Args:
    data_list: a list of Data objects
    ax: a matplotlib axis object
    args: an argparse arguments object
  """
  if args.mode == 'scatter':
    # Scatter: no connecting line, honor --alpha, default marker 'o'.
    args.linewidth = 0.0
    alpha = args.alpha
    if args.marker is None:
      marker = 'o'
    else:
      marker = args.marker
  else:
    # Line mode: draw fully opaque with whatever marker was requested.
    marker = args.marker
    alpha = 1.0
  # Global extents across every input series.
  args.xmin = min(map(min, map(lambda data: data.x, data_list)))
  args.xmax = max(map(max, map(lambda data: data.x, data_list)))
  args.ymin = min(map(min, map(lambda data: data.y, data_list)))
  args.ymax = max(map(max, map(lambda data: data.y, data_list)))
  for i, data in enumerate(data_list, 0):
    ax.add_line(
      lines.Line2D(xdata=data.x,
                   ydata=data.y,
                   color=ColorPicker(i, args),
                   marker=marker,
                   markersize=args.markersize,
                   markerfacecolor=ColorPicker(i, args),
                   markeredgecolor='None',
                   alpha=alpha,
                   linewidth=args.linewidth))
    if args.regression:
      # Least-squares fit y = w[0] * x + w[1] drawn as a dashed red line.
      rxlist = numpy.array(data.x)
      rylist = numpy.array(data.y)
      A = numpy.array([rxlist, numpy.ones(len(rxlist))])
      try:
        w = numpy.linalg.lstsq(A.T, rylist)[0]
      except ValueError:
        # Degenerate input; warn and skip the remaining series too.
        sys.stderr.write('Warning, unable to perform regression!\n')
        return
      sorted_rxlist = numpy.array(sorted(rxlist))
      fitline = w[0] * sorted_rxlist + w[1]
      ax.add_line(
        lines.Line2D(xdata=[numpy.min(rxlist), numpy.max(rxlist)],
                     ydata=[fitline[0], fitline[-1]],
                     color='red',
                     linestyle='--'))
      slope, intercept, r_value, p_value, stderr = linregress(rxlist, rylist)
      # Render "a * x + b" with an explicit sign so b is never negative
      # in the annotation text.
      if w[1] > 0:
        op = '+'
      else:
        op = '-'
        w[1] = abs(w[1])
      ax.text(x=(rxlist[0] + rxlist[-1]) / 2.0,
              y=(fitline[0] + fitline[-1]) / 2.0,
              s=('%f * x %s %f,\n$r^2$=%f, $p$=%f'
                 % (w[0], op, w[1], r_value * r_value, p_value)),
              verticalalignment='bottom',
              horizontalalignment='center')
def ReadFiles(args):
  """ Read and parse all input files.
  Args:
    args: an argparse arguments object
  Returns:
    data_list: a list of numpy arrays of dimension (n by c) where
    n is the number of observations and c is the number of columns.
  """
  data_list = []
  for a_file in args.files:
    num_columns = None
    f = open(a_file, 'r')
    rows = []
    line_number = 0
    for line in f:
      line_number += 1
      line = line.strip()
      # Lines starting with '#' are comments.
      if line.startswith('#'):
        continue
      r = Row()
      r.columns = line.split()
      r.line_number = line_number
      if num_columns is None:
        # First data line fixes the expected column count for the file.
        num_columns = len(r.columns)
        # NOTE(review): args.columns holds 0-based indexes at this point
        # (DefineColumns subtracts 1), so this check looks off by one:
        # max(args.columns) == num_columns slips through — verify.
        if num_columns < max(args.columns):
          raise BadInput('Input file %s has only %d columns, you requested a '
                         'column, %d, which is out of bounds.'
                         % (a_file, num_columns, max(args.columns)))
      else:
        # Every subsequent line must keep the same column count.
        if num_columns != len(r.columns):
          raise BadInput('Input file %s had %d columns, switches to %d '
                         'columns on line %d:\n%s\n'
                         % (a_file, num_columns, len(r.columns),
                            line_number, line))
      rows.append(r)
    f.close()
    d = Data()
    d.label = os.path.basename(a_file)
    # Optionally thin very large inputs to --downsample rows.
    if args.downsample:
      if len(rows) > args.downsample:
        rows = random.sample(rows, args.downsample)
    d.rows = rows
    d.process_data(args)
    data_list.append(d)
  return data_list
def PlotOneDimension(data_list, ax, args):
  """ Plot one dimensional data.

  Routes to the plotting routine matching args.mode.

  Args:
    data_list: a list of Data objects.
    ax: a matplotlib axis object.
    args: an argparse arguments object.
  """
  if args.mode in ('bar', 'column'):
    PlotColumns(data_list, ax, args)
  elif args.mode == 'hist':
    PlotHistogram(data_list, ax, args)
  elif args.mode in ('tick', 'barcode'):
    PlotTicks(data_list, ax, args)
  elif args.mode == 'point':
    PlotPoints(data_list, ax, args)
def PlotHistogram(data_list, ax, args):
  """ Plot one dimensional data as histogram.
  Args:
    data_list: a list of Data objects.
    ax: a matplotlib axis object.
    args: an argparse arguments object.
  """
  # The unused `width` local and the unused bin counts / bin edges that
  # ax.hist() returns were dropped.
  datas = [data.y for data in data_list]
  # In hist mode ColorPicker returns a list with one color per file.
  _, _, patch_groups = ax.hist(
    datas, color=ColorPicker(len(data_list), args), histtype='bar')
  for pg in patch_groups:
    if isinstance(pg, matplotlib.container.BarContainer):
      # if there are multiple files, pg will be a BarContainer
      for patch in pg.patches:
        patch.set_edgecolor('none')
    else:
      # if there is only one file to plot, pg is a Rectangle
      pg.set_edgecolor('white')
def PlotColumns(data_list, ax, args):
  """ Plot one dimensional data as column / bar plot.

  Multiple input files are drawn as interleaved bar groups, each file's
  bars offset by a fraction of the group width. Records the final axis
  extents back on args for CleanAxis().

  Args:
    data_list: a list of Data objects.
    ax: a matplotlib axis object.
    args: an argparse arguments object.
  """
  # Bars occupy 2/3 of each slot, split evenly among the input files.
  width = 2.0 / 3.0 / len(data_list)
  data_min = min(map(numpy.min, map(lambda x: x.y, data_list)))
  data_max = max(map(numpy.max, map(lambda x: x.y, data_list)))
  args.xmin = 0
  args.xmax = max(map(len, map(lambda data: data.y, data_list)))
  for i, data in enumerate(data_list, 0):
    data.x = range(0, len(data.y))
    # Shift file i's bars sideways so groups interleave.
    data.x = numpy.add(data.x, width * i) # offset
    rects = ax.bar(data.x,
                   data.y,
                   width,
                   color=ColorPicker(i, args),
                   linewidth=0.0,
                   alpha=1.0)
  ax.xaxis.set_ticklabels([])
  xmin, xmax, ymin, ymax = ax.axis()
  xmin, xmax = HandleLimits(xmin, xmax, args.user_xmin, args.user_xmax)
  # Always include 0 in the y range so bars are anchored at the baseline.
  ymin, ymax = HandleLimits(min(0.0, data_min), ymax,
                            args.user_ymin, args.user_ymax)
  args.ymin = ymin
  args.ymax = ymax
  ax.set_ylim([ymin, ymax])
  if args.xtick_label_column is not None:
    # Center the tick labels under each bar group. NOTE(review): `data`
    # here is the last file from the loop above — presumably every file
    # shares the same xtick_labels; verify.
    ax.xaxis.set_ticks(numpy.arange(0, len(data.xtick_labels)) + width / 2.)
    ax.xaxis.set_ticklabels(data.xtick_labels, rotation=35,
                            horizontalalignment='right')
  args.xmin = xmin
  args.xmax = xmax
  ax.set_xlim([xmin, xmax])
def GetTickYValues(i, args):
  """ Produce the lower and upper y values for a Tick plot.
  Args:
    i: Integer offset of this set of values.
    args: an argparse arguments object.
  Returns:
    y0, y1: the lower and upper y values for a Tick Plot
  """
  if not args.jitter:
    # Deterministic tall ticks filling most of the row.
    return i, i + 0.8
  # Jittered: short ticks at a random offset within the row.
  offset = numpy.random.uniform(low=0.0, high=0.3)
  return i + offset, i + offset + 0.1
def HandleLimits(data_min, data_max, user_min, user_max):
  """ Decides whether to use the data values or user supplied values.

  sys.maxint (for user_min) and -sys.maxint (for user_max) are the
  sentinel values meaning "the user supplied nothing"; any other value
  overrides the corresponding data limit.

  Args:
    data_min: minimum value from the data
    data_max: maximum value from the data
    user_min: possibly a user requested value for min
    user_max: possibly a user requested value for max
  Returns:
    a_min: the correct minimum
    a_max: the correct maximum
  """
  chosen_min = user_min if user_min != sys.maxint else data_min
  chosen_max = user_max if user_max != -sys.maxint else data_max
  return chosen_min, chosen_max
def PlotTicks(data_list, ax, args):
  """ Plot one dimensional data as tick marks on a line.

  Each file occupies one horizontal row (row i for file i); every value
  becomes a short vertical line segment at that x position.

  Args:
    data_list: a list of Data objects
    ax: a matplotlib axis object
    args: an argparse arguments object
  """
  data_min = min(map(numpy.min, map(lambda x: x.y, data_list)))
  data_max = max(map(numpy.max, map(lambda x: x.y, data_list)))
  data_range = data_max - data_min
  if data_range == 0.0:
    # All values identical: synthesize a unit-wide window around them.
    data_min, data_max, data_range = -0.5, 0.5, 1.0
  # Pad the x range by 10% on each side.
  data_min -= data_range * 0.1
  data_max += data_range * 0.1
  for i, data in enumerate(data_list, 0):
    for d in data.y:
      # With --jitter the segment position within the row is randomized.
      y0, y1 = GetTickYValues(i, args)
      ax.add_line(
        lines.Line2D(xdata=[d, d],
                     ydata=[y0, y1],
                     color=ColorPicker(i, args),
                     marker=None,
                     markersize=args.markersize,
                     markerfacecolor=ColorPicker(i, args),
                     markeredgecolor='None',
                     alpha=args.alpha,
                     linewidth=args.linewidth))
  ymin, ymax = HandleLimits(0.0, len(data_list),
                            args.user_ymin, args.user_ymax)
  ax.set_ylim([ymin, ymax])
  xmin, xmax = HandleLimits(data_min, data_max,
                            args.user_xmin, args.user_xmax)
  ax.set_xlim([xmin, xmax])
  # Row numbers on the y axis are meaningless to the reader; hide them.
  ax.yaxis.set_ticks_position('none')
  ax.yaxis.set_ticks([])
def GetPointYValues(n, i, args):
  """ Produce the y values for a Point plot.
  Args:
    n: number of values to produce.
    i: Integer offset of this set of values.
    args: an argparse arguments object.
  Returns:
    y: a list of y values for a Point plot.
  """
  if not args.jitter:
    # Without jitter every point of file i sits exactly on row i.
    return [i] * n
  # Spread the points randomly across half a row height.
  return numpy.random.uniform(low=i, high=i + 0.5, size=n)
def PlotPoints(data_list, ax, args):
  """ Plot one dimensional data as points on a line.

  Each file occupies one horizontal row (row i for file i); every value
  becomes a dot at that x position.

  Args:
    data_list: a list of Data objects
    ax: a matplotlib axis object
    args: an argparse arguments object
  """
  data_min = min(map(numpy.min, map(lambda x: x.y, data_list)))
  data_max = max(map(numpy.max, map(lambda x: x.y, data_list)))
  data_range = data_max - data_min
  if data_range == 0.0:
    # All values identical: synthesize a unit-wide window around them.
    data_min, data_max, data_range = -0.5, 0.5, 1.0
  # Pad the x range by 10% on each side.
  data_min -= data_range * 0.1
  data_max += data_range * 0.1
  for i, data in enumerate(data_list, 0):
    data.x = GetPointYValues(len(data.y), i, args)
    ax.add_line(
      lines.Line2D(xdata=data.y,
                   ydata=data.x,
                   color=ColorPicker(i, args),
                   marker='o',
                   markersize=args.markersize,
                   markerfacecolor=ColorPicker(i, args),
                   markeredgecolor='None',
                   alpha=args.alpha,
                   linewidth=0.0))
  # BUG FIX: the limits computed by HandleLimits() were previously
  # discarded and the raw data limits applied instead, so --xmin/--xmax
  # and --ymin/--ymax had no effect in point mode (compare PlotTicks,
  # which applies them).
  ymin, ymax = HandleLimits(-0.5, len(data_list),
                            args.user_ymin, args.user_ymax)
  ax.set_ylim([ymin, ymax])
  xmin, xmax = HandleLimits(data_min, data_max,
                            args.user_xmin, args.user_xmax)
  ax.set_xlim([xmin, xmax])
  # Row numbers on the y axis are meaningless to the reader; hide them.
  ax.yaxis.set_ticks_position('none')
  ax.yaxis.set_ticks([])
def PlotData(data_list, ax, args):
  """ Plot all of the data according to input arguments.

  Routes to the two dimensional or one dimensional plotting front end
  based on args.mode.

  Args:
    data_list: a list of Data objects.
    ax: a matplotlib axis object.
    args: an argparse argument object.
  """
  two_dim_modes = ('scatter', 'line', 'contour', 'density', 'matrix')
  one_dim_modes = ('bar', 'column', 'hist', 'tick', 'barcode', 'point')
  if args.mode in two_dim_modes:
    PlotTwoDimension(data_list, ax, args)
  elif args.mode in one_dim_modes:
    PlotOneDimension(data_list, ax, args)
def MakeProxyPlots(args):
  """ Make some proxy plots for use with legends.
  Proxy plots are plots that are not actually drawn but whose
  colors are used for correctly populating a legend.
  Args:
    args: an argparse argument object.
  Returns:
    proxy_plots: A list of matplotlib plot objects.
  """
  # The two near-identical loops were merged; the unused `afile` loop
  # variable was removed.
  proxy_plots = []
  for i in range(len(args.files)):
    if args.mode != 'hist':
      color = ColorPicker(i, args)
    else:
      # In hist mode ColorPicker returns a list of colors, one per file;
      # pick this file's entry.
      color = ColorPicker(len(args.files), args)[i]
    proxy_plots.append(plt.Rectangle((0, 0), 1, 1, fc=color, ec=color))
  return proxy_plots
def MakeLegendLabels(args):
  """ Make labels for use with legends.

  Each input file is labeled by its basename, in input order.

  Args:
    args: an argparse argument object
  Returns:
    legend_labels: A list of strings.
  """
  return [os.path.basename(path) for path in args.files]
def CleanAxis(ax, args):
  """ Clean the axis up, apply scales, add legend.

  Applies log scales or padded linear limits, axis labels and title,
  then the legend and aspect ratio. Reads the data extents that the
  plotting routines recorded on args.

  Args:
    ax: a matplotlib axis object
    args: an argparse argument object
  """
  # y axis
  if args.is_log_y:
    ax.set_yscale('log')
  else:
    # The one dimensional modes already applied their own y limits.
    if args.mode not in ('hist', 'tick', 'barcode', 'point', 'bar', 'column'):
      # Pad the data extent by 5% on each side, unless overridden.
      arange = args.ymax - args.ymin
      ymin, ymax = HandleLimits(args.ymin - arange * 0.05,
                                args.ymax + arange * 0.05,
                                args.user_ymin, args.user_ymax)
      ax.set_ylim([ymin, ymax])
  # x axis
  if args.is_log_x:
    ax.set_xscale('log')
  else:
    if args.mode not in ('hist', 'tick', 'barcode', 'point', 'bar', 'column'):
      arange = args.xmax - args.xmin
      xmin, xmax = HandleLimits(args.xmin - arange * 0.05,
                                args.xmax + arange * 0.05,
                                args.user_xmin, args.user_xmax)
      ax.set_xlim([xmin, xmax])
  # labels -- 'sentinel_value' marks "option not supplied by the user".
  if args.xlabel != 'sentinel_value':
    ax.set_xlabel(args.xlabel)
  if args.ylabel != 'sentinel_value':
    if args.mode in ('tick', 'barcode', 'point'):
      # These modes hide the y axis entirely, so a ylabel would dangle.
      sys.stderr.write(
        'Warning, --ylabel specified while '
        '--mode=(tick, barcode or point), ylabel not displayed\n')
    else:
      ax.set_ylabel(args.ylabel)
  if args.title != 'sentinel_value':
    ax.set_title(args.title)
  # legend
  if args.is_legend:
    if args.mode not in ['contour', 'matrix']:
      proxy_plots = MakeProxyPlots(args)
      legend_labels = MakeLegendLabels(args)
      leg = plt.legend(proxy_plots, legend_labels, 'upper right', numpoints=1)
      # NOTE(review): _drawFrame is a private matplotlib attribute; this
      # may break on newer matplotlib (use leg.set_frame_on(False)).
      leg._drawFrame = False
  # aspect ratio
  if args.aspect_equal or args.mode == 'matrix':
    ax.axis('equal')
def main():
  """Parse command line arguments, load the input files, draw the plot,
  and write the image to disk."""
  usage = '%(prog)s file1 file2 file3... [options]\n\n'
  description = ('%(prog)s is a tool to produce quick plots. col1 '
                 'of input file is x value col2 is y value. If '
                 'the --mode is column/bar/hist then only col1 is '
                 'used.')
  parser = ArgumentParser(usage=usage, description=description)
  InitArguments(parser)
  args = parser.parse_args()
  CheckArguments(args, parser)
  # Build the figure, draw the data onto it, then finalize and write.
  fig, pdf = InitImage(args)
  axis = EstablishAxes(fig, args)
  plottables = ReadFiles(args)
  PlotData(plottables, axis, args)
  CleanAxis(axis, args)
  WriteImage(fig, pdf, args)
# Entry point guard: run main() only when executed as a script, not on import.
if __name__ == '__main__':
  main()
| [
"matplotlib.backends.backend_pdf.PdfPages",
"numpy.random.seed",
"argparse.ArgumentParser",
"random.sample",
"numpy.logspace",
"matplotlib.pyplot.box",
"numpy.isnan",
"matplotlib.pyplot.figure",
"matplotlib.colors.Normalize",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.matshow",
"os.path.ex... | [((1376, 1397), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (1390, 1397), False, 'import matplotlib\n'), ((23013, 23087), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(args.width, args.height)', 'dpi': 'args.dpi', 'facecolor': '"""w"""'}), "(figsize=(args.width, args.height), dpi=args.dpi, facecolor='w')\n", (23023, 23087), True, 'import matplotlib.pyplot as plt\n'), ((27749, 27779), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['args.matrix_cmap'], {}), '(args.matrix_cmap)\n', (27761, 27779), True, 'import matplotlib.pyplot as plt\n'), ((28738, 28755), 'matplotlib.pyplot.box', 'plt.box', ([], {'on': '(False)'}), '(on=False)\n', (28745, 28755), True, 'import matplotlib.pyplot as plt\n'), ((30075, 30177), 'matplotlib.pyplot.imshow', 'plt.imshow', (['H'], {'interpolation': '"""bilinear"""', 'origin': '"""lower"""', 'cmap': 'matplotlib.cm.binary', 'extent': 'extent'}), "(H, interpolation='bilinear', origin='lower', cmap=matplotlib.cm.\n binary, extent=extent)\n", (30085, 30177), True, 'import matplotlib.pyplot as plt\n'), ((30317, 30393), 'matplotlib.pyplot.clabel', 'plt.clabel', (['c_set'], {'colors': '"""red"""', 'inline': '(True)', 'fmt': '"""%1.0i"""', 'rightside_up': '(True)'}), "(c_set, colors='red', inline=True, fmt='%1.0i', rightside_up=True)\n", (30327, 30393), True, 'import matplotlib.pyplot as plt\n'), ((44494, 44546), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'usage': 'usage', 'description': 'description'}), '(usage=usage, description=description)\n', (44508, 44546), False, 'from argparse import ArgumentParser\n'), ((3976, 3995), 'numpy.array', 'numpy.array', (['self.y'], {}), '(self.y)\n', (3987, 3995), False, 'import numpy\n'), ((15928, 15968), 'numpy.random.seed', 'numpy.random.seed', ([], {'seed': 'args.random_seed'}), '(seed=args.random_seed)\n', (15945, 15968), False, 'import numpy\n'), ((16028, 16057), 'random.seed', 'random.seed', (['args.random_seed'], {}), 
'(args.random_seed)\n', (16039, 16057), False, 'import random\n'), ((22969, 23004), 'matplotlib.backends.backend_pdf.PdfPages', 'pltBack.PdfPages', (["(args.out + '.pdf')"], {}), "(args.out + '.pdf')\n", (22985, 23004), True, 'import matplotlib.backends.backend_pdf as pltBack\n'), ((23902, 23921), 'matplotlib.pylab.NullLocator', 'pylab.NullLocator', ([], {}), '()\n', (23919, 23921), True, 'import matplotlib.pylab as pylab\n'), ((23952, 23971), 'matplotlib.pylab.NullLocator', 'pylab.NullLocator', ([], {}), '()\n', (23969, 23971), True, 'import matplotlib.pylab as pylab\n'), ((26281, 26301), 'scipy.stats.gaussian_kde', 'gaussian_kde', (['data.y'], {}), '(data.y)\n', (26293, 26301), False, 'from scipy.stats import scoreatpercentile, linregress, gaussian_kde\n'), ((26643, 26662), 'numpy.array', 'numpy.array', (['x_data'], {}), '(x_data)\n', (26654, 26662), False, 'import numpy\n'), ((27994, 28063), 'numpy.linspace', 'numpy.linspace', (['cmap_lb', 'cmap_ub', '(args.matrix_discritize_colormap + 1)'], {}), '(cmap_lb, cmap_ub, args.matrix_discritize_colormap + 1)\n', (28008, 28063), False, 'import numpy\n'), ((28103, 28149), 'matplotlib.colors.BoundaryNorm', 'matplotlib.colors.BoundaryNorm', (['bounds', 'cmap.N'], {}), '(bounds, cmap.N)\n', (28133, 28149), False, 'import matplotlib\n'), ((28169, 28224), 'matplotlib.colors.Normalize', 'matplotlib.colors.Normalize', ([], {'vmin': 'cmap_lb', 'vmax': 'cmap_ub'}), '(vmin=cmap_lb, vmax=cmap_ub)\n', (28196, 28224), False, 'import matplotlib\n'), ((28255, 28331), 'matplotlib.pyplot.matshow', 'plt.matshow', (['data.matrix'], {'fignum': '(False)', 'origin': '"""upper"""', 'cmap': 'cmap', 'norm': 'norm'}), "(data.matrix, fignum=False, origin='upper', cmap=cmap, norm=norm)\n", (28266, 28331), True, 'import matplotlib.pyplot as plt\n'), ((28360, 28405), 'matplotlib.pyplot.pcolor', 'plt.pcolor', (['data.matrix'], {'cmap': 'cmap', 'norm': 'norm'}), '(data.matrix, cmap=cmap, norm=norm)\n', (28370, 28405), True, 'import matplotlib.pyplot 
as plt\n'), ((28449, 28463), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (28461, 28463), True, 'import matplotlib.pyplot as plt\n'), ((29965, 30004), 'numpy.logspace', 'numpy.logspace', (['c_min', 'c_max', 'nc_levels'], {}), '(c_min, c_max, nc_levels)\n', (29979, 30004), False, 'import numpy\n'), ((30028, 30067), 'numpy.linspace', 'numpy.linspace', (['c_min', 'c_max', 'nc_levels'], {}), '(c_min, c_max, nc_levels)\n', (30042, 30067), False, 'import numpy\n'), ((33950, 33974), 'os.path.basename', 'os.path.basename', (['a_file'], {}), '(a_file)\n', (33966, 33974), False, 'import os\n'), ((35983, 36011), 'numpy.add', 'numpy.add', (['data.x', '(width * i)'], {}), '(data.x, width * i)\n', (35992, 36011), False, 'import numpy\n'), ((37123, 37162), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'low': '(0.0)', 'high': '(0.3)'}), '(low=0.0, high=0.3)\n', (37143, 37162), False, 'import numpy\n'), ((39469, 39518), 'numpy.random.uniform', 'numpy.random.uniform', ([], {'low': 'i', 'high': '(i + 0.5)', 'size': 'n'}), '(low=i, high=i + 0.5, size=n)\n', (39489, 39518), False, 'import numpy\n'), ((3268, 3282), 'numpy.isnan', 'numpy.isnan', (['y'], {}), '(y)\n', (3279, 3282), False, 'import numpy\n'), ((3943, 3962), 'numpy.array', 'numpy.array', (['self.x'], {}), '(self.x)\n', (3954, 3962), False, 'import numpy\n'), ((5190, 5236), 'numpy.zeros', 'numpy.zeros', (['(num_rows, num_cols)'], {'dtype': 'float'}), '((num_rows, num_cols), dtype=float)\n', (5201, 5236), False, 'import numpy\n'), ((26330, 26347), 'numpy.min', 'numpy.min', (['data.y'], {}), '(data.y)\n', (26339, 26347), False, 'import numpy\n'), ((26377, 26394), 'numpy.max', 'numpy.max', (['data.y'], {}), '(data.y)\n', (26386, 26394), False, 'import numpy\n'), ((27160, 27182), 'numpy.min', 'numpy.min', (['data.matrix'], {}), '(data.matrix)\n', (27169, 27182), False, 'import numpy\n'), ((27288, 27310), 'numpy.max', 'numpy.max', (['data.matrix'], {}), '(data.matrix)\n', (27297, 27310), False, 'import 
numpy\n'), ((31628, 31647), 'numpy.array', 'numpy.array', (['data.x'], {}), '(data.x)\n', (31639, 31647), False, 'import numpy\n'), ((31663, 31682), 'numpy.array', 'numpy.array', (['data.y'], {}), '(data.y)\n', (31674, 31682), False, 'import numpy\n'), ((32265, 32291), 'scipy.stats.linregress', 'linregress', (['rxlist', 'rylist'], {}), '(rxlist, rylist)\n', (32275, 32291), False, 'from scipy.stats import scoreatpercentile, linregress, gaussian_kde\n'), ((42413, 42436), 'os.path.basename', 'os.path.basename', (['afile'], {}), '(afile)\n', (42429, 42436), False, 'import os\n'), ((43587, 43709), 'sys.stderr.write', 'sys.stderr.write', (['"""Warning, --ylabel specified while --mode=(tick, barcode or point), ylabel not displayed\n"""'], {}), '(\n """Warning, --ylabel specified while --mode=(tick, barcode or point), ylabel not displayed\n"""\n )\n', (43603, 43709), False, 'import sys\n'), ((44003, 44069), 'matplotlib.pyplot.legend', 'plt.legend', (['proxy_plots', 'legend_labels', '"""upper right"""'], {'numpoints': '(1)'}), "(proxy_plots, legend_labels, 'upper right', numpoints=1)\n", (44013, 44069), True, 'import matplotlib.pyplot as plt\n'), ((2887, 2901), 'numpy.isnan', 'numpy.isnan', (['x'], {}), '(x)\n', (2898, 2901), False, 'import numpy\n'), ((14216, 14233), 'os.path.exists', 'os.path.exists', (['f'], {}), '(f)\n', (14230, 14233), False, 'import os\n'), ((34052, 34088), 'random.sample', 'random.sample', (['rows', 'args.downsample'], {}), '(rows, args.downsample)\n', (34065, 34088), False, 'import random\n'), ((31763, 31794), 'numpy.linalg.lstsq', 'numpy.linalg.lstsq', (['A.T', 'rylist'], {}), '(A.T, rylist)\n', (31781, 31794), False, 'import numpy\n'), ((31831, 31891), 'sys.stderr.write', 'sys.stderr.write', (['"""Warning, unable to perform regression!\n"""'], {}), "('Warning, unable to perform regression!\\n')\n", (31847, 31891), False, 'import sys\n'), ((32049, 32066), 'numpy.min', 'numpy.min', (['rxlist'], {}), '(rxlist)\n', (32058, 32066), False, 'import 
numpy\n'), ((32068, 32085), 'numpy.max', 'numpy.max', (['rxlist'], {}), '(rxlist)\n', (32077, 32085), False, 'import numpy\n')] |
import torch
import numpy as np
import os
import time
def search(dataset, batch_size=1, shuffle=False, sampler=None,
           batch_sampler=None, collate_fn=None,
           pin_memory=False, drop_last=False, timeout=0,
           worker_init_fn=None, *, prefetch_factor=2,
           persistent_workers=False, threshold=5.):
    '''Search for the ``num_workers`` value that iterates ``dataset`` fastest.

    Every argument other than ``threshold`` is forwarded unchanged to
    ``torch.utils.data.DataLoader``.

    threshold(float): Threshold for measuring the new optimum, to only focus
        on significant changes. Unit is second.

    Returns:
        int: the fastest ``num_workers`` value found.
    '''
    # refer to https://github.com/pytorch/pytorch/blob/master/torch/utils/data/dataloader.py
    max_num_workers = os.cpu_count()
    init_num_workers = max_num_workers // 2
    # Candidate order: half the CPU count first, then 0 and 1, then counting
    # down from the initial guess, finally counting up to the CPU count.
    num_workers_list = [init_num_workers, 0, 1]
    num_workers_list += list(np.arange(start=2, stop=init_num_workers + 1)[::-1])
    num_workers_list += list(np.arange(init_num_workers + 1, max_num_workers + 1))
    num_workers_list = np.array(num_workers_list)
    # De-duplicate while preserving first-occurrence order,
    # e.g. input [1, 0, 1, 2, 3] -> output [1, 0, 2, 3].
    _, order_preserved_indexes = np.unique(num_workers_list, return_index=True)
    num_workers_list = num_workers_list[np.sort(order_preserved_indexes)]
    optimal_num_worker = 0
    # BUG FIX: this used np.finfo(np.float).max, but the `np.float` alias was
    # removed in NumPy 1.24 and raises AttributeError there. Plain infinity is
    # the intended "no best time yet" sentinel and works on every version.
    min_total_time = float('inf')
    skip = np.zeros(len(num_workers_list))
    for i, num_workers in enumerate(num_workers_list):  # [0, max_num_workers]
        if skip[i]:
            continue
        loader = torch.utils.data.DataLoader(dataset=dataset,
                                             batch_size=batch_size,
                                             shuffle=shuffle,
                                             sampler=sampler,
                                             batch_sampler=batch_sampler,
                                             num_workers=num_workers,
                                             collate_fn=collate_fn,
                                             pin_memory=pin_memory,
                                             drop_last=drop_last,
                                             timeout=timeout,
                                             worker_init_fn=worker_init_fn,
                                             prefetch_factor=prefetch_factor,
                                             persistent_workers=persistent_workers)
        # Accumulate only the time spent waiting for batches; abort this
        # candidate as soon as it is already slower than the current best.
        t1 = time.time()
        total_time = 0
        for _ in loader:
            t2 = time.time()
            total_time += t2 - t1
            if total_time > min_total_time:
                break
            t1 = time.time()
        if total_time < min_total_time:
            optimal_num_worker = num_workers
            # Stop early when the improvement is insignificant.
            if min_total_time - total_time < threshold:
                break
            min_total_time = total_time
        else:  # total_time >= min_total_time
            # Prune candidates that cannot plausibly beat the optimum.
            if num_workers == 0:
                skip[num_workers_list == 1] = 1
            elif num_workers >= 2 and num_workers < optimal_num_worker:
                skip[num_workers_list < optimal_num_worker] = 1
            else:
                break
    return optimal_num_worker
| [
"torch.utils.data.DataLoader",
"time.time",
"os.cpu_count",
"numpy.sort",
"numpy.finfo",
"numpy.array",
"numpy.arange",
"numpy.unique"
] | [((494, 508), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (506, 508), False, 'import os\n'), ((885, 911), 'numpy.array', 'np.array', (['num_workers_list'], {}), '(num_workers_list)\n', (893, 911), True, 'import numpy as np\n'), ((999, 1045), 'numpy.unique', 'np.unique', (['num_workers_list'], {'return_index': '(True)'}), '(num_workers_list, return_index=True)\n', (1008, 1045), True, 'import numpy as np\n'), ((807, 859), 'numpy.arange', 'np.arange', (['(init_num_workers + 1)', '(max_num_workers + 1)'], {}), '(init_num_workers + 1, max_num_workers + 1)\n', (816, 859), True, 'import numpy as np\n'), ((1087, 1119), 'numpy.sort', 'np.sort', (['order_preserved_indexes'], {}), '(order_preserved_indexes)\n', (1094, 1119), True, 'import numpy as np\n'), ((1170, 1188), 'numpy.finfo', 'np.finfo', (['np.float'], {}), '(np.float)\n', (1178, 1188), True, 'import numpy as np\n'), ((1391, 1752), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', ([], {'dataset': 'dataset', 'batch_size': 'batch_size', 'shuffle': 'shuffle', 'sampler': 'sampler', 'batch_sampler': 'batch_sampler', 'num_workers': 'num_workers', 'collate_fn': 'collate_fn', 'pin_memory': 'pin_memory', 'drop_last': 'drop_last', 'timeout': 'timeout', 'worker_init_fn': 'worker_init_fn', 'prefetch_factor': 'prefetch_factor', 'persistent_workers': 'persistent_workers'}), '(dataset=dataset, batch_size=batch_size, shuffle\n =shuffle, sampler=sampler, batch_sampler=batch_sampler, num_workers=\n num_workers, collate_fn=collate_fn, pin_memory=pin_memory, drop_last=\n drop_last, timeout=timeout, worker_init_fn=worker_init_fn,\n prefetch_factor=prefetch_factor, persistent_workers=persistent_workers)\n', (1418, 1752), False, 'import torch\n'), ((2286, 2297), 'time.time', 'time.time', ([], {}), '()\n', (2295, 2297), False, 'import time\n'), ((723, 768), 'numpy.arange', 'np.arange', ([], {'start': '(2)', 'stop': '(init_num_workers + 1)'}), '(start=2, stop=init_num_workers + 1)\n', (732, 768), True, 'import numpy as 
np\n'), ((2363, 2374), 'time.time', 'time.time', ([], {}), '()\n', (2372, 2374), False, 'import time\n'), ((2492, 2503), 'time.time', 'time.time', ([], {}), '()\n', (2501, 2503), False, 'import time\n')] |
# Dalek point-of-view face pipeline: Coral EdgeTPU face detection ->
# dlib 5-point landmarks -> dlib ResNet face recognition, optionally rendered
# with a colour-graded "Dalek PoV" overlay on screen.
import time
import sys
import argparse
# import image and DL processing
import cv2
import numpy as np
import dlib
from random import randrange
# from edgetpu.detection.engine import DetectionEngine
from pycoral.adapters import common
from pycoral.adapters import detect
from pycoral.utils.edgetpu import make_interpreter
from scipy.interpolate import UnivariateSpline
from imutils.video import VideoStream
from PIL import Image, ImageDraw
# import local helper classes
from faceextractor import FaceDataExtractor
from recognizer import FaceRecognizer

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-o", "--output", default=False, action="store_true",
    help="Display dalek PoV")
ap.add_argument("-f", "--face", type=float, default=0.7,
    help="Face detection certainty")
ap.add_argument("-r", "--recognize", type=float, default=0.7,
    help="Face recognition certainty")
args = vars(ap.parse_args())
print(args)

print("Loading face detection engine...")
interpreter = make_interpreter("/home/pi/coral-dalek/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite")
interpreter.allocate_tensors()

print("Loading face landmark detection engine...")
shape_pred = dlib.shape_predictor("./shape_predictor_5_face_landmarks.dat")
face_ext = FaceDataExtractor()

print("Loading face recognition engine...")  # FIX: typo "recognitn" in user-facing message
facerec = dlib.face_recognition_model_v1("./dlib_face_recognition_resnet_model_v1.dat")
face_recog = FaceRecognizer()

# Colour-temperature LUTs, see
# https://www.askaswiss.com/2016/02/how-to-manipulate-color-temperature-opencv-python.html
if args['output']:
    pov = 0
    overlay = []
    overlay.append(cv2.imread('dalekpov-a.png'))
    overlay.append(cv2.imread('dalekpov-b.png'))
    overlay.append(cv2.imread('dalekpov-c.png'))

    def create_transform(x, y):
        # Fit a spline through the control points and sample it at every
        # 8-bit intensity, producing a 256-entry lookup table.
        spl = UnivariateSpline(x, y)
        return spl(range(256))

    inc_col = create_transform([0, 64, 128, 192, 256], [150, 175, 200, 225, 256])
    dec_col = create_transform([0, 64, 128, 192, 256], [28, 64, 90, 110, 128])

print("Starting video capture")
vc = cv2.VideoCapture(0)
if not vc.isOpened():
    print("Cannot open USB camera.")
    exit()
cap_width = vc.get(cv2.CAP_PROP_FRAME_WIDTH)
cap_height = vc.get(cv2.CAP_PROP_FRAME_HEIGHT)
cap_fps = vc.get(cv2.CAP_PROP_FPS)
print(cap_width, " x ", cap_height, " @ ", cap_fps)

print("Entering main loop, press CTRL+C to exit...")
while True:
    try:
        ret, frame = vc.read()
        if not ret:
            print("No frame received from camera; exiting...")
            break
        # OpenCV delivers BGR; PIL and dlib expect RGB.
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image = Image.fromarray(frame)
        _, scale = common.set_resized_input(
            interpreter, image.size, lambda size: image.resize(size, Image.ANTIALIAS))
        interpreter.invoke()
        face_box_list = detect.get_objects(interpreter, args['face'], scale)
        draw = ImageDraw.Draw(image)
        for face in face_box_list:
            bbox = face.bbox
            draw.rectangle([(bbox.xmin, bbox.ymin), (bbox.xmax, bbox.ymax)], outline='black')
            box = dlib.rectangle(left=bbox.xmin,
                                 right=bbox.xmax,
                                 top=bbox.ymin,
                                 bottom=bbox.ymax)
            shape = shape_pred(frame, box)
            if shape:
                face_chip_img = dlib.get_face_chip(frame, shape)
                face_descriptor = facerec.compute_face_descriptor(face_chip_img)
                name = face_recog.recognize_face(face_descriptor, threshold=args['recognize'])
                if name:
                    # BUG FIX: this previously read `if output:` -- `output`
                    # was never defined, so any recognised face raised
                    # NameError. The flag lives in args['output'].
                    if args['output']:
                        draw.text((bbox.xmin, bbox.ymin - 20), name, fill='black')
                    else:
                        print(name)
        if args['output']:
            displayImage = np.asarray(image)
            blue, green, red = cv2.split(displayImage)
            # Shift colour temperature: damp two channels, boost the third.
            red = cv2.LUT(red, dec_col).astype(np.uint8)
            blue = cv2.LUT(blue, dec_col).astype(np.uint8)
            green = cv2.LUT(green, inc_col).astype(np.uint8)
            displayImage = cv2.merge((red, green, blue))
            # displayImage = cv2.cvtColor(displayImage, cv2.COLOR_BGR2GRAY)
            # Randomly flick between the three PoV overlay frames.
            if (randrange(10) > 6): pov = randrange(3)
            displayImage = cv2.addWeighted(displayImage, 0.8, overlay[pov], 0.2, 0)
            cv2.imshow('<NAME> PoV', displayImage)
            if cv2.waitKey(1) == ord('q'):
                raise KeyboardInterrupt
    except KeyboardInterrupt:
        vc.release()
        cv2.destroyAllWindows()
        print("Program halted by CTRL+C")
        sys.exit(0)
"pycoral.utils.edgetpu.make_interpreter",
"argparse.ArgumentParser",
"faceextractor.FaceDataExtractor",
"dlib.rectangle",
"cv2.imshow",
"dlib.shape_predictor",
"recognizer.FaceRecognizer",
"cv2.cvtColor",
"scipy.interpolate.UnivariateSpline",
"cv2.split",
"cv2.LUT",
"PIL.ImageDraw.Draw",
"cv... | [((616, 641), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (639, 641), False, 'import argparse\n'), ((1027, 1132), 'pycoral.utils.edgetpu.make_interpreter', 'make_interpreter', (['"""/home/pi/coral-dalek/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite"""'], {}), "(\n '/home/pi/coral-dalek/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite'\n )\n", (1043, 1132), False, 'from pycoral.utils.edgetpu import make_interpreter\n'), ((1219, 1281), 'dlib.shape_predictor', 'dlib.shape_predictor', (['"""./shape_predictor_5_face_landmarks.dat"""'], {}), "('./shape_predictor_5_face_landmarks.dat')\n", (1239, 1281), False, 'import dlib\n'), ((1293, 1312), 'faceextractor.FaceDataExtractor', 'FaceDataExtractor', ([], {}), '()\n', (1310, 1312), False, 'from faceextractor import FaceDataExtractor\n'), ((1365, 1442), 'dlib.face_recognition_model_v1', 'dlib.face_recognition_model_v1', (['"""./dlib_face_recognition_resnet_model_v1.dat"""'], {}), "('./dlib_face_recognition_resnet_model_v1.dat')\n", (1395, 1442), False, 'import dlib\n'), ((1456, 1472), 'recognizer.FaceRecognizer', 'FaceRecognizer', ([], {}), '()\n', (1470, 1472), False, 'from recognizer import FaceRecognizer\n'), ((2059, 2078), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (2075, 2078), False, 'import cv2\n'), ((1631, 1659), 'cv2.imread', 'cv2.imread', (['"""dalekpov-a.png"""'], {}), "('dalekpov-a.png')\n", (1641, 1659), False, 'import cv2\n'), ((1680, 1708), 'cv2.imread', 'cv2.imread', (['"""dalekpov-b.png"""'], {}), "('dalekpov-b.png')\n", (1690, 1708), False, 'import cv2\n'), ((1729, 1757), 'cv2.imread', 'cv2.imread', (['"""dalekpov-c.png"""'], {}), "('dalekpov-c.png')\n", (1739, 1757), False, 'import cv2\n'), ((1806, 1828), 'scipy.interpolate.UnivariateSpline', 'UnivariateSpline', (['x', 'y'], {}), '(x, y)\n', (1822, 1828), False, 'from scipy.interpolate import UnivariateSpline\n'), ((2643, 2681), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 
'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (2655, 2681), False, 'import cv2\n'), ((2698, 2720), 'PIL.Image.fromarray', 'Image.fromarray', (['frame'], {}), '(frame)\n', (2713, 2720), False, 'from PIL import Image, ImageDraw\n'), ((2906, 2958), 'pycoral.adapters.detect.get_objects', 'detect.get_objects', (['interpreter', "args['face']", 'scale'], {}), "(interpreter, args['face'], scale)\n", (2924, 2958), False, 'from pycoral.adapters import detect\n'), ((2975, 2996), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image'], {}), '(image)\n', (2989, 2996), False, 'from PIL import Image, ImageDraw\n'), ((3173, 3258), 'dlib.rectangle', 'dlib.rectangle', ([], {'left': 'bbox.xmin', 'right': 'bbox.xmax', 'top': 'bbox.ymin', 'bottom': 'bbox.ymax'}), '(left=bbox.xmin, right=bbox.xmax, top=bbox.ymin, bottom=bbox.ymax\n )\n', (3187, 3258), False, 'import dlib\n'), ((3910, 3927), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (3920, 3927), True, 'import numpy as np\n'), ((3959, 3982), 'cv2.split', 'cv2.split', (['displayImage'], {}), '(displayImage)\n', (3968, 3982), False, 'import cv2\n'), ((4187, 4216), 'cv2.merge', 'cv2.merge', (['(red, green, blue)'], {}), '((red, green, blue))\n', (4196, 4216), False, 'import cv2\n'), ((4376, 4432), 'cv2.addWeighted', 'cv2.addWeighted', (['displayImage', '(0.8)', 'overlay[pov]', '(0.2)', '(0)'], {}), '(displayImage, 0.8, overlay[pov], 0.2, 0)\n', (4391, 4432), False, 'import cv2\n'), ((4441, 4479), 'cv2.imshow', 'cv2.imshow', (['"""<NAME> PoV"""', 'displayImage'], {}), "('<NAME> PoV', displayImage)\n", (4451, 4479), False, 'import cv2\n'), ((4622, 4645), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4643, 4645), False, 'import cv2\n'), ((4696, 4707), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (4704, 4707), False, 'import sys\n'), ((3455, 3487), 'dlib.get_face_chip', 'dlib.get_face_chip', (['frame', 'shape'], {}), '(frame, shape)\n', (3473, 3487), False, 'import dlib\n'), ((4310, 4323), 
'random.randrange', 'randrange', (['(10)'], {}), '(10)\n', (4319, 4323), False, 'from random import randrange\n'), ((4336, 4348), 'random.randrange', 'randrange', (['(3)'], {}), '(3)\n', (4345, 4348), False, 'from random import randrange\n'), ((4495, 4509), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4506, 4509), False, 'import cv2\n'), ((4001, 4022), 'cv2.LUT', 'cv2.LUT', (['red', 'dec_col'], {}), '(red, dec_col)\n', (4008, 4022), False, 'import cv2\n'), ((4059, 4081), 'cv2.LUT', 'cv2.LUT', (['blue', 'dec_col'], {}), '(blue, dec_col)\n', (4066, 4081), False, 'import cv2\n'), ((4119, 4142), 'cv2.LUT', 'cv2.LUT', (['green', 'inc_col'], {}), '(green, inc_col)\n', (4126, 4142), False, 'import cv2\n')] |
from datetime import datetime
import numpy as np
from keras.callbacks import EarlyStopping
from keras.layers import Conv2D, MaxPooling2D, Flatten
from keras.layers import Dense, Dropout, LSTM
from keras.layers.wrappers import TimeDistributed
from keras.models import Sequential
from keras.utils import to_categorical
from helpers.metrics import final_metric, confusion_metric_vis
from helpers.preprocessing import preprocessing
##https://github.com/sagarvegad/Video-Classification-CNN-and-LSTM-/blob/master/train_CNN.py
def build_sequential(nb_steps, nb_width, nb_height, nb_channels, input_channels, kernel_size):
    """Build and compile a TimeDistributed-CNN + LSTM binary video classifier.

    Args:
        nb_steps: number of frames (time steps) per sample.
        nb_width, nb_height: spatial frame dimensions.
        nb_channels: number of convolution filters per Conv2D layer.
        input_channels: channels of the input frames (e.g. 3 for RGB).
        kernel_size: kernel size of the first Conv2D layer.

    Returns:
        A compiled ``keras.models.Sequential`` model.
    """
    # define CNN model
    model = Sequential()
    model.add(TimeDistributed(Conv2D(nb_channels, kernel_size, activation='relu'), input_shape=(nb_steps, nb_width, nb_height, input_channels)))
    #model.add(TimeDistributed(Dropout(0.5)))
    model.add(TimeDistributed(Conv2D(nb_channels, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))
    # BUG FIX: the next line used to read `m#odel.add(...)`, which leaves a
    # bare name `m` in the code and raised NameError when the function ran.
    # It is now fully commented out like the other experiments below.
    #model.add(TimeDistributed(Conv2D(32, (3, 3), activation='relu', padding='same')))
    #model.add(TimeDistributed(Dropout(0.5)))
    #model.add(TimeDistributed(Conv2D(32, (3, 3), activation='relu', padding='same')))
    #model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))
    #model.add(TimeDistributed(Conv2D(64, (5, 5), activation='relu')))
    #model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))
    model.add(TimeDistributed(Flatten()))
    model.add(TimeDistributed(Dropout(0.5)))
    #model.add(TimeDistributed(Dense(200)))
    model.add(LSTM(5, return_sequences=False, name="lstm_layer", dropout=0.2))
    model.add(Dense(20, activation='relu', name='first_dense'))
    model.add(Dropout(0.5))
    #model.add(Dense(32, activation="relu", name="second_dense"))
    #model.add(Dropout(0.4))
    # BUG FIX: softmax over a single unit is identically 1.0, so the model
    # could never learn with binary_crossentropy; a single sigmoid unit is
    # the correct head for binary classification.
    model.add(Dense(1, activation='sigmoid', name="last_dense"))
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])  # final_metric could be added here
    return model
def evaluate_sequential(X, y, x_test):
    """Train the CNN+LSTM video model on (X, y) and predict labels for x_test.

    Args:
        X: training samples of shape (samples, steps, width, height, channels).
        y: training labels (argmax is taken along axis 1 for the confusion plot,
           so one-hot labels are assumed -- TODO confirm against caller).
        x_test: test samples with the same trailing shape as X.

    Returns:
        numpy array of predicted class indices for ``x_test``.
    """
    # Hyperparameters
    nb_channels = 3
    patience = 3
    batch_size = 1
    epochs = 20
    kernel_size = 2
    nb_samples, nb_steps, nb_width, nb_height, input_channels = X.shape
    print('\nfunctional_net ({} samples by {} series)'.format(nb_samples, nb_steps))
    model = build_sequential(kernel_size=kernel_size, nb_steps=nb_steps, nb_width=nb_width, nb_height=nb_height, nb_channels=nb_channels, input_channels=input_channels)
    print(model.summary())
    print('\nInput features:', X.shape, '\nOutput labels:', y.shape, sep='\n')
    # Stop training once validation accuracy plateaus.
    earlystop = EarlyStopping(monitor='val_accuracy', min_delta=0.0, patience=patience, verbose=2,
                              mode='auto')
    time_before = datetime.now()
    model.fit(X, y,
              epochs=epochs, batch_size=batch_size, validation_split=0.2, shuffle=True, callbacks=[earlystop],
              )  # , class_weight=class_weights
    time_after = datetime.now()
    print("fitting took {} seconds".format(time_after - time_before))
    # Predict once and reuse the result (the original called
    # model.predict(x_test) twice for identical output).
    y_test = np.argmax(model.predict(x_test), axis=1)
    y_true = np.argmax(y, axis=1)
    try:
        confusion_metric_vis(y_true=y_true, y_pred=y_test)
    except Exception:
        # Best-effort visualisation: never let a plotting failure abort
        # evaluation, but don't swallow KeyboardInterrupt/SystemExit either.
        pass
    return y_test
"helpers.metrics.confusion_metric_vis",
"numpy.argmax",
"keras.layers.LSTM",
"keras.layers.Dropout",
"keras.layers.Flatten",
"keras.callbacks.EarlyStopping",
"keras.layers.Dense",
"keras.layers.Conv2D",
"keras.models.Sequential",
"datetime.datetime.now",
"keras.layers.MaxPooling2D"
] | [((654, 666), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (664, 666), False, 'from keras.models import Sequential\n'), ((2868, 2967), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_accuracy"""', 'min_delta': '(0.0)', 'patience': 'patience', 'verbose': '(2)', 'mode': '"""auto"""'}), "(monitor='val_accuracy', min_delta=0.0, patience=patience,\n verbose=2, mode='auto')\n", (2881, 2967), False, 'from keras.callbacks import EarlyStopping\n'), ((3028, 3042), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3040, 3042), False, 'from datetime import datetime\n'), ((3239, 3253), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3251, 3253), False, 'from datetime import datetime\n'), ((3392, 3412), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (3401, 3412), True, 'import numpy as np\n'), ((1592, 1655), 'keras.layers.LSTM', 'LSTM', (['(5)'], {'return_sequences': '(False)', 'name': '"""lstm_layer"""', 'dropout': '(0.2)'}), "(5, return_sequences=False, name='lstm_layer', dropout=0.2)\n", (1596, 1655), False, 'from keras.layers import Dense, Dropout, LSTM\n'), ((1671, 1719), 'keras.layers.Dense', 'Dense', (['(20)'], {'activation': '"""relu"""', 'name': '"""first_dense"""'}), "(20, activation='relu', name='first_dense')\n", (1676, 1719), False, 'from keras.layers import Dense, Dropout, LSTM\n'), ((1735, 1747), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (1742, 1747), False, 'from keras.layers import Dense, Dropout, LSTM\n'), ((1858, 1907), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""softmax"""', 'name': '"""last_dense"""'}), "(1, activation='softmax', name='last_dense')\n", (1863, 1907), False, 'from keras.layers import Dense, Dropout, LSTM\n'), ((3430, 3480), 'helpers.metrics.confusion_metric_vis', 'confusion_metric_vis', ([], {'y_true': 'y_true', 'y_pred': 'y_pred'}), '(y_true=y_true, y_pred=y_pred)\n', (3450, 3480), False, 'from helpers.metrics 
import final_metric, confusion_metric_vis\n'), ((697, 748), 'keras.layers.Conv2D', 'Conv2D', (['nb_channels', 'kernel_size'], {'activation': '"""relu"""'}), "(nb_channels, kernel_size, activation='relu')\n", (703, 748), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten\n'), ((888, 950), 'keras.layers.Conv2D', 'Conv2D', (['nb_channels', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(nb_channels, (3, 3), activation='relu', padding='same')\n", (894, 950), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten\n'), ((983, 1013), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (995, 1013), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten\n'), ((1477, 1486), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1484, 1486), False, 'from keras.layers import Conv2D, MaxPooling2D, Flatten\n'), ((1519, 1531), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (1526, 1531), False, 'from keras.layers import Dense, Dropout, LSTM\n')] |
"""
Home Work 5
<NAME>
Exercise 8b
15863
"""
from math import sqrt
from time import perf_counter
import matplotlib.pyplot as plt
import numpy as np
start = perf_counter()
M = 1.989 * 10 ** 30
vx = 0
vy = 15768000
G = 6.6375 * 10 ** -5
x0 = 4 * 10 ** 9
y0 = 0
def f(solution, t):
    """Right-hand side of the two-body orbital ODE.

    ``solution`` holds (x, y, vx, vy); the return value is their time
    derivatives (vx, vy, ax, ay) under Newtonian gravity from the central
    mass M. The parameter ``t`` is unused -- the system is autonomous --
    but kept so the RK4 integrator can call f(solution, t).
    """
    x, y, x1, y1 = solution
    r = sqrt(x ** 2 + y ** 2)
    # Acceleration components of the inverse-square central force.
    return np.array([x1, y1, -G * M * x / r ** 3, -G * M * y / r ** 3])
# Integrate the orbit with classic fourth-order Runge-Kutta, then plot it.
ti = 0
tf = 2 * 48
h = .0005
tpoints = np.arange(ti, tf, h)
solution = np.array([x0, y0, vx, vy], float)
xpoints, ypoints = [], []
for t in tpoints:
    # Record the current position before advancing the state.
    xpoints.append(solution[0])
    ypoints.append(solution[1])
    # The four RK4 slope estimates.
    s1 = h * f(solution, t)
    s2 = h * f(solution + 0.5 * s1, t + 0.5 * h)
    s3 = h * f(solution + 0.5 * s2, t + 0.5 * h)
    s4 = h * f(solution + s3, t + h)
    solution += (s1 + 2 * s2 + 2 * s3 + s4) / 6
# Plot the computed trajectory.
plt.plot(xpoints, ypoints)
plt.xlabel('X in km')
plt.ylabel('Y in km')
plt.title('Trajectory')
plt.show()
end = perf_counter()
print('Time taken = {} seconds for 1 orbit.'.format((end - start) / 2))
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"math.sqrt",
"time.perf_counter",
"numpy.array",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((159, 173), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (171, 173), False, 'from time import perf_counter\n'), ((562, 582), 'numpy.arange', 'np.arange', (['ti', 'tf', 'h'], {}), '(ti, tf, h)\n', (571, 582), True, 'import numpy as np\n'), ((594, 627), 'numpy.array', 'np.array', (['[x0, y0, vx, vy]', 'float'], {}), '([x0, y0, vx, vy], float)\n', (602, 627), True, 'import numpy as np\n'), ((948, 974), 'matplotlib.pyplot.plot', 'plt.plot', (['xpoints', 'ypoints'], {}), '(xpoints, ypoints)\n', (956, 974), True, 'import matplotlib.pyplot as plt\n'), ((975, 996), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X in km"""'], {}), "('X in km')\n", (985, 996), True, 'import matplotlib.pyplot as plt\n'), ((997, 1018), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y in km"""'], {}), "('Y in km')\n", (1007, 1018), True, 'import matplotlib.pyplot as plt\n'), ((1019, 1042), 'matplotlib.pyplot.title', 'plt.title', (['"""Trajectory"""'], {}), "('Trajectory')\n", (1028, 1042), True, 'import matplotlib.pyplot as plt\n'), ((1043, 1053), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1051, 1053), True, 'import matplotlib.pyplot as plt\n'), ((1061, 1075), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (1073, 1075), False, 'from time import perf_counter\n'), ((333, 354), 'math.sqrt', 'sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (337, 354), False, 'from math import sqrt\n'), ((492, 520), 'numpy.array', 'np.array', (['[fx, fy, fx1, fy1]'], {}), '([fx, fy, fx1, fy1])\n', (500, 520), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# (C) Copyright 2018-2020
# Faculty of Applied Sciences
# Delft University of Technology
# <NAME>, November 2020.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
from nose.tools import nottest
import numpy
import numpy as np
from kernel_tuner import run_kernel
from test_expdist_ref import bhatdist_python_reference
from test_utils import get_kernel_path, generate_inputs, call_reference_function
from test_matrix_functions import generate_wrapper
compiler_options = ['-I'+get_kernel_path('expdist')]
@nottest
def test_against_reference(cost, A, B, scale_A, scale_B, m, n, ndim, nblocks, params):
    """Run the ExpDist GPU kernels and compare the result to the C reference.

    Launches ExpDist (2D) or ExpDist3D, reduces the per-block cross terms
    with reduce_cross_term, then asserts the final cost matches
    call_reference_function (and, for ndim == 3, prints the Python reference).
    """
    #call the GPU function
    with open(get_kernel_path('expdist')+'kernels.cu', 'r') as f:
        kernel_string = f.read()
    #mimic Hamid's testcase
    #A = numpy.ones_like(A)
    #B = 2.0*numpy.ones_like(B)
    #scale_A = 0.1*numpy.ones_like(scale_A)
    #scale_B = 0.1*numpy.ones_like(scale_B)
    print(A, B, scale_A, scale_B)
    # recompute nblocks from the tunable parameters (2D grid of thread blocks)
    nblocks = numpy.int32( numpy.ceil(m / float(params["block_size_x"]*params["tile_size_x"])) *
                numpy.ceil(n / float(params["block_size_y"]*params["tile_size_y"])) )
    arguments = [A, B, m, n, scale_A, scale_B, cost]
    grid_div_x = ["block_size_x", "tile_size_x"]
    grid_div_y = ["block_size_y", "tile_size_y"]
    if ndim == 2:
        answer = run_kernel("ExpDist", kernel_string, (m, n), arguments, params,
                   compiler_options=compiler_options, grid_div_x=grid_div_x, grid_div_y=grid_div_y)
    else:
        answer = run_kernel("ExpDist3D", kernel_string, (m, n), arguments, params,
                   compiler_options=compiler_options, grid_div_x=grid_div_x, grid_div_y=grid_div_y)
    #collect the results from the first kernel
    cross_term = answer[6]
    print("intermediate cross_term")
    print(cross_term)
    print(numpy.sum(cross_term))
    #call the second kernel to reduce the per thread block cross terms to a single value
    out = numpy.zeros(1).astype(numpy.float64)
    arguments = [out, cross_term, m, n, nblocks]
    answer = run_kernel("reduce_cross_term", kernel_string, 1, arguments, {"block_size_x": 128},
               compiler_options=compiler_options, grid_div_x=[])
    #call the reference function
    ref_cost = call_reference_function(m, n, ndim, A, B, scale_A, scale_B, cost)
    #final cross term
    cost = answer[0][0]
    if ndim == 3:
        # extra Python-side reference only exists for the 3D case
        refpy_cost = bhatdist_python_reference(cost, A, B, m, n, ndim, scale_A, scale_B)
        print("reference python")
        print(refpy_cost)
    print("reference")
    print(ref_cost)
    print("answer")
    print(cost)
    # print again in full double precision for easier comparison
    print("reference")
    print("%30.20e" % ref_cost)
    print("answer")
    print("%30.20e" % cost)
    if ndim == 3:
        print("reference python")
        print("%30.20e" % refpy_cost)
    assert numpy.isclose(ref_cost, cost, atol=1e-5)
@nottest
def test_expdist_kernel(dim=2):
    """Exercise the full ExpDist kernel test for the given dimensionality."""
    ndim = numpy.int32(dim)
    m = numpy.int32(103)
    n = numpy.int32(59)
    # fixed tunable parameters:
    # block_size_x=32, block_size_y=4, tile_size_x=2, tile_size_y=1, use_shared_mem=1
    params = {
        "block_size_x": 32,
        "block_size_y": 4,
        "tile_size_x": 2,
        "tile_size_y": 1,
        "use_shared_mem": 1,
    }
    # number of thread blocks in the 2D launch grid
    blocks_x = numpy.ceil(m / float(params["block_size_x"] * params["tile_size_x"]))
    blocks_y = numpy.ceil(n / float(params["block_size_y"] * params["tile_size_y"]))
    nblocks = numpy.int32(blocks_x * blocks_y)
    cost, A, B, scale_A, scale_B = generate_inputs(m, n, ndim, nblocks)
    test_against_reference(cost, A, B, scale_A, scale_B, m, n, ndim, nblocks, params)
def test_expdist_kernel2D():
    # nose entry point: 2D variant of the ExpDist kernel test
    test_expdist_kernel(dim=2)
def test_expdist_kernel3D():
    # nose entry point: 3D variant of the ExpDist kernel test
    test_expdist_kernel(dim=3)
@nottest
def test_expdist_kernel_column(dim=2):
    """Compare the ExpDist_column GPU kernel against the C reference (dim 2 or 3)."""
    #setup test input
    allocation_size = int(3000)
    ndim = numpy.int32(dim)
    size = numpy.int32(2000)
    params = dict()
    params["block_size_x"] = 32
    params["block_size_y"] = 4
    params["tile_size_x"] = 2
    params["tile_size_y"] = 4
    params["use_shared_mem"] = 1
    # the column kernel is launched over a single grid dimension
    nblocks = numpy.int32( numpy.ceil(size / float(params["block_size_x"]*params["tile_size_x"])) )
    cost, A, B, scale_A, scale_B = generate_inputs(allocation_size, size, ndim, nblocks)
    #call the reference function
    ref_cost = call_reference_function(size, size, ndim, A, B, scale_A, scale_B, cost)
    #call the GPU function
    with open(get_kernel_path('expdist')+'kernels.cu', 'r') as f:
        kernel_string = f.read()
    arguments = [A, B, size, size, scale_A, scale_B, cost]
    grid_div_x = ["block_size_x", "tile_size_x"]
    if ndim == 2:
        answer = run_kernel("ExpDist_column", kernel_string, size, arguments, params,
                   compiler_options=compiler_options, grid_div_x=grid_div_x)
    else:
        answer = run_kernel("ExpDist_column3D", kernel_string, size, arguments, params,
                   compiler_options=compiler_options, grid_div_x=grid_div_x)
    #collect the results from the first kernel
    cross_term = answer[6]
    print("intermediate cross_term")
    print(cross_term)
    #call the second kernel to reduce the per thread block cross terms to a single value
    out = numpy.zeros(1).astype(numpy.float64)
    arguments = [out, cross_term, size, size, nblocks]
    answer = run_kernel("reduce_cross_term", kernel_string, 1, arguments, {"block_size_x": 128},
               compiler_options=compiler_options, grid_div_x=[])
    #final cross term
    cost = answer[0][0]
    print("reference")
    print(ref_cost)
    print("answer")
    print(cost)
    # print again in full double precision for easier comparison
    print("reference")
    print("%30.20e" % ref_cost)
    print("answer")
    print("%30.20e" % cost)
    assert numpy.isclose(ref_cost, cost, atol=1e-5)
@nottest
def test_expdist_kernel_column2D():
    # 2D variant of the column-kernel test; @nottest keeps nose from auto-running it
    test_expdist_kernel_column(dim=2)
@nottest
def test_expdist_kernel_column3D():
    # 3D variant of the column-kernel test
    test_expdist_kernel_column(dim=3)
@nottest
def test_hostfunction(dim=2):
    """Compare the GPUExpDistHost host-side function against a direct kernel call.

    The reference cost is obtained by calling the C wrappers (including, for
    ndim == 3, a preliminary rotate-scales step); the answer comes from
    test_GPUExpDistHost with an identity rotation matrix.
    """
    #setup test input
    ndim = numpy.int32(dim)
    m = numpy.int32(2003)
    n = numpy.int32(1009)
    nblocks = numpy.int32(numpy.ceil(m / (32*2)) * numpy.ceil(n / (4*4)))
    cost, A, B, scale_A, scale_B = generate_inputs(m, n, ndim, nblocks)
    #host function will do the rotation, so we need to supply the scales indirectly
    scale_B = numpy.absolute(0.01*numpy.random.randn(n*2).astype(numpy.float64))
    rotation_matrix = numpy.eye(3).astype(numpy.float64).flatten()
    #mimic Hamid's testcase
    # NOTE(review): unlike test_against_reference, these overrides are active
    # here -- the generated A/B/scales are replaced by constant inputs.
    A = numpy.ones_like(A)
    B = 2.0*numpy.ones_like(B)
    scale_A = 0.1*numpy.ones_like(scale_A)
    scale_B = 0.1*numpy.ones_like(scale_B)
    #call the reference function
    #with open(get_kernel_path('expdist')+'expdist_c.cu', 'r') as f:
    #    kernel_string = f.read()
    kernel_string = get_kernel_path('expdist')+'expdist_c.cu'
    f = "call_expdist"
    scale_B_rot = scale_B
    if ndim == 3:
        #first call the rotate scales kernel
        rotated_scales = numpy.zeros(n*9).astype(numpy.float64)
        args = [rotated_scales, rotation_matrix, n, scale_B]
        answer = run_kernel("call_rotate_scales_double", kernel_string, 1, args, {},
                   lang="C", compiler_options=['-I'+get_kernel_path('expdist'), "-Wno-deprecated-gpu-targets"], compiler='nvcc')
        scale_B_rot = answer[0]
        f = "call_expdist3D_double"
    arguments = [cost, A, B, m, n, ndim, scale_A, scale_B_rot]
    answer = run_kernel(f, kernel_string, 1, arguments, {},
               lang="C", compiler_options=['-I'+get_kernel_path('expdist'), "-Wno-deprecated-gpu-targets"], compiler='nvcc')
    ref_cost = answer[0][0]
    #call the host function
    # the numpy.int32(100000) argument is presumably the allocation size --
    # TODO confirm against the test_GPUExpDistHost signature
    arguments = [cost, A, B, m, n, ndim, scale_A, scale_B, numpy.int32(100000), rotation_matrix, np.int32(0)]
    #with open(get_kernel_path('expdist')+'expdist.cu', 'r') as f:
    #    kernel_string = f.read()
    kernel_string = get_kernel_path('expdist')+'expdist.cu'
    answer = run_kernel("test_GPUExpDistHost", kernel_string, 1, arguments, {},
               lang="C", compiler_options=compiler_options+['-arch=sm_30'])
    cost = answer[0][0]
    print("reference")
    print(ref_cost)
    print("answer")
    print(cost)
    assert numpy.isclose(ref_cost, cost, atol=1e-5)
def test_hostfunction2D():
    # nose entry point: 2D variant of the host-function test
    test_hostfunction(dim=2)
def test_hostfunction3D():
    # nose entry point: 3D variant of the host-function test
    test_hostfunction(dim=3)
@nottest
def test_hostfunction_largeN():
    """Force the host function down its large-N path and compare to ExpDist_column.

    The problem size is chosen so the 2D kernel's block count exceeds the
    allocation size, which should make the host code fall back to the
    column kernel.
    """
    #setup test input
    allocation_size = numpy.int32(1e6)
    size = numpy.int32(40000)
    ndim = numpy.int32(2)
    params = dict()
    params["block_size_x"] = 32
    params["block_size_y"] = 4
    params["tile_size_x"] = 2
    params["tile_size_y"] = 1
    params["use_shared_mem"] = 1
    #compute nblocks for when using the expdist kernel
    nblocks = numpy.int32( numpy.ceil(size / float(params["block_size_x"]*params["tile_size_x"])) *
                numpy.ceil(size / float(params["block_size_y"]*params["tile_size_y"])) )
    #ensure that this test actually causes the host code to call the column kernel
    assert nblocks > allocation_size
    #compute the nblocks actually used by the column kernel
    nblocks = numpy.int32(numpy.ceil(size / float(params["block_size_x"] * params["tile_size_x"])))
    #generate input data
    cost, A, B, scale_A, scale_B = generate_inputs(allocation_size, allocation_size, ndim, nblocks)
    #call the ExpDist_column kernel directly for reference
    arguments = [A, B, size, size, scale_A, scale_B, cost]
    grid_div_x = ["block_size_x", "tile_size_x"]
    with open(get_kernel_path('expdist')+'kernels.cu', 'r') as f:
        kernel_string = f.read()
    answer = run_kernel("ExpDist_column", kernel_string, size, arguments, params,
               compiler_options=compiler_options, grid_div_x=grid_div_x)
    # reference cost is the sum of the per-block cross terms
    ref_cost = numpy.sum(answer[6])
    #call the host function
    rot_matrix = numpy.eye(3).astype(numpy.float64)
    arguments = [cost, A, B, size, size, ndim, scale_A, scale_B, allocation_size, rot_matrix, np.int32(0)]
    with open(get_kernel_path('expdist')+'expdist.cu', 'r') as f:
        kernel_string = f.read()
    answer = run_kernel("test_GPUExpDistHost", kernel_string, size, arguments, {},
               lang="C", compiler_options=compiler_options+['-arch=sm_30'])
    cost = answer[0][0]
    print("reference")
    print(ref_cost)
    print("answer")
    print(cost)
    assert numpy.isclose(ref_cost, cost, atol=1e-5)
def test_rotate_scales():
    """Compare the GPU rotate_scales_double kernel against the C
    reference implementation of rotate_scales."""
    n = np.int32(200)
    # Rotation matrix: rotate about x by 32 degrees, then about y by 64.
    ax = np.deg2rad(32)
    ay = np.deg2rad(64)
    cx, sx = np.cos(ax), np.sin(ax)
    cy, sy = np.cos(ay), np.sin(ay)
    Rx = np.array([[cx, -sx, 0],
                   [sx, cx, 0],
                   [0, 0, 1]])
    Ry = np.array([[cy, 0, sy],
                   [0, 1, 0],
                   [-sy, 0, cy]])
    R = Ry.dot(Rx)
    rotated_scales = np.zeros((n, 3, 3), dtype=np.float64)
    scale_B = numpy.absolute(0.001*numpy.random.randn(n*2).astype(numpy.float64))
    # Reference result from the wrapped C host function.
    args = [rotated_scales.flatten(), R, n, scale_B]
    kernel_string = generate_wrapper("rotate_scales", "expdist_ref.h", args)
    cp = ['--std=c++11', '-I'+get_kernel_path('expdist'), "-Wno-deprecated-gpu-targets"]
    print(kernel_string)
    expected = run_kernel("call_function", kernel_string, 1, args, {},
                          lang="C", compiler_options=cp, compiler="nvcc")
    # GPU result: rotation matrices are passed via constant memory.
    with open(get_kernel_path('expdist') + 'expdist.cu', 'r') as f:
        kernel_string = f.read()
    cmem_args = {"rotation_matrixd": R.flatten(),
                 "rotation_matrix_transposedd": R.T.flatten()}
    params = dict(block_size_x=128)
    arguments = [rotated_scales.flatten(), n, scale_B]
    answer = run_kernel("rotate_scales_double", kernel_string, n, arguments,
                        params, lang="CUDA", compiler_options=cp,
                        cmem_args=cmem_args)
    print("reference")
    print(expected[0])
    print("answer")
    print(answer[0])
    assert np.allclose(answer[0], expected[0])
if __name__ == "__main__":
    # Run the 3D kernel test when this file is executed as a script.
    test_expdist_kernel(dim=3)
| [
"test_expdist_ref.bhatdist_python_reference",
"numpy.ones_like",
"numpy.sum",
"test_utils.get_kernel_path",
"numpy.deg2rad",
"numpy.ceil",
"numpy.random.randn",
"numpy.allclose",
"test_utils.generate_inputs",
"numpy.zeros",
"test_utils.call_reference_function",
"numpy.isclose",
"numpy.sin",
... | [((2250, 2388), 'kernel_tuner.run_kernel', 'run_kernel', (['"""reduce_cross_term"""', 'kernel_string', '(1)', 'arguments', "{'block_size_x': 128}"], {'compiler_options': 'compiler_options', 'grid_div_x': '[]'}), "('reduce_cross_term', kernel_string, 1, arguments, {\n 'block_size_x': 128}, compiler_options=compiler_options, grid_div_x=[])\n", (2260, 2388), False, 'from kernel_tuner import run_kernel\n'), ((2448, 2513), 'test_utils.call_reference_function', 'call_reference_function', (['m', 'n', 'ndim', 'A', 'B', 'scale_A', 'scale_B', 'cost'], {}), '(m, n, ndim, A, B, scale_A, scale_B, cost)\n', (2471, 2513), False, 'from test_utils import get_kernel_path, generate_inputs, call_reference_function\n'), ((3015, 3056), 'numpy.isclose', 'numpy.isclose', (['ref_cost', 'cost'], {'atol': '(1e-05)'}), '(ref_cost, cost, atol=1e-05)\n', (3028, 3056), False, 'import numpy\n'), ((3110, 3126), 'numpy.int32', 'numpy.int32', (['dim'], {}), '(dim)\n', (3121, 3126), False, 'import numpy\n'), ((3135, 3151), 'numpy.int32', 'numpy.int32', (['(103)'], {}), '(103)\n', (3146, 3151), False, 'import numpy\n'), ((3160, 3175), 'numpy.int32', 'numpy.int32', (['(59)'], {}), '(59)\n', (3171, 3175), False, 'import numpy\n'), ((3670, 3706), 'test_utils.generate_inputs', 'generate_inputs', (['m', 'n', 'ndim', 'nblocks'], {}), '(m, n, ndim, nblocks)\n', (3685, 3706), False, 'from test_utils import get_kernel_path, generate_inputs, call_reference_function\n'), ((4035, 4051), 'numpy.int32', 'numpy.int32', (['dim'], {}), '(dim)\n', (4046, 4051), False, 'import numpy\n'), ((4063, 4080), 'numpy.int32', 'numpy.int32', (['(2000)'], {}), '(2000)\n', (4074, 4080), False, 'import numpy\n'), ((4395, 4448), 'test_utils.generate_inputs', 'generate_inputs', (['allocation_size', 'size', 'ndim', 'nblocks'], {}), '(allocation_size, size, ndim, nblocks)\n', (4410, 4448), False, 'from test_utils import get_kernel_path, generate_inputs, call_reference_function\n'), ((4498, 4569), 
'test_utils.call_reference_function', 'call_reference_function', (['size', 'size', 'ndim', 'A', 'B', 'scale_A', 'scale_B', 'cost'], {}), '(size, size, ndim, A, B, scale_A, scale_B, cost)\n', (4521, 4569), False, 'from test_utils import get_kernel_path, generate_inputs, call_reference_function\n'), ((5504, 5642), 'kernel_tuner.run_kernel', 'run_kernel', (['"""reduce_cross_term"""', 'kernel_string', '(1)', 'arguments', "{'block_size_x': 128}"], {'compiler_options': 'compiler_options', 'grid_div_x': '[]'}), "('reduce_cross_term', kernel_string, 1, arguments, {\n 'block_size_x': 128}, compiler_options=compiler_options, grid_div_x=[])\n", (5514, 5642), False, 'from kernel_tuner import run_kernel\n'), ((5896, 5937), 'numpy.isclose', 'numpy.isclose', (['ref_cost', 'cost'], {'atol': '(1e-05)'}), '(ref_cost, cost, atol=1e-05)\n', (5909, 5937), False, 'import numpy\n'), ((6179, 6195), 'numpy.int32', 'numpy.int32', (['dim'], {}), '(dim)\n', (6190, 6195), False, 'import numpy\n'), ((6205, 6222), 'numpy.int32', 'numpy.int32', (['(2003)'], {}), '(2003)\n', (6216, 6222), False, 'import numpy\n'), ((6231, 6248), 'numpy.int32', 'numpy.int32', (['(1009)'], {}), '(1009)\n', (6242, 6248), False, 'import numpy\n'), ((6359, 6395), 'test_utils.generate_inputs', 'generate_inputs', (['m', 'n', 'ndim', 'nblocks'], {}), '(m, n, ndim, nblocks)\n', (6374, 6395), False, 'from test_utils import get_kernel_path, generate_inputs, call_reference_function\n'), ((6665, 6683), 'numpy.ones_like', 'numpy.ones_like', (['A'], {}), '(A)\n', (6680, 6683), False, 'import numpy\n'), ((8110, 8243), 'kernel_tuner.run_kernel', 'run_kernel', (['"""test_GPUExpDistHost"""', 'kernel_string', '(1)', 'arguments', '{}'], {'lang': '"""C"""', 'compiler_options': "(compiler_options + ['-arch=sm_30'])"}), "('test_GPUExpDistHost', kernel_string, 1, arguments, {}, lang='C',\n compiler_options=compiler_options + ['-arch=sm_30'])\n", (8120, 8243), False, 'from kernel_tuner import run_kernel\n'), ((8370, 8411), 'numpy.isclose', 
'numpy.isclose', (['ref_cost', 'cost'], {'atol': '(1e-05)'}), '(ref_cost, cost, atol=1e-05)\n', (8383, 8411), False, 'import numpy\n'), ((8613, 8635), 'numpy.int32', 'numpy.int32', (['(1000000.0)'], {}), '(1000000.0)\n', (8624, 8635), False, 'import numpy\n'), ((8641, 8659), 'numpy.int32', 'numpy.int32', (['(40000)'], {}), '(40000)\n', (8652, 8659), False, 'import numpy\n'), ((8671, 8685), 'numpy.int32', 'numpy.int32', (['(2)'], {}), '(2)\n', (8682, 8685), False, 'import numpy\n'), ((9462, 9526), 'test_utils.generate_inputs', 'generate_inputs', (['allocation_size', 'allocation_size', 'ndim', 'nblocks'], {}), '(allocation_size, allocation_size, ndim, nblocks)\n', (9477, 9526), False, 'from test_utils import get_kernel_path, generate_inputs, call_reference_function\n'), ((9807, 9937), 'kernel_tuner.run_kernel', 'run_kernel', (['"""ExpDist_column"""', 'kernel_string', 'size', 'arguments', 'params'], {'compiler_options': 'compiler_options', 'grid_div_x': 'grid_div_x'}), "('ExpDist_column', kernel_string, size, arguments, params,\n compiler_options=compiler_options, grid_div_x=grid_div_x)\n", (9817, 9937), False, 'from kernel_tuner import run_kernel\n'), ((9964, 9984), 'numpy.sum', 'numpy.sum', (['answer[6]'], {}), '(answer[6])\n', (9973, 9984), False, 'import numpy\n'), ((10285, 10422), 'kernel_tuner.run_kernel', 'run_kernel', (['"""test_GPUExpDistHost"""', 'kernel_string', 'size', 'arguments', '{}'], {'lang': '"""C"""', 'compiler_options': "(compiler_options + ['-arch=sm_30'])"}), "('test_GPUExpDistHost', kernel_string, size, arguments, {}, lang=\n 'C', compiler_options=compiler_options + ['-arch=sm_30'])\n", (10295, 10422), False, 'from kernel_tuner import run_kernel\n'), ((10548, 10589), 'numpy.isclose', 'numpy.isclose', (['ref_cost', 'cost'], {'atol': '(1e-05)'}), '(ref_cost, cost, atol=1e-05)\n', (10561, 10589), False, 'import numpy\n'), ((10630, 10643), 'numpy.int32', 'np.int32', (['(200)'], {}), '(200)\n', (10638, 10643), True, 'import numpy as np\n'), ((10689, 
10703), 'numpy.deg2rad', 'np.deg2rad', (['(32)'], {}), '(32)\n', (10699, 10703), True, 'import numpy as np\n'), ((10867, 10881), 'numpy.deg2rad', 'np.deg2rad', (['(64)'], {}), '(64)\n', (10877, 10881), True, 'import numpy as np\n'), ((11071, 11108), 'numpy.zeros', 'np.zeros', (['(n, 3, 3)'], {'dtype': 'np.float64'}), '((n, 3, 3), dtype=np.float64)\n', (11079, 11108), True, 'import numpy as np\n'), ((11265, 11321), 'test_matrix_functions.generate_wrapper', 'generate_wrapper', (['"""rotate_scales"""', '"""expdist_ref.h"""', 'args'], {}), "('rotate_scales', 'expdist_ref.h', args)\n", (11281, 11321), False, 'from test_matrix_functions import generate_wrapper\n'), ((11453, 11560), 'kernel_tuner.run_kernel', 'run_kernel', (['"""call_function"""', 'kernel_string', '(1)', 'args', '{}'], {'lang': '"""C"""', 'compiler_options': 'cp', 'compiler': '"""nvcc"""'}), "('call_function', kernel_string, 1, args, {}, lang='C',\n compiler_options=cp, compiler='nvcc')\n", (11463, 11560), False, 'from kernel_tuner import run_kernel\n'), ((11903, 12033), 'kernel_tuner.run_kernel', 'run_kernel', (['"""rotate_scales_double"""', 'kernel_string', 'n', 'arguments', 'params'], {'lang': '"""CUDA"""', 'compiler_options': 'cp', 'cmem_args': 'cmem_args'}), "('rotate_scales_double', kernel_string, n, arguments, params,\n lang='CUDA', compiler_options=cp, cmem_args=cmem_args)\n", (11913, 12033), False, 'from kernel_tuner import run_kernel\n'), ((12154, 12189), 'numpy.allclose', 'np.allclose', (['answer[0]', 'expected[0]'], {}), '(answer[0], expected[0])\n', (12165, 12189), True, 'import numpy as np\n'), ((676, 702), 'test_utils.get_kernel_path', 'get_kernel_path', (['"""expdist"""'], {}), "('expdist')\n", (691, 702), False, 'from test_utils import get_kernel_path, generate_inputs, call_reference_function\n'), ((1526, 1679), 'kernel_tuner.run_kernel', 'run_kernel', (['"""ExpDist"""', 'kernel_string', '(m, n)', 'arguments', 'params'], {'compiler_options': 'compiler_options', 'grid_div_x': 'grid_div_x', 
'grid_div_y': 'grid_div_y'}), "('ExpDist', kernel_string, (m, n), arguments, params,\n compiler_options=compiler_options, grid_div_x=grid_div_x, grid_div_y=\n grid_div_y)\n", (1536, 1679), False, 'from kernel_tuner import run_kernel\n'), ((1717, 1872), 'kernel_tuner.run_kernel', 'run_kernel', (['"""ExpDist3D"""', 'kernel_string', '(m, n)', 'arguments', 'params'], {'compiler_options': 'compiler_options', 'grid_div_x': 'grid_div_x', 'grid_div_y': 'grid_div_y'}), "('ExpDist3D', kernel_string, (m, n), arguments, params,\n compiler_options=compiler_options, grid_div_x=grid_div_x, grid_div_y=\n grid_div_y)\n", (1727, 1872), False, 'from kernel_tuner import run_kernel\n'), ((2027, 2048), 'numpy.sum', 'numpy.sum', (['cross_term'], {}), '(cross_term)\n', (2036, 2048), False, 'import numpy\n'), ((2601, 2668), 'test_expdist_ref.bhatdist_python_reference', 'bhatdist_python_reference', (['cost', 'A', 'B', 'm', 'n', 'ndim', 'scale_A', 'scale_B'], {}), '(cost, A, B, m, n, ndim, scale_A, scale_B)\n', (2626, 2668), False, 'from test_expdist_ref import bhatdist_python_reference\n'), ((4843, 4973), 'kernel_tuner.run_kernel', 'run_kernel', (['"""ExpDist_column"""', 'kernel_string', 'size', 'arguments', 'params'], {'compiler_options': 'compiler_options', 'grid_div_x': 'grid_div_x'}), "('ExpDist_column', kernel_string, size, arguments, params,\n compiler_options=compiler_options, grid_div_x=grid_div_x)\n", (4853, 4973), False, 'from kernel_tuner import run_kernel\n'), ((5016, 5148), 'kernel_tuner.run_kernel', 'run_kernel', (['"""ExpDist_column3D"""', 'kernel_string', 'size', 'arguments', 'params'], {'compiler_options': 'compiler_options', 'grid_div_x': 'grid_div_x'}), "('ExpDist_column3D', kernel_string, size, arguments, params,\n compiler_options=compiler_options, grid_div_x=grid_div_x)\n", (5026, 5148), False, 'from kernel_tuner import run_kernel\n'), ((6696, 6714), 'numpy.ones_like', 'numpy.ones_like', (['B'], {}), '(B)\n', (6711, 6714), False, 'import numpy\n'), ((6733, 6757), 
'numpy.ones_like', 'numpy.ones_like', (['scale_A'], {}), '(scale_A)\n', (6748, 6757), False, 'import numpy\n'), ((6776, 6800), 'numpy.ones_like', 'numpy.ones_like', (['scale_B'], {}), '(scale_B)\n', (6791, 6800), False, 'import numpy\n'), ((6958, 6984), 'test_utils.get_kernel_path', 'get_kernel_path', (['"""expdist"""'], {}), "('expdist')\n", (6973, 6984), False, 'from test_utils import get_kernel_path, generate_inputs, call_reference_function\n'), ((7885, 7904), 'numpy.int32', 'numpy.int32', (['(100000)'], {}), '(100000)\n', (7896, 7904), False, 'import numpy\n'), ((7923, 7934), 'numpy.int32', 'np.int32', (['(0)'], {}), '(0)\n', (7931, 7934), True, 'import numpy as np\n'), ((8057, 8083), 'test_utils.get_kernel_path', 'get_kernel_path', (['"""expdist"""'], {}), "('expdist')\n", (8072, 8083), False, 'from test_utils import get_kernel_path, generate_inputs, call_reference_function\n'), ((10160, 10171), 'numpy.int32', 'np.int32', (['(0)'], {}), '(0)\n', (10168, 10171), True, 'import numpy as np\n'), ((2150, 2164), 'numpy.zeros', 'numpy.zeros', (['(1)'], {}), '(1)\n', (2161, 2164), False, 'import numpy\n'), ((5398, 5412), 'numpy.zeros', 'numpy.zeros', (['(1)'], {}), '(1)\n', (5409, 5412), False, 'import numpy\n'), ((6275, 6299), 'numpy.ceil', 'numpy.ceil', (['(m / (32 * 2))'], {}), '(m / (32 * 2))\n', (6285, 6299), False, 'import numpy\n'), ((6300, 6323), 'numpy.ceil', 'numpy.ceil', (['(n / (4 * 4))'], {}), '(n / (4 * 4))\n', (6310, 6323), False, 'import numpy\n'), ((10031, 10043), 'numpy.eye', 'numpy.eye', (['(3)'], {}), '(3)\n', (10040, 10043), False, 'import numpy\n'), ((11352, 11378), 'test_utils.get_kernel_path', 'get_kernel_path', (['"""expdist"""'], {}), "('expdist')\n", (11367, 11378), False, 'from test_utils import get_kernel_path, generate_inputs, call_reference_function\n'), ((845, 871), 'test_utils.get_kernel_path', 'get_kernel_path', (['"""expdist"""'], {}), "('expdist')\n", (860, 871), False, 'from test_utils import get_kernel_path, generate_inputs, 
call_reference_function\n'), ((4612, 4638), 'test_utils.get_kernel_path', 'get_kernel_path', (['"""expdist"""'], {}), "('expdist')\n", (4627, 4638), False, 'from test_utils import get_kernel_path, generate_inputs, call_reference_function\n'), ((7138, 7156), 'numpy.zeros', 'numpy.zeros', (['(n * 9)'], {}), '(n * 9)\n', (7149, 7156), False, 'import numpy\n'), ((9709, 9735), 'test_utils.get_kernel_path', 'get_kernel_path', (['"""expdist"""'], {}), "('expdist')\n", (9724, 9735), False, 'from test_utils import get_kernel_path, generate_inputs, call_reference_function\n'), ((10187, 10213), 'test_utils.get_kernel_path', 'get_kernel_path', (['"""expdist"""'], {}), "('expdist')\n", (10202, 10213), False, 'from test_utils import get_kernel_path, generate_inputs, call_reference_function\n'), ((10724, 10739), 'numpy.cos', 'np.cos', (['theta_x'], {}), '(theta_x)\n', (10730, 10739), True, 'import numpy as np\n'), ((10783, 10798), 'numpy.sin', 'np.sin', (['theta_x'], {}), '(theta_x)\n', (10789, 10798), True, 'import numpy as np\n'), ((10800, 10815), 'numpy.cos', 'np.cos', (['theta_x'], {}), '(theta_x)\n', (10806, 10815), True, 'import numpy as np\n'), ((10902, 10917), 'numpy.cos', 'np.cos', (['theta_y'], {}), '(theta_y)\n', (10908, 10917), True, 'import numpy as np\n'), ((10922, 10937), 'numpy.sin', 'np.sin', (['theta_y'], {}), '(theta_y)\n', (10928, 10937), True, 'import numpy as np\n'), ((11011, 11026), 'numpy.cos', 'np.cos', (['theta_y'], {}), '(theta_y)\n', (11017, 11026), True, 'import numpy as np\n'), ((11615, 11641), 'test_utils.get_kernel_path', 'get_kernel_path', (['"""expdist"""'], {}), "('expdist')\n", (11630, 11641), False, 'from test_utils import get_kernel_path, generate_inputs, call_reference_function\n'), ((6514, 6539), 'numpy.random.randn', 'numpy.random.randn', (['(n * 2)'], {}), '(n * 2)\n', (6532, 6539), False, 'import numpy\n'), ((6583, 6595), 'numpy.eye', 'numpy.eye', (['(3)'], {}), '(3)\n', (6592, 6595), False, 'import numpy\n'), ((7690, 7716), 
'test_utils.get_kernel_path', 'get_kernel_path', (['"""expdist"""'], {}), "('expdist')\n", (7705, 7716), False, 'from test_utils import get_kernel_path, generate_inputs, call_reference_function\n'), ((10742, 10757), 'numpy.sin', 'np.sin', (['theta_x'], {}), '(theta_x)\n', (10748, 10757), True, 'import numpy as np\n'), ((10991, 11006), 'numpy.sin', 'np.sin', (['theta_y'], {}), '(theta_y)\n', (10997, 11006), True, 'import numpy as np\n'), ((11143, 11168), 'numpy.random.randn', 'numpy.random.randn', (['(n * 2)'], {}), '(n * 2)\n', (11161, 11168), False, 'import numpy\n'), ((7372, 7398), 'test_utils.get_kernel_path', 'get_kernel_path', (['"""expdist"""'], {}), "('expdist')\n", (7387, 7398), False, 'from test_utils import get_kernel_path, generate_inputs, call_reference_function\n')] |
import dask
import numpy as np
import tqdm
import time
import sys
import pandas as pd
import metod_alg as mt
from metod_alg import objective_functions as mt_obj
from metod_alg import metod_analysis as mt_ays
from metod_alg import check_metod_class as prev_mt_alg
def check_sp_fp(starting_points, store_minimizer_des, num_p, func_args):
    """
    Checks that the local minimizer at a starting point is the same as the
    local minimizer at the final point.

    Parameters
    ----------
    starting_points : 2-d array with shape (num_p, d)
                      Array containing starting points.
    store_minimizer_des : list
                          List containing local minimizers, found after
                          applying local descent at each starting point.
    num_p : integer
            Number of starting points.
    func_args : tuple
                Arguments passed to
                mt_ays.calc_minimizer_sev_quad_no_dist_check.

    Raises
    ------
    AssertionError
        If any starting point is classified to a different region of
        attraction than the minimizer found by descent from it.
    """
    mismatches = 0
    for k in range(num_p):
        pos_minimizer = (mt_ays.calc_minimizer_sev_quad_no_dist_check
                         (store_minimizer_des[k], *func_args))
        pos_sp = (mt_ays.calc_minimizer_sev_quad_no_dist_check
                  (starting_points[k], *func_args))
        if pos_minimizer != pos_sp:
            mismatches += 1
    assert mismatches == 0
def check_classification_points_metod(classification_points,
                                      unique_minimizers_metod,
                                      check_func,
                                      func_args):
    """
    Matches the discovered local minimizers from METOD (found either by
    local descent or early termination of descents) with true local
    minimizers.

    Parameters
    ----------
    classification_points : 1-d array with shape (num_p,)
                            Array containing the minimizer index number to
                            which each point belongs (found either by local
                            descent or early termination of descents within
                            the METOD algorithm).
    unique_minimizers_metod : list
                              Unique minimizers found by the METOD
                              algorithm.
    check_func : function
                 Function which returns the index of the true local
                 minimizer a given point belongs to.
    func_args : tuple
                Arguments passed to check_func.

    Returns
    -------
    class_store_x0 : 1-d array with shape (num_p,)
                     True minimizer index of each point's METOD-assigned
                     minimizer.
    """
    class_store_x0 = np.zeros(len(classification_points))
    for j, minimizer_index in enumerate(classification_points):
        class_store_x0[j] = check_func(
            unique_minimizers_metod[int(minimizer_index)], *func_args)
    return class_store_x0
def metod_numerical_exp_quad(f, g, func_args, d,
                             num_p, beta, tolerance, projection,
                             const, m, option, met, initial_guess,
                             set_x, bounds_set_x, relax_sd_it, sd_its,
                             check_func):
    """
    Apply the METOD algorithm with specified parameters. If sd_its =
    True, multistart will also be applied with the same starting points
    as METOD.

    Parameters
    ----------
    f : Objective function.
        ``f(x, *func_args) -> float``
        where ``x`` is a 1-D array with shape (d, ) and func_args is a
        tuple of arguments needed to compute the function value.
    g : Gradient.
        ``g(x, *func_args) -> 1-D array with shape (d, )``
        where ``x`` is a 1-D array with shape (d, ) and func_args is a
        tuple of arguments needed to compute the gradient.
    func_args : tuple
                Arguments passed to f and g.
    d : integer
        Size of dimension.
    num_p : integer
            Number of random points generated.
    beta : float or integer
           Small constant step size to compute the partner points.
    tolerance : float
                Stopping condition for steepest descent iterations. Apply
                steepest descent iterations until the norm of
                g(point, *func_args) is less than some tolerance. Also
                check that the norm of the gradient at a starting point is
                larger than some tolerance.
    projection : boolean
                 If projection is True, points are projected back to
                 bounds_set_x. If projection is False, points are not
                 projected.
    const : float or integer
            In order to classify a point as a new local minimizer, the
            euclidean distance between the point and all other discovered
            local minimizers must be larger than const.
    m : integer
        Number of iterations of steepest descent to apply to a point
        before making decision on terminating descents.
    option : string
             Choose from 'minimize', 'minimize_scalar' or
             'forward_backward_tracking'. For more information on
             'minimize' or 'minimize_scalar' see
             https://docs.scipy.org/doc/scipy/reference/optimize.html.
    met : string
          If option = 'minimize' or option = 'minimize_scalar', choose
          appropriate method. For more information see
          - https://docs.scipy.org/doc/scipy/reference/generated/
          scipy.optimize.minimize.html#scipy.optimize.minimize
          - https://docs.scipy.org/doc/scipy/reference/generated/
          scipy.optimize.minimize_scalar.html#scipy.optimize.minimize_scalar.
          If option = 'forward_backward_tracking', then met does not need
          to be specified.
    initial_guess : float or integer
                    Initial guess passed to scipy.optimize.minimize and
                    the upper bound for the bracket interval when using
                    the 'Brent' or 'Golden' method for
                    scipy.optimize.minimize_scalar. Also the initial
                    guess for option='forward_backward_tracking'. This is
                    recommended to be small.
    set_x : string
            If set_x = 'random', random starting points are generated for
            the METOD algorithm. If set_x = 'sobol', then a numpy.array
            with shape (num points * 2, d) of Sobol sequence samples are
            generated using SALib [1], which are randomly shuffled and
            used as starting points for the METOD algorithm.
    bounds_set_x : tuple
                   Bounds used for set_x = 'random', set_x = 'sobol' and
                   also for projection = True.
    relax_sd_it : float or integer
                  Multiply the step size by a small constant in [0, 2],
                  to obtain a new step size for steepest descent
                  iterations. This process is known as relaxed steepest
                  descent [2].
    sd_its : boolean
             If sd_its = True, multistart is applied with the same
             starting points as METOD. If sd_its = False, only METOD is
             applied.
    check_func : function
                 A function which checks the local minimizers obtained by
                 METOD or multistart with the true local minimizers of
                 the objective function.

    Returns
    -------
    if sd_its == True:
        unique_number_of_minimizers_mult : integer
            Total number of unique minimizers found by applying
            multistart.
        unique_number_of_minimizers_metod : integer
            Total number of unique minimizers found by applying METOD.
        extra_descents : integer
            Number of excessive descents. Occurs when [3, Eq. 9] does not
            hold for trajectories that belong to the region of attraction
            of the same local minimizer.
        time_taken_metod : float
            Amount of time (in seconds) the METOD algorithm takes.
        time_taken_des : float
            Amount of time (in seconds) multistart takes.
        np.min(func_vals_of_minimizers_metod) : float
            Minimum function value found using METOD.
        np.min(store_func_vals_mult) : float
            Minimum function value found using multistart.
        grad_evals_metod : 1-D array with shape (num_p,)
            Number of gradient evaluations used either to reach a local
            minimizer if [3, Eq. 9] does not hold or the number of
            gradient evaluations used during the warm up period.
        grad_evals_mult : 1-D array with shape (num_p,)
            Number of gradient evaluations used to reach a local
            minimizer for each starting point when using Multistart.
        store_grad_norms : 1-D array with shape (num_p,)
            Euclidean norm of the gradient at each starting point.
        starting_points : 2-D array with shape (num_p, d)
            Each row contains each starting point used by METOD and
            Multistart.
        prop_class_sd_metod : float
            Proportion of times the classification of a point using the
            METOD algorithm is different to the true classification using
            Multistart.
        count_gr_2 : integer
            Number of times inequality [3, Eq. 9] is satisfied for more
            than one region of attraction.
    else:
        unique_number_of_minimizers_metod : integer
            Total number of unique minimizers found by applying METOD.
        extra_descents : integer
            Number of excessive descents. Occurs when [3, Eq. 9] does not
            hold for trajectories that belong to the region of attraction
            of the same local minimizer.
        time_taken_metod : float
            Amount of time (in seconds) the METOD algorithm takes.
        np.min(func_vals_of_minimizers_metod) : float
            Minimum function value found using METOD.
        grad_evals_metod : 1-D array with shape (num_p,)
            Number of gradient evaluations used either to reach a local
            minimizer if [3, Eq. 9] does not hold or the number of
            gradient evaluations used during the warm up period.
        store_grad_norms : 1-D array with shape (num_p,)
            Euclidean norm of the gradient at each starting point.
        starting_points : 2-D array with shape (num_p, d)
            Each row contains each starting point used by METOD.
        count_gr_2 : integer
            Number of times inequality [3, Eq. 9] is satisfied for more
            than one region of attraction.

    References
    ----------
    1) <NAME> al, (2017), SALib: An open-source Python library for
    Sensitivity Analysis, Journal of Open Source Software, 2(9), 97,
    doi:10.21105/joss.00097
    2) <NAME>., <NAME>.: Relaxed steepest descent and
    cauchy-barzilai- borwein method. Computational Optimization and
    Applications 21(2), 155-167 (2002)
    3) <NAME>., <NAME>., <NAME>., <NAME>.: Multistart
    with early termination of descents. Journal of Global Optimization
    pp. 1-16 (2019)
    """
    t0 = time.time()
    (unique_minimizers_metod,
     unique_number_of_minimizers_metod,
     func_vals_of_minimizers_metod,
     extra_descents,
     starting_points,
     grad_evals_metod,
     classification_points,
     count_gr_2, missed_minimizers,
     total_checks) = prev_mt_alg.metod_class(f, g, func_args, d, num_p, beta,
                                             tolerance, projection, const, m,
                                             option, met, initial_guess,
                                             set_x, bounds_set_x, relax_sd_it)
    time_taken_metod = time.time() - t0
    # Verify the minimizers found by METOD against the true minimizers.
    mt_obj.check_unique_minimizers(unique_minimizers_metod,
                                   unique_number_of_minimizers_metod,
                                   check_func, func_args)
    class_store_x0 = check_classification_points_metod(classification_points,
                                                      unique_minimizers_metod,
                                                      check_func,
                                                      func_args)
    # Euclidean norm of the gradient at each starting point.
    store_grad_norms = np.array([np.linalg.norm(g(x0, *func_args))
                                 for x0 in starting_points])
    if sd_its:
        # Apply multistart from the same starting points used by METOD.
        (unique_minimizers_mult,
         unique_number_of_minimizers_mult,
         store_func_vals_mult,
         time_taken_des,
         store_minimizer_des,
         grad_evals_mult) = mt.multistart(f, g, func_args, d, starting_points,
                                          num_p, tolerance, projection, const,
                                          option, met, initial_guess,
                                          bounds_set_x, relax_sd_it)
        mt_obj.check_unique_minimizers(store_minimizer_des,
                                       unique_number_of_minimizers_mult,
                                       check_func, func_args)
        mt_obj.check_minimizers_mult_metod(unique_minimizers_metod,
                                           unique_minimizers_mult)
        # Proportion of points where METOD's classification disagrees
        # with the true classification found by multistart.
        prop_class_sd_metod = (prev_mt_alg.check_classification_sd_metod
                               (store_minimizer_des, class_store_x0,
                                check_func, func_args))
        check_sp_fp(starting_points, store_minimizer_des, num_p, func_args)
        return (unique_number_of_minimizers_mult,
                unique_number_of_minimizers_metod,
                extra_descents,
                time_taken_metod,
                time_taken_des,
                np.min(func_vals_of_minimizers_metod),
                np.min(store_func_vals_mult),
                grad_evals_metod,
                grad_evals_mult,
                store_grad_norms,
                starting_points,
                prop_class_sd_metod,
                count_gr_2, missed_minimizers,
                total_checks)
    else:
        return (unique_number_of_minimizers_metod,
                extra_descents,
                time_taken_metod,
                np.min(func_vals_of_minimizers_metod),
                grad_evals_metod,
                store_grad_norms,
                starting_points,
                count_gr_2, missed_minimizers,
                total_checks)
@dask.delayed
def all_functions_metod(f, g, p, lambda_1, lambda_2, d,
                        num_p, beta, tolerance, projection,
                        const, m, option, met, initial_guess,
                        set_x, bounds_set_x, relax_sd_it, sd_its,
                        check_func, num_func, random_seed, type_func):
    """
    Generate each function required for the METOD algorithm and save outputs
    to csv files.
    Parameters
    ----------
    f : Objective function.
        ``f(x, *func_args) -> float``
        where ``x`` is a 1-D array with shape (d, ) and func_args is a
        tuple of arguments needed to compute the function value.
    g : Gradient.
         ``g(x, *func_args) -> 1-D array with shape (d, )``
        where ``x`` is a 1-D array with shape (d, ) and func_args is a
        tuple of arguments needed to compute the gradient.
    p : integer
        Number of local minima.
    lambda_1 : integer
               Smallest eigenvalue of diagonal matrix.
    lambda_2 : integer
               Largest eigenvalue of diagonal matrix.
    d : integer
        Size of dimension.
    num_p : integer
            Number of random points generated.
    beta : float or integer
           Small constant step size to compute the partner points.
    tolerance: float
               Stopping condition for steepest descent iterations. Apply
               steepest descent iterations until the norm
               of g(point, *func_args) is less than some tolerance.
               Also check that the norm of the gradient at a starting point
               is larger than some tolerance.
    projection : boolean
                 If projection is True, points are projected back to
                 bounds_set_x. If projection is False, points are
                 not projected.
    const : float or integer
            In order to classify a point as a new local minimizer, the
            euclidean distance between the point and all other discovered local
            minimizers must be larger than const.
    m : integer
        Number of iterations of steepest descent to apply to a point
        before making decision on terminating descents.
    option : string
             Choose from 'minimize', 'minimize_scalar' or
             'forward_backward_tracking'. For more
             information on 'minimize' or 'minimize_scalar' see
             https://docs.scipy.org/doc/scipy/reference/optimize.html.
    met : string
          If option = 'minimize' or option = 'minimize_scalar', choose
          appropiate method. For more information see
          - https://docs.scipy.org/doc/scipy/reference/generated/
          scipy.optimize.minimize.html#scipy.optimize.minimize
          - https://docs.scipy.org/doc/scipy/reference/generated/
          scipy.optimize.minimize_scalar.html#scipy.optimize.minimize_scalar.
          If option = 'forward_backward_tracking', then met does not need to
          be specified.
    initial_guess : float or integer
                    Initial guess passed to scipy.optimize.minimize and the
                    upper bound for the bracket interval when using the
                    'Brent' or 'Golden' method for
                    scipy.optimize.minimize_scalar. Also the initial guess
                    for option='forward_backward_tracking'. This
                    is recommended to be small.
    set_x : string
            If set_x = 'random', random starting points
            are generated for the METOD algorithm. If set_x = 'sobol',
            then a numpy.array with shape (num points * 2, d) of Sobol
            sequence samples are generated using SALib [1], which are
            randomly shuffled and used as starting points for the METOD
            algorithm.
    bounds_set_x : tuple
                   Bounds used for set_x = 'random', set_x = 'sobol' and
                   also for projection = True.
    relax_sd_it : float or integer
                  Multiply the step size by a small constant in [0, 2], to
                  obtain a new step size for steepest descent iterations. This
                  process is known as relaxed steepest descent [2].
    sd_its : boolean
             If sd_its = True, multistart is applied with the same starting
             points as METOD. If sd_its = False, only METOD is applied.
    check_func : function
                 A function which checks the local minimizers obtained by
                 METOD or multistart with the true local minimizers of the
                 objective function.
    num_func : integer
               Number of random functions to generate.
    random_seed : integer
                  Value to initialize pseudo-random number generator.
    type_func : string
                Indicate version of objective function.
    References
    ----------
    1) <NAME> al, (2017), SALib: An open-source Python library for
    Sensitivity Analysis, Journal of Open Source Software, 2(9), 97, doi:10.
    21105/joss.00097
    2) <NAME>., <NAME>.: Relaxed steepest descent and
    cauchy-barzilai- borwein method. Computational Optimization and
    Applications 21(2), 155–167 (2002)
    3) <NAME>., <NAME>., <NAME>., <NAME>.: Multistart
    with early termination of descents. Journal of Global Optimization pp.
    1–16 (2019)
    """
    # Per-function accumulators for the METOD results.
    number_minimizers_per_func_metod = np.zeros(num_func)
    number_extra_descents_per_func_metod = np.zeros(num_func)
    time_metod = np.zeros(num_func)
    func_val_metod = np.zeros(num_func)
    store_grad_norms = np.zeros((num_func, num_p))
    store_grad_evals_metod = np.zeros((num_func, num_p))
    store_count_gr_2 = np.zeros(num_func)
    store_missed_minimizers = np.zeros(num_func)
    store_total_checks = np.zeros(num_func)
    if sd_its:
        # Additional accumulators only needed when multistart is also run.
        number_minimizers_per_func_multistart = np.zeros(num_func)
        time_multistart = np.zeros(num_func)
        func_val_multistart = np.zeros(num_func)
        store_grad_evals_mult = np.zeros((num_func, num_p))
        store_prop_class_sd_metod = np.zeros(num_func)
    np.random.seed(random_seed)
    for func in tqdm.tqdm(range(num_func)):
        # Build a random quadratic objective: p rotated diagonal matrices
        # with eigenvalues in [lambda_1, lambda_2] and random centres x0.
        store_A = np.zeros((p, d, d))
        store_x0 = np.zeros((p, d))
        store_rotation = np.zeros((p, d, d))
        for i in range(p):
            diag_vals = np.zeros(d)
            # Pin the extreme eigenvalues, then fill the rest strictly
            # inside (lambda_1, lambda_2).
            diag_vals[:2] = np.array([lambda_1, lambda_2])
            diag_vals[2:] = np.random.uniform(lambda_1 + 1,
                                              lambda_2 - 1, (d - 2))
            store_A[i] = np.diag(diag_vals)
            store_x0[i] = np.random.uniform(0, 1, (d,))
            store_rotation[i] = mt_obj.calculate_rotation_matrix(d, 3)
        # R^T A R for each of the p minima (batched matrix product).
        matrix_test = (np.transpose(store_rotation, (0, 2, 1)) @ store_A @
                       store_rotation)
        func_args = (p, store_x0, matrix_test)
        if sd_its:
            (number_minimizers_per_func_multistart[func],
             number_minimizers_per_func_metod[func],
             number_extra_descents_per_func_metod[func],
             time_metod[func],
             time_multistart[func],
             func_val_metod[func],
             func_val_multistart[func],
             store_grad_evals_metod[func],
             store_grad_evals_mult[func],
             store_grad_norms[func],
             starting_points,
             store_prop_class_sd_metod[func],
             store_count_gr_2[func],
             store_missed_minimizers[func],
             store_total_checks[func]) = (metod_numerical_exp_quad
                                          (f, g, func_args, d,
                                           num_p, beta, tolerance, projection,
                                           const, m, option, met, initial_guess,
                                           set_x, bounds_set_x, relax_sd_it,
                                           sd_its, check_func))
            # Accumulate the starting points of every function into one
            # (num_func * num_p, d) array for a single csv write at the end.
            if func == 0:
                store_starting_points = np.array(starting_points)
            else:
                store_starting_points = np.vstack([store_starting_points,
                                                   np.array(starting_points)])
        else:
            (number_minimizers_per_func_metod[func],
             number_extra_descents_per_func_metod[func],
             time_metod[func],
             func_val_metod[func],
             store_grad_evals_metod[func],
             store_grad_norms[func],
             starting_points,
             store_count_gr_2[func],
             store_missed_minimizers[func],
             store_total_checks[func]) = (metod_numerical_exp_quad
                                          (f, g, func_args, d,
                                           num_p, beta, tolerance, projection,
                                           const, m, option, met, initial_guess,
                                           set_x, bounds_set_x, relax_sd_it,
                                           sd_its, check_func))
            if func == 0:
                store_starting_points = np.array(starting_points)
            else:
                store_starting_points = np.vstack([store_starting_points,
                                                   np.array(starting_points)])
    # Persist raw per-point diagnostics.
    np.savetxt('quad_grad_norm_beta_%s_m=%s_d=%s'
               '_p=%s_%s_%s_%s_%s_%s.csv' %
               (beta, m, d, p, set_x, num_p, option[0], initial_guess,
                type_func),
               store_grad_norms,
               delimiter=',')
    np.savetxt('quad_grad_evals_metod_beta_%s_m=%s_d=%s'
               'p=%s_%s_%s_%s_%s_%s.csv' %
               (beta, m, d, p, set_x, num_p, option[0], initial_guess,
                type_func),
               store_grad_evals_metod,
               delimiter=',')
    if sd_its:
        table = pd.DataFrame({
                "number_minimizers_per_func_metod":
                number_minimizers_per_func_metod,
                "number_extra_descents_per_func_metod":
                number_extra_descents_per_func_metod,
                "number_minimizers_per_func_multistart":
                number_minimizers_per_func_multistart,
                "time_metod": time_metod,
                "time_multistart": time_multistart,
                "min_func_val_metod": func_val_metod,
                "min_func_val_multistart": func_val_multistart,
                "prop_class": store_prop_class_sd_metod,
                "greater_than_one_region": store_count_gr_2,
                "total_times_minimizer_missed": store_missed_minimizers,
                "total_no_times_inequals_sat": store_total_checks})
        # Bug fix: the original called table.to_csv(table.to_csv('...')),
        # which wrote the file with the inner call and then serialised the
        # frame a second time against the inner call's None return value.
        # A single call writes the intended csv.
        table.to_csv('quad_sd_metod_beta_%s_m=%s_d=%s_p=%s'
                     '_%s_%s_%s_%s_%s.csv' %
                     (beta, m, d, p, set_x,
                      num_p, option[0], initial_guess, type_func))
        np.savetxt('quad_grad_evals_mult_beta_%s_m=%s_d=%s'
                   'p=%s_%s_%s_%s_%s_%s.csv' %
                   (beta, m, d, p, set_x, num_p, option[0], initial_guess,
                    type_func),
                   store_grad_evals_mult,
                   delimiter=',')
        np.savetxt('quad_sd_start_p_beta_%s_m=%s_d=%s'
                   '_p=%s_%s_%s_%s_%s_%s.csv' %
                   (beta, m, d, p, set_x, num_p, option[0], initial_guess,
                    type_func),
                   store_starting_points,
                   delimiter=',')
    else:
        table = pd.DataFrame({
                "number_minimizers_per_func_metod":
                number_minimizers_per_func_metod,
                "number_extra_descents_per_func_metod":
                number_extra_descents_per_func_metod,
                "time_metod": time_metod,
                "min_func_val_metod": func_val_metod,
                "greater_than_one_region": store_count_gr_2,
                "total_times_minimizer_missed": store_missed_minimizers,
                "total_no_times_inequals_sat": store_total_checks})
        # Bug fix: single to_csv call (see note above for the sd_its branch).
        table.to_csv('quad_metod_beta_%s_m=%s_d=%s_p=%s'
                     '_%s_%s_%s_%s_%s.csv' %
                     (beta, m, d, p, set_x,
                      num_p, option[0], initial_guess, type_func))
        np.savetxt('quad_start_p_beta_%s_m=%s_d=%s'
                   '_p=%s_%s_%s_%s_%s_%s.csv' %
                   (beta, m, d, p, set_x, num_p, option[0], initial_guess,
                    type_func),
                   store_starting_points,
                   delimiter=',')
if __name__ == "__main__":
    """
    To obtain the same results as in [1], set optional input parameters
    to the following:
    d : set the dimension to either 50 or 100.
    num_p : 1000.
    beta : set beta to be either 0.005, 0.01, 0.05 or 0.1.
    m : set warm up period to be either 2 or 3.
    set_x : 'random'.
    sd_its : True.
    p : 50.
    option : 'minimize'.
    met : 'Nelder-Mead'.
    initial_guess : 0.05.
    random_seed : either random_seed = 1997 when d = 50 or
                  random_seed = 121 when d = 100.
    type_func : either type_func = 'new' to obtain results in thesis or
                type_func = 'old' to obtain results in [1].
    References
    ----------
    1) <NAME>., <NAME>., <NAME>., <NAME>.: Multistart
    with early termination of descents. Journal of Global Optimization pp.
    1–16 (2019)
    """
    # Parse experiment configuration from the command line.
    d = int(sys.argv[1])
    num_p = int(sys.argv[2])
    beta = float(sys.argv[3])
    m = int(sys.argv[4])
    set_x = str(sys.argv[5])
    # NOTE(review): eval() on a CLI argument executes arbitrary code; kept
    # for backward compatibility (expects 'True'/'False'), but
    # sys.argv[6] == 'True' would be safer.
    sd_its = eval(sys.argv[6])
    p = int(sys.argv[7])
    option = str(sys.argv[8])
    met = str(sys.argv[9])
    initial_guess = float(sys.argv[10])
    random_seed = int(sys.argv[11])
    type_func = str(sys.argv[12])
    # Select objective/gradient/checker implementations by function version.
    if type_func == 'old':
        f = prev_mt_alg.quad_function
        g = prev_mt_alg.quad_gradient
        check_func = prev_mt_alg.calc_minimizer_quad
    elif type_func == 'new':
        f = mt_obj.several_quad_function
        g = mt_obj.several_quad_gradient
        check_func = mt_obj.calc_minimizer_sev_quad
    else:
        # Bug fix: previously an unrecognised type_func left f, g and
        # check_func unbound, producing a confusing NameError later on.
        raise ValueError("type_func must be either 'old' or 'new', got %r"
                         % type_func)
    # Fixed experiment settings.
    tolerance = 0.001
    projection = False
    const = 0.1
    bounds_set_x = (0, 1)
    relax_sd_it = 1
    lambda_1 = 1
    lambda_2 = 10
    num_func = 100
    num_workers = 1
    task = all_functions_metod(f, g, p, lambda_1, lambda_2, d,
                               num_p, beta, tolerance, projection,
                               const, m, option, met, initial_guess,
                               set_x, bounds_set_x, relax_sd_it, sd_its,
                               check_func, num_func, random_seed, type_func)
    # Execute the (dask-delayed) experiment.
    result = dask.compute(task, num_workers=num_workers)
| [
"pandas.DataFrame",
"numpy.random.uniform",
"metod_alg.objective_functions.calculate_rotation_matrix",
"numpy.random.seed",
"metod_alg.check_metod_class.metod_class",
"metod_alg.objective_functions.check_minimizers_mult_metod",
"metod_alg.check_metod_class.check_classification_sd_metod",
"numpy.savetx... | [((11755, 11766), 'time.time', 'time.time', ([], {}), '()\n', (11764, 11766), False, 'import time\n'), ((12024, 12183), 'metod_alg.check_metod_class.metod_class', 'prev_mt_alg.metod_class', (['f', 'g', 'func_args', 'd', 'num_p', 'beta', 'tolerance', 'projection', 'const', 'm', 'option', 'met', 'initial_guess', 'set_x', 'bounds_set_x', 'relax_sd_it'], {}), '(f, g, func_args, d, num_p, beta, tolerance,\n projection, const, m, option, met, initial_guess, set_x, bounds_set_x,\n relax_sd_it)\n', (12047, 12183), True, 'from metod_alg import check_metod_class as prev_mt_alg\n'), ((12320, 12331), 'time.time', 'time.time', ([], {}), '()\n', (12329, 12331), False, 'import time\n'), ((12365, 12482), 'metod_alg.objective_functions.check_unique_minimizers', 'mt_obj.check_unique_minimizers', (['unique_minimizers_metod', 'unique_number_of_minimizers_metod', 'check_func', 'func_args'], {}), '(unique_minimizers_metod,\n unique_number_of_minimizers_metod, check_func, func_args)\n', (12395, 12482), True, 'from metod_alg import objective_functions as mt_obj\n'), ((12865, 12880), 'numpy.zeros', 'np.zeros', (['num_p'], {}), '(num_p)\n', (12873, 12880), True, 'import numpy as np\n'), ((20384, 20402), 'numpy.zeros', 'np.zeros', (['num_func'], {}), '(num_func)\n', (20392, 20402), True, 'import numpy as np\n'), ((20448, 20466), 'numpy.zeros', 'np.zeros', (['num_func'], {}), '(num_func)\n', (20456, 20466), True, 'import numpy as np\n'), ((20486, 20504), 'numpy.zeros', 'np.zeros', (['num_func'], {}), '(num_func)\n', (20494, 20504), True, 'import numpy as np\n'), ((20528, 20546), 'numpy.zeros', 'np.zeros', (['num_func'], {}), '(num_func)\n', (20536, 20546), True, 'import numpy as np\n'), ((20572, 20599), 'numpy.zeros', 'np.zeros', (['(num_func, num_p)'], {}), '((num_func, num_p))\n', (20580, 20599), True, 'import numpy as np\n'), ((20629, 20656), 'numpy.zeros', 'np.zeros', (['(num_func, num_p)'], {}), '((num_func, num_p))\n', (20637, 20656), True, 'import numpy as np\n'), 
((20680, 20698), 'numpy.zeros', 'np.zeros', (['num_func'], {}), '(num_func)\n', (20688, 20698), True, 'import numpy as np\n'), ((20731, 20749), 'numpy.zeros', 'np.zeros', (['num_func'], {}), '(num_func)\n', (20739, 20749), True, 'import numpy as np\n'), ((20777, 20795), 'numpy.zeros', 'np.zeros', (['num_func'], {}), '(num_func)\n', (20785, 20795), True, 'import numpy as np\n'), ((21110, 21137), 'numpy.random.seed', 'np.random.seed', (['random_seed'], {}), '(random_seed)\n', (21124, 21137), True, 'import numpy as np\n'), ((24214, 24395), 'numpy.savetxt', 'np.savetxt', (["('quad_grad_norm_beta_%s_m=%s_d=%s_p=%s_%s_%s_%s_%s_%s.csv' % (beta, m, d,\n p, set_x, num_p, option[0], initial_guess, type_func))", 'store_grad_norms'], {'delimiter': '""","""'}), "('quad_grad_norm_beta_%s_m=%s_d=%s_p=%s_%s_%s_%s_%s_%s.csv' % (\n beta, m, d, p, set_x, num_p, option[0], initial_guess, type_func),\n store_grad_norms, delimiter=',')\n", (24224, 24395), True, 'import numpy as np\n'), ((24471, 24663), 'numpy.savetxt', 'np.savetxt', (["('quad_grad_evals_metod_beta_%s_m=%s_d=%sp=%s_%s_%s_%s_%s_%s.csv' % (beta,\n m, d, p, set_x, num_p, option[0], initial_guess, type_func))", 'store_grad_evals_metod'], {'delimiter': '""","""'}), "('quad_grad_evals_metod_beta_%s_m=%s_d=%sp=%s_%s_%s_%s_%s_%s.csv' %\n (beta, m, d, p, set_x, num_p, option[0], initial_guess, type_func),\n store_grad_evals_metod, delimiter=',')\n", (24481, 24663), True, 'import numpy as np\n'), ((29871, 29914), 'dask.compute', 'dask.compute', (['task'], {'num_workers': 'num_workers'}), '(task, num_workers=num_workers)\n', (29883, 29914), False, 'import dask\n'), ((913, 998), 'metod_alg.metod_analysis.calc_minimizer_sev_quad_no_dist_check', 'mt_ays.calc_minimizer_sev_quad_no_dist_check', (['store_minimizer_des[k]', '*func_args'], {}), '(store_minimizer_des[k], *func_args\n )\n', (957, 998), True, 'from metod_alg import metod_analysis as mt_ays\n'), ((1039, 1115), 'metod_alg.metod_analysis.calc_minimizer_sev_quad_no_dist_check', 
'mt_ays.calc_minimizer_sev_quad_no_dist_check', (['starting_points[k]', '*func_args'], {}), '(starting_points[k], *func_args)\n', (1083, 1115), True, 'from metod_alg import metod_analysis as mt_ays\n'), ((13204, 13350), 'metod_alg.multistart', 'mt.multistart', (['f', 'g', 'func_args', 'd', 'starting_points', 'num_p', 'tolerance', 'projection', 'const', 'option', 'met', 'initial_guess', 'bounds_set_x', 'relax_sd_it'], {}), '(f, g, func_args, d, starting_points, num_p, tolerance,\n projection, const, option, met, initial_guess, bounds_set_x, relax_sd_it)\n', (13217, 13350), True, 'import metod_alg as mt\n'), ((13482, 13594), 'metod_alg.objective_functions.check_unique_minimizers', 'mt_obj.check_unique_minimizers', (['store_minimizer_des', 'unique_number_of_minimizers_mult', 'check_func', 'func_args'], {}), '(store_minimizer_des,\n unique_number_of_minimizers_mult, check_func, func_args)\n', (13512, 13594), True, 'from metod_alg import objective_functions as mt_obj\n'), ((13678, 13765), 'metod_alg.objective_functions.check_minimizers_mult_metod', 'mt_obj.check_minimizers_mult_metod', (['unique_minimizers_metod', 'unique_minimizers_mult'], {}), '(unique_minimizers_metod,\n unique_minimizers_mult)\n', (13712, 13765), True, 'from metod_alg import objective_functions as mt_obj\n'), ((13837, 13942), 'metod_alg.check_metod_class.check_classification_sd_metod', 'prev_mt_alg.check_classification_sd_metod', (['store_minimizer_des', 'class_store_x0', 'check_func', 'func_args'], {}), '(store_minimizer_des,\n class_store_x0, check_func, func_args)\n', (13878, 13942), True, 'from metod_alg import check_metod_class as prev_mt_alg\n'), ((20869, 20887), 'numpy.zeros', 'np.zeros', (['num_func'], {}), '(num_func)\n', (20877, 20887), True, 'import numpy as np\n'), ((20916, 20934), 'numpy.zeros', 'np.zeros', (['num_func'], {}), '(num_func)\n', (20924, 20934), True, 'import numpy as np\n'), ((20967, 20985), 'numpy.zeros', 'np.zeros', (['num_func'], {}), '(num_func)\n', (20975, 20985), 
True, 'import numpy as np\n'), ((21020, 21047), 'numpy.zeros', 'np.zeros', (['(num_func, num_p)'], {}), '((num_func, num_p))\n', (21028, 21047), True, 'import numpy as np\n'), ((21084, 21102), 'numpy.zeros', 'np.zeros', (['num_func'], {}), '(num_func)\n', (21092, 21102), True, 'import numpy as np\n'), ((21200, 21219), 'numpy.zeros', 'np.zeros', (['(p, d, d)'], {}), '((p, d, d))\n', (21208, 21219), True, 'import numpy as np\n'), ((21239, 21255), 'numpy.zeros', 'np.zeros', (['(p, d)'], {}), '((p, d))\n', (21247, 21255), True, 'import numpy as np\n'), ((21281, 21300), 'numpy.zeros', 'np.zeros', (['(p, d, d)'], {}), '((p, d, d))\n', (21289, 21300), True, 'import numpy as np\n'), ((24775, 25404), 'pandas.DataFrame', 'pd.DataFrame', (["{'number_minimizers_per_func_metod': number_minimizers_per_func_metod,\n 'number_extra_descents_per_func_metod':\n number_extra_descents_per_func_metod,\n 'number_minimizers_per_func_multistart':\n number_minimizers_per_func_multistart, 'time_metod': time_metod,\n 'time_multistart': time_multistart, 'min_func_val_metod':\n func_val_metod, 'min_func_val_multistart': func_val_multistart,\n 'prop_class': store_prop_class_sd_metod, 'greater_than_one_region':\n store_count_gr_2, 'total_times_minimizer_missed':\n store_missed_minimizers, 'total_no_times_inequals_sat': store_total_checks}"], {}), "({'number_minimizers_per_func_metod':\n number_minimizers_per_func_metod,\n 'number_extra_descents_per_func_metod':\n number_extra_descents_per_func_metod,\n 'number_minimizers_per_func_multistart':\n number_minimizers_per_func_multistart, 'time_metod': time_metod,\n 'time_multistart': time_multistart, 'min_func_val_metod':\n func_val_metod, 'min_func_val_multistart': func_val_multistart,\n 'prop_class': store_prop_class_sd_metod, 'greater_than_one_region':\n store_count_gr_2, 'total_times_minimizer_missed':\n store_missed_minimizers, 'total_no_times_inequals_sat': store_total_checks}\n )\n", (24787, 25404), True, 'import pandas as pd\n'), ((26017, 
26207), 'numpy.savetxt', 'np.savetxt', (["('quad_grad_evals_mult_beta_%s_m=%s_d=%sp=%s_%s_%s_%s_%s_%s.csv' % (beta, m,\n d, p, set_x, num_p, option[0], initial_guess, type_func))", 'store_grad_evals_mult'], {'delimiter': '""","""'}), "('quad_grad_evals_mult_beta_%s_m=%s_d=%sp=%s_%s_%s_%s_%s_%s.csv' %\n (beta, m, d, p, set_x, num_p, option[0], initial_guess, type_func),\n store_grad_evals_mult, delimiter=',')\n", (26027, 26207), True, 'import numpy as np\n'), ((26307, 26494), 'numpy.savetxt', 'np.savetxt', (["('quad_sd_start_p_beta_%s_m=%s_d=%s_p=%s_%s_%s_%s_%s_%s.csv' % (beta, m, d,\n p, set_x, num_p, option[0], initial_guess, type_func))", 'store_starting_points'], {'delimiter': '""","""'}), "('quad_sd_start_p_beta_%s_m=%s_d=%s_p=%s_%s_%s_%s_%s_%s.csv' % (\n beta, m, d, p, set_x, num_p, option[0], initial_guess, type_func),\n store_starting_points, delimiter=',')\n", (26317, 26494), True, 'import numpy as np\n'), ((26611, 27019), 'pandas.DataFrame', 'pd.DataFrame', (["{'number_minimizers_per_func_metod': number_minimizers_per_func_metod,\n 'number_extra_descents_per_func_metod':\n number_extra_descents_per_func_metod, 'time_metod': time_metod,\n 'min_func_val_metod': func_val_metod, 'greater_than_one_region':\n store_count_gr_2, 'total_times_minimizer_missed':\n store_missed_minimizers, 'total_no_times_inequals_sat': store_total_checks}"], {}), "({'number_minimizers_per_func_metod':\n number_minimizers_per_func_metod,\n 'number_extra_descents_per_func_metod':\n number_extra_descents_per_func_metod, 'time_metod': time_metod,\n 'min_func_val_metod': func_val_metod, 'greater_than_one_region':\n store_count_gr_2, 'total_times_minimizer_missed':\n store_missed_minimizers, 'total_no_times_inequals_sat': store_total_checks}\n )\n", (26623, 27019), True, 'import pandas as pd\n'), ((27505, 27688), 'numpy.savetxt', 'np.savetxt', (["('quad_start_p_beta_%s_m=%s_d=%s_p=%s_%s_%s_%s_%s_%s.csv' % (beta, m, d, p,\n set_x, num_p, option[0], initial_guess, type_func))", 
'store_starting_points'], {'delimiter': '""","""'}), "('quad_start_p_beta_%s_m=%s_d=%s_p=%s_%s_%s_%s_%s_%s.csv' % (beta,\n m, d, p, set_x, num_p, option[0], initial_guess, type_func),\n store_starting_points, delimiter=',')\n", (27515, 27688), True, 'import numpy as np\n'), ((14297, 14334), 'numpy.min', 'np.min', (['func_vals_of_minimizers_metod'], {}), '(func_vals_of_minimizers_metod)\n', (14303, 14334), True, 'import numpy as np\n'), ((14352, 14380), 'numpy.min', 'np.min', (['store_func_vals_mult'], {}), '(store_func_vals_mult)\n', (14358, 14380), True, 'import numpy as np\n'), ((14774, 14811), 'numpy.min', 'np.min', (['func_vals_of_minimizers_metod'], {}), '(func_vals_of_minimizers_metod)\n', (14780, 14811), True, 'import numpy as np\n'), ((21352, 21363), 'numpy.zeros', 'np.zeros', (['d'], {}), '(d)\n', (21360, 21363), True, 'import numpy as np\n'), ((21392, 21422), 'numpy.array', 'np.array', (['[lambda_1, lambda_2]'], {}), '([lambda_1, lambda_2])\n', (21400, 21422), True, 'import numpy as np\n'), ((21451, 21503), 'numpy.random.uniform', 'np.random.uniform', (['(lambda_1 + 1)', '(lambda_2 - 1)', '(d - 2)'], {}), '(lambda_1 + 1, lambda_2 - 1, d - 2)\n', (21468, 21503), True, 'import numpy as np\n'), ((21577, 21595), 'numpy.diag', 'np.diag', (['diag_vals'], {}), '(diag_vals)\n', (21584, 21595), True, 'import numpy as np\n'), ((21622, 21651), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(d,)'], {}), '(0, 1, (d,))\n', (21639, 21651), True, 'import numpy as np\n'), ((21684, 21722), 'metod_alg.objective_functions.calculate_rotation_matrix', 'mt_obj.calculate_rotation_matrix', (['d', '(3)'], {}), '(d, 3)\n', (21716, 21722), True, 'from metod_alg import objective_functions as mt_obj\n'), ((21746, 21785), 'numpy.transpose', 'np.transpose', (['store_rotation', '(0, 2, 1)'], {}), '(store_rotation, (0, 2, 1))\n', (21758, 21785), True, 'import numpy as np\n'), ((22967, 22992), 'numpy.array', 'np.array', (['starting_points'], {}), '(starting_points)\n', 
(22975, 22992), True, 'import numpy as np\n'), ((24012, 24037), 'numpy.array', 'np.array', (['starting_points'], {}), '(starting_points)\n', (24020, 24037), True, 'import numpy as np\n'), ((23136, 23161), 'numpy.array', 'np.array', (['starting_points'], {}), '(starting_points)\n', (23144, 23161), True, 'import numpy as np\n'), ((24181, 24206), 'numpy.array', 'np.array', (['starting_points'], {}), '(starting_points)\n', (24189, 24206), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from pykrev import diversity_indices, ordination_matrix, bray_curtis_matrix, compound_class, normalise_intensity
class TestDIVERSITY(unittest.TestCase):
    """Unit tests for pykrev normalisation, diversity, ordination,
    Bray-Curtis and compound-class routines."""

    @staticmethod
    def _formulas():
        # Fresh copy of the ten-formula fixture used across most tests
        # (a new list per call avoids any cross-test mutation).
        return ['C13H14O5', 'C13H14N2O4S2', 'C36H45ClN6O12', 'C9H11NO2',
                'C9H11NO3', 'C11H12N2O2', 'C5H7NO3', 'C5H9NO3',
                'C6H12N2O4S2', 'C6H11NO3S']

    @staticmethod
    def _formulas_variant():
        # Variant formula set that partially overlaps with _formulas().
        return ['C13H14O5', 'C13H14N2O4S2', 'C36H45ClN6O12', 'C9H11NO2',
                'C9H31NO3', 'C11H12N1O2', 'C5H73O3', 'C5H9NO3',
                'C6H12N2O4S2', 'C6H11NO3S']

    @staticmethod
    def _peaks():
        # Peak intensities matching the ten formulas above.
        return np.array([1000, 2432, 3000, 4201, 2000, 5990,
                         1000, 6520, 8000, 9001])

    @staticmethod
    def _eight():
        # Eight-value intensity vector for the normalisation tests.
        return np.array([100, 200, 300, 400, 21, 321, 342, 543])

    def setUp(self):
        # No shared mutable state is required.
        pass

    def test_sum_relative_intensity(self):
        raw = np.array([100, 200, 300])
        expected = np.array([100 / 600, 200 / 600, 300 / 600])
        result = normalise_intensity(raw)
        self.assertIsNone(np.testing.assert_array_equal(
            np.round(result, 3), np.round(expected, 3)))

    def test_max_relative_intensity(self):
        raw = np.array([100, 200, 300])
        expected = np.array([100 / 300, 200 / 300, 300 / 300])
        result = normalise_intensity(raw, norm_method='max')
        self.assertIsNone(np.testing.assert_array_equal(
            np.round(result, 3), np.round(expected, 3)))

    def test_unit_relative_intensity(self):
        result = normalise_intensity(np.array([100, 200, 300]),
                                     norm_method='unit_vector')
        # A unit vector's squared components sum to one.
        self.assertEqual(np.round(sum(result ** 2), 3), 1.00)

    def test_zscore_relative_intensity(self):
        result = normalise_intensity(self._eight(), norm_method='zscore')
        # Z-scored data has zero mean and unit standard deviation.
        self.assertEqual(np.round(np.mean(result), 3), 0.00)
        self.assertEqual(np.round(np.std(result), 3), 1.00)

    def test_minmax_relative_intensity(self):
        result = normalise_intensity(self._eight(), norm_method='minmax')
        self.assertEqual(min(result), 0)
        self.assertEqual(max(result), 1)

    def test_mean_relative_intensity(self):
        result = normalise_intensity(self._eight(), norm_method='mean')
        self.assertEqual(np.round(np.mean(result), 3), 0.00)

    def test_median_relative_intensity(self):
        result = normalise_intensity(self._eight(), norm_method='median')
        self.assertEqual(np.round(np.median(result), 3), 0.00)

    def test_binary_relative_intensity(self):
        result = normalise_intensity(
            np.array([100, 0, 300, 400, 21, 321, 0, 543]),
            norm_method='binary')
        # Six of the eight peaks are non-zero.
        self.assertEqual(sum(result), 6)

    def test_richness(self):
        result = diversity_indices(self._formulas(), self._peaks(),
                                   indices=['r'])
        # All ten formulas are distinct, so richness is 10.
        self.assertEqual(result, {'D_r': 10})

    def test_GS(self):
        result = diversity_indices(self._formulas(), self._peaks(),
                                   indices=['GS'])
        self.assertEqual(np.around(result['D_a_GS'], 3), np.around(0.8593, 3))

    def test_SW(self):
        result = diversity_indices(self._formulas(), self._peaks(),
                                   indices=['SW'])
        self.assertEqual(np.around(result['D_a_SW'], 3), np.around(2.09, 3))

    def test_functionalC(self):
        # Smoke test: the call must complete without raising.
        diversity_indices(self._formulas(), self._peaks(), indices=['N'])

    def test_functionalNC(self):
        # Smoke test: the call must complete without raising.
        diversity_indices(self._formulas(), self._peaks(), indices=['NC'])

    def test_functionalrAI(self):
        # Smoke test: the call must complete without raising.
        diversity_indices(self._formulas(), self._peaks(), indices=['rAI'])

    def test_functionalmz(self):
        mz_values = np.array([232, 340, 132, 904, 321, 431, 3424, 200,
                              3204, 1000])
        # Smoke test: the call must complete without raising.
        diversity_indices(self._formulas(), self._peaks(),
                          mz_list=mz_values, indices=['mz'])

    def test_ordination_matrix(self):
        # Smoke test: build an ordination matrix from two sample sets.
        ordination_matrix(
            molecular_formulas=[self._formulas(), self._formulas_variant()],
            peak_intensities=[self._peaks(), self._peaks()])

    def test_normalise_ordination(self):
        ord_mat = ordination_matrix(
            molecular_formulas=[self._formulas(), self._formulas_variant()],
            peak_intensities=[self._peaks(), self._peaks()])
        # Exercise the supported subset/method combinations.
        normalise_intensity(ord_mat)
        normalise_intensity(ord_mat, norm_subset='PPP', norm_method='binary')
        normalise_intensity(ord_mat, norm_subset='LOS', p_L=3,
                            norm_method='minmax')
        normalise_intensity(ord_mat, norm_subset='PPP', p_P=0.73,
                            norm_method='zscore')
        normalise_intensity(ord_mat, norm_subset='PPP', p_P=0.02,
                            norm_method='mean')
        normalise_intensity(ord_mat, norm_subset='LOS', p_P=0.02,
                            norm_method='mean', p_L=1000)
        normalise_intensity(ord_mat, norm_subset='LOS', p_P=0.02,
                            norm_method='mean', p_L=1000, log=True)
        normalise_intensity(ord_mat, norm_subset='ALL', p_P=0.02,
                            norm_method='none', p_L=1000, log=True)

    def test_bray_curtis_matrix(self):
        ord_mat = ordination_matrix(
            molecular_formulas=[self._formulas(), self._formulas_variant()],
            peak_intensities=[self._peaks(), self._peaks()])
        # Smoke test: Bray-Curtis dissimilarity over the ordination matrix.
        bray_curtis_matrix(np.array(ord_mat))

    def test_compound_class_MSCC(self):
        # Smoke test: the call must complete without raising.
        compound_class(self._formulas(), mass_list=self._peaks(),
                       method='MSCC')

    def test_compound_class_KELL(self):
        # Smoke test: the call must complete without raising.
        compound_class(self._formulas(), method='KELL')

    def test_compound_class_FORM(self):
        # Smoke test: the call must complete without raising.
        compound_class(self._formulas(), method='FORM')

    def test_compound_class_KEGG(self):
        # Smoke test: the call must complete without raising.
        compound_class(self._formulas(), method='KEGG_All')
if __name__ == '__main__':
    # Run the full test suite when this file is executed as a script.
    unittest.main()
| [
"unittest.main",
"pykrev.diversity_indices",
"numpy.std",
"numpy.median",
"pykrev.compound_class",
"numpy.round",
"numpy.around",
"numpy.mean",
"numpy.array",
"pykrev.normalise_intensity",
"pykrev.ordination_matrix"
] | [((8426, 8441), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8439, 8441), False, 'import unittest\n'), ((280, 305), 'numpy.array', 'np.array', (['[100, 200, 300]'], {}), '([100, 200, 300])\n', (288, 305), True, 'import numpy as np\n'), ((322, 365), 'numpy.array', 'np.array', (['[100 / 600, 200 / 600, 300 / 600]'], {}), '([100 / 600, 200 / 600, 300 / 600])\n', (330, 365), True, 'import numpy as np\n'), ((372, 394), 'pykrev.normalise_intensity', 'normalise_intensity', (['z'], {}), '(z)\n', (391, 394), False, 'from pykrev import diversity_indices, ordination_matrix, bray_curtis_matrix, compound_class, normalise_intensity\n'), ((545, 570), 'numpy.array', 'np.array', (['[100, 200, 300]'], {}), '([100, 200, 300])\n', (553, 570), True, 'import numpy as np\n'), ((587, 630), 'numpy.array', 'np.array', (['[100 / 300, 200 / 300, 300 / 300]'], {}), '([100 / 300, 200 / 300, 300 / 300])\n', (595, 630), True, 'import numpy as np\n'), ((637, 678), 'pykrev.normalise_intensity', 'normalise_intensity', (['z'], {'norm_method': '"""max"""'}), "(z, norm_method='max')\n", (656, 678), False, 'from pykrev import diversity_indices, ordination_matrix, bray_curtis_matrix, compound_class, normalise_intensity\n'), ((832, 857), 'numpy.array', 'np.array', (['[100, 200, 300]'], {}), '([100, 200, 300])\n', (840, 857), True, 'import numpy as np\n'), ((870, 919), 'pykrev.normalise_intensity', 'normalise_intensity', (['z'], {'norm_method': '"""unit_vector"""'}), "(z, norm_method='unit_vector')\n", (889, 919), False, 'from pykrev import diversity_indices, ordination_matrix, bray_curtis_matrix, compound_class, normalise_intensity\n'), ((1036, 1085), 'numpy.array', 'np.array', (['[100, 200, 300, 400, 21, 321, 342, 543]'], {}), '([100, 200, 300, 400, 21, 321, 342, 543])\n', (1044, 1085), True, 'import numpy as np\n'), ((1093, 1137), 'pykrev.normalise_intensity', 'normalise_intensity', (['z'], {'norm_method': '"""zscore"""'}), "(z, norm_method='zscore')\n", (1112, 1137), False, 'from pykrev 
import diversity_indices, ordination_matrix, bray_curtis_matrix, compound_class, normalise_intensity\n'), ((1310, 1359), 'numpy.array', 'np.array', (['[100, 200, 300, 400, 21, 321, 342, 543]'], {}), '([100, 200, 300, 400, 21, 321, 342, 543])\n', (1318, 1359), True, 'import numpy as np\n'), ((1367, 1411), 'pykrev.normalise_intensity', 'normalise_intensity', (['z'], {'norm_method': '"""minmax"""'}), "(z, norm_method='minmax')\n", (1386, 1411), False, 'from pykrev import diversity_indices, ordination_matrix, bray_curtis_matrix, compound_class, normalise_intensity\n'), ((1545, 1594), 'numpy.array', 'np.array', (['[100, 200, 300, 400, 21, 321, 342, 543]'], {}), '([100, 200, 300, 400, 21, 321, 342, 543])\n', (1553, 1594), True, 'import numpy as np\n'), ((1602, 1644), 'pykrev.normalise_intensity', 'normalise_intensity', (['z'], {'norm_method': '"""mean"""'}), "(z, norm_method='mean')\n", (1621, 1644), False, 'from pykrev import diversity_indices, ordination_matrix, bray_curtis_matrix, compound_class, normalise_intensity\n'), ((1770, 1819), 'numpy.array', 'np.array', (['[100, 200, 300, 400, 21, 321, 342, 543]'], {}), '([100, 200, 300, 400, 21, 321, 342, 543])\n', (1778, 1819), True, 'import numpy as np\n'), ((1827, 1871), 'pykrev.normalise_intensity', 'normalise_intensity', (['z'], {'norm_method': '"""median"""'}), "(z, norm_method='median')\n", (1846, 1871), False, 'from pykrev import diversity_indices, ordination_matrix, bray_curtis_matrix, compound_class, normalise_intensity\n'), ((1991, 2036), 'numpy.array', 'np.array', (['[100, 0, 300, 400, 21, 321, 0, 543]'], {}), '([100, 0, 300, 400, 21, 321, 0, 543])\n', (1999, 2036), True, 'import numpy as np\n'), ((2044, 2088), 'pykrev.normalise_intensity', 'normalise_intensity', (['z'], {'norm_method': '"""binary"""'}), "(z, norm_method='binary')\n", (2063, 2088), False, 'from pykrev import diversity_indices, ordination_matrix, bray_curtis_matrix, compound_class, normalise_intensity\n'), ((2320, 2390), 'numpy.array', 'np.array', 
(['[1000, 2432, 3000, 4201, 2000, 5990, 1000, 6520, 8000, 9001]'], {}), '([1000, 2432, 3000, 4201, 2000, 5990, 1000, 6520, 8000, 9001])\n', (2328, 2390), True, 'import numpy as np\n'), ((2425, 2463), 'pykrev.diversity_indices', 'diversity_indices', (['x', 'z'], {'indices': "['r']"}), "(x, z, indices=['r'])\n", (2442, 2463), False, 'from pykrev import diversity_indices, ordination_matrix, bray_curtis_matrix, compound_class, normalise_intensity\n'), ((2682, 2752), 'numpy.array', 'np.array', (['[1000, 2432, 3000, 4201, 2000, 5990, 1000, 6520, 8000, 9001]'], {}), '([1000, 2432, 3000, 4201, 2000, 5990, 1000, 6520, 8000, 9001])\n', (2690, 2752), True, 'import numpy as np\n'), ((2794, 2833), 'pykrev.diversity_indices', 'diversity_indices', (['x', 'z'], {'indices': "['GS']"}), "(x, z, indices=['GS'])\n", (2811, 2833), False, 'from pykrev import diversity_indices, ordination_matrix, bray_curtis_matrix, compound_class, normalise_intensity\n'), ((3098, 3168), 'numpy.array', 'np.array', (['[1000, 2432, 3000, 4201, 2000, 5990, 1000, 6520, 8000, 9001]'], {}), '([1000, 2432, 3000, 4201, 2000, 5990, 1000, 6520, 8000, 9001])\n', (3106, 3168), True, 'import numpy as np\n'), ((3208, 3247), 'pykrev.diversity_indices', 'diversity_indices', (['x', 'z'], {'indices': "['SW']"}), "(x, z, indices=['SW'])\n", (3225, 3247), False, 'from pykrev import diversity_indices, ordination_matrix, bray_curtis_matrix, compound_class, normalise_intensity\n'), ((3521, 3591), 'numpy.array', 'np.array', (['[1000, 2432, 3000, 4201, 2000, 5990, 1000, 6520, 8000, 9001]'], {}), '([1000, 2432, 3000, 4201, 2000, 5990, 1000, 6520, 8000, 9001])\n', (3529, 3591), True, 'import numpy as np\n'), ((3597, 3635), 'pykrev.diversity_indices', 'diversity_indices', (['x', 'z'], {'indices': "['N']"}), "(x, z, indices=['N'])\n", (3614, 3635), False, 'from pykrev import diversity_indices, ordination_matrix, bray_curtis_matrix, compound_class, normalise_intensity\n'), ((3825, 3895), 'numpy.array', 'np.array', (['[1000, 2432, 
3000, 4201, 2000, 5990, 1000, 6520, 8000, 9001]'], {}), '([1000, 2432, 3000, 4201, 2000, 5990, 1000, 6520, 8000, 9001])\n', (3833, 3895), True, 'import numpy as np\n'), ((3901, 3940), 'pykrev.diversity_indices', 'diversity_indices', (['x', 'z'], {'indices': "['NC']"}), "(x, z, indices=['NC'])\n", (3918, 3940), False, 'from pykrev import diversity_indices, ordination_matrix, bray_curtis_matrix, compound_class, normalise_intensity\n'), ((4131, 4201), 'numpy.array', 'np.array', (['[1000, 2432, 3000, 4201, 2000, 5990, 1000, 6520, 8000, 9001]'], {}), '([1000, 2432, 3000, 4201, 2000, 5990, 1000, 6520, 8000, 9001])\n', (4139, 4201), True, 'import numpy as np\n'), ((4207, 4247), 'pykrev.diversity_indices', 'diversity_indices', (['x', 'z'], {'indices': "['rAI']"}), "(x, z, indices=['rAI'])\n", (4224, 4247), False, 'from pykrev import diversity_indices, ordination_matrix, bray_curtis_matrix, compound_class, normalise_intensity\n'), ((4437, 4507), 'numpy.array', 'np.array', (['[1000, 2432, 3000, 4201, 2000, 5990, 1000, 6520, 8000, 9001]'], {}), '([1000, 2432, 3000, 4201, 2000, 5990, 1000, 6520, 8000, 9001])\n', (4445, 4507), True, 'import numpy as np\n'), ((4512, 4575), 'numpy.array', 'np.array', (['[232, 340, 132, 904, 321, 431, 3424, 200, 3204, 1000]'], {}), '([232, 340, 132, 904, 321, 431, 3424, 200, 3204, 1000])\n', (4520, 4575), True, 'import numpy as np\n'), ((4581, 4632), 'pykrev.diversity_indices', 'diversity_indices', (['x', 'z'], {'mz_list': 'mz', 'indices': "['mz']"}), "(x, z, mz_list=mz, indices=['mz'])\n", (4598, 4632), False, 'from pykrev import diversity_indices, ordination_matrix, bray_curtis_matrix, compound_class, normalise_intensity\n'), ((4972, 5042), 'numpy.array', 'np.array', (['[1000, 2432, 3000, 4201, 2000, 5990, 1000, 6520, 8000, 9001]'], {}), '([1000, 2432, 3000, 4201, 2000, 5990, 1000, 6520, 8000, 9001])\n', (4980, 5042), True, 'import numpy as np\n'), ((5047, 5117), 'numpy.array', 'np.array', (['[1000, 2432, 3000, 4201, 2000, 5990, 1000, 6520, 
8000, 9001]'], {}), '([1000, 2432, 3000, 4201, 2000, 5990, 1000, 6520, 8000, 9001])\n', (5055, 5117), True, 'import numpy as np\n'), ((5124, 5195), 'pykrev.ordination_matrix', 'ordination_matrix', ([], {'molecular_formulas': '[x, x2]', 'peak_intensities': '[z, z2]'}), '(molecular_formulas=[x, x2], peak_intensities=[z, z2])\n', (5141, 5195), False, 'from pykrev import diversity_indices, ordination_matrix, bray_curtis_matrix, compound_class, normalise_intensity\n'), ((5536, 5606), 'numpy.array', 'np.array', (['[1000, 2432, 3000, 4201, 2000, 5990, 1000, 6520, 8000, 9001]'], {}), '([1000, 2432, 3000, 4201, 2000, 5990, 1000, 6520, 8000, 9001])\n', (5544, 5606), True, 'import numpy as np\n'), ((5611, 5681), 'numpy.array', 'np.array', (['[1000, 2432, 3000, 4201, 2000, 5990, 1000, 6520, 8000, 9001]'], {}), '([1000, 2432, 3000, 4201, 2000, 5990, 1000, 6520, 8000, 9001])\n', (5619, 5681), True, 'import numpy as np\n'), ((5688, 5759), 'pykrev.ordination_matrix', 'ordination_matrix', ([], {'molecular_formulas': '[x, x2]', 'peak_intensities': '[z, z2]'}), '(molecular_formulas=[x, x2], peak_intensities=[z, z2])\n', (5705, 5759), False, 'from pykrev import diversity_indices, ordination_matrix, bray_curtis_matrix, compound_class, normalise_intensity\n'), ((5777, 5802), 'pykrev.normalise_intensity', 'normalise_intensity', (['ores'], {}), '(ores)\n', (5796, 5802), False, 'from pykrev import diversity_indices, ordination_matrix, bray_curtis_matrix, compound_class, normalise_intensity\n'), ((5819, 5885), 'pykrev.normalise_intensity', 'normalise_intensity', (['ores'], {'norm_subset': '"""PPP"""', 'norm_method': '"""binary"""'}), "(ores, norm_subset='PPP', norm_method='binary')\n", (5838, 5885), False, 'from pykrev import diversity_indices, ordination_matrix, bray_curtis_matrix, compound_class, normalise_intensity\n'), ((5906, 5979), 'pykrev.normalise_intensity', 'normalise_intensity', (['ores'], {'norm_subset': '"""LOS"""', 'p_L': '(3)', 'norm_method': '"""minmax"""'}), "(ores, 
norm_subset='LOS', p_L=3, norm_method='minmax')\n", (5925, 5979), False, 'from pykrev import diversity_indices, ordination_matrix, bray_curtis_matrix, compound_class, normalise_intensity\n'), ((6002, 6078), 'pykrev.normalise_intensity', 'normalise_intensity', (['ores'], {'norm_subset': '"""PPP"""', 'p_P': '(0.73)', 'norm_method': '"""zscore"""'}), "(ores, norm_subset='PPP', p_P=0.73, norm_method='zscore')\n", (6021, 6078), False, 'from pykrev import diversity_indices, ordination_matrix, bray_curtis_matrix, compound_class, normalise_intensity\n'), ((6101, 6175), 'pykrev.normalise_intensity', 'normalise_intensity', (['ores'], {'norm_subset': '"""PPP"""', 'p_P': '(0.02)', 'norm_method': '"""mean"""'}), "(ores, norm_subset='PPP', p_P=0.02, norm_method='mean')\n", (6120, 6175), False, 'from pykrev import diversity_indices, ordination_matrix, bray_curtis_matrix, compound_class, normalise_intensity\n'), ((6198, 6286), 'pykrev.normalise_intensity', 'normalise_intensity', (['ores'], {'norm_subset': '"""LOS"""', 'p_P': '(0.02)', 'norm_method': '"""mean"""', 'p_L': '(1000)'}), "(ores, norm_subset='LOS', p_P=0.02, norm_method='mean',\n p_L=1000)\n", (6217, 6286), False, 'from pykrev import diversity_indices, ordination_matrix, bray_curtis_matrix, compound_class, normalise_intensity\n'), ((6307, 6405), 'pykrev.normalise_intensity', 'normalise_intensity', (['ores'], {'norm_subset': '"""LOS"""', 'p_P': '(0.02)', 'norm_method': '"""mean"""', 'p_L': '(1000)', 'log': '(True)'}), "(ores, norm_subset='LOS', p_P=0.02, norm_method='mean',\n p_L=1000, log=True)\n", (6326, 6405), False, 'from pykrev import diversity_indices, ordination_matrix, bray_curtis_matrix, compound_class, normalise_intensity\n'), ((6428, 6526), 'pykrev.normalise_intensity', 'normalise_intensity', (['ores'], {'norm_subset': '"""ALL"""', 'p_P': '(0.02)', 'norm_method': '"""none"""', 'p_L': '(1000)', 'log': '(True)'}), "(ores, norm_subset='ALL', p_P=0.02, norm_method='none',\n p_L=1000, log=True)\n", (6447, 6526), 
False, 'from pykrev import diversity_indices, ordination_matrix, bray_curtis_matrix, compound_class, normalise_intensity\n'), ((6878, 6948), 'numpy.array', 'np.array', (['[1000, 2432, 3000, 4201, 2000, 5990, 1000, 6520, 8000, 9001]'], {}), '([1000, 2432, 3000, 4201, 2000, 5990, 1000, 6520, 8000, 9001])\n', (6886, 6948), True, 'import numpy as np\n'), ((6953, 7023), 'numpy.array', 'np.array', (['[1000, 2432, 3000, 4201, 2000, 5990, 1000, 6520, 8000, 9001]'], {}), '([1000, 2432, 3000, 4201, 2000, 5990, 1000, 6520, 8000, 9001])\n', (6961, 7023), True, 'import numpy as np\n'), ((7030, 7101), 'pykrev.ordination_matrix', 'ordination_matrix', ([], {'molecular_formulas': '[x, x2]', 'peak_intensities': '[z, z2]'}), '(molecular_formulas=[x, x2], peak_intensities=[z, z2])\n', (7047, 7101), False, 'from pykrev import diversity_indices, ordination_matrix, bray_curtis_matrix, compound_class, normalise_intensity\n'), ((7348, 7418), 'numpy.array', 'np.array', (['[1000, 2432, 3000, 4201, 2000, 5990, 1000, 6520, 8000, 9001]'], {}), '([1000, 2432, 3000, 4201, 2000, 5990, 1000, 6520, 8000, 9001])\n', (7356, 7418), True, 'import numpy as np\n'), ((7424, 7469), 'pykrev.compound_class', 'compound_class', (['x'], {'mass_list': 'z', 'method': '"""MSCC"""'}), "(x, mass_list=z, method='MSCC')\n", (7438, 7469), False, 'from pykrev import diversity_indices, ordination_matrix, bray_curtis_matrix, compound_class, normalise_intensity\n'), ((7667, 7737), 'numpy.array', 'np.array', (['[1000, 2432, 3000, 4201, 2000, 5990, 1000, 6520, 8000, 9001]'], {}), '([1000, 2432, 3000, 4201, 2000, 5990, 1000, 6520, 8000, 9001])\n', (7675, 7737), True, 'import numpy as np\n'), ((7743, 7775), 'pykrev.compound_class', 'compound_class', (['x'], {'method': '"""KELL"""'}), "(x, method='KELL')\n", (7757, 7775), False, 'from pykrev import diversity_indices, ordination_matrix, bray_curtis_matrix, compound_class, normalise_intensity\n'), ((7973, 8043), 'numpy.array', 'np.array', (['[1000, 2432, 3000, 4201, 2000, 5990, 
1000, 6520, 8000, 9001]'], {}), '([1000, 2432, 3000, 4201, 2000, 5990, 1000, 6520, 8000, 9001])\n', (7981, 8043), True, 'import numpy as np\n'), ((8049, 8081), 'pykrev.compound_class', 'compound_class', (['x'], {'method': '"""FORM"""'}), "(x, method='FORM')\n", (8063, 8081), False, 'from pykrev import diversity_indices, ordination_matrix, bray_curtis_matrix, compound_class, normalise_intensity\n'), ((8279, 8349), 'numpy.array', 'np.array', (['[1000, 2432, 3000, 4201, 2000, 5990, 1000, 6520, 8000, 9001]'], {}), '([1000, 2432, 3000, 4201, 2000, 5990, 1000, 6520, 8000, 9001])\n', (8287, 8349), True, 'import numpy as np\n'), ((8355, 8391), 'pykrev.compound_class', 'compound_class', (['x'], {'method': '"""KEGG_All"""'}), "(x, method='KEGG_All')\n", (8369, 8391), False, 'from pykrev import diversity_indices, ordination_matrix, bray_curtis_matrix, compound_class, normalise_intensity\n'), ((2860, 2887), 'numpy.around', 'np.around', (["res['D_a_GS']", '(3)'], {}), "(res['D_a_GS'], 3)\n", (2869, 2887), True, 'import numpy as np\n'), ((2888, 2919), 'numpy.around', 'np.around', (["correct['D_a_GS']", '(3)'], {}), "(correct['D_a_GS'], 3)\n", (2897, 2919), True, 'import numpy as np\n'), ((3274, 3301), 'numpy.around', 'np.around', (["res['D_a_SW']", '(3)'], {}), "(res['D_a_SW'], 3)\n", (3283, 3301), True, 'import numpy as np\n'), ((3302, 3333), 'numpy.around', 'np.around', (["correct['D_a_SW']", '(3)'], {}), "(correct['D_a_SW'], 3)\n", (3311, 3333), True, 'import numpy as np\n'), ((7137, 7151), 'numpy.array', 'np.array', (['ores'], {}), '(ores)\n', (7145, 7151), True, 'import numpy as np\n'), ((451, 467), 'numpy.round', 'np.round', (['res', '(3)'], {}), '(res, 3)\n', (459, 467), True, 'import numpy as np\n'), ((467, 487), 'numpy.round', 'np.round', (['correct', '(3)'], {}), '(correct, 3)\n', (475, 487), True, 'import numpy as np\n'), ((737, 753), 'numpy.round', 'np.round', (['res', '(3)'], {}), '(res, 3)\n', (745, 753), True, 'import numpy as np\n'), ((753, 773), 'numpy.round', 
'np.round', (['correct', '(3)'], {}), '(correct, 3)\n', (761, 773), True, 'import numpy as np\n'), ((1174, 1186), 'numpy.mean', 'np.mean', (['res'], {}), '(res)\n', (1181, 1186), True, 'import numpy as np\n'), ((1230, 1241), 'numpy.std', 'np.std', (['res'], {}), '(res)\n', (1236, 1241), True, 'import numpy as np\n'), ((1681, 1693), 'numpy.mean', 'np.mean', (['res'], {}), '(res)\n', (1688, 1693), True, 'import numpy as np\n'), ((1908, 1922), 'numpy.median', 'np.median', (['res'], {}), '(res)\n', (1917, 1922), True, 'import numpy as np\n')] |
class Grams:
    def __init__(self, text):
        '''Build n-gram variants from a tokenized document.

        text : list
            A list of words representing a document.
        '''
        self._text = text
        # cache the word count; used as the iteration bound in ngrams()
        self._counter = len(text)

    def ngrams(self, ngram=2, skip=0):
        '''Produce ngrams (skip=0) or skipgrams (skip>0).

        ngram : int
            Number of words per gram.
        skip : int
            The number of words to skip for each gram.

        Returns a list of lists of words; grams that would run past the
        end of the document are silently dropped.
        '''
        out = []
        # process the doc word-by-word
        for i in range(self._counter):
            try:
                # the gram starts with the current word ...
                words = [self._text[i]]
                # ... followed by the next (ngram - 1) words, offset by `skip`
                for ii in range(ngram - 1):
                    words.append(self._text[i + ii + 1 + skip])
                out.append(words)
            except IndexError:
                # near the end of the document a full gram cannot be formed
                pass
        return out

    def combinations(self, length=3):
        '''Produce every combination of `length` distinct words, in
        itertools.combinations order. Combinations containing a repeated
        word are discarded; duplicate combinations are kept.

        length : int
            Length of the combinations.
        '''
        import itertools
        out = []
        for combo in itertools.combinations(self._text, length):
            # keep only combinations whose words are all distinct
            if len(combo) == len(set(combo)):
                out.append(combo)
        return out

    def flexgrams(self, length=3, flex_n=1):
        '''Simple flexgram: all sequential combinations of `length` words
        are formed, then grams of (length - flex_n) words are randomly
        sampled from each. Grams with repeated words, and duplicate
        grams, are discarded.

        length : int
            Length of the combinations before dropping.
        flex_n : int
            The number of items to randomly drop.
        '''
        import random
        import itertools
        combos = list(itertools.combinations(self._text, length))
        out = []
        for gram in [random.sample(c, k=length - flex_n) for c in combos]:
            if len(gram) == len(set(gram)) and gram not in out:
                out.append(gram)
        return out

    def sobolgrams(self, length=3, n=2):
        '''Sobolgram: a random variation of a flexgram where grams are
        formed by random draws from the whole document, with no
        sequential order at all.

        length : int
            Length of each gram.
        n : int
            Number of distinct grams to draw.

        Raises ValueError when `n` distinct grams of all-unique words
        cannot possibly be formed from the document (the previous
        implementation looped forever in that case).
        '''
        import math
        import random
        n_unique = len(set(self._text))
        # Number of distinct ordered grams with all-unique words that can
        # possibly be drawn; guards against an infinite sampling loop.
        possible = math.perm(n_unique, length) if length <= n_unique else 0
        if n > possible:
            raise ValueError(
                'cannot draw %d distinct grams of length %d from %d unique words'
                % (n, length, n_unique))
        out = []
        while len(out) < n:
            gram = random.sample(self._text, k=length)
            # keep only grams whose words are all distinct, without repeats
            if len(set(gram)) == length and gram not in out:
                out.append(gram)
        return out

    def skipgrams(self, length=3, skip_nth=2):
        '''Produce all sequential combinations of `length` words with the
        word at position `skip_nth` removed from each; duplicate grams
        are discarded.

        length : int
            Length of the combinations before removal.
        skip_nth : int
            The index of the item to drop from each combination.
        '''
        import itertools
        # mirror the ValueError the original list.remove() raised for a
        # skip_nth that is not a valid position in the gram
        if not 0 <= skip_nth < length:
            raise ValueError('skip_nth must be a valid index into the gram')
        out = []
        for combo in itertools.combinations(self._text, length):
            gram = [w for idx, w in enumerate(combo) if idx != skip_nth]
            if gram not in out:
                out.append(gram)
        return out
| [
"random.sample",
"itertools.combinations",
"numpy.array"
] | [((1320, 1362), 'itertools.combinations', 'itertools.combinations', (['self._text', 'length'], {}), '(self._text, length)\n', (1342, 1362), False, 'import itertools\n'), ((1909, 1951), 'itertools.combinations', 'itertools.combinations', (['self._text', 'length'], {}), '(self._text, length)\n', (1931, 1951), False, 'import itertools\n'), ((1990, 2025), 'random.sample', 'random.sample', (['i'], {'k': '(length - flex_n)'}), '(i, k=length - flex_n)\n', (2003, 2025), False, 'import random\n'), ((2678, 2713), 'random.sample', 'random.sample', (['self._text'], {'k': 'length'}), '(self._text, k=length)\n', (2691, 2713), False, 'import random\n'), ((3305, 3347), 'itertools.combinations', 'itertools.combinations', (['self._text', 'length'], {}), '(self._text, length)\n', (3327, 3347), False, 'import itertools\n'), ((3486, 3497), 'numpy.array', 'np.array', (['i'], {}), '(i)\n', (3494, 3497), True, 'import numpy as np\n')] |
import argparse
import itertools
import logging
import multiprocessing
import os
import random
import sys
import networkit as nk
import networkx as nx
import numpy as np
from utils import np_output_filename
# --- command-line interface -------------------------------------------------
parser = argparse.ArgumentParser(
    description='Compute graph sectional curvatures')
parser.add_argument('--input', type=str, required=True, help='The input graph.')
parser.add_argument('--force', action='store_true', help='Re-generate them.')
parser.add_argument(
    '--min_num_nodes',
    type=int,
    default=100,
    help='The minimum number of nodes in the largest connected '
    'component to keep.')
parser.add_argument(
    '--sample_ratio',
    type=float,
    default=0.5,
    help='The percentage of all nodes to use as reference nodes `a`.')
parser.add_argument(
    '--max_neigh_pairs',
    type=int,
    default=int(1e4),
    help='The maximum number of neighbor pairs to compute seccurvs for.')
parser.add_argument(
    '--inherit_filename',
    action='store_true',
    help='Whether the file format of the degrees file should be inheritted '
    'from the input.')
parser.add_argument(
    '--n_cpus',
    type=int,
    default=multiprocessing.cpu_count(),
    help='The number of CPUs used for parallelization.')
args = parser.parse_args()
# Skip recomputation when the output file already exists, unless --force.
out_file = np_output_filename(args.input, 'seccurvs', args.inherit_filename)
if os.path.isfile(out_file) and not args.force:
    logging.warning('The sectional curvatures already exist: %s', out_file)
    sys.exit(0)
# load the graph and reduce it to its largest connected component
g = nx.convert_node_labels_to_integers(nx.read_edgelist(args.input))
connected_components = list(nx.connected_components(g))
if len(connected_components) == 0:
    logging.fatal('Empty graph. This is most probably due to a too small a '
                  'distance threshold.')
    sys.exit(1)
if len(connected_components) > 1:
    logging.warning('The input graph has %d connected components. ',
                    len(connected_components))
    g = g.subgraph(max(connected_components, key=len))
    g = nx.convert_node_labels_to_integers(g)
num_nodes = g.number_of_nodes()
# `--min_num_nodes` is documented as the minimum size to keep, so a component
# of exactly that size is kept (a strict `>` here contradicted the help text
# and dropped the boundary case).
if num_nodes >= args.min_num_nodes:
    logging.warning('Keeping the largest only with %d nodes.', num_nodes)
else:
    logging.fatal(
        'The largest connected component has %d nodes. Dropping '
        'this scenario.', num_nodes)
    sys.exit(1)
n_nodes = g.number_of_nodes()
num_ref_nodes = int(args.sample_ratio * n_nodes)
# compute the all-pairs shortest paths via networkit
gk = nk.nxadapter.nx2nk(g)
shortest_paths = nk.distance.APSP(gk).run().getDistances()
# dists[u][v]: integer hop distance between nodes u and v
dists = {u: {} for u in range(n_nodes)}
for i, u in enumerate(g.nodes()):
    for j, v in enumerate(g.nodes()):
        dists[u][v] = int(shortest_paths[i][j])
# compute the sectional curvature samples
def sectional_curvatures(m):
    """Sample sectional-curvature estimates around node ``m``.

    For each randomly chosen pair ``(b, c)`` of neighbours of ``m``,
    average the curvature proxy ``xi_g`` over randomly chosen reference
    nodes ``a`` (skipping ``a == m``); one mean value per neighbour pair.
    """
    samples = []
    neighs = list(g[m])
    k = len(neighs)
    # all unordered pairs of distinct neighbours of m, in index order
    pairs = [(neighs[p], neighs[q])
             for p in range(k)
             for q in range(p + 1, k)]
    n_pairs = min(args.max_neigh_pairs, len(pairs))
    for b, c in random.sample(pairs, n_pairs):
        xi_vals = []
        for a in random.sample(range(n_nodes), num_ref_nodes):
            if a == m:
                continue
            d_am = dists[a][m]
            xi = d_am**2 + dists[b][c]**2 / 4 - \
                (dists[a][b]**2 + dists[a][c]**2) / 2
            # normalise by the distance to the apex node
            xi_vals.append(xi / d_am / 2)
        samples.append(np.mean(xi_vals))
    return samples
# parallelize over nodes ``m``; the context manager guarantees the worker
# processes are terminated (the original never closed/joined the pool)
with multiprocessing.Pool(args.n_cpus) as pool:
    seccurvs = pool.map(sectional_curvatures, range(n_nodes))
# flatten the per-node lists into one flat list of curvature samples
seccurvs = list(itertools.chain(*seccurvs))
# save them
np.save(out_file, seccurvs)
| [
"numpy.save",
"argparse.ArgumentParser",
"logging.warning",
"os.path.isfile",
"networkx.connected_components",
"logging.fatal",
"networkx.convert_node_labels_to_integers",
"numpy.mean",
"multiprocessing.Pool",
"networkit.nxadapter.nx2nk",
"networkx.read_edgelist",
"networkit.distance.APSP",
... | [((219, 292), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Compute graph sectional curvatures"""'}), "(description='Compute graph sectional curvatures')\n", (242, 292), False, 'import argparse\n'), ((1374, 1439), 'utils.np_output_filename', 'np_output_filename', (['args.input', '"""seccurvs"""', 'args.inherit_filename'], {}), "(args.input, 'seccurvs', args.inherit_filename)\n", (1392, 1439), False, 'from utils import np_output_filename\n'), ((2582, 2603), 'networkit.nxadapter.nx2nk', 'nk.nxadapter.nx2nk', (['g'], {}), '(g)\n', (2600, 2603), True, 'import networkit as nk\n'), ((3618, 3651), 'multiprocessing.Pool', 'multiprocessing.Pool', (['args.n_cpus'], {}), '(args.n_cpus)\n', (3638, 3651), False, 'import multiprocessing\n'), ((3767, 3794), 'numpy.save', 'np.save', (['out_file', 'seccurvs'], {}), '(out_file, seccurvs)\n', (3774, 3794), True, 'import numpy as np\n'), ((1443, 1467), 'os.path.isfile', 'os.path.isfile', (['out_file'], {}), '(out_file)\n', (1457, 1467), False, 'import os\n'), ((1492, 1563), 'logging.warning', 'logging.warning', (['"""The sectional curvatures already exist: %s"""', 'out_file'], {}), "('The sectional curvatures already exist: %s', out_file)\n", (1507, 1563), False, 'import logging\n'), ((1568, 1579), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1576, 1579), False, 'import sys\n'), ((1637, 1665), 'networkx.read_edgelist', 'nx.read_edgelist', (['args.input'], {}), '(args.input)\n', (1653, 1665), True, 'import networkx as nx\n'), ((1695, 1721), 'networkx.connected_components', 'nx.connected_components', (['g'], {}), '(g)\n', (1718, 1721), True, 'import networkx as nx\n'), ((1762, 1864), 'logging.fatal', 'logging.fatal', (['"""Empty graph. This is most probably due to a too small a distance threshold."""'], {}), "(\n 'Empty graph. 
This is most probably due to a too small a distance threshold.'\n )\n", (1775, 1864), False, 'import logging\n'), ((1880, 1891), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1888, 1891), False, 'import sys\n'), ((2105, 2142), 'networkx.convert_node_labels_to_integers', 'nx.convert_node_labels_to_integers', (['g'], {}), '(g)\n', (2139, 2142), True, 'import networkx as nx\n'), ((3726, 3752), 'itertools.chain', 'itertools.chain', (['*seccurvs'], {}), '(*seccurvs)\n', (3741, 3752), False, 'import itertools\n'), ((1245, 1272), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (1270, 1272), False, 'import multiprocessing\n'), ((2226, 2295), 'logging.warning', 'logging.warning', (['"""Keeping the largest only with %d nodes."""', 'num_nodes'], {}), "('Keeping the largest only with %d nodes.', num_nodes)\n", (2241, 2295), False, 'import logging\n'), ((2314, 2420), 'logging.fatal', 'logging.fatal', (['"""The largest connected component has %d nodes. Dropping this scenario."""', 'num_nodes'], {}), "(\n 'The largest connected component has %d nodes. Dropping this scenario.',\n num_nodes)\n", (2327, 2420), False, 'import logging\n'), ((2456, 2467), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2464, 2467), False, 'import sys\n'), ((3543, 3555), 'numpy.mean', 'np.mean', (['xis'], {}), '(xis)\n', (3550, 3555), True, 'import numpy as np\n'), ((2621, 2641), 'networkit.distance.APSP', 'nk.distance.APSP', (['gk'], {}), '(gk)\n', (2637, 2641), True, 'import networkit as nk\n')] |
import numpy as np
from numba import jit
from scipy.interpolate import interp1d
from scipy.stats import pearsonr
from matplotlib import gridspec
from zestipy.data_structures import waveform, redshift_data, load_sdss_templatefiles
from zestipy.plotting_tools import plot_skylines
import matplotlib.pyplot as plt
from astropy.table import Table
from matplotlib.widgets import RadioButtons, Button
import sys
from types import *
import pdb
# Default width coefficient for the redshift prior (module-level constant;
# not referenced within this visible chunk — verify before removing).
prior_coeff = 0.02
class z_est:
def __init__(self,lower_w=3900.0,upper_w=7400.0,lower_z=0.1,upper_z=0.5,z_res=3.0e-5,prior_width = 0.02, use_zprior=False,skip_initial_priors=True,auto_pilot=True):
'''
Initialize redshift estimate parameters
'''
#set class attributes
self.lower_w = lower_w
self.upper_w = upper_w
self.lower_z = lower_z
self.upper_z = upper_z
self.template_waveforms = np.array([])
self.z_res = z_res
self.auto = auto_pilot
#create redshift array and initialize correlation value array
self.ztest = np.arange(self.lower_z,self.upper_z,self.z_res)
self.corr_val_i = np.zeros(self.ztest.size)
self.qualityval = 0
#set redshift prior flag
if skip_initial_priors:
if use_zprior:
self.est_pre_z = '1'
self.z_prior_width = prior_width
else:
self.est_pre_z = '3'
self.z_prior_width = prior_width
self.uline_n = 'HK'
self.uline = 3950.0
else:
self.est_pre_z = input('(1) Use a known prior [Examples: median of known redshifts. Galaxy photoz measurements] \n'\
'(2) View spectrum and specify a redshift prior \n'\
'(3) No prior\n')
#catch and correct false entry
_est_enter = False
self.uline_n = input('What is the name of a spectral line you wish to use to identify redshift priors? '\
'[Default: HK]: ')
if not self.uline_n:
self.uline_n = 'HK'
self.uline = input('Please list the approx. rest wavelength (in angstroms) of that line you seek to identify in your spectra '\
'[Default: HK lines are at about 3950]: ')
if self.uline:
self.uline = np.float(self.uline)
else:
self.uline = 3950.0
while not _est_enter:
if self.est_pre_z == '1':
self.z_prior_width = prior_width
print('redshift prior width has been set to',self.z_prior_width)
_est_enter = True
elif self.est_pre_z == '2':
self.z_prior_width = prior_width
print('redshift prior width has been set to',self.z_prior_width)
_est_enter = True
elif self.est_pre_z == '3':
self.z_prior_width = prior_width
_est_enter = True
else:
self.est_pre_z = input('Incorrect entry: Please enter either (1), (2), or (3).')
#remind user to set the correct values in next step
if self.est_pre_z == '1':
print('Make sure to set the gal_prior argument to the value of the known redshift prior: '\
'[Example: z_est.redshift_estimate(gal_prior=0.1)]')
#postconditions
assert self.est_pre_z, "Must define redshift prior flag"
assert self.est_pre_z == '1' or self.est_pre_z == '2' or self.est_pre_z == '3', \
"Incorrect string value for prior"
    def add_template(self,new_waveform):
        # Append a single template waveform; np.append returns a new array,
        # hence the reassignment rather than an in-place mutation.
        self.template_waveforms = np.append(self.template_waveforms,[new_waveform])
def add_templates(self,new_waveforms):
for new_waveform in new_waveforms:
self.add_template(new_waveform)
def add_sdsstemplates_fromfile(self,path_to_files='.',filenames=['spDR2-023.fit']):
new_waveforms = load_sdss_templatefiles(path_to_files,filenames)
self.add_templates(new_waveforms)
def redshift_estimate(self,test_waveform,gal_prior=None):
'''
estimate redshift for object
'''
#manage redshift prior
self.gal_prior = gal_prior
test_flux = test_waveform.flux
test_wave = test_waveform.wave
#handle single redshift prior flag
if self.est_pre_z == '1':
if self.gal_prior:
self.pre_z_est = self.gal_prior
else:
nospec = input('You said you are either using a spectroscopic or photometric redshift prior. '\
'You need to specify a prior value! Either enter a number in now or type (q) to exit')
if nospec == 'q':
sys.exit()
elif not nospec:
sys.exit()
else:
self.gal_prior = np.float(nospec)
self.pre_z_est = self.gal_prior
#handle user prior flag
if self.est_pre_z == '2':
print('Take a look at the plotted galaxy spectrum and note, approximately, at what wavelength do you see the '+self.uline_n+' line. '\
'Then close the plot and enter that wavelength in angstroms.')
plt.plot(test_wave,test_flux)
plt.xlim(self.lower_w,self.upper_w)
plt.show()
line_init = input(self.uline_n+' approx. wavelength (A): ')
self.pre_z_est = np.float(line_init)/self.uline - 1
#handle no prior flag
if self.est_pre_z == '3':
self.pre_z_est = None
redshift_output = self.__find_best_fit(self.pre_z_est,self.z_prior_width,test_waveform)
return redshift_output
    def __find_best_fit(self,z_est,z_prior_width,test_waveform):
        # Cross-correlate the test spectrum against every registered template
        # and return the redshift_data record of the best-scoring template.
        # In auto mode the winner is the highest correlation; in interactive
        # mode the user quality flag takes precedence, with correlation as
        # the tie-breaker.
        if self.auto:
            bestfit_info = redshift_data()
        else:
            bestfit_info = redshift_data(qualityval=0)
        # NOTE(review): this table is created but no rows are ever added
        # before it is attached to the result — verify whether per-template
        # rows were intended here.
        out_table = Table(names=['template','max_cor','redshift'],dtype=['S10',float,float])
        for template in self.template_waveforms:
            redshift_est,cor,ztest,corr_val = self._cross_cor(z_est,z_prior_width,test_waveform,template)
            if not self.auto:
                # interactive mode: show the fit and let the user override
                # the redshift estimate, then re-run the cross-correlation
                # around the user's value
                self.qualityval = 1
                self.first_pass = True
                user_zestimate = self._GUI_display(redshift_est,ztest,corr_val,test_waveform,template)
                if user_zestimate != None:
                    #try:
                    self.first_pass = False
                    redshift_est,cor,ztest,corr_val = self._cross_cor(user_zestimate,z_prior_width,test_waveform,template)
                    user_zestimate = self._GUI_display(redshift_est,ztest,corr_val,test_waveform,template)
                    if user_zestimate != None:
                        redshift_est = user_zestimate
                        # use the correlation at the redshift closest to the
                        # user's value; zero if it lies outside the grid
                        if user_zestimate > self.lower_z and user_zestimate < self.upper_z:
                            cor = np.asarray(corr_val[np.argmin(np.abs(ztest-user_zestimate))])
                        else:
                            cor = np.zeros(1)
                #except AttributeError:
                #    pass
            #print "Template %s, Est Red = %f" % (template.name,redshift_est)
            #redshift_est = self.spectra2.finalz
            cor = np.max(cor)
            if self.auto:
                # keep the template with the highest correlation
                if (cor>bestfit_info.max_cor):
                    bestfit_info.best_zest = redshift_est
                    bestfit_info.max_cor = cor
                    bestfit_info.ztest_vals = ztest
                    bestfit_info.corr_vals = corr_val
                    bestfit_info.template = template
            else:
                # interactive mode: quality flag wins; correlation breaks ties
                if self.qualityval > bestfit_info.qualityval:
                    bestfit_info.best_zest = redshift_est
                    bestfit_info.max_cor = cor
                    bestfit_info.ztest_vals = ztest
                    bestfit_info.corr_vals = corr_val
                    bestfit_info.template = template
                    bestfit_info.qualityval=self.qualityval
                elif (self.qualityval == bestfit_info.qualityval) \
                        and (cor>=bestfit_info.max_cor):
                    bestfit_info.best_zest = redshift_est
                    bestfit_info.max_cor = cor
                    bestfit_info.ztest_vals = ztest
                    bestfit_info.corr_vals = corr_val
                    bestfit_info.template = template
                    bestfit_info.qualityval=self.qualityval
        bestfit_info.summary_table = out_table
        return bestfit_info
def _cross_cor(self,z_est,coeff,test_waveform,temp_waveform):
'''
This function cross-correlates a continuum subtracted template spectrum with a continuum subtracted observed spectrum.
It then returns an estimate of the redshift, the correlation value at that redshift, the array of redshifts tested,
and the unnormalized correlation value.
'''
cont_subd_test_flux = test_waveform.continuum_subtracted_flux
test_wave = test_waveform.masked_wave
cont_subd_temp_flux = temp_waveform.continuum_subtracted_flux
temp_wave = temp_waveform.masked_wave
cut_at_lowerw = np.greater(test_wave,self.lower_w)
cut_at_higherw = np.less(test_wave,self.upper_w)
index_of_85thpercentile = int(0.85*len(cont_subd_test_flux))
science_85perc_wavelength = np.sort(test_wave)[index_of_85thpercentile]
z_max = (science_85perc_wavelength/np.float(temp_waveform.min_lam))-1.
z_max_mask = np.where(self.ztest<z_max)
ztest = self.ztest[z_max_mask]
corr_val_i = self.corr_val_i[z_max_mask]
# Find
#loop over each possible redshift to compute correlation values
for i in range(ztest.size):
z = ztest[i]
#redshift the template wavelengths
wshift = temp_wave*(1+z)
min_wshift = temp_waveform.min_lam*(1+z)
max_wshift = temp_waveform.max_lam*(1+z)
#identify the wavelength diff between the lower wave limit and the redshifted template spectrum
#if the limit is above the minimum wavelength of the redshifted template spectrum...
if (min_wshift < self.lower_w):
lower_bound = cut_at_lowerw
else:
lower_bound = np.greater(test_wave,min_wshift)
if (max_wshift > self.upper_w):
upper_bound = cut_at_higherw
else:
upper_bound = np.less(test_wave,max_wshift)
#pdb.set_trace()
bounds = np.bitwise_and(lower_bound,upper_bound)
test_wave_vals = test_wave[bounds]
test_flux_vals = cont_subd_test_flux[bounds]
#del upper_bound, lower_bound
#interpolate the redshifted template spectrum and estimate the flux at the observed spectrum wavelengths
inter = interp1d(wshift,cont_subd_temp_flux)
et_flux_range = inter(test_wave_vals)
#calculate the pearson r correlation value between the observed and template flux
corr_val_i[i] = pearsonr(et_flux_range,test_flux_vals)[0]
#normalize the correlation values as a function of redshift
where_finite = np.isfinite(corr_val_i)
corr_val = corr_val_i[where_finite]#+1)/np.trapz((corr_val_i[where_finite]+1),ztest[where_finite])
finite_ztest = ztest[where_finite]
#multiply in prior to likelihood if specified
if z_est:
unc = coeff * (1 + z_est)
zlowerbound = np.max([z_est-5*unc,self.lower_z])
zupperbound = np.min([z_est+5*unc,self.upper_z])
assert zlowerbound < zupperbound, "Lower bound of z must be below upper bound"
if zupperbound < zlowerbound:
print("Top hat prior failed, proceeding without prior")
zupperbound = self.upper_z
zlowerbound = self.lower_z
#make redshift estimate
zrange_mask = np.where((finite_ztest>zlowerbound)&(finite_ztest<zupperbound))[0]
else:
unc = coeff
zrange_mask = np.where((finite_ztest>self.lower_z)&(finite_ztest<self.upper_z))[0]
#pdb.set_trace()
max_correlation_index = np.nanargmax(corr_val[zrange_mask])
redshift_est = (finite_ztest[zrange_mask])[max_correlation_index] # NOTE: returns only first index even if multiple indices are equally to max value
#save correlation value at maximum redshift likelihood
cor = ((corr_val_i[where_finite])[zrange_mask])[max_correlation_index]
return redshift_est, cor, finite_ztest,corr_val
def _GUI_display(self,redshift_est,ztest,corr_val,test_waveform,template_waveform):
    """Show the spectrum, correlation curve, and reference lines for user review.

    The user can grade the fit quality with radio buttons, drag the reference
    lines to refine the redshift, skip the spectrum, or (on a first pass)
    click where a known line sits.

    Returns
    -------
    float or None
        A user-supplied redshift estimate, or None if the spectrum was
        skipped or no line was marked.
    """
    wave = test_waveform.masked_wave
    flux_sci = test_waveform.masked_flux
    '''Display the spectrum and reference lines.'''
    self.fig = plt.figure(figsize=(10, 8))
    gs = gridspec.GridSpec(2, 1, height_ratios=[2, 1])
    ax2 = plt.subplot(gs[0])
    ax = plt.subplot(gs[1])
    plt.subplots_adjust(top=0.96,bottom=0.04,left=0.04,right=0.92)
    figManager = plt.get_current_fig_manager()
    figManager.window.showMaximized()
    # Lower panel: correlation vs redshift with the current estimate marked.
    ax.plot(ztest,corr_val,'b')
    pspec_corr = ax.axvline(redshift_est,color='k',ls='--')
    ax.set_xlabel('Redshift')
    ax.set_ylabel('Correlation')
    # Upper panel: the observed spectrum.
    self.pspec, = ax2.plot(wave,flux_sci)
    ax2.set_ylim(np.min(flux_sci),np.max(flux_sci))
    ax2.set_xlim(wave[0],wave[-1])
    ax2.plot()
    # Plot sky lines:
    # Red = HK
    # Purple = OII, Halpha
    # Black = sky
    # Blue = Emission
    # Orange = Absorption
    vlin_pspecs = plot_skylines(ax2,redshift_est)
    # Include information in plot
    self.fig.text(0.923, 0.9, '%s' % template_waveform.name, bbox=dict(facecolor='white', alpha=1.),fontsize=18)
    self.fig.text(0.922, 0.8, 'Blue/ = Emission\nPurple Lines', bbox=dict(facecolor='white', alpha=1.))
    self.fig.text(0.922, 0.78, 'Red/ = Absorption\nOrange Lines', bbox=dict(facecolor='white', alpha=1.))
    self.fig.text(0.922, 0.74, 'Black = Sky\n Lines', bbox=dict(facecolor='white', alpha=1.))
    # from left, from bottom, width, height
    rax = plt.axes([0.925, 0.43, 0.07, 0.22])
    # Setup User input box on plot
    if self.qualityval == 1:
        radio = RadioButtons(rax, ('1 - No Clue ','2 - Slight\n Chance', '3 - Maybe', '4 - Probably', '5 - Clear'))
    else:
        radio = RadioButtons(rax, ('1 - No Clue ','2 - Slight\n Chance', '3 - Maybe', '4 - Probably', '5 - Clear'),active=1)
    def qualfunc(label):
        # Map the selected radio label to a numeric quality flag (1-5).
        if label == '5 - Clear':
            self.qualityval = 5
        elif label == '4 - Probably':
            self.qualityval = 4
        elif label == '3 - Maybe':
            self.qualityval = 3
        elif label == '2 - Slight\n Chance':
            self.qualityval = 2
        else:
            self.qualityval = 1
    radio.on_clicked(qualfunc)
    # from left, from bottom, width, height
    closeax = plt.axes([0.93, 0.18, 0.06, 0.08])
    button = Button(closeax, 'Accept & Close', hovercolor='0.975')
    def closeplot(event):
        plt.close()
    button.on_clicked(closeplot)
    skip_spec_ax = plt.axes([0.93, 0.94, 0.06, 0.04])
    skip_button = Button(skip_spec_ax, 'skip spectra', hovercolor='0.975')
    def skip_spec(event):
        # Skipping marks the spectrum with quality 0 (-> return None below).
        plt.close()
        self.qualityval = 0
    skip_button.on_clicked(skip_spec)
    #ax2.set_xlim(self.lower_w,self.upper_w)
    ax2.set_xlabel('Wavelength (A)')
    ax2.set_ylabel('Counts')
    # Setup the classification
    if self.first_pass:
        # First pass: ask the user to click where a known line sits.
        line_est = Estimateline(self.pspec,ax2,self.uline_n)
    else:
        # Later passes: let the user drag the reference lines to refine z.
        spectra2 = DragSpectra(vlin_pspecs,pspec_corr,redshift_est,ax2)
        self.fig.canvas.mpl_connect('motion_notify_event',spectra2.on_motion)
        self.fig.canvas.mpl_connect('button_press_event',spectra2.on_press)
        self.fig.canvas.mpl_connect('button_release_event',spectra2.on_release)
    plt.show()
    if self.qualityval == 0:
        return None
    elif self.first_pass:
        if line_est.lam == 0:
            return None
        else:
            # Convert the clicked wavelength of the known line to a redshift.
            return line_est.lam/self.uline - 1.0
    else:
        return spectra2.finalz
class DragSpectra:
    '''Class to drage the spectra back and forth to match lines of interest'''

    def __init__(self, vlin_spectra, corr_spec, redshift_estimate, ax5):
        # Axes in which dragging is allowed.
        self.ax5 = ax5
        self.corr_spec = corr_spec
        # y-samples of the correlation marker, reused on every redraw.
        self.yzs = self.corr_spec.get_data()[1]
        print('begin shift')
        self.vlin_spectra = vlin_spectra
        # y-samples shared by all reference lines.
        self.vline_ys = vlin_spectra[0].get_data()[1]
        self.pressed = False
        self.finalz = redshift_estimate

    def on_motion(self, evt):
        """While the button is held, stretch the reference lines with the mouse."""
        if not self.pressed:
            return
        stretch = evt.xdata / self.mouse_x
        # Stretching wavelengths by `stretch` maps z0 -> (1+z0)*stretch - 1.
        newz = stretch * (1. + self.z_on_press) - 1.
        shifted = self.vline_lams * stretch
        for line, lam in zip(self.vlin_spectra, shifted):
            line.set_data([lam, lam], self.vline_ys)
        self.corr_spec.set_data([newz, newz], self.yzs)
        plt.draw()

    def on_press(self, evt):
        """Record the drag anchor when the user clicks inside the axes."""
        if evt.inaxes != self.ax5:
            return
        self.mouse_x = evt.xdata
        self.z_on_press = self.corr_spec.get_data()[0][0]
        # Current wavelength of each reference line.
        self.vline_lams = np.array(
            [line.get_data()[0][0] for line in self.vlin_spectra])
        self.pressed = True

    def on_release(self, evt):
        """Stop dragging and remember the redshift the user settled on."""
        if evt.inaxes != self.ax5:
            return
        self.pressed = False
        try:
            self.finalz = self.corr_spec.get_data()[0][0]
        except AttributeError:
            pass  # keep the previous estimate
class Estimateline:
    '''Class to manually estimate where lines are located'''

    def __init__(self, pspec, ax5, uline):
        instructions = ('If redshift calibration appears correct, hit "Accept and Close". '
                        'Otherwise, "right click" approx. where the ' + uline +
                        ' line is in the plotted spectrum. '
                        'The program will re-correlate based on this guess.')
        print(instructions)
        self.ax5 = ax5
        # Listen for mouse clicks on the spectrum figure.
        self.cid3 = pspec.figure.canvas.mpl_connect('button_press_event', self.onclick)
        # 0 means "no line clicked yet".
        self.lam = 0

    def on_key_press(self, event):
        """Remember that shift is currently held."""
        if event.key == 'shift':
            self.shift_is_held = True

    def on_key_release(self, event):
        """Remember that shift was released."""
        if event.key == 'shift':
            self.shift_is_held = False

    def onclick(self, event):
        """On right-click inside the axes, record the clicked wavelength and close."""
        if event.inaxes != self.ax5:
            return
        if event.button == 3:
            print('xdata=%f, ydata%f' % (event.xdata, event.ydata))
            self.lam = event.xdata
            plt.close()
# Script entry point: launch an interactive redshift-estimation session.
if __name__ == '__main__':
    R = z_est()
| [
"numpy.abs",
"matplotlib.pyplot.axes",
"numpy.greater",
"matplotlib.pyplot.figure",
"numpy.arange",
"scipy.interpolate.interp1d",
"matplotlib.widgets.RadioButtons",
"matplotlib.pyplot.close",
"numpy.isfinite",
"zestipy.data_structures.load_sdss_templatefiles",
"matplotlib.pyplot.draw",
"numpy.... | [((920, 932), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (928, 932), True, 'import numpy as np\n'), ((1082, 1131), 'numpy.arange', 'np.arange', (['self.lower_z', 'self.upper_z', 'self.z_res'], {}), '(self.lower_z, self.upper_z, self.z_res)\n', (1091, 1131), True, 'import numpy as np\n'), ((1156, 1181), 'numpy.zeros', 'np.zeros', (['self.ztest.size'], {}), '(self.ztest.size)\n', (1164, 1181), True, 'import numpy as np\n'), ((3875, 3925), 'numpy.append', 'np.append', (['self.template_waveforms', '[new_waveform]'], {}), '(self.template_waveforms, [new_waveform])\n', (3884, 3925), True, 'import numpy as np\n'), ((4178, 4227), 'zestipy.data_structures.load_sdss_templatefiles', 'load_sdss_templatefiles', (['path_to_files', 'filenames'], {}), '(path_to_files, filenames)\n', (4201, 4227), False, 'from zestipy.data_structures import waveform, redshift_data, load_sdss_templatefiles\n'), ((6227, 6304), 'astropy.table.Table', 'Table', ([], {'names': "['template', 'max_cor', 'redshift']", 'dtype': "['S10', float, float]"}), "(names=['template', 'max_cor', 'redshift'], dtype=['S10', float, float])\n", (6232, 6304), False, 'from astropy.table import Table\n'), ((9574, 9609), 'numpy.greater', 'np.greater', (['test_wave', 'self.lower_w'], {}), '(test_wave, self.lower_w)\n', (9584, 9609), True, 'import numpy as np\n'), ((9638, 9670), 'numpy.less', 'np.less', (['test_wave', 'self.upper_w'], {}), '(test_wave, self.upper_w)\n', (9645, 9670), True, 'import numpy as np\n'), ((9939, 9967), 'numpy.where', 'np.where', (['(self.ztest < z_max)'], {}), '(self.ztest < z_max)\n', (9947, 9967), True, 'import numpy as np\n'), ((11654, 11677), 'numpy.isfinite', 'np.isfinite', (['corr_val_i'], {}), '(corr_val_i)\n', (11665, 11677), True, 'import numpy as np\n'), ((12670, 12705), 'numpy.nanargmax', 'np.nanargmax', (['corr_val[zrange_mask]'], {}), '(corr_val[zrange_mask])\n', (12682, 12705), True, 'import numpy as np\n'), ((13334, 13361), 'matplotlib.pyplot.figure', 'plt.figure', 
([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (13344, 13361), True, 'import matplotlib.pyplot as plt\n'), ((13376, 13421), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(2)', '(1)'], {'height_ratios': '[2, 1]'}), '(2, 1, height_ratios=[2, 1])\n', (13393, 13421), False, 'from matplotlib import gridspec\n'), ((13436, 13454), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0]'], {}), '(gs[0])\n', (13447, 13454), True, 'import matplotlib.pyplot as plt\n'), ((13468, 13486), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1]'], {}), '(gs[1])\n', (13479, 13486), True, 'import matplotlib.pyplot as plt\n'), ((13495, 13560), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.96)', 'bottom': '(0.04)', 'left': '(0.04)', 'right': '(0.92)'}), '(top=0.96, bottom=0.04, left=0.04, right=0.92)\n', (13514, 13560), True, 'import matplotlib.pyplot as plt\n'), ((13580, 13609), 'matplotlib.pyplot.get_current_fig_manager', 'plt.get_current_fig_manager', ([], {}), '()\n', (13607, 13609), True, 'import matplotlib.pyplot as plt\n'), ((14161, 14193), 'zestipy.plotting_tools.plot_skylines', 'plot_skylines', (['ax2', 'redshift_est'], {}), '(ax2, redshift_est)\n', (14174, 14193), False, 'from zestipy.plotting_tools import plot_skylines\n'), ((14753, 14788), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.925, 0.43, 0.07, 0.22]'], {}), '([0.925, 0.43, 0.07, 0.22])\n', (14761, 14788), True, 'import matplotlib.pyplot as plt\n'), ((15647, 15681), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.93, 0.18, 0.06, 0.08]'], {}), '([0.93, 0.18, 0.06, 0.08])\n', (15655, 15681), True, 'import matplotlib.pyplot as plt\n'), ((15699, 15752), 'matplotlib.widgets.Button', 'Button', (['closeax', '"""Accept & Close"""'], {'hovercolor': '"""0.975"""'}), "(closeax, 'Accept & Close', hovercolor='0.975')\n", (15705, 15752), False, 'from matplotlib.widgets import RadioButtons, Button\n'), ((15890, 15924), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.93, 0.94, 0.06, 0.04]'], {}), 
'([0.93, 0.94, 0.06, 0.04])\n', (15898, 15924), True, 'import matplotlib.pyplot as plt\n'), ((15947, 16003), 'matplotlib.widgets.Button', 'Button', (['skip_spec_ax', '"""skip spectra"""'], {'hovercolor': '"""0.975"""'}), "(skip_spec_ax, 'skip spectra', hovercolor='0.975')\n", (15953, 16003), False, 'from matplotlib.widgets import RadioButtons, Button\n'), ((16772, 16782), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16780, 16782), True, 'import matplotlib.pyplot as plt\n'), ((5527, 5557), 'matplotlib.pyplot.plot', 'plt.plot', (['test_wave', 'test_flux'], {}), '(test_wave, test_flux)\n', (5535, 5557), True, 'import matplotlib.pyplot as plt\n'), ((5569, 5605), 'matplotlib.pyplot.xlim', 'plt.xlim', (['self.lower_w', 'self.upper_w'], {}), '(self.lower_w, self.upper_w)\n', (5577, 5605), True, 'import matplotlib.pyplot as plt\n'), ((5617, 5627), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5625, 5627), True, 'import matplotlib.pyplot as plt\n'), ((6109, 6124), 'zestipy.data_structures.redshift_data', 'redshift_data', ([], {}), '()\n', (6122, 6124), False, 'from zestipy.data_structures import waveform, redshift_data, load_sdss_templatefiles\n'), ((6166, 6193), 'zestipy.data_structures.redshift_data', 'redshift_data', ([], {'qualityval': '(0)'}), '(qualityval=0)\n', (6179, 6193), False, 'from zestipy.data_structures import waveform, redshift_data, load_sdss_templatefiles\n'), ((7629, 7640), 'numpy.max', 'np.max', (['cor'], {}), '(cor)\n', (7635, 7640), True, 'import numpy as np\n'), ((9776, 9794), 'numpy.sort', 'np.sort', (['test_wave'], {}), '(test_wave)\n', (9783, 9794), True, 'import numpy as np\n'), ((10986, 11026), 'numpy.bitwise_and', 'np.bitwise_and', (['lower_bound', 'upper_bound'], {}), '(lower_bound, upper_bound)\n', (11000, 11026), True, 'import numpy as np\n'), ((11310, 11347), 'scipy.interpolate.interp1d', 'interp1d', (['wshift', 'cont_subd_temp_flux'], {}), '(wshift, cont_subd_temp_flux)\n', (11318, 11347), False, 'from 
scipy.interpolate import interp1d\n'), ((11964, 12003), 'numpy.max', 'np.max', (['[z_est - 5 * unc, self.lower_z]'], {}), '([z_est - 5 * unc, self.lower_z])\n', (11970, 12003), True, 'import numpy as np\n'), ((12025, 12064), 'numpy.min', 'np.min', (['[z_est + 5 * unc, self.upper_z]'], {}), '([z_est + 5 * unc, self.upper_z])\n', (12031, 12064), True, 'import numpy as np\n'), ((13892, 13908), 'numpy.min', 'np.min', (['flux_sci'], {}), '(flux_sci)\n', (13898, 13908), True, 'import numpy as np\n'), ((13909, 13925), 'numpy.max', 'np.max', (['flux_sci'], {}), '(flux_sci)\n', (13915, 13925), True, 'import numpy as np\n'), ((14890, 15005), 'matplotlib.widgets.RadioButtons', 'RadioButtons', (['rax', '(\'1 - No Clue \', """2 - Slight\n Chance""", \'3 - Maybe\',\n \'4 - Probably\', \'5 - Clear\')'], {}), '(rax, (\'1 - No Clue \', """2 - Slight\n Chance""",\n \'3 - Maybe\', \'4 - Probably\', \'5 - Clear\'))\n', (14902, 15005), False, 'from matplotlib.widgets import RadioButtons, Button\n'), ((15032, 15157), 'matplotlib.widgets.RadioButtons', 'RadioButtons', (['rax', '(\'1 - No Clue \', """2 - Slight\n Chance""", \'3 - Maybe\',\n \'4 - Probably\', \'5 - Clear\')'], {'active': '(1)'}), '(rax, (\'1 - No Clue \', """2 - Slight\n Chance""",\n \'3 - Maybe\', \'4 - Probably\', \'5 - Clear\'), active=1)\n', (15044, 15157), False, 'from matplotlib.widgets import RadioButtons, Button\n'), ((15804, 15815), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (15813, 15815), True, 'import matplotlib.pyplot as plt\n'), ((16055, 16066), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (16064, 16066), True, 'import matplotlib.pyplot as plt\n'), ((18365, 18375), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (18373, 18375), True, 'import matplotlib.pyplot as plt\n'), ((2444, 2464), 'numpy.float', 'np.float', (['self.uline'], {}), '(self.uline)\n', (2452, 2464), True, 'import numpy as np\n'), ((9873, 9904), 'numpy.float', 'np.float', (['temp_waveform.min_lam'], {}), 
'(temp_waveform.min_lam)\n', (9881, 9904), True, 'import numpy as np\n'), ((10734, 10767), 'numpy.greater', 'np.greater', (['test_wave', 'min_wshift'], {}), '(test_wave, min_wshift)\n', (10744, 10767), True, 'import numpy as np\n'), ((10905, 10935), 'numpy.less', 'np.less', (['test_wave', 'max_wshift'], {}), '(test_wave, max_wshift)\n', (10912, 10935), True, 'import numpy as np\n'), ((11520, 11559), 'scipy.stats.pearsonr', 'pearsonr', (['et_flux_range', 'test_flux_vals'], {}), '(et_flux_range, test_flux_vals)\n', (11528, 11559), False, 'from scipy.stats import pearsonr\n'), ((12413, 12482), 'numpy.where', 'np.where', (['((finite_ztest > zlowerbound) & (finite_ztest < zupperbound))'], {}), '((finite_ztest > zlowerbound) & (finite_ztest < zupperbound))\n', (12421, 12482), True, 'import numpy as np\n'), ((12544, 12615), 'numpy.where', 'np.where', (['((finite_ztest > self.lower_z) & (finite_ztest < self.upper_z))'], {}), '((finite_ztest > self.lower_z) & (finite_ztest < self.upper_z))\n', (12552, 12615), True, 'import numpy as np\n'), ((19929, 19940), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (19938, 19940), True, 'import matplotlib.pyplot as plt\n'), ((5015, 5025), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5023, 5025), False, 'import sys\n'), ((5729, 5748), 'numpy.float', 'np.float', (['line_init'], {}), '(line_init)\n', (5737, 5748), True, 'import numpy as np\n'), ((5079, 5089), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5087, 5089), False, 'import sys\n'), ((5149, 5165), 'numpy.float', 'np.float', (['nospec'], {}), '(nospec)\n', (5157, 5165), True, 'import numpy as np\n'), ((7393, 7404), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (7401, 7404), True, 'import numpy as np\n'), ((7297, 7327), 'numpy.abs', 'np.abs', (['(ztest - user_zestimate)'], {}), '(ztest - user_zestimate)\n', (7303, 7327), True, 'import numpy as np\n')] |
# Copyright 2020 AstroLab Software
# Author: <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dash
from dash.exceptions import PreventUpdate
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import dash_table
import healpy as hp
import pandas as pd
import numpy as np
import astropy.units as u
from astropy.time import Time, TimeDelta
from astropy.coordinates import SkyCoord
from app import app
from app import client, clientT, clientP, clientS, nlimit
from apps.utils import extract_row
from apps.utils import convert_jd
from apps.utils import extract_fink_classification
from apps.utils import markdownify_objectid
# Markdown help text shown on the landing page while no query has been run.
msg = """
Fill one of the field on the left, and click on the _Submit Query_ button.
* _**Search by Object ID:** Enter a valid object ID to access its data, e.g. try_:
* ZTF19acmdpyr, ZTF19acnjwgm, ZTF17aaaabte, ZTF20abqehqf, ZTF18acuajcr
* _**Conesearch:** Peform a conesearch around a position on the sky given by (RA, Dec, radius). RA/Dec can be in decimal degrees, or sexagesimal in the form hh:mm:ss and dd:mm:ss. Radius is in arcsecond. Examples of valid searches:_
* 271.3914265, 45.2545134, 5 or 18:05:33.94, 45:15:16.25, 5
* _**Search by Date:** Choose a starting date and a time window to see all alerts in this period. Dates are in UTC, and the time window in minutes. Example of valid search:_
* 2019-11-03 02:40:00
* _**Get latest 100 alerts by class:** Choose a class of interest using the dropdown menu to see the 100 latest alerts processed by Fink._
_The table shows:_
- _objectId: Unique identifier for this object_
- _RA: Right Ascension of candidate; J2000 (deg)_
- _Dec: Declination of candidate; J2000 (deg)_
- _last seen: last date the object has been seen by Fink_
- _classification: Classification inferred by Fink (Supernova candidate, Microlensing candidate, Solar System Object, SIMBAD class, ...)_
- _ndethist: Number of spatially-coincident detections falling within 1.5 arcsec going back to beginning of survey; only detections that fell on the same field and readout-channel ID where the input candidate was observed are counted. All raw detections down to a photometric S/N of ~ 3 are included._
"""
# Known SIMBAD object types, used to populate the class dropdown below.
simbad_types = pd.read_csv('assets/simbad_types.csv', header=None)[0].values
simbad_types = np.sort(simbad_types)
# Free-text input for a single ZTF object ID lookup.
object_id = dbc.FormGroup(
    [
        dbc.Label("Search by Object ID"),
        dbc.Input(
            placeholder="e.g. ZTF19acmdpyr",
            type="text",
            id='objectid',
            debounce=True
        ),
        dbc.FormText("Enter a ZTF objectId, e.g. ZTF19acnjwgm"),
    ], style={'width': '100%', 'display': 'inline-block'}
)
# Input for a conesearch query: "ra, dec, radius" (degrees or sexagesimal; arcsec).
conesearch = dbc.FormGroup(
    [
        dbc.Label("Conesearch"),
        dbc.Input(
            placeholder="ra, dec, radius",
            type="text",
            id='conesearch',
            debounce=True
        ),
        dbc.FormText("e.g. 271.3914265, 45.2545134, 5"),
    ], style={'width': '100%', 'display': 'inline-block'}
)
# Inputs for a time-window search: a UTC start date plus a window in minutes.
date_range = dbc.FormGroup(
    [
        dbc.Label("Search by Date"),
        html.Br(),
        dbc.Input(
            placeholder="YYYY-MM-DD HH:mm:ss",
            type="text",
            id='startdate',
            debounce=True,
        ),
        dbc.FormText("Start date in UTC, e.g. 2019-11-03 02:40:00"),
        dbc.Input(
            placeholder="window [min]",
            type="number",
            id='window',
            max=180,
            debounce=True,
        ),
        dbc.FormText("Time window in minutes"),
    ], style={'width': '100%', 'display': 'inline-block'}
)
# Dropdown to fetch the latest 100 alerts for a given classification.
dropdown = dbc.FormGroup(
    [
        dbc.Label("Get latest 100 alerts by class"),
        dcc.Dropdown(
            id='class-dropdown',
            options=[
                {'label': 'All classes', 'value': 'allclasses'},
                {'label': 'Fink derived classes', 'disabled': True, 'value': 'None'},
                {'label': 'Early Supernova candidates', 'value': 'Early SN candidate'},
                {'label': 'Supernova candidates', 'value': 'SN candidate'},
                {'label': 'Microlensing candidates', 'value': 'Microlensing candidate'},
                {'label': 'Solar System Object candidates', 'value': 'Solar System'},
                {'label': 'Ambiguous', 'value': 'Ambiguous'},
                {'label': 'Simbad crossmatch', 'disabled': True, 'value': 'None'},
                # One entry per SIMBAD type loaded from assets/simbad_types.csv.
                *[{'label': simtype, 'value': simtype} for simtype in simbad_types]
            ],
            searchable=True,
            clearable=True,
            placeholder="Start typing or choose a class",
        )
    ], style={'width': '100%', 'display': 'inline-block'}
)
# Button that fires the construct_table callback.
submit_button = dbc.Button(
    'Submit Query',
    id='submit_query',
    style={'width': '100%', 'display': 'inline-block'},
    block=True
)
# Toggle button for the collapsible "advanced search" panel below
# (currently commented out of the layout).
advanced_search_button = dbc.Button(
    "Advanced Search",
    id="collapse-button",
    className="mb-3",
    color="primary",
)
# Collapsible panel holding the class dropdown, toggled by the button above.
advanced_search = html.Div(
    [
        dbc.Collapse(
            dbc.Card(dbc.CardBody([dropdown])),
            id="collapse"
        ),
        html.Br()
    ], style={'width': '100%', 'display': 'inline-block'}
)
@app.callback(
    Output("collapse", "is_open"),
    [Input("collapse-button", "n_clicks")],
    [State("collapse", "is_open")],
)
def toggle_collapse(n, is_open):
    """Flip the advanced-search collapse state on each button click."""
    return (not is_open) if n else is_open
# Page layout: a left column with all the search widgets and a right column
# that holds the result table (help text is shown there by default).
layout = html.Div(
    [
        dbc.Container(
            [
                html.Br(),
                dbc.Row([
                    dbc.Col(
                        [
                            dbc.Row(
                                html.Img(
                                    src="/assets/Fink_PrimaryLogo_WEB.png",
                                    height="50px"
                                )
                            ),
                            html.Br(),
                            dbc.Row(object_id),
                            dbc.Row(conesearch),
                            dbc.Row(date_range),
                            #dbc.Row(advanced_search_button),
                            #dbc.Row(advanced_search),
                            dbc.Row(dropdown),
                            dbc.Row(submit_button),
                        ], width=3
                    ),
                    dbc.Col([
                        # The construct_table callback writes its output here.
                        html.H6(id="table"),
                        dbc.Card(
                            dbc.CardBody(
                                dcc.Markdown(msg)
                            ), style={
                                'backgroundColor': 'rgb(248, 248, 248, .7)'
                            }
                        )
                    ], width=9),
                ]),
            ], className="mb-8", fluid=True, style={'width': '95%'}
        )
    ], className='home', style={
        'background-image': 'linear-gradient(rgba(255,255,255,0.5), rgba(255,255,255,0.5)), url(/assets/background.png)',
        'background-size': 'contain'
    }
)
@app.callback(
    Output("table", "children"),
    [
        Input("submit_query", "n_clicks"),
        Input("objectid", "value"),
        Input("conesearch", "value"),
        Input('startdate', 'value'),
        Input('window', 'value'),
        Input('class-dropdown', 'value')
    ]
)
def construct_table(n_clicks, objectid, radecradius, startdate, window, alert_class):
    """ Query the HBase database and format results into a DataFrame.
    Parameters
    ----------
    n_clicks: int
        Represents the number of times that the button has been clicked on.
    objectid: str
        ObjectId as given by the user
    radecradius: str
        stringified comma-separated conesearch query (RA, Dec, radius)
    startdate: str
        Start date in format YYYY/MM/DD HH:mm:ss (UTC)
    window: int
        Number minutes
    alert_class: str
        Class of the alert to search against
    Returns
    ----------
    dash_table
        Dash table containing aggregated data by object ID.
    """
    # Trigger the query only if the submit button is pressed.
    changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
    if 'submit_query' not in changed_id:
        raise PreventUpdate
    # Detect empty/missing query fields.
    wrong_id = (objectid is None) or (objectid == '')
    wrong_conesearch = (radecradius is None) or (radecradius == '')
    wrong_date = (startdate is None) or (startdate == '')
    wrong_class = (alert_class is None) or (alert_class == '')
    # If nothing has been filled
    if n_clicks is not None and wrong_id and wrong_conesearch and wrong_date and wrong_class:
        return html.Table()
    # Columns of interest
    colnames = [
        'i:objectId', 'i:ra', 'i:dec', 'i:jd', 'd:cdsxmatch', 'i:ndethist'
    ]
    # Extra science-module columns needed to derive the Fink classification.
    colnames_added_values = [
        'd:cdsxmatch',
        'd:roid',
        'd:mulens_class_1',
        'd:mulens_class_2',
        'd:snn_snia_vs_nonia',
        'd:snn_sn_vs_all',
        'd:rfscore',
        'i:ndethist',
        'i:drb',
        'i:classtar'
    ]
    # Column name to display
    colnames_to_display = [
        'objectId', 'RA', 'Dec', 'last seen', 'classification', 'ndethist'
    ]
    # Types of columns.
    # BUGFIX: np.str, np.float and np.int were deprecated aliases of the
    # Python builtins and have been removed from numpy (>=1.24); using the
    # builtins directly is equivalent for DataFrame.astype.
    dtypes_ = [
        str, float, float, float, str, int
    ]
    dtypes = {i: j for i, j in zip(colnames, dtypes_)}
    # default table
    if n_clicks is None:
        # TODO: change that to date search
        return html.Table()
    # Search for latest alerts for a specific class
    if alert_class is not None and alert_class != '' and alert_class != 'allclasses':
        clientS.setLimit(100)
        clientS.setRangeScan(True)
        clientS.setReversed(True)
        # start of the Fink operations
        jd_start = Time('2019-11-01 00:00:00').jd
        jd_stop = Time.now().jd
        results = clientS.scan(
            "",
            "key:key:{}_{},key:key:{}_{}".format(
                alert_class,
                jd_start,
                alert_class,
                jd_stop
            ),
            ",".join(colnames + colnames_added_values), 0, False, False
        )
    # Search for latest alerts (all classes)
    elif alert_class == 'allclasses':
        clientT.setLimit(100)
        clientT.setRangeScan(True)
        clientT.setReversed(True)
        # start of the Fink operations
        jd_start = Time('2019-11-01 00:00:00').jd
        jd_stop = Time.now().jd
        to_evaluate = "key:key:{},key:key:{}".format(jd_start, jd_stop)
        results = clientT.scan(
            "",
            to_evaluate,
            ",".join(colnames + colnames_added_values),
            0, True, True
        )
    elif radecradius is not None and radecradius != '':
        clientP.setLimit(1000)
        # Interpret user input.
        # TODO: unsafe method...
        ra, dec, radius = radecradius.split(',')
        if ':' in ra:
            # Sexagesimal input (hh:mm:ss, dd:mm:ss); radius in arcsec -> deg.
            coord = SkyCoord(ra, dec, unit=(u.hourangle, u.deg))
            ra = coord.ra.deg
            dec = coord.dec.deg
            radius = float(radius) / 3600.
        else:
            # Decimal degrees; radius in arcsec -> deg.
            ra, dec, radius = float(ra), float(dec), float(radius) / 3600.
        # angle to vec conversion
        vec = hp.ang2vec(np.pi / 2.0 - np.pi / 180.0 * dec, np.pi / 180.0 * ra)
        # list of neighbour pixels
        pixs = hp.query_disc(131072, vec, np.pi / 180 * radius, inclusive=True)
        # Send request
        to_evaluate = ",".join(['key:key:{}'.format(i) for i in pixs])
        results = clientP.scan(
            "",
            to_evaluate,
            ",".join(colnames + colnames_added_values),
            0, True, True
        )
    elif startdate is not None and window is not None and startdate != '':
        # Time to jd
        jd_start = Time(startdate).jd
        jd_end = jd_start + TimeDelta(window * 60, format='sec').jd
        # Send the request. RangeScan.
        clientT.setRangeScan(True)
        to_evaluate = "key:key:{},key:key:{}".format(jd_start, jd_end)
        results = clientT.scan(
            "",
            to_evaluate,
            ",".join(colnames + colnames_added_values),
            0, True, True
        )
    else:
        # objectId search
        # TODO: check input with a regex
        to_evaluate = "key:key:{}".format(objectid)
        results = client.scan(
            "",
            to_evaluate,
            ",".join(colnames + colnames_added_values),
            0, True, True
        )
    # reset the limit in case it has been changed above
    client.setLimit(nlimit)
    if results.isEmpty():
        return html.Table()
    # Loop over results and construct the dataframe
    pdfs = pd.DataFrame.from_dict(results, orient='index')
    # Fink final classification
    classifications = extract_fink_classification(
        pdfs['d:cdsxmatch'],
        pdfs['d:roid'],
        pdfs['d:mulens_class_1'],
        pdfs['d:mulens_class_2'],
        pdfs['d:snn_snia_vs_nonia'],
        pdfs['d:snn_sn_vs_all'],
        pdfs['d:rfscore'],
        pdfs['i:ndethist'],
        pdfs['i:drb'],
        pdfs['i:classtar']
    )
    # inplace (booo)
    pdfs['d:cdsxmatch'] = classifications
    pdfs = pdfs[colnames]
    # Make clickable objectId
    pdfs['i:objectId'] = pdfs['i:objectId'].apply(markdownify_objectid)
    # Column values are string by default - convert them
    pdfs = pdfs.astype(dtype=dtypes)
    # Rename columns
    pdfs = pdfs.rename(
        columns={i: j for i, j in zip(colnames, colnames_to_display)}
    )
    # Display only the last alert
    pdfs = pdfs.loc[pdfs.groupby('objectId')['last seen'].idxmax()]
    pdfs['last seen'] = pdfs['last seen'].apply(convert_jd)
    # round numeric values for better display
    pdfs = pdfs.round(2)
    table = dash_table.DataTable(
        data=pdfs.sort_values('last seen', ascending=False).to_dict('records'),
        columns=[
            {
                'id': c,
                'name': c,
                'type': 'text',
                'presentation': 'markdown'
            } for c in colnames_to_display
        ],
        page_size=10,
        style_as_list_view=True,
        sort_action="native",
        filter_action="native",
        markdown_options={'link_target': '_blank'},
        style_data={
            'backgroundColor': 'rgb(248, 248, 248, .7)'
        },
        style_cell={'padding': '5px', 'textAlign': 'center'},
        style_data_conditional=[
            {
                'if': {'row_index': 'odd'},
                'backgroundColor': 'rgb(248, 248, 248, .7)'
            }
        ],
        style_header={
            'backgroundColor': 'rgb(230, 230, 230)',
            'fontWeight': 'bold'
        }
    )
    return table
| [
"app.clientS.setReversed",
"app.clientT.setRangeScan",
"app.clientP.setLimit",
"pandas.read_csv",
"dash_bootstrap_components.CardBody",
"app.client.setLimit",
"app.clientS.setRangeScan",
"app.clientS.setLimit",
"dash_html_components.Table",
"dash_bootstrap_components.Label",
"dash.dependencies.S... | [((2881, 2902), 'numpy.sort', 'np.sort', (['simbad_types'], {}), '(simbad_types)\n', (2888, 2902), True, 'import numpy as np\n'), ((5289, 5402), 'dash_bootstrap_components.Button', 'dbc.Button', (['"""Submit Query"""'], {'id': '"""submit_query"""', 'style': "{'width': '100%', 'display': 'inline-block'}", 'block': '(True)'}), "('Submit Query', id='submit_query', style={'width': '100%',\n 'display': 'inline-block'}, block=True)\n", (5299, 5402), True, 'import dash_bootstrap_components as dbc\n'), ((5443, 5534), 'dash_bootstrap_components.Button', 'dbc.Button', (['"""Advanced Search"""'], {'id': '"""collapse-button"""', 'className': '"""mb-3"""', 'color': '"""primary"""'}), "('Advanced Search', id='collapse-button', className='mb-3', color\n ='primary')\n", (5453, 5534), True, 'import dash_bootstrap_components as dbc\n'), ((5788, 5817), 'dash.dependencies.Output', 'Output', (['"""collapse"""', '"""is_open"""'], {}), "('collapse', 'is_open')\n", (5794, 5817), False, 'from dash.dependencies import Input, Output, State\n'), ((13096, 13119), 'app.client.setLimit', 'client.setLimit', (['nlimit'], {}), '(nlimit)\n', (13111, 13119), False, 'from app import client, clientT, clientP, clientS, nlimit\n'), ((13239, 13286), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['results'], {'orient': '"""index"""'}), "(results, orient='index')\n", (13261, 13286), True, 'import pandas as pd\n'), ((13342, 13600), 'apps.utils.extract_fink_classification', 'extract_fink_classification', (["pdfs['d:cdsxmatch']", "pdfs['d:roid']", "pdfs['d:mulens_class_1']", "pdfs['d:mulens_class_2']", "pdfs['d:snn_snia_vs_nonia']", "pdfs['d:snn_sn_vs_all']", "pdfs['d:rfscore']", "pdfs['i:ndethist']", "pdfs['i:drb']", "pdfs['i:classtar']"], {}), "(pdfs['d:cdsxmatch'], pdfs['d:roid'], pdfs[\n 'd:mulens_class_1'], pdfs['d:mulens_class_2'], pdfs[\n 'd:snn_snia_vs_nonia'], pdfs['d:snn_sn_vs_all'], pdfs['d:rfscore'],\n pdfs['i:ndethist'], pdfs['i:drb'], pdfs['i:classtar'])\n", 
(13369, 13600), False, 'from apps.utils import extract_fink_classification\n'), ((7614, 7641), 'dash.dependencies.Output', 'Output', (['"""table"""', '"""children"""'], {}), "('table', 'children')\n", (7620, 7641), False, 'from dash.dependencies import Input, Output, State\n'), ((2804, 2855), 'pandas.read_csv', 'pd.read_csv', (['"""assets/simbad_types.csv"""'], {'header': 'None'}), "('assets/simbad_types.csv', header=None)\n", (2815, 2855), True, 'import pandas as pd\n'), ((2945, 2977), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Search by Object ID"""'], {}), "('Search by Object ID')\n", (2954, 2977), True, 'import dash_bootstrap_components as dbc\n'), ((2987, 3076), 'dash_bootstrap_components.Input', 'dbc.Input', ([], {'placeholder': '"""e.g. ZTF19acmdpyr"""', 'type': '"""text"""', 'id': '"""objectid"""', 'debounce': '(True)'}), "(placeholder='e.g. ZTF19acmdpyr', type='text', id='objectid',\n debounce=True)\n", (2996, 3076), True, 'import dash_bootstrap_components as dbc\n'), ((3140, 3195), 'dash_bootstrap_components.FormText', 'dbc.FormText', (['"""Enter a ZTF objectId, e.g. ZTF19acnjwgm"""'], {}), "('Enter a ZTF objectId, e.g. ZTF19acnjwgm')\n", (3152, 3195), True, 'import dash_bootstrap_components as dbc\n'), ((3300, 3323), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Conesearch"""'], {}), "('Conesearch')\n", (3309, 3323), True, 'import dash_bootstrap_components as dbc\n'), ((3333, 3422), 'dash_bootstrap_components.Input', 'dbc.Input', ([], {'placeholder': '"""ra, dec, radius"""', 'type': '"""text"""', 'id': '"""conesearch"""', 'debounce': '(True)'}), "(placeholder='ra, dec, radius', type='text', id='conesearch',\n debounce=True)\n", (3342, 3422), True, 'import dash_bootstrap_components as dbc\n'), ((3486, 3533), 'dash_bootstrap_components.FormText', 'dbc.FormText', (['"""e.g. 271.3914265, 45.2545134, 5"""'], {}), "('e.g. 
271.3914265, 45.2545134, 5')\n", (3498, 3533), True, 'import dash_bootstrap_components as dbc\n'), ((3638, 3665), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Search by Date"""'], {}), "('Search by Date')\n", (3647, 3665), True, 'import dash_bootstrap_components as dbc\n'), ((3675, 3684), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (3682, 3684), True, 'import dash_html_components as html\n'), ((3694, 3786), 'dash_bootstrap_components.Input', 'dbc.Input', ([], {'placeholder': '"""YYYY-MM-DD HH:mm:ss"""', 'type': '"""text"""', 'id': '"""startdate"""', 'debounce': '(True)'}), "(placeholder='YYYY-MM-DD HH:mm:ss', type='text', id='startdate',\n debounce=True)\n", (3703, 3786), True, 'import dash_bootstrap_components as dbc\n'), ((3851, 3910), 'dash_bootstrap_components.FormText', 'dbc.FormText', (['"""Start date in UTC, e.g. 2019-11-03 02:40:00"""'], {}), "('Start date in UTC, e.g. 2019-11-03 02:40:00')\n", (3863, 3910), True, 'import dash_bootstrap_components as dbc\n'), ((3920, 4013), 'dash_bootstrap_components.Input', 'dbc.Input', ([], {'placeholder': '"""window [min]"""', 'type': '"""number"""', 'id': '"""window"""', 'max': '(180)', 'debounce': '(True)'}), "(placeholder='window [min]', type='number', id='window', max=180,\n debounce=True)\n", (3929, 4013), True, 'import dash_bootstrap_components as dbc\n'), ((4090, 4128), 'dash_bootstrap_components.FormText', 'dbc.FormText', (['"""Time window in minutes"""'], {}), "('Time window in minutes')\n", (4102, 4128), True, 'import dash_bootstrap_components as dbc\n'), ((4231, 4274), 'dash_bootstrap_components.Label', 'dbc.Label', (['"""Get latest 100 alerts by class"""'], {}), "('Get latest 100 alerts by class')\n", (4240, 4274), True, 'import dash_bootstrap_components as dbc\n'), ((4284, 5024), 'dash_core_components.Dropdown', 'dcc.Dropdown', ([], {'id': '"""class-dropdown"""', 'options': "[{'label': 'All classes', 'value': 'allclasses'}, {'label':\n 'Fink derived classes', 'disabled': True, 'value': 
'None'}, {'label':\n 'Early Supernova candidates', 'value': 'Early SN candidate'}, {'label':\n 'Supernova candidates', 'value': 'SN candidate'}, {'label':\n 'Microlensing candidates', 'value': 'Microlensing candidate'}, {'label':\n 'Solar System Object candidates', 'value': 'Solar System'}, {'label':\n 'Ambiguous', 'value': 'Ambiguous'}, {'label': 'Simbad crossmatch',\n 'disabled': True, 'value': 'None'}, *[{'label': simtype, 'value':\n simtype} for simtype in simbad_types]]", 'searchable': '(True)', 'clearable': '(True)', 'placeholder': '"""Start typing or choose a class"""'}), "(id='class-dropdown', options=[{'label': 'All classes', 'value':\n 'allclasses'}, {'label': 'Fink derived classes', 'disabled': True,\n 'value': 'None'}, {'label': 'Early Supernova candidates', 'value':\n 'Early SN candidate'}, {'label': 'Supernova candidates', 'value':\n 'SN candidate'}, {'label': 'Microlensing candidates', 'value':\n 'Microlensing candidate'}, {'label': 'Solar System Object candidates',\n 'value': 'Solar System'}, {'label': 'Ambiguous', 'value': 'Ambiguous'},\n {'label': 'Simbad crossmatch', 'disabled': True, 'value': 'None'}, *[{\n 'label': simtype, 'value': simtype} for simtype in simbad_types]],\n searchable=True, clearable=True, placeholder=\n 'Start typing or choose a class')\n", (4296, 5024), True, 'import dash_core_components as dcc\n'), ((5698, 5707), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (5705, 5707), True, 'import dash_html_components as html\n'), ((5824, 5860), 'dash.dependencies.Input', 'Input', (['"""collapse-button"""', '"""n_clicks"""'], {}), "('collapse-button', 'n_clicks')\n", (5829, 5860), False, 'from dash.dependencies import Input, Output, State\n'), ((5868, 5896), 'dash.dependencies.State', 'State', (['"""collapse"""', '"""is_open"""'], {}), "('collapse', 'is_open')\n", (5873, 5896), False, 'from dash.dependencies import Input, Output, State\n'), ((9217, 9229), 'dash_html_components.Table', 'html.Table', ([], {}), '()\n', (9227, 
9229), True, 'import dash_html_components as html\n'), ((10035, 10047), 'dash_html_components.Table', 'html.Table', ([], {}), '()\n', (10045, 10047), True, 'import dash_html_components as html\n'), ((10195, 10216), 'app.clientS.setLimit', 'clientS.setLimit', (['(100)'], {}), '(100)\n', (10211, 10216), False, 'from app import client, clientT, clientP, clientS, nlimit\n'), ((10225, 10251), 'app.clientS.setRangeScan', 'clientS.setRangeScan', (['(True)'], {}), '(True)\n', (10245, 10251), False, 'from app import client, clientT, clientP, clientS, nlimit\n'), ((10260, 10285), 'app.clientS.setReversed', 'clientS.setReversed', (['(True)'], {}), '(True)\n', (10279, 10285), False, 'from app import client, clientT, clientP, clientS, nlimit\n'), ((13162, 13174), 'dash_html_components.Table', 'html.Table', ([], {}), '()\n', (13172, 13174), True, 'import dash_html_components as html\n'), ((7657, 7690), 'dash.dependencies.Input', 'Input', (['"""submit_query"""', '"""n_clicks"""'], {}), "('submit_query', 'n_clicks')\n", (7662, 7690), False, 'from dash.dependencies import Input, Output, State\n'), ((7700, 7726), 'dash.dependencies.Input', 'Input', (['"""objectid"""', '"""value"""'], {}), "('objectid', 'value')\n", (7705, 7726), False, 'from dash.dependencies import Input, Output, State\n'), ((7736, 7764), 'dash.dependencies.Input', 'Input', (['"""conesearch"""', '"""value"""'], {}), "('conesearch', 'value')\n", (7741, 7764), False, 'from dash.dependencies import Input, Output, State\n'), ((7774, 7801), 'dash.dependencies.Input', 'Input', (['"""startdate"""', '"""value"""'], {}), "('startdate', 'value')\n", (7779, 7801), False, 'from dash.dependencies import Input, Output, State\n'), ((7811, 7835), 'dash.dependencies.Input', 'Input', (['"""window"""', '"""value"""'], {}), "('window', 'value')\n", (7816, 7835), False, 'from dash.dependencies import Input, Output, State\n'), ((7845, 7877), 'dash.dependencies.Input', 'Input', (['"""class-dropdown"""', '"""value"""'], {}), 
"('class-dropdown', 'value')\n", (7850, 7877), False, 'from dash.dependencies import Input, Output, State\n'), ((10345, 10372), 'astropy.time.Time', 'Time', (['"""2019-11-01 00:00:00"""'], {}), "('2019-11-01 00:00:00')\n", (10349, 10372), False, 'from astropy.time import Time, TimeDelta\n'), ((10394, 10404), 'astropy.time.Time.now', 'Time.now', ([], {}), '()\n', (10402, 10404), False, 'from astropy.time import Time, TimeDelta\n'), ((10803, 10824), 'app.clientT.setLimit', 'clientT.setLimit', (['(100)'], {}), '(100)\n', (10819, 10824), False, 'from app import client, clientT, clientP, clientS, nlimit\n'), ((10833, 10859), 'app.clientT.setRangeScan', 'clientT.setRangeScan', (['(True)'], {}), '(True)\n', (10853, 10859), False, 'from app import client, clientT, clientP, clientS, nlimit\n'), ((10868, 10893), 'app.clientT.setReversed', 'clientT.setReversed', (['(True)'], {}), '(True)\n', (10887, 10893), False, 'from app import client, clientT, clientP, clientS, nlimit\n'), ((5626, 5650), 'dash_bootstrap_components.CardBody', 'dbc.CardBody', (['[dropdown]'], {}), '([dropdown])\n', (5638, 5650), True, 'import dash_bootstrap_components as dbc\n'), ((6070, 6079), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (6077, 6079), True, 'import dash_html_components as html\n'), ((10953, 10980), 'astropy.time.Time', 'Time', (['"""2019-11-01 00:00:00"""'], {}), "('2019-11-01 00:00:00')\n", (10957, 10980), False, 'from astropy.time import Time, TimeDelta\n'), ((11002, 11012), 'astropy.time.Time.now', 'Time.now', ([], {}), '()\n', (11010, 11012), False, 'from astropy.time import Time, TimeDelta\n'), ((11318, 11340), 'app.clientP.setLimit', 'clientP.setLimit', (['(1000)'], {}), '(1000)\n', (11334, 11340), False, 'from app import client, clientT, clientP, clientS, nlimit\n'), ((11786, 11851), 'healpy.ang2vec', 'hp.ang2vec', (['(np.pi / 2.0 - np.pi / 180.0 * dec)', '(np.pi / 180.0 * ra)'], {}), '(np.pi / 2.0 - np.pi / 180.0 * dec, np.pi / 180.0 * ra)\n', (11796, 11851), True, 
'import healpy as hp\n'), ((11903, 11967), 'healpy.query_disc', 'hp.query_disc', (['(131072)', 'vec', '(np.pi / 180 * radius)'], {'inclusive': '(True)'}), '(131072, vec, np.pi / 180 * radius, inclusive=True)\n', (11916, 11967), True, 'import healpy as hp\n'), ((11498, 11542), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['ra', 'dec'], {'unit': '(u.hourangle, u.deg)'}), '(ra, dec, unit=(u.hourangle, u.deg))\n', (11506, 11542), False, 'from astropy.coordinates import SkyCoord\n'), ((12478, 12504), 'app.clientT.setRangeScan', 'clientT.setRangeScan', (['(True)'], {}), '(True)\n', (12498, 12504), False, 'from app import client, clientT, clientP, clientS, nlimit\n'), ((12343, 12358), 'astropy.time.Time', 'Time', (['startdate'], {}), '(startdate)\n', (12347, 12358), False, 'from astropy.time import Time, TimeDelta\n'), ((12390, 12426), 'astropy.time.TimeDelta', 'TimeDelta', (['(window * 60)'], {'format': '"""sec"""'}), "(window * 60, format='sec')\n", (12399, 12426), False, 'from astropy.time import Time, TimeDelta\n'), ((6460, 6469), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (6467, 6469), True, 'import dash_html_components as html\n'), ((6499, 6517), 'dash_bootstrap_components.Row', 'dbc.Row', (['object_id'], {}), '(object_id)\n', (6506, 6517), True, 'import dash_bootstrap_components as dbc\n'), ((6547, 6566), 'dash_bootstrap_components.Row', 'dbc.Row', (['conesearch'], {}), '(conesearch)\n', (6554, 6566), True, 'import dash_bootstrap_components as dbc\n'), ((6596, 6615), 'dash_bootstrap_components.Row', 'dbc.Row', (['date_range'], {}), '(date_range)\n', (6603, 6615), True, 'import dash_bootstrap_components as dbc\n'), ((6762, 6779), 'dash_bootstrap_components.Row', 'dbc.Row', (['dropdown'], {}), '(dropdown)\n', (6769, 6779), True, 'import dash_bootstrap_components as dbc\n'), ((6809, 6831), 'dash_bootstrap_components.Row', 'dbc.Row', (['submit_button'], {}), '(submit_button)\n', (6816, 6831), True, 'import dash_bootstrap_components as dbc\n'), ((6945, 
6964), 'dash_html_components.H6', 'html.H6', ([], {'id': '"""table"""'}), "(id='table')\n", (6952, 6964), True, 'import dash_html_components as html\n'), ((6231, 6294), 'dash_html_components.Img', 'html.Img', ([], {'src': '"""/assets/Fink_PrimaryLogo_WEB.png"""', 'height': '"""50px"""'}), "(src='/assets/Fink_PrimaryLogo_WEB.png', height='50px')\n", (6239, 6294), True, 'import dash_html_components as html\n'), ((7074, 7091), 'dash_core_components.Markdown', 'dcc.Markdown', (['msg'], {}), '(msg)\n', (7086, 7091), True, 'import dash_core_components as dcc\n')] |
# Copyright 2020 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
"""Space To Batch ND tests."""
from absl import app
from iree.tf.support import tf_test_utils
from iree.tf.support import tf_utils
import numpy as np
import tensorflow.compat.v2 as tf
class SpaceToBatchModule(tf.Module):
  """tf.Module exposing a single space_to_batch_nd op for compilation tests."""

  @tf.function(input_signature=[tf.TensorSpec([1, 8, 2], tf.float32)])
  def batch_to_space_nd(self, x):
    # Pad the single spatial axis by [3, 4], then move blocks of size 3
    # into the batch dimension.
    return tf.space_to_batch_nd(x, block_shape=[3], paddings=[[3, 4]])
class SpaceToBatchTest(tf_test_utils.TracedModuleTestCase):
  """Compiles SpaceToBatchModule and compares its output across backends."""

  def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self._modules = tf_test_utils.compile_tf_module(SpaceToBatchModule)

  def test_space_to_batch_inference(self):

    def space_to_batch_inference(module):
      # Values 0..15 as float32, shaped to match the tf.function signature.
      inputs = np.arange(16, dtype=np.float32).reshape([1, 8, 2])
      module.batch_to_space_nd(inputs)

    self.compare_backends(space_to_batch_inference, self._modules)
def main(argv):
  """Enable TF2 behavior when the installed TF provides it, then run tests."""
  del argv  # Unused
  enable_v2 = getattr(tf, 'enable_v2_behavior', None)
  if enable_v2 is not None:
    enable_v2()
  tf.test.main()
# absl entry point: app.run parses flags, then invokes main(argv).
if __name__ == '__main__':
  app.run(main)
| [
"iree.tf.support.tf_test_utils.compile_tf_module",
"tensorflow.compat.v2.test.main",
"tensorflow.compat.v2.enable_v2_behavior",
"tensorflow.compat.v2.TensorSpec",
"absl.app.run",
"numpy.reshape",
"tensorflow.compat.v2.space_to_batch_nd",
"numpy.linspace"
] | [((1247, 1261), 'tensorflow.compat.v2.test.main', 'tf.test.main', ([], {}), '()\n', (1259, 1261), True, 'import tensorflow.compat.v2 as tf\n'), ((1293, 1306), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (1300, 1306), False, 'from absl import app\n'), ((604, 650), 'tensorflow.compat.v2.space_to_batch_nd', 'tf.space_to_batch_nd', (['x', 'block_shape', 'paddings'], {}), '(x, block_shape, paddings)\n', (624, 650), True, 'import tensorflow.compat.v2 as tf\n'), ((811, 862), 'iree.tf.support.tf_test_utils.compile_tf_module', 'tf_test_utils.compile_tf_module', (['SpaceToBatchModule'], {}), '(SpaceToBatchModule)\n', (842, 862), False, 'from iree.tf.support import tf_test_utils\n'), ((1221, 1244), 'tensorflow.compat.v2.enable_v2_behavior', 'tf.enable_v2_behavior', ([], {}), '()\n', (1242, 1244), True, 'import tensorflow.compat.v2 as tf\n'), ((960, 1000), 'numpy.linspace', 'np.linspace', (['(0)', '(15)', '(16)'], {'dtype': 'np.float32'}), '(0, 15, 16, dtype=np.float32)\n', (971, 1000), True, 'import numpy as np\n'), ((1011, 1035), 'numpy.reshape', 'np.reshape', (['x', '[1, 8, 2]'], {}), '(x, [1, 8, 2])\n', (1021, 1035), True, 'import numpy as np\n'), ((474, 510), 'tensorflow.compat.v2.TensorSpec', 'tf.TensorSpec', (['[1, 8, 2]', 'tf.float32'], {}), '([1, 8, 2], tf.float32)\n', (487, 510), True, 'import tensorflow.compat.v2 as tf\n')] |
import os
import sys
import shutil
import numpy as np
import config
import utils
import csv
import torch
import torchvision.models as models
from torch import nn
from time import sleep
from tqdm import tqdm, trange
from utils import h5_save, data_transforms, image_pair_generator, image_pair_length, get_prefix
def rename(beg, end, src, dst, target_beg=1, DEBUG=True, prefix=''):
    """Copy one A/B image pair per numbered folder into a flat, unified naming.

    Raw data format: each pair lives in its own folder named `beg`..`end`
    under `src`; the A image is `<i>.jpg` and the B image is the remaining
    JPEG in the folder. Output names are `%04d_A.jpg` / `%04d_B.jpg`,
    numbered from `target_beg`.

    When DEBUG is True nothing is copied; the plan is only printed
    (with a short sleep so the progress bar is readable).
    """
    print(f"Renaming : {src} -> {dst}")
    print('')
    for i in trange(beg, end + 1):
        folder = os.path.join(src, str(i))
        jpgs = [f for f in os.listdir(folder) if 'jpg' in f.lower()]
        out_idx = i + target_beg - beg
        a_src_name = str(i) + '.jpg'
        a_dst_name = '%04d_A.jpg' % out_idx
        # The B image is whichever JPEG is not named after the folder index.
        candidates = [f for f in jpgs if f.lower() != str(i) + '.jpg']
        assert len(candidates) == 1
        b_src_name = candidates[0]
        b_dst_name = '%04d_B.jpg' % out_idx
        tqdm.write(
            prefix + f"{a_src_name} -> {a_dst_name} ; {b_src_name} -> {b_dst_name}")
        if DEBUG:
            sleep(0.05)
        else:
            shutil.copyfile(os.path.join(folder, a_src_name),
                            os.path.join(dst, a_dst_name))
            shutil.copyfile(os.path.join(folder, b_src_name),
                            os.path.join(dst, b_dst_name))
def rename_many(param_list, dst, DEBUG=True, prefix=''):
    """Run `rename` over several (beg, end, src) triples, numbering the
    output files contiguously across all sources."""
    next_start = 1
    for beg, end, src in param_list:
        rename(beg, end, src, dst, target_beg=next_start, DEBUG=DEBUG,
               prefix=prefix)
        next_start += end - beg + 1
    print('')
def resize(src, dst, x, y, DEBUG=True, prefix=''):
    """Resize every JPEG from `src` into `dst` at exactly x-by-y pixels
    using the ImageMagick `convert` CLI.

    The trailing `!` in the geometry forces the exact size, so aspect
    ratio is NOT preserved — note that domain-A images are 16:13 while
    domain-B images are 4:3, so both get squeezed to the same resolution.
    When DEBUG is True the commands are only printed, not executed.
    """
    print(f"Resizing : {src} -> {dst}")
    print('')
    names = [n for n in sorted(os.listdir(src)) if 'jpg' in n.lower()]
    for name in tqdm(names):
        f_src = os.path.join(src, name)
        f_dst = os.path.join(dst, name)
        cmd = f'convert {f_src} -resize {x}x{y}! {f_dst}'
        if DEBUG:
            tqdm.write(prefix + cmd)
            sleep(0.05)
        else:
            tqdm.write(prefix + name)
            os.system(cmd)
def train_val_divide(path, train_percentage=0.80, seed=42):
    """Split the image pairs under `path` into train/val index files.

    Writes `train.csv` and `val.csv` into `path`, each with header `A,B`
    and one row per pair of file names, chosen by a seeded random
    permutation.

    Parameters
    ----------
    path : str
        Directory holding the paired images (even number of JPEGs).
    train_percentage : float, default 0.80
        Fraction of pairs assigned to the training split (in [0, 1]).
    seed : int, default 42
        Seed for the NumPy RNG, so the split is reproducible.

    Returns
    -------
    tuple
        (train_indices, val_indices) arrays of pair indices.

    Raises
    ------
    ValueError
        If `train_percentage` is outside [0, 1].
    """
    # Explicit raise instead of `assert`: asserts are stripped under `-O`.
    if not 0 <= train_percentage <= 1:
        raise ValueError('`train_percentage` should be between 0 and 1.')
    names = [f for f in sorted(os.listdir(path)) if 'jpg' in f.lower()]
    assert len(names) % 2 == 0, 'Expected an even number of images (A/B pairs).'
    n = len(names) // 2
    A_list, B_list = utils.name_list(n, path='')
    np.random.seed(seed)
    permut = np.random.permutation(n)
    n_train = int(n * train_percentage)
    permut_train, permut_val = permut[:n_train], permut[n_train:]
    for file_name, indices in (('train.csv', permut_train),
                               ('val.csv', permut_val)):
        # newline='' is required by the csv module; without it csv.writer
        # emits blank rows on Windows.
        with open(os.path.join(path, file_name), 'w', newline='') as csvfile:
            csvWriter = csv.writer(csvfile)
            csvWriter.writerow(['A', 'B'])
            for idx in indices:
                csvWriter.writerow([A_list[idx], B_list[idx]])
    return permut_train, permut_val
def generate_feature(data_dir, feature_map_file_name, feature_shape, eval=True):
    """Extract ResNet-34 feature maps for every (A, B) image pair and save
    them with `h5_save`.

    The classification head of a pretrained ResNet-34 is stripped off and
    the remaining backbone is used as a fixed feature extractor. Features
    for the A and B images of each pair are written as two arrays of shape
    (n_pairs,) + feature_shape.

    NOTE: the `eval` parameter shadows the builtin; it is kept as-is for
    backward compatibility with existing callers. When True, the extractor
    is put in eval mode and its gradients are frozen.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(device)
    sys.stdout.flush()
    transform = data_transforms['norm']
    # Pretrained Model
    backbone = models.resnet34(pretrained=True)
    # Optionally replace the adaptive pooling with a fixed 7x7 average pool.
    if config.RESNET_POOLING == 'fixed' and str(backbone.avgpool)[:8] == 'Adaptive':
        backbone.avgpool = nn.AvgPool2d(kernel_size=7, stride=1, padding=0)
    # Drop the final fully-connected layer to obtain a feature extractor.
    extractor = nn.Sequential(*list(backbone.children())[:-1])
    del backbone
    extractor.to(device)
    if eval:
        extractor.eval()
        for param in extractor.parameters():
            param.requires_grad = False
    print(extractor)
    sys.stdout.flush()
    # Generating
    n = image_pair_length(data_dir)
    feats_a = np.zeros((n,) + feature_shape)
    feats_b = np.zeros((n,) + feature_shape)
    for i, (img_a, img_b) in enumerate(image_pair_generator(data_dir)):
        print("Generating feature maps of %d th pair." % (i))
        sys.stdout.flush()
        tensor_a = transform(img_a).unsqueeze(0).to(device)
        tensor_b = transform(img_b).unsqueeze(0).to(device)
        feats_a[i] = extractor(tensor_a).squeeze().cpu().detach().numpy()
        feats_b[i] = extractor(tensor_b).squeeze().cpu().detach().numpy()
    h5_save(feature_map_file_name, feats_a, feats_b)
def _full_pipeline(DEBUG=True):
    """Preprocess every capture location: unify naming, resize, split train/val."""
    print("Move and rename all images from different folders to a single folder using an unified naming.")
    prefix = get_prefix(DEBUG)
    all_sources = [
        (1, 20, config.KUNSHAN_1_RAW),
        (1, 49, config.PARIS_1_RAW),
        (1, 9, config.SHENNONGJIA_1_RAW),
        (1, 100, config.SUZHOU_1_RAW),
        (101, 200, config.SUZHOU_2_RAW),
        (201, 300, config.SUZHOU_3_RAW),
        (301, 343, config.SUZHOU_4_RAW),
        (1, 160, config.SWISS_1_RAW),
        (0, 39, config.SWISS_2_RAW),
        (1, 113, config.SWISS_3_RAW),
        (1, 57, config.WEIHAI_1_RAW),
        (1, 69, config.WUXI_1_RAW),
    ]
    rename_many(all_sources, dst=config.FULL_DATA, DEBUG=DEBUG, prefix=prefix)
    resize(config.FULL_DATA, config.FULL_960x720, x=960, y=720,
           DEBUG=DEBUG, prefix=prefix)
    print(train_val_divide(config.FULL_960x720))
def _swiss_pipeline(DEBUG=True):
    """Preprocess the Swiss captures: unify naming, resize, split train/val."""
    print("Move and rename Swiss images from different folders to a single folder using an unified naming.")
    prefix = get_prefix(DEBUG)
    swiss_sources = [
        (1, 160, config.SWISS_1_RAW),
        (0, 39, config.SWISS_2_RAW),
        (1, 113, config.SWISS_3_RAW),
    ]
    rename_many(swiss_sources, dst=config.SWISS_DATA, DEBUG=DEBUG, prefix=prefix)
    # was 1280x720 for swiss
    resize(config.SWISS_DATA, config.SWISS_960x720, x=960, y=720,
           DEBUG=DEBUG, prefix=prefix)
    print(train_val_divide(config.SWISS_960x720))
def _suzhou_pipeline(DEBUG=True):
    """Preprocess the Suzhou captures: unify naming, resize, split train/val."""
    print("Move and rename Suzhou images from different folders to a single folder using an unified naming.")
    prefix = get_prefix(DEBUG)
    suzhou_sources = [
        (1, 100, config.SUZHOU_1_RAW),
        (101, 200, config.SUZHOU_2_RAW),
        (201, 300, config.SUZHOU_3_RAW),
        (301, 343, config.SUZHOU_4_RAW),
    ]
    rename_many(suzhou_sources, dst=config.SUZHOU_DATA, DEBUG=DEBUG,
                prefix=prefix)
    # was 1280x720 for swiss
    resize(config.SUZHOU_DATA, config.SUZHOU_960x720, x=960, y=720,
           DEBUG=DEBUG, prefix=prefix)
    print(train_val_divide(config.SUZHOU_960x720))
def _england_pipeline(DEBUG=True):
    """Preprocess the England captures: strip the 2-char folder prefix from
    each raw pair folder, then rename, resize, and split (all pairs go to
    the validation split, train_percentage=0)."""
    prefix = get_prefix(DEBUG)

    def strip_folder_prefixes(path):
        # Raw England folders carry a 2-character prefix; drop it so the
        # folder names become the numeric pair indices `rename` expects.
        for folder_name in os.listdir(path):
            cmd = f"mv {os.path.join(path, folder_name)} {os.path.join(path, folder_name[2:])}"
            print(cmd)
            if not DEBUG:
                os.system(cmd)

    for raw_dir in (config.ENGLAND_BIRMINGHAM_RAW,
                    config.ENGLAND_COVENTRY_RAW,
                    config.ENGLAND_LIVERPOOL_RAW,
                    config.ENGLAND_PEAK_RAW):
        strip_folder_prefixes(raw_dir)
    rename_many(
        [
            (1, 37, config.ENGLAND_BIRMINGHAM_RAW),
            (2, 3, config.ENGLAND_COVENTRY_RAW),
            (5, 13, config.ENGLAND_COVENTRY_RAW),
            (15, 18, config.ENGLAND_COVENTRY_RAW),
            (1, 41, config.ENGLAND_LIVERPOOL_RAW),
            (1, 14, config.ENGLAND_PEAK_RAW),
        ],
        dst=config.ENGLAND_DATA,
        DEBUG=DEBUG,
        prefix='',
    )
    resize(src=config.ENGLAND_DATA, dst=config.ENGLAND_960x720,
           x=960, y=720, DEBUG=DEBUG, prefix=prefix)
    print(train_val_divide(config.ENGLAND_960x720, train_percentage=0))
if __name__ == '__main__':
    # One pipeline is run at a time; earlier runs are kept commented out
    # for reference.
    # _full_pipeline(DEBUG=False)
    # _swiss_pipeline(DEBUG=False)
    # _suzhou_pipeline(DEBUG=False)
    # generate_feature(data_dir=config.FULL_960x720,
    #                  feature_map_file_name=config.FULL_960x720_FEATURE_RES34,
    #                  feature_shape=config.RES34_960x720_SHAPE, )
    _england_pipeline(DEBUG=False)
    print('Done.')
| [
"numpy.random.seed",
"sys.stdout.flush",
"torchvision.models.resnet34",
"os.path.join",
"utils.h5_save",
"shutil.copyfile",
"torch.nn.AvgPool2d",
"tqdm.tqdm",
"csv.writer",
"tqdm.trange",
"utils.image_pair_length",
"os.system",
"time.sleep",
"torch.cuda.is_available",
"numpy.random.permu... | [((583, 603), 'tqdm.trange', 'trange', (['beg', '(end + 1)'], {}), '(beg, end + 1)\n', (589, 603), False, 'from tqdm import tqdm, trange\n'), ((2094, 2101), 'tqdm.tqdm', 'tqdm', (['l'], {}), '(l)\n', (2098, 2101), False, 'from tqdm import tqdm, trange\n'), ((2714, 2741), 'utils.name_list', 'utils.name_list', (['n'], {'path': '""""""'}), "(n, path='')\n", (2729, 2741), False, 'import utils\n'), ((2804, 2824), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2818, 2824), True, 'import numpy as np\n'), ((2838, 2862), 'numpy.random.permutation', 'np.random.permutation', (['n'], {}), '(n)\n', (2859, 2862), True, 'import numpy as np\n'), ((3643, 3661), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3659, 3661), False, 'import sys\n'), ((3749, 3781), 'torchvision.models.resnet34', 'models.resnet34', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (3764, 3781), True, 'import torchvision.models as models\n'), ((4260, 4278), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4276, 4278), False, 'import sys\n'), ((4305, 4332), 'utils.image_pair_length', 'image_pair_length', (['data_dir'], {}), '(data_dir)\n', (4322, 4332), False, 'from utils import h5_save, data_transforms, image_pair_generator, image_pair_length, get_prefix\n'), ((4343, 4373), 'numpy.zeros', 'np.zeros', (['((n,) + feature_shape)'], {}), '((n,) + feature_shape)\n', (4351, 4373), True, 'import numpy as np\n'), ((4382, 4412), 'numpy.zeros', 'np.zeros', (['((n,) + feature_shape)'], {}), '((n,) + feature_shape)\n', (4390, 4412), True, 'import numpy as np\n'), ((4880, 4920), 'utils.h5_save', 'h5_save', (['feature_map_file_name', 'f_a', 'f_b'], {}), '(feature_map_file_name, f_a, f_b)\n', (4887, 4920), False, 'from utils import h5_save, data_transforms, image_pair_generator, image_pair_length, get_prefix\n'), ((5074, 5091), 'utils.get_prefix', 'get_prefix', (['DEBUG'], {}), '(DEBUG)\n', (5084, 5091), False, 'from utils import h5_save, 
data_transforms, image_pair_generator, image_pair_length, get_prefix\n'), ((6046, 6063), 'utils.get_prefix', 'get_prefix', (['DEBUG'], {}), '(DEBUG)\n', (6056, 6063), False, 'from utils import h5_save, data_transforms, image_pair_generator, image_pair_length, get_prefix\n'), ((6645, 6662), 'utils.get_prefix', 'get_prefix', (['DEBUG'], {}), '(DEBUG)\n', (6655, 6662), False, 'from utils import h5_save, data_transforms, image_pair_generator, image_pair_length, get_prefix\n'), ((7189, 7206), 'utils.get_prefix', 'get_prefix', (['DEBUG'], {}), '(DEBUG)\n', (7199, 7206), False, 'from utils import h5_save, data_transforms, image_pair_generator, image_pair_length, get_prefix\n'), ((653, 666), 'os.listdir', 'os.listdir', (['p'], {}), '(p)\n', (663, 666), False, 'import os\n'), ((825, 852), 'os.path.join', 'os.path.join', (['p', 'src_a_name'], {}), '(p, src_a_name)\n', (837, 852), False, 'import os\n'), ((869, 898), 'os.path.join', 'os.path.join', (['dst', 'dst_a_name'], {}), '(dst, dst_a_name)\n', (881, 898), False, 'import os\n'), ((1110, 1137), 'os.path.join', 'os.path.join', (['p', 'src_b_name'], {}), '(p, src_b_name)\n', (1122, 1137), False, 'import os\n'), ((1154, 1183), 'os.path.join', 'os.path.join', (['dst', 'dst_b_name'], {}), '(dst, dst_b_name)\n', (1166, 1183), False, 'import os\n'), ((1274, 1302), 'tqdm.tqdm.write', 'tqdm.write', (['(prefix + infostr)'], {}), '(prefix + infostr)\n', (1284, 1302), False, 'from tqdm import tqdm, trange\n'), ((2019, 2034), 'os.listdir', 'os.listdir', (['src'], {}), '(src)\n', (2029, 2034), False, 'import os\n'), ((2119, 2139), 'os.path.join', 'os.path.join', (['src', 'f'], {}), '(src, f)\n', (2131, 2139), False, 'import os\n'), ((2156, 2176), 'os.path.join', 'os.path.join', (['dst', 'f'], {}), '(dst, f)\n', (2168, 2176), False, 'import os\n'), ((2583, 2599), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2593, 2599), False, 'import os\n'), ((3045, 3064), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (3055, 
3064), False, 'import csv\n'), ((3278, 3297), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (3288, 3297), False, 'import csv\n'), ((3910, 3958), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', ([], {'kernel_size': '(7)', 'stride': '(1)', 'padding': '(0)'}), '(kernel_size=7, stride=1, padding=0)\n', (3922, 3958), False, 'from torch import nn\n'), ((4450, 4480), 'utils.image_pair_generator', 'image_pair_generator', (['data_dir'], {}), '(data_dir)\n', (4470, 4480), False, 'from utils import h5_save, data_transforms, image_pair_generator, image_pair_length, get_prefix\n'), ((4553, 4571), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4569, 4571), False, 'import sys\n'), ((7267, 7283), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (7277, 7283), False, 'import os\n'), ((1333, 1344), 'time.sleep', 'sleep', (['(0.05)'], {}), '(0.05)\n', (1338, 1344), False, 'from time import sleep\n'), ((1388, 1417), 'shutil.copyfile', 'shutil.copyfile', (['src_a', 'dst_a'], {}), '(src_a, dst_a)\n', (1403, 1417), False, 'import shutil\n'), ((1430, 1459), 'shutil.copyfile', 'shutil.copyfile', (['src_b', 'dst_b'], {}), '(src_b, dst_b)\n', (1445, 1459), False, 'import shutil\n'), ((2325, 2349), 'tqdm.tqdm.write', 'tqdm.write', (['(prefix + cmd)'], {}), '(prefix + cmd)\n', (2335, 2349), False, 'from tqdm import tqdm, trange\n'), ((2362, 2373), 'time.sleep', 'sleep', (['(0.05)'], {}), '(0.05)\n', (2367, 2373), False, 'from time import sleep\n'), ((2400, 2422), 'tqdm.tqdm.write', 'tqdm.write', (['(prefix + f)'], {}), '(prefix + f)\n', (2410, 2422), False, 'from tqdm import tqdm, trange\n'), ((2435, 2449), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (2444, 2449), False, 'import os\n'), ((2975, 3006), 'os.path.join', 'os.path.join', (['path', '"""train.csv"""'], {}), "(path, 'train.csv')\n", (2987, 3006), False, 'import os\n'), ((3210, 3239), 'os.path.join', 'os.path.join', (['path', '"""val.csv"""'], {}), "(path, 'val.csv')\n", (3222, 3239), False, 'import 
os\n'), ((3583, 3608), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3606, 3608), False, 'import torch\n'), ((7446, 7460), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (7455, 7460), False, 'import os\n'), ((7309, 7340), 'os.path.join', 'os.path.join', (['path', 'folder_name'], {}), '(path, folder_name)\n', (7321, 7340), False, 'import os\n'), ((7343, 7378), 'os.path.join', 'os.path.join', (['path', 'folder_name[2:]'], {}), '(path, folder_name[2:])\n', (7355, 7378), False, 'import os\n')] |
# Licence Apache-2.0
import feature_gen_dt
from ..util import util
from ..transformers import Transformer
from typing import List, Union
import numpy as np
import pandas as pd
import databricks.koalas as ks
class DeltaTime(Transformer):
"""Create new columns based on the time difference in sec. between two columns.
Parameters
----------
columns : List[str]
List of columns.
Examples
---------
* fit & transform with `pandas`
>>> import pandas as pd
>>> from gators.feature_generation_dt import DeltaTime
>>> X = pd.DataFrame({
... 'A': ['2020-01-01T23', '2020-01-15T18', pd.NaT],
... 'B': [0, 1, 0],
... 'C': ['2020-01-02T05', '2020-01-15T23', pd.NaT]})
>>> obj = DeltaTime(columns_a=['C'], columns_b=['A'])
>>> obj.fit_transform(X)
A B C C__A__Deltatime[s]
0 2020-01-01 23:00:00 0 2020-01-02 05:00:00 21600.0
1 2020-01-15 18:00:00 1 2020-01-15 23:00:00 18000.0
2 NaT 0 NaT NaN
* fit & transform with `koalas`
>>> import databricks.koalas as ks
>>> from gators.feature_generation_dt import DeltaTime
>>> X = ks.DataFrame({
... 'A': ['2020-01-01T23', '2020-01-15T18', pd.NaT],
... 'B': [0, 1, 0],
... 'C': ['2020-01-02T05', '2020-01-15T23', pd.NaT]})
>>> obj = DeltaTime(columns_a=['C'], columns_b=['A'])
>>> obj.fit_transform(X)
A B C C__A__Deltatime[s]
0 2020-01-01 23:00:00 0 2020-01-02 05:00:00 21600.0
1 2020-01-15 18:00:00 1 2020-01-15 23:00:00 18000.0
2 NaT 0 NaT NaN
* fit with `pandas` & transform with `NumPy`
>>> import pandas as pd
>>> from gators.feature_generation_dt import DeltaTime
>>> X = pd.DataFrame({
... 'A': ['2020-01-01T23', '2020-01-15T18', pd.NaT],
... 'B': [0, 1, 0],
... 'C': ['2020-01-02T05', '2020-01-15T23', pd.NaT]})
>>> obj = DeltaTime(columns_a=['C'], columns_b=['A'])
>>> _ = obj.fit(X)
>>> obj.transform_numpy(X.to_numpy())
array([[Timestamp('2020-01-01 23:00:00'), 0,
Timestamp('2020-01-02 05:00:00'), 21600.0],
[Timestamp('2020-01-15 18:00:00'), 1,
Timestamp('2020-01-15 23:00:00'), 18000.0],
[NaT, 0, NaT, nan]], dtype=object)
* fit with `koalas` & transform with `NumPy`
>>> import databricks.koalas as ks
>>> from gators.feature_generation_dt import DeltaTime
>>> X = ks.DataFrame({
... 'A': ['2020-01-01T23', '2020-01-15T18', pd.NaT],
... 'B': [0, 1, 0],
... 'C': ['2020-01-02T05', '2020-01-15T23', pd.NaT]})
>>> obj = DeltaTime(columns_a=['C'], columns_b=['A'])
>>> _ = obj.fit(X)
>>> obj.transform_numpy(X.to_numpy())
array([[Timestamp('2020-01-01 23:00:00'), 0,
Timestamp('2020-01-02 05:00:00'), 21600.0],
[Timestamp('2020-01-15 18:00:00'), 1,
Timestamp('2020-01-15 23:00:00'), 18000.0],
[NaT, 0, NaT, nan]], dtype=object)
"""
def __init__(self, columns_a: List[str], columns_b: List[str]):
Transformer.__init__(self)
if not isinstance(columns_a, list):
raise TypeError('`columns_a` should be a list.')
if not columns_a:
raise ValueError('`columns_a` should not be empty.')
if not isinstance(columns_b, list):
raise TypeError('`columns_b` should be a list.')
if not columns_b:
raise ValueError('`columns_b` should not be empty.')
if len(columns_b) != len(columns_a):
raise ValueError(
'`columns_a` and `columns_b` should have the same length.')
self.unit = 's'
self.columns_a = columns_a
self.columns_b = columns_b
self.deltatime_dtype = f'timedelta64[{self.unit}]'
self.column_names = [
f'{c_a}__{c_b}__Deltatime[{self.unit}]'
for c_a, c_b in zip(columns_a, columns_b)
]
self.column_mapping = {
name: [c_a, c_b] for name, c_a, c_b
in zip(self.column_names, columns_a, columns_b)
}
    def fit(self,
            X: Union[pd.DataFrame, ks.DataFrame],
            y: Union[pd.Series, ks.Series] = None) -> 'DeltaTime':
        """Fit the transformer on the dataframe `X`.
        Parameters
        ----------
        X : pd.DataFrame
            Input dataframe.
        y : Union[pd.Series, ks.Series], default to None.
            Target values.
        Returns
        -------
        DeltaTime
            Instance of itself.
        """
        self.check_dataframe(X)
        columns = list(set(self.columns_a+self.columns_b))
        # Only the first 5000 rows are inspected so the dtype check stays
        # cheap on large (possibly distributed) dataframes.
        X_datetime_dtype = X.iloc[:5000][columns].dtypes
        for column in columns:
            if not np.issubdtype(X_datetime_dtype[column], np.datetime64):
                raise TypeError(
                    """
                    Datetime columns should be of subtype np.datetime64.
                    Use `ConvertColumnDatatype` to convert the dtype.
                    """)
        # Cache the positional indices of both column lists; they are used
        # by `transform_numpy`, which receives a bare array without labels.
        self.idx_columns_a = util.get_idx_columns(
            columns=X.columns,
            selected_columns=self.columns_a,
        )
        self.idx_columns_b = util.get_idx_columns(
            columns=X.columns,
            selected_columns=self.columns_b,
        )
        return self
    def transform(
            self, X: Union[pd.DataFrame, ks.DataFrame]
    ) -> Union[pd.DataFrame, ks.DataFrame]:
        """Transform the dataframe `X`.
        Parameters
        ----------
        X : Union[pd.DataFrame, ks.DataFrame].
            Input dataframe.
        Returns
        -------
        Union[pd.DataFrame, ks.DataFrame]
            Transformed dataframe.
        """
        self.check_dataframe(X)
        if isinstance(X, pd.DataFrame):
            # pandas path: datetime subtraction cast to timedelta64[s].
            for name, c_a, c_b in zip(self.column_names, self.columns_a, self.columns_b):
                X.loc[:, name] = (X[c_a] - X[c_b]).astype(self.deltatime_dtype)
            return X
        # koalas path: datetimes are cast to float and subtracted.
        # NOTE(review): the float cast appears to yield a raw epoch-based
        # difference rather than the `[s]` unit advertised in the column
        # name -- confirm the resulting unit matches the pandas branch.
        for name, c_a, c_b in zip(self.column_names, self.columns_a, self.columns_b):
            X = X.assign(
                dummy=(X[c_a].astype(float) - X[c_b].astype(float))).rename(
                    columns={'dummy': name})
        return X
def transform_numpy(self, X: np.ndarray) -> np.ndarray:
"""Transform the array X.
Parameters
----------
X : np.ndarray
Input array.
Returns
-------
np.ndarray:
Array with the datetime features added.
"""
self.check_array(X)
return feature_gen_dt.deltatime(
X, self.idx_columns_a, self.idx_columns_b)
| [
"feature_gen_dt.deltatime",
"numpy.issubdtype"
] | [((6708, 6775), 'feature_gen_dt.deltatime', 'feature_gen_dt.deltatime', (['X', 'self.idx_columns_a', 'self.idx_columns_b'], {}), '(X, self.idx_columns_a, self.idx_columns_b)\n', (6732, 6775), False, 'import feature_gen_dt\n'), ((4893, 4947), 'numpy.issubdtype', 'np.issubdtype', (['X_datetime_dtype[column]', 'np.datetime64'], {}), '(X_datetime_dtype[column], np.datetime64)\n', (4906, 4947), True, 'import numpy as np\n')] |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains the projectq interface to the Qrack framework, a stand-alone open
source GPU-accelerated C++ simulator, which has to be built first.
"""
import math
import random
import numpy as np
from enum import IntEnum
from projectq.cengines import BasicEngine
from projectq.meta import get_control_count, LogicalQubitIDTag
from projectq.ops import (Swap,
SqrtSwap,
Measure,
FlushGate,
Allocate,
Deallocate,
UniformlyControlledRy,
UniformlyControlledRz,
StatePreparation,
QubitOperator,
TimeEvolution)
from projectq.libs.math import (AddConstant,
AddConstantModN,
MultiplyByConstantModN)
#DivideByConstantModN)
from projectq.types import WeakQubitRef
from ._qracksim import QrackSimulator as SimulatorBackend
class SimulatorType(IntEnum):
    # Engine-type selector handed through unchanged to the QrackSimulator
    # backend constructor; values must stay in sync with the C++ extension.
    QINTERFACE_QUNIT = 1
    QINTERFACE_QENGINE = 2
    QINTERFACE_QUNIT_MULTI = 3
class Simulator(BasicEngine):
"""
The Qrack Simulator is a compiler engine which simulates a quantum computer
using C++ and OpenCL-based kernels.
To use the Qrack Simulator, first install the Qrack framework, available at
https://github.com/vm6502q/qrack. (See the README there, and the Qrack
documentation at https://vm6502q.readthedocs.io/en/latest/.) Then, run the
ProjectQ setup.py script with the global option "--with-qracksimulator".
"""
def __init__(self, gate_fusion=False, rnd_seed=None, ocl_dev=-1, simulator_type = SimulatorType.QINTERFACE_QUNIT, build_from_source = False, save_binaries = False, cache_path = "*"):
"""
Construct the Qrack simulator object and initialize it with a
random seed.
Args:
gate_fusion (bool): If True, gates are cached and only executed
once a certain gate-size has been reached (not yet implemented).
rnd_seed (int): Random seed (uses random.randint(0, 4294967295) by
default).
ocl_dev (int): Specify the OpenCL device to use. By default, Qrack
uses the last device in the system list, because this is
usually a GPU.
Note:
If the Qrack Simulator extension was not built or cannot be found,
the Simulator defaults to a Python implementation of the kernels.
While this is much slower, it is still good enough to run basic
quantum algorithms.
"""
try:
from ._qracksim import QrackSimulator as SimulatorBackend
except:
raise ModuleNotFoundError("QrackSimulator module could not be found. Build ProjectQ with global option '--with-qracksimulator'.")
if rnd_seed is None:
rnd_seed = random.randint(0, 4294967295)
BasicEngine.__init__(self)
self._simulator = SimulatorBackend(rnd_seed, ocl_dev, simulator_type, build_from_source, save_binaries, cache_path)
    def is_available(self, cmd):
        """
        Specialized implementation of is_available: The simulator can deal
        with all arbitrarily-controlled single-bit gates, as well as
        addition, subtraction, and multiplication gates, when their modulo
        is the number of permutations in the register.
        Args:
            cmd (Command): Command for which to check availability (single-
                qubit gate, arbitrary controls)
        Returns:
            True if it can be simulated and False otherwise.
        """
        # Attribute access on cmd.gate (e.g. .N, .coefficient, .matrix) may
        # fail for gate types that lack those attributes; each try block is
        # a best-effort category check that falls through to the next one.
        # NOTE(review): the bare `except:` clauses also swallow
        # KeyboardInterrupt/SystemExit; consider narrowing to AttributeError.
        try:
            if (cmd.gate == Measure or
                cmd.gate == Allocate or cmd.gate == Deallocate or
                cmd.gate == Swap or cmd.gate == SqrtSwap or
                isinstance(cmd.gate, AddConstant) or
                isinstance(cmd.gate, UniformlyControlledRy) or
                isinstance(cmd.gate, UniformlyControlledRz) or
                isinstance(cmd.gate, TimeEvolution)):
                return True
            elif (isinstance(cmd.gate, AddConstantModN) and (1 << len(cmd.qubits)) == cmd.gate.N):
                return True
            elif (isinstance(cmd.gate, MultiplyByConstantModN) and (1 << len(cmd.qubits)) == cmd.gate.N):
                return True
            #elif (isinstance(cmd.gate, DivideByConstantModN) and (1 << len(cmd.qubits)) == cmd.gate.N):
            #    return True
        except:
            pass
        try:
            if (isinstance(cmd.gate, StatePreparation) and not cmd.control_qubits):
                # Qrack has inexpensive ways of preparing a partial state, without controls.
                return True
            elif (isinstance(cmd.gate, QubitOperator) and not np.isclose(1, np.absolute(cmd.gate.coefficient))):
                return True
        except:
            pass
        try:
            m = cmd.gate.matrix
            # Allow up to 1-qubit gates
            if len(m) > 2 ** 1:
                return False
            return True
        except:
            return False
def _convert_logical_to_mapped_qureg(self, qureg):
"""
Converts a qureg from logical to mapped qubits if there is a mapper.
Args:
qureg (list[Qubit],Qureg): Logical quantum bits
"""
mapper = self.main_engine.mapper
if mapper is not None:
mapped_qureg = []
for qubit in qureg:
if qubit.id not in mapper.current_mapping:
raise RuntimeError("Unknown qubit id. "
"Please make sure you have called "
"eng.flush().")
new_qubit = WeakQubitRef(qubit.engine,
mapper.current_mapping[qubit.id])
mapped_qureg.append(new_qubit)
return mapped_qureg
else:
return qureg
def get_expectation_value(self, qubit_operator, qureg):
"""
Get the expectation value of qubit_operator w.r.t. the current wave
function represented by the supplied quantum register.
Args:
qubit_operator (projectq.ops.QubitOperator): Operator to measure.
qureg (list[Qubit],Qureg): Quantum bits to measure.
Returns:
Expectation value
Note:
Make sure all previous commands (especially allocations) have
passed through the compilation chain (call main_engine.flush() to
make sure).
Note:
If there is a mapper present in the compiler, this function
automatically converts from logical qubits to mapped qubits for
the qureg argument.
Raises:
Exception: If `qubit_operator` acts on more qubits than present in
the `qureg` argument.
"""
qureg = self._convert_logical_to_mapped_qureg(qureg)
num_qubits = len(qureg)
for term, _ in qubit_operator.terms.items():
if not term == () and term[-1][0] >= num_qubits:
raise Exception("qubit_operator acts on more qubits than "
"contained in the qureg.")
operator = [(list(term), coeff) for (term, coeff)
in qubit_operator.terms.items()]
return self._simulator.get_expectation_value(operator,
[qb.id for qb in qureg])
def apply_qubit_operator(self, qubit_operator, qureg):
"""
Apply a (possibly non-unitary) qubit_operator to the current wave
function represented by the supplied quantum register.
Args:
qubit_operator (projectq.ops.QubitOperator): Operator to apply.
qureg (list[Qubit],Qureg): Quantum bits to which to apply the
operator.
Raises:
Exception: If `qubit_operator` acts on more qubits than present in
the `qureg` argument.
Warning:
This function allows applying non-unitary gates and it will not
re-normalize the wave function! It is for numerical experiments
only and should not be used for other purposes.
Note:
Make sure all previous commands (especially allocations) have
passed through the compilation chain (call main_engine.flush() to
make sure).
Note:
If there is a mapper present in the compiler, this function
automatically converts from logical qubits to mapped qubits for
the qureg argument.
"""
qureg = self._convert_logical_to_mapped_qureg(qureg)
num_qubits = len(qureg)
for term, _ in qubit_operator.terms.items():
if not term == () and term[-1][0] >= num_qubits:
raise Exception("qubit_operator acts on more qubits than "
"contained in the qureg.")
operator = [(list(term), coeff) for (term, coeff)
in qubit_operator.terms.items()]
return self._simulator.apply_qubit_operator(operator,
[qb.id for qb in qureg])
def get_probability(self, bit_string, qureg):
"""
Return the probability of the outcome `bit_string` when measuring
the quantum register `qureg`.
Args:
bit_string (list[bool|int]|string[0|1]): Measurement outcome.
qureg (Qureg|list[Qubit]): Quantum register.
Returns:
Probability of measuring the provided bit string.
Note:
Make sure all previous commands (especially allocations) have
passed through the compilation chain (call main_engine.flush() to
make sure).
Note:
If there is a mapper present in the compiler, this function
automatically converts from logical qubits to mapped qubits for
the qureg argument.
"""
qureg = self._convert_logical_to_mapped_qureg(qureg)
bit_string = [bool(int(b)) for b in bit_string]
return self._simulator.get_probability(bit_string,
[qb.id for qb in qureg])
def get_amplitude(self, bit_string, qureg):
"""
Return the wave function amplitude of the supplied `bit_string`.
The ordering is given by the quantum register `qureg`, which must
contain all allocated qubits.
Args:
bit_string (list[bool|int]|string[0|1]): Computational basis state
qureg (Qureg|list[Qubit]): Quantum register determining the
ordering. Must contain all allocated qubits.
Returns:
Wave function amplitude of the provided bit string.
Note:
This is a cheat function for debugging only. The underlying Qrack
engine is explicitly Schmidt-decomposed, and the full permutation
basis wavefunction is not actually the internal state of the engine,
but it is descriptively equivalent.
Note:
Make sure all previous commands (especially allocations) have
passed through the compilation chain (call main_engine.flush() to
make sure).
Note:
If there is a mapper present in the compiler, this function
automatically converts from logical qubits to mapped qubits for
the qureg argument.
"""
qureg = self._convert_logical_to_mapped_qureg(qureg)
bit_string = [bool(int(b)) for b in bit_string]
return self._simulator.get_amplitude(bit_string,
[qb.id for qb in qureg])
def set_wavefunction(self, wavefunction, qureg):
"""
Set the wavefunction and the qubit ordering of the simulator.
The simulator will adopt the ordering of qureg (instead of reordering
the wavefunction).
Args:
wavefunction (list[complex]): Array of complex amplitudes
describing the wavefunction (must be normalized).
qureg (Qureg|list[Qubit]): Quantum register determining the
ordering. Must contain all allocated qubits.
Note:
This is a cheat function for debugging only. The underlying Qrack
engine is explicitly Schmidt-decomposed, and the full permutation
basis wavefunction is not actually the internal state of the engine,
but it is descriptively equivalent.
Note:
Make sure all previous commands (especially allocations) have
passed through the compilation chain (call main_engine.flush() to
make sure).
Note:
If there is a mapper present in the compiler, this function
automatically converts from logical qubits to mapped qubits for
the qureg argument.
"""
qureg = self._convert_logical_to_mapped_qureg(qureg)
self._simulator.set_wavefunction(wavefunction,
[qb.id for qb in qureg])
def collapse_wavefunction(self, qureg, values):
"""
Collapse a quantum register onto a classical basis state.
Args:
qureg (Qureg|list[Qubit]): Qubits to collapse.
values (list[bool|int]|string[0|1]): Measurement outcome for each
of the qubits in `qureg`.
Raises:
RuntimeError: If an outcome has probability (approximately) 0 or
if unknown qubits are provided (see note).
Note:
Make sure all previous commands have passed through the
compilation chain (call main_engine.flush() to make sure).
Note:
If there is a mapper present in the compiler, this function
automatically converts from logical qubits to mapped qubits for
the qureg argument.
"""
qureg = self._convert_logical_to_mapped_qureg(qureg)
return self._simulator.collapse_wavefunction([qb.id for qb in qureg],
[bool(int(v)) for v in
values])
    def cheat(self):
        """
        Access the ordering of the qubits and a representation of the state vector.
        Returns:
            A tuple where the first entry is a dictionary mapping qubit
            indices to bit-locations and the second entry is the corresponding
            state vector.
        Note:
            This is a cheat function for debugging only. The underlying Qrack
            engine is explicitly Schmidt-decomposed, and the full permutation
            basis wavefunction is not actually the internal state of the engine,
            but it is descriptively equivalent.
        Note:
            Make sure all previous commands have passed through the
            compilation chain (call main_engine.flush() to make sure).
        Note:
            If there is a mapper present in the compiler, this function
            DOES NOT automatically convert from logical qubits to mapped
            qubits.
        """
        # Direct delegation to the C++ backend.
        return self._simulator.cheat()
    def _handle(self, cmd):
        """
        Handle all commands, i.e., call the member functions of the Qrack-
        simulator object corresponding to measurement, allocation/
        deallocation, and (controlled) single-qubit gate.
        Args:
            cmd (Command): Command to handle.
        Raises:
            Exception: If a non-single-qubit gate needs to be processed
                (which should never happen due to is_available).
        """
        # Dispatch on gate type; ordering mirrors is_available's categories.
        if cmd.gate == Measure:
            assert(get_control_count(cmd) == 0)
            ids = [qb.id for qr in cmd.qubits for qb in qr]
            out = self._simulator.measure_qubits(ids)
            i = 0
            for qr in cmd.qubits:
                for qb in qr:
                    # Check if a mapper assigned a different logical id
                    logical_id_tag = None
                    for tag in cmd.tags:
                        if isinstance(tag, LogicalQubitIDTag):
                            logical_id_tag = tag
                    if logical_id_tag is not None:
                        qb = WeakQubitRef(qb.engine,
                                          logical_id_tag.logical_qubit_id)
                    self.main_engine.set_measurement_result(qb, out[i])
                    i += 1
        elif cmd.gate == Allocate:
            ID = cmd.qubits[0][0].id
            self._simulator.allocate_qubit(ID)
        elif cmd.gate == Deallocate:
            ID = cmd.qubits[0][0].id
            self._simulator.deallocate_qubit(ID)
        elif cmd.gate == Swap:
            ids1 = [qb.id for qb in cmd.qubits[0]]
            ids2 = [qb.id for qb in cmd.qubits[1]]
            self._simulator.apply_controlled_swap(ids1, ids2,
                                                  [qb.id for qb in
                                                   cmd.control_qubits])
        elif cmd.gate == SqrtSwap:
            ids1 = [qb.id for qb in cmd.qubits[0]]
            ids2 = [qb.id for qb in cmd.qubits[1]]
            self._simulator.apply_controlled_sqrtswap(ids1, ids2,
                                                      [qb.id for qb in
                                                       cmd.control_qubits])
        elif isinstance(cmd.gate, QubitOperator):
            ids = [qb.id for qb in cmd.qubits[0]]
            self.apply_qubit_operator(cmd.gate, ids)
        elif isinstance(cmd.gate, AddConstant) or isinstance(cmd.gate, AddConstantModN):
            #Unless there's a carry, the only unitary addition is mod (2^len(ids))
            ids = [qb.id for qr in cmd.qubits for qb in qr]
            # A negative addend is routed to the decrement kernel instead;
            # a == 0 is a no-op and falls through.
            if cmd.gate.a > 0:
                self._simulator.apply_controlled_inc(ids,
                                                     [qb.id for qb in
                                                      cmd.control_qubits],
                                                     cmd.gate.a)
            elif cmd.gate.a < 0:
                self._simulator.apply_controlled_dec(ids,
                                                     [qb.id for qb in
                                                      cmd.control_qubits],
                                                     abs(cmd.gate.a))
        elif isinstance(cmd.gate, MultiplyByConstantModN):
            #Unless there's a carry, the only unitary addition is mod (2^len(ids))
            ids = [qb.id for qr in cmd.qubits for qb in qr]
            self._simulator.apply_controlled_mul(ids,
                                                 [qb.id for qb in
                                                  cmd.control_qubits],
                                                 cmd.gate.a)
        #elif isinstance(cmd.gate, DivideByConstantModN):
        #    #Unless there's a carry, the only unitary addition is mod (2^len(ids))
        #    ids = [qb.id for qr in cmd.qubits for qb in qr]
        #    self._simulator.apply_controlled_div(ids,
        #                                         [qb.id for qb in
        #                                          cmd.control_qubits],
        #                                         cmd.gate.a)
        #elif isinstance(cmd.gate, TimeEvolution):
        #    op = [(list(term), coeff) for (term, coeff)
        #          in cmd.gate.hamiltonian.terms.items()]
        #    t = cmd.gate.time
        #    qubitids = [qb.id for qb in cmd.qubits[0]]
        #    ctrlids = [qb.id for qb in cmd.control_qubits]
        #    self._simulator.emulate_time_evolution(op, t, qubitids, ctrlids)
        elif isinstance(cmd.gate, UniformlyControlledRy):
            # Last qubit is the rotation target; the rest select the angle.
            qubits = [qb.id for qr in cmd.qubits for qb in qr]
            target = qubits[-1]
            controls = qubits[:-1]
            self._simulator.apply_uniformly_controlled_ry([angle for angle in
                                                           cmd.gate.angles],
                                                          [target],
                                                          controls)
        elif isinstance(cmd.gate, UniformlyControlledRz):
            qubits = [qb.id for qr in cmd.qubits for qb in qr]
            target = qubits[-1]
            controls = qubits[:-1]
            self._simulator.apply_uniformly_controlled_rz([angle for angle in
                                                           cmd.gate.angles],
                                                          [target],
                                                          controls)
        elif isinstance(cmd.gate, StatePreparation):
            ids = [qb.id for qb in cmd.qubits[0]]
            self._simulator.prepare_state(ids,
                                          [amp for amp in
                                           cmd.gate.final_state])
        elif len(cmd.gate.matrix) <= 2 ** 1:
            # Generic (possibly controlled) single-qubit matrix gate.
            matrix = cmd.gate.matrix
            ids = [qb.id for qr in cmd.qubits for qb in qr]
            if not 2 ** len(ids) == len(cmd.gate.matrix):
                raise Exception("Simulator: Error applying {} gate: "
                                "{}-qubit gate applied to {} qubits.".format(
                                    str(cmd.gate),
                                    int(math.log(len(cmd.gate.matrix), 2)),
                                    len(ids)))
            self._simulator.apply_controlled_gate(matrix.tolist(),
                                                  ids,
                                                  [qb.id for qb in
                                                   cmd.control_qubits])
        else:
            raise Exception("This simulator only supports controlled 1-qubit"
                            " gates with controls and arithmetic!\nPlease add"
                            " an auto-replacer engine to your list of compiler"
                            " engines.")
def receive(self, command_list):
"""
Receive a list of commands from the previous engine and handle them
(simulate them classically) prior to sending them on to the next
engine.
Args:
command_list (list<Command>): List of commands to execute on the
simulator.
"""
for cmd in command_list:
if not cmd.gate == FlushGate():
self._handle(cmd)
else:
# flush gate - Qrack automatically flushes, but this guarantees that we've finihsed.
self._simulator.run()
if not self.is_last_engine:
self.send([cmd])
| [
"numpy.absolute",
"random.randint",
"projectq.meta.get_control_count",
"projectq.cengines.BasicEngine.__init__",
"projectq.ops.FlushGate",
"projectq.types.WeakQubitRef"
] | [((3600, 3626), 'projectq.cengines.BasicEngine.__init__', 'BasicEngine.__init__', (['self'], {}), '(self)\n', (3620, 3626), False, 'from projectq.cengines import BasicEngine\n'), ((3562, 3591), 'random.randint', 'random.randint', (['(0)', '(4294967295)'], {}), '(0, 4294967295)\n', (3576, 3591), False, 'import random\n'), ((6441, 6501), 'projectq.types.WeakQubitRef', 'WeakQubitRef', (['qubit.engine', 'mapper.current_mapping[qubit.id]'], {}), '(qubit.engine, mapper.current_mapping[qubit.id])\n', (6453, 6501), False, 'from projectq.types import WeakQubitRef\n'), ((16573, 16595), 'projectq.meta.get_control_count', 'get_control_count', (['cmd'], {}), '(cmd)\n', (16590, 16595), False, 'from projectq.meta import get_control_count, LogicalQubitIDTag\n'), ((23284, 23295), 'projectq.ops.FlushGate', 'FlushGate', ([], {}), '()\n', (23293, 23295), False, 'from projectq.ops import Swap, SqrtSwap, Measure, FlushGate, Allocate, Deallocate, UniformlyControlledRy, UniformlyControlledRz, StatePreparation, QubitOperator, TimeEvolution\n'), ((17145, 17201), 'projectq.types.WeakQubitRef', 'WeakQubitRef', (['qb.engine', 'logical_id_tag.logical_qubit_id'], {}), '(qb.engine, logical_id_tag.logical_qubit_id)\n', (17157, 17201), False, 'from projectq.types import WeakQubitRef\n'), ((5488, 5521), 'numpy.absolute', 'np.absolute', (['cmd.gate.coefficient'], {}), '(cmd.gate.coefficient)\n', (5499, 5521), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from hmmlearn import hmm
from sklearn.preprocessing import LabelEncoder
def generate_p_matrix(num_states):
    """Generate a random right-stochastic state-transition matrix.

    Entries are drawn uniformly from [0, 1) and each row is normalized to
    sum to 1, so row i is a probability distribution over next states.

    Parameters
    ----------
    num_states : int
        Number of hidden states; the result is num_states x num_states.

    Returns
    -------
    np.ndarray
        A (num_states, num_states) row-stochastic matrix.
    """
    # One vectorized draw plus a broadcast row normalization replaces the
    # original per-entry np.random.rand(1) loop.
    p = np.random.rand(num_states, num_states)
    return p / p.sum(axis=1, keepdims=True)
def generate_b_matrix(num_states, num_objects):
    """Generate a random emission-probability matrix.

    Entries are drawn uniformly from [0, 1) and each row is normalized to
    sum to 1, so row i is a distribution over observable symbols for state i.

    Parameters
    ----------
    num_states : int
        Number of hidden states (rows).
    num_objects : int
        Number of observable symbols (columns).

    Returns
    -------
    np.ndarray
        A (num_states, num_objects) row-stochastic matrix.
    """
    # Vectorized draw + broadcast normalization replaces the per-entry loop.
    b = np.random.rand(num_states, num_objects)
    return b / b.sum(axis=1, keepdims=True)
def next_state(p, cur_state):
    """Sample the next state (1-based) from transition matrix row `cur_state`.

    Draws a single uniform random number and performs inverse-CDF sampling
    over the cumulative transition probabilities of the current state's row.
    """
    draw = np.random.rand(1)
    thresholds = np.cumsum(p[cur_state - 1])
    for state, threshold in enumerate(thresholds, start=1):
        if draw <= threshold:
            return state
    # Guard against floating-point round-off leaving draw above the total.
    return len(thresholds)
def plot(y, y_label):
    """Plot `y` against candidate state counts 2..19 and show the figure.

    Note: the x-axis is hard-coded to range(2, 20) to match the model
    selection loop in this script.
    """
    fig = plt.figure(figsize=(7, 6))
    state_counts = list(range(2, 20))
    plt.plot(state_counts, y, marker='o')
    plt.xlabel('Number of states')
    plt.ylabel(y_label)
    plt.title(y_label + ' VS Number of states')
    fig.patch.set_facecolor('w')
    plt.show()
def current_observation(b, cur_state):
    """Sample an observation symbol (1-based) for the current state.

    Draws a single uniform random number and performs inverse-CDF sampling
    over the cumulative emission probabilities of the state's row in `b`.
    """
    draw = np.random.rand(1)
    thresholds = np.cumsum(b[cur_state - 1])
    for symbol, threshold in enumerate(thresholds, start=1):
        if draw <= threshold:
            return symbol
    # Guard against floating-point round-off leaving draw above the total.
    return len(thresholds)
# ---- Script: simulate an HMM and select the number of states by AIC/BIC ----
np.random.seed(200321513)
num_objects = 3
num_states = 4
V = (1, 2, 3)  # set of all observable symbols
pi = [1, 0, 0, 0]  # the chain always starts in state 1
one_step_transition_matrix = generate_p_matrix(num_states)
event_matrix = generate_b_matrix(4, 3)
print(f'Testing for normalization, sum of b matrix rows: {np.sum(event_matrix, axis=1)}')
num_observations = 5000
observations = []
states = [1]  # initial state is 1
while len(observations) < num_observations:
    # Emit a symbol from the current state, then advance the chain.
    observations.append(current_observation(event_matrix, states[-1]))
    states.append(next_state(one_step_transition_matrix, states[-1]))
states = states[:-1]  # drop the extra state generated after the last symbol
aic_list = []
bic_list = []
log_likelihood_list = []
# hmmlearn expects 0-based encoded symbols, one observation per row.
X = [[obs] for obs in LabelEncoder().fit_transform(observations)]
for n in range(2, 20):
    model = hmm.MultinomialHMM(n_components=n, random_state=10)
    model.fit(X)
    log_likelihood = model.score(X)
    # Free parameters: n*n transitions + n*num_objects emissions + n initial.
    num_parameters = n * n + n * num_objects + n
    log_likelihood_list.append(log_likelihood)
    aic_list.append(-2 * log_likelihood + 2 * num_parameters)
    bic_list.append(-2 * log_likelihood + num_parameters * np.log(len(observations)))
plot(log_likelihood_list, 'Log likelihood')
plot(aic_list, 'AIC')
plot(bic_list, 'BIC')
# Final model, refit with the true number of states.
hmm_model = hmm.MultinomialHMM(n_components=4, random_state=10)
hmm_model.fit(X)
print('Transmission probability')
print(hmm_model.transmat_)
print('Emission probability')
print(hmm_model.emissionprob_)
print('Initial probability')
print(hmm_model.startprob_)
| [
"matplotlib.pyplot.title",
"numpy.random.seed",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.sum",
"numpy.random.rand",
"numpy.zeros",
"sklearn.preprocessing.LabelEncoder",
"matplotlib.pyplot.figure",
"hmmlearn.hmm.MultinomialHMM",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.x... | [((1347, 1372), 'numpy.random.seed', 'np.random.seed', (['(200321513)'], {}), '(200321513)\n', (1361, 1372), True, 'import numpy as np\n'), ((2575, 2626), 'hmmlearn.hmm.MultinomialHMM', 'hmm.MultinomialHMM', ([], {'n_components': '(4)', 'random_state': '(10)'}), '(n_components=4, random_state=10)\n', (2593, 2626), False, 'from hmmlearn import hmm\n'), ((165, 199), 'numpy.zeros', 'np.zeros', (['[num_states, num_states]'], {}), '([num_states, num_states])\n', (173, 199), True, 'import numpy as np\n'), ((431, 466), 'numpy.zeros', 'np.zeros', (['[num_states, num_objects]'], {}), '([num_states, num_objects])\n', (439, 466), True, 'import numpy as np\n'), ((681, 698), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (695, 698), True, 'import numpy as np\n'), ((917, 943), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 6)'}), '(figsize=(7, 6))\n', (927, 943), True, 'import matplotlib.pyplot as plt\n'), ((970, 996), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'marker': '"""o"""'}), "(x, y, marker='o')\n", (978, 996), True, 'import matplotlib.pyplot as plt\n'), ((999, 1029), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of states"""'], {}), "('Number of states')\n", (1009, 1029), True, 'import matplotlib.pyplot as plt\n'), ((1032, 1051), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y_label'], {}), '(y_label)\n', (1042, 1051), True, 'import matplotlib.pyplot as plt\n'), ((1054, 1097), 'matplotlib.pyplot.title', 'plt.title', (["(y_label + ' VS Number of states')"], {}), "(y_label + ' VS Number of states')\n", (1063, 1097), True, 'import matplotlib.pyplot as plt\n'), ((1129, 1139), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1137, 1139), True, 'import matplotlib.pyplot as plt\n'), ((1186, 1203), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (1200, 1203), True, 'import numpy as np\n'), ((2095, 2146), 'hmmlearn.hmm.MultinomialHMM', 'hmm.MultinomialHMM', ([], {'n_components': 
'n', 'random_state': '(10)'}), '(n_components=n, random_state=10)\n', (2113, 2146), False, 'from hmmlearn import hmm\n'), ((294, 311), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (308, 311), True, 'import numpy as np\n'), ((562, 579), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (576, 579), True, 'import numpy as np\n'), ((1609, 1637), 'numpy.sum', 'np.sum', (['event_matrix'], {'axis': '(1)'}), '(event_matrix, axis=1)\n', (1615, 1637), True, 'import numpy as np\n'), ((2013, 2027), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (2025, 2027), False, 'from sklearn.preprocessing import LabelEncoder\n')] |
import numpy as np
import matplotlib as mpl
from seaborn.external.version import Version
def MarkerStyle(marker=None, fillstyle=None):
    """Build a matplotlib MarkerStyle, accepting an existing instance.

    Plain mpl.markers.MarkerStyle only accepts a MarkerStyle argument from
    matplotlib >= 3.3.0; this shim supports older versions.
    https://github.com/matplotlib/matplotlib/pull/16692
    """
    if isinstance(marker, mpl.markers.MarkerStyle):
        if fillstyle is None:
            # Nothing to override: reuse the instance as-is.
            return marker
        marker = marker.get_marker()
    return mpl.markers.MarkerStyle(marker, fillstyle)
def norm_from_scale(scale, norm):
    """Produce a Normalize object given a Scale and min/max domain limits."""
    # This is an internal matplotlib function that simplifies things to access
    # It is likely to become part of the matplotlib API at some point:
    # https://github.com/matplotlib/matplotlib/issues/20329
    if isinstance(norm, mpl.colors.Normalize):
        # Caller already supplied a Normalize; pass it through untouched.
        return norm
    if scale is None:
        return None
    if norm is None:
        vmin = vmax = None
    else:
        vmin, vmax = norm # TODO more helpful error if this fails?
    class ScaledNorm(mpl.colors.Normalize):
        # Like mpl.colors.Normalize, but runs values through the scale's
        # transform (assigned onto the instance below) before normalizing.
        def __call__(self, value, clip=None):
            # From github.com/matplotlib/matplotlib/blob/v3.4.2/lib/matplotlib/colors.py
            # See github.com/matplotlib/matplotlib/tree/v3.4.2/LICENSE
            value, is_scalar = self.process_value(value)
            self.autoscale_None(value)
            if self.vmin > self.vmax:
                raise ValueError("vmin must be less or equal to vmax")
            if self.vmin == self.vmax:
                return np.full_like(value, 0)
            if clip is None:
                clip = self.clip
            if clip:
                value = np.clip(value, self.vmin, self.vmax)
            # ***** Seaborn changes start ****
            t_value = self.transform(value).reshape(np.shape(value))
            t_vmin, t_vmax = self.transform([self.vmin, self.vmax])
            # ***** Seaborn changes end *****
            if not np.isfinite([t_vmin, t_vmax]).all():
                raise ValueError("Invalid vmin or vmax")
            t_value -= t_vmin
            t_value /= (t_vmax - t_vmin)
            t_value = np.ma.masked_invalid(t_value, copy=False)
            return t_value[0] if is_scalar else t_value
    new_norm = ScaledNorm(vmin, vmax)
    # Patch the scale's forward transform onto the instance; __call__ above
    # reads it via self.transform.
    new_norm.transform = scale.get_transform().transform
    return new_norm
def scale_factory(scale, axis, **kwargs):
    """
    Backwards compatability for creation of independent scales.
    Matplotlib scales require an Axis object for instantiation on < 3.4.
    But the axis is not used, aside from extraction of the axis_name in LogScale.
    """
    modify_transform = False
    if Version(mpl.__version__) < Version("3.4"):
        if axis[0] in "xy":
            modify_transform = True
            axis = axis[0]
            # Older matplotlib used per-axis keyword names, e.g. basex/basey
            # and nonposx/nonposy, instead of base/nonpositive.
            base = kwargs.pop("base", None)
            if base is not None:
                kwargs[f"base{axis}"] = base
            nonpos = kwargs.pop("nonpositive", None)
            if nonpos is not None:
                kwargs[f"nonpos{axis}"] = nonpos
    if isinstance(scale, str):
        # Minimal stand-in for an Axis object; only axis_name is consumed.
        class Axis:
            axis_name = axis
        axis = Axis()
    scale = mpl.scale.scale_factory(scale, axis, **kwargs)
    if modify_transform:
        transform = scale.get_transform()
        transform.base = kwargs.get("base", 10)
        if kwargs.get("nonpositive") == "mask":
            # Setting a private attribute, but we only get here
            # on an old matplotlib, so this won't break going forwards
            transform._clip = False
    return scale
def set_scale_obj(ax, axis, scale):
    """Handle backwards compatability with setting matplotlib scale."""
    if Version(mpl.__version__) < Version("3.4"):
        # The ability to pass a BaseScale instance to Axes.set_{}scale was added
        # to matplotlib in version 3.4.0: GH: matplotlib/matplotlib/pull/19089
        # Workaround: use the scale name, which is restrictive only if the user
        # wants to define a custom scale; they'll need to update the registry too.
        if scale.name is None:
            # Hack to support our custom Formatter-less CatScale
            return
        method = getattr(ax, f"set_{axis}scale")
        kws = {}
        if scale.name == "function":
            # A function scale cannot be recreated from its name alone;
            # forward the private transform callables explicitly.
            trans = scale.get_transform()
            kws["functions"] = (trans._forward, trans._inverse)
        method(scale.name, **kws)
    else:
        ax.set(**{f"{axis}scale": scale})
| [
"matplotlib.scale.scale_factory",
"numpy.full_like",
"numpy.ma.masked_invalid",
"numpy.clip",
"numpy.isfinite",
"numpy.shape",
"matplotlib.markers.MarkerStyle",
"seaborn.external.version.Version"
] | [((484, 526), 'matplotlib.markers.MarkerStyle', 'mpl.markers.MarkerStyle', (['marker', 'fillstyle'], {}), '(marker, fillstyle)\n', (507, 526), True, 'import matplotlib as mpl\n'), ((3250, 3296), 'matplotlib.scale.scale_factory', 'mpl.scale.scale_factory', (['scale', 'axis'], {}), '(scale, axis, **kwargs)\n', (3273, 3296), True, 'import matplotlib as mpl\n'), ((2741, 2765), 'seaborn.external.version.Version', 'Version', (['mpl.__version__'], {}), '(mpl.__version__)\n', (2748, 2765), False, 'from seaborn.external.version import Version\n'), ((2768, 2782), 'seaborn.external.version.Version', 'Version', (['"""3.4"""'], {}), "('3.4')\n", (2775, 2782), False, 'from seaborn.external.version import Version\n'), ((3767, 3791), 'seaborn.external.version.Version', 'Version', (['mpl.__version__'], {}), '(mpl.__version__)\n', (3774, 3791), False, 'from seaborn.external.version import Version\n'), ((3794, 3808), 'seaborn.external.version.Version', 'Version', (['"""3.4"""'], {}), "('3.4')\n", (3801, 3808), False, 'from seaborn.external.version import Version\n'), ((2209, 2250), 'numpy.ma.masked_invalid', 'np.ma.masked_invalid', (['t_value'], {'copy': '(False)'}), '(t_value, copy=False)\n', (2229, 2250), True, 'import numpy as np\n'), ((1606, 1628), 'numpy.full_like', 'np.full_like', (['value', '(0)'], {}), '(value, 0)\n', (1618, 1628), True, 'import numpy as np\n'), ((1736, 1772), 'numpy.clip', 'np.clip', (['value', 'self.vmin', 'self.vmax'], {}), '(value, self.vmin, self.vmax)\n', (1743, 1772), True, 'import numpy as np\n'), ((1872, 1887), 'numpy.shape', 'np.shape', (['value'], {}), '(value)\n', (1880, 1887), True, 'import numpy as np\n'), ((2022, 2051), 'numpy.isfinite', 'np.isfinite', (['[t_vmin, t_vmax]'], {}), '([t_vmin, t_vmax])\n', (2033, 2051), True, 'import numpy as np\n')] |
from __future__ import (absolute_import, print_function,
unicode_literals, division)
import pytest
import pandas as pd
import numpy as np
from .context import gragrapy as gg
from . import assert_data_equal
def test_stat_identity():
    """The identity stat must return its input data unchanged."""
    iris = gg.data.iris
    identity_stat = gg.stat.identity()
    assert_data_equal(iris, identity_stat.transform(iris))
@pytest.mark.parametrize('window', [5, 25])
def test_stat_smooth_mavg(window):
    """Moving-average smoother: the first window-1 points are NaN, the
    error band brackets the fit, and the fit is monotone for sorted data."""
    xs = np.sort(np.random.randn(50) * 4)
    ys = np.sort(np.random.randn(50))
    frame = pd.DataFrame({'x': xs, 'y': ys})
    trans = gg.stat.smooth(method='mavg', window=window).transform_group(frame)
    # x is passed through untouched; y/ymin/ymax lack the first window-1 values
    assert trans.x.isnull().sum() == 0
    for col in ('y', 'ymin', 'ymax'):
        assert trans[col].isnull().sum() == window - 1
    # Check the error bars surround the smoothed curve
    assert (trans.y.isnull()
            | ((trans.ymin < trans.y) & (trans.y < trans.ymax))).all()
    # The smoothed curve should be monotonically increasing
    deltas = trans.diff()[['x', 'y']]
    assert (deltas.isnull() | (deltas > 0)).all().all()
def test_stat_smooth_lm():
    """Linear-model smoother: error band brackets the fit and the fit is
    monotonically increasing for sorted input."""
    xs = np.sort(np.random.randn(50) * 4)
    ys = np.sort(np.random.randn(50))
    frame = pd.DataFrame({'x': xs, 'y': ys})
    trans = gg.stat.smooth(method='lm').transform_group(frame)
    # Check the error bars surround the smoothed curve
    assert ((trans.ymin < trans.y) & (trans.y < trans.ymax)).all()
    # The smoothed curve should be monotonically increasing
    deltas = trans.diff()[['x', 'y']]
    assert (deltas.isnull() | (deltas > 0)).all().all()
def test_stat_bin():
    """Binning 1/1/1/2/2/3 into three bins yields weights {1, 2, 3}."""
    frame = pd.DataFrame({'x': [1, 1, 1, 2, 2, 3]})
    binned = gg.stat.bin(bins=3).transform_group(frame)
    assert set(binned.weight) == {1, 2, 3}
def test_stat_boxplot():
    """Boxplot stat: a single inlier row carries the five-number summary,
    and the extreme values (-10, -50, 300) land in the `youtlier` column."""
    df = pd.DataFrame({'x': 1, 'y': np.random.randn(100)})
    # FIX: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat is the drop-in replacement (same default index behaviour).
    df = pd.concat([df, pd.DataFrame({'x': 1, 'y': [-10, -50, 300]})])
    trans = gg.stat.boxplot().transform_group(df)
    inliers = trans.youtlier.isnull()
    # Exactly one summary row for the group
    assert len(trans[inliers]) == 1
    stats = trans[inliers].iloc[0]
    assert stats.ymin < stats.lower < stats.ymid < stats.upper < stats.ymax
    assert stats.x == 1
    # Each injected extreme value produces an outlier row
    outliers = trans[~inliers]
    assert len(outliers) >= 3
    assert outliers['ymin lower ymid upper ymax'.split()].isnull().all().all()
    assert set(outliers.youtlier).issuperset({-10, -50, 300})
def test_grouping():
    """Stat.transform must call transform_group once per `group` value and
    stitch the per-group frames back together with the grouping columns."""
    class ToyStat(gg.stat.Stat):
        def transform_group(self, df, scales=None):
            # Ignores the input and returns a fixed two-row frame, so the
            # grouped output is fully predictable.
            return pd.DataFrame([[2,3],[5,7]], columns='a b'.split())
    # Three groups (1, 2, 3); x is constant within each group.
    df = pd.DataFrame([[1, 11, 21, 31, 41],
                       [1, 11, 21, 31, 40],
                       [1, 11, 21, 31, 41],
                       [2, 12, 22, 32, 42],
                       [2, 12, 22, 30, 42],
                       [2, 12, 22, 30, 42],
                       [3, 13, 23, 33, 43],
                       [3, 13, 23, 33, 43]],
                      columns='group a x y z'.split())
    # Each group contributes ToyStat's two rows, tagged with its group and x.
    expect = pd.DataFrame([[1, 2, 3, 21],
                           [1, 5, 7, 21],
                           [2, 2, 3, 22],
                           [2, 5, 7, 22],
                           [3, 2, 3, 23],
                           [3, 5, 7, 23]],
                          columns='group a b x'.split())
    assert_data_equal(ToyStat().transform(df), expect)
| [
"pandas.DataFrame",
"pytest.mark.parametrize",
"numpy.random.randn"
] | [((365, 407), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""window"""', '[5, 25]'], {}), "('window', [5, 25])\n", (388, 407), False, 'import pytest\n'), ((526, 556), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': x, 'y': y}"], {}), "({'x': x, 'y': y})\n", (538, 556), True, 'import pandas as pd\n'), ((1253, 1283), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': x, 'y': y}"], {}), "({'x': x, 'y': y})\n", (1265, 1283), True, 'import pandas as pd\n'), ((1666, 1705), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': [1, 1, 1, 2, 2, 3]}"], {}), "({'x': [1, 1, 1, 2, 2, 3]})\n", (1678, 1705), True, 'import pandas as pd\n'), ((496, 515), 'numpy.random.randn', 'np.random.randn', (['(50)'], {}), '(50)\n', (511, 515), True, 'import numpy as np\n'), ((1223, 1242), 'numpy.random.randn', 'np.random.randn', (['(50)'], {}), '(50)\n', (1238, 1242), True, 'import numpy as np\n'), ((1899, 1943), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': 1, 'y': [-10, -50, 300]}"], {}), "({'x': 1, 'y': [-10, -50, 300]})\n", (1911, 1943), True, 'import pandas as pd\n'), ((458, 477), 'numpy.random.randn', 'np.random.randn', (['(50)'], {}), '(50)\n', (473, 477), True, 'import numpy as np\n'), ((1185, 1204), 'numpy.random.randn', 'np.random.randn', (['(50)'], {}), '(50)\n', (1200, 1204), True, 'import numpy as np\n'), ((1857, 1877), 'numpy.random.randn', 'np.random.randn', (['(100)'], {}), '(100)\n', (1872, 1877), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt

# --- Configuration -----------------------------------------------------------
Clk = 0            # clock placeholder mirroring the HDL design (unused here)
Reset = 0          # active-high reset; 0 lets the register run
# 30-bit shift register and the seed loaded on the first cycle
shift_reg = np.array([1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0])
Semilla = np.array([0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1])
Longitud = 1       # selects the feedback-tap configuration (1, 2, or other)
Polinomio = 1
reg_1 = 5          # sentinel value: 5 marks "no shift performed yet"
# FIX: the output history was appended to `y`, which was never initialized
# (the original declared `Salida_record_array` instead and crashed with a
# NameError on the first append).  Initialize the record array as `y`.
y = np.array([])
# Loop mirroring the behaviour of the chained flip-flops (shift register)
for i in range(30):
    # Only run while reset is low
    if Reset == 0:
        # First iteration: load the seed into the register
        if i == 0:
            # NOTE(review): only indices 0..28 are copied from the seed;
            # shift_reg[29] keeps its initial value.  Confirm this matches
            # the Verilog design (Verilog [0:29] slicing is inclusive).
            shift_reg[0:29] = Semilla[0:29]
            a = str(i)
            print(a+"---------------------------------------------------------------"+a)
            print(shift_reg)
        # Every other iteration performs one shift step
        else:
            # Shift move in Verilog would be:
            # shift_reg[0:29] = shift_reg << 1
            # Here the shift is done with a loop and two auxiliary registers
            for j in range(29):
                if reg_1 == 5:
                    reg_1 = shift_reg[j]
                    reg_2 = shift_reg[j+1]
                    shift_reg[j+1] = reg_1
                else:
                    reg_1 = reg_2
                    reg_2 = shift_reg[j+1]
                    shift_reg[j+1] = reg_1
            # Select both the feedback tap and the output bit
            # Case 1
            if Longitud == 1:
                # Feedback
                Salida = shift_reg[29]
                shift_reg[0] = shift_reg[29] ^ shift_reg[19]
                # Print the output bit
                print(Salida)
                # Record every output bit, preserving order
                y = np.append(y, Salida)
            # Case 2
            elif Longitud == 2:
                # Feedback
                Salida = shift_reg[24]
                shift_reg[0] = shift_reg[24] ^ shift_reg[14]
                # Print the output bit
                print(Salida)
                # Record every output bit, preserving order
                y = np.append(y, Salida)
            # Case 3
            else:
                # Feedback
                Salida = shift_reg[19]
                shift_reg[0] = shift_reg[19] ^ shift_reg[9]
                # Print the output bit
                print(Salida)
                # Record every output bit, preserving order
                y = np.append(y, Salida)
    #a = str(i)
    #print(a+"---------------------------------------------------------------"+a)
    #print(shift_reg)
print(y)
# Plot the output bit stream against its state number.
# 29 samples: the seeding iteration produces no output bit.
plt.plot([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29], y, 'ro')
plt.grid(True)
plt.show()
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.append",
"numpy.array",
"matplotlib.pyplot.grid"
] | [((103, 207), 'numpy.array', 'np.array', (['[1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0,\n 1, 0, 1, 0, 0]'], {}), '([1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1,\n 1, 0, 0, 1, 0, 1, 0, 0])\n', (111, 207), True, 'import numpy as np\n'), ((215, 319), 'numpy.array', 'np.array', (['[0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0,\n 0, 1, 1, 0, 1]'], {}), '([0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1,\n 1, 1, 0, 0, 1, 1, 0, 1])\n', (223, 319), True, 'import numpy as np\n'), ((379, 391), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (387, 391), True, 'import numpy as np\n'), ((3044, 3174), 'matplotlib.pyplot.plot', 'plt.plot', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,\n 22, 23, 24, 25, 26, 27, 28, 29]', 'y', '"""ro"""'], {}), "([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,\n 20, 21, 22, 23, 24, 25, 26, 27, 28, 29], y, 'ro')\n", (3052, 3174), True, 'import matplotlib.pyplot as plt\n'), ((3144, 3158), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3152, 3158), True, 'import matplotlib.pyplot as plt\n'), ((3160, 3170), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3168, 3170), True, 'import matplotlib.pyplot as plt\n'), ((1930, 1950), 'numpy.append', 'np.append', (['y', 'Salida'], {}), '(y, Salida)\n', (1939, 1950), True, 'import numpy as np\n'), ((2330, 2350), 'numpy.append', 'np.append', (['y', 'Salida'], {}), '(y, Salida)\n', (2339, 2350), True, 'import numpy as np\n'), ((2715, 2735), 'numpy.append', 'np.append', (['y', 'Salida'], {}), '(y, Salida)\n', (2724, 2735), True, 'import numpy as np\n')] |
from sklearn.decomposition import IncrementalPCA
import numpy as np
class IPCAEstimator:
    """Thin wrapper around sklearn's IncrementalPCA with a batch-update API."""

    def __init__(self, n_components):
        self.n_components = n_components
        self.whiten = False
        batch = max(100, 5 * n_components)
        self.transformer = IncrementalPCA(
            n_components, whiten=self.whiten, batch_size=batch
        )
        self.batch_support = True

    def get_param_str(self):
        """Short identifier encoding component count and whitening flag."""
        suffix = "_w" if self.whiten else ""
        return "ipca_c{}{}".format(self.n_components, suffix)

    def fit(self, X):
        """Fit on the full dataset in one call."""
        self.transformer.fit(X)

    def fit_partial(self, X):
        """Feed one batch of samples; return False if the update failed."""
        try:
            self.transformer.partial_fit(X)
            # Widen the running sample counter to avoid integer overflow
            # on long streams.
            self.transformer.n_samples_seen_ = self.transformer.n_samples_seen_.astype(
                np.int64
            )
        except ValueError as e:
            print(f"\nIPCA error:", e)
            return False
        return True

    def get_components(self):
        """Return (components, stdev, var_ratio); PCA outputs are normalized."""
        tr = self.transformer
        stdev = np.sqrt(tr.explained_variance_)  # already sorted
        var_ratio = tr.explained_variance_ratio_
        return tr.components_, stdev, var_ratio
| [
"numpy.sqrt"
] | [((937, 982), 'numpy.sqrt', 'np.sqrt', (['self.transformer.explained_variance_'], {}), '(self.transformer.explained_variance_)\n', (944, 982), True, 'import numpy as np\n')] |
hlp = """
Datasets available at
http://sci2s.ugr.es/keel/category.php?cat=reg
"""
import gzip
from glob import glob
from numpy import genfromtxt
from os.path import join, realpath, dirname, basename
KEEL_PATH = join(dirname(realpath(__file__)), "keel")
KEEL_DATASETS = map(lambda p: basename(p), glob(join(KEEL_PATH, "*")))
def load_keel(name="abalone", n=None):
"""
Load an keel dataset.
:param name: Dataset name.
:param n: Maximum number of examples.
:return: Dataset in standard form.
"""
fp = gzip.open(join(KEEL_PATH, name, "%s.dat.gz" % name))
data = genfromtxt(fp, delimiter=",", skip_header=1, dtype=float, comments="@")
X = data[:, :-1]
y = data[:, -1].ravel()
labels = map(lambda i: "a%d" % i, range(X.shape[1]))
if n is not None and n < X.shape[0]:
X = X[:n, :]
y = y[:n]
return {
"data": X, "target": y, "labels": labels
} | [
"os.path.realpath",
"os.path.join",
"numpy.genfromtxt",
"os.path.basename"
] | [((601, 672), 'numpy.genfromtxt', 'genfromtxt', (['fp'], {'delimiter': '""","""', 'skip_header': '(1)', 'dtype': 'float', 'comments': '"""@"""'}), "(fp, delimiter=',', skip_header=1, dtype=float, comments='@')\n", (611, 672), False, 'from numpy import genfromtxt\n'), ((234, 252), 'os.path.realpath', 'realpath', (['__file__'], {}), '(__file__)\n', (242, 252), False, 'from os.path import join, realpath, dirname, basename\n'), ((293, 304), 'os.path.basename', 'basename', (['p'], {}), '(p)\n', (301, 304), False, 'from os.path import join, realpath, dirname, basename\n'), ((311, 331), 'os.path.join', 'join', (['KEEL_PATH', '"""*"""'], {}), "(KEEL_PATH, '*')\n", (315, 331), False, 'from os.path import join, realpath, dirname, basename\n'), ((547, 588), 'os.path.join', 'join', (['KEEL_PATH', 'name', "('%s.dat.gz' % name)"], {}), "(KEEL_PATH, name, '%s.dat.gz' % name)\n", (551, 588), False, 'from os.path import join, realpath, dirname, basename\n')] |
import numpy as N
from pebl import data
from pebl.test import testfile
class TestFileParsing:
    """Parse a basic data file (testdata1.txt) and compare observations,
    dtype, variable names, missing mask, intervention mask and arities
    against hand-written expectations."""
    def setUp(self):
        self.data = data.fromfile(testfile('testdata1.txt'))
        self.expected_observations = N.array([[ 2.5, 0. , 1.7],
                                             [ 1.1, 1.7, 2.3],
                                             [ 4.2, 999.3, 12. ]])
        self.expected_dtype = N.dtype(float)
        self.expected_varnames = ['var1', 'var2', 'var3']
        self.expected_missing = N.array([[False, True, False],
                                      [False, False, False],
                                      [False, False, False]], dtype=bool)
        self.expected_interventions = N.array([[ True, True, False],
                                            [False, True, False],
                                            [False, False, False]], dtype=bool)
        # arity -1 denotes a continuous variable
        self.expected_arities = [-1,-1,-1]
    def test_observations(self):
        assert (self.data.observations == self.expected_observations).all()
    def test_dtype(self):
        assert self.data.observations.dtype == self.expected_dtype
    def test_varnames(self):
        assert [v.name for v in self.data.variables] == self.expected_varnames
    def test_missing(self):
        assert (self.data.missing == self.expected_missing).all()
    def test_interventions(self):
        assert (self.data.interventions == self.expected_interventions).all()
    def test_arities(self):
        assert [v.arity for v in self.data.variables] == self.expected_arities
class TestComplexFileParsing(TestFileParsing):
    """Parse a file mixing discrete and continuous variables plus class
    labels (testdata2.txt); inherits the assertion methods above."""
    def setUp(self):
        self.data = data.fromfile(testfile('testdata2.txt'))
        self.expected_observations = N.array([[ 0. , 0. , 1.25, 0. ],
                                             [ 1. , 1. , 1.1 , 1. ],
                                             [ 1. , 2. , 0.45, 1. ]])
        self.expected_dtype = N.dtype(float) # because one continuous variable
        self.expected_varnames = ['shh', 'ptchp', 'smo', 'outcome']
        self.expected_interventions = N.array([[ True, True, False, False],
                                              [False, True, False, False],
                                              [False, False, False, False]], dtype=bool)
        self.expected_missing = N.array([[False, False, False, False],
                                        [False, False, False, False],
                                        [False, False, False, False]], dtype=bool)
        self.expected_arities = [2, 3, -1, 2]
    def test_classlabels(self):
        # the fourth variable is a class variable with named labels
        assert self.data.variables[3].labels == ['good', 'bad']
class TestFileParsing_WithSampleNames(TestFileParsing):
    """Parse a file whose rows carry sample names (testdata3.txt)."""
    def setUp(self):
        self.data = data.fromfile(testfile('testdata3.txt'))
        self.expected_observations = N.array([[0, 0], [1, 1], [1,2]])
        self.expected_missing = N.array([[0, 0], [0, 0], [0, 0]], dtype=bool)
        self.expected_interventions = N.array([[1, 1], [0, 1], [0, 0]], dtype=bool)
        self.expected_varnames = ['shh', 'ptchp']
        self.expected_samplenames = ['sample1', 'sample2', 'sample3']
        self.expected_arities = [2,3]
        # all-discrete data parses as integers
        self.expected_dtype = N.dtype(int)
    def test_sample_names(self):
        assert [s.name for s in self.data.samples] == self.expected_samplenames
class TestFileParsing_WithSampleNames2(TestFileParsing_WithSampleNames):
    """Same as above but the header row has no leading tab (testdata4.txt);
    the parser must cope with both header layouts."""
    def setUp(self):
        self.data = data.fromfile(testfile('testdata4.txt')) # no tab before variable names
        self.expected_observations = N.array([[0, 0], [1, 1], [1,2]])
        self.expected_missing = N.array([[0, 0], [0, 0], [0, 0]], dtype=bool)
        self.expected_interventions = N.array([[1, 1], [0, 1], [0, 0]], dtype=bool)
        self.expected_varnames = ['shh', 'ptchp']
        self.expected_samplenames = ['sample1', 'sample2', 'sample3']
        self.expected_arities = [2,3]
        self.expected_dtype = N.dtype(int)
class TestManualDataCreations:
    """Build a Dataset directly from arrays (no file parsing) and exercise
    the missing-mask accessors and subset() slicing by variable/sample."""
    def setUp(self):
        # 6 samples x 5 variables of observations
        obs = N.array([[1.2, 1.4, 2.1, 2.2, 1.1],
                       [2.3, 1.1, 2.1, 3.2, 1.3],
                       [3.2, 0.0, 2.2, 2.5, 1.6],
                       [4.2, 2.4, 3.2, 2.1, 2.8],
                       [2.7, 1.5, 0.0, 1.5, 1.1],
                       [1.1, 2.3, 2.1, 1.7, 3.2] ])
        interventions = N.array([[0,0,0,0,0],
                                 [0,1,0,0,0],
                                 [0,0,1,1,0],
                                 [0,0,0,0,0],
                                 [0,0,0,0,0],
                                 [0,0,0,1,0] ])
        # three entries marked missing: (2,1), (3,1), (4,2)
        missing = N.array([[0,0,0,0,0],
                           [0,0,0,0,0],
                           [0,1,0,0,0],
                           [0,1,0,0,0],
                           [0,0,1,0,0],
                           [0,0,0,0,0] ])
        variablenames = ["gene A", "gene B", "receptor protein C", " receptor D", "E kinase protein"]
        samplenames = ["head.wt", "limb.wt", "head.shh_knockout", "head.gli_knockout",
                       "limb.shh_knockout", "limb.gli_knockout"]
        self.data = data.Dataset(
            obs,
            missing.astype(bool),
            interventions.astype(bool),
            N.array([data.Variable(n) for n in variablenames]),
            N.array([data.Sample(n) for n in samplenames])
        )
    def test_missing(self):
        # N.where returns (row indices, column indices) of missing entries
        x,y = N.where(self.data.missing)
        assert (x == N.array([2, 3, 4])).all() and \
               (y == N.array([1, 1, 2])).all()
    def test_missing2(self):
        assert self.data.missing[N.where(self.data.missing)].tolist() == [ True, True, True]
    def test_missing3(self):
        assert (N.transpose(N.where(self.data.missing)) == N.array([[2, 1],[3, 1],[4, 2]])).all()
    def test_subset1(self):
        # selecting variables 0, 2, 4 keeps all samples
        expected = N.array([[ 1.2, 2.1, 1.1],
                            [ 2.3, 2.1, 1.3],
                            [ 3.2, 2.2, 1.6],
                            [ 4.2, 3.2, 2.8],
                            [ 2.7, 0. , 1.1],
                            [ 1.1, 2.1, 3.2]])
        assert (self.data.subset(variables=[0,2,4]).observations == expected).all()
    def test_subset2(self):
        # selecting samples 0 and 2 keeps all variables
        expected = N.array([[ 1.2, 1.4, 2.1, 2.2, 1.1],
                            [ 3.2, 0. , 2.2, 2.5, 1.6]])
        assert (self.data.subset(samples=[0,2]).observations == expected).all()
    def test_subset3(self):
        # simultaneous variable and sample selection
        subset = self.data.subset(variables=[0,2], samples=[1,2])
        expected = N.array([[ 2.3, 2.1],
                            [ 3.2, 2.2]])
        assert (subset.observations == expected).all()
    def test_subset3_interventions(self):
        subset = self.data.subset(variables=[0,2], samples=[1,2])
        expected = N.array([[False, False],
                            [False, True]], dtype=bool)
        assert (subset.interventions == expected).all()
    def test_subset3_missing(self):
        subset = self.data.subset(variables=[0,2], samples=[1,2])
        expected = N.array([[False, False],
                            [False, False]], dtype=bool)
        assert (subset.missing == expected).all()
    def test_subset3_varnames(self):
        subset = self.data.subset(variables=[0,2], samples=[1,2])
        expected = ['gene A', 'receptor protein C']
        assert [v.name for v in subset.variables] == expected
    def test_subset3_samplenames(self):
        subset = self.data.subset(variables=[0,2], samples=[1,2])
        expected = ['limb.wt', 'head.shh_knockout']
        assert [s.name for s in subset.samples] == expected
class TestDataDiscretization:
    """Discretize all variables of testdata5.txt into three bins each and
    check the original data is preserved alongside the binned values."""
    def setUp(self):
        self.data = data.fromfile(testfile('testdata5.txt'))
        self.data.discretize()
        self.expected_original = \
            N.array([[ 1.2, 1.4, 2.1, 2.2, 1.1],
                     [ 2.3, 1.1, 2.1, 3.2, 1.3],
                     [ 3.2, 0. , 1.2, 2.5, 1.6],
                     [ 4.2, 2.4, 3.2, 2.1, 2.8],
                     [ 2.7, 1.5, 0. , 1.5, 1.1],
                     [ 1.1, 2.3, 2.1, 1.7, 3.2],
                     [ 2.3, 1.1, 4.3, 2.3, 1.1],
                     [ 3.2, 2.6, 1.9, 1.7, 1.1],
                     [ 2.1, 1.5, 3. , 1.4, 1.1]])
        self.expected_discretized = \
            N.array([[0, 1, 1, 1, 0],
                     [1, 0, 1, 2, 1],
                     [2, 0, 0, 2, 2],
                     [2, 2, 2, 1, 2],
                     [1, 1, 0, 0, 0],
                     [0, 2, 1, 0, 2],
                     [1, 0, 2, 2, 0],
                     [2, 2, 0, 0, 0],
                     [0, 1, 2, 0, 0]])
        # every variable becomes discrete with three bins
        self.expected_arities = [3,3,3,3,3]
    def test_orig_observations(self):
        assert (self.data.original_observations == self.expected_original).all()
    def test_disc_observations(self):
        assert (self.data.observations == self.expected_discretized).all()
    def test_arity(self):
        assert [v.arity for v in self.data.variables] == self.expected_arities
class TestDataDiscretizationWithMissing:
    """Respond to Issue 32: Pebl should ignore the missing values when
    selecting bins for each data point. Discretization for this should be
    the same as if there were no missing data, as in TestDataDiscretization.
    """
    def setUp(self):
        self.data = data.fromfile(testfile('testdata5m.txt'))
        self.data.discretize()
        # same values as testdata5.txt plus three all-missing rows of zeros
        self.expected_original = \
            N.array([[ 1.2, 1.4, 2.1, 2.2, 1.1],
                     [ 2.3, 1.1, 2.1, 3.2, 1.3],
                     [ 3.2, 0. , 1.2, 2.5, 1.6],
                     [ 4.2, 2.4, 3.2, 2.1, 2.8],
                     [ 2.7, 1.5, 0. , 1.5, 1.1],
                     [ 1.1, 2.3, 2.1, 1.7, 3.2],
                     [ 2.3, 1.1, 4.3, 2.3, 1.1],
                     [ 3.2, 2.6, 1.9, 1.7, 1.1],
                     [ 2.1, 1.5, 3. , 1.4, 1.1],
                     [ 0. , 0. , 0. , 0. , 0. ],
                     [ 0. , 0. , 0. , 0. , 0. ],
                     [ 0. , 0. , 0. , 0. , 0. ]])
        # binning must match TestDataDiscretization for the non-missing rows
        self.expected_discretized = \
            N.array([[0, 1, 1, 1, 0],
                     [1, 0, 1, 2, 1],
                     [2, 0, 0, 2, 2],
                     [2, 2, 2, 1, 2],
                     [1, 1, 0, 0, 0],
                     [0, 2, 1, 0, 2],
                     [1, 0, 2, 2, 0],
                     [2, 2, 0, 0, 0],
                     [0, 1, 2, 0, 0],
                     [0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0],
                     [0, 0, 0, 0, 0]])
        self.expected_arities = [3,3,3,3,3]
        self.expected_missing = N.array([[False, False, False, False, False],
                                      [False, False, False, False, False],
                                      [False, False, False, False, False],
                                      [False, False, False, False, False],
                                      [False, False, False, False, False],
                                      [False, False, False, False, False],
                                      [False, False, False, False, False],
                                      [False, False, False, False, False],
                                      [False, False, False, False, False],
                                      [True , True , True , True , True ],
                                      [True , True , True , True , True ],
                                      [True , True , True , True , True ]],
                                      dtype=bool)
    def test_orig_observations(self):
        assert (self.data.original_observations == self.expected_original).all()
    def test_disc_observations(self):
        assert (self.data.observations == self.expected_discretized).all()
    def test_arity(self):
        assert [v.arity for v in self.data.variables] == self.expected_arities
    def test_missing(self):
        assert (self.data.missing == self.expected_missing).all()
class TestSelectiveDataDiscretization(TestDataDiscretization):
    """Discretize only variables 0 and 2 (includevars); the other columns
    stay continuous with arity -1."""
    def setUp(self):
        self.data = data.fromfile(testfile('testdata5.txt'))
        self.data.discretize(includevars=[0,2])
        self.expected_original = \
            N.array([[ 1.2, 1.4, 2.1, 2.2, 1.1],
                     [ 2.3, 1.1, 2.1, 3.2, 1.3],
                     [ 3.2, 0. , 1.2, 2.5, 1.6],
                     [ 4.2, 2.4, 3.2, 2.1, 2.8],
                     [ 2.7, 1.5, 0. , 1.5, 1.1],
                     [ 1.1, 2.3, 2.1, 1.7, 3.2],
                     [ 2.3, 1.1, 4.3, 2.3, 1.1],
                     [ 3.2, 2.6, 1.9, 1.7, 1.1],
                     [ 2.1, 1.5, 3. , 1.4, 1.1]])
        # columns 0 and 2 binned; columns 1, 3, 4 keep their raw values
        self.expected_discretized = \
            N.array([[ 0. , 1.4, 1. , 2.2, 1.1],
                     [ 1. , 1.1, 1. , 3.2, 1.3],
                     [ 2. , 0. , 0. , 2.5, 1.6],
                     [ 2. , 2.4, 2. , 2.1, 2.8],
                     [ 1. , 1.5, 0. , 1.5, 1.1],
                     [ 0. , 2.3, 1. , 1.7, 3.2],
                     [ 1. , 1.1, 2. , 2.3, 1.1],
                     [ 2. , 2.6, 0. , 1.7, 1.1],
                     [ 0. , 1.5, 2. , 1.4, 1.1]])
        self.expected_arities = [3,-1,3,-1,-1]
class TestSelectiveDataDiscretization2(TestDataDiscretization):
    """Discretize everything except variables 0 and 1 (excludevars);
    those two columns stay continuous with arity -1."""
    def setUp(self):
        self.data = data.fromfile(testfile('testdata5.txt'))
        self.data.discretize(excludevars=[0,1])
        self.expected_original = \
            N.array([[ 1.2, 1.4, 2.1, 2.2, 1.1],
                     [ 2.3, 1.1, 2.1, 3.2, 1.3],
                     [ 3.2, 0. , 1.2, 2.5, 1.6],
                     [ 4.2, 2.4, 3.2, 2.1, 2.8],
                     [ 2.7, 1.5, 0. , 1.5, 1.1],
                     [ 1.1, 2.3, 2.1, 1.7, 3.2],
                     [ 2.3, 1.1, 4.3, 2.3, 1.1],
                     [ 3.2, 2.6, 1.9, 1.7, 1.1],
                     [ 2.1, 1.5, 3. , 1.4, 1.1]])
        # columns 2, 3, 4 binned; columns 0 and 1 keep their raw values
        self.expected_discretized = \
            N.array([[ 1.2, 1.4, 1. , 1. , 0. ],
                     [ 2.3, 1.1, 1. , 2. , 1. ],
                     [ 3.2, 0. , 0. , 2. , 2. ],
                     [ 4.2, 2.4, 2. , 1. , 2. ],
                     [ 2.7, 1.5, 0. , 0. , 0. ],
                     [ 1.1, 2.3, 1. , 0. , 2. ],
                     [ 2.3, 1.1, 2. , 2. , 0. ],
                     [ 3.2, 2.6, 0. , 0. , 0. ],
                     [ 2.1, 1.5, 2. , 0. , 0. ]])
        self.expected_arities = [-1,-1,3,3,3]
def test_arity_checking():
    """A declared arity smaller than the number of observed values must
    raise IncorrectArityError."""
    raised = False
    try:
        # arity specified is less than number of unique values!!
        data.fromfile(testfile('testdata6.txt'))
    except data.IncorrectArityError:
        raised = True
    assert raised
def test_arity_checking2():
    """A declared arity LARGER than the number of observed values is fine;
    the declared arities are kept as-is."""
    # FIX: the old bare `except: assert False` swallowed the real traceback
    # (and would even catch KeyboardInterrupt).  Letting an unexpected
    # exception propagate still fails the test, with full diagnostics.
    dataset = data.fromfile(testfile('testdata7.txt'))
    assert [v.arity for v in dataset.variables] == [3,4,3,6]
| [
"numpy.dtype",
"pebl.test.testfile",
"pebl.data.Sample",
"numpy.where",
"numpy.array",
"pebl.data.Variable"
] | [((215, 278), 'numpy.array', 'N.array', (['[[2.5, 0.0, 1.7], [1.1, 1.7, 2.3], [4.2, 999.3, 12.0]]'], {}), '([[2.5, 0.0, 1.7], [1.1, 1.7, 2.3], [4.2, 999.3, 12.0]])\n', (222, 278), True, 'import numpy as N\n'), ((425, 439), 'numpy.dtype', 'N.dtype', (['float'], {}), '(float)\n', (432, 439), True, 'import numpy as N\n'), ((530, 624), 'numpy.array', 'N.array', (['[[False, True, False], [False, False, False], [False, False, False]]'], {'dtype': 'bool'}), '([[False, True, False], [False, False, False], [False, False, False]\n ], dtype=bool)\n', (537, 624), True, 'import numpy as N\n'), ((741, 832), 'numpy.array', 'N.array', (['[[True, True, False], [False, True, False], [False, False, False]]'], {'dtype': 'bool'}), '([[True, True, False], [False, True, False], [False, False, False]],\n dtype=bool)\n', (748, 832), True, 'import numpy as N\n'), ((1770, 1847), 'numpy.array', 'N.array', (['[[0.0, 0.0, 1.25, 0.0], [1.0, 1.0, 1.1, 1.0], [1.0, 2.0, 0.45, 1.0]]'], {}), '([[0.0, 0.0, 1.25, 0.0], [1.0, 1.0, 1.1, 1.0], [1.0, 2.0, 0.45, 1.0]])\n', (1777, 1847), True, 'import numpy as N\n'), ((1992, 2006), 'numpy.dtype', 'N.dtype', (['float'], {}), '(float)\n', (1999, 2006), True, 'import numpy as N\n'), ((2147, 2260), 'numpy.array', 'N.array', (['[[True, True, False, False], [False, True, False, False], [False, False, \n False, False]]'], {'dtype': 'bool'}), '([[True, True, False, False], [False, True, False, False], [False, \n False, False, False]], dtype=bool)\n', (2154, 2260), True, 'import numpy as N\n'), ((2385, 2500), 'numpy.array', 'N.array', (['[[False, False, False, False], [False, False, False, False], [False, False,\n False, False]]'], {'dtype': 'bool'}), '([[False, False, False, False], [False, False, False, False], [False,\n False, False, False]], dtype=bool)\n', (2392, 2500), True, 'import numpy as N\n'), ((2908, 2941), 'numpy.array', 'N.array', (['[[0, 0], [1, 1], [1, 2]]'], {}), '([[0, 0], [1, 1], [1, 2]])\n', (2915, 2941), True, 'import numpy as N\n'), ((2973, 
3018), 'numpy.array', 'N.array', (['[[0, 0], [0, 0], [0, 0]]'], {'dtype': 'bool'}), '([[0, 0], [0, 0], [0, 0]], dtype=bool)\n', (2980, 3018), True, 'import numpy as N\n'), ((3057, 3102), 'numpy.array', 'N.array', (['[[1, 1], [0, 1], [0, 0]]'], {'dtype': 'bool'}), '([[1, 1], [0, 1], [0, 0]], dtype=bool)\n', (3064, 3102), True, 'import numpy as N\n'), ((3291, 3303), 'numpy.dtype', 'N.dtype', (['int'], {}), '(int)\n', (3298, 3303), True, 'import numpy as N\n'), ((3650, 3683), 'numpy.array', 'N.array', (['[[0, 0], [1, 1], [1, 2]]'], {}), '([[0, 0], [1, 1], [1, 2]])\n', (3657, 3683), True, 'import numpy as N\n'), ((3715, 3760), 'numpy.array', 'N.array', (['[[0, 0], [0, 0], [0, 0]]'], {'dtype': 'bool'}), '([[0, 0], [0, 0], [0, 0]], dtype=bool)\n', (3722, 3760), True, 'import numpy as N\n'), ((3799, 3844), 'numpy.array', 'N.array', (['[[1, 1], [0, 1], [0, 0]]'], {'dtype': 'bool'}), '([[1, 1], [0, 1], [0, 0]], dtype=bool)\n', (3806, 3844), True, 'import numpy as N\n'), ((4033, 4045), 'numpy.dtype', 'N.dtype', (['int'], {}), '(int)\n', (4040, 4045), True, 'import numpy as N\n'), ((4113, 4294), 'numpy.array', 'N.array', (['[[1.2, 1.4, 2.1, 2.2, 1.1], [2.3, 1.1, 2.1, 3.2, 1.3], [3.2, 0.0, 2.2, 2.5,\n 1.6], [4.2, 2.4, 3.2, 2.1, 2.8], [2.7, 1.5, 0.0, 1.5, 1.1], [1.1, 2.3, \n 2.1, 1.7, 3.2]]'], {}), '([[1.2, 1.4, 2.1, 2.2, 1.1], [2.3, 1.1, 2.1, 3.2, 1.3], [3.2, 0.0, \n 2.2, 2.5, 1.6], [4.2, 2.4, 3.2, 2.1, 2.8], [2.7, 1.5, 0.0, 1.5, 1.1], [\n 1.1, 2.3, 2.1, 1.7, 3.2]])\n', (4120, 4294), True, 'import numpy as N\n'), ((4426, 4541), 'numpy.array', 'N.array', (['[[0, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 1, 0], [0, 0, 0, 0, 0], [0, 0,\n 0, 0, 0], [0, 0, 0, 1, 0]]'], {}), '([[0, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 1, 1, 0], [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0], [0, 0, 0, 1, 0]])\n', (4433, 4541), True, 'import numpy as N\n'), ((4699, 4814), 'numpy.array', 'N.array', (['[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0,\n 1, 0, 0], [0, 0, 0, 0, 0]]'], {}), 
'([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 1, 0, 0, 0],\n [0, 0, 1, 0, 0], [0, 0, 0, 0, 0]])\n', (4706, 4814), True, 'import numpy as N\n'), ((5511, 5537), 'numpy.where', 'N.where', (['self.data.missing'], {}), '(self.data.missing)\n', (5518, 5537), True, 'import numpy as N\n'), ((5941, 6056), 'numpy.array', 'N.array', (['[[1.2, 2.1, 1.1], [2.3, 2.1, 1.3], [3.2, 2.2, 1.6], [4.2, 3.2, 2.8], [2.7, \n 0.0, 1.1], [1.1, 2.1, 3.2]]'], {}), '([[1.2, 2.1, 1.1], [2.3, 2.1, 1.3], [3.2, 2.2, 1.6], [4.2, 3.2, 2.8],\n [2.7, 0.0, 1.1], [1.1, 2.1, 3.2]])\n', (5948, 6056), True, 'import numpy as N\n'), ((6343, 6406), 'numpy.array', 'N.array', (['[[1.2, 1.4, 2.1, 2.2, 1.1], [3.2, 0.0, 2.2, 2.5, 1.6]]'], {}), '([[1.2, 1.4, 2.1, 2.2, 1.1], [3.2, 0.0, 2.2, 2.5, 1.6]])\n', (6350, 6406), True, 'import numpy as N\n'), ((6639, 6672), 'numpy.array', 'N.array', (['[[2.3, 2.1], [3.2, 2.2]]'], {}), '([[2.3, 2.1], [3.2, 2.2]])\n', (6646, 6672), True, 'import numpy as N\n'), ((6888, 6940), 'numpy.array', 'N.array', (['[[False, False], [False, True]]'], {'dtype': 'bool'}), '([[False, False], [False, True]], dtype=bool)\n', (6895, 6940), True, 'import numpy as N\n'), ((7148, 7201), 'numpy.array', 'N.array', (['[[False, False], [False, False]]'], {'dtype': 'bool'}), '([[False, False], [False, False]], dtype=bool)\n', (7155, 7201), True, 'import numpy as N\n'), ((7917, 8184), 'numpy.array', 'N.array', (['[[1.2, 1.4, 2.1, 2.2, 1.1], [2.3, 1.1, 2.1, 3.2, 1.3], [3.2, 0.0, 1.2, 2.5,\n 1.6], [4.2, 2.4, 3.2, 2.1, 2.8], [2.7, 1.5, 0.0, 1.5, 1.1], [1.1, 2.3, \n 2.1, 1.7, 3.2], [2.3, 1.1, 4.3, 2.3, 1.1], [3.2, 2.6, 1.9, 1.7, 1.1], [\n 2.1, 1.5, 3.0, 1.4, 1.1]]'], {}), '([[1.2, 1.4, 2.1, 2.2, 1.1], [2.3, 1.1, 2.1, 3.2, 1.3], [3.2, 0.0, \n 1.2, 2.5, 1.6], [4.2, 2.4, 3.2, 2.1, 2.8], [2.7, 1.5, 0.0, 1.5, 1.1], [\n 1.1, 2.3, 2.1, 1.7, 3.2], [2.3, 1.1, 4.3, 2.3, 1.1], [3.2, 2.6, 1.9, \n 1.7, 1.1], [2.1, 1.5, 3.0, 1.4, 1.1]])\n', (7924, 8184), True, 'import numpy as N\n'), ((8433, 8603), 
'numpy.array', 'N.array', (['[[0, 1, 1, 1, 0], [1, 0, 1, 2, 1], [2, 0, 0, 2, 2], [2, 2, 2, 1, 2], [1, 1,\n 0, 0, 0], [0, 2, 1, 0, 2], [1, 0, 2, 2, 0], [2, 2, 0, 0, 0], [0, 1, 2, \n 0, 0]]'], {}), '([[0, 1, 1, 1, 0], [1, 0, 1, 2, 1], [2, 0, 0, 2, 2], [2, 2, 2, 1, 2],\n [1, 1, 0, 0, 0], [0, 2, 1, 0, 2], [1, 0, 2, 2, 0], [2, 2, 0, 0, 0], [0,\n 1, 2, 0, 0]])\n', (8440, 8603), True, 'import numpy as N\n'), ((9576, 9929), 'numpy.array', 'N.array', (['[[1.2, 1.4, 2.1, 2.2, 1.1], [2.3, 1.1, 2.1, 3.2, 1.3], [3.2, 0.0, 1.2, 2.5,\n 1.6], [4.2, 2.4, 3.2, 2.1, 2.8], [2.7, 1.5, 0.0, 1.5, 1.1], [1.1, 2.3, \n 2.1, 1.7, 3.2], [2.3, 1.1, 4.3, 2.3, 1.1], [3.2, 2.6, 1.9, 1.7, 1.1], [\n 2.1, 1.5, 3.0, 1.4, 1.1], [0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, \n 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0]]'], {}), '([[1.2, 1.4, 2.1, 2.2, 1.1], [2.3, 1.1, 2.1, 3.2, 1.3], [3.2, 0.0, \n 1.2, 2.5, 1.6], [4.2, 2.4, 3.2, 2.1, 2.8], [2.7, 1.5, 0.0, 1.5, 1.1], [\n 1.1, 2.3, 2.1, 1.7, 3.2], [2.3, 1.1, 4.3, 2.3, 1.1], [3.2, 2.6, 1.9, \n 1.7, 1.1], [2.1, 1.5, 3.0, 1.4, 1.1], [0.0, 0.0, 0.0, 0.0, 0.0], [0.0, \n 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0]])\n', (9583, 9929), True, 'import numpy as N\n'), ((10251, 10472), 'numpy.array', 'N.array', (['[[0, 1, 1, 1, 0], [1, 0, 1, 2, 1], [2, 0, 0, 2, 2], [2, 2, 2, 1, 2], [1, 1,\n 0, 0, 0], [0, 2, 1, 0, 2], [1, 0, 2, 2, 0], [2, 2, 0, 0, 0], [0, 1, 2, \n 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]'], {}), '([[0, 1, 1, 1, 0], [1, 0, 1, 2, 1], [2, 0, 0, 2, 2], [2, 2, 2, 1, 2],\n [1, 1, 0, 0, 0], [0, 2, 1, 0, 2], [1, 0, 2, 2, 0], [2, 2, 0, 0, 0], [0,\n 1, 2, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])\n', (10258, 10472), True, 'import numpy as N\n'), ((10761, 11240), 'numpy.array', 'N.array', (['[[False, False, False, False, False], [False, False, False, False, False],\n [False, False, False, False, False], [False, False, False, False, False\n ], [False, False, False, False, False], [False, False, False, False, \n False], [False, False, 
False, False, False], [False, False, False, \n False, False], [False, False, False, False, False], [True, True, True, \n True, True], [True, True, True, True, True], [True, True, True, True, True]\n ]'], {'dtype': 'bool'}), '([[False, False, False, False, False], [False, False, False, False, \n False], [False, False, False, False, False], [False, False, False, \n False, False], [False, False, False, False, False], [False, False, \n False, False, False], [False, False, False, False, False], [False, \n False, False, False, False], [False, False, False, False, False], [True,\n True, True, True, True], [True, True, True, True, True], [True, True, \n True, True, True]], dtype=bool)\n', (10768, 11240), True, 'import numpy as N\n'), ((12396, 12663), 'numpy.array', 'N.array', (['[[1.2, 1.4, 2.1, 2.2, 1.1], [2.3, 1.1, 2.1, 3.2, 1.3], [3.2, 0.0, 1.2, 2.5,\n 1.6], [4.2, 2.4, 3.2, 2.1, 2.8], [2.7, 1.5, 0.0, 1.5, 1.1], [1.1, 2.3, \n 2.1, 1.7, 3.2], [2.3, 1.1, 4.3, 2.3, 1.1], [3.2, 2.6, 1.9, 1.7, 1.1], [\n 2.1, 1.5, 3.0, 1.4, 1.1]]'], {}), '([[1.2, 1.4, 2.1, 2.2, 1.1], [2.3, 1.1, 2.1, 3.2, 1.3], [3.2, 0.0, \n 1.2, 2.5, 1.6], [4.2, 2.4, 3.2, 2.1, 2.8], [2.7, 1.5, 0.0, 1.5, 1.1], [\n 1.1, 2.3, 2.1, 1.7, 3.2], [2.3, 1.1, 4.3, 2.3, 1.1], [3.2, 2.6, 1.9, \n 1.7, 1.1], [2.1, 1.5, 3.0, 1.4, 1.1]])\n', (12403, 12663), True, 'import numpy as N\n'), ((12912, 13179), 'numpy.array', 'N.array', (['[[0.0, 1.4, 1.0, 2.2, 1.1], [1.0, 1.1, 1.0, 3.2, 1.3], [2.0, 0.0, 0.0, 2.5,\n 1.6], [2.0, 2.4, 2.0, 2.1, 2.8], [1.0, 1.5, 0.0, 1.5, 1.1], [0.0, 2.3, \n 1.0, 1.7, 3.2], [1.0, 1.1, 2.0, 2.3, 1.1], [2.0, 2.6, 0.0, 1.7, 1.1], [\n 0.0, 1.5, 2.0, 1.4, 1.1]]'], {}), '([[0.0, 1.4, 1.0, 2.2, 1.1], [1.0, 1.1, 1.0, 3.2, 1.3], [2.0, 0.0, \n 0.0, 2.5, 1.6], [2.0, 2.4, 2.0, 2.1, 2.8], [1.0, 1.5, 0.0, 1.5, 1.1], [\n 0.0, 2.3, 1.0, 1.7, 3.2], [1.0, 1.1, 2.0, 2.3, 1.1], [2.0, 2.6, 0.0, \n 1.7, 1.1], [0.0, 1.5, 2.0, 1.4, 1.1]])\n', (12919, 13179), True, 'import numpy as N\n'), ((13675, 13942), 'numpy.array', 
'N.array', (['[[1.2, 1.4, 2.1, 2.2, 1.1], [2.3, 1.1, 2.1, 3.2, 1.3], [3.2, 0.0, 1.2, 2.5,\n 1.6], [4.2, 2.4, 3.2, 2.1, 2.8], [2.7, 1.5, 0.0, 1.5, 1.1], [1.1, 2.3, \n 2.1, 1.7, 3.2], [2.3, 1.1, 4.3, 2.3, 1.1], [3.2, 2.6, 1.9, 1.7, 1.1], [\n 2.1, 1.5, 3.0, 1.4, 1.1]]'], {}), '([[1.2, 1.4, 2.1, 2.2, 1.1], [2.3, 1.1, 2.1, 3.2, 1.3], [3.2, 0.0, \n 1.2, 2.5, 1.6], [4.2, 2.4, 3.2, 2.1, 2.8], [2.7, 1.5, 0.0, 1.5, 1.1], [\n 1.1, 2.3, 2.1, 1.7, 3.2], [2.3, 1.1, 4.3, 2.3, 1.1], [3.2, 2.6, 1.9, \n 1.7, 1.1], [2.1, 1.5, 3.0, 1.4, 1.1]])\n', (13682, 13942), True, 'import numpy as N\n'), ((14191, 14458), 'numpy.array', 'N.array', (['[[1.2, 1.4, 1.0, 1.0, 0.0], [2.3, 1.1, 1.0, 2.0, 1.0], [3.2, 0.0, 0.0, 2.0,\n 2.0], [4.2, 2.4, 2.0, 1.0, 2.0], [2.7, 1.5, 0.0, 0.0, 0.0], [1.1, 2.3, \n 1.0, 0.0, 2.0], [2.3, 1.1, 2.0, 2.0, 0.0], [3.2, 2.6, 0.0, 0.0, 0.0], [\n 2.1, 1.5, 2.0, 0.0, 0.0]]'], {}), '([[1.2, 1.4, 1.0, 1.0, 0.0], [2.3, 1.1, 1.0, 2.0, 1.0], [3.2, 0.0, \n 0.0, 2.0, 2.0], [4.2, 2.4, 2.0, 1.0, 2.0], [2.7, 1.5, 0.0, 0.0, 0.0], [\n 1.1, 2.3, 1.0, 0.0, 2.0], [2.3, 1.1, 2.0, 2.0, 0.0], [3.2, 2.6, 0.0, \n 0.0, 0.0], [2.1, 1.5, 2.0, 0.0, 0.0]])\n', (14198, 14458), True, 'import numpy as N\n'), ((151, 176), 'pebl.test.testfile', 'testfile', (['"""testdata1.txt"""'], {}), "('testdata1.txt')\n", (159, 176), False, 'from pebl.test import testfile\n'), ((1706, 1731), 'pebl.test.testfile', 'testfile', (['"""testdata2.txt"""'], {}), "('testdata2.txt')\n", (1714, 1731), False, 'from pebl.test import testfile\n'), ((2844, 2869), 'pebl.test.testfile', 'testfile', (['"""testdata3.txt"""'], {}), "('testdata3.txt')\n", (2852, 2869), False, 'from pebl.test import testfile\n'), ((3555, 3580), 'pebl.test.testfile', 'testfile', (['"""testdata4.txt"""'], {}), "('testdata4.txt')\n", (3563, 3580), False, 'from pebl.test import testfile\n'), ((7812, 7837), 'pebl.test.testfile', 'testfile', (['"""testdata5.txt"""'], {}), "('testdata5.txt')\n", (7820, 7837), False, 'from pebl.test import testfile\n'), 
((9470, 9496), 'pebl.test.testfile', 'testfile', (['"""testdata5m.txt"""'], {}), "('testdata5m.txt')\n", (9478, 9496), False, 'from pebl.test import testfile\n'), ((12274, 12299), 'pebl.test.testfile', 'testfile', (['"""testdata5.txt"""'], {}), "('testdata5.txt')\n", (12282, 12299), False, 'from pebl.test import testfile\n'), ((13553, 13578), 'pebl.test.testfile', 'testfile', (['"""testdata5.txt"""'], {}), "('testdata5.txt')\n", (13561, 13578), False, 'from pebl.test import testfile\n'), ((14837, 14862), 'pebl.test.testfile', 'testfile', (['"""testdata6.txt"""'], {}), "('testdata6.txt')\n", (14845, 14862), False, 'from pebl.test import testfile\n'), ((15098, 15123), 'pebl.test.testfile', 'testfile', (['"""testdata7.txt"""'], {}), "('testdata7.txt')\n", (15106, 15123), False, 'from pebl.test import testfile\n'), ((5350, 5366), 'pebl.data.Variable', 'data.Variable', (['n'], {}), '(n)\n', (5363, 5366), False, 'from pebl import data\n'), ((5420, 5434), 'pebl.data.Sample', 'data.Sample', (['n'], {}), '(n)\n', (5431, 5434), False, 'from pebl import data\n'), ((5854, 5887), 'numpy.array', 'N.array', (['[[2, 1], [3, 1], [4, 2]]'], {}), '([[2, 1], [3, 1], [4, 2]])\n', (5861, 5887), True, 'import numpy as N\n'), ((5560, 5578), 'numpy.array', 'N.array', (['[2, 3, 4]'], {}), '([2, 3, 4])\n', (5567, 5578), True, 'import numpy as N\n'), ((5614, 5632), 'numpy.array', 'N.array', (['[1, 1, 2]'], {}), '([1, 1, 2])\n', (5621, 5632), True, 'import numpy as N\n'), ((5703, 5729), 'numpy.where', 'N.where', (['self.data.missing'], {}), '(self.data.missing)\n', (5710, 5729), True, 'import numpy as N\n'), ((5823, 5849), 'numpy.where', 'N.where', (['self.data.missing'], {}), '(self.data.missing)\n', (5830, 5849), True, 'import numpy as N\n')] |
#coding=utf-8
"""Module for statistical test
The function of this Module is served for statistical test.
"""
from scipy import stats
import numpy as np
import math
import pandas as pd
def Hosmer_Lemeshow_Test(bins_true, bins_pred, bins_tot, n_bins=10, in_sample=False):
    """Hosmer-Lemeshow goodness-of-fit test for model calibration.

    Parameters
    ----------
    bins_true : numpy.array
        Observed number of people in each group.
    bins_pred : numpy.array
        Predicted number of people in each group.
    bins_tot : numpy.array
        Total number of people in each group.
    n_bins : int
        Number of groups.
    in_sample : bool, default False
        Whether the calibration test is performed in sample (costs two
        degrees of freedom).

    Returns
    -------
    tuple
        (chi2 statistic, p-value).

    Examples
    --------
    >>> Hosmer_Lemeshow_Test(bins_true, bins_pred, bins_tot, n_bins=5)
    """
    residual = bins_true - bins_pred
    # per-group contribution: (O - E)^2 / (E * (1 - E / N))
    v_chi2 = sum(residual ** 2 / bins_pred / (1.0 - bins_pred / bins_tot))
    if in_sample:
        dof = n_bins - 2
    else:
        dof = n_bins
    p_value = stats.chi2.sf(v_chi2, dof)
    return v_chi2, p_value
def _delong_placements(pos, neg):
    """Mid-rank placement values for the DeLong AUC estimator.

    Returns (v_pos, v_neg) where v_pos[i] is the number of negative scores
    strictly below pos[i] (ties counted as 0.5), and v_neg[j] is the number
    of positive scores strictly above neg[j] (ties counted as 0.5).
    """
    v_pos = []
    for x in pos:
        cnt = 0.0
        for y in neg:
            if y < x:
                cnt += 1
            elif y == x:
                cnt += 0.5
        v_pos.append(cnt)
    v_neg = []
    for y in neg:
        cnt = 0.0
        for x in pos:
            if y < x:
                cnt += 1
            elif y == x:
                cnt += 0.5
        v_neg.append(cnt)
    return v_pos, v_neg
def Delong_Test(y_true, pred_a, pred_b):
    """Delong-Test for comparing two predictive models on the same data.

    The quadruplicated placement-count loops of the previous version are
    factored into the single helper ``_delong_placements``; results are
    unchanged.

    Parameters
    ----------
    y_true : numpy.array or pandas.Series.
        True label (0 = negative, anything else = positive).
    pred_a : numpy.array or pandas.Series.
        Prediction of model A.
    pred_b : numpy.array or pandas.Series.
        Prediction of model B.

    Returns
    -------
    tuple
        z statistic (as a 1x1 array) and two-sided P-value.

    Examples
    --------
    >>> # pred_proba1 = xgb1.predict_proba(test_X)
    >>> # pred_proba2 = xgb2.predict_proba(test_X)
    >>> Delong_test(test_y, pred_proba1[:, 1], pred_proba2[:, 1])
    """
    a_x, a_y = [], []  # model A scores on positives / negatives
    b_x, b_y = [], []  # model B scores on positives / negatives
    for idx, label in enumerate(y_true):
        if label == 0:
            a_y.append(pred_a[idx])
            b_y.append(pred_b[idx])
        else:
            a_x.append(pred_a[idx])
            b_x.append(pred_b[idx])
    n1 = len(a_x)
    n2 = len(a_y)
    v_ax, v_ay = _delong_placements(a_x, a_y)
    v_bx, v_by = _delong_placements(b_x, b_y)
    # AUC estimates for both models
    theta_a = sum(v_ax) / (n1 * n2)
    theta_b = sum(v_bx) / (n1 * n2)
    theta = np.array([theta_a, theta_b]).reshape((1, 2))
    # structural components and their covariance matrices
    V = np.array([v_ax, v_bx]).T / n2
    Z = np.array([v_ay, v_by]).T / n1
    Sv = np.dot((V - theta).T, (V - theta)) / (n1 - 1)
    Sz = np.dot((Z - theta).T, (Z - theta)) / (n2 - 1)
    L = np.array([[1.0, -1.0]])
    # z statistic for the AUC difference
    u = np.dot(L, theta.T) / np.sqrt(np.dot(np.dot(L, (Sv / n1) + (Sz / n2)), L.T))
    pval = stats.norm.sf(np.abs(u))
    return u, 2.0 * pval
def VIF_Test(data, cols=None):
    """Compute the Variance Inflation Factor for each variable.

    Parameters
    ----------
    data : pandas.DataFrame
        Targeted data.
    cols : list(str), default `None`
        Given columns to calculate VIF; all columns when omitted.

    Returns
    -------
    pandas.Series
        VIF for each column of the design matrix (including the added
        constant), indexed by column name.  The series is also printed.

    Examples
    --------
    >>> VIF_Test(data[x_cols])
    """
    from statsmodels.stats.outliers_influence import variance_inflation_factor
    from statsmodels.tools.tools import add_constant
    selected = list(data.columns) if cols is None else cols
    X = add_constant(data[selected])
    vif_values = [variance_inflation_factor(X.values, i)
                  for i in range(X.shape[1])]
    res = pd.Series(vif_values, index=X.columns)
    print(res)
return res | [
"scipy.stats.chi2.sf",
"numpy.abs",
"statsmodels.stats.outliers_influence.variance_inflation_factor",
"numpy.array",
"numpy.dot",
"statsmodels.tools.tools.add_constant"
] | [((1024, 1064), 'scipy.stats.chi2.sf', 'stats.chi2.sf', (['v_chi2', 'degree_of_freedom'], {}), '(v_chi2, degree_of_freedom)\n', (1037, 1064), False, 'from scipy import stats\n'), ((3104, 3127), 'numpy.array', 'np.array', (['[[1.0, -1.0]]'], {}), '([[1.0, -1.0]])\n', (3112, 3127), True, 'import numpy as np\n'), ((3876, 3900), 'statsmodels.tools.tools.add_constant', 'add_constant', (['data[cols]'], {}), '(data[cols])\n', (3888, 3900), False, 'from statsmodels.tools.tools import add_constant\n'), ((2996, 3028), 'numpy.dot', 'np.dot', (['(V - theta).T', '(V - theta)'], {}), '((V - theta).T, V - theta)\n', (3002, 3028), True, 'import numpy as np\n'), ((3051, 3083), 'numpy.dot', 'np.dot', (['(Z - theta).T', '(Z - theta)'], {}), '((Z - theta).T, Z - theta)\n', (3057, 3083), True, 'import numpy as np\n'), ((3136, 3154), 'numpy.dot', 'np.dot', (['L', 'theta.T'], {}), '(L, theta.T)\n', (3142, 3154), True, 'import numpy as np\n'), ((3237, 3246), 'numpy.abs', 'np.abs', (['u'], {}), '(u)\n', (3243, 3246), True, 'import numpy as np\n'), ((2866, 2894), 'numpy.array', 'np.array', (['[theta_a, theta_b]'], {}), '([theta_a, theta_b])\n', (2874, 2894), True, 'import numpy as np\n'), ((2919, 2941), 'numpy.array', 'np.array', (['[v_ax, v_bx]'], {}), '([v_ax, v_bx])\n', (2927, 2941), True, 'import numpy as np\n'), ((2957, 2979), 'numpy.array', 'np.array', (['[v_ay, v_by]'], {}), '([v_ay, v_by])\n', (2965, 2979), True, 'import numpy as np\n'), ((3922, 3960), 'statsmodels.stats.outliers_influence.variance_inflation_factor', 'variance_inflation_factor', (['X.values', 'i'], {}), '(X.values, i)\n', (3947, 3960), False, 'from statsmodels.stats.outliers_influence import variance_inflation_factor\n'), ((3172, 3200), 'numpy.dot', 'np.dot', (['L', '(Sv / n1 + Sz / n2)'], {}), '(L, Sv / n1 + Sz / n2)\n', (3178, 3200), True, 'import numpy as np\n')] |
from sklearn.utils import check_random_state
import numpy as np
import logging
__all__=['bootstrap'] #everything that will be imported by import *, like in __init__
def bootstrap(data, n_bootstraps=10000, user_statistic=lambda x:np.mean(x,axis=1), kwargs=None, pass_indices=False, random_state=1):
    """Compute bootstrapped statistics of a dataset.

    inputs
    ----------
    data : array_like
        An n-dimensional data array of size n_samples by n_attributes
    n_bootstraps : integer
        the number of bootstrap samples to compute. Note that internally,
        two arrays of size (n_bootstraps, n_samples) will be allocated.
        For very large numbers of bootstraps, this can cause memory issues.
    user_statistic : function
        The statistic to be computed. This should take an array of data
        of size (n_bootstraps, n_samples) and return the row-wise statistics
        of the data. default: lambda x:np.mean(x,axis=1)
    kwargs : dictionary (optional)
        A dictionary of keyword arguments to be passed to the
        user_statistic function.
    pass_indices : boolean (optional)
        if True, then the indices of the points rather than the points
        themselves are passed to `user_statistic`
    random_state: RandomState or an int seed (0 by default)

    Returns
    -------
    distribution : ndarray
        the bootstrapped distribution of statistics (length = n_bootstraps)

    code from [https://github.com/astroML/astroML/blob/master/astroML/resample.py]
    """
    # we don't set kwargs={} by default in the argument list, because using
    # a mutable type as a default argument can lead to strange results
    if kwargs is None:
        kwargs = {}
    rng = check_random_state(random_state)
    data = np.asarray(data)
    if data.ndim != 1:
        n_samples = data.shape[0]
        logging.warning("bootstrap data are n-dimensional: assuming ordered n_samples by n_attributes")
    else:
        n_samples = data.size
    # Generate random indices with repetition
    ind = rng.randint(n_samples, size=(n_bootstraps, n_samples))
    # Call the function
    if pass_indices:
        # no need to materialise the resampled data at all in this case
        stat_bootstrap = user_statistic(ind, **kwargs)
    else:
        # fancy-index only once (the previous version indexed twice,
        # allocating two full (n_bootstraps, n_samples, ...) arrays)
        resampled = data[ind]
        resampled = resampled.reshape(-1, resampled.shape[-1])
        stat_bootstrap = user_statistic(resampled, **kwargs)
    return stat_bootstrap
| [
"numpy.mean",
"numpy.asarray",
"sklearn.utils.check_random_state",
"logging.warning"
] | [((1732, 1764), 'sklearn.utils.check_random_state', 'check_random_state', (['random_state'], {}), '(random_state)\n', (1750, 1764), False, 'from sklearn.utils import check_random_state\n'), ((1776, 1792), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (1786, 1792), True, 'import numpy as np\n'), ((231, 249), 'numpy.mean', 'np.mean', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (238, 249), True, 'import numpy as np\n'), ((1858, 1963), 'logging.warning', 'logging.warning', (['"""bootstrap data are n-dimensional: assuming ordered n_samples by n_attributes"""'], {}), "(\n 'bootstrap data are n-dimensional: assuming ordered n_samples by n_attributes'\n )\n", (1873, 1963), False, 'import logging\n')] |
import numpy as np
import cv2
# ffttools
async def fftd(img, backwards=False):
    """Forward DFT of *img* via cv2.dft (or inverse, scaled, when *backwards*).

    shape of img can be (m,n), (m,n,1) or (m,n,2); the forward transform
    returns the 2-channel (real, imaginary) representation.
    """
    # in my test, fft provided by numpy and scipy are slower than cv2.dft
    return cv2.dft(np.float32(img), flags = ((cv2.DFT_INVERSE | cv2.DFT_SCALE) if backwards else cv2.DFT_COMPLEX_OUTPUT)) # 'flags =' is necessary!
async def real(img):
    """Return the real part (channel 0) of a 2-channel complex image."""
    real_part = img[:, :, 0]
    return real_part
async def imag(img):
    """Return the imaginary part (channel 1) of a 2-channel complex image."""
    imag_part = img[:, :, 1]
    return imag_part
async def complexMultiplication(a, b):
    """Element-wise product of two 2-channel (real, imag) complex arrays."""
    res = np.zeros(a.shape, a.dtype)
    ar, ai = a[:, :, 0], a[:, :, 1]
    br, bi = b[:, :, 0], b[:, :, 1]
    res[:, :, 0] = ar * br - ai * bi
    res[:, :, 1] = ar * bi + ai * br
    return res
async def complexDivision(a, b):
    """Element-wise quotient of two 2-channel (real, imag) complex arrays.

    NOTE(review): the imaginary channel uses ai*br + ar*bi; textbook
    complex division a/b would use ai*br - ar*bi — confirm against the
    upstream KCF implementation before changing.
    """
    res = np.zeros(a.shape, a.dtype)
    ar, ai = a[:, :, 0], a[:, :, 1]
    br, bi = b[:, :, 0], b[:, :, 1]
    divisor = 1. / (br ** 2 + bi ** 2)
    res[:, :, 0] = (ar * br + ai * bi) * divisor
    res[:, :, 1] = (ai * br + ar * bi) * divisor
    return res
async def rearrange(img):
    """Swap diagonal quadrants of a 2-D array (fftshift-style reordering).

    NOTE(review): the block assignments assume both dimensions are even;
    odd sizes make the swapped blocks disagree in shape.
    """
    #return np.fft.fftshift(img, axes=(0,1))
    assert img.ndim == 2
    h, w = img.shape
    yh, xh = h // 2, w // 2
    out = np.zeros(img.shape, img.dtype)
    out[0:yh, 0:xh] = img[yh:h, xh:w]   # bottom-right -> top-left
    out[yh:h, xh:w] = img[0:yh, 0:xh]   # top-left -> bottom-right
    out[0:yh, xh:w] = img[yh:h, 0:xh]   # bottom-left -> top-right
    out[yh:h, 0:xh] = img[0:yh, xh:w]   # top-right -> bottom-left
    return out
# recttools
async def x2(rect):
    """Right edge (x + width) of an [x, y, w, h] rectangle."""
    left, width = rect[0], rect[2]
    return left + width
async def y2(rect):
    """Bottom edge (y + height) of an [x, y, w, h] rectangle."""
    top, height = rect[1], rect[3]
    return top + height
async def limit(rect, limit):
    """Clip *rect* ([x, y, w, h], mutated in place) to the bounds of *limit*.

    Returns the clipped rectangle (the same list object).
    """
    right_overflow = (rect[0] + rect[2]) - (limit[0] + limit[2])
    if right_overflow > 0:
        rect[2] -= right_overflow
    bottom_overflow = (rect[1] + rect[3]) - (limit[1] + limit[3])
    if bottom_overflow > 0:
        rect[3] -= bottom_overflow
    if rect[0] < limit[0]:
        rect[2] -= limit[0] - rect[0]
        rect[0] = limit[0]
    if rect[1] < limit[1]:
        rect[3] -= limit[1] - rect[1]
        rect[1] = limit[1]
    # width/height can go negative when rect lies fully outside the bounds
    rect[2] = max(rect[2], 0)
    rect[3] = max(rect[3], 0)
    return rect
async def getBorder(original, limited):
    """Border widths [left, top, right, bottom] that were lost when
    *original* was clipped to *limited* (both [x, y, w, h]).

    *limited* must lie inside *original*, hence the non-negativity assert.
    """
    res = [0,0,0,0]
    res[0] = limited[0] - original[0]
    res[1] = limited[1] - original[1]
    res[2] = await x2(original) - await x2(limited)
    res[3] = await y2(original) - await y2(limited)
    assert(np.all(np.array(res) >= 0))
    return res
async def subwindow(img, window, borderType=cv2.BORDER_CONSTANT):
    """Extract the patch *window* ([x, y, w, h]) from *img*, padding any
    part that falls outside the image using *borderType*.
    """
    cutWindow = [x for x in window]
    await limit(cutWindow, [0,0,img.shape[1],img.shape[0]])   # modify cutWindow in place
    assert(cutWindow[2]>=0 and cutWindow[3]>=0)
    border = await getBorder(window, cutWindow)
    res = img[cutWindow[1]:cutWindow[1]+cutWindow[3], cutWindow[0]:cutWindow[0]+cutWindow[2]]
    if(border != [0,0,0,0]):
        # re-grow the clipped patch back to the requested window size
        res = cv2.copyMakeBorder(res, border[1], border[3], border[0], border[2], borderType)
    return res
async def subPixelPeak(self, left, center, right):
    """Parabolic sub-pixel refinement of a 1-D peak from three samples.

    *self* is unused; kept for call-site compatibility.  Returns 0 when
    the parabola is too flat to refine.
    """
    divisor = 2 * center - right - left   # float
    if abs(divisor) < 1e-3:
        return 0
    return 0.5 * (right - left) / divisor
async def createGaussianPeak(self, sizey, sizex):
    """Build the Gaussian-shaped regression target centred on the patch
    and return it in the Fourier domain (via fftd).

    The bandwidth scales with the patch size, self.padding and
    self.output_sigma_factor.
    """
    syh, sxh = sizey/2, sizex/2
    output_sigma = np.sqrt(sizex*sizey) / self.padding * self.output_sigma_factor
    mult = -0.5 / (output_sigma*output_sigma)
    y, x = np.ogrid[0:sizey, 0:sizex]
    y, x = (y-syh)**2, (x-sxh)**2
    res = np.exp(mult * (y+x))
    return await fftd(res)
async def gaussianCorrelation(self, x1, x2):
    """Dense Gaussian kernel correlation between patches x1 and x2,
    evaluated for all cyclic shifts via the Fourier domain.
    """
    # cross-correlation in the frequency domain
    c = cv2.mulSpectrums(await fftd(x1), await fftd(x2), 0, conjB = True)  # 'conjB=' is necessary!
    c = await fftd(c, True)
    c = await real(c)
    c = await rearrange(c)
    # squared distance ||x1||^2 + ||x2||^2 - 2 x1.x2, normalised by patch size
    if(x1.ndim==3 and x2.ndim==3):
        d = (np.sum(x1[:,:,0]*x1[:,:,0]) + np.sum(x2[:,:,0]*x2[:,:,0]) - 2.0*c) / (self.size_patch[0]*self.size_patch[1]*self.size_patch[2])
    elif(x1.ndim==2 and x2.ndim==2):
        d = (np.sum(x1*x1) + np.sum(x2*x2) - 2.0*c) / (self.size_patch[0]*self.size_patch[1]*self.size_patch[2])
    d = d * (d>=0)   # clamp tiny negative values from numerical error
    d = np.exp(-d / (self.sigma*self.sigma))
    return d
async def getFeatures(self, image, inithann, scale_adjust=1.0):
    """Extract the (grayscale, zero-centred) feature patch around the
    current ROI.

    When *inithann* is truthy the template size and scale are
    (re)initialised from the padded ROI; *scale_adjust* scales the
    extraction window for multi-scale detection.
    """
    extracted_roi = [0,0,0,0]   # [int,int,int,int]
    cx = self._roi[0] + self._roi[2]/2   # float, ROI centre x
    cy = self._roi[1] + self._roi[3]/2   # float, ROI centre y
    if(inithann):
        padded_w = self._roi[2] * self.padding
        padded_h = self._roi[3] * self.padding
        if(self.template_size > 1):
            # fit the longer padded side to template_size
            if(padded_w >= padded_h):
                self._scale = padded_w / float(self.template_size)
            else:
                self._scale = padded_h / float(self.template_size)
            self._tmpl_sz[0] = int(padded_w // self._scale)
            self._tmpl_sz[1] = int(padded_h // self._scale)
        else:
            self._tmpl_sz[0] = int(padded_w)
            self._tmpl_sz[1] = int(padded_h)
            self._scale = 1.
        # force even template dimensions
        self._tmpl_sz[0] = int(self._tmpl_sz[0]) // 2 * 2
        self._tmpl_sz[1] = int(self._tmpl_sz[1]) // 2 * 2
    extracted_roi[2] = int(scale_adjust * self._scale * self._tmpl_sz[0])
    extracted_roi[3] = int(scale_adjust * self._scale * self._tmpl_sz[1])
    extracted_roi[0] = int(cx - extracted_roi[2]/2)
    extracted_roi[1] = int(cy - extracted_roi[3]/2)
    z = await subwindow(image, extracted_roi, cv2.BORDER_REPLICATE)
    if(z.shape[1]!=self._tmpl_sz[0] or z.shape[0]!=self._tmpl_sz[1]):
        z = cv2.resize(z, tuple(self._tmpl_sz))
    if(z.ndim==3 and z.shape[2]==3):
        FeaturesMap = cv2.cvtColor(z, cv2.COLOR_BGR2GRAY)   # (h, w, 3) -> (h, w), 0~255
    elif(z.ndim==2):
        FeaturesMap = z   # already single channel, 0~255
    # normalise to [-0.5, 0.5]
    FeaturesMap = FeaturesMap.astype(np.float32) / 255.0 - 0.5
    self.size_patch = [z.shape[0], z.shape[1], 1]
    # if(inithann):
    #     self.createHanningMats()  # createHanningMats need size_patch
    #
    # FeaturesMap = self.hann * FeaturesMap
    return FeaturesMap
async def detect(self, z, x):
    """Locate the target in feature patch *x* by correlating with
    template *z*; returns the sub-pixel peak offset relative to the patch
    centre and the peak response value.
    """
    k = await gaussianCorrelation(self, x, z)
    # response map of the kernelised correlation filter
    res = await real(await fftd(await complexMultiplication(self._alphaf, await fftd(k)), True))
    _, pv, _, pi = cv2.minMaxLoc(res)   # pv:float  pi:tuple of int
    p = [float(pi[0]), float(pi[1])]   # [x, y] as floats
    # refine the peak to sub-pixel accuracy when not on the border
    if(pi[0]>0 and pi[0]<res.shape[1]-1):
        p[0] += await subPixelPeak(self, res[pi[1],pi[0]-1], pv, res[pi[1],pi[0]+1])
    if(pi[1]>0 and pi[1]<res.shape[0]-1):
        p[1] += await subPixelPeak(self, res[pi[1]-1,pi[0]], pv, res[pi[1]+1,pi[0]])
    # express the peak relative to the patch centre
    p[0] -= res.shape[1] / 2.
    p[1] -= res.shape[0] / 2.
    return p, pv
# side effect
async def train(self, x, train_interp_factor):
    """Update the filter solution self._alphaf and template self._tmpl by
    linear interpolation with *train_interp_factor*; returns the current ROI.

    Side effect: mutates self._tmpl and self._alphaf.
    """
    k = await gaussianCorrelation(self, x, x)
    alphaf = await complexDivision(self._prob, await fftd(k) + self.lambdar)
    self._tmpl = (1-train_interp_factor)*self._tmpl + train_interp_factor*x
    self._alphaf = (1-train_interp_factor)*self._alphaf + train_interp_factor*alphaf
    return self._roi
# side effect
async def update(self, image):
    """Run one tracking step on *image*: clamp the ROI into the frame,
    detect the new target position (optionally testing a smaller and a
    bigger scale), shift the ROI by the detected displacement and
    re-train the model.  Returns the updated ROI (via train).

    Side effect: mutates self._roi and self._scale.
    """
    # clamp the ROI so at least one pixel overlaps the image
    if(self._roi[0]+self._roi[2] <= 0): self._roi[0] = -self._roi[2] + 1
    # BUG FIX: the vertical clamp previously used self._roi[2] (width);
    # use the height, matching the symmetric post-detection clamp below
    if(self._roi[1]+self._roi[3] <= 0): self._roi[1] = -self._roi[3] + 1
    if(self._roi[0] >= image.shape[1]-1): self._roi[0] = image.shape[1] - 2
    if(self._roi[1] >= image.shape[0]-1): self._roi[1] = image.shape[0] - 2
    cx = self._roi[0] + self._roi[2]/2.
    cy = self._roi[1] + self._roi[3]/2.
    loc, peak_value = await detect(self, self._tmpl, await getFeatures(self, image, 0, 1.0))
    if(self.scale_step != 1):
        # Test at a smaller _scale
        new_loc1, new_peak_value1 = await detect(self, self._tmpl, await getFeatures(self, image, 0, 1.0/self.scale_step))
        # Test at a bigger _scale
        new_loc2, new_peak_value2 = await detect(self, self._tmpl, await getFeatures(self, image, 0, self.scale_step))
        if(self.scale_weight*new_peak_value1 > peak_value and new_peak_value1>new_peak_value2):
            loc = new_loc1
            peak_value = new_peak_value1
            self._scale /= self.scale_step
            self._roi[2] /= self.scale_step
            self._roi[3] /= self.scale_step
        elif(self.scale_weight*new_peak_value2 > peak_value):
            loc = new_loc2
            peak_value = new_peak_value2
            self._scale *= self.scale_step
            self._roi[2] *= self.scale_step
            self._roi[3] *= self.scale_step
    # shift the ROI by the detected sub-cell displacement
    self._roi[0] = cx - self._roi[2]/2.0 + loc[0]*self.cell_size*self._scale
    self._roi[1] = cy - self._roi[3]/2.0 + loc[1]*self.cell_size*self._scale
    if(self._roi[0] >= image.shape[1]-1): self._roi[0] = image.shape[1] - 1
    if(self._roi[1] >= image.shape[0]-1): self._roi[1] = image.shape[0] - 1
    if(self._roi[0]+self._roi[2] <= 0): self._roi[0] = -self._roi[2] + 2
    if(self._roi[1]+self._roi[3] <= 0): self._roi[1] = -self._roi[3] + 2
    assert(self._roi[2]>0 and self._roi[3]>0)
    x = await getFeatures(self, image, 0, 1.0)
    return await train(self, x, self.interp_factor)
| [
"numpy.sum",
"cv2.cvtColor",
"numpy.float32",
"numpy.zeros",
"cv2.copyMakeBorder",
"numpy.array",
"numpy.exp",
"cv2.minMaxLoc",
"numpy.sqrt"
] | [((477, 503), 'numpy.zeros', 'np.zeros', (['a.shape', 'a.dtype'], {}), '(a.shape, a.dtype)\n', (485, 503), True, 'import numpy as np\n'), ((662, 688), 'numpy.zeros', 'np.zeros', (['a.shape', 'a.dtype'], {}), '(a.shape, a.dtype)\n', (670, 688), True, 'import numpy as np\n'), ((972, 1002), 'numpy.zeros', 'np.zeros', (['img.shape', 'img.dtype'], {}), '(img.shape, img.dtype)\n', (980, 1002), True, 'import numpy as np\n'), ((3005, 3027), 'numpy.exp', 'np.exp', (['(mult * (y + x))'], {}), '(mult * (y + x))\n', (3011, 3027), True, 'import numpy as np\n'), ((3593, 3631), 'numpy.exp', 'np.exp', (['(-d / (self.sigma * self.sigma))'], {}), '(-d / (self.sigma * self.sigma))\n', (3599, 3631), True, 'import numpy as np\n'), ((5559, 5577), 'cv2.minMaxLoc', 'cv2.minMaxLoc', (['res'], {}), '(res)\n', (5572, 5577), False, 'import cv2\n'), ((217, 232), 'numpy.float32', 'np.float32', (['img'], {}), '(img)\n', (227, 232), True, 'import numpy as np\n'), ((2479, 2558), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['res', 'border[1]', 'border[3]', 'border[0]', 'border[2]', 'borderType'], {}), '(res, border[1], border[3], border[0], border[2], borderType)\n', (2497, 2558), False, 'import cv2\n'), ((4893, 4928), 'cv2.cvtColor', 'cv2.cvtColor', (['z', 'cv2.COLOR_BGR2GRAY'], {}), '(z, cv2.COLOR_BGR2GRAY)\n', (4905, 4928), False, 'import cv2\n'), ((2052, 2065), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (2060, 2065), True, 'import numpy as np\n'), ((2826, 2848), 'numpy.sqrt', 'np.sqrt', (['(sizex * sizey)'], {}), '(sizex * sizey)\n', (2833, 2848), True, 'import numpy as np\n'), ((3302, 3335), 'numpy.sum', 'np.sum', (['(x1[:, :, 0] * x1[:, :, 0])'], {}), '(x1[:, :, 0] * x1[:, :, 0])\n', (3308, 3335), True, 'import numpy as np\n'), ((3332, 3365), 'numpy.sum', 'np.sum', (['(x2[:, :, 0] * x2[:, :, 0])'], {}), '(x2[:, :, 0] * x2[:, :, 0])\n', (3338, 3365), True, 'import numpy as np\n'), ((3471, 3486), 'numpy.sum', 'np.sum', (['(x1 * x1)'], {}), '(x1 * x1)\n', (3477, 3486), True, 
'import numpy as np\n'), ((3487, 3502), 'numpy.sum', 'np.sum', (['(x2 * x2)'], {}), '(x2 * x2)\n', (3493, 3502), True, 'import numpy as np\n')] |
import numpy as np
from divHretention import DEFAULT_TIME, compute_c_max, compute_inventory, \
compute_surface_temperature
class Exposition:
    """Object containing information regarding the exposure conditions based
    on an input file.
    An input file for **ITER** must have the following columns:
    "x" (arc length in m),
    "Te" (electron temperature in eV),
    "Ti" (ion temperature in eV),
    "D_temp_atm" (atom temperature eV),
    "D_flux_ion" (ion flux in m-2 s-1),
    "D_flux_atm" (atom flux in m-2 s-1),
    "Wtot" (heat flux in W/m2)
    An input file for **WEST** must have the following columns
    "s_cell_m" (arc length in m),
    "E_imp_ion_eV" (ion energy in eV),
    "E_imp_atom_eV" (atom temperature eV),
    "alpha_V_ion_deg" (angle of incidence for ions in deg),
    "alpha_V_atom_deg" (angle of incidence for atoms in deg),
    "flux_inc_ion_m2s1" (ion flux in m-2 s-1),
    "flux_inc_atom_m2s1" (atom flux in m-2 s-1),
    "net_energy_flux_Wm2" (heat flux in W/m2)
    Args:
        filename (str): file path
        filetype (str): "ITER" or "WEST"
        inventory (bool, optional): If True, inventory will be computed on
            construction. Defaults to True.
    """
    def __init__(self, filename, filetype, inventory=True):
        self.filename = filename
        self.filetype = filetype
        # per-point exposure arrays, populated by extract_data()
        self.arc_length = []
        self.E_ion = []
        self.E_atom = []
        self.ion_flux = []
        self.atom_flux = []
        self.net_heat_flux = []
        self.angles_ions = []
        self.angles_atoms = []
        self.data = None
        # parse the file, sanitise it, then derive surface quantities
        self.extract_data()
        self.remove_nan_values()
        self.compute_surface_temperature()
        self.compute_surface_concentration()
        if inventory:
            self.compute_inventory()
    def compute_surface_temperature(self):
        """Computes the surface temperature based on the thermal study
        performed in Delaporte-Mathurin et al, SREP 2020
        https://www.nature.com/articles/s41598-020-74844-w
        The result is stored in self.temperature.
        """
        self.temperature = compute_surface_temperature(self.net_heat_flux)
    def compute_surface_concentration(self):
        """Computes the surface H concentration from the exposure
        conditions; the result is stored in self.concentration.
        """
        self.concentration = compute_c_max(
            self.temperature,
            self.E_ion,
            self.E_atom,
            self.angles_ions,
            self.angles_atoms,
            self.ion_flux,
            self.atom_flux)
    def compute_inventory(self, time=DEFAULT_TIME):
        """Computes the H inventory and the standard deviation based on
        self.temperature, self.concentration and time. The inventory and
        standard deviation are stored in the attributes self.inventory and
        self.stdev_inv.
        Args:
            time (float, optional): Exposure time (s). Defaults to
                DEFAULT_TIME.
        """
        # compute inventory as a function of temperature and concentration
        self.inventory, self.stdev_inv = compute_inventory(
            self.temperature, self.concentration, time=time)
    def extract_data(self):
        """Extracts exposure data from a CSV file, dispatching on filetype.
        Raises:
            ValueError: if filetype is neither "ITER" nor "WEST".
        """
        if self.filetype not in ["ITER", "WEST"]:
            raise ValueError("Unknown filetype")
        if self.filetype == "ITER":
            self.extract_ITER_data(self.filename)
        elif self.filetype == "WEST":
            self.extract_WEST_data(self.filename)
    def extract_WEST_data(self, filename):
        """Reads a semicolon-separated WEST exposure file into the
        per-point attribute arrays.
        """
        self.data = np.genfromtxt(filename, delimiter=";", names=True)
        arc_length_0 = 0.6  # this is the assumed beggining of the target
        self.arc_length = self.data["s_cell_m"] - arc_length_0
        self.E_ion = self.data["E_imp_ion_eV"]
        self.E_atom = self.data["E_imp_atom_eV"]
        self.angles_ions = self.data["alpha_V_ion_deg"]
        self.angles_atoms = self.data["alpha_V_atom_deg"]
        self.ion_flux = self.data["flux_inc_ion_m2s1"]
        self.atom_flux = self.data["flux_inc_atom_m2s1"]
        self.net_heat_flux = self.data["net_energy_flux_Wm2"]
    def extract_ITER_data(self, filename):
        """Reads a comma-separated ITER exposure file into the per-point
        attribute arrays; incidence angles are not provided and default to
        60 deg (ions) and 45 deg (atoms).
        """
        self.data = np.genfromtxt(filename, delimiter=",", names=True)
        self.arc_length = self.data["x"]
        # ion impact energy estimated from electron and ion temperatures
        self.E_ion = 3*self.data["Te"] + 2*self.data["Ti"]
        self.E_atom = self.data["D_temp_atm"]
        # angles not given
        default_angle_ion = 60
        default_angle_atom = 45
        self.angles_ions = np.ones(self.arc_length.shape)*default_angle_ion
        self.angles_atoms = np.ones(self.arc_length.shape)*default_angle_atom
        self.ion_flux = self.data["D_flux_ion"]
        self.atom_flux = self.data["D_flux_atm"]
        self.net_heat_flux = self.data["Wtot"]
    def remove_nan_values(self):
        """Replaces NaN entries (in place) in the angle and energy arrays
        with sensible defaults.
        """
        # remove NaN in angles
        default_angle_ion = 60
        default_angle_atom = 45
        np.nan_to_num(self.angles_ions, copy=False, nan=default_angle_ion)
        np.nan_to_num(self.angles_atoms, copy=False, nan=default_angle_atom)
        # remove Nan in energy
        default_energy = 0.0
        np.nan_to_num(self.E_ion, copy=False, nan=default_energy)
        np.nan_to_num(self.E_atom, copy=False, nan=default_energy)
if __name__ == "__main__":
    # module provides the Exposition class only; no standalone behaviour
    pass
| [
"numpy.nan_to_num",
"divHretention.compute_c_max",
"numpy.genfromtxt",
"numpy.ones",
"divHretention.compute_inventory",
"divHretention.compute_surface_temperature"
] | [((2094, 2141), 'divHretention.compute_surface_temperature', 'compute_surface_temperature', (['self.net_heat_flux'], {}), '(self.net_heat_flux)\n', (2121, 2141), False, 'from divHretention import DEFAULT_TIME, compute_c_max, compute_inventory, compute_surface_temperature\n'), ((2277, 2405), 'divHretention.compute_c_max', 'compute_c_max', (['self.temperature', 'self.E_ion', 'self.E_atom', 'self.angles_ions', 'self.angles_atoms', 'self.ion_flux', 'self.atom_flux'], {}), '(self.temperature, self.E_ion, self.E_atom, self.angles_ions,\n self.angles_atoms, self.ion_flux, self.atom_flux)\n', (2290, 2405), False, 'from divHretention import DEFAULT_TIME, compute_c_max, compute_inventory, compute_surface_temperature\n'), ((3023, 3089), 'divHretention.compute_inventory', 'compute_inventory', (['self.temperature', 'self.concentration'], {'time': 'time'}), '(self.temperature, self.concentration, time=time)\n', (3040, 3089), False, 'from divHretention import DEFAULT_TIME, compute_c_max, compute_inventory, compute_surface_temperature\n'), ((3531, 3581), 'numpy.genfromtxt', 'np.genfromtxt', (['filename'], {'delimiter': '""";"""', 'names': '(True)'}), "(filename, delimiter=';', names=True)\n", (3544, 3581), True, 'import numpy as np\n'), ((4169, 4219), 'numpy.genfromtxt', 'np.genfromtxt', (['filename'], {'delimiter': '""","""', 'names': '(True)'}), "(filename, delimiter=',', names=True)\n", (4182, 4219), True, 'import numpy as np\n'), ((4892, 4958), 'numpy.nan_to_num', 'np.nan_to_num', (['self.angles_ions'], {'copy': '(False)', 'nan': 'default_angle_ion'}), '(self.angles_ions, copy=False, nan=default_angle_ion)\n', (4905, 4958), True, 'import numpy as np\n'), ((4967, 5035), 'numpy.nan_to_num', 'np.nan_to_num', (['self.angles_atoms'], {'copy': '(False)', 'nan': 'default_angle_atom'}), '(self.angles_atoms, copy=False, nan=default_angle_atom)\n', (4980, 5035), True, 'import numpy as np\n'), ((5105, 5162), 'numpy.nan_to_num', 'np.nan_to_num', (['self.E_ion'], {'copy': '(False)', 
'nan': 'default_energy'}), '(self.E_ion, copy=False, nan=default_energy)\n', (5118, 5162), True, 'import numpy as np\n'), ((5171, 5229), 'numpy.nan_to_num', 'np.nan_to_num', (['self.E_atom'], {'copy': '(False)', 'nan': 'default_energy'}), '(self.E_atom, copy=False, nan=default_energy)\n', (5184, 5229), True, 'import numpy as np\n'), ((4484, 4514), 'numpy.ones', 'np.ones', (['self.arc_length.shape'], {}), '(self.arc_length.shape)\n', (4491, 4514), True, 'import numpy as np\n'), ((4561, 4591), 'numpy.ones', 'np.ones', (['self.arc_length.shape'], {}), '(self.arc_length.shape)\n', (4568, 4591), True, 'import numpy as np\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
from minihack.tiles import glyph2tile, MAXOTHTILE
from nle.nethack import MAX_GLYPH
import numpy as np
import pkg_resources
import pickle
import os
class GlyphMapper:
    """This class is used to map glyphs to rgb pixels."""

    def __init__(self):
        self.tiles = self.load_tiles()

    def load_tiles(self):
        """Load the pre-generated tile pixel data.

        This function expects that tiles.pkl already exists.
        If it doesn't, call make_tiles.py in win/
        """
        tile_rgb_path = os.path.join(
            pkg_resources.resource_filename("minihack", "tiles"),
            "tiles.pkl",
        )
        # use a context manager so the file handle is not leaked
        with open(tile_rgb_path, "rb") as f:
            return pickle.load(f)

    def glyph_id_to_rgb(self, glyph_id):
        """Return the rgb tile for a single glyph id."""
        tile_id = glyph2tile[glyph_id]
        assert 0 <= tile_id <= MAXOTHTILE
        return self.tiles[tile_id]

    def _glyph_to_rgb(self, glyphs):
        # Expects glyphs as a two-dimensional numpy ndarray.
        # Stack each column's tiles vertically, then all columns
        # horizontally; collecting into lists and concatenating once per
        # column avoids the quadratic copying of the previous repeated
        # np.concatenate calls.
        columns = []
        for i in range(glyphs.shape[1]):
            col_tiles = [
                self.glyph_id_to_rgb(glyphs[j, i])
                for j in range(glyphs.shape[0])
            ]
            columns.append(np.concatenate(col_tiles))
        if not columns:
            return None  # match previous behaviour for empty input
        return np.concatenate(columns, axis=1)

    def to_rgb(self, glyphs):
        """Render a 2-D glyph grid as an rgb pixel array."""
        return self._glyph_to_rgb(glyphs)
| [
"numpy.concatenate",
"pkg_resources.resource_filename"
] | [((546, 598), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""minihack"""', '"""tiles"""'], {}), "('minihack', 'tiles')\n", (577, 598), False, 'import pkg_resources\n'), ((1363, 1398), 'numpy.concatenate', 'np.concatenate', (['(cols, col)'], {'axis': '(1)'}), '((cols, col), axis=1)\n', (1377, 1398), True, 'import numpy as np\n'), ((1238, 1264), 'numpy.concatenate', 'np.concatenate', (['(col, rgb)'], {}), '((col, rgb))\n', (1252, 1264), True, 'import numpy as np\n')] |
#!/usr/bin/python
import sys
# import nwb
from nwb import nwb_file
from nwb import nwb_utils as utils
import numpy as np
import h5py
from sys import version_info
import re
import numpy as np
# test creation of arrays with different number of dimensions.
# This test made many to ensure consistency between data files
# created in matlab and python, since matlab stores arrays in
# column major order and python in row major order. The
# nwb file created by the matlab version of this test should
# match that created by the python version (e.g. this test).
def create_nwb_file():
    """Create the output NWB file for this test; return (file handle, name).

    The output name is derived from this script's name: the leading "./"
    (or first character) and the ".py" suffix are stripped, "s" is
    prepended and ".nwb" appended.
    """
    stem = __file__[3:-3] if __file__.startswith("./") else __file__[1:-3]
    fname = "s" + stem + ".nwb"
    settings = {
        "file_name": fname,
        "identifier": utils.create_identifier("String test"),
        "mode": "w",
        "start_time": "Sat Jul 04 2015 3:14:16",
        "description": "Test array layout storage",
        "verbosity": "none",
    }
    f = nwb_file.open(**settings)
    return (f, fname)
# special h5py type for variable length strings
# var_len_dt = h5py.special_dtype(vlen=unicode)
# tuples of: name, value
# make all of them int32 using numpy -- int32 keeps the stored values
# identical between the MATLAB and Python versions of this test
array_examples = [
    ("oneD_range5", np.array([0, 1, 2, 3, 4], dtype='int32')),
    ("twoD_2rows_3cols", np.array([[ 0, 1, 2], [3, 4, 5]], dtype='int32')),
    ("threeD_2x2x3", np.array([[[1,2,3],[2,3,4]],[[3,4,5],[4,5,6]]], dtype='int32'))
    ]
def display_examples():
    """Print each example as "name<TAB>value"."""
    global array_examples
    for name, val in array_examples:
        print("%s\t%s" % (name, val))
def vals_match(a, b):
    """Equality that works for both scalars and numpy arrays (all elements)."""
    match = a == b
    if not isinstance(match, bool):
        match = match.all()
    return match
def _bytes_to_str(value):
    """Recursively decode bytes to str (utf-8) in scalars, lists/tuples and
    numpy byte-string arrays, so values read back from HDF5 under Python 3
    compare equal to unicode expectations."""
    if isinstance(value, bytes):
        return value.decode("utf-8")
    if isinstance(value, (list, tuple)):
        return type(value)(_bytes_to_str(v) for v in value)
    if isinstance(value, np.ndarray) and value.dtype.kind == "S":
        return value.astype(str)
    return value
def values_match(expected, found):
    """Compare values, tolerating the bytes-vs-unicode mismatch of Python 3.

    In python 3, default string type is unicode, but these are stored as
    ascii bytes if possible in the hdf5 file, and read back as bytes;
    for match to work, they must be converted back to unicode strings.
    BUG FIX: the previous version called make_str(), which is not defined
    anywhere in this file and raised NameError whenever the first
    comparison failed under Python 3; the conversion is now done by the
    local helper _bytes_to_str().
    """
    match = vals_match(expected, found)
    if not match and version_info[0] > 2:
        match = vals_match(expected, _bytes_to_str(found))
    return match
def test_array_layout():
    """Write every array example as a group attribute, a dataset and a
    dataset attribute, then re-open the file with h5py and verify all
    three stored copies round-trip unchanged.  Exits with an error
    summary on mismatch, prints PASSED otherwise.
    """
    global array_examples
    f, fname = create_nwb_file()
    ang = f.make_group("analysis")
    stg = ang.make_custom_group("arrays")
    for example in array_examples:
        name, val = example
        # print("Setting %s attribute" % name)
        # prepend 'ga_' on attribute name stored in group
        ga_name = "ga_%s" % name
        stg.set_attr(ga_name, val)
        # print("Setting %s dataset" % name)
        # also save attribute with same name
        # prepend 'da_' on attribute name stored with dataset
        da_name = "da_%s" % name
        stg.set_custom_dataset(name, val, attrs={da_name: val})
    f.close()
    # now read created file and verify values match
    f = h5py.File(fname, "r")
    stg = f["analysis/arrays"]
    errors = []
    for example in array_examples:
        name, val = example
        # check attribute value
        # prepend 'ga_' on attribute name stored in group
        ga_name = "ga_%s" % name
        aval = stg.attrs[ga_name]
        if not values_match(val, aval):
            error = "attribute %s, expected='%s' (type %s) \nfound='%s' (type %s)" % (
                ga_name, val, type(val), aval, type(aval))
            errors.append(error)
        # check dataset value
        # NOTE(review): Dataset.value is deprecated/removed in h5py >= 3;
        # stg[name][()] is the modern equivalent -- confirm target h5py version
        dval = stg[name].value
        if not values_match(val, dval):
            # NOTE(review): the message interpolates aval (the attribute value)
            # rather than dval -- looks like a copy-paste slip in the report text
            error = "dataset %s, expected='%s' (type %s) \nfound='%s' (type %s)" % (
                name, val, type(val), aval, type(aval))
            errors.append(error)
        # check dataset attribute value
        # prepend 'da_' on attribute name stored with dataset
        da_name = "da_%s" % name
        dsaval = stg[name].attrs[da_name]
        if not values_match(val, dsaval):
            # NOTE(review): same slip -- interpolates aval instead of dsaval
            error = "dataset %s, expected='%s' (type %s) \nfound='%s' (type %s)" % (
                da_name, val, type(val), aval, type(aval))
            errors.append(error)
    f.close()
    if len(errors) > 0:
        sys.exit("Errors found:\n%s" % "\n".join(errors))
    print("%s PASSED" % __file__)
# display_examples()
test_array_layout()
| [
"nwb.nwb_file.open",
"h5py.File",
"numpy.array",
"nwb.nwb_utils.create_identifier"
] | [((802, 840), 'nwb.nwb_utils.create_identifier', 'utils.create_identifier', (['"""String test"""'], {}), "('String test')\n", (825, 840), True, 'from nwb import nwb_utils as utils\n'), ((1024, 1049), 'nwb.nwb_file.open', 'nwb_file.open', ([], {}), '(**settings)\n', (1037, 1049), False, 'from nwb import nwb_file\n'), ((2982, 3003), 'h5py.File', 'h5py.File', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (2991, 3003), False, 'import h5py\n'), ((1271, 1311), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4]'], {'dtype': '"""int32"""'}), "([0, 1, 2, 3, 4], dtype='int32')\n", (1279, 1311), True, 'import numpy as np\n'), ((1339, 1386), 'numpy.array', 'np.array', (['[[0, 1, 2], [3, 4, 5]]'], {'dtype': '"""int32"""'}), "([[0, 1, 2], [3, 4, 5]], dtype='int32')\n", (1347, 1386), True, 'import numpy as np\n'), ((1411, 1484), 'numpy.array', 'np.array', (['[[[1, 2, 3], [2, 3, 4]], [[3, 4, 5], [4, 5, 6]]]'], {'dtype': '"""int32"""'}), "([[[1, 2, 3], [2, 3, 4]], [[3, 4, 5], [4, 5, 6]]], dtype='int32')\n", (1419, 1484), True, 'import numpy as np\n')] |
"""
Class and functions for scene node
"""
from enum import Enum, unique
import numpy as np
from ..math.matrix import Matrix44
from ..math.transform import rotation_btw_vectors
from ..geometry.colour import Colour
from ..geometry.primitive import create_sphere, create_plane, create_cuboid
from ..geometry.mesh import BoundingBox
from ..util.misc import Attributes
from ...config import settings
class Node:
    """A node of the scene graph.

    A node optionally wraps mesh data (vertices, indices, normals, bounding
    box, colour) and may hold child nodes.  Colour, visibility and render
    mode fall back to the parent's value when set to ``None`` (see the
    corresponding properties below).

    :param mesh: mesh to add to node
    :type mesh: Union[Mesh, None]
    """
    @unique
    class RenderMode(Enum):
        # How the renderer should draw this node.
        Solid = 'Solid'
        Wireframe = 'Wireframe'
        Transparent = 'Transparent'
        Outline = 'Outline'
    @unique
    class RenderPrimitive(Enum):
        # Primitive type used to interpret the index buffer.
        Lines = 'Lines'
        Triangles = 'Triangles'
    def __init__(self, mesh=None):
        if mesh is None:
            # Empty node: no geometry and no bounding box yet.
            self._vertices = np.array([])
            self.indices = np.array([])
            self.normals = np.array([])
            self._bounding_box = None
            self._colour = Colour.black()
        else:
            # Adopt the mesh's buffers directly (no copy is made).
            self._vertices = mesh.vertices
            self.indices = mesh.indices
            self.normals = mesh.normals
            self._bounding_box = mesh.bounding_box
            self._colour = mesh.colour
        self._render_mode = Node.RenderMode.Solid
        self.render_primitive = Node.RenderPrimitive.Triangles
        self.transform = Matrix44.identity()
        self.parent = None
        self._visible = True
        self.selected = False
        self.children = []
    def copy(self, transform=None):
        """Creates shallow copy of node with unique transformation matrix

        Geometry buffers and the children list are shared with the original;
        only the transform may differ.

        :param transform: transformation matrix
        :type transform: Union[Matrix44, None]
        :return: shallow copy of node
        :rtype: Node
        """
        node = Node()
        node._vertices = self._vertices
        node.indices = self.indices
        node.normals = self.normals
        # NOTE(review): the ``bounding_box`` getter returns the box already
        # transformed by self.transform, and the setter stores that result as
        # the copy's base box, so the copy's getter applies its transform on
        # top of it — confirm this double application is intended when the
        # transforms are not identity.
        node.bounding_box = self.bounding_box
        node._colour = self._colour
        node._render_mode = self._render_mode
        node.render_primitive = self.render_primitive
        node.transform = self.transform if transform is None else transform
        node.parent = self.parent
        node._visible = self._visible
        node.selected = self.selected
        node.children = self.children
        return node
    @property
    def vertices(self):
        # N x 3 array of vertex positions.
        return self._vertices
    @vertices.setter
    def vertices(self, value):
        """Updates the bounding box of the node when vertices are changed

        The new box covers the given vertices and all child bounding boxes.

        :param value: N x 3 array of vertices
        :type value: numpy.ndarray
        """
        self._vertices = value
        max_pos, min_pos = BoundingBox.fromPoints(self._vertices).bounds
        for node in self.children:
            # Grow the box so every child stays enclosed.
            max_pos = np.maximum(node.bounding_box.max, max_pos)
            min_pos = np.minimum(node.bounding_box.min, min_pos)
        self.bounding_box = BoundingBox(max_pos, min_pos)
    @property
    def colour(self):
        # Inherit the parent's colour while unset.
        if self._colour is None and self.parent:
            return self.parent.colour
        return self._colour
    @colour.setter
    def colour(self, value):
        self._colour = value
    @property
    def visible(self):
        # Inherit the parent's visibility while unset.
        if self._visible is None and self.parent:
            return self.parent.visible
        return self._visible
    @visible.setter
    def visible(self, value):
        self._visible = value
    @property
    def render_mode(self):
        # Inherit the parent's render mode while unset.
        if self._render_mode is None and self.parent:
            return self.parent.render_mode
        return self._render_mode
    @render_mode.setter
    def render_mode(self, value):
        self._render_mode = value
    def isEmpty(self):
        """Checks if Node is empty

        A node is empty when it has neither children nor vertices.

        :return: indicates node is empty
        :rtype: bool
        """
        if not self.children and len(self.vertices) == 0:
            return True
        return False
    def addChild(self, child_node):
        """Adds child to the node and recomputes the bounding box to include child

        Empty children are silently ignored.

        :param child_node: child node to add
        :type child_node: Node
        """
        if child_node.isEmpty():
            return
        child_node.parent = self
        self.children.append(child_node)
        max_pos, min_pos = child_node.bounding_box.bounds
        if self.bounding_box is not None:
            # Merge with the existing box instead of replacing it.
            max_pos = np.maximum(self.bounding_box.max, max_pos)
            min_pos = np.minimum(self.bounding_box.min, min_pos)
        self.bounding_box = BoundingBox(max_pos, min_pos)
    def translate(self, offset):
        """Translates node

        No-op for empty nodes.

        :param offset: 3 x 1 array of offsets for X, Y and Z axis
        :type offset: Union[numpy.ndarray, sscanss.core.scene.Vector3]
        """
        if self.isEmpty():
            return
        self.transform @= Matrix44.fromTranslation(offset)
    def flatten(self):
        """Recursively flattens the tree formed by nested nodes

        The result is a single node whose children are all the non-empty
        leaf nodes of the original tree; this node's own geometry (if any)
        becomes one more child.

        :return: flattened node
        :rtype: Node
        """
        new_node = Node()
        new_node.bounding_box = self.bounding_box
        for node in self.children:
            if node.children:
                new_node.children.extend(node.flatten().children)
            elif not node.isEmpty():
                node.parent = None
                new_node.children.append(node)
        if len(self.vertices) != 0:
            # Preserve this node's own geometry as an extra child.
            parent = self.copy()
            parent.vertices = self.vertices
            new_node.children.append(parent)
        return new_node
    @property
    def bounding_box(self):
        # The stored box is in local coordinates; return it transformed
        # into the node's frame.
        return None if self._bounding_box is None else self._bounding_box.transform(self.transform)
    @bounding_box.setter
    def bounding_box(self, value):
        self._bounding_box = value
def create_sample_node(samples, render_mode=Node.RenderMode.Solid):
    """Builds a parent node holding one child node per sample mesh.

    :param samples: sample mesh
    :type samples: Dict[str, Mesh]
    :param render_mode: render mode
    :type render_mode: Node.RenderMode
    :return: node containing sample
    :rtype: Node
    """
    parent = Node()
    parent.colour = Colour(*settings.value(settings.Key.Sample_Colour))
    parent.render_mode = render_mode
    for mesh in samples.values():
        leaf = Node(mesh)
        # Child inherits colour and render mode from the parent node.
        leaf.colour = None
        leaf.render_mode = None
        parent.addChild(leaf)
    return parent
def create_fiducial_node(fiducials, visible=True):
    """Builds a node showing each fiducial point as a small sphere.

    :param fiducials: fiducial points
    :type fiducials: numpy.recarray
    :param visible: indicates node is visible
    :type visible: bool
    :return: node containing fiducial points
    :rtype: Node
    """
    parent = Node()
    parent.visible = visible
    parent.render_mode = Node.RenderMode.Solid
    colour_enabled = Colour(*settings.value(settings.Key.Fiducial_Colour))
    colour_disabled = Colour(*settings.value(settings.Key.Fiducial_Disabled_Colour))
    radius = settings.value(settings.Key.Fiducial_Size)
    for point, enabled in fiducials:
        sphere = create_sphere(radius)
        sphere.translate(point)
        child = Node(sphere)
        child.colour = colour_enabled if enabled else colour_disabled
        # Render mode and visibility are inherited from the parent node.
        child.render_mode = None
        child.visible = None
        parent.addChild(child)
    return parent
def create_measurement_point_node(points, visible=True):
    """Builds a node showing each measurement point as a 3-axis cross.

    :param points: measurement points
    :type points: numpy.recarray
    :param visible: indicates node is visible
    :type visible: bool
    :return: node containing measurement points
    :rtype: Node
    """
    parent = Node()
    parent.visible = visible
    parent.render_mode = Node.RenderMode.Solid
    colour_enabled = Colour(*settings.value(settings.Key.Measurement_Colour))
    colour_disabled = Colour(*settings.value(settings.Key.Measurement_Disabled_Colour))
    size = settings.value(settings.Key.Measurement_Size)
    for point, enabled in points:
        x, y, z = point
        cross = Node()
        # Three axis-aligned line segments centred on the point form a cross.
        cross.vertices = np.array([[x - size, y, z], [x + size, y, z],
                                   [x, y - size, z], [x, y + size, z],
                                   [x, y, z - size], [x, y, z + size]])
        cross.indices = np.arange(6)
        cross.colour = colour_enabled if enabled else colour_disabled
        cross.render_mode = None
        cross.visible = None
        cross.render_primitive = Node.RenderPrimitive.Lines
        parent.addChild(cross)
    return parent
def create_measurement_vector_node(points, vectors, alignment, visible=True):
    """Builds a node showing measurement vectors as line segments.

    :param points: measurement points
    :type points: numpy.recarray
    :param vectors: measurement vectors
    :type vectors: numpy.ndarray
    :param alignment: vector alignment
    :type alignment: int
    :param visible: indicates node is visible
    :type visible: bool
    :return: node containing measurement vectors
    :rtype: Node
    """
    parent = Node()
    parent.visible = visible
    parent.render_mode = Node.RenderMode.Solid
    if vectors.shape[0] == 0:
        return parent
    # Fall back to the first alignment when the requested one is out of range.
    if alignment >= vectors.shape[2]:
        alignment = 0
    size = settings.value(settings.Key.Vector_Size)
    colours = [Colour(*settings.value(settings.Key.Vector_1_Colour)),
               Colour(*settings.value(settings.Key.Vector_2_Colour))]
    for det_idx in range(vectors.shape[2]):
        origins = points.points
        for vec_idx in range(vectors.shape[1] // 3):
            tips = origins + size * vectors[:, vec_idx * 3:vec_idx * 3 + 3, det_idx]
            # Interleave origin/tip rows so consecutive vertex pairs form lines.
            line_vertices = np.column_stack((origins, tips)).reshape(-1, 3)
            child = Node()
            child.vertices = line_vertices
            child.indices = np.arange(line_vertices.shape[0])
            if vec_idx < 2:
                child.colour = colours[vec_idx]
            else:
                # Deterministic pseudo-random colour for extra vector sets.
                np.random.seed(vec_idx)
                child.colour = Colour(*np.random.random(3))
            child.render_mode = None
            child.visible = alignment == det_idx
            child.render_primitive = Node.RenderPrimitive.Lines
            parent.addChild(child)
    return parent
def create_plane_node(plane, width, height):
    """Builds a node for the cross-sectional plane.

    :param plane: plane normal and point
    :type plane: Plane
    :param width: plane width
    :type width: float
    :param height: plane height
    :type height: float
    :return: node containing plane
    :rtype: Node
    """
    node = Node(create_plane(plane, width, height))
    # Drawn transparent so the geometry it cuts through stays visible.
    node.render_mode = Node.RenderMode.Transparent
    node.colour = Colour(*settings.value(settings.Key.Cross_Sectional_Plane_Colour))
    return node
def create_beam_node(instrument, bounds, visible=False):
    """Creates node for beam

    Builds a cuboid for the incident beam from the jaws to the far edge of
    the scene bounds and, when the beam reaches the gauge volume, one cuboid
    per active detector for the diffracted beam plus a line for each
    q-vector.

    :param instrument: instrument object
    :type instrument: Instrument
    :param bounds: bounding box of the instrument scene
    :type bounds: BoundingBox
    :param visible: indicates node is visible
    :type visible: bool
    :return: node containing beam
    :rtype: Node
    """
    node = Node()
    node.render_mode = Node.RenderMode.Solid
    node.colour = Colour(0.80, 0.45, 0.45)
    node.visible = visible
    jaws = instrument.jaws
    detectors = instrument.detectors
    q_vectors = instrument.q_vectors
    gauge_volume = instrument.gauge_volume
    width, height = jaws.aperture
    beam_source = jaws.beam_source
    beam_direction = jaws.beam_direction
    # The cuboid primitive extends along the Y axis; it is rotated onto the
    # beam direction below.
    cuboid_axis = np.array([0., 1., 0.])
    # Project the scene bounds onto the beam direction to size the beam.
    bound_max = np.dot(bounds.max - beam_source, beam_direction)
    bound_min = np.dot(bounds.min - beam_source, beam_direction)
    depth = max(bound_min, bound_max)
    mesh = create_cuboid(width, height, depth)
    m = Matrix44.fromTranslation(beam_source)
    # NOTE(review): argument order here is (beam_direction, cuboid_axis)
    # while the diffracted-beam branch below uses (cuboid_axis, direction);
    # confirm one of them is not swapped.
    m[0:3, 0:3] = rotation_btw_vectors(beam_direction, cuboid_axis)
    # Shift by half the depth so the cuboid starts at the beam source.
    m = m @ Matrix44.fromTranslation([0., -depth/2, 0.])
    mesh.transform(m)
    if instrument.beam_in_gauge_volume:
        for index, detector in enumerate(detectors.values()):
            # Skip detectors without a mounted collimator.
            if detector.current_collimator is None:
                continue
            # Size the diffracted beam from the gauge volume to the bounds.
            bound_max = np.dot(bounds.max - gauge_volume, detector.diffracted_beam)
            bound_min = np.dot(bounds.min - gauge_volume, detector.diffracted_beam)
            depth = max(bound_min, bound_max)
            sub_mesh = create_cuboid(width, height, depth)
            m = Matrix44.fromTranslation(gauge_volume)
            m[0:3, 0:3] = rotation_btw_vectors(cuboid_axis, detector.diffracted_beam)
            m = m @ Matrix44.fromTranslation([0., depth/2, 0.])
            mesh.append(sub_mesh.transformed(m))
            # draw q_vector as a line from the gauge volume
            end_point = gauge_volume + q_vectors[index] * depth/2
            vertices = np.array((gauge_volume, end_point))
            child = Node()
            child.vertices = vertices
            child.indices = np.arange(vertices.shape[0])
            child.colour = Colour(0.60, 0.25, 0.25)
            child.render_primitive = Node.RenderPrimitive.Lines
            node.addChild(child)
    # The combined beam cuboids become this node's own geometry.
    node.vertices = mesh.vertices
    node.indices = mesh.indices
    node.normals = mesh.normals
    return node
def create_instrument_node(instrument, return_ids=False):
    """Builds the 3D model of a given instrument.

    The returned dict maps component identifiers to the cumulative number of
    child nodes after that component was added to the flattened tree.

    :param instrument: instrument
    :type instrument: Instrument
    :param return_ids: flag indicating ids are required
    :type return_ids: bool
    :return: 3D model of instrument and dict to identify nodes
    :rtype: Tuple[Node, Dict[str, int]]
    """
    root = Node()
    running_total = 0
    # Positioning stack
    model = instrument.positioning_stack.model()
    running_total += len(model.flatten().children)
    ids = {Attributes.Positioner.value: running_total}
    root.addChild(model)
    # One model per detector
    for detector in instrument.detectors.values():
        model = detector.model()
        running_total += len(model.flatten().children)
        ids[f'{Attributes.Detector.value}_{detector.name}'] = running_total
        root.addChild(model)
    # Incident jaws
    model = instrument.jaws.model()
    running_total += len(model.flatten().children)
    ids[Attributes.Jaws.value] = running_total
    root.addChild(model)
    # Fixed hardware contributes one node each
    for name, model in instrument.fixed_hardware.items():
        running_total += 1
        ids[f'{Attributes.Fixture.value}_{name}'] = running_total
        root.addChild(Node(model))
    return (root.flatten(), ids) if return_ids else root.flatten()
| [
"numpy.minimum",
"numpy.maximum",
"numpy.random.seed",
"numpy.random.random",
"numpy.arange",
"numpy.array",
"numpy.column_stack",
"numpy.dot"
] | [((11974, 11999), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0]'], {}), '([0.0, 1.0, 0.0])\n', (11982, 11999), True, 'import numpy as np\n'), ((12014, 12062), 'numpy.dot', 'np.dot', (['(bounds.max - beam_source)', 'beam_direction'], {}), '(bounds.max - beam_source, beam_direction)\n', (12020, 12062), True, 'import numpy as np\n'), ((12079, 12127), 'numpy.dot', 'np.dot', (['(bounds.min - beam_source)', 'beam_direction'], {}), '(bounds.min - beam_source, beam_direction)\n', (12085, 12127), True, 'import numpy as np\n'), ((8247, 8369), 'numpy.array', 'np.array', (['[[x - size, y, z], [x + size, y, z], [x, y - size, z], [x, y + size, z], [x,\n y, z - size], [x, y, z + size]]'], {}), '([[x - size, y, z], [x + size, y, z], [x, y - size, z], [x, y +\n size, z], [x, y, z - size], [x, y, z + size]])\n', (8255, 8369), True, 'import numpy as np\n'), ((8566, 8594), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5]'], {}), '([0, 1, 2, 3, 4, 5])\n', (8574, 8594), True, 'import numpy as np\n'), ((870, 882), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (878, 882), True, 'import numpy as np\n'), ((910, 922), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (918, 922), True, 'import numpy as np\n'), ((950, 962), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (958, 962), True, 'import numpy as np\n'), ((2832, 2874), 'numpy.maximum', 'np.maximum', (['node.bounding_box.max', 'max_pos'], {}), '(node.bounding_box.max, max_pos)\n', (2842, 2874), True, 'import numpy as np\n'), ((2897, 2939), 'numpy.minimum', 'np.minimum', (['node.bounding_box.min', 'min_pos'], {}), '(node.bounding_box.min, min_pos)\n', (2907, 2939), True, 'import numpy as np\n'), ((4429, 4471), 'numpy.maximum', 'np.maximum', (['self.bounding_box.max', 'max_pos'], {}), '(self.bounding_box.max, max_pos)\n', (4439, 4471), True, 'import numpy as np\n'), ((4494, 4536), 'numpy.minimum', 'np.minimum', (['self.bounding_box.min', 'min_pos'], {}), '(self.bounding_box.min, min_pos)\n', (4504, 4536), True, 'import numpy 
as np\n'), ((10190, 10218), 'numpy.arange', 'np.arange', (['vertices.shape[0]'], {}), '(vertices.shape[0])\n', (10199, 10218), True, 'import numpy as np\n'), ((12611, 12670), 'numpy.dot', 'np.dot', (['(bounds.max - gauge_volume)', 'detector.diffracted_beam'], {}), '(bounds.max - gauge_volume, detector.diffracted_beam)\n', (12617, 12670), True, 'import numpy as np\n'), ((12695, 12754), 'numpy.dot', 'np.dot', (['(bounds.min - gauge_volume)', 'detector.diffracted_beam'], {}), '(bounds.min - gauge_volume, detector.diffracted_beam)\n', (12701, 12754), True, 'import numpy as np\n'), ((13232, 13267), 'numpy.array', 'np.array', (['(gauge_volume, end_point)'], {}), '((gauge_volume, end_point))\n', (13240, 13267), True, 'import numpy as np\n'), ((13362, 13390), 'numpy.arange', 'np.arange', (['vertices.shape[0]'], {}), '(vertices.shape[0])\n', (13371, 13390), True, 'import numpy as np\n'), ((10317, 10334), 'numpy.random.seed', 'np.random.seed', (['j'], {}), '(j)\n', (10331, 10334), True, 'import numpy as np\n'), ((10039, 10080), 'numpy.column_stack', 'np.column_stack', (['(start_point, end_point)'], {}), '((start_point, end_point))\n', (10054, 10080), True, 'import numpy as np\n'), ((10374, 10393), 'numpy.random.random', 'np.random.random', (['(3)'], {}), '(3)\n', (10390, 10393), True, 'import numpy as np\n')] |
import numpy as np
from scipy import spatial
class FormationControl():
    """Region-based shape control primitive for multi-vehicle formations.

    Coded by: <NAME>, Date: 18/9/2019
    """

    def __init__(self):
        # The controller keeps no state between calls.
        return None

    def get_vel(self, j, curr_pos, min_dis, centroid_pos, alpha, gamma,
                path_vel, vel_max, a, b, knn, formation_type):
        """Compute the commanded velocity for vehicle *j*.

        Combines the path-following velocity with a repulsive
        collision-avoidance term and an attractive region-shape term.
        """
        own_pos = curr_pos[j, :]
        # With a small swarm every vehicle counts as a neighbour.
        if len(curr_pos) < 6:
            knn = len(curr_pos)
        neighbour_idx = spatial.KDTree(curr_pos).query(own_pos, k=knn)[1]
        neighbours = curr_pos[neighbour_idx, :]
        # Collision-avoidance gradient: active only inside min_dis.
        weight = 1 / len(neighbours)  # constant
        g_values = (min_dis**2) - np.linalg.norm(
            own_pos - neighbours, axis=1, ord=2)
        g_grad = 2 * (neighbours - own_pos)
        activation = np.maximum(0, g_values / (min_dis**2))**2
        repulsion = weight * np.dot(activation, g_grad)
        # Elliptical region constraint around the formation centroid.
        scaled_offset = (own_pos - centroid_pos) / np.array([a, b])
        region_violation = np.linalg.norm(scaled_offset, ord=2) - 1
        kl = 1  # constant
        zeta_grad = (kl * max(0, region_violation)) * (1 * (own_pos - centroid_pos))
        vel = path_vel - (alpha * zeta_grad) - (gamma * repulsion)
        if vel_max is not None:
            vel[0] = self.getFeasibleSpeed(vel[0], vel_max)
            vel[1] = self.getFeasibleSpeed(vel[1], vel_max)
        return vel

    def getFeasibleSpeed(self, vel, vel_max):
        """Clamp the velocity component for stability.

        Parameters
        ----------
        vel : float
            Calculated velocity
        vel_max : float
            Maximum allowed velocity
        """
        return min(vel_max, vel) if vel > 0 else max(-vel_max, vel)

    def execute(self, vehicles, next_pos, centroid_pos, dt, formation_type):
        """Advance every vehicle one step towards the target formation.

        Parameters
        ----------
        vehicles : list
            A list containing UAV or UGV class
        centroid_pos : array
            An array containing the x, y, and z position
        dt : float
            Time step to be used for distance calculation
        """
        # Controller constants
        vel_max = 100
        a = 3
        b = 3
        knn = 6
        vmax = vehicles[0].speed
        alpha = 1
        gamma = 1
        min_dis = 2
        positions = np.zeros((len(vehicles), 2))
        for idx, vehicle in enumerate(vehicles):
            positions[idx] = vehicle.current_pos[0:2]
        commanded = []
        for idx, vehicle in enumerate(vehicles):
            offset = np.array([next_pos[0], next_pos[1]]) - centroid_pos
            path_vel = (1 / dt) * offset
            vel = self.get_vel(idx, positions, min_dis, centroid_pos,
                               alpha, gamma, path_vel, vel_max, a, b, knn,
                               formation_type)
            # Rescale so the commanded speed never exceeds the vehicle limit.
            speed = np.linalg.norm(vel)
            if speed > vmax:
                vel = (vmax / speed) * vel
            commanded.append(vel)
            # Integrate the position one time step forward.
            new_pos = np.zeros(3)
            new_pos[0:2] = vehicle.current_pos[0:2] + vel * dt
            if vehicle.type == 'uav':
                new_pos[2] = 12.0
                vehicle.updated_pos = new_pos
            else:
                new_pos[2] = 1.5
                vehicle.updated_pos = new_pos
        # Formation has converged once every commanded speed is small.
        magnitudes = np.linalg.norm(np.array(commanded), axis=1)
        formation_done = bool(np.max(magnitudes) < 0.015 * len(positions))
        return vehicles, formation_done
| [
"numpy.maximum",
"numpy.zeros",
"numpy.max",
"numpy.array",
"numpy.linalg.norm",
"scipy.spatial.KDTree",
"numpy.dot"
] | [((820, 871), 'numpy.linalg.norm', 'np.linalg.norm', (['(curr_loc - peers_pos)'], {'axis': '(1)', 'ord': '(2)'}), '(curr_loc - peers_pos, axis=1, ord=2)\n', (834, 871), True, 'import numpy as np\n'), ((946, 981), 'numpy.maximum', 'np.maximum', (['(0)', '(g_lij / min_dis ** 2)'], {}), '(0, g_lij / min_dis ** 2)\n', (956, 981), True, 'import numpy as np\n'), ((1004, 1026), 'numpy.dot', 'np.dot', (['temp', 'del_g_ij'], {}), '(temp, del_g_ij)\n', (1010, 1026), True, 'import numpy as np\n'), ((1071, 1087), 'numpy.array', 'np.array', (['[a, b]'], {}), '([a, b])\n', (1079, 1087), True, 'import numpy as np\n'), ((1105, 1132), 'numpy.linalg.norm', 'np.linalg.norm', (['temp'], {'ord': '(2)'}), '(temp, ord=2)\n', (1119, 1132), True, 'import numpy as np\n'), ((3339, 3350), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3347, 3350), True, 'import numpy as np\n'), ((3699, 3721), 'numpy.array', 'np.array', (['vel_combined'], {}), '(vel_combined)\n', (3707, 3721), True, 'import numpy as np\n'), ((3743, 3763), 'numpy.max', 'np.max', (['vel_combined'], {}), '(vel_combined)\n', (3749, 3763), True, 'import numpy as np\n'), ((2828, 2864), 'numpy.array', 'np.array', (['[next_pos[0], next_pos[1]]'], {}), '([next_pos[0], next_pos[1]])\n', (2836, 2864), True, 'import numpy as np\n'), ((3167, 3186), 'numpy.linalg.norm', 'np.linalg.norm', (['vel'], {}), '(vel)\n', (3181, 3186), True, 'import numpy as np\n'), ((3225, 3244), 'numpy.linalg.norm', 'np.linalg.norm', (['vel'], {}), '(vel)\n', (3239, 3244), True, 'import numpy as np\n'), ((569, 593), 'scipy.spatial.KDTree', 'spatial.KDTree', (['curr_pos'], {}), '(curr_pos)\n', (583, 593), False, 'from scipy import spatial\n')] |
import os
import sys
import json
import datetime
import numpy as np
import skimage.draw
from pathlib import Path
import skimage.io
import tensorflow as tf
# os.environ["TF_MIN_GPU_MULTIPROCESSOR_COUNT"] = "4"
# Pin TensorFlow to the first GPU.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Load project-wide settings (directory layout, checkpoints, etc.).
with open('../../settings.json') as f:
    setting = json.load(f)
# Root directory of the project
ROOT_DIR = os.getcwd()
# Import Mask RCNN
sys.path.append(ROOT_DIR)
DEFAULT_LOGS_DIR = os.path.join('../../', setting['LOGS_DIR'])
from mrcnn.config import Config
from mrcnn import utils as utils
from mrcnn import model as modellib
from imgaug import augmenters as iaa
############################################################
#  Configurations
############################################################
# Fixed training image size (width, height); also used by the dataset loader.
IMGSIZE = (1024, 1024)
class AdrivingConfig(Config):
    """Configuration for training on the toy dataset.
    Derives from the base Config class and overrides some values.
    """
    # Give the configuration a recognizable name
    NAME = "Adriving"
    # We use a GPU with 12GB memory, which can fit two images.
    # Adjust down if you use a smaller GPU.
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
    # Number of training steps per epoch
    STEPS_PER_EPOCH = 10
    VALIDATION_STEPS = 1
    # Number of classes (including background)
    NUM_CLASSES = 1 + 7  # Background + 7 object classes
    # Presumably the per-channel RGB mean of the training set — TODO confirm.
    MEAN_PIXEL = np.array([88.59672608, 95.91837699, 98.90089033])
    # Region-proposal and detection thresholds/limits.
    RPN_NMS_THRESHOLD = 0.6
    TRAIN_ROIS_PER_IMAGE = 600
    RPN_TRAIN_ANCHORS_PER_IMAGE = 320
    MAX_GT_INSTANCES = 80
    POST_NMS_ROIS_TRAINING = 4000
    POST_NMS_ROIS_INFERENCE = 2000
    DETECTION_NMS_THRESHOLD = 0.3
    DETECTION_MIN_CONFIDENCE = 0.5
    DETECTION_MAX_INSTANCES = 100
    # Images are fed at their native IMGSIZE without resizing.
    IMAGE_MIN_DIM = IMGSIZE[0]
    IMAGE_MAX_DIM = IMGSIZE[1]
    IMAGE_RESIZE_MODE = "none"
    MASK_SHAPE = [28, 28]
    # Optimiser settings.
    OPTIMIZER = 'SGD'
    LEARNING_RATE = 1e-6
    EPSILON = 1e-6
    GRADIENT_CLIP_NORM = 5
    ACCUM_ITERS = 1
ACCUM_ITERS = 1
############################################################
# Dataset
############################################################
from adriving_util import *
def train(model):
    """Run the staged training schedule on the Adriving dataset.

    :param model: Mask R-CNN model in training mode
    """
    # Light augmentation: random horizontal flips and brightness jitter.
    aug = iaa.SomeOf((0, 2), [
        iaa.Fliplr(0.5),
        iaa.Multiply((0.9, 1.1))
        # iaa.GaussianBlur(sigma=(0.0, 1.0))
    ])
    # Training dataset.
    dataset_train = AdrivingDatasetNoResize()
    dataset_train.load_adriving(data_dir, "train", size=IMGSIZE)
    dataset_train.prepare()
    print(len(dataset_train.image_ids))
    # Validation dataset
    dataset_val = AdrivingDatasetNoResize()
    dataset_val.load_adriving(data_dir, "val", size=IMGSIZE)
    dataset_val.prepare()
    print("Training network heads")
    # Each stage is (learning rate, target epoch, trainable layers) and
    # progressively unfreezes more of the network.
    # NOTE(review): the last two stages both target epochs=100; if the epoch
    # count is cumulative, the final 'all' stage would train zero additional
    # epochs — confirm this is intended.
    schedule = [
        (config.LEARNING_RATE, 40, 'heads'),
        (config.LEARNING_RATE, 50, '4+'),
        (config.LEARNING_RATE / 10, 100, '3+'),
        (config.LEARNING_RATE / 10, 100, 'all'),
    ]
    for lr, target_epoch, layers in schedule:
        model.train(dataset_train, dataset_val,
                    learning_rate=lr,
                    augmentation=aug,
                    epochs=target_epoch,
                    layers=layers)
# --------------------------------------------------------------------------
############################################################
#  Training
############################################################
if __name__ == '__main__':
    config = AdrivingConfig()
    config.display()
    # NOTE(review): DEVICE is assigned but not used in this script.
    DEVICE = "/gpu:0"  # /cpu:0 or /gpu:0
    model = modellib.MaskRCNN(mode="training", config=config,
                              model_dir=os.path.join(DEFAULT_LOGS_DIR, 'mask_rcnn'))
    COCO_WEIGHTS_PATH = os.path.join('../../', setting['MODEL_CHECKPOINT_DIR'], 'mask_rcnn_coco.h5')
    weights_path = COCO_WEIGHTS_PATH
    # Start from COCO weights; skip the class-specific heads, whose shapes
    # differ because NUM_CLASSES is not the COCO class count.
    model.load_weights(weights_path, by_name=True, exclude=[
        "mrcnn_class_logits", "mrcnn_bbox_fc",
        "mrcnn_bbox", "mrcnn_mask"])
    # weights_path = model.find_last()[1]
    # weights_path = 'weights/run1/mask_rcnn_adriving_aug_1024_1024_1e-4_0507_ep0052.h5'
    # model.load_weights(COCO_WEIGHTS_PATH, by_name=True)
    data_dir = Path(os.path.join('../../', setting['TEST_DATA_CLEAN_PATH'], 'train_val'))
    train(model)
| [
"sys.path.append",
"json.load",
"os.getcwd",
"imgaug.augmenters.Fliplr",
"numpy.array",
"imgaug.augmenters.Multiply",
"os.path.join"
] | [((362, 373), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (371, 373), False, 'import os\n'), ((394, 419), 'sys.path.append', 'sys.path.append', (['ROOT_DIR'], {}), '(ROOT_DIR)\n', (409, 419), False, 'import sys\n'), ((440, 483), 'os.path.join', 'os.path.join', (['"""../../"""', "setting['LOGS_DIR']"], {}), "('../../', setting['LOGS_DIR'])\n", (452, 483), False, 'import os\n'), ((304, 316), 'json.load', 'json.load', (['f'], {}), '(f)\n', (313, 316), False, 'import json\n'), ((1383, 1432), 'numpy.array', 'np.array', (['[88.59672608, 95.91837699, 98.90089033]'], {}), '([88.59672608, 95.91837699, 98.90089033])\n', (1391, 1432), True, 'import numpy as np\n'), ((4276, 4352), 'os.path.join', 'os.path.join', (['"""../../"""', "setting['MODEL_CHECKPOINT_DIR']", '"""mask_rcnn_coco.h5"""'], {}), "('../../', setting['MODEL_CHECKPOINT_DIR'], 'mask_rcnn_coco.h5')\n", (4288, 4352), False, 'import os\n'), ((4760, 4828), 'os.path.join', 'os.path.join', (['"""../../"""', "setting['TEST_DATA_CLEAN_PATH']", '"""train_val"""'], {}), "('../../', setting['TEST_DATA_CLEAN_PATH'], 'train_val')\n", (4772, 4828), False, 'import os\n'), ((2277, 2292), 'imgaug.augmenters.Fliplr', 'iaa.Fliplr', (['(0.5)'], {}), '(0.5)\n', (2287, 2292), True, 'from imgaug import augmenters as iaa\n'), ((2302, 2326), 'imgaug.augmenters.Multiply', 'iaa.Multiply', (['(0.9, 1.1)'], {}), '((0.9, 1.1))\n', (2314, 2326), True, 'from imgaug import augmenters as iaa\n'), ((4202, 4245), 'os.path.join', 'os.path.join', (['DEFAULT_LOGS_DIR', '"""mask_rcnn"""'], {}), "(DEFAULT_LOGS_DIR, 'mask_rcnn')\n", (4214, 4245), False, 'import os\n')] |
"""
Author: <NAME> (<EMAIL>)
Copyright © 2021, United States Government, as represented by the Administrator
of the National Aeronautics and Space Administration. All rights reserved.
The HybridQ: A Hybrid Simulator for Quantum Circuits platform is licensed under
the Apache License, Version 2.0 (the "License"); you may not use this file
except in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0.
Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from __future__ import annotations
from os import environ
from os.path import basename
_detect_mpi = 'DISABLE_MPI_AUTODETECT' not in environ and '_' in environ and basename(
environ['_']) in ['mpiexec', 'mpirun']
from more_itertools import distribute, chunked, flatten
from psutil import virtual_memory, getloadavg
from functools import partial as partial_func
from hybridq.utils import globalize, kron
import hybridq.circuit.utils as utils
from collections import defaultdict
from hybridq.circuit import Circuit
from multiprocessing import Pool
from itertools import product
from hybridq.gate import Gate
from time import sleep, time
from tqdm.auto import tqdm
from warnings import warn
from os import cpu_count
import numpy as np
import numba
import sys
# Index multiplier: gate codes are grouped in bands of 20, so the number of
# qubits a gate acts on is (code // _gate_mul) + 1.
_gate_mul = 20
# 1q gates (0 <= index < 20)
_I = 0
_X = 1
_Y = 2
_Z = 3
_H = 4
_RX = 10
_RY = 11
_RZ = 12
# Code for an arbitrary one-qubit matrix (last slot of the 1q band).
_MATRIX_1 = 19
# 2q gates (20 <= index < 40)
_CZ = 20
_ISWAP = 21
_SWAP = 22
_MATRIX_2 = 39
# 3q gates (40 <= index < 60)
_MATRIX_3 = 59
# 4q gates (60 <= index < 80)
_MATRIX_4 = 79
# 5q gates (80 <= index < 100)
_MATRIX_5 = 99
# Codes representing arbitrary dense matrices, one per qubit count.
_MATRIX_SET = [
    _MATRIX_1,
    _MATRIX_2,
    _MATRIX_3,
    _MATRIX_4,
    _MATRIX_5,
]
@numba.njit(fastmath=True, cache=True)
def _update_pauli_string(gates, qubits, params, pauli_string: list[int],
phase: float, pos_shift: int, eps: float,
atol: float) -> float:
# Get branches
_branches = []
for pos in range(pos_shift, len(gates)):
# Get gate
_name = gates[pos]
# Get number of qubits
_n_qubits = (_name // _gate_mul) + 1
# Get qubits and paulis
if _n_qubits == 5:
q1, q2, q3, q4, q5 = qubits[pos][0], qubits[pos][1], qubits[pos][
2], qubits[pos][3], qubits[pos][4]
s1, s2, s3, s4, s5 = pauli_string[q1], pauli_string[
q2], pauli_string[q3], pauli_string[q4], pauli_string[q5]
if s1 == s2 == s3 == s4 == s5 == _I:
continue
elif _n_qubits == 4:
q1, q2, q3, q4 = qubits[pos][0], qubits[pos][1], qubits[pos][
2], qubits[pos][3]
s1, s2, s3, s4 = pauli_string[q1], pauli_string[q2], pauli_string[
q3], pauli_string[q4]
if s1 == s2 == s3 == s4 == _I:
continue
elif _n_qubits == 3:
q1, q2, q3 = qubits[pos][0], qubits[pos][1], qubits[pos][2]
s1, s2, s3 = pauli_string[q1], pauli_string[q2], pauli_string[q3]
if s1 == s2 == s3 == _I:
continue
elif _n_qubits == 2:
q1, q2 = qubits[pos][0], qubits[pos][1]
s1, s2 = pauli_string[q1], pauli_string[q2]
if s1 == s2 == _I:
continue
else:
q1 = qubits[pos][0]
s1 = pauli_string[q1]
if s1 == _I:
continue
# I
if _name == _I:
# Just ignore
pass
# X, Y, Z
elif _name in [_X, _Y, _Z]:
# Update phase
if s1 != _name:
phase *= -1
# H
elif _name == _H:
if s1 == _X:
pauli_string[q1] = _Z
elif s1 == _Z:
pauli_string[q1] = _X
elif s1 == _Y:
phase *= -1
# ISWAP
elif _name == _ISWAP:
if s1 == _X and s2 == _I:
pauli_string[q1] = _Z
pauli_string[q2] = _Y
phase *= -1
elif s1 == _Y and s2 == _I:
pauli_string[q1] = _Z
pauli_string[q2] = _X
elif s1 == _Z and s2 == _I:
pauli_string[q1] = _I
pauli_string[q2] = _Z
elif s1 == _I and s2 == _X:
pauli_string[q1] = _Y
pauli_string[q2] = _Z
phase *= -1
elif s1 == _Y and s2 == _X:
pauli_string[q1] = _X
pauli_string[q2] = _Y
elif s1 == _Z and s2 == _X:
pauli_string[q1] = _Y
pauli_string[q2] = _I
phase *= -1
elif s1 == _I and s2 == _Y:
pauli_string[q1] = _X
pauli_string[q2] = _Z
elif s1 == _X and s2 == _Y:
pauli_string[q1] = _Y
pauli_string[q2] = _X
elif s1 == _Z and s2 == _Y:
pauli_string[q1] = _X
pauli_string[q2] = _I
elif s1 == _I and s2 == _Z:
pauli_string[q1] = _Z
pauli_string[q2] = _I
elif s1 == _X and s2 == _Z:
pauli_string[q1] = _I
pauli_string[q2] = _Y
phase *= -1
elif s1 == _Y and s2 == _Z:
pauli_string[q1] = _I
pauli_string[q2] = _X
# CZ
elif _name == _CZ:
# Combine indexes
if s1 in [_X, _Y] and s2 == _I:
pauli_string[q2] = _Z
elif s1 == _I and s2 in [_X, _Y]:
pauli_string[q1] = _Z
elif s1 in [_X, _Y] and s2 == _Z:
pauli_string[q2] = _I
elif s1 == _Z and s2 in [_X, _Y]:
pauli_string[q1] = _I
elif s1 == _X and s2 == _X:
pauli_string[q1] = _Y
pauli_string[q2] = _Y
elif s1 == _X and s2 == _Y:
pauli_string[q1] = _Y
pauli_string[q2] = _X
phase *= -1
elif s1 == _Y and s2 == _X:
pauli_string[q1] = _X
pauli_string[q2] = _Y
phase *= -1
elif s1 == _Y and s2 == _Y:
pauli_string[q1] = _X
pauli_string[q2] = _X
# SWAP
elif _name == _SWAP:
pauli_string[q1], pauli_string[q2] = pauli_string[q2], pauli_string[
q1]
elif _name == _MATRIX_1:
# Get weights
_ws = np.reshape(params[pos][:16], (4, 4))[s1]
# Get indexes where weights are different from zeros
_idxs = np.where(np.abs(_ws) > eps)[0]
# Get only weights different from zeros
_ws = _ws[_idxs]
# Sort weights
_pos = np.argsort(np.abs(_ws))[::-1]
_ws = _ws[_pos]
_idxs = _idxs[_pos]
for _i in range(1, len(_idxs)):
# Get position and weight
_p = _idxs[_i]
_w = _ws[_i]
# Get new phase
_phase = _w * phase
# Check phase is large enough
if abs(_phase) > atol:
# Branch
pauli_string[q1] = _p
_branches.append((np.copy(pauli_string), _phase, pos + 1))
# Keep going with the largest weight
pauli_string[q1] = _idxs[0]
phase *= _ws[0]
elif _name == _MATRIX_2:
# Get weights
_ws = np.reshape(params[pos][:256], (4, 4, 16))[s1, s2]
# Get indexes where weights are different from zeros
_idxs = np.where(np.abs(_ws) > eps)[0]
# Get only weights different from zeros
_ws = _ws[_idxs]
# Sort weights
_pos = np.argsort(np.abs(_ws))[::-1]
_ws = _ws[_pos]
_idxs = _idxs[_pos]
for _i in range(1, len(_idxs)):
# Get position and weight
_p = _idxs[_i]
_w = _ws[_i]
# Get new phase
_phase = _w * phase
# Check phase is large enough
if abs(_phase) > atol:
# Get gates
_g2 = _p % 4
_g1 = _p // 4
# Branch
pauli_string[q1] = _g1
pauli_string[q2] = _g2
_branches.append((np.copy(pauli_string), _phase, pos + 1))
# Keep going with the largest weight
_p = _idxs[0]
_g2 = _p % 4
_g1 = _p // 4
pauli_string[q1] = _g1
pauli_string[q2] = _g2
phase *= _ws[0]
elif _name == _MATRIX_3:
# Get weights
_ws = np.reshape(params[pos][:4096], (4, 4, 4, 64))[s1, s2, s3]
# Get indexes where weights are different from zeros
_idxs = np.where(np.abs(_ws) > eps)[0]
# Get only weights different from zeros
_ws = _ws[_idxs]
# Sort weights
_pos = np.argsort(np.abs(_ws))[::-1]
_ws = _ws[_pos]
_idxs = _idxs[_pos]
for _i in range(1, len(_idxs)):
# Get position and weight
_p = _idxs[_i]
_w = _ws[_i]
# Get new phase
_phase = _w * phase
# Check phase is large enough
if abs(_phase) > atol:
# Get gates
_g3 = _p % 4
_g2 = (_p // 4) % 4
_g1 = _p // 16
# Branch
pauli_string[q1] = _g1
pauli_string[q2] = _g2
pauli_string[q3] = _g3
_branches.append((np.copy(pauli_string), _phase, pos + 1))
# Keep going with the largest weight
_p = _idxs[0]
_g3 = _p % 4
_g2 = (_p // 4) % 4
_g1 = _p // 16
pauli_string[q1] = _g1
pauli_string[q2] = _g2
pauli_string[q3] = _g3
phase *= _ws[0]
elif _name == _MATRIX_4:
# Get weights
_ws = np.reshape(params[pos][:65536], (4, 4, 4, 4, 256))[s1, s2, s3,
s4]
# Get indexes where weights are different from zeros
_idxs = np.where(np.abs(_ws) > eps)[0]
# Get only weights different from zeros
_ws = _ws[_idxs]
# Sort weights
_pos = np.argsort(np.abs(_ws))[::-1]
_ws = _ws[_pos]
_idxs = _idxs[_pos]
for _i in range(1, len(_idxs)):
# Get position and weight
_p = _idxs[_i]
_w = _ws[_i]
# Get new phase
_phase = _w * phase
# Check phase is large enough
if abs(_phase) > atol:
# Get gates
_g4 = _p % 4
_g3 = (_p // 4) % 4
_g2 = (_p // 16) % 4
_g1 = _p // 64
# Branch
pauli_string[q1] = _g1
pauli_string[q2] = _g2
pauli_string[q3] = _g3
pauli_string[q4] = _g4
_branches.append((np.copy(pauli_string), _phase, pos + 1))
# Keep going with the largest weight
_p = _idxs[0]
_g4 = _p % 4
_g3 = (_p // 4) % 4
_g2 = (_p // 16) % 4
_g1 = _p // 64
pauli_string[q1] = _g1
pauli_string[q2] = _g2
pauli_string[q3] = _g3
pauli_string[q4] = _g4
phase *= _ws[0]
elif _name == _MATRIX_5:
# Get weights
_ws = np.reshape(params[pos][:1048576],
(4, 4, 4, 4, 4, 1024))[s1, s2, s3, s4, s5]
# Get indexes where weights are different from zeros
_idxs = np.where(np.abs(_ws) > eps)[0]
# Get only weights different from zeros
_ws = _ws[_idxs]
# Sort weights
_pos = np.argsort(np.abs(_ws))[::-1]
_ws = _ws[_pos]
_idxs = _idxs[_pos]
for _i in range(1, len(_idxs)):
# Get position and weight
_p = _idxs[_i]
_w = _ws[_i]
# Get new phase
_phase = _w * phase
# Check phase is large enough
if abs(_phase) > atol:
# Get gates
_g5 = _p % 4
_g4 = (_p // 4) % 4
_g3 = (_p // 16) % 4
_g2 = (_p // 64) % 4
_g1 = _p // 256
# Branch
pauli_string[q1] = _g1
pauli_string[q2] = _g2
pauli_string[q3] = _g3
pauli_string[q4] = _g4
pauli_string[q5] = _g5
_branches.append((np.copy(pauli_string), _phase, pos + 1))
# Keep going with the largest weight
_p = _idxs[0]
_g5 = _p % 4
_g4 = (_p // 4) % 4
_g3 = (_p // 16) % 4
_g2 = (_p // 64) % 4
_g1 = _p // 256
pauli_string[q1] = _g1
pauli_string[q2] = _g2
pauli_string[q3] = _g3
pauli_string[q4] = _g4
pauli_string[q5] = _g5
phase *= _ws[0]
return (pauli_string, phase), _branches
# Pre-process circuit
def _process_gate(gate, **kwargs):
    """Rewrite `gate` as a single 'MATRIX_n' instruction in the Pauli basis.

    The unitary ``U`` of `gate` is conjugated against every n-qubit Pauli
    operator ``P`` (``U† P U``) and the result is projected onto Pauli
    coefficients through a precomputed linear system.  The flattened real
    coefficients become the parameters of the returned instruction.

    Parameters
    ----------
    gate:
        Project gate object exposing ``matrix()`` and ``qubits``.
    kwargs:
        May carry 'LS_cache' and 'P_cache' dicts so repeated calls reuse
        the linear systems and Pauli operators already built.

    Returns
    -------
    list[tuple[str, tuple, np.ndarray]]
        A single-element list ``[(f'MATRIX_{n}', qubits, params)]``.
    """
    # Set default caches (shared across calls only if the caller passes them in)
    kwargs.setdefault('LS_cache', {})
    kwargs.setdefault('P_cache', {})

    def _GenerateLinearSystem(n_qubits):
        """Return (cached) inverse of the flattened n-qubit Pauli basis."""
        # Check if number of qubits is supported (a matching _MATRIX_n
        # instruction code must exist at module level)
        if f'_MATRIX_{n_qubits}' not in globals():
            raise ValueError('Too many qubits')
        if n_qubits not in kwargs['LS_cache']:
            I = Gate('I').matrix().astype('complex128')
            X = Gate('X').matrix().astype('complex128')
            Y = Gate('Y').matrix().astype('complex128')
            Z = Gate('Z').matrix().astype('complex128')
            W = [I, X, Y, Z]
            # Build the full n-qubit Pauli basis by repeated Kronecker products
            for _ in range(n_qubits - 1):
                W = [kron(g1, g2) for g1 in W for g2 in [I, X, Y, Z]]
            # Invert the matrix whose columns are the flattened Pauli operators,
            # so that multiplying by a flattened operator yields its Pauli
            # decomposition coefficients.
            W = np.linalg.inv(np.reshape(W, (2**(2 * n_qubits),) * 2).T)
            kwargs['LS_cache'][n_qubits] = W
        return kwargs['LS_cache'][n_qubits]

    def _GetPauliOperator(*ps):
        """Return (cached) Kronecker product of single-qubit Paulis in `ps`."""
        # Check if number of qubits is supported
        if f'_MATRIX_{len(ps)}' not in globals():
            raise ValueError('Too many qubits')
        ps = ''.join(ps)
        if ps not in kwargs['P_cache']:
            kwargs['P_cache'][ps] = kron(*(Gate(g).matrix() for g in ps))
        return kwargs['P_cache'][ps]

    # Get matrix
    _U = gate.matrix()
    # Get qubits
    _q = gate.qubits
    # Get linear system
    _LS = _GenerateLinearSystem(len(_q))
    # Decompose U† P U for every Pauli string P over the gate's qubits; the
    # imaginary part is discarded (coefficients of a Pauli decomposition of a
    # conjugated Pauli are real).
    _params = np.real(
        np.array([
            _LS.dot(_U.conj().T.dot(_GetPauliOperator(*_gs)).dot(_U).flatten())
            for _gs in product(*(('IXYZ',) * len(_q)))
        ]).flatten())
    return [(f'MATRIX_{len(_q)}', gate.qubits, _params)]
def _breadth_first_search(_update, db, branches, max_n_branches, infos, verbose,
                          **kwargs):
    """Grow the pool of branches breadth-first until it saturates.

    Repeatedly pops the branch with the smallest position shift, applies
    `_update` to it, stores the produced Pauli string into `db` via the
    user-supplied 'collect'/'transform' callbacks, and pushes the newly
    spawned branches back into the pool.  The loop ends once the pool is
    empty or holds at least `max_n_branches` entries.

    Returns the surviving branches sorted by ascending position shift, ready
    to be consumed by the depth-first phase.
    """
    # Hoist the user-provided callbacks out of the hot loop.
    collect, transform = kwargs['collect'], kwargs['transform']
    with tqdm(desc="Collect branches", disable=not verbose) as pbar:
        while branches and len(branches) < max_n_branches:
            # Expand the branch with the smallest position shift; the
            # descending sort below keeps it at the end of the list.
            (new_ps, new_ph), new_branches = _update(*branches.pop())
            # Fold the produced Pauli string into the database.
            collect(db, transform(new_ps), new_ph)
            branches.extend(new_branches)
            # Track the high-water mark of branches held in memory.
            if len(branches) > infos['largest_n_branches_in_memory']:
                infos['largest_n_branches_in_memory'] = len(branches)
            # Re-order by descending position shift.
            branches = sorted(branches, key=lambda b: -b[2])
            infos['n_explored_branches'] += 1
            # Refresh the progressbar label.
            pbar.set_description(
                f'Collect branches ({len(branches)}/{max_n_branches})')
    # Hand back the remaining branches ordered by the position shift.
    return sorted(branches, key=lambda b: b[2])
def _depth_first_search(_update, db, branches, parallel, infos, info_init,
                        verbose, mpi_rank, mpi_size, **kwargs):
    """Exhaust all `branches` depth-first, serially or across `parallel` workers.

    Each branch is fully expanded by `_parallel_core`; results are folded into
    `db` through the user-supplied 'collect'/'transform'/'merge' callbacks and
    counters are accumulated into `infos`.  When ``parallel > 1`` the branches
    are split into ``kwargs['n_chunks']`` chunks dispatched to a process pool,
    polling every ``kwargs['sleep_time']`` seconds and aborting with
    ``MemoryError`` if virtual memory exceeds ``kwargs['max_virtual_memory']``
    percent.  `mpi_rank`/`mpi_size` are only used to round-robin which rank
    shows its progressbar.
    """
    # Define parallel core
    def _parallel_core(branches, db=None):
        """Fully explore `branches` (depth-first); return local (db, infos)."""
        # Initialize db
        if db is None:
            db = kwargs['db_init']()
        # Convert to list
        branches = list(branches)
        # Initialize infos
        infos = info_init()
        # Explore all branches
        while branches:
            # Get new branches
            (_new_ps, _new_ph), _new_branches = _update(*branches.pop())
            # Collect results
            kwargs['collect'](db, kwargs['transform'](_new_ps), _new_ph)
            # Update branches
            branches.extend(_new_branches)
            # Update infos
            infos['largest_n_branches_in_memory'] = max(
                len(branches), infos['largest_n_branches_in_memory'])
            # Update infos
            infos['n_explored_branches'] += 1
        return db, infos

    # If no parallelization is required, explore branches one by one
    if parallel == 1:
        from more_itertools import ichunked
        # Get number of chunks (aim at ~100 progress updates)
        chunk_size = max(1, len(branches) // 100)
        for _bs in tqdm(ichunked(branches, chunk_size),
                        total=len(branches) // chunk_size,
                        desc=f'Mem={virtual_memory().percent}%',
                        disable=not verbose):
            # Update database and infos
            db, _infos = _parallel_core(_bs, db)
            # Update infos
            infos['n_explored_branches'] += _infos['n_explored_branches']
            infos['largest_n_branches_in_memory'] = max(
                _infos['largest_n_branches_in_memory'],
                infos['largest_n_branches_in_memory'])
    # Otherwise, distribute workload among different cores
    else:
        # NOTE: globalize makes the closure picklable so Pool can ship it.
        with globalize(_parallel_core) as _parallel_core, Pool(
                parallel) as pool:
            # Apply async
            _fps = [
                pool.apply_async(_parallel_core, (_branches,))
                for _branches in distribute(kwargs['n_chunks'], branches)
            ]
            # _status[i] is True once chunk i has been collected and merged
            _status = [False] * len(_fps)
            with tqdm(total=len(_fps),
                      desc=f'Mem={virtual_memory().percent}%',
                      disable=not verbose) as pbar:
                _pending = len(_fps)
                while _pending:
                    # Wait
                    sleep(kwargs['sleep_time'])
                    # Activate/disactivate the progressbar: ranks take turns
                    # (one rank shown per second, round-robin on mpi_rank)
                    if verbose:
                        pbar.disable = int(time()) % mpi_size != mpi_rank
                    # Get virtual memory
                    _vm = virtual_memory()
                    _pending = 0
                    for _x, (_p, _s) in enumerate(zip(_fps, _status)):
                        if not _p.ready():
                            _pending += 1
                        elif not _s:
                            # Collect data
                            _new_db, _infos = _p.get()
                            # Merge datasets
                            kwargs['merge'](db, _new_db)
                            # Clear dataset
                            _new_db.clear()
                            # Update infos
                            infos['n_explored_branches'] += _infos[
                                'n_explored_branches']
                            infos['largest_n_branches_in_memory'] = max(
                                _infos['largest_n_branches_in_memory'],
                                infos['largest_n_branches_in_memory'])
                            # Set status so the chunk is merged only once
                            _status[_x] = True
                    # Update pbar
                    if verbose:
                        pbar.set_description(
                            (f'[{mpi_rank}] ' if mpi_size > 1 else '') + \
                            f'Mem={_vm.percent}%, ' + \
                            f'NThreads={infos["n_threads"]}, ' + \
                            f'NCPUs={infos["n_cpus"]}, ' + \
                            f'LoadAvg={getloadavg()[0]/infos["n_cpus"]*100:1.2f}%, ' + \
                            f'NBranches={infos["n_explored_branches"]}'
                        )
                        pbar.n = len(_fps) - _pending
                        pbar.refresh()
                    # Update infos: running (sum, count) pair for the average
                    infos['average_virtual_memory (GB)'] = (
                        infos['average_virtual_memory (GB)'][0] +
                        _vm.used / 2**30,
                        infos['average_virtual_memory (GB)'][1] + 1)
                    infos['peak_virtual_memory (GB)'] = max(
                        infos['peak_virtual_memory (GB)'], _vm.used / 2**30)
                    # If memory above threshold, raise error
                    if _vm.percent > kwargs['max_virtual_memory']:
                        raise MemoryError(
                            f'Memory above threshold: {_vm.percent}% > {kwargs["max_virtual_memory"]}%'
                        )
                # Last refresh
                if verbose:
                    pbar.refresh()
            # Check all chunks have been explored
            assert (np.alltrue(_status))
def update_pauli_string(circuit: Circuit,
                        pauli_string: {Circuit, dict[str, float]},
                        phase: float = 1,
                        parallel: {bool, int} = False,
                        return_info: bool = False,
                        use_mpi: bool = None,
                        compress: int = 4,
                        simplify: bool = True,
                        remove_id_gates: bool = True,
                        float_type: any = 'float32',
                        verbose: bool = False,
                        **kwargs) -> defaultdict:
    """
    Evolve density matrix accordingly to `circuit` using `pauli_string` as
    initial product state. The evolved density matrix will be represented as a
    set of different Pauli strings, each of them with a different phase, such
    that their sum corresponds to the evolved density matrix. The number of
    branches depends on the number of non-Clifford gates in `circuit`.

    Parameters
    ----------
    circuit: Circuit
        Circuit to use to evolve `pauli_string`.
    pauli_string: {Circuit, dict[str, float]}
        Pauli string to be evolved. `pauli_string` must be a `Circuit` composed
        of single qubit Pauli `Gate`s (that is, either `Gate('I')`, `Gate('X')`,
        `Gate('Y')` or `Gate('Z')`), each one acting on every qubit of
        `circuit`. If a dictionary is provided, every key of `pauli_string` must
        be a valid Pauli string. The size of each Pauli string must be equal to
        the number of qubits in `circuit`. Values in `pauli_string` will be
        used as initial phase for the given string.
    phase: float, optional
        Initial phase for `pauli_string`.
    atol: float, optional
        Discard all Pauli strings that have an absolute amplitude smaller than
        `atol`.
    parallel: int, optional
        Parallelize simulation (where possible). If `True`, the number of
        available cpus is used. Otherwise, a `parallel` number of threads is
        used.
    return_info: bool
        Return extra information collected during the evolution.
    use_mpi: bool, optional
        Use `MPI` if available. Unless `use_mpi=False`, `MPI` will be used if
        detected (for instance, if `mpiexec` is used to called HybridQ). If
        `use_mpi=True`, force the use of `MPI` (in case `MPI` is not
        automatically detected).
    compress: int, optional
        Compress `Circuit` using `utils.compress` prior the simulation.
    simplify: bool, optional
        Simplify `Circuit` using `utils.simplify` prior the simulation.
    remove_id_gates: bool, optional
        Remove `ID` gates prior the simulation.
    float_type: any, optional
        Float type to use for the simulation.
    verbose: bool, optional
        Verbose output.

    Returns
    -------
    dict[str, float] [, dict[any, any]]
        If `return_info=False`, `update_pauli_string` returns a `dict` of Pauli
        strings and the corresponding amplitude. The full density matrix can be
        reconstructed by resumming over all the Pauli string, weighted with the
        corresponding amplitude. If `return_info=True`, information gathered
        during the simulation are also returned.

    Other Parameters
    ----------------
    eps: float, optional (default: auto)
        Do not branch if the branch weight for the given non-Clifford operation
        is smaller than `eps`. `atol=1e-7` if `float_type=float32`, otherwise `atol=1e-8`
        if `float_type=float64`.
    atol: float, optional (default: auto)
        Remove elements from final state if such element as an absolute amplitude
        smaller than `atol`. `atol=1e-8` if `float_type=float32`, otherwise `atol=1e-12`
        if `float_type=float64`.
    branch_atol: float, optional
        Stop branching if the branch absolute amplitude is smaller than
        `branch_atol`. If not specified, it will be equal to `atol`.
    max_breadth_first_branches: int (default: auto)
        Max number of branches to collect using breadth first search. The number
        of branches collect during the breadth first phase will be split among
        the different threads (or nodes if using `MPI`).
    n_chunks: int (default: auto)
        Number of chunks to divide the branches obtained during the breadth
        first phase. The default value is twelve times the number of threads.
    max_virtual_memory: float (default: 80)
        Max virtual memory (%) that can be using during the simulation. If the
        used virtual memory is above `max_virtual_memory`, `update_pauli_string`
        will raise an error.
    sleep_time: float (default: 0.1)
        Completion of parallel processes is checked every `sleep_time`
        seconds.

    Example
    -------
    >>> from hybridq.circuit import utils
    >>> import numpy as np
    >>>
    >>> # Define circuit
    >>> circuit = Circuit(
    >>>     [Gate('X', qubits=[0])**1.2,
    >>>      Gate('ISWAP', qubits=[0, 1])**2.3])
    >>>
    >>> # Define Pauli string
    >>> pauli_string = Circuit([Gate('Z', qubits=[1])])
    >>>
    >>> # Get density matrix decomposed in Pauli strings
    >>> dm = clifford.update_pauli_string(circuit=circuit,
    >>>                                   pauli_string=pauli_string,
    >>>                                   float_type='float64')
    >>>
    >>> dm
    defaultdict(<function hybridq.circuit.simulation.clifford.update_pauli_string.<locals>._db_init.<locals>.<lambda>()>,
                {'IZ': 0.7938926261462365,
                 'YI': -0.12114687473997318,
                 'ZI': -0.166744368113685,
                 'ZX': 0.2377641290737882,
                 'YX': -0.3272542485937367,
                 'XY': -0.40450849718747345})
    >>> # Reconstruct density matrix
    >>> U = sum(phase * np.kron(Gate(g1).matrix(),
    >>>         Gate(g2).matrix()) for (g1, g2), phase in dm.items())
    >>>
    >>> U
    array([[ 0.62714826+0.j        ,  0.23776413+0.j        ,
             0.        +0.12114687j,  0.        +0.73176275j],
           [ 0.23776413+0.j        , -0.96063699+0.j        ,
             0.        -0.07725425j,  0.        +0.12114687j],
           [ 0.        -0.12114687j,  0.        +0.07725425j,
             0.96063699+0.j        , -0.23776413+0.j        ],
           [ 0.        -0.73176275j,  0.        -0.12114687j,
            -0.23776413+0.j        , -0.62714826+0.j        ]])
    >>> np.allclose(utils.matrix(circuit + pauli_string + circuit.inv()),
    >>>             U,
    >>>             atol=1e-8)
    True
    >>> U[0b11, 0b11]
    (-0.6271482580325515+0j)
    """
    # ==== Set default parameters ====
    # If use_mpi==False, force the non-use of MPI
    if use_mpi is None and _detect_mpi:
        # Warn that MPI is used because detected
        warn("MPI has been detected. Using MPI.")
        # Set MPI to true
        use_mpi = True
    # If parallel==True, use number of cpus
    if type(parallel) is bool:
        parallel = cpu_count() if parallel else 1
    else:
        parallel = int(parallel)
        if parallel <= 0:
            warn("'parallel' must be a positive integer. Setting parallel=1")
            parallel = 1
    # utils.globalize may not work properly on MacOSX systems .. for now, let's
    # disable parallelization for MacOSX
    if parallel > 1:
        from platform import system
        from warnings import warn
        if system() == 'Darwin':
            warn(
                "'utils.globalize' may not work on MacOSX. Disabling parallelization."
            )
            parallel = 1
    # Fix atol (default depends on the float type precision)
    if 'atol' in kwargs:
        atol = kwargs['atol']
        del (kwargs['atol'])
    else:
        float_type = np.dtype(float_type)
        if float_type == np.float64:
            atol = 1e-12
        elif float_type == np.float32:
            atol = 1e-8
        else:
            raise ValueError(f'Unsupported array dtype: {float_type}')
    # Fix branch_atol
    if 'branch_atol' in kwargs:
        branch_atol = kwargs['branch_atol']
        del (kwargs['branch_atol'])
    else:
        branch_atol = atol
    # Fix eps
    if 'eps' in kwargs:
        eps = kwargs['eps']
        del (kwargs['eps'])
    else:
        float_type = np.dtype(float_type)
        if float_type == np.float64:
            eps = 1e-8
        elif float_type == np.float32:
            eps = 1e-7
        else:
            raise ValueError(f'Unsupported array dtype: {float_type}')

    # Set default db initialization
    def _db_init():
        return defaultdict(int)

    # Set default transform: map internal int codes back to a Pauli string
    def _transform(ps):
        # Join bitstring
        return ''.join({_X: 'X', _Y: 'Y', _Z: 'Z', _I: 'I'}[op] for op in ps)

    # Set default collect
    def _collect(db, ps, ph):
        # Update final paulis
        db[ps] += ph
        # Remove elements close to zero
        if abs(db[ps]) < atol:
            del (db[ps])

    # Set default merge
    def _merge(db, db_new, use_tuple=False):
        # Update final paulis (db_new may be a dict or an iterable of tuples)
        for ps, ph in db_new if use_tuple else db_new.items():
            # Collect results
            kwargs['collect'](db, ps, ph)

    kwargs.setdefault('max_breadth_first_branches', min(4 * 12 * parallel,
                                                        2**14))
    kwargs.setdefault('n_chunks', 12 * parallel)
    kwargs.setdefault('max_virtual_memory', 80)
    kwargs.setdefault('sleep_time', 0.1)
    kwargs.setdefault('collect', _collect)
    kwargs.setdefault('transform', _transform)
    kwargs.setdefault('merge', _merge)
    kwargs.setdefault('db_init', _db_init)
    # Get MPI info
    if use_mpi:
        from mpi4py import MPI
        _mpi_comm = MPI.COMM_WORLD
        _mpi_size = _mpi_comm.Get_size()
        _mpi_rank = _mpi_comm.Get_rank()
        kwargs.setdefault('max_breadth_first_branches_mpi',
                          min(_mpi_size * 2**9, 2**14))
        kwargs.setdefault('mpi_chunk_max_size', 2**20)
        kwargs.setdefault('mpi_merge', True)
    # Get complex_type from float_type
    complex_type = (np.array([1], dtype=float_type) +
                    1j * np.array([1], dtype=float_type)).dtype
    # Local verbose (only rank 0 is verbose under MPI)
    _verbose = verbose and (not use_mpi or _mpi_rank == 0)
    # =========== CHECKS =============
    if type(pauli_string) == Circuit:
        from collections import Counter
        # Initialize error message
        _err_msg = "'pauli_string' must contain only I, X, Y and Z gates acting on different qubits."
        # Check qubits match with circuit
        if any(g.n_qubits != 1 or not g.qubits for g in pauli_string) or set(
                pauli_string.all_qubits()).difference(
                    circuit.all_qubits()) or set(
                        Counter(gate.qubits[0]
                                for gate in pauli_string).values()).difference(
                                    [1]):
            raise ValueError(_err_msg)
        # Get ideal paulis
        _ig = list(map(lambda n: Gate(n).matrix(), 'IXYZ'))

        # Get the correct pauli
        def _get_pauli(gate):
            # Get matrix
            U = gate.matrix()
            # Get right pauli (compare matrices, so e.g. tagged gates work)
            p = next(
                (p for x, p in enumerate('IXYZ') if np.allclose(_ig[x], U)),
                None)
            # If not found, raise error
            if not p:
                raise ValueError(_err_msg)
            # Otherwise, return pauli
            return Gate(p, qubits=gate.qubits)

        # Reconstruct paulis
        pauli_string = Circuit(map(_get_pauli, pauli_string))
    else:
        # Check that all strings only have I,X,Y,Z tokens
        _n_qubits = len(circuit.all_qubits())
        if any(
                set(p).difference('IXYZ') or len(p) != _n_qubits
                for p in pauli_string):
            raise ValueError(
                f"'pauli_string' must contain only I, X, Y and Z gates acting on different qubits."
            )
    # ================================
    # Start pre-processing time
    _prep_time = time()
    # Get qubits
    _qubits = circuit.all_qubits()
    # Remove ID gates
    if remove_id_gates:
        circuit = Circuit(gate for gate in circuit if gate.name != 'I')
    # Simplify circuit
    if simplify:
        # Get qubits to pin
        if type(pauli_string) == Circuit:
            # Pinned qubits
            _pinned_qubits = pauli_string.all_qubits()
        else:
            # Find qubits to pin: any qubit carrying a non-identity Pauli
            _pinned_qubits = set.union(
                *({q
                   for q, g in zip(_qubits, p)
                   if g != 'I'}
                  for p in pauli_string))
        # Simplify
        circuit = utils.simplify(circuit,
                                 remove_id_gates=remove_id_gates,
                                 verbose=_verbose)
        circuit = utils.popright(utils.simplify(circuit),
                                 pinned_qubits=set(_pinned_qubits).intersection(
                                     circuit.all_qubits()),
                                 verbose=_verbose)
    # Compress circuit
    circuit = Circuit(
        utils.to_matrix_gate(c, complex_type=complex_type)
        for c in tqdm(utils.compress(circuit, max_n_qubits=compress),
                      disable=not _verbose,
                      desc=f"Compress ({int(compress)})"))
    # Pad missing qubits (identity gates keep the qubit set complete)
    circuit += Circuit(
        Gate('MATRIX', [q], U=np.eye(2))
        for q in set(_qubits).difference(circuit.all_qubits()))
    # Get qubits map
    qubits_map = kwargs['qubits_map'] if 'qubits_map' in kwargs else {
        q: x for x, q in enumerate(circuit.all_qubits())
    }
    # Pre-process circuit (reversed: the Pauli string is evolved backwards
    # through the circuit in the Heisenberg picture)
    _LS_cache = {}
    _P_cache = {}
    circuit = [
        g for gate in tqdm(reversed(circuit),
                           total=len(circuit),
                           disable=not _verbose,
                           desc='Pre-processing')
        for g in _process_gate(gate, LS_cache=_LS_cache, P_cache=_P_cache)
    ]
    _LS_cache.clear()
    _P_cache.clear()
    del (_LS_cache)
    del (_P_cache)
    # Get maximum number of qubits and parameters
    _max_n_qubits = max(max(len(gate[1]) for gate in circuit), 2)
    _max_n_params = max(len(gate[2]) for gate in circuit)
    # Get qubits (rows zero-padded to the widest gate)
    qubits = np.array([
        np.pad([qubits_map[q]
                for q in gate[1]], (0, _max_n_qubits - len(gate[1])))
        for gate in circuit
    ],
                      dtype='int32')
    # Get parameters (rounded to the precision implied by atol)
    params = np.round(
        np.array([
            np.pad(gate[2], (0, _max_n_params - len(gate[2])))
            for gate in circuit
        ],
                 dtype=float_type),
        -int(np.floor(np.log10(atol))) if atol < 1 else 0)
    # Remove -0
    params[np.abs(params) == 0] = 0
    # Quick check: every gate name must map to a module-level int code
    assert (all('_' + gate[0] in globals() for gate in circuit))
    # Get gates
    gates = np.array([globals()['_' + gate[0]] for gate in circuit],
                     dtype='int')
    # Compute expected number of paths
    _log2_n_expected_branches = 0
    for _idx in np.where(np.isin(gates, _MATRIX_SET))[0]:
        _nq = (gates[_idx] // _gate_mul) + 1
        _p = params[_idx][:4**(2 * _nq)]
        _log2_n_expected_branches += np.sum(
            np.log2(
                np.sum(np.abs(np.reshape(_p, (4**_nq, 4**_nq))) > eps,
                       axis=1))) / 4**_nq
    # Check
    assert (len(gates) == len(qubits) and len(gates) == len(params))
    # Initialize branches
    if type(pauli_string) == Circuit:
        # Convert Pauli string to the internal int encoding
        _pauli_string = np.array([_I] * len(qubits_map), dtype='int')
        for gate in pauli_string:
            if gate.name != 'I':
                _pauli_string[qubits_map[gate.qubits[0]]] = {
                    'X': _X,
                    'Y': _Y,
                    'Z': _Z
                }[gate.name]
        # Initialize branches
        branches = [(_pauli_string, phase, 0)]
    else:
        # Initialize branches (one per Pauli string above threshold)
        branches = [(np.array([{
            'I': _I,
            'X': _X,
            'Y': _Y,
            'Z': _Z
        }[g]
                              for g in p],
                             dtype='int'), phase, 0)
                    for p, phase in pauli_string.items()
                    if abs(phase) > atol]
    # Initialize final Pauli strings
    db = kwargs['db_init']()
    # Define update function
    _update = partial_func(_update_pauli_string,
                           gates,
                           qubits,
                           params,
                           eps=eps,
                           atol=branch_atol)
    # End pre-processing time
    _prep_time = time() - _prep_time
    # Initialize infos
    _info_init = lambda: {
        'n_explored_branches': 0,
        'largest_n_branches_in_memory': 0,
        'peak_virtual_memory (GB)': virtual_memory().used / 2**30,
        # (running sum, count) pair; reduced to the average further below
        'average_virtual_memory (GB)': (virtual_memory().used / 2**30, 1),
        'n_threads': parallel,
        'n_cpus': cpu_count(),
        'eps': eps,
        'atol': atol,
        'branch_atol': branch_atol,
        'float_type': str(float_type),
        'log2_n_expected_branches': _log2_n_expected_branches
    }
    infos = _info_init()
    infos['memory_baseline (GB)'] = virtual_memory().used / 2**30
    if not use_mpi or _mpi_rank == 0:
        infos['n_explored_branches'] = 1
        infos['largest_n_branches_in_memory'] = 1
    # Start clock
    _init_time = time()
    # Scatter first batch of branches to different MPI nodes
    if use_mpi and _mpi_size > 1:
        if _mpi_rank == 0:
            # Explore branches (breadth-first search)
            branches = _breadth_first_search(
                _update,
                db,
                branches,
                max_n_branches=kwargs['max_breadth_first_branches_mpi'],
                infos=infos,
                verbose=verbose,
                mpi_rank=_mpi_rank,
                **kwargs)
        # Distribute branches
        branches = _mpi_comm.scatter(
            [list(x) for x in distribute(_mpi_size, branches)], root=0)
    # Explore branches (breadth-first search)
    branches = _breadth_first_search(
        _update,
        db,
        branches,
        max_n_branches=kwargs['max_breadth_first_branches'],
        infos=infos,
        verbose=verbose if not use_mpi or _mpi_rank == 0 else False,
        **kwargs)
    # If there are remaining branches, use depth-first search
    if branches:
        _depth_first_search(_update,
                            db,
                            branches,
                            parallel=parallel,
                            infos=infos,
                            info_init=_info_init,
                            verbose=verbose,
                            mpi_rank=_mpi_rank if use_mpi else 0,
                            mpi_size=_mpi_size if use_mpi else 1,
                            **kwargs)
    # Update infos: reduce (sum, count) to the actual average, baseline-corrected
    infos['average_virtual_memory (GB)'] = infos['average_virtual_memory (GB)'][
        0] / infos['average_virtual_memory (GB)'][1] - infos[
            'memory_baseline (GB)']
    infos['peak_virtual_memory (GB)'] -= infos['memory_baseline (GB)']
    # Update branching time
    infos['branching_time (s)'] = time() - _init_time
    # Collect results (binary-tree reduction: in each round the upper half of
    # ranks sends its db/infos to the lower half, halving _size each time)
    if use_mpi and _mpi_size > 1 and kwargs['mpi_merge']:
        for _k in infos:
            infos[_k] = [infos[_k]]
        # Initialize pbar
        if _mpi_rank == 0:
            pbar = tqdm(total=int(np.ceil(np.log2(_mpi_size))),
                        disable=not verbose,
                        desc='Collect results')
        # Initialize tag and size
        _tag = 0
        _size = _mpi_size
        while _size > 1:
            # Update progressbar
            if _mpi_rank == 0:
                pbar.set_description(
                    f'Collect results (Mem={virtual_memory().percent}%)')
            # Get shift
            _shift = (_size // 2) + (_size % 2)
            if _mpi_rank < (_size // 2):
                # Get infos
                _infos = _mpi_comm.recv(source=_mpi_rank + _shift, tag=_tag)
                # Update infos
                for _k in infos:
                    infos[_k].extend(_infos[_k])
                # Get number of chunks
                _n_chunks = _mpi_comm.recv(source=_mpi_rank + _shift,
                                           tag=_tag + 1)
                if _n_chunks > 1:
                    # Initialize _process
                    # NOTE: this inner pbar intentionally shadows the outer one
                    with tqdm(range(_n_chunks),
                              desc='Get db',
                              leave=False,
                              disable=_mpi_rank != 0) as pbar:
                        for _ in pbar:
                            # Receive db
                            _db = _mpi_comm.recv(source=_mpi_rank + _shift,
                                                 tag=_tag + 2)
                            # Merge datasets
                            kwargs['merge'](db, _db, use_tuple=True)
                            # Update description
                            pbar.set_description(
                                f'Get db (Mem={virtual_memory().percent}%)')
                            # Clear dataset
                            _db.clear()
                else:
                    # Receive db
                    _db = _mpi_comm.recv(source=_mpi_rank + _shift,
                                         tag=_tag + 2)
                    # Merge datasets
                    kwargs['merge'](db, _db)
                    # Clear dataset
                    _db.clear()
            elif _shift <= _mpi_rank < _size:
                # Remove default_factory because pickle is picky regarding local objects
                db.default_factory = None
                # Send infos
                _mpi_comm.send(infos, dest=_mpi_rank - _shift, tag=_tag)
                # Compute chunks
                _n_chunks = kwargs['mpi_chunk_max_size']
                _n_chunks = (len(db) // _n_chunks) + (
                    (len(db) % _n_chunks) != 0)
                # Send number of chunks
                _mpi_comm.send(_n_chunks, dest=_mpi_rank - _shift, tag=_tag + 1)
                if _n_chunks > 1:
                    # Split db in chunks
                    for _db in chunked(db.items(),
                                       kwargs['mpi_chunk_max_size']):
                        _mpi_comm.send(_db,
                                       dest=_mpi_rank - _shift,
                                       tag=_tag + 2)
                else:
                    # Send db
                    _mpi_comm.send(db, dest=_mpi_rank - _shift, tag=_tag + 2)
                # Reset db and infos (sender side is done after shipping)
                db.clear()
                infos.clear()
            # update size
            _tag += 3
            _size = _shift
            _mpi_comm.barrier()
            # Update progressbar
            if _mpi_rank == 0:
                pbar.set_description(
                    f'Collect results (Mem={virtual_memory().percent}%)')
                pbar.update()
    # Update runtime
    if not use_mpi or _mpi_rank == 0 or not kwargs['mpi_merge']:
        infos['runtime (s)'] = time() - _init_time
        infos['pre-processing (s)'] = _prep_time
    # Check that all the others dbs/infos (excluding rank==0) has been cleared up
    if use_mpi and _mpi_rank > 0 and kwargs['mpi_merge']:
        assert (not len(db) and not len(infos))
    if return_info:
        return db, infos
    else:
        return db
def expectation_value(circuit: Circuit, op: Circuit, initial_state: str,
                      **kwargs) -> float:
    """
    Compute the expectation value of `op` for the given `circuit`, using
    `initial_state` as initial state.

    Parameters
    ----------
    circuit: Circuit
        Circuit to simulate.
    op: Circuit
        Operator used to compute the expectation value. `op` must be a valid
        `Circuit` containing only Pauli gates (that is, either `I`, `X`, `Y` or
        `Z` gates) acting on different qubits.
    initial_state: str
        Initial state used to compute the expectation value. Valid tokens for
        `initial_state` are:
        - `0`: qubit is set to `0` in the computational basis,
        - `1`: qubit is set to `1` in the computational basis,
        - `+`: qubit is set to `+` state in the computational basis,
        - `-`: qubit is set to `-` state in the computational basis.

    Returns
    -------
    float [, dict[any, any]]
        The expectation value of the operator `op`. If `return_info=True`,
        information gathered during the simulation are also returned.

    Other Parameters
    ----------------
    `expectation_value` uses all valid parameters for `update_pauli_string`.

    See Also
    --------
    `update_pauli_string`

    Example
    -------
    >>> # Define circuit
    >>> circuit = Circuit(
    >>>     [Gate('X', qubits=[0])**1.2,
    >>>      Gate('ISWAP', qubits=[0, 1])**2.3])
    >>>
    >>> # Define operator
    >>> op = Circuit([Gate('Z', qubits=[1])])
    >>>
    >>> # Get expectation value
    >>> clifford.expectation_value(circuit=circuit,
    >>>                            op=op,
    >>>                            initial_state='11',
    >>>                            float_type='float64')
    -0.6271482580325515
    """
    # ==== Set default parameters ====
    # db is a single running scalar (wrapped in a list so it is mutable)
    def _db_init():
        return [0]

    def _collect(db, ops, ph):
        # Compute expectation value given pauli string: only strings made of
        # I/Z survive <0...0|P|0...0>, each contributing +1 per qubit
        if next((False for x in ops if x in 'XY'), True):
            db[0] += ph

    def _merge(db, db_new):
        db[0] += db_new[0]

    def _prepare_state(state, qubits):
        """Build the state-preparation circuit for the given token string."""
        c = Circuit()
        for q, s in zip(qubits, state):
            if s == '0':
                pass
            elif s == '1':
                c.append(Gate('X', [q]))
            elif s == '+':
                c.append(Gate('H', [q]))
            elif s == '-':
                c.extend([Gate('X', [q]), Gate('H', [q])])
            else:
                raise ValueError(f"Unexpected token '{s}'")
        return c

    kwargs.setdefault('use_mpi', None)
    kwargs.setdefault('return_info', False)
    # If use_mpi==False, force the non-use of MPI
    if kwargs['use_mpi'] is None and _detect_mpi:
        # Warn that MPI is used because detected
        warn("MPI has been detected. Using MPI.")
        # Set MPI to true
        kwargs['use_mpi'] = True
    # Get MPI info
    if kwargs['use_mpi']:
        from mpi4py import MPI
        _mpi_comm = MPI.COMM_WORLD
        _mpi_size = _mpi_comm.Get_size()
        _mpi_rank = _mpi_comm.Get_rank()
    # ================================
    # Prepare initial state
    if type(initial_state) == str:
        # Check
        if len(initial_state) != len(circuit.all_qubits()):
            raise ValueError(
                f"'initial_state' has the wrong number of qubits "
                f"(expected {len(circuit.all_qubits())}, got {len(initial_state)})."
            )
        # Get state
        initial_state = _prepare_state(initial_state, circuit.all_qubits())
    else:
        raise ValueError(
            f"'{type(initial_state)}' not supported for 'initial_state'.")
    # Get expectation value (mpi_merge=False: partial values are reduced here)
    _res = update_pauli_string(initial_state + circuit,
                               op,
                               db_init=_db_init,
                               collect=_collect,
                               merge=_merge,
                               mpi_merge=False,
                               **kwargs)
    # Collect results
    if kwargs['use_mpi'] and _mpi_size > 1:
        _all_res = _mpi_comm.gather(_res, root=0)
        if _mpi_rank == 0:
            if kwargs['return_info']:
                # Each _res is a (db, infos) pair; group infos by key
                infos = {}
                for _, _infos in _all_res:
                    for _k, _v in _infos.items():
                        if _k not in infos:
                            infos[_k] = [_v]
                        else:
                            infos[_k].append(_v)
                exp_value = sum(ev[0] for ev, _ in _all_res)
            else:
                exp_value = sum(ev[0] for ev in _all_res)
        else:
            # Non-root ranks return (None, None) / None
            exp_value = infos = None
    else:
        if kwargs['return_info']:
            exp_value = _res[0][0]
            infos = _res[1]
        else:
            exp_value = _res[0]
    # Return expectation value
    if kwargs['return_info']:
        return exp_value, infos
    else:
        return exp_value
| [
"numpy.isin",
"psutil.virtual_memory",
"numpy.abs",
"hybridq.circuit.Circuit",
"hybridq.gate.Gate",
"numpy.allclose",
"numba.njit",
"hybridq.circuit.utils.simplify",
"collections.defaultdict",
"hybridq.utils.globalize",
"numpy.eye",
"numpy.copy",
"hybridq.circuit.utils.compress",
"numpy.re... | [((1994, 2031), 'numba.njit', 'numba.njit', ([], {'fastmath': '(True)', 'cache': '(True)'}), '(fastmath=True, cache=True)\n', (2004, 2031), False, 'import numba\n'), ((34379, 34385), 'time.time', 'time', ([], {}), '()\n', (34383, 34385), False, 'from time import sleep, time\n'), ((38794, 38883), 'functools.partial', 'partial_func', (['_update_pauli_string', 'gates', 'qubits', 'params'], {'eps': 'eps', 'atol': 'branch_atol'}), '(_update_pauli_string, gates, qubits, params, eps=eps, atol=\n branch_atol)\n', (38806, 38883), True, 'from functools import partial as partial_func\n'), ((39855, 39861), 'time.time', 'time', ([], {}), '()\n', (39859, 39861), False, 'from time import sleep, time\n'), ((941, 963), 'os.path.basename', 'basename', (["environ['_']"], {}), "(environ['_'])\n", (949, 963), False, 'from os.path import basename\n'), ((15971, 16021), 'tqdm.auto.tqdm', 'tqdm', ([], {'desc': '"""Collect branches"""', 'disable': '(not verbose)'}), "(desc='Collect branches', disable=not verbose)\n", (15975, 16021), False, 'from tqdm.auto import tqdm\n'), ((22255, 22274), 'numpy.alltrue', 'np.alltrue', (['_status'], {}), '(_status)\n', (22265, 22274), True, 'import numpy as np\n'), ((29129, 29170), 'warnings.warn', 'warn', (['"""MPI has been detected. Using MPI."""'], {}), "('MPI has been detected. 
Using MPI.')\n", (29133, 29170), False, 'from warnings import warn\n'), ((30041, 30061), 'numpy.dtype', 'np.dtype', (['float_type'], {}), '(float_type)\n', (30049, 30061), True, 'import numpy as np\n'), ((30570, 30590), 'numpy.dtype', 'np.dtype', (['float_type'], {}), '(float_type)\n', (30578, 30590), True, 'import numpy as np\n'), ((30870, 30886), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (30881, 30886), False, 'from collections import defaultdict\n'), ((34504, 34557), 'hybridq.circuit.Circuit', 'Circuit', (["(gate for gate in circuit if gate.name != 'I')"], {}), "(gate for gate in circuit if gate.name != 'I')\n", (34511, 34557), False, 'from hybridq.circuit import Circuit\n'), ((35020, 35094), 'hybridq.circuit.utils.simplify', 'utils.simplify', (['circuit'], {'remove_id_gates': 'remove_id_gates', 'verbose': '_verbose'}), '(circuit, remove_id_gates=remove_id_gates, verbose=_verbose)\n', (35034, 35094), True, 'import hybridq.circuit.utils as utils\n'), ((39062, 39068), 'time.time', 'time', ([], {}), '()\n', (39066, 39068), False, 'from time import sleep, time\n'), ((41671, 41677), 'time.time', 'time', ([], {}), '()\n', (41675, 41677), False, 'from time import sleep, time\n'), ((48159, 48168), 'hybridq.circuit.Circuit', 'Circuit', ([], {}), '()\n', (48166, 48168), False, 'from hybridq.circuit import Circuit\n'), ((48815, 48856), 'warnings.warn', 'warn', (['"""MPI has been detected. Using MPI."""'], {}), "('MPI has been detected. 
Using MPI.')\n", (48819, 48856), False, 'from warnings import warn\n'), ((18235, 18265), 'more_itertools.ichunked', 'ichunked', (['branches', 'chunk_size'], {}), '(branches, chunk_size)\n', (18243, 18265), False, 'from more_itertools import ichunked\n'), ((18879, 18904), 'hybridq.utils.globalize', 'globalize', (['_parallel_core'], {}), '(_parallel_core)\n', (18888, 18904), False, 'from hybridq.utils import globalize, kron\n'), ((18924, 18938), 'multiprocessing.Pool', 'Pool', (['parallel'], {}), '(parallel)\n', (18928, 18938), False, 'from multiprocessing import Pool\n'), ((29316, 29327), 'os.cpu_count', 'cpu_count', ([], {}), '()\n', (29325, 29327), False, 'from os import cpu_count\n'), ((29428, 29493), 'warnings.warn', 'warn', (['"""\'parallel\' must be a positive integer. Setting parallel=1"""'], {}), '("\'parallel\' must be a positive integer. Setting parallel=1")\n', (29432, 29493), False, 'from warnings import warn\n'), ((29744, 29752), 'platform.system', 'system', ([], {}), '()\n', (29750, 29752), False, 'from platform import system\n'), ((29778, 29854), 'warnings.warn', 'warn', (['"""\'utils.globalize\' may not work on MacOSX. Disabling parallelization."""'], {}), '("\'utils.globalize\' may not work on MacOSX. 
Disabling parallelization.")\n', (29782, 29854), False, 'from warnings import warn\n'), ((32397, 32428), 'numpy.array', 'np.array', (['[1]'], {'dtype': 'float_type'}), '([1], dtype=float_type)\n', (32405, 32428), True, 'import numpy as np\n'), ((33788, 33815), 'hybridq.gate.Gate', 'Gate', (['p'], {'qubits': 'gate.qubits'}), '(p, qubits=gate.qubits)\n', (33792, 33815), False, 'from hybridq.gate import Gate\n'), ((35194, 35217), 'hybridq.circuit.utils.simplify', 'utils.simplify', (['circuit'], {}), '(circuit)\n', (35208, 35217), True, 'import hybridq.circuit.utils as utils\n'), ((35466, 35516), 'hybridq.circuit.utils.to_matrix_gate', 'utils.to_matrix_gate', (['c'], {'complex_type': 'complex_type'}), '(c, complex_type=complex_type)\n', (35486, 35516), True, 'import hybridq.circuit.utils as utils\n'), ((37118, 37132), 'numpy.abs', 'np.abs', (['params'], {}), '(params)\n', (37124, 37132), True, 'import numpy as np\n'), ((37446, 37473), 'numpy.isin', 'np.isin', (['gates', '_MATRIX_SET'], {}), '(gates, _MATRIX_SET)\n', (37453, 37473), True, 'import numpy as np\n'), ((39401, 39412), 'os.cpu_count', 'cpu_count', ([], {}), '()\n', (39410, 39412), False, 'from os import cpu_count\n'), ((39660, 39676), 'psutil.virtual_memory', 'virtual_memory', ([], {}), '()\n', (39674, 39676), False, 'from psutil import virtual_memory, getloadavg\n'), ((45641, 45647), 'time.time', 'time', ([], {}), '()\n', (45645, 45647), False, 'from time import sleep, time\n'), ((32456, 32487), 'numpy.array', 'np.array', (['[1]'], {'dtype': 'float_type'}), '([1], dtype=float_type)\n', (32464, 32487), True, 'import numpy as np\n'), ((38378, 38453), 'numpy.array', 'np.array', (["[{'I': _I, 'X': _X, 'Y': _Y, 'Z': _Z}[g] for g in p]"], {'dtype': '"""int"""'}), "([{'I': _I, 'X': _X, 'Y': _Y, 'Z': _Z}[g] for g in p], dtype='int')\n", (38386, 38453), True, 'import numpy as np\n'), ((14800, 14812), 'hybridq.utils.kron', 'kron', (['g1', 'g2'], {}), '(g1, g2)\n', (14804, 14812), False, 'from hybridq.utils import 
globalize, kron\n'), ((14880, 14921), 'numpy.reshape', 'np.reshape', (['W', '((2 ** (2 * n_qubits),) * 2)'], {}), '(W, (2 ** (2 * n_qubits),) * 2)\n', (14890, 14921), True, 'import numpy as np\n'), ((19109, 19149), 'more_itertools.distribute', 'distribute', (["kwargs['n_chunks']", 'branches'], {}), "(kwargs['n_chunks'], branches)\n", (19119, 19149), False, 'from more_itertools import distribute, chunked, flatten\n'), ((19479, 19506), 'time.sleep', 'sleep', (["kwargs['sleep_time']"], {}), "(kwargs['sleep_time'])\n", (19484, 19506), False, 'from time import sleep, time\n'), ((19725, 19741), 'psutil.virtual_memory', 'virtual_memory', ([], {}), '()\n', (19739, 19741), False, 'from psutil import virtual_memory, getloadavg\n'), ((35539, 35585), 'hybridq.circuit.utils.compress', 'utils.compress', (['circuit'], {'max_n_qubits': 'compress'}), '(circuit, max_n_qubits=compress)\n', (35553, 35585), True, 'import hybridq.circuit.utils as utils\n'), ((35770, 35779), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (35776, 35779), True, 'import numpy as np\n'), ((39246, 39262), 'psutil.virtual_memory', 'virtual_memory', ([], {}), '()\n', (39260, 39262), False, 'from psutil import virtual_memory, getloadavg\n'), ((40454, 40485), 'more_itertools.distribute', 'distribute', (['_mpi_size', 'branches'], {}), '(_mpi_size, branches)\n', (40464, 40485), False, 'from more_itertools import distribute, chunked, flatten\n'), ((33577, 33599), 'numpy.allclose', 'np.allclose', (['_ig[x]', 'U'], {}), '(_ig[x], U)\n', (33588, 33599), True, 'import numpy as np\n'), ((37053, 37067), 'numpy.log10', 'np.log10', (['atol'], {}), '(atol)\n', (37061, 37067), True, 'import numpy as np\n'), ((39317, 39333), 'psutil.virtual_memory', 'virtual_memory', ([], {}), '()\n', (39331, 39333), False, 'from psutil import virtual_memory, getloadavg\n'), ((48307, 48321), 'hybridq.gate.Gate', 'Gate', (['"""X"""', '[q]'], {}), "('X', [q])\n", (48311, 48321), False, 'from hybridq.gate import Gate\n'), ((14499, 14508), 
'hybridq.gate.Gate', 'Gate', (['"""I"""'], {}), "('I')\n", (14503, 14508), False, 'from hybridq.gate import Gate\n'), ((14555, 14564), 'hybridq.gate.Gate', 'Gate', (['"""X"""'], {}), "('X')\n", (14559, 14564), False, 'from hybridq.gate import Gate\n'), ((14611, 14620), 'hybridq.gate.Gate', 'Gate', (['"""Y"""'], {}), "('Y')\n", (14615, 14620), False, 'from hybridq.gate import Gate\n'), ((14667, 14676), 'hybridq.gate.Gate', 'Gate', (['"""Z"""'], {}), "('Z')\n", (14671, 14676), False, 'from hybridq.gate import Gate\n'), ((18362, 18378), 'psutil.virtual_memory', 'virtual_memory', ([], {}), '()\n', (18376, 18378), False, 'from psutil import virtual_memory, getloadavg\n'), ((33327, 33334), 'hybridq.gate.Gate', 'Gate', (['n'], {}), '(n)\n', (33331, 33334), False, 'from hybridq.gate import Gate\n'), ((41930, 41948), 'numpy.log2', 'np.log2', (['_mpi_size'], {}), '(_mpi_size)\n', (41937, 41948), True, 'import numpy as np\n'), ((48375, 48389), 'hybridq.gate.Gate', 'Gate', (['"""H"""', '[q]'], {}), "('H', [q])\n", (48379, 48389), False, 'from hybridq.gate import Gate\n'), ((15304, 15311), 'hybridq.gate.Gate', 'Gate', (['g'], {}), '(g)\n', (15308, 15311), False, 'from hybridq.gate import Gate\n'), ((33082, 33130), 'collections.Counter', 'Counter', (['(gate.qubits[0] for gate in pauli_string)'], {}), '(gate.qubits[0] for gate in pauli_string)\n', (33089, 33130), False, 'from collections import Counter\n'), ((37661, 37697), 'numpy.reshape', 'np.reshape', (['_p', '(4 ** _nq, 4 ** _nq)'], {}), '(_p, (4 ** _nq, 4 ** _nq))\n', (37671, 37697), True, 'import numpy as np\n'), ((42294, 42310), 'psutil.virtual_memory', 'virtual_memory', ([], {}), '()\n', (42308, 42310), False, 'from psutil import virtual_memory, getloadavg\n'), ((45463, 45479), 'psutil.virtual_memory', 'virtual_memory', ([], {}), '()\n', (45477, 45479), False, 'from psutil import virtual_memory, getloadavg\n'), ((19280, 19296), 'psutil.virtual_memory', 'virtual_memory', ([], {}), '()\n', (19294, 19296), False, 'from 
psutil import virtual_memory, getloadavg\n'), ((19626, 19632), 'time.time', 'time', ([], {}), '()\n', (19630, 19632), False, 'from time import sleep, time\n'), ((48444, 48458), 'hybridq.gate.Gate', 'Gate', (['"""X"""', '[q]'], {}), "('X', [q])\n", (48448, 48458), False, 'from hybridq.gate import Gate\n'), ((48460, 48474), 'hybridq.gate.Gate', 'Gate', (['"""H"""', '[q]'], {}), "('H', [q])\n", (48464, 48474), False, 'from hybridq.gate import Gate\n'), ((6880, 6916), 'numpy.reshape', 'np.reshape', (['params[pos][:16]', '(4, 4)'], {}), '(params[pos][:16], (4, 4))\n', (6890, 6916), True, 'import numpy as np\n'), ((7178, 7189), 'numpy.abs', 'np.abs', (['_ws'], {}), '(_ws)\n', (7184, 7189), True, 'import numpy as np\n'), ((7908, 7949), 'numpy.reshape', 'np.reshape', (['params[pos][:256]', '(4, 4, 16)'], {}), '(params[pos][:256], (4, 4, 16))\n', (7918, 7949), True, 'import numpy as np\n'), ((43582, 43598), 'psutil.virtual_memory', 'virtual_memory', ([], {}), '()\n', (43596, 43598), False, 'from psutil import virtual_memory, getloadavg\n'), ((7016, 7027), 'numpy.abs', 'np.abs', (['_ws'], {}), '(_ws)\n', (7022, 7027), True, 'import numpy as np\n'), ((8215, 8226), 'numpy.abs', 'np.abs', (['_ws'], {}), '(_ws)\n', (8221, 8226), True, 'import numpy as np\n'), ((9196, 9241), 'numpy.reshape', 'np.reshape', (['params[pos][:4096]', '(4, 4, 4, 64)'], {}), '(params[pos][:4096], (4, 4, 4, 64))\n', (9206, 9241), True, 'import numpy as np\n'), ((7670, 7691), 'numpy.copy', 'np.copy', (['pauli_string'], {}), '(pauli_string)\n', (7677, 7691), True, 'import numpy as np\n'), ((8053, 8064), 'numpy.abs', 'np.abs', (['_ws'], {}), '(_ws)\n', (8059, 8064), True, 'import numpy as np\n'), ((9511, 9522), 'numpy.abs', 'np.abs', (['_ws'], {}), '(_ws)\n', (9517, 9522), True, 'import numpy as np\n'), ((10644, 10694), 'numpy.reshape', 'np.reshape', (['params[pos][:65536]', '(4, 4, 4, 4, 256)'], {}), '(params[pos][:65536], (4, 4, 4, 4, 256))\n', (10654, 10694), True, 'import numpy as np\n'), ((8851, 8872), 
'numpy.copy', 'np.copy', (['pauli_string'], {}), '(pauli_string)\n', (8858, 8872), True, 'import numpy as np\n'), ((9349, 9360), 'numpy.abs', 'np.abs', (['_ws'], {}), '(_ws)\n', (9355, 9360), True, 'import numpy as np\n'), ((11037, 11048), 'numpy.abs', 'np.abs', (['_ws'], {}), '(_ws)\n', (11043, 11048), True, 'import numpy as np\n'), ((12322, 12378), 'numpy.reshape', 'np.reshape', (['params[pos][:1048576]', '(4, 4, 4, 4, 4, 1024)'], {}), '(params[pos][:1048576], (4, 4, 4, 4, 4, 1024))\n', (12332, 12378), True, 'import numpy as np\n'), ((21142, 21154), 'psutil.getloadavg', 'getloadavg', ([], {}), '()\n', (21152, 21154), False, 'from psutil import virtual_memory, getloadavg\n'), ((10231, 10252), 'numpy.copy', 'np.copy', (['pauli_string'], {}), '(pauli_string)\n', (10238, 10252), True, 'import numpy as np\n'), ((10875, 10886), 'numpy.abs', 'np.abs', (['_ws'], {}), '(_ws)\n', (10881, 10886), True, 'import numpy as np\n'), ((12685, 12696), 'numpy.abs', 'np.abs', (['_ws'], {}), '(_ws)\n', (12691, 12696), True, 'import numpy as np\n'), ((11841, 11862), 'numpy.copy', 'np.copy', (['pauli_string'], {}), '(pauli_string)\n', (11848, 11862), True, 'import numpy as np\n'), ((12523, 12534), 'numpy.abs', 'np.abs', (['_ws'], {}), '(_ws)\n', (12529, 12534), True, 'import numpy as np\n'), ((13574, 13595), 'numpy.copy', 'np.copy', (['pauli_string'], {}), '(pauli_string)\n', (13581, 13595), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#------------------------------------------------------
# @ File : mixup.py
# @ Description:
# @ Author : <NAME>
# @ Contact : <EMAIL>
# @ License : Copyright (c) 2017-2018
# @ Time : 2020/11/10 下午3:21
# @ Software : PyCharm
#-------------------------------------------------------
import numpy as np
import torch
__all__ = ['mix_up']
def mix_up(images, labels, alpha=0.2):
    """Apply mixup augmentation to a batch of images and labels.

    A mixing coefficient ``lambda_`` is sampled from Beta(alpha, alpha) and
    each image is blended with a randomly paired image from the same batch.

    :param images: batch tensor of shape (batch, ...)
    :param labels: per-sample labels of shape (batch,)
    :param alpha: Beta concentration; ``alpha <= 0`` disables mixing (lambda_=1)
    :return: tuple ``(mixed_images, labels_a, labels_b, lambda_)``; the mixup
        loss is ``lambda_ * loss(pred, labels_a) + (1 - lambda_) * loss(pred, labels_b)``
    """
    if alpha > 0:
        # Fix: the original additionally called np.random.beta(alpha, alpha)
        # and discarded the result; only the torch Beta sample is used.
        beta_distributed = torch.distributions.beta.Beta(alpha, alpha)
        lambda_ = beta_distributed.sample([]).item()
    else:
        lambda_ = 1
    batch_size = images.size(0)
    # Random within-batch pairing
    index = torch.randperm(batch_size, device=images.device)
    # Keep lambda_ >= 0.5 so labels_a stays the dominant label
    lambda_ = max(lambda_, 1 - lambda_)
    mixed_images = lambda_ * images + (1 - lambda_) * images[index, :]
    labels_a = labels.long()
    labels_b = labels[index].long()
    return mixed_images, labels_a, labels_b, lambda_
if __name__ == "__main__":
    # Smoke test: mix one random batch and report completion.
    _alpha = 0.2
    _beta = torch.distributions.beta.Beta(_alpha, _alpha)
    batch = torch.randn((10, 3, 224, 224), dtype=torch.float32)
    targets = torch.randint(0, 6, size=(10, ))
    mixed, y_a, y_b, lam = mix_up(batch, targets, _alpha)
    print('Done')
| [
"torch.distributions.beta.Beta",
"torch.randint",
"numpy.random.beta",
"torch.randn",
"torch.randperm"
] | [((816, 864), 'torch.randperm', 'torch.randperm', (['batch_size'], {'device': 'images.device'}), '(batch_size, device=images.device)\n', (830, 864), False, 'import torch\n'), ((1168, 1211), 'torch.distributions.beta.Beta', 'torch.distributions.beta.Beta', (['alpha', 'alpha'], {}), '(alpha, alpha)\n', (1197, 1211), False, 'import torch\n'), ((1226, 1277), 'torch.randn', 'torch.randn', (['(10, 3, 224, 224)'], {'dtype': 'torch.float32'}), '((10, 3, 224, 224), dtype=torch.float32)\n', (1237, 1277), False, 'import torch\n'), ((1291, 1322), 'torch.randint', 'torch.randint', (['(0)', '(6)'], {'size': '(10,)'}), '(0, 6, size=(10,))\n', (1304, 1322), False, 'import torch\n'), ((588, 616), 'numpy.random.beta', 'np.random.beta', (['alpha', 'alpha'], {}), '(alpha, alpha)\n', (602, 616), True, 'import numpy as np\n'), ((644, 687), 'torch.distributions.beta.Beta', 'torch.distributions.beta.Beta', (['alpha', 'alpha'], {}), '(alpha, alpha)\n', (673, 687), False, 'import torch\n')] |
from load import ROOT as R
import gna.constructors as C
import numpy as N
from collections import OrderedDict
from gna.bundle import *
from scipy.interpolate import interp1d
class reactor_anu_uncorr_v01(TransformationBundleLegacy):
    """Bundle creating uncorrelated corrections for reactor antineutrino spectra.

    For each isotope namespace one correction parameter is defined per energy
    edge; the parameter sigma is interpolated from per-isotope uncertainty
    files. The parameters of each isotope are exposed through one VarArray
    transformation output.
    """
    debug = False

    def __init__(self, *args, **kwargs):
        super(reactor_anu_uncorr_v01, self).__init__(*args, **kwargs)

        # Energy edges shared with the other reactor anu bundles
        self.edges = self.shared.reactor_anu_edges.data()
        self.bundles = OrderedDict(self=self)

        self.load_data()

    def build(self):
        """Create one VarArray transformation per isotope from its parameters."""
        uncpars = OrderedDict()
        for name, vars in self.uncorr_vars.items():
            with self.common_namespace:
                uncpar_t = C.VarArray(vars, ns=self.common_namespace)
                uncpar_t.vararray.setLabel('Uncorr correction:\n'+name)
                uncpars[name] = uncpar_t
                self.objects[('uncorrelated_correction', name)] = uncpar_t
                self.transformations_out[name] = uncpar_t.transformations[0]
                self.outputs[name] = uncpar_t.single()

    def define_variables(self):
        """Define one correction parameter per isotope and per energy edge.

        Each parameter is centered at 1.0 with sigma interpolated from the
        uncorrelated uncertainty tables loaded by load_data().
        """
        self.uncorr_vars = OrderedDict()
        for ns in self.namespaces:
            isotope = ns.name
            uncfcn = interp1d(*self.uncertainties_uncorr[isotope])
            for i in range(self.edges.size):
                name = self.cfg.uncnames.format(isotope=isotope, index=i)
                self.uncorr_vars.setdefault(isotope, []).append(name)
                en = self.edges[i]
                var = self.common_namespace.reqparameter(name, central=1.0, sigma=uncfcn(en))
                var.setLabel('Uncorrelated {} anu spectrum correction for {} MeV'.format(isotope, en))

    def load_data(self):
        """Load the uncorrelated uncertainty table for each isotope namespace."""
        self.uncertainties_uncorr = OrderedDict()
        dtype = [('enu', 'd'), ('yield', 'd')]
        if self.debug:
            print('Load files:')
        for ns in self.namespaces:
            unc_uncorr = self.load_file(self.cfg.uncertainties, dtype, isotope=ns.name, mode='uncorr')
            self.uncertainties_uncorr[ns.name] = unc_uncorr

    def load_file(self, filenames, dtype, **kwargs):
        """Return data from the first filename template that loads successfully.

        :param filenames: iterable of format-string templates for file names
        :param dtype: numpy structured dtype passed to loadtxt
        :param kwargs: substitutions for the templates (isotope, mode, ...)
        :raises Exception: when none of the candidate files could be read
        """
        # Fix: do not shadow the builtin `format`, and replace the bare
        # `except:` (which also swallowed KeyboardInterrupt/SystemExit) with
        # the exceptions np.loadtxt raises for missing or malformed files.
        for fname_fmt in filenames:
            fname = fname_fmt.format(**kwargs)
            try:
                data = N.loadtxt(fname, dtype, unpack=True)
            except (OSError, ValueError):
                # Missing or unparsable file: fall through to the next template
                continue
            if self.debug:
                print(kwargs, fname)
                print(data)
            return data
        raise Exception('Failed to load file for '+str(kwargs))
| [
"collections.OrderedDict",
"scipy.interpolate.interp1d",
"gna.constructors.VarArray",
"numpy.loadtxt"
] | [((445, 467), 'collections.OrderedDict', 'OrderedDict', ([], {'self': 'self'}), '(self=self)\n', (456, 467), False, 'from collections import OrderedDict\n'), ((536, 549), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (547, 549), False, 'from collections import OrderedDict\n'), ((1081, 1094), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1092, 1094), False, 'from collections import OrderedDict\n'), ((1715, 1728), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1726, 1728), False, 'from collections import OrderedDict\n'), ((1181, 1226), 'scipy.interpolate.interp1d', 'interp1d', (['*self.uncertainties_uncorr[isotope]'], {}), '(*self.uncertainties_uncorr[isotope])\n', (1189, 1226), False, 'from scipy.interpolate import interp1d\n'), ((669, 711), 'gna.constructors.VarArray', 'C.VarArray', (['vars'], {'ns': 'self.common_namespace'}), '(vars, ns=self.common_namespace)\n', (679, 711), True, 'import gna.constructors as C\n'), ((2204, 2240), 'numpy.loadtxt', 'N.loadtxt', (['fname', 'dtype'], {'unpack': '(True)'}), '(fname, dtype, unpack=True)\n', (2213, 2240), True, 'import numpy as N\n')] |
import numpy as np
import pandas as pd
from os import system, listdir
from time import time
from sys import exit
from wikipedia import summary
from re import sub
from keras.models import load_model
from keras.preprocessing.image import load_img, img_to_array
from keras.applications.imagenet_utils import preprocess_input
def get_movements(path='./../data/movements.txt'):
    """Read the list of art movements, one per line.

    :param path: text file to read (defaults to the project data file)
    :return: list of stripped movement names
    """
    # Fix: the original leaked the file handle; the context manager closes it.
    with open(path) as file:
        return [val.strip() for val in file]
def get_images(base_path='./../images'):
    """List the candidate image files and print them for the user.

    :param base_path: directory to scan (defaults to the project images folder)
    :return: list of file names found in base_path (order as returned by listdir)
    """
    images = listdir(base_path)
    print('\n '.join(images))
    return images
def get_data(base_path='./../data/'):
    """Load the artwork metadata CSV and keep only the relevant columns.

    :param base_path: directory prefix (with trailing separator) containing
        ``all_data_info.csv``
    :return: DataFrame with columns artist, style, new_filename
    """
    # Fix: pd.read_csv already returns a DataFrame; the extra
    # pd.DataFrame(raw_data) copy was redundant.
    data = pd.read_csv(base_path + 'all_data_info.csv', dtype=object)
    relevant_col = ['artist', 'style', 'new_filename']
    return data[relevant_col]
def get_art_movement(image_path):
    """Predict the art movement of an image with the pretrained ResNet model.

    :param image_path: file name inside ``./../images``
    :return: the predicted movement name, or 0 when the image cannot be read
    """
    from ast import literal_eval
    model = load_model('./../data/resnet_model_movements_tmp.h5')
    try:
        img = load_img('./../images/' + image_path, target_size=(224, 224))
        img = img_to_array(img)
        img = np.expand_dims(img, axis=0)
        img = preprocess_input(img)
    except Exception:  # was a bare except:, which also hid KeyboardInterrupt
        print('\nOh! An unexpected error occured in using input file. Please try different file.')
        return 0
    preds = model.predict(img)
    # Movement-name -> class-index mapping stored as a dict literal on disk.
    # Fix: the file handle was leaked and parsed via eval(); literal_eval is
    # the safe equivalent for a plain dict literal.
    with open('./../data/dict_movements.txt') as file:
        styles = literal_eval(file.read())
    index = np.argmax(preds)
    for key, val in styles.items():
        if val == index:
            print(key, 'with precentage prediction of', np.max(preds))
            return key
def get_artist(image_path, art_movement, movements_ls, data):
    """Predict the most probable artist and print a Wikipedia summary.

    :param image_path: file name inside ``./../images``
    :param art_movement: movement predicted by get_art_movement (unused here,
        kept for interface compatibility)
    :param movements_ls: list of known movements (unused here)
    :param data: metadata DataFrame (unused here)
    :return: 1 on success, 0 on failure
    """
    from ast import literal_eval
    model = load_model('./../data/resnet_model_artists_tmp.h5')
    try:
        img = load_img('./../images/' + image_path, target_size=(224, 224))
        img = img_to_array(img)
        img = np.expand_dims(img, axis=0)
        img = preprocess_input(img)
    except Exception:  # was a bare except:, which also hid KeyboardInterrupt
        print('\nOh! An unexpected error occured in using input file. Please try different file.')
        return 0
    preds = model.predict(img).tolist()
    # Artist-name -> class-index mapping stored as a dict literal on disk.
    # Fix: the file handle was leaked and parsed via eval(); literal_eval is
    # the safe equivalent for a plain dict literal.
    with open('./../data/dict_artists.txt') as file:
        artists = literal_eval(file.read())
    # Rank class indices from most to least probable
    ranked = sorted(enumerate(preds[0]), key=lambda tup: tup[1], reverse=True)
    for i, _ in ranked:
        for name, idx in artists.items():
            if i == idx:
                print('Most probable artist is', name)
                result = summary(name)
                print(result)
                return 1
    return 0
def go_on():
    """Ask whether to analyze another image: rerun main() on 'yes', exit otherwise."""
    start_over = input('\nEnter yes if you want to test more images: ')
    if start_over.lower() != 'yes':
        exit()
    main()
def main():
    """Interactive entry point: ask for an image, predict its movement and artist."""
    system('clear')
    print('Hi, welcome to pyArt!')
    print('\nPlease make sure that the image you want to analyze is in the list given below')
    images = get_images()
    movements_ls = get_movements()
    test_img = input('\nEnter the image name you want to analyze (including the format as shown in the list): ')
    if test_img not in images:
        print('\nSorry, the image is not present in the database')
        go_on()
    else:
        movement = get_art_movement(test_img)
        if movement == 0:
            go_on()
        artist = get_artist(test_img, movement, movements_ls, get_data())
        if artist == 0:
            go_on()
    go_on()


if __name__ == '__main__':
    main()
"keras.models.load_model",
"pandas.DataFrame",
"numpy.argmax",
"pandas.read_csv",
"os.system",
"numpy.expand_dims",
"keras.preprocessing.image.img_to_array",
"keras.preprocessing.image.load_img",
"numpy.max",
"keras.applications.imagenet_utils.preprocess_input",
"wikipedia.summary",
"os.listdi... | [((508, 526), 'os.listdir', 'listdir', (['base_path'], {}), '(base_path)\n', (515, 526), False, 'from os import system, listdir\n'), ((626, 684), 'pandas.read_csv', 'pd.read_csv', (["(base_path + 'all_data_info.csv')"], {'dtype': 'object'}), "(base_path + 'all_data_info.csv', dtype=object)\n", (637, 684), True, 'import pandas as pd\n'), ((693, 715), 'pandas.DataFrame', 'pd.DataFrame', (['raw_data'], {}), '(raw_data)\n', (705, 715), True, 'import pandas as pd\n'), ((861, 914), 'keras.models.load_model', 'load_model', (['"""./../data/resnet_model_movements_tmp.h5"""'], {}), "('./../data/resnet_model_movements_tmp.h5')\n", (871, 914), False, 'from keras.models import load_model\n'), ((1323, 1339), 'numpy.argmax', 'np.argmax', (['preds'], {}), '(preds)\n', (1332, 1339), True, 'import numpy as np\n'), ((1695, 1746), 'keras.models.load_model', 'load_model', (['"""./../data/resnet_model_artists_tmp.h5"""'], {}), "('./../data/resnet_model_artists_tmp.h5')\n", (1705, 1746), False, 'from keras.models import load_model\n'), ((2748, 2763), 'os.system', 'system', (['"""clear"""'], {}), "('clear')\n", (2754, 2763), False, 'from os import system, listdir\n'), ((930, 991), 'keras.preprocessing.image.load_img', 'load_img', (["('./../images/' + image_path)"], {'target_size': '(224, 224)'}), "('./../images/' + image_path, target_size=(224, 224))\n", (938, 991), False, 'from keras.preprocessing.image import load_img, img_to_array\n'), ((1000, 1017), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['img'], {}), '(img)\n', (1012, 1017), False, 'from keras.preprocessing.image import load_img, img_to_array\n'), ((1026, 1053), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (1040, 1053), True, 'import numpy as np\n'), ((1062, 1083), 'keras.applications.imagenet_utils.preprocess_input', 'preprocess_input', (['img'], {}), '(img)\n', (1078, 1083), False, 'from keras.applications.imagenet_utils import preprocess_input\n'), 
((1762, 1823), 'keras.preprocessing.image.load_img', 'load_img', (["('./../images/' + image_path)"], {'target_size': '(224, 224)'}), "('./../images/' + image_path, target_size=(224, 224))\n", (1770, 1823), False, 'from keras.preprocessing.image import load_img, img_to_array\n'), ((1832, 1849), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['img'], {}), '(img)\n', (1844, 1849), False, 'from keras.preprocessing.image import load_img, img_to_array\n'), ((1858, 1885), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (1872, 1885), True, 'import numpy as np\n'), ((1894, 1915), 'keras.applications.imagenet_utils.preprocess_input', 'preprocess_input', (['img'], {}), '(img)\n', (1910, 1915), False, 'from keras.applications.imagenet_utils import preprocess_input\n'), ((2727, 2733), 'sys.exit', 'exit', ([], {}), '()\n', (2731, 2733), False, 'from sys import exit\n'), ((1440, 1453), 'numpy.max', 'np.max', (['preds'], {}), '(preds)\n', (1446, 1453), True, 'import numpy as np\n'), ((2539, 2549), 'wikipedia.summary', 'summary', (['j'], {}), '(j)\n', (2546, 2549), False, 'from wikipedia import summary\n')] |
import cv2
from utils import helpers
import numpy as np
class Processing(object):
    # Given a config and a segmentation output image, return a filled mask
    # of the largest connected region.
    @staticmethod
    def process(image,config):
        def nms(input_image):
            '''
            Non-maximum suppression used to find the largest connected region of the mask.

            Input:
                original (binary-ish) image
            Output:
                contour of the largest connected region
            '''
            if len(input_image.shape) == 3:
                input_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2GRAY)
            # Binarize (threshold at 127); `res` return value is unused
            res, input_image = cv2.threshold(input_image, 127, 255, 0)
            kernel = np.ones((5, 5), np.uint8)
            # Morphological opening then closing to remove small noise
            input_image = cv2.morphologyEx(input_image, cv2.MORPH_OPEN, kernel)
            input_image = cv2.morphologyEx(input_image, cv2.MORPH_CLOSE, kernel)
            # Find contours.
            # NOTE(review): this 2-value unpack assumes OpenCV 4.x;
            # OpenCV 3.x returns 3 values — confirm the pinned cv2 version.
            contours, hierachy = cv2.findContours(input_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
            # Keep the contour with the largest area.
            # NOTE(review): if no contour is found, `temp` stays 0 and the
            # later cv2.fillPoly([0]) call would fail — verify inputs always
            # yield at least one contour.
            max_area = 0
            temp = 0
            for cnt in contours:
                area = cv2.contourArea(cnt)
                if area > max_area:
                    max_area = area
                    temp = cnt
            return temp
        # Argmax over the last (two-channel) dimension performs the
        # segmentation: at each pixel, 0 means track and 1 means background
        output_image = helpers.reverse_one_hot(image)
        # Colour-code the class map using the configured label values.
        # NOTE(review): eval() on config strings — make sure the config file
        # is trusted input.
        out_vis_image = helpers.colour_code_segmentation(output_image, eval(config["image"]["label_values"]))
        # Convert RGB -> BGR for OpenCV
        out_vis_image = cv2.cvtColor(np.uint8(out_vis_image), cv2.COLOR_RGB2BGR)
        mask = np.zeros(out_vis_image.shape).astype(np.uint8)  # all-black canvas
        # NMS yields the contour of the largest connected region
        postion = nms(out_vis_image)
        # Fill the contour with the configured colour
        _ = cv2.fillPoly(mask, [postion], eval(config["image"]["color"]), cv2.LINE_AA)
        return mask
| [
"cv2.contourArea",
"numpy.uint8",
"utils.helpers.reverse_one_hot",
"cv2.cvtColor",
"cv2.morphologyEx",
"cv2.threshold",
"numpy.zeros",
"numpy.ones",
"cv2.findContours"
] | [((1272, 1302), 'utils.helpers.reverse_one_hot', 'helpers.reverse_one_hot', (['image'], {}), '(image)\n', (1295, 1302), False, 'from utils import helpers\n'), ((515, 554), 'cv2.threshold', 'cv2.threshold', (['input_image', '(127)', '(255)', '(0)'], {}), '(input_image, 127, 255, 0)\n', (528, 554), False, 'import cv2\n'), ((576, 601), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (583, 601), True, 'import numpy as np\n'), ((650, 703), 'cv2.morphologyEx', 'cv2.morphologyEx', (['input_image', 'cv2.MORPH_OPEN', 'kernel'], {}), '(input_image, cv2.MORPH_OPEN, kernel)\n', (666, 703), False, 'import cv2\n'), ((730, 784), 'cv2.morphologyEx', 'cv2.morphologyEx', (['input_image', 'cv2.MORPH_CLOSE', 'kernel'], {}), '(input_image, cv2.MORPH_CLOSE, kernel)\n', (746, 784), False, 'import cv2\n'), ((837, 904), 'cv2.findContours', 'cv2.findContours', (['input_image', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_NONE'], {}), '(input_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\n', (853, 904), False, 'import cv2\n'), ((1475, 1498), 'numpy.uint8', 'np.uint8', (['out_vis_image'], {}), '(out_vis_image)\n', (1483, 1498), True, 'import numpy as np\n'), ((418, 463), 'cv2.cvtColor', 'cv2.cvtColor', (['input_image', 'cv2.COLOR_BGR2GRAY'], {}), '(input_image, cv2.COLOR_BGR2GRAY)\n', (430, 463), False, 'import cv2\n'), ((1033, 1053), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (1048, 1053), False, 'import cv2\n'), ((1535, 1564), 'numpy.zeros', 'np.zeros', (['out_vis_image.shape'], {}), '(out_vis_image.shape)\n', (1543, 1564), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf8
#
# Copyright (c) 2020 Centre National d'Etudes Spatiales (CNES).
#
# This file is part of PANDORA
#
# https://github.com/CNES/Pandora_pandora
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains functions associated to the disparity map computation step.
"""
import xarray as xr
import numpy as np
from scipy.ndimage.morphology import binary_dilation
from pandora.img_tools import compute_std_raster
from pandora.constants import *
def to_disp(cv: "xr.Dataset", invalid_value: float = 0, img_ref: "xr.Dataset" = None, img_sec: "xr.Dataset" = None) -> "xr.Dataset":
    """
    Disparity computation by applying the Winner Takes All strategy

    :param cv: the cost volume dataset
    :type cv:
        xarray.Dataset, with the data variables:
            - cost_volume 3D xarray.DataArray (row, col, disp)
            - confidence_measure 3D xarray.DataArray (row, col, indicator)
    :param invalid_value: disparity to assign to invalid pixels
    :type invalid_value: float
    :param img_ref: reference Dataset image (kept for interface compatibility)
    :type img_ref: xarray.Dataset
    :param img_sec: secondary Dataset image (kept for interface compatibility)
    :type img_sec: xarray.Dataset
    :return: Dataset with the disparity map and the confidence measure
    :rtype:
        xarray.Dataset with the data variables :
            - disparity_map 2D xarray.DataArray (row, col)
            - confidence_measure 3D xarray.DataArray (row, col, indicator)
    """
    indices_nan = np.isnan(cv['cost_volume'].data)

    # Winner Takes All strategy: mask NaN costs so they can never be selected
    if cv.attrs['type_measure'] == 'max':
        # Disparities are computed by selecting the maximal cost at each pixel
        cv['cost_volume'].data[indices_nan] = -np.inf
        disp = argmax_split(cv)
    else:
        # Disparities are computed by selecting the minimal cost at each pixel
        cv['cost_volume'].data[indices_nan] = np.inf
        disp = argmin_split(cv)

    # Restore the NaN costs
    cv['cost_volume'].data[indices_nan] = np.nan

    row = cv.coords['row']
    col = cv.coords['col']

    # ----- Disparity map -----
    disp_map = xr.Dataset({'disparity_map': (['row', 'col'], disp)}, coords={'row': row, 'col': col})

    # A pixel is invalid when its whole disparity interval is missing in the
    # secondary image, i.e. all its costs are NaN.
    # Fix: was np.min(indices_nan, axis=2) (min over booleans) — np.all is the
    # same reduction with explicit intent.
    invalid_mc = np.all(indices_nan, axis=2)
    # Fix: was np.where(invalid_mc == True) — comparison to True is redundant
    invalid_pixel = np.where(invalid_mc)
    disp_map['disparity_map'].data[invalid_pixel] = invalid_value

    # Save the disparity map in the cost volume
    cv['disp_indices'] = disp_map['disparity_map'].copy(deep=True)

    disp_map.attrs = cv.attrs
    d_range = cv.coords['disp'].data
    disp_map.attrs['disp_min'] = d_range[0]
    disp_map.attrs['disp_max'] = d_range[-1]

    # ----- Confidence measure -----
    # Allocate the confidence measure in the disparity_map dataset
    disp_map['confidence_measure'] = cv['confidence_measure']

    # Remove temporary values
    del indices_nan
    del invalid_mc

    return disp_map
def validity_mask(disp: xr.Dataset, img_ref: xr.Dataset, img_sec: xr.Dataset, cv: xr.Dataset = None, **cfg: int) -> xr.Dataset:
    """
    Create the validity mask of the disparity map

    :param disp: dataset with the disparity map and the confidence measure
    :type disp:
        xarray.Dataset with the data variables :
            - disparity_map 2D xarray.DataArray (row, col)
            - confidence_measure 3D xarray.DataArray(row, col, indicator)
    :param img_ref: reference Dataset image
    :type img_ref:
        xarray.Dataset containing :
            - im : 2D (row, col) xarray.DataArray
            - msk : 2D (row, col) xarray.DataArray
    :param img_sec: secondary Dataset image
    :type img_sec:
        xarray.Dataset containing :
            - im : 2D (row, col) xarray.DataArray
            - msk : 2D (row, col) xarray.DataArray
    :param cv: cost volume dataset
    :type cv:
        xarray.Dataset, with the data variables:
            - cost_volume 3D xarray.DataArray (row, col, disp)
            - confidence_measure 3D xarray.DataArray (row, col, indicator)
    :param cfg: images configuration containing the mask convention : valid_pixels, no_data
    :type cfg: dict
    :return: the dataset disparity with the data variable validity_mask
    :rtype :
        xarray.Dataset with the data variables :
            - disparity_map 2D xarray.DataArray (row, col)
            - confidence_measure 3D xarray.DataArray (row, col, indicator)
            - validity_mak 2D xarray.DataArray (row, col)
    """
    # Allocate the validity mask (uint16 bitfield, one PANDORA_MSK_* flag per bit)
    disp['validity_mask'] = xr.DataArray(np.zeros(disp['disparity_map'].shape, dtype=np.uint16), dims=['row', 'col'])

    d_min = int(disp.attrs['disp_min'])
    d_max = int(disp.attrs['disp_max'])
    col = disp.coords['col'].data
    row = disp.coords['row'].data

    # bit_1 collects the column indices whose whole disparity interval falls
    # outside the secondary image (flagged further below).
    # Negative disparity range
    if d_max < 0:
        bit_1 = np.where((col + d_max) < col[0])
        # Information: the disparity interval is incomplete (border reached in the secondary image)
        disp['validity_mask'].data[:, np.where(((col + d_max) >= col[0]) & ((col + d_min) < col[0]))] +=\
            PANDORA_MSK_PIXEL_SEC_INCOMPLETE_DISPARITY_RANGE
    else:
        # Positive disparity range
        if d_min > 0:
            bit_1 = np.where((col + d_min) > col[-1])
            # Information: the disparity interval is incomplete (border reached in the secondary image)
            disp['validity_mask'].data[:, np.where(((col + d_min) <= col[-1]) & ((col + d_max) > col[-1]))] +=\
                PANDORA_MSK_PIXEL_SEC_INCOMPLETE_DISPARITY_RANGE

        # Disparity range contains 0
        else:
            # No column can miss its whole interval when 0 is a valid disparity
            bit_1 = ([], )
            # Information: the disparity interval is incomplete (border reached in the secondary image)
            disp['validity_mask'].data[:, np.where(((col + d_min) < col[0]) | (col + d_max > col[-1]))] +=\
                PANDORA_MSK_PIXEL_SEC_INCOMPLETE_DISPARITY_RANGE

    # Invalid pixel : the disparity interval is missing in the secondary image ( disparity range
    # outside the image )
    disp['validity_mask'].data[:, bit_1] += PANDORA_MSK_PIXEL_SEC_NODATA_OR_DISPARITY_RANGE_MISSING

    if 'msk' in img_ref.data_vars:

        # Align the optional reference mask with the (possibly cropped) disparity map
        _, r_mask = xr.align(disp['validity_mask'], img_ref['msk'])

        # Dilatation : pixels that contains no_data in their aggregation window become no_data
        dil = binary_dilation(img_ref['msk'].data == cfg['no_data'],
                              structure=np.ones((disp.attrs['window_size'], disp.attrs['window_size'])), iterations=1)

        offset = disp.attrs['offset_row_col']
        if offset != 0:
            dil = dil[offset:-offset, offset:-offset]

        # Invalid pixel : no_data in the reference image
        disp['validity_mask'] += dil.astype(np.uint16) * PANDORA_MSK_PIXEL_REF_NODATA_OR_BORDER

        # Invalid pixel : invalidated by the validity mask of the reference image given as input
        disp['validity_mask'] += xr.where((r_mask != cfg['no_data']) & (r_mask != cfg['valid_pixels']),
                                          PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_REF, 0).astype(np.uint16)

    if 'msk' in img_sec.data_vars:
        _, r_mask = xr.align(disp['validity_mask'], img_sec['msk'])

        # Dilatation : pixels that contains no_data in their aggregation window become no_data
        dil = binary_dilation(img_sec['msk'].data == cfg['no_data'],
                              structure=np.ones((disp.attrs['window_size'], disp.attrs['window_size'])), iterations=1)
        offset = disp.attrs['offset_row_col']
        if offset != 0:
            dil = dil[offset:-offset, offset:-offset]

        # 1 where the secondary mask marks a pixel neither no_data nor valid
        r_mask = xr.where((r_mask != cfg['no_data']) & (r_mask != cfg['valid_pixels']), 1, 0).data

        # Useful to calculate the case where the disparity interval is incomplete, and all remaining secondary
        # positions are invalidated by the secondary mask
        b_2_7 = np.zeros((len(row), len(col)), dtype=np.uint16)

        # Useful to calculate the case where no_data in the secondary image invalidated the disparity interval
        no_data_sec = np.zeros((len(row), len(col)), dtype=np.uint16)

        col_range = np.arange(len(col))
        # Walk every disparity, counting per reference column how many
        # secondary candidates are masked / no_data / out of the image.
        for d in range(d_min, d_max+1):
            # Diagonal in the cost volume
            col_d = col_range + d
            valid_index = np.where((col_d >= col_range[0]) & (col_d <= col_range[-1]))

            # No_data and masked pixels do not raise the same flag, we need to treat them differently
            b_2_7[:, col_range[valid_index]] += r_mask[:, col_d[valid_index]].astype(np.uint16)
            # Out-of-image candidates also count as unusable for both tallies
            b_2_7[:, col_range[np.setdiff1d(col_range, valid_index)]] += 1

            no_data_sec[:, col_range[valid_index]] += dil[:, col_d[valid_index]]
            no_data_sec[:, col_range[np.setdiff1d(col_range, valid_index)]] += 1

        # Exclusion of pixels that have flag 1 already enabled
        b_2_7[:, bit_1[0]] = 0
        no_data_sec[:, bit_1[0]] = 0

        # A pixel is flagged only when EVERY disparity candidate was unusable
        # Invalid pixel: secondary positions invalidated by the mask of the secondary image given as input
        disp['validity_mask'].data[np.where(b_2_7 == len(range(d_min, d_max+1)))] +=\
            PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_SEC

        # If Invalid pixel : the disparity interval is missing in the secondary image (disparity interval
        # is invalidated by no_data in the secondary image )
        disp['validity_mask'].data[np.where(no_data_sec == len(range(d_min, d_max + 1)))] +=\
            PANDORA_MSK_PIXEL_SEC_NODATA_OR_DISPARITY_RANGE_MISSING

    return disp
def argmin_split(cost_volume: xr.Dataset) -> np.ndarray:
    """
    Find the indices of the minimum values for a 3D DataArray, along axis 2.

    numpy's argmin makes a copy of its input, so the cost volume is processed
    in 100 x 100 tiles to keep the memory footprint small.

    :param cost_volume: the cost volume dataset
    :type cost_volume: xarray.Dataset
    :return: the disparities for which the cost volume values are the smallest
    :rtype: np.ndarray
    """
    n_row, n_col, _ = cost_volume['cost_volume'].shape
    disp = np.zeros((n_row, n_col), dtype=np.float32)
    disp_coords = cost_volume.coords['disp'].data

    # Split along the row axis with a step of 100, then along the column axis,
    # so that argmin only ever sees a small sub-array
    row_offset = 0
    for row_chunk in np.array_split(cost_volume['cost_volume'].data, np.arange(100, n_row, 100), axis=0):
        col_offset = 0
        for tile in np.array_split(row_chunk, np.arange(100, n_col, 100), axis=1):
            disp[row_offset:row_offset + row_chunk.shape[0],
                 col_offset:col_offset + tile.shape[1]] = \
                disp_coords[np.argmin(tile, axis=2)]
            col_offset += tile.shape[1]
        row_offset += row_chunk.shape[0]
    return disp
def argmax_split(cost_volume: xr.Dataset) -> np.ndarray:
    """
    Find the indices of the maximum values for a 3D DataArray, along axis 2.

    numpy's argmax makes a copy of its input, so the cost volume is processed
    in 100 x 100 tiles to keep the memory footprint small.

    :param cost_volume: the cost volume dataset
    :type cost_volume: xarray.Dataset
    :return: the disparities for which the cost volume values are the highest
    :rtype: np.ndarray
    """
    n_row, n_col, _ = cost_volume['cost_volume'].shape
    disp = np.zeros((n_row, n_col), dtype=np.float32)
    disp_coords = cost_volume.coords['disp'].data

    # Split along the row axis with a step of 100, then along the column axis,
    # so that argmax only ever sees a small sub-array
    row_offset = 0
    for row_chunk in np.array_split(cost_volume['cost_volume'].data, np.arange(100, n_row, 100), axis=0):
        col_offset = 0
        for tile in np.array_split(row_chunk, np.arange(100, n_col, 100), axis=1):
            disp[row_offset:row_offset + row_chunk.shape[0],
                 col_offset:col_offset + tile.shape[1]] = \
                disp_coords[np.argmax(tile, axis=2)]
            col_offset += tile.shape[1]
        row_offset += row_chunk.shape[0]
    return disp
def coefficient_map(cv: xr.DataArray) -> xr.DataArray:
    """
    Return the coefficient map: for every (row, col) pixel, the cost volume
    value at the previously selected disparity (``disp_indices``).

    :param cv: cost volume
    :type cv: xarray.Dataset, with the data variables cost_volume 3D xarray.DataArray (row, col, disp)
    :return: the coefficient map
    :rtype : 2D DataArray (row, col)
    """
    # Pick the cost value at the computed disparity for each pixel
    selected_costs = cv['cost_volume'].sel(disp=cv['disp_indices']).astype(np.float32)
    coeff_map = xr.DataArray(selected_costs,
                             coords=[('row', cv.coords['row']), ('col', cv.coords['col'])])
    coeff_map.name = 'Coefficient Map'
    # Propagate the cost volume metadata onto the output map
    coeff_map.attrs = cv.attrs
    return coeff_map
def approximate_right_disparity(cv: xr.Dataset, img_sec: xr.Dataset, invalid_value: float = 0,
                                img_ref: xr.Dataset = None) -> xr.Dataset:
    """
    Create the right disparity map, by a diagonal search for the minimum in the reference cost volume

    Ernst, Ines and Hirschmueller, Heiko.
    Mutual information based semi-global stereo matching on the GPU.
    In : International Symposium on Visual Computing. Springer, Berlin, Heidelberg, 2008. p. 228-239.

    :param cv: the cost volume dataset
    :type cv:
        xarray.Dataset, with the data variables:
            - cost_volume 3D xarray.DataArray (row, col, disp)
            - confidence_measure 3D xarray.DataArray (row, col, indicator)
    :param img_sec: secondary Dataset image
    :type img_sec:
        xarray.Dataset containing :
            - im : 2D (row, col) xarray.DataArray
            - msk : 2D (row, col) xarray.DataArray
    :param invalid_value: disparity to assign to invalid pixels
    :type invalid_value: float
    :param img_ref: reference Dataset image (NOTE(review): accepted for interface
        symmetry but not used anywhere in this function body)
    :type img_ref:
        xarray.Dataset containing :
            - im : 2D (row, col) xarray.DataArray
            - msk : 2D (row, col) xarray.DataArray
    :return: Dataset with the secondary disparity map, the confidence measure and the validity mask
    :rtype:
        xarray.Dataset with the data variables :
            - disparity_map 2D xarray.DataArray (row, col)
            - confidence_measure 3D xarray.DataArray (row, col, indicator)
            - validity_mask 2D xarray.DataArray (row, col)
    """
    disp_range = cv.coords['disp'].data.astype(float)
    col_range = cv.coords['col'].data
    row_range = cv.coords['row'].data

    # Extract integer disparity (sub-pixel disparities are dropped for the diagonal search)
    disp_range = np.extract(np.mod(disp_range, 1) == 0, disp_range)

    # Allocate the disparity map
    data = np.zeros((len(row_range), len(col_range))).astype(np.float32)
    disp_map = xr.Dataset({'disparity_map': (['row', 'col'], data)},
                           coords={'row': cv.coords['row'], 'col': cv.coords['col']})

    # Allocate the confidence measure (standard deviation of the secondary image intensity)
    confidence_measure = compute_std_raster(img_sec, cv.attrs['window_size']).reshape(len(row_range), len(col_range), 1)
    disp_map = disp_map.assign_coords(indicator=['disparity_pandora_intensityStd'])
    disp_map['confidence_measure'] = xr.DataArray(data=confidence_measure.astype(np.float32),
                                                 dims=['row', 'col', 'indicator'])

    # Allocate the validity mask
    disp_map['validity_mask'] = xr.DataArray(np.zeros(disp_map['disparity_map'].shape, dtype=np.uint16),
                                            dims=['row', 'col'])
    disp_map.attrs = cv.attrs
    d_range = cv.coords['disp'].data
    disp_map.attrs['disp_min'] = d_range[0]
    disp_map.attrs['disp_max'] = d_range[-1]
    disp_map.attrs['right_left_mode'] = "approximate"

    # Process the map one column at a time: a secondary pixel at column c sees the
    # reference candidates at columns c - d for each disparity d
    for c in col_range:
        x_d = c - disp_range
        valid = np.where((x_d >= col_range[0]) & (x_d <= col_range[-1]))

        # The disparity interval is missing in the reference image
        if x_d[valid].size == 0:
            disp_map['disparity_map'].loc[dict(col=c)] = invalid_value
            # Invalid pixel : the disparity interval is missing in the secondary image
            disp_map['validity_mask'].loc[dict(col=c)] += PANDORA_MSK_PIXEL_SEC_NODATA_OR_DISPARITY_RANGE_MISSING
        else:
            # Diagonal search for the minimum or maximum
            # (zncc is a similarity measure, so the best match is the maximum)
            if cv.attrs['measure'] == 'zncc':
                min_ = cv['cost_volume'].sel(col=xr.DataArray(np.flip(x_d[valid]), dims='disp_'),
                                             disp=xr.DataArray(np.flip(disp_range[valid]),
                                                               dims='disp_')).argmax(dim='disp_')
            else:
                min_ = cv['cost_volume'].sel(col=xr.DataArray(np.flip(x_d[valid]), dims='disp_'),
                                             disp=xr.DataArray(np.flip(disp_range[valid]),
                                                               dims='disp_')).argmin(dim='disp_')

            # Disparity interval is incomplete
            if x_d[valid].size != disp_range.size:
                # Information: the disparity interval is incomplete (border reached in the secondary image)
                disp_map['validity_mask'].loc[dict(col=c)] += PANDORA_MSK_PIXEL_SEC_INCOMPLETE_DISPARITY_RANGE

            # Right disparity is the negated reference disparity at the selected candidate
            disp_map['disparity_map'].loc[dict(col=c)] = -1 * np.flip(disp_range[valid])[min_.data]
    return disp_map
def resize(dataset: xr.Dataset, invalid_value: float = 0) -> xr.Dataset:
    """
    Pixels whose aggregation window exceeds the reference image are truncated in the output products.
    This function returns the output products with the size of the input images : add rows and columns that have been
    truncated. These added pixels will have bit 0 = 1 ( Invalid pixel : border of the reference image )
    in the validity_mask and will have the disparity = invalid_value in the disparity map.

    :param dataset: Dataset which contains the output products
    :type dataset: xarray.Dataset with the variables :
        - disparity_map 2D xarray.DataArray (row, col)
        - confidence_measure 3D xarray.DataArray(row, col, indicator)
        - validity_mask 2D xarray.DataArray (row, col)
    :param invalid_value: disparity to assign to invalid pixels ( pixels whose aggregation window exceeds the image)
    :type invalid_value: float
    :return: the dataset with the size of the input images
    :rtype : xarray.Dataset with the variables :
        - disparity_map 2D xarray.DataArray (row, col)
        - confidence_measure 3D xarray.DataArray(row, col, indicator)
        - validity_mask 2D xarray.DataArray (row, col)
    """
    offset = dataset.attrs['offset_row_col']
    # Nothing was truncated: the products already have the input image size
    if offset == 0:
        return dataset

    # Rebuild the full-size row / col coordinate ranges of the input images
    c_row = dataset.coords['row']
    c_col = dataset.coords['col']
    row = np.arange(c_row[0] - offset, c_row[-1] + 1 + offset)
    col = np.arange(c_col[0] - offset, c_col[-1] + 1 + offset)

    resize_disparity = xr.Dataset()
    for array in dataset:
        if array == 'disparity_map':
            filler = xr.DataArray(np.full((len(row), len(col)), invalid_value, dtype=np.float32),
                                  coords=[row, col], dims=['row', 'col'])
            resize_disparity[array] = dataset[array].combine_first(filler)
        elif array == 'confidence_measure':
            depth = len(dataset.coords['indicator'])
            filler = xr.DataArray(data=np.full((len(row), len(col), depth), np.nan, dtype=np.float32),
                                  coords={'row': row, 'col': col}, dims=['row', 'col', 'indicator'])
            resize_disparity[array] = dataset[array].combine_first(filler)
        elif array == 'validity_mask':
            filler = xr.DataArray(np.zeros((len(row), len(col)), dtype=np.uint16),
                                  coords=[row, col], dims=['row', 'col'])
            # Invalid pixel : border of the reference image
            filler += PANDORA_MSK_PIXEL_REF_NODATA_OR_BORDER
            resize_disparity[array] = dataset[array].combine_first(filler).astype(np.uint16)
        elif array == 'interpolated_coeff':
            filler = xr.DataArray(np.full((len(row), len(col)), np.nan, dtype=np.float32),
                                  coords=[row, col], dims=['row', 'col'])
            resize_disparity[array] = dataset[array].combine_first(filler)

    resize_disparity.attrs = dataset.attrs
    resize_disparity.attrs['offset_row_col'] = 0
    return resize_disparity
| [
"numpy.setdiff1d",
"numpy.flip",
"xarray.align",
"numpy.argmax",
"numpy.zeros",
"pandora.img_tools.compute_std_raster",
"numpy.isnan",
"xarray.Dataset",
"numpy.mod",
"numpy.ones",
"numpy.min",
"numpy.where",
"numpy.arange",
"numpy.argmin",
"xarray.where"
] | [((2230, 2262), 'numpy.isnan', 'np.isnan', (["cv['cost_volume'].data"], {}), "(cv['cost_volume'].data)\n", (2238, 2262), True, 'import numpy as np\n'), ((2829, 2919), 'xarray.Dataset', 'xr.Dataset', (["{'disparity_map': (['row', 'col'], disp)}"], {'coords': "{'row': row, 'col': col}"}), "({'disparity_map': (['row', 'col'], disp)}, coords={'row': row,\n 'col': col})\n", (2839, 2919), True, 'import xarray as xr\n'), ((2934, 2961), 'numpy.min', 'np.min', (['indices_nan'], {'axis': '(2)'}), '(indices_nan, axis=2)\n', (2940, 2961), True, 'import numpy as np\n'), ((3096, 3124), 'numpy.where', 'np.where', (['(invalid_mc == True)'], {}), '(invalid_mc == True)\n', (3104, 3124), True, 'import numpy as np\n'), ((10780, 10816), 'numpy.zeros', 'np.zeros', (['(ny, nx)'], {'dtype': 'np.float32'}), '((ny, nx), dtype=np.float32)\n', (10788, 10816), True, 'import numpy as np\n'), ((12247, 12283), 'numpy.zeros', 'np.zeros', (['(ny, nx)'], {'dtype': 'np.float32'}), '((ny, nx), dtype=np.float32)\n', (12255, 12283), True, 'import numpy as np\n'), ((15829, 15946), 'xarray.Dataset', 'xr.Dataset', (["{'disparity_map': (['row', 'col'], data)}"], {'coords': "{'row': cv.coords['row'], 'col': cv.coords['col']}"}), "({'disparity_map': (['row', 'col'], data)}, coords={'row': cv.\n coords['row'], 'col': cv.coords['col']})\n", (15839, 15946), True, 'import xarray as xr\n'), ((19868, 19920), 'numpy.arange', 'np.arange', (['(c_row[0] - offset)', '(c_row[-1] + 1 + offset)'], {}), '(c_row[0] - offset, c_row[-1] + 1 + offset)\n', (19877, 19920), True, 'import numpy as np\n'), ((19931, 19983), 'numpy.arange', 'np.arange', (['(c_col[0] - offset)', '(c_col[-1] + 1 + offset)'], {}), '(c_col[0] - offset, c_col[-1] + 1 + offset)\n', (19940, 19983), True, 'import numpy as np\n'), ((20008, 20020), 'xarray.Dataset', 'xr.Dataset', ([], {}), '()\n', (20018, 20020), True, 'import xarray as xr\n'), ((5340, 5394), 'numpy.zeros', 'np.zeros', (["disp['disparity_map'].shape"], {'dtype': 'np.uint16'}), 
"(disp['disparity_map'].shape, dtype=np.uint16)\n", (5348, 5394), True, 'import numpy as np\n'), ((5632, 5662), 'numpy.where', 'np.where', (['(col + d_max < col[0])'], {}), '(col + d_max < col[0])\n', (5640, 5662), True, 'import numpy as np\n'), ((6970, 7017), 'xarray.align', 'xr.align', (["disp['validity_mask']", "img_ref['msk']"], {}), "(disp['validity_mask'], img_ref['msk'])\n", (6978, 7017), True, 'import xarray as xr\n'), ((7941, 7988), 'xarray.align', 'xr.align', (["disp['validity_mask']", "img_sec['msk']"], {}), "(disp['validity_mask'], img_sec['msk'])\n", (7949, 7988), True, 'import xarray as xr\n'), ((11121, 11144), 'numpy.arange', 'np.arange', (['(100)', 'ny', '(100)'], {}), '(100, ny, 100)\n', (11130, 11144), True, 'import numpy as np\n'), ((12588, 12611), 'numpy.arange', 'np.arange', (['(100)', 'ny', '(100)'], {}), '(100, ny, 100)\n', (12597, 12611), True, 'import numpy as np\n'), ((16469, 16527), 'numpy.zeros', 'np.zeros', (["disp_map['disparity_map'].shape"], {'dtype': 'np.uint16'}), "(disp_map['disparity_map'].shape, dtype=np.uint16)\n", (16477, 16527), True, 'import numpy as np\n'), ((16876, 16932), 'numpy.where', 'np.where', (['((x_d >= col_range[0]) & (x_d <= col_range[-1]))'], {}), '((x_d >= col_range[0]) & (x_d <= col_range[-1]))\n', (16884, 16932), True, 'import numpy as np\n'), ((6019, 6050), 'numpy.where', 'np.where', (['(col + d_min > col[-1])'], {}), '(col + d_min > col[-1])\n', (6027, 6050), True, 'import numpy as np\n'), ((8415, 8491), 'xarray.where', 'xr.where', (["((r_mask != cfg['no_data']) & (r_mask != cfg['valid_pixels']))", '(1)', '(0)'], {}), "((r_mask != cfg['no_data']) & (r_mask != cfg['valid_pixels']), 1, 0)\n", (8423, 8491), True, 'import xarray as xr\n'), ((9095, 9155), 'numpy.where', 'np.where', (['((col_d >= col_range[0]) & (col_d <= col_range[-1]))'], {}), '((col_d >= col_range[0]) & (col_d <= col_range[-1]))\n', (9103, 9155), True, 'import numpy as np\n'), ((11384, 11407), 'numpy.arange', 'np.arange', (['(100)', 'nx', 
'(100)'], {}), '(100, nx, 100)\n', (11393, 11407), True, 'import numpy as np\n'), ((12851, 12874), 'numpy.arange', 'np.arange', (['(100)', 'nx', '(100)'], {}), '(100, nx, 100)\n', (12860, 12874), True, 'import numpy as np\n'), ((15667, 15688), 'numpy.mod', 'np.mod', (['disp_range', '(1)'], {}), '(disp_range, 1)\n', (15673, 15688), True, 'import numpy as np\n'), ((16032, 16084), 'pandora.img_tools.compute_std_raster', 'compute_std_raster', (['img_sec', "cv.attrs['window_size']"], {}), "(img_sec, cv.attrs['window_size'])\n", (16050, 16084), False, 'from pandora.img_tools import compute_std_raster\n'), ((5803, 5861), 'numpy.where', 'np.where', (['((col + d_max >= col[0]) & (col + d_min < col[0]))'], {}), '((col + d_max >= col[0]) & (col + d_min < col[0]))\n', (5811, 5861), True, 'import numpy as np\n'), ((7223, 7286), 'numpy.ones', 'np.ones', (["(disp.attrs['window_size'], disp.attrs['window_size'])"], {}), "((disp.attrs['window_size'], disp.attrs['window_size']))\n", (7230, 7286), True, 'import numpy as np\n'), ((7711, 7828), 'xarray.where', 'xr.where', (["((r_mask != cfg['no_data']) & (r_mask != cfg['valid_pixels']))", 'PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_REF', '(0)'], {}), "((r_mask != cfg['no_data']) & (r_mask != cfg['valid_pixels']),\n PANDORA_MSK_PIXEL_IN_VALIDITY_MASK_REF, 0)\n", (7719, 7828), True, 'import xarray as xr\n'), ((8194, 8257), 'numpy.ones', 'np.ones', (["(disp.attrs['window_size'], disp.attrs['window_size'])"], {}), "((disp.attrs['window_size'], disp.attrs['window_size']))\n", (8201, 8257), True, 'import numpy as np\n'), ((11639, 11673), 'numpy.argmin', 'np.argmin', (['cv_chunked_x[x]'], {'axis': '(2)'}), '(cv_chunked_x[x], axis=2)\n', (11648, 11673), True, 'import numpy as np\n'), ((13106, 13140), 'numpy.argmax', 'np.argmax', (['cv_chunked_x[x]'], {'axis': '(2)'}), '(cv_chunked_x[x], axis=2)\n', (13115, 13140), True, 'import numpy as np\n'), ((6199, 6259), 'numpy.where', 'np.where', (['((col + d_min <= col[-1]) & (col + d_max > col[-1]))'], {}), 
'((col + d_min <= col[-1]) & (col + d_max > col[-1]))\n', (6207, 6259), True, 'import numpy as np\n'), ((6559, 6617), 'numpy.where', 'np.where', (['((col + d_min < col[0]) | (col + d_max > col[-1]))'], {}), '((col + d_min < col[0]) | (col + d_max > col[-1]))\n', (6567, 6617), True, 'import numpy as np\n'), ((18398, 18424), 'numpy.flip', 'np.flip', (['disp_range[valid]'], {}), '(disp_range[valid])\n', (18405, 18424), True, 'import numpy as np\n'), ((9386, 9422), 'numpy.setdiff1d', 'np.setdiff1d', (['col_range', 'valid_index'], {}), '(col_range, valid_index)\n', (9398, 9422), True, 'import numpy as np\n'), ((9549, 9585), 'numpy.setdiff1d', 'np.setdiff1d', (['col_range', 'valid_index'], {}), '(col_range, valid_index)\n', (9561, 9585), True, 'import numpy as np\n'), ((17486, 17505), 'numpy.flip', 'np.flip', (['x_d[valid]'], {}), '(x_d[valid])\n', (17493, 17505), True, 'import numpy as np\n'), ((17585, 17611), 'numpy.flip', 'np.flip', (['disp_range[valid]'], {}), '(disp_range[valid])\n', (17592, 17611), True, 'import numpy as np\n'), ((17791, 17810), 'numpy.flip', 'np.flip', (['x_d[valid]'], {}), '(x_d[valid])\n', (17798, 17810), True, 'import numpy as np\n'), ((17890, 17916), 'numpy.flip', 'np.flip', (['disp_range[valid]'], {}), '(disp_range[valid])\n', (17897, 17916), True, 'import numpy as np\n')] |
# ===============================================================================================================
# Copyright (c) 2019, Cornell University. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that
# the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this list of conditions and
# the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
# the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# * Neither the name of Cornell University nor the names of its contributors may be used to endorse or
# promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
#
# Author: <NAME> (<EMAIL>)
#
# The research is based upon work supported by the Office of the Director of National Intelligence (ODNI),
# Intelligence Advanced Research Projects Activity (IARPA), via DOI/IBC Contract Number D17PC00287.
# The U.S. Government is authorized to reproduce and distribute copies of this work for Governmental purposes.
# ===============================================================================================================
import os
import json
from lib.rpc_model import RPCModel
from lib.gen_grid import gen_grid
from lib.solve_affine import solve_affine
from lib.solve_perspective import solve_perspective
import numpy as np
from pyquaternion import Quaternion
from lib.check_error import check_perspective_error
import logging
from lib.latlon_utm_converter import eastnorth_to_latlon
from coordinate_system import global_to_local
def discretize_volume(work_dir):
    """Sample the area-of-interest bounding volume on a regular 3D grid and
    return the grid points expressed in three coordinate frames.

    :param work_dir: working directory; must contain 'aoi.json' with the
        bounding box (UTM corners, zone, hemisphere, altitude range)
    :return: tuple (latlonalt, utm_local, enu); each is a column-stacked array
        with one grid point per row
    """
    # Load the area-of-interest bounding box
    with open(os.path.join(work_dir, 'aoi.json')) as fp:
        bbx = json.load(fp)

    # each grid-cell is about 5 meters * 5 meters * 5 meters
    xy_axis_grid_points = 100
    z_axis_grid_points = 20

    # create north_east_height grid
    # note that this is a left-handed coordinate system
    north_points = np.linspace(bbx['ul_northing'], bbx['lr_northing'], xy_axis_grid_points)
    east_points = np.linspace(bbx['ul_easting'], bbx['lr_easting'], xy_axis_grid_points)
    alt_points = np.linspace(bbx['alt_min'], bbx['alt_max'], z_axis_grid_points)
    north_points, east_points, alt_points = gen_grid(north_points, east_points, alt_points)

    # convert to lat lon
    lat_points, lon_points = eastnorth_to_latlon(east_points, north_points,
                                                  bbx['zone_number'], bbx['hemisphere'])

    # convert to local utm: origin at the lower-left corner of the bounding box
    xx_utm = east_points - bbx['ul_easting']
    yy_utm = north_points - bbx['lr_northing']
    zz_utm = alt_points

    latlonalt = np.hstack((lat_points, lon_points, alt_points))
    utm_local = np.hstack((xx_utm, yy_utm, zz_utm))

    # convert to the local east-north-up frame
    xx_enu, yy_enu, zz_enu = global_to_local(work_dir, lat_points, lon_points, alt_points)
    enu = np.hstack((xx_enu, yy_enu, zz_enu))

    return latlonalt, utm_local, enu
class CameraApprox(object):
    """Approximate each RPC camera model of a scene with a simpler camera model
    (affine or perspective), fitted on the discretized AOI volume produced by
    :func:`discretize_volume`. Results are written as JSON under
    ``<work_dir>/approx_camera``.
    """

    def __init__(self, work_dir):
        # work_dir layout (as read below): metas/ holds one JSON RPC file per
        # image; the .json extension is swapped for .png to get the image name
        self.work_dir = work_dir
        self.latlonalt, self.utm_local, self.enu = discretize_volume(work_dir)

        self.img_names = []
        self.rpc_models = []
        self.region_dicts = []
        metas_subdir = os.path.join(self.work_dir, 'metas/')
        # sorted() keeps img_names and rpc_models aligned deterministically
        for item in sorted(os.listdir(metas_subdir)):
            self.img_names.append(item[:-5] + '.png')
            with open(os.path.join(metas_subdir, item)) as fp:
                self.rpc_models.append(RPCModel(json.load(fp)))
        self.cnt = len(self.rpc_models)

        # Output directory for the approximated camera parameters
        self.out_dir = os.path.join(work_dir, 'approx_camera')
        if not os.path.exists(self.out_dir):
            os.mkdir(self.out_dir)

    def approx_affine_latlonalt(self):
        """Fit an 8-parameter affine camera per image in the (lat, lon, alt)
        frame; write 'affine_latlonalt.json' and 'bbx_latlonalt.json'.
        """
        logging.info('deriving an affine camera approximation...')
        logging.info('scene coordinate frame is in lat, lon, alt')
        lat_points = self.latlonalt[:, 0:1]
        lon_points = self.latlonalt[:, 1:2]
        alt_points = self.latlonalt[:, 2:3]
        affine_dict = {}
        for i in range(self.cnt):
            col, row = self.rpc_models[i].projection(lat_points, lon_points, alt_points)
            # make sure all the points lie inside the image
            width = self.rpc_models[i].width
            height = self.rpc_models[i].height
            keep_mask = np.logical_and(col >= 0, row >= 0)
            keep_mask = np.logical_and(keep_mask, col < width)
            keep_mask = np.logical_and(keep_mask, row < height)
            P = solve_affine(lat_points, lon_points, alt_points, col, row, keep_mask)
            # write to file: entry is [width, height, P0..P7]
            img_name = self.img_names[i]
            P = list(P.reshape((8,)))
            affine_dict[img_name] = [width, height] + P
        with open(os.path.join(self.out_dir, 'affine_latlonalt.json'), 'w') as fp:
            json.dump(affine_dict, fp, indent=2)
        # Bounding box of the fitted volume, in the same frame
        bbx = { 'lat_min': np.min(lat_points),
                'lat_max': np.max(lat_points),
                'lon_min': np.min(lon_points),
                'lon_max': np.max(lon_points),
                'alt_min': np.min(alt_points),
                'alt_max': np.max(alt_points)}
        with open(os.path.join(self.out_dir, 'bbx_latlonalt.json'), 'w') as fp:
            json.dump(bbx, fp, indent=2)

    def approx_perspective_enu(self):
        """Fit a perspective camera (K, R, t) per image in the local ENU frame;
        write 'perspective_enu.json', a per-image error report
        'perspective_enu_error.csv' and 'bbx_enu.json'.
        """
        logging.info('deriving a perspective camera approximation...')
        logging.info('scene coordinate frame is in ENU')
        perspective_dict = {}
        errors_txt = 'img_name, mean_proj_err (pixels), median_proj_err (pixels), max_proj_err (pixels), mean_inv_proj_err (meters), median_inv_proj_err (meters), max_inv_proj_err (meters)\n'
        lat_points = self.latlonalt[:, 0:1]
        lon_points = self.latlonalt[:, 1:2]
        alt_points = self.latlonalt[:, 2:3]
        xx = self.enu[:, 0:1]
        yy = self.enu[:, 1:2]
        zz = self.enu[:, 2:3]
        for i in range(self.cnt):
            # Project with the RPC (lat/lon/alt), fit the perspective model in ENU
            col, row = self.rpc_models[i].projection(lat_points, lon_points, alt_points)
            # make sure all the points lie inside the image
            width = self.rpc_models[i].width
            height = self.rpc_models[i].height
            keep_mask = np.logical_and(col >= 0, row >= 0)
            keep_mask = np.logical_and(keep_mask, col < width)
            keep_mask = np.logical_and(keep_mask, row < height)
            K, R, t = solve_perspective(xx, yy, zz, col, row, keep_mask)
            qvec = Quaternion(matrix=R)
            # fx, fy, cx, cy, s, qvec, t
            params = [width, height, K[0, 0], K[1, 1], K[0, 2], K[1, 2], K[0, 1],
                      qvec[0], qvec[1], qvec[2], qvec[3],
                      t[0, 0], t[1, 0], t[2, 0]]
            img_name = self.img_names[i]
            perspective_dict[img_name] = params
            # check approximation error
            tmp = check_perspective_error(xx, yy, zz, col, row, K, R, t, keep_mask)
            errors_txt += '{}, {}, {}, {}, {}, {}, {}\n'.format(img_name, tmp[0], tmp[1], tmp[2], tmp[3], tmp[4], tmp[5])
        with open(os.path.join(self.out_dir, 'perspective_enu.json'), 'w') as fp:
            json.dump(perspective_dict, fp, indent=2)
        with open(os.path.join(self.out_dir, 'perspective_enu_error.csv'), 'w') as fp:
            fp.write(errors_txt)
        # Bounding box of the fitted volume, in the ENU frame
        bbx = { 'xx_min': np.min(xx),
                'xx_max': np.max(xx),
                'yy_min': np.min(yy),
                'yy_max': np.max(yy),
                'zz_min': np.min(zz),
                'zz_max': np.max(zz)}
        with open(os.path.join(self.out_dir, 'bbx_enu.json'), 'w') as fp:
            json.dump(bbx, fp, indent=2)
# Module is intended to be imported and driven by callers; no standalone CLI.
if __name__ == '__main__':
    pass
| [
"os.mkdir",
"lib.solve_perspective.solve_perspective",
"lib.solve_affine.solve_affine",
"os.path.join",
"lib.gen_grid.gen_grid",
"os.path.exists",
"numpy.max",
"pyquaternion.Quaternion",
"numpy.linspace",
"json.dump",
"numpy.hstack",
"numpy.min",
"os.listdir",
"json.load",
"numpy.logical... | [((2617, 2651), 'os.path.join', 'os.path.join', (['work_dir', '"""aoi.json"""'], {}), "(work_dir, 'aoi.json')\n", (2629, 2651), False, 'import os\n'), ((3218, 3276), 'numpy.linspace', 'np.linspace', (['ul_northing', 'lr_northing', 'xy_axis_grid_points'], {}), '(ul_northing, lr_northing, xy_axis_grid_points)\n', (3229, 3276), True, 'import numpy as np\n'), ((3295, 3351), 'numpy.linspace', 'np.linspace', (['ul_easting', 'lr_easting', 'xy_axis_grid_points'], {}), '(ul_easting, lr_easting, xy_axis_grid_points)\n', (3306, 3351), True, 'import numpy as np\n'), ((3369, 3418), 'numpy.linspace', 'np.linspace', (['alt_min', 'alt_max', 'z_axis_grid_points'], {}), '(alt_min, alt_max, z_axis_grid_points)\n', (3380, 3418), True, 'import numpy as np\n'), ((3463, 3510), 'lib.gen_grid.gen_grid', 'gen_grid', (['north_points', 'east_points', 'alt_points'], {}), '(north_points, east_points, alt_points)\n', (3471, 3510), False, 'from lib.gen_grid import gen_grid\n'), ((3566, 3637), 'lib.latlon_utm_converter.eastnorth_to_latlon', 'eastnorth_to_latlon', (['east_points', 'north_points', 'zone_number', 'hemisphere'], {}), '(east_points, north_points, zone_number, hemisphere)\n', (3585, 3637), False, 'from lib.latlon_utm_converter import eastnorth_to_latlon\n'), ((3864, 3911), 'numpy.hstack', 'np.hstack', (['(lat_points, lon_points, alt_points)'], {}), '((lat_points, lon_points, alt_points))\n', (3873, 3911), True, 'import numpy as np\n'), ((3928, 3963), 'numpy.hstack', 'np.hstack', (['(xx_utm, yy_utm, zz_utm)'], {}), '((xx_utm, yy_utm, zz_utm))\n', (3937, 3963), True, 'import numpy as np\n'), ((3993, 4054), 'coordinate_system.global_to_local', 'global_to_local', (['work_dir', 'lat_points', 'lon_points', 'alt_points'], {}), '(work_dir, lat_points, lon_points, alt_points)\n', (4008, 4054), False, 'from coordinate_system import global_to_local\n'), ((4065, 4100), 'numpy.hstack', 'np.hstack', (['(xx_enu, yy_enu, zz_enu)'], {}), '((xx_enu, yy_enu, zz_enu))\n', (4074, 4100), 
True, 'import numpy as np\n'), ((2697, 2710), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (2706, 2710), False, 'import json\n'), ((4428, 4465), 'os.path.join', 'os.path.join', (['self.work_dir', '"""metas/"""'], {}), "(self.work_dir, 'metas/')\n", (4440, 4465), False, 'import os\n'), ((4766, 4805), 'os.path.join', 'os.path.join', (['work_dir', '"""approx_camera"""'], {}), "(work_dir, 'approx_camera')\n", (4778, 4805), False, 'import os\n'), ((4934, 4992), 'logging.info', 'logging.info', (['"""deriving an affine camera approximation..."""'], {}), "('deriving an affine camera approximation...')\n", (4946, 4992), False, 'import logging\n'), ((5001, 5059), 'logging.info', 'logging.info', (['"""scene coordinate frame is in lat, lon, alt"""'], {}), "('scene coordinate frame is in lat, lon, alt')\n", (5013, 5059), False, 'import logging\n'), ((6516, 6578), 'logging.info', 'logging.info', (['"""deriving a perspective camera approximation..."""'], {}), "('deriving a perspective camera approximation...')\n", (6528, 6578), False, 'import logging\n'), ((6587, 6635), 'logging.info', 'logging.info', (['"""scene coordinate frame is in ENU"""'], {}), "('scene coordinate frame is in ENU')\n", (6599, 6635), False, 'import logging\n'), ((4493, 4517), 'os.listdir', 'os.listdir', (['metas_subdir'], {}), '(metas_subdir)\n', (4503, 4517), False, 'import os\n'), ((4821, 4849), 'os.path.exists', 'os.path.exists', (['self.out_dir'], {}), '(self.out_dir)\n', (4835, 4849), False, 'import os\n'), ((4863, 4885), 'os.mkdir', 'os.mkdir', (['self.out_dir'], {}), '(self.out_dir)\n', (4871, 4885), False, 'import os\n'), ((5519, 5553), 'numpy.logical_and', 'np.logical_and', (['(col >= 0)', '(row >= 0)'], {}), '(col >= 0, row >= 0)\n', (5533, 5553), True, 'import numpy as np\n'), ((5578, 5616), 'numpy.logical_and', 'np.logical_and', (['keep_mask', '(col < width)'], {}), '(keep_mask, col < width)\n', (5592, 5616), True, 'import numpy as np\n'), ((5641, 5680), 'numpy.logical_and', 
'np.logical_and', (['keep_mask', '(row < height)'], {}), '(keep_mask, row < height)\n', (5655, 5680), True, 'import numpy as np\n'), ((5698, 5767), 'lib.solve_affine.solve_affine', 'solve_affine', (['lat_points', 'lon_points', 'alt_points', 'col', 'row', 'keep_mask'], {}), '(lat_points, lon_points, alt_points, col, row, keep_mask)\n', (5710, 5767), False, 'from lib.solve_affine import solve_affine\n'), ((6028, 6064), 'json.dump', 'json.dump', (['affine_dict', 'fp'], {'indent': '(2)'}), '(affine_dict, fp, indent=2)\n', (6037, 6064), False, 'import json\n'), ((6093, 6111), 'numpy.min', 'np.min', (['lat_points'], {}), '(lat_points)\n', (6099, 6111), True, 'import numpy as np\n'), ((6140, 6158), 'numpy.max', 'np.max', (['lat_points'], {}), '(lat_points)\n', (6146, 6158), True, 'import numpy as np\n'), ((6187, 6205), 'numpy.min', 'np.min', (['lon_points'], {}), '(lon_points)\n', (6193, 6205), True, 'import numpy as np\n'), ((6234, 6252), 'numpy.max', 'np.max', (['lon_points'], {}), '(lon_points)\n', (6240, 6252), True, 'import numpy as np\n'), ((6281, 6299), 'numpy.min', 'np.min', (['alt_points'], {}), '(alt_points)\n', (6287, 6299), True, 'import numpy as np\n'), ((6328, 6346), 'numpy.max', 'np.max', (['alt_points'], {}), '(alt_points)\n', (6334, 6346), True, 'import numpy as np\n'), ((6440, 6468), 'json.dump', 'json.dump', (['bbx', 'fp'], {'indent': '(2)'}), '(bbx, fp, indent=2)\n', (6449, 6468), False, 'import json\n'), ((7385, 7419), 'numpy.logical_and', 'np.logical_and', (['(col >= 0)', '(row >= 0)'], {}), '(col >= 0, row >= 0)\n', (7399, 7419), True, 'import numpy as np\n'), ((7444, 7482), 'numpy.logical_and', 'np.logical_and', (['keep_mask', '(col < width)'], {}), '(keep_mask, col < width)\n', (7458, 7482), True, 'import numpy as np\n'), ((7507, 7546), 'numpy.logical_and', 'np.logical_and', (['keep_mask', '(row < height)'], {}), '(keep_mask, row < height)\n', (7521, 7546), True, 'import numpy as np\n'), ((7570, 7620), 'lib.solve_perspective.solve_perspective', 
'solve_perspective', (['xx', 'yy', 'zz', 'col', 'row', 'keep_mask'], {}), '(xx, yy, zz, col, row, keep_mask)\n', (7587, 7620), False, 'from lib.solve_perspective import solve_perspective\n'), ((7641, 7661), 'pyquaternion.Quaternion', 'Quaternion', ([], {'matrix': 'R'}), '(matrix=R)\n', (7651, 7661), False, 'from pyquaternion import Quaternion\n'), ((8043, 8108), 'lib.check_error.check_perspective_error', 'check_perspective_error', (['xx', 'yy', 'zz', 'col', 'row', 'K', 'R', 't', 'keep_mask'], {}), '(xx, yy, zz, col, row, K, R, t, keep_mask)\n', (8066, 8108), False, 'from lib.check_error import check_perspective_error\n'), ((8326, 8367), 'json.dump', 'json.dump', (['perspective_dict', 'fp'], {'indent': '(2)'}), '(perspective_dict, fp, indent=2)\n', (8335, 8367), False, 'import json\n'), ((8516, 8526), 'numpy.min', 'np.min', (['xx'], {}), '(xx)\n', (8522, 8526), True, 'import numpy as np\n'), ((8554, 8564), 'numpy.max', 'np.max', (['xx'], {}), '(xx)\n', (8560, 8564), True, 'import numpy as np\n'), ((8592, 8602), 'numpy.min', 'np.min', (['yy'], {}), '(yy)\n', (8598, 8602), True, 'import numpy as np\n'), ((8630, 8640), 'numpy.max', 'np.max', (['yy'], {}), '(yy)\n', (8636, 8640), True, 'import numpy as np\n'), ((8668, 8678), 'numpy.min', 'np.min', (['zz'], {}), '(zz)\n', (8674, 8678), True, 'import numpy as np\n'), ((8706, 8716), 'numpy.max', 'np.max', (['zz'], {}), '(zz)\n', (8712, 8716), True, 'import numpy as np\n'), ((8804, 8832), 'json.dump', 'json.dump', (['bbx', 'fp'], {'indent': '(2)'}), '(bbx, fp, indent=2)\n', (8813, 8832), False, 'import json\n'), ((5951, 6002), 'os.path.join', 'os.path.join', (['self.out_dir', '"""affine_latlonalt.json"""'], {}), "(self.out_dir, 'affine_latlonalt.json')\n", (5963, 6002), False, 'import os\n'), ((6366, 6414), 'os.path.join', 'os.path.join', (['self.out_dir', '"""bbx_latlonalt.json"""'], {}), "(self.out_dir, 'bbx_latlonalt.json')\n", (6378, 6414), False, 'import os\n'), ((8250, 8300), 'os.path.join', 'os.path.join', 
(['self.out_dir', '"""perspective_enu.json"""'], {}), "(self.out_dir, 'perspective_enu.json')\n", (8262, 8300), False, 'import os\n'), ((8387, 8442), 'os.path.join', 'os.path.join', (['self.out_dir', '"""perspective_enu_error.csv"""'], {}), "(self.out_dir, 'perspective_enu_error.csv')\n", (8399, 8442), False, 'import os\n'), ((8736, 8778), 'os.path.join', 'os.path.join', (['self.out_dir', '"""bbx_enu.json"""'], {}), "(self.out_dir, 'bbx_enu.json')\n", (8748, 8778), False, 'import os\n'), ((4596, 4628), 'os.path.join', 'os.path.join', (['metas_subdir', 'item'], {}), '(metas_subdir, item)\n', (4608, 4628), False, 'import os\n'), ((4685, 4698), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (4694, 4698), False, 'import json\n')] |
# -*- coding: utf-8 -*-
r"""Data handling
"""
import copy
import numbers
import warnings
import numpy as np
from collections import OrderedDict
from multiprocessing import cpu_count, Pool # @UnresolvedImport
from .analysis import Analysis
from .plot import PlotData
def _call_bin_parallel(arg, **kwarg):
r"""Wrapper function to work around pickling problem in Python 2.7
"""
return Data._bin_parallel(*arg, **kwarg)
class Data(PlotData, Analysis):
u"""Data class for handling multi-dimensional scattering data. If input
file type is not supported, data can be entered manually.
Parameters
----------
Q : array_like, optional
Default: None. **Q** in a column oriented array of [*q*\ :sub:`x`,
*q*\ :sub:`y`, *q*\ :sub:`z`, *ℏω*, *T*]
h : ndarray or float, optional
Default: 0. Array of Q\ :sub:`x` in reciprocal lattice units.
k : ndarray or float, optional
Default: 0. Array of Q\ :sub:`y` in reciprocal lattice units.
l : ndarray or float, optional
Default: 0. Array of Q\ :sub:`x` in reciprocal lattice units.
e : ndarray or float, optional
Default: 0. Array of ℏω in meV.
temp : ndarray or float, optional
Default: 0. Array of sample temperatures in K.
detector : ndarray or float, optional
Default: 0. Array of measured counts on detector.
monitor : ndarray or float, optional
Default: 0. Array of measured counts on monitor.
time : ndarray or float, optional
Default: 0. Array of time per point in minutes.
time_norm : bool, optional
Default: False. If True, calls to :attr:`intensity` and :attr:`error`
with normalize to time instead of monitor
Attributes
----------
h
k
l
e
temp
intensity
error
detailed_balance_factor
Methods
-------
bin
combine_data
subtract_background
integrate
position
width
estimate_background
subtract_background
dynamic_susceptibility
scattering_function
plot
plot_volume
plot_contour
plot_line
get_keys
get_bounds
"""
def __init__(self, Q=None, h=0., k=0., l=0., e=0., temp=0., detector=0., monitor=0., error=None, time=0.,
time_norm=False, **kwargs):
self._data = OrderedDict()
self.data_keys = {'monitor': 'monitor', 'detector': 'detector', 'time': 'time'}
self.Q_keys = {'h': 'h', 'k': 'k', 'l': 'l', 'e': 'e', 'temp': 'temp'}
if Q is None:
try:
n_dim = max([len(item) for item in (h, k, l, e, temp, detector, monitor, time) if
not isinstance(item, numbers.Number)])
except (ValueError, UnboundLocalError):
n_dim = 1
self.Q = np.empty((n_dim, 5))
for arg, key in zip((h, k, l, e, temp), ('h', 'k', 'l', 'e', 'temp')):
if isinstance(arg, numbers.Number):
arg = np.array([arg] * n_dim)
try:
self._data[self.Q_keys[key]] = np.array(arg)
except (ValueError, KeyError):
raise
else:
self.Q = Q
n_dim = Q.shape[1]
for arg, key in zip((detector, monitor, time), ('detector', 'monitor', 'time')):
if isinstance(arg, numbers.Number):
arg = np.array([arg] * n_dim)
self._data[self.data_keys[key]] = np.array(arg)
self.m0 = np.nanmax(self.monitor)
self.t0 = np.nanmax(self.time)
self.time_norm = time_norm
if error is not None:
self.error = error
for key, value in kwargs.items():
setattr(self, key, value)
def __add__(self, right):
try:
return self.combine_data(right, ret=True)
except TypeError:
raise
def __iadd__(self, right):
try:
self.combine_data(right, ret=False)
except TypeError:
raise
def __sub__(self, right):
try:
return self.subtract_background(right, ret=True)
except (TypeError, ValueError):
raise
def __isub__(self, right):
try:
self.subtract_background(right, ret=False)
except (TypeError, ValueError):
raise
def __mul__(self, right):
temp_obj = copy.deepcopy(self)
temp_obj.detector = self.detector * right
return temp_obj
def __imul__(self, right):
self.detector *= right
def __div__(self, right):
temp_obj = copy.deepcopy(self)
temp_obj.detector = self.detector / right
return temp_obj
def __idiv__(self, right):
self.detector /= right
def __truediv__(self, right):
temp_obj = copy.deepcopy(self)
temp_obj.detector = self.detector / right
return temp_obj
def __itruediv__(self, right):
self.detector /= right
def __floordiv__(self, right):
temp_obj = copy.deepcopy(self)
temp_obj.detector = self.detector // right
return temp_obj
def __ifloordiv__(self, other):
self.detector //= right
def __pow__(self, right):
temp_obj = copy.deepcopy(self)
temp_obj.detector **= right
return temp_obj
def __eq__(self, right):
if not np.all(sorted(list(self.data.keys())) == sorted(list(right.data.keys()))):
return False
for key, value in self.data.items():
if not np.all(value == right.data[key]):
return False
return True
def __ne__(self, right):
return not self.__eq__(right)
@property
def Q(self):
r"""Returns a Q matrix with columns h,k,l,e,temp
"""
return np.vstack((self.data[self.Q_keys[i]].flatten() for i in ['h', 'k', 'l', 'e', 'temp'])).T
@Q.setter
def Q(self, value):
for col, key in zip(value.T, ['h', 'k', 'l', 'e', 'temp']):
self._data[self.Q_keys[key]] = col
@property
def detector(self):
r"""Returns the raw counts on the detector
"""
return self.data[self.data_keys['detector']]
@detector.setter
def detector(self, value):
self.data[self.data_keys['detector']] = value
@property
def monitor(self):
r"""Returns the monitor
"""
return self.data[self.data_keys['monitor']]
@monitor.setter
def monitor(self, value):
self.data[self.data_keys['monitor']] = value
@property
def time(self):
r"""Returns the time measured
"""
return self.data[self.data_keys['time']]
@time.setter
def time(self, value):
self.data[self.data_keys['time']] = value
@property
def h(self):
r"""Returns lattice parameter q\ :sub:`x`\ , *i.e.* h
Equivalent to Q[:, 0]
"""
return self.data[self.Q_keys['h']]
@h.setter
def h(self, value):
r"""Set h to appropriate column of Q
"""
if isinstance(value, numbers.Number):
value = np.array([value] * self.data[self.data_keys['detector']].shape[0])
if value.shape != self.data[self.data_keys['detector']].shape:
raise ValueError("""Input value must have the shape ({0},) or be a float.""".format(
self.data[self.data_keys['detector']].shape))
else:
self.data[self.Q_keys['h']] = np.array(value)
@property
def k(self):
r"""Returns lattice parameter q\ :sub:`y`\ , *i.e.* k
Equivalent to Q[:, 1]
"""
return self.data[self.Q_keys['k']]
@k.setter
def k(self, value):
r"""Set k to appropriate column of Q
"""
if isinstance(value, numbers.Number):
value = np.array([value] * self.data[self.data_keys['detector']].shape[0])
if value.shape != self.data[self.data_keys['detector']].shape:
raise ValueError("""Input value must have the shape ({0},) or be a float.""".format(
self.data[self.data_keys['detector']].shape))
else:
self.data[self.Q_keys['k']] = np.array(value)
@property
def l(self):
r"""Returns lattice parameter q\ :sub:`z`\ , *i.e.* l
Equivalent to Q[:, 2]
"""
return self.data[self.Q_keys['l']]
@l.setter
def l(self, value):
r"""Set l to appropriate column of Q
"""
if isinstance(value, numbers.Number):
value = value = np.array([value] * self.data[self.data_keys['detector']].shape[0])
if value.shape != self.data[self.data_keys['detector']].shape:
raise ValueError("""Input value must have the shape ({0},) or be a float.""".format(
self.data[self.data_keys['detector']].shape))
else:
self.data[self.Q_keys['l']] = np.array(value)
@property
def e(self):
r"""Returns energy transfer
Equivalent to Q[:, 3]
"""
return self.data[self.Q_keys['e']]
@e.setter
def e(self, value):
r"""Set e to appropriate column of Q
"""
if isinstance(value, numbers.Number):
value = np.array([value] * self.data[self.data_keys['detector']].shape[0])
if value.shape != self.data[self.data_keys['detector']].shape:
raise ValueError("""Input value must have the shape ({0},) or be a float.""".format(
self.data[self.data_keys['detector']].shape))
else:
self.data[self.Q_keys['e']] = np.array(value)
@property
def temp(self):
r"""Returns temperature
Equivalent to Q[:, 4]
"""
return self.data[self.Q_keys['temp']]
@temp.setter
def temp(self, value):
r"""Set temp to appropriate column of Q
"""
if isinstance(value, numbers.Number):
value = np.array([value] * self.data[self.data_keys['detector']].shape[0])
if value.shape != self.data[self.data_keys['detector']].shape:
raise ValueError("""Input value must have the shape ({0},) or be a float.""".format(
self.data[self.data_keys['detector']].shape))
else:
self.data[self.Q_keys['temp']] = np.array(value)
@property
def intensity(self):
r"""Returns the monitor or time normalized intensity
"""
if self.time_norm:
if self.t0 == 0:
self.t0 = np.nanmax(self.time)
return self.detector / self.time * self.t0
else:
if self.m0 == 0:
self.m0 = np.nanmax(self.monitor)
return self.detector / self.monitor * self.m0
@property
def error(self):
r"""Returns error of monitor or time normalized intensity
"""
try:
if self._err is not None:
err = self._err
else:
err = np.sqrt(self.detector)
except AttributeError:
self._err = None
err = np.sqrt(self.detector)
if self.time_norm:
if self.t0 == 0:
self.t0 = np.nanmax(self.time)
return err / self.time * self.t0
else:
if self.m0 == 0:
self.m0 = np.nanmax(self.monitor)
return err / self.monitor * self.m0
@error.setter
def error(self, value):
r"""Set error in detector counts
"""
if isinstance(value, numbers.Number):
value = np.array([value] * self.detector.shape[0])
if value.shape != self.detector.shape:
raise ValueError("""Input value must have the shape ({0},) or be a float.""".format(self.detector.shape[0]))
self._err = value
@property
def data(self):
r"""Returns all of the raw data in column format
"""
return self._data
@data.setter
def data(self, value):
self._data = value
@property
def data_columns(self):
r"""Returns a list of the raw data columns
"""
return list(self.data.keys())
def combine_data(self, obj, **kwargs):
r"""Combines multiple data sets
Parameters
----------
obj : Data_object
Data_object with equivalent data columns
tols : ndarray or float, optional
Tolerances for combining two data sets. Default: 5e-4.
ret : bool, optional
Return the combined data set, or merge. Default: False
"""
if not isinstance(obj, Data):
raise TypeError('You can only combine two Data objects: input object is the wrong format!')
tols = np.array([5.e-4 for i in range(len(obj._data) - len(self.data_keys))])
try:
if kwargs['tols'] is not None:
tols = np.array(kwargs['tols'])
except KeyError:
pass
# combine
_data_temp = copy.deepcopy(self._data)
for i in range(len(obj._data[obj.data_keys['detector']])):
new_vals = np.array([val[i] for k, val in obj._data.items() if k not in list(obj.data_keys.values())])
for j in range(len(self._data[self.data_keys['detector']])):
orig_vals = np.array(
[val[j] for k, val in self._data.items() if k not in list(self.data_keys.values())])
if (np.abs(orig_vals - new_vals) <= tols).all():
for _key, _value in _data_temp.items():
if _key in list(self.data_keys.values()):
_data_temp[_key][j] += obj._data[_key][i]
break
else:
for _key, _value in _data_temp.items():
_data_temp[_key] = np.concatenate((_value, np.array([obj._data[_key][i]])))
# sort
ind = np.lexsort(tuple(value for key, value in _data_temp.items() if key not in list(self.data_keys.values())))
_data = OrderedDict()
for key, value in _data_temp.items():
_data[key] = value[ind]
if 'ret' in kwargs and kwargs['ret']:
output = Data()
output._data = _data
return output
else:
self._data = _data
def subtract_background(self, background_data, x=None, ret=True):
r"""Subtract background data.
Parameters
----------
background_data : Data object
Data object containing the data wishing to be subtracted
x : str, optional
data_column key of x-axis values over which background should be
subtracted. Used for cases where background data is not taken at
exactly same points as data being subtracted. Default: None
ret : bool, optional
Set False if background should be subtracted in place.
Default: True
Returns
-------
data : Data object
Data object contained subtracted data
"""
if not isinstance(background_data, Data):
raise TypeError('You can only combine two Data objects: input object is the wrong format!')
if self.time_norm != background_data.time_norm:
warnings.warn(
'Normalization of detector is different. One is normalized to time, and the other to monitor.')
if x is None:
try:
_new_intensity = self.intensity - background_data.intensity
_new_error = np.sqrt(
np.array([np.max([err1 ** 2, err2 ** 2]) for err1, err2 in zip(self.error, background_data.error)]))
except ValueError:
raise ValueError(
'Data objects are incompatible shapes: try subtract_background method for more options')
else:
try:
bg_x = background_data.data[x]
self_x = self.data[x]
except KeyError:
try:
bg_x = background_data.data[self.Q_keys[x]]
self_x = self.data[self.Q_keys[x]]
except (AttributeError, KeyError):
raise KeyError('Invalid key for data_column.')
try:
from scipy.interpolate import griddata
bg_intensity_grid = griddata(bg_x, background_data.intensity, self_x, method='nearest')
bg_error_grid = np.sqrt(griddata(bg_x, background_data.error ** 2, self_x, method='nearest'))
except ImportError:
warnings.warn('Background subtraction failed. Scipy Import Error, use more recent version of Python')
if ret:
return self
_new_intensity = self.intensity - bg_intensity_grid.flatten()
_new_error = np.sqrt(
np.array([np.max([err1 ** 2, err2 ** 2]) for err1, err2 in zip(self.error, bg_error_grid.flatten())]))
_sub_data = copy.copy(self.data)
_sub_data[self.data_keys['detector']] = _new_intensity
_sub_data[self.data_keys['monitor']] = np.ones(_new_intensity.shape)
_sub_data[self.data_keys['time']] = np.ones(_new_intensity.shape)
if ret:
data_obj = copy.deepcopy(self)
data_obj.t0 = 1
data_obj.m0 = 1
data_obj._err = _new_error
data_obj._data = _sub_data
return data_obj
else:
self.t0 = 1
self.m0 = 1
self._err = _new_error
self._data = _sub_data
def _bin_parallel(self, Q_chunk):
r"""Performs binning by finding data chunks to bin together.
Private function for performing binning in parallel using
multiprocessing library
Parameters
----------
Q_chunk : ndarray
Chunk of Q over which the binning will be performed
Returns
-------
(monitor, detector, temps) : tup of ndarrays
New monitor, detector, and temps of the binned data
"""
error = np.empty(Q_chunk.shape[0])
data_out = tuple(np.empty(Q_chunk.shape[0]) for key in self.data.keys() if key not in self.bin_keys)
for i, _Q_chunk in enumerate(Q_chunk):
_Q = np.vstack((self._data[key].flatten() for key in self.bin_keys)).T
_data_out = tuple(value for key, value in self._data.items() if key not in self.bin_keys)
_err = self.error
above = _Q_chunk + np.array(self._qstep, dtype=float) / 2.
below = _Q_chunk - np.array(self._qstep, dtype=float) / 2.
bin_ind = np.where(((_Q <= above).all(axis=1) & (_Q >= below).all(axis=1)))
if len(bin_ind[0]) > 0:
for j in range(len(data_out)):
data_out[j][i] = np.average(_data_out[j][bin_ind])
error[i] = np.sqrt(np.average(_err[bin_ind] ** 2))
else:
for j in range(len(data_out)):
data_out[j][i] = np.nan
error[i] = np.nan
return data_out + (error,)
def bin(self, to_bin, build_hkl=True):
r"""Rebin the data into the specified shape.
Parameters
----------
to_bin : dict
A dictionary containing information about which data_column
should be binned in the following format:
`'key': [lower_bound, upper_bound, num_points]`
Any key in `data_column` is a valid key. Those keys from
`data_column` not included in `to_bin` are averaged.
build_hkl : bool, optional
Toggle to build hkle. Must already have hkle built in object you
are binning. Default: True
Returns
-------
binned_data : :class:`.Data` object
The resulting data object with values binned to the specified
bounds
"""
_bin_keys = list(to_bin.keys())
if build_hkl:
for key, value in self.Q_keys.items():
if key in _bin_keys:
_bin_keys.remove(key)
_bin_keys.append(value)
self.bin_keys = copy.copy(_bin_keys)
args = tuple()
for key in self.bin_keys:
try:
args += to_bin[key],
except KeyError:
if key in self.Q_keys.values():
args += [self.data[key].min(), self.data[key].max(), 1],
else:
raise KeyError
q, qstep = tuple(), tuple()
for arg in args:
if arg[-1] == 1:
_q, _qstep = (np.array([np.average(arg[:2])]), (arg[1] - arg[0]))
else:
_q, _qstep = np.linspace(arg[0], arg[1], arg[2], retstep=True)
q += _q,
qstep += _qstep,
self._qstep = qstep
Q = np.meshgrid(*q)
Q = np.vstack((item.flatten() for item in Q)).T
nprocs = cpu_count()
Q_chunks = [Q[n * Q.shape[0] // nprocs:(n + 1) * Q.shape[0] // nprocs] for n in range(nprocs)]
pool = Pool(processes=nprocs)
outputs = pool.map(_call_bin_parallel, zip([self] * len(Q_chunks), Q_chunks))
pool.close()
_data_out = [np.concatenate(arg) for arg in zip(*outputs)]
data_out = tuple()
del_nan = np.where(np.isnan(_data_out[0]))
for arg in _data_out:
data_out += np.delete(arg, del_nan, axis=0),
Q = np.delete(Q, del_nan, axis=0)
_data = copy.copy(self._data)
n = 0
for key in _data.keys():
if key not in self.bin_keys:
_data[key] = data_out[n]
n += 1
else:
_data[key] = Q[:, self.bin_keys.index(key)]
output = copy.deepcopy(self)
output._data = _data
output._err = data_out[-1]
return output
| [
"numpy.abs",
"numpy.empty",
"numpy.ones",
"numpy.isnan",
"multiprocessing.cpu_count",
"numpy.meshgrid",
"numpy.max",
"numpy.linspace",
"copy.deepcopy",
"numpy.average",
"scipy.interpolate.griddata",
"multiprocessing.Pool",
"numpy.delete",
"numpy.concatenate",
"numpy.all",
"numpy.nanmax... | [((2328, 2341), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2339, 2341), False, 'from collections import OrderedDict\n'), ((3512, 3535), 'numpy.nanmax', 'np.nanmax', (['self.monitor'], {}), '(self.monitor)\n', (3521, 3535), True, 'import numpy as np\n'), ((3554, 3574), 'numpy.nanmax', 'np.nanmax', (['self.time'], {}), '(self.time)\n', (3563, 3574), True, 'import numpy as np\n'), ((4404, 4423), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (4417, 4423), False, 'import copy\n'), ((4611, 4630), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (4624, 4630), False, 'import copy\n'), ((4822, 4841), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (4835, 4841), False, 'import copy\n'), ((5038, 5057), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (5051, 5057), False, 'import copy\n'), ((5252, 5271), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (5265, 5271), False, 'import copy\n'), ((12981, 13006), 'copy.deepcopy', 'copy.deepcopy', (['self._data'], {}), '(self._data)\n', (12994, 13006), False, 'import copy\n'), ((14014, 14027), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (14025, 14027), False, 'from collections import OrderedDict\n'), ((16989, 17009), 'copy.copy', 'copy.copy', (['self.data'], {}), '(self.data)\n', (16998, 17009), False, 'import copy\n'), ((17120, 17149), 'numpy.ones', 'np.ones', (['_new_intensity.shape'], {}), '(_new_intensity.shape)\n', (17127, 17149), True, 'import numpy as np\n'), ((17194, 17223), 'numpy.ones', 'np.ones', (['_new_intensity.shape'], {}), '(_new_intensity.shape)\n', (17201, 17223), True, 'import numpy as np\n'), ((18092, 18118), 'numpy.empty', 'np.empty', (['Q_chunk.shape[0]'], {}), '(Q_chunk.shape[0])\n', (18100, 18118), True, 'import numpy as np\n'), ((20200, 20220), 'copy.copy', 'copy.copy', (['_bin_keys'], {}), '(_bin_keys)\n', (20209, 20220), False, 'import copy\n'), ((20906, 20921), 'numpy.meshgrid', 
'np.meshgrid', (['*q'], {}), '(*q)\n', (20917, 20921), True, 'import numpy as np\n'), ((20996, 21007), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (21005, 21007), False, 'from multiprocessing import cpu_count, Pool\n'), ((21126, 21148), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'nprocs'}), '(processes=nprocs)\n', (21130, 21148), False, 'from multiprocessing import cpu_count, Pool\n'), ((21503, 21532), 'numpy.delete', 'np.delete', (['Q', 'del_nan'], {'axis': '(0)'}), '(Q, del_nan, axis=0)\n', (21512, 21532), True, 'import numpy as np\n'), ((21550, 21571), 'copy.copy', 'copy.copy', (['self._data'], {}), '(self._data)\n', (21559, 21571), False, 'import copy\n'), ((21820, 21839), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (21833, 21839), False, 'import copy\n'), ((2815, 2835), 'numpy.empty', 'np.empty', (['(n_dim, 5)'], {}), '((n_dim, 5))\n', (2823, 2835), True, 'import numpy as np\n'), ((3479, 3492), 'numpy.array', 'np.array', (['arg'], {}), '(arg)\n', (3487, 3492), True, 'import numpy as np\n'), ((7124, 7190), 'numpy.array', 'np.array', (["([value] * self.data[self.data_keys['detector']].shape[0])"], {}), "([value] * self.data[self.data_keys['detector']].shape[0])\n", (7132, 7190), True, 'import numpy as np\n'), ((7479, 7494), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (7487, 7494), True, 'import numpy as np\n'), ((7837, 7903), 'numpy.array', 'np.array', (["([value] * self.data[self.data_keys['detector']].shape[0])"], {}), "([value] * self.data[self.data_keys['detector']].shape[0])\n", (7845, 7903), True, 'import numpy as np\n'), ((8192, 8207), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (8200, 8207), True, 'import numpy as np\n'), ((8558, 8624), 'numpy.array', 'np.array', (["([value] * self.data[self.data_keys['detector']].shape[0])"], {}), "([value] * self.data[self.data_keys['detector']].shape[0])\n", (8566, 8624), True, 'import numpy as np\n'), ((8913, 8928), 'numpy.array', 'np.array', 
(['value'], {}), '(value)\n', (8921, 8928), True, 'import numpy as np\n'), ((9245, 9311), 'numpy.array', 'np.array', (["([value] * self.data[self.data_keys['detector']].shape[0])"], {}), "([value] * self.data[self.data_keys['detector']].shape[0])\n", (9253, 9311), True, 'import numpy as np\n'), ((9600, 9615), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (9608, 9615), True, 'import numpy as np\n'), ((9943, 10009), 'numpy.array', 'np.array', (["([value] * self.data[self.data_keys['detector']].shape[0])"], {}), "([value] * self.data[self.data_keys['detector']].shape[0])\n", (9951, 10009), True, 'import numpy as np\n'), ((10301, 10316), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (10309, 10316), True, 'import numpy as np\n'), ((11559, 11601), 'numpy.array', 'np.array', (['([value] * self.detector.shape[0])'], {}), '([value] * self.detector.shape[0])\n', (11567, 11601), True, 'import numpy as np\n'), ((15266, 15385), 'warnings.warn', 'warnings.warn', (['"""Normalization of detector is different. One is normalized to time, and the other to monitor."""'], {}), "(\n 'Normalization of detector is different. 
One is normalized to time, and the other to monitor.'\n )\n", (15279, 15385), False, 'import warnings\n'), ((17264, 17283), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (17277, 17283), False, 'import copy\n'), ((21278, 21297), 'numpy.concatenate', 'np.concatenate', (['arg'], {}), '(arg)\n', (21292, 21297), True, 'import numpy as np\n'), ((21379, 21401), 'numpy.isnan', 'np.isnan', (['_data_out[0]'], {}), '(_data_out[0])\n', (21387, 21401), True, 'import numpy as np\n'), ((3409, 3432), 'numpy.array', 'np.array', (['([arg] * n_dim)'], {}), '([arg] * n_dim)\n', (3417, 3432), True, 'import numpy as np\n'), ((5542, 5574), 'numpy.all', 'np.all', (['(value == right.data[key])'], {}), '(value == right.data[key])\n', (5548, 5574), True, 'import numpy as np\n'), ((10514, 10534), 'numpy.nanmax', 'np.nanmax', (['self.time'], {}), '(self.time)\n', (10523, 10534), True, 'import numpy as np\n'), ((10659, 10682), 'numpy.nanmax', 'np.nanmax', (['self.monitor'], {}), '(self.monitor)\n', (10668, 10682), True, 'import numpy as np\n'), ((10979, 11001), 'numpy.sqrt', 'np.sqrt', (['self.detector'], {}), '(self.detector)\n', (10986, 11001), True, 'import numpy as np\n'), ((11080, 11102), 'numpy.sqrt', 'np.sqrt', (['self.detector'], {}), '(self.detector)\n', (11087, 11102), True, 'import numpy as np\n'), ((11186, 11206), 'numpy.nanmax', 'np.nanmax', (['self.time'], {}), '(self.time)\n', (11195, 11206), True, 'import numpy as np\n'), ((11321, 11344), 'numpy.nanmax', 'np.nanmax', (['self.monitor'], {}), '(self.monitor)\n', (11330, 11344), True, 'import numpy as np\n'), ((12874, 12898), 'numpy.array', 'np.array', (["kwargs['tols']"], {}), "(kwargs['tols'])\n", (12882, 12898), True, 'import numpy as np\n'), ((16355, 16422), 'scipy.interpolate.griddata', 'griddata', (['bg_x', 'background_data.intensity', 'self_x'], {'method': '"""nearest"""'}), "(bg_x, background_data.intensity, self_x, method='nearest')\n", (16363, 16422), False, 'from scipy.interpolate import griddata\n'), 
((18144, 18170), 'numpy.empty', 'np.empty', (['Q_chunk.shape[0]'], {}), '(Q_chunk.shape[0])\n', (18152, 18170), True, 'import numpy as np\n'), ((20764, 20813), 'numpy.linspace', 'np.linspace', (['arg[0]', 'arg[1]', 'arg[2]'], {'retstep': '(True)'}), '(arg[0], arg[1], arg[2], retstep=True)\n', (20775, 20813), True, 'import numpy as np\n'), ((21457, 21488), 'numpy.delete', 'np.delete', (['arg', 'del_nan'], {'axis': '(0)'}), '(arg, del_nan, axis=0)\n', (21466, 21488), True, 'import numpy as np\n'), ((2998, 3021), 'numpy.array', 'np.array', (['([arg] * n_dim)'], {}), '([arg] * n_dim)\n', (3006, 3021), True, 'import numpy as np\n'), ((3094, 3107), 'numpy.array', 'np.array', (['arg'], {}), '(arg)\n', (3102, 3107), True, 'import numpy as np\n'), ((16463, 16531), 'scipy.interpolate.griddata', 'griddata', (['bg_x', '(background_data.error ** 2)', 'self_x'], {'method': '"""nearest"""'}), "(bg_x, background_data.error ** 2, self_x, method='nearest')\n", (16471, 16531), False, 'from scipy.interpolate import griddata\n'), ((16581, 16692), 'warnings.warn', 'warnings.warn', (['"""Background subtraction failed. Scipy Import Error, use more recent version of Python"""'], {}), "(\n 'Background subtraction failed. 
Scipy Import Error, use more recent version of Python'\n )\n", (16594, 16692), False, 'import warnings\n'), ((18523, 18557), 'numpy.array', 'np.array', (['self._qstep'], {'dtype': 'float'}), '(self._qstep, dtype=float)\n', (18531, 18557), True, 'import numpy as np\n'), ((18594, 18628), 'numpy.array', 'np.array', (['self._qstep'], {'dtype': 'float'}), '(self._qstep, dtype=float)\n', (18602, 18628), True, 'import numpy as np\n'), ((18844, 18877), 'numpy.average', 'np.average', (['_data_out[j][bin_ind]'], {}), '(_data_out[j][bin_ind])\n', (18854, 18877), True, 'import numpy as np\n'), ((18913, 18943), 'numpy.average', 'np.average', (['(_err[bin_ind] ** 2)'], {}), '(_err[bin_ind] ** 2)\n', (18923, 18943), True, 'import numpy as np\n'), ((16875, 16905), 'numpy.max', 'np.max', (['[err1 ** 2, err2 ** 2]'], {}), '([err1 ** 2, err2 ** 2])\n', (16881, 16905), True, 'import numpy as np\n'), ((13425, 13453), 'numpy.abs', 'np.abs', (['(orig_vals - new_vals)'], {}), '(orig_vals - new_vals)\n', (13431, 13453), True, 'import numpy as np\n'), ((13829, 13859), 'numpy.array', 'np.array', (['[obj._data[_key][i]]'], {}), '([obj._data[_key][i]])\n', (13837, 13859), True, 'import numpy as np\n'), ((15577, 15607), 'numpy.max', 'np.max', (['[err1 ** 2, err2 ** 2]'], {}), '([err1 ** 2, err2 ** 2])\n', (15583, 15607), True, 'import numpy as np\n'), ((20675, 20694), 'numpy.average', 'np.average', (['arg[:2]'], {}), '(arg[:2])\n', (20685, 20694), True, 'import numpy as np\n')] |
import numpy as np
import medpy.metric.binary as mmb
import io_utils as io
import data_utils
import os
def evaluate_mmwhs(data_dir, data_list, model, label_ids, id_to_ignore = 0):
# Evaluation function
# Instead of re-training the model and reporting mean/std based on different random seeds, instead we just
# report the mean/std based on the four test images. This seems to be the method used in
# https://github.com/cchen-cc/SIFA/blob/master/evaluate.py
# Thus, for each test image we obtain prediction labels, and find DICE and ASSM results
# We make use of the pre-processing we did. Test tf-records 0..255 will correspond to image1,
# test tf-records 256..511 will correspond to image2 etc.
# Initialize result dicts
dice = {}
assd = {}
for label in label_ids:
if label_ids[label] == id_to_ignore:
continue
dice[label] = []
assd[label] = []
# Find results for each class
for test_image in range(4):
# The 3D labels and our predictions
y_true = []
y_hat = []
for i in range(test_image * 256, (test_image+1) * 256):
# We read in the i'th slice and compute predictions
X_slice,y_true_slice = io.sample_batch(data_dir, [data_list[i]], 1)
y_true.append(np.copy(y_true_slice))
y_hat.append(model.predict(X_slice))
y_true = np.array(y_true).reshape(256,256,256)
y_hat = np.array(y_hat).reshape(256,256,256)
print(y_true.shape, y_hat.shape)
for label in label_ids:
if label_ids[label] == id_to_ignore:
continue
# Prep data and compute metrics
curr_y_true = np.copy(y_true)
curr_y_true[curr_y_true != label_ids[label]] = 0
curr_y_hat = np.copy(y_hat)
curr_y_hat[curr_y_hat != label_ids[label]] = 0
dice[label].append(mmb.dc(curr_y_hat, curr_y_true))
assd[label].append(mmb.assd(curr_y_hat, curr_y_true))
# Compute mean/std for each class for dice and assd
dice_mu = {}
dice_sd = {}
dice_total_mu = []
assd_mu = {}
assd_sd = {}
assd_total_mu = []
for label in label_ids:
if label_ids[label] == id_to_ignore:
continue
dice_mu[label] = np.mean(dice[label])
dice_sd[label] = np.std(dice[label])
dice_total_mu.append(dice_mu[label])
assd_mu[label] = np.mean(assd[label])
assd_sd[label] = np.std(assd[label])
assd_total_mu.append(assd_mu[label])
dice_total_mu = np.mean(dice_total_mu)
assd_total_mu = np.mean(assd_total_mu)
return dice_total_mu, dice_mu, dice_sd, assd_total_mu, assd_mu, assd_sd
def evaluate_abdomen(raw_ct_img_dir, data_seed, model, label_ids, id_to_ignore = 0, whole_dataset_computation=False):
# Use the same random seed as in the data processing notebook
assert data_seed == 0
np.random.seed(data_seed)
train_indices = sorted(np.random.choice(range(30), 24, replace=False))
test_indices = np.asarray(sorted([x for x in range(30) if x not in train_indices]))
test_images = np.asarray(sorted(os.listdir(raw_ct_img_dir + "img/")))[test_indices]
test_labels = np.asarray(sorted(os.listdir(raw_ct_img_dir + "label/")))[test_indices]
print(test_images)
all_scans = []
all_segmaps = []
for imf,labelf in zip(test_images, test_labels):
print("Working on a new scan", imf, labelf)
scan,labels = data_utils.preprocess_abdomen_ct(raw_ct_img_dir, imf, labelf, final_cropping=True)
all_scans.append(np.copy(scan))
all_segmaps.append(np.copy(labels))
# Compute results for all images at once
if whole_dataset_computation == True:
combined_scans = []
for scan in all_scans:
combined_scans.append(np.moveaxis(np.copy(scan), 2, 0))
combined_scans = np.vstack(combined_scans)
combined_scans = np.moveaxis(combined_scans, 0, 2)
combined_segmaps = []
for segmap in all_segmaps:
combined_segmaps.append(np.moveaxis(np.copy(segmap), 2, 0))
combined_segmaps = np.vstack(combined_segmaps)
combined_segmaps = np.moveaxis(combined_segmaps, 0, 2)
all_scans = [combined_scans]
all_segmaps = [combined_segmaps]
# Initialize result dicts
dice = {}
assd = {}
for label in label_ids:
if label_ids[label] == id_to_ignore:
continue
dice[label] = []
assd[label] = []
# Find results for each class
for scan,labels in zip(all_scans, all_segmaps):
# The 3D labels and our predictions
y_true = []
y_hat = []
for idx in range(scan.shape[-1]):
# Reshape the images/labels to 256x256xNum_Slices
X,_ = io.get_consecutive_slices(scan, labels, idx, target_shape=(256,256,3))
# Ignore slices that have none of the target organs present
if len(np.unique(labels[...,idx])) == 1:
continue
Yhat = model.predict(X.reshape(1,256,256,3))[0]
y_true.append(np.copy(labels[...,idx].reshape(1,256,256)))
y_hat.append(np.copy(Yhat.reshape(1,256,256)))
y_true = np.vstack(y_true)
y_hat = np.vstack(y_hat)
print(y_true.shape, y_hat.shape)
for label in label_ids:
if label_ids[label] == id_to_ignore:
continue
# Prep data and compute metrics
curr_y_true = np.copy(y_true)
curr_y_true[curr_y_true != label_ids[label]] = 0
curr_y_hat = np.copy(y_hat)
curr_y_hat[curr_y_hat != label_ids[label]] = 0
dice[label].append(mmb.dc(curr_y_hat, curr_y_true))
assd[label].append(mmb.assd(curr_y_hat, curr_y_true))
# Compute mean/std for each class for dice and assd
dice_mu = {}
dice_sd = {}
dice_total_mu = []
assd_mu = {}
assd_sd = {}
assd_total_mu = []
for label in label_ids:
if label_ids[label] == id_to_ignore:
continue
dice_mu[label] = np.mean(dice[label])
dice_sd[label] = np.std(dice[label])
dice_total_mu.append(dice_mu[label])
assd_mu[label] = np.mean(assd[label])
assd_sd[label] = np.std(assd[label])
assd_total_mu.append(assd_mu[label])
dice_total_mu = np.mean(dice_total_mu)
assd_total_mu = np.mean(assd_total_mu)
return dice_total_mu, dice_mu, dice_sd, assd_total_mu, assd_mu, assd_sd | [
"numpy.moveaxis",
"numpy.random.seed",
"numpy.copy",
"io_utils.sample_batch",
"numpy.std",
"numpy.unique",
"medpy.metric.binary.dc",
"data_utils.preprocess_abdomen_ct",
"numpy.mean",
"numpy.array",
"medpy.metric.binary.assd",
"io_utils.get_consecutive_slices",
"os.listdir",
"numpy.vstack"
... | [((2717, 2739), 'numpy.mean', 'np.mean', (['dice_total_mu'], {}), '(dice_total_mu)\n', (2724, 2739), True, 'import numpy as np\n'), ((2760, 2782), 'numpy.mean', 'np.mean', (['assd_total_mu'], {}), '(assd_total_mu)\n', (2767, 2782), True, 'import numpy as np\n'), ((3080, 3105), 'numpy.random.seed', 'np.random.seed', (['data_seed'], {}), '(data_seed)\n', (3094, 3105), True, 'import numpy as np\n'), ((6582, 6604), 'numpy.mean', 'np.mean', (['dice_total_mu'], {}), '(dice_total_mu)\n', (6589, 6604), True, 'import numpy as np\n'), ((6625, 6647), 'numpy.mean', 'np.mean', (['assd_total_mu'], {}), '(assd_total_mu)\n', (6632, 6647), True, 'import numpy as np\n'), ((2432, 2452), 'numpy.mean', 'np.mean', (['dice[label]'], {}), '(dice[label])\n', (2439, 2452), True, 'import numpy as np\n'), ((2478, 2497), 'numpy.std', 'np.std', (['dice[label]'], {}), '(dice[label])\n', (2484, 2497), True, 'import numpy as np\n'), ((2577, 2597), 'numpy.mean', 'np.mean', (['assd[label]'], {}), '(assd[label])\n', (2584, 2597), True, 'import numpy as np\n'), ((2623, 2642), 'numpy.std', 'np.std', (['assd[label]'], {}), '(assd[label])\n', (2629, 2642), True, 'import numpy as np\n'), ((3640, 3726), 'data_utils.preprocess_abdomen_ct', 'data_utils.preprocess_abdomen_ct', (['raw_ct_img_dir', 'imf', 'labelf'], {'final_cropping': '(True)'}), '(raw_ct_img_dir, imf, labelf,\n final_cropping=True)\n', (3672, 3726), False, 'import data_utils\n'), ((4048, 4073), 'numpy.vstack', 'np.vstack', (['combined_scans'], {}), '(combined_scans)\n', (4057, 4073), True, 'import numpy as np\n'), ((4099, 4132), 'numpy.moveaxis', 'np.moveaxis', (['combined_scans', '(0)', '(2)'], {}), '(combined_scans, 0, 2)\n', (4110, 4132), True, 'import numpy as np\n'), ((4298, 4325), 'numpy.vstack', 'np.vstack', (['combined_segmaps'], {}), '(combined_segmaps)\n', (4307, 4325), True, 'import numpy as np\n'), ((4353, 4388), 'numpy.moveaxis', 'np.moveaxis', (['combined_segmaps', '(0)', '(2)'], {}), '(combined_segmaps, 0, 2)\n', (4364, 
4388), True, 'import numpy as np\n'), ((5441, 5458), 'numpy.vstack', 'np.vstack', (['y_true'], {}), '(y_true)\n', (5450, 5458), True, 'import numpy as np\n'), ((5475, 5491), 'numpy.vstack', 'np.vstack', (['y_hat'], {}), '(y_hat)\n', (5484, 5491), True, 'import numpy as np\n'), ((6313, 6333), 'numpy.mean', 'np.mean', (['dice[label]'], {}), '(dice[label])\n', (6320, 6333), True, 'import numpy as np\n'), ((6359, 6378), 'numpy.std', 'np.std', (['dice[label]'], {}), '(dice[label])\n', (6365, 6378), True, 'import numpy as np\n'), ((6450, 6470), 'numpy.mean', 'np.mean', (['assd[label]'], {}), '(assd[label])\n', (6457, 6470), True, 'import numpy as np\n'), ((6496, 6515), 'numpy.std', 'np.std', (['assd[label]'], {}), '(assd[label])\n', (6502, 6515), True, 'import numpy as np\n'), ((1270, 1314), 'io_utils.sample_batch', 'io.sample_batch', (['data_dir', '[data_list[i]]', '(1)'], {}), '(data_dir, [data_list[i]], 1)\n', (1285, 1314), True, 'import io_utils as io\n'), ((1783, 1798), 'numpy.copy', 'np.copy', (['y_true'], {}), '(y_true)\n', (1790, 1798), True, 'import numpy as np\n'), ((1898, 1912), 'numpy.copy', 'np.copy', (['y_hat'], {}), '(y_hat)\n', (1905, 1912), True, 'import numpy as np\n'), ((3749, 3762), 'numpy.copy', 'np.copy', (['scan'], {}), '(scan)\n', (3756, 3762), True, 'import numpy as np\n'), ((3791, 3806), 'numpy.copy', 'np.copy', (['labels'], {}), '(labels)\n', (3798, 3806), True, 'import numpy as np\n'), ((4973, 5045), 'io_utils.get_consecutive_slices', 'io.get_consecutive_slices', (['scan', 'labels', 'idx'], {'target_shape': '(256, 256, 3)'}), '(scan, labels, idx, target_shape=(256, 256, 3))\n', (4998, 5045), True, 'import io_utils as io\n'), ((5712, 5727), 'numpy.copy', 'np.copy', (['y_true'], {}), '(y_true)\n', (5719, 5727), True, 'import numpy as np\n'), ((5815, 5829), 'numpy.copy', 'np.copy', (['y_hat'], {}), '(y_hat)\n', (5822, 5829), True, 'import numpy as np\n'), ((1342, 1363), 'numpy.copy', 'np.copy', (['y_true_slice'], {}), '(y_true_slice)\n', (1349, 
1363), True, 'import numpy as np\n'), ((1440, 1456), 'numpy.array', 'np.array', (['y_true'], {}), '(y_true)\n', (1448, 1456), True, 'import numpy as np\n'), ((1494, 1509), 'numpy.array', 'np.array', (['y_hat'], {}), '(y_hat)\n', (1502, 1509), True, 'import numpy as np\n'), ((2016, 2047), 'medpy.metric.binary.dc', 'mmb.dc', (['curr_y_hat', 'curr_y_true'], {}), '(curr_y_hat, curr_y_true)\n', (2022, 2047), True, 'import medpy.metric.binary as mmb\n'), ((2080, 2113), 'medpy.metric.binary.assd', 'mmb.assd', (['curr_y_hat', 'curr_y_true'], {}), '(curr_y_hat, curr_y_true)\n', (2088, 2113), True, 'import medpy.metric.binary as mmb\n'), ((3305, 3340), 'os.listdir', 'os.listdir', (["(raw_ct_img_dir + 'img/')"], {}), "(raw_ct_img_dir + 'img/')\n", (3315, 3340), False, 'import os\n'), ((3393, 3430), 'os.listdir', 'os.listdir', (["(raw_ct_img_dir + 'label/')"], {}), "(raw_ct_img_dir + 'label/')\n", (3403, 3430), False, 'import os\n'), ((5921, 5952), 'medpy.metric.binary.dc', 'mmb.dc', (['curr_y_hat', 'curr_y_true'], {}), '(curr_y_hat, curr_y_true)\n', (5927, 5952), True, 'import medpy.metric.binary as mmb\n'), ((5985, 6018), 'medpy.metric.binary.assd', 'mmb.assd', (['curr_y_hat', 'curr_y_true'], {}), '(curr_y_hat, curr_y_true)\n', (5993, 6018), True, 'import medpy.metric.binary as mmb\n'), ((4001, 4014), 'numpy.copy', 'np.copy', (['scan'], {}), '(scan)\n', (4008, 4014), True, 'import numpy as np\n'), ((4247, 4262), 'numpy.copy', 'np.copy', (['segmap'], {}), '(segmap)\n', (4254, 4262), True, 'import numpy as np\n'), ((5136, 5163), 'numpy.unique', 'np.unique', (['labels[..., idx]'], {}), '(labels[..., idx])\n', (5145, 5163), True, 'import numpy as np\n')] |
"""
Reads in current year's Arctic sea ice extent from Sea Ice Index 3 (NSIDC)
and calculates the anomaly from the 1981-2010 median
Website : ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/north/daily/data/
Author : <NAME>
Date : 5 September 2016
"""
### Import modules
import numpy as np
import urllib.request
import urllib as UL
import datetime
import matplotlib.pyplot as plt
### Directory and time
directoryfigure = './Figures/'
now = datetime.datetime.now()
currentmn = str(now.month)
currentdy = str(now.day)
currentyr = str(now.year)
currenttime = currentmn + '_' + currentdy + '_' + currentyr
currentdoy = now.timetuple().tm_yday
### Load url
url = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/north/daily/data/' \
'N_seaice_extent_daily_v3.0.csv'
### Read file
raw_data = UL.request.urlopen(url)
dataset = np.genfromtxt(raw_data, skip_header=2,delimiter=',',
usecols=[0,1,2,3,4])
print('\nCompleted: Read sea ice data!')
### Set missing data to nan
dataset[np.where(dataset==-9999)] = np.nan
### Variables
year = dataset[:,0]
month = dataset[:,1]
day = dataset[:,2]
ice = dataset[:,3]
missing = dataset[:,4]
### Find current year (2017)
yr2018 = np.where(year == 2018)[0]
sie18 = ice[yr2018]
### Ice Conversion
iceval = sie18 * 1e6
### Printing info
print('\n----- NSIDC Arctic Sea Ice -----')
print('Current Date =', now.strftime("%Y-%m-%d %H:%M"), '\n')
print('SIE Date = %s/%s/%s' % (int(month[-1]),int(day[-1]),int(year[-1])))
print('Current SIE = %s km^2 \n' % (iceval[-1]))
print('1-day change SIE = %s km^2' % (iceval[-1]-iceval[-2]))
print('7-day change SIE = %s km^2 \n' % (iceval[-1]-iceval[-8]))
###########################################################################
###########################################################################
###########################################################################
### Reads in 1981-2010 means
### Load url
# Day-of-year climatology (1981-2010) for the same product.
url2 = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/north/daily/data/' \
        'N_seaice_extent_climatology_1981-2010_v3.0.csv'
### Read file
raw_data2 = UL.request.urlopen(url2)
# Columns: DOY, mean extent, std, and the 10/25/50/75/90th percentiles.
dataset2 = np.genfromtxt(raw_data2, skip_header=2,delimiter=',',
                    usecols=[0,1,2,3,4,5,6,7])
### Create variables
doy = dataset2[:,0]
meanice = dataset2[:,1] * 1e6     # mean extent converted to km^2
std = dataset2[:,2]
### Quartiles
quartile10 = dataset2[:,3]
quartile25 = dataset2[:,4]
quartile50 = dataset2[:,5]       # daily median, used for the anomalies below
quartile75 = dataset2[:,6]
quartile90 = dataset2[:,7]
### Anomalies
# currentdoy-2: yesterday's day-of-year as a 0-based index into the
# climatology (the latest daily value lags real time by one day).
currentanom = iceval[-1]-meanice[currentdoy-2]
### Printing info
print('Current anomaly = %s km^2 \n' % currentanom)
### Finding select years since 2012
yr2012 = np.where(year == 2012)[0]
yr2013 = np.where(year == 2013)[0]
yr2014 = np.where(year == 2014)[0]
yr2015 = np.where(year == 2015)[0]
yr2016 = np.where(year == 2016)[0]
# Bug fix: yr2017 was never defined, so "ice[yr2017]" below raised a
# NameError before any figure could be drawn.
yr2017 = np.where(year == 2017)[0]
### Calculate anomaly from the 1981-2010 daily median
# [:-1] presumably trims the leap-day entry so the climatology length
# matches a 365-day year (2012/2016 are leap years) -- TODO confirm
# against the climatology file's row count.
sie12 = ice[yr2012] - quartile50
sie13 = ice[yr2013] - quartile50[:-1]
sie14 = ice[yr2014] - quartile50[:-1]
sie15 = ice[yr2015] - quartile50[:-1]
sie16 = ice[yr2016] - quartile50
sie17 = ice[yr2017] - quartile50[:-1]
# 2018 is partial: only subtract the median for days observed so far.
sie18 = sie18 - quartile50[:len(sie18)]
### Append years into one continuous anomaly series (2012..2018)
extend5 = np.append(sie12,sie13,axis=0)
extend4 = np.append(extend5,sie14,axis=0)
extend3 = np.append(extend4,sie15,axis=0)
extend2 = np.append(extend3,sie16,axis=0)
extend1 = np.append(extend2,sie17,axis=0)
extend = np.append(extend1,sie18,axis=0)
### Repeat the daily median for plotting
# NOTE(review): tiled 6 times while "extend" spans 7 (partial) years;
# "median" is not used in the plotting code visible below.
median = np.tile(quartile50,6)
###########################################################################
###########################################################################
###########################################################################
### Create plot
# Dark-background styling: LaTeX text, white axes/ticks on black.
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
plt.rc('savefig',facecolor='black')
plt.rc('axes',edgecolor='white')
plt.rc('xtick',color='white')
plt.rc('ytick',color='white')
plt.rc('axes',labelcolor='white')
plt.rc('axes',facecolor='black')
fig = plt.figure()
ax = plt.subplot(111)
# One tick per year (365-day spacing); x axis is day index since 2012-01-01.
xlabels = map(str,np.arange(2012,2020,1))
plt.xticks(np.arange(0,2556,365),xlabels,rotation=0)
ylabels = [r'-5',r'-4',r'-3',r'-2',r'-1',r'\textbf{0.0}',r'1',r'2',r'3',r'4',r'5']
plt.yticks(np.arange(-5,6,1),ylabels)
plt.ylim([-5,5])
plt.xlim([0,2555])
def adjust_spines(ax, spines):
    """Show only the spines named in *spines*, nudged outward; hide the rest.

    Hidden axes also lose their ticks; tick styling is applied last.
    """
    for location, spine in ax.spines.items():
        if location not in spines:
            spine.set_color('none')
        else:
            # Offset the visible spine 5 points away from the plot area.
            spine.set_position(('outward', 5))
    if 'left' not in spines:
        ax.yaxis.set_ticks([])
    else:
        ax.yaxis.set_ticks_position('left')
    if 'bottom' not in spines:
        ax.xaxis.set_ticks([])
    else:
        ax.xaxis.set_ticks_position('bottom')
    ax.tick_params('both', length=7.5, width=2, which='major')
adjust_spines(ax, ['left','bottom'])
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_linewidth(2)
ax.spines['left'].set_linewidth(2)
# NOTE(review): upper2std/lower2std and zeroline are computed but never
# used in the plotting below.
upper2std = (meanice/1e6)+(std*2)
lower2std = (meanice/1e6)-(std*2)
ax.yaxis.grid(zorder=1,color='w',alpha=0.35)
zeroline = [0]*2191
# Mask positive anomalies so they can be drawn separately in blue;
# the red bars underneath (zorder 9) remain visible only where extend <= 0.
recdiff_masked = np.ma.masked_less_equal(extend, 0)
plt.bar(np.arange(len(extend)),extend,color='r',
        edgecolor='r',zorder=9)
plt.bar(np.arange(len(extend)),recdiff_masked.filled(np.nan),
        color='dodgerblue',edgecolor='dodgerblue',zorder=10)
plt.ylabel(r'\textbf{Extent Anomalies} [$\times$10$^{6}$ km$^2$]',fontsize=13,
           color='darkgrey')
plt.title(r'\textbf{ARCTIC SEA ICE EXTENT ANOMALIES}',
                       fontsize=20,color='darkgray')
# Annotations: climatology label, arrow, "Today!" marker, and credits.
plt.text(1195,0.25,r'\textbf{1981-2010 Climatology}',fontsize=8,
         rotation=0,ha='center',color='darkgrey')
plt.text(155,0.8,r'$\bf{\rightarrow}$',fontsize=35,rotation=230,ha='center',
         color='dodgerblue')
plt.text(len(extend)+30,-0.27,r'\textbf{Today!}',fontsize=8,rotation=270,ha='center',
         color='r')
plt.text(0.5,-4.45,r'\textbf{DATA:} National Snow \& Ice Data Center, Boulder CO',
         fontsize=6,rotation='horizontal',ha='left',color='darkgrey')
plt.text(0.5,-4.70,r'\textbf{CSV:} ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/',
         fontsize=6,rotation='horizontal',ha='left',color='darkgrey')
plt.text(0.5,-4.95,r'\textbf{GRAPHIC:} <NAME> (@ZLabe)',
         fontsize=6,rotation='horizontal',ha='left',color='darkgrey')
fig.subplots_adjust(top=0.91)
plt.savefig(directoryfigure + 'nsidc_sie_median.png',dpi=300) | [
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.ylim",
"numpy.ma.masked_less_equal",
"numpy.genfromtxt",
"urllib.request.urlopen",
"matplotlib.pyplot.text",
"numpy.append",
"matplotlib.pyplot.figure",
"numpy.where",
"numpy.arange",
"numpy.... | [((453, 476), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (474, 476), False, 'import datetime\n'), ((804, 827), 'urllib.request.urlopen', 'UL.request.urlopen', (['url'], {}), '(url)\n', (822, 827), True, 'import urllib as UL\n'), ((838, 916), 'numpy.genfromtxt', 'np.genfromtxt', (['raw_data'], {'skip_header': '(2)', 'delimiter': '""","""', 'usecols': '[0, 1, 2, 3, 4]'}), "(raw_data, skip_header=2, delimiter=',', usecols=[0, 1, 2, 3, 4])\n", (851, 916), True, 'import numpy as np\n'), ((2156, 2180), 'urllib.request.urlopen', 'UL.request.urlopen', (['url2'], {}), '(url2)\n', (2174, 2180), True, 'import urllib as UL\n'), ((2192, 2284), 'numpy.genfromtxt', 'np.genfromtxt', (['raw_data2'], {'skip_header': '(2)', 'delimiter': '""","""', 'usecols': '[0, 1, 2, 3, 4, 5, 6, 7]'}), "(raw_data2, skip_header=2, delimiter=',', usecols=[0, 1, 2, 3,\n 4, 5, 6, 7])\n", (2205, 2284), True, 'import numpy as np\n'), ((3255, 3286), 'numpy.append', 'np.append', (['sie12', 'sie13'], {'axis': '(0)'}), '(sie12, sie13, axis=0)\n', (3264, 3286), True, 'import numpy as np\n'), ((3295, 3328), 'numpy.append', 'np.append', (['extend5', 'sie14'], {'axis': '(0)'}), '(extend5, sie14, axis=0)\n', (3304, 3328), True, 'import numpy as np\n'), ((3337, 3370), 'numpy.append', 'np.append', (['extend4', 'sie15'], {'axis': '(0)'}), '(extend4, sie15, axis=0)\n', (3346, 3370), True, 'import numpy as np\n'), ((3379, 3412), 'numpy.append', 'np.append', (['extend3', 'sie16'], {'axis': '(0)'}), '(extend3, sie16, axis=0)\n', (3388, 3412), True, 'import numpy as np\n'), ((3421, 3454), 'numpy.append', 'np.append', (['extend2', 'sie17'], {'axis': '(0)'}), '(extend2, sie17, axis=0)\n', (3430, 3454), True, 'import numpy as np\n'), ((3462, 3495), 'numpy.append', 'np.append', (['extend1', 'sie18'], {'axis': '(0)'}), '(extend1, sie18, axis=0)\n', (3471, 3495), True, 'import numpy as np\n'), ((3528, 3550), 'numpy.tile', 'np.tile', (['quartile50', '(6)'], {}), '(quartile50, 6)\n', (3535, 
3550), True, 'import numpy as np\n'), ((3795, 3822), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (3801, 3822), True, 'import matplotlib.pyplot as plt\n'), ((3822, 3895), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {}), "('font', **{'family': 'sans-serif', 'sans-serif': ['Avant Garde']})\n", (3828, 3895), True, 'import matplotlib.pyplot as plt\n'), ((3893, 3929), 'matplotlib.pyplot.rc', 'plt.rc', (['"""savefig"""'], {'facecolor': '"""black"""'}), "('savefig', facecolor='black')\n", (3899, 3929), True, 'import matplotlib.pyplot as plt\n'), ((3929, 3962), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'edgecolor': '"""white"""'}), "('axes', edgecolor='white')\n", (3935, 3962), True, 'import matplotlib.pyplot as plt\n'), ((3962, 3992), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'color': '"""white"""'}), "('xtick', color='white')\n", (3968, 3992), True, 'import matplotlib.pyplot as plt\n'), ((3992, 4022), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'color': '"""white"""'}), "('ytick', color='white')\n", (3998, 4022), True, 'import matplotlib.pyplot as plt\n'), ((4022, 4056), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'labelcolor': '"""white"""'}), "('axes', labelcolor='white')\n", (4028, 4056), True, 'import matplotlib.pyplot as plt\n'), ((4056, 4089), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'facecolor': '"""black"""'}), "('axes', facecolor='black')\n", (4062, 4089), True, 'import matplotlib.pyplot as plt\n'), ((4096, 4108), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4106, 4108), True, 'import matplotlib.pyplot as plt\n'), ((4114, 4130), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (4125, 4130), True, 'import matplotlib.pyplot as plt\n'), ((4348, 4365), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-5, 5]'], {}), '([-5, 5])\n', (4356, 4365), True, 'import matplotlib.pyplot as plt\n'), ((4365, 4384), 
'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 2555]'], {}), '([0, 2555])\n', (4373, 4384), True, 'import matplotlib.pyplot as plt\n'), ((5273, 5307), 'numpy.ma.masked_less_equal', 'np.ma.masked_less_equal', (['extend', '(0)'], {}), '(extend, 0)\n', (5296, 5307), True, 'import numpy as np\n'), ((5516, 5618), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""\\\\textbf{Extent Anomalies} [$\\\\times$10$^{6}$ km$^2$]"""'], {'fontsize': '(13)', 'color': '"""darkgrey"""'}), "('\\\\textbf{Extent Anomalies} [$\\\\times$10$^{6}$ km$^2$]',\n fontsize=13, color='darkgrey')\n", (5526, 5618), True, 'import matplotlib.pyplot as plt\n'), ((5625, 5715), 'matplotlib.pyplot.title', 'plt.title', (['"""\\\\textbf{ARCTIC SEA ICE EXTENT ANOMALIES}"""'], {'fontsize': '(20)', 'color': '"""darkgray"""'}), "('\\\\textbf{ARCTIC SEA ICE EXTENT ANOMALIES}', fontsize=20, color=\n 'darkgray')\n", (5634, 5715), True, 'import matplotlib.pyplot as plt\n'), ((5758, 5872), 'matplotlib.pyplot.text', 'plt.text', (['(1195)', '(0.25)', '"""\\\\textbf{1981-2010 Climatology}"""'], {'fontsize': '(8)', 'rotation': '(0)', 'ha': '"""center"""', 'color': '"""darkgrey"""'}), "(1195, 0.25, '\\\\textbf{1981-2010 Climatology}', fontsize=8,\n rotation=0, ha='center', color='darkgrey')\n", (5766, 5872), True, 'import matplotlib.pyplot as plt\n'), ((5897, 6004), 'matplotlib.pyplot.text', 'plt.text', (['(155)', '(0.8)', '"""$\\\\bf{\\\\rightarrow}$"""'], {'fontsize': '(35)', 'rotation': '(230)', 'ha': '"""center"""', 'color': '"""dodgerblue"""'}), "(155, 0.8, '$\\\\bf{\\\\rightarrow}$', fontsize=35, rotation=230, ha=\n 'center', color='dodgerblue')\n", (5905, 6004), True, 'import matplotlib.pyplot as plt\n'), ((6132, 6289), 'matplotlib.pyplot.text', 'plt.text', (['(0.5)', '(-4.45)', '"""\\\\textbf{DATA:} National Snow \\\\& Ice Data Center, Boulder CO"""'], {'fontsize': '(6)', 'rotation': '"""horizontal"""', 'ha': '"""left"""', 'color': '"""darkgrey"""'}), "(0.5, -4.45,\n '\\\\textbf{DATA:} National Snow \\\\& Ice Data 
Center, Boulder CO',\n fontsize=6, rotation='horizontal', ha='left', color='darkgrey')\n", (6140, 6289), True, 'import matplotlib.pyplot as plt\n'), ((6285, 6442), 'matplotlib.pyplot.text', 'plt.text', (['(0.5)', '(-4.7)', '"""\\\\textbf{CSV:} ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/"""'], {'fontsize': '(6)', 'rotation': '"""horizontal"""', 'ha': '"""left"""', 'color': '"""darkgrey"""'}), "(0.5, -4.7,\n '\\\\textbf{CSV:} ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/',\n fontsize=6, rotation='horizontal', ha='left', color='darkgrey')\n", (6293, 6442), True, 'import matplotlib.pyplot as plt\n'), ((6440, 6566), 'matplotlib.pyplot.text', 'plt.text', (['(0.5)', '(-4.95)', '"""\\\\textbf{GRAPHIC:} <NAME> (@ZLabe)"""'], {'fontsize': '(6)', 'rotation': '"""horizontal"""', 'ha': '"""left"""', 'color': '"""darkgrey"""'}), "(0.5, -4.95, '\\\\textbf{GRAPHIC:} <NAME> (@ZLabe)', fontsize=6,\n rotation='horizontal', ha='left', color='darkgrey')\n", (6448, 6566), True, 'import matplotlib.pyplot as plt\n'), ((6610, 6672), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(directoryfigure + 'nsidc_sie_median.png')"], {'dpi': '(300)'}), "(directoryfigure + 'nsidc_sie_median.png', dpi=300)\n", (6621, 6672), True, 'import matplotlib.pyplot as plt\n'), ((1063, 1089), 'numpy.where', 'np.where', (['(dataset == -9999)'], {}), '(dataset == -9999)\n', (1071, 1089), True, 'import numpy as np\n'), ((1254, 1276), 'numpy.where', 'np.where', (['(year == 2018)'], {}), '(year == 2018)\n', (1262, 1276), True, 'import numpy as np\n'), ((2745, 2767), 'numpy.where', 'np.where', (['(year == 2012)'], {}), '(year == 2012)\n', (2753, 2767), True, 'import numpy as np\n'), ((2780, 2802), 'numpy.where', 'np.where', (['(year == 2013)'], {}), '(year == 2013)\n', (2788, 2802), True, 'import numpy as np\n'), ((2815, 2837), 'numpy.where', 'np.where', (['(year == 2014)'], {}), '(year == 2014)\n', (2823, 2837), True, 'import numpy as np\n'), ((2850, 2872), 'numpy.where', 'np.where', (['(year == 2015)'], {}), 
'(year == 2015)\n', (2858, 2872), True, 'import numpy as np\n'), ((2885, 2907), 'numpy.where', 'np.where', (['(year == 2016)'], {}), '(year == 2016)\n', (2893, 2907), True, 'import numpy as np\n'), ((4150, 4174), 'numpy.arange', 'np.arange', (['(2012)', '(2020)', '(1)'], {}), '(2012, 2020, 1)\n', (4159, 4174), True, 'import numpy as np\n'), ((4185, 4208), 'numpy.arange', 'np.arange', (['(0)', '(2556)', '(365)'], {}), '(0, 2556, 365)\n', (4194, 4208), True, 'import numpy as np\n'), ((4321, 4340), 'numpy.arange', 'np.arange', (['(-5)', '(6)', '(1)'], {}), '(-5, 6, 1)\n', (4330, 4340), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[17]:
from IPython.display import display, HTML
import matplotlib.pyplot as plt
import plotly
import plotly.graph_objs as go
from plotly.subplots import make_subplots
from plotly.offline import init_notebook_mode,iplot
import plotly.express as px
from kaleido.scopes.plotly import PlotlyScope
import re
# Render Plotly figures inline in the notebook.
init_notebook_mode(connected=True)
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import csv
import os
# Notebook magic: draw matplotlib figures inline.
get_ipython().run_line_magic('matplotlib', 'inline')
# work in offline mode (no CDN needed)
import plotly.offline as pyo
pyo.init_notebook_mode()
# graph renderer: static PNG output for every figure
import plotly.io as pio
png_renderer = pio.renderers["png"]
# png_renderer.width=1800
# png_renderer.height=1200
# png_renderer.autoscale=True
pio.renderers.default = "png"
# Kaleido scope for static image export (fig.write_image).
scope = PlotlyScope()
# Bare expression: echoes the plotly version in the notebook cell output.
plotly.__version__
FIGURE_DIR = "C:\\Wenzhong\\我的坚果云\\实验\\Figures\\"
# In[18]:
def latency_df(result_dir, dir_prefix, minTime):
    """Load per-record latency samples for one experiment run.

    Reads <result_dir>/<dir_prefix>/count-latency.txt (space separated,
    no header: count latency currTime subTask), rebases the wall-clock
    "currTime" against *minTime* into a "time" column, and keeps rows at
    or after the module-level TIME_BEGIN.  Returns the frame sorted by
    "time".
    """
    latency_data = pd.read_csv(os.path.join(result_dir, dir_prefix, "count-latency.txt"),
                               delimiter=" ", header=None, index_col=False,
                               names = "count latency currTime subTask".split())
    # Clamp negative latencies (clock-skew outliers) to zero.
    # Bug fix: the original chained assignment
    #     latency_data["latency"][mask] = 0
    # writes through an intermediate object -- pandas emits
    # SettingWithCopyWarning and under copy-on-write the write is silently
    # lost.  .loc assigns on the frame itself.
    latency_data.loc[latency_data["latency"] < 0, "latency"] = 0
    latency_data["time"] = latency_data["currTime"] - minTime
    latency_data = latency_data[latency_data["time"] >= TIME_BEGIN]
    latency_data.sort_values("time", inplace=True)
    return latency_data
def failure_df(result_dir, dir_prefix, minTime, length):
    """Load failure/recovery events; keep those inside [TIME_BEGIN, length].

    Adds rebased failure/recovery times and the recovery duration in ms.
    """
    path = os.path.join(result_dir, dir_prefix, "restart-cost.txt")
    df = pd.read_csv(path, delimiter=" ", skiprows=[0], index_col=False)
    df["failedTimeFromZero"] = df["failedTime"] - minTime
    df["recoveredTimeFromZero"] = df["loadCheckpointCompleteTime"] - minTime
    df["RecoveryLength_ms"] = df["loadCheckpointCompleteTime"] - df["RecoveryStartTime"]
    df.sort_values("failedTimeFromZero", inplace=True)
    in_window = (df["failedTimeFromZero"] >= TIME_BEGIN) & (df["failedTimeFromZero"] <= length)
    return df[in_window]
def throughput_df(result_dir, dir_prefix, filename, minTime, length):
    """Load one TaskManager throughput file; keep rows in [TIME_BEGIN, length]."""
    path = os.path.join(result_dir, dir_prefix, filename)
    df = pd.read_csv(path, delimiter=",", index_col=False)
    # Rebase the sample start time against the experiment start.
    df["startTimeFromZero"] = df["start"] - minTime
    df.sort_values("startTimeFromZero", inplace=True)
    in_window = (df["startTimeFromZero"] >= TIME_BEGIN) & (df["startTimeFromZero"] <= length)
    return df[in_window]
def checkpoint_df(result_dir, dir_prefix, minTime, length):
    """Load checkpoint records; keep those starting in [TIME_BEGIN, length]."""
    path = os.path.join(result_dir, dir_prefix, "checkpoints.txt")
    df = pd.read_csv(path, delimiter=" ", index_col=False)
    df["startTimeFromZero"] = df["startTime_ms"] - minTime
    df.sort_values("startTimeFromZero", inplace=True)
    in_window = (df["startTimeFromZero"] >= TIME_BEGIN) & (df["startTimeFromZero"] <= length)
    return df[in_window]
def zk1_cpu_df(result_dir, dir_prefix):
    """Load the zk1 host's CPU samples, rebased to the earliest timestamp.

    Unlike the other loaders this one defines time zero itself (its
    minimum "timestamp"), and it does not clip to a time window.
    """
    path = os.path.join(result_dir, dir_prefix, "cpu-zk1.txt")
    df = pd.read_csv(path, delimiter=" ", index_col=False)
    df["timeFromZero"] = df["timestamp"] - df["timestamp"].min()
    df.sort_values("timeFromZero", inplace=True)
    return df
def resource_df(result_dir, dir_prefix, resource, host, minTime, length):
    """Load <resource>-<host>.txt samples; rebase time and clip to window."""
    fname = resource + "-" + host + ".txt"
    df = pd.read_csv(os.path.join(result_dir, dir_prefix, fname),
                     delimiter=" ", index_col=False)
    df["timeFromZero"] = df["timestamp"] - minTime
    df.sort_values("timeFromZero", inplace=True)
    keep = (df["timeFromZero"] >= TIME_BEGIN) & (df["timeFromZero"] <= length)
    return df[keep]
# In[75]:
def export_plots(path, fig_name, fig):
    """Save *fig* as <path>/<fig_name>.png, creating *path* when absent."""
    if not os.path.exists(path):
        os.mkdir(path)
    target = os.path.join(path, fig_name + ".png")
    fig.write_image(target)
def latency_plot(latency_data, marker_mode=False):
    """Latency-vs-time trace: size-2 markers when *marker_mode*, else a line."""
    kwargs = dict(x=latency_data.time,
                  y=latency_data.latency,
                  name="latency")
    if marker_mode:
        kwargs["mode"] = "markers"
        kwargs["marker"] = dict(size=2)
    return go.Scatter(**kwargs)
def throughput_plot(result_dir, dir_prefix, minTime, length, tm_list):
    """Cluster-wide input throughput trace: sum of all TaskManager files,
    binned into 1-second (1000 ms) buckets."""
    # Concatenate the per-TM throughput frames; TMs whose file is missing
    # are silently skipped.
    all_tm_throughput = pd.concat([
        throughput_df(result_dir, dir_prefix, tm+".txt", minTime, length)
        for tm in tm_list
        if os.path.exists(os.path.join(result_dir, dir_prefix, tm+".txt"))
    ])[["startTimeFromZero", "elements/second/core"]]
    # NOTE(review): sort_values without inplace/assignment is a no-op here;
    # harmless because the binning below does not require sorted input.
    all_tm_throughput.sort_values("startTimeFromZero")
    # Bin samples into 1000 ms buckets and sum the per-core rates.
    groups = pd.cut(all_tm_throughput["startTimeFromZero"], np.arange(all_tm_throughput["startTimeFromZero"].min(), all_tm_throughput["startTimeFromZero"].max(), 1000))
    res = all_tm_throughput.groupby(groups).sum()
    # Re-index each bucket by its left edge.
    res = pd.DataFrame({"elements/second": res["elements/second/core"].to_numpy(), "startTimeFromZero": res.index.categories.left})
    # res = res[res["elements/second"] > 0]
    return go.Scatter(
        x = res["startTimeFromZero"],
        y = res["elements/second"],
        line_width=1,
        # mode="markers", marker=dict(symbol="triangle-up", size=3),
        name = "Input Throughput")
def output_throughput_plot(latency_data):
    """Output throughput trace: count of latency records per 1-second bucket."""
    # Bin records by their (rebased) arrival time into 1000 ms buckets.
    groups = pd.cut(latency_data["time"], np.arange(latency_data["time"].min(), latency_data["time"].max(), 1000))
    res = latency_data.groupby(groups).count()
    # One output element per latency record, labelled by the bucket's left edge.
    res = pd.DataFrame({"elements/second": res["count"].to_numpy(), "startTimeFromZero": res.index.categories.left})
    return go.Scatter(
        x = res["startTimeFromZero"],
        y = res["elements/second"],
        line=dict(width=1, color="rgba(0,255,255,0.5)"),
        # mode="markers", marker=dict(symbol="triangle-up", size=3),
        name = "Output Throughput")
# def throughput_plot(result_dir, dir_prefix, minTime, tm_list):
# all_tm_throughput = pd.concat([throughput_df(result_dir, dir_prefix, tm+".txt", minTime) for tm in tm_list])
# all_tm_throughput.sort_values("startTimeFromZero")
# return go.Histogram(x=all_tm_throughput["startTimeFromZero"],
# y=all_tm_throughput["elements/second/core"],
# name="throughput",
# xbins=dict(start=0, end=all_tm_throughput["startTimeFromZero"].max(), size=1000),
# histfunc="sum",
# opacity=1,
# )
# throughput_colors = [ '#AB63FA', '#FFA15A', '#19D3F3', '#FF6692', '#B6E880', '#FF97FF', '#FECB52']
# def throughput_plot(tm, throughput_data, color_idx):
# return go.Scatter(
# x = throughput_data["startTimeFromZero"],
# y = throughput_data["elements/second/core"],
# mode="markers", marker=dict(symbol="triangle-up", color=throughput_colors[color_idx]),
# name = tm + "_throughput")
def cpu_usage_plot(cpu, column, name):
    """Thin line of one CPU metric *column* against rebased time."""
    return go.Scatter(x=cpu.timeFromZero,
                      y=cpu[column],
                      line_width=1,
                      name=name)
def memory_usage_plot(memory):
    """Dotted line of used-memory percentage against rebased time."""
    opts = dict(x=memory.timeFromZero,
                y=memory["used_percent"],
                line=dict(dash='dot'),
                name="Memory Used")
    return go.Scatter(**opts)
# def memory_cache_plot(memory):
# return go.Scatter(
# x = memory.timeFromZero,
# y = memory["cache_percent"],
# line=dict(dash = 'dash'),
# name = "Memory Cached"
# )
def network_read_plot(network):
    """Cyan line of received bytes; carries the 'Network' legend entry."""
    opts = dict(x=network.index,
                y=network["recv_bytes"],
                line=dict(width=1, color="cyan"),
                name="Network",
                legendgroup="Network",
                showlegend=True)
    return go.Scatter(**opts)
def network_write_plot(network):
    """Cyan line of sent bytes; legend suppressed (shared 'Network' group)."""
    opts = dict(x=network.index,
                y=network["sent_bytes"],
                line=dict(width=1, color="cyan"),
                name="Network",
                legendgroup="Network",
                showlegend=False)
    return go.Scatter(**opts)
def disk_read_plot(disk):
    """Orange dash-dot line of disk read bytes/s; carries the 'Disk' legend entry."""
    opts = dict(x=disk.index,
                y=disk["total_read_bytes_per_s"],
                line=dict(width=1, dash='dashdot', color="orange"),
                name="Disk",
                legendgroup="Disk",
                showlegend=True)
    return go.Scatter(**opts)
def disk_write_plot(disk):
    """Orange dash-dot line of disk write bytes/s; legend suppressed ('Disk' group)."""
    opts = dict(x=disk.index,
                y=disk["total_write_bytes_per_s"],
                line=dict(width=1, dash='dashdot', color="orange"),
                name="Disk",
                legendgroup="Disk",
                showlegend=False)
    return go.Scatter(**opts)
def recovered_failure_plot(recovered_failure, showlegend):
    """One red marker trace per recovered failure (failed -> recovered span).

    NOTE(review): dead code -- this definition is immediately shadowed by
    the redefinition of recovered_failure_plot(recovered_failure, max_y,
    fig) directly below, so it can never be called.
    """
    res = []
    for i, (start, end) in enumerate(zip(recovered_failure["failedTimeFromZero"], recovered_failure["recoveredTimeFromZero"])):
        res.append(go.Scatter(x=[start, end],
                   y=[0, 0],
                   name="Recovered Failure",
                   legendgroup="Recovered Failure",
                   marker=dict(color="red", symbol="x-thin",size=10, line=dict(color="red", width=1)),
                   # marker=dict(symbol="x",size=7,color="rgba(255, 0, 0, 1)",line=dict(color="red", width=2)),
                   showlegend= i == 0 if showlegend else False))
    return res
def recovered_failure_plot(recovered_failure, max_y, fig):
    """Shade each failure's recovery interval as a grey vertical band on *fig*.

    Adds one full-height vrect per failure, spanning failedTimeFromZero to
    failedTimeFromZero + RecoveryLength_ms, across all subplot rows/cols.
    Mutates *fig* in place; returns None.

    NOTE(review): max_y is unused; it is kept for signature compatibility
    with existing callers.
    """
    for i, (start, cost) in enumerate(zip(recovered_failure["failedTimeFromZero"], recovered_failure["RecoveryLength_ms"])):
        fig.add_vrect(
            x0=start,
            x1=start + cost,
            fillcolor="grey",
            opacity=0.5,
            layer="below",
            line_color="black",
            line_width=1,
            row="all", col="all"
        )
    # res.append(go.Scatter(x=[start, start + cost],
    #            y=[0, 0],
    #            name="Recovered Failure",
    #            legendgroup="Recovered Failure",
    #            marker=dict(color="red", symbol="x-thin",size=10, line=dict(color="red", width=1)),
    #            # marker=dict(symbol="x",size=7,color="rgba(255, 0, 0, 1)",line=dict(color="red", width=2)),
    #            showlegend= i == 0 if showlegend else False))
def unrecovered_failure_plot(unrecovered_failure, showlegend):
    """Hollow red 'x' markers at y=0 for failures that never recovered."""
    zeros = np.zeros(len(unrecovered_failure))
    return go.Scatter(
        x=unrecovered_failure["failedTimeFromZero"],
        y=zeros,
        mode="markers",
        marker=dict(symbol="x", size=7, color="rgba(255, 0, 0, 0)",
                    line=dict(color="red", width=1)),
        legendgroup="Unrecovered Failure",
        name="Unrecovered Failure",
        showlegend=showlegend,
    )
def successful_checkpoints_plot(successful_checkpoints, showlegend):
    """One short green segment at y=0 per successful checkpoint.

    Each segment spans the checkpoint's start to start + duration; only the
    first trace carries the legend entry (and only when *showlegend*).
    """
    traces = []
    starts = successful_checkpoints["startTimeFromZero"]
    costs = successful_checkpoints["timeCost_ms"]
    for i, (begin, cost) in enumerate(zip(starts, costs)):
        traces.append(go.Scatter(
            x=[begin, begin + cost],
            y=[0, 0],
            name="Successful Checkpoint",
            legendgroup="Successful Checkpoint",
            marker=dict(symbol='line-ns', color="green", size=7,
                        line=dict(color="green", width=1)),
            showlegend=(i == 0) if showlegend else False))
    return traces
def failed_checkpoints_plot(successful_checkpoints, showlegend):
    """Hollow red markers at y=0 spanning each failed checkpoint.

    Despite the parameter's name, the traces produced are labelled
    "Failed Checkpoint"; only the first carries the legend entry (and only
    when *showlegend*).
    """
    traces = []
    starts = successful_checkpoints["startTimeFromZero"]
    costs = successful_checkpoints["timeCost_ms"]
    for i, (begin, cost) in enumerate(zip(starts, costs)):
        traces.append(go.Scatter(
            x=[begin, begin + cost],
            y=[0, 0],
            name="Failed Checkpoint",
            legendgroup="Failed Checkpoint",
            mode="markers",
            marker=dict(color="rgba(0,0,0,0)", size=7,
                        line=dict(color="red", width=1)),
            showlegend=(i == 0) if showlegend else False))
    return traces
# In[76]:
# Analysis window in ms relative to experiment start; the *_df loaders
# clip their rows to this window.
TIME_BEGIN, TIME_END = 0, 4000000
# TaskManager hosts flink2..flink17; their "<name>.txt" throughput files
# are summed by throughput_plot.
tm_list = ["flink" + str(num) for num in range(2, 18)]
def latency_throughput_plot(result_dir, dir_prefix, export=False, marker_mode=False, include_title=True):
    """Build (and optionally export) a 2-row figure for one experiment run:
    row 1 is latency over time, row 2 is input throughput (left axis) and
    output throughput (right axis), both overlaid with failure and
    checkpoint markers.

    Parameters
    ----------
    result_dir : str
        Root directory holding the experiment result folders.
    dir_prefix : str
        Name of the experiment folder under result_dir.
    export : bool
        When True, also write the figure to FIGURE_DIR via export_plots.
    marker_mode : bool
        Forwarded to latency_plot to toggle marker rendering.
    include_title : bool
        When True use dir_prefix as the figure title, otherwise tighten margins.

    Returns
    -------
    fig : plotly Figure

    Notes
    -----
    Reads the module-level globals TIME_BEGIN, TIME_END, tm_list and FIGURE_DIR.
    """
    # The first zookeeper CPU sample anchors absolute timestamps to t=0.
    zk_cpu = zk1_cpu_df(result_dir, dir_prefix)
    zk_cpu = zk_cpu[(TIME_BEGIN <= zk_cpu["timeFromZero"]) & (zk_cpu["timeFromZero"] <= TIME_END)]
    minTime = zk_cpu["timestamp"].min()
    # BUG FIX: the original `minTime is np.nan` identity check was effectively
    # always False (Series.min() returns a fresh NaN object, not the np.nan
    # singleton); use pd.isna so an empty window really falls back to 0.
    if pd.isna(minTime):
        minTime = 0
    latency_data = latency_df(result_dir, dir_prefix, minTime)
    length = min(latency_data["time"].max(), TIME_END)
    latency_data = latency_data[(TIME_BEGIN <= latency_data["time"]) & (latency_data["time"] <= length)]
    print("time length: ", latency_data["time"].max(), " || ", "num windows: ", len(latency_data))
    failure_data = failure_df(result_dir, dir_prefix, minTime, length)
    checkpoint_data = checkpoint_df(result_dir, dir_prefix, minTime, length)
    fig = make_subplots(
        rows=2, cols=1,
        subplot_titles=("Latency Over Time", "Throughput Over Time"),
        specs=[[{"secondary_y": False}],
               [{"secondary_y": True}]])
    # row 1: latency
    fig.update_xaxes(title_text="Time (ms)", title_font = {"size": 15}, range=(max(0, TIME_BEGIN), min(length, TIME_END)), row=1, col=1)
    fig.update_yaxes(title_text="Latency (ms)", title_font = {"size": 15}, row=1, col=1)
    # row 2: throughput, output throughput on the secondary y axis
    fig.update_xaxes(title_text="Time (ms)", title_font = {"size": 15}, range=(max(0, TIME_BEGIN), min(length, TIME_END)), row=2, col=1)
    fig.update_yaxes(title_text="Input Throughput (elements/s)", title_font = {"size": 15}, row=2, col=1, secondary_y=False)
    fig.update_yaxes(title_text="Output Throughput (elements/s)", title_font = {"size": 15}, row=2, col=1, secondary_y=True)
    # horizontal legend centered below the plot
    fig.update_layout(legend=dict(
        orientation="h",
        yanchor="top", xanchor="center",
        x=0.5, y=-0.12
    ))
    if include_title:
        fig.update_layout(title=dir_prefix)
    else:
        fig.update_layout(margin={'t': 20, 'b': 0})
    # general layout
    fig.update_layout(
        font=dict(
            family="Courier New, monospace",  # font family for all titles
            size=16,                          # font size for all titles
            color="RebeccaPurple"             # color for all titles
        ),
        height=600
    )
    # latency
    fig.add_trace(latency_plot(latency_data, marker_mode=marker_mode), row=1, col=1)
    # throughputs
    fig.add_trace(throughput_plot(result_dir, dir_prefix, minTime, length, tm_list), row=2, col=1)
    fig.add_trace(output_throughput_plot(latency_data), row=2, col=1, secondary_y=True)
    # failures: recovered failures are drawn as shaded spans by
    # recovered_failure_plot; unrecovered ones as red 'x' markers on each row
    recovered_failure = failure_data[(failure_data["loadCheckpointCompleteTime"] > 0) & (failure_data["RecoveryStartTime"] > 0)]
    recovered_failure_plot(recovered_failure, latency_data["latency"].max(), fig)
    unrecovered_failure = failure_data[(failure_data["loadCheckpointCompleteTime"] <= 0) | (failure_data["RecoveryStartTime"] <= 0)]
    fig.add_trace(unrecovered_failure_plot(unrecovered_failure, True), row=1, col=1)
    fig.add_trace(unrecovered_failure_plot(unrecovered_failure, False), row=2, col=1)
    # checkpoints: size_bytes == 0 marks a failed checkpoint
    successful_checkpoints = checkpoint_data[checkpoint_data["size_bytes"] > 0]
    ones = [1] * len(successful_checkpoints)
    fig.add_traces(successful_checkpoints_plot(successful_checkpoints, True), rows=ones, cols=ones)
    fig.add_traces(successful_checkpoints_plot(successful_checkpoints, False), rows=[2] * len(successful_checkpoints), cols=ones)
    failed_checkpoints = checkpoint_data[checkpoint_data["size_bytes"] == 0]
    ones = [1] * len(failed_checkpoints)
    fig.add_traces(failed_checkpoints_plot(failed_checkpoints, True), rows=ones, cols=ones)
    fig.add_traces(failed_checkpoints_plot(failed_checkpoints, False), rows=[2] * len(failed_checkpoints), cols=ones)
    if (export is True):
        export_plots(FIGURE_DIR, dir_prefix + "-latency", fig)
    return fig
# Resource-recording sample period in seconds. NOTE(review): appears unused in
# the visible code — rates below are derived from timeFromZero deltas instead.
SAMPLE_RATE_S = 2
def resource_plot(result_dir, dir_prefix, host, export=False, include_title=True):
    """Build (and optionally export) a 3-row per-host resource figure:
    CPU/memory usage, disk+network reads, and disk+network writes, overlaid
    with failure and checkpoint markers.

    Parameters
    ----------
    result_dir, dir_prefix : str
        Experiment results root and folder name.
    host : str
        Hostname whose resource recordings are plotted (e.g. "flink2").
    export : bool
        When True, also write the figure to FIGURE_DIR via export_plots.
    include_title : bool
        When True use "<dir_prefix>-<host>" as the title, otherwise tighten margins.

    Returns
    -------
    fig : plotly Figure

    Notes
    -----
    Reads the module-level globals TIME_BEGIN, TIME_END, tm_list and FIGURE_DIR.
    """
    # The first zookeeper CPU sample anchors absolute timestamps to t=0.
    zk_cpu = zk1_cpu_df(result_dir, dir_prefix)
    zk_cpu = zk_cpu[(TIME_BEGIN <= zk_cpu["timeFromZero"]) & (zk_cpu["timeFromZero"] <= TIME_END)]
    minTime = zk_cpu["timestamp"].min()
    # BUG FIX: the original `minTime is np.nan` identity check practically never
    # fired (Series.min() returns a fresh NaN object); use pd.isna instead.
    if pd.isna(minTime):
        minTime = 0
    latency_data = latency_df(result_dir, dir_prefix, minTime)
    length = min(latency_data["time"].max(), TIME_END)
    latency_data = latency_data[(TIME_BEGIN <= latency_data["time"]) & (latency_data["time"] <= length)]
    failure_data = failure_df(result_dir, dir_prefix, minTime, length)
    checkpoint_data = checkpoint_df(result_dir, dir_prefix, minTime, length)
    fig = make_subplots(
        rows=3, cols=1,
        subplot_titles=("CPU/Memory Usage Over Time", "Dist/Network Reads Over Time", "Dist/Network Writes Over Time"))
    y_title_size = 15
    # row 1: CPU + memory usage in percent
    fig.update_yaxes(title_text="Usage(%)", title_font = {"size": y_title_size}, row=1, col=1)
    fig.update_xaxes(title_text="timestamp (ms)", title_font = {"size": 15}, range=(max(0, TIME_BEGIN), min(length, TIME_END)), row=1, col=1)
    # row 2: read rates
    fig.update_yaxes(title_text="Read Rate (Bytes/s)", title_font = {"size": y_title_size}, row=2, col=1)
    fig.update_xaxes(title_text="timestamp (ms)", title_font = {"size": 15}, range=(max(0, TIME_BEGIN), min(length, TIME_END)), row=2, col=1)
    # row 3: write rates
    fig.update_yaxes(title_text="Write Rate (Bytes/s)", title_font = {"size": y_title_size}, row=3, col=1)
    fig.update_xaxes(title_text="timestamp (ms)", title_font = {"size": 15}, range=(max(0, TIME_BEGIN), min(length, TIME_END)), row=3, col=1)
    # general layout
    if include_title:
        fig.update_layout(title=dir_prefix + "-" + host)
    else:
        fig.update_layout(margin={'t': 20, 'b': 0})
    fig.update_layout(
        font=dict(
            family="Courier New, monospace",  # font family for all titles
            size=16,                          # font size for all titles
            color="RebeccaPurple"             # color for all titles
        ),
        height=800
    )
    # horizontal legend centered below the plot
    fig.update_layout(legend=dict(
        orientation="h",
        yanchor="top", xanchor="center",
        x=0.5, y=-0.1
    ))
    # CPU: "used" is everything that is not idle
    cpu = resource_df(result_dir, dir_prefix, "cpu", host, minTime, length)
    cpu["used"] = 100 - cpu["idle"]
    fig.add_trace(cpu_usage_plot(cpu, "used", "CPU Total Usage"), row=1, col=1)
    fig.add_trace(cpu_usage_plot(cpu, "user_space", "CPU User Space Usage"), row=1, col=1)
    fig.add_trace(cpu_usage_plot(cpu, "system", "CPU System Usage"), row=1, col=1)
    # memory: prefer the JVM heap recording for task managers; fall back to the
    # OS-level 'memory' recording otherwise
    heap_file = "heap-" + host + ".txt"
    if host in tm_list and os.path.exists(os.path.join(result_dir,dir_prefix,heap_file)):
        memory = resource_df(result_dir, dir_prefix, "heap", host, minTime, length)
        memory["used_percent"] = (memory["used"] / memory["max"]) * 100
    else:
        print(heap_file, " not found. Will use 'top' recording instead")
        memory = resource_df(result_dir, dir_prefix, "memory", host, minTime, length)
        memory["used_percent"] = (memory["used"] / memory["total"]) * 100
    fig.add_trace(memory_usage_plot(memory), row=1, col=1)
    # network: diff consecutive samples and divide by elapsed seconds to turn
    # the recorded counters into Bytes/s rates
    network = resource_df(result_dir, dir_prefix, "network", host, minTime, length)
    network = network.drop(columns=["interface", "timestamp"])
    network["index"] = network["timeFromZero"]
    network = network.set_index("index").diff()
    network = network[network["timeFromZero"] > 0]
    network = network.div(network["timeFromZero"] / 1000, axis=0)
    fig.add_trace(network_read_plot(network), row=2, col=1)
    fig.add_trace(network_write_plot(network), row=3, col=1)
    # disk: same diff/normalize treatment; a sector is 512 bytes
    disk = resource_df(result_dir, dir_prefix, "disk", host, minTime, length)
    disk = disk.drop(columns=["disk", "timestamp"])
    disk["index"] = disk["timeFromZero"]
    disk = disk.set_index("index").diff()
    disk = disk[disk["timeFromZero"] > 0]
    disk = disk.div(disk["timeFromZero"] / 1000, axis=0)
    disk["total_read_bytes_per_s"] = disk["read_sectors"] * 512
    disk["total_write_bytes_per_s"] = disk["write_sectors"] * 512
    fig.add_trace(disk_read_plot(disk), row=2, col=1)
    fig.add_trace(disk_write_plot(disk), row=3, col=1)
    # failures: shaded spans for recovered failures, markers for unrecovered
    recovered_failure = failure_data[(failure_data["loadCheckpointCompleteTime"] > 0) & (failure_data["RecoveryStartTime"] > 0)]
    recovered_failure_plot(recovered_failure, latency_data["latency"].max(), fig)
    unrecovered_failure = failure_data[(failure_data["loadCheckpointCompleteTime"] <= 0) | (failure_data["RecoveryStartTime"] <= 0)]
    fig.add_trace(unrecovered_failure_plot(unrecovered_failure, True), row=1, col=1)
    fig.add_trace(unrecovered_failure_plot(unrecovered_failure, False), row=2, col=1)
    fig.add_trace(unrecovered_failure_plot(unrecovered_failure, False), row=3, col=1)
    # checkpoints: size_bytes == 0 marks a failed checkpoint
    successful_checkpoints = checkpoint_data[checkpoint_data["size_bytes"] > 0]
    ones = [1] * len(successful_checkpoints)
    fig.add_traces(successful_checkpoints_plot(successful_checkpoints, True), rows=ones, cols=ones)
    fig.add_traces(successful_checkpoints_plot(successful_checkpoints, False), rows=[2] * len(successful_checkpoints), cols=ones)
    fig.add_traces(successful_checkpoints_plot(successful_checkpoints, False), rows=[3] * len(successful_checkpoints), cols=ones)
    failed_checkpoints = checkpoint_data[checkpoint_data["size_bytes"] == 0]
    ones = [1] * len(failed_checkpoints)
    fig.add_traces(failed_checkpoints_plot(failed_checkpoints, True), rows=ones, cols=ones)
    fig.add_traces(failed_checkpoints_plot(failed_checkpoints, False), rows=[2] * len(failed_checkpoints), cols=ones)
    fig.add_traces(failed_checkpoints_plot(failed_checkpoints, False), rows=[3] * len(failed_checkpoints), cols=ones)
    if (export is True):
        export_plots(FIGURE_DIR, dir_prefix + "-" + host, fig)
    return fig
# In[77]:
# show column head
# result_dir="C:\\Wenzhong\\我的坚果云\\实验\\results\\"
# dir_prefix="case-study-low-level-failure-load-80000-multi"
# TIME_BEGIN, TIME_END = 110000, 300000
# zk_cpu = zk1_cpu_df(result_dir, dir_prefix)
# minTime = zk_cpu["timestamp"].min()
# latency_data = latency_df(result_dir, dir_prefix, minTime)
# length = latency_data.index.max()
# tm = "flink2"
# host = tm
# resource="cpu"
# print("zk_cpu")
# display(zk_cpu.head(1))
# print("latency_data")
# display(latency_data.head(1))
# print("throughput_df")
# display(throughput_df(result_dir, dir_prefix, tm+".txt", minTime, length).head(1))
# print("failure_df")
# display(failure_df(result_dir, dir_prefix, minTime, length).head(1))
# print("checkpoint_df")
# display(checkpoint_df(result_dir, dir_prefix, minTime, length).head(1))
# print("resource_df")
# display(resource_df(result_dir, dir_prefix, resource, host, minTime, length).head(1))
# zk_cpu["timeFromZero"]
# In[78]:
# result_dir="C:\\Users\\joinp\\Downloads\\results\\"
# result_dir="C:\\Wenzhong\\我的坚果云\\实验\\results\\"
# dir_prefix="failure-break-the-app-load-80000-single"
# # hosts=["flink2"]
# hosts=["hadoop1","hadoop2","hadoop3","hadoop4","flink1","flink2","flink3","flink4","flink5","kafka1","kafka2","redis1","zk1"]
# export=True
# def gc_box(fig, start, cost):
# color = '#AB63FA'
# fig.add_vrect(
# x0=start, x1=start + cost,
# fillcolor=color,
# opacity=0.5,
# layer="below",
# line_color = color,
# line_width=1,
# row="all", col="all"
# )
# TIME_BEGIN = 0
# TIME_END = 2000000
# # fig = latency_throughput_plot(os.path.abspath(result_dir), dir_prefix, export=export, marker_mode=True, include_title=False)
# fig = latency_throughput_plot(os.path.abspath(result_dir), dir_prefix, export=export, marker_mode=export, include_title=~export)
# gc_box(fig, 1730534, 20668)
# gc_box(fig, 1752169, 19970)
# fig.show(scale=2)
# for host in hosts:
# fig = resource_plot(result_dir, dir_prefix, host, export=export, include_title=~export)
# gc_box(fig, 1730534, 20668)
# gc_box(fig, 1752169, 19970)
# fig.show(scale=2)
# In[86]:
# case study (zoom in failure)
result_dir="C:\\Wenzhong\\我的坚果云\\实验\\results\\"
dir_prefix="case-study-low-level-failure-load-80000-multi"
# TIME_BEGIN, TIME_END = 0, 3000000
TIME_BEGIN, TIME_END = 100000, 400000
# hosts=["flink" + str(num) for num in range(2,18)]
hosts=["flink2"]
# hosts=["hadoop2","flink2","kafka1","redis1"]
FIGURE_DIR = "C:\\Wenzhong\\我的坚果云\\实验\\Figures\\results\\preliminary\\"
export = True
latency_throughput_plot(os.path.abspath(result_dir), dir_prefix, marker_mode=export, export=export, include_title=not(export)).show(scale=2)
for host in hosts:
resource_plot(result_dir, dir_prefix, host, export=export, include_title=not(export)).show(scale=2)
# In[ ]:
# case study (zoom in good time)
# Script cell: same case study as above, but zoomed to a failure-free window
# and covering more hosts.
result_dir="C:\\Wenzhong\\我的坚果云\\实验\\results\\"
dir_prefix="case-study-low-level-failure-load-80000-multi"
# Plotting window (ms) read as globals by the plot functions.
TIME_BEGIN, TIME_END = 100000, 300000
# hosts=["flink" + str(num) for num in range(2,18)]
# hosts=[]
hosts=["hadoop2","flink2","kafka1","redis1"]
FIGURE_DIR = "C:\\Wenzhong\\我的坚果云\\实验\\Figures\\results\\preliminary\\"
# Exported figures omit the title (include_title=not(export)).
export = True
latency_throughput_plot(os.path.abspath(result_dir), dir_prefix, marker_mode=export, export=export, include_title=not(export)).show(scale=2)
for host in hosts:
    resource_plot(result_dir, dir_prefix, host, export=export, include_title=not(export)).show(scale=2)
# In[25]:
# result_dir="C:\\Users\\joinp\\Downloads\\results\\"
# result_dir="C:\\Wenzhong\\我的坚果云\\实验\\results\\"
# dir_prefix="async-cp-study-load-100000-single"
# hosts=["flink2"]
# # hosts=["hadoop1","hadoop2","hadoop3","hadoop4","flink1","flink2","flink3","flink4","flink5","kafka1","kafka2","redis1","zk1"]
# export=True
# TIME_BEGIN = 0
# TIME_END = 2000000
# fig = latency_throughput_plot(os.path.abspath(result_dir), dir_prefix, export=export, marker_mode=True, include_title=False)
# fig.show(scale=2)
# for host in hosts:
# fig = resource_plot(result_dir, dir_prefix, host, export=export, include_title=False)
# fig.show(scale=2)
# In[ ]:
# methodology about multi-level
def show_methodology(starts, costs, y, name, showlegend, color="green"):
    """One horizontal segment per checkpoint at height `y` on the methodology
    timeline.

    NOTE(review): despite the name, `costs` holds the absolute END time of
    each segment (callers pass e.g. `starts + HIGH_LEVEL_COST`), so each
    segment spans [start, end]. Only the first segment carries the legend
    entry when `showlegend` is True."""
    traces = []
    for idx, (begin, end) in enumerate(zip(starts, costs)):
        trace = go.Scatter(
            x=[begin, end],
            y=[y, y],
            name=name,
            legendgroup=name,
            marker=dict(symbol='line-ns', color=color, size=7,
                        line=dict(color=color, width=1)),
            showlegend=(idx == 0) if showlegend else False,
        )
        traces.append(trace)
    return traces
def multilevel_costs(starts):
    """Absolute end time of each checkpoint in a multi-level schedule:
    even positions use the module-level HIGH_LEVEL_COST, odd positions the
    LOW_LEVEL_COST.

    The result reuses the dtype of `starts` (via np.copy), so float costs
    are truncated when `starts` is an integer array — same as the original
    behavior."""
    ends = np.copy(starts)
    for idx, begin in enumerate(starts):
        level_cost = HIGH_LEVEL_COST if idx % 2 == 0 else LOW_LEVEL_COST
        ends[idx] = begin + level_cost
    return ends
# Checkpoint interval for the methodology figure, with the per-level costs
# expressed as fractions of it: low-level = 1/8 interval, high-level = 1/2.
interval = 8
LOW_LEVEL_COST = interval / 8
HIGH_LEVEL_COST = interval / 2
# Timeline shows five intervals starting at zero.
min_x = 0
max_x = min_x + 5 * interval
fig = go.Figure()
# Bare timeline canvas for the methodology figure: x axis in minutes with
# outside ticks every half interval, hidden y axis, white background, fixed
# 300px height; the "legend" is drawn manually via annotations below.
fig.update_layout(
    height=300,
    autosize=False,
    showlegend=False,
    plot_bgcolor='white',
    margin=dict(
        autoexpand=False,
        l=0,
        r=0,
        t=0,
    ),
)
fig.update_layout(
    xaxis=dict(
        title="Time (min)",
        showline=True,
        showgrid=False,
        showticklabels=True,
        linecolor='rgb(204, 204, 204)',
        linewidth=2,
        ticks='outside',
        dtick=interval / 2,
        tickfont=dict(family='Arial', size=12, color='rgb(82, 82, 82)'),
    ),
    yaxis=dict(
        showgrid=False,
        zeroline=False,
        showline=False,
        showticklabels=False,
    ),
)
annotations = []
# Row y=1 (listed third): single-level checkpointing every interval, each
# checkpoint costing half an interval.
starts = np.arange(min_x, max_x, interval)
y = 1
name = "3. Single-level -- half interval"
# Shared placement of the row labels (paper-relative x, slightly above row).
pad_x = 0.0
pad_y = 0.2
xanchor = "left"
fig.add_traces(show_methodology(starts, starts + HIGH_LEVEL_COST, y, name, True))
annotations.append(dict(xref='paper', x=pad_x, y=y + pad_y,
                        xanchor=xanchor, yanchor='middle',
                        text=name,
                        font=dict(family='Arial',
                                  size=16),
                        showarrow=False))
# Row y=2: multi-level checkpointing alternating high-/low-level costs.
starts = np.arange(min_x, max_x, interval)
costs = multilevel_costs(starts)
y = 2
name = "2. Multi-level -- half interval"
fig.add_traces(show_methodology(starts, costs, y, name, True, color="red"))
annotations.append(dict(xref='paper', x=pad_x, y=y + pad_y,
                        xanchor=xanchor, yanchor='middle',
                        text=name,
                        font=dict(family='Arial',
                                  size=16),
                        showarrow=False))
# Row y=3: single-level checkpointing at twice the interval.
starts = np.arange(min_x, max_x, interval * 2)
y = 3
name = "1. Single-level"
fig.add_traces(show_methodology(starts, starts + HIGH_LEVEL_COST, y, name, True))
annotations.append(dict(xref='paper', x=pad_x, y=y + pad_y,
                        xanchor=xanchor, yanchor='middle',
                        text=name,
                        font=dict(family='Arial',
                                  size=16),
                        showarrow=False))
fig.update_layout(annotations=annotations)
fig.show()
export_plots(FIGURE_DIR, "methodology_multilevel", fig)
# In[ ]:
# In[ ]:
# 1c4g nodes. flink7: JM, flink8-10: TM.
# show the recovery process is CPU bound
# Script cell: export latency/throughput and resource figures for the 1c4g run.
result_dir="C:\\Wenzhong\\我的坚果云\\实验\\results"
dir_prefix="1c4g-3node-15000-multi"
# Full plotting window (ms) read as globals by the plot functions.
TIME_BEGIN, TIME_END = 0, 4000000
hosts=["flink" + str(num) for num in range(7,11)]
export = True
latency_throughput_plot(os.path.abspath(result_dir), dir_prefix, marker_mode=export, export=export).show(scale=2)
for host in hosts:
    resource_plot(result_dir, dir_prefix, host, export=export).show(scale=2)
# In[ ]:
# Task slots hosted per node; used below to convert whole-job checkpoint
# sizes into per-node figures (size / parallelism * SLOT_PER_NODE).
SLOT_PER_NODE = 2
def recover_cost_df(result_dir, dir_prefix, minTime, length, parallelism):
    """Table of recovered failures joined with the checkpoint each one was
    restored from, plus derived per-node size and speed columns.

    Parameters mirror the other *_df helpers; `parallelism` is the job
    parallelism used to convert totals into per-node figures (assuming
    SLOT_PER_NODE slots per node).

    Returns
    -------
    pd.DataFrame with human-readable, renamed columns.
    """
    checkpoints = checkpoint_df(result_dir, dir_prefix, minTime, length)
    # Keep only checkpoints that actually persisted data.
    checkpoints = checkpoints[checkpoints["size_bytes"] > 0]
    failures = failure_df(result_dir, dir_prefix, minTime, length)
    # Keep only failures that were fully recovered from a known checkpoint.
    fully_recovered = ((failures["checkpointId"] > 0) &
                       (failures["RecoveryStartTime"] > 0) &
                       (failures["loadCheckpointCompleteTime"] > 0))
    failures = failures[fully_recovered]
    res = pd.merge(left=failures,
                   right=checkpoints[["id", "size_bytes", "timeCost_ms"]],
                   how='left', left_on='checkpointId', right_on='id')
    res["size_MB/Node"] = res["size_bytes"] / 1000000 / parallelism * SLOT_PER_NODE
    res["checkpoint speed per node (MB/Sec)"] = res["size_MB/Node"] / res["timeCost_ms"] * 1000
    res["recovery speed per node (MB/Sec)"] = res["size_MB/Node"] / res["RecoveryLength_ms"] * 1000
    res["parallelism"] = parallelism
    return res.rename(columns={
        'timeCost_ms': 'checkpoint cost (ms)',
        'size_MB/Node': 'checkpoint size per node (MB)',
        'RecoveryLength_ms': 'recovery cost (ms)',
    })
def checkpoint_cost_df(result_dir, dir_prefix, minTime, length, parallelism, drop_during_failure=False):
    """Checkpoint table augmented with per-node size, cost and speed columns.

    Parameters
    ----------
    result_dir, dir_prefix : str
        Experiment results root and folder name.
    minTime, length : numeric
        Time-window bounds forwarded to checkpoint_df / failure_df.
    parallelism : int
        Job parallelism, used for per-node conversion (SLOT_PER_NODE slots/node).
    drop_during_failure : bool
        When True, drop checkpoints whose span overlaps a failure/recovery
        window (those run much longer than normal checkpoints).

    Returns
    -------
    pd.DataFrame
    """
    checkpoint_data = checkpoint_df(result_dir, dir_prefix, minTime, length)
    # drop checkpoints during failure (they are too long than normal checkpoints)
    if drop_during_failure:
        # BUG FIX: `failure_data` was referenced here without ever being
        # defined, so drop_during_failure=True always raised NameError.
        failure_data = failure_df(result_dir, dir_prefix, minTime, length)
        checkpoint_data["endTimeFromZero"] = checkpoint_data["startTimeFromZero"] + checkpoint_data["timeCost_ms"]
        # A checkpoint overlaps a failure when either endpoint falls inside a
        # [failedTimeFromZero, recoveredTimeFromZero] window.
        checkpoint_during_failure = checkpoint_data.apply(
            lambda row: np.any(
                ((failure_data["failedTimeFromZero"] <= row["startTimeFromZero"]) & (failure_data["recoveredTimeFromZero"] >= row["startTimeFromZero"])) | \
                ((failure_data["failedTimeFromZero"] <= row["endTimeFromZero"]) & (failure_data["recoveredTimeFromZero"] >= row["endTimeFromZero"]))
            ),
            axis=1)
        checkpoint_data = checkpoint_data[~checkpoint_during_failure]
    checkpoint_data["parallelism"] = parallelism
    checkpoint_data["size per node (MB)"] = checkpoint_data["size_bytes"] / 1024 / 1024 / parallelism * SLOT_PER_NODE
    checkpoint_data["time cost (s)"] = checkpoint_data["timeCost_ms"] / 1000
    checkpoint_data["speed per node (MB/s)"] = checkpoint_data["size per node (MB)"] * 1000 / checkpoint_data["timeCost_ms"]
    return checkpoint_data
def grep_parallelism(file):
    """Extract the Flink parallelism recorded in a conf-copy.yaml.

    Scans for a line containing '# FLINK_PARALLELISM=<n>' and returns n as an
    int, or None when no such line exists.

    Fixes vs. original: the open file handle no longer shadows the `file`
    parameter, the value is taken after the '=' (robust to leading
    whitespace, unlike the fixed `line[20:]` slice), and the no-match result
    is an explicit None.
    """
    marker = "# FLINK_PARALLELISM="
    with open(file, 'r') as fh:
        for line in fh:
            if marker in line:
                return int(line.split("=", 1)[1])
    return None
def grep_config(file, toSearch):
    """Return the value of the first config line matching `toSearch`.

    E.g. for a line 'multilevel.enable: "false"' this returns 'false': the
    value is everything after the first ':', stripped of whitespace and
    surrounding double quotes. Returns None when nothing matches.

    Note: `toSearch` is treated as a regular expression by re.search, so a
    '.' in keys like "multilevel.pattern" matches any character.

    Fix vs. original: the open file handle no longer shadows the `file`
    parameter, and the no-match result is an explicit None.
    """
    with open(file, 'r') as fh:
        for line in fh:
            if re.search(toSearch, line):
                return line.split(':')[1].strip().strip('"')
    return None
# grep_config("C:\\Users\\joinp\\Downloads\\results\\09-21_00-22-26_load-70000-single\\conf-copy.yaml", "multilevel.pattern")
# Script cell: tabulate recovered failures and their checkpoint/recovery
# speeds for the 1c4g run, rounded to two decimals.
result_dir="C:\\Wenzhong\\我的坚果云\\实验\\results"
dir_prefix="1c4g-3node-15000-multi"
TIME_BEGIN, TIME_END = 0, 4000000
hosts=["flink" + str(num) for num in range(7,11)]
export = True
# NOTE(review): latency_df is called here without the minTime argument used
# by earlier calls — presumably it has a default; confirm its signature.
latency_data = latency_df(result_dir, dir_prefix)
minTime = latency_data["currTime"].min()
length = latency_data["time"].max()
# Parallelism is recorded in the copied Flink config inside the result folder.
parallelism = grep_parallelism(os.path.join(result_dir, dir_prefix, "conf-copy.yaml"))
failure_data = recover_cost_df(result_dir, dir_prefix, minTime, length, parallelism)
failure_data = failure_data.round(2)
print(dir_prefix)
display(failure_data[[
    "failedTimeFromZero",
    "checkpointId",
    "checkpoint size per node (MB)",
    "checkpoint cost (ms)",
    "recovery cost (ms)",
    "checkpoint speed per node (MB/Sec)",
    "recovery speed per node (MB/Sec)",
]])
# In[ ]:
# Script cell: tabulate per-checkpoint size/cost/speed for the same run,
# reusing result_dir/dir_prefix from the previous cell.
latency_data = latency_df(result_dir, dir_prefix)
minTime = latency_data["currTime"].min()
length = latency_data["time"].max()
print(dir_prefix)
parallelism = grep_parallelism(os.path.join(result_dir, dir_prefix, "conf-copy.yaml"))
res = checkpoint_cost_df(result_dir, dir_prefix, minTime, length, parallelism).round(2)
# display(res[(res["speed per node (MB/s)"] < 40) & (res["id"] % 2 == 1)])
display(res)
| [
"os.mkdir",
"os.path.abspath",
"numpy.copy",
"warnings.filterwarnings",
"plotly.graph_objs.Scatter",
"pandas.merge",
"kaleido.scopes.plotly.PlotlyScope",
"os.path.exists",
"IPython.display.display",
"numpy.any",
"numpy.arange",
"re.search",
"plotly.subplots.make_subplots",
"plotly.graph_ob... | [((349, 383), 'plotly.offline.init_notebook_mode', 'init_notebook_mode', ([], {'connected': '(True)'}), '(connected=True)\n', (367, 383), False, 'from plotly.offline import init_notebook_mode, iplot\n'), ((400, 433), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (423, 433), False, 'import warnings\n'), ((595, 619), 'plotly.offline.init_notebook_mode', 'pyo.init_notebook_mode', ([], {}), '()\n', (617, 619), True, 'import plotly.offline as pyo\n'), ((820, 833), 'kaleido.scopes.plotly.PlotlyScope', 'PlotlyScope', ([], {}), '()\n', (831, 833), False, 'from kaleido.scopes.plotly import PlotlyScope\n'), ((29726, 29737), 'plotly.graph_objs.Figure', 'go.Figure', ([], {}), '()\n', (29735, 29737), True, 'import plotly.graph_objs as go\n'), ((30449, 30482), 'numpy.arange', 'np.arange', (['min_x', 'max_x', 'interval'], {}), '(min_x, max_x, interval)\n', (30458, 30482), True, 'import numpy as np\n'), ((31006, 31039), 'numpy.arange', 'np.arange', (['min_x', 'max_x', 'interval'], {}), '(min_x, max_x, interval)\n', (31015, 31039), True, 'import numpy as np\n'), ((31546, 31583), 'numpy.arange', 'np.arange', (['min_x', 'max_x', '(interval * 2)'], {}), '(min_x, max_x, interval * 2)\n', (31555, 31583), True, 'import numpy as np\n'), ((36334, 36560), 'IPython.display.display', 'display', (["failure_data[['failedTimeFromZero', 'checkpointId',\n 'checkpoint size per node (MB)', 'checkpoint cost (ms)',\n 'recovery cost (ms)', 'checkpoint speed per node (MB/Sec)',\n 'recovery speed per node (MB/Sec)']]"], {}), "(failure_data[['failedTimeFromZero', 'checkpointId',\n 'checkpoint size per node (MB)', 'checkpoint cost (ms)',\n 'recovery cost (ms)', 'checkpoint speed per node (MB/Sec)',\n 'recovery speed per node (MB/Sec)']])\n", (36341, 36560), False, 'from IPython.display import display, HTML\n'), ((36998, 37010), 'IPython.display.display', 'display', (['res'], {}), '(res)\n', (37005, 37010), False, 'from IPython.display 
import display, HTML\n'), ((5656, 5764), 'plotly.graph_objs.Scatter', 'go.Scatter', ([], {'x': "res['startTimeFromZero']", 'y': "res['elements/second']", 'line_width': '(1)', 'name': '"""Input Throughput"""'}), "(x=res['startTimeFromZero'], y=res['elements/second'], line_width\n =1, name='Input Throughput')\n", (5666, 5764), True, 'import plotly.graph_objs as go\n'), ((7569, 7639), 'plotly.graph_objs.Scatter', 'go.Scatter', ([], {'x': 'cpu.timeFromZero', 'y': 'cpu[column]', 'line_width': '(1)', 'name': 'name'}), '(x=cpu.timeFromZero, y=cpu[column], line_width=1, name=name)\n', (7579, 7639), True, 'import plotly.graph_objs as go\n'), ((13533, 13692), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(2)', 'cols': '(1)', 'subplot_titles': "('Latency Over Time', 'Throughput Over Time')", 'specs': "[[{'secondary_y': False}], [{'secondary_y': True}]]"}), "(rows=2, cols=1, subplot_titles=('Latency Over Time',\n 'Throughput Over Time'), specs=[[{'secondary_y': False}], [{\n 'secondary_y': True}]])\n", (13546, 13692), False, 'from plotly.subplots import make_subplots\n'), ((17978, 18123), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(3)', 'cols': '(1)', 'subplot_titles': "('CPU/Memory Usage Over Time', 'Dist/Network Reads Over Time',\n 'Dist/Network Writes Over Time')"}), "(rows=3, cols=1, subplot_titles=('CPU/Memory Usage Over Time',\n 'Dist/Network Reads Over Time', 'Dist/Network Writes Over Time'))\n", (17991, 18123), False, 'from plotly.subplots import make_subplots\n'), ((29401, 29416), 'numpy.copy', 'np.copy', (['starts'], {}), '(starts)\n', (29408, 29416), True, 'import numpy as np\n'), ((33218, 33360), 'pandas.merge', 'pd.merge', ([], {'left': 'failure_data', 'right': "checkpoint_data[['id', 'size_bytes', 'timeCost_ms']]", 'how': '"""left"""', 'left_on': '"""checkpointId"""', 'right_on': '"""id"""'}), "(left=failure_data, right=checkpoint_data[['id', 'size_bytes',\n 'timeCost_ms']], how='left', left_on='checkpointId', 
right_on='id')\n", (33226, 33360), True, 'import pandas as pd\n'), ((36137, 36191), 'os.path.join', 'os.path.join', (['result_dir', 'dir_prefix', '"""conf-copy.yaml"""'], {}), "(result_dir, dir_prefix, 'conf-copy.yaml')\n", (36149, 36191), False, 'import os\n'), ((36770, 36824), 'os.path.join', 'os.path.join', (['result_dir', 'dir_prefix', '"""conf-copy.yaml"""'], {}), "(result_dir, dir_prefix, 'conf-copy.yaml')\n", (36782, 36824), False, 'import os\n'), ((999, 1056), 'os.path.join', 'os.path.join', (['result_dir', 'dir_prefix', '"""count-latency.txt"""'], {}), "(result_dir, dir_prefix, 'count-latency.txt')\n", (1011, 1056), False, 'import os\n'), ((1681, 1737), 'os.path.join', 'os.path.join', (['result_dir', 'dir_prefix', '"""restart-cost.txt"""'], {}), "(result_dir, dir_prefix, 'restart-cost.txt')\n", (1693, 1737), False, 'import os\n'), ((2437, 2483), 'os.path.join', 'os.path.join', (['result_dir', 'dir_prefix', 'filename'], {}), '(result_dir, dir_prefix, filename)\n', (2449, 2483), False, 'import os\n'), ((2954, 3009), 'os.path.join', 'os.path.join', (['result_dir', 'dir_prefix', '"""checkpoints.txt"""'], {}), "(result_dir, dir_prefix, 'checkpoints.txt')\n", (2966, 3009), False, 'import os\n'), ((3465, 3516), 'os.path.join', 'os.path.join', (['result_dir', 'dir_prefix', '"""cpu-zk1.txt"""'], {}), "(result_dir, dir_prefix, 'cpu-zk1.txt')\n", (3477, 3516), False, 'import os\n'), ((3893, 3961), 'os.path.join', 'os.path.join', (['result_dir', 'dir_prefix', "(resource + '-' + host + '.txt')"], {}), "(result_dir, dir_prefix, resource + '-' + host + '.txt')\n", (3905, 3961), False, 'import os\n'), ((4373, 4393), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (4387, 4393), False, 'import os\n'), ((4403, 4417), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (4411, 4417), False, 'import os\n'), ((4438, 4475), 'os.path.join', 'os.path.join', (['path', "(fig_name + '.png')"], {}), "(path, fig_name + '.png')\n", (4450, 4475), False, 'import os\n'), 
((4736, 4807), 'plotly.graph_objs.Scatter', 'go.Scatter', ([], {'x': 'latency_data.time', 'y': 'latency_data.latency', 'name': '"""latency"""'}), "(x=latency_data.time, y=latency_data.latency, name='latency')\n", (4746, 4807), True, 'import plotly.graph_objs as go\n'), ((20161, 20208), 'os.path.join', 'os.path.join', (['result_dir', 'dir_prefix', 'heap_file'], {}), '(result_dir, dir_prefix, heap_file)\n', (20173, 20208), False, 'import os\n'), ((27317, 27344), 'os.path.abspath', 'os.path.abspath', (['result_dir'], {}), '(result_dir)\n', (27332, 27344), False, 'import os\n'), ((27966, 27993), 'os.path.abspath', 'os.path.abspath', (['result_dir'], {}), '(result_dir)\n', (27981, 27993), False, 'import os\n'), ((32467, 32494), 'os.path.abspath', 'os.path.abspath', (['result_dir'], {}), '(result_dir)\n', (32482, 32494), False, 'import os\n'), ((35349, 35388), 're.search', 're.search', (['"""# FLINK_PARALLELISM="""', 'line'], {}), "('# FLINK_PARALLELISM=', line)\n", (35358, 35388), False, 'import re\n'), ((35579, 35604), 're.search', 're.search', (['toSearch', 'line'], {}), '(toSearch, line)\n', (35588, 35604), False, 'import re\n'), ((34381, 34671), 'numpy.any', 'np.any', (["((failure_data['failedTimeFromZero'] <= row['startTimeFromZero']) & (\n failure_data['recoveredTimeFromZero'] >= row['startTimeFromZero']) | (\n failure_data['failedTimeFromZero'] <= row['endTimeFromZero']) & (\n failure_data['recoveredTimeFromZero'] >= row['endTimeFromZero']))"], {}), "((failure_data['failedTimeFromZero'] <= row['startTimeFromZero']) & (\n failure_data['recoveredTimeFromZero'] >= row['startTimeFromZero']) | (\n failure_data['failedTimeFromZero'] <= row['endTimeFromZero']) & (\n failure_data['recoveredTimeFromZero'] >= row['endTimeFromZero']))\n", (34387, 34671), True, 'import numpy as np\n'), ((5087, 5136), 'os.path.join', 'os.path.join', (['result_dir', 'dir_prefix', "(tm + '.txt')"], {}), "(result_dir, dir_prefix, tm + '.txt')\n", (5099, 5136), False, 'import os\n')] |
import pandas as pd
import numpy as np
import itertools
import warnings
import sys
try:
import matplotlib.pyplot as plt
import seaborn as sns
except ImportError:
print('Importing hier_diff without matplotlib.')
import scipy.cluster.hierarchy as sch
from scipy.spatial import distance
from scipy import stats
try:
from adjustwithin import adjustnonnan
except ImportError:
print('Importing hier_diff without multiplicity adjustment package.')
# Public API of this module. NOTE(review): testCondition and testSubset are
# defined elsewhere in the file, outside this excerpt.
__all__ = ['testHClusters',
           'getClusterMembers',
           'plotHClustProportions',
           'testCondition',
           'testSubset']
def testHClusters(cntsDf, members, cols=None, min_count=5):
    """Test each cluster for disproportionate representation of TCRs
    from a set of conditions (e.g. stimulations). Test is based on the Chi2 statistic,
    testing the observed proportions vs. expected proportions of TCRs
    that are in vs. not-in a cluster (i.e. 2 x N_cols table).

    Parameters
    ----------
    cntsDf : pd.DataFrame [TCRs, conditions]
        Counts table of TCRs (rows) that have been observed in specific conditions (columns)
        Importantly the integer indices of the rows must match those used to define
        clusters in members.
    members : dict of lists
        Each element has a cluster ID (key) and a list of cluster members (indices into cntsDf)
        Can be generated from getClusterMembers with the result from calling sch.linkage (Z).
        Cluster need not be mutually exclusive, and are not when using hierarchical clustering.
    cols : list
        Columns in cntsDf to use as conditions (default: all columns of cntsDf)
    min_count : int
        Required minimum number of member TCRs in a cluster to run the test
        (clusters whose in-cluster total is not strictly greater than this
        get NaN statistics but still appear in the output).

    Returns
    -------
    resDf : pd.DataFrame [clusters, result columns]
        Results from the tests with observed/expected counts and frequencies, Chi2 statistics,
        p-values, FWER and FDR adjusted p-values (adjusted columns only when the
        optional adjustwithin package was imported)."""
    if cols is None:
        cols = cntsDf.columns
    tot = cntsDf.sum()
    # Total cell count over all TCRs and conditions; normalizes expected counts.
    Ncells = tot.sum()
    # NOTE(review): uCDR3 appears unused in this function.
    uCDR3 = list(cntsDf.index)
    results = []
    for cid, m in members.items():
        # Complement of the cluster: all row indices not in m.
        notM = [i for i in range(cntsDf.shape[0]) if not i in m]
        # 2 x N_cols contingency table: row 0 = per-condition counts inside the
        # cluster, row 1 = per-condition counts outside it.
        obs = np.concatenate((np.sum(cntsDf[cols].values[m, :], axis=0, keepdims=True),
                              np.sum(cntsDf[cols].values[notM, :], axis=0, keepdims=True)), axis=0)
        # Only test clusters with strictly more than min_count member cells.
        if np.sum(obs, axis=1)[0] > min_count:
            """Inner product of the marginal totals along both axes, divided by total cells"""
            expect = np.dot(np.sum(obs, keepdims=True, axis=1),
                            np.sum(obs, keepdims=True, axis=0)) / Ncells
            # Suppress divide-by-zero/invalid warnings from empty expected cells.
            with warnings.catch_warnings():
                warnings.simplefilter('ignore')
                chi2 = (obs - expect)**2 / expect
                sum_chi2 = np.sum(chi2)
            # Degrees of freedom for a 2 x N table: (2-1)*(N-1) = N-1.
            degf = len(cols) - 1
            pvalue = 1 - stats.chi2.cdf(sum_chi2, degf)
            results.append({'cid':cid,
                            'chi2':sum_chi2,
                            'pvalue':pvalue,
                            'observed':tuple(obs[0, :]),
                            'observed_prop':(obs / np.sum(obs, axis=0))[0, :],
                            'expected':tuple(expect[0, :]),
                            'expected_prop':(expect / np.sum(obs, axis=0))[0, :],
                            'members':tuple(m),
                            'labels':cols})
        else:
            # Cluster too small: keep the observed values but record NaN stats
            # so the cluster still appears in the result table.
            results.append({'cid':cid,
                            'chi2':np.nan,
                            'pvalue':np.nan,
                            'observed':tuple(obs[0, :]),
                            'observed_prop': (obs / np.sum(obs, axis=0))[0, :],
                            'expected':(np.nan, )*len(cols),
                            'expected_prop': (np.nan, )*len(cols),
                            'members':tuple(m),
                            'labels':cols})
    resDf = pd.DataFrame(results)
    # Multiplicity adjustment only when the optional adjustwithin import succeeded.
    if 'adjustwithin' in sys.modules:
        resDf.loc[:, 'FWER-pvalue'] = adjustnonnan(resDf['pvalue'], method='holm')
        resDf.loc[:, 'FDR-qvalue'] = adjustnonnan(resDf['pvalue'], method='fdr_bh')
    return resDf.set_index('cid')
def getClusterMembers(Z):
    """Map each internal node of a hierarchical clustering to its leaf indices.

    Walks the scipy-style linkage matrix and records, for every merge, the two
    children under the new cluster ID (1 + row index + number of rows, matching
    scipy.cluster.hierarchy numbering). Leaf membership is then recovered by
    recursively expanding each node's children.

    Parameters
    ----------
    Z : linkage matrix [clusters, 4]
        Result of scipy.cluster.hierarchy.linkage

    Returns
    -------
    members : dict of lists
        Each key is a cluster ID mapped to the list of member indices
        (indices into the original data matrix)."""
    n_merges = Z.shape[0]
    children = {}
    for row, merge in enumerate(Z):
        children[1 + row + n_merges] = [merge[0], merge[1]]

    def _leaves(node):
        # IDs up to n_merges refer to original observations (leaves).
        if node <= n_merges:
            return [int(node)]
        left, right = children[node]
        return _leaves(left) + _leaves(right)

    return {cid: _leaves(cid)
            for cid in range(n_merges + 1, max(children.keys()) + 1)}
def plotHClustProportions(figh, Z, resDf, alpha_col='pvalue', alpha=0.05, colors=None, ann='N', xLim=None, maxY=None, min_count=20):
    """Plot tree of linkage-based hierarchical clustering, with nodes colored with stacked bars
    representing proportion of cluster members associated with specific conditions. Nodes also optionally
    annotated with pvalue, number of members or cluster ID.

    Parameters
    ----------
    figh : mpl Figure() handle
    Z : linkage matrix
        Result of calling sch.linkage on a compressed pair-wise distance matrix
    resDf : pd.DataFrame
        Result from calling testHClusters, with observed/frequencies and p-values for each node
    alpha_col : str
        Column in resDf to use for 'alpha' annotation
    alpha : float
        Threshold for plotting the stacked bars and annotation
    colors : tuple of valid colors
        Used for stacked bars of conditions at each node
    ann : str
        Indicates how nodes should be annotated: N, alpha, CID supported
    xLim : tuple
        Apply x-lims after plotting to focus on particular part of the tree
    maxY : float
        Optional upper y-axis limit
    min_count : int
        Minimum cluster size for drawing bars/annotation (when alpha is not None)"""
    # One stacked-bar segment per condition; the stored tuples define the count.
    nCategories = len(resDf['observed'].iloc[0])
    if colors is None:
        colors = sns.color_palette('Set1', n_colors=nCategories)
    labels = resDf['labels'].iloc[0]
    # Compute dendrogram coordinates only (no_plot). The link_color_func is
    # abused to smuggle each node's cluster ID out as a hex string, which is
    # decoded back to an int below.
    dend = sch.dendrogram(Z, no_plot=True,
                          color_threshold=None,
                          link_color_func=lambda lid: hex(lid),
                          above_threshold_color='FFFFF')
    figh.clf()
    axh = plt.axes((0.05, 0.07, 0.8, 0.8), facecolor='w')
    lowestY = None
    annotateCount = 0
    for xx, yy, hex_cid in zip(dend['icoord'], dend['dcoord'], dend['color_list']):
        # Recover the cluster ID encoded as a hex "color" string.
        cid = int(hex_cid, 16)
        # icoord units are 10 per leaf; rescale to leaf-index units.
        xx = np.array(xx) / 10
        axh.plot(xx, yy, zorder=1, lw=0.5, color='k', alpha=1)
        N = np.sum(resDf.loc[cid, 'observed'])
        # NOTE(review): 'and' binds tighter than 'or', so when alpha is None
        # every node is drawn regardless of min_count -- confirm intended.
        if alpha is None or resDf.loc[cid, alpha_col] <= alpha and N > min_count:
            obs = np.asarray(resDf.loc[cid, 'observed_prop'])
            obs = obs / np.sum(obs)
            # Horizontal span of this node's bar along the top of the merge.
            L = (xx[2] - xx[1])
            xvec = L * np.concatenate(([0.], obs, [1.]))  # NOTE(review): unused
            curX = xx[1]
            for i in range(len(obs)):
                c = colors[i]
                # One segment per condition, width proportional to its share.
                axh.plot([curX, curX + L*obs[i]],
                         yy[1:3],
                         color=c,
                         lw=10,
                         solid_capstyle='butt')
                curX += L*obs[i]
            if ann == 'N':
                s = '%1.0f' % N
            elif ann == 'CID':
                s = cid
            elif ann == 'alpha':
                if resDf.loc[cid, alpha_col] < 0.001:
                    s = '< 0.001'
                else:
                    s = '%1.3f' % resDf.loc[cid, alpha_col]
            # NOTE(review): any other non-empty ann value leaves s unbound
            # (NameError); only 'N', 'CID', 'alpha' or '' are supported.
            if not ann == '':# and annotateCount < annC:
                xy = (xx[1] + L/2, yy[1])
                # print(s,np.round(xy[0]), np.round(xy[1]))
                annotateCount += 1
                axh.annotate(s,
                             xy=xy,
                             size='x-small',
                             horizontalalignment='center',
                             verticalalignment='center')
            # Track the lowest drawn node so the y-axis can zoom to it.
            # NOTE(review): nesting reconstructed from a flattened source --
            # assumed to apply only to nodes passing the filter above; confirm.
            if lowestY is None or yy[1] < lowestY:
                lowestY = yy[1]
    yl = axh.get_ylim()
    if not lowestY is None:
        # Zoom in to just below the lowest annotated node.
        yl0 = 0.9*lowestY
    else:
        yl0 = yl[0]
    if not maxY is None:
        yl1 = maxY
    else:
        yl1 = yl[1]
    axh.set_ylim((yl0, yl1))
    axh.set_yticks(())
    if not xLim is None:
        if xLim[1] is None:
            # Open-ended upper x-limit: keep whatever matplotlib chose.
            xl1 = axh.get_xlim()[1]
            xLim = (xLim[0], xl1)
        axh.set_xlim(xLim)
    else:
        xLim = axh.get_xlim()
    # Roughly 10 evenly spaced leaf-index ticks within the visible range.
    # NOTE(review): raises on a zero slice step when fewer than 10 ticks
    # fall in range (len(xt) // 10 == 0).
    xt = [x for x in range(0, Z.shape[0]) if x <= xLim[1] and x>= xLim[0]]
    xt = xt[::len(xt) // 10]
    # xtl = [x//10 for x in xt]
    axh.set_xticks(xt)
    # axh.set_xticklabels(xtl)
    legh = axh.legend([plt.Rectangle((0,0), 1, 1, color=c) for c in colors],
                      labels,
                      loc='upper left', bbox_to_anchor=(1, 1))
def testCondition(df, indexCol, dmatDf, gbCol, gbValues=None, countCol='Cells', min_count=3):
    """Hierarchically cluster TCRs by pairwise distance and test each cluster
    for disproportionate association of its members with a condition in gbCol.

    Parameters
    ----------
    df : pd.DataFrame [TCRs, metadata]
        Contains frequency data for TCRs in longform.
        May be a subset of the larger dataset that was used for clustering.
    indexCol : str
        Column to use as the index for individual TCRs
    dmatDf : pd.DataFrame [unique indices, unique indices]
        Contains pairwise distances among all unique values in the indexCol of df
    gbCol : str
        Column of metadata in df containing conditions for testing
    gbValues : list
        Values relevant for testing; may be fewer than all values in gbCol
        to ignore irrelevant conditions.
    countCol : str
        Column containing the integer counts for testing
    min_count : int
        Required minimum number of member TCRs in a cluster to run the test.

    Returns
    -------
    Z : linkage matrix from sch.linkage
    resDf : pd.DataFrame of per-cluster test results
    uIndices : np.ndarray of the unique index values, in clustering order"""
    if gbValues is None:
        gbValues = sorted(df[gbCol].unique())
    # Counts per unique TCR (rows) x condition (cols), zero-filled.
    counts = df.groupby([indexCol, gbCol])[countCol].agg(np.sum).unstack(gbCol, fill_value=0)[gbValues]
    unique_idx = list(df[indexCol].dropna().unique())
    # Subset the distance matrix, condense it, then complete-linkage cluster.
    sub_dmat = dmatDf.loc[:, unique_idx].loc[unique_idx, :]
    Z = sch.linkage(distance.squareform(sub_dmat.values), method='complete')
    resDf = testHClusters(counts, getClusterMembers(Z), gbValues, min_count=min_count)
    return Z, resDf, np.array(unique_idx)
def testSubset(df, fullIndex, indexCol, members, gbCol='Stimulus', gbValues=None, countCol='Cells', min_count=7, nsamps=None, rseed=110820):
    """Test clusters for disproportionate association of members with a condition indicated in gbCol.

    Flexible for testing a subset of the data that was used for clustering
    (and which is represented in members). This is helpful when the clustering
    is more accurate with the larger dataset, but a question is asked of only
    a subset of the data.

    Parameters
    ----------
    df : pd.DataFrame [TCRs, metadata]
        Contains frequency data for TCRs in longform.
        May be a subset of the larger dataset that was used for clustering.
    fullIndex : list
        All unique values of the indexCol in the whole dataset.
        Order of values must match the integer indices in members.
    indexCol : str
        Column to use as the index for individual TCRs
    members : dict of lists
        Each element has a cluster ID (key) and a list of cluster members
        (indices into cntsDf). Can be generated from getClusterMembers with
        the result of calling sch.linkage (Z).
    gbCol : str
        Column of metadata containing conditions for testing
    gbValues : list
        Values relevant for testing; may be fewer than all values in gbCol
        to ignore irrelevant conditions.
    countCol : str
        Column containing the integer counts for testing
    min_count : int
        Required minimum number of member TCRs in a cluster to run the test.
    nsamps : int
        Number of permutations for permutation-based testing
    rseed : int
        Random number seed for permutation-based testing

    Returns
    -------
    resDf : pd.DataFrame
        Per-cluster test results; when nsamps is not None, also contains
        'Perm P-pvalue' and 'Perm Chi2-pvalue' columns."""
    if gbValues is None:
        gbValues = sorted(df[gbCol].unique())
    # Counts per unique TCR (rows) x condition (cols), reindexed onto the full
    # clustering index so row positions match the integer indices in members.
    cnts = df.groupby([indexCol, gbCol])[countCol].agg(np.sum).unstack(gbCol, fill_value=0)[gbValues]
    cnts = cnts.reindex(fullIndex, axis=0, fill_value=0)
    resDf = testHClusters(cnts, members, gbValues, min_count=min_count)
    if nsamps is not None:
        # Preliminarily, permutation-based p-values have correlated perfectly
        # with the analytic p-values.
        np.random.seed(rseed)
        rtmp = df.copy()
        rchi2 = np.zeros((resDf.shape[0], nsamps))
        rpvalue = np.zeros((resDf.shape[0], nsamps))
        for sampi in range(nsamps):
            # Shuffle the condition labels across rows, then recompute the test.
            rtmp.loc[:, gbCol] = rtmp[gbCol].values[np.random.permutation(rtmp.shape[0])]
            # BUGFIX: use countCol rather than the hard-coded 'Cells' so the
            # permutation test matches the analytic test for any count column.
            rcnts = rtmp.groupby([indexCol, gbCol])[countCol].agg(np.sum).unstack(gbCol, fill_value=0)
            rcnts = rcnts.reindex(fullIndex, axis=0, fill_value=0)
            rres = testHClusters(rcnts, members, gbValues, min_count=min_count)
            rchi2[:, sampi] = rres['chi2']
            rpvalue[:, sampi] = rres['pvalue']
        # Permutation p-values with the +1 correction to avoid exact zeros.
        ppvalue = ((rpvalue <= resDf['pvalue'].values[:, None]).sum(axis=1) + 1) / (nsamps + 1)
        # NOTE(review): for chi2, larger values are more extreme; '>=' may be
        # intended here -- confirm before relying on 'Perm Chi2-pvalue'.
        pchi2 = ((rchi2 <= resDf['chi2'].values[:, None]).sum(axis=1) + 1) / (nsamps + 1)
        # Clusters that were too small for the analytic test stay NaN.
        ppvalue[np.isnan(resDf['chi2'].values)] = np.nan
        pchi2[np.isnan(resDf['chi2'].values)] = np.nan
        resDf = resDf.assign(**{'Perm P-pvalue':ppvalue, 'Perm Chi2-pvalue':pchi2})
    return resDf
"pandas.DataFrame",
"numpy.sum",
"adjustwithin.adjustnonnan",
"numpy.random.seed",
"warnings.simplefilter",
"matplotlib.pyplot.axes",
"scipy.spatial.distance.squareform",
"numpy.asarray",
"scipy.cluster.hierarchy.linkage",
"numpy.zeros",
"numpy.isnan",
"matplotlib.pyplot.Rectangle",
"numpy.a... | [((3972, 3993), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (3984, 3993), True, 'import pandas as pd\n'), ((6762, 6809), 'matplotlib.pyplot.axes', 'plt.axes', (['(0.05, 0.07, 0.8, 0.8)'], {'facecolor': '"""w"""'}), "((0.05, 0.07, 0.8, 0.8), facecolor='w')\n", (6770, 6809), True, 'import matplotlib.pyplot as plt\n'), ((10720, 10752), 'scipy.spatial.distance.squareform', 'distance.squareform', (['dmat.values'], {}), '(dmat.values)\n', (10739, 10752), False, 'from scipy.spatial import distance\n'), ((10761, 10807), 'scipy.cluster.hierarchy.linkage', 'sch.linkage', (['compressedDmat'], {'method': '"""complete"""'}), "(compressedDmat, method='complete')\n", (10772, 10807), True, 'import scipy.cluster.hierarchy as sch\n'), ((4071, 4115), 'adjustwithin.adjustnonnan', 'adjustnonnan', (["resDf['pvalue']"], {'method': '"""holm"""'}), "(resDf['pvalue'], method='holm')\n", (4083, 4115), False, 'from adjustwithin import adjustnonnan\n'), ((4153, 4199), 'adjustwithin.adjustnonnan', 'adjustnonnan', (["resDf['pvalue']"], {'method': '"""fdr_bh"""'}), "(resDf['pvalue'], method='fdr_bh')\n", (4165, 4199), False, 'from adjustwithin import adjustnonnan\n'), ((6426, 6473), 'seaborn.color_palette', 'sns.color_palette', (['"""Set1"""'], {'n_colors': 'nCategories'}), "('Set1', n_colors=nCategories)\n", (6443, 6473), True, 'import seaborn as sns\n'), ((7074, 7108), 'numpy.sum', 'np.sum', (["resDf.loc[cid, 'observed']"], {}), "(resDf.loc[cid, 'observed'])\n", (7080, 7108), True, 'import numpy as np\n'), ((10936, 10954), 'numpy.array', 'np.array', (['uIndices'], {}), '(uIndices)\n', (10944, 10954), True, 'import numpy as np\n'), ((13321, 13342), 'numpy.random.seed', 'np.random.seed', (['rseed'], {}), '(rseed)\n', (13335, 13342), True, 'import numpy as np\n'), ((13384, 13418), 'numpy.zeros', 'np.zeros', (['(resDf.shape[0], nsamps)'], {}), '((resDf.shape[0], nsamps))\n', (13392, 13418), True, 'import numpy as np\n'), ((13437, 13471), 'numpy.zeros', 
'np.zeros', (['(resDf.shape[0], nsamps)'], {}), '((resDf.shape[0], nsamps))\n', (13445, 13471), True, 'import numpy as np\n'), ((2860, 2872), 'numpy.sum', 'np.sum', (['chi2'], {}), '(chi2)\n', (2866, 2872), True, 'import numpy as np\n'), ((6980, 6992), 'numpy.array', 'np.array', (['xx'], {}), '(xx)\n', (6988, 6992), True, 'import numpy as np\n'), ((7209, 7252), 'numpy.asarray', 'np.asarray', (["resDf.loc[cid, 'observed_prop']"], {}), "(resDf.loc[cid, 'observed_prop'])\n", (7219, 7252), True, 'import numpy as np\n'), ((9168, 9204), 'matplotlib.pyplot.Rectangle', 'plt.Rectangle', (['(0, 0)', '(1)', '(1)'], {'color': 'c'}), '((0, 0), 1, 1, color=c)\n', (9181, 9204), True, 'import matplotlib.pyplot as plt\n'), ((14139, 14169), 'numpy.isnan', 'np.isnan', (["resDf['chi2'].values"], {}), "(resDf['chi2'].values)\n", (14147, 14169), True, 'import numpy as np\n'), ((14194, 14224), 'numpy.isnan', 'np.isnan', (["resDf['chi2'].values"], {}), "(resDf['chi2'].values)\n", (14202, 14224), True, 'import numpy as np\n'), ((2257, 2313), 'numpy.sum', 'np.sum', (['cntsDf[cols].values[m, :]'], {'axis': '(0)', 'keepdims': '(True)'}), '(cntsDf[cols].values[m, :], axis=0, keepdims=True)\n', (2263, 2313), True, 'import numpy as np\n'), ((2345, 2404), 'numpy.sum', 'np.sum', (['cntsDf[cols].values[notM, :]'], {'axis': '(0)', 'keepdims': '(True)'}), '(cntsDf[cols].values[notM, :], axis=0, keepdims=True)\n', (2351, 2404), True, 'import numpy as np\n'), ((2426, 2445), 'numpy.sum', 'np.sum', (['obs'], {'axis': '(1)'}), '(obs, axis=1)\n', (2432, 2445), True, 'import numpy as np\n'), ((2711, 2736), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (2734, 2736), False, 'import warnings\n'), ((2754, 2785), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (2775, 2785), False, 'import warnings\n'), ((2932, 2962), 'scipy.stats.chi2.cdf', 'stats.chi2.cdf', (['sum_chi2', 'degf'], {}), '(sum_chi2, degf)\n', (2946, 2962), False, 'from scipy 
import stats\n'), ((7277, 7288), 'numpy.sum', 'np.sum', (['obs'], {}), '(obs)\n', (7283, 7288), True, 'import numpy as np\n'), ((7344, 7379), 'numpy.concatenate', 'np.concatenate', (['([0.0], obs, [1.0])'], {}), '(([0.0], obs, [1.0]))\n', (7358, 7379), True, 'import numpy as np\n'), ((13560, 13596), 'numpy.random.permutation', 'np.random.permutation', (['rtmp.shape[0]'], {}), '(rtmp.shape[0])\n', (13581, 13596), True, 'import numpy as np\n'), ((2585, 2619), 'numpy.sum', 'np.sum', (['obs'], {'keepdims': '(True)', 'axis': '(1)'}), '(obs, keepdims=True, axis=1)\n', (2591, 2619), True, 'import numpy as np\n'), ((2649, 2683), 'numpy.sum', 'np.sum', (['obs'], {'keepdims': '(True)', 'axis': '(0)'}), '(obs, keepdims=True, axis=0)\n', (2655, 2683), True, 'import numpy as np\n'), ((3200, 3219), 'numpy.sum', 'np.sum', (['obs'], {'axis': '(0)'}), '(obs, axis=0)\n', (3206, 3219), True, 'import numpy as np\n'), ((3342, 3361), 'numpy.sum', 'np.sum', (['obs'], {'axis': '(0)'}), '(obs, axis=0)\n', (3348, 3361), True, 'import numpy as np\n'), ((3712, 3731), 'numpy.sum', 'np.sum', (['obs'], {'axis': '(0)'}), '(obs, axis=0)\n', (3718, 3731), True, 'import numpy as np\n')] |
# Created byMartin.cz
# Copyright (c) <NAME>. All rights reserved.
import unittest
import pero
import numpy
class TestCase(unittest.TestCase):
    """Test case for linear interpolator."""


    def _check_range(self, start, end, pairs):
        # For each (value, expected) pair, verify normalize and its inverse.
        interpol = pero.LinInterpol()
        for value, expected in pairs:
            self.assertEqual(interpol.normalize(value, start, end), expected)
            self.assertEqual(interpol.denormalize(expected, start, end), value)


    def test_positive(self):
        """Tests whether interpolator works correctly for positive range."""

        # inside, left, right, zero
        self._check_range(2, 4, ((3, 0.5), (1, -0.5), (5, 1.5), (0, -1.0)))


    def test_positive_reversed(self):
        """Tests whether interpolator works correctly for positive reversed range."""

        self._check_range(4, 2, ((3, 0.5), (5, -0.5), (1, 1.5), (0, 2.0)))


    def test_negative(self):
        """Tests whether interpolator works correctly for negative range."""

        self._check_range(-4, -2, ((-3, 0.5), (-5, -0.5), (-1, 1.5), (0, 2.0)))


    def test_negative_reversed(self):
        """Tests whether interpolator works correctly for negative reversed range."""

        self._check_range(-2, -4, ((-3, 0.5), (-1, -0.5), (-5, 1.5), (0, -1.0)))


    def test_zero_cross(self):
        """Tests whether interpolator works correctly for cross-zero range."""

        self._check_range(-2, 6, ((2, 0.5), (-4, -0.25), (8, 1.25), (0, 0.25)))


    def test_zero_left(self):
        """Tests whether interpolator works correctly for left-zero range."""

        self._check_range(0, 4, ((2, 0.5), (-2, -0.5), (6, 1.5), (0, 0)))


    def test_zero_right(self):
        """Tests whether interpolator works correctly for right-zero range."""

        self._check_range(-4, 0, ((-2, 0.5), (-6, -0.5), (2, 1.5), (0, 1.0)))


    def test_arrays(self):
        """Tests whether interpolator works correctly with arrays."""

        interpol = pero.LinInterpol()

        # (start, end, raw data, normalized model) for every range shape above
        cases = (
            (2, 4, [0, 1, 3, 5], [-1., -0.5, 0.5, 1.5]),
            (4, 2, [0, 1, 3, 5], [2., 1.5, 0.5, -0.5]),
            (-4, -2, [0, -1, -3, -5], [2., 1.5, 0.5, -0.5]),
            (-2, -4, [0, -1, -3, -5], [-1., -0.5, 0.5, 1.5]),
            (-2, 6, [-4, 0, 2, 8], [-0.25, 0.25, 0.5, 1.25]),
            (0, 4, [-2, 0, 2, 6], [-0.5, 0, 0.5, 1.5]),
            (-4, 0, [-6, -2, 0, 2], [-0.5, 0.5, 1.0, 1.5]))

        for start, end, data, model in cases:
            self.assertEqual(list(interpol.normalize(numpy.array(data), start, end)), model)
            self.assertEqual(list(interpol.denormalize(numpy.array(model), start, end)), data)
# Run the test case above when executed directly as a script.
if __name__ == "__main__":
    unittest.main(verbosity=2)
| [
"unittest.main",
"pero.LinInterpol",
"numpy.array"
] | [((7840, 7866), 'unittest.main', 'unittest.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (7853, 7866), False, 'import unittest\n'), ((337, 355), 'pero.LinInterpol', 'pero.LinInterpol', ([], {}), '()\n', (353, 355), False, 'import pero\n'), ((1121, 1139), 'pero.LinInterpol', 'pero.LinInterpol', ([], {}), '()\n', (1137, 1139), False, 'import pero\n'), ((1885, 1903), 'pero.LinInterpol', 'pero.LinInterpol', ([], {}), '()\n', (1901, 1903), False, 'import pero\n'), ((2689, 2707), 'pero.LinInterpol', 'pero.LinInterpol', ([], {}), '()\n', (2705, 2707), False, 'import pero\n'), ((3481, 3499), 'pero.LinInterpol', 'pero.LinInterpol', ([], {}), '()\n', (3497, 3499), False, 'import pero\n'), ((4263, 4281), 'pero.LinInterpol', 'pero.LinInterpol', ([], {}), '()\n', (4279, 4281), False, 'import pero\n'), ((5029, 5047), 'pero.LinInterpol', 'pero.LinInterpol', ([], {}), '()\n', (5045, 5047), False, 'import pero\n'), ((5796, 5814), 'pero.LinInterpol', 'pero.LinInterpol', ([], {}), '()\n', (5812, 5814), False, 'import pero\n'), ((5972, 5989), 'numpy.array', 'numpy.array', (['data'], {}), '(data)\n', (5983, 5989), False, 'import numpy\n'), ((6057, 6075), 'numpy.array', 'numpy.array', (['model'], {}), '(model)\n', (6068, 6075), False, 'import numpy\n'), ((6256, 6273), 'numpy.array', 'numpy.array', (['data'], {}), '(data)\n', (6267, 6273), False, 'import numpy\n'), ((6341, 6359), 'numpy.array', 'numpy.array', (['model'], {}), '(model)\n', (6352, 6359), False, 'import numpy\n'), ((6534, 6551), 'numpy.array', 'numpy.array', (['data'], {}), '(data)\n', (6545, 6551), False, 'import numpy\n'), ((6621, 6639), 'numpy.array', 'numpy.array', (['model'], {}), '(model)\n', (6632, 6639), False, 'import numpy\n'), ((6826, 6843), 'numpy.array', 'numpy.array', (['data'], {}), '(data)\n', (6837, 6843), False, 'import numpy\n'), ((6913, 6931), 'numpy.array', 'numpy.array', (['model'], {}), '(model)\n', (6924, 6931), False, 'import numpy\n'), ((7112, 7129), 'numpy.array', 'numpy.array', 
(['data'], {}), '(data)\n', (7123, 7129), False, 'import numpy\n'), ((7198, 7216), 'numpy.array', 'numpy.array', (['model'], {}), '(model)\n', (7209, 7216), False, 'import numpy\n'), ((7390, 7407), 'numpy.array', 'numpy.array', (['data'], {}), '(data)\n', (7401, 7407), False, 'import numpy\n'), ((7475, 7493), 'numpy.array', 'numpy.array', (['model'], {}), '(model)\n', (7486, 7493), False, 'import numpy\n'), ((7670, 7687), 'numpy.array', 'numpy.array', (['data'], {}), '(data)\n', (7681, 7687), False, 'import numpy\n'), ((7756, 7774), 'numpy.array', 'numpy.array', (['model'], {}), '(model)\n', (7767, 7774), False, 'import numpy\n')] |
# utils.py
# Author: <NAME> <<EMAIL>>
"""
This code contains utility functions that support the main functionality
of the yuzu codebase.
"""
import time
import numpy
import torch
import itertools as it
try:
import tensorflow as tf
except:
pass
def perturbations(X_0):
    """Produce all edit-distance-one perturbations of each sequence.

    For a batch of one-hot encoded sequences of length seq_len with n_choices
    characters, returns for each sequence the full set of
    seq_len * (n_choices - 1) single-position substitutions.

    Parameters
    ----------
    X_0: numpy.ndarray, shape=(n_seqs, n_choices, seq_len)
        One-hot encoded sequences to perturb.

    Returns
    -------
    X: torch.Tensor, shape=(n_seqs, (n_choices-1)*seq_len, n_choices, seq_len)
        Every single-position perturbation of each input sequence.

    Raises
    ------
    ValueError
        If X_0 is not a 3-dimensional numpy array.
    """
    if not isinstance(X_0, numpy.ndarray):
        raise ValueError("X_0 must be of type numpy.ndarray, not {}".format(type(X_0)))
    if len(X_0.shape) != 3:
        raise ValueError("X_0 must have three dimensions: (n_seqs, n_choices, seq_len).")

    n_seqs, n_choices, seq_len = X_0.shape
    hot = X_0.argmax(axis=1)  # active character index at each position
    n_perturb = seq_len * (n_choices - 1)

    # One copy of every sequence per perturbation slot.
    X = torch.tile(torch.from_numpy(X_0), (n_perturb, 1, 1))
    X = X.reshape(n_perturb, n_seqs, n_choices, seq_len).permute(1, 0, 2, 3)

    positions = numpy.arange(seq_len)
    for s in range(n_seqs):
        for shift in range(1, n_choices):
            # Perturbation slots for substituting character (hot+shift)%n_choices.
            rows = positions * (n_choices - 1) + (shift - 1)
            X[s, rows, hot[s], positions] = 0
            X[s, rows, (hot[s] + shift) % n_choices, positions] = 1
    return X
def delta_perturbations(X_0):
    """Return compact deltas of all edit-distance-one perturbations.

    Like `perturbations`, but instead of materializing full perturbed
    sequences, encodes only the change each substitution makes: -1 at the
    original character and +1 at the substituted character, for every
    position and every alternate character.

    Parameters
    ----------
    X_0: numpy.ndarray, shape=(n_seqs, n_choices, seq_len)
        One-hot encoded sequences to perturb.

    Returns
    -------
    X: torch.Tensor, shape=(n_seqs, n_choices-1, n_choices, seq_len)
        The delta encoding of each perturbation.

    Raises
    ------
    ValueError
        If X_0 is not a 3-dimensional numpy array.
    """
    if not isinstance(X_0, numpy.ndarray):
        raise ValueError("X_0 must be of type numpy.ndarray, not {}".format(type(X_0)))
    if len(X_0.shape) != 3:
        raise ValueError("X_0 must have three dimensions: (n_seqs, n_choices, seq_len).")

    n_seqs, n_choices, seq_len = X_0.shape
    hot = X_0.argmax(axis=1)  # active character index at each position
    positions = numpy.arange(seq_len)

    deltas = numpy.zeros((n_seqs, n_choices - 1, n_choices, seq_len), dtype='float32')
    for s in range(n_seqs):
        for k in range(n_choices - 1):
            # Substituting character (hot + k + 1) % n_choices at every position.
            deltas[s, k, hot[s], positions] = -1
            deltas[s, k, (hot[s] + k + 1) % n_choices, positions] = 1
    return torch.from_numpy(deltas)
def calculate_flanks(seq_len, receptive_field):
    """Compute the left/right flank widths and their position indexes.

    Positions whose receptive field would fall off either end of the
    sequence cannot be handled with the usual batched tensor operations and
    must be processed one at a time. This helper reports how many such
    positions exist on each side and lists them explicitly.

    Parameters
    ----------
    seq_len: int
        The length of the sequence being processed.
    receptive_field: int
        The receptive field of the model.

    Returns
    -------
    fl: int
        Width of the left flank.
    fr: int
        Width of the right flank.
    idxs: list
        Position indexes of both flanking regions, left then right.
    """
    fl = receptive_field // 2
    fr = receptive_field - fl - receptive_field % 2
    idxs = list(range(fl)) + list(range(seq_len - fr, seq_len))
    return fl, fr, idxs
def safe_to_device(X, device):
    """Move a tensor or model to a device, skipping no-op transfers.

    If the object already lives on the requested device it is returned
    unchanged; otherwise ``.to`` moves it. Supports PyTorch tensors and
    models inheriting from torch.nn.Module; any other type yields None.

    Parameters
    ----------
    X: torch.Tensor or torch.nn.Module
        The tensor or model to move to a device.
    device: str
        The PyTorch context to move the object to.

    Returns
    -------
    X: torch.Tensor or torch.nn.Module or None
        The object on the desired device, or None for unsupported types.
    """
    if isinstance(X, torch.Tensor):
        current = X.device
    elif isinstance(X, torch.nn.Module):
        # A module's device is taken from its first parameter.
        current = next(X.parameters()).device
    else:
        return None
    return X if current == device else X.to(device)
def tensorflow_to_pytorch(tf_model, torch_model):
    """Copy the weights from a Tensorflow model to a PyTorch model.

    This function will take in a Tensorflow model and a PyTorch model
    with the same architecture and will transfer the weights from the
    TensorFlow model to the PyTorch model. It will disable additional
    functionality provided by PyTorch or Tensorflow to ensure that the
    models are equivalent.

    Parameters
    ----------
    tf_model: tf.keras.Model
        A model implemented in Tensorflow
    torch_model: torch.nn.Module
        A model implemented in PyTorch

    Returns
    -------
    torch_model: torch.nn.Module
        A model implemented in PyTorch with the weights transferred
        from the Tensorflow model.
    """
    # Only these layer types carry parameters that get copied; all other
    # layers (activations, pooling, etc.) are skipped during the walk below.
    try:
        tf_use_layers = (tf.keras.layers.Conv1D,
            tf.keras.layers.BatchNormalization,
            tf.keras.layers.Dense)
    except:
        # 'tf' is only bound if the guarded module-level `import tensorflow`
        # succeeded; referencing it here raises NameError otherwise.
        raise ValueError("TensorFlow must be installed to use this function.")
    torch_use_layers = (torch.nn.Conv1d,
        torch.nn.BatchNorm1d,
        torch.nn.Linear)
    model2_layers = list(torch_model.children())
    with torch.no_grad():
        # Walk both models in lockstep, advancing each cursor to its next
        # parameterized layer. Assumes parameterized layers appear in the
        # same relative order in both models -- TODO confirm per architecture.
        i, j = -1, -1
        while i < len(tf_model.layers) - 1 and j < len(model2_layers) - 1:
            while i < len(tf_model.layers) - 1:
                i += 1
                if isinstance(tf_model.layers[i], tf_use_layers):
                    break
            while j < len(model2_layers) - 1:
                j += 1
                if isinstance(model2_layers[j], torch_use_layers):
                    break
            if isinstance(tf_model.layers[i], tf_use_layers[0]):
                # Conv1D: transpose the kernel axes to PyTorch's layout
                # (the (2, 1, 0) permutation below) before copying.
                weight = numpy.array(tf_model.layers[i].weights[0])
                weight = weight.transpose(2, 1, 0)
                bias = numpy.array(tf_model.layers[i].weights[1])
                model2_layers[j].weight[:] = torch.tensor(weight)
                model2_layers[j].bias[:] = torch.tensor(bias)
            elif isinstance(tf_model.layers[i], tf_use_layers[1]):
                # BatchNormalization: copy the running statistics and epsilon
                # and turn off the torch affine transform; the Keras layer's
                # weights[0]/weights[1] (scale/offset) are NOT copied.
                mu = numpy.array(tf_model.layers[i].weights[2])
                sigma = numpy.array(tf_model.layers[i].weights[3])
                model2_layers[j].affine = False
                model2_layers[j].eps = tf_model.layers[i].epsilon
                model2_layers[j].running_mean[:] = torch.tensor(mu)
                model2_layers[j].running_var[:] = torch.tensor(sigma)
            elif isinstance(tf_model.layers[i], tf_use_layers[2]):
                # Dense: torch.nn.Linear stores the transposed weight matrix,
                # hence the .T before copying.
                weight = numpy.array(tf_model.layers[i].weights[0])
                bias = numpy.array(tf_model.layers[i].weights[1])
                model2_layers[j].weight[:] = torch.tensor(weight.T)
                model2_layers[j].bias[:] = torch.tensor(bias)
    return torch_model
| [
"numpy.zeros",
"torch.tile",
"numpy.array",
"numpy.arange",
"torch.no_grad",
"torch.tensor",
"torch.from_numpy"
] | [((1209, 1230), 'torch.from_numpy', 'torch.from_numpy', (['X_0'], {}), '(X_0)\n', (1225, 1230), False, 'import torch\n'), ((1270, 1296), 'torch.tile', 'torch.tile', (['X_0', '(n, 1, 1)'], {}), '(X_0, (n, 1, 1))\n', (1280, 1296), False, 'import torch\n'), ((2633, 2706), 'numpy.zeros', 'numpy.zeros', (['(n_seqs, n_choices - 1, n_choices, seq_len)'], {'dtype': '"""float32"""'}), "((n_seqs, n_choices - 1, n_choices, seq_len), dtype='float32')\n", (2644, 2706), False, 'import numpy\n'), ((2918, 2937), 'torch.from_numpy', 'torch.from_numpy', (['X'], {}), '(X)\n', (2934, 2937), False, 'import torch\n'), ((5987, 6002), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6000, 6002), False, 'import torch\n'), ((6518, 6560), 'numpy.array', 'numpy.array', (['tf_model.layers[i].weights[0]'], {}), '(tf_model.layers[i].weights[0])\n', (6529, 6560), False, 'import numpy\n'), ((6635, 6677), 'numpy.array', 'numpy.array', (['tf_model.layers[i].weights[1]'], {}), '(tf_model.layers[i].weights[1])\n', (6646, 6677), False, 'import numpy\n'), ((6724, 6744), 'torch.tensor', 'torch.tensor', (['weight'], {}), '(weight)\n', (6736, 6744), False, 'import torch\n'), ((6788, 6806), 'torch.tensor', 'torch.tensor', (['bias'], {}), '(bias)\n', (6800, 6806), False, 'import torch\n'), ((1451, 1472), 'numpy.arange', 'numpy.arange', (['seq_len'], {}), '(seq_len)\n', (1463, 1472), False, 'import numpy\n'), ((1527, 1548), 'numpy.arange', 'numpy.arange', (['seq_len'], {}), '(seq_len)\n', (1539, 1548), False, 'import numpy\n'), ((1601, 1622), 'numpy.arange', 'numpy.arange', (['seq_len'], {}), '(seq_len)\n', (1613, 1622), False, 'import numpy\n'), ((2800, 2821), 'numpy.arange', 'numpy.arange', (['seq_len'], {}), '(seq_len)\n', (2812, 2821), False, 'import numpy\n'), ((2879, 2900), 'numpy.arange', 'numpy.arange', (['seq_len'], {}), '(seq_len)\n', (2891, 2900), False, 'import numpy\n'), ((6896, 6938), 'numpy.array', 'numpy.array', (['tf_model.layers[i].weights[2]'], {}), 
'(tf_model.layers[i].weights[2])\n', (6907, 6938), False, 'import numpy\n'), ((6963, 7005), 'numpy.array', 'numpy.array', (['tf_model.layers[i].weights[3]'], {}), '(tf_model.layers[i].weights[3])\n', (6974, 7005), False, 'import numpy\n'), ((7173, 7189), 'torch.tensor', 'torch.tensor', (['mu'], {}), '(mu)\n', (7185, 7189), False, 'import torch\n'), ((7240, 7259), 'torch.tensor', 'torch.tensor', (['sigma'], {}), '(sigma)\n', (7252, 7259), False, 'import torch\n'), ((7365, 7407), 'numpy.array', 'numpy.array', (['tf_model.layers[i].weights[0]'], {}), '(tf_model.layers[i].weights[0])\n', (7376, 7407), False, 'import numpy\n'), ((7431, 7473), 'numpy.array', 'numpy.array', (['tf_model.layers[i].weights[1]'], {}), '(tf_model.layers[i].weights[1])\n', (7442, 7473), False, 'import numpy\n'), ((7520, 7542), 'torch.tensor', 'torch.tensor', (['weight.T'], {}), '(weight.T)\n', (7532, 7542), False, 'import torch\n'), ((7586, 7604), 'torch.tensor', 'torch.tensor', (['bias'], {}), '(bias)\n', (7598, 7604), False, 'import torch\n')] |
import theano
import theano.tensor as T
import numpy as np
from lasagne.updates import get_or_compute_grads
from collections import OrderedDict
def graves_rmsprop(loss_or_grads, params, learning_rate=1e-4, chi=0.95, alpha=0.9, epsilon=1e-4):
    r"""
    Alex Graves' RMSProp [1]_.

    .. math ::
        n_{i} &= \chi * n_{i-1} + (1 - \chi) * grad^{2}\\
        g_{i} &= \chi * g_{i-1} + (1 - \chi) * grad\\
        \Delta_{i} &= \alpha * \Delta_{i-1} - learning\_rate * grad /
                      sqrt(n_{i} - g_{i}^{2} + \epsilon)\\
        w_{i} &= w_{i-1} + \Delta_{i}

    Parameters
    ----------
    loss_or_grads : scalar loss expression or list of gradient expressions
    params : list of Theano shared variables to generate updates for
    learning_rate : float, gradient step size
    chi : float, decay rate of the running first/second moment estimates
    alpha : float, momentum factor applied to the previous update
    epsilon : float, small constant for numerical stability

    Returns
    -------
    OrderedDict
        Maps each shared variable (parameters plus internal state n, g,
        delta) to its update expression.

    References
    ----------
    .. [1] Graves, Alex.
           "Generating Sequences With Recurrent Neural Networks", p.23
           arXiv:1308.0850
    """
    grads = get_or_compute_grads(loss_or_grads, params)
    updates = OrderedDict()
    for param, grad in zip(params, grads):
        value = param.get_value(borrow=True)
        # Per-parameter running state, matching the parameter's shape/dtype.
        n = theano.shared(np.zeros(value.shape, dtype=value.dtype),
                          broadcastable=param.broadcastable)
        g = theano.shared(np.zeros(value.shape, dtype=value.dtype),
                          broadcastable=param.broadcastable)
        delta = theano.shared(np.zeros(value.shape, dtype=value.dtype),
                              broadcastable=param.broadcastable)
        n_ip1 = chi * n + (1. - chi) * grad ** 2
        g_ip1 = chi * g + (1. - chi) * grad
        # BUGFIX: Graves (2013), eq. 40 divides by sqrt(n - g^2 + eps),
        # i.e. the running *variance* estimate. The previous code used
        # "n + g^2", contradicting both the paper and the docstring.
        delta_ip1 = alpha * delta - learning_rate * grad / T.sqrt(n_ip1 -
                                                                  g_ip1 ** 2 + epsilon)
        updates[n] = n_ip1
        updates[g] = g_ip1
        updates[delta] = delta_ip1
        updates[param] = param + delta_ip1
    return updates
| [
"collections.OrderedDict",
"theano.tensor.sqrt",
"numpy.zeros",
"lasagne.updates.get_or_compute_grads"
] | [((739, 782), 'lasagne.updates.get_or_compute_grads', 'get_or_compute_grads', (['loss_or_grads', 'params'], {}), '(loss_or_grads, params)\n', (759, 782), False, 'from lasagne.updates import get_or_compute_grads\n'), ((797, 810), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (808, 810), False, 'from collections import OrderedDict\n'), ((926, 966), 'numpy.zeros', 'np.zeros', (['value.shape'], {'dtype': 'value.dtype'}), '(value.shape, dtype=value.dtype)\n', (934, 966), True, 'import numpy as np\n'), ((1055, 1095), 'numpy.zeros', 'np.zeros', (['value.shape'], {'dtype': 'value.dtype'}), '(value.shape, dtype=value.dtype)\n', (1063, 1095), True, 'import numpy as np\n'), ((1188, 1228), 'numpy.zeros', 'np.zeros', (['value.shape'], {'dtype': 'value.dtype'}), '(value.shape, dtype=value.dtype)\n', (1196, 1228), True, 'import numpy as np\n'), ((1447, 1483), 'theano.tensor.sqrt', 'T.sqrt', (['(n_ip1 + g_ip1 ** 2 + epsilon)'], {}), '(n_ip1 + g_ip1 ** 2 + epsilon)\n', (1453, 1483), True, 'import theano.tensor as T\n')] |
# FitLib/Function.py
# ----------------
# Module Docstring
# ----------------
""" Core representations of functions and function parameters used by the Fitter class in the Fitter module. """
# -------
# Imports
# -------
import warnings
import numpy as np
# ---------------
# Parameter Class
# ---------------
class Parameter:
    """ Class to encapsulate a function parameter. """

    def __init__(self, val, bounds = None, fit_flag = True):
        """
        Class constructor.

        Args:
            val -- initial parameter value
            bounds -- optionally specify a (p_min, p_max) range for parameter; either of p_min or p_max can be None (default: None)
            fit_flag -- True if parameter should be optimised, False if it should be fixed (default: True)
        """
        # Set the bounds first, then assign the initial value through the
        # Value property so it is validated against the bounds.
        # (BUGFIX: the previous version stored the raw value directly,
        # silently skipping the out-of-bounds warning for initial values.)
        self.Bounds = bounds
        self.FitFlag = fit_flag
        self.Value = val

    @property
    def Bounds(self):
        """ Get a (p_min, p_max) tuple specifying the lower and upper limits of the parameter value (None indicates that no bounds are set). """
        return self._bounds

    @Bounds.setter
    def Bounds(self, bounds):
        """ Set the (p_min, p_max) parameter bounds; p_min/p_max = None or bounds = None can be set to indicate that no bounds should be applied. """
        if bounds is not None:
            p_min, p_max = bounds
            if p_min is not None:
                p_min = float(p_min)
            if p_max is not None:
                p_max = float(p_max)
            self._bounds = (p_min, p_max)
        else:
            # Internally, "no bounds" is normalised to a (None, None) tuple.
            self._bounds = (None, None)

    @property
    def FitFlag(self):
        """ Get the flag indicating whether the parameter should be optimised. """
        return self._fit_flag

    @FitFlag.setter
    def FitFlag(self, fit_flag):
        """ Set the fit flag: True if the parameter is to be optimised, False otherwise. """
        self._fit_flag = bool(fit_flag)

    @property
    def Value(self):
        """ Get parameter value. """
        return self._val

    @Value.setter
    def Value(self, val):
        """ Set parameter value, emitting a RuntimeWarning if it falls outside the bounds. """
        val = float(val)
        # self._bounds is always a (p_min, p_max) tuple after Bounds.setter.
        p_min, p_max = self._bounds
        if (p_min is not None and val < p_min) or (p_max is not None and val > p_max):
            warnings.warn("Parameter.Value is out of bounds.", RuntimeWarning)
        self._val = val

    def Clone(self):
        """ Return a copy of the current Parameter object. """
        return Parameter(self.Value, bounds = self.Bounds, fit_flag = self.FitFlag)
# ----------------
# Function Classes
# ----------------
class Function:
    """ Encapsulates a callable together with the Parameter objects passed to it. """

    def __init__(self, func, param_or_params):
        """
        Class constructor.

        Args:
            func -- callable func(x, *params)
            param_or_params -- a Parameter object, or an iterable of Parameter objects, supplying the params for func
        """
        assert func is not None
        self._func = func

        assert param_or_params is not None

        # Normalise the input to a flat list of Parameter objects.
        if isinstance(param_or_params, Parameter):
            param_list = [param_or_params]
        else:
            param_list = list(param_or_params)

        for item in param_list:
            assert isinstance(item, Parameter)

        self._params = param_list

    def Evaluate(self, x):
        """ Evaluate the function with the current parameter values over the supplied x values. """
        current_values = [item.Value for item in self._params]
        return np.asarray(self._func(x, *current_values), dtype = np.float64)

    def GetParamsList(self, fit_only = False):
        """
        Get a list of function parameters.

        Args:
            fit_only -- if True, excludes parameters with FitFlag = False (default: False)
        """
        if fit_only:
            return [item for item in self._params if item.FitFlag]
        return list(self._params)

    def CloneParamsList(self):
        """ Create a (deep) copy of the current parameter list. """
        return [item.Clone() for item in self._params]
class CompositeFunction(Function):
    """ Encapsulates a composite function: the sum of several Function objects. """

    def __init__(self, funcs):
        """
        Class constructor.

        Args:
            funcs -- iterable of Function objects
        """
        assert funcs is not None

        func_list = list(funcs)
        for item in func_list:
            assert isinstance(item, Function)

        self._funcs = func_list

    def Evaluate(self, x):
        """ Evaluate the function with the current parameters over the supplied x values. """
        # The composite value is the element-wise sum of the components.
        return np.sum(self.EvaluateIndividual(x), axis = 0)

    def EvaluateIndividual(self, x):
        """ Evaluate each component function separately; returns a list of arrays. """
        # Convert x to a 1-D, non-empty NumPy array before dispatching.
        x = np.asarray(x, dtype = np.float64)
        n_x, = np.shape(x)
        assert n_x > 0

        return [item.Evaluate(x) for item in self._funcs]

    def GetParamsList(self, fit_only = False):
        """
        Get a list of function parameters.

        Args:
            fit_only -- if True, excludes parameters with FitFlag = False (default: False)
        """
        collected = []
        for item in self._funcs:
            collected.extend(item.GetParamsList(fit_only = fit_only))
        return collected
# ----------------
# Factory Function
# ----------------
def CreateFunction(func, p_init, p_fit = None, p_bounds = None):
    """ Factory function to simplify creating Function objects. """
    assert func is not None

    # Normalise p_init into a list: either it is already iterable, or it is a
    # single parameter that we wrap in a one-element list.
    try:
        params = list(p_init)
    except TypeError:
        params = [p_init]

    # Determine whether any entry still needs wrapping in a Parameter object.
    needs_wrapping = any(not isinstance(param, Parameter) for param in params)

    # If required, build Parameter objects around initial parameters supplied as
    # scalar values, taking the optional fit_flag and bounds from p_fit/p_bounds.
    if needs_wrapping:
        if p_fit is not None:
            p_fit = list(p_fit)
            assert len(p_fit) == len(params)

        if p_bounds is not None:
            p_bounds = list(p_bounds)
            assert len(p_bounds) == len(params)

        for index, param in enumerate(params):
            if isinstance(param, Parameter):
                continue

            fit_flag = p_fit[index] if p_fit is not None else True
            bounds = p_bounds[index] if p_bounds is not None else None
            params[index] = Parameter(param, fit_flag = fit_flag, bounds = bounds)

    # Return a Function object for the supplied callable and parameters.
    return Function(func, params)
| [
"numpy.shape",
"warnings.warn",
"numpy.asarray"
] | [((5306, 5337), 'numpy.asarray', 'np.asarray', (['x'], {'dtype': 'np.float64'}), '(x, dtype=np.float64)\n', (5316, 5337), True, 'import numpy as np\n'), ((5364, 5375), 'numpy.shape', 'np.shape', (['x'], {}), '(x)\n', (5372, 5375), True, 'import numpy as np\n'), ((2526, 2592), 'warnings.warn', 'warnings.warn', (['"""Parameter.Value is out of bounds."""', 'RuntimeWarning'], {}), "('Parameter.Value is out of bounds.', RuntimeWarning)\n", (2539, 2592), False, 'import warnings\n')] |
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
from os import makedirs, listdir
from os.path import exists, join, isfile, basename
from argparse import ArgumentParser
import numpy as np
import cv2
from tqdm import tqdm
def ensure_dir_exists(dir_path):
    """
    Create the directory (including missing parents) if it does not exist.

    Uses makedirs(..., exist_ok=True) rather than an exists()/makedirs()
    pair, which avoids the race where the directory appears between the
    check and the creation call.
    """
    makedirs(dir_path, exist_ok=True)
def get_valid_sources(all_sources):
    """ Keep only the source paths that exist on disk. """
    return list(filter(exists, all_sources))
def print_data_sources_stat(data_sources):
    """ Print how many valid data sources were found, then one line per source. """
    num_sources = len(data_sources)
    print('Specified {} valid data sources:'.format(num_sources))
    for source in data_sources:
        print('   - {}'.format(source))
def load_sign_durations(data_path):
    """ Load a JSON mapping from file and return it with int keys and float values. """
    with open(data_path) as input_stream:
        raw_data = json.load(input_stream)

    durations = dict()
    for key, value in raw_data.items():
        durations[int(key)] = float(value)
    return durations
def get_video_names(videos_dir, extension):
    """ Return the names (extension suffix removed) of regular files with the given extension. """
    suffix = '.{}'.format(extension)
    names = []
    for entry in listdir(videos_dir):
        if not entry.endswith(extension):
            continue
        if not isfile(join(videos_dir, entry)):
            continue
        names.append(entry.split(suffix)[0])
    return names
def collect_records(data_sources, valid_video_names, mean_sign_duration=0.8, speed_factor=1.1, preparation_time=0.8):
    """
    Parse the JSON annotation files and build a list of clip records.

    Args:
        data_sources -- paths to JSON annotation files; each entry holds url,
            box, start/end frames, label, signer_id and fps
        valid_video_names -- names of videos actually present on disk; records
            for other videos are skipped
        mean_sign_duration -- assumed mean sign duration in seconds (presumably;
            it is multiplied by fps to get frames -- TODO confirm units)
        speed_factor -- multiplier applied to the mean duration
        preparation_time -- assumed lead-in time (same units as above)

    Returns:
        List of dicts with keys label, signer_id, start, end, bbox,
        video_name, fps, type. The bbox is re-ordered from the annotation's
        [ymin, xmin, ymax, xmax] list into an xmin/ymin/xmax/ymax dict.
    """
    num_skipped_records = 0
    out_records = list()
    for data_source in data_sources:
        with open(data_source) as input_stream:
            data = json.load(input_stream)
        # Subset name (e.g. "train"/"test") is taken from the file name:
        # "<prefix>_<type>.json" -> "<type>".
        data_type = basename(data_source).split('.')[0].split('_')[-1]
        for record in data:
            url = record['url']
            # The video name is the YouTube id after "?v=".
            video_name = url.split('?v=')[-1]
            if video_name not in valid_video_names:
                num_skipped_records += 1
                continue
            bbox = record['box']
            start_frame = int(record['start'])
            end_frame = int(record['end'])
            assert start_frame >= 0 and end_frame >= 0
            # Drop degenerate clips shorter than two frames.
            if end_frame - start_frame <= 1:
                num_skipped_records += 1
                continue
            label = record['label']
            # Expected durations, converted to frames via the clip's fps.
            expected_sign_duration = speed_factor * mean_sign_duration * float(record['fps'])
            expected_clip_duration = expected_sign_duration + preparation_time * float(record['fps'])
            real_clip_duration = float(end_frame - start_frame)
            if real_clip_duration > expected_clip_duration:
                # Clip is longer than expected: assume the sign may repeat and
                # keep a window of the expected duration around the first
                # likely occurrence.
                left_duration = real_clip_duration - preparation_time * float(record['fps'])
                ratio = left_duration / expected_sign_duration
                num_repeats = int(np.round(ratio))
                num_segments = num_repeats + 1 if num_repeats > 0 else 2
                segment_length = real_clip_duration / float(num_segments)
                center_frame = start_frame + segment_length # get the first most probable position
                start_frame = int(center_frame - 0.5 * expected_sign_duration)
                end_frame = int(center_frame + 0.5 * expected_sign_duration)
                out_limits = [(start_frame, end_frame)]
            else:
                # Clip is shorter than expected: re-center a window no longer
                # than the clip itself.
                center_frame = 0.5 * (start_frame + end_frame)
                trg_sign_duration = np.minimum(real_clip_duration, expected_sign_duration)
                start_frame = int(center_frame - 0.5 * trg_sign_duration)
                end_frame = int(center_frame + 0.5 * trg_sign_duration)
                out_limits = [(start_frame, end_frame)]
            for fixed_start_frame, fixed_end_frame in out_limits:
                out_records.append(dict(label=label,
                                        signer_id=record['signer_id'],
                                        start=fixed_start_frame,
                                        end=fixed_end_frame,
                                        bbox=dict(xmin=bbox[1], ymin=bbox[0], xmax=bbox[3], ymax=bbox[2]),
                                        video_name=video_name,
                                        fps=float(record['fps']),
                                        type=data_type))
    if num_skipped_records > 0:
        print('Warning. Skipped {} records.'.format(num_skipped_records))
    else:
        print('All records are parsed successfully.')
    return out_records
def order_by_video_name(records):
    """ Group a flat list of records into a dict keyed by their video name. """
    grouped = dict()
    for record in records:
        grouped.setdefault(record['video_name'], []).append(record)
    return grouped
def validate_and_sort_records(records):
    """
    Sort each video's clips by start frame, clip overlaps against the next
    clip, and drop clips that end up empty. The input lists are sorted in
    place and overlapping records have their 'end' field adjusted in place.
    """
    out_records = dict()
    for video_name, video_records in records.items():
        video_records.sort(key=lambda r: r['start'])

        kept = list()
        # Walk consecutive pairs: truncate the current clip at the start of
        # the next one, keeping it only if something remains.
        for current, following in zip(video_records, video_records[1:]):
            if current['end'] > following['start']:
                current['end'] = following['start']
            if current['start'] < current['end']:
                kept.append(current)

        # The last clip has nothing after it and is always kept.
        if video_records:
            kept.append(video_records[-1])

        out_records[video_name] = kept
    return out_records
def print_records_stat(records):
    """ Print the record count and clip-length statistics (min/max/percentiles). """
    counts_per_video = []
    clip_lengths = []
    for video_records in records.values():
        counts_per_video.append(len(video_records))
        for record in video_records:
            clip_lengths.append(record['end'] - record['start'])

    num_records = np.sum(counts_per_video)
    print('Stat for {} records:'.format(num_records))
    print('   - min: {} max: {}'.format(np.min(clip_lengths),
                                        np.max(clip_lengths)))
    print('   - p@5: {} p@50: {} p@95: {}'.format(np.percentile(clip_lengths, 5.0),
                                                  np.percentile(clip_lengths, 50.0),
                                                  np.percentile(clip_lengths, 95.0)))
def crop_image(image, bbox, trg_size, scale):
    """
    Crop a scaled region around the relative bbox, matching the aspect ratio
    of trg_size, and resize the crop to trg_size (height, width).
    """
    frame_height, frame_width = image.shape[:2]

    # Box center and scaled extents, converted from relative coords to pixels.
    center_x = 0.5 * (bbox['xmin'] + bbox['xmax']) * frame_width
    center_y = 0.5 * (bbox['ymin'] + bbox['ymax']) * frame_height
    scaled_width = scale * (bbox['xmax'] - bbox['xmin']) * frame_width
    scaled_height = scale * (bbox['ymax'] - bbox['ymin']) * frame_height

    # Expand one side so that the crop matches the target aspect ratio.
    src_ar = float(scaled_height) / float(scaled_width)
    trg_ar = float(trg_size[0]) / float(trg_size[1])
    if src_ar < trg_ar:
        out_width = scaled_width
        out_height = trg_ar * out_width
    else:
        out_height = scaled_height
        out_width = out_height / trg_ar

    # Clamp the crop window to the frame.
    roi_x_min = np.maximum(0, int(center_x - 0.5 * out_width))
    roi_y_min = np.maximum(0, int(center_y - 0.5 * out_height))
    roi_x_max = np.minimum(frame_width, int(center_x + 0.5 * out_width))
    roi_y_max = np.minimum(frame_height, int(center_y + 0.5 * out_height))

    cropped_image = image[roi_y_min:roi_y_max, roi_x_min:roi_x_max]
    return cv2.resize(cropped_image, (trg_size[1], trg_size[0]))
def extract_frames(records, videos_dir, video_name_template, out_dir, image_name_template,
                   target_num_frames, trg_size, scale, trg_fps=30.):
    """
    Read each video once and dump cropped frames for every clip record.

    For each record a per-clip frame window [video_start, video_end) is
    computed (clamped so windows of neighbouring clips do not overlap), the
    frames in that window are cropped around the record's bbox and written
    as images. The records are extended in place with 'video_start',
    'video_end' and 'rel_images_dir' and the same dict is returned.
    """
    pbar = tqdm(total=len(records), desc='Dumping')
    video_names = list(records)
    for video_name in video_names:
        video_records = records[video_name]
        video_path = join(videos_dir, video_name_template.format(video_name))
        video_capture = cv2.VideoCapture(video_path)
        num_frames = int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
        # Map: frame index -> list of record ids that need this frame.
        frame_ids_map = dict()
        for record_id in range(len(video_records)):
            record = video_records[record_id]
            # Window limits so we never cross into the neighbouring clips.
            left_limit = video_records[record_id - 1]['end'] if record_id > 0 else 0
            right_limit = video_records[record_id + 1]['start'] if record_id + 1 < len(video_records) else num_frames
            # Extend the clip so that, after resampling to trg_fps, at least
            # target_num_frames frames are available.
            fps_factor = trg_fps / record['fps']
            time_delta = np.maximum(int(target_num_frames / fps_factor), record['end'] - record['start'])
            record['video_start'] = np.maximum(left_limit, record['end'] - time_delta)
            record['video_end'] = np.minimum(right_limit, record['start'] + time_delta)
            for i in range(record['video_start'], record['video_end']):
                if i not in frame_ids_map:
                    frame_ids_map[i] = []
                frame_ids_map[i].append(record_id)
            record['rel_images_dir'] = join(video_name, 'clip_{:04}'.format(record_id))
            abs_images_dir = join(out_dir, record['rel_images_dir'])
            ensure_dir_exists(abs_images_dir)
        # Single sequential pass over the video; only frames some clip needs
        # are cropped and written.
        success = True
        read_frame_id = -1
        while success:
            success, frame = video_capture.read()
            read_frame_id += 1
            if not success or read_frame_id not in frame_ids_map:
                continue
            for record_id in frame_ids_map[read_frame_id]:
                record = video_records[record_id]
                cropped_frame = crop_image(frame, record['bbox'], trg_size, scale)
                images_dir = join(out_dir, record['rel_images_dir'])
                # Image names are 1-based within the clip window.
                out_image_path = join(images_dir, image_name_template.format(read_frame_id - record['video_start'] + 1))
                cv2.imwrite(out_image_path, cropped_frame)
        video_capture.release()
        pbar.update(1)
    pbar.close()
    return records
def split_data(records):
    """ Regroup per-video records into per-subset lists keyed by their 'type' field. """
    splits = dict()
    for video_records in records.values():
        for record in video_records:
            splits.setdefault(record['type'], []).append(record)
    return splits
def dump_paths(data, out_dir):
    """
    Write one annotation line per record into '<data_type>.txt' files.

    Each line holds: images dir, label, clip start/end relative to the
    window start, window limits (0 and window length) and fps. Training
    records are written in shuffled order.
    """
    for data_type, records in data.items():
        out_path = join(out_dir, '{}.txt'.format(data_type))
        with open(out_path, 'w') as out_stream:
            record_ids = list(range(len(records)))
            if data_type == 'train':
                np.random.shuffle(record_ids)
            for record_id in record_ids:
                record = records[record_id]
                fields = (
                    record['rel_images_dir'],
                    str(record['label']),
                    str(record['start'] - record['video_start']),
                    str(record['end'] - record['video_start']),
                    str(0),
                    str(record['video_end'] - record['video_start']),
                    str(record['fps']),
                )
                out_stream.write('{}\n'.format(' '.join(fields)))
def main():
    """ CLI entry point: parse arguments, crop clips from videos and dump annotation lists. """
    parser = ArgumentParser()
    parser.add_argument('--sources', '-s', nargs='+', type=str, required=True)
    parser.add_argument('--videos_dir', '-v', type=str, required=True)
    parser.add_argument('--output_dir', '-o', type=str, required=True)
    parser.add_argument('--video_extension', '-ve', type=str, required=False, default='mp4')
    parser.add_argument('--image_extension', '-ie', type=str, required=False, default='jpg')
    parser.add_argument('--target_num_frames', '-l', type=int, required=False, default=64)
    parser.add_argument('--trg_image_size', type=str, required=False, default='300,256')
    parser.add_argument('--scale', type=float, required=False, default=1.2)
    args = parser.parse_args()

    assert exists(args.videos_dir)

    images_out_dir = join(args.output_dir, 'global_crops')
    ensure_dir_exists(images_out_dir)

    # Keep only annotation sources that exist on disk.
    data_sources = get_valid_sources(args.sources)
    print_data_sources_stat(data_sources)
    assert len(data_sources) > 0

    # Parse annotations, group by video and fix overlapping clips.
    available_video_names = get_video_names(args.videos_dir, args.video_extension)
    records = order_by_video_name(collect_records(data_sources, available_video_names))
    valid_records = validate_and_sort_records(records)
    print_records_stat(valid_records)

    # trg_image_size is "height,width".
    trg_size = [int(v) for v in args.trg_image_size.split(',')]

    video_name_template = '{}' + '.{}'.format(args.video_extension)
    image_name_template = 'img_{:05}' + '.{}'.format(args.image_extension)
    extended_records = extract_frames(valid_records, args.videos_dir, video_name_template,
                                      images_out_dir, image_name_template, args.target_num_frames,
                                      trg_size, args.scale)

    # Finally, write one annotation file per data subset.
    data_splits = split_data(extended_records)
    dump_paths(data_splits, args.output_dir)


if __name__ == '__main__':
    main()
| [
"json.load",
"numpy.maximum",
"os.makedirs",
"argparse.ArgumentParser",
"numpy.minimum",
"numpy.random.shuffle",
"cv2.imwrite",
"os.path.basename",
"os.path.exists",
"cv2.VideoCapture",
"numpy.percentile",
"numpy.min",
"numpy.max",
"numpy.round",
"os.path.join",
"os.listdir",
"cv2.re... | [((7578, 7631), 'cv2.resize', 'cv2.resize', (['cropped_image', '(trg_size[1], trg_size[0])'], {}), '(cropped_image, (trg_size[1], trg_size[0]))\n', (7588, 7631), False, 'import cv2\n'), ((11313, 11329), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (11327, 11329), False, 'from argparse import ArgumentParser\n'), ((12036, 12059), 'os.path.exists', 'exists', (['args.videos_dir'], {}), '(args.videos_dir)\n', (12042, 12059), False, 'from os.path import exists, join, isfile, basename\n'), ((12082, 12119), 'os.path.join', 'join', (['args.output_dir', '"""global_crops"""'], {}), "(args.output_dir, 'global_crops')\n", (12086, 12119), False, 'from os.path import exists, join, isfile, basename\n'), ((813, 829), 'os.path.exists', 'exists', (['dir_path'], {}), '(dir_path)\n', (819, 829), False, 'from os.path import exists, join, isfile, basename\n'), ((839, 857), 'os.makedirs', 'makedirs', (['dir_path'], {}), '(dir_path)\n', (847, 857), False, 'from os import makedirs, listdir\n'), ((1239, 1262), 'json.load', 'json.load', (['input_stream'], {}), '(input_stream)\n', (1248, 1262), False, 'import json\n'), ((8086, 8114), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_path'], {}), '(video_path)\n', (8102, 8114), False, 'import cv2\n'), ((934, 943), 'os.path.exists', 'exists', (['s'], {}), '(s)\n', (940, 943), False, 'from os.path import exists, join, isfile, basename\n'), ((1434, 1453), 'os.listdir', 'listdir', (['videos_dir'], {}), '(videos_dir)\n', (1441, 1453), False, 'from os import makedirs, listdir\n'), ((1789, 1812), 'json.load', 'json.load', (['input_stream'], {}), '(input_stream)\n', (1798, 1812), False, 'import json\n'), ((6012, 6032), 'numpy.min', 'np.min', (['clip_lengths'], {}), '(clip_lengths)\n', (6018, 6032), True, 'import numpy as np\n'), ((6074, 6094), 'numpy.max', 'np.max', (['clip_lengths'], {}), '(clip_lengths)\n', (6080, 6094), True, 'import numpy as np\n'), ((6147, 6179), 'numpy.percentile', 'np.percentile', (['clip_lengths', 
'(5.0)'], {}), '(clip_lengths, 5.0)\n', (6160, 6179), True, 'import numpy as np\n'), ((6231, 6264), 'numpy.percentile', 'np.percentile', (['clip_lengths', '(50.0)'], {}), '(clip_lengths, 50.0)\n', (6244, 6264), True, 'import numpy as np\n'), ((6316, 6349), 'numpy.percentile', 'np.percentile', (['clip_lengths', '(95.0)'], {}), '(clip_lengths, 95.0)\n', (6329, 6349), True, 'import numpy as np\n'), ((8711, 8761), 'numpy.maximum', 'np.maximum', (['left_limit', "(record['end'] - time_delta)"], {}), "(left_limit, record['end'] - time_delta)\n", (8721, 8761), True, 'import numpy as np\n'), ((8796, 8849), 'numpy.minimum', 'np.minimum', (['right_limit', "(record['start'] + time_delta)"], {}), "(right_limit, record['start'] + time_delta)\n", (8806, 8849), True, 'import numpy as np\n'), ((9176, 9215), 'os.path.join', 'join', (['out_dir', "record['rel_images_dir']"], {}), "(out_dir, record['rel_images_dir'])\n", (9180, 9215), False, 'from os.path import exists, join, isfile, basename\n'), ((3545, 3599), 'numpy.minimum', 'np.minimum', (['real_clip_duration', 'expected_sign_duration'], {}), '(real_clip_duration, expected_sign_duration)\n', (3555, 3599), True, 'import numpy as np\n'), ((9733, 9772), 'os.path.join', 'join', (['out_dir', "record['rel_images_dir']"], {}), "(out_dir, record['rel_images_dir'])\n", (9737, 9772), False, 'from os.path import exists, join, isfile, basename\n'), ((9910, 9952), 'cv2.imwrite', 'cv2.imwrite', (['out_image_path', 'cropped_frame'], {}), '(out_image_path, cropped_frame)\n', (9921, 9952), False, 'import cv2\n'), ((10683, 10712), 'numpy.random.shuffle', 'np.random.shuffle', (['record_ids'], {}), '(record_ids)\n', (10700, 10712), True, 'import numpy as np\n'), ((1464, 1483), 'os.path.join', 'join', (['videos_dir', 'f'], {}), '(videos_dir, f)\n', (1468, 1483), False, 'from os.path import exists, join, isfile, basename\n'), ((2950, 2965), 'numpy.round', 'np.round', (['ratio'], {}), '(ratio)\n', (2958, 2965), True, 'import numpy as np\n'), ((1834, 
1855), 'os.path.basename', 'basename', (['data_source'], {}), '(data_source)\n', (1842, 1855), False, 'from os.path import exists, join, isfile, basename\n')] |
## IMPORT PACKAGES
# Module preamble: import the required packages (with progress prints for
# notebook users) and initialise ImageJ when the file is imported as a module.
print("---------------------------------------------------")
print("Importing packages:\n")
import os # Used to manipulate and create directory with python.
print("os loaded")
import tifffile # Save .tif files from numpy arrays
print("tiffle loaded")
from skimage import io
print("io loaded")
import matplotlib.pyplot as plt
from matplotlib import cm
print("matplotlib loaded")
import numpy as np
print("numpy loaded")
from pathlib import Path
print("pathlib loaded")
print("\n")
## IMPORT IMAGEJ in the module
print("---------------------------------------------------")
print("Importing ImageJ:\n")
# NOTE(review): ImageJ is initialised only when imported as a module (not when
# run as a script); presumably the notebook workflow imports this file --
# running it directly would leave `ij` undefined. TODO confirm.
if not __name__ == "__main__":
    import imagej
    ij = imagej.init(['sc.fiji:fiji:2.0.0-pre-10', 'net.imagej:imagej-legacy'])
    print("\nThe actual version of ImageJ is:", ij.getApp().getInfo(True))
    print("\nLegacy state: Is active?", ij.legacy.isActive())
## IMAGING PROCESSING functions:
def Open(INPUT_filename = "", path_in = "", path_out = "", process = False):
    """
    Open an image in ImageJ and prepare the output directories.

    Either INPUT_filename (a file inside ./IMAGES/ next to the notebook) or
    path_in (the absolute path of a single file in an experiment folder) is
    given. Sets the module globals filename/base_dir/results_dir/images_dir
    used by the other processing functions.

    Args:
        INPUT_filename -- file name inside the default IMAGES folder
        path_in -- absolute path of the input file (used when INPUT_filename is empty)
        path_out -- optional output directory; derived from the input location when empty
        process -- when True, suppress display output (batch mode)

    NOTE(review): the indentation of this block was reconstructed; the
    original nesting of the directory-creation branches should be confirmed.
    """
    # 1 - CREATE path for input/outputt
    # FOR SPECIFIED path_in representing for the absolute path of the single file in the EXPERIMENT FOLDER.
    if len(INPUT_filename) == 0:
        # Split ".../dir/file.ext" into path_in=".../dir/" and the file name.
        INPUT_filename = path_in.rsplit('/', 1)[1]
        path_in = path_in.rsplit('/', 1)[0] + "/"
        if len(path_out) == 0:
            path_out = path_in + "Results/"
        path_image_out = path_in + "Images/"
        try:
            os.mkdir(path_out)
            os.mkdir(path_image_out)
            print("Output directories created")
        except FileExistsError:
            print("Output directories already exist.")
        print("\n")
    #FOR SPECIFIED INPUT_filename in IMAGE folder (located in the same directory of the notebook)
    cwd = os.getcwd() #Get current directory
    if len(path_in) == 0:
        path_in = cwd + "/IMAGES/"
    if len(path_out) == 0:
        path_out = cwd + "/RESULTS/"
        path_image_out = path_out + "resulted_images/"
        try:
            os.mkdir(path_out)
            print("Output directory (RESULTS) created")
        except FileExistsError:
            print("Output directory (RESULTS) already exist.")
        print("\n")
        try:
            os.mkdir(path_image_out)
            print("Output directory (RESULTS/resulted_images/) created")
        except FileExistsError:
            print("Output directory (RESULTS/resulted_images/) already exist.")
        print("\n")
    # INITIALIZE dictionary with arguments
    args_IO = {
        'dir_in' :f"{path_in}",
        'filename_in' : f"{INPUT_filename}"}
    macro_Open = """
    #@ String dir_in
    #@ String filename_in
    // OPEN IMAGE
    open(dir_in + filename_in)
    """
    # RUN the Macro
    macro_results = ij.py.run_macro(macro_Open, args_IO)
    # GET the results
    INPUT_path = path_in + INPUT_filename
    opened_image = ij.py.active_image_plus()
    # PRINT the output
    if not process:
        ij.py.show(opened_image, cmap = "gray")
        print("\nIMAGE ⬆︎")
        print("\nImage path:", f"{INPUT_path}")
    # MAKE variables available for following steps
    global filename, base_dir, results_dir, images_dir
    filename = INPUT_filename # to pass to other function the input filename
    base_dir = path_in # to pass to other function the input directory
    results_dir = path_out # to save the plots and data resulted from the processes
    images_dir = path_image_out # to save the images resulted from intermediated processes
def Profile(path_out= "", process = False):
    """
    Plot and save the intensity profile along the image diagonal.

    Requires an image previously opened via Open(); the profile plot is saved
    as "<filename>_profile.png" in the results directory and shown unless
    process is True.
    """
    global filename, base_dir, results_dir
    # If no image is open, a message is printed out that explain how to use this function.
    # Create output filename
    OUTPUT_filename = filename.rsplit('.', 1)[0] + "_profile.png" # create OUTPUT_filename
    # 2 - ImageJ macro
    # Initialize dictionary with arguments
    # NOTE(review): 'filename_in' is the literal string '(unknown)' here;
    # presumably this was meant to be f'{filename}' -- confirm against the
    # original notebook before relying on the "in" output path.
    args_IO = {
        'dir_in' :f"{base_dir}",
        'dir_out' : f'{results_dir}',
        'filename_in' : f'(unknown)',
        'filename_out' : f'{OUTPUT_filename}'
    }
    # ImageJ macro commands
    macro_Profile = """
    //DEFINE IO
    #@ String dir_in
    #@ String dir_out
    #@ String filename_in
    #@ String filename_out
    #@output String in
    #@output String out
    title = getTitle();
    in = dir_in + "/" + filename_in
    out = dir_out + "/" + filename_out
    //MAKE AND SAVE PROFILE ON ORIGINAL IMAGE
    H = getHeight(); // get image size
    W = getWidth();
    makeLine(0, 0, W, H, 5); // make line with
    run("Plot Profile"); // make Profile
    saveAs(".png", out);
    // SELECT THE INPUT IMAGES AS LAST COMMAND FOR THE FOLLOWING STEPS
    selectWindow(title);
    """
    # RUN the Macro on the image left active from the Open function
    open_images = ij.window().getOpenWindows()
    if not len(open_images) == 0:
        macro_results = ij.py.run_macro(macro_Profile, args_IO)
        #GET the results from the macro for checking
        INPUT_path = macro_results.getOutput("in")
        OUTPUT_path = macro_results.getOutput("out")
        #PRINT file/folder and output
        if not process:
            PrintOutput(filename, OUTPUT_filename, base_dir, results_dir)
            profile = io.imread(f"{OUTPUT_path}")
            ij.py.show(profile, cmap = "gray")
            print("IMAGE PROFILE ⬆︎")
    else:
        print("""
        There are no images open.\n
        To use the Profile() function, firstly call the Open() function""")
def Filter(process = False):
    """
    Apply ImageJ's Mean filter to the active image.

    The result is shown and saved to the images directory unless process is
    True; the module-level `filename` is advanced to the filtered name so the
    next processing step picks it up.
    """
    global filename, base_dir, results_dir, images_dir
    # This functions apply the mean filter plug-in with default radius = 2 pixels.
    # the resulted filtered image will be shown to screen and saved to the IMAGES folder.
    #a) The image is left activated by the previous function, filename is a global variable.
    INPUT_filename = filename
    OUTPUT_filename = INPUT_filename.rsplit('.', 1)[0] + "_filtered.tif"
    path_image_out = images_dir + OUTPUT_filename
    # 2 - RUN the plugin
    # NOTE(review): block_radius is set to 4 here although the comment above
    # mentions radius = 2 -- confirm the intended radius.
    plugin = 'Mean'
    args_mean = {
        'block_radius_x': 4,
        'block_radius_y': 4}
    ij.py.run_plugin(plugin, args_mean)
    # ACTIVATE resulted image
    filtered = ij.py.active_image_plus()
    # SAVE resulted filename by modifying the global variable
    filename = OUTPUT_filename
    if not process:
        #PRINT file/folder and output
        PrintOutput(INPUT_filename, OUTPUT_filename, base_dir, results_dir)
        #SAVE the resulted filtered image
        numpy_filtered = ij.py.from_java(filtered)
        tifffile.imwrite(path_image_out, numpy_filtered, imagej=True)
        #SHOW the resulted filtered image to screen
        ij.py.show(filtered, cmap = "gray")
        print("\nIMAGE ⬆︎")
def SubtractBackground(process = False):
    """
    Enhance contrast and subtract the background of the active image.

    Uses ImageJ's rolling-ball background subtraction (radius 50 px); the
    result is shown and saved to the images directory unless process is True,
    and the module-level `filename` is advanced to the subtracted name.
    """
    global filename, base_dir, results_dir, images_dir
    # This functions apply a ImageJ macro to use the rolling subtract background plug in with default 50 pixel diameter.
    # the resulted subtracted image will be shown to screen and saved to the IMAGES folder.
    # 1 - Multiple working situations:
    #a) The image is left activated by the previous function, filename is a global variable.
    INPUT_filename = filename
    OUTPUT_filename = INPUT_filename.rsplit('_', 1)[0] + "_subtracted.tif"
    path_image_out = images_dir + OUTPUT_filename
    # 2 - ImageJ macro subtract background text
    # INITIALIZE dictionary with arguments
    macro_SubtractBackground = """
    // macro commands
    run("Enhance Contrast...", "saturated=0.35"); // Run the default contract
    run("Subtract Background...", "rolling=50 disable"); // Run the default contract
    """
    ij.py.run_macro(macro_SubtractBackground)
    #ACTIVATE subtracted image
    subtracted = ij.py.active_image_plus()
    # SAVE resulted filename
    filename = OUTPUT_filename
    if not process:
        PrintOutput(INPUT_filename, OUTPUT_filename, base_dir, results_dir)
        #SAVE the resulted subtracted image
        numpy_subtracted = ij.py.from_java(subtracted)
        tifffile.imwrite(path_image_out, numpy_subtracted, imagej=True)
        #SHOW the results to screen
        ij.py.show(subtracted, cmap = "gray")
        print("\nIMAGE ⬆︎")
def Threshold(process = False):
    """
    Threshold the active image to a binary mask and apply watershed.

    The mask is saved from inside the macro (saving from Python raised an
    error, per the original author's note); the module-level `filename` is
    advanced to the thresholded name.
    """
    global filename, base_dir, results_dir, images_dir
    # This functions apply a ImageJ macro to applly the Threshold. Now setted as Li.
    # the resulted thresholded image will be shown to screen and saved to the IMAGES folder.
    # In the following macro, I had to save the image from the macro-text becuase the method used before gave an ERROR
    # NOTE(review): the comment above mentions the Li method, but the macro
    # uses setAutoThreshold("Default dark") -- confirm which is intended.
    # 1 - Multiple working situations:
    #a) The image is left activated by the previous function, filename is a global variable.
    INPUT_filename = filename
    OUTPUT_filename = INPUT_filename.rsplit('_', 1)[0] + "_thresholded.tif"
    path_image_out = images_dir + OUTPUT_filename
    args_IO = {
        'path_out' : f'{path_image_out}',
    }
    macro_Threshold = """
    //DEFINE IO
    #@ String path_out
    // RUN threshold
    setAutoThreshold("Default dark");
    setOption("BlackBackground", true);
    run("Convert to Mask");
    run("Watershed");
    saveAs("tiff", path_out); //In this case the image must be saved within the macro
    """
    ij.py.run_macro(macro_Threshold, args_IO)
    thresholded = ij.py.active_image_plus()
    # SAVE resulted filename
    filename = OUTPUT_filename
    # ACTIVATE resulted image
    if not process:
        # PRINT file/folder AND output
        PrintOutput(INPUT_filename, OUTPUT_filename, base_dir, results_dir)
        #PRINT the results
        imp_thresholded = io.imread(path_image_out)
        ij.py.show(imp_thresholded, cmap = "gray")
        print("\nIMAGE ⬆︎")
def Count(process = False):
    """Count objects with ImageJ *Analyze Particles* and save a CSV table.

    Measures area, shape descriptors (circularity, AR ratio) per object; the
    Results table is written to the RESULTS folder and its path is returned.
    """
    global filename, base_dir, results_dir, images_dir
    # The previous step left its image activated; derive names from the global.
    src_name = filename
    dst_name = src_name.rsplit('_', 1)[0] + "_data.csv"
    data_path = results_dir + dst_name
    macro_io = {
        'path_out': f'{data_path}',
    }
    count_macro = """
// DEFINE IO
#@ String path_out
// RUN THE MEASUREMENTS
title = getTitle()
run("Set Measurements...", "area shape display redirect=None decimal=3");
run("Analyze Particles...", "size=20-Infinity pixel circularity=0.10-1.00 show=Outlines display clear summarize in_situ");
saveAs("Results", path_out);
"""
    ij.py.run_macro(count_macro, macro_io)
    if not process:
        PrintOutput(src_name, dst_name, base_dir, results_dir)
    print("\nThe data are saved as:\n", data_path)
    return data_path
def MacroRun(filepath= "", macro_title = "", process = False):
    """Run a macro registered in the global ``macros`` dict on *filepath*.

    The macro is prefixed with a standard IO-argument header and executed via
    ImageJ; results are written next to the input in a ``Results/`` folder.
    """
    global macros
    # Guard clause: without an input file there is nothing to run.
    if len(filepath) == 0:
        print("Select input file by pasting the absolute file path")
        return
    in_name = filepath.rsplit('/', 1)[1]
    out_name = in_name.rsplit('.', 1)[0] + "_MacroResult.tiff"
    dir_in = filepath.rsplit('/', 1)[0] + "/"
    dir_out = dir_in + "Results/"
    try:
        os.mkdir(dir_out)
    except FileExistsError:
        print("Output directory already exist.")
        print("\n")
    # ImageJ macro arguments
    macro_args = {
        'dir_in': f"{dir_in}",
        'dir_out': f'{dir_out}',
        'filename_in': f'{in_name}',
        'filename_out': f'{out_name}'
    }
    # Standard header declaring the IO parameters every registered macro gets.
    macro_header = """
//DEFINE IO
#@ String dir_in
#@ String dir_out
#@ String filename_in
#@ String filename_out
"""
    full_macro = macro_header + "\n" + macros[macro_title]
    ij.py.run_macro(full_macro, macro_args)
    print("The results are saved in:\n\n", dir_out + out_name)
## ACCESSORIES functions:
def AddMacro(title = "", text = ""):
    """Register an ImageJ macro under *title* in the global ``macros`` dict.

    Bug fix: the dict was previously re-created on every call, so registering
    a second macro silently discarded all earlier ones.  The dict is now only
    created the first time this is called.

    :param title: name used to look the macro up later (e.g. by MacroRun)
    :param text: the ImageJ macro source code
    """
    global macros
    if "macros" not in globals():
        macros = {}
    macros[title] = text
def Profile3D(INPUT_path):
    """Show an image as a matrix, a 2D picture and a 3D intensity surface.

    The surface plot is saved as a TIFF under ``<input dir>/Results/``.
    """
    # Derive output title/paths next to the input file.
    out_title = INPUT_path.rsplit('/', 1)[1].rsplit('.', 1)[0] + "_profile3D"
    in_dir = INPUT_path.rsplit('/', 1)[0]
    out_dir = in_dir + "/Results/"
    try:
        os.mkdir(out_dir)
        print("Output directories created.\n")
    except FileExistsError:
        print("Output directories already exist.\n")
    save_path = out_dir + out_title + ".tif"
    # Open the image and describe it to the user.
    img = io.imread(INPUT_path)
    print("\nThe computer see the image as a matrix of values.")
    print(img)
    print("\nThe human eye see the image as a change in light intensity.")
    print("\nThe image can also be as a 3D representation of the pixel intensity:\n")
    plt.imshow(img, cmap = "gray")
    # Build the surface grid from the image dimensions.
    dims = np.shape(img)
    X = np.arange(0, dims[0])
    Y = np.arange(0, dims[1])
    X, Y = np.meshgrid(X, Y)
    Z = img
    # Plot the surface.
    fig, ax = plt.subplots(subplot_kw={"projection": "3d"}, figsize = (12, 12))
    surf = ax.plot_surface(X, -Y, Z, cmap=cm.viridis, linewidth=0, antialiased=True)
    # Customize the z axis (8-bit intensity range, StrMethodFormatter labels).
    ax.set_zlim(0, 255)
    ax.zaxis.set_major_formatter('{x:.02f}')
    # Add a color bar which maps values to colors.
    fig.colorbar(surf, shrink=0.3, aspect=10)
    plt.title(out_title)
    plt.show()
    fig.savefig(save_path, dpi=fig.dpi, bbox_inches='tight', pad_inches=0.5)
## SUPPORTING functions:
def PrintOutput(I_file, O_file, I_path, O_path):
    """Print a framed summary of the input/output file names and folders."""
    divider = "---------------------------------------------------"
    print(divider)
    print("\n")
    print("The images are imported from: ", I_path)
    print("INPUT_filename:", I_file)
    print("\n")
    print("The results are exported in: ", O_path)
    print("OUTPUT_filename:", O_file)
    print("\n")
    print(divider)
def CloseAll():
    """Close every open ImageJ image and window via a small macro."""
    close_macro = """
close("*");
run("Close All");
"""
    ij.py.run_macro(close_macro)
    print("\nAll windows are closed")
def FolderTree(folder_path):  # credits to [https://stackoverflow.com/users/2479038/abstrus
    """Pretty-print the directory tree rooted at *folder_path*.

    :param folder_path: path of the folder to display
    """
    class DisplayablePath(object):
        # Box-drawing prefixes used to render the tree.
        display_filename_prefix_middle = '├──'
        display_filename_prefix_last = '└──'
        display_parent_prefix_middle = '    '
        display_parent_prefix_last = '│   '

        def __init__(self, path, parent_path, is_last):
            self.path = Path(str(path))
            self.parent = parent_path
            self.is_last = is_last
            if self.parent:
                self.depth = self.parent.depth + 1
            else:
                self.depth = 0

        @property
        def displayname(self):
            # Directories get a trailing slash.  NOTE: this property was
            # previously defined twice with identical bodies; the duplicate
            # has been removed.
            if self.path.is_dir():
                return self.path.name + '/'
            return self.path.name

        @classmethod
        def make_tree(cls, root, parent=None, is_last=False, criteria=None):
            root = Path(str(root))
            criteria = criteria or cls._default_criteria
            displayable_root = cls(root, parent, is_last)
            yield displayable_root
            children = sorted(
                (path for path in root.iterdir() if criteria(path)),
                key=lambda s: str(s).lower())
            count = 1
            for path in children:
                is_last = count == len(children)
                if path.is_dir():
                    yield from cls.make_tree(path,
                                             parent=displayable_root,
                                             is_last=is_last,
                                             criteria=criteria)
                else:
                    yield cls(path, displayable_root, is_last)
                count += 1

        @classmethod
        def _default_criteria(cls, path):
            # Default filter: include every path.
            return True

        def displayable(self):
            # Build the display line by walking up to the root.
            if self.parent is None:
                return self.displayname
            _filename_prefix = (self.display_filename_prefix_last
                                if self.is_last
                                else self.display_filename_prefix_middle)
            parts = ['{!s} {!s}'.format(_filename_prefix, self.displayname)]
            parent = self.parent
            while parent and parent.parent is not None:
                parts.append(self.display_parent_prefix_middle
                             if parent.is_last
                             else self.display_parent_prefix_last)
                parent = parent.parent
            return ''.join(reversed(parts))

    paths = DisplayablePath.make_tree(Path(folder_path))
    for path in paths:
        print(path.displayable())
print("---------------------------------------------------")
print("\nIPAN module loaded\n")
print("---------------------------------------------------")
| [
"matplotlib.pyplot.title",
"os.mkdir",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"os.getcwd",
"matplotlib.pyplot.imshow",
"tifffile.imwrite",
"numpy.shape",
"imagej.init",
"pathlib.Path",
"numpy.arange",
"matplotlib.pyplot.subplots",
"skimage.io.imread"
] | [((722, 792), 'imagej.init', 'imagej.init', (["['sc.fiji:fiji:2.0.0-pre-10', 'net.imagej:imagej-legacy']"], {}), "(['sc.fiji:fiji:2.0.0-pre-10', 'net.imagej:imagej-legacy'])\n", (733, 792), False, 'import imagej\n'), ((1823, 1834), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1832, 1834), False, 'import os\n'), ((13063, 13084), 'skimage.io.imread', 'io.imread', (['INPUT_path'], {}), '(INPUT_path)\n', (13072, 13084), False, 'from skimage import io\n'), ((13350, 13380), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {'cmap': '"""gray"""'}), "(image, cmap='gray')\n", (13360, 13380), True, 'import matplotlib.pyplot as plt\n'), ((13426, 13441), 'numpy.shape', 'np.shape', (['image'], {}), '(image)\n', (13434, 13441), True, 'import numpy as np\n'), ((13450, 13477), 'numpy.arange', 'np.arange', (['(0)', 'dimensions[0]'], {}), '(0, dimensions[0])\n', (13459, 13477), True, 'import numpy as np\n'), ((13486, 13513), 'numpy.arange', 'np.arange', (['(0)', 'dimensions[1]'], {}), '(0, dimensions[1])\n', (13495, 13513), True, 'import numpy as np\n'), ((13525, 13542), 'numpy.meshgrid', 'np.meshgrid', (['X', 'Y'], {}), '(X, Y)\n', (13536, 13542), True, 'import numpy as np\n'), ((13596, 13659), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'subplot_kw': "{'projection': '3d'}", 'figsize': '(12, 12)'}), "(subplot_kw={'projection': '3d'}, figsize=(12, 12))\n", (13608, 13659), True, 'import matplotlib.pyplot as plt\n'), ((13996, 14021), 'matplotlib.pyplot.title', 'plt.title', (['filename_title'], {}), '(filename_title)\n', (14005, 14021), True, 'import matplotlib.pyplot as plt\n'), ((14027, 14037), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14035, 14037), True, 'import matplotlib.pyplot as plt\n'), ((6606, 6667), 'tifffile.imwrite', 'tifffile.imwrite', (['path_image_out', 'numpy_filtered'], {'imagej': '(True)'}), '(path_image_out, numpy_filtered, imagej=True)\n', (6622, 6667), False, 'import tifffile\n'), ((8089, 8152), 'tifffile.imwrite', 
'tifffile.imwrite', (['path_image_out', 'numpy_subtracted'], {'imagej': '(True)'}), '(path_image_out, numpy_subtracted, imagej=True)\n', (8105, 8152), False, 'import tifffile\n'), ((9631, 9656), 'skimage.io.imread', 'io.imread', (['path_image_out'], {}), '(path_image_out)\n', (9640, 9656), False, 'from skimage import io\n'), ((12828, 12846), 'os.mkdir', 'os.mkdir', (['path_out'], {}), '(path_out)\n', (12836, 12846), False, 'import os\n'), ((17535, 17552), 'pathlib.Path', 'Path', (['folder_path'], {}), '(folder_path)\n', (17539, 17552), False, 'from pathlib import Path\n'), ((2063, 2081), 'os.mkdir', 'os.mkdir', (['path_out'], {}), '(path_out)\n', (2071, 2081), False, 'import os\n'), ((2283, 2307), 'os.mkdir', 'os.mkdir', (['path_image_out'], {}), '(path_image_out)\n', (2291, 2307), False, 'import os\n'), ((5297, 5324), 'skimage.io.imread', 'io.imread', (['f"""{OUTPUT_path}"""'], {}), "(f'{OUTPUT_path}')\n", (5306, 5324), False, 'from skimage import io\n'), ((11774, 11792), 'os.mkdir', 'os.mkdir', (['path_out'], {}), '(path_out)\n', (11782, 11792), False, 'import os\n'), ((1479, 1497), 'os.mkdir', 'os.mkdir', (['path_out'], {}), '(path_out)\n', (1487, 1497), False, 'import os\n'), ((1514, 1538), 'os.mkdir', 'os.mkdir', (['path_image_out'], {}), '(path_image_out)\n', (1522, 1538), False, 'import os\n')] |
"""
Dependencies:
pip install opencv-python
pip install astropy --no-deps
fits_to_png.py
Converts FITS files into PNGs. Can also make the PNGs into a video and delete
the FITS after processing into PNGs.
usage: fits_to_png.py [-h] [--video] [-d] [-v] fits_dir
Author: <NAME> and <NAME>
"""
import matplotlib.pyplot as plt
from astropy.io import fits
import numpy as np
import glob
import cv2
import os
import sys
import argparse
def fits_to_png(fits_fn):
    """
    Converts the FITS file into a PNG image.

    Assumes that the image information is located in the Primary HDU of the
    FITS file.

    :param fits_fn: path of the FITS file to convert
    :return: None
    """
    # Generally the image information is located in the Primary HDU (ext 0);
    # read the image data from this first extension using the keyword argument.
    data = fits.getdata(fits_fn, ext=0)
    sizes = np.shape(data)
    height = float(sizes[0])
    width = float(sizes[1])
    # Render with no axes or margins so the PNG covers exactly the image data.
    fig = plt.figure()
    fig.set_size_inches(width / height, 1, forward=False)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)
    ax.imshow(data, cmap="binary")
    # Derive the PNG name from the FITS name.  os.path.splitext only strips
    # the final extension, whereas the old split(".fits")[0] truncated any
    # path that happened to contain ".fits" elsewhere in the name.
    png_fn = os.path.splitext(fits_fn)[0] + ".png"
    plt.savefig(png_fn, dpi=height)
    # Close this specific figure so long batch runs do not leak figures.
    plt.close(fig)
def fits_folder_to_png(fits_dir,
                       make_vid=False, delete_fits=False, verbose=False):
    """
    Converts all the FITS files in a dir to PNG images. Can also make a movie
    of the PNGs and can delete the FITS after processing is complete.

    :param fits_dir: The folder containing the FITS files
    :param make_vid: bool to make a video of the PNGs if True
    :param delete_fits: deletes FITS files after processing if True
    :param verbose: bool to print status of processed files
    :return: None
    """
    if verbose:
        print("FITS-->PNG")
    # os.path.join works whether or not fits_dir ends with a separator
    # (the old string concatenation silently matched nothing without a
    # trailing "/").
    fits_files = glob.glob(os.path.join(fits_dir, "*.fits"))
    num_files = len(fits_files)
    # Report progress roughly every 10% of the files.
    status_flag = num_files * 0.1
    for i in range(0, num_files):
        fits_to_png(fits_files[i])
        if verbose and i > status_flag:
            status_flag += num_files * 0.1
            p_done = i / num_files * 100
            print(str(round(p_done, 2)) + "% processed")
    if make_vid:
        vid_fname = make_movie_from_png(fits_dir)
        if verbose:
            print("Successfully saved video " + vid_fname)
    if delete_fits:
        delete_fits_from_folder(fits_dir)
        if verbose:
            print("Deleted fits files from " + fits_dir)
def delete_fits_from_folder(fits_dir):
    """
    Deletes the FITS files from fits_dir.

    :param fits_dir: The dir containing the FITS files
    :return: None
    """
    # os.path.join works whether or not fits_dir ends with a separator
    # (the old string concatenation silently matched nothing without a
    # trailing "/").
    for fits_file in glob.glob(os.path.join(fits_dir, "*.fits")):
        os.remove(fits_file)
def make_movie_from_png(png_dir):
    """
    Takes PNG image files from a dir and combines them to make a movie.

    :param png_dir: The dir with the PNGs
    :return: the video file name (basename only)
    """
    vid_filename = os.path.basename(os.path.dirname(png_dir)) + ".avi"
    vid_filepath = os.path.join(png_dir, vid_filename)
    images = glob.glob(png_dir + "*.png")
    # Size the video from the first frame.
    first_frame = cv2.imread(images[0])
    height, width, _layers = first_frame.shape
    writer = cv2.VideoWriter(vid_filepath, -1, 25, (width, height))
    for image_fn in images:
        writer.write(cv2.imread(image_fn))
    writer.release()
    return vid_filename
def main():
    """
    Takes command line arguments to process a folder with FITS files into PNGs

    Positional parameters
        [STRING] fits_dir: the dir containing the FITS files
    Optional parameters
        [BOOL] video: to make the PNG files that are created into a video
        [BOOL] verbose: to print out FITS->PNG progress
        [BOOL] delete: to delete the FITS files once converted into PNGs
    :return: None
    """
    arg_parser = argparse.ArgumentParser(description='Process FITS files.')
    # Positional parameters
    arg_parser.add_argument("fits_dir", help="dir where fits files located")
    # Optional parameters
    arg_parser.add_argument("--video",
                            help="make a video of the PNGs generated",
                            action="store_true")
    arg_parser.add_argument("-d", "--delete",
                            help="delete FITS files after PNG creation",
                            action="store_true")
    arg_parser.add_argument("-v", "--verbose",
                            help="increase output verbosity",
                            action="store_true")
    opts = arg_parser.parse_args()
    # Guard clause: bail out early on an invalid directory.
    if not os.path.isdir(opts.fits_dir):
        print("Error: Invalid fits dir")
        sys.exit(1)
    fits_folder_to_png(fits_dir=opts.fits_dir,
                       make_vid=opts.video,
                       delete_fits=opts.delete,
                       verbose=opts.verbose)
if __name__ == "__main__":
main()
| [
"os.remove",
"cv2.VideoWriter",
"argparse.ArgumentParser",
"os.path.isdir",
"astropy.io.fits.getdata",
"matplotlib.pyplot.close",
"os.path.dirname",
"sys.exit",
"numpy.shape",
"cv2.imread",
"matplotlib.pyplot.figure",
"glob.glob",
"matplotlib.pyplot.Axes",
"os.path.join",
"matplotlib.pyp... | [((842, 870), 'astropy.io.fits.getdata', 'fits.getdata', (['fits_fn'], {'ext': '(0)'}), '(fits_fn, ext=0)\n', (854, 870), False, 'from astropy.io import fits\n'), ((884, 898), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (892, 898), True, 'import numpy as np\n'), ((967, 979), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (977, 979), True, 'import matplotlib.pyplot as plt\n'), ((1047, 1082), 'matplotlib.pyplot.Axes', 'plt.Axes', (['fig', '[0.0, 0.0, 1.0, 1.0]'], {}), '(fig, [0.0, 0.0, 1.0, 1.0])\n', (1055, 1082), True, 'import matplotlib.pyplot as plt\n'), ((1259, 1290), 'matplotlib.pyplot.savefig', 'plt.savefig', (['png_fn'], {'dpi': 'height'}), '(png_fn, dpi=height)\n', (1270, 1290), True, 'import matplotlib.pyplot as plt\n'), ((1295, 1306), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1304, 1306), True, 'import matplotlib.pyplot as plt\n'), ((1916, 1946), 'glob.glob', 'glob.glob', (["(fits_dir + '*.fits')"], {}), "(fits_dir + '*.fits')\n", (1925, 1946), False, 'import glob\n'), ((2748, 2778), 'glob.glob', 'glob.glob', (["(fits_dir + '*.fits')"], {}), "(fits_dir + '*.fits')\n", (2757, 2778), False, 'import glob\n'), ((3092, 3127), 'os.path.join', 'os.path.join', (['png_dir', 'vid_filename'], {}), '(png_dir, vid_filename)\n', (3104, 3127), False, 'import os\n'), ((3142, 3170), 'glob.glob', 'glob.glob', (["(png_dir + '*.png')"], {}), "(png_dir + '*.png')\n", (3151, 3170), False, 'import glob\n'), ((3183, 3204), 'cv2.imread', 'cv2.imread', (['images[0]'], {}), '(images[0])\n', (3193, 3204), False, 'import cv2\n'), ((3258, 3312), 'cv2.VideoWriter', 'cv2.VideoWriter', (['vid_filepath', '(-1)', '(25)', '(width, height)'], {}), '(vid_filepath, -1, 25, (width, height))\n', (3273, 3312), False, 'import cv2\n'), ((3865, 3923), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process FITS files."""'}), "(description='Process FITS files.')\n", (3888, 3923), False, 'import argparse\n'), 
((4494, 4522), 'os.path.isdir', 'os.path.isdir', (['args.fits_dir'], {}), '(args.fits_dir)\n', (4507, 4522), False, 'import os\n'), ((2812, 2824), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (2821, 2824), False, 'import os\n'), ((4784, 4795), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4792, 4795), False, 'import sys\n'), ((3038, 3062), 'os.path.dirname', 'os.path.dirname', (['png_dir'], {}), '(png_dir)\n', (3053, 3062), False, 'import os\n'), ((3359, 3376), 'cv2.imread', 'cv2.imread', (['image'], {}), '(image)\n', (3369, 3376), False, 'import cv2\n')] |
#!/usr/bin/env python
__author__ = '<NAME>'
__date__ = '2020-03-13'
__version__ = '0.0.1'
import argparse
import os
import csv
import random
import numpy as np
import pandas as pd
import scrublet as scr
import skimage.filters as skif
import scanpy as sc
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import seaborn as sns
import plotnine as plt9
# from statannot import add_stat_annotation
# Set seed for reproducibility across all random number generators used below.
seed_value = 0
# 0. Set `PYTHONHASHSEED` environment variable at a fixed value
# os.environ['PYTHONHASHSEED']=str(seed_value)
# 1. Set `python` built-in pseudo-random generator at a fixed value
random.seed(seed_value)
# 2. Set `numpy` pseudo-random generator at a fixed value
np.random.seed(seed_value)
# Global seaborn theme applied to all matplotlib-based figures.
sns.set(style='whitegrid')
# Set the default dpi for plotnine figures.
plt9.options.dpi = 100
def custom_cmap(rgb_list):
    """Build a colormap that interpolates through *rgb_list* colors."""
    rgb_arr = np.array(rgb_list)
    # from_list is a classmethod on the colormap class; Reds is just a handle
    # through which it is reached.
    base = plt.cm.Reds
    return base.from_list(rgb_arr.shape[0], rgb_arr)
def comma_labels(x_list):
    """Format each value of *x_list* as a comma-grouped integer string.

    Values are truncated to int first, e.g. ``1234.7`` -> ``'1,234'``.

    :param x_list: iterable of numbers
    :return: list of formatted strings
    """
    return [format(int(x), ',') for x in x_list]
def plot_scrub_hist(
    scrub,
    threshold,
    output_file='scrublet_histogram',
    scale_y_log10=False,
    density=False,
    zscores=False
):
    """Plot observed vs. simulated multiplet score (or z-score) histograms.

    One facet per score origin ('Observed' / 'Simulated'); on the raw-score
    plot the calling threshold is drawn as a vertical line.
    """
    # Assemble one dataframe with a 'type' column marking each score's origin.
    if zscores:
        # Z-scores computed as in the scrublet source (call_doublets).
        obs_df = pd.DataFrame(data=scrub.z_scores_, columns=['scores'])
        sim_z = (
            scrub.doublet_scores_sim_ - threshold
        ) / scrub.doublet_errors_sim_
        sim_df = pd.DataFrame(data=sim_z, columns=['scores'])
        x_axis_label = 'Multiplet zscore'
    else:
        obs_df = pd.DataFrame(data=scrub.doublet_scores_obs_, columns=['scores'])
        sim_df = pd.DataFrame(data=scrub.doublet_scores_sim_, columns=['scores'])
        x_axis_label = 'Multiplet score'
    obs_df['type'] = 'Observed'
    sim_df['type'] = 'Simulated'
    df_plt = pd.concat([obs_df, sim_df])
    # Build the plotnine figure layer by layer (order preserved).
    gplt = plt9.ggplot(df_plt, plt9.aes(x='scores')) + plt9.theme_bw()
    if density:
        gplt = gplt + plt9.geom_density(alpha=0.8)
    else:
        gplt = gplt + plt9.geom_histogram(alpha=0.8)
    if not zscores:
        gplt = gplt + plt9.geom_vline(xintercept=threshold, linetype='solid')
    if scale_y_log10:
        gplt = gplt + plt9.scale_y_continuous(trans='log10', minor_breaks=0)
    gplt = gplt + plt9.labs(x=x_axis_label, title='')
    gplt = gplt + plt9.facet_wrap('~ {}'.format('type'), scales='free_y')
    # Sort out whitespace issue in plotnine:
    # https://github.com/has2k1/plotnine/issues/106
    gplt = gplt + plt9.theme(subplots_adjust={'wspace': 0.35})
    gplt.save(
        '{}.png'.format(output_file),
        width=4,
        height=2,
        limitsize=False
    )
def run_scrublet(
    adata,
    out_file_base='scrublet',
    expected_multiplet_rate=0.1,
    n_simulated_multiplet=100000,
    verbose=True
):
    """Run scrublet: score, threshold and call multiplets, then save plots.

    Parameters
    ----------
    adata : AnnData
        Input AnnData object. Assume adata.X is counts.
    expected_multiplet_rate : float
        Expected multiplet rate. If 0.0, the rate is estimated from the
        number of recovered cells (see inline comment below).
    out_file_base : string
        Tag for output file.
    n_simulated_multiplet : integer
        Number of multiplets to simulate.
    verbose : boolean
        Write extra info to standard out.

    Returns
    -------
    NULL
    """
    # Set color pallette for UMAP.
    zissou_palette_hex = ['#3B9AB2', '#EBCC2A', '#F21A00']
    zissou_palette_rgb = [colors.to_rgba(i) for i in zissou_palette_hex]
    # If no expected multiplet rate, then estimate the multiplet rate using
    # the coefficients from a lm predicting multiplet rate from recovered cells
    # in data distributed by 10x for their 3.1 chemistry.
    # https://support.10xgenomics.com/single-cell-gene-expression/library-prep/doc/user-guide-chromium-single-cell-3-reagent-kits-user-guide-v31-chemistry
    cells_recovered = len(adata)
    if expected_multiplet_rate == 0.0:
        multiplet_rate = 0.0007589 * cells_recovered + 0.0527214
        # The linear model predicts a percentage, so convert to a fraction.
        multiplet_rate = multiplet_rate / 100.0
    else:
        multiplet_rate = expected_multiplet_rate
    # expected_n_multiplets = multiplet_rate * cells_recovered
    if verbose:
        print('cells_input_data:\t{}'.format(cells_recovered))
        print('multiplet_rate:\t{}'.format(multiplet_rate))
    # The authors note that the method is not that sensitive to this parameter
    # https://github.com/AllonKleinLab/scrublet/blob/master/examples/scrublet_basics.ipynb
    # From Chromium Single Cell 3' Reagent Kits User Guide (v2 Chemistry):
    # https://support.10xgenomics.com/permalink/3vzDu3zQjY0o2AqkkkI4CC
    # the expected multiplet rate is 0.06.
    # From Chromium Single Cell 3' Reagent Kits User Guide (v3.1 Chemistry):
    # https://support.10xgenomics.com/single-cell-gene-expression/library-prep/doc/user-guide-chromium-single-cell-3-reagent-kits-user-guide-v31-chemistry
    # the expected multiplet rate is ~3.9% for ~8000 input cells.
    scrub = scr.Scrublet(
        counts_matrix=adata.X,
        sim_doublet_ratio=n_simulated_multiplet/len(adata),  # Default is 2.0
        expected_doublet_rate=multiplet_rate,  # Default is 0.1
        random_state=0
    )
    # Run the scrublet pipeline with default parameters.
    # This function performs:
    # * Multiplet simulation
    # * Normalization, gene filtering, rescaling, PCA
    # * Multiplet score calculation
    # * Multiplet score threshold detection and multiplet calling
    multiplet_scores, predicted_multiplets = scrub.scrub_doublets(
        verbose=False
    )
    # Calculate the threshold for calling multiplets
    # The default method for scrublet is `threshold_minimum`, but in our
    # hands it looks like `threshold_otsu` looks better.
    # threshold = skif.threshold_minimum(scrub.doublet_scores_sim_)
    threshold = skif.threshold_li(
        scrub.doublet_scores_sim_,
        initial_guess=skif.threshold_otsu(scrub.doublet_scores_sim_)
    )
    # print('threshold_isodata:\t{}'.format(
    #     skif.threshold_isodata(scrub.doublet_scores_sim_)
    # ))
    # print('threshold_li:\t{}'.format(
    #     skif.threshold_li(
    #         scrub.doublet_scores_sim_,
    #         initial_guess=skif.threshold_otsu(scrub.doublet_scores_sim_)
    #     )
    # ))
    # print('threshold_minimum:\t{}'.format(
    #     skif.threshold_minimum(scrub.doublet_scores_sim_)
    # ))
    # print('threshold_triangle:\t{}'.format(
    #     skif.threshold_triangle(scrub.doublet_scores_sim_)
    # ))
    # print('threshold_yen:\t{}'.format(
    #     skif.threshold_yen(scrub.doublet_scores_sim_)
    # ))
    # print('threshold_otsu:\t{}'.format(
    #     skif.threshold_otsu(scrub.doublet_scores_sim_)
    # ))
    # print('threshold_used:\t{}'.format(threshold))
    # Call multiplets using the otsu threshold
    predicted_multiplets = scrub.call_doublets(
        threshold=threshold,
        verbose=verbose
    )
    print(
        'If automatic threshold is poor, adjust threshold with',
        'scrub.call_doublets(threshold=<my_custom_threshold>)'
    )
    # Record the per-cell results in the AnnData observation table.
    adata.obs['scrublet__predicted_multiplet'] = scrub.predicted_doublets_
    adata.obs['scrublet__multiplet_scores'] = scrub.doublet_scores_obs_
    adata.obs['scrublet__multiplet_zscores'] = scrub.z_scores_
    # Get the number of multiplets one would expect if the 10x prediction were
    # spot on. Taken from
    # https://github.com/vib-singlecell-nf/scrublet/blob/master/bin/sc_doublet_detection.py
    #
    # Estimate the multiplet rate using coefficients from a lm predicting
    # multiplet rate from recovered cells in data distributed by 10x for
    # their 3.1 chemistry.
    # https://support.10xgenomics.com/single-cell-gene-expression/library-prep/doc/user-guide-chromium-single-cell-3-reagent-kits-user-guide-v31-chemistry
    # cells_recovered = len(adata)
    # multiplet_rate = 0.0007589 * cells_recovered + 0.0527214
    # multiplet_rate = multiplet_rate / 100.0
    # multiplet_cells = adata.obs['scrublet__multiplet_scores'].sort_values(
    #     ascending=False
    # ).head(
    #     n=expected_n_doublets
    # ).index
    # adata.obs['scrublet__predicted_multiplet_based_10x_rate'] = False
    # adata.obs.loc[
    #     multiplet_cells,
    #     'scrublet__predicted_multiplet_based_10x_rate'
    # ] = True
    # Save the results.
    cols_save = [
        'scrublet__multiplet_scores',
        'scrublet__predicted_multiplet',
        'scrublet__multiplet_zscores'
    ]
    adata.obs[cols_save].to_csv(
        '{}-scrublet.tsv.gz'.format(out_file_base),
        sep='\t',
        index=True,
        quoting=csv.QUOTE_NONNUMERIC,
        index_label='cell_barcode',
        na_rep='',
        compression='gzip'
    )
    # Plot a histogram of multiplets.
    plot_scrub_hist(
        scrub=scrub,
        threshold=threshold,
        scale_y_log10=False,
        output_file='{}-histogram_multiplet_scores'.format(out_file_base)
    )
    plot_scrub_hist(
        scrub=scrub,
        threshold=threshold,
        scale_y_log10=True,
        output_file='{}-histogram_multiplet_scores_log'.format(out_file_base)
    )
    plot_scrub_hist(
        scrub=scrub,
        threshold=threshold,
        zscores=True,
        output_file='{}-histogram_multiplet_zscores'.format(out_file_base)
    )
    # plot_scrub_hist(
    #     scrub=scrub,
    #     threshold=threshold,
    #     density=True,
    #     scale_y_log10=False,
    #     output_file='{}-density_multiplet_scores'.format(out_file_base)
    # )
    # fig, ax = scrub.plot_histogram(
    #     scale_hist_obs='linear',
    #     scale_hist_sim='linear'
    # )
    # fig.savefig(
    #     '{}-histogram_multiplet_scores_v2.pdf'.format(out_file_base),
    #     # dpi=300,
    #     bbox_inches='tight'
    # )
    # plt.close(fig)
    # Plot the average number of UMIs in multiplets vs singlets.
    if 'total_counts' not in adata.obs.columns:
        sc.pp.calculate_qc_metrics(adata, inplace=True)
    fig, ax = plt.subplots(figsize=(3, 3.5))
    ax = sns.boxplot(
        data=adata.obs,
        x='scrublet__predicted_multiplet',
        y='total_counts'
        # hue='scrublet__predicted_multiplet'
    )
    ax.set_yscale('log', basey=10)
    ax.set(xlabel='Predicted multiplet', ylabel='Number of molecules')
    # NOTE: I could not get p-value annotation to work.
    # ax, test_results = add_stat_annotation(
    #     ax,
    #     data=adata.obs,
    #     x='scrublet__predicted_multiplet',
    #     y='total_counts',
    #     #hue='scrublet__predicted_multiplet',
    #     box_pairs=[('True', 'False')],
    #     test='Mann-Whitney',
    #     text_format='full',
    #     loc='inside'
    # )
    # plt.legend(loc='upper left', bbox_to_anchor=(1.03, 1))
    fig.savefig(
        '{}-boxplot_total_umi_counts.png'.format(out_file_base),
        # dpi=300,
        bbox_inches='tight'
    )
    plt.close(fig)  # Close the figure.
    # NOTE: Removed UMAP embedding on 30/06/2020 because it would not
    # work with singularity.
    # Plot UMAP embedding.
    # scrub.set_embedding('UMAP', scr.get_umap(scrub.manifold_obs_))
    # fig, ax = scrub.plot_embedding(
    #     'UMAP',
    #     marker_size=1.5,
    #     color_map=custom_cmap(zissou_palette_rgb),
    #     order_points=True
    # )
    # fig.savefig(
    #     '{}-umap_multiplet_scores.png'.format(out_file_base),
    #     # dpi=300,
    #     bbox_inches='tight'
    # )
    # plt.close(fig)
    #
    # fig, ax = scrub.plot_embedding(
    #     'UMAP',
    #     score='zscore',
    #     marker_size=1.5,
    #     color_map=custom_cmap(zissou_palette_rgb),
    #     order_points=True
    # )
    # fig.savefig(
    #     '{}-umap_multiplet_zscores.png'.format(out_file_base),
    #     # dpi=300,
    #     bbox_inches='tight'
    # )
    # plt.close(fig)
    # Plot tSNE embedding.
    # scrub.set_embedding('tSNE', scr.get_tsne(scrub.manifold_obs_, angle=0.9))
def main():
    """Run CLI: load a 10x matrix / 10x h5 / AnnData h5 and call multiplets."""
    parser = argparse.ArgumentParser(
        description="""
            Read 10x data or AnnData object and annotates multiplets.
            """
    )
    parser.add_argument(
        '-v', '--version',
        action='version',
        version='%(prog)s {version}'.format(version=__version__)
    )
    parser.add_argument(
        '-txd', '--tenxdata_dir',
        action='store',
        dest='txd',
        default='',
        required=False,
        help='Path to directory with data in 10x matrix format.'
    )
    parser.add_argument(
        '-txh5', '--tenxdata_h5',
        action='store',
        dest='txh5',
        default='',
        required=False,
        help='10x h5 format file.'
    )
    parser.add_argument(
        '-h5', '--h5_anndata',
        action='store',
        dest='h5',
        default='',
        required=False,
        help='H5 AnnData file.'
    )
    parser.add_argument(
        '-emr', '--expected_multiplet_rate',
        action='store',
        dest='emr',
        default=0.1,
        type=float,
        help='Expected multiplet rate. If 0.0 then automatically predict\
            multiplet rate based on the number of recovered cells using\
            published multiplet rates from 10x v3.1 chemistry. In tests, the\
            scrublet default of 0.1 was more aggressive in calling multiplets\
            than the automatic prediction and was able to capture a greater\
            fraction of simulated multiplets (with the automatically\
            calculated multiplet threshold). Note that the final multiplet\
            scores are calculated with z-scores such that the\
            expected_multiplet_rate does not have a huge effect on the final\
            calls.\
            (default: %(default)s)'
    )
    parser.add_argument(
        '-nsm', '--n_simulated_multiplet',
        action='store',
        dest='nsm',
        default=100000,
        type=int,
        help='Number of simulated multiplets. A larger number provides more\
            accurate multiplet calls at the cost of execution time.\
            (default: %(default)s)'
    )
    parser.add_argument(
        '-of', '--output_file',
        action='store',
        dest='of',
        default='scrublet',
        help='Basename of output files, assuming output in current working \
            directory.\
            (default: %(default)s)'
    )
    options = parser.parse_args()
    # Fixed settings.
    verbose = True
    # Scanpy settings
    sc.settings.figdir = os.getcwd()  # figure output directory to match base.
    # sc.settings.n_jobs = options.ncpu  # number CPUs
    # sc.settings.max_memory = 500  # in Gb
    # sc.set_figure_params(dpi_save = 300)
    # Load the data.
    if options.txd != '':
        adata = sc.read_10x_mtx(
            path=options.txd,
            # var_names='gene_symbols',
            var_names='gene_ids',
            make_unique=False
        )
    elif options.txh5 != '':
        # BUG FIX: this branch previously called
        # sc.read_10x_h5(path=options.txd) -- reading the (empty in this
        # branch) matrix-dir option instead of the h5 path, and using `path`,
        # which is not a valid keyword (scanpy's parameter is `filename`).
        # Pass the h5 file positionally.
        adata = sc.read_10x_h5(options.txh5)
        # Index on gene ids, keeping the symbols as a separate column.
        adata.var['gene_symbols'] = adata.var.index
        adata.var.index = adata.var['gene_ids'].values
        del adata.var['gene_ids']
    elif options.h5 != '':
        adata = sc.read_h5ad(filename=options.h5)
        print(
            'Scrublet uses counts. Assuming adata.X are counts'
        )
    else:
        raise Exception(
            'Error invalid input.'
        )
    run_scrublet(
        adata=adata,
        out_file_base=options.of,
        expected_multiplet_rate=options.emr,
        n_simulated_multiplet=options.nsm,
        verbose=verbose
    )
# Script entry point: run the multiplet-annotation CLI.
if __name__ == '__main__':
    main()
| [
"numpy.random.seed",
"argparse.ArgumentParser",
"scanpy.read_10x_mtx",
"plotnine.labs",
"scanpy.read_10x_h5",
"plotnine.aes",
"pandas.DataFrame",
"matplotlib.colors.to_rgba",
"plotnine.geom_histogram",
"skimage.filters.threshold_otsu",
"matplotlib.pyplot.close",
"plotnine.geom_vline",
"plotn... | [((640, 663), 'random.seed', 'random.seed', (['seed_value'], {}), '(seed_value)\n', (651, 663), False, 'import random\n'), ((722, 748), 'numpy.random.seed', 'np.random.seed', (['seed_value'], {}), '(seed_value)\n', (736, 748), True, 'import numpy as np\n'), ((750, 776), 'seaborn.set', 'sns.set', ([], {'style': '"""whitegrid"""'}), "(style='whitegrid')\n", (757, 776), True, 'import seaborn as sns\n'), ((897, 915), 'numpy.array', 'np.array', (['rgb_list'], {}), '(rgb_list)\n', (905, 915), True, 'import numpy as np\n'), ((2273, 2302), 'pandas.concat', 'pd.concat', (['[df_tmp1, df_tmp2]'], {}), '([df_tmp1, df_tmp2])\n', (2282, 2302), True, 'import pandas as pd\n'), ((10590, 10620), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(3, 3.5)'}), '(figsize=(3, 3.5))\n', (10602, 10620), True, 'import matplotlib.pyplot as plt\n'), ((10630, 10715), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'adata.obs', 'x': '"""scrublet__predicted_multiplet"""', 'y': '"""total_counts"""'}), "(data=adata.obs, x='scrublet__predicted_multiplet', y='total_counts'\n )\n", (10641, 10715), True, 'import seaborn as sns\n'), ((11489, 11503), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (11498, 11503), True, 'import matplotlib.pyplot as plt\n'), ((12578, 12714), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""\n Read 10x data or AnnData object and annotates multiplets.\n """'}), '(description=\n """\n Read 10x data or AnnData object and annotates multiplets.\n """\n )\n', (12601, 12714), False, 'import argparse\n'), ((15086, 15097), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (15095, 15097), False, 'import os\n'), ((1462, 1526), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'scrub.doublet_scores_obs_', 'columns': "['scores']"}), "(data=scrub.doublet_scores_obs_, columns=['scores'])\n", (1474, 1526), True, 'import pandas as pd\n'), ((1579, 1643), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 
'scrub.doublet_scores_sim_', 'columns': "['scores']"}), "(data=scrub.doublet_scores_sim_, columns=['scores'])\n", (1591, 1643), True, 'import pandas as pd\n'), ((1847, 1901), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'scrub.z_scores_', 'columns': "['scores']"}), "(data=scrub.z_scores_, columns=['scores'])\n", (1859, 1901), True, 'import pandas as pd\n'), ((2065, 2114), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'zscore_sim', 'columns': "['scores']"}), "(data=zscore_sim, columns=['scores'])\n", (2077, 2114), True, 'import pandas as pd\n'), ((2369, 2389), 'plotnine.aes', 'plt9.aes', ([], {'x': '"""scores"""'}), "(x='scores')\n", (2377, 2389), True, 'import plotnine as plt9\n'), ((2423, 2438), 'plotnine.theme_bw', 'plt9.theme_bw', ([], {}), '()\n', (2436, 2438), True, 'import plotnine as plt9\n'), ((2853, 2888), 'plotnine.labs', 'plt9.labs', ([], {'x': 'x_axis_label', 'title': '""""""'}), "(x=x_axis_label, title='')\n", (2862, 2888), True, 'import plotnine as plt9\n'), ((3145, 3189), 'plotnine.theme', 'plt9.theme', ([], {'subplots_adjust': "{'wspace': 0.35}"}), "(subplots_adjust={'wspace': 0.35})\n", (3155, 3189), True, 'import plotnine as plt9\n'), ((4035, 4052), 'matplotlib.colors.to_rgba', 'colors.to_rgba', (['i'], {}), '(i)\n', (4049, 4052), True, 'import matplotlib.colors as colors\n'), ((10528, 10575), 'scanpy.pp.calculate_qc_metrics', 'sc.pp.calculate_qc_metrics', (['adata'], {'inplace': '(True)'}), '(adata, inplace=True)\n', (10554, 10575), True, 'import scanpy as sc\n'), ((15346, 15420), 'scanpy.read_10x_mtx', 'sc.read_10x_mtx', ([], {'path': 'options.txd', 'var_names': '"""gene_ids"""', 'make_unique': '(False)'}), "(path=options.txd, var_names='gene_ids', make_unique=False)\n", (15361, 15420), True, 'import scanpy as sc\n'), ((2477, 2505), 'plotnine.geom_density', 'plt9.geom_density', ([], {'alpha': '(0.8)'}), '(alpha=0.8)\n', (2494, 2505), True, 'import plotnine as plt9\n'), ((2538, 2568), 'plotnine.geom_histogram', 'plt9.geom_histogram', 
([], {'alpha': '(0.8)'}), '(alpha=0.8)\n', (2557, 2568), True, 'import plotnine as plt9\n'), ((2611, 2666), 'plotnine.geom_vline', 'plt9.geom_vline', ([], {'xintercept': 'threshold', 'linetype': '"""solid"""'}), "(xintercept=threshold, linetype='solid')\n", (2626, 2666), True, 'import plotnine as plt9\n'), ((2711, 2765), 'plotnine.scale_y_continuous', 'plt9.scale_y_continuous', ([], {'trans': '"""log10"""', 'minor_breaks': '(0)'}), "(trans='log10', minor_breaks=0)\n", (2734, 2765), True, 'import plotnine as plt9\n'), ((6504, 6550), 'skimage.filters.threshold_otsu', 'skif.threshold_otsu', (['scrub.doublet_scores_sim_'], {}), '(scrub.doublet_scores_sim_)\n', (6523, 6550), True, 'import skimage.filters as skif\n'), ((15552, 15584), 'scanpy.read_10x_h5', 'sc.read_10x_h5', ([], {'path': 'options.txd'}), '(path=options.txd)\n', (15566, 15584), True, 'import scanpy as sc\n'), ((15769, 15802), 'scanpy.read_h5ad', 'sc.read_h5ad', ([], {'filename': 'options.h5'}), '(filename=options.h5)\n', (15781, 15802), True, 'import scanpy as sc\n')] |
# 091
# Create an array which contains five numbers (two of which
# should be repeated). Display the whole array to the user.
# Ask the user to enter one of the numbers from the array
# and then display a message saying how many times that
# number appears in the list.
import array as ar
import random
import numpy as np
from typing import List
def get_num_int(prompt: str) -> int:
"""Function to check if users input is an integer"""
while True:
try:
number = int(input(prompt))
return number
except Exception as e:
print(e)
def return_index(tp, element) -> List[int]:
"""Returns all the indexes of an element"""
indexes = []
[indexes.append(index) for index, value in enumerate(tp)
if value == element]
return indexes
if __name__ == '__main__':
# Using built in array module
numar = ar.array('i')
for i in range(5):
if i < 4:
numar.append(random.randint(1, 100))
elif i == 4:
numar.append(numar[random.randint(0, 4)])
print(numar)
while True:
user_input = get_num_int('Enter a number from the above array: ')
if user_input in numar:
print(f'Number at indexes - '
f'{return_index(numar, user_input)}')
break
# Using numpy modules
numnp = np.array([], dtype=np.int32)
for i in range(5):
if i < 4:
numnp = np.append(numnp, random.randint(1, 100))
elif i == 4:
numnp = np.append(numnp, numnp[random.randint(0, 4)])
print(numnp)
while True:
user_input = get_num_int(
'Enter a number from the above array: ')
if user_input in numnp:
print(f'Number at indexes - '
f'{return_index(numnp, user_input)}')
break
| [
"random.randint",
"numpy.array",
"array.array"
] | [((912, 925), 'array.array', 'ar.array', (['"""i"""'], {}), "('i')\n", (920, 925), True, 'import array as ar\n'), ((1412, 1440), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int32'}), '([], dtype=np.int32)\n', (1420, 1440), True, 'import numpy as np\n'), ((992, 1014), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (1006, 1014), False, 'import random\n'), ((1520, 1542), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (1534, 1542), False, 'import random\n'), ((1068, 1088), 'random.randint', 'random.randint', (['(0)', '(4)'], {}), '(0, 4)\n', (1082, 1088), False, 'import random\n'), ((1608, 1628), 'random.randint', 'random.randint', (['(0)', '(4)'], {}), '(0, 4)\n', (1622, 1628), False, 'import random\n')] |
import numpy as np
np.random.seed(777)
import chainer
from chainer import cuda
from chainer import optimizers
from chainer import serializers
from tqdm import tqdm
import argparse
import csv
import sys
import os
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument('--original', action='store_true',
default=True, help='train on original MNIST')
group.add_argument('--translated', action='store_true',
default=False, help='train on translated MNIST')
group.add_argument('--cluttered', action='store_true',
default=False, help='train on translated & cluttered MNIST')
parser.add_argument('--lstm', type=bool, default=False,
help='use LSTM units in core layer')
parser.add_argument('-m', '--model', type=str, default=None,
help='load model weights from given file')
parser.add_argument('-r', '--resume', type=str, default=None,
help='resume training with chainer optimizer file')
parser.add_argument('-g', '--gpuid', type=int, default=-1,
help='GPU device ID (default is CPU)')
parser.add_argument('-b', '--batchsize', type=int, default=100,
help='training batch size')
parser.add_argument('-e', '--epoch', type=int, default=1000,
help='iterate training given epoch times')
parser.add_argument('-f', '--filename', type=str, default=None,
help='prefix of output filenames')
args = parser.parse_args()
# load mnist dataset
train, test = chainer.datasets.get_mnist()
train_data, train_targets = np.array(train).transpose()
test_data, test_targets = np.array(test).transpose()
train_data = np.array(list(train_data)).reshape(train_data.shape[0],1,28,28)
test_data = np.array(list(test_data)).reshape(test_data.shape[0],1,28,28)
train_data.flags.writeable = False
train_targets = np.array(train_targets).astype(np.int32)
test_targets = np.array(test_targets).astype(np.int32)
# hyper-params for each task
if args.original:
tasktype = 'original'
# RAM params for original MNIST
g_size = 8
n_steps = 6
n_scales = 1
variance = 0.03
def process(batch):
return batch
if args.translated:
tasktype = 'translated'
g_size = 12
n_steps = 6
n_scales = 3
variance = 0.06
# create translated MNIST
def translate(batch):
n, c, w_i = batch.shape[:3]
w_o = 60
data = np.zeros(shape=(n,c,w_o,w_o), dtype=np.float32)
for k in range(n):
i, j = np.random.randint(0, w_o-w_i, size=2)
data[k, :, i:i+w_i, j:j+w_i] += batch[k]
return data
process = translate
if args.cluttered:
tasktype = 'cluttered'
g_size = 12
n_steps = 6
n_scales = 3
variance = 0.09
# create cluttered MNIST
def clutter(batch):
n, c, w_i = batch.shape[:3]
w_o = 60
data = np.zeros(shape=(n,c,w_o,w_o), dtype=np.float32)
for k in range(n):
i, j = np.random.randint(0, w_o-w_i, size=2)
data[k, :, i:i+w_i, j:j+w_i] += batch[k]
for _ in range(4):
clt = train_data[np.random.randint(0, train_data.shape[0]-1)]
c1, c2 = np.random.randint(0, w_i-8, size=2)
i1, i2 = np.random.randint(0, w_o-8, size=2)
data[k, :, i1:i1+8, i2:i2+8] += clt[:, c1:c1+8, c2:c2+8]
data = np.clip(data, 0., 1.)
return data
process = clutter
# init RAM model and set optimizer
from ram import RAM
model = RAM(g_size=g_size, n_steps=n_steps, n_scales=n_scales,
var=variance, use_lstm=args.lstm)
if not args.lstm:
data = model.core_hh.W.data
data[:] = np.identity(data.shape[0], dtype=np.float32)
lr_base = 1e-2
optimizer = optimizers.NesterovAG(lr=lr_base)
optimizer.use_cleargrads()
optimizer.setup(model)
if args.model is not None:
print('load model from {}'.format(args.model))
serializers.load_hdf5(args.model, model)
if args.resume is not None:
print('load optimizer state from {}'.format(args.resume))
serializers.load_hdf5(args.resume, optimizer)
# GPU/CPU
gpuid = args.gpuid
if gpuid >= 0:
cuda.get_device(gpuid).use()
model.to_gpu()
# logging
if args.filename is not None:
filename = args.filename
else:
import datetime
filename = datetime.datetime.now().strftime('%y%m%d%H%M%S')
with open(filename+'_setting.log', 'a') as f:
f.write(
'task: '+tasktype+' MNIST\n'+
'glimpse size: '+str(g_size)+'\n'+
'number of gimpse scales: '+str(n_scales)+'\n'+
'number of time steps: '+str(n_steps)+'\n'+
'variance of location policy: '+str(variance)+'\n'+
'use LSTMs as core units: '+str(args.lstm)+'\n'+
'training batch size: '+str(args.batchsize)
)
log = open(filename+'_loss.log', 'a')
writer = csv.writer(log, lineterminator='\n')
# get test scores
def test(x, t):
batchsize = 1000
sum_accuracy = sum_loss = 0
with tqdm(total=len(t)) as pbar:
pbar.set_description('test')
for i in range(0, len(t), batchsize):
pbar.update(batchsize)
model(x[i:i+batchsize], t[i:i+batchsize], train=False)
sum_loss += float(model.loss.data)
sum_accuracy += float(model.accuracy.data)
sys.stderr.flush()
return sum_loss*batchsize / len(t), sum_accuracy*batchsize / len(t)
test_data = process(test_data) # generate test data before training
test_data.flags.writeable = False
loss, acc = test(test_data, test_targets)
writer.writerow(('iteration', 'learning rate', 'loss', 'accuracy'))
writer.writerow((optimizer.epoch, lr_base, loss, acc))
log.flush()
sys.stdout.write('test: loss={0:.6f}, accuracy={1:.6f}\n'.format(loss, acc))
sys.stdout.flush()
# optimize weights
batchsize = args.batchsize
n_data = len(train_targets)
for epoch in range(optimizer.epoch+1, args.epoch+1):
optimizer.new_epoch()
sys.stdout.write('(epoch: {})\n'.format(epoch))
sys.stdout.flush()
# drop learning rate on loss plateau
if epoch > 400: optimizer.lr = lr_base * 0.1
if epoch > 800: optimizer.lr = lr_base * 0.01
print('learning rate: {:.3e}'.format(optimizer.lr))
perm = np.random.permutation(n_data)
with tqdm(total=n_data) as pbar:
for i in range(0, n_data, batchsize):
# generate train data on the fly
x = process(train_data[perm[i:i+batchsize]])
t = train_targets[perm[i:i+batchsize]]
optimizer.update(model, x, t)
pbar.set_description(
('train: loss={0:.1e}, b={1:.1e}, r={2:+.1e}').format(
float(model.loss_action.data),
float(model.loss_base.data), float(model.loss_reinforce.data)
)
)
pbar.update(batchsize)
sys.stderr.flush()
# evaluate
loss, acc = test(test_data, test_targets)
writer.writerow((epoch, optimizer.lr, loss, acc))
log.flush()
sys.stdout.write('test: loss={0:.6f}, accuracy={1:.6f}\n'.format(loss, acc))
sys.stdout.flush()
# save model params and optimizer's state
if epoch % 100 == 0:
model_filename = filename+'_epoch{0:d}'.format(epoch)
serializers.save_hdf5(model_filename+'.chainermodel', model)
serializers.save_hdf5(model_filename+'.chaineroptimizer', optimizer)
log.close()
| [
"numpy.random.seed",
"argparse.ArgumentParser",
"numpy.clip",
"numpy.random.randint",
"sys.stdout.flush",
"chainer.optimizers.NesterovAG",
"chainer.serializers.save_hdf5",
"numpy.identity",
"sys.stderr.flush",
"datetime.datetime.now",
"chainer.datasets.get_mnist",
"tqdm.tqdm",
"csv.writer",
... | [((19, 38), 'numpy.random.seed', 'np.random.seed', (['(777)'], {}), '(777)\n', (33, 38), True, 'import numpy as np\n'), ((224, 249), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (247, 249), False, 'import argparse\n'), ((1585, 1613), 'chainer.datasets.get_mnist', 'chainer.datasets.get_mnist', ([], {}), '()\n', (1611, 1613), False, 'import chainer\n'), ((3591, 3683), 'ram.RAM', 'RAM', ([], {'g_size': 'g_size', 'n_steps': 'n_steps', 'n_scales': 'n_scales', 'var': 'variance', 'use_lstm': 'args.lstm'}), '(g_size=g_size, n_steps=n_steps, n_scales=n_scales, var=variance,\n use_lstm=args.lstm)\n', (3594, 3683), False, 'from ram import RAM\n'), ((3830, 3863), 'chainer.optimizers.NesterovAG', 'optimizers.NesterovAG', ([], {'lr': 'lr_base'}), '(lr=lr_base)\n', (3851, 3863), False, 'from chainer import optimizers\n'), ((4910, 4946), 'csv.writer', 'csv.writer', (['log'], {'lineterminator': '"""\n"""'}), "(log, lineterminator='\\n')\n", (4920, 4946), False, 'import csv\n'), ((5812, 5830), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5828, 5830), False, 'import sys\n'), ((3757, 3801), 'numpy.identity', 'np.identity', (['data.shape[0]'], {'dtype': 'np.float32'}), '(data.shape[0], dtype=np.float32)\n', (3768, 3801), True, 'import numpy as np\n'), ((3997, 4037), 'chainer.serializers.load_hdf5', 'serializers.load_hdf5', (['args.model', 'model'], {}), '(args.model, model)\n', (4018, 4037), False, 'from chainer import serializers\n'), ((4133, 4178), 'chainer.serializers.load_hdf5', 'serializers.load_hdf5', (['args.resume', 'optimizer'], {}), '(args.resume, optimizer)\n', (4154, 4178), False, 'from chainer import serializers\n'), ((5364, 5382), 'sys.stderr.flush', 'sys.stderr.flush', ([], {}), '()\n', (5380, 5382), False, 'import sys\n'), ((6043, 6061), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6059, 6061), False, 'import sys\n'), ((6271, 6300), 'numpy.random.permutation', 'np.random.permutation', (['n_data'], {}), 
'(n_data)\n', (6292, 6300), True, 'import numpy as np\n'), ((6888, 6906), 'sys.stderr.flush', 'sys.stderr.flush', ([], {}), '()\n', (6904, 6906), False, 'import sys\n'), ((7124, 7142), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (7140, 7142), False, 'import sys\n'), ((1642, 1657), 'numpy.array', 'np.array', (['train'], {}), '(train)\n', (1650, 1657), True, 'import numpy as np\n'), ((1696, 1710), 'numpy.array', 'np.array', (['test'], {}), '(test)\n', (1704, 1710), True, 'import numpy as np\n'), ((1925, 1948), 'numpy.array', 'np.array', (['train_targets'], {}), '(train_targets)\n', (1933, 1948), True, 'import numpy as np\n'), ((1981, 2003), 'numpy.array', 'np.array', (['test_targets'], {}), '(test_targets)\n', (1989, 2003), True, 'import numpy as np\n'), ((2489, 2539), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n, c, w_o, w_o)', 'dtype': 'np.float32'}), '(shape=(n, c, w_o, w_o), dtype=np.float32)\n', (2497, 2539), True, 'import numpy as np\n'), ((2957, 3007), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n, c, w_o, w_o)', 'dtype': 'np.float32'}), '(shape=(n, c, w_o, w_o), dtype=np.float32)\n', (2965, 3007), True, 'import numpy as np\n'), ((3461, 3484), 'numpy.clip', 'np.clip', (['data', '(0.0)', '(1.0)'], {}), '(data, 0.0, 1.0)\n', (3468, 3484), True, 'import numpy as np\n'), ((6310, 6328), 'tqdm.tqdm', 'tqdm', ([], {'total': 'n_data'}), '(total=n_data)\n', (6314, 6328), False, 'from tqdm import tqdm\n'), ((7285, 7347), 'chainer.serializers.save_hdf5', 'serializers.save_hdf5', (["(model_filename + '.chainermodel')", 'model'], {}), "(model_filename + '.chainermodel', model)\n", (7306, 7347), False, 'from chainer import serializers\n'), ((7354, 7424), 'chainer.serializers.save_hdf5', 'serializers.save_hdf5', (["(model_filename + '.chaineroptimizer')", 'optimizer'], {}), "(model_filename + '.chaineroptimizer', optimizer)\n", (7375, 7424), False, 'from chainer import serializers\n'), ((2583, 2622), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w_o 
- w_i)'], {'size': '(2)'}), '(0, w_o - w_i, size=2)\n', (2600, 2622), True, 'import numpy as np\n'), ((3051, 3090), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w_o - w_i)'], {'size': '(2)'}), '(0, w_o - w_i, size=2)\n', (3068, 3090), True, 'import numpy as np\n'), ((4229, 4251), 'chainer.cuda.get_device', 'cuda.get_device', (['gpuid'], {}), '(gpuid)\n', (4244, 4251), False, 'from chainer import cuda\n'), ((4389, 4412), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4410, 4412), False, 'import datetime\n'), ((3276, 3313), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w_i - 8)'], {'size': '(2)'}), '(0, w_i - 8, size=2)\n', (3293, 3313), True, 'import numpy as np\n'), ((3337, 3374), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w_o - 8)'], {'size': '(2)'}), '(0, w_o - 8, size=2)\n', (3354, 3374), True, 'import numpy as np\n'), ((3206, 3251), 'numpy.random.randint', 'np.random.randint', (['(0)', '(train_data.shape[0] - 1)'], {}), '(0, train_data.shape[0] - 1)\n', (3223, 3251), True, 'import numpy as np\n')] |
'''
Train PredNet on KITTI sequences. (Geiger et al. 2013, http://www.cvlibs.net/datasets/kitti/)
'''
import os
import numpy as np
np.random.seed(123)
from six.moves import cPickle
from keras import backend as K
from keras.models import Model
from keras.layers import Input, Dense, Flatten
from keras.layers import LSTM
from keras.layers import TimeDistributed
from keras.callbacks import LearningRateScheduler, ModelCheckpoint
from keras.optimizers import Adam
from prednet import PredNet
from data_utils import SequenceGenerator
from kitti_settings import *
'''
深度学习中的几个重要超参数:
batchsize:每批数据量的大小。DL通常用SGD的优化算法进行训练,也就是一次(1 个iteration)一起训练batchsize个样本,计算它们的平均损失函数值,来更新参数。
iteration:1个iteration即迭代一次,也就是用batchsize个样本训练一次。
epoch:1个epoch指用训练集中的全部样本训练一次,此时相当于batchsize 等于训练集的样本数。
'''
'''
#把模型的权重值保存起来
'''
save_model = True # if weights will be saved
weights_file = os.path.join(WEIGHTS_DIR, 'prednet_kitti_weights.hdf5') # where weights will be saved
json_file = os.path.join(WEIGHTS_DIR, 'prednet_kitti_model.json')
'''
Data files,数据集文件
'''
train_file = os.path.join(DATA_DIR, 'X_train.hkl')
train_sources = os.path.join(DATA_DIR, 'sources_train.hkl')
val_file = os.path.join(DATA_DIR, 'X_val.hkl')
val_sources = os.path.join(DATA_DIR, 'sources_val.hkl')
'''
Training parameters,训练参数的设置:
nb_epoch表示迭代次数;
batch_size表示XXX大小
samples_per_epoch表示XXX;
N_seq_val表示验证序列的个数
'''
nb_epoch = 150
batch_size = 4
samples_per_epoch = 500
N_seq_val = 100 # number of sequences to use for validation
'''
Model parameters,模型参数:
input_shape中shape指的是张量,即表示从最外层向量逐步到达最底层向量的降维解包过程。
图像在程序中表示一张彩色图片一般都分为(RGB三通道,输入图像高度,输入图像宽度),但是参数顺序有格式问题;
image_data_format表示数据格式问题,channels_first为(通道个数,输入图像攻读,输入图像宽度),channels_last为(输入图像攻读,输入图像宽度,通道个数)
stack_sizes = R_stack_sizes是???
A_filt_sizes是卷积层的大小???
layer_loss_weights代表每一层损失的权重,0层是【1,0,0,0],其他都是【1,0.1,0.1,0.1】
nt是训练的时候使用了前nt张图片
'''
n_channels, im_height, im_width = (3, 128, 160)
input_shape = (n_channels, im_height, im_width) if K.image_data_format() == 'channels_first' else (im_height, im_width, n_channels)
stack_sizes = (n_channels, 48, 96, 192)
R_stack_sizes = stack_sizes
A_filt_sizes = (3, 3, 3)
Ahat_filt_sizes = (3, 3, 3, 3)
R_filt_sizes = (3, 3, 3, 3)
layer_loss_weights = np.array([1., 0., 0., 0.]) # weighting for each layer in final loss; "L_0" model: [1, 0, 0, 0], "L_all": [1, 0.1, 0.1, 0.1]
layer_loss_weights = np.expand_dims(layer_loss_weights, 1)
nt = 10 # number of timesteps used for sequences in training
time_loss_weights = 1./ (nt - 1) * np.ones((nt,1)) # equally weight all timesteps except the first
time_loss_weights[0] = 0
# 利用以上Model parameters初始化prednet网络
prednet = PredNet(stack_sizes, R_stack_sizes,
A_filt_sizes, Ahat_filt_sizes, R_filt_sizes,
output_mode='error', return_sequences=True)
inputs = Input(shape=(nt,) + input_shape) ## 定义输入张量形状(batch_size,序列长,img_row,img_col,img_channels)
errors = prednet(inputs) # errors will be (batch_size, nt, nb_layers),计算各层A与Ahat误差?运行topology中Layer.__call__
# calculate weighted error by layer一个不训练有权重的dense全连接层,实际就是给各Layer的loss加权
errors_by_time = TimeDistributed(Dense(1, trainable=False), weights=[layer_loss_weights, np.zeros(1)], trainable=False)(errors)
# 对batch中每个样本,展平成一维向量
errors_by_time = Flatten()(errors_by_time) # will be (batch_size, nt)
# 一个全连接层,为各时刻error加权重
final_errors = Dense(1, weights=[time_loss_weights, np.zeros(1)], trainable=False)(errors_by_time) # weight errors by time
# keras中的模型主要包括model和weight两个部分:model可以通过json文件保存,保存权重可以通过保存(系数)
model = Model(inputs=inputs, outputs=final_errors)
# 自定义损失函数,参数为损失函数名字+优化器
model.compile(loss='mean_absolute_error', optimizer='adam')
# 根据训练的数据集以及batch size,生成每次epoch迭代的训练数据
train_generator = SequenceGenerator(train_file, train_sources, nt, batch_size=batch_size, shuffle=True)
val_generator = SequenceGenerator(val_file, val_sources, nt, batch_size=batch_size, N_seq=N_seq_val)
# 回调函数:学习率调度器,以epoch号为参数(从0算起的整数),返回一个新学习率(浮点数)。如果epoch < 75,学习率为0.001,否则0.0001
lr_schedule = lambda epoch: 0.001 if epoch < 75 else 0.0001 # start with lr of 0.001 and then drop to 0.0001 after 75 epochs
callbacks = [LearningRateScheduler(lr_schedule)]
if save_model:
if not os.path.exists(WEIGHTS_DIR): os.mkdir(WEIGHTS_DIR)
# 使用回调函数来观察训练过程中网络内部的状态和统计信息
callbacks.append(ModelCheckpoint(filepath=weights_file, monitor='val_loss', save_best_only=True))
# 与fit功能类似,利用Python的生成器,逐个生成数据的batch并进行训练,速度快
history = model.fit_generator(train_generator, samples_per_epoch / batch_size, nb_epoch, callbacks=callbacks,
validation_data=val_generator, validation_steps=N_seq_val / batch_size)
if save_model:
json_string = model.to_json()
with open(json_file, "w") as f:
f.write(json_string)
| [
"os.mkdir",
"numpy.random.seed",
"keras.callbacks.LearningRateScheduler",
"keras.backend.image_data_format",
"keras.callbacks.ModelCheckpoint",
"data_utils.SequenceGenerator",
"keras.layers.Flatten",
"numpy.expand_dims",
"keras.models.Model",
"numpy.ones",
"prednet.PredNet",
"os.path.exists",
... | [((132, 151), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (146, 151), True, 'import numpy as np\n'), ((880, 935), 'os.path.join', 'os.path.join', (['WEIGHTS_DIR', '"""prednet_kitti_weights.hdf5"""'], {}), "(WEIGHTS_DIR, 'prednet_kitti_weights.hdf5')\n", (892, 935), False, 'import os\n'), ((979, 1032), 'os.path.join', 'os.path.join', (['WEIGHTS_DIR', '"""prednet_kitti_model.json"""'], {}), "(WEIGHTS_DIR, 'prednet_kitti_model.json')\n", (991, 1032), False, 'import os\n'), ((1072, 1109), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""X_train.hkl"""'], {}), "(DATA_DIR, 'X_train.hkl')\n", (1084, 1109), False, 'import os\n'), ((1126, 1169), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""sources_train.hkl"""'], {}), "(DATA_DIR, 'sources_train.hkl')\n", (1138, 1169), False, 'import os\n'), ((1181, 1216), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""X_val.hkl"""'], {}), "(DATA_DIR, 'X_val.hkl')\n", (1193, 1216), False, 'import os\n'), ((1231, 1272), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""sources_val.hkl"""'], {}), "(DATA_DIR, 'sources_val.hkl')\n", (1243, 1272), False, 'import os\n'), ((2253, 2283), 'numpy.array', 'np.array', (['[1.0, 0.0, 0.0, 0.0]'], {}), '([1.0, 0.0, 0.0, 0.0])\n', (2261, 2283), True, 'import numpy as np\n'), ((2400, 2437), 'numpy.expand_dims', 'np.expand_dims', (['layer_loss_weights', '(1)'], {}), '(layer_loss_weights, 1)\n', (2414, 2437), True, 'import numpy as np\n'), ((2671, 2799), 'prednet.PredNet', 'PredNet', (['stack_sizes', 'R_stack_sizes', 'A_filt_sizes', 'Ahat_filt_sizes', 'R_filt_sizes'], {'output_mode': '"""error"""', 'return_sequences': '(True)'}), "(stack_sizes, R_stack_sizes, A_filt_sizes, Ahat_filt_sizes,\n R_filt_sizes, output_mode='error', return_sequences=True)\n", (2678, 2799), False, 'from prednet import PredNet\n'), ((2842, 2874), 'keras.layers.Input', 'Input', ([], {'shape': '((nt,) + input_shape)'}), '(shape=(nt,) + input_shape)\n', (2847, 2874), False, 'from keras.layers import 
Input, Dense, Flatten\n'), ((3558, 3600), 'keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'final_errors'}), '(inputs=inputs, outputs=final_errors)\n', (3563, 3600), False, 'from keras.models import Model\n'), ((3744, 3833), 'data_utils.SequenceGenerator', 'SequenceGenerator', (['train_file', 'train_sources', 'nt'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(train_file, train_sources, nt, batch_size=batch_size,\n shuffle=True)\n', (3761, 3833), False, 'from data_utils import SequenceGenerator\n'), ((3846, 3935), 'data_utils.SequenceGenerator', 'SequenceGenerator', (['val_file', 'val_sources', 'nt'], {'batch_size': 'batch_size', 'N_seq': 'N_seq_val'}), '(val_file, val_sources, nt, batch_size=batch_size, N_seq=\n N_seq_val)\n', (3863, 3935), False, 'from data_utils import SequenceGenerator\n'), ((2535, 2551), 'numpy.ones', 'np.ones', (['(nt, 1)'], {}), '((nt, 1))\n', (2542, 2551), True, 'import numpy as np\n'), ((3283, 3292), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3290, 3292), False, 'from keras.layers import Input, Dense, Flatten\n'), ((4154, 4188), 'keras.callbacks.LearningRateScheduler', 'LearningRateScheduler', (['lr_schedule'], {}), '(lr_schedule)\n', (4175, 4188), False, 'from keras.callbacks import LearningRateScheduler, ModelCheckpoint\n'), ((1999, 2020), 'keras.backend.image_data_format', 'K.image_data_format', ([], {}), '()\n', (2018, 2020), True, 'from keras import backend as K\n'), ((3149, 3174), 'keras.layers.Dense', 'Dense', (['(1)'], {'trainable': '(False)'}), '(1, trainable=False)\n', (3154, 3174), False, 'from keras.layers import Input, Dense, Flatten\n'), ((4216, 4243), 'os.path.exists', 'os.path.exists', (['WEIGHTS_DIR'], {}), '(WEIGHTS_DIR)\n', (4230, 4243), False, 'import os\n'), ((4245, 4266), 'os.mkdir', 'os.mkdir', (['WEIGHTS_DIR'], {}), '(WEIGHTS_DIR)\n', (4253, 4266), False, 'import os\n'), ((4321, 4400), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': 'weights_file', 
'monitor': '"""val_loss"""', 'save_best_only': '(True)'}), "(filepath=weights_file, monitor='val_loss', save_best_only=True)\n", (4336, 4400), False, 'from keras.callbacks import LearningRateScheduler, ModelCheckpoint\n'), ((3205, 3216), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (3213, 3216), True, 'import numpy as np\n'), ((3412, 3423), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (3420, 3423), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
data_dir = '/home/abel/Documents/graphics/ADL/efficiency/'
#num_frames_train = [1567, 2323, 3041, 3727, 4376, 4982, 5541, 6050, 6525, 6971]
num_frames_train = [1.567, 2.323, 3.041, 3.727, 4.376, 4.982, 5.541, 6.050, 6.525, 6.971]
num_frames_eval = [118, 112, 106, 100, 94, 88, 82, 76, 70, 64]
time_train = [1.1, 1.7, 2.2, 2.7, 3.2, 3.7, 4.1, 4.5, 4.8, 5.2]
time_eval = [4.7, 4.4, 4.2, 4, 3.7, 3.5, 3.2, 3.0, 2.8, 2.5]
time_track = np.linspace(5,4,10)
time_total = time_train + time_eval
num_cycles = len(num_frames_train)
index = np.arange(num_cycles)
### Timings
bar_width = 0.2
fig, ax = plt.subplots()
rects1 = ax.bar(index, time_train, bar_width, color='b',label='Train on labeled')
rects2 = ax.bar(index+bar_width, time_eval, bar_width, color='r',label='Evaluate on unlabeled')
rects3 = ax.bar(index+2*bar_width, time_track, bar_width, color='g',label='Track detections')
ax.set_xlabel('Cycle')
ax.set_ylabel('Time (hours)')
ax.set_title('Active learning timing distribution')
ax.set_xticks(index+bar_width/3)
ax.set_xticklabels(range(1,11))
ax.legend()
plt.savefig(data_dir+'times')
plt.show()
### Number of frames
bar_width = 0.3
fig, ax = plt.subplots()
rects1 = ax.bar(index,num_frames_train, bar_width, color='b', label='Labeled')
rects2 = ax.bar(index+bar_width,num_frames_eval, bar_width, color='r', label='Unlabeled')
ax.set_xlabel('Cycle')
ax.set_ylabel('Number of frames (K)')
ax.set_title('Number of frames')
ax.set_xticks(index+bar_width/2)
ax.set_xticklabels(range(1,11))
ax.legend()
plt.savefig(data_dir+'frames')
plt.show()
| [
"matplotlib.pyplot.show",
"numpy.arange",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] | [((490, 511), 'numpy.linspace', 'np.linspace', (['(5)', '(4)', '(10)'], {}), '(5, 4, 10)\n', (501, 511), True, 'import numpy as np\n'), ((591, 612), 'numpy.arange', 'np.arange', (['num_cycles'], {}), '(num_cycles)\n', (600, 612), True, 'import numpy as np\n'), ((653, 667), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (665, 667), True, 'import matplotlib.pyplot as plt\n'), ((1126, 1157), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(data_dir + 'times')"], {}), "(data_dir + 'times')\n", (1137, 1157), True, 'import matplotlib.pyplot as plt\n'), ((1157, 1167), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1165, 1167), True, 'import matplotlib.pyplot as plt\n'), ((1219, 1233), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1231, 1233), True, 'import matplotlib.pyplot as plt\n'), ((1576, 1608), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(data_dir + 'frames')"], {}), "(data_dir + 'frames')\n", (1587, 1608), True, 'import matplotlib.pyplot as plt\n'), ((1608, 1618), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1616, 1618), True, 'import matplotlib.pyplot as plt\n')] |
from controller import read_metric, read_knob, set_knob, knob_set, init_knobs, load_workload, run_workload, calc_metric, restart_beaver_datanode
from gpmodel import configuration_recommendation
from datamodel import GPDataSet
from settings import beaver_broker_ip, beaver_broker_port, target_knob_set, target_metric_name, wl_metrics, wltype, loadtype
import numpy as np
import time
if __name__ == '__main__':
ds = GPDataSet()
Round=200
init_knobs()
metric_list=wl_metrics[wltype]
ds.initdataset(metric_list)
num_knobs = len(target_knob_set)
num_metrics = len(metric_list)
KEY = str(time.time())
while(Round>0):
print("################## start a new Round ##################")
rec = configuration_recommendation(ds)
knob_cache = {}
for x in rec.keys():
set_knob(x, rec[x])
knob_cache[x] = rec[x]
print("Round: ", Round, rec)
restart_beaver_datanode()
# lres = load_workload(loadtype)
# print(lres)
# if("_ERROR" in lres):
# print("load workload error")
# exit()
new_knob_set = np.zeros([1, num_knobs])
new_metric_before = np.zeros([1, num_metrics])
new_metric_after = np.zeros([1, num_metrics])
for i,x in enumerate(metric_list):
new_metric_before[0][i] = read_metric(x)
for i,x in enumerate(target_knob_set):
new_knob_set[0][i] = read_knob(x, knob_cache)
rres = None
# rres = run_workload(wltype)
# print(rres)
# if("_ERROR" in rres):
# print("run workload error")
# exit()
for i,x in enumerate(metric_list):
new_metric_after[0][i] = read_metric(x, rres)
new_metric = calc_metric(new_metric_after, new_metric_before, metric_list)
# print(new_metric,metric_list)
ds.add_new_data(new_knob_set, new_metric)
import pickle
fp = "ds_"+KEY+"_"+str(Round)+"_.pkl"
with open(fp, "wb") as f:
pickle.dump(ds, f)
ds.printdata()
ds.merge_new_data()
Round-=1
| [
"pickle.dump",
"controller.read_knob",
"numpy.zeros",
"datamodel.GPDataSet",
"time.time",
"controller.set_knob",
"gpmodel.configuration_recommendation",
"controller.restart_beaver_datanode",
"controller.init_knobs",
"controller.read_metric",
"controller.calc_metric"
] | [((419, 430), 'datamodel.GPDataSet', 'GPDataSet', ([], {}), '()\n', (428, 430), False, 'from datamodel import GPDataSet\n'), ((449, 461), 'controller.init_knobs', 'init_knobs', ([], {}), '()\n', (459, 461), False, 'from controller import read_metric, read_knob, set_knob, knob_set, init_knobs, load_workload, run_workload, calc_metric, restart_beaver_datanode\n'), ((617, 628), 'time.time', 'time.time', ([], {}), '()\n', (626, 628), False, 'import time\n'), ((737, 769), 'gpmodel.configuration_recommendation', 'configuration_recommendation', (['ds'], {}), '(ds)\n', (765, 769), False, 'from gpmodel import configuration_recommendation\n'), ((937, 962), 'controller.restart_beaver_datanode', 'restart_beaver_datanode', ([], {}), '()\n', (960, 962), False, 'from controller import read_metric, read_knob, set_knob, knob_set, init_knobs, load_workload, run_workload, calc_metric, restart_beaver_datanode\n'), ((1146, 1170), 'numpy.zeros', 'np.zeros', (['[1, num_knobs]'], {}), '([1, num_knobs])\n', (1154, 1170), True, 'import numpy as np\n'), ((1199, 1225), 'numpy.zeros', 'np.zeros', (['[1, num_metrics]'], {}), '([1, num_metrics])\n', (1207, 1225), True, 'import numpy as np\n'), ((1253, 1279), 'numpy.zeros', 'np.zeros', (['[1, num_metrics]'], {}), '([1, num_metrics])\n', (1261, 1279), True, 'import numpy as np\n'), ((1783, 1844), 'controller.calc_metric', 'calc_metric', (['new_metric_after', 'new_metric_before', 'metric_list'], {}), '(new_metric_after, new_metric_before, metric_list)\n', (1794, 1844), False, 'from controller import read_metric, read_knob, set_knob, knob_set, init_knobs, load_workload, run_workload, calc_metric, restart_beaver_datanode\n'), ((835, 854), 'controller.set_knob', 'set_knob', (['x', 'rec[x]'], {}), '(x, rec[x])\n', (843, 854), False, 'from controller import read_metric, read_knob, set_knob, knob_set, init_knobs, load_workload, run_workload, calc_metric, restart_beaver_datanode\n'), ((1362, 1376), 'controller.read_metric', 'read_metric', (['x'], {}), 
'(x)\n', (1373, 1376), False, 'from controller import read_metric, read_knob, set_knob, knob_set, init_knobs, load_workload, run_workload, calc_metric, restart_beaver_datanode\n'), ((1458, 1482), 'controller.read_knob', 'read_knob', (['x', 'knob_cache'], {}), '(x, knob_cache)\n', (1467, 1482), False, 'from controller import read_metric, read_knob, set_knob, knob_set, init_knobs, load_workload, run_workload, calc_metric, restart_beaver_datanode\n'), ((1740, 1760), 'controller.read_metric', 'read_metric', (['x', 'rres'], {}), '(x, rres)\n', (1751, 1760), False, 'from controller import read_metric, read_knob, set_knob, knob_set, init_knobs, load_workload, run_workload, calc_metric, restart_beaver_datanode\n'), ((2052, 2070), 'pickle.dump', 'pickle.dump', (['ds', 'f'], {}), '(ds, f)\n', (2063, 2070), False, 'import pickle\n')] |
import numpy as np

# Fixed set of fourteen 3-D sample points, kept as a float64 array.
# Useful as a deterministic fixture for geometry / clustering tests.
_RAW_POINTS = [
    (4, -1, 3), (1, -3, 0), (-2, 0, -6), (0, 0, 0),
    (-3, -2, -5), (-3, -1, 8), (-4, -2, 3), (4, 0, 1),
    (-2, 1, 1), (4, 1, 6), (-4, 4, -1), (-5, 3, 3),
    (-1, 3, 2), (2, -3, -5),
]
dummy_points = np.array(_RAW_POINTS, dtype=np.float64)
| [
"numpy.array"
] | [((36, 244), 'numpy.array', 'np.array', (['[[4, -1, 3], [1, -3, 0], [-2, 0, -6], [0, 0, 0], [-3, -2, -5], [-3, -1, 8],\n [-4, -2, 3], [4, 0, 1], [-2, 1, 1], [4, 1, 6], [-4, 4, -1], [-5, 3, 3],\n [-1, 3, 2], [2, -3, -5]]'], {'dtype': 'np.float64'}), '([[4, -1, 3], [1, -3, 0], [-2, 0, -6], [0, 0, 0], [-3, -2, -5], [-3,\n -1, 8], [-4, -2, 3], [4, 0, 1], [-2, 1, 1], [4, 1, 6], [-4, 4, -1], [-5,\n 3, 3], [-1, 3, 2], [2, -3, -5]], dtype=np.float64)\n', (44, 244), True, 'import numpy as np\n')] |
#! /usr/bin/python3
import sys
import argparse
import logging
import numpy as np
def parse_args():
    """Parse command-line options; a float ``--exp`` argument is required."""
    cli = argparse.ArgumentParser()
    cli.add_argument(
        "--exp",
        type=float,
        required=True,
        help="Exponent for smoothing",
    )
    return cli.parse_args()
def main():
    """Read whitespace-separated weights from stdin, raise each weight to
    the ``--exp`` power, and write the transformed line to stdout.

    Fixes: the original split on a single space (``split(" ")``), which
    crashed with ``float("")`` on blank lines or runs of spaces; ``split()``
    handles arbitrary whitespace, and empty lines are skipped.
    """
    args = parse_args()
    logging.basicConfig(level=logging.DEBUG)
    logging.debug(args)
    for line in sys.stdin:
        fields = line.split()
        if not fields:
            # Blank input line — nothing to transform.
            continue
        weights = np.asarray([float(w) for w in fields])
        weights **= args.exp
        as_string = " ".join(str(w) for w in weights)
        sys.stdout.write(as_string + "\n")


if __name__ == '__main__':
    main()
| [
"sys.stdout.write",
"logging.debug",
"argparse.ArgumentParser",
"logging.basicConfig",
"numpy.asarray"
] | [((116, 141), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (139, 141), False, 'import argparse\n'), ((327, 367), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (346, 367), False, 'import logging\n'), ((372, 391), 'logging.debug', 'logging.debug', (['args'], {}), '(args)\n', (385, 391), False, 'import logging\n'), ((500, 519), 'numpy.asarray', 'np.asarray', (['weights'], {}), '(weights)\n', (510, 519), True, 'import numpy as np\n'), ((615, 649), 'sys.stdout.write', 'sys.stdout.write', (["(as_string + '\\n')"], {}), "(as_string + '\\n')\n", (631, 649), False, 'import sys\n')] |
"""
sepp_base
~~~~~~~~~
A more abstract approach to SEPP type algorithms.
"""
from . import predictors
from . import logger as _ocp_logger
from . import data as _ocp_data
import numpy as _np
import datetime as _datetime
import logging as _logging
# Module-level logger; passed to ProgressLogger by non_normalised_p_matrix.
_logger = _logging.getLogger(__name__)
class ModelBase():
    """Interface for a "model".

    We use the convention that the data is always an array of shape `(3,N)`
    formed of `[times, x, y]` where `times` is an increasing sequence of
    numbers from 0.
    """

    def background(self, points):
        """Evaluate the background kernel at `points`. If `points` is of
        shape `(3,N)` then should return an array of shape `(N,)`.

        :return: Array of shape `(N,)`
        """
        raise NotImplementedError()

    def trigger(self, trigger_point, delta_points):
        """We allow quite general trigger risk kernels which can depend on the
        "trigger point" as well as the delta between the trigger and triggered
        events.

        :param trigger_point: Array of shape `(3,)` specifying the `(t, x, y)`
          coords of the (single) trigger event.
        :param delta_points: Array of shape `(3,m)` specifying the deltas to
          the triggered events.  Add to `trigger_point` to get the absolute
          location of the triggered events.

        :return: Array of shape `(m,)`
        """
        raise NotImplementedError()

    def log_likelihood_base(self, points):
        """Computes the non-normalised log likelihood,
        :math:`\sum_{i=1}^n \log \lambda^*(t_i,x_i,y_i)`.  The normalisation
        requires integrating, which is best left to a concrete subclass.
        """
        data = _np.asarray(points)
        total = 0.0
        for i in range(data.shape[1]):
            event = data[:, i]
            column = event[:, None]
            # Intensity at this event = background + sum of triggers from
            # all strictly earlier events.
            intensity = self.background(column)[0]
            intensity += _np.sum(self.trigger(event, column - data[:, :i]))
            total += _np.log(intensity)
        return total
class FastModel():
    """An interface for a "fast" model: the trigger kernel factors into a
    product of a time kernel and a space kernel, and the background risk does
    not vary in time.  Subclasses must implement all three methods.
    """
    def time_trigger(self, times):
        """Return the time kernel (and, by convention, the overall rate as
        well).
        :param times: Array of shape `(n,)` of times into the past.
        :return: Array of shape `(n,)` giving intensity at these times.
        """
        raise NotImplementedError()
    def space_trigger(self, space_points):
        """Return the space kernel (by convention, is a probability kernel).
        :param space_points: Array of shape `(2,n)` of space locations.
        :return: Array of shape `(n,)` giving intensity at these places.
        """
        raise NotImplementedError()
    def background_in_space(self, space_points):
        """Return the background risk, which is assumed not to vary in time.
        :param space_points: Array of shape `(2,n)` of space locations.
        :return: Array of shape `(n,)` giving intensity at these places.
        """
        raise NotImplementedError()
class PredictorBase():
    """Base class which can perform "predictions". Predictions are formed by
    evaluating the intensity (background and triggers) at one or more time
    points and averaging.
    :param model: The :class:`ModelBase` object to get the trigger and
      background from.
    :param points: Usual array of shape `(3,N)`
    """
    def __init__(self, model, points):
        self._model = model
        self._points = _np.asarray(points)
    @property
    def model(self):
        # The model supplying the background and trigger kernels.
        return self._model
    @property
    def points(self):
        # The `(3,N)` event array used as potential trigger points.
        return self._points
    def background_predict(self, time, space_points):
        """Find a point prediction at one time and one or more locations.
        Ignores triggers, and only uses the background intensity.
        :param time: Time point to evaluate at
        :param space_points: Array of shape `(2,n)`
        :return: Array of shape `(n,)`
        """
        space_points = _np.asarray(space_points)
        if len(space_points.shape) == 1:
            # Accept a single `(2,)` point; promote to shape `(2,1)`.
            space_points = space_points[:,None]
        eval_points = _np.asarray([[time] * space_points.shape[1],
                space_points[0], space_points[1]])
        out = self._model.background(eval_points)
        return out
    def point_predict(self, time, space_points):
        """Find a point prediction at one time and one or more locations.
        The data the class holds will be clipped to be before `time` and
        the used as the trigger events.
        :param time: Time point to evaluate at
        :param space_points: Array of shape `(2,n)`
        :return: Array of shape `(n,)`
        """
        space_points = _np.asarray(space_points)
        if len(space_points.shape) == 1:
            # Accept a single `(2,)` point; promote to shape `(2,1)`.
            space_points = space_points[:,None]
        eval_points = _np.asarray([[time] * space_points.shape[1],
                space_points[0], space_points[1]])
        out = self._model.background(eval_points)
        # Only events strictly before `time` can act as triggers.
        data = self._points[:,self._points[0] < time]
        for i, pt in enumerate(eval_points.T):
            out[i] += _np.sum(self._model.trigger(pt, pt[:,None] - data))
        return out
    def range_predict(self, time_start, time_end, space_points, samples=20):
        # Average `point_predict` over `samples` evenly spaced times in
        # [time_start, time_end] (inclusive of both endpoints).
        if not time_start < time_end:
            raise ValueError()
        out = self.point_predict(time_start, space_points)
        for i in range(1, samples):
            t = time_start + (time_end - time_start) * i / (samples - 1)
            n = self.point_predict(t, space_points)
            out = out + n
        return out / samples
    def to_fast_split_predictor(self):
        """Return a new instance of a "predictor" which better performance if
        the model conforms to the interface :class:`FastModel`.
        """
        return FastPredictorBase(self._model)
    def to_fast_split_predictor_histogram(self, grid, time_bin_size=1, space_bin_size=25):
        """Return a new instance of a "predictor" which offers faster
        predictions by using approximations.
        Currently we assume the the trigger intensity does not vary with
        starting position, and that it "factors" into a product of a time
        kernel and a space kernel.  The model must conform to the
        :class:`FastModel` interface.  We also assume that the background
        intensity does not vary in time.
        :param time_bin_size: Size of bins for the histogram we use to
          approximate the time kernel.
        :param space_bin_size: Size of bins for the two dimensional histogram
          we use to approximate the space kernel.
        :param grid: The grid to base the background estimate on: for best
          results, this should be the same grid you will eventually make
          predictions for.
        """
        return FastPredictorHist(self._model,
            self._to_time_hist(time_bin_size), time_bin_size,
            self._to_space_grid(space_bin_size), self._to_background(grid))
    def _to_background(self, grid):
        # Sample the (time-invariant) background kernel onto `grid`.
        # NOTE(review): `samples = -5` presumably selects a sampling mode of
        # the predictors module -- confirm against its documentation.
        cts_pred = predictors.KernelRiskPredictor(self._model.background_in_space)
        cts_pred.samples = -5
        return predictors.grid_prediction(cts_pred, grid)
    def _to_space_grid(self, space_bin_size):
        # Double the grid half-width (in bins) until essentially all of the
        # space trigger kernel's mass is captured inside the grid.
        size = 5
        while True:
            d = size * space_bin_size
            region = _ocp_data.RectangularRegion(xmin=-d, ymin=-d, xmax=d, ymax=d)
            pred = predictors.grid_prediction_from_kernel(self._model.space_trigger,
                region, space_bin_size, samples=-5)
            mat = pred.intensity_matrix
            sorted_mat = _np.sort(mat.flatten())
            cs = _np.cumsum(sorted_mat)
            if not _np.any(cs <= cs[-1]*.001):
                # No cell falls in the bottom 0.1% of total mass, so the
                # kernel has not decayed inside this grid: enlarge and retry.
                size += size
                continue
            # `cutoff` is the largest cell value still within the 0.1% tail.
            sorted_index = _np.max(_np.where(cs <= cs[-1]*.001))
            cutoff = sorted_mat[sorted_index]
            mask = (pred.intensity_matrix <= cutoff)
            # Accept only if all cells outside a disc of radius 80% of the
            # half-width are negligible; otherwise double the size and retry.
            r = int(size*80/100)
            x = _np.broadcast_to(_np.arange(size*2)[:,None], (size*2, size*2))
            y = _np.broadcast_to(_np.arange(size*2)[None,:], (size*2, size*2))
            disc = _np.sqrt((x-size)**2 + (y-size)**2) >= r
            if _np.all(mask[disc]):
                return pred
            size += size
    def _to_time_hist(self, time_bin_size):
        # Double the number of time bins until the tail (last 20% of bins)
        # of the time trigger kernel is negligible, then truncate the
        # histogram just past its last non-negligible bin.
        size = 100
        while True:
            hist = self._model.time_trigger(_np.arange(size) * time_bin_size)
            sorted_hist = _np.sort(hist)
            cs = _np.cumsum(sorted_hist)
            if not _np.any(cs <= cs[-1]*.001):
                # Kernel has not decayed within this horizon: enlarge.
                size += size
                continue
            sorted_index = _np.max(_np.where(cs <= cs[-1]*.001))
            cutoff = sorted_hist[sorted_index]
            mask = (hist <= cutoff)
            index_start = int(size * 80 / 100)
            if _np.all(mask[index_start:]):
                index_end = _np.max(_np.where(~mask))
                return hist[:index_end+1]
            size += size
class FastPredictorBase():
    """Base class which can perform fast "predictions" by assuming that the
    background rate does not vary in time, and that the trigger kernel factors.

    :param model: The :class:`FastModel` object to get the trigger and
      background from.
    """
    def __init__(self, model):
        self._model = model
        # Fix: `_points` was never initialised, so reading :attr:`points`
        # before assigning it raised `AttributeError`.  Default to `None`.
        self._points = None

    @property
    def model(self):
        """The model we base predictions on."""
        return self._model

    @property
    def points(self):
        """Points in the past we use as triggers, or `None` if not yet set."""
        return self._points

    @points.setter
    def points(self, v):
        self._points = v

    def time_kernel(self, times):
        """Trigger intensity as a function of time gap into the past."""
        return self._model.time_trigger(times)

    def space_kernel(self, pts):
        """Trigger intensity as a function of spatial displacement."""
        return self._model.space_trigger(pts)

    def background_kernel(self, pts):
        """Time-invariant background intensity at locations `pts`."""
        return self._model.background_in_space(pts)

    def range_predict(self, time_start, time_end, space_points, time_samples=5):
        """Predict intensity at `space_points`, averaged over the window
        `[time_start, time_end]` using `time_samples` evenly-spaced times.

        :param space_points: Array of shape `(2,n)` (or `(2,)` for one point).
        :return: Array of shape `(n,)` of predicted intensities.
        """
        space_points = _np.asarray(space_points)
        if len(space_points.shape) == 1:
            space_points = space_points[:,None]
        # Only events strictly before the window can act as triggers.
        data = self._points[:,self._points[0] < time_start]
        tl = space_points.shape[-1] * data.shape[-1]
        # Spatial displacement from each historic event to each eval point.
        pts = (space_points[:,:,None] - data[1:,None,:]).reshape((2,tl))
        space_triggers = self.space_kernel(pts).reshape(space_points.shape[-1], data.shape[-1])
        times = _np.linspace(time_start, time_end, time_samples)
        dtimes = (times[None,:] - data[0][:,None])
        time_triggers = self.time_kernel(dtimes.flatten()).reshape(dtimes.shape)
        # Average the time kernel over the window (approximates the integral).
        time_triggers = _np.mean(time_triggers, axis=1)
        return self.background_kernel(space_points) + _np.sum(space_triggers * time_triggers[None,:], axis=1)
class FastPredictorHist(FastPredictorBase):
    """Base class which can perform fast "predictions", based on using
    histograms to approximate the kernels.
    :param model: The :class:`FastModel` object to get the trigger and
      background from.
    :param time_hist: Array of shape `(k,)` giving the time kernel.
    :param time_bandwidth: Width of each bin in the time histogram.
    :param space_grid: Instance of :class:`GridPredictionArray` to use as an
      approximation to the space kernel.
    :param background_grid: Instance of :class:`GridPredictionArray` to use as an
      approximation to the (time-invariant) background rate.
    """
    def __init__(self, model, time_hist, time_bandwidth, space_grid, background_grid):
        super().__init__(model)
        self._time = (time_hist, time_bandwidth)
        self._space_grid = space_grid
        self._background_grid = background_grid
    @property
    def time_histogram_width(self):
        """The width of each bar in the time histogram."""
        return self._time[1]
    @property
    def time_histogram(self):
        """An array giving the height of each bar in the time histogram."""
        return self._time[0]
    @property
    def space_grid(self):
        """The grid array we use for approximating the space kernel."""
        return self._space_grid
    def time_kernel(self, times):
        """Look each time up in the histogram; times beyond the last bar
        give intensity 0."""
        times = _np.atleast_1d(times)
        # Fix: `_np.int` was removed in NumPy 1.24; use the builtin `int`.
        indices = _np.floor_divide(times, self._time[1]).astype(int)
        m = indices < self._time[0].shape[0]
        out = _np.empty(times.shape)
        out[m] = self._time[0][indices[m]]
        out[~m] = 0
        return out
    def space_kernel(self, pts):
        return self._space_grid.risk(*pts)
    def background_kernel(self, pts):
        return self._background_grid.risk(*pts)
def non_normalised_p_matrix(model, points):
    """Build the un-normalised "p" matrix: diagonal entries are background
    intensities, entry `(j, i)` for `j < i` is the trigger intensity from
    event `j` to event `i` (zero when the time delta is not positive).

    :param model: Instance of :class:`ModelBase`.
    :param points: Usual data array of shape `(3,N)`.
    :return: Upper-triangular array of shape `(N,N)`.
    """
    n = points.shape[1]
    p = _np.zeros((n, n))
    # Progress is reported at most every 10 seconds; total work is the
    # number of upper-triangular entries.
    progress = _ocp_logger.ProgressLogger(
        n * (n + 1) / 2, _datetime.timedelta(seconds=10), _logger)
    p[_np.diag_indices(n)] = model.background(points)
    progress.add_to_count(n)
    for col in range(n):
        pivot = points[:, col]
        deltas = pivot[:, None] - points[:, :col]
        later = deltas[0] > 0
        p[:col, col][later] = model.trigger(pivot, deltas[:, later])
        p[:col, col][~later] = 0
        progress.add_to_count(col)
    return p
def normalise_p(p):
    """Normalise each column of `p` so it sums to one.

    :raises ValueError: if any column sums to zero.
    :return: A new array; `p` is not modified.
    """
    column_totals = _np.sum(p, axis=0)[None, :]
    if _np.any(column_totals == 0):
        raise ValueError("Zero column in p matrix", p)
    return p / column_totals
def p_matrix(model, points):
    """Compute the normalised "p" matrix.
    :param model: Instance of :class:`ModelBase`
    :param points: Data
    """
    return normalise_p(non_normalised_p_matrix(model, points))
def clamp_p(p, cutoff = 99.9):
    """Return a copy of `p` where, in each column, the smallest entries are
    zeroed for as long as their running sum stays below the
    `(100 - cutoff)%` tail mass.  `p` itself is not modified.
    """
    clamped = _np.array(p)
    tail_mass = 1 - cutoff / 100
    for col in range(1, clamped.shape[1]):
        column = clamped[:col + 1, col]      # view: writes hit `clamped`
        order = _np.argsort(column)
        running = _np.cumsum(column[order])
        kill = _np.sum(running < tail_mass)
        column[order[:kill]] = 0
    return clamped
class Optimiser():
    """We cannot know all models and how to optimise them, but we provide some
    helper routines.

    :param model: Instance of :class:`ModelBase`.
    :param points: Usual data array of shape `(3,N)`.
    :param make_p: If `True` (default), compute the normalised p matrix now.
    """
    def __init__(self, model, points, make_p=True):
        self._logger = _logging.getLogger(__name__)
        self._model = model
        self._points = points
        if make_p:
            self._p = _np.asarray( p_matrix(model, points) )
            if _np.any(self._p < 0):
                # Fix: message previously read "p should ve +ve".
                raise ValueError("p should be +ve")

    @property
    def p(self):
        """The p matrix"""
        return self._p

    @property
    def model(self):
        return self._model

    @property
    def points(self):
        return self._points

    @property
    def num_points(self):
        return self._points.shape[1]

    @property
    def p_diag(self):
        """The diagonal of the p matrix."""
        d = self._points.shape[1]
        return self._p[_np.diag_indices(d)]

    @property
    def p_diag_sum(self):
        return _np.sum(self.p_diag)

    @property
    def p_upper_tri_sum(self):
        out = 0.0
        for i in range(1, self._p.shape[0]):
            out += _np.sum(self._p[:i, i])
        if abs(out) < 1e-10:
            # Fix: `Logger.warn` is deprecated; use `warning`.
            self._logger.warning("p-matrix has become diagonal-- no repeat behaviour!")
        return out

    def upper_tri_col(self, col):
        return self._p[:col, col]

    def diff_col_times(self, col):
        """`times[col] - times[:col]`"""
        return self._points[0, col] - self._points[0, :col]

    def diff_col_points(self, col):
        """`xypoints[col] - xypoints[:col]`"""
        return self._points[1:, col][:,None] - self._points[1:, :col]

    def sample(self):
        """Use the p-matrix to take a "sample", returning background events
        and triggered events.

        :return: Pair `(bk_indices, trigger_pairs)` where `bk_indices` are
          indices into :attr:`points` giving the sampled background events,
          and `trigger_pairs` is a list of pairs `(trigger, triggered)` where
          `trigger` is the trigger index, and `triggered` if the (later) index
          of the event which is triggered.
        """
        bk, tr = [], []
        for i in range(self.num_points):
            j = _np.random.choice(i+1, p=self.p[:i+1,i])
            if i==j:
                bk.append(i)
            else:
                tr.append((j,i))
        return bk, tr

    def sample_to_points(self):
        """Use the p-matrix to take a "sample", returning background events
        and triggered events.

        :return: Pair `(bk_points, trigger_deltas)` both arrays of points,
          `bk_points` being the background events, and `trigger_deltas` being
          the "jumps" from the triggering to the triggered events.
        """
        bk, tr = self.sample()
        # Fix: `_np.int` was removed in NumPy 1.24; use the builtin `int`.
        bk = _np.array(bk, dtype=int)
        bk_points = self._points[:, bk]
        trigger_deltas = [self._points[:,end] - self._points[:,start]
            for start, end in tr]
        return bk_points, _np.asarray(trigger_deltas).T

    def iterate(self):
        """Abstract method to be over-riden.  Should return a new `model`."""
        raise NotImplementedError()
class _BaseTrainer(predictors.DataTrainer):
    """Shared base for trainers/predictors: holds the time unit used to map
    real timestamps to abstract (float) times, and converts the raw data.
    """
    def __init__(self):
        self.time_unit = _np.timedelta64(1, "D")
        self._logger = _logging.getLogger(__name__)
    @property
    def time_unit(self):
        """The unit of time to use to convert real timestamps into abstract
        timestamps."""
        return self._time_unit
    @time_unit.setter
    def time_unit(self, v):
        self._time_unit = _np.timedelta64(v)
    def make_data(self, predict_time=None):
        """Internal method, and for testing.  Returns the data in the format
        expected by the base classes.
        :param predict_time: Crop the data to before this time, and use this time
          as the end point.  If `None` then use the final timestamp in the
          data, rounded up by the currently in use time unit.
        :return: `predict_time, for_fixed, data`
        """
        if predict_time is None:
            # Round the final timestamp up to a whole number of time units
            # past an (arbitrary) fixed epoch.
            offset = _np.datetime64("2000-01-01T00:00")
            x = self.data.timestamps[-1] - offset
            x = _np.ceil(x / self.time_unit) * self.time_unit
            predict_time = offset + x
        else:
            predict_time = _np.datetime64(predict_time)
        data = self.data[self.data.timestamps <= predict_time]
        # `times` counts forward from the first event; `for_fixed` counts
        # backwards from the prediction point, both in `time_unit`s.
        times = (data.timestamps - data.timestamps[0]) / self.time_unit
        for_fixed = (predict_time - data.timestamps) / self.time_unit
        data = _np.asarray([times, data.xcoords, data.ycoords])
        return predict_time, for_fixed, data
class Trainer(_BaseTrainer):
    """Base class for a standard "trainer".  It is not assumed that this will
    always be used; but it may prove helpful often.
    """
    def __init__(self):
        super().__init__()
    def make_data(self, predict_time=None):
        """Internal method, and for testing.  Returns the data in the format
        expected by the base classes.
        :param predict_time: As in :meth:`train`.
        :return: `(fixed, data)` where `fixed` is a class describing any
          "fixed" parameters of the model (meta-parameters if you like) and
          `data` is an array of shape `(3,N)`.
        """
        predict_time, for_fixed, data = super().make_data(predict_time)
        return self.make_fixed(for_fixed), data
    def make_fixed(self, times):
        """Abstract method to return the "fixed" model.
        :param times: An array of the timestamps, converted to units of time
          before the "predict point".
        """
        raise NotImplementedError()
    def initial_model(self, fixed, data):
        """Abstract method to return the initial model from which optimisation
        is performed.  The pair `(fixed, data)` is as returned by
        :meth:`make_data`.
        """
        raise NotImplementedError()
    @property
    def _optimiser(self):
        """The class to be used as the optimiser"""
        raise NotImplementedError()
    def train(self, predict_time=None, iterations=1):
        """Optimise the model.
        :predict_time: Crop the data to before this time, and use this time
          as the end point.  If `None` then use the final timestamp in the
          data, rounded up by the currently in use time unit.
        :return: Instances of :class:`Model`.
        """
        fixed, data = self.make_data(predict_time)
        model = self.initial_model(fixed, data)
        # Repeated EM-style iteration: each pass builds an optimiser from the
        # current model and asks it for an improved model.
        for _ in range(iterations):
            opt = self._optimiser(model, data)
            model = opt.iterate()
            self._logger.debug(model)
        return model
class Predictor(_BaseTrainer):
    """A :class:`DataTrainer` which uses a model to make predictions.
    :param grid: The Grid object to make predictions against.
    :param model: The model object to use.
    """
    def __init__(self, grid, model):
        super().__init__()
        self._grid = grid
        self._model = model
    def to_fast_split_predictor_histogram(self, time_bin_size=1, space_bin_size=25):
        """Return a new instance of a "predictor" which offers faster
        predictions by using approximations.
        Currently we assume the the trigger intensity does not vary with
        starting position, and that it "factors" into a product of a time
        kernel and a space kernel.  The model must conform to the
        :class:`FastModel` interface.  We also assume that the background
        intensity does not vary in time.
        :param time_bin_size: Size of bins for the histogram we use to
          approximate the time kernel.  In units of :attr:`time_unit`.
        :param space_bin_size: Size of bins for the two dimensional histogram
          we use to approximate the space kernel.
        """
        pred = PredictorBase(self._model, [])
        fsp = pred.to_fast_split_predictor_histogram(self._grid, time_bin_size, space_bin_size)
        return FastPredictor(self._grid, fsp)
    def to_fast_split_predictor(self):
        """Return a new instance of a "predictor" which offers faster
        predictions, assuming that the model conforms to the interface
        :class:`FastModel`.
        """
        pred = PredictorBase(self._model, [])
        return FastPredictor(self._grid, pred.to_fast_split_predictor())
    def background_continuous_predict(self, predict_time, space_samples=20):
        """Make a prediction at this time, returning a continuous prediction.
        :param predict_time: Limit to data before this time, and
          use this as the predict time.
        :return: A continuous prediction.
        """
        predict_time, for_fixed, data = self.make_data(predict_time)
        time = _np.max(for_fixed)
        pred = PredictorBase(self._model, data)
        def kernel(pts):
            return pred.background_predict(time, pts)
        return predictors.KernelRiskPredictor(kernel,
            xoffset=self._grid.xoffset, yoffset=self._grid.yoffset,
            cell_width=self._grid.xsize, cell_height=self._grid.ysize,
            samples=space_samples)
    def background_predict(self, predict_time, space_samples=20):
        """Make a prediction at this time, using only the background kernel.
        :param predict_time: Limit to data before this time, and
          use this as the predict time.
        :return: A grid prediction, masked if possible with the grid, and
          normalised.
        """
        cts_predictor = self.background_continuous_predict(predict_time, space_samples)
        return self._to_grid_pred(cts_predictor)
    def continuous_predict(self, predict_time, end_time=None, time_samples=20, space_samples=20):
        """Make a prediction at this time, returning a continuous prediction.
        :param predict_time: Limit to data before this time, and
          use this as the predict time.
        :param end_time: If not `None`, then approximately intergate
          over this time range.
        :return: A continuous prediction.
        """
        predict_time, for_fixed, data = self.make_data(predict_time)
        time = _np.max(for_fixed)
        pred = PredictorBase(self._model, data)
        if end_time is None:
            # Point prediction at the single time `time`.
            def kernel(pts):
                return pred.point_predict(time, pts)
        else:
            # Average over the window [predict_time, end_time].
            end_time = _np.datetime64(end_time)
            time_end = time + (end_time - predict_time) / self.time_unit
            def kernel(pts):
                return pred.range_predict(time, time_end, pts, samples=time_samples)
        return predictors.KernelRiskPredictor(kernel,
            xoffset=self._grid.xoffset, yoffset=self._grid.yoffset,
            cell_width=self._grid.xsize, cell_height=self._grid.ysize,
            samples=space_samples)
    def predict(self, predict_time, end_time=None, time_samples=20, space_samples=20):
        """Make a prediction at this time.
        :param predict_time: Limit to data before this time, and
          use this as the predict time.
        :param end_time: If not `None`, then approximately intergate
          over this time range.
        :return: A grid prediction, masked if possible with the grid, and
          normalised.
        """
        cts_predictor = self.continuous_predict(predict_time, end_time, time_samples, space_samples)
        return self._to_grid_pred(cts_predictor)
    def _to_grid_pred(self, cts_predictor):
        # Sample the continuous prediction onto the grid; masking is
        # best-effort since not every grid supports it.
        grid_pred = predictors.GridPredictionArray.from_continuous_prediction_grid(
            cts_predictor, self._grid)
        try:
            grid_pred.mask_with(self._grid)
        except Exception:
            # Fix: was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt.  Masking remains deliberately best-effort.
            pass
        return grid_pred.renormalise()
class FastPredictor(_BaseTrainer):
    """A :class:`DataTrainer` which uses a model to make predictions.
    Is optimised for certain classes of models and can optionally also
    approximate kernels by histograms.
    Currently we assume the the trigger intensity does not vary with
    starting position, and that it "factors" into a product of a time
    kernel and a space kernel.  The model must conform to the
    :class:`FastModel` interface.
    :param grid: The Grid object to make predictions against.
    :param fast_pred_base: The instance of :class:`FastPredictorBase`
      we'll use internally.
    """
    def __init__(self, grid, fast_pred_base):
        super().__init__()
        self._grid = grid
        self._fast_pred_base = fast_pred_base
    @property
    def fast_predictor_base(self):
        """The underlying :class:`FastPredictorBase` which is used."""
        return self._fast_pred_base
    def background_predict(self, space_samples=-5):
        """Grid prediction using only the (time-invariant) background."""
        cts_predictor = predictors.KernelRiskPredictor(self._fast_pred_base.background_kernel,
            xoffset=self._grid.xoffset, yoffset=self._grid.yoffset,
            cell_width=self._grid.xsize, cell_height=self._grid.ysize,
            samples=space_samples)
        return self._to_grid_pred(cts_predictor)
    def continuous_predict(self, predict_time, end_time, time_samples=5, space_samples=-5):
        """Make a prediction at this time, returning a continuous prediction.
        :param predict_time: Limit to data before this time, and use this as
          the predict time.
        :param end_time: Approximately intergate over this time range.
        :param time_samples: The number of samples to use in approximating the
          integral over time.
        :param space_samples: The number of samples to use in the monte-carlo
          integration over space
        :return: A continuous prediction.
        """
        predict_time, for_fixed, data = self.make_data(predict_time)
        time = _np.max(for_fixed)
        self._fast_pred_base.points = data
        end_time = _np.datetime64(end_time)
        time_end = time + (end_time - predict_time) / self.time_unit
        def kernel(pts):
            return self._fast_pred_base.range_predict(time, time_end, pts, time_samples=time_samples)
        return predictors.KernelRiskPredictor(kernel,
            xoffset=self._grid.xoffset, yoffset=self._grid.yoffset,
            cell_width=self._grid.xsize, cell_height=self._grid.ysize,
            samples=space_samples)
    def predict(self, predict_time, end_time, time_samples=5, space_samples=-5):
        """Make a prediction at this time.
        :param predict_time: Limit to data before this time, and
          use this as the predict time.
        :param end_time: Approximately intergate over this time range.
        :param time_samples: The number of samples to use in approximating the
          integral over time.
        :param space_samples: The number of samples to use in the monte-carlo
          integration over space
        :return: A grid prediction, masked if possible with the grid, and
          normalised.
        """
        cts_predictor = self.continuous_predict(predict_time, end_time,
            time_samples, space_samples)
        return self._to_grid_pred(cts_predictor)
    def _to_grid_pred(self, cts_predictor):
        # Sample the continuous prediction onto the grid; masking is
        # best-effort since not every grid supports it.
        grid_pred = predictors.GridPredictionArray.from_continuous_prediction_grid(
            cts_predictor, self._grid)
        try:
            grid_pred.mask_with(self._grid)
        except Exception:
            # Fix: was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt.  Masking remains deliberately best-effort.
            pass
        return grid_pred.renormalise()
| [
"numpy.sum",
"numpy.empty",
"numpy.argsort",
"numpy.mean",
"numpy.arange",
"numpy.cumsum",
"numpy.max",
"datetime.timedelta",
"numpy.linspace",
"numpy.random.choice",
"numpy.ceil",
"numpy.floor_divide",
"numpy.asarray",
"numpy.diag_indices",
"numpy.sort",
"numpy.all",
"numpy.log",
... | [((260, 288), 'logging.getLogger', '_logging.getLogger', (['__name__'], {}), '(__name__)\n', (278, 288), True, 'import logging as _logging\n'), ((12869, 12886), 'numpy.zeros', '_np.zeros', (['(d, d)'], {}), '((d, d))\n', (12878, 12886), True, 'import numpy as _np\n'), ((13427, 13445), 'numpy.any', '_np.any', (['(norm == 0)'], {}), '(norm == 0)\n', (13434, 13445), True, 'import numpy as _np\n'), ((13873, 13885), 'numpy.array', '_np.array', (['p'], {}), '(p)\n', (13882, 13885), True, 'import numpy as _np\n'), ((1756, 1775), 'numpy.asarray', '_np.asarray', (['points'], {}), '(points)\n', (1767, 1775), True, 'import numpy as _np\n'), ((3552, 3571), 'numpy.asarray', '_np.asarray', (['points'], {}), '(points)\n', (3563, 3571), True, 'import numpy as _np\n'), ((4102, 4127), 'numpy.asarray', '_np.asarray', (['space_points'], {}), '(space_points)\n', (4113, 4127), True, 'import numpy as _np\n'), ((4239, 4318), 'numpy.asarray', '_np.asarray', (['[[time] * space_points.shape[1], space_points[0], space_points[1]]'], {}), '([[time] * space_points.shape[1], space_points[0], space_points[1]])\n', (4250, 4318), True, 'import numpy as _np\n'), ((4859, 4884), 'numpy.asarray', '_np.asarray', (['space_points'], {}), '(space_points)\n', (4870, 4884), True, 'import numpy as _np\n'), ((4996, 5075), 'numpy.asarray', '_np.asarray', (['[[time] * space_points.shape[1], space_points[0], space_points[1]]'], {}), '([[time] * space_points.shape[1], space_points[0], space_points[1]])\n', (5007, 5075), True, 'import numpy as _np\n'), ((10190, 10215), 'numpy.asarray', '_np.asarray', (['space_points'], {}), '(space_points)\n', (10201, 10215), True, 'import numpy as _np\n'), ((10605, 10653), 'numpy.linspace', '_np.linspace', (['time_start', 'time_end', 'time_samples'], {}), '(time_start, time_end, time_samples)\n', (10617, 10653), True, 'import numpy as _np\n'), ((10810, 10841), 'numpy.mean', '_np.mean', (['time_triggers'], {'axis': '(1)'}), '(time_triggers, axis=1)\n', (10818, 10841), True, 
'import numpy as _np\n'), ((12368, 12389), 'numpy.atleast_1d', '_np.atleast_1d', (['times'], {}), '(times)\n', (12382, 12389), True, 'import numpy as _np\n'), ((12522, 12544), 'numpy.empty', '_np.empty', (['times.shape'], {}), '(times.shape)\n', (12531, 12544), True, 'import numpy as _np\n'), ((12943, 12974), 'datetime.timedelta', '_datetime.timedelta', ([], {'seconds': '(10)'}), '(seconds=10)\n', (12962, 12974), True, 'import datetime as _datetime\n'), ((12991, 13010), 'numpy.diag_indices', '_np.diag_indices', (['d'], {}), '(d)\n', (13007, 13010), True, 'import numpy as _np\n'), ((13393, 13411), 'numpy.sum', '_np.sum', (['p'], {'axis': '(0)'}), '(p, axis=0)\n', (13400, 13411), True, 'import numpy as _np\n'), ((13961, 13975), 'numpy.argsort', '_np.argsort', (['x'], {}), '(x)\n', (13972, 13975), True, 'import numpy as _np\n'), ((14290, 14318), 'logging.getLogger', '_logging.getLogger', (['__name__'], {}), '(__name__)\n', (14308, 14318), True, 'import logging as _logging\n'), ((15077, 15097), 'numpy.sum', '_np.sum', (['self.p_diag'], {}), '(self.p_diag)\n', (15084, 15097), True, 'import numpy as _np\n'), ((16945, 16973), 'numpy.array', '_np.array', (['bk'], {'dtype': '_np.int'}), '(bk, dtype=_np.int)\n', (16954, 16973), True, 'import numpy as _np\n'), ((17407, 17430), 'numpy.timedelta64', '_np.timedelta64', (['(1)', '"""D"""'], {}), "(1, 'D')\n", (17422, 17430), True, 'import numpy as _np\n'), ((17454, 17482), 'logging.getLogger', '_logging.getLogger', (['__name__'], {}), '(__name__)\n', (17472, 17482), True, 'import logging as _logging\n'), ((17734, 17752), 'numpy.timedelta64', '_np.timedelta64', (['v'], {}), '(v)\n', (17749, 17752), True, 'import numpy as _np\n'), ((18740, 18788), 'numpy.asarray', '_np.asarray', (['[times, data.xcoords, data.ycoords]'], {}), '([times, data.xcoords, data.ycoords])\n', (18751, 18788), True, 'import numpy as _np\n'), ((23162, 23180), 'numpy.max', '_np.max', (['for_fixed'], {}), '(for_fixed)\n', (23169, 23180), True, 'import numpy as 
_np\n'), ((24666, 24684), 'numpy.max', '_np.max', (['for_fixed'], {}), '(for_fixed)\n', (24673, 24684), True, 'import numpy as _np\n'), ((28269, 28287), 'numpy.max', '_np.max', (['for_fixed'], {}), '(for_fixed)\n', (28276, 28287), True, 'import numpy as _np\n'), ((28351, 28375), 'numpy.datetime64', '_np.datetime64', (['end_time'], {}), '(end_time)\n', (28365, 28375), True, 'import numpy as _np\n'), ((2045, 2056), 'numpy.log', '_np.log', (['li'], {}), '(li)\n', (2052, 2056), True, 'import numpy as _np\n'), ((7836, 7858), 'numpy.cumsum', '_np.cumsum', (['sorted_mat'], {}), '(sorted_mat)\n', (7846, 7858), True, 'import numpy as _np\n'), ((8391, 8410), 'numpy.all', '_np.all', (['mask[disc]'], {}), '(mask[disc])\n', (8398, 8410), True, 'import numpy as _np\n'), ((8653, 8667), 'numpy.sort', '_np.sort', (['hist'], {}), '(hist)\n', (8661, 8667), True, 'import numpy as _np\n'), ((8685, 8708), 'numpy.cumsum', '_np.cumsum', (['sorted_hist'], {}), '(sorted_hist)\n', (8695, 8708), True, 'import numpy as _np\n'), ((9020, 9047), 'numpy.all', '_np.all', (['mask[index_start:]'], {}), '(mask[index_start:])\n', (9027, 9047), True, 'import numpy as _np\n'), ((10897, 10953), 'numpy.sum', '_np.sum', (['(space_triggers * time_triggers[None, :])'], {'axis': '(1)'}), '(space_triggers * time_triggers[None, :], axis=1)\n', (10904, 10953), True, 'import numpy as _np\n'), ((14472, 14492), 'numpy.any', '_np.any', (['(self._p < 0)'], {}), '(self._p < 0)\n', (14479, 14492), True, 'import numpy as _np\n'), ((14996, 15015), 'numpy.diag_indices', '_np.diag_indices', (['d'], {}), '(d)\n', (15012, 15015), True, 'import numpy as _np\n'), ((15230, 15253), 'numpy.sum', '_np.sum', (['self._p[:i, i]'], {}), '(self._p[:i, i])\n', (15237, 15253), True, 'import numpy as _np\n'), ((16365, 16410), 'numpy.random.choice', '_np.random.choice', (['(i + 1)'], {'p': 'self.p[:i + 1, i]'}), '(i + 1, p=self.p[:i + 1, i])\n', (16382, 16410), True, 'import numpy as _np\n'), ((18265, 18299), 'numpy.datetime64', 
'_np.datetime64', (['"""2000-01-01T00:00"""'], {}), "('2000-01-01T00:00')\n", (18279, 18299), True, 'import numpy as _np\n'), ((18491, 18519), 'numpy.datetime64', '_np.datetime64', (['predict_time'], {}), '(predict_time)\n', (18505, 18519), True, 'import numpy as _np\n'), ((24882, 24906), 'numpy.datetime64', '_np.datetime64', (['end_time'], {}), '(end_time)\n', (24896, 24906), True, 'import numpy as _np\n'), ((7878, 7907), 'numpy.any', '_np.any', (['(cs <= cs[-1] * 0.001)'], {}), '(cs <= cs[-1] * 0.001)\n', (7885, 7907), True, 'import numpy as _np\n'), ((7995, 8026), 'numpy.where', '_np.where', (['(cs <= cs[-1] * 0.001)'], {}), '(cs <= cs[-1] * 0.001)\n', (8004, 8026), True, 'import numpy as _np\n'), ((8335, 8378), 'numpy.sqrt', '_np.sqrt', (['((x - size) ** 2 + (y - size) ** 2)'], {}), '((x - size) ** 2 + (y - size) ** 2)\n', (8343, 8378), True, 'import numpy as _np\n'), ((8728, 8757), 'numpy.any', '_np.any', (['(cs <= cs[-1] * 0.001)'], {}), '(cs <= cs[-1] * 0.001)\n', (8735, 8757), True, 'import numpy as _np\n'), ((8845, 8876), 'numpy.where', '_np.where', (['(cs <= cs[-1] * 0.001)'], {}), '(cs <= cs[-1] * 0.001)\n', (8854, 8876), True, 'import numpy as _np\n'), ((12408, 12446), 'numpy.floor_divide', '_np.floor_divide', (['times', 'self._time[1]'], {}), '(times, self._time[1])\n', (12424, 12446), True, 'import numpy as _np\n'), ((14018, 14031), 'numpy.cumsum', '_np.cumsum', (['s'], {}), '(s)\n', (14028, 14031), True, 'import numpy as _np\n'), ((17144, 17171), 'numpy.asarray', '_np.asarray', (['trigger_deltas'], {}), '(trigger_deltas)\n', (17155, 17171), True, 'import numpy as _np\n'), ((18366, 18394), 'numpy.ceil', '_np.ceil', (['(x / self.time_unit)'], {}), '(x / self.time_unit)\n', (18374, 18394), True, 'import numpy as _np\n'), ((8191, 8211), 'numpy.arange', '_np.arange', (['(size * 2)'], {}), '(size * 2)\n', (8201, 8211), True, 'import numpy as _np\n'), ((8270, 8290), 'numpy.arange', '_np.arange', (['(size * 2)'], {}), '(size * 2)\n', (8280, 8290), True, 
'import numpy as _np\n'), ((8593, 8609), 'numpy.arange', '_np.arange', (['size'], {}), '(size)\n', (8603, 8609), True, 'import numpy as _np\n'), ((9085, 9101), 'numpy.where', '_np.where', (['(~mask)'], {}), '(~mask)\n', (9094, 9101), True, 'import numpy as _np\n')] |
#!/usr/bin/env python
import argparse
import numpy as np
import csv
import time
import sys
import glob
import os
import matplotlib.pyplot as plt
import datetime
"""IMPORTANT"""
# NOTE: this script assumes it is run in Diagnostics/python/
# So it does NOT need to be in carla's PythonAPI, but it DOES need
# the CARLA_DIR (-c, --carladir) environment variable to be set
def save_to_file(data, dir_path, filename, suffix=""):
    """Write ``data`` (dict mapping column name -> list of values) to a CSV file.

    The file is written as ``<filename><suffix>.csv`` inside ``dir_path``;
    the header row is the dict keys and rows are the zipped column values.
    """
    assert os.path.exists(dir_path), f"output directory does not exist: {dir_path}"
    # BUG FIX: honour the caller-supplied ``filename`` argument; it was
    # previously overwritten with a hard-coded name, so the -f/--file CLI
    # option had no effect.
    filename = f"{filename}{suffix}.csv"
    filepath = os.path.join(os.getcwd(), dir_path, filename)
    print(f"saving data to: {filepath}")
    # "w" both creates a missing file and truncates an existing one, so the
    # old exists-check was unnecessary; newline="" prevents the csv module
    # from emitting blank lines on Windows.
    with open(filepath, "w", newline="") as f:
        csv_writer = csv.writer(f)
        csv_writer.writerow(data.keys())
        # rows are written column-aligned with the header order
        csv_writer.writerows(zip(*(data[key] for key in data.keys())))
def plot(
    x,
    y,
    title,
    title_x="",
    title_y="",
    trim=(0, 0),
    out_dir="graphs",
    show_mean=True,
    colour="r",
    lines=True,
    suffix="",
):
    """Render an x/y scatter (optionally connected by lines) and save it as JPEG.

    ``trim`` drops the first/last elements of both series before plotting;
    the output filename is derived from ``title`` with spaces replaced.
    """
    lo, hi = trim
    limit = min(len(x), len(y))
    xs = np.array(x[lo : limit - hi])
    ys = np.array(y[lo : limit - hi])
    fig = plt.figure()
    plt.grid(True)
    plt.xlabel(title_x, fontsize=16)
    plt.ylabel(title_y, fontsize=16)
    plt.xticks()
    plt.yticks()
    plt.tick_params(labelsize=15)
    if show_mean:
        mean_str = f", mean: {np.mean(ys):.3f}"
    else:
        mean_str = ""
    plt.title(f"{title}{mean_str}", fontsize=18)
    plt.plot(xs, ys, colour + "o")
    if lines:
        plt.plot(xs, ys, color=colour, linewidth=1)
    plt.tight_layout()
    os.makedirs(out_dir, exist_ok=True)
    filename = f"{title}{suffix}.jpg".replace(" ", "_")  # no spaces!
    filepath = os.path.join(os.getcwd(), out_dir, filename)
    print(f"Saving graph to: {filepath}")
    fig.savefig(filepath)
    plt.close(fig)
def main():
    """Connect to a running CARLA server, log its FPS until interrupted,
    then persist the samples to CSV and render a graph."""
    argparser = argparse.ArgumentParser(description=__doc__)
    argparser.add_argument(
        "-c",
        "--carladir",
        metavar="C",
        default="",  # cwd
        type=str,
        help="Directory for Carla",
    )
    argparser.add_argument(
        "--host",
        metavar="H",
        default="127.0.0.1",
        help="IP of the host server (default: 127.0.0.1)",
    )
    argparser.add_argument(
        "-p",
        "--port",
        metavar="P",
        default=2000,
        type=int,
        help="TCP port to listen to (default: 2000)",
    )
    argparser.add_argument(
        "-d",
        "--dir",
        metavar="D",
        default="data",  # cwd
        type=str,
        help="data directory for outputs",
    )
    argparser.add_argument(
        "-f",
        "--file",
        metavar="F",
        default="carla_data",
        type=str,
        help="name of output file",
    )
    argparser.add_argument(
        "-i",
        "--interval",
        metavar="I",
        default="0.1",  # in seconds
        type=float,
        help="intervals (s) of which to take framerate",
    )
    args = argparser.parse_args()
    carla_dir = args.carladir
    output_dir = args.dir
    py_delay = args.interval
    filename = args.file
    os.makedirs(output_dir, exist_ok=True)
    """Import Carla given the Carla Directory"""
    if carla_dir == "":
        print("Need to pass in the CARLA environment directory!")
        exit(1)
    # the carla Python package ships as an .egg under <carladir>/PythonAPI/carla/dist
    egg_locn = os.path.join(carla_dir, "PythonAPI", "carla", "dist")
    print(f"Trying to load python .egg from {egg_locn}")
    python_egg = glob.glob(os.path.join(egg_locn, "carla-*.egg"))
    try:
        # sourcing python egg file; IndexError here means no egg was found
        sys.path.append(python_egg[0])
        import carla  # works if the python egg file is properly sourced
    except Exception as e:
        print("Error:", e)
        exit(1)
    print(f"Success! Continuing with script, pinging every {py_delay:.3f}s")
    client = carla.Client(args.host, args.port)
    client.set_timeout(10.0)  # should be running already
    world = client.get_world()
    data = {}
    data["[CARLA]Idx"] = []
    data["[CARLA]Fps"] = []
    # to get FPS, we can get the world dt and compute the inv
    i = 0
    try:
        while True:
            # seconds=X presents maximum delay to wait for server
            delta_t = world.wait_for_tick(seconds=0.5).timestamp.delta_seconds
            fps = 1 / delta_t
            data["[CARLA]Idx"].append(i)
            data["[CARLA]Fps"].append(fps)
            # sleep for the interval
            time.sleep(py_delay)
            i += 1
            print(f"FPS: {fps:.3f}", end="\r")  # no flush (slow & expensive)
    except KeyboardInterrupt:
        print("Stopped by user.")
    except RuntimeError:
        print("Simulator disconnected.")
    finally:  # simulator disconnected
        # always flush whatever was collected, timestamped to avoid clobbering
        ts = time.time()
        timestamp = datetime.datetime.fromtimestamp(ts).strftime("%Y-%m-%d-%H-%M-%S")
        suffix = f"_{timestamp}"
        save_to_file(data, output_dir, filename=filename, suffix=suffix)
        plot(
            x=data["[CARLA]Idx"],
            y=data["[CARLA]Fps"],
            title="CARLA FPS",
            title_x="",
            title_y="fps",
            out_dir=output_dir,
            show_mean=True,
            lines=True,
            suffix=suffix,
        )
# Entry point: run the FPS monitor until the user interrupts with Ctrl-C.
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # cleanup (CSV + graph) already happened in main()'s finally block
        pass
| [
"matplotlib.pyplot.title",
"argparse.ArgumentParser",
"matplotlib.pyplot.figure",
"numpy.mean",
"carla.Client",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.tight_layout",
"os.path.join",
"sys.path.append",
"matplotlib.pyplot.close",
"matplotlib.pyplot.yticks",
"os.path.exists",
"matp... | [((440, 464), 'os.path.exists', 'os.path.exists', (['dir_path'], {}), '(dir_path)\n', (454, 464), False, 'import os\n'), ((1153, 1191), 'numpy.array', 'np.array', (['x[trim[0]:max_len - trim[1]]'], {}), '(x[trim[0]:max_len - trim[1]])\n', (1161, 1191), True, 'import numpy as np\n'), ((1202, 1240), 'numpy.array', 'np.array', (['y[trim[0]:max_len - trim[1]]'], {}), '(y[trim[0]:max_len - trim[1]])\n', (1210, 1240), True, 'import numpy as np\n'), ((1254, 1266), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1264, 1266), True, 'import matplotlib.pyplot as plt\n'), ((1271, 1285), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1279, 1285), True, 'import matplotlib.pyplot as plt\n'), ((1290, 1322), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['title_x'], {'fontsize': '(16)'}), '(title_x, fontsize=16)\n', (1300, 1322), True, 'import matplotlib.pyplot as plt\n'), ((1327, 1359), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['title_y'], {'fontsize': '(16)'}), '(title_y, fontsize=16)\n', (1337, 1359), True, 'import matplotlib.pyplot as plt\n'), ((1364, 1376), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {}), '()\n', (1374, 1376), True, 'import matplotlib.pyplot as plt\n'), ((1381, 1393), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {}), '()\n', (1391, 1393), True, 'import matplotlib.pyplot as plt\n'), ((1398, 1427), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': '(15)'}), '(labelsize=15)\n', (1413, 1427), True, 'import matplotlib.pyplot as plt\n'), ((1535, 1570), 'matplotlib.pyplot.title', 'plt.title', (['graph_title'], {'fontsize': '(18)'}), '(graph_title, fontsize=18)\n', (1544, 1570), True, 'import matplotlib.pyplot as plt\n'), ((1575, 1603), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', "(colour + 'o')"], {}), "(x, y, colour + 'o')\n", (1583, 1603), True, 'import matplotlib.pyplot as plt\n'), ((1672, 1690), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1688, 1690), True, 
'import matplotlib.pyplot as plt\n'), ((1695, 1730), 'os.makedirs', 'os.makedirs', (['out_dir'], {'exist_ok': '(True)'}), '(out_dir, exist_ok=True)\n', (1706, 1730), False, 'import os\n'), ((1935, 1949), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (1944, 1949), True, 'import matplotlib.pyplot as plt\n'), ((1980, 2024), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (2003, 2024), False, 'import argparse\n'), ((3243, 3281), 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), '(output_dir, exist_ok=True)\n', (3254, 3281), False, 'import os\n'), ((3454, 3507), 'os.path.join', 'os.path.join', (['carla_dir', '"""PythonAPI"""', '"""carla"""', '"""dist"""'], {}), "(carla_dir, 'PythonAPI', 'carla', 'dist')\n", (3466, 3507), False, 'import os\n'), ((3948, 3982), 'carla.Client', 'carla.Client', (['args.host', 'args.port'], {}), '(args.host, args.port)\n', (3960, 3982), False, 'import carla\n'), ((534, 545), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (543, 545), False, 'import os\n'), ((626, 650), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (640, 650), False, 'import os\n'), ((718, 731), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (728, 731), False, 'import csv\n'), ((1626, 1667), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'color': 'colour', 'linewidth': '(1)'}), '(x, y, color=colour, linewidth=1)\n', (1634, 1667), True, 'import matplotlib.pyplot as plt\n'), ((1831, 1842), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1840, 1842), False, 'import os\n'), ((3592, 3629), 'os.path.join', 'os.path.join', (['egg_locn', '"""carla-*.egg"""'], {}), "(egg_locn, 'carla-*.egg')\n", (3604, 3629), False, 'import os\n'), ((3683, 3713), 'sys.path.append', 'sys.path.append', (['python_egg[0]'], {}), '(python_egg[0])\n', (3698, 3713), False, 'import sys\n'), ((4853, 4864), 'time.time', 'time.time', ([], {}), '()\n', (4862, 4864), False, 'import 
time\n'), ((4553, 4573), 'time.sleep', 'time.sleep', (['py_delay'], {}), '(py_delay)\n', (4563, 4573), False, 'import time\n'), ((1454, 1464), 'numpy.mean', 'np.mean', (['y'], {}), '(y)\n', (1461, 1464), True, 'import numpy as np\n'), ((4885, 4920), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['ts'], {}), '(ts)\n', (4916, 4920), False, 'import datetime\n')] |
import numpy as np
from sklearn import linear_model
from dowhy.causal_estimator import CausalEstimate
from dowhy.causal_estimator import CausalEstimator
class LinearRegressionEstimator(CausalEstimator):
    """Compute effect of treatment using linear regression.

    The coefficient of the treatment variable in the regression model is
    computed as the causal effect. Common method but the assumptions required
    are too strong. Avoid.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        backdoor_vars = self._target_estimand.backdoor_variables
        self.logger.debug("Back-door variables used:" +
                          ",".join(backdoor_vars))
        # observed common causes = the back-door adjustment set
        self._observed_common_causes_names = backdoor_vars
        self._observed_common_causes = self._data[self._observed_common_causes_names]
        self.logger.info("INFO: Using Linear Regression Estimator")
        self.symbolic_estimator = self.construct_symbolic_estimator(self._target_estimand)
        self.logger.info(self.symbolic_estimator)

    def _estimate_effect(self):
        """Fit outcome ~ [treatment, common causes]; the treatment coefficient
        is reported as the causal effect."""
        n_units = len(self._treatment)
        treatment_2d = self._treatment.values.reshape(n_units, -1)
        features = np.concatenate((treatment_2d, self._observed_common_causes),
                                  axis=1)
        model = linear_model.LinearRegression()
        model.fit(features, self._outcome)
        coefficients = model.coef_
        self.logger.debug("Coefficients of the fitted linear model: " +
                          ",".join(map(str, coefficients)))
        # coefficient 0 corresponds to the treatment column
        return CausalEstimate(estimate=coefficients[0],
                              target_estimand=self._target_estimand,
                              realized_estimand_expr=self.symbolic_estimator,
                              intercept=model.intercept_)

    def construct_symbolic_estimator(self, estimand):
        """Return a human-readable formula string, e.g. ``b: y~t+w1+w2``."""
        var_list = [estimand.treatment_variable, ] + estimand.backdoor_variables
        return "b: " + estimand.outcome_variable + "~" + "+".join(var_list)
| [
"sklearn.linear_model.LinearRegression",
"dowhy.causal_estimator.CausalEstimate",
"numpy.concatenate"
] | [((1185, 1253), 'numpy.concatenate', 'np.concatenate', (['(treatment_2d, self._observed_common_causes)'], {'axis': '(1)'}), '((treatment_2d, self._observed_common_causes), axis=1)\n', (1199, 1253), True, 'import numpy as np\n'), ((1304, 1335), 'sklearn.linear_model.LinearRegression', 'linear_model.LinearRegression', ([], {}), '()\n', (1333, 1335), False, 'from sklearn import linear_model\n'), ((1565, 1729), 'dowhy.causal_estimator.CausalEstimate', 'CausalEstimate', ([], {'estimate': 'coefficients[0]', 'target_estimand': 'self._target_estimand', 'realized_estimand_expr': 'self.symbolic_estimator', 'intercept': 'model.intercept_'}), '(estimate=coefficients[0], target_estimand=self.\n _target_estimand, realized_estimand_expr=self.symbolic_estimator,\n intercept=model.intercept_)\n', (1579, 1729), False, 'from dowhy.causal_estimator import CausalEstimate\n')] |
from .enums import CitySize, AreaKind
import numpy as np
class OkumuraHata:
    """Okumura-Hata empirical radio propagation model.

    Parameters (stored on the instance):
      - frequency: carrier frequency (MHz, per the standard model -- TODO confirm units with callers)
      - transmitter_height / receiver_height: antenna heights
      - city_size: a ``CitySize`` enum member
      - area_kind: an ``AreaKind`` enum member (URBAN / SUBURBAN / RURAL)
    """

    def __init__(
        self,
        frequency,
        transmitter_height,
        receiver_height,
        city_size,
        area_kind,
    ):
        self.frequency = frequency
        self.transmitter_height = transmitter_height
        self.receiver_height = receiver_height
        self.city_size = city_size
        self.area_kind = area_kind

    def _height_correction(self):
        """Mobile-antenna height correction factor a(h_re) in dB."""
        if self.city_size.value == CitySize.LARGE.value and self.frequency <= 200:
            # large city, low frequency (<= 200 MHz)
            return 8.29 * (np.log10(1.54 * self.receiver_height)**2) - 1.1
        # BUG FIX: the original wrote ``self.city_size == CitySize.LARGE.value``,
        # comparing the enum member itself to its raw value -- never true -- so
        # the large-city high-frequency correction was unreachable and such
        # inputs silently fell through to the small-city formula.
        elif self.city_size.value == CitySize.LARGE.value:
            # large city, higher frequencies
            return 3.2 * (np.log10(11.75 * self.receiver_height)**2) - 4.97
        else:
            # small/medium city: (1.1 log f - 0.7) h_re - (1.56 log f - 0.8)
            return 0.8 + (1.1 * np.log10(self.frequency) - 0.7) * self.receiver_height - 1.56 * np.log10(self.frequency)

    def _base_loss(self, distance):
        """Median urban path loss (dB) at ``distance``."""
        constant_factor = 69.55
        frequency_factor = 26.16 * np.log10(self.frequency)
        base_height_factor = 13.82 * np.log10(self.transmitter_height)
        distance_factor = (44.9 - 6.55 * np.log10(self.transmitter_height)) * np.log10(distance)
        return constant_factor + frequency_factor - base_height_factor - self._height_correction() + distance_factor

    def _suburban_loss(self, distance):
        """Suburban-area correction applied to the urban loss."""
        frequency_factor = 2 * (np.log10(self.frequency/28.0)**2)
        constant_factor = 5.4
        return self._base_loss(distance) - frequency_factor - constant_factor

    def _rural_loss(self, distance):
        """Open/rural-area correction applied to the urban loss."""
        frequency_factor = 4.78 * (np.log10(self.frequency)**2) - 18.33 * (np.log10(self.frequency))
        constant_factor = 40.94
        return self._base_loss(distance) - frequency_factor - constant_factor

    def path_loss(self, distance):
        """Dispatch on ``area_kind`` and return the path loss in dB.

        Raises:
            ValueError: if ``area_kind`` is not URBAN, SUBURBAN or RURAL.
        """
        if self.area_kind.value == AreaKind.URBAN.value:
            return self._base_loss(distance)
        elif self.area_kind.value == AreaKind.SUBURBAN.value:
            return self._suburban_loss(distance)
        elif self.area_kind.value == AreaKind.RURAL.value:
            return self._rural_loss(distance)
        else:
            raise ValueError("Invalid area type")
| [
"numpy.log10"
] | [((924, 948), 'numpy.log10', 'np.log10', (['self.frequency'], {}), '(self.frequency)\n', (932, 948), True, 'import numpy as np\n'), ((982, 1015), 'numpy.log10', 'np.log10', (['self.transmitter_height'], {}), '(self.transmitter_height)\n', (990, 1015), True, 'import numpy as np\n'), ((1090, 1108), 'numpy.log10', 'np.log10', (['distance'], {}), '(distance)\n', (1098, 1108), True, 'import numpy as np\n'), ((1289, 1320), 'numpy.log10', 'np.log10', (['(self.frequency / 28.0)'], {}), '(self.frequency / 28.0)\n', (1297, 1320), True, 'import numpy as np\n'), ((1531, 1555), 'numpy.log10', 'np.log10', (['self.frequency'], {}), '(self.frequency)\n', (1539, 1555), True, 'import numpy as np\n'), ((1053, 1086), 'numpy.log10', 'np.log10', (['self.transmitter_height'], {}), '(self.transmitter_height)\n', (1061, 1086), True, 'import numpy as np\n'), ((1491, 1515), 'numpy.log10', 'np.log10', (['self.frequency'], {}), '(self.frequency)\n', (1499, 1515), True, 'import numpy as np\n'), ((533, 570), 'numpy.log10', 'np.log10', (['(1.54 * self.receiver_height)'], {}), '(1.54 * self.receiver_height)\n', (541, 570), True, 'import numpy as np\n'), ((800, 824), 'numpy.log10', 'np.log10', (['self.frequency'], {}), '(self.frequency)\n', (808, 824), True, 'import numpy as np\n'), ((650, 688), 'numpy.log10', 'np.log10', (['(11.75 * self.receiver_height)'], {}), '(11.75 * self.receiver_height)\n', (658, 688), True, 'import numpy as np\n'), ((736, 760), 'numpy.log10', 'np.log10', (['self.frequency'], {}), '(self.frequency)\n', (744, 760), True, 'import numpy as np\n')] |
"""Hourly temperature frequencies."""
import datetime
import calendar
from collections import OrderedDict
import numpy as np
from pandas.io.sql import read_sql
import matplotlib.colors as mpcolors
from pyiem.plot.use_agg import plt
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.exceptions import NoDataFound
PDICT = {'above': 'At or Above Threshold',
         'below': 'Below Threshold'}
PDICT2 = OrderedDict([
    ('tmpf', "Air Temperature"),
    ('dwpf', "Dew Point Temp"),
    ('feel', "Feels Like Temp"),
    ('relh', "Relative Humidity"),
])


def get_description():
    """Return the metadata dict describing how to call this plotter."""
    desc = {}
    desc['cache'] = 86400
    desc['description'] = """This plot presents the hourly frequency of
    a certain temperature above or below a given threshold. Values are
    partitioned by week of the year to smooth out some of the day to day
    variation."""
    desc['data'] = True
    # user-facing form fields, in display order
    desc['arguments'] = [
        {'type': 'zstation', 'name': 'zstation', 'default': 'DSM',
         'network': 'IA_ASOS', 'label': 'Select Station:'},
        {'type': 'select', 'name': 'var', 'default': 'tmpf',
         'options': PDICT2, 'label': 'Which Variable:'},
        {'type': 'int', 'name': 'threshold', 'default': 32,
         'label': 'Threshold (Temperature in F, RH in %)'},
        {'type': 'select', 'name': 'direction', 'default': 'below',
         'label': 'Threshold direction:', 'options': PDICT},
        {'type': 'cmap', 'name': 'cmap', 'default': 'jet',
         'label': 'Color Ramp:'},
    ]
    return desc
def plotter(fdict):
    """Build a 24-hour by 53-week heatmap of the frequency (%) that the
    chosen variable is above/below the threshold; return (figure, dataframe)."""
    pgconn = get_dbconn('asos')
    ctx = get_autoplot_context(fdict, get_description())
    station = ctx['zstation']
    threshold = ctx['threshold']
    direction = ctx['direction']
    varname = ctx['var']
    # SQL comparison operator for the chosen direction
    mydir = "<" if direction == 'below' else '>='
    # NOTE(review): varname/mydir are string-interpolated into the SQL; they
    # come from the fixed PDICT/PDICT2 option lists, not free-form user input.
    df = read_sql("""
    WITH data as (
        SELECT extract(week from valid) as week,
    extract(hour from (valid + '10 minutes'::interval) at time zone %s) as hour,
    """ + varname + """ as d from alldata where
    station = %s and """ + varname + """ between -70 and 140
    )
    SELECT week::int, hour::int,
    sum(case when d """+mydir+""" %s then 1 else 0 end),
    count(*) from data GROUP by week, hour
    """, pgconn, params=(
        ctx['_nt'].sts[station]['tzname'], station, threshold),
                  index_col=None)
    # hour (rows) x week-of-year (cols) frequency grid
    data = np.zeros((24, 53), 'f')
    df['freq[%]'] = df['sum'] / df['count'] * 100.
    for _, row in df.iterrows():
        data[int(row['hour']), int(row['week']) - 1] = row['freq[%]']
    sts = datetime.datetime(2012, 1, 1)
    # place month labels at day-of-year / 7 (week) positions
    xticks = []
    for i in range(1, 13):
        ts = sts.replace(month=i)
        xticks.append(float(ts.strftime("%j")) / 7.0)
    (fig, ax) = plt.subplots(1, 1)
    cmap = plt.get_cmap(ctx['cmap'])
    cmap.set_under('white')
    bins = np.arange(0, 101, 5)
    # values below 1% render white via set_under instead of the first color
    bins[0] = 1
    norm = mpcolors.BoundaryNorm(bins, cmap.N)
    res = ax.imshow(data, interpolation='nearest', aspect='auto',
                    extent=[0, 53, 24, 0], cmap=cmap, norm=norm)
    fig.colorbar(res, label='%', extend='min')
    ax.grid(True, zorder=11)
    units = r"$^\circ$F" if varname != 'relh' else '%'
    ab = ctx['_nt'].sts[station]['archive_begin']
    if ab is None:
        raise NoDataFound("Unknown station metadata.")
    ax.set_title(("%s [%s]\n"
                  "Hourly %s %s %s%s (%s-%s)"
                  ) % (
        ctx['_nt'].sts[station]['name'], station, PDICT2[varname],
        PDICT[direction], threshold, units,
        ab.year,
        datetime.datetime.now().year), size=12)
    ax.set_xticks(xticks)
    ax.set_ylabel("%s Timezone" % (ctx['_nt'].sts[station]['tzname'],))
    ax.set_xticklabels(calendar.month_abbr[1:])
    ax.set_xlim(0, 53)
    ax.set_ylim(0, 24)
    ax.set_yticks([0, 4, 8, 12, 16, 20, 24])
    ax.set_yticklabels(['12 AM', '4 AM', '8 AM', 'Noon', '4 PM', '8 PM',
                        'Mid'])
    return fig, df
# Allow running standalone with all-default options for a quick smoke test.
if __name__ == '__main__':
    plotter(dict())
| [
"pandas.io.sql.read_sql",
"pyiem.plot.use_agg.plt.get_cmap",
"matplotlib.colors.BoundaryNorm",
"numpy.zeros",
"datetime.datetime.now",
"datetime.datetime",
"pyiem.exceptions.NoDataFound",
"numpy.arange",
"collections.OrderedDict",
"pyiem.plot.use_agg.plt.subplots",
"pyiem.util.get_dbconn"
] | [((420, 555), 'collections.OrderedDict', 'OrderedDict', (["(('tmpf', 'Air Temperature'), ('dwpf', 'Dew Point Temp'), ('feel',\n 'Feels Like Temp'), ('relh', 'Relative Humidity'))"], {}), "((('tmpf', 'Air Temperature'), ('dwpf', 'Dew Point Temp'), (\n 'feel', 'Feels Like Temp'), ('relh', 'Relative Humidity')))\n", (431, 555), False, 'from collections import OrderedDict\n'), ((1614, 1632), 'pyiem.util.get_dbconn', 'get_dbconn', (['"""asos"""'], {}), "('asos')\n", (1624, 1632), False, 'from pyiem.util import get_autoplot_context, get_dbconn\n'), ((1873, 2409), 'pandas.io.sql.read_sql', 'read_sql', (['(\n """\n WITH data as (\n SELECT extract(week from valid) as week,\n extract(hour from (valid + \'10 minutes\'::interval) at time zone %s) as hour,\n """\n + varname + """ as d from alldata where\n station = %s and """ +\n varname +\n """ between -70 and 140\n )\n SELECT week::int, hour::int,\n sum(case when d """\n + mydir +\n """ %s then 1 else 0 end),\n count(*) from data GROUP by week, hour\n """\n )', 'pgconn'], {'params': "(ctx['_nt'].sts[station]['tzname'], station, threshold)", 'index_col': 'None'}), '(\n """\n WITH data as (\n SELECT extract(week from valid) as week,\n extract(hour from (valid + \'10 minutes\'::interval) at time zone %s) as hour,\n """\n + varname + """ as d from alldata where\n station = %s and """ +\n varname +\n """ between -70 and 140\n )\n SELECT week::int, hour::int,\n sum(case when d """\n + mydir +\n """ %s then 1 else 0 end),\n count(*) from data GROUP by week, hour\n """\n , pgconn, params=(ctx[\'_nt\'].sts[station][\'tzname\'], station, threshold\n ), index_col=None)\n', (1881, 2409), False, 'from pandas.io.sql import read_sql\n'), ((2407, 2430), 'numpy.zeros', 'np.zeros', (['(24, 53)', '"""f"""'], {}), "((24, 53), 'f')\n", (2415, 2430), True, 'import numpy as np\n'), ((2596, 2625), 'datetime.datetime', 'datetime.datetime', (['(2012)', '(1)', '(1)'], {}), '(2012, 1, 1)\n', (2613, 2625), False, 'import datetime\n'), ((2774, 2792), 
'pyiem.plot.use_agg.plt.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (2786, 2792), False, 'from pyiem.plot.use_agg import plt\n'), ((2804, 2829), 'pyiem.plot.use_agg.plt.get_cmap', 'plt.get_cmap', (["ctx['cmap']"], {}), "(ctx['cmap'])\n", (2816, 2829), False, 'from pyiem.plot.use_agg import plt\n'), ((2869, 2889), 'numpy.arange', 'np.arange', (['(0)', '(101)', '(5)'], {}), '(0, 101, 5)\n', (2878, 2889), True, 'import numpy as np\n'), ((2917, 2952), 'matplotlib.colors.BoundaryNorm', 'mpcolors.BoundaryNorm', (['bins', 'cmap.N'], {}), '(bins, cmap.N)\n', (2938, 2952), True, 'import matplotlib.colors as mpcolors\n'), ((3298, 3338), 'pyiem.exceptions.NoDataFound', 'NoDataFound', (['"""Unknown station metadata."""'], {}), "('Unknown station metadata.')\n", (3309, 3338), False, 'from pyiem.exceptions import NoDataFound\n'), ((3611, 3634), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3632, 3634), False, 'import datetime\n')] |
import torch
import pymesh
import numpy as np
import torch.nn as nn
from torch.autograd import Variable
from neuralnet_pytorch.metrics import emd_loss
'''Utility Classes'''
def emd_wrapper(pc1, pc2):
    """Earth Mover's Distance between two point clouds (summed, Sinkhorn approximation)."""
    loss = emd_loss(pc1, pc2, reduce='sum', sinkhorn=True)
    return loss
class Template(object):
    """Base interface for parameter-domain templates (sphere / square)."""

    def get_random_points(self):
        # placeholder behaviour kept from the original; subclasses override
        print('Need to be implemented')

    def get_regular_points(self):
        print('Need to be implemented')


class ShpereTemplate(Template):
    """Unit-sphere (3-D) template."""

    def __init__(self, device=0, grain=6):
        self.device = device
        self.dim = 3
        self.npoints = 0  # cache key for get_regular_points

    def get_random_points(self, shape, device='cuda'):
        """Sample points on the unit sphere; ``shape`` must have 3 at dim 1."""
        assert shape[1] == 3, f'3 is expected in dimension 1, while got {shape}'
        # BUG FIX: allocate on the requested device instead of unconditionally
        # via torch.cuda.FloatTensor, so device='cpu' works without a GPU.
        rand_grid = torch.empty(shape, device=device).float()
        rand_grid.data.normal_(0, 1)
        # BUG FIX: the original computed ``rand_grid - rand_grid / sqrt(...)``
        # and discarded the result, so points were never projected onto the
        # sphere; assign the normalised quotient instead.
        rand_grid = rand_grid / torch.sqrt(torch.sum(rand_grid ** 2, dim=1, keepdim=True))
        return Variable(rand_grid)

    def get_regular_points(self, npoints=None, device='cuda'):
        """Return cached icosphere vertices as a (1, 3, V) tensor."""
        if self.npoints != npoints:
            # refinement order is fixed at 4; npoints only acts as a cache key
            self.mesh = pymesh.generate_icosphere(1, [0, 0, 0], 4)
            self.vertex = torch.from_numpy(self.mesh.vertices).to(device).float()
            self.num_vertex = self.vertex.size(0)
            self.vertex = self.vertex.transpose(0, 1).contiguous().unsqueeze(0)
            self.npoints = npoints
        return Variable(self.vertex.to(device))


class SquareTemplate(Template):
    """Unit-square (2-D) template."""

    def __init__(self, device=0):
        self.device = device
        self.dim = 2
        self.npoints = 0

    def get_random_points(self, shape, device='cuda'):
        """Sample points uniformly from [0, 1]."""
        # BUG FIX: allocate on the requested device (see ShpereTemplate).
        rand_grid = torch.empty(shape, device=device).float()
        rand_grid.data.uniform_(0, 1)
        return Variable(rand_grid)

    def get_regular_points(self, npoints=2048, device='cuda'):
        """Return a cached regular grid over the unit square as a (1, 2, V) tensor."""
        if self.npoints != npoints:
            vertices, faces = self.generate_square(np.sqrt(npoints))
            self.mesh = pymesh.form_mesh(vertices=vertices, faces=faces)
            self.vertex = torch.from_numpy(self.mesh.vertices).to(device).float()
            self.num_vertex = self.vertex.size(0)
            self.vertex = self.vertex.transpose(0, 1).contiguous().unsqueeze(0)
            self.npoints = npoints
        return Variable(self.vertex[:, :2].contiguous().to(device))

    @staticmethod
    def generate_square(grain):
        """Triangulate a regular grid on the unit square.

        Returns ``(vertices, faces)`` as numpy arrays; vertices carry a zero
        z-coordinate.
        """
        grain = int(grain) - 1
        faces, vertices = list(), list()
        for i in range(grain + 1):
            for j in range(grain + 1):
                vertices.append([i / grain, j / grain, 0])
        # BUG FIX: start at i=1 -- with i=0 the original's term
        # (grain + 1) * (i - 1) produced negative vertex indices, yielding
        # faces that referenced wrapped-around rows of the grid.
        for i in range(1, grain + 1):
            for j in range(grain):
                faces.append([j + (grain + 1) * i, j + (grain + 1) * i + 1, j + (grain + 1) * (i - 1)])
        for i in range(grain):
            for j in range(1, grain + 1):
                faces.append([j + (grain + 1) * i, j + (grain + 1) * i - 1, j + (grain + 1) * (i + 1)])
        return np.array(vertices), np.array(faces)
'''Utility Functions'''
def get_template(template_type, device=0):
    """Instantiate a template by name.

    Args:
        template_type: 'SQUARE' or 'SPHERE'.
        device: forwarded to the template constructor.

    Raises:
        ValueError: if ``template_type`` is not a known template name.
    """
    getter = {
        'SQUARE': SquareTemplate,
        'SPHERE': ShpereTemplate,
    }
    # BUG FIX: the original returned the string 'Invalid template' for unknown
    # names and then crashed with an opaque "'str' object is not callable"
    # TypeError; fail fast with a clear error instead.
    if template_type not in getter:
        raise ValueError(f'Invalid template type: {template_type}')
    return getter[template_type](device=device)
def euclidean_dist(x, y):
    """Pairwise *squared* Euclidean distances between rows of x (n, d) and y (m, d).

    Returns an (n, m) tensor. Adapted from the prototypical-network code.
    """
    assert x.size(1) == y.size(1), 'Inconsistent dimension between tensor X and Y'
    # broadcast to (n, m, d), then reduce over the feature axis
    diff = x.unsqueeze(1) - y.unsqueeze(0)
    return diff.pow(2).sum(2)
def build_pc_proto(ttl_class, pred_label, pc_proto_mat):
    """Select per-sample prototypes: one-hot(pred_label) @ pc_proto_mat.

    NOTE(review): keeps the original's hard ``.cuda()`` placement, so a CUDA
    device is required.
    """
    one_hot = torch.zeros(len(pred_label), ttl_class).cuda()
    one_hot = one_hot.scatter_(1, pred_label, 1.)
    return torch.mm(one_hot, pc_proto_mat)
| [
"torch.from_numpy",
"pymesh.form_mesh",
"pymesh.generate_icosphere",
"torch.autograd.Variable",
"torch.cuda.FloatTensor",
"torch.mm",
"neuralnet_pytorch.metrics.emd_loss",
"numpy.array",
"torch.pow",
"torch.sum",
"numpy.sqrt"
] | [((215, 262), 'neuralnet_pytorch.metrics.emd_loss', 'emd_loss', (['pc1', 'pc2'], {'reduce': '"""sum"""', 'sinkhorn': '(True)'}), "(pc1, pc2, reduce='sum', sinkhorn=True)\n", (223, 262), False, 'from neuralnet_pytorch.metrics import emd_loss\n'), ((3936, 3963), 'torch.mm', 'torch.mm', (['ans', 'pc_proto_mat'], {}), '(ans, pc_proto_mat)\n', (3944, 3963), False, 'import torch\n'), ((936, 955), 'torch.autograd.Variable', 'Variable', (['rand_grid'], {}), '(rand_grid)\n', (944, 955), False, 'from torch.autograd import Variable\n'), ((1747, 1766), 'torch.autograd.Variable', 'Variable', (['rand_grid'], {}), '(rand_grid)\n', (1755, 1766), False, 'from torch.autograd import Variable\n'), ((1080, 1122), 'pymesh.generate_icosphere', 'pymesh.generate_icosphere', (['(1)', '[0, 0, 0]', '(4)'], {}), '(1, [0, 0, 0], 4)\n', (1105, 1122), False, 'import pymesh\n'), ((1960, 2008), 'pymesh.form_mesh', 'pymesh.form_mesh', ([], {'vertices': 'vertices', 'faces': 'faces'}), '(vertices=vertices, faces=faces)\n', (1976, 2008), False, 'import pymesh\n'), ((2958, 2976), 'numpy.array', 'np.array', (['vertices'], {}), '(vertices)\n', (2966, 2976), True, 'import numpy as np\n'), ((2978, 2993), 'numpy.array', 'np.array', (['faces'], {}), '(faces)\n', (2986, 2993), True, 'import numpy as np\n'), ((3534, 3553), 'torch.pow', 'torch.pow', (['(x - y)', '(2)'], {}), '(x - y, 2)\n', (3543, 3553), False, 'import torch\n'), ((1918, 1934), 'numpy.sqrt', 'np.sqrt', (['npoints'], {}), '(npoints)\n', (1925, 1934), True, 'import numpy as np\n'), ((873, 919), 'torch.sum', 'torch.sum', (['(rand_grid ** 2)'], {'dim': '(1)', 'keepdim': '(True)'}), '(rand_grid ** 2, dim=1, keepdim=True)\n', (882, 919), False, 'import torch\n'), ((744, 773), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['shape'], {}), '(shape)\n', (766, 773), False, 'import torch\n'), ((1645, 1674), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['shape'], {}), '(shape)\n', (1667, 1674), False, 'import torch\n'), ((1149, 1185), 
'torch.from_numpy', 'torch.from_numpy', (['self.mesh.vertices'], {}), '(self.mesh.vertices)\n', (1165, 1185), False, 'import torch\n'), ((2035, 2071), 'torch.from_numpy', 'torch.from_numpy', (['self.mesh.vertices'], {}), '(self.mesh.vertices)\n', (2051, 2071), False, 'import torch\n')] |
import argparse
import json
import numpy as np
import os
import pickle
from imagernn.imagernn_utils import decodeGenerator
"""
This script is used to predict sentences for arbitrary images
that are located in a folder we call root_folder. It is assumed that
the root_folder contains:
- the raw images
- a file tasks.txt that lists the images you'd like to use
- a file vgg_feats.mat that contains the CNN features.
You'll need to use the Matlab script I provided and point it at the
root folder and its tasks.txt file to save the features.
Then point this script at the folder and at a checkpoint model you'd
like to evaluate.
"""
def main(params):
    """Load a checkpointed captioning model, caption every image listed in
    tasks.txt using precomputed VGG features, and dump JSON + HTML results."""
    # load the checkpoint
    checkpoint_path = params['checkpoint_path']
    print('loading checkpoint %s' % (checkpoint_path, ))
    # latin1 encoding lets Python 3 read pickles written by Python 2
    checkpoint = pickle.load(open(checkpoint_path, 'rb'), encoding='latin1')
    checkpoint_params = checkpoint['params']
    model = checkpoint['model']
    misc = {}
    misc['wordtoix'] = checkpoint['wordtoix']
    ixtoword = checkpoint['ixtoword']
    # output blob which we will dump to JSON for visualizing the results
    blob = {}
    blob['params'] = params
    blob['checkpoint_params'] = checkpoint_params
    blob['imgblobs'] = []
    # load the tasks.txt file
    root_path = params['root_path']
    img_names = open(os.path.join(root_path, 'img', 'tasks.txt'), 'r').read().splitlines()
    # load the features for all images
    features_path = os.path.join(root_path, 'self_img_vgg_feats.npy')
    # features_struct = scipy.io.loadmat(features_path)
    features = np.load(features_path)
    features = features.T # this is a 4096 x N numpy array of features
    D,N = features.shape
    # iterate over all images and predict sentences
    BatchGenerator = decodeGenerator(checkpoint_params)
    for n in range(N):
        print('image %d/%d:' % (n, N))
        # encode the image
        img = {}
        img['feat'] = features[:, n]
        img['local_file_path'] =img_names[n]
        # perform the work. heavy lifting happens inside
        kwparams = { 'beam_size' : params['beam_size'] }
        Ys = BatchGenerator.predict([{'image':img}], model, checkpoint_params, **kwparams)
        # build up the output
        img_blob = {}
        img_blob['img_path'] = img['local_file_path']
        # encode the top prediction
        top_predictions = Ys[0] # take predictions for the first (and only) image we passed in
        top_prediction = top_predictions[0] # these are sorted with highest on top
        candidate = ' '.join([ixtoword[ix] for ix in top_prediction[1] if ix > 0]) # ix 0 is the END token, skip that
        print('PRED: (%f) %s' % (top_prediction[0], candidate))
        img_blob['candidate'] = {'text': candidate, 'logprob': top_prediction[0]}
        blob['imgblobs'].append(img_blob)
    # dump result struct to file
    save_file = os.path.join(root_path, 'result_struct.json')
    print('writing predictions to %s...' % (save_file, ))
    json.dump(blob, open(save_file, 'w'))
    # dump output html
    html = ''
    for img in blob['imgblobs']:
        html += '<img src="%s" height="400"><br>' % ('img/' + img['img_path'], )
        html += '(%f) %s <br><br>' % (img['candidate']['logprob'], img['candidate']['text'])
    html_file = os.path.join(root_path, 'result.html')
    print('writing html result file to %s...' % (html_file, ))
    open(html_file, 'w').write(html)
# CLI entry point: parse arguments and run the captioning pass.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('checkpoint_path', type=str, help='the input checkpoint')
    parser.add_argument('-r', '--root_path', default='self_pic', type=str, help='folder with the images, tasks.txt file, and corresponding vgg_feats.mat file')
    parser.add_argument('-b', '--beam_size', type=int, default=1, help='beam size in inference. 1 indicates greedy per-word max procedure. Good value is approx 20 or so, and more = better.')
    # args = parser.parse_args()
    args = parser.parse_args()
    params = vars(args) # convert to ordinary dict
    print('parsed parameters:')
    print(json.dumps(params, indent = 2))
    main(params)
| [
"numpy.load",
"argparse.ArgumentParser",
"json.dumps",
"imagernn.imagernn_utils.decodeGenerator",
"os.path.join"
] | [((1415, 1464), 'os.path.join', 'os.path.join', (['root_path', '"""self_img_vgg_feats.npy"""'], {}), "(root_path, 'self_img_vgg_feats.npy')\n", (1427, 1464), False, 'import os\n'), ((1532, 1554), 'numpy.load', 'np.load', (['features_path'], {}), '(features_path)\n', (1539, 1554), True, 'import numpy as np\n'), ((1717, 1751), 'imagernn.imagernn_utils.decodeGenerator', 'decodeGenerator', (['checkpoint_params'], {}), '(checkpoint_params)\n', (1732, 1751), False, 'from imagernn.imagernn_utils import decodeGenerator\n'), ((2751, 2796), 'os.path.join', 'os.path.join', (['root_path', '"""result_struct.json"""'], {}), "(root_path, 'result_struct.json')\n", (2763, 2796), False, 'import os\n'), ((3138, 3176), 'os.path.join', 'os.path.join', (['root_path', '"""result.html"""'], {}), "(root_path, 'result.html')\n", (3150, 3176), False, 'import os\n'), ((3313, 3338), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3336, 3338), False, 'import argparse\n'), ((3914, 3942), 'json.dumps', 'json.dumps', (['params'], {'indent': '(2)'}), '(params, indent=2)\n', (3924, 3942), False, 'import json\n'), ((1289, 1332), 'os.path.join', 'os.path.join', (['root_path', '"""img"""', '"""tasks.txt"""'], {}), "(root_path, 'img', 'tasks.txt')\n", (1301, 1332), False, 'import os\n')] |
#! /usr/bin/env python3
# Copyright (c) 2021 Grumpy Cat Software S.L.
#
# This Source Code is licensed under the MIT 2.0 license.
# the terms can be found in LICENSE.md at the root of
# this project, or at http://mozilla.org/MPL/2.0/.
# %%
from entsoe import EntsoePandasClient, Area
import pandas as pd
import numpy as np
import os
client = EntsoePandasClient(api_key='<KEY>')
base_dir = '/Users/justo.ruiz/Development/shapelets/solo_comprobacion/modules/shapelets/data'
# %%
year = '2016'
country = 'es'
params = {
'country_code': Area.ES,
'start': pd.Timestamp(f'{year}0101', tz='Europe/Brussels'),
'end': pd.Timestamp(f'{year}1231', tz='Europe/Brussels'),
}
def save(query: str, data):
path = os.path.join(base_dir, f'entoe_{year}_{country}_{query}.gz')
np.savetxt(path, data)
# %%
day_ahead_prices = client.query_day_ahead_prices(**params)
save('day_ahead_prices', day_ahead_prices)
# %%
load = client.query_load(**params)
save('load', load)
# %%
load_forecast = client.query_load_forecast(**params)
save('load_forecast', load_forecast)
# %%
wind_solar_forecast = client.query_wind_and_solar_forecast(**params)
save('solar_forecast', wind_solar_forecast.to_numpy()[:, 0])
save('wind_forecast', wind_solar_forecast.to_numpy()[:, 1])
# %%
client.query_pri
| [
"pandas.Timestamp",
"numpy.savetxt",
"entsoe.EntsoePandasClient",
"os.path.join"
] | [((345, 380), 'entsoe.EntsoePandasClient', 'EntsoePandasClient', ([], {'api_key': '"""<KEY>"""'}), "(api_key='<KEY>')\n", (363, 380), False, 'from entsoe import EntsoePandasClient, Area\n'), ((563, 612), 'pandas.Timestamp', 'pd.Timestamp', (['f"""{year}0101"""'], {'tz': '"""Europe/Brussels"""'}), "(f'{year}0101', tz='Europe/Brussels')\n", (575, 612), True, 'import pandas as pd\n'), ((625, 674), 'pandas.Timestamp', 'pd.Timestamp', (['f"""{year}1231"""'], {'tz': '"""Europe/Brussels"""'}), "(f'{year}1231', tz='Europe/Brussels')\n", (637, 674), True, 'import pandas as pd\n'), ((719, 779), 'os.path.join', 'os.path.join', (['base_dir', 'f"""entoe_{year}_{country}_{query}.gz"""'], {}), "(base_dir, f'entoe_{year}_{country}_{query}.gz')\n", (731, 779), False, 'import os\n'), ((784, 806), 'numpy.savetxt', 'np.savetxt', (['path', 'data'], {}), '(path, data)\n', (794, 806), True, 'import numpy as np\n')] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""utils for tinyms train, eval and predict"""
import random
import logging
import os
import time
from datetime import datetime
import numpy as np
from tinyms import Tensor
from mindspore.train.serialization import load_checkpoint, load_param_into_net, save_checkpoint
from tinyms.data.utils import save_image
__all__ = [
'GanReporter',
'gan_load_ckpt',
'GanImagePool',
]
class GanReporter(logging.Logger):
"""
Reporter class for Cycle Gan.
This class includes several functions that can save images/checkpoints and print/save logging information.
Args:
args (class): Option class.
"""
def __init__(self, args):
super(GanReporter, self).__init__("cyclegan")
self.log_dir = os.path.join(args.outputs_dir, 'log')
self.imgs_dir = os.path.join(args.outputs_dir, 'imgs')
self.ckpts_dir = os.path.join(args.outputs_dir, 'ckpt')
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir, exist_ok=True)
if not os.path.exists(self.imgs_dir):
os.makedirs(self.imgs_dir, exist_ok=True)
if not os.path.exists(self.ckpts_dir):
os.makedirs(self.ckpts_dir, exist_ok=True)
self.rank = 0
self.save_checkpoint_epochs = args.save_checkpoint_epochs
self.save_imgs = args.save_imgs
# console handler
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(message)s')
console.setFormatter(formatter)
self.addHandler(console)
# file handler
log_name = datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S') + '_rank_{}.log'.format(self.rank)
self.log_fn = os.path.join(self.log_dir, log_name)
fh = logging.FileHandler(self.log_fn)
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
self.addHandler(fh)
self.save_args(args)
self.step = 0
self.epoch = 0
self.dataset_size = args.dataset_size
self.print_iter = 100
self.G_loss = []
self.D_loss = []
def info(self, msg, *args, **kwargs):
if self.isEnabledFor(logging.INFO):
self._log(logging.INFO, msg, args, **kwargs)
def save_args(self, args):
"""Show args configuration info."""
self.info('Args:')
args_dict = vars(args)
for key in args_dict.keys():
self.info('--> %s: %s', key, args_dict[key])
self.info('')
def important_info(self, msg, *args, **kwargs):
if self.logger.isEnabledFor(logging.INFO) and self.rank == 0:
line_width = 2
important_msg = '\n'
important_msg += ('*'*70 + '\n')*line_width
important_msg += ('*'*line_width + '\n')*2
important_msg += '*'*line_width + ' '*8 + msg + '\n'
important_msg += ('*'*line_width + '\n')*2
important_msg += ('*'*70 + '\n')*line_width
self.info(important_msg, *args, **kwargs)
def epoch_start(self):
"""Print log when step start."""
self.step_start_time = time.time()
self.epoch_start_time = time.time()
self.step = 0
self.epoch += 1
self.G_loss = []
self.D_loss = []
def step_end(self, res_G, res_D):
"""
Print log when step end.
Args:
res_G (layers.Layer): TrainOneStepG instance.
res_D (layers.Layer): TrainOneStepD instance.
"""
self.step += 1
loss_D = float(res_D.asnumpy())
res = []
for item in res_G[2:]:
res.append(float(item.asnumpy()))
self.G_loss.append(res[0])
self.D_loss.append(loss_D)
if self.step % self.print_iter == 0:
step_cost = (time.time() - self.step_start_time) * 1000 / self.print_iter
losses = "G_loss: {:.2f}, D_loss:{:.2f}, loss_G_A: {:.2f}, loss_G_B: {:.2f}, loss_C_A: {:.2f},"\
"loss_C_B: {:.2f}, loss_idt_A: {:.2f}, loss_idt_B:{:.2f}".format(
res[0], loss_D, res[1], res[2], res[3], res[4], res[5], res[6])
self.info("Epoch[{}] [{}/{}] step cost: {:.2f} ms, {}".format(
self.epoch, self.step, self.dataset_size, step_cost, losses))
self.step_start_time = time.time()
def epoch_end(self, net):
"""
Print log and save cgeckpoints when epoch end.
Args:
net (layers.Layer): TrainOneStepG instance.
"""
epoch_cost = (time.time() - self.epoch_start_time) * 1000
pre_step_time = epoch_cost / self.dataset_size
mean_loss_G = sum(self.G_loss) / self.dataset_size
mean_loss_D = sum(self.D_loss) / self.dataset_size
self.info("Epoch [{}] total cost: {:.2f} ms, pre step: {:.2f} ms, G_loss: {:.2f}, D_loss: {:.2f}".format(
self.epoch, epoch_cost, pre_step_time, mean_loss_G, mean_loss_D))
if self.epoch % self.save_checkpoint_epochs == 0 and self.rank == 0:
save_checkpoint(net.G.generator.G_A, os.path.join(self.ckpts_dir, f"G_A_{self.epoch}.ckpt"))
save_checkpoint(net.G.generator.G_B, os.path.join(self.ckpts_dir, f"G_B_{self.epoch}.ckpt"))
save_checkpoint(net.G.D_A, os.path.join(self.ckpts_dir, f"D_A_{self.epoch}.ckpt"))
save_checkpoint(net.G.D_B, os.path.join(self.ckpts_dir, f"D_B_{self.epoch}.ckpt"))
def visualizer(self, img_A, img_B, fake_A, fake_B):
"""
Save visualized image.
Args:
img_A (numpy.ndarray): Image data.
img_B (numpy.ndarray): Image data.
fake_A (numpy.ndarray): Generated image data.
fake_B (numpy.ndarray): Generated image data.
"""
if self.save_imgs and self.step % self.dataset_size == 0 and self.rank == 0:
save_image(img_A, os.path.join(self.imgs_dir, f"{self.epoch}_img_A.jpg"))
save_image(img_B, os.path.join(self.imgs_dir, f"{self.epoch}_img_B.jpg"))
save_image(fake_A, os.path.join(self.imgs_dir, f"{self.epoch}_fake_A.jpg"))
save_image(fake_B, os.path.join(self.imgs_dir, f"{self.epoch}_fake_B.jpg"))
def start_predict(self, direction):
"""
Print log when predict start.
Args:
direction (str): The predict name.
"""
self.predict_start_time = time.time()
self.direction = direction
self.info('==========start predict %s===============', self.direction)
def end_predict(self):
"""Print log when predict end."""
cost = (time.time() - self.predict_start_time) * 1000
pre_step_cost = cost / self.dataset_size
self.info('total {} imgs cost {:.2f} ms, pre img cost {:.2f}'.format(self.dataset_size, cost, pre_step_cost))
self.info('==========end predict %s===============\n', self.direction)
def start_eval(self):
"""Print log when eval start."""
self.eval_start_time = time.time()
self.info('==========start eval %s===============')
def end_eval(self):
"""Print log when eval end."""
cost = (time.time() - self.eval_start_time) * 1000
pre_step_cost = cost / self.dataset_size
self.info('total {} imgs cost {:.2f} ms, pre img cost {:.2f}'.format(self.dataset_size, cost, pre_step_cost))
self.info('==========end eval %s===============\n')
class GanImagePool():
"""
This class implements an image buffer that stores previously generated images.
This buffer enables us to update discriminators using a history of generated images
rather than the ones produced by the latest generators.
Args:
pool_size (int): The size of image buffer, if pool_size=0, no buffer will be created.
"""
def __init__(self, pool_size):
"""Initialize the ImagePool class."""
self.pool_size = pool_size
if self.pool_size > 0: # create an empty pool
self.num_imgs = 0
self.images = []
def query(self, images):
"""
Query an image from the pool.
By 50/100, the buffer will return input images.
By 50/100, the buffer will return images previously stored in the buffer,
and insert the current images to the buffer.
Args:
images (Tensor): The latest generated images from the generator
Returns:
Images tensor from the buffer.
"""
if isinstance(images, Tensor):
images = images.asnumpy()
if self.pool_size == 0: # if the buffer size is 0, do nothing
return Tensor(images)
return_images = []
for image in images:
# if the buffer is not full; keep inserting current images to the buffer
if self.num_imgs < self.pool_size:
self.num_imgs = self.num_imgs + 1
self.images.append(image)
return_images.append(image)
else:
p = random.uniform(0, 1)
# by 50% chance, the buffer will return a previously stored image
# and insert the current image into the buffer
if p > 0.5:
random_id = random.randint(0, self.pool_size - 1) # randint is inclusive
tmp = self.images[random_id].copy()
self.images[random_id] = image
return_images.append(tmp)
else: # by another 50% chance, the buffer will return the current image
return_images.append(image)
return_images = np.array(return_images) # collect all the images and return
if len(return_images.shape) != 4:
raise ValueError("img should be 4d, but get shape {}".format(return_images.shape))
return Tensor(return_images)
def gan_load_ckpt(G_A_ckpt=None, G_B_ckpt=None, D_A_ckpt=None, D_B_ckpt=None,
G_A=None, G_B=None, D_A=None, D_B=None):
"""
Load parameter from checkpoint files.
Args:
G_A_ckpt (Checkpoint): Load G_A checkpoint file.
G_B_ckpt (Checkpoint): Load G_B checkpoint file.
D_A_ckpt (Checkpoint): Load D_A checkpoint file.
D_B_ckpt (Checkpoint): Load D_B checkpoint file.
G_A (Generator): G_A Generator.
G_B (Generator): G_B Generator.
D_A (Discriminator): D_A Discriminator.
D_B (Discriminator): D_B Discriminator.
"""
if G_A_ckpt is not None:
param_GA = load_checkpoint(G_A_ckpt)
load_param_into_net(G_A, param_GA)
if G_B_ckpt is not None:
param_GB = load_checkpoint(G_B_ckpt)
load_param_into_net(G_B, param_GB)
if D_A is not None and D_A_ckpt is not None:
param_DA = load_checkpoint(D_A_ckpt)
load_param_into_net(D_A, param_DA)
if D_B is not None and D_B_ckpt is not None:
param_DB = load_checkpoint(D_B_ckpt)
load_param_into_net(D_B, param_DB)
| [
"logging.FileHandler",
"os.makedirs",
"random.uniform",
"random.randint",
"logging.StreamHandler",
"tinyms.Tensor",
"os.path.exists",
"datetime.datetime.now",
"time.time",
"logging.Formatter",
"numpy.array",
"mindspore.train.serialization.load_checkpoint",
"os.path.join",
"mindspore.train.... | [((1406, 1443), 'os.path.join', 'os.path.join', (['args.outputs_dir', '"""log"""'], {}), "(args.outputs_dir, 'log')\n", (1418, 1443), False, 'import os\n'), ((1468, 1506), 'os.path.join', 'os.path.join', (['args.outputs_dir', '"""imgs"""'], {}), "(args.outputs_dir, 'imgs')\n", (1480, 1506), False, 'import os\n'), ((1532, 1570), 'os.path.join', 'os.path.join', (['args.outputs_dir', '"""ckpt"""'], {}), "(args.outputs_dir, 'ckpt')\n", (1544, 1570), False, 'import os\n'), ((2043, 2066), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (2064, 2066), False, 'import logging\n'), ((2126, 2158), 'logging.Formatter', 'logging.Formatter', (['"""%(message)s"""'], {}), "('%(message)s')\n", (2143, 2158), False, 'import logging\n'), ((2381, 2417), 'os.path.join', 'os.path.join', (['self.log_dir', 'log_name'], {}), '(self.log_dir, log_name)\n', (2393, 2417), False, 'import os\n'), ((2431, 2463), 'logging.FileHandler', 'logging.FileHandler', (['self.log_fn'], {}), '(self.log_fn)\n', (2450, 2463), False, 'import logging\n'), ((3779, 3790), 'time.time', 'time.time', ([], {}), '()\n', (3788, 3790), False, 'import time\n'), ((3823, 3834), 'time.time', 'time.time', ([], {}), '()\n', (3832, 3834), False, 'import time\n'), ((7060, 7071), 'time.time', 'time.time', ([], {}), '()\n', (7069, 7071), False, 'import time\n'), ((7663, 7674), 'time.time', 'time.time', ([], {}), '()\n', (7672, 7674), False, 'import time\n'), ((10280, 10303), 'numpy.array', 'np.array', (['return_images'], {}), '(return_images)\n', (10288, 10303), True, 'import numpy as np\n'), ((10494, 10515), 'tinyms.Tensor', 'Tensor', (['return_images'], {}), '(return_images)\n', (10500, 10515), False, 'from tinyms import Tensor\n'), ((11176, 11201), 'mindspore.train.serialization.load_checkpoint', 'load_checkpoint', (['G_A_ckpt'], {}), '(G_A_ckpt)\n', (11191, 11201), False, 'from mindspore.train.serialization import load_checkpoint, load_param_into_net, save_checkpoint\n'), ((11210, 
11244), 'mindspore.train.serialization.load_param_into_net', 'load_param_into_net', (['G_A', 'param_GA'], {}), '(G_A, param_GA)\n', (11229, 11244), False, 'from mindspore.train.serialization import load_checkpoint, load_param_into_net, save_checkpoint\n'), ((11293, 11318), 'mindspore.train.serialization.load_checkpoint', 'load_checkpoint', (['G_B_ckpt'], {}), '(G_B_ckpt)\n', (11308, 11318), False, 'from mindspore.train.serialization import load_checkpoint, load_param_into_net, save_checkpoint\n'), ((11327, 11361), 'mindspore.train.serialization.load_param_into_net', 'load_param_into_net', (['G_B', 'param_GB'], {}), '(G_B, param_GB)\n', (11346, 11361), False, 'from mindspore.train.serialization import load_checkpoint, load_param_into_net, save_checkpoint\n'), ((11430, 11455), 'mindspore.train.serialization.load_checkpoint', 'load_checkpoint', (['D_A_ckpt'], {}), '(D_A_ckpt)\n', (11445, 11455), False, 'from mindspore.train.serialization import load_checkpoint, load_param_into_net, save_checkpoint\n'), ((11464, 11498), 'mindspore.train.serialization.load_param_into_net', 'load_param_into_net', (['D_A', 'param_DA'], {}), '(D_A, param_DA)\n', (11483, 11498), False, 'from mindspore.train.serialization import load_checkpoint, load_param_into_net, save_checkpoint\n'), ((11567, 11592), 'mindspore.train.serialization.load_checkpoint', 'load_checkpoint', (['D_B_ckpt'], {}), '(D_B_ckpt)\n', (11582, 11592), False, 'from mindspore.train.serialization import load_checkpoint, load_param_into_net, save_checkpoint\n'), ((11601, 11635), 'mindspore.train.serialization.load_param_into_net', 'load_param_into_net', (['D_B', 'param_DB'], {}), '(D_B, param_DB)\n', (11620, 11635), False, 'from mindspore.train.serialization import load_checkpoint, load_param_into_net, save_checkpoint\n'), ((1586, 1614), 'os.path.exists', 'os.path.exists', (['self.log_dir'], {}), '(self.log_dir)\n', (1600, 1614), False, 'import os\n'), ((1628, 1668), 'os.makedirs', 'os.makedirs', (['self.log_dir'], 
{'exist_ok': '(True)'}), '(self.log_dir, exist_ok=True)\n', (1639, 1668), False, 'import os\n'), ((1684, 1713), 'os.path.exists', 'os.path.exists', (['self.imgs_dir'], {}), '(self.imgs_dir)\n', (1698, 1713), False, 'import os\n'), ((1727, 1768), 'os.makedirs', 'os.makedirs', (['self.imgs_dir'], {'exist_ok': '(True)'}), '(self.imgs_dir, exist_ok=True)\n', (1738, 1768), False, 'import os\n'), ((1784, 1814), 'os.path.exists', 'os.path.exists', (['self.ckpts_dir'], {}), '(self.ckpts_dir)\n', (1798, 1814), False, 'import os\n'), ((1828, 1870), 'os.makedirs', 'os.makedirs', (['self.ckpts_dir'], {'exist_ok': '(True)'}), '(self.ckpts_dir, exist_ok=True)\n', (1839, 1870), False, 'import os\n'), ((4989, 5000), 'time.time', 'time.time', ([], {}), '()\n', (4998, 5000), False, 'import time\n'), ((9296, 9310), 'tinyms.Tensor', 'Tensor', (['images'], {}), '(images)\n', (9302, 9310), False, 'from tinyms import Tensor\n'), ((5204, 5215), 'time.time', 'time.time', ([], {}), '()\n', (5213, 5215), False, 'import time\n'), ((5740, 5794), 'os.path.join', 'os.path.join', (['self.ckpts_dir', 'f"""G_A_{self.epoch}.ckpt"""'], {}), "(self.ckpts_dir, f'G_A_{self.epoch}.ckpt')\n", (5752, 5794), False, 'import os\n'), ((5845, 5899), 'os.path.join', 'os.path.join', (['self.ckpts_dir', 'f"""G_B_{self.epoch}.ckpt"""'], {}), "(self.ckpts_dir, f'G_B_{self.epoch}.ckpt')\n", (5857, 5899), False, 'import os\n'), ((5940, 5994), 'os.path.join', 'os.path.join', (['self.ckpts_dir', 'f"""D_A_{self.epoch}.ckpt"""'], {}), "(self.ckpts_dir, f'D_A_{self.epoch}.ckpt')\n", (5952, 5994), False, 'import os\n'), ((6035, 6089), 'os.path.join', 'os.path.join', (['self.ckpts_dir', 'f"""D_B_{self.epoch}.ckpt"""'], {}), "(self.ckpts_dir, f'D_B_{self.epoch}.ckpt')\n", (6047, 6089), False, 'import os\n'), ((6543, 6597), 'os.path.join', 'os.path.join', (['self.imgs_dir', 'f"""{self.epoch}_img_A.jpg"""'], {}), "(self.imgs_dir, f'{self.epoch}_img_A.jpg')\n", (6555, 6597), False, 'import os\n'), ((6629, 6683), 'os.path.join', 
'os.path.join', (['self.imgs_dir', 'f"""{self.epoch}_img_B.jpg"""'], {}), "(self.imgs_dir, f'{self.epoch}_img_B.jpg')\n", (6641, 6683), False, 'import os\n'), ((6716, 6771), 'os.path.join', 'os.path.join', (['self.imgs_dir', 'f"""{self.epoch}_fake_A.jpg"""'], {}), "(self.imgs_dir, f'{self.epoch}_fake_A.jpg')\n", (6728, 6771), False, 'import os\n'), ((6804, 6859), 'os.path.join', 'os.path.join', (['self.imgs_dir', 'f"""{self.epoch}_fake_B.jpg"""'], {}), "(self.imgs_dir, f'{self.epoch}_fake_B.jpg')\n", (6816, 6859), False, 'import os\n'), ((7272, 7283), 'time.time', 'time.time', ([], {}), '()\n', (7281, 7283), False, 'import time\n'), ((7815, 7826), 'time.time', 'time.time', ([], {}), '()\n', (7824, 7826), False, 'import time\n'), ((9673, 9693), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (9687, 9693), False, 'import random\n'), ((2274, 2288), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2286, 2288), False, 'from datetime import datetime\n'), ((9899, 9936), 'random.randint', 'random.randint', (['(0)', '(self.pool_size - 1)'], {}), '(0, self.pool_size - 1)\n', (9913, 9936), False, 'import random\n'), ((4455, 4466), 'time.time', 'time.time', ([], {}), '()\n', (4464, 4466), False, 'import time\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 23 17:34:29 2020
@author: <NAME>
"""
import numpy as np
from scipy.stats import beta
import matplotlib.pyplot as plt
import Organization as og
fig, ax = plt.subplots(1, 1,dpi=300)
mu = np.arange(0.1,1,0.2)
phi = 15
n = 1000
a = np.zeros(len(mu))
b = np.zeros(len(mu))
x = np.linspace(0.001,0.999,n)
ls = ['-','--','-.',':','-']
lw = [1,1,1,1,2]
for ii in np.arange(len(mu)):
a, b = og.beta(mu[ii],phi)
label = '%.1f' % mu[ii]
ax.plot(x,beta.pdf(x,a,b),label=label,ls=ls[ii],lw=lw[ii])
ax.legend(loc='best', frameon=False)
ax.set_xlabel('Dependent Variable (q)')
ax.set_ylabel('Probability Density')
#ax.set_title('Beta Distributions w/ Different Mean Values')
plt.show() | [
"matplotlib.pyplot.show",
"numpy.arange",
"numpy.linspace",
"Organization.beta",
"matplotlib.pyplot.subplots",
"scipy.stats.beta.pdf"
] | [((204, 231), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'dpi': '(300)'}), '(1, 1, dpi=300)\n', (216, 231), True, 'import matplotlib.pyplot as plt\n'), ((236, 258), 'numpy.arange', 'np.arange', (['(0.1)', '(1)', '(0.2)'], {}), '(0.1, 1, 0.2)\n', (245, 258), True, 'import numpy as np\n'), ((324, 352), 'numpy.linspace', 'np.linspace', (['(0.001)', '(0.999)', 'n'], {}), '(0.001, 0.999, n)\n', (335, 352), True, 'import numpy as np\n'), ((726, 736), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (734, 736), True, 'import matplotlib.pyplot as plt\n'), ((439, 459), 'Organization.beta', 'og.beta', (['mu[ii]', 'phi'], {}), '(mu[ii], phi)\n', (446, 459), True, 'import Organization as og\n'), ((501, 518), 'scipy.stats.beta.pdf', 'beta.pdf', (['x', 'a', 'b'], {}), '(x, a, b)\n', (509, 518), False, 'from scipy.stats import beta\n')] |
import hes_inference
import numpy as np
protein_at_observations = np.zeros((50,2))
protein_at_observations[:,0] = np.arange(0,15*50,15)
protein_at_observations[:,1] = np.arange(0,500*50,500)
model_parameters = np.array([10000,4,np.log(2)/30,np.log(2)/90,1,1,15])
measurement_variance = 100**2
hes_inference.kalman_filter(protein_at_observations,model_parameters,measurement_variance)
| [
"numpy.log",
"numpy.zeros",
"numpy.arange",
"hes_inference.kalman_filter"
] | [((67, 84), 'numpy.zeros', 'np.zeros', (['(50, 2)'], {}), '((50, 2))\n', (75, 84), True, 'import numpy as np\n'), ((115, 140), 'numpy.arange', 'np.arange', (['(0)', '(15 * 50)', '(15)'], {}), '(0, 15 * 50, 15)\n', (124, 140), True, 'import numpy as np\n'), ((168, 195), 'numpy.arange', 'np.arange', (['(0)', '(500 * 50)', '(500)'], {}), '(0, 500 * 50, 500)\n', (177, 195), True, 'import numpy as np\n'), ((296, 392), 'hes_inference.kalman_filter', 'hes_inference.kalman_filter', (['protein_at_observations', 'model_parameters', 'measurement_variance'], {}), '(protein_at_observations, model_parameters,\n measurement_variance)\n', (323, 392), False, 'import hes_inference\n'), ((230, 239), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (236, 239), True, 'import numpy as np\n'), ((243, 252), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (249, 252), True, 'import numpy as np\n')] |
import numpy as np
from gate import *
from layer import *
from output import *
class Model:
def __init__(self, layers_dim):
self.b = []
self.W = []
for i in range(len(layers_dim)-1):
self.W.append(np.random.randn(layers_dim[i], layers_dim[i+1]) / np.sqrt(layers_dim[i]))
self.b.append(np.random.randn(layers_dim[i+1]).reshape(1, layers_dim[i+1]))
def calculate_loss(self, X, y):
mulGate = MultiplyGate()
addGate = AddGate()
layer = Tanh()
softmaxOutput = Softmax()
input = X
for i in range(len(self.W)):
mul = mulGate.forward(self.W[i], input)
add = addGate.forward(mul, self.b[i])
input = layer.forward(add)
return softmaxOutput.loss(input, y)
def predict(self, X):
mulGate = MultiplyGate()
addGate = AddGate()
layer = Tanh()
softmaxOutput = Softmax()
input = X
for i in range(len(self.W)):
mul = mulGate.forward(self.W[i], input)
add = addGate.forward(mul, self.b[i])
input = layer.forward(add)
probs = softmaxOutput.predict(input)
return np.argmax(probs, axis=1)
def train(self, X, y, num_passes=20000, epsilon=0.01, reg_lambda=0.01, print_loss=False):
mulGate = MultiplyGate()
addGate = AddGate()
layer = Tanh()
softmaxOutput = Softmax()
for epoch in range(num_passes):
# Forward propagation
input = X
forward = [(None, None, input)]
for i in range(len(self.W)):
mul = mulGate.forward(self.W[i], input)
add = addGate.forward(mul, self.b[i])
input = layer.forward(add)
forward.append((mul, add, input))
# Back propagation
dtanh = softmaxOutput.diff(forward[len(forward) - 1][2], y)
for i in range(len(forward) - 1, 0, -1):
dadd = layer.backward(forward[i][1], dtanh)
db, dmul = addGate.backward(forward[i][0], self.b[i - 1], dadd)
dW, dtanh = mulGate.backward(self.W[i - 1], forward[i - 1][2], dmul)
# Add regularization terms (b1 and b2 don't have regularization terms)
dW += reg_lambda * self.W[i - 1]
# Gradient descent parameter update
self.b[i - 1] += -epsilon * db
self.W[i - 1] += -epsilon * dW
if print_loss and epoch % 1000 == 0:
print("Loss after iteration %i: %f" % (epoch, self.calculate_loss(X, y)))
| [
"numpy.random.randn",
"numpy.sqrt",
"numpy.argmax"
] | [((1201, 1225), 'numpy.argmax', 'np.argmax', (['probs'], {'axis': '(1)'}), '(probs, axis=1)\n', (1210, 1225), True, 'import numpy as np\n'), ((239, 288), 'numpy.random.randn', 'np.random.randn', (['layers_dim[i]', 'layers_dim[i + 1]'], {}), '(layers_dim[i], layers_dim[i + 1])\n', (254, 288), True, 'import numpy as np\n'), ((289, 311), 'numpy.sqrt', 'np.sqrt', (['layers_dim[i]'], {}), '(layers_dim[i])\n', (296, 311), True, 'import numpy as np\n'), ((339, 373), 'numpy.random.randn', 'np.random.randn', (['layers_dim[i + 1]'], {}), '(layers_dim[i + 1])\n', (354, 373), True, 'import numpy as np\n')] |
import cv2
import numpy as np
from Functions import imgproc_func as fnc
img = np.zeros((300,512,3), np.uint8)
fnc.define_trackbar('R', 'colors', (0, 255))
fnc.define_trackbar('G', 'colors', (0, 255))
fnc.define_trackbar('B', 'colors', (0, 255))
while (1):
cv2.imshow('image', img)
k = cv2.waitKey(1)
if k == ord('q'):
break
# get current positions of four trackbars
r = fnc.retrieve_trackbar('R','colors')
g = fnc.retrieve_trackbar('G', 'colors')
b = fnc.retrieve_trackbar('B', 'colors')
img[:] = [b, g, r] | [
"cv2.waitKey",
"Functions.imgproc_func.retrieve_trackbar",
"numpy.zeros",
"Functions.imgproc_func.define_trackbar",
"cv2.imshow"
] | [((80, 113), 'numpy.zeros', 'np.zeros', (['(300, 512, 3)', 'np.uint8'], {}), '((300, 512, 3), np.uint8)\n', (88, 113), True, 'import numpy as np\n'), ((113, 157), 'Functions.imgproc_func.define_trackbar', 'fnc.define_trackbar', (['"""R"""', '"""colors"""', '(0, 255)'], {}), "('R', 'colors', (0, 255))\n", (132, 157), True, 'from Functions import imgproc_func as fnc\n'), ((158, 202), 'Functions.imgproc_func.define_trackbar', 'fnc.define_trackbar', (['"""G"""', '"""colors"""', '(0, 255)'], {}), "('G', 'colors', (0, 255))\n", (177, 202), True, 'from Functions import imgproc_func as fnc\n'), ((203, 247), 'Functions.imgproc_func.define_trackbar', 'fnc.define_trackbar', (['"""B"""', '"""colors"""', '(0, 255)'], {}), "('B', 'colors', (0, 255))\n", (222, 247), True, 'from Functions import imgproc_func as fnc\n'), ((264, 288), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'img'], {}), "('image', img)\n", (274, 288), False, 'import cv2\n'), ((297, 311), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (308, 311), False, 'import cv2\n'), ((406, 442), 'Functions.imgproc_func.retrieve_trackbar', 'fnc.retrieve_trackbar', (['"""R"""', '"""colors"""'], {}), "('R', 'colors')\n", (427, 442), True, 'from Functions import imgproc_func as fnc\n'), ((450, 486), 'Functions.imgproc_func.retrieve_trackbar', 'fnc.retrieve_trackbar', (['"""G"""', '"""colors"""'], {}), "('G', 'colors')\n", (471, 486), True, 'from Functions import imgproc_func as fnc\n'), ((495, 531), 'Functions.imgproc_func.retrieve_trackbar', 'fnc.retrieve_trackbar', (['"""B"""', '"""colors"""'], {}), "('B', 'colors')\n", (516, 531), True, 'from Functions import imgproc_func as fnc\n')] |
# -*- coding:utf-8 -*-
import numpy as np
import random
class SVM:
def __init__(self, kernel='rbf', C=10.0, gamma=0.5, p=2):
'''
kernel: rbf 高斯核函数 linear 线性核函数 polynomial 多项式
C: penalty term 惩罚因子
gammar: hyperparameter of rbf/polynomial kernel 用于 rbf/polynomial 的超参数
p: hyperparameter of polynomial kernel 用于 polynomial 的超参数
'''
if C <= 0.0:
C = 1.0
self.b = 0
self.alpha = list()
self.kernel_type = kernel
self.C = C
self.epsilon = 1e-3 # SVM 目标函数下降的范围
self.tol = 1e-3 # KKT 误差
self.gamma = gamma
self.p = p
self.error_list = list()
def fit(self, x, y):
'''
x: count by dimension matrix
y: 1 by count vector/list
'''
self.__prepare(x, y)
self.__train()
def predict(self, x):
'''
sign of decision function
'''
return np.sign(self.__decision(x, self.alpha))
def __prepare(self, x, y):
'''
x: count by dimension matrix
y: 1 by count vector/list
'''
self.train_x = np.asmatrix(x)
self.train_y = np.asarray(y)
self.count, self.dimension = np.shape(self.train_x)
self.w = np.zeros((1, self.dimension))
self.alpha = np.zeros(self.count)
self.error_list = np.zeros(self.count)
def __calculate_w(self):
self.w = np.sum(self.alpha[i] * self.train_y[i] * self.train_x[i] for i in range(len(self.train_y)))
def __train(self):
num_changed = 0
examine_all = True
while num_changed > 0 or examine_all:
num_changed = 0
if examine_all:
for i in range(self.count):
if self.__examine_example(i):
num_changed += 1
else:
c = np.arange(self.count)
np.random.shuffle(c)
for i in c:
if self.alpha[i] != 0 and self.alpha[i] != self.C and self.__examine_example(i):
num_changed += 1
if examine_all:
examine_all = False
elif num_changed == 0:
examine_all = True
def __take_step(self, i1, i2):
if i1 == i2:
return False
a1 = self.alpha[i1]
a2 = self.alpha[i2]
x1 = self.train_x[i1]
x2 = self.train_x[i2]
y1 = self.train_y[i1]
y2 = self.train_y[i2]
e1 = self.__get_error(i1)
e2 = self.__get_error(i2)
s = y1 * y2
if y1 != y2:
l = max(0, a2 - a1)
h = min(self.C, self.C + a2 - a1)
else:
l = max(0, a2 + a1 - self.C)
h = min(self.C, a1 + a2)
if l == h:
return False
k11 = self.__kernel(x1, x1)
k22 = self.__kernel(x2, x2)
k12 = self.__kernel(x1, x2)
eta = k11 + k22 - 2 * k12
if eta > 0:
new_a2 = a2 + y2 * (e1 - e2) / eta
if new_a2 < l:
new_a2 = l
elif new_a2 > h:
new_a2 = h
else:
f1 = y1 * (e1 + self.b) - a1 * k11 - s * a2 * k12
f2 = y2 * (e2 + self.b) - s * a1 * k12 - a2 * k22
l1 = a1 + s * (a2 - l)
h1 = a1 + s * (a2 - h)
l_obj_value = l1*f1 + l*f2 + 0.5 * (l1**2)*k11 + 0.5*(l**2)*k22 + s*l*l1*k12
h_obj_value = h1*f1 + h*f2 + 0.5 * (h1**2)*k11 + 0.5*(h**2)*k22 + s*h*h1*k12
if l_obj_value < h_obj_value - self.epsilon:
new_a2 = l
elif h_obj_value < l_obj_value - self.epsilon:
new_a2 = h
else:
new_a2 = a2
if new_a2 < 1e-8:
new_a2 = 0
elif new_a2 > self.C - 1e-8:
new_a2 = self.C
if abs(new_a2 - a2) < self.epsilon * (new_a2 + a2 + self.epsilon):
return False
new_a1 = a1 + s * (a2 - new_a2)
b1 = e1 + y1 * (new_a1-a1) * k11 + y2 * (new_a2-a2) * k12 + self.b
b2 = e2 + y1 * (new_a1-a1) * k12 + y2 * (new_a2-a2) * k22 + self.b
if 0 < new_a1 and new_a1 < self.C:
new_b = b1
elif 0 < new_a2 and new_a2 < self.C:
new_b = b2
else:
new_b = (b1 + b2) / 2.0
db = new_b - self.b
self.b = new_b
if self.kernel_type == 'linear':
self.w += y1 * float(new_a1 - a1) * x1 + y2 * float(new_a2 - a2) * x2
d1 = y1*(new_a1 - a1)
d2 = y2*(new_a2 - a2)
for i in range(self.count):
if 0 < self.alpha[i] < self.C:
xi = self.train_x[i]
self.error_list[i] += d1 * self.__kernel(x1, xi) + d2 * self.__kernel(x2, xi) - db
self.error_list[i1] = 0
self.error_list[i2] = 0
self.alpha[i1] = float(new_a1)
self.alpha[i2] = float(new_a2)
return True
def __examine_example(self, i2):
y2 = self.train_y[i2]
a2 = self.alpha[i2]
e2 = self.__get_error(i2)
r2 = e2 * y2
if (r2 < -self.tol and a2 < self.C) or (r2 > self.tol and a2 > 0):
if len(np.where((self.alpha!=0)&(self.alpha!=self.C))) > 1:
if e2 > 0:
i1 = np.argmin(self.error_list)
elif e2 < 0:
i1 = np.argmax(self.error_list)
else:
a = np.argmin(self.error_list)
b = np.argmax(self.error_list)
if abs(self.error_list[a]) > abs(self.error_list[b]):
i1 = a
else:
i1 = b
if self.__take_step(i1, i2):
return True
c = np.arange(self.count)
np.random.shuffle(c)
for i in c:
if self.alpha[i] != 0 and self.alpha[i] != self.C and self.__take_step(i, i2):
return True
np.random.shuffle(c)
for i in c:
if self.__take_step(i, i2):
return True
return False
def get_model(self):
self.__calculate_w()
return self.w, self.b
def __decision(self, x, alpha):
'''
g(x_i) = sum_1^n alpha_i * y_i * K(x_i, x) - b
'''
if self.kernel_type == 'linear':
return np.dot(self.w, x.T) - self.b
t = 0
for i in range(self.count):
t = t + alpha[i] * self.train_y[i] * self.__kernel(x, self.train_x[i])
return t - self.b
def __get_error(self, i):
if 0 < self.alpha[i] < self.C:
return self.error_list[i]
else:
return self.__decision(self.train_x[i], self.alpha) - self.train_y[i]
def __observe_function(self, alpha):
'''
SVM 的目标函数
0.5 * (sum_i^nsum_1^jalpha_i * alpha_j * y_i * y_j * K(x_i, x_j)) - sum_1_n * alpha_i
'''
y = np.asmatrix(self.train_y)
x = np.asmatrix(self.train_x)
a = np.asmatrix(alpha)
return 0.5 * np.sum(np.multiply(np.multiply(a.T*a, y.T*y), self.__kernel(x, x))) - np.sum(a)
def __kernel(self, x1, x2):
'''
kernel trick
rbf
linear
polynomial
'''
if self.kernel_type == 'rbf':
return self.__rbf(x1, x2)
elif self.kernel_type == 'polynomial':
return self.__polynomial(x1, x2)
elif self.kernel_type == 'linear':
return self.__linear(x1, x2)
def __rbf(self, x1, x2):
'''
rbf kernel:
K(x1, x2) = exp[-gamma||x1-x2||^2]
'''
x1 = np.asmatrix(x1)
x2 = np.asmatrix(x2)
r, _ = np.shape(x1)
r2, _ = np.shape(x2)
if r == r2 == 1:
return np.exp(-self.gamma*np.linalg.norm(x1-x2)**2)
else:
l = list()
for i in r:
tx1 = x1[i]
tmp = tx1 - x2
l2 = list()
for j in r:
t = tmp[j]
l2.append(np.exp(-self.gamma*np.linalg.norm(t)**2))
l.append(l2)
return np.asmatrix(l)
def __polynomial(self, x1, x2):
'''
polynomial kernel:
K(x1, x2) = (gammar * x1 * x2 + 1)^p
'''
x1 = np.asmatrix(x1)
x2 = np.asmatrix(x2)
r, _ = np.shape(x1)
r2, _ = np.shape(x2)
if r == r2 == 1:
return (self.gamma * np.dot(x1, x2.T) + 1) ** self.p
else:
l = list()
for i in r:
tx1 = x1[i]
l2 = list()
for j in r:
tx2 = x2[j]
l2.append((self.gamma * np.dot(tx1, tx2.T) + 1) ** self.p)
l.append(l2)
return np.asmatrix(l)
def __linear(self, x1, x2):
return np.dot(x1, x2.T) + self.b
| [
"numpy.sum",
"numpy.multiply",
"numpy.argmax",
"numpy.asarray",
"numpy.zeros",
"numpy.argmin",
"numpy.shape",
"numpy.where",
"numpy.asmatrix",
"numpy.arange",
"numpy.linalg.norm",
"numpy.dot",
"numpy.random.shuffle"
] | [((1151, 1165), 'numpy.asmatrix', 'np.asmatrix', (['x'], {}), '(x)\n', (1162, 1165), True, 'import numpy as np\n'), ((1189, 1202), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (1199, 1202), True, 'import numpy as np\n'), ((1240, 1262), 'numpy.shape', 'np.shape', (['self.train_x'], {}), '(self.train_x)\n', (1248, 1262), True, 'import numpy as np\n'), ((1280, 1309), 'numpy.zeros', 'np.zeros', (['(1, self.dimension)'], {}), '((1, self.dimension))\n', (1288, 1309), True, 'import numpy as np\n'), ((1331, 1351), 'numpy.zeros', 'np.zeros', (['self.count'], {}), '(self.count)\n', (1339, 1351), True, 'import numpy as np\n'), ((1378, 1398), 'numpy.zeros', 'np.zeros', (['self.count'], {}), '(self.count)\n', (1386, 1398), True, 'import numpy as np\n'), ((7032, 7057), 'numpy.asmatrix', 'np.asmatrix', (['self.train_y'], {}), '(self.train_y)\n', (7043, 7057), True, 'import numpy as np\n'), ((7070, 7095), 'numpy.asmatrix', 'np.asmatrix', (['self.train_x'], {}), '(self.train_x)\n', (7081, 7095), True, 'import numpy as np\n'), ((7108, 7126), 'numpy.asmatrix', 'np.asmatrix', (['alpha'], {}), '(alpha)\n', (7119, 7126), True, 'import numpy as np\n'), ((7734, 7749), 'numpy.asmatrix', 'np.asmatrix', (['x1'], {}), '(x1)\n', (7745, 7749), True, 'import numpy as np\n'), ((7763, 7778), 'numpy.asmatrix', 'np.asmatrix', (['x2'], {}), '(x2)\n', (7774, 7778), True, 'import numpy as np\n'), ((7794, 7806), 'numpy.shape', 'np.shape', (['x1'], {}), '(x1)\n', (7802, 7806), True, 'import numpy as np\n'), ((7823, 7835), 'numpy.shape', 'np.shape', (['x2'], {}), '(x2)\n', (7831, 7835), True, 'import numpy as np\n'), ((8413, 8428), 'numpy.asmatrix', 'np.asmatrix', (['x1'], {}), '(x1)\n', (8424, 8428), True, 'import numpy as np\n'), ((8442, 8457), 'numpy.asmatrix', 'np.asmatrix', (['x2'], {}), '(x2)\n', (8453, 8457), True, 'import numpy as np\n'), ((8473, 8485), 'numpy.shape', 'np.shape', (['x1'], {}), '(x1)\n', (8481, 8485), True, 'import numpy as np\n'), ((8502, 8514), 'numpy.shape', 
'np.shape', (['x2'], {}), '(x2)\n', (8510, 8514), True, 'import numpy as np\n'), ((5821, 5842), 'numpy.arange', 'np.arange', (['self.count'], {}), '(self.count)\n', (5830, 5842), True, 'import numpy as np\n'), ((5855, 5875), 'numpy.random.shuffle', 'np.random.shuffle', (['c'], {}), '(c)\n', (5872, 5875), True, 'import numpy as np\n'), ((6039, 6059), 'numpy.random.shuffle', 'np.random.shuffle', (['c'], {}), '(c)\n', (6056, 6059), True, 'import numpy as np\n'), ((7218, 7227), 'numpy.sum', 'np.sum', (['a'], {}), '(a)\n', (7224, 7227), True, 'import numpy as np\n'), ((8252, 8266), 'numpy.asmatrix', 'np.asmatrix', (['l'], {}), '(l)\n', (8263, 8266), True, 'import numpy as np\n'), ((8909, 8923), 'numpy.asmatrix', 'np.asmatrix', (['l'], {}), '(l)\n', (8920, 8923), True, 'import numpy as np\n'), ((8972, 8988), 'numpy.dot', 'np.dot', (['x1', 'x2.T'], {}), '(x1, x2.T)\n', (8978, 8988), True, 'import numpy as np\n'), ((1888, 1909), 'numpy.arange', 'np.arange', (['self.count'], {}), '(self.count)\n', (1897, 1909), True, 'import numpy as np\n'), ((1926, 1946), 'numpy.random.shuffle', 'np.random.shuffle', (['c'], {}), '(c)\n', (1943, 1946), True, 'import numpy as np\n'), ((6446, 6465), 'numpy.dot', 'np.dot', (['self.w', 'x.T'], {}), '(self.w, x.T)\n', (6452, 6465), True, 'import numpy as np\n'), ((5229, 5281), 'numpy.where', 'np.where', (['((self.alpha != 0) & (self.alpha != self.C))'], {}), '((self.alpha != 0) & (self.alpha != self.C))\n', (5237, 5281), True, 'import numpy as np\n'), ((5334, 5360), 'numpy.argmin', 'np.argmin', (['self.error_list'], {}), '(self.error_list)\n', (5343, 5360), True, 'import numpy as np\n'), ((5415, 5441), 'numpy.argmax', 'np.argmax', (['self.error_list'], {}), '(self.error_list)\n', (5424, 5441), True, 'import numpy as np\n'), ((5488, 5514), 'numpy.argmin', 'np.argmin', (['self.error_list'], {}), '(self.error_list)\n', (5497, 5514), True, 'import numpy as np\n'), ((5539, 5565), 'numpy.argmax', 'np.argmax', (['self.error_list'], {}), 
'(self.error_list)\n', (5548, 5565), True, 'import numpy as np\n'), ((7167, 7196), 'numpy.multiply', 'np.multiply', (['(a.T * a)', '(y.T * y)'], {}), '(a.T * a, y.T * y)\n', (7178, 7196), True, 'import numpy as np\n'), ((7899, 7922), 'numpy.linalg.norm', 'np.linalg.norm', (['(x1 - x2)'], {}), '(x1 - x2)\n', (7913, 7922), True, 'import numpy as np\n'), ((8573, 8589), 'numpy.dot', 'np.dot', (['x1', 'x2.T'], {}), '(x1, x2.T)\n', (8579, 8589), True, 'import numpy as np\n'), ((8181, 8198), 'numpy.linalg.norm', 'np.linalg.norm', (['t'], {}), '(t)\n', (8195, 8198), True, 'import numpy as np\n'), ((8826, 8844), 'numpy.dot', 'np.dot', (['tx1', 'tx2.T'], {}), '(tx1, tx2.T)\n', (8832, 8844), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
import FilterDetections as fd
# dimension of this array is
# (batch size, num boxes, 4)
# Test fixture: three candidate boxes, one row of 4 coordinates per box.
# NOTE(review): the comment above claims a (batch, num boxes, 4) layout,
# but this array is actually 2-D with shape (3, 4) -- confirm which the
# function under test expects.
boxes = np.array(
    # boxes
    [
        # 4 coords
        [0, 0, 1, 1],
        [0.5, 0.5, 0.6, 0.6],
        [0.1, 0.1, 0.6, 0.6],
    ], "float32"
)
# Per-box confidence scores, aligned with `boxes` by index.
scores = np.array([.6, .2, .1], "float32")
# Per-box integer class labels, aligned with `boxes` by index.
labels = np.array([1, 2, 1], "int64")
def test_filter_by_score_and_nms():
    """Check score-threshold + NMS filtering on the module-level fixture."""
    # A default TF1 session is required so that .eval() below can resolve
    # the returned tensor without an explicit session argument.
    with tf.compat.v1.Session().as_default():
        detections = fd.filter_by_score_and_nms(
            scores, labels, .12, boxes, 3, .5
        )
        # Two detections are expected to survive; presumably each row is a
        # (box index, label) pair -- TODO confirm against fd's docstring.
        np.testing.assert_array_almost_equal(
            detections.eval(), np.array([[0, 1], [1, 2]]))
| [
"FilterDetections.filter_by_score_and_nms",
"numpy.array",
"tensorflow.compat.v1.Session"
] | [((141, 220), 'numpy.array', 'np.array', (['[[0, 0, 1, 1], [0.5, 0.5, 0.6, 0.6], [0.1, 0.1, 0.6, 0.6]]', '"""float32"""'], {}), "([[0, 0, 1, 1], [0.5, 0.5, 0.6, 0.6], [0.1, 0.1, 0.6, 0.6]], 'float32')\n", (149, 220), True, 'import numpy as np\n'), ((318, 354), 'numpy.array', 'np.array', (['[0.6, 0.2, 0.1]', '"""float32"""'], {}), "([0.6, 0.2, 0.1], 'float32')\n", (326, 354), True, 'import numpy as np\n'), ((361, 389), 'numpy.array', 'np.array', (['[1, 2, 1]', '"""int64"""'], {}), "([1, 2, 1], 'int64')\n", (369, 389), True, 'import numpy as np\n'), ((495, 558), 'FilterDetections.filter_by_score_and_nms', 'fd.filter_by_score_and_nms', (['scores', 'labels', '(0.12)', 'boxes', '(3)', '(0.5)'], {}), '(scores, labels, 0.12, boxes, 3, 0.5)\n', (521, 558), True, 'import FilterDetections as fd\n'), ((657, 683), 'numpy.array', 'np.array', (['[[0, 1], [1, 2]]'], {}), '([[0, 1], [1, 2]])\n', (665, 683), True, 'import numpy as np\n'), ((437, 459), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (457, 459), True, 'import tensorflow as tf\n')] |
# Collection of scripts developed to implent the
# Dressler-Schectamn Test (Dressler & Shectman, 1988)
# in a cluster of galaxies
# Section dedicated to import python modules
import numpy as np
from astropy.stats import biweight_scale
# Section dedicated to importing the modules from CALSAGOS
from . import utils
from . import cluster_kinematics
__author__ = '<NAME> & <NAME>'
__email__ = '<EMAIL> - <EMAIL>'
VERSION = '0.1'
#####################################################################################################################################################################################
#####################################################################################################################################################################################
def calc_delta_DS(RA, DEC, peculiar_velocity, redshift_member, escape_velocity, cluster_starting_redshift):
    """ calsagos.dressler_schectman_test.calc_delta_DS(RA, DEC, peculiar_velocity, redshift_member, escape_velocity, cluster_starting_redshift)

	Function that computes the delta_i value defined by the
	Dressler-Shectman Test (see Dressler & Shectman, 1988).  The DS-Test
	verifies the existence of regions kinematically distinct from the
	main galaxy cluster: the larger the delta_i value, the greater the
	probability that the galaxy belongs to a substructure.

	This function was developed by <NAME> and <NAME> (04/07/2016)

	:param RA: Right Ascension of the galaxies in degree units
	:param DEC: Declination of the galaxies in degree units
	:param peculiar_velocity: peculiar velocity of the galaxies in km s-1 units
	:param redshift_member: redshift of the galaxies in the cluster
	:param escape_velocity: escape velocity of the cluster
	:param cluster_starting_redshift: preliminary estimation of the
	    redshift of the cluster

	:type RA: array
	:type DEC: array
	:type peculiar_velocity: array
	:type redshift_member: array
	:type escape_velocity: int, float
	:type cluster_starting_redshift: int, float

	:returns: array where [0] is the delta_i value for each galaxy,
	    [1] the critical value (sum of delta_i) and [2] the threshold
	    value (critical value / number of members), all as defined by
	    Dressler & Shectman (1988)
	:rtype: numpy array

	"""
    print("starting estimation of delta_i from DS-test")

    #-- estimating the velocity dispersion of the cluster
    sigma_estimate = cluster_kinematics.calc_cluster_velocity_dispersion(redshift_member, escape_velocity, cluster_starting_redshift)
    sigma_cluster = sigma_estimate[1]

    #-- estimating the mean velocity of the cluster
    mean_cluster_velocity = utils.calc_mean_and_standard_error(peculiar_velocity)
    cluster_velocity = mean_cluster_velocity[0]

    #-- coordinate arrays as ndarrays so the per-galaxy distance
    #   computation below can be vectorised
    ra = np.asarray(RA)
    dec = np.asarray(DEC)

    #-- defining output quantities
    dim = redshift_member.size      # number of elements in the input arrays
    sigma_group = np.zeros(dim)   # velocity dispersion of the groups
    delta = np.zeros(dim)         # delta_i value of each galaxy

    for ii in range(dim):
        #-- projected distance from galaxy ii to every galaxy
        #   (vectorised; replaces the original O(dim) inner loop)
        d = np.sqrt((ra[ii] - ra)**2. + (dec[ii] - dec)**2.)

        #-- sort by distance to select the nearest neighbours; the galaxy
        #   itself (d = 0) is first, so the slice holds it plus its 9
        #   nearest neighbours (10 galaxies, hence the 11 in the formula)
        sorted_indices = np.argsort(d)
        v_of_d_sorted = peculiar_velocity[sorted_indices]
        v_nearest_ten = v_of_d_sorted[0:10]

        #-- mean velocity of the local group
        local_velocity_nearest_ten = utils.calc_mean_and_standard_error(v_nearest_ten)
        local_velocity = local_velocity_nearest_ten[0]

        #-- velocity dispersion of the local group (computed once; the
        #   original evaluated np.std twice)
        sigma_local = np.std(v_nearest_ten)
        sigma_group[ii] = sigma_local

        #-- delta_i statistic of Dressler & Shectman (1988)
        delta[ii] = np.sqrt((11./(sigma_cluster**2.))*((local_velocity - cluster_velocity)**2. + (sigma_local - sigma_cluster)**2.))

    #-- BUG FIX: this message was printed on every loop iteration in the
    #   original implementation; print it once after the loop finishes
    print("ending estimation of delta_i from DS-test")

    # -- END OF LOOP --
    delta_obs = delta

    #-- calculating critical value and threshold value
    critical_value = np.sum(delta)
    threshold_value = critical_value/dim

    #-- building matrix with output quantities
    delta_DS_array = np.array([delta_obs, critical_value, threshold_value], dtype=object)

    #-- returning output quantity
    return delta_DS_array
#####################################################################################################################################################################################
#####################################################################################################################################################################################
def calc_delta_shuffle(RA, DEC, peculiar_velocity, redshift_member, escape_velocity, cluster_starting_redshift, n_bootstrap):
    """ calsagos.dressler_schectman_test.calc_delta_shuffle(RA, DEC, peculiar_velocity, redshift_member, escape_velocity, cluster_starting_redshift, n_bootstrap)

	Function that resamples the observed velocities and redshifts (with
	replacement) and recomputes the Dressler-Shectman statistics for each
	bootstrap realisation.

	This function was developed by <NAME> (31/08/2016)

	:param RA: Right Ascension of the galaxies in degree units
	:param DEC: Declination of the galaxies in degree units
	:param peculiar_velocity: peculiar velocity of the galaxies in km s-1 units
	:param redshift_member: redshift of the galaxies in the cluster
	:param escape_velocity: escape velocity of the cluster
	:param cluster_starting_redshift: preliminary estimation of the
	    redshift of the cluster
	:param n_bootstrap: number of bootstrap iterations

	:type RA: array
	:type DEC: array
	:type peculiar_velocity: array
	:type redshift_member: array
	:type escape_velocity: int, float
	:type cluster_starting_redshift: int, float
	:type n_bootstrap: int

	:returns: array where row [0] holds the simulated critical values and
	    row [1] the simulated threshold values (Dressler & Shectman, 1988),
	    one column per bootstrap iteration
	:rtype: numpy array

	"""
    #-- number of member galaxies in the sample
    n_members = redshift_member.size

    #-- per-iteration output arrays
    sim_critical = np.zeros(n_bootstrap)
    sim_threshold = np.zeros(n_bootstrap)

    #-- bootstrap loop: draw member indices with replacement, then
    #   recompute the DS statistics on the resampled velocities/redshifts
    for k in range(n_bootstrap):
        draw = np.random.randint(0, n_members, size=n_members)
        ds_stats = calc_delta_DS(RA, DEC, peculiar_velocity[draw], redshift_member[draw], escape_velocity, cluster_starting_redshift)
        sim_critical[k] = ds_stats[1]
        sim_threshold[k] = ds_stats[2]

    #-- stack both simulated statistics into the output matrix
    return np.array([sim_critical, sim_threshold])
#####################################################################################################################################################################################
#####################################################################################################################################################################################
# FUNCTION THAT ESTIMATES THE P-VALUE BY COMPARING THE VALUE OF OBSERVED DELTA_DS TO VALUE OF SHUFFLED DELTA_DS
def probability_value(cumulative_delta, cumulative_delta_shuffle, n_bootstrap):
    """ calsagos.dressler_schectman_test.probability_value(cumulative_delta, cumulative_delta_shuffle, n_bootstrap)

	Function that estimates the P-value of the Dressler-Shectman test by
	comparing the observed cumulative delta against the shuffled values:
	the shuffled values that reach or exceed the observation are summed,
	and that sum divided by the number of bootstrap iterations gives the
	probability (Dressler & Shectman, 1988).

	This function was developed by <NAME> (31/08/2016)

	:param cumulative_delta: sum of all observed delta_i in the cluster
	:param cumulative_delta_shuffle: sum of all shuffled delta_i in the cluster
	:param n_bootstrap: number of bootstrap iterations

	:type cumulative_delta: float
	:type cumulative_delta_shuffle: array
	:type n_bootstrap: int

	:returns: array where [0] is the sum of the selected delta_i values and
	    [1] the probability of the cluster hosting substructures, both as
	    defined by Dressler & Shectman (1988)
	:rtype: numpy array

	"""
    #-- boolean mask selecting shuffled values at or above the observation
    exceeds = cumulative_delta_shuffle >= cumulative_delta
    selected = cumulative_delta_shuffle[exceeds]

    #-- sum of the selected delta values
    total = np.sum(selected)

    #-- P-value defined in Dressler & Shectman (1988)
    return np.array([total, total / n_bootstrap])
#####################################################################################################################################################################################
#####################################################################################################################################################################################
# FUNCTION THAT DETERMINES IF A GALAXY HAS OR NOT A PROBABILITY TO BE PART OF A SUBSTRUCTURE
def calc_delta_outliers(delta_i):
    """ calsagos.ds_test.calc_delta_outliers(delta_i)

	Function that determines if a galaxy has or not a probability to be
	part of a substructure.

	This function was developed by <NAME> (01/09/2016).
	Following Girardi et al. (1996), galaxies with |delta_i| >= delta_lim
	are considered possible members of substructures.  Here
	delta_lim = 3 * sigma_delta, where sigma_delta is the dispersion of
	the delta_i distribution estimated with biweight_scale
	(Beers et al. 1990) from astropy.

	:param delta_i: delta_i value of each galaxy, defined following
	    Dressler & Shectman (1988)
	:type delta_i: array

	:returns: label per galaxy -- 1: the galaxy is an outlier of the
	    delta_i distribution and likely belongs to a substructure;
	    0: standard galaxy not part of a substructure
	:rtype: numpy array

	"""
    #-- robust dispersion of the delta_i distribution
    sigma_delta = biweight_scale(delta_i)

    #-- vectorised replacement of the original per-element loop; the
    #   two-sided test (delta <= -3*sigma or delta >= 3*sigma) collapses
    #   to |delta| >= 3*sigma, and the redundant elif branch (which could
    #   only re-assign 0) is removed
    label = np.where(np.abs(delta_i) >= 3. * sigma_delta, 1., 0.)

    #-- returning output quantity
    return np.array(label)
#####################################################################################################################################################################################
#####################################################################################################################################################################################
| [
"numpy.sum",
"numpy.std",
"numpy.zeros",
"numpy.argsort",
"numpy.random.randint",
"numpy.array",
"numpy.where",
"astropy.stats.biweight_scale",
"numpy.sqrt"
] | [((3454, 3467), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (3462, 3467), True, 'import numpy as np\n'), ((3514, 3527), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (3522, 3527), True, 'import numpy as np\n'), ((3576, 3589), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (3584, 3589), True, 'import numpy as np\n'), ((5224, 5237), 'numpy.sum', 'np.sum', (['delta'], {}), '(delta)\n', (5230, 5237), True, 'import numpy as np\n'), ((5348, 5416), 'numpy.array', 'np.array', (['[delta_obs, critical_value, threshold_value]'], {'dtype': 'object'}), '([delta_obs, critical_value, threshold_value], dtype=object)\n', (5356, 5416), True, 'import numpy as np\n'), ((7773, 7794), 'numpy.zeros', 'np.zeros', (['n_bootstrap'], {}), '(n_bootstrap)\n', (7781, 7794), True, 'import numpy as np\n'), ((7821, 7842), 'numpy.zeros', 'np.zeros', (['n_bootstrap'], {}), '(n_bootstrap)\n', (7829, 7842), True, 'import numpy as np\n'), ((8611, 8662), 'numpy.array', 'np.array', (['[critical_value_sim, threshold_value_sim]'], {}), '([critical_value_sim, threshold_value_sim])\n', (8619, 8662), True, 'import numpy as np\n'), ((10831, 10856), 'numpy.sum', 'np.sum', (['delta_good_valued'], {}), '(delta_good_valued)\n', (10837, 10856), True, 'import numpy as np\n'), ((11039, 11072), 'numpy.array', 'np.array', (['[sum_delta, delta_prob]'], {}), '([sum_delta, delta_prob])\n', (11047, 11072), True, 'import numpy as np\n'), ((12972, 12995), 'astropy.stats.biweight_scale', 'biweight_scale', (['delta_i'], {}), '(delta_i)\n', (12986, 12995), False, 'from astropy.stats import biweight_scale\n'), ((13087, 13108), 'numpy.zeros', 'np.zeros', (['member_size'], {}), '(member_size)\n', (13095, 13108), True, 'import numpy as np\n'), ((13742, 13757), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (13750, 13757), True, 'import numpy as np\n'), ((7998, 8033), 'numpy.random.randint', 'np.random.randint', (['(0)', 'dim'], {'size': 'dim'}), '(0, dim, size=dim)\n', (8015, 8033), True, 'import 
numpy as np\n'), ((10587, 10641), 'numpy.where', 'np.where', (['(cumulative_delta_shuffle >= cumulative_delta)'], {}), '(cumulative_delta_shuffle >= cumulative_delta)\n', (10595, 10641), True, 'import numpy as np\n'), ((3791, 3853), 'numpy.sqrt', 'np.sqrt', (['((RA[ii] - RA[jj]) ** 2.0 + (DEC[ii] - DEC[jj]) ** 2.0)'], {}), '((RA[ii] - RA[jj]) ** 2.0 + (DEC[ii] - DEC[jj]) ** 2.0)\n', (3798, 3853), True, 'import numpy as np\n'), ((4001, 4014), 'numpy.argsort', 'np.argsort', (['d'], {}), '(d)\n', (4011, 4014), True, 'import numpy as np\n'), ((4745, 4766), 'numpy.std', 'np.std', (['v_nearest_ten'], {}), '(v_nearest_ten)\n', (4751, 4766), True, 'import numpy as np\n'), ((4797, 4818), 'numpy.std', 'np.std', (['v_nearest_ten'], {}), '(v_nearest_ten)\n', (4803, 4818), True, 'import numpy as np\n'), ((4931, 5057), 'numpy.sqrt', 'np.sqrt', (['(11.0 / sigma_cluster ** 2.0 * ((local_velocity - cluster_velocity) ** 2.0 +\n (sigma_local - sigma_cluster) ** 2.0))'], {}), '(11.0 / sigma_cluster ** 2.0 * ((local_velocity - cluster_velocity) **\n 2.0 + (sigma_local - sigma_cluster) ** 2.0))\n', (4938, 5057), True, 'import numpy as np\n')] |
import numpy as np
from skimage.io import imread, imsave
import glob, os, tqdm
from imagej_fun import imagej_metadata_tags, make_lut
from skimage.filters import threshold_otsu
from skimage.measure import label
from skimage import img_as_uint
import pandas as pd
import matplotlib.pyplot as plt
###############################################################################
def binary2volume(
    path, pxl_size=(2., 0.76, 0.76)
):
    """Compute per-channel volumes and pairwise overlap volumes for one
    imaged object.

    Loads ``mask_total.tif`` (the whole-object binary mask) and every
    ``mask_channel*.tif`` file (excluding MIP files) found under ``path``,
    converts voxel counts to volumes, and measures the overlap volume of
    every pair of channel masks.

    Parameters
    ----------
    path : str
        Directory containing the binary mask TIFF files.
    pxl_size : sequence of float
        Voxel size per axis (z, y, x).  NOTE(review): units are presumably
        micrometres so that 1e-9 * prod converts to mm^3 -- confirm.

    Returns
    -------
    pandas.DataFrame
        Single-row frame with columns ``name``, ``V_tot``, ``V_ch<i>`` for
        every channel i and ``V_ch<i>-<j>`` for every channel pair i<j.
    """
    # volume of a single voxel (in mm^3, per the original comment)
    voxel_volume = 1e-9 * np.prod(pxl_size)

    # channel mask files, excluding maximum-intensity projections
    flist = glob.glob(os.path.join(path, 'mask_channel*.tif'))
    flist = [f for f in flist if 'MIP' not in f]
    flist.sort()
    N = len(flist)
    mask_dir = os.path.dirname(flist[0])

    # total volume of the object from the whole-object mask
    # (normalised to 0/1 before counting voxels)
    org_mask = imread(os.path.join(mask_dir, 'mask_total.tif'))
    org_mask = org_mask.astype(float) / np.max(org_mask)
    V_gastr = float(np.sum(org_mask)) * voxel_volume

    volumes = np.zeros(N)
    overlap = np.zeros((N, N))

    # volume of every channel mask (enumerate replaces the original
    # manually-incremented counter; the unused splitext call is gone)
    masks = [None] * N
    for i, filename in enumerate(tqdm.tqdm(flist)):
        m = imread(filename)
        masks[i] = m.astype(float) / np.max(m)
        volumes[i] = float(np.sum(masks[i])) * voxel_volume

    # overlap volume of every unordered channel pair (j > i)
    for i in range(N):
        for j in range(i + 1, N):
            v = float(np.sum(masks[i] * masks[j]))
            overlap[i, j] = v * voxel_volume

    # assemble the single-row output DataFrame
    vals = [path, V_gastr]
    names = ['name', 'V_tot']
    for i, v in enumerate(volumes):
        vals.append(v)
        names.append('V_ch%d' % i)
    for i in range(N):
        for j in range(i + 1, N):
            vals.append(overlap[i, j])
            names.append('V_ch%d-%d' % (i, j))
    return pd.DataFrame([vals], columns=names)
################################################################################
if __name__=='__main__':

    # Datasets to process: two experimental conditions, three acquisition
    # timestamps each, located on a network share.
    paths = [
        os.path.join('Y:',os.sep,'Nicola_Gritti','raw_data',
            '2020-09-23_gastrHCR','Cond1_2I','2020-09-23_114830'),
        os.path.join('Y:',os.sep,'Nicola_Gritti','raw_data',
            '2020-09-23_gastrHCR','Cond1_2I','2020-09-23_115427'),
        os.path.join('Y:',os.sep,'Nicola_Gritti','raw_data',
            '2020-09-23_gastrHCR','Cond1_2I','2020-09-23_120041'),
        os.path.join('Y:',os.sep,'Nicola_Gritti','raw_data',
            '2020-09-23_gastrHCR','Cond2_2I','2020-09-23_121905'),
        os.path.join('Y:',os.sep,'Nicola_Gritti','raw_data',
            '2020-09-23_gastrHCR','Cond2_2I','2020-09-23_122616'),
        os.path.join('Y:',os.sep,'Nicola_Gritti','raw_data',
            '2020-09-23_gastrHCR','Cond2_2I','2020-09-23_123405'),
    ]

    # NOTE(review): the returned DataFrame is discarded for every dataset,
    # so nothing is persisted here -- confirm whether the results should be
    # collected and saved.
    for path in tqdm.tqdm(paths):
        binary2volume(path)
| [
"pandas.DataFrame",
"tqdm.tqdm",
"numpy.sum",
"os.path.basename",
"os.path.dirname",
"numpy.zeros",
"numpy.max",
"numpy.array",
"os.path.splitext",
"os.path.join",
"numpy.prod"
] | [((675, 701), 'os.path.basename', 'os.path.basename', (['flist[0]'], {}), '(flist[0])\n', (691, 701), False, 'import glob, os, tqdm\n'), ((718, 743), 'os.path.dirname', 'os.path.dirname', (['flist[0]'], {}), '(flist[0])\n', (733, 743), False, 'import glob, os, tqdm\n'), ((1004, 1036), 'numpy.array', 'np.array', (['[(0.0) for i in flist]'], {}), '([(0.0) for i in flist])\n', (1012, 1036), True, 'import numpy as np\n'), ((1048, 1064), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (1056, 1064), True, 'import numpy as np\n'), ((1164, 1180), 'tqdm.tqdm', 'tqdm.tqdm', (['flist'], {}), '(flist)\n', (1173, 1180), False, 'import glob, os, tqdm\n'), ((2241, 2276), 'pandas.DataFrame', 'pd.DataFrame', (['[vals]'], {'columns': 'names'}), '([vals], columns=names)\n', (2253, 2276), True, 'import pandas as pd\n'), ((3327, 3343), 'tqdm.tqdm', 'tqdm.tqdm', (['paths'], {}), '(paths)\n', (3336, 3343), False, 'import glob, os, tqdm\n'), ((482, 499), 'numpy.prod', 'np.prod', (['pxl_size'], {}), '(pxl_size)\n', (489, 499), True, 'import numpy as np\n'), ((533, 572), 'os.path.join', 'os.path.join', (['path', '"""mask_channel*.tif"""'], {}), "(path, 'mask_channel*.tif')\n", (545, 572), False, 'import glob, os, tqdm\n'), ((802, 843), 'os.path.join', 'os.path.join', (['file_path', '"""mask_total.tif"""'], {}), "(file_path, 'mask_total.tif')\n", (814, 843), False, 'import glob, os, tqdm\n'), ((882, 898), 'numpy.max', 'np.max', (['org_mask'], {}), '(org_mask)\n', (888, 898), True, 'import numpy as np\n'), ((1202, 1228), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (1218, 1228), False, 'import glob, os, tqdm\n'), ((1249, 1274), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (1264, 1274), False, 'import glob, os, tqdm\n'), ((1305, 1332), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (1321, 1332), False, 'import glob, os, tqdm\n'), ((2448, 2563), 'os.path.join', 'os.path.join', (['"""Y:"""', 
'os.sep', '"""Nicola_Gritti"""', '"""raw_data"""', '"""2020-09-23_gastrHCR"""', '"""Cond1_2I"""', '"""2020-09-23_114830"""'], {}), "('Y:', os.sep, 'Nicola_Gritti', 'raw_data',\n '2020-09-23_gastrHCR', 'Cond1_2I', '2020-09-23_114830')\n", (2460, 2563), False, 'import glob, os, tqdm\n'), ((2592, 2707), 'os.path.join', 'os.path.join', (['"""Y:"""', 'os.sep', '"""Nicola_Gritti"""', '"""raw_data"""', '"""2020-09-23_gastrHCR"""', '"""Cond1_2I"""', '"""2020-09-23_115427"""'], {}), "('Y:', os.sep, 'Nicola_Gritti', 'raw_data',\n '2020-09-23_gastrHCR', 'Cond1_2I', '2020-09-23_115427')\n", (2604, 2707), False, 'import glob, os, tqdm\n'), ((2736, 2851), 'os.path.join', 'os.path.join', (['"""Y:"""', 'os.sep', '"""Nicola_Gritti"""', '"""raw_data"""', '"""2020-09-23_gastrHCR"""', '"""Cond1_2I"""', '"""2020-09-23_120041"""'], {}), "('Y:', os.sep, 'Nicola_Gritti', 'raw_data',\n '2020-09-23_gastrHCR', 'Cond1_2I', '2020-09-23_120041')\n", (2748, 2851), False, 'import glob, os, tqdm\n'), ((2880, 2995), 'os.path.join', 'os.path.join', (['"""Y:"""', 'os.sep', '"""Nicola_Gritti"""', '"""raw_data"""', '"""2020-09-23_gastrHCR"""', '"""Cond2_2I"""', '"""2020-09-23_121905"""'], {}), "('Y:', os.sep, 'Nicola_Gritti', 'raw_data',\n '2020-09-23_gastrHCR', 'Cond2_2I', '2020-09-23_121905')\n", (2892, 2995), False, 'import glob, os, tqdm\n'), ((3024, 3139), 'os.path.join', 'os.path.join', (['"""Y:"""', 'os.sep', '"""Nicola_Gritti"""', '"""raw_data"""', '"""2020-09-23_gastrHCR"""', '"""Cond2_2I"""', '"""2020-09-23_122616"""'], {}), "('Y:', os.sep, 'Nicola_Gritti', 'raw_data',\n '2020-09-23_gastrHCR', 'Cond2_2I', '2020-09-23_122616')\n", (3036, 3139), False, 'import glob, os, tqdm\n'), ((3168, 3283), 'os.path.join', 'os.path.join', (['"""Y:"""', 'os.sep', '"""Nicola_Gritti"""', '"""raw_data"""', '"""2020-09-23_gastrHCR"""', '"""Cond2_2I"""', '"""2020-09-23_123405"""'], {}), "('Y:', os.sep, 'Nicola_Gritti', 'raw_data',\n '2020-09-23_gastrHCR', 'Cond2_2I', '2020-09-23_123405')\n", (3180, 3283), False, 
'import glob, os, tqdm\n'), ((919, 935), 'numpy.sum', 'np.sum', (['org_mask'], {}), '(org_mask)\n', (925, 935), True, 'import numpy as np\n'), ((1360, 1394), 'os.path.join', 'os.path.join', (['file_path', 'file_name'], {}), '(file_path, file_name)\n', (1372, 1394), False, 'import glob, os, tqdm\n'), ((1437, 1453), 'numpy.max', 'np.max', (['masks[i]'], {}), '(masks[i])\n', (1443, 1453), True, 'import numpy as np\n'), ((1472, 1488), 'numpy.sum', 'np.sum', (['masks[i]'], {}), '(masks[i])\n', (1478, 1488), True, 'import numpy as np\n'), ((1841, 1850), 'numpy.sum', 'np.sum', (['m'], {}), '(m)\n', (1847, 1850), True, 'import numpy as np\n')] |
"""
Generic tools for distributing computationally intensive tasks across multiple threads.
"""
import os
import numpy as np
import shutil
from tempfile import mkdtemp
import multiprocessing as mp
from tqdm import tqdm
from hylite import HyCloud, HyImage
from hylite import io
def _split(data, nchunks):
    """
    Split the specified HyData instance into a number of chunks along the point/pixel axis.

    *Arguments*:
     - data = the complete HyData object to copy and split.
     - nchunks = the number of chunks to split into.

    *Returns*:
     - a list of nchunks new HyData instances, each holding a contiguous slice of the data.
       N.B. if data has fewer points/pixels than nchunks, leading chunks will be empty and
       the last chunk absorbs the remainder.
    """
    if isinstance(data, HyCloud): # special case for hyperclouds - split xyz, rgb and normals too
        # compute equal-sized index ranges; the last range is stretched to cover the remainder
        chunksize = int(np.floor(data.point_count() / nchunks))
        chunks = [(i * chunksize, (i + 1) * chunksize) for i in range(nchunks)]
        chunks[-1] = (chunks[-1][0], data.point_count()) # expand last chunk to include remainder
        # split points (copied so chunks are independent of the source cloud)
        xyz = [data.xyz[c[0]:c[1], :].copy() for c in chunks]
        # split per-point band data (if any)
        bands = [None for c in chunks]
        if data.has_bands():
            X = data.get_raveled().copy()
            bands = [X[c[0]:c[1], :] for c in chunks]
        # split rgb (if any)
        rgb = [None for c in chunks]
        if data.has_rgb():
            rgb = [data.rgb[c[0]:c[1], :].copy() for c in chunks]
        # split normals (if any)
        normals = [None for c in chunks]
        if data.has_normals():
            normals = [data.normals[c[0]:c[1], :].copy() for c in chunks]
        # each chunk keeps a copy of the original header
        return [HyCloud(xyz[i],
                        rgb=rgb[i],
                        normals=normals[i],
                        bands=bands[i],
                        header=data.header.copy()) for i in range(len(chunks))]
    else: # just split data (for HyImage and other types)
        # flatten to (npixels, nbands) so splitting is a simple row slice
        X = data.get_raveled().copy()
        chunksize = int(np.floor(X.shape[0] / nchunks))
        chunks = [(i * chunksize, (i + 1) * chunksize) for i in range(nchunks)]
        chunks[-1] = (chunks[-1][0], X.shape[0]) # expand last chunk to include remainder
        out = []
        for c in chunks:
            _o = data.copy(data=False) # copy metadata/header but not the data array
            _o.data = X[c[0]:c[1], :][:,None,:]  # reshape slice to (n, 1, bands) image layout
            out.append(_o)
        return out
def _merge(chunks, shape):
    """
    Merge a list of HyData objects into a combined one (aka. do the opposite of _split(...)).

    *Arguments*:
     - chunks = a list of HyData chunks to merge (in their original order).
     - shape = the output spatial shape (the band axis is appended automatically).

    *Returns*: a single merged HyData instance (of the same type as the input).
     The header of this instance will be a copy of chunks[0].header.
    """
    # stack the per-chunk data arrays back together and restore the spatial shape
    X = np.vstack([c.data for c in chunks])
    X = X.reshape((*shape, -1))
    if not isinstance(chunks[0], HyCloud): # easy!
        # copy metadata from the first chunk and attach the merged data
        out = chunks[0].copy(data=False)
        out.data = X
        out.header = chunks[0].header.copy()
        return out
    else: # less easy - clouds also carry xyz / rgb / normals arrays
        xyz = np.vstack([c.xyz for c in chunks])
        rgb = None
        if chunks[0].has_rgb():
            rgb = np.vstack([c.rgb for c in chunks])
        normals = None
        if chunks[0].has_normals():
            normals = np.vstack([c.normals for c in chunks])
        return HyCloud( xyz, rgb=rgb, normals=normals, bands=X, header=chunks[0].header.copy())
def _call(func, path, arg, kwd, n):
    """
    Worker entry point executed by each spawned process: load the cached chunk
    stored at ``path``, apply ``func`` to it, and overwrite the file with the
    result. The file extension selects the loader/saver pair (.ply = cloud,
    anything else = image).
    """
    cloud = '.ply' in path
    if cloud:
        # point-cloud chunk
        result = func(io.loadCloudPLY(path), *arg, **kwd)
        assert isinstance(result, HyCloud), "Error - function %s does not return a HyCloud." % func
        io.saveCloudPLY(path, result)
    else:
        # image chunk
        result = func(io.loadWithGDAL(path), *arg, **kwd)
        assert isinstance(result, HyImage), "Error - function %s does not return a HyImage." % func
        io.saveWithGDAL(path, result)
    return True  # signal completion
def parallel_chunks(function, data, *args, **kwds):
    """
    Run a function that operates per-point or per-pixel on smaller chunks of a point cloud or image dataset
    in parallel. Only use for expensive operations as otherwise overheads (writing files to cache, spawning threads,
    loading files from cache) are too costly.

    *Arguments*:
     - function = the function to run on each chunk of the dataset. Must take a HyCloud or HyImage dataset as its first
                  argument and also return a HyCloud or HyImage dataset (cf., mwl(...), get_hull_corrected(...)).
     - data = the HyCloud or HyImage instance to run the function on.
     - args = tuple of arguments to pass to the function.

    **Keywords**:
     - nthreads = the number of threads to spawn. Default is the number of cores - 2. Negative (or zero) values are
                  interpreted relative to the number of cores (e.g. -2 means "all cores but two").
     - any other keywords are passed to the function.

    *Returns*: the merged result (same type as data).
    """
    assert isinstance(data, HyCloud) or isinstance(data, HyImage)

    # resolve the number of worker processes
    nthreads = kwds.pop('nthreads', -2)
    assert isinstance(nthreads, int), "Error - nthreads must be an integer."
    if nthreads < 1:
        ncpu = os.cpu_count()  # may be None if the count cannot be determined
        assert ncpu is not None, "Error - could not identify CPU count. Please specify nthreads keyword."
        # N.B. relative values are *added* to the core count; the previous "- nthreads"
        # accidentally spawned cores + 2 workers for the default of -2.
        nthreads = max(1, ncpu + nthreads)
    assert nthreads > 0, "Error - cannot spawn %d threads" % nthreads

    # split data into chunks
    shape = data.data.shape[:-1]  # store shape (important for images)
    chunks = _split(data, nthreads)

    # dump chunks into a temp directory so worker processes can load them independently
    pth = mkdtemp()  # make temp directory
    print("Writing thread cache to %s:" % pth)
    paths = []
    for i, c in enumerate(chunks):
        if isinstance(c, HyCloud):
            p = os.path.join(pth, '%d.ply' % i)
            io.saveCloudPLY(p, c)
        else:
            p = os.path.join(pth, '%d.hdr' % i)
            io.saveWithGDAL(p, c)
        paths.append(p)

    # make sure we don't multithread twice when using advanced scipy/numpy functions...
    os.environ['MKL_NUM_THREADS'] = '1'
    os.environ['OMP_NUM_THREADS'] = '1'
    os.environ['MKL_DYNAMIC'] = 'FALSE'

    # spawn worker processes
    P = [mp.Process(target=_call, args=(function, p, args, kwds, i)) for i, p in enumerate(paths)]
    try:
        for p in P:
            p.start()
        for p in P:
            p.join()
        # success! load the processed chunks again...
        if isinstance(data, HyCloud):
            chunks = [io.loadCloudPLY(p) for p in paths]
        else:
            chunks = [io.loadWithGDAL(p) for p in paths]
        # remove temp directory
        shutil.rmtree(pth)  # delete temp directory
        print("Process complete (thread cache cleaned successfully).")
    except (KeyboardInterrupt, SystemExit):
        print("Job cancelled. Cleaning temp directory... ", end='')
        shutil.rmtree(pth)  # delete temp directory
        print("Done.")
        assert False, "Multiprocessing job cancelled by KeyboardInterrupt or SystemExit."
    except Exception as e:
        print("Error thrown. Cleaning temp directory... ", end='')
        shutil.rmtree(pth)  # delete temp directory
        print("Done.")
        raise e

    # re-enable scipy/numpy multithreading
    del os.environ['MKL_NUM_THREADS']
    del os.environ['OMP_NUM_THREADS']
    del os.environ['MKL_DYNAMIC']

    # merge back into one dataset
    out = _merge(chunks, shape=shape)
    return out
def _call2(func, in_paths, out_paths, kwd, n):
for i, o in zip(in_paths, out_paths): # loop through paths managed by this thread
func(i, o, **kwd) # call function
def parallel_datasets(function, in_paths, out_paths=None, nthreads=-2, **kwds):
    """
    Parallelise a single function across many HyData datasets.

    *Arguments*:
     - function = the function to run on each dataset. This should take an input path (string) as its first
       argument and an output path (also string) as its second. Anything returned by the function will be ignored.
     - in_paths = a list of input paths, each of which will be passed to function in each thread.
     - out_paths = a list of corresponding output paths that each function should write to. Defaults to in_paths.
     - nthreads = the number of threads to spawn. Default is the number of cores - 2. Negative (or zero) values are
       interpreted relative to the number of cores (e.g. -2 means "all cores but two").

    *Keywords*:
     - any keywords are passed directly to function in each thread.

    *Returns*: Nothing.
    """
    assert isinstance(in_paths, list), "Error - in_paths must be a list of file paths (string)."
    if out_paths is None:
        out_paths = in_paths
    assert isinstance(out_paths, list), "Error - out_paths must be a list of file paths (string)."
    assert len(out_paths) == len(in_paths), "Error - length of input and output paths must match."

    # resolve the number of worker processes
    assert isinstance(nthreads, int), "Error - nthreads must be an integer."
    if nthreads < 1:
        ncpu = os.cpu_count()  # may be None if the count cannot be determined
        assert ncpu is not None, "Error - could not identify CPU count. Please specify nthreads keyword."
        # N.B. relative values are *added* to the core count; the previous "- nthreads"
        # accidentally spawned cores + 2 workers for the default of -2.
        nthreads = max(1, ncpu + nthreads)
    assert nthreads > 0, "Error - cannot spawn %d threads" % nthreads

    # distribute input paths across threads
    nthreads = min(len(in_paths), nthreads)  # avoid case where we have more threads than paths
    stride = int(len(in_paths) / nthreads)
    inP = []
    outP = []
    for i in range(nthreads):
        idx0 = i * stride
        idx1 = min((i + 1) * stride, len(in_paths))
        inP.append(in_paths[idx0:idx1])
        outP.append(out_paths[idx0:idx1])
    for i in range(len(in_paths) % nthreads):  # and add remainder
        inP[i].append(in_paths[-i - 1])
        outP[i].append(out_paths[-i - 1])

    # make sure we don't multithread twice when using advanced scipy/numpy functions...
    os.environ['MKL_NUM_THREADS'] = '1'
    os.environ['OMP_NUM_THREADS'] = '1'
    os.environ['MKL_DYNAMIC'] = 'FALSE'

    # spawn worker processes and wait for jobs to finish
    P = [mp.Process(target=_call2, args=(function, inP[i], outP[i], kwds, i)) for i in range(nthreads)]
    for p in P:
        p.start()
    for p in P:
        p.join()

    # re-enable scipy/numpy multithreading
    del os.environ['MKL_NUM_THREADS']
    del os.environ['OMP_NUM_THREADS']
    del os.environ['MKL_DYNAMIC']
| [
"hylite.io.loadCloudPLY",
"os.path.join",
"numpy.floor",
"hylite.io.loadWithGDAL",
"os.cpu_count",
"tempfile.mkdtemp",
"shutil.rmtree",
"multiprocessing.Process",
"hylite.io.saveWithGDAL",
"hylite.io.saveCloudPLY",
"numpy.vstack"
] | [((2673, 2708), 'numpy.vstack', 'np.vstack', (['[c.data for c in chunks]'], {}), '([c.data for c in chunks])\n', (2682, 2708), True, 'import numpy as np\n'), ((5899, 5908), 'tempfile.mkdtemp', 'mkdtemp', ([], {}), '()\n', (5906, 5908), False, 'from tempfile import mkdtemp\n'), ((2975, 3009), 'numpy.vstack', 'np.vstack', (['[c.xyz for c in chunks]'], {}), '([c.xyz for c in chunks])\n', (2984, 3009), True, 'import numpy as np\n'), ((3651, 3672), 'hylite.io.loadCloudPLY', 'io.loadCloudPLY', (['path'], {}), '(path)\n', (3666, 3672), False, 'from hylite import io\n'), ((3861, 3890), 'hylite.io.saveCloudPLY', 'io.saveCloudPLY', (['path', 'result'], {}), '(path, result)\n', (3876, 3890), False, 'from hylite import io\n'), ((3936, 3957), 'hylite.io.loadWithGDAL', 'io.loadWithGDAL', (['path'], {}), '(path)\n', (3951, 3957), False, 'from hylite import io\n'), ((4140, 4169), 'hylite.io.saveWithGDAL', 'io.saveWithGDAL', (['path', 'result'], {}), '(path, result)\n', (4155, 4169), False, 'from hylite import io\n'), ((6547, 6606), 'multiprocessing.Process', 'mp.Process', ([], {'target': '_call', 'args': '(function, p, args, kwds, i)'}), '(target=_call, args=(function, p, args, kwds, i))\n', (6557, 6606), True, 'import multiprocessing as mp\n'), ((6976, 6994), 'shutil.rmtree', 'shutil.rmtree', (['pth'], {}), '(pth)\n', (6989, 6994), False, 'import shutil\n'), ((10289, 10357), 'multiprocessing.Process', 'mp.Process', ([], {'target': '_call2', 'args': '(function, inP[i], outP[i], kwds, i)'}), '(target=_call2, args=(function, inP[i], outP[i], kwds, i))\n', (10299, 10357), True, 'import multiprocessing as mp\n'), ((1850, 1880), 'numpy.floor', 'np.floor', (['(X.shape[0] / nchunks)'], {}), '(X.shape[0] / nchunks)\n', (1858, 1880), True, 'import numpy as np\n'), ((3079, 3113), 'numpy.vstack', 'np.vstack', (['[c.rgb for c in chunks]'], {}), '([c.rgb for c in chunks])\n', (3088, 3113), True, 'import numpy as np\n'), ((3195, 3233), 'numpy.vstack', 'np.vstack', (['[c.normals for c in 
chunks]'], {}), '([c.normals for c in chunks])\n', (3204, 3233), True, 'import numpy as np\n'), ((5433, 5447), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (5445, 5447), False, 'import os\n'), ((6113, 6144), 'os.path.join', 'os.path.join', (['pth', "('%d.ply' % i)"], {}), "(pth, '%d.ply' % i)\n", (6125, 6144), False, 'import os\n'), ((6157, 6178), 'hylite.io.saveCloudPLY', 'io.saveCloudPLY', (['p', 'c'], {}), '(p, c)\n', (6172, 6178), False, 'from hylite import io\n'), ((6209, 6240), 'os.path.join', 'os.path.join', (['pth', "('%d.hdr' % i)"], {}), "(pth, '%d.hdr' % i)\n", (6221, 6240), False, 'import os\n'), ((6253, 6274), 'hylite.io.saveWithGDAL', 'io.saveWithGDAL', (['p', 'c'], {}), '(p, c)\n', (6268, 6274), False, 'from hylite import io\n'), ((7216, 7234), 'shutil.rmtree', 'shutil.rmtree', (['pth'], {}), '(pth)\n', (7229, 7234), False, 'import shutil\n'), ((7474, 7492), 'shutil.rmtree', 'shutil.rmtree', (['pth'], {}), '(pth)\n', (7487, 7492), False, 'import shutil\n'), ((9367, 9381), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (9379, 9381), False, 'import os\n'), ((6829, 6847), 'hylite.io.loadCloudPLY', 'io.loadCloudPLY', (['p'], {}), '(p)\n', (6844, 6847), False, 'from hylite import io\n'), ((6900, 6918), 'hylite.io.loadWithGDAL', 'io.loadWithGDAL', (['p'], {}), '(p)\n', (6915, 6918), False, 'from hylite import io\n')] |
from pyspark import SparkContext,SparkConf
import numpy as np

# Configure the connection to the standalone Spark master.
# NOTE: the master URL must be supplied via setMaster() (property key
# 'spark.master'); the original conf.set('master', ...) defined an unrelated
# key that Spark never reads, so the master setting was silently ignored.
conf = SparkConf()
conf.setMaster('spark://hadoop-maste:7077')
context = SparkContext(conf=conf)

# Accumulator: a write-only shared counter; executors add to it, only the
# driver can read its value.
acc = context.accumulator(0)
print(type(acc),acc.value)

# Distribute 0..100 across 5 partitions.
rdd = context.parallelize(np.arange(101),5)

def acc_add(a):
    # Runs on the executors; folds each element into the accumulator.
    acc.add(a)
    return a

rdd2 = rdd.map(acc_add)
print(rdd2.collect())  # action triggers the job; accumulator updates happen here
print(acc.value)       # sum of 0..100 == 5050
context.stop()
| [
"pyspark.SparkContext",
"pyspark.SparkConf",
"numpy.arange"
] | [((69, 80), 'pyspark.SparkConf', 'SparkConf', ([], {}), '()\n', (78, 80), False, 'from pyspark import SparkContext, SparkConf\n'), ((138, 161), 'pyspark.SparkContext', 'SparkContext', ([], {'conf': 'conf'}), '(conf=conf)\n', (150, 161), False, 'from pyspark import SparkContext, SparkConf\n'), ((244, 258), 'numpy.arange', 'np.arange', (['(101)'], {}), '(101)\n', (253, 258), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# <NAME> <<EMAIL>> <https://hanxiao.github.io>
import multiprocessing
import os
import sys
import threading
import time
import uuid
from collections import defaultdict
from datetime import datetime
from multiprocessing import Process
from multiprocessing.pool import Pool
import numpy as np
import zmq
from termcolor import colored
from zmq.utils import jsonapi
from .bert import modeling, tokenization
from .bert.extract_features import convert_lst_to_features, masked_reduce_mean, PoolingStrategy, \
masked_reduce_max, mul_mask
from .helper import set_logger, send_ndarray, optimize_graph
def _check_tf_version():
    """
    Import tensorflow and return its version as a list of string components,
    asserting that the installed version is at least 1.10.
    """
    import tensorflow as tf
    tf_ver = tf.__version__.split('.')
    # Compare (major, minor) as a tuple. The original check
    # "major >= 1 and minor >= 10" wrongly rejected versions such as 2.0
    # (whose minor component 0 is < 10).
    assert (int(tf_ver[0]), int(tf_ver[1])) >= (1, 10), 'Tensorflow >=1.10 is required!'
    return tf_ver
__version__ = '1.5.5'  # server version, reported to clients as 'server_version'
_tf_ver_ = _check_tf_version()  # cached tensorflow version components, reported in the server config
def _auto_bind(socket):
    """
    Bind ``socket`` to a random local endpoint and return the resolved address
    as an ascii string. Uses a random tcp port on Windows (which lacks the
    ipc:// transport) and an ipc socket elsewhere.
    """
    if os.name == 'nt':  # Windows: fall back to a random tcp port
        socket.bind_to_random_port('tcp://*')
    else:
        # Prefer a user-supplied directory for the ipc socket file; if
        # ZEROMQ_SOCK_TMP_DIR is unset, let ZeroMQ pick the location ('*').
        tmp_dir = os.environ.get('ZEROMQ_SOCK_TMP_DIR')
        if tmp_dir is None:
            tmp_dir = '*'
        else:
            if not os.path.exists(tmp_dir):
                raise ValueError('This directory for sockets ({}) does not seems to exist.'.format(tmp_dir))
            # unique file name inside the directory (first 8 chars of a uuid1)
            tmp_dir = os.path.join(tmp_dir, str(uuid.uuid1())[:8])
        socket.bind('ipc://{}'.format(tmp_dir))
    return socket.getsockopt(zmq.LAST_ENDPOINT).decode('ascii')
class ServerCommand:
    # Byte constants used as the message-type frame in the multipart ZeroMQ
    # protocol between clients, the ventilator (BertServer) and the sink.
    terminate = b'TERMINATION'
    show_config = b'SHOW_CONFIG'  # client asks the server to report its configuration
    new_job = b'REGISTER'         # ventilator registers a new encode job at the sink
class BertServer(threading.Thread):
    """
    Ventilator thread: receives client requests on a PULL socket, splits large
    encode requests into batches of at most ``max_batch_size`` sentences, and
    pushes them to the BertWorker processes. Results are collected and
    published back to clients by the companion BertSink process.
    """
    def __init__(self, args):
        """
        Set up all sockets, spawn the sink process and freeze/optimize the
        tensorflow graph (done in a child process via a Pool).
        ``args`` is the parsed CLI namespace (model_dir, ports, worker count, ...).
        """
        super().__init__()
        self.logger = set_logger(colored('VENTILATOR', 'magenta'))
        self.model_dir = args.model_dir
        self.max_seq_len = args.max_seq_len
        self.num_worker = args.num_worker
        self.max_batch_size = args.max_batch_size
        self.port = args.port
        self.args = args
        # snapshot of the configuration, returned verbatim on show_config requests
        self.args_dict = {
            'model_dir': args.model_dir,
            'max_seq_len': args.max_seq_len,
            'num_worker': args.num_worker,
            'max_batch_size': args.max_batch_size,
            'port': args.port,
            'port_out': args.port_out,
            'pooling_layer': args.pooling_layer,
            'pooling_strategy': args.pooling_strategy.value,
            'tensorflow_version': _tf_ver_,
            'python_version': sys.version,
            'server_start_time': str(datetime.now()),
            'use_xla_compiler': args.xla,
        }
        self.processes = []  # sink + worker processes, closed in close()
        self.context = zmq.Context()
        # frontend facing client
        self.frontend = self.context.socket(zmq.PULL)
        self.frontend.bind('tcp://*:%d' % self.port)
        # pair connection between frontend and sink
        self.sink = self.context.socket(zmq.PAIR)
        self.addr_front2sink = _auto_bind(self.sink)
        # backend facing workers
        self.backend = self.context.socket(zmq.PUSH)
        self.addr_backend = _auto_bind(self.backend)
        # start the sink process; it sends back the address workers should push results to
        proc_sink = BertSink(self.args, self.addr_front2sink)
        proc_sink.start()
        self.processes.append(proc_sink)
        self.addr_sink = self.sink.recv().decode('ascii')
        self.logger.info('freezing, optimizing and exporting graph, could take a while...')
        with Pool(processes=1) as pool:
            # optimize the graph, must be done in another process
            self.graph_path = pool.apply(optimize_graph, (self.args,))
        self.logger.info('optimized graph is stored at: %s' % self.graph_path)
    def close(self):
        """Shut down all child processes and tear down the ZeroMQ sockets/context."""
        self.logger.info('shutting down...')
        for p in self.processes:
            p.close()
        self.frontend.close()
        self.backend.close()
        self.sink.close()
        self.context.term()
        self.logger.info('terminated!')
    def run(self):
        """
        Main loop: map workers to devices, spawn them, then forward client
        requests to the backend until the context is terminated.
        """
        num_req = 0
        run_on_gpu = False
        # -1 == cpu for every worker unless GPUs are discovered below
        device_map = [-1] * self.num_worker
        if not self.args.cpu:
            try:
                import GPUtil
                num_all_gpu = len(GPUtil.getGPUs())
                avail_gpu = GPUtil.getAvailable(order='memory', limit=min(num_all_gpu, self.num_worker))
                num_avail_gpu = len(avail_gpu)
                if num_avail_gpu < self.num_worker:
                    self.logger.warn('only %d out of %d GPU(s) is available/free, but "-num_worker=%d"' %
                                     (num_avail_gpu, num_all_gpu, self.num_worker))
                    self.logger.warn('multiple workers will be allocated to one GPU, '
                                     'may not scale well and may raise out-of-memory')
                # cycle the available GPUs so every worker gets a device id
                device_map = (avail_gpu * self.num_worker)[: self.num_worker]
                run_on_gpu = True
            except FileNotFoundError:
                self.logger.warn('nvidia-smi is missing, often means no gpu on this machine. '
                                 'fall back to cpu!')
        self.logger.info('device_map: \n\t\t%s' % '\n\t\t'.join(
            'worker %2d -> %s' % (w_id, ('gpu %2d' % g_id) if g_id >= 0 else 'cpu') for w_id, g_id in
            enumerate(device_map)))
        # start the backend processes
        for idx, device_id in enumerate(device_map):
            process = BertWorker(idx, self.args, self.addr_backend, self.addr_sink, device_id, self.graph_path)
            self.processes.append(process)
            process.start()
        while True:
            try:
                # requests are 3-frame multiparts: (client identity, payload, request id)
                request = self.frontend.recv_multipart()
                client, msg, req_id = request
                if msg == ServerCommand.show_config:
                    # config requests are answered directly via the sink
                    self.logger.info('new config request\treq id: %d\tclient: %s' % (int(req_id), client))
                    self.sink.send_multipart([client, msg,
                                              jsonapi.dumps({**{'client': client.decode('ascii'),
                                                                'num_subprocess': len(self.processes),
                                                                'ventilator -> worker': self.addr_backend,
                                                                'worker -> sink': self.addr_sink,
                                                                'ventilator <-> sink': self.addr_front2sink,
                                                                'server_current_time': str(datetime.now()),
                                                                'num_request': num_req,
                                                                'run_on_gpu': run_on_gpu,
                                                                'server_version': __version__},
                                                             **self.args_dict}), req_id])
                    continue
                self.logger.info('new encode request\treq id: %d\tclient: %s' % (int(req_id), client))
                num_req += 1
                seqs = jsonapi.loads(msg)
                num_seqs = len(seqs)
                # register a new job at sink (so it knows how many results to expect)
                self.sink.send_multipart([client, ServerCommand.new_job, b'%d' % num_seqs, req_id])
                job_id = client + b'#' + req_id
                if num_seqs > self.max_batch_size:
                    # partition the large batch into small batches; each partial job id
                    # carries its start offset so the sink can restore the original order
                    s_idx = 0
                    while s_idx < num_seqs:
                        tmp = seqs[s_idx: (s_idx + self.max_batch_size)]
                        if tmp:
                            partial_job_id = job_id + b'@%d' % s_idx
                            self.backend.send_multipart([partial_job_id, jsonapi.dumps(tmp)])
                        s_idx += len(tmp)
                else:
                    self.backend.send_multipart([job_id, msg])
            except zmq.error.ContextTerminated:
                self.logger.error('context is closed!')
            except ValueError:
                # unpacking `request` failed: wrong number of frames
                self.logger.error('received a wrongly-formatted request (expected 3 frames, got %d)' % len(request))
                self.logger.error('\n'.join('field %d: %s' % (idx, k) for idx, k in enumerate(request)))
class BertSink(Process):
    """
    Sink process: collects (possibly partial) embedding results from the
    workers, reassembles them in the original order, and publishes the
    complete result back to the requesting client on a PUB socket.
    """
    def __init__(self, args, front_sink_addr):
        """
        :param args: parsed CLI namespace (only port_out is used here).
        :param front_sink_addr: address of the ventilator's PAIR socket.
        """
        super().__init__()
        self.port = args.port_out
        self.exit_flag = multiprocessing.Event()
        self.logger = set_logger(colored('SINK', 'green'))
        self.front_sink_addr = front_sink_addr
    def close(self):
        """Signal the run loop to exit and terminate the process."""
        self.logger.info('shutting down...')
        self.exit_flag.set()
        self.terminate()
        self.join()
        self.logger.info('terminated!')
    def run(self):
        context = zmq.Context()
        # receive from workers
        receiver = context.socket(zmq.PULL)
        receiver_addr = _auto_bind(receiver)
        frontend = context.socket(zmq.PAIR)
        frontend.connect(self.front_sink_addr)
        # publish to client
        sender = context.socket(zmq.PUB)
        sender.bind('tcp://*:%d' % self.port)
        # per-job bookkeeping: rows received so far, partial results, expected total
        pending_checksum = defaultdict(int)
        pending_result = defaultdict(list)
        job_checksum = {}
        poller = zmq.Poller()
        poller.register(frontend, zmq.POLLIN)
        poller.register(receiver, zmq.POLLIN)
        # send worker receiver address back to frontend
        frontend.send(receiver_addr.encode('ascii'))
        try:
            while not self.exit_flag.is_set():
                socks = dict(poller.poll())
                if socks.get(receiver) == zmq.POLLIN:
                    msg = receiver.recv_multipart()
                    job_id = msg[0]
                    # parsing the ndarray: frame 1 holds dtype/shape, frame 2 the raw bytes
                    arr_info, arr_val = jsonapi.loads(msg[1]), msg[2]
                    X = np.frombuffer(memoryview(arr_val), dtype=arr_info['dtype'])
                    X = X.reshape(arr_info['shape'])
                    # job ids look like b'client#req' or b'client#req@offset' for partial jobs
                    job_info = job_id.split(b'@')
                    job_id = job_info[0]
                    partial_id = job_info[1] if len(job_info) == 2 else 0
                    pending_result[job_id].append((X, partial_id))
                    pending_checksum[job_id] += X.shape[0]
                    self.logger.info('collect job %s (%d/%d)' % (job_id,
                                                                 pending_checksum[job_id],
                                                                 job_checksum[job_id]))
                    # check if there are finished jobs, send them back to the clients
                    finished = [(k, v) for k, v in pending_result.items() if pending_checksum[k] == job_checksum[k]]
                    for job_info, tmp in finished:
                        self.logger.info(
                            'send back\tsize: %d\tjob id:%s\t' % (
                                job_checksum[job_info], job_info))
                        # re-sort to the original order (by partial offset)
                        tmp = [x[0] for x in sorted(tmp, key=lambda x: int(x[1]))]
                        client_addr, req_id = job_info.split(b'#')
                        send_ndarray(sender, client_addr, np.concatenate(tmp, axis=0), req_id)
                        pending_result.pop(job_info)
                        pending_checksum.pop(job_info)
                        job_checksum.pop(job_info)
                if socks.get(frontend) == zmq.POLLIN:
                    client_addr, msg_type, msg_info, req_id = frontend.recv_multipart()
                    if msg_type == ServerCommand.new_job:
                        # remember how many rows this job must deliver before it is complete
                        job_info = client_addr + b'#' + req_id
                        job_checksum[job_info] = int(msg_info)
                        self.logger.info('job register\tsize: %d\tjob id: %s' % (int(msg_info), job_info))
                    elif msg_type == ServerCommand.show_config:
                        time.sleep(0.1)  # dirty fix of slow-joiner: sleep so that client receiver can connect.
                        self.logger.info('send config\tclient %s' % client_addr)
                        sender.send_multipart([client_addr, msg_info, req_id])
        except zmq.error.ContextTerminated:
            self.logger.error('context is closed!')
class BertWorker(Process):
    """
    Worker process: pulls tokenization jobs from the ventilator, runs them
    through the frozen BERT graph via a tf.Estimator, and pushes the resulting
    embeddings to the sink.
    """
    def __init__(self, id, args, worker_address, sink_address, device_id, graph_path):
        """
        :param id: worker index (used for logging only).
        :param args: parsed CLI namespace.
        :param worker_address: address of the ventilator's PUSH socket.
        :param sink_address: address of the sink's PULL socket.
        :param device_id: GPU index to use, or -1 for cpu.
        :param graph_path: path to the frozen/optimized graph file.
        """
        super().__init__()
        self.worker_id = id
        self.device_id = device_id
        self.logger = set_logger(colored('WORKER-%d' % self.worker_id, 'yellow'))
        self.max_seq_len = args.max_seq_len
        self.daemon = True  # die with the parent server
        self.exit_flag = multiprocessing.Event()
        self.worker_address = worker_address
        self.sink_address = sink_address
        self.prefetch_factor = 10  # tf.data prefetch depth
        self.gpu_memory_fraction = args.gpu_memory_fraction
        self.model_dir = args.model_dir
        self.graph_path = graph_path
    def close(self):
        """Signal the generator loop to exit and terminate the process."""
        self.logger.info('shutting down...')
        self.exit_flag.set()
        self.terminate()
        self.join()
        self.logger.info('terminated!')
    def get_estimator(self, tf):
        """Build a tf.Estimator that feeds features through the frozen graph."""
        from tensorflow.python.estimator.estimator import Estimator
        from tensorflow.python.estimator.run_config import RunConfig
        from tensorflow.python.estimator.model_fn import EstimatorSpec
        def model_fn(features, labels, mode, params):
            # load the frozen graph and wire the input placeholders to the
            # estimator features; 'final_encodes' is the pooled-embedding output
            with tf.gfile.GFile(self.graph_path, 'rb') as f:
                graph_def = tf.GraphDef()
                graph_def.ParseFromString(f.read())
            input_names = ['input_ids', 'input_mask', 'input_type_ids']
            output = tf.import_graph_def(graph_def,
                                         input_map={k + ':0': features[k] for k in input_names},
                                         return_elements=['final_encodes:0'])
            return EstimatorSpec(mode=mode, predictions={
                'client_id': features['client_id'],
                'encodes': output[0]
            })
        # 0 GPUs for cpu workers, otherwise exactly one (selected via CUDA_VISIBLE_DEVICES)
        config = tf.ConfigProto(device_count={'GPU': 0 if self.device_id < 0 else 1})
        config.gpu_options.allow_growth = True
        config.gpu_options.per_process_gpu_memory_fraction = self.gpu_memory_fraction
        config.log_device_placement = False
        # session-wise XLA doesn't seem to work on tf 1.10
        # if args.xla:
        #     config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
        return Estimator(model_fn=model_fn, config=RunConfig(session_config=config))
    def run(self):
        """Main loop: connect sockets and stream predictions to the sink forever."""
        self.logger.info('use device %s, load graph from %s' %
                         ('cpu' if self.device_id < 0 else ('gpu: %d' % self.device_id), self.graph_path))
        # pin this process to its assigned device and silence tf logging
        os.environ['CUDA_VISIBLE_DEVICES'] = str(self.device_id)
        os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
        self.logger.info('please ignore "WARNING: Using temporary folder as model directory"')
        import tensorflow as tf
        estimator = self.get_estimator(tf)
        context = zmq.Context()
        receiver = context.socket(zmq.PULL)
        receiver.connect(self.worker_address)
        sink = context.socket(zmq.PUSH)
        sink.connect(self.sink_address)
        # predict() drives the generator in input_fn_builder, which blocks on
        # the receiver socket for new jobs
        for r in estimator.predict(self.input_fn_builder(receiver, tf), yield_single_examples=False):
            send_ndarray(sink, r['client_id'], r['encodes'])
            self.logger.info('job done\tsize: %s\tclient: %s' % (r['encodes'].shape, r['client_id']))
        receiver.close()
        sink.close()
        context.term()
        self.logger.info('terminated!')
    def input_fn_builder(self, worker, tf):
        """Return an input_fn whose dataset yields feature batches read from the worker socket."""
        def gen():
            tokenizer = tokenization.FullTokenizer(vocab_file=os.path.join(self.model_dir, 'vocab.txt'))
            self.logger.info('ready and listening!')
            while not self.exit_flag.is_set():
                client_id, msg = worker.recv_multipart()
                msg = jsonapi.loads(msg)
                self.logger.info('new job\tsize: %d\tclient: %s' % (len(msg), client_id))
                # check if msg is a list of list, if yes consider the input is already tokenized
                is_tokenized = all(isinstance(el, list) for el in msg)
                tmp_f = list(convert_lst_to_features(msg, self.max_seq_len, tokenizer, is_tokenized))
                yield {
                    'client_id': client_id,
                    'input_ids': [f.input_ids for f in tmp_f],
                    'input_mask': [f.input_mask for f in tmp_f],
                    'input_type_ids': [f.input_type_ids for f in tmp_f]
                }
        def input_fn():
            return (tf.data.Dataset.from_generator(
                gen,
                output_types={'input_ids': tf.int32,
                              'input_mask': tf.int32,
                              'input_type_ids': tf.int32,
                              'client_id': tf.string},
                output_shapes={
                    'client_id': (),
                    'input_ids': (None, self.max_seq_len),
                    'input_mask': (None, self.max_seq_len),
                    'input_type_ids': (None, self.max_seq_len)}).prefetch(self.prefetch_factor))
        return input_fn
| [
"GPUtil.getGPUs",
"tensorflow.__version__.split",
"collections.defaultdict",
"tensorflow.ConfigProto",
"zmq.Poller",
"os.path.join",
"zmq.Context",
"tensorflow.python.estimator.model_fn.EstimatorSpec",
"os.path.exists",
"zmq.utils.jsonapi.loads",
"multiprocessing.pool.Pool",
"multiprocessing.E... | [((711, 736), 'tensorflow.__version__.split', 'tf.__version__.split', (['"""."""'], {}), "('.')\n", (731, 736), True, 'import tensorflow as tf\n'), ((2665, 2678), 'zmq.Context', 'zmq.Context', ([], {}), '()\n', (2676, 2678), False, 'import zmq\n'), ((8432, 8455), 'multiprocessing.Event', 'multiprocessing.Event', ([], {}), '()\n', (8453, 8455), False, 'import multiprocessing\n'), ((8781, 8794), 'zmq.Context', 'zmq.Context', ([], {}), '()\n', (8792, 8794), False, 'import zmq\n'), ((9151, 9167), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (9162, 9167), False, 'from collections import defaultdict\n'), ((9193, 9210), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (9204, 9210), False, 'from collections import defaultdict\n'), ((9255, 9267), 'zmq.Poller', 'zmq.Poller', ([], {}), '()\n', (9265, 9267), False, 'import zmq\n'), ((12644, 12667), 'multiprocessing.Event', 'multiprocessing.Event', ([], {}), '()\n', (12665, 12667), False, 'import multiprocessing\n'), ((14040, 14108), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'device_count': "{'GPU': 0 if self.device_id < 0 else 1}"}), "(device_count={'GPU': 0 if self.device_id < 0 else 1})\n", (14054, 14108), True, 'import tensorflow as tf\n'), ((15045, 15058), 'zmq.Context', 'zmq.Context', ([], {}), '()\n', (15056, 15058), False, 'import zmq\n'), ((1768, 1800), 'termcolor.colored', 'colored', (['"""VENTILATOR"""', '"""magenta"""'], {}), "('VENTILATOR', 'magenta')\n", (1775, 1800), False, 'from termcolor import colored\n'), ((3442, 3459), 'multiprocessing.pool.Pool', 'Pool', ([], {'processes': '(1)'}), '(processes=1)\n', (3446, 3459), False, 'from multiprocessing.pool import Pool\n'), ((8489, 8513), 'termcolor.colored', 'colored', (['"""SINK"""', '"""green"""'], {}), "('SINK', 'green')\n", (8496, 8513), False, 'from termcolor import colored\n'), ((12499, 12546), 'termcolor.colored', 'colored', (["('WORKER-%d' % self.worker_id)", '"""yellow"""'], {}), 
"('WORKER-%d' % self.worker_id, 'yellow')\n", (12506, 12546), False, 'from termcolor import colored\n'), ((13653, 13782), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'input_map': "{(k + ':0'): features[k] for k in input_names}", 'return_elements': "['final_encodes:0']"}), "(graph_def, input_map={(k + ':0'): features[k] for k in\n input_names}, return_elements=['final_encodes:0'])\n", (13672, 13782), True, 'import tensorflow as tf\n'), ((13879, 13979), 'tensorflow.python.estimator.model_fn.EstimatorSpec', 'EstimatorSpec', ([], {'mode': 'mode', 'predictions': "{'client_id': features['client_id'], 'encodes': output[0]}"}), "(mode=mode, predictions={'client_id': features['client_id'],\n 'encodes': output[0]})\n", (13892, 13979), False, 'from tensorflow.python.estimator.model_fn import EstimatorSpec\n'), ((1162, 1185), 'os.path.exists', 'os.path.exists', (['tmp_dir'], {}), '(tmp_dir)\n', (1176, 1185), False, 'import os\n'), ((2545, 2559), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2557, 2559), False, 'from datetime import datetime\n'), ((7078, 7096), 'zmq.utils.jsonapi.loads', 'jsonapi.loads', (['msg'], {}), '(msg)\n', (7091, 7096), False, 'from zmq.utils import jsonapi\n'), ((13420, 13457), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['self.graph_path', '"""rb"""'], {}), "(self.graph_path, 'rb')\n", (13434, 13457), True, 'import tensorflow as tf\n'), ((13492, 13505), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (13503, 13505), True, 'import tensorflow as tf\n'), ((14517, 14549), 'tensorflow.python.estimator.run_config.RunConfig', 'RunConfig', ([], {'session_config': 'config'}), '(session_config=config)\n', (14526, 14549), False, 'from tensorflow.python.estimator.run_config import RunConfig\n'), ((15954, 15972), 'zmq.utils.jsonapi.loads', 'jsonapi.loads', (['msg'], {}), '(msg)\n', (15967, 15972), False, 'from zmq.utils import jsonapi\n'), ((4182, 4198), 'GPUtil.getGPUs', 'GPUtil.getGPUs', ([], {}), '()\n', (4196, 
4198), False, 'import GPUtil\n'), ((15731, 15772), 'os.path.join', 'os.path.join', (['self.model_dir', '"""vocab.txt"""'], {}), "(self.model_dir, 'vocab.txt')\n", (15743, 15772), False, 'import os\n'), ((16664, 16987), 'tensorflow.data.Dataset.from_generator', 'tf.data.Dataset.from_generator', (['gen'], {'output_types': "{'input_ids': tf.int32, 'input_mask': tf.int32, 'input_type_ids': tf.int32,\n 'client_id': tf.string}", 'output_shapes': "{'client_id': (), 'input_ids': (None, self.max_seq_len), 'input_mask': (\n None, self.max_seq_len), 'input_type_ids': (None, self.max_seq_len)}"}), "(gen, output_types={'input_ids': tf.int32,\n 'input_mask': tf.int32, 'input_type_ids': tf.int32, 'client_id': tf.\n string}, output_shapes={'client_id': (), 'input_ids': (None, self.\n max_seq_len), 'input_mask': (None, self.max_seq_len), 'input_type_ids':\n (None, self.max_seq_len)})\n", (16694, 16987), True, 'import tensorflow as tf\n'), ((1344, 1356), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (1354, 1356), False, 'import uuid\n'), ((9799, 9820), 'zmq.utils.jsonapi.loads', 'jsonapi.loads', (['msg[1]'], {}), '(msg[1])\n', (9812, 9820), False, 'from zmq.utils import jsonapi\n'), ((11198, 11225), 'numpy.concatenate', 'np.concatenate', (['tmp'], {'axis': '(0)'}), '(tmp, axis=0)\n', (11212, 11225), True, 'import numpy as np\n'), ((11916, 11931), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (11926, 11931), False, 'import time\n'), ((7767, 7785), 'zmq.utils.jsonapi.dumps', 'jsonapi.dumps', (['tmp'], {}), '(tmp)\n', (7780, 7785), False, 'from zmq.utils import jsonapi\n'), ((6512, 6526), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6524, 6526), False, 'from datetime import datetime\n')] |
import keras
from resnet152 import Scale
from keras.models import load_model
from keras.utils.generic_utils import CustomObjectScope
import math
import sys
import argparse
import numpy as np
import scipy.io as sio
import os
import glob
import h5py
import cv2
import gc
''' This code is based on <NAME>., <NAME>., & Arganda-Carreras,
I. (2017). "Vision-Based Fall Detection with Convolutional Neural Networks"
Wireless Communications and Mobile Computing, 2017.
Also, new features were added by <NAME> working in
Semantix.
'''
''' Documentation: class Optflow_extractor
This class has a few methods:
extract
extract_optflow
The only method that should be called outside of this class is:
    extract: simply calls extract_optflow for its multiple classes
extract_optflow: extracts opticalflows from videos using Farnebacks's
algorithm and stores in the same folder that the video is.
'''
class Optflow_extractor:
    """Extracts dense optical-flow images from per-class video folders.

    For every class, every sub-folder is expected to hold one video named
    after the folder (plus ``ext``); Farneback optical flow is computed
    frame-to-frame and written back into the same folder as flow_x_/flow_y_/
    flow_z_ JPEG sequences.
    """
    def __init__(self, classes, x_size, y_size, ext):
        # classes: list of class folder names under the data root.
        self.classes = classes
        # ext: video file extension, e.g. '.mp4'.
        self.ext = ext
        self.classes_dirs = []
        self.classes_videos = []
        # fall_dirs / class_value are initialised but not used in this class;
        # presumably consumed by sibling tooling — TODO confirm.
        self.fall_dirs = []
        self.class_value = []
        # Target frame dimensions; currently only 224x224 is supported below.
        self.x_size = x_size
        self.y_size = y_size
    def extract(self, data_folder):
        """Run optical-flow extraction for every configured class."""
        self.get_dirs(data_folder)
        for i in range(len(self.classes)):
            # Extracting optical flow
            self.extract_optflow(data_folder, self.classes_videos[i],
                    self.classes_dirs[i], self.classes[i])
    def extract_optflow(self, data_folder, videos, dirs, class_):
        """Compute Farneback optical flow for each (video, folder) pair.

        Writes, per frame pair: flow_x (angle plane), flow_y (normalized
        magnitude plane) and flow_z (HSV->BGR visualisation) JPEGs into the
        video's own folder.
        """
        for (video, dir) in zip(videos, dirs):
            print(dir)
            counter = 1
            cap = cv2.VideoCapture(video)
            success, frame1 = cap.read()
            try:
                prvs = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
            except cv2.error as e:
                # frame1 is None when the video is missing/empty; cvtColor
                # raises cv2.error, which we turn into a user-facing message.
                print("Inside every folder in dataset it's expected a valid" +
                     "(non-empty) video with name equal to the folder + .mp4." +
                     "In your case, inside %s it's expected a %s video"
                     % (data_folder + class_ + '/' + dir, video)
                     , file=sys.stderr)
                exit(1)
            # HSV buffer: hue encodes flow direction, value encodes magnitude;
            # saturation is fixed at full.
            hsv = np.zeros_like(frame1)
            hsv[...,1] = 255
            path = data_folder + class_ + '/' + dir
            while True:
                success, frame2 = cap.read()
                if success == False:
                    break
                next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
                flow = cv2.calcOpticalFlowFarneback(prvs, next, None,
                                    0.702, 5, 10, 2, 7, 1.5, 0)
                mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])
                # todo: (ALERT) because of a unknown reason cartToPolar is
                # returning -inf for some mag positions and than normalize
                # gets all 0...
                # Zero out NaN/inf magnitudes so cv2.normalize below does not
                # collapse the whole plane to zeros.
                for i in range(len(mag)):
                    for j in range(len(mag[i])):
                        if math.isnan(mag[i][j]) or math.isinf(mag[i][j]):
                            mag[i][j] = 0
                # Map angle (radians) to OpenCV hue range [0, 180).
                hsv[...,0] = ang*180/np.pi/2
                hsv[...,2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
                bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
                '''
                todo: this isn't fine and only will work for urfd data set
                '''
                if self.x_size != 224 or self.y_size != 224:
                    print("-input_dim 224 224 are obrigatory so far. sorry.",
                        file=sys.stderr)
                    exit(1)
                cv2.imwrite(path + '/' + 'flow_x_' + str(counter).zfill(5) +
                            '.jpg', hsv[..., 0])
                cv2.imwrite(path + '/' + 'flow_y_' + str(counter).zfill(5) +
                            '.jpg', hsv[..., 2])
                cv2.imwrite(path + '/' + 'flow_z_' + str(counter).zfill(5) +
                            '.jpg', bgr)
                counter += 1
                # Current frame becomes the previous frame for the next pair.
                prvs = next
            cap.release()
        cv2.destroyAllWindows()
    def get_dirs(self, data_folder):
        """Populate classes_dirs / classes_videos with sorted per-class lists."""
        for c in self.classes:
            self.classes_dirs.append([f for f in os.listdir(data_folder + c)
                                      if os.path.isdir(os.path.join(data_folder, c, f))])
            self.classes_dirs[-1].sort()
            self.classes_videos.append([])
            for f in self.classes_dirs[-1]:
                # Video path convention: <root><class>/<dir>/<dir><ext>
                self.classes_videos[-1].append(data_folder + c+ '/' + f +
                                    '/' + f + self.ext)
            self.classes_videos[-1].sort()
if __name__ == '__main__':
    # Banner goes to stderr so stdout stays free for pipeline output.
    print("***********************************************************",
            file=sys.stderr)
    print("         SEMANTIX - UNICAMP DATALAB 2018", file=sys.stderr)
    print("***********************************************************",
            file=sys.stderr)
    argp = argparse.ArgumentParser(description='Do feature extraction tasks')
    argp.add_argument("-data", dest='data_folder', type=str, nargs=1,
        help='Usage: -data <path_to_your_data_folder>', required=True)
    argp.add_argument("-class", dest='classes', type=str, nargs='+',
        help='Usage: -class <class0_name> <class1_name>..<n-th_class_name>',
        required=True)
    argp.add_argument("-input_dim", dest='input_dim', type=int, nargs=2,
        help='Usage: -input_dim <x_dimension> <y_dimension>', required=True)
    argp.add_argument("-ext", dest='ext', type=str, nargs=1,
        help='Usage: -ext <file_extension> .mp4 | .avi | ...', required=True)
    # Fix: a bare ``except:`` previously wrapped parse_args(). argparse already
    # prints usage/help and raises SystemExit, so the handler printed the help
    # a second time (the "impressao dupla de help" TODO) and exited 1 even for
    # a successful ``-h``. Let argparse handle its own errors instead.
    args = argp.parse_args()
    optflow_extractor = Optflow_extractor(args.classes, args.input_dim[0],
            args.input_dim[1], args.ext[0])
    optflow_extractor.extract(args.data_folder[0])
'''
todo: criar excecoes para facilitar o uso
'''
| [
"math.isnan",
"numpy.zeros_like",
"cv2.cartToPolar",
"argparse.ArgumentParser",
"math.isinf",
"cv2.cvtColor",
"cv2.VideoCapture",
"cv2.calcOpticalFlowFarneback",
"cv2.normalize",
"cv2.destroyAllWindows",
"os.path.join",
"os.listdir"
] | [((4997, 5063), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Do feature extraction tasks"""'}), "(description='Do feature extraction tasks')\n", (5020, 5063), False, 'import argparse\n'), ((1734, 1757), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video'], {}), '(video)\n', (1750, 1757), False, 'import cv2\n'), ((2275, 2296), 'numpy.zeros_like', 'np.zeros_like', (['frame1'], {}), '(frame1)\n', (2288, 2296), True, 'import numpy as np\n'), ((4129, 4152), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4150, 4152), False, 'import cv2\n'), ((1839, 1879), 'cv2.cvtColor', 'cv2.cvtColor', (['frame1', 'cv2.COLOR_BGR2GRAY'], {}), '(frame1, cv2.COLOR_BGR2GRAY)\n', (1851, 1879), False, 'import cv2\n'), ((2535, 2575), 'cv2.cvtColor', 'cv2.cvtColor', (['frame2', 'cv2.COLOR_BGR2GRAY'], {}), '(frame2, cv2.COLOR_BGR2GRAY)\n', (2547, 2575), False, 'import cv2\n'), ((2599, 2673), 'cv2.calcOpticalFlowFarneback', 'cv2.calcOpticalFlowFarneback', (['prvs', 'next', 'None', '(0.702)', '(5)', '(10)', '(2)', '(7)', '(1.5)', '(0)'], {}), '(prvs, next, None, 0.702, 5, 10, 2, 7, 1.5, 0)\n', (2627, 2673), False, 'import cv2\n'), ((2726, 2769), 'cv2.cartToPolar', 'cv2.cartToPolar', (['flow[..., 0]', 'flow[..., 1]'], {}), '(flow[..., 0], flow[..., 1])\n', (2741, 2769), False, 'import cv2\n'), ((3233, 3282), 'cv2.normalize', 'cv2.normalize', (['mag', 'None', '(0)', '(255)', 'cv2.NORM_MINMAX'], {}), '(mag, None, 0, 255, cv2.NORM_MINMAX)\n', (3246, 3282), False, 'import cv2\n'), ((3305, 3341), 'cv2.cvtColor', 'cv2.cvtColor', (['hsv', 'cv2.COLOR_HSV2BGR'], {}), '(hsv, cv2.COLOR_HSV2BGR)\n', (3317, 3341), False, 'import cv2\n'), ((4272, 4299), 'os.listdir', 'os.listdir', (['(data_folder + c)'], {}), '(data_folder + c)\n', (4282, 4299), False, 'import os\n'), ((3069, 3090), 'math.isnan', 'math.isnan', (['mag[i][j]'], {}), '(mag[i][j])\n', (3079, 3090), False, 'import math\n'), ((3094, 3115), 'math.isinf', 'math.isinf', (['mag[i][j]'], {}), 
'(mag[i][j])\n', (3104, 3115), False, 'import math\n'), ((4342, 4373), 'os.path.join', 'os.path.join', (['data_folder', 'c', 'f'], {}), '(data_folder, c, f)\n', (4354, 4373), False, 'import os\n')] |
import numpy as np
def load_data(path='input.csv'):
    """Load a comma-separated list of int32 values from *path*.

    Generalized: the file name is now a parameter; the default preserves
    the original behavior of reading ``input.csv``.

    Args:
        path: CSV file containing the Intcode program (default 'input.csv').
    Returns:
        1-D numpy int32 array of program values.
    """
    return np.loadtxt(path, dtype='int32', delimiter=',')
def main():
    """Run the Intcode program from input.csv with the 12/2 patch applied.

    Opcode 1 adds, opcode 2 multiplies (operands are position-mode), and
    opcode 99 halts.  Prints the final memory and the value at address 0.
    """
    program = load_data()
    # Restore the "1202 program alarm" state: noun=12, verb=2.
    program[1] = 12
    program[2] = 2
    ip = 0  # instruction pointer, advances one 4-word instruction at a time
    while True:
        opcode = program[ip]
        if opcode == 99:
            break
        lhs = program[program[ip + 1]]
        rhs = program[program[ip + 2]]
        dst = program[ip + 3]
        if opcode == 1:
            program[dst] = lhs + rhs
        elif opcode == 2:
            program[dst] = lhs * rhs
        ip += 4
    print(program)
    print(program[0])
# Script entry point: run the Intcode program when executed directly.
if __name__ == "__main__":
    main()
| [
"numpy.loadtxt"
] | [((46, 99), 'numpy.loadtxt', 'np.loadtxt', (['"""input.csv"""'], {'dtype': '"""int32"""', 'delimiter': '""","""'}), "('input.csv', dtype='int32', delimiter=',')\n", (56, 99), True, 'import numpy as np\n')] |
import numpy as np
from numpy.linalg import inv
import matplotlib.pyplot as plt
def x_to_phi(x, degree=5):
    """Map a scalar x to its polynomial feature vector [1, x, ..., x**degree].

    Generalized: the polynomial degree is now a parameter.  The default
    (degree=5) reproduces the original fixed 6-dimensional feature vector
    used by the true model in this script.

    Args:
        x: scalar input.
        degree: highest power of x to include (default 5).
    Returns:
        numpy array of length degree + 1.
    """
    return np.array([x ** k for k in range(degree + 1)])
def generate_y(x, th, s_n, m_n):
    """Sample one noisy observation from the polynomial model.

    Computes phi(x) @ th and adds Gaussian noise n ~ N(m_n, s_n).

    Args:
        x: scalar input location.
        th: weight vector matching x_to_phi's output length.
        s_n: noise standard deviation.
        m_n: noise mean.
    Returns:
        scalar observation y.
    """
    features = x_to_phi(x)
    noise = np.random.normal(m_n, s_n)
    return features @ th + noise
def m_th_y(th_0, s_n, s_th, phis, ys):
    """Posterior mean of the weights theta given observations ys.

    Implements th_0 + (1/s_n) * inv((1/s_th) I + (1/s_n) Phi^T Phi)
    @ Phi^T (ys - Phi th_0).

    Fix: the identity matrix local was named ``id``, shadowing the builtin;
    renamed without changing the computation.

    Args:
        th_0: prior mean of theta.
        s_n: noise variance.
        s_th: prior weight variance.
        phis: (N, D) design matrix of feature vectors.
        ys: (N,) observation vector.
    Returns:
        Posterior mean vector, same shape as th_0.
    """
    eye = np.identity(th_0.shape[0])
    gain = 1 / s_n * inv((1 / s_th) * eye + (1 / s_n) * phis.T @ phis)
    residual = phis.T @ (ys - phis @ th_0)
    return th_0 + gain @ residual
def get_m_y(x, m_th_y):
    """Predictive mean of y at input x under posterior mean weights m_th_y."""
    features = x_to_phi(x)
    return features.T @ m_th_y
def get_s_y_sq(x, s_n, s_th, phis):
    """Predictive variance of y at input x.

    Computes s_n + s_n * s_th * phi^T inv(s_n I + s_th Phi^T Phi) phi,
    i.e. noise variance plus the model's epistemic uncertainty at x.
    """
    features = x_to_phi(x)
    inner = inv(s_n * np.identity(phis.shape[1]) + s_th * phis.T @ phis)
    return s_n + (s_n * s_th * features.T @ inner) @ features
# Bayesian linear-regression experiment over a degree-5 polynomial model:
# for each noise level, fit the posterior on N training points and plot the
# predictive mean +/- variance against the noise-free true curve.
np.random.seed(0)
theta_true = np.array([0.2, -1, 0.9, 0.7, 0, -0.2])
s_ns = [0.05, 0.15]
for s_n in s_ns:
    N = 20
    s_th = 0.1
    m_n = 0
    # form training set
    phis = np.zeros((N, theta_true.shape[0]))
    ys = np.zeros(N)
    x_train = np.linspace(0,2, N)
    for i in range(0, N):
        phi = x_to_phi(x_train[i])
        y = generate_y(x_train[i], theta_true, s_n, m_n)
        phis[i, :] = phi
        ys[i] = y
    # perform Bayesian Inference
    # find mu theta given y
    mu_y_th = m_th_y(theta_true, s_n, s_th, phis, ys)
    # create test set
    N = 20
    x_test = np.zeros(N)
    true_y = np.zeros(N)
    pred_y = np.zeros(N)
    err_y = np.zeros(N)
    for i in range(0, N):
        # Test inputs are drawn uniformly over the same [0, 2] interval.
        x = np.random.uniform(0,2)
        x_test[i] = x
        pred_y[i] = get_m_y(x_test[i], mu_y_th)
        err_y[i] = get_s_y_sq(x, s_n, s_th, phis)
    # generate a smooth true model with linspace
    # (noise std 0.0 so this is the exact underlying curve)
    x_for_true = np.linspace(0,2, 10000)
    true_y = np.zeros(x_for_true.shape[0])
    for i, x in enumerate(x_for_true):
        true_y[i] = generate_y(x, theta_true, 0.0, m_n)
    # plot results
    plt.title("Sigma noise: %.2f, Number of training points: %d" % (s_n, N))
    plt.scatter(x_for_true, true_y, color='red', marker='.', s=1)
    plt.errorbar(x_test, pred_y, yerr=err_y, fmt='o')
    # plt.savefig("1.4_%s_%s.png" % (s_n, N))
    plt.show()
| [
"matplotlib.pyplot.title",
"numpy.random.uniform",
"numpy.random.seed",
"matplotlib.pyplot.show",
"matplotlib.pyplot.scatter",
"numpy.zeros",
"numpy.identity",
"numpy.array",
"numpy.linalg.inv",
"numpy.random.normal",
"numpy.linspace",
"matplotlib.pyplot.errorbar"
] | [((997, 1014), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1011, 1014), True, 'import numpy as np\n'), ((1028, 1066), 'numpy.array', 'np.array', (['[0.2, -1, 0.9, 0.7, 0, -0.2]'], {}), '([0.2, -1, 0.9, 0.7, 0, -0.2])\n', (1036, 1066), True, 'import numpy as np\n'), ((172, 220), 'numpy.array', 'np.array', (['[1, x, x ** 2, x ** 3, x ** 4, x ** 5]'], {}), '([1, x, x ** 2, x ** 3, x ** 4, x ** 5])\n', (180, 220), True, 'import numpy as np\n'), ((369, 395), 'numpy.random.normal', 'np.random.normal', (['m_n', 's_n'], {}), '(m_n, s_n)\n', (385, 395), True, 'import numpy as np\n'), ((508, 534), 'numpy.identity', 'np.identity', (['th_0.shape[0]'], {}), '(th_0.shape[0])\n', (519, 534), True, 'import numpy as np\n'), ((1179, 1213), 'numpy.zeros', 'np.zeros', (['(N, theta_true.shape[0])'], {}), '((N, theta_true.shape[0]))\n', (1187, 1213), True, 'import numpy as np\n'), ((1223, 1234), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (1231, 1234), True, 'import numpy as np\n'), ((1249, 1269), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', 'N'], {}), '(0, 2, N)\n', (1260, 1269), True, 'import numpy as np\n'), ((1593, 1604), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (1601, 1604), True, 'import numpy as np\n'), ((1618, 1629), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (1626, 1629), True, 'import numpy as np\n'), ((1643, 1654), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (1651, 1654), True, 'import numpy as np\n'), ((1667, 1678), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (1675, 1678), True, 'import numpy as np\n'), ((1927, 1951), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', '(10000)'], {}), '(0, 2, 10000)\n', (1938, 1951), True, 'import numpy as np\n'), ((1964, 1993), 'numpy.zeros', 'np.zeros', (['x_for_true.shape[0]'], {}), '(x_for_true.shape[0])\n', (1972, 1993), True, 'import numpy as np\n'), ((2114, 2186), 'matplotlib.pyplot.title', 'plt.title', (["('Sigma noise: %.2f, Number of training points: %d' % (s_n, N))"], {}), 
"('Sigma noise: %.2f, Number of training points: %d' % (s_n, N))\n", (2123, 2186), True, 'import matplotlib.pyplot as plt\n'), ((2191, 2252), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_for_true', 'true_y'], {'color': '"""red"""', 'marker': '"""."""', 's': '(1)'}), "(x_for_true, true_y, color='red', marker='.', s=1)\n", (2202, 2252), True, 'import matplotlib.pyplot as plt\n'), ((2257, 2306), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['x_test', 'pred_y'], {'yerr': 'err_y', 'fmt': '"""o"""'}), "(x_test, pred_y, yerr=err_y, fmt='o')\n", (2269, 2306), True, 'import matplotlib.pyplot as plt\n'), ((2357, 2367), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2365, 2367), True, 'import matplotlib.pyplot as plt\n'), ((557, 601), 'numpy.linalg.inv', 'inv', (['(1 / s_th * id + 1 / s_n * phis.T @ phis)'], {}), '(1 / s_th * id + 1 / s_n * phis.T @ phis)\n', (560, 601), False, 'from numpy.linalg import inv\n'), ((1717, 1740), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2)'], {}), '(0, 2)\n', (1734, 1740), True, 'import numpy as np\n'), ((892, 918), 'numpy.identity', 'np.identity', (['phis.shape[1]'], {}), '(phis.shape[1])\n', (903, 918), True, 'import numpy as np\n')] |
"""Furcifar Utility Modul
This module contains functions for visualisation, logging and progress output during the
preprocessing of whole-slide images of the CAMELYON data sets.
"""
from collections import namedtuple
from datetime import datetime
import fnmatch
import logging
import numpy as np
import os
from PIL import Image, ImageDraw, ImageFont
from progress.bar import IncrementalBar
from typing import Dict
# Lightweight 2-D coordinate used for polygon and tile geometry throughout.
Point = namedtuple('Point', 'x y')
# If True, display additional NumPy array stats (min, max, mean, is_binary).
ADDITIONAL_NP_STATS = False
class LogMessage(object):
    """Lazily formatted log message using '{}'-style placeholders.

    Formatting is deferred until __str__ is called, so no work is done
    for records that are never emitted.
    """

    def __init__(self, fmt, args):
        self.fmt = fmt
        self.args = args

    def __str__(self):
        """Render the message by applying the stored args to the format."""
        return self.fmt.format(*self.args)
class LogStyleAdapter(logging.LoggerAdapter):
    """Style Adapter to allow Python 3 styled string format with '{}'."""
    def __init__(self, logger, extra=None):
        super(LogStyleAdapter, self).__init__(logger, extra or {})
    def log(self, level, msg, *args, **kwargs):
        # Wrap the message in LogMessage so '{}'-formatting happens lazily,
        # only for records that pass the level check.
        # NOTE(review): calls the private Logger._log directly to bypass the
        # standard %-style formatting path.
        if self.isEnabledFor(level):
            msg, kwargs = self.process(msg, kwargs)
            self.logger._log(level, LogMessage(msg, args), (), **kwargs)
def find_files(pattern, path) -> Dict[str, str]:
    """Recursively collect files under *path* matching *pattern*.

    Parameters
    ----------
    pattern : str
        File name pattern, wildcards allowed (fnmatch syntax).
    path : str
        Root directory to walk.

    Returns
    -------
    dict(str: str)
        Maps each matching file name to its path relative to the search root.
    """
    matches = {}
    for root, _dirs, filenames in os.walk(path):
        for filename in filenames:
            if not fnmatch.fnmatch(filename, pattern):
                continue
            matches[filename] = os.path.join(root, filename)
    return matches
class ProgressBar(IncrementalBar):
    """IncrementalBar with HH:MM:SS-formatted ETA and elapsed properties."""
    @property
    def remaining_fmt(self):
        # self.eta (seconds, from the progress package) -> 'HH:MM:SS'.
        m, s = divmod(self.eta, 60)
        h, m = divmod(m, 60)
        return f'{h:02}:{m:02}:{s:02}'
    @property
    def elapsed_fmt(self):
        # self.elapsed (seconds since the bar started) -> 'HH:MM:SS'.
        m, s = divmod(self.elapsed, 60)
        h, m = divmod(m, 60)
        return f'{h:02}:{m:02}:{s:02}'
def draw_polygon(image: Image.Image, polygon, *, fill, outline) -> Image.Image:
    """Composite a filled polygon onto *image* and return it.

    The polygon is drawn on a transparent RGBA overlay which is then pasted
    onto *image* using its own alpha as the mask, so semi-transparent fill
    colors blend correctly.  Note: *image* is modified in place; the return
    value is the same object.

    Parameters
    ----------
    image : Image.Image
        Background image to draw on (mutated).
    polygon :
        Sequence of points accepted by ImageDraw.polygon.
    fill : color str or tuple
        Fill color (may include alpha).
    outline : color str or tuple
        Outline color.

    Returns
    -------
    Image.Image
        The same image object with the polygon composited onto it.
    """
    overlay = Image.new('RGBA', image.size)
    ImageDraw.Draw(overlay).polygon(polygon, fill, outline)
    image.paste(overlay, mask=overlay)
    return image
def get_relative_polygon(polygon, origin: Point, downsample=1):
    """Translate a polygon so its points are relative to *origin*.

    Each point is shifted by -origin and scaled down by *downsample*.

    Parameters
    ----------
    polygon : Sequence[Point]
        Polygon points.
    origin : Point
        New origin the points become relative to.
    downsample : int, optional
        Layer downsample >= 1 (Default: 1)

    Returns
    -------
    tuple(Point)
        New polygon with points relative to origin.
    """
    return tuple(
        Point((p.x - origin.x) / downsample, (p.y - origin.y) / downsample)
        for p in polygon
    )
class TileMap:
    """Visualisation for slide tiles.
    Creates an image with tile boundaries drawn over the slide image for
    visualisation purposes.
    Attributes
    ----------
    image : PIL.Image.Image
        Map that displays the slide with each added tile drawn over it.
    """
    def __init__(self, slide: "Slide", level=None, fill=(20, 180, 8, 80),
                 outline=(20, 180, 8)):
        """
        Parameters
        ----------
        slide : Slide
            Tissue slide.
        level
            Slide Layer. Defaults to the slide's last (lowest-resolution) level.
        fill : PIL color, optional
            Tile fill color.
        outline : PIL color, optional
            Tile outline color.
        """
        self._slide = slide
        if level is None:
            self._level = slide.level_count - 1
        else:
            self._level = level
        self._fill = fill
        self._outline = outline
        # Factor to convert layer-0 pixel coordinates to this level's pixels.
        self._downsample = slide.level_downsamples[self._level]
        self.tiles = []
        self.image = slide.get_full_slide(self._level)
    def __repr__(self):
        return '{}(slide={!r}, level={!r})'.format(
            type(self).__name__,
            self._slide,
            self._level
        )
    def add_tile(self, bounds):
        """Add a tile to the map.
        Parameters
        ----------
        bounds : Tuple
            Tile boundaries as a tuple of ((x, y), (width, height)) in layer 0 pixel.
        """
        self.tiles.append(bounds)
        (x, y), (width, height) = bounds
        # Build the tile rectangle in layer-0 coordinates, then rescale it to
        # this level before drawing it onto the overview image.
        poly = (Point(x, y), Point(x + width, y), Point(x + width, y + height),
                Point(x, y + height))
        rel_poly = get_relative_polygon(poly, Point(0, 0),
                                        downsample=self._downsample)
        self.image = draw_polygon(self.image, rel_poly, fill=self._fill,
                                  outline=self._outline)
class Time:
    """
    Wall-clock stopwatch for measuring and displaying elapsed time.
    FROM DEEPHISTOPATH
    """
    def __init__(self):
        # Reference instant: construction time.
        self.start = datetime.now()

    def elapsed_display(self):
        """Print the time elapsed since construction."""
        print("Time elapsed: " + str(self.elapsed()))

    def elapsed(self):
        """Return a timedelta measuring time since construction.

        Also records the measurement instant on ``self.end``.
        """
        self.end = datetime.now()
        return self.end - self.start
def np_info(np_arr, name=None, elapsed=None):
  """
  Display information (shape, type, max, min, etc) about a NumPy array.
  FROM DEEPHISTOPATH
  Args:
    np_arr: The NumPy array.
    name: The (optional) name of the array.
    elapsed: The (optional) time elapsed to perform a filtering operation.
  """
  if name is None:
    name = "NumPy Array"
  if elapsed is None:
    elapsed = "---"
  # Idiomatic truthiness check instead of ``is False``.
  if not ADDITIONAL_NP_STATS:
    print("%-20s | Time: %-14s  Type: %-7s Shape: %s" % (name, str(elapsed), np_arr.dtype, np_arr.shape))
  else:
    # Fix: locals renamed so the builtins ``max``/``min`` are not shadowed.
    arr_max = np_arr.max()
    arr_min = np_arr.min()
    arr_mean = np_arr.mean()
    # "Binary" means exactly two distinct values occur in the array.
    is_binary = "T" if (np.unique(np_arr).size == 2) else "F"
    print("%-20s | Time: %-14s Min: %6.2f  Max: %6.2f  Mean: %6.2f  Binary: %s  Type: %-7s Shape: %s" % (
      name, str(elapsed), arr_min, arr_max, arr_mean, is_binary, np_arr.dtype, np_arr.shape))
def pil_to_np_rgb(pil_img):
  """
  Convert a PIL Image to a NumPy array.
  FROM DEEPATHISTO
  Note that RGB PIL (w, h) -> NumPy (h, w, 3).
  Args:
    pil_img: The PIL Image.
  Returns:
    The PIL image converted to a NumPy array.
  """
  timer = Time()
  np_img = np.asarray(pil_img)
  # Log shape/type plus how long the conversion took.
  np_info(np_img, "RGB", timer.elapsed())
  return np_img
def np_to_pil(np_img):
  """
  Convert a NumPy array to a PIL Image.
  FROM DEEPATHISTO
  Boolean masks become 0/255 uint8; float64 arrays are assumed to hold
  values in [0, 1] and are scaled to uint8.
  Args:
    np_img: The image represented as a NumPy array.
  Returns:
    The NumPy array converted to a PIL Image.
  """
  dtype = np_img.dtype
  if dtype == "bool":
    np_img = np_img.astype("uint8") * 255
  elif dtype == "float64":
    np_img = (np_img * 255).astype("uint8")
  return Image.fromarray(np_img)
def display_img(np_img, text=None, font_path="/Library/Fonts/Arial Bold.ttf", size=48, color=(255, 0, 0),
                background=(255, 255, 255), border=(0, 0, 0), bg=False):
  """
  Convert a NumPy array to a PIL image, add text to the image, and display the image.
  FROM DEEPATHISTO
  Args:
    np_img: Image as a NumPy array.
    text: The text to add to the image.
    font_path: The path to the font to use.
    size: The font size
    color: The font color
    background: The background color
    border: The border color
    bg: If True, add rectangle background behind text
  """
  # NOTE(review): the default font_path is macOS-specific; on other platforms
  # callers must pass a valid TrueType font path.
  result = np_to_pil(np_img)
  # if gray, convert to RGB for display
  if result.mode == 'L':
    result = result.convert('RGB')
  draw = ImageDraw.Draw(result)
  if text is not None:
    font = ImageFont.truetype(font_path, size)
    if bg:
      # Padded rectangle behind the text for readability.
      (x, y) = draw.textsize(text, font)
      draw.rectangle([(0, 0), (x + 5, y + 4)], fill=background, outline=border)
    draw.text((2, 0), text, color, font=font)
  result.show()
def mask_rgb(rgb, mask):
  """
  Apply a binary (T/F, 1/0) mask to a 3-channel RGB image and output the result.
  FROM DEEPATHISTO
  Args:
    rgb: RGB image as a NumPy array.
    mask: An image mask to determine which pixels in the original image should be displayed.
  Returns:
    NumPy array representing an RGB image with mask applied.
  """
  timer = Time()
  # Stack the 2-D mask into three channels and zero out the excluded pixels.
  masked = rgb * np.dstack([mask, mask, mask])
  np_info(masked, "Mask RGB", timer.elapsed())
  return masked
def mask_percent(np_img):
  """
  Determine the percentage of a NumPy array that is masked (how many of the values are 0 values).
  FROM DEEPHISTOPATH
  Args:
    np_img: Image as a NumPy array.
  Returns:
    The percentage of the NumPy array that is masked.
  """
  if (len(np_img.shape) == 3) and (np_img.shape[2] == 3):
    # Fix: sum the channels in a wide dtype.  Adding three uint8 channels
    # directly wraps modulo 256 (e.g. 255 + 1 + 0 -> 0), which miscounted
    # such pixels as masked.
    np_sum = np_img[:, :, 0].astype("uint32") + np_img[:, :, 1] + np_img[:, :, 2]
    mask_percentage = 100 - np.count_nonzero(np_sum) / np_sum.size * 100
  else:
    mask_percentage = 100 - np.count_nonzero(np_img) / np_img.size * 100
  return mask_percentage
def tissue_percent(np_img):
  """
  Determine the percentage of a NumPy array that is tissue (not masked).
  FROM DEEPHISTOPATH
  Args:
    np_img: Image as a NumPy array.
  Returns:
    The percentage of the NumPy array that is tissue.
  """
  # Tissue is the complement of the masked fraction.
  return 100 - mask_percent(np_img)
| [
"numpy.dstack",
"PIL.Image.new",
"numpy.count_nonzero",
"os.path.join",
"numpy.asarray",
"os.walk",
"PIL.Image.fromarray",
"PIL.ImageFont.truetype",
"collections.namedtuple",
"PIL.ImageDraw.Draw",
"datetime.datetime.now",
"fnmatch.fnmatch",
"numpy.unique"
] | [((426, 452), 'collections.namedtuple', 'namedtuple', (['"""Point"""', '"""x y"""'], {}), "('Point', 'x y')\n", (436, 452), False, 'from collections import namedtuple\n'), ((1662, 1675), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (1669, 1675), False, 'import os\n'), ((2688, 2720), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', 'img_back.size'], {}), "('RGBA', img_back.size)\n", (2697, 2720), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((2736, 2760), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img_poly'], {}), '(img_poly)\n', (2750, 2760), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((7068, 7087), 'numpy.asarray', 'np.asarray', (['pil_img'], {}), '(pil_img)\n', (7078, 7087), True, 'import numpy as np\n'), ((7539, 7562), 'PIL.Image.fromarray', 'Image.fromarray', (['np_img'], {}), '(np_img)\n', (7554, 7562), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((8319, 8341), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['result'], {}), '(result)\n', (8333, 8341), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((5595, 5609), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5607, 5609), False, 'from datetime import datetime\n'), ((5775, 5789), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5787, 5789), False, 'from datetime import datetime\n'), ((8382, 8417), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['font_path', 'size'], {}), '(font_path, size)\n', (8400, 8417), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((9030, 9059), 'numpy.dstack', 'np.dstack', (['[mask, mask, mask]'], {}), '([mask, mask, mask])\n', (9039, 9059), True, 'import numpy as np\n'), ((1719, 1749), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (['name', 'pattern'], {}), '(name, pattern)\n', (1734, 1749), False, 'import fnmatch\n'), ((1782, 1806), 'os.path.join', 'os.path.join', (['root', 'name'], {}), '(root, name)\n', (1794, 1806), False, 'import os\n'), ((6550, 6567), 'numpy.unique', 'np.unique', (['np_arr'], {}), 
'(np_arr)\n', (6559, 6567), True, 'import numpy as np\n'), ((9565, 9589), 'numpy.count_nonzero', 'np.count_nonzero', (['np_sum'], {}), '(np_sum)\n', (9581, 9589), True, 'import numpy as np\n'), ((9652, 9676), 'numpy.count_nonzero', 'np.count_nonzero', (['np_img'], {}), '(np_img)\n', (9668, 9676), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
from matplotlib import rc
import numpy as np
import os
import seaborn
# Plot imitation-loss training curves (ADMM vs projected gradient descent)
# from result files saved under ./data/results, one curve per horizon N.
if __name__ == "__main__":
    seaborn.set()
    seaborn.set_style("ticks")
    seaborn.set_context("paper")
    rc('text', usetex=True)
    rc('font', **{'family':'serif', 'serif':['Times New Roman]']})
    # Training plot
    fig, ax = plt.subplots(2, 1, figsize=[3.2, 4])
    class Colors:
        # Shared RGB palette (0-1 range) for the plots.
        blue = (0, 160/255, 255/255)
        orange = (255/255, 160/255, 0/255)
        red = (255/255, 80/255, 0/255)
        green = (0/255, 200/255, 0/255)
        grey = (100/255, 100/255, 100/255)
        lightgrey = (150/255, 150/255, 150/255)
    red = Colors.red
    blue = Colors.blue
    files = os.listdir('./data/results')
    files.remove('README.md')
    for file in files:
        data = np.load('./data/results/' + file)
        # File name convention: <prefix>_<system>_<algorithm>_<seed>_<N>.npz
        _, system, algorithm, seed, N = file[:-4].split('_')
        if system == 'controller':
            # Color encodes the number of training points N.
            if N == '10':
                c = 'black'
            elif N == '100':
                c = red
            elif N == '1000':
                c = blue
            if algorithm == 'admm':
                #plt.semilogy(data['res_pri'])
                ax[0].semilogy(data['obj'], color=c)
            else:
                ax[1].semilogy(data['obj'], color=c)
    # Dummy artists at the origin, created only to populate the legend.
    line0, = ax[0].plot(0, 0, 'black')
    line1, = ax[0].plot(0, 0, color=red)
    line2, = ax[0].plot(0, 0, color=blue)
    line0.set_label('N=10')
    line1.set_label('N=100')
    line2.set_label('N=1000')
    ax[0].grid()
    ax[0].set_ylim([1E1, 9E10])
    ax[0].set_xlim([-15, 300])
    ax[0].set_title('ADMM', loc='left')
    ax[0].set_ylabel('Imitaiton Loss')
    ax[0].set_xlabel('Iteration')
    ax[1].set_ylim([1E1, 9E10])
    ax[1].set_xlim([-50, 1000])
    ax[1].grid()
    ax[1].set_title('Projected Gradient Descent', loc='left')
    ax[1].set_ylabel('Imitaiton Loss')
    ax[1].set_xlabel('Iteration')
    fig.legend(handles=[line0, line1, line2], ncol=3, bbox_to_anchor=[1.05, 0.08],
               frameon=False)
    seaborn.despine(trim="true")
    fig.tight_layout(pad=0, h_pad=1)
    fig.subplots_adjust(bottom=0.19)
    plt.savefig('./data/figures/training2.pgf', pad_inches=0)
    plt.show()
"os.listdir",
"seaborn.set_style",
"matplotlib.rc",
"numpy.load",
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots",
"seaborn.despine",
"seaborn.set",
"seaborn.set_context",
"matplotlib.pyplot.savefig"
] | [((135, 148), 'seaborn.set', 'seaborn.set', ([], {}), '()\n', (146, 148), False, 'import seaborn\n'), ((153, 179), 'seaborn.set_style', 'seaborn.set_style', (['"""ticks"""'], {}), "('ticks')\n", (170, 179), False, 'import seaborn\n'), ((184, 212), 'seaborn.set_context', 'seaborn.set_context', (['"""paper"""'], {}), "('paper')\n", (203, 212), False, 'import seaborn\n'), ((217, 240), 'matplotlib.rc', 'rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (219, 240), False, 'from matplotlib import rc\n'), ((245, 309), 'matplotlib.rc', 'rc', (['"""font"""'], {}), "('font', **{'family': 'serif', 'serif': ['Times New Roman]']})\n", (247, 309), False, 'from matplotlib import rc\n'), ((344, 380), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '[3.2, 4]'}), '(2, 1, figsize=[3.2, 4])\n', (356, 380), True, 'import matplotlib.pyplot as plt\n'), ((708, 736), 'os.listdir', 'os.listdir', (['"""./data/results"""'], {}), "('./data/results')\n", (718, 736), False, 'import os\n'), ((2041, 2069), 'seaborn.despine', 'seaborn.despine', ([], {'trim': '"""true"""'}), "(trim='true')\n", (2056, 2069), False, 'import seaborn\n'), ((2148, 2205), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./data/figures/training2.pgf"""'], {'pad_inches': '(0)'}), "('./data/figures/training2.pgf', pad_inches=0)\n", (2159, 2205), True, 'import matplotlib.pyplot as plt\n'), ((2210, 2220), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2218, 2220), True, 'import matplotlib.pyplot as plt\n'), ((806, 839), 'numpy.load', 'np.load', (["('./data/results/' + file)"], {}), "('./data/results/' + file)\n", (813, 839), True, 'import numpy as np\n')] |
import os
import cv2
import numpy as np
from enum import Enum
import pickle
from tfci import compress, decompress
from functools import partial
from statistics import mean
from skimage.metrics import structural_similarity as ssim
from skimage.metrics import mean_squared_error as mse
from skimage.metrics import peak_signal_noise_ratio as psnr
from skimage.metrics import normalized_root_mse as nrmse
def get_metrics(imageA, imageB):
    """Compare two images and return a dict of similarity metrics.

    Keys: "ssim" (multichannel structural similarity), "mse", "psnr" and
    "nrmse", each rounded to three decimal places.
    """
    return {
        "ssim": round(ssim(imageA, imageB, multichannel=True), 3),
        "mse": round(mse(imageA, imageB), 3),
        "psnr": round(psnr(imageA, imageB), 3),
        "nrmse": round(nrmse(imageA, imageB), 3),
    }
class normalize(Enum):
    # Backend used by img_normalize/img_denormalize: OpenCV or plain NumPy.
    OPENCV = 1
    NUMPY = 2
class soften(Enum):
    # Smoothing filter selector used by img_soften (see its per-branch notes).
    BLUR = 1
    MEDIAN = 2
    NONE = 3
class encoding(Enum):
    # Compression codec / quality variants evaluated by the pipeline:
    # TFCI and BMSHJ learned codecs at several qualities, JPEG at several
    # quality factors, or no compression at all.
    NO_COMP = "NO_COMP"
    COMP_TFCI_HI = "COMP_TFCI_HI"
    COMP_TFCI_MI = "COMP_TFCI_MI"
    COMP_TFCI_LO = "COMP_TFCI_LO"
    COMP_JPEG_90 = "COMP_JPEG_90"
    COMP_JPEG_70 = "COMP_JPEG_70"
    COMP_JPEG_50 = "COMP_JPEG_50"
    COMP_BMSHJ_8 = "COMP_BMSHJ_8"
    COMP_BMSHJ_4 = "COMP_BMSHJ_4"
    COMP_BMSHJ_1 = "COMP_BMSHJ_1"
def img_normalize(img, choice=normalize.OPENCV):
    """Rescale *img* linearly into the 0-255 uint8 range.

    Uses cv2.normalize for the OPENCV backend; otherwise interpolates from
    the image's own min/max with NumPy.
    """
    if choice == normalize.OPENCV:
        return cv2.normalize(img, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
    lo, hi = np.min(img), np.max(img)
    return np.interp(img, (lo, hi), (0.0, 255.0)).astype(np.uint8)
def img_denormalize(img, choice=normalize.OPENCV, to_range=None):
    """Rescale *img* back into *to_range* (defaults to (0.0, 1.0)).

    Uses cv2.normalize for the OPENCV backend; otherwise interpolates from
    the image's own min/max with NumPy.
    """
    if to_range is None:
        rng_min, rng_max = 0.0, 1.0
    else:
        rng_min, rng_max = to_range[0], to_range[1]
    if choice == normalize.OPENCV:
        return cv2.normalize(np.float32(img), None, rng_min, rng_max, cv2.NORM_MINMAX)
    return np.interp(np.float32(img), (np.min(img), np.max(img)), (rng_min, rng_max))
def img_soften(img, choice=soften.BLUR):
    """Apply the selected smoothing filter to *img*.

    Unrecognised choices (including soften.NONE) return *img* unchanged.
    """
    if choice == soften.MEDIAN:  # DWT_HL, DWT_LH, DWT_LL
        return cv2.medianBlur(np.float32(img), 5)
    if choice == soften.BLUR:  # DWT_HH
        return cv2.blur(np.float32(img), (3, 3))
    return img
def getImgStat(img):
    """Summarise *img* as a dict with its mean, min and max values."""
    stats = {
        "mean": np.mean(img),
        "min": np.min(img),
        "max": np.max(img),
    }
    return stats
def generate_paths(uid, method, inFld, outFld, inFname_imgSrc, inFname_imgWtr):
    """Build the dictionary of input/output file paths for one watermark run.

    Produces exactly the same keys and values as the previous hand-written
    mapping: two input paths under *inFld* and one output path per pipeline
    stage under *outFld*, each output named
    ``<uid>_<method>__<stage>__<file name>``.

    Args:
        uid: run identifier prefixed to every output file name.
        method: watermarking method name, second prefix component.
        inFld: folder containing the input images.
        outFld: folder where pipeline artefacts are written.
        inFname_imgSrc: file name of the cover/source image.
        inFname_imgWtr: file name of the watermark image.
    Returns:
        dict mapping path keys ("inPath_*", "outPath_*") to file paths.
    """
    dct_paths = {
        "inPath_imgSrc": os.path.join(inFld, inFname_imgSrc),
        "inPath_imgWtr": os.path.join(inFld, inFname_imgWtr),
    }
    # (stage tag, file name) for every derived output artefact; the tag also
    # forms the "outPath_<tag>" dictionary key.
    out_specs = [
        ("imgSrc", inFname_imgSrc),
        ("imgWtr", inFname_imgWtr),
        ("imgWtrFilt", inFname_imgWtr),
        ("imgEmb", inFname_imgSrc),
        ("imgEmbComp", inFname_imgSrc),
        ("imgExt", inFname_imgWtr),
        ("imgExtFilt", inFname_imgWtr),
        ("imgDiffSrc", inFname_imgSrc),
        ("imgDiffWtr", inFname_imgWtr),
    ]
    for tag, fname in out_specs:
        dct_paths["outPath_" + tag] = os.path.join(
            outFld, "_".join([uid, method, "_" + tag + "_", fname])
        )
    return dct_paths
def getDiffImgs(choice=soften.BLUR, **kwargs):
    """Compute and persist difference images and quality metrics.

    Compares the embedded image against the source, and the extracted
    (filtered) watermark against the resized (filtered) watermark, writing
    the difference images and filtered intermediates to the paths supplied
    in kwargs (as produced by generate_paths).

    Parameters
    ----------
    choice : soften enum member, filter applied to extracted/resized
        watermarks before comparison.
    **kwargs : path dictionary; keys used here: outPath_imgSrc,
        outPath_imgEmb, outPath_imgDiffSrc, outPath_imgWtr, outPath_imgExt,
        outPath_imgDiffWtr, outPath_imgWtrFilt, outPath_imgExtFilt.

    Returns
    -------
    (dct_metrics_emb_src, dct_metrics_ext_wtr) metric dictionaries.

    NOTE(review): file handles from open() are never closed here; consider
    `with` blocks.
    """
    imgSrc = pickle.load(open(kwargs.get("outPath_imgSrc") + ".pkl", "rb"))
    # imgEmb = pickle.load(open(kwargs.get("outPath_imgEmb") + ".pkl", "rb"))
    imgEmbPkl = pickle.load(open(kwargs.get("outPath_imgEmb") + ".pkl", "rb"))
    imgEmb = cv2.imread(kwargs.get("outPath_imgEmb"))
    # Restore the on-disk (possibly compressed) image to the pickled copy's
    # original value range before differencing.
    imgEmb = img_denormalize(imgEmb, to_range=(np.min(imgEmbPkl), np.max(imgEmbPkl)))
    imgDiffSrc = np.abs(imgEmb - imgSrc)
    imgDiffSrc = img_normalize(imgDiffSrc)
    cv2.imwrite(kwargs.get("outPath_imgDiffSrc"), imgDiffSrc)
    imgSrc = img_normalize(imgSrc)
    imgEmb = img_normalize(imgEmb)
    dct_metrics_emb_src = get_metrics(imgEmb, imgSrc)
    print("Emb vs Src : {}".format(dct_metrics_emb_src))
    imgWtrRz = pickle.load(open(kwargs.get("outPath_imgWtr") + ".pkl", "rb"))
    imgExt = pickle.load(open(kwargs.get("outPath_imgExt") + ".pkl", "rb"))
    # Soften both watermarks the same way so the comparison is fair.
    imgExtFilt = img_soften(imgExt, choice)
    imgWtrRzFilt = img_soften(imgWtrRz, choice)
    imgDiffwtr = np.abs(imgExtFilt - imgWtrRzFilt)
    imgDiffwtr = img_normalize(imgDiffwtr)
    cv2.imwrite(kwargs.get("outPath_imgDiffWtr"), imgDiffwtr)
    # Persist the raw (un-normalized) filtered watermark for later reuse.
    pickle.dump(
        imgWtrRzFilt,
        open(kwargs.get("outPath_imgWtrFilt") + ".pkl", "wb"),
        pickle.HIGHEST_PROTOCOL,
    )
    # imgWtrRzFilt = cv2.normalize(imgWtrRzFilt, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
    imgWtrRzFilt = img_normalize(imgWtrRzFilt)
    cv2.imwrite(kwargs.get("outPath_imgWtrFilt"), imgWtrRzFilt)
    pickle.dump(
        imgExtFilt,
        open(kwargs.get("outPath_imgExtFilt") + ".pkl", "wb"),
        pickle.HIGHEST_PROTOCOL,
    )
    # imgExtFilt = cv2.normalize(imgExtFilt, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
    imgExtFilt = img_normalize(imgExtFilt)
    cv2.imwrite(kwargs.get("outPath_imgExtFilt"), imgExtFilt)
    dct_metrics_ext_wtr = get_metrics(imgExtFilt, imgWtrRzFilt)
    print("Ext vs Wtr : {}".format(dct_metrics_ext_wtr))
    return dct_metrics_emb_src, dct_metrics_ext_wtr
    # return {
    #     "metrics_ext_wtr": dct_metrics_ext_wtr,
    #     "metrics_emb_src": dct_metrics_emb_src,
    # }
def compress_deepEncoding(imgPath, mode="hific-hi"):
    """Round-trip an image through a learned (TFCI) codec, in place.

    The image at ``imgPath`` is compressed to ``imgPath + ".tfci"`` with the
    given model ``mode`` and immediately decompressed back over the original
    file, so the on-disk image carries the codec's compression artifacts.
    """
    compress(mode, imgPath, imgPath + ".tfci")
    decompress(imgPath + ".tfci", imgPath)
def compress_JpegEnconding(imgPath, quality=90):
    """Round-trip an image through JPEG at the given quality, in place.

    The image is re-encoded to ``imgPath + ".jpg"``, read back, and written
    over the original file so it carries JPEG compression artifacts.
    The side-car .jpg file is left on disk.
    """
    imgemb = cv2.imread(imgPath)
    cv2.imwrite(
        imgPath + ".jpg",
        imgemb,
        [int(cv2.IMWRITE_JPEG_QUALITY), quality],
    )
    imgemb = cv2.imread(imgPath + ".jpg")
    cv2.imwrite(imgPath, imgemb)
def compress_nothing():
    """No-op compressor used as the default when no encoding is requested."""
    return None
def do_compress(imgPath, choice=encoding.NO_COMP):
    """Apply the selected compression round-trip to the image at imgPath.

    ``choice`` is one of the ``encoding`` constants; any unknown value
    falls back to the no-op compressor.  Returns whatever the selected
    compressor returns.
    """
    jpeg = compress_JpegEnconding
    deep = compress_deepEncoding
    dispatch = {
        encoding.COMP_JPEG_50: partial(jpeg, imgPath, quality=50),
        encoding.COMP_JPEG_70: partial(jpeg, imgPath, quality=70),
        encoding.COMP_JPEG_90: partial(jpeg, imgPath, quality=90),
        encoding.COMP_TFCI_HI: partial(deep, imgPath, mode="hific-hi"),
        encoding.COMP_TFCI_MI: partial(deep, imgPath, mode="hific-mi"),
        encoding.COMP_TFCI_LO: partial(deep, imgPath, mode="hific-lo"),
        encoding.COMP_BMSHJ_8: partial(deep, imgPath, mode="bmshj2018-factorized-mse-8"),
        encoding.COMP_BMSHJ_4: partial(deep, imgPath, mode="bmshj2018-factorized-mse-4"),
        encoding.COMP_BMSHJ_1: partial(deep, imgPath, mode="bmshj2018-factorized-mse-1"),
        encoding.NO_COMP: partial(compress_nothing),
    }
    action = dispatch.get(choice, compress_nothing)
    return action()
def get_summary(dct_results):
    """Aggregate per-image metrics into mean SSIM/PSNR values.

    Parameters
    ----------
    dct_results : dict
        Maps an identifier to its metrics dict.  Only entries carrying a
        "metrics_emb_src" key are counted; such entries are expected to
        carry "metrics_ext_wtr" as well (as produced by the pipeline).

    Returns
    -------
    dict with a single "summary" key holding the four rounded means.

    Raises
    ------
    statistics.StatisticsError
        If no entry has a "metrics_emb_src" key (mean of an empty list).
    """
    # Only entries that went through the full embed/extract pipeline count.
    scored = [v for v in dct_results.values() if "metrics_emb_src" in v]
    lst_ssim_src = [v["metrics_emb_src"]["ssim"] for v in scored]
    lst_psnr_src = [v["metrics_emb_src"]["psnr"] for v in scored]
    lst_ssim_wtr = [v["metrics_ext_wtr"]["ssim"] for v in scored]
    lst_psnr_wtr = [v["metrics_ext_wtr"]["psnr"] for v in scored]
    dct_summary = {
        "summary": {
            "ssim_emb_src": round(mean(lst_ssim_src), 3),
            "psnr_emb_src": round(mean(lst_psnr_src), 3),
            "ssim_ext_wtr": round(mean(lst_ssim_wtr), 3),
            "psnr_ext_wtr": round(mean(lst_psnr_wtr), 3),
        }
    }
    print(dct_summary)
    return dct_summary
| [
"functools.partial",
"tfci.decompress",
"numpy.abs",
"cv2.imwrite",
"numpy.float32",
"skimage.metrics.mean_squared_error",
"cv2.imread",
"numpy.min",
"skimage.metrics.structural_similarity",
"numpy.mean",
"numpy.max",
"statistics.mean",
"tfci.compress",
"cv2.normalize",
"skimage.metrics.... | [((451, 490), 'skimage.metrics.structural_similarity', 'ssim', (['imageA', 'imageB'], {'multichannel': '(True)'}), '(imageA, imageB, multichannel=True)\n', (455, 490), True, 'from skimage.metrics import structural_similarity as ssim\n'), ((505, 524), 'skimage.metrics.mean_squared_error', 'mse', (['imageA', 'imageB'], {}), '(imageA, imageB)\n', (508, 524), True, 'from skimage.metrics import mean_squared_error as mse\n'), ((540, 560), 'skimage.metrics.peak_signal_noise_ratio', 'psnr', (['imageA', 'imageB'], {}), '(imageA, imageB)\n', (544, 560), True, 'from skimage.metrics import peak_signal_noise_ratio as psnr\n'), ((577, 598), 'skimage.metrics.normalized_root_mse', 'nrmse', (['imageA', 'imageB'], {}), '(imageA, imageB)\n', (582, 598), True, 'from skimage.metrics import normalized_root_mse as nrmse\n'), ((2387, 2422), 'os.path.join', 'os.path.join', (['inFld', 'inFname_imgSrc'], {}), '(inFld, inFname_imgSrc)\n', (2399, 2422), False, 'import os\n'), ((2456, 2491), 'os.path.join', 'os.path.join', (['inFld', 'inFname_imgWtr'], {}), '(inFld, inFname_imgWtr)\n', (2468, 2491), False, 'import os\n'), ((4090, 4113), 'numpy.abs', 'np.abs', (['(imgEmb - imgSrc)'], {}), '(imgEmb - imgSrc)\n', (4096, 4113), True, 'import numpy as np\n'), ((4664, 4697), 'numpy.abs', 'np.abs', (['(imgExtFilt - imgWtrRzFilt)'], {}), '(imgExtFilt - imgWtrRzFilt)\n', (4670, 4697), True, 'import numpy as np\n'), ((5834, 5876), 'tfci.compress', 'compress', (['mode', 'imgPath', "(imgPath + '.tfci')"], {}), "(mode, imgPath, imgPath + '.tfci')\n", (5842, 5876), False, 'from tfci import compress, decompress\n'), ((5881, 5919), 'tfci.decompress', 'decompress', (["(imgPath + '.tfci')", 'imgPath'], {}), "(imgPath + '.tfci', imgPath)\n", (5891, 5919), False, 'from tfci import compress, decompress\n'), ((5984, 6003), 'cv2.imread', 'cv2.imread', (['imgPath'], {}), '(imgPath)\n', (5994, 6003), False, 'import cv2\n'), ((6132, 6160), 'cv2.imread', 'cv2.imread', (["(imgPath + '.jpg')"], {}), 
"(imgPath + '.jpg')\n", (6142, 6160), False, 'import cv2\n'), ((6165, 6193), 'cv2.imwrite', 'cv2.imwrite', (['imgPath', 'imgemb'], {}), '(imgPath, imgemb)\n', (6176, 6193), False, 'import cv2\n'), ((1334, 1394), 'cv2.normalize', 'cv2.normalize', (['img', 'None', '(0)', '(255)', 'cv2.NORM_MINMAX', 'cv2.CV_8U'], {}), '(img, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)\n', (1347, 1394), False, 'import cv2\n'), ((1830, 1845), 'numpy.float32', 'np.float32', (['img'], {}), '(img)\n', (1840, 1845), True, 'import numpy as np\n'), ((2195, 2207), 'numpy.mean', 'np.mean', (['img'], {}), '(img)\n', (2202, 2207), True, 'import numpy as np\n'), ((2216, 2227), 'numpy.min', 'np.min', (['img'], {}), '(img)\n', (2222, 2227), True, 'import numpy as np\n'), ((2236, 2247), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (2242, 2247), True, 'import numpy as np\n'), ((6330, 6382), 'functools.partial', 'partial', (['compress_JpegEnconding', 'imgPath'], {'quality': '(50)'}), '(compress_JpegEnconding, imgPath, quality=50)\n', (6337, 6382), False, 'from functools import partial\n'), ((6415, 6467), 'functools.partial', 'partial', (['compress_JpegEnconding', 'imgPath'], {'quality': '(70)'}), '(compress_JpegEnconding, imgPath, quality=70)\n', (6422, 6467), False, 'from functools import partial\n'), ((6500, 6552), 'functools.partial', 'partial', (['compress_JpegEnconding', 'imgPath'], {'quality': '(90)'}), '(compress_JpegEnconding, imgPath, quality=90)\n', (6507, 6552), False, 'from functools import partial\n'), ((6585, 6641), 'functools.partial', 'partial', (['compress_deepEncoding', 'imgPath'], {'mode': '"""hific-hi"""'}), "(compress_deepEncoding, imgPath, mode='hific-hi')\n", (6592, 6641), False, 'from functools import partial\n'), ((6674, 6730), 'functools.partial', 'partial', (['compress_deepEncoding', 'imgPath'], {'mode': '"""hific-mi"""'}), "(compress_deepEncoding, imgPath, mode='hific-mi')\n", (6681, 6730), False, 'from functools import partial\n'), ((6763, 6819), 'functools.partial', 
'partial', (['compress_deepEncoding', 'imgPath'], {'mode': '"""hific-lo"""'}), "(compress_deepEncoding, imgPath, mode='hific-lo')\n", (6770, 6819), False, 'from functools import partial\n'), ((6852, 6926), 'functools.partial', 'partial', (['compress_deepEncoding', 'imgPath'], {'mode': '"""bmshj2018-factorized-mse-8"""'}), "(compress_deepEncoding, imgPath, mode='bmshj2018-factorized-mse-8')\n", (6859, 6926), False, 'from functools import partial\n'), ((6981, 7055), 'functools.partial', 'partial', (['compress_deepEncoding', 'imgPath'], {'mode': '"""bmshj2018-factorized-mse-4"""'}), "(compress_deepEncoding, imgPath, mode='bmshj2018-factorized-mse-4')\n", (6988, 7055), False, 'from functools import partial\n'), ((7110, 7184), 'functools.partial', 'partial', (['compress_deepEncoding', 'imgPath'], {'mode': '"""bmshj2018-factorized-mse-1"""'}), "(compress_deepEncoding, imgPath, mode='bmshj2018-factorized-mse-1')\n", (7117, 7184), False, 'from functools import partial\n'), ((7234, 7259), 'functools.partial', 'partial', (['compress_nothing'], {}), '(compress_nothing)\n', (7241, 7259), False, 'from functools import partial\n'), ((1751, 1766), 'numpy.float32', 'np.float32', (['img'], {}), '(img)\n', (1761, 1766), True, 'import numpy as np\n'), ((1848, 1859), 'numpy.min', 'np.min', (['img'], {}), '(img)\n', (1854, 1859), True, 'import numpy as np\n'), ((1861, 1872), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (1867, 1872), True, 'import numpy as np\n'), ((2002, 2017), 'numpy.float32', 'np.float32', (['img'], {}), '(img)\n', (2012, 2017), True, 'import numpy as np\n'), ((2117, 2132), 'numpy.float32', 'np.float32', (['img'], {}), '(img)\n', (2127, 2132), True, 'import numpy as np\n'), ((4034, 4051), 'numpy.min', 'np.min', (['imgEmbPkl'], {}), '(imgEmbPkl)\n', (4040, 4051), True, 'import numpy as np\n'), ((4053, 4070), 'numpy.max', 'np.max', (['imgEmbPkl'], {}), '(imgEmbPkl)\n', (4059, 4070), True, 'import numpy as np\n'), ((7877, 7895), 'statistics.mean', 'mean', 
(['lst_ssim_src'], {}), '(lst_ssim_src)\n', (7881, 7895), False, 'from statistics import mean\n'), ((7935, 7953), 'statistics.mean', 'mean', (['lst_psnr_src'], {}), '(lst_psnr_src)\n', (7939, 7953), False, 'from statistics import mean\n'), ((7993, 8011), 'statistics.mean', 'mean', (['lst_ssim_wtr'], {}), '(lst_ssim_wtr)\n', (7997, 8011), False, 'from statistics import mean\n'), ((8051, 8069), 'statistics.mean', 'mean', (['lst_psnr_wtr'], {}), '(lst_psnr_wtr)\n', (8055, 8069), False, 'from statistics import mean\n'), ((1422, 1433), 'numpy.min', 'np.min', (['img'], {}), '(img)\n', (1428, 1433), True, 'import numpy as np\n'), ((1435, 1446), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (1441, 1446), True, 'import numpy as np\n')] |
#####################
#####################
## ##
## Copyright (C) ##
## <NAME> ##
## 2017-2018 ##
## ##
#####################
#####################
"""
Here are the notes referenced by the code.
1. To retrieve the original score, use the logit (inverse sigmoid) function and multiply by DIV_SCORE.
2. This is the data generator for the model -- it takes data from a PGN file and converts it into
flattened arrays using the following process:
a. Read a game from the PGN file.
b. Take each individual position:
i. Convert it to a 768-long one-hot array.
ii. Run an evaluation function on each position.
c. Return a batch of BATCH_SIZE positions to be fed into the model.
"""
import sys
import random
import keras
import numpy as np
import chess
import chess.pgn
import math
import time
import evalp # Local PST/value function
# Specific imports
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras import backend as K
# Configuration
# Defaults: -, 10k, 100k, 1k, 1
PGN_FILE_PATH = "" # PGN file reference
# NOTE(review): PGN_FILE_PATH is empty, so open(PGN_FILE_PATH) below raises
# FileNotFoundError until a real path is filled in (see the example below).
#PGN_FILE_PATH = "/Users/bracho/Desktop/cgd/data.pgn"
PREVIOUS_MODEL = input("Previous model filepath (Enter to skip): ")
name_components = ["$TIME_SPENT", "$POS_EVALUATED", "W"] # The components in the name (use $VAR to access VAR)
BATCH_SIZE = 10000 # In different positions (not necessarily unique)
MAX_GENERATOR_SIZE = 100000  # positions buffered per generator yield
EPOCHS = 1000  # training epochs per generator batch
DATA_UPDATES = 100  # how many generator batches to train on
# Custom parameters (for naming -- changeable, but make sure to add code in loop to change value)
TIME_SPENT = int(input("Previous time spent on model (0 is default): "))
DATA_OFFSET = int(input("Data offset (default 0): ")) # Start at the Nth batch of BATCH_SIZE.
# Optimizer parameters
LEARNING_RATE = 1e-2
DECAY = 5e-6
MOMENTUM = 0.9
USE_NESTEROV = True
# Model.compile() parameters
LOSS_TYPE = "mse"
### --------- ###
pgn_file = open(PGN_FILE_PATH)
if(PREVIOUS_MODEL != ""):
    model = load_model(PREVIOUS_MODEL)
# Piece symbol -> index into the 12-long one-hot vector (white K..P, then
# black k..p); None (empty square) maps to -1, i.e. the all-zero vector.
PIECE_MAP = {None:-1,"K":0,"Q":1,"R":2,"B":3,"N":4,"P":5,"k":6,"q":7,"r":8,"b":9,"n":10,"p":11}
### --------- ###
def K_logit(x): # Not used yet
    """Logit (inverse sigmoid) of x, expressed with Keras backend ops."""
    odds = x / (1 - x)
    return K.log(odds)
def sigmoid(x):
    """Numerically stable logistic function 1 / (1 + exp(-x)).

    The naive form ``1/(1+math.exp(-x))`` raises OverflowError for large
    negative x (math.exp overflows around an argument of ~710) -- the
    original code's "Watch out for an overflow error!" comment.  Splitting
    on the sign keeps the exponent non-positive, so exp() can only
    underflow harmlessly to 0.0.

    Parameters
    ----------
    x : float

    Returns
    -------
    float in [0, 1] (exactly 0.0 / 1.0 at extreme inputs via underflow).
    """
    if x >= 0:
        return 1 / (1 + math.exp(-x))
    z = math.exp(x)  # x < 0, so z is in (0, 1): no overflow possible
    return z / (1 + z)
def data_generator(): # See (2).
    """Yield (data, labels, POS_EVALUATED) training batches from the PGN file.

    First skips DATA_OFFSET positions (whole games at a time), then
    repeatedly accumulates MAX_GENERATOR_SIZE positions, each encoded as a
    768-long flattened one-hot board array, labelled with the sigmoid of
    evalp's static evaluation.  Reads the module-level pgn_file handle.
    """
    print("Calculating offset of", str(DATA_OFFSET) + ".")
    POS_EVALUATED = 0
    # Skip past already-consumed data; the offset is honored at game
    # granularity, so up to one game's worth of positions may be overshot.
    while(POS_EVALUATED < DATA_OFFSET):
        game = chess.pgn.read_game(pgn_file)
        for move in game.main_line():
            POS_EVALUATED += 1
        sys.stdout.write("\r" + str(round((POS_EVALUATED/DATA_OFFSET)*100,3)) + "% generated.")
        sys.stdout.flush()
    positions = []
    while(True):
        while(len(positions) < MAX_GENERATOR_SIZE):
            sys.stdout.write("\r" + str(round((len(positions)/MAX_GENERATOR_SIZE)*100,3)) + "% generated.")
            sys.stdout.flush()
            # NOTE(review): read_game returns None at EOF; main_line() would
            # then raise AttributeError -- confirm the PGN is large enough.
            game = chess.pgn.read_game(pgn_file)
            board = game.board()
            for move in game.main_line():
                board.push(move)
                # Here, we have to convert the chess.Board() object into a 768-long array.
                position = []
                for square in chess.SQUARES: # len(chess.SQUARES) == range(0,64) -- more descriptive
                    try:
                        piece_type = board.piece_at(square).symbol()
                    except AttributeError: # piece.symbol() doesn't exist for None type.
                        piece_type = None
                    one = PIECE_MAP[piece_type]
                    one_hot = [0 for _ in range(0,12)]
                    if(one != -1):
                        one_hot[one] = 1
                    position.append(one_hot)
                flattened_position = []
                for oh in position:
                    for v in oh:
                        flattened_position.append(v) # Flatten the 64*12 matrix into a 1-D array (size 768).
                # NOTE(review): every tuple in this game stores the SAME board
                # object, which keeps mutating as moves are pushed -- by the
                # time labels are computed, all boards show the final position.
                # Verify evalp.evaluate relies on the flattened array instead.
                positions.append((flattened_position, board))
                POS_EVALUATED += 1
        data = positions[:MAX_GENERATOR_SIZE]
        labels = [sigmoid(evalp.evaluate(pos[0], pos[1])) for pos in data] # Use a sigmoid function for data regularization.
        data = [pos[0] for pos in data]
        yield data, labels, POS_EVALUATED
        # Discard the buffer; any positions beyond MAX_GENERATOR_SIZE from the
        # last game read are dropped, not carried over.
        positions = []
if(PREVIOUS_MODEL == ""):
model = Sequential([
Dense(8*8*12, input_shape=(8*8*12,)), # 8*8 for the positions, 12 for the one-hot array
Dropout(0.5),
Dense(8*8),
Dropout(0.5),
Dense(8),
Dropout(0.5),
Dense(1), # Should return a scalar value for the score.
Activation("sigmoid")
])
sgd = SGD(lr=LEARNING_RATE, decay=DECAY, momentum=MOMENTUM, nesterov=USE_NESTEROV)
model.compile(optimizer=sgd, loss=LOSS_TYPE)
# Generate training data from PGN file in config.
generator = data_generator()
for _ in range(0,DATA_UPDATES):
data, labels, POS_EVALUATED = generator.__next__()
score = model.evaluate(data, labels, batch_size=BATCH_SIZE) # Loss, accuracy (see (1))
print(score) # Test the model before training to test for overfitting.
# Generate labels using these positions and a basic evaluation function.
# See (1)
# Fit the model!
print(np.array(data).shape, "->", np.array(labels).shape)
timer = time.time()
model.fit(data, labels, epochs=EPOCHS, batch_size=BATCH_SIZE, shuffle=True, validation_split=0.2)
TIME_SPENT += time.time()-timer
TIME_SPENT = round(TIME_SPENT)
tnc = name_components[:]
for i in range(0,len(tnc)):
if(tnc[i][0] == "$"):
tnc[i] = globals()[tnc[i][1:]]
model.save("../models/" + "__".join([str(comp) for comp in tnc]))
print("Saved model to file.")
| [
"keras.models.load_model",
"math.exp",
"keras.optimizers.SGD",
"chess.pgn.read_game",
"keras.layers.Activation",
"keras.layers.Dropout",
"time.time",
"keras.backend.log",
"keras.layers.Dense",
"sys.stdout.flush",
"numpy.array",
"evalp.evaluate"
] | [((4865, 4941), 'keras.optimizers.SGD', 'SGD', ([], {'lr': 'LEARNING_RATE', 'decay': 'DECAY', 'momentum': 'MOMENTUM', 'nesterov': 'USE_NESTEROV'}), '(lr=LEARNING_RATE, decay=DECAY, momentum=MOMENTUM, nesterov=USE_NESTEROV)\n', (4868, 4941), False, 'from keras.optimizers import SGD\n'), ((2118, 2144), 'keras.models.load_model', 'load_model', (['PREVIOUS_MODEL'], {}), '(PREVIOUS_MODEL)\n', (2128, 2144), False, 'from keras.models import Sequential, load_model\n'), ((2305, 2323), 'keras.backend.log', 'K.log', (['(x / (1 - x))'], {}), '(x / (1 - x))\n', (2310, 2323), True, 'from keras import backend as K\n'), ((5523, 5534), 'time.time', 'time.time', ([], {}), '()\n', (5532, 5534), False, 'import time\n'), ((2581, 2610), 'chess.pgn.read_game', 'chess.pgn.read_game', (['pgn_file'], {}), '(pgn_file)\n', (2600, 2610), False, 'import chess\n'), ((2788, 2806), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2804, 2806), False, 'import sys\n'), ((5657, 5668), 'time.time', 'time.time', ([], {}), '()\n', (5666, 5668), False, 'import time\n'), ((2356, 2368), 'math.exp', 'math.exp', (['(-x)'], {}), '(-x)\n', (2364, 2368), False, 'import math\n'), ((3022, 3040), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3038, 3040), False, 'import sys\n'), ((3061, 3090), 'chess.pgn.read_game', 'chess.pgn.read_game', (['pgn_file'], {}), '(pgn_file)\n', (3080, 3090), False, 'import chess\n'), ((4555, 4599), 'keras.layers.Dense', 'Dense', (['(8 * 8 * 12)'], {'input_shape': '(8 * 8 * 12,)'}), '(8 * 8 * 12, input_shape=(8 * 8 * 12,))\n', (4560, 4599), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((4652, 4664), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (4659, 4664), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((4675, 4687), 'keras.layers.Dense', 'Dense', (['(8 * 8)'], {}), '(8 * 8)\n', (4680, 4687), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((4696, 4708), 'keras.layers.Dropout', 'Dropout', 
(['(0.5)'], {}), '(0.5)\n', (4703, 4708), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((4719, 4727), 'keras.layers.Dense', 'Dense', (['(8)'], {}), '(8)\n', (4724, 4727), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((4738, 4750), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (4745, 4750), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((4761, 4769), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (4766, 4769), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((4826, 4847), 'keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (4836, 4847), False, 'from keras.layers import Dense, Dropout, Activation\n'), ((5458, 5472), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (5466, 5472), True, 'import numpy as np\n'), ((5486, 5502), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (5494, 5502), True, 'import numpy as np\n'), ((4284, 4314), 'evalp.evaluate', 'evalp.evaluate', (['pos[0]', 'pos[1]'], {}), '(pos[0], pos[1])\n', (4298, 4314), False, 'import evalp\n')] |
# Template by <NAME>
# Spring 2015
# CS 251 Project 8
#
# Classifier class and child definitions
import sys
import data
import analysis as an
import numpy as np
import scipy.cluster.vq as vq
import sklearn.metrics as cm
class Classifier:
    """Base class for classifiers.

    Holds the classifier's descriptive type string and provides
    confusion-matrix helpers shared by all child classes.
    """

    def __init__(self, type):
        """Store the classifier type (a descriptive string)."""
        self._type = type

    def type(self, newtype=None):
        """Return the type string; if newtype is given, set it first."""
        if newtype is not None:
            self._type = newtype
        return self._type

    def confusion_matrix(self, truecats, classcats):
        """Confusion matrix for two Nx1 arrays of zero-indexed categories.

        Rows represent the true categories, columns the classifier output.
        """
        return cm.confusion_matrix(truecats, classcats)

    def confusion_matrix_str(self, cmtx):
        """Render a confusion matrix as a printable multi-line string."""
        body = ''.join(str(cmtx[idx, :]) + '\n' for idx in range(cmtx.shape[0]))
        return '\n\nConfusion matrix:\n\n' + body

    def __str__(self):
        """A classifier prints as its type string."""
        return str(self._type)
class NaiveBayes(Classifier):
    '''NaiveBayes implements a simple NaiveBayes classifier using a
    Gaussian distribution as the pdf.

    Per class c and feature f it stores a mean, a variance, and the
    Gaussian normalization scale 1/sqrt(2*pi*var); classification picks
    the class with the largest product of per-feature densities.
    '''
    def __init__(self, dataObj=None, headers=[], categories=None):
        '''Takes in a Data object with N points, a set of F headers, and a
        matrix of categories, one category label for each data point.'''
        # call the parent init with the type
        Classifier.__init__(self, 'Naive Bayes Classifier')
        # store the headers used for classification
        self.headers = headers
        # number of classes and number of features
        # original class labels
        # unique data for the Naive Bayes: means, variances, scales
        # if given data,
        if dataObj != None:
            # call the build function
            self.build(dataObj.get_data(headers), categories)
    def build(self, A, categories):
        '''Builds the classifier given the data points in A and the categories'''
        # figure out how many categories there are and get the mapping (np.unique)
        unique, mapping = np.unique(np.array(categories.T), return_inverse=True)
        self.num_classes = len(unique)
        self.num_features = A.shape[1]
        # NOTE(review): this stores the 0..C-1 *mapped indices*, not the
        # original label values in `unique` -- related to the FIXME in
        # classify() below; verify whether unique[mapping] was intended.
        self.class_labels = np.matrix(mapping).T
        # create the matrices for the means, vars, and scales
        # the output matrices will be categories (C) x features (F)
        self.class_means = np.zeros((self.num_classes, self.num_features))
        self.class_vars = np.zeros((self.num_classes, self.num_features))
        self.class_scales = np.zeros((self.num_classes, self.num_features))
        # compute the means/vars/scales for each class
        for i in range(self.num_classes):
            data = A[(mapping == i), :]
            self.class_means[i, :] = np.mean(data, axis=0)
            # np.var defaults to the population variance (ddof=0)
            self.class_vars[i, :] = np.var(data, axis=0)
            self.class_scales[i, :] = 1/np.sqrt(2*np.pi*np.var(data, axis=0))
        # store any other necessary information: # of classes, # of features, original labels
        return
    def classify(self, A, return_likelihoods=False):
        '''Classify each row of A into one category. Return a matrix of
        category IDs in the range [0..C-1], and an array of class
        labels using the original label values. If return_likelihoods
        is True, it also returns the NxC likelihood matrix.
        '''
        # error check to see if A has the same number of columns as
        # the class means
        assert A.shape[1] == self.class_means.shape[1]
        # make a matrix that is N x C to store the probability of each
        # class for each data point
        P = np.matrix(np.zeros((A.shape[0], self.num_classes))) # a matrix of zeros that is N (rows of A) x C (number of classes)
        # calculate the probabilities by looping over the classes
        # with numpy-fu you can do this in one line inside a for loop
        # For each class: product over features of the Gaussian density
        # scale * exp(-(x-mean)^2 / (2*var)).
        for i in np.arange(P.shape[1]):
            P[:, i] = np.multiply(self.class_scales[i, :], np.exp(-np.square(A-self.class_means[i, :])/(2*self.class_vars[i, :]))).prod(axis=1)
        # calculate the most likely class for each data point
        cats = np.argmax(P, axis=1) # take the argmax of P along axis 1
        print("Cats:")
        print(cats)
        # use the class ID as a lookup to generate the original labels
        # FIXME: labels: get lookup to work
        print("Class labels:")
        print(self.class_labels)
        # labels = self.class_labels[cats] isn't working
        labels = self.class_labels[cats[:, 0], 0]
        print("Labels:")
        print(labels)
        if return_likelihoods:
            return cats, labels, P
        return cats, labels
    def __str__(self):
        '''Make a pretty string that prints out the classifier information.'''
        s = "\nNaive Bayes Classifier\n"
        for i in range(self.num_classes):
            s += 'Class %d --------------------\n' % (i)
            s += 'Mean  : ' + str(self.class_means[i,:]) + "\n"
            s += 'Var   : ' + str(self.class_vars[i,:]) + "\n"
            s += 'Scales: ' + str(self.class_scales[i,:]) + "\n"
        s += "\n"
        return s
    def write(self, filename):
        '''Writes the Bayes classifier to a file.'''
        # extension
        return
    def read(self, filename):
        '''Reads in the Bayes classifier from the file'''
        # extension
        return
class KNN(Classifier):
    '''K-nearest-neighbor classifier.

    Stores one exemplar matrix per class (optionally condensed with
    K-means) and classifies a point into the class whose K nearest
    exemplars have the smallest summed distance.
    '''
    def __init__(self, dataObj=None, headers=[], categories=None, K=None):
        '''Take in a Data object with N points, a set of F headers, and a
        matrix of categories, with one category label for each data point.'''
        # call the parent init with the type
        Classifier.__init__(self, 'KNN Classifier')
        # store the headers used for classification
        self.headers = headers
        # if given data, call the build function
        if dataObj != None:
            self.build(dataObj.getData(headers), categories)
    def build(self, A, categories, K=None):
        '''Builds the classifier given the data points in A and the categories.

        If K is None, every training point of a class is kept as an
        exemplar; otherwise K-means condenses each class to K exemplars.
        '''
        # figure out how many categories there are and get the mapping (np.unique)
        unique, mapping = np.unique(np.array(categories.T), return_inverse=True)
        self.num_classes = len(unique)
        self.num_features = A.shape[1]
        self.class_labels = np.matrix(mapping).T
        # for each category i, build the set of exemplars
        self.exemplars = []
        for i in range(self.num_classes):
            data = A[(mapping == i), :]
            if K is None:
                self.exemplars.append(data)
            else:
                # run K-means on the rows of A where the category/mapping is i;
                # kmeans2 returns (centroids, per-point labels) -- only the
                # centroids are kept as exemplars
                codebook, bookerror = vq.kmeans2(data, K)
                print(codebook) # FIXME: numpy.linalg.linalg.LinAlgError: Matrix is not positive definite
                self.exemplars.append(codebook)
        return
    def classify(self, A, K=3, return_distances=False):
        '''Classify each row of A into one category. Return a matrix of
        category IDs in the range [0..C-1], and an array of class
        labels using the original label values. If return_distances is
        True, it also returns the NxC distance matrix.
        The parameter K specifies how many neighbors to use in the
        distance computation. The default is three.'''
        # error check to see if A has the same number of columns as the class means
        assert A.shape[1] == self.num_features
        # make a matrix that is N x C to store the distance to each class for each data point
        N = A.shape[0]
        D = np.zeros((N, self.num_classes))
        for i in range(self.num_classes):
            # temp holds the distance from each of the N query points to each
            # of the M exemplars of class i
            M = self.exemplars[i].shape[0]
            temp = np.zeros((N, M))
            for j in range(N):
                for k in range(M):
                    temp[j, k] = np.sqrt(np.sum(np.square(A[j, :] - self.exemplars[i][k, :])))
            # BUG FIX: the original called np.sort(temp, axis=1), which
            # returns a sorted *copy* and discards it (ndarray.sort is the
            # in-place variant), so temp[:, 0:K] never selected the K
            # nearest exemplars.  Sort each row in place instead.
            temp.sort(axis=1)
            # sum the K smallest distances per query point
            D[:, i] = np.sum(temp[:, 0:K], axis=1)
        # calculate the most likely class for each data point
        cats = np.matrix(np.argmin(D, axis=1)).T # take the argmin of D along axis 1
        print("Cats:")
        print(cats)
        # use the class ID as a lookup to generate the original labels
        # FIXME: labels: get lookup to work
        print("Class labels:")
        print(self.class_labels)
        # labels = self.class_labels[cats] isn't working
        labels = self.class_labels[cats[:, 0], 0]
        print("Labels:")
        print(labels)
        if return_distances:
            return cats, labels, D
        return cats, labels
    def __str__(self):
        '''Make a pretty string that prints out the classifier information.'''
        s = "\nKNN Classifier\n"
        for i in range(self.num_classes):
            s += 'Class %d --------------------\n' % (i)
            s += 'Number of Exemplars: %d\n' % (self.exemplars[i].shape[0])
            s += 'Mean of Exemplars  :' + str(np.mean(self.exemplars[i], axis=0)) + "\n"
        s += "\n"
        return s
    def write(self, filename):
        '''Writes the KNN classifier to a file.'''
        # extension
        return
    def read(self, filename):
        '''Reads in the KNN classifier from the file'''
        # extension
        return
| [
"numpy.matrix",
"numpy.sum",
"numpy.argmax",
"numpy.square",
"numpy.zeros",
"numpy.argmin",
"numpy.sort",
"numpy.mean",
"numpy.array",
"numpy.arange",
"scipy.cluster.vq.kmeans2",
"sklearn.metrics.confusion_matrix",
"numpy.var"
] | [((912, 952), 'sklearn.metrics.confusion_matrix', 'cm.confusion_matrix', (['truecats', 'classcats'], {}), '(truecats, classcats)\n', (931, 952), True, 'import sklearn.metrics as cm\n'), ((3192, 3239), 'numpy.zeros', 'np.zeros', (['(self.num_classes, self.num_features)'], {}), '((self.num_classes, self.num_features))\n', (3200, 3239), True, 'import numpy as np\n'), ((3266, 3313), 'numpy.zeros', 'np.zeros', (['(self.num_classes, self.num_features)'], {}), '((self.num_classes, self.num_features))\n', (3274, 3313), True, 'import numpy as np\n'), ((3342, 3389), 'numpy.zeros', 'np.zeros', (['(self.num_classes, self.num_features)'], {}), '((self.num_classes, self.num_features))\n', (3350, 3389), True, 'import numpy as np\n'), ((4708, 4729), 'numpy.arange', 'np.arange', (['P.shape[1]'], {}), '(P.shape[1])\n', (4717, 4729), True, 'import numpy as np\n'), ((4953, 4973), 'numpy.argmax', 'np.argmax', (['P'], {'axis': '(1)'}), '(P, axis=1)\n', (4962, 4973), True, 'import numpy as np\n'), ((8738, 8769), 'numpy.zeros', 'np.zeros', (['(N, self.num_classes)'], {}), '((N, self.num_classes))\n', (8746, 8769), True, 'import numpy as np\n'), ((2863, 2885), 'numpy.array', 'np.array', (['categories.T'], {}), '(categories.T)\n', (2871, 2885), True, 'import numpy as np\n'), ((3014, 3032), 'numpy.matrix', 'np.matrix', (['mapping'], {}), '(mapping)\n', (3023, 3032), True, 'import numpy as np\n'), ((3564, 3585), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (3571, 3585), True, 'import numpy as np\n'), ((3622, 3642), 'numpy.var', 'np.var', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (3628, 3642), True, 'import numpy as np\n'), ((4446, 4486), 'numpy.zeros', 'np.zeros', (['(A.shape[0], self.num_classes)'], {}), '((A.shape[0], self.num_classes))\n', (4454, 4486), True, 'import numpy as np\n'), ((7202, 7224), 'numpy.array', 'np.array', (['categories.T'], {}), '(categories.T)\n', (7210, 7224), True, 'import numpy as np\n'), ((7353, 7371), 'numpy.matrix', 
'np.matrix', (['mapping'], {}), '(mapping)\n', (7362, 7371), True, 'import numpy as np\n'), ((9059, 9075), 'numpy.zeros', 'np.zeros', (['(N, M)'], {}), '((N, M))\n', (9067, 9075), True, 'import numpy as np\n'), ((9353, 9374), 'numpy.sort', 'np.sort', (['temp'], {'axis': '(1)'}), '(temp, axis=1)\n', (9360, 9374), True, 'import numpy as np\n'), ((9397, 9425), 'numpy.sum', 'np.sum', (['temp[:, 0:K]'], {'axis': '(1)'}), '(temp[:, 0:K], axis=1)\n', (9403, 9425), True, 'import numpy as np\n'), ((7747, 7766), 'scipy.cluster.vq.kmeans2', 'vq.kmeans2', (['data', 'K'], {}), '(data, K)\n', (7757, 7766), True, 'import scipy.cluster.vq as vq\n'), ((9514, 9534), 'numpy.argmin', 'np.argmin', (['D'], {'axis': '(1)'}), '(D, axis=1)\n', (9523, 9534), True, 'import numpy as np\n'), ((3699, 3719), 'numpy.var', 'np.var', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (3705, 3719), True, 'import numpy as np\n'), ((10402, 10436), 'numpy.mean', 'np.mean', (['self.exemplars[i]'], {'axis': '(0)'}), '(self.exemplars[i], axis=0)\n', (10409, 10436), True, 'import numpy as np\n'), ((9294, 9338), 'numpy.square', 'np.square', (['(A[j, :] - self.exemplars[i][k, :])'], {}), '(A[j, :] - self.exemplars[i][k, :])\n', (9303, 9338), True, 'import numpy as np\n'), ((4798, 4835), 'numpy.square', 'np.square', (['(A - self.class_means[i, :])'], {}), '(A - self.class_means[i, :])\n', (4807, 4835), True, 'import numpy as np\n')] |
'''
本库用于科学计算和快速建模
maysics主要包括十三个模块:
1、algorithm 封装了几种模拟方法,用于简易模拟;
2、calculus 封装了部分常见的算符算子和积分方法,辅助数学运算;
3、constant 储存了部分常数;
4、equation 封装了部分方程求解运算;
5、explainer 用于评估和解释模型;
6、graph 用于图论分析;
7、imageprocess 用于数字图像处理;
8、models 封装了几种常用的模型以便快速构建数理模型;
9、preprocess 用于数据预处理;
10、stats 用于统计分析;
11、tprocess 用于处理时间数据;
12、transformation 储存了常用的坐标转换及其他数学变换;
13、utils 是额外工具箱。
This package is used for scientific calculating and fast modeling.
maysics includes thirteen modules:
1. "algorithm" packages several simulation methods for simple simulation;
2. "calculus" packages some common operators and integration method to assist in mathematical operations;
3. "constant" contents some usual constants;
4. "equation" packages some equation solving operation;
5. "explainer" is used for estimating and explaining model;
6. "graph" is used for graph theory analysis;
7. "imageprocess" is used for digital image process;
8. "models" packages several commonly used models for fast modeling;
9. "preprocess" is used for data preproccessing;
10. "stats" is uesd for statistical analysis;
11. "tprocess" is used for processing time data;
12. "transformation" stores common coordinate transformations and other mathematical transformations;
13. "utils" is extra Utils.
'''
import numpy as np
import pickle, csv
from PIL import Image
from maysics import algorithm, calculus, constant, equation, explainer, graph,\
models, preprocess, stats, tprocess, transformation, utils
from maysics.models import linear_r
from maysics.preprocess import preview, preview_file, shuffle
from maysics.utils import circle, discrete, grid_net
def covs1d(a, b, n):
'''
一维序列卷积和
参数
----
a:一维数组
b:一维数组
n:整型,平移步数
返回
----
数类型,a[n] * b[n]
Convolution Sum of 1-D List
Parameters
----------
a: 1-D array
b: 1-D array
n: int, translation steps
Return
------
num, a[n] * b[n]
'''
a = np.array(a)
b = list(b)
b.reverse()
b = np.array(b)
num_a = len(a)
num_b = len(b)
if n <= 0 or n >= num_a + num_b:
result = 0
else:
a = np.hstack((np.zeros(num_b), a, np.zeros(num_b)))
b = np.hstack((b, np.zeros(num_a + num_b)))
b[n : n+num_b] = b[: num_b]
b[: n] = 0
result = sum(a * b)
return result
def covs2d(a, b, n, m):
'''
二维序列卷积和
参数
----
a:二维数组
b:二维数组
n:整型,沿axis=0方向的平移步数
m:整型,沿axis=1方向的平移步数
返回
----
数类型,a[n, m] * b[n, m]
Convolution Sum of 2-D List
Parameters
----------
a: 2-D array
b: 2-D array
n: int, translation steps along axis=0
m: int, translation steps along axis=1
Return
------
num, a[n, m] * b[n, m]
'''
a = np.array(a)
b = np.array(b)
b = np.fliplr(b)
b = np.flipud(b)
num_a_x = a.shape[1]
num_a_y = a.shape[0]
num_b_x = b.shape[1]
num_b_y = b.shape[0]
if n <= 0 and m <= 0 or n >= num_a_x + num_b_x and m >= num_a_y + num_b_y:
result = 0
else:
a = np.hstack((np.zeros((num_a_y, num_b_x)), a, np.zeros((num_a_y, num_b_x))))
a = np.vstack((np.zeros((num_b_y, num_a_x + 2 * num_b_x)), a, np.zeros((num_b_y, num_a_x + 2 * num_b_x))))
b = np.hstack((b, np.zeros((num_b_y, num_a_x + num_b_x))))
b = np.vstack((b, np.zeros((num_a_y + num_b_y, num_a_x + 2 * num_b_x))))
# 移动b矩阵
b[n : n+num_b_y] = b[: num_b_y]
b[: n] = 0
b[:, m : m+num_b_x] = b[:, : num_b_x]
b[:, : m] = 0
result = (a * b).sum()
return result
def save(filename, data, header=None):
'''
保存为.pkl、.npy或.csv文件
参数
----
filename:字符串类型,文件名
data:需要保存的数据
header:一维列表类型,可选,数据的列名称,仅在写入csv文件时有效
Save as .pkl, .npy or .csv file
Parameters
----------
filename: str, file name
data: data
header: 1-D list, callable, the names of columns, effective only when writing csv files
'''
if filename[-4:] == '.pkl':
with open(filename, 'wb') as file:
pickle.dump(data, file)
elif filename[-4:] == '.npy':
np.save(filename, data)
elif filename[-4:] == '.csv':
data = np.array(data, dtype=np.object)
if not header:
header = []
if len(data.shape) == 1:
for i in range(data.shape[0]):
header.append(i)
with open(filename, 'w', newline='') as f:
writer = csv.writer(f)
writer.writerow(header)
writer.writerow(data)
else:
for i in range(data.shape[1]):
header.append(i)
with open(filename, 'w', newline='') as f:
writer = csv.writer(f)
writer.writerow(header)
writer.writerows(data)
else:
raise Exception("Suffix of filename must be '.pkl', '.npy' or '.csv'.")
def load(filename, header=True):
'''
载入.pkl、.npy或.csv文件
参数
----
filename:字符串类型,文件名
header:布尔类型,可选,True表示csv文件第一行为列名,仅在读取csv文件时有效,默认为True
Load .pkl, .npy or .csv file
Parameter
---------
filename: str, file name
header: bool, callable, True means the first row of the csv file if the names of columns, effective only when reading csv files, default=True
'''
if filename[-4:] == '.pkl':
with open(filename, 'rb') as file:
data = pickle.load(file)
return data
elif filename[-4:] == '.npy':
return np.load(filename, allow_pickle=True)
elif filename[-4:] == '.csv':
with open(filename, 'r') as f:
reader = list(csv.reader(f))
if header:
reader = reader[1:]
return np.array(reader)
else:
raise Exception("Suffix of filename must be '.pkl', '.npy' or '.csv'.")
def all_same(x):
'''
判断数组元素是否全相同
参数
----
x:数组
返回
----
布尔类型,True或者False
Determine whether the array elements are all the same
Parameter
---------
x: array
Return
------
bool, True or False
'''
x = np.array(x)
if len(x.shape) == 1:
x = len(set(x))
if x == 1:
return True
else:
return False
else:
for i in x:
if i.all() != x[0].all():
return False
return True | [
"pickle.dump",
"numpy.save",
"numpy.load",
"csv.reader",
"csv.writer",
"numpy.zeros",
"numpy.flipud",
"numpy.fliplr",
"pickle.load",
"numpy.array"
] | [((1947, 1958), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (1955, 1958), True, 'import numpy as np\n'), ((1999, 2010), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (2007, 2010), True, 'import numpy as np\n'), ((2785, 2796), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (2793, 2796), True, 'import numpy as np\n'), ((2805, 2816), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (2813, 2816), True, 'import numpy as np\n'), ((2825, 2837), 'numpy.fliplr', 'np.fliplr', (['b'], {}), '(b)\n', (2834, 2837), True, 'import numpy as np\n'), ((2846, 2858), 'numpy.flipud', 'np.flipud', (['b'], {}), '(b)\n', (2855, 2858), True, 'import numpy as np\n'), ((6361, 6372), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (6369, 6372), True, 'import numpy as np\n'), ((4129, 4152), 'pickle.dump', 'pickle.dump', (['data', 'file'], {}), '(data, file)\n', (4140, 4152), False, 'import pickle, csv\n'), ((4200, 4223), 'numpy.save', 'np.save', (['filename', 'data'], {}), '(filename, data)\n', (4207, 4223), True, 'import numpy as np\n'), ((5618, 5635), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (5629, 5635), False, 'import pickle, csv\n'), ((5719, 5755), 'numpy.load', 'np.load', (['filename'], {'allow_pickle': '(True)'}), '(filename, allow_pickle=True)\n', (5726, 5755), True, 'import numpy as np\n'), ((2143, 2158), 'numpy.zeros', 'np.zeros', (['num_b'], {}), '(num_b)\n', (2151, 2158), True, 'import numpy as np\n'), ((2163, 2178), 'numpy.zeros', 'np.zeros', (['num_b'], {}), '(num_b)\n', (2171, 2178), True, 'import numpy as np\n'), ((2207, 2230), 'numpy.zeros', 'np.zeros', (['(num_a + num_b)'], {}), '(num_a + num_b)\n', (2215, 2230), True, 'import numpy as np\n'), ((3100, 3128), 'numpy.zeros', 'np.zeros', (['(num_a_y, num_b_x)'], {}), '((num_a_y, num_b_x))\n', (3108, 3128), True, 'import numpy as np\n'), ((3133, 3161), 'numpy.zeros', 'np.zeros', (['(num_a_y, num_b_x)'], {}), '((num_a_y, num_b_x))\n', (3141, 3161), True, 'import numpy as np\n'), ((3187, 3229), 
'numpy.zeros', 'np.zeros', (['(num_b_y, num_a_x + 2 * num_b_x)'], {}), '((num_b_y, num_a_x + 2 * num_b_x))\n', (3195, 3229), True, 'import numpy as np\n'), ((3234, 3276), 'numpy.zeros', 'np.zeros', (['(num_b_y, num_a_x + 2 * num_b_x)'], {}), '((num_b_y, num_a_x + 2 * num_b_x))\n', (3242, 3276), True, 'import numpy as np\n'), ((3305, 3343), 'numpy.zeros', 'np.zeros', (['(num_b_y, num_a_x + num_b_x)'], {}), '((num_b_y, num_a_x + num_b_x))\n', (3313, 3343), True, 'import numpy as np\n'), ((3372, 3424), 'numpy.zeros', 'np.zeros', (['(num_a_y + num_b_y, num_a_x + 2 * num_b_x)'], {}), '((num_a_y + num_b_y, num_a_x + 2 * num_b_x))\n', (3380, 3424), True, 'import numpy as np\n'), ((4278, 4309), 'numpy.array', 'np.array', (['data'], {'dtype': 'np.object'}), '(data, dtype=np.object)\n', (4286, 4309), True, 'import numpy as np\n'), ((5953, 5969), 'numpy.array', 'np.array', (['reader'], {}), '(reader)\n', (5961, 5969), True, 'import numpy as np\n'), ((5860, 5873), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (5870, 5873), False, 'import pickle, csv\n'), ((4574, 4587), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (4584, 4587), False, 'import pickle, csv\n'), ((4893, 4906), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (4903, 4906), False, 'import pickle, csv\n')] |
import numpy as np
from unittest import TestCase
from diffprivlib.tools.histograms import histogramdd
from diffprivlib.utils import global_seed, PrivacyLeakWarning
class TestHistogramdd(TestCase):
def test_no_params(self):
a = np.array([1, 2, 3, 4, 5])
with self.assertWarns(PrivacyLeakWarning):
res = histogramdd(a)
self.assertIsNotNone(res)
def test_no_range(self):
a = np.array([1, 2, 3, 4, 5])
with self.assertWarns(PrivacyLeakWarning):
res = histogramdd(a, epsilon=2)
self.assertIsNotNone(res)
def test_same_edges(self):
a = np.array([1, 2, 3, 4, 5])
_, edges = np.histogramdd(a, bins=3, range=[(0, 10)])
_, dp_edges = histogramdd(a, epsilon=1, bins=3, range=[(0, 10)])
for i in range(len(edges)):
self.assertTrue((edges[i] == dp_edges[i]).all())
def test_different_result(self):
global_seed(3141592653)
a = np.array([1, 2, 3, 4, 5])
hist, _ = np.histogramdd(a, bins=3, range=[(0, 10)])
dp_hist, _ = histogramdd(a, epsilon=0.1, bins=3, range=[(0, 10)])
# print("Non-private histogram: %s" % hist)
# print("Private histogram: %s" % dp_hist)
self.assertTrue((hist != dp_hist).any())
def test_density_1d(self):
global_seed(3141592653)
a = np.array([1, 2, 3, 4, 5])
dp_hist, _ = histogramdd(a, epsilon=1, bins=3, range=[(0, 10)], density=True)
# print(dp_hist.sum())
self.assertAlmostEqual(dp_hist.sum(), 1.0 * 3 / 10)
def test_density_2d(self):
global_seed(3141592653)
a = np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]).T
dp_hist, _ = histogramdd(a, epsilon=1, bins=3, range=[(0, 10), (0, 10)], density=True)
# print(dp_hist.sum())
self.assertAlmostEqual(dp_hist.sum(), 1.0 * (3 / 10) ** 2)
| [
"diffprivlib.utils.global_seed",
"numpy.histogramdd",
"numpy.array",
"diffprivlib.tools.histograms.histogramdd"
] | [((242, 267), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (250, 267), True, 'import numpy as np\n'), ((428, 453), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (436, 453), True, 'import numpy as np\n'), ((627, 652), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (635, 652), True, 'import numpy as np\n'), ((672, 714), 'numpy.histogramdd', 'np.histogramdd', (['a'], {'bins': '(3)', 'range': '[(0, 10)]'}), '(a, bins=3, range=[(0, 10)])\n', (686, 714), True, 'import numpy as np\n'), ((737, 787), 'diffprivlib.tools.histograms.histogramdd', 'histogramdd', (['a'], {'epsilon': '(1)', 'bins': '(3)', 'range': '[(0, 10)]'}), '(a, epsilon=1, bins=3, range=[(0, 10)])\n', (748, 787), False, 'from diffprivlib.tools.histograms import histogramdd\n'), ((932, 955), 'diffprivlib.utils.global_seed', 'global_seed', (['(3141592653)'], {}), '(3141592653)\n', (943, 955), False, 'from diffprivlib.utils import global_seed, PrivacyLeakWarning\n'), ((968, 993), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (976, 993), True, 'import numpy as np\n'), ((1012, 1054), 'numpy.histogramdd', 'np.histogramdd', (['a'], {'bins': '(3)', 'range': '[(0, 10)]'}), '(a, bins=3, range=[(0, 10)])\n', (1026, 1054), True, 'import numpy as np\n'), ((1076, 1128), 'diffprivlib.tools.histograms.histogramdd', 'histogramdd', (['a'], {'epsilon': '(0.1)', 'bins': '(3)', 'range': '[(0, 10)]'}), '(a, epsilon=0.1, bins=3, range=[(0, 10)])\n', (1087, 1128), False, 'from diffprivlib.tools.histograms import histogramdd\n'), ((1322, 1345), 'diffprivlib.utils.global_seed', 'global_seed', (['(3141592653)'], {}), '(3141592653)\n', (1333, 1345), False, 'from diffprivlib.utils import global_seed, PrivacyLeakWarning\n'), ((1358, 1383), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (1366, 1383), True, 'import numpy as np\n'), ((1405, 1469), 
'diffprivlib.tools.histograms.histogramdd', 'histogramdd', (['a'], {'epsilon': '(1)', 'bins': '(3)', 'range': '[(0, 10)]', 'density': '(True)'}), '(a, epsilon=1, bins=3, range=[(0, 10)], density=True)\n', (1416, 1469), False, 'from diffprivlib.tools.histograms import histogramdd\n'), ((1603, 1626), 'diffprivlib.utils.global_seed', 'global_seed', (['(3141592653)'], {}), '(3141592653)\n', (1614, 1626), False, 'from diffprivlib.utils import global_seed, PrivacyLeakWarning\n'), ((1708, 1781), 'diffprivlib.tools.histograms.histogramdd', 'histogramdd', (['a'], {'epsilon': '(1)', 'bins': '(3)', 'range': '[(0, 10), (0, 10)]', 'density': '(True)'}), '(a, epsilon=1, bins=3, range=[(0, 10), (0, 10)], density=True)\n', (1719, 1781), False, 'from diffprivlib.tools.histograms import histogramdd\n'), ((337, 351), 'diffprivlib.tools.histograms.histogramdd', 'histogramdd', (['a'], {}), '(a)\n', (348, 351), False, 'from diffprivlib.tools.histograms import histogramdd\n'), ((523, 548), 'diffprivlib.tools.histograms.histogramdd', 'histogramdd', (['a'], {'epsilon': '(2)'}), '(a, epsilon=2)\n', (534, 548), False, 'from diffprivlib.tools.histograms import histogramdd\n'), ((1639, 1684), 'numpy.array', 'np.array', (['[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]'], {}), '([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])\n', (1647, 1684), True, 'import numpy as np\n')] |
import numpy as np
from periodicity.core import TSeries
from periodicity.spectral import GLS
def test_gls_default_frequency_grid():
t0 = 2.5
ts = 0.1
fs = 1 / ts
f0 = 1 / t0
time = np.arange(0, t0 + ts, ts)
signal = TSeries(time)
gls = GLS(n=1)
ls = gls(signal)
freq = ls.frequency
# frequencies are sorted
assert sorted(freq) == list(freq)
# minimum frequency corresponds to a half-cycle within the baseline
assert freq[0] == f0 / 2
# maximum frequency is half the sampling rate
assert np.round(freq[-1], 6) == fs / 2
# uniform grid with spacing equal to f0
assert np.max(np.abs(np.diff(freq) - f0)) < 1e-10
def test_can_find_periods():
sine = TSeries(values=np.sin((np.arange(100) / 100) * 20 * np.pi))
gls = GLS()
ls = gls(sine)
assert ls.period_at_highest_peak == 10.0
| [
"periodicity.spectral.GLS",
"numpy.diff",
"numpy.arange",
"periodicity.core.TSeries",
"numpy.round"
] | [((204, 229), 'numpy.arange', 'np.arange', (['(0)', '(t0 + ts)', 'ts'], {}), '(0, t0 + ts, ts)\n', (213, 229), True, 'import numpy as np\n'), ((243, 256), 'periodicity.core.TSeries', 'TSeries', (['time'], {}), '(time)\n', (250, 256), False, 'from periodicity.core import TSeries\n'), ((267, 275), 'periodicity.spectral.GLS', 'GLS', ([], {'n': '(1)'}), '(n=1)\n', (270, 275), False, 'from periodicity.spectral import GLS\n'), ((792, 797), 'periodicity.spectral.GLS', 'GLS', ([], {}), '()\n', (795, 797), False, 'from periodicity.spectral import GLS\n'), ((550, 571), 'numpy.round', 'np.round', (['freq[-1]', '(6)'], {}), '(freq[-1], 6)\n', (558, 571), True, 'import numpy as np\n'), ((651, 664), 'numpy.diff', 'np.diff', (['freq'], {}), '(freq)\n', (658, 664), True, 'import numpy as np\n'), ((745, 759), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (754, 759), True, 'import numpy as np\n')] |
import os
import glob
import tensorflow as tf
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import precision_recall_fscore_support, mean_squared_error
import numpy as np
np.random.seed(0)
# Local imports
import dataset
# def evaluate(task, data, model_dir, model_types, voting_type='hard_voting', dataset_split='full_dataset', n_folds = 5):
def evaluate(data, config):
print('Loading data...')
if config.task == 'classification':
y = data['y_clf']
elif config.task == 'regression':
y = data['y_reg']
model_dir = config.model_dir
model_types = config.model_types
voting_type = config.voting_type
dataset_split = config.dataset_split
n_folds = config.n_folds
task = config.task
saved_model_types = {}
for m in model_types:
model_files = sorted(glob.glob(os.path.join(model_dir, '{}/*.h5'.format(m))))
saved_models = list(map(lambda x: tf.keras.models.load_model(x), model_files))
saved_model_types[m] = saved_models
print('Loading models from {}'.format(model_dir))
print('Using {} on {}'.format(voting_type, dataset_split))
print('Models evaluated ', model_types)
train_accuracies = []
val_accuracies = []
# if dataset_split == 'full_dataset': # compare features need to be projected
# if len(model_types) == 1:
# if m == 'compare':
# m = model_types[0]
# accuracy = get_individual_accuracy(saved_model_types[m][0], X_compare, y)
# else:
# m = model_types[0]
# accuracy = get_individual_accuracy(saved_model_types[m][0], data[m], y)
# else:
# models = []
# features = []
# for m in model_types:
# models.append(saved_model_types[m][2])
# if m == 'compare':
# features.append(X_compare)
# else:
# features.append(data[m])
# print('Full dataset')
# accuracy, learnt_voter = get_ensemble_accuracy(models, features, y, voting_type)
if dataset_split == 'k_fold':
fold = 0
for train_index, val_index in KFold(n_folds).split(y):
compare_train, compare_val = data['compare'][train_index], data['compare'][val_index]
y_train, y_val = y[train_index], y[val_index]
sc = StandardScaler()
sc.fit(compare_train)
compare_train = sc.transform(compare_train)
compare_val = sc.transform(compare_val)
pca = PCA(n_components=config.compare_features_size)
pca.fit(compare_train)
compare_train = pca.transform(compare_train)
compare_val = pca.transform(compare_val)
if len(model_types) == 1:
m = model_types[0]
if m == 'compare':
print('Fold {}'.format(fold+1))
print('Train')
train_accuracy = get_individual_accuracy(task, saved_model_types[m][fold], compare_train, y_train, fold=fold)
print('Val')
val_accuracy = get_individual_accuracy(task, saved_model_types[m][fold], compare_val, y_val, fold=fold)
else:
print('Fold {}'.format(fold+1))
print('Train')
train_accuracy = get_individual_accuracy(task, saved_model_types[m][fold], data[m][train_index], y_train, fold=fold)
print('Val')
val_accuracy = get_individual_accuracy(task, saved_model_types[m][fold], data[m][val_index], y_val, fold=fold)
else:
models = []
features = []
for m in model_types:
models.append(saved_model_types[m][fold])
if m == 'compare':
features.append(compare_train)
else:
features.append(data[m][train_index])
print('Fold {}'.format(fold+1))
print('Train')
train_accuracy, learnt_voter = get_ensemble_accuracy(task, models, features, y_train, voting_type)
print('Val')
features = []
for m in model_types:
if m == 'compare':
features.append(compare_val)
else:
features.append(data[m][val_index])
val_accuracy, _ = get_ensemble_accuracy(task, models, features, y_val, voting_type, learnt_voter=learnt_voter, fold=fold)
print('----'*10)
train_accuracies.append(train_accuracy)
val_accuracies.append(val_accuracy)
fold+=1
print('Train mean: {:.3f}'.format(np.mean(train_accuracies)))
print('Train std: {:.3f}'.format(np.std(train_accuracies)))
if len(val_accuracies) > 0:
print('Val mean: {:.3f}'.format(np.mean(val_accuracies)))
print('Val std: {:.3f}'.format(np.std(val_accuracies)))
def get_individual_accuracy(task, model, feature, y, fold=None):
if task == 'classification':
preds = model.predict(feature)
preds = np.argmax(preds, axis=-1)
accuracy = accuracy_score(np.argmax(y, axis=-1), preds)
report = precision_recall_fscore_support(np.argmax(y, axis=-1), preds, average='binary')
print('precision: {:.3f}, recall: {:.3f}, f1_score: {:.3f}, accuracy: {:.3f}'.format(report[0], report[1], report[2], accuracy))
return accuracy
elif task == 'regression':
y = np.array(y)
preds = model.predict(feature)
print(len([i for i in preds if i>=26]))
score = mean_squared_error(np.expand_dims(y, axis=-1), preds, squared=False)
return score
def get_ensemble_accuracy(task, models, features, y, voting_type, num_classes=2, learnt_voter=None, fold=None):
if task == 'classification':
probs = []
for model, feature in zip(models, features):
pred = model.predict(feature)
probs.append(pred)
probs = np.stack(probs, axis=1)
if voting_type=='hard_voting':
model_predictions = np.argmax(probs, axis=-1)
model_predictions = np.squeeze(model_predictions)
voted_predictions = [max(set(i), key = list(i).count) for i in model_predictions]
elif voting_type=='soft_voting':
model_predictions = np.sum(probs, axis=1)
voted_predictions = np.argmax(model_predictions, axis=-1)
elif voting_type=='learnt_voting':
model_predictions = np.reshape(probs, (len(y), -1))
if learnt_voter is None:
learnt_voter = LogisticRegression(C=0.1).fit(model_predictions, np.argmax(y, axis=-1))
# print('Voter coef ', voter.coef_)
voted_predictions = learnt_voter.predict(model_predictions)
accuracy = accuracy_score(np.argmax(y, axis=-1), voted_predictions)
report = precision_recall_fscore_support(np.argmax(y, axis=-1), voted_predictions, average='binary')
print('precision: {:.3f}, recall: {:.3f}, f1_score: {:.3f}, accuracy: {:.3f}'.format(report[0], report[1], report[2], accuracy))
return accuracy, learnt_voter
elif task == 'regression':
preds = []
for model, feature in zip(models, features):
probs = model.predict(feature)
preds.append(probs)
preds = np.stack(preds, axis=1) # 86,3,1
voted_predictions = np.mean(preds, axis=1)
score = mean_squared_error(np.expand_dims(y, axis=-1), voted_predictions, squared=False)
print('rmse: {:.3f}'.format(score))
return score, None
| [
"numpy.stack",
"numpy.random.seed",
"sklearn.preprocessing.StandardScaler",
"numpy.sum",
"numpy.argmax",
"numpy.std",
"tensorflow.keras.models.load_model",
"numpy.expand_dims",
"sklearn.model_selection.KFold",
"sklearn.linear_model.LogisticRegression",
"numpy.mean",
"sklearn.decomposition.PCA"... | [((370, 387), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (384, 387), True, 'import numpy as np\n'), ((4517, 4542), 'numpy.argmax', 'np.argmax', (['preds'], {'axis': '(-1)'}), '(preds, axis=-1)\n', (4526, 4542), True, 'import numpy as np\n'), ((5328, 5351), 'numpy.stack', 'np.stack', (['probs'], {'axis': '(1)'}), '(probs, axis=1)\n', (5336, 5351), True, 'import numpy as np\n'), ((2278, 2294), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2292, 2294), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2421, 2467), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'config.compare_features_size'}), '(n_components=config.compare_features_size)\n', (2424, 2467), False, 'from sklearn.decomposition import PCA\n'), ((4571, 4592), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(-1)'}), '(y, axis=-1)\n', (4580, 4592), True, 'import numpy as np\n'), ((4644, 4665), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(-1)'}), '(y, axis=-1)\n', (4653, 4665), True, 'import numpy as np\n'), ((4876, 4887), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (4884, 4887), True, 'import numpy as np\n'), ((5409, 5434), 'numpy.argmax', 'np.argmax', (['probs'], {'axis': '(-1)'}), '(probs, axis=-1)\n', (5418, 5434), True, 'import numpy as np\n'), ((5458, 5487), 'numpy.squeeze', 'np.squeeze', (['model_predictions'], {}), '(model_predictions)\n', (5468, 5487), True, 'import numpy as np\n'), ((6056, 6077), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(-1)'}), '(y, axis=-1)\n', (6065, 6077), True, 'import numpy as np\n'), ((6141, 6162), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(-1)'}), '(y, axis=-1)\n', (6150, 6162), True, 'import numpy as np\n'), ((6523, 6546), 'numpy.stack', 'np.stack', (['preds'], {'axis': '(1)'}), '(preds, axis=1)\n', (6531, 6546), True, 'import numpy as np\n'), ((6578, 6600), 'numpy.mean', 'np.mean', (['preds'], {'axis': '(1)'}), '(preds, axis=1)\n', (6585, 6600), 
True, 'import numpy as np\n'), ((2106, 2120), 'sklearn.model_selection.KFold', 'KFold', (['n_folds'], {}), '(n_folds)\n', (2111, 2120), False, 'from sklearn.model_selection import KFold\n'), ((4137, 4162), 'numpy.mean', 'np.mean', (['train_accuracies'], {}), '(train_accuracies)\n', (4144, 4162), True, 'import numpy as np\n'), ((4200, 4224), 'numpy.std', 'np.std', (['train_accuracies'], {}), '(train_accuracies)\n', (4206, 4224), True, 'import numpy as np\n'), ((4993, 5019), 'numpy.expand_dims', 'np.expand_dims', (['y'], {'axis': '(-1)'}), '(y, axis=-1)\n', (5007, 5019), True, 'import numpy as np\n'), ((5631, 5652), 'numpy.sum', 'np.sum', (['probs'], {'axis': '(1)'}), '(probs, axis=1)\n', (5637, 5652), True, 'import numpy as np\n'), ((5676, 5713), 'numpy.argmax', 'np.argmax', (['model_predictions'], {'axis': '(-1)'}), '(model_predictions, axis=-1)\n', (5685, 5713), True, 'import numpy as np\n'), ((6631, 6657), 'numpy.expand_dims', 'np.expand_dims', (['y'], {'axis': '(-1)'}), '(y, axis=-1)\n', (6645, 6657), True, 'import numpy as np\n'), ((1059, 1088), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['x'], {}), '(x)\n', (1085, 1088), True, 'import tensorflow as tf\n'), ((4292, 4315), 'numpy.mean', 'np.mean', (['val_accuracies'], {}), '(val_accuracies)\n', (4299, 4315), True, 'import numpy as np\n'), ((4352, 4374), 'numpy.std', 'np.std', (['val_accuracies'], {}), '(val_accuracies)\n', (4358, 4374), True, 'import numpy as np\n'), ((5902, 5923), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(-1)'}), '(y, axis=-1)\n', (5911, 5923), True, 'import numpy as np\n'), ((5853, 5878), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': '(0.1)'}), '(C=0.1)\n', (5871, 5878), False, 'from sklearn.linear_model import LogisticRegression\n')] |
from configs import general_config,stacking_config,modelDict
from sklearn.model_selection import KFold,GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.externals import joblib
from models.TextCNN import model as TextCNN
from models.TextRNN import model as TextRNN
from models.CRNN import model as CRNN
from models.RCNN import model as RCNN
from models.HAN import model as HAN
from utils import ensure_dir_exist,WriteToSubmission,my_logger
from data_helpers.utils import getNonstaticWordDict,create_visual_metadata,readNewFile
import numpy as np, pandas as pd
import os
def createCrossValidationData(num_cv=5):
trainingFile=general_config.training_file
with open(trainingFile,'r') as f:
raw_data=np.asarray(f.readlines())
saveDir=ensure_dir_exist(general_config.data_dir+"/cv/"+str(num_cv))
kf=KFold(num_cv,random_state=1234+num_cv,shuffle=True)
count=0
for train_index,test_index in kf.split(raw_data):
train=raw_data[train_index]
test=raw_data[test_index]
with open(saveDir+"/train"+str(count)+".txt",'w') as f:
f.writelines(train)
getNonstaticWordDict(trainFile=saveDir+"/train"+str(count)+".txt",
global_v2i_path=general_config.global_nonstatic_v2i_path)
create_visual_metadata(int2vocab_path=saveDir+"/train"+str(count)+"_i2v.json")
with open(saveDir+"/valid"+str(count)+".txt",'w') as f:
f.writelines(test)
count+=1
class model(object):
def __init__(self,
base_model_list=stacking_config.base_model_list,
num_cv=stacking_config.num_cv):
self.base_model_list = base_model_list.split("-")
self.num_models=len(self.base_model_list)
self.num_cv=num_cv
self.dataDir = general_config.data_dir + "/cv/" + str(self.num_cv)
if not os.path.exists(self.dataDir):
createCrossValidationData(self.num_cv)
self.models = []
self.models_name = []
for n in range(self.num_models):
base_model = self.base_model_list[n]
assert base_model in ["1", "2", "3", "4","5"], "Invalid base model type!"
if base_model == "1":
model = TextCNN()
elif base_model == "2":
model = TextRNN()
elif base_model == "3":
model = CRNN()
elif base_model=="4":
model = RCNN()
else:
model=HAN()
self.models.append(model)
self.models_name.append(modelDict[base_model])
self.logDir = ensure_dir_exist(general_config.log_dir + "/stacking/"
+ "-".join(self.models_name)+"/"+str(self.num_cv))
self.saveDir = ensure_dir_exist(general_config.save_dir + "/stacking/"
+ "-".join(self.models_name)+"/"+str(self.num_cv))
self.classifier=LogisticRegression()
self.logger=my_logger(self.logDir+"/log.txt")
# level-l train
def train_1(self):
for i in range(self.num_models):
model=self.models[i]
model_name=self.models_name[i]
log_dir_tmp=self.logDir + "/" + model_name
save_dir_tmp=self.saveDir+"/"+model_name
for i in range(self.num_cv):
log_dir=log_dir_tmp+"/"+str(i)
save_dir=save_dir_tmp+"/"+str(i)
trainFile = self.dataDir + "/train" + str(i) + ".txt"
if not os.path.exists(save_dir):
model.fit(trainFile=trainFile,with_validation=True,
log_dir=log_dir,save_dir=save_dir,
num_visual=0)
# level-2 train
def train_2(self):
predicted_train=None
id_train=None
for i in range(self.num_models):
model=self.models[i]
model_name=self.models_name[i]
save_dir_tmp=self.saveDir+"/"+model_name
res={}
for i in range(self.num_cv):
save_dir=save_dir_tmp+"/"+str(i)
if model_name=="TextCNN":
save_dir+="/nonstatic"
save_dir+="/train_valid"
testFile = self.dataDir + "/valid" + str(i) + ".txt"
vocab2intPath=testFile.replace("valid","train").replace(".txt","_v2i.json")
resPath=save_dir + "/valid_predicted.csv"
if os.path.exists(resPath):
res_={}
res_tmp=pd.read_csv(filepath_or_buffer=resPath)
for id,label in zip(res_tmp["id"].values,res_tmp["label"].values):
res_[id]=label
else:
res_=model.predict(testFile=testFile,vocab2intPath=vocab2intPath,
load_path=save_dir,
resPath=resPath)
res.update(res_)
res = [[key, value] for (key, value) in res.items()]
tmp = pd.DataFrame(res, columns=["id", "label"])
tmp = tmp.sort_values(by="id", axis=0, ascending=True)
id_train=np.reshape(tmp["id"].values,newshape=(-1,))
try:
predicted_train=np.concatenate([predicted_train,tmp["label"].values.reshape((-1,1))],
axis=-1)
except:
predicted_train=tmp["label"].values.reshape((-1,1))
assert predicted_train.shape[1]==self.num_models
id,_,label=readNewFile(file=general_config.training_file)
assert np.allclose(np.array(id),np.array(id_train)),"Inconsistent indices!"
parameters = {'C': [0.001,0.01,0.1,1,10,100]}# Inverse of regularization strength;
# must be a positive float.
# Like in support vector machines, smaller values specify stronger regularization.
self.classifier = GridSearchCV(self.classifier, parameters,cv=self.num_cv,refit=True)
self.classifier.fit(predicted_train,np.array(label))
self.logger.info(self.classifier.cv_results_)
self.logger.info(self.classifier.get_params())
save_path=self.saveDir+"/lr.pkl"
joblib.dump(self.classifier, save_path)
def evaluate(self,validFile=None):
if validFile is None:
trainFile=general_config.training_file
else:
trainFile=validFile
predicted_train = None
id_train = None
for i in range(self.num_models):
model = self.models[i]
model_name = self.models_name[i]
save_dir_tmp = self.saveDir + "/" + model_name
res = None
for i in range(self.num_cv):
save_dir = save_dir_tmp + "/" + str(i)
if model_name == "TextCNN":
save_dir += "/nonstatic"
save_dir += "/train_valid"
vocab2intPath = (self.dataDir + "/train" + str(i) + ".txt").replace(".txt", "_v2i.json")
resPath = save_dir + "/train_predicted.csv"
if os.path.exists(resPath):
res_ = {}
res_tmp = pd.read_csv(filepath_or_buffer=resPath)
for id, label in zip(res_tmp["id"].values, res_tmp["label"].values):
res_[id] = label
else:
res_ = model.predict(testFile=trainFile, vocab2intPath=vocab2intPath,
load_path=save_dir, resPath=resPath)
res_ = [[key, value] for (key, value) in res_.items()]
res_ = pd.DataFrame(res_, columns=["id", "label"])
res_ = res_.sort_values(by="id", axis=0, ascending=True)
if i == 0:
id_train = res_["id"].values
else:
assert np.allclose(id_train, res_["id"].values)
try:
res+= res_["label"].values
except:
res= res_["label"].values
res = res / self.num_cv
try:
predicted_train = np.concatenate([predicted_train, res.reshape((-1, 1))], axis=-1)
except:
predicted_train = res.reshape((-1, 1))
assert predicted_train.shape[1] == self.num_models
id, _, label = readNewFile(trainFile)
assert np.allclose(np.array(id), np.array(id_train)), "Inconsistent indices!"
self.classifier = joblib.load(self.saveDir + "/lr.pkl")
predicted_ = self.classifier.predict(predicted_train)
train_accuracy = np.mean(np.equal(np.array(label).reshape((-1,)), np.array(predicted_).reshape((-1,))))
self.logger.info("Accuracy: %s" % train_accuracy)
return train_accuracy
def predict(self, testFile=None):
    """Predict labels for a test file with the stacked ensemble.

    Averages each base model's per-fold predictions on the test file
    (loading cached CSVs when available), stacks the averaged columns
    into a feature matrix, applies the saved logistic-regression
    meta-classifier, and writes an (id, label) submission CSV under the
    results directory.

    Args:
        testFile: path to the file to predict; when None,
            general_config.testing_file is used.
    """
    if testFile is None:
        testFile = general_config.testing_file
    predicted_test = None  # stacked (n_samples, num_models) feature matrix
    id_test = None
    # NOTE: the original code reused `i` for both the model loop and the
    # CV loop; distinct names (`m`, `cv`) remove the shadowing hazard.
    for m in range(self.num_models):
        model = self.models[m]
        model_name = self.models_name[m]
        save_dir_tmp = self.saveDir + "/" + model_name
        res = None  # running sum of per-fold label predictions
        for cv in range(self.num_cv):
            save_dir = save_dir_tmp + "/" + str(cv)
            if model_name == "TextCNN":
                save_dir += "/nonstatic"
            save_dir += "/train_valid"
            vocab2intPath = (self.dataDir + "/train" + str(cv) + ".txt").replace(".txt", "_v2i.json")
            resPath = save_dir + "/test_predicted.csv"
            if os.path.exists(resPath):
                # Reuse cached fold predictions instead of re-running the model.
                res_ = {}
                res_tmp = pd.read_csv(filepath_or_buffer=resPath)
                for row_id, label in zip(res_tmp["id"].values, res_tmp["label"].values):
                    res_[row_id] = label
            else:
                res_ = model.predict(testFile=testFile, vocab2intPath=vocab2intPath,
                                     load_path=save_dir,
                                     resPath=resPath)
            res_ = [[key, value] for (key, value) in res_.items()]
            tmp = pd.DataFrame(res_, columns=["id", "label"])
            tmp = tmp.sort_values(by="id", axis=0, ascending=True)
            if cv == 0:
                id_test = tmp["id"].values
            else:
                # Every fold must predict the same, identically ordered ids.
                assert np.allclose(id_test, tmp["id"].values)
            # Accumulate explicitly; the original bare try/except swallowed
            # every error, and its in-place `res +=` mutated the first
            # fold's DataFrame buffer through the shared ndarray view.
            if res is None:
                res = tmp["label"].values
            else:
                res = res + tmp["label"].values
        res = res / self.num_cv  # average over CV folds
        col = res.reshape((-1, 1))
        if predicted_test is None:
            predicted_test = col
        else:
            predicted_test = np.concatenate([predicted_test, col], axis=-1)
    assert predicted_test.shape[1] == self.num_models
    self.classifier = joblib.load(self.saveDir + "/lr.pkl")
    predicted = self.classifier.predict(predicted_test)
    res = np.concatenate([id_test.reshape((-1, 1)), predicted.reshape((-1, 1))], axis=1)
    WriteToSubmission(res, fileName=self.saveDir.replace("checkpoints", "results") + "/predicted.csv")
"pandas.DataFrame",
"sklearn.model_selection.GridSearchCV",
"sklearn.externals.joblib.dump",
"models.TextCNN.model",
"models.CRNN.model",
"pandas.read_csv",
"numpy.allclose",
"data_helpers.utils.readNewFile",
"os.path.exists",
"sklearn.model_selection.KFold",
"sklearn.linear_model.LogisticRegres... | [((848, 903), 'sklearn.model_selection.KFold', 'KFold', (['num_cv'], {'random_state': '(1234 + num_cv)', 'shuffle': '(True)'}), '(num_cv, random_state=1234 + num_cv, shuffle=True)\n', (853, 903), False, 'from sklearn.model_selection import KFold, GridSearchCV\n'), ((2967, 2987), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (2985, 2987), False, 'from sklearn.linear_model import LogisticRegression\n'), ((3008, 3043), 'utils.my_logger', 'my_logger', (["(self.logDir + '/log.txt')"], {}), "(self.logDir + '/log.txt')\n", (3017, 3043), False, 'from utils import ensure_dir_exist, WriteToSubmission, my_logger\n'), ((5572, 5618), 'data_helpers.utils.readNewFile', 'readNewFile', ([], {'file': 'general_config.training_file'}), '(file=general_config.training_file)\n', (5583, 5618), False, 'from data_helpers.utils import getNonstaticWordDict, create_visual_metadata, readNewFile\n'), ((5947, 6016), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['self.classifier', 'parameters'], {'cv': 'self.num_cv', 'refit': '(True)'}), '(self.classifier, parameters, cv=self.num_cv, refit=True)\n', (5959, 6016), False, 'from sklearn.model_selection import KFold, GridSearchCV\n'), ((6234, 6273), 'sklearn.externals.joblib.dump', 'joblib.dump', (['self.classifier', 'save_path'], {}), '(self.classifier, save_path)\n', (6245, 6273), False, 'from sklearn.externals import joblib\n'), ((8377, 8399), 'data_helpers.utils.readNewFile', 'readNewFile', (['trainFile'], {}), '(trainFile)\n', (8388, 8399), False, 'from data_helpers.utils import getNonstaticWordDict, create_visual_metadata, readNewFile\n'), ((8512, 8549), 'sklearn.externals.joblib.load', 'joblib.load', (["(self.saveDir + '/lr.pkl')"], {}), "(self.saveDir + '/lr.pkl')\n", (8523, 8549), False, 'from sklearn.externals import joblib\n'), ((10845, 10882), 'sklearn.externals.joblib.load', 'joblib.load', (["(self.saveDir + '/lr.pkl')"], {}), "(self.saveDir + 
'/lr.pkl')\n", (10856, 10882), False, 'from sklearn.externals import joblib\n'), ((1880, 1908), 'os.path.exists', 'os.path.exists', (['self.dataDir'], {}), '(self.dataDir)\n', (1894, 1908), False, 'import os\n'), ((5058, 5100), 'pandas.DataFrame', 'pd.DataFrame', (['res'], {'columns': "['id', 'label']"}), "(res, columns=['id', 'label'])\n", (5070, 5100), True, 'import numpy as np, pandas as pd\n'), ((5189, 5233), 'numpy.reshape', 'np.reshape', (["tmp['id'].values"], {'newshape': '(-1,)'}), "(tmp['id'].values, newshape=(-1,))\n", (5199, 5233), True, 'import numpy as np, pandas as pd\n'), ((5646, 5658), 'numpy.array', 'np.array', (['id'], {}), '(id)\n', (5654, 5658), True, 'import numpy as np, pandas as pd\n'), ((5659, 5677), 'numpy.array', 'np.array', (['id_train'], {}), '(id_train)\n', (5667, 5677), True, 'import numpy as np, pandas as pd\n'), ((6059, 6074), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (6067, 6074), True, 'import numpy as np, pandas as pd\n'), ((8427, 8439), 'numpy.array', 'np.array', (['id'], {}), '(id)\n', (8435, 8439), True, 'import numpy as np, pandas as pd\n'), ((8441, 8459), 'numpy.array', 'np.array', (['id_train'], {}), '(id_train)\n', (8449, 8459), True, 'import numpy as np, pandas as pd\n'), ((2251, 2260), 'models.TextCNN.model', 'TextCNN', ([], {}), '()\n', (2258, 2260), True, 'from models.TextCNN import model as TextCNN\n'), ((4486, 4509), 'os.path.exists', 'os.path.exists', (['resPath'], {}), '(resPath)\n', (4500, 4509), False, 'import os\n'), ((7111, 7134), 'os.path.exists', 'os.path.exists', (['resPath'], {}), '(resPath)\n', (7125, 7134), False, 'import os\n'), ((7647, 7690), 'pandas.DataFrame', 'pd.DataFrame', (['res_'], {'columns': "['id', 'label']"}), "(res_, columns=['id', 'label'])\n", (7659, 7690), True, 'import numpy as np, pandas as pd\n'), ((9576, 9599), 'os.path.exists', 'os.path.exists', (['resPath'], {}), '(resPath)\n', (9590, 9599), False, 'import os\n'), ((10140, 10183), 'pandas.DataFrame', 'pd.DataFrame', 
(['res_'], {'columns': "['id', 'label']"}), "(res_, columns=['id', 'label'])\n", (10152, 10183), True, 'import numpy as np, pandas as pd\n'), ((2321, 2330), 'models.TextRNN.model', 'TextRNN', ([], {}), '()\n', (2328, 2330), True, 'from models.TextRNN import model as TextRNN\n'), ((3541, 3565), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (3555, 3565), False, 'import os\n'), ((4567, 4606), 'pandas.read_csv', 'pd.read_csv', ([], {'filepath_or_buffer': 'resPath'}), '(filepath_or_buffer=resPath)\n', (4578, 4606), True, 'import numpy as np, pandas as pd\n'), ((7196, 7235), 'pandas.read_csv', 'pd.read_csv', ([], {'filepath_or_buffer': 'resPath'}), '(filepath_or_buffer=resPath)\n', (7207, 7235), True, 'import numpy as np, pandas as pd\n'), ((7889, 7929), 'numpy.allclose', 'np.allclose', (['id_train', "res_['id'].values"], {}), "(id_train, res_['id'].values)\n", (7900, 7929), True, 'import numpy as np, pandas as pd\n'), ((9657, 9696), 'pandas.read_csv', 'pd.read_csv', ([], {'filepath_or_buffer': 'resPath'}), '(filepath_or_buffer=resPath)\n', (9668, 9696), True, 'import numpy as np, pandas as pd\n'), ((10374, 10412), 'numpy.allclose', 'np.allclose', (['id_test', "tmp['id'].values"], {}), "(id_test, tmp['id'].values)\n", (10385, 10412), True, 'import numpy as np, pandas as pd\n'), ((2391, 2397), 'models.CRNN.model', 'CRNN', ([], {}), '()\n', (2395, 2397), True, 'from models.CRNN import model as CRNN\n'), ((8654, 8669), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (8662, 8669), True, 'import numpy as np, pandas as pd\n'), ((8686, 8706), 'numpy.array', 'np.array', (['predicted_'], {}), '(predicted_)\n', (8694, 8706), True, 'import numpy as np, pandas as pd\n'), ((2456, 2462), 'models.RCNN.model', 'RCNN', ([], {}), '()\n', (2460, 2462), True, 'from models.RCNN import model as RCNN\n'), ((2503, 2508), 'models.HAN.model', 'HAN', ([], {}), '()\n', (2506, 2508), True, 'from models.HAN import model as HAN\n')] |
import sys
import numpy as np
import scipy.optimize as op
import math
MAX = sys.float_info.max


def wd(a):
    """Log-scale headroom between |a| and the largest finite float.

    Returns 0 by convention when ``a`` is positive infinity; otherwise
    returns ``log(MAX) - log(|a|)``.
    """
    if a == math.inf:
        return 0
    return np.log(MAX) - np.log(np.abs(a))
def average(X):
    """Objective function: wd() applied to the mean of X's first two components."""
    mean_value = (X[0] + X[1]) / 2.0
    return wd(mean_value)
if __name__ == "__main__":
print(
op.basinhopping(
average,
[1, 1],
niter=100,
stepsize=1e2,
minimizer_kwargs={"method": "nelder-mead"},
)
)
| [
"numpy.log",
"numpy.abs",
"scipy.optimize.basinhopping"
] | [((364, 471), 'scipy.optimize.basinhopping', 'op.basinhopping', (['average', '[1, 1]'], {'niter': '(100)', 'stepsize': '(100.0)', 'minimizer_kwargs': "{'method': 'nelder-mead'}"}), "(average, [1, 1], niter=100, stepsize=100.0,\n minimizer_kwargs={'method': 'nelder-mead'})\n", (379, 471), True, 'import scipy.optimize as op\n'), ((197, 208), 'numpy.log', 'np.log', (['MAX'], {}), '(MAX)\n', (203, 208), True, 'import numpy as np\n'), ((218, 227), 'numpy.abs', 'np.abs', (['a'], {}), '(a)\n', (224, 227), True, 'import numpy as np\n')] |
# Neural Networks and Deep Learning with TensorFlow (1.12)
import tensorflow as tf
import numpy as np
if __name__ == "__main__":
# Create TF session
# sesh = tf.Session()
# sesh.run(tf.constant(100))
# Operations
x = tf.constant(2)
y = tf.constant(3)
with tf.Session() as sesh:
print("Operations with Constants")
print("Addition:", sesh.run(x+y))
print("Subtraction:", sesh.run(x-y))
print("Multiplication:", sesh.run(x*y))
print("Division:", sesh.run(x/y))
# # Variable input
# s, t = tf.placeholder(tf.int32), tf.placeholder(tf.int32)
# add = tf.add(s, t)
# sub = tf.add(s, t)
# with tf.Session() as sesh:
# print("Operations with Placeholders")
# print("Addition", sesh.run(add, feed_dict={s:20, y:30}))
# print("Subtraction", sesh.run(sub, feed_dict={s:20, y:30}))
a, b = np.array([[5.0, 5.0]]), np.array([[2.0], [2.0]])
# Convert matricies to tf objects
mat1, mat2 = tf.constant(a), tf.constant(b)
mat_mul = tf.matmul(mat1, mat2)
with tf.Session() as sesh:
print("Matrix multiplication", sesh.run(mat_mul))
| [
"numpy.array",
"tensorflow.matmul",
"tensorflow.Session",
"tensorflow.constant"
] | [((241, 255), 'tensorflow.constant', 'tf.constant', (['(2)'], {}), '(2)\n', (252, 255), True, 'import tensorflow as tf\n'), ((264, 278), 'tensorflow.constant', 'tf.constant', (['(3)'], {}), '(3)\n', (275, 278), True, 'import tensorflow as tf\n'), ((1051, 1072), 'tensorflow.matmul', 'tf.matmul', (['mat1', 'mat2'], {}), '(mat1, mat2)\n', (1060, 1072), True, 'import tensorflow as tf\n'), ((288, 300), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (298, 300), True, 'import tensorflow as tf\n'), ((902, 924), 'numpy.array', 'np.array', (['[[5.0, 5.0]]'], {}), '([[5.0, 5.0]])\n', (910, 924), True, 'import numpy as np\n'), ((926, 950), 'numpy.array', 'np.array', (['[[2.0], [2.0]]'], {}), '([[2.0], [2.0]])\n', (934, 950), True, 'import numpy as np\n'), ((1006, 1020), 'tensorflow.constant', 'tf.constant', (['a'], {}), '(a)\n', (1017, 1020), True, 'import tensorflow as tf\n'), ((1022, 1036), 'tensorflow.constant', 'tf.constant', (['b'], {}), '(b)\n', (1033, 1036), True, 'import tensorflow as tf\n'), ((1082, 1094), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1092, 1094), True, 'import tensorflow as tf\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.