code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import numpy as np
from numpy.fft import fft, ifft, fftshift, fft2, ifft2
class FourierFourierSpatialDiscretization:
    """Pseudo-spectral (Fourier x Fourier) discretization of a periodic 2-D box.

    Built from a config dict with keys 'length_x', 'length_y',
    'num_points_x' and 'num_points_y'.  Provides the physical grid, the
    wavenumber arrays, spectral differentiation in x and y, and an
    exponential low-pass filter for de-aliasing.
    """
    def __init__(self, config):
        self.length_x = config['length_x']
        self.length_y = config['length_y']
        self.num_points_x = config['num_points_x']
        self.num_points_y = config['num_points_y']
        self.__build_grid__()
        self.__build_wavenumbers__()
        self.__build_filter__()

    def __build_grid__(self):
        # endpoint=False because the domain is periodic: the point at
        # x = length_x would duplicate x = 0.  retstep=True also returns
        # the grid spacing.
        x_1d, dx = np.linspace(0, self.length_x, self.num_points_x, False, True)
        y_1d, dy = np.linspace(0, self.length_y, self.num_points_y, False, True)
        self.x, self.y = np.meshgrid(x_1d, y_1d)
        self.min_grid_spacing = min(dx, dy)

    def __build_wavenumbers__(self):
        # Wavenumber arrays scaled by 2*pi/L; kmax/lmax are used to place
        # the filter cutoffs.
        k_1d = fftshift(np.array(range(0, self.num_points_x))*
                        ((2*np.pi)/self.length_x))
        l_1d = fftshift(np.array(range(0, self.num_points_y))*
                        ((2*np.pi)/self.length_y))
        self.kmax = max(k_1d)
        self.lmax = max(l_1d)
        self.k, self.l = np.meshgrid(k_1d, l_1d)

    def __build_filter__(self):
        """Build the exponential spectral filter (1 below the cutoff,
        decaying to epsf at kmax/lmax), applied by filter_field."""
        cutoff = 0.65  # 0.65 typically, CUTOFF OF 0 CORRESPONDS TO HYPERVISCOSITY
        epsf = 1e-15  # FILTER STRENGTH AT HIGH WAVENUMBERS
        kcrit = self.kmax*cutoff
        lcrit = self.lmax*cutoff
        filter_order = 4
        self.filter = np.ones([self.num_points_y, self.num_points_x])
        # Filter in x.
        mask = np.abs(self.k) < kcrit
        self.filter *= (mask + (1-mask)*np.exp(np.log(epsf)*(np.power((self.k-kcrit)/(self.kmax-kcrit), filter_order))))
        # Filter in y.
        mask = np.abs(self.l) < lcrit
        self.filter *= (mask + (1-mask)*np.exp(np.log(epsf)*(np.power((self.l-lcrit)/(self.lmax-lcrit), filter_order))))
        # Remove the Nyquist frequency entirely.
        # BUG FIX: np.floor returns a float, which is not a valid array
        # index (IndexError in modern numpy); integer arithmetic gives the
        # same index (N // 2 + 1 == floor(N/2 + 1) for every integer N).
        self.filter[:, self.num_points_x // 2 + 1] = 0
        self.filter[self.num_points_y // 2 + 1, :] = 0

    def differentiate_x(self, field):
        """Spectral derivative of `field` along the x (second) axis."""
        return np.real(ifft((1.j*self.k)*fft(field, axis=1), axis=1))

    def differentiate_y(self, field):
        """Spectral derivative of `field` along the y (first) axis."""
        return np.real(ifft((1.j*self.l)*fft(field, axis=0), axis=0))

    def filter_field(self, field):
        """Apply the spectral low-pass filter to `field` in place of nothing:
        returns the filtered copy."""
        return np.real(ifft2(self.filter*fft2(field)))
| [
"numpy.abs",
"numpy.ones",
"numpy.power",
"numpy.fft.fft",
"numpy.floor",
"numpy.log",
"numpy.fft.fft2",
"numpy.linspace",
"numpy.meshgrid"
] | [((489, 550), 'numpy.linspace', 'np.linspace', (['(0)', 'self.length_x', 'self.num_points_x', '(False)', '(True)'], {}), '(0, self.length_x, self.num_points_x, False, True)\n', (500, 550), True, 'import numpy as np\n'), ((570, 631), 'numpy.linspace', 'np.linspace', (['(0)', 'self.length_y', 'self.num_points_y', '(False)', '(True)'], {}), '(0, self.length_y, self.num_points_y, False, True)\n', (581, 631), True, 'import numpy as np\n'), ((657, 680), 'numpy.meshgrid', 'np.meshgrid', (['x_1d', 'y_1d'], {}), '(x_1d, y_1d)\n', (668, 680), True, 'import numpy as np\n'), ((1123, 1146), 'numpy.meshgrid', 'np.meshgrid', (['k_1d', 'l_1d'], {}), '(k_1d, l_1d)\n', (1134, 1146), True, 'import numpy as np\n'), ((1438, 1485), 'numpy.ones', 'np.ones', (['[self.num_points_y, self.num_points_x]'], {}), '([self.num_points_y, self.num_points_x])\n', (1445, 1485), True, 'import numpy as np\n'), ((1524, 1538), 'numpy.abs', 'np.abs', (['self.k'], {}), '(self.k)\n', (1530, 1538), True, 'import numpy as np\n'), ((1707, 1721), 'numpy.abs', 'np.abs', (['self.l'], {}), '(self.l)\n', (1713, 1721), True, 'import numpy as np\n'), ((1923, 1958), 'numpy.floor', 'np.floor', (['(self.num_points_x / 2 + 1)'], {}), '(self.num_points_x / 2 + 1)\n', (1931, 1958), True, 'import numpy as np\n'), ((1980, 2015), 'numpy.floor', 'np.floor', (['(self.num_points_y / 2 + 1)'], {}), '(self.num_points_y / 2 + 1)\n', (1988, 2015), True, 'import numpy as np\n'), ((2101, 2119), 'numpy.fft.fft', 'fft', (['field'], {'axis': '(1)'}), '(field, axis=1)\n', (2104, 2119), False, 'from numpy.fft import fft, ifft, fftshift, fft2, ifft2\n'), ((2210, 2228), 'numpy.fft.fft', 'fft', (['field'], {'axis': '(0)'}), '(field, axis=0)\n', (2213, 2228), False, 'from numpy.fft import fft, ifft, fftshift, fft2, ifft2\n'), ((2316, 2327), 'numpy.fft.fft2', 'fft2', (['field'], {}), '(field)\n', (2320, 2327), False, 'from numpy.fft import fft, ifft, fftshift, fft2, ifft2\n'), ((1594, 1606), 'numpy.log', 'np.log', (['epsf'], {}), 
'(epsf)\n', (1600, 1606), True, 'import numpy as np\n'), ((1608, 1670), 'numpy.power', 'np.power', (['((self.k - kcrit) / (self.kmax - kcrit))', 'filter_order'], {}), '((self.k - kcrit) / (self.kmax - kcrit), filter_order)\n', (1616, 1670), True, 'import numpy as np\n'), ((1777, 1789), 'numpy.log', 'np.log', (['epsf'], {}), '(epsf)\n', (1783, 1789), True, 'import numpy as np\n'), ((1791, 1853), 'numpy.power', 'np.power', (['((self.l - lcrit) / (self.lmax - lcrit))', 'filter_order'], {}), '((self.l - lcrit) / (self.lmax - lcrit), filter_order)\n', (1799, 1853), True, 'import numpy as np\n')] |
from __future__ import print_function, absolute_import
import unittest, sklearn, sklearn.dummy, numpy as np
from SplitClassifier import SplitClassifier
class T(unittest.TestCase):
    """Tests for SplitClassifier built on sklearn constant DummyClassifiers.

    The index function passed to SplitClassifier maps each sample to a
    sub-classifier index, so predictions are expected to follow the
    pattern of that index function.
    """
    def test_split_classifier_with_single_classifier(self):
        """A single base classifier split over two index groups."""
        c = sklearn.dummy.DummyClassifier('constant', constant=0)
        sc = SplitClassifier(c, lambda X: np.arange(len(X)) % 2)
        X = np.ones(shape=(100, 3))
        y = np.zeros(100)
        X_test = np.ones(shape=(100, 3))
        sc.fit(X, y)
        # SplitClassifier apparently keeps one fitted copy per index group
        # (it exposes sc.classifiers); mutate the second copy so the two
        # groups predict different constants.
        sc.classifiers[1].constant = 1
        predictions = sc.predict(X_test)
        self.assertEqual((100,), predictions.shape)
        np.testing.assert_array_equal(np.arange(100) % 2, predictions)
    def test_split_classifier_with_multiple_classifier(self):
        """Two distinct classifiers, one per parity group."""
        c0 = sklearn.dummy.DummyClassifier('constant', constant=0)
        c1 = sklearn.dummy.DummyClassifier('constant', constant=1)
        sc = SplitClassifier((c0, c1), lambda X: np.arange(len(X)) % 2)
        X = np.ones(shape=(100, 3))
        y = np.arange(100) % 2
        X_test = np.ones(shape=(100, 3))
        sc.fit(X, y)
        predictions = sc.predict(X_test)
        self.assertEqual((100,), predictions.shape)
        np.testing.assert_array_equal(np.arange(100) % 2, predictions)
    def test_split_classifier_with_3_indexes(self):
        """Three classifiers selected by sample index modulo 3."""
        c0 = sklearn.dummy.DummyClassifier('constant', constant=0)
        c1 = sklearn.dummy.DummyClassifier('constant', constant=1)
        c2 = sklearn.dummy.DummyClassifier('constant', constant=2)
        sc = SplitClassifier((c0, c1, c2), lambda X: np.arange(len(X)) % 3)
        X = np.ones(shape=(100, 3))
        y = np.arange(100) % 3
        X_test = np.ones(shape=(100, 3))
        sc.fit(X, y)
        predictions = sc.predict(X_test)
        self.assertEqual((100,), predictions.shape)
        np.testing.assert_array_equal(np.arange(100) % 3, predictions)
    def test_split_classifier_with_fallback_classifier(self):
        """Samples indexed -1 should be routed to the fallback classifier."""
        c0 = sklearn.dummy.DummyClassifier('constant', constant=0)
        c1 = sklearn.dummy.DummyClassifier('constant', constant=1)
        fallback = sklearn.dummy.DummyClassifier('constant', constant=999)
        def indexer(X):
            # Index -1 marks samples no regular sub-classifier handles.
            indexes = np.arange(len(X)) % 3
            indexes[indexes==2] = -1
            return indexes
        sc = SplitClassifier((c0, c1), indexer, fallback_classifier=fallback)
        X = np.ones(shape=(100, 3))
        y = np.arange(100) % 3
        X_test = np.ones(shape=(100, 3))
        # The fallback is fitted here by hand; presumably sc.fit does not
        # fit it itself -- TODO confirm against SplitClassifier.fit.
        fallback.fit(X, np.array([999] * 100))
        sc.fit(X, y)
        predictions = sc.predict(X_test)
        self.assertEqual((100,), predictions.shape)
        expected = np.arange(100) % 3
        expected[expected==2] = 999
        np.testing.assert_array_equal(expected, predictions) | [
"numpy.ones",
"numpy.arange",
"numpy.array",
"numpy.zeros",
"SplitClassifier.SplitClassifier",
"sklearn.dummy.DummyClassifier",
"numpy.testing.assert_array_equal"
] | [((255, 308), 'sklearn.dummy.DummyClassifier', 'sklearn.dummy.DummyClassifier', (['"""constant"""'], {'constant': '(0)'}), "('constant', constant=0)\n", (284, 308), False, 'import unittest, sklearn, sklearn.dummy, numpy as np\n'), ((380, 403), 'numpy.ones', 'np.ones', ([], {'shape': '(100, 3)'}), '(shape=(100, 3))\n', (387, 403), True, 'import unittest, sklearn, sklearn.dummy, numpy as np\n'), ((413, 426), 'numpy.zeros', 'np.zeros', (['(100)'], {}), '(100)\n', (421, 426), True, 'import unittest, sklearn, sklearn.dummy, numpy as np\n'), ((441, 464), 'numpy.ones', 'np.ones', ([], {'shape': '(100, 3)'}), '(shape=(100, 3))\n', (448, 464), True, 'import unittest, sklearn, sklearn.dummy, numpy as np\n'), ((753, 806), 'sklearn.dummy.DummyClassifier', 'sklearn.dummy.DummyClassifier', (['"""constant"""'], {'constant': '(0)'}), "('constant', constant=0)\n", (782, 806), False, 'import unittest, sklearn, sklearn.dummy, numpy as np\n'), ((817, 870), 'sklearn.dummy.DummyClassifier', 'sklearn.dummy.DummyClassifier', (['"""constant"""'], {'constant': '(1)'}), "('constant', constant=1)\n", (846, 870), False, 'import unittest, sklearn, sklearn.dummy, numpy as np\n'), ((949, 972), 'numpy.ones', 'np.ones', ([], {'shape': '(100, 3)'}), '(shape=(100, 3))\n', (956, 972), True, 'import unittest, sklearn, sklearn.dummy, numpy as np\n'), ((1015, 1038), 'numpy.ones', 'np.ones', ([], {'shape': '(100, 3)'}), '(shape=(100, 3))\n', (1022, 1038), True, 'import unittest, sklearn, sklearn.dummy, numpy as np\n'), ((1279, 1332), 'sklearn.dummy.DummyClassifier', 'sklearn.dummy.DummyClassifier', (['"""constant"""'], {'constant': '(0)'}), "('constant', constant=0)\n", (1308, 1332), False, 'import unittest, sklearn, sklearn.dummy, numpy as np\n'), ((1343, 1396), 'sklearn.dummy.DummyClassifier', 'sklearn.dummy.DummyClassifier', (['"""constant"""'], {'constant': '(1)'}), "('constant', constant=1)\n", (1372, 1396), False, 'import unittest, sklearn, sklearn.dummy, numpy as np\n'), ((1407, 1460), 
'sklearn.dummy.DummyClassifier', 'sklearn.dummy.DummyClassifier', (['"""constant"""'], {'constant': '(2)'}), "('constant', constant=2)\n", (1436, 1460), False, 'import unittest, sklearn, sklearn.dummy, numpy as np\n'), ((1543, 1566), 'numpy.ones', 'np.ones', ([], {'shape': '(100, 3)'}), '(shape=(100, 3))\n', (1550, 1566), True, 'import unittest, sklearn, sklearn.dummy, numpy as np\n'), ((1609, 1632), 'numpy.ones', 'np.ones', ([], {'shape': '(100, 3)'}), '(shape=(100, 3))\n', (1616, 1632), True, 'import unittest, sklearn, sklearn.dummy, numpy as np\n'), ((1883, 1936), 'sklearn.dummy.DummyClassifier', 'sklearn.dummy.DummyClassifier', (['"""constant"""'], {'constant': '(0)'}), "('constant', constant=0)\n", (1912, 1936), False, 'import unittest, sklearn, sklearn.dummy, numpy as np\n'), ((1947, 2000), 'sklearn.dummy.DummyClassifier', 'sklearn.dummy.DummyClassifier', (['"""constant"""'], {'constant': '(1)'}), "('constant', constant=1)\n", (1976, 2000), False, 'import unittest, sklearn, sklearn.dummy, numpy as np\n'), ((2017, 2072), 'sklearn.dummy.DummyClassifier', 'sklearn.dummy.DummyClassifier', (['"""constant"""'], {'constant': '(999)'}), "('constant', constant=999)\n", (2046, 2072), False, 'import unittest, sklearn, sklearn.dummy, numpy as np\n'), ((2201, 2265), 'SplitClassifier.SplitClassifier', 'SplitClassifier', (['(c0, c1)', 'indexer'], {'fallback_classifier': 'fallback'}), '((c0, c1), indexer, fallback_classifier=fallback)\n', (2216, 2265), False, 'from SplitClassifier import SplitClassifier\n'), ((2275, 2298), 'numpy.ones', 'np.ones', ([], {'shape': '(100, 3)'}), '(shape=(100, 3))\n', (2282, 2298), True, 'import unittest, sklearn, sklearn.dummy, numpy as np\n'), ((2341, 2364), 'numpy.ones', 'np.ones', ([], {'shape': '(100, 3)'}), '(shape=(100, 3))\n', (2348, 2364), True, 'import unittest, sklearn, sklearn.dummy, numpy as np\n'), ((2589, 2641), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['expected', 'predictions'], {}), '(expected, 
predictions)\n', (2618, 2641), True, 'import unittest, sklearn, sklearn.dummy, numpy as np\n'), ((982, 996), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (991, 996), True, 'import unittest, sklearn, sklearn.dummy, numpy as np\n'), ((1576, 1590), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (1585, 1590), True, 'import unittest, sklearn, sklearn.dummy, numpy as np\n'), ((2308, 2322), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (2317, 2322), True, 'import unittest, sklearn, sklearn.dummy, numpy as np\n'), ((2388, 2409), 'numpy.array', 'np.array', (['([999] * 100)'], {}), '([999] * 100)\n', (2396, 2409), True, 'import unittest, sklearn, sklearn.dummy, numpy as np\n'), ((2532, 2546), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (2541, 2546), True, 'import unittest, sklearn, sklearn.dummy, numpy as np\n'), ((645, 659), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (654, 659), True, 'import unittest, sklearn, sklearn.dummy, numpy as np\n'), ((1183, 1197), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (1192, 1197), True, 'import unittest, sklearn, sklearn.dummy, numpy as np\n'), ((1775, 1789), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (1784, 1789), True, 'import unittest, sklearn, sklearn.dummy, numpy as np\n')] |
# -*- coding: utf-8 -*-
# Linear-regression demo with Logic Tensor Networks (LTN): the regressor
# f(x) = W*x + b is learned by maximizing the truth of the axioms
# eq(f(x_i), y_i) over noisy samples of y = slope*x, then the fit is
# plotted for both training and fresh test data.
import logging; logging.basicConfig(level=logging.DEBUG)
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import logictensornetworks_wrapper as ltnw
import logictensornetworks_library as ltnl
# Data-generation and optimization hyper-parameters.
start=-1
end=1
training_size=10
testing_size=10
learning_rate = 0.01
slope=1.
var=0.1
epochs=1000
# define data
train_X = np.random.uniform(start,end,(training_size)).astype("float32")
train_Y = slope*train_X + np.random.normal(scale=var,size=len(train_X))
# define the language, we translate every training example into constants
[ltnw.constant("x_%s" % i,[x]) for i,x in enumerate(train_X)]
[ltnw.constant("y_%s" % i,[y]) for i,y in enumerate(train_Y)]
# define the function f as a linear regressor
W = tf.Variable(np.random.randn(), name="weight")
b = tf.Variable(np.random.randn(), name="bias")
ltnw.function("f",1,1,fun_definition=lambda X: tf.add(tf.multiply(X, W), b))
# defining an equal predicate based on the euclidian distance of two vectors
ltnw.predicate("eq",2,ltnl.equal_euclidian)
# defining the theory: one equality axiom per training example
for f in ["eq(f(x_%s),y_%s)" % (i,i) for i in range(len(train_X))]:
    ltnw.axiom(f)
print("\n".join(sorted(ltnw.AXIOMS.keys())))
# initializing knowledgebase and optimizing
ltnw.initialize_knowledgebase(optimizer=tf.train.GradientDescentOptimizer(learning_rate=learning_rate))
ltnw.train(max_epochs=epochs)
# visualize results on training data
ltnw.variable("?x",1)
prediction=ltnw.ask("f(?x)",feed_dict={"?x" : train_X.reshape(len(train_X),1)})
plt.figure(figsize=(12,5))
plt.subplot(1,2,1)
plt.plot(train_X, train_Y, 'bo', label='Training data',color="black")
plt.plot(train_X, ltnw.SESSION.run(W) * train_X + ltnw.SESSION.run(b), label='Fitted line')
plt.plot(train_X, prediction, 'bo', label='prediction',color="red")
plt.legend()
# generate test data and visualize regressor results
test_X = np.random.uniform(start,end,(testing_size)).astype("float32")
prediction=ltnw.ask("f(?x)",feed_dict={"?x" : test_X.reshape(len(test_X),1)})
# NOTE(review): size=len(train_X) below -- likely meant len(test_X);
# it only works because training_size == testing_size here.
test_Y = slope*test_X + np.random.normal(scale=var,size=len(train_X))
plt.subplot(1,2,2)
plt.plot(test_X, test_Y, 'bo', label='Testing data')
plt.plot(test_X, prediction, 'bo', label='prediction',color="red")
plt.plot(test_X, ltnw.SESSION.run(W) * test_X + ltnw.SESSION.run(b), label='Fitted line')
plt.legend()
plt.show() | [
"logging.basicConfig",
"logictensornetworks_wrapper.constant",
"logictensornetworks_wrapper.variable",
"matplotlib.pyplot.plot",
"tensorflow.multiply",
"logictensornetworks_wrapper.train",
"logictensornetworks_wrapper.AXIOMS.keys",
"tensorflow.train.GradientDescentOptimizer",
"logictensornetworks_wr... | [((40, 80), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (59, 80), False, 'import logging\n'), ((1001, 1046), 'logictensornetworks_wrapper.predicate', 'ltnw.predicate', (['"""eq"""', '(2)', 'ltnl.equal_euclidian'], {}), "('eq', 2, ltnl.equal_euclidian)\n", (1015, 1046), True, 'import logictensornetworks_wrapper as ltnw\n'), ((1348, 1377), 'logictensornetworks_wrapper.train', 'ltnw.train', ([], {'max_epochs': 'epochs'}), '(max_epochs=epochs)\n', (1358, 1377), True, 'import logictensornetworks_wrapper as ltnw\n'), ((1416, 1438), 'logictensornetworks_wrapper.variable', 'ltnw.variable', (['"""?x"""', '(1)'], {}), "('?x', 1)\n", (1429, 1438), True, 'import logictensornetworks_wrapper as ltnw\n'), ((1518, 1545), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 5)'}), '(figsize=(12, 5))\n', (1528, 1545), True, 'import matplotlib.pyplot as plt\n'), ((1545, 1565), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (1556, 1565), True, 'import matplotlib.pyplot as plt\n'), ((1564, 1634), 'matplotlib.pyplot.plot', 'plt.plot', (['train_X', 'train_Y', '"""bo"""'], {'label': '"""Training data"""', 'color': '"""black"""'}), "(train_X, train_Y, 'bo', label='Training data', color='black')\n", (1572, 1634), True, 'import matplotlib.pyplot as plt\n'), ((1726, 1794), 'matplotlib.pyplot.plot', 'plt.plot', (['train_X', 'prediction', '"""bo"""'], {'label': '"""prediction"""', 'color': '"""red"""'}), "(train_X, prediction, 'bo', label='prediction', color='red')\n", (1734, 1794), True, 'import matplotlib.pyplot as plt\n'), ((1794, 1806), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1804, 1806), True, 'import matplotlib.pyplot as plt\n'), ((2080, 2100), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (2091, 2100), True, 'import matplotlib.pyplot as plt\n'), ((2099, 2151), 
'matplotlib.pyplot.plot', 'plt.plot', (['test_X', 'test_Y', '"""bo"""'], {'label': '"""Testing data"""'}), "(test_X, test_Y, 'bo', label='Testing data')\n", (2107, 2151), True, 'import matplotlib.pyplot as plt\n'), ((2152, 2219), 'matplotlib.pyplot.plot', 'plt.plot', (['test_X', 'prediction', '"""bo"""'], {'label': '"""prediction"""', 'color': '"""red"""'}), "(test_X, prediction, 'bo', label='prediction', color='red')\n", (2160, 2219), True, 'import matplotlib.pyplot as plt\n'), ((2309, 2321), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2319, 2321), True, 'import matplotlib.pyplot as plt\n'), ((2322, 2332), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2330, 2332), True, 'import matplotlib.pyplot as plt\n'), ((578, 608), 'logictensornetworks_wrapper.constant', 'ltnw.constant', (["('x_%s' % i)", '[x]'], {}), "('x_%s' % i, [x])\n", (591, 608), True, 'import logictensornetworks_wrapper as ltnw\n'), ((640, 670), 'logictensornetworks_wrapper.constant', 'ltnw.constant', (["('y_%s' % i)", '[y]'], {}), "('y_%s' % i, [y])\n", (653, 670), True, 'import logictensornetworks_wrapper as ltnw\n'), ((764, 781), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (779, 781), True, 'import numpy as np\n'), ((814, 831), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (829, 831), True, 'import numpy as np\n'), ((1140, 1153), 'logictensornetworks_wrapper.axiom', 'ltnw.axiom', (['f'], {}), '(f)\n', (1150, 1153), True, 'import logictensornetworks_wrapper as ltnw\n'), ((367, 411), 'numpy.random.uniform', 'np.random.uniform', (['start', 'end', 'training_size'], {}), '(start, end, training_size)\n', (384, 411), True, 'import numpy as np\n'), ((1284, 1346), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (1317, 1346), True, 'import tensorflow as tf\n'), ((1684, 1703), 'logictensornetworks_wrapper.SESSION.run', 'ltnw.SESSION.run', (['b'], 
{}), '(b)\n', (1700, 1703), True, 'import logictensornetworks_wrapper as ltnw\n'), ((1870, 1913), 'numpy.random.uniform', 'np.random.uniform', (['start', 'end', 'testing_size'], {}), '(start, end, testing_size)\n', (1887, 1913), True, 'import numpy as np\n'), ((2267, 2286), 'logictensornetworks_wrapper.SESSION.run', 'ltnw.SESSION.run', (['b'], {}), '(b)\n', (2283, 2286), True, 'import logictensornetworks_wrapper as ltnw\n'), ((1177, 1195), 'logictensornetworks_wrapper.AXIOMS.keys', 'ltnw.AXIOMS.keys', ([], {}), '()\n', (1193, 1195), True, 'import logictensornetworks_wrapper as ltnw\n'), ((1652, 1671), 'logictensornetworks_wrapper.SESSION.run', 'ltnw.SESSION.run', (['W'], {}), '(W)\n', (1668, 1671), True, 'import logictensornetworks_wrapper as ltnw\n'), ((2236, 2255), 'logictensornetworks_wrapper.SESSION.run', 'ltnw.SESSION.run', (['W'], {}), '(W)\n', (2252, 2255), True, 'import logictensornetworks_wrapper as ltnw\n'), ((900, 917), 'tensorflow.multiply', 'tf.multiply', (['X', 'W'], {}), '(X, W)\n', (911, 917), True, 'import tensorflow as tf\n')] |
import numpy as np
from numpy import linalg as la, random as rnd
from scipy.linalg import sqrtm
from pymanopt.manifolds.manifold import EuclideanEmbeddedSubmanifold
from pymanopt.tools.multi import multihconj, multiherm, multilog
from pymanopt.tools.multi import multiprod, multitransp
class HermitianPositiveDefinite(EuclideanEmbeddedSubmanifold):
    """Manifold of (n x n)^k complex Hermitian positive definite matrices.

    Points are HPD matrices (a batch of k of them when k > 1) with the
    affine-invariant metric <u, v>_x = Re tr(x^-1 u x^-1 v).
    """
    def __init__(self, n, k=1):
        self._n = n
        self._k = k
        if k == 1:
            name = ("Manifold of Hermitian positive definite\
            ({} x {}) matrices").format(n, n)
        else:
            name = "Product manifold of {} ({} x {}) matrices".format(k, n, n)
        # Real dimension: n real diagonal entries plus n(n-1)/2 complex
        # off-diagonal entries per matrix.
        dimension = 2 * int(k * n * (n + 1) / 2)
        super().__init__(name, dimension)

    def _spectral_apply(self, h, fun):
        """Return q diag(fun(d)) q^H, where h = q diag(d) q^H is the
        eigendecomposition of the Hermitian matrix (or batch) `h`."""
        d, q = la.eigh(h)
        if self._k == 1:
            return q @ np.diag(fun(d)) @ q.conj().T
        # np.diag is not vectorized, hence the loop over the batch.
        temp = np.zeros(q.shape, dtype=complex)
        for i in range(q.shape[0]):
            temp[i, :, :] = np.diag(fun(d[i, :]))
        return multiprod(multiprod(q, temp), multihconj(q))

    def rand(self):
        """Return a random, well-conditioned point of the manifold."""
        # Generate eigenvalues between 1 and 2
        # (eigenvalues of a Hermitian matrix are always real).
        d = np.ones((self._k, self._n, 1)) + rnd.rand(self._k, self._n, 1)
        # Generate a unitary basis.  Annoyingly qr decomp isn't vectorized
        # so need to use a for loop.  Could be done using svd but this is
        # slower for bigger matrices.
        # BUG FIX: np.complex was removed in numpy 1.24 -- use builtin.
        u = np.zeros((self._k, self._n, self._n), dtype=complex)
        for i in range(self._k):
            u[i], _ = la.qr(
                rnd.randn(self._n, self._n)+1j*rnd.randn(self._n, self._n))
        if self._k == 1:
            return multiprod(u, d * multihconj(u))[0]
        return multiprod(u, d * multihconj(u))

    def randvec(self, x):
        """Return a random unit-norm tangent vector at x."""
        k = self._k
        n = self._n
        if k == 1:
            u = multiherm(rnd.randn(n, n)+1j*rnd.randn(n, n))
        else:
            u = multiherm(rnd.randn(k, n, n)+1j*rnd.randn(k, n, n))
        return u / self.norm(x, u)

    def zerovec(self, x):
        """Return the zero tangent vector at x."""
        k = self._k
        n = self._n
        # BUG FIX: np.complex was removed in numpy 1.24 -- use builtin.
        if k != 1:
            return np.zeros((k, n, n), dtype=complex)
        return np.zeros((n, n), dtype=complex)

    def inner(self, x, u, v):
        """Affine-invariant inner product Re tr(x^-1 u x^-1 v)."""
        return np.real(
            np.tensordot(la.solve(x, u), multitransp(la.solve(x, v)),
                         axes=x.ndim))

    def norm(self, x, u):
        """Norm of the tangent vector u at x.

        This implementation is as fast as np.linalg.solve_triangular and
        is more stable, as the above solver tends to output non positive
        definite results.
        """
        c = la.cholesky(x)
        c_inv = la.inv(c)
        return np.real(
            la.norm(multiprod(multiprod(c_inv, u), multihconj(c_inv))))

    def proj(self, X, G):
        """Project the ambient vector G onto the tangent space at X
        (the Hermitian part of G)."""
        return multiherm(G)

    def egrad2rgrad(self, x, u):
        """Convert a Euclidean gradient to the Riemannian one: x herm(u) x."""
        return multiprod(multiprod(x, multiherm(u)), x)

    def ehess2rhess(self, x, egrad, ehess, u):
        """Convert a Euclidean Hessian action to the Riemannian one."""
        egrad = multiherm(egrad)
        hess = multiprod(multiprod(x, multiherm(ehess)), x)
        hess += multiherm(multiprod(multiprod(u, egrad), x))
        return hess

    def exp(self, x, u):
        """Exponential map: x^1/2 expm(x^-1/2 u x^-1/2) x^1/2."""
        x_sqrt = self._spectral_apply(x, np.sqrt)
        x_isqrt = self._spectral_apply(x, lambda ev: 1 / np.sqrt(ev))
        inner_exp = self._spectral_apply(
            multiprod(multiprod(x_isqrt, u), x_isqrt), np.exp)
        e = multiprod(multiprod(x_sqrt, inner_exp), x_sqrt)
        # Symmetrize to wipe out rounding noise.
        return multiherm(e)

    def retr(self, x, u):
        """Retraction x + u + u x^-1 u / 2."""
        return x + u + (1/2)*u@la.solve(x, u)

    def log(self, x, y):
        """Logarithm map: x^1/2 logm(x^-1/2 y x^-1/2) x^1/2."""
        x_sqrt = self._spectral_apply(x, np.sqrt)
        x_isqrt = self._spectral_apply(x, lambda ev: 1 / np.sqrt(ev))
        inner_log = self._spectral_apply(
            multiprod(multiprod(x_isqrt, y), x_isqrt), np.log)
        xi = multiprod(multiprod(x_sqrt, inner_log), x_sqrt)
        return multiherm(xi)

    def transp(self, x1, x2, d):
        """Transport the tangent vector d from x1 to x2."""
        E = multihconj(la.solve(multihconj(x1), multihconj(x2)))
        if self._k == 1:
            E = sqrtm(E)
        else:
            # scipy's sqrtm is not vectorized, hence the loop.
            for i in range(len(E)):
                E[i, :, :] = sqrtm(E[i, :, :])
        return multiprod(multiprod(E, d), multihconj(E))

    def dist(self, x, y):
        """Riemannian distance ||logm(c^-1 y c^-H)||_F with x = c c^H."""
        c = la.cholesky(x)
        c_inv = la.inv(c)
        logm = multilog(multiprod(multiprod(c_inv, y), multihconj(c_inv)),
                        pos_def=True)
        return np.real(la.norm(logm))
class SpecialHermitianPositiveDefinite(EuclideanEmbeddedSubmanifold):
    """Manifold of (n x n)^k Hermitian positive definite matrices with
    unit determinant ('special' HPD matrices).

    It is a totally geodesic submanifold of the Hermitian positive
    definite matrices, so most operations delegate to an internal
    HermitianPositiveDefinite instance and then renormalize the
    determinant or re-project onto the tangent space.
    """
    def __init__(self, n, k=1):
        self._n = n
        self._k = k
        self.HPD = HermitianPositiveDefinite(n, k)
        if k == 1:
            name = ("Manifold of special Hermitian positive definite\
            ({} x {}) matrices").format(n, n)
        else:
            name = "Product manifold of {} special Hermitian positive\
            definite ({} x {}) matrices".format(k, n, n)
        dimension = int(k * (n*(n+1) - 1))
        super().__init__(name, dimension)

    def _unit_determinant(self, m):
        """Rescale `m` (each matrix of the batch when k > 1) so that its
        determinant becomes one."""
        scale = np.real(la.det(m)) ** (1 / self._n)
        if self._k != 1:
            scale = scale.reshape(-1, 1, 1)
        return m / scale

    def rand(self):
        """Random point: an HPD sample rescaled to unit determinant."""
        return self._unit_determinant(self.HPD.rand())

    def randvec(self, x):
        """Random unit-norm tangent vector at x."""
        if self._k == 1:
            shape = (self._n, self._n)
        else:
            shape = (self._k, self._n, self._n)
        ambient = rnd.randn(*shape) + 1j * rnd.randn(*shape)
        # Project on the tangent space, then normalize.
        tangent = self.proj(x, ambient)
        return tangent / self.norm(x, tangent)

    def zerovec(self, x):
        """Zero tangent vector at x."""
        return self.HPD.zerovec(x)

    def inner(self, x, u, v):
        """Inner product, inherited from the HPD manifold."""
        return self.HPD.inner(x, u, v)

    def norm(self, x, u):
        """Norm of the tangent vector u at x, inherited from HPD."""
        return self.HPD.norm(x, u)

    def proj(self, x, u):
        """Project an ambient vector onto the tangent space at x: take
        the Hermitian part, then remove the component along x."""
        herm = multiherm(u)
        t = np.real(np.trace(la.solve(x, herm), axis1=-2, axis2=-1))
        if self._k != 1:
            t = t.reshape(-1, 1, 1)
        return herm - (1 / self._n) * t * x

    def egrad2rgrad(self, x, u):
        """Euclidean-to-Riemannian gradient: x u x, projected onto the
        tangent space."""
        return self.proj(x, multiprod(multiprod(x, u), x))

    def exp(self, x, u):
        """Exponential map: the HPD geodesic, renormalized so the result
        stays on the unit-determinant submanifold."""
        return self._unit_determinant(self.HPD.exp(x, u))

    def retr(self, x, u):
        """Retraction: the HPD retraction followed by a determinant
        renormalization."""
        return self._unit_determinant(self.HPD.retr(x, u))

    def log(self, x, y):
        """Logarithm map, inherited from the HPD manifold."""
        return self.HPD.log(x, y)

    def transp(self, x1, x2, d):
        """Transport d from x1 to x2 with the HPD transporter, then
        project back onto this submanifold's tangent space at x2."""
        return self.proj(x2, self.HPD.transp(x1, x2, d))

    def dist(self, x, y):
        """Riemannian distance, inherited from the HPD manifold."""
        return self.HPD.dist(x, y)
| [
"numpy.linalg.eigh",
"scipy.linalg.sqrtm",
"numpy.linalg.solve",
"numpy.ones",
"numpy.random.rand",
"numpy.sqrt",
"pymanopt.tools.multi.multiprod",
"numpy.log",
"pymanopt.tools.multi.multihconj",
"numpy.linalg.det",
"numpy.exp",
"pymanopt.tools.multi.multiherm",
"numpy.zeros",
"numpy.linal... | [((1236, 1291), 'numpy.zeros', 'np.zeros', (['(self._k, self._n, self._n)'], {'dtype': 'np.complex'}), '((self._k, self._n, self._n), dtype=np.complex)\n', (1244, 1291), True, 'import numpy as np\n'), ((1980, 2014), 'numpy.zeros', 'np.zeros', (['(n, n)'], {'dtype': 'np.complex'}), '((n, n), dtype=np.complex)\n', (1988, 2014), True, 'import numpy as np\n'), ((2396, 2410), 'numpy.linalg.cholesky', 'la.cholesky', (['x'], {}), '(x)\n', (2407, 2410), True, 'from numpy import linalg as la, random as rnd\n'), ((2427, 2436), 'numpy.linalg.inv', 'la.inv', (['c'], {}), '(c)\n', (2433, 2436), True, 'from numpy import linalg as la, random as rnd\n'), ((2575, 2587), 'pymanopt.tools.multi.multiherm', 'multiherm', (['G'], {}), '(G)\n', (2584, 2587), False, 'from pymanopt.tools.multi import multihconj, multiherm, multilog\n'), ((2742, 2758), 'pymanopt.tools.multi.multiherm', 'multiherm', (['egrad'], {}), '(egrad)\n', (2751, 2758), False, 'from pymanopt.tools.multi import multihconj, multiherm, multilog\n'), ((2962, 2972), 'numpy.linalg.eigh', 'la.eigh', (['x'], {}), '(x)\n', (2969, 2972), True, 'from numpy import linalg as la, random as rnd\n'), ((4057, 4069), 'pymanopt.tools.multi.multiherm', 'multiherm', (['e'], {}), '(e)\n', (4066, 4069), False, 'from pymanopt.tools.multi import multihconj, multiherm, multilog\n'), ((4236, 4246), 'numpy.linalg.eigh', 'la.eigh', (['x'], {}), '(x)\n', (4243, 4246), True, 'from numpy import linalg as la, random as rnd\n'), ((5339, 5352), 'pymanopt.tools.multi.multiherm', 'multiherm', (['xi'], {}), '(xi)\n', (5348, 5352), False, 'from pymanopt.tools.multi import multihconj, multiherm, multilog\n'), ((5741, 5755), 'numpy.linalg.cholesky', 'la.cholesky', (['x'], {}), '(x)\n', (5752, 5755), True, 'from numpy import linalg as la, random as rnd\n'), ((5772, 5781), 'numpy.linalg.inv', 'la.inv', (['c'], {}), '(c)\n', (5778, 5781), True, 'from numpy import linalg as la, random as rnd\n'), ((7756, 7768), 'pymanopt.tools.multi.multiherm', 
'multiherm', (['u'], {}), '(u)\n', (7765, 7768), False, 'from pymanopt.tools.multi import multihconj, multiherm, multilog\n'), ((970, 1000), 'numpy.ones', 'np.ones', (['(self._k, self._n, 1)'], {}), '((self._k, self._n, 1))\n', (977, 1000), True, 'import numpy as np\n'), ((1003, 1032), 'numpy.random.rand', 'rnd.rand', (['self._k', 'self._n', '(1)'], {}), '(self._k, self._n, 1)\n', (1011, 1032), True, 'from numpy import linalg as la, random as rnd\n'), ((1927, 1964), 'numpy.zeros', 'np.zeros', (['(k, n, n)'], {'dtype': 'np.complex'}), '((k, n, n), dtype=np.complex)\n', (1935, 1964), True, 'import numpy as np\n'), ((3136, 3171), 'numpy.zeros', 'np.zeros', (['q.shape'], {'dtype': 'np.complex'}), '(q.shape, dtype=np.complex)\n', (3144, 3171), True, 'import numpy as np\n'), ((3374, 3409), 'numpy.zeros', 'np.zeros', (['q.shape'], {'dtype': 'np.complex'}), '(q.shape, dtype=np.complex)\n', (3382, 3409), True, 'import numpy as np\n'), ((3762, 3797), 'numpy.zeros', 'np.zeros', (['q.shape'], {'dtype': 'np.complex'}), '(q.shape, dtype=np.complex)\n', (3770, 3797), True, 'import numpy as np\n'), ((4015, 4035), 'pymanopt.tools.multi.multiprod', 'multiprod', (['x_sqrt', 'e'], {}), '(x_sqrt, e)\n', (4024, 4035), False, 'from pymanopt.tools.multi import multiprod, multitransp\n'), ((4410, 4445), 'numpy.zeros', 'np.zeros', (['q.shape'], {'dtype': 'np.complex'}), '(q.shape, dtype=np.complex)\n', (4418, 4445), True, 'import numpy as np\n'), ((4648, 4683), 'numpy.zeros', 'np.zeros', (['q.shape'], {'dtype': 'np.complex'}), '(q.shape, dtype=np.complex)\n', (4656, 4683), True, 'import numpy as np\n'), ((5038, 5073), 'numpy.zeros', 'np.zeros', (['q.shape'], {'dtype': 'np.complex'}), '(q.shape, dtype=np.complex)\n', (5046, 5073), True, 'import numpy as np\n'), ((5294, 5316), 'pymanopt.tools.multi.multiprod', 'multiprod', (['x_sqrt', 'log'], {}), '(x_sqrt, log)\n', (5303, 5316), False, 'from pymanopt.tools.multi import multiprod, multitransp\n'), ((5511, 5519), 'scipy.linalg.sqrtm', 'sqrtm', 
(['E'], {}), '(E)\n', (5516, 5519), False, 'from scipy.linalg import sqrtm\n'), ((5646, 5661), 'pymanopt.tools.multi.multiprod', 'multiprod', (['E', 'd'], {}), '(E, d)\n', (5655, 5661), False, 'from pymanopt.tools.multi import multiprod, multitransp\n'), ((5663, 5676), 'pymanopt.tools.multi.multihconj', 'multihconj', (['E'], {}), '(E)\n', (5673, 5676), False, 'from pymanopt.tools.multi import multihconj, multiherm, multilog\n'), ((5918, 5931), 'numpy.linalg.norm', 'la.norm', (['logm'], {}), '(logm)\n', (5925, 5931), True, 'from numpy import linalg as la, random as rnd\n'), ((7840, 7854), 'numpy.linalg.solve', 'la.solve', (['x', 'u'], {}), '(x, u)\n', (7848, 7854), True, 'from numpy import linalg as la, random as rnd\n'), ((8091, 8106), 'pymanopt.tools.multi.multiprod', 'multiprod', (['x', 'u'], {}), '(x, u)\n', (8100, 8106), False, 'from pymanopt.tools.multi import multiprod, multitransp\n'), ((1542, 1555), 'pymanopt.tools.multi.multihconj', 'multihconj', (['u'], {}), '(u)\n', (1552, 1555), False, 'from pymanopt.tools.multi import multihconj, multiherm, multilog\n'), ((2095, 2109), 'numpy.linalg.solve', 'la.solve', (['x', 'u'], {}), '(x, u)\n', (2103, 2109), True, 'from numpy import linalg as la, random as rnd\n'), ((2660, 2672), 'pymanopt.tools.multi.multiherm', 'multiherm', (['u'], {}), '(u)\n', (2669, 2672), False, 'from pymanopt.tools.multi import multihconj, multiherm, multilog\n'), ((2797, 2813), 'pymanopt.tools.multi.multiherm', 'multiherm', (['ehess'], {}), '(ehess)\n', (2806, 2813), False, 'from pymanopt.tools.multi import multihconj, multiherm, multilog\n'), ((2855, 2874), 'pymanopt.tools.multi.multiprod', 'multiprod', (['u', 'egrad'], {}), '(u, egrad)\n', (2864, 2874), False, 'from pymanopt.tools.multi import multiprod, multitransp\n'), ((3319, 3337), 'pymanopt.tools.multi.multiprod', 'multiprod', (['q', 'temp'], {}), '(q, temp)\n', (3328, 3337), False, 'from pymanopt.tools.multi import multiprod, multitransp\n'), ((3339, 3352), 
'pymanopt.tools.multi.multihconj', 'multihconj', (['q'], {}), '(q)\n', (3349, 3352), False, 'from pymanopt.tools.multi import multihconj, multiherm, multilog\n'), ((3560, 3578), 'pymanopt.tools.multi.multiprod', 'multiprod', (['q', 'temp'], {}), '(q, temp)\n', (3569, 3578), False, 'from pymanopt.tools.multi import multiprod, multitransp\n'), ((3580, 3593), 'pymanopt.tools.multi.multihconj', 'multihconj', (['q'], {}), '(q)\n', (3590, 3593), False, 'from pymanopt.tools.multi import multihconj, multiherm, multilog\n'), ((3629, 3650), 'pymanopt.tools.multi.multiprod', 'multiprod', (['x_isqrt', 'u'], {}), '(x_isqrt, u)\n', (3638, 3650), False, 'from pymanopt.tools.multi import multiprod, multitransp\n'), ((3960, 3975), 'pymanopt.tools.multi.multiprod', 'multiprod', (['q', 'd'], {}), '(q, d)\n', (3969, 3975), False, 'from pymanopt.tools.multi import multiprod, multitransp\n'), ((3977, 3990), 'pymanopt.tools.multi.multihconj', 'multihconj', (['q'], {}), '(q)\n', (3987, 3990), False, 'from pymanopt.tools.multi import multihconj, multiherm, multilog\n'), ((4142, 4156), 'numpy.linalg.solve', 'la.solve', (['x', 'u'], {}), '(x, u)\n', (4150, 4156), True, 'from numpy import linalg as la, random as rnd\n'), ((4593, 4611), 'pymanopt.tools.multi.multiprod', 'multiprod', (['q', 'temp'], {}), '(q, temp)\n', (4602, 4611), False, 'from pymanopt.tools.multi import multiprod, multitransp\n'), ((4613, 4626), 'pymanopt.tools.multi.multihconj', 'multihconj', (['q'], {}), '(q)\n', (4623, 4626), False, 'from pymanopt.tools.multi import multihconj, multiherm, multilog\n'), ((4834, 4852), 'pymanopt.tools.multi.multiprod', 'multiprod', (['q', 'temp'], {}), '(q, temp)\n', (4843, 4852), False, 'from pymanopt.tools.multi import multiprod, multitransp\n'), ((4854, 4867), 'pymanopt.tools.multi.multihconj', 'multihconj', (['q'], {}), '(q)\n', (4864, 4867), False, 'from pymanopt.tools.multi import multihconj, multiherm, multilog\n'), ((4903, 4924), 'pymanopt.tools.multi.multiprod', 'multiprod', 
(['x_isqrt', 'y'], {}), '(x_isqrt, y)\n', (4912, 4924), False, 'from pymanopt.tools.multi import multiprod, multitransp\n'), ((5238, 5253), 'pymanopt.tools.multi.multiprod', 'multiprod', (['q', 'd'], {}), '(q, d)\n', (5247, 5253), False, 'from pymanopt.tools.multi import multiprod, multitransp\n'), ((5255, 5268), 'pymanopt.tools.multi.multihconj', 'multihconj', (['q'], {}), '(q)\n', (5265, 5268), False, 'from pymanopt.tools.multi import multihconj, multiherm, multilog\n'), ((5437, 5451), 'pymanopt.tools.multi.multihconj', 'multihconj', (['x1'], {}), '(x1)\n', (5447, 5451), False, 'from pymanopt.tools.multi import multihconj, multiherm, multilog\n'), ((5453, 5467), 'pymanopt.tools.multi.multihconj', 'multihconj', (['x2'], {}), '(x2)\n', (5463, 5467), False, 'from pymanopt.tools.multi import multihconj, multiherm, multilog\n'), ((5599, 5616), 'scipy.linalg.sqrtm', 'sqrtm', (['E[i, :, :]'], {}), '(E[i, :, :])\n', (5604, 5616), False, 'from scipy.linalg import sqrtm\n'), ((5816, 5835), 'pymanopt.tools.multi.multiprod', 'multiprod', (['c_inv', 'y'], {}), '(c_inv, y)\n', (5825, 5835), False, 'from pymanopt.tools.multi import multiprod, multitransp\n'), ((5837, 5854), 'pymanopt.tools.multi.multihconj', 'multihconj', (['c_inv'], {}), '(c_inv)\n', (5847, 5854), False, 'from pymanopt.tools.multi import multihconj, multiherm, multilog\n'), ((7184, 7199), 'numpy.random.randn', 'rnd.randn', (['n', 'n'], {}), '(n, n)\n', (7193, 7199), True, 'from numpy import linalg as la, random as rnd\n'), ((7249, 7267), 'numpy.random.randn', 'rnd.randn', (['k', 'n', 'n'], {}), '(k, n, n)\n', (7258, 7267), True, 'from numpy import linalg as la, random as rnd\n'), ((1370, 1397), 'numpy.random.randn', 'rnd.randn', (['self._n', 'self._n'], {}), '(self._n, self._n)\n', (1379, 1397), True, 'from numpy import linalg as la, random as rnd\n'), ((1669, 1684), 'numpy.random.randn', 'rnd.randn', (['n', 'n'], {}), '(n, n)\n', (1678, 1684), True, 'from numpy import linalg as la, random as rnd\n'), ((1745, 
1763), 'numpy.random.randn', 'rnd.randn', (['k', 'n', 'n'], {}), '(k, n, n)\n', (1754, 1763), True, 'from numpy import linalg as la, random as rnd\n'), ((2123, 2137), 'numpy.linalg.solve', 'la.solve', (['x', 'v'], {}), '(x, v)\n', (2131, 2137), True, 'from numpy import linalg as la, random as rnd\n'), ((2491, 2510), 'pymanopt.tools.multi.multiprod', 'multiprod', (['c_inv', 'u'], {}), '(c_inv, u)\n', (2500, 2510), False, 'from pymanopt.tools.multi import multiprod, multitransp\n'), ((2512, 2529), 'pymanopt.tools.multi.multihconj', 'multihconj', (['c_inv'], {}), '(c_inv)\n', (2522, 2529), False, 'from pymanopt.tools.multi import multihconj, multiherm, multilog\n'), ((7203, 7218), 'numpy.random.randn', 'rnd.randn', (['n', 'n'], {}), '(n, n)\n', (7212, 7218), True, 'from numpy import linalg as la, random as rnd\n'), ((7271, 7289), 'numpy.random.randn', 'rnd.randn', (['k', 'n', 'n'], {}), '(k, n, n)\n', (7280, 7289), True, 'from numpy import linalg as la, random as rnd\n'), ((1401, 1428), 'numpy.random.randn', 'rnd.randn', (['self._n', 'self._n'], {}), '(self._n, self._n)\n', (1410, 1428), True, 'from numpy import linalg as la, random as rnd\n'), ((1492, 1505), 'pymanopt.tools.multi.multihconj', 'multihconj', (['u'], {}), '(u)\n', (1502, 1505), False, 'from pymanopt.tools.multi import multihconj, multiherm, multilog\n'), ((1688, 1703), 'numpy.random.randn', 'rnd.randn', (['n', 'n'], {}), '(n, n)\n', (1697, 1703), True, 'from numpy import linalg as la, random as rnd\n'), ((1767, 1785), 'numpy.random.randn', 'rnd.randn', (['k', 'n', 'n'], {}), '(k, n, n)\n', (1776, 1785), True, 'from numpy import linalg as la, random as rnd\n'), ((3023, 3033), 'numpy.sqrt', 'np.sqrt', (['d'], {}), '(d)\n', (3030, 3033), True, 'import numpy as np\n'), ((3252, 3268), 'numpy.sqrt', 'np.sqrt', (['d[i, :]'], {}), '(d[i, :])\n', (3259, 3268), True, 'import numpy as np\n'), ((3707, 3716), 'numpy.exp', 'np.exp', (['d'], {}), '(d)\n', (3713, 3716), True, 'import numpy as np\n'), ((3878, 3893), 
'numpy.exp', 'np.exp', (['d[i, :]'], {}), '(d[i, :])\n', (3884, 3893), True, 'import numpy as np\n'), ((4297, 4307), 'numpy.sqrt', 'np.sqrt', (['d'], {}), '(d)\n', (4304, 4307), True, 'import numpy as np\n'), ((4526, 4542), 'numpy.sqrt', 'np.sqrt', (['d[i, :]'], {}), '(d[i, :])\n', (4533, 4542), True, 'import numpy as np\n'), ((4983, 4992), 'numpy.log', 'np.log', (['d'], {}), '(d)\n', (4989, 4992), True, 'import numpy as np\n'), ((5154, 5169), 'numpy.log', 'np.log', (['d[i, :]'], {}), '(d[i, :])\n', (5160, 5169), True, 'import numpy as np\n'), ((6922, 6931), 'numpy.linalg.det', 'la.det', (['x'], {}), '(x)\n', (6928, 6931), True, 'from numpy import linalg as la, random as rnd\n'), ((7923, 7933), 'numpy.real', 'np.real', (['t'], {}), '(t)\n', (7930, 7933), True, 'import numpy as np\n'), ((8305, 8314), 'numpy.linalg.det', 'la.det', (['e'], {}), '(e)\n', (8311, 8314), True, 'from numpy import linalg as la, random as rnd\n'), ((8571, 8580), 'numpy.linalg.det', 'la.det', (['r'], {}), '(r)\n', (8577, 8580), True, 'from numpy import linalg as la, random as rnd\n'), ((3080, 3090), 'numpy.sqrt', 'np.sqrt', (['d'], {}), '(d)\n', (3087, 3090), True, 'import numpy as np\n'), ((3492, 3508), 'numpy.sqrt', 'np.sqrt', (['d[i, :]'], {}), '(d[i, :])\n', (3499, 3508), True, 'import numpy as np\n'), ((4354, 4364), 'numpy.sqrt', 'np.sqrt', (['d'], {}), '(d)\n', (4361, 4364), True, 'import numpy as np\n'), ((4766, 4782), 'numpy.sqrt', 'np.sqrt', (['d[i, :]'], {}), '(d[i, :])\n', (4773, 4782), True, 'import numpy as np\n'), ((6990, 6999), 'numpy.linalg.det', 'la.det', (['x'], {}), '(x)\n', (6996, 6999), True, 'from numpy import linalg as la, random as rnd\n'), ((8372, 8381), 'numpy.linalg.det', 'la.det', (['e'], {}), '(e)\n', (8378, 8381), True, 'from numpy import linalg as la, random as rnd\n'), ((8638, 8647), 'numpy.linalg.det', 'la.det', (['r'], {}), '(r)\n', (8644, 8647), True, 'from numpy import linalg as la, random as rnd\n')] |
import math
import logging
from nltk.stem.porter import *
import numpy as np
import os
import copy
import keyphrase.dataset.keyphrase_test_dataset as test_dataset
from dataset import dataset_utils
logger = logging.getLogger(__name__)
def evaluate_multiple(config, test_set, inputs, outputs,
                      original_input, original_outputs,
                      samples, scores, idx2word, do_stem,
                      model_name, dataset_name):
    '''
    Evaluate beam-search keyphrase predictions against ground truth, write the
    surviving predictions to per-document files, and log/export corpus-level
    P/R/F1, Bpref and MRR scores.

    inputs_unk is same as inputs except for filtered out all the low-freq words to 1 (<unk>)
    return the top few keywords, number is set in config
    :param: original_input, same as inputs, the vector of one input sentence
    :param: original_outputs, vectors of corresponding multiple outputs (e.g. keyphrases)
    :return: (outs, overall_score) — `outs` is a list of human-readable report
             strings, `overall_score` maps metric keys (e.g. 'p@10',
             'macro_f1@5') to floats.

    NOTE(review): `outputs`, `original_input` and `original_outputs` are not
    referenced in the body; targets are read from test_set['target_str'].
    NOTE(review): several constructs are Python-2-only — `.encode('utf-8')`
    results are joined into '%s' strings, and `len(filter(...))` near the end
    requires a list-returning `filter`. Confirm the intended interpreter.
    NOTE(review): `re.match` below relies on `re` being pulled in by
    `from nltk.stem.porter import *` at module level — verify.
    '''
    # Generate keyphrases
    # if inputs_unk is None:
    #     samples, scores = self.generate_multiple(inputs[None, :], return_all=True)
    # else:
    #     samples, scores = self.generate_multiple(inputs_unk[None, :], return_all=True)
    stemmer = PorterStemmer()
    # Evaluation part
    outs = []
    micro_metrics = []   # one metric dict per document
    micro_matches = []   # per-document 0/1 vectors of correct predictions
    predict_scores = []  # per-document score arrays (post filtering/sorting)

    # load stopword
    with open(config['path'] + '/dataset/stopword/stopword_en.txt') as stopword_file:
        stopword_set = set([stemmer.stem(w.strip()) for w in stopword_file])

    # postag_lists = [[s[1] for s in d] for d in test_set['tagged_source']]
    # postag_lists = [[] for d in test_set['tagged_source']]

    model_nickname = config['model_name'] # 'TfIdf', 'TextRank', 'SingleRank', 'ExpandRank', 'Maui', 'Kea', 'RNN', 'CopyRNN'
    base_dir = config['path'] + '/dataset/keyphrase/prediction/' + model_nickname + '_' + config['timemark'] + '/'
    # text_dir = config['baseline_data_path'] + dataset_name + '/text/'
    # target_dir = config['baseline_data_path'] + dataset_name + '/keyphrase/'
    prediction_dir = base_dir + dataset_name

    # doc_names = [name[:name.index('.')] for name in os.listdir(text_dir)]
    loader = test_dataset.testing_data_loader(dataset_name, kwargs=dict(basedir=config['path']))
    docs = loader.get_docs(return_dict=False)
    doc_names = [d.name for d in docs]

    # reload the targets from corpus directly
    # target_dir = config['baseline_data_path'] + dataset_name + '/keyphrase/'
    # test_set['source_postag'] = test_set['target_str']

    # for input_sentence, target_list, predict_list, score_list in zip(inputs, original_outputs, samples, scores):
    for doc_name, source_str, input_sentence, target_list, predict_list, score_list, postag_list in zip(doc_names, test_set['source_str'], inputs, test_set['target_str'], samples, scores, test_set['source_postag']):
        '''
        enumerate each document, process target/predict/score and measure via p/r/f1
        '''
        target_outputs = []
        original_target_list = copy.copy(target_list) # no stemming
        predict_indexes = []
        original_predict_outputs = [] # no stemming
        predict_outputs = []
        predict_score = []
        predict_set = set()
        correctly_matched = np.asarray([0] * max(len(target_list), len(predict_list)), dtype='int32')
        is_copied = []

        # stem the original input, do on source_str not the index list input_sentence
        # stemmed_input = [stemmer.stem(w) for w in cut_zero(input_sentence, idx2word)]
        stemmed_input = [stemmer.stem(w) for w in source_str]

        # convert target index into string
        for target in target_list:
            # target = cut_zero(target, idx2word)
            if do_stem:
                target = [stemmer.stem(w) for w in target]
            # print(target)

            keep = True
            # whether do filtering on groundtruth phrases. if config['target_filter']==None, do nothing
            if config['target_filter']:
                match = None
                # slide the target phrase over the stemmed source text
                for i in range(len(stemmed_input) - len(target) + 1):
                    match = None
                    for j in range(len(target)):
                        if target[j] != stemmed_input[i + j]:
                            match = False
                            break
                    # full window matched (j reached the end with no mismatch)
                    if j == len(target) - 1 and match == None:
                        match = True
                        break

                if match == True:
                    # if match and 'appear-only', keep this phrase
                    if config['target_filter'] == 'appear-only':
                        keep = keep and True
                    elif config['target_filter'] == 'non-appear-only':
                        keep = keep and False
                elif match == False:
                    # if not match and 'appear-only', discard this phrase
                    if config['target_filter'] == 'appear-only':
                        keep = keep and False
                    # if not match and 'non-appear-only', keep this phrase
                    elif config['target_filter'] == 'non-appear-only':
                        keep = keep and True

            if not keep:
                continue

            target_outputs.append(target)

        # check if prediction is noun-phrase, initialize a filter. Be sure this should be after stemming
        if config['noun_phrase_only']:
            stemmed_source = [stemmer.stem(w) for w in source_str]
            noun_phrases = dataset_utils.get_none_phrases(stemmed_source, postag_list, config['max_len'])
            noun_phrase_set = set([' '.join(p[0]) for p in noun_phrases])

        def cut_zero(sample_index, idx2word, source_str):
            # Truncate at the first 0 (<eol>) and map indices to words;
            # indices >= voc_size are copy-mechanism pointers into source_str.
            sample_index = list(sample_index)
            # if 0 not in sample:
            #     return ['{}'.format(idx2word[w].encode('utf-8')) for w in sample]
            # # return the string before 0 (<eol>)
            # return ['{}'.format(idx2word[w].encode('utf-8')) for w in sample[:sample.index(0)]]
            if 0 in sample_index:
                sample_index = sample_index[:sample_index.index(0)]
            wordlist = []
            find_copy = False
            for w_index in sample_index:
                if w_index >= config['voc_size']:
                    wordlist.append(source_str[w_index-config['voc_size']].encode('utf-8'))
                    find_copy = True
                else:
                    wordlist.append(idx2word[w_index].encode('utf-8'))
            if find_copy:
                logger.info('Find copy! - %s - %s' % (' '.join(wordlist), str(sample_index)))
            return sample_index, wordlist

        # budget of single-word predictions allowed to survive the filters
        single_word_maximum = 1
        # convert predict index into string
        for id, (predict, score) in enumerate(zip(predict_list, score_list)):
            predict_index, original_predict = cut_zero(predict, idx2word, source_str)
            predict = [stemmer.stem(w) for w in original_predict]

            # filter some not good ones
            keep = True
            if len(predict) == 0:
                keep = False
            number_digit = 0
            for w in predict:
                w = w.strip()
                if w == '<unk>' or w == '<eos>':
                    keep = False
                if re.match(r'[_,\(\)\.\'%]', w):
                    keep = False
                    # print('\t\tPunctuations! - %s' % str(predict))
                if w == '<digit>':
                    number_digit += 1

            # drop phrases that start or end with a stopword
            if len(predict) >= 1 and (predict[0] in stopword_set or predict[-1] in stopword_set):
                keep = False

            # filter out single-word predictions
            if len(predict) <= 1:
                if single_word_maximum > 0:
                    single_word_maximum -= 1
                else:
                    keep = False

            # whether do filtering on predicted phrases. if config['predict_filter']==None, do nothing
            if config['predict_filter']:
                match = None
                for i in range(len(stemmed_input) - len(predict) + 1):
                    match = None
                    for j in range(len(predict)):
                        if predict[j] != stemmed_input[i + j]:
                            match = False
                            break
                    if j == len(predict) - 1 and match == None:
                        match = True
                        break

                if match == True:
                    # if match and 'appear-only', keep this phrase
                    if config['predict_filter'] == 'appear-only':
                        keep = keep and True
                    elif config['predict_filter'] == 'non-appear-only':
                        keep = keep and False
                elif match == False:
                    # if not match and 'appear-only', discard this phrase
                    if config['predict_filter'] == 'appear-only':
                        keep = keep and False
                    # if not match and 'non-appear-only', keep this phrase
                    elif config['predict_filter'] == 'non-appear-only':
                        keep = keep and True

            # if all are <digit>, discard
            if number_digit == len(predict):
                keep = False

            # remove duplicates
            key = '-'.join(predict)
            if key in predict_set:
                keep = False

            # if #(word) == #(letter), it predicts like this: h a s k e l
            if sum([len(w) for w in predict])==len(predict) and len(predict) > 2:
                keep = False
                # print('\t\tall letters! - %s' % str(predict))

            # check if prediction is noun-phrase
            if config['noun_phrase_only']:
                if ' '.join(predict) not in noun_phrase_set:
                    print('Not a NP: %s' % (' '.join(predict)))
                    keep = False

            # discard invalid ones
            if not keep:
                continue

            # NOTE(review): presumably this should be >=, matching cut_zero's
            # `w_index >= config['voc_size']` copy test — confirm.
            if any(i_>config['voc_size'] for i_ in predict_index):
                is_copied.append(1)
            else:
                is_copied.append(0)
            original_predict_outputs.append(original_predict)
            predict_indexes.append(predict_index)
            predict_outputs.append(predict)
            predict_score.append(score)
            predict_set.add(key)

        # whether keep the longest phrases only, as there're too many phrases are part of other longer phrases
        if config['keep_longest']:
            match_phrase_index = []
            for ii, p_ii in enumerate(predict_outputs): # shorter one
                match_times = 0
                for jj, p_jj in enumerate(predict_outputs): # longer one
                    if ii==jj or len(p_ii)>=len(p_jj): # p_jj must be longer than p_ii
                        continue
                    match = None
                    for start in range(len(p_jj) - len(p_ii) + 1): # iterate the start of long phrase
                        match = None
                        for w_index in range(len(p_ii)): # iterate the short phrase
                            if (p_ii[w_index]!=p_jj[start+w_index]):
                                match = False
                                break
                        if w_index == len(p_ii) - 1 and match == None:
                            match = True
                            match_times += 1
                if match_times == 1: # p_ii is part of p_jj, discard
                    match_phrase_index.append(ii)
                    # print("Matched pair: %s \t - \t %s" % (str(p_ii), str(p_jj)))
                    # pass
            original_predict_outputs = np.delete(original_predict_outputs, match_phrase_index)
            predict_indexes = np.delete(predict_indexes, match_phrase_index)
            predict_outputs = np.delete(predict_outputs, match_phrase_index)
            predict_score = np.delete(predict_score, match_phrase_index)
            is_copied = np.delete(is_copied, match_phrase_index)

        # check whether the predicted phrase is correct (match any groundtruth)
        for p_id, predict in enumerate(predict_outputs):
            for target in target_outputs:
                if len(target) == len(predict):
                    flag = True
                    for i, w in enumerate(predict):
                        if predict[i] != target[i]:
                            flag = False
                    if flag:
                        correctly_matched[p_id] = 1
                        # print('%s correct!!!' % predict)

        original_predict_outputs = np.asarray(original_predict_outputs)
        predict_indexes = np.asarray(predict_indexes)
        predict_outputs = np.asarray(predict_outputs)
        predict_score = np.asarray(predict_score)
        is_copied = np.asarray(is_copied)

        # normalize the score?
        if config['normalize_score']:
            predict_score = np.asarray(
                [math.log(math.exp(score) / len(predict)) for predict, score in zip(predict_outputs, predict_score)])

        # rank everything by (normalized) score, ascending
        score_list_index = np.argsort(predict_score)
        original_predict_outputs = original_predict_outputs[score_list_index]
        predict_indexes = predict_indexes[score_list_index]
        predict_outputs = predict_outputs[score_list_index]
        predict_score = predict_score[score_list_index]
        correctly_matched = correctly_matched[score_list_index]
        is_copied = is_copied[score_list_index]

        metric_dict = {}
        '''
        Compute micro metrics
        '''
        for number_to_predict in [5, 10, 15, 20, 30, 40, 50]: #5, 10, 15, 20, 30, 40, 50
            metric_dict['appear_target_number'] = len(target_outputs)
            metric_dict['target_number'] = len(target_list)
            metric_dict['correct_number@%d' % number_to_predict] = sum(correctly_matched[:number_to_predict])

            metric_dict['p@%d' % number_to_predict] = float(sum(correctly_matched[:number_to_predict])) / float(
                number_to_predict)

            if len(target_outputs) != 0:
                metric_dict['r@%d' % number_to_predict] = float(sum(correctly_matched[:number_to_predict])) / float(
                    len(target_outputs))
            else:
                metric_dict['r@%d' % number_to_predict] = 0

            if metric_dict['p@%d' % number_to_predict] + metric_dict['r@%d' % number_to_predict] != 0:
                metric_dict['f1@%d' % number_to_predict] = 2 * metric_dict['p@%d' % number_to_predict] * metric_dict[
                    'r@%d' % number_to_predict] / float(
                    metric_dict['p@%d' % number_to_predict] + metric_dict['r@%d' % number_to_predict])
            else:
                metric_dict['f1@%d' % number_to_predict] = 0

            # Compute the binary preference measure (Bpref)
            bpref = 0.
            trunked_match = correctly_matched[:number_to_predict].tolist() # get the first K prediction to evaluate
            match_indexes = np.nonzero(trunked_match)[0]

            if len(match_indexes) > 0:
                for mid, mindex in enumerate(match_indexes):
                    bpref += 1. - float(mindex - mid) / float(number_to_predict) # there're mindex elements, and mid elements are correct, before the (mindex+1)-th element
                metric_dict['bpref@%d' % number_to_predict] = float(bpref)/float(len(match_indexes))
            else:
                metric_dict['bpref@%d' % number_to_predict] = 0

            # Compute the mean reciprocal rank (MRR)
            rank_first = 0
            try:
                rank_first = trunked_match.index(1) + 1
            except ValueError:
                pass

            if rank_first > 0:
                metric_dict['mrr@%d' % number_to_predict] = float(1)/float(rank_first)
            else:
                metric_dict['mrr@%d' % number_to_predict] = 0

        micro_metrics.append(metric_dict)
        micro_matches.append(correctly_matched)
        predict_scores.append(predict_score)

        '''
        Output keyphrases to prediction folder
        '''
        if not os.path.exists(prediction_dir):
            os.makedirs(prediction_dir)
        with open(prediction_dir + '/' + doc_name + '.txt.phrases', 'w') as output_file:
            output_file.write('\n'.join([' '.join(o_) for o_ in original_predict_outputs]))

        '''
        Print information on each prediction
        '''
        # print stuff
        a = '[SOURCE][{0}]: {1}'.format(len(input_sentence) ,' '.join(source_str))
        logger.info(a)
        a += '\n'

        b = '[GROUND-TRUTH]: %d/%d ground-truth phrases\n\t\t' % (len(target_outputs), len(target_list))
        target_output_set = set(['_'.join(t) for t in target_outputs])
        for id, target in enumerate(original_target_list):
            # bracket the targets that survived the target_filter step
            if '_'.join([stemmer.stem(w) for w in target]) in target_output_set:
                b += '['+' '.join(target) + ']; '
            else:
                b += ' '.join(target) + '; '
        logger.info(b)
        b += '\n'

        c = '[PREDICTION]: %d/%d predictions\n' % (len(predict_outputs), len(predict_list))
        c += '[Correct@10] = %d\n' % metric_dict['correct_number@10']
        c += '[Correct@50] = %d\n' % metric_dict['correct_number@50']
        for id, (predict, score, predict_index) in enumerate(zip(original_predict_outputs, predict_score, predict_indexes)):
            c += ('\n\t\t[%.3f][%d][%d]' % (score, len(predict), sum([len(w) for w in predict]))) + ' '.join(predict)
            if correctly_matched[id] == 1:
                c += ' [correct!]'
            if is_copied[id] == 1:
                c += '[copied!] %s'%str(predict_index)
                # print(('\n\t\t[%.3f]'% score) + ' '.join(predict) + ' [correct!]')
                # print(('\n\t\t[%.3f]'% score) + ' '.join(predict))
        c += '\n'

        # c = '[DECODE]: {}'.format(' '.join(cut_zero(phrase, idx2word)))
        # if inputs_unk is not None:
        #     k = '[_INPUT]: {}\n'.format(' '.join(cut_zero(inputs_unk.tolist(), idx2word, Lmax=len(idx2word))))
        #     logger.info(k)
        #     a += k
        logger.info(c)
        a += b + c

        for number_to_predict in [5, 10, 15, 20, 30, 40, 50]:
            d = '@%d - Precision=%.4f, Recall=%.4f, F1=%.4f, Bpref=%.4f, MRR=%.4f' % (
                number_to_predict, metric_dict['p@%d' % number_to_predict], metric_dict['r@%d' % number_to_predict],
                metric_dict['f1@%d' % number_to_predict], metric_dict['bpref@%d' % number_to_predict], metric_dict['mrr@%d' % number_to_predict])
            logger.info(d)
            a += d + '\n'

        logger.info('*' * 100)
        outs.append(a)
        outs.append('*' * 100 + '\n')

    # omit the bad data which contains 0 predictions
    # real_test_size = sum([1 if m['target_number'] > 0 else 0 for m in micro_metrics])
    real_test_size = len(inputs)

    '''
    Compute the corpus evaluation
    '''
    logger.info('Experiment result: %s' % (config['predict_path'] + '/' + model_name+'-'+dataset_name+'.txt'))
    csv_writer = open(config['predict_path'] + '/' + model_name+'-'+dataset_name+'.txt', 'w')

    overall_score = {}

    for k in [5, 10, 15, 20, 30, 40, 50]:
        correct_number = sum([m['correct_number@%d' % k] for m in micro_metrics])
        appear_target_number = sum([m['appear_target_number'] for m in micro_metrics])
        target_number = sum([m['target_number'] for m in micro_metrics])

        # Compute the Micro Measures, by averaging the micro-score of each prediction
        overall_score['p@%d' % k] = float(sum([m['p@%d' % k] for m in micro_metrics])) / float(real_test_size)
        overall_score['r@%d' % k] = float(sum([m['r@%d' % k] for m in micro_metrics])) / float(real_test_size)
        overall_score['f1@%d' % k] = float(sum([m['f1@%d' % k] for m in micro_metrics])) / float(real_test_size)

        output_str = 'Overall - %s valid testing data=%d, Number of Target=%d/%d, Number of Prediction=%d, Number of Correct=%d' % (
            config['predict_type'], real_test_size,
            appear_target_number, target_number,
            real_test_size * k, correct_number
        )
        outs.append(output_str+'\n')
        logger.info(output_str)
        output_str = 'Micro:\t\tP@%d=%f, R@%d=%f, F1@%d=%f' % (
            k, overall_score['p@%d' % k],
            k, overall_score['r@%d' % k],
            k, overall_score['f1@%d' % k]
        )
        outs.append(output_str+'\n')
        logger.info(output_str)
        csv_writer.write('Micro@%d, %f, %f, %f\n' % (
            k,
            overall_score['p@%d' % k],
            overall_score['r@%d' % k],
            overall_score['f1@%d' % k]
        ))

        # Compute the Macro Measures
        overall_score['macro_p@%d' % k] = correct_number / float(real_test_size * k)
        overall_score['macro_r@%d' % k] = correct_number / float(appear_target_number)
        if overall_score['macro_p@%d' % k] + overall_score['macro_r@%d' % k] > 0:
            overall_score['macro_f1@%d' % k] = 2 * overall_score['macro_p@%d' % k] * overall_score[
                'macro_r@%d' % k] / float(overall_score['macro_p@%d' % k] + overall_score['macro_r@%d' % k])
        else:
            overall_score['macro_f1@%d' % k] = 0
        output_str = 'Macro:\t\tP@%d=%f, R@%d=%f, F1@%d=%f' % (
            k, overall_score['macro_p@%d' % k],
            k, overall_score['macro_r@%d' % k],
            k, overall_score['macro_f1@%d' % k]
        )
        outs.append(output_str+'\n')
        logger.info(output_str)
        csv_writer.write('Macro@%d, %f, %f, %f\n' % (
            k,
            overall_score['macro_p@%d' % k],
            overall_score['macro_r@%d' % k],
            overall_score['macro_f1@%d' % k]
        ))

        # Compute the binary preference measure (Bpref)
        overall_score['bpref@%d' % k] = float(sum([m['bpref@%d' % k] for m in micro_metrics])) / float(real_test_size)
        # Compute the mean reciprocal rank (MRR)
        overall_score['mrr@%d' % k] = float(sum([m['mrr@%d' % k] for m in micro_metrics])) / float(real_test_size)
        output_str = '\t\t\tBpref@%d=%f, MRR@%d=%f' % (
            k, overall_score['bpref@%d' % k],
            k, overall_score['mrr@%d' % k]
        )
        outs.append(output_str+'\n')
        logger.info(output_str)

    # evaluate the score cutoff
    # NOTE(review): `len(filter(...))` is Python-2-only; in Python 3 `filter`
    # returns an iterator and this raises TypeError.
    for cutoff in range(15):
        overall_predicted_number = 0
        overall_correct_number = 0
        overall_target_number = sum([m['target_number'] for m in micro_metrics])
        for score_list, metric_dict, correctly_matched in zip(predict_scores, micro_metrics, micro_matches):
            predicted_number = len(filter(lambda s:s < cutoff, score_list))
            overall_predicted_number += predicted_number
            overall_correct_number += sum(correctly_matched[:predicted_number])
        if overall_predicted_number > 0:
            macro_p = float(overall_correct_number) / float(overall_predicted_number)
        else:
            macro_p = 0
        macro_r = float(overall_correct_number) / float(overall_target_number)
        if macro_p + macro_r > 0:
            macro_f1 = 2. * macro_p * macro_r / (macro_p + macro_r)
        else:
            macro_f1 = 0
        logger.info('Macro,cutoff@%d, correct_number=%d, predicted_number=%d, target_number=%d, p=%f, r=%f, f1=%f' % (
            cutoff,
            overall_correct_number, overall_predicted_number, overall_target_number,
            macro_p, macro_r, macro_f1
        ))
        csv_writer.write('Macro,cutoff@%d, %f, %f, %f\n' % (
            cutoff, macro_p, macro_r, macro_f1
        ))

    csv_writer.close()

    return outs, overall_score
def export_keyphrase(predictions, text_dir, prediction_dir):
    """Write one '<doc>.phrases' file per source document.

    :param predictions: iterable of per-document phrase lists (list of str),
                        aligned with the directory listing of `text_dir`
                        (NOTE: os.listdir order is platform-dependent — the
                        caller must supply predictions in that same order).
    :param text_dir: directory whose file names (up to the first '.') give
                     the document names.
    :param prediction_dir: output path prefix; must end with a separator,
                           since the file name is appended directly.
    """
    doc_names = [name[:name.index('.')] for name in os.listdir(text_dir)]
    for name_, prediction_ in zip(doc_names, predictions):
        # BUG FIX: the file was opened in the default read mode ('r'),
        # so write() raised an error; open it for writing instead.
        with open(prediction_dir + name_ + '.phrases', 'w') as output_file:
            output_file.write('\n'.join(prediction_))
| [
"logging.getLogger",
"os.path.exists",
"os.listdir",
"os.makedirs",
"numpy.delete",
"numpy.asarray",
"numpy.argsort",
"numpy.nonzero",
"copy.copy",
"dataset.dataset_utils.get_none_phrases",
"math.exp"
] | [((209, 236), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (226, 236), False, 'import logging\n'), ((2902, 2924), 'copy.copy', 'copy.copy', (['target_list'], {}), '(target_list)\n', (2911, 2924), False, 'import copy\n'), ((12562, 12598), 'numpy.asarray', 'np.asarray', (['original_predict_outputs'], {}), '(original_predict_outputs)\n', (12572, 12598), True, 'import numpy as np\n'), ((12625, 12652), 'numpy.asarray', 'np.asarray', (['predict_indexes'], {}), '(predict_indexes)\n', (12635, 12652), True, 'import numpy as np\n'), ((12679, 12706), 'numpy.asarray', 'np.asarray', (['predict_outputs'], {}), '(predict_outputs)\n', (12689, 12706), True, 'import numpy as np\n'), ((12731, 12756), 'numpy.asarray', 'np.asarray', (['predict_score'], {}), '(predict_score)\n', (12741, 12756), True, 'import numpy as np\n'), ((12777, 12798), 'numpy.asarray', 'np.asarray', (['is_copied'], {}), '(is_copied)\n', (12787, 12798), True, 'import numpy as np\n'), ((5399, 5477), 'dataset.dataset_utils.get_none_phrases', 'dataset_utils.get_none_phrases', (['stemmed_source', 'postag_list', "config['max_len']"], {}), "(stemmed_source, postag_list, config['max_len'])\n", (5429, 5477), False, 'from dataset import dataset_utils\n'), ((11631, 11686), 'numpy.delete', 'np.delete', (['original_predict_outputs', 'match_phrase_index'], {}), '(original_predict_outputs, match_phrase_index)\n', (11640, 11686), True, 'import numpy as np\n'), ((11717, 11763), 'numpy.delete', 'np.delete', (['predict_indexes', 'match_phrase_index'], {}), '(predict_indexes, match_phrase_index)\n', (11726, 11763), True, 'import numpy as np\n'), ((11794, 11840), 'numpy.delete', 'np.delete', (['predict_outputs', 'match_phrase_index'], {}), '(predict_outputs, match_phrase_index)\n', (11803, 11840), True, 'import numpy as np\n'), ((11870, 11914), 'numpy.delete', 'np.delete', (['predict_score', 'match_phrase_index'], {}), '(predict_score, match_phrase_index)\n', (11879, 11914), True, 'import numpy as 
np\n'), ((11940, 11980), 'numpy.delete', 'np.delete', (['is_copied', 'match_phrase_index'], {}), '(is_copied, match_phrase_index)\n', (11949, 11980), True, 'import numpy as np\n'), ((13057, 13082), 'numpy.argsort', 'np.argsort', (['predict_score'], {}), '(predict_score)\n', (13067, 13082), True, 'import numpy as np\n'), ((16115, 16145), 'os.path.exists', 'os.path.exists', (['prediction_dir'], {}), '(prediction_dir)\n', (16129, 16145), False, 'import os\n'), ((16159, 16186), 'os.makedirs', 'os.makedirs', (['prediction_dir'], {}), '(prediction_dir)\n', (16170, 16186), False, 'import os\n'), ((24065, 24085), 'os.listdir', 'os.listdir', (['text_dir'], {}), '(text_dir)\n', (24075, 24085), False, 'import os\n'), ((15001, 15026), 'numpy.nonzero', 'np.nonzero', (['trunked_match'], {}), '(trunked_match)\n', (15011, 15026), True, 'import numpy as np\n'), ((12934, 12949), 'math.exp', 'math.exp', (['score'], {}), '(score)\n', (12942, 12949), False, 'import math\n')] |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import LeaveOneOut
from sklearn import linear_model, datasets, metrics
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import pandas as pd
import seaborn as sn
from sklearn.linear_model import LogisticRegressionCV
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import make_scorer
class TrainValTestModel:
    """Scale features, fit a classifier on the train split, evaluate on val/test.

    The scaler is fit on ``X_train`` only and applied to all three splits.
    ``model_name`` selects the estimator: 'LR' (logistic regression),
    'RF' (random forest) or 'GB' (gradient boosting).  With ``cross_val=True``
    a randomized hyper-parameter search over a random forest is run instead.

    NOTE(review): relies on ``StandardScaler``, ``RandomForestClassifier``,
    ``LogisticRegression``, ``metrics`` and ``plt`` being imported earlier in
    the file — confirm.
    """
    def __init__(self, X_train, X_val, X_test, y_train, y_val, y_test, model_name, cross_val=False):
        self.scaler = StandardScaler()
        self.scaler.fit(X_train)  # fit on the training split only (no leakage)
        self.X_train = self.scaler.transform(X_train)
        self.X_val = self.scaler.transform(X_val)
        self.X_test = self.scaler.transform(X_test)
        self.y_train = y_train
        self.y_val = y_val
        self.y_test = y_test
        self.model_name = model_name
        if cross_val:
            best_params = self.tuning_hyperparameter()
            self.best_params = best_params
            self.clf = RandomForestClassifier(bootstrap=best_params['bootstrap'],
                                              max_depth=best_params['max_depth'],
                                              max_features=best_params['max_features'],
                                              min_samples_leaf=best_params['min_samples_leaf'],
                                              min_samples_split=best_params['min_samples_split'],
                                              n_estimators=best_params['n_estimators'])
            self.clf.fit(self.X_train, self.y_train)
        else:
            self.clf = self.get_model(model_name, self.X_train, self.y_train)
        self.fpr, self.tpr, self.thrshd_roc = self.get_fpr_tpr()

    def get_model(self, model_name, X_train, y_train):
        """Build and fit the classifier selected by ``model_name``."""
        if model_name == 'LR':
            clf = LogisticRegression(solver='lbfgs')
        elif model_name == 'RF':
            clf = RandomForestClassifier(max_depth=2, random_state=0)
        elif model_name == 'GB':
            # fixed: removed the redundant double assignment ``clf = clf = ...``
            clf = GradientBoostingClassifier(random_state=0)
        else:
            # fixed: an unknown name previously crashed with UnboundLocalError
            raise ValueError("unknown model_name: %r" % (model_name,))
        clf.fit(X_train, y_train)
        return clf

    def tuning_hyperparameter(self):
        """Randomized search over random-forest hyper-parameters.

        Returns the best parameter dict found by ``RandomizedSearchCV``.
        """
        n_estimators = [int(x) for x in np.linspace(start=200, stop=2000, num=10)]
        max_features = ['auto', 'sqrt']
        max_depth = [int(x) for x in np.linspace(10, 110, num=11)]
        max_depth.append(None)
        min_samples_split = [2, 5, 10]
        min_samples_leaf = [1, 2, 4]
        bootstrap = [True, False]
        # Create the random grid
        random_grid = {'n_estimators': n_estimators,
                       'max_features': max_features,
                       'max_depth': max_depth,
                       'min_samples_split': min_samples_split,
                       'min_samples_leaf': min_samples_leaf,
                       'bootstrap': bootstrap}

        def my_scorer(clf, X, y_true):
            # Unused alternative scorer (false-positive rate), kept for reference.
            # fixed: threshold the positive-class column, not the (n, 2) matrix.
            y_pred_proba = clf.predict_proba(X)[:, 1]
            y_pred = np.where(y_pred_proba > 0.5, 1, 0)
            error = np.sum(np.logical_and(y_pred != y_true, y_pred == 1)) / np.count_nonzero(y_true == 0)
            return error

        def fp(y_true, y_pred):
            # Raw false-positive count taken from the confusion matrix.
            return confusion_matrix(y_true, y_pred)[0, 1]
        # NOTE(review): make_scorer defaults to greater_is_better=True, so the
        # search *maximizes* the false-positive count — confirm this is intended.
        score = make_scorer(fp)
        rf = RandomForestClassifier()
        rf_random = RandomizedSearchCV(estimator=rf, param_distributions=random_grid, scoring=score, n_iter=100, cv=4, verbose=2, random_state=42, n_jobs=-1)
        # Fit the random search model
        rf_random.fit(self.X_train, self.y_train)
        return rf_random.best_params_

    def get_fpr_tpr(self):
        """ROC curve (fpr, tpr, thresholds) of the fitted model on the validation split."""
        prob_on_val = self.clf.predict_proba(self.X_val)[:, 1]
        fpr, tpr, thrshd_roc = metrics.roc_curve(self.y_val, prob_on_val, pos_label=1)
        return fpr, tpr, thrshd_roc

    def get_metrics(self, thresh=None):
        """Print a classification report on the test split at threshold ``thresh`` (default 0.5)."""
        # fixed: identity comparison ``is None`` instead of ``== None``
        p = 0.5 if thresh is None else thresh
        pred_proba_df = pd.DataFrame(self.clf.predict_proba(self.X_test)[:, 1])
        y_pred = pred_proba_df.applymap(lambda x: 1 if x > p else 0).to_numpy().reshape((pred_proba_df.shape[0]))
        print("%s:\n%s\n" % (self.model_name,
                             metrics.classification_report(self.y_test, y_pred)))

    def get_important_feature(self):
        """Return per-feature importances (LR coefficients, or impurity importances)."""
        if self.model_name == 'LR':
            importance = self.clf.coef_[0]
        elif self.model_name == 'RF':
            importance = self.clf.feature_importances_
        elif self.model_name == 'GB':
            importance = self.clf.feature_importances_
        else:
            # fixed: previously fell through to UnboundLocalError
            raise ValueError("unknown model_name: %r" % (self.model_name,))
        return importance

    def test_false_positive(self):
        """Plot confusion-matrix heatmaps on the test split over a grid of thresholds."""
        pred_proba_df = pd.DataFrame(self.clf.predict_proba(self.X_test)[:, 1])
        threshold_list = [0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, .7, .75, .8, .85, .9, .95, .99]
        for i in threshold_list:
            print('\n******** For i = {} ******'.format(i))
            y_test_pred = pred_proba_df.applymap(lambda x: 1 if x > i else 0).to_numpy().reshape((pred_proba_df.shape[0]))
            dataset = {'y_Actual': self.y_test,
                       'y_Predicted': y_test_pred
                       }
            df = pd.DataFrame(dataset, columns=['y_Actual', 'y_Predicted'])
            # fixed: local name no longer shadows sklearn's confusion_matrix
            cm = pd.crosstab(df['y_Actual'], df['y_Predicted'], rownames=['Actual'], colnames=['Predicted'])
            plt.show()
            sn.heatmap(cm, annot=True)

    def false_positive_index(self, clf, X_test, y_test, threshold):
        """Indices of samples predicted positive whose true label differs."""
        pred_proba_df = pd.DataFrame(clf.predict_proba(X_test)[:, 1])
        y_test_pred = pred_proba_df.applymap(lambda x: 1 if x > threshold else 0).to_numpy().reshape((pred_proba_df.shape[0]))
        false_positives = np.logical_and(y_test != y_test_pred, y_test_pred == 1)
        return np.arange(len(y_test))[false_positives]

    def false_negtive_index(self, clf, X_test, y_test, threshold):
        """Indices of samples predicted negative whose true label differs."""
        pred_proba_df = pd.DataFrame(clf.predict_proba(X_test)[:, 1])
        y_test_pred = pred_proba_df.applymap(lambda x: 1 if x > threshold else 0).to_numpy().reshape((pred_proba_df.shape[0]))
        false_negtives = np.logical_and(y_test != y_test_pred, y_test_pred == 0)
        return np.arange(len(y_test))[false_negtives]
class LeaveOneOutModel:
    """Leave-one-out cross-validated classifier with threshold selection.

    LOO-CV on the training split selects the accuracy-maximizing decision
    threshold; the final model is then refit on the full training split.

    NOTE(review): relies on ``StandardScaler``, ``LeaveOneOut``,
    ``LogisticRegression``, ``RandomForestClassifier``, ``metrics`` and
    ``plt`` being imported earlier in the file — confirm.
    """
    def __init__(self, X_train, X_test, y_train, y_test, model_name):
        self.scaler = StandardScaler()
        self.scaler.fit(X_train)  # fit on the training split only (no leakage)
        self.X_train = self.scaler.transform(X_train)
        self.X_test = self.scaler.transform(X_test)
        self.y_train = y_train
        self.y_test = y_test
        self.model_name = model_name
        self.bst_thresh, self._y_prob, self.fpr, self.tpr, self.thrshd_roc = self.leave_one_out_cv_v1(self.X_train, self.y_train, self.model_name)
        self.clf = self.get_model(model_name, self.X_train, self.y_train)

    def leave_one_out_cv_v0(self, X, y, model_name):
        """Slow LOO variant: refits once per (threshold, sample) pair.

        Returns (best_threshold, TPR_per_threshold, FPR_per_threshold).
        """
        threshold_list = np.arange(0.01, 1, 0.01)
        score = np.zeros(threshold_list.shape)
        test_num = len(X)
        TP = np.zeros(threshold_list.shape)
        FN = np.zeros(threshold_list.shape)
        FP = np.zeros(threshold_list.shape)
        TN = np.zeros(threshold_list.shape)
        for i in range(len(threshold_list)):
            loo = LeaveOneOut()
            # leave one out loop
            for _train_index, _test_index in loo.split(X):
                _X_train, _X_test = X[_train_index], X[_test_index]
                _y_train, _y_test = y[_train_index], y[_test_index]
                clf = self.get_model(model_name, _X_train, _y_train)
                pred_proba_df = clf.predict_proba(_X_test)[:, 1]
                if _y_test == 0:
                    if pred_proba_df <= threshold_list[i]:
                        score[i] += 1 / test_num
                        TN[i] += 1
                    else:
                        # fixed: a negative predicted positive is a FALSE POSITIVE
                        FP[i] += 1
                elif _y_test == 1:
                    if pred_proba_df > threshold_list[i]:
                        score[i] += 1 / test_num
                        TP[i] += 1
                    else:
                        # fixed: a positive predicted negative is a FALSE NEGATIVE
                        FN[i] += 1
        # fixed: TPR = TP/(TP+FN) and FPR = FP/(FP+TN) (was TN/(TN+FP), i.e. TNR);
        # guard the previously noted division-by-zero when a class is absent.
        with np.errstate(divide='ignore', invalid='ignore'):
            TPR = np.where(TP + FN > 0, TP / (TP + FN), 0.0)
            FPR = np.where(FP + TN > 0, FP / (FP + TN), 0.0)
        # get the threshold of best score
        threshold = threshold_list[np.argmax(score)]
        return threshold, TPR, FPR

    def leave_one_out_cv_v1(self, X, y, model_name):
        """Efficient LOO: one fit per sample, all thresholds scored from cached probabilities.

        Returns (best_threshold, held_out_probs, fpr, tpr, roc_thresholds).
        """
        threshold_list = np.arange(0.01, 1, 0.01)
        score = np.zeros(threshold_list.shape)
        test_num = len(X)
        _y_prob = np.zeros(len(X))
        loo = LeaveOneOut()
        # leave one out loop
        for _train_index, _test_index in loo.split(X):
            _X_train, _X_test = X[_train_index], X[_test_index]
            _y_train, _y_test = y[_train_index], y[_test_index]
            clf = self.get_model(model_name, _X_train, _y_train)
            pred_proba_df = clf.predict_proba(_X_test)[:, 1]
            _y_prob[_test_index] = pred_proba_df
            for i in range(len(threshold_list)):
                if _y_test == 0 and pred_proba_df <= threshold_list[i]:
                    score[i] += 1 / test_num
                elif _y_test == 1 and pred_proba_df > threshold_list[i]:
                    score[i] += 1 / test_num
        # get the threshold of best score
        threshold = threshold_list[np.argmax(score)]
        fpr, tpr, thrshd_roc = metrics.roc_curve(y, _y_prob, pos_label=1)
        return threshold, _y_prob, fpr, tpr, thrshd_roc

    def get_model(self, model_name, X_train, y_train):
        """Build and fit the classifier selected by ``model_name``."""
        if model_name == 'LR':
            clf = LogisticRegression(solver='lbfgs')
        elif model_name == 'RF':
            clf = RandomForestClassifier(max_depth=2, random_state=0)
        elif model_name == 'GB':
            # fixed: removed the redundant double assignment ``clf = clf = ...``
            clf = GradientBoostingClassifier(random_state=0)
        else:
            # fixed: an unknown name previously crashed with UnboundLocalError
            raise ValueError("unknown model_name: %r" % (model_name,))
        clf.fit(X_train, y_train)
        return clf

    def get_metrics(self, thresh=None):
        """Print a classification report on the test split.

        Uses the LOO-selected threshold when ``thresh`` is None.
        """
        # fixed: identity comparison ``is None`` instead of ``== None``
        p = self.bst_thresh if thresh is None else thresh
        pred_proba_df = pd.DataFrame(self.clf.predict_proba(self.X_test)[:, 1])
        Y_pred = pred_proba_df.applymap(lambda x: 1 if x > p else 0).to_numpy().reshape((pred_proba_df.shape[0]))
        print("%s:\n%s\n" % (self.model_name,
                             metrics.classification_report(self.y_test, Y_pred)))
        return 0

    def get_important_feature(self):
        """Return per-feature importances (LR coefficients, or impurity importances)."""
        if self.model_name == 'LR':
            importance = self.clf.coef_[0]
        elif self.model_name == 'RF':
            importance = self.clf.feature_importances_
        elif self.model_name == 'GB':
            # fixed: 'GB' is buildable via get_model but was missing here
            importance = self.clf.feature_importances_
        else:
            raise ValueError("unknown model_name: %r" % (self.model_name,))
        return importance

    def test_false_positive(self):
        """Plot confusion-matrix heatmaps on the test split over a grid of thresholds."""
        pred_proba_df = pd.DataFrame(self.clf.predict_proba(self.X_test)[:, 1])
        threshold_list = [0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, .7, .75, .8, .85, .9, .95, .99]
        for i in threshold_list:
            print('\n******** For i = {} ******'.format(i))
            y_test_pred = pred_proba_df.applymap(lambda x: 1 if x > i else 0).to_numpy().reshape((pred_proba_df.shape[0]))
            dataset = {'y_Actual': self.y_test,
                       'y_Predicted': y_test_pred
                       }
            df = pd.DataFrame(dataset, columns=['y_Actual', 'y_Predicted'])
            # fixed: local name no longer shadows sklearn's confusion_matrix
            cm = pd.crosstab(df['y_Actual'], df['y_Predicted'], rownames=['Actual'], colnames=['Predicted'])
            plt.show()
            sn.heatmap(cm, annot=True)

    def false_positive_index(self, clf, X_test, y_test, threshold):
        """Indices of samples predicted positive whose true label differs."""
        pred_proba_df = pd.DataFrame(clf.predict_proba(X_test)[:, 1])
        y_test_pred = pred_proba_df.applymap(lambda x: 1 if x > threshold else 0).to_numpy().reshape((pred_proba_df.shape[0]))
        false_positives = np.logical_and(y_test != y_test_pred, y_test_pred == 1)
        return np.arange(len(y_test))[false_positives]

    def false_negtive_index(self, clf, X_test, y_test, threshold):
        """Indices of samples predicted negative whose true label differs."""
        pred_proba_df = pd.DataFrame(clf.predict_proba(X_test)[:, 1])
        y_test_pred = pred_proba_df.applymap(lambda x: 1 if x > threshold else 0).to_numpy().reshape((pred_proba_df.shape[0]))
        false_negtives = np.logical_and(y_test != y_test_pred, y_test_pred == 0)
        return np.arange(len(y_test))[false_negtives]
"sklearn.metrics.classification_report",
"numpy.count_nonzero",
"sklearn.metrics.roc_curve",
"numpy.arange",
"numpy.where",
"numpy.linspace",
"pandas.DataFrame",
"sklearn.metrics.confusion_matrix",
"sklearn.model_selection.LeaveOneOut",
"pandas.crosstab",
"sklearn.ensemble.RandomForestClassifier... | [((1143, 1159), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1157, 1159), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3990, 4005), 'sklearn.metrics.make_scorer', 'make_scorer', (['fp'], {}), '(fp)\n', (4001, 4005), False, 'from sklearn.metrics import make_scorer\n'), ((4019, 4043), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {}), '()\n', (4041, 4043), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((4064, 4206), 'sklearn.model_selection.RandomizedSearchCV', 'RandomizedSearchCV', ([], {'estimator': 'rf', 'param_distributions': 'random_grid', 'scoring': 'score', 'n_iter': '(100)', 'cv': '(4)', 'verbose': '(2)', 'random_state': '(42)', 'n_jobs': '(-1)'}), '(estimator=rf, param_distributions=random_grid, scoring=\n score, n_iter=100, cv=4, verbose=2, random_state=42, n_jobs=-1)\n', (4082, 4206), False, 'from sklearn.model_selection import RandomizedSearchCV\n'), ((4450, 4505), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['self.y_val', 'prob_on_val'], {'pos_label': '(1)'}), '(self.y_val, prob_on_val, pos_label=1)\n', (4467, 4505), False, 'from sklearn import linear_model, datasets, metrics\n'), ((6682, 6737), 'numpy.logical_and', 'np.logical_and', (['(y_test != y_test_pred)', '(y_test_pred == 1)'], {}), '(y_test != y_test_pred, y_test_pred == 1)\n', (6696, 6737), True, 'import numpy as np\n'), ((7124, 7179), 'numpy.logical_and', 'np.logical_and', (['(y_test != y_test_pred)', '(y_test_pred == 0)'], {}), '(y_test != y_test_pred, y_test_pred == 0)\n', (7138, 7179), True, 'import numpy as np\n'), ((7393, 7409), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (7407, 7409), False, 'from sklearn.preprocessing import StandardScaler\n'), ((7997, 8021), 'numpy.arange', 'np.arange', (['(0.01)', '(1)', '(0.01)'], {}), '(0.01, 1, 0.01)\n', (8006, 8021), True, 'import numpy as np\n'), ((8038, 8068), 
'numpy.zeros', 'np.zeros', (['threshold_list.shape'], {}), '(threshold_list.shape)\n', (8046, 8068), True, 'import numpy as np\n'), ((8108, 8138), 'numpy.zeros', 'np.zeros', (['threshold_list.shape'], {}), '(threshold_list.shape)\n', (8116, 8138), True, 'import numpy as np\n'), ((8152, 8182), 'numpy.zeros', 'np.zeros', (['threshold_list.shape'], {}), '(threshold_list.shape)\n', (8160, 8182), True, 'import numpy as np\n'), ((8196, 8226), 'numpy.zeros', 'np.zeros', (['threshold_list.shape'], {}), '(threshold_list.shape)\n', (8204, 8226), True, 'import numpy as np\n'), ((8240, 8270), 'numpy.zeros', 'np.zeros', (['threshold_list.shape'], {}), '(threshold_list.shape)\n', (8248, 8270), True, 'import numpy as np\n'), ((9579, 9603), 'numpy.arange', 'np.arange', (['(0.01)', '(1)', '(0.01)'], {}), '(0.01, 1, 0.01)\n', (9588, 9603), True, 'import numpy as np\n'), ((9620, 9650), 'numpy.zeros', 'np.zeros', (['threshold_list.shape'], {}), '(threshold_list.shape)\n', (9628, 9650), True, 'import numpy as np\n'), ((9727, 9740), 'sklearn.model_selection.LeaveOneOut', 'LeaveOneOut', ([], {}), '()\n', (9738, 9740), False, 'from sklearn.model_selection import LeaveOneOut\n'), ((10539, 10581), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['y', '_y_prob'], {'pos_label': '(1)'}), '(y, _y_prob, pos_label=1)\n', (10556, 10581), False, 'from sklearn import linear_model, datasets, metrics\n'), ((13249, 13304), 'numpy.logical_and', 'np.logical_and', (['(y_test != y_test_pred)', '(y_test_pred == 1)'], {}), '(y_test != y_test_pred, y_test_pred == 1)\n', (13263, 13304), True, 'import numpy as np\n'), ((13691, 13746), 'numpy.logical_and', 'np.logical_and', (['(y_test != y_test_pred)', '(y_test_pred == 0)'], {}), '(y_test != y_test_pred, y_test_pred == 0)\n', (13705, 13746), True, 'import numpy as np\n'), ((1635, 1929), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'bootstrap': "best_params['bootstrap']", 'max_depth': "best_params['max_depth']", 'max_features': 
"best_params['max_features']", 'min_samples_leaf': "best_params['min_samples_leaf']", 'min_samples_split': "best_params['min_samples_split']", 'n_estimators': "best_params['n_estimators']"}), "(bootstrap=best_params['bootstrap'], max_depth=\n best_params['max_depth'], max_features=best_params['max_features'],\n min_samples_leaf=best_params['min_samples_leaf'], min_samples_split=\n best_params['min_samples_split'], n_estimators=best_params['n_estimators'])\n", (1657, 1929), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((2518, 2552), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""lbfgs"""'}), "(solver='lbfgs')\n", (2536, 2552), False, 'from sklearn.linear_model import LogisticRegression\n'), ((3729, 3763), 'numpy.where', 'np.where', (['(y_pred_proba > 0.5)', '(1)', '(0)'], {}), '(y_pred_proba > 0.5, 1, 0)\n', (3737, 3763), True, 'import numpy as np\n'), ((6090, 6148), 'pandas.DataFrame', 'pd.DataFrame', (['dataset'], {'columns': "['y_Actual', 'y_Predicted']"}), "(dataset, columns=['y_Actual', 'y_Predicted'])\n", (6102, 6148), True, 'import pandas as pd\n'), ((6179, 6274), 'pandas.crosstab', 'pd.crosstab', (["df['y_Actual']", "df['y_Predicted']"], {'rownames': "['Actual']", 'colnames': "['Predicted']"}), "(df['y_Actual'], df['y_Predicted'], rownames=['Actual'],\n colnames=['Predicted'])\n", (6190, 6274), True, 'import pandas as pd\n'), ((6284, 6294), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6292, 6294), True, 'from matplotlib import pyplot as plt\n'), ((6307, 6347), 'seaborn.heatmap', 'sn.heatmap', (['confusion_matrix'], {'annot': '(True)'}), '(confusion_matrix, annot=True)\n', (6317, 6347), True, 'import seaborn as sn\n'), ((8335, 8348), 'sklearn.model_selection.LeaveOneOut', 'LeaveOneOut', ([], {}), '()\n', (8346, 8348), False, 'from sklearn.model_selection import LeaveOneOut\n'), ((9418, 9434), 'numpy.argmax', 'np.argmax', (['score'], {}), '(score)\n', (9427, 9434), True, 'import numpy as 
np\n'), ((10490, 10506), 'numpy.argmax', 'np.argmax', (['score'], {}), '(score)\n', (10499, 10506), True, 'import numpy as np\n'), ((10841, 10875), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""lbfgs"""'}), "(solver='lbfgs')\n", (10859, 10875), False, 'from sklearn.linear_model import LogisticRegression\n'), ((12657, 12715), 'pandas.DataFrame', 'pd.DataFrame', (['dataset'], {'columns': "['y_Actual', 'y_Predicted']"}), "(dataset, columns=['y_Actual', 'y_Predicted'])\n", (12669, 12715), True, 'import pandas as pd\n'), ((12746, 12841), 'pandas.crosstab', 'pd.crosstab', (["df['y_Actual']", "df['y_Predicted']"], {'rownames': "['Actual']", 'colnames': "['Predicted']"}), "(df['y_Actual'], df['y_Predicted'], rownames=['Actual'],\n colnames=['Predicted'])\n", (12757, 12841), True, 'import pandas as pd\n'), ((12851, 12861), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12859, 12861), True, 'from matplotlib import pyplot as plt\n'), ((12874, 12914), 'seaborn.heatmap', 'sn.heatmap', (['confusion_matrix'], {'annot': '(True)'}), '(confusion_matrix, annot=True)\n', (12884, 12914), True, 'import seaborn as sn\n'), ((2628, 2679), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'max_depth': '(2)', 'random_state': '(0)'}), '(max_depth=2, random_state=0)\n', (2650, 2679), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((2953, 2994), 'numpy.linspace', 'np.linspace', ([], {'start': '(200)', 'stop': '(2000)', 'num': '(10)'}), '(start=200, stop=2000, num=10)\n', (2964, 2994), True, 'import numpy as np\n'), ((3079, 3107), 'numpy.linspace', 'np.linspace', (['(10)', '(110)'], {'num': '(11)'}), '(10, 110, num=11)\n', (3090, 3107), True, 'import numpy as np\n'), ((3841, 3870), 'numpy.count_nonzero', 'np.count_nonzero', (['(y_true == 0)'], {}), '(y_true == 0)\n', (3857, 3870), True, 'import numpy as np\n'), ((3935, 3967), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_true', 'y_pred'], 
{}), '(y_true, y_pred)\n', (3951, 3967), False, 'from sklearn.metrics import confusion_matrix\n'), ((10951, 11002), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'max_depth': '(2)', 'random_state': '(0)'}), '(max_depth=2, random_state=0)\n', (10973, 11002), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((2779, 2821), 'sklearn.ensemble.GradientBoostingClassifier', 'GradientBoostingClassifier', ([], {'random_state': '(0)'}), '(random_state=0)\n', (2805, 2821), False, 'from sklearn.ensemble import GradientBoostingClassifier\n'), ((3792, 3837), 'numpy.logical_and', 'np.logical_and', (['(y_pred != y_true)', '(y_pred == 1)'], {}), '(y_pred != y_true, y_pred == 1)\n', (3806, 3837), True, 'import numpy as np\n'), ((4951, 5001), 'sklearn.metrics.classification_report', 'metrics.classification_report', (['self.y_test', 'y_pred'], {}), '(self.y_test, y_pred)\n', (4980, 5001), False, 'from sklearn import linear_model, datasets, metrics\n'), ((11102, 11144), 'sklearn.ensemble.GradientBoostingClassifier', 'GradientBoostingClassifier', ([], {'random_state': '(0)'}), '(random_state=0)\n', (11128, 11144), False, 'from sklearn.ensemble import GradientBoostingClassifier\n'), ((11619, 11669), 'sklearn.metrics.classification_report', 'metrics.classification_report', (['self.y_test', 'Y_pred'], {}), '(self.y_test, Y_pred)\n', (11648, 11669), False, 'from sklearn import linear_model, datasets, metrics\n')] |
import time
import unittest
from typing import Union
import torch
import numpy as np
import random
from functools import lru_cache
from einops import rearrange, repeat
import torch.nn.functional as F
from torch import nn, einsum
@lru_cache()
def get_2dmask(seq_len, nx, ny, w, d):
    """Dense (seq_len, seq_len) boolean mask for 2-D local attention.

    Positions are treated as pixels of an (nx, ny) grid in row-major order.
    An entry is True (i.e. must be masked out) when the two pixels are more
    than ``w`` apart along either axis, or their offset is not a multiple of
    the dilation ``d``.  Cached because it depends only on the arguments.
    """
    rows = []
    for i in range(seq_len):
        xi, yi = i // ny, i % ny
        row = []
        for j in range(seq_len):
            dx = xi - j // ny
            dy = yi - j % ny
            blocked = abs(dx) > w or abs(dy) > w or dx % d != 0 or dy % d != 0
            row.append(blocked)
        rows.append(row)
    return torch.BoolTensor(rows, device='cpu')
def naive2d_matmul_qk(q, k, nx, ny, w, d, padding=0.0):
    """Reference O(N^2) attention scores with the 2-D locality mask applied.

    ``q`` and ``k`` are (batch, heads, seq_len, head_dim); masked positions
    are filled in place with ``padding`` and the scores are returned.
    """
    scores = torch.matmul(q, k.transpose(-2, -1))
    # Broadcast the (seq_len, seq_len) locality mask over batch and head dims.
    invalid = get_2dmask(scores.size(2), nx, ny, w, d).to(q.device)[None, None]
    scores.masked_fill_(invalid, padding)
    return scores
def _get_invalid_locations_mask_fixed_dilation(seq_len: int, nx: int, ny: int, w: int, d: int):
c1d = 2 * w + 1
c = 2 * w * (w + 1)
return torch.BoolTensor([
[
i // ny + d * (j // c1d - w) < 0 or i % ny + d * (j % c1d - w) < 0 or i % ny + d * (j % c1d - w) >= ny
for j in range(c)
]
for i in range(seq_len)
], device='cpu')
@lru_cache()
def _get_invalid_locations_mask(seq_len: int, nx: int, ny: int, w: int, d: Union[torch.Tensor, int], autoregressive: bool, device: str):
    """Beginning/ending invalid-location masks plus their element counts.

    Returns (beginning_mask, ending_mask, num_invalid, end_num_invalid); the
    ending pair is None in the autoregressive case.
    """
    if isinstance(d, int):
        # Single dilation shared by every head: one mask, broadcast later.
        mask = _get_invalid_locations_mask_fixed_dilation(seq_len, nx, ny, w, d)[None, None, :, :]
        num_invalid = mask.sum()
    else:
        # Per-head dilations: one mask per head, stacked along the head dim.
        per_head_masks = []
        per_head_counts = []
        for dilation in d.cpu().numpy().tolist():
            head_mask = _get_invalid_locations_mask_fixed_dilation(seq_len, nx, ny, w, dilation)
            per_head_masks.append(head_mask)
            per_head_counts.append(head_mask.sum())
        mask = torch.stack(per_head_masks, dim=0)[None, :, :, :]
        num_invalid = torch.stack(per_head_counts, dim=0)
    if autoregressive:
        ending_mask, end_num_invalid = None, None
    else:
        # The ending mask is the beginning mask flipped along both trailing dims.
        ending_mask = mask.flip(dims=(2, 3)).to(device)
        end_num_invalid = num_invalid.to(device)
    return mask.to(device), ending_mask, num_invalid.to(device), end_num_invalid
def mask_invalid_locations(input_tensor: torch.Tensor, nx: int, ny: int, w: int, d: Union[torch.Tensor, int], autoregressive: bool) -> torch.Tensor:
    """Fill invalid window positions of ``input_tensor`` with -inf, in place.

    Masks the first and (unless autoregressive) last 2*w*(w+1) columns of the
    last dimension, and returns the total number of masked elements.
    """
    seq_len = input_tensor.size(2)
    beginning_mask, ending_mask, num_invalid, end_num_invalid = _get_invalid_locations_mask(
        seq_len, nx, ny, w, d, autoregressive, input_tensor.device)
    c = 2 * w * (w + 1)
    neg_inf = -float('inf')
    head = input_tensor[:, :, :, :c]
    head.masked_fill_(beginning_mask.expand(head.size()), neg_inf)
    if not autoregressive:
        tail = input_tensor[:, :, :, -c:]
        tail.masked_fill_(ending_mask.expand(tail.size()), neg_inf)
        num_invalid = num_invalid + end_num_invalid
    return num_invalid
@lru_cache()
def _get_invalid_locations_mask_offical(nx: int, ny: int, w: int, d: int, autoregressive: bool, device: str):
    """Unfold-based ("official") invalid-location mask and its element count.

    Builds key indices per window column via F.unfold; padded positions carry
    the sentinel index nx*ny so they are always marked invalid.
    """
    flat_positions = torch.arange(nx * ny)
    key_pos = rearrange(flat_positions.float(), '(h w) -> () () h w', h=nx)
    # Pad with nx*ny (an index no query can match) so padding is never attended to.
    key_pos = F.pad(key_pos, (w * d,) * 4, value=nx * ny)
    key_pos = rearrange(F.unfold(key_pos, 2 * w + 1, dilation=d), 'b j i -> b i j')
    if autoregressive:
        # Causal: keys at or after the query position are invalid.
        query_pos = rearrange(flat_positions, 'i -> () i ()')
        mask = query_pos >= key_pos
    else:
        # Bidirectional: only padded (out-of-image) keys are invalid.
        mask = key_pos >= nx * ny
    return mask.to(device), mask.sum().to(device)
def mask_invalid_locations_offical(input_tensor: torch.Tensor, nx: int, ny: int, w: int, d: int, autoregressive: bool) -> torch.Tensor:
    """Apply the unfold-based invalid-location mask to ``input_tensor`` in place.

    Masked entries are set to -inf; returns the number of masked elements.
    """
    mask, num_invalid = _get_invalid_locations_mask_offical(
        nx, ny, w, d, autoregressive, input_tensor.device)
    input_tensor.masked_fill_(mask, -float('inf'))
    return num_invalid
def same_storage(x, y):
    """True iff the two tensors share one underlying storage (aliasing check)."""
    ptr_x = x.storage().data_ptr()
    ptr_y = y.storage().data_ptr()
    return ptr_x == ptr_y
class TestSlidingChunksMM(unittest.TestCase):
    """Compares the unfold-based sliding-window attention path against a
    dense N^2 reference (forward outputs and q/k/v input gradients), and
    reports wall-clock timings of both paths.

    NOTE(review): requires a CUDA device and the ``einops`` package; the
    helper functions defined earlier in this file (``naive2d_matmul_qk``,
    ``mask_invalid_locations_offical``) must be in scope.
    """
    def test_tvm_equal_naiven2(self):
        """Run 100 randomized trials; assert the two paths agree on all of them."""
        # Seed every RNG so the randomized trials are reproducible.
        np.random.seed(300)
        random.seed(300)
        torch.manual_seed(300)
        torch.cuda.manual_seed(300)
        torch.cuda.manual_seed_all(300)
        torch.set_printoptions(sci_mode=False)
        nx = 30
        ny = 26
        N = nx * ny # * 16
        M = 64 # hidden size
        W = 8 # one sided. Actual window size = (2w+1)**2
        nlocal = (2 * W + 1) ** 2
        B = 2
        D = 1 # no dilation
        padding = W * D
        H = 12 # number of heads
        autoregressive = False # not autoregressive
        device = 'cuda'
        dtype = torch.float32
        failed_tests = 0
        time1 = time2 = 0
        for i in range(100):
            if i < 5:
                time1 = time2 = 0 # don't include the first few iterations because of high variance
            # Fresh random q/k/v each trial; retain_grad so input grads survive backward().
            query = torch.randn(B * H * N * M, requires_grad=True, device=device, dtype=dtype).view(B, H, N, M)
            query.retain_grad()
            key = torch.randn(B * H * N * M, requires_grad=True, device=device, dtype=dtype).flip(dims=(0,)).view(B, H, N, M)
            key.retain_grad()
            value = torch.randn(B * H * N * M, requires_grad=True, device=device, dtype=dtype).view(B, H, N, M)
            value.retain_grad()
            # TVM MM
            torch.cuda.synchronize()
            start = time.time()
            (q_img, k_img, v_img) = map(lambda t: t.view(B * H, N, M), (query, key, value))
            k_img, v_img = map(lambda t: rearrange(t, 'b (h w) c -> b c h w', h=nx), (k_img, v_img))
            # start use torch.nn.F
            k_img, v_img = map(lambda t: F.unfold(t, 2*W+1, padding=padding, dilation=D), (k_img, v_img))
            k_img, v_img = map(lambda t: rearrange(t, 'b (d j) i -> b i j d', j=nlocal), (k_img, v_img))
            # end use torch.nn.F
            # start use tensor.unfold
            # (k_img, v_img) = map(
            #     lambda t: F.pad(t, (padding,)*4), (k_img, v_img)
            # )
            # (k_img, v_img) = map(
            #     lambda t: t.unfold(2, 2*W+1, 1).unfold(3, 2*W+1, 1), (k_img, v_img) # bh * c * nx * ny * 2w1 * 2w1
            # )
            # k_img, v_img = map(
            #     lambda t: rearrange(t, 'b c h w x y -> b (h w) (x y) c'),
            #     (k_img, v_img))
            # end use tensor.unfold
            dots_image = einsum('b i d, b i j d -> b i j', q_img, k_img)
            mask_invalid_locations_offical(dots_image, nx, ny, W, D, autoregressive)
            attention_probs1 = torch.nn.functional.softmax(dots_image, dim=-1)
            context1 = einsum('b i j, b i j d -> b i d', attention_probs1, v_img).view(B, H, N, M)
            context1.sum().backward()
            torch.cuda.synchronize()
            end = time.time()
            time1 += end - start
            # Snapshot the grads from path 1, then zero them for the reference path.
            query_grad1 = 1.0*query.grad
            query.grad.zero_()
            key_grad1 = 1.0*key.grad
            key.grad.zero_()
            value_grad1 = 1.0*value.grad
            value.grad.zero_()
            torch.cuda.empty_cache()
            assert D == 1
            assert not autoregressive
            # Dense N^2 reference path.
            torch.cuda.synchronize()
            start = time.time()
            attention2 = naive2d_matmul_qk(query, key, nx, ny, W, D, float('-inf'))
            attention_probs2 = torch.nn.functional.softmax(attention2, dim=-1) # (bsz, num_heads, seq_len, seq_len)
            context2 = attention_probs2 @ value # (bsz, num_heads, seq_len, head_dim)
            context2.sum().backward()
            torch.cuda.synchronize()
            end = time.time()
            time2 += end - start
            query_grad2 = 1.0*query.grad
            query.grad.zero_()
            key_grad2 = 1.0*key.grad
            key.grad.zero_()
            value_grad2 = 1.0*value.grad
            value.grad.zero_()
            torch.cuda.empty_cache()
            try:
                # assert torch.allclose(attention1, attention2.float(), atol=1e-4, rtol=1e-5)
                assert torch.allclose(context1, context2.float(), atol=1e-4, rtol=1e-5), "context1"
                assert torch.allclose(query_grad1, query_grad2.float(), atol=1e-4, rtol=1e-3), "query_grad1"
                assert torch.allclose(key_grad1, key_grad2.float(), atol=1e-4, rtol=1e-3), "key_grad1"
                assert torch.allclose(value_grad1, value_grad2.float(), atol=1e-4, rtol=1e-3), "value_grad1"
            except AssertionError:
                failed_tests += 1
        print('Time unfold total: {0:.5f} s'.format(time1))
        print('Time pytorch naive implementation: {0:.5f} s'.format(time2))
        print('Unfold vs. Naive speedup: {0:.5f}x'.format(time1/time2))
        print(f'Failed tests: {failed_tests}/{i+1}')
        assert failed_tests == 0
# Entry point: run the unit-test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"torch.cuda.synchronize",
"unittest.main",
"torch.nn.functional.pad",
"torch.nn.functional.softmax",
"torch.arange",
"torch.set_printoptions",
"numpy.random.seed",
"torch.randn",
"einops.rearrange",
"torch.einsum",
"time.time",
"torch.cuda.empty_cache",
"torch.cuda.manual_seed_all",
"torch... | [((232, 243), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (241, 243), False, 'from functools import lru_cache\n'), ((1244, 1255), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (1253, 1255), False, 'from functools import lru_cache\n'), ((3135, 3146), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (3144, 3146), False, 'from functools import lru_cache\n'), ((3271, 3292), 'torch.arange', 'torch.arange', (['(nx * ny)'], {}), '(nx * ny)\n', (3283, 3292), False, 'import torch\n'), ((3388, 3437), 'torch.nn.functional.pad', 'F.pad', (['k_img_indices', '((w * d,) * 4)'], {'value': '(nx * ny)'}), '(k_img_indices, (w * d,) * 4, value=nx * ny)\n', (3393, 3437), True, 'import torch.nn.functional as F\n'), ((3537, 3583), 'torch.nn.functional.unfold', 'F.unfold', (['k_img_indices', '(2 * w + 1)'], {'dilation': 'd'}), '(k_img_indices, 2 * w + 1, dilation=d)\n', (3545, 3583), True, 'import torch.nn.functional as F\n'), ((3604, 3646), 'einops.rearrange', 'rearrange', (['k_img_indices', '"""b j i -> b i j"""'], {}), "(k_img_indices, 'b j i -> b i j')\n", (3613, 3646), False, 'from einops import rearrange, repeat\n'), ((9283, 9298), 'unittest.main', 'unittest.main', ([], {}), '()\n', (9296, 9298), False, 'import unittest\n'), ((1907, 1937), 'torch.stack', 'torch.stack', (['head_masks'], {'dim': '(0)'}), '(head_masks, dim=0)\n', (1918, 1937), False, 'import torch\n'), ((1960, 1993), 'torch.stack', 'torch.stack', (['head_invalids'], {'dim': '(0)'}), '(head_invalids, dim=0)\n', (1971, 1993), False, 'import torch\n'), ((3695, 3729), 'einops.rearrange', 'rearrange', (['img_seq', '"""i -> () i ()"""'], {}), "(img_seq, 'i -> () i ()')\n", (3704, 3729), False, 'from einops import rearrange, repeat\n'), ((4517, 4536), 'numpy.random.seed', 'np.random.seed', (['(300)'], {}), '(300)\n', (4531, 4536), True, 'import numpy as np\n'), ((4545, 4561), 'random.seed', 'random.seed', (['(300)'], {}), '(300)\n', (4556, 4561), False, 'import random\n'), ((4570, 4592), 
'torch.manual_seed', 'torch.manual_seed', (['(300)'], {}), '(300)\n', (4587, 4592), False, 'import torch\n'), ((4601, 4628), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(300)'], {}), '(300)\n', (4623, 4628), False, 'import torch\n'), ((4637, 4668), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['(300)'], {}), '(300)\n', (4663, 4668), False, 'import torch\n'), ((4678, 4716), 'torch.set_printoptions', 'torch.set_printoptions', ([], {'sci_mode': '(False)'}), '(sci_mode=False)\n', (4700, 4716), False, 'import torch\n'), ((5791, 5815), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (5813, 5815), False, 'import torch\n'), ((5836, 5847), 'time.time', 'time.time', ([], {}), '()\n', (5845, 5847), False, 'import time\n'), ((6851, 6898), 'torch.einsum', 'einsum', (['"""b i d, b i j d -> b i j"""', 'q_img', 'k_img'], {}), "('b i d, b i j d -> b i j', q_img, k_img)\n", (6857, 6898), False, 'from torch import nn, einsum\n'), ((7015, 7062), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['dots_image'], {'dim': '(-1)'}), '(dots_image, dim=-1)\n', (7042, 7062), False, 'import torch\n'), ((7212, 7236), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (7234, 7236), False, 'import torch\n'), ((7255, 7266), 'time.time', 'time.time', ([], {}), '()\n', (7264, 7266), False, 'import time\n'), ((7522, 7546), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (7544, 7546), False, 'import torch\n'), ((7624, 7648), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (7646, 7648), False, 'import torch\n'), ((7669, 7680), 'time.time', 'time.time', ([], {}), '()\n', (7678, 7680), False, 'import time\n'), ((7796, 7843), 'torch.nn.functional.softmax', 'torch.nn.functional.softmax', (['attention2'], {'dim': '(-1)'}), '(attention2, dim=-1)\n', (7823, 7843), False, 'import torch\n'), ((8018, 8042), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (8040, 8042), 
False, 'import torch\n'), ((8061, 8072), 'time.time', 'time.time', ([], {}), '()\n', (8070, 8072), False, 'import time\n'), ((8328, 8352), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (8350, 8352), False, 'import torch\n'), ((5333, 5407), 'torch.randn', 'torch.randn', (['(B * H * N * M)'], {'requires_grad': '(True)', 'device': 'device', 'dtype': 'dtype'}), '(B * H * N * M, requires_grad=True, device=device, dtype=dtype)\n', (5344, 5407), False, 'import torch\n'), ((5633, 5707), 'torch.randn', 'torch.randn', (['(B * H * N * M)'], {'requires_grad': '(True)', 'device': 'device', 'dtype': 'dtype'}), '(B * H * N * M, requires_grad=True, device=device, dtype=dtype)\n', (5644, 5707), False, 'import torch\n'), ((5981, 6023), 'einops.rearrange', 'rearrange', (['t', '"""b (h w) c -> b c h w"""'], {'h': 'nx'}), "(t, 'b (h w) c -> b c h w', h=nx)\n", (5990, 6023), False, 'from einops import rearrange, repeat\n'), ((6117, 6168), 'torch.nn.functional.unfold', 'F.unfold', (['t', '(2 * W + 1)'], {'padding': 'padding', 'dilation': 'D'}), '(t, 2 * W + 1, padding=padding, dilation=D)\n', (6125, 6168), True, 'import torch.nn.functional as F\n'), ((6223, 6269), 'einops.rearrange', 'rearrange', (['t', '"""b (d j) i -> b i j d"""'], {'j': 'nlocal'}), "(t, 'b (d j) i -> b i j d', j=nlocal)\n", (6232, 6269), False, 'from einops import rearrange, repeat\n'), ((7086, 7144), 'torch.einsum', 'einsum', (['"""b i j, b i j d -> b i d"""', 'attention_probs1', 'v_img'], {}), "('b i j, b i j d -> b i d', attention_probs1, v_img)\n", (7092, 7144), False, 'from torch import nn, einsum\n'), ((5475, 5549), 'torch.randn', 'torch.randn', (['(B * H * N * M)'], {'requires_grad': '(True)', 'device': 'device', 'dtype': 'dtype'}), '(B * H * N * M, requires_grad=True, device=device, dtype=dtype)\n', (5486, 5549), False, 'import torch\n')] |
from __future__ import division, print_function, absolute_import
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import numpy
import PIL
from PIL import Image
np.random.seed(1337) # for reproducibility
from math import sqrt
import random
from keras.datasets import mnist
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Input, Lambda
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers import Flatten
from keras.optimizers import RMSprop
from keras import backend as K
from keras.layers import Concatenate, Dense, LSTM, Input, concatenate
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
mat = scipy.io.loadmat('/home/aniruddha/deep-learning-projects/Siamese_Networks/Dataset/PaviaCentre.mat')
arr = mat['pavia']
arr = np.array(arr)
print(arr.shape)
import scipy.io
mat = scipy.io.loadmat('/home/aniruddha/deep-learning-projects/Siamese_Networks/Dataset/PaviaCentre_gt.mat')
arr1 = mat['pavia_gt']
arr1 = np.array(arr1)
print(arr1.shape)
a=[]
label=[]
k=0
for i in range(0,arr1.shape[0]):
for j in range(0,arr1[i].shape[0]):
a.append(arr[i][j])
label.append(arr1[i][j])
a=np.array(a)
label=np.array(label)
# Keep only 7 of the PaviaCentre ground-truth classes and remap them to
# contiguous labels 0-6; pixels with any other label (including the
# unlabeled class 0) are discarded.  Replaces the original chain of seven
# near-identical if-statements with a single lookup table.
PAVIA_CLASS_MAP = {2: 0, 3: 1, 4: 2, 5: 3, 7: 4, 8: 5, 9: 6}
X_train = []
y_train = []
for i in range(a.shape[0]):
    mapped = PAVIA_CLASS_MAP.get(int(label[i]))
    if mapped is not None:
        y_train.append(mapped)
        X_train.append(a[i])
X_train = np.array(X_train)
y_train = np.array(y_train)
print(X_train.shape)
print(y_train.shape)
from sklearn.utils import shuffle
# Shuffle pixels and labels together (fixed seed for reproducibility).
X_train, y_train = shuffle(X_train, y_train, random_state = 0)
from sklearn.preprocessing import StandardScaler
# Standardise each spectral band to zero mean / unit variance.
X_train = StandardScaler().fit_transform(X_train)
from sklearn.decomposition import PCA
# Reduce the spectral dimension to 64 principal components.
# NOTE(review): a separate scaler/PCA is fit on the target dataset below, so
# the two domains end up in different feature spaces -- confirm intended.
pca = PCA(n_components=64)
X_train = pca.fit_transform(X_train)
print(X_train.shape)
import scipy.io
# Load the Pavia University hyperspectral cube (target domain).
mat = scipy.io.loadmat('/home/aniruddha/deep-learning-projects/Siamese_Networks/Dataset/PaviaU.mat')
arr = mat['paviaU']
arr = np.array(arr)
import scipy.io
# Load the Pavia University ground-truth label map.
mat = scipy.io.loadmat('/home/aniruddha/deep-learning-projects/Siamese_Networks/Dataset/PaviaU_gt.mat')
arr1 = mat['paviaU_gt']
arr1 = np.array(arr1)
print(arr1.shape)
# Flatten to per-pixel spectra (a) and labels (label), as for Pavia Centre.
a=[]
label=[]
k=0
for i in range(0,arr1.shape[0]):
    for j in range(0,arr1[i].shape[0]):
        a.append(arr[i][j])
        label.append(arr1[i][j])
a=np.array(a)
label=np.array(label)
print(a.shape)
print(label.shape)
# Keep only 7 of the PaviaU ground-truth classes and remap them to contiguous
# labels 0-6.  The mapping differs from the PaviaCentre one -- presumably it
# aligns semantically corresponding classes across the two datasets (confirm).
# Replaces the original chain of seven near-identical if-statements.
PAVIAU_CLASS_MAP = {4: 0, 1: 1, 8: 2, 7: 3, 9: 4, 2: 5, 6: 6}
X_train1 = []
y_train1 = []
for i in range(a.shape[0]):
    mapped = PAVIAU_CLASS_MAP.get(int(label[i]))
    if mapped is not None:
        y_train1.append(mapped)
        X_train1.append(a[i])
X_train1 = np.array(X_train1)
y_train1 = np.array(y_train1)
from sklearn.utils import shuffle
X_train1, y_train1 = shuffle(X_train1, y_train1, random_state = 0)
from sklearn.preprocessing import StandardScaler
# NOTE(review): scaler and PCA are re-fit on the target domain rather than
# reusing the source-domain transforms -- verify this is intentional.
X_train1 = StandardScaler().fit_transform(X_train1)
from sklearn.decomposition import PCA
pca = PCA(n_components=64)
X_train1 = pca.fit_transform(X_train1)
print(X_train1.shape)
print(X_train.max())
print(X_train1.max())
# Cast to float32 and rescale by 1/100 (presumably to shrink the PCA feature
# magnitudes before the sigmoid layers -- confirm).
X_train=X_train.astype('float32')
X_train1=X_train1.astype('float32')
X_train=X_train/100
X_train1=X_train1/100
# Hold out the tail of the (shuffled) target set as the test split.
X_test=X_train1[20000:39332,:]
y_test=y_train1[20000:39332]
X_train1=X_train1[0:20000,:]
y_train1=y_train1[0:20000]
print(X_train.shape)
print(X_train1.shape)
print(X_test.shape)
learning_rate = 0.01
# NOTE(review): num_steps, batch_size, total_numbers, display_step and
# examples_to_show appear unused below (batch_size is redefined to 64 before
# training) -- candidates for removal.
num_steps = 20
batch_size = 20
total_numbers = 291
display_step = 1000
examples_to_show = 10
# Network Parameters
num_hidden_1 = 32 # 1st layer num features
num_hidden_2 = 16 # 2nd layer num features (the latent dim)
num_input = 64
num_classes = 7
# tf Graph input (only pictures)
X = tf.placeholder("float", [None, num_input])
Y = tf.placeholder("float", [None, num_classes])
# Weight matrices, drawn uniformly from +/- 4*sqrt(6/(fan_in + fan_out)) --
# the sigmoid variant of the Glorot/Xavier initialisation bounds.
weights = {
    'encoder_h1': tf.Variable(tf.random_uniform([num_input, num_hidden_1], minval=-4*np.sqrt(6.0/(num_input + num_hidden_1)), maxval=4*np.sqrt(6.0/(num_input + num_hidden_1)))),
    'encoder_h2': tf.Variable(tf.random_uniform([num_hidden_1, num_hidden_2], minval=-4*np.sqrt(6.0/(num_hidden_1 + num_hidden_2)), maxval=4*np.sqrt(6.0/(num_hidden_1 + num_hidden_2)))),
    'decoder_h1': tf.Variable(tf.random_uniform([num_hidden_2, num_hidden_1], minval=-4*np.sqrt(6.0/(num_hidden_1 + num_hidden_2)), maxval=4*np.sqrt(6.0/(num_hidden_1 + num_hidden_2)))),
    'decoder_h2': tf.Variable(tf.random_uniform([num_hidden_1, num_input], minval=-4*np.sqrt(6.0/(num_input + num_hidden_1)), maxval=4*np.sqrt(6.0/(num_input + num_hidden_1)))),
    'classifier1_h': tf.Variable(tf.random_uniform([num_hidden_2, 10], minval=-4*np.sqrt(6.0/(10 + num_hidden_2)), maxval=4*np.sqrt(6.0/(10 + num_hidden_2)))),
    'classifier_h': tf.Variable(tf.random_uniform([10, num_classes], minval=-4*np.sqrt(6.0/(10 + num_classes)), maxval=4*np.sqrt(6.0/(10 + num_classes)))),
}
# Bias vectors: truncated normal scaled down by sqrt(layer size).
biases = {
    'encoder_b1': tf.Variable(tf.truncated_normal([num_hidden_1])/sqrt(num_hidden_1)),
    'encoder_b2': tf.Variable(tf.truncated_normal([num_hidden_2])/sqrt(num_hidden_2)),
    'decoder_b1': tf.Variable(tf.truncated_normal([num_hidden_1])/sqrt(num_hidden_1)),
    # NOTE(review): divisor is sqrt(num_hidden_2) although this bias has
    # num_input entries -- every other entry divides by its own size; confirm.
    'decoder_b2': tf.Variable(tf.truncated_normal([num_input])/sqrt(num_hidden_2)),
    'classifier1_b': tf.Variable(tf.truncated_normal([10])/sqrt(10)),
    'classifier_b': tf.Variable(tf.truncated_normal([num_classes])/sqrt(num_classes)),
}
# Building the encoder
def encoder(x):
    """Map the input batch x to the 16-dim latent code via two sigmoid
    dense layers (weights/biases come from the module-level dicts)."""
    hidden = tf.nn.sigmoid(
        tf.add(tf.matmul(x, weights['encoder_h1']), biases['encoder_b1']))
    latent = tf.nn.sigmoid(
        tf.add(tf.matmul(hidden, weights['encoder_h2']), biases['encoder_b2']))
    return latent
# Building the decoder
def decoder(x):
    """Reconstruct the 64-dim input from the latent code via two sigmoid
    dense layers (weights/biases come from the module-level dicts)."""
    hidden = tf.nn.sigmoid(
        tf.add(tf.matmul(x, weights['decoder_h1']), biases['decoder_b1']))
    reconstruction = tf.nn.sigmoid(
        tf.add(tf.matmul(hidden, weights['decoder_h2']), biases['decoder_b2']))
    return reconstruction
# Construct model
encoder_op = encoder(X)
decoder_op = decoder(encoder_op)
# Prediction
y_pred = decoder_op
# Classification head on top of the latent code: 16 -> 10 -> num_classes.
classify1 = tf.nn.sigmoid(tf.add(tf.matmul(encoder_op, weights['classifier1_h']), biases['classifier1_b']))
label_pred = tf.nn.softmax(tf.add(tf.matmul(classify1, weights['classifier_h']), biases['classifier_b']))
# Clip to avoid log(0) in the cross-entropy below.
y_clipped = tf.clip_by_value(label_pred, 1e-10, 0.9999999)
# Targets (Labels) are the input data.
y_true = X
label_true = Y
# Define loss and optimizer, minimize the squared error
loss_autoencoder = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
# Hand-written binary cross-entropy summed over classes, averaged over batch.
cross_entropy_loss = -tf.reduce_mean(tf.reduce_sum(label_true * tf.log(y_clipped)
                                                    + (1 - label_true) * tf.log(1 - y_clipped), axis=1))
# Joint objective: reconstruction + classification.
loss_total = loss_autoencoder+cross_entropy_loss
optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(loss_total)
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
from keras.utils import np_utils
# One-hot encode the integer class labels for the cross-entropy target.
y_test11 = np_utils.to_categorical(y_test)
y_train11 = np_utils.to_categorical(y_train)
print(y_train11.shape)
print(y_test11.shape)
# define an accuracy assessment operation
correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(label_pred, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Start Training
# Start a new TF session
sess = tf.Session()
# Run the initializer
sess.run(init)
batch_size = 64
num_batch = 1139
# Training
for i in range(0, 200):
    k = 0
    avg_cost = 0
    # BUG FIX: the original wrote `for j in (0, num_batch)`, which iterates
    # over the two-element tuple (0, 1139) -- only 2 batches per epoch ever
    # ran.  range(num_batch) walks every batch.
    for j in range(num_batch):
        batch_x = X_train[k:k+batch_size, :]
        batch_y = y_train11[k:k+batch_size, :]
        k += batch_size
        # NOTE(review): if X_train has fewer than num_batch*batch_size rows,
        # the trailing slices are empty -- confirm the dataset size matches.
        # Run optimization op (backprop) and cost op (to get loss value)
        _, l = sess.run([optimizer, loss_total], feed_dict={X: batch_x, Y: batch_y})
        avg_cost += l / num_batch
    print("Epoch:", (i + 1), "cost =", "{:.8f}".format(avg_cost))
    print("Epoch:", (i + 1), "accuracy =", "{:.8f}".format(sess.run(accuracy, feed_dict={X: X_train, Y: y_train11})))
# on 200 epoch
print(sess.run([accuracy], feed_dict={X: X_test, Y: y_test11})) | [
"numpy.sqrt",
"math.sqrt",
"numpy.array",
"tensorflow.cast",
"tensorflow.log",
"tensorflow.pow",
"sklearn.decomposition.PCA",
"tensorflow.placeholder",
"tensorflow.Session",
"numpy.random.seed",
"tensorflow.clip_by_value",
"tensorflow.matmul",
"tensorflow.truncated_normal",
"tensorflow.tra... | [((207, 227), 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), '(1337)\n', (221, 227), True, 'import numpy as np\n'), ((912, 925), 'numpy.array', 'np.array', (['arr'], {}), '(arr)\n', (920, 925), True, 'import numpy as np\n'), ((1099, 1113), 'numpy.array', 'np.array', (['arr1'], {}), '(arr1)\n', (1107, 1113), True, 'import numpy as np\n'), ((1296, 1307), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (1304, 1307), True, 'import numpy as np\n'), ((1314, 1329), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (1322, 1329), True, 'import numpy as np\n'), ((1862, 1879), 'numpy.array', 'np.array', (['X_train'], {}), '(X_train)\n', (1870, 1879), True, 'import numpy as np\n'), ((1888, 1905), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (1896, 1905), True, 'import numpy as np\n'), ((2002, 2043), 'sklearn.utils.shuffle', 'shuffle', (['X_train', 'y_train'], {'random_state': '(0)'}), '(X_train, y_train, random_state=0)\n', (2009, 2043), False, 'from sklearn.utils import shuffle\n'), ((2190, 2210), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(64)'}), '(n_components=64)\n', (2193, 2210), False, 'from sklearn.decomposition import PCA\n'), ((2413, 2426), 'numpy.array', 'np.array', (['arr'], {}), '(arr)\n', (2421, 2426), True, 'import numpy as np\n'), ((2579, 2593), 'numpy.array', 'np.array', (['arr1'], {}), '(arr1)\n', (2587, 2593), True, 'import numpy as np\n'), ((2776, 2787), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (2784, 2787), True, 'import numpy as np\n'), ((2794, 2809), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (2802, 2809), True, 'import numpy as np\n'), ((3387, 3405), 'numpy.array', 'np.array', (['X_train1'], {}), '(X_train1)\n', (3395, 3405), True, 'import numpy as np\n'), ((3415, 3433), 'numpy.array', 'np.array', (['y_train1'], {}), '(y_train1)\n', (3423, 3433), True, 'import numpy as np\n'), ((3490, 3533), 'sklearn.utils.shuffle', 'shuffle', (['X_train1', 'y_train1'], 
{'random_state': '(0)'}), '(X_train1, y_train1, random_state=0)\n', (3497, 3533), False, 'from sklearn.utils import shuffle\n'), ((3681, 3701), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(64)'}), '(n_components=64)\n', (3684, 3701), False, 'from sklearn.decomposition import PCA\n'), ((4410, 4452), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, num_input]'], {}), "('float', [None, num_input])\n", (4424, 4452), True, 'import tensorflow as tf\n'), ((4457, 4501), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""', '[None, num_classes]'], {}), "('float', [None, num_classes])\n", (4471, 4501), True, 'import tensorflow as tf\n'), ((7284, 7330), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['label_pred', '(1e-10)', '(0.9999999)'], {}), '(label_pred, 1e-10, 0.9999999)\n', (7300, 7330), True, 'import tensorflow as tf\n'), ((7870, 7903), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (7901, 7903), True, 'import tensorflow as tf\n'), ((7949, 7980), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['y_test'], {}), '(y_test)\n', (7972, 7980), False, 'from keras.utils import np_utils\n'), ((7993, 8025), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['y_train'], {}), '(y_train)\n', (8016, 8025), False, 'from keras.utils import np_utils\n'), ((8304, 8316), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (8314, 8316), True, 'import tensorflow as tf\n'), ((7489, 7515), 'tensorflow.pow', 'tf.pow', (['(y_true - y_pred)', '(2)'], {}), '(y_true - y_pred, 2)\n', (7495, 7515), True, 'import tensorflow as tf\n'), ((8144, 8159), 'tensorflow.argmax', 'tf.argmax', (['Y', '(1)'], {}), '(Y, 1)\n', (8153, 8159), True, 'import tensorflow as tf\n'), ((8161, 8185), 'tensorflow.argmax', 'tf.argmax', (['label_pred', '(1)'], {}), '(label_pred, 1)\n', (8170, 8185), True, 'import tensorflow as tf\n'), ((8213, 8252), 'tensorflow.cast', 'tf.cast', 
(['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (8220, 8252), True, 'import tensorflow as tf\n'), ((2106, 2122), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2120, 2122), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3596, 3612), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3610, 3612), False, 'from sklearn.preprocessing import StandardScaler\n'), ((7091, 7138), 'tensorflow.matmul', 'tf.matmul', (['encoder_op', "weights['classifier1_h']"], {}), "(encoder_op, weights['classifier1_h'])\n", (7100, 7138), True, 'import tensorflow as tf\n'), ((7200, 7245), 'tensorflow.matmul', 'tf.matmul', (['classify1', "weights['classifier_h']"], {}), "(classify1, weights['classifier_h'])\n", (7209, 7245), True, 'import tensorflow as tf\n'), ((7739, 7779), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (7764, 7779), True, 'import tensorflow as tf\n'), ((5605, 5640), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[num_hidden_1]'], {}), '([num_hidden_1])\n', (5624, 5640), True, 'import tensorflow as tf\n'), ((5641, 5659), 'math.sqrt', 'sqrt', (['num_hidden_1'], {}), '(num_hidden_1)\n', (5645, 5659), False, 'from math import sqrt\n'), ((5692, 5727), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[num_hidden_2]'], {}), '([num_hidden_2])\n', (5711, 5727), True, 'import tensorflow as tf\n'), ((5728, 5746), 'math.sqrt', 'sqrt', (['num_hidden_2'], {}), '(num_hidden_2)\n', (5732, 5746), False, 'from math import sqrt\n'), ((5779, 5814), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[num_hidden_1]'], {}), '([num_hidden_1])\n', (5798, 5814), True, 'import tensorflow as tf\n'), ((5815, 5833), 'math.sqrt', 'sqrt', (['num_hidden_1'], {}), '(num_hidden_1)\n', (5819, 5833), False, 'from math import sqrt\n'), ((5866, 5898), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[num_input]'], 
{}), '([num_input])\n', (5885, 5898), True, 'import tensorflow as tf\n'), ((5899, 5917), 'math.sqrt', 'sqrt', (['num_hidden_2'], {}), '(num_hidden_2)\n', (5903, 5917), False, 'from math import sqrt\n'), ((5953, 5978), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[10]'], {}), '([10])\n', (5972, 5978), True, 'import tensorflow as tf\n'), ((5979, 5987), 'math.sqrt', 'sqrt', (['(10)'], {}), '(10)\n', (5983, 5987), False, 'from math import sqrt\n'), ((6022, 6056), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['[num_classes]'], {}), '([num_classes])\n', (6041, 6056), True, 'import tensorflow as tf\n'), ((6057, 6074), 'math.sqrt', 'sqrt', (['num_classes'], {}), '(num_classes)\n', (6061, 6074), False, 'from math import sqrt\n'), ((6210, 6245), 'tensorflow.matmul', 'tf.matmul', (['x', "weights['encoder_h1']"], {}), "(x, weights['encoder_h1'])\n", (6219, 6245), True, 'import tensorflow as tf\n'), ((6394, 6435), 'tensorflow.matmul', 'tf.matmul', (['layer_1', "weights['encoder_h2']"], {}), "(layer_1, weights['encoder_h2'])\n", (6403, 6435), True, 'import tensorflow as tf\n'), ((6644, 6679), 'tensorflow.matmul', 'tf.matmul', (['x', "weights['decoder_h1']"], {}), "(x, weights['decoder_h1'])\n", (6653, 6679), True, 'import tensorflow as tf\n'), ((6828, 6869), 'tensorflow.matmul', 'tf.matmul', (['layer_1', "weights['decoder_h2']"], {}), "(layer_1, weights['decoder_h2'])\n", (6837, 6869), True, 'import tensorflow as tf\n'), ((4601, 4642), 'numpy.sqrt', 'np.sqrt', (['(6.0 / (num_input + num_hidden_1))'], {}), '(6.0 / (num_input + num_hidden_1))\n', (4608, 4642), True, 'import numpy as np\n'), ((4651, 4692), 'numpy.sqrt', 'np.sqrt', (['(6.0 / (num_input + num_hidden_1))'], {}), '(6.0 / (num_input + num_hidden_1))\n', (4658, 4692), True, 'import numpy as np\n'), ((4782, 4826), 'numpy.sqrt', 'np.sqrt', (['(6.0 / (num_hidden_1 + num_hidden_2))'], {}), '(6.0 / (num_hidden_1 + num_hidden_2))\n', (4789, 4826), True, 'import numpy as np\n'), ((4835, 4879), 
'numpy.sqrt', 'np.sqrt', (['(6.0 / (num_hidden_1 + num_hidden_2))'], {}), '(6.0 / (num_hidden_1 + num_hidden_2))\n', (4842, 4879), True, 'import numpy as np\n'), ((4969, 5013), 'numpy.sqrt', 'np.sqrt', (['(6.0 / (num_hidden_1 + num_hidden_2))'], {}), '(6.0 / (num_hidden_1 + num_hidden_2))\n', (4976, 5013), True, 'import numpy as np\n'), ((5022, 5066), 'numpy.sqrt', 'np.sqrt', (['(6.0 / (num_hidden_1 + num_hidden_2))'], {}), '(6.0 / (num_hidden_1 + num_hidden_2))\n', (5029, 5066), True, 'import numpy as np\n'), ((5153, 5194), 'numpy.sqrt', 'np.sqrt', (['(6.0 / (num_input + num_hidden_1))'], {}), '(6.0 / (num_input + num_hidden_1))\n', (5160, 5194), True, 'import numpy as np\n'), ((5203, 5244), 'numpy.sqrt', 'np.sqrt', (['(6.0 / (num_input + num_hidden_1))'], {}), '(6.0 / (num_input + num_hidden_1))\n', (5210, 5244), True, 'import numpy as np\n'), ((5327, 5361), 'numpy.sqrt', 'np.sqrt', (['(6.0 / (10 + num_hidden_2))'], {}), '(6.0 / (10 + num_hidden_2))\n', (5334, 5361), True, 'import numpy as np\n'), ((5370, 5404), 'numpy.sqrt', 'np.sqrt', (['(6.0 / (10 + num_hidden_2))'], {}), '(6.0 / (10 + num_hidden_2))\n', (5377, 5404), True, 'import numpy as np\n'), ((5485, 5518), 'numpy.sqrt', 'np.sqrt', (['(6.0 / (10 + num_classes))'], {}), '(6.0 / (10 + num_classes))\n', (5492, 5518), True, 'import numpy as np\n'), ((5527, 5560), 'numpy.sqrt', 'np.sqrt', (['(6.0 / (10 + num_classes))'], {}), '(6.0 / (10 + num_classes))\n', (5534, 5560), True, 'import numpy as np\n'), ((7581, 7598), 'tensorflow.log', 'tf.log', (['y_clipped'], {}), '(y_clipped)\n', (7587, 7598), True, 'import tensorflow as tf\n'), ((7645, 7666), 'tensorflow.log', 'tf.log', (['(1 - y_clipped)'], {}), '(1 - y_clipped)\n', (7651, 7666), True, 'import tensorflow as tf\n')] |
import numpy as np
from Get_global_value import num_q
from Get_global_value import BB
from Get_global_value import J_type
from Get_global_value import Qi
from Get_global_value import c0
from Get_global_value import cc
from Get_global_value import Ez
from rpy2dc import rpy2dc
def calc_pos(R0, A0, AA, q):
    """Compute the inertial-frame position of every link.

    Parameters
    ----------
    R0 : base body position (first 3 entries used).
    A0 : base orientation matrix (3x3).
    AA : stacked link orientation matrices; AA[i] is used as a 3x3 block.
    q  : joint coordinates (only used for prismatic joints).

    Returns
    -------
    RR : (num_q, 3) array of link positions (zeros when num_q == 0).
    """
    RR = np.zeros((num_q, 3))
    if num_q == 0:
        print('Single body, there is no link')
        return RR
    for i in range(num_q):
        A_I_i = AA[i, 0:3, 0:3]
        # NOTE(review): J_type is compared as a whole ('R') rather than per
        # joint (J_type[i]) -- confirm J_type is a scalar, not a per-joint list.
        if BB[i] == -1:
            # Link attached directly to the base body.
            joint_origin = R0[0:3] + np.dot(A0, c0[i, 0:3])
            if J_type == 'R':
                RR[i, 0:3] = joint_origin - np.dot(A_I_i, cc[i, i, 0:3])
            else:
                RR[i, 0:3] = joint_origin + np.dot(A_I_i, np.dot(Ez, q[i]) - cc[i, i, 0:3])  # needs check
        else:
            # Link attached to a parent link BB[i].
            parent = BB[i]
            A_I_p = AA[parent, 0:3, 0:3]
            joint_origin = RR[parent, :] + np.dot(A_I_p, cc[parent, i, :])
            if J_type == 'R':
                RR[i, :] = joint_origin - np.dot(A_I_i, cc[i, i, :])
            else:
                RR[i, :] = joint_origin + np.dot(A_I_i, np.dot(Ez, q[i]) - cc[i, i, 0:3])
    return RR
| [
"numpy.dot",
"numpy.zeros"
] | [((316, 336), 'numpy.zeros', 'np.zeros', (['(num_q, 3)'], {}), '((num_q, 3))\n', (324, 336), True, 'import numpy as np\n'), ((610, 638), 'numpy.dot', 'np.dot', (['A_I_i', 'cc[i, i, 0:3]'], {}), '(A_I_i, cc[i, i, 0:3])\n', (616, 638), True, 'import numpy as np\n'), ((1007, 1033), 'numpy.dot', 'np.dot', (['A_I_i', 'cc[i, i, :]'], {}), '(A_I_i, cc[i, i, :])\n', (1013, 1033), True, 'import numpy as np\n'), ((585, 607), 'numpy.dot', 'np.dot', (['A0', 'c0[i, 0:3]'], {}), '(A0, c0[i, 0:3])\n', (591, 607), True, 'import numpy as np\n'), ((704, 726), 'numpy.dot', 'np.dot', (['A0', 'c0[i, 0:3]'], {}), '(A0, c0[i, 0:3])\n', (710, 726), True, 'import numpy as np\n'), ((973, 1004), 'numpy.dot', 'np.dot', (['A_I_BB', 'cc[BB[i], i, :]'], {}), '(A_I_BB, cc[BB[i], i, :])\n', (979, 1004), True, 'import numpy as np\n'), ((1102, 1133), 'numpy.dot', 'np.dot', (['A_I_BB', 'cc[BB[i], i, :]'], {}), '(A_I_BB, cc[BB[i], i, :])\n', (1108, 1133), True, 'import numpy as np\n'), ((779, 795), 'numpy.dot', 'np.dot', (['Ez', 'q[i]'], {}), '(Ez, q[i])\n', (785, 795), True, 'import numpy as np\n'), ((1184, 1200), 'numpy.dot', 'np.dot', (['Ez', 'q[i]'], {}), '(Ez, q[i])\n', (1190, 1200), True, 'import numpy as np\n')] |
# -*-coding:utf8-*-
"""
author:zhangyu
email:<EMAIL>
"""
import os
import numpy as np
import operator
import sys
def load_item_vec(input_file: str):
    """
    Load item embedding vectors from a word2vec-style text file.
    Args:
        input_file: path to the vector file; the first line is a header
                    ("<count> <dim>") and is skipped
    Return:
        dict key:item_id value:np.array([num1, num2....]); empty dict when
        the file does not exist
    """
    if not os.path.exists(input_file):
        return {}
    item_vec = {}
    # "with" guarantees the handle is closed even if parsing raises
    # (the original left the file open on any exception).
    with open(input_file) as fp:
        for line_num, line in enumerate(fp):
            if line_num == 0:
                continue  # skip the "<count> <dim>" header line
            item = line.strip().split()
            if len(item) < 129:
                continue  # expect item_id + a 128-dim vector
            item_id = item[0]
            if item_id == "</s>":
                continue  # word2vec sentence-boundary pseudo-token
            item_vec[item_id] = np.array([float(ele) for ele in item[1:]])
    return item_vec
def cal_item_sim(item_vec, item_id: str, output_file: str):
    """
    Compute the cosine similarity between one fixed item and every other
    item, and write the top-k most similar items to output_file.
    Args
        item_vec: dict of item_id -> np.array embedding vector
        item_id: id of the fixed (query) item; no-op if absent from item_vec
        output_file: output path; format "item_id\\tid1_score1;id2_score2;..."
    """
    if item_id not in item_vec:
        return
    score = {}
    top_k = 10
    fix_item_vec = item_vec[item_id]
    # Iterate items() directly instead of re-looking up each value by key.
    for tmp_item_id, tmp_item_vec in item_vec.items():
        if tmp_item_id == item_id:
            continue
        fenmu = np.linalg.norm(fix_item_vec) * np.linalg.norm(tmp_item_vec)
        if fenmu == 0:
            score[tmp_item_id] = 0  # zero-norm vector: treat similarity as 0
        else:
            score[tmp_item_id] = round(np.dot(fix_item_vec, tmp_item_vec) / fenmu, 3)
    tmp_list = []
    for s in sorted(score.items(), key=operator.itemgetter(1), reverse=True)[:top_k]:
        tmp_list.append(s[0] + "_" + str(s[1]))
    out_str = item_id + "\t" + ";".join(tmp_list)
    # "with" closes the handle even on error (original relied on fw.close()).
    with open(output_file, "w+") as fw:
        fw.write(out_str + "\n")
def run_main(input_file: str, output_file: str) -> None:
    '''
    Entry point: load the item embeddings and write the items most similar
    to item "27".
    Args:
        input_file: path to the embedding file
        output_file: path for the similarity result
    Returns:
        None
    '''
    cal_item_sim(load_item_vec(input_file), "27", output_file)
if __name__ == "__main__":
if len(sys.argv) < 3:
print("usage: python xx.py inputfile outputfile")
sys.exit()
else:
input_file = sys.argv[1]
output_file = sys.argv[2]
run_main(input_file, output_file)
| [
"os.path.exists",
"sys.exit",
"numpy.dot",
"numpy.linalg.norm",
"operator.itemgetter"
] | [((285, 311), 'os.path.exists', 'os.path.exists', (['input_file'], {}), '(input_file)\n', (299, 311), False, 'import os\n'), ((2069, 2079), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2077, 2079), False, 'import sys\n'), ((1175, 1203), 'numpy.linalg.norm', 'np.linalg.norm', (['fix_item_vec'], {}), '(fix_item_vec)\n', (1189, 1203), True, 'import numpy as np\n'), ((1206, 1234), 'numpy.linalg.norm', 'np.linalg.norm', (['tmp_item_vec'], {}), '(tmp_item_vec)\n', (1220, 1234), True, 'import numpy as np\n'), ((1512, 1534), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (1531, 1534), False, 'import operator\n'), ((1346, 1380), 'numpy.dot', 'np.dot', (['fix_item_vec', 'tmp_item_vec'], {}), '(fix_item_vec, tmp_item_vec)\n', (1352, 1380), True, 'import numpy as np\n')] |
import numpy as np
# Importing keras classes and functions using TensorFlow backend
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import BatchNormalization, Dense, Flatten, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.metrics import categorical_accuracy
# Importing functions from different modules
from preprocess import preprocess_data
from utils import give_convolution_layer
# Defining constants
BATCH_SIZE = 128
EPOCHS = 30
IMG_SIZE = (48, 48)
# NUM_CLASSES = 7
# Load and split the FER2013 data; NUM_CLASSES is derived from the labels.
X_train, X_test, y_train, y_test, NUM_CLASSES = preprocess_data(filename='/content/drive/My Drive/fer2013.csv',
                                                                image_size=IMG_SIZE)
model = Sequential()
# Five convolution stages with 3x3 kernels and 2x2 pooling; filters double
# each stage, and batch-norm + 0.3 dropout are enabled from the 2nd stage on.
conv_configs = [
    dict(filters=64, use_bn=False, dropout_percentage=None),
    dict(filters=128, use_bn=True, dropout_percentage=0.3),
    dict(filters=256, use_bn=True, dropout_percentage=0.3),
    dict(filters=512, use_bn=True, dropout_percentage=0.3),
    dict(filters=1024, use_bn=True, dropout_percentage=0.3),
]
for cfg in conv_configs:
    model.add(give_convolution_layer(kernel_size=(3, 3), padding='same',
                                     pool_size=(2, 2), **cfg))
# Flattening
model.add(Flatten())
# Fully connected layer 1st layer
model.add(Dense(512, activation='relu', kernel_initializer='glorot_normal'))
model.add(BatchNormalization())
model.add(Dropout(0.2))
# Last layer
model.add(Dense(NUM_CLASSES, activation='softmax', kernel_initializer='glorot_normal'))
# Compile model
model.compile(optimizer=Adam(learning_rate=0.0001), loss='categorical_crossentropy', metrics=[categorical_accuracy])
# Print model summary
print(model.summary())
# Training the model
# BUG FIX: Keras Model.fit takes lowercase batch_size/epochs keywords; the
# original BATCH_SIZE=/EPOCHS= keywords would raise a TypeError.
model.fit(X_train, y_train,
          batch_size=BATCH_SIZE,
          epochs=EPOCHS,
          verbose=1,
          validation_split=0.1111)
# Predicted probability distribution over the NUM_CLASSES labels per test image
test_output = model.predict(X_test)
# Predicted class = label with the highest probability
predicted_labels = np.argmax(test_output, axis=1)
# BUG FIX: ground truth must come from y_test, not from the predictions --
# the original compared the predictions with themselves, always giving 100%.
# Assumes y_test is one-hot encoded, consistent with the
# categorical_crossentropy training loss.
true_labels = np.argmax(y_test, axis=1)
# Calculating categorical accuracy taking label having highest probability
accuracy = [(x == y) for x, y in zip(predicted_labels, true_labels)]
print("Accuracy on Test set : ", np.mean(accuracy))
| [
"numpy.mean",
"preprocess.preprocess_data",
"tensorflow.keras.layers.Dropout",
"utils.give_convolution_layer",
"numpy.argmax",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.mod... | [((579, 668), 'preprocess.preprocess_data', 'preprocess_data', ([], {'filename': '"""/content/drive/My Drive/fer2013.csv"""', 'image_size': 'IMG_SIZE'}), "(filename='/content/drive/My Drive/fer2013.csv', image_size=\n IMG_SIZE)\n", (594, 668), False, 'from preprocess import preprocess_data\n'), ((726, 738), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (736, 738), False, 'from tensorflow.keras.models import Sequential\n'), ((2375, 2405), 'numpy.argmax', 'np.argmax', (['test_output'], {'axis': '(1)'}), '(test_output, axis=1)\n', (2384, 2405), True, 'import numpy as np\n'), ((2416, 2446), 'numpy.argmax', 'np.argmax', (['test_output'], {'axis': '(1)'}), '(test_output, axis=1)\n', (2425, 2446), True, 'import numpy as np\n'), ((774, 905), 'utils.give_convolution_layer', 'give_convolution_layer', ([], {'filters': '(64)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'use_bn': '(False)', 'dropout_percentage': 'None', 'pool_size': '(2, 2)'}), "(filters=64, kernel_size=(3, 3), padding='same',\n use_bn=False, dropout_percentage=None, pool_size=(2, 2))\n", (796, 905), False, 'from utils import give_convolution_layer\n'), ((948, 1078), 'utils.give_convolution_layer', 'give_convolution_layer', ([], {'filters': '(128)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'use_bn': '(True)', 'dropout_percentage': '(0.3)', 'pool_size': '(2, 2)'}), "(filters=128, kernel_size=(3, 3), padding='same',\n use_bn=True, dropout_percentage=0.3, pool_size=(2, 2))\n", (970, 1078), False, 'from utils import give_convolution_layer\n'), ((1121, 1251), 'utils.give_convolution_layer', 'give_convolution_layer', ([], {'filters': '(256)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'use_bn': '(True)', 'dropout_percentage': '(0.3)', 'pool_size': '(2, 2)'}), "(filters=256, kernel_size=(3, 3), padding='same',\n use_bn=True, dropout_percentage=0.3, pool_size=(2, 2))\n", (1143, 1251), False, 'from utils import give_convolution_layer\n'), ((1294, 
1424), 'utils.give_convolution_layer', 'give_convolution_layer', ([], {'filters': '(512)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'use_bn': '(True)', 'dropout_percentage': '(0.3)', 'pool_size': '(2, 2)'}), "(filters=512, kernel_size=(3, 3), padding='same',\n use_bn=True, dropout_percentage=0.3, pool_size=(2, 2))\n", (1316, 1424), False, 'from utils import give_convolution_layer\n'), ((1467, 1598), 'utils.give_convolution_layer', 'give_convolution_layer', ([], {'filters': '(1024)', 'kernel_size': '(3, 3)', 'padding': '"""same"""', 'use_bn': '(True)', 'dropout_percentage': '(0.3)', 'pool_size': '(2, 2)'}), "(filters=1024, kernel_size=(3, 3), padding='same',\n use_bn=True, dropout_percentage=0.3, pool_size=(2, 2))\n", (1489, 1598), False, 'from utils import give_convolution_layer\n'), ((1630, 1639), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1637, 1639), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Flatten, Dropout\n'), ((1686, 1751), 'tensorflow.keras.layers.Dense', 'Dense', (['(512)'], {'activation': '"""relu"""', 'kernel_initializer': '"""glorot_normal"""'}), "(512, activation='relu', kernel_initializer='glorot_normal')\n", (1691, 1751), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Flatten, Dropout\n'), ((1763, 1783), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (1781, 1783), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Flatten, Dropout\n'), ((1795, 1807), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (1802, 1807), False, 'from tensorflow.keras.layers import BatchNormalization, Dense, Flatten, Dropout\n'), ((1833, 1909), 'tensorflow.keras.layers.Dense', 'Dense', (['NUM_CLASSES'], {'activation': '"""softmax"""', 'kernel_initializer': '"""glorot_normal"""'}), "(NUM_CLASSES, activation='softmax', kernel_initializer='glorot_normal')\n", (1838, 1909), False, 'from tensorflow.keras.layers 
import BatchNormalization, Dense, Flatten, Dropout\n'), ((2610, 2627), 'numpy.mean', 'np.mean', (['accuracy'], {}), '(accuracy)\n', (2617, 2627), True, 'import numpy as np\n'), ((1952, 1978), 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'learning_rate': '(0.0001)'}), '(learning_rate=0.0001)\n', (1956, 1978), False, 'from tensorflow.keras.optimizers import Adam\n')] |
import numpy as np
import healpy as hp
from .. import coords
from .. import randoms
from .. import utils
class Cube2Shell:
    """Remaps pixel values defined on a 3D Cartesian grid onto a spherical
    polar grid, stored as a set of HEALPix maps (one map per radial shell).
    """

    def __init__(self):
        """Initialises the class with all grid attributes unset."""
        # Cartesian grid definition (populated by setup_cube).
        self.xedges = None
        self.yedges = None
        self.zedges = None
        self.xmid = None
        self.ymid = None
        self.zmid = None
        self.dx = None
        self.dy = None
        self.dz = None
        self.x3d = None
        self.y3d = None
        self.z3d = None
        # Spherical polar grid definition (populated by setup_polar).
        self.redges = None
        self.nside = None
        self.rmid = None
        self.dr = None
        self.center = None
        self.rebin_shell = None
        self.rebin_r = None
        self.centers = None
        self.rebin_redges = None
        self.rebin_rmid = None

    def setup_cube(self, xmin, xmax, numx, ymin, ymax, numy, zmin, zmax, numz):
        """Setups the box grid.

        Parameters
        ----------
        xmin : float
            Minimum x.
        xmax : float
            Maximum x.
        numx : int
            Number of bins along x-axis.
        ymin : float
            Minimum y.
        ymax : float
            Maximum y.
        numy : int
            Number of bins along y-axis.
        zmin : float
            Minimum z.
        zmax : float
            Maximum z.
        numz : int
            Number of bins along z-axis.
        """
        assert numx > 0, "numx must be greater than zero."
        assert numy > 0, "numy must be greater than zero."
        assert numz > 0, "numz must be greater than zero."
        self.xedges = np.linspace(xmin, xmax, numx+1)
        self.yedges = np.linspace(ymin, ymax, numy+1)
        self.zedges = np.linspace(zmin, zmax, numz+1)
        # Bin centres of each axis.
        self.xmid = 0.5*(self.xedges[1:] + self.xedges[:-1])
        self.ymid = 0.5*(self.yedges[1:] + self.yedges[:-1])
        self.zmid = 0.5*(self.zedges[1:] + self.zedges[:-1])
        # Uniform bin widths (linspace guarantees equal spacing).
        self.dx = self.xedges[1] - self.xedges[0]
        self.dy = self.yedges[1] - self.yedges[0]
        self.dz = self.zedges[1] - self.zedges[0]
        self.x3d, self.y3d, self.z3d = np.meshgrid(self.xmid, self.ymid, self.zmid)

    def setup_polar(self, rmin, rmax, numr, nside, center=[0., 0., 0.], rebin_shell=2,
        rebin_r=2, periodicx=[0, 0], periodicy=[0, 0], periodicz=[0, 0]):
        """Setups the polar grid.

        Parameters
        ----------
        rmin : float
            Minimum r.
        rmax : float
            Maximum r.
        numr : int
            Number of bins along r-axis.
        nside : int
            Nside for healpix maps for each shell.
        center : list
            Center point of polar coordinate grid.
        rebin_shell : int
            Integer factor (a power of 2) for regriding the healpix shells to a
            higher nside than desired. This is then downgraded to the desired nside.
        rebin_r : int
            Integer factor for regridding the r axis by a factor rebin_r. This is
            then rebinned to the desired grid.
        periodicx, periodicy, periodicz : list of two ints
            Ranges [lo, hi] of periodic box replications along each axis; a
            shifted copy of `center` is generated for every replication so
            that remap() can sample across periodic boundaries.
        """
        assert rmin >= 0., "rmin must be greater or equal to zero."
        assert rmin < rmax, "rmin must be smaller than rmax."
        assert numr > 0, "numr must be greater than zero."
        assert len(center) == 3, "center list must have length 3."
        assert rebin_shell >= 1, "rebin_shell must be greater or equal to 1."
        assert hp.isnsideok(nside), "Incompatible nside."
        assert hp.isnsideok(nside*rebin_shell), "Incompatible shell_rebin must be power of 2."
        assert rebin_r >= 1, "rebin_r must be greater or equal to 1."
        self.redges = np.linspace(rmin, rmax, numr+1)
        self.rmid = 0.5*(self.redges[1:] + self.redges[:-1])
        self.dr = self.rmid[1] - self.rmid[0]
        self.nside = nside
        self.center = center
        self.rebin_shell = rebin_shell
        self.rebin_r = rebin_r
        # One center per periodic replication of the box.
        centers = []
        for i in range(periodicx[0], periodicx[1]+1):
            for j in range(periodicy[0], periodicy[1]+1):
                for k in range(periodicz[0], periodicz[1]+1):
                    centers.append([center[0] + i*(self.xedges[-1]-self.xedges[0]),
                                    center[1] + j*(self.yedges[-1]-self.yedges[0]),
                                    center[2] + k*(self.zedges[-1]-self.zedges[0])])
        self.centers = centers
        # Finer radial grid used internally; remap() averages it back down.
        self.rebin_redges = np.linspace(self.redges[0], self.redges[-1], self.rebin_r*len(self.rmid) + 1)
        self.rebin_rmid = 0.5*(self.rebin_redges[1:] + self.rebin_redges[:-1])

    def remap(self, f, verbose=True):
        """Remaps 3d grid data f onto spherical polar coordinate grid.

        Parameters
        ----------
        f : 3darray
            3d pixel data.
        verbose : bool
            If true will output a progress bar.

        Returns
        -------
        f_sphere : 2darray
            Remapped 3d grid data on to spherical polar coordinates (in
            healpix shells), shape (numr, npix(nside)).
        """
        f_sphere = np.zeros((len(self.rebin_rmid), hp.nside2npix(self.nside)))
        for i in range(0, len(self.rebin_rmid)):
            # Sample each shell on a higher-resolution healpix grid first.
            f_shell_highres = np.zeros(hp.nside2npix(self.nside*self.rebin_shell))
            pix = np.arange(hp.nside2npix(self.nside*self.rebin_shell))
            theta, phi = hp.pix2ang(self.nside*self.rebin_shell, pix)
            r = np.ones(len(phi))*self.rebin_rmid[i]
            for j in range(0, len(self.centers)):
                # Cartesian position of every shell pixel relative to this
                # (possibly periodically shifted) center.
                x, y, z = coords.sphere2cart(r, phi, theta, center=self.centers[j])
                x -= self.xedges[0]
                y -= self.yedges[0]
                z -= self.zedges[0]
                xind = x / self.dx
                yind = y / self.dy
                zind = z / self.dz
                xind = np.floor(xind).astype(int)
                yind = np.floor(yind).astype(int)
                zind = np.floor(zind).astype(int)
                # Keep only pixels that land inside the box (nearest-cell lookup).
                condition = np.where((xind >= 0) & (xind < len(self.xmid)) & (yind >= 0) & (yind < len(self.ymid)) & (zind >= 0) & (zind < len(self.zmid)))[0]
                f_shell_highres[pix[condition]] = f[xind[condition], yind[condition], zind[condition]]
            f_sphere[i] = hp.ud_grade(f_shell_highres, self.nside)
            if verbose:
                utils.progress_bar(i, len(self.rebin_rmid), explanation='Remapping')
        if verbose:
            print('Downgrading to desired spherical polar coordinate grid...')
        # Weight each fine radial slice by its shell volume (proportional to
        # the difference of r^3) before averaging down to the coarse r grid.
        rbin_weights = self.rebin_redges[1:]**3 - self.rebin_redges[:-1]**3
        f_sphere = np.array([f_sphere[i]*rbin_weights[i] for i in range(0, len(f_sphere))])
        rbin_weights_sum = np.sum(rbin_weights.reshape(len(self.rmid), self.rebin_r), axis=1)
        f_sphere = np.sum(f_sphere.reshape(len(self.rmid), self.rebin_r, hp.nside2npix(self.nside)), axis=1)
        f_sphere = np.array([f_sphere[i]/rbin_weights_sum[i] for i in range(0, len(f_sphere))])
        if verbose:
            print('Done!')
        return f_sphere

    def clean(self):
        """Cleans by reinitialising the class."""
        self.__init__()
| [
"healpy.isnsideok",
"numpy.floor",
"healpy.pix2ang",
"numpy.linspace",
"healpy.nside2npix",
"numpy.meshgrid",
"healpy.ud_grade"
] | [((1465, 1498), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', '(numx + 1)'], {}), '(xmin, xmax, numx + 1)\n', (1476, 1498), True, 'import numpy as np\n'), ((1519, 1552), 'numpy.linspace', 'np.linspace', (['ymin', 'ymax', '(numy + 1)'], {}), '(ymin, ymax, numy + 1)\n', (1530, 1552), True, 'import numpy as np\n'), ((1573, 1606), 'numpy.linspace', 'np.linspace', (['zmin', 'zmax', '(numz + 1)'], {}), '(zmin, zmax, numz + 1)\n', (1584, 1606), True, 'import numpy as np\n'), ((1977, 2021), 'numpy.meshgrid', 'np.meshgrid', (['self.xmid', 'self.ymid', 'self.zmid'], {}), '(self.xmid, self.ymid, self.zmid)\n', (1988, 2021), True, 'import numpy as np\n'), ((3511, 3544), 'numpy.linspace', 'np.linspace', (['rmin', 'rmax', '(numr + 1)'], {}), '(rmin, rmax, numr + 1)\n', (3522, 3544), True, 'import numpy as np\n'), ((3265, 3284), 'healpy.isnsideok', 'hp.isnsideok', (['nside'], {}), '(nside)\n', (3277, 3284), True, 'import healpy as hp\n'), ((3331, 3364), 'healpy.isnsideok', 'hp.isnsideok', (['(nside * rebin_shell)'], {}), '(nside * rebin_shell)\n', (3343, 3364), True, 'import healpy as hp\n'), ((5177, 5223), 'healpy.pix2ang', 'hp.pix2ang', (['(self.nside * self.rebin_shell)', 'pix'], {}), '(self.nside * self.rebin_shell, pix)\n', (5187, 5223), True, 'import healpy as hp\n'), ((6060, 6100), 'healpy.ud_grade', 'hp.ud_grade', (['f_shell_highres', 'self.nside'], {}), '(f_shell_highres, self.nside)\n', (6071, 6100), True, 'import healpy as hp\n'), ((4920, 4945), 'healpy.nside2npix', 'hp.nside2npix', (['self.nside'], {}), '(self.nside)\n', (4933, 4945), True, 'import healpy as hp\n'), ((5036, 5080), 'healpy.nside2npix', 'hp.nside2npix', (['(self.nside * self.rebin_shell)'], {}), '(self.nside * self.rebin_shell)\n', (5049, 5080), True, 'import healpy as hp\n'), ((5108, 5152), 'healpy.nside2npix', 'hp.nside2npix', (['(self.nside * self.rebin_shell)'], {}), '(self.nside * self.rebin_shell)\n', (5121, 5152), True, 'import healpy as hp\n'), ((6660, 6685), 'healpy.nside2npix', 
'hp.nside2npix', (['self.nside'], {}), '(self.nside)\n', (6673, 6685), True, 'import healpy as hp\n'), ((5645, 5659), 'numpy.floor', 'np.floor', (['xind'], {}), '(xind)\n', (5653, 5659), True, 'import numpy as np\n'), ((5695, 5709), 'numpy.floor', 'np.floor', (['yind'], {}), '(yind)\n', (5703, 5709), True, 'import numpy as np\n'), ((5745, 5759), 'numpy.floor', 'np.floor', (['zind'], {}), '(zind)\n', (5753, 5759), True, 'import numpy as np\n')] |
import math
import numpy as np
from itmlogic.aknfe import aknfe
from itmlogic.fht import fht
def adiff(d, prop):
    """
    Returns adiff1 given input parameters. All parameters may not be needed.

    The diffraction region is beyond the smooth-earth horizon and short of where tropospheric
    scatter takes over. It is an essential region and associated coefficients must be computed.
    The function adiff finds the 'diffraction attenuation' at the distance d, using a convex
    combination of smooth earth diffraction and double knife-edge diffraction. A call with
    d = 0 sets up initial constants (cached in ``prop``), and MUST be made before any call
    with d > 0, since the d > 0 branch reads the cached keys ('qk', 'wd1', 'xd1', 'afo',
    'aht', 'xht').

    Parameters
    ----------
    d : float
        Distance in meters.
    prop : dict
        Contains all input propagation parameters (mutated in place when d == 0).

    Returns
    -------
    adiff1 : float
        Returns the estimated diffraction attenuation.
    prop : dict
        Contains all input and output propagation parameters.
    """
    # Exponent for the cube roots taken below.
    third = 1 / 3
    if d == 0:
        # --- Setup call: precompute coefficients and store them in prop. ---
        # q: product of antenna structural heights hg[0]*hg[1].
        q = prop['hg'][0] * prop['hg'][1]
        prop['qk'] = prop['he'][0] * prop['he'][1] - q
        if prop['mdp'] < 0:
            q = q + 10
        prop['wd1'] = math.sqrt(1 + prop['qk'] / q)
        prop['xd1'] = prop['dla'] + prop['tha'] / prop['gme']
        # Terrain-roughness term derived from dh; prop['wn'] is presumably the
        # wavenumber — confirm against the itmlogic/ITM documentation.
        q = (1 - 0.8 * math.exp(-prop['dlsa'] / 50e3)) * prop['dh']
        q = 0.78 * q * math.exp(-(q / 16) ** 0.25)
        # Fixed clutter attenuation offset, capped at 15 dB.
        prop['afo'] = (
            min(15, 2.171 * np.log(1 + 4.77e-4 * prop['hg'][0]
                * prop['hg'][1] * prop['wn'] * q))
        )
        prop['qk'] = 1 / abs(prop['zgnd'])
        prop['aht'] = 20
        prop['xht'] = 0
        # Accumulate per-terminal smooth-earth contributions via fht().
        for j in range(0, 2):
            a = 0.5 * prop['dl'][j] ** 2 / prop['he'][j]
            wa = (a * prop['wn']) ** third
            pk = prop['qk'] / wa
            q = (1.607 - pk) * 151.0 * wa * prop['dl'][j] / a
            prop['xht'] = prop['xht'] + q
            prop['aht'] = prop['aht'] + fht(q, pk)
        adiff1 = 0
    else:
        # --- Evaluation call: attenuation at distance d. ---
        th = prop['tha'] + d * prop['gme']
        ds = d - prop['dla']
        # 0.0795775 ~ 1/(4*pi); q scales the knife-edge diffraction argument.
        q = 0.0795775 * prop['wn'] * ds * th**2
        # Double knife-edge contribution, one term per horizon via aknfe().
        adiff1 = aknfe(q * prop['dl'][0] /
                       (ds + prop['dl'][0])) + aknfe(q * prop['dl'][1] /
                                                     (ds + prop['dl'][1]))
        a = ds / th
        wa = (a * prop['wn']) ** third
        pk = prop['qk'] / wa
        q = (1.607 - pk) * 151.0 * wa * th + prop['xht']
        # Smooth-earth diffraction attenuation (log-linear in q).
        ar = 0.05751 * q - 4.343 * np.log(q) - prop['aht']
        q = (
            (prop['wd1'] + prop['xd1'] / d) *
            min(((1 - 0.8 * math.exp(-d / 50e3)) *
            prop['dh'] * prop['wn']), 6283.2)
        )
        # wd in (0, 1]: weighting for the convex combination of smooth-earth
        # (ar) and knife-edge (adiff1) attenuations, plus the fixed offset afo.
        wd = 25.1 / (25.1 + math.sqrt(q))
        adiff1 = ar * wd + (1 - wd) * adiff1 + prop['afo']
    return adiff1, prop
| [
"numpy.log",
"math.sqrt",
"itmlogic.fht.fht",
"math.exp",
"itmlogic.aknfe.aknfe"
] | [((1146, 1175), 'math.sqrt', 'math.sqrt', (["(1 + prop['qk'] / q)"], {}), "(1 + prop['qk'] / q)\n", (1155, 1175), False, 'import math\n'), ((1330, 1357), 'math.exp', 'math.exp', (['(-(q / 16) ** 0.25)'], {}), '(-(q / 16) ** 0.25)\n', (1338, 1357), False, 'import math\n'), ((2093, 2140), 'itmlogic.aknfe.aknfe', 'aknfe', (["(q * prop['dl'][0] / (ds + prop['dl'][0]))"], {}), "(q * prop['dl'][0] / (ds + prop['dl'][0]))\n", (2098, 2140), False, 'from itmlogic.aknfe import aknfe\n'), ((2155, 2202), 'itmlogic.aknfe.aknfe', 'aknfe', (["(q * prop['dl'][1] / (ds + prop['dl'][1]))"], {}), "(q * prop['dl'][1] / (ds + prop['dl'][1]))\n", (2160, 2202), False, 'from itmlogic.aknfe import aknfe\n'), ((1411, 1480), 'numpy.log', 'np.log', (["(1 + 0.000477 * prop['hg'][0] * prop['hg'][1] * prop['wn'] * q)"], {}), "(1 + 0.000477 * prop['hg'][0] * prop['hg'][1] * prop['wn'] * q)\n", (1417, 1480), True, 'import numpy as np\n'), ((1910, 1920), 'itmlogic.fht.fht', 'fht', (['q', 'pk'], {}), '(q, pk)\n', (1913, 1920), False, 'from itmlogic.fht import fht\n'), ((2624, 2636), 'math.sqrt', 'math.sqrt', (['q'], {}), '(q)\n', (2633, 2636), False, 'import math\n'), ((1262, 1295), 'math.exp', 'math.exp', (["(-prop['dlsa'] / 50000.0)"], {}), "(-prop['dlsa'] / 50000.0)\n", (1270, 1295), False, 'import math\n'), ((2399, 2408), 'numpy.log', 'np.log', (['q'], {}), '(q)\n', (2405, 2408), True, 'import numpy as np\n'), ((2512, 2534), 'math.exp', 'math.exp', (['(-d / 50000.0)'], {}), '(-d / 50000.0)\n', (2520, 2534), False, 'import math\n')] |
import numpy as np
import os
from IPython.core.display import HTML
from ipywidgets import widgets
from IPython.core.display import display
from NeuNorm.normalization import Normalization
from __code.ipywe import fileselector
from __code import utilities, file_handler
class BinHandler(object):
    """Notebook workflow to load a stack of images, rebin them by an integer
    factor (mean pooling) and export the results as TIFF files.
    """

    # Class-level defaults; the methods below shadow them with instance
    # attributes once images are loaded.
    working_dir = ''
    images_ui = None
    data = []
    list_file_names = []
    # np.NaN was removed in NumPy 2.0; np.nan is the canonical spelling.
    image_dimension = {'height': np.nan,
                       'width': np.nan}

    def __init__(self, working_dir=''):
        """Remember the starting directory for the file-selection widgets.

        Parameters
        ----------
        working_dir : str
            Directory the file browsers open in.
        """
        self.working_dir = working_dir
        self.output_folder_ui = None

    def select_images(self):
        """Show a multi-selection file browser; ``load`` runs on confirmation."""
        _instruction = 'Select images to bin'
        self.images_ui = fileselector.FileSelectorPanel(instruction=_instruction,
                                                       start_dir=self.working_dir,
                                                       multiple=True,
                                                       next=self.load)
        self.images_ui.show()

    def get_list_images(self):
        """Return the list of files currently selected in the browser."""
        return self.images_ui.selected

    def load(self, list_images):
        """Load the selected image files and record their dimensions.

        Parameters
        ----------
        list_images : list of str
            Paths of the images to load.
        """
        self.list_file_names = list_images
        self.o_norm = Normalization()
        self.o_norm.load(file=list_images, notebook=True)
        self.data = self.o_norm.data['sample']['data']
        self.__calculate_image_dimension()

    def __calculate_image_dimension(self):
        """Cache (height, width) of the first loaded image; all images are
        assumed to share the same shape — TODO confirm against the loader."""
        first_image = self.data[0]
        self.image_dimension['height'], self.image_dimension['width'] = np.shape(first_image)

    def __bin_parameter_changed(self, sender):
        """Widget callback: update the displayed post-binning image size."""
        # np.int was removed in NumPy 1.20+; the builtin int is equivalent.
        new_bin = int(self.bin_para.value)
        self.bin_value = new_bin
        old_width = self.image_dimension['width']
        old_height = self.image_dimension['height']
        new_width = int(old_width / new_bin)
        new_height = int(old_height / new_bin)
        self.right_widgets.children[1].value = "Width: {} pixels".format(new_width)
        self.right_widgets.children[2].value = "Height: {} pixels".format(new_height)

    def select_bin_parameter(self):
        """Display the bin-factor dropdown plus current/new image sizes."""
        _width = self.image_dimension['width']
        _height = self.image_dimension['height']
        left_widgets = widgets.VBox([widgets.HTML(value="<b>Current Image Size:</b>",
                                                   layout=widgets.Layout(width='250px')),
                                      widgets.Label("Width: {} pixels".format(_width),
                                                    layout=widgets.Layout(width='100%')),
                                      widgets.Label("Height: {} pixels".format(_height),
                                                    layout=widgets.Layout(width='100%'))])
        options_list = [str(_) for _ in np.arange(2, 21)]
        self.bin_para = widgets.Dropdown(options=options_list,
                                          value='2',
                                          continuous_update=False,
                                          layout=widgets.Layout(width='50%'))
        self.bin_para.observe(self.__bin_parameter_changed)
        center_widgets = widgets.VBox([widgets.HTML("<b>Bin Parameter:</b>",
                                                     layout=widgets.Layout(width='250px')),
                                        self.bin_para])
        self.right_widgets = widgets.VBox([widgets.HTML("<b>New Image Size:</b>",
                                                        layout=widgets.Layout(width='250px')),
                                           widgets.Label("Width: {} pixels".format(250),
                                                         layout=widgets.Layout(width='100%')),
                                           widgets.Label("Height: {} pixels".format(250),
                                                         layout=widgets.Layout(width='100%'))])
        # Initialise the "new size" labels for the default bin value.
        self.__bin_parameter_changed(None)
        full_widget = widgets.HBox([left_widgets,
                                   center_widgets,
                                   self.right_widgets])
        display(full_widget)

    def select_export_folder(self):
        """Show a directory browser; ``export`` runs on confirmation."""
        self.output_folder_ui = fileselector.FileSelectorPanel(instruction='Select Output Folder',
                                                      start_dir=self.working_dir,
                                                      multiple=False,
                                                      next=self.export,
                                                      type='directory')
        self.output_folder_ui.show()

    def rebin_data(self, data=None):
        """Mean-pool ``data`` by the currently selected bin factor.

        Trailing rows/columns that do not fill a complete bin are discarded
        before pooling so the reshape below is exact.

        Parameters
        ----------
        data : 2darray
            Image to rebin.

        Returns
        -------
        2darray of shape (height // bin, width // bin).
        """
        # Avoid shadowing the builtin `bin`.
        bin_size = self.bin_value
        height = self.image_dimension['height']
        width = self.image_dimension['width']
        nbr_height_bin = int(np.floor(height / bin_size))
        new_height = int(height)
        if np.mod(height, bin_size) != 0:
            new_height = int(nbr_height_bin * bin_size)
        nbr_width_bin = int(np.floor(width / bin_size))
        new_width = int(width)
        if np.mod(width, bin_size) != 0:
            new_width = int(nbr_width_bin * bin_size)
        cropped = data[0: new_height, 0: new_width]
        # Group pixels into (row-bin, bin, col-bin, bin) blocks, then average
        # over each block.
        blocks = cropped.reshape(nbr_height_bin, bin_size, nbr_width_bin, bin_size)
        return blocks.mean(axis=3).mean(axis=1)

    def get_input_folder(self):
        """Return the name (not full path) of the folder holding the inputs."""
        first_file = self.list_file_names[0]
        full_dir_name = os.path.dirname(first_file)
        return os.path.basename(full_dir_name)

    def export(self, output_folder):
        """Rebin every loaded image and write TIFFs into a new subfolder of
        ``output_folder`` named '<input_folder>_rebin_by_<factor>'."""
        input_folder = self.get_input_folder()
        output_folder = os.path.abspath(os.path.join(output_folder,
                                                     "{}_rebin_by_{}".format(input_folder, self.bin_value)))
        utilities.make_dir(dir=output_folder, overwrite=False)
        progress = widgets.IntProgress()
        progress.max = len(self.list_file_names)
        display(progress)
        for index, file_name in enumerate(self.list_file_names):
            base, _ext = os.path.splitext(os.path.basename(file_name))
            output_file_name = os.path.join(output_folder, base + '.tiff')
            rebinned = self.rebin_data(self.data[index])
            file_handler.make_tiff(filename=output_file_name, data=rebinned)
            progress.value = index + 1
        display(HTML('<span style="font-size: 20px; color:blue">File created in ' + \
                     output_folder + '</span>'))
| [
"IPython.core.display.display",
"IPython.core.display.HTML",
"NeuNorm.normalization.Normalization",
"__code.utilities.make_dir",
"numpy.floor",
"os.path.splitext",
"os.path.join",
"os.path.dirname",
"numpy.mod",
"ipywidgets.widgets.IntProgress",
"os.path.basename",
"ipywidgets.widgets.HBox",
... | [((679, 799), '__code.ipywe.fileselector.FileSelectorPanel', 'fileselector.FileSelectorPanel', ([], {'instruction': '_instruction', 'start_dir': 'self.working_dir', 'multiple': '(True)', 'next': 'self.load'}), '(instruction=_instruction, start_dir=self.\n working_dir, multiple=True, next=self.load)\n', (709, 799), False, 'from __code.ipywe import fileselector\n'), ((1227, 1242), 'NeuNorm.normalization.Normalization', 'Normalization', ([], {}), '()\n', (1240, 1242), False, 'from NeuNorm.normalization import Normalization\n'), ((1549, 1567), 'numpy.shape', 'np.shape', (['_image_0'], {}), '(_image_0)\n', (1557, 1567), True, 'import numpy as np\n'), ((1635, 1662), 'numpy.int', 'np.int', (['self.bin_para.value'], {}), '(self.bin_para.value)\n', (1641, 1662), True, 'import numpy as np\n'), ((1820, 1847), 'numpy.int', 'np.int', (['(old_width / new_bin)'], {}), '(old_width / new_bin)\n', (1826, 1847), True, 'import numpy as np\n'), ((1869, 1897), 'numpy.int', 'np.int', (['(old_height / new_bin)'], {}), '(old_height / new_bin)\n', (1875, 1897), True, 'import numpy as np\n'), ((3907, 3971), 'ipywidgets.widgets.HBox', 'widgets.HBox', (['[left_widgets, center_widgets, self.right_widgets]'], {}), '([left_widgets, center_widgets, self.right_widgets])\n', (3919, 3971), False, 'from ipywidgets import widgets\n'), ((4053, 4073), 'IPython.core.display.display', 'display', (['full_widget'], {}), '(full_widget)\n', (4060, 4073), False, 'from IPython.core.display import display\n'), ((4144, 4299), '__code.ipywe.fileselector.FileSelectorPanel', 'fileselector.FileSelectorPanel', ([], {'instruction': '"""Select Output Folder"""', 'start_dir': 'self.working_dir', 'multiple': '(False)', 'next': 'self.export', 'type': '"""directory"""'}), "(instruction='Select Output Folder',\n start_dir=self.working_dir, multiple=False, next=self.export, type=\n 'directory')\n", (4174, 4299), False, 'from __code.ipywe import fileselector\n'), ((5595, 5618), 'os.path.dirname', 'os.path.dirname', 
(['_file0'], {}), '(_file0)\n', (5610, 5618), False, 'import os\n'), ((5634, 5665), 'os.path.basename', 'os.path.basename', (['full_dir_name'], {}), '(full_dir_name)\n', (5650, 5665), False, 'import os\n'), ((6082, 6136), '__code.utilities.make_dir', 'utilities.make_dir', ([], {'dir': 'output_folder', 'overwrite': '(False)'}), '(dir=output_folder, overwrite=False)\n', (6100, 6136), False, 'from __code import utilities, file_handler\n'), ((6150, 6171), 'ipywidgets.widgets.IntProgress', 'widgets.IntProgress', ([], {}), '()\n', (6169, 6171), False, 'from ipywidgets import widgets\n'), ((6222, 6232), 'IPython.core.display.display', 'display', (['w'], {}), '(w)\n', (6229, 6232), False, 'from IPython.core.display import display\n'), ((4877, 4899), 'numpy.floor', 'np.floor', (['(height / bin)'], {}), '(height / bin)\n', (4885, 4899), True, 'import numpy as np\n'), ((5089, 5110), 'numpy.floor', 'np.floor', (['(width / bin)'], {}), '(width / bin)\n', (5097, 5110), True, 'import numpy as np\n'), ((6319, 6342), 'os.path.basename', 'os.path.basename', (['_file'], {}), '(_file)\n', (6335, 6342), False, 'import os\n'), ((6369, 6395), 'os.path.splitext', 'os.path.splitext', (['basename'], {}), '(basename)\n', (6385, 6395), False, 'import os\n'), ((6427, 6471), 'os.path.join', 'os.path.join', (['output_folder', "(_base + '.tiff')"], {}), "(output_folder, _base + '.tiff')\n", (6439, 6471), False, 'import os\n'), ((6545, 6612), '__code.file_handler.make_tiff', 'file_handler.make_tiff', ([], {'filename': 'output_file_name', 'data': '_rebin_data'}), '(filename=output_file_name, data=_rebin_data)\n', (6567, 6612), False, 'from __code import utilities, file_handler\n'), ((6664, 6762), 'IPython.core.display.HTML', 'HTML', (['(\'<span style="font-size: 20px; color:blue">File created in \' +\n output_folder + \'</span>\')'], {}), '(\'<span style="font-size: 20px; color:blue">File created in \' +\n output_folder + \'</span>\')\n', (6668, 6762), False, 'from IPython.core.display import 
HTML\n'), ((2772, 2788), 'numpy.arange', 'np.arange', (['(2)', '(21)'], {}), '(2, 21)\n', (2781, 2788), True, 'import numpy as np\n'), ((3004, 3031), 'ipywidgets.widgets.Layout', 'widgets.Layout', ([], {'width': '"""50%"""'}), "(width='50%')\n", (3018, 3031), False, 'from ipywidgets import widgets\n'), ((4917, 4936), 'numpy.mod', 'np.mod', (['height', 'bin'], {}), '(height, bin)\n', (4923, 4936), True, 'import numpy as np\n'), ((5128, 5146), 'numpy.mod', 'np.mod', (['width', 'bin'], {}), '(width, bin)\n', (5134, 5146), True, 'import numpy as np\n'), ((2346, 2375), 'ipywidgets.widgets.Layout', 'widgets.Layout', ([], {'width': '"""250px"""'}), "(width='250px')\n", (2360, 2375), False, 'from ipywidgets import widgets\n'), ((2522, 2550), 'ipywidgets.widgets.Layout', 'widgets.Layout', ([], {'width': '"""100%"""'}), "(width='100%')\n", (2536, 2550), False, 'from ipywidgets import widgets\n'), ((2699, 2727), 'ipywidgets.widgets.Layout', 'widgets.Layout', ([], {'width': '"""100%"""'}), "(width='100%')\n", (2713, 2727), False, 'from ipywidgets import widgets\n'), ((3230, 3259), 'ipywidgets.widgets.Layout', 'widgets.Layout', ([], {'width': '"""250px"""'}), "(width='250px')\n", (3244, 3259), False, 'from ipywidgets import widgets\n'), ((3458, 3487), 'ipywidgets.widgets.Layout', 'widgets.Layout', ([], {'width': '"""250px"""'}), "(width='250px')\n", (3472, 3487), False, 'from ipywidgets import widgets\n'), ((3633, 3661), 'ipywidgets.widgets.Layout', 'widgets.Layout', ([], {'width': '"""100%"""'}), "(width='100%')\n", (3647, 3661), False, 'from ipywidgets import widgets\n'), ((3808, 3836), 'ipywidgets.widgets.Layout', 'widgets.Layout', ([], {'width': '"""100%"""'}), "(width='100%')\n", (3822, 3836), False, 'from ipywidgets import widgets\n')] |
#%% Change working directory from the workspace root to the ipynb file location. Turn this addition off with the DataScience.changeDirOnImportExport setting
# ms-python.python added
import os
# Move into the notebook's own directory so relative paths (e.g. 'mpg.csv')
# resolve. Narrowed from a bare `except:` — only a failed chdir is expected
# and safe to ignore here; anything else should surface.
try:
    os.chdir(os.path.join(os.getcwd(), '1_introduction/w1_python_fundamentals/1_week1'))
    print(os.getcwd())
except OSError:
    # Already in the right directory, or target does not exist — carry on.
    pass
#%% [markdown]
# ---
#
# _You are currently looking at **version 1.0** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-data-analysis/resources/0dhYG) course resource._
#
# ---
#%% [markdown]
# # The Python Programming Language: Functions
#%%
x = 1
y = 2
x + y
#%%
x
#%% [markdown]
# <br>
# `add_numbers` is a function that takes two numbers and adds them together.
#%%
def add_numbers(x, y):
    """Return the sum of *x* and *y*."""
    return x + y

add_numbers(1, 2)
#%% [markdown]
# <br>
# `add_numbers` updated to take an optional 3rd parameter. Using `print` allows printing of multiple expressions within a single cell.
#%%
def add_numbers(x, y, z=None):
    """Return x + y, plus z when it is provided.

    PEP 8: comparisons to None use `is`, not `==`.
    """
    if z is None:
        return x + y
    return x + y + z

print(add_numbers(1, 2))
print(add_numbers(1, 2, 3))
#%% [markdown]
# <br>
# `add_numbers` updated to take an optional flag parameter.
#%%
def add_numbers(x, y, z=None, flag=False):
    """Return x + y (plus z if given); print a notice when flag is set.

    PEP 8: comparisons to None use `is`, not `==`.
    """
    if flag:
        print('Flag is true!')
    if z is None:
        return x + y
    return x + y + z

print(add_numbers(1, 2, flag=True))
#%% [markdown]
# <br>
# Assign function `add_numbers` to variable `a`.
#%%
def add_numbers(x, y):
    """Return the sum of the two arguments."""
    total = x + y
    return total

# Functions are first-class objects: bind the function to another name.
a = add_numbers
a(1, 2)
#%% [markdown]
# <br>
# # The Python Programming Language: Types and Sequences
#%% [markdown]
# <br>
# Use `type` to return the object's type.
#%%
type('This is a string')
#%%
type(None)
#%%
type(1)
#%%
type(1.0)
#%%
type(add_numbers)
#%% [markdown]
# <br>
# Tuples are an immutable data structure (cannot be altered).
#%%
x = (1, 'a', 2, 'b')
type(x)
#%% [markdown]
# <br>
# Lists are a mutable data structure.
#%%
x = [1, 'a', 2, 'b']
type(x)
#%% [markdown]
# <br>
# Use `append` to append an object to a list.
#%%
x.append(3.3)
print(x)
#%% [markdown]
# <br>
# This is an example of how to loop through each item in the list.
#%%
for item in x:
print(item)
#%% [markdown]
# <br>
# Or using the indexing operator:
#%%
i=0
while( i != len(x) ):
print(x[i])
i = i + 1
#%% [markdown]
# <br>
# Use `+` to concatenate lists.
#%%
[1,2] + [3,4]
#%% [markdown]
# <br>
# Use `*` to repeat lists.
#%%
[1]*3
#%% [markdown]
# <br>
# Use the `in` operator to check if something is inside a list.
#%%
1 in [1, 2, 3]
#%% [markdown]
# <br>
# Now let's look at strings. Use bracket notation to slice a string.
#%%
x = 'This is a string'
print(x[0]) #first character
print(x[0:1]) #first character, but we have explicitly set the end character
print(x[0:2]) #first two characters
#%% [markdown]
# <br>
# This will return the last element of the string.
#%%
x[-1]
#%% [markdown]
# <br>
# This will return the slice starting from the 4th element from the end and stopping before the 2nd element from the end.
#%%
x[-4:-2]
#%% [markdown]
# <br>
# This is a slice from the beginning of the string and stopping before the 3rd element.
#%%
x[:3]
#%% [markdown]
# <br>
# And this is a slice starting from the 3rd element of the string and going all the way to the end.
#%%
x[3:]
#%%
firstname = 'Christopher'
lastname = 'Brooks'
print(firstname + ' ' + lastname)
print(firstname*3)
print('Chris' in firstname)
#%% [markdown]
# <br>
# `split` returns a list of all the words in a string, or a list split on a specific character.
#%%
firstname = '<NAME>'.split(' ')[0] # [0] selects the first element of the list
lastname = '<NAME>'.split(' ')[-1] # [-1] selects the last element of the list
print(firstname)
print(lastname)
#%% [markdown]
# <br>
# Make sure you convert objects to strings before concatenating.
#%%
'Chris' + 2
#%%
'Chris' + str(2)
#%% [markdown]
# <br>
# Dictionaries associate keys with values.
#%%
x = {'<NAME>': '<EMAIL>', '<NAME>': '<EMAIL>'}
x['<NAME>'] # Retrieve a value by using the indexing operator
#%%
x['<NAME>'] = None
x['<NAME>']
#%% [markdown]
# <br>
# Iterate over all of the keys:
#%%
for name in x:
print(x[name])
#%% [markdown]
# <br>
# Iterate over all of the values:
#%%
for email in x.values():
print(email)
#%% [markdown]
# <br>
# Iterate over all of the items in the list:
#%%
for name, email in x.items():
print(name)
print(email)
#%% [markdown]
# <br>
# You can unpack a sequence into different variables:
#%%
x = ('Christopher', 'Brooks', '<EMAIL>')
fname, lname, email = x
#%%
fname
#%%
lname
#%% [markdown]
# <br>
# Make sure the number of values you are unpacking matches the number of variables being assigned.
#%%
x = ('Christopher', 'Brooks', '<EMAIL>', '<NAME>')
fname, lname, email = x
#%% [markdown]
# <br>
# # The Python Programming Language: More on Strings
#%%
print('Chris' + 2)
#%%
print('Chris' + str(2))
#%% [markdown]
# <br>
# Python has a built in method for convenient string formatting.
#%%
sales_record = {
'price': 3.24,
'num_items': 4,
'person': 'Chris'}
sales_statement = '{} bought {} item(s) at a price of {} each for a total of {}'
print(sales_statement.format(sales_record['person'],
sales_record['num_items'],
sales_record['price'],
sales_record['num_items']*sales_record['price']))
#%% [markdown]
# <br>
# # Reading and Writing CSV files
#%% [markdown]
# <br>
# Let's import our datafile mpg.csv, which contains fuel economy data for 234 cars.
#%%
import csv
get_ipython().run_line_magic('precision', '2')
with open('mpg.csv') as csvfile:
mpg = list(csv.DictReader(csvfile))
mpg[:3] # The first three dictionaries in our list.
#%% [markdown]
# <br>
# `csv.Dictreader` has read in each row of our csv file as a dictionary. `len` shows that our list is comprised of 234 dictionaries.
#%%
len(mpg)
#%% [markdown]
# <br>
# `keys` gives us the column names of our csv.
#%%
mpg[0].keys()
#%% [markdown]
# <br>
# This is how to find the average cty fuel economy across all cars. All values in the dictionaries are strings, so we need to convert to float.
#%%
sum(float(d['cty']) for d in mpg) / len(mpg)
#%% [markdown]
# <br>
# Similarly this is how to find the average hwy fuel economy across all cars.
#%%
sum(float(d['hwy']) for d in mpg) / len(mpg)
#%% [markdown]
# <br>
# Use `set` to return the unique values for the number of cylinders the cars in our dataset have.
#%%
cylinders = set(d['cyl'] for d in mpg)
cylinders
#%% [markdown]
# <br>
# Here's a more complex example where we are grouping the cars by number of cylinder, and finding the average cty mpg for each group.
#%%
# Average city fuel economy per cylinder level, collected as
# (cylinder, avg mpg) tuples and sorted by cylinder count.
CtyMpgByCyl = []
for cyl_level in cylinders:
    city_values = [float(row['cty']) for row in mpg if row['cyl'] == cyl_level]
    CtyMpgByCyl.append((cyl_level, sum(city_values) / len(city_values)))
CtyMpgByCyl.sort(key=lambda entry: entry[0])
CtyMpgByCyl
#%% [markdown]
# <br>
# Use `set` to return the unique values for the class types in our dataset.
#%%
vehicleclass = set(d['class'] for d in mpg) # what are the class types
vehicleclass
#%% [markdown]
# <br>
# And here's an example of how to find the average hwy mpg for each class of vehicle in our dataset.
#%%
# Average highway fuel economy per vehicle class, collected as
# (class, avg mpg) tuples and sorted by the average itself.
HwyMpgByClass = []
for vclass in vehicleclass:
    hwy_values = [float(row['hwy']) for row in mpg if row['class'] == vclass]
    HwyMpgByClass.append((vclass, sum(hwy_values) / len(hwy_values)))
HwyMpgByClass.sort(key=lambda entry: entry[1])
HwyMpgByClass
#%% [markdown]
# <br>
# # The Python Programming Language: Dates and Times
#%%
import datetime as dt
import time as tm
#%% [markdown]
# <br>
# `time` returns the current time in seconds since the Epoch. (January 1st, 1970)
#%%
tm.time()
#%% [markdown]
# <br>
# Convert the timestamp to datetime.
#%%
dtnow = dt.datetime.fromtimestamp(tm.time())
dtnow
#%% [markdown]
# <br>
# Handy datetime attributes:
#%%
dtnow.year, dtnow.month, dtnow.day, dtnow.hour, dtnow.minute, dtnow.second # get year, month, day, etc.from a datetime
#%% [markdown]
# <br>
# `timedelta` is a duration expressing the difference between two dates.
#%%
delta = dt.timedelta(days = 100) # create a timedelta of 100 days
delta
#%% [markdown]
# <br>
# `date.today` returns the current local date.
#%%
today = dt.date.today()
#%%
today - delta # the date 100 days ago
#%%
today > today-delta # compare dates
#%% [markdown]
# <br>
# # The Python Programming Language: Objects and map()
#%% [markdown]
# <br>
# An example of a class in python:
#%%
class Person:
    """A simple person record sharing one class-wide department."""

    department = 'School of Information'  # class variable, shared by all instances

    def set_name(self, new_name):
        """Store the person's name on the instance."""
        self.name = new_name

    def set_location(self, new_location):
        """Store the person's location on the instance."""
        self.location = new_location
#%%
person = Person()
person.set_name('<NAME>')
person.set_location('Ann Arbor, MI, USA')
print('{} live in {} and works in the department {}'.format(person.name, person.location, person.department))
#%% [markdown]
# <br>
# Here's an example of mapping the `min` function between two lists.
#%%
store1 = [10.00, 11.00, 12.34, 2.34]
store2 = [9.00, 11.10, 12.34, 2.01]
cheapest = map(min, store1, store2)
cheapest  # note: map returns a lazy iterator, not a list
#%% [markdown]
# <br>
# Now let's iterate through the map object to see the values.
#%%
for item in cheapest:
    print(item)
#%% [markdown]
# <br>
# # The Python Programming Language: Lambda and List Comprehensions
#%% [markdown]
# <br>
# Here's an example of lambda that takes in three parameters and adds the first two.
#%%
my_function = lambda a, b, c : a + b  # c is accepted but deliberately ignored
#%%
my_function(1, 2, 3)
#%% [markdown]
# <br>
# Let's iterate from 0 to 999 and return the even numbers.
#%%
my_list = []
for number in range(0, 1000):
    if number % 2 == 0:
        my_list.append(number)
my_list
#%% [markdown]
# <br>
# Now the same thing but with list comprehension.
#%%
my_list = [number for number in range(0,1000) if number % 2 == 0]
my_list
#%% [markdown]
# <br>
# # The Python Programming Language: Numerical Python (NumPy)
#%%
import numpy as np
#%% [markdown]
# <br>
# ## Creating Arrays
#%% [markdown]
# Create a list and convert it to a numpy array
#%%
mylist = [1, 2, 3]
x = np.array(mylist)
x
#%% [markdown]
# <br>
# Or just pass in a list directly
#%%
y = np.array([4, 5, 6])
y
#%% [markdown]
# <br>
# Pass in a list of lists to create a multidimensional array.
#%%
m = np.array([[7, 8, 9], [10, 11, 12]])
m
#%% [markdown]
# <br>
# Use the shape method to find the dimensions of the array. (rows, columns)
#%%
m.shape
#%% [markdown]
# <br>
# `arange` returns evenly spaced values within a given interval.
#%%
n = np.arange(0, 30, 2) # start at 0 count up by 2, stop before 30
n
#%% [markdown]
# <br>
# `reshape` returns an array with the same data with a new shape.
#%%
n = n.reshape(3, 5) # reshape array to be 3x5
n
#%% [markdown]
# <br>
# `linspace` returns evenly spaced numbers over a specified interval.
#%%
o = np.linspace(0, 4, 9) # return 9 evenly spaced values from 0 to 4
o
#%% [markdown]
# <br>
# `resize` changes the shape and size of array in-place.
#%%
o.resize(3, 3)
o
#%% [markdown]
# <br>
# `ones` returns a new array of given shape and type, filled with ones.
#%%
np.ones((3, 2))
#%% [markdown]
# <br>
# `zeros` returns a new array of given shape and type, filled with zeros.
#%%
np.zeros((2, 3))
#%% [markdown]
# <br>
# `eye` returns a 2-D array with ones on the diagonal and zeros elsewhere.
#%%
np.eye(3)
#%% [markdown]
# <br>
# `diag` extracts a diagonal or constructs a diagonal array.
#%%
np.diag(y)
#%% [markdown]
# <br>
# Create an array using repeating list (or see `np.tile`)
#%%
np.array([1, 2, 3] * 3)
#%% [markdown]
# <br>
# Repeat elements of an array using `repeat`.
#%%
np.repeat([1, 2, 3], 3)
#%% [markdown]
# <br>
# #### Combining Arrays
#%%
p = np.ones([2, 3], int)
p
#%% [markdown]
# <br>
# Use `vstack` to stack arrays in sequence vertically (row wise).
#%%
np.vstack([p, 2*p])
#%% [markdown]
# <br>
# Use `hstack` to stack arrays in sequence horizontally (column wise).
#%%
np.hstack([p, 2*p])
#%% [markdown]
# <br>
# ## Operations
#%% [markdown]
# Use `+`, `-`, `*`, `/` and `**` to perform element wise addition, subtraction, multiplication, division and power.
#%%
print(x + y) # elementwise addition [1 2 3] + [4 5 6] = [5 7 9]
print(x - y) # elementwise subtraction [1 2 3] - [4 5 6] = [-3 -3 -3]
#%%
print(x * y) # elementwise multiplication [1 2 3] * [4 5 6] = [4 10 18]
print(x / y) # elementwise division [1 2 3] / [4 5 6] = [0.25 0.4 0.5]
#%%
print(x**2) # elementwise power [1 2 3] ^2 = [1 4 9]
#%% [markdown]
# <br>
# **Dot Product:**
#
# $ \begin{bmatrix}x_1 \ x_2 \ x_3\end{bmatrix}
# \cdot
# \begin{bmatrix}y_1 \\ y_2 \\ y_3\end{bmatrix}
# = x_1 y_1 + x_2 y_2 + x_3 y_3$
#%%
x.dot(y) # dot product 1*4 + 2*5 + 3*6
#%%
z = np.array([y, y**2])
print(len(z)) # number of rows of array
#%% [markdown]
# <br>
# Let's look at transposing arrays. Transposing permutes the dimensions of the array.
#%%
z = np.array([y, y**2])
z
#%% [markdown]
# <br>
# The shape of array `z` is `(2,3)` before transposing.
#%%
z.shape
#%% [markdown]
# <br>
# Use `.T` to get the transpose.
#%%
z.T
#%% [markdown]
# <br>
# The number of rows has swapped with the number of columns.
#%%
z.T.shape
#%% [markdown]
# <br>
# Use `.dtype` to see the data type of the elements in the array.
#%%
z.dtype
#%% [markdown]
# <br>
# Use `.astype` to cast to a specific type.
#%%
z = z.astype('f') # 'f' is float32
z.dtype
#%% [markdown]
# <br>
# ## Math Functions
#%% [markdown]
# Numpy has many built in math functions that can be performed on arrays.
#%%
a = np.array([-4, -2, 1, 3, 5])
#%%
a.sum()
#%%
a.max()
#%%
a.min()
#%%
a.mean()
#%%
a.std()
#%% [markdown]
# <br>
# `argmax` and `argmin` return the index of the maximum and minimum values in the array.
#%%
a.argmax()
#%%
a.argmin()
#%% [markdown]
# <br>
# ## Indexing / Slicing
#%%
s = np.arange(13)**2
s
#%% [markdown]
# <br>
# Use bracket notation to get the value at a specific index. Remember that indexing starts at 0.
#%%
s[0], s[4], s[-1]
#%% [markdown]
# <br>
# Use `:` to indicate a range. `array[start:stop]`
#
#
# Leaving `start` or `stop` empty will default to the beginning/end of the array.
#%%
s[1:5]
#%% [markdown]
# <br>
# Use negatives to count from the back.
#%%
s[-4:]
#%% [markdown]
# <br>
# A second `:` can be used to indicate step-size. `array[start:stop:stepsize]`
#
# Here we are starting 5th element from the end, and counting backwards by 2 until the beginning of the array is reached.
#%%
s[-5::-2]
#%% [markdown]
# <br>
# Let's look at a multidimensional array.
#%%
r = np.arange(36)
r.resize((6, 6))
r
#%% [markdown]
# <br>
# Use bracket notation to slice: `array[row, column]`
#%%
r[2, 2]
#%% [markdown]
# <br>
# And use : to select a range of rows or columns
#%%
r[3, 3:6]
#%% [markdown]
# <br>
# Here we are selecting all the rows up to (and not including) row 2, and all the columns up to (and not including) the last column.
#%%
r[:2, :-1]
#%% [markdown]
# <br>
# This is a slice of the last row, and only every other element.
#%%
r[-1, ::2]
#%% [markdown]
# <br>
# We can also perform conditional indexing. Here we are selecting values from the array that are greater than 30. (Also see `np.where`)
#%%
r[r > 30]
#%% [markdown]
# <br>
# Here we are assigning all values in the array that are greater than 30 to the value of 30.
#%%
r[r > 30] = 30
r
#%% [markdown]
# <br>
# ## Copying Data
#%% [markdown]
# Be careful with copying and modifying arrays in NumPy!
#
#
# `r2` is a slice of `r`
#%%
r2 = r[:3,:3] # a slice is a view, not a copy — writes to r2 propagate to r
r2
#%% [markdown]
# <br>
# Set this slice's values to zero ([:] selects the entire array)
#%%
r2[:] = 0
r2
#%% [markdown]
# <br>
# `r` has also been changed!
#%%
r
#%% [markdown]
# <br>
# To avoid this, use `r.copy` to create a copy that will not affect the original array
#%%
r_copy = r.copy()
r_copy
#%% [markdown]
# <br>
# Now when r_copy is modified, r will not be changed.
#%%
r_copy[:] = 10
print(r_copy, '\n')
print(r)
#%% [markdown]
# <br>
# ### Iterating Over Arrays
#%% [markdown]
# Let's create a new 4 by 3 array of random numbers 0-9.
#%%
test = np.random.randint(0, 10, (4,3))
test
#%% [markdown]
# <br>
# Iterate by row:
#%%
for row in test:
    print(row)
#%% [markdown]
# <br>
# Iterate by index:
#%%
for i in range(len(test)):
    print(test[i])
#%% [markdown]
# <br>
# Iterate by row and index:
#%%
for i, row in enumerate(test):
    print('row', i, 'is', row)
#%% [markdown]
# <br>
# Use `zip` to iterate over multiple iterables.
#%%
test2 = test**2
test2
#%%
for i, j in zip(test, test2):
    print(i,'+',j,'=',i+j)
| [
"numpy.eye",
"csv.DictReader",
"numpy.repeat",
"numpy.ones",
"numpy.hstack",
"datetime.date.today",
"numpy.diag",
"os.getcwd",
"numpy.array",
"numpy.linspace",
"numpy.zeros",
"numpy.random.randint",
"numpy.vstack",
"datetime.timedelta",
"time.time",
"numpy.arange"
] | [((8395, 8404), 'time.time', 'tm.time', ([], {}), '()\n', (8402, 8404), True, 'import time as tm\n'), ((8806, 8828), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(100)'}), '(days=100)\n', (8818, 8828), True, 'import datetime as dt\n'), ((8953, 8968), 'datetime.date.today', 'dt.date.today', ([], {}), '()\n', (8966, 8968), True, 'import datetime as dt\n'), ((10822, 10838), 'numpy.array', 'np.array', (['mylist'], {}), '(mylist)\n', (10830, 10838), True, 'import numpy as np\n'), ((10907, 10926), 'numpy.array', 'np.array', (['[4, 5, 6]'], {}), '([4, 5, 6])\n', (10915, 10926), True, 'import numpy as np\n'), ((11023, 11058), 'numpy.array', 'np.array', (['[[7, 8, 9], [10, 11, 12]]'], {}), '([[7, 8, 9], [10, 11, 12]])\n', (11031, 11058), True, 'import numpy as np\n'), ((11270, 11289), 'numpy.arange', 'np.arange', (['(0)', '(30)', '(2)'], {}), '(0, 30, 2)\n', (11279, 11289), True, 'import numpy as np\n'), ((11579, 11599), 'numpy.linspace', 'np.linspace', (['(0)', '(4)', '(9)'], {}), '(0, 4, 9)\n', (11590, 11599), True, 'import numpy as np\n'), ((11848, 11863), 'numpy.ones', 'np.ones', (['(3, 2)'], {}), '((3, 2))\n', (11855, 11863), True, 'import numpy as np\n'), ((11966, 11982), 'numpy.zeros', 'np.zeros', (['(2, 3)'], {}), '((2, 3))\n', (11974, 11982), True, 'import numpy as np\n'), ((12086, 12095), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (12092, 12095), True, 'import numpy as np\n'), ((12185, 12195), 'numpy.diag', 'np.diag', (['y'], {}), '(y)\n', (12192, 12195), True, 'import numpy as np\n'), ((12282, 12305), 'numpy.array', 'np.array', (['([1, 2, 3] * 3)'], {}), '([1, 2, 3] * 3)\n', (12290, 12305), True, 'import numpy as np\n'), ((12380, 12403), 'numpy.repeat', 'np.repeat', (['[1, 2, 3]', '(3)'], {}), '([1, 2, 3], 3)\n', (12389, 12403), True, 'import numpy as np\n'), ((12460, 12480), 'numpy.ones', 'np.ones', (['[2, 3]', 'int'], {}), '([2, 3], int)\n', (12467, 12480), True, 'import numpy as np\n'), ((12577, 12598), 'numpy.vstack', 'np.vstack', (['[p, 2 * 
p]'], {}), '([p, 2 * p])\n', (12586, 12598), True, 'import numpy as np\n'), ((12696, 12717), 'numpy.hstack', 'np.hstack', (['[p, 2 * p]'], {}), '([p, 2 * p])\n', (12705, 12717), True, 'import numpy as np\n'), ((13494, 13515), 'numpy.array', 'np.array', (['[y, y ** 2]'], {}), '([y, y ** 2])\n', (13502, 13515), True, 'import numpy as np\n'), ((13672, 13693), 'numpy.array', 'np.array', (['[y, y ** 2]'], {}), '([y, y ** 2])\n', (13680, 13693), True, 'import numpy as np\n'), ((14291, 14318), 'numpy.array', 'np.array', (['[-4, -2, 1, 3, 5]'], {}), '([-4, -2, 1, 3, 5])\n', (14299, 14318), True, 'import numpy as np\n'), ((15318, 15331), 'numpy.arange', 'np.arange', (['(36)'], {}), '(36)\n', (15327, 15331), True, 'import numpy as np\n'), ((16848, 16880), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)', '(4, 3)'], {}), '(0, 10, (4, 3))\n', (16865, 16880), True, 'import numpy as np\n'), ((8504, 8513), 'time.time', 'tm.time', ([], {}), '()\n', (8511, 8513), True, 'import time as tm\n'), ((14591, 14604), 'numpy.arange', 'np.arange', (['(13)'], {}), '(13)\n', (14600, 14604), True, 'import numpy as np\n'), ((290, 301), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (299, 301), False, 'import os\n'), ((5835, 5858), 'csv.DictReader', 'csv.DictReader', (['csvfile'], {}), '(csvfile)\n', (5849, 5858), False, 'import csv\n'), ((220, 231), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (229, 231), False, 'import os\n')] |
import os
import numpy as np
import pytest
from capreolus.benchmark.robust04 import Robust04Benchmark
from capreolus.collection import Collection
from capreolus.extractor.berttext import BertText
from capreolus.searcher.bm25 import BM25Grid
from capreolus.tests.common_fixtures import trec_index, dummy_collection_config
def test_transform_qid_posdocid_negdocid(monkeypatch, tmpdir, trec_index, dummy_collection_config):
    """End-to-end check of BertText.transform_qid_posdocid_negdocid on the dummy collection."""
    pipeline_config = {
        "indexstops": True,
        "maxthreads": 1,
        "stemmer": "anserini",
        "bmax": 0.2,
        "k1max": 0.2,
        "maxqlen": 5,
        "maxdoclen": 10,
        "keepstops": True,
        "rundocsonly": False,
    }
    collection = Collection(dummy_collection_config)
    bm25_run = BM25Grid(trec_index, collection, os.path.join(tmpdir, "searcher"), pipeline_config)
    bm25_run.create()
    folds = {"s1": {"train_qids": ["301"], "predict": {"dev": ["301"], "test": ["301"]}}}
    benchmark = Robust04Benchmark(bm25_run, collection, pipeline_config)
    benchmark.create_and_store_train_and_pred_pairs(folds)
    feature = BertText(tmpdir, tmpdir, pipeline_config, index=trec_index, collection=collection, benchmark=benchmark)
    feature.build_from_benchmark()

    transformed = feature.transform_qid_posdocid_negdocid("301", "LA010189-0001", "LA010189-0001")

    # Both doc slots receive the same document, so pos* and neg* fields must agree.
    expected_toks = [101, 24369, 9986, 0, 0, 0, 102, 24369, 24369, 24369, 7592, 2088, 1010, 14806, 2015, 2013, 6058, 102]
    expected_mask = [1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
    expected_segs = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
    expected_qmask = [1, 1, 0, 0, 0]
    expected_dmask = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
    for side in ("pos", "neg"):
        assert np.array_equal(transformed[side + "toks"], expected_toks)
        assert np.array_equal(transformed[side + "mask"], expected_mask)
        assert np.array_equal(transformed[side + "segs"], expected_segs)
        assert np.array_equal(transformed[side + "qmask"], expected_qmask)
        assert np.array_equal(transformed[side + "dmask"], expected_dmask)
        assert transformed[side + "docid"] == "LA010189-0001"
    assert transformed["qid"] == "301"
| [
"capreolus.collection.Collection",
"capreolus.extractor.berttext.BertText",
"os.path.join",
"numpy.array_equal",
"capreolus.benchmark.robust04.Robust04Benchmark"
] | [((441, 476), 'capreolus.collection.Collection', 'Collection', (['dummy_collection_config'], {}), '(dummy_collection_config)\n', (451, 476), False, 'from capreolus.collection import Collection\n'), ((965, 1021), 'capreolus.benchmark.robust04.Robust04Benchmark', 'Robust04Benchmark', (['bm25_run', 'collection', 'pipeline_config'], {}), '(bm25_run, collection, pipeline_config)\n', (982, 1021), False, 'from capreolus.benchmark.robust04 import Robust04Benchmark\n'), ((1096, 1204), 'capreolus.extractor.berttext.BertText', 'BertText', (['tmpdir', 'tmpdir', 'pipeline_config'], {'index': 'trec_index', 'collection': 'collection', 'benchmark': 'benchmark'}), '(tmpdir, tmpdir, pipeline_config, index=trec_index, collection=\n collection, benchmark=benchmark)\n', (1104, 1204), False, 'from capreolus.extractor.berttext import BertText\n'), ((1346, 1492), 'numpy.array_equal', 'np.array_equal', (["transformed['postoks']", '[101, 24369, 9986, 0, 0, 0, 102, 24369, 24369, 24369, 7592, 2088, 1010, \n 14806, 2015, 2013, 6058, 102]'], {}), "(transformed['postoks'], [101, 24369, 9986, 0, 0, 0, 102, \n 24369, 24369, 24369, 7592, 2088, 1010, 14806, 2015, 2013, 6058, 102])\n", (1360, 1492), True, 'import numpy as np\n'), ((1522, 1620), 'numpy.array_equal', 'np.array_equal', (["transformed['posmask']", '[1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]'], {}), "(transformed['posmask'], [1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1])\n", (1536, 1620), True, 'import numpy as np\n'), ((1628, 1726), 'numpy.array_equal', 'np.array_equal', (["transformed['possegs']", '[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]'], {}), "(transformed['possegs'], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1])\n", (1642, 1726), True, 'import numpy as np\n'), ((1734, 1790), 'numpy.array_equal', 'np.array_equal', (["transformed['posqmask']", '[1, 1, 0, 0, 0]'], {}), "(transformed['posqmask'], [1, 1, 0, 0, 0])\n", (1748, 1790), True, 'import numpy as np\n'), ((1802, 1873), 
'numpy.array_equal', 'np.array_equal', (["transformed['posdmask']", '[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]'], {}), "(transformed['posdmask'], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n", (1816, 1873), True, 'import numpy as np\n'), ((1886, 2032), 'numpy.array_equal', 'np.array_equal', (["transformed['negtoks']", '[101, 24369, 9986, 0, 0, 0, 102, 24369, 24369, 24369, 7592, 2088, 1010, \n 14806, 2015, 2013, 6058, 102]'], {}), "(transformed['negtoks'], [101, 24369, 9986, 0, 0, 0, 102, \n 24369, 24369, 24369, 7592, 2088, 1010, 14806, 2015, 2013, 6058, 102])\n", (1900, 2032), True, 'import numpy as np\n'), ((2062, 2160), 'numpy.array_equal', 'np.array_equal', (["transformed['negmask']", '[1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]'], {}), "(transformed['negmask'], [1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1])\n", (2076, 2160), True, 'import numpy as np\n'), ((2168, 2266), 'numpy.array_equal', 'np.array_equal', (["transformed['negsegs']", '[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]'], {}), "(transformed['negsegs'], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1])\n", (2182, 2266), True, 'import numpy as np\n'), ((2274, 2330), 'numpy.array_equal', 'np.array_equal', (["transformed['negqmask']", '[1, 1, 0, 0, 0]'], {}), "(transformed['negqmask'], [1, 1, 0, 0, 0])\n", (2288, 2330), True, 'import numpy as np\n'), ((2342, 2413), 'numpy.array_equal', 'np.array_equal', (["transformed['negdmask']", '[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]'], {}), "(transformed['negdmask'], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n", (2356, 2413), True, 'import numpy as np\n'), ((786, 818), 'os.path.join', 'os.path.join', (['tmpdir', '"""searcher"""'], {}), "(tmpdir, 'searcher')\n", (798, 818), False, 'import os\n')] |
import numpy as np
from iaf.model import FCDecoder
def test_call():
    """FCDecoder must map a (batch, n_z) latent batch to a (batch, n_out) output."""
    hidden_units, output_units = 100, 256
    decoder = FCDecoder(hidden_units, output_units)
    batch_size, latent_dim = 16, 32
    latent = np.random.randn(batch_size, latent_dim).astype('f')
    output = decoder(latent)
    assert output.shape == (batch_size, output_units)
| [
"numpy.random.randn",
"iaf.model.FCDecoder"
] | [((112, 133), 'iaf.model.FCDecoder', 'FCDecoder', (['n_h', 'n_out'], {}), '(n_h, n_out)\n', (121, 133), False, 'from iaf.model import FCDecoder\n'), ((173, 202), 'numpy.random.randn', 'np.random.randn', (['n_batch', 'n_z'], {}), '(n_batch, n_z)\n', (188, 202), True, 'import numpy as np\n')] |
import copy
import numpy as np
from hexrd.utils.decorators import memoize
def test_memoize():
    """Exercise @memoize across positional/keyword args, numpy arrays, lists and dicts.

    The decorated ``run`` flips ``modified`` whenever its body actually executes,
    so ``was_memoized()`` reports (and resets) whether the last call was a cache hit.
    """
    # This will only be set to true if memoization did not happen
    modified = False
    def was_memoized():
        # Get the state, and reset it
        nonlocal modified
        ret = not modified
        modified = False
        return ret
    @memoize
    def run(*args, **kwargs):
        nonlocal modified
        modified = True
    #### Basic tests #### noqa
    run(1, 3, var=10)
    assert not was_memoized()
    run(1, 3, var=10)
    assert was_memoized()
    run(1, 4, var=10)
    assert not was_memoized()
    run(1, 3, var=11)
    assert not was_memoized()
    run(1, 3, var=10)
    assert was_memoized()
    # positional order matters
    run(3, 1, var=10)
    assert not was_memoized()
    run(1, var1=3, var2=5)
    assert not was_memoized()
    run(1, var1=3, var2=5)
    assert was_memoized()
    # keyword order must NOT matter
    run(1, var2=5, var1=3)
    assert was_memoized()
    run(1, var1=3, var2=6)
    assert not was_memoized()
    #### Test numpy arrays #### noqa
    array1 = np.arange(6).reshape(2, 3)
    array2 = copy.deepcopy(array1)
    array3 = np.arange(9)
    run(array1, array=array2)
    assert not was_memoized()
    run(array1, array=array2)
    assert was_memoized()
    # Arrays are identical
    run(array2, array=array1)
    assert was_memoized()
    # Array 3 is different
    run(array2, array=array3)
    assert not was_memoized()
    run(array2, array=array2)
    assert was_memoized()
    run(array1, array2)
    assert not was_memoized()
    run(array1, array2)
    assert was_memoized()
    # Show that it won't memoize if modified
    array1[0][0] = 3
    run(array1, array2)
    assert not was_memoized()
    # Modify it back and show that it is still memoized
    array1[0][0] = 0
    run(array1, array2)
    assert was_memoized()
    # It won't memoize if the shape is changed either
    run(array1, array2)
    assert was_memoized()
    run(array1, array2.reshape(3, 2))
    assert not was_memoized()
    run(array1, array2.reshape(2, 3))
    assert was_memoized()
    #### Test lists and dicts #### noqa
    list1 = [1, 2, 3]
    list2 = copy.deepcopy(list1)
    list3 = [5, 9, 8]
    dict1 = {'key1': 4, 'key2': 3}
    dict2 = copy.deepcopy(dict1)
    dict3 = {'key4': 1, 'key3': 2}
    run(list1, list2, dict1, dict2, kwarg=dict2)
    assert not was_memoized()
    run(list1, list2, dict1, dict2, kwarg=dict2)
    assert was_memoized()
    # equal-by-value lists/dicts hash the same
    run(list2, list1, dict2, dict1, kwarg=dict1)
    assert was_memoized()
    run(list1, list3, dict1, dict2, kwarg=dict2)
    assert not was_memoized()
    run(list1, list2, dict1, dict2, kwarg=dict3)
    assert not was_memoized()
    # mutating a dict invalidates the cached entry...
    dict2['key2'] = 4
    run(list1, list2, dict1, dict2, kwarg=dict2)
    assert not was_memoized()
    # ...and restoring it brings the hit back
    dict2['key2'] = 3
    run(list1, list2, dict1, dict2, kwarg=dict2)
    assert was_memoized()
| [
"numpy.arange",
"copy.deepcopy"
] | [((1096, 1117), 'copy.deepcopy', 'copy.deepcopy', (['array1'], {}), '(array1)\n', (1109, 1117), False, 'import copy\n'), ((1131, 1143), 'numpy.arange', 'np.arange', (['(9)'], {}), '(9)\n', (1140, 1143), True, 'import numpy as np\n'), ((2160, 2180), 'copy.deepcopy', 'copy.deepcopy', (['list1'], {}), '(list1)\n', (2173, 2180), False, 'import copy\n'), ((2251, 2271), 'copy.deepcopy', 'copy.deepcopy', (['dict1'], {}), '(dict1)\n', (2264, 2271), False, 'import copy\n'), ((1056, 1068), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (1065, 1068), True, 'import numpy as np\n')] |
import ast
import os
import sys
import argparse
from tqdm import tqdm
import requests
import pandas as pd
import numpy as np
from sklearn.model_selection import StratifiedShuffleSplit
np.random.seed(0)  # make shuffling/splitting reproducible across runs
tqdm.pandas()  # register DataFrame.progress_apply used in train_test_split
sys.path.insert(0, "./src")  # make `from config import config` below resolvable
from config import config
MAX_ID_IN_DATASET = config.MAX_ID_IN_DATASET  # highest rater id present in the raw data
def download_data(data_url, raw_data_path, force_download=False):
    """Download the raw ratings file to *raw_data_path*.

    The download is skipped when the file already exists, unless
    *force_download* is True.

    :param data_url: HTTP(S) URL of the raw data file.
    :param raw_data_path: local destination path.
    :param force_download: re-download even when the file already exists.
    :raises requests.HTTPError: if the server answers with an error status.
    """
    if not os.path.isfile(raw_data_path) or force_download:
        print("downloading - this might take up to 2 minutes.")
        r = requests.get(data_url, allow_redirects=True)
        # Fail loudly instead of silently writing an HTML error page to disk.
        r.raise_for_status()
        with open(raw_data_path, "wb") as f:
            f.write(r.content)
def train_test_split(
    raw_data_path,
    train_data_path,
    test_data_path,
    max_id=MAX_ID_IN_DATASET,
    turn_into_matches=True,
):
    """
    Split per-rater ratings into train/test and binarize them into matches.

    Holds out ratings from every rater (stratified by rater id) as a test
    set; ``test_size=0.1`` below, i.e. 10% of the rows.
    Each rating is turned into a binary match flag ``m`` by comparing it
    against that rater's own quantile (``config.match_threshold``).

    :param raw_data_path: CSV with columns (rater, rated, r), no header.
    :param train_data_path: pathlib.Path where the train CSV is written.
    :param test_data_path: pathlib.Path where the test CSV is written.
    :param max_id: only raters with id <= max_id are kept.
    :param turn_into_matches: NOTE(review): currently unused — matches are
        always computed; confirm whether this flag should gate that step.
    :return: (train, test) DataFrames with columns (rater, rated, m).
    """
    assert max_id <= MAX_ID_IN_DATASET
    print("Train-Test splitting")
    ratings = pd.read_csv(raw_data_path, names=["rater", "rated", "r"])
    date = ratings[ratings["rater"] <= max_id]
    # set train/test split (stratified so every rater appears in both splits)
    splitter = StratifiedShuffleSplit(n_splits=1, test_size=0.1, random_state=0)
    splitter = splitter.split(date, date["rater"])
    train_idx, test_idx = list(splitter)[0]
    train = date.iloc[train_idx].set_index("rater")
    test = date.iloc[test_idx].set_index("rater")
    # get the quantile for the raters (computed on ALL of the rater's ratings)
    quantiles = (
        date.groupby(date["rater"])["r"]
        .progress_apply(lambda x: np.quantile(x, q=config.match_threshold))
        .to_frame()
        .reset_index()
    )  # df with rater as index and quantile as value
    quantiles.columns = ["rater", "r_quantile"]
    # apply to get matches: m = 1.0 when the rating reaches the rater's quantile
    train = pd.merge(
        train, quantiles, left_on=train.index, right_on=quantiles.rater
    ).drop(columns=["key_0"])
    train["m"] = 1.0 * (train["r"] >= train["r_quantile"])
    test = pd.merge(test, quantiles, left_on=test.index, right_on=quantiles.rater).drop(
        columns=["key_0"]
    )
    test["m"] = 1.0 * (test["r"] >= test["r_quantile"])
    train = train[["rater", "rated", "m"]]
    test = test[["rater", "rated", "m"]]
    # save
    train_data_path.parent.mkdir(parents=True, exist_ok=True)
    test_data_path.parent.mkdir(parents=True, exist_ok=True)
    train.to_csv(train_data_path, index=False)
    test.to_csv(test_data_path, index=False)
    return train, test
def matches_to_matches_triplet(data_path, output_path):
    """Reduce a (rater, rated, m) CSV to per-rater lists of matched ids.

    Keeps only rows flagged as matches (m > 0), aggregates each rater's
    matched ``rated`` ids into a list, and writes a one-column ("rated")
    CSV understood by ``load_d2v_formated_data``.
    """
    matches = pd.read_csv(data_path, dtype={"rated": str})  # rater, rated, m
    rater_col, rated_col, match_col = matches.columns
    print(f"N : {matches.shape[0]}")
    # keep only matches
    matches = matches[matches[match_col] > 0]
    print(f"N : {matches.shape[0]} (matches only)")
    # collect each rater's matched ids into a list
    grouped = matches.groupby(rater_col)[[rated_col]].agg(list)
    grouped.to_csv(output_path, index=False)
def load_d2v_formated_data(data_path):
    """Load a CSV written by ``matches_to_matches_triplet``.

    Each cell of the ``rated`` column is a stringified Python list; it is
    parsed back into a real list with ``ast.literal_eval``.
    """
    frame = pd.read_csv(data_path)
    return frame["rated"].map(ast.literal_eval)
def list_shuffler(x):
    """Shuffle *x* in place (uniformly at random) and return the same object."""
    np.random.shuffle(x)
    return x
def get_args():
    """Parse command-line arguments for this preprocessing script.

    :return: argparse.Namespace with ``max_id`` (highest rater id to
        process; defaults to the whole dataset).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--max_id",
        # The previous help text ("Whether to load saved weight...") was
        # copy-pasted from an unrelated option and described nothing here.
        help="Maximum rater id from the dataset to process",
        default=MAX_ID_IN_DATASET,
    )
    return parser.parse_args()
def main():
    """Run the full preprocessing pipeline: download, split, triplet export."""
    args = get_args()
    # download the data if not downloaded already
    download_data(config.raw_data_url, config.raw_data_path)
    # Split each rater's ratings into train/test sets.
    print(f"Processing N={args.max_id} records.")
    train_test_split(
        config.raw_data_path,
        config.train_data_path,
        config.test_data_path,
        max_id=int(args.max_id),  # argparse delivers the value as a string
    )
    # Export per-rater match lists for doc2vec-style training.
    matches_to_matches_triplet(config.train_data_path, config.d2v_train_data_path)
    matches_to_matches_triplet(config.test_data_path, config.d2v_test_data_path)
if __name__ == "__main__":
    main()
main()
| [
"sklearn.model_selection.StratifiedShuffleSplit",
"sys.path.insert",
"pandas.read_csv",
"argparse.ArgumentParser",
"pandas.merge",
"requests.get",
"os.path.isfile",
"numpy.quantile",
"numpy.random.seed",
"tqdm.tqdm.pandas",
"numpy.random.shuffle"
] | [((187, 204), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (201, 204), True, 'import numpy as np\n'), ((205, 218), 'tqdm.tqdm.pandas', 'tqdm.pandas', ([], {}), '()\n', (216, 218), False, 'from tqdm import tqdm\n'), ((219, 246), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""./src"""'], {}), "(0, './src')\n", (234, 246), False, 'import sys\n'), ((988, 1045), 'pandas.read_csv', 'pd.read_csv', (['raw_data_path'], {'names': "['rater', 'rated', 'r']"}), "(raw_data_path, names=['rater', 'rated', 'r'])\n", (999, 1045), True, 'import pandas as pd\n'), ((1136, 1201), 'sklearn.model_selection.StratifiedShuffleSplit', 'StratifiedShuffleSplit', ([], {'n_splits': '(1)', 'test_size': '(0.1)', 'random_state': '(0)'}), '(n_splits=1, test_size=0.1, random_state=0)\n', (1158, 1201), False, 'from sklearn.model_selection import StratifiedShuffleSplit\n'), ((2512, 2556), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {'dtype': "{'rated': str}"}), "(data_path, dtype={'rated': str})\n", (2523, 2556), True, 'import pandas as pd\n'), ((2929, 2951), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {}), '(data_path)\n', (2940, 2951), True, 'import pandas as pd\n'), ((3037, 3057), 'numpy.random.shuffle', 'np.random.shuffle', (['x'], {}), '(x)\n', (3054, 3057), True, 'import numpy as np\n'), ((3102, 3127), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3125, 3127), False, 'import argparse\n'), ((523, 567), 'requests.get', 'requests.get', (['data_url'], {'allow_redirects': '(True)'}), '(data_url, allow_redirects=True)\n', (535, 567), False, 'import requests\n'), ((398, 427), 'os.path.isfile', 'os.path.isfile', (['raw_data_path'], {}), '(raw_data_path)\n', (412, 427), False, 'import os\n'), ((1758, 1831), 'pandas.merge', 'pd.merge', (['train', 'quantiles'], {'left_on': 'train.index', 'right_on': 'quantiles.rater'}), '(train, quantiles, left_on=train.index, right_on=quantiles.rater)\n', (1766, 1831), True, 'import pandas as pd\n'), ((1941, 
2012), 'pandas.merge', 'pd.merge', (['test', 'quantiles'], {'left_on': 'test.index', 'right_on': 'quantiles.rater'}), '(test, quantiles, left_on=test.index, right_on=quantiles.rater)\n', (1949, 2012), True, 'import pandas as pd\n'), ((1531, 1571), 'numpy.quantile', 'np.quantile', (['x'], {'q': 'config.match_threshold'}), '(x, q=config.match_threshold)\n', (1542, 1571), True, 'import numpy as np\n')] |
#!/GPFS/zhangli_lab_permanent/zhuqingjie/env/py3_tf2/bin/python
'''
@Time : 20/07/21 下午 03:25
@Author : zhuqingjie
@User : zhu
@FileName: analysis.py
@Software: PyCharm
'''
import pickle
import warnings
from pathlib import Path
import cv2
import math
import numpy as np
import pandas as pd
from easyFlyTracker.src_code.Camera_Calibration import Undistortion
from easyFlyTracker.src_code.utils import NumpyArrayHasNanValuesExceptin
from easyFlyTracker.src_code.utils import Pbar, equalizeHist_use_mask
from easyFlyTracker.src_code.kernel_density_estimation import get_KernelDensity
warnings.filterwarnings("ignore")  # suppress all warnings globally for this module
class Analysis():
    '''
    Analyse the experiment results.
    Every length-related quantity computed here is expressed in pixels; the
    scale factor is applied in later analysis steps, never in this class.
    '''
    __doc__ = 'ana'
    def __init__(
            self,
            video_path,  # path of the video to analyse
            output_dir,  # output directory
            roi_flys_flag,
            area_th=0.5,  # area ratio of the inner region relative to the dish
            roi_flys_ids=None,
            ana_time_duration=10.,  # time span (minutes, see *60*fps below) aggregated per value when analysing distance
            sleep_time_duration=10.,  # time span aggregated per value for sleep statistics (presumably minutes — confirm)
            angle_time_duration=10,  # time span aggregated per value for angle-change statistics (presumably minutes — confirm)
            sleep_dist_th_per_second=5,
            sleep_time_th=300,  # how long (seconds) the per-second sleep state must last to count as real sleep
            Undistortion_model_path=None,  # path of the camera distortion-correction parameters
    ):
        # Initialise directories and file paths
        self.video_path = Path(video_path)
        self.res_dir = Path(output_dir)  # results the user needs
        self.cache_dir = Path(self.res_dir, '.cache')  # intermediate results computed by the program
        self.saved_dir = Path(self.cache_dir, 'analysis_result')  # results produced by this analysis
        self.npy_file_path = Path(self.cache_dir, f'track.npy')
        self.npy_file_path_cor = Path(self.cache_dir, f'track_cor.npy')
        self.move_direction_pre_frame_path = Path(self.saved_dir, 'move_direction_pre_frame.npy')
        self.fly_angles_cor_path = Path(self.saved_dir, 'fly_angles_cor.npy')
        self.speeds_npy = Path(self.saved_dir, 'all_fly_speeds_per_frame.npy')
        self.dist_npy = Path(self.saved_dir, 'all_fly_dist_per_frame.npy')
        # self.angle_changes_path = Path(self.saved_dir, 'angle_changes.npy')
        config_pkl_path = Path(self.res_dir, 'config.pkl')
        self.cache_dir.mkdir(exist_ok=True)
        self.saved_dir.mkdir(exist_ok=True)
        self.Undistortion_model_path = Undistortion_model_path
        # load cps and radius (dish centre points and per-dish radii from config.pkl)
        config_pk = np.array(pickle.load(open(config_pkl_path, 'rb'))[0])
        self.cps = config_pk[:, :2]
        self.dish_radius = int(round(float(np.mean(config_pk[:, -1]))))
        self.mask_imgs = np.load(Path(self.cache_dir, 'mask_imgs.npy'))
        self.mask_imgs = self.mask_imgs.astype(np.bool)
        self.roi_flys_flag = roi_flys_flag
        self.ana_time_duration = ana_time_duration
        self.sleep_time_duration = sleep_time_duration
        self.angle_time_duration = angle_time_duration
        self.sleep_dist_th_per_second = sleep_dist_th_per_second
        self.sleep_time_th = sleep_time_th
        # sqrt(area_th) converts an area ratio into a radius ratio
        self.region_radius = int(round(math.sqrt(area_th) * self.dish_radius))
        cap = cv2.VideoCapture(str(video_path))
        self.fps = round(cap.get(cv2.CAP_PROP_FPS))
        self.video_frames_num = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        cap.release()
        # Total number of flies; combines roi_flys_mask_arry and Dish_exclude
        if roi_flys_ids == None:
            self.roi_flys_list = np.array([True] * len(self.cps))
        else:
            self.roi_flys_list = np.array([False] * len(self.cps))
            self.roi_flys_list[roi_flys_ids] = True
        self.roi_flys_id = [i for i, r in enumerate(self.roi_flys_list) if r]
        self.roi_flys_nubs = self.roi_flys_list.sum()
        # Preload derived data (trajectories, per-frame speed/displacement)
        self._get_all_res()
        self._get_speed_perframe_dist_perframe()
        # load heatmap
        heatmap_path = Path(self.cache_dir, 'heatmap.npy')
        self.heatmap = np.load(heatmap_path)
def _get_all_res(self):
if self.npy_file_path_cor.exists():
self.all_datas = np.load(self.npy_file_path_cor)
else:
res = np.load(self.npy_file_path)
self.all_datas = np.transpose(res, [1, 0, 2])
self._cor()
np.save(self.npy_file_path_cor, self.all_datas)
    def _cor(self):
        """Replace invalid (-1) tracking points in ``self.all_datas`` by linear interpolation."""
        def _correction(l):
            '''
            Correct a 1-D series with the following rules:
            1. if it contains no -1, return it unchanged;
            2. otherwise, remove the -1 entries via linear interpolation.
            :param l:
            :return:
            '''
            # l: a numeric (int/float) list like [-1, -1, 88, 90, -1, -1, -1, 100], where -1 marks an invalid point
            if not (np.array(l) == -1).any():  # no -1 present — return directly
                return l
            # pandas interpolation cannot fill leading gaps, so fill them with the first valid value first
            if l[0] < 0:
                for i in range(len(l)):
                    if l[i] > 0:
                        l[:i] = l[i]
                        break
            l = np.where(l < 0, np.nan, l)
            df = pd.DataFrame(data=l)
            df.interpolate(method="linear", inplace=True)
            return df.values[:, 0]
        def correction2D(ps):
            # correct x and y coordinates independently
            return list(zip(_correction(ps[:, 0]), _correction(ps[:, 1])))
        res = []
        for ps in self.all_datas:
            # an empty dish has every point equal to (-1, -1); keep it unchanged
            if ps.min() == -1 and ps.max() == -1:
                res.append(ps)
            else:
                res.append(correction2D(ps))
        res = np.array(res)
        if np.isnan(res).sum() != 0:
            raise NumpyArrayHasNanValuesExceptin(res)
        self.all_datas = res
    def _get_speed_perframe_dist_perframe(self, redo=False):
        """Compute per-frame speed and displacement for every fly (disk-cached unless *redo*)."""
        if self.speeds_npy.exists() and self.dist_npy.exists() and not redo:
            self.all_fly_speeds_per_frame = np.load(self.speeds_npy)
            self.all_fly_dist_per_frame = np.load(self.dist_npy)
            return
        fn = lambda x, y: math.sqrt(pow(x, 2) + pow(y, 2))
        fn2 = lambda ps, k: fn(ps[k][0] - ps[k + 1][0],  # distance between two consecutive points
                               ps[k][1] - ps[k + 1][1])
        all_fly_speeds = []  # per fly: length equals the number of frames
        all_fly_displacement = []  # per fly: length equals the number of frames minus one
        mperframe = 1 / self.fps
        for fly in self.all_datas:
            # if not exc:
            #     all_fly_displacement.append([0] * (self.all_datas.shape[1] - 1))
            #     all_fly_speeds.append([0] * self.all_datas.shape[1])
            #     continue
            ds = [fn2(fly, i) for i in range(len(fly) - 1)]
            all_fly_displacement.append(ds)
            # pad both ends so the central-difference speed below has one value per frame
            ds = [ds[0]] + ds + [ds[-1]]
            speed = [(ds[i] + ds[i + 1]) / (2 * mperframe) for i in range(len(ds) - 1)]
            all_fly_speeds.append(speed)
        if np.isnan(all_fly_speeds).sum() != 0:
            raise NumpyArrayHasNanValuesExceptin(all_fly_speeds)
        if np.isnan(all_fly_displacement).sum() != 0:
            raise NumpyArrayHasNanValuesExceptin(all_fly_displacement)
        np.save(self.speeds_npy, all_fly_speeds)
        np.save(self.dist_npy, all_fly_displacement)
        self.all_fly_speeds_per_frame = np.array(all_fly_speeds)
        self.all_fly_dist_per_frame = np.array(all_fly_displacement)
def PARAM_speed_displacement(self, redo=False):
'''
计算两个参数:
time_duration_stat_speed:10分钟总速度/帧数/果蝇个数;
time_duration_stat_displacement:10分钟总位移/果蝇个数
:return: 返回npy路径
'''
speed_npy = Path(self.saved_dir, f'speed_per_duration_{self.roi_flys_flag}.npy')
disp_npy = Path(self.saved_dir, f'displacement_per_duration_{self.roi_flys_flag}.npy')
if speed_npy.exists() and disp_npy.exists() and not redo:
return speed_npy, disp_npy
duration_frames = int(round(self.ana_time_duration * 60 * self.fps))
frame_start_ind = list(range(0, self.all_datas.shape[1], duration_frames))
all_fly_speeds = self.all_fly_speeds_per_frame * \
np.tile(self.roi_flys_list[:, np.newaxis],
(1, self.all_fly_speeds_per_frame.shape[1]))
all_fly_displacement = self.all_fly_dist_per_frame * \
np.tile(self.roi_flys_list[:, np.newaxis],
(1, self.all_fly_dist_per_frame.shape[1]))
# res = []
# for ind in frame_start_ind:
# x = np.sum(self.all_fly_dist_per_frame[:, ind:ind + duration_frames], axis=1)
# res.append(x)
# res = np.stack(res, axis=-1)
# res = res * 0.26876426270157516
# np.save(r'Z:\dataset\qususu\ceshishipin\v080\output_72hole_0330_v080\plot_images\qudashen.npy', res)
# df = pd.DataFrame(data=res)
# df.to_excel(r'Z:\dataset\qususu\ceshishipin\v080\output_72hole_0330_v080\plot_images\qudashen.xlsx')
# exit()
time_duration_stat_speed = [] # 10分钟总速度/帧数/果蝇个数
time_duration_stat_displacement = [] # 10分钟总位移/果蝇个数
for ind in frame_start_ind:
time_duration_stat_speed.append(
all_fly_speeds[:, ind:ind + duration_frames].sum() / duration_frames / self.roi_flys_nubs)
time_duration_stat_displacement.append(
all_fly_displacement[:, ind:ind + duration_frames].sum() / self.roi_flys_nubs)
np.save(speed_npy, time_duration_stat_speed)
np.save(disp_npy, time_duration_stat_displacement)
return speed_npy, disp_npy
def PARAM_dist_per_h(self):
'''
每小时的移动总距离/果蝇数量
:return: list
'''
self._get_speed_perframe_dist_perframe()
fps = self.fps
dist_per_h = []
da = self.all_fly_dist_per_frame * \
np.tile(self.roi_flys_list[:, np.newaxis],
(1, self.all_fly_dist_per_frame.shape[1]))
duration_frames = int(round(fps * 60 * 60))
for i in range(0, da.shape[1], duration_frames):
dist_per_h.append(np.sum(da[:, i:i + duration_frames]) / self.roi_flys_nubs)
return dist_per_h
def PARAM_sleep_status(self, redo=False):
'''
首先计算每一秒的睡眠状态,然后计算统计时间段(sleep_time_duration)内果蝇的总睡眠时长,计算方式为:
所有果蝇该时间段的总睡眠时长/果蝇数量
返回保存的npy路径
:param redo:
:return:
'''
npy_path = Path(self.saved_dir, f'sleep_time_per_duration_{self.roi_flys_flag}.npy')
if npy_path.exists() and not redo:
return str(npy_path)
cache_all_sleep_status_path = Path(self.cache_dir, 'all_sleep_status.npy')
def get_all_sleep_status(self):
if cache_all_sleep_status_path.exists():
return np.load(cache_all_sleep_status_path)
self._get_speed_perframe_dist_perframe()
fps = self.fps
all_dist_per_s = []
for i in range(self.all_fly_dist_per_frame.shape[0]):
dist_per_s = []
for j in range(0, self.all_fly_dist_per_frame.shape[1], fps):
dist_per_s.append(np.sum(self.all_fly_dist_per_frame[i, j:j + fps]))
all_dist_per_s.append(dist_per_s)
sleep_dist_th = self.sleep_dist_th_per_second
all_sleep_status_per_s = np.array(all_dist_per_s) < sleep_dist_th
self.all_sleep_status_per_s = all_sleep_status_per_s
# all_sleep_status_per_s = np.delete(all_sleep_status_per_s, exclude_ids, axis=0)
sleep_time_th = self.sleep_time_th
all_sleep_status = []
for k, sleep_status_per_s in enumerate(all_sleep_status_per_s):
sleep_time = 0 # 用于保存截止当前秒已经睡了多久(单位秒)
sleep_status_per_s = np.concatenate(
[sleep_status_per_s, np.array([False])]) # 在末尾加一个false,防止末尾是True时遍历结束时无法判断睡眠
sleep_status = np.zeros([len(sleep_status_per_s) - 1, ], np.bool) # 新创建的list,用于保存睡眠状态
for i, ss in enumerate(sleep_status_per_s):
if ss:
sleep_time += 1
else:
# 到没睡的时候都判断一下,上一刻截止是不是满足睡眠条件
if sleep_time >= sleep_time_th:
sleep_status[i - sleep_time:i] = True
sleep_time = 0
all_sleep_status.append(sleep_status)
# 每个果蝇每秒钟的睡眠状态
all_sleep_status = np.array(all_sleep_status)
np.save(cache_all_sleep_status_path, all_sleep_status)
return all_sleep_status
all_sleep_status = get_all_sleep_status(self)
all_sleep_status = all_sleep_status[self.roi_flys_id]
dt = int(round(self.sleep_time_duration * 60)) # 多少秒
start_ind = list(range(0, all_sleep_status.shape[1], dt))
# 因为最后一个时间段可能不足设定的时间段,所以这里一块返回两者
values_durations = []
flys_num = self.roi_flys_nubs
for i in range(len(start_ind) - 1):
all_sleep_status_duration = all_sleep_status[:, start_ind[i]:start_ind[i + 1]]
value = all_sleep_status_duration.sum() / flys_num
value = value / 60 # 转化为分钟
sleep_flys_nubs = np.sum(all_sleep_status_duration, axis=-1).astype(np.bool).sum()
proportion_of_sleep_flys = sleep_flys_nubs / flys_num # 当前时间段睡觉的果蝇的比例
values_durations.append([value, dt, proportion_of_sleep_flys])
last_da = all_sleep_status[:, start_ind[-1]:]
value = last_da.sum() / flys_num
value = value / 60 # 转化为分钟
sleep_flys_nubs = np.sum(last_da, axis=-1).astype(np.bool).sum()
proportion_of_sleep_flys = sleep_flys_nubs / flys_num # 当前时间段睡觉的果蝇的比例
values_durations.append([value, last_da.shape[1], proportion_of_sleep_flys])
values_durations = np.array(values_durations)
np.save(str(npy_path), values_durations)
return str(npy_path)
def PARAM_region_status(self):
'''
统计每一帧是否在内圈的结果,在为True,不在为False。注意,被排除的果盘也被置为False了
:return: 保存的npy路径 (果蝇数,帧数)
'''
region_status_npy = Path(self.saved_dir, f'region_status.npy')
if Path(region_status_npy).exists():
self.all_region_status = np.load(region_status_npy)
return str(region_status_npy)
cps = self.cps
all_datas = self.all_datas.astype(np.float64)
all_region_status = []
print('get_region_status:')
pbar = Pbar(total=len(cps))
for i, (cp, da) in enumerate(zip(cps, all_datas)):
dist_to_cp = lambda x: math.sqrt(math.pow(x[0] - cp[0], 2) + math.pow(x[1] - cp[1], 2))
region_status = np.array([dist_to_cp(p) < self.region_radius for p in da])
all_region_status.append(region_status)
pbar.update()
pbar.close()
self.all_region_status = np.array(all_region_status)
np.save(region_status_npy, self.all_region_status)
return str(region_status_npy)
def heatmap_to_pcolor(self, heat, mask):
"""
转伪彩图
:return:
"""
# 尝试了生成16位的伪彩图,发现applyColorMap函数不支持
max_v, datatype = 255, np.uint8
heat = equalizeHist_use_mask(heat, mask, notuint8=True)
heat = heat / heat.max() * max_v
heat = np.round(heat).astype(datatype)
heat = cv2.applyColorMap(heat, cv2.COLORMAP_JET)
return heat
def PARAM_heatmap(self, p):
'''
跟roi没有关系,算的是所有果蝇的热图。
:param p:
:return:
'''
heatmap = self.heatmap.copy()
heatmaps = []
for mask, cp in zip(self.mask_imgs, self.cps):
hm = heatmap * mask
pcolor = self.heatmap_to_pcolor(hm, mask)
pcolor *= np.tile(mask[:, :, None], (1, 1, 3)) # 只有在这mask一下,后面才能叠加
heatmaps.append(pcolor)
heatmap_img = np.array(heatmaps).sum(axis=0).astype(np.uint8) # 叠加后的图像背景是黑的
mask_all = np.array(self.mask_imgs).sum(axis=0)
mask_all = (mask_all == 0).astype(np.uint8) * 128 # 背景蓝色 bgr(128,0,0)
heatmap_img[:, :, 0] += mask_all
# cv2.imshow('', heatmap_img)
# cv2.waitKeyEx()
cv2.imwrite(str(p), heatmap_img)
    def PARAM_heatmap_barycenter(self, p, p_heatmap):
        '''
        Compute the barycenter (centre of mass) of each dish's heatmap and
        render a visualisation: a grey circle outlines each dish, a filled
        cyan disc's radius equals the barycenter's distance from the dish
        centre, and a red arrow points from the centre towards the barycenter.

        :param p: output path of the rendered visualisation
        :param p_heatmap: path of a previously rendered heatmap image (only
            used here to obtain the output image size)
        '''
        def get_barycenter_of_mat(m):  # barycenter of a 2-D matrix
            def get_barycenter_of_line(l):  # barycenter of a 1-D profile
                i = np.arange(len(l))
                return np.sum(l * i) / np.sum(l)
            lx = np.sum(m, axis=0)
            ly = np.sum(m, axis=1)
            return (get_barycenter_of_line(lx),
                    get_barycenter_of_line(ly))
        barycps = []
        heatmap = self.heatmap
        r = self.dish_radius
        for cp in self.cps:
            # Crop the dish's square bounding box, find the barycenter in
            # dish-local coordinates, then shift back to image coordinates.
            p0 = (cp[0] - r, cp[1] - r)
            m = heatmap[
                p0[1]:p0[1] + 2 * r + 1,
                p0[0]:p0[0] + 2 * r + 1]
            barycp = get_barycenter_of_mat(m)
            barycps.append((barycp[0] + p0[0],
                            barycp[1] + p0[1]))
        self.barycps = barycps
        img = cv2.imread(str(p_heatmap))
        img = np.zeros_like(img)  # only the size of the heatmap image is used
        def dist2p(p1, p2):  # Euclidean distance between two points
            return ((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2) ** 0.5
        barycps_r = []
        for bp, cp in zip(barycps, self.cps):
            dist = dist2p(bp, cp)
            barycps_r.append(dist)
            cv2.circle(img, tuple(cp), self.dish_radius, (200, 200, 200), 1, cv2.LINE_AA)
            cv2.circle(img, tuple(cp), int(round(dist)), (255, 255, 0), -1, cv2.LINE_AA)
            if dist == 0:  # barycenter coincides with the centre: no arrow
                continue
            else:
                # Project the centre->barycenter direction onto the dish rim
                # so every arrow has the same length (the dish radius).
                x = (bp[0] - cp[0]) * self.dish_radius / dist + cp[0]
                y = (bp[1] - cp[1]) * self.dish_radius / dist + cp[1]
                x = int(round(x))
                y = int(round(y))
                cv2.arrowedLine(img, tuple(cp), (x, y), (0, 0, 255), 1, cv2.LINE_AA)
        cv2.imwrite(str(p), img)
def PARAM_heatmap_of_roi(self, p):
'''
根据当前roi组来算热图,组内不管有多少个圆圈,只算一个平均的,并对热图放大显示。
:return:
'''
heatmap = self.heatmap.copy()
r = self.dish_radius
heatmap_sum = np.zeros([r * 2 + 1] * 2, dtype=heatmap.dtype)
for roi_id in self.roi_flys_id:
x, y = self.cps[roi_id]
a_hp = heatmap[y - r:y + r + 1, x - r:x + r + 1]
heatmap_sum += a_hp
mask = np.zeros(heatmap_sum.shape, np.uint8)
cv2.circle(mask, (r, r), r, 255, -1)
mask = mask != 0
pcolor = self.heatmap_to_pcolor(heatmap_sum, mask)
# pcolor *= np.tile(mask[:, :, None], (1, 1, 3))
# pcolor = cv2.resize(pcolor, dsize=None, fx=4, fy=4, interpolation=cv2.INTER_NEAREST)
pcolor = cv2.resize(pcolor, dsize=None, fx=4, fy=4, interpolation=cv2.INTER_LINEAR)
cv2.imwrite(str(p), pcolor)
# pcolor = cv2.GaussianBlur(pcolor, (5, 5), 0)
# cv2.imshow('', pcolor)
# cv2.waitKeyEx()
# exit()
# ...
def PARAM_heatmap_exclude_sleeptime(self, p1, p2):
'''
排除睡眠时间,然后重新算一遍热图
实现逻辑:
改变self.heatmap的值,然后重新运行计算heatmap的函数
:return:
'''
# 因为下面操作要改变self.heatmap的值,所以这个做个备份,等操作完成再还原回来
heatmap_bak = self.heatmap.copy()
sleeptime_heatmap = self._get_sleeptime_heatmap()
heatmap_exclude_sleeptime = self.heatmap - sleeptime_heatmap
self.heatmap = heatmap_exclude_sleeptime
if not p1.exists():
self.PARAM_heatmap(p1)
self.PARAM_heatmap_of_roi(p2)
self.heatmap = heatmap_bak # 还原回来
def _get_sleeptime_heatmap(self):
'''
计算睡眠时间段果蝇活动区域的heatmap,
:param self:
:return:
'''
sleeptime_heatmap_path = Path(self.cache_dir, 'heatmap_sleeptime.npy')
if sleeptime_heatmap_path.exists():
return np.load(sleeptime_heatmap_path)
cap = cv2.VideoCapture(str(self.video_path))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
fps = int(cap.get(cv2.CAP_PROP_FPS))
sleeptime_heatmap = np.zeros((h, w), np.int) # 初始化返回值
# 先计算哪些时间段是睡眠时间(有果蝇在睡觉)
sleep_status = np.load(Path(self.cache_dir, 'all_sleep_status.npy'))
timeline = sleep_status.sum(0).astype(np.bool)
if timeline.sum() == 0: # 说明没有果蝇在睡觉,所以这里直接返回零矩阵,后面就不用折腾了
np.save(sleeptime_heatmap_path, sleeptime_heatmap)
return sleeptime_heatmap
timeline = np.concatenate([np.array([False]), timeline, np.array([False])], 0) # 先在两头加上False
start_t, end_t = [], []
for i in range(1, len(timeline)):
pt, t = timeline[i - 1], timeline[i]
if pt == False and t == True:
start_t.append(i - 1)
elif pt == True and t == False:
end_t.append(i - 1)
sleep_durations = list(zip(start_t, end_t)) # [起始秒,终止秒)
sleep_durations = np.array(sleep_durations) * fps # [起始帧,终止帧)
# 逐时间段计算睡觉果蝇热图
seg_th = 120, # 分割阈值。注意,这俩值要跟前面分割时保持一致
background_th = 70, # 跟背景差的阈值。注意,这俩值要跟前面分割时保持一致
if self.Undistortion_model_path:
bg_img_path = Path(self.cache_dir, 'background_image_undistort.bmp')
else:
bg_img_path = Path(self.cache_dir, 'background_image.bmp')
bg = cv2.imread(str(bg_img_path))
gray_bg_int16 = cv2.cvtColor(bg, cv2.COLOR_BGR2GRAY).astype(np.int16)
undistort = Undistortion(self.Undistortion_model_path)
mask_imgs = np.load(Path(self.cache_dir, 'mask_imgs.npy')).astype(np.bool)
mask_all = mask_imgs.sum(0).astype(np.bool)
sleep_status = np.repeat(sleep_status, fps, axis=-1)
for du_i, (st, ed) in enumerate(sleep_durations):
print(f'\nsleep duration: {du_i + 1}/{len(sleep_durations)}')
status = sleep_status[:, st:ed]
cap.set(cv2.CAP_PROP_POS_FRAMES, st)
nub = 0
pbar = Pbar(total=ed - st)
while True:
ret, frame = cap.read()
if not ret:
break
sleep_fly_id = np.argwhere(status[:, nub] == True)
if len(sleep_fly_id) == 0:
sleep_fly_id = []
else:
sleep_fly_id = list(np.squeeze(sleep_fly_id, axis=1))
mask_sleep = np.zeros_like(mask_all)
for sl_id in sleep_fly_id:
mask_sleep += mask_imgs[sl_id] # 只mask睡眠的果蝇圆环
frame = undistort.do(frame)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
foreground_mask = np.abs(frame.astype(np.int16) - gray_bg_int16) > background_th
frame = frame < seg_th
frame *= mask_all
frame = frame.astype(np.uint8) * 255 * foreground_mask # 该值是原始heatmap累加分割区域图
frame *= mask_sleep # 原始累加分割区域图跟睡眠果蝇区域mask相乘
sleeptime_heatmap += frame.astype(np.bool).astype(np.int)
nub += 1
if nub >= ed - st:
break
pbar.update()
pbar.close()
np.save(sleeptime_heatmap_path, sleeptime_heatmap)
return sleeptime_heatmap
def _get_move_direction_pre_frame(self):
'''
算法:某一帧的运动方向由当前帧跟上一帧的坐标点来确定,第一帧因为没有上一帧,第一帧的运动方向设为跟下一帧相同.
算的是运动方向,不是果蝇身体朝向,注意区分。
:return:
'''
if self.move_direction_pre_frame_path.exists():
return np.load(self.move_direction_pre_frame_path)
else:
img_h, img_w = self.mask_imgs.shape[1:]
R = self.all_datas
R[:, :, 1] = img_h - R[:, :, 1] # 转换坐标系,从左上转到左下
R1 = R[:, :-1] # 要计算的上一帧
R2 = R[:, 1:] # 要计算的当前帧,从1开始而不是0
Diff = R2 - R1 # 跟前一帧的差
Dx, Dy = Diff[:, :, 0], Diff[:, :, 1] # 分解为横坐标以及纵坐标的差
Ds = (Dx ** 2 + Dy ** 2) ** 0.5 # 差组成的直角三角形的斜边边长
Theta = np.arccos(Dx / Ds)
Theta = np.where(np.isnan(Theta), 0, Theta) # 去除因为除零造成的nan值
Theta = Theta * 180 / np.pi # 转换成角度值,但是arccos的值域是0-180,咱的期望值域是0-360
Theta = np.where(Dy < 0, 360 - Theta, Theta) # Dy小于0的地方角度应该是180-360,而不是0-180,需要拿360减去当前值来修正。
Theta = np.pad(Theta, ((0, 0), (1, 0)), 'edge') # 第一帧的运动方向设置为跟下一帧相同
np.save(self.move_direction_pre_frame_path, Theta)
return Theta
def _get_fly_angle_cor(self):
'''
根据运动方向来修正果蝇头部朝向
:return:
'''
if self.fly_angles_cor_path.exists():
return np.load(self.fly_angles_cor_path)
else:
move_ang = self._get_move_direction_pre_frame()
move_ang = np.transpose(move_ang, [1, 0])
fly_ang = np.load(Path(self.cache_dir, 'fly_angles.npy'))
diff = np.abs(move_ang - fly_ang)
mask = (diff > 90) * (diff < 270)
fly_ang_cor = np.where(mask, fly_ang + 180, fly_ang)
np.save(self.fly_angles_cor_path, fly_ang_cor)
return fly_ang_cor
def PARAM_angle_changes_old(self):
# if self.angle_changes_path.exists():
# return self.angle_changes_path
ang = self._get_fly_angle_cor()
ang_sec = ang[::self.fps, self.roi_flys_id]
as1 = ang_sec[:-1]
as2 = ang_sec[1:]
changes = np.abs(as2 - as1)
changes = np.where(changes > 180, 360 - changes, changes) # 相比前一秒的变化角度(0-180)
# ana_duration_secs = int(self.ana_time_duration * 60)
ana_duration_secs = int(self.angle_time_duration * 60)
ana_times = int(len(changes) / ana_duration_secs) + 1
# 按照时间段来分出来
changes_es = [changes[i * ana_duration_secs:(i + 1) * ana_duration_secs] for i in range(ana_times)]
if len(changes_es[-1]) < len(changes_es[-2]) * 0.1: # 最后一段太小的话就舍弃
changes_es = changes_es[:-1]
bins = 18 # 直方图横坐标维度
hists = []
'''
这里注意一下,求出来的直方图横坐标是0-10,10-20,20-30,。。。,170-180,更具体来说,应该是这样:
[0,10),[10,20),[20,30),...[170,180],前闭后开,但是最后一个是全闭。加上0后变为:
0,(0,10),[10,20),[20,30),...[170,180]
'''
zeros_nums = []
for cha in changes_es:
hist = np.histogram(cha.flatten(), bins=bins, range=(0, 180))
hists.append(hist)
zeros_nums.append(np.sum(cha == 0))
# np.save(self.angle_changes_path, hists)
return hists, zeros_nums
def PARAM_angle_changes(self):
'''
相比old,使用核密度估计的方法来估计密度函数
:return:
'''
ang = self._get_fly_angle_cor()
ang_sec = ang[::self.fps, self.roi_flys_id]
as1 = ang_sec[:-1]
as2 = ang_sec[1:]
changes = np.abs(as2 - as1)
changes = np.where(changes > 180, 360 - changes, changes) # 相比前一秒的变化角度(0-180)
ana_duration_secs = int(self.angle_time_duration * 60)
ana_times = int(len(changes) / ana_duration_secs) + 1
# 按照时间段来分出来
changes_es = [changes[i * ana_duration_secs:(i + 1) * ana_duration_secs] for i in range(ana_times)]
if len(changes_es[-1]) < len(changes_es[-2]) * 0.1: # 最后一段太小的话就舍弃
changes_es = changes_es[:-1]
bins = 500 # 因为是密度函数,为了使曲线尽可能平滑,所以要设置稍微大一点
hists = []
for cha in changes_es:
hist = get_KernelDensity(cha, bins=bins, range=(0, 180))
hists.append(hist)
return hists
if __name__ == '__main__':
    # Quick manual smoke test of np.histogram bucketing.
    sample = np.array([1, 1, 2, 3, 9, 7, 4, 8, 19, 20])
    print(np.histogram(sample, bins=5, range=(0, 20)))
| [
"easyFlyTracker.src_code.utils.NumpyArrayHasNanValuesExceptin",
"numpy.arccos",
"math.sqrt",
"numpy.array",
"numpy.save",
"numpy.mean",
"numpy.histogram",
"numpy.repeat",
"pathlib.Path",
"numpy.where",
"easyFlyTracker.src_code.Camera_Calibration.Undistortion",
"pandas.DataFrame",
"easyFlyTra... | [((592, 625), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (615, 625), False, 'import warnings\n'), ((27566, 27608), 'numpy.array', 'np.array', (['[1, 1, 2, 3, 9, 7, 4, 8, 19, 20]'], {}), '([1, 1, 2, 3, 9, 7, 4, 8, 19, 20])\n', (27574, 27608), True, 'import numpy as np\n'), ((1325, 1341), 'pathlib.Path', 'Path', (['video_path'], {}), '(video_path)\n', (1329, 1341), False, 'from pathlib import Path\n'), ((1365, 1381), 'pathlib.Path', 'Path', (['output_dir'], {}), '(output_dir)\n', (1369, 1381), False, 'from pathlib import Path\n'), ((1420, 1448), 'pathlib.Path', 'Path', (['self.res_dir', '""".cache"""'], {}), "(self.res_dir, '.cache')\n", (1424, 1448), False, 'from pathlib import Path\n'), ((1489, 1528), 'pathlib.Path', 'Path', (['self.cache_dir', '"""analysis_result"""'], {}), "(self.cache_dir, 'analysis_result')\n", (1493, 1528), False, 'from pathlib import Path\n'), ((1576, 1610), 'pathlib.Path', 'Path', (['self.cache_dir', 'f"""track.npy"""'], {}), "(self.cache_dir, f'track.npy')\n", (1580, 1610), False, 'from pathlib import Path\n'), ((1644, 1682), 'pathlib.Path', 'Path', (['self.cache_dir', 'f"""track_cor.npy"""'], {}), "(self.cache_dir, f'track_cor.npy')\n", (1648, 1682), False, 'from pathlib import Path\n'), ((1728, 1780), 'pathlib.Path', 'Path', (['self.saved_dir', '"""move_direction_pre_frame.npy"""'], {}), "(self.saved_dir, 'move_direction_pre_frame.npy')\n", (1732, 1780), False, 'from pathlib import Path\n'), ((1816, 1858), 'pathlib.Path', 'Path', (['self.saved_dir', '"""fly_angles_cor.npy"""'], {}), "(self.saved_dir, 'fly_angles_cor.npy')\n", (1820, 1858), False, 'from pathlib import Path\n'), ((1885, 1937), 'pathlib.Path', 'Path', (['self.saved_dir', '"""all_fly_speeds_per_frame.npy"""'], {}), "(self.saved_dir, 'all_fly_speeds_per_frame.npy')\n", (1889, 1937), False, 'from pathlib import Path\n'), ((1962, 2012), 'pathlib.Path', 'Path', (['self.saved_dir', 
'"""all_fly_dist_per_frame.npy"""'], {}), "(self.saved_dir, 'all_fly_dist_per_frame.npy')\n", (1966, 2012), False, 'from pathlib import Path\n'), ((2117, 2149), 'pathlib.Path', 'Path', (['self.res_dir', '"""config.pkl"""'], {}), "(self.res_dir, 'config.pkl')\n", (2121, 2149), False, 'from pathlib import Path\n'), ((3793, 3828), 'pathlib.Path', 'Path', (['self.cache_dir', '"""heatmap.npy"""'], {}), "(self.cache_dir, 'heatmap.npy')\n", (3797, 3828), False, 'from pathlib import Path\n'), ((3852, 3873), 'numpy.load', 'np.load', (['heatmap_path'], {}), '(heatmap_path)\n', (3859, 3873), True, 'import numpy as np\n'), ((5348, 5361), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (5356, 5361), True, 'import numpy as np\n'), ((6835, 6875), 'numpy.save', 'np.save', (['self.speeds_npy', 'all_fly_speeds'], {}), '(self.speeds_npy, all_fly_speeds)\n', (6842, 6875), True, 'import numpy as np\n'), ((6884, 6928), 'numpy.save', 'np.save', (['self.dist_npy', 'all_fly_displacement'], {}), '(self.dist_npy, all_fly_displacement)\n', (6891, 6928), True, 'import numpy as np\n'), ((6969, 6993), 'numpy.array', 'np.array', (['all_fly_speeds'], {}), '(all_fly_speeds)\n', (6977, 6993), True, 'import numpy as np\n'), ((7032, 7062), 'numpy.array', 'np.array', (['all_fly_displacement'], {}), '(all_fly_displacement)\n', (7040, 7062), True, 'import numpy as np\n'), ((7304, 7372), 'pathlib.Path', 'Path', (['self.saved_dir', 'f"""speed_per_duration_{self.roi_flys_flag}.npy"""'], {}), "(self.saved_dir, f'speed_per_duration_{self.roi_flys_flag}.npy')\n", (7308, 7372), False, 'from pathlib import Path\n'), ((7392, 7467), 'pathlib.Path', 'Path', (['self.saved_dir', 'f"""displacement_per_duration_{self.roi_flys_flag}.npy"""'], {}), "(self.saved_dir, f'displacement_per_duration_{self.roi_flys_flag}.npy')\n", (7396, 7467), False, 'from pathlib import Path\n'), ((9155, 9199), 'numpy.save', 'np.save', (['speed_npy', 'time_duration_stat_speed'], {}), '(speed_npy, time_duration_stat_speed)\n', (9162, 
9199), True, 'import numpy as np\n'), ((9208, 9258), 'numpy.save', 'np.save', (['disp_npy', 'time_duration_stat_displacement'], {}), '(disp_npy, time_duration_stat_displacement)\n', (9215, 9258), True, 'import numpy as np\n'), ((10124, 10197), 'pathlib.Path', 'Path', (['self.saved_dir', 'f"""sleep_time_per_duration_{self.roi_flys_flag}.npy"""'], {}), "(self.saved_dir, f'sleep_time_per_duration_{self.roi_flys_flag}.npy')\n", (10128, 10197), False, 'from pathlib import Path\n'), ((10312, 10356), 'pathlib.Path', 'Path', (['self.cache_dir', '"""all_sleep_status.npy"""'], {}), "(self.cache_dir, 'all_sleep_status.npy')\n", (10316, 10356), False, 'from pathlib import Path\n'), ((13550, 13576), 'numpy.array', 'np.array', (['values_durations'], {}), '(values_durations)\n', (13558, 13576), True, 'import numpy as np\n'), ((13837, 13879), 'pathlib.Path', 'Path', (['self.saved_dir', 'f"""region_status.npy"""'], {}), "(self.saved_dir, f'region_status.npy')\n", (13841, 13879), False, 'from pathlib import Path\n'), ((14590, 14617), 'numpy.array', 'np.array', (['all_region_status'], {}), '(all_region_status)\n', (14598, 14617), True, 'import numpy as np\n'), ((14626, 14676), 'numpy.save', 'np.save', (['region_status_npy', 'self.all_region_status'], {}), '(region_status_npy, self.all_region_status)\n', (14633, 14676), True, 'import numpy as np\n'), ((14914, 14962), 'easyFlyTracker.src_code.utils.equalizeHist_use_mask', 'equalizeHist_use_mask', (['heat', 'mask'], {'notuint8': '(True)'}), '(heat, mask, notuint8=True)\n', (14935, 14962), False, 'from easyFlyTracker.src_code.utils import Pbar, equalizeHist_use_mask\n'), ((15066, 15107), 'cv2.applyColorMap', 'cv2.applyColorMap', (['heat', 'cv2.COLORMAP_JET'], {}), '(heat, cv2.COLORMAP_JET)\n', (15083, 15107), False, 'import cv2\n'), ((16890, 16908), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (16903, 16908), True, 'import numpy as np\n'), ((18203, 18249), 'numpy.zeros', 'np.zeros', (['([r * 2 + 1] * 2)'], {'dtype': 
'heatmap.dtype'}), '([r * 2 + 1] * 2, dtype=heatmap.dtype)\n', (18211, 18249), True, 'import numpy as np\n'), ((18434, 18471), 'numpy.zeros', 'np.zeros', (['heatmap_sum.shape', 'np.uint8'], {}), '(heatmap_sum.shape, np.uint8)\n', (18442, 18471), True, 'import numpy as np\n'), ((18480, 18516), 'cv2.circle', 'cv2.circle', (['mask', '(r, r)', 'r', '(255)', '(-1)'], {}), '(mask, (r, r), r, 255, -1)\n', (18490, 18516), False, 'import cv2\n'), ((18770, 18844), 'cv2.resize', 'cv2.resize', (['pcolor'], {'dsize': 'None', 'fx': '(4)', 'fy': '(4)', 'interpolation': 'cv2.INTER_LINEAR'}), '(pcolor, dsize=None, fx=4, fy=4, interpolation=cv2.INTER_LINEAR)\n', (18780, 18844), False, 'import cv2\n'), ((19793, 19838), 'pathlib.Path', 'Path', (['self.cache_dir', '"""heatmap_sleeptime.npy"""'], {}), "(self.cache_dir, 'heatmap_sleeptime.npy')\n", (19797, 19838), False, 'from pathlib import Path\n'), ((20164, 20188), 'numpy.zeros', 'np.zeros', (['(h, w)', 'np.int'], {}), '((h, w), np.int)\n', (20172, 20188), True, 'import numpy as np\n'), ((21527, 21569), 'easyFlyTracker.src_code.Camera_Calibration.Undistortion', 'Undistortion', (['self.Undistortion_model_path'], {}), '(self.Undistortion_model_path)\n', (21539, 21569), False, 'from easyFlyTracker.src_code.Camera_Calibration import Undistortion\n'), ((21728, 21765), 'numpy.repeat', 'np.repeat', (['sleep_status', 'fps'], {'axis': '(-1)'}), '(sleep_status, fps, axis=-1)\n', (21737, 21765), True, 'import numpy as np\n'), ((23277, 23327), 'numpy.save', 'np.save', (['sleeptime_heatmap_path', 'sleeptime_heatmap'], {}), '(sleeptime_heatmap_path, sleeptime_heatmap)\n', (23284, 23327), True, 'import numpy as np\n'), ((25477, 25494), 'numpy.abs', 'np.abs', (['(as2 - as1)'], {}), '(as2 - as1)\n', (25483, 25494), True, 'import numpy as np\n'), ((25513, 25560), 'numpy.where', 'np.where', (['(changes > 180)', '(360 - changes)', 'changes'], {}), '(changes > 180, 360 - changes, changes)\n', (25521, 25560), True, 'import numpy as np\n'), ((26831, 26848), 
'numpy.abs', 'np.abs', (['(as2 - as1)'], {}), '(as2 - as1)\n', (26837, 26848), True, 'import numpy as np\n'), ((26867, 26914), 'numpy.where', 'np.where', (['(changes > 180)', '(360 - changes)', 'changes'], {}), '(changes > 180, 360 - changes, changes)\n', (26875, 26914), True, 'import numpy as np\n'), ((27619, 27658), 'numpy.histogram', 'np.histogram', (['da'], {'bins': '(5)', 'range': '(0, 20)'}), '(da, bins=5, range=(0, 20))\n', (27631, 27658), True, 'import numpy as np\n'), ((2547, 2584), 'pathlib.Path', 'Path', (['self.cache_dir', '"""mask_imgs.npy"""'], {}), "(self.cache_dir, 'mask_imgs.npy')\n", (2551, 2584), False, 'from pathlib import Path\n'), ((3976, 4007), 'numpy.load', 'np.load', (['self.npy_file_path_cor'], {}), '(self.npy_file_path_cor)\n', (3983, 4007), True, 'import numpy as np\n'), ((4040, 4067), 'numpy.load', 'np.load', (['self.npy_file_path'], {}), '(self.npy_file_path)\n', (4047, 4067), True, 'import numpy as np\n'), ((4097, 4125), 'numpy.transpose', 'np.transpose', (['res', '[1, 0, 2]'], {}), '(res, [1, 0, 2])\n', (4109, 4125), True, 'import numpy as np\n'), ((4162, 4209), 'numpy.save', 'np.save', (['self.npy_file_path_cor', 'self.all_datas'], {}), '(self.npy_file_path_cor, self.all_datas)\n', (4169, 4209), True, 'import numpy as np\n'), ((4830, 4856), 'numpy.where', 'np.where', (['(l < 0)', 'np.nan', 'l'], {}), '(l < 0, np.nan, l)\n', (4838, 4856), True, 'import numpy as np\n'), ((4874, 4894), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'l'}), '(data=l)\n', (4886, 4894), True, 'import pandas as pd\n'), ((5417, 5452), 'easyFlyTracker.src_code.utils.NumpyArrayHasNanValuesExceptin', 'NumpyArrayHasNanValuesExceptin', (['res'], {}), '(res)\n', (5447, 5452), False, 'from easyFlyTracker.src_code.utils import NumpyArrayHasNanValuesExceptin\n'), ((5665, 5689), 'numpy.load', 'np.load', (['self.speeds_npy'], {}), '(self.speeds_npy)\n', (5672, 5689), True, 'import numpy as np\n'), ((5732, 5754), 'numpy.load', 'np.load', (['self.dist_npy'], {}), 
'(self.dist_npy)\n', (5739, 5754), True, 'import numpy as np\n'), ((6655, 6701), 'easyFlyTracker.src_code.utils.NumpyArrayHasNanValuesExceptin', 'NumpyArrayHasNanValuesExceptin', (['all_fly_speeds'], {}), '(all_fly_speeds)\n', (6685, 6701), False, 'from easyFlyTracker.src_code.utils import NumpyArrayHasNanValuesExceptin\n'), ((6774, 6826), 'easyFlyTracker.src_code.utils.NumpyArrayHasNanValuesExceptin', 'NumpyArrayHasNanValuesExceptin', (['all_fly_displacement'], {}), '(all_fly_displacement)\n', (6804, 6826), False, 'from easyFlyTracker.src_code.utils import NumpyArrayHasNanValuesExceptin\n'), ((7817, 7909), 'numpy.tile', 'np.tile', (['self.roi_flys_list[:, np.newaxis]', '(1, self.all_fly_speeds_per_frame.shape[1])'], {}), '(self.roi_flys_list[:, np.newaxis], (1, self.\n all_fly_speeds_per_frame.shape[1]))\n', (7824, 7909), True, 'import numpy as np\n'), ((8032, 8122), 'numpy.tile', 'np.tile', (['self.roi_flys_list[:, np.newaxis]', '(1, self.all_fly_dist_per_frame.shape[1])'], {}), '(self.roi_flys_list[:, np.newaxis], (1, self.all_fly_dist_per_frame.\n shape[1]))\n', (8039, 8122), True, 'import numpy as np\n'), ((9550, 9640), 'numpy.tile', 'np.tile', (['self.roi_flys_list[:, np.newaxis]', '(1, self.all_fly_dist_per_frame.shape[1])'], {}), '(self.roi_flys_list[:, np.newaxis], (1, self.all_fly_dist_per_frame.\n shape[1]))\n', (9557, 9640), True, 'import numpy as np\n'), ((12179, 12205), 'numpy.array', 'np.array', (['all_sleep_status'], {}), '(all_sleep_status)\n', (12187, 12205), True, 'import numpy as np\n'), ((12218, 12272), 'numpy.save', 'np.save', (['cache_all_sleep_status_path', 'all_sleep_status'], {}), '(cache_all_sleep_status_path, all_sleep_status)\n', (12225, 12272), True, 'import numpy as np\n'), ((13962, 13988), 'numpy.load', 'np.load', (['region_status_npy'], {}), '(region_status_npy)\n', (13969, 13988), True, 'import numpy as np\n'), ((15473, 15509), 'numpy.tile', 'np.tile', (['mask[:, :, None]', '(1, 1, 3)'], {}), '(mask[:, :, None], (1, 1, 3))\n', 
(15480, 15509), True, 'import numpy as np\n'), ((16255, 16272), 'numpy.sum', 'np.sum', (['m'], {'axis': '(0)'}), '(m, axis=0)\n', (16261, 16272), True, 'import numpy as np\n'), ((16290, 16307), 'numpy.sum', 'np.sum', (['m'], {'axis': '(1)'}), '(m, axis=1)\n', (16296, 16307), True, 'import numpy as np\n'), ((19902, 19933), 'numpy.load', 'np.load', (['sleeptime_heatmap_path'], {}), '(sleeptime_heatmap_path)\n', (19909, 19933), True, 'import numpy as np\n'), ((20263, 20307), 'pathlib.Path', 'Path', (['self.cache_dir', '"""all_sleep_status.npy"""'], {}), "(self.cache_dir, 'all_sleep_status.npy')\n", (20267, 20307), False, 'from pathlib import Path\n'), ((20442, 20492), 'numpy.save', 'np.save', (['sleeptime_heatmap_path', 'sleeptime_heatmap'], {}), '(sleeptime_heatmap_path, sleeptime_heatmap)\n', (20449, 20492), True, 'import numpy as np\n'), ((21006, 21031), 'numpy.array', 'np.array', (['sleep_durations'], {}), '(sleep_durations)\n', (21014, 21031), True, 'import numpy as np\n'), ((21247, 21301), 'pathlib.Path', 'Path', (['self.cache_dir', '"""background_image_undistort.bmp"""'], {}), "(self.cache_dir, 'background_image_undistort.bmp')\n", (21251, 21301), False, 'from pathlib import Path\n'), ((21342, 21386), 'pathlib.Path', 'Path', (['self.cache_dir', '"""background_image.bmp"""'], {}), "(self.cache_dir, 'background_image.bmp')\n", (21346, 21386), False, 'from pathlib import Path\n'), ((22030, 22049), 'easyFlyTracker.src_code.utils.Pbar', 'Pbar', ([], {'total': '(ed - st)'}), '(total=ed - st)\n', (22034, 22049), False, 'from easyFlyTracker.src_code.utils import Pbar, equalizeHist_use_mask\n'), ((23618, 23661), 'numpy.load', 'np.load', (['self.move_direction_pre_frame_path'], {}), '(self.move_direction_pre_frame_path)\n', (23625, 23661), True, 'import numpy as np\n'), ((24090, 24108), 'numpy.arccos', 'np.arccos', (['(Dx / Ds)'], {}), '(Dx / Ds)\n', (24099, 24108), True, 'import numpy as np\n'), ((24283, 24319), 'numpy.where', 'np.where', (['(Dy < 0)', '(360 - Theta)', 
'Theta'], {}), '(Dy < 0, 360 - Theta, Theta)\n', (24291, 24319), True, 'import numpy as np\n'), ((24389, 24428), 'numpy.pad', 'np.pad', (['Theta', '((0, 0), (1, 0))', '"""edge"""'], {}), "(Theta, ((0, 0), (1, 0)), 'edge')\n", (24395, 24428), True, 'import numpy as np\n'), ((24462, 24512), 'numpy.save', 'np.save', (['self.move_direction_pre_frame_path', 'Theta'], {}), '(self.move_direction_pre_frame_path, Theta)\n', (24469, 24512), True, 'import numpy as np\n'), ((24703, 24736), 'numpy.load', 'np.load', (['self.fly_angles_cor_path'], {}), '(self.fly_angles_cor_path)\n', (24710, 24736), True, 'import numpy as np\n'), ((24834, 24864), 'numpy.transpose', 'np.transpose', (['move_ang', '[1, 0]'], {}), '(move_ang, [1, 0])\n', (24846, 24864), True, 'import numpy as np\n'), ((24954, 24980), 'numpy.abs', 'np.abs', (['(move_ang - fly_ang)'], {}), '(move_ang - fly_ang)\n', (24960, 24980), True, 'import numpy as np\n'), ((25053, 25091), 'numpy.where', 'np.where', (['mask', '(fly_ang + 180)', 'fly_ang'], {}), '(mask, fly_ang + 180, fly_ang)\n', (25061, 25091), True, 'import numpy as np\n'), ((25104, 25150), 'numpy.save', 'np.save', (['self.fly_angles_cor_path', 'fly_ang_cor'], {}), '(self.fly_angles_cor_path, fly_ang_cor)\n', (25111, 25150), True, 'import numpy as np\n'), ((27426, 27475), 'easyFlyTracker.src_code.kernel_density_estimation.get_KernelDensity', 'get_KernelDensity', (['cha'], {'bins': 'bins', 'range': '(0, 180)'}), '(cha, bins=bins, range=(0, 180))\n', (27443, 27475), False, 'from easyFlyTracker.src_code.kernel_density_estimation import get_KernelDensity\n'), ((10474, 10510), 'numpy.load', 'np.load', (['cache_all_sleep_status_path'], {}), '(cache_all_sleep_status_path)\n', (10481, 10510), True, 'import numpy as np\n'), ((11034, 11058), 'numpy.array', 'np.array', (['all_dist_per_s'], {}), '(all_dist_per_s)\n', (11042, 11058), True, 'import numpy as np\n'), ((13891, 13914), 'pathlib.Path', 'Path', (['region_status_npy'], {}), '(region_status_npy)\n', (13895, 13914), 
False, 'from pathlib import Path\n'), ((15019, 15033), 'numpy.round', 'np.round', (['heat'], {}), '(heat)\n', (15027, 15033), True, 'import numpy as np\n'), ((15672, 15696), 'numpy.array', 'np.array', (['self.mask_imgs'], {}), '(self.mask_imgs)\n', (15680, 15696), True, 'import numpy as np\n'), ((20565, 20582), 'numpy.array', 'np.array', (['[False]'], {}), '([False])\n', (20573, 20582), True, 'import numpy as np\n'), ((20594, 20611), 'numpy.array', 'np.array', (['[False]'], {}), '([False])\n', (20602, 20611), True, 'import numpy as np\n'), ((21453, 21489), 'cv2.cvtColor', 'cv2.cvtColor', (['bg', 'cv2.COLOR_BGR2GRAY'], {}), '(bg, cv2.COLOR_BGR2GRAY)\n', (21465, 21489), False, 'import cv2\n'), ((22199, 22234), 'numpy.argwhere', 'np.argwhere', (['(status[:, nub] == True)'], {}), '(status[:, nub] == True)\n', (22210, 22234), True, 'import numpy as np\n'), ((24138, 24153), 'numpy.isnan', 'np.isnan', (['Theta'], {}), '(Theta)\n', (24146, 24153), True, 'import numpy as np\n'), ((24895, 24933), 'pathlib.Path', 'Path', (['self.cache_dir', '"""fly_angles.npy"""'], {}), "(self.cache_dir, 'fly_angles.npy')\n", (24899, 24933), False, 'from pathlib import Path\n'), ((26458, 26474), 'numpy.sum', 'np.sum', (['(cha == 0)'], {}), '(cha == 0)\n', (26464, 26474), True, 'import numpy as np\n'), ((2485, 2510), 'numpy.mean', 'np.mean', (['config_pk[:, -1]'], {}), '(config_pk[:, -1])\n', (2492, 2510), True, 'import numpy as np\n'), ((2994, 3012), 'math.sqrt', 'math.sqrt', (['area_th'], {}), '(area_th)\n', (3003, 3012), False, 'import math\n'), ((5373, 5386), 'numpy.isnan', 'np.isnan', (['res'], {}), '(res)\n', (5381, 5386), True, 'import numpy as np\n'), ((6600, 6624), 'numpy.isnan', 'np.isnan', (['all_fly_speeds'], {}), '(all_fly_speeds)\n', (6608, 6624), True, 'import numpy as np\n'), ((6713, 6743), 'numpy.isnan', 'np.isnan', (['all_fly_displacement'], {}), '(all_fly_displacement)\n', (6721, 6743), True, 'import numpy as np\n'), ((9796, 9832), 'numpy.sum', 'np.sum', (['da[:, i:i + 
duration_frames]'], {}), '(da[:, i:i + duration_frames])\n', (9802, 9832), True, 'import numpy as np\n'), ((16211, 16224), 'numpy.sum', 'np.sum', (['(l * i)'], {}), '(l * i)\n', (16217, 16224), True, 'import numpy as np\n'), ((16227, 16236), 'numpy.sum', 'np.sum', (['l'], {}), '(l)\n', (16233, 16236), True, 'import numpy as np\n'), ((21598, 21635), 'pathlib.Path', 'Path', (['self.cache_dir', '"""mask_imgs.npy"""'], {}), "(self.cache_dir, 'mask_imgs.npy')\n", (21602, 21635), False, 'from pathlib import Path\n'), ((22445, 22468), 'numpy.zeros_like', 'np.zeros_like', (['mask_all'], {}), '(mask_all)\n', (22458, 22468), True, 'import numpy as np\n'), ((22663, 22702), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (22675, 22702), False, 'import cv2\n'), ((10837, 10886), 'numpy.sum', 'np.sum', (['self.all_fly_dist_per_frame[i, j:j + fps]'], {}), '(self.all_fly_dist_per_frame[i, j:j + fps])\n', (10843, 10886), True, 'import numpy as np\n'), ((11540, 11557), 'numpy.array', 'np.array', (['[False]'], {}), '([False])\n', (11548, 11557), True, 'import numpy as np\n'), ((13312, 13336), 'numpy.sum', 'np.sum', (['last_da'], {'axis': '(-1)'}), '(last_da, axis=-1)\n', (13318, 13336), True, 'import numpy as np\n'), ((14316, 14341), 'math.pow', 'math.pow', (['(x[0] - cp[0])', '(2)'], {}), '(x[0] - cp[0], 2)\n', (14324, 14341), False, 'import math\n'), ((14344, 14369), 'math.pow', 'math.pow', (['(x[1] - cp[1])', '(2)'], {}), '(x[1] - cp[1], 2)\n', (14352, 14369), False, 'import math\n'), ((15590, 15608), 'numpy.array', 'np.array', (['heatmaps'], {}), '(heatmaps)\n', (15598, 15608), True, 'import numpy as np\n'), ((22378, 22410), 'numpy.squeeze', 'np.squeeze', (['sleep_fly_id'], {'axis': '(1)'}), '(sleep_fly_id, axis=1)\n', (22388, 22410), True, 'import numpy as np\n'), ((4531, 4542), 'numpy.array', 'np.array', (['l'], {}), '(l)\n', (4539, 4542), True, 'import numpy as np\n'), ((12932, 12974), 'numpy.sum', 'np.sum', 
(['all_sleep_status_duration'], {'axis': '(-1)'}), '(all_sleep_status_duration, axis=-1)\n', (12938, 12974), True, 'import numpy as np\n')] |
"""
Misc Utility functions
"""
import collections
import collections.abc
import datetime
import logging
import os
from collections import OrderedDict

import numpy as np
import torch
def decode_segmap(temp, plot=False):
    """Convert a 2-D class-index map into a (3, H, W) RGB tensor scaled to [0, 1].

    Classes 0..5 are mapped to a fixed colour palette (Seed, P_Root, L_Root,
    P_tip, L_tip, Back).  Pixels whose value is outside 0..5 keep their
    original value in every channel (then divided by 255).  ``plot`` is kept
    for interface compatibility and is unused.
    """
    # One RGB triple per class index (row order == class index).
    palette = torch.Tensor(
        [
            [255, 255, 255],  # Seed
            [0, 255, 0],      # P_Root
            [255, 100, 100],  # L_Root
            [255, 0, 0],      # P_tip
            [147, 0, 227],    # L_tip
            [0, 0, 0],        # Back
        ]
    )
    channels = [temp.clone(), temp.clone(), temp.clone()]
    for cls_idx in range(6):
        mask = temp == cls_idx
        for ch in range(3):
            channels[ch][mask] = palette[cls_idx, ch]
    rgb = torch.zeros((3, temp.shape[0], temp.shape[1]))
    for ch, plane in enumerate(channels):
        rgb[ch, :, :] = plane / 255.0
    return rgb
def dict_collate(batch):
    """Collate a batch of samples into batched containers.

    * mappings  -> dict of lists (one list per key)
    * tensors   -> stacked along a new first dimension
    * sequences -> tuple of recursively collated element groups

    Raises
    ------
    RuntimeError
        If sequence elements in the batch differ in length.
    TypeError
        If the element type is none of the above.
    """
    # ``collections.Mapping``/``collections.Sequence`` were removed in
    # Python 3.10; the ABCs live in ``collections.abc``.
    if isinstance(batch[0], collections.abc.Mapping):
        return {key: [d[key] for d in batch] for key in batch[0]}
    elif torch.is_tensor(batch[0]):
        # If we're in a background process, concatenate directly into a
        # shared memory tensor to avoid an extra copy
        # This is true when number of threads > 1
        numel = sum([x.numel() for x in batch])
        storage = batch[0].storage()._new_shared(numel)
        out = batch[0].new(storage)
        return torch.stack(batch, 0, out=out)
    elif isinstance(batch[0], collections.abc.Sequence):
        # check to make sure that the elements in batch have consistent size
        it = iter(batch)
        elem_size = len(next(it))
        if not all(len(elem) == elem_size for elem in it):
            raise RuntimeError('each element in list of batch should be of equal size')
        transposed = zip(*batch)
        return tuple(dict_collate(samples) for samples in transposed)
    else:
        raise TypeError("BAD TYPE", type(batch[0]))
def recursive_glob(rootdir=".", suffix=""):
    """Walk *rootdir* recursively and collect files whose name ends with *suffix*.

    :param rootdir: root directory of the search
    :param suffix: filename suffix to match (empty string matches everything)
    :return: list of full paths of the matching files
    """
    matches = []
    for dirpath, _dirnames, filenames in os.walk(rootdir):
        for fname in filenames:
            if fname.endswith(suffix):
                matches.append(os.path.join(dirpath, fname))
    return matches
def alpha_blend(input_image, segmentation_mask, alpha=0.5):
    """Alpha Blending utility to overlay RGB masks on RGB images.

    :param input_image: np.ndarray with 3 channels
    :param segmentation_mask: np.ndarray with 3 channels (same shape)
    :param alpha: float blend weight of the input image in [0, 1]
    :return: ``input_image * alpha + segmentation_mask * (1 - alpha)``
    """
    # The original pre-allocated ``np.zeros(input_image.size)`` -- a flat
    # array of the wrong shape -- and immediately overwrote it.  Dead code,
    # removed; the blend result is returned directly.
    return input_image * alpha + segmentation_mask * (1 - alpha)
def convert_state_dict(state_dict):
    """Convert a state dict saved from a ``DataParallel`` module to a normal
    module state dict.

    Keys produced by ``DataParallel`` carry a ``"module."`` prefix; it is
    stripped where present.  Keys without the prefix are kept unchanged, so
    plain (non-DataParallel) state dicts pass through intact instead of
    losing their first seven characters.

    :param state_dict: the loaded model state mapping
    :return: a new ``OrderedDict`` with normalized keys
    """
    prefix = "module."
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = k[len(prefix):] if k.startswith(prefix) else k
        new_state_dict[name] = v
    return new_state_dict
def get_logger(logdir):
    """Return the ``ptsemseg`` logger with a timestamped file handler attached.

    Creates ``run_<timestamp>.log`` inside *logdir* and sets the logger
    level to INFO.
    """
    logger = logging.getLogger('ptsemseg')
    # Timestamp formatted as YYYY_MM_DD_HH_MM_SS (second resolution).
    ts = datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
    file_path = os.path.join(logdir, 'run_{}.log'.format(ts))
    handler = logging.FileHandler(file_path)
    handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    return logger
| [
"logging.getLogger",
"collections.OrderedDict",
"logging.Formatter",
"torch.stack",
"torch.Tensor",
"os.path.join",
"torch.is_tensor",
"numpy.zeros",
"datetime.datetime.now",
"logging.FileHandler",
"torch.zeros",
"os.walk"
] | [((371, 427), 'torch.Tensor', 'torch.Tensor', (['[Seed, P_Root, L_Root, P_tip, L_tip, Back]'], {}), '([Seed, P_Root, L_Root, P_tip, L_tip, Back])\n', (383, 427), False, 'import torch\n'), ((754, 800), 'torch.zeros', 'torch.zeros', (['(3, temp.shape[0], temp.shape[1])'], {}), '((3, temp.shape[0], temp.shape[1]))\n', (765, 800), False, 'import torch\n'), ((2654, 2698), 'numpy.zeros', 'np.zeros', (['input_image.size'], {'dtype': 'np.float32'}), '(input_image.size, dtype=np.float32)\n', (2662, 2698), True, 'import numpy as np\n'), ((3028, 3041), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3039, 3041), False, 'from collections import OrderedDict\n'), ((3217, 3246), 'logging.getLogger', 'logging.getLogger', (['"""ptsemseg"""'], {}), "('ptsemseg')\n", (3234, 3246), False, 'import logging\n'), ((3437, 3467), 'logging.FileHandler', 'logging.FileHandler', (['file_path'], {}), '(file_path)\n', (3456, 3467), False, 'import logging\n'), ((3484, 3542), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(levelname)s %(message)s"""'], {}), "('%(asctime)s %(levelname)s %(message)s')\n", (3501, 3542), False, 'import logging\n'), ((1054, 1079), 'torch.is_tensor', 'torch.is_tensor', (['batch[0]'], {}), '(batch[0])\n', (1069, 1079), False, 'import torch\n'), ((2176, 2208), 'os.path.join', 'os.path.join', (['looproot', 'filename'], {}), '(looproot, filename)\n', (2188, 2208), False, 'import os\n'), ((1412, 1442), 'torch.stack', 'torch.stack', (['batch', '(0)'], {'out': 'out'}), '(batch, 0, out=out)\n', (1423, 1442), False, 'import torch\n'), ((2247, 2263), 'os.walk', 'os.walk', (['rootdir'], {}), '(rootdir)\n', (2254, 2263), False, 'import os\n'), ((3260, 3283), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3281, 3283), False, 'import datetime\n')] |
"""This module includes all important computation functions which are used internally.
They (normally) should not be used by users.
"""
import logging
from collections import defaultdict
import attr
import joblib
import numpy as np
import scipy
from scipy import stats
from scipy.cluster.hierarchy import fcluster, linkage
from fri.model.base_cvxproblem import Relevance_CVXProblem
from fri.model.base_initmodel import InitModel
from fri.model.base_type import ProblemType
from fri.utils import permutate_feature_in_data
from .utils import distance
MIN_N_PROBE_FEATURES = 20 # Lower bound of probe features
def _start_solver_worker(bound: Relevance_CVXProblem):
    """
    Worker thread method for parallel computation.

    Solves one relevance-bound optimization problem and returns the solved
    problem object (see ``Relevance_CVXProblem.solve``).
    """
    return bound.solve()
class RelevanceBoundsIntervals(object):
    """Computes (normalized) per-feature relevance-bound intervals and
    classifies features against permutation-"probe" statistics.

    One convex problem is solved per bound; the work is distributed with
    ``joblib`` and the problem objects come from ``problem_type``.
    """
    def __init__(
        self,
        data,
        problem_type: ProblemType,
        best_init_model: InitModel,
        random_state,
        n_resampling,
        n_jobs,
        verbose,
        normalize=True,
    ):
        # ``data`` is unpacked as an (X, y) pair by the interval methods.
        self.data = data
        self.problem_type = problem_type
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.n_resampling = n_resampling
        self.random_state = random_state
        self.best_init_model = best_init_model
        self.best_hyperparameters = best_init_model.get_params()
        self.normalize = normalize
        # Relax constraints to improve stability
        self.init_constraints = problem_type.get_relaxed_constraints(
            best_init_model.constraints
        )
    def get_normalized_lupi_intervals(self, lupi_features, presetModel=None):
        """Compute intervals and feature classes when the last
        ``lupi_features`` columns of X are privileged (LUPI) features.

        Returns ``(intervals, feature_classes)`` covering normal and
        privileged features, normalized by the combined L1 budget.
        """
        # We define a list of all the features we want to compute relevance bounds for
        X, _ = self.data  # TODO: handle other data formats
        all_d = X.shape[1]
        normal_d = all_d - lupi_features
        # Compute relevance bounds and probes for normal features and LUPI
        with joblib.Parallel(n_jobs=self.n_jobs, verbose=self.verbose) as parallel:
            d_n = _get_necessary_dimensions(normal_d, presetModel)
            rb = self.compute_relevance_bounds(d_n, parallel=parallel)
            probe_upper = self.compute_probe_values(d_n, True, parallel=parallel)
            probe_lower = self.compute_probe_values(d_n, False, parallel=parallel)
            d_l = _get_necessary_dimensions(all_d, presetModel, start=normal_d)
            rb_l = self.compute_relevance_bounds(d_l, parallel=parallel)
            probe_priv_upper = self.compute_probe_values(d_l, True, parallel=parallel)
            probe_priv_lower = self.compute_probe_values(d_l, False, parallel=parallel)
        #
        # Postprocess
        #
        # Get Scaling Parameters: combined L1 budget of normal + privileged weights
        l1 = self.init_constraints["w_l1"]
        l1_priv = self.init_constraints["w_priv_l1"]
        l1 = l1 + l1_priv
        # Normalize Normal and Lupi features
        rb_norm = self._postprocessing(l1, rb)
        rb_l_norm = self._postprocessing(l1, rb_l)
        interval_ = np.concatenate([rb_norm, rb_l_norm])
        # Normalize Probes
        probe_lower = self._postprocessing(l1, probe_lower)
        probe_upper = self._postprocessing(l1, probe_upper)
        probe_priv_lower = self._postprocessing(l1, probe_priv_lower)
        probe_priv_upper = self._postprocessing(l1, probe_priv_upper)
        #
        #
        # Classify features (normal and privileged get separate probe statistics)
        self.f_classifier = FeatureClassifier(
            probe_lower, probe_upper, verbose=self.verbose
        )
        feature_classes = self.f_classifier.classify(rb_norm)
        self.f_classifier_lupi = FeatureClassifier(
            probe_priv_lower, probe_priv_upper, verbose=self.verbose
        )
        feature_classes_lupi = self.f_classifier_lupi.classify(rb_l_norm)
        fc_both = np.concatenate([feature_classes, feature_classes_lupi])
        return interval_, fc_both
    def get_normalized_intervals(self, presetModel=None):
        """Compute normalized relevance intervals and feature classes for all
        features; returns ``(intervals, feature_classes)``.
        """
        # We define a list of all the features we want to compute relevance bounds for
        X, _ = self.data
        d = X.shape[1]
        # Depending on the preset model, we dont need to compute all bounds
        # e.g. in the case of fixed features we skip those
        dims = _get_necessary_dimensions(d, presetModel)
        with joblib.Parallel(n_jobs=self.n_jobs, verbose=self.verbose) as parallel:
            relevance_bounds = self.compute_relevance_bounds(
                dims, parallel=parallel, presetModel=presetModel
            )
            probe_values_upper = self.compute_probe_values(
                dims, isUpper=True, parallel=parallel, presetModel=presetModel
            )
            probe_values_lower = self.compute_probe_values(
                dims, isUpper=False, parallel=parallel, presetModel=presetModel
            )
        # Postprocess bounds: scale by the L1 budget of the initial model
        norm_bounds = self._postprocessing(
            self.best_init_model.L1_factor, relevance_bounds
        )
        norm_probe_values_upper = self._postprocessing(
            self.best_init_model.L1_factor, probe_values_upper
        )
        norm_probe_values_lower = self._postprocessing(
            self.best_init_model.L1_factor, probe_values_lower
        )
        self.f_classifier = FeatureClassifier(
            norm_probe_values_lower, norm_probe_values_upper, verbose=self.verbose
        )
        feature_classes = self.f_classifier.classify(norm_bounds)
        return norm_bounds, feature_classes
    def compute_relevance_bounds(
        self, dims, parallel=None, presetModel=None, solverargs=None
    ):
        """Solve lower/upper bound problems for the feature indices in *dims*
        and return a ``(len(dims), 2)`` array of (min, max) intervals.
        """
        init_model_state = self.best_init_model.model_state
        work_queue = self._generate_relevance_bounds_tasks(
            dims, self.data, presetModel, init_model_state
        )
        # Solve relevance bounds in parallel (when available)
        if parallel is None:
            parallel = joblib.Parallel(n_jobs=self.n_jobs, verbose=self.verbose)
        bound_results = parallel(map(joblib.delayed(_start_solver_worker), work_queue))
        # Retrieve results and aggregate values in dict
        solved_bounds = defaultdict(list)
        for finished_bound in bound_results:
            # Only add bounds with feasible solutions
            if finished_bound.is_solved:
                solved_bounds[finished_bound.current_feature].append(finished_bound)
        # Initialize array for pairs of bounds (= intervals)
        length = len(dims)
        intervals = np.zeros((length, 2))
        for abs_index, rel_index in zip(dims, range(length)):
            # Return interval for feature i (can be a fixed value when set beforehand)
            interval_i = self._create_interval(abs_index, solved_bounds, presetModel)
            intervals[rel_index] = interval_i
        return intervals
    def compute_probe_values(self, dims, isUpper=True, parallel=None, presetModel=None):
        """Compute bound values for randomly permuted "probe" copies of real
        features.  Returns one aggregated value per feasible probe
        (at most ``n_resampling``).
        """
        # Get model parameters
        init_model_state = self.best_init_model.model_state
        # Prepare parallel framework
        if parallel is None:
            parallel = joblib.Parallel(n_jobs=self.n_jobs, verbose=self.verbose)
        # Generate
        probe_queue = self._generate_probe_value_tasks(
            self.data,
            dims,
            isUpper,
            self.n_resampling,
            self.random_state,
            presetModel,
            init_model_state,
        )
        # Compute solution
        probe_results = parallel(map(joblib.delayed(_start_solver_worker), probe_queue))
        # probe_values.extend([probe.objective.value for probe in probe_results if probe.is_solved])
        candidates = defaultdict(list)
        for candidate in probe_results:
            # Only add bounds with feasible solutions
            if candidate.is_solved:
                candidates[candidate.probeID].append(candidate)
        probe_values = []
        for probes_for_ID in candidates.values():
            if isUpper:
                probe_values.append(
                    self.problem_type.get_cvxproblem_template.aggregate_max_candidates(
                        probes_for_ID
                    )
                )
            else:
                probe_values.append(
                    self.problem_type.get_cvxproblem_template.aggregate_min_candidates(
                        probes_for_ID
                    )
                )
        return np.array(probe_values)
    def _generate_relevance_bounds_tasks(
        self, dims, data, preset_model=None, best_model_state=None
    ):
        """Yield unsolved lower- and upper-bound problem objects for *dims*."""
        # Do not compute bounds for fixed features
        if preset_model is not None:
            dims = [di for di in dims if di not in preset_model]
        # Instantiate objects for computation later
        for di in dims:
            # Add Lower Bound problem(s) to work list
            yield from self.problem_type.get_cvxproblem_template.generate_lower_bound_problem(
                self.best_hyperparameters,
                self.init_constraints,
                best_model_state,
                data,
                di,
                preset_model,
            )
            # Add problem(s) for Upper bound
            yield from self.problem_type.get_cvxproblem_template.generate_upper_bound_problem(
                self.best_hyperparameters,
                self.init_constraints,
                best_model_state,
                data,
                di,
                preset_model,
            )
    def _generate_probe_value_tasks(
        self,
        data,
        dims,
        isUpper,
        n_resampling,
        random_state,
        preset_model=None,
        best_model_state=None,
    ):
        """Yield bound problems for randomly permuted ("shadow") feature copies.
        Each probe carries a ``probeID`` so that several candidate problems per
        probe can be aggregated afterwards.
        """
        if isUpper:
            factory = (
                self.problem_type.get_cvxproblem_template.generate_upper_bound_problem
            )
        else:
            factory = (
                self.problem_type.get_cvxproblem_template.generate_lower_bound_problem
            )
        # Random sample n_resampling shadow features by permuting real features and computing upper bound
        random_choice = random_state.choice(a=dims, size=n_resampling)
        # Instantiate objects
        for i, di in enumerate(random_choice):
            data_perm = permutate_feature_in_data(data, di, random_state)
            # We only use upper bounds as probe features
            yield from factory(
                self.best_hyperparameters,
                self.init_constraints,
                best_model_state,
                data_perm,
                di,
                preset_model,
                probeID=i,
            )
    def _create_interval(
        self, feature: int, solved_bounds: dict, presetModel: dict = None
    ):
        """Aggregate the solved problems for *feature* into a (lower, upper)
        pair; raises when no feasible bound pair exists.
        """
        # Return preset values for fixed features
        if presetModel is not None:
            if feature in presetModel:
                return presetModel[feature].squeeze()
        all_bounds = solved_bounds[feature]
        min_problems_candidates = [p for p in all_bounds if p.isLowerBound]
        max_problems_candidates = [p for p in all_bounds if not p.isLowerBound]
        if len(all_bounds) < 2:
            logging.error(
                f"(Some) relevance bounds for feature {feature} were not solved."
            )
            raise Exception("Infeasible bound(s).")
        lower_bound = (
            self.problem_type.get_cvxproblem_template.aggregate_min_candidates(
                min_problems_candidates
            )
        )
        upper_bound = (
            self.problem_type.get_cvxproblem_template.aggregate_max_candidates(
                max_problems_candidates
            )
        )
        return lower_bound, upper_bound
    def compute_single_preset_relevance_bounds(
        self, i: int, signed_preset_i: [float, float]
    ):
        """
        Recompute all relevance bounds with one feature restricted (preset).
        Parameters
        ----------
        i:
            index of the restricted feature
        signed_preset_i:
            restricted [lower, upper] range of feature i
            (set before optimization = preset)
        """
        preset = {i: signed_preset_i}
        rangevector = self.compute_multi_preset_relevance_bounds(preset)
        return rangevector
    def compute_multi_preset_relevance_bounds(self, preset, lupi_features=0):
        """
        Recompute relevance bounds with several features restricted to the
        given (normalized) ranges.
        Parameters
        ----------
        preset : dict
            mapping of feature index -> restricted range
        lupi_features : int
            number of privileged features (0 for none)
        """
        # The user is working with normalized values while we compute them unscaled
        if self.normalize:
            normalized = {}
            for k, v in preset.items():
                normalized[k] = np.asarray(v) * self.best_init_model.L1_factor
            preset = normalized
        # Add sign to presets
        preset = self._add_sign_to_preset(preset)
        # Calculate all bounds with feature i set to min_i
        if lupi_features > 0:
            rangevector, _ = self.get_normalized_lupi_intervals(
                lupi_features, presetModel=preset
            )
        else:
            rangevector, _ = self.get_normalized_intervals(presetModel=preset)
        return rangevector
    def _add_sign_to_preset(self, unsigned_presets):
        """
        We need signed presets for our convex problem definition later.
        We reuse the coefficients of the optimal model for this
        Parameters
        ----------
        unsigned_presets : dict
        Returns
        -------
        dict
            signed presets, or ``None`` when the presets exceed the L1 budget
        """
        signed_presets = {}
        # Obtain optimal model parameters
        w = self.best_init_model.model_state["w"]
        preset_sum = 0
        for i, preset in unsigned_presets.items():
            preset = np.array(preset)
            if preset.size == 1:
                # A scalar preset means a fixed value: use it as both ends.
                preset = np.repeat(preset, 2)
            unsigned_preset_i = np.sign(w[i]) * preset
            # accumulate maximal feature contribution
            preset_sum += unsigned_preset_i[1]  # Take upper preset
            signed_presets[i] = unsigned_preset_i
        # Check if unsigned_presets makes sense
        l1 = self.init_constraints["w_l1"]
        if preset_sum > l1:
            print("maximum L1 norm of presets: ", preset_sum)
            print("L1 allowed:", l1)
            print("Presets are not feasible. Try lowering values.")
            return
        return signed_presets
    def _postprocessing(self, L1, rangevector, round_to_zero=True):
        """Scale bounds by 1/L1 (when normalizing) and clamp tiny values to 0.

        NOTE(review): when ``self.normalize`` is False the clamp mutates the
        input array in place (the copy only happens in the normalize branch).
        """
        if self.normalize:
            assert L1 > 0
            rangevector = rangevector.copy() / L1
        if round_to_zero:
            rangevector[rangevector <= 1e-11] = 0
        return rangevector
    def grouping(self, interval, cutoff_threshold=0.55, method="single"):
        """Find feature clusters based on observed variance when changing feature contributions
        Parameters
        ----------
        cutoff_threshold : float, optional
            Cutoff value for the flat clustering step; decides at which height in the dendrogram the cut is made to determine groups.
        method : str, optional
            Linkage method used in the hierarchical clustering.
        Returns
        -------
        tuple
            ``(feature_clusters_, linkage_)`` -- the flat cluster labels and
            the scipy linkage matrix (also stored on ``self``).
        """
        # Do we have intervals?
        if self.best_init_model is None:
            raise Exception("Model needs to be fitted already.")
        d = len(interval)
        # Init arrays
        interval_constrained_to_min = np.zeros(
            (d, d, 2)
        )  # Save ranges (d,2-dim) for every constrained run (d-times)
        absolute_delta_bounds_summed_min = np.zeros((d, d, 2))
        interval_constrained_to_max = np.zeros(
            (d, d, 2)
        )  # Save ranges (d,2-dim) for every constrained run (d-times)
        absolute_delta_bounds_summed_max = np.zeros((d, d, 2))
        # Set weight for each dimension to minimum and maximum possible value and run optimization of all others
        # We retrieve the relevance bounds and calculate the absolute difference between them and non-constrained bounds
        for i in range(d):
            # min
            lowb = interval[i, 0]
            ranges = self.compute_single_preset_relevance_bounds(i, [lowb, lowb])
            diff = interval - ranges
            diff[i] = 0
            interval_constrained_to_min[i] = ranges
            absolute_delta_bounds_summed_min[i] = diff
            # max
            highb = interval[i, 1]
            ranges = self.compute_single_preset_relevance_bounds(i, [highb, highb])
            diff = interval - ranges
            diff[i] = 0
            interval_constrained_to_max[i] = ranges
            absolute_delta_bounds_summed_max[i] = diff
        # One row per feature: flattened deltas of the min-run then the max-run.
        feature_points = np.zeros((d, 2 * d * 2))
        for i in range(d):
            feature_points[i, : (2 * d)] = absolute_delta_bounds_summed_min[i].flatten()
            feature_points[i, (2 * d) :] = absolute_delta_bounds_summed_max[i].flatten()
        self.relevance_variance = feature_points
        # Calculate similarity using custom measure
        dist_mat = scipy.spatial.distance.pdist(feature_points, metric=distance)
        # Create linkage tree
        link = linkage(dist_mat, method=method, optimal_ordering=True)
        # Set cutoff at which threshold the linkage gets flattened (clustering)
        RATIO = cutoff_threshold
        threshold = RATIO * np.max(link[:, 2])  # max of branch lengths (distances)
        feature_clustering = fcluster(link, threshold, criterion="distance")
        self.feature_clusters_, self.linkage_ = feature_clustering, link
        return self.feature_clusters_, self.linkage_
def _get_necessary_dimensions(d: int, presetModel: dict = None, start=0):
dims = np.arange(start, d)
# if presetModel is not None:
# # Exclude fixed (preset) dimensions from being redundantly computed
# dims = [di for di in dims if di not in presetModel.keys()]
return dims
class FeatureClassifier:
    """Labels features by comparing relevance bounds with probe statistics.

    Labels: 0 = irrelevant, 1 = weakly relevant, 2 = strongly relevant.
    """
    def __init__(self, probes_low, probes_up, fpr=1e-4, verbose=0):
        # Prediction-interval statistics derived from permuted probe features.
        self.lower_stat = create_probe_statistic(probes_low, fpr, verbose=verbose)
        self.upper_stat = create_probe_statistic(probes_up, fpr, verbose=verbose)
        if verbose > 0:
            logging.info("**** Feature Selection ****")
            logging.info("Lower Probe Statistic")
            logging.info(self.lower_stat)
            logging.info("Upper Probe Statistic")
            logging.info(self.upper_stat)
    def classify(self, relevance_bounds):
        """
        Parameters
        ----------
        relevance_bounds : numpy.ndarray
            two dimensional array with relevance bounds
            first column corresponds to minrel and second to maxrel
        """
        weakly = relevance_bounds[:, 1] > self.upper_stat.upper_threshold
        strongly = relevance_bounds[:, 0] > self.lower_stat.upper_threshold
        both = np.logical_and(weakly, strongly)
        # ``np.int`` was removed in NumPy 1.24; use the builtin int dtype.
        prediction = np.zeros(relevance_bounds.shape[0], dtype=int)
        prediction[weakly] = 1
        prediction[both] = 2
        return prediction
@attr.s
class ProbeStatistic:
    """
    Collects the threshold values about the statistics
    from one kind of relevance bounds (minrel or maxrel).
    """
    # Decision thresholds of the probe prediction interval
    # (both equal the single probe value when only one probe was feasible).
    lower_threshold = attr.ib(type=float)
    upper_threshold = attr.ib(type=float)
    # Number of feasible probe values the statistic was computed from.
    n_probes = attr.ib(type=int)
def create_probe_statistic(probe_values, fpr, verbose=0):
    """Derive prediction-interval thresholds from permuted-probe bound values.

    The thresholds come from a Student-t prediction interval at the given
    false positive rate *fpr*; degenerate cases (zero or one feasible probe)
    collapse the interval to a point.
    """
    # Create prediction interval statistics based on randomly permutated probe features (based on real features)
    n = len(probe_values)
    if n == 0:
        # No feasible probe: thresholds collapse to zero, so any feature
        # with a positive bound will count as relevant.
        if verbose > 0:
            logging.info(
                "All probes were infeasible. All features considered relevant."
            )
        low_t = 0
        up_t = 0
    elif n == 1:
        low_t = up_t = probe_values[0]
    else:
        samples = np.asarray(probe_values)
        center = samples.mean()
        spread = samples.std()
        # Prediction-interval half width; ppf(fpr) is negative for small fpr.
        margin = stats.t(df=n - 1).ppf(fpr) * spread * np.sqrt(1 + (1 / n))
        low_t = center + margin
        up_t = center - margin
    return ProbeStatistic(low_t, up_t, n)
| [
"numpy.sqrt",
"numpy.array",
"logging.info",
"logging.error",
"scipy.cluster.hierarchy.fcluster",
"numpy.arange",
"numpy.repeat",
"numpy.asarray",
"numpy.max",
"scipy.cluster.hierarchy.linkage",
"numpy.concatenate",
"fri.utils.permutate_feature_in_data",
"scipy.spatial.distance.pdist",
"nu... | [((17591, 17610), 'numpy.arange', 'np.arange', (['start', 'd'], {}), '(start, d)\n', (17600, 17610), True, 'import numpy as np\n'), ((19141, 19160), 'attr.ib', 'attr.ib', ([], {'type': 'float'}), '(type=float)\n', (19148, 19160), False, 'import attr\n'), ((19183, 19202), 'attr.ib', 'attr.ib', ([], {'type': 'float'}), '(type=float)\n', (19190, 19202), False, 'import attr\n'), ((19218, 19235), 'attr.ib', 'attr.ib', ([], {'type': 'int'}), '(type=int)\n', (19225, 19235), False, 'import attr\n'), ((3002, 3038), 'numpy.concatenate', 'np.concatenate', (['[rb_norm, rb_l_norm]'], {}), '([rb_norm, rb_l_norm])\n', (3016, 3038), True, 'import numpy as np\n'), ((3779, 3834), 'numpy.concatenate', 'np.concatenate', (['[feature_classes, feature_classes_lupi]'], {}), '([feature_classes, feature_classes_lupi])\n', (3793, 3834), True, 'import numpy as np\n'), ((6086, 6103), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (6097, 6103), False, 'from collections import defaultdict\n'), ((6436, 6457), 'numpy.zeros', 'np.zeros', (['(length, 2)'], {}), '((length, 2))\n', (6444, 6457), True, 'import numpy as np\n'), ((7598, 7615), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (7609, 7615), False, 'from collections import defaultdict\n'), ((8350, 8372), 'numpy.array', 'np.array', (['probe_values'], {}), '(probe_values)\n', (8358, 8372), True, 'import numpy as np\n'), ((15322, 15341), 'numpy.zeros', 'np.zeros', (['(d, d, 2)'], {}), '((d, d, 2))\n', (15330, 15341), True, 'import numpy as np\n'), ((15467, 15486), 'numpy.zeros', 'np.zeros', (['(d, d, 2)'], {}), '((d, d, 2))\n', (15475, 15486), True, 'import numpy as np\n'), ((15525, 15544), 'numpy.zeros', 'np.zeros', (['(d, d, 2)'], {}), '((d, d, 2))\n', (15533, 15544), True, 'import numpy as np\n'), ((15670, 15689), 'numpy.zeros', 'np.zeros', (['(d, d, 2)'], {}), '((d, d, 2))\n', (15678, 15689), True, 'import numpy as np\n'), ((16586, 16610), 'numpy.zeros', 'np.zeros', (['(d, 2 * d * 2)'], 
{}), '((d, 2 * d * 2))\n', (16594, 16610), True, 'import numpy as np\n'), ((16938, 16999), 'scipy.spatial.distance.pdist', 'scipy.spatial.distance.pdist', (['feature_points'], {'metric': 'distance'}), '(feature_points, metric=distance)\n', (16966, 16999), False, 'import scipy\n'), ((17045, 17100), 'scipy.cluster.hierarchy.linkage', 'linkage', (['dist_mat'], {'method': 'method', 'optimal_ordering': '(True)'}), '(dist_mat, method=method, optimal_ordering=True)\n', (17052, 17100), False, 'from scipy.cluster.hierarchy import fcluster, linkage\n'), ((17328, 17375), 'scipy.cluster.hierarchy.fcluster', 'fcluster', (['link', 'threshold'], {'criterion': '"""distance"""'}), "(link, threshold, criterion='distance')\n", (17336, 17375), False, 'from scipy.cluster.hierarchy import fcluster, linkage\n'), ((18767, 18799), 'numpy.logical_and', 'np.logical_and', (['weakly', 'strongly'], {}), '(weakly, strongly)\n', (18781, 18799), True, 'import numpy as np\n'), ((18821, 18870), 'numpy.zeros', 'np.zeros', (['relevance_bounds.shape[0]'], {'dtype': 'np.int'}), '(relevance_bounds.shape[0], dtype=np.int)\n', (18829, 18870), True, 'import numpy as np\n'), ((1936, 1993), 'joblib.Parallel', 'joblib.Parallel', ([], {'n_jobs': 'self.n_jobs', 'verbose': 'self.verbose'}), '(n_jobs=self.n_jobs, verbose=self.verbose)\n', (1951, 1993), False, 'import joblib\n'), ((4270, 4327), 'joblib.Parallel', 'joblib.Parallel', ([], {'n_jobs': 'self.n_jobs', 'verbose': 'self.verbose'}), '(n_jobs=self.n_jobs, verbose=self.verbose)\n', (4285, 4327), False, 'import joblib\n'), ((5859, 5916), 'joblib.Parallel', 'joblib.Parallel', ([], {'n_jobs': 'self.n_jobs', 'verbose': 'self.verbose'}), '(n_jobs=self.n_jobs, verbose=self.verbose)\n', (5874, 5916), False, 'import joblib\n'), ((7036, 7093), 'joblib.Parallel', 'joblib.Parallel', ([], {'n_jobs': 'self.n_jobs', 'verbose': 'self.verbose'}), '(n_jobs=self.n_jobs, verbose=self.verbose)\n', (7051, 7093), False, 'import joblib\n'), ((10184, 10233), 
'fri.utils.permutate_feature_in_data', 'permutate_feature_in_data', (['data', 'di', 'random_state'], {}), '(data, di, random_state)\n', (10209, 10233), False, 'from fri.utils import permutate_feature_in_data\n'), ((11090, 11175), 'logging.error', 'logging.error', (['f"""(Some) relevance bounds for feature {feature} were not solved."""'], {}), "(f'(Some) relevance bounds for feature {feature} were not solved.'\n )\n", (11103, 11175), False, 'import logging\n'), ((13618, 13634), 'numpy.array', 'np.array', (['preset'], {}), '(preset)\n', (13626, 13634), True, 'import numpy as np\n'), ((17243, 17261), 'numpy.max', 'np.max', (['link[:, 2]'], {}), '(link[:, 2])\n', (17249, 17261), True, 'import numpy as np\n'), ((18104, 18147), 'logging.info', 'logging.info', (['"""**** Feature Selection ****"""'], {}), "('**** Feature Selection ****')\n", (18116, 18147), False, 'import logging\n'), ((18160, 18197), 'logging.info', 'logging.info', (['"""Lower Probe Statistic"""'], {}), "('Lower Probe Statistic')\n", (18172, 18197), False, 'import logging\n'), ((18210, 18239), 'logging.info', 'logging.info', (['self.lower_stat'], {}), '(self.lower_stat)\n', (18222, 18239), False, 'import logging\n'), ((18252, 18289), 'logging.info', 'logging.info', (['"""Upper Probe Statistic"""'], {}), "('Upper Probe Statistic')\n", (18264, 18289), False, 'import logging\n'), ((18302, 18331), 'logging.info', 'logging.info', (['self.upper_stat'], {}), '(self.upper_stat)\n', (18314, 18331), False, 'import logging\n'), ((19487, 19564), 'logging.info', 'logging.info', (['"""All probes were infeasible. All features considered relevant."""'], {}), "('All probes were infeasible. 
All features considered relevant.')\n", (19499, 19564), False, 'import logging\n'), ((20010, 20034), 'numpy.asarray', 'np.asarray', (['probe_values'], {}), '(probe_values)\n', (20020, 20034), True, 'import numpy as np\n'), ((5954, 5990), 'joblib.delayed', 'joblib.delayed', (['_start_solver_worker'], {}), '(_start_solver_worker)\n', (5968, 5990), False, 'import joblib\n'), ((7423, 7459), 'joblib.delayed', 'joblib.delayed', (['_start_solver_worker'], {}), '(_start_solver_worker)\n', (7437, 7459), False, 'import joblib\n'), ((13693, 13713), 'numpy.repeat', 'np.repeat', (['preset', '(2)'], {}), '(preset, 2)\n', (13702, 13713), True, 'import numpy as np\n'), ((13746, 13759), 'numpy.sign', 'np.sign', (['w[i]'], {}), '(w[i])\n', (13753, 13759), True, 'import numpy as np\n'), ((12571, 12584), 'numpy.asarray', 'np.asarray', (['v'], {}), '(v)\n', (12581, 12584), True, 'import numpy as np\n'), ((20157, 20175), 'numpy.sqrt', 'np.sqrt', (['(1 + 1 / n)'], {}), '(1 + 1 / n)\n', (20164, 20175), True, 'import numpy as np\n'), ((20233, 20251), 'numpy.sqrt', 'np.sqrt', (['(1 + 1 / n)'], {}), '(1 + 1 / n)\n', (20240, 20251), True, 'import numpy as np\n'), ((20124, 20141), 'scipy.stats.t', 'stats.t', ([], {'df': '(n - 1)'}), '(df=n - 1)\n', (20131, 20141), False, 'from scipy import stats\n'), ((20200, 20217), 'scipy.stats.t', 'stats.t', ([], {'df': '(n - 1)'}), '(df=n - 1)\n', (20207, 20217), False, 'from scipy import stats\n')] |
import datetime
import matplotlib.pyplot as plt
import numpy as np
import geospacelab.visualization.mpl.geomap.geodashboards as geomap
def test_ampere():
    """Plot a SuperDARN potential map for 2016-03-15 01:10 UT (northern hemisphere).

    Loads the potential-map data file into a GeoDashboard, draws a polar
    MLT-fixed panel with coastlines, potential contours, and grid lines, then
    saves the figure as 'superdarn_example'.
    """
    start = datetime.datetime(2016, 3, 15, 0)
    stop = datetime.datetime(2016, 3, 15, 23, 59)
    snapshot_ut = datetime.datetime(2016, 3, 15, 1, 10)
    hemisphere = 'N'
    file_mode = 'assigned'
    # Full path(s) to the SuperDARN potential-map file(s) to load.
    input_files = ['/home/lei/afys-data/SuperDARN/PotentialMap/2016/test.dat']

    dashboard = geomap.GeoDashboard(dt_fr=start, dt_to=stop, figure_config={'figsize': (8, 8)})
    dashboard.dock(datasource_contents=['superdarn', 'potmap'], load_mode=file_mode, data_file_paths=input_files)
    dashboard.set_layout(1, 1)

    ds_superdarn = dashboard.datasets[1]
    phi = dashboard.assign_variable('GRID_phi', dataset_index=1)
    dts = dashboard.assign_variable('DATETIME', dataset_index=1).value.flatten()
    mlat = dashboard.assign_variable('GRID_MLAT', dataset_index=1)
    mlon = dashboard.assign_variable('GRID_MLON', dataset_index=1)
    mlt = dashboard.assign_variable('GRID_MLT', dataset_index=1)

    ind_t = ds_superdarn.get_time_ind(ut=snapshot_ut)

    # Polar map panel in magnetic-local-time coordinates.
    pid = dashboard.add_polar_map(row_ind=0, col_ind=0, style='mlt-fixed', cs='AACGM', mlt_c=0., pole=hemisphere, ut=snapshot_ut, boundary_lat=50, mirror_south=True)
    panel = dashboard.panels[pid]
    panel.add_coastlines()

    phi_snapshot = phi.value[ind_t]
    mlat_snapshot = mlat.value[ind_t]
    mlt_snapshot = mlt.value[ind_t]
    mlon_snapshot = mlon.value[ind_t]
    grid_mlat, grid_mlt, grid_phi = ds_superdarn.postprocess_roll(mlat_snapshot, mlt_snapshot, phi_snapshot)

    # Contour levels in volts.
    # NOTE(review): -3e3 appears to be missing from an otherwise symmetric
    # level set -- confirm whether this is intentional.
    levels = np.array([-21e3, -18e3, -15e3, -12e3, -9e3, -6e3, 3e3, 6e3, 9e3, 12e3, 15e3, 18e3, 21e3])
    ict = panel.add_contour(grid_phi, coords={'lat': grid_mlat, 'lon': None, 'mlt': grid_mlt}, cs='AACGM', colors='b', levels=levels)
    panel.add_gridlines(lat_res=5, lon_label_separator=5)

    polestr = 'North' if hemisphere == 'N' else 'South'

    plt.savefig('superdarn_example', dpi=300)
    plt.show()
# Script entry point: run the SuperDARN potential-map example directly.
if __name__ == "__main__":
    test_ampere()
| [
"datetime.datetime",
"matplotlib.pyplot.savefig",
"numpy.array",
"geospacelab.visualization.mpl.geomap.geodashboards.GeoDashboard",
"matplotlib.pyplot.show"
] | [((169, 202), 'datetime.datetime', 'datetime.datetime', (['(2016)', '(3)', '(15)', '(0)'], {}), '(2016, 3, 15, 0)\n', (186, 202), False, 'import datetime\n'), ((215, 253), 'datetime.datetime', 'datetime.datetime', (['(2016)', '(3)', '(15)', '(23)', '(59)'], {}), '(2016, 3, 15, 23, 59)\n', (232, 253), False, 'import datetime\n'), ((266, 303), 'datetime.datetime', 'datetime.datetime', (['(2016)', '(3)', '(15)', '(1)', '(10)'], {}), '(2016, 3, 15, 1, 10)\n', (283, 303), False, 'import datetime\n'), ((596, 681), 'geospacelab.visualization.mpl.geomap.geodashboards.GeoDashboard', 'geomap.GeoDashboard', ([], {'dt_fr': 'dt_fr', 'dt_to': 'dt_to', 'figure_config': "{'figsize': (8, 8)}"}), "(dt_fr=dt_fr, dt_to=dt_to, figure_config={'figsize': (8, 8)}\n )\n", (615, 681), True, 'import geospacelab.visualization.mpl.geomap.geodashboards as geomap\n'), ((2067, 2199), 'numpy.array', 'np.array', (['[-21000.0, -18000.0, -15000.0, -12000.0, -9000.0, -6000.0, 3000.0, 6000.0, \n 9000.0, 12000.0, 15000.0, 18000.0, 21000.0]'], {}), '([-21000.0, -18000.0, -15000.0, -12000.0, -9000.0, -6000.0, 3000.0,\n 6000.0, 9000.0, 12000.0, 15000.0, 18000.0, 21000.0])\n', (2075, 2199), True, 'import numpy as np\n'), ((2760, 2801), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""superdarn_example"""'], {'dpi': '(300)'}), "('superdarn_example', dpi=300)\n", (2771, 2801), True, 'import matplotlib.pyplot as plt\n'), ((2806, 2816), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2814, 2816), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
from sklearn.utils import gen_batches
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
import keras
if __name__ == '__main__':
    # Fix the RNG so the generated dataset is reproducible.
    np.random.seed(12227)

    # Synthetic 3-class problem: 1500 samples x 400 features.
    X, y = make_classification(n_samples=1500, n_features=400, n_classes=3, n_clusters_per_class=1)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)

    # One-hot encode the integer labels for a softmax output layer.
    y_train = keras.utils.to_categorical(y_train)
    y_test = keras.utils.to_categorical(y_test)

    import h5py
    # Context manager guarantees the file is closed even if a write fails.
    with h5py.File('multiclass_data.h5', 'w') as h5f:
        h5f['X_train'] = X_train
        h5f['y_train'] = y_train
        h5f['X_test'] = X_test
        h5f['y_test'] = y_test
"sklearn.model_selection.train_test_split",
"h5py.File",
"keras.utils.to_categorical",
"numpy.random.seed",
"sklearn.datasets.make_classification"
] | [((211, 232), 'numpy.random.seed', 'np.random.seed', (['(12227)'], {}), '(12227)\n', (225, 232), True, 'import numpy as np\n'), ((247, 339), 'sklearn.datasets.make_classification', 'make_classification', ([], {'n_samples': '(1500)', 'n_features': '(400)', 'n_classes': '(3)', 'n_clusters_per_class': '(1)'}), '(n_samples=1500, n_features=400, n_classes=3,\n n_clusters_per_class=1)\n', (266, 339), False, 'from sklearn.datasets import make_classification\n'), ((378, 432), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.1)', 'random_state': '(42)'}), '(X, y, test_size=0.1, random_state=42)\n', (394, 432), False, 'from sklearn.model_selection import train_test_split\n'), ((450, 485), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_train'], {}), '(y_train)\n', (476, 485), False, 'import keras\n'), ((500, 534), 'keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_test'], {}), '(y_test)\n', (526, 534), False, 'import keras\n'), ((565, 601), 'h5py.File', 'h5py.File', (['"""multiclass_data.h5"""', '"""w"""'], {}), "('multiclass_data.h5', 'w')\n", (574, 601), False, 'import h5py\n')] |
from src.find_plate import find_plate
import numpy as np
import imutils
import cv2
def replace_plate(image_PIL):
    """Detect the licence plate in a car photo and overlay the template plate.

    Writes './static/results/original.jpg' (the input) and
    './static/results/result_replace.jpg' (the composited result).
    Returns False when no plate contour is found, True otherwise.
    """
    # Work in OpenCV's BGR colour order; grayscale is used for edge detection.
    frame = cv2.cvtColor(np.array(image_PIL), cv2.COLOR_RGB2BGR)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 170, 200)

    # Candidate plate contours, best candidate first.
    candidates = find_plate(edges)
    if len(candidates) == 0:
        return False
    plate_contour = candidates[0]

    # Visualise the detected plate area (kept for parity with the original flow).
    plate_mask = np.zeros(gray.shape, np.uint8)
    plate_region = cv2.drawContours(plate_mask, [plate_contour], 0, 255, -1)
    plate_region = cv2.bitwise_and(frame, frame, mask=plate_mask)

    # Warp the template plate onto the detected contour via a homography.
    template = cv2.imread('./static/plate.png')
    template_corners = np.array([[template.shape[1], 0], [0, 0], [0, template.shape[0]], [template.shape[1], template.shape[0]]])
    homography, status = cv2.findHomography(template_corners, plate_contour)
    warped_plate = cv2.warpPerspective(template, homography, (frame.shape[1], frame.shape[0]))

    # Mask of the warped plate; blank that area in the photo, then composite.
    warped_gray = cv2.cvtColor(warped_plate, cv2.COLOR_BGR2GRAY)
    ret, warped_mask = cv2.threshold(warped_gray, 10, 255, cv2.THRESH_BINARY)
    warped_mask_inv = cv2.bitwise_not(warped_mask)
    blanked = cv2.bitwise_and(frame, frame, mask=warped_mask_inv)
    result = cv2.bitwise_or(blanked, warped_plate)

    cv2.imwrite('./static/results/original.jpg', frame.copy())
    cv2.imwrite('./static/results/result_replace.jpg', result)
    return True
| [
"cv2.imwrite",
"cv2.drawContours",
"cv2.findHomography",
"cv2.threshold",
"cv2.bitwise_and",
"numpy.array",
"numpy.zeros",
"cv2.warpPerspective",
"cv2.bitwise_not",
"cv2.cvtColor",
"cv2.bitwise_or",
"src.find_plate.find_plate",
"cv2.Canny",
"cv2.imread"
] | [((319, 356), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (331, 356), False, 'import cv2\n'), ((486, 515), 'cv2.Canny', 'cv2.Canny', (['img_gray', '(170)', '(200)'], {}), '(img_gray, 170, 200)\n', (495, 515), False, 'import cv2\n'), ((559, 576), 'src.find_plate.find_plate', 'find_plate', (['edged'], {}), '(edged)\n', (569, 576), False, 'from src.find_plate import find_plate\n'), ((687, 721), 'numpy.zeros', 'np.zeros', (['img_gray.shape', 'np.uint8'], {}), '(img_gray.shape, np.uint8)\n', (695, 721), True, 'import numpy as np\n'), ((738, 781), 'cv2.drawContours', 'cv2.drawContours', (['mask', '[plate]', '(0)', '(255)', '(-1)'], {}), '(mask, [plate], 0, 255, -1)\n', (754, 781), False, 'import cv2\n'), ((797, 833), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img', 'img'], {'mask': 'mask'}), '(img, img, mask=mask)\n', (812, 833), False, 'import cv2\n'), ((887, 919), 'cv2.imread', 'cv2.imread', (['"""./static/plate.png"""'], {}), "('./static/plate.png')\n", (897, 919), False, 'import cv2\n'), ((935, 1050), 'numpy.array', 'np.array', (['[[img_plate.shape[1], 0], [0, 0], [0, img_plate.shape[0]], [img_plate.shape\n [1], img_plate.shape[0]]]'], {}), '([[img_plate.shape[1], 0], [0, 0], [0, img_plate.shape[0]], [\n img_plate.shape[1], img_plate.shape[0]]])\n', (943, 1050), True, 'import numpy as np\n'), ((1090, 1124), 'cv2.findHomography', 'cv2.findHomography', (['pts_src', 'plate'], {}), '(pts_src, plate)\n', (1108, 1124), False, 'import cv2\n'), ((1165, 1228), 'cv2.warpPerspective', 'cv2.warpPerspective', (['img_plate', 'h', '(img.shape[1], img.shape[0])'], {}), '(img_plate, h, (img.shape[1], img.shape[0]))\n', (1184, 1228), False, 'import cv2\n'), ((1341, 1384), 'cv2.cvtColor', 'cv2.cvtColor', (['new_plate', 'cv2.COLOR_BGR2GRAY'], {}), '(new_plate, cv2.COLOR_BGR2GRAY)\n', (1353, 1384), False, 'import cv2\n'), ((1401, 1458), 'cv2.threshold', 'cv2.threshold', (['new_plate_gray', '(10)', '(255)', 'cv2.THRESH_BINARY'], 
{}), '(new_plate_gray, 10, 255, cv2.THRESH_BINARY)\n', (1414, 1458), False, 'import cv2\n'), ((1474, 1495), 'cv2.bitwise_not', 'cv2.bitwise_not', (['mask'], {}), '(mask)\n', (1489, 1495), False, 'import cv2\n'), ((1557, 1597), 'cv2.bitwise_and', 'cv2.bitwise_and', (['img', 'img'], {'mask': 'mask_inv'}), '(img, img, mask=mask_inv)\n', (1572, 1597), False, 'import cv2\n'), ((1641, 1674), 'cv2.bitwise_or', 'cv2.bitwise_or', (['img_bo', 'new_plate'], {}), '(img_bo, new_plate)\n', (1655, 1674), False, 'import cv2\n'), ((1797, 1860), 'cv2.imwrite', 'cv2.imwrite', (['"""./static/results/result_replace.jpg"""', 'final_image'], {}), "('./static/results/result_replace.jpg', final_image)\n", (1808, 1860), False, 'import cv2\n'), ((264, 283), 'numpy.array', 'np.array', (['image_PIL'], {}), '(image_PIL)\n', (272, 283), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Submission for CVPR 2020 CLVision Challenge
# Copyright (c) 2020. <NAME>, <NAME>, and <NAME>. All rights reserved.
# Copyrights licensed under the CC-BY-NC 4.0 License.
# See the accompanying LICENSE file for terms.
# Based on the utils.train_test.py by <NAME>, <NAME>,
# <NAME>, <NAME> (Under the CC BY 4.0 License)
# From https://github.com/vlomonaco/cvpr_clvision_challenge
# Python 2-3 compatible
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import numpy as np
import torch
from utils.common import check_ext_mem, check_ram_usage
class ReservoirNet(torch.nn.Module) :
    """Backbone network paired with a reservoir-sampled replay memory.

    `modelClass` is instantiated as the trainable network; examples for
    replay are stored in and drawn from a `MemoryReservoir`.
    """
    def __init__(self, modelClass, memorySize, memoryDataDim, memoryTargetDim, sameDeviceMemory=True) :
        super(ReservoirNet, self).__init__()
        self.net = modelClass()
        self.memory = MemoryReservoir(memorySize, memoryDataDim, memoryTargetDim)
        # NOTE(review): stored but never read in this class -- presumably a
        # flag for keeping the memory on the model's device; confirm.
        self.sameDeviceMemory = sameDeviceMemory
    def forward(self, x) :
        # Delegate the forward pass to the wrapped network.
        return self.net(x)
    def addToMemory(self, tasks, data, targets) :
        # Offer a batch of examples to the reservoir.
        self.memory.add(tasks, data, targets)
    def sampleFromMemory(self, size) :
        # Draw `size` stored examples from the reservoir.
        return self.memory.sample(size)
class MemoryReservoir(torch.nn.Module) :
    """Fixed-size replay buffer filled by reservoir sampling.

    Once the buffer is full, each new example replaces a random stored one
    with probability memorySize / #observed. Buffers are registered on the
    module so they follow device moves and are saved in checkpoints.
    """
    def __init__(self, memorySize, dataDim, targetDim=(1,)) :
        super(MemoryReservoir, self).__init__()
        self.register_buffer("memoryData", torch.empty([memorySize, *dataDim], dtype=torch.float))
        self.register_buffer("memoryTarget", torch.empty([memorySize, *targetDim], dtype=torch.long))
        # Number of examples seen so far (a buffer so it is checkpointed too).
        self.register_buffer("observed", torch.zeros([1], dtype=torch.long))

    def add(self, tasks, inputs, targets) :
        """Offer a batch of examples to the reservoir; `tasks` is unused here."""
        for i in range(len(targets)) :
            if self.observed < self.memoryTarget.size(0) :
                # Buffer not full yet: append unconditionally.
                self.memoryData[self.observed] = inputs[i]
                self.memoryTarget[self.observed] = targets[i]
            else :
                # Classic reservoir step: keep with prob. memorySize/observed.
                pos = torch.randint(self.observed.item(), (1,)).item()
                if pos < self.memoryTarget.size(0) :
                    self.memoryData[pos] = inputs[i]
                    self.memoryTarget[pos] = targets[i]
            self.observed += 1

    def sample(self, size) :
        """Draw `size` examples uniformly (with replacement) from the buffer.

        Returns empty float/long tensors when nothing has been stored yet.
        """
        if self.observed.item() == 0 :
            return (torch.FloatTensor().to(self.memoryData.device),
                    torch.LongTensor().to(self.memoryData.device))
        filled = min(self.memoryTarget.size(0), self.observed.item())
        idx = torch.randint(filled, (size,))
        # Advanced indexing replaces the original per-element Python copy loop.
        datas = self.memoryData[idx].clone()
        targets = self.memoryTarget[idx].reshape(size).clone()
        return datas, targets
def train_net(featureModel, classifier, optimizer, epochs, batchSize, batchSize_backbone, device, x, y, t, preproc=None):
    """Train `classifier` on features extracted by the frozen `featureModel`.

    Each epoch shuffles the task data, extracts features in backbone-sized
    batches, and optimizes the classifier on mini-batches made of half fresh
    features and half examples replayed from the classifier's memory.

    Returns:
        (acc, avg_loss, stats): accuracy and average loss of the last epoch,
        plus per-epoch disk/RAM usage statistics.
    """
    cur_ep = 0
    cur_train_t = t  # NOTE(review): assigned but never used afterwards.
    stats = {"ram": [], "disk": []}
    if preproc:
        x = preproc(x)
    train_x = torch.from_numpy(x).type(torch.FloatTensor)
    train_y = torch.from_numpy(y).type(torch.LongTensor)
    acc = None
    classifier.train()
    for ep in range(epochs):
        stats['disk'].append(check_ext_mem("cl_ext_mem"))
        stats['ram'].append(check_ram_usage())
        correct_cnt, avg_loss = 0, 0
        # Fresh shuffle of the sample order each epoch.
        order = torch.randperm(train_y.size(0))
        # ceil() so the final partial backbone batch is not dropped.
        iters_backbone = int(np.ceil(order.size(0)/batchSize_backbone))
        for it in range(iters_backbone):
            start_backbone = it * batchSize_backbone
            end_backbone = (it + 1) * batchSize_backbone
            x_backbone = train_x[order[start_backbone:end_backbone]].to(device)
            y_backbone = train_y[order[start_backbone:end_backbone]].to(device)
            # The feature extractor is frozen: no gradients flow through it.
            with torch.no_grad():
                features = featureModel(x_backbone)
            # Floor division: a trailing partial mini-batch is skipped here.
            iters = features.size(0) // (batchSize//2)
            # NOTE(review): this inner loop reuses the name `it`, shadowing the
            # backbone-loop index; the acc/avg_loss normalisation after the
            # loops therefore uses the inner loop's final value.
            for it in range(iters):
                start = it * (batchSize//2)
                end = (it + 1) * (batchSize//2)
                # Half the mini-batch comes from replayed memory samples.
                x_memo, y_memo = classifier.sampleFromMemory(batchSize//2)
                x_mb = torch.cat((features[start:end], x_memo))
                y_mb = torch.cat((y_backbone[start:end], y_memo))
                optimizer.zero_grad()
                logits = classifier(x_mb)
                pred_label = torch.argmax(logits, dim=1)
                correct_cnt += (pred_label == y_mb).sum()
                loss = torch.nn.functional.cross_entropy(logits, y_mb)
                avg_loss += loss.item()
                loss.backward()
                optimizer.step()
            # NOTE(review): only the LAST mini-batch slice (`start:end` from the
            # loop above) is stored, and the first argument (`tasks`) receives
            # the labels -- confirm both are intentional.
            classifier.addToMemory(y_backbone[start:end], features[start:end], y_backbone[start:end])
        acc = correct_cnt.item() / \
            ((it + 1) * y_mb.size(0))
        avg_loss /= ((it + 1) * y_mb.size(0))
        cur_ep += 1
    return acc, avg_loss, stats
def test_multitask(featureModel, classifier, batchSize, device, test_set, preproc=None, multi_heads=[], verbose=True):
acc_x_task = []
stats = {'accs': [], 'acc': []}
preds = []
classifier.eval()
for (x, y), t in test_set:
if preproc:
x = preproc(x)
if multi_heads != [] and len(multi_heads) > t:
if verbose:
print("Using head: ", t)
classifier = multi_heads[t]
acc = None
test_x = torch.from_numpy(x).type(torch.FloatTensor)
test_y = torch.from_numpy(y).type(torch.LongTensor)
correct_cnt = 0
total = 0
with torch.no_grad():
### Change start here ###
#iters = test_y.size(0) // batchSize + 1
iters = int(np.ceil(test_y.size(0)/batchSize))
### Change end here ###
for it in range(iters):
start = it * batchSize
end = (it + 1) * batchSize
total += end - start
x_mb = test_x[start:end].to(device)
y_mb = test_y[start:end].to(device)
pred_label = torch.argmax(classifier(featureModel(x_mb)), dim=1)
correct_cnt += (pred_label == y_mb).sum()
preds += list(pred_label.data.cpu().numpy())
acc = correct_cnt.item() / test_y.shape[0]
if verbose:
print('TEST Acc. Task {}==>>> acc: {:.3f}'.format(t, acc))
acc_x_task.append(acc)
stats['accs'].append(acc)
stats['acc'].append(np.mean(acc_x_task))
return stats, preds
| [
"numpy.mean",
"utils.common.check_ext_mem",
"torch.LongTensor",
"torch.FloatTensor",
"torch.from_numpy",
"torch.randint",
"torch.nn.functional.cross_entropy",
"torch.no_grad",
"utils.common.check_ram_usage",
"torch.empty",
"torch.zeros",
"torch.cat",
"torch.argmax"
] | [((6902, 6921), 'numpy.mean', 'np.mean', (['acc_x_task'], {}), '(acc_x_task)\n', (6909, 6921), True, 'import numpy as np\n'), ((1441, 1495), 'torch.empty', 'torch.empty', (['[memorySize, *dataDim]'], {'dtype': 'torch.float'}), '([memorySize, *dataDim], dtype=torch.float)\n', (1452, 1495), False, 'import torch\n'), ((1542, 1597), 'torch.empty', 'torch.empty', (['[memorySize, *targetDim]'], {'dtype': 'torch.long'}), '([memorySize, *targetDim], dtype=torch.long)\n', (1553, 1597), False, 'import torch\n'), ((1640, 1674), 'torch.zeros', 'torch.zeros', (['[1]'], {'dtype': 'torch.long'}), '([1], dtype=torch.long)\n', (1651, 1674), False, 'import torch\n'), ((2583, 2651), 'torch.empty', 'torch.empty', (['[size]'], {'dtype': 'torch.long', 'device': 'self.memoryData.device'}), '([size], dtype=torch.long, device=self.memoryData.device)\n', (2594, 2651), False, 'import torch\n'), ((2750, 2780), 'torch.randint', 'torch.randint', (['lenght', '(size,)'], {}), '(lenght, (size,))\n', (2763, 2780), False, 'import torch\n'), ((3251, 3270), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (3267, 3270), False, 'import torch\n'), ((3309, 3328), 'torch.from_numpy', 'torch.from_numpy', (['y'], {}), '(y)\n', (3325, 3328), False, 'import torch\n'), ((3452, 3479), 'utils.common.check_ext_mem', 'check_ext_mem', (['"""cl_ext_mem"""'], {}), "('cl_ext_mem')\n", (3465, 3479), False, 'from utils.common import check_ext_mem, check_ram_usage\n'), ((3509, 3526), 'utils.common.check_ram_usage', 'check_ram_usage', ([], {}), '()\n', (3524, 3526), False, 'from utils.common import check_ext_mem, check_ram_usage\n'), ((5982, 5997), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5995, 5997), False, 'import torch\n'), ((2292, 2311), 'torch.FloatTensor', 'torch.FloatTensor', ([], {}), '()\n', (2309, 2311), False, 'import torch\n'), ((2357, 2375), 'torch.LongTensor', 'torch.LongTensor', ([], {}), '()\n', (2373, 2375), False, 'import torch\n'), ((4157, 4172), 'torch.no_grad', 
'torch.no_grad', ([], {}), '()\n', (4170, 4172), False, 'import torch\n'), ((4524, 4564), 'torch.cat', 'torch.cat', (['(features[start:end], x_memo)'], {}), '((features[start:end], x_memo))\n', (4533, 4564), False, 'import torch\n'), ((4588, 4630), 'torch.cat', 'torch.cat', (['(y_backbone[start:end], y_memo)'], {}), '((y_backbone[start:end], y_memo))\n', (4597, 4630), False, 'import torch\n'), ((4743, 4770), 'torch.argmax', 'torch.argmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (4755, 4770), False, 'import torch\n'), ((4853, 4900), 'torch.nn.functional.cross_entropy', 'torch.nn.functional.cross_entropy', (['logits', 'y_mb'], {}), '(logits, y_mb)\n', (4886, 4900), False, 'import torch\n'), ((5821, 5840), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (5837, 5840), False, 'import torch\n'), ((5882, 5901), 'torch.from_numpy', 'torch.from_numpy', (['y'], {}), '(y)\n', (5898, 5901), False, 'import torch\n')] |
from os import listdir
from os.path import isfile, join
from PIL import Image
import numpy as np
import deep_nn_step_by_step as dnn
from sklearn.neural_network import MLPClassifier
from sklearn.externals import joblib
def flatten_image(img, is_car=True, flat_image_size=3072):
    """Flatten an image into a (1, flat_image_size) row vector plus a label.

    Args:
        img: PIL-style image (any object with `getdata()` and `size`).
        is_car: True -> label 1, False -> label 0.
        flat_image_size: expected value count (width * height * 3 channels).

    Returns:
        (np_flat, np_label): arrays of shape (1, flat_image_size) and (1, 1).

    Raises:
        ValueError: if the image dimensions do not match `flat_image_size`.
    """
    flat = list(img.getdata())
    label = 1 if is_car else 0
    actual_size = img.size[0] * img.size[1] * 3
    if actual_size != flat_image_size:
        # Fail loudly: the original code only printed here and then crashed
        # in the reshape below with a less helpful message.
        raise ValueError("Unexpected size %d, expecting %d" % (actual_size, flat_image_size))
    np_flat = np.array(flat).reshape(1, flat_image_size)
    np_label = np.array([label]).reshape(1, 1)
    return np_flat, np_label
def flatten_image_from_path(path="image_data/cropped_images/car/6b6777a5-ff5c-4f11-9f1c-8a98ba75e2f4_car_0.jpg", is_car=True, flat_image_size=3072):
    """Load an image from disk and flatten it via `flatten_image`."""
    return flatten_image(Image.open(path), is_car, flat_image_size)
def _flatten_dir(directory, is_car, flat_image_size):
    """Flatten every image file in `directory` into (X, y) arrays."""
    flattened = [flatten_image_from_path(join(directory, f), is_car, flat_image_size)
                 for f in listdir(directory) if isfile(join(directory, f))]
    X = np.array([fc[0][0] for fc in flattened]).reshape(len(flattened), flat_image_size)
    y = np.array([fc[1][0] for fc in flattened]).reshape(len(flattened), 1)
    return X, y

def flatten_images():
    """Flatten the car and not-car image folders into feature/label arrays.

    Returns:
        (car_X, car_y, not_car_X, not_car_y) with one row per image file.
    """
    flat_image_size = 32 * 32 * 3
    # The two folders differ only in path and label; one helper handles both.
    flatten_cars_X, flatten_cars_y = _flatten_dir("./image_data/cropped_images/car", True, flat_image_size)
    flatten_not_cars_X, flatten_not_cars_y = _flatten_dir("./image_data/cropped_images/not_car", False, flat_image_size)
    return flatten_cars_X, flatten_cars_y, flatten_not_cars_X, flatten_not_cars_y
def split_data(flatten_cars_X, flatten_cars_y, flatten_not_cars_X, flatten_not_cars_y, split_ratio=0.7):
    """Split car / not-car arrays into train and test sets, class-balanced by ratio."""
    n_car_train = int(flatten_cars_X.shape[0] * split_ratio)
    n_other_train = int(flatten_not_cars_X.shape[0] * split_ratio)
    train_X = np.append(flatten_cars_X[:n_car_train, :], flatten_not_cars_X[:n_other_train, :], axis=0)
    train_y = np.append(flatten_cars_y[:n_car_train, :], flatten_not_cars_y[:n_other_train, :], axis=0)
    test_X = np.append(flatten_cars_X[n_car_train:, :], flatten_not_cars_X[n_other_train:, :], axis=0)
    test_y = np.append(flatten_cars_y[n_car_train:, :], flatten_not_cars_y[n_other_train:, :], axis=0)
    return train_X, train_y, test_X, test_y
def test_custom_model(train_X, train_y, test_X, test_y, layers, alpha):
    """Train and evaluate the hand-rolled deep NN (deep_nn_step_by_step).

    Trains for a fixed 20000 iterations with learning rate `alpha` and the
    layer sizes in `layers`, then prints and returns the test accuracy.
    """
    layers_dims = layers
    # The custom NN expects features as columns (n_features x n_samples).
    x_train_deep = np.array(train_X).T
    y_train_deep = np.array([train_y])
    parameters = dnn.L_layer_model(x_train_deep, y_train_deep, layers_dims, learning_rate = alpha, num_iterations=20000, print_cost=False, plot_cost=False)
    x_test_deep = np.array(test_X).T
    A_out, _ = dnn.L_model_forward(x_test_deep, parameters)
    # NOTE(review): predicting class 1 when the activation is < 0.5 looks
    # inverted relative to the usual sigmoid convention -- confirm against
    # dnn.L_model_forward's label encoding.
    res = [1 if y < 0.5 else 0 for y in A_out[0]]
    print("___")
    accuracy_deep = (1 / len(test_X)) * np.sum([1 if res[i] == test_y[i] else 0 for i in range(len(test_X))])
    print("Accuracy custom NN: " + str(accuracy_deep))
    print("___")
    return accuracy_deep
def get_model_accuracy(clf, test_X, test_y):
    """Fraction of samples in `test_X` whose prediction matches `test_y`."""
    predictions = clf.predict(test_X)
    hits = [1 if predicted == expected else 0 for predicted, expected in zip(predictions, test_y)]
    return (1 / len(test_X)) * np.sum(hits)
def test_model(train_X, train_y, test_X, test_y, layers=(3, 3), alpha=0.0075, save=False):
    """Fit an sklearn MLP on the training split and return its test accuracy.

    When `save` is True the fitted model is persisted for later reuse.
    """
    model = MLPClassifier(solver='lbfgs', alpha=alpha, hidden_layer_sizes=layers, random_state=1)
    model.fit(train_X, train_y)
    if save:
        joblib.dump(model, './image_data/models/car_detection_nn_model.pkl')
    return get_model_accuracy(model, test_X, test_y)
def train_layer_size(train_X, train_y, test_X, test_y):
    """Grid-search single-hidden-layer widths 1..31, printing each accuracy."""
    for idx, width in enumerate(range(1, 32)):
        print(idx, test_model(train_X, train_y, test_X, test_y, (width,)))
# Best: 16: 0.88, 23: 0.90
def train_layer_depth(train_X, train_y, test_X, test_y):
    """Grid-search network depth for a few promising widths, printing accuracies."""
    for width in [2, 5, 16, 23]:
        for depth in [1, 2, 10, 20]:
            layers = (width,) * depth
            print(layers, test_model(train_X, train_y, test_X, test_y, layers))
# Best 16x10: 0.909, 23x10: 0.907
def train_alpha(train_X, train_y, test_X, test_y):
    """Sweep the L2 penalty (alpha) for a fixed 16x10 architecture, printing accuracies."""
    alphas = [1e-05, 3e-05, 5e-05,
              0.0001, 0.0003, 0.0005,
              0.001, 0.003, 0.005,
              0.01, 0.03, 0.05,
              0.1, 0.3, 0.5]
    architecture = (16,) * 10
    for a in alphas:
        print(a, test_model(train_X, train_y, test_X, test_y, architecture, a))
# Best: 0.0001: 0.911, 0.001: 0.903
def train_models(train_X, train_y, test_X, test_y):
    """Run the full hyper-parameter exploration: width, then depth, then alpha."""
    for sweep in (train_layer_size, train_layer_depth, train_alpha):
        sweep(train_X, train_y, test_X, test_y)
def load_and_test_model(test_X, test_y):
    """Load the persisted model and print its accuracy on the given split."""
    model = joblib.load('./image_data/models/car_detection_nn_model.pkl')
    print(get_model_accuracy(model, test_X, test_y))
def classify_image_from_path(image_path="image_data/cropped_images/car/6b6777a5-ff5c-4f11-9f1c-8a98ba75e2f4_car_0.jpg"):
    """Classify a single image file with the persisted model; returns the prediction."""
    features, _ = flatten_image_from_path(image_path)
    model = joblib.load('./image_data/models/car_detection_nn_model.pkl')
    return model.predict(features)
def main():
    """Classify a few known car / not-car crops with the persisted model.

    To retrain from scratch, run the pipeline
    flatten_images -> split_data -> train_models / test_model(save=True),
    which refreshes './image_data/models/car_detection_nn_model.pkl'.
    """
    car_image_file_names = ["6b6777a5-ff5c-4f11-9f1c-8a98ba75e2f4_car_0.jpg", "6b6777a5-ff5c-4f11-9f1c-8a98ba75e2f4_car_13.jpg", "047a3217-f379-48ee-80ee-d388aa3d48d2_car_16.jpg"]
    car_images = ["image_data/cropped_images/car/" + fn for fn in car_image_file_names]
    not_car_image_file_names = ["6b6777a5-ff5c-4f11-9f1c-8a98ba75e2f4_not_car_20.jpg", "35b4b453-f4c9-4e21-ab5d-2468927bd8cd_not_car_10.jpg", "35b4b453-f4c9-4e21-ab5d-2468927bd8cd_not_car_69.jpg"]
    not_car_images = ["image_data/cropped_images/not_car/" + fn for fn in not_car_image_file_names]
    for image_path in car_images + not_car_images:
        print(classify_image_from_path(image_path))
# Script entry point: classify the sample images when executed directly.
if __name__ == "__main__":
    main()
| [
"PIL.Image.open",
"sklearn.neural_network.MLPClassifier",
"os.listdir",
"sklearn.externals.joblib.load",
"os.path.join",
"numpy.append",
"numpy.array",
"deep_nn_step_by_step.L_layer_model",
"deep_nn_step_by_step.L_model_forward",
"sklearn.externals.joblib.dump"
] | [((795, 811), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (805, 811), False, 'from PIL import Image\n'), ((2295, 2382), 'numpy.append', 'np.append', (['flatten_cars_X[:car_idx, :]', 'flatten_not_cars_X[:not_car_idx, :]'], {'axis': '(0)'}), '(flatten_cars_X[:car_idx, :], flatten_not_cars_X[:not_car_idx, :],\n axis=0)\n', (2304, 2382), True, 'import numpy as np\n'), ((2392, 2479), 'numpy.append', 'np.append', (['flatten_cars_X[car_idx:, :]', 'flatten_not_cars_X[not_car_idx:, :]'], {'axis': '(0)'}), '(flatten_cars_X[car_idx:, :], flatten_not_cars_X[not_car_idx:, :],\n axis=0)\n', (2401, 2479), True, 'import numpy as np\n'), ((2490, 2577), 'numpy.append', 'np.append', (['flatten_cars_y[:car_idx, :]', 'flatten_not_cars_y[:not_car_idx, :]'], {'axis': '(0)'}), '(flatten_cars_y[:car_idx, :], flatten_not_cars_y[:not_car_idx, :],\n axis=0)\n', (2499, 2577), True, 'import numpy as np\n'), ((2587, 2674), 'numpy.append', 'np.append', (['flatten_cars_y[car_idx:, :]', 'flatten_not_cars_y[not_car_idx:, :]'], {'axis': '(0)'}), '(flatten_cars_y[car_idx:, :], flatten_not_cars_y[not_car_idx:, :],\n axis=0)\n', (2596, 2674), True, 'import numpy as np\n'), ((2872, 2891), 'numpy.array', 'np.array', (['[train_y]'], {}), '([train_y])\n', (2880, 2891), True, 'import numpy as np\n'), ((2910, 3051), 'deep_nn_step_by_step.L_layer_model', 'dnn.L_layer_model', (['x_train_deep', 'y_train_deep', 'layers_dims'], {'learning_rate': 'alpha', 'num_iterations': '(20000)', 'print_cost': '(False)', 'plot_cost': '(False)'}), '(x_train_deep, y_train_deep, layers_dims, learning_rate=\n alpha, num_iterations=20000, print_cost=False, plot_cost=False)\n', (2927, 3051), True, 'import deep_nn_step_by_step as dnn\n'), ((3102, 3146), 'deep_nn_step_by_step.L_model_forward', 'dnn.L_model_forward', (['x_test_deep', 'parameters'], {}), '(x_test_deep, parameters)\n', (3121, 3146), True, 'import deep_nn_step_by_step as dnn\n'), ((3729, 3818), 'sklearn.neural_network.MLPClassifier', 'MLPClassifier', 
([], {'solver': '"""lbfgs"""', 'alpha': 'alpha', 'hidden_layer_sizes': 'layers', 'random_state': '(1)'}), "(solver='lbfgs', alpha=alpha, hidden_layer_sizes=layers,\n random_state=1)\n", (3742, 3818), False, 'from sklearn.neural_network import MLPClassifier\n'), ((5308, 5369), 'sklearn.externals.joblib.load', 'joblib.load', (['"""./image_data/models/car_detection_nn_model.pkl"""'], {}), "('./image_data/models/car_detection_nn_model.pkl')\n", (5319, 5369), False, 'from sklearn.externals import joblib\n'), ((5624, 5685), 'sklearn.externals.joblib.load', 'joblib.load', (['"""./image_data/models/car_detection_nn_model.pkl"""'], {}), "('./image_data/models/car_detection_nn_model.pkl')\n", (5635, 5685), False, 'from sklearn.externals import joblib\n'), ((2833, 2850), 'numpy.array', 'np.array', (['train_X'], {}), '(train_X)\n', (2841, 2850), True, 'import numpy as np\n'), ((3068, 3084), 'numpy.array', 'np.array', (['test_X'], {}), '(test_X)\n', (3076, 3084), True, 'import numpy as np\n'), ((3867, 3933), 'sklearn.externals.joblib.dump', 'joblib.dump', (['clf', '"""./image_data/models/car_detection_nn_model.pkl"""'], {}), "(clf, './image_data/models/car_detection_nn_model.pkl')\n", (3878, 3933), False, 'from sklearn.externals import joblib\n'), ((516, 530), 'numpy.array', 'np.array', (['flat'], {}), '(flat)\n', (524, 530), True, 'import numpy as np\n'), ((574, 591), 'numpy.array', 'np.array', (['[label]'], {}), '([label])\n', (582, 591), True, 'import numpy as np\n'), ((967, 1009), 'os.path.join', 'join', (['"""./image_data/cropped_images/car"""', 'f'], {}), "('./image_data/cropped_images/car', f)\n", (971, 1009), False, 'from os.path import isfile, join\n'), ((1043, 1085), 'os.listdir', 'listdir', (['"""./image_data/cropped_images/car"""'], {}), "('./image_data/cropped_images/car')\n", (1050, 1085), False, 'from os import listdir\n'), ((1272, 1296), 'numpy.array', 'np.array', (['flatten_cars_X'], {}), '(flatten_cars_X)\n', (1280, 1296), True, 'import numpy as np\n'), 
((1362, 1386), 'numpy.array', 'np.array', (['flatten_cars_y'], {}), '(flatten_cars_y)\n', (1370, 1386), True, 'import numpy as np\n'), ((1466, 1512), 'os.path.join', 'join', (['"""./image_data/cropped_images/not_car"""', 'f'], {}), "('./image_data/cropped_images/not_car', f)\n", (1470, 1512), False, 'from os.path import isfile, join\n'), ((1547, 1593), 'os.listdir', 'listdir', (['"""./image_data/cropped_images/not_car"""'], {}), "('./image_data/cropped_images/not_car')\n", (1554, 1593), False, 'from os import listdir\n'), ((1804, 1832), 'numpy.array', 'np.array', (['flatten_not_cars_X'], {}), '(flatten_not_cars_X)\n', (1812, 1832), True, 'import numpy as np\n'), ((1906, 1934), 'numpy.array', 'np.array', (['flatten_not_cars_y'], {}), '(flatten_not_cars_y)\n', (1914, 1934), True, 'import numpy as np\n'), ((1096, 1138), 'os.path.join', 'join', (['"""./image_data/cropped_images/car"""', 'f'], {}), "('./image_data/cropped_images/car', f)\n", (1100, 1138), False, 'from os.path import isfile, join\n'), ((1604, 1650), 'os.path.join', 'join', (['"""./image_data/cropped_images/not_car"""', 'f'], {}), "('./image_data/cropped_images/not_car', f)\n", (1608, 1650), False, 'from os.path import isfile, join\n')] |
from clawpack.visclaw.data import ClawPlotData
import numpy as np
import pylab
def read_data(outdir="_output", adjoint=False):
    """Read all available solution frames from a Clawpack output directory.

    Frames 0..5000 are read until the first one that fails to load.

    Parameters
    ----------
    outdir : str
        Clawpack output directory containing the frame files.
    adjoint : bool
        If True, reverse the time axis of the returned solution array
        (adjoint solutions are computed backwards in time).

    Returns
    -------
    X, T : 2d arrays from ``np.meshgrid`` of cell centers vs. frame times.
    qxt : array of the stacked per-frame solutions ``q``.
    """
    pd = ClawPlotData()
    pd.outdir = outdir
    times = []
    qxt = []
    for frameno in range(5001):
        try:
            frame = pd.getframe(frameno)
        except Exception:
            # was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit
            break
        qxt.append(frame.state.q)
        times.append(frame.state.t)
    # NOTE(review): if even frame 0 cannot be read, `frame` is unbound here
    # and this raises UnboundLocalError -- behavior unchanged from original.
    x = frame.state.patch.x.centers
    X, T = np.meshgrid(x, times)
    qxt = np.array(qxt)
    if adjoint:
        qxt = np.flipud(qxt)  # reverse t for adjoint
    return X, T, qxt
| [
"numpy.array",
"numpy.meshgrid",
"clawpack.visclaw.data.ClawPlotData",
"numpy.flipud"
] | [((139, 153), 'clawpack.visclaw.data.ClawPlotData', 'ClawPlotData', ([], {}), '()\n', (151, 153), False, 'from clawpack.visclaw.data import ClawPlotData\n'), ((485, 506), 'numpy.meshgrid', 'np.meshgrid', (['x', 'times'], {}), '(x, times)\n', (496, 506), True, 'import numpy as np\n'), ((516, 529), 'numpy.array', 'np.array', (['qxt'], {}), '(qxt)\n', (524, 529), True, 'import numpy as np\n'), ((560, 574), 'numpy.flipud', 'np.flipud', (['qxt'], {}), '(qxt)\n', (569, 574), True, 'import numpy as np\n')] |
import cortado.chunk as ch
from cortado.seq import Seq
import numpy as np
from cortado.abstractfactor import AbstractFactor
from cortado.funcslicer import FuncSlicer
from cortado.consts import HEADLENGTH, SLICELEN, MISSINGLEVEL
class ConstFactor(AbstractFactor):
    """A factor that is constantly the "Intercept" level for every row."""

    def __init__(self, length):
        self._name = "Intercept"
        self._length = length
        self._levels = [MISSINGLEVEL, "Intercept"]

        def make_slices(start, length, slicelen):
            # Never read past the end of the factor.
            length = min(self._length - start, length)
            slicelen = min(length, slicelen)
            ones = np.ones(slicelen, dtype=np.uint8)
            indices = Seq.from_next((start, length, slicelen), ch.next_slice_indices)
            # Every slice is just a window of ones (one constant level).
            return Seq.map(lambda bounds: ones[bounds[0]:bounds[1]], indices)

        self._slicer = FuncSlicer(make_slices, np.uint8)

    @property
    def name(self):
        # Always "Intercept".
        return self._name

    def __len__(self):
        return self._length

    @property
    def levels(self):
        # Level 0 is the shared "missing" level, level 1 the intercept.
        return self._levels

    @property
    def slicer(self):
        return self._slicer
"cortado.funcslicer.FuncSlicer",
"numpy.ones",
"cortado.seq.Seq.from_next"
] | [((753, 781), 'cortado.funcslicer.FuncSlicer', 'FuncSlicer', (['slicer', 'np.uint8'], {}), '(slicer, np.uint8)\n', (763, 781), False, 'from cortado.funcslicer import FuncSlicer\n'), ((574, 607), 'numpy.ones', 'np.ones', (['slicelen'], {'dtype': 'np.uint8'}), '(slicelen, dtype=np.uint8)\n', (581, 607), True, 'import numpy as np\n'), ((665, 728), 'cortado.seq.Seq.from_next', 'Seq.from_next', (['(start, length, slicelen)', 'ch.next_slice_indices'], {}), '((start, length, slicelen), ch.next_slice_indices)\n', (678, 728), False, 'from cortado.seq import Seq\n')] |
import numpy as np
import torch
def objective_function(
    config,
    model_objective,
    model_cost,
    task_feature_objective,
    task_feature_cost,
    x_mean_objective,
    x_std_objective,
    x_mean_cost,
    x_std_cost,
    y_mean_objective=None,
    y_std_objective=None,
    y_mean_cost=None,
    y_std_cost=None,
    log_objective=False,
    with_noise=True,
):
    """Evaluate the surrogate objective and cost networks on a batch of configs.

    ``config`` is assumed to be a 2d array of shape (n_configs, n_dims) --
    confirm against callers.  Each network's output column 0 is read as the
    (log-)mean and column 1 as the log-variance.  Returns a tuple
    ``(objective_values, cost_values)``, each of shape (n_configs, 1).
    """

    def _net_output(model, task_feature, x_mean, x_std):
        # Append the task feature to every config row, normalize, run the net.
        task_block = np.repeat(task_feature[None, :], config.shape[0], axis=0)
        inputs = np.concatenate((config, task_block), axis=1)
        normed = torch.from_numpy((inputs - x_mean) / x_std).float()
        pred = model.forward(normed).data.numpy()
        return pred[:, 0], pred[:, 1]

    # --- objective network ---
    mean, log_variance = _net_output(
        model_objective, task_feature_objective,
        x_mean_objective, x_std_objective)
    if y_mean_objective is not None or y_std_objective is not None:
        # Undo the target normalization used at training time.
        mean = mean * y_std_objective + y_mean_objective
        log_variance *= y_std_objective ** 2
    feval = mean
    if with_noise:
        feval += np.random.randn() * np.sqrt(np.exp(log_variance))
    if log_objective:
        feval = np.exp(feval)

    # --- cost network (predicts the log of the cost) ---
    log_mean, log_log_variance = _net_output(
        model_cost, task_feature_cost, x_mean_cost, x_std_cost)
    if y_mean_cost is not None or y_std_cost is not None:
        log_mean = log_mean * y_std_cost + y_mean_cost
        log_log_variance *= y_std_cost ** 2
    log_cost = log_mean
    if with_noise:
        log_cost += np.random.randn() * np.sqrt(np.exp(log_log_variance))

    return feval[:, None], np.exp(log_cost)[:, None]
| [
"numpy.repeat",
"torch.from_numpy",
"numpy.exp",
"numpy.concatenate",
"numpy.random.randn"
] | [((389, 456), 'numpy.repeat', 'np.repeat', (['task_feature_objective[None, :]', 'config.shape[0]'], {'axis': '(0)'}), '(task_feature_objective[None, :], config.shape[0], axis=0)\n', (398, 456), True, 'import numpy as np\n'), ((465, 501), 'numpy.concatenate', 'np.concatenate', (['(config, Ht)'], {'axis': '(1)'}), '((config, Ht), axis=1)\n', (479, 501), True, 'import numpy as np\n'), ((1032, 1094), 'numpy.repeat', 'np.repeat', (['task_feature_cost[None, :]', 'config.shape[0]'], {'axis': '(0)'}), '(task_feature_cost[None, :], config.shape[0], axis=0)\n', (1041, 1094), True, 'import numpy as np\n'), ((1103, 1139), 'numpy.concatenate', 'np.concatenate', (['(config, Ht)'], {'axis': '(1)'}), '((config, Ht), axis=1)\n', (1117, 1139), True, 'import numpy as np\n'), ((1008, 1021), 'numpy.exp', 'np.exp', (['feval'], {}), '(feval)\n', (1014, 1021), True, 'import numpy as np\n'), ((515, 573), 'torch.from_numpy', 'torch.from_numpy', (['((x - x_mean_objective) / x_std_objective)'], {}), '((x - x_mean_objective) / x_std_objective)\n', (531, 573), False, 'import torch\n'), ((919, 936), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (934, 936), True, 'import numpy as np\n'), ((1153, 1201), 'torch.from_numpy', 'torch.from_numpy', (['((x - x_mean_cost) / x_std_cost)'], {}), '((x - x_mean_cost) / x_std_cost)\n', (1169, 1201), False, 'import torch\n'), ((1547, 1564), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (1562, 1564), True, 'import numpy as np\n'), ((1629, 1645), 'numpy.exp', 'np.exp', (['log_cost'], {}), '(log_cost)\n', (1635, 1645), True, 'import numpy as np\n'), ((947, 967), 'numpy.exp', 'np.exp', (['log_variance'], {}), '(log_variance)\n', (953, 967), True, 'import numpy as np\n'), ((1575, 1599), 'numpy.exp', 'np.exp', (['log_log_variance'], {}), '(log_log_variance)\n', (1581, 1599), True, 'import numpy as np\n')] |
import pandas as pd
import itertools
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
from tqdm import tqdm
# Pseudo-labelling: give each test pair the label of its closest train pair
# (by TF-IDF cosine similarity) when that pair is a near-duplicate.
train_data = pd.read_csv("./tianchi_datasets/track3_round1_train.tsv", sep="\t", header=None,
                         quoting=3, encoding="utf-8", names=["sentence1", "sentence2", "labels"])
train_data["document"] = train_data["sentence1"].str.cat(train_data["sentence2"], sep=" ")

test_data = pd.read_csv("./tianchi_datasets/track3_round1_testA.tsv", sep="\t", header=None,
                        quoting=3, encoding="utf-8", names=["sentence1", "sentence2"])
test_data["document"] = test_data["sentence1"].str.cat(test_data["sentence2"], sep=" ")

# Train documents form the corpus; test documents are the queries.
docs, q_words = [], []
for train_doc, test_doc in itertools.zip_longest(train_data["document"], test_data["document"]):
    docs.append(train_doc)
    if test_doc is not None:
        q_words.append(test_doc)

vectorizer = TfidfVectorizer()
tf_idf = vectorizer.fit_transform(docs)

pseudo_labels = []
for query in tqdm(q_words):
    query_vec = vectorizer.transform([query])
    similarities = cosine_similarity(tf_idf, query_vec).ravel()
    best = np.argmax(similarities)
    # Copy the nearest train label only when it is a near duplicate;
    # 2 marks "no confident label".
    if similarities[best] > 0.95:
        pseudo_labels.append(train_data["labels"][best])
    else:
        pseudo_labels.append(2)

test_data["labels"] = pseudo_labels

test_data1 = test_data[test_data["labels"] < 2]
new_data = pd.concat([train_data, test_data1], axis=0)
new_data = new_data[["sentence1", "sentence2", "labels"]]
test_data1 = test_data1[["sentence1", "sentence2", "labels"]]
test_data1.to_csv("./tianchi_datasets/track3_round1_testA_label2.tsv", sep="\t", header=None, index=None)
new_data.to_csv("./tianchi_datasets/track3_round1_newtrain2.tsv", sep="\t", header=None, index=None)
"sklearn.metrics.pairwise.cosine_similarity",
"pandas.read_csv",
"tqdm.tqdm",
"itertools.zip_longest",
"numpy.argmax",
"sklearn.feature_extraction.text.TfidfVectorizer",
"pandas.concat"
] | [((208, 370), 'pandas.read_csv', 'pd.read_csv', (['"""./tianchi_datasets/track3_round1_train.tsv"""'], {'sep': '"""\t"""', 'header': 'None', 'quoting': '(3)', 'encoding': '"""utf-8"""', 'names': "['sentence1', 'sentence2', 'labels']"}), "('./tianchi_datasets/track3_round1_train.tsv', sep='\\t', header=\n None, quoting=3, encoding='utf-8', names=['sentence1', 'sentence2',\n 'labels'])\n", (219, 370), True, 'import pandas as pd\n'), ((490, 638), 'pandas.read_csv', 'pd.read_csv', (['"""./tianchi_datasets/track3_round1_testA.tsv"""'], {'sep': '"""\t"""', 'header': 'None', 'quoting': '(3)', 'encoding': '"""utf-8"""', 'names': "['sentence1', 'sentence2']"}), "('./tianchi_datasets/track3_round1_testA.tsv', sep='\\t', header=\n None, quoting=3, encoding='utf-8', names=['sentence1', 'sentence2'])\n", (501, 638), True, 'import pandas as pd\n'), ((790, 858), 'itertools.zip_longest', 'itertools.zip_longest', (["train_data['document']", "test_data['document']"], {}), "(train_data['document'], test_data['document'])\n", (811, 858), False, 'import itertools\n'), ((962, 979), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {}), '()\n', (977, 979), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((1039, 1052), 'tqdm.tqdm', 'tqdm', (['q_words'], {}), '(q_words)\n', (1043, 1052), False, 'from tqdm import tqdm\n'), ((1392, 1435), 'pandas.concat', 'pd.concat', (['[train_data, test_data1]'], {'axis': '(0)'}), '([train_data, test_data1], axis=0)\n', (1401, 1435), True, 'import pandas as pd\n'), ((1178, 1192), 'numpy.argmax', 'np.argmax', (['res'], {}), '(res)\n', (1187, 1192), True, 'import numpy as np\n'), ((1104, 1138), 'sklearn.metrics.pairwise.cosine_similarity', 'cosine_similarity', (['tf_idf', 'qtf_idf'], {}), '(tf_idf, qtf_idf)\n', (1121, 1138), False, 'from sklearn.metrics.pairwise import cosine_similarity\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[28]:
# Characterization of the signal from the input file
# We will be using Fourier Transforms to convert the signals to a frequency domain distribution
# In[29]:
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import wavfile
# In[30]:
# Load the waveform; freq_sample is the sampling rate in Hz.
freq_sample, sig_audio = wavfile.read("Welcome.wav")

print('\nShape of the Signal:', sig_audio.shape)
print('Signal Datatype:', sig_audio.dtype)
print('Signal duration:', round(sig_audio.shape[0] / float(freq_sample), 2), 'seconds')

# Normalize 16-bit PCM samples to [-1, 1).
sig_audio = sig_audio / np.power(2, 15)

# Length and half-length of the signal to input to the Fourier transform.
sig_length = len(sig_audio)
# FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24 --
# use the builtin `int` instead.
half_length = np.ceil((sig_length + 1) / 2.0).astype(int)

# Frequency domain of the signal: keep only the non-redundant half
# of the spectrum and convert to normalized power.
signal_freq = np.fft.fft(sig_audio)
signal_freq = abs(signal_freq[0:half_length]) / sig_length
signal_freq **= 2

transform_len = len(signal_freq)

# Double the interior bins to account for the discarded negative-frequency
# half (Nyquist bin is excluded for even-length signals).
if sig_length % 2:
    signal_freq[1:transform_len] *= 2
else:
    signal_freq[1:transform_len-1] *= 2

# Signal strength in decibels (dB).
exp_signal = 10 * np.log10(signal_freq)

# Frequency axis in kHz.
x_axis = np.arange(0, half_length, 1) * (freq_sample / sig_length) / 1000.0

plt.figure()
plt.plot(x_axis, exp_signal, color='green', linewidth=1)
plt.xlabel('Frequency Representation (kHz)')
plt.ylabel('Power of Signal (dB)')
plt.show()
# In[ ]:
| [
"numpy.ceil",
"numpy.log10",
"matplotlib.pyplot.ylabel",
"numpy.power",
"matplotlib.pyplot.xlabel",
"numpy.fft.fft",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"scipy.io.wavfile.read",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((333, 360), 'scipy.io.wavfile.read', 'wavfile.read', (['"""Welcome.wav"""'], {}), "('Welcome.wav')\n", (345, 360), False, 'from scipy.io import wavfile\n'), ((920, 941), 'numpy.fft.fft', 'np.fft.fft', (['sig_audio'], {}), '(sig_audio)\n', (930, 941), True, 'import numpy as np\n'), ((1524, 1536), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1534, 1536), True, 'import matplotlib.pyplot as plt\n'), ((1537, 1593), 'matplotlib.pyplot.plot', 'plt.plot', (['x_axis', 'exp_signal'], {'color': '"""green"""', 'linewidth': '(1)'}), "(x_axis, exp_signal, color='green', linewidth=1)\n", (1545, 1593), True, 'import matplotlib.pyplot as plt\n'), ((1594, 1638), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency Representation (kHz)"""'], {}), "('Frequency Representation (kHz)')\n", (1604, 1638), True, 'import matplotlib.pyplot as plt\n'), ((1639, 1673), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Power of Signal (dB)"""'], {}), "('Power of Signal (dB)')\n", (1649, 1673), True, 'import matplotlib.pyplot as plt\n'), ((1674, 1684), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1682, 1684), True, 'import matplotlib.pyplot as plt\n'), ((593, 608), 'numpy.power', 'np.power', (['(2)', '(15)'], {}), '(2, 15)\n', (601, 608), True, 'import numpy as np\n'), ((1398, 1419), 'numpy.log10', 'np.log10', (['signal_freq'], {}), '(signal_freq)\n', (1406, 1419), True, 'import numpy as np\n'), ((757, 788), 'numpy.ceil', 'np.ceil', (['((sig_length + 1) / 2.0)'], {}), '((sig_length + 1) / 2.0)\n', (764, 788), True, 'import numpy as np\n'), ((1443, 1471), 'numpy.arange', 'np.arange', (['(0)', 'half_length', '(1)'], {}), '(0, half_length, 1)\n', (1452, 1471), True, 'import numpy as np\n')] |
import numpy as np
class PoissonClutter2d:
    """Clutter model.

    The number of clutters, k, follows a Poisson distribution with mean
    density * scope**2, and the k clutters are uniformly spatially
    distributed over the square [centor - scope, centor + scope]^2.
    """

    def __init__(self, density):
        # Expected number of clutter points per unit area.
        self.density = density
        # FIX: the attribute was historically misspelled; keep the old
        # name as a backward-compatible alias for external readers.
        self.dentity = density

    def arise(self, centor, scope):
        """Sample clutter points around `centor` within +/- `scope`.

        Returns a (k, 2) array, or an empty (0, 2) array when no
        clutter is drawn.
        """
        num_clutter = np.random.poisson(lam=self.density * (scope ** 2))
        if num_clutter == 0:
            return np.empty((0, 2))
        xy_min = centor - scope
        xy_max = centor + scope
        return np.random.uniform(low=xy_min,
                                 high=xy_max,
                                 size=(num_clutter, 2))
| [
"numpy.empty",
"numpy.random.poisson",
"numpy.random.uniform"
] | [((320, 368), 'numpy.random.poisson', 'np.random.poisson', ([], {'lam': '(self.dentity * scope ** 2)'}), '(lam=self.dentity * scope ** 2)\n', (337, 368), True, 'import numpy as np\n'), ((515, 580), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'xy_min', 'high': 'xy_max', 'size': '(num_clutter, 2)'}), '(low=xy_min, high=xy_max, size=(num_clutter, 2))\n', (532, 580), True, 'import numpy as np\n'), ((415, 431), 'numpy.empty', 'np.empty', (['(0, 2)'], {}), '((0, 2))\n', (423, 431), True, 'import numpy as np\n')] |
from kaldi_io import read_vec_flt, write_vec_flt, open_or_fd, write_mat
import sys
import numpy as np
from collections import defaultdict
# Speakers held out for dev/test; only their segments get an embedding.
dev_test_spk = ['p311', 'p226', 'p303', 'p234', 'p302', 'p237', 'p294', 'p225']

with open(sys.argv[1], 'r') as f:
    entries = [line.strip() for line in f.readlines()]

spk2mat = defaultdict(list)
for entry in entries:
    key, rxfile = entry.split()
    fields = key.split('-')
    spk = fields[0]
    if spk not in dev_test_spk:
        continue
    # Skip the first 24 segments of each held-out speaker.
    if int(fields[2]) < 25:
        continue
    spk2mat[spk].append(read_vec_flt(rxfile))

out_file = sys.argv[2]
ark_scp_output = f'ark:| copy-feats --compress=true ark:- ark,scp:{out_file}.ark,{out_file}.scp'
with open_or_fd(ark_scp_output, 'wb') as f:
    for spk, mat in spk2mat.items():
        # Speaker embedding = mean of the speaker's vectors, written as a column.
        spk_emb = np.mean(mat, axis=0).reshape(-1, 1)
        write_mat(f, spk_emb, key=spk)
| [
"numpy.mean",
"kaldi_io.open_or_fd",
"collections.defaultdict",
"kaldi_io.read_vec_flt",
"kaldi_io.write_mat"
] | [((332, 349), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (343, 349), False, 'from collections import defaultdict\n'), ((721, 753), 'kaldi_io.open_or_fd', 'open_or_fd', (['ark_scp_output', '"""wb"""'], {}), "(ark_scp_output, 'wb')\n", (731, 753), False, 'from kaldi_io import read_vec_flt, write_vec_flt, open_or_fd, write_mat\n'), ((562, 582), 'kaldi_io.read_vec_flt', 'read_vec_flt', (['rxfile'], {}), '(rxfile)\n', (574, 582), False, 'from kaldi_io import read_vec_flt, write_vec_flt, open_or_fd, write_mat\n'), ((908, 938), 'kaldi_io.write_mat', 'write_mat', (['f', 'spk_emb'], {'key': 'spk'}), '(f, spk_emb, key=spk)\n', (917, 938), False, 'from kaldi_io import read_vec_flt, write_vec_flt, open_or_fd, write_mat\n'), ((814, 834), 'numpy.mean', 'np.mean', (['mat'], {'axis': '(0)'}), '(mat, axis=0)\n', (821, 834), True, 'import numpy as np\n')] |
# coding: utf-8
import sys, os
sys.path.append(os.pardir) # 親ディレクトリのファイルをインポートするための設定
import numpy as np
import pickle
from dataset.mnist import load_mnist
from common.functions import sigmoid, softmax
def get_data():
    """Load the MNIST test split: normalized to [0, 1] and flattened."""
    _, (x_test, t_test) = load_mnist(normalize=True, flatten=True, one_hot_label=False)
    return x_test, t_test
def init_network():
    """Deserialize the pretrained weight dictionary from sample_weight.pkl."""
    with open("sample_weight.pkl", 'rb') as f:
        return pickle.load(f)
cnt = 0  # call counter: the shape debug dump below prints only once

def predict(network, x):
    """Forward pass of the 3-layer MLP.

    network: dict with weight matrices 'W1'..'W3' and biases 'b1'..'b3'.
    x: one flattened input sample (presumably shape (784,) for MNIST --
       confirm against the caller).
    Returns the softmax class-probability vector.
    """
    W1, W2, W3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']
    # Layer 1: affine + sigmoid
    a1 = np.dot(x, W1) + b1
    z1 = sigmoid(a1)
    # Layer 2: affine + sigmoid
    a2 = np.dot(z1, W2) + b2
    z2 = sigmoid(a2)
    # Output layer: affine + softmax
    a3 = np.dot(z2, W3) + b3
    y = softmax(a3)
    global cnt
    if (cnt==0):
        # One-time debug dump of every intermediate array shape.
        print("a1 = np.dot(x, W1) + b1")
        print("z1 = sigmoid(a1)")
        print(" x.shape : "+str(x.shape))
        print(" W1.shape : "+str(W1.shape))
        print(" b1.shape : "+str(b1.shape))
        print(" a1.shape : "+str(a1.shape))
        print(" z1.shape : "+str(z1.shape))
        print("a2 = np.dot(z1, W2) + b2")
        print("z2 = sigmoid(a2)")
        print(" z1.shape : "+str(z1.shape))
        print(" W2.shape : "+str(W2.shape))
        print(" b2.shape : "+str(b2.shape))
        print(" a2.shape : "+str(a2.shape))
        print(" z2.shape : "+str(z2.shape))
        print("a3 = np.dot(z2, W3) + b3")
        print(" y = softmax(a3)")
        print(" z2.shape : "+str(z2.shape))
        print(" W3.shape : "+str(W3.shape))
        print(" b3.shape : "+str(b3.shape))
        print(" a3.shape : "+str(a3.shape))
        print(" y.shape : "+str(y.shape))
        cnt = cnt + 1
    return y
# Classify every test sample and report top-1 accuracy.
x, t = get_data()
network = init_network()
accuracy_cnt = 0
for image, label in zip(x, t):
    y = predict(network, image)
    # index of the highest-probability class
    if np.argmax(y) == label:
        accuracy_cnt += 1
print("Accuracy:" + str(float(accuracy_cnt) / len(x)))
| [
"common.functions.sigmoid",
"dataset.mnist.load_mnist",
"common.functions.softmax",
"pickle.load",
"numpy.argmax",
"numpy.dot",
"sys.path.append"
] | [((31, 57), 'sys.path.append', 'sys.path.append', (['os.pardir'], {}), '(os.pardir)\n', (46, 57), False, 'import sys, os\n'), ((264, 325), 'dataset.mnist.load_mnist', 'load_mnist', ([], {'normalize': '(True)', 'flatten': '(True)', 'one_hot_label': '(False)'}), '(normalize=True, flatten=True, one_hot_label=False)\n', (274, 325), False, 'from dataset.mnist import load_mnist\n'), ((668, 679), 'common.functions.sigmoid', 'sigmoid', (['a1'], {}), '(a1)\n', (675, 679), False, 'from common.functions import sigmoid, softmax\n'), ((718, 729), 'common.functions.sigmoid', 'sigmoid', (['a2'], {}), '(a2)\n', (725, 729), False, 'from common.functions import sigmoid, softmax\n'), ((767, 778), 'common.functions.softmax', 'softmax', (['a3'], {}), '(a3)\n', (774, 778), False, 'from common.functions import sigmoid, softmax\n'), ((1898, 1910), 'numpy.argmax', 'np.argmax', (['y'], {}), '(y)\n', (1907, 1910), True, 'import numpy as np\n'), ((439, 453), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (450, 453), False, 'import pickle\n'), ((640, 653), 'numpy.dot', 'np.dot', (['x', 'W1'], {}), '(x, W1)\n', (646, 653), True, 'import numpy as np\n'), ((689, 703), 'numpy.dot', 'np.dot', (['z1', 'W2'], {}), '(z1, W2)\n', (695, 703), True, 'import numpy as np\n'), ((739, 753), 'numpy.dot', 'np.dot', (['z2', 'W3'], {}), '(z2, W3)\n', (745, 753), True, 'import numpy as np\n')] |
import pandas
import numpy as np
import matplotlib.pyplot as plt
COLOR = ['C2', 'C1', 'C0']
SAVE_DIR = "benchmarks_results"
def plot_scaling_1d_benchmark(strategies, list_n_times):
    """Plot runtime benchmarks for the 1d CD strategies.

    strategies: list of (strategy_id, legend_name, marker_style) tuples.
    list_n_times: signal lengths T (in units of L) to plot.

    Reads `benchmarks_results/runtimes_n_jobs_{T}_{strategy}.csv` and
    produces one scaling figure per T plus one global bar-chart figure.
    """
    # compute the width of the bars
    n_group = len(list_n_times)
    n_bar = len(strategies)
    width = 1 / ((n_bar + 1) * n_group - 1)
    fig = plt.figure('comparison CD', figsize=(6, 3.5))
    fig.patch.set_alpha(0)
    ax_bar = fig.subplots()
    xticks, labels = [], []
    for i, n_times in enumerate(list_n_times):
        fig_scaling = plt.figure(f'Scaling T={n_times}', figsize=(6, 3))
        fig_scaling.patch.set_alpha(0)
        ax_scaling = fig_scaling.subplots()
        handles = []
        xticks.append(((i + .5) * (n_bar + 1)) * width)
        labels.append(f"$T = {n_times}L$")
        for j, (strategy, name, style) in enumerate(strategies):
            col_name = ['pb', 'n_jobs', 'runtime', 'runtime1']
            csv_name = (f"benchmarks_results/runtimes_n_jobs_"
                        f"{n_times}_{strategy}.csv")
            try:
                df = pandas.read_csv(csv_name, names=col_name)
            except FileNotFoundError:
                print(f"Not found {csv_name}")
                continue
            # single-worker runtimes feed the bar chart
            runtimes_1 = df[df['n_jobs'] == 1]['runtime'].values
            position = (i * (n_bar + 1) + j + 1) * width
            handles.append(ax_bar.bar(position, height=np.mean(runtimes_1),
                                      width=width, color=COLOR[j], label=name,
                                      hatch='//' if strategy == 'lgcd' else '')
                           )
            ax_bar.plot(
                np.ones_like(runtimes_1) * position,
                runtimes_1, '_', color='k')
            n_jobs = df['n_jobs'].unique()
            n_jobs.sort()
            # mean runtime per worker count for the scaling curve
            runtimes_scale = []
            runtimes_scale_mean = []
            for n in n_jobs:
                runtimes_scale.append(df[df['n_jobs'] == n]['runtime'].values)
                runtimes_scale_mean.append(np.mean(runtimes_scale[-1]))
            runtimes_scale_mean = np.array(runtimes_scale_mean)
            if strategy != 'random':
                t = np.logspace(0, np.log2(2 * n_jobs.max()), 3, base=2)
                R0 = runtimes_scale_mean.max()
                # Linear and quadratic reference lines
                p = 1 if strategy == 'lgcd' else 2
                ax_scaling.plot(t, R0 / t ** p, 'k--', linewidth=1)
                tt = 2
                bbox = None  # dict(facecolor="white", edgecolor="white")
                if strategy == 'lgcd':
                    ax_scaling.text(tt, 1.4 * R0 / tt, "linear", rotation=-14,
                                    bbox=bbox, fontsize=12)
                    name_ = "DiCoDiLe-$Z$"
                else:
                    ax_scaling.text(tt, 1.4 * R0 / tt**2, "quadratic",
                                    rotation=-25, bbox=bbox, fontsize=12)
                    name_ = "DICOD"
                ax_scaling.plot(n_jobs, runtimes_scale_mean, style,
                                label=name_, zorder=10, markersize=8)
            # for i, n in enumerate(n_jobs):
            #     x = np.array(runtimes_scale[i])
            #     ax_scaling.plot(np.ones(value.shape) * n, value, 'k_')
        if n_times == 150:
            y_lim = (.5, 1e3)
        else:
            y_lim = (2, 2e4)
        ax_scaling.vlines(n_times / 4, *y_lim, 'g', '-.')
        ax_scaling.set_ylim(y_lim)
        ax_scaling.set_xscale('log')
        ax_scaling.set_yscale('log')
        ax_scaling.set_xlim((1, 75))
        ax_scaling.grid(True, which='both', axis='x', alpha=.5)
        ax_scaling.grid(True, which='major', axis='y', alpha=.5)
        # ax_scaling.set_xticks(n_jobs)
        # ax_scaling.set_xticklabels(n_jobs, fontsize=12)
        ax_scaling.set_ylabel("Runtime [sec]", fontsize=12)
        ax_scaling.set_xlabel("# workers $W$", fontsize=12)
        ax_scaling.legend(fontsize=14)
        fig_scaling.tight_layout()
        fig_scaling.savefig(f"benchmarks_results/scaling_T{n_times}.pdf",
                            dpi=300, bbox_inches='tight', pad_inches=0)
    ax_bar.set_ylabel("Runtime [sec]", fontsize=12)
    ax_bar.set_yscale('log')
    ax_bar.set_xticks(xticks)
    ax_bar.set_xticklabels(labels, fontsize=12)
    ax_bar.set_ylim(1, 2e4)
    # NOTE(review): `handles` only holds the bars of the *last* n_times
    # group here; since labels repeat across groups this looks intended,
    # but confirm.
    ax_bar.legend(bbox_to_anchor=(-.02, 1.02, 1., .3), loc="lower left",
                  handles=handles, ncol=3, fontsize=14, borderaxespad=0.)
    fig.tight_layout()
    fig.savefig("benchmarks_results/CD_strategies_comparison.png", dpi=300,
                bbox_inches='tight', pad_inches=0)
    plt.show()
if __name__ == "__main__":
    # Signal lengths (in units of L) and the strategies to compare:
    # (strategy id, legend label, marker style).
    list_n_times = [150, 750]
    strategies = [
        ('greedy', 'Greedy', 's-'),
        ('random', 'Random', "h-"),
        ('lgcd', "LGCD", 'o-')
    ]
    plot_scaling_1d_benchmark(strategies, list_n_times)
| [
"numpy.ones_like",
"numpy.mean",
"pandas.read_csv",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.show"
] | [((337, 382), 'matplotlib.pyplot.figure', 'plt.figure', (['"""comparison CD"""'], {'figsize': '(6, 3.5)'}), "('comparison CD', figsize=(6, 3.5))\n", (347, 382), True, 'import matplotlib.pyplot as plt\n'), ((4643, 4653), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4651, 4653), True, 'import matplotlib.pyplot as plt\n'), ((535, 585), 'matplotlib.pyplot.figure', 'plt.figure', (['f"""Scaling T={n_times}"""'], {'figsize': '(6, 3)'}), "(f'Scaling T={n_times}', figsize=(6, 3))\n", (545, 585), True, 'import matplotlib.pyplot as plt\n'), ((2090, 2119), 'numpy.array', 'np.array', (['runtimes_scale_mean'], {}), '(runtimes_scale_mean)\n', (2098, 2119), True, 'import numpy as np\n'), ((1072, 1113), 'pandas.read_csv', 'pandas.read_csv', (['csv_name'], {'names': 'col_name'}), '(csv_name, names=col_name)\n', (1087, 1113), False, 'import pandas\n'), ((1655, 1679), 'numpy.ones_like', 'np.ones_like', (['runtimes_1'], {}), '(runtimes_1)\n', (1667, 1679), True, 'import numpy as np\n'), ((2027, 2054), 'numpy.mean', 'np.mean', (['runtimes_scale[-1]'], {}), '(runtimes_scale[-1])\n', (2034, 2054), True, 'import numpy as np\n'), ((1404, 1423), 'numpy.mean', 'np.mean', (['runtimes_1'], {}), '(runtimes_1)\n', (1411, 1423), True, 'import numpy as np\n')] |
from pppr import aabb
import numpy as np
from pak.datasets.MOT import MOT16
from pak import utils
from pppr import aabb
from time import time
from cselect import color as cs
# ===========================================
# Helper functions
# ===========================================
def remove_negative_pairs(Dt, W, H, is_gt_trajectory=False):
    """Drop rows whose bounding box is not fully inside the W x H image.

    is_gt_trajectory: {boolean} if true the rows use the ground-truth
        trajectory layout (frame, pid, x, y, w, h); otherwise the layout
        is detected from the column count (7 columns include a pid).
    """
    def _fits(x, y, w, h):
        # top-left non-negative, bottom-right strictly inside the image
        return x >= 0 and y >= 0 and x + w < W and y + h < H

    kept = []
    if is_gt_trajectory:
        for frame, pid, x, y, w, h in Dt:
            if _fits(x, y, w, h):
                kept.append((frame, pid, x, y, w, h))
    elif Dt.shape[1] == 7:
        for frame, pid, x, y, w, h, score in Dt:
            if _fits(x, y, w, h):
                kept.append((frame, pid, x, y, w, h, score))
    else:
        for frame, x, y, w, h, score in Dt:
            if _fits(x, y, w, h):
                kept.append((frame, x, y, w, h, score))
    return np.array(kept)
def get_visible_pedestrains(Y_gt, frame):
    """Return all ground-truth rows recorded for the given frame number."""
    return utils.extract_eq(Y_gt, col=0, value=frame)
def get_visible_pedestrains_det(Y_det, frame):
    """Return all detection rows recorded for the given frame number."""
    return utils.extract_eq(Y_det, col=0, value=frame)
def get_center(d):
    """Center point of a full detection.

    full_detection: (frame, pid, x, y, w, h, score)
    """
    _, _, x, y, w, h = d[:6]
    return x + w / 2, y + h / 2
# ===========================================
# Experiments implementation
# ===========================================
verbose = False
class MOT16_Experiments:
    """Loads MOT16-02 and MOT16-11 and pre-computes per-video detection,
    ground-truth and color-lookup data for the experiments."""

    def __init__(self, folder):
        """ For the experiments we need MOT16-02 and
            MOT16-11 for the analysis
            The detections will have the following structure:
            0: frame_nbr
            1: person id
            2: detection top-left x position
            3: detection top-left y position
            4: detection bb width
            5: detection bb height
            6: detection output score
        """
        global verbose
        mot16 = MOT16(folder, verbose=verbose)
        mot16_02 = mot16.get_train("MOT16-02", memmapped=True)
        mot16_11 = mot16.get_train("MOT16-11", memmapped=True)
        self.mot16_02_X = mot16_02[0]
        self.mot16_11_X = mot16_11[0]
        detections_per_video = []
        gt_per_video = []
        true_detections_per_video = []
        true_detections_per_video_no_pid = []
        color_lookups_per_video = []
        for X, Y_det, Y_gt in [mot16_02, mot16_11]:
            # --- run for each video ---
            # this is not the most efficient way but not important atm..
            _, H, W, _ = X.shape
            Y_gt = MOT16.simplify_gt(Y_gt)
            gt_bbs = []
            all_detections = []
            detections_per_video.append(all_detections)
            true_detections = []
            true_detections_per_video.append(true_detections)
            true_detections_no_pid = []
            true_detections_per_video_no_pid.append(true_detections_no_pid)
            gt_per_video.append(gt_bbs)
            frames = X.shape[0]
            TIMING_start = time()
            for frame in range(1, frames+1):
                y_gt = get_visible_pedestrains(Y_gt, frame)
                y_det = get_visible_pedestrains_det(Y_det, frame)
                for ped_ in y_gt:
                    j, pid, l_gt, t_gt, w_gt, h_gt = ped_
                    gt_bbs.append((j, pid, l_gt, t_gt, w_gt, h_gt))
                for ped in y_det:
                    i, _,l, t, w, h, score, _, _,_ = ped
                    # keep only detections whose box lies fully inside the image
                    if l >= 0 and t >= 0 and l + w < W and \
                        t + h < H:
                        all_detections.append(
                            np.array([i, l, t, w, h, score])
                        )
                        # a detection is "true" when it overlaps a gt box
                        # of the same frame with IoU > 0.5
                        for ped_ in y_gt:
                            j, pid, l_gt, t_gt, w_gt, h_gt = ped_
                            assert(i == j)
                            if aabb.IoU((l,t,w,h), (l_gt,t_gt,w_gt,h_gt)) > 0.5:
                                true_detections.append(
                                    np.array([i, pid, l, t, w, h, score]))
                                true_detections_no_pid.append(
                                    np.array([i, l, t, w, h, score]))
            TIMING_end = time()
            if verbose:
                print("Handling " + str(frames) + " frames in " + \
                    str(TIMING_end - TIMING_start) + " seconds")
            # --- figure out coloring ---
            # one random color per person id found in the true detections
            Y = np.array(true_detections)
            U = np.unique(Y[:,1])
            Color_lookup = {}
            Colors = cs.lincolor(len(U), random_sat=True, random_val=True)
            #Colors = cs.poisson_disc_sampling_Lab(len(U))
            Colors = np.array(Colors, 'float32') / 255
            for u,c in zip(U, Colors):
                Color_lookup[u] = c
            color_lookups_per_video.append(Color_lookup)
        self.mot16_02_gt_bbs = np.array(gt_per_video[0])
        self.mot16_11_gt_bbs = np.array(gt_per_video[1])
        self.mot16_02_detections = np.array(detections_per_video[0])
        self.mot16_11_detections = np.array(detections_per_video[1])
        self.mot16_02_true_detections = np.array(true_detections_per_video[0])
        self.mot16_11_true_detections = np.array(true_detections_per_video[1])
        self.mot16_02_true_detections_no_pid = \
            np.array(true_detections_per_video_no_pid[0])
        self.mot16_11_true_detections_no_pid = \
            np.array(true_detections_per_video_no_pid[1])
        self.mot16_02_color_lookup = color_lookups_per_video[0]
        self.mot16_11_color_lookup = color_lookups_per_video[1]

    def get_MOT16_02_gt_trajectories(self, as_point=False):
        """Ground-truth trajectories of MOT16-02 (centers if as_point)."""
        return self.get_detections_as_trajectories(
            self.mot16_02_gt_bbs, as_point)

    def get_MOT16_02_trajectories(self, as_point=False):
        """True-detection trajectories of MOT16-02 (centers if as_point)."""
        return self.get_detections_as_trajectories(
            self.mot16_02_true_detections, as_point)

    def get_MOT16_11_gt_trajectories(self, as_point=False):
        """Ground-truth trajectories of MOT16-11 (centers if as_point)."""
        return self.get_detections_as_trajectories(
            self.mot16_11_gt_bbs, as_point)

    def get_MOT16_11_trajectories(self, as_point=False):
        """True-detection trajectories of MOT16-11 (centers if as_point)."""
        return self.get_detections_as_trajectories(
            self.mot16_11_true_detections, as_point)

    def get_detections_as_trajectories(self, true_detections, as_point=False):
        """Convert detection rows to (frame, pid, x, y[, w, h]) rows."""
        trajectories = []
        for d in true_detections:
            frame = d[0]
            pid = d[1]
            if as_point:
                x,y = get_center(d)
                trajectories.append((frame, pid, x, y))
            else:
                x, y, w, h = d[2], d[3], d[4], d[5]
                trajectories.append((frame, pid, x, y, w, h))
        return np.array(trajectories)

    def plot_frame_MOT16_02(self, ax, frame, with_gt=False):
        """Plot one MOT16-02 frame with its true detections."""
        self.plot_frame(ax,
            self.mot16_02_X,
            self.mot16_02_true_detections,
            self.mot16_02_color_lookup,
            frame, with_gt, self.mot16_02_gt_bbs)

    def plot_frame_MOT16_11(self, ax, frame, with_gt=False):
        """Plot one MOT16-11 frame with its true detections."""
        self.plot_frame(ax,
            self.mot16_11_X,
            self.mot16_11_true_detections,
            self.mot16_11_color_lookup,
            frame, with_gt, self.mot16_11_gt_bbs)

    def plot_frame(self, ax, X, true_detections, id_colors, frame,
                   with_gt, gt_bbs):
        """ plots the frame with its true detections
        """
        Y = utils.extract_eq(true_detections, col=0, value=frame)
        X = X[frame]
        ax.imshow(X)
        for _, pid, x, y, w, h, score in Y:
            # annotate each detection with its person id
            ax.text(x, y, str(int(pid)), color='white', fontsize=17,
                bbox={'facecolor': 'red', 'alpha': 0.5})
            bbX, bbY = utils.bb_to_plt_plot(x, y, w, h)
            ax.plot(bbX, bbY, linewidth=2, color=id_colors[pid])
        if with_gt:
            # ground-truth boxes drawn as dashed green rectangles
            Y = utils.extract_eq(gt_bbs, col=0, value=frame)
            for _, pid, x, y, w, h in Y:
                bbX, bbY = utils.bb_to_plt_plot(x, y, w, h)
                ax.plot(bbX, bbY, 'g--', linewidth=4)
# -------------
| [
"numpy.unique",
"pak.datasets.MOT.MOT16",
"pak.utils.extract_eq",
"pak.utils.bb_to_plt_plot",
"numpy.array",
"pak.datasets.MOT.MOT16.simplify_gt",
"pppr.aabb.IoU",
"time.time"
] | [((1112, 1128), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (1120, 1128), True, 'import numpy as np\n'), ((1190, 1232), 'pak.utils.extract_eq', 'utils.extract_eq', (['Y_gt'], {'col': '(0)', 'value': 'frame'}), '(Y_gt, col=0, value=frame)\n', (1206, 1232), False, 'from pak import utils\n'), ((1453, 1496), 'pak.utils.extract_eq', 'utils.extract_eq', (['Y_det'], {'col': '(0)', 'value': 'frame'}), '(Y_det, col=0, value=frame)\n', (1469, 1496), False, 'from pak import utils\n'), ((2387, 2417), 'pak.datasets.MOT.MOT16', 'MOT16', (['folder'], {'verbose': 'verbose'}), '(folder, verbose=verbose)\n', (2392, 2417), False, 'from pak.datasets.MOT import MOT16\n'), ((5287, 5312), 'numpy.array', 'np.array', (['gt_per_video[0]'], {}), '(gt_per_video[0])\n', (5295, 5312), True, 'import numpy as np\n'), ((5344, 5369), 'numpy.array', 'np.array', (['gt_per_video[1]'], {}), '(gt_per_video[1])\n', (5352, 5369), True, 'import numpy as np\n'), ((5406, 5439), 'numpy.array', 'np.array', (['detections_per_video[0]'], {}), '(detections_per_video[0])\n', (5414, 5439), True, 'import numpy as np\n'), ((5475, 5508), 'numpy.array', 'np.array', (['detections_per_video[1]'], {}), '(detections_per_video[1])\n', (5483, 5508), True, 'import numpy as np\n'), ((5550, 5588), 'numpy.array', 'np.array', (['true_detections_per_video[0]'], {}), '(true_detections_per_video[0])\n', (5558, 5588), True, 'import numpy as np\n'), ((5629, 5667), 'numpy.array', 'np.array', (['true_detections_per_video[1]'], {}), '(true_detections_per_video[1])\n', (5637, 5667), True, 'import numpy as np\n'), ((5730, 5775), 'numpy.array', 'np.array', (['true_detections_per_video_no_pid[0]'], {}), '(true_detections_per_video_no_pid[0])\n', (5738, 5775), True, 'import numpy as np\n'), ((5837, 5882), 'numpy.array', 'np.array', (['true_detections_per_video_no_pid[1]'], {}), '(true_detections_per_video_no_pid[1])\n', (5845, 5882), True, 'import numpy as np\n'), ((7107, 7129), 'numpy.array', 'np.array', (['trajectories'], 
{}), '(trajectories)\n', (7115, 7129), True, 'import numpy as np\n'), ((7915, 7968), 'pak.utils.extract_eq', 'utils.extract_eq', (['true_detections'], {'col': '(0)', 'value': 'frame'}), '(true_detections, col=0, value=frame)\n', (7931, 7968), False, 'from pak import utils\n'), ((3023, 3046), 'pak.datasets.MOT.MOT16.simplify_gt', 'MOT16.simplify_gt', (['Y_gt'], {}), '(Y_gt)\n', (3040, 3046), False, 'from pak.datasets.MOT import MOT16\n'), ((3469, 3475), 'time.time', 'time', ([], {}), '()\n', (3473, 3475), False, 'from time import time\n'), ((4619, 4625), 'time.time', 'time', ([], {}), '()\n', (4623, 4625), False, 'from time import time\n'), ((4844, 4869), 'numpy.array', 'np.array', (['true_detections'], {}), '(true_detections)\n', (4852, 4869), True, 'import numpy as np\n'), ((4886, 4904), 'numpy.unique', 'np.unique', (['Y[:, 1]'], {}), '(Y[:, 1])\n', (4895, 4904), True, 'import numpy as np\n'), ((8210, 8242), 'pak.utils.bb_to_plt_plot', 'utils.bb_to_plt_plot', (['x', 'y', 'w', 'h'], {}), '(x, y, w, h)\n', (8230, 8242), False, 'from pak import utils\n'), ((8345, 8389), 'pak.utils.extract_eq', 'utils.extract_eq', (['gt_bbs'], {'col': '(0)', 'value': 'frame'}), '(gt_bbs, col=0, value=frame)\n', (8361, 8389), False, 'from pak import utils\n'), ((5089, 5116), 'numpy.array', 'np.array', (['Colors', '"""float32"""'], {}), "(Colors, 'float32')\n", (5097, 5116), True, 'import numpy as np\n'), ((8458, 8490), 'pak.utils.bb_to_plt_plot', 'utils.bb_to_plt_plot', (['x', 'y', 'w', 'h'], {}), '(x, y, w, h)\n', (8478, 8490), False, 'from pak import utils\n'), ((4071, 4103), 'numpy.array', 'np.array', (['[i, l, t, w, h, score]'], {}), '([i, l, t, w, h, score])\n', (4079, 4103), True, 'import numpy as np\n'), ((4296, 4344), 'pppr.aabb.IoU', 'aabb.IoU', (['(l, t, w, h)', '(l_gt, t_gt, w_gt, h_gt)'], {}), '((l, t, w, h), (l_gt, t_gt, w_gt, h_gt))\n', (4304, 4344), False, 'from pppr import aabb\n'), ((4430, 4467), 'numpy.array', 'np.array', (['[i, pid, l, t, w, h, score]'], {}), '([i, 
pid, l, t, w, h, score])\n', (4438, 4467), True, 'import numpy as np\n'), ((4560, 4592), 'numpy.array', 'np.array', (['[i, l, t, w, h, score]'], {}), '([i, l, t, w, h, score])\n', (4568, 4592), True, 'import numpy as np\n')] |
#############################################################################
# Import #
#############################################################################
import os
import random
import PIL.Image as Image
from tqdm import tqdm
import numpy as np
import scipy.io
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
from torch.autograd import Function
import torch.nn.functional as F
class DotDict(dict):
    """Dictionary whose items can also be read/written as attributes.

    Missing attributes follow ``dict.get`` semantics and yield ``None``
    instead of raising ``AttributeError``.
    """
    def __getattr__(self, name):
        # Only called when normal attribute lookup fails; mirrors dict.get.
        return self.get(name)
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__
#############################################################################
#                              Hyperparameters                              #
#############################################################################
opt = DotDict()
opt.dataset = 'celebA'  # 'celebA' or '3Dchairs'
opt.dataPath = './data'
# Input space
opt.nc = 3 # number of input channels
opt.sizeX = 64 # size of the image
opt.sizeS = 64 # size of random noise S vectors
opt.sizeZ = 512 # size of random noise Z vectors
# Convolution settings
opt.nf = 64 # base number of filter in G and D
# Hardward settings
opt.workers = 4 # workers data for preprocessing
opt.cuda = True # use CUDA
opt.gpu = 0 # GPU id
# Optimisation scheme
opt.batchSize = 128 # minibatch size
opt.nIteration = 1000001 # number of training iterations
opt.lrG = 2e-4 # learning rate for G
opt.lrD = 5e-5 # learning rate for D
# Loss weights used in the training objective below.
opt.recW = 0.5   # reconstruction (y1 vs x1)
opt.swap1W = 1   # same-identity swap reconstruction (y2 vs x1)
opt.swap2W = 1   # adversarial weight on the swapped / prior samples
opt.classW = 0   # NOTE(review): never read anywhere in this script
opt.klzW = .1    # KL divergence weight on the z posterior
# Save/Load networks
opt.checkpointDir = '.' # checkpoints directory
opt.load = 0 # if > 0, load given checkpoint
opt.checkpointFreq = 5 # frequency of checkpoints (in number of epochs)
#############################################################################
#                              Loading Weights                              #
#############################################################################
# Checkpoint paths; empty strings mean "train from scratch".
opt.netEnc = ''
opt.netDec = ''
opt.netD = ''
if opt.load > 0:
    opt.netEnc = '%s/netEnc_%d.pth' % (opt.checkpointDir, opt.load)
    opt.netDec = '%s/netDec_%d.pth' % (opt.checkpointDir, opt.load)
    opt.netD = '%s/netD_%d.pth' % (opt.checkpointDir, opt.load)
#############################################################################
#                                RandomSeed                                 #
#############################################################################
opt.manualSeed = random.randint(1, 10000) # fix seed
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
#############################################################################
#                                   CUDA                                    #
#############################################################################
cudnn.benchmark = True
if torch.cuda.is_available() and not opt.cuda:
    print("WARNING: You have a CUDA device, so you should probably run with --cuda")
if opt.cuda:
    torch.cuda.set_device(opt.gpu)
#############################################################################
#                                Dataloader                                 #
#############################################################################
# Number of distinct identities (discriminator embedding size).
# NOTE(review): presumably matches the identity files below -- confirm.
if opt.dataset == 'celebA':
    opt.nClass = 10000
elif opt.dataset == '3Dchairs':
    opt.nClass = 1300
class PairCelebADataset(torch.utils.data.Dataset):
    """Pairs of celebA images sharing the same identity.

    Each item is ``(img1, img2, label)`` where ``img1`` is the indexed file
    and ``img2`` is a random file with the same identity (possibly the same
    file).
    """
    def __init__(self, dataPath, labelFile, transform=None):
        """
        dataPath  -- directory containing the image files
        labelFile -- text file with one "<filename> <identity>" pair per line
        transform -- callable applied to each PIL image; defaults lazily to
                     transforms.ToTensor().  (The old default evaluated
                     transforms.ToTensor() at class-definition time -- the
                     shared-default-argument anti-pattern.)
        """
        super(PairCelebADataset, self).__init__()
        self.dataPath = dataPath
        with open(labelFile, 'r') as f:
            lines = np.array([p.split() for p in f.readlines()])
        self.files = lines[:, 0]
        self.labels = lines[:, 1].astype(int)
        self.transform = transform if transform is not None else transforms.ToTensor()
    def __len__(self):
        return len(self.files)
    def __getitem__(self, idx):
        label = self.labels[idx]
        file1 = self.files[idx]
        # Second view: any file of the same identity.
        file2 = np.random.choice(self.files[self.labels == label])
        img1 = self.transform(Image.open(os.path.join(self.dataPath, file1)))
        img2 = self.transform(Image.open(os.path.join(self.dataPath, file2)))
        return img1, img2, torch.LongTensor(1).fill_(int(label))
class Pair3DchairsDataset(torch.utils.data.Dataset):
    """Pairs of rendered views sampled from the same 3D-chair folder.

    One sub-folder per chair model; the folder index serves as the label.
    """
    def __init__(self, dataPath, transform=None):
        """
        dataPath  -- directory containing one sub-folder of renders per chair
        transform -- callable applied to each PIL image; defaults lazily to
                     transforms.ToTensor().  (The old default evaluated
                     transforms.ToTensor() at class-definition time -- the
                     shared-default-argument anti-pattern.)
        """
        super(Pair3DchairsDataset, self).__init__()
        self.dataPath = dataPath
        self.folders = np.array(os.listdir(dataPath))
        self.transform = transform if transform is not None else transforms.ToTensor()
    def __len__(self):
        return len(self.folders)
    def __getitem__(self, idx):
        # Two random (possibly identical) views of the same chair.
        idA, idB = np.random.choice(os.listdir(os.path.join(self.dataPath, self.folders[idx])), 2)
        label = idx
        imgA = Image.open(os.path.join(self.dataPath, self.folders[idx], idA))
        imgB = Image.open(os.path.join(self.dataPath, self.folders[idx], idB))
        imgA = self.transform(imgA)
        imgB = self.transform(imgB)
        return imgA, imgB, torch.LongTensor(1).fill_(int(label))
#############################################################################
#                                 Datasets                                  #
#############################################################################
# Both pipelines center-crop, resize to sizeX, and normalize to [-1, 1]
# (matching the tanh output range of the decoder).
if opt.dataset == 'celebA':
    dataset = PairCelebADataset(os.path.join(opt.dataPath, "celebA/aligned"),
                                os.path.join(opt.dataPath, "celebA/identity_celebA_train.txt"),
                                transforms.Compose([transforms.CenterCrop(128),
                                                    transforms.Resize(opt.sizeX),
                                                    transforms.ToTensor(),
                                                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                                ]))
    testset = PairCelebADataset(os.path.join(opt.dataPath, "celebA/aligned"),
                                os.path.join(opt.dataPath, "celebA/identity_celebA_val.txt"),
                                transforms.Compose([transforms.CenterCrop(128),
                                                    transforms.Resize(opt.sizeX),
                                                    transforms.ToTensor(),
                                                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                                ]))
elif opt.dataset == '3Dchairs':
    dataset = Pair3DchairsDataset(os.path.join(opt.dataPath, "rendered_chairs/train"),
                                  transforms.Compose([transforms.CenterCrop(300),
                                                      transforms.Resize(opt.sizeX),
                                                      transforms.ToTensor(),
                                                      transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                                  ]))
    testset = Pair3DchairsDataset(os.path.join(opt.dataPath, "rendered_chairs/val"),
                                  transforms.Compose([transforms.CenterCrop(300),
                                                      transforms.Resize(opt.sizeX),
                                                      transforms.ToTensor(),
                                                      transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                                  ]))
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize, shuffle=True, num_workers=int(opt.workers))
#############################################################################
# weights init #
#############################################################################
def weights_init(m):
    """DCGAN-style weight init, intended for use via ``net.apply(weights_init)``.

    Conv layers get N(0, 0.02) weights; BatchNorm layers get N(1, 0.02)
    weights and zero bias when affine parameters exist.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        # `if m.weight:` on a multi-element Parameter raises RuntimeError
        # ("ambiguous truth value"); test for presence with `is not None`.
        if m.weight is not None:
            m.weight.data.normal_(1.0, 0.02)
        # Guard the bias too: with affine=False (as used by the nets in this
        # file) both weight and bias are None.
        if m.bias is not None:
            m.bias.data.fill_(0)
#############################################################################
# Modules #
#############################################################################
class _encoder(nn.Module):
def __init__(self, nc, zSize, sSize, nf, xSize):
super(_encoder, self).__init__()
self.mods = nn.Sequential(nn.Conv2d(nc, nf, 3, 1, 1),
nn.BatchNorm2d(nf, 0.1, affine=False),
nn.ReLU(),
nn.Conv2d(nf, nf, 3, 1, 1),
nn.BatchNorm2d(nf, 0.1, affine=False),
nn.ReLU(),
nn.Conv2d(nf, 2*nf, 2, 2),
nn.BatchNorm2d(2*nf, 0.1, affine=False),
nn.ReLU(),
nn.Conv2d(2*nf, 2*nf, 3, 1, 1),
nn.BatchNorm2d(2*nf, 0.1, affine=False),
nn.ReLU(),
nn.Conv2d(2*nf, 4*nf, 2, 2),
nn.BatchNorm2d(4*nf, 0.1, affine=False),
nn.ReLU(),
nn.Conv2d(4*nf, 4*nf, 3, 1, 1),
nn.BatchNorm2d(4*nf, 0.1, affine=False),
nn.ReLU(),
nn.Conv2d(4*nf, 8*nf, 2, 2),
nn.BatchNorm2d(8*nf, 0.1, affine=False),
nn.ReLU(),
nn.Conv2d(8*nf, 8*nf, 3, 1, 1),
nn.BatchNorm2d(8*nf, 0.1, affine=False),
nn.ReLU(),)
npix = nf * 8 * (xSize//8) * (xSize//8)
self.modsZ = nn.Linear(npix, zSize*2)
self.modsS = nn.Linear(npix, sSize)
def forward(self, x):
x = self.mods(x)
x = x.view(x.size(0), -1)
z = self.modsZ(x)
s = self.modsS(x)
z = z.view(z.size(0), 2, -1)
return z, s
class _decoder(nn.Module):
def __init__(self, nc, zSize, sSize, nf, xSize):
super(_decoder, self).__init__()
npix = nf * 8 * 4 * 4
self.modsZ = nn.Linear(zSize, npix)
self.modsS = nn.Linear(sSize, npix)
self.mods = nn.Sequential(nn.Conv2d(nf*8, nf*8, 3, 1, 1),
nn.BatchNorm2d(nf*8, 0.1, affine=False),
nn.ReLU(),
nn.ConvTranspose2d(nf*8, nf*4, 2, 2),
nn.BatchNorm2d(nf*4, 0.1, affine=False),
nn.ReLU(),
nn.Conv2d(nf*4, nf*4, 3, 1, 1),
nn.BatchNorm2d(nf*4, 0.1, affine=False),
nn.ReLU(),
nn.ConvTranspose2d(nf*4, nf*2, 2, 2),
nn.BatchNorm2d(nf*2, 0.1, affine=False),
nn.ReLU(),
nn.Conv2d(nf*2, nf*2, 3, 1, 1),
nn.BatchNorm2d(nf*2, 0.1, affine=False),
nn.ReLU(),
nn.ConvTranspose2d(nf*2, nf, 2, 2),
nn.BatchNorm2d(nf, 0.1, affine=False),
nn.ReLU(),
nn.Conv2d(nf, nf, 3, 1, 1),
nn.BatchNorm2d(nf, 0.1, affine=False),
nn.ReLU(),
nn.Conv2d(nf, nc, 3, 1, 1))
def forward(self, z, s):
z = self.modsZ(z)
s = self.modsS(s)
x = z + s
x = x.view(x.size(0), -1, 4, 4)
x = self.mods(x)
return F.tanh(x)
class _discriminator(nn.Module):
def __init__(self, nc, nf, xSize, nClass):
super(_discriminator, self).__init__()
self.xSize = xSize
self.embeddings = nn.ModuleList([nn.Embedding(nClass, nf*1*(xSize//8)*(xSize//8)),
nn.Embedding(nClass, nf*2*(xSize//8)*(xSize//8)),
nn.Embedding(nClass, nf*4*(xSize//8)*(xSize//8))])
self.mods = nn.ModuleList([nn.Sequential(nn.Conv2d(nc, nf, 3, 1, 1),
nn.LeakyReLU(.2),
nn.Conv2d(nf, nf, 2, 2),),
nn.Sequential(nn.BatchNorm2d(nf),
nn.LeakyReLU(.2),
nn.Conv2d(nf, nf*2, 2, 2),
nn.BatchNorm2d(nf*2),
nn.LeakyReLU(.2),
nn.Conv2d(nf*2, nf*2, 3, 1, 1),
nn.BatchNorm2d(nf*2),
nn.LeakyReLU(.2),),
nn.Sequential(nn.BatchNorm2d(nf*2),
nn.LeakyReLU(.2),
nn.Conv2d(nf*2, nf*4, 2, 2),
nn.BatchNorm2d(nf*4),
nn.LeakyReLU(.2),
nn.Conv2d(nf*4, nf*4, 3, 1, 1),),
nn.Sequential(nn.BatchNorm2d(nf*4),
nn.LeakyReLU(.2),
nn.Conv2d(nf*4, nf*4, 3, 1, 1),
nn.BatchNorm2d(nf*4),
nn.LeakyReLU(.2),),
nn.Linear(nf*4*(xSize//8)*(xSize//8), 1),
])
def forward(self, x, sid):
x = self.mods[0](x)
s0 = self.embeddings[0](sid)
s0 = s0.view(x.size(0), x.size(1), self.xSize//8, self.xSize//8)
s0 = nn.functional.upsample(s0, scale_factor=4, mode='nearest')
x = self.mods[1](x + s0)
s1 = self.embeddings[1](sid)
s1 = s1.view(x.size(0), x.size(1), self.xSize//8, self.xSize//8)
s1 = nn.functional.upsample(s1, scale_factor=2, mode='nearest')
x = self.mods[2](x + s1)
s2 = self.embeddings[2](sid)
s2 = s2.view(x.size(0), x.size(1), self.xSize//8, self.xSize//8)
x = self.mods[3](x + s2)
x = x.view(x.size(0), -1)
x = F.dropout(x)
x = self.mods[4](x)
return x
#############################################################################
# Modules - DC #
#############################################################################
class _dcencoder(nn.Module):
def __init__(self, nc, zSize, sSize, nf, xSize):
super(_dcencoder, self).__init__()
self.mods = nn.Sequential(nn.Conv2d(nc, nf, 4, 2, 1),
nn.BatchNorm2d(nf, 0.1, affine=False),
nn.ReLU(),
nn.Conv2d(nf, 2*nf, 4, 2, 1),
nn.BatchNorm2d(2*nf, 0.1, affine=False),
nn.ReLU(),
nn.Conv2d(2*nf, 4*nf, 4, 2, 1),
nn.BatchNorm2d(4*nf, 0.1, affine=False),
nn.ReLU(),
nn.Conv2d(4*nf, 8*nf, 4, 2, 1),
nn.BatchNorm2d(8*nf, 0.1, affine=False),
nn.ReLU(),)
npix = nf * 8 * (xSize//16) * (xSize//16)
self.modsZ = nn.Linear(npix, zSize*2)
self.modsS = nn.Linear(npix, sSize)
def forward(self, x):
x = self.mods(x)
x = x.view(x.size(0), -1)
z = self.modsZ(x)
s = self.modsS(x)
z = z.view(z.size(0), 2, -1)
return z, s
class _dcdecoder(nn.Module):
def __init__(self, nc, zSize, sSize, nf, xSize):
super(_dcdecoder, self).__init__()
npix = nf * 8 * (xSize//16) * (xSize//16)
self.modsZ = nn.Linear(zSize, npix)
self.modsS = nn.Linear(sSize, npix)
self.mods = nn.Sequential(nn.BatchNorm2d(nf*8, 0.1, affine=False),
nn.ReLU(),
nn.ConvTranspose2d(nf*8, nf*4, 4, 2, 1),
nn.BatchNorm2d(nf*4, 0.1, affine=False),
nn.ReLU(),
nn.ConvTranspose2d(nf*4, nf*2, 4, 2, 1),
nn.BatchNorm2d(nf*2, 0.1, affine=False),
nn.ReLU(),
nn.ConvTranspose2d(nf*2, nf, 4, 2, 1),
nn.BatchNorm2d(nf, 0.1, affine=False),
nn.ReLU(),
nn.ConvTranspose2d(nf, nc, 4, 2, 1))
def forward(self, z, s):
z = self.modsZ(z)
s = self.modsS(s)
x = z + s
x = x.view(x.size(0), -1, 4, 4)
x = self.mods(x)
return F.tanh(x)
class _dcdiscriminator(nn.Module):
def __init__(self, nc, nf, xSize, nClass):
super(_dcdiscriminator, self).__init__()
self.xSize = xSize
self.embeddings = nn.ModuleList([nn.Embedding(nClass, nf*1*(xSize//8)*(xSize//8)),
nn.Embedding(nClass, nf*1*(xSize//8)*(xSize//8)),
nn.Embedding(nClass, nf*2*(xSize//8)*(xSize//8)),
nn.Embedding(nClass, nf*4*(xSize//8)*(xSize//8))])
self.mods = nn.ModuleList([nn.Sequential(nn.Conv2d(nc, nf, 3, 1, 1),
nn.LeakyReLU(.2)),
nn.Sequential(nn.BatchNorm2d(nf),
nn.LeakyReLU(.2),
nn.Conv2d(nf, nf, 4, 2, 1)),
nn.Sequential(nn.BatchNorm2d(nf),
nn.LeakyReLU(.2),
nn.Conv2d(nf, nf*2, 4, 2, 1)),
nn.Sequential(nn.BatchNorm2d(nf*2),
nn.LeakyReLU(.2),
nn.Conv2d(nf*2, nf*4, 4, 2, 1)),
nn.Sequential(nn.BatchNorm2d(nf*4),
nn.LeakyReLU(.2),
nn.Conv2d(nf*4, nf*8, 4, 2, 1)),
nn.Linear(nf*8*(xSize//16)*(xSize//16), 1),
])
def forward(self, x, sid):
x = self.mods[0](x)
s0 = self.embeddings[0](sid)
s0 = s0.view(x.size(0), x.size(1), self.xSize//8, self.xSize//8)
s0 = nn.functional.upsample(s0, scale_factor=8, mode='nearest')
x = self.mods[1](x + s0)
s1 = self.embeddings[1](sid)
s1 = s1.view(x.size(0), x.size(1), self.xSize//8, self.xSize//8)
s1 = nn.functional.upsample(s1, scale_factor=4, mode='nearest')
x = self.mods[2](x + s1)
s2 = self.embeddings[2](sid)
s2 = s2.view(x.size(0), x.size(1), self.xSize//8, self.xSize//8)
s2 = nn.functional.upsample(s2, scale_factor=2, mode='nearest')
x = self.mods[3](x + s2)
s3 = self.embeddings[3](sid)
s3 = s3.view(x.size(0), x.size(1), self.xSize//8, self.xSize//8)
x = self.mods[4](x + s3)
x = x.view(x.size(0), -1)
x = F.dropout(x)
x = self.mods[5](x)
return x
def UnitGaussianKLDLoss(z):
    """Mean analytic KL divergence between N(mu, sigma^2) and N(0, 1).

    z[:, 0] holds the mean, z[:, 1] the log-variance (as produced by the
    encoders' modsZ head).
    """
    mu = z[:, 0]
    logvar = z[:, 1]
    kld = .5 * (logvar.exp() + mu * mu - logvar - 1)
    return kld.mean()
# Losses: BCE-with-logits for the adversarial game, MSE for reconstruction,
# analytic KL against the unit Gaussian prior for the z posterior.
lossD = nn.BCEWithLogitsLoss()
lossL = nn.MSELoss()
lossKL = UnitGaussianKLDLoss
# The DCGAN-style ("dc") variants are the ones actually instantiated;
# _encoder/_decoder/_discriminator above are unused here.
netEnc = _dcencoder(opt.nc, opt.sizeZ, opt.sizeS, opt.nf, opt.sizeX)
netDec = _dcdecoder(opt.nc, opt.sizeZ, opt.sizeS, opt.nf, opt.sizeX)
netD = _dcdiscriminator(opt.nc, opt.nf, opt.sizeX, opt.nClass)
#############################################################################
#                               Placeholders                                #
#############################################################################
# Pre-allocated staging tensors, resized to each batch inside the train loop.
x1 = torch.FloatTensor()
x2 = torch.FloatTensor()
x3 = torch.FloatTensor()
ids = torch.LongTensor()
eps = torch.FloatTensor()
zero = torch.FloatTensor(1,1).fill_(0)
# Soft adversarial targets (label smoothing): real -> 0.9, fake -> 0.1.
labelPos = torch.FloatTensor(1,1).fill_(.9)
labelNeg = torch.FloatTensor(1,1).fill_(.1)
#############################################################################
#                                 Test data                                 #
#############################################################################
# Fixed visualization batch: `views` random z endpoint pairs, linearly
# interpolated over `steps` points, later decoded with each test image's
# style code.
batch_test = 5
views = 5
steps = 5
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_test, shuffle=True, num_workers=int(opt.workers), drop_last=True)
x_test, _, _ = next(iter(testloader))
z_test = torch.FloatTensor(1, views, steps, opt.sizeZ)
z_test[:,:,0].normal_()
z_test[:,:,-1].normal_()
# NOTE(review): the step is (end - start) / steps while both endpoints stay
# fixed, so the spacing of the final hop differs slightly from the others.
zstep_test = (z_test[:,:,-1] - z_test[:,:,0]) / steps
for i in range(1, steps-1):
    z_test[:,:,i] = z_test[:,:,i-1] + zstep_test
z_test = z_test.repeat(batch_test,1,1,1).view(-1, opt.sizeZ)
#############################################################################
#                                  To Cuda                                  #
#############################################################################
# Move the networks and all staging/visualization tensors to the GPU.
if opt.cuda:
    netEnc.cuda()
    netDec.cuda()
    netD.cuda()
    x1 = x1.cuda()
    x2 = x2.cuda()
    x3 = x3.cuda()
    ids = ids.cuda()
    eps = eps.cuda()
    zero = zero.cuda()
    labelPos = labelPos.cuda()
    labelNeg = labelNeg.cuda()
    z_test = z_test.cuda()
    x_test = x_test.cuda()
#############################################################################
#                                 Optimizer                                 #
#############################################################################
# Adam with DCGAN betas; the discriminator uses a smaller learning rate.
optimizerEnc = optim.Adam(netEnc.parameters(), lr=opt.lrG, betas=(0.5, 0.999))
optimizerDec = optim.Adam(netDec.parameters(), lr=opt.lrG, betas=(0.5, 0.999))
optimizerD = optim.Adam(netD.parameters(), lr=opt.lrD, betas=(0.5, 0.999))
#############################################################################
#                                   Train                                   #
#############################################################################
# Main optimization loop.  Each minibatch yields (x1, x2): two images of the
# same identity.  x3/ids are the batch rolled by one position, pairing each
# sample with a (most likely) different identity.
print("Start Training")
iteration = opt.load * len(dataloader)
epoch = opt.load
while iteration <= opt.nIteration:
    log_dNeg = []
    log_dPos = []
    log_rec = []
    log_swap = []
    log_kl = []
    for x1_cpu, x2_cpu, ids_cpu in tqdm(dataloader):
        netEnc.train()
        netDec.train()
        netD.train()
        # Stage the CPU batch into the (possibly CUDA) placeholder tensors.
        x1.resize_(x1_cpu.size(0),x1_cpu.size(1),x1_cpu.size(2),x1_cpu.size(3)).copy_(x1_cpu)
        x2.resize_(x2_cpu.size(0),x2_cpu.size(1),x2_cpu.size(2),x2_cpu.size(3)).copy_(x2_cpu)
        # Roll x2 / its ids by one so x3 comes from a different identity.
        x3.resize_(x2_cpu.size(0),x2_cpu.size(1),x2_cpu.size(2),x2_cpu.size(3))
        x3[:-1].copy_(x2_cpu[1:])
        x3[-1].copy_(x2_cpu[0])
        ids.resize_(ids_cpu.size(0), ids_cpu.size(1))
        ids[:-1].copy_(ids_cpu[1:])
        ids[-1].copy_(ids_cpu[0])
        # Encode: pz* holds (mu, logvar); s* is the style/identity code.
        # (Variable/volatile is the legacy pre-0.4 autograd API.)
        pz1, s1 = netEnc(Variable(x1))
        pz2, s2 = netEnc(Variable(x2))
        pz3, s3 = netEnc(Variable(x3))
        # Reparameterization trick.  NOTE(review): the same eps draw is
        # reused for z1/z2/z3 -- presumably intentional; confirm.
        eps.resize_as_(pz1.data[:,0]).normal_()
        z1 = pz1[:,0] + (pz1[:,1]*.5).exp() * Variable(eps)
        z2 = pz2[:,0] + (pz2[:,1]*.5).exp() * Variable(eps)
        z3 = pz3[:,0] + (pz3[:,1]*.5).exp() * Variable(eps)
        # Decode x1's content z1 with its own style (reconstruction), with
        # x2's style (same identity), and with x3's style; ye decodes a
        # prior sample with x3's style.
        y1 = netDec(z1, s1)
        y2 = netDec(z1, s2)
        y3 = netDec(z1, s3)
        ye = netDec(Variable(eps), s3)
        err_rec = lossL(y1, Variable(x1))
        err_swap = lossL(y2, Variable(x1))
        err_kl = lossKL(pz1)
        d3 = netD(y3, Variable(ids))
        de = netD(ye, Variable(ids))
        # Encoder/decoder objective.  This backward also fills netD grads,
        # which are discarded by netD.zero_grad() just below.
        (err_rec * opt.recW +
         err_swap * opt.swap1W +
         lossD(d3, Variable(labelNeg.expand_as(d3))) * opt.swap2W +
         lossD(de, Variable(labelPos.expand_as(de))) * opt.swap2W +
         err_kl * opt.klzW).backward()
        netD.zero_grad()
        # Discriminator objective on detached fakes (no grads into Enc/Dec).
        d3 = netD(y3.detach(), Variable(ids))
        de = netD(ye.detach(), Variable(ids))
        (lossD(d3, Variable(labelPos.expand_as(d3))) * opt.swap2W +
         lossD(de, Variable(labelNeg.expand_as(de))) * opt.swap2W).backward()
        optimizerEnc.step()
        optimizerDec.step()
        optimizerD.step()
        netEnc.zero_grad()
        netDec.zero_grad()
        netD.zero_grad()
        log_dNeg.append(de.data.mean())
        log_dPos.append(d3.data.mean())
        log_rec.append(err_rec.data.mean())
        log_swap.append(err_swap.data.mean())
        log_kl.append(err_kl.data.mean())
        iteration += 1
    epoch = epoch+1
    print(epoch,
          np.array(log_dNeg).mean(),
          np.array(log_dPos).mean(),
          np.array(log_rec).mean(),
          np.array(log_swap).mean(),
          np.array(log_kl).mean())
    if epoch% opt.checkpointFreq == 0:
        netEnc.eval()
        netDec.eval()
        # Visualization: decode the fixed interpolated z grid with each
        # test image's style code.
        pz_test, s_test = netEnc(Variable(x_test, volatile=True))
        s_test = s_test.unsqueeze(1).repeat(1,steps*views,1).view(-1,opt.sizeS)
        y_test = netDec(Variable(z_test, volatile=True), s_test)
        # NOTE(review): images are tagged epoch+1 while checkpoints use
        # epoch -- looks inconsistent; confirm before relying on the names.
        vutils.save_image(y_test.data, "interpolate_%d.png" % (epoch+1), nrow=views*steps, normalize=True, range=(-1,1))
        torch.save(netEnc.state_dict(), '%s/netEnc_%d.pth' % (opt.checkpointDir, epoch))
        torch.save(netDec.state_dict(), '%s/netDec_%d.pth' % (opt.checkpointDir, epoch))
        #torch.save(netD.state_dict(), '%s/netD_%d.pth' % (opt.checkpointDir, epoch))
| [
"torch.nn.functional.upsample",
"torch.nn.ReLU",
"torch.LongTensor",
"torch.nn.MSELoss",
"numpy.array",
"torch.cuda.is_available",
"torchvision.utils.save_image",
"torch.nn.BatchNorm2d",
"os.listdir",
"torchvision.transforms.ToTensor",
"torch.autograd.Variable",
"random.randint",
"torch.nn.E... | [((2940, 2964), 'random.randint', 'random.randint', (['(1)', '(10000)'], {}), '(1, 10000)\n', (2954, 2964), False, 'import random\n'), ((3016, 3043), 'random.seed', 'random.seed', (['opt.manualSeed'], {}), '(opt.manualSeed)\n', (3027, 3043), False, 'import random\n'), ((3044, 3077), 'torch.manual_seed', 'torch.manual_seed', (['opt.manualSeed'], {}), '(opt.manualSeed)\n', (3061, 3077), False, 'import torch\n'), ((20740, 20762), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (20760, 20762), True, 'import torch.nn as nn\n'), ((20771, 20783), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (20781, 20783), True, 'import torch.nn as nn\n'), ((21255, 21274), 'torch.FloatTensor', 'torch.FloatTensor', ([], {}), '()\n', (21272, 21274), False, 'import torch\n'), ((21280, 21299), 'torch.FloatTensor', 'torch.FloatTensor', ([], {}), '()\n', (21297, 21299), False, 'import torch\n'), ((21305, 21324), 'torch.FloatTensor', 'torch.FloatTensor', ([], {}), '()\n', (21322, 21324), False, 'import torch\n'), ((21331, 21349), 'torch.LongTensor', 'torch.LongTensor', ([], {}), '()\n', (21347, 21349), False, 'import torch\n'), ((21356, 21375), 'torch.FloatTensor', 'torch.FloatTensor', ([], {}), '()\n', (21373, 21375), False, 'import torch\n'), ((21955, 22000), 'torch.FloatTensor', 'torch.FloatTensor', (['(1)', 'views', 'steps', 'opt.sizeZ'], {}), '(1, views, steps, opt.sizeZ)\n', (21972, 22000), False, 'import torch\n'), ((3339, 3364), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3362, 3364), False, 'import torch\n'), ((3485, 3515), 'torch.cuda.set_device', 'torch.cuda.set_device', (['opt.gpu'], {}), '(opt.gpu)\n', (3506, 3515), False, 'import torch\n'), ((23721, 23737), 'tqdm.tqdm', 'tqdm', (['dataloader'], {}), '(dataloader)\n', (23725, 23737), False, 'from tqdm import tqdm\n'), ((3963, 3984), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3982, 3984), True, 'import torchvision.transforms 
as transforms\n'), ((4462, 4512), 'numpy.random.choice', 'np.random.choice', (['self.files[self.labels == label]'], {}), '(self.files[self.labels == label])\n', (4478, 4512), True, 'import numpy as np\n'), ((4835, 4856), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4854, 4856), True, 'import torchvision.transforms as transforms\n'), ((5829, 5873), 'os.path.join', 'os.path.join', (['opt.dataPath', '"""celebA/aligned"""'], {}), "(opt.dataPath, 'celebA/aligned')\n", (5841, 5873), False, 'import os\n'), ((5907, 5969), 'os.path.join', 'os.path.join', (['opt.dataPath', '"""celebA/identity_celebA_train.txt"""'], {}), "(opt.dataPath, 'celebA/identity_celebA_train.txt')\n", (5919, 5969), False, 'import os\n'), ((6384, 6428), 'os.path.join', 'os.path.join', (['opt.dataPath', '"""celebA/aligned"""'], {}), "(opt.dataPath, 'celebA/aligned')\n", (6396, 6428), False, 'import os\n'), ((6462, 6522), 'os.path.join', 'os.path.join', (['opt.dataPath', '"""celebA/identity_celebA_val.txt"""'], {}), "(opt.dataPath, 'celebA/identity_celebA_val.txt')\n", (6474, 6522), False, 'import os\n'), ((10417, 10443), 'torch.nn.Linear', 'nn.Linear', (['npix', '(zSize * 2)'], {}), '(npix, zSize * 2)\n', (10426, 10443), True, 'import torch.nn as nn\n'), ((10463, 10485), 'torch.nn.Linear', 'nn.Linear', (['npix', 'sSize'], {}), '(npix, sSize)\n', (10472, 10485), True, 'import torch.nn as nn\n'), ((10853, 10875), 'torch.nn.Linear', 'nn.Linear', (['zSize', 'npix'], {}), '(zSize, npix)\n', (10862, 10875), True, 'import torch.nn as nn\n'), ((10897, 10919), 'torch.nn.Linear', 'nn.Linear', (['sSize', 'npix'], {}), '(sSize, npix)\n', (10906, 10919), True, 'import torch.nn as nn\n'), ((12471, 12480), 'torch.nn.functional.tanh', 'F.tanh', (['x'], {}), '(x)\n', (12477, 12480), True, 'import torch.nn.functional as F\n'), ((14794, 14852), 'torch.nn.functional.upsample', 'nn.functional.upsample', (['s0'], {'scale_factor': '(4)', 'mode': '"""nearest"""'}), "(s0, scale_factor=4, 
mode='nearest')\n", (14816, 14852), True, 'import torch.nn as nn\n'), ((15009, 15067), 'torch.nn.functional.upsample', 'nn.functional.upsample', (['s1'], {'scale_factor': '(2)', 'mode': '"""nearest"""'}), "(s1, scale_factor=2, mode='nearest')\n", (15031, 15067), True, 'import torch.nn as nn\n'), ((15290, 15302), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {}), '(x)\n', (15299, 15302), True, 'import torch.nn.functional as F\n'), ((16516, 16542), 'torch.nn.Linear', 'nn.Linear', (['npix', '(zSize * 2)'], {}), '(npix, zSize * 2)\n', (16525, 16542), True, 'import torch.nn as nn\n'), ((16562, 16584), 'torch.nn.Linear', 'nn.Linear', (['npix', 'sSize'], {}), '(npix, sSize)\n', (16571, 16584), True, 'import torch.nn as nn\n'), ((16976, 16998), 'torch.nn.Linear', 'nn.Linear', (['zSize', 'npix'], {}), '(zSize, npix)\n', (16985, 16998), True, 'import torch.nn as nn\n'), ((17020, 17042), 'torch.nn.Linear', 'nn.Linear', (['sSize', 'npix'], {}), '(sSize, npix)\n', (17029, 17042), True, 'import torch.nn as nn\n'), ((17994, 18003), 'torch.nn.functional.tanh', 'F.tanh', (['x'], {}), '(x)\n', (18000, 18003), True, 'import torch.nn.functional as F\n'), ((19854, 19912), 'torch.nn.functional.upsample', 'nn.functional.upsample', (['s0'], {'scale_factor': '(8)', 'mode': '"""nearest"""'}), "(s0, scale_factor=8, mode='nearest')\n", (19876, 19912), True, 'import torch.nn as nn\n'), ((20069, 20127), 'torch.nn.functional.upsample', 'nn.functional.upsample', (['s1'], {'scale_factor': '(4)', 'mode': '"""nearest"""'}), "(s1, scale_factor=4, mode='nearest')\n", (20091, 20127), True, 'import torch.nn as nn\n'), ((20284, 20342), 'torch.nn.functional.upsample', 'nn.functional.upsample', (['s2'], {'scale_factor': '(2)', 'mode': '"""nearest"""'}), "(s2, scale_factor=2, mode='nearest')\n", (20306, 20342), True, 'import torch.nn as nn\n'), ((20565, 20577), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {}), '(x)\n', (20574, 20577), True, 'import torch.nn.functional as F\n'), ((21383, 21406), 
'torch.FloatTensor', 'torch.FloatTensor', (['(1)', '(1)'], {}), '(1, 1)\n', (21400, 21406), False, 'import torch\n'), ((21426, 21449), 'torch.FloatTensor', 'torch.FloatTensor', (['(1)', '(1)'], {}), '(1, 1)\n', (21443, 21449), False, 'import torch\n'), ((21470, 21493), 'torch.FloatTensor', 'torch.FloatTensor', (['(1)', '(1)'], {}), '(1, 1)\n', (21487, 21493), False, 'import torch\n'), ((26335, 26457), 'torchvision.utils.save_image', 'vutils.save_image', (['y_test.data', "('interpolate_%d.png' % (epoch + 1))"], {'nrow': '(views * steps)', 'normalize': '(True)', 'range': '(-1, 1)'}), "(y_test.data, 'interpolate_%d.png' % (epoch + 1), nrow=\n views * steps, normalize=True, range=(-1, 1))\n", (26352, 26457), True, 'import torchvision.utils as vutils\n'), ((4976, 4996), 'os.listdir', 'os.listdir', (['dataPath'], {}), '(dataPath)\n', (4986, 4996), False, 'import os\n'), ((5265, 5316), 'os.path.join', 'os.path.join', (['self.dataPath', 'self.folders[idx]', 'idA'], {}), '(self.dataPath, self.folders[idx], idA)\n', (5277, 5316), False, 'import os\n'), ((5344, 5395), 'os.path.join', 'os.path.join', (['self.dataPath', 'self.folders[idx]', 'idB'], {}), '(self.dataPath, self.folders[idx], idB)\n', (5356, 5395), False, 'import os\n'), ((6971, 7022), 'os.path.join', 'os.path.join', (['opt.dataPath', '"""rendered_chairs/train"""'], {}), "(opt.dataPath, 'rendered_chairs/train')\n", (6983, 7022), False, 'import os\n'), ((7449, 7498), 'os.path.join', 'os.path.join', (['opt.dataPath', '"""rendered_chairs/val"""'], {}), "(opt.dataPath, 'rendered_chairs/val')\n", (7461, 7498), False, 'import os\n'), ((8916, 8942), 'torch.nn.Conv2d', 'nn.Conv2d', (['nc', 'nf', '(3)', '(1)', '(1)'], {}), '(nc, nf, 3, 1, 1)\n', (8925, 8942), True, 'import torch.nn as nn\n'), ((8978, 9015), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['nf', '(0.1)'], {'affine': '(False)'}), '(nf, 0.1, affine=False)\n', (8992, 9015), True, 'import torch.nn as nn\n'), ((9051, 9060), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), 
'()\n', (9058, 9060), True, 'import torch.nn as nn\n'), ((9096, 9122), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf', '(3)', '(1)', '(1)'], {}), '(nf, nf, 3, 1, 1)\n', (9105, 9122), True, 'import torch.nn as nn\n'), ((9158, 9195), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['nf', '(0.1)'], {'affine': '(False)'}), '(nf, 0.1, affine=False)\n', (9172, 9195), True, 'import torch.nn as nn\n'), ((9231, 9240), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (9238, 9240), True, 'import torch.nn as nn\n'), ((9276, 9303), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(2 * nf)', '(2)', '(2)'], {}), '(nf, 2 * nf, 2, 2)\n', (9285, 9303), True, 'import torch.nn as nn\n'), ((9337, 9378), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(2 * nf)', '(0.1)'], {'affine': '(False)'}), '(2 * nf, 0.1, affine=False)\n', (9351, 9378), True, 'import torch.nn as nn\n'), ((9412, 9421), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (9419, 9421), True, 'import torch.nn as nn\n'), ((9457, 9491), 'torch.nn.Conv2d', 'nn.Conv2d', (['(2 * nf)', '(2 * nf)', '(3)', '(1)', '(1)'], {}), '(2 * nf, 2 * nf, 3, 1, 1)\n', (9466, 9491), True, 'import torch.nn as nn\n'), ((9523, 9564), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(2 * nf)', '(0.1)'], {'affine': '(False)'}), '(2 * nf, 0.1, affine=False)\n', (9537, 9564), True, 'import torch.nn as nn\n'), ((9598, 9607), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (9605, 9607), True, 'import torch.nn as nn\n'), ((9643, 9674), 'torch.nn.Conv2d', 'nn.Conv2d', (['(2 * nf)', '(4 * nf)', '(2)', '(2)'], {}), '(2 * nf, 4 * nf, 2, 2)\n', (9652, 9674), True, 'import torch.nn as nn\n'), ((9706, 9747), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(4 * nf)', '(0.1)'], {'affine': '(False)'}), '(4 * nf, 0.1, affine=False)\n', (9720, 9747), True, 'import torch.nn as nn\n'), ((9781, 9790), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (9788, 9790), True, 'import torch.nn as nn\n'), ((9826, 9860), 'torch.nn.Conv2d', 'nn.Conv2d', (['(4 * nf)', '(4 * nf)', '(3)', '(1)', '(1)'], {}), '(4 * 
nf, 4 * nf, 3, 1, 1)\n', (9835, 9860), True, 'import torch.nn as nn\n'), ((9892, 9933), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(4 * nf)', '(0.1)'], {'affine': '(False)'}), '(4 * nf, 0.1, affine=False)\n', (9906, 9933), True, 'import torch.nn as nn\n'), ((9967, 9976), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (9974, 9976), True, 'import torch.nn as nn\n'), ((10012, 10043), 'torch.nn.Conv2d', 'nn.Conv2d', (['(4 * nf)', '(8 * nf)', '(2)', '(2)'], {}), '(4 * nf, 8 * nf, 2, 2)\n', (10021, 10043), True, 'import torch.nn as nn\n'), ((10075, 10116), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(8 * nf)', '(0.1)'], {'affine': '(False)'}), '(8 * nf, 0.1, affine=False)\n', (10089, 10116), True, 'import torch.nn as nn\n'), ((10150, 10159), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (10157, 10159), True, 'import torch.nn as nn\n'), ((10195, 10229), 'torch.nn.Conv2d', 'nn.Conv2d', (['(8 * nf)', '(8 * nf)', '(3)', '(1)', '(1)'], {}), '(8 * nf, 8 * nf, 3, 1, 1)\n', (10204, 10229), True, 'import torch.nn as nn\n'), ((10261, 10302), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(8 * nf)', '(0.1)'], {'affine': '(False)'}), '(8 * nf, 0.1, affine=False)\n', (10275, 10302), True, 'import torch.nn as nn\n'), ((10336, 10345), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (10343, 10345), True, 'import torch.nn as nn\n'), ((10954, 10988), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nf * 8)', '(nf * 8)', '(3)', '(1)', '(1)'], {}), '(nf * 8, nf * 8, 3, 1, 1)\n', (10963, 10988), True, 'import torch.nn as nn\n'), ((11020, 11061), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(nf * 8)', '(0.1)'], {'affine': '(False)'}), '(nf * 8, 0.1, affine=False)\n', (11034, 11061), True, 'import torch.nn as nn\n'), ((11095, 11104), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (11102, 11104), True, 'import torch.nn as nn\n'), ((11140, 11180), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(nf * 8)', '(nf * 4)', '(2)', '(2)'], {}), '(nf * 8, nf * 4, 2, 2)\n', (11158, 11180), True, 'import torch.nn as 
nn\n'), ((11212, 11253), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(nf * 4)', '(0.1)'], {'affine': '(False)'}), '(nf * 4, 0.1, affine=False)\n', (11226, 11253), True, 'import torch.nn as nn\n'), ((11287, 11296), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (11294, 11296), True, 'import torch.nn as nn\n'), ((11332, 11366), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nf * 4)', '(nf * 4)', '(3)', '(1)', '(1)'], {}), '(nf * 4, nf * 4, 3, 1, 1)\n', (11341, 11366), True, 'import torch.nn as nn\n'), ((11398, 11439), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(nf * 4)', '(0.1)'], {'affine': '(False)'}), '(nf * 4, 0.1, affine=False)\n', (11412, 11439), True, 'import torch.nn as nn\n'), ((11473, 11482), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (11480, 11482), True, 'import torch.nn as nn\n'), ((11518, 11558), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(nf * 4)', '(nf * 2)', '(2)', '(2)'], {}), '(nf * 4, nf * 2, 2, 2)\n', (11536, 11558), True, 'import torch.nn as nn\n'), ((11590, 11631), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(nf * 2)', '(0.1)'], {'affine': '(False)'}), '(nf * 2, 0.1, affine=False)\n', (11604, 11631), True, 'import torch.nn as nn\n'), ((11665, 11674), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (11672, 11674), True, 'import torch.nn as nn\n'), ((11710, 11744), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nf * 2)', '(nf * 2)', '(3)', '(1)', '(1)'], {}), '(nf * 2, nf * 2, 3, 1, 1)\n', (11719, 11744), True, 'import torch.nn as nn\n'), ((11776, 11817), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(nf * 2)', '(0.1)'], {'affine': '(False)'}), '(nf * 2, 0.1, affine=False)\n', (11790, 11817), True, 'import torch.nn as nn\n'), ((11851, 11860), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (11858, 11860), True, 'import torch.nn as nn\n'), ((11896, 11932), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(nf * 2)', 'nf', '(2)', '(2)'], {}), '(nf * 2, nf, 2, 2)\n', (11914, 11932), True, 'import torch.nn as nn\n'), ((11966, 12003), 'torch.nn.BatchNorm2d', 
'nn.BatchNorm2d', (['nf', '(0.1)'], {'affine': '(False)'}), '(nf, 0.1, affine=False)\n', (11980, 12003), True, 'import torch.nn as nn\n'), ((12039, 12048), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (12046, 12048), True, 'import torch.nn as nn\n'), ((12084, 12110), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf', '(3)', '(1)', '(1)'], {}), '(nf, nf, 3, 1, 1)\n', (12093, 12110), True, 'import torch.nn as nn\n'), ((12146, 12183), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['nf', '(0.1)'], {'affine': '(False)'}), '(nf, 0.1, affine=False)\n', (12160, 12183), True, 'import torch.nn as nn\n'), ((12219, 12228), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (12226, 12228), True, 'import torch.nn as nn\n'), ((12264, 12290), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nc', '(3)', '(1)', '(1)'], {}), '(nf, nc, 3, 1, 1)\n', (12273, 12290), True, 'import torch.nn as nn\n'), ((15742, 15768), 'torch.nn.Conv2d', 'nn.Conv2d', (['nc', 'nf', '(4)', '(2)', '(1)'], {}), '(nc, nf, 4, 2, 1)\n', (15751, 15768), True, 'import torch.nn as nn\n'), ((15804, 15841), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['nf', '(0.1)'], {'affine': '(False)'}), '(nf, 0.1, affine=False)\n', (15818, 15841), True, 'import torch.nn as nn\n'), ((15877, 15886), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (15884, 15886), True, 'import torch.nn as nn\n'), ((15922, 15952), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(2 * nf)', '(4)', '(2)', '(1)'], {}), '(nf, 2 * nf, 4, 2, 1)\n', (15931, 15952), True, 'import torch.nn as nn\n'), ((15986, 16027), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(2 * nf)', '(0.1)'], {'affine': '(False)'}), '(2 * nf, 0.1, affine=False)\n', (16000, 16027), True, 'import torch.nn as nn\n'), ((16061, 16070), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (16068, 16070), True, 'import torch.nn as nn\n'), ((16106, 16140), 'torch.nn.Conv2d', 'nn.Conv2d', (['(2 * nf)', '(4 * nf)', '(4)', '(2)', '(1)'], {}), '(2 * nf, 4 * nf, 4, 2, 1)\n', (16115, 16140), True, 'import torch.nn as nn\n'), ((16172, 
16213), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(4 * nf)', '(0.1)'], {'affine': '(False)'}), '(4 * nf, 0.1, affine=False)\n', (16186, 16213), True, 'import torch.nn as nn\n'), ((16247, 16256), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (16254, 16256), True, 'import torch.nn as nn\n'), ((16292, 16326), 'torch.nn.Conv2d', 'nn.Conv2d', (['(4 * nf)', '(8 * nf)', '(4)', '(2)', '(1)'], {}), '(4 * nf, 8 * nf, 4, 2, 1)\n', (16301, 16326), True, 'import torch.nn as nn\n'), ((16358, 16399), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(8 * nf)', '(0.1)'], {'affine': '(False)'}), '(8 * nf, 0.1, affine=False)\n', (16372, 16399), True, 'import torch.nn as nn\n'), ((16433, 16442), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (16440, 16442), True, 'import torch.nn as nn\n'), ((17077, 17118), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(nf * 8)', '(0.1)'], {'affine': '(False)'}), '(nf * 8, 0.1, affine=False)\n', (17091, 17118), True, 'import torch.nn as nn\n'), ((17152, 17161), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (17159, 17161), True, 'import torch.nn as nn\n'), ((17197, 17240), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(nf * 8)', '(nf * 4)', '(4)', '(2)', '(1)'], {}), '(nf * 8, nf * 4, 4, 2, 1)\n', (17215, 17240), True, 'import torch.nn as nn\n'), ((17272, 17313), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(nf * 4)', '(0.1)'], {'affine': '(False)'}), '(nf * 4, 0.1, affine=False)\n', (17286, 17313), True, 'import torch.nn as nn\n'), ((17347, 17356), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (17354, 17356), True, 'import torch.nn as nn\n'), ((17392, 17435), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(nf * 4)', '(nf * 2)', '(4)', '(2)', '(1)'], {}), '(nf * 4, nf * 2, 4, 2, 1)\n', (17410, 17435), True, 'import torch.nn as nn\n'), ((17467, 17508), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(nf * 2)', '(0.1)'], {'affine': '(False)'}), '(nf * 2, 0.1, affine=False)\n', (17481, 17508), True, 'import torch.nn as nn\n'), ((17542, 17551), 
'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (17549, 17551), True, 'import torch.nn as nn\n'), ((17587, 17626), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(nf * 2)', 'nf', '(4)', '(2)', '(1)'], {}), '(nf * 2, nf, 4, 2, 1)\n', (17605, 17626), True, 'import torch.nn as nn\n'), ((17660, 17697), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['nf', '(0.1)'], {'affine': '(False)'}), '(nf, 0.1, affine=False)\n', (17674, 17697), True, 'import torch.nn as nn\n'), ((17733, 17742), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (17740, 17742), True, 'import torch.nn as nn\n'), ((17778, 17813), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['nf', 'nc', '(4)', '(2)', '(1)'], {}), '(nf, nc, 4, 2, 1)\n', (17796, 17813), True, 'import torch.nn as nn\n'), ((24289, 24301), 'torch.autograd.Variable', 'Variable', (['x1'], {}), '(x1)\n', (24297, 24301), False, 'from torch.autograd import Variable\n'), ((24328, 24340), 'torch.autograd.Variable', 'Variable', (['x2'], {}), '(x2)\n', (24336, 24340), False, 'from torch.autograd import Variable\n'), ((24367, 24379), 'torch.autograd.Variable', 'Variable', (['x3'], {}), '(x3)\n', (24375, 24379), False, 'from torch.autograd import Variable\n'), ((24713, 24726), 'torch.autograd.Variable', 'Variable', (['eps'], {}), '(eps)\n', (24721, 24726), False, 'from torch.autograd import Variable\n'), ((24760, 24772), 'torch.autograd.Variable', 'Variable', (['x1'], {}), '(x1)\n', (24768, 24772), False, 'from torch.autograd import Variable\n'), ((24803, 24815), 'torch.autograd.Variable', 'Variable', (['x1'], {}), '(x1)\n', (24811, 24815), False, 'from torch.autograd import Variable\n'), ((24868, 24881), 'torch.autograd.Variable', 'Variable', (['ids'], {}), '(ids)\n', (24876, 24881), False, 'from torch.autograd import Variable\n'), ((24905, 24918), 'torch.autograd.Variable', 'Variable', (['ids'], {}), '(ids)\n', (24913, 24918), False, 'from torch.autograd import Variable\n'), ((25215, 25228), 'torch.autograd.Variable', 'Variable', (['ids'], {}), 
'(ids)\n', (25223, 25228), False, 'from torch.autograd import Variable\n'), ((25261, 25274), 'torch.autograd.Variable', 'Variable', (['ids'], {}), '(ids)\n', (25269, 25274), False, 'from torch.autograd import Variable\n'), ((26149, 26180), 'torch.autograd.Variable', 'Variable', (['x_test'], {'volatile': '(True)'}), '(x_test, volatile=True)\n', (26157, 26180), False, 'from torch.autograd import Variable\n'), ((26286, 26317), 'torch.autograd.Variable', 'Variable', (['z_test'], {'volatile': '(True)'}), '(z_test, volatile=True)\n', (26294, 26317), False, 'from torch.autograd import Variable\n'), ((4554, 4588), 'os.path.join', 'os.path.join', (['self.dataPath', 'file1'], {}), '(self.dataPath, file1)\n', (4566, 4588), False, 'import os\n'), ((4632, 4666), 'os.path.join', 'os.path.join', (['self.dataPath', 'file2'], {}), '(self.dataPath, file2)\n', (4644, 4666), False, 'import os\n'), ((5168, 5214), 'os.path.join', 'os.path.join', (['self.dataPath', 'self.folders[idx]'], {}), '(self.dataPath, self.folders[idx])\n', (5180, 5214), False, 'import os\n'), ((6023, 6049), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(128)'], {}), '(128)\n', (6044, 6049), True, 'import torchvision.transforms as transforms\n'), ((6103, 6131), 'torchvision.transforms.Resize', 'transforms.Resize', (['opt.sizeX'], {}), '(opt.sizeX)\n', (6120, 6131), True, 'import torchvision.transforms as transforms\n'), ((6185, 6206), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6204, 6206), True, 'import torchvision.transforms as transforms\n'), ((6260, 6314), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (6280, 6314), True, 'import torchvision.transforms as transforms\n'), ((6576, 6602), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(128)'], {}), '(128)\n', (6597, 6602), True, 'import torchvision.transforms as transforms\n'), ((6656, 6684), 
'torchvision.transforms.Resize', 'transforms.Resize', (['opt.sizeX'], {}), '(opt.sizeX)\n', (6673, 6684), True, 'import torchvision.transforms as transforms\n'), ((6738, 6759), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6757, 6759), True, 'import torchvision.transforms as transforms\n'), ((6813, 6867), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (6833, 6867), True, 'import torchvision.transforms as transforms\n'), ((12677, 12735), 'torch.nn.Embedding', 'nn.Embedding', (['nClass', '(nf * 1 * (xSize // 8) * (xSize // 8))'], {}), '(nClass, nf * 1 * (xSize // 8) * (xSize // 8))\n', (12689, 12735), True, 'import torch.nn as nn\n'), ((12768, 12826), 'torch.nn.Embedding', 'nn.Embedding', (['nClass', '(nf * 2 * (xSize // 8) * (xSize // 8))'], {}), '(nClass, nf * 2 * (xSize // 8) * (xSize // 8))\n', (12780, 12826), True, 'import torch.nn as nn\n'), ((12859, 12917), 'torch.nn.Embedding', 'nn.Embedding', (['nClass', '(nf * 4 * (xSize // 8) * (xSize // 8))'], {}), '(nClass, nf * 4 * (xSize // 8) * (xSize // 8))\n', (12871, 12917), True, 'import torch.nn as nn\n'), ((14532, 14582), 'torch.nn.Linear', 'nn.Linear', (['(nf * 4 * (xSize // 8) * (xSize // 8))', '(1)'], {}), '(nf * 4 * (xSize // 8) * (xSize // 8), 1)\n', (14541, 14582), True, 'import torch.nn as nn\n'), ((18216, 18274), 'torch.nn.Embedding', 'nn.Embedding', (['nClass', '(nf * 1 * (xSize // 8) * (xSize // 8))'], {}), '(nClass, nf * 1 * (xSize // 8) * (xSize // 8))\n', (18228, 18274), True, 'import torch.nn as nn\n'), ((18307, 18365), 'torch.nn.Embedding', 'nn.Embedding', (['nClass', '(nf * 1 * (xSize // 8) * (xSize // 8))'], {}), '(nClass, nf * 1 * (xSize // 8) * (xSize // 8))\n', (18319, 18365), True, 'import torch.nn as nn\n'), ((18398, 18456), 'torch.nn.Embedding', 'nn.Embedding', (['nClass', '(nf * 2 * (xSize // 8) * (xSize // 8))'], {}), '(nClass, nf * 2 * (xSize // 8) * (xSize 
// 8))\n', (18410, 18456), True, 'import torch.nn as nn\n'), ((18489, 18547), 'torch.nn.Embedding', 'nn.Embedding', (['nClass', '(nf * 4 * (xSize // 8) * (xSize // 8))'], {}), '(nClass, nf * 4 * (xSize // 8) * (xSize // 8))\n', (18501, 18547), True, 'import torch.nn as nn\n'), ((19590, 19642), 'torch.nn.Linear', 'nn.Linear', (['(nf * 8 * (xSize // 16) * (xSize // 16))', '(1)'], {}), '(nf * 8 * (xSize // 16) * (xSize // 16), 1)\n', (19599, 19642), True, 'import torch.nn as nn\n'), ((24475, 24488), 'torch.autograd.Variable', 'Variable', (['eps'], {}), '(eps)\n', (24483, 24488), False, 'from torch.autograd import Variable\n'), ((24535, 24548), 'torch.autograd.Variable', 'Variable', (['eps'], {}), '(eps)\n', (24543, 24548), False, 'from torch.autograd import Variable\n'), ((24595, 24608), 'torch.autograd.Variable', 'Variable', (['eps'], {}), '(eps)\n', (24603, 24608), False, 'from torch.autograd import Variable\n'), ((25865, 25883), 'numpy.array', 'np.array', (['log_dNeg'], {}), '(log_dNeg)\n', (25873, 25883), True, 'import numpy as np\n'), ((25901, 25919), 'numpy.array', 'np.array', (['log_dPos'], {}), '(log_dPos)\n', (25909, 25919), True, 'import numpy as np\n'), ((25937, 25954), 'numpy.array', 'np.array', (['log_rec'], {}), '(log_rec)\n', (25945, 25954), True, 'import numpy as np\n'), ((25972, 25990), 'numpy.array', 'np.array', (['log_swap'], {}), '(log_swap)\n', (25980, 25990), True, 'import numpy as np\n'), ((26008, 26024), 'numpy.array', 'np.array', (['log_kl'], {}), '(log_kl)\n', (26016, 26024), True, 'import numpy as np\n'), ((4696, 4715), 'torch.LongTensor', 'torch.LongTensor', (['(1)'], {}), '(1)\n', (4712, 4715), False, 'import torch\n'), ((5496, 5515), 'torch.LongTensor', 'torch.LongTensor', (['(1)'], {}), '(1)\n', (5512, 5515), False, 'import torch\n'), ((7078, 7104), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(300)'], {}), '(300)\n', (7099, 7104), True, 'import torchvision.transforms as transforms\n'), ((7160, 7188), 
'torchvision.transforms.Resize', 'transforms.Resize', (['opt.sizeX'], {}), '(opt.sizeX)\n', (7177, 7188), True, 'import torchvision.transforms as transforms\n'), ((7244, 7265), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (7263, 7265), True, 'import torchvision.transforms as transforms\n'), ((7321, 7375), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (7341, 7375), True, 'import torchvision.transforms as transforms\n'), ((7554, 7580), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(300)'], {}), '(300)\n', (7575, 7580), True, 'import torchvision.transforms as transforms\n'), ((7636, 7664), 'torchvision.transforms.Resize', 'transforms.Resize', (['opt.sizeX'], {}), '(opt.sizeX)\n', (7653, 7664), True, 'import torchvision.transforms as transforms\n'), ((7720, 7741), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (7739, 7741), True, 'import torchvision.transforms as transforms\n'), ((7797, 7851), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (7817, 7851), True, 'import torchvision.transforms as transforms\n'), ((12959, 12985), 'torch.nn.Conv2d', 'nn.Conv2d', (['nc', 'nf', '(3)', '(1)', '(1)'], {}), '(nc, nf, 3, 1, 1)\n', (12968, 12985), True, 'import torch.nn as nn\n'), ((13036, 13053), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (13048, 13053), True, 'import torch.nn as nn\n'), ((13103, 13126), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf', '(2)', '(2)'], {}), '(nf, nf, 2, 2)\n', (13112, 13126), True, 'import torch.nn as nn\n'), ((13179, 13197), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['nf'], {}), '(nf)\n', (13193, 13197), True, 'import torch.nn as nn\n'), ((13248, 13265), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (13260, 13265), True, 'import 
torch.nn as nn\n'), ((13315, 13342), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * 2)', '(2)', '(2)'], {}), '(nf, nf * 2, 2, 2)\n', (13324, 13342), True, 'import torch.nn as nn\n'), ((13391, 13413), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(nf * 2)'], {}), '(nf * 2)\n', (13405, 13413), True, 'import torch.nn as nn\n'), ((13462, 13479), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (13474, 13479), True, 'import torch.nn as nn\n'), ((13529, 13563), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nf * 2)', '(nf * 2)', '(3)', '(1)', '(1)'], {}), '(nf * 2, nf * 2, 3, 1, 1)\n', (13538, 13563), True, 'import torch.nn as nn\n'), ((13610, 13632), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(nf * 2)'], {}), '(nf * 2)\n', (13624, 13632), True, 'import torch.nn as nn\n'), ((13681, 13698), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (13693, 13698), True, 'import torch.nn as nn\n'), ((13750, 13772), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(nf * 2)'], {}), '(nf * 2)\n', (13764, 13772), True, 'import torch.nn as nn\n'), ((13821, 13838), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (13833, 13838), True, 'import torch.nn as nn\n'), ((13888, 13919), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nf * 2)', '(nf * 4)', '(2)', '(2)'], {}), '(nf * 2, nf * 4, 2, 2)\n', (13897, 13919), True, 'import torch.nn as nn\n'), ((13966, 13988), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(nf * 4)'], {}), '(nf * 4)\n', (13980, 13988), True, 'import torch.nn as nn\n'), ((14037, 14054), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (14049, 14054), True, 'import torch.nn as nn\n'), ((14104, 14138), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nf * 4)', '(nf * 4)', '(3)', '(1)', '(1)'], {}), '(nf * 4, nf * 4, 3, 1, 1)\n', (14113, 14138), True, 'import torch.nn as nn\n'), ((14187, 14209), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(nf * 4)'], {}), '(nf * 4)\n', (14201, 14209), True, 'import torch.nn as nn\n'), ((14258, 14275), 
'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (14270, 14275), True, 'import torch.nn as nn\n'), ((14325, 14359), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nf * 4)', '(nf * 4)', '(3)', '(1)', '(1)'], {}), '(nf * 4, nf * 4, 3, 1, 1)\n', (14334, 14359), True, 'import torch.nn as nn\n'), ((14406, 14428), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(nf * 4)'], {}), '(nf * 4)\n', (14420, 14428), True, 'import torch.nn as nn\n'), ((14477, 14494), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (14489, 14494), True, 'import torch.nn as nn\n'), ((18589, 18615), 'torch.nn.Conv2d', 'nn.Conv2d', (['nc', 'nf', '(3)', '(1)', '(1)'], {}), '(nc, nf, 3, 1, 1)\n', (18598, 18615), True, 'import torch.nn as nn\n'), ((18666, 18683), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (18678, 18683), True, 'import torch.nn as nn\n'), ((18734, 18752), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['nf'], {}), '(nf)\n', (18748, 18752), True, 'import torch.nn as nn\n'), ((18803, 18820), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (18815, 18820), True, 'import torch.nn as nn\n'), ((18870, 18896), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', 'nf', '(4)', '(2)', '(1)'], {}), '(nf, nf, 4, 2, 1)\n', (18879, 18896), True, 'import torch.nn as nn\n'), ((18948, 18966), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['nf'], {}), '(nf)\n', (18962, 18966), True, 'import torch.nn as nn\n'), ((19017, 19034), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (19029, 19034), True, 'import torch.nn as nn\n'), ((19084, 19114), 'torch.nn.Conv2d', 'nn.Conv2d', (['nf', '(nf * 2)', '(4)', '(2)', '(1)'], {}), '(nf, nf * 2, 4, 2, 1)\n', (19093, 19114), True, 'import torch.nn as nn\n'), ((19164, 19186), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(nf * 2)'], {}), '(nf * 2)\n', (19178, 19186), True, 'import torch.nn as nn\n'), ((19235, 19252), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (19247, 19252), True, 'import 
torch.nn as nn\n'), ((19302, 19336), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nf * 2)', '(nf * 4)', '(4)', '(2)', '(1)'], {}), '(nf * 2, nf * 4, 4, 2, 1)\n', (19311, 19336), True, 'import torch.nn as nn\n'), ((19384, 19406), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(nf * 4)'], {}), '(nf * 4)\n', (19398, 19406), True, 'import torch.nn as nn\n'), ((19455, 19472), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (19467, 19472), True, 'import torch.nn as nn\n'), ((19522, 19556), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nf * 4)', '(nf * 8)', '(4)', '(2)', '(1)'], {}), '(nf * 4, nf * 8, 4, 2, 1)\n', (19531, 19556), True, 'import torch.nn as nn\n')] |
import os
import time
import sys
import math
import gzip
import pickle
import glob
import numpy as np
#
from multiprocessing import Process
from joblib import Parallel, delayed
import multiprocessing
#
from multiprocessing.dummy import Pool as ThreadPool
#
from collections import defaultdict
# rdkit cheminformania
from rdkit import DataStructs
from rdkit import rdBase
from rdkit import Chem
from rdkit.Chem import Crippen
from rdkit.Chem import QED
from rdkit.Chem import rdMolDescriptors
#
from rdkit.Chem.Draw import SimilarityMaps
#Machine learning modules
import sklearn
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
print ("\n")
print (" Python:", sys.version )
print (" Numpy :", np.__version__ )
print (" Rdkit :", rdBase.rdkitVersion ,"\n" )
_fscores = None
def genFP(mol,Dummy=-1):
    """Return a Morgan-type fingerprint of *mol* as a numpy array.

    ``Dummy`` is unused; it is kept only for interface compatibility.
    """
    morgan_fp = SimilarityMaps.GetMorganFingerprint(mol)
    as_array = np.zeros((1,))
    DataStructs.ConvertToNumpyArray(morgan_fp, as_array)
    return as_array
def readFragmentScores(name='fpscores'):
    """Load fragment contribution scores into the module-level ``_fscores``.

    Reads the gzip-compressed pickle ``<name>.pkl.gz``: a list of records
    of the form ``[score, frag_id_1, frag_id_2, ...]``, flattened into a
    dict mapping each fragment id to its (float) score.
    """
    global _fscores
    # Context manager closes the gzip handle promptly (the original left
    # the file object returned by gzip.open dangling).
    with gzip.open('%s.pkl.gz' % name) as fh:
        raw = pickle.load(fh)
    outDict = {}
    for entry in raw:
        for j in range(1, len(entry)):
            outDict[entry[j]] = float(entry[0])
    _fscores = outDict
def numBridgeheadsAndSpiro(mol,ri=None):
    """Return ``(nBridgeheads, nSpiro)`` for molecule *mol*.

    A spiro atom is the single atom shared by two rings; bridgehead atoms
    are found from bonds shared between rings that share at least two
    bonds.  *ri* may be passed to reuse a precomputed RingInfo.
    """
    if ri is None:
        ri = mol.GetRingInfo()
    arings = [set(x) for x in ri.AtomRings()]
    spiros = set()
    for i, ari in enumerate(arings):
        for j in range(i + 1, len(arings)):
            # Fixed: the original contained the garbled token 'ariås[j]'
            # here; the intended expression (cf. the bond loop below) is
            # the intersection of the two atom rings.
            shared = ari & arings[j]
            if len(shared) == 1:
                spiros.update(shared)
    nSpiro = len(spiros)
    # Find bonds that are shared between rings sharing at least 2 bonds.
    brings = [set(x) for x in ri.BondRings()]
    bridges = set()
    for i, bri in enumerate(brings):
        for j in range(i + 1, len(brings)):
            shared = bri & brings[j]
            if len(shared) > 1:
                # Count how often each atom appears among the shared
                # bonds; atoms seen exactly once are bridgeheads.
                atomCounts = defaultdict(int)
                for bi in shared:
                    bond = mol.GetBondWithIdx(bi)
                    atomCounts[bond.GetBeginAtomIdx()] += 1
                    atomCounts[bond.GetEndAtomIdx()] += 1
                for ai, cnt in atomCounts.items():
                    if cnt == 1:
                        bridges.add(ai)
    return len(bridges), nSpiro
def calculateScore(m):
    """Return the synthetic-accessibility (SA) score of RDKit mol *m*.

    Ertl & Schuffenhauer, "Estimation of Synthetic Accessibility Score of
    Drug-like Molecules based on Molecular Complexity and Fragment
    Contributions", Journal of Cheminformatics 1:8 (2009).
    Result is on a 1 (easy) .. 10 (hard to synthesize) scale.
    """
    # Lazily load the fragment-score table on first use.
    if _fscores is None:
        readFragmentScores()
    # Fragment score: 2 is the *radius* of the circular fingerprint.
    fp = rdMolDescriptors.GetMorganFingerprint(m, 2)
    fps = fp.GetNonzeroElements()
    score1 = 0.
    nf = 0
    for bitId, v in fps.items():
        nf += v
        # Fragments missing from the table get a -4 penalty.
        score1 += _fscores.get(bitId, -4) * v
    score1 /= nf
    # Features score: penalties for molecular complexity.
    nAtoms = m.GetNumAtoms()
    nChiralCenters = len(Chem.FindMolChiralCenters(m, includeUnassigned=True))
    ri = m.GetRingInfo()
    nBridgeheads, nSpiro = numBridgeheadsAndSpiro(m, ri)
    nMacrocycles = 0
    for ring in ri.AtomRings():
        if len(ring) > 8:
            nMacrocycles += 1
    sizePenalty = nAtoms**1.005 - nAtoms
    stereoPenalty = math.log10(nChiralCenters + 1)
    spiroPenalty = math.log10(nSpiro + 1)
    bridgePenalty = math.log10(nBridgeheads + 1)
    macrocyclePenalty = 0.
    # This differs from the paper, which defines
    # macrocyclePenalty = math.log10(nMacrocycles+1); this form generates
    # better results when 2 or more macrocycles are present.
    if nMacrocycles > 0:
        macrocyclePenalty = math.log10(2)
    score2 = 0. - sizePenalty - stereoPenalty - spiroPenalty - bridgePenalty - macrocyclePenalty
    # Correction for fingerprint density; not in the original publication,
    # added in version 1.1 to make highly symmetrical molecules easier to
    # synthetise.
    score3 = 0.
    if nAtoms > len(fps):
        score3 = math.log(float(nAtoms) / len(fps)) * .5
    sascore = score1 + score2 + score3
    # Transform the "raw" value onto the 1..10 scale (locals renamed from
    # 'min'/'max', which shadowed the builtins).
    sa_min = -4.0
    sa_max = 2.5
    sascore = 11. - (sascore - sa_min + 1) / (sa_max - sa_min) * 9.
    # Smooth the 10-end of the scale.
    if sascore > 8.:
        sascore = 8. + math.log(sascore + 1. - 9.)
    if sascore > 10.:
        sascore = 10.0
    elif sascore < 1.:
        sascore = 1.0
    return sascore
def pepLinealtoSMILE(seq):
    """Convert a linear peptide sequence (one-letter codes) to SMILES.

    Builds a HELM string ("PEPTIDE1{A.C.D...}$$$$V2.0") and converts it
    with RDKit.  Generalized from the original hard-coded 7-residue
    slicing to sequences of any length; output is identical for
    7-residue input.
    """
    # e.g. "ACDEFGH" -> "A.C.D.E.F.G.H"
    tmpSeq = ".".join(seq)
    helmLineal = "PEPTIDE1{" + tmpSeq + "}$$$$V2.0"
    SeqFasta = Chem.MolFromHELM(str(helmLineal))
    SeqSmiles = Chem.MolToSmiles(SeqFasta)
    return SeqSmiles
def QSArproperties_test(array, forest, num, namefile):
    """Compute 2D QSAR descriptors for each peptide record in *array*.

    Each element of *array* is a tab-separated line whose first two fields
    are the peptide sequence and its SMILES.  For every peptide this
    writes one tab-separated row (MW, ALogP, HBA, HBD, PSA, rotatable
    bonds, molar refractivity, atom count, SA score, Ames-mutagenic
    probability, sequence, SMILES) to ``QSAR-2D<num><namefile>.csv``.

    QED properties: Bickerton et al. (2012), Nature Chemistry 4, 90-98.
    ALogP/MR: Wildman & Crippen (1999), J. Chem. Inf. Comput. Sci. 39, 868-873.
    *forest* is a fitted classifier with predict_proba (Ames model).
    """
    # 'with' guarantees the CSV is closed even if a record raises
    # (the original used a bare open()/close() pair).
    with open('QSAR-2D' + str(num) + str(namefile) + '.csv', 'w') as fw:
        for line in array:
            parameter = line.split(sep="\t", maxsplit=9)
            peptide_seq = parameter[0]
            peptide = parameter[1]
            molpeptideLi = Chem.MolFromSmiles(peptide)
            # Synthetic-accessibility score.
            scoreSA_Li = calculateScore(molpeptideLi)
            # Random-forest Ames-mutagenicity probability.
            fp_vect_Li = genFP(molpeptideLi)
            predictionsLi = forest.predict_proba(fp_vect_Li.reshape(1, -1))
            # 2D physicochemical descriptors.
            propQSAR = QED.properties(molpeptideLi)
            MolWeight = propQSAR.MW
            MolLogP = propQSAR.ALOGP
            HbondA = propQSAR.HBA
            HbondD = propQSAR.HBD
            PolarSA = propQSAR.PSA
            Rbonds = propQSAR.ROTB
            Aromatic = propQSAR.AROM
            MolarRefractivity = Crippen.MolMR(molpeptideLi)
            nAtoms = molpeptideLi.GetNumAtoms()
            SynthAcces = scoreSA_Li
            AmesMutagenic = predictionsLi[0][1]
            result = ( str(MolWeight) + "\t" + str(MolLogP) + "\t" + str(HbondA) + "\t" + str(HbondD) + "\t" + str(PolarSA) + "\t" + str(Rbonds) + "\t" + str(MolarRefractivity) + "\t" + str(nAtoms) + "\t" + str(SynthAcces) + "\t" + str(AmesMutagenic) + "\t" + str (peptide_seq) + "\t" + str(peptide) + "\n")
            fw.write(result)
if __name__=='__main__':
    # ------------------------------------------------------------------
    # Driver: train an Ames-mutagenicity random forest on the Hansen
    # (2009) benchmark, split each 'Values*' peptide file into 20 chunk
    # files, and score the chunks with 20 parallel worker processes.
    # ------------------------------------------------------------------
    t1 = time.time()
    # Fragment-score table for the synthetic-accessibility score.
    readFragmentScores("fpscores")
    # Hansen et al. (2009), Benchmark Data Set for in Silico Prediction
    # of Ames Mutagenicity, J. Chem. Inf. Model. 49, 2077-2081.
    data = np.genfromtxt('smiles_cas_N6512.smi',
                         delimiter='\t',
                         names=['Smiles', 'CAS', 'Mutagen'],
                         encoding=None,
                         dtype=None,
                         comments='##')
    # Convert smiles to RDKit molecules and calculate fingerprints.
    mols = []
    X = []
    y = []
    for record in data:
        try:
            mol = Chem.MolFromSmiles(record[0])
            if mol is not None:
                mols.append([mol, record[1], record[2]])
                X.append(genFP(mol))
                y.append(record[2])
        except Exception:
            # Best-effort conversion: report the failure and keep going.
            print ("Failed for CAS: %s" % record[1])
    # See how succesful the conversions were.
    print ("Imported smiles %s" % len(data))
    print ("Converted smiles %s" % len(mols))
    # Prepare the data for modelling.
    X = np.array(X)
    y = np.array(y)
    print ('\n <- Random Forest -> \n')
    # Cross validate.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
    accs_train = []
    accs_test = []
    for depth in [1, 3, 5, 10, 30, 50, 100, 500, 1000]:
        forest = RandomForestClassifier(n_estimators=1000, max_depth=depth, n_jobs=-1)
        forest.fit(X_train, y_train)
        accs_train.append(forest.score(X_train, y_train))
        accs_test.append(forest.score(X_test, y_test))
        print('--- max_depth = {} ---'.format(depth))
        print('Accuracy on training set: {:.3f}'.format(forest.score(X_train, y_train)))
        print('Accuracy on test set: {:.3f}'.format(forest.score(X_test, y_test)))
    # NOTE(review): as in the original, this reports the LAST forest of
    # the loop (max_depth=1000) scored on the full data set.
    print ('Value Model: %s' % str(forest.score(X, y)))
    #
    outputBase = 'output'  # output1.txt, output2.txt, etc.
    NUM_CHUNKS = 20
    path = 'Values*'
    files = glob.glob(path)
    for name in files:
        # Read the whole file once with a context manager (the original
        # opened it twice, leaked one handle, and shadowed builtin 'input').
        with open(name, 'r') as fh:
            content = fh.read()
        file_lines = content.split('\n')
        # Reproduce the original readlines() line count exactly.
        splitLen = content.count('\n')
        if content and not content.endswith('\n'):
            splitLen += 1
        div_lines = int((splitLen / NUM_CHUNKS) + 2)
        print ("Total lines :" + str(splitLen))
        print ("Div lines :" + str(div_lines))
        at = 1
        for start in range(0, len(file_lines), div_lines):
            # Write each slice of the input as its own chunk file.
            with open(outputBase + str(at) + '.txt', 'w') as out:
                out.write('\n'.join(file_lines[start:start + div_lines]))
            at += 1
        print ("\nFinal dividir archivo principal\n")
    # Read the chunk files back in (as in the original, this happens once
    # after the split loop, so 'name' refers to the last 'Values*' file)
    # and strip line endings.  Replaces 20 copy-pasted read blocks.
    smiles_chunks = []
    for idx in range(1, NUM_CHUNKS + 1):
        with open(outputBase + str(idx) + '.txt', 'r') as fh:
            smiles_chunks.append([x.replace('\n', '').replace('\r', '')
                                  for x in fh.readlines()])
    # One worker process per chunk (replaces the explicit p1..p20).
    procs = [Process(target=QSArproperties_test,
                     args=(chunk, forest, idx, name))
             for idx, chunk in enumerate(smiles_chunks, start=1)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print ("Ciclos : " + str(name))
    print ("Fin programa")
    t2 = time.time()
    resTime1 = (t2 - t1)
    #
    print (' Reading took %.2f seconds. \n' % resTime1)
| [
"gzip.open",
"rdkit.Chem.rdMolDescriptors.GetMorganFingerprint",
"multiprocessing.Process",
"math.log",
"numpy.array",
"math.log10",
"rdkit.Chem.Crippen.MolMR",
"numpy.genfromtxt",
"rdkit.Chem.FindMolChiralCenters",
"rdkit.Chem.Draw.SimilarityMaps.GetMorganFingerprint",
"rdkit.Chem.QED.propertie... | [((979, 1019), 'rdkit.Chem.Draw.SimilarityMaps.GetMorganFingerprint', 'SimilarityMaps.GetMorganFingerprint', (['mol'], {}), '(mol)\n', (1014, 1019), False, 'from rdkit.Chem.Draw import SimilarityMaps\n'), ((1035, 1049), 'numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (1043, 1049), True, 'import numpy as np\n'), ((1055, 1099), 'rdkit.DataStructs.ConvertToNumpyArray', 'DataStructs.ConvertToNumpyArray', (['fp', 'fp_vect'], {}), '(fp, fp_vect)\n', (1086, 1099), False, 'from rdkit import DataStructs\n'), ((3015, 3058), 'rdkit.Chem.rdMolDescriptors.GetMorganFingerprint', 'rdMolDescriptors.GetMorganFingerprint', (['m', '(2)'], {}), '(m, 2)\n', (3052, 3058), False, 'from rdkit.Chem import rdMolDescriptors\n'), ((3617, 3647), 'math.log10', 'math.log10', (['(nChiralCenters + 1)'], {}), '(nChiralCenters + 1)\n', (3627, 3647), False, 'import math\n'), ((3666, 3688), 'math.log10', 'math.log10', (['(nSpiro + 1)'], {}), '(nSpiro + 1)\n', (3676, 3688), False, 'import math\n'), ((3708, 3736), 'math.log10', 'math.log10', (['(nBridgeheads + 1)'], {}), '(nBridgeheads + 1)\n', (3718, 3736), False, 'import math\n'), ((5086, 5112), 'rdkit.Chem.MolToSmiles', 'Chem.MolToSmiles', (['SeqFasta'], {}), '(SeqFasta)\n', (5102, 5112), False, 'from rdkit import Chem\n'), ((7334, 7345), 'time.time', 'time.time', ([], {}), '()\n', (7343, 7345), False, 'import time\n'), ((7651, 7786), 'numpy.genfromtxt', 'np.genfromtxt', (['"""smiles_cas_N6512.smi"""'], {'delimiter': '"""\t"""', 'names': "['Smiles', 'CAS', 'Mutagen']", 'encoding': 'None', 'dtype': 'None', 'comments': '"""##"""'}), "('smiles_cas_N6512.smi', delimiter='\\t', names=['Smiles',\n 'CAS', 'Mutagen'], encoding=None, dtype=None, comments='##')\n", (7664, 7786), True, 'import numpy as np\n'), ((8576, 8587), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (8584, 8587), True, 'import numpy as np\n'), ((8595, 8606), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (8603, 8606), True, 'import numpy as np\n'), 
((8738, 8791), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)', 'random_state': '(0)'}), '(X, y, test_size=0.3, random_state=0)\n', (8754, 8791), False, 'from sklearn.model_selection import train_test_split\n'), ((9633, 9648), 'glob.glob', 'glob.glob', (['path'], {}), '(path)\n', (9642, 9648), False, 'import glob\n'), ((19075, 19086), 'time.time', 'time.time', ([], {}), '()\n', (19084, 19086), False, 'import time\n'), ((1239, 1268), 'gzip.open', 'gzip.open', (["('%s.pkl.gz' % name)"], {}), "('%s.pkl.gz' % name)\n", (1248, 1268), False, 'import gzip\n'), ((3331, 3383), 'rdkit.Chem.FindMolChiralCenters', 'Chem.FindMolChiralCenters', (['m'], {'includeUnassigned': '(True)'}), '(m, includeUnassigned=True)\n', (3356, 3383), False, 'from rdkit import Chem\n'), ((4043, 4056), 'math.log10', 'math.log10', (['(2)'], {}), '(2)\n', (4053, 4056), False, 'import math\n'), ((5902, 5929), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['peptide'], {}), '(peptide)\n', (5920, 5929), False, 'from rdkit import Chem\n'), ((6376, 6404), 'rdkit.Chem.QED.properties', 'QED.properties', (['molpeptideLi'], {}), '(molpeptideLi)\n', (6390, 6404), False, 'from rdkit.Chem import QED\n'), ((6714, 6741), 'rdkit.Chem.Crippen.MolMR', 'Crippen.MolMR', (['molpeptideLi'], {}), '(molpeptideLi)\n', (6727, 6741), False, 'from rdkit.Chem import Crippen\n'), ((8962, 9027), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(1000)', 'max_depth': 'i', 'n_jobs': '(-1)'}), '(n_estimators=1000, max_depth=i, n_jobs=-1)\n', (8984, 9027), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((16547, 16616), 'multiprocessing.Process', 'Process', ([], {'target': 'QSArproperties_test', 'args': '(smiles_1, forest, 1, name)'}), '(target=QSArproperties_test, args=(smiles_1, forest, 1, name))\n', (16554, 16616), False, 'from multiprocessing import Process\n'), ((16628, 16697), 'multiprocessing.Process', 'Process', 
([], {'target': 'QSArproperties_test', 'args': '(smiles_2, forest, 2, name)'}), '(target=QSArproperties_test, args=(smiles_2, forest, 2, name))\n', (16635, 16697), False, 'from multiprocessing import Process\n'), ((16709, 16778), 'multiprocessing.Process', 'Process', ([], {'target': 'QSArproperties_test', 'args': '(smiles_3, forest, 3, name)'}), '(target=QSArproperties_test, args=(smiles_3, forest, 3, name))\n', (16716, 16778), False, 'from multiprocessing import Process\n'), ((16790, 16859), 'multiprocessing.Process', 'Process', ([], {'target': 'QSArproperties_test', 'args': '(smiles_4, forest, 4, name)'}), '(target=QSArproperties_test, args=(smiles_4, forest, 4, name))\n', (16797, 16859), False, 'from multiprocessing import Process\n'), ((16871, 16940), 'multiprocessing.Process', 'Process', ([], {'target': 'QSArproperties_test', 'args': '(smiles_5, forest, 5, name)'}), '(target=QSArproperties_test, args=(smiles_5, forest, 5, name))\n', (16878, 16940), False, 'from multiprocessing import Process\n'), ((16952, 17021), 'multiprocessing.Process', 'Process', ([], {'target': 'QSArproperties_test', 'args': '(smiles_6, forest, 6, name)'}), '(target=QSArproperties_test, args=(smiles_6, forest, 6, name))\n', (16959, 17021), False, 'from multiprocessing import Process\n'), ((17033, 17102), 'multiprocessing.Process', 'Process', ([], {'target': 'QSArproperties_test', 'args': '(smiles_7, forest, 7, name)'}), '(target=QSArproperties_test, args=(smiles_7, forest, 7, name))\n', (17040, 17102), False, 'from multiprocessing import Process\n'), ((17114, 17183), 'multiprocessing.Process', 'Process', ([], {'target': 'QSArproperties_test', 'args': '(smiles_8, forest, 8, name)'}), '(target=QSArproperties_test, args=(smiles_8, forest, 8, name))\n', (17121, 17183), False, 'from multiprocessing import Process\n'), ((17195, 17264), 'multiprocessing.Process', 'Process', ([], {'target': 'QSArproperties_test', 'args': '(smiles_9, forest, 9, name)'}), '(target=QSArproperties_test, 
args=(smiles_9, forest, 9, name))\n', (17202, 17264), False, 'from multiprocessing import Process\n'), ((17277, 17348), 'multiprocessing.Process', 'Process', ([], {'target': 'QSArproperties_test', 'args': '(smiles_10, forest, 10, name)'}), '(target=QSArproperties_test, args=(smiles_10, forest, 10, name))\n', (17284, 17348), False, 'from multiprocessing import Process\n'), ((17361, 17432), 'multiprocessing.Process', 'Process', ([], {'target': 'QSArproperties_test', 'args': '(smiles_11, forest, 11, name)'}), '(target=QSArproperties_test, args=(smiles_11, forest, 11, name))\n', (17368, 17432), False, 'from multiprocessing import Process\n'), ((17445, 17516), 'multiprocessing.Process', 'Process', ([], {'target': 'QSArproperties_test', 'args': '(smiles_12, forest, 12, name)'}), '(target=QSArproperties_test, args=(smiles_12, forest, 12, name))\n', (17452, 17516), False, 'from multiprocessing import Process\n'), ((17529, 17600), 'multiprocessing.Process', 'Process', ([], {'target': 'QSArproperties_test', 'args': '(smiles_13, forest, 13, name)'}), '(target=QSArproperties_test, args=(smiles_13, forest, 13, name))\n', (17536, 17600), False, 'from multiprocessing import Process\n'), ((17613, 17684), 'multiprocessing.Process', 'Process', ([], {'target': 'QSArproperties_test', 'args': '(smiles_14, forest, 14, name)'}), '(target=QSArproperties_test, args=(smiles_14, forest, 14, name))\n', (17620, 17684), False, 'from multiprocessing import Process\n'), ((17697, 17768), 'multiprocessing.Process', 'Process', ([], {'target': 'QSArproperties_test', 'args': '(smiles_15, forest, 15, name)'}), '(target=QSArproperties_test, args=(smiles_15, forest, 15, name))\n', (17704, 17768), False, 'from multiprocessing import Process\n'), ((17781, 17852), 'multiprocessing.Process', 'Process', ([], {'target': 'QSArproperties_test', 'args': '(smiles_16, forest, 16, name)'}), '(target=QSArproperties_test, args=(smiles_16, forest, 16, name))\n', (17788, 17852), False, 'from multiprocessing import 
Process\n'), ((17865, 17936), 'multiprocessing.Process', 'Process', ([], {'target': 'QSArproperties_test', 'args': '(smiles_17, forest, 17, name)'}), '(target=QSArproperties_test, args=(smiles_17, forest, 17, name))\n', (17872, 17936), False, 'from multiprocessing import Process\n'), ((17949, 18020), 'multiprocessing.Process', 'Process', ([], {'target': 'QSArproperties_test', 'args': '(smiles_18, forest, 18, name)'}), '(target=QSArproperties_test, args=(smiles_18, forest, 18, name))\n', (17956, 18020), False, 'from multiprocessing import Process\n'), ((18033, 18104), 'multiprocessing.Process', 'Process', ([], {'target': 'QSArproperties_test', 'args': '(smiles_19, forest, 19, name)'}), '(target=QSArproperties_test, args=(smiles_19, forest, 19, name))\n', (18040, 18104), False, 'from multiprocessing import Process\n'), ((18117, 18188), 'multiprocessing.Process', 'Process', ([], {'target': 'QSArproperties_test', 'args': '(smiles_20, forest, 20, name)'}), '(target=QSArproperties_test, args=(smiles_20, forest, 20, name))\n', (18124, 18188), False, 'from multiprocessing import Process\n'), ((4688, 4717), 'math.log', 'math.log', (['(sascore + 1.0 - 9.0)'], {}), '(sascore + 1.0 - 9.0)\n', (4696, 4717), False, 'import math\n'), ((8084, 8113), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['record[0]'], {}), '(record[0])\n', (8102, 8113), False, 'from rdkit import Chem\n'), ((2107, 2123), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (2118, 2123), False, 'from collections import defaultdict\n')] |
#!/usr/bin/env python
"""
This is a script that receives two MuNG files. The first
is supposed to be the output of an OMR file, while the
second represents the expected MuNG (ground-truth.)
The script then computes evaluation metrics as regards
the notation assembly stage of the OMR pipeline.
"""
from __future__ import print_function, unicode_literals, division
import copy
from glob import glob
from typing import List
from muscima.cropobject import CropObject
__version__ = "0.0.1"
__author__ = "<NAME>"
import argparse
import logging
import os
import numpy as np
from muscima.io import parse_cropobject_list
##############################################################################
def build_argument_parser():
    """Build and return the command-line parser for this evaluation script."""
    p = argparse.ArgumentParser(description=__doc__, add_help=True,
                                formatter_class=argparse.RawDescriptionHelpFormatter)
    p.add_argument('-r', '--reference', action='store', required=True,
                   help='The reference MuNG (ground-truth annotation). File or directory')
    p.add_argument('-p', '--predicted', action='store',
                   help='The predicted MuNG (output of OMR). File or directory')
    p.add_argument('-v', '--verbose', action='store_true',
                   help='Turn on INFO messages.')
    p.add_argument('--debug', action='store_true',
                   help='Turn on DEBUG messages.')
    return p
"""
Check whether two objects (predicted and reference ones)
should be considered to match. They do iff:
- The class name is equal
- Their IoU exceeds a threshold
"""
def match(p_obj, r_obj, threshold=0.5):
    """Decide whether a predicted and a reference object should be matched.

    They match iff they share a class name and the IoU of their (inclusive)
    bounding boxes exceeds ``threshold``.
    """
    if p_obj.clsname != r_obj.clsname:
        return False
    # Intersection extents; boxes are inclusive, hence the +1 terms.
    iw = min(p_obj.right, r_obj.right) - max(p_obj.left, r_obj.left) + 1
    if iw <= 0:
        return False
    ih = min(p_obj.bottom, r_obj.bottom) - max(p_obj.top, r_obj.top) + 1
    if ih <= 0:
        return False
    p_area = (p_obj.right - p_obj.left + 1) * (p_obj.bottom - p_obj.top + 1)
    r_area = (r_obj.right - r_obj.left + 1) * (r_obj.bottom - r_obj.top + 1)
    union = np.float64(p_area + r_area - iw * ih)
    return bool(iw * ih / union > threshold)
def get_object_matching_pairs(predicted_objects: List[CropObject], reference_objects: List[CropObject]):
    """Return (predicted objid, reference objid) pairs for every matching combination."""
    return [(p.objid, r.objid)
            for p in predicted_objects
            for r in reference_objects
            if match(p, r)]
def cropobject_dict_from_list(cropobject_list):
    """Index CropObjects by their ``objid`` so they can be looked up in O(1)."""
    by_id = {}
    for cropobject in cropobject_list:
        by_id[cropobject.objid] = cropobject
    return by_id
def evaluate_result(mung_reference_file, predicted_mung_file):
    """Evaluate one predicted MuNG against its reference; print and return the stats."""
    print("Computing statistics for {0}".format(predicted_mung_file))
    # Parse both annotation files into CropObject lists.
    reference_objects = parse_cropobject_list(mung_reference_file)
    predicted_objects = parse_cropobject_list(predicted_mung_file)
    stats = compute_statistics_on_crop_objects(reference_objects, predicted_objects)
    precision, recall, f1_score, true_positives, false_positives, false_negatives = stats
    print('Precision: {0:.3f}, Recall: {1:.3f}, F1-Score: {2:.3f}, True positives: {3}, False positives: {4}, '
          'False Negatives: {5}'.format(precision, recall, f1_score, true_positives, false_positives, false_negatives))
    return stats
def sanitize_crop_object_class_names(crop_objects: "List[CropObject]"):
    """Strip special characters ('"', '/', '.') from each object's class name, in place.

    Fix: the original read/wrote ``crop_object.class_name``, but muscima's
    CropObject — and ``match()`` in this file — use ``clsname``, so the
    sanitization never affected matching (and reading a missing attribute
    would raise AttributeError).  The annotation is quoted so it is not
    evaluated eagerly.
    """
    for crop_object in crop_objects:
        # Some classes have special characters in their class name that we have to remove
        crop_object.clsname = crop_object.clsname.replace('"', '').replace('/', '').replace('.', '')
def compute_statistics_on_crop_objects(reference_objects, predicted_objects):
    """Compute edge-level precision/recall/F1 for the notation-assembly stage.

    Objects are matched between prediction and reference (same class name,
    bounding-box IoU above threshold; see ``match``), then the outgoing edges
    (``outlinks``) of matched objects are compared through that mapping.

    Returns (precision, recall, f1_score, true_positives, false_positives,
    false_negatives).
    """
    # Work on deep copies: the sanitization below mutates the objects.
    reference_objects = [copy.deepcopy(c) for c in reference_objects]
    predicted_objects = [copy.deepcopy(c) for c in predicted_objects]
    sanitize_crop_object_class_names(reference_objects)
    sanitize_crop_object_class_names(predicted_objects)
    # Build (predicted objid, reference objid) pairs between predicted and reference
    object_matching_pair = get_object_matching_pairs(predicted_objects, reference_objects)
    # Mappings between the two id spaces (reference objid <-> predicted objid)
    reference_to_prediction_mapping = {r: p for p, r in object_matching_pair}
    prediction_to_reference_mapping = {p: r for p, r in object_matching_pair}
    # Build dict's from crop object lists that are accessed by id
    predicted_objects = cropobject_dict_from_list(predicted_objects)
    reference_objects = cropobject_dict_from_list(reference_objects)
    # Basic evaluation metrics
    true_positives, false_positives, false_negatives = [0, 0, 0]
    for p_obj_id, r_obj_id in object_matching_pair:
        predicted_object = predicted_objects[p_obj_id]
        reference_object = reference_objects[r_obj_id]
        # Check TP and FP (from predicted to reference)
        for out_p_edge in predicted_object.outlinks:
            if out_p_edge not in prediction_to_reference_mapping:
                # We predicted an edge between objects, but the objects from the prediction
                # could not be matched to objects from the ground truth, therefore this
                # edge does not exist there and should be counted as a false positive.
                false_positives += 1
            elif prediction_to_reference_mapping[out_p_edge] in reference_object.outlinks:
                # The mapped target is also linked in the reference: correct edge.
                true_positives += 1
                logging.debug(" ".join(map(str, [p_obj_id, r_obj_id, out_p_edge])))
            else:
                # Target object exists in the reference but the edge does not.
                false_positives += 1
        # Check FN (from reference to predicted)
        for out_r_edge in reference_object.outlinks:
            if out_r_edge not in reference_to_prediction_mapping:
                # An outgoing edge from the reference object does not even have a corresponding object
                # in the prediction, therefore it is a false negative.
                false_negatives += 1
            elif reference_to_prediction_mapping[out_r_edge] not in predicted_object.outlinks:
                # An outgoing edge from the reference object does have a corresponding object
                # in the prediction, but no edge, therefore it is a false negative.
                false_negatives += 1
    # NOTE(review): raises ZeroDivisionError when there are no edges at all
    # (TP+FP == 0 or TP+FN == 0) — confirm inputs always contain edges.
    precision = true_positives / (true_positives + false_positives)
    recall = true_positives / (true_positives + false_negatives)
    f1_score = (2. * true_positives) / (2. * true_positives + false_positives + false_negatives)
    return precision, recall, f1_score, true_positives, false_positives, false_negatives
if __name__ == '__main__':
    parser = build_argument_parser()
    args = parser.parse_args()
    # Collect reference MuNGs: a single file, or every *.xml in a directory.
    reference_mungs = []
    if os.path.isfile(args.reference):
        reference_mungs.append(args.reference)
    elif os.path.isdir(args.reference):
        # Fix: glob() returns files in arbitrary OS-dependent order, and the
        # zip() below pairs the two lists positionally — sort both globs so
        # references and predictions line up deterministically.
        reference_mungs.extend(sorted(glob(args.reference + "/*.xml")))
    predicted_mungs = []
    if os.path.isfile(args.predicted):
        predicted_mungs.append(args.predicted)
    elif os.path.isdir(args.predicted):
        predicted_mungs.extend(sorted(glob(args.predicted + "/*.xml")))
    if len(reference_mungs) != len(predicted_mungs):
        print("Didn't find a prediction for every reference mung. Filtering...")
        # Keep only references whose basename matches a prediction basename
        # (predictions are named "<stem>_predicted.xml").
        predicted_mung_names = [os.path.basename(p).replace("_predicted.xml", ".xml") for p in predicted_mungs]
        reference_mungs = [r for r in reference_mungs if os.path.basename(r) in predicted_mung_names]
    for reference, prediction in zip(reference_mungs, predicted_mungs):
        precision, recall, f1_score, true_positives, false_positives, false_negatives = \
            evaluate_result(reference, prediction)
| [
"argparse.ArgumentParser",
"numpy.float64",
"os.path.isfile",
"os.path.isdir",
"os.path.basename",
"muscima.io.parse_cropobject_list",
"copy.deepcopy",
"glob.glob"
] | [((742, 860), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__', 'add_help': '(True)', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), '(description=__doc__, add_help=True, formatter_class\n =argparse.RawDescriptionHelpFormatter)\n', (765, 860), False, 'import argparse\n'), ((3087, 3129), 'muscima.io.parse_cropobject_list', 'parse_cropobject_list', (['mung_reference_file'], {}), '(mung_reference_file)\n', (3108, 3129), False, 'from muscima.io import parse_cropobject_list\n'), ((3154, 3196), 'muscima.io.parse_cropobject_list', 'parse_cropobject_list', (['predicted_mung_file'], {}), '(predicted_mung_file)\n', (3175, 3196), False, 'from muscima.io import parse_cropobject_list\n'), ((6992, 7022), 'os.path.isfile', 'os.path.isfile', (['args.reference'], {}), '(args.reference)\n', (7006, 7022), False, 'import os\n'), ((7208, 7238), 'os.path.isfile', 'os.path.isfile', (['args.predicted'], {}), '(args.predicted)\n', (7222, 7238), False, 'import os\n'), ((4097, 4113), 'copy.deepcopy', 'copy.deepcopy', (['c'], {}), '(c)\n', (4110, 4113), False, 'import copy\n'), ((4167, 4183), 'copy.deepcopy', 'copy.deepcopy', (['c'], {}), '(c)\n', (4180, 4183), False, 'import copy\n'), ((7080, 7109), 'os.path.isdir', 'os.path.isdir', (['args.reference'], {}), '(args.reference)\n', (7093, 7109), False, 'import os\n'), ((7296, 7325), 'os.path.isdir', 'os.path.isdir', (['args.predicted'], {}), '(args.predicted)\n', (7309, 7325), False, 'import os\n'), ((2154, 2244), 'numpy.float64', 'np.float64', (['((p_box[2] - p_box[0] + 1) * (p_box[3] - p_box[1] + 1) + box_area - iw * ih)'], {}), '((p_box[2] - p_box[0] + 1) * (p_box[3] - p_box[1] + 1) + box_area -\n iw * ih)\n', (2164, 2244), True, 'import numpy as np\n'), ((7142, 7173), 'glob.glob', 'glob', (["(args.reference + '/*.xml')"], {}), "(args.reference + '/*.xml')\n", (7146, 7173), False, 'from glob import glob\n'), ((7358, 7389), 'glob.glob', 'glob', (["(args.predicted + '/*.xml')"], {}), 
"(args.predicted + '/*.xml')\n", (7362, 7389), False, 'from glob import glob\n'), ((7558, 7577), 'os.path.basename', 'os.path.basename', (['p'], {}), '(p)\n', (7574, 7577), False, 'import os\n'), ((7695, 7714), 'os.path.basename', 'os.path.basename', (['r'], {}), '(r)\n', (7711, 7714), False, 'import os\n')] |
import torch
import numpy as np
import random
import os
import shutil
from .metrics import accuracy
import logging
class Trainer(object):
    """Supervised training loop for a PyTorch model.

    Wraps per-epoch training/validation, deterministic seeding,
    checkpointing (with best-model copy), resuming and file logging.
    The public entry point is ``__call__(dataloaders, n_epochs, model_dir)``.
    """

    def __init__(self, model, optimizer, criteria=torch.nn.CrossEntropyLoss, metric=accuracy, metric_name = 'acc',
                 scheduler=None, seed=0):
        """
        :param model: ``torch.nn.Module`` to train.
        :param optimizer: optimizer bound to ``model``'s parameters.
        :param criteria: loss function instance, or a loss *class* to be
            instantiated (the default is the class ``torch.nn.CrossEntropyLoss``).
        :param metric: callable ``metric(y_pred, y) -> float``.
        :param metric_name: label used for the metric in log lines.
        :param scheduler: optional LR scheduler; if it exposes ``batch_step``
            it is advanced per batch, otherwise ``step()`` is called per epoch.
        :param seed: seed applied to torch / numpy / random RNGs.
        """
        self.model = model
        self.optimizer = optimizer
        # Fix: the default argument is the loss *class*, not an instance.
        # Without instantiation, self.criteria.to(device) and
        # self.criteria(y_pred, y) would be invoked on the class itself and
        # fail.  Instances passed by callers are used unchanged.
        self.criteria = criteria() if isinstance(criteria, type) else criteria
        self.metric = metric
        self.metric_name = metric_name
        self.scheduler = scheduler
        self.seed = seed
        self.use_cuda = torch.cuda.is_available()
        self.device = torch.device("cuda:0" if self.use_cuda else "cpu")
        self.best_loss = float('Inf')
        self.best_metric = 0
        self.start_epoch = 0
        self.manual_seed()
        self.model_path = './exp/'
        self.logger = None

    def manual_seed(self):
        """Seed every RNG in use and force cuDNN determinism for reproducibility."""
        torch.manual_seed(self.seed)
        torch.cuda.manual_seed(self.seed)
        if self.use_cuda:
            torch.cuda.manual_seed_all(self.seed)  # if you are using multi-GPU.
        np.random.seed(self.seed)  # Numpy module.
        random.seed(self.seed)  # Python random module.
        # Trade cuDNN autotuning speed for deterministic kernels.
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True

    def set_device(self):
        """Move the model and the loss criterion to the selected device."""
        self.model = self.model.to(self.device)
        self.criteria = self.criteria.to(self.device)

    def train_step(self, dataloader):
        """Run one training epoch; return (mean loss, mean metric) over batches."""
        epoch_loss = 0.
        epoch_metric = 0.
        self.model.train()
        for X, y in dataloader:
            X = X.float().to(self.device)
            y = y.long().to(self.device)
            self.optimizer.zero_grad()
            y_pred = self.model(X)
            loss = self.criteria(y_pred, y)
            metric = self.metric(y_pred, y)
            # Backpropagate and update weights.
            loss.backward()
            self.optimizer.step()
            # Schedulers exposing a batch_step() method (e.g. cyclic LR)
            # are advanced once per batch.
            if self.scheduler and hasattr(self.scheduler.__class__, 'batch_step') \
                    and callable(getattr(self.scheduler.__class__, 'batch_step')):
                self.scheduler.batch_step()
            epoch_loss += loss.item()
            epoch_metric += metric
        if self.scheduler:
            self.scheduler.step()
        epoch_loss = epoch_loss / len(dataloader)
        epoch_metric = epoch_metric / len(dataloader)
        return epoch_loss, epoch_metric

    def val_step(self, dataloader):
        """Run one validation epoch (no gradients); return (mean loss, mean metric)."""
        epoch_loss = 0.
        epoch_metric = 0
        self.model.eval()
        for X, y in dataloader:
            X = X.float().to(self.device)
            y = y.long().to(self.device)
            with torch.no_grad():
                y_pred = self.model(X)
            loss = self.criteria(y_pred, y)
            metric = self.metric(y_pred, y)
            epoch_loss += loss.item()
            epoch_metric += metric
        epoch_loss = epoch_loss / len(dataloader)
        epoch_metric = epoch_metric / len(dataloader)
        return epoch_loss, epoch_metric

    def save_checkpoint(self, epoch, model_dir, is_best=False):
        """Persist model/optimizer state; also copy to model_best when is_best."""
        # Create the state dictionary.
        state = {
            'epoch': epoch,
            'state_dict': self.model.state_dict(),
            'best_metric': self.best_metric,
            'best_loss': self.best_loss,
            'optimizer': self.optimizer.state_dict(),
        }
        # Save (creating the target directory on first use).
        if not os.path.exists(model_dir):
            os.makedirs(model_dir)
        model_path = model_dir + "model_checkpoint.pth.tar"
        torch.save(state, model_path)
        if is_best:
            shutil.copyfile(model_path, model_dir + 'model_best.pth.tar')

    def resume(self, model_path):
        """Restore model/optimizer state from a checkpoint and resume training.

        Note: the input model & optimizer must be pre-defined; this routine
        only updates their states.
        """
        self.set_device()
        self.model_path = model_path
        model_dir = os.path.dirname(os.path.abspath(model_path))
        self.logger = logging.getLogger(__name__)
        # start_epoch != 0 makes init_logger append to the existing log file.
        self.start_epoch = 1
        self.init_logger(model_dir + '/training.log')
        if os.path.isfile(model_path):
            self.logger.info("=> loading checkpoint '%s'", model_path)
            # map_location='cpu' allows loading a GPU-trained checkpoint anywhere.
            state = torch.load(model_path, map_location=torch.device('cpu'))
            self.start_epoch = state['epoch']+1
            self.model.load_state_dict(state['state_dict'])
            self.optimizer.load_state_dict(state['optimizer'])
            self.best_metric = state['best_metric']
            self.best_loss = state['best_loss']
            self.logger.info("=> loaded checkpoint '%s' (epoch %d)", model_path, state['epoch'] + 1)
        else:
            self.logger.info("=> no checkpoint found at '%s'", model_path)

    def init_logger(self, path):
        """Attach a file handler at DEBUG level; overwrite on fresh runs, append on resume."""
        # Drop handlers inherited from any previous basicConfig/run.
        for handler in logging.root.handlers[:]:
            logging.root.removeHandler(handler)
        filemode = 'w' if self.start_epoch == 0 else 'a'
        self.logger.setLevel(logging.DEBUG)
        ch = logging.FileHandler(path, mode=filemode, encoding='utf-8')
        ch.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(message)s')
        ch.setFormatter(formatter)
        self.logger.addHandler(ch)

    def __call__(self, dataloaders, n_epochs, model_dir):
        """Train for ``n_epochs`` using dataloaders['train'] / dataloaders['val'].

        Checkpoints every epoch; tracks the best validation loss and copies
        that checkpoint to model_best.pth.tar.
        """
        self.set_device()
        self.model_path = model_dir
        if not os.path.exists(model_dir):
            os.makedirs(model_dir)
        if self.logger is None:
            self.logger = logging.getLogger(__name__)
            self.init_logger(model_dir + '/training.log')
        for epoch in range(self.start_epoch, n_epochs):
            train_loss, train_metric = self.train_step(dataloaders['train'])
            val_loss, val_metric = self.val_step(dataloaders['val'])
            self.save_checkpoint(epoch, model_dir)
            if val_loss < self.best_loss:
                self.logger.info('val_loss improved from %.4f to %.4f, saving model to path', self.best_loss, val_loss)
                self.best_loss = val_loss
                self.best_metric = val_metric
                self.save_checkpoint(epoch, model_dir, is_best=True)
            epoch_data = {'loss': train_loss,
                          self.metric_name: train_metric,
                          'val_loss': val_loss,
                          'val_'+self.metric_name: val_metric,
                          'best_val_loss': self.best_loss,
                          'best_val_'+self.metric_name: self.best_metric}
            str1 = [' %s: %.4f ' % item for item in epoch_data.items()]
            self.logger.info('Epoch %04d/%04d: %s', epoch + 1, n_epochs, '-'.join(str1))
            if self.use_cuda:
                torch.cuda.empty_cache()
| [
"logging.getLogger",
"torch.cuda.is_available",
"os.path.exists",
"logging.FileHandler",
"numpy.random.seed",
"os.path.isfile",
"shutil.copyfile",
"torch.save",
"logging.root.removeHandler",
"torch.cuda.empty_cache",
"torch.device",
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"os.ma... | [((545, 570), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (568, 570), False, 'import torch\n'), ((593, 643), 'torch.device', 'torch.device', (["('cuda:0' if self.use_cuda else 'cpu')"], {}), "('cuda:0' if self.use_cuda else 'cpu')\n", (605, 643), False, 'import torch\n'), ((865, 893), 'torch.manual_seed', 'torch.manual_seed', (['self.seed'], {}), '(self.seed)\n', (882, 893), False, 'import torch\n'), ((902, 935), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['self.seed'], {}), '(self.seed)\n', (924, 935), False, 'import torch\n'), ((1051, 1076), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (1065, 1076), True, 'import numpy as np\n'), ((1102, 1124), 'random.seed', 'random.seed', (['self.seed'], {}), '(self.seed)\n', (1113, 1124), False, 'import random\n'), ((3477, 3506), 'torch.save', 'torch.save', (['state', 'model_path'], {}), '(state, model_path)\n', (3487, 3506), False, 'import torch\n'), ((3890, 3917), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (3907, 3917), False, 'import logging\n'), ((4012, 4038), 'os.path.isfile', 'os.path.isfile', (['model_path'], {}), '(model_path)\n', (4026, 4038), False, 'import os\n'), ((4948, 5006), 'logging.FileHandler', 'logging.FileHandler', (['path'], {'mode': 'filemode', 'encoding': '"""utf-8"""'}), "(path, mode=filemode, encoding='utf-8')\n", (4967, 5006), False, 'import logging\n'), ((5089, 5146), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(message)s"""'], {}), "('%(asctime)s - %(name)s - %(message)s')\n", (5106, 5146), False, 'import logging\n'), ((974, 1011), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['self.seed'], {}), '(self.seed)\n', (1000, 1011), False, 'import torch\n'), ((3346, 3371), 'os.path.exists', 'os.path.exists', (['model_dir'], {}), '(model_dir)\n', (3360, 3371), False, 'import os\n'), ((3385, 3407), 'os.makedirs', 'os.makedirs', (['model_dir'], {}), 
'(model_dir)\n', (3396, 3407), False, 'import os\n'), ((3539, 3600), 'shutil.copyfile', 'shutil.copyfile', (['model_path', "(model_dir + 'model_best.pth.tar')"], {}), "(model_path, model_dir + 'model_best.pth.tar')\n", (3554, 3600), False, 'import shutil\n'), ((3839, 3866), 'os.path.abspath', 'os.path.abspath', (['model_path'], {}), '(model_path)\n', (3854, 3866), False, 'import os\n'), ((4744, 4779), 'logging.root.removeHandler', 'logging.root.removeHandler', (['handler'], {}), '(handler)\n', (4770, 4779), False, 'import logging\n'), ((5410, 5435), 'os.path.exists', 'os.path.exists', (['model_dir'], {}), '(model_dir)\n', (5424, 5435), False, 'import os\n'), ((5449, 5471), 'os.makedirs', 'os.makedirs', (['model_dir'], {}), '(model_dir)\n', (5460, 5471), False, 'import os\n'), ((5531, 5558), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (5548, 5558), False, 'import logging\n'), ((6739, 6763), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (6761, 6763), False, 'import torch\n'), ((2590, 2605), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2603, 2605), False, 'import torch\n'), ((4167, 4186), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (4179, 4186), False, 'import torch\n')] |
import numpy as np
from src.models.dnam.tabnet import TabNetModel
import torch
import lightgbm as lgb
import pandas as pd
import hydra
from omegaconf import DictConfig
from pytorch_lightning import (
LightningDataModule,
seed_everything,
)
from experiment.logging import log_hyperparameters
from pytorch_lightning.loggers import LightningLoggerBase
from src.utils import utils
from experiment.routines import eval_classification_sa
from typing import List
import wandb
from catboost import CatBoost
import xgboost as xgb
log = utils.get_logger(__name__)
def inference(config: DictConfig):
    """Run inference for a trained model selected by ``config.model_type``.

    Instantiates the datamodule, loads the checkpoint at ``config.ckpt_path``
    for one of the supported backends (lightgbm / catboost / xgboost /
    tabnet), predicts class probabilities for the whole dataframe, logs
    evaluation metrics via the configured loggers and writes
    ``predictions.xlsx``.

    :raises ValueError: if ``config.model_type`` is not a supported backend.
    """
    if "seed" in config:
        seed_everything(config.seed)

    if 'wandb' in config.logger:
        config.logger.wandb["project"] = config.project_name

    # Init lightning loggers
    loggers: List[LightningLoggerBase] = []
    if "logger" in config:
        for _, lg_conf in config.logger.items():
            if "_target_" in lg_conf:
                log.info(f"Instantiating logger <{lg_conf._target_}>")
                loggers.append(hydra.utils.instantiate(lg_conf))

    log.info("Logging hyperparameters!")
    log_hyperparameters(loggers, config)

    # Init Lightning datamodule for test
    log.info(f"Instantiating datamodule <{config.datamodule._target_}>")
    datamodule: LightningDataModule = hydra.utils.instantiate(config.datamodule)
    datamodule.setup()
    feature_names = datamodule.get_feature_names()
    class_names = datamodule.get_class_names()
    outcome_name = datamodule.get_outcome_name()
    df = datamodule.get_df()
    df['pred'] = 0
    X_test = df.loc[:, feature_names].values
    y_test = df.loc[:, outcome_name].values

    # Load the checkpoint and predict class probabilities with the backend
    # matching config.model_type.
    if config.model_type == "lightgbm":
        model = lgb.Booster(model_file=config.ckpt_path)
        y_test_pred_prob = model.predict(X_test)
    elif config.model_type == "catboost":
        model = CatBoost()
        model.load_model(config.ckpt_path)
        y_test_pred_prob = model.predict(X_test)
    elif config.model_type == "xgboost":
        model = xgb.Booster()
        model.load_model(config.ckpt_path)
        dmat_test = xgb.DMatrix(X_test, y_test, feature_names=feature_names)
        y_test_pred_prob = model.predict(dmat_test)
    elif config.model_type == "tabnet":
        model = TabNetModel.load_from_checkpoint(checkpoint_path=f"{config.ckpt_path}")
        model.produce_probabilities = True
        model.eval()
        model.freeze()
        X_test_pt = torch.from_numpy(X_test)
        y_test_pred_prob = model(X_test_pt).cpu().detach().numpy()
    else:
        # Fix: the original raised the constant (and misleading) message
        # f"Unsupported sa_model"; report the actual offending value.
        raise ValueError(f"Unsupported model_type: {config.model_type}")

    y_test_pred = np.argmax(y_test_pred_prob, 1)
    eval_classification_sa(config, class_names, y_test, y_test_pred, y_test_pred_prob, loggers, 'inference', is_log=True, is_save=True)

    # Attach predictions and per-class probabilities to the dataframe.
    df.loc[:, "pred"] = y_test_pred
    for cl_id, cl in enumerate(class_names):
        df.loc[:, f"pred_prob_{cl_id}"] = y_test_pred_prob[:, cl_id]
    predictions = df.loc[:, [outcome_name, "pred"] + [f"pred_prob_{cl_id}" for cl_id, cl in enumerate(class_names)]]
    predictions.to_excel("predictions.xlsx", index=True)

    for logger in loggers:
        logger.save()
    if 'wandb' in config.logger:
        wandb.finish()
| [
"src.models.dnam.tabnet.TabNetModel.load_from_checkpoint",
"experiment.routines.eval_classification_sa",
"hydra.utils.instantiate",
"pytorch_lightning.seed_everything",
"lightgbm.Booster",
"numpy.argmax",
"torch.from_numpy",
"experiment.logging.log_hyperparameters",
"wandb.finish",
"xgboost.Booste... | [((537, 563), 'src.utils.utils.get_logger', 'utils.get_logger', (['__name__'], {}), '(__name__)\n', (553, 563), False, 'from src.utils import utils\n'), ((1128, 1164), 'experiment.logging.log_hyperparameters', 'log_hyperparameters', (['loggers', 'config'], {}), '(loggers, config)\n', (1147, 1164), False, 'from experiment.logging import log_hyperparameters\n'), ((1318, 1360), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['config.datamodule'], {}), '(config.datamodule)\n', (1341, 1360), False, 'import hydra\n'), ((2625, 2655), 'numpy.argmax', 'np.argmax', (['y_test_pred_prob', '(1)'], {}), '(y_test_pred_prob, 1)\n', (2634, 2655), True, 'import numpy as np\n'), ((2661, 2796), 'experiment.routines.eval_classification_sa', 'eval_classification_sa', (['config', 'class_names', 'y_test', 'y_test_pred', 'y_test_pred_prob', 'loggers', '"""inference"""'], {'is_log': '(True)', 'is_save': '(True)'}), "(config, class_names, y_test, y_test_pred,\n y_test_pred_prob, loggers, 'inference', is_log=True, is_save=True)\n", (2683, 2796), False, 'from experiment.routines import eval_classification_sa\n'), ((634, 662), 'pytorch_lightning.seed_everything', 'seed_everything', (['config.seed'], {}), '(config.seed)\n', (649, 662), False, 'from pytorch_lightning import LightningDataModule, seed_everything\n'), ((1725, 1765), 'lightgbm.Booster', 'lgb.Booster', ([], {'model_file': 'config.ckpt_path'}), '(model_file=config.ckpt_path)\n', (1736, 1765), True, 'import lightgbm as lgb\n'), ((3210, 3224), 'wandb.finish', 'wandb.finish', ([], {}), '()\n', (3222, 3224), False, 'import wandb\n'), ((1873, 1883), 'catboost.CatBoost', 'CatBoost', ([], {}), '()\n', (1881, 1883), False, 'from catboost import CatBoost\n'), ((2033, 2046), 'xgboost.Booster', 'xgb.Booster', ([], {}), '()\n', (2044, 2046), True, 'import xgboost as xgb\n'), ((2110, 2166), 'xgboost.DMatrix', 'xgb.DMatrix', (['X_test', 'y_test'], {'feature_names': 'feature_names'}), '(X_test, y_test, 
feature_names=feature_names)\n', (2121, 2166), True, 'import xgboost as xgb\n'), ((1048, 1080), 'hydra.utils.instantiate', 'hydra.utils.instantiate', (['lg_conf'], {}), '(lg_conf)\n', (1071, 1080), False, 'import hydra\n'), ((2275, 2346), 'src.models.dnam.tabnet.TabNetModel.load_from_checkpoint', 'TabNetModel.load_from_checkpoint', ([], {'checkpoint_path': 'f"""{config.ckpt_path}"""'}), "(checkpoint_path=f'{config.ckpt_path}')\n", (2307, 2346), False, 'from src.models.dnam.tabnet import TabNetModel\n'), ((2454, 2478), 'torch.from_numpy', 'torch.from_numpy', (['X_test'], {}), '(X_test)\n', (2470, 2478), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
"""
Created on 23/03/18
Author : <NAME>
Produces 2D maps of Lick indices
"""
from __future__ import print_function, division
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__),
'..')))
import numpy as np
import astropy.units as u
from astropy.table import Table, vstack, hstack
import context
from geomfov import get_geom
from mapplot import PlotVoronoiMaps
def make_tables(key="lick", targetSN=70, nsim=200, dataset="MUSE-DEEP",
redo=False, sigma=None):
""" Produces tables for maps of individual indices. """
# Reading name of the indices
indexfile = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', "tables", "spindex_CS.dat"))
# spindex = np.loadtxt(indexfile, usecols=(8,), dtype=str)
units_bin = np.loadtxt(indexfile, usecols=(7,))
units = np.where(units_bin, u.mag, u.AA)
sigma_str = "" if sigma is None else "_sigma{}".format(sigma)
for field in context.fields[::-1]:
wdir = os.path.join(context.data_dir, dataset, field )
output = os.path.join(wdir, "table_{}_{}_sn{}_nsim{}{}.fits".format(key,
field, targetSN, nsim, sigma_str))
if os.path.exists(output) and not redo:
continue
geom = get_geom(field, targetSN)
lick_dir = os.path.join(wdir, "lick" )
licktables = ["{}_sn{}_{}_nsim{}{}.fits".format(field, targetSN,
_, nsim, sigma_str) for _ in geom["BIN"]]
tables, idx = [], []
for i, table in enumerate(licktables):
datafile = os.path.join(lick_dir, table)
if not os.path.exists(datafile):
continue
else:
idx.append(i)
data = Table.read(datafile)
# Setting headers of the tables
names = np.array(data["name"].tolist(), dtype="U25")
nameserr = np.array(["{}_err".format(_) for _ in names],
dtype=names.dtype)
newnames = np.empty(2 * len(names), dtype="U25")
newnames[0::2] = names
newnames[1::2] = nameserr
# Reading data and making new table
values = data[key]
errors = data["{}err".format(key)]
data = np.empty(2 * len(values), dtype=values.dtype)
data[0::2] = values
data[1::2] = errors
newtab = Table(data, names=newnames)
tables.append(newtab)
if not tables:
continue
table = vstack(tables)
########################################################################
# Setting units
units2x = map(list, zip(units, units))
units = [item for sublist in units2x for item in sublist]
for unit, col in zip(units, table.colnames):
table[col] = table[col] * unit
########################################################################
table = hstack([geom[idx], table])
table.write(output, format="fits", overwrite=True)
def lick_maps_individual(key="lick", targetSN=70, dataset="MUSE-DEEP",
nsim=200, sigma=None):
""" Produces maps of Lick indices individually. """
sigma_str = "" if sigma is None else "_sigma{}".format(sigma)
tables = []
for field in context.fields:
tables.append(os.path.join(context.data_dir, dataset, field,
"table_{}_{}_sn{}_nsim{}{}.fits".format(key, field, targetSN,
nsim, sigma_str)))
idx = [i for i,_ in enumerate(tables) if os.path.exists(_)]
fields = [context.fields[i] for i in idx]
tables = [tables[i] for i in idx]
data = [Table.read(_) for _ in tables]
imin, imax = 6, 32
############################################################################
# Setting columns to be used
indexfile = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', "tables", "spindex_CS.dat"))
columns = np.loadtxt(indexfile, usecols=(8,), dtype=str)[imin:imax]
###########################################################################
# Calculate limits and correct labels
lims, labels = [], []
for col in columns:
a = np.concatenate([data[i][col] for i in idx])
q1, q2 = np.percentile(a[np.isfinite(a)], [15, 95])
lims.append([q1, q2])
has_muse = True if "_muse" in col else False
label = col.replace("_muse", "")
label = "{}$".format(label.replace("_", "$_")) if "_" in label else \
label
label = label.replace("_beta", "\\beta")
label = label.replace("_D", " D")
label = "{}$^{{\\rm MUSE}}$".format(label) if has_muse else label
labels.append(label)
###########################################################################
cmaps = len(columns) * ["YlOrBr"]
units_bin = np.loadtxt(indexfile, usecols=(7,))
units = np.where(units_bin, "mag", "\AA")[imin:imax]
cb_fmts = ["%.2f" if unit =="\AA" else "%.3f" for unit in units]
labels = ["{0} ({1})".format(x,y) for x,y in zip(labels, units)]
pvm = PlotVoronoiMaps(data, columns, labels=labels, lims=lims,
cmaps=cmaps, cb_fmts=cb_fmts)
pvm.plot(sigma=sigma)
return
if __name__ == "__main__":
sigmas = [None, 300]
for sigma in sigmas:
licktable = make_tables(redo=False, sigma=sigma)
lick_maps_individual(sigma=sigma)
| [
"geomfov.get_geom",
"os.path.exists",
"astropy.table.hstack",
"astropy.table.Table",
"numpy.where",
"os.path.join",
"mapplot.PlotVoronoiMaps",
"os.path.dirname",
"numpy.isfinite",
"numpy.concatenate",
"astropy.table.vstack",
"numpy.loadtxt",
"astropy.table.Table.read"
] | [((862, 897), 'numpy.loadtxt', 'np.loadtxt', (['indexfile'], {'usecols': '(7,)'}), '(indexfile, usecols=(7,))\n', (872, 897), True, 'import numpy as np\n'), ((910, 942), 'numpy.where', 'np.where', (['units_bin', 'u.mag', 'u.AA'], {}), '(units_bin, u.mag, u.AA)\n', (918, 942), True, 'import numpy as np\n'), ((4992, 5027), 'numpy.loadtxt', 'np.loadtxt', (['indexfile'], {'usecols': '(7,)'}), '(indexfile, usecols=(7,))\n', (5002, 5027), True, 'import numpy as np\n'), ((5233, 5323), 'mapplot.PlotVoronoiMaps', 'PlotVoronoiMaps', (['data', 'columns'], {'labels': 'labels', 'lims': 'lims', 'cmaps': 'cmaps', 'cb_fmts': 'cb_fmts'}), '(data, columns, labels=labels, lims=lims, cmaps=cmaps,\n cb_fmts=cb_fmts)\n', (5248, 5323), False, 'from mapplot import PlotVoronoiMaps\n'), ((1063, 1109), 'os.path.join', 'os.path.join', (['context.data_dir', 'dataset', 'field'], {}), '(context.data_dir, dataset, field)\n', (1075, 1109), False, 'import os\n'), ((1341, 1366), 'geomfov.get_geom', 'get_geom', (['field', 'targetSN'], {}), '(field, targetSN)\n', (1349, 1366), False, 'from geomfov import get_geom\n'), ((1386, 1412), 'os.path.join', 'os.path.join', (['wdir', '"""lick"""'], {}), "(wdir, 'lick')\n", (1398, 1412), False, 'import os\n'), ((2598, 2612), 'astropy.table.vstack', 'vstack', (['tables'], {}), '(tables)\n', (2604, 2612), False, 'from astropy.table import Table, vstack, hstack\n'), ((3024, 3050), 'astropy.table.hstack', 'hstack', (['[geom[idx], table]'], {}), '([geom[idx], table])\n', (3030, 3050), False, 'from astropy.table import Table, vstack, hstack\n'), ((3775, 3788), 'astropy.table.Table.read', 'Table.read', (['_'], {}), '(_)\n', (3785, 3788), False, 'from astropy.table import Table, vstack, hstack\n'), ((4081, 4127), 'numpy.loadtxt', 'np.loadtxt', (['indexfile'], {'usecols': '(8,)', 'dtype': 'str'}), '(indexfile, usecols=(8,), dtype=str)\n', (4091, 4127), True, 'import numpy as np\n'), ((4323, 4366), 'numpy.concatenate', 'np.concatenate', (['[data[i][col] for i in 
idx]'], {}), '([data[i][col] for i in idx])\n', (4337, 4366), True, 'import numpy as np\n'), ((5040, 5074), 'numpy.where', 'np.where', (['units_bin', '"""mag"""', '"""\\\\AA"""'], {}), "(units_bin, 'mag', '\\\\AA')\n", (5048, 5074), True, 'import numpy as np\n'), ((225, 250), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (240, 250), False, 'import os\n'), ((721, 746), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (736, 746), False, 'import os\n'), ((1268, 1290), 'os.path.exists', 'os.path.exists', (['output'], {}), '(output)\n', (1282, 1290), False, 'import os\n'), ((1649, 1678), 'os.path.join', 'os.path.join', (['lick_dir', 'table'], {}), '(lick_dir, table)\n', (1661, 1678), False, 'import os\n'), ((1816, 1836), 'astropy.table.Table.read', 'Table.read', (['datafile'], {}), '(datafile)\n', (1826, 1836), False, 'from astropy.table import Table, vstack, hstack\n'), ((2476, 2503), 'astropy.table.Table', 'Table', (['data'], {'names': 'newnames'}), '(data, names=newnames)\n', (2481, 2503), False, 'from astropy.table import Table, vstack, hstack\n'), ((3660, 3677), 'os.path.exists', 'os.path.exists', (['_'], {}), '(_)\n', (3674, 3677), False, 'import os\n'), ((4005, 4030), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4020, 4030), False, 'import os\n'), ((1698, 1722), 'os.path.exists', 'os.path.exists', (['datafile'], {}), '(datafile)\n', (1712, 1722), False, 'import os\n'), ((4400, 4414), 'numpy.isfinite', 'np.isfinite', (['a'], {}), '(a)\n', (4411, 4414), True, 'import numpy as np\n')] |
import os
import time
import random
import numpy as np
import datasets
# CHANGE THESE PATHS TO PATHS OF THE .mat DATASETS DESCRIBED IN THE PAPER
mat_path_coil20 = os.path.expanduser("~/Documents/datasets/elm/coil20.mat")
mat_path_g50c = os.path.expanduser("~/Documents/datasets/elm/g50c.mat")
mat_path_uspst = os.path.expanduser("~/Documents/datasets/elm/uspst.mat")
# SET UNDESIRED DATASETS TO FALSE
# e.g to not generate a json file for them
do_coil20 = True
do_coil20b = True
do_g50c = True
do_uspst = True
do_uspstb = True
# place to store generated indices (folds)
# support for changing this might be flaky, so it is best to leave it as is.
json_output_directory = os.path.expanduser("./idx_datasets/")
def gen():
seed = 32
random.seed(seed)
np.random.seed(seed)
print("Generating k-fold partitions")
t = str(time.time()).split('.')[0]
unique_subdir = os.path.join(json_output_directory, t)
os.mkdir(unique_subdir)
print("Storing json files in directory {}".format(unique_subdir))
# coil20
if do_coil20:
d = datasets.gen_partitions(mat_path_coil20, size=1440, L=40, U=1000, V=40, T=360)
finalpath = os.path.join(unique_subdir, "coil20.json")
datasets.dump_json(d, finalpath)
# coil20b, strictly speaking generated the same way as coil20
# (the binarization is done when the coil20 set is loaded for use with
# the model, as the indices are the same with just the classes changed
# to [1,2] -- but using a different shuffle seemed appropriate)
if do_coil20b:
d = datasets.gen_partitions(mat_path_coil20, size=1440, L=40, U=1000, V=40, T=360)
finalpath = os.path.join(unique_subdir, "coil20b.json")
datasets.dump_json(d, finalpath)
# G50C
if do_g50c:
d = datasets.gen_partitions(mat_path_g50c, size=550, L=50, U=314, V=50, T=136)
finalpath = os.path.join(unique_subdir, "g50c.json")
datasets.dump_json(d, finalpath)
# USPST
if do_uspst:
d = datasets.gen_partitions(mat_path_uspst, size=2007, L=50, U=1409, V=50, T=498)
finalpath = os.path.join(unique_subdir, "uspst.json")
datasets.dump_json(d, finalpath)
# USPST(B)
if do_uspstb:
d = datasets.gen_partitions(mat_path_uspst, size=2007, L=50, U=1409, V=50, T=498)
finalpath = os.path.join(unique_subdir, "uspstb.json")
datasets.dump_json(d, finalpath)
if __name__ == '__main__':
gen() | [
"os.path.join",
"random.seed",
"os.mkdir",
"numpy.random.seed",
"datasets.gen_partitions",
"datasets.dump_json",
"time.time",
"os.path.expanduser"
] | [((165, 222), 'os.path.expanduser', 'os.path.expanduser', (['"""~/Documents/datasets/elm/coil20.mat"""'], {}), "('~/Documents/datasets/elm/coil20.mat')\n", (183, 222), False, 'import os\n'), ((239, 294), 'os.path.expanduser', 'os.path.expanduser', (['"""~/Documents/datasets/elm/g50c.mat"""'], {}), "('~/Documents/datasets/elm/g50c.mat')\n", (257, 294), False, 'import os\n'), ((312, 368), 'os.path.expanduser', 'os.path.expanduser', (['"""~/Documents/datasets/elm/uspst.mat"""'], {}), "('~/Documents/datasets/elm/uspst.mat')\n", (330, 368), False, 'import os\n'), ((676, 713), 'os.path.expanduser', 'os.path.expanduser', (['"""./idx_datasets/"""'], {}), "('./idx_datasets/')\n", (694, 713), False, 'import os\n'), ((744, 761), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (755, 761), False, 'import random\n'), ((766, 786), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (780, 786), True, 'import numpy as np\n'), ((890, 928), 'os.path.join', 'os.path.join', (['json_output_directory', 't'], {}), '(json_output_directory, t)\n', (902, 928), False, 'import os\n'), ((933, 956), 'os.mkdir', 'os.mkdir', (['unique_subdir'], {}), '(unique_subdir)\n', (941, 956), False, 'import os\n'), ((1071, 1149), 'datasets.gen_partitions', 'datasets.gen_partitions', (['mat_path_coil20'], {'size': '(1440)', 'L': '(40)', 'U': '(1000)', 'V': '(40)', 'T': '(360)'}), '(mat_path_coil20, size=1440, L=40, U=1000, V=40, T=360)\n', (1094, 1149), False, 'import datasets\n'), ((1170, 1212), 'os.path.join', 'os.path.join', (['unique_subdir', '"""coil20.json"""'], {}), "(unique_subdir, 'coil20.json')\n", (1182, 1212), False, 'import os\n'), ((1221, 1253), 'datasets.dump_json', 'datasets.dump_json', (['d', 'finalpath'], {}), '(d, finalpath)\n', (1239, 1253), False, 'import datasets\n'), ((1572, 1650), 'datasets.gen_partitions', 'datasets.gen_partitions', (['mat_path_coil20'], {'size': '(1440)', 'L': '(40)', 'U': '(1000)', 'V': '(40)', 'T': '(360)'}), '(mat_path_coil20, 
size=1440, L=40, U=1000, V=40, T=360)\n', (1595, 1650), False, 'import datasets\n'), ((1671, 1714), 'os.path.join', 'os.path.join', (['unique_subdir', '"""coil20b.json"""'], {}), "(unique_subdir, 'coil20b.json')\n", (1683, 1714), False, 'import os\n'), ((1723, 1755), 'datasets.dump_json', 'datasets.dump_json', (['d', 'finalpath'], {}), '(d, finalpath)\n', (1741, 1755), False, 'import datasets\n'), ((1796, 1870), 'datasets.gen_partitions', 'datasets.gen_partitions', (['mat_path_g50c'], {'size': '(550)', 'L': '(50)', 'U': '(314)', 'V': '(50)', 'T': '(136)'}), '(mat_path_g50c, size=550, L=50, U=314, V=50, T=136)\n', (1819, 1870), False, 'import datasets\n'), ((1891, 1931), 'os.path.join', 'os.path.join', (['unique_subdir', '"""g50c.json"""'], {}), "(unique_subdir, 'g50c.json')\n", (1903, 1931), False, 'import os\n'), ((1940, 1972), 'datasets.dump_json', 'datasets.dump_json', (['d', 'finalpath'], {}), '(d, finalpath)\n', (1958, 1972), False, 'import datasets\n'), ((2015, 2092), 'datasets.gen_partitions', 'datasets.gen_partitions', (['mat_path_uspst'], {'size': '(2007)', 'L': '(50)', 'U': '(1409)', 'V': '(50)', 'T': '(498)'}), '(mat_path_uspst, size=2007, L=50, U=1409, V=50, T=498)\n', (2038, 2092), False, 'import datasets\n'), ((2113, 2154), 'os.path.join', 'os.path.join', (['unique_subdir', '"""uspst.json"""'], {}), "(unique_subdir, 'uspst.json')\n", (2125, 2154), False, 'import os\n'), ((2163, 2195), 'datasets.dump_json', 'datasets.dump_json', (['d', 'finalpath'], {}), '(d, finalpath)\n', (2181, 2195), False, 'import datasets\n'), ((2242, 2319), 'datasets.gen_partitions', 'datasets.gen_partitions', (['mat_path_uspst'], {'size': '(2007)', 'L': '(50)', 'U': '(1409)', 'V': '(50)', 'T': '(498)'}), '(mat_path_uspst, size=2007, L=50, U=1409, V=50, T=498)\n', (2265, 2319), False, 'import datasets\n'), ((2340, 2382), 'os.path.join', 'os.path.join', (['unique_subdir', '"""uspstb.json"""'], {}), "(unique_subdir, 'uspstb.json')\n", (2352, 2382), False, 'import os\n'), ((2391, 
2423), 'datasets.dump_json', 'datasets.dump_json', (['d', 'finalpath'], {}), '(d, finalpath)\n', (2409, 2423), False, 'import datasets\n'), ((843, 854), 'time.time', 'time.time', ([], {}), '()\n', (852, 854), False, 'import time\n')] |
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
def plot_time_series(x: np.ndarray, title=None) -> None:
sns.set(font_scale=1.5)
sns.set_style("white")
t = np.arange(start=0, stop=x.shape[0])
plt.plot(t, x, linestyle='-', marker='o')
plt.title(title)
plt.xlabel(r'$t$')
plt.ylabel(r'$x_t$')
plt.show()
| [
"seaborn.set",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"seaborn.set_style",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((136, 159), 'seaborn.set', 'sns.set', ([], {'font_scale': '(1.5)'}), '(font_scale=1.5)\n', (143, 159), True, 'import seaborn as sns\n'), ((164, 186), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (177, 186), True, 'import seaborn as sns\n'), ((195, 230), 'numpy.arange', 'np.arange', ([], {'start': '(0)', 'stop': 'x.shape[0]'}), '(start=0, stop=x.shape[0])\n', (204, 230), True, 'import numpy as np\n'), ((235, 276), 'matplotlib.pyplot.plot', 'plt.plot', (['t', 'x'], {'linestyle': '"""-"""', 'marker': '"""o"""'}), "(t, x, linestyle='-', marker='o')\n", (243, 276), True, 'import matplotlib.pyplot as plt\n'), ((281, 297), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (290, 297), True, 'import matplotlib.pyplot as plt\n'), ((302, 319), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$t$"""'], {}), "('$t$')\n", (312, 319), True, 'import matplotlib.pyplot as plt\n'), ((325, 344), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$x_t$"""'], {}), "('$x_t$')\n", (335, 344), True, 'import matplotlib.pyplot as plt\n'), ((350, 360), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (358, 360), True, 'import matplotlib.pyplot as plt\n')] |
'''
测试YOLO模型
'''
import cv2
import scipy
import scipy.misc
import numpy as np
import random
from math import *
def get_transform(center, scale, res, rot=0):
"""
General image processing functions
"""
# Generate transformation matrix
h = 200 * scale
t = np.zeros((3, 3))
t[0, 0] = float(res[1]) / h
t[1, 1] = float(res[0]) / h
t[0, 2] = res[1] * (-float(center[0]) / h + .5)
t[1, 2] = res[0] * (-float(center[1]) / h + .5)
t[2, 2] = 1
if not rot == 0:
rot = -rot # To match direction of rotation from cropping
rot_mat = np.zeros((3, 3))
rot_rad = rot * np.pi / 180
sn, cs = np.sin(rot_rad), np.cos(rot_rad)
rot_mat[0, :2] = [cs, -sn]
rot_mat[1, :2] = [sn, cs]
rot_mat[2, 2] = 1
# Need to rotate around center
t_mat = np.eye(3)
t_mat[0, 2] = -res[1] / 2
t_mat[1, 2] = -res[0] / 2
t_inv = t_mat.copy()
t_inv[:2, 2] *= -1
t = np.dot(t_inv, np.dot(rot_mat, np.dot(t_mat, t)))
return t
def transform(pt, center, scale, res, invert=0, rot=0):
# Transform pixel location to different reference
t = get_transform(center, scale, res, rot=rot)
if invert:
t = np.linalg.inv(t)
new_pt = np.array([pt[0] - 1, pt[1] - 1, 1.]).T
new_pt = np.dot(t, new_pt)
return new_pt[:2].astype(int) + 1
def crop(img, center, scale, res, rot=0):
# Preprocessing for efficient cropping
ht, wd = img.shape[0], img.shape[1]
sf = scale * 200.0 / res[0]
if sf < 2:
sf = 1
else:
new_size = int(np.math.floor(max(ht, wd) / sf))
new_ht = int(np.math.floor(ht / sf))
new_wd = int(np.math.floor(wd / sf))
img = scipy.misc.imresize(img, [new_ht, new_wd])
center = center * 1.0 / sf
scale = scale / sf
# Upper left point
ul = np.array(transform([0, 0], center, scale, res, invert=1))
# Bottom right point
br = np.array(transform(res, center, scale, res, invert=1))
# Padding so that when rotated proper amount of context is included
pad = int(np.linalg.norm(br - ul) / 2 - float(br[1] - ul[1]) / 2)
if not rot == 0:
ul -= pad
br += pad
new_shape = [br[1] - ul[1], br[0] - ul[0]]
if len(img.shape) > 2:
new_shape += [img.shape[2]]
new_img = np.zeros(new_shape)
# Range to fill new array
new_x = max(0, -ul[0]), min(br[0], len(img[0])) - ul[0]
new_y = max(0, -ul[1]), min(br[1], len(img)) - ul[1]
# Range to sample from original image
old_x = max(0, ul[0]), min(len(img[0]), br[0])
old_y = max(0, ul[1]), min(len(img), br[1])
new_img[new_y[0]:new_y[1], new_x[0]:new_x[1]] = img[old_y[0]:old_y[1], old_x[0]:old_x[1]]
if not rot == 0:
# Remove padding
new_img = scipy.misc.imrotate(new_img, rot)
new_img = new_img[pad:-pad, pad:-pad]
new_img = scipy.misc.imresize(new_img, res)
return new_img
def rotate(img,degree):
height,width=img.shape[:2]
heightNew=int(width*fabs(sin(radians(degree)))+height*fabs(cos(radians(degree))))
widthNew=int(height*fabs(sin(radians(degree)))+width*fabs(cos(radians(degree))))
matRotation=cv2.getRotationMatrix2D((width/2,height/2),degree,1)
matRotation[0,2] +=(widthNew-width)/2 #重点在这步,目前不懂为什么加这步
matRotation[1,2] +=(heightNew-height)/2 #重点在这步
imgRotation=cv2.warpAffine(img,matRotation,(widthNew,heightNew),borderValue=(255,255,255))
return imgRotation
if __name__ == '__main__':
'''
with open('keras-yolo3/train_regression.txt','r') as f:
i = random.randint(0,250)
for j in range(i):
f.readline()
'''
src = cv2.imread('data/rot_images/201908_10470622_19-S02012_2000764_1564567176835_0.jpg')
cv2.namedWindow('input_image', cv2.WINDOW_AUTOSIZE)
org_shape = src.shape
#center = ((435+109)/2,(41+362/2))
cv2.rectangle(src,(172,196),(390,247),(0,255,0),2)
#a=(85.0, 80.5) *1.0 /2.0693979933110365
#print(a)
#src = rotate(src,40)
#src = src[81:505,24:451]
#new_shape = src.shape
#center = (src.shape[1]/2,src.shape[0]/2)
#scale = max(org_shape[0]/new_shape[0],org_shape[1]/new_shape[1])
#theta = 0.509*180
#src = crop(src,center,2,(416,416),-theta)
cv2.imshow('input_image', src)
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"cv2.rectangle",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"scipy.misc.imresize",
"numpy.linalg.norm",
"numpy.sin",
"numpy.dot",
"cv2.waitKey",
"numpy.eye",
"cv2.warpAffine",
"numpy.cos",
"cv2.getRotationMatrix2D",
"cv2.imread",
"cv2.namedWindow",
"scipy.misc.imrotate",
"... | [((279, 295), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (287, 295), True, 'import numpy as np\n'), ((1319, 1336), 'numpy.dot', 'np.dot', (['t', 'new_pt'], {}), '(t, new_pt)\n', (1325, 1336), True, 'import numpy as np\n'), ((2344, 2363), 'numpy.zeros', 'np.zeros', (['new_shape'], {}), '(new_shape)\n', (2352, 2363), True, 'import numpy as np\n'), ((2907, 2940), 'scipy.misc.imresize', 'scipy.misc.imresize', (['new_img', 'res'], {}), '(new_img, res)\n', (2926, 2940), False, 'import scipy\n'), ((3204, 3263), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(width / 2, height / 2)', 'degree', '(1)'], {}), '((width / 2, height / 2), degree, 1)\n', (3227, 3263), False, 'import cv2\n'), ((3388, 3477), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'matRotation', '(widthNew, heightNew)'], {'borderValue': '(255, 255, 255)'}), '(img, matRotation, (widthNew, heightNew), borderValue=(255, \n 255, 255))\n', (3402, 3477), False, 'import cv2\n'), ((3692, 3780), 'cv2.imread', 'cv2.imread', (['"""data/rot_images/201908_10470622_19-S02012_2000764_1564567176835_0.jpg"""'], {}), "(\n 'data/rot_images/201908_10470622_19-S02012_2000764_1564567176835_0.jpg')\n", (3702, 3780), False, 'import cv2\n'), ((3780, 3831), 'cv2.namedWindow', 'cv2.namedWindow', (['"""input_image"""', 'cv2.WINDOW_AUTOSIZE'], {}), "('input_image', cv2.WINDOW_AUTOSIZE)\n", (3795, 3831), False, 'import cv2\n'), ((3901, 3959), 'cv2.rectangle', 'cv2.rectangle', (['src', '(172, 196)', '(390, 247)', '(0, 255, 0)', '(2)'], {}), '(src, (172, 196), (390, 247), (0, 255, 0), 2)\n', (3914, 3959), False, 'import cv2\n'), ((4289, 4319), 'cv2.imshow', 'cv2.imshow', (['"""input_image"""', 'src'], {}), "('input_image', src)\n", (4299, 4319), False, 'import cv2\n'), ((4324, 4338), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (4335, 4338), False, 'import cv2\n'), ((4343, 4366), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4364, 4366), False, 'import cv2\n'), ((586, 602), 
'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (594, 602), True, 'import numpy as np\n'), ((839, 848), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (845, 848), True, 'import numpy as np\n'), ((1237, 1253), 'numpy.linalg.inv', 'np.linalg.inv', (['t'], {}), '(t)\n', (1250, 1253), True, 'import numpy as np\n'), ((1267, 1304), 'numpy.array', 'np.array', (['[pt[0] - 1, pt[1] - 1, 1.0]'], {}), '([pt[0] - 1, pt[1] - 1, 1.0])\n', (1275, 1304), True, 'import numpy as np\n'), ((1734, 1776), 'scipy.misc.imresize', 'scipy.misc.imresize', (['img', '[new_ht, new_wd]'], {}), '(img, [new_ht, new_wd])\n', (1753, 1776), False, 'import scipy\n'), ((2812, 2845), 'scipy.misc.imrotate', 'scipy.misc.imrotate', (['new_img', 'rot'], {}), '(new_img, rot)\n', (2831, 2845), False, 'import scipy\n'), ((656, 671), 'numpy.sin', 'np.sin', (['rot_rad'], {}), '(rot_rad)\n', (662, 671), True, 'import numpy as np\n'), ((673, 688), 'numpy.cos', 'np.cos', (['rot_rad'], {}), '(rot_rad)\n', (679, 688), True, 'import numpy as np\n'), ((1651, 1673), 'numpy.math.floor', 'np.math.floor', (['(ht / sf)'], {}), '(ht / sf)\n', (1664, 1673), True, 'import numpy as np\n'), ((1696, 1718), 'numpy.math.floor', 'np.math.floor', (['(wd / sf)'], {}), '(wd / sf)\n', (1709, 1718), True, 'import numpy as np\n'), ((1015, 1031), 'numpy.dot', 'np.dot', (['t_mat', 't'], {}), '(t_mat, t)\n', (1021, 1031), True, 'import numpy as np\n'), ((2106, 2129), 'numpy.linalg.norm', 'np.linalg.norm', (['(br - ul)'], {}), '(br - ul)\n', (2120, 2129), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017 by University of Kassel and Fraunhofer Institute for Wind Energy and
# Energy System Technology (IWES), Kassel. All rights reserved. Use of this source code is governed
# by a BSD-style license that can be found in the LICENSE file.
import numpy as np
from pandapower.auxiliary import _select_is_elements_numba, _add_ppc_options
from pandapower.pd2ppc import _pd2ppc
from pandapower.estimation.idx_bus import *
from pandapower.estimation.idx_brch import *
from pandapower.idx_brch import branch_cols
from pandapower.idx_bus import bus_cols
from pandapower.pf.run_newton_raphson_pf import _run_dc_pf
from pandapower.build_branch import get_is_lines
def _init_ppc(net, v_start, delta_start, calculate_voltage_angles):
# initialize ppc voltages
net.res_bus.vm_pu = v_start
net.res_bus.vm_pu[net.bus.index[net.bus.in_service == False]] = np.nan
net.res_bus.va_degree = delta_start
# select elements in service and convert pandapower ppc to ppc
net._options = {}
_add_ppc_options(net, check_connectivity=False, init="results", trafo_model="t",
copy_constraints_to_ppc=False, mode="pf", enforce_q_lims=False,
calculate_voltage_angles=calculate_voltage_angles, r_switch=0.0,
recycle=dict(_is_elements=False, ppc=False, Ybus=False))
net["_is_elements"] = _select_is_elements_numba(net)
ppc, ppci = _pd2ppc(net)
# do dc power flow for phase shifting transformers
if np.any(net.trafo.shift_degree):
vm_backup = ppci["bus"][:, 7].copy()
ppci["bus"][:, [2, 3]] = 0.
ppci = _run_dc_pf(ppci)
ppci["bus"][:, 7] = vm_backup
return ppc, ppci
def _add_measurements_to_ppc(net, mapping_table, ppci, s_ref):
"""
Add pandapower measurements to the ppci structure by adding new columns
:param net: pandapower net
:param mapping_table: mapping table pd->ppc
:param ppci: generated ppci
:param s_ref: reference power in W
:return: ppc with added columns
"""
# set measurements for ppc format
# add 9 columns to ppc[bus] for Vm, Vm std dev, P, P std dev, Q, Q std dev,
# pandapower measurement indices V, P, Q
bus_append = np.full((ppci["bus"].shape[0], bus_cols_se), np.nan, dtype=ppci["bus"].dtype)
v_measurements = net.measurement[(net.measurement.type == "v")
& (net.measurement.element_type == "bus")]
if len(v_measurements):
bus_positions = mapping_table[v_measurements.bus.values.astype(int)]
bus_append[bus_positions, VM] = v_measurements.value.values
bus_append[bus_positions, VM_STD] = v_measurements.std_dev.values
bus_append[bus_positions, VM_IDX] = v_measurements.index.values
p_measurements = net.measurement[(net.measurement.type == "p")
& (net.measurement.element_type == "bus")]
if len(p_measurements):
bus_positions = mapping_table[p_measurements.bus.values.astype(int)]
bus_append[bus_positions, P] = p_measurements.value.values * 1e3 / s_ref
bus_append[bus_positions, P_STD] = p_measurements.std_dev.values * 1e3 / s_ref
bus_append[bus_positions, P_IDX] = p_measurements.index.values
q_measurements = net.measurement[(net.measurement.type == "q")
& (net.measurement.element_type == "bus")]
if len(q_measurements):
bus_positions = mapping_table[q_measurements.bus.values.astype(int)]
bus_append[bus_positions, Q] = q_measurements.value.values * 1e3 / s_ref
bus_append[bus_positions, Q_STD] = q_measurements.std_dev.values * 1e3 / s_ref
bus_append[bus_positions, Q_IDX] = q_measurements.index.values
# add virtual measurements for artificial buses, which were created because
# of an open line switch. p/q are 0. and std dev is 1. (small value)
new_in_line_buses = np.setdiff1d(np.arange(ppci["bus"].shape[0]),
mapping_table[mapping_table >= 0])
bus_append[new_in_line_buses, 2] = 0.
bus_append[new_in_line_buses, 3] = 1.
bus_append[new_in_line_buses, 4] = 0.
bus_append[new_in_line_buses, 5] = 1.
# add 15 columns to mpc[branch] for Im_from, Im_from std dev, Im_to, Im_to std dev,
# P_from, P_from std dev, P_to, P_to std dev, Q_from, Q_from std dev, Q_to, Q_to std dev,
# pandapower measurement index I, P, Q
branch_append = np.full((ppci["branch"].shape[0], branch_cols_se),
np.nan, dtype=ppci["branch"].dtype)
i_measurements = net.measurement[(net.measurement.type == "i")
& (net.measurement.element_type == "line")]
if len(i_measurements):
meas_from = i_measurements[(i_measurements.bus.values.astype(int) ==
net.line.from_bus[i_measurements.element]).values]
meas_to = i_measurements[(i_measurements.bus.values.astype(int) ==
net.line.to_bus[i_measurements.element]).values]
ix_from = meas_from.element.values.astype(int)
ix_to = meas_to.element.values.astype(int)
i_a_to_pu_from = (net.bus.vn_kv[meas_from.bus] * 1e3 / s_ref).values
i_a_to_pu_to = (net.bus.vn_kv[meas_to.bus] * 1e3 / s_ref).values
branch_append[ix_from, IM_FROM] = meas_from.value.values * i_a_to_pu_from
branch_append[ix_from, IM_FROM_STD] = meas_from.std_dev.values * i_a_to_pu_from
branch_append[ix_to, IM_TO] = meas_to.value.values * i_a_to_pu_to
branch_append[ix_to, IM_TO_STD] = meas_to.std_dev.values * i_a_to_pu_to
branch_append[meas_from.element.values.astype(int), IM_FROM_IDX] = meas_from.index.values
branch_append[meas_to.element.values.astype(int), IM_TO_IDX] = meas_to.index.values
p_measurements = net.measurement[(net.measurement.type == "p")
& (net.measurement.element_type == "line")]
if len(p_measurements):
meas_from = p_measurements[(p_measurements.bus.values.astype(int) ==
net.line.from_bus[p_measurements.element]).values]
meas_to = p_measurements[(p_measurements.bus.values.astype(int) ==
net.line.to_bus[p_measurements.element]).values]
ix_from = meas_from.element.values.astype(int)
ix_to = meas_to.element.values.astype(int)
branch_append[ix_from, P_FROM] = meas_from.value.values * 1e3 / s_ref
branch_append[ix_from, P_FROM_STD] = meas_from.std_dev.values * 1e3 / s_ref
branch_append[ix_to, P_TO] = meas_to.value.values * 1e3 / s_ref
branch_append[ix_to, P_TO_STD] = meas_to.std_dev.values * 1e3 / s_ref
branch_append[meas_from.element.values.astype(int), P_FROM_IDX] = meas_from.index.values
branch_append[meas_to.element.values.astype(int), P_TO_IDX] = meas_to.index.values
q_measurements = net.measurement[(net.measurement.type == "q")
& (net.measurement.element_type == "line")]
if len(q_measurements):
meas_from = q_measurements[(q_measurements.bus.values.astype(int) ==
net.line.from_bus[q_measurements.element]).values]
meas_to = q_measurements[(q_measurements.bus.values.astype(int) ==
net.line.to_bus[q_measurements.element]).values]
ix_from = meas_from.element.values.astype(int)
ix_to = meas_to.element.values.astype(int)
branch_append[ix_from, Q_FROM] = meas_from.value.values * 1e3 / s_ref
branch_append[ix_from, Q_FROM_STD] = meas_from.std_dev.values * 1e3 / s_ref
branch_append[ix_to, Q_TO] = meas_to.value.values * 1e3 / s_ref
branch_append[ix_to, Q_TO_STD] = meas_to.std_dev.values * 1e3 / s_ref
branch_append[meas_from.element.values.astype(int), Q_FROM_IDX] = meas_from.index.values
branch_append[meas_to.element.values.astype(int), Q_TO_IDX] = meas_to.index.values
# determine number of lines in ppci["branch"]
# out of service lines and lines with open switches at both ends are not in the ppci
_is_elements = net["_is_elements"]
if "line" not in _is_elements:
get_is_lines(net)
lines_is = _is_elements['line']
bus_is_idx = _is_elements['bus_is_idx']
slidx = (net["switch"]["closed"].values == 0) \
& (net["switch"]["et"].values == "l") \
& (np.in1d(net["switch"]["element"].values, lines_is.index)) \
& (np.in1d(net["switch"]["bus"].values, bus_is_idx))
ppci_lines = len(lines_is) - np.count_nonzero(slidx)
i_tr_measurements = net.measurement[(net.measurement.type == "i")
& (net.measurement.element_type == "transformer")]
if len(i_tr_measurements):
meas_from = i_tr_measurements[(i_tr_measurements.bus.values.astype(int) ==
net.trafo.hv_bus[i_tr_measurements.element]).values]
meas_to = i_tr_measurements[(i_tr_measurements.bus.values.astype(int) ==
net.trafo.lv_bus[i_tr_measurements.element]).values]
ix_from = ppci_lines + meas_from.element.values.astype(int)
ix_to = ppci_lines + meas_to.element.values.astype(int)
i_a_to_pu_from = (net.bus.vn_kv[meas_from.bus] * 1e3 / s_ref).values
i_a_to_pu_to = (net.bus.vn_kv[meas_to.bus] * 1e3 / s_ref).values
branch_append[ix_from, IM_FROM] = meas_from.value.values * i_a_to_pu_from
branch_append[ix_from, IM_FROM_STD] = meas_from.std_dev.values * i_a_to_pu_from
branch_append[ix_to, IM_TO] = meas_to.value.values * i_a_to_pu_to
branch_append[ix_to, IM_TO_STD] = meas_to.std_dev.values * i_a_to_pu_to
branch_append[meas_from.element.values.astype(int), IM_FROM_IDX] = meas_from.index.values
branch_append[meas_to.element.values.astype(int), IM_TO_IDX] = meas_to.index.values
p_tr_measurements = net.measurement[(net.measurement.type == "p") &
(net.measurement.element_type == "transformer")]
if len(p_tr_measurements):
meas_from = p_tr_measurements[(p_tr_measurements.bus.values.astype(int) ==
net.trafo.hv_bus[p_tr_measurements.element]).values]
meas_to = p_tr_measurements[(p_tr_measurements.bus.values.astype(int) ==
net.trafo.lv_bus[p_tr_measurements.element]).values]
ix_from = ppci_lines + meas_from.element.values.astype(int)
ix_to = ppci_lines + meas_to.element.values.astype(int)
branch_append[ix_from, P_FROM] = meas_from.value.values * 1e3 / s_ref
branch_append[ix_from, P_FROM_STD] = meas_from.std_dev.values * 1e3 / s_ref
branch_append[ix_to, P_TO] = meas_to.value.values * 1e3 / s_ref
branch_append[ix_to, P_TO_STD] = meas_to.std_dev.values * 1e3 / s_ref
branch_append[meas_from.element.values.astype(int), P_FROM_IDX] = meas_from.index.values
branch_append[meas_to.element.values.astype(int), P_TO_IDX] = meas_to.index.values
q_tr_measurements = net.measurement[(net.measurement.type == "q") &
(net.measurement.element_type == "transformer")]
if len(q_tr_measurements):
meas_from = q_tr_measurements[(q_tr_measurements.bus.values.astype(int) ==
net.trafo.hv_bus[q_tr_measurements.element]).values]
meas_to = q_tr_measurements[(q_tr_measurements.bus.values.astype(int) ==
net.trafo.lv_bus[q_tr_measurements.element]).values]
ix_from = ppci_lines + meas_from.element.values.astype(int)
ix_to = ppci_lines + meas_to.element.values.astype(int)
branch_append[ix_from, Q_FROM] = meas_from.value.values * 1e3 / s_ref
branch_append[ix_from, Q_FROM_STD] = meas_from.std_dev.values * 1e3 / s_ref
branch_append[ix_to, Q_TO] = meas_to.value.values * 1e3 / s_ref
branch_append[ix_to, Q_TO_STD] = meas_to.std_dev.values * 1e3 / s_ref
branch_append[meas_from.element.values.astype(int), Q_FROM_IDX] = meas_from.index.values
branch_append[meas_to.element.values.astype(int), Q_TO_IDX] = meas_to.index.values
ppci["bus"] = np.hstack((ppci["bus"], bus_append))
ppci["branch"] = np.hstack((ppci["branch"], branch_append))
return ppci
def _build_measurement_vectors(ppci):
    """
    Build the measurement vector z, the pandapower-to-ppci measurement index
    mapping and the covariance vector R from the measurement columns that
    were appended to ppci.

    Relies on the module-level column offsets (``bus_cols``, ``branch_cols``)
    and the per-measurement column constants (P/Q/VM/IM_* with their _STD and
    _IDX companions) describing where values, standard deviations and
    pandapower indices live inside ppci["bus"] / ppci["branch"].

    :param ppci: generated ppci which contains the measurement columns
    :return: tuple (z, pp_meas_indices, r_cov)
    """
    # (table, value column, std-dev column, pandapower index column),
    # listed in the exact order the measurements must appear in z
    measurement_specs = (
        ("bus", P, P_STD, P_IDX),
        ("branch", P_FROM, P_FROM_STD, P_FROM_IDX),
        ("branch", P_TO, P_TO_STD, P_TO_IDX),
        ("bus", Q, Q_STD, Q_IDX),
        ("branch", Q_FROM, Q_FROM_STD, Q_FROM_IDX),
        ("branch", Q_TO, Q_TO_STD, Q_TO_IDX),
        ("bus", VM, VM_STD, VM_IDX),
        ("branch", IM_FROM, IM_FROM_STD, IM_FROM_IDX),
        ("branch", IM_TO, IM_TO_STD, IM_TO_IDX),
    )
    base_col = {"bus": bus_cols, "branch": branch_cols}
    z_parts, idx_parts, cov_parts = [], [], []
    for table, val_col, std_col, idx_col in measurement_specs:
        offset = base_col[table]
        data = ppci[table]
        # a measurement exists wherever its value column is not NaN; the
        # same mask selects the matching std-dev and index entries
        not_nan = ~np.isnan(data[:, offset + val_col])
        z_parts.append(data[not_nan, offset + val_col])
        idx_parts.append(data[not_nan, offset + idx_col])
        cov_parts.append(data[not_nan, offset + std_col])
    # piece together our measurement vector z
    z = np.concatenate(z_parts).real.astype(np.float64)
    # conserve the pandapower indices of measurements in the ppci order
    pp_meas_indices = np.concatenate(idx_parts).real.astype(int)
    # covariance (standard deviation) vector R
    r_cov = np.concatenate(cov_parts).real.astype(np.float64)
    return z, pp_meas_indices, r_cov
| [
"pandapower.build_branch.get_is_lines",
"pandapower.pd2ppc._pd2ppc",
"numpy.hstack",
"pandapower.pf.run_newton_raphson_pf._run_dc_pf",
"numpy.in1d",
"numpy.any",
"pandapower.auxiliary._select_is_elements_numba",
"numpy.count_nonzero",
"numpy.isnan",
"numpy.concatenate",
"numpy.full",
"numpy.ar... | [((1396, 1426), 'pandapower.auxiliary._select_is_elements_numba', '_select_is_elements_numba', (['net'], {}), '(net)\n', (1421, 1426), False, 'from pandapower.auxiliary import _select_is_elements_numba, _add_ppc_options\n'), ((1443, 1455), 'pandapower.pd2ppc._pd2ppc', '_pd2ppc', (['net'], {}), '(net)\n', (1450, 1455), False, 'from pandapower.pd2ppc import _pd2ppc\n'), ((1519, 1549), 'numpy.any', 'np.any', (['net.trafo.shift_degree'], {}), '(net.trafo.shift_degree)\n', (1525, 1549), True, 'import numpy as np\n'), ((2248, 2325), 'numpy.full', 'np.full', (["(ppci['bus'].shape[0], bus_cols_se)", 'np.nan'], {'dtype': "ppci['bus'].dtype"}), "((ppci['bus'].shape[0], bus_cols_se), np.nan, dtype=ppci['bus'].dtype)\n", (2255, 2325), True, 'import numpy as np\n'), ((4488, 4579), 'numpy.full', 'np.full', (["(ppci['branch'].shape[0], branch_cols_se)", 'np.nan'], {'dtype': "ppci['branch'].dtype"}), "((ppci['branch'].shape[0], branch_cols_se), np.nan, dtype=ppci[\n 'branch'].dtype)\n", (4495, 4579), True, 'import numpy as np\n'), ((12401, 12437), 'numpy.hstack', 'np.hstack', (["(ppci['bus'], bus_append)"], {}), "((ppci['bus'], bus_append))\n", (12410, 12437), True, 'import numpy as np\n'), ((12459, 12501), 'numpy.hstack', 'np.hstack', (["(ppci['branch'], branch_append)"], {}), "((ppci['branch'], branch_append))\n", (12468, 12501), True, 'import numpy as np\n'), ((1647, 1663), 'pandapower.pf.run_newton_raphson_pf._run_dc_pf', '_run_dc_pf', (['ppci'], {}), '(ppci)\n', (1657, 1663), False, 'from pandapower.pf.run_newton_raphson_pf import _run_dc_pf\n'), ((3968, 3999), 'numpy.arange', 'np.arange', (["ppci['bus'].shape[0]"], {}), "(ppci['bus'].shape[0])\n", (3977, 3999), True, 'import numpy as np\n'), ((8305, 8322), 'pandapower.build_branch.get_is_lines', 'get_is_lines', (['net'], {}), '(net)\n', (8317, 8322), False, 'from pandapower.build_branch import get_is_lines\n'), ((8597, 8645), 'numpy.in1d', 'np.in1d', (["net['switch']['bus'].values", 'bus_is_idx'], {}), 
"(net['switch']['bus'].values, bus_is_idx)\n", (8604, 8645), True, 'import numpy as np\n'), ((8680, 8703), 'numpy.count_nonzero', 'np.count_nonzero', (['slidx'], {}), '(slidx)\n', (8696, 8703), True, 'import numpy as np\n'), ((12972, 13010), 'numpy.isnan', 'np.isnan', (["ppci['bus'][:, bus_cols + P]"], {}), "(ppci['bus'][:, bus_cols + P])\n", (12980, 13010), True, 'import numpy as np\n'), ((13035, 13084), 'numpy.isnan', 'np.isnan', (["ppci['branch'][:, branch_cols + P_FROM]"], {}), "(ppci['branch'][:, branch_cols + P_FROM])\n", (13043, 13084), True, 'import numpy as np\n'), ((13109, 13156), 'numpy.isnan', 'np.isnan', (["ppci['branch'][:, branch_cols + P_TO]"], {}), "(ppci['branch'][:, branch_cols + P_TO])\n", (13117, 13156), True, 'import numpy as np\n'), ((13178, 13216), 'numpy.isnan', 'np.isnan', (["ppci['bus'][:, bus_cols + Q]"], {}), "(ppci['bus'][:, bus_cols + Q])\n", (13186, 13216), True, 'import numpy as np\n'), ((13241, 13290), 'numpy.isnan', 'np.isnan', (["ppci['branch'][:, branch_cols + Q_FROM]"], {}), "(ppci['branch'][:, branch_cols + Q_FROM])\n", (13249, 13290), True, 'import numpy as np\n'), ((13315, 13362), 'numpy.isnan', 'np.isnan', (["ppci['branch'][:, branch_cols + Q_TO]"], {}), "(ppci['branch'][:, branch_cols + Q_TO])\n", (13323, 13362), True, 'import numpy as np\n'), ((13384, 13423), 'numpy.isnan', 'np.isnan', (["ppci['bus'][:, bus_cols + VM]"], {}), "(ppci['bus'][:, bus_cols + VM])\n", (13392, 13423), True, 'import numpy as np\n'), ((13448, 13498), 'numpy.isnan', 'np.isnan', (["ppci['branch'][:, branch_cols + IM_FROM]"], {}), "(ppci['branch'][:, branch_cols + IM_FROM])\n", (13456, 13498), True, 'import numpy as np\n'), ((13523, 13571), 'numpy.isnan', 'np.isnan', (["ppci['branch'][:, branch_cols + IM_TO]"], {}), "(ppci['branch'][:, branch_cols + IM_TO])\n", (13531, 13571), True, 'import numpy as np\n'), ((8522, 8578), 'numpy.in1d', 'np.in1d', (["net['switch']['element'].values", 'lines_is.index'], {}), "(net['switch']['element'].values, 
lines_is.index)\n", (8529, 8578), True, 'import numpy as np\n'), ((13626, 14134), 'numpy.concatenate', 'np.concatenate', (["(ppci['bus'][p_bus_not_nan, bus_cols + P], ppci['branch'][p_line_f_not_nan,\n branch_cols + P_FROM], ppci['branch'][p_line_t_not_nan, branch_cols +\n P_TO], ppci['bus'][q_bus_not_nan, bus_cols + Q], ppci['branch'][\n q_line_f_not_nan, branch_cols + Q_FROM], ppci['branch'][\n q_line_t_not_nan, branch_cols + Q_TO], ppci['bus'][v_bus_not_nan, \n bus_cols + VM], ppci['branch'][i_line_f_not_nan, branch_cols + IM_FROM],\n ppci['branch'][i_line_t_not_nan, branch_cols + IM_TO])"], {}), "((ppci['bus'][p_bus_not_nan, bus_cols + P], ppci['branch'][\n p_line_f_not_nan, branch_cols + P_FROM], ppci['branch'][\n p_line_t_not_nan, branch_cols + P_TO], ppci['bus'][q_bus_not_nan, \n bus_cols + Q], ppci['branch'][q_line_f_not_nan, branch_cols + Q_FROM],\n ppci['branch'][q_line_t_not_nan, branch_cols + Q_TO], ppci['bus'][\n v_bus_not_nan, bus_cols + VM], ppci['branch'][i_line_f_not_nan, \n branch_cols + IM_FROM], ppci['branch'][i_line_t_not_nan, branch_cols +\n IM_TO]))\n", (13640, 14134), True, 'import numpy as np\n'), ((14437, 14981), 'numpy.concatenate', 'np.concatenate', (["(ppci['bus'][p_bus_not_nan, bus_cols + P_IDX], ppci['branch'][\n p_line_f_not_nan, branch_cols + P_FROM_IDX], ppci['branch'][\n p_line_t_not_nan, branch_cols + P_TO_IDX], ppci['bus'][q_bus_not_nan, \n bus_cols + Q_IDX], ppci['branch'][q_line_f_not_nan, branch_cols +\n Q_FROM_IDX], ppci['branch'][q_line_t_not_nan, branch_cols + Q_TO_IDX],\n ppci['bus'][v_bus_not_nan, bus_cols + VM_IDX], ppci['branch'][\n i_line_f_not_nan, branch_cols + IM_FROM_IDX], ppci['branch'][\n i_line_t_not_nan, branch_cols + IM_TO_IDX])"], {}), "((ppci['bus'][p_bus_not_nan, bus_cols + P_IDX], ppci['branch'\n ][p_line_f_not_nan, branch_cols + P_FROM_IDX], ppci['branch'][\n p_line_t_not_nan, branch_cols + P_TO_IDX], ppci['bus'][q_bus_not_nan, \n bus_cols + Q_IDX], ppci['branch'][q_line_f_not_nan, branch_cols +\n 
Q_FROM_IDX], ppci['branch'][q_line_t_not_nan, branch_cols + Q_TO_IDX],\n ppci['bus'][v_bus_not_nan, bus_cols + VM_IDX], ppci['branch'][\n i_line_f_not_nan, branch_cols + IM_FROM_IDX], ppci['branch'][\n i_line_t_not_nan, branch_cols + IM_TO_IDX]))\n", (14451, 14981), True, 'import numpy as np\n'), ((15347, 15891), 'numpy.concatenate', 'np.concatenate', (["(ppci['bus'][p_bus_not_nan, bus_cols + P_STD], ppci['branch'][\n p_line_f_not_nan, branch_cols + P_FROM_STD], ppci['branch'][\n p_line_t_not_nan, branch_cols + P_TO_STD], ppci['bus'][q_bus_not_nan, \n bus_cols + Q_STD], ppci['branch'][q_line_f_not_nan, branch_cols +\n Q_FROM_STD], ppci['branch'][q_line_t_not_nan, branch_cols + Q_TO_STD],\n ppci['bus'][v_bus_not_nan, bus_cols + VM_STD], ppci['branch'][\n i_line_f_not_nan, branch_cols + IM_FROM_STD], ppci['branch'][\n i_line_t_not_nan, branch_cols + IM_TO_STD])"], {}), "((ppci['bus'][p_bus_not_nan, bus_cols + P_STD], ppci['branch'\n ][p_line_f_not_nan, branch_cols + P_FROM_STD], ppci['branch'][\n p_line_t_not_nan, branch_cols + P_TO_STD], ppci['bus'][q_bus_not_nan, \n bus_cols + Q_STD], ppci['branch'][q_line_f_not_nan, branch_cols +\n Q_FROM_STD], ppci['branch'][q_line_t_not_nan, branch_cols + Q_TO_STD],\n ppci['bus'][v_bus_not_nan, bus_cols + VM_STD], ppci['branch'][\n i_line_f_not_nan, branch_cols + IM_FROM_STD], ppci['branch'][\n i_line_t_not_nan, branch_cols + IM_TO_STD]))\n", (15361, 15891), True, 'import numpy as np\n')] |
import numpy as np
import torch
def embed_bert_cls(text, model, tokenizer):
    """Embed ``text`` with the L2-normalized [CLS] token of a BERT-style model.

    Returns a dict ``{'cls': 1-D numpy vector}``.
    """
    encoded = tokenizer(text, padding=True, truncation=True, max_length=128, return_tensors='pt')
    encoded = {key: tensor.to(model.device) for key, tensor in encoded.items()}
    with torch.no_grad():
        output = model(**encoded)
    # The pooler output is deliberately not used here, because it adds an
    # extra (unused) layer on top of the raw hidden states.
    cls_state = output.last_hidden_state[:, 0, :]
    cls_state = torch.nn.functional.normalize(cls_state)
    return {'cls': cls_state[0].cpu().numpy()}
def embed_bert_cls2(text, model, tokenizer):
    """Embed ``text`` with the model's pooler output, L2-normalized.

    Returns a bare 1-D numpy vector (not wrapped in a dict).
    """
    batch = tokenizer(text, padding=True, truncation=True, max_length=128, return_tensors='pt')
    batch = {name: value.to(model.device) for name, value in batch.items()}
    with torch.no_grad():
        result = model(**batch)
    pooled = torch.nn.functional.normalize(result.pooler_output)
    return pooled[0].cpu().numpy()
def mean_pooling(model_output, attention_mask, norm=True):
    """Average the token embeddings of a transformer output over real tokens.

    Padding positions are masked out via ``attention_mask``; the per-token
    hidden states are summed and divided by the number of unmasked tokens
    (clamped to avoid division by zero for all-padding rows).

    :param model_output: model output whose first element holds the token
        embeddings — assumed shape (batch, seq_len, hidden).
    :param attention_mask: (batch, seq_len) mask with 1 for real tokens.
    :param norm: if True, L2-normalize the pooled vectors.
    :return: (batch, hidden) tensor of sentence embeddings.
    """
    token_embeddings = model_output[0]  # first element of model_output contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    # Bug fix: plain masked sum. The previous ``token_embeddings ** 2``
    # computed a mean of *squared* embeddings, which is not mean pooling.
    sum_embeddings = torch.sum(token_embeddings * input_mask_expanded, 1)
    sum_mask = torch.clamp(input_mask_expanded.sum(1), min=1e-9)
    pooled = sum_embeddings / sum_mask
    if norm:
        pooled = torch.nn.functional.normalize(pooled)
    return pooled
def embed_bert_pool(text, model, tokenizer, max_length=128):
    """Embed ``text`` by mean-pooling token embeddings over the attention mask.

    Returns a dict ``{'mean': 1-D numpy vector}``.
    """
    batch = tokenizer(text, padding=True, truncation=True, max_length=max_length, return_tensors='pt')
    batch = {name: value.to(model.device) for name, value in batch.items()}
    with torch.no_grad():
        output = model(**batch)
    pooled = mean_pooling(output, batch['attention_mask'])
    return {'mean': pooled[0].cpu().numpy()}
def embed_bert_both(text, model, tokenizer):
    """Embed ``text`` two ways at once: normalized [CLS] token and mean pooling.

    Returns a dict ``{'cls': numpy vector, 'mean': numpy vector}``.
    """
    batch = tokenizer(text, padding=True, truncation=True, max_length=128, return_tensors='pt')
    batch = {name: value.to(model.device) for name, value in batch.items()}
    with torch.no_grad():
        output = model(**batch)
    cls_vec = torch.nn.functional.normalize(output.last_hidden_state[:, 0, :])
    mean_vec = mean_pooling(output, batch['attention_mask'])
    return {'cls': cls_vec[0].cpu().numpy(), 'mean': mean_vec[0].cpu().numpy()}
def get_word_vectors_with_bert(words, model, tokenizer, return_raw=False):
    """
    Take list of words (or other tokens) as an input.
    Return either a matrix of token embeddings and its corresponding word ids,
    or a dict from word id to its average vector.
    Can be used to evaluate feature extractors for NER and other sequence labeling problems.
    """
    batch = tokenizer(words, is_split_into_words=True, return_tensors='pt', truncation=True).to(model.device)
    with torch.no_grad():
        output = model(**batch)
    vectors = output.last_hidden_state[0, :, :].cpu().numpy()
    word_ids = batch.word_ids()
    if return_raw:
        return vectors, word_ids
    # collect every sub-token vector under the id of the word it belongs to
    pieces = {word_index: [] for word_index in range(len(words))}
    for token_pos, word_index in enumerate(word_ids):
        if word_index is not None:
            pieces[word_index].append(vectors[token_pos])
    # average sub-token vectors per word; words without any sub-token
    # (e.g. truncated away) get a zero vector of the model's hidden size
    return {
        word_index: (np.mean(piece_list, 0) if piece_list
                     else np.zeros(model.config.hidden_size))
        for word_index, piece_list in sorted(pieces.items())
    }
| [
"numpy.mean",
"torch.nn.functional.normalize",
"numpy.zeros",
"torch.sum",
"torch.no_grad"
] | [((459, 500), 'torch.nn.functional.normalize', 'torch.nn.functional.normalize', (['embeddings'], {}), '(embeddings)\n', (488, 500), False, 'import torch\n'), ((863, 904), 'torch.nn.functional.normalize', 'torch.nn.functional.normalize', (['embeddings'], {}), '(embeddings)\n', (892, 904), False, 'import torch\n'), ((1223, 1280), 'torch.sum', 'torch.sum', (['(token_embeddings ** 2 * input_mask_expanded)', '(1)'], {}), '(token_embeddings ** 2 * input_mask_expanded, 1)\n', (1232, 1280), False, 'import torch\n'), ((2196, 2266), 'torch.nn.functional.normalize', 'torch.nn.functional.normalize', (['model_output.last_hidden_state[:, 0, :]'], {}), '(model_output.last_hidden_state[:, 0, :])\n', (2225, 2266), False, 'import torch\n'), ((234, 249), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (247, 249), False, 'import torch\n'), ((751, 766), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (764, 766), False, 'import torch\n'), ((1413, 1448), 'torch.nn.functional.normalize', 'torch.nn.functional.normalize', (['sums'], {}), '(sums)\n', (1442, 1448), False, 'import torch\n'), ((1726, 1741), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1739, 1741), False, 'import torch\n'), ((2136, 2151), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2149, 2151), False, 'import torch\n'), ((2877, 2892), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2890, 2892), False, 'import torch\n'), ((3346, 3380), 'numpy.zeros', 'np.zeros', (['model.config.hidden_size'], {}), '(model.config.hidden_size)\n', (3354, 3380), True, 'import numpy as np\n'), ((3420, 3442), 'numpy.mean', 'np.mean', (['id2vecs[i]', '(0)'], {}), '(id2vecs[i], 0)\n', (3427, 3442), True, 'import numpy as np\n')] |
"""Spectral methods
"""
import numpy as np
from math import ceil
from numpy import pi
from numpy.fft import fft, ifft, fftfreq, fftshift, ifftshift
class spectralMethod1(object):
    """Fourier spectral method acting on one axis of an array.

    Args:
        n (int): The length of the array along the axis specified by axis.
        d (float): Sample spacing (inverse of the sampling rate). Defaults to 1.
        axis: Axis over which to compute the FFT. Defaults to -1 (last axis).
        norm: FFT normalization mode. Default is "forward" (forward transform
            scaled by 1/n, inverse unscaled), so spectral coefficients do not
            depend on n and zero-padded products need no extra rescaling.
    Attributes:
        k (np.array): Angular wave numbers (2*pi*fftfreq), in unshifted FFT order.
        pad_width (int): Number of values padded to the edges of the specified
            axis when dealiasing products (ceil(n/4) per side, i.e. 3/2 rule).
    """
    def __init__(self, n:int, d=1., axis=-1, norm='forward'):
        self.n = n
        self.d = d
        self.axis = axis
        self.norm = norm
        # angular wave numbers matching the FFT output ordering
        self.k = fftfreq(n, d) * 2*pi
        # per-edge zero padding: n + 2*ceil(n/4) >= 3n/2 (3/2 dealiasing rule)
        self.pad_width = ceil(self.n / 4)
    def fft(self, u):
        """Compute the discrete Fourier Transform.
        Args:
            u (np.array): State in physical space.
        Returns:
            np.array: State in wave space.
        """
        return fft(u, axis=self.axis, norm=self.norm)
    def ifft(self, u):
        """Compute the inverse discrete Fourier Transform.
        Args:
            u (np.array): State in wave space.
        Returns:
            np.array: State in physical space.
        """
        return ifft(u, axis=self.axis, norm=self.norm)
    def fftshift(self, u):
        """Shift the zero-frequency component to the center of the spectrum.
        Args:
            u (np.array): Input array.
        Returns:
            np.array: The shifted array.
        """
        return fftshift(u, self.axis)
    def ifftshift(self, u):
        """The inverse of fftshift.
        Args:
            u (np.array): Input array.
        Returns:
            np.array: The shifted array.
        """
        return ifftshift(u, self.axis)
    def diff(self, u, order:int=1):
        """Differentiate in the wave space (multiply by (i*k)**order).
        Args:
            u (np.array): States in the wave space.
            order (int): Order of the differential.
        Returns:
            np.array: Differentiated states in the wave space.
        """
        dim = np.ndim(u)
        # reshape k to broadcast along self.axis only
        k_shape = [1]*dim
        k_shape[self.axis] = len(self.k)
        k_shape = tuple(k_shape)
        k_expand = self.k.reshape(k_shape)
        return u * (1j * k_expand)**order
    def diff_phys(self, u, order:int=1):
        """Differentiate a physical-space state via the wave space.
        Args:
            u (np.array): States in the physical space.
            order (int): Order of the differential.
        Returns:
            np.array: Differentiated states in the physical space (real part).
        """
        u = self.fft(u)
        u = self.diff(u, order)
        u = self.ifft(u)
        return u.real
    def multiply(self, u1, u2):
        """Multiply arguments element-wise (convolution in wave space).
        The aliasing error is eliminated by zero-padding (3/2 rule):
        pad the spectra, multiply on the finer physical grid, transform
        back and strip the padding again.
        NOTE(review): np.compress with a multi-dimensional condition works
        on the flattened array — for ndim > 1 inputs the unpadded result
        comes back 1-D; confirm callers only use this along a single axis.
        Args:
            u1 (np.array): States in the wave space.
            u2 (np.array): States in the wave space.
        Returns:
            np.array: u1 * u2 in the wave space.
        """
        dim = np.ndim(u1)
        pad_width_ = np.zeros((dim,2), dtype=int)
        pad_width_[self.axis,:] = self.pad_width
        pad_width_ = tuple(map(tuple, pad_width_))
        ### pad the (shifted) spectra symmetrically with zeros
        u1 = self.fftshift(u1)
        u2 = self.fftshift(u2)
        p1 = np.pad(u1, pad_width_, mode='constant', constant_values=0.)
        p2 = np.pad(u2, pad_width_, mode='constant', constant_values=0.)
        p1 = self.ifftshift(p1)
        p2 = self.ifftshift(p2)
        ### transform states from wave space to 3/2 physical space
        p1 = self.ifft(p1)
        p2 = self.ifft(p2)
        ### multiply pointwise on the finer grid
        p1p2 = p1 * p2
        ### transform states from 3/2 physical space to 3/2 wave space
        p1p2 = self.fft(p1p2)
        ### unpad: keep only the original (unpadded) spectral entries
        p1p2 = self.fftshift(p1p2)
        mask = np.pad(np.ones_like(u1), pad_width_, mode='constant', constant_values=0.)
        p1p2 = np.compress(condition=mask, a=p1p2)
        p1p2 = self.ifftshift(p1p2)
        return p1p2
    def multiply_phys(self, u1, u2):
        """Multiply arguments element-wise, dealiased, in physical space.
        The aliasing error is eliminated by zero-padding (3/2 rule).
        Args:
            u1 (np.array): States in the physical space.
            u2 (np.array): States in the physical space.
        Returns:
            np.array: u1 * u2 in the physical space (real part).
        """
        u1 = self.fft(u1)
        u2 = self.fft(u2)
        u1u2 = self.multiply(u1, u2)
        u1u2 = self.ifft(u1u2)
        return u1u2.real
| [
"numpy.ones_like",
"math.ceil",
"numpy.fft.fftfreq",
"numpy.fft.fft",
"numpy.ndim",
"numpy.compress",
"numpy.zeros",
"numpy.fft.ifftshift",
"numpy.fft.fftshift",
"numpy.pad",
"numpy.fft.ifft"
] | [((857, 873), 'math.ceil', 'ceil', (['(self.n / 4)'], {}), '(self.n / 4)\n', (861, 873), False, 'from math import ceil\n'), ((1102, 1140), 'numpy.fft.fft', 'fft', (['u'], {'axis': 'self.axis', 'norm': 'self.norm'}), '(u, axis=self.axis, norm=self.norm)\n', (1105, 1140), False, 'from numpy.fft import fft, ifft, fftfreq, fftshift, ifftshift\n'), ((1378, 1417), 'numpy.fft.ifft', 'ifft', (['u'], {'axis': 'self.axis', 'norm': 'self.norm'}), '(u, axis=self.axis, norm=self.norm)\n', (1382, 1417), False, 'from numpy.fft import fft, ifft, fftfreq, fftshift, ifftshift\n'), ((1663, 1685), 'numpy.fft.fftshift', 'fftshift', (['u', 'self.axis'], {}), '(u, self.axis)\n', (1671, 1685), False, 'from numpy.fft import fft, ifft, fftfreq, fftshift, ifftshift\n'), ((1891, 1914), 'numpy.fft.ifftshift', 'ifftshift', (['u', 'self.axis'], {}), '(u, self.axis)\n', (1900, 1914), False, 'from numpy.fft import fft, ifft, fftfreq, fftshift, ifftshift\n'), ((2222, 2232), 'numpy.ndim', 'np.ndim', (['u'], {}), '(u)\n', (2229, 2232), True, 'import numpy as np\n'), ((3187, 3198), 'numpy.ndim', 'np.ndim', (['u1'], {}), '(u1)\n', (3194, 3198), True, 'import numpy as np\n'), ((3220, 3249), 'numpy.zeros', 'np.zeros', (['(dim, 2)'], {'dtype': 'int'}), '((dim, 2), dtype=int)\n', (3228, 3249), True, 'import numpy as np\n'), ((3445, 3505), 'numpy.pad', 'np.pad', (['u1', 'pad_width_'], {'mode': '"""constant"""', 'constant_values': '(0.0)'}), "(u1, pad_width_, mode='constant', constant_values=0.0)\n", (3451, 3505), True, 'import numpy as np\n'), ((3518, 3578), 'numpy.pad', 'np.pad', (['u2', 'pad_width_'], {'mode': '"""constant"""', 'constant_values': '(0.0)'}), "(u2, pad_width_, mode='constant', constant_values=0.0)\n", (3524, 3578), True, 'import numpy as np\n'), ((4073, 4108), 'numpy.compress', 'np.compress', ([], {'condition': 'mask', 'a': 'p1p2'}), '(condition=mask, a=p1p2)\n', (4084, 4108), True, 'import numpy as np\n'), ((3991, 4007), 'numpy.ones_like', 'np.ones_like', (['u1'], {}), '(u1)\n', (4003, 
4007), True, 'import numpy as np\n'), ((811, 824), 'numpy.fft.fftfreq', 'fftfreq', (['n', 'd'], {}), '(n, d)\n', (818, 824), False, 'from numpy.fft import fft, ifft, fftfreq, fftshift, ifftshift\n')] |
import os
import glob
import numpy as np
import Tkinter
import tkFont
import ConfigManager
###
# gse_misc.py
#
# Miscellaneous utility methods for Gse
###
# Support custom entry validation
import Pmw
def hex_integer_validate(text):
    """
    Pmw entry validator accepting either hexadecimal or integer input.

    Returns 1 (valid) when either Pmw validator reports a truthy status
    for ``text``; -1 (invalid) otherwise.
    NOTE(review): Pmw validators may return -1 on error, which is truthy
    here and therefore also maps to 1 — confirm that is intended.
    """
    if Pmw.hexadecimalvalidator(text) or Pmw.integervalidator(text):
        return 1
    return -1
def hex_integer_stringtovalue(text):
    """
    Convert a hex ('0x'-prefixed) or decimal string to its integer value.

    Partially typed entries ('', '-', '0x', '-0x') map to 0 so the entry
    widget can be validated while the user is still typing.
    """
    if text in ('', '-', '0x', '-0x'):
        return 0
    base = 16 if '0x' in text else 10
    return int(text, base)
# Start up utilities
def backwards_search(nlevels, dirname):
    """
    Search the current directory and up to nlevels parent directories for
    a directory named dirname.

    Returns the absolute path of the first match (nearest level first),
    or None when no level contains it.
    """
    # level prefixes: current dir first, then '../', '../../', ...
    prefixes = ['./'] + [(level + 1) * '../' for level in range(nlevels)]
    for prefix in prefixes:
        candidate = os.path.join(prefix, dirname)
        # Direct existence check instead of the old substring match against
        # a glob listing, which false-positived on partially matching names
        # and (via ``dirname in glob.glob("./")``) never matched the
        # current directory at all.
        if os.path.isdir(candidate):
            return os.path.abspath(candidate)
    return None
# Utility methods to fix dropdown menus
import Tkinter
def __comboBox_postList(event, cb):
    """
    Event handler for a combobox's Up and Down keys.

    The key press also inserts an ascii character into the entry widget,
    so the trailing character is deleted again.
    Returns 'break' to stop the event from propagating further.
    """
    #cb._postList()
    cb._entryWidget.delete(Tkinter.END)
    return 'break'
def rebind_comboBox(comboBox):
    """
    Rebind a Pmw ComboBox's Up and Down key events so they are handled by
    __comboBox_postList instead of crashing.

    The default bindings are removed first; the replacements clean up the
    character the key press inserts into the entry widget.
    """
    comboBox._entryWidget.unbind("<Up>")
    # Bug fix: the Down unbind used the malformed event sequence "<Down"
    # (missing '>'), which silently left the original binding in place.
    comboBox._entryWidget.unbind("<Down>")
    comboBox._entryWidget.bind("<Up>", lambda e, cb=comboBox: __comboBox_postList(e, cb))
    comboBox._entryWidget.bind("<Down>", lambda e, cb=comboBox: __comboBox_postList(e, cb))
class HyperlinkManager(object):
    """
    Hyperlink manager for a Tkinter Text widget, adapted from:
    http://effbot.org/zone/tkinter-text-hyperlink.htm

    Links are registered with add(); clicking a "hyper-N" tagged span
    invokes the action registered for that tag with its link name.
    """

    def __init__(self, text, justify=Tkinter.LEFT):
        self.text = text
        config = ConfigManager.ConfigManager.getInstance()
        link_size = int(config.get("helppanel", "default_header_link_size"))
        link_font = tkFont.Font(family='Helvetica', size=link_size)
        self.text.tag_config("hyper", font=link_font, foreground="blue", underline=1, justify=justify, tabs='15c')
        self.text.tag_bind("hyper", "<Enter>", self._enter)
        self.text.tag_bind("hyper", "<Leave>", self._leave)
        self.text.tag_bind("hyper", "<Button-1>", self._click)
        self.reset()

    def reset(self):
        """Forget all registered links."""
        self.links = {}

    def add(self, action, link_name):
        """Register an action; return the tag pair to attach to the text."""
        tag = "hyper-%d" % len(self.links)
        self.links[tag] = (action, link_name)
        return "hyper", tag

    def _enter(self, event):
        self.text.config(cursor="hand2")

    def _leave(self, event):
        self.text.config(cursor="")

    def _click(self, event):
        # find the per-link tag under the cursor and fire its action
        for tag in self.text.tag_names(Tkinter.CURRENT):
            if tag.startswith("hyper-"):
                action, link_name = self.links[tag]
                action(link_name)
                return
class CircularBuffer(object):
    """
    Fixed-length circular (ring) buffer backed by a numpy array.

    add() overwrites the oldest entry once the buffer is full;
    asArray() returns the stored values in insertion order.
    """
    def __init__(self, max_len):
        self.__data = np.zeros(max_len)
        self.__max_len = max_len
        self.__max_idx = max_len - 1
        self.__idx = 0      # next write position
        self.__tail = 0     # oldest element position (once cycled)
        self.__total = 0    # total number of add() calls ever made

    def add(self, datapoint):
        """Append datapoint, overwriting the oldest value when full."""
        self.__data[self.__idx] = datapoint
        self.__idx += 1
        self.__total += 1
        # Wrap the write index at the end of the backing array.
        if self.__idx > self.__max_idx:
            self.__idx = 0
        # Once the buffer has cycled, the tail chases the head.
        if self.__total > self.__max_len:
            self.__tail += 1
            if self.__tail > self.__max_idx:
                self.__tail = 0

    def asArray(self):
        """Return the buffered values, oldest first, as a numpy array."""
        # Bug fix: the not-yet-cycled test used __max_idx (max_len - 1),
        # so a buffer holding exactly max_len - 1 items fell through to the
        # wrapped branch and returned an extra, uninitialized leading zero.
        if self.__total < self.__max_len:
            return self.__data[:self.__idx]
        # Buffer has cycled: read max_len entries ending at the write index.
        start = -(self.__max_len - self.__idx)
        r = range(start, self.__idx)
        return np.take(self.__data, r, mode="wrap")

    def getData(self):
        """Return the raw backing array (not in insertion order)."""
        return self.__data

    def getHead(self):
        """
        Get most recent data point
        """
        return self.__data[self.__idx - 1]

    def getTail(self):
        """
        Get last data point
        """
        return self.__data[self.__tail]
def test_CircularBuffer():
    """
    Exercise CircularBuffer: overfill past capacity and verify contents,
    the newest value and the oldest value.
    """
    max_len = 100
    offset = 5
    most_recent = 104
    oldest = 5
    buf = CircularBuffer(max_len)
    for value in range(max_len + offset):
        buf.add(value)
    expected = np.arange(offset, max_len + offset, dtype=float)
    assert np.array_equal(expected, buf.asArray())
    # Bug fix: the class exposes getHead()/getTail(); the old calls to
    # getMostRecent()/getOldest() raised AttributeError.
    assert most_recent == buf.getHead()
    assert oldest == buf.getTail()
| [
"ConfigManager.ConfigManager.getInstance",
"Pmw.hexadecimalvalidator",
"os.path.join",
"Pmw.integervalidator",
"numpy.take",
"numpy.zeros",
"os.path.abspath",
"glob.glob"
] | [((298, 328), 'Pmw.hexadecimalvalidator', 'Pmw.hexadecimalvalidator', (['text'], {}), '(text)\n', (322, 328), False, 'import Pmw\n'), ((348, 374), 'Pmw.integervalidator', 'Pmw.integervalidator', (['text'], {}), '(text)\n', (368, 374), False, 'import Pmw\n'), ((927, 942), 'glob.glob', 'glob.glob', (['"""./"""'], {}), "('./')\n", (936, 942), False, 'import glob\n'), ((1320, 1341), 'os.path.abspath', 'os.path.abspath', (['stop'], {}), '(stop)\n', (1335, 1341), False, 'import os\n'), ((2353, 2394), 'ConfigManager.ConfigManager.getInstance', 'ConfigManager.ConfigManager.getInstance', ([], {}), '()\n', (2392, 2394), False, 'import ConfigManager\n'), ((3611, 3628), 'numpy.zeros', 'np.zeros', (['max_len'], {}), '(max_len)\n', (3619, 3628), True, 'import numpy as np\n'), ((4426, 4462), 'numpy.take', 'np.take', (['self.__data', 'r'], {'mode': '"""wrap"""'}), "(self.__data, r, mode='wrap')\n", (4433, 4462), True, 'import numpy as np\n'), ((1238, 1264), 'os.path.join', 'os.path.join', (['lvl', 'dirname'], {}), '(lvl, dirname)\n', (1250, 1264), False, 'import os\n')] |
import numpy as np
import tensorflow as tf
import tensorflow.contrib.layers as layers
import tensorflow.contrib.slim as slim
from sklearn.utils import shuffle
def create_network(input, is_training, scope='LeNet', reuse=False):
    """
    Build a LeNet-5 style network with tf.contrib.slim.

    :param input: NHWC image batch tensor (LeNet expects 32x32x1).
    :param is_training: Python bool controlling batch-norm behaviour.
    :param scope: variable scope name for the network.
    :param reuse: whether to reuse existing variables in the scope.
    :return: logits tensor of shape (batch, 10).
    """
    # layer sizes (feature maps / units per layer)
    num_maps = {
        'layer_1': 6,
        'layer_2': 16,
        'layer_fully_1': 120,
        'layer_fully_2': 84,
        'layer_fully_3': 10
    }
    conv_kernel_height = 5
    conv_kernel_width = 5
    pool_kernel_height = 2
    pool_kernel_width = 2
    # build the network
    with tf.variable_scope(scope, reuse=reuse):
        with slim.arg_scope([slim.conv2d], padding='VALID', activation_fn=tf.nn.relu,
                            normalizer_fn=slim.batch_norm,
                            normalizer_params={'is_training': is_training, 'updates_collections': None}):
            # Consistency fix: use num_maps['layer_1'] instead of a
            # hard-coded 6 for the first convolution.
            net = slim.conv2d(input, num_maps['layer_1'], [conv_kernel_height, conv_kernel_width], scope='conv1')
            net = slim.max_pool2d(net, [pool_kernel_height, pool_kernel_width], scope='pool1')
            net = slim.conv2d(net, num_maps['layer_2'], [conv_kernel_height, conv_kernel_width], scope='conv2')
            net = slim.max_pool2d(net, [pool_kernel_height, pool_kernel_width], scope='pool2')
            net = slim.flatten(net, scope='flatten')  # flatten layer
            net = slim.fully_connected(net, num_maps['layer_fully_1'], activation_fn=tf.nn.relu, scope='fully_connected1')
            net = slim.fully_connected(net, num_maps['layer_fully_2'], activation_fn=tf.nn.relu, scope='fully_connected2')
            # Bug fix: the output layer must produce raw logits. slim's
            # default activation_fn is ReLU, which clipped the logits fed
            # to softmax_cross_entropy_with_logits.
            net = slim.fully_connected(net, num_maps['layer_fully_3'], activation_fn=None)
    return net
def evaluate(x_data,y_data,batch_size):
    """
    Compute the classification accuracy of the shared 'lenet' network over
    x_data/y_data, batching through the default TF session.

    NOTE(review): fresh placeholders and a reuse=True subgraph are added to
    the default graph on every call, so the graph grows with repeated
    evaluations — consider building these once and reusing them.

    :param x_data: images, shaped (N, 32, 32, 1) per the placeholder below.
    :param y_data: integer class labels (one-hot encoded via tf.one_hot).
    :param batch_size: evaluation batch size.
    :return: accuracy in [0, 1] averaged over all examples.
    """
    n_examples=len(x_data)
    x = tf.placeholder(tf.float32, (None, 32, 32, 1))
    y = tf.placeholder(tf.int32, (None))
    one_hot_y = tf.one_hot(y, 10)
    # reuse the variables created by train() under scope 'lenet'
    net=create_network(x,False,scope='lenet',reuse=True)
    correct_prediction=tf.equal(tf.argmax(net,1),tf.argmax(one_hot_y,1))
    evaluate_operation=tf.reduce_mean(tf.cast(correct_prediction,tf.float64))
    total_accuracy=0
    num_batch=int(np.ceil(len(x_data)/batch_size))
    sess=tf.get_default_session()
    for batch_index in range(num_batch):
        x_batch,y_batch=x_data[batch_index*batch_size:(batch_index+1)*batch_size],y_data[batch_index*batch_size:(batch_index+1)*batch_size]
        accuracy=sess.run(evaluate_operation,feed_dict={x:x_batch,y:y_batch})
        # weight each batch's accuracy by its (possibly smaller last) size
        total_accuracy+=accuracy*len(x_batch)
    # print('x_batch',len(x_batch),' batch_size',batch_size)
    return total_accuracy/n_examples
def train(x_train,y_train,x_validation,y_validation,b_size=128,l_rate=0.001,n_epoch=10): # input is one-hot encoded
    """
    Build the LeNet training graph and fit it with Adam, reporting
    validation accuracy after every epoch and saving a checkpoint
    named 'lenet' at the end.

    :param x_train: training images, shaped (N, 32, 32, 1).
    :param y_train: integer training labels (one-hot encoded internally).
    :param x_validation: validation images.
    :param y_validation: integer validation labels.
    :param b_size: mini-batch size.
    :param l_rate: Adam learning rate.
    :param n_epoch: number of passes over the training data.
    """
    x = tf.placeholder(tf.float32, (None, 32, 32, 1))
    y = tf.placeholder(tf.int32, (None))
    one_hot_y = tf.one_hot(y, 10)
    net=create_network(x,True,scope='lenet',reuse=False) # create network
    # compute cross-entropy loss
    cross_entropy=tf.nn.softmax_cross_entropy_with_logits(logits=net,labels=one_hot_y) # getting the vector of softmax cross entropy loss (not averaged yet)
    loss=tf.reduce_mean(cross_entropy)
    # setting up the optimizer to use
    optimizer=tf.train.AdamOptimizer(learning_rate=l_rate)
    training_operation=optimizer.minimize(loss)
    # running the graph
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        num_training_eg=len(x_train)
        num_epoch=n_epoch
        batch_size=b_size
        num_batch=int(np.ceil(num_training_eg/batch_size))
        print('number of batches',num_batch)
        print('training')
        print()
        for epoch in range(num_epoch):
            # reshuffle each epoch so batches differ between epochs
            x_train,y_train=shuffle(x_train,y_train)
            for batch_index in range(num_batch):
                x_batch,y_batch=x_train[batch_index*batch_size:(batch_index+1)*batch_size],y_train[batch_index*batch_size:(batch_index+1)*batch_size]
                sess.run(training_operation,feed_dict={x:x_batch,y:y_batch})
            # evaluate() reuses the 'lenet' variables in this default session
            validation_accuracy=evaluate(x_validation,y_validation,b_size)
            print(validation_accuracy)
        saver.save(sess, 'lenet')
        # print("Model saved")
| [
"tensorflow.contrib.slim.arg_scope",
"tensorflow.get_default_session",
"tensorflow.contrib.slim.fully_connected",
"tensorflow.reduce_mean",
"tensorflow.cast",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.train.AdamOptimizer",
"tenso... | [((1654, 1699), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None, 32, 32, 1)'], {}), '(tf.float32, (None, 32, 32, 1))\n', (1668, 1699), True, 'import tensorflow as tf\n'), ((1709, 1739), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', 'None'], {}), '(tf.int32, None)\n', (1723, 1739), True, 'import tensorflow as tf\n'), ((1759, 1776), 'tensorflow.one_hot', 'tf.one_hot', (['y', '(10)'], {}), '(y, 10)\n', (1769, 1776), True, 'import tensorflow as tf\n'), ((2072, 2096), 'tensorflow.get_default_session', 'tf.get_default_session', ([], {}), '()\n', (2094, 2096), True, 'import tensorflow as tf\n'), ((2669, 2714), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '(None, 32, 32, 1)'], {}), '(tf.float32, (None, 32, 32, 1))\n', (2683, 2714), True, 'import tensorflow as tf\n'), ((2724, 2754), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', 'None'], {}), '(tf.int32, None)\n', (2738, 2754), True, 'import tensorflow as tf\n'), ((2774, 2791), 'tensorflow.one_hot', 'tf.one_hot', (['y', '(10)'], {}), '(y, 10)\n', (2784, 2791), True, 'import tensorflow as tf\n'), ((2920, 2989), 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'net', 'labels': 'one_hot_y'}), '(logits=net, labels=one_hot_y)\n', (2959, 2989), True, 'import tensorflow as tf\n'), ((3069, 3098), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['cross_entropy'], {}), '(cross_entropy)\n', (3083, 3098), True, 'import tensorflow as tf\n'), ((3153, 3197), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'l_rate'}), '(learning_rate=l_rate)\n', (3175, 3197), True, 'import tensorflow as tf\n'), ((3285, 3301), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (3299, 3301), True, 'import tensorflow as tf\n'), ((568, 605), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {'reuse': 'reuse'}), '(scope, reuse=reuse)\n', (585, 605), True, 'import tensorflow as 
tf\n'), ((1868, 1885), 'tensorflow.argmax', 'tf.argmax', (['net', '(1)'], {}), '(net, 1)\n', (1877, 1885), True, 'import tensorflow as tf\n'), ((1885, 1908), 'tensorflow.argmax', 'tf.argmax', (['one_hot_y', '(1)'], {}), '(one_hot_y, 1)\n', (1894, 1908), True, 'import tensorflow as tf\n'), ((1948, 1987), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float64'], {}), '(correct_prediction, tf.float64)\n', (1955, 1987), True, 'import tensorflow as tf\n'), ((3312, 3324), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3322, 3324), True, 'import tensorflow as tf\n'), ((622, 810), 'tensorflow.contrib.slim.arg_scope', 'slim.arg_scope', (['[slim.conv2d]'], {'padding': '"""VALID"""', 'activation_fn': 'tf.nn.relu', 'normalizer_fn': 'slim.batch_norm', 'normalizer_params': "{'is_training': is_training, 'updates_collections': None}"}), "([slim.conv2d], padding='VALID', activation_fn=tf.nn.relu,\n normalizer_fn=slim.batch_norm, normalizer_params={'is_training':\n is_training, 'updates_collections': None})\n", (636, 810), True, 'import tensorflow.contrib.slim as slim\n'), ((818, 895), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['input', '(6)', '[conv_kernel_height, conv_kernel_width]'], {'scope': '"""conv1"""'}), "(input, 6, [conv_kernel_height, conv_kernel_width], scope='conv1')\n", (829, 895), True, 'import tensorflow.contrib.slim as slim\n'), ((909, 985), 'tensorflow.contrib.slim.max_pool2d', 'slim.max_pool2d', (['net', '[pool_kernel_height, pool_kernel_width]'], {'scope': '"""pool1"""'}), "(net, [pool_kernel_height, pool_kernel_width], scope='pool1')\n", (924, 985), True, 'import tensorflow.contrib.slim as slim\n'), ((1000, 1097), 'tensorflow.contrib.slim.conv2d', 'slim.conv2d', (['net', "num_maps['layer_2']", '[conv_kernel_height, conv_kernel_width]'], {'scope': '"""conv2"""'}), "(net, num_maps['layer_2'], [conv_kernel_height,\n conv_kernel_width], scope='conv2')\n", (1011, 1097), True, 'import tensorflow.contrib.slim as slim\n'), ((1107, 1183), 
'tensorflow.contrib.slim.max_pool2d', 'slim.max_pool2d', (['net', '[pool_kernel_height, pool_kernel_width]'], {'scope': '"""pool2"""'}), "(net, [pool_kernel_height, pool_kernel_width], scope='pool2')\n", (1122, 1183), True, 'import tensorflow.contrib.slim as slim\n'), ((1198, 1232), 'tensorflow.contrib.slim.flatten', 'slim.flatten', (['net'], {'scope': '"""flatten"""'}), "(net, scope='flatten')\n", (1210, 1232), True, 'import tensorflow.contrib.slim as slim\n'), ((1266, 1375), 'tensorflow.contrib.slim.fully_connected', 'slim.fully_connected', (['net', "num_maps['layer_fully_1']"], {'activation_fn': 'tf.nn.relu', 'scope': '"""fully_connected1"""'}), "(net, num_maps['layer_fully_1'], activation_fn=tf.nn.\n relu, scope='fully_connected1')\n", (1286, 1375), True, 'import tensorflow.contrib.slim as slim\n'), ((1385, 1494), 'tensorflow.contrib.slim.fully_connected', 'slim.fully_connected', (['net', "num_maps['layer_fully_2']"], {'activation_fn': 'tf.nn.relu', 'scope': '"""fully_connected2"""'}), "(net, num_maps['layer_fully_2'], activation_fn=tf.nn.\n relu, scope='fully_connected2')\n", (1405, 1494), True, 'import tensorflow.contrib.slim as slim\n'), ((1504, 1556), 'tensorflow.contrib.slim.fully_connected', 'slim.fully_connected', (['net', "num_maps['layer_fully_3']"], {}), "(net, num_maps['layer_fully_3'])\n", (1524, 1556), True, 'import tensorflow.contrib.slim as slim\n'), ((3352, 3385), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3383, 3385), True, 'import tensorflow as tf\n'), ((3502, 3539), 'numpy.ceil', 'np.ceil', (['(num_training_eg / batch_size)'], {}), '(num_training_eg / batch_size)\n', (3509, 3539), True, 'import numpy as np\n'), ((3698, 3723), 'sklearn.utils.shuffle', 'shuffle', (['x_train', 'y_train'], {}), '(x_train, y_train)\n', (3705, 3723), False, 'from sklearn.utils import shuffle\n')] |
"""
Created on 16:46, May. 22nd, 2021
Author: fassial
Filename: main.py
"""
import os
import copy
import pickle
import numpy as np
import brainpy as bp
# local dep
import model
import stimulus
# macro
DIR_ROOT = os.getcwd()

def _ensure_dir(*parts):
    """Join *parts* into a path and create the directory if it is missing.

    Mirrors the original `if not exists: mkdir` behaviour (no parent
    creation); returns the joined path so it can be assigned directly.
    """
    path = os.path.join(*parts)
    if not os.path.exists(path):
        os.mkdir(path)
    return path

# Output locations, created on import (same side effects as before).
DIR_FIGS = _ensure_dir(DIR_ROOT, "figs")
DIR_OUTPUTS = _ensure_dir(DIR_ROOT, "outputs")
DIR_OUTPUTS_STIM = _ensure_dir(DIR_OUTPUTS, "stimulus")
DIR_OUTPUTS_SPIKE = _ensure_dir(DIR_OUTPUTS, "spike")
## Default parameter sets.
# Population size shared by the stimulus grid and the network.
_NUM_NEURONS = 200

# Stimulus configurations, keyed by experiment name.
default_stim_params = {
    "normal": stimulus.stim_params(
        name="normal",
        height=_NUM_NEURONS,
        width=1,
        duration=1000,
        others={
            "freqs": np.full((_NUM_NEURONS,), 20., dtype=np.float32),
            "noise": 0.,
        },
    ),
}

# Network configuration: neuron population plus gap-junction (GJ) and
# chemical-synapse (CHEMS) connectivity parameters.
default_net_params = {
    "neurons": {"size": (_NUM_NEURONS,), "V_init": "reset"},
    "GJ": {"r": 5, "p": 0., "weight": .3,
           "conn": model.connector.IndexConnector()},
    "CHEMS": {"r": 5, "p": 1., "weight": 0.,
              "conn": model.connector.IndexConnector()},
}
def main(dt=0.01):
    """
    Run one FSI-network simulation under the "normal" stimulus.

    The stimulus is cached as a CSV under DIR_OUTPUTS_STIM and reused on
    subsequent runs; figures and spike data are written to DIR_FIGS and
    DIR_OUTPUTS_SPIKE respectively.

    :param dt: integration time step handed to the brainpy backend.
    """
    # fix the RNG so stimulus generation / connectivity are reproducible
    np.random.seed(0)
    # configure the brainpy backend
    bp.backend.set(dt=dt)
    bp.backend.set(backend="numpy")
    # experiment key into default_stim_params
    expr_curr = "normal"
    # cache file for the generated stimulus
    stim_fname = os.path.join(
        DIR_OUTPUTS_STIM,
        expr_curr + "-" + str(default_stim_params[expr_curr].duration) + ".csv"
    )
    if os.path.exists(stim_fname):
        # reuse a previously generated stimulus
        stim_neurons = np.loadtxt(
            fname=stim_fname,
            delimiter=","
        )
    else:
        # generate the stimulus, offset it, and persist for later runs
        stim_neurons, _ = stimulus.stimulus.get(
            stim_params=default_stim_params[expr_curr]
        )
        stim_neurons += .5
        np.savetxt(fname=stim_fname, X=stim_neurons, delimiter=",")
    # instantiate the FSI network with the shared defaults
    net = model.FSI(net_params=default_net_params, run_params={
        "inputs": stim_neurons,
        "dt": dt,
        "duration": default_stim_params[expr_curr].duration,
    })
    net.run(report=True)
    # NOTE(review): the monitor handle was unused in the original; the call
    # is kept in case get_monitors() is stateful — TODO confirm and drop.
    net_monitors = net.get_monitors()
    # plot and persist the results
    net.show(img_fname=os.path.join(DIR_FIGS, expr_curr + ".png"))
    net.save(spike_fname=os.path.join(DIR_OUTPUTS_SPIKE, expr_curr + "-" + str(dt) + ".csv"))

if __name__ == "__main__":
    main()
| [
"os.path.exists",
"model.FSI",
"model.connector.IndexConnector",
"stimulus.stimulus.get",
"os.path.join",
"os.getcwd",
"numpy.random.seed",
"os.mkdir",
"brainpy.backend.set",
"numpy.savetxt",
"numpy.full",
"numpy.loadtxt"
] | [((213, 224), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (222, 224), False, 'import os\n'), ((236, 266), 'os.path.join', 'os.path.join', (['DIR_ROOT', '"""figs"""'], {}), "(DIR_ROOT, 'figs')\n", (248, 266), False, 'import os\n'), ((333, 366), 'os.path.join', 'os.path.join', (['DIR_ROOT', '"""outputs"""'], {}), "(DIR_ROOT, 'outputs')\n", (345, 366), False, 'import os\n'), ((444, 481), 'os.path.join', 'os.path.join', (['DIR_OUTPUTS', '"""stimulus"""'], {}), "(DIR_OUTPUTS, 'stimulus')\n", (456, 481), False, 'import os\n'), ((570, 604), 'os.path.join', 'os.path.join', (['DIR_OUTPUTS', '"""spike"""'], {}), "(DIR_OUTPUTS, 'spike')\n", (582, 604), False, 'import os\n'), ((274, 298), 'os.path.exists', 'os.path.exists', (['DIR_FIGS'], {}), '(DIR_FIGS)\n', (288, 298), False, 'import os\n'), ((300, 318), 'os.mkdir', 'os.mkdir', (['DIR_FIGS'], {}), '(DIR_FIGS)\n', (308, 318), False, 'import os\n'), ((374, 401), 'os.path.exists', 'os.path.exists', (['DIR_OUTPUTS'], {}), '(DIR_OUTPUTS)\n', (388, 401), False, 'import os\n'), ((403, 424), 'os.mkdir', 'os.mkdir', (['DIR_OUTPUTS'], {}), '(DIR_OUTPUTS)\n', (411, 424), False, 'import os\n'), ((489, 521), 'os.path.exists', 'os.path.exists', (['DIR_OUTPUTS_STIM'], {}), '(DIR_OUTPUTS_STIM)\n', (503, 521), False, 'import os\n'), ((523, 549), 'os.mkdir', 'os.mkdir', (['DIR_OUTPUTS_STIM'], {}), '(DIR_OUTPUTS_STIM)\n', (531, 549), False, 'import os\n'), ((612, 645), 'os.path.exists', 'os.path.exists', (['DIR_OUTPUTS_SPIKE'], {}), '(DIR_OUTPUTS_SPIKE)\n', (626, 645), False, 'import os\n'), ((647, 674), 'os.mkdir', 'os.mkdir', (['DIR_OUTPUTS_SPIKE'], {}), '(DIR_OUTPUTS_SPIKE)\n', (655, 674), False, 'import os\n'), ((1407, 1424), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1421, 1424), True, 'import numpy as np\n'), ((1448, 1469), 'brainpy.backend.set', 'bp.backend.set', ([], {'dt': 'dt'}), '(dt=dt)\n', (1462, 1469), True, 'import brainpy as bp\n'), ((1476, 1507), 'brainpy.backend.set', 'bp.backend.set', ([], 
{'backend': '"""numpy"""'}), "(backend='numpy')\n", (1490, 1507), True, 'import brainpy as bp\n'), ((1769, 1795), 'os.path.exists', 'os.path.exists', (['stim_fname'], {}), '(stim_fname)\n', (1783, 1795), False, 'import os\n'), ((2206, 2350), 'model.FSI', 'model.FSI', ([], {'net_params': 'default_net_params', 'run_params': "{'inputs': stim_neurons, 'dt': dt, 'duration': default_stim_params[\n expr_curr].duration}"}), "(net_params=default_net_params, run_params={'inputs': stim_neurons,\n 'dt': dt, 'duration': default_stim_params[expr_curr].duration})\n", (2215, 2350), False, 'import model\n'), ((1196, 1228), 'model.connector.IndexConnector', 'model.connector.IndexConnector', ([], {}), '()\n', (1226, 1228), False, 'import model\n'), ((1323, 1355), 'model.connector.IndexConnector', 'model.connector.IndexConnector', ([], {}), '()\n', (1353, 1355), False, 'import model\n'), ((1840, 1883), 'numpy.loadtxt', 'np.loadtxt', ([], {'fname': 'stim_fname', 'delimiter': '""","""'}), "(fname=stim_fname, delimiter=',')\n", (1850, 1883), True, 'import numpy as np\n'), ((1977, 2042), 'stimulus.stimulus.get', 'stimulus.stimulus.get', ([], {'stim_params': 'default_stim_params[expr_curr]'}), '(stim_params=default_stim_params[expr_curr])\n', (1998, 2042), False, 'import stimulus\n'), ((2115, 2174), 'numpy.savetxt', 'np.savetxt', ([], {'fname': 'stim_fname', 'X': 'stim_neurons', 'delimiter': '""","""'}), "(fname=stim_fname, X=stim_neurons, delimiter=',')\n", (2125, 2174), True, 'import numpy as np\n'), ((2505, 2547), 'os.path.join', 'os.path.join', (['DIR_FIGS', "(expr_curr + '.png')"], {}), "(DIR_FIGS, expr_curr + '.png')\n", (2517, 2547), False, 'import os\n'), ((907, 946), 'numpy.full', 'np.full', (['(200,)', '(20.0)'], {'dtype': 'np.float32'}), '((200,), 20.0, dtype=np.float32)\n', (914, 946), True, 'import numpy as np\n')] |
import torch
import numpy as np
from torch.nn import Sequential, Linear, ReLU, Module, Tanh
from torch.nn.functional import mse_loss
class SimpleAutoEncoder(torch.nn.Module):
    """
    Small symmetric autoencoder (input -> 12 -> 8 -> 12 -> input) used as a
    reconstruction-error anomaly detector: samples whose MSE reconstruction
    error exceeds the threshold ``val_lambda`` are classified as 1.
    """
    def __init__(self, num_inputs, val_lambda=666):
        """
        :param num_inputs: dimensionality of each input sample
        :param val_lambda: reconstruction-error threshold for predict_binary
        """
        super(SimpleAutoEncoder, self).__init__()
        self.val_lambda = val_lambda
        self.neurons_l1_to_l2 = 12
        self.neurons_l2_to_latent = 8
        self.encoder = Sequential(
            Linear(num_inputs, self.neurons_l1_to_l2),
            ReLU(True),
            Linear(self.neurons_l1_to_l2, self.neurons_l2_to_latent),
            Tanh(),
        )
        self.decoder = Sequential(
            Linear(self.neurons_l2_to_latent, self.neurons_l1_to_l2),
            ReLU(True),
            Linear(self.neurons_l1_to_l2, num_inputs),
            Tanh(),
        )

    def forward(self, x):
        """Encode then decode ``x``; returns the reconstruction."""
        return self.decoder(self.encoder(x))

    def set_lambda(self, val_lambda):
        """Update the anomaly threshold and log the new value."""
        self.val_lambda = val_lambda
        print('Set lambda of model to: {}'.format(self.val_lambda))

    def calc_reconstruction_error(self, x):
        """Mean-squared error between ``x`` and its reconstruction."""
        return mse_loss(self.forward(x), x)

    def predict_binary(self, x):
        """Return 1 (anomalous) if the reconstruction error exceeds val_lambda, else 0."""
        if self.calc_reconstruction_error(x).data.item() > self.val_lambda:
            return 1
        return 0

    @staticmethod
    def weight_init(m):
        """Xavier/Glorot-style normal initialisation for Linear layers."""
        if isinstance(m, torch.nn.Linear):
            fan_out, fan_in = m.weight.size()  # rows, columns of the weight matrix
            std = np.sqrt(2.0 / (fan_in + fan_out))
            m.weight.data.normal_(0.0, std)
| [
"torch.nn.ReLU",
"numpy.sqrt",
"torch.nn.Tanh",
"torch.nn.Linear"
] | [((445, 486), 'torch.nn.Linear', 'Linear', (['num_inputs', 'self.neurons_l1_to_l2'], {}), '(num_inputs, self.neurons_l1_to_l2)\n', (451, 486), False, 'from torch.nn import Sequential, Linear, ReLU, Module, Tanh\n'), ((500, 510), 'torch.nn.ReLU', 'ReLU', (['(True)'], {}), '(True)\n', (504, 510), False, 'from torch.nn import Sequential, Linear, ReLU, Module, Tanh\n'), ((524, 580), 'torch.nn.Linear', 'Linear', (['self.neurons_l1_to_l2', 'self.neurons_l2_to_latent'], {}), '(self.neurons_l1_to_l2, self.neurons_l2_to_latent)\n', (530, 580), False, 'from torch.nn import Sequential, Linear, ReLU, Module, Tanh\n'), ((594, 600), 'torch.nn.Tanh', 'Tanh', ([], {}), '()\n', (598, 600), False, 'from torch.nn import Sequential, Linear, ReLU, Module, Tanh\n'), ((667, 723), 'torch.nn.Linear', 'Linear', (['self.neurons_l2_to_latent', 'self.neurons_l1_to_l2'], {}), '(self.neurons_l2_to_latent, self.neurons_l1_to_l2)\n', (673, 723), False, 'from torch.nn import Sequential, Linear, ReLU, Module, Tanh\n'), ((736, 746), 'torch.nn.ReLU', 'ReLU', (['(True)'], {}), '(True)\n', (740, 746), False, 'from torch.nn import Sequential, Linear, ReLU, Module, Tanh\n'), ((760, 801), 'torch.nn.Linear', 'Linear', (['self.neurons_l1_to_l2', 'num_inputs'], {}), '(self.neurons_l1_to_l2, num_inputs)\n', (766, 801), False, 'from torch.nn import Sequential, Linear, ReLU, Module, Tanh\n'), ((815, 821), 'torch.nn.Tanh', 'Tanh', ([], {}), '()\n', (819, 821), False, 'from torch.nn import Sequential, Linear, ReLU, Module, Tanh\n'), ((1622, 1655), 'numpy.sqrt', 'np.sqrt', (['(2.0 / (fan_in + fan_out))'], {}), '(2.0 / (fan_in + fan_out))\n', (1629, 1655), True, 'import numpy as np\n')] |
"""
Interface to Growth Model Training Routine
"""
from gemodels import BaseModelInterface, ModelStats, ModelError, StatError
from gemodels import check_data, check_data_1d
from .models import all_func
import numpy as np
from scipy.optimize import curve_fit, minimize, least_squares
from matplotlib import pyplot as plt
from scipy.stats import t as statt, f as statf, chi2 as statx2, nbinom as statnb
class GrowthModel(BaseModelInterface):
    """
    Common fit / predict / plot interface for parametric growth curves.
    The concrete model (logistic, Richards, Bass, ...) is looked up by name
    in ``all_func``, which supplies the curve callable, its parametric form,
    a parameter namedtuple factory and the model degrees of freedom.
    """
    # NOTE(review): method_params=dict() is a mutable default argument shared
    # across instances — consider a None sentinel. Left unchanged here.
    def __init__(self, model='logistic', method='curve', method_params=dict(),
                 alter_strategy=None, valid_steps=0, confidence_criteria='one-student',
                 confidence_alpha=0.05, saddle_tol=1e-4, inverse=False, copy=True):
        """
        Growth Models
        :param model:'logistic','richard','bass','chapmanrichard','gompretz','weibull
        :param method: 'curve','lsq','minloss', # stochastic in progress
        :param method_params: extra params while training
        :param alter_strategy: Manipulate data before fitting 'ma', 'dtrend' in progress
        :param valid_steps: data points to do validity on actual data.
        :param confidence_criteria: 'covariance'
        :param confidence_alpha: float value to define confidence interval
        :param saddle_tol: Tolerance to find the saddle point / stable state of the curve
        :param inverse: a flag for true and false.
        :param copy: Copy data with the model object; if True the training
            data is kept on the instance (``self.state``) for plotting/steps.
        """
        super().__init__()
        self.model_name = "Growth"
        self.model_type = model.title()
        # model definition bundle from the registry
        self._model = all_func[model]
        self.method = method
        self.method_params = method_params
        self._func = None  # raw curve f(t, *params); bound by fit()
        self._pfunc = self._model['parametric']  # curve taking the parameter namedtuple
        self._parameters = self._model['parameters']  # namedtuple factory for parameters
        self.parameters = None  # fitted parameters (set by fit)
        self.parameters_std = None  # per-parameter std-dev (covariance based)
        self.stats = ModelStats(name='{} {} Model'.format(self.model_type, self.model_name),
                               p_alpha=confidence_alpha)
        self.alter_strategy = alter_strategy
        self.inverse = inverse
        self.valid_steps = valid_steps
        self.conf_criteria = confidence_criteria
        self.saddle_tol = saddle_tol
        self.state_flag = copy  # whether fit() stores the training data
    def _alter_data(self, y, t):
        """Optionally pre-process (y, t) before fitting; currently a pass-through."""
        # todo: implement moving average and all here
        # NOTE(review): when alter_strategy is set this falls through and
        # implicitly returns None, which would break _get_data's unpacking.
        if self.alter_strategy is None:
            return y, t
    def _get_saddle_point(self):
        """Stub: locate the curve's saddle/stable point (uses saddle_tol when done)."""
        # todo: make this
        return 0
    def _get_data(self, X=None, use_alter=True):
        """Resolve data to a (y, t) pair, falling back to the stored state when X is None."""
        if X is None and self.state_flag:
            X = self.state
        y, t = check_data(X)
        if use_alter:
            y, t = self._alter_data(y, t)
        return y, t
    # def __repr__(self):
    # # todo: make this
    def _fit_curve(self, X, **kwargs):
        """Fit via scipy.optimize.curve_fit; returns (y_actual, y_fitted)."""
        y, t = self._get_data(X)
        opt, covar_mat = curve_fit(self._func, t, y)
        # setting optimal parameters
        self.parameters = self._parameters._make(opt)
        # getting covariance based standard deviations
        sigma_ab = np.sqrt(np.diagonal(covar_mat))
        self.parameters_std = self._parameters._make(sigma_ab)
        print('Curve Fitted on {} {} Model with Parameters'.format(
            self.model_type, self.model_name), self.parameters)
        return y, self._pfunc(t, self.parameters)
    def _fit_linear(self, X, **kwargs):
        """Stub: linearised fitting. NOTE(review): opt is empty, so
        _parameters._make(opt) will fail until this is implemented."""
        y, t = self._get_data(X)
        opt = []
        self.parameters = self._parameters._make(opt)
        return y, self.predict(t)
    def _fit_stochastic(self, X, **kwargs):
        """Stub: stochastic fitting. NOTE(review): opt is empty — unimplemented."""
        y, t = self._get_data(X)
        opt = []
        self.parameters = self._parameters._make(opt)
        return y, self.predict(t)
    def _fit_minimize(self, X, **kwargs):
        """Stub: loss-minimisation fitting. NOTE(review): opt is empty — unimplemented."""
        y, t = self._get_data(X)
        opt = []
        self.parameters = self._parameters._make(opt)
        return y, self.predict(t)
    def fit(self, X, **model_args):
        """
        Fit the growth curve to X with the configured method and record
        goodness-of-fit in ``self.stats``.
        :param X: observations; parsed into (y, t) by check_data
        :param model_args: forwarded to the 'minimize' routine only
        :return: None — parameters and stats are stored on the instance
        """
        if self.method == "curve":
            self._func = self._model['curve']
            y_act, y_fit = self._fit_curve(X)
        elif self.method == "linear":
            self._func = self._model['curve']
            y_act, y_fit = self._fit_linear(X)
        elif self.method == "minimize":
            self._func = self._model['curve']
            y_act, y_fit = self._fit_minimize(X, **model_args)
        elif self.method == "stochastic":
            self._func = self._model['curve']
            y_act, y_fit = self._fit_stochastic(X)
        else:
            raise ModelError('Not a Valid Method for fitting')
        self.stats.score(y_act=y_act, y_fit=y_fit,
                         ndf=len(y_act) - self._model['df_model'] + 1,
                         mdf=self._model['df_model'])
        if self.state_flag:
            self.state = X
    def summary(self):
        """Print the fit statistics summary."""
        self.stats.summary(return_df=False)
    def _get_steps(self, steps, use_data=False, smoothed=False, breaks=100):
        """
        Step formulation, checking and smoothening
        :param steps: integer, list or 1D numpy array
        :param use_data: Use the exsisting data and add steps with them
        :param smoothed: To smoothed the steps or not
        :param breaks: number of points when a smoothed linspace is built
        :return: 1-D array of time steps to evaluate the curve at
        """
        if use_data and self.state_flag:
            _, steps = self._get_data()
            if smoothed:
                breaks = breaks if len(steps) < 0.75 * breaks else int(2 * len(steps))
                steps = np.linspace(int(0.95 * np.min(steps)), int(1.05 * np.max(steps)), breaks)
            return steps
        elif use_data:
            raise ModelError('Data is not stored with the model. Flag \'use_data\' wont work!')
        if self.state_flag: # based on value of data
            _ , t = self._get_data()
            t_steps = int(np.max(t))
        else: # based on degree of freedoms
            t_steps = self.stats.ndf + self.stats.mdf - 1
        if isinstance(steps, int) and smoothed:
            # This is crude as of now need better methods
            steps = np.linspace(t_steps + 1, t_steps + steps + 1, breaks)
        elif isinstance(steps,int) and not smoothed:
            steps = np.arange(t_steps + 1, t_steps + steps + 1)
        elif (isinstance(steps, list) or isinstance(steps, tuple)) and len(steps) == 2:
            # a 2-element list/tuple is treated as an inclusive (start, stop) range
            steps = np.linspace(steps[0], steps[1], breaks)
        elif smoothed:
            breaks = breaks if len(steps) < 0.75 * breaks else int(2 * len(steps))
            steps = np.linspace(int(0.95 * np.min(steps)), int(1.05 * np.max(steps)), breaks)
        else:
            steps = check_data_1d(steps)
        return steps
    def predict(self, steps, response=False, sigma=1.96, breaks=100):
        """
        Evaluate the fitted curve at the given steps.
        :param steps: steps spec, resolved by _get_steps
        :param response: if True also return upper/lower curves at
            parameters +/- sigma * std
        :param sigma: multiplier on the parameter standard deviations
        :param breaks: linspace resolution when steps are smoothed
        :return: y_fit, or (y_fit, fit_upper, fit_lower) when response=True
        """
        steps = self._get_steps(steps, breaks=breaks)
        y_fit = self._pfunc(steps, self.parameters)
        if response:
            params = [self.parameters, self.parameters_std]
            uparam = self._parameters(*map(lambda x: x[0] + sigma * x[1], zip(*params)))
            lparam = self._parameters(*map(lambda x: x[0] - sigma * x[1], zip(*params)))
            fit_upper = self._pfunc(steps, uparam)
            fit_lower = self._pfunc(steps, lparam)
            return y_fit, fit_upper, fit_lower
        return y_fit
    def plot(self, title=None, new_data=None, plot_range=None, confidence=True, confidence_band=True, sigma=1.96,
             breaks=100, fig_size=(10, 7)):
        """
        Scatter the data, overlay the fitted curve and (optionally) the
        parameter-confidence region and prediction band, then save and show.
        :param title: figure title; also used as the saved PNG file name
        :param new_data: data to plot against; defaults to the stored state
        :param plot_range: steps spec for the smoothed curve; defaults to data steps
        :param confidence: draw curves at parameters +/- sigma * std
        :param confidence_band: draw a t-based prediction band
        :param sigma: multiplier on parameter standard deviations
        :param breaks: resolution of the smoothed curve
        :param fig_size: unused at the moment (figure call commented out)
        """
        title = title if title is not None else 'Estimated {} {} Model'.format(self.model_type, self.model_name)
        try:
            y_act, t = self._get_data(new_data)
        except Exception as e:
            raise ModelError('No data to make a plot on or Data is not in right format! Aborting! Error:\n', e)
        # Confidence level
        alpha = int(100 - self.stats.confidence_alpha * 100)
        # plotting actual data
        # plt.figure(figsize=fig_size, dpi=300)
        plt.scatter(t, y_act, s=3, label='Data')
        # print("Actual Steps: ", t)
        # getting smoothed breaks
        if plot_range is None:
            plot_range = t
        t_smoothed = self._get_steps(plot_range, smoothed=True, breaks=breaks)
        y_fit = self._pfunc(t_smoothed, self.parameters)
        # print("Smooth Steps: ", t_smoothed)
        # plot the regression
        plt.plot(t_smoothed, y_fit, c='black',
                 label='{} {} Model'.format(self.model_type, self.model_name))
        if confidence:
            params = [self.parameters, self.parameters_std]
            uparam = self._parameters(*map(lambda x: x[0] + sigma * x[1], zip(*params)))
            lparam = self._parameters(*map(lambda x: x[0] - sigma * x[1], zip(*params)))
            fit_upper = self._pfunc(t_smoothed, uparam)
            fit_lower = self._pfunc(t_smoothed, lparam)
            plt.plot(t_smoothed, fit_lower, c='orange', label='{}% Confidence Region'.format(alpha))
            plt.plot(t_smoothed, fit_upper, c='orange')
        if confidence_band:
            # NOTE(review): confidence_band_t is not defined in this class —
            # presumably a module-level helper elsewhere in the file; verify.
            lpb, upb = confidence_band_t(func=self._pfunc, params=self.parameters,
                                           y_act=y_act, t=t,
                                           t_breaks=t_smoothed,
                                           alpha=self.stats.confidence_alpha)
            plt.plot(t_smoothed, lpb, 'k--', label='{}% Prediction Band'.format(alpha))
            plt.plot(t_smoothed, upb, 'k--')
        plt.ylabel('Estimated Values')
        plt.xlabel('Data Steps')
        plt.title(title)
        plt.legend(loc='best')
        # save and show figure
        plt.savefig('{}.png'.format(title))
        plt.show()
    def plot_forecast(self, steps, plot_range=None, title=None, use_trianing=True,
                      confidence=True, return_forecast=False, sigma=1.96, fig_size=(10, 7)):
        """
        Plot the forecast beyond the training range (optionally with the
        prediction band) and optionally return the forecast values.
        :param steps: number of steps (or steps spec) to forecast
        :param plot_range: unused at the moment
        :param title: figure title
        :param use_trianing: offset the forecast after the training data
        :param confidence: also plot/return the upper and lower band
        :param return_forecast: if True return predict()'s result
        :param sigma: multiplier on parameter standard deviations
        :param fig_size: unused at the moment (figure call commented out)
        """
        # plt.figure(figsize=fig_size, dpi=300)
        title = title if title is not None else 'Estimated {} {} Model'.format(self.model_type, self.model_name)
        steps = self._get_steps(steps, use_data=use_trianing)
        # Confidence level
        alpha = int(100 - self.stats.confidence_alpha * 100)
        res = self.predict(steps, response=confidence, sigma=sigma)
        if confidence:
            plt.plot(steps, res[0], 'black', label='Forecast Values')
            plt.plot(steps, res[1], 'k--', label='{}% Prediction Band'.format(alpha))
            plt.plot(steps, res[2], 'k--')
        else:
            plt.plot(steps, res, 'black', label='Forecast Values')
        plt.ylabel('Estimated Values')
        plt.xlabel('Data Steps')
        plt.title(title)
        plt.legend(loc='best')
        plt.show()
        if return_forecast:
            return res
| [
"scipy.optimize.curve_fit",
"numpy.diagonal",
"gemodels.check_data_1d",
"matplotlib.pyplot.ylabel",
"numpy.arange",
"gemodels.check_data",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.linspace",
"matplotlib.pyplot.scatter",
"numpy.min",
"matplotlib.pyplot.title",... | [((2646, 2659), 'gemodels.check_data', 'check_data', (['X'], {}), '(X)\n', (2656, 2659), False, 'from gemodels import check_data, check_data_1d\n'), ((2897, 2924), 'scipy.optimize.curve_fit', 'curve_fit', (['self._func', 't', 'y'], {}), '(self._func, t, y)\n', (2906, 2924), False, 'from scipy.optimize import curve_fit, minimize, least_squares\n'), ((7989, 8029), 'matplotlib.pyplot.scatter', 'plt.scatter', (['t', 'y_act'], {'s': '(3)', 'label': '"""Data"""'}), "(t, y_act, s=3, label='Data')\n", (8000, 8029), True, 'from matplotlib import pyplot as plt\n'), ((9484, 9514), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Estimated Values"""'], {}), "('Estimated Values')\n", (9494, 9514), True, 'from matplotlib import pyplot as plt\n'), ((9523, 9547), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Data Steps"""'], {}), "('Data Steps')\n", (9533, 9547), True, 'from matplotlib import pyplot as plt\n'), ((9556, 9572), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (9565, 9572), True, 'from matplotlib import pyplot as plt\n'), ((9581, 9603), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (9591, 9603), True, 'from matplotlib import pyplot as plt\n'), ((9688, 9698), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9696, 9698), True, 'from matplotlib import pyplot as plt\n'), ((10568, 10598), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Estimated Values"""'], {}), "('Estimated Values')\n", (10578, 10598), True, 'from matplotlib import pyplot as plt\n'), ((10607, 10631), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Data Steps"""'], {}), "('Data Steps')\n", (10617, 10631), True, 'from matplotlib import pyplot as plt\n'), ((10640, 10656), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (10649, 10656), True, 'from matplotlib import pyplot as plt\n'), ((10665, 10687), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), 
"(loc='best')\n", (10675, 10687), True, 'from matplotlib import pyplot as plt\n'), ((10696, 10706), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10704, 10706), True, 'from matplotlib import pyplot as plt\n'), ((3100, 3122), 'numpy.diagonal', 'np.diagonal', (['covar_mat'], {}), '(covar_mat)\n', (3111, 3122), True, 'import numpy as np\n'), ((6132, 6185), 'numpy.linspace', 'np.linspace', (['(t_steps + 1)', '(t_steps + steps + 1)', 'breaks'], {}), '(t_steps + 1, t_steps + steps + 1, breaks)\n', (6143, 6185), True, 'import numpy as np\n'), ((8988, 9031), 'matplotlib.pyplot.plot', 'plt.plot', (['t_smoothed', 'fit_upper'], {'c': '"""orange"""'}), "(t_smoothed, fit_upper, c='orange')\n", (8996, 9031), True, 'from matplotlib import pyplot as plt\n'), ((9442, 9474), 'matplotlib.pyplot.plot', 'plt.plot', (['t_smoothed', 'upb', '"""k--"""'], {}), "(t_smoothed, upb, 'k--')\n", (9450, 9474), True, 'from matplotlib import pyplot as plt\n'), ((10291, 10348), 'matplotlib.pyplot.plot', 'plt.plot', (['steps', 'res[0]', '"""black"""'], {'label': '"""Forecast Values"""'}), "(steps, res[0], 'black', label='Forecast Values')\n", (10299, 10348), True, 'from matplotlib import pyplot as plt\n'), ((10447, 10477), 'matplotlib.pyplot.plot', 'plt.plot', (['steps', 'res[2]', '"""k--"""'], {}), "(steps, res[2], 'k--')\n", (10455, 10477), True, 'from matplotlib import pyplot as plt\n'), ((10504, 10558), 'matplotlib.pyplot.plot', 'plt.plot', (['steps', 'res', '"""black"""'], {'label': '"""Forecast Values"""'}), "(steps, res, 'black', label='Forecast Values')\n", (10512, 10558), True, 'from matplotlib import pyplot as plt\n'), ((5697, 5772), 'gemodels.ModelError', 'ModelError', (['"""Data is not stored with the model. Flag \'use_data\' wont work!"""'], {}), '("Data is not stored with the model. 
Flag \'use_data\' wont work!")\n', (5707, 5772), False, 'from gemodels import BaseModelInterface, ModelStats, ModelError, StatError\n'), ((5892, 5901), 'numpy.max', 'np.max', (['t'], {}), '(t)\n', (5898, 5901), True, 'import numpy as np\n'), ((6259, 6302), 'numpy.arange', 'np.arange', (['(t_steps + 1)', '(t_steps + steps + 1)'], {}), '(t_steps + 1, t_steps + steps + 1)\n', (6268, 6302), True, 'import numpy as np\n'), ((7718, 7821), 'gemodels.ModelError', 'ModelError', (['"""No data to make a plot on or Data is not in right format! Aborting! Error:\n"""', 'e'], {}), "(\n 'No data to make a plot on or Data is not in right format! Aborting! Error:\\n'\n , e)\n", (7728, 7821), False, 'from gemodels import BaseModelInterface, ModelStats, ModelError, StatError\n'), ((6411, 6450), 'numpy.linspace', 'np.linspace', (['steps[0]', 'steps[1]', 'breaks'], {}), '(steps[0], steps[1], breaks)\n', (6422, 6450), True, 'import numpy as np\n'), ((4621, 4665), 'gemodels.ModelError', 'ModelError', (['"""Not a Valid Method for fitting"""'], {}), "('Not a Valid Method for fitting')\n", (4631, 4665), False, 'from gemodels import BaseModelInterface, ModelStats, ModelError, StatError\n'), ((6685, 6705), 'gemodels.check_data_1d', 'check_data_1d', (['steps'], {}), '(steps)\n', (6698, 6705), False, 'from gemodels import check_data, check_data_1d\n'), ((5580, 5593), 'numpy.min', 'np.min', (['steps'], {}), '(steps)\n', (5586, 5593), True, 'import numpy as np\n'), ((5607, 5620), 'numpy.max', 'np.max', (['steps'], {}), '(steps)\n', (5613, 5620), True, 'import numpy as np\n'), ((6600, 6613), 'numpy.min', 'np.min', (['steps'], {}), '(steps)\n', (6606, 6613), True, 'import numpy as np\n'), ((6627, 6640), 'numpy.max', 'np.max', (['steps'], {}), '(steps)\n', (6633, 6640), True, 'import numpy as np\n')] |
# Implementation of an interferometric direction finding algorithm for an orthoganal L antenna array
#
#
# 90 degrees
# |
# v
#
# 7
# |
# |
# |
# |
# 5
# |
# |
# 3
# |
# 1--2-----4------------6 <-- 0 degrees
#
# TODO: Fix ambiguities when the wavefront is parallel to one of the array's legs.
# In this situation the aforementioned leg provides no information (phase deltas are zero).
# So the linear regression tends to fit two DOAs +-180 degrees apart equally well.
#
from cmath import phase, rect
from functools import reduce
from math import pi, cos, sin, ceil
import matplotlib.pyplot as plt
from numpy import array, arange, iinfo, int32
from numpy.linalg import inv, norm
def create_bases_mat(base_sizes):
    """
    Build the (2n x 2) matrix of baseline displacement vectors for the
    orthogonal L array: rows 0..n-1 are the y-leg baselines (largest base
    first), rows n..2n-1 the x-leg baselines (smallest first); each row is
    an (x, y) displacement.

    :param base_sizes: baseline lengths, ordered small to large
    :return: float numpy array of shape (2 * len(base_sizes), 2)
    """
    num_bases = len(base_sizes)
    # BUG FIX: force a float matrix — the original int array (plain arange)
    # silently truncated fractional baselines such as 13.6 -> 13 on assignment.
    bases = arange(num_bases * 4, dtype=float).reshape((num_bases * 2, 2))
    for i, base_size in enumerate(base_sizes[::-1]):
        bases[i][0] = 0
        bases[i][1] = base_size
    for i, base_size in enumerate(base_sizes):
        bases[i + num_bases][0] = base_size
        bases[i + num_bases][1] = 0
    return bases
def calculate_df(bases, phases) -> complex:
    """
    Least-squares estimate of the direction-finding vector from the
    measured phase deltas: solves the normal equations
    (B^T B) d = B^T p and packs the solution as a complex number.

    :param bases: (2n x 2) baseline matrix
    :param phases: (2n x 1) column vector of phase deltas
    :return: DF estimate x + y*1j
    """
    normal_mat = bases.T.dot(bases)
    projection = inv(normal_mat).dot(bases.T)
    solution = projection.dot(phases)
    return solution[1][0] + solution[0][0] * 1j
def calculate_azimuth_deg(df):
    """Convert a complex DF vector into its azimuth angle in degrees."""
    azimuth_rad = phase(df)
    return azimuth_rad * 180 / pi
def simulate_phases(bases, azimuth_deg):
    """
    Forward model: the (noise-free) phase deltas an ideal unit wavefront
    arriving from azimuth_deg would produce on each baseline.

    :param bases: (2n x 2) baseline matrix
    :param azimuth_deg: wavefront direction in degrees
    :return: (2n x 1) column vector of simulated phases
    """
    df = rect(1, azimuth_deg * pi / 180)
    df_column = array([
        [df.imag],
        [df.real],
    ])
    return bases.dot(df_column)
def calculate_phases(apertures):
    """
    Extract phase angles from the complex aperture samples and regroup the
    interleaved samples (even indices first, then odd indices).
    """
    angles = [phase(sample) for sample in apertures]
    return angles[0::2] + angles[1::2]
def calculate_apertures(base_sizes, azimuth_deg, frequency_Hz):
    """
    Simulate the complex (unit-amplitude) aperture samples for a signal
    arriving from azimuth_deg: for each baseline, the x-leg sample is
    appended first, then the y-leg sample.
    """
    speed_of_light = 299_792_458
    azimuth_rad = azimuth_deg * pi / 180
    samples = []
    for length in base_sizes:
        max_phase_delta = 2 * pi * length * frequency_Hz / speed_of_light
        samples.append(rect(1, max_phase_delta * cos(azimuth_rad)))
        samples.append(rect(1, max_phase_delta * sin(azimuth_rad)))
    return samples
def get_OLS_linear_regression_error(x, y):
s_x = sum(x)
s_y = sum(y)
s_xx = reduce(lambda sum, val: sum + val*val, x, 0)
s_xy = reduce(lambda sum, x_y: (sum[0] + x_y[0] * x_y[1], 0), zip(x, y), (0,0))[0]
n = len(x)
beta = (n * s_xy - s_x * s_y) / (n * s_xx - s_x*s_x)
alpha = (s_y - beta * s_x) / n
g = [beta * x_val + alpha for x_val in x]
error = reduce(lambda sum, y_g: (sum[0] + pow(y_g[0] - y_g[1], 2), 0), zip(y, g), (0,0))[0]
return error
def disambiguate_phases(phases, base_sizes, frequency_Hz):
    """
    Resolve the 360-degree ambiguities in one leg's measured phases.

    Candidate values for the smallest baseline's phase are enumerated over
    its physically possible range; each candidate is extrapolated across the
    longer baselines, and the candidate set whose points best fit a straight
    line through the origin (phase vs. baseline length) is selected.

    :param phases: per-baseline phases in radians, smallest base first
    :param base_sizes: baseline lengths, ordered small to large
    :param frequency_Hz: signal frequency
    :return: disambiguated phases in radians, same order as the input
    """
    # work in degrees internally (note: the loop variable shadows cmath.phase)
    phases = [phase * 180 / pi for phase in phases]
    c = 299_792_458
    # largest phase magnitude the smallest baseline can physically produce
    max_phase = ceil(360 * base_sizes[0] * frequency_Hz / c)
    min_phase = -max_phase
    first_base_phase = phases[0]
    # each candidate set starts from one possible true phase of the first base
    phase_sets = [
        [first_base_phase]
    ]
    phase = first_base_phase + 360
    while phase <= max_phase:
        phase_sets.append([phase])
        phase += 360
    phase = first_base_phase - 360
    while phase >= min_phase:
        phase_sets.append([phase])
        phase -= 360
    for phase_set in phase_sets:
        for phase_index in range(1, len(phases)):
            # NOTE(review): the 2.5 factor hard-codes the ratio between
            # consecutive baselines (13.6 : 34 : 85 in the driver script) —
            # TODO derive it from base_sizes to generalise.
            extrapolated_phase = phase_set[phase_index - 1] * 2.5
            delta = phases[phase_index] - extrapolated_phase
            # snap the measured phase to the wrap nearest the extrapolation
            ambiguity = 360 * round(delta / 360)
            phase_set.append(phases[phase_index] - ambiguity)
    # num_ambiguities = len(phase_sets)
    # print(num_ambiguities)
    # pick the candidate set with the smallest OLS residual on (base, phase),
    # anchored through the origin with the (0, 0) point
    unambiguous_phases = []
    min_error = iinfo(int32).max
    for phase_set in phase_sets:
        x = [0] + base_sizes
        y = [0] + phase_set
        error = get_OLS_linear_regression_error(x, y)
        if error < min_error:
            min_error = error
            unambiguous_phases = phase_set
    # convert back to radians
    unambiguous_phases = [phase * pi / 180 for phase in unambiguous_phases]
    return unambiguous_phases
def normalise(v):
    """Return v scaled to unit Euclidean length; zero vectors pass through unchanged."""
    magnitude = norm(v)
    return v if magnitude == 0 else v / magnitude
def do_df_algo(frequency_Hz, simulated_azimuth_deg, base_sizes):
    """Simulate one direction-finding pass and return the rounded azimuth.

    Builds the base matrix, synthesises apertures for the requested
    azimuth, derives the phases, disambiguates the two halves of the
    phase vector separately, and converts the resulting direction
    vector to whole degrees.
    """
    bases = create_bases_mat(base_sizes)
    apertures = calculate_apertures(base_sizes, simulated_azimuth_deg, frequency_Hz)
    phases = calculate_phases(apertures)
    # NOTE(review): the first half is reversed after disambiguation —
    # presumably to match the base ordering used by the phase model;
    # confirm against calculate_phases.
    phases[:3] = disambiguate_phases(phases[:3], base_sizes, frequency_Hz)[::-1]
    phases[3:] = disambiguate_phases(phases[3:], base_sizes, frequency_Hz)
    column = array(phases).reshape(6, 1)
    direction = calculate_df(bases, column)
    return round(calculate_azimuth_deg(direction))
# Sweep every integer azimuth in [-180, 180) through the DF algorithm and
# compare the recovered azimuth against the simulated truth.
frequency_Hz = 30_000_000
base_sizes = [13.6, 34, 85]
x = range(-180, 180)
y = x  # ideal result: recovered azimuth equals the simulated azimuth
g = []
for simulated_azimuth_deg in x:
    az = do_df_algo(frequency_Hz, simulated_azimuth_deg, base_sizes)
    if simulated_azimuth_deg != az:
        # Report any azimuth the algorithm failed to recover exactly.
        print(f"expected: {simulated_azimuth_deg}, got: {az}")
    g.append(az)
plt.plot(x,y,x,g)  # truth line vs recovered curve
plt.show()
| [
"math.ceil",
"functools.reduce",
"cmath.rect",
"matplotlib.pyplot.plot",
"numpy.iinfo",
"math.cos",
"numpy.array",
"numpy.linalg.inv",
"cmath.phase",
"numpy.linalg.norm",
"math.sin",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((5252, 5272), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', 'x', 'g'], {}), '(x, y, x, g)\n', (5260, 5272), True, 'import matplotlib.pyplot as plt\n'), ((5270, 5280), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5278, 5280), True, 'import matplotlib.pyplot as plt\n'), ((1179, 1185), 'numpy.linalg.inv', 'inv', (['A'], {}), '(A)\n', (1182, 1185), False, 'from numpy.linalg import inv, norm\n'), ((1465, 1495), 'cmath.rect', 'rect', (['(1)', 'simulated_azimuth_rad'], {}), '(1, simulated_azimuth_rad)\n', (1469, 1495), False, 'from cmath import phase, rect\n'), ((1519, 1568), 'numpy.array', 'array', (['[[simulated_df.imag], [simulated_df.real]]'], {}), '([[simulated_df.imag], [simulated_df.real]])\n', (1524, 1568), False, 'from numpy import array, arange, iinfo, int32\n'), ((2309, 2355), 'functools.reduce', 'reduce', (['(lambda sum, val: sum + val * val)', 'x', '(0)'], {}), '(lambda sum, val: sum + val * val, x, 0)\n', (2315, 2355), False, 'from functools import reduce\n'), ((2860, 2904), 'math.ceil', 'ceil', (['(360 * base_sizes[0] * frequency_Hz / c)'], {}), '(360 * base_sizes[0] * frequency_Hz / c)\n', (2864, 2904), False, 'from math import pi, cos, sin, ceil\n'), ((4096, 4103), 'numpy.linalg.norm', 'norm', (['v'], {}), '(v)\n', (4100, 4103), False, 'from numpy.linalg import inv, norm\n'), ((1679, 1688), 'cmath.phase', 'phase', (['ap'], {}), '(ap)\n', (1684, 1688), False, 'from cmath import phase, rect\n'), ((3697, 3709), 'numpy.iinfo', 'iinfo', (['int32'], {}), '(int32)\n', (3702, 3709), False, 'from numpy import array, arange, iinfo, int32\n'), ((771, 792), 'numpy.arange', 'arange', (['(num_bases * 4)'], {}), '(num_bases * 4)\n', (777, 792), False, 'from numpy import array, arange, iinfo, int32\n'), ((1332, 1341), 'cmath.phase', 'phase', (['df'], {}), '(df)\n', (1337, 1341), False, 'from cmath import phase, rect\n'), ((2046, 2062), 'math.cos', 'cos', (['azimuth_rad'], {}), '(azimuth_rad)\n', (2049, 2062), False, 'from math import pi, cos, sin, 
ceil\n'), ((2098, 2114), 'math.sin', 'sin', (['azimuth_rad'], {}), '(azimuth_rad)\n', (2101, 2114), False, 'from math import pi, cos, sin, ceil\n'), ((2140, 2155), 'cmath.rect', 'rect', (['(1)', 'x_base'], {}), '(1, x_base)\n', (2144, 2155), False, 'from cmath import phase, rect\n'), ((2182, 2197), 'cmath.rect', 'rect', (['(1)', 'y_base'], {}), '(1, y_base)\n', (2186, 2197), False, 'from cmath import phase, rect\n'), ((4623, 4636), 'numpy.array', 'array', (['phases'], {}), '(phases)\n', (4628, 4636), False, 'from numpy import array, arange, iinfo, int32\n')] |
import numpy as np
import matplotlib.pyplot as plt
import utils
# Sample range and a small 1-D regression dataset: noisy sin() samples for
# training, the clean sin() curve for testing.
RANGES = np.array([-5.0, 5.0])
X_train = np.array([
    -3.0,
    -1.0,
    0.5,
    1.5,
    4.95,
])[..., np.newaxis]
# 201 evenly spaced query points across the range, shaped as a column vector.
X_test = np.linspace(RANGES[0], RANGES[1], 201)[..., np.newaxis]
FUN_TARGET = np.sin
# Fixed-seed Gaussian noise (scale 0.2) keeps the dataset reproducible.
Y_train = FUN_TARGET(X_train) + np.random.RandomState(42).randn(*X_train.shape) * 0.2
Y_test = FUN_TARGET(X_test)
# Targets are flattened to 1-D; inputs stay as (n, 1) column vectors.
Y_train = np.squeeze(Y_train, axis=1)
Y_test = np.squeeze(Y_test, axis=1)
if __name__ == '__main__':
    # Quick visual sanity check of the generated data.
    print(X_train)
    print(X_test)
    print(Y_train)
    print(Y_test)
    utils.plot_1d(X_train, Y_train, X_test, Y_test)
| [
"utils.plot_1d",
"numpy.squeeze",
"numpy.array",
"numpy.linspace",
"numpy.random.RandomState"
] | [((76, 97), 'numpy.array', 'np.array', (['[-5.0, 5.0]'], {}), '([-5.0, 5.0])\n', (84, 97), True, 'import numpy as np\n'), ((401, 428), 'numpy.squeeze', 'np.squeeze', (['Y_train'], {'axis': '(1)'}), '(Y_train, axis=1)\n', (411, 428), True, 'import numpy as np\n'), ((438, 464), 'numpy.squeeze', 'np.squeeze', (['Y_test'], {'axis': '(1)'}), '(Y_test, axis=1)\n', (448, 464), True, 'import numpy as np\n'), ((109, 147), 'numpy.array', 'np.array', (['[-3.0, -1.0, 0.5, 1.5, 4.95]'], {}), '([-3.0, -1.0, 0.5, 1.5, 4.95])\n', (117, 147), True, 'import numpy as np\n'), ((198, 236), 'numpy.linspace', 'np.linspace', (['RANGES[0]', 'RANGES[1]', '(201)'], {}), '(RANGES[0], RANGES[1], 201)\n', (209, 236), True, 'import numpy as np\n'), ((573, 620), 'utils.plot_1d', 'utils.plot_1d', (['X_train', 'Y_train', 'X_test', 'Y_test'], {}), '(X_train, Y_train, X_test, Y_test)\n', (586, 620), False, 'import utils\n'), ((308, 333), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (329, 333), True, 'import numpy as np\n')] |
'''
Multiprocessing Error
---------------------
This module alerts the user to a possible multiprocessing error. This occurs
when the data from different cores is incorrectly combined, with weights not
corresponding to the data.
'''
import numpy as np
# This function tests if a multiprocessing error has occured. This is when the
# data from the different cores becomes mixed, and the weights and N are not
# correct
def multi_processing_error(data, weights):
    """Alert the user to a possible multiprocessing error.

    A multiprocessing error is suspected when the data and the log of
    the weights are insufficiently correlated (|Pearson r| < 0.55),
    which can happen when results from different cores are combined
    incorrectly.

    Parameters
    ----------
    data : numpy.ndarray
        Input first-passage time data.
    weights : numpy.ndarray
        Weights associated one-to-one with the first-passage time data.

    Returns
    -------
    bool
        True when the correlation is weak enough to suggest mismatched
        data and weights, False otherwise.
    """
    # Pearson correlation between the data and log10 of the weights.
    # A NaN correlation (e.g. constant weights) compares False below and
    # is therefore treated as "no error detected".
    pearson_corr = np.corrcoef(data, np.log10(weights))[0, 1]
    if abs(pearson_corr) < 0.55:  # data and weights look uncorrelated
        # "occurred" typo fixed in the user-facing message.
        print('Possible multiprocessing error occurred, see documentation')
        return True
    return False
| [
"numpy.log10"
] | [((954, 971), 'numpy.log10', 'np.log10', (['weights'], {}), '(weights)\n', (962, 971), True, 'import numpy as np\n')] |
#! /usr/bin/env python3
# Copyright 2018 <NAME>.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# The software is provided "AS IS", without warranty of any kind, express or
# implied, including but not limited to the warranties of merchantability,
# fitness for a particular purpose and noninfringement. In no event shall
# the authors or copyright holders be liable for any claim, damages or
# other liability, whether in an action of contract, tort or otherwise,
# arising from, out of or in connection with the software or the use or
# other dealings in the software.
"""Compares numpy running times against python interpreter.
"""
import os
import timeit
import unittest
import PIL.Image
import PIL.ImageColor
import PIL.ImageDraw2
import numpy as np
def get_marked_image(mark_coord=None, radius=20, im_size=(200, 100)):
    """Return a transparent RGBA image with one ellipse marker drawn on it."""
    x0, y0 = (20, 10) if mark_coord is None else mark_coord
    bounding_box = (x0, y0, x0 + radius, y0 + radius)
    image = PIL.Image.new('RGBA', im_size)
    canvas = PIL.ImageDraw2.Draw(image)
    canvas.ellipse(bounding_box, PIL.ImageDraw2.Pen('steelblue', width=9))
    return image
class BboxFinder1:
    """Pure-Python scan used as a baseline against the numpy version."""

    @staticmethod
    def _find_first_positive(vals, reverse=False):
        """Return the index of the first positive value in ``vals``.

        Scans from the front, or from the back when ``reverse`` is True.
        If no value is positive, the index of the last element scanned is
        returned (the historical fall-through behaviour); an empty
        sequence returns the scan start index instead of raising
        UnboundLocalError as the original did.
        """
        start = 0
        sgn = 1
        if reverse:
            start = len(vals) - 1
            sgn = -1
            vals = reversed(vals)
        i = 0  # guard: empty input used to crash on the fall-through return
        for i, val in enumerate(vals):
            if val > 0:
                return start + sgn * i
        return start + sgn * i
class BboxFinder2:
    """Numpy-based scan; assumes ``vals`` holds non-negative numbers,
    so "non-zero" and "positive" coincide."""

    @staticmethod
    def _find_first_positive(vals, reverse=False):
        """Return the index of the first non-zero entry (the last one when
        ``reverse``); 0 (or ``len(vals)``) when every entry is zero."""
        nonzero_indices = np.nonzero(vals)[0]
        if len(nonzero_indices) == 0:
            return len(vals) if reverse else 0
        return nonzero_indices[-1] if reverse else nonzero_indices[0]
class BboxFinder(BboxFinder2):
    """Finds the bounding box of the non-black content of a PIL image."""

    @classmethod
    def find_bbox(cls, im):
        """Return (x0, y0, x1, y1) of the non-black region of ``im``.

        The image is collapsed to one-byte greyscale; column and row sums
        then locate the first and last columns/rows with any intensity.
        """
        grey = np.array(im.convert('L'))  # one-byte greyscale
        col_sums = grey.sum(axis=0)
        row_sums = grey.sum(axis=1)
        assert len(col_sums) == im.width
        assert len(row_sums) == im.height
        left = cls._find_first_positive(col_sums)
        right = cls._find_first_positive(col_sums, reverse=True)
        top = cls._find_first_positive(row_sums)
        bottom = cls._find_first_positive(row_sums, reverse=True)
        return (left, top, right, bottom)
class BboxFinderTest(unittest.TestCase):
    """Checks find_bbox correctness and benchmarks it against the clock."""

    @staticmethod
    def _find1():
        # One benchmark iteration: draw the marker, then locate it.
        im = get_marked_image()
        return BboxFinder.find_bbox(im)
    def test_bbox_finder(self):
        im = get_marked_image()
        # NOTE(review): writes to the user's Desktop as a side effect.
        im.save(os.path.expanduser('~/Desktop/t.png'))
        bbox = BboxFinder.find_bbox(im)
        self.assertEqual((20, 10, 40, 30), bbox)
        # t = timeit.Timer(self._find1).autorange()
        elapsed = timeit.timeit(self._find1, number=1000)
        print(f'{elapsed:.3f}')
        # NOTE(review): wall-clock bounds are machine-dependent and may be
        # flaky on notably slower or faster hosts.
        self.assertLess(.15, elapsed)
        self.assertLess(elapsed, .99)
| [
"timeit.timeit",
"os.path.expanduser",
"numpy.nonzero"
] | [((3335, 3374), 'timeit.timeit', 'timeit.timeit', (['self._find1'], {'number': '(1000)'}), '(self._find1, number=1000)\n', (3348, 3374), False, 'import timeit\n'), ((2127, 2143), 'numpy.nonzero', 'np.nonzero', (['vals'], {}), '(vals)\n', (2137, 2143), True, 'import numpy as np\n'), ((3136, 3173), 'os.path.expanduser', 'os.path.expanduser', (['"""~/Desktop/t.png"""'], {}), "('~/Desktop/t.png')\n", (3154, 3173), False, 'import os\n')] |
#/usr/bin/python3
try:
from pymoab import core, types
except ImportError as err:
raise err('PyMoab not found, Reactor.export_h5m method not available')
import numpy as np
class dagmcGeom:
    """Builds a DAGMC-ready MOAB database from a pygmsh/gmsh mesh.

    Vertices, triangulated surfaces, volumes and their topology/sense
    relationships are copied from the pygmsh object into a pymoab core,
    which can then be written out as an .h5m file via export_h5m().
    """

    def __init__(self, pygmsh):
        # create pymoab instance
        self.mb = core.Core()
        self.tags = dict()
        self.__make_tags()
        self.pygmsh = pygmsh
        # gmsh id -> moab handle lookup tables, filled by transfer_geometry()
        self.moab_gmsh_verts = {}
        self.moab_gmsh_surfs = {}
        self.moab_gmsh_vols = {}
        return
    # make all the tags required by the DAGMC data model
    def __make_tags(self):
        """Create the standard DAGMC tags (surface sense, category,
        name, geometry dimension, global id)."""
        SENSE_TAG_NAME = "GEOM_SENSE_2"
        SENSE_TAG_SIZE = 2
        self.tags['surf_sense'] = self.mb.tag_get_handle(
            SENSE_TAG_NAME,
            SENSE_TAG_SIZE,
            types.MB_TYPE_HANDLE,
            types.MB_TAG_SPARSE,
            create_if_missing=True)
        self.tags['category'] = self.mb.tag_get_handle(
            types.CATEGORY_TAG_NAME,
            types.CATEGORY_TAG_SIZE,
            types.MB_TYPE_OPAQUE,
            types.MB_TAG_SPARSE,
            create_if_missing=True)
        self.tags['name'] = self.mb.tag_get_handle(
            types.NAME_TAG_NAME,
            types.NAME_TAG_SIZE,
            types.MB_TYPE_OPAQUE,
            types.MB_TAG_SPARSE,
            create_if_missing=True)
        self.tags['geom_dimension'] = self.mb.tag_get_handle(
            types.GEOM_DIMENSION_TAG_NAME,
            1,
            types.MB_TYPE_INTEGER,
            types.MB_TAG_DENSE,
            create_if_missing=True)
        # Global ID is a default tag, just need the name to retrieve
        self.tags['global_id'] = self.mb.tag_get_handle(types.GLOBAL_ID_TAG_NAME)
        return
    # using the pygmsh instance, transfer the geometry into the moab core
    def transfer_geometry(self):
        """Copy vertices, surfaces, volumes and topology in order."""
        self.make_vertices()
        self.make_surfaces()
        self.make_volumes()
        self.make_topology()
    # make the vertices
    def make_vertices(self):
        """Create all moab vertices and record the gmsh-id -> handle map."""
        vertices = []
        for vertex in self.pygmsh.node_x.keys():
            x = self.pygmsh.node_x[vertex]
            y = self.pygmsh.node_y[vertex]
            z = self.pygmsh.node_z[vertex]
            vertices.append([x,y,z])
        # create all vertices at once
        vert_handles = self.mb.create_vertices(vertices)
        # assign vertex/handle correspondence (same iteration order as above)
        for idx,vertex in enumerate(self.pygmsh.node_x.keys()):
            self.moab_gmsh_verts[vertex] = vert_handles[idx]
        return
    def make_surfaces(self):
        """Create one tagged meshset per surface and fill it with triangles."""
        for surf in self.pygmsh.surface_mesh.keys():
            surface_set = self.mb.create_meshset()
            self.mb.tag_set_data(self.tags['global_id'], surface_set, surf[1])
            self.mb.tag_set_data(self.tags['category'], surface_set, "Surface")
            self.mb.tag_set_data(self.tags['geom_dimension'], surface_set, 2)
            # triangle and vertex data
            triangles = self.pygmsh.surface_mesh[surf][0][0]
            vertices = self.pygmsh.surface_mesh[surf][1][0]
            # loop over the triangles; each consumes three consecutive
            # vertex ids from the flat vertex list
            for idx,triangle in enumerate(triangles):
                vert1 = self.moab_gmsh_verts[vertices[idx*3]]
                vert2 = self.moab_gmsh_verts[vertices[idx*3+1]]
                vert3 = self.moab_gmsh_verts[vertices[idx*3+2]]
                verts = np.array([vert1,vert2,vert3],dtype='uint64')
                tri = self.mb.create_element(types.MBTRI, verts)
                self.mb.add_entity(surface_set,tri)
            self.moab_gmsh_surfs[surf[1]] = surface_set
    # make the volume sets and their DAGMC tags
    def make_volumes(self):
        """Create one tagged (empty) meshset per volume."""
        for vol in self.pygmsh.volume_surface.keys():
            id = vol[1]
            volume_set = self.mb.create_meshset()
            self.mb.tag_set_data(self.tags['global_id'], volume_set, id)
            self.mb.tag_set_data(self.tags['category'], volume_set, "Volume")
            self.mb.tag_set_data(self.tags['geom_dimension'], volume_set, 3)
            self.moab_gmsh_vols[id] = volume_set
        return
    # set the topology
    def make_topology(self):
        """Link surfaces to their parent volumes and set surface senses."""
        # loop over the surfaces
        for surf in self.pygmsh.sense_data.keys():
            # surface_id = surf[1]
            # the line above crashed the code as it would return negative
            # numbers that don't exist in the dictionary. abs() has been added
            # which needs checking to see if it is valid
            surface_id = abs(surf[1])
            surf_handle = self.moab_gmsh_surfs[surface_id]
            # for each volume
            for vol in self.pygmsh.sense_data[surf]:
                volume_id = vol[1]
                volume_handle = self.moab_gmsh_vols[volume_id]
                self.mb.add_parent_child(volume_handle,surf_handle)
            # set the surface sense: a shared surface references both
            # volumes, a boundary surface pairs its volume with handle 0
            sense_data = self.pygmsh.sense_data[surf]
            if len(sense_data) > 1:
                senses = [self.moab_gmsh_vols[sense_data[0][1]],
                          self.moab_gmsh_vols[sense_data[1][1]]]
            else:
                senses = [self.moab_gmsh_vols[sense_data[0][1]],
                          np.uint64(0)]
            # set the sense tag
            self.mb.tag_set_data(self.tags['surf_sense'], surf_handle, senses)
    def assign_metadata(self):
        """Placeholder for assigning material metadata; currently only
        lists gmsh volumes (work in progress)."""
        import gmsh
        # returns entities of dimension 3 (always volumes) and their ids
        dims_and_volume_ids = gmsh.model.getEntities(3)
        for dim_and_vol_id in dims_and_volume_ids:
            volume_id = dim_and_vol_id[1]
            print('get entities in volume ', volume_id, ' and assign to moab core')
            # not sure if this is the way to go about the setting of meta data
        # for vol in self.pygmsh.sense_data[surf]:
        # volume_id = vol[1]
        # volume_handle = self.moab_gmsh_vols[volume_id]
    def export_h5m(self,filename):
        """Gather every entity set under one file set and write ``filename``.

        Returns the filename for convenience.
        """
        all_sets = self.mb.get_entities_by_handle(0)
        file_set = self.mb.create_meshset()
        self.mb.add_entities(file_set, all_sets)
        self.mb.write_file(filename)
        return filename
"""
surface_id = 1
volume_id = 1
for item in material_dict:
stl_filename = item['stl_filename']
if skip_graveyard and "graveyard" in stl_filename.lower():
continue
surface_set = mb.create_meshset()
volume_set = mb.create_meshset()
# recent versions of MOAB handle this automatically
# but best to go ahead and do it manually
mb.tag_set_data(tags['global_id'], volume_set, volume_id)
volume_id += 1
mb.tag_set_data(tags['global_id'], surface_set, surface_id)
surface_id += 1
# set geom IDs
mb.tag_set_data(tags['geom_dimension'], volume_set, 3)
mb.tag_set_data(tags['geom_dimension'], surface_set, 2)
# set category tag values
mb.tag_set_data(tags['category'], volume_set, "Volume")
mb.tag_set_data(tags['category'], surface_set, "Surface")
# establish parent-child relationship
mb.add_parent_child(volume_set, surface_set)
# set surface sense
sense_data = [volume_set, np.uint64(0)]
mb.tag_set_data(tags['surf_sense'], surface_set, sense_data)
# load the stl triangles/vertices into the surface set
mb.load_file(stl_filename, surface_set)
material_name = item['material']
if skip_graveyard and "graveyard" in stl_filename.lower():
continue
group_set = mb.create_meshset()
mb.tag_set_data(tags['category'], group_set, "Group")
print("mat:{}".format(material_name))
mb.tag_set_data(
tags['name'],
group_set,
"mat:{}".format(material_name))
mb.tag_set_data(tags['geom_dimension'], group_set, 4)
# add the volume to this group set
mb.add_entity(group_set, volume_set)
all_sets = mb.get_entities_by_handle(0)
file_set = mb.create_meshset()
mb.add_entities(file_set, all_sets)
"""
| [
"numpy.uint64",
"gmsh.model.getEntities",
"numpy.array",
"pymoab.core.Core"
] | [((281, 292), 'pymoab.core.Core', 'core.Core', ([], {}), '()\n', (290, 292), False, 'from pymoab import core, types\n'), ((5451, 5476), 'gmsh.model.getEntities', 'gmsh.model.getEntities', (['(3)'], {}), '(3)\n', (5473, 5476), False, 'import gmsh\n'), ((3327, 3374), 'numpy.array', 'np.array', (['[vert1, vert2, vert3]'], {'dtype': '"""uint64"""'}), "([vert1, vert2, vert3], dtype='uint64')\n", (3335, 3374), True, 'import numpy as np\n'), ((5136, 5148), 'numpy.uint64', 'np.uint64', (['(0)'], {}), '(0)\n', (5145, 5148), True, 'import numpy as np\n')] |
import argparse
import os
from tensorflow import keras
import numpy as np
from utils import generator, model, utils
# Command-line configuration (help strings are Chinese; translations inline).
parser = argparse.ArgumentParser()
parser.add_argument('--num_epoch', default=56, type=int, help='训练的轮数')  # number of training epochs
parser.add_argument('--lr', default=0.001, type=float, help='初始学习率的大小')  # initial learning rate
parser.add_argument('--batch_size', default=16, type=int, help='训练的批量大小')  # training batch size
parser.add_argument('--num_classes', default=3242, type=int, help='分类的类别数量')  # number of output classes
parser.add_argument('--train_list', default='dataset/train_list.txt', type=str, help='训练数据的数据列表路径')  # path to the training list file
parser.add_argument('--val_list', default='dataset/test_list.txt', type=str, help='测试数据的数据列表路径')  # path to the test list file
parser.add_argument('--resume', default=None, type=str, help='预训练模型的路径,当为None则不使用预训练模型')  # pretrained checkpoint path; None disables resuming
parser.add_argument('--model_path', default='models', type=str, help='模型保存的路径')  # directory where checkpoints are saved
args = parser.parse_args()
utils.print_arguments(args)
def main(args):
    """Train the classification network described by ``args``.

    Loads the train/validation lists, builds batched data generators,
    constructs the resnet model, optionally resumes from a checkpoint,
    then trains with per-epoch checkpointing and a step-decay
    learning-rate schedule.
    """
    # Datasets: flat lists of sample ids and their labels.
    trnlist, trnlb = utils.get_data_list(path=args.train_list)
    vallist, vallb = utils.get_data_list(path=args.val_list)
    # Generators
    trn_gen = generator.DataGenerator(list_IDs=trnlist.flatten(),
                                      labels=trnlb.flatten(),
                                      n_classes=args.num_classes,
                                      batch_size=args.batch_size)
    val_gen = generator.DataGenerator(list_IDs=vallist.flatten(),
                                      labels=vallb.flatten(),
                                      n_classes=args.num_classes,
                                      batch_size=args.batch_size)
    image_len = len(trnlist.flatten())
    # Build the model.
    network = model.vggvox_resnet2d_icassp(num_classes=args.num_classes, mode='train')
    # Load pretrained weights if resuming.
    initial_epoch = 0
    if args.resume:
        # NOTE(review): os.path.join with a single argument is a no-op;
        # this is equivalent to passing args.resume directly.
        network.load_weights(os.path.join(args.resume))
        # The epoch index is recovered from the checkpoint filename
        # ('resnet34-<epoch>.h5', as written by the callback below).
        initial_epoch = int(os.path.basename(args.resume)[:-3].split('-')[1])
        print('==> successfully loading model {}.'.format(args.resume))
    print(network.summary())
    print('==> training {} audios, classes: {} '.format(image_len, args.num_classes))
    if not os.path.exists(args.model_path):
        os.makedirs(args.model_path)
    # Step-decay schedule plus best-loss checkpointing per epoch.
    normal_lr = keras.callbacks.LearningRateScheduler(step_decay)
    callbacks = [keras.callbacks.ModelCheckpoint(os.path.join(args.model_path, 'resnet34-{epoch:02d}.h5'),
                                                 monitor='loss',
                                                 mode='min',
                                                 save_best_only=True), normal_lr]
    network.fit_generator(generator=trn_gen,
                          steps_per_epoch=int(image_len // args.batch_size),
                          epochs=args.num_epoch,
                          initial_epoch=initial_epoch,
                          max_queue_size=10,
                          callbacks=callbacks,
                          use_multiprocessing=True,
                          validation_data=val_gen,
                          workers=6,
                          verbose=1)
# Learning-rate decay schedule
def step_decay(epoch):
    """Piecewise-constant learning-rate schedule for Keras.

    Training is split into two halves; within each half the initial
    learning rate ``args.lr`` is scaled by 1.0, then 0.1, then 0.01,
    with milestones at 50% and 80% of the half.

    Parameters
    ----------
    epoch : int
        Zero-based epoch index supplied by ``LearningRateScheduler``.

    Returns
    -------
    float
        The learning rate to use for this epoch.
    """
    half_epoch = args.num_epoch // 2
    stage1, stage2, stage3 = int(half_epoch * 0.5), int(half_epoch * 0.8), half_epoch
    stage4 = stage3 + stage1
    stage5 = stage4 + (stage2 - stage1)
    stage6 = args.num_epoch
    milestone = [stage1, stage2, stage3, stage4, stage5, stage6]
    gamma = [1.0, 0.1, 0.01, 1.0, 0.1, 0.01]
    lr = 0.005  # fallback if epoch is past the last milestone
    init_lr = args.lr
    for bound, scale in zip(milestone, gamma):
        if epoch < bound:
            lr = init_lr * scale
            break
    print('Learning rate for epoch {} is {}.'.format(epoch + 1, lr))
    # np.float was removed in NumPy 1.24; the builtin float is equivalent.
    return float(lr)
if __name__ == "__main__":
    # Entry point: train using the command-line arguments parsed above.
    main(args)
| [
"os.path.exists",
"numpy.float",
"argparse.ArgumentParser",
"os.makedirs",
"tensorflow.keras.callbacks.LearningRateScheduler",
"os.path.join",
"os.path.basename",
"utils.utils.get_data_list",
"utils.utils.print_arguments",
"utils.model.vggvox_resnet2d_icassp"
] | [((128, 153), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (151, 153), False, 'import argparse\n'), ((931, 958), 'utils.utils.print_arguments', 'utils.print_arguments', (['args'], {}), '(args)\n', (952, 958), False, 'from utils import generator, model, utils\n'), ((1013, 1054), 'utils.utils.get_data_list', 'utils.get_data_list', ([], {'path': 'args.train_list'}), '(path=args.train_list)\n', (1032, 1054), False, 'from utils import generator, model, utils\n'), ((1076, 1115), 'utils.utils.get_data_list', 'utils.get_data_list', ([], {'path': 'args.val_list'}), '(path=args.val_list)\n', (1095, 1115), False, 'from utils import generator, model, utils\n'), ((1718, 1790), 'utils.model.vggvox_resnet2d_icassp', 'model.vggvox_resnet2d_icassp', ([], {'num_classes': 'args.num_classes', 'mode': '"""train"""'}), "(num_classes=args.num_classes, mode='train')\n", (1746, 1790), False, 'from utils import generator, model, utils\n'), ((2268, 2317), 'tensorflow.keras.callbacks.LearningRateScheduler', 'keras.callbacks.LearningRateScheduler', (['step_decay'], {}), '(step_decay)\n', (2305, 2317), False, 'from tensorflow import keras\n'), ((3752, 3764), 'numpy.float', 'np.float', (['lr'], {}), '(lr)\n', (3760, 3764), True, 'import numpy as np\n'), ((2182, 2213), 'os.path.exists', 'os.path.exists', (['args.model_path'], {}), '(args.model_path)\n', (2196, 2213), False, 'import os\n'), ((2223, 2251), 'os.makedirs', 'os.makedirs', (['args.model_path'], {}), '(args.model_path)\n', (2234, 2251), False, 'import os\n'), ((1877, 1902), 'os.path.join', 'os.path.join', (['args.resume'], {}), '(args.resume)\n', (1889, 1902), False, 'import os\n'), ((2367, 2423), 'os.path.join', 'os.path.join', (['args.model_path', '"""resnet34-{epoch:02d}.h5"""'], {}), "(args.model_path, 'resnet34-{epoch:02d}.h5')\n", (2379, 2423), False, 'import os\n'), ((1932, 1961), 'os.path.basename', 'os.path.basename', (['args.resume'], {}), '(args.resume)\n', (1948, 1961), False, 'import os\n')] |
import numpy as np
import pandas as pd
import os
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.model_selection import train_test_split
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import *
from keras.utils.np_utils import to_categorical
from keras.initializers import Constant
import re
import spacy
class Model(object):
    """Base class holding hyper-parameters shared by the pipeline stages."""

    def __init__(self, max_features=20000):
        """Store the maximum vocabulary size used by the tokenizer."""
        self.max_features = max_features
class Dataset(Model):
    """Loads labelled sentence data and prepares padded integer sequences.

    ``data`` may be a single TSV path or a list of paths whose rows are
    concatenated into one DataFrame.
    """

    def __init__(self,
                 data=['data/train.tsv', 'data/dev.tsv'],
                 sep='\t',
                 nlp=None,
                 sentence_header='sentence',
                 sentiment_header='label',
                 len_header='len',
                 **kwargs):
        super(Dataset, self).__init__(**kwargs)
        if isinstance(data, str):
            self.data = pd.read_csv(data, sep=sep)
        elif isinstance(data, list):
            # DataFrame.append was deprecated and removed in pandas 2.0;
            # pd.concat stacks the per-file frames with the same result
            # (original row indices are preserved, as append did).
            frames = [pd.read_csv(item, sep=sep) for item in data]
            self.data = pd.concat(frames)
        self.nlp = nlp
        self.sentence_header = sentence_header
        self.sentiment_header = sentiment_header
        self.len_header = len_header
        self.tokenizer = None
        self.word_index = None

    def spacy_tokenize(self):
        """Re-tokenize every sentence with spaCy and record token counts."""
        self.data[self.sentence_header] = self.data[self.sentence_header].apply(self.parse)
        self.data[self.len_header] = self.data[self.sentence_header].apply(lambda x: len(str(x).split(' ')))
        # Pad/truncate length: one more than the longest sentence seen.
        self.sequence_length = self.data[self.len_header].max() + 1

    def keras_train_test_split(self, split=" ", oov_token="<unw>", filters=" "):
        """Fit the Keras tokenizer and return a padded 70/30 train/test split."""
        self.tokenizer = Tokenizer(num_words=self.max_features, split=split, oov_token=oov_token, filters=filters)
        self.tokenizer.fit_on_texts(self.data[self.sentence_header].values)
        X = self.tokenizer.texts_to_sequences(self.data[self.sentence_header].values)
        X = pad_sequences(X, self.sequence_length)
        y = pd.get_dummies(self.data[self.sentiment_header]).values
        self.word_index = self.tokenizer.word_index
        return train_test_split(X, y, test_size=0.3)

    def parse(self, x):
        """Return ``x`` reduced to its space-joined alphabetic spaCy tokens."""
        return ' '.join([y.text for y in self.nlp(x, disable=['parser', 'tagger', 'ner']) if y.is_alpha])
class WordEmbedding(Dataset):
    """Adds pre-trained GloVe-style word vectors on top of the dataset."""

    def __init__(self,
                 path='data/glove.6B.300d.txt',
                 encoding='utf-8',
                 dtype='float32',
                 dim=300,
                 **kwargs):
        super(WordEmbedding, self).__init__(**kwargs)
        self.path = path
        self.encoding = encoding
        self.dtype = dtype  # numpy dtype used for the stored vectors
        self.embeddings_index = {}  # word -> embedding vector
        self.embedding_matrix = None
        self.embedding_dim = dim
        self.num_words = None
        self.word_index = None

    def read_embedding(self):
        """Parse the embedding file into ``self.embeddings_index``.

        Each line holds a word followed by its vector components.  The
        file handle is managed by a ``with`` block so it is closed even
        if parsing fails part-way through (the original used a bare
        open/close pair).
        """
        with open(self.path, encoding=self.encoding) as f:
            for line in f:
                values = line.split()
                word = values[0]
                # Use the configured dtype; it was previously hard-coded
                # to 'float32', which is also the default value.
                coefs = np.asarray(values[1:], dtype=self.dtype)
                self.embeddings_index[word] = coefs
        self.word_index = self.tokenizer.word_index

    def build_embedding_matrix(self):
        """Build the (num_words, embedding_dim) matrix used by Keras.

        Rows for words found in the embedding file copy the pre-trained
        vector; missing words get a random vector so they still start
        from a non-degenerate state.
        """
        self.num_words = min(self.max_features, len(self.word_index)) + 1
        self.embedding_matrix = np.zeros((self.num_words, self.embedding_dim))
        for word, i in self.word_index.items():
            if i > self.max_features:
                continue
            embedding_vector = self.embeddings_index.get(word)
            if embedding_vector is not None:
                # we found the word - add that word's vector to the matrix
                self.embedding_matrix[i] = embedding_vector
            else:
                # doesn't exist, assign a random vector
                self.embedding_matrix[i] = np.random.randn(self.embedding_dim)
class SentimentModel(WordEmbedding):
    """Bidirectional-LSTM sentiment classifier over the embedding matrix."""

    def __init__(self, **kwargs):
        super(SentimentModel, self).__init__(**kwargs)
        self.model = None
    def compile_sentiment_model(self, units=2, trainable=False):
        """Build and compile the Keras model.

        The embedding layer is initialised from the pre-built matrix and
        is frozen unless ``trainable`` is True; ``units`` is the number
        of output classes.
        """
        self.model = Sequential()
        self.model.add(Embedding(self.num_words,
                                 self.embedding_dim,
                                 embeddings_initializer=Constant(self.embedding_matrix),
                                 input_length=self.sequence_length,
                                 trainable=trainable))
        self.model.add(SpatialDropout1D(0.2))
        self.model.add(Bidirectional(CuDNNLSTM(64, return_sequences=True)))
        self.model.add(Bidirectional(CuDNNLSTM(32)))
        self.model.add(Dropout(0.25))
        self.model.add(Dense(units=units, activation='softmax'))
        self.model.compile(loss = 'categorical_crossentropy', optimizer='adam',metrics = ['accuracy'])
    def fit_sentiment_model(self, X_train, y_train, epochs=5, batch_size=128, verbose=0, validation_split=0.3):
        """Train the compiled model and return the Keras History object."""
        history = self.model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size, verbose=verbose, validation_split=validation_split)
        return history
    def evaluate_sentiment_model(self, X_test, y_test):
        """Return accuracy and Matthews correlation on one-hot test data.

        NOTE(review): with sklearn's label ordering conf[0][0] counts
        class 0 (conventionally the true negatives), so the tp/tn names
        below are swapped relative to convention; the MCC formula is
        symmetric under that swap, so the value is still correct.
        """
        y_hat = self.model.predict(X_test)
        acc = accuracy_score(list(map(lambda x: np.argmax(x), y_test)), list(map(lambda x: np.argmax(x), y_hat)))
        conf = confusion_matrix(list(map(lambda x: np.argmax(x), y_test)), list(map(lambda x: np.argmax(x), y_hat)))
        tp = conf[0][0]
        fn = conf[0][1]
        fp = conf[1][0]
        tn = conf[1][1]
        matthews_correlation_coefficient = \
            ((tp * tn) - (fp * fn)) / ( (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn) ) ** 0.5
        return {
            "Matthews correlation coefficient" : matthews_correlation_coefficient,
            "Accuracy score": acc
        }
class SentimentPipeline(SentimentModel):
    """End-to-end driver: tokenize, embed, train and evaluate in one call."""

    def __init__(self, **kwargs):
        super(SentimentPipeline, self).__init__(**kwargs)

    def execute_sentiment_pipeline(self):
        """Run every pipeline stage in order.

        Returns a ``(history, metrics)`` tuple: the Keras training
        history and the evaluation dictionary from the held-out split.
        """
        self.spacy_tokenize()
        train_x, test_x, train_y, test_y = self.keras_train_test_split()
        self.read_embedding()
        self.build_embedding_matrix()
        self.compile_sentiment_model()
        training_history = self.fit_sentiment_model(train_x, train_y)
        metrics = self.evaluate_sentiment_model(test_x, test_y)
        return training_history, metrics
| [
"keras.preprocessing.text.Tokenizer",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.asarray",
"numpy.argmax",
"keras.models.Sequential",
"numpy.zeros",
"keras.initializers.Constant",
"pandas.get_dummies",
"keras.preprocessing.sequence.pad_sequences",
"numpy.random.randn"
... | [((1907, 2000), 'keras.preprocessing.text.Tokenizer', 'Tokenizer', ([], {'num_words': 'self.max_features', 'split': 'split', 'oov_token': 'oov_token', 'filters': 'filters'}), '(num_words=self.max_features, split=split, oov_token=oov_token,\n filters=filters)\n', (1916, 2000), False, 'from keras.preprocessing.text import Tokenizer\n'), ((2171, 2209), 'keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['X', 'self.sequence_length'], {}), '(X, self.sequence_length)\n', (2184, 2209), False, 'from keras.preprocessing.sequence import pad_sequences\n'), ((2345, 2382), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)'}), '(X, y, test_size=0.3)\n', (2361, 2382), False, 'from sklearn.model_selection import train_test_split\n'), ((3530, 3576), 'numpy.zeros', 'np.zeros', (['(self.num_words, self.embedding_dim)'], {}), '((self.num_words, self.embedding_dim))\n', (3538, 3576), True, 'import numpy as np\n'), ((4324, 4336), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (4334, 4336), False, 'from keras.models import Sequential\n'), ((1032, 1058), 'pandas.read_csv', 'pd.read_csv', (['data'], {'sep': 'sep'}), '(data, sep=sep)\n', (1043, 1058), True, 'import pandas as pd\n'), ((2222, 2270), 'pandas.get_dummies', 'pd.get_dummies', (['self.data[self.sentiment_header]'], {}), '(self.data[self.sentiment_header])\n', (2236, 2270), True, 'import pandas as pd\n'), ((3227, 3266), 'numpy.asarray', 'np.asarray', (['values[1:]'], {'dtype': '"""float32"""'}), "(values[1:], dtype='float32')\n", (3237, 3266), True, 'import numpy as np\n'), ((1120, 1149), 'pandas.read_csv', 'pd.read_csv', (['data[0]'], {'sep': 'sep'}), '(data[0], sep=sep)\n', (1131, 1149), True, 'import pandas as pd\n'), ((4047, 4082), 'numpy.random.randn', 'np.random.randn', (['self.embedding_dim'], {}), '(self.embedding_dim)\n', (4062, 4082), True, 'import numpy as np\n'), ((1206, 1232), 'pandas.read_csv', 'pd.read_csv', (['item'], {'sep': 'sep'}), 
'(item, sep=sep)\n', (1217, 1232), True, 'import pandas as pd\n'), ((4485, 4516), 'keras.initializers.Constant', 'Constant', (['self.embedding_matrix'], {}), '(self.embedding_matrix)\n', (4493, 4516), False, 'from keras.initializers import Constant\n'), ((5437, 5449), 'numpy.argmax', 'np.argmax', (['x'], {}), '(x)\n', (5446, 5449), True, 'import numpy as np\n'), ((5480, 5492), 'numpy.argmax', 'np.argmax', (['x'], {}), '(x)\n', (5489, 5492), True, 'import numpy as np\n'), ((5554, 5566), 'numpy.argmax', 'np.argmax', (['x'], {}), '(x)\n', (5563, 5566), True, 'import numpy as np\n'), ((5597, 5609), 'numpy.argmax', 'np.argmax', (['x'], {}), '(x)\n', (5606, 5609), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# https://www.microchip.com/wwwproducts/en/ATSAMD21E18
import attr
import time
from serial import Serial
import struct
from math import log10, sin, cos, acos, atan2, asin, pi, sqrt
from collections import deque
from collections import namedtuple
import numpy as np
import cv2
from slurm.rate import Rate
import pickle
from opencv_camera import ThreadedCamera
from opencv_camera.color_space import ColorSpace
# One camera frame bundled with the IMU sample taken alongside it.
ImageIMU = namedtuple("ImageIMU","image accel gyro temperature timestamp")
# Angle conversions (deg2rad is kept alongside DEG2RAD for compatibility).
deg2rad = pi / 180.0
RAD2DEG = 180/pi
DEG2RAD = pi/180
FT2M = 0.3048 # feet to meters
MI2M = 1609.34 # miles to meters
PACKET_LEN = 7  # floats per serial packet: 3 accel + 3 gyro + 1 temperature
class AverageFilter(deque):
    """Fixed-length deque of 3-vectors with a running-average helper.

    The buffer is pre-filled with zero vectors, so ``avg`` is defined
    from the very first call.
    """

    def __init__(self, maxlen=5):
        super().__init__(maxlen=maxlen)
        self.extend(np.zeros(3) for _ in range(maxlen))

    def avg(self):
        """Return the element-wise mean of the buffered vectors."""
        return sum(self) / len(self)
def normalize3(x, y, z):
    """Return (x, y, z) scaled to unit length.

    A vector that is already exactly unit length is returned unchanged.

    Raises
    ------
    ZeroDivisionError
        If the vector's norm is numerically zero.  The original guard
        compared the *inverse* norm against 1e-6, which wrongly rejected
        large-magnitude vectors and never caught near-zero ones; the
        check is now on the norm itself.
    """
    norm = sqrt(x * x + y * y + z * z)
    # already a unit vector
    if norm == 1.0:
        return (x, y, z)
    if norm < 1e-6:
        raise ZeroDivisionError(
            f'norm({x:.4f}, {y:.4f}, {z:.4f}) is too small to normalize')
    inorm = 1.0 / norm
    return (x * inorm, y * inorm, z * inorm)
# @attr.s(slots=True)
class IMUDriver:
    """Reads accel/gyro/temperature packets from the IMU firmware over a
    serial port.

    Protocol (from the visible read logic): the host sends ``b"g"``; the
    device answers with a 0xFF start byte followed by PACKET_LEN (7)
    little-endian float32 values: ax, ay, az [g], gx, gy, gz [rad/s],
    temperature [C].
    """
    __slots__ = ["s"]

    def __init__(self, port):
        # 115200 baud; short timeout so the start-byte scan can poll
        # without blocking indefinitely.
        speed = 115200
        self.s = Serial(port, speed, timeout=0.01)

    def close(self):
        """Release the serial port."""
        self.s.close()

    def read(self):
        """Request one packet and parse it.

        Returns:
            (accel, gyro, temperature) tuple, or None when no 0xFF start
            byte was found within data_size read attempts.
        """
        data_size = PACKET_LEN * 4  # 7 x float32
        self.s.reset_input_buffer()
        self.s.write(b"g")
        # Scan for the 0xFF start-of-packet marker, skipping timeouts
        # (empty reads) and stray bytes.
        bad = True
        for _ in range(data_size):
            m = self.s.read(1)
            if m == b"":
                continue
            if m != b"\xff":
                continue
            else:
                bad = False
                break
        if bad:
            return None
        d = self.s.read(data_size)
        # BUGFIX: the original loop was `while num != data_size:
        # d += self.s.read(num-len(d))` with `num` never updated, so it
        # requested 0 bytes and spun forever on a short read.  Keep
        # reading until the full packet has arrived.
        while len(d) < data_size:
            d += self.s.read(data_size - len(d))
        msg = struct.unpack("fffffff", d)
        a = msg[:3]   # accelerations, g's
        g = msg[3:6]  # angular rates, rads/sec
        t = msg[6]    # temperature, C
        return a, g, t

    def compensate(self, accel, mag=None):
        """Compute (roll, pitch, heading) in radians from an accelerometer
        reading, with optional tilt-compensated magnetic heading.

        heading is None when no magnetometer reading is supplied.
        Returns (0.0, 0.0, 0.0) if normalization fails on a zero-norm
        input (ZeroDivisionError is caught and reported).
        """
        try:
            ax, ay, az = normalize3(*accel)
            pitch = asin(-ax)
            if abs(pitch) >= pi/2:
                # Gimbal-adjacent case: roll is undefined, use 0.
                roll = 0.0
            else:
                roll = asin(ay/cos(pitch))
            if mag:
                mx, my, mz = normalize3(*mag)
                # Tilt-compensated magnetic heading.
                x = mx*cos(pitch)+mz*sin(pitch)
                y = mx*sin(roll)*sin(pitch)+my*cos(roll)-mz*sin(roll)*cos(pitch)
                heading = atan2(y, x)
                # wrap heading into [0, 2*pi)
                if heading > 2*pi:
                    heading -= 2*pi
                elif heading < 0:
                    heading += 2*pi
            else:
                heading = None
            return (roll, pitch, heading,)
        except ZeroDivisionError as e:
            print('Error', e)
            return (0.0, 0.0, 0.0,)

    def height(self, p):
        """Convert pressure p [hPa] to barometric altitude [m]
        (international barometric formula, sea level = 1013.25 hPa)."""
        h = (1 - pow(p / 1013.25, 0.190263)) * 44330.8
        return h
def savePickle(data, filename):
    """Serialize *data* to *filename* using pickle."""
    with open(filename, 'wb') as fd:
        pickle.dump(data, fd)
# --- Capture script: stream camera frames + IMU samples, save on demand ---
# Moving-average filters over the last 5 accel / gyro samples.
af = AverageFilter(5)
gf = AverageFilter(5)
#
# for i in range(20):
#     v = np.array([i,i,i])
#     a.append(0.1*v)
#     print(a.avg())
#
# exit(0)
loop_rate = Rate(100)  # target loop frequency of 100 Hz
images = []            # ImageIMU snapshots collected during the session
last = time.monotonic()
loop = 1
rate = 0.0
# port = "/dev/tty.usbmodem14401"
port = "/dev/tty.usbmodem14501"
s = IMUDriver(port)
# aa = AverageFilter(10)
path = 0  # presumably the default camera device index -- TODO confirm
camera = ThreadedCamera()
res = None
# res = (480,640)
# res = (720,2560)
camera.open(path, resolution=res, fmt=ColorSpace.gray)
# Seed filtered readings with the (all-zero) initial filter contents.
aa = af.avg()
gg = gf.avg()
t = 0
try:
    start = time.monotonic()
    while True:
        # Refresh the filtered readings on most iterations
        # (skipped when loop is a multiple of 5).
        if loop % 5:
            aa = af.avg()
            gg = gf.avg()
        ok, img = camera.read()
        if ok:
            # Show a half-size preview; 'q' quits, 's' saves a snapshot
            # of the frame together with the current IMU state.
            h,w = img.shape
            si = cv2.resize(img, (w//2,h//2))
            cv2.imshow('capture', si)
            ch = cv2.waitKey(20)
            if ch == ord('q'):
                break
            elif ch == ord('s'):
                imgimu = ImageIMU(img,aa,gg,t,(time.monotonic() - start))
                images.append(imgimu)
        else:
            # Placeholder so the status line below can print img.shape.
            img = np.array((1,1))
        # roll, pitch, _ = s.compensate(aa)
        # roll, pitch, _ = 0,0,0
        # roll *= RAD2DEG
        # pitch *= RAD2DEG
        # yaw *= RAD2DEG
        print(f"R: {rate:3.0f} I: {img.shape} A: {aa[0]:5.2f} {aa[1]:5.2f} {aa[2]:5.2f} G: {gg[0]:5.2f} {gg[1]:5.2f} {gg[2]:5.2f} T: {t:2.1f}", end="\r")
        # if ok and 100%20 == 0:
        #     print(ok, img.shape)
        # Poll the IMU; None means no start byte was seen this cycle.
        ret = s.read()
        # ret = None
        if ret:
            a,g,t = ret
            # a = (a[0]-0.11, a[1]-0.82, a[2])
            af.append(np.array(a))
            gf.append(np.array(g))
            #
            # roll, pitch, _ = s.compensate(a)
            # roll *= RAD2DEG
            # pitch *= RAD2DEG
            # yaw *= RAD2DEG
            # if loop % 20 == 0:
            #     # print(f"R: {rate:6.1f} A: {a[0]:5.3f} {a[1]:5.3f} {a[2]:5.3f} G: {g[0]:5.2f} {g[1]:5.2f} {g[2]:5.2f} M: {m[0]:5.1f} {m[1]:5.1f} {m[2]:5.1f} H: {p:5.1f} T: {t:3.1f}", end="\r")
            #     print(f"R: {rate:6.1f} A: {a[0]:5.3f} {a[1]:5.3f} {a[2]:5.3f} G: {g[0]:5.2f} {g[1]:5.2f} {g[2]:5.2f} T: {t:3.1f}", end="\r")
            #     # print(f"roll: {roll:6.1f} pitch: {pitch:6.1f} yaw: {yaw:6.1f}", end="\r")
        # Measured loop rate over the last 100 iterations.
        if loop % 100 == 0:
            now = time.monotonic()
            rate = 100/(now - last)
            last = now
            # print(f">> Rate: {rate:0.3f} Hz")
        loop += 1
        loop_rate.sleep()
except KeyboardInterrupt:
    # s.close()
    print("ctrl-C")
finally:
    # Always release the camera/windows; dump any captured snapshots.
    camera.close()
    cv2.destroyAllWindows()
    if len(images) > 0:
        savePickle(images, "images.pickle")
    print("\n\nbye ...\n")
| [
"collections.namedtuple",
"pickle.dumps",
"time.monotonic",
"math.asin",
"math.sqrt",
"cv2.imshow",
"math.cos",
"numpy.array",
"numpy.zeros",
"struct.unpack",
"cv2.destroyAllWindows",
"serial.Serial",
"math.atan2",
"math.sin",
"cv2.resize",
"slurm.rate.Rate",
"cv2.waitKey",
"opencv... | [((442, 506), 'collections.namedtuple', 'namedtuple', (['"""ImageIMU"""', '"""image accel gyro temperature timestamp"""'], {}), "('ImageIMU', 'image accel gyro temperature timestamp')\n", (452, 506), False, 'from collections import namedtuple\n'), ((4647, 4656), 'slurm.rate.Rate', 'Rate', (['(100)'], {}), '(100)\n', (4651, 4656), False, 'from slurm.rate import Rate\n'), ((4676, 4692), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (4690, 4692), False, 'import time\n'), ((4844, 4860), 'opencv_camera.ThreadedCamera', 'ThreadedCamera', ([], {}), '()\n', (4858, 4860), False, 'from opencv_camera import ThreadedCamera\n'), ((1150, 1177), 'math.sqrt', 'sqrt', (['(x * x + y * y + z * z)'], {}), '(x * x + y * y + z * z)\n', (1154, 1177), False, 'from math import log10, sin, cos, acos, atan2, asin, pi, sqrt\n'), ((5017, 5033), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (5031, 5033), False, 'import time\n'), ((7165, 7188), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (7186, 7188), False, 'import cv2\n'), ((1666, 1699), 'serial.Serial', 'Serial', (['port', 'speed'], {'timeout': '(0.01)'}), '(port, speed, timeout=0.01)\n', (1672, 1699), False, 'from serial import Serial\n'), ((2685, 2712), 'struct.unpack', 'struct.unpack', (['"""fffffff"""', 'd'], {}), "('fffffff', d)\n", (2698, 2712), False, 'import struct\n'), ((4438, 4456), 'pickle.dumps', 'pickle.dumps', (['data'], {}), '(data)\n', (4450, 4456), False, 'import pickle\n'), ((2968, 2977), 'math.asin', 'asin', (['(-ax)'], {}), '(-ax)\n', (2972, 2977), False, 'from math import log10, sin, cos, acos, atan2, asin, pi, sqrt\n'), ((6900, 6916), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (6914, 6916), False, 'import time\n'), ((842, 853), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (850, 853), True, 'import numpy as np\n'), ((3359, 3370), 'math.atan2', 'atan2', (['y', 'x'], {}), '(y, x)\n', (3364, 3370), False, 'from math import log10, sin, cos, acos, atan2, 
asin, pi, sqrt\n'), ((5232, 5265), 'cv2.resize', 'cv2.resize', (['img', '(w // 2, h // 2)'], {}), '(img, (w // 2, h // 2))\n', (5242, 5265), False, 'import cv2\n'), ((5277, 5302), 'cv2.imshow', 'cv2.imshow', (['"""capture"""', 'si'], {}), "('capture', si)\n", (5287, 5302), False, 'import cv2\n'), ((5324, 5339), 'cv2.waitKey', 'cv2.waitKey', (['(20)'], {}), '(20)\n', (5335, 5339), False, 'import cv2\n'), ((5599, 5615), 'numpy.array', 'np.array', (['(1, 1)'], {}), '((1, 1))\n', (5607, 5615), True, 'import numpy as np\n'), ((6183, 6194), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (6191, 6194), True, 'import numpy as np\n'), ((6218, 6229), 'numpy.array', 'np.array', (['g'], {}), '(g)\n', (6226, 6229), True, 'import numpy as np\n'), ((3090, 3100), 'math.cos', 'cos', (['pitch'], {}), '(pitch)\n', (3093, 3100), False, 'from math import log10, sin, cos, acos, atan2, asin, pi, sqrt\n'), ((3227, 3237), 'math.cos', 'cos', (['pitch'], {}), '(pitch)\n', (3230, 3237), False, 'from math import log10, sin, cos, acos, atan2, asin, pi, sqrt\n'), ((3241, 3251), 'math.sin', 'sin', (['pitch'], {}), '(pitch)\n', (3244, 3251), False, 'from math import log10, sin, cos, acos, atan2, asin, pi, sqrt\n'), ((3322, 3332), 'math.cos', 'cos', (['pitch'], {}), '(pitch)\n', (3325, 3332), False, 'from math import log10, sin, cos, acos, atan2, asin, pi, sqrt\n'), ((3285, 3295), 'math.sin', 'sin', (['pitch'], {}), '(pitch)\n', (3288, 3295), False, 'from math import log10, sin, cos, acos, atan2, asin, pi, sqrt\n'), ((3299, 3308), 'math.cos', 'cos', (['roll'], {}), '(roll)\n', (3302, 3308), False, 'from math import log10, sin, cos, acos, atan2, asin, pi, sqrt\n'), ((3312, 3321), 'math.sin', 'sin', (['roll'], {}), '(roll)\n', (3315, 3321), False, 'from math import log10, sin, cos, acos, atan2, asin, pi, sqrt\n'), ((3275, 3284), 'math.sin', 'sin', (['roll'], {}), '(roll)\n', (3278, 3284), False, 'from math import log10, sin, cos, acos, atan2, asin, pi, sqrt\n'), ((5489, 5505), 'time.monotonic', 
'time.monotonic', ([], {}), '()\n', (5503, 5505), False, 'import time\n')] |
import argparse
from datetime import datetime
import time
import os
from tqdm import trange, tqdm
from timeit import default_timer as timer
import numpy as np
import matplotlib.pyplot as plt
from collections import deque
from perlin import TileableNoise
from math import sin, pi
from random import random, seed, uniform, randrange
from scene_storage import *
try:
from manta import *
import gc
except ImportError:
pass
import sys
sys.path.append(sys.path[0]+"/../")
from keras_models_combined_cleansplit import *
from keras_data import read_args_file
from scipy import ndimage
from scipy.signal import argrelextrema
import matplotlib.pyplot as plt
# --- Command line interface for the Karman-vortex prediction driver ---
parser = argparse.ArgumentParser()
parser.add_argument("--load_path", type=str, required=True)
parser.add_argument('--warmup_steps', type=int, default=10)
parser.add_argument('--randomized_warmup_steps', action='store_true')
parser.add_argument('--min_warmup_steps', type=int, default=10)
parser.add_argument('--seed', type=int, default=10)
parser.add_argument('--num_frames', type=int, default=100)
parser.add_argument('--num_scenes', type=int, default=1)
parser.add_argument('--output_images', action='store_true')
parser.add_argument('--dont_delete_images', action='store_true')
parser.add_argument('--output_uni', action='store_true')
parser.add_argument('--additional_inflow', action='store_true')
parser.add_argument('--random_sink', action='store_true')
parser.add_argument('--random_obstacle', action='store_true')
parser.add_argument('--second_order_density_advection', action='store_true')
parser.add_argument('--show_gui', action='store_true')
parser.add_argument('--classic_ae', action='store_true')
parser.add_argument('--profile', action='store_true')
parser.add_argument('--upres', action='store_true')
parser.add_argument('--load_warmup_from_disk', action='store_true')
parser.add_argument('--override_vel', action='store_true')
parser.add_argument('--min_vel', type=float, default=0.0)
parser.add_argument('--max_vel', type=float, default=0.0)
parser.add_argument('--randomize_vel', action='store_true')
add_storage_args(parser)
args = parser.parse_args()
# Unpack CLI arguments into module-level names used by main().
warmup_steps = args.warmup_steps
randomized_warmup_steps = args.randomized_warmup_steps
min_warmup_steps = args.min_warmup_steps
nseed = args.seed
num_frames = args.num_frames
num_scenes = args.num_scenes
output_images = args.output_images
dont_delete_images = args.dont_delete_images
output_uni = args.output_uni
prediction_type = args.prediction_type
screenshot_path_format = args.screenshot_path_format
field_path_format = args.field_path_format
show_gui = args.show_gui
classic_ae = args.classic_ae
profile = args.profile
upres = args.upres
second_order_density_advection = args.second_order_density_advection
load_warmup_from_disk = args.load_warmup_from_disk
override_vel = args.override_vel
min_vel_override = args.min_vel
max_vel_override = args.max_vel
randomize_vel = args.randomize_vel
# Derive the model name from the last path components of load_path;
# a trailing "checkpoint" directory is skipped in favor of its parent.
model_name = args.load_path.rstrip(os.path.sep+"/\\")
model_name = model_name.split(os.path.sep)[-2:]
if model_name[1] == "checkpoint":
    model_name = model_name[0]
else:
    model_name = model_name[1]
print(model_name)
# Output directories; log_dir gains a per-scene "%06d" placeholder.
log_dir = create_folder_hierarchy("pred_smoke_karman", model_name, args.prediction_type, nseed)
dump_metadata(log_dir, args)
perf_data_path = log_dir
log_dir += "%06d/"
# Load input_args.json
with open(find_input_args_file(args.load_path)) as f:
    config_json = json.load(f)
config = DictToNamespace(config_json)
# read config entries
input_frame_count = config.input_frame_count
prediction_window = config.w_num
decode_predictions = config.decode_predictions
skip_pred_steps = config.skip_pred_steps
init_state_network = config.init_state_network
in_out_states = config.in_out_states
pred_gradient_loss = config.pred_gradient_loss
ls_prediction_loss = config.ls_prediction_loss
ls_supervision = config.ls_supervision
sqrd_diff_loss = config.sqrd_diff_loss
ls_split = config.ls_split
# Dataset metadata: prefer data_args.txt next to the model, fall back to
# the training data path.
model_base_dir = find_model_base_dir(args.load_path)
data_args_path = None
if os.path.exists( os.path.join( model_base_dir, "data_args.txt")):
    data_args_path = os.path.join(model_base_dir, "data_args.txt")
    dataset_meta_info = read_args_file(data_args_path)
else:
    data_args_path = os.path.join(config.data_path, "args.txt")
    dataset_meta_info = read_args_file(data_args_path)
sup_param_count = max(1,int(dataset_meta_info['num_param']) - 2) # two parameters are always present -> scene num and frame num
res_x = int(dataset_meta_info["resolution_x"])
res_y = int(dataset_meta_info["resolution_y"])
res_z = int(dataset_meta_info["resolution_z"])
# Channels: 2D velocity (+1 for 3D), +1 when density is part of the data.
in_out_dim = 3 if "density" in config.data_type else 2
in_out_dim = in_out_dim + 1 if config.is_3d else in_out_dim
input_shape = (input_frame_count,)
input_shape += (res_z,) if config.is_3d else ()
input_shape += (res_y, res_x, in_out_dim)
# Build the recursive prediction network and load trained weights.
if classic_ae:
    rec_pred = RecursivePredictionCleanSplit(config=config, input_shape=input_shape, decode_predictions=decode_predictions, skip_pred_steps=skip_pred_steps, init_state_network=init_state_network, in_out_states=in_out_states, pred_gradient_loss=pred_gradient_loss, ls_prediction_loss=ls_prediction_loss, ls_supervision=ls_supervision, sqrd_diff_loss=sqrd_diff_loss, ls_split=ls_split, supervised_parameters=sup_param_count)
else:
    rec_pred = RecursivePrediction(config=config, input_shape=input_shape, decode_predictions=decode_predictions, skip_pred_steps=skip_pred_steps, init_state_network=init_state_network, in_out_states=in_out_states, pred_gradient_loss=pred_gradient_loss, ls_prediction_loss=ls_prediction_loss, ls_supervision=ls_supervision, sqrd_diff_loss=sqrd_diff_loss, ls_split=ls_split, supervised_parameters=sup_param_count)
rec_pred.load_model(args.load_path, data_args_path=data_args_path) # load_path argument
# Standalone latent-space predictor sharing the trained weights.
pred = Prediction(config=rec_pred.config, input_shape=(rec_pred.w_num, rec_pred.z_num))
pred._build_model()
pred.model.set_weights(rec_pred.pred.model.get_weights())
# Load dataset args
# NOTE(review): `args` is rebound here from the CLI namespace to the
# dataset metadata -- later reads of args.* refer to the dataset file.
args = DictToNamespace(dataset_meta_info)
# Normalization ranges for velocity and density (model dir preferred).
if os.path.exists( os.path.join( model_base_dir, "v_range.txt")):
    vr = np.loadtxt(os.path.join(model_base_dir, "v_range.txt"))
else:
    vr = np.loadtxt(os.path.join(config.data_path, "v_range.txt"))
normalization_factor_v = max(abs(vr[0]), abs(vr[1]))
print("Normalization Factor Velocity: {}".format(normalization_factor_v))
if os.path.exists( os.path.join( model_base_dir, "d_range.txt")):
    dr = np.loadtxt(os.path.join(model_base_dir, "d_range.txt"))
else:
    dr = np.loadtxt(os.path.join(config.data_path, "d_range.txt"))
normalization_factor_d = max(abs(dr[0]), abs(dr[1]))
print("Normalization Factor Density: {}".format(normalization_factor_d))
# Deterministic runs for a given seed.
np.random.seed(seed=int(nseed))
seed(nseed)
assert sup_param_count == 1, "Supervised param count {} does not match {}!".format(sup_param_count, 1)
boundary_cond_order = int(args.boundary_cond_order)
density_adv_order = 2 if second_order_density_advection else int(args.density_adv_order)
training_warmup_steps = int(args.warmup_steps)
if training_warmup_steps > warmup_steps:
    print("WARNING: training warmup steps {} were higher than given warmup_steps parameter... warmup_steps={}".format(training_warmup_steps, warmup_steps))
def main():
    """Run num_scenes Karman-vortex smoke simulations, replacing the solver
    with latent-space prediction after the per-scene warmup phase, and dump
    fields, timings, and a shedding-frequency estimate to disk."""
    prediction_history = PredictionHistory(in_ts=rec_pred.w_num, data_shape=(rec_pred.z_num,))
    # solver params
    res_x = int(args.resolution_x)
    res_y = int(args.resolution_y)
    res_z = int(args.resolution_z)
    gs = vec3(res_x, res_y, res_z)
    res_max = max(res_x, max(res_y, res_z))
    s = Solver(name='main', gridSize=gs, dim=3 if res_z > 1 else 2)
    s.frameLength = float(args.time_step)
    s.timestep = float(args.time_step)
    # cg solver params
    cgAcc = 1e-04
    cgIter = 5
    # frequency analysis
    # Sample point (relative coordinates) used for vortex-shedding
    # frequency measurement downstream of the obstacle.
    freq_x_coord = 0.8
    freq_y_coord = 0.7
    # Optional 2x-resolution companion solver for nicer output.
    if upres:
        gs_upres = vec3(res_x * 2, res_y * 2, res_z * 2 if res_z > 1 else res_z)
        s_upres = Solver(name='upres', gridSize=gs_upres, dim=3 if res_z > 1 else 2)
        density_upres = s_upres.create(RealGrid, name="density_upres")
        vel_upres = s_upres.create(MACGrid, name="vel_upres")
        flags_upres = s_upres.create(FlagGrid, name="flags_upres")
        phiWalls_upres = s_upres.create(LevelsetGrid, name="phiWalls_upres")
        fractions_upres = s_upres.create(MACGrid, name="fractions_upres")
        phiObs_upres = s_upres.create(LevelsetGrid, name="phiObs_upres")
    # Optional axis-swapped solver for Blender-compatible .uni export.
    if output_uni:
        if upres:
            gs_blender = vec3(res_x*2, res_z * 2 if res_z > 1 else res_z, res_y*2)
        else:
            gs_blender = vec3(res_x, res_z, res_y)
        s_blender = Solver(name='blender', gridSize=gs_blender, dim=3 if res_z > 1 else 2)
        density_blender = s_blender.create(RealGrid, name="density_blender")
        if not (gs_blender.x == gs_blender.y == gs_blender.z):
            max_dim = max(max(gs_blender.x, gs_blender.y), gs_blender.z)
            gs_blender_cubic = vec3(max_dim, max_dim, max_dim)
            s_blender_cubic = Solver(name='blender', gridSize=gs_blender_cubic, dim=3 if res_z > 1 else 2)
            density_blender_cubic = s_blender_cubic.create(RealGrid, name="density_blender_cubic")
        else:
            density_blender_cubic = None
    # viscosity
    worldScale = 1.0 # the normalized unit cube in manta has which world space size?
    # viscosity, in [m^2/s] , rescale to unit cube
    # uncomment one of these to select LDC with specific Reynolds nr
    # (higher ones will need larger resolution!)
    #visc = 0.0002 / (worldScale*worldScale) # Re 5k
    #visc = 0.0001 / (worldScale*worldScale) # Re 10k
    #visc = 0.00005 / (worldScale*worldScale) # Re 20k
    #visc = 0.00001 / (worldScale*worldScale) # Re 100k
    visc = 0.0000183 / (worldScale*worldScale) # Re 100k
    #visc = 0. # off, rely on numerical viscosity, no proper LDC!
    # Base-resolution simulation grids.
    flags = s.create(FlagGrid, name="flags")
    vel = s.create(MACGrid, name="vel")
    density = s.create(RealGrid, name="density")
    pressure = s.create(RealGrid, name="pressure")
    fractions = s.create(MACGrid, name="fractions")
    phiWalls = s.create(LevelsetGrid, name="phiWalls")
    phiObs = s.create(LevelsetGrid, name="phiObs")
    # Numpy staging buffers for grid <-> array transfers.
    v_ = np.zeros([res_z,res_y,res_x,3], dtype=np.float32)
    d_ = np.zeros([res_z,res_y,res_x,1], dtype=np.float32)
    gui = None
    if GUI and show_gui:
        gui = Gui()
        gui.show(True)
        gui.pause()
    print('start generation')
    sim_id = 0
    # pre-generate noise, so that all generated scenes for prediction and simulation look the same
    nx_list = []
    warmup_list = []
    for i in range(num_scenes):
        # Warmup steps
        if randomized_warmup_steps:
            warmup_list.append(randrange(min_warmup_steps, warmup_steps))
        else:
            warmup_list.append(warmup_steps)
        # noise
        nx_list_entry = []
        if override_vel:
            print("Training min/max vel: {}, {} <-> Override min/max vel: {}, {}".format(float(args.min_vel), float(args.max_vel), min_vel_override, max_vel_override))
            min_vel = min_vel_override
            max_vel = max_vel_override
        else:
            min_vel = float(args.min_vel)
            max_vel = float(args.max_vel)
        # Per-scene inflow velocity: random sample or linear sweep over
        # [min_vel, max_vel] across the scene index.
        if randomize_vel:
            rand_vel = uniform(min_vel, max_vel)
        else:
            cur_a = i / (num_scenes-1)
            rand_vel = min_vel * (1-cur_a) + max_vel * cur_a
        t_end = num_frames + warmup_list[i] if randomized_warmup_steps else num_frames
        for t in range(t_end):
            nx_list_entry.append(rand_vel)
        nx_list.append(nx_list_entry)
    # Store warmup steps
    warmup_file = os.path.join(perf_data_path, 'warmup_steps.txt')
    print(warmup_file)
    with open(warmup_file, 'w') as f:
        print("Warmup List")
        print(warmup_list)
        for warmup_entry in range(len(warmup_list) - 1):
            f.write('%d\n' % warmup_list[warmup_entry])
        f.write('%d' % warmup_list[-1])
    # load vars from simulation execution
    if load_warmup_from_disk:
        simulation_path = get_path_to_sim("pred_smoke_karman", model_name, "simulation", nseed)
        assert os.path.exists(simulation_path), "Simulation path does not exist for given seed! Abort..."
        shelve_vars = shelve_file_to_var(simulation_path)
        # NOTE(review): assigning into locals() inside a function has no
        # effect on the actual local variables -- these restored values are
        # likely never visible to the code below; verify intent.
        for key in shelve_vars:
            locals()[key] = shelve_vars[key]
    # Store variables to disk before simulation starts
    shelve_vars_to_file(locals(), dir(), perf_data_path)
    # Sim loop
    per_scene_duration = []
    per_scene_advection_duration = []
    per_scene_solve_duration = []
    print("Starting sim")
    for i in trange(num_scenes, desc='scenes'):
        freq_measure = []
        # Reset all grids for the new scene.
        flags.clear()
        vel.clear()
        density.clear()
        pressure.clear()
        fractions.clear()
        phiWalls.clear()
        phiObs.clear()
        if upres:
            flags_upres.clear()
            density_upres.clear()
            phiObs_upres.clear()
        # Build domain flags, the cylinder obstacle, and the two density
        # inflow boxes above/below the cylinder; returns the inflow shapes.
        def init_flag(flag_grid, phiWalls_grid, phiObs_grid, fractions_grid, solver, solver_res):
            obs_radius = solver_res.x * float(args.obs_radius)
            inflow_radius = obs_radius * 1.3 # slightly larger
            flag_grid.initDomain(inflow="xX", phiWalls=phiWalls_grid, boundaryWidth=0)
            obstacle = Cylinder( parent=solver, center=solver_res*vec3(0.25,0.5,0.5), radius=obs_radius, z=solver_res*vec3(0, 0, 1.0))
            phiObs_grid.join(obstacle.computeLevelset())
            # slightly larger copy for density source
            inflow_p0 = vec3(0.24 * solver_res.x, 0.5*solver_res.y + obs_radius, 0.0*solver_res.z)
            inflow_p1 = vec3(0.27 * solver_res.x, 0.5*solver_res.y + inflow_radius, 1.0*solver_res.z)
            densInflow0 = Box( parent=s, p0=inflow_p0, p1=inflow_p1) # basin
            inflow_p0 = vec3(0.24 * solver_res.x, 0.5*solver_res.y - inflow_radius, 0.0*solver_res.z)
            inflow_p1 = vec3(0.27 * solver_res.x, 0.5*solver_res.y - obs_radius, 1.0*solver_res.z)
            densInflow1 = Box( parent=s, p0=inflow_p0, p1=inflow_p1) # basin
            phiObs_grid.join(phiWalls_grid)
            updateFractions( flags=flag_grid, phiObs=phiObs_grid, fractions=fractions_grid)
            setObstacleFlags(flags=flag_grid, phiObs=phiObs_grid, fractions=fractions_grid)
            flag_grid.fillGrid()
            return densInflow0, densInflow1
        densInflow0, densInflow1 = init_flag(flags, phiWalls, phiObs, fractions, s, gs)
        if upres:
            densInflow0_upres, densInflow1_upres = init_flag(flags_upres, phiWalls_upres, phiObs_upres, fractions_upres, s_upres, gs_upres)
        # random
        t_end = num_frames + warmup_list[i] if randomized_warmup_steps else num_frames
        # Sliding window of recent inflow-velocity values, stored alongside
        # each dumped field.
        nq = deque([-1] * t_end, t_end)
        # Setup fields
        velInflow = vec3(nx_list[i][0], 0, 0)
        vel.setConst(velInflow)
        # compute Reynolds nr
        Re = 0.0
        if visc>0.0:
            Re = ((velInflow.x/res_max) * worldScale * float(args.obs_radius) * 2.0) / visc
        print("Reynolds number: {}".format(Re))
        if not os.path.exists(log_dir % i):
            os.makedirs(log_dir % i)
        # Marker file recording the Reynolds number of this scene.
        open("{}/Re_{}".format(log_dir % i, Re), "w")
        # optionally randomize y component
        if 1:
            noiseField = s.create(NoiseField, loadFromFile=True)
            noiseField.posScale = vec3(75)
            noiseField.clamp = True
            noiseField.clampNeg = -1.
            noiseField.clampPos = 1.
            testall = s.create(RealGrid); testall.setConst(-1.)
            addNoise(flags=flags, density=density, noise=noiseField, sdf=testall, scale=0.1 )
            setComponent(target=vel, source=density, component=1)
            density.setConst(0.)
        # load fields from simulation
        if load_warmup_from_disk:
            print("Loading warmup step {}...".format(warmup_list[i]))
            t_start = warmup_list[i] - prediction_window
            load_sim_path = simulation_path + "%06d/"
            v_tmp = load_velocity(load_sim_path % i, t_start-1, field_path_format)
            d_tmp = load_density(load_sim_path % i, t_start-1, field_path_format)
            copyArrayToGridMAC(v_tmp, vel)
            copyArrayToGridReal(d_tmp, density)
            del v_tmp, d_tmp
        else:
            t_start = 0
        # frame loop
        per_frame_advection_duration = []
        per_frame_solve_duration = []
        for t in tqdm(range(t_start, t_end), desc='sim', leave=False):
            start = timer()
            nx = nx_list[i][t]
            nq.append(nx)
            # Emit density from both inflow boxes, then advect.
            densInflow0.applyToGrid( grid=density, value=1. )
            densInflow1.applyToGrid( grid=density, value=1. )
            if upres:
                densInflow0_upres.applyToGrid(grid=density_upres, value=1.)
                densInflow1_upres.applyToGrid(grid=density_upres, value=1.)
            advectSemiLagrange(flags=flags, vel=vel, grid=density, order=density_adv_order)
            advectSemiLagrange(flags=flags, vel=vel, grid=vel  , order=2)
            if upres:
                # Upsample the coarse velocity (x2, values scaled by 2) and
                # advect the high-res density with it.
                zoom_mask = [2.0 if res_z > 1 else 1.0, 2.0, 2.0, 1.0]
                np_vec_temp = np.zeros([res_z,res_y,res_x,3], dtype=np.float32)
                copyGridToArrayVec3(vel, np_vec_temp)
                np_zoomed = ndimage.zoom(np_vec_temp, zoom_mask) * 2.0
                copyArrayToGridVec3(np_zoomed, vel_upres)
                advectSemiLagrange(flags=flags_upres, vel=vel_upres, grid=density_upres, order=2) # use order 2 instad of 1 (as in low res)
            end = timer()
            if t > warmup_list[i]:
                per_frame_advection_duration.append(end-start)
            start = timer()
            # Decode a latent-space frame back into the vel (and optionally
            # density) grids via the trained decoder(s).
            def decode(cur_ls_frame):
                # decode (ae)
                if classic_ae:
                    np_pred_v = rec_pred.ae_v._decoder.predict(x=cur_ls_frame[...,:rec_pred.z_num_vel], batch_size=1)
                    np_pred_d = rec_pred.ae_d._decoder.predict(x=cur_ls_frame[...,rec_pred.z_num_vel:], batch_size=1)
                    np_pred = np.concatenate([np_pred_v,np_pred_d],axis=-1)
                else:
                    np_pred = rec_pred.ae._decoder.predict(x=cur_ls_frame, batch_size=1)
                # velocity
                if res_z > 1:
                    np_vel = np_pred[:,:,:,:,:3] * normalization_factor_v
                else:
                    np_vel = np_pred[:,:,:,:2] * normalization_factor_v
                # Similar to preprocessing of training data, mirror y
                if res_z > 1:
                    np_vel = np_vel[:,:,::-1]
                else:
                    np_vel = np_vel[:,::-1]
                # reshape
                if res_z > 1:
                    np_vel = np_vel[0] # remove batch dim
                else:
                    in_shape = np_pred.shape
                    np_tmp_make3d = np.zeros(list(in_shape)[:-1] + [1])
                    np_vel = np.concatenate([np_vel, np_tmp_make3d], axis=-1)
                # store in grid
                copyArrayToGridMAC(np_vel, vel)
                # density
                if (prediction_type == "vel_den_prediction") and "density" in config.data_type: # or prediction_type == "enc_dec"
                    if res_z > 1:
                        np_den = (np_pred[:,:,:,:,-1] + 1.0) * 0.5
                    else:
                        np_den = (np_pred[:,:,:,-1] + 1.0) * 0.5
                        np_den = np.expand_dims(np_den, -1)
                    if res_z > 1:
                        np_den = np_den[0] # remove batch dim
                    # Similar to preprocessing of training data, mirror y
                    np_den = np_den[:,::-1]
                    copyArrayToGridReal(np_den, density)
            # Solve or Prediction
            if t < warmup_list[i] or prediction_type == "simulation" or prediction_type == "enc_dec" or prediction_type == "enc_only":
                # vel diffusion / viscosity!
                if visc > 0.0:
                    # diffusion param for solve = const * dt / dx^2
                    alphaV = visc * s.timestep * float(res_max * res_max)
                    #mantaMsg("Viscosity: %f , alpha=%f , Re=%f " %(visc, alphaV, Re), 0 )
                    setWallBcs(flags=flags, vel=vel)
                    cgSolveDiffusion( flags, vel, alphaV )
                if(boundary_cond_order == 1):
                    setWallBcs(flags=flags, vel=vel)
                else:
                    extrapolateMACSimple( flags=flags, vel=vel, distance=2 , intoObs=True)
                    setWallBcs(flags=flags, vel=vel, fractions=fractions, phiObs=phiObs)
                setInflowBcs(vel=vel,dir='xX',value=velInflow)
                solvePressure( flags=flags, vel=vel, pressure=pressure, fractions=fractions, cgAccuracy=cgAcc, cgMaxIterFac=cgIter)
                if(boundary_cond_order == 1):
                    setWallBcs(flags=flags, vel=vel)
                else:
                    extrapolateMACSimple( flags=flags, vel=vel, distance=5 , intoObs=True)
                    setWallBcs(flags=flags, vel=vel, fractions=fractions, phiObs=phiObs)
                setInflowBcs(vel=vel,dir='xX',value=velInflow)
                # Encode the solver state into latent space to seed/extend
                # the prediction history.
                if not prediction_type == "simulation":
                    copyGridToArrayMAC(target=v_, source=vel)
                    copyGridToArrayReal(target=d_, source=density)
                    if res_z > 1:
                        input_arr = v_[:,:,:,:3] / normalization_factor_v
                    else:
                        input_arr = v_[:,:,:,:2] / normalization_factor_v
                    if "density" in config.data_type:
                        input_arr = np.concatenate([input_arr, d_ * 2.0 - 1.0], axis=-1)
                    # Similar to preprocessing of training data
                    input_arr = input_arr[:,::-1]
                    if res_z > 1:
                        input_arr = np.expand_dims(input_arr, 0) # add batch dimension...
                    if classic_ae:
                        if res_z > 1:
                            velo_dim = 3
                        else:
                            velo_dim = 2
                        enc_v_part = rec_pred.ae_v._encoder.predict(input_arr[...,:velo_dim], batch_size=1)
                        enc_d_part = rec_pred.ae_d._encoder.predict(input_arr[...,velo_dim:], batch_size=1)
                        enc_v = np.concatenate([enc_v_part,enc_d_part],axis=-1)
                    else:
                        enc_v = rec_pred.ae._encoder.predict(input_arr, batch_size=1)
                    if prediction_type == "enc_only":
                        store_latentspace(enc_v[0], log_dir % i, t, nx, field_path_format)
                    # Supervised entry
                    if ls_supervision:
                        if classic_ae:
                            enc_v[0, rec_pred.z_num_vel-1] = nx
                            enc_v[0, -1] = nx
                        else:
                            enc_v[0, -1] = nx
                    prediction_history.add_simulation(enc_v[0])
                    if t >= warmup_list[i] and prediction_type == "enc_dec":
                        decode(enc_v)
            else:
                # ~~ Start of Prediction
                if prediction_type == "vel_prediction" and "density" in config.data_type:
                    # overwrite density part of history with current density
                    # 1) encode current density d0 (with zero vel components)
                    copyGridToArrayMAC(target=v_, source=vel) # added on 05.11... otherwise old v is used
                    copyGridToArrayReal(target=d_, source=density)
                    if res_z > 1:
                        input_arr = v_[:,:,:,:3] / normalization_factor_v
                    else:
                        input_arr = v_[:,:,:,:2] / normalization_factor_v
                    input_arr = np.concatenate([input_arr, d_ * 2.0 - 1.0], axis=-1)
                    # Similar to preprocessing of training data
                    input_arr = input_arr[:,::-1]
                    if res_z > 1:
                        input_arr = np.expand_dims(input_arr, 0) # add batch dimension...
                    if classic_ae:
                        if res_z > 1:
                            velo_dim = 3
                        else:
                            velo_dim = 2
                        enc_d = rec_pred.ae_d._encoder.predict(input_arr[...,velo_dim:], batch_size=1)
                        # Keep supervised param
                        if ls_supervision:
                            prediction_history.simulation_history[0, -1, rec_pred.z_num_vel:-sup_param_count] = enc_d[0, 0:-sup_param_count]
                        else:
                            prediction_history.simulation_history[0, -1, rec_pred.z_num_vel:] = enc_d[0, 0:]
                    else:
                        enc_d = rec_pred.ae._encoder.predict(input_arr, batch_size=1)
                        # 2) replace density part of sim history (maybe overwrite "wrong" vel parts with zero)
                        enc_d[0, :rec_pred.ls_split_idx] = 0.0 # overwrite velo components
                        # Keep supervised param
                        if ls_supervision:
                            prediction_history.simulation_history[0, -1, rec_pred.ls_split_idx:-sup_param_count] = enc_d[0, rec_pred.ls_split_idx:-sup_param_count]
                        else:
                            prediction_history.simulation_history[0, -1, rec_pred.ls_split_idx:] = enc_d[0, rec_pred.ls_split_idx:]
                X = prediction_history.get()
                # predict new field
                input_shape = X.shape # e.g. (1, 16, 1, 1, 1, 2048)
                X = X.reshape(*X.shape[0:2], -1) # e.g. (1, 16, 2048)
                # The predictor emits a latent-space delta; add it to the
                # most recent latent frame.
                pred_delta_z = pred.model.predict(X, batch_size=X.shape[0])
                cur_pred = X[0, -1] + pred_delta_z
                # supervised entries
                if ls_supervision:
                    cur_pred[0,-1,-1] = nx
                # add to history
                prediction_history.add_prediction(cur_pred[0])
                # decode (ae)
                decode(cur_pred[0])
                # ~~ End of Prediction
            if not profile:
                # Store to disk
                copyGridToArrayMAC(target=v_, source=vel)
                copyGridToArrayReal(target=d_, source=density)
                if res_z > 1 and output_uni:
                    store_density_blender(density_upres if upres else density, log_dir % i, t, density_blender=density_blender, density_blender_cubic=density_blender_cubic)
                store_velocity(v_, log_dir % i, t, list(nq), field_path_format)
                store_density(d_, log_dir % i, t, list(nq), field_path_format)
                if t > warmup_list[i]:
                    # freq measure
                    y_coord = int(freq_y_coord * v_.shape[1])
                    x_coord = int(freq_x_coord * v_.shape[2])
                    # store only y direction
                    freq_measure.append(float(v_[0, y_coord, x_coord, 1]))
            end = timer()
            if t > warmup_list[i]:
                per_frame_solve_duration.append(end-start)
            s.step()
            if not profile and output_images:
                screenshot(gui, log_dir % i, t, density=density_upres if upres else density, scale=2.0)
        if not profile and output_images:
            convert_sequence( os.path.join(log_dir % i, 'screenshots'), output_name="%06d" % i, file_format="%06d.jpg" if gui else "%06d.ppm", delete_images=not dont_delete_images )
        per_scene_advection_duration.append(np.array(per_frame_advection_duration))
        per_scene_solve_duration.append(np.array(per_frame_solve_duration))
        per_scene_duration.append(np.array(per_frame_advection_duration) + np.array(per_frame_solve_duration))
        # write freq measure to disk
        # Estimate the vortex-shedding frequency: smooth the sampled y-velocity,
        # mark local maxima, and divide the maxima count by the elapsed time.
        np_freq_measure = np.array(freq_measure)
        # smooth function
        N = 20
        np_freq_measure_smooth = np.convolve(np_freq_measure, np.ones((N,))/N, mode='valid')
        # for local maxima
        freq_arg_maxima = argrelextrema(np_freq_measure_smooth, np.greater)
        mask = np.ones_like(np_freq_measure,dtype=bool)
        mask[freq_arg_maxima[0]] = False
        np_freq_measure[mask] = 0
        np_freq_measure[~mask] = 1
        np_freq_measure = np.trim_zeros(np_freq_measure)
        delta_N = np.sum(np_freq_measure) - 1
        delta_t = len(np_freq_measure) * s.timestep
        f = delta_N / delta_t
        # store to disk
        freq_dict = {}
        freq_dict["frequency"] = float(f)
        freq_dict["delta_N"] = float(delta_N)
        freq_dict["delta_t"] = float(delta_t)
        freq_dict["freq_measure"] = freq_measure
        freq_data_path = os.path.join(log_dir % i, "frequency_{}.json".format(i))
        with open( freq_data_path, 'w') as f:
            json.dump(freq_dict, f, indent=4)
        # plot to disk
        plt.plot(np_freq_measure)
        plt.ylabel('local_maximum')
        plt.grid()
        plt.savefig(os.path.join(log_dir % i, "frequency_{}.png".format(i)))
        plt.clf()
        sim_id += 1
        gc.collect()
    # Aggregate per-scene timings into a uniquely numbered perf json.
    profile_dict = {}
    profile_dict["model_name"] = model_name
    profile_dict["per_scene_timings"] = [a.tolist() for a in per_scene_duration]
    profile_dict["mean_timings_all"] = np.mean(np.array(per_scene_duration))
    profile_dict["mean_timings_advection"] = np.mean(np.array(per_scene_advection_duration))
    profile_dict["mean_timings_solve"] = np.mean(np.array(per_scene_solve_duration))
    perf_data_path_json = os.path.join(perf_data_path, "perf_%06d.json")
    perf_data_count = 0
    while os.path.isfile(perf_data_path_json % perf_data_count):
        perf_data_count += 1
    with open( perf_data_path_json % perf_data_count, 'w') as f:
        json.dump(profile_dict, f, indent=4)
    print('Done')
if __name__ == '__main__':
main()
| [
"matplotlib.pyplot.grid",
"scipy.signal.argrelextrema",
"matplotlib.pyplot.ylabel",
"numpy.array",
"sys.path.append",
"scipy.ndimage.zoom",
"os.path.exists",
"collections.deque",
"argparse.ArgumentParser",
"matplotlib.pyplot.plot",
"numpy.concatenate",
"random.uniform",
"numpy.trim_zeros",
... | [((438, 475), 'sys.path.append', 'sys.path.append', (["(sys.path[0] + '/../')"], {}), "(sys.path[0] + '/../')\n", (453, 475), False, 'import sys\n'), ((668, 693), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (691, 693), False, 'import argparse\n'), ((6683, 6694), 'random.seed', 'seed', (['nseed'], {}), '(nseed)\n', (6687, 6694), False, 'from random import random, seed, uniform, randrange\n'), ((4034, 4079), 'os.path.join', 'os.path.join', (['model_base_dir', '"""data_args.txt"""'], {}), "(model_base_dir, 'data_args.txt')\n", (4046, 4079), False, 'import os\n'), ((4101, 4146), 'os.path.join', 'os.path.join', (['model_base_dir', '"""data_args.txt"""'], {}), "(model_base_dir, 'data_args.txt')\n", (4113, 4146), False, 'import os\n'), ((4168, 4198), 'keras_data.read_args_file', 'read_args_file', (['data_args_path'], {}), '(data_args_path)\n', (4182, 4198), False, 'from keras_data import read_args_file\n'), ((4223, 4265), 'os.path.join', 'os.path.join', (['config.data_path', '"""args.txt"""'], {}), "(config.data_path, 'args.txt')\n", (4235, 4265), False, 'import os\n'), ((4287, 4317), 'keras_data.read_args_file', 'read_args_file', (['data_args_path'], {}), '(data_args_path)\n', (4301, 4317), False, 'from keras_data import read_args_file\n'), ((6019, 6062), 'os.path.join', 'os.path.join', (['model_base_dir', '"""v_range.txt"""'], {}), "(model_base_dir, 'v_range.txt')\n", (6031, 6062), False, 'import os\n'), ((6345, 6388), 'os.path.join', 'os.path.join', (['model_base_dir', '"""d_range.txt"""'], {}), "(model_base_dir, 'd_range.txt')\n", (6357, 6388), False, 'import os\n'), ((10003, 10055), 'numpy.zeros', 'np.zeros', (['[res_z, res_y, res_x, 3]'], {'dtype': 'np.float32'}), '([res_z, res_y, res_x, 3], dtype=np.float32)\n', (10011, 10055), True, 'import numpy as np\n'), ((10059, 10111), 'numpy.zeros', 'np.zeros', (['[res_z, res_y, res_x, 1]'], {'dtype': 'np.float32'}), '([res_z, res_y, res_x, 1], dtype=np.float32)\n', (10067, 10111), True, 
'import numpy as np\n'), ((11248, 11296), 'os.path.join', 'os.path.join', (['perf_data_path', '"""warmup_steps.txt"""'], {}), "(perf_data_path, 'warmup_steps.txt')\n", (11260, 11296), False, 'import os\n'), ((12145, 12178), 'tqdm.trange', 'trange', (['num_scenes'], {'desc': '"""scenes"""'}), "(num_scenes, desc='scenes')\n", (12151, 12178), False, 'from tqdm import trange, tqdm\n'), ((25784, 25830), 'os.path.join', 'os.path.join', (['perf_data_path', '"""perf_%06d.json"""'], {}), "(perf_data_path, 'perf_%06d.json')\n", (25796, 25830), False, 'import os\n'), ((25859, 25912), 'os.path.isfile', 'os.path.isfile', (['(perf_data_path_json % perf_data_count)'], {}), '(perf_data_path_json % perf_data_count)\n', (25873, 25912), False, 'import os\n'), ((6083, 6126), 'os.path.join', 'os.path.join', (['model_base_dir', '"""v_range.txt"""'], {}), "(model_base_dir, 'v_range.txt')\n", (6095, 6126), False, 'import os\n'), ((6151, 6196), 'os.path.join', 'os.path.join', (['config.data_path', '"""v_range.txt"""'], {}), "(config.data_path, 'v_range.txt')\n", (6163, 6196), False, 'import os\n'), ((6409, 6452), 'os.path.join', 'os.path.join', (['model_base_dir', '"""d_range.txt"""'], {}), "(model_base_dir, 'd_range.txt')\n", (6421, 6452), False, 'import os\n'), ((6477, 6522), 'os.path.join', 'os.path.join', (['config.data_path', '"""d_range.txt"""'], {}), "(config.data_path, 'd_range.txt')\n", (6489, 6522), False, 'import os\n'), ((11694, 11725), 'os.path.exists', 'os.path.exists', (['simulation_path'], {}), '(simulation_path)\n', (11708, 11725), False, 'import os\n'), ((14004, 14030), 'collections.deque', 'deque', (['([-1] * t_end)', 't_end'], {}), '([-1] * t_end, t_end)\n', (14009, 14030), False, 'from collections import deque\n'), ((24294, 24316), 'numpy.array', 'np.array', (['freq_measure'], {}), '(freq_measure)\n', (24302, 24316), True, 'import numpy as np\n'), ((24474, 24523), 'scipy.signal.argrelextrema', 'argrelextrema', (['np_freq_measure_smooth', 'np.greater'], {}), 
'(np_freq_measure_smooth, np.greater)\n', (24487, 24523), False, 'from scipy.signal import argrelextrema\n'), ((24533, 24574), 'numpy.ones_like', 'np.ones_like', (['np_freq_measure'], {'dtype': 'bool'}), '(np_freq_measure, dtype=bool)\n', (24545, 24574), True, 'import numpy as np\n'), ((24686, 24716), 'numpy.trim_zeros', 'np.trim_zeros', (['np_freq_measure'], {}), '(np_freq_measure)\n', (24699, 24716), True, 'import numpy as np\n'), ((25193, 25218), 'matplotlib.pyplot.plot', 'plt.plot', (['np_freq_measure'], {}), '(np_freq_measure)\n', (25201, 25218), True, 'import matplotlib.pyplot as plt\n'), ((25221, 25248), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""local_maximum"""'], {}), "('local_maximum')\n", (25231, 25248), True, 'import matplotlib.pyplot as plt\n'), ((25251, 25261), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (25259, 25261), True, 'import matplotlib.pyplot as plt\n'), ((25335, 25344), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (25342, 25344), True, 'import matplotlib.pyplot as plt\n'), ((25362, 25374), 'gc.collect', 'gc.collect', ([], {}), '()\n', (25372, 25374), False, 'import gc\n'), ((25558, 25586), 'numpy.array', 'np.array', (['per_scene_duration'], {}), '(per_scene_duration)\n', (25566, 25586), True, 'import numpy as np\n'), ((25638, 25676), 'numpy.array', 'np.array', (['per_scene_advection_duration'], {}), '(per_scene_advection_duration)\n', (25646, 25676), True, 'import numpy as np\n'), ((25724, 25758), 'numpy.array', 'np.array', (['per_scene_solve_duration'], {}), '(per_scene_solve_duration)\n', (25732, 25758), True, 'import numpy as np\n'), ((10922, 10947), 'random.uniform', 'uniform', (['min_vel', 'max_vel'], {}), '(min_vel, max_vel)\n', (10929, 10947), False, 'from random import random, seed, uniform, randrange\n'), ((15494, 15501), 'timeit.default_timer', 'timer', ([], {}), '()\n', (15499, 15501), True, 'from timeit import default_timer as timer\n'), ((16372, 16379), 'timeit.default_timer', 'timer', ([], {}), 
'()\n', (16377, 16379), True, 'from timeit import default_timer as timer\n'), ((16469, 16476), 'timeit.default_timer', 'timer', ([], {}), '()\n', (16474, 16476), True, 'from timeit import default_timer as timer\n'), ((23554, 23561), 'timeit.default_timer', 'timer', ([], {}), '()\n', (23559, 23561), True, 'from timeit import default_timer as timer\n'), ((24027, 24065), 'numpy.array', 'np.array', (['per_frame_advection_duration'], {}), '(per_frame_advection_duration)\n', (24035, 24065), True, 'import numpy as np\n'), ((24101, 24135), 'numpy.array', 'np.array', (['per_frame_solve_duration'], {}), '(per_frame_solve_duration)\n', (24109, 24135), True, 'import numpy as np\n'), ((24729, 24752), 'numpy.sum', 'np.sum', (['np_freq_measure'], {}), '(np_freq_measure)\n', (24735, 24752), True, 'import numpy as np\n'), ((10456, 10497), 'random.randrange', 'randrange', (['min_warmup_steps', 'warmup_steps'], {}), '(min_warmup_steps, warmup_steps)\n', (10465, 10497), False, 'from random import random, seed, uniform, randrange\n'), ((14304, 14331), 'os.path.exists', 'os.path.exists', (['(log_dir % i)'], {}), '(log_dir % i)\n', (14318, 14331), False, 'import os\n'), ((14337, 14361), 'os.makedirs', 'os.makedirs', (['(log_dir % i)'], {}), '(log_dir % i)\n', (14348, 14361), False, 'import os\n'), ((16037, 16089), 'numpy.zeros', 'np.zeros', (['[res_z, res_y, res_x, 3]'], {'dtype': 'np.float32'}), '([res_z, res_y, res_x, 3], dtype=np.float32)\n', (16045, 16089), True, 'import numpy as np\n'), ((23836, 23876), 'os.path.join', 'os.path.join', (['(log_dir % i)', '"""screenshots"""'], {}), "(log_dir % i, 'screenshots')\n", (23848, 23876), False, 'import os\n'), ((24165, 24203), 'numpy.array', 'np.array', (['per_frame_advection_duration'], {}), '(per_frame_advection_duration)\n', (24173, 24203), True, 'import numpy as np\n'), ((24206, 24240), 'numpy.array', 'np.array', (['per_frame_solve_duration'], {}), '(per_frame_solve_duration)\n', (24214, 24240), True, 'import numpy as np\n'), ((24402, 
24415), 'numpy.ones', 'np.ones', (['(N,)'], {}), '((N,))\n', (24409, 24415), True, 'import numpy as np\n'), ((16145, 16181), 'scipy.ndimage.zoom', 'ndimage.zoom', (['np_vec_temp', 'zoom_mask'], {}), '(np_vec_temp, zoom_mask)\n', (16157, 16181), False, 'from scipy import ndimage\n'), ((16764, 16811), 'numpy.concatenate', 'np.concatenate', (['[np_pred_v, np_pred_d]'], {'axis': '(-1)'}), '([np_pred_v, np_pred_d], axis=-1)\n', (16778, 16811), True, 'import numpy as np\n'), ((17386, 17434), 'numpy.concatenate', 'np.concatenate', (['[np_vel, np_tmp_make3d]'], {'axis': '(-1)'}), '([np_vel, np_tmp_make3d], axis=-1)\n', (17400, 17434), True, 'import numpy as np\n'), ((17764, 17790), 'numpy.expand_dims', 'np.expand_dims', (['np_den', '(-1)'], {}), '(np_den, -1)\n', (17778, 17790), True, 'import numpy as np\n'), ((21102, 21154), 'numpy.concatenate', 'np.concatenate', (['[input_arr, d_ * 2.0 - 1.0]'], {'axis': '(-1)'}), '([input_arr, d_ * 2.0 - 1.0], axis=-1)\n', (21116, 21154), True, 'import numpy as np\n'), ((19493, 19545), 'numpy.concatenate', 'np.concatenate', (['[input_arr, d_ * 2.0 - 1.0]'], {'axis': '(-1)'}), '([input_arr, d_ * 2.0 - 1.0], axis=-1)\n', (19507, 19545), True, 'import numpy as np\n'), ((19668, 19696), 'numpy.expand_dims', 'np.expand_dims', (['input_arr', '(0)'], {}), '(input_arr, 0)\n', (19682, 19696), True, 'import numpy as np\n'), ((20010, 20059), 'numpy.concatenate', 'np.concatenate', (['[enc_v_part, enc_d_part]'], {'axis': '(-1)'}), '([enc_v_part, enc_d_part], axis=-1)\n', (20024, 20059), True, 'import numpy as np\n'), ((21276, 21304), 'numpy.expand_dims', 'np.expand_dims', (['input_arr', '(0)'], {}), '(input_arr, 0)\n', (21290, 21304), True, 'import numpy as np\n')] |
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import numpy as np

# Lambert Conformal map covering most of North America.
bmap = Basemap(width=12000000, height=9000000, projection='lcc',
        resolution='c', lat_1=45., lat_2=55, lat_0=50, lon_0=-107.)

# Paint the map boundary first: once the continents are drawn on top,
# whatever remains of this fill reads as ocean.
bmap.drawmapboundary(fill_color='aqua')
# Continents in coral; lakes match the ocean fill.
bmap.fillcontinents(color='coral', lake_color='aqua')

# Graticule. labels flags are [left, right, top, bottom]:
# parallels labeled on the right and top edges, meridians on the
# bottom and left edges.
lat_lines = np.arange(0., 81, 10.)
bmap.drawparallels(lat_lines, labels=[False, True, True, False])
lon_lines = np.arange(10., 351., 20.)
bmap.drawmeridians(lon_lines, labels=[True, False, False, True])

# Mark Boulder, Colorado with a blue dot and a nearby label.
boulder_lon, boulder_lat = -104.237, 40.125
# Calling the Basemap instance converts lon/lat to projection x/y
# (scalars, lists, or numpy arrays all work); inverse=True goes back.
xpt, ypt = bmap(boulder_lon, boulder_lat)
lonpt, latpt = bmap(xpt, ypt, inverse=True)
bmap.plot(xpt, ypt, 'bo')
# Offset the annotation slightly; the offset is in projection coords.
plt.text(xpt+100000, ypt+100000, 'Boulder (%5.1fW,%3.1fN)' % (lonpt, latpt))
plt.show()
| [
"matplotlib.pyplot.text",
"mpl_toolkits.basemap.Basemap",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((131, 254), 'mpl_toolkits.basemap.Basemap', 'Basemap', ([], {'width': '(12000000)', 'height': '(9000000)', 'projection': '"""lcc"""', 'resolution': '"""c"""', 'lat_1': '(45.0)', 'lat_2': '(55)', 'lat_0': '(50)', 'lon_0': '(-107.0)'}), "(width=12000000, height=9000000, projection='lcc', resolution='c',\n lat_1=45.0, lat_2=55, lat_0=50, lon_0=-107.0)\n", (138, 254), False, 'from mpl_toolkits.basemap import Basemap\n'), ((662, 686), 'numpy.arange', 'np.arange', (['(0.0)', '(81)', '(10.0)'], {}), '(0.0, 81, 10.0)\n', (671, 686), True, 'import numpy as np\n'), ((788, 816), 'numpy.arange', 'np.arange', (['(10.0)', '(351.0)', '(20.0)'], {}), '(10.0, 351.0, 20.0)\n', (797, 816), True, 'import numpy as np\n'), ((1309, 1394), 'matplotlib.pyplot.text', 'plt.text', (['(xpt + 100000)', '(ypt + 100000)', "('Boulder (%5.1fW,%3.1fN)' % (lonpt, latpt))"], {}), "(xpt + 100000, ypt + 100000, 'Boulder (%5.1fW,%3.1fN)' % (lonpt, latpt)\n )\n", (1317, 1394), True, 'import matplotlib.pyplot as plt\n'), ((1383, 1393), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1391, 1393), True, 'import matplotlib.pyplot as plt\n')] |
from multiprocessing import Process, Pool
import atexit
from collections import OrderedDict
import subprocess
import logging
import imp
import os
import os.path
import sys
import copy
import argparse
from datetime import datetime
from threading import Thread
import pprint
import psutil
import sys
import time
import traceback
import numpy as np
# import tensorflow as tf
from roslaunch.core import RLException
from roslaunch.parent import ROSLaunchParent
import rosgraph
import rospy
from gps.algorithm.cost.cost_utils import *
# from opentamp.src.policy_hooks.control_attention_policy_opt import ControlAttentionPolicyOpt
from opentamp.src.policy_hooks.mcts_explore import MCTSExplore
from opentamp.src.policy_hooks.sample import Sample
from opentamp.src.policy_hooks.utils.policy_solver_utils import *
# from opentamp.src.policy_hooks.multi_head_policy_opt_tf import MultiHeadPolicyOptTf
import policy_hooks.utils.policy_solver_utils as utils
# from opentamp.src.policy_hooks.task_net import tf_binary_network, tf_classification_network
from opentamp.src.policy_hooks.mcts import MCTS
from opentamp.src.policy_hooks.state_traj_cost import StateTrajCost
from opentamp.src.policy_hooks.action_traj_cost import ActionTrajCost
from opentamp.src.policy_hooks.traj_constr_cost import TrajConstrCost
from opentamp.src.policy_hooks.cost_product import CostProduct
from opentamp.src.policy_hooks.sample import Sample
from opentamp.src.policy_hooks.policy_solver import get_base_solver
from opentamp.src.policy_hooks.utils.load_task_definitions import *
# from opentamp.src.policy_hooks.value_server import ValueServer
# from opentamp.src.policy_hooks.primitive_server import PrimitiveServer
# from opentamp.src.policy_hooks.policy_server import PolicyServer
# from opentamp.src.policy_hooks.rollout_server import RolloutServer
# from opentamp.src.policy_hooks.tf_models import tf_network, multi_modal_network_fp
# from opentamp.src.policy_hooks.view_server import ViewServer
from opentamp.src.policy_hooks.vae.reward_trainer import RewardTrainer
from opentamp.src.policy_hooks.vae.vae_server import VAEServer
from opentamp.src.policy_hooks.vae.vae_trainer import VAETrainer
from opentamp.src.policy_hooks.vae.vae_rollout_server import VAERolloutServer
from opentamp.src.policy_hooks.vae.vae_tamp_rollout_server import VAETampRolloutServer
def spawn_server(cls, hyperparams):
    """Construct a *cls* server from *hyperparams* and enter its run loop.

    Used as the target function for the worker ``multiprocessing.Process``
    instances created by ``MultiProcessMain.create_server``.
    """
    cls(hyperparams).run()
class MultiProcessMain(object):
    """Coordinator for distributed VAE / rollout training.

    Expands a problem configuration into agent, MCTS, and cost settings,
    then spawns worker processes (a VAE server plus rollout servers) that
    communicate over ROS, supervising and restarting them as needed.
    """
    def __init__(self, config):
        """Build the full training configuration from *config* (mutated in place).

        Passing ``config=None`` yields an uninitialized shell instance; see
        the ``no_config_load`` alternate constructor for that path.
        """
        if config is None:
            return
        self.config = config
        prob = config['prob']
        if 'num_objs' in config:
            prob.NUM_OBJS = config['num_objs']
        conditions = self.config['num_conds']
        # Task metadata is read from the problem domain's task-map file.
        self.task_list = tuple(get_tasks(self.config['task_map_file']).keys())
        self.task_durations = get_task_durations(self.config['task_map_file'])
        self.config['task_list'] = self.task_list
        task_encoding = get_task_encoding(self.task_list)
        plans = {}
        task_breaks = []
        goal_states = []
        targets = []
        # One end-target assignment per training condition.
        for _ in range(conditions):
            targets.append(prob.get_end_targets(prob.NUM_OBJS))
        plans, openrave_bodies, env = prob.get_plans()
        # Map plan attributes into flat state / action / target vectors.
        state_vector_include, action_vector_include, target_vector_include = prob.get_vector(self.config)
        self.dX, self.state_inds, self.dU, self.action_inds, self.symbolic_bound = utils.get_state_action_inds(list(plans.values())[0], self.config['robot_name'], self.config['attr_map'], state_vector_include, action_vector_include)
        self.target_dim, self.target_inds = utils.get_target_inds(list(plans.values())[0], self.config['attr_map'], target_vector_include)
        x0 = prob.get_random_initial_state_vec(self.config, plans, self.dX, self.state_inds, conditions)
        # Stamp the shared vector layout onto every plan object.
        for plan in list(plans.values()):
            plan.state_inds = self.state_inds
            plan.action_inds = self.action_inds
            plan.dX = self.dX
            plan.dU = self.dU
            plan.symbolic_bound = self.symbolic_bound
            plan.target_dim = self.target_dim
            plan.target_inds = self.target_inds
        sensor_dims = {
            utils.STATE_ENUM: self.symbolic_bound,
            utils.ACTION_ENUM: self.dU,
            utils.TRAJ_HIST_ENUM: self.dU*self.config['hist_len'],
            utils.TASK_ENUM: len(self.task_list),
            utils.TARGETS_ENUM: self.target_dim,
        }
        for enum in self.config['sensor_dims']:
            sensor_dims[enum] = self.config['sensor_dims'][enum]
        # Primitive-output layout: one (start, end) index range per choice
        # enum, beginning with the task selector itself.
        self.prim_bounds = []
        self.prim_dims = OrderedDict({})
        self.config['prim_dims'] = self.prim_dims
        options = prob.get_prim_choices()
        ind = len(self.task_list)
        self.prim_bounds.append((0, ind))
        for enum in options:
            if enum == utils.TASK_ENUM: continue
            n_options = len(options[enum])
            next_ind = ind+n_options
            self.prim_bounds.append((ind, next_ind))
            self.prim_dims[enum] = n_options
            ind = next_ind
        for enum in self.prim_dims:
            sensor_dims[enum] = self.prim_dims[enum]
        self.config['prim_bounds'] = self.prim_bounds
        self.config['prim_dims'] = self.prim_dims
        # self.config['goal_f'] = prob.goal_f
        # self.config['cost_f'] = prob.cost_f
        self.config['target_f'] = None # prob.get_next_target
        self.config['encode_f'] = None # prob.sorting_state_encode
        # self.config['weight_file'] = 'tf_saved/2018-09-12 23:43:45.748906_namo_5.ckpt'
        self.config['task_durations'] = self.task_durations
        self.policy_inf_coeff = self.config['algorithm']['policy_inf_coeff']
        self.policy_out_coeff = self.config['algorithm']['policy_out_coeff']
        # Hyperparameter dict handed to the agent constructor below.
        self.config['agent'] = {
            'type': self.config['agent_type'],
            'x0': x0,
            'targets': targets,
            'task_list': self.task_list,
            'plans': plans,
            'task_breaks': task_breaks,
            'task_encoding': task_encoding,
            'task_durations': self.task_durations,
            'state_inds': self.state_inds,
            'action_inds': self.action_inds,
            'target_inds': self.target_inds,
            'dU': self.dU,
            'dX': self.symbolic_bound,
            'symbolic_bound': self.symbolic_bound,
            'target_dim': self.target_dim,
            'get_plan': None, # prob.get_plan,
            'sensor_dims': sensor_dims,
            'state_include': self.config['state_include'],
            'obs_include': self.config['obs_include'],
            'prim_obs_include': self.config['prim_obs_include'],
            'prim_out_include': self.config['prim_out_include'],
            'val_obs_include': self.config['val_obs_include'],
            'conditions': self.config['num_conds'],
            'solver': None,
            'num_objs': prob.NUM_OBJS,
            'obj_list': [],
            'stochastic_conditions': False,
            # NOTE(review): 'image_width'/'image_height'/'image_channels'
            # appear twice in this literal; Python keeps the later,
            # config-derived values, so these three are dead.
            'image_width': utils.IM_W,
            'image_height': utils.IM_H,
            'image_channels': utils.IM_C,
            'hist_len': self.config['hist_len'],
            'T': 1,
            'viewer': config['viewer'],
            'model': None,
            'get_hl_plan': None,
            'env': env,
            'openrave_bodies': openrave_bodies,
            'n_dirs': self.config['n_dirs'],
            'prob': prob,
            'attr_map': self.config['attr_map'],
            'image_width': self.config['image_width'],
            'image_height': self.config['image_height'],
            'image_channels': self.config['image_channels'],
            'prim_dims': self.prim_dims,
            'solver_type': self.config['solver_type'],
            'robot_name': self.config['robot_name'],
            'policy_inf_coeff': self.config['policy_inf_coeff'],
            'policy_out_coeff': self.config['policy_out_coeff'],
        }
        if 'cloth_width' in self.config:
            self.config['agent']['cloth_width'] = self.config['cloth_width']
            self.config['agent']['cloth_length'] = self.config['cloth_length']
            self.config['agent']['cloth_spacing'] = self.config['cloth_spacing']
            self.config['agent']['cloth_radius'] = self.config['cloth_radius']
        # action_cost_wp = np.ones((self.config['agent']['T'], self.dU), dtype='float64')
        state_cost_wp = np.ones((self.symbolic_bound), dtype='float64')
        # Cost-term configs; the CostSum wiring that would consume them is
        # commented out below, so these dicts are currently unused.
        traj_cost = {
            'type': StateTrajCost,
            'data_types': {
                utils.STATE_ENUM: {
                    'wp': state_cost_wp,
                    'target_state': np.zeros((1, self.symbolic_bound)),
                    'wp_final_multiplier': 1.0,
                }
            },
            'ramp_option': RAMP_CONSTANT
        }
        action_cost = {
            'type': ActionTrajCost,
            'data_types': {
                utils.ACTION_ENUM: {
                    'wp': np.ones((1, self.dU), dtype='float64'),
                    'target_state': np.zeros((1, self.dU)),
                }
            },
            'ramp_option': RAMP_CONSTANT
        }
        # constr_cost = {
        #     'type': TrajConstrCost,
        # }
        # self.config['algorithm']['cost'] = {
        #     'type': CostSum,
        #     'costs': [traj_cost, action_cost],
        #     'weights': [1.0, 1.0],
        #
        self.agent = self.config['agent']['type'](self.config['agent'])
        self.weight_dir = self.config['weight_dir']
        self.traj_opt_steps = self.config['traj_opt_steps']
        self.num_samples = self.config['num_samples']
        # One MCTS instance per initial condition.
        self.mcts = []
        for condition in range(len(x0)):
            self.mcts.append(MCTS(
                self.task_list,
                self.prim_dims,
                [],
                None,
                None,
                condition,
                self.agent,
                self.config['branching_factor'],
                self.config['num_samples'],
                self.config['num_distilled_samples'],
                soft_decision=1.0,
                C=2,
                max_depth=self.config['max_tree_depth'],
                explore_depth=5,
                opt_strength=0,
                ))
        self.config['mcts'] = self.mcts
        # self.config['agent'] = self.agent
        self.config['dX'] = self.dX
        self.config['dU'] = self.dU
        self.config['symbolic_bound'] = self.symbolic_bound
        self.config['dO'] = self.agent.dO
        self.config['dPrimObs'] = self.agent.dPrim
        self.config['dValObs'] = self.agent.dVal
        self.config['dPrimOut'] = self.agent.dPrimOut
        self.config['state_inds'] = self.state_inds
        self.config['action_inds'] = self.action_inds
        self.config['policy_out_coeff'] = self.policy_out_coeff
        self.config['policy_inf_coeff'] = self.policy_inf_coeff
        self.config['target_inds'] = self.target_inds
        self.config['target_dim'] = self.target_dim
        self.config['task_list'] = self.task_list
        self.config['time_log'] = self.config['weight_dir']+'/timing_info.txt'
        self.config['rollout_len'] =self.config.get('rollout_len', 20)
        # Sub-config consumed by the VAE trainer / server processes.
        self.config['vae'] = {}
        self.config['vae']['task_dims'] = int(len(self.task_list) + np.sum(list(self.prim_dims.values())))
        self.config['vae']['obs_dims'] = (utils.IM_W, utils.IM_H, 3)
        self.config['vae']['weight_dir'] = self.weight_dir
        self.config['vae']['rollout_len'] = self.config['rollout_len']
        self.config['vae'].update(self.config['train_params'])
        self.rollout_type = VAETampRolloutServer
        self.roscore = None
    @classmethod
    def no_config_load(cls, env, name, config):
        """Alternate constructor for plain gym-style envs (no TAMP problem).

        Derives task/primitive dimensions from *env*'s action space, builds
        a single exploratory MCTS, fills in the 'vae' sub-config, and
        returns a shell instance whose ``rollout_type`` is the non-TAMP
        rollout server.
        """
        main = cls(None)
        config['env'] = env
        if config['rollout_len'] <= 0:
            config['rollout_len'] = 50
        temp_env = env()
        act_space = temp_env.action_space
        # MultiDiscrete action spaces (have .nvec): first dim is the task
        # selector, remaining dims are primitive parameters. Plain Discrete
        # spaces have tasks only.
        prim_dims = {'prim{}'.format(i): act_space.nvec[i] for i in range(1, len(act_space.nvec))} if hasattr(act_space, 'nvec') else {}
        n = act_space.nvec[0] if hasattr(act_space, 'nvec') else act_space.n
        config['weight_dir'] = 'tf_saved/'+name.lower()+'_t{0}_vae_data'.format(config['rollout_len']) if config['weight_dir'] == '' else config['weight_dir']
        if hasattr(temp_env, 'n_blocks'):
            config['weight_dir'] += '_{0}_blocks'.format(temp_env.n_blocks)
        config['mcts'] = MCTS(
            ['task{0}'.format(i) for i in range(n)],
            prim_dims,
            [],
            None,
            None,
            0,
            None,
            20,
            1,
            0,
            soft_decision=1.0,
            C=2,
            max_depth=config['rollout_len'],
            explore_depth=config['rollout_len'],
            opt_strength=0,
            )
        config['vae'] = {}
        # NOTE(review): task_dims multiplies (n * sum) here but __init__
        # adds (len + sum) -- confirm the discrepancy is intentional.
        config['vae']['task_dims'] = int(n * np.sum(list(prim_dims.values())))
        config['vae']['obs_dims'] = (temp_env.im_height, temp_env.im_wid, 3)
        config['vae']['weight_dir'] = config['weight_dir']
        config['vae']['rollout_len'] = config['rollout_len']
        config['vae']['load_step'] = config['load_step']
        config['vae'].update(config['train_params'])
        config['topic'] = name
        temp_env.close()
        main.rollout_type = VAERolloutServer
        main.config = config
        main.roscore = None
        main.weight_dir = config['weight_dir']
        return main
    def spawn_servers(self, config):
        """Create (but do not start) the configured server processes."""
        self.processes = []
        self.process_info = []
        self.process_configs = {}
        self.threads = []
        if self.config['vae_server']:
            self.create_vae_server(config)
        if self.config['rollout_server']:
            self.create_rollout_servers(config)
    def start_servers(self):
        """Launch every spawned process (with a pause between each) and thread."""
        for p in self.processes:
            p.start()
            time.sleep(2)
        for t in self.threads:
            t.start()
    def create_server(self, server_cls, hyperparams, process=True):
        """Run *server_cls* in a daemon child process, or inline.

        NOTE(review): the *process* parameter is immediately overwritten by
        the 'no_child_process' config flag, so callers cannot override it.
        """
        process = not hyperparams['no_child_process']
        if process:
            p = Process(target=spawn_server, args=(server_cls, hyperparams))
            p.daemon = True
            self.processes.append(p)
            server_id = hyperparams['id'] if 'id' in hyperparams else hyperparams['scope']
            self.process_info.append((server_cls, server_id))
            # Recorded so watch_processes() can respawn a dead server.
            # NOTE(review): Process.pid is None until start() is called, so
            # every entry inserted here shares the key None -- confirm.
            self.process_configs[p.pid] = (server_cls, hyperparams)
        else:
            # t = Thread(target=spawn_server, args=(server_cls, hyperparams))
            # t.daemon = True
            # self.threads.append(t)
            spawn_server(server_cls, hyperparams)
    def create_vae_server(self, hyperparams):
        """Spawn the single VAE training server under scope 'vae'."""
        new_hyperparams = copy.copy(hyperparams)
        new_hyperparams['scope'] = 'vae'
        self.create_server(VAEServer, new_hyperparams)
    def create_rollout_servers(self, hyperparams):
        """Spawn n_rollout_servers rollout workers with unique ids."""
        for n in range(hyperparams['n_rollout_servers']):
            new_hyperparams = copy.copy(hyperparams)
            new_hyperparams['id'] = hyperparams['server_id']+'_'+str(n)
            self.create_server(self.rollout_type, new_hyperparams)
    def watch_processes(self, kill_all=False):
        """Poll children every 60s, restarting any that die.

        With *kill_all* set, the first death ends supervision instead.
        Remaining live children are terminated on the way out.
        """
        exit = False
        while not exit and len(self.processes):
            for n in range(len(self.processes)):
                p = self.processes[n]
                if not p.is_alive():
                    message = 'Killing All.' if kill_all else 'Restarting Dead Process.'
                    print('\n\nProcess died: ' + str(self.process_info[n]) + ' - ' + message)
                    exit = kill_all
                    if kill_all: break
                    process_config = self.process_configs[p.pid]
                    del self.process_info[n]
                    self.create_server(*process_config)
                    print("Relaunched dead process")
            time.sleep(60)
            # self.log_mem_info()
        for p in self.processes:
            if p.is_alive(): p.terminate()
    def check_dirs(self):
        """Create the weight directories if they do not yet exist."""
        if not os.path.exists(self.config['weight_dir']):
            os.makedirs(self.config['weight_dir'])
        if not os.path.exists(self.config['weight_dir']+'_trained'):
            os.makedirs(self.config['weight_dir']+'_trained')
    def start_ros(self):
        """Start a private roscore unless a ROS master is already running."""
        if self.roscore is not None or rosgraph.is_master_online(): return
        try:
            self.roscore = ROSLaunchParent('train_roscore', [], is_core=True, num_workers=16, verbose=True)
            self.roscore.start()
        except RLException as e:
            # Another master won the race; fall back to the existing one.
            pass
    def start(self, kill_all=False):
        """Entry point: run a trainer inline, or spawn and supervise servers."""
        if self.config['train_vae']:
            self.config['id'] = 0
            self.config['vae']['train_mode'] = 'unconditional' if self.config['unconditional'] else 'conditional'
            trainer = VAETrainer(self.config)
            # o, d, d2 = trainer.vae.test_decode()
            # import ipdb; ipdb.set_trace()
            trainer.train()
        elif self.config['train_reward']:
            self.config['id'] = 0
            self.config['vae']['train_mode'] = 'conditional'
            trainer = RewardTrainer(self.config)
            trainer.train()
        else:
            self.check_dirs()
            if 'log_timing' in self.config and self.config['log_timing']:
                with open(self.config['time_log'], 'a+') as f:
                    f.write('\n\nTiming info for {0}:'.format(datetime.now()))
            self.start_ros()
            time.sleep(1)
            self.spawn_servers(self.config)
            self.start_servers()
            self.watch_processes(kill_all)
        if self.roscore is not None: self.roscore.shutdown()
| [
"os.path.exists",
"collections.OrderedDict",
"opentamp.src.policy_hooks.mcts.MCTS",
"numpy.ones",
"os.makedirs",
"opentamp.src.policy_hooks.vae.reward_trainer.RewardTrainer",
"opentamp.src.policy_hooks.vae.vae_trainer.VAETrainer",
"multiprocessing.Process",
"roslaunch.parent.ROSLaunchParent",
"tim... | [((4629, 4644), 'collections.OrderedDict', 'OrderedDict', (['{}'], {}), '({})\n', (4640, 4644), False, 'from collections import OrderedDict\n'), ((8449, 8494), 'numpy.ones', 'np.ones', (['self.symbolic_bound'], {'dtype': '"""float64"""'}), "(self.symbolic_bound, dtype='float64')\n", (8456, 8494), True, 'import numpy as np\n'), ((15793, 15815), 'copy.copy', 'copy.copy', (['hyperparams'], {}), '(hyperparams)\n', (15802, 15815), False, 'import copy\n'), ((14936, 14949), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (14946, 14949), False, 'import time\n'), ((15163, 15223), 'multiprocessing.Process', 'Process', ([], {'target': 'spawn_server', 'args': '(server_cls, hyperparams)'}), '(target=spawn_server, args=(server_cls, hyperparams))\n', (15170, 15223), False, 'from multiprocessing import Process, Pool\n'), ((16053, 16075), 'copy.copy', 'copy.copy', (['hyperparams'], {}), '(hyperparams)\n', (16062, 16075), False, 'import copy\n'), ((16946, 16960), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (16956, 16960), False, 'import time\n'), ((17115, 17156), 'os.path.exists', 'os.path.exists', (["self.config['weight_dir']"], {}), "(self.config['weight_dir'])\n", (17129, 17156), False, 'import os\n'), ((17170, 17208), 'os.makedirs', 'os.makedirs', (["self.config['weight_dir']"], {}), "(self.config['weight_dir'])\n", (17181, 17208), False, 'import os\n'), ((17224, 17278), 'os.path.exists', 'os.path.exists', (["(self.config['weight_dir'] + '_trained')"], {}), "(self.config['weight_dir'] + '_trained')\n", (17238, 17278), False, 'import os\n'), ((17290, 17341), 'os.makedirs', 'os.makedirs', (["(self.config['weight_dir'] + '_trained')"], {}), "(self.config['weight_dir'] + '_trained')\n", (17301, 17341), False, 'import os\n'), ((17406, 17433), 'rosgraph.is_master_online', 'rosgraph.is_master_online', ([], {}), '()\n', (17431, 17433), False, 'import rosgraph\n'), ((17482, 17567), 'roslaunch.parent.ROSLaunchParent', 'ROSLaunchParent', (['"""train_roscore"""', 
'[]'], {'is_core': '(True)', 'num_workers': '(16)', 'verbose': '(True)'}), "('train_roscore', [], is_core=True, num_workers=16, verbose=True\n )\n", (17497, 17567), False, 'from roslaunch.parent import ROSLaunchParent\n'), ((17893, 17916), 'opentamp.src.policy_hooks.vae.vae_trainer.VAETrainer', 'VAETrainer', (['self.config'], {}), '(self.config)\n', (17903, 17916), False, 'from opentamp.src.policy_hooks.vae.vae_trainer import VAETrainer\n'), ((10147, 10433), 'opentamp.src.policy_hooks.mcts.MCTS', 'MCTS', (['self.task_list', 'self.prim_dims', '[]', 'None', 'None', 'condition', 'self.agent', "self.config['branching_factor']", "self.config['num_samples']", "self.config['num_distilled_samples']"], {'soft_decision': '(1.0)', 'C': '(2)', 'max_depth': "self.config['max_tree_depth']", 'explore_depth': '(5)', 'opt_strength': '(0)'}), "(self.task_list, self.prim_dims, [], None, None, condition, self.agent,\n self.config['branching_factor'], self.config['num_samples'], self.\n config['num_distilled_samples'], soft_decision=1.0, C=2, max_depth=self\n .config['max_tree_depth'], explore_depth=5, opt_strength=0)\n", (10151, 10433), False, 'from opentamp.src.policy_hooks.mcts import MCTS\n'), ((18199, 18225), 'opentamp.src.policy_hooks.vae.reward_trainer.RewardTrainer', 'RewardTrainer', (['self.config'], {}), '(self.config)\n', (18212, 18225), False, 'from opentamp.src.policy_hooks.vae.reward_trainer import RewardTrainer\n'), ((18555, 18568), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (18565, 18568), False, 'import time\n'), ((8755, 8789), 'numpy.zeros', 'np.zeros', (['(1, self.symbolic_bound)'], {}), '((1, self.symbolic_bound))\n', (8763, 8789), True, 'import numpy as np\n'), ((9182, 9220), 'numpy.ones', 'np.ones', (['(1, self.dU)'], {'dtype': '"""float64"""'}), "((1, self.dU), dtype='float64')\n", (9189, 9220), True, 'import numpy as np\n'), ((9270, 9292), 'numpy.zeros', 'np.zeros', (['(1, self.dU)'], {}), '((1, self.dU))\n', (9278, 9292), True, 'import numpy as np\n'), 
((18497, 18511), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (18509, 18511), False, 'from datetime import datetime\n')] |
#!/usr/bin/env python
u"""
grace_months_index.py
Written by <NAME> (05/2021)
Creates a file with the start and end days for each dataset
Shows the range of each month for (CSR/GFZ/JPL) (RL04/RL05/RL06)
Shows which months are missing for each dataset as **missing**
INPUTS:
base_dir: Working data directory for GRACE/GRACE-FO data
OPTIONS:
DREL: GRACE/GRACE-FO data release (RL04, RL05, RL06)
MODE: Permissions mode of output index file
OUTPUTS:
GRACE_months.txt
Column 1: GRACE Month
Column 2: Calendar Month and Year
Column 3: CSR RL06 Dates
Column 4: GFZ RL06 Dates
Column 5: GSFC v02.4 Mascon Dates
Column 6: JPL RL06 Dates
COMMAND LINE OPTIONS:
--help: list the command line options
-D X, --directory X: Working GRACE/GRACE-FO data directory
-r X, --release X: GRACE/GRACE-FO Data Releases to run (RL06)
--mode X: permissions mode of output GRACE month file
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python (https://numpy.org)
UPDATE HISTORY:
Updated 05/2021: define int/float precision to prevent deprecation warning
Updated 10/2020: use argparse to set command line parameters
Updated 09/2020: add column for GSFC v02.4 GRACE mascons
Updated 07/2020: added function docstrings
Updated 05/2020 for public release
Updated 05-06/2018: GRACE release 6 (not all processing centers have RL06)
Updated 01/2017: added MODE to set file and directory permissions
Updated 05-06/2016: using __future__ print function. format month lines
Updated 03/2016: using getopt to set RL04 parameter, added new help module
Updated 10/2015: cleaned up and added a few comments
Updated 11/2014: minor updates to code. added main definition
Updated 10/2014: updated comments
Updated 05/2014: added OPTION to not run RL04
Updated 05/2013: added years to month label
Written 07/2012
"""
from __future__ import print_function
import sys
import os
import argparse
import calendar
import numpy as np
def grace_months_index(base_dir, DREL=['RL06','v02.4'], MODE=None):
    """
    Creates a file with the start and end days for each dataset
    Shows the range of each month for (CSR/GFZ/JPL) (RL04/RL05/RL06)
    Shows which months are missing for each dataset as **missing**
    Arguments
    ---------
    base_dir: Working data directory for GRACE/GRACE-FO data
    Keyword arguments
    -----------------
    DREL: GRACE/GRACE-FO data release (RL04, RL05, RL06)
    MODE: Permissions mode of output index file
    """
    #-- NOTE(review): DREL is a mutable default argument; safe here only
    #-- because the list is never mutated inside this function.
    #-- NOTE(review): os.chmod below raises TypeError with the default
    #-- MODE=None; callers are expected to pass an explicit mode.
    #-- Output GRACE months file
    grace_months_file = 'GRACE_months.txt'
    fid = open(os.path.join(base_dir,grace_months_file), 'w')
    #-- Initial parameters
    #-- processing centers
    PROC = ['CSR', 'GFZ', 'GSFC', 'JPL']
    #-- read from GSM datasets
    DSET = 'GSM'
    #-- maximum month of the datasets
    #-- checks for the maximum month between processing centers
    max_mon = 0
    #-- contain the information for each dataset
    var_info = {}
    #-- Looping through data releases first (all RL04 then all RL05)
    #-- for each considered data release (RL04,RL05)
    for rl in DREL:
        #-- for each processing centers (CSR, GFZ, JPL)
        for pr in PROC:
            #-- Setting the data directory for processing center and release
            grace_dir = os.path.join(base_dir, pr, rl, DSET)
            #-- read GRACE date ascii file
            #-- file created in read_grace.py or grace_dates.py
            grace_date_file = '{0}_{1}_DATES.txt'.format(pr,rl)
            if os.access(os.path.join(grace_dir,grace_date_file), os.F_OK):
                #-- skip the header line
                date_input = np.loadtxt(os.path.join(grace_dir,grace_date_file),
                    skiprows=1)
                #-- number of months
                nmon = np.shape(date_input)[0]
                #-- Setting the dictionary key e.g. 'CSR_RL04'
                var_name = '{0}_{1}'.format(pr,rl)
                #-- Creating a python dictionary for each dataset with parameters:
                #-- month #, start year, start day, end year, end day
                #-- Purpose is to get all of the dates loaded for each dataset
                #-- Adding data to dictionary for data processing and release
                var_info[var_name] = {}
                #-- allocate for output variables
                #-- NOTE(review): these np.zeros allocations are immediately
                #-- overwritten by the enumerate loop below; they are redundant.
                var_info[var_name]['mon'] = np.zeros((nmon),dtype=np.int64)
                var_info[var_name]['styr'] = np.zeros((nmon),dtype=np.int64)
                var_info[var_name]['stday'] = np.zeros((nmon),dtype=np.int64)
                var_info[var_name]['endyr'] = np.zeros((nmon),dtype=np.int64)
                var_info[var_name]['endday'] = np.zeros((nmon),dtype=np.int64)
                #-- place output variables in dictionary
                for i,key in enumerate(['mon','styr','stday','endyr','endday']):
                    #-- first column is date in decimal form (start at 1 not 0)
                    var_info[var_name][key] = date_input[:,i+1].astype(np.int64)
                #-- Finding the maximum month measured
                if (var_info[var_name]['mon'].max() > max_mon):
                    #-- if the maximum month in this dataset is greater
                    #-- than the previously read datasets
                    max_mon = np.int64(var_info[var_name]['mon'].max())
    #-- sort datasets alphanumerically
    var_name = sorted(var_info.keys())
    txt = ''.join(['{0:^21}'.format(d) for d in var_name])
    #-- printing header to file
    print('{0:^11} {1}'.format('MONTH',txt),file=fid)
    #-- for each possible month
    #-- GRACE starts at month 004 (April 2002)
    #-- max_mon+1 to include max_mon
    for m in range(4, max_mon+1):
        #-- finding the month name e.g. Apr
        #-- GRACE month 1 corresponds to January 2002
        calendar_year = 2002 + (m-1)//12
        calendar_month = (m-1) % 12 + 1
        month_string = calendar.month_abbr[calendar_month]
        #-- create list object for output string
        output_string = []
        #-- for each processing center and data release
        for var in var_name:
            #-- find if the month of data exists
            #-- exists will be greater than 0 if there is a match
            exists = np.count_nonzero(var_info[var]['mon'] == m)
            if (exists != 0):
                #-- if there is a matching month
                #-- indice of matching month
                ind, = np.nonzero(var_info[var]['mon'] == m)
                #-- NOTE(review): the single-target unpackings below assume
                #-- exactly one matching month per dataset; duplicate months
                #-- in a DATES file would raise ValueError here.
                #-- start date
                st_yr, = var_info[var]['styr'][ind]
                st_day, = var_info[var]['stday'][ind]
                #-- end date
                end_yr, = var_info[var]['endyr'][ind]
                end_day, = var_info[var]['endday'][ind]
                #-- output string is the date range
                #-- string format: 2002_102--2002_120
                output_string.append('{0:4d}_{1:03d}--{2:4d}_{3:03d}'.format(
                    st_yr, st_day, end_yr, end_day))
            else:
                #-- if there is no matching month = missing
                output_string.append(' ** missing ** ')
        #-- create single string with output string components
        #-- formatting the strings to be 20 characters in length
        data_string = ' '.join(['{0:>20}'.format(s) for s in output_string])
        #-- printing data line to file
        args = (m, month_string, calendar_year, data_string)
        print('{0:03d} {1:>3}{2:4d} {3}'.format(*args), file=fid)
    #-- close months file
    fid.close()
    #-- set the permissions level of the output file
    os.chmod(os.path.join(base_dir,grace_months_file), MODE)
#-- This is the main part of the program that calls the individual modules
def main():
    """Parse the command-line options and build the GRACE months index."""
    parser = argparse.ArgumentParser(
        description="""Creates a file with the start and end days for
            each month of GRACE/GRACE-FO data
            """
    )
    #-- command line parameters
    #-- working data directory (expanded to an absolute path)
    parser.add_argument('--directory','-D',
        type=lambda path: os.path.abspath(os.path.expanduser(path)),
        default=os.getcwd(),
        help='Working data directory')
    #-- GRACE/GRACE-FO data release
    parser.add_argument('--release','-r',
        metavar='DREL', type=str, nargs='+',
        default=['RL06','v02.4'],
        help='GRACE/GRACE-FO Data Release')
    #-- permissions mode of the local directories and files (number in octal)
    parser.add_argument('--mode','-M',
        type=lambda x: int(x,base=8), default=0o775,
        help='permissions mode of output files')
    args, _unknown = parser.parse_known_args()
    #-- run GRACE/GRACE-FO months program
    grace_months_index(args.directory, DREL=args.release, MODE=args.mode)
#-- run main program when executed as a script (not on import)
if __name__ == '__main__':
    main()
| [
"argparse.ArgumentParser",
"os.path.join",
"os.getcwd",
"numpy.count_nonzero",
"numpy.zeros",
"numpy.nonzero",
"numpy.shape",
"os.path.expanduser"
] | [((7806, 7964), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Creates a file with the start and end days for\n each month of GRACE/GRACE-FO data\n """'}), '(description=\n """Creates a file with the start and end days for\n each month of GRACE/GRACE-FO data\n """\n )\n', (7829, 7964), False, 'import argparse\n'), ((2636, 2677), 'os.path.join', 'os.path.join', (['base_dir', 'grace_months_file'], {}), '(base_dir, grace_months_file)\n', (2648, 2677), False, 'import os\n'), ((7598, 7639), 'os.path.join', 'os.path.join', (['base_dir', 'grace_months_file'], {}), '(base_dir, grace_months_file)\n', (7610, 7639), False, 'import os\n'), ((3336, 3372), 'os.path.join', 'os.path.join', (['base_dir', 'pr', 'rl', 'DSET'], {}), '(base_dir, pr, rl, DSET)\n', (3348, 3372), False, 'import os\n'), ((6239, 6282), 'numpy.count_nonzero', 'np.count_nonzero', (["(var_info[var]['mon'] == m)"], {}), "(var_info[var]['mon'] == m)\n", (6255, 6282), True, 'import numpy as np\n'), ((8155, 8166), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8164, 8166), False, 'import os\n'), ((3569, 3609), 'os.path.join', 'os.path.join', (['grace_dir', 'grace_date_file'], {}), '(grace_dir, grace_date_file)\n', (3581, 3609), False, 'import os\n'), ((4418, 4448), 'numpy.zeros', 'np.zeros', (['nmon'], {'dtype': 'np.int64'}), '(nmon, dtype=np.int64)\n', (4426, 4448), True, 'import numpy as np\n'), ((4495, 4525), 'numpy.zeros', 'np.zeros', (['nmon'], {'dtype': 'np.int64'}), '(nmon, dtype=np.int64)\n', (4503, 4525), True, 'import numpy as np\n'), ((4573, 4603), 'numpy.zeros', 'np.zeros', (['nmon'], {'dtype': 'np.int64'}), '(nmon, dtype=np.int64)\n', (4581, 4603), True, 'import numpy as np\n'), ((4651, 4681), 'numpy.zeros', 'np.zeros', (['nmon'], {'dtype': 'np.int64'}), '(nmon, dtype=np.int64)\n', (4659, 4681), True, 'import numpy as np\n'), ((4730, 4760), 'numpy.zeros', 'np.zeros', (['nmon'], {'dtype': 'np.int64'}), '(nmon, dtype=np.int64)\n', (4738, 4760), True, 'import numpy as 
np\n'), ((6430, 6467), 'numpy.nonzero', 'np.nonzero', (["(var_info[var]['mon'] == m)"], {}), "(var_info[var]['mon'] == m)\n", (6440, 6467), True, 'import numpy as np\n'), ((3701, 3741), 'os.path.join', 'os.path.join', (['grace_dir', 'grace_date_file'], {}), '(grace_dir, grace_date_file)\n', (3713, 3741), False, 'import os\n'), ((3834, 3854), 'numpy.shape', 'np.shape', (['date_input'], {}), '(date_input)\n', (3842, 3854), True, 'import numpy as np\n'), ((8115, 8136), 'os.path.expanduser', 'os.path.expanduser', (['p'], {}), '(p)\n', (8133, 8136), False, 'import os\n')] |
import os
import sys, stat
import logging
import torch
import numpy as np
def transform_list_to_tensor(model_params_list):
    """Convert every value of *model_params_list* (in place) from a nested
    list into a float32 torch tensor, and return the same dict."""
    for name in model_params_list:
        as_array = np.asarray(model_params_list[name])
        model_params_list[name] = torch.from_numpy(as_array).float()
    return model_params_list
def transform_tensor_to_list(model_params):
    """Convert every tensor value of *model_params* (in place) into a
    nested Python list, and return the same dict."""
    for name, tensor in list(model_params.items()):
        model_params[name] = tensor.detach().numpy().tolist()
    return model_params
def post_complete_message_to_sweep_process(args):
    """Signal the sweep process that training finished by writing a
    completion message (including the run arguments) to a named pipe.

    Args:
        args: run configuration; its str() form is appended to the message.
    """
    # NOTE(review): hard-coded, user-specific path -- consider making it
    # configurable or deriving it from the run config.
    pipe_path = "/home/zengrf/fedml/fedml_performance/tmp/"
    if not os.path.exists(pipe_path):
        os.mkdir(pipe_path)
    pipe_path_file = pipe_path+"fedml"
    if not os.path.exists(pipe_path_file):
        os.mkfifo(pipe_path_file)
    logging.info(pipe_path)
    # Open read-write so this end does not block waiting for a reader.
    # Bug fix: the original passed the DECIMAL literal 777 as the mode
    # (equal to 0o1411 -- sticky bit plus odd permissions); the octal
    # 0o777 was clearly intended.
    pipe_fd = os.open(pipe_path_file, os.O_RDWR|os.O_CREAT, 0o777)
    logging.info("************************")
    with os.fdopen(pipe_fd, 'w') as pipe:
        pipe.write("training is finished! \n%s\n" % (str(args)))
| [
"os.path.exists",
"os.open",
"numpy.asarray",
"os.mkdir",
"os.mkfifo",
"os.fdopen",
"logging.info"
] | [((754, 777), 'logging.info', 'logging.info', (['pipe_path'], {}), '(pipe_path)\n', (766, 777), False, 'import logging\n'), ((850, 902), 'os.open', 'os.open', (['pipe_path_file', '(os.O_RDWR | os.O_CREAT)', '(777)'], {}), '(pipe_path_file, os.O_RDWR | os.O_CREAT, 777)\n', (857, 902), False, 'import os\n'), ((905, 945), 'logging.info', 'logging.info', (['"""************************"""'], {}), "('************************')\n", (917, 945), False, 'import logging\n'), ((578, 603), 'os.path.exists', 'os.path.exists', (['pipe_path'], {}), '(pipe_path)\n', (592, 603), False, 'import os\n'), ((613, 632), 'os.mkdir', 'os.mkdir', (['pipe_path'], {}), '(pipe_path)\n', (621, 632), False, 'import os\n'), ((683, 713), 'os.path.exists', 'os.path.exists', (['pipe_path_file'], {}), '(pipe_path_file)\n', (697, 713), False, 'import os\n'), ((723, 748), 'os.mkfifo', 'os.mkfifo', (['pipe_path_file'], {}), '(pipe_path_file)\n', (732, 748), False, 'import os\n'), ((955, 978), 'os.fdopen', 'os.fdopen', (['pipe_fd', '"""w"""'], {}), "(pipe_fd, 'w')\n", (964, 978), False, 'import os\n'), ((212, 244), 'numpy.asarray', 'np.asarray', (['model_params_list[k]'], {}), '(model_params_list[k])\n', (222, 244), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
#
# Copyright © 2009-2012 CEA
# <NAME>
# Licensed under the terms of the CECILL License
# (see guiqwt/__init__.py for details)
# pylint: disable=C0103
"""
guiqwt.io
---------
The `io` module provides input/output helper functions:
* :py:func:`guiqwt.io.imread`: load an image (.png, .tiff,
.dicom, etc.) and return its data as a NumPy array
* :py:func:`guiqwt.io.imwrite`: save an array to an image file
* :py:func:`guiqwt.io.load_items`: load plot items from HDF5
* :py:func:`guiqwt.io.save_items`: save plot items to HDF5
Reference
~~~~~~~~~
.. autofunction:: imread
.. autofunction:: imwrite
.. autofunction:: load_items
.. autofunction:: save_items
"""
from __future__ import print_function
import sys
import re
import os.path as osp
import numpy as np
from qtpy.py3compat import is_text_string, to_text_string
# Local imports
from guiqwt.config import _
def scale_data_to_dtype(data, dtype):
    """Scale array `data` to fit datatype `dtype` dynamic range
    (integer dtypes only, via np.iinfo)
    WARNING: modifies data in place"""
    info = np.iinfo(dtype)
    dmin = data.min()
    dmax = data.max()
    data -= dmin
    # Bug fix: guard against constant arrays (dmax == dmin), which
    # previously caused a division by zero.
    if dmax != dmin:
        data *= float(info.max - info.min) / (dmax - dmin)
    data += float(info.min)
    return np.array(data, dtype)
def eliminate_outliers(data, percent=2.0, bins=256):
    """Eliminate data histogram outliers"""
    from guiqwt.histogram import hist_range_threshold

    counts, edges = np.histogram(data, bins)
    vmin, vmax = hist_range_threshold(counts, edges, percent)
    return data.clip(vmin, vmax)
# ===============================================================================
# I/O File type definitions
# ===============================================================================
class FileType(object):
    """Filetype object:
     * `name` : description of filetype,
     * `read_func`, `write_func` : I/O callbacks,
     * `extensions`: filename extensions (with a dot!) or filenames,
     (list, tuple or space-separated string)
     * `data_types`: supported data types"""

    def __init__(self, name, extensions, read_func=None, write_func=None,
                 data_types=None, requires_template=False):
        self.name = name
        if is_text_string(extensions):
            extensions = extensions.split()
        # Normalize each entry to its ".ext" suffix (leading space makes
        # osp.splitext treat bare names like ".dcm" uniformly)
        self.extensions = [osp.splitext(" " + ext)[1] for ext in extensions]
        self.read_func = read_func
        self.write_func = write_func
        self.data_types = data_types
        self.requires_template = requires_template

    def matches(self, action, dtype, template):
        """Return True if file type matches passed data type and template
        (or if dtype is None)"""
        assert action in ("load", "save")
        ok = dtype is None or self.data_types is None or dtype in self.data_types
        if action == "save" and self.requires_template and template is None:
            ok = False
        return ok

    @property
    def wcards(self):
        """Wildcard string for the file dialog, e.g. '*.tif *.tiff'"""
        return "*" + (" *".join(self.extensions))

    def filters(self, action, dtype, template):
        """Return this type's file-dialog filter line, or '' if it does
        not match the requested action/dtype/template"""
        assert action in ("load", "save")
        if not self.matches(action, dtype, template):
            return ""
        return "\n%s (%s)" % (self.name, self.wcards)
class ImageIOHandler(object):
    """I/O handler: regroup all FileType objects"""

    def __init__(self):
        self.filetypes = []

    def allfilters(self, action, dtype, template):
        """Return the combined 'All supported files' filter line"""
        matching = [ftype.wcards for ftype in self.filetypes
                    if ftype.matches(action, dtype, template)]
        return "%s (%s)" % (_("All supported files"), " ".join(matching))

    def get_filters(self, action, dtype=None, template=None):
        """Return file type filters for `action` (string: 'save' or 'load'),
        `dtype` data type (None: all data types), and `template` (True if save
        function requires a template (e.g. DICOM files), False otherwise)"""
        filters = self.allfilters(action, dtype, template)
        for ftype in self.filetypes:
            filters += ftype.filters(action, dtype, template)
        return filters

    def add(self, name, extensions, read_func=None, write_func=None,
            import_func=None, data_types=None, requires_template=None):
        """Register a new FileType; silently skipped if `import_func`
        raises ImportError (i.e. the backing library is unavailable)"""
        if import_func is not None:
            try:
                import_func()
            except ImportError:
                return
        assert read_func is not None or write_func is not None
        self.filetypes.append(
            FileType(name, extensions, read_func=read_func,
                     write_func=write_func, data_types=data_types,
                     requires_template=requires_template))

    def _get_filetype(self, ext):
        """Return FileType object associated to file extension `ext`"""
        ext_lower = ext.lower()
        for ftype in self.filetypes:
            if ext_lower in ftype.extensions:
                return ftype
        raise RuntimeError("Unsupported file type: '%s'" % ext)

    def get_readfunc(self, ext):
        """Return read function associated to file extension `ext`"""
        ftype = self._get_filetype(ext)
        if ftype.read_func is None:
            raise RuntimeError("Unsupported file type (read): '%s'" % ext)
        return ftype.read_func

    def get_writefunc(self, ext):
        """Return write function associated to file extension `ext`"""
        ftype = self._get_filetype(ext)
        if ftype.write_func is None:
            raise RuntimeError("Unsupported file type (write): '%s'" % ext)
        return ftype.write_func
# Global registry instance used by imread/imwrite and the registrations below
iohandler = ImageIOHandler()
# ==============================================================================
# tifffile-based Private I/O functions
# ==============================================================================
def _imread_tiff(filename):
    """Open a TIFF image and return a NumPy array"""
    try:
        import tifffile
    except ImportError:
        # tifffile unavailable: fall back on the PIL-based reader
        return _imread_pil(filename)
    return tifffile.imread(filename)
def _imwrite_tiff(filename, arr):
    """Save a NumPy array to a TIFF file"""
    try:
        import tifffile
        # Bug fix: the original called `tifffile.imread(filename, arr)`
        # here, i.e. tried to *read* the file inside the write function;
        # `imsave` is the tifffile write entry point.
        return tifffile.imsave(filename, arr)
    except ImportError:
        # tifffile unavailable: fall back on the PIL-based writer
        return _imwrite_pil(filename, arr)
# ==============================================================================
# PIL-based Private I/O functions
# ==============================================================================
# Byte-order prefix for NumPy dtype strings, matching the host endianness
if sys.byteorder == "little":
    _ENDIAN = "<"
else:
    _ENDIAN = ">"
# Map PIL image mode -> (NumPy dtype string, channel count);
# a channel count of None denotes a single-band image
DTYPES = {
    "1": ("|b1", None),
    "L": ("|u1", None),
    "I": ("%si4" % _ENDIAN, None),
    "F": ("%sf4" % _ENDIAN, None),
    "I;16": ("%su2" % _ENDIAN, None),
    "I;16B": ("%su2" % _ENDIAN, None),
    "I;16S": ("%si2" % _ENDIAN, None),
    "P": ("|u1", None),
    "RGB": ("|u1", 3),
    "RGBX": ("|u1", 4),
    "RGBA": ("|u1", 4),
    "CMYK": ("|u1", 4),
    "YCbCr": ("|u1", 4),
}
def _imread_pil(filename, to_grayscale=False):
    """Open image with PIL and return a NumPy array

    If `to_grayscale` is True, RGB(A/X) images are converted to mode "L".
    Returns an array shaped (rows, cols) or (rows, cols, channels).
    """
    import PIL.Image
    import PIL.TiffImagePlugin  # py2exe
    # Register the 16-bit little-endian TIFF layout so PIL opens it as "I;16"
    PIL.TiffImagePlugin.OPEN_INFO[(PIL.TiffImagePlugin.II, 0, 1, 1, (16,), ())] = (
        "I;16",
        "I;16",
    )
    img = PIL.Image.open(filename)
    if img.mode in ("CMYK", "YCbCr"):
        # Converting to RGB
        img = img.convert("RGB")
    if to_grayscale and img.mode in ("RGB", "RGBA", "RGBX"):
        # Converting to grayscale
        img = img.convert("L")
    elif "A" in img.mode or (img.mode == "P" and "transparency" in img.info):
        # keep the alpha channel / expand palette transparency into RGBA
        img = img.convert("RGBA")
    elif img.mode == "P":
        # palette image without transparency: expand to RGB
        img = img.convert("RGB")
    try:
        dtype, extra = DTYPES[img.mode]
    except KeyError:
        raise RuntimeError("%s mode is not supported" % img.mode)
    # PIL's size is (width, height); NumPy wants (rows, cols)
    shape = (img.size[1], img.size[0])
    if extra is not None:
        shape += (extra,)
    try:
        return np.array(img, dtype=np.dtype(dtype)).reshape(shape)
    except SystemError:
        # NOTE(review): fallback via getdata() for modes where np.array(img)
        # fails -- presumably an older-PIL limitation; confirm before removing.
        return np.array(img.getdata(), dtype=np.dtype(dtype)).reshape(shape)
def _imwrite_pil(filename, arr):
    """Write `arr` NumPy array to `filename` using PIL

    The PIL mode is inferred from `arr`'s dtype (and, for multi-channel
    data, its last-axis length) by scanning the DTYPES table.
    """
    import PIL.Image
    import PIL.TiffImagePlugin  # py2exe
    # Find the first PIL mode whose (dtype, channel count) matches `arr`
    for mode, (dtype_str, extra) in list(DTYPES.items()):
        if dtype_str == arr.dtype.str:
            if extra is None:  # mode for grayscale images
                if len(arr.shape[2:]) > 0:
                    continue  # not suitable for RGB(A) images
                else:
                    break  # this is it!
            else:  # mode for RGB(A) images
                if len(arr.shape[2:]) == 0:
                    continue  # not suitable for grayscale images
                elif arr.shape[-1] == extra:
                    break  # this is it!
    else:
        # for/else: the loop never hit `break`, so no PIL mode matched
        raise RuntimeError("Cannot determine PIL data type")
    img = PIL.Image.fromarray(arr, mode)
    img.save(filename)
# ==============================================================================
# DICOM Private I/O functions
# ==============================================================================
def _import_dcm():
    """DICOM Import function (checking for required libraries):
    DICOM support requires library `pydicom`"""
    import logging

    # Silence pydicom while probing for it, then restore a sane level
    logger = logging.getLogger("pydicom")
    logger.setLevel(logging.CRITICAL)
    try:
        from pydicom import dicomio  # pydicom 1.0  # analysis:ignore
    except ImportError:
        import dicom as dicomio  # pydicom 0.9  # analysis:ignore
    logger.setLevel(logging.WARNING)
def _imread_dcm(filename):
    """Open DICOM image with pydicom and return a NumPy array"""
    try:
        # pydicom 1.0
        from pydicom import dicomio
    except ImportError:
        # pydicom 0.9
        import dicom as dicomio
    dcm = dicomio.read_file(filename, force=True)
    # **********************************************************************
    # The following is necessary until pydicom numpy support is improved:
    # (after that, a simple: 'arr = dcm.PixelArray' will work the same)
    # Build a dtype name such as "uint16" from the DICOM representation tags
    format_str = "%sint%s" % (("u", "")[dcm.PixelRepresentation], dcm.BitsAllocated)
    try:
        dtype = np.dtype(format_str)
    except TypeError:
        raise TypeError(
            "Data type not understood by NumPy: "
            "PixelRepresentation=%d, BitsAllocated=%d"
            % (dcm.PixelRepresentation, dcm.BitsAllocated)
        )
    # NOTE(review): np.fromstring is deprecated in modern NumPy;
    # np.frombuffer is the drop-in replacement.
    arr = np.fromstring(dcm.PixelData, dtype)
    try:
        # pydicom 0.9.3:
        dcm_is_little_endian = dcm.isLittleEndian
    except AttributeError:
        # pydicom 0.9.4:
        dcm_is_little_endian = dcm.is_little_endian
    # Swap bytes in place if file endianness differs from the host's
    if dcm_is_little_endian != (sys.byteorder == "little"):
        arr.byteswap(True)
    # Reshape the flat pixel buffer according to the DICOM geometry tags
    if hasattr(dcm, "NumberofFrames") and dcm.NumberofFrames > 1:
        if dcm.SamplesperPixel > 1:
            arr = arr.reshape(
                dcm.SamplesperPixel, dcm.NumberofFrames, dcm.Rows, dcm.Columns
            )
        else:
            arr = arr.reshape(dcm.NumberofFrames, dcm.Rows, dcm.Columns)
    else:
        if dcm.SamplesperPixel > 1:
            if dcm.BitsAllocated == 8:
                arr = arr.reshape(dcm.SamplesperPixel, dcm.Rows, dcm.Columns)
            else:
                raise NotImplementedError(
                    "This code only handles "
                    "SamplesPerPixel > 1 if Bits Allocated = 8"
                )
        else:
            arr = arr.reshape(dcm.Rows, dcm.Columns)
    # **********************************************************************
    return arr
def _imwrite_dcm(filename, arr, template=None):
    """Save a numpy array `arr` into a DICOM image file `filename`
    based on DICOM structure `template`"""
    # Note: due to IOHandler formalism, `template` has to be a keyword argument
    assert template is not None, (
        "The `template` keyword argument is required to save DICOM files\n"
        "(that is the template DICOM structure object)"
    )
    # Describe the pixel data layout from the array's integer dtype
    infos = np.iinfo(arr.dtype)
    template.BitsAllocated = infos.bits
    template.BitsStored = infos.bits
    template.HighBit = infos.bits - 1
    # PixelRepresentation: 0 for unsigned ("u"), 1 for signed ("i")
    template.PixelRepresentation = ("u", "i").index(infos.kind)
    data_vr = ("US", "SS")[template.PixelRepresentation]
    template.Rows = arr.shape[0]
    template.Columns = arr.shape[1]
    # Tags 0x00280106/0x00280107 (Smallest/Largest Image Pixel Value):
    # their VR must match the signedness of the data
    template.SmallestImagePixelValue = int(arr.min())
    template[0x00280106].VR = data_vr
    template.LargestImagePixelValue = int(arr.max())
    template[0x00280107].VR = data_vr
    if not template.PhotometricInterpretation.startswith("MONOCHROME"):
        template.PhotometricInterpretation = "MONOCHROME1"
    # Tag 0x7FE00010 (Pixel Data), stored as raw bytes (VR "OB")
    # NOTE(review): arr.tostring() is deprecated in NumPy; tobytes() is
    # the modern equivalent.
    template.PixelData = arr.tostring()
    template[0x7FE00010].VR = "OB"
    template.save_as(filename)
# ==============================================================================
# Text files Private I/O functions
# ==============================================================================
def _imread_txt(filename):
"""Open text file image and return a NumPy array"""
for delimiter in ("\t", ",", " ", ";"):
try:
return np.loadtxt(filename, delimiter=delimiter)
except ValueError:
continue
else:
raise
def _imwrite_txt(filename, arr):
"""Write `arr` NumPy array to text file `filename`"""
if arr.dtype in (np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32):
fmt = "%d"
else:
fmt = "%.18e"
ext = osp.splitext(filename)[1]
if ext.lower() in (".txt", ".asc", ""):
np.savetxt(filename, arr, fmt=fmt)
elif ext.lower() == ".csv":
np.savetxt(filename, arr, fmt=fmt, delimiter=",")
# ==============================================================================
# Registering I/O functions
# ==============================================================================
# PNG: PIL round-trip, 8-bit and 16-bit unsigned integer data
iohandler.add(
    _("PNG files"),
    "*.png",
    read_func=_imread_pil,
    write_func=_imwrite_pil,
    data_types=(np.uint8, np.uint16),
)
# TIFF: tifffile when available, PIL fallback otherwise
iohandler.add(
    _("TIFF files"), "*.tif *.tiff", read_func=_imread_tiff, write_func=_imwrite_tiff
)
# JPEG/GIF: 8-bit data only
iohandler.add(
    _("8-bit images"),
    "*.jpg *.gif",
    read_func=_imread_pil,
    write_func=_imwrite_pil,
    data_types=(np.uint8,),
)
# Raw NumPy arrays (.npy)
iohandler.add(_("NumPy arrays"), "*.npy", read_func=np.load, write_func=np.save)
# Delimited text files
iohandler.add(
    _("Text files"), "*.txt *.csv *.asc", read_func=_imread_txt, write_func=_imwrite_txt
)
# DICOM: registered only if pydicom imports (import_func);
# saving requires a template DICOM dataset
iohandler.add(
    _("DICOM files"),
    "*.dcm",
    read_func=_imread_dcm,
    write_func=_imwrite_dcm,
    import_func=_import_dcm,
    data_types=(np.int8, np.uint8, np.int16, np.uint16),
    requires_template=True,
)
# ==============================================================================
# Generic image read/write functions
# ==============================================================================
def imread(fname, ext=None, to_grayscale=False):
    """Return a NumPy array from an image filename `fname`.
    If `to_grayscale` is True, convert RGB images to grayscale
    The `ext` (optional) argument is a string that specifies the file extension
    which defines the input format: when not specified, the input format is
    guessed from filename."""
    if not is_text_string(fname):
        fname = to_text_string(fname)  # fname may be a QString instance
    if ext is None:
        _base, ext = osp.splitext(fname)
    arr = iohandler.get_readfunc(ext)(fname)
    if to_grayscale and arr.ndim == 3:
        # average the color planes to obtain a grayscale image
        arr = arr[..., :4].mean(axis=2)
    return arr
def imwrite(fname, arr, ext=None, dtype=None, max_range=None, **kwargs):
    """Save a NumPy array to an image filename `fname`.
    The `ext` (optional) argument is a string that specifies the file extension
    which defines the output format: when not specified, the output format is
    guessed from filename.
    If `max_range` is True, array data is scaled to fit the `dtype` (or data
    type itself if `dtype` is None) dynamic range
    Warning: option `max_range` changes data in place"""
    if not is_text_string(fname):
        fname = to_text_string(fname)  # fname may be a QString instance
    if ext is None:
        _base, ext = osp.splitext(fname)
    if max_range:
        target_dtype = arr.dtype if dtype is None else dtype
        arr = scale_data_to_dtype(arr, target_dtype)
    iohandler.get_writefunc(ext)(fname, arr, **kwargs)
# ==============================================================================
# Deprecated functions
# ==============================================================================
def imagefile_to_array(filename, to_grayscale=False):
    """Deprecated wrapper around :py:func:`imread`.

    Return a NumPy array read from image file `filename`; converts RGB
    images to grayscale when `to_grayscale` is True.
    """
    print("io.imagefile_to_array is deprecated: use io.imread instead", file=sys.stderr)
    return imread(filename, to_grayscale=to_grayscale)
def array_to_imagefile(arr, filename, mode=None, max_range=False):
    """Deprecated wrapper around :py:func:`imwrite`.

    Save NumPy array `arr` into image file `filename`.
    Warning: option 'max_range' changes data in place.
    """
    print(
        "io.array_to_imagefile is deprecated: use io.imwrite instead", file=sys.stderr
    )
    return imwrite(filename, arr, mode=mode, max_range=max_range)
# ==============================================================================
# guiqwt plot items I/O
# ==============================================================================
# Registry of all serializable plot item class names, and the mapping from
# module name to the class names that module defines.
SERIALIZABLE_ITEMS = []
ITEM_MODULES = {}


def register_serializable_items(modname, classnames):
    """Register serializable item classes under the module that defines them."""
    global SERIALIZABLE_ITEMS, ITEM_MODULES
    SERIALIZABLE_ITEMS.extend(classnames)
    ITEM_MODULES[modname] = ITEM_MODULES.get(modname, []) + classnames
# Register every serializable plot item class shipped with guiqwt, grouped by
# the module that defines it: curves, images, shapes, annotations, labels.
for _modname, _classnames in (
    ("guiqwt.curve", ["CurveItem", "PolygonMapItem", "ErrorBarCurveItem"]),
    ("guiqwt.image", [
        "RawImageItem",
        "ImageItem",
        "TrImageItem",
        "XYImageItem",
        "RGBImageItem",
        "MaskedImageItem",
    ]),
    ("guiqwt.shapes", [
        "PolygonShape",
        "PointShape",
        "SegmentShape",
        "RectangleShape",
        "ObliqueRectangleShape",
        "EllipseShape",
        "Axes",
    ]),
    ("guiqwt.annotations", [
        "AnnotatedPoint",
        "AnnotatedSegment",
        "AnnotatedRectangle",
        "AnnotatedObliqueRectangle",
        "AnnotatedEllipse",
        "AnnotatedCircle",
    ]),
    ("guiqwt.label", ["LabelItem", "LegendBoxItem", "SelectedLegendBoxItem"]),
):
    register_serializable_items(_modname, _classnames)
def item_class_from_name(name):
    """Return the plot item class registered under *name*, importing its
    defining module on demand."""
    global SERIALIZABLE_ITEMS, ITEM_MODULES
    assert name in SERIALIZABLE_ITEMS, "Unknown class %r" % name
    for modname, registered in list(ITEM_MODULES.items()):
        if name not in registered:
            continue
        module = __import__(modname, fromlist=[name])
        return getattr(module, name)
def item_name_from_object(obj):
    """Return the registered class name for the plot item instance *obj*."""
    return type(obj).__name__
def save_item(writer, group_name, item):
    """Serialize `item` into HDF5 group `group_name`.

    A None item is recorded explicitly (write_none) so that load_item()
    can restore the empty slot later."""
    with writer.group(group_name):
        if item is None:
            writer.write_none()
            return
        item.serialize(writer)
        with writer.group("item_class_name"):
            writer.write_str(item_name_from_object(item))
def load_item(reader, group_name):
    """Re-create the plot item stored in HDF5 group `group_name`.

    Returns None when save_item() recorded a None placeholder instead of a
    real item."""
    with reader.group(group_name):
        with reader.group("item_class_name"):
            try:
                klass_name = reader.read_str()
            except ValueError:
                # a None placeholder was saved instead of a real item
                return None
        item = item_class_from_name(klass_name)()
        item.deserialize(reader)
        return item
def save_items(writer, items):
    """Save items to an HDF5 file:
        * writer: :py:class:`guidata.hdf5io.HDF5Writer` object
        * items: serializable plot items

    Each item is stored under a unique "ClassName_NNN" group; the encoded
    group names are recorded under "plot_items" for load_items()."""
    seen = {}
    encoded_names = []

    def _unique_name(item):
        # "ClassName_001", "ClassName_002", ... numbered per class
        base = item_name_from_object(item)
        seen[base] = seen.get(base, 0) + 1
        label = "%s_%03d" % (base, seen[base])
        encoded_names.append(label.encode("utf-8"))
        return label

    for item in items:
        with writer.group(_unique_name(item)):
            item.serialize(writer)
    with writer.group("plot_items"):
        writer.write_sequence(encoded_names)
def load_items(reader):
    """Load the plot items recorded by save_items():
        * reader: :py:class:`guidata.hdf5io.HDF5Reader` object"""
    with reader.group("plot_items"):
        stored_names = reader.read_sequence()
    items = []
    for raw_name in stored_names:
        # group names look like b"ClassName_001": strip the numeric suffix
        klass_name = re.match(
            r"([A-Z]+[A-Za-z0-9\_]*)\_([0-9]*)", raw_name.decode()
        ).group(1)
        item = item_class_from_name(klass_name)()
        with reader.group(raw_name):
            item.deserialize(reader)
        items.append(item)
    return items
if __name__ == "__main__":
# Test if items can all be constructed from their Python module
for name in SERIALIZABLE_ITEMS:
print(name, "-->", item_class_from_name(name))
| [
"logging.getLogger",
"numpy.histogram",
"tifffile.imread",
"dicom.read_file",
"qtpy.py3compat.to_text_string",
"os.path.splitext",
"numpy.iinfo",
"guiqwt.config._",
"numpy.array",
"numpy.loadtxt",
"qtpy.py3compat.is_text_string",
"numpy.savetxt",
"guiqwt.histogram.hist_range_threshold",
"n... | [((1074, 1089), 'numpy.iinfo', 'np.iinfo', (['dtype'], {}), '(dtype)\n', (1082, 1089), True, 'import numpy as np\n'), ((1245, 1266), 'numpy.array', 'np.array', (['data', 'dtype'], {}), '(data, dtype)\n', (1253, 1266), True, 'import numpy as np\n'), ((1388, 1412), 'numpy.histogram', 'np.histogram', (['data', 'bins'], {}), '(data, bins)\n', (1400, 1412), True, 'import numpy as np\n'), ((1485, 1531), 'guiqwt.histogram.hist_range_threshold', 'hist_range_threshold', (['hist', 'bin_edges', 'percent'], {}), '(hist, bin_edges, percent)\n', (1505, 1531), False, 'from guiqwt.histogram import hist_range_threshold\n'), ((9524, 9552), 'logging.getLogger', 'logging.getLogger', (['"""pydicom"""'], {}), "('pydicom')\n", (9541, 9552), False, 'import logging\n'), ((10060, 10099), 'dicom.read_file', 'dicomio.read_file', (['filename'], {'force': '(True)'}), '(filename, force=True)\n', (10077, 10099), True, 'import dicom as dicomio\n'), ((10685, 10720), 'numpy.fromstring', 'np.fromstring', (['dcm.PixelData', 'dtype'], {}), '(dcm.PixelData, dtype)\n', (10698, 10720), True, 'import numpy as np\n'), ((12245, 12264), 'numpy.iinfo', 'np.iinfo', (['arr.dtype'], {}), '(arr.dtype)\n', (12253, 12264), True, 'import numpy as np\n'), ((14113, 14127), 'guiqwt.config._', '_', (['"""PNG files"""'], {}), "('PNG files')\n", (14114, 14127), False, 'from guiqwt.config import _\n'), ((14257, 14272), 'guiqwt.config._', '_', (['"""TIFF files"""'], {}), "('TIFF files')\n", (14258, 14272), False, 'from guiqwt.config import _\n'), ((14360, 14377), 'guiqwt.config._', '_', (['"""8-bit images"""'], {}), "('8-bit images')\n", (14361, 14377), False, 'from guiqwt.config import _\n'), ((14498, 14515), 'guiqwt.config._', '_', (['"""NumPy arrays"""'], {}), "('NumPy arrays')\n", (14499, 14515), False, 'from guiqwt.config import _\n'), ((14584, 14599), 'guiqwt.config._', '_', (['"""Text files"""'], {}), "('Text files')\n", (14585, 14599), False, 'from guiqwt.config import _\n'), ((14690, 14706), 
'guiqwt.config._', '_', (['"""DICOM files"""'], {}), "('DICOM files')\n", (14691, 14706), False, 'from guiqwt.config import _\n'), ((2289, 2315), 'qtpy.py3compat.is_text_string', 'is_text_string', (['extensions'], {}), '(extensions)\n', (2303, 2315), False, 'from qtpy.py3compat import is_text_string, to_text_string\n'), ((6212, 6237), 'tifffile.imread', 'tifffile.imread', (['filename'], {}), '(filename)\n', (6227, 6237), False, 'import tifffile\n'), ((6426, 6456), 'tifffile.imread', 'tifffile.imread', (['filename', 'arr'], {}), '(filename, arr)\n', (6441, 6456), False, 'import tifffile\n'), ((10433, 10453), 'numpy.dtype', 'np.dtype', (['format_str'], {}), '(format_str)\n', (10441, 10453), True, 'import numpy as np\n'), ((13699, 13721), 'os.path.splitext', 'osp.splitext', (['filename'], {}), '(filename)\n', (13711, 13721), True, 'import os.path as osp\n'), ((13777, 13811), 'numpy.savetxt', 'np.savetxt', (['filename', 'arr'], {'fmt': 'fmt'}), '(filename, arr, fmt=fmt)\n', (13787, 13811), True, 'import numpy as np\n'), ((15469, 15490), 'qtpy.py3compat.is_text_string', 'is_text_string', (['fname'], {}), '(fname)\n', (15483, 15490), False, 'from qtpy.py3compat import is_text_string, to_text_string\n'), ((15508, 15529), 'qtpy.py3compat.to_text_string', 'to_text_string', (['fname'], {}), '(fname)\n', (15522, 15529), False, 'from qtpy.py3compat import is_text_string, to_text_string\n'), ((15613, 15632), 'os.path.splitext', 'osp.splitext', (['fname'], {}), '(fname)\n', (15625, 15632), True, 'import os.path as osp\n'), ((16400, 16421), 'qtpy.py3compat.is_text_string', 'is_text_string', (['fname'], {}), '(fname)\n', (16414, 16421), False, 'from qtpy.py3compat import is_text_string, to_text_string\n'), ((16439, 16460), 'qtpy.py3compat.to_text_string', 'to_text_string', (['fname'], {}), '(fname)\n', (16453, 16460), False, 'from qtpy.py3compat import is_text_string, to_text_string\n'), ((16544, 16563), 'os.path.splitext', 'osp.splitext', (['fname'], {}), '(fname)\n', (16556, 
16563), True, 'import os.path as osp\n'), ((13348, 13389), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'delimiter': 'delimiter'}), '(filename, delimiter=delimiter)\n', (13358, 13389), True, 'import numpy as np\n'), ((13852, 13901), 'numpy.savetxt', 'np.savetxt', (['filename', 'arr'], {'fmt': 'fmt', 'delimiter': '""","""'}), "(filename, arr, fmt=fmt, delimiter=',')\n", (13862, 13901), True, 'import numpy as np\n'), ((2388, 2411), 'os.path.splitext', 'osp.splitext', (["(' ' + ext)"], {}), "(' ' + ext)\n", (2400, 2411), True, 'import os.path as osp\n'), ((3752, 3776), 'guiqwt.config._', '_', (['"""All supported files"""'], {}), "('All supported files')\n", (3753, 3776), False, 'from guiqwt.config import _\n'), ((8177, 8192), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (8185, 8192), True, 'import numpy as np\n'), ((8278, 8293), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (8286, 8293), True, 'import numpy as np\n')] |
from .poly import FixedPoly
import numpy as np
import pymunk as pm
from .gravity_obj import MOVING_OBJ_COLLISION_TYPE
from .img_tool import ImageTool
from .funnel import dist
import pygame as pg
def bucket_touching_handler(arbiter, space, data):
    """Pre-solve collision callback between a shape (shape 0) and a bucket (shape 1).

    The bucket shape carries ``locations`` = [center, corner1, corner2,
    corner3, corner4].  An object already marked ``in_bucket`` is pinned
    back to the bucket center.  A movable object that is closer to corners
    1 and 4 than to corners 2 and 3 is captured: frozen at the center with
    all motion zeroed.  Returning False cancels the physical collision
    response for this contact.
    """
    obj = arbiter.shapes[0]
    anchors = arbiter.shapes[1].locations
    if obj.in_bucket:
        # already captured: keep it pinned to the bucket center
        obj.body.position = anchors[0][:]
        obj.body.velocity = pm.Vec2d(0., 0.)
        obj.body.force = pm.Vec2d(0., 0.)
        return False
    if obj.collision_type == MOVING_OBJ_COLLISION_TYPE:
        gaps = [dist(obj.body.position, anchors[k]) for k in range(1, 5)]
        entering = (gaps[0] < gaps[1] and gaps[0] < gaps[2]
                    and gaps[3] < gaps[1] and gaps[3] < gaps[2])
        if not entering:
            return True
        # capture: freeze the object at the bucket center
        body = obj.body
        body.position = anchors[0][:]
        body.velocity = pm.Vec2d(0., 0.)
        body.angular_velocity = 0.
        body.force = pm.Vec2d(0., 0.)
        body.torque = 0.
        body.angle = 0.
        obj.in_bucket = True
        return False
    return True
class Bucket(FixedPoly):
    """Square bucket obstacle that captures moving objects (see
    bucket_touching_handler)."""

    # Original note: valid angles span 1..5 pi
    def __init__(self, pos, angle=np.pi/4, size=10.0, color='black'):
        # the square is built with an extra pi/4 offset on top of `angle`
        super().__init__(pos, n_sides=4, angle=(np.pi/4 + angle), size=size, color=color)
        self.color = color
        self.center_position = [self.pos[0], self.pos[1]]
        # absolute positions of the four corners of the square
        corners = [np.array(self.pos) + np.array(self.vertices[k]) for k in range(4)]
        self.v_1, self.v_2, self.v_3, self.v_4 = corners
        self.img = ImageTool('bucket.png', 0.0 + angle, pos[:],
                             use_shape=self.shape,
                             debug_render=False)
        self.collision_type = 6

    def add_to_space(self, space):
        """Add the bucket shape to `space` and wire up the capture handler."""
        shape = self.img.get_shape()
        shape.collision_type = self.collision_type
        # [center, corner1..corner4] consumed by bucket_touching_handler
        shape.locations = [self.center_position, self.v_1, self.v_2, self.v_3, self.v_4]
        self.shape = shape
        space.add(shape)
        self.attached_shapes.append(shape)
        # fires when a movable object (collision type 1) touches this bucket
        handler = space.add_collision_handler(1, self.collision_type)
        handler.pre_solve = bucket_touching_handler

    def render(self, screen, scale=None, anti_alias=False):
        """Draw the bucket sprite; `scale` defaults to 1 when None."""
        self.img.render(screen, 1 if scale is None else scale, self.flipy)
| [
"numpy.array",
"pymunk.Vec2d"
] | [((428, 446), 'pymunk.Vec2d', 'pm.Vec2d', (['(0.0)', '(0.0)'], {}), '(0.0, 0.0)\n', (436, 446), True, 'import pymunk as pm\n'), ((484, 502), 'pymunk.Vec2d', 'pm.Vec2d', (['(0.0)', '(0.0)'], {}), '(0.0, 0.0)\n', (492, 502), True, 'import pymunk as pm\n'), ((1649, 1667), 'numpy.array', 'np.array', (['self.pos'], {}), '(self.pos)\n', (1657, 1667), True, 'import numpy as np\n'), ((1670, 1696), 'numpy.array', 'np.array', (['self.vertices[0]'], {}), '(self.vertices[0])\n', (1678, 1696), True, 'import numpy as np\n'), ((1716, 1734), 'numpy.array', 'np.array', (['self.pos'], {}), '(self.pos)\n', (1724, 1734), True, 'import numpy as np\n'), ((1737, 1763), 'numpy.array', 'np.array', (['self.vertices[1]'], {}), '(self.vertices[1])\n', (1745, 1763), True, 'import numpy as np\n'), ((1783, 1801), 'numpy.array', 'np.array', (['self.pos'], {}), '(self.pos)\n', (1791, 1801), True, 'import numpy as np\n'), ((1804, 1830), 'numpy.array', 'np.array', (['self.vertices[2]'], {}), '(self.vertices[2])\n', (1812, 1830), True, 'import numpy as np\n'), ((1850, 1868), 'numpy.array', 'np.array', (['self.pos'], {}), '(self.pos)\n', (1858, 1868), True, 'import numpy as np\n'), ((1871, 1897), 'numpy.array', 'np.array', (['self.vertices[3]'], {}), '(self.vertices[3])\n', (1879, 1897), True, 'import numpy as np\n'), ((1058, 1076), 'pymunk.Vec2d', 'pm.Vec2d', (['(0.0)', '(0.0)'], {}), '(0.0, 0.0)\n', (1066, 1076), True, 'import pymunk as pm\n'), ((1147, 1165), 'pymunk.Vec2d', 'pm.Vec2d', (['(0.0)', '(0.0)'], {}), '(0.0, 0.0)\n', (1155, 1165), True, 'import pymunk as pm\n')] |
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import PoseStamped
from styx_msgs.msg import Lane, Waypoint
from std_msgs.msg import Int32
from scipy.spatial import KDTree
import math
import numpy as np
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
LOOKAHEAD_WPS = 200 # Number of waypoints we will publish. You can change this number
MAX_DECEL = 5  # max deceleration used when slowing for a stop line (presumably m/s^2 — TODO confirm)
class WaypointUpdater(object):
    """Publish the next LOOKAHEAD_WPS track waypoints ahead of the vehicle.

    Subscribes to the current pose, the static base waypoints and the index
    of the next red-light stop line, and publishes `/final_waypoints`: a
    Lane whose waypoint velocities are lowered so the car decelerates and
    stops before a red light.

    Fixes vs. original: idiomatic ``is None`` check in waypoints_cb, and
    the loop-invariant ``stop_idx`` is hoisted out of the per-waypoint loop
    in decelerate().
    """

    def __init__(self):
        rospy.init_node('waypoint_updater')

        rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)
        #rospy.Subscriber('/obstacle_waypoint',Int32,self.obstacle_cb)

        self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)

        self.base_waypoints = None   # full static track (Lane), set once by waypoints_cb
        self.waypoints_2D = None     # [[x, y], ...] backing the KD-tree
        self.waypoints_tree = None   # KDTree for nearest-waypoint lookup
        self.pose = None             # latest vehicle pose
        self.stopline_wp_idx = -1    # -1 means no red light ahead

        self.loop()
        # NOTE: loop() only returns at shutdown, so spin() below is
        # effectively unreachable; kept for safety.
        rospy.spin()

    def loop(self):
        """Publish at 50 Hz once both pose and base waypoints are available."""
        rate = rospy.Rate(50)
        while not rospy.is_shutdown():
            if self.pose and self.base_waypoints:
                self.publish_waypoints()
                print("Waypoint Updater publishing!")
            rate.sleep()

    def get_closest_waypoint_index(self):
        """Return the index of the closest waypoint *ahead of* the car.

        The KD-tree yields the nearest waypoint; if the dot-product test
        shows it lies behind the car, the next index (modulo track length)
        is returned instead.
        """
        x = self.pose.pose.position.x
        y = self.pose.pose.position.y
        closest_index = self.waypoints_tree.query([x, y], 1)[1]
        closest_coord = self.waypoints_2D[closest_index]
        prev_coord = self.waypoints_2D[(closest_index - 1) % len(self.waypoints_2D)]
        cl_vec = np.array(closest_coord)
        pr_vec = np.array(prev_coord)
        pos_vec = np.array([x, y])
        if np.dot(cl_vec - pr_vec, pos_vec - cl_vec) > 0:
            # closest waypoint is behind the car -> take the next one
            closest_index = (closest_index + 1) % len(self.waypoints_2D)
        return closest_index

    def publish_waypoints(self):
        """Build and publish the final (possibly decelerating) path."""
        final_path = self.generate_path()
        self.final_waypoints_pub.publish(final_path)

    def generate_path(self):
        """Return a Lane with up to LOOKAHEAD_WPS waypoints, decelerating
        when a red-light stop line falls inside the lookahead window."""
        path = Lane()
        closest_wp_idx = self.get_closest_waypoint_index()
        farthest_wp_idx = closest_wp_idx + LOOKAHEAD_WPS
        base_wps = self.base_waypoints.waypoints[closest_wp_idx:farthest_wp_idx]
        if self.stopline_wp_idx == -1 or (self.stopline_wp_idx >= farthest_wp_idx):
            path.waypoints = base_wps
        else:
            path.waypoints = self.decelerate(base_wps, closest_wp_idx)
        return path

    def decelerate(self, wps, closest_idx):
        """Return copies of `wps` with velocities lowered so the car stops
        a few waypoints before the red-light stop line."""
        # stop a few waypoints early so the car's nose stays behind the line;
        # loop-invariant, so computed once (was recomputed per waypoint)
        stop_idx = max(self.stopline_wp_idx - closest_idx - 3, 0)
        result = []
        for i, wp in enumerate(wps):
            p = Waypoint()
            p.pose = wp.pose
            d = self.distance(wps, i, stop_idx)
            # v = sqrt(2*a*d): constant-deceleration speed profile
            vel = math.sqrt(2 * MAX_DECEL * d)
            if vel < 1.0:
                vel = 0
            p.twist.twist.linear.x = min(vel, wp.twist.twist.linear.x)
            result.append(p)
        return result

    def pose_cb(self, msg):
        """Cache the latest vehicle pose."""
        self.pose = msg

    def waypoints_cb(self, waypoints):
        """Cache the static base waypoints and build the KD-tree (once)."""
        self.base_waypoints = waypoints
        if self.waypoints_2D is None:
            self.waypoints_2D = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y]
                                 for waypoint in waypoints.waypoints]
            self.waypoints_tree = KDTree(self.waypoints_2D)

    def traffic_cb(self, msg):
        """Store the waypoint index of the next red-light stop line (-1 = none)."""
        self.stopline_wp_idx = msg.data

    def obstacle_cb(self, msg):
        # TODO: Callback for /obstacle_waypoint message. We will implement it later
        pass

    def get_waypoint_velocity(self, waypoint):
        """Return the longitudinal velocity stored in `waypoint`."""
        return waypoint.twist.twist.linear.x

    def set_waypoint_velocity(self, waypoints, waypoint, velocity):
        """Set the longitudinal velocity of `waypoints[waypoint]`."""
        waypoints[waypoint].twist.twist.linear.x = velocity

    def distance(self, waypoints, wp1, wp2):
        """Cumulative Euclidean path length from waypoint `wp1` to `wp2`
        (returns 0 when wp2 < wp1)."""
        dist = 0
        dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
        for i in range(wp1, wp2+1):
            dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
            wp1 = i
        return dist
if __name__ == '__main__':
    try:
        WaypointUpdater()
    except rospy.ROSInterruptException:
        # raised by rospy when the node is interrupted (e.g. Ctrl-C) during startup
        rospy.logerr('Could not start waypoint updater node.')
| [
"rospy.logerr",
"rospy.Subscriber",
"rospy.is_shutdown",
"rospy.init_node",
"scipy.spatial.KDTree",
"math.sqrt",
"numpy.array",
"numpy.dot",
"rospy.Rate",
"rospy.spin",
"styx_msgs.msg.Waypoint",
"rospy.Publisher",
"styx_msgs.msg.Lane"
] | [((1012, 1047), 'rospy.init_node', 'rospy.init_node', (['"""waypoint_updater"""'], {}), "('waypoint_updater')\n", (1027, 1047), False, 'import rospy\n'), ((1057, 1117), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/current_pose"""', 'PoseStamped', 'self.pose_cb'], {}), "('/current_pose', PoseStamped, self.pose_cb)\n", (1073, 1117), False, 'import rospy\n'), ((1126, 1186), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/base_waypoints"""', 'Lane', 'self.waypoints_cb'], {}), "('/base_waypoints', Lane, self.waypoints_cb)\n", (1142, 1186), False, 'import rospy\n'), ((1196, 1257), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/traffic_waypoint"""', 'Int32', 'self.traffic_cb'], {}), "('/traffic_waypoint', Int32, self.traffic_cb)\n", (1212, 1257), False, 'import rospy\n'), ((1363, 1417), 'rospy.Publisher', 'rospy.Publisher', (['"""final_waypoints"""', 'Lane'], {'queue_size': '(1)'}), "('final_waypoints', Lane, queue_size=1)\n", (1378, 1417), False, 'import rospy\n'), ((1656, 1668), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (1666, 1668), False, 'import rospy\n'), ((1705, 1719), 'rospy.Rate', 'rospy.Rate', (['(50)'], {}), '(50)\n', (1715, 1719), False, 'import rospy\n'), ((2342, 2365), 'numpy.array', 'np.array', (['closest_coord'], {}), '(closest_coord)\n', (2350, 2365), True, 'import numpy as np\n'), ((2383, 2403), 'numpy.array', 'np.array', (['prev_coord'], {}), '(prev_coord)\n', (2391, 2403), True, 'import numpy as np\n'), ((2422, 2438), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (2430, 2438), True, 'import numpy as np\n'), ((2985, 2991), 'styx_msgs.msg.Lane', 'Lane', ([], {}), '()\n', (2989, 2991), False, 'from styx_msgs.msg import Lane, Waypoint\n'), ((1738, 1757), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (1755, 1757), False, 'import rospy\n'), ((2450, 2491), 'numpy.dot', 'np.dot', (['(cl_vec - pr_vec)', '(pos_vec - cl_vec)'], {}), '(cl_vec - pr_vec, pos_vec - cl_vec)\n', (2456, 2491), True, 'import numpy as np\n'), ((3535, 
3545), 'styx_msgs.msg.Waypoint', 'Waypoint', ([], {}), '()\n', (3543, 3545), False, 'from styx_msgs.msg import Lane, Waypoint\n'), ((3711, 3742), 'math.sqrt', 'math.sqrt', (['(2 * MAX_DECEL * dist)'], {}), '(2 * MAX_DECEL * dist)\n', (3720, 3742), False, 'import math\n'), ((4260, 4285), 'scipy.spatial.KDTree', 'KDTree', (['self.waypoints_2D'], {}), '(self.waypoints_2D)\n', (4266, 4285), False, 'from scipy.spatial import KDTree\n'), ((4945, 5010), 'math.sqrt', 'math.sqrt', (['((a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (a.z - b.z) ** 2)'], {}), '((a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (a.z - b.z) ** 2)\n', (4954, 5010), False, 'import math\n'), ((5279, 5333), 'rospy.logerr', 'rospy.logerr', (['"""Could not start waypoint updater node."""'], {}), "('Could not start waypoint updater node.')\n", (5291, 5333), False, 'import rospy\n')] |
"""
Create stimuli to probe the networks.
"""
import numpy as np
def expanding_disk(pos, speed, width, exp_rate, maxwidth, amplitude, gridsize, appears, duration, order=10):
    """Artificial stimulus: a disk that drifts and expands over time.

    Parameters
    ----------
    pos : starting (x, y) position in grid coordinates
    speed : (vx, vy) drift, in pixels per time step
    width : initial width of the disk
    exp_rate : expansion rate, pixels per time step (may be negative)
    maxwidth : largest width the disk may reach
    amplitude : peak amplitude of the disk
    gridsize : side length of the square grid, in pixels
    appears : time step at which the disk first appears
    duration : total temporal extent of the stimulus, in time steps
    order : controls how sharp the transition at the disk margin is
    """
    frames = duration - appears
    t = np.arange(frames)
    cx = pos[0] + speed[0] * t
    cy = pos[1] + speed[1] * t
    radii = width + exp_rate * t
    radii[radii > maxwidth] = maxwidth
    if exp_rate < 0:
        # shrinking disks are clamped so the width never drops below one pixel
        radii[radii < 1] = 1
    # mesh over (x, y, width-per-frame)
    axis = np.arange(gridsize)
    X, Y, W = np.meshgrid(axis, axis, radii)
    norm_dist = ((X - cx)**2 + (Y - cy)**2) / W**2
    disk_frames = amplitude * np.exp(-1/2 * norm_dist**int(order/2))
    stim = np.zeros((gridsize, gridsize, duration))
    stim[:, :, appears:duration] = disk_frames
    return stim
def expanding_annuli(pos, speed, width, init, exp_rate, maxsize, amplitude, gridsize, appears, duration, order=10):
    """Artificial stimulus: an expanding annulus (ring).

    Built as the difference of two expanding disks: an outer disk of initial
    size ``init`` minus an inner disk of size ``init - width``, both
    expanding at ``exp_rate`` (the inner disk's maximum is ``maxsize -
    width`` so the ring width stays ``width``).  Remaining parameters match
    :func:`expanding_disk`.
    """
    outer = expanding_disk(pos, speed, init, exp_rate, maxsize, amplitude, gridsize, appears, duration, order)
    inner = expanding_disk(pos, speed, init - width, exp_rate, maxsize - width, amplitude, gridsize, appears, duration, order)
    return outer - inner
def moving_bars(k, speed, theta, phase, contrast, gridsize, duration):
    """Artificial stimulus: drifting square-wave grating (moving bars).

    Parameters
    ----------
    k : spatial frequency of the bars, in inverse pixel units
    speed : amplitude and direction of the drift
    theta : bar orientation in radians, 0 being horizontal
    phase : spatial phase offset
    contrast : positive/negative amplitude of the bars
    gridsize : side length of the square grid, in pixels
    duration : temporal extent of the stimulus, in time steps
    """
    axis = np.arange(gridsize)
    t = np.arange(duration)
    X, Y, T = np.meshgrid(axis, axis, t)
    arg = (2*np.pi*k*X*np.cos(theta)
           + 2*np.pi*k*Y*np.sin(theta)
           + phase - 2*np.pi*speed*T)
    # sign of the sinusoid gives the square-wave bars
    return contrast * np.sign(np.cos(arg))
def counterphase_grating(k, f, theta, phase, contrast, gridsize, duration):
    """Counterphase (standing) sinusoidal grating, eq. 2.18 of Dayan & Abbott.

    Parameters
    ----------
    k : spatial frequency of the bars, in inverse pixel units
    f : temporal frequency, in inverse time-step units
    theta : bar orientation in radians, 0 being horizontal
    phase : spatial phase offset
    contrast : amplitude of the grating
    gridsize : side length of the square grid, in pixels
    duration : temporal extent of the stimulus, in time steps
    """
    axis = np.arange(gridsize)
    t = np.arange(duration)
    X, Y, T = np.meshgrid(axis, axis, t)
    spatial = np.cos(2*np.pi*k*X*np.cos(theta) + 2*np.pi*k*Y*np.sin(theta) + phase)
    temporal = np.cos(2*np.pi*f*T)
    return contrast * spatial * temporal
def flashing_disk(pos, width, amplitude, f, gridsize, duration, order=10):
    """Artificial stimulus: a stationary disk whose amplitude flickers.

    Parameters
    ----------
    pos : (x, y) position of the disk in grid coordinates
    width : width of the disk
    amplitude : peak amplitude of the disk
    f : flicker frequency, in inverse time-step units
    gridsize : side length of the square grid, in pixels
    duration : temporal extent of the stimulus, in time steps
    order : controls how sharp the transition at the disk margin is
    """
    axis = np.arange(gridsize)
    t = np.arange(duration)
    X, Y, T = np.meshgrid(axis, axis, t)
    norm_dist = ((X - pos[0])**2 + (Y - pos[1])**2) / width**2
    envelope = amplitude * np.exp(-1/2 * norm_dist**int(order/2))
    return envelope * np.cos(2*np.pi*f*T)
"numpy.zeros",
"numpy.cos",
"numpy.sign",
"numpy.sin",
"numpy.meshgrid",
"numpy.arange"
] | [((1176, 1195), 'numpy.arange', 'np.arange', (['gridsize'], {}), '(gridsize)\n', (1185, 1195), True, 'import numpy as np\n'), ((1201, 1220), 'numpy.arange', 'np.arange', (['gridsize'], {}), '(gridsize)\n', (1210, 1220), True, 'import numpy as np\n'), ((1235, 1255), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y', 'w'], {}), '(x, y, w)\n', (1246, 1255), True, 'import numpy as np\n'), ((1372, 1412), 'numpy.zeros', 'np.zeros', (['(gridsize, gridsize, duration)'], {}), '((gridsize, gridsize, duration))\n', (1380, 1412), True, 'import numpy as np\n'), ((3079, 3098), 'numpy.arange', 'np.arange', (['gridsize'], {}), '(gridsize)\n', (3088, 3098), True, 'import numpy as np\n'), ((3104, 3123), 'numpy.arange', 'np.arange', (['gridsize'], {}), '(gridsize)\n', (3113, 3123), True, 'import numpy as np\n'), ((3129, 3148), 'numpy.arange', 'np.arange', (['duration'], {}), '(duration)\n', (3138, 3148), True, 'import numpy as np\n'), ((3163, 3183), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y', 't'], {}), '(x, y, t)\n', (3174, 3183), True, 'import numpy as np\n'), ((3908, 3927), 'numpy.arange', 'np.arange', (['gridsize'], {}), '(gridsize)\n', (3917, 3927), True, 'import numpy as np\n'), ((3933, 3952), 'numpy.arange', 'np.arange', (['gridsize'], {}), '(gridsize)\n', (3942, 3952), True, 'import numpy as np\n'), ((3958, 3977), 'numpy.arange', 'np.arange', (['duration'], {}), '(duration)\n', (3967, 3977), True, 'import numpy as np\n'), ((3992, 4012), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y', 't'], {}), '(x, y, t)\n', (4003, 4012), True, 'import numpy as np\n'), ((4660, 4679), 'numpy.arange', 'np.arange', (['gridsize'], {}), '(gridsize)\n', (4669, 4679), True, 'import numpy as np\n'), ((4685, 4704), 'numpy.arange', 'np.arange', (['gridsize'], {}), '(gridsize)\n', (4694, 4704), True, 'import numpy as np\n'), ((4710, 4729), 'numpy.arange', 'np.arange', (['duration'], {}), '(duration)\n', (4719, 4729), True, 'import numpy as np\n'), ((4744, 4764), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y', 
't'], {}), '(x, y, t)\n', (4755, 4764), True, 'import numpy as np\n'), ((3305, 3318), 'numpy.sign', 'np.sign', (['stim'], {}), '(stim)\n', (3312, 3318), True, 'import numpy as np\n'), ((4102, 4127), 'numpy.cos', 'np.cos', (['(2 * np.pi * f * T)'], {}), '(2 * np.pi * f * T)\n', (4108, 4127), True, 'import numpy as np\n'), ((4876, 4901), 'numpy.cos', 'np.cos', (['(2 * np.pi * f * T)'], {}), '(2 * np.pi * f * T)\n', (4882, 4901), True, 'import numpy as np\n'), ((878, 897), 'numpy.arange', 'np.arange', (['disk_dur'], {}), '(disk_dur)\n', (887, 897), True, 'import numpy as np\n'), ((925, 944), 'numpy.arange', 'np.arange', (['disk_dur'], {}), '(disk_dur)\n', (934, 944), True, 'import numpy as np\n'), ((970, 989), 'numpy.arange', 'np.arange', (['disk_dur'], {}), '(disk_dur)\n', (979, 989), True, 'import numpy as np\n'), ((3217, 3230), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (3223, 3230), True, 'import numpy as np\n'), ((3243, 3256), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (3249, 3256), True, 'import numpy as np\n'), ((4055, 4068), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (4061, 4068), True, 'import numpy as np\n'), ((4081, 4094), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (4087, 4094), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import re
from law.utils import *
import jieba.posseg as pseg
import datetime
import mysql.connector
class case_reader:
def __init__(self, user, password, n=1000, preprocessing=False):
'''
n is total types,
preprocessing: whether needs preprocessing
'''
# old version: use file_path
# self.file_path = file_path
# self.data = pd.read_csv(self.file_path, encoding='utf-8', engine='python')
# new version: directly reading data
# connect database
self.n = n
self.preprocessing = preprocessing
print("Connecting to Server...")
cnx = mysql.connector.connect(user=user, password=password,
host="cdb-74dx1ytr.gz.tencentcdb.com",
port=10008,
database='law')
cursor = cnx.cursor(buffered=True)
print("Server Connected.")
# read database
if n>=0:
query = 'SELECT * FROM Civil LIMIT ' + str(self.n) + ';'
else:
query = 'SELECT * FROM Civil;'
print("Start Reading Data...")
self.data = pd.read_sql(query,con=cnx)
print("Read Data Successful...")
self.data_len = len(self.data)
print("This dataset has ", self.data_len, "rows of data.")
# np.nan replace missing value
self.data = self.data.fillna(np.nan)
    def return_data(self):
        """Return the loaded case DataFrame, running preprocess() first when
        the instance was created with preprocessing=True."""
        if self.preprocessing:
            self.preprocess()
        return self.data
def number2(self):
'''
This function change '庭审程序' into one hot encodings
-- Klaus
'''
xingfabiangeng = np.zeros(self.data_len)
yishen = np.zeros(self.data_len)
ershen = np.zeros(self.data_len)
fushen = np.zeros(self.data_len)
qita = np.zeros(self.data_len)
for i in range(self.data_len):
if self.data['proc'][i] == "刑罚变更":
xingfabiangeng[i] += 1
if self.data['proc'][i] == "一审":
yishen[i] += 1
if self.data['proc'][i] == "二审":
ershen[i] += 1
if self.data['proc'][i] == "复核":
fushen[i] += 1
if self.data['proc'][i] == "其他" :
qita[i] += 1
self.data['proc_是否_刑罚变更'] = xingfabiangeng
self.data['proc_是否_一审'] = yishen
self.data['proc_是否_二审'] = ershen
self.data['proc_是否_复核'] = fushen
self.data['proc_是否_其他'] = qita
#print(xingfabiangeng)
#print(yishen)
#print(ershen)
#print(qita)
del xingfabiangeng, yishen, ershen, fushen, qita
def number3(self):
'''
This function change '案由' into one hot encodings
'''
reasons = ['机动车交通事故责任纠纷' ,'物件损害责任纠纷' ,'侵权责任纠纷', '产品责任纠纷', '提供劳务者受害责任纠纷' ,'医疗损害责任纠纷',
'地面施工、地下设施损害责任纠纷', '饲养动物损害责任纠纷' ,'产品销售者责任纠纷', '因申请诉中财产保全损害责任纠纷', '教育机构责任纠纷',
'违反安全保障义务责任纠纷' , '网络侵权责任纠纷' ,'因申请诉前财产保全损害责任纠纷' ,'物件脱落、坠落损害责任纠纷',
'因申请诉中证据保全损害责任纠纷' ,'建筑物、构筑物倒塌损害责任纠纷' ,'提供劳务者致害责任纠纷' ,'产品生产者责任纠纷',
'公共场所管理人责任纠纷', '公证损害责任纠纷', '用人单位责任纠纷' ,'触电人身损害责任纠纷', '义务帮工人受害责任纠纷',
'高度危险活动损害责任纠纷', '噪声污染责任纠纷' ,'堆放物倒塌致害责任纠纷', '公共道路妨碍通行损害责任纠纷' ,'见义勇为人受害责任纠纷',
'医疗产品责任纠纷' ,'监护人责任纠纷', '水上运输人身损害责任纠纷', '环境污染责任纠纷', '因申请先予执行损害责任纠纷',
'铁路运输人身损害责任纠纷' ,'水污染责任纠纷', '林木折断损害责任纠纷', '侵害患者知情同意权责任纠纷' ,'群众性活动组织者责任纠纷',
'土壤污染责任纠纷']
mreason = np.zeros(self.data_len)
for i in range(self.data_len):
for j,reason in enumerate(reasons):
if self.data['class'][i] == reasons[j]:
mreason[i] +=j+1
self.data['class_index'] = mreason
del mreason
def number4(self):
'''
This function change '文书类型' into one hot encodings
'''
panjueshu = np.zeros(self.data_len)
caidingshu = np.zeros(self.data_len)
for i in range(self.data_len):
if self.data['doc_type'][i] == "判决书":
panjueshu[i] += 1
if self.data['doc_type'][i] == "裁定书":
caidingshu[i] += 1
self.data['doc_type'] = panjueshu
self.data['doc_type'] = caidingshu
del panjueshu, caidingshu
def number5(self):
    """Split the court name ('court_name') into province / city / level columns.

    Appends three columns via pd.concat: '法院所在省' (province, '...省'),
    '法院所在市' (city, '...市') and '法院等级' (level, one char + '级').
    Missing court names yield None in all three columns.

    Bug fixed: the original stripped the matched province with
    re.sub(b.group(0), '', x), i.e. it used the matched TEXT as an
    unescaped regex pattern, which breaks (or mis-substitutes) when the
    court name contains regex metacharacters.  The '.*省' match is
    anchored at position 0 ('.*' is greedy from the string start), so the
    prefix can simply be sliced off.
    """
    levels = []     # court level ('...级')
    provinces = []  # province ('...省')
    cities = []     # city ('...市')
    prov_re = re.compile(r'.*省')
    city_re = re.compile(r'.*市')
    level_re = re.compile(r'.级')
    for name in self.data['court_name']:
        if pd.isna(name):  # empty cell
            levels.append(None)
            provinces.append(None)
            cities.append(None)
            continue
        m = prov_re.search(name)
        if m is None:
            provinces.append(None)
        else:
            provinces.append(m.group(0))
            name = name[m.end():]  # safe replacement for re.sub on matched text
        m = city_re.search(name)
        cities.append(None if m is None else m.group(0))
        # As in the original, the city prefix is NOT stripped before the
        # level search.
        m = level_re.search(name)
        levels.append(None if m is None else m.group(0))
    newdata = pd.DataFrame({
        '法院所在省': provinces,
        '法院所在市': cities,
        '法院等级': levels
    })
    self.data = pd.concat([self.data, newdata], axis=1)
    del newdata, levels, provinces, cities
def number6(self):
    """Split the judgment date ('date') into year / month / day columns.

    Appends '判决年份' ('...年'), '判决月份' ('...月') and '判决日期' ('...日')
    via pd.concat; unmatched parts become None.
    """
    years, months, days = [], [], []
    year_re = re.compile(r'.*年')
    month_re = re.compile(r'.*月')
    day_re = re.compile(r'.*日')
    for x in self.data['date']:
        m = year_re.search(str(x))
        if m is None:
            years.append(None)
        else:
            years.append(m.group(0))
            x = re.sub(m.group(0), '', x)  # strip the matched year prefix
        m = month_re.search(str(x))
        if m is None:
            months.append(None)
        else:
            months.append(m.group(0))
            x = re.sub(m.group(0), '', x)  # strip the matched month prefix
        m = day_re.search(str(x))
        days.append(m.group(0) if m is not None else None)
    newdata = pd.DataFrame({
        '判决年份': years,
        '判决月份': months,
        '判决日期': days
    })
    self.data = pd.concat([self.data, newdata], axis=1)
    del years, months, days
def number7(self):  # four one-hot columns: procuratorate / legal person / natural person / other
    """Plaintiff ('plantiff') type flags.

    Splits each plaintiff cell on common separators and sets, per row:
      原告_是否_检察院  — any part ends in 检察院 (procuratorate),
      原告_是否_自然人  — any part of length 1..4 (heuristic for a person name),
      原告_是否_法人    — any part ends in 检察院 or 公司,
      原告_是否_其他    — any part longer than 4 chars that is neither.

    Fix: writes go through DataFrame.at instead of chained indexing
    (self.data[col][i] = ...), which is unreliable — it raises
    SettingWithCopyWarning and is a silent no-op under pandas
    copy-on-write.  Assumes self.data has a default RangeIndex, as the
    original positional chained writes did — TODO confirm.
    """
    self.data['原告_是否_检察院'] = 0
    self.data['原告_是否_法人'] = 0
    self.data['原告_是否_自然人'] = 0
    self.data['原告_是否_其他'] = 0
    sep_pattern = r'(?::|:|。|、|\s|,|,)\s*'
    jcy_pattern = re.compile(r'.*检察院')
    gs_pattern = re.compile(r'.*公司')
    for i in range(len(self.data['plantiff'])):
        raw = self.data['plantiff'][i]
        if pd.isna(raw):
            continue
        cleaned = re.sub(' ', '', raw)
        self.data.at[i, 'plantiff'] = cleaned
        for part in re.split(sep_pattern, cleaned):
            is_jcy = len(jcy_pattern.findall(part)) != 0
            is_gs = len(gs_pattern.findall(part)) != 0
            if is_jcy:
                self.data.at[i, '原告_是否_检察院'] = 1
            if 0 < len(part) <= 4:
                self.data.at[i, '原告_是否_自然人'] = 1
            if is_jcy or is_gs:
                self.data.at[i, '原告_是否_法人'] = 1
            if len(part) > 4 and not is_jcy and not is_gs:
                self.data.at[i, '原告_是否_其他'] = 1
def number8(self):
    """Defendant ('defendant') type flags: natural person / legal person / other.

    Heuristics (see http://www.sohu.com/a/249531167_656612):
      法人   — the cell mentions a company ('...公司');
      自然人 — some '、'-separated part of length <= 4 that is not a company;
      其他   — some part longer than 4 chars that is not a company, unless
               jieba tags a verb inside it (then it is treated as a clause,
               not a party name).
    """
    company_re = re.compile(r'.*?公司')
    natural = np.zeros(self.data_len)
    legal = np.zeros(self.data_len)
    other = np.zeros(self.data_len)
    for i in range(self.data_len):
        cell = self.data['defendant'][i]
        if pd.isna(cell):
            continue
        if re.search(company_re, cell) is not None:
            legal[i] = 1
        parts = re.split('、', cell)
        short_non_company = [p for p in parts
                             if len(p) <= 4 and re.search(company_re, p) is None]
        if short_non_company:
            natural[i] = 1
        long_non_company = [p for p in parts
                            if len(p) > 4 and re.search(company_re, p) is None]
        if long_non_company:
            other[i] = 1
            for mes in long_non_company:
                for word, flag in pseg.cut(mes):
                    if flag == 'v':
                        other[i] = 0
                        break
    self.data['被告_是否_自然人'] = natural
    self.data['被告_是否_法人'] = legal
    self.data['被告_是否_其他'] = other
    del natural, legal, other
def number9(self):
    """Flag rows whose third-party field ('third_party') contains a natural person.

    Sets '第三人_有无自然人' to 1 when any separator-delimited part of the
    cell has length 1..4 (name-length heuristic), 0 otherwise / when missing.

    Fix: writes go through DataFrame.at instead of chained indexing,
    which raises SettingWithCopyWarning and is a silent no-op under
    pandas copy-on-write.
    """
    self.data['第三人_有无自然人'] = 0
    sep_pattern = r'(?::|:|。|、|\s|,|,)\s*'
    for i in range(len(self.data['third_party'])):
        if pd.isna(self.data['third_party'][i]):
            continue
        for part in re.split(sep_pattern, self.data['third_party'][i]):
            if 0 < len(part) <= 4:
                self.data.at[i, '第三人_有无自然人'] = 1
                break
def number10(self):
    """Build per-row party feature dicts via ADBinfo and store them in 'party_one_hot'.

    Rows whose 'party' cell is missing get an empty dict.

    Fixes: removed the dead `info = {}` local (assigned every iteration,
    never read) and its `del info`, which raised NameError whenever
    self.data_len was 0 because `info` was only bound inside the loop.
    """
    information = []
    for i in range(self.data_len):
        if pd.isna(self.data['party'][i]):
            information.append({})
            continue
        information.append(ADBinfo(self.data, i))
    self.data['party_one_hot'] = information
    del information
def number11(self):
    """Categorise the procedure text ('procedure') into a coarse label.

    The rules below are tried in order and the first match wins (this
    mirrors the original if/elif chain exactly, patterns unchanged).
    Missing cells are labelled '空白', unmatched ones '其他/错误'.  The
    result is appended as a new '庭审程序分类' column.  A parallel `money`
    indicator is computed but — as in the original (see the commented-out
    newdict) — deliberately not stored.
    """
    rules = [
        ('不公开', lambda t: re.search('不宜在互联网公布|涉及国家秘密的|未成年人犯罪的', t)),
        ('调解结案', lambda t: re.search('以调解方式结案的', t)),
        ('已审理(简易转普通)', lambda t: re.search('一案.*本院.*简易程序.*(因|转为)', t)),
        ('已审理(简易)', lambda t: re.search('一案.*(小额诉讼程序|简易程序).*审理(。$|终结。$|.*到庭参加诉讼|.*到庭应诉|.*参加诉讼)', t)),
        ('已审理', lambda t: re.search('(一案.*本院.*(审理。$|审理终结。$|公开开庭进行了审理。$|公开开庭进行.?审理.*到庭参加.?诉讼))', t)),
        # '已受理/立案(简易转普通)' was already disabled in the original
        # (too rare to keep as its own class).
        ('已受理/立案(简易)', lambda t: re.search('一案.*本院.*(受理|立案).*(小额诉讼程序|简易程序)(。$|.*由.*审判。$)', t)),
        ('已受理/立案', lambda t: re.search('一案.*本院.*(立案。$|立案受理。$|立案后。$)', t)),
        ('调解撤诉', lambda t: re.search('一案.*(调解.*原告|原告.*调解).*撤', t)),
        ('其他撤诉', lambda t: re.search('调解', t) is None and re.search('一案.*原告.*撤', t)),
        ('未交费', lambda t: re.search('一案.*原告.*((未|不).*(受理|诉讼)费|(受理|诉讼)费.*(未|不))', t)),
        ('追加被告', lambda t: re.search('一案.*本院.*依法追加.*被告', t)),
        ('上诉', lambda t: re.search('上诉人.*不服.*上诉。$', t)),
        ('要求再审', lambda t: re.search('再审.*一案.*不服.*再审。$', t)),
        ('同意诉前财产保全', lambda t: re.search('一案.*申请财产保全.*符合法律规定。$', t)),
        ('申请财产保全', lambda t: re.search('申请.*(请求|要求).*(查封|冻结|扣押|保全措施)', t)),
        ('缺席审判', lambda t: re.search('一案.*(缺席|拒不到庭|未到庭)', t)),
        ('同意解除冻结', lambda t: re.search('一案.*申请.*解除(查封|冻结|扣押|保全措施).*符合法律规定。$', t)),
    ]
    types = []
    money = []
    for x in self.data['procedure']:
        # short-circuit keeps re.search off non-string NaN cells
        if str(x) == 'nan' or re.search('[0-9]+元', x) is None:
            money.append(0)
        else:
            money.append(1)
        if str(x) == 'nan':
            types.append('空白')
            continue
        for label, matches in rules:
            if matches(x):
                types.append(label)
                break
        else:
            types.append('其他/错误')
    newdata = pd.DataFrame({'庭审程序分类': types})
    self.data = pd.concat([self.data, newdata], axis=1)
    del types
def number12(self):
    """Derive yes/no/unknown/summary indicator columns from the trial text ('process').

    For each keyword pattern four columns are produced:
      <base>_是    1.0 where the pattern occurs in the text,
      <base>_否    1.0 where the text exists but the pattern is absent,
      <base>_未知  1.0 where the text is missing,
      <base>_汇总  1 / 0 / -1 summary of the three cases above.
    Finally '庭审过程_长度' holds the text length (0 when missing).

    Fixes over the original:
      * the '有异议' pattern ended in a stray '|' — an empty alternative —
        which made it match EVERY non-null row;
      * the money pattern was built from two adjacent string literals with
        the joining '|' missing, corrupting two alternatives;
      * the seven hand-copied loops are folded into one helper (the 撤诉
        group's column order is thereby normalised to 是/否/未知/汇总).
    """
    def flag(regex, base):
        # One pass over 'process' producing the four <base>_* columns.
        pattern = re.compile(regex)
        yes = np.zeros(self.data_len)
        no = np.zeros(self.data_len)
        unknown = np.zeros(self.data_len)
        summary = np.zeros(self.data_len)
        for i in range(self.data_len):
            if pd.isna(self.data['process'][i]):
                unknown[i] += 1
                summary[i] = -1
            elif pattern.findall(str(self.data['process'][i])):
                yes[i] += 1
                summary[i] = 1
            else:
                no[i] += 1
                summary[i] = 0
        self.data[base + '_是'] = yes
        self.data[base + '_否'] = no
        self.data[base + '_未知'] = unknown
        self.data[base + '_汇总'] = summary

    flag(r'撤诉', '庭审过程_是否撤诉')                       # withdrawal
    flag(r'受伤|死亡|伤残|残疾|致残', '庭审过程_是否受伤')     # injury
    # money mentions: xxx元 / xxx万元 / xxx万xxx千元 / xxx千xxx百元 /
    # xxx万xxx千xxx百元 / xxx,xxx元 / xxx,xxx,xxx元 ('|' restored between
    # the two literal halves)
    flag(r'[0-9]+元|[0-9]+万元|[0-9]+万+[0-9]+千元|[0-9]+千+[0-9]+百元|'
         r'[0-9]+万+[0-9]+千+[0-9]+百元|[0-9]+,+[0-9]+元|[0-9]+,+[0-9]+,+[0-9]+元',
         '庭审过程_是否涉及金钱')
    flag(r'有意|故意', '庭审过程_是否故意')                   # intent
    flag(r'精神损失|精神赔偿|精神抚慰', '庭审过程_是否要求精神赔偿')  # moral damages
    flag(r'拒不到庭', '庭审过程_是否拒不到庭')                # refusal to appear
    # objection — trailing '|' removed: it used to match the empty string,
    # i.e. every row
    flag(r'有异议|重新鉴定|判决异议', '庭审过程_是否有异议')

    length = np.zeros(self.data_len)
    for i in range(self.data_len):
        cell = self.data['process'][i]
        length[i] = len(cell) if type(cell) == str else 0
    self.data['庭审过程_长度'] = length
    del length
def number13(self):
    """Court-opinion ('opinion') features.

    Per row: whether a money amount ('...元') appears, plus the number of
    distinct laws / articles (条) / clauses (款) cited, as reported by
    find_law_tiao_kuan_in_text (expected to return a sequence of
    [law, articles, clauses] triples — TODO confirm against its definition).

    Fixes: bare `except:` narrowed to `except Exception` (a bare except
    also swallows KeyboardInterrupt/SystemExit); writes go through
    DataFrame.at instead of chained indexing, which is unreliable under
    pandas copy-on-write.
    """
    self.data['法院意见_是否涉及金额'] = 0
    self.data['法院意见_涉及的法数'] = 0
    self.data['法院意见_涉及的条数'] = 0
    self.data['法院意见_涉及的款数'] = 0
    money_pattern = re.compile(r'[0-9]+元')
    for i in range(len(self.data['opinion'])):
        if pd.isna(self.data['opinion'][i]):
            continue
        try:
            laws = find_law_tiao_kuan_in_text(self.data['opinion'][i])
        except Exception:
            print('法院意见无法处理的案件案号:' + self.data['id'][i])
            continue
        if len(laws) > 0:
            self.data.at[i, '法院意见_涉及的法数'] = len(laws)
            sum_tiao = 0
            sum_kuan = 0
            for law in laws:
                sum_tiao += len(law[1])
                sum_kuan += len(law[2])
            self.data.at[i, '法院意见_涉及的条数'] = sum_tiao
            self.data.at[i, '法院意见_涉及的款数'] = sum_kuan
    # money flag (second pass, as in the original)
    for i in range(len(self.data['opinion'])):
        if pd.isna(self.data['opinion'][i]):
            continue
        if len(money_pattern.findall(self.data['opinion'][i])) != 0:
            self.data.at[i, '法院意见_是否涉及金额'] = 1
def number14(self):
    """Parse the judgment text ('result') into cited statutes, outcome and fee spans.

    Appends two columns via pd.concat: '判决法条' (statute citations from
    find_law_tiao_kuan_in_text) and '赔偿结果' (the fee span).  The `result`
    and `sentence` intermediates are computed but never stored.

    NOTE(review): this routine's indentation was reconstructed from a
    whitespace-mangled source; the attachment of the trailing `else`
    branches (read here as belonging to the `type(...) is not float`
    checks) is a best-faith reading — confirm against the original file.
    """
    selected_data = self.data["result"]
    data_len = len(selected_data)
    basis = []  # clause citations per row
    result = ["N/A"] * data_len
    charge = ["N/A"] * data_len
    sentence = ["N/A"] * data_len  # NOTE(review): computed below but never stored
    for i in range(data_len):
        if pd.isnull(selected_data.iloc[i]):
            basis.append([])
            continue
        basis.append(find_law_tiao_kuan_in_text(selected_data.iloc[i]))
    # result: text after a "判决"/"裁定" marker whose 10-char left context
    # does not mention "法院" (to skip phrases like "本院判决如下" headers)
    for i in range(selected_data.shape[0]):
        if type(selected_data[i]) is not float:  # non-float == actual string cell
            for j in range(len(selected_data[i])):
                if ("判决" in selected_data[i][j - 4:j + 4] or "裁定" in selected_data[i][j - 4:j + 4]) and (
                        "法院" not in selected_data[i][j - 10:j + 4]):
                    if selected_data[i][j] == ':':
                        if selected_data[i][j + 1] == '、':
                            result[i] = selected_data[i][j + 2:-1]
                        else:
                            result[i] = selected_data[i][j + 1:-1]
        else:
            result[i] = "N/A"
    # charge: span around a "费" (fee) mention preceded by a '、' separator
    for i in range(selected_data.shape[0]):
        if type(selected_data[i]) is not float:
            for j in range(len(selected_data[i])):
                if "费" in selected_data[i][j + 1:j + 10]:
                    if selected_data[i][j - 1] == '、':
                        if selected_data[i][j] == '。':
                            charge[i] = selected_data[i][j + 1:-1]
                        else:
                            charge[i] = selected_data[i][j:-1]
        else:
            charge[i] = "N/A"
    # sentence: prefix of `result` up to a '、' separator
    for i in range(selected_data.shape[0]):
        if type(result[i]) is not float:
            for j in range(len(result[i])):
                if result[i][j - 1] == '、':
                    if result[i][j] == '。':
                        sentence[i] = result[i][0:j - 2]
                    else:
                        sentence[i] = result[i][0:j - 1]
        else:
            sentence[i] = "N/A"
    newdict = {
        '判决法条': basis,
        '赔偿结果': charge
    }
    # DataFrame holding the two retained feature columns
    newdata = pd.DataFrame(newdict)
    self.data = pd.concat([self.data, newdata], axis=1)
    del newdata, newdict, basis, result, charge, sentence
def number15(self):
    """Post-trial notice ('notice') flags.

    Appends two indicator columns via pd.concat:
      '是否为终审判决' — notice says this is a final judgment ('...为终审判决'),
      '是否为终审裁定' — notice says this is a final ruling ('...为终审裁定').
    Missing cells (float NaN) yield 0 in both.
    """
    final_judgment = []  # final instance (判决)
    final_ruling = []    # final order (裁定)
    judgment_re = re.compile(r'.*为终审判决')
    ruling_re = re.compile(r'.*为终审裁定')
    for x in self.data['notice']:
        if type(x) == type(np.nan):  # missing cell
            final_judgment.append(0)
            final_ruling.append(0)
            continue
        final_judgment.append(0 if judgment_re.search(x) is None else 1)
        final_ruling.append(0 if ruling_re.search(x) is None else 1)
    newdata = pd.DataFrame({
        '是否为终审判决': final_judgment,
        '是否为终审裁定': final_ruling
    })
    self.data = pd.concat([self.data, newdata], axis=1)
    del newdata, final_judgment, final_ruling
def number16(self):
    '''
    Appendix: placeholder for the appendix ('附录') feature group;
    intentionally a no-op so preprocess() can call every step uniformly.
    '''
    pass
def preprocess(self):
    """Run every feature-extraction step (number2 .. number16) in order,
    printing a progress line after each one."""
    for step in range(2, 17):
        getattr(self, 'number%d' % step)()
        print("#%d finished" % step)
def store(self):
    """Dump the feature table as CSV into ./.cache/.

    NOTE(review): `datetime.time()` is the midnight constant, so
    str(...) is always '00:00:00' — every run writes the SAME cache file
    (and ':' is illegal in Windows filenames).  This probably intended
    `datetime.datetime.now()`; confirm before changing.
    """
    self.data.to_csv("./.cache/" + str(datetime.time()))
class law_reader:
    """Read-only client for the remote 'law_article' MySQL database."""

    def __init__(self, user, password):
        """Connect to the law-article server and open a buffered cursor.

        :param user: MySQL user name
        :param password: MySQL password
        """
        print("Connecting to Server...")
        self.cnx = mysql.connector.connect(user=user, password=password,
                                          host="cdb-74dx1ytr.gz.tencentcdb.com",
                                          port=10008,
                                          database='law_article')
        self.cursor = self.cnx.cursor(buffered=True)
        print("Server Connected.")

    @staticmethod
    def _check_identifier(law_name):
        # Table names cannot be bound as SQL parameters, so whitelist the
        # characters instead of interpolating arbitrary caller input —
        # the original's string-built queries were an SQL-injection hole.
        if re.fullmatch(r'[A-Za-z_][A-Za-z0-9_]*', law_name) is None:
            raise ValueError('illegal law table name: %r' % law_name)

    def return_full_law(self, law_name):
        '''
        Read one whole law table.

        :param law_name: string table name (identifier characters only)
        :return: pd.DataFrame with the full table
        '''
        self._check_identifier(law_name)
        query = 'SELECT * FROM ' + law_name + ';'
        print("Start Reading Law...")
        law_article = pd.read_sql(query, con=self.cnx)
        return law_article

    def query(self, law_name: str, tiao: int):
        '''
        :param law_name: string name of law (identifier characters only)
        :param tiao: int id of clause
        :return: law dict [index,tag1,tag2,tag3,tag4,tag5,article]
        '''
        # `assert` is stripped under -O; validate explicitly instead.
        if type(tiao) != int:
            raise TypeError('tiao must be int')
        self._check_identifier(law_name)
        query = ('SELECT * FROM ' + law_name + ' WHERE ' + law_name
                 + '.index = ' + str(tiao) + ';')
        print("Start Querying")
        law_article = pd.read_sql(query, con=self.cnx)
        law_article = law_article.iloc[0].to_dict()
        return law_article
| [
"re.split",
"pandas.isnull",
"datetime.time",
"pandas.DataFrame",
"re.compile",
"jieba.posseg.cut",
"numpy.zeros",
"pandas.read_sql",
"re.sub",
"pandas.isna",
"pandas.concat",
"re.search"
] | [((1230, 1257), 'pandas.read_sql', 'pd.read_sql', (['query'], {'con': 'cnx'}), '(query, con=cnx)\n', (1241, 1257), True, 'import pandas as pd\n'), ((1753, 1776), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (1761, 1776), True, 'import numpy as np\n'), ((1794, 1817), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (1802, 1817), True, 'import numpy as np\n'), ((1835, 1858), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (1843, 1858), True, 'import numpy as np\n'), ((1876, 1899), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (1884, 1899), True, 'import numpy as np\n'), ((1915, 1938), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (1923, 1938), True, 'import numpy as np\n'), ((3524, 3547), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (3532, 3547), True, 'import numpy as np\n'), ((3919, 3942), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (3927, 3942), True, 'import numpy as np\n'), ((3964, 3987), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (3972, 3987), True, 'import numpy as np\n'), ((5671, 5692), 'pandas.DataFrame', 'pd.DataFrame', (['newdict'], {}), '(newdict)\n', (5683, 5692), True, 'import pandas as pd\n'), ((5713, 5752), 'pandas.concat', 'pd.concat', (['[self.data, newdata]'], {'axis': '(1)'}), '([self.data, newdata], axis=1)\n', (5722, 5752), True, 'import pandas as pd\n'), ((6835, 6856), 'pandas.DataFrame', 'pd.DataFrame', (['newdict'], {}), '(newdict)\n', (6847, 6856), True, 'import pandas as pd\n'), ((6877, 6916), 'pandas.concat', 'pd.concat', (['[self.data, newdata]'], {'axis': '(1)'}), '([self.data, newdata], axis=1)\n', (6886, 6916), True, 'import pandas as pd\n'), ((7248, 7267), 're.compile', 're.compile', (['""".*检察院"""'], {}), "('.*检察院')\n", (7258, 7267), False, 'import re\n'), ((7290, 7308), 're.compile', 're.compile', (['""".*公司"""'], 
{}), "('.*公司')\n", (7300, 7308), False, 'import re\n'), ((8215, 8234), 're.compile', 're.compile', (['""".*?公司"""'], {}), "('.*?公司')\n", (8225, 8234), False, 'import re\n'), ((8261, 8284), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (8269, 8284), True, 'import numpy as np\n'), ((8308, 8331), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (8316, 8331), True, 'import numpy as np\n'), ((8355, 8378), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (8363, 8378), True, 'import numpy as np\n'), ((13048, 13069), 'pandas.DataFrame', 'pd.DataFrame', (['newdict'], {}), '(newdict)\n', (13060, 13069), True, 'import pandas as pd\n'), ((13090, 13129), 'pandas.concat', 'pd.concat', (['[self.data, newdata]'], {'axis': '(1)'}), '([self.data, newdata], axis=1)\n', (13099, 13129), True, 'import pandas as pd\n'), ((13218, 13234), 're.compile', 're.compile', (['"""撤诉"""'], {}), "('撤诉')\n", (13228, 13234), False, 'import re\n'), ((13251, 13274), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (13259, 13274), True, 'import numpy as np\n'), ((13288, 13311), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (13296, 13311), True, 'import numpy as np\n'), ((13325, 13348), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (13333, 13348), True, 'import numpy as np\n'), ((13362, 13385), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (13370, 13385), True, 'import numpy as np\n'), ((14046, 14074), 're.compile', 're.compile', (['"""受伤|死亡|伤残|残疾|致残"""'], {}), "('受伤|死亡|伤残|残疾|致残')\n", (14056, 14074), False, 'import re\n'), ((14091, 14114), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (14099, 14114), True, 'import numpy as np\n'), ((14128, 14151), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (14136, 14151), True, 'import numpy as np\n'), ((14165, 14188), 'numpy.zeros', 
'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (14173, 14188), True, 'import numpy as np\n'), ((14202, 14225), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (14210, 14225), True, 'import numpy as np\n'), ((14888, 15026), 're.compile', 're.compile', (['"""[0-9]+元|[0-9]+万元|[0-9]+万+[0-9]+千元|[0-9]+千+[0-9]+百元[0-9]+万+[0-9]+千+[0-9]+百元|[0-9]+,+[0-9]+元|[0-9]+,+[0-9]+,+[0-9]+元"""'], {}), "(\n '[0-9]+元|[0-9]+万元|[0-9]+万+[0-9]+千元|[0-9]+千+[0-9]+百元[0-9]+万+[0-9]+千+[0-9]+百元|[0-9]+,+[0-9]+元|[0-9]+,+[0-9]+,+[0-9]+元'\n )\n", (14898, 15026), False, 'import re\n'), ((15174, 15197), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (15182, 15197), True, 'import numpy as np\n'), ((15211, 15234), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (15219, 15234), True, 'import numpy as np\n'), ((15248, 15271), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (15256, 15271), True, 'import numpy as np\n'), ((15285, 15308), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (15293, 15308), True, 'import numpy as np\n'), ((15980, 15999), 're.compile', 're.compile', (['"""有意|故意"""'], {}), "('有意|故意')\n", (15990, 15999), False, 'import re\n'), ((16016, 16039), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (16024, 16039), True, 'import numpy as np\n'), ((16053, 16076), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (16061, 16076), True, 'import numpy as np\n'), ((16090, 16113), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (16098, 16113), True, 'import numpy as np\n'), ((16127, 16150), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (16135, 16150), True, 'import numpy as np\n'), ((16822, 16850), 're.compile', 're.compile', (['"""精神损失|精神赔偿|精神抚慰"""'], {}), "('精神损失|精神赔偿|精神抚慰')\n", (16832, 16850), False, 'import re\n'), ((16867, 16890), 'numpy.zeros', 'np.zeros', 
(['self.data_len'], {}), '(self.data_len)\n', (16875, 16890), True, 'import numpy as np\n'), ((16904, 16927), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (16912, 16927), True, 'import numpy as np\n'), ((16941, 16964), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (16949, 16964), True, 'import numpy as np\n'), ((16978, 17001), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (16986, 17001), True, 'import numpy as np\n'), ((17682, 17700), 're.compile', 're.compile', (['"""拒不到庭"""'], {}), "('拒不到庭')\n", (17692, 17700), False, 'import re\n'), ((17717, 17740), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (17725, 17740), True, 'import numpy as np\n'), ((17754, 17777), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (17762, 17777), True, 'import numpy as np\n'), ((17791, 17814), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (17799, 17814), True, 'import numpy as np\n'), ((17828, 17851), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (17836, 17851), True, 'import numpy as np\n'), ((18524, 18552), 're.compile', 're.compile', (['"""有异议|重新鉴定|判决异议|"""'], {}), "('有异议|重新鉴定|判决异议|')\n", (18534, 18552), False, 'import re\n'), ((18569, 18592), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (18577, 18592), True, 'import numpy as np\n'), ((18606, 18629), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (18614, 18629), True, 'import numpy as np\n'), ((18643, 18666), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (18651, 18666), True, 'import numpy as np\n'), ((18680, 18703), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (18688, 18703), True, 'import numpy as np\n'), ((19358, 19381), 'numpy.zeros', 'np.zeros', (['self.data_len'], {}), '(self.data_len)\n', (19366, 19381), True, 'import numpy as np\n'), 
((19989, 20010), 're.compile', 're.compile', (['"""[0-9]+元"""'], {}), "('[0-9]+元')\n", (19999, 20010), False, 'import re\n'), ((23420, 23441), 'pandas.DataFrame', 'pd.DataFrame', (['newdict'], {}), '(newdict)\n', (23432, 23441), True, 'import pandas as pd\n'), ((23462, 23501), 'pandas.concat', 'pd.concat', (['[self.data, newdata]'], {'axis': '(1)'}), '([self.data, newdata], axis=1)\n', (23471, 23501), True, 'import pandas as pd\n'), ((24464, 24485), 'pandas.DataFrame', 'pd.DataFrame', (['newdict'], {}), '(newdict)\n', (24476, 24485), True, 'import pandas as pd\n'), ((24506, 24545), 'pandas.concat', 'pd.concat', (['[self.data, newdata]'], {'axis': '(1)'}), '([self.data, newdata], axis=1)\n', (24515, 24545), True, 'import pandas as pd\n'), ((26351, 26383), 'pandas.read_sql', 'pd.read_sql', (['query'], {'con': 'self.cnx'}), '(query, con=self.cnx)\n', (26362, 26383), True, 'import pandas as pd\n'), ((26839, 26871), 'pandas.read_sql', 'pd.read_sql', (['query'], {'con': 'self.cnx'}), '(query, con=self.cnx)\n', (26850, 26871), True, 'import pandas as pd\n'), ((4582, 4592), 'pandas.isna', 'pd.isna', (['x'], {}), '(x)\n', (4589, 4592), True, 'import pandas as pd\n'), ((6003, 6020), 're.compile', 're.compile', (['""".*年"""'], {}), "('.*年')\n", (6013, 6020), False, 'import re\n'), ((6257, 6274), 're.compile', 're.compile', (['""".*月"""'], {}), "('.*月')\n", (6267, 6274), False, 'import re\n'), ((6516, 6533), 're.compile', 're.compile', (['""".*日"""'], {}), "('.*日')\n", (6526, 6533), False, 'import re\n'), ((7382, 7415), 'pandas.isna', 'pd.isna', (["self.data['plantiff'][i]"], {}), "(self.data['plantiff'][i])\n", (7389, 7415), True, 'import pandas as pd\n'), ((7481, 7522), 're.sub', 're.sub', (['""" """', '""""""', "self.data['plantiff'][i]"], {}), "(' ', '', self.data['plantiff'][i])\n", (7487, 7522), False, 'import re\n'), ((7549, 7592), 're.split', 're.split', (['pattern', "self.data['plantiff'][i]"], {}), "(pattern, self.data['plantiff'][i])\n", (7557, 7592), False, 'import 
re\n'), ((8509, 8543), 'pandas.isna', 'pd.isna', (["self.data['defendant'][i]"], {}), "(self.data['defendant'][i])\n", (8516, 8543), True, 'import pandas as pd\n'), ((8698, 8738), 're.split', 're.split', (['"""、"""', "self.data['defendant'][i]"], {}), "('、', self.data['defendant'][i])\n", (8706, 8738), False, 'import re\n'), ((9808, 9844), 'pandas.isna', 'pd.isna', (["self.data['third_party'][i]"], {}), "(self.data['third_party'][i])\n", (9815, 9844), True, 'import pandas as pd\n'), ((9898, 9944), 're.split', 're.split', (['pattern', "self.data['third_party'][i]"], {}), "(pattern, self.data['third_party'][i])\n", (9906, 9944), False, 'import re\n'), ((10277, 10307), 'pandas.isna', 'pd.isna', (["self.data['party'][i]"], {}), "(self.data['party'][i])\n", (10284, 10307), True, 'import pandas as pd\n'), ((21421, 21453), 'pandas.isnull', 'pd.isnull', (['selected_data.iloc[i]'], {}), '(selected_data.iloc[i])\n', (21430, 21453), True, 'import pandas as pd\n'), ((4788, 4805), 're.compile', 're.compile', (['""".*省"""'], {}), "('.*省')\n", (4798, 4805), False, 'import re\n'), ((5087, 5104), 're.compile', 're.compile', (['""".*市"""'], {}), "('.*市')\n", (5097, 5104), False, 'import re\n'), ((5330, 5346), 're.compile', 're.compile', (['""".级"""'], {}), "('.级')\n", (5340, 5346), False, 'import re\n'), ((8586, 8631), 're.search', 're.search', (['company', "self.data['defendant'][i]"], {}), "(company, self.data['defendant'][i])\n", (8595, 8631), False, 'import re\n'), ((13446, 13478), 'pandas.isna', 'pd.isna', (["self.data['process'][i]"], {}), "(self.data['process'][i])\n", (13453, 13478), True, 'import pandas as pd\n'), ((14287, 14319), 'pandas.isna', 'pd.isna', (["self.data['process'][i]"], {}), "(self.data['process'][i])\n", (14294, 14319), True, 'import pandas as pd\n'), ((15370, 15402), 'pandas.isna', 'pd.isna', (["self.data['process'][i]"], {}), "(self.data['process'][i])\n", (15377, 15402), True, 'import pandas as pd\n'), ((16212, 16244), 'pandas.isna', 'pd.isna', 
(["self.data['process'][i]"], {}), "(self.data['process'][i])\n", (16219, 16244), True, 'import pandas as pd\n'), ((17063, 17095), 'pandas.isna', 'pd.isna', (["self.data['process'][i]"], {}), "(self.data['process'][i])\n", (17070, 17095), True, 'import pandas as pd\n'), ((17913, 17945), 'pandas.isna', 'pd.isna', (["self.data['process'][i]"], {}), "(self.data['process'][i])\n", (17920, 17945), True, 'import pandas as pd\n'), ((18765, 18797), 'pandas.isna', 'pd.isna', (["self.data['process'][i]"], {}), "(self.data['process'][i])\n", (18772, 18797), True, 'import pandas as pd\n'), ((20090, 20122), 'pandas.isna', 'pd.isna', (["self.data['opinion'][i]"], {}), "(self.data['opinion'][i])\n", (20097, 20122), True, 'import pandas as pd\n'), ((20906, 20938), 'pandas.isna', 'pd.isna', (["self.data['opinion'][i]"], {}), "(self.data['opinion'][i])\n", (20913, 20938), True, 'import pandas as pd\n'), ((23894, 23915), 're.compile', 're.compile', (['""".*为终审判决"""'], {}), "('.*为终审判决')\n", (23904, 23915), False, 'import re\n'), ((24096, 24117), 're.compile', 're.compile', (['""".*为终审裁定"""'], {}), "('.*为终审裁定')\n", (24106, 24117), False, 'import re\n'), ((9194, 9207), 'jieba.posseg.cut', 'pseg.cut', (['mes'], {}), '(mes)\n', (9202, 9207), True, 'import jieba.posseg as pseg\n'), ((10666, 10689), 're.search', 're.search', (['"""[0-9]+元"""', 'x'], {}), "('[0-9]+元', x)\n", (10675, 10689), False, 'import re\n'), ((25557, 25572), 'datetime.time', 'datetime.time', ([], {}), '()\n', (25570, 25572), False, 'import datetime\n'), ((10864, 10904), 're.search', 're.search', (['"""不宜在互联网公布|涉及国家秘密的|未成年人犯罪的"""', 'x'], {}), "('不宜在互联网公布|涉及国家秘密的|未成年人犯罪的', x)\n", (10873, 10904), False, 'import re\n'), ((8835, 8856), 're.search', 're.search', (['company', 's'], {}), '(company, s)\n', (8844, 8856), False, 'import re\n'), ((9034, 9055), 're.search', 're.search', (['company', 's'], {}), '(company, s)\n', (9043, 9055), False, 'import re\n'), ((10969, 10993), 're.search', 're.search', (['"""以调解方式结案的"""', 'x'], 
{}), "('以调解方式结案的', x)\n", (10978, 10993), False, 'import re\n'), ((11059, 11095), 're.search', 're.search', (['"""一案.*本院.*简易程序.*(因|转为)"""', 'x'], {}), "('一案.*本院.*简易程序.*(因|转为)', x)\n", (11068, 11095), False, 'import re\n'), ((11167, 11236), 're.search', 're.search', (['"""一案.*(小额诉讼程序|简易程序).*审理(。$|终结。$|.*到庭参加诉讼|.*到庭应诉|.*参加诉讼)"""', 'x'], {}), "('一案.*(小额诉讼程序|简易程序).*审理(。$|终结。$|.*到庭参加诉讼|.*到庭应诉|.*参加诉讼)', x)\n", (11176, 11236), False, 'import re\n'), ((11305, 11377), 're.search', 're.search', (['"""(一案.*本院.*(审理。$|审理终结。$|公开开庭进行了审理。$|公开开庭进行.?审理.*到庭参加.?诉讼))"""', 'x'], {}), "('(一案.*本院.*(审理。$|审理终结。$|公开开庭进行了审理。$|公开开庭进行.?审理.*到庭参加.?诉讼))', x)\n", (11314, 11377), False, 'import re\n'), ((11598, 11658), 're.search', 're.search', (['"""一案.*本院.*(受理|立案).*(小额诉讼程序|简易程序)(。$|.*由.*审判。$)"""', 'x'], {}), "('一案.*本院.*(受理|立案).*(小额诉讼程序|简易程序)(。$|.*由.*审判。$)', x)\n", (11607, 11658), False, 'import re\n'), ((11730, 11773), 're.search', 're.search', (['"""一案.*本院.*(立案。$|立案受理。$|立案后。$)"""', 'x'], {}), "('一案.*本院.*(立案。$|立案受理。$|立案后。$)', x)\n", (11739, 11773), False, 'import re\n'), ((11841, 11879), 're.search', 're.search', (['"""一案.*(调解.*原告|原告.*调解).*撤"""', 'x'], {}), "('一案.*(调解.*原告|原告.*调解).*撤', x)\n", (11850, 11879), False, 'import re\n'), ((11942, 11960), 're.search', 're.search', (['"""调解"""', 'x'], {}), "('调解', x)\n", (11951, 11960), False, 'import re\n'), ((11979, 12004), 're.search', 're.search', (['"""一案.*原告.*撤"""', 'x'], {}), "('一案.*原告.*撤', x)\n", (11988, 12004), False, 'import re\n'), ((12066, 12123), 're.search', 're.search', (['"""一案.*原告.*((未|不).*(受理|诉讼)费|(受理|诉讼)费.*(未|不))"""', 'x'], {}), "('一案.*原告.*((未|不).*(受理|诉讼)费|(受理|诉讼)费.*(未|不))', x)\n", (12075, 12123), False, 'import re\n'), ((12188, 12220), 're.search', 're.search', (['"""一案.*本院.*依法追加.*被告"""', 'x'], {}), "('一案.*本院.*依法追加.*被告', x)\n", (12197, 12220), False, 'import re\n'), ((12286, 12315), 're.search', 're.search', (['"""上诉人.*不服.*上诉。$"""', 'x'], {}), "('上诉人.*不服.*上诉。$', x)\n", (12295, 12315), False, 'import re\n'), ((12379, 12411), 're.search', 
're.search', (['"""再审.*一案.*不服.*再审。$"""', 'x'], {}), "('再审.*一案.*不服.*再审。$', x)\n", (12388, 12411), False, 'import re\n'), ((12477, 12513), 're.search', 're.search', (['"""一案.*申请财产保全.*符合法律规定。$"""', 'x'], {}), "('一案.*申请财产保全.*符合法律规定。$', x)\n", (12486, 12513), False, 'import re\n'), ((12583, 12627), 're.search', 're.search', (['"""申请.*(请求|要求).*(查封|冻结|扣押|保全措施)"""', 'x'], {}), "('申请.*(请求|要求).*(查封|冻结|扣押|保全措施)', x)\n", (12592, 12627), False, 'import re\n'), ((12695, 12728), 're.search', 're.search', (['"""一案.*(缺席|拒不到庭|未到庭)"""', 'x'], {}), "('一案.*(缺席|拒不到庭|未到庭)', x)\n", (12704, 12728), False, 'import re\n'), ((12794, 12845), 're.search', 're.search', (['"""一案.*申请.*解除(查封|冻结|扣押|保全措施).*符合法律规定。$"""', 'x'], {}), "('一案.*申请.*解除(查封|冻结|扣押|保全措施).*符合法律规定。$', x)\n", (12803, 12845), False, 'import re\n')] |
## TODO: test case should cover, n_class from 3 to 256, test ignore index, test speed and memory usage
import random
import numpy as np
import torch
import torch.nn as nn
import torchvision
from label_smooth import LabelSmoothSoftmaxCEV3
# Pin every RNG source (torch, python random, numpy) and force deterministic
# cuDNN kernels so repeated runs of this benchmark script are reproducible.
torch.manual_seed(15)
random.seed(15)
np.random.seed(15)
torch.backends.cudnn.deterministic = True
class Model(nn.Module):
    """ResNet-18 feature extractor with a conv head, pooled to per-class logits."""

    def __init__(self, n_classes):
        super(Model, self).__init__()
        backbone = torchvision.models.resnet18(pretrained=False)
        # Reuse the stem and the four residual stages of ResNet-18, in the
        # same registration order as the original hand-written assignments.
        for layer_name in ('conv1', 'bn1', 'maxpool', 'relu',
                           'layer1', 'layer2', 'layer3', 'layer4'):
            setattr(self, layer_name, getattr(backbone, layer_name))
        # 3x3 conv head producing one channel per class.
        self.fc = nn.Conv2d(512, n_classes, 3, 1, 1)

    def forward(self, x):
        # Stem: conv -> bn -> relu -> maxpool (same order as the backbone).
        h = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        # Residual stages 1-4, then the classification head.
        h = self.layer4(self.layer3(self.layer2(self.layer1(h))))
        h = self.fc(h)
        # Global average pool over the spatial dims -> (N, n_classes).
        return torch.mean(h, dim=(2, 3))
c = 2  # number of classes
net1 = Model(c)
# net2 = Model()
# net2.load_state_dict(net1.state_dict())
# NOTE(review): 'red' is unused - the criterion below hard-codes reduction='sum'
red = 'mean'
# criteria1 = LovaszSoftmaxV1(reduction='sum', ignore_index=255)
# criteria1 = LovaszSoftmaxV3(reduction='sum', ignore_index=255)
criteria1 = LabelSmoothSoftmaxCEV3(reduction='sum', ignore_index=255)
print(criteria1)
# Move model and loss to GPU and enable training mode.
net1.cuda()
# net2.cuda()
net1.train()
# net2.train()
criteria1.cuda()
# criteria2.cuda()
# net1 = net1.half()
optim1 = torch.optim.SGD(net1.parameters(), lr=1e-2)
# optim2 = torch.optim.SGD(net2.parameters(), lr=1e-2)
bs, h, w = 2, 1000, 1000
# Smoke-test training loop: random inputs and labels, one
# forward/backward/step per iteration, progress printed every 50 iterations.
for it in range(1000):
    inten = torch.randn(bs, 3, h, w).cuda()#.half()
    # lbs = torch.randint(0, c, (bs, h, w)).cuda()
    lbs = torch.randint(0, c, (bs, )).cuda()
    # lbs[1, 1, 1] = 255
    # lbs[0, 3:100, 2:100] = 255
    # lbs[1, 4:70, 28:200] = 255
    logits1 = net1(inten)
    logits1.retain_grad()  # keep the gradient on the non-leaf logits for inspection
    loss1 = criteria1(logits1, lbs)
    optim1.zero_grad()
    loss1.backward()
    optim1.step()
    with torch.no_grad():
        if (it+1) % 50 == 0:
            print('iter: {}, ================='.format(it+1))
| [
"torch.manual_seed",
"label_smooth.LabelSmoothSoftmaxCEV3",
"torch.mean",
"torchvision.models.resnet18",
"random.seed",
"torch.nn.Conv2d",
"torch.randint",
"numpy.random.seed",
"torch.no_grad",
"torch.randn"
] | [((242, 263), 'torch.manual_seed', 'torch.manual_seed', (['(15)'], {}), '(15)\n', (259, 263), False, 'import torch\n'), ((264, 279), 'random.seed', 'random.seed', (['(15)'], {}), '(15)\n', (275, 279), False, 'import random\n'), ((280, 298), 'numpy.random.seed', 'np.random.seed', (['(15)'], {}), '(15)\n', (294, 298), True, 'import numpy as np\n'), ((1509, 1566), 'label_smooth.LabelSmoothSoftmaxCEV3', 'LabelSmoothSoftmaxCEV3', ([], {'reduction': '"""sum"""', 'ignore_index': '(255)'}), "(reduction='sum', ignore_index=255)\n", (1531, 1566), False, 'from label_smooth import LabelSmoothSoftmaxCEV3\n'), ((454, 499), 'torchvision.models.resnet18', 'torchvision.models.resnet18', ([], {'pretrained': '(False)'}), '(pretrained=False)\n', (481, 499), False, 'import torchvision\n'), ((772, 806), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', 'n_classes', '(3)', '(1)', '(1)'], {}), '(512, n_classes, 3, 1, 1)\n', (781, 806), True, 'import torch.nn as nn\n'), ((1220, 1248), 'torch.mean', 'torch.mean', (['feat'], {'dim': '(2, 3)'}), '(feat, dim=(2, 3))\n', (1230, 1248), False, 'import torch\n'), ((2261, 2276), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2274, 2276), False, 'import torch\n'), ((1871, 1895), 'torch.randn', 'torch.randn', (['bs', '(3)', 'h', 'w'], {}), '(bs, 3, h, w)\n', (1882, 1895), False, 'import torch\n'), ((1973, 1999), 'torch.randint', 'torch.randint', (['(0)', 'c', '(bs,)'], {}), '(0, c, (bs,))\n', (1986, 1999), False, 'import torch\n')] |
# Copyright (c) 2019 <NAME>
import numpy as np
from PokerRL.cfr._CFRBase import CFRBase as _CFRBase
class CFRPlus(_CFRBase):
    """CFR+ variant of counterfactual regret minimisation.

    Differs from vanilla CFR by flooring accumulated regrets at zero after
    every iteration (regret matching+) and by linearly weighting strategy
    contributions; evaluation of the average strategy starts only after
    ``delay`` iterations.
    """

    def __init__(self,
                 name,
                 chief_handle,
                 game_cls,
                 agent_bet_set,
                 other_agent_bet_set=None,
                 starting_stack_sizes=None,
                 delay=0, ):
        """
        delay (int): Linear Averaging delay of CFR+ (only applicable if ""cfr_plus"" is
                     True)
        """
        super().__init__(name=name,
                         chief_handle=chief_handle,
                         game_cls=game_cls,
                         starting_stack_sizes=starting_stack_sizes,
                         agent_bet_set=agent_bet_set,
                         other_agent_bet_set=other_agent_bet_set,
                         algo_name="CFRp_delay" + str(delay)
                         )
        self.delay = delay
        self.reset()

    def _evaluate_avg_strats(self):
        # Only evaluate the average strategy once the averaging delay has
        # passed; before that the method implicitly returns None.
        if self._iter_counter > self.delay:
            return super()._evaluate_avg_strats()

    def _regret_formula_after_first_it(self, ev_all_actions, strat_ev, last_regrets):
        # Regret matching+: add instantaneous regret to the running regret,
        # then floor the result at zero element-wise.
        return np.maximum(ev_all_actions - strat_ev + last_regrets, 0)

    def _regret_formula_first_it(self, ev_all_actions, strat_ev):
        return np.maximum(ev_all_actions - strat_ev, 0)  # not max of axis; this is like relu

    def _compute_new_strategy(self, p_id):
        # Recompute the current strategy for player p_id in every tree by
        # regret matching: positive regrets are normalised; nodes with no
        # positive regret fall back to the uniform strategy.
        for t_idx in range(len(self._trees)):
            def _fill(_node):
                if _node.p_id_acting_next == p_id:
                    N = len(_node.children)
                    _reg = _node.data["regret"]
                    # Per-hand regret sums, repeated across the N actions.
                    _reg_sum = np.expand_dims(np.sum(_reg, axis=1), axis=1).repeat(N, axis=1)
                    # Division by a zero regret sum is masked by np.where below.
                    with np.errstate(divide='ignore', invalid='ignore'):
                        _node.strategy = np.where(
                            _reg_sum > 0.0,
                            _reg / _reg_sum,
                            np.full(shape=(self._env_bldrs[t_idx].rules.RANGE_SIZE, N,), fill_value=1.0 / N,
                                    dtype=np.float32)
                        )
                for c in _node.children:
                    _fill(c)
            _fill(self._trees[t_idx].root)

    def _add_strategy_to_average(self, p_id):
        # Accumulate the current strategy into the (linearly weighted) average
        # for player p_id, then renormalise; visits every node recursively.
        def _fill(_node):
            if _node.p_id_acting_next == p_id:
                # if self._iter_counter > self.delay:
                #     current_weight = np.sum(np.arange(self.delay + 1, self._iter_counter + 1))
                #     new_weight = self._iter_counter - self.delay + 1
                #     m_old = current_weight / (current_weight + new_weight)
                #     m_new = new_weight / (current_weight + new_weight)
                #     _node.data["avg_strat"] = m_old * _node.data["avg_strat"] + m_new * _node.strategy
                #     assert np.allclose(np.sum(_node.data["avg_strat"], axis=1), 1, atol=0.0001)
                # elif self._iter_counter == self.delay:
                #     _node.data["avg_strat"] = np.copy(_node.strategy)
                #     assert np.allclose(np.sum(_node.data["avg_strat"], axis=1), 1, atol=0.0001)
                # Linear weighting: iteration t contributes with weight (t + 1).
                contrib = _node.strategy * np.expand_dims(_node.reach_probs[p_id], axis=1) * (self._iter_counter + 1)
                if self._iter_counter > 0:
                    _node.data["avg_strat_sum"] += contrib
                else:
                    _node.data["avg_strat_sum"] = contrib
                # Normalise; hands never reached (sum 0) get a uniform strategy.
                _s = np.expand_dims(np.sum(_node.data["avg_strat_sum"], axis=1), axis=1)
                with np.errstate(divide='ignore', invalid='ignore'):
                    _node.data["avg_strat"] = np.where(_s == 0,
                                                       np.full(shape=len(_node.allowed_actions),
                                                               fill_value=1.0 / len(_node.allowed_actions)),
                                                       _node.data["avg_strat_sum"] / _s
                                                       )
                assert np.allclose(np.sum(_node.data["avg_strat"], axis=1), 1, atol=0.0001)
            for c in _node.children:
                _fill(c)
        for t_idx in range(len(self._trees)):
            _fill(self._trees[t_idx].root)
| [
"numpy.sum",
"numpy.errstate",
"numpy.expand_dims",
"numpy.full",
"numpy.maximum"
] | [((1266, 1321), 'numpy.maximum', 'np.maximum', (['(ev_all_actions - strat_ev + last_regrets)', '(0)'], {}), '(ev_all_actions - strat_ev + last_regrets, 0)\n', (1276, 1321), True, 'import numpy as np\n'), ((1404, 1444), 'numpy.maximum', 'np.maximum', (['(ev_all_actions - strat_ev)', '(0)'], {}), '(ev_all_actions - strat_ev, 0)\n', (1414, 1444), True, 'import numpy as np\n'), ((3622, 3665), 'numpy.sum', 'np.sum', (["_node.data['avg_strat_sum']"], {'axis': '(1)'}), "(_node.data['avg_strat_sum'], axis=1)\n", (3628, 3665), True, 'import numpy as np\n'), ((3697, 3743), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (3708, 3743), True, 'import numpy as np\n'), ((4195, 4234), 'numpy.sum', 'np.sum', (["_node.data['avg_strat']"], {'axis': '(1)'}), "(_node.data['avg_strat'], axis=1)\n", (4201, 4234), True, 'import numpy as np\n'), ((1867, 1913), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (1878, 1913), True, 'import numpy as np\n'), ((3328, 3375), 'numpy.expand_dims', 'np.expand_dims', (['_node.reach_probs[p_id]'], {'axis': '(1)'}), '(_node.reach_probs[p_id], axis=1)\n', (3342, 3375), True, 'import numpy as np\n'), ((2083, 2184), 'numpy.full', 'np.full', ([], {'shape': '(self._env_bldrs[t_idx].rules.RANGE_SIZE, N)', 'fill_value': '(1.0 / N)', 'dtype': 'np.float32'}), '(shape=(self._env_bldrs[t_idx].rules.RANGE_SIZE, N), fill_value=1.0 /\n N, dtype=np.float32)\n', (2090, 2184), True, 'import numpy as np\n'), ((1793, 1813), 'numpy.sum', 'np.sum', (['_reg'], {'axis': '(1)'}), '(_reg, axis=1)\n', (1799, 1813), True, 'import numpy as np\n')] |
import numpy as np
import pytest
from conformance_checking import EmbeddingConformance
def example():
    """Return (algorithm, model_traces, real_traces, expected_matrix) for testing.

    The mock algorithm embeds every trace as its length (float32); two traces
    are then scored by their relative length difference.
    """

    class Mock(EmbeddingConformance):
        def _calc_embeddings(self, model_traces, real_traces):
            # A trace's embedding is simply its length.
            def embed(traces):
                return np.asarray([len(t) for t in traces], dtype=np.float32)

            # The context value (1) is asserted again in _calc_dissimilarity.
            return embed(model_traces), embed(real_traces), 1

        def _calc_dissimilarity(self, model_embedding, real_embedding, context):
            assert context == 1
            longer = max(model_embedding, real_embedding)
            shorter = min(model_embedding, real_embedding)
            # Two empty traces are identical; otherwise score the relative gap.
            return 0 if longer == 0 else 1 - shorter / longer

    model_traces = [
        ["hi", "foo"],
        ["hi", "foo"],
        ["bar"],
        [],
        ["a", "long", "trace", "with", "doubled", "words", "like", "long"],
    ]
    real_traces = [
        ["foobar", "hi"],
        ["bar"],
        ["bar"],
        [],
        ["a", "long", "long", "trace", "but", "not", "the", "same"],
    ]
    expected_matrix = np.asarray(
        [
            [0, 0.5, 0.5, 1, 0.75],
            [0, 0.5, 0.5, 1, 0.75],
            [0.5, 0, 0, 1, 0.875],
            [1, 1, 1, 0, 1],
            [0.75, 0.875, 0.875, 1, 0],
        ],
        dtype=np.float32,
    )
    return Mock(), model_traces, real_traces, expected_matrix
def check_algorithm(algorithm):
    """Run generic sanity checks against an EmbeddingConformance implementation."""
    _, model_traces, real_traces, _ = example()

    # Embedding counts must line up one-to-one with the trace counts.
    model_embeddings, real_embeddings, context = algorithm._calc_embeddings(
        model_traces, real_traces
    )
    assert len(model_embeddings) == len(model_traces), \
        "There must be as many model embeddings as model traces!"
    assert len(real_embeddings) == len(real_traces), \
        "There must be as many real embeddings as real traces!"

    # Every pairwise dissimilarity must be a valid score, and identical
    # traces must be scored as indistinguishable.
    model_pairs = list(zip(model_traces, model_embeddings))
    real_pairs = list(zip(real_traces, real_embeddings))
    for model_trace, model_embedding in model_pairs:
        for real_trace, real_embedding in real_pairs:
            score = algorithm._calc_dissimilarity(
                model_embedding, real_embedding, context
            )
            assert 0 <= score <= 1, "Dissimilarity values should be in [0,1]!"
            if model_trace == real_trace:
                assert score == pytest.approx(0, abs=1e-6), \
                    "Equal traces should have a dissimilarity of zero!"
def test_check_algorithm():
    """The mock algorithm from example() must pass the generic sanity checks."""
    mock_algorithm = example()[0]
    check_algorithm(mock_algorithm)
def test_algorithm_execution():
    """End-to-end check: execute() must yield exactly the expected float32 matrix."""
    algorithm, model_traces, real_traces, expected_matrix = example()

    result = algorithm.execute(model_traces, real_traces)
    matrix = result.get_dissimilarity_matrix()

    assert isinstance(matrix, np.ndarray), "Dissimilarity matrix should be a numpy array!"
    assert matrix.dtype == np.float32, "Dissimilarity matrix should be of type float32!"
    assert np.all(matrix == expected_matrix), "Expected:\n%s\nGot:\n%s" % (
        str(expected_matrix),
        str(matrix),
    )
| [
"pytest.approx",
"numpy.all",
"numpy.asarray"
] | [((1220, 1371), 'numpy.asarray', 'np.asarray', (['[[0, 0.5, 0.5, 1, 0.75], [0, 0.5, 0.5, 1, 0.75], [0.5, 0, 0, 1, 0.875], [1,\n 1, 1, 0, 1], [0.75, 0.875, 0.875, 1, 0]]'], {'dtype': 'np.float32'}), '([[0, 0.5, 0.5, 1, 0.75], [0, 0.5, 0.5, 1, 0.75], [0.5, 0, 0, 1, \n 0.875], [1, 1, 1, 0, 1], [0.75, 0.875, 0.875, 1, 0]], dtype=np.float32)\n', (1230, 1371), True, 'import numpy as np\n'), ((3078, 3111), 'numpy.all', 'np.all', (['(matrix == expected_matrix)'], {}), '(matrix == expected_matrix)\n', (3084, 3111), True, 'import numpy as np\n'), ((2450, 2477), 'pytest.approx', 'pytest.approx', (['(0)'], {'abs': '(1e-06)'}), '(0, abs=1e-06)\n', (2463, 2477), False, 'import pytest\n')] |
import datetime
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
from core.data.base_dataset import BaseDataset
class BewacoDataset(BaseDataset):
    """Dataset for Bewaco sensor CSV logs.

    Loads the raw CSV, resamples it to a clean 30-minute series, repairs
    missing/erroneous readings, clips outliers and adds cyclic day/year
    time-signal columns.
    """

    def __init__(self, __C):
        """
        @param __C: configuration object providing DATA_PATH, RUN_MODE,
            LABEL_COLUMNS and N_HISTORY_DATA
        """
        self.__C = __C
        self.create_dataframe()
        self.preprocess()

    def create_dataframe(self):
        """Read the raw sensor CSV for the configured run mode into self.df."""
        column_names = [
            'Date Time',
            'S1-Batt Volt',
            'S1-PTemp',
            'S1-Salt',
            'S1-SpCond',
            'S1-Temp',
            'S2-Batt Volt',
            'S2-PTemp',
            'S2-Salt',
            'S2-SPCond',
            'S2-Temp',
            'S2-Wiper cur',
            'S2-Wiper pos',
            'S3-Batt Volt',
            'S3-PTemp',
            'S3-Salt'
        ]
        self.df = pd.read_csv(self.__C.DATA_PATH['bewaco'][self.__C.RUN_MODE], names=column_names,
                              header=2, parse_dates=['Date Time'])

    def get_columns(self):
        """Return the DataFrame's column index."""
        return self.df.columns

    def get_str_label_columns(self):
        """Return LABEL_COLUMNS with integer entries resolved to column names.

        The resolved list is written back to the config so that subsequent
        lookups see string names only.
        """
        label_columns = self.__C.LABEL_COLUMNS
        try:
            for i, ele in enumerate(self.__C.LABEL_COLUMNS):
                if isinstance(ele, int):
                    # Bug fix: resolve the configured column *index* (ele),
                    # not the loop position (i), to its column name.
                    label_columns[i] = self.get_columns()[ele]
        except IndexError:
            print('You should check the LABEL_COLUMN input')
        self.__C.LABEL_COLUMNS = label_columns
        return label_columns

    def preprocess(self):
        """Clean and enrich self.df in place (see class docstring)."""
        # Drop the first row
        self.df.drop(index=0, inplace=True)
        # Replace erroneous values with missing values: -99.99 is the logger's
        # error marker, and sub-1-degree temperatures are treated as sensor faults
        self.df.replace(-99.99, np.nan, inplace=True)
        error_s1temp = self.df['S1-Temp'] < 1
        error_s2temp = self.df['S2-Temp'] < 1
        self.df.loc[error_s1temp, 'S1-Temp'] = np.nan
        self.df.loc[error_s2temp, 'S2-Temp'] = np.nan
        # Change into 30-minute data by keeping only timestamps on the half hour
        thirty_minutes = 60 * 30
        selected_rows = self.df['Date Time'].map(datetime.timestamp) % thirty_minutes == 0
        self.df = self.df[selected_rows]
        self.df.reset_index(drop=True, inplace=True)
        # Find gaps where consecutive rows are not exactly 30 minutes apart
        missing_times = []
        for i in range(self.df.shape[0] - 1):
            if (self.df.loc[i+1, 'Date Time'] - self.df.loc[i, 'Date Time']) != timedelta(minutes=30):
                missing_times.append((self.df.loc[i, 'Date Time'], self.df.loc[i+1, 'Date Time']))
        # Insert placeholder rows (NaN readings) for every missing timestamp
        df_all_missing_timestamps = pd.DataFrame(columns=self.df.columns)
        for f, t in missing_times:
            i = f + timedelta(minutes=30)
            while i < t:
                df_new = pd.DataFrame({'Date Time': i}, index=[0], columns=self.df.columns)
                df_all_missing_timestamps = df_all_missing_timestamps.append(df_new)
                i += timedelta(minutes=30)
        self.df = self.df.append(df_all_missing_timestamps).sort_values('Date Time')
        # Sum 2 S2-Wiper columns into a single feature
        self.df['S2-Wiper sum'] = self.df.pop('S2-Wiper cur') + self.df.pop('S2-Wiper pos')
        # Fill missing values (backward then forward fill)
        self.df = self.df.fillna(method='bfill').fillna(method='ffill')
        # Clip outliers column by column
        self.df.set_index('Date Time', inplace=True)
        for col in self.df.columns:
            self.df = self._replace_outliers(self.df, col)
        # Create day/year sin/cos columns as periodic signals for Date Time
        timestamp = self.df.index.map(datetime.timestamp)
        day = 24*60*60
        year = (365.2425)*day
        self.df['Day sin'] = np.sin(timestamp * (2 * np.pi / day))
        self.df['Day cos'] = np.cos(timestamp * (2 * np.pi / day))
        self.df['Year sin'] = np.sin(timestamp * (2 * np.pi / year))
        self.df['Year cos'] = np.cos(timestamp * (2 * np.pi / year))

    def set_predict_dataset(self):
        """Trim self.df to the last N_HISTORY_DATA rows; raise if too short."""
        if self.df.shape[0] < self.__C.N_HISTORY_DATA:
            raise Exception(
                f"""The provided dataset must have number of records greater or
                equal to {self.__C.N_HISTORY_DATA}""")
        # When predict data has more rows than history data, keep the last N_HISTORY_DATA
        self.df = self.df[-self.__C.N_HISTORY_DATA:]

    def _replace_outliers(self, data, col):
        """Clip column values to the [10th, 90th] percentile range."""
        lower_range = data[col].quantile(0.10)
        upper_range = data[col].quantile(0.90)
        data[col] = np.where((data[col] < lower_range), lower_range, data[col])
        data[col] = np.where((data[col] > upper_range), upper_range, data[col])
return data | [
"pandas.read_csv",
"numpy.where",
"numpy.sin",
"numpy.cos",
"pandas.DataFrame",
"datetime.timedelta"
] | [((777, 899), 'pandas.read_csv', 'pd.read_csv', (["self.__C.DATA_PATH['bewaco'][self.__C.RUN_MODE]"], {'names': 'column_names', 'header': '(2)', 'parse_dates': "['Date Time']"}), "(self.__C.DATA_PATH['bewaco'][self.__C.RUN_MODE], names=\n column_names, header=2, parse_dates=['Date Time'])\n", (788, 899), True, 'import pandas as pd\n'), ((2446, 2483), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'self.df.columns'}), '(columns=self.df.columns)\n', (2458, 2483), True, 'import pandas as pd\n'), ((3513, 3550), 'numpy.sin', 'np.sin', (['(timestamp * (2 * np.pi / day))'], {}), '(timestamp * (2 * np.pi / day))\n', (3519, 3550), True, 'import numpy as np\n'), ((3580, 3617), 'numpy.cos', 'np.cos', (['(timestamp * (2 * np.pi / day))'], {}), '(timestamp * (2 * np.pi / day))\n', (3586, 3617), True, 'import numpy as np\n'), ((3648, 3686), 'numpy.sin', 'np.sin', (['(timestamp * (2 * np.pi / year))'], {}), '(timestamp * (2 * np.pi / year))\n', (3654, 3686), True, 'import numpy as np\n'), ((3717, 3755), 'numpy.cos', 'np.cos', (['(timestamp * (2 * np.pi / year))'], {}), '(timestamp * (2 * np.pi / year))\n', (3723, 3755), True, 'import numpy as np\n'), ((4312, 4369), 'numpy.where', 'np.where', (['(data[col] < lower_range)', 'lower_range', 'data[col]'], {}), '(data[col] < lower_range, lower_range, data[col])\n', (4320, 4369), True, 'import numpy as np\n'), ((4392, 4449), 'numpy.where', 'np.where', (['(data[col] > upper_range)', 'upper_range', 'data[col]'], {}), '(data[col] > upper_range, upper_range, data[col])\n', (4400, 4449), True, 'import numpy as np\n'), ((2287, 2308), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(30)'}), '(minutes=30)\n', (2296, 2308), False, 'from datetime import datetime, timedelta\n'), ((2539, 2560), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(30)'}), '(minutes=30)\n', (2548, 2560), False, 'from datetime import datetime, timedelta\n'), ((2611, 2677), 'pandas.DataFrame', 'pd.DataFrame', (["{'Date Time': i}"], {'index': '[0]', 
'columns': 'self.df.columns'}), "({'Date Time': i}, index=[0], columns=self.df.columns)\n", (2623, 2677), True, 'import pandas as pd\n'), ((2784, 2805), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(30)'}), '(minutes=30)\n', (2793, 2805), False, 'from datetime import datetime, timedelta\n')] |
#!/usr/bin/env python
# ===============================================================================
# Copyright 2017 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
'''
Created on 8 Mar 2020
@author: <NAME> <<EMAIL>>
'''
import argparse
import numpy as np
import re
import os
import sys
from datetime import datetime
from pprint import pformat
import tempfile
import logging
import locale
from math import log10
from collections import OrderedDict
from functools import reduce
import zipfile
import zipstream
from geophys_utils import get_spatial_ref_from_wkt
from geophys_utils import NetCDFPointUtils
locale.setlocale(locale.LC_ALL, '') # Use '' for auto, or force e.g. to 'en_US.UTF-8'

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO) # Logging level for this module

# Dynamically adjust integer field widths to fit all data values if True
ADJUST_INTEGER_FIELD_WIDTH = True

# Truncate ASEG-GDF2 field names to eight characters if True
TRUNCATE_VARIABLE_NAMES = False

# Value written for null entries of string-typed fields
STRING_VAR_NULL_VALUE = "NULL"

# Set this to non zero to limit string field width in .dat file.
# WARNING - string truncation may corrupt data!
# N.B: Must be >= all numeric field widths defined in ASEG_GDF_FORMAT dict below (enforced by assertion)
MAX_FIELD_WIDTH = 0

# Maximum width of comment fields in .des file
MAX_COMMENT_WIDTH = 128

# Character encoding for .dfn, .dat & .des files
CHARACTER_ENCODING = 'utf-8'

# Default number of rows to read from netCDF before outputting a chunk of lines.
CACHE_CHUNK_ROWS = 32768

# Buffer size per-line for 64-bit zipfile
LINE_BUFFER_SIZE = 4096 # Conservative (biggest) line size in bytes

TEMP_DIR = tempfile.gettempdir()
# TEMP_DIR = 'C:\Temp'

# Set this to zero for no limit - only set a non-zero value for testing when debug = True
DEBUG_POINT_LIMIT = 0

# List of regular expressions for variable names to exclude from output
EXCLUDE_NAME_REGEXES = (['.*_index$', 'ga_.*metadata', 'latitude.+', 'longitude.+', 'easting.+', 'northing.+'] +
                        NetCDFPointUtils.CRS_VARIABLE_NAMES
                        )

# List of regular expressions for variable attributes to include in .dfn file
INCLUDE_VARIABLE_ATTRIBUTE_REGEXES = ['Intrepid.+']

# ASEG-GDF2 output format specifications keyed by numpy dtype name. Each entry
# holds the fixed column width, the null value written for missing data, the
# ASEG-GDF2 format code for the .dfn file, and the equivalent Python format
# string used when writing .dat lines.
# From <NAME>'s email to <NAME>, sent: Monday, 24 February 2020 4:27 PM
ASEG_GDF_FORMAT = {
    'float64': {
        'width': 18,
        'null': -9.9999999999e+32,
        'aseg_gdf_format': 'E18.10',
        'python_format': '{:>18.10e}',
    },
    'float32': {
        'width': 14,
        'null': -9.999999e+32,
        'aseg_gdf_format': 'E14.6',
        'python_format': '{:>14.6e}',
    },
    'int64': {
        'width': 21,
        'null': -9223372036854775808,
        'aseg_gdf_format': 'I21',
        'python_format': '{:>21d}',
    },
    'uint64': {
        'width': 21,
        # NOTE(review): 18446744073709551616 is 2**64, one above the uint64
        # maximum (2**64 - 1) - confirm the intended null value
        'null': 18446744073709551616,
        'aseg_gdf_format': 'I21',
        'python_format': '{:>21d}',
    },
    'int32': {
        'width': 12,
        'null': -2147483647,
        'aseg_gdf_format': 'I12',
        'python_format': '{:>12d}',
    },
    'uint32': {
        'width': 12,
        'null': 4294967295,
        'aseg_gdf_format': 'I12',
        'python_format': '{:>12d}',
    },
    'int16': {
        'width': 7,
        'null': -32767,
        'aseg_gdf_format': 'I7',
        'python_format': '{:>7d}',
    },
    'uint16': {
        'width': 7,
        'null': 65535,
        'aseg_gdf_format': 'I7',
        'python_format': '{:>7d}',
    },
    'int8': {
        'width': 5,
        'null': -127,
        'aseg_gdf_format': 'I5',
        'python_format': '{:>5d}',
    },
    'uint8': {
        'width': 5,
        'null': 255,
        'aseg_gdf_format': 'I5',
        'python_format': '{:>5d}',
    },
}

# Check to ensure that MAX_FIELD_WIDTH will not truncate numeric fields
assert not MAX_FIELD_WIDTH or all([format_specification['width'] <= MAX_FIELD_WIDTH for format_specification in
                                   ASEG_GDF_FORMAT.values()]), 'Invalid MAX_FIELD_WIDTH {}'.format(MAX_FIELD_WIDTH)
class RowValueCache(object):
    '''\
    Cache of per-row data values read from a netCDF point dataset.

    Rows are loaded via read_points() and drained one output row at a time
    through chunk_row_data_generator().
    '''

    def __init__(self, nc2aseggdf):
        '''
        @param nc2aseggdf: owning NC2ASEGGDF2 instance supplying field
            definitions and data access
        '''
        self.nc2aseggdf = nc2aseggdf
        self.total_points = nc2aseggdf.total_points
        self.field_definitions = nc2aseggdf.field_definitions
        self.netcdf_dataset = nc2aseggdf.netcdf_dataset
        self.clear_cache()

    def clear_cache(self):
        '''Discard any cached rows and reset the row count to zero.'''
        self.index_range = 0
        self.cache = {}

    def read_points(self, start_index, end_index, point_mask=None):
        '''
        Load points in [start_index, end_index) into the cache, optionally
        counting only the points selected by point_mask.
        '''
        self.index_range = end_index - start_index

        if point_mask is None:  # no mask supplied - every point in range is wanted
            subset_mask = np.ones(shape=(self.index_range,), dtype='bool')
        else:
            subset_mask = point_mask[start_index:end_index]
            self.index_range = np.count_nonzero(subset_mask)

        # Nothing selected - leave the cache untouched and bail out early
        if not self.index_range:
            logger.debug('No points to retrieve - all masked out')
            return

        # Cache one slice of data values per field, keyed by field name
        point_slice = slice(start_index, end_index)
        self.cache = {
            field_name: self.nc2aseggdf.get_data_values(field_name, point_slice)
            for field_name in self.field_definitions
        }
        # logger.debug('self.cache: {}'.format(pformat(self.cache)))

    def chunk_row_data_generator(self, clear_cache=True):
        '''
        Yield one list of output values per cached row, expanding each 2D
        variable into multiple columns. Optionally clears the cache when done.
        '''
        if not self.index_range:
            logger.debug('Cache is empty - nothing to yield')
            return

        for row_index in range(self.index_range):
            row = []
            for field_name, field_definition in self.field_definitions.items():
                value = self.cache[field_name][row_index]
                # OPeNDAP can return string values wrapped in object arrays
                if type(value) == np.ndarray and value.dtype == object:
                    value = str(value)
                if field_definition['columns'] == 1:
                    row.append(value)  # element from a 1D variable
                else:
                    row.extend(value)  # row from a 2D variable - one column per element
            # logger.debug('row: {}'.format(row))
            yield row

        if clear_cache:
            self.clear_cache()  # Clear cache after outputting all lines
class NC2ASEGGDF2(object):
    def __init__(self,
                 netcdf_dataset,
                 debug=False,
                 verbose=False,
                 ):
        '''
        Wrap a point-indexed netCDF dataset for ASEG-GDF2 export.

        @param netcdf_dataset: netCDF dataset handle (must contain a "point"
            dimension); handed to NetCDFPointUtils for point access
        @param debug: enable debug-level logging for this module
        @param verbose: if True, progress messages go to logger.info,
            otherwise to logger.debug
        '''

        def build_field_definitions():
            '''\
            Helper function to build self.field_definitions as an OrderedDict of field definitions keyed by ASEG-GDF2 field name
            '''
            self.field_definitions = OrderedDict()
            for variable_name, variable in self.netcdf_dataset.variables.items():
                # Check for any name exclusion matches
                if any([re.match(exclude_name_regex, variable_name, re.IGNORECASE)
                        for exclude_name_regex in EXCLUDE_NAME_REGEXES]):
                    logger.debug('Excluding variable {}'.format(variable_name))
                    continue

                if variable_name in self.field_definitions.keys():  # already processed
                    continue

                if len(variable.dimensions) == 1 and variable.dimensions != (
                        'point',):  # Non-point indexed array variable, e.g.flight(line)
                    # Need to work backwards from variable to point indexed variable
                    try:
                        try:
                            index_variable = self.netcdf_dataset.variables[
                                variable.index]  # Try to use explicit index attribute
                        except AttributeError:
                            # Fall back to the "<dimension>_index" naming convention
                            variable_dimension_name = variable.dimensions[0]
                            index_variable = self.netcdf_dataset.variables[variable_dimension_name + '_index']
                        assert index_variable.dimensions == ('point',), 'Invalid dimensions for variable {}: {}'.format(
                            index_variable.name,
                            index_variable.dimensions)
                        variables = [index_variable, variable]
                        logger.debug(
                            'Found index variable {} for lookup variable {}'.format(index_variable.name, variable_name))
                    except:
                        logger.debug('Index variable not found for lookup variable {}'.format(variable_name))
                        continue  # Can't find lookup variable - ignore this one
                elif (
                        len(variable.dimensions)
                        and variable.dimensions[0] == 'point'
                        and not (
                        variable.dimensions == ('point',)
                        and (
                        variable_name.endswith('_index')
                        or hasattr(variable, 'lookup')
                )
                )
                ):
                    logger.debug('Found point-wise array data variable {}'.format(variable_name))
                    variables = [variable]  # Not an index variable - just use primary variable values
                elif not len(variable.dimensions) and variable_name != self.ncpu.crs_variable.name:
                    logger.debug('Found point-wise scalar data variable {}'.format(variable_name))
                    variables = [variable]  # Scalar variable - broadcast out to all points
                else:
                    logger.debug('Unable to deal with variable {} - ignoring'.format(variable_name))
                    continue

                variable_attributes = dict(variables[-1].__dict__)
                # logger.debug('variable_attributes = {}'.format(pformat(variable_attributes)))

                dtype = variables[-1].dtype
                logger.debug('Variable is of dtype {}'.format(dtype))
                # Look up the numeric format spec for this dtype (copy so it can be adjusted)
                format_dict = dict(ASEG_GDF_FORMAT.get(str(dtype)) or {})

                if not format_dict:  # Unrecognised format. Treat as string
                    # Width is the longest stripped string representation plus one
                    width = max([len(str(element).strip()) for element in variables[-1][:]]) + 1

                    if MAX_FIELD_WIDTH and width > MAX_FIELD_WIDTH:
                        logger.warning(
                            'WARNING: String variable "{}" data will be truncated from a width of {} to {}'.format(
                                variable_name, width, MAX_FIELD_WIDTH))
                        width = MAX_FIELD_WIDTH

                    format_dict = {
                        'width': width,
                        'null': STRING_VAR_NULL_VALUE,
                        'aseg_gdf_format': 'A{}'.format(width),
                        'python_format': '{{:>{}s}}'.format(width),
                    }

                try:
                    column_count = reduce(lambda x, y: x * y, variable.shape[
                                                              1:])  # This will work for (2+)D, even if we only support 1D or 2D
                except:  # Scalar or 1D
                    column_count = 1

                variable_definition = {
                    'variable_name': variable_name,
                    'variables': variables,
                    'attributes': variable_attributes,
                    'dtype': dtype,
                    'format': format_dict,
                    'columns': column_count
                }

                if TRUNCATE_VARIABLE_NAMES:
                    # Sanitise field name, truncate to 8 characters and ensure uniqueness
                    field_name = re.sub('(\W|_)+', '', variable_name)[:8].upper()
                    field_name_count = 0
                    while field_name in [variable_definition.get('field_name')
                                         for variable_definition in self.field_definitions.values()]:
                        field_name_count += 1
                        field_name = field_name[:-len(str(field_name_count))] + str(field_name_count)
                else:
                    field_name = re.sub('\W+', '_',
                                        variable_name)  # Sanitisation shouldn't be necessary, but we'll do it anyway

                variable_definition['field_name'] = field_name

                # Add definition to allow subsequent self.get_data_values(field_name) call
                self.field_definitions[field_name] = variable_definition

                if ADJUST_INTEGER_FIELD_WIDTH and 'int' in str(
                        dtype):  # Field is some kind of integer - adjust format for data
                    # logger.debug('\tChecking values to adjust integer field width for variable {}'.format(variable_name))
                    max_abs_value = np.nanmax(np.abs(self.get_data_values(field_name)))
                    min_value = np.nanmin(self.get_data_values(field_name))
                    # logger.debug('\tMaximum absolute value = {}, minimum value = {}'.format(max_abs_value, min_value))
                    if max_abs_value > 0:
                        width = int(log10(max_abs_value)) + 2  # allow for leading space
                        if min_value < 0:
                            width += 1  # allow for "-"
                    else:
                        width = 2
                    if width != format_dict['width']:
                        logger.debug(
                            '\tAdjusting integer field width from {} to {} for variable {}'.format(format_dict['width'],
                                                                                                   width,
                                                                                                   variable_name))
                        format_dict['width'] = width
                        format_dict['aseg_gdf_format'] = 'I{}'.format(width)
                        format_dict['python_format'] = '{{:>{}d}}'.format(width)

            # logger.debug(self.field_definitions)

        # Start of __init__
        # TODO: Make this a property
        self.debug = debug
        log_level = logging.DEBUG if debug else logging.INFO
        logger.setLevel(level=log_level)

        if verbose:
            logger.debug('Enabling info level output')
            self.info_output = logger.info  # Verbose
        else:
            self.info_output = logger.debug  # Non-verbose

        self.ncpu = NetCDFPointUtils(netcdf_dataset, debug=debug)
        self.netcdf_dataset = self.ncpu.netcdf_dataset
        self.netcdf_path = self.ncpu.netcdf_dataset.filepath()
        self.netcdf_dataset.set_auto_mask(False)  # Turn auto-masking off to allow substitution of new null values
        assert 'point' in self.netcdf_dataset.dimensions.keys(), '"point" not found in dataset dimensions'
        self.info_output('Opened netCDF dataset {}'.format(self.netcdf_path))

        self.total_points = self.ncpu.point_count
        self.spatial_ref = get_spatial_ref_from_wkt(self.ncpu.wkt)

        # set self.field_definitions
        build_field_definitions()

        # set reporting increment to nice number giving 100 - 199 progress reports
        self.line_report_increment = (10.0 ** int(log10(self.ncpu.point_count / 50))) / 2.0
def get_data_values(self, field_name, point_slice=slice(None, None, None)):
'''\
Function to return data values as an array, expanding lookups and broadcasting scalars if necessary
@param field_name: Variable name to query (key in self.field_definitions)
@param point_slice: slice to apply to point (i.e. first) dimension
@return data_array: Array of data values
'''
variables = self.field_definitions[field_name]['variables']
# logger.debug('Field {} represents variable {}({})'.format(field_name, variables[-1].name, ','.join(variables[0].dimensions)))
if len(variables) == 1: # Single variable => no lookup
if len(variables[0].dimensions): # Array
assert variables[0].dimensions[0] == 'point', 'First array dimension must be "point"'
data = variables[0][point_slice]
else: # Scalar
# Broadcast scalar to required array shape
data = np.array([variables[0][:]] * (
((point_slice.stop or self.total_points) - (point_slice.start or 0)) // (
point_slice.step or 1)))
elif len(variables) == 2: # Index & Lookup variables
mask_value_index_var = getattr(variables[0], "_FillValue")
# mask_value_lookup_var = getattr(variables[1], "_FillValue")
# check if the index variable contains any masked values
# If so return a list of where the masked values are replaced with the STRING_VAR_NULL_VALUE
# and non masked values are given their corresponding value in the lookup table
#TODO this may be needed for lines also if any line variables contain masked values for lookup tables
if np.any(variables[0][:] == mask_value_index_var):
logger.debug("Variable '{}' contains one or more masked values. Converting masked value/s to {}".format(variables[0].name, STRING_VAR_NULL_VALUE))
i = 0
lookup_len = len(variables[0][:])
data = [None] * (lookup_len) # create a list of the required size to fill in with correct values
while i < lookup_len:
if variables[0][i] != mask_value_index_var:
data[i] = variables[1][variables[0][i]]
else:
data[i] = str(STRING_VAR_NULL_VALUE)
i = i + 1
# if no masked values, make a list with the index values converted to their corresponding lookup table
# values
else:
data = variables[1][:][variables[0][:][point_slice]] # Use first array to index second one
else:
raise BaseException(
'Unable to resolve chained lookups (yet): {}'.format([variable.name for variable in variables]))
# Substitute null_value for _FillValue if required. This
null_value = self.field_definitions[field_name]['format']['null']
if null_value is not None and hasattr(variables[-1], '_FillValue'):
data[(data == (variables[-1]._FillValue))] = null_value
return data
def create_dfn_line(
self,
rt,
name,
aseg_gdf_format,
definition=None,
defn=None,
st='RECD'
):
'''
Helper function to write line to .dfn file.
self.defn is used to track the DEFN number, which can be reset using the optional defn parameter
@param rt: value for "RT=<rt>" portion of DEFN line, e.g. '' or 'PROJ'
@param name: Name of DEFN
@param format_specifier_dict: format specifier dict, e.g. {'width': 5, 'null': 256, 'aseg_gdf_format': 'I5', 'python_format': '{:>5d}'}
@param definition=None: Definition string
@param defn=None: New value of DEFN number. Defaults to self.defn+1
@param st: value for "RT=<rt>" portion of DEFN line. Default = 'RECD'
@return line: output line
'''
if defn is None:
self.defn += 1 # Increment last DEFN value (initialised to 0 in constructor)
else:
self.defn = defn
line = 'DEFN {defn} ST={st},RT={rt}; {name}'.format(defn=self.defn,
st=st,
rt=rt,
name=name,
)
if aseg_gdf_format:
line += ': {aseg_gdf_format}'.format(aseg_gdf_format=aseg_gdf_format)
if definition:
line += ': ' + definition
# logger.debug('dfn file line: {}'.format(line))
return line
def create_dfn_file(self, dfn_out_path, zipstream_zipfile=None):
'''
Helper function to output .dfn file
'''
if zipstream_zipfile:
dfn_basename = os.path.basename(dfn_out_path)
zipstream_zipfile.write_iter(dfn_basename,
self.encoded_dfn_line_generator(encoding=CHARACTER_ENCODING),
)
else:
# Create, write and close .dfn file
with open(dfn_out_path, 'w') as dfn_file:
for dfn_line in self.dfn_line_generator():
dfn_file.write(dfn_line)
dfn_file.close()
self.info_output('Finished writing .dfn file {}'.format(self.dfn_out_path))
def encoded_dfn_line_generator(self, encoding=CHARACTER_ENCODING):
'''
Helper generator to yield encoded bytestrings of all lines in .dfn file
'''
for line_string in self.dfn_line_generator():
yield line_string.encode(encoding)
def dfn_line_generator(self):
'''
Helper generator to yield all lines in .dfn file
'''
def variable_defns_generator():
"""
Helper function to write a DEFN line for each variable
"""
self.defn = 0 # reset DEFN number
# for variable_name, variable_attributes in self.field_definitions.items():
for field_name, field_definition in self.field_definitions.items():
optional_attribute_list = []
units = field_definition['attributes'].get('units')
if units:
optional_attribute_list.append('UNITS={units}'.format(units=units))
# fill_value = field_definition['attributes'].get('_FillValue')
null_value = field_definition['format'].get('null')
if null_value is not None:
optional_attribute_list.append(
'NULL=' + field_definition['format']['python_format'].format(null_value).strip())
long_name = field_definition['attributes'].get('long_name') or re.sub('(\W|_)+', ' ',
field_definition['variable_name'])
if long_name:
optional_attribute_list.append('NAME={long_name}'.format(long_name=long_name))
# Include any variable attributes which match regexes in INCLUDE_VARIABLE_ATTRIBUTE_REGEXES
for attribute_name, attribute_value in field_definition['attributes'].items():
if any([re.match(variable_attribute_regex, attribute_name, re.IGNORECASE)
for variable_attribute_regex in INCLUDE_VARIABLE_ATTRIBUTE_REGEXES]):
optional_attribute_list.append('{}={}'.format(attribute_name,
attribute_value))
# ===========================================================
# # Check for additional ASEG-GDF attributes defined in settings
# variable_attributes = field_definition.get('variable_attributes')
# if variable_attributes:
# for aseg_gdf_attribute, netcdf_attribute in self.settings['attributes'].items():
# attribute_value = variable_attributes.get(netcdf_attribute)
# if attribute_value is not None:
# optional_attribute_list.append('{aseg_gdf_attribute}={attribute_value}'.format(aseg_gdf_attribute=aseg_gdf_attribute,
# attribute_value=attribute_value
# ))
# ===========================================================
if optional_attribute_list:
definition = ', '.join(optional_attribute_list)
else:
definition = None
aseg_gdf_format = field_definition['format']['aseg_gdf_format']
if field_definition['columns'] > 1: # Need to pre-pend number of columns to format string
aseg_gdf_format = '{}{}'.format(field_definition['columns'], aseg_gdf_format)
yield self.create_dfn_line(rt='',
name=field_name,
aseg_gdf_format=aseg_gdf_format,
definition=definition,
)
# Write 'END DEFN'
yield self.create_dfn_line(rt='',
name='END DEFN',
aseg_gdf_format=None
)
def proj_defns_generator():
"""
Helper function to write PROJ lines
From standard:
DEFN 1 ST=RECD,RT=PROJ; RT: A4
DEFN 2 ST=RECD,RT=PROJ; COORDSYS: A40: NAME=projection name, POSC projection name
DEFN 3 ST=RECD,RT=PROJ; DATUM: A40: NAME=datum name, EPSG compliant ellipsoid name
DEFN 4 ST=RECD,RT=PROJ; MAJ_AXIS: D12.1: UNIT=m, NAME=major_axis, Major axis in units
relevant to the ellipsoid definition
DEFN 5 ST=RECD,RT=PROJ; INVFLATT: D14.9: NAME=inverse flattening, 1/f inverse of flattening
DEFN 6 ST=RECD,RT=PROJ; PRIMEMER: F10.1: UNIT=deg, NAME=prime_meridian, Location of prime
meridian relative to Greenwich
DEFN 7 ST=RECD,RT=PROJ; PROJMETH: A30: NAME=projection_method, eg. Transverse Mercator,
Lambert etc
DEFN 8 ST=RECD,RT=PROJ; PARAM1: D14.0: NAME=Proj_par1, 1st projecton paramater See Table 1
DEFN 9 ST=RECD,RT=PROJ; PARAM2: D14.0: NAME=Proj_par2, 2nd projection parameter
DEFN 10 ST=RECD,RT=PROJ; PARAM3: D14.0: NAME=Proj_par3, 3rd projection parameter
DEFN 11 ST=RECD,RT=PROJ; PARAM4: D14.0: NAME=Proj_par4, 4th projection parameter
DEFN 12 ST=RECD,RT=PROJ; PARAM5: D14.0: NAME=Proj_par5, 5th projection parameter
DEFN 13 ST=RECD,RT=PROJ; PARAM6: D14.0: NAME=Proj_par6, 6th projection parameter
DEFN 14 ST=RECD,RT=PROJ; PARAM7: D14.0: NAME=Proj_par7, 7th projection parameter
DEFN 15 ST=RECD,RT=PROJ; END DEFN
From sample file:
DEFN 1 ST=RECD,RT=PROJ; RT:A4
DEFN 2 ST=RECD,RT=PROJ; PROJNAME:A30: COMMENT=GDA94 / MGA zone 54
DEFN 3 ST=RECD,RT=PROJ; ELLPSNAM:A30: COMMENT=GRS 1980
DEFN 4 ST=RECD,RT=PROJ; MAJ_AXIS: D12.1: UNIT=m, COMMENT=6378137.000000
DEFN 5 ST=RECD,RT=PROJ; ECCENT: D12.9: COMMENT=298.257222
DEFN 6 ST=RECD,RT=PROJ; PRIMEMER: F10.1: UNIT=deg, COMMENT=0.000000
DEFN 7 ST=RECD,RT=PROJ; PROJMETH: A30: COMMENT=Transverse Mercator
DEFN 8 ST=RECD,RT=PROJ; PARAM1: D14.0: COMMENT= 0.000000
DEFN 9 ST=RECD,RT=PROJ; PARAM2: D14.0: COMMENT= 141.000000
DEFN 10 ST=RECD,RT=PROJ; PARAM3: D14.0: COMMENT= 0.999600
DEFN 11 ST=RECD,RT=PROJ; PARAM4: D14.0: COMMENT= 500000.000000
DEFN 12 ST=RECD,RT=PROJ; PARAM5: D14.0: COMMENT=10000000.00000
DEFN 13 ST=RECD,RT=PROJ; PARAM6: D14.0:
DEFN 14 ST=RECD,RT=PROJ; PARAM7: D14.0:
DEFN 15 ST=RECD,RT=PROJ; END DEFN
PROJGDA94 / MGA zone 54 GRS 1980 6378137.0000 298.257222 0.000000 Transverse Mercator 0.000000 141.000000 0.999600 500000.000000 10000000.00000
"""
geogcs = self.spatial_ref.GetAttrValue('geogcs') # e.g. 'GDA94'
projcs = self.spatial_ref.GetAttrValue('projcs') # e.g. 'UTM Zone 54, Southern Hemisphere'
ellipse_name = self.spatial_ref.GetAttrValue('spheroid', 0)
major_axis = float(self.spatial_ref.GetAttrValue('spheroid', 1))
prime_meridian = float(self.spatial_ref.GetAttrValue('primem', 1))
inverse_flattening = float(self.spatial_ref.GetInvFlattening())
# eccentricity = self.spatial_ref.GetAttrValue('spheroid', 2) # Non-standard definition same as inverse_flattening?
if self.spatial_ref.IsProjected():
if projcs.startswith(geogcs):
projection_name = projcs
else:
projection_name = geogcs + ' / ' + re.sub('[\:\,\=]+', '',
projcs) # e.g. 'GDA94 / UTM Zone 54, Southern Hemisphere'
projection_method = self.spatial_ref.GetAttrValue('projection').replace('_', ' ')
projection_parameters = [(key, float(value))
for key, value in re.findall('PARAMETER\["(.+)",(\d+\.?\d*)\]',
self.spatial_ref.ExportToPrettyWkt())
]
else: # Unprojected CRS
projection_name = geogcs
projection_method = None
projection_parameters = None
self.defn = 0 # reset DEFN number
# write 'DEFN 1 ST=RECD,RT=PROJ; RT:A4'
yield self.create_dfn_line(rt='PROJ',
name='RT',
aseg_gdf_format='A4'
)
yield self.create_dfn_line(rt='PROJ',
name='COORDSYS',
aseg_gdf_format='A40',
definition='NAME={projection_name}, Projection name'.format(
projection_name=projection_name)
)
yield self.create_dfn_line(rt='PROJ',
name='DATUM',
aseg_gdf_format='A40',
definition='NAME={ellipse_name}, Ellipsoid name'.format(
ellipse_name=ellipse_name)
)
yield self.create_dfn_line(rt='PROJ',
name='MAJ_AXIS',
aseg_gdf_format='D12.1',
definition='UNIT={unit}, NAME={major_axis}, Major axis'.format(unit='m',
major_axis=major_axis)
)
yield self.create_dfn_line(rt='PROJ',
name='INVFLATT',
aseg_gdf_format='D14.9',
definition='NAME={inverse_flattening}, 1/f inverse of flattening'.format(
inverse_flattening=inverse_flattening)
)
yield self.create_dfn_line(rt='PROJ',
name='PRIMEMER',
aseg_gdf_format='F10.1',
definition='UNIT={unit}, NAME={prime_meridian}, Location of prime meridian'.format(
unit='degree', prime_meridian=prime_meridian)
)
# ===============================================================================
# # Non-standard definitions
# yield self.create_dfn_line(rt='PROJ',
# name='ELLPSNAM',
# aseg_gdf_format='A30',
# definition='NAME={ellipse_name}, Non-standard definition for ellipse name'.format(ellipse_name=ellipse_name)
# )
#
# yield self.create_dfn_line(rt='PROJ',
# name='PROJNAME',
# aseg_gdf_format='A40',
# definition='NAME={projection_name}, Non-standard definition for projection name'.format(projection_name=projection_name)
# )
#
# yield self.create_dfn_line(rt='PROJ',
# name='ECCENT',
# aseg_gdf_format='D12.9',
# definition='NAME={eccentricity}, Non-standard definition for ellipsoidal eccentricity'.format(eccentricity=eccentricity)
# )
# ===============================================================================
if projection_method:
yield self.create_dfn_line(rt='PROJ',
name='PROJMETH',
aseg_gdf_format='A30',
definition='NAME={projection_method}, projection method'.format(
projection_method=projection_method)
)
# Write all projection parameters starting from DEFN 8
param_no = 0
for param_name, param_value in projection_parameters:
param_no += 1
yield self.create_dfn_line(rt='PROJ',
name='PARAM{param_no}'.format(param_no=param_no),
aseg_gdf_format='D14.0',
# TODO: Investigate whether this is OK - it looks dodgy to me
definition='NAME={param_value}, {param_name}'.format(
param_value=param_value, param_name=param_name)
)
# Write 'END DEFN'
yield self.create_dfn_line(rt='PROJ',
name='END DEFN',
aseg_gdf_format=''
)
# TODO: Write fixed length PROJ line at end of file
return # End of function proj_defns_generator
yield 'DEFN ST=RECD,RT=COMM;RT:A4;COMMENTS:A{}\n'.format(MAX_COMMENT_WIDTH) # TODO: Check this first line
for defn_line in variable_defns_generator():
yield defn_line + '\n'
for proj_line in proj_defns_generator():
yield proj_line + '\n'
def create_dat_file(self, dat_out_path, cache_chunk_rows=None, point_mask=None, zipstream_zipfile=None):
'''
Helper function to output .dat file
'''
def chunk_buffer_generator(row_value_cache, python_format_list, cache_chunk_rows, point_mask=None,
encoding=None):
'''
Generator to yield all line strings across all point variables for specified row range
'''
def chunk_line_generator(row_value_cache, python_format_list, start_index, end_index, point_mask=None):
'''
Helper Generator to yield line strings for specified rows across all point variables
'''
logger.debug('Reading rows {:n} - {:n}'.format(start_index + 1, end_index))
row_value_cache.read_points(start_index, end_index, point_mask=point_mask)
logger.debug('Preparing ASEG-GDF lines for rows {:n} - {:n}'.format(start_index + 1, end_index))
for row_value_list in row_value_cache.chunk_row_data_generator():
# logger.debug('row_value_list: {}'.format(row_value_list))
# Turn list of values into a string using python_formats
# Truncate fields to maximum width with leading space - only string fields should be affected
yield ''.join([' ' + python_format_list[value_index].format(row_value_list[value_index])[
1 - MAX_FIELD_WIDTH::]
for value_index in range(len(
python_format_list))]) # .lstrip() # lstrip if we want to discard leading spaces from line
# Process all chunks
point_count = 0
for chunk_index in range(self.total_points // cache_chunk_rows + 1):
chunk_line_list = []
for line in chunk_line_generator(row_value_cache, python_format_list,
start_index=chunk_index * cache_chunk_rows,
end_index=min((chunk_index + 1) * cache_chunk_rows,
self.total_points
),
point_mask=point_mask
):
point_count += 1
if not (point_count % self.line_report_increment):
self.info_output(
'{:n} / {:n} ASEG-GDF2 rows converted to text'.format(point_count, self.total_points))
# logger.debug('line: "{}"'.format(line))
chunk_line_list.append(line)
if self.debug and DEBUG_POINT_LIMIT and (
point_count >= DEBUG_POINT_LIMIT): # Don't process more lines
break
chunk_buffer_string = '\n'.join(chunk_line_list) + '\n' # Yield a chunk of lines
if encoding:
encoded_bytestring = chunk_buffer_string.encode(encoding)
line_size = sys.getsizeof(encoded_bytestring)
assert line_size < LINE_BUFFER_SIZE * CACHE_CHUNK_ROWS, 'Line size of {} exceeds buffer size of {}'.format(
line_size,
LINE_BUFFER_SIZE * CACHE_CHUNK_ROWS)
logger.debug('Writing ASEG-GDF line buffer of size {:n} bytes'.format(line_size))
yield (encoded_bytestring)
else:
logger.debug('Writing ASEG-GDF line buffer')
yield (chunk_buffer_string)
if self.debug and DEBUG_POINT_LIMIT and (point_count >= DEBUG_POINT_LIMIT): # Don't process more chunks
logger.warning('WARNING: Output limited to {:n} points in debug mode'.format(DEBUG_POINT_LIMIT))
break
self.info_output('A total of {:n} rows were output'.format(point_count))
# Start of create_dat_file function
cache_chunk_rows = cache_chunk_rows or CACHE_CHUNK_ROWS
# Start of chunk_buffer_generator
row_value_cache = RowValueCache(self) # Create cache for multiple chunks of data
python_format_list = []
for field_definition in self.field_definitions.values():
for _column_index in range(field_definition['columns']):
python_format_list.append(field_definition['format']['python_format'])
# logger.debug('python_format_list: {}'.format(python_format_list))
if zipstream_zipfile:
# Write to zip file
dat_basename = os.path.basename(dat_out_path)
zipstream_zipfile.write_iter(
dat_basename,
chunk_buffer_generator(row_value_cache, python_format_list, cache_chunk_rows, point_mask,
encoding=CHARACTER_ENCODING),
buffer_size=self.ncpu.point_count * LINE_BUFFER_SIZE # Need this to force 64-bit zip
)
else: # No zip
# Create, write and close .dat file
dat_out_file = open(dat_out_path, mode='w')
for chunk_buffer in chunk_buffer_generator(row_value_cache, python_format_list, cache_chunk_rows,
point_mask):
dat_out_file.write(chunk_buffer + '\n')
dat_out_file.close()
self.info_output('Finished writing .dat file {}'.format(dat_out_path))
def create_des_file(self, des_out_path, zipstream_zipfile=None):
'''
Helper function to output .des file
'''
def des_line_generator(encoding=None):
'''
Helper Generator to yield line strings for .des file
'''
# Ignore netCDF system attributes
global_attributes_dict = {key: str(value).strip()
for key, value in self.netcdf_dataset.__dict__.items()
if not key.startswith('_')
}
# Determine maximum key length for fixed field width
max_key_length = max([len(key) for key in global_attributes_dict.keys()])
global_attributes_dict['ASEG_GDF2'] = 'Generated at {} from {} using nc2aseg.py'.format(
datetime.now().isoformat(),
os.path.basename(self.netcdf_path))
# Show dimension sizes
for dimension_name, dimension in self.netcdf_dataset.dimensions.items():
global_attributes_dict[dimension_name + '_count'] = str(dimension.size)
#logger.debug('global_attributes_dict = {}'.format(pformat(global_attributes_dict)))
for key in sorted(global_attributes_dict.keys()):
value = global_attributes_dict[key]
key_string = (' {{:<{}s}} : '.format(max_key_length)).format(key) # Include leading space
for value_line in value.split('\n'):
# Split long values into multiple lines. Need to be careful with leading & trailing spaces when reassembling
while value_line:
comment_line = 'COMM{}{}'.format(key_string,
value_line[:MAX_COMMENT_WIDTH - len(key_string)]) + '\n'
if encoding:
yield comment_line.encode(encoding)
else:
yield comment_line
value_line = value_line[MAX_COMMENT_WIDTH - len(key_string):]
if zipstream_zipfile:
# Write to zip file
des_basename = os.path.basename(des_out_path)
zipstream_zipfile.write_iter(des_basename,
des_line_generator(encoding=CHARACTER_ENCODING),
)
else: # No zip
# Create, write and close .dat file
des_out_file = open(des_out_path, mode='w')
logger.debug('Writing lines to .des file {}'.format(self.dat_out_path))
for des_line in des_line_generator():
logger.debug('Writing "{}" to .des file'.format(des_line))
des_out_file.write(des_line)
des_out_file.close()
self.info_output('Finished writing .des file {}'.format(des_out_path))
def convert2aseg_gdf(self,
dat_out_path=None,
zip_out_path=None,
stride=1,
point_mask=None):
'''
Function to convert netCDF file to ASEG-GDF
'''
start_time = datetime.now()
self.dat_out_path = dat_out_path or os.path.splitext(self.netcdf_dataset.filepath())[0] + '.dat'
self.dfn_out_path = os.path.splitext(dat_out_path)[0] + '.dfn'
self.des_out_path = os.path.splitext(dat_out_path)[0] + '.des'
if zip_out_path:
zipstream_zipfile = zipstream.ZipFile(compression=zipfile.ZIP_DEFLATED,
allowZip64=True
)
zipstream_zipfile.comment = ('ASEG-GDF2 files generated at {} from {}'.format(datetime.now().isoformat(),
os.path.basename(
self.netcdf_path))
).encode(CHARACTER_ENCODING)
try:
os.remove(zip_out_path)
except:
pass
else:
zipstream_zipfile = None
try:
self.create_dfn_file(self.dfn_out_path, zipstream_zipfile=zipstream_zipfile)
self.create_dat_file(self.dat_out_path, zipstream_zipfile=zipstream_zipfile)
self.create_des_file(self.des_out_path, zipstream_zipfile=zipstream_zipfile)
if zipstream_zipfile:
zip_out_file = open(zip_out_path, 'wb')
self.info_output('Writing zip file {}'.format(zip_out_path))
for data in zipstream_zipfile:
zip_out_file.write(data)
self.info_output('Closing zip file {}'.format(zip_out_path))
zipstream_zipfile.close()
except:
# Close and remove incomplete zip file
try:
zipstream_zipfile.close()
except:
pass
try:
zip_out_file.close()
except:
pass
try:
os.remove(zip_out_path)
logger.debug('Removed failed zip file {}'.format(zip_out_path))
except:
pass
raise
elapsed_time = datetime.now() - start_time
self.info_output(
'ASEG-GDF output completed in {}'.format(str(elapsed_time).split('.')[0])) # Discard partial seconds
def main():
'''
Main function
'''
def get_args():
"""
Handles all the arguments that are passed into the script
:return: Returns a parsed version of the arguments.
"""
parser = argparse.ArgumentParser(description='Convert netCDF file to ASEG-GDF2')
parser.add_argument("-r", "--crs",
help="Coordinate Reference System string (e.g. GDA94, EPSG:4283) for output",
type=str,
dest="crs")
parser.add_argument('-z', '--zip', action='store_const', const=True, default=False,
help='Zip directly to an archive file. Default is no zip')
parser.add_argument('-d', '--debug', action='store_const', const=True, default=False,
help='output debug information. Default is no debug info')
parser.add_argument('-v', '--verbose', action='store_const', const=True, default=False,
help='output verbosity. Default is non-verbose')
parser.add_argument('positional_args',
nargs=argparse.REMAINDER,
help='<nc_in_path> [<dat_out_path>] [<zip_out_path>]')
return parser.parse_args()
args = get_args()
# Setup Logging
log_level = logging.DEBUG if args.debug else logging.INFO
logger.setLevel(level=log_level)
assert 1 <= len(args.positional_args) <= 2, 'Invalid number of positional arguments.\n\
Usage: python {} <options> <nc_in_path> [<dat_out_path>] [<zip_out_path>]'.format(os.path.basename(sys.argv[0]))
nc_in_path = args.positional_args[0]
if len(args.positional_args) == 2:
dat_out_path = args.positional_args[1]
else:
dat_out_path = os.path.splitext(nc_in_path)[0] + '.dat'
if args.zip:
if len(args.positional_args) == 3:
zip_out_path = args.positional_args[2]
else:
zip_out_path = os.path.splitext(nc_in_path)[0] + '_ASEG_GDF2.zip'
else:
zip_out_path = None
logger.debug('args: {}'.format(args.__dict__))
nc2aseggdf2 = NC2ASEGGDF2(nc_in_path, debug=args.debug, verbose=args.verbose)
nc2aseggdf2.convert2aseg_gdf(dat_out_path, zip_out_path)
if __name__ == '__main__':
# Setup logging handlers if required
if not logger.handlers:
# Set handler for root logger to standard output
console_handler = logging.StreamHandler(sys.stdout)
# console_handler.setLevel(logging.INFO)
console_handler.setLevel(logging.DEBUG)
console_formatter = logging.Formatter('%(message)s')
console_handler.setFormatter(console_formatter)
logger.addHandler(console_handler)
logger.debug('Logging handlers set up for logger {}'.format(logger.name))
main() | [
"logging.getLogger",
"logging.StreamHandler",
"geophys_utils.NetCDFPointUtils",
"numpy.count_nonzero",
"numpy.array",
"math.log10",
"os.remove",
"argparse.ArgumentParser",
"sys.getsizeof",
"collections.OrderedDict",
"locale.setlocale",
"numpy.ones",
"functools.reduce",
"os.path.splitext",
... | [((1285, 1320), 'locale.setlocale', 'locale.setlocale', (['locale.LC_ALL', '""""""'], {}), "(locale.LC_ALL, '')\n", (1301, 1320), False, 'import locale\n'), ((1384, 1411), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1401, 1411), False, 'import logging\n'), ((2356, 2377), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (2375, 2377), False, 'import tempfile\n'), ((15982, 16027), 'geophys_utils.NetCDFPointUtils', 'NetCDFPointUtils', (['netcdf_dataset'], {'debug': 'debug'}), '(netcdf_dataset, debug=debug)\n', (15998, 16027), False, 'from geophys_utils import NetCDFPointUtils\n'), ((16538, 16577), 'geophys_utils.get_spatial_ref_from_wkt', 'get_spatial_ref_from_wkt', (['self.ncpu.wkt'], {}), '(self.ncpu.wkt)\n', (16562, 16577), False, 'from geophys_utils import get_spatial_ref_from_wkt\n'), ((45949, 45963), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (45961, 45963), False, 'from datetime import datetime\n'), ((48627, 48698), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Convert netCDF file to ASEG-GDF2"""'}), "(description='Convert netCDF file to ASEG-GDF2')\n", (48650, 48698), False, 'import argparse\n'), ((50031, 50060), 'os.path.basename', 'os.path.basename', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (50047, 50060), False, 'import os\n'), ((50913, 50946), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (50934, 50946), False, 'import logging\n'), ((51075, 51107), 'logging.Formatter', 'logging.Formatter', (['"""%(message)s"""'], {}), "('%(message)s')\n", (51092, 51107), False, 'import logging\n'), ((5709, 5757), 'numpy.ones', 'np.ones', ([], {'shape': '(self.index_range,)', 'dtype': '"""bool"""'}), "(shape=(self.index_range,), dtype='bool')\n", (5716, 5757), True, 'import numpy as np\n'), ((5866, 5895), 'numpy.count_nonzero', 'np.count_nonzero', (['subset_mask'], {}), '(subset_mask)\n', (5882, 5895), True, 'import numpy as 
np\n'), ((8059, 8072), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8070, 8072), False, 'from collections import OrderedDict\n'), ((21952, 21982), 'os.path.basename', 'os.path.basename', (['dfn_out_path'], {}), '(dfn_out_path)\n', (21968, 21982), False, 'import os\n'), ((41752, 41782), 'os.path.basename', 'os.path.basename', (['dat_out_path'], {}), '(dat_out_path)\n', (41768, 41782), False, 'import os\n'), ((44915, 44945), 'os.path.basename', 'os.path.basename', (['des_out_path'], {}), '(des_out_path)\n', (44931, 44945), False, 'import os\n'), ((46277, 46345), 'zipstream.ZipFile', 'zipstream.ZipFile', ([], {'compression': 'zipfile.ZIP_DEFLATED', 'allowZip64': '(True)'}), '(compression=zipfile.ZIP_DEFLATED, allowZip64=True)\n', (46294, 46345), False, 'import zipstream\n'), ((48206, 48220), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (48218, 48220), False, 'from datetime import datetime\n'), ((17852, 17984), 'numpy.array', 'np.array', (['([variables[0][:]] * (((point_slice.stop or self.total_points) - (\n point_slice.start or 0)) // (point_slice.step or 1)))'], {}), '([variables[0][:]] * (((point_slice.stop or self.total_points) - (\n point_slice.start or 0)) // (point_slice.step or 1)))\n', (17860, 17984), True, 'import numpy as np\n'), ((18658, 18705), 'numpy.any', 'np.any', (['(variables[0][:] == mask_value_index_var)'], {}), '(variables[0][:] == mask_value_index_var)\n', (18664, 18705), True, 'import numpy as np\n'), ((43563, 43597), 'os.path.basename', 'os.path.basename', (['self.netcdf_path'], {}), '(self.netcdf_path)\n', (43579, 43597), False, 'import os\n'), ((46101, 46131), 'os.path.splitext', 'os.path.splitext', (['dat_out_path'], {}), '(dat_out_path)\n', (46117, 46131), False, 'import os\n'), ((46173, 46203), 'os.path.splitext', 'os.path.splitext', (['dat_out_path'], {}), '(dat_out_path)\n', (46189, 46203), False, 'import os\n'), ((46897, 46920), 'os.remove', 'os.remove', (['zip_out_path'], {}), '(zip_out_path)\n', (46906, 
46920), False, 'import os\n'), ((50231, 50259), 'os.path.splitext', 'os.path.splitext', (['nc_in_path'], {}), '(nc_in_path)\n', (50247, 50259), False, 'import os\n'), ((12351, 12397), 'functools.reduce', 'reduce', (['(lambda x, y: x * y)', 'variable.shape[1:]'], {}), '(lambda x, y: x * y, variable.shape[1:])\n', (12357, 12397), False, 'from functools import reduce\n'), ((13598, 13632), 're.sub', 're.sub', (['"""\\\\W+"""', '"""_"""', 'variable_name'], {}), "('\\\\W+', '_', variable_name)\n", (13604, 13632), False, 'import re\n'), ((16790, 16823), 'math.log10', 'log10', (['(self.ncpu.point_count / 50)'], {}), '(self.ncpu.point_count / 50)\n', (16795, 16823), False, 'from math import log10\n'), ((23980, 24038), 're.sub', 're.sub', (['"""(\\\\W|_)+"""', '""" """', "field_definition['variable_name']"], {}), "('(\\\\W|_)+', ' ', field_definition['variable_name'])\n", (23986, 24038), False, 'import re\n'), ((40167, 40200), 'sys.getsizeof', 'sys.getsizeof', (['encoded_bytestring'], {}), '(encoded_bytestring)\n', (40180, 40200), False, 'import sys\n'), ((48011, 48034), 'os.remove', 'os.remove', (['zip_out_path'], {}), '(zip_out_path)\n', (48020, 48034), False, 'import os\n'), ((50431, 50459), 'os.path.splitext', 'os.path.splitext', (['nc_in_path'], {}), '(nc_in_path)\n', (50447, 50459), False, 'import os\n'), ((8239, 8297), 're.match', 're.match', (['exclude_name_regex', 'variable_name', 're.IGNORECASE'], {}), '(exclude_name_regex, variable_name, re.IGNORECASE)\n', (8247, 8297), False, 'import re\n'), ((30133, 30167), 're.sub', 're.sub', (['"""[\\\\:\\\\,\\\\=]+"""', '""""""', 'projcs'], {}), "('[\\\\:\\\\,\\\\=]+', '', projcs)\n", (30139, 30167), False, 'import re\n'), ((43518, 43532), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (43530, 43532), False, 'from datetime import datetime\n'), ((46659, 46693), 'os.path.basename', 'os.path.basename', (['self.netcdf_path'], {}), '(self.netcdf_path)\n', (46675, 46693), False, 'import os\n'), ((24492, 24557), 
're.match', 're.match', (['variable_attribute_regex', 'attribute_name', 're.IGNORECASE'], {}), '(variable_attribute_regex, attribute_name, re.IGNORECASE)\n', (24500, 24557), False, 'import re\n'), ((13117, 13154), 're.sub', 're.sub', (['"""(\\\\W|_)+"""', '""""""', 'variable_name'], {}), "('(\\\\W|_)+', '', variable_name)\n", (13123, 13154), False, 'import re\n'), ((14623, 14643), 'math.log10', 'log10', (['max_abs_value'], {}), '(max_abs_value)\n', (14628, 14643), False, 'from math import log10\n'), ((46540, 46554), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (46552, 46554), False, 'from datetime import datetime\n')] |
import numpy as np
import scipy.constants as scpct
def compute_bremzeff(Te=None, ne=None, zeff=None, lamb=None):
""" Return the bremsstrahlun spectral radiance at lamb
The plasma conditions are set by:
- Te (eV)
- ne (/m3)
- zeff (adim.)
The wavelength is set by the diagnostics
- lamb (m)
The vol. spectral emis. is returned in ph / (s.m3.sr.m)
The computation requires an intermediate : gff(Te, zeff)
"""
ktkeV = Te * 1.e-3
ktJ = Te * scpct.e
gff = 5.54 - (3.11-np.log(ktkeV))*(0.69-0.13/zeff)
Const = ((scpct.e**6/(scpct.h*scpct.c**3*(np.pi*scpct.epsilon_0)**3))
* np.sqrt(np.pi/(864.*scpct.m_e**3)))
hc = scpct.h*scpct.c
emis = Const/lamb * ne**2*zeff * np.exp(-hc/(lamb*ktJ)) * gff/np.sqrt(ktJ)
units = r'ph / (s.m3.sr.m)'
return emis, units
def compute_fangle(BR=None, BPhi=None, BZ=None, ne=None, lamb=None):
""" The the vector quantity to be integrated on LOS to get faraday angle
fangle = int_LOS ( abs(sca(quant, u_LOS)) )
Where:
quant = C * lamb**2 * ne * Bv
With:
- C = 2.615e-13 (1/T)
- ne (/m3)
- lamb (m)
- Bv (T)
The resulting faraday angle (after integration) will be in radians
"""
const = scpct.e**3 / (8.*scpct.pi**2
* scpct.epsilon_0 * scpct.m_e**2 * scpct.c**3)
quant = const * lamb**2 * ne * np.array([BR, BPhi, BZ])
units = r'rad / m'
return quant, units
| [
"numpy.exp",
"numpy.array",
"numpy.log",
"numpy.sqrt"
] | [((667, 708), 'numpy.sqrt', 'np.sqrt', (['(np.pi / (864.0 * scpct.m_e ** 3))'], {}), '(np.pi / (864.0 * scpct.m_e ** 3))\n', (674, 708), True, 'import numpy as np\n'), ((794, 806), 'numpy.sqrt', 'np.sqrt', (['ktJ'], {}), '(ktJ)\n', (801, 806), True, 'import numpy as np\n'), ((1441, 1465), 'numpy.array', 'np.array', (['[BR, BPhi, BZ]'], {}), '([BR, BPhi, BZ])\n', (1449, 1465), True, 'import numpy as np\n'), ((546, 559), 'numpy.log', 'np.log', (['ktkeV'], {}), '(ktkeV)\n', (552, 559), True, 'import numpy as np\n'), ((765, 791), 'numpy.exp', 'np.exp', (['(-hc / (lamb * ktJ))'], {}), '(-hc / (lamb * ktJ))\n', (771, 791), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 21 10:54:36 2022
@author: Oliver
"""
import math as m
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
def record_to_vec(df, i):
wave = df.iloc[i].wave[1:-1].split()
vec = df.iloc[i].vector[1:-1].split()
return np.array(vec).astype(float), np.array(wave).astype(float)
def cumulative_distance(pattern):
"""Calculate the cumulative distance traveled at each point in the pattern.
"""
dists = np.zeros(pattern.shape[0])
for i, x in enumerate(pattern):
if i:
segment_length = np.linalg.norm(x - pattern[i - 1])
dists[i] = dists[i - 1] + segment_length
else:
pass
return dists
def microns_into_pattern(x, pattern, scale):
"""Return the point coordinates of the location 'x' microns into the
provided pattern. Pattern assumed in units pixels, x in units microns.
"""
dists = cumulative_distance(pattern) / scale
idx_past = np.where(dists > x)[0].min() # first point further than x
x_rel = (x - dists[idx_past - 1]) / (dists[idx_past] - dists[idx_past - 1])
vec = pattern[idx_past] - pattern[idx_past - 1]
point = pattern[idx_past - 1] + x_rel * vec
angle = vec / np.linalg.norm(vec)
return point, np.arctan2(angle[1], angle[0])
def old_patchmaker(img, height, width, center_y, center_x, angle):
"""Courtesy user <NAME> at
https://stackoverflow.com/questions/49892205/extracting-patch-of-a-certain-size-at-certain-angle-with-given-center-from-an-im
"""
theta = angle/180*3.14
img_shape = np.shape(img)
# print(img_shape)
x, y = np.meshgrid(range(img_shape[1]), range(img_shape[0]))
rotatex = x[center_y-m.floor(height/2):center_y+m.floor(height/2),
center_x-m.floor(width/2):center_x+m.floor(width/2)]
rotatey = y[center_y-m.floor(height/2):center_y+m.floor(height/2),
center_x-m.floor(width/2):center_x+m.floor(width/2)]
coords = [rotatex.reshape((1, height*width))-center_x,
rotatey.reshape((1, height*width))-center_y]
coords = np.asarray(coords)
coords = coords.reshape(2, height*width)
roatemat = [[m.cos(theta), m.sin(theta)], [-m.sin(theta), m.cos(theta)]]
rotatedcoords = np.matmul(roatemat, coords)
patch = ndimage.map_coordinates(
img, [rotatedcoords[1]+center_y, rotatedcoords[0]+center_x], order=1, mode='nearest').reshape(height, width)
return patch
def patchmaker(img, height, width, center_y, center_x, angle):
    """Crop the axis-aligned bounding box of a height-by-width rectangle
    rotated by `angle` (radians) about (center_y, center_x), clipped to img.

    Parameters
    ----------
    img : np.ndarray
        Source image, indexed [row, col].
    height, width : int
        Dimensions of the rotated rectangle of interest, in pixels.
    center_y, center_x : int
        Center of the rectangle in image coordinates.
    angle : float
        Rotation of the rectangle, in radians.

    Returns
    -------
    np.ndarray
        The clipped bounding-box patch.
    """
    # Bounding box of a rotated rectangle: |cos|*h + |sin|*w (and vice versa).
    # Taking abs() of each trigonometric term individually fixes angles where
    # cos and sin have opposite signs (e.g. 3*pi/4), for which the previous
    # |cos*h + sin*w| underestimated the box and could collapse it to zero.
    new_height = np.abs(np.cos(angle)) * height + np.abs(np.sin(angle)) * width
    new_width = np.abs(np.cos(angle)) * width + np.abs(np.sin(angle)) * height
    max_vals = np.shape(img)
    # Clip the box to the image bounds.
    min_x = np.max((0, int(center_x - new_width / 2)))
    max_x = np.min((max_vals[1], int(center_x + new_width / 2)))
    min_y = np.max((0, int(center_y - new_height / 2)))
    max_y = np.min((max_vals[0], int(center_y + new_height / 2)))
    patch = img[min_y:max_y, min_x:max_x]
    return patch
def align_pattern(csv, scale, theta, offset):
    """Load a print pattern from a tab-separated file and map it into image
    coordinates: rotate by theta, scale uniformly, then translate by offset.
    """
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    rotation = np.array([[cos_t, -sin_t],
                         [sin_t, cos_t]])
    # Import pattern; keep only printed orders (flag == 1 in the last column)
    # and their first two columns (coordinates).
    raw = np.genfromtxt(csv, delimiter="\t")
    printed = raw[:, :2][raw[:, -1] == 1]
    # Affine transformation: rotate + scale, then translate.
    aligned = np.matmul(printed, scale * rotation)
    aligned += np.array(offset)
    return aligned
def list_points_on_pattern(pattern, start, spacing, point_count, scale):
    """Sample `point_count` (point, angle) pairs along the pattern, beginning
    `start` microns in and stepping `spacing` microns between samples.
    """
    offsets = (start + i * spacing for i in range(point_count))
    return [microns_into_pattern(offset, pattern, scale) for offset in offsets]
def display_patch(im, point, angle, spacing, pitch, pattern, axs=None):
    """Plot the full image with the pattern overlaid (left axis) and the
    patch extracted around `point` (right axis).

    Parameters
    ----------
    im : np.ndarray
        Image to display.
    point : sequence of float
        (row, col) location to mark and crop around.
    angle : float
        Patch orientation, forwarded to `patchmaker` (radians).
    spacing, pitch : int
        Height and width passed to `patchmaker`, in pixels.
    pattern : np.ndarray
        Pattern vertices; plotted as (col, row) = (pattern[:, 1], pattern[:, 0]).
    axs : sequence of matplotlib axes, optional
        Pair of axes to draw on; a fresh 1x2 figure is created when omitted.

    Returns
    -------
    The axes drawn on.
    """
    if axs is None:
        _, axs = plt.subplots(1, 2)
    axs[0].imshow(im)
    # Dashed red overlay of the full print pattern.
    axs[0].plot(pattern[:, 1], pattern[:, 0], '--r', linewidth=0.2)
    # Star marker at the queried point.
    axs[0].plot(point[1], point[0], '*r')
    patch = patchmaker(im, spacing, pitch,
                       int(point[0]), int(point[1]), angle)
    axs[1].imshow(patch)
    return axs
| [
"math.floor",
"numpy.where",
"numpy.asarray",
"scipy.ndimage.map_coordinates",
"math.cos",
"numpy.array",
"numpy.zeros",
"numpy.matmul",
"numpy.arctan2",
"numpy.cos",
"numpy.linalg.norm",
"numpy.sin",
"numpy.shape",
"math.sin",
"numpy.genfromtxt",
"matplotlib.pyplot.subplots"
] | [((500, 526), 'numpy.zeros', 'np.zeros', (['pattern.shape[0]'], {}), '(pattern.shape[0])\n', (508, 526), True, 'import numpy as np\n'), ((1616, 1629), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (1624, 1629), True, 'import numpy as np\n'), ((2129, 2147), 'numpy.asarray', 'np.asarray', (['coords'], {}), '(coords)\n', (2139, 2147), True, 'import numpy as np\n'), ((2290, 2317), 'numpy.matmul', 'np.matmul', (['roatemat', 'coords'], {}), '(roatemat, coords)\n', (2299, 2317), True, 'import numpy as np\n'), ((2712, 2725), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (2720, 2725), True, 'import numpy as np\n'), ((3245, 3279), 'numpy.genfromtxt', 'np.genfromtxt', (['csv'], {'delimiter': '"""\t"""'}), "(csv, delimiter='\\t')\n", (3258, 3279), True, 'import numpy as np\n'), ((3374, 3403), 'numpy.matmul', 'np.matmul', (['pattern', '(scale * R)'], {}), '(pattern, scale * R)\n', (3383, 3403), True, 'import numpy as np\n'), ((3419, 3435), 'numpy.array', 'np.array', (['offset'], {}), '(offset)\n', (3427, 3435), True, 'import numpy as np\n'), ((1266, 1285), 'numpy.linalg.norm', 'np.linalg.norm', (['vec'], {}), '(vec)\n', (1280, 1285), True, 'import numpy as np\n'), ((1304, 1334), 'numpy.arctan2', 'np.arctan2', (['angle[1]', 'angle[0]'], {}), '(angle[1], angle[0])\n', (1314, 1334), True, 'import numpy as np\n'), ((3770, 3788), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (3782, 3788), True, 'from matplotlib import pyplot as plt\n'), ((606, 640), 'numpy.linalg.norm', 'np.linalg.norm', (['(x - pattern[i - 1])'], {}), '(x - pattern[i - 1])\n', (620, 640), True, 'import numpy as np\n'), ((2210, 2222), 'math.cos', 'm.cos', (['theta'], {}), '(theta)\n', (2215, 2222), True, 'import math as m\n'), ((2224, 2236), 'math.sin', 'm.sin', (['theta'], {}), '(theta)\n', (2229, 2236), True, 'import math as m\n'), ((2255, 2267), 'math.cos', 'm.cos', (['theta'], {}), '(theta)\n', (2260, 2267), True, 'import math as m\n'), ((2330, 2447), 
'scipy.ndimage.map_coordinates', 'ndimage.map_coordinates', (['img', '[rotatedcoords[1] + center_y, rotatedcoords[0] + center_x]'], {'order': '(1)', 'mode': '"""nearest"""'}), "(img, [rotatedcoords[1] + center_y, rotatedcoords[0] +\n center_x], order=1, mode='nearest')\n", (2353, 2447), False, 'from scipy import ndimage\n'), ((306, 319), 'numpy.array', 'np.array', (['vec'], {}), '(vec)\n', (314, 319), True, 'import numpy as np\n'), ((335, 349), 'numpy.array', 'np.array', (['wave'], {}), '(wave)\n', (343, 349), True, 'import numpy as np\n'), ((1009, 1028), 'numpy.where', 'np.where', (['(dists > x)'], {}), '(dists > x)\n', (1017, 1028), True, 'import numpy as np\n'), ((2241, 2253), 'math.sin', 'm.sin', (['theta'], {}), '(theta)\n', (2246, 2253), True, 'import math as m\n'), ((2578, 2591), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (2584, 2591), True, 'import numpy as np\n'), ((2603, 2616), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (2609, 2616), True, 'import numpy as np\n'), ((2649, 2662), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (2655, 2662), True, 'import numpy as np\n'), ((2673, 2686), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (2679, 2686), True, 'import numpy as np\n'), ((3095, 3108), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (3101, 3108), True, 'import numpy as np\n'), ((3146, 3159), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (3152, 3159), True, 'import numpy as np\n'), ((3162, 3175), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (3168, 3175), True, 'import numpy as np\n'), ((1743, 1762), 'math.floor', 'm.floor', (['(height / 2)'], {}), '(height / 2)\n', (1750, 1762), True, 'import math as m\n'), ((1770, 1789), 'math.floor', 'm.floor', (['(height / 2)'], {}), '(height / 2)\n', (1777, 1789), True, 'import math as m\n'), ((1814, 1832), 'math.floor', 'm.floor', (['(width / 2)'], {}), '(width / 2)\n', (1821, 1832), True, 'import math as m\n'), ((1840, 1858), 'math.floor', 'm.floor', (['(width / 
2)'], {}), '(width / 2)\n', (1847, 1858), True, 'import math as m\n'), ((1883, 1902), 'math.floor', 'm.floor', (['(height / 2)'], {}), '(height / 2)\n', (1890, 1902), True, 'import math as m\n'), ((1910, 1929), 'math.floor', 'm.floor', (['(height / 2)'], {}), '(height / 2)\n', (1917, 1929), True, 'import math as m\n'), ((1954, 1972), 'math.floor', 'm.floor', (['(width / 2)'], {}), '(width / 2)\n', (1961, 1972), True, 'import math as m\n'), ((1980, 1998), 'math.floor', 'm.floor', (['(width / 2)'], {}), '(width / 2)\n', (1987, 1998), True, 'import math as m\n'), ((3111, 3124), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (3117, 3124), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns # noqa
from sklearn.base import BaseEstimator
from sklearn.exceptions import NotFittedError
from sklearn.utils.validation import check_is_fitted
class GaussianProcess(BaseEstimator):
    """
    Fits a Gaussian Process regressor.

    Parameters
    ----------
    kernel : object
        Kernel used to build covariance matrices. Must expose a
        ``transform(A, B)`` method returning the covariance between the
        rows of ``A`` and the rows of ``B``.

    Attributes
    ----------
    kernel : object
        Kernel function used to compute covariance matrix.
    Xtrain_ : np.ndarray or None
        Training inputs; set by `fit`.
    ytrain_ : np.ndarray or None
        Training targets; set by `fit`.
    L_11_ : np.ndarray
        Cholesky factor of the (jittered) training covariance matrix;
        set by `fit`.

    Note
    ----
    Most of code is taken from the following tutorial:
    https://katbailey.github.io/post/gaussian-processes-for-dummies/.
    """

    def __init__(self, kernel):
        self.kernel = kernel
        self.Xtrain_ = None
        self.ytrain_ = None

    def fit(self, X, y):
        """
        Computes the Xtrain covariance and stores its Cholesky factor as an
        attribute. Also stores Xtrain and ytrain as attributes.

        Parameters
        ----------
        X : np.ndarray, shape (n, d)
            Training inputs.
        y : np.array, shape (n,)
            Training targets.

        Returns
        -------
        None

        Note
        ----
        Note the K matrix is:
            K_11 K_21
            K_21 K_22
        """
        self.Xtrain_ = X
        self.ytrain_ = y
        # Compute Xtrain/Xtrain elements of covariance matrix (Xtrain variance)
        K_11 = self.kernel.transform(self.Xtrain_, self.Xtrain_)
        # The small diagonal jitter keeps the Cholesky factorization stable
        # when K_11 is numerically singular.
        self.L_11_ = np.linalg.cholesky(K_11
                                        + 0.00005*np.eye(len(self.Xtrain_)))

    def predict(self, Xtest, n_samples=1):
        """
        Returns predictions for input data: the posterior mean (at the test
        points) of the joint distribution of the training data Xtrain and
        the test data Xtest.

        High-level Intuition
        --------------------
        * Goal is to learn a distribution over possible "functions" f(x) = y.
        * Compute the "difference" between the Xtrain data and the Xtest data.
        * Compute the Xtrain covariance "feature weights" cov_fw s.t.
            XtrainCovMatrix . cov_fw = ytrain
        * Compute post. mean by mult. cov_fw by the Xtrain/Xtest "difference":
            mu = cov_fw . XtrainXtestCovDiff

        Parameters
        ----------
        Xtest : np.array
            Input data.
        n_samples : int, default 1
            Unused; retained for signature compatibility with `sample`.

        Returns
        -------
        np.array, length len(Xtest)
            Predictions, the posterior mean of the joint distribution of the
            training data and the test data.

        Raises
        ------
        NotFittedError
            If `fit` has not been called yet.
        """
        if not self._is_fitted():
            raise NotFittedError()
        mu, _ = self._compute_mean_and_non_diag_covariance(Xtest)
        return mu

    def sample(self, Xtest, n_samples=1, use_prior=False):
        """
        Returns predictions for input data by returning samples from either
        the prior or the posterior of the joint distribution of the training
        data Xtrain and the test data Xtest.

        If the model is not yet fitted or use_prior=True, then samples from
        the prior are returned. Otherwise, samples are taken from the
        posterior.

        Parameters
        ----------
        Xtest : np.array
            Input data.
        n_samples : int, default 1
            Number of samples (predictions) to return.
        use_prior : bool, default False
            Whether or not to sample from the prior distribution. If True,
            the prior is used.

        Returns
        -------
        np.ndarray, shape (len(Xtest), n_samples)
            Predictions which are samples drawn from the joint distribution
            of the training data and the test data.
        """
        ntest = len(Xtest)
        # Compute Xtest covariance and its decomposition (sqroot)
        K_22 = self.kernel.transform(Xtest, Xtest)
        L_22 = np.linalg.cholesky(K_22 + 1e-15*np.eye(ntest))
        if use_prior or not self._is_fitted():
            # Sample n_samples sets of standard normals for our test points,
            # then multiply them by the square root of the Xtest covariance.
            f_prior = np.dot(L_22, np.random.normal(size=(ntest, n_samples)))
            return f_prior
        # Compute mean and non-diagonal (Xtrain/Xtest) elements of cov. matrix
        mu, L_12 = self._compute_mean_and_non_diag_covariance(Xtest)
        # Compute sqroot of entire covariance matrix
        L = np.linalg.cholesky(K_22 + 1e-6*np.eye(ntest) - np.dot(L_12.T,
                                                                L_12))
        # Sample n_samples sets of standard normals for our test points, then
        # multiply them by the square root of the covariance matrix.
        f_post = mu.reshape(-1, 1) + np.dot(L, np.random.normal(
            size=(ntest, n_samples)))
        return f_post

    def plot_prior_samples(self, Xtest, n_samples=1):
        """
        Plots samples of the prior (defined by kernel) at the test points.

        Parameters
        ----------
        Xtest : np.array
            Input data.
        n_samples : int, default 1
            Number of samples (predictions) to plot.

        Returns
        -------
        fig : matplotlib.figure.Figure
            Plotted figure
        axes : tuple<matplotlib.axes._subplots.AxesSubplot>
            Axes used for plotting.
        """
        f_prior = self.sample(Xtest, n_samples, use_prior=True)
        # Now let's plot the sampled functions.
        fig, ax = plt.subplots(1, 1, figsize=(7, 4))
        # Sort values for better plotting
        sort_idx = np.argsort(Xtest.flatten())
        Xtest_sorted = np.take_along_axis(Xtest.flatten(), sort_idx, axis=0)
        for sample in range(n_samples):
            f_prior_sorted = np.take_along_axis(f_prior[:, sample], sort_idx,
                                                axis=0)
            ax.plot(Xtest_sorted, f_prior_sorted,
                    label='Prior Sample {} (predictions)'.format(sample))
        ax.set_title('{} samples from the GP prior'.format(n_samples))
        ax.legend()
        plt.show()
        return fig, (ax)

    def plot_posterior_samples(self, Xtest, n_samples=1):
        """
        Plots samples of the posterior at the test points.

        Parameters
        ----------
        Xtest : np.array
            Input data.
        n_samples : int, default 1
            Number of samples (predictions) to plot.

        Returns
        -------
        fig : matplotlib.figure.Figure
            Plotted figure
        axes : tuple<matplotlib.axes._subplots.AxesSubplot>
            Axes used for plotting.

        Note
        ----
        Model instance must be fitted prior to calling this method.
        """
        # Compute mean and non-diagonal (Xtrain/Xtest) elements of cov. matrix
        mu, L_12 = self._compute_mean_and_non_diag_covariance(Xtest)
        # Compute Xtest covariance
        K_22 = self.kernel.transform(Xtest, Xtest)
        # Compute the standard deviation so we can plot it
        s2 = np.diag(K_22) - np.sum(L_12**2, axis=0)
        stdv = np.sqrt(s2)
        # Create figure and axis for plotting
        fig, ax = plt.subplots(1, 1, figsize=(10, 6))
        # Sort values for better plotting
        sort_idx = np.argsort(Xtest.flatten())
        Xtest_sorted = np.take_along_axis(Xtest.flatten(), sort_idx, axis=0)
        mu_sorted = np.take_along_axis(mu, sort_idx, axis=0)
        # Plot training data points
        ax.plot(self.Xtrain_, self.ytrain_, 'bs', ms=8, label='Train')
        # Plot posterior mean (raw string avoids the '\m' escape warning).
        ax.plot(Xtest_sorted, mu_sorted, '--r',
                label=r'Posterior $\mu$')
        # Sample from posterior
        f_post = self.sample(Xtest, n_samples)
        # Plot sampled functions
        for sample in range(n_samples):
            f_post_sorted = np.take_along_axis(f_post[:, sample], sort_idx,
                                               axis=0)
            ax.plot(Xtest_sorted, f_post_sorted,
                    label='Posterior Sample {} (predictions)'.format(sample))
        # Plot standard deviation
        plt.gca().fill_between(Xtest_sorted, mu_sorted-2*stdv,
                               mu_sorted+2*stdv, color='#999999', alpha=.4)
        ax.set_title('{} samples from the GP posterior'.format(n_samples))
        ax.legend()
        plt.show()
        return fig, (ax)

    def _is_fitted(self):
        """
        Helper method to check if instance is fitted or not.

        Returns
        -------
        bool
            True if model is fitted, otherwise False.
        """
        try:
            check_is_fitted(self, ['Xtrain_', 'ytrain_', 'L_11_'])
            return True
        except NotFittedError:
            return False

    def _compute_mean_and_non_diag_covariance(self, Xtest):
        """
        Computes Xtrain/Xtest covariance and the mean of the joint train/test
        posterior.

        Parameters
        ----------
        Xtest : np.array
            Input data

        Returns
        -------
        np.array, shape (len(Xtest),)
            Posterior mean.
        np.ndarray, shape (len(self.Xtrain_), len(Xtest))
            Xtrain/Xtest covariance factor.
        """
        # Compute Xtrain/Xtest elements of covariance matrix
        K_12 = self.kernel.transform(self.Xtrain_, Xtest)
        # Compute the "difference" between the Xtrain data and the Xtest data.
        # L_11_ is the sqroot of Xtrain Covariance.
        # K_12 is the covariance of Xtrain and Xtest.
        # Therefore, L_12 is the matrix that solves: (L_11)(L_12) = K_12.
        L_12 = np.linalg.solve(self.L_11_, K_12)
        # Compute the Xtrain covariance "feature weights".
        # np.linalg.solve returns x in Ax=B, where A = L_11 and B = ytrain.
        # BUG FIX: this vector has length len(Xtrain), so it is flattened with
        # reshape(-1). The previous reshape(len(Xtest),) raised whenever the
        # number of test points differed from the number of training points.
        cov_fw = np.linalg.solve(self.L_11_, self.ytrain_).reshape(-1)
        # Obtain the posterior mean by multiplying cov_fw by the "difference"
        # between Xtrain and Xtest covariances (L_12).
        mu = np.dot(L_12.T, cov_fw)
        return mu, L_12
| [
"sklearn.utils.validation.check_is_fitted",
"numpy.random.normal",
"numpy.eye",
"numpy.linalg.solve",
"numpy.sqrt",
"sklearn.exceptions.NotFittedError",
"matplotlib.pyplot.gca",
"numpy.diag",
"numpy.sum",
"numpy.dot",
"numpy.take_along_axis",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.... | [((5643, 5677), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(7, 4)'}), '(1, 1, figsize=(7, 4))\n', (5655, 5677), True, 'import matplotlib.pyplot as plt\n'), ((6244, 6254), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6252, 6254), True, 'import matplotlib.pyplot as plt\n'), ((7254, 7265), 'numpy.sqrt', 'np.sqrt', (['s2'], {}), '(s2)\n', (7261, 7265), True, 'import numpy as np\n'), ((7331, 7366), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(10, 6)'}), '(1, 1, figsize=(10, 6))\n', (7343, 7366), True, 'import matplotlib.pyplot as plt\n'), ((7554, 7594), 'numpy.take_along_axis', 'np.take_along_axis', (['mu', 'sort_idx'], {'axis': '(0)'}), '(mu, sort_idx, axis=0)\n', (7572, 7594), True, 'import numpy as np\n'), ((8520, 8530), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8528, 8530), True, 'import matplotlib.pyplot as plt\n'), ((9848, 9881), 'numpy.linalg.solve', 'np.linalg.solve', (['self.L_11_', 'K_12'], {}), '(self.L_11_, K_12)\n', (9863, 9881), True, 'import numpy as np\n'), ((10601, 10623), 'numpy.dot', 'np.dot', (['L_12.T', 'cov_fw'], {}), '(L_12.T, cov_fw)\n', (10607, 10623), True, 'import numpy as np\n'), ((2765, 2781), 'sklearn.exceptions.NotFittedError', 'NotFittedError', ([], {}), '()\n', (2779, 2781), False, 'from sklearn.exceptions import NotFittedError\n'), ((5915, 5971), 'numpy.take_along_axis', 'np.take_along_axis', (['f_prior[:, sample]', 'sort_idx'], {'axis': '(0)'}), '(f_prior[:, sample], sort_idx, axis=0)\n', (5933, 5971), True, 'import numpy as np\n'), ((7199, 7212), 'numpy.diag', 'np.diag', (['K_22'], {}), '(K_22)\n', (7206, 7212), True, 'import numpy as np\n'), ((7215, 7240), 'numpy.sum', 'np.sum', (['(L_12 ** 2)'], {'axis': '(0)'}), '(L_12 ** 2, axis=0)\n', (7221, 7240), True, 'import numpy as np\n'), ((8013, 8068), 'numpy.take_along_axis', 'np.take_along_axis', (['f_post[:, sample]', 'sort_idx'], {'axis': '(0)'}), '(f_post[:, sample], 
sort_idx, axis=0)\n', (8031, 8068), True, 'import numpy as np\n'), ((8845, 8899), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['self', "['Xtrain_', 'ytrain_', 'L_11_']"], {}), "(self, ['Xtrain_', 'ytrain_', 'L_11_'])\n", (8860, 8899), False, 'from sklearn.utils.validation import check_is_fitted\n'), ((4290, 4331), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(ntest, n_samples)'}), '(size=(ntest, n_samples))\n', (4306, 4331), True, 'import numpy as np\n'), ((4623, 4643), 'numpy.dot', 'np.dot', (['L_12.T', 'L_12'], {}), '(L_12.T, L_12)\n', (4629, 4643), True, 'import numpy as np\n'), ((4906, 4947), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(ntest, n_samples)'}), '(size=(ntest, n_samples))\n', (4922, 4947), True, 'import numpy as np\n'), ((8286, 8295), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8293, 8295), True, 'import matplotlib.pyplot as plt\n'), ((10243, 10284), 'numpy.linalg.solve', 'np.linalg.solve', (['self.L_11_', 'self.ytrain_'], {}), '(self.L_11_, self.ytrain_)\n', (10258, 10284), True, 'import numpy as np\n'), ((4038, 4051), 'numpy.eye', 'np.eye', (['ntest'], {}), '(ntest)\n', (4044, 4051), True, 'import numpy as np\n'), ((4607, 4620), 'numpy.eye', 'np.eye', (['ntest'], {}), '(ntest)\n', (4613, 4620), True, 'import numpy as np\n')] |
import torch
import h5py
import numpy as np
from pathlib import Path
from typing import List, Union, Optional
from torch.utils.data import Dataset
from torch_geometric.data import Data
from mdgraph.data.preprocess import aminoacid_int_to_onehot
PathLike = Union[str, Path]
class ContactMapDataset(Dataset):
    """
    PyTorch Dataset class to load contact matrix data. Uses HDF5
    files and only reads into memory what is necessary for one batch.
    """

    def __init__(
        self,
        path: PathLike,
        dataset_name: str,
        scalar_dset_names: List[str],
        num_node_features: Optional[int] = None,
        node_feature_path: Optional[PathLike] = None,
        split_ptc: float = 0.8,
        split: str = "train",
        seed: int = 333,
        scalar_requires_grad: bool = False,
    ):
        """
        Parameters
        ----------
        path : PathLike
            Path to h5 file containing contact matrices.
        dataset_name : str
            Path to contact maps in HDF5 file.
        scalar_dset_names : List[str]
            List of scalar dataset names inside HDF5 file to be passed
            to training logs.
        num_node_features : int
            Number of node features. Used to build constant all-ones node
            features when `node_feature_path` is not given.
        node_feature_path : PathLike, optional
            Path to a .npy file of integer amino-acid labels; converted to
            one-hot node features.
        split_ptc : float
            Percentage of total data to be used as training set.
        split : str
            Either 'train' or 'valid', specifies whether this
            dataset returns train or validation data.
        seed : int
            Seed for the RNG for the splitting. Make sure it is the
            same for all workers reading from the same file.
        scalar_requires_grad : bool
            Sets requires_grad torch.Tensor parameter for scalars specified by
            `scalar_dset_names`. Set to True, to use scalars for multi-task
            learning. If scalars are only required for plotting, then set it as False.
        """
        if split not in ("train", "valid"):
            raise ValueError("Parameter split must be 'train' or 'valid'.")
        if split_ptc < 0 or split_ptc > 1:
            raise ValueError("Parameter split_ptc must satisfy 0 <= split_ptc <= 1.")
        # HDF5 data params
        self.file_path = str(path)
        self.dataset_name = dataset_name
        self.scalar_dset_names = scalar_dset_names
        self._num_node_features = num_node_features
        self._scalar_requires_grad = scalar_requires_grad
        # Node features: either loaded from labels (one-hot) or left as None
        # so __getitem__ falls back to constant features.
        if node_feature_path is not None:
            self.labels = np.load(node_feature_path)
            self.node_features = aminoacid_int_to_onehot(self.labels)
            self.labels = torch.from_numpy(self.labels).to(torch.long)
            self.node_features = torch.from_numpy(self.node_features).to(torch.float32)
        else:
            self.node_features, self.labels = None, None
        # Open the file briefly just to read the number of examples.
        with self._open_h5_file() as f:
            self.len = len(f[self.dataset_name])
        # Train/validation split: shuffle all indices with a fixed seed so
        # every worker derives the same partition, then keep one side.
        self.split_ind = int(split_ptc * self.len)
        self.split = split
        split_rng = np.random.default_rng(seed)
        self.indices = split_rng.permutation(list(range(self.len)))
        if self.split == "train":
            self.indices = sorted(self.indices[: self.split_ind])
        else:
            self.indices = sorted(self.indices[self.split_ind :])
        # Deferred-open flag: the HDF5 file handle is created lazily in
        # __getitem__ so each DataLoader worker opens its own handle.
        self._initialized = False

    def _open_h5_file(self):
        # swmr=False: single-writer/multiple-reader mode disabled.
        return h5py.File(self.file_path, "r", libver="latest", swmr=False)

    def __len__(self):
        # Length of this split, not of the whole HDF5 dataset.
        return len(self.indices)

    def __getitem__(self, idx):
        # Only happens once. Need to open h5 file in current process
        # (a handle cannot safely cross the DataLoader worker fork).
        if not self._initialized:
            self._h5_file = self._open_h5_file()
            self.dset = self._h5_file[self.dataset_name]
            # Load scalar dsets
            self.scalar_dsets = {
                name: self._h5_file[name] for name in self.scalar_dset_names
            }
            self._initialized = True
        # Map the split-local index to the real index in the HDF5 file.
        index = self.indices[idx]
        # Get adjacency list
        edge_index = self.dset[index, ...].reshape(2, -1)  # [2, num_edges]
        edge_index = torch.from_numpy(edge_index).to(torch.long)
        # Node features: constant all-ones if no labels were provided.
        if self.node_features is None:
            num_nodes = int(edge_index.max().item()) + 1
            x = torch.ones((num_nodes, self._num_node_features))
            y = None
        else:
            x = self.node_features
            y = self.labels
        # Create graph data object
        data = Data(x=x, edge_index=edge_index, y=y)
        sample = {"X": data}
        # Add index into dataset to sample
        sample["index"] = torch.tensor(index, requires_grad=False)
        # Add scalars
        for name, dset in self.scalar_dsets.items():
            sample[name] = torch.tensor(
                dset[index], requires_grad=self._scalar_requires_grad
            )
        return sample
| [
"numpy.random.default_rng",
"mdgraph.data.preprocess.aminoacid_int_to_onehot",
"torch.from_numpy",
"h5py.File",
"torch.tensor",
"numpy.load",
"torch_geometric.data.Data",
"torch.ones"
] | [((3085, 3112), 'numpy.random.default_rng', 'np.random.default_rng', (['seed'], {}), '(seed)\n', (3106, 3112), True, 'import numpy as np\n'), ((3459, 3518), 'h5py.File', 'h5py.File', (['self.file_path', '"""r"""'], {'libver': '"""latest"""', 'swmr': '(False)'}), "(self.file_path, 'r', libver='latest', swmr=False)\n", (3468, 3518), False, 'import h5py\n'), ((4598, 4635), 'torch_geometric.data.Data', 'Data', ([], {'x': 'x', 'edge_index': 'edge_index', 'y': 'y'}), '(x=x, edge_index=edge_index, y=y)\n', (4602, 4635), False, 'from torch_geometric.data import Data\n'), ((4735, 4775), 'torch.tensor', 'torch.tensor', (['index'], {'requires_grad': '(False)'}), '(index, requires_grad=False)\n', (4747, 4775), False, 'import torch\n'), ((2514, 2540), 'numpy.load', 'np.load', (['node_feature_path'], {}), '(node_feature_path)\n', (2521, 2540), True, 'import numpy as np\n'), ((2574, 2610), 'mdgraph.data.preprocess.aminoacid_int_to_onehot', 'aminoacid_int_to_onehot', (['self.labels'], {}), '(self.labels)\n', (2597, 2610), False, 'from mdgraph.data.preprocess import aminoacid_int_to_onehot\n'), ((4401, 4449), 'torch.ones', 'torch.ones', (['(num_nodes, self._num_node_features)'], {}), '((num_nodes, self._num_node_features))\n', (4411, 4449), False, 'import torch\n'), ((4878, 4945), 'torch.tensor', 'torch.tensor', (['dset[index]'], {'requires_grad': 'self._scalar_requires_grad'}), '(dset[index], requires_grad=self._scalar_requires_grad)\n', (4890, 4945), False, 'import torch\n'), ((4201, 4229), 'torch.from_numpy', 'torch.from_numpy', (['edge_index'], {}), '(edge_index)\n', (4217, 4229), False, 'import torch\n'), ((2637, 2666), 'torch.from_numpy', 'torch.from_numpy', (['self.labels'], {}), '(self.labels)\n', (2653, 2666), False, 'import torch\n'), ((2715, 2751), 'torch.from_numpy', 'torch.from_numpy', (['self.node_features'], {}), '(self.node_features)\n', (2731, 2751), False, 'import torch\n')] |
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Generate stl10 like files, smaller and with random data.
"""
import os
from absl import app
from absl import flags
import numpy as np
from tensorflow_datasets.core.utils import py_utils
from tensorflow_datasets.testing import test_utils
HEIGHT, WIDTH = (96, 96)
NUMBER_LABELS = 10
flags.DEFINE_string("tfds_dir", py_utils.tfds_dir(),
"Path to tensorflow_datasets directory")
FLAGS = flags.FLAGS
def stl_output_dir():
  """Returns the directory holding the fake stl10 binary test files."""
  subdirs = ("testing", "test_data", "fake_examples", "stl10", "stl10_binary")
  return os.path.join(FLAGS.tfds_dir, *subdirs)
def dump(output_dir, fname, data):
  """Writes the raw bytes of the numpy array `data` to output_dir/fname."""
  target = os.path.join(output_dir, fname)
  print("Writing %s..." % target)
  with open(target, "wb") as out_file:
    out_file.write(data.tobytes())
def _generate_stl10_data():
  """Generates .bin files for stl10."""
  output_dir = stl_output_dir()
  test_utils.remake_dir(output_dir)
  # One fake label per split file.
  for fname in ("train_y.bin", "test_y.bin"):
    fake_labels = np.random.randint(NUMBER_LABELS, size=1, dtype=np.uint8)
    dump(output_dir, fname, fake_labels)
  # One fake flattened RGB image per split file.
  for fname in ("train_X.bin", "test_X.bin", "unlabeled_X.bin"):
    fake_images = np.random.randint(
        256, size=(1, HEIGHT * WIDTH * 3), dtype=np.uint8)
    dump(output_dir, fname, fake_images)
  label_names = [
      "airplane", "bird", "car", "cat", "deer", "dog", "horse", "monkey",
      "ship", "truck"
  ]
  with open(os.path.join(output_dir, "class_names.txt"), "w") as f:
    f.write("\n".join(label_names))
def main(argv):
  """Entry point; rejects extra CLI arguments, then writes the fake data."""
  extra_args = argv[1:]
  if extra_args:
    raise app.UsageError("Too many command-line arguments.")
  _generate_stl10_data()
if __name__ == "__main__":
app.run(main)
| [
"absl.app.UsageError",
"tensorflow_datasets.core.utils.py_utils.tfds_dir",
"os.path.join",
"absl.app.run",
"numpy.random.randint",
"tensorflow_datasets.testing.test_utils.remake_dir"
] | [((935, 954), 'tensorflow_datasets.core.utils.py_utils.tfds_dir', 'py_utils.tfds_dir', ([], {}), '()\n', (952, 954), False, 'from tensorflow_datasets.core.utils import py_utils\n'), ((1070, 1168), 'os.path.join', 'os.path.join', (['FLAGS.tfds_dir', '"""testing"""', '"""test_data"""', '"""fake_examples"""', '"""stl10"""', '"""stl10_binary"""'], {}), "(FLAGS.tfds_dir, 'testing', 'test_data', 'fake_examples',\n 'stl10', 'stl10_binary')\n", (1082, 1168), False, 'import os\n'), ((1233, 1264), 'os.path.join', 'os.path.join', (['output_dir', 'fname'], {}), '(output_dir, fname)\n', (1245, 1264), False, 'import os\n'), ((1473, 1506), 'tensorflow_datasets.testing.test_utils.remake_dir', 'test_utils.remake_dir', (['output_dir'], {}), '(output_dir)\n', (1494, 1506), False, 'from tensorflow_datasets.testing import test_utils\n'), ((2252, 2265), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (2259, 2265), False, 'from absl import app\n'), ((1566, 1622), 'numpy.random.randint', 'np.random.randint', (['NUMBER_LABELS'], {'size': '(1)', 'dtype': 'np.uint8'}), '(NUMBER_LABELS, size=1, dtype=np.uint8)\n', (1583, 1622), True, 'import numpy as np\n'), ((1755, 1823), 'numpy.random.randint', 'np.random.randint', (['(256)'], {'size': '(1, HEIGHT * WIDTH * 3)', 'dtype': 'np.uint8'}), '(256, size=(1, HEIGHT * WIDTH * 3), dtype=np.uint8)\n', (1772, 1823), True, 'import numpy as np\n'), ((2145, 2195), 'absl.app.UsageError', 'app.UsageError', (['"""Too many command-line arguments."""'], {}), "('Too many command-line arguments.')\n", (2159, 2195), False, 'from absl import app\n'), ((2005, 2048), 'os.path.join', 'os.path.join', (['output_dir', '"""class_names.txt"""'], {}), "(output_dir, 'class_names.txt')\n", (2017, 2048), False, 'import os\n')] |
#########################################################################################################
################ Environment class for La Robo Liga ################
################ DO NOT change any piece of code in here ################
#########################################################################################################
import gym
from gym import error, spaces, utils
from gym.utils import seeding
import pybullet as p
import pybullet_data
import cv2
import numpy as np
import random
from os.path import normpath, basename
import time
class LaRoboLigaPs2Arena(gym.Env):
metadata = {'render.modes': ['human']}
def __init__(self,ball_locations = None,husky_pos = None,husky_orn = None):
"""
Class contructor
-Opens up the Simulation
-Loads the arena
Arguments:
ball locations -
A dictionary with initial co-ordinates of the Colored balls of color 'red', 'yellow', 'blue' and 'purple'.
The co-ordinates must be in form of a python list or a numpy array.
husky_pos-
Position of husky in 3D space
husky_orn-
Orientation of the husky in 3D spce expressed expressed in Quatrnions
"""
p.connect(p.GUI)
p.setAdditionalSearchPath(pybullet_data.getDataPath())
p.setGravity(0,0,-10)
p.configureDebugVisualizer(p.COV_ENABLE_SHADOWS,0)
p.configureDebugVisualizer(p.COV_ENABLE_WIREFRAME,0)
if ball_locations is not None:
self.color_balls_location = ball_locations
else:
self.color_balls_location = dict({
'red' : [6,0,1.5],
'yellow' : [0,6,1.5],
'blue' : [-6,0,1.5],
'purple' : [0,-6,1.5]
})
if husky_pos is None:
self.husky_pos = [0,0,0.3]
else:
self.husky_pos = husky_pos
if husky_orn is None:
self.husky_orn = p.getQuaternionFromEuler([0,0,np.pi])
else:
self.husky_orn = husky_orn
self.husky = None
self.balls = list()
self.__load_arena()
self.load_balls()
self.spawn_husky()
self._width = 600
self._height = 600
for i in range(100): # running the Simulatin for short period to let Everything settle
p.stepSimulation()
time.sleep(1./240.)
def move_husky(self,leftfront,rightfront,leftback,rightback):
"""
Function to move the husky
--------------------------
Arguments:
leftFrontWheel - Velocity of the front left wheel
rightFrontWheel - Velocity of the front right wheel
leftRearWheel - Velocity of the rear left wheel
rightRearWheel - Velocity of the rear right wheel
Return Values:
None
"""
vels = [leftfront,-rightfront,leftback,-rightback]
p.setJointMotorControlArray(
bodyIndex = self.husky,
jointIndices = [0,1,2,3],
controlMode = p.VELOCITY_CONTROL,
targetVelocities = vels
)
def open_husky_gripper(self):
"""
Function to open the grippers attached in front of husky
Arguments :
None
Returns :
None
"""
for i in range(100):
p.setJointMotorControl2(self.husky,5,p.POSITION_CONTROL,targetPosition = np.pi/2)
p.setJointMotorControl2(self.husky,6,p.POSITION_CONTROL,targetPosition = -np.pi/2)
p.stepSimulation()
time.sleep(1./240.)
def close_husky_gripper(self):
"""
Function to close the grippers attached in front of husky
Arguments :
None
Returns :
None
"""
for i in range(100):
p.setJointMotorControl2(self.husky,5,p.POSITION_CONTROL,targetPosition = 0)
p.setJointMotorControl2(self.husky,6,p.POSITION_CONTROL,targetPosition = 0)
p.stepSimulation()
time.sleep(1./240.)
def get_camera_image(self):
"""
Function to get camera feed from the onboard camera on husky.
Arguments:
None
Return Values:
numpy array of BGR values
"""
fov = 60
aspect = self._width / self._height
near = 0.02
far = 50
orn = p.getEulerFromQuaternion(p.getBasePositionAndOrientation(self.husky)[1])
pos = p.getBasePositionAndOrientation(self.husky)[0]
camera_eye = [pos[0]+0.4*np.cos(orn[2]),pos[1]+0.4*np.sin(orn[2]),pos[2]+1.15*np.cos(orn[0])]
target_pos = [pos[0]-2*np.cos(orn[2]),pos[1]-2*np.sin(orn[2]),pos[2]+1.15*np.cos(orn[0])]
view_matrix = p.computeViewMatrix(camera_eye, target_pos, [0, 0, 1])
projection_matrix = p.computeProjectionMatrixFOV(fov, aspect, near, far)
images = p.getCameraImage(
self._width,
self._height,
view_matrix,
projection_matrix,
shadow=True,
renderer=p.ER_BULLET_HARDWARE_OPENGL
)
rgba_opengl = np.reshape(images[2], (self._height, self._width, 4))
rgba_opengl = np.uint8(rgba_opengl)
bgr = cv2.cvtColor(rgba_opengl[:,:,0:3],cv2.COLOR_BGR2RGB)
return bgr
def load_balls(self):
    """
    Function to load the Colored balls in the arena.

    A ball URDF is loaded for every entry of self.color_balls_location
    (a dict keyed by color name, e.g. 'red', 'yellow', 'blue',
    'purple', mapping to spawn co-ordinates) and each body id is
    appended to self.balls.
    Arguments:
        None
    returns:
        None
    """
    for color in self.color_balls_location:
        id = p.loadURDF('rsc/Balls/ball_'+color+'.urdf',self.color_balls_location[color])
        # Low friction so the balls roll freely when pushed.
        p.changeDynamics(id,-1,lateralFriction=0.1,rollingFriction=0.1)
        self.balls.append(id)
def spawn_husky(self):
    # Load the husky-with-gripper model at the stored spawn pose and
    # remember its body id.
    self.husky = p.loadURDF('rsc/car_with_gripper/urdf/car_with_gripper.urdf',self.husky_pos,self.husky_orn)
def __load_arena(self):
    """
    Function to load the arena
    Arguments:
        None
    returns:
        None
    """
    # useFixedBase=1 keeps the arena immovable in the simulation.
    p.loadURDF('rsc/arena/urdf/arena.urdf',useFixedBase = 1)
def reset_arena(self):
    """
    Function to reset the positions of the husky and the balls in arena
    Arguments:
        None
    returns:
        None
    """
    # Remove the current husky and every ball body...
    p.removeBody(self.husky)
    for id in self.balls:
        p.removeBody(id)
    self.balls.clear()
    # ...then respawn everything at its initial pose.
    self.spawn_husky()
    self.load_balls()
| [
"numpy.uint8",
"pybullet.computeViewMatrix",
"pybullet_data.getDataPath",
"pybullet.setGravity",
"time.sleep",
"numpy.sin",
"numpy.reshape",
"pybullet.connect",
"pybullet.getCameraImage",
"pybullet.getQuaternionFromEuler",
"pybullet.removeBody",
"pybullet.configureDebugVisualizer",
"pybullet... | [((1499, 1515), 'pybullet.connect', 'p.connect', (['p.GUI'], {}), '(p.GUI)\n', (1508, 1515), True, 'import pybullet as p\n'), ((1589, 1612), 'pybullet.setGravity', 'p.setGravity', (['(0)', '(0)', '(-10)'], {}), '(0, 0, -10)\n', (1601, 1612), True, 'import pybullet as p\n'), ((1620, 1671), 'pybullet.configureDebugVisualizer', 'p.configureDebugVisualizer', (['p.COV_ENABLE_SHADOWS', '(0)'], {}), '(p.COV_ENABLE_SHADOWS, 0)\n', (1646, 1671), True, 'import pybullet as p\n'), ((1680, 1733), 'pybullet.configureDebugVisualizer', 'p.configureDebugVisualizer', (['p.COV_ENABLE_WIREFRAME', '(0)'], {}), '(p.COV_ENABLE_WIREFRAME, 0)\n', (1706, 1733), True, 'import pybullet as p\n'), ((3418, 3553), 'pybullet.setJointMotorControlArray', 'p.setJointMotorControlArray', ([], {'bodyIndex': 'self.husky', 'jointIndices': '[0, 1, 2, 3]', 'controlMode': 'p.VELOCITY_CONTROL', 'targetVelocities': 'vels'}), '(bodyIndex=self.husky, jointIndices=[0, 1, 2, 3],\n controlMode=p.VELOCITY_CONTROL, targetVelocities=vels)\n', (3445, 3553), True, 'import pybullet as p\n'), ((5485, 5539), 'pybullet.computeViewMatrix', 'p.computeViewMatrix', (['camera_eye', 'target_pos', '[0, 0, 1]'], {}), '(camera_eye, target_pos, [0, 0, 1])\n', (5504, 5539), True, 'import pybullet as p\n'), ((5569, 5621), 'pybullet.computeProjectionMatrixFOV', 'p.computeProjectionMatrixFOV', (['fov', 'aspect', 'near', 'far'], {}), '(fov, aspect, near, far)\n', (5597, 5621), True, 'import pybullet as p\n'), ((5642, 5772), 'pybullet.getCameraImage', 'p.getCameraImage', (['self._width', 'self._height', 'view_matrix', 'projection_matrix'], {'shadow': '(True)', 'renderer': 'p.ER_BULLET_HARDWARE_OPENGL'}), '(self._width, self._height, view_matrix, projection_matrix,\n shadow=True, renderer=p.ER_BULLET_HARDWARE_OPENGL)\n', (5658, 5772), True, 'import pybullet as p\n'), ((5993, 6046), 'numpy.reshape', 'np.reshape', (['images[2]', '(self._height, self._width, 4)'], {}), '(images[2], (self._height, self._width, 4))\n', (6003, 
6046), True, 'import numpy as np\n'), ((6070, 6091), 'numpy.uint8', 'np.uint8', (['rgba_opengl'], {}), '(rgba_opengl)\n', (6078, 6091), True, 'import numpy as np\n'), ((6107, 6162), 'cv2.cvtColor', 'cv2.cvtColor', (['rgba_opengl[:, :, 0:3]', 'cv2.COLOR_BGR2RGB'], {}), '(rgba_opengl[:, :, 0:3], cv2.COLOR_BGR2RGB)\n', (6119, 6162), False, 'import cv2\n'), ((6961, 7059), 'pybullet.loadURDF', 'p.loadURDF', (['"""rsc/car_with_gripper/urdf/car_with_gripper.urdf"""', 'self.husky_pos', 'self.husky_orn'], {}), "('rsc/car_with_gripper/urdf/car_with_gripper.urdf', self.\n husky_pos, self.husky_orn)\n", (6971, 7059), True, 'import pybullet as p\n'), ((7299, 7354), 'pybullet.loadURDF', 'p.loadURDF', (['"""rsc/arena/urdf/arena.urdf"""'], {'useFixedBase': '(1)'}), "('rsc/arena/urdf/arena.urdf', useFixedBase=1)\n", (7309, 7354), True, 'import pybullet as p\n'), ((7621, 7645), 'pybullet.removeBody', 'p.removeBody', (['self.husky'], {}), '(self.husky)\n', (7633, 7645), True, 'import pybullet as p\n'), ((1551, 1578), 'pybullet_data.getDataPath', 'pybullet_data.getDataPath', ([], {}), '()\n', (1576, 1578), False, 'import pybullet_data\n'), ((2290, 2329), 'pybullet.getQuaternionFromEuler', 'p.getQuaternionFromEuler', (['[0, 0, np.pi]'], {}), '([0, 0, np.pi])\n', (2314, 2329), True, 'import pybullet as p\n'), ((2735, 2753), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (2751, 2753), True, 'import pybullet as p\n'), ((2767, 2790), 'time.sleep', 'time.sleep', (['(1.0 / 240.0)'], {}), '(1.0 / 240.0)\n', (2777, 2790), False, 'import time\n'), ((3934, 4023), 'pybullet.setJointMotorControl2', 'p.setJointMotorControl2', (['self.husky', '(5)', 'p.POSITION_CONTROL'], {'targetPosition': '(np.pi / 2)'}), '(self.husky, 5, p.POSITION_CONTROL, targetPosition=\n np.pi / 2)\n', (3957, 4023), True, 'import pybullet as p\n'), ((4029, 4119), 'pybullet.setJointMotorControl2', 'p.setJointMotorControl2', (['self.husky', '(6)', 'p.POSITION_CONTROL'], {'targetPosition': '(-np.pi / 2)'}), 
'(self.husky, 6, p.POSITION_CONTROL, targetPosition=-\n np.pi / 2)\n', (4052, 4119), True, 'import pybullet as p\n'), ((4125, 4143), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (4141, 4143), True, 'import pybullet as p\n'), ((4157, 4180), 'time.sleep', 'time.sleep', (['(1.0 / 240.0)'], {}), '(1.0 / 240.0)\n', (4167, 4180), False, 'import time\n'), ((4492, 4568), 'pybullet.setJointMotorControl2', 'p.setJointMotorControl2', (['self.husky', '(5)', 'p.POSITION_CONTROL'], {'targetPosition': '(0)'}), '(self.husky, 5, p.POSITION_CONTROL, targetPosition=0)\n', (4515, 4568), True, 'import pybullet as p\n'), ((4581, 4657), 'pybullet.setJointMotorControl2', 'p.setJointMotorControl2', (['self.husky', '(6)', 'p.POSITION_CONTROL'], {'targetPosition': '(0)'}), '(self.husky, 6, p.POSITION_CONTROL, targetPosition=0)\n', (4604, 4657), True, 'import pybullet as p\n'), ((4683, 4701), 'pybullet.stepSimulation', 'p.stepSimulation', ([], {}), '()\n', (4699, 4701), True, 'import pybullet as p\n'), ((4715, 4738), 'time.sleep', 'time.sleep', (['(1.0 / 240.0)'], {}), '(1.0 / 240.0)\n', (4725, 4738), False, 'import time\n'), ((5213, 5256), 'pybullet.getBasePositionAndOrientation', 'p.getBasePositionAndOrientation', (['self.husky'], {}), '(self.husky)\n', (5244, 5256), True, 'import pybullet as p\n'), ((6720, 6806), 'pybullet.loadURDF', 'p.loadURDF', (["('rsc/Balls/ball_' + color + '.urdf')", 'self.color_balls_location[color]'], {}), "('rsc/Balls/ball_' + color + '.urdf', self.color_balls_location[\n color])\n", (6730, 6806), True, 'import pybullet as p\n'), ((6810, 6876), 'pybullet.changeDynamics', 'p.changeDynamics', (['id', '(-1)'], {'lateralFriction': '(0.1)', 'rollingFriction': '(0.1)'}), '(id, -1, lateralFriction=0.1, rollingFriction=0.1)\n', (6826, 6876), True, 'import pybullet as p\n'), ((7690, 7706), 'pybullet.removeBody', 'p.removeBody', (['id'], {}), '(id)\n', (7702, 7706), True, 'import pybullet as p\n'), ((5150, 5193), 'pybullet.getBasePositionAndOrientation', 
'p.getBasePositionAndOrientation', (['self.husky'], {}), '(self.husky)\n', (5181, 5193), True, 'import pybullet as p\n'), ((5294, 5308), 'numpy.cos', 'np.cos', (['orn[2]'], {}), '(orn[2])\n', (5300, 5308), True, 'import numpy as np\n'), ((5320, 5334), 'numpy.sin', 'np.sin', (['orn[2]'], {}), '(orn[2])\n', (5326, 5334), True, 'import numpy as np\n'), ((5347, 5361), 'numpy.cos', 'np.cos', (['orn[0]'], {}), '(orn[0])\n', (5353, 5361), True, 'import numpy as np\n'), ((5395, 5409), 'numpy.cos', 'np.cos', (['orn[2]'], {}), '(orn[2])\n', (5401, 5409), True, 'import numpy as np\n'), ((5419, 5433), 'numpy.sin', 'np.sin', (['orn[2]'], {}), '(orn[2])\n', (5425, 5433), True, 'import numpy as np\n'), ((5446, 5460), 'numpy.cos', 'np.cos', (['orn[0]'], {}), '(orn[0])\n', (5452, 5460), True, 'import numpy as np\n')] |
# File: problem1.py
# Author: <NAME>
# Date: 11/06/2019
# Class: ECE 555 - Probability for Electrical Engineers
# Description:
# Write and run a program to simulate an M/E2/1 queue and obtain realizations of
# the four stochastic processes defined in Example 6.2. Plot these realizations. You
# may use a simulation language such as SIMULA or GPSS or you may use one
# of the standard high-level languages. You will have to generate random deviates
# of the interarrival time distribution (assume arrival rate λ = 1 per second) and
# the service time distribution (assume mean service time 0.8 s) using methods of
# Chapter 3.
import math
import queue
import numpy as np
import matplotlib.pyplot as plt
def getErlang2(u1, u2, l=1):
    """Draw an Erlang-2 deviate by inverse transform.

    The deviate is the sum of two independent exponential stages, each
    with rate 2*l, so the overall mean is 1/l.  u1 and u2 are samples
    from Uniform(0, 1).
    """
    stage1 = -np.log(u1) / (2 * l)
    stage2 = -np.log(u2) / (2 * l)
    return stage1 + stage2
# Inverse-transform sampling of an exponential distribution.
# u: a Uniform(0, 1) sample; l: the rate of the exponential.
def getEXP(u,l=1):
    """Return an exponential(l) deviate derived from the uniform sample u."""
    return np.log(u) / (-l)
# Returns the values from the given list that do not exceed `value`.
def getSmaller(list2Check, value):
    """Return every element of list2Check that is <= value, order preserved."""
    return list(filter(lambda item: item <= value, list2Check))
def getBetween(list2Check, small_val, big_val):
    """Return every element of list2Check in [small_val, big_val], order preserved."""
    return list(filter(lambda item: small_val <= item <= big_val, list2Check))
def addToQueue(list2Check, que):
    """Push values from list2Check into que and return the (mutated) queue.

    An initially empty queue accepts every value unconditionally.  A
    non-empty queue only accepts values strictly greater than its
    current newest element (que.queue[-1]).
    """
    if que.empty():
        # Fresh queue: take everything as-is.
        for value in list2Check:
            que.put(value)
    else:
        # Non-empty queue: enqueue only values beyond the current tail.
        for value in list2Check:
            if value > que.queue[-1]:
                que.put(value)
    return que
def graph(x, xlabel, ylabel, title, isDiscrete=True):
    """Show a labelled plot of x.

    When isDiscrete is True, x is scattered against its indices;
    otherwise it is drawn as a continuous line.  Always returns 0.
    """
    if isDiscrete:
        indices = list(range(len(x)))
        plt.scatter(x, indices)
    else:
        plt.plot(x)
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.show()
    return 0
# M/E2/1 queue simulation:
#   interarrival times  -> exponential(arrival_rate)
#   service times       -> Erlang-2 with the requested mean
def simulate_M_E2_1(total_jobs=1000, arrival_rate=1, mean_service_time=0.8):
    """Simulate an M/E2/1 queue and plot N_k, X(t), W_n and Y(t).

    total_jobs        -- number of jobs pushed through the system
    arrival_rate      -- lambda of the exponential interarrival times
    mean_service_time -- mean of the Erlang-2 service times (seconds)

    Fix: the original ignored both rate parameters and always sampled
    with rate 1 (so the mean service time was 1 s, not the requested
    0.8 s).  getEXP/getErlang2 now receive the configured rates.
    Returns 0.
    """
    k = total_jobs  # total number of jobs
    rand_samples1 = np.random.random_sample((k,))
    rand_samples2 = np.random.random_sample((k,))
    rand_samples3 = np.random.random_sample((k,))
    arrival_queue = queue.Queue(maxsize=k-1)
    service_queue = queue.Queue(maxsize=1)
    arrival_times = np.zeros(k)
    continuous_arrivals = np.zeros(k)
    service_times = np.zeros(k)
    continuous_service = np.zeros(k)
    N_k = np.zeros(k+1)
    exit_times = np.copy(N_k)  # related to X_t and Y_t
    CONTINUOUS = False
    # An Erlang-2 with per-stage rate 2*l has mean 1/l, so pass
    # l = 1/mean_service_time to obtain the requested mean.
    service_rate = 1.0 / mean_service_time
    # Draw the deviates and accumulate arrival/departure times.
    for i in range(k):
        arrival_times[i] = getEXP(rand_samples1[i], arrival_rate)
        service_times[i] = getErlang2(rand_samples2[i], rand_samples3[i], service_rate)
        if i == 0:
            continuous_arrivals[i] = arrival_times[i]
            continuous_service[i] = service_times[i] + arrival_times[i]
        else:
            continuous_arrivals[i] = arrival_times[i] + continuous_arrivals[i-1]
            continuous_service[i] = service_times[i] + continuous_service[i-1]
        # A job cannot finish service before it has arrived.
        time_diff = continuous_service[i] - continuous_arrivals[i]
        if time_diff < 0:
            continuous_service[i] += -time_diff
    # Figure 6.3: queue length observed at each departure.
    for index, departure in enumerate(continuous_service):
        service_queue.put(departure)
        possible_arrivals = getSmaller(continuous_arrivals, departure)
        addToQueue(possible_arrivals, arrival_queue)
        N_k[index+1] = arrival_queue.qsize()
        exit_times[index+1] = service_queue.get()
        if not arrival_queue.empty():
            arrival_queue.get()
    graph(N_k,"k","Nk", "Figure 6.3")
    # Figure 6.4: number in system sampled once per second.
    final_time = int( np.ceil(exit_times[-1]) )
    X_t = []
    for t in range(final_time):
        # Index of the latest departure at or before time t.
        result = np.where(exit_times <= t)
        target_index = result[0][-1]
        X_t.append(N_k[target_index])
    graph(X_t,"t","X(t)", "Figure 6.4", CONTINUOUS)
    # Figure 6.5: per-job waiting (sojourn) time.
    W_n = continuous_service - continuous_arrivals
    graph(W_n,"k","Wn", "Figure 6.5")
    # Figure 6.6: waiting time sampled once per second.
    Y_t = []
    for t in range(final_time):
        result = np.where(exit_times <= t)
        target_index = result[0][-1]
        Y_t.append(W_n[target_index])
    graph(Y_t,"t","Y(t)", "Figure 6.6", CONTINUOUS)
    return 0
def main():
    """Run a small M/E2/1 simulation (25 jobs) and return 0."""
    simulate_M_E2_1(25)
    return 0
main() | [
"numpy.copy",
"numpy.ceil",
"numpy.random.random_sample",
"matplotlib.pyplot.ylabel",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.log",
"numpy.zeros",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"queue.Queue",
"matplotlib.pyplot.show"
] | [((1629, 1645), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1638, 1645), True, 'import matplotlib.pyplot as plt\n'), ((1647, 1665), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (1657, 1665), True, 'import matplotlib.pyplot as plt\n'), ((1667, 1685), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (1677, 1685), True, 'import matplotlib.pyplot as plt\n'), ((1687, 1697), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1695, 1697), True, 'import matplotlib.pyplot as plt\n'), ((1959, 1988), 'numpy.random.random_sample', 'np.random.random_sample', (['(k,)'], {}), '((k,))\n', (1982, 1988), True, 'import numpy as np\n'), ((2006, 2035), 'numpy.random.random_sample', 'np.random.random_sample', (['(k,)'], {}), '((k,))\n', (2029, 2035), True, 'import numpy as np\n'), ((2053, 2082), 'numpy.random.random_sample', 'np.random.random_sample', (['(k,)'], {}), '((k,))\n', (2076, 2082), True, 'import numpy as np\n'), ((2101, 2127), 'queue.Queue', 'queue.Queue', ([], {'maxsize': '(k - 1)'}), '(maxsize=k - 1)\n', (2112, 2127), False, 'import queue\n'), ((2143, 2165), 'queue.Queue', 'queue.Queue', ([], {'maxsize': '(1)'}), '(maxsize=1)\n', (2154, 2165), False, 'import queue\n'), ((2184, 2195), 'numpy.zeros', 'np.zeros', (['k'], {}), '(k)\n', (2192, 2195), True, 'import numpy as np\n'), ((2219, 2230), 'numpy.zeros', 'np.zeros', (['k'], {}), '(k)\n', (2227, 2230), True, 'import numpy as np\n'), ((2248, 2259), 'numpy.zeros', 'np.zeros', (['k'], {}), '(k)\n', (2256, 2259), True, 'import numpy as np\n'), ((2282, 2293), 'numpy.zeros', 'np.zeros', (['k'], {}), '(k)\n', (2290, 2293), True, 'import numpy as np\n'), ((2302, 2317), 'numpy.zeros', 'np.zeros', (['(k + 1)'], {}), '(k + 1)\n', (2310, 2317), True, 'import numpy as np\n'), ((2330, 2342), 'numpy.copy', 'np.copy', (['N_k'], {}), '(N_k)\n', (2337, 2342), True, 'import numpy as np\n'), ((2375, 2390), 'numpy.zeros', 'np.zeros', (['(k + 1)'], {}), 
'(k + 1)\n', (2383, 2390), True, 'import numpy as np\n'), ((1590, 1607), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {}), '(x, y)\n', (1601, 1607), True, 'import matplotlib.pyplot as plt\n'), ((1616, 1627), 'matplotlib.pyplot.plot', 'plt.plot', (['x'], {}), '(x)\n', (1624, 1627), True, 'import matplotlib.pyplot as plt\n'), ((3484, 3507), 'numpy.ceil', 'np.ceil', (['exit_times[-1]'], {}), '(exit_times[-1])\n', (3491, 3507), True, 'import numpy as np\n'), ((3611, 3636), 'numpy.where', 'np.where', (['(exit_times <= t)'], {}), '(exit_times <= t)\n', (3619, 3636), True, 'import numpy as np\n'), ((3990, 4015), 'numpy.where', 'np.where', (['(exit_times <= t)'], {}), '(exit_times <= t)\n', (3998, 4015), True, 'import numpy as np\n'), ((934, 943), 'numpy.log', 'np.log', (['u'], {}), '(u)\n', (940, 943), True, 'import numpy as np\n'), ((751, 761), 'numpy.log', 'np.log', (['u1'], {}), '(u1)\n', (757, 761), True, 'import numpy as np\n'), ((773, 783), 'numpy.log', 'np.log', (['u2'], {}), '(u2)\n', (779, 783), True, 'import numpy as np\n')] |
import glob
import numpy as np
import pandas as pd
from shapely.geometry import LineString,MultiLineString,Point,MultiPoint
from shapely.ops import linemerge
import pyproj
from sklearn.ensemble import RandomForestClassifier,ExtraTreesClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.svm import SVC
import xgboost
from tqdm import tqdm
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import confusion_matrix,accuracy_score
import pickle
import os
import argparse
np.random.seed(10)
from param import *
# Build the five-model ensemble (extra-trees, random forest, kNN,
# xgboost and SVC).  Hyper-parameters are tuned for this dataset;
# class_weight='balanced' compensates for the uneven distribution of
# start/end goals.
def get_ensemble_of_classifiers(vote=True):
    """Return the ensemble as a dict of classifiers.

    vote=True  -> a single hard-voting wrapper under key 'cvote'.
    vote=False -> the five individual models (SVC with probabilities).
    """
    extra_tree = ExtraTreesClassifier(100, class_weight='balanced', n_jobs=-1)
    random_forest = RandomForestClassifier(50, class_weight='balanced', n_jobs=-1)
    knn = KNeighborsClassifier(20, weights='distance', n_jobs=-1)
    xgb = xgboost.XGBClassifier(n_estimators=100, subsample=.7)
    if vote:
        svc = SVC(0.1)
        estimators = [('et', extra_tree), ('rf', random_forest), ('kn', knn), ('xgb', xgb), ('svc', svc)]
        return {'cvote': VotingClassifier(estimators=estimators, voting='hard')}
    svc = SVC(0.1, class_weight='balanced', probability=True)
    return {'extra_tree': extra_tree, 'random_forest': random_forest,
            'knn': knn, 'xgb': xgb, 'svc': svc}
# Closest and farthest distance from a track to every goal.
def closest_farthest(track):
    """For each goal, compute the min and max geodesic distance to any
    track point.

    track is a sequence of (lon, lat) pairs; relies on the module-level
    `goal` table and `geod` object.  Returns (closest, farthest) lists,
    one entry per goal.
    """
    closest_to_track = []
    farthest_to_track = []
    for i in range(goal.shape[0]):
        goal_point = Point(goal[['lon','lat']].values[i])
        distances = []
        for lonlat in track:
            track_point = Point(lonlat)
            _, _, metres = geod.inv(track_point.x, track_point.y, goal_point.x, goal_point.y)
            distances.append(metres)
        closest_to_track.append(np.min(distances))
        farthest_to_track.append(np.max(distances))
    return closest_to_track, farthest_to_track
# Distance from a single track point to every goal.
def goal_dist(point1):
    """Geodesic distance from point1 to each goal, as a dict values view
    keyed (internally) by goal index."""
    d = {}
    for i in range(goal.shape[0]):
        target = Point(goal[['lon','lat']].values[i])
        # geod.inv returns (fwd azimuth, back azimuth, distance)
        d[i] = geod.inv(point1.x, point1.y, target.x, target.y)[2]
    return d.values()
# Distance features for a full (untrimmed) track; used for both
# training and testing.
def get_distances(df,goal,trim=None):
    """Compute the distance feature tuple for one track.

    Returns (duration, straight-line displacement, distances to every
    goal from the first point, distances from the last point, closest
    distances, farthest distances).  `trim` is accepted only for
    signature compatibility and is unused here.
    """
    first_point = Point(df[['lon','lat']].values[0])
    last_point = Point(df[['lon','lat']].values[-1])
    duration = df.elapsedTime_sec.values[-1]
    _, _, total_distance_covered = geod.inv(first_point.x, first_point.y, last_point.x, last_point.y)
    distance_to_goal_from_start = goal_dist(first_point)
    distance_to_goal_from_end = goal_dist(last_point)
    closest, farthest = closest_farthest(df[['lon','lat']].values)
    return duration,total_distance_covered,distance_to_goal_from_start,distance_to_goal_from_end,closest,farthest
# Like get_distances, but the track is randomly trimmed at both ends
# (data augmentation for training).
def get_distances_multi(df,goal):
    """Compute the distance feature tuple for a randomly trimmed track.

    The number of seconds dropped from each end is drawn from
    [TRIM_START, TRIM_END); the two randint calls happen in the same
    order as before so the RNG stream is unchanged.
    """
    # seconds to drop from the beginning
    trim_start = np.random.randint(TRIM_START, TRIM_END)
    idx_s = np.where(df.elapsedTime_sec > trim_start)[0][0]
    p_start = Point(df[['lon','lat']].values[idx_s])
    # seconds to drop from the end
    trim_end = np.random.randint(TRIM_START, TRIM_END)
    idx_e = np.where(df.elapsedTime_sec > df.elapsedTime_sec.values[-1] - trim_end)[0][0]
    p_end = Point(df[['lon','lat']].values[idx_e])
    _, _, total_distance_covered = geod.inv(p_start.x, p_start.y, p_end.x, p_end.y)
    distance_to_goal_from_start = goal_dist(p_start)
    distance_to_goal_from_end = goal_dist(p_end)
    duration = df.elapsedTime_sec.values[idx_e]
    closest, farthest = closest_farthest(df[['lon','lat']].values[idx_s:idx_e])
    return duration,total_distance_covered,distance_to_goal_from_start,distance_to_goal_from_end,closest,farthest
# Build the (aggressively augmented) training feature matrix.
def get_train_feat(datafiles):
    """Build training features and start/end labels from the track files.

    Per track, 20 feature vectors are produced: one from the full track
    and 19 from randomly trimmed copies (augmentation).  Each vector is
    [age, gender, duration, mean velocity, duration, displacement,
    dists-to-goals from start/end, closest/farthest dists per goal].

    Fix: the SHUFFLE branch previously called np.random.shuffle on a
    range object, which raises TypeError on Python 3 (range is
    immutable); it now draws a random index permutation instead.

    Returns (features, labels_start, labels_end) as numpy arrays.
    """
    print ('Multi trim featurees 20 samp in each')
    xfeat={}
    for f in tqdm(datafiles):
        for i in range(0,20):
            df = pd.read_csv(f)
            if i==0:
                # first sample: the untrimmed track
                duration,total_distance_covered,distance_to_goal_from_start,distance_to_goal_from_end,cd,fd=get_distances(df,goal,trim=None)
            else:
                duration,total_distance_covered,distance_to_goal_from_start,distance_to_goal_from_end,cd,fd=get_distances_multi(df,goal)
            feat=[duration,total_distance_covered]
            feat.extend(distance_to_goal_from_start)
            feat.extend(distance_to_goal_from_end)
            feat.extend(cd)
            feat.extend(fd)
            xfeat.setdefault(df.tripID.values[0], []).append(feat)
    train_info['gender']=pd.factorize(train_info['gender'])[0]
    train_info['age']=train_info['age'].fillna(train_info['age'].mean())
    features=[]
    labels_start=[]
    labels_end=[]
    for i,k in enumerate(train_info.tripID.values):
        for item in xfeat[k]:
            # prepend demographics plus duration and mean velocity
            feat=train_info.loc[k][['age','gender']].values.tolist()
            duration=item[0]
            velocity=item[1]/duration
            feat.extend([duration,velocity])
            feat.extend(item)
            features.append(feat)
            labels_start.append(train_info.iloc[i]['startLocID'])
            labels_end.append(train_info.iloc[i]['destLocID'])
    features=np.asarray(features).astype('float32')
    labels_start=np.asarray(labels_start).astype('int')
    labels_end=np.asarray(labels_end).astype('int')
    if SHUFFLE:
        # np.random.shuffle cannot shuffle an immutable range object;
        # use a permutation of indices to shuffle all arrays in step.
        idx = np.random.permutation(len(features))
        features,labels_start,labels_end=features[idx],labels_start[idx],labels_end[idx]
    return features,labels_start,labels_end
# Test features: one untrimmed vector per track (the competition data
# is already trimmed, so no augmentation here).
def get_test_feat(datafiles):
    """Build the test feature matrix, aligned to test_info.tripID order.

    Returns a float32 numpy array with the same layout as the training
    features produced by get_train_feat.
    """
    xfeat = {}
    for f in tqdm(datafiles):
        df = pd.read_csv(f)
        duration,total_distance_covered,distance_to_goal_from_start,distance_to_goal_from_end,cd,fd = get_distances(df,goal,trim=None)
        track_feat = [duration, total_distance_covered]
        track_feat.extend(distance_to_goal_from_start)
        track_feat.extend(distance_to_goal_from_end)
        track_feat.extend(cd)
        track_feat.extend(fd)
        xfeat[df.tripID.values[0]] = track_feat
    test_info['gender'] = pd.factorize(test_info['gender'])[0]
    test_info['age'] = test_info['age'].fillna(test_info['age'].mean())
    features_test = []
    for k in test_info.tripID.values:
        row = test_info.loc[k][['age','gender']].values.tolist()
        duration = xfeat[k][0]
        velocity = xfeat[k][1] / duration
        row.extend([duration, velocity])
        row.extend(xfeat[k])
        features_test.append(row)
    return np.asarray(features_test).astype('float32')
# Fit the full ensemble on all of the training data.
def train_ens(features,slabels):
    """Standard-scale the features and fit every ensemble member.

    Returns (fitted_scaler, dict_of_fitted_classifiers).
    """
    sc = StandardScaler()
    sc.fit(features)
    scaled = sc.transform(features)
    clfs = get_ensemble_of_classifiers(vote=False)
    for name, clf in clfs.items():
        clf.fit(scaled, slabels)
        print ('train full data...done..with ',name)
    return sc, clfs
# Average the ensemble's class probabilities and write the submission.
def submit_ens(clfs,features_test,ks,subname):
    """Soft-vote over clfs, map each argmax class id back to its
    (start, end) goal pair via ks, and save the pairs to subname."""
    probs = [clf.predict_proba(features_test) for clf in clfs.values()]
    mean_probs = np.mean(np.asarray(probs), axis=0)
    best = np.argmax(mean_probs, axis=-1)
    preds = [list(ks[c]) for c in best]
    np.savetxt(subname, preds, fmt='%d', delimiter=',')
    print ('done...')
# 5-fold cross-validation over the ensemble, to estimate the score.
# Note: no per-track weighting as in the competition metric -- just
# plain accuracy and a confusion matrix.
def cross_val_ens(features,slabels,dirname):
    """Average out-of-fold probabilities from every ensemble member,
    report accuracy and confusion matrix, and pickle both into
    <dirname>result.pkl."""
    clfs = get_ensemble_of_classifiers(vote=False)
    ft = StandardScaler().fit_transform(features)
    y_pred = []
    for key in clfs.keys():
        y_pred.append(cross_val_predict(clfs[key], ft, slabels, cv=5, method='predict_proba'))
        print ('cross val ...done...for ', key)
    y_pred = np.argmax(np.mean(y_pred, axis=0), axis=-1)
    score = accuracy_score(slabels, y_pred)
    print("labels ens Accuracy: %0.4f " % score)
    conf_mat = confusion_matrix(slabels, y_pred)
    result = {'start_acc': score, 'start_confusion': conf_mat}
    pickle.dump(result,open(''.join([dirname,'result.pkl']),'wb'))
# Persist the augmented features (for reproducibility) and, when
# provided, the fitted ensemble.
def save_aug_feat_model(features_train,labels_start,labels_end,features_test,clfs,dirname):
    """Pickle train features/labels, test features and (optionally)
    the classifiers into dirname."""
    features_dict = {'feat_augs': [], 'labels': []}
    for i in range(len(features_train)):
        features_dict['feat_augs'].append(features_train[i])
        features_dict['labels'].append([labels_start[i], labels_end[i]])
    pickle.dump(features_dict,open(''.join([dirname,'features_train.pkl']),'wb'))
    pickle.dump(features_test,open(''.join([dirname,'features_test.pkl']),'wb'))
    if clfs is not None:
        pickle.dump(clfs,open(''.join([dirname,'clfs.pkl']),'wb'))
    print ('saved to...',dirname)
# There are 18 distinct start-end pairs, so each pair becomes one of
# 18 training classes; at test time the predicted class id is mapped
# back to its (start, end) pair.
def get_combo_label(labels):
    """Encode each (start, end) row of labels as a class id.

    Returns (class_ids, ks) where ks maps a class id back to the
    (start, end) tuple it encodes.
    """
    pairs = [tuple(row) for row in labels]
    sk = {pair: i for i, pair in enumerate(set(pairs))}
    slabel = [sk[pair] for pair in pairs]
    ks = {v: k for k, v in sk.items()}
    return slabel, ks
# End-to-end training pipeline: build augmented features, cross
# validate, fit, persist artefacts and write the submission.
def train_ens_means(dirname):
    """Run the whole training flow, writing all outputs into dirname."""
    if not os.path.exists(dirname):
        os.mkdir(dirname)
    features_train, labels_start, labels_end = get_train_feat(train_datafiles)
    features_test = get_test_feat(test_datafiles)
    slabels, ks = get_combo_label(np.vstack((labels_start, labels_end)).T)
    cross_val_ens(features_train, slabels, dirname)
    sc, clfs = train_ens(features_train, slabels)
    save_aug_feat_model(features_train, labels_start, labels_end, features_test, clfs, dirname)
    submit_ens(clfs, sc.transform(features_test), ks, ''.join([dirname,'y_submission.txt']))
# Re-generate a submission from the artefacts stored in a directory,
# either reusing the pickled classifiers or retraining them.
def test_from_dir(dirname,retrain=False):
    """Load pickled features (and optionally classifiers) from dirname
    and write y_submission_retest.txt.

    Cleanup vs. the original: the `labels_start`/`labels_end` locals
    were never used and the `np.asarray([item for item in ...])`
    wrappers were redundant copies of the stored lists.
    """
    features_dict = pickle.load(open(''.join([dirname,'features_train.pkl']),'rb'))
    features_test = pickle.load(open(''.join([dirname,'features_test.pkl']),'rb'))
    features_train = np.asarray(features_dict['feat_augs'])
    labels = np.asarray(features_dict['labels'])
    slabels, ks = get_combo_label(labels)
    cross_val_ens(features_train, slabels, dirname)
    if retrain:
        print ('Retraining Ensembles')
        sc, clfs = train_ens(features_train, slabels)
        pickle.dump(clfs,open(''.join([dirname,'clfs.pkl']),'wb'))
    else:
        sc = StandardScaler()
        sc.fit(features_train)
        print ('Loading ensembles')
        clfs = pickle.load(open(''.join([dirname,'clfs.pkl']),'rb'))
    ft_test = sc.transform(features_test)
    submit_ens(clfs, ft_test, ks, ''.join([dirname,'y_submission_retest.txt']))
# please specify the required command line options (refer to readme.txt for example)
if __name__=='__main__':
    parser = argparse.ArgumentParser(description='train and test')
    parser.add_argument('--execute',default='train',help='Train or Test')
    parser.add_argument('--indir',required=True,help='Dir to read/write results')
    # NOTE(review): argparse stores this value as a *string*, so any
    # supplied value -- even "--retrain False" -- is truthy; only
    # omitting the flag keeps it False.  Consider action='store_true',
    # but confirm the intended CLI first.
    parser.add_argument('--retrain',default=False,help='Retrain from augmented features')
    args = parser.parse_args()
    if args.execute=='train':
        print ('Training and storing tmp results plus classifiers including y_submission.txt in {0}'.format(args.indir))
        # Create the output directory on first use; otherwise warn that
        # existing results will be overwritten.
        if not os.path.exists(args.indir):
            os.mkdir(args.indir)
        else:
            print ('Results will be Replaced in {0} '.format(args.indir))
        train_ens_means(args.indir)
    if args.execute=='test':
        if not os.path.exists(args.indir):
            print ('{0} does not exists..'.format(args.indir))
        else:
            print ('Testing From {0} and write result in y_submission_retest.txt'.format(args.indir))
            test_from_dir(args.indir,args.retrain)
| [
"sklearn.ensemble.VotingClassifier",
"sklearn.ensemble.ExtraTreesClassifier",
"pandas.read_csv",
"sklearn.neighbors.KNeighborsClassifier",
"shapely.geometry.Point",
"numpy.mean",
"os.path.exists",
"argparse.ArgumentParser",
"numpy.where",
"numpy.asarray",
"numpy.max",
"numpy.random.seed",
"o... | [((663, 681), 'numpy.random.seed', 'np.random.seed', (['(10)'], {}), '(10)\n', (677, 681), True, 'import numpy as np\n'), ((1024, 1085), 'sklearn.ensemble.ExtraTreesClassifier', 'ExtraTreesClassifier', (['(100)'], {'class_weight': '"""balanced"""', 'n_jobs': '(-1)'}), "(100, class_weight='balanced', n_jobs=-1)\n", (1044, 1085), False, 'from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier\n'), ((1121, 1183), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', (['(50)'], {'class_weight': '"""balanced"""', 'n_jobs': '(-1)'}), "(50, class_weight='balanced', n_jobs=-1)\n", (1143, 1183), False, 'from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier\n'), ((1222, 1277), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', (['(20)'], {'weights': '"""distance"""', 'n_jobs': '(-1)'}), "(20, weights='distance', n_jobs=-1)\n", (1242, 1277), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((1306, 1360), 'xgboost.XGBClassifier', 'xgboost.XGBClassifier', ([], {'n_estimators': '(100)', 'subsample': '(0.7)'}), '(n_estimators=100, subsample=0.7)\n', (1327, 1360), False, 'import xgboost\n'), ((3418, 3457), 'numpy.random.randint', 'np.random.randint', (['TRIM_START', 'TRIM_END'], {}), '(TRIM_START, TRIM_END)\n', (3435, 3457), True, 'import numpy as np\n'), ((3523, 3562), 'shapely.geometry.Point', 'Point', (["df[['lon', 'lat']].values[idx_s]"], {}), "(df[['lon', 'lat']].values[idx_s])\n", (3528, 3562), False, 'from shapely.geometry import LineString, MultiLineString, Point, MultiPoint\n'), ((3607, 3646), 'numpy.random.randint', 'np.random.randint', (['TRIM_START', 'TRIM_END'], {}), '(TRIM_START, TRIM_END)\n', (3624, 3646), True, 'import numpy as np\n'), ((3738, 3777), 'shapely.geometry.Point', 'Point', (["df[['lon', 'lat']].values[idx_e]"], {}), "(df[['lon', 'lat']].values[idx_e])\n", (3743, 3777), False, 'from shapely.geometry import LineString, MultiLineString, Point, MultiPoint\n'), ((4564, 
4579), 'tqdm.tqdm', 'tqdm', (['datafiles'], {}), '(datafiles)\n', (4568, 4579), False, 'from tqdm import tqdm\n'), ((6536, 6551), 'tqdm.tqdm', 'tqdm', (['datafiles'], {}), '(datafiles)\n', (6540, 6551), False, 'from tqdm import tqdm\n'), ((7544, 7560), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (7558, 7560), False, 'from sklearn.preprocessing import StandardScaler\n'), ((8024, 8042), 'numpy.asarray', 'np.asarray', (['y_pred'], {}), '(y_pred)\n', (8034, 8042), True, 'import numpy as np\n'), ((8049, 8072), 'numpy.mean', 'np.mean', (['y_pred'], {'axis': '(0)'}), '(y_pred, axis=0)\n', (8056, 8072), True, 'import numpy as np\n'), ((8085, 8106), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(-1)'}), '(y, axis=-1)\n', (8094, 8106), True, 'import numpy as np\n'), ((8158, 8209), 'numpy.savetxt', 'np.savetxt', (['subname', 'preds'], {'fmt': '"""%d"""', 'delimiter': '""","""'}), "(subname, preds, fmt='%d', delimiter=',')\n", (8168, 8209), True, 'import numpy as np\n'), ((8529, 8545), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (8543, 8545), False, 'from sklearn.preprocessing import StandardScaler\n'), ((8856, 8887), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['slabels', 'y_pred'], {}), '(slabels, y_pred)\n', (8870, 8887), False, 'from sklearn.metrics import confusion_matrix, accuracy_score\n'), ((8981, 9014), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['slabels', 'y_pred'], {}), '(slabels, y_pred)\n', (8997, 9014), False, 'from sklearn.metrics import confusion_matrix, accuracy_score\n'), ((11296, 11353), 'numpy.asarray', 'np.asarray', (["[item for item in features_dict['feat_augs']]"], {}), "([item for item in features_dict['feat_augs']])\n", (11306, 11353), True, 'import numpy as np\n'), ((11371, 11428), 'numpy.asarray', 'np.asarray', (["[item[0] for item in features_dict['labels']]"], {}), "([item[0] for item in features_dict['labels']])\n", (11381, 11428), True, 'import numpy as 
np\n'), ((11444, 11501), 'numpy.asarray', 'np.asarray', (["[item[1] for item in features_dict['labels']]"], {}), "([item[1] for item in features_dict['labels']])\n", (11454, 11501), True, 'import numpy as np\n'), ((11513, 11567), 'numpy.asarray', 'np.asarray', (["[item for item in features_dict['labels']]"], {}), "([item for item in features_dict['labels']])\n", (11523, 11567), True, 'import numpy as np\n'), ((12248, 12301), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""train and test"""'}), "(description='train and test')\n", (12271, 12301), False, 'import argparse\n'), ((1406, 1414), 'sklearn.svm.SVC', 'SVC', (['(0.1)'], {}), '(0.1)\n', (1409, 1414), False, 'from sklearn.svm import SVC\n'), ((1429, 1550), 'sklearn.ensemble.VotingClassifier', 'VotingClassifier', ([], {'estimators': "[('et', clf1), ('rf', clf2), ('kn', clf3), ('xgb', clf4), ('svc', clf5)]", 'voting': '"""hard"""'}), "(estimators=[('et', clf1), ('rf', clf2), ('kn', clf3), (\n 'xgb', clf4), ('svc', clf5)], voting='hard')\n", (1445, 1550), False, 'from sklearn.ensemble import VotingClassifier\n'), ((1596, 1647), 'sklearn.svm.SVC', 'SVC', (['(0.1)'], {'class_weight': '"""balanced"""', 'probability': '(True)'}), "(0.1, class_weight='balanced', probability=True)\n", (1599, 1647), False, 'from sklearn.svm import SVC\n'), ((1899, 1936), 'shapely.geometry.Point', 'Point', (["goal[['lon', 'lat']].values[i]"], {}), "(goal[['lon', 'lat']].values[i])\n", (1904, 1936), False, 'from shapely.geometry import LineString, MultiLineString, Point, MultiPoint\n'), ((2388, 2425), 'shapely.geometry.Point', 'Point', (["goal[['lon', 'lat']].values[i]"], {}), "(goal[['lon', 'lat']].values[i])\n", (2393, 2425), False, 'from shapely.geometry import LineString, MultiLineString, Point, MultiPoint\n'), ((2778, 2813), 'shapely.geometry.Point', 'Point', (["df[['lon', 'lat']].values[0]"], {}), "(df[['lon', 'lat']].values[0])\n", (2783, 2813), False, 'from shapely.geometry import LineString, 
MultiLineString, Point, MultiPoint\n'), ((2813, 2849), 'shapely.geometry.Point', 'Point', (["df[['lon', 'lat']].values[-1]"], {}), "(df[['lon', 'lat']].values[-1])\n", (2818, 2849), False, 'from shapely.geometry import LineString, MultiLineString, Point, MultiPoint\n'), ((5376, 5410), 'pandas.factorize', 'pd.factorize', (["train_info['gender']"], {}), "(train_info['gender'])\n", (5388, 5410), True, 'import pandas as pd\n'), ((6225, 6247), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (6242, 6247), True, 'import numpy as np\n'), ((6566, 6580), 'pandas.read_csv', 'pd.read_csv', (['f'], {}), '(f)\n', (6577, 6580), True, 'import pandas as pd\n'), ((6969, 7002), 'pandas.factorize', 'pd.factorize', (["test_info['gender']"], {}), "(test_info['gender'])\n", (6981, 7002), True, 'import pandas as pd\n'), ((8641, 8712), 'sklearn.model_selection.cross_val_predict', 'cross_val_predict', (['clfs[key]', 'ft', 'slabels'], {'cv': '(5)', 'method': '"""predict_proba"""'}), "(clfs[key], ft, slabels, cv=5, method='predict_proba')\n", (8658, 8712), False, 'from sklearn.model_selection import cross_val_predict\n'), ((8812, 8835), 'numpy.mean', 'np.mean', (['y_pred'], {'axis': '(0)'}), '(y_pred, axis=0)\n', (8819, 8835), True, 'import numpy as np\n'), ((10381, 10404), 'os.path.exists', 'os.path.exists', (['dirname'], {}), '(dirname)\n', (10395, 10404), False, 'import os\n'), ((10414, 10431), 'os.mkdir', 'os.mkdir', (['dirname'], {}), '(dirname)\n', (10422, 10431), False, 'import os\n'), ((10566, 10603), 'numpy.vstack', 'np.vstack', (['(labels_start, labels_end)'], {}), '((labels_start, labels_end))\n', (10575, 10603), True, 'import numpy as np\n'), ((11850, 11866), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (11864, 11866), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1996, 2007), 'shapely.geometry.Point', 'Point', (['item'], {}), '(item)\n', (2001, 2007), False, 'from shapely.geometry import LineString, 
MultiLineString, Point, MultiPoint\n'), ((2148, 2158), 'numpy.min', 'np.min', (['cd'], {}), '(cd)\n', (2154, 2158), True, 'import numpy as np\n'), ((2193, 2203), 'numpy.max', 'np.max', (['cd'], {}), '(cd)\n', (2199, 2203), True, 'import numpy as np\n'), ((3467, 3508), 'numpy.where', 'np.where', (['(df.elapsedTime_sec > trim_start)'], {}), '(df.elapsedTime_sec > trim_start)\n', (3475, 3508), True, 'import numpy as np\n'), ((3656, 3727), 'numpy.where', 'np.where', (['(df.elapsedTime_sec > df.elapsedTime_sec.values[-1] - trim_end)'], {}), '(df.elapsedTime_sec > df.elapsedTime_sec.values[-1] - trim_end)\n', (3664, 3727), True, 'import numpy as np\n'), ((4628, 4642), 'pandas.read_csv', 'pd.read_csv', (['f'], {}), '(f)\n', (4639, 4642), True, 'import pandas as pd\n'), ((6019, 6039), 'numpy.asarray', 'np.asarray', (['features'], {}), '(features)\n', (6029, 6039), True, 'import numpy as np\n'), ((6075, 6099), 'numpy.asarray', 'np.asarray', (['labels_start'], {}), '(labels_start)\n', (6085, 6099), True, 'import numpy as np\n'), ((6129, 6151), 'numpy.asarray', 'np.asarray', (['labels_end'], {}), '(labels_end)\n', (6139, 6151), True, 'import numpy as np\n'), ((7390, 7415), 'numpy.asarray', 'np.asarray', (['features_test'], {}), '(features_test)\n', (7400, 7415), True, 'import numpy as np\n'), ((12745, 12771), 'os.path.exists', 'os.path.exists', (['args.indir'], {}), '(args.indir)\n', (12759, 12771), False, 'import os\n'), ((12785, 12805), 'os.mkdir', 'os.mkdir', (['args.indir'], {}), '(args.indir)\n', (12793, 12805), False, 'import os\n'), ((12974, 13000), 'os.path.exists', 'os.path.exists', (['args.indir'], {}), '(args.indir)\n', (12988, 13000), False, 'import os\n')] |
# Driver script: parse config, seed every RNG, pick the GPU, load the data,
# then train and evaluate HyperGCN.
# parse arguments ([ConfigArgParse](https://github.com/bw2/ConfigArgParse))
from config import config
args = config.parse()
# seed torch and numpy RNGs for reproducibility
import os, torch, numpy as np
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# gpu, seed
# CUDA_DEVICE_ORDER must be PCI_BUS_ID so the CUDA_VISIBLE_DEVICES index
# refers to a stable (bus) ordering rather than the default fastest-first one.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
# NOTE(review): PYTHONHASHSEED set after interpreter start only affects
# subprocesses, not this process's own hash randomization.
os.environ['PYTHONHASHSEED'] = str(args.seed)
# load dataset plus the train/test index splits
from data import data
dataset, train, test = data.load(args)
print("length of train is", len(train))
# # initialise HyperGCN
from model import model
HyperGCN = model.initialise(dataset, args)
# train and test HyperGCN
HyperGCN = model.train(HyperGCN, dataset, train, args)
acc = model.test(HyperGCN, dataset, test, args)
print("accuracy:", float(acc), ", error:", float(100*(1-acc))) | [
"torch.manual_seed",
"model.model.train",
"model.model.initialise",
"numpy.random.seed",
"data.data.load",
"config.config.parse",
"model.model.test"
] | [((109, 123), 'config.config.parse', 'config.parse', ([], {}), '()\n', (121, 123), False, 'from config import config\n'), ((164, 192), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (181, 192), False, 'import os, torch, numpy as np\n'), ((193, 218), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (207, 218), True, 'import os, torch, numpy as np\n'), ((446, 461), 'data.data.load', 'data.load', (['args'], {}), '(args)\n', (455, 461), False, 'from data import data\n'), ((564, 595), 'model.model.initialise', 'model.initialise', (['dataset', 'args'], {}), '(dataset, args)\n', (580, 595), False, 'from model import model\n'), ((636, 679), 'model.model.train', 'model.train', (['HyperGCN', 'dataset', 'train', 'args'], {}), '(HyperGCN, dataset, train, args)\n', (647, 679), False, 'from model import model\n'), ((686, 727), 'model.model.test', 'model.test', (['HyperGCN', 'dataset', 'test', 'args'], {}), '(HyperGCN, dataset, test, args)\n', (696, 727), False, 'from model import model\n')] |
from ruleextractor.functions import Select, SpacyProcessor, Masker, PathFinder, Chunker
import numpy as np
class Block(object):
    """One annotated view of a text span: surface tokens plus their parallel
    entity, POS and dependency tags.

    Attributes:
        raw_string: the original (untokenized) source text or object.
        tokens / ent / pos / dep: parallel per-token annotation sequences.
        arr: numpy string array stacking the four rows (tokens, ent, pos, dep).
        masked: optional masked representation; when set, it replaces the
            default four-row rendering in ``__str__``.
    """

    def __init__(self, raw_string, tokens, ent, pos, dep):
        self.raw_string = raw_string
        self.tokens = tokens
        self.ent = ent
        self.pos = pos
        self.dep = dep
        # np.str_ instead of np.unicode_: the latter is a deprecated alias
        # that was removed in NumPy 2.0; both spell the same unicode dtype.
        self.arr = np.array([self.tokens, self.ent, self.pos, self.dep], dtype=np.str_)
        self.masked = None

    def __str__(self):
        # A masked view, once installed, takes precedence over the raw rows.
        if self.masked is not None:
            return str(self.masked)
        else:
            return '{0}\n{1}\n{2}\n{3}'.format(self.tokens, self.ent, self.pos, self.dep)

    def __unicode__(self):
        # Python 2 compatibility only; `unicode` does not exist on Python 3.
        return unicode(self.__str__())

    def __repr__(self):
        return self.__str__()
class Interpreter(object):
    """Interactive command-line REPL for exploring spaCy annotations.

    Python 2 only (uses `raw_input` and `unicode`). Every command operates on
    a current `selection`; prior selections are kept on `selection_history`
    so the user can step `back`, while `push`/`merge` move selections through
    a temporary `stack`.
    """
    def __init__(self, indexer):
        # command name -> callable command object, filled from the indexer
        self.funcs = {}
        self.init_functions(indexer)
        # value the next command operates on
        self.selection = None
        # undo trail consumed by the `back` command
        self.selection_history = []
        # holding area filled by `push` and consumed by `merge`
        self.stack = []
        self.do_print = True
        # history recording is temporarily disabled while `block` replays
        # sub-commands internally (see get_block)
        self.history_enabled = True
    def init_functions(self, indexer):
        """Register all available REPL commands against the indexer."""
        self.funcs['select'] = Select(indexer)
        self.funcs['mask'] = Masker(indexer)
        self.funcs['path'] = PathFinder(indexer)
        self.funcs['chunk'] = Chunker(indexer)
        # SpacyProcessor variants differ only in which token attribute they emit
        self.funcs['tokenize'] = SpacyProcessor(indexer, lambda x: x.text)
        self.funcs['dep'] = SpacyProcessor(indexer, lambda x: x.dep_)
        self.funcs['pos'] = SpacyProcessor(indexer, lambda x: x.pos_)
        self.funcs['ent'] = SpacyProcessor(indexer, lambda x: x.ent_type_)
    def run(self):
        """Main REPL loop: read a line, try each built-in handler in order,
        otherwise dispatch to a registered function command."""
        while True:
            input_value = raw_input(':> ')
            values = input_value.split(' ')
            command = values[0]
            # each handler returns True when it consumed the command
            if self.isdigit(command): continue
            if self.unmask(command): continue
            if self.back(command): continue
            if self.push(command): continue
            if self.merge(command): continue
            if self.get_block(command): continue
            if self.check_is_function(command): continue
            self.execute_function(command, values)
    def unmask(self, command):
        """Handle `unmask`: clear the masked view on the selected Block(s)."""
        if command == 'unmask':
            if isinstance(self.selection, Block):
                self.selection.masked = None
                self.print_selection()
            elif isinstance(self.selection, list):
                # only unmask a list if it actually holds Blocks
                if isinstance(self.selection[0], Block):
                    for block in self.selection:
                        block.masked = None
                    self.print_selection()
                else:
                    print('No block selected, cannot unmask!')
            else:
                print('No block selected, cannot unmask!')
            return True
        else:
            return False
    def push(self, command):
        """Handle `push`: park the current selection on the stack and
        restore the previous selection."""
        if command == 'push':
            self.stack.append(self.selection)
            self.back('back')
            return True
        else:
            return False
    def get_block(self, command):
        """Handle `block`: run tokenize/ent/pos/dep over the selection and
        combine the four results into Block objects."""
        if command == 'block':
            # suppress intermediate printing while replaying sub-commands
            self.do_print = False
            past = self.selection
            self.execute_function('tokenize', '')
            self.push('push')
            self.execute_function('ent', '')
            self.push('push')
            self.execute_function('pos', '')
            self.push('push')
            self.execute_function('dep', '')
            self.push('push')
            # merge without polluting the user's undo history
            self.history_enabled = False
            self.merge('merge')
            if isinstance(past, list):
                blocks = []
                # selection now holds [tokens, ents, poss, deps], one entry
                # per original item in `past`
                tokens = self.selection[0]
                ents = self.selection[1]
                poss = self.selection[2]
                deps = self.selection[3]
                for i, (token, ent, pos, dep) in enumerate(zip(tokens, ents, poss, deps)):
                    blocks.append(Block(past[i], token, ent, pos, dep))
                self.selection = blocks
            else:
                self.selection = Block(past, self.selection[0], self.selection[1], self.selection[2], self.selection[3])
            self.history_enabled = True
            self.do_print = True
            self.add_to_selection_history(past)
            self.print_selection()
            return True
        else:
            return False
    def merge(self, command):
        """Handle `merge`: replace the selection with the stacked values
        (oldest first) and clear the stack."""
        if command == 'merge':
            self.add_to_selection_history(self.selection)
            self.selection = self.stack[:]
            self.stack = []
            self.print_selection()
            return True
        else:
            return False
    def execute_function(self, command, values):
        """Dispatch to a registered command; remaining words become its argument."""
        if self.selection is not None: self.add_to_selection_history(self.selection)
        self.selection = self.funcs[command].execute_func(' '.join(values[1:]), self.selection)
        self.print_selection()
    def add_to_selection_history(self, selection):
        # no-op while `block` is replaying internal commands
        if self.history_enabled:
            self.selection_history.append(selection)
    def isdigit(self, command):
        """Handle a bare number: select that index of the current selection."""
        if command.isdigit():
            self.add_to_selection_history(self.selection)
            self.selection = self.selection[int(command)]
            self.print_selection()
            return True
        else:
            return False
    def back(self, command):
        """Handle `back`: restore the most recent selection from history."""
        if command == 'back':
            if self.selection_history is None or len(self.selection_history) == 0:
                print('No selection history!')
                return True
            self.selection = self.selection_history.pop()
            self.print_selection()
            return True
        else:
            return False
    def check_is_function(self, command):
        """Reject unknown commands, listing the registered ones."""
        if command not in self.funcs:
            print('Function unknown. Available functions are')
            for key in self.funcs:
                print(key)
            return True
        else:
            return False
    def print_selection(self):
        """Pretty-print the current selection unless printing is suppressed."""
        if self.do_print:
            print('-'*60)
            # strings and single Blocks print directly; anything else is
            # treated as an enumerable collection
            if isinstance(self.selection, unicode) or isinstance(self.selection, Block):
                print(self.selection)
            else:
                for i, results in enumerate(self.selection):
                    print(i, results)
            print('-'*60)
| [
"ruleextractor.functions.SpacyProcessor",
"ruleextractor.functions.Masker",
"numpy.array",
"ruleextractor.functions.Select",
"ruleextractor.functions.Chunker",
"ruleextractor.functions.PathFinder"
] | [((342, 414), 'numpy.array', 'np.array', (['[self.tokens, self.ent, self.pos, self.dep]'], {'dtype': 'np.unicode_'}), '([self.tokens, self.ent, self.pos, self.dep], dtype=np.unicode_)\n', (350, 414), True, 'import numpy as np\n'), ((1112, 1127), 'ruleextractor.functions.Select', 'Select', (['indexer'], {}), '(indexer)\n', (1118, 1127), False, 'from ruleextractor.functions import Select, SpacyProcessor, Masker, PathFinder, Chunker\n'), ((1157, 1172), 'ruleextractor.functions.Masker', 'Masker', (['indexer'], {}), '(indexer)\n', (1163, 1172), False, 'from ruleextractor.functions import Select, SpacyProcessor, Masker, PathFinder, Chunker\n'), ((1202, 1221), 'ruleextractor.functions.PathFinder', 'PathFinder', (['indexer'], {}), '(indexer)\n', (1212, 1221), False, 'from ruleextractor.functions import Select, SpacyProcessor, Masker, PathFinder, Chunker\n'), ((1252, 1268), 'ruleextractor.functions.Chunker', 'Chunker', (['indexer'], {}), '(indexer)\n', (1259, 1268), False, 'from ruleextractor.functions import Select, SpacyProcessor, Masker, PathFinder, Chunker\n'), ((1302, 1343), 'ruleextractor.functions.SpacyProcessor', 'SpacyProcessor', (['indexer', '(lambda x: x.text)'], {}), '(indexer, lambda x: x.text)\n', (1316, 1343), False, 'from ruleextractor.functions import Select, SpacyProcessor, Masker, PathFinder, Chunker\n'), ((1372, 1413), 'ruleextractor.functions.SpacyProcessor', 'SpacyProcessor', (['indexer', '(lambda x: x.dep_)'], {}), '(indexer, lambda x: x.dep_)\n', (1386, 1413), False, 'from ruleextractor.functions import Select, SpacyProcessor, Masker, PathFinder, Chunker\n'), ((1442, 1483), 'ruleextractor.functions.SpacyProcessor', 'SpacyProcessor', (['indexer', '(lambda x: x.pos_)'], {}), '(indexer, lambda x: x.pos_)\n', (1456, 1483), False, 'from ruleextractor.functions import Select, SpacyProcessor, Masker, PathFinder, Chunker\n'), ((1512, 1558), 'ruleextractor.functions.SpacyProcessor', 'SpacyProcessor', (['indexer', '(lambda x: x.ent_type_)'], {}), 
'(indexer, lambda x: x.ent_type_)\n', (1526, 1558), False, 'from ruleextractor.functions import Select, SpacyProcessor, Masker, PathFinder, Chunker\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.modelUtils import ChebConv, Pool, residualBlock
import torchvision.ops.roi_align as roi_align
import numpy as np
class EncoderConv(nn.Module):
    """Convolutional VAE encoder.

    Six residual stages with 2x max-pooling between them, then two linear
    heads producing the latent mean and log-variance. The two deepest
    feature maps are also returned so a decoder can build skip connections.
    """

    def __init__(self, latents = 64, hw = 32):
        super(EncoderConv, self).__init__()
        self.latents = latents
        self.c = 4
        # channel widths per stage: c * [2, 4, 8, 16, 32]
        self.size = self.c * np.array([2, 4, 8, 16, 32], dtype=np.intc)
        self.maxpool = nn.MaxPool2d(2)
        self.dconv_down1 = residualBlock(1, self.size[0])
        self.dconv_down2 = residualBlock(self.size[0], self.size[1])
        self.dconv_down3 = residualBlock(self.size[1], self.size[2])
        self.dconv_down4 = residualBlock(self.size[2], self.size[3])
        self.dconv_down5 = residualBlock(self.size[3], self.size[4])
        self.dconv_down6 = residualBlock(self.size[4], self.size[4])
        # hw is the spatial side length after the five poolings
        flat_features = self.size[4] * hw * hw
        self.fc_mu = nn.Linear(in_features=flat_features, out_features=self.latents)
        self.fc_logvar = nn.Linear(in_features=flat_features, out_features=self.latents)

    def forward(self, x):
        """Return (mu, logvar, conv6, conv5) for the input batch ``x``."""
        pool = self.maxpool
        feat = pool(self.dconv_down1(x))
        feat = pool(self.dconv_down2(feat))
        feat = pool(self.dconv_down3(feat))
        feat = pool(self.dconv_down4(feat))
        conv5 = self.dconv_down5(feat)
        conv6 = self.dconv_down6(pool(conv5))
        # flatten each multi-channel feature map into one feature vector
        flat = conv6.view(conv6.size(0), -1)
        return self.fc_mu(flat), self.fc_logvar(flat), conv6, conv5
class SkipBlock(nn.Module):
    """Samples CNN features at graph-predicted 2D positions via ROI-align,
    producing a per-node skip connection for the graph decoder."""
    def __init__(self, in_filters, window):
        super(SkipBlock, self).__init__()
        # (width, height) of the sampling window around each predicted point
        self.window = window
        # order-1 Chebyshev conv projecting node features to 2D positions
        self.graphConv_pre = ChebConv(in_filters, 2, 1, bias = False)
    def lookup(self, pos, layer, salida = (1,1)):
        """ROI-align `layer` at the node positions in `pos`.

        pos: (B, N, 2) positions, assumed normalized to [0, 1] — scaled below.
        layer: (B, F, h, h) feature map to sample from.
        salida: ROI-align output size per box.
        Returns a (B, N, -1) tensor of sampled features.
        """
        B = pos.shape[0]
        N = pos.shape[1]
        F = layer.shape[1]  # NOTE(review): unused local — kept for reference
        h = layer.shape[-1]
        ## Scale from [0,1] to [0, h]
        pos = pos * h
        # half-window offsets; the +1 on the far side makes the box span
        # exactly `window` pixels around the point
        _x1 = (self.window[0] // 2) * 1.0
        _x2 = (self.window[0] // 2 + 1) * 1.0
        _y1 = (self.window[1] // 2) * 1.0
        _y2 = (self.window[1] // 2 + 1) * 1.0
        boxes = []
        # roi_align with a list of per-batch box tensors: one (N, 4) tensor
        # of (x1, y1, x2, y2) boxes per batch element
        for batch in range(0, B):
            x1 = pos[batch,:,0].reshape(-1, 1) - _x1
            x2 = pos[batch,:,0].reshape(-1, 1) + _x2
            y1 = pos[batch,:,1].reshape(-1, 1) - _y1
            y2 = pos[batch,:,1].reshape(-1, 1) + _y2
            aux = torch.cat([x1, y1, x2, y2], axis = 1)
            boxes.append(aux)
        skip = roi_align(layer, boxes, output_size = salida, aligned=True)
        # regroup the flat (B*N, ...) ROI outputs back per batch and node
        vista = skip.view([B, N, -1])
        return vista
    def forward(self, x, adj, conv_layer):
        """Predict node positions, sample features there, and concatenate
        [features, sampled skip, positions]; also return the positions."""
        pos = self.graphConv_pre(x, adj)
        skip = self.lookup(pos, conv_layer)
        return torch.cat((x, skip, pos), axis = 2), pos
class Hybrid(nn.Module):
    """Hybrid VAE: convolutional encoder (EncoderConv) plus a spectral
    graph-convolutional decoder with image-to-graph skip connections."""
    def __init__(self, config, downsample_matrices, upsample_matrices, adjacency_matrices):
        super(Hybrid, self).__init__()
        self.config = config
        # spatial size of the encoder's deepest feature map
        hw = config['inputsize'] // 32
        self.z = config['latents']
        self.encoder = EncoderConv(latents = self.z, hw = hw)
        self.downsample_matrices = downsample_matrices
        self.upsample_matrices = upsample_matrices
        self.adjacency_matrices = adjacency_matrices
        self.kld_weight = 1e-5
        n_nodes = config['n_nodes']
        self.filters = config['filters']
        self.K = 6  # Chebyshev polynomial order
        self.window = (3,3)  # skip-connection sampling window
        # Fully connected layer of the decoder (latent -> coarsest graph)
        outshape = self.filters[-1] * n_nodes[-1]
        self.dec_lin = torch.nn.Linear(self.z, outshape)
        self.normalization2u = torch.nn.InstanceNorm1d(self.filters[1])
        self.normalization3u = torch.nn.InstanceNorm1d(self.filters[2])
        self.normalization4u = torch.nn.InstanceNorm1d(self.filters[3])
        self.normalization5u = torch.nn.InstanceNorm1d(self.filters[4])
        self.normalization6u = torch.nn.InstanceNorm1d(self.filters[5])
        # feature width contributed by each skip connection (deepest encoder stage)
        outsize1 = self.encoder.size[4]
        outsize2 = self.encoder.size[4]
        # Graph convolution layers of the decoder
        self.graphConv_up6 = ChebConv(self.filters[6], self.filters[5], self.K)
        self.graphConv_up5 = ChebConv(self.filters[5], self.filters[4], self.K)
        self.SC_1 = SkipBlock(self.filters[4], self.window)
        # +2 accounts for the predicted (x, y) positions appended by SkipBlock
        self.graphConv_up4 = ChebConv(self.filters[4] + outsize1 + 2, self.filters[3], self.K)
        self.graphConv_up3 = ChebConv(self.filters[3], self.filters[2], self.K)
        self.SC_2 = SkipBlock(self.filters[2], self.window)
        self.graphConv_up2 = ChebConv(self.filters[2] + outsize2 + 2, self.filters[1], self.K)
        self.graphConv_up1 = ChebConv(self.filters[1], self.filters[0], 1, bias = False)
        self.pool = Pool()
        self.reset_parameters()
    def reset_parameters(self):
        """Initialize the decoder's linear layer from N(0, 0.1)."""
        torch.nn.init.normal_(self.dec_lin.weight, 0, 0.1)
    def sampling(self, mu, log_var):
        """VAE reparameterization trick: z = mu + sigma * eps."""
        std = torch.exp(0.5*log_var)
        eps = torch.randn_like(std)
        return eps.mul(std).add_(mu)
    def forward(self, x):
        # encoder also yields the two deepest feature maps for the skips;
        # mu/log_var are stored on self so the training loop can read them
        self.mu, self.log_var, conv6, conv5 = self.encoder(x)
        # sample the latent only during training; use the mean at eval time
        if self.training:
            z = self.sampling(self.mu, self.log_var)
        else:
            z = self.mu
        x = self.dec_lin(z)
        x = F.relu(x)
        x = x.reshape(x.shape[0], -1, self.filters[-1])
        # decode coarsest-to-finest through Chebyshev graph convolutions
        x = self.graphConv_up6(x, self.adjacency_matrices[5]._indices())
        x = self.normalization6u(x)
        x = F.relu(x)
        x = self.graphConv_up5(x, self.adjacency_matrices[4]._indices())
        x = self.normalization5u(x)
        x = F.relu(x)
        # first image-to-graph skip; pos1 are the predicted node positions
        x, pos1 = self.SC_1(x, self.adjacency_matrices[3]._indices(), conv6)
        x = self.graphConv_up4(x, self.adjacency_matrices[3]._indices())
        x = self.normalization4u(x)
        x = F.relu(x)
        x = self.pool(x, self.upsample_matrices[0])
        x = self.graphConv_up3(x, self.adjacency_matrices[2]._indices())
        x = self.normalization3u(x)
        x = F.relu(x)
        # second image-to-graph skip from the shallower encoder feature map
        x, pos2 = self.SC_2(x, self.adjacency_matrices[1]._indices(), conv5)
        x = self.graphConv_up2(x, self.adjacency_matrices[1]._indices())
        x = self.normalization2u(x)
        x = F.relu(x)
        x = self.graphConv_up1(x, self.adjacency_matrices[0]._indices()) # No relu and no bias
return x, pos1, pos2 | [
"models.modelUtils.Pool",
"torch.nn.InstanceNorm1d",
"torch.exp",
"torch.nn.init.normal_",
"torch.randn_like",
"torch.nn.MaxPool2d",
"numpy.array",
"torch.cat",
"torch.nn.Linear",
"models.modelUtils.residualBlock",
"torch.nn.functional.relu",
"models.modelUtils.ChebConv",
"torchvision.ops.ro... | [((485, 500), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (497, 500), True, 'import torch.nn as nn\n'), ((537, 567), 'models.modelUtils.residualBlock', 'residualBlock', (['(1)', 'self.size[0]'], {}), '(1, self.size[0])\n', (550, 567), False, 'from models.modelUtils import ChebConv, Pool, residualBlock\n'), ((595, 636), 'models.modelUtils.residualBlock', 'residualBlock', (['self.size[0]', 'self.size[1]'], {}), '(self.size[0], self.size[1])\n', (608, 636), False, 'from models.modelUtils import ChebConv, Pool, residualBlock\n'), ((664, 705), 'models.modelUtils.residualBlock', 'residualBlock', (['self.size[1]', 'self.size[2]'], {}), '(self.size[1], self.size[2])\n', (677, 705), False, 'from models.modelUtils import ChebConv, Pool, residualBlock\n'), ((733, 774), 'models.modelUtils.residualBlock', 'residualBlock', (['self.size[2]', 'self.size[3]'], {}), '(self.size[2], self.size[3])\n', (746, 774), False, 'from models.modelUtils import ChebConv, Pool, residualBlock\n'), ((802, 843), 'models.modelUtils.residualBlock', 'residualBlock', (['self.size[3]', 'self.size[4]'], {}), '(self.size[3], self.size[4])\n', (815, 843), False, 'from models.modelUtils import ChebConv, Pool, residualBlock\n'), ((871, 912), 'models.modelUtils.residualBlock', 'residualBlock', (['self.size[4]', 'self.size[4]'], {}), '(self.size[4], self.size[4])\n', (884, 912), False, 'from models.modelUtils import ChebConv, Pool, residualBlock\n'), ((943, 1015), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': '(self.size[4] * hw * hw)', 'out_features': 'self.latents'}), '(in_features=self.size[4] * hw * hw, out_features=self.latents)\n', (952, 1015), True, 'import torch.nn as nn\n'), ((1037, 1109), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': '(self.size[4] * hw * hw)', 'out_features': 'self.latents'}), '(in_features=self.size[4] * hw * hw, out_features=self.latents)\n', (1046, 1109), True, 'import torch.nn as nn\n'), ((1976, 2014), 
'models.modelUtils.ChebConv', 'ChebConv', (['in_filters', '(2)', '(1)'], {'bias': '(False)'}), '(in_filters, 2, 1, bias=False)\n', (1984, 2014), False, 'from models.modelUtils import ChebConv, Pool, residualBlock\n'), ((2853, 2910), 'torchvision.ops.roi_align', 'roi_align', (['layer', 'boxes'], {'output_size': 'salida', 'aligned': '(True)'}), '(layer, boxes, output_size=salida, aligned=True)\n', (2862, 2910), True, 'import torchvision.ops.roi_align as roi_align\n'), ((4000, 4033), 'torch.nn.Linear', 'torch.nn.Linear', (['self.z', 'outshape'], {}), '(self.z, outshape)\n', (4015, 4033), False, 'import torch\n'), ((4082, 4122), 'torch.nn.InstanceNorm1d', 'torch.nn.InstanceNorm1d', (['self.filters[1]'], {}), '(self.filters[1])\n', (4105, 4122), False, 'import torch\n'), ((4154, 4194), 'torch.nn.InstanceNorm1d', 'torch.nn.InstanceNorm1d', (['self.filters[2]'], {}), '(self.filters[2])\n', (4177, 4194), False, 'import torch\n'), ((4226, 4266), 'torch.nn.InstanceNorm1d', 'torch.nn.InstanceNorm1d', (['self.filters[3]'], {}), '(self.filters[3])\n', (4249, 4266), False, 'import torch\n'), ((4298, 4338), 'torch.nn.InstanceNorm1d', 'torch.nn.InstanceNorm1d', (['self.filters[4]'], {}), '(self.filters[4])\n', (4321, 4338), False, 'import torch\n'), ((4370, 4410), 'torch.nn.InstanceNorm1d', 'torch.nn.InstanceNorm1d', (['self.filters[5]'], {}), '(self.filters[5])\n', (4393, 4410), False, 'import torch\n'), ((4606, 4656), 'models.modelUtils.ChebConv', 'ChebConv', (['self.filters[6]', 'self.filters[5]', 'self.K'], {}), '(self.filters[6], self.filters[5], self.K)\n', (4614, 4656), False, 'from models.modelUtils import ChebConv, Pool, residualBlock\n'), ((4686, 4736), 'models.modelUtils.ChebConv', 'ChebConv', (['self.filters[5]', 'self.filters[4]', 'self.K'], {}), '(self.filters[5], self.filters[4], self.K)\n', (4694, 4736), False, 'from models.modelUtils import ChebConv, Pool, residualBlock\n'), ((4851, 4916), 'models.modelUtils.ChebConv', 'ChebConv', (['(self.filters[4] + outsize1 + 
2)', 'self.filters[3]', 'self.K'], {}), '(self.filters[4] + outsize1 + 2, self.filters[3], self.K)\n', (4859, 4916), False, 'from models.modelUtils import ChebConv, Pool, residualBlock\n'), ((4954, 5004), 'models.modelUtils.ChebConv', 'ChebConv', (['self.filters[3]', 'self.filters[2]', 'self.K'], {}), '(self.filters[3], self.filters[2], self.K)\n', (4962, 5004), False, 'from models.modelUtils import ChebConv, Pool, residualBlock\n'), ((5112, 5177), 'models.modelUtils.ChebConv', 'ChebConv', (['(self.filters[2] + outsize2 + 2)', 'self.filters[1]', 'self.K'], {}), '(self.filters[2] + outsize2 + 2, self.filters[1], self.K)\n', (5120, 5177), False, 'from models.modelUtils import ChebConv, Pool, residualBlock\n'), ((5207, 5264), 'models.modelUtils.ChebConv', 'ChebConv', (['self.filters[1]', 'self.filters[0]', '(1)'], {'bias': '(False)'}), '(self.filters[1], self.filters[0], 1, bias=False)\n', (5215, 5264), False, 'from models.modelUtils import ChebConv, Pool, residualBlock\n'), ((5304, 5310), 'models.modelUtils.Pool', 'Pool', ([], {}), '()\n', (5308, 5310), False, 'from models.modelUtils import ChebConv, Pool, residualBlock\n'), ((5401, 5451), 'torch.nn.init.normal_', 'torch.nn.init.normal_', (['self.dec_lin.weight', '(0)', '(0.1)'], {}), '(self.dec_lin.weight, 0, 0.1)\n', (5422, 5451), False, 'import torch\n'), ((5505, 5529), 'torch.exp', 'torch.exp', (['(0.5 * log_var)'], {}), '(0.5 * log_var)\n', (5514, 5529), False, 'import torch\n'), ((5542, 5563), 'torch.randn_like', 'torch.randn_like', (['std'], {}), '(std)\n', (5558, 5563), False, 'import torch\n'), ((5875, 5884), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (5881, 5884), True, 'import torch.nn.functional as F\n'), ((6080, 6089), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (6086, 6089), True, 'import torch.nn.functional as F\n'), ((6220, 6229), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (6226, 6229), True, 'import torch.nn.functional as F\n'), ((6446, 6455), 
'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (6452, 6455), True, 'import torch.nn.functional as F\n'), ((6647, 6656), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (6653, 6656), True, 'import torch.nn.functional as F\n'), ((6873, 6882), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (6879, 6882), True, 'import torch.nn.functional as F\n'), ((412, 454), 'numpy.array', 'np.array', (['[2, 4, 8, 16, 32]'], {'dtype': 'np.intc'}), '([2, 4, 8, 16, 32], dtype=np.intc)\n', (420, 454), True, 'import numpy as np\n'), ((2737, 2772), 'torch.cat', 'torch.cat', (['[x1, y1, x2, y2]'], {'axis': '(1)'}), '([x1, y1, x2, y2], axis=1)\n', (2746, 2772), False, 'import torch\n'), ((3130, 3163), 'torch.cat', 'torch.cat', (['(x, skip, pos)'], {'axis': '(2)'}), '((x, skip, pos), axis=2)\n', (3139, 3163), False, 'import torch\n')] |
import unittest
import numpy as np
from dacbench import AbstractEnv
from dacbench.benchmarks import CMAESBenchmark
from dacbench.wrappers import ObservationWrapper
class TestObservationTrackingWrapper(unittest.TestCase):
    """Checks that ObservationWrapper turns dict observations into flat
    numpy arrays without disturbing the rest of the step/reset protocol."""

    def get_test_env(self) -> AbstractEnv:
        """Build a freshly seeded CMA-ES benchmark environment."""
        return CMAESBenchmark().get_benchmark(seed=42)

    def test_flatten(self):
        """A dict observation flattens to the expected 1-D vector."""
        wrapped = ObservationWrapper(self.get_test_env())
        state = {"b": 0, "a": np.array([0, 1.4, 3])}
        np.testing.assert_array_almost_equal(
            wrapped.flatten(state), np.array([0, 1.4, 3, 0])
        )

    def test_conversion_wrapper(self):
        """The wrapper returns ndarray states matching flatten() of the
        plain env's dict states, while reward/done/info pass through."""
        action = 0.2
        plain = self.get_test_env()
        plain_reset = plain.reset()
        plain_step, *plain_rest = plain.step(action)
        self.assertIsInstance(plain_reset, dict)
        wrapped = ObservationWrapper(self.get_test_env())
        wrapped_reset = wrapped.reset()
        wrapped_step, *wrapped_rest = wrapped.step(action)
        self.assertIsInstance(wrapped_reset, np.ndarray)
        self.assertListEqual(plain_rest, wrapped_rest)
        np.testing.assert_array_equal(wrapped.flatten(plain_reset), wrapped_reset)
        np.testing.assert_array_equal(wrapped.flatten(plain_step), wrapped_step)
| [
"numpy.array",
"numpy.testing.assert_array_almost_equal",
"dacbench.benchmarks.CMAESBenchmark"
] | [((283, 299), 'dacbench.benchmarks.CMAESBenchmark', 'CMAESBenchmark', ([], {}), '()\n', (297, 299), False, 'from dacbench.benchmarks import CMAESBenchmark\n'), ((561, 585), 'numpy.array', 'np.array', (['[0, 1.4, 3, 0]'], {}), '([0, 1.4, 3, 0])\n', (569, 585), True, 'import numpy as np\n'), ((595, 647), 'numpy.testing.assert_array_almost_equal', 'np.testing.assert_array_almost_equal', (['flat', 'expected'], {}), '(flat, expected)\n', (631, 647), True, 'import numpy as np\n'), ((480, 501), 'numpy.array', 'np.array', (['[0, 1.4, 3]'], {}), '([0, 1.4, 3])\n', (488, 501), True, 'import numpy as np\n')] |
import argparse
import csv
import random
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.preprocessing import normalize
from Wasserstein_Distance import (WassersteinMatcher, WassersteinRetriever,
load_embeddings, process_corpus)
def main(args):
    """Align bilingual dictionary definitions with Wasserstein distances.

    Loads embeddings and definition files for a source/target language pair,
    builds a shared TF-IDF vocabulary, then runs the requested combinations
    of paradigm (matching/retrieval) and metric (wmd/snk), printing results
    or appending them to per-combination CSV files when --batch is set.
    """
    np.seterr(divide="ignore") # POT has issues with divide by zero errors
    source_lang = args.source_lang
    target_lang = args.target_lang
    source_vectors_filename = args.source_vector
    target_vectors_filename = args.target_vector
    vectors_source = load_embeddings(source_vectors_filename)
    vectors_target = load_embeddings(target_vectors_filename)
    source_defs_filename = args.source_defs
    target_defs_filename = args.target_defs
    batch = args.batch
    input_mode = args.mode
    input_paradigm = args.paradigm
    run_method = list()
    run_paradigm = list()
    # "all" expands to every paradigm / metric combination
    if input_paradigm == "all":
        run_paradigm.extend(("matching", "retrieval"))
    else:
        run_paradigm.append(input_paradigm)
    if input_mode == "all":
        run_method.extend(["wmd", "snk"])
    else:
        run_method.append(input_mode)
    # one definition per line in each file
    defs_source = [
        line.rstrip("\n") for line in open(source_defs_filename, encoding="utf8")
    ]
    defs_target = [
        line.rstrip("\n") for line in open(target_defs_filename, encoding="utf8")
    ]
    clean_src_corpus, clean_src_vectors, src_keys = process_corpus(
        set(vectors_source.keys()), defs_source, vectors_source, source_lang
    )
    clean_target_corpus, clean_target_vectors, target_keys = process_corpus(
        set(vectors_target.keys()), defs_target, vectors_target, target_lang
    )
    take = args.instances
    # only definitions present in both languages are usable
    common_keys = set(src_keys).intersection(set(target_keys))
    take = min(len(common_keys), take)  # you can't sample more than length
    experiment_keys = random.sample(common_keys, take)
    instances = len(experiment_keys)
    # NOTE(review): indexing a corpus with a set of keys — presumably
    # process_corpus returns a pandas Series; verify against its definition.
    clean_src_corpus = list(clean_src_corpus[experiment_keys])
    clean_target_corpus = list(clean_target_corpus[experiment_keys])
    # free the full embedding tables before building the dense matrix below
    del vectors_source, vectors_target, defs_source, defs_target
    vec = CountVectorizer().fit(clean_src_corpus + clean_target_corpus)
    # shared vocabulary: corpus words that have an embedding in either language
    common = [
        word
        for word in vec.get_feature_names()
        if word in clean_src_vectors or word in clean_target_vectors
    ]
    W_common = []
    # source embedding wins when a word exists in both languages
    for w in common:
        if w in clean_src_vectors:
            W_common.append(np.array(clean_src_vectors[w]))
        else:
            W_common.append(np.array(clean_target_vectors[w]))
    if not batch:
        print(
            f"{source_lang} - {target_lang}\n"
            + f" document sizes: {len(clean_src_corpus)}, {len(clean_target_corpus)}\n"
            + f" vocabulary size: {len(W_common)}"
        )
    W_common = np.array(W_common)
    W_common = normalize(W_common)
    # raw term frequencies weighted by idf over the joint corpus
    vect = TfidfVectorizer(vocabulary=common, dtype=np.double, norm=None)
    vect.fit(clean_src_corpus + clean_target_corpus)
    X_train_idf = vect.transform(clean_src_corpus)
    X_test_idf = vect.transform(clean_target_corpus)
    for paradigm in run_paradigm:
        WassersteinDriver = None
        if paradigm == "matching":
            WassersteinDriver = WassersteinMatcher
        else:
            WassersteinDriver = WassersteinRetriever
        for metric in run_method:
            if not batch:
                print(f"{paradigm} - {metric} on {source_lang} - {target_lang}")
            # sinkhorn regularization only for the "snk" metric
            clf = WassersteinDriver(
                W_embed=W_common, n_neighbors=5, n_jobs=14, sinkhorn=(metric == "snk")
            )
            clf.fit(X_train_idf[:instances], np.ones(instances))
            p_at_one, percentage = clf.align(
                X_test_idf[:instances], n_neighbors=instances
            )
            if not batch:
                print(f"P @ 1: {p_at_one}\n{percentage}% {instances} definitions\n")
            else:
                # append one CSV row per run: langs, size, precision@1, percentage
                fields = [
                    f"{source_lang}",
                    f"{target_lang}",
                    f"{instances}",
                    f"{p_at_one}",
                    f"{percentage}",
                ]
                with open(f"{metric}_{paradigm}_results.csv", "a") as f:
                    writer = csv.writer(f)
                    writer.writerow(fields)
if __name__ == "__main__":
    # Command-line entry point: collect the alignment configuration and
    # hand the parsed namespace to main().
    cli = argparse.ArgumentParser(
        description="align dictionaries using wmd and wasserstein distance"
    )
    # positional path/name arguments, in the order they must appear
    for arg_name, arg_help in (
        ("source_lang", "source language short name"),
        ("target_lang", "target language short name"),
        ("source_vector", "path of the source vector"),
        ("target_vector", "path of the target vector"),
        ("source_defs", "path of the source definitions"),
        ("target_defs", "path of the target definitions"),
    ):
        cli.add_argument(arg_name, help=arg_help)
    cli.add_argument(
        "-b",
        "--batch",
        action="store_true",
        help="running in batch (store results in csv) or "
        "running a single instance (output the results)",
    )
    cli.add_argument(
        "mode",
        choices=["all", "wmd", "snk"],
        default="all",
        help="which methods to run",
    )
    cli.add_argument(
        "paradigm",
        choices=["all", "retrieval", "matching"],
        default="all",
        help="which paradigms to align with",
    )
    cli.add_argument(
        "-n",
        "--instances",
        help="number of instances in each language to retrieve",
        default=1000,
        type=int,
    )
    main(cli.parse_args())
| [
"random.sample",
"numpy.ones",
"argparse.ArgumentParser",
"sklearn.feature_extraction.text.CountVectorizer",
"csv.writer",
"numpy.array",
"sklearn.feature_extraction.text.TfidfVectorizer",
"Wasserstein_Distance.load_embeddings",
"sklearn.preprocessing.normalize",
"numpy.seterr"
] | [((349, 375), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""ignore"""'}), "(divide='ignore')\n", (358, 375), True, 'import numpy as np\n'), ((611, 651), 'Wasserstein_Distance.load_embeddings', 'load_embeddings', (['source_vectors_filename'], {}), '(source_vectors_filename)\n', (626, 651), False, 'from Wasserstein_Distance import WassersteinMatcher, WassersteinRetriever, load_embeddings, process_corpus\n'), ((673, 713), 'Wasserstein_Distance.load_embeddings', 'load_embeddings', (['target_vectors_filename'], {}), '(target_vectors_filename)\n', (688, 713), False, 'from Wasserstein_Distance import WassersteinMatcher, WassersteinRetriever, load_embeddings, process_corpus\n'), ((1920, 1952), 'random.sample', 'random.sample', (['common_keys', 'take'], {}), '(common_keys, take)\n', (1933, 1952), False, 'import random\n'), ((2867, 2885), 'numpy.array', 'np.array', (['W_common'], {}), '(W_common)\n', (2875, 2885), True, 'import numpy as np\n'), ((2901, 2920), 'sklearn.preprocessing.normalize', 'normalize', (['W_common'], {}), '(W_common)\n', (2910, 2920), False, 'from sklearn.preprocessing import normalize\n'), ((2932, 2994), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'vocabulary': 'common', 'dtype': 'np.double', 'norm': 'None'}), '(vocabulary=common, dtype=np.double, norm=None)\n', (2947, 2994), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n'), ((4403, 4500), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""align dictionaries using wmd and wasserstein distance"""'}), "(description=\n 'align dictionaries using wmd and wasserstein distance')\n", (4426, 4500), False, 'import argparse\n'), ((2201, 2218), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {}), '()\n', (2216, 2218), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n'), ((2512, 2542), 'numpy.array', 'np.array', (['clean_src_vectors[w]'], {}), 
'(clean_src_vectors[w])\n', (2520, 2542), True, 'import numpy as np\n'), ((2586, 2619), 'numpy.array', 'np.array', (['clean_target_vectors[w]'], {}), '(clean_target_vectors[w])\n', (2594, 2619), True, 'import numpy as np\n'), ((3699, 3717), 'numpy.ones', 'np.ones', (['instances'], {}), '(instances)\n', (3706, 3717), True, 'import numpy as np\n'), ((4302, 4315), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (4312, 4315), False, 'import csv\n')] |
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, models
from os import walk
from os.path import join,splitext
# Load the pre-trained character-classification CNN once at import time.
characterRecognition = tf.keras.models.load_model('/home/ff5v/git-file/car_template_recognition/CNN/weights/char_binary_model.h5')
def opencvReadPlate(img):
    """Segment dark characters from a licence-plate image and classify each one.

    Thresholds the image for black pixels in HSV space, finds external contours,
    keeps contours whose size and aspect ratio look like characters, classifies
    each crop with the CNN, and returns the concatenated plate string.

    Side effects: draws a red rectangle around every accepted character on
    ``img`` (mutates the argument) and shows each crop in a blocking OpenCV
    window (``cv2.waitKey(0)``).
    """
    charList=[]
    # HSV range for black pixels (the characters); the white range below is
    # defined but unused in this function.
    lower_black=np.array([0,0,0])
    upper_black=np.array([180,255,46])
    lower_white = np.array([0, 0, 65])
    upper_white = np.array([180, 80, 255])
    # convert to the HSV colour model
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # binary mask of black pixels
    mask = cv2.inRange(hsv, lower_black, upper_black)
    # detect only external contours
    ctrs, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # cv2.boundingRect returns (x, y, w, h): top-left corner plus width/height;
    # sort contours left-to-right by their x coordinate.
    sorted_ctrs = sorted(ctrs, key=lambda ctr: cv2.boundingRect(ctr)[0])
    img_area = img.shape[0]*img.shape[1]
    for i, ctr in enumerate(sorted_ctrs):
        x, y, w, h = cv2.boundingRect(ctr)
        roi_area = w*h
        non_max_sup = roi_area/img_area  # box area / whole-image area
        # keep boxes covering a plausible fraction of the image...
        if((non_max_sup >= 0.011) and (non_max_sup < 0.1)):
            # ...with a tall, character-like aspect ratio, not touching the border
            if ((h>1.2*w) and (10.6*w>=h) and (x!=0) and (y!=0)):
                char = mask[y:y+h,x:x+w]
                # replicate the single mask channel into 3 channels for the CNN
                char = char.reshape(char.shape[0],char.shape[1],1)
                char=np.concatenate([char,char,char],2)
                cv2.imshow('char', char)
                cv2.waitKey(0)  # blocks until a key is pressed
                charList.append(cnnCharRecognition(char))
                cv2.rectangle(img,(x,y),( x + w, y + h ),(90,0,255),2)
    licensePlate="".join(charList)
    return licensePlate
def cnnCharRecognition(img):
    """Classify a single character crop with the CNN and return its symbol.

    The model predicts 34 classes: the digits 0-9 followed by the letters A-Z
    with the ambiguous letters I and O omitted.
    """
    classes = '0123456789ABCDEFGHJKLMNPQRSTUVWXYZ'
    # Resize to the network's 224x224 input and scale pixels to [0, 1].
    batch = cv2.resize(img, (224, 224)).astype('float32') / 255.
    batch = batch.reshape((1, 224, 224, 3))
    scores = characterRecognition.predict(batch)
    return classes[np.argmax(scores)]
# Demo: read a sample image, run plate recognition, and display the result.
img = cv2.imread('/home/ff5v/train_1/default/1.jpg')
print(type(img))
print(img.shape)
licensePlate = opencvReadPlate(img)
print("OpenCV+CNN : " + licensePlate)
# Draw the recognized string onto the image and show it in a window.
cv2.putText(img, licensePlate, (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2)
cv2.imshow('result', img)
cv2.waitKey(0) | [
"cv2.rectangle",
"cv2.inRange",
"cv2.boundingRect",
"numpy.argmax",
"cv2.imshow",
"cv2.putText",
"numpy.array",
"tensorflow.keras.models.load_model",
"cv2.cvtColor",
"numpy.concatenate",
"cv2.findContours",
"cv2.resize",
"cv2.waitKey",
"cv2.imread"
] | [((183, 300), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""/home/ff5v/git-file/car_template_recognition/CNN/weights/char_binary_model.h5"""'], {}), "(\n '/home/ff5v/git-file/car_template_recognition/CNN/weights/char_binary_model.h5'\n )\n", (209, 300), True, 'import tensorflow as tf\n'), ((2426, 2472), 'cv2.imread', 'cv2.imread', (['"""/home/ff5v/train_1/default/1.jpg"""'], {}), "('/home/ff5v/train_1/default/1.jpg')\n", (2436, 2472), False, 'import cv2\n'), ((2586, 2677), 'cv2.putText', 'cv2.putText', (['img', 'licensePlate', '(40, 40)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(255, 255, 0)', '(2)'], {}), '(img, licensePlate, (40, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,\n 255, 0), 2)\n', (2597, 2677), False, 'import cv2\n'), ((2675, 2700), 'cv2.imshow', 'cv2.imshow', (['"""result"""', 'img'], {}), "('result', img)\n", (2685, 2700), False, 'import cv2\n'), ((2702, 2716), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (2713, 2716), False, 'import cv2\n'), ((381, 400), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (389, 400), True, 'import numpy as np\n'), ((416, 440), 'numpy.array', 'np.array', (['[180, 255, 46]'], {}), '([180, 255, 46])\n', (424, 440), True, 'import numpy as np\n'), ((460, 480), 'numpy.array', 'np.array', (['[0, 0, 65]'], {}), '([0, 0, 65])\n', (468, 480), True, 'import numpy as np\n'), ((500, 524), 'numpy.array', 'np.array', (['[180, 80, 255]'], {}), '([180, 80, 255])\n', (508, 524), True, 'import numpy as np\n'), ((565, 601), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2HSV'], {}), '(img, cv2.COLOR_BGR2HSV)\n', (577, 601), False, 'import cv2\n'), ((632, 674), 'cv2.inRange', 'cv2.inRange', (['hsv', 'lower_black', 'upper_black'], {}), '(hsv, lower_black, upper_black)\n', (643, 674), False, 'import cv2\n'), ((708, 774), 'cv2.findContours', 'cv2.findContours', (['mask', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n', (724, 
774), False, 'import cv2\n'), ((2123, 2150), 'cv2.resize', 'cv2.resize', (['img', '(224, 224)'], {}), '(img, (224, 224))\n', (2133, 2150), False, 'import cv2\n'), ((2358, 2384), 'numpy.argmax', 'np.argmax', (['new_predictions'], {}), '(new_predictions)\n', (2367, 2384), True, 'import numpy as np\n'), ((1026, 1047), 'cv2.boundingRect', 'cv2.boundingRect', (['ctr'], {}), '(ctr)\n', (1042, 1047), False, 'import cv2\n'), ((1429, 1466), 'numpy.concatenate', 'np.concatenate', (['[char, char, char]', '(2)'], {}), '([char, char, char], 2)\n', (1443, 1466), True, 'import numpy as np\n'), ((1517, 1541), 'cv2.imshow', 'cv2.imshow', (['"""char"""', 'char'], {}), "('char', char)\n", (1527, 1541), False, 'import cv2\n'), ((1559, 1573), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1570, 1573), False, 'import cv2\n'), ((1650, 1709), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + w, y + h)', '(90, 0, 255)', '(2)'], {}), '(img, (x, y), (x + w, y + h), (90, 0, 255), 2)\n', (1663, 1709), False, 'import cv2\n'), ((884, 905), 'cv2.boundingRect', 'cv2.boundingRect', (['ctr'], {}), '(ctr)\n', (900, 905), False, 'import cv2\n')] |
from __future__ import print_function
import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.insert(0,'../powderday/agn_models/')
from hopkins import agn_spectrum as hopkins_agn
from astropy import units as u
from astropy import constants as const
import h5py
from hyperion.model import ModelOutput
# Path to the CLUMPY torus model grid (HDF5); download link is in the
# IOError message raised by Nenkova2008.__init__ below.
BH_modelfile = "/home/desika.narayanan/pd_git/powderday/agn_models/clumpy_models_201410_tvavg.hdf5"
nenkova_params = [5,30,0,1.5,30,40]  # Nenkova+ (2008) model parameters
class Nenkova2008:
        """Holder for Nenkova et al. (2008) CLUMPY torus model parameters,
        backed by the HDF5 model grid at ``BH_modelfile``.

        NOTE(review): ``check_params`` is called in ``__init__`` but is not
        defined anywhere in this file as shown -- confirm it exists elsewhere,
        otherwise instantiation raises AttributeError.
        """
        def __init__(self, N0=5, Y=30, i=0, q=1.5, sig=30, tv=40):
                # Model parameters; defaults match ``nenkova_params`` above.
                self.N0 = N0
                self.Y = Y
                self.i = i
                self.q = q
                self.sig = sig
                self.tv = tv
                # Open the model grid read-only; re-raise with a download hint
                # when the file is missing.
                try:
                        self.h = h5py.File(BH_modelfile, 'r')
                except IOError:
                        raise IOError('Unable to find Nenkova BH model file. '
                                      'Check the path in parameters master, or '
                                      'download the file here: https://www.clump'
                                      'y.org/downloads/clumpy_models_201410_tvav'
                                      'g.hdf5')
                self.check_params()
def nenkova_agn_spectrum(log_L_bol):
        """Return (log10(nu), log10(L)) for a Nenkova torus SED scaled to the
        Hopkins AGN template at the requested bolometric luminosity.

        The last four entries of the frequency array are low-frequency anchor
        points (log10(nu) = -1..-4) whose luminosities are set to zero.
        """
        model = h5py.File(BH_modelfile, 'r')

        # Frequencies from the tabulated wavelengths, in log10, padded with
        # four anchor points.
        freqs = np.log10(3e14 / model['wave'][:])
        freqs = np.concatenate((freqs, [-1, -2, -3, -4]))

        torus_flux = model['flux_tor'][:][1][0]
        hopkins_nu, hopkins_lum = hopkins_agn(log_L_bol)

        # Offset the torus SED by the Hopkins template interpolated onto our
        # grid, then zero-pad to match the anchor frequencies.
        lum = np.log10(torus_flux) + np.interp(
                freqs[:-4], hopkins_nu[:-4], hopkins_lum[:-4])
        lum = np.concatenate((lum, [0, 0, 0, 0]))

        return freqs, lum
# --- Nenkova torus model: SEDs over a grid of bolometric luminosities ---
fig = plt.figure()
ax = fig.add_subplot(111)
log_lum = np.linspace(9,13,100)*u.Lsun
# Colour-code curves by luminosity.
norm = matplotlib.colors.Normalize(
    vmin = np.min(log_lum.value),
    vmax = np.max(log_lum.value))
c_m = matplotlib.cm.viridis_r
s_m = matplotlib.cm.ScalarMappable(cmap = c_m,norm=norm)
s_m.set_array([])
for lum in log_lum:
        print('lum = %e'%lum.value)
        nu,bh_fnu = nenkova_agn_spectrum(lum.value)
        # convert from log-space to physical units, dropping the four
        # zero-padded anchor points
        bh_fnu = bh_fnu[0:-4]
        bh_fnu = 10.**bh_fnu * u.erg/u.s
        bh_fnu = bh_fnu.to(u.Lsun)
        nu = nu[0:-4]
        nu = 10.**nu
        nu *= u.Hz
        lam = (const.c/nu).to(u.micron)
        ax.loglog(lam,bh_fnu,color=s_m.to_rgba(lum.value))
ax.set_xlabel(r'Wavelength ($\mu$m)')
ax.set_ylabel(r'F$_\nu$')
cb = fig.colorbar(s_m,orientation='vertical')
cb.set_label(r'Black Hole L$_\mathrm{bol}$ (L$_\odot$)')
cb.ax.tick_params(labelsize=8)
plt.savefig('nenkova.png',dpi=300)
# --- Hopkins template alone: same plot without the torus model ---
fig = plt.figure()
ax = fig.add_subplot(111)
log_lum = np.linspace(9,13,100)*u.Lsun
norm = matplotlib.colors.Normalize(
    vmin = np.min(log_lum.value),
    vmax = np.max(log_lum.value))
c_m = matplotlib.cm.viridis_r
s_m = matplotlib.cm.ScalarMappable(cmap = c_m,norm=norm)
s_m.set_array([])
for lum in log_lum:
        print('lum = %e'%lum.value)
        nu,bh_fnu = hopkins_agn(lum.value)
        # convert from log-space to physical units, dropping the four
        # zero-padded anchor points
        bh_fnu = bh_fnu[0:-4]
        bh_fnu = 10.**bh_fnu * u.erg/u.s
        bh_fnu = bh_fnu.to(u.Lsun)
        nu = nu[0:-4]
        nu = 10.**nu
        nu *= u.Hz
        lam = (const.c/nu).to(u.micron)
        ax.loglog(lam,bh_fnu,color=s_m.to_rgba(lum.value))
#load in the bh sed from a powderday run
'''
data = np.load('/ufrc/narayanan/desika.narayanan/pd_runs/ena/bh_sed.npz')
nholes = data['luminosity'].shape[0]
for i in range(nholes):
    pd_nu = data['nu']*u.Hz
    pd_lam = (const.c/pd_nu).to(u.micron)
    pd_fnu = (data['fnu'][i][:]*u.erg/u.s).to(u.Lsun).value
    if data['luminosity'][i] > 0:
        ax.plot(pd_lam.value,pd_fnu)
'''
'''
#now plot the powderday SED
run = '/ufrc/narayanan/desika.narayanan/pd_runs/ena/example.094.rtout.bhon.sed'
m = ModelOutput(run)
wav,flux = m.get_sed(inclination='all',aperture=-1)
fullrun_wav = np.asarray(wav)*u.micron
fullrun_flux = np.asarray(flux)*u.erg/u.s
fullrun_nu = (const.c/fullrun_wav).to(u.Hz)
fullrun_fnu = fullrun_flux/fullrun_nu
ax.plot(fullrun_wav.value,fullrun_fnu[0,:].value/1.e20)
'''
ax.set_xlabel(r'Wavelength ($\mu$m)')
ax.set_ylabel(r'F$_\nu$')
cb = fig.colorbar(s_m,orientation='vertical')
cb.set_label(r'Black Hole L$_\mathrm{bol}$ (L$_\odot$)')
cb.ax.tick_params(labelsize=8)
fig.savefig('hopkins.png',dpi=300)
| [
"sys.path.insert",
"matplotlib.pyplot.savefig",
"numpy.log10",
"matplotlib.use",
"numpy.min",
"h5py.File",
"numpy.max",
"matplotlib.pyplot.figure",
"matplotlib.cm.ScalarMappable",
"numpy.linspace",
"numpy.concatenate",
"numpy.interp",
"hopkins.agn_spectrum"
] | [((56, 77), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (70, 77), False, 'import matplotlib\n'), ((141, 187), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../powderday/agn_models/"""'], {}), "(0, '../powderday/agn_models/')\n", (156, 187), False, 'import sys\n'), ((1679, 1691), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1689, 1691), True, 'import matplotlib.pyplot as plt\n'), ((1900, 1949), 'matplotlib.cm.ScalarMappable', 'matplotlib.cm.ScalarMappable', ([], {'cmap': 'c_m', 'norm': 'norm'}), '(cmap=c_m, norm=norm)\n', (1928, 1949), False, 'import matplotlib\n'), ((2533, 2568), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""nenkova.png"""'], {'dpi': '(300)'}), "('nenkova.png', dpi=300)\n", (2544, 2568), True, 'import matplotlib.pyplot as plt\n'), ((2587, 2599), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2597, 2599), True, 'import matplotlib.pyplot as plt\n'), ((2808, 2857), 'matplotlib.cm.ScalarMappable', 'matplotlib.cm.ScalarMappable', ([], {'cmap': 'c_m', 'norm': 'norm'}), '(cmap=c_m, norm=norm)\n', (2836, 2857), False, 'import matplotlib\n'), ((1206, 1234), 'h5py.File', 'h5py.File', (['BH_modelfile', '"""r"""'], {}), "(BH_modelfile, 'r')\n", (1215, 1234), False, 'import h5py\n'), ((1281, 1297), 'numpy.log10', 'np.log10', (['nu_vec'], {}), '(nu_vec)\n', (1289, 1297), True, 'import numpy as np\n'), ((1311, 1353), 'numpy.concatenate', 'np.concatenate', (['(nu_vec, [-1, -2, -3, -4])'], {}), '((nu_vec, [-1, -2, -3, -4]))\n', (1325, 1353), True, 'import numpy as np\n'), ((1435, 1457), 'hopkins.agn_spectrum', 'hopkins_agn', (['log_L_bol'], {}), '(log_L_bol)\n', (1446, 1457), True, 'from hopkins import agn_spectrum as hopkins_agn\n'), ((1587, 1629), 'numpy.concatenate', 'np.concatenate', (['(l_band_vec, [0, 0, 0, 0])'], {}), '((l_band_vec, [0, 0, 0, 0]))\n', (1601, 1629), True, 'import numpy as np\n'), ((1728, 1751), 'numpy.linspace', 'np.linspace', (['(9)', '(13)', '(100)'], {}), '(9, 13, 
100)\n', (1739, 1751), True, 'import numpy as np\n'), ((2637, 2660), 'numpy.linspace', 'np.linspace', (['(9)', '(13)', '(100)'], {}), '(9, 13, 100)\n', (2648, 2660), True, 'import numpy as np\n'), ((2947, 2969), 'hopkins.agn_spectrum', 'hopkins_agn', (['lum.value'], {}), '(lum.value)\n', (2958, 2969), True, 'from hopkins import agn_spectrum as hopkins_agn\n'), ((1475, 1501), 'numpy.log10', 'np.log10', (['l_band_vec_torus'], {}), '(l_band_vec_torus)\n', (1483, 1501), True, 'import numpy as np\n'), ((1504, 1560), 'numpy.interp', 'np.interp', (['nu_vec[:-4]', 'agn_nu[:-4]', 'agn_l_band_vec[:-4]'], {}), '(nu_vec[:-4], agn_nu[:-4], agn_l_band_vec[:-4])\n', (1513, 1560), True, 'import numpy as np\n'), ((1806, 1827), 'numpy.min', 'np.min', (['log_lum.value'], {}), '(log_lum.value)\n', (1812, 1827), True, 'import numpy as np\n'), ((1840, 1861), 'numpy.max', 'np.max', (['log_lum.value'], {}), '(log_lum.value)\n', (1846, 1861), True, 'import numpy as np\n'), ((2714, 2735), 'numpy.min', 'np.min', (['log_lum.value'], {}), '(log_lum.value)\n', (2720, 2735), True, 'import numpy as np\n'), ((2748, 2769), 'numpy.max', 'np.max', (['log_lum.value'], {}), '(log_lum.value)\n', (2754, 2769), True, 'import numpy as np\n'), ((767, 795), 'h5py.File', 'h5py.File', (['BH_modelfile', '"""r"""'], {}), "(BH_modelfile, 'r')\n", (776, 795), False, 'import h5py\n')] |
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Tuple
import numpy as np
import nnabla as nn
import nnabla.functions as F
def classification_loss_with_orthogonal_loss(
    pred_logit: nn.Variable, label: nn.Variable, transformation_mat: nn.Variable, reg_weight=0.001
) -> Tuple[nn.Variable, Dict[str, nn.Variable]]:
    """Softmax cross-entropy classification loss plus an orthogonality
    regularizer on the feature transformation matrix.

    Args:
        pred_logit (nn.Variable): pred logit, shape(batch, num_classes)
        label (nn.Variable): label, shape(batch, 1)
        transformation_mat (nn.Variable): label, shape(batch, K, K)
        reg_weight (float): weight applied to the orthogonality penalty

    Returns:
        Tuple[nn.Variable, Dict[str, nn.Variable]]: loss and internal loss
    """
    classify_loss = F.mean(F.softmax_cross_entropy(pred_logit, label))

    # Push A @ A^T towards the identity so the transformation stays orthogonal.
    batch_size, k, _ = transformation_mat.shape
    gram = F.batch_matmul(
        transformation_mat, F.transpose(transformation_mat, (0, 2, 1)))
    identity = nn.Variable.from_numpy_array(
        np.tile(np.eye(k, dtype=np.float32), (batch_size, 1, 1)))

    # Frobenius norm of (A A^T - I), averaged over the batch.
    mat_diff = F.reshape(gram - identity, (batch_size, -1))
    mat_loss = F.mean(F.norm(mat_diff, axis=1))

    total_loss = classify_loss + mat_loss * reg_weight
    return total_loss, {
        "classify_loss": classify_loss,
        "mat_loss": mat_loss,
        "mat_diff": mat_diff,
    }
| [
"nnabla.functions.transpose",
"nnabla.functions.softmax_cross_entropy",
"numpy.eye",
"nnabla.Variable.from_numpy_array",
"nnabla.functions.norm",
"nnabla.functions.reshape",
"nnabla.functions.mean"
] | [((1257, 1299), 'nnabla.functions.softmax_cross_entropy', 'F.softmax_cross_entropy', (['pred_logit', 'label'], {}), '(pred_logit, label)\n', (1280, 1299), True, 'import nnabla.functions as F\n'), ((1320, 1346), 'nnabla.functions.mean', 'F.mean', (['cross_entropy_loss'], {}), '(cross_entropy_loss)\n', (1326, 1346), True, 'import nnabla.functions as F\n'), ((1645, 1687), 'nnabla.Variable.from_numpy_array', 'nn.Variable.from_numpy_array', (['target_array'], {}), '(target_array)\n', (1673, 1687), True, 'import nnabla as nn\n'), ((1761, 1798), 'nnabla.functions.reshape', 'F.reshape', (['mat_diff', '(batch_size, -1)'], {}), '(mat_diff, (batch_size, -1))\n', (1770, 1798), True, 'import nnabla.functions as F\n'), ((1464, 1506), 'nnabla.functions.transpose', 'F.transpose', (['transformation_mat', '(0, 2, 1)'], {}), '(transformation_mat, (0, 2, 1))\n', (1475, 1506), True, 'import nnabla.functions as F\n'), ((1583, 1610), 'numpy.eye', 'np.eye', (['k'], {'dtype': 'np.float32'}), '(k, dtype=np.float32)\n', (1589, 1610), True, 'import numpy as np\n'), ((1821, 1845), 'nnabla.functions.norm', 'F.norm', (['mat_diff'], {'axis': '(1)'}), '(mat_diff, axis=1)\n', (1827, 1845), True, 'import nnabla.functions as F\n')] |
import numpy as np
from PuzzleLib import Config
from PuzzleLib.Backend import gpuarray
from PuzzleLib.Modules.Module import ModuleError
from PuzzleLib.Modules.DeconvND import DeconvND
class Deconv1D(DeconvND):
	"""1d deconvolution (transposed convolution) module.

	Implemented by delegating to the 2d ``DeconvND`` parent with a singleton
	height dimension: tensors are reshaped between (batch, maps, size) and
	(batch, maps, 1, size) around every parent call.
	"""

	def __init__(self, inmaps, outmaps, size, stride=1, pad=0, dilation=1, postpad=0, wscale=1.0, useBias=True,
				 name=None, initscheme=None, empty=False, groups=1):
		# Height components of size/stride/pad/dilation/postpad are fixed to
		# identity values (1 or 0) so the 2d parent behaves one-dimensionally.
		super().__init__(
			2, inmaps, outmaps, (1, size), (1, stride), (0, pad), (1, dilation), (0, postpad), wscale, useBias,
			name, initscheme, empty, groups
		)
		self.registerBlueprint(locals())

	def optimizeForShape(self, shape, memlimit=None):
		# Insert the singleton height axis before delegating to the 2d parent.
		shape = shape[:2] + (1, ) + shape[2:]
		super().optimizeForShape(shape, memlimit)

	def updateData(self, data):
		# Forward pass: lift 3d input to 4d, run the 2d deconv, squeeze back.
		data = data.reshape(*data.shape[:2], 1, *data.shape[2:])
		super().updateData(data)
		self.data = self.data.reshape(*self.data.shape[:2], *self.data.shape[3:])

	def updateGrad(self, grad):
		# Backward data pass: both the gradient and the cached input must be 4d
		# for the parent; the 3d cached input is restored afterwards.
		grad = grad.reshape(*grad.shape[:2], 1, *grad.shape[2:])

		data = self.inData
		self.inData = data.reshape(*data.shape[:2], 1, *data.shape[2:])

		super().updateGrad(grad)
		self.inData = data

		self.grad = self.grad.reshape(*self.grad.shape[:2], *self.grad.shape[3:])

	def accGradParams(self, grad, scale=1.0, momentum=0.0):
		# Backward parameter pass: same 4d lifting/restoring as updateGrad.
		grad = grad.reshape(*grad.shape[:2], 1, *grad.shape[2:])

		data = self.inData
		self.inData = data.reshape(*data.shape[:2], 1, *data.shape[2:])

		super().accGradParams(grad, scale, momentum)
		self.inData = data

	def checkDataShape(self, shape):
		# Validate a (batch, maps, size) input shape against the weight tensor.
		if len(shape) != 3:
			raise ModuleError("Data must be 3d tensor")

		_, inmaps, _ = shape
		if inmaps != self.W.shape[0]:
			raise ModuleError("Data has %d maps (expected: %d)" % (inmaps, self.W.shape[0]))

	def dataShapeFrom(self, shape):
		# Output shape of the transposed convolution for a given input shape.
		batchsize, inmaps, insize = shape
		_, outmaps, _, fsize = self.W.shape

		_, pad = self.pad
		_, postpad = self.postpad
		_, dilation = self.dilation
		_, stride = self.stride

		outmaps *= self.groups
		# Standard transposed-convolution output-size formula, plus postpad.
		outsize = (insize - 1) * stride + dilation * (fsize - 1) - 2 * pad + 1 + postpad

		return batchsize, outmaps, outsize

	def checkGradShape(self, shape):
		# Validate the incoming gradient shape (must match the output shape).
		if len(shape) != 3:
			raise ModuleError("Grad must be 3d tensor")

		_, outmaps, size = shape
		if outmaps != self.W.shape[1] * self.groups:
			raise ModuleError("Grad has %d maps (expected: %d)" % (outmaps, self.W.shape[1] * self.groups))

		if size + 2 * self.pad[1] < self.dilation[1] * (self.W.shape[3] - 1) + 1:
			raise ModuleError(
				"Grad maps height is too small (got %d, expected at least %d)" %
				(size + 2 * self.pad[1], self.dilation[1] * (self.W.shape[3] - 1) + 1)
			)

	def gradShapeFrom(self, shape):
		# Input-gradient shape for a given output-gradient shape
		# (the forward-convolution size formula).
		batchsize, outmaps, outsize = shape
		inmaps, _, _, fsize = self.W.shape

		_, pad = self.pad
		_, dilation = self.dilation
		_, stride = self.stride

		insize = (outsize + 2 * pad - dilation * (fsize - 1) - 1) // stride + 1

		return batchsize, inmaps, insize
return batchsize, inmaps, insize
def unittest():
	# These tests run only on the CUDA/HIP GPU backends; skip otherwise.
	if Config.backend not in {Config.Backend.cuda, Config.Backend.hip}:
		return

	multiMapsWithPadsTest()
	trainTest()
def multiMapsWithPadsTest():
	"""Check Deconv1D forward and backward passes against a naive NumPy
	reference implementation, with multiple maps, stride, pad and dilation."""
	batchsize, inmaps, size = 5, 4, 2
	outmaps, fsize, stride, pad, dilation = 4, 2, 2, 1, 2

	hostData = np.random.randn(batchsize, inmaps, size).astype(np.float32)

	data = gpuarray.to_gpu(hostData)
	deconv = Deconv1D(inmaps, outmaps, size=size, stride=stride, pad=pad, dilation=dilation, initscheme="gaussian")
	deconv(data)

	hostW, hostBias = deconv.W.get(), deconv.b.get()
	# Reference output is computed on the padded grid and cropped afterwards.
	hostOutData = np.zeros(deconv.data.shape[:2]+(deconv.data.shape[2]+2*pad, ), dtype=np.float32)

	for c in range(outmaps):
		hostOutData[:, c, :] = hostBias[0, c, 0, 0]

	# Naive transposed convolution: scatter each input value into the output.
	for b in range(batchsize):
		for oc in range(outmaps):
			for ic in range(inmaps):
				for x in range(size):
					for dx in range(fsize):
						hostOutData[b, oc, x * stride + dx * dilation] += hostW[ic, oc, 0, dx] * hostData[b, ic, x]

	assert np.allclose(hostOutData[:, :, pad:-pad], deconv.data.get())

	hostGrad = np.random.randn(*deconv.data.shape).astype(np.float32)

	grad = gpuarray.to_gpu(hostGrad)
	deconv.backward(grad)

	# Re-embed the gradient into the padded grid for the reference backward pass.
	hostExtGrad = np.zeros(grad.shape[:2] + (grad.shape[2] + 2 * pad, ), dtype=np.float32)
	hostExtGrad[:, :, pad:-pad] = hostGrad
	hostGrad = hostExtGrad

	hostInGrad = np.zeros(hostData.shape, dtype=np.float32)

	# Reference input gradient: gather from the (padded) output gradient.
	for b in range(batchsize):
		for ic in range(inmaps):
			for oc in range(outmaps):
				for x in range(size):
					for dx in range(fsize):
						hostInGrad[b, ic, x] += hostGrad[b, oc, x * stride + dx * dilation] * hostW[ic, oc, 0, dx]

	assert np.allclose(hostInGrad, deconv.grad.get())

	hostWGrad = np.zeros(deconv.getVar("W").grad.shape, dtype=np.float32)

	# Reference weight gradient: correlate input data with the output gradient.
	for b in range(batchsize):
		for ic in range(inmaps):
			for oc in range(outmaps):
				for dx in range(fsize):
					for x in range(size):
						hostWGrad[ic, oc, 0, dx] += hostGrad[b, oc, x * stride + dx * dilation] * hostData[b, ic, x]

	assert np.allclose(hostWGrad, deconv.getVar("W").grad.get())

	# Reference bias gradient: sum of the output gradient per output map.
	hostBGrad = np.empty(hostBias.shape, dtype=np.float32)
	for oc in range(outmaps):
		hostBGrad[0, oc, 0, 0] = np.sum(hostGrad[:, oc, :])

	assert np.allclose(hostBGrad, deconv.getVar("b").grad.get())
def trainTest():
	"""Smoke-test training: fit a Deconv1D layer to random targets with MSE
	and SGD, printing the error every 5 iterations (it should decrease)."""
	batchsize, inmaps, size = 5, 5, 2
	outmaps = 1
	fsize = 3

	data = gpuarray.to_gpu(np.random.normal(0.0, 1.0, (batchsize, inmaps, size)).astype(np.float32))

	deconv = Deconv1D(inmaps, outmaps, fsize)

	from PuzzleLib.Cost.MSE import MSE
	mse = MSE()

	# Target length 4 matches the deconv output size for these hyperparameters.
	target = gpuarray.to_gpu(np.random.normal(0.0, 1.0, (batchsize, outmaps, 4)).astype(np.float32))

	for i in range(100):
		learnRate = 1e-2

		deconv(data)
		error, grad = mse(deconv.data, target)

		deconv.backward(grad)
		deconv.updateParams(learnRate)

		if (i + 1) % 5 == 0:
			print("Iteration #%d error: %s" % (i + 1, error))
# Run the self-tests when executed as a script.
if __name__ == "__main__":
	unittest()
| [
"PuzzleLib.Backend.gpuarray.to_gpu",
"numpy.random.normal",
"PuzzleLib.Cost.MSE.MSE",
"PuzzleLib.Modules.Module.ModuleError",
"numpy.sum",
"numpy.zeros",
"numpy.empty",
"numpy.random.randn"
] | [((3241, 3266), 'PuzzleLib.Backend.gpuarray.to_gpu', 'gpuarray.to_gpu', (['hostData'], {}), '(hostData)\n', (3256, 3266), False, 'from PuzzleLib.Backend import gpuarray\n'), ((3461, 3551), 'numpy.zeros', 'np.zeros', (['(deconv.data.shape[:2] + (deconv.data.shape[2] + 2 * pad,))'], {'dtype': 'np.float32'}), '(deconv.data.shape[:2] + (deconv.data.shape[2] + 2 * pad,), dtype=\n np.float32)\n', (3469, 3551), True, 'import numpy as np\n'), ((3998, 4023), 'PuzzleLib.Backend.gpuarray.to_gpu', 'gpuarray.to_gpu', (['hostGrad'], {}), '(hostGrad)\n', (4013, 4023), False, 'from PuzzleLib.Backend import gpuarray\n'), ((4064, 4135), 'numpy.zeros', 'np.zeros', (['(grad.shape[:2] + (grad.shape[2] + 2 * pad,))'], {'dtype': 'np.float32'}), '(grad.shape[:2] + (grad.shape[2] + 2 * pad,), dtype=np.float32)\n', (4072, 4135), True, 'import numpy as np\n'), ((4217, 4259), 'numpy.zeros', 'np.zeros', (['hostData.shape'], {'dtype': 'np.float32'}), '(hostData.shape, dtype=np.float32)\n', (4225, 4259), True, 'import numpy as np\n'), ((4936, 4978), 'numpy.empty', 'np.empty', (['hostBias.shape'], {'dtype': 'np.float32'}), '(hostBias.shape, dtype=np.float32)\n', (4944, 4978), True, 'import numpy as np\n'), ((5387, 5392), 'PuzzleLib.Cost.MSE.MSE', 'MSE', ([], {}), '()\n', (5390, 5392), False, 'from PuzzleLib.Cost.MSE import MSE\n'), ((5033, 5059), 'numpy.sum', 'np.sum', (['hostGrad[:, oc, :]'], {}), '(hostGrad[:, oc, :])\n', (5039, 5059), True, 'import numpy as np\n'), ((1551, 1588), 'PuzzleLib.Modules.Module.ModuleError', 'ModuleError', (['"""Data must be 3d tensor"""'], {}), "('Data must be 3d tensor')\n", (1562, 1588), False, 'from PuzzleLib.Modules.Module import ModuleError\n'), ((1654, 1728), 'PuzzleLib.Modules.Module.ModuleError', 'ModuleError', (["('Data has %d maps (expected: %d)' % (inmaps, self.W.shape[0]))"], {}), "('Data has %d maps (expected: %d)' % (inmaps, self.W.shape[0]))\n", (1665, 1728), False, 'from PuzzleLib.Modules.Module import ModuleError\n'), ((2158, 2195), 
'PuzzleLib.Modules.Module.ModuleError', 'ModuleError', (['"""Grad must be 3d tensor"""'], {}), "('Grad must be 3d tensor')\n", (2169, 2195), False, 'from PuzzleLib.Modules.Module import ModuleError\n'), ((2281, 2374), 'PuzzleLib.Modules.Module.ModuleError', 'ModuleError', (["('Grad has %d maps (expected: %d)' % (outmaps, self.W.shape[1] * self.groups))"], {}), "('Grad has %d maps (expected: %d)' % (outmaps, self.W.shape[1] *\n self.groups))\n", (2292, 2374), False, 'from PuzzleLib.Modules.Module import ModuleError\n'), ((2457, 2609), 'PuzzleLib.Modules.Module.ModuleError', 'ModuleError', (["('Grad maps height is too small (got %d, expected at least %d)' % (size + 2 *\n self.pad[1], self.dilation[1] * (self.W.shape[3] - 1) + 1))"], {}), "('Grad maps height is too small (got %d, expected at least %d)' %\n (size + 2 * self.pad[1], self.dilation[1] * (self.W.shape[3] - 1) + 1))\n", (2468, 2609), False, 'from PuzzleLib.Modules.Module import ModuleError\n'), ((3173, 3213), 'numpy.random.randn', 'np.random.randn', (['batchsize', 'inmaps', 'size'], {}), '(batchsize, inmaps, size)\n', (3188, 3213), True, 'import numpy as np\n'), ((3935, 3970), 'numpy.random.randn', 'np.random.randn', (['*deconv.data.shape'], {}), '(*deconv.data.shape)\n', (3950, 3970), True, 'import numpy as np\n'), ((5226, 5279), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(1.0)', '(batchsize, inmaps, size)'], {}), '(0.0, 1.0, (batchsize, inmaps, size))\n', (5242, 5279), True, 'import numpy as np\n'), ((5420, 5471), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(1.0)', '(batchsize, outmaps, 4)'], {}), '(0.0, 1.0, (batchsize, outmaps, 4))\n', (5436, 5471), True, 'import numpy as np\n')] |
import os
import copy
import pickle
import numpy as np
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import torch
from tqdm import tqdm
from behavenet import get_user_dir
from behavenet import make_dir_if_not_exists
from behavenet.data.utils import build_data_generator
from behavenet.data.utils import load_labels_like_latents
from behavenet.fitting.eval import get_reconstruction
from behavenet.fitting.utils import experiment_exists
from behavenet.fitting.utils import get_best_model_and_data
from behavenet.fitting.utils import get_expt_dir
from behavenet.fitting.utils import get_lab_example
from behavenet.fitting.utils import get_session_dir
from behavenet.plotting import concat
from behavenet.plotting import get_crop
from behavenet.plotting import load_latents
from behavenet.plotting import load_metrics_csv_as_df
from behavenet.plotting import save_movie
# to ignore imports for sphix-autoapidoc
__all__ = [
'get_input_range', 'compute_range', 'get_labels_2d_for_trial', 'get_model_input',
'interpolate_2d', 'interpolate_1d', 'interpolate_point_path', 'plot_2d_frame_array',
'plot_1d_frame_array', 'make_interpolated', 'make_interpolated_multipanel',
'plot_psvae_training_curves', 'plot_hyperparameter_search_results',
'plot_label_reconstructions', 'plot_latent_traversals', 'make_latent_traversal_movie']
# ----------------------------------------
# low-level util functions
# ----------------------------------------
def get_input_range(
        input_type, hparams, sess_ids=None, sess_idx=0, model=None, data_gen=None, version=0,
        min_p=5, max_p=95, apply_label_masks=False):
    """Helper function to compute input range for a variety of data types.

    Parameters
    ----------
    input_type : :obj:`str`
        'latents' | 'labels' | 'labels_sc'
    hparams : :obj:`dict`
        needs to contain enough information to specify an autoencoder
    sess_ids : :obj:`list`, optional
        each entry is a session dict with keys 'lab', 'expt', 'animal', 'session'; for loading
        labels and labels_sc
    sess_idx : :obj:`int`, optional
        session index into data generator
    model : :obj:`AE` object, optional
        for generating latents if latent file does not exist
    data_gen : :obj:`ConcatSessionGenerator` object, optional
        for generating latents if latent file does not exist
    version : :obj:`int`, optional
        specify AE version for loading latents
    min_p : :obj:`int`, optional
        defines lower end of range; percentile in [0, 100]
    max_p : :obj:`int`, optional
        defines upper end of range; percentile in [0, 100]
    apply_label_masks : :obj:`bool`, optional
        `True` to set masked values to NaN in labels

    Returns
    -------
    :obj:`dict`
        keys are 'min' and 'max'

    """
    if input_type == 'latents':
        # load latents from the pickle exported for this AE version; export
        # them first (requires `model` and `data_gen`) if the file is missing
        latent_file = str('%s_%s_%s_%s_latents.pkl' % (
            hparams['lab'], hparams['expt'], hparams['animal'], hparams['session']))
        filename = os.path.join(
            hparams['expt_dir'], 'version_%i' % version, latent_file)
        if not os.path.exists(filename):
            from behavenet.fitting.eval import export_latents
            print('latents file not found at %s' % filename)
            print('exporting latents...', end='')
            filenames = export_latents(data_gen, model)
            filename = filenames[0]
            print('done')
        latents = pickle.load(open(filename, 'rb'))
        inputs = latents['latents']
    elif input_type == 'labels':
        labels = load_labels_like_latents(hparams, sess_ids, sess_idx=sess_idx)
        inputs = labels['latents']
    elif input_type == 'labels_sc':
        hparams2 = copy.deepcopy(hparams)
        hparams2['conditional_encoder'] = True  # to actually return labels
        labels_sc = load_labels_like_latents(
            hparams2, sess_ids, sess_idx=sess_idx, data_key='labels_sc')
        inputs = labels_sc['latents']
    else:
        raise NotImplementedError
    if apply_label_masks:
        # NaN-out masked label entries so they are ignored by the
        # nanpercentile-based range computation below
        masks = load_labels_like_latents(
            hparams, sess_ids, sess_idx=sess_idx, data_key='labels_masks')
        for i, m in zip(inputs, masks):
            i[m == 0] = np.nan
    input_range = compute_range(inputs, min_p=min_p, max_p=max_p)
    return input_range
def compute_range(values_list, min_p=5, max_p=95):
    """Compute min and max of a list of numbers using percentiles.

    Parameters
    ----------
    values_list : :obj:`list`
        list of np.ndarrays; min/max calculated over axis 0 once all lists are vertically stacked
    min_p : :obj:`int`
        defines lower end of range; percentile in [0, 100]
    max_p : :obj:`int`
        defines upper end of range; percentile in [0, 100]

    Returns
    -------
    :obj:`dict`
        lower ['min'] and upper ['max'] range of input

    """
    # Drop zero-length arrays up front; stacking the remainder is equivalent
    # to the original two-branch logic (when no array is empty the filtered
    # list is the full list), but simpler. np.vstack still raises on an
    # all-empty input, matching the previous behavior.
    non_empty = [arr for arr in values_list if len(arr) != 0]
    values = np.vstack(non_empty)
    # nanpercentile ignores NaNs introduced by label masking upstream
    ranges = {
        'min': np.nanpercentile(values, min_p, axis=0),
        'max': np.nanpercentile(values, max_p, axis=0)}
    return ranges
def get_labels_2d_for_trial(
        hparams, sess_ids, trial=None, trial_idx=None, sess_idx=0, dtype='test', data_gen=None):
    """Return scaled labels (in pixel space) for a given trial.

    Parameters
    ----------
    hparams : :obj:`dict`
        needs to contain enough information to build a data generator
    sess_ids : :obj:`list` of :obj:`dict`
        each entry is a session dict with keys 'lab', 'expt', 'animal', 'session'
    trial : :obj:`int`, optional
        trial index into all possible trials (train, val, test); one of `trial` or `trial_idx`
        must be specified; `trial` takes precedence over `trial_idx`
    trial_idx : :obj:`int`, optional
        trial index into trial type defined by `dtype`; one of `trial` or `trial_idx` must be
        specified; `trial` takes precedence over `trial_idx`
    sess_idx : :obj:`int`, optional
        session index into data generator
    dtype : :obj:`str`, optional
        data type that is indexed by `trial_idx`; 'train' | 'val' | 'test'
    data_gen : :obj:`ConcatSessionGenerator` object, optional
        for generating labels

    Returns
    -------
    :obj:`tuple`
        - labels_2d_pt (:obj:`torch.Tensor`) of shape (batch, n_labels, y_pix, x_pix)
        - labels_2d_np (:obj:`np.ndarray`) of shape (batch, n_labels, y_pix, x_pix)

    """

    if (trial_idx is not None) and (trial is not None):
        raise ValueError('only one of "trial" or "trial_idx" can be specified')

    if data_gen is None:
        # build a CPU-side data generator configured to return scaled labels
        hparams_new = copy.deepcopy(hparams)
        hparams_new['conditional_encoder'] = True  # ensure scaled labels are returned
        hparams_new['device'] = 'cpu'
        hparams_new['as_numpy'] = False
        hparams_new['batch_load'] = True
        data_gen = build_data_generator(hparams_new, sess_ids, export_csv=False)

    # get trial; translate trial_idx (within dtype) into an absolute trial index
    if trial is None:
        trial = data_gen.datasets[sess_idx].batch_idxs[dtype][trial_idx]
    batch = data_gen.datasets[sess_idx][trial]
    labels_2d_pt = batch['labels_sc']
    labels_2d_np = labels_2d_pt.cpu().detach().numpy()

    return labels_2d_pt, labels_2d_np
def get_model_input(
        data_generator, hparams, model, trial=None, trial_idx=None, sess_idx=0, max_frames=200,
        compute_latents=False, compute_2d_labels=True, compute_scaled_labels=False, dtype='test'):
    """Gather images, latents, and labels for a single trial.

    Parameters
    ----------
    data_generator: :obj:`ConcatSessionGenerator`
        for generating model input
    hparams : :obj:`dict`
        must specify the model and its associated data
    model : :obj:`behavenet.models` object
        model type
    trial : :obj:`int`, optional
        index into all trials (train, val, test); takes precedence over `trial_idx`
    trial_idx : :obj:`int`, optional
        index into the trials of type `dtype`; exactly one of `trial`/`trial_idx` is required
    sess_idx : :obj:`int`, optional
        session index into data generator
    max_frames : :obj:`int`, optional
        maximum size of batch to return
    compute_latents : :obj:`bool`, optional
        `True` to return latents
    compute_2d_labels : :obj:`bool`, optional
        `True` to return 2d label tensors of shape (batch, n_labels, y_pix, x_pix)
    compute_scaled_labels : :obj:`bool`, optional
        ignored if `compute_2d_labels` is `True`; if set, scaled labels are returned with shape
        (batch, n_labels) rather than (batch, n_labels, y_pix, x_pix)
    dtype : :obj:`str`, optional
        trial type indexed by `trial_idx`; 'train' | 'val' | 'test'

    Returns
    -------
    :obj:`tuple`
        - ims_pt (:obj:`torch.Tensor`) of shape (max_frames, n_channels, y_pix, x_pix)
        - ims_np (:obj:`np.ndarray`) of shape (max_frames, n_channels, y_pix, x_pix)
        - latents_np (:obj:`np.ndarray`) of shape (max_frames, n_latents)
        - labels_pt (:obj:`torch.Tensor`) of shape (max_frames, n_labels)
        - labels_np (:obj:`np.ndarray`) of shape (max_frames, n_labels)
        - labels_2d_pt (:obj:`torch.Tensor`) of shape (max_frames, n_labels, y_pix, x_pix)
        - labels_2d_np (:obj:`np.ndarray`) of shape (max_frames, n_labels, y_pix, x_pix)
    """
    if (trial_idx is not None) and (trial is not None):
        raise ValueError('only one of "trial" or "trial_idx" can be specified')
    if (trial_idx is None) and (trial is None):
        raise ValueError('one of "trial" or "trial_idx" must be specified')

    # resolve trial and pull out the batch
    dataset = data_generator.datasets[sess_idx]
    if trial is None:
        trial = dataset.batch_idxs[dtype][trial_idx]
    batch = dataset[trial]
    ims_pt = batch['images'][:max_frames]
    ims_np = ims_pt.cpu().detach().numpy()

    # continuous labels (only defined for label-aware model classes)
    model_class = hparams['model_class']
    if model_class in ('ae', 'vae', 'beta-tcvae'):
        labels_pt, labels_np = None, None
    elif model_class in ('cond-ae', 'cond-vae', 'cond-ae-msp', 'ps-vae', 'labels-images'):
        labels_pt = batch['labels'][:max_frames]
        labels_np = labels_pt.cpu().detach().numpy()
    else:
        raise NotImplementedError

    # one hot labels
    if hparams['conditional_encoder']:
        # generator already returns the scaled labels
        labels_2d_pt = batch['labels_sc'][:max_frames]
        labels_2d_np = labels_2d_pt.cpu().detach().numpy()
    elif compute_2d_labels:
        # rebuild a label-aware generator for this trial
        hparams['session_dir'], sess_ids = get_session_dir(hparams)
        labels_2d_pt, labels_2d_np = get_labels_2d_for_trial(hparams, sess_ids, trial=trial)
    elif compute_scaled_labels:
        # read scaled labels straight from the hdf5 file; no torch tensor in this case
        labels_2d_pt = None
        import h5py
        hdf5_file = dataset.paths['labels']
        with h5py.File(hdf5_file, 'r', libver='latest', swmr=True) as f:
            labels_2d_np = f['labels_sc'][str('trial_%04i' % trial)][()].astype('float32')
    else:
        labels_2d_pt, labels_2d_np = None, None

    # latents
    latents_np = None
    if compute_latents:
        if model_class in ('cond-ae-msp', 'ps-vae'):
            latents_np = model.get_transformed_latents(ims_pt, dataset=sess_idx, as_numpy=True)
        else:
            _, latents_np = get_reconstruction(
                model, ims_pt, labels=labels_pt, labels_2d=labels_2d_pt, return_latents=True)

    return ims_pt, ims_np, latents_np, labels_pt, labels_np, labels_2d_pt, labels_2d_np
def interpolate_2d(
        interp_type, model, ims_0, latents_0, labels_0, labels_sc_0, mins, maxes, input_idxs,
        n_frames, crop_type=None, mins_sc=None, maxes_sc=None, crop_kwargs=None,
        marker_idxs=None, ch=0):
    """Return reconstructed images created by interpolating through latent/label space.

    Builds an `n_frames` x `n_frames` grid by sweeping exactly two latent/label dimensions
    (given by `input_idxs`) between their `mins` and `maxes`, reconstructing an image at
    each grid point.

    Parameters
    ----------
    interp_type : :obj:`str`
        'latents' | 'labels'
    model : :obj:`behavenet.models` object
        autoencoder model
    ims_0 : :obj:`torch.Tensor`
        base images for interpolating labels, of shape (1, n_channels, y_pix, x_pix)
    latents_0 : :obj:`np.ndarray`
        base latents of shape (1, n_latents); only two of these dimensions will be changed if
        `interp_type='latents'`
    labels_0 : :obj:`np.ndarray`
        base labels of shape (1, n_labels)
    labels_sc_0 : :obj:`np.ndarray`
        base scaled labels in pixel space of shape (1, n_labels, y_pix, x_pix)
    mins : :obj:`array-like`
        minimum values of labels/latents, one for each dim
    maxes : :obj:`list`
        maximum values of labels/latents, one for each dim
    input_idxs : :obj:`list`
        indices of labels/latents that will be interpolated; for labels, must be y first, then x
        for proper marker recording
    n_frames : :obj:`int`
        number of interpolation points between mins and maxes (inclusive)
    crop_type : :obj:`str` or :obj:`NoneType`, optional
        currently only implements 'fixed'; if not None, cropped images are returned, and returned
        labels are also cropped so that they can be plotted on top of the cropped images; if None,
        returned cropped images are empty and labels are relative to original image size
    mins_sc : :obj:`list`, optional
        min values of scaled labels that correspond to min values of labels when using conditional
        encoders
    maxes_sc : :obj:`list`, optional
        max values of scaled labels that correspond to max values of labels when using conditional
        encoders
    crop_kwargs : :obj:`dict`, optional
        define center and extent of crop if `crop_type='fixed'`; keys are 'x_0', 'x_ext', 'y_0',
        'y_ext'
    marker_idxs : :obj:`list`, optional
        indices of `labels_sc_0` that will be interpolated; note that this is analogous but
        different from `input_idxs`, since the 2d tensor `labels_sc_0` has half as many label
        dimensions as `latents_0` and `labels_0`
    ch : :obj:`int`, optional
        specify which channel of input images to return (can only be a single value)

    Returns
    -------
    :obj:`tuple`
        - ims_list (:obj:`list` of :obj:`list` of :obj:`np.ndarray`) interpolated images
        - labels_list (:obj:`list` of :obj:`list` of :obj:`np.ndarray`) interpolated labels
        - ims_crop_list (:obj:`list` of :obj:`list` of :obj:`np.ndarray`) interpolated, cropped
          images
    """
    if interp_type == 'labels':
        # one-hot transform needed to map scaled labels back to 2d tensors for the encoder
        from behavenet.data.transforms import MakeOneHot2D
        _, _, y_pix, x_pix = ims_0.shape
        one_hot_2d = MakeOneHot2D(y_pix, x_pix)
    # compute grid for relevant inputs
    n_interp_dims = len(input_idxs)
    assert n_interp_dims == 2
    # compute ranges for relevant inputs
    inputs = []
    inputs_sc = []
    for d in input_idxs:
        inputs.append(np.linspace(mins[d], maxes[d], n_frames))
        if mins_sc is not None and maxes_sc is not None:
            inputs_sc.append(np.linspace(mins_sc[d], maxes_sc[d], n_frames))
        else:
            # scaled ranges are required to place markers when interpolating labels
            if interp_type == 'labels':
                raise NotImplementedError
    ims_list = []
    ims_crop_list = []
    labels_list = []
    # latent_vals = []
    for i0 in range(n_frames):
        ims_tmp = []
        ims_crop_tmp = []
        labels_tmp = []
        # latents_tmp = []
        for i1 in range(n_frames):
            if interp_type == 'latents':
                # get (new) latents
                latents = np.copy(latents_0)
                latents[0, input_idxs[0]] = inputs[0][i0]
                latents[0, input_idxs[1]] = inputs[1][i1]
                # get scaled labels (for markers)
                labels_sc = _get_updated_scaled_labels(labels_sc_0)
                if model.hparams['model_class'] == 'cond-ae-msp':
                    # get reconstruction
                    im_tmp = get_reconstruction(
                        model,
                        torch.from_numpy(latents).float(),
                        apply_inverse_transform=True)
                else:
                    # get labels
                    if model.hparams['model_class'] == 'ae' \
                            or model.hparams['model_class'] == 'vae' \
                            or model.hparams['model_class'] == 'beta-tcvae' \
                            or model.hparams['model_class'] == 'ps-vae':
                        labels = None
                    elif model.hparams['model_class'] == 'cond-ae' \
                            or model.hparams['model_class'] == 'cond-vae':
                        labels = torch.from_numpy(labels_0).float()
                    else:
                        raise NotImplementedError
                    # get reconstruction
                    im_tmp = get_reconstruction(
                        model,
                        torch.from_numpy(latents).float(),
                        labels=labels)
            elif interp_type == 'labels':
                # get (new) scaled labels
                labels_sc = _get_updated_scaled_labels(
                    labels_sc_0, input_idxs, [inputs_sc[0][i0], inputs_sc[1][i1]])
                if len(labels_sc_0.shape) == 4:
                    # 2d scaled labels
                    labels_2d = torch.from_numpy(one_hot_2d(labels_sc)).float()
                else:
                    # 1d scaled labels
                    labels_2d = None
                if model.hparams['model_class'] == 'cond-ae-msp' \
                        or model.hparams['model_class'] == 'ps-vae':
                    # change latents that correspond to desired labels
                    latents = np.copy(latents_0)
                    latents[0, input_idxs[0]] = inputs[0][i0]
                    latents[0, input_idxs[1]] = inputs[1][i1]
                    # get reconstruction
                    im_tmp = get_reconstruction(model, latents, apply_inverse_transform=True)
                else:
                    # get (new) labels
                    labels = np.copy(labels_0)
                    labels[0, input_idxs[0]] = inputs[0][i0]
                    labels[0, input_idxs[1]] = inputs[1][i1]
                    # get reconstruction
                    im_tmp = get_reconstruction(
                        model,
                        ims_0,
                        labels=torch.from_numpy(labels).float(),
                        labels_2d=labels_2d)
            else:
                raise NotImplementedError
            ims_tmp.append(np.copy(im_tmp[0, ch]))
            # offsets so markers line up with the (optionally cropped) frames
            if crop_type:
                x_min_tmp = crop_kwargs['x_0'] - crop_kwargs['x_ext']
                y_min_tmp = crop_kwargs['y_0'] - crop_kwargs['y_ext']
            else:
                x_min_tmp = 0
                y_min_tmp = 0
            if interp_type == 'labels':
                # input_idxs is (y, x) by convention (see docstring), so index 0 is the
                # marker's y coordinate
                labels_tmp.append([
                    np.copy(labels_sc[0, input_idxs[0]]) - y_min_tmp,
                    np.copy(labels_sc[0, input_idxs[1]]) - x_min_tmp])
            elif interp_type == 'latents' and labels_sc_0 is not None:
                labels_tmp.append([
                    np.copy(labels_sc[0, marker_idxs[0]]) - y_min_tmp,
                    np.copy(labels_sc[0, marker_idxs[1]]) - x_min_tmp])
            else:
                labels_tmp.append([np.nan, np.nan])
            if crop_type:
                ims_crop_tmp.append(get_crop(
                    im_tmp[0, 0], crop_kwargs['y_0'], crop_kwargs['y_ext'], crop_kwargs['x_0'],
                    crop_kwargs['x_ext']))
            else:
                ims_crop_tmp.append([])
        ims_list.append(ims_tmp)
        ims_crop_list.append(ims_crop_tmp)
        labels_list.append(labels_tmp)
    return ims_list, labels_list, ims_crop_list
def interpolate_1d(
        interp_type, model, ims_0, latents_0, labels_0, labels_sc_0, mins, maxes, input_idxs,
        n_frames, crop_type=None, mins_sc=None, maxes_sc=None, crop_kwargs=None,
        marker_idxs=None, ch=0):
    """Return reconstructed images created by interpolating through latent/label space.

    For each dimension in `input_idxs`, sweeps that single dimension between its min and max
    over `n_frames` points, reconstructing an image at each point (one row of output per
    interpolated dimension).

    Parameters
    ----------
    interp_type : :obj:`str`
        'latents' | 'labels'
    model : :obj:`behavenet.models` object
        autoencoder model
    ims_0 : :obj:`torch.Tensor`
        base images for interpolating labels, of shape (1, n_channels, y_pix, x_pix)
    latents_0 : :obj:`np.ndarray`
        base latents of shape (1, n_latents); only two of these dimensions will be changed if
        `interp_type='latents'`
    labels_0 : :obj:`np.ndarray`
        base labels of shape (1, n_labels)
    labels_sc_0 : :obj:`np.ndarray`
        base scaled labels in pixel space of shape (1, n_labels, y_pix, x_pix)
    mins : :obj:`array-like`
        minimum values of all labels/latents
    maxes : :obj:`array-like`
        maximum values of all labels/latents
    input_idxs : :obj:`array-like`
        indices of labels/latents that will be interpolated
    n_frames : :obj:`int`
        number of interpolation points between mins and maxes (inclusive)
    crop_type : :obj:`str` or :obj:`NoneType`, optional
        currently only implements 'fixed'; if not None, cropped images are returned, and returned
        labels are also cropped so that they can be plotted on top of the cropped images; if None,
        returned cropped images are empty and labels are relative to original image size
    mins_sc : :obj:`list`, optional
        min values of scaled labels that correspond to min values of labels when using conditional
        encoders
    maxes_sc : :obj:`list`, optional
        max values of scaled labels that correspond to max values of labels when using conditional
        encoders
    crop_kwargs : :obj:`dict`, optional
        define center and extent of crop if `crop_type='fixed'`; keys are 'x_0', 'x_ext', 'y_0',
        'y_ext'
    marker_idxs : :obj:`list`, optional
        indices of `labels_sc_0` that will be interpolated; note that this is analogous but
        different from `input_idxs`, since the 2d tensor `labels_sc_0` has half as many label
        dimensions as `latents_0` and `labels_0`
    ch : :obj:`int`, optional
        specify which channel of input images to return (can only be a single value)

    Returns
    -------
    :obj:`tuple`
        - ims_list (:obj:`list` of :obj:`list` of :obj:`np.ndarray`) interpolated images
        - labels_list (:obj:`list` of :obj:`list` of :obj:`np.ndarray`) interpolated labels
        - ims_crop_list (:obj:`list` of :obj:`list` of :obj:`np.ndarray`) interpolated, cropped
          images
    """
    if interp_type == 'labels':
        # one-hot transform needed to map scaled labels back to 2d tensors for the encoder
        from behavenet.data.transforms import MakeOneHot2D
        _, _, y_pix, x_pix = ims_0.shape
        one_hot_2d = MakeOneHot2D(y_pix, x_pix)
    n_interp_dims = len(input_idxs)
    # compute ranges for relevant inputs
    inputs = []
    inputs_sc = []
    for d in input_idxs:
        inputs.append(np.linspace(mins[d], maxes[d], n_frames))
        if mins_sc is not None and maxes_sc is not None:
            inputs_sc.append(np.linspace(mins_sc[d], maxes_sc[d], n_frames))
        else:
            # scaled ranges are required to place markers when interpolating labels
            if interp_type == 'labels':
                raise NotImplementedError
    ims_list = []
    ims_crop_list = []
    labels_list = []
    # latent_vals = []
    for i0 in range(n_interp_dims):
        # one output row per interpolated dimension; i1 walks along that dimension
        ims_tmp = []
        ims_crop_tmp = []
        labels_tmp = []
        for i1 in range(n_frames):
            if interp_type == 'latents':
                # get (new) latents
                latents = np.copy(latents_0)
                latents[0, input_idxs[i0]] = inputs[i0][i1]
                # get scaled labels (for markers)
                labels_sc = _get_updated_scaled_labels(labels_sc_0)
                if model.hparams['model_class'] == 'cond-ae-msp':
                    # get reconstruction
                    im_tmp = get_reconstruction(
                        model,
                        torch.from_numpy(latents).float(),
                        apply_inverse_transform=True)
                else:
                    # get labels
                    if model.hparams['model_class'] == 'ae' \
                            or model.hparams['model_class'] == 'vae' \
                            or model.hparams['model_class'] == 'beta-tcvae' \
                            or model.hparams['model_class'] == 'ps-vae':
                        labels = None
                    elif model.hparams['model_class'] == 'cond-ae' \
                            or model.hparams['model_class'] == 'cond-vae':
                        labels = torch.from_numpy(labels_0).float()
                    else:
                        raise NotImplementedError
                    # get reconstruction
                    im_tmp = get_reconstruction(
                        model,
                        torch.from_numpy(latents).float(),
                        labels=labels)
            elif interp_type == 'labels':
                # get (new) scaled labels
                labels_sc = _get_updated_scaled_labels(
                    labels_sc_0, input_idxs[i0], inputs_sc[i0][i1])
                if len(labels_sc_0.shape) == 4:
                    # 2d scaled labels
                    labels_2d = torch.from_numpy(one_hot_2d(labels_sc)).float()
                else:
                    # 1d scaled labels
                    labels_2d = None
                if model.hparams['model_class'] == 'cond-ae-msp' \
                        or model.hparams['model_class'] == 'ps-vae':
                    # change latents that correspond to desired labels
                    latents = np.copy(latents_0)
                    latents[0, input_idxs[i0]] = inputs[i0][i1]
                    # get reconstruction
                    im_tmp = get_reconstruction(model, latents, apply_inverse_transform=True)
                else:
                    # get (new) labels
                    labels = np.copy(labels_0)
                    labels[0, input_idxs[i0]] = inputs[i0][i1]
                    # get reconstruction
                    im_tmp = get_reconstruction(
                        model,
                        ims_0,
                        labels=torch.from_numpy(labels).float(),
                        labels_2d=labels_2d)
            else:
                raise NotImplementedError
            ims_tmp.append(np.copy(im_tmp[0, ch]))
            # offsets so markers line up with the (optionally cropped) frames
            if crop_type:
                x_min_tmp = crop_kwargs['x_0'] - crop_kwargs['x_ext']
                y_min_tmp = crop_kwargs['y_0'] - crop_kwargs['y_ext']
            else:
                x_min_tmp = 0
                y_min_tmp = 0
            if interp_type == 'labels':
                # NOTE(review): these marker reads always use input_idxs[0]/input_idxs[1] even
                # though this row interpolates dim input_idxs[i0]; looks copied from the 2d
                # version -- confirm whether input_idxs[i0] was intended here
                labels_tmp.append([
                    np.copy(labels_sc[0, input_idxs[0]]) - y_min_tmp,
                    np.copy(labels_sc[0, input_idxs[1]]) - x_min_tmp])
            elif interp_type == 'latents' and labels_sc_0 is not None:
                labels_tmp.append([
                    np.copy(labels_sc[0, marker_idxs[0]]) - y_min_tmp,
                    np.copy(labels_sc[0, marker_idxs[1]]) - x_min_tmp])
            else:
                labels_tmp.append([np.nan, np.nan])
            if crop_type:
                ims_crop_tmp.append(get_crop(
                    im_tmp[0, 0], crop_kwargs['y_0'], crop_kwargs['y_ext'], crop_kwargs['x_0'],
                    crop_kwargs['x_ext']))
            else:
                ims_crop_tmp.append([])
        ims_list.append(ims_tmp)
        ims_crop_list.append(ims_crop_tmp)
        labels_list.append(labels_tmp)
    return ims_list, labels_list, ims_crop_list
def interpolate_point_path(
        interp_type, model, ims_0, labels_0, points, n_frames=10, ch=0, crop_kwargs=None,
        apply_inverse_transform=True):
    """Return reconstructed images created by interpolating through multiple points.

    This function is a simplified version of :func:`interpolate_1d()`; this function computes a
    traversal for a single dimension instead of all dimensions; also, this function does not
    support conditional encoders, nor does it attempt to compute the interpolated, scaled values
    of the labels as :func:`interpolate_1d()` does. This function should supercede
    :func:`interpolate_1d()` in a future refactor. Also note that this function is utilized by
    the code to make traversal movies, whereas :func:`interpolate_1d()` is utilized by the code to
    make traversal plots.

    Parameters
    ----------
    interp_type : :obj:`str`
        'latents' | 'labels'
    model : :obj:`behavenet.models` object
        autoencoder model
    ims_0 : :obj:`np.ndarray`
        base images for interpolating labels, of shape (1, n_channels, y_pix, x_pix)
    labels_0 : :obj:`np.ndarray`
        base labels of shape (1, n_labels); these values will be used if
        `interp_type='latents'`, and they will be ignored if `inter_type='labels'`
        (since `points` will be used)
    points : :obj:`list`
        one entry for each point in path; each entry is an np.ndarray of shape (n_latents,)
    n_frames : :obj:`int` or :obj:`array-like`
        number of interpolation points between each point; can be an integer that is used
        for all paths, or an array/list of length one less than number of points
    ch : :obj:`int`, optional
        specify which channel of input images to return; if not an int, all channels are
        concatenated in the horizontal dimension
    crop_kwargs : :obj:`dict`, optional
        if crop_type is not None, provides information about the crop (for a fixed crop window)
        keys : 'y_0', 'x_0', 'y_ext', 'x_ext'; window is
        (y_0 - y_ext, y_0 + y_ext) in vertical direction and
        (x_0 - x_ext, x_0 + x_ext) in horizontal direction
    apply_inverse_transform : :obj:`bool`
        if inputs are latents (and model class is 'cond-ae-msp' or 'ps-vae'), apply inverse
        transform to put in original latent space

    Returns
    -------
    :obj:`tuple`
        - ims_list (:obj:`list` of :obj:`np.ndarray`) interpolated images
        - inputs_list (:obj:`list` of :obj:`np.ndarray`) interpolated values
    """
    if model.hparams.get('conditional_encoder', False):
        raise NotImplementedError
    n_points = len(points)
    if isinstance(n_frames, int):
        # same number of interpolation steps for every segment of the path
        n_frames = [n_frames] * (n_points - 1)
    assert len(n_frames) == (n_points - 1)
    ims_list = []
    inputs_list = []
    for p in range(n_points - 1):
        # NOTE(review): points is indexed as points[None, p], which requires an np.ndarray of
        # shape (n_points, n_latents) rather than the "list" stated in the docstring -- confirm
        # what callers actually pass
        p0 = points[None, p]
        p1 = points[None, p + 1]
        p_vec = (p1 - p0) / n_frames[p]
        for pn in range(n_frames[p]):
            # linear step along the segment from p0 toward p1
            vec = p0 + pn * p_vec
            if interp_type == 'latents':
                if model.hparams['model_class'] == 'cond-ae' \
                        or model.hparams['model_class'] == 'cond-vae':
                    im_tmp = get_reconstruction(
                        model, vec, apply_inverse_transform=apply_inverse_transform,
                        labels=torch.from_numpy(labels_0).float().to(model.hparams['device']))
                else:
                    im_tmp = get_reconstruction(
                        model, vec, apply_inverse_transform=apply_inverse_transform)
            elif interp_type == 'labels':
                if model.hparams['model_class'] == 'cond-ae-msp' \
                        or model.hparams['model_class'] == 'ps-vae':
                    im_tmp = get_reconstruction(
                        model, vec, apply_inverse_transform=True)
                else:  # cond-ae
                    im_tmp = get_reconstruction(
                        model, ims_0,
                        labels=torch.from_numpy(vec).float().to(model.hparams['device']))
            else:
                raise NotImplementedError
            if crop_kwargs is not None:
                if not isinstance(ch, int):
                    raise ValueError('"ch" must be an integer to use crop_kwargs')
                ims_list.append(get_crop(
                    im_tmp[0, ch],
                    crop_kwargs['y_0'], crop_kwargs['y_ext'],
                    crop_kwargs['x_0'], crop_kwargs['x_ext']))
            else:
                if isinstance(ch, int):
                    ims_list.append(np.copy(im_tmp[0, ch]))
                else:
                    # concatenate all channels horizontally
                    ims_list.append(np.copy(concat(im_tmp[0])))
            inputs_list.append(vec)
    return ims_list, inputs_list
def _get_updated_scaled_labels(labels_og, idxs=None, vals=None):
"""Helper function for interpolate_xd functions."""
if labels_og is not None:
if len(labels_og.shape) == 4:
# 2d scaled labels
tmp = np.copy(labels_og)
t, y, x = np.where(tmp[0] == 1)
labels_sc = np.hstack([x, y])[None, :]
else:
# 1d scaled labels
labels_sc = np.copy(labels_og)
if idxs is not None:
if isinstance(idxs, int):
assert isinstance(vals, float)
idxs = [idxs]
vals = [vals]
else:
assert len(idxs) == len(vals)
for idx, val in zip(idxs, vals):
labels_sc[0, idx] = val
else:
labels_sc = None
return labels_sc
# ----------------------------------------
# mid-level plotting functions
# ----------------------------------------
def plot_2d_frame_array(
        ims_list, markers=None, im_kwargs=None, marker_kwargs=None, figsize=None, save_file=None,
        format='pdf'):
    """Plot list of list of interpolated images output by :func:`interpolate_2d()` in a 2d grid.

    Parameters
    ----------
    ims_list : :obj:`list` of :obj:`list`
        each inner list element holds an np.ndarray of shape (y_pix, x_pix)
    markers : :obj:`list` of :obj:`list` or NoneType, optional
        each inner list element holds an array-like object with values (y_pix, x_pix); if None,
        markers are not plotted on top of frames
    im_kwargs : :obj:`dict` or NoneType, optional
        kwargs for `matplotlib.pyplot.imshow()` function (vmin, vmax, cmap, etc)
    marker_kwargs : :obj:`dict` or NoneType, optional
        kwargs for `matplotlib.pyplot.plot()` function (markersize, markeredgewidth, etc)
    figsize : :obj:`tuple`, optional
        (width, height) in inches
    save_file : :obj:`str` or NoneType, optional
        figure saved if not None
    format : :obj:`str`, optional
        format of saved image; 'pdf' | 'png' | 'jpeg' | ...
    """
    n_y = len(ims_list)
    n_x = len(ims_list[0])
    if figsize is None:
        y_pix, x_pix = ims_list[0][0].shape
        # how many inches per pixel? fix total width at 15 inches
        in_per_pix = 15 / (x_pix * n_x)
        figsize = (15, in_per_pix * y_pix * n_y)
    # squeeze=False keeps axes 2d so indexing works when the grid has a single row or column
    fig, axes = plt.subplots(n_y, n_x, figsize=figsize, squeeze=False)
    if im_kwargs is None:
        im_kwargs = {'vmin': 0, 'vmax': 1, 'cmap': 'gray'}
    if marker_kwargs is None:
        marker_kwargs = {'markersize': 20, 'markeredgewidth': 3}
    for r, ims_list_y in enumerate(ims_list):
        for c, im in enumerate(ims_list_y):
            axes[r, c].imshow(im, **im_kwargs)
            axes[r, c].set_xticks([])
            axes[r, c].set_yticks([])
            if markers is not None:
                # markers stored as (y, x); plot() wants x first
                axes[r, c].plot(
                    markers[r][c][1], markers[r][c][0], 'o', **marker_kwargs)
    plt.subplots_adjust(wspace=0, hspace=0, bottom=0, left=0, top=1, right=1)
    if save_file is not None:
        make_dir_if_not_exists(save_file)
        plt.savefig(save_file + '.' + format, dpi=300, bbox_inches='tight')
    plt.show()
def plot_1d_frame_array(
        ims_list, markers=None, im_kwargs=None, marker_kwargs=None, plot_ims=True, plot_diffs=True,
        figsize=None, save_file=None, format='pdf'):
    """Plot list of list of interpolated images output by :func:`interpolate_1d()` in a 2d grid.

    Parameters
    ----------
    ims_list : :obj:`list` of :obj:`list`
        each inner list element holds an np.ndarray of shape (y_pix, x_pix)
    markers : :obj:`list` of :obj:`list` or NoneType, optional
        each inner list element holds an array-like object with values (y_pix, x_pix); if None,
        markers are not plotted on top of frames
    im_kwargs : :obj:`dict` or NoneType, optional
        kwargs for `matplotlib.pyplot.imshow()` function (vmin, vmax, cmap, etc)
    marker_kwargs : :obj:`dict` or NoneType, optional
        kwargs for `matplotlib.pyplot.plot()` function (markersize, markeredgewidth, etc)
    plot_ims : :obj:`bool`, optional
        plot images
    plot_diffs : :obj:`bool`, optional
        plot differences (each frame minus the first frame of its row)
    figsize : :obj:`tuple`, optional
        (width, height) in inches
    save_file : :obj:`str` or NoneType, optional
        figure saved if not None
    format : :obj:`str`, optional
        format of saved image; 'pdf' | 'png' | 'jpeg' | ...

    Raises
    ------
    ValueError
        if both `plot_ims` and `plot_diffs` are False
    """
    if not (plot_ims or plot_diffs):
        raise ValueError('Must plot at least one of ims or diffs')
    if plot_ims and plot_diffs:
        # one row of images plus one row of diffs per interpolated dimension
        n_y = len(ims_list) * 2
        offset = 2
    else:
        n_y = len(ims_list)
        offset = 1
    n_x = len(ims_list[0])
    if figsize is None:
        y_pix, x_pix = ims_list[0][0].shape
        # how many inches per pixel? fix total width at 15 inches
        in_per_pix = 15 / (x_pix * n_x)
        figsize = (15, in_per_pix * y_pix * n_y)
    # squeeze=False keeps axes 2d so indexing works when the grid has a single row or column
    fig, axes = plt.subplots(n_y, n_x, figsize=figsize, squeeze=False)
    if im_kwargs is None:
        im_kwargs = {'vmin': 0, 'vmax': 1, 'cmap': 'gray'}
    if marker_kwargs is None:
        marker_kwargs = {'markersize': 20, 'markeredgewidth': 3}
    for r, ims_list_y in enumerate(ims_list):
        base_im = ims_list_y[0]
        for c, im in enumerate(ims_list_y):
            # plot original images
            if plot_ims:
                axes[offset * r, c].imshow(im, **im_kwargs)
                axes[offset * r, c].set_xticks([])
                axes[offset * r, c].set_yticks([])
                if markers is not None:
                    # markers stored as (y, x); plot() wants x first
                    axes[offset * r, c].plot(
                        markers[r][c][1], markers[r][c][0], 'o', **marker_kwargs)
            # plot differences; center around 0.5 so the gray colormap shows signed changes
            if plot_diffs and plot_ims:
                axes[offset * r + 1, c].imshow(0.5 + (im - base_im), **im_kwargs)
                axes[offset * r + 1, c].set_xticks([])
                axes[offset * r + 1, c].set_yticks([])
            elif plot_diffs:
                axes[offset * r, c].imshow(0.5 + (im - base_im), **im_kwargs)
                axes[offset * r, c].set_xticks([])
                axes[offset * r, c].set_yticks([])
    plt.subplots_adjust(wspace=0, hspace=0, bottom=0, left=0, top=1, right=1)
    if save_file is not None:
        make_dir_if_not_exists(save_file)
        plt.savefig(save_file + '.' + format, dpi=300, bbox_inches='tight')
    plt.show()
def make_interpolated(
        ims, save_file, markers=None, text=None, text_title=None, text_color=(1, 1, 1),
        frame_rate=20, scale=3, markersize=10, markeredgecolor='w', markeredgewidth=1, ax=None):
    """Make a latent space interpolation movie.

    Parameters
    ----------
    ims : :obj:`list` of :obj:`np.ndarray`
        each list element is an array of shape (y_pix, x_pix)
    save_file : :obj:`str`
        absolute path of save file; does not need file extension, will automatically be saved as
        mp4. To save as a gif, include the '.gif' file extension in `save_file`. The movie will
        only be saved if `ax` is `NoneType`; else the list of animated frames is returned
    markers : :obj:`array-like`, optional
        array of size (n_frames, 2) which specifies the (x, y) coordinates of a marker on each
        frame
    text : :obj:`array-like`, optional
        array of size (n_frames) which specifies text printed in the lower left corner of each
        frame
    text_title : :obj:`array-like`, optional
        array of size (n_frames) which specifies text printed in the upper left corner of each
        frame
    text_color : :obj:`array-like`, optional
        rgb values specifying color of `text` and `text_title`, if applicable; the default is
        white, stored as a tuple so the default argument is immutable
    frame_rate : :obj:`float`, optional
        frame rate of saved movie
    scale : :obj:`float`, optional
        width of panel is (scale / 2) inches
    markersize : :obj:`float`, optional
        size of marker if `markers` is not `NoneType`
    markeredgecolor : :obj:`float`, optional
        color of marker edge if `markers` is not `NoneType`
    markeredgewidth : :obj:`float`, optional
        width of marker edge if `markers` is not `NoneType`
    ax : :obj:`matplotlib.axes.Axes` object
        optional axis in which to plot the frames; if this argument is not `NoneType` the list of
        animated frames is returned and the movie is not saved

    Returns
    -------
    :obj:`list`
        list of list of animated frames if `ax` is True; else save movie
    """
    y_pix, x_pix = ims[0].shape
    if ax is None:
        # build our own single-panel figure sized to the frame aspect ratio
        fig_width = scale / 2
        fig_height = y_pix / x_pix * scale / 2
        fig = plt.figure(figsize=(fig_width, fig_height), dpi=300)
        ax = plt.gca()
        return_ims = False
    else:
        # caller owns the figure; just hand back the artists
        return_ims = True
    ax.set_xticks([])
    ax.set_yticks([])
    default_kwargs = {'animated': True, 'cmap': 'gray', 'vmin': 0, 'vmax': 1}
    txt_kwargs = {
        'fontsize': 4, 'color': text_color, 'fontname': 'monospace',
        'horizontalalignment': 'left', 'verticalalignment': 'center',
        'transform': ax.transAxes}
    # ims is a list of lists, each row is a list of artists to draw in the current frame; here we
    # are just animating one artist, the image, in each frame
    ims_ani = []
    for i, im in enumerate(ims):
        im_tmp = []
        im_tmp.append(ax.imshow(im, **default_kwargs))
        # [s.set_visible(False) for s in ax.spines.values()]
        if markers is not None:
            im_tmp.append(ax.plot(
                markers[i, 0], markers[i, 1], '.r', markersize=markersize,
                markeredgecolor=markeredgecolor, markeredgewidth=markeredgewidth)[0])
        if text is not None:
            im_tmp.append(ax.text(0.02, 0.06, text[i], **txt_kwargs))
        if text_title is not None:
            im_tmp.append(ax.text(0.02, 0.92, text_title[i], **txt_kwargs))
        ims_ani.append(im_tmp)
    if return_ims:
        return ims_ani
    else:
        plt.tight_layout(pad=0)
        ani = animation.ArtistAnimation(fig, ims_ani, blit=True, repeat_delay=1000)
        save_movie(save_file, ani, frame_rate=frame_rate)
def make_interpolated_multipanel(
        ims, save_file, markers=None, text=None, text_title=None, frame_rate=20, n_cols=3, scale=1,
        **kwargs):
    """Make a multi-panel latent space interpolation movie.

    Parameters
    ----------
    ims : :obj:`list` of :obj:`list` of :obj:`np.ndarray`
        each list element is used to for a single panel, and is another list that contains arrays
        of shape (y_pix, x_pix)
    save_file : :obj:`str`
        absolute path of save file; does not need file extension, will automatically be saved as
        mp4. To save as a gif, include the '.gif' file extension in `save_file`.
    markers : :obj:`list` of :obj:`array-like`, optional
        each list element is used for a single panel, and is an array of size (n_frames, 2)
        which specifies the (x, y) coordinates of a marker on each frame for that panel
    text : :obj:`list` of :obj:`array-like`, optional
        each list element is used for a single panel, and is an array of size (n_frames) which
        specifies text printed in the lower left corner of each frame for that panel
    text_title : :obj:`list` of :obj:`array-like`, optional
        each list element is used for a single panel, and is an array of size (n_frames) which
        specifies text printed in the upper left corner of each frame for that panel
    frame_rate : :obj:`float`, optional
        frame rate of saved movie
    n_cols : :obj:`int`, optional
        movie is `n_cols` panels wide
    scale : :obj:`float`, optional
        width of panel is (scale / 2) inches
    kwargs
        arguments are additional arguments to :func:`make_interpolated`, like 'markersize',
        'markeredgewidth', 'markeredgecolor', etc.
    """
    n_panels = len(ims)
    markers = [None] * n_panels if markers is None else markers
    text = [None] * n_panels if text is None else text
    y_pix, x_pix = ims[0][0].shape
    n_rows = int(np.ceil(n_panels / n_cols))
    fig_width = scale / 2 * n_cols
    fig_height = y_pix / x_pix * scale / 2 * n_rows
    # squeeze=False keeps axes 2d so axes[row, col] works for any grid shape (incl. 1x1)
    fig, axes = plt.subplots(
        n_rows, n_cols, figsize=(fig_width, fig_height), dpi=300, squeeze=False)
    plt.subplots_adjust(wspace=0, hspace=0, left=0, bottom=0, right=1, top=1)
    # fill out empty panels with black frames; build the padding from the known frame size
    # (np.zeros(ims[0].shape) crashed when panels are lists of 2d frames rather than 3d arrays)
    while len(ims) < n_rows * n_cols:
        ims.append(np.zeros((len(ims[0]), y_pix, x_pix)))
        markers.append(None)
        text.append(None)
    # ims is a list of lists, each row is a list of artists to draw in the current frame; here we
    # are just animating one artist, the image, in each frame
    ims_ani = []
    for i, (ims_curr, markers_curr, text_curr) in enumerate(zip(ims, markers, text)):
        col = i % n_cols
        row = int(np.floor(i / n_cols))
        # only print the title once, on the first panel
        text_title_str = text_title if i == 0 else None
        ax = axes[row, col]
        ims_ani_curr = make_interpolated(
            ims=ims_curr, markers=markers_curr, text=text_curr, text_title=text_title_str, ax=ax,
            save_file=None, **kwargs)
        ims_ani.append(ims_ani_curr)
    # turn off any remaining axes (a no-op after the padding above; kept as a safeguard)
    i += 1
    while i < n_rows * n_cols:
        col = i % n_cols
        row = int(np.floor(i / n_cols))
        axes[row, col].set_axis_off()
        i += 1
    # rearrange ims:
    # currently a list of length n_panels, each element of which is a list of length n_t
    # we need a list of length n_t, each element of which is a list of length n_panels
    n_frames = len(ims_ani[0])
    ims_final = [[] for _ in range(n_frames)]
    for i in range(n_frames):
        for j in range(n_panels):
            ims_final[i] += ims_ani[j][i]
    ani = animation.ArtistAnimation(fig, ims_final, blit=True, repeat_delay=1000)
    save_movie(save_file, ani, frame_rate=frame_rate)
# ----------------------------------------
# high-level plotting functions
# ----------------------------------------
def _get_psvae_hparams(**kwargs):
    """Assemble default hyperparameters for loading/fitting a ps-vae model.

    Any keyword argument overrides the corresponding default; the keys 'alpha', 'beta' and
    'gamma' are stored under their namespaced 'ps_vae.<key>' form.
    """
    hparams = {
        'data_dir': get_user_dir('data'),
        'save_dir': get_user_dir('save'),
        'model_class': 'ps-vae',
        'model_type': 'conv',
        'rng_seed_data': 0,
        'trial_splits': '8;1;1;0',
        'train_frac': 1.0,
        'rng_seed_model': 0,
        'fit_sess_io_layers': False,
        'learning_rate': 1e-4,
        'l2_reg': 0,
        'conditional_encoder': False,
        'vae.beta': 1}
    # apply user overrides, mapping the ps-vae loss weights to their namespaced keys
    for key, val in kwargs.items():
        target = 'ps_vae.%s' % key if key in ('alpha', 'beta', 'gamma') else key
        hparams[target] = val
    return hparams
def plot_psvae_training_curves(
        lab, expt, animal, session, alphas, betas, gammas, n_ae_latents, rng_seeds_model,
        experiment_name, n_labels, dtype='val', save_file=None, format='pdf', **kwargs):
    """Create training plots for each term in the ps-vae objective function.
    The `dtype` argument controls which type of trials are plotted ('train' or 'val').
    Additionally, multiple models can be plotted simultaneously by varying one (and only one) of
    the following parameters:
    - alpha
    - beta
    - gamma
    - number of unsupervised latents
    - random seed used to initialize model weights
    Each of these entries must be an array of length 1 except for one option, which can be an array
    of arbitrary length (corresponding to already trained models). This function generates a single
    plot with panels for each of the following terms:
    - total loss
    - pixel mse
    - label R^2 (note the objective function contains the label MSE, but R^2 is easier to parse)
    - KL divergence of supervised latents
    - index-code mutual information of unsupervised latents
    - total correlation of unsupervised latents
    - dimension-wise KL of unsupervised latents
    - subspace overlap
    Parameters
    ----------
    lab : :obj:`str`
        lab id
    expt : :obj:`str`
        expt id
    animal : :obj:`str`
        animal id
    session : :obj:`str`
        session id
    alphas : :obj:`array-like`
        alpha values to plot
    betas : :obj:`array-like`
        beta values to plot
    gammas : :obj:`array-like`
        gamma values to plot
    n_ae_latents : :obj:`array-like`
        unsupervised dimensionalities to plot
    rng_seeds_model : :obj:`array-like`
        model seeds to plot
    experiment_name : :obj:`str`
        test-tube experiment name
    n_labels : :obj:`int`
        dimensionality of supervised latent space
    dtype : :obj:`str`
        'train' | 'val'
    save_file : :obj:`str`, optional
        absolute path of save file; does not need file extension
    format : :obj:`str`, optional
        format of saved image; 'pdf' | 'png' | 'jpeg' | ...
    kwargs
        arguments are keys of `hparams`, for example to set `train_frac`, `rng_seed_model`, etc.
    """
    # check for arrays, turn ints into lists
    # exactly one of the five sweep parameters may contain multiple values; that
    # parameter becomes the seaborn hue so curves from different models are colored
    n_arrays = 0
    hue = None
    if len(alphas) > 1:
        n_arrays += 1
        hue = 'alpha'
    if len(betas) > 1:
        n_arrays += 1
        hue = 'beta'
    if len(gammas) > 1:
        n_arrays += 1
        hue = 'gamma'
    if len(n_ae_latents) > 1:
        n_arrays += 1
        hue = 'n latents'
    if len(rng_seeds_model) > 1:
        n_arrays += 1
        hue = 'rng seed'
    if n_arrays > 1:
        raise ValueError(
            'Can only set one of "alphas", "betas", "gammas", "n_ae_latents", or ' +
            '"rng_seeds_model" as an array')
    # set model info
    hparams = _get_psvae_hparams(experiment_name=experiment_name, **kwargs)
    # losses logged by the ps-vae that will be pulled from the metrics csv
    metrics_list = [
        'loss', 'loss_data_mse', 'label_r2',
        'loss_zs_kl', 'loss_zu_mi', 'loss_zu_tc', 'loss_zu_dwkl', 'loss_AB_orth']
    metrics_dfs = []
    i = 0
    # load metrics for every hyperparameter combination; all but one of these
    # loops has length 1 (enforced above)
    for alpha in alphas:
        for beta in betas:
            for gamma in gammas:
                for n_latents in n_ae_latents:
                    for rng in rng_seeds_model:
                        # update hparams
                        hparams['ps_vae.alpha'] = alpha
                        hparams['ps_vae.beta'] = beta
                        hparams['ps_vae.gamma'] = gamma
                        hparams['n_ae_latents'] = n_latents + n_labels
                        hparams['rng_seed_model'] = rng
                        try:
                            get_lab_example(hparams, lab, expt)
                            hparams['animal'] = animal
                            hparams['session'] = session
                            hparams['session_dir'], sess_ids = get_session_dir(hparams)
                            hparams['expt_dir'] = get_expt_dir(hparams)
                            _, version = experiment_exists(hparams, which_version=True)
                            print(
                                'loading results with alpha=%i, beta=%i, gamma=%i (version %i)' %
                                (alpha, beta, gamma, version))
                            # NOTE(review): metrics are loaded with version=None even though a
                            # specific version was found above -- confirm this is intentional
                            metrics_dfs.append(load_metrics_csv_as_df(
                                hparams, lab, expt, metrics_list, version=None))
                            metrics_dfs[i]['alpha'] = alpha
                            metrics_dfs[i]['beta'] = beta
                            metrics_dfs[i]['gamma'] = gamma
                            metrics_dfs[i]['n latents'] = hparams['n_ae_latents']
                            metrics_dfs[i]['rng seed'] = rng
                            i += 1
                        except TypeError:
                            # model not found on disk; skip this combination
                            print(
                                'could not find model for alpha=%i, beta=%i, gamma=%i' %
                                (alpha, beta, gamma))
                            continue
    metrics_df = pd.concat(metrics_dfs, sort=False)
    sns.set_style('white')
    sns.set_context('talk')
    # drop early epochs and rows without a value; keep only the requested trial type
    data_queried = metrics_df[
        (metrics_df.epoch > 10) & ~pd.isna(metrics_df.val) & (metrics_df.dtype == dtype)]
    g = sns.FacetGrid(
        data_queried, col='loss', col_wrap=3, hue=hue, sharey=False, height=4)
    g = g.map(plt.plot, 'epoch', 'val').add_legend()  # , color=".3", fit_reg=False, x_jitter=.1);
    if save_file is not None:
        make_dir_if_not_exists(save_file)
        g.savefig(save_file + '.' + format, dpi=300, format=format)
def plot_hyperparameter_search_results(
        lab, expt, animal, session, n_labels, label_names, alpha_weights, alpha_n_ae_latents,
        alpha_expt_name, beta_weights, gamma_weights, beta_gamma_n_ae_latents,
        beta_gamma_expt_name, alpha, beta, gamma, save_file, batch_size=None, format='pdf',
        **kwargs):
    """Create a variety of diagnostic plots to assess the ps-vae hyperparameters.
    These diagnostic plots are based on the recommended way to perform a hyperparameter search in
    the ps-vae models; first, fix beta=1 and gamma=0, and do a sweep over alpha values and number
    of latents (for example alpha=[50, 100, 500, 1000] and n_ae_latents=[2, 4, 8, 16]). The best
    alpha value is subjective because it involves a tradeoff between pixel mse and label mse. After
    choosing a suitable value, fix alpha and the number of latents and vary beta and gamma. This
    function will then plot the following panels:
    - pixel mse as a function of alpha/num latents (for fixed beta/gamma)
    - label mse as a function of alpha/num_latents (for fixed beta/gamma)
    - pixel mse as a function of beta/gamma (for fixed alpha/n_ae_latents)
    - label mse as a function of beta/gamma (for fixed alpha/n_ae_latents)
    - index-code mutual information (part of the KL decomposition) as a function of beta/gamma (for
      fixed alpha/n_ae_latents)
    - total correlation(part of the KL decomposition) as a function of beta/gamma (for fixed
      alpha/n_ae_latents)
    - dimension-wise KL (part of the KL decomposition) as a function of beta/gamma (for fixed
      alpha/n_ae_latents)
    - average correlation coefficient across all pairs of unsupervised latent dims as a function of
      beta/gamma (for fixed alpha/n_ae_latents)
    - subspace overlap computed as ||[A; B] - I||_2^2 for A, B the projections to the supervised
      and unsupervised subspaces, respectively, and I the identity - as a function of beta/gamma
      (for fixed alpha/n_ae_latents)
    - example subspace overlap matrix for gamma=0 and beta=1, with fixed alpha/n_ae_latents
    - example subspace overlap matrix for gamma=1000 and beta=1, with fixed alpha/n_ae_latents
    Parameters
    ----------
    lab : :obj:`str`
        lab id
    expt : :obj:`str`
        expt id
    animal : :obj:`str`
        animal id
    session : :obj:`str`
        session id
    n_labels : :obj:`str`
        number of label dims
    label_names : :obj:`array-like`
        names of label dims
    alpha_weights : :obj:`array-like`
        array of alpha weights for fixed values of beta, gamma
    alpha_n_ae_latents : :obj:`array-like`
        array of latent dimensionalities for fixed values of beta, gamma using alpha_weights
    alpha_expt_name : :obj:`str`
        test-tube experiment name of alpha-based hyperparam search
    beta_weights : :obj:`array-like`
        array of beta weights for a fixed value of alpha
    gamma_weights : :obj:`array-like`
        array of beta weights for a fixed value of alpha
    beta_gamma_n_ae_latents : :obj:`int`
        latent dimensionality used for beta-gamma hyperparam search
    beta_gamma_expt_name : :obj:`str`
        test-tube experiment name of beta-gamma hyperparam search
    alpha : :obj:`float`
        fixed value of alpha for beta-gamma search
    beta : :obj:`float`
        fixed value of beta for alpha search
    gamma : :obj:`float`
        fixed value of gamma for alpha search
    save_file : :obj:`str`
        absolute path of save file; does not need file extension
    batch_size : :obj:`int`, optional
        size of batches, used to compute correlation coefficient per batch; if NoneType, the
        correlation coefficient is computed across all time points
    format : :obj:`str`, optional
        format of saved image; 'pdf' | 'png' | 'jpeg' | ...
    kwargs
        arguments are keys of `hparams`, preceded by either `alpha_` or `beta_gamma_`. For example,
        to set the train frac of the alpha models, use `alpha_train_frac`; to set the rng_data_seed
        of the beta-gamma models, use `beta_gamma_rng_data_seed`.
    """

    def apply_masks(data, masks):
        # keep only the entries whose label mask is set (label present)
        return data[masks == 1]

    def get_label_r2(hparams, model, data_generator, version, dtype='val', overwrite=False):
        # compute (or load cached) per-trial label R^2/MSE for a single model;
        # results are cached as a csv inside the model's version directory
        from sklearn.metrics import r2_score
        save_file = os.path.join(
            hparams['expt_dir'], 'version_%i' % version, 'r2_supervised.csv')
        if not os.path.exists(save_file) or overwrite:
            if not os.path.exists(save_file):
                print('R^2 metrics do not exist; computing from scratch')
            else:
                print('overwriting metrics at %s' % save_file)
            metrics_df = []
            data_generator.reset_iterators(dtype)
            for i_test in tqdm(range(data_generator.n_tot_batches[dtype])):
                # get next minibatch and put it on the device
                data, sess = data_generator.next_batch(dtype)
                x = data['images'][0]
                y = data['labels'][0].cpu().detach().numpy()
                if 'labels_masks' in data:
                    n = data['labels_masks'][0].cpu().detach().numpy()
                else:
                    # no mask provided -> treat all labels as present
                    n = np.ones_like(y)
                z = model.get_transformed_latents(x, dataset=sess)
                for i in range(n_labels):
                    y_true = apply_masks(y[:, i], n[:, i])
                    y_pred = apply_masks(z[:, i], n[:, i])
                    if len(y_true) > 10:
                        r2 = r2_score(y_true, y_pred, multioutput='variance_weighted')
                        mse = np.mean(np.square(y_true - y_pred))
                    else:
                        # too few unmasked points for a meaningful metric
                        r2 = np.nan
                        mse = np.nan
                    metrics_df.append(pd.DataFrame({
                        'Trial': data['batch_idx'].item(),
                        'Label': label_names[i],
                        'R2': r2,
                        'MSE': mse,
                        'Model': 'PS-VAE'}, index=[0]))
            metrics_df = pd.concat(metrics_df)
            print('saving results to %s' % save_file)
            metrics_df.to_csv(save_file, index=False, header=True)
        else:
            print('loading results from %s' % save_file)
            metrics_df = pd.read_csv(save_file)
        return metrics_df

    # -----------------------------------------------------
    # load pixel/label MSE as a function of n_latents/alpha
    # -----------------------------------------------------
    # set model info
    hparams = _get_psvae_hparams(experiment_name=alpha_expt_name)
    # update hparams
    for key, val in kwargs.items():
        # hparam vals should be named 'alpha_[property]', for example 'alpha_train_frac'
        if key.startswith('alpha_'):
            prop = key[len('alpha_'):]
            hparams[prop] = val
        else:
            hparams[key] = val
    metrics_list = ['loss_data_mse']
    metrics_dfs_frame = []
    metrics_dfs_marker = []
    for n_latent in alpha_n_ae_latents:
        hparams['n_ae_latents'] = n_latent + n_labels
        for alpha_ in alpha_weights:
            hparams['ps_vae.alpha'] = alpha_
            hparams['ps_vae.beta'] = beta
            hparams['ps_vae.gamma'] = gamma
            try:
                get_lab_example(hparams, lab, expt)
                hparams['animal'] = animal
                hparams['session'] = session
                hparams['session_dir'], sess_ids = get_session_dir(hparams)
                hparams['expt_dir'] = get_expt_dir(hparams)
                _, version = experiment_exists(hparams, which_version=True)
                print('loading results with alpha=%i, beta=%i, gamma=%i (version %i)' % (
                    hparams['ps_vae.alpha'], hparams['ps_vae.beta'], hparams['ps_vae.gamma'],
                    version))
                # get frame mse
                metrics_dfs_frame.append(load_metrics_csv_as_df(
                    hparams, lab, expt, metrics_list, version=None, test=True))
                metrics_dfs_frame[-1]['alpha'] = alpha_
                metrics_dfs_frame[-1]['n_latents'] = hparams['n_ae_latents']
                # get marker mse
                model, data_gen = get_best_model_and_data(
                    hparams, Model=None, load_data=True, version=version)
                metrics_df_ = get_label_r2(hparams, model, data_gen, version, dtype='val')
                metrics_df_['alpha'] = alpha_
                metrics_df_['n_latents'] = hparams['n_ae_latents']
                metrics_dfs_marker.append(metrics_df_[metrics_df_.Model == 'PS-VAE'])
            except TypeError:
                # model not found on disk; skip this combination
                print('could not find model for alpha=%i, beta=%i, gamma=%i' % (
                    hparams['ps_vae.alpha'], hparams['ps_vae.beta'], hparams['ps_vae.gamma']))
                continue
    metrics_df_frame = pd.concat(metrics_dfs_frame, sort=False)
    metrics_df_marker = pd.concat(metrics_dfs_marker, sort=False)
    print('done')

    # -----------------------------------------------------
    # load pixel/label MSE as a function of beta/gamma
    # -----------------------------------------------------
    # update hparams
    hparams['experiment_name'] = beta_gamma_expt_name
    for key, val in kwargs.items():
        # hparam vals should be named 'beta_gamma_[property]', for example
        # 'beta_gamma_train_frac'
        if key.startswith('beta_gamma_'):
            prop = key[len('beta_gamma_'):]
            hparams[prop] = val
    metrics_list = ['loss_data_mse', 'loss_zu_mi', 'loss_zu_tc', 'loss_zu_dwkl', 'loss_AB_orth']
    metrics_dfs_frame_bg = []
    metrics_dfs_marker_bg = []
    metrics_dfs_corr_bg = []
    overlaps = {}
    # loop variables are suffixed with '_' (as in the alpha sweep above) so the fixed
    # `beta`/`gamma` arguments used in the plot titles below are not clobbered
    for beta_ in beta_weights:
        for gamma_ in gamma_weights:
            hparams['n_ae_latents'] = beta_gamma_n_ae_latents + n_labels
            hparams['ps_vae.alpha'] = alpha
            hparams['ps_vae.beta'] = beta_
            hparams['ps_vae.gamma'] = gamma_
            try:
                get_lab_example(hparams, lab, expt)
                hparams['animal'] = animal
                hparams['session'] = session
                hparams['session_dir'], sess_ids = get_session_dir(hparams)
                hparams['expt_dir'] = get_expt_dir(hparams)
                _, version = experiment_exists(hparams, which_version=True)
                print('loading results with alpha=%i, beta=%i, gamma=%i (version %i)' % (
                    hparams['ps_vae.alpha'], hparams['ps_vae.beta'], hparams['ps_vae.gamma'],
                    version))
                # get frame mse
                metrics_dfs_frame_bg.append(load_metrics_csv_as_df(
                    hparams, lab, expt, metrics_list, version=None, test=True))
                metrics_dfs_frame_bg[-1]['beta'] = beta_
                metrics_dfs_frame_bg[-1]['gamma'] = gamma_
                # get marker mse
                model, data_gen = get_best_model_and_data(
                    hparams, Model=None, load_data=True, version=version)
                metrics_df_ = get_label_r2(hparams, model, data_gen, version, dtype='val')
                metrics_df_['beta'] = beta_
                metrics_df_['gamma'] = gamma_
                metrics_dfs_marker_bg.append(metrics_df_[metrics_df_.Model == 'PS-VAE'])
                # get subspace overlap from the supervised (A) and unsupervised (B)
                # projection weights
                A = model.encoding.A.weight.data.cpu().detach().numpy()
                B = model.encoding.B.weight.data.cpu().detach().numpy()
                C = np.concatenate([A, B], axis=0)
                overlap = np.matmul(C, C.T)
                overlaps['beta=%i_gamma=%i' % (beta_, gamma_)] = overlap
                # get corr between the first two unsupervised latent dims
                latents = load_latents(hparams, version, dtype='test')
                if batch_size is None:
                    corr = np.corrcoef(latents[:, n_labels + np.array([0, 1])].T)
                    metrics_dfs_corr_bg.append(pd.DataFrame({
                        'loss': 'corr',
                        'dtype': 'test',
                        'val': np.abs(corr[0, 1]),
                        'beta': beta_,
                        'gamma': gamma_}, index=[0]))
                else:
                    # compute the correlation per batch rather than across all time points
                    n_batches = int(np.ceil(latents.shape[0] / batch_size))
                    for i in range(n_batches):
                        corr = np.corrcoef(
                            latents[i * batch_size:(i + 1) * batch_size,
                                    n_labels + np.array([0, 1])].T)
                        metrics_dfs_corr_bg.append(pd.DataFrame({
                            'loss': 'corr',
                            'dtype': 'test',
                            'val': np.abs(corr[0, 1]),
                            'beta': beta_,
                            'gamma': gamma_}, index=[0]))
            except TypeError:
                # model not found on disk; skip this combination
                print('could not find model for alpha=%i, beta=%i, gamma=%i' % (
                    hparams['ps_vae.alpha'], hparams['ps_vae.beta'], hparams['ps_vae.gamma']))
                continue
            print()
    metrics_df_frame_bg = pd.concat(metrics_dfs_frame_bg, sort=False)
    metrics_df_marker_bg = pd.concat(metrics_dfs_marker_bg, sort=False)
    metrics_df_corr_bg = pd.concat(metrics_dfs_corr_bg, sort=False)
    print('done')

    # -----------------------------------------------------
    # ----------------- PLOT DATA -------------------------
    # -----------------------------------------------------
    sns.set_style('white')
    sns.set_context('paper', font_scale=1.2)
    alpha_palette = sns.color_palette('Greens')
    beta_palette = sns.color_palette('Reds', len(metrics_df_corr_bg.beta.unique()))
    gamma_palette = sns.color_palette('Blues', len(metrics_df_corr_bg.gamma.unique()))
    from matplotlib.gridspec import GridSpec
    fig = plt.figure(figsize=(12, 10), dpi=300)
    n_rows = 3
    n_cols = 12
    gs = GridSpec(n_rows, n_cols, figure=fig)

    def despine(ax):
        # remove top/right spines for a cleaner look
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)

    sns.set_palette(alpha_palette)
    # --------------------------------------------------
    # MSE per pixel (alpha sweep)
    # --------------------------------------------------
    ax_pixel_mse_alpha = fig.add_subplot(gs[0, 0:3])
    data_queried = metrics_df_frame[(metrics_df_frame.dtype == 'test')]
    sns.barplot(x='n_latents', y='val', hue='alpha', data=data_queried, ax=ax_pixel_mse_alpha)
    ax_pixel_mse_alpha.legend().set_visible(False)
    ax_pixel_mse_alpha.set_xlabel('Latent dimension')
    ax_pixel_mse_alpha.set_ylabel('MSE per pixel')
    ax_pixel_mse_alpha.ticklabel_format(axis='y', style='sci', scilimits=(-3, 3))
    # titles reflect the actual fixed hyperparameters rather than hardcoded values
    ax_pixel_mse_alpha.set_title('Beta=%i, Gamma=%i' % (beta, gamma))
    despine(ax_pixel_mse_alpha)
    # --------------------------------------------------
    # MSE per marker (alpha sweep)
    # --------------------------------------------------
    ax_marker_mse_alpha = fig.add_subplot(gs[0, 3:6])
    data_queried = metrics_df_marker
    sns.barplot(x='n_latents', y='MSE', hue='alpha', data=data_queried, ax=ax_marker_mse_alpha)
    ax_marker_mse_alpha.set_xlabel('Latent dimension')
    ax_marker_mse_alpha.set_ylabel('MSE per marker')
    ax_marker_mse_alpha.set_title('Beta=%i, Gamma=%i' % (beta, gamma))
    ax_marker_mse_alpha.legend(frameon=True, title='Alpha')
    despine(ax_marker_mse_alpha)
    sns.set_palette(gamma_palette)
    # --------------------------------------------------
    # MSE per pixel (beta/gamma)
    # --------------------------------------------------
    # NOTE(review): epoch 200 is hardcoded as the final test epoch -- confirm
    ax_pixel_mse_bg = fig.add_subplot(gs[0, 6:9])
    data_queried = metrics_df_frame_bg[
        (metrics_df_frame_bg.dtype == 'test') &
        (metrics_df_frame_bg.loss == 'loss_data_mse') &
        (metrics_df_frame_bg.epoch == 200)]
    sns.barplot(x='beta', y='val', hue='gamma', data=data_queried, ax=ax_pixel_mse_bg)
    ax_pixel_mse_bg.legend().set_visible(False)
    ax_pixel_mse_bg.set_xlabel('Beta')
    ax_pixel_mse_bg.set_ylabel('MSE per pixel')
    ax_pixel_mse_bg.ticklabel_format(axis='y', style='sci', scilimits=(-3, 3))
    ax_pixel_mse_bg.set_title('Latents=%i, Alpha=%i' % (hparams['n_ae_latents'], alpha))
    despine(ax_pixel_mse_bg)
    # --------------------------------------------------
    # MSE per marker (beta/gamma)
    # --------------------------------------------------
    ax_marker_mse_bg = fig.add_subplot(gs[0, 9:12])
    data_queried = metrics_df_marker_bg
    sns.barplot(x='beta', y='MSE', hue='gamma', data=data_queried, ax=ax_marker_mse_bg)
    ax_marker_mse_bg.set_xlabel('Beta')
    ax_marker_mse_bg.set_ylabel('MSE per marker')
    ax_marker_mse_bg.set_title('Latents=%i, Alpha=%i' % (hparams['n_ae_latents'], alpha))
    ax_marker_mse_bg.legend(frameon=True, title='Gamma', loc='lower left')
    despine(ax_marker_mse_bg)
    # --------------------------------------------------
    # ICMI
    # --------------------------------------------------
    ax_icmi = fig.add_subplot(gs[1, 0:4])
    data_queried = metrics_df_frame_bg[
        (metrics_df_frame_bg.dtype == 'test') &
        (metrics_df_frame_bg.loss == 'loss_zu_mi') &
        (metrics_df_frame_bg.epoch == 200)]
    sns.lineplot(
        x='beta', y='val', hue='gamma', data=data_queried, ax=ax_icmi, ci=None,
        palette=gamma_palette)
    ax_icmi.legend().set_visible(False)
    ax_icmi.set_xlabel('Beta')
    ax_icmi.set_ylabel('Index-code Mutual Information')
    ax_icmi.set_title('Latents=%i, Alpha=%i' % (hparams['n_ae_latents'], alpha))
    despine(ax_icmi)
    # --------------------------------------------------
    # TC
    # --------------------------------------------------
    ax_tc = fig.add_subplot(gs[1, 4:8])
    data_queried = metrics_df_frame_bg[
        (metrics_df_frame_bg.dtype == 'test') &
        (metrics_df_frame_bg.loss == 'loss_zu_tc') &
        (metrics_df_frame_bg.epoch == 200)]
    sns.lineplot(
        x='beta', y='val', hue='gamma', data=data_queried, ax=ax_tc, ci=None,
        palette=gamma_palette)
    ax_tc.legend().set_visible(False)
    ax_tc.set_xlabel('Beta')
    ax_tc.set_ylabel('Total Correlation')
    ax_tc.set_title('Latents=%i, Alpha=%i' % (hparams['n_ae_latents'], alpha))
    despine(ax_tc)
    # --------------------------------------------------
    # DWKL
    # --------------------------------------------------
    ax_dwkl = fig.add_subplot(gs[1, 8:12])
    data_queried = metrics_df_frame_bg[
        (metrics_df_frame_bg.dtype == 'test') &
        (metrics_df_frame_bg.loss == 'loss_zu_dwkl') &
        (metrics_df_frame_bg.epoch == 200)]
    sns.lineplot(
        x='beta', y='val', hue='gamma', data=data_queried, ax=ax_dwkl, ci=None,
        palette=gamma_palette)
    ax_dwkl.legend().set_visible(False)
    ax_dwkl.set_xlabel('Beta')
    ax_dwkl.set_ylabel('Dimension-wise KL')
    ax_dwkl.set_title('Latents=%i, Alpha=%i' % (hparams['n_ae_latents'], alpha))
    despine(ax_dwkl)
    # --------------------------------------------------
    # CC
    # --------------------------------------------------
    ax_cc = fig.add_subplot(gs[2, 0:3])
    data_queried = metrics_df_corr_bg
    sns.lineplot(
        x='beta', y='val', hue='gamma', data=data_queried, ax=ax_cc, ci=None,
        palette=gamma_palette)
    ax_cc.legend().set_visible(False)
    ax_cc.set_xlabel('Beta')
    ax_cc.set_ylabel('Correlation Coefficient')
    ax_cc.set_title('Latents=%i, Alpha=%i' % (hparams['n_ae_latents'], alpha))
    despine(ax_cc)
    # --------------------------------------------------
    # AB orth
    # --------------------------------------------------
    ax_orth = fig.add_subplot(gs[2, 3:6])
    data_queried = metrics_df_frame_bg[
        (metrics_df_frame_bg.dtype == 'test') &
        (metrics_df_frame_bg.loss == 'loss_AB_orth') &
        (metrics_df_frame_bg.epoch == 200) &
        ~metrics_df_frame_bg.val.isna()]
    sns.lineplot(
        x='gamma', y='val', hue='beta', data=data_queried, ax=ax_orth, ci=None,
        palette=beta_palette)
    ax_orth.legend(frameon=False, title='Beta')
    ax_orth.set_xlabel('Gamma')
    ax_orth.set_ylabel('Subspace overlap')
    ax_orth.set_title('Latents=%i, Alpha=%i' % (hparams['n_ae_latents'], alpha))
    despine(ax_orth)
    # --------------------------------------------------
    # minimum-gamma overlap example
    # --------------------------------------------------
    ax_gamma0 = fig.add_subplot(gs[2, 6:9])
    overlap = overlaps['beta=%i_gamma=%i' % (np.min(beta_weights), np.min(gamma_weights))]
    im = ax_gamma0.imshow(overlap, cmap='PuOr', vmin=-1, vmax=1)
    ax_gamma0.set_xticks(np.arange(overlap.shape[1]))
    ax_gamma0.set_yticks(np.arange(overlap.shape[0]))
    # bug fix: this panel shows the *minimum*-gamma overlap, so title with np.min
    # (previously used np.max, mislabeling the panel)
    ax_gamma0.set_title('Subspace overlap\nGamma=%i' % np.min(gamma_weights))
    fig.colorbar(im, ax=ax_gamma0, orientation='vertical', shrink=0.75)
    # --------------------------------------------------
    # maximum-gamma overlap example
    # --------------------------------------------------
    ax_gamma1 = fig.add_subplot(gs[2, 9:12])
    overlap = overlaps['beta=%i_gamma=%i' % (np.min(beta_weights), np.max(gamma_weights))]
    im = ax_gamma1.imshow(overlap, cmap='PuOr', vmin=-1, vmax=1)
    ax_gamma1.set_xticks(np.arange(overlap.shape[1]))
    ax_gamma1.set_yticks(np.arange(overlap.shape[0]))
    ax_gamma1.set_title('Subspace overlap\nGamma=%i' % np.max(gamma_weights))
    fig.colorbar(im, ax=ax_gamma1, orientation='vertical', shrink=0.75)
    plt.tight_layout(h_pad=3)  # h_pad is fraction of font size
    # reset to default color palette
    sns.reset_orig()
    if save_file is not None:
        make_dir_if_not_exists(save_file)
        plt.savefig(save_file + '.' + format, dpi=300, format=format)
def plot_label_reconstructions(
        lab, expt, animal, session, n_ae_latents, experiment_name, n_labels, trials, version=None,
        plot_scale=0.5, sess_idx=0, save_file=None, format='pdf', xtick_locs=None, frame_rate=None,
        max_traces=8, add_r2=True, add_legend=True, colored_predictions=True, concat_trials=False,
        **kwargs):
    """Plot labels and their reconstructions from an ps-vae.
    Parameters
    ----------
    lab : :obj:`str`
        lab id
    expt : :obj:`str`
        expt id
    animal : :obj:`str`
        animal id
    session : :obj:`str`
        session id
    n_ae_latents : :obj:`str`
        dimensionality of unsupervised latent space; n_labels will be added to this
    experiment_name : :obj:`str`
        test-tube experiment name
    n_labels : :obj:`str`
        dimensionality of supervised latent space
    trials : :obj:`array-like`
        array of trials to reconstruct
    version : :obj:`str` or :obj:`int`, optional
        can be 'best' to load best model, and integer to load a specific model, or NoneType to use
        the values in hparams to load a specific model
    plot_scale : :obj:`float`
        scale the magnitude of reconstructions
    sess_idx : :obj:`int`, optional
        session index into data generator
    save_file : :obj:`str`, optional
        absolute path of save file; does not need file extension
    format : :obj:`str`, optional
        format of saved image; 'pdf' | 'png' | 'jpeg' | ...
    xtick_locs : :obj:`array-like`, optional
        tick locations in units of bins
    frame_rate : :obj:`float`, optional
        frame rate of behavorial video; to properly relabel xticks
    max_traces : :obj:`int`, optional
        maximum number of traces to plot, for easier visualization
    add_r2 : :obj:`bool`, optional
        print R2 value on plot
    add_legend : :obj:`bool`, optional
        print legend on plot
    colored_predictions : :obj:`bool`, optional
        color predictions using default seaborn colormap; else predictions are black
    concat_trials : :obj:`bool`, optional
        True to plot all trials together, separated by a small gap
    kwargs
        arguments are keys of `hparams`, for example to set `train_frac`, `rng_seed_model`, etc.
    """
    from behavenet.plotting.decoder_utils import plot_neural_reconstruction_traces
    # a single trial has nothing to concatenate
    if len(trials) == 1:
        concat_trials = False
    # set model info
    hparams = _get_psvae_hparams(
        experiment_name=experiment_name, n_ae_latents=n_ae_latents + n_labels, **kwargs)
    # programmatically fill out other hparams options
    get_lab_example(hparams, lab, expt)
    hparams['animal'] = animal
    hparams['session'] = session
    model, data_generator = get_best_model_and_data(
        hparams, Model=None, load_data=True, version=version, data_kwargs=None)
    print(data_generator)
    print('alpha: %i' % model.hparams['ps_vae.alpha'])
    print('beta: %i' % model.hparams['ps_vae.beta'])
    print('gamma: %i' % model.hparams['ps_vae.gamma'])
    print('model seed: %i' % model.hparams['rng_seed_model'])
    n_blank = 5  # buffer time points between trials if concatenating
    labels_og_all = []
    labels_pred_all = []
    for trial in trials:
        # collect data
        batch = data_generator.datasets[sess_idx][trial]
        labels_og = batch['labels'].detach().cpu().numpy()
        labels_pred = model.get_predicted_labels(batch['images']).detach().cpu().numpy()
        if 'labels_masks' in batch:
            # masked-out (missing) labels are set to NaN so they are not drawn
            labels_masks = batch['labels_masks'].detach().cpu().numpy()
            labels_og[labels_masks == 0] = np.nan
        # store data
        labels_og_all.append(labels_og)
        labels_pred_all.append(labels_pred)
        if trial != trials[-1]:
            # NaN spacer creates a visual gap between trials when concatenating
            labels_og_all.append(np.nan * np.zeros((n_blank, labels_og.shape[1])))
            labels_pred_all.append(np.nan * np.zeros((n_blank, labels_pred.shape[1])))
        # plot data from single trial
        if not concat_trials:
            if save_file is not None:
                save_file_trial = save_file + '_trial-%i' % trial
            else:
                save_file_trial = None
            plot_neural_reconstruction_traces(
                labels_og, labels_pred, scale=plot_scale, save_file=save_file_trial, format=format,
                xtick_locs=xtick_locs, frame_rate=frame_rate, max_traces=max_traces, add_r2=add_r2,
                add_legend=add_legend, colored_predictions=colored_predictions)
    # plot data from all trials
    if concat_trials:
        if save_file is not None:
            save_file_trial = save_file + '_trial-{}'.format(trials)
        else:
            save_file_trial = None
        plot_neural_reconstruction_traces(
            np.vstack(labels_og_all), np.vstack(labels_pred_all), scale=plot_scale,
            save_file=save_file_trial, format=format,
            xtick_locs=xtick_locs, frame_rate=frame_rate, max_traces=max_traces, add_r2=add_r2,
            add_legend=add_legend, colored_predictions=colored_predictions)
def plot_latent_traversals(
lab, expt, animal, session, model_class, alpha, beta, gamma, n_ae_latents, rng_seed_model,
experiment_name, n_labels, label_idxs, label_min_p=5, label_max_p=95,
channel=0, n_frames_zs=4, n_frames_zu=4, trial=None, trial_idx=1, batch_idx=1,
crop_type=None, crop_kwargs=None, sess_idx=0, save_file=None, format='pdf', **kwargs):
"""Plot video frames representing the traversal of individual dimensions of the latent space.
Parameters
----------
lab : :obj:`str`
lab id
expt : :obj:`str`
expt id
animal : :obj:`str`
animal id
session : :obj:`str`
session id
model_class : :obj:`str`
model class in which to perform traversal; currently supported models are:
'ae' | 'vae' | 'cond-ae' | 'cond-vae' | 'beta-tcvae' | 'cond-ae-msp' | 'ps-vae'
note that models with conditional encoders are not currently supported
alpha : :obj:`float`
ps-vae alpha value
beta : :obj:`float`
ps-vae beta value
gamma : :obj:`array-like`
ps-vae gamma value
n_ae_latents : :obj:`int`
dimensionality of unsupervised latents
rng_seed_model : :obj:`int`
model seed
experiment_name : :obj:`str`
test-tube experiment name
n_labels : :obj:`str`
dimensionality of supervised latent space (ignored when using fully unsupervised models)
label_idxs : :obj:`array-like`, optional
set of label indices (dimensions) to individually traverse
label_min_p : :obj:`float`, optional
lower percentile of training data used to compute range of traversal
label_max_p : :obj:`float`, optional
upper percentile of training data used to compute range of traversal
channel : :obj:`int`, optional
image channel to plot
n_frames_zs : :obj:`int`, optional
number of frames (points) to display for traversal through supervised dimensions
n_frames_zu : :obj:`int`, optional
number of frames (points) to display for traversal through unsupervised dimensions
trial : :obj:`int`, optional
trial index into all possible trials (train, val, test); one of `trial` or `trial_idx`
must be specified; `trial` takes precedence over `trial_idx`
trial_idx : :obj:`int`, optional
trial index of base frame used for interpolation
batch_idx : :obj:`int`, optional
batch index of base frame used for interpolation
crop_type : :obj:`str`, optional
cropping method used on interpolated frames
'fixed' | None
crop_kwargs : :obj:`dict`, optional
if crop_type is not None, provides information about the crop
keys for 'fixed' type: 'y_0', 'x_0', 'y_ext', 'x_ext'; window is
(y_0 - y_ext, y_0 + y_ext) in vertical direction and
(x_0 - x_ext, x_0 + x_ext) in horizontal direction
sess_idx : :obj:`int`, optional
session index into data generator
save_file : :obj:`str`, optional
absolute path of save file; does not need file extension
format : :obj:`str`, optional
format of saved image; 'pdf' | 'png' | 'jpeg' | ...
kwargs
arguments are keys of `hparams`, for example to set `train_frac`, `rng_seed_model`, etc.
"""
hparams = _get_psvae_hparams(
model_class=model_class, alpha=alpha, beta=beta, gamma=gamma, n_ae_latents=n_ae_latents,
experiment_name=experiment_name, rng_seed_model=rng_seed_model, **kwargs)
if model_class == 'cond-ae-msp' or model_class == 'ps-vae':
hparams['n_ae_latents'] += n_labels
# programmatically fill out other hparams options
get_lab_example(hparams, lab, expt)
hparams['animal'] = animal
hparams['session'] = session
hparams['session_dir'], sess_ids = get_session_dir(hparams)
hparams['expt_dir'] = get_expt_dir(hparams)
_, version = experiment_exists(hparams, which_version=True)
model_ae, data_generator = get_best_model_and_data(hparams, Model=None, version=version)
# get latent/label info
latent_range = get_input_range(
'latents', hparams, model=model_ae, data_gen=data_generator, min_p=15, max_p=85,
version=version)
label_range = get_input_range(
'labels', hparams, sess_ids=sess_ids, sess_idx=sess_idx,
min_p=label_min_p, max_p=label_max_p)
try:
label_sc_range = get_input_range(
'labels_sc', hparams, sess_ids=sess_ids, sess_idx=sess_idx,
min_p=label_min_p, max_p=label_max_p)
except KeyError:
import copy
label_sc_range = copy.deepcopy(label_range)
# ----------------------------------------
# label traversals
# ----------------------------------------
interp_func_label = interpolate_1d
plot_func_label = plot_1d_frame_array
save_file_new = save_file + '_label-traversals'
if model_class == 'cond-ae' or model_class == 'cond-ae-msp' or model_class == 'ps-vae' or \
model_class == 'cond-vae':
# get model input for this trial
ims_pt, ims_np, latents_np, labels_pt, labels_np, labels_2d_pt, labels_2d_np = \
get_model_input(
data_generator, hparams, model_ae, trial_idx=trial_idx, trial=trial,
compute_latents=True, compute_scaled_labels=False, compute_2d_labels=False)
if labels_2d_np is None:
labels_2d_np = np.copy(labels_np)
if crop_type == 'fixed':
crop_kwargs_ = crop_kwargs
else:
crop_kwargs_ = None
# perform interpolation
ims_label, markers_loc_label, ims_crop_label = interp_func_label(
'labels', model_ae, ims_pt[None, batch_idx, :], latents_np[None, batch_idx, :],
labels_np[None, batch_idx, :], labels_2d_np[None, batch_idx, :],
mins=label_range['min'], maxes=label_range['max'],
n_frames=n_frames_zs, input_idxs=label_idxs, crop_type=crop_type,
mins_sc=label_sc_range['min'], maxes_sc=label_sc_range['max'],
crop_kwargs=crop_kwargs_, ch=channel)
# plot interpolation
if crop_type:
marker_kwargs = {
'markersize': 30, 'markeredgewidth': 8, 'markeredgecolor': [1, 1, 0],
'fillstyle': 'none'}
plot_func_label(
ims_crop_label, markers=None, marker_kwargs=marker_kwargs, save_file=save_file_new,
format=format)
else:
marker_kwargs = {
'markersize': 20, 'markeredgewidth': 5, 'markeredgecolor': [1, 1, 0],
'fillstyle': 'none'}
plot_func_label(
ims_label, markers=None, marker_kwargs=marker_kwargs, save_file=save_file_new,
format=format)
# ----------------------------------------
# latent traversals
# ----------------------------------------
interp_func_latent = interpolate_1d
plot_func_latent = plot_1d_frame_array
save_file_new = save_file + '_latent-traversals'
if hparams['model_class'] == 'cond-ae-msp' or hparams['model_class'] == 'ps-vae':
latent_idxs = n_labels + np.arange(n_ae_latents)
elif hparams['model_class'] == 'ae' \
or hparams['model_class'] == 'vae' \
or hparams['model_class'] == 'cond-vae' \
or hparams['model_class'] == 'beta-tcvae':
latent_idxs = np.arange(n_ae_latents)
else:
raise NotImplementedError
# simplify options here
scaled_labels = False
twod_labels = False
crop_type = None
crop_kwargs = None
labels_2d_np_sel = None
# get model input for this trial
ims_pt, ims_np, latents_np, labels_pt, labels_np, labels_2d_pt, labels_2d_np = \
get_model_input(
data_generator, hparams, model_ae, trial=trial, trial_idx=trial_idx,
compute_latents=True, compute_scaled_labels=scaled_labels,
compute_2d_labels=twod_labels)
latents_np[:, n_labels:] = 0
if hparams['model_class'] == 'ae' or hparams['model_class'] == 'beta-tcvae':
labels_np_sel = labels_np
else:
labels_np_sel = labels_np[None, batch_idx, :]
# perform interpolation
ims_latent, markers_loc_latent_, ims_crop_latent = interp_func_latent(
'latents', model_ae, ims_pt[None, batch_idx, :], latents_np[None, batch_idx, :],
labels_np_sel, labels_2d_np_sel,
mins=latent_range['min'], maxes=latent_range['max'],
n_frames=n_frames_zu, input_idxs=latent_idxs, crop_type=crop_type,
mins_sc=None, maxes_sc=None, crop_kwargs=crop_kwargs, ch=channel)
# plot interpolation
marker_kwargs = {
'markersize': 20, 'markeredgewidth': 5, 'markeredgecolor': [1, 1, 0],
'fillstyle': 'none'}
plot_func_latent(
ims_latent, markers=None, marker_kwargs=marker_kwargs, save_file=save_file_new,
format=format)
def make_latent_traversal_movie(
        lab, expt, animal, session, model_class, alpha, beta, gamma, n_ae_latents,
        rng_seed_model, experiment_name, n_labels, trial_idxs, batch_idxs, trials,
        label_min_p=5, label_max_p=95, channel=0, sess_idx=0, n_frames=10, n_buffer_frames=5,
        crop_kwargs=None, n_cols=3, movie_kwargs=None, panel_titles=None, order_idxs=None,
        split_movies=False, save_file=None, **kwargs):
    """Create a multi-panel movie with each panel showing traversals of an individual latent dim.

    The traversals will start at a lower bound, increase to an upper bound, then return to a lower
    bound; the traversal of each dimension occurs simultaneously. It is also possible to specify
    multiple base frames for the traversals; the traversal of each base frame is separated by
    several blank frames. Note that support for plotting markers on top of the corresponding
    supervised dimensions is not supported by this function.

    Parameters
    ----------
    lab : :obj:`str`
        lab id
    expt : :obj:`str`
        expt id
    animal : :obj:`str`
        animal id
    session : :obj:`str`
        session id
    model_class : :obj:`str`
        model class in which to perform traversal; currently supported models are:
        'ae' | 'vae' | 'cond-ae' | 'cond-vae' | 'ps-vae'
        note that models with conditional encoders are not currently supported
    alpha : :obj:`float`
        ps-vae alpha value
    beta : :obj:`float`
        ps-vae beta value
    gamma : :obj:`array-like`
        ps-vae gamma value
    n_ae_latents : :obj:`int`
        dimensionality of unsupervised latents
    rng_seed_model : :obj:`int`
        model seed
    experiment_name : :obj:`str`
        test-tube experiment name
    n_labels : :obj:`str`
        dimensionality of supervised latent space (ignored when using fully unsupervised models)
    trial_idxs : :obj:`array-like` of :obj:`int`
        trial indices of base frames used for interpolation; if an entry is an integer, the
        corresponding entry in `trials` must be `None`. This value is a trial index into all
        *test* trials, and is not affected by how the test trials are shuffled. The `trials`
        argument (see below) takes precedence over `trial_idxs`.
    batch_idxs : :obj:`array-like` of :obj:`int`
        batch indices of base frames used for interpolation; correspond to entries in `trial_idxs`
        and `trials`
    trials : :obj:`array-like` of :obj:`int`
        trials of base frame used for interpolation; if an entry is an integer, the
        corresponding entry in `trial_idxs` must be `None`. This value is a trial index into all
        possible trials (train, val, test), whereas `trial_idxs` is an index only into test trials
    label_min_p : :obj:`float`, optional
        lower percentile of training data used to compute range of traversal
    label_max_p : :obj:`float`, optional
        upper percentile of training data used to compute range of traversal
    channel : :obj:`int`, optional
        image channel to plot
    sess_idx : :obj:`int`, optional
        session index into data generator
    n_frames : :obj:`int`, optional
        number of frames (points) to display for traversal across latent dimensions; the movie
        will display a traversal of `n_frames` across each dim, then another traversal of
        `n_frames` in the opposite direction
    n_buffer_frames : :obj:`int`, optional
        number of blank frames to insert between base frames
    crop_kwargs : :obj:`dict`, optional
        provides information about a fixed crop window applied to label traversals;
        keys : 'y_0', 'x_0', 'y_ext', 'x_ext'; window is
        (y_0 - y_ext, y_0 + y_ext) in vertical direction and
        (x_0 - x_ext, x_0 + x_ext) in horizontal direction
    n_cols : :obj:`int`, optional
        movie is `n_cols` panels wide
    movie_kwargs : :obj:`dict`, optional
        additional kwargs for individual panels; possible keys are 'markersize', 'markeredgecolor',
        'markeredgewidth', and 'text_color'
    panel_titles : :obj:`list` of :obj:`str`, optional
        optional titles for each panel
    order_idxs : :obj:`array-like`, optional
        used to reorder panels (which are plotted in row-major order) if desired; can also be used
        to choose a subset of latent dimensions to include
    split_movies : :obj:`bool`, optional
        True to save a separate latent traversal movie for each latent dimension
    save_file : :obj:`str`, optional
        absolute path of save file; does not need file extension, will automatically be saved as
        mp4. To save as a gif, include the '.gif' file extension in `save_file`
    kwargs
        arguments are keys of `hparams`, for example to set `train_frac`, `rng_seed_model`, etc.

    """
    # avoid the shared-mutable-default pitfall for the kwargs dict
    movie_kwargs = {} if movie_kwargs is None else movie_kwargs
    panel_titles = [''] * (n_labels + n_ae_latents) if panel_titles is None else panel_titles

    hparams = _get_psvae_hparams(
        model_class=model_class, alpha=alpha, beta=beta, gamma=gamma, n_ae_latents=n_ae_latents,
        experiment_name=experiment_name, rng_seed_model=rng_seed_model, **kwargs)
    if model_class == 'cond-ae-msp' or model_class == 'ps-vae':
        # these models store supervised + unsupervised dims in a single latent space
        hparams['n_ae_latents'] += n_labels

    # programmatically fill out other hparams options
    get_lab_example(hparams, lab, expt)
    hparams['animal'] = animal
    hparams['session'] = session
    hparams['session_dir'], sess_ids = get_session_dir(hparams)
    hparams['expt_dir'] = get_expt_dir(hparams)
    _, version = experiment_exists(hparams, which_version=True)
    model_ae, data_generator = get_best_model_and_data(hparams, Model=None, version=version)

    # get latent/label traversal ranges (percentiles of the training data)
    latent_range = get_input_range(
        'latents', hparams, model=model_ae, data_gen=data_generator, min_p=15, max_p=85,
        version=version)
    label_range = get_input_range(
        'labels', hparams, sess_ids=sess_ids, sess_idx=sess_idx,
        min_p=label_min_p, max_p=label_max_p)

    # ----------------------------------------
    # collect frames/latents/labels
    # ----------------------------------------
    # scaled/2d labels are not needed for any supported model class
    csl = False
    c2dl = False
    ims_pt = []
    ims_np = []
    latents_np = []
    labels_pt = []
    labels_np = []
    for trial, trial_idx in zip(trials, trial_idxs):
        # 2d-label outputs (last two return values) are unused here
        ims_pt_, ims_np_, latents_np_, labels_pt_, labels_np_, _, _ = \
            get_model_input(
                data_generator, hparams, model_ae, trial_idx=trial_idx, trial=trial,
                compute_latents=True, compute_scaled_labels=csl, compute_2d_labels=c2dl,
                max_frames=200)
        ims_pt.append(ims_pt_)
        ims_np.append(ims_np_)
        latents_np.append(latents_np_)
        labels_pt.append(labels_pt_)
        labels_np.append(labels_np_)

    # determine which latent dims correspond to labels vs unsupervised latents
    if hparams['model_class'] == 'ps-vae':
        label_idxs = np.arange(n_labels)
        latent_idxs = n_labels + np.arange(n_ae_latents)
    elif hparams['model_class'] == 'vae':
        label_idxs = []
        latent_idxs = np.arange(hparams['n_ae_latents'])
    elif hparams['model_class'] == 'cond-vae':
        label_idxs = np.arange(n_labels)
        latent_idxs = np.arange(hparams['n_ae_latents'])
    else:
        # match the error type used by sibling traversal functions in this module
        raise NotImplementedError

    # ----------------------------------------
    # label traversals
    # ----------------------------------------
    ims_all = []
    txt_strs_all = []
    txt_strs_titles = []
    for label_idx in label_idxs:
        ims = []
        txt_strs = []
        for b, batch_idx in enumerate(batch_idxs):
            # traversal path: min -> max -> min of the selected label dim
            if hparams['model_class'] == 'ps-vae':
                points = np.array([latents_np[b][batch_idx, :]] * 3)
            elif hparams['model_class'] == 'cond-vae':
                points = np.array([labels_np[b][batch_idx, :]] * 3)
            else:
                raise NotImplementedError
            points[0, label_idx] = label_range['min'][label_idx]
            points[1, label_idx] = label_range['max'][label_idx]
            points[2, label_idx] = label_range['min'][label_idx]
            ims_curr, inputs = interpolate_point_path(
                'labels', model_ae, ims_pt[b][None, batch_idx, :],
                labels_np[b][None, batch_idx, :], points=points, n_frames=n_frames, ch=channel,
                crop_kwargs=crop_kwargs)
            ims.append(ims_curr)
            txt_strs += [panel_titles[label_idx] for _ in range(len(ims_curr))]
            if label_idx == 0:
                # frame-id titles are built once (first panel) and shared by all panels
                tmp = trial_idxs[b] if trial_idxs[b] is not None else trials[b]
                txt_strs_titles += [
                    'base frame %02i-%02i' % (tmp, batch_idx) for _ in range(len(ims_curr))]
            # add blank frames between base frames
            if len(batch_idxs) > 1:
                y_pix, x_pix = ims_curr[0].shape
                ims.append([np.zeros((y_pix, x_pix)) for _ in range(n_buffer_frames)])
                txt_strs += ['' for _ in range(n_buffer_frames)]
                if label_idx == 0:
                    txt_strs_titles += ['' for _ in range(n_buffer_frames)]
        ims_all.append(np.vstack(ims))
        txt_strs_all.append(txt_strs)

    # ----------------------------------------
    # latent traversals
    # ----------------------------------------
    crop_kwargs_ = None  # no cropping for unsupervised latent traversals
    for latent_idx in latent_idxs:
        ims = []
        txt_strs = []
        for b, batch_idx in enumerate(batch_idxs):
            # traversal path: min -> max -> min of the selected latent dim
            points = np.array([latents_np[b][batch_idx, :]] * 3)
            points[0, latent_idx] = latent_range['min'][latent_idx]
            points[1, latent_idx] = latent_range['max'][latent_idx]
            points[2, latent_idx] = latent_range['min'][latent_idx]
            if hparams['model_class'] == 'vae':
                labels_curr = None
            else:
                labels_curr = labels_np[b][None, batch_idx, :]
            ims_curr, inputs = interpolate_point_path(
                'latents', model_ae, ims_pt[b][None, batch_idx, :],
                labels_curr, points=points, n_frames=n_frames, ch=channel,
                crop_kwargs=crop_kwargs_)
            ims.append(ims_curr)
            if hparams['model_class'] == 'cond-vae':
                # latent panels come after the n_labels label panels in panel_titles
                txt_strs += [panel_titles[latent_idx + n_labels] for _ in range(len(ims_curr))]
            else:
                txt_strs += [panel_titles[latent_idx] for _ in range(len(ims_curr))]
            if latent_idx == 0 and len(label_idxs) == 0:
                # add frame ids here if skipping labels
                tmp = trial_idxs[b] if trial_idxs[b] is not None else trials[b]
                txt_strs_titles += [
                    'base frame %02i-%02i' % (tmp, batch_idx) for _ in range(len(ims_curr))]
            # add blank frames between base frames
            if len(batch_idxs) > 1:
                y_pix, x_pix = ims_curr[0].shape
                ims.append([np.zeros((y_pix, x_pix)) for _ in range(n_buffer_frames)])
                txt_strs += ['' for _ in range(n_buffer_frames)]
                if latent_idx == 0 and len(label_idxs) == 0:
                    txt_strs_titles += ['' for _ in range(n_buffer_frames)]
        ims_all.append(np.vstack(ims))
        txt_strs_all.append(txt_strs)

    # ----------------------------------------
    # make video
    # ----------------------------------------
    if order_idxs is None:
        # don't change order of latents
        order_idxs = np.arange(len(ims_all))
    if split_movies:
        for idx in order_idxs:
            if save_file.split('.')[-1] == 'gif':
                save_file_new = save_file[:-4] + '_latent-%i.gif' % idx
            elif save_file.split('.')[-1] == 'mp4':
                save_file_new = save_file[:-4] + '_latent-%i.mp4' % idx
            else:
                # bug fix: index was previously hard-coded to 0, so all extensionless
                # split movies were written to the same '_latent-0' path
                save_file_new = save_file + '_latent-%i' % idx
            make_interpolated(
                ims=ims_all[idx],
                text=txt_strs_all[idx],
                text_title=txt_strs_titles,
                save_file=save_file_new, scale=3, **movie_kwargs)
    else:
        make_interpolated_multipanel(
            ims=[ims_all[i] for i in order_idxs],
            text=[txt_strs_all[i] for i in order_idxs],
            text_title=txt_strs_titles,
            save_file=save_file, scale=2, n_cols=n_cols, **movie_kwargs)
| [
"numpy.nanpercentile",
"pandas.read_csv",
"behavenet.data.utils.load_labels_like_latents",
"numpy.hstack",
"behavenet.plotting.decoder_utils.plot_neural_reconstruction_traces",
"torch.from_numpy",
"behavenet.data.utils.build_data_generator",
"seaborn.set_style",
"numpy.array",
"copy.deepcopy",
"... | [((35375, 35414), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n_y', 'n_x'], {'figsize': 'figsize'}), '(n_y, n_x, figsize=figsize)\n', (35387, 35414), True, 'import matplotlib.pyplot as plt\n'), ((35961, 36034), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0)', 'hspace': '(0)', 'bottom': '(0)', 'left': '(0)', 'top': '(1)', 'right': '(1)'}), '(wspace=0, hspace=0, bottom=0, left=0, top=1, right=1)\n', (35980, 36034), True, 'import matplotlib.pyplot as plt\n'), ((36187, 36197), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (36195, 36197), True, 'import matplotlib.pyplot as plt\n'), ((37967, 38006), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n_y', 'n_x'], {'figsize': 'figsize'}), '(n_y, n_x, figsize=figsize)\n', (37979, 38006), True, 'import matplotlib.pyplot as plt\n'), ((39178, 39251), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0)', 'hspace': '(0)', 'bottom': '(0)', 'left': '(0)', 'top': '(1)', 'right': '(1)'}), '(wspace=0, hspace=0, bottom=0, left=0, top=1, right=1)\n', (39197, 39251), True, 'import matplotlib.pyplot as plt\n'), ((39404, 39414), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (39412, 39414), True, 'import matplotlib.pyplot as plt\n'), ((45191, 45261), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n_rows', 'n_cols'], {'figsize': '(fig_width, fig_height)', 'dpi': '(300)'}), '(n_rows, n_cols, figsize=(fig_width, fig_height), dpi=300)\n', (45203, 45261), True, 'import matplotlib.pyplot as plt\n'), ((45266, 45339), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0)', 'hspace': '(0)', 'left': '(0)', 'bottom': '(0)', 'right': '(1)', 'top': '(1)'}), '(wspace=0, hspace=0, left=0, bottom=0, right=1, top=1)\n', (45285, 45339), True, 'import matplotlib.pyplot as plt\n'), ((46903, 46974), 'matplotlib.animation.ArtistAnimation', 'animation.ArtistAnimation', (['fig', 'ims_final'], {'blit': '(True)', 'repeat_delay': '(1000)'}), 
'(fig, ims_final, blit=True, repeat_delay=1000)\n', (46928, 46974), True, 'import matplotlib.animation as animation\n'), ((46979, 47028), 'behavenet.plotting.save_movie', 'save_movie', (['save_file', 'ani'], {'frame_rate': 'frame_rate'}), '(save_file, ani, frame_rate=frame_rate)\n', (46989, 47028), False, 'from behavenet.plotting import save_movie\n'), ((52949, 52983), 'pandas.concat', 'pd.concat', (['metrics_dfs'], {'sort': '(False)'}), '(metrics_dfs, sort=False)\n', (52958, 52983), True, 'import pandas as pd\n'), ((52989, 53011), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (53002, 53011), True, 'import seaborn as sns\n'), ((53016, 53039), 'seaborn.set_context', 'sns.set_context', (['"""talk"""'], {}), "('talk')\n", (53031, 53039), True, 'import seaborn as sns\n'), ((53169, 53257), 'seaborn.FacetGrid', 'sns.FacetGrid', (['data_queried'], {'col': '"""loss"""', 'col_wrap': '(3)', 'hue': 'hue', 'sharey': '(False)', 'height': '(4)'}), "(data_queried, col='loss', col_wrap=3, hue=hue, sharey=False,\n height=4)\n", (53182, 53257), True, 'import seaborn as sns\n'), ((62390, 62430), 'pandas.concat', 'pd.concat', (['metrics_dfs_frame'], {'sort': '(False)'}), '(metrics_dfs_frame, sort=False)\n', (62399, 62430), True, 'import pandas as pd\n'), ((62455, 62496), 'pandas.concat', 'pd.concat', (['metrics_dfs_marker'], {'sort': '(False)'}), '(metrics_dfs_marker, sort=False)\n', (62464, 62496), True, 'import pandas as pd\n'), ((66585, 66628), 'pandas.concat', 'pd.concat', (['metrics_dfs_frame_bg'], {'sort': '(False)'}), '(metrics_dfs_frame_bg, sort=False)\n', (66594, 66628), True, 'import pandas as pd\n'), ((66656, 66700), 'pandas.concat', 'pd.concat', (['metrics_dfs_marker_bg'], {'sort': '(False)'}), '(metrics_dfs_marker_bg, sort=False)\n', (66665, 66700), True, 'import pandas as pd\n'), ((66726, 66768), 'pandas.concat', 'pd.concat', (['metrics_dfs_corr_bg'], {'sort': '(False)'}), '(metrics_dfs_corr_bg, sort=False)\n', (66735, 66768), True, 'import 
pandas as pd\n'), ((66972, 66994), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (66985, 66994), True, 'import seaborn as sns\n'), ((66999, 67039), 'seaborn.set_context', 'sns.set_context', (['"""paper"""'], {'font_scale': '(1.2)'}), "('paper', font_scale=1.2)\n", (67014, 67039), True, 'import seaborn as sns\n'), ((67061, 67088), 'seaborn.color_palette', 'sns.color_palette', (['"""Greens"""'], {}), "('Greens')\n", (67078, 67088), True, 'import seaborn as sns\n'), ((67317, 67354), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 10)', 'dpi': '(300)'}), '(figsize=(12, 10), dpi=300)\n', (67327, 67354), True, 'import matplotlib.pyplot as plt\n'), ((67396, 67432), 'matplotlib.gridspec.GridSpec', 'GridSpec', (['n_rows', 'n_cols'], {'figure': 'fig'}), '(n_rows, n_cols, figure=fig)\n', (67404, 67432), False, 'from matplotlib.gridspec import GridSpec\n'), ((67550, 67580), 'seaborn.set_palette', 'sns.set_palette', (['alpha_palette'], {}), '(alpha_palette)\n', (67565, 67580), True, 'import seaborn as sns\n'), ((67845, 67940), 'seaborn.barplot', 'sns.barplot', ([], {'x': '"""n_latents"""', 'y': '"""val"""', 'hue': '"""alpha"""', 'data': 'data_queried', 'ax': 'ax_pixel_mse_alpha'}), "(x='n_latents', y='val', hue='alpha', data=data_queried, ax=\n ax_pixel_mse_alpha)\n", (67856, 67940), True, 'import seaborn as sns\n'), ((68489, 68585), 'seaborn.barplot', 'sns.barplot', ([], {'x': '"""n_latents"""', 'y': '"""MSE"""', 'hue': '"""alpha"""', 'data': 'data_queried', 'ax': 'ax_marker_mse_alpha'}), "(x='n_latents', y='MSE', hue='alpha', data=data_queried, ax=\n ax_marker_mse_alpha)\n", (68500, 68585), True, 'import seaborn as sns\n'), ((68840, 68870), 'seaborn.set_palette', 'sns.set_palette', (['gamma_palette'], {}), '(gamma_palette)\n', (68855, 68870), True, 'import seaborn as sns\n'), ((69261, 69348), 'seaborn.barplot', 'sns.barplot', ([], {'x': '"""beta"""', 'y': '"""val"""', 'hue': '"""gamma"""', 'data': 'data_queried', 'ax': 
'ax_pixel_mse_bg'}), "(x='beta', y='val', hue='gamma', data=data_queried, ax=\n ax_pixel_mse_bg)\n", (69272, 69348), True, 'import seaborn as sns\n'), ((69914, 70002), 'seaborn.barplot', 'sns.barplot', ([], {'x': '"""beta"""', 'y': '"""MSE"""', 'hue': '"""gamma"""', 'data': 'data_queried', 'ax': 'ax_marker_mse_bg'}), "(x='beta', y='MSE', hue='gamma', data=data_queried, ax=\n ax_marker_mse_bg)\n", (69925, 70002), True, 'import seaborn as sns\n'), ((70633, 70744), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""beta"""', 'y': '"""val"""', 'hue': '"""gamma"""', 'data': 'data_queried', 'ax': 'ax_icmi', 'ci': 'None', 'palette': 'gamma_palette'}), "(x='beta', y='val', hue='gamma', data=data_queried, ax=ax_icmi,\n ci=None, palette=gamma_palette)\n", (70645, 70744), True, 'import seaborn as sns\n'), ((71333, 71442), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""beta"""', 'y': '"""val"""', 'hue': '"""gamma"""', 'data': 'data_queried', 'ax': 'ax_tc', 'ci': 'None', 'palette': 'gamma_palette'}), "(x='beta', y='val', hue='gamma', data=data_queried, ax=ax_tc,\n ci=None, palette=gamma_palette)\n", (71345, 71442), True, 'import seaborn as sns\n'), ((72016, 72127), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""beta"""', 'y': '"""val"""', 'hue': '"""gamma"""', 'data': 'data_queried', 'ax': 'ax_dwkl', 'ci': 'None', 'palette': 'gamma_palette'}), "(x='beta', y='val', hue='gamma', data=data_queried, ax=ax_dwkl,\n ci=None, palette=gamma_palette)\n", (72028, 72127), True, 'import seaborn as sns\n'), ((72557, 72666), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""beta"""', 'y': '"""val"""', 'hue': '"""gamma"""', 'data': 'data_queried', 'ax': 'ax_cc', 'ci': 'None', 'palette': 'gamma_palette'}), "(x='beta', y='val', hue='gamma', data=data_queried, ax=ax_cc,\n ci=None, palette=gamma_palette)\n", (72569, 72666), True, 'import seaborn as sns\n'), ((73290, 73400), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""gamma"""', 'y': '"""val"""', 'hue': '"""beta"""', 'data': 
'data_queried', 'ax': 'ax_orth', 'ci': 'None', 'palette': 'beta_palette'}), "(x='gamma', y='val', hue='beta', data=data_queried, ax=ax_orth,\n ci=None, palette=beta_palette)\n", (73302, 73400), True, 'import seaborn as sns\n'), ((74835, 74860), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'h_pad': '(3)'}), '(h_pad=3)\n', (74851, 74860), True, 'import matplotlib.pyplot as plt\n'), ((74988, 75004), 'seaborn.reset_orig', 'sns.reset_orig', ([], {}), '()\n', (75002, 75004), True, 'import seaborn as sns\n'), ((77766, 77801), 'behavenet.fitting.utils.get_lab_example', 'get_lab_example', (['hparams', 'lab', 'expt'], {}), '(hparams, lab, expt)\n', (77781, 77801), False, 'from behavenet.fitting.utils import get_lab_example\n'), ((77895, 77995), 'behavenet.fitting.utils.get_best_model_and_data', 'get_best_model_and_data', (['hparams'], {'Model': 'None', 'load_data': '(True)', 'version': 'version', 'data_kwargs': 'None'}), '(hparams, Model=None, load_data=True, version=\n version, data_kwargs=None)\n', (77918, 77995), False, 'from behavenet.fitting.utils import get_best_model_and_data\n'), ((83883, 83918), 'behavenet.fitting.utils.get_lab_example', 'get_lab_example', (['hparams', 'lab', 'expt'], {}), '(hparams, lab, expt)\n', (83898, 83918), False, 'from behavenet.fitting.utils import get_lab_example\n'), ((84022, 84046), 'behavenet.fitting.utils.get_session_dir', 'get_session_dir', (['hparams'], {}), '(hparams)\n', (84037, 84046), False, 'from behavenet.fitting.utils import get_session_dir\n'), ((84073, 84094), 'behavenet.fitting.utils.get_expt_dir', 'get_expt_dir', (['hparams'], {}), '(hparams)\n', (84085, 84094), False, 'from behavenet.fitting.utils import get_expt_dir\n'), ((84112, 84158), 'behavenet.fitting.utils.experiment_exists', 'experiment_exists', (['hparams'], {'which_version': '(True)'}), '(hparams, which_version=True)\n', (84129, 84158), False, 'from behavenet.fitting.utils import experiment_exists\n'), ((84190, 84251), 
'behavenet.fitting.utils.get_best_model_and_data', 'get_best_model_and_data', (['hparams'], {'Model': 'None', 'version': 'version'}), '(hparams, Model=None, version=version)\n', (84213, 84251), False, 'from behavenet.fitting.utils import get_best_model_and_data\n'), ((94453, 94488), 'behavenet.fitting.utils.get_lab_example', 'get_lab_example', (['hparams', 'lab', 'expt'], {}), '(hparams, lab, expt)\n', (94468, 94488), False, 'from behavenet.fitting.utils import get_lab_example\n'), ((94592, 94616), 'behavenet.fitting.utils.get_session_dir', 'get_session_dir', (['hparams'], {}), '(hparams)\n', (94607, 94616), False, 'from behavenet.fitting.utils import get_session_dir\n'), ((94643, 94664), 'behavenet.fitting.utils.get_expt_dir', 'get_expt_dir', (['hparams'], {}), '(hparams)\n', (94655, 94664), False, 'from behavenet.fitting.utils import get_expt_dir\n'), ((94682, 94728), 'behavenet.fitting.utils.experiment_exists', 'experiment_exists', (['hparams'], {'which_version': '(True)'}), '(hparams, which_version=True)\n', (94699, 94728), False, 'from behavenet.fitting.utils import experiment_exists\n'), ((94760, 94821), 'behavenet.fitting.utils.get_best_model_and_data', 'get_best_model_and_data', (['hparams'], {'Model': 'None', 'version': 'version'}), '(hparams, Model=None, version=version)\n', (94783, 94821), False, 'from behavenet.fitting.utils import get_best_model_and_data\n'), ((3091, 3161), 'os.path.join', 'os.path.join', (["hparams['expt_dir']", "('version_%i' % version)", 'latent_file'], {}), "(hparams['expt_dir'], 'version_%i' % version, latent_file)\n", (3103, 3161), False, 'import os\n'), ((4141, 4233), 'behavenet.data.utils.load_labels_like_latents', 'load_labels_like_latents', (['hparams', 'sess_ids'], {'sess_idx': 'sess_idx', 'data_key': '"""labels_masks"""'}), "(hparams, sess_ids, sess_idx=sess_idx, data_key=\n 'labels_masks')\n", (4165, 4233), False, 'from behavenet.data.utils import load_labels_like_latents\n'), ((5142, 5160), 'numpy.vstack', 'np.vstack', 
(['values_'], {}), '(values_)\n', (5151, 5160), True, 'import numpy as np\n'), ((5188, 5210), 'numpy.vstack', 'np.vstack', (['values_list'], {}), '(values_list)\n', (5197, 5210), True, 'import numpy as np\n'), ((5241, 5280), 'numpy.nanpercentile', 'np.nanpercentile', (['values', 'min_p'], {'axis': '(0)'}), '(values, min_p, axis=0)\n', (5257, 5280), True, 'import numpy as np\n'), ((5297, 5336), 'numpy.nanpercentile', 'np.nanpercentile', (['values', 'max_p'], {'axis': '(0)'}), '(values, max_p, axis=0)\n', (5313, 5336), True, 'import numpy as np\n'), ((6872, 6894), 'copy.deepcopy', 'copy.deepcopy', (['hparams'], {}), '(hparams)\n', (6885, 6894), False, 'import copy\n'), ((7120, 7181), 'behavenet.data.utils.build_data_generator', 'build_data_generator', (['hparams_new', 'sess_ids'], {'export_csv': '(False)'}), '(hparams_new, sess_ids, export_csv=False)\n', (7140, 7181), False, 'from behavenet.data.utils import build_data_generator\n'), ((15230, 15256), 'behavenet.data.transforms.MakeOneHot2D', 'MakeOneHot2D', (['y_pix', 'x_pix'], {}), '(y_pix, x_pix)\n', (15242, 15256), False, 'from behavenet.data.transforms import MakeOneHot2D\n'), ((23361, 23387), 'behavenet.data.transforms.MakeOneHot2D', 'MakeOneHot2D', (['y_pix', 'x_pix'], {}), '(y_pix, x_pix)\n', (23373, 23387), False, 'from behavenet.data.transforms import MakeOneHot2D\n'), ((36073, 36106), 'behavenet.make_dir_if_not_exists', 'make_dir_if_not_exists', (['save_file'], {}), '(save_file)\n', (36095, 36106), False, 'from behavenet import make_dir_if_not_exists\n'), ((36115, 36182), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_file + '.' + format)"], {'dpi': '(300)', 'bbox_inches': '"""tight"""'}), "(save_file + '.' 
+ format, dpi=300, bbox_inches='tight')\n", (36126, 36182), True, 'import matplotlib.pyplot as plt\n'), ((39290, 39323), 'behavenet.make_dir_if_not_exists', 'make_dir_if_not_exists', (['save_file'], {}), '(save_file)\n', (39312, 39323), False, 'from behavenet import make_dir_if_not_exists\n'), ((39332, 39399), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_file + '.' + format)"], {'dpi': '(300)', 'bbox_inches': '"""tight"""'}), "(save_file + '.' + format, dpi=300, bbox_inches='tight')\n", (39343, 39399), True, 'import matplotlib.pyplot as plt\n'), ((41624, 41676), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(fig_width, fig_height)', 'dpi': '(300)'}), '(figsize=(fig_width, fig_height), dpi=300)\n', (41634, 41676), True, 'import matplotlib.pyplot as plt\n'), ((41690, 41699), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (41697, 41699), True, 'import matplotlib.pyplot as plt\n'), ((42957, 42980), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'pad': '(0)'}), '(pad=0)\n', (42973, 42980), True, 'import matplotlib.pyplot as plt\n'), ((42995, 43064), 'matplotlib.animation.ArtistAnimation', 'animation.ArtistAnimation', (['fig', 'ims_ani'], {'blit': '(True)', 'repeat_delay': '(1000)'}), '(fig, ims_ani, blit=True, repeat_delay=1000)\n', (43020, 43064), True, 'import matplotlib.animation as animation\n'), ((43073, 43122), 'behavenet.plotting.save_movie', 'save_movie', (['save_file', 'ani'], {'frame_rate': 'frame_rate'}), '(save_file, ani, frame_rate=frame_rate)\n', (43083, 43122), False, 'from behavenet.plotting import save_movie\n'), ((45060, 45086), 'numpy.ceil', 'np.ceil', (['(n_panels / n_cols)'], {}), '(n_panels / n_cols)\n', (45067, 45086), True, 'import numpy as np\n'), ((47220, 47240), 'behavenet.get_user_dir', 'get_user_dir', (['"""data"""'], {}), "('data')\n", (47232, 47240), False, 'from behavenet import get_user_dir\n'), ((47262, 47282), 'behavenet.get_user_dir', 'get_user_dir', (['"""save"""'], {}), "('save')\n", (47274, 
47282), False, 'from behavenet import get_user_dir\n'), ((53401, 53434), 'behavenet.make_dir_if_not_exists', 'make_dir_if_not_exists', (['save_file'], {}), '(save_file)\n', (53423, 53434), False, 'from behavenet import make_dir_if_not_exists\n'), ((57849, 57927), 'os.path.join', 'os.path.join', (["hparams['expt_dir']", "('version_%i' % version)", '"""r2_supervised.csv"""'], {}), "(hparams['expt_dir'], 'version_%i' % version, 'r2_supervised.csv')\n", (57861, 57927), False, 'import os\n'), ((73996, 74023), 'numpy.arange', 'np.arange', (['overlap.shape[1]'], {}), '(overlap.shape[1])\n', (74005, 74023), True, 'import numpy as np\n'), ((74050, 74077), 'numpy.arange', 'np.arange', (['overlap.shape[0]'], {}), '(overlap.shape[0])\n', (74059, 74077), True, 'import numpy as np\n'), ((74597, 74624), 'numpy.arange', 'np.arange', (['overlap.shape[1]'], {}), '(overlap.shape[1])\n', (74606, 74624), True, 'import numpy as np\n'), ((74651, 74678), 'numpy.arange', 'np.arange', (['overlap.shape[0]'], {}), '(overlap.shape[0])\n', (74660, 74678), True, 'import numpy as np\n'), ((75044, 75077), 'behavenet.make_dir_if_not_exists', 'make_dir_if_not_exists', (['save_file'], {}), '(save_file)\n', (75066, 75077), False, 'from behavenet import make_dir_if_not_exists\n'), ((75086, 75147), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_file + '.' + format)"], {'dpi': '(300)', 'format': 'format'}), "(save_file + '.' 
+ format, dpi=300, format=format)\n", (75097, 75147), True, 'import matplotlib.pyplot as plt\n'), ((96263, 96282), 'numpy.arange', 'np.arange', (['n_labels'], {}), '(n_labels)\n', (96272, 96282), True, 'import numpy as np\n'), ((3190, 3214), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (3204, 3214), False, 'import os\n'), ((3413, 3444), 'behavenet.fitting.eval.export_latents', 'export_latents', (['data_gen', 'model'], {}), '(data_gen, model)\n', (3427, 3444), False, 'from behavenet.fitting.eval import export_latents\n'), ((3645, 3707), 'behavenet.data.utils.load_labels_like_latents', 'load_labels_like_latents', (['hparams', 'sess_ids'], {'sess_idx': 'sess_idx'}), '(hparams, sess_ids, sess_idx=sess_idx)\n', (3669, 3707), False, 'from behavenet.data.utils import load_labels_like_latents\n'), ((11131, 11155), 'behavenet.fitting.utils.get_session_dir', 'get_session_dir', (['hparams'], {}), '(hparams)\n', (11146, 11155), False, 'from behavenet.fitting.utils import get_session_dir\n'), ((11924, 12024), 'behavenet.fitting.eval.get_reconstruction', 'get_reconstruction', (['model', 'ims_pt'], {'labels': 'labels_pt', 'labels_2d': 'labels_2d_pt', 'return_latents': '(True)'}), '(model, ims_pt, labels=labels_pt, labels_2d=labels_2d_pt,\n return_latents=True)\n', (11942, 12024), False, 'from behavenet.fitting.eval import get_reconstruction\n'), ((15487, 15527), 'numpy.linspace', 'np.linspace', (['mins[d]', 'maxes[d]', 'n_frames'], {}), '(mins[d], maxes[d], n_frames)\n', (15498, 15527), True, 'import numpy as np\n'), ((23549, 23589), 'numpy.linspace', 'np.linspace', (['mins[d]', 'maxes[d]', 'n_frames'], {}), '(mins[d], maxes[d], n_frames)\n', (23560, 23589), True, 'import numpy as np\n'), ((33278, 33296), 'numpy.copy', 'np.copy', (['labels_og'], {}), '(labels_og)\n', (33285, 33296), True, 'import numpy as np\n'), ((33319, 33340), 'numpy.where', 'np.where', (['(tmp[0] == 1)'], {}), '(tmp[0] == 1)\n', (33327, 33340), True, 'import numpy as np\n'), 
((33461, 33479), 'numpy.copy', 'np.copy', (['labels_og'], {}), '(labels_og)\n', (33468, 33479), True, 'import numpy as np\n'), ((45444, 45466), 'numpy.zeros', 'np.zeros', (['ims[0].shape'], {}), '(ims[0].shape)\n', (45452, 45466), True, 'import numpy as np\n'), ((45830, 45850), 'numpy.floor', 'np.floor', (['(i / n_cols)'], {}), '(i / n_cols)\n', (45838, 45850), True, 'import numpy as np\n'), ((46436, 46456), 'numpy.floor', 'np.floor', (['(i / n_cols)'], {}), '(i / n_cols)\n', (46444, 46456), True, 'import numpy as np\n'), ((59583, 59604), 'pandas.concat', 'pd.concat', (['metrics_df'], {}), '(metrics_df)\n', (59592, 59604), True, 'import pandas as pd\n'), ((59822, 59844), 'pandas.read_csv', 'pd.read_csv', (['save_file'], {}), '(save_file)\n', (59833, 59844), True, 'import pandas as pd\n'), ((74134, 74155), 'numpy.max', 'np.max', (['gamma_weights'], {}), '(gamma_weights)\n', (74140, 74155), True, 'import numpy as np\n'), ((74735, 74756), 'numpy.max', 'np.max', (['gamma_weights'], {}), '(gamma_weights)\n', (74741, 74756), True, 'import numpy as np\n'), ((79329, 79607), 'behavenet.plotting.decoder_utils.plot_neural_reconstruction_traces', 'plot_neural_reconstruction_traces', (['labels_og', 'labels_pred'], {'scale': 'plot_scale', 'save_file': 'save_file_trial', 'format': 'format', 'xtick_locs': 'xtick_locs', 'frame_rate': 'frame_rate', 'max_traces': 'max_traces', 'add_r2': 'add_r2', 'add_legend': 'add_legend', 'colored_predictions': 'colored_predictions'}), '(labels_og, labels_pred, scale=plot_scale,\n save_file=save_file_trial, format=format, xtick_locs=xtick_locs,\n frame_rate=frame_rate, max_traces=max_traces, add_r2=add_r2, add_legend\n =add_legend, colored_predictions=colored_predictions)\n', (79362, 79607), False, 'from behavenet.plotting.decoder_utils import plot_neural_reconstruction_traces\n'), ((79906, 79930), 'numpy.vstack', 'np.vstack', (['labels_og_all'], {}), '(labels_og_all)\n', (79915, 79930), True, 'import numpy as np\n'), ((79932, 79958), 
'numpy.vstack', 'np.vstack', (['labels_pred_all'], {}), '(labels_pred_all)\n', (79941, 79958), True, 'import numpy as np\n'), ((84816, 84842), 'copy.deepcopy', 'copy.deepcopy', (['label_range'], {}), '(label_range)\n', (84829, 84842), False, 'import copy\n'), ((85628, 85646), 'numpy.copy', 'np.copy', (['labels_np'], {}), '(labels_np)\n', (85635, 85646), True, 'import numpy as np\n'), ((87369, 87392), 'numpy.arange', 'np.arange', (['n_ae_latents'], {}), '(n_ae_latents)\n', (87378, 87392), True, 'import numpy as np\n'), ((87615, 87638), 'numpy.arange', 'np.arange', (['n_ae_latents'], {}), '(n_ae_latents)\n', (87624, 87638), True, 'import numpy as np\n'), ((96316, 96339), 'numpy.arange', 'np.arange', (['n_ae_latents'], {}), '(n_ae_latents)\n', (96325, 96339), True, 'import numpy as np\n'), ((96428, 96462), 'numpy.arange', 'np.arange', (["hparams['n_ae_latents']"], {}), "(hparams['n_ae_latents'])\n", (96437, 96462), True, 'import numpy as np\n'), ((98456, 98470), 'numpy.vstack', 'np.vstack', (['ims'], {}), '(ims)\n', (98465, 98470), True, 'import numpy as np\n'), ((98802, 98845), 'numpy.array', 'np.array', (['([latents_np[b][batch_idx, :]] * 3)'], {}), '([latents_np[b][batch_idx, :]] * 3)\n', (98810, 98845), True, 'import numpy as np\n'), ((100535, 100549), 'numpy.vstack', 'np.vstack', (['ims'], {}), '(ims)\n', (100544, 100549), True, 'import numpy as np\n'), ((3798, 3820), 'copy.deepcopy', 'copy.deepcopy', (['hparams'], {}), '(hparams)\n', (3811, 3820), False, 'import copy\n'), ((3917, 4007), 'behavenet.data.utils.load_labels_like_latents', 'load_labels_like_latents', (['hparams2', 'sess_ids'], {'sess_idx': 'sess_idx', 'data_key': '"""labels_sc"""'}), "(hparams2, sess_ids, sess_idx=sess_idx, data_key=\n 'labels_sc')\n", (3941, 4007), False, 'from behavenet.data.utils import load_labels_like_latents\n'), ((15615, 15661), 'numpy.linspace', 'np.linspace', (['mins_sc[d]', 'maxes_sc[d]', 'n_frames'], {}), '(mins_sc[d], maxes_sc[d], n_frames)\n', (15626, 15661), True, 
'import numpy as np\n'), ((16116, 16134), 'numpy.copy', 'np.copy', (['latents_0'], {}), '(latents_0)\n', (16123, 16134), True, 'import numpy as np\n'), ((19141, 19163), 'numpy.copy', 'np.copy', (['im_tmp[0, ch]'], {}), '(im_tmp[0, ch])\n', (19148, 19163), True, 'import numpy as np\n'), ((23677, 23723), 'numpy.linspace', 'np.linspace', (['mins_sc[d]', 'maxes_sc[d]', 'n_frames'], {}), '(mins_sc[d], maxes_sc[d], n_frames)\n', (23688, 23723), True, 'import numpy as np\n'), ((24156, 24174), 'numpy.copy', 'np.copy', (['latents_0'], {}), '(latents_0)\n', (24163, 24174), True, 'import numpy as np\n'), ((26991, 27013), 'numpy.copy', 'np.copy', (['im_tmp[0, ch]'], {}), '(im_tmp[0, ch])\n', (26998, 27013), True, 'import numpy as np\n'), ((33365, 33382), 'numpy.hstack', 'np.hstack', (['[x, y]'], {}), '([x, y])\n', (33374, 33382), True, 'import numpy as np\n'), ((57956, 57981), 'os.path.exists', 'os.path.exists', (['save_file'], {}), '(save_file)\n', (57970, 57981), False, 'import os\n'), ((58015, 58040), 'os.path.exists', 'os.path.exists', (['save_file'], {}), '(save_file)\n', (58029, 58040), False, 'import os\n'), ((60820, 60855), 'behavenet.fitting.utils.get_lab_example', 'get_lab_example', (['hparams', 'lab', 'expt'], {}), '(hparams, lab, expt)\n', (60835, 60855), False, 'from behavenet.fitting.utils import get_lab_example\n'), ((60995, 61019), 'behavenet.fitting.utils.get_session_dir', 'get_session_dir', (['hparams'], {}), '(hparams)\n', (61010, 61019), False, 'from behavenet.fitting.utils import get_session_dir\n'), ((61058, 61079), 'behavenet.fitting.utils.get_expt_dir', 'get_expt_dir', (['hparams'], {}), '(hparams)\n', (61070, 61079), False, 'from behavenet.fitting.utils import get_expt_dir\n'), ((61109, 61155), 'behavenet.fitting.utils.experiment_exists', 'experiment_exists', (['hparams'], {'which_version': '(True)'}), '(hparams, which_version=True)\n', (61126, 61155), False, 'from behavenet.fitting.utils import experiment_exists\n'), ((61747, 61824), 
'behavenet.fitting.utils.get_best_model_and_data', 'get_best_model_and_data', (['hparams'], {'Model': 'None', 'load_data': '(True)', 'version': 'version'}), '(hparams, Model=None, load_data=True, version=version)\n', (61770, 61824), False, 'from behavenet.fitting.utils import get_best_model_and_data\n'), ((63538, 63573), 'behavenet.fitting.utils.get_lab_example', 'get_lab_example', (['hparams', 'lab', 'expt'], {}), '(hparams, lab, expt)\n', (63553, 63573), False, 'from behavenet.fitting.utils import get_lab_example\n'), ((63713, 63737), 'behavenet.fitting.utils.get_session_dir', 'get_session_dir', (['hparams'], {}), '(hparams)\n', (63728, 63737), False, 'from behavenet.fitting.utils import get_session_dir\n'), ((63776, 63797), 'behavenet.fitting.utils.get_expt_dir', 'get_expt_dir', (['hparams'], {}), '(hparams)\n', (63788, 63797), False, 'from behavenet.fitting.utils import get_expt_dir\n'), ((63827, 63873), 'behavenet.fitting.utils.experiment_exists', 'experiment_exists', (['hparams'], {'which_version': '(True)'}), '(hparams, which_version=True)\n', (63844, 63873), False, 'from behavenet.fitting.utils import experiment_exists\n'), ((64449, 64526), 'behavenet.fitting.utils.get_best_model_and_data', 'get_best_model_and_data', (['hparams'], {'Model': 'None', 'load_data': '(True)', 'version': 'version'}), '(hparams, Model=None, load_data=True, version=version)\n', (64472, 64526), False, 'from behavenet.fitting.utils import get_best_model_and_data\n'), ((65019, 65049), 'numpy.concatenate', 'np.concatenate', (['[A, B]'], {'axis': '(0)'}), '([A, B], axis=0)\n', (65033, 65049), True, 'import numpy as np\n'), ((65076, 65093), 'numpy.matmul', 'np.matmul', (['C', 'C.T'], {}), '(C, C.T)\n', (65085, 65093), True, 'import numpy as np\n'), ((65218, 65262), 'behavenet.plotting.load_latents', 'load_latents', (['hparams', 'version'], {'dtype': '"""test"""'}), "(hparams, version, dtype='test')\n", (65230, 65262), False, 'from behavenet.plotting import load_latents\n'), ((73860, 
73880), 'numpy.min', 'np.min', (['beta_weights'], {}), '(beta_weights)\n', (73866, 73880), True, 'import numpy as np\n'), ((73882, 73903), 'numpy.min', 'np.min', (['gamma_weights'], {}), '(gamma_weights)\n', (73888, 73903), True, 'import numpy as np\n'), ((74461, 74481), 'numpy.min', 'np.min', (['beta_weights'], {}), '(beta_weights)\n', (74467, 74481), True, 'import numpy as np\n'), ((74483, 74504), 'numpy.max', 'np.max', (['gamma_weights'], {}), '(gamma_weights)\n', (74489, 74504), True, 'import numpy as np\n'), ((96531, 96550), 'numpy.arange', 'np.arange', (['n_labels'], {}), '(n_labels)\n', (96540, 96550), True, 'import numpy as np\n'), ((96573, 96607), 'numpy.arange', 'np.arange', (["hparams['n_ae_latents']"], {}), "(hparams['n_ae_latents'])\n", (96582, 96607), True, 'import numpy as np\n'), ((97026, 97069), 'numpy.array', 'np.array', (['([latents_np[b][batch_idx, :]] * 3)'], {}), '([latents_np[b][batch_idx, :]] * 3)\n', (97034, 97069), True, 'import numpy as np\n'), ((11436, 11489), 'h5py.File', 'h5py.File', (['hdf5_file', '"""r"""'], {'libver': '"""latest"""', 'swmr': '(True)'}), "(hdf5_file, 'r', libver='latest', swmr=True)\n", (11445, 11489), False, 'import h5py\n'), ((20011, 20121), 'behavenet.plotting.get_crop', 'get_crop', (['im_tmp[0, 0]', "crop_kwargs['y_0']", "crop_kwargs['y_ext']", "crop_kwargs['x_0']", "crop_kwargs['x_ext']"], {}), "(im_tmp[0, 0], crop_kwargs['y_0'], crop_kwargs['y_ext'],\n crop_kwargs['x_0'], crop_kwargs['x_ext'])\n", (20019, 20121), False, 'from behavenet.plotting import get_crop\n'), ((27861, 27971), 'behavenet.plotting.get_crop', 'get_crop', (['im_tmp[0, 0]', "crop_kwargs['y_0']", "crop_kwargs['y_ext']", "crop_kwargs['x_0']", "crop_kwargs['x_ext']"], {}), "(im_tmp[0, 0], crop_kwargs['y_0'], crop_kwargs['y_ext'],\n crop_kwargs['x_0'], crop_kwargs['x_ext'])\n", (27869, 27971), False, 'from behavenet.plotting import get_crop\n'), ((31721, 31800), 'behavenet.fitting.eval.get_reconstruction', 'get_reconstruction', (['model', 'vec'], 
{'apply_inverse_transform': 'apply_inverse_transform'}), '(model, vec, apply_inverse_transform=apply_inverse_transform)\n', (31739, 31800), False, 'from behavenet.fitting.eval import get_reconstruction\n'), ((32591, 32702), 'behavenet.plotting.get_crop', 'get_crop', (['im_tmp[0, ch]', "crop_kwargs['y_0']", "crop_kwargs['y_ext']", "crop_kwargs['x_0']", "crop_kwargs['x_ext']"], {}), "(im_tmp[0, ch], crop_kwargs['y_0'], crop_kwargs['y_ext'],\n crop_kwargs['x_0'], crop_kwargs['x_ext'])\n", (32599, 32702), False, 'from behavenet.plotting import get_crop\n'), ((53106, 53129), 'pandas.isna', 'pd.isna', (['metrics_df.val'], {}), '(metrics_df.val)\n', (53113, 53129), True, 'import pandas as pd\n'), ((58734, 58749), 'numpy.ones_like', 'np.ones_like', (['y'], {}), '(y)\n', (58746, 58749), True, 'import numpy as np\n'), ((61443, 61529), 'behavenet.plotting.load_metrics_csv_as_df', 'load_metrics_csv_as_df', (['hparams', 'lab', 'expt', 'metrics_list'], {'version': 'None', 'test': '(True)'}), '(hparams, lab, expt, metrics_list, version=None, test\n =True)\n', (61465, 61529), False, 'from behavenet.plotting import load_metrics_csv_as_df\n'), ((64164, 64250), 'behavenet.plotting.load_metrics_csv_as_df', 'load_metrics_csv_as_df', (['hparams', 'lab', 'expt', 'metrics_list'], {'version': 'None', 'test': '(True)'}), '(hparams, lab, expt, metrics_list, version=None, test\n =True)\n', (64186, 64250), False, 'from behavenet.plotting import load_metrics_csv_as_df\n'), ((78960, 78999), 'numpy.zeros', 'np.zeros', (['(n_blank, labels_og.shape[1])'], {}), '((n_blank, labels_og.shape[1]))\n', (78968, 78999), True, 'import numpy as np\n'), ((79045, 79086), 'numpy.zeros', 'np.zeros', (['(n_blank, labels_pred.shape[1])'], {}), '((n_blank, labels_pred.shape[1]))\n', (79053, 79086), True, 'import numpy as np\n'), ((97150, 97192), 'numpy.array', 'np.array', (['([labels_np[b][batch_idx, :]] * 3)'], {}), '([labels_np[b][batch_idx, :]] * 3)\n', (97158, 97192), True, 'import numpy as np\n'), ((18283, 
18301), 'numpy.copy', 'np.copy', (['latents_0'], {}), '(latents_0)\n', (18290, 18301), True, 'import numpy as np\n'), ((18496, 18560), 'behavenet.fitting.eval.get_reconstruction', 'get_reconstruction', (['model', 'latents'], {'apply_inverse_transform': '(True)'}), '(model, latents, apply_inverse_transform=True)\n', (18514, 18560), False, 'from behavenet.fitting.eval import get_reconstruction\n'), ((18651, 18668), 'numpy.copy', 'np.copy', (['labels_0'], {}), '(labels_0)\n', (18658, 18668), True, 'import numpy as np\n'), ((26252, 26270), 'numpy.copy', 'np.copy', (['latents_0'], {}), '(latents_0)\n', (26259, 26270), True, 'import numpy as np\n'), ((26405, 26469), 'behavenet.fitting.eval.get_reconstruction', 'get_reconstruction', (['model', 'latents'], {'apply_inverse_transform': '(True)'}), '(model, latents, apply_inverse_transform=True)\n', (26423, 26469), False, 'from behavenet.fitting.eval import get_reconstruction\n'), ((26560, 26577), 'numpy.copy', 'np.copy', (['labels_0'], {}), '(labels_0)\n', (26567, 26577), True, 'import numpy as np\n'), ((32035, 32095), 'behavenet.fitting.eval.get_reconstruction', 'get_reconstruction', (['model', 'vec'], {'apply_inverse_transform': '(True)'}), '(model, vec, apply_inverse_transform=True)\n', (32053, 32095), False, 'from behavenet.fitting.eval import get_reconstruction\n'), ((32855, 32877), 'numpy.copy', 'np.copy', (['im_tmp[0, ch]'], {}), '(im_tmp[0, ch])\n', (32862, 32877), True, 'import numpy as np\n'), ((59047, 59104), 'sklearn.metrics.r2_score', 'r2_score', (['y_true', 'y_pred'], {'multioutput': '"""variance_weighted"""'}), "(y_true, y_pred, multioutput='variance_weighted')\n", (59055, 59104), False, 'from sklearn.metrics import r2_score\n'), ((65727, 65765), 'numpy.ceil', 'np.ceil', (['(latents.shape[0] / batch_size)'], {}), '(latents.shape[0] / batch_size)\n', (65734, 65765), True, 'import numpy as np\n'), ((98197, 98221), 'numpy.zeros', 'np.zeros', (['(y_pix, x_pix)'], {}), '((y_pix, x_pix))\n', (98205, 98221), True, 
'import numpy as np\n'), ((100250, 100274), 'numpy.zeros', 'np.zeros', (['(y_pix, x_pix)'], {}), '((y_pix, x_pix))\n', (100258, 100274), True, 'import numpy as np\n'), ((19507, 19543), 'numpy.copy', 'np.copy', (['labels_sc[0, input_idxs[0]]'], {}), '(labels_sc[0, input_idxs[0]])\n', (19514, 19543), True, 'import numpy as np\n'), ((19577, 19613), 'numpy.copy', 'np.copy', (['labels_sc[0, input_idxs[1]]'], {}), '(labels_sc[0, input_idxs[1]])\n', (19584, 19613), True, 'import numpy as np\n'), ((27357, 27393), 'numpy.copy', 'np.copy', (['labels_sc[0, input_idxs[0]]'], {}), '(labels_sc[0, input_idxs[0]])\n', (27364, 27393), True, 'import numpy as np\n'), ((27427, 27463), 'numpy.copy', 'np.copy', (['labels_sc[0, input_idxs[1]]'], {}), '(labels_sc[0, input_idxs[1]])\n', (27434, 27463), True, 'import numpy as np\n'), ((32945, 32962), 'behavenet.plotting.concat', 'concat', (['im_tmp[0]'], {}), '(im_tmp[0])\n', (32951, 32962), False, 'from behavenet.plotting import concat\n'), ((51570, 51605), 'behavenet.fitting.utils.get_lab_example', 'get_lab_example', (['hparams', 'lab', 'expt'], {}), '(hparams, lab, expt)\n', (51585, 51605), False, 'from behavenet.fitting.utils import get_lab_example\n'), ((51781, 51805), 'behavenet.fitting.utils.get_session_dir', 'get_session_dir', (['hparams'], {}), '(hparams)\n', (51796, 51805), False, 'from behavenet.fitting.utils import get_session_dir\n'), ((51856, 51877), 'behavenet.fitting.utils.get_expt_dir', 'get_expt_dir', (['hparams'], {}), '(hparams)\n', (51868, 51877), False, 'from behavenet.fitting.utils import get_expt_dir\n'), ((51919, 51965), 'behavenet.fitting.utils.experiment_exists', 'experiment_exists', (['hparams'], {'which_version': '(True)'}), '(hparams, which_version=True)\n', (51936, 51965), False, 'from behavenet.fitting.utils import experiment_exists\n'), ((59143, 59169), 'numpy.square', 'np.square', (['(y_true - y_pred)'], {}), '(y_true - y_pred)\n', (59152, 59169), True, 'import numpy as np\n'), ((16582, 16607), 
'torch.from_numpy', 'torch.from_numpy', (['latents'], {}), '(latents)\n', (16598, 16607), False, 'import torch\n'), ((17481, 17506), 'torch.from_numpy', 'torch.from_numpy', (['latents'], {}), '(latents)\n', (17497, 17506), False, 'import torch\n'), ((19755, 19792), 'numpy.copy', 'np.copy', (['labels_sc[0, marker_idxs[0]]'], {}), '(labels_sc[0, marker_idxs[0]])\n', (19762, 19792), True, 'import numpy as np\n'), ((19826, 19863), 'numpy.copy', 'np.copy', (['labels_sc[0, marker_idxs[1]]'], {}), '(labels_sc[0, marker_idxs[1]])\n', (19833, 19863), True, 'import numpy as np\n'), ((24566, 24591), 'torch.from_numpy', 'torch.from_numpy', (['latents'], {}), '(latents)\n', (24582, 24591), False, 'import torch\n'), ((25465, 25490), 'torch.from_numpy', 'torch.from_numpy', (['latents'], {}), '(latents)\n', (25481, 25490), False, 'import torch\n'), ((27605, 27642), 'numpy.copy', 'np.copy', (['labels_sc[0, marker_idxs[0]]'], {}), '(labels_sc[0, marker_idxs[0]])\n', (27612, 27642), True, 'import numpy as np\n'), ((27676, 27713), 'numpy.copy', 'np.copy', (['labels_sc[0, marker_idxs[1]]'], {}), '(labels_sc[0, marker_idxs[1]])\n', (27683, 27713), True, 'import numpy as np\n'), ((52211, 52281), 'behavenet.plotting.load_metrics_csv_as_df', 'load_metrics_csv_as_df', (['hparams', 'lab', 'expt', 'metrics_list'], {'version': 'None'}), '(hparams, lab, expt, metrics_list, version=None)\n', (52233, 52281), False, 'from behavenet.plotting import load_metrics_csv_as_df\n'), ((65558, 65576), 'numpy.abs', 'np.abs', (['corr[0, 1]'], {}), '(corr[0, 1])\n', (65564, 65576), True, 'import numpy as np\n'), ((17225, 17251), 'torch.from_numpy', 'torch.from_numpy', (['labels_0'], {}), '(labels_0)\n', (17241, 17251), False, 'import torch\n'), ((25209, 25235), 'torch.from_numpy', 'torch.from_numpy', (['labels_0'], {}), '(labels_0)\n', (25225, 25235), False, 'import torch\n'), ((66189, 66207), 'numpy.abs', 'np.abs', (['corr[0, 1]'], {}), '(corr[0, 1])\n', (66195, 66207), True, 'import numpy as np\n'), ((18974, 
18998), 'torch.from_numpy', 'torch.from_numpy', (['labels'], {}), '(labels)\n', (18990, 18998), False, 'import torch\n'), ((26824, 26848), 'torch.from_numpy', 'torch.from_numpy', (['labels'], {}), '(labels)\n', (26840, 26848), False, 'import torch\n'), ((65363, 65379), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (65371, 65379), True, 'import numpy as np\n'), ((31606, 31632), 'torch.from_numpy', 'torch.from_numpy', (['labels_0'], {}), '(labels_0)\n', (31622, 31632), False, 'import torch\n'), ((65978, 65994), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (65986, 65994), True, 'import numpy as np\n'), ((32272, 32293), 'torch.from_numpy', 'torch.from_numpy', (['vec'], {}), '(vec)\n', (32288, 32293), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 30 09:19:37 2021
@author: <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
import os
# Absolute paths to the USPS digit dataset (whitespace-separated text files,
# one sample per line: class label followed by 256 pixel values).
uspsdatatrain = os.path.abspath("../Datasets/USPS/USPS_train.txt")
uspsdatatest = os.path.abspath("../Datasets/USPS/USPS_test.txt")
def load_usps(mode="train"):
    """Load the USPS digit dataset from disk.

    Parameters
    ----------
    mode : str, optional
        ``"train"`` selects the training file; any other value selects
        the test file. Default is ``"train"``.

    Returns
    -------
    tuple of np.ndarray
        ``(pixels, labels)``: the pixel matrix (one flattened digit per
        row) and the integer class label of each row.
    """
    path = uspsdatatest if mode != "train" else uspsdatatrain
    rows = []
    with open(path, "r") as handle:
        handle.readline()  # discard the header line
        for line in handle:
            fields = line.split()
            # Keep only real data lines (label + pixel values).
            if len(fields) > 2:
                rows.append([float(v) for v in fields])
    full = np.array(rows)
    # Column 0 holds the label; the remaining columns are pixel values.
    return full[:, 1:], full[:, 0].astype(int)
def get_usps(l, datax, datay):
    """Extract the samples of one class, or of several classes.

    Parameters
    ----------
    l : int or list of int
        Class label, or list of class labels, to extract.
    datax : np.ndarray
        Sample matrix, one example per row.
    datay : np.ndarray
        Integer label of each row of ``datax``.

    Returns
    -------
    tuple of np.ndarray
        ``(subx, suby)``: the rows of ``datax`` whose label is (in) ``l``
        and the matching labels, class blocks concatenated in the order
        the labels were requested.
    """
    if type(l) is not list:
        mask = datay == l
        return datax[mask, :], datay[mask]
    # Several classes requested: extract each one and stack the pieces.
    pieces_x = []
    pieces_y = []
    for label in l:
        sub_x, sub_y = get_usps(label, datax, datay)
        pieces_x.append(sub_x)
        pieces_y.append(sub_y)
    return np.vstack(pieces_x), np.hstack(pieces_y)
def show_usps(data):
    """Render a flattened USPS digit (256 values) as a 16x16 grayscale image."""
    plt.imshow(data.reshape((16,16)),interpolation="nearest",cmap="gray") | [
"os.path.abspath",
"numpy.array",
"numpy.vstack",
"numpy.hstack"
] | [((165, 215), 'os.path.abspath', 'os.path.abspath', (['"""../Datasets/USPS/USPS_train.txt"""'], {}), "('../Datasets/USPS/USPS_train.txt')\n", (180, 215), False, 'import os\n'), ((231, 280), 'os.path.abspath', 'os.path.abspath', (['"""../Datasets/USPS/USPS_test.txt"""'], {}), "('../Datasets/USPS/USPS_test.txt')\n", (246, 280), False, 'import os\n'), ((706, 720), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (714, 720), True, 'import numpy as np\n'), ((1229, 1246), 'numpy.vstack', 'np.vstack', (['tmp[0]'], {}), '(tmp[0])\n', (1238, 1246), True, 'import numpy as np\n'), ((1247, 1264), 'numpy.hstack', 'np.hstack', (['tmp[1]'], {}), '(tmp[1])\n', (1256, 1264), True, 'import numpy as np\n')] |
# from nipype import config
# config.enable_debug_mode()
# Importing necessary packages
import os
import sys
import os.path as op
import glob
import json
import nipype
import matplotlib.pyplot as pl
import seaborn as sn
import pandas as pd
import numpy as np
from IPython import embed as shell
#
# run as in:
#
# for s in {001..049}
# do
# echo sub-$s
# python postprocessing.py sub-$s rl test &
# done
from pearl.surf.surf_draw import av_surf_across_sjs
from pearl.utils.utils import natural_sort
import pearl.rl as rl
import pearl.stop as stop
# the subject id and experiment vars are commandline arguments to this script.
# Experiment and phase are taken from the command line; subject id is fixed
# to 'all' because this script aggregates across subjects.
sub_id = 'all'
experiment = str(sys.argv[1])
phase = str(sys.argv[2])
# from pearl.parameters import *
# execfile('pearl/parameters.py')
# NOTE(review): this exec injects the globals used below (e.g. `opd`,
# `raw_data_dir`, `analysis_info`) — they are defined in pearl/parameters.py.
exec(open("pearl/parameters.py").read())
try:
    os.makedirs(os.path.join(opd, 'surf'))
    os.makedirs(opd)
except:
    # Output directories may already exist; any other makedirs failure is
    # silently ignored here as well (bare except kept as-is).
    pass
# shell()
sjs_info = pd.read_csv(os.path.join(raw_data_dir, 'participants.tsv'), delimiter = '\t')
# Participant selection depends on experiment/phase; `Incl_ex` flags usable
# subjects in participants.tsv.
if (experiment == 'stop') | ((experiment == 'rl') and (phase == 'test')):
    which_sjs = (sjs_info['Incl_ex'] == 'ok')
    new_good_names = np.array(sjs_info['participant_id'][which_sjs])
    good_sjs_info = sjs_info[which_sjs]
elif (experiment == 'rl') and (phase == 'learn'):
    # '+' on two boolean Series acts as a logical OR here.
    which_sjs = (sjs_info['Incl_ex'] == 'ok') + (sjs_info['Incl_ex'] == 'stop')
    new_good_names = np.array(sjs_info['participant_id'][which_sjs])
    good_sjs_info = sjs_info[which_sjs]
# elif (experiment == 'rl') and (phase == 'learn'):
#     which_sjs = (sjs_info['good_bad'] == 'good')
#     new_good_names = np.array(sjs_info['participant_id'][which_sjs])
#     good_sjs_info = sjs_info[which_sjs]
print(len(new_good_names))
print(new_good_names)
if phase == 'test' and experiment == 'rl':
    # Candidate per-subject covariate sets for the deconvolution plots;
    # one dict per model, keyed by trial type (ww / ll / wl_u).
    sj_covariates_dicts = [
        {
        'ww': ['SSRT'],
        'll': ['SSRT'],
        'wl_u': ['SSRT'],
        },
        {
        'ww': ['SSRT', 'ac_ww'],
        'll': ['SSRT', 'ac_ll'],
        'wl_u': ['SSRT', 'ac_wlu'],
        # 'wl_l': ['SSRT', 'ac_wll'],
        },
        {
        'ww': ['SSRT', 'Beta', 'ac_ww'],
        'll': ['SSRT', 'Beta', 'ac_ll'],
        'wl_u': ['SSRT', 'Beta', 'ac_wlu'],
        # 'wl_l': ['SSRT', 'Beta', 'ac_wll'],
        },
        {
        'ww': ['SSRT', 'Beta'],
        'll': ['SSRT', 'Beta'],
        'wl_u': ['SSRT', 'Beta'],
        # 'wl_l': ['SSRT', 'Beta', 'ac_wll'],
        },
        {
        'ww': ['SSRT', 'Beta','medRT_ww'],
        'll': ['SSRT', 'Beta','medRT_ll'],
        'wl_u': ['SSRT', 'Beta','medRT_wlu'],
        # 'wl_l': ['SSRT', 'Beta', 'ac_wll'],
        }
    ]
    # NOTE(review): this loop body only assigns a constant and `roi` is
    # overwritten below — looks like a leftover from an earlier per-ROI pass.
    for roi in analysis_info['rl_test_rois']: # , 'temporal_middle'
        which_signal_selection = 'projection'
    # a final plot, first select which covariates to use across subjects
    sj_cov_nr = 3
    sj_covariates = sj_covariates_dicts[sj_cov_nr]
    roi = 'maxSTN25exc_flirt'
    fn_suffix = 'publication_%i'%sj_cov_nr
    which_signal_selection = 'projection'
    # Gather each subject's deconvolution results file, keeping only those
    # that actually exist on disk.
    all_deco_files = [os.path.join(os.path.split(opd)[0], ngn, 'roi', phase, roi + '_deco_test_%s.tsv'%which_signal_selection) for ngn in new_good_names]
    all_deco_files = [af for af in all_deco_files if os.path.isfile(af)]
    # Two publication figures, differing only in the covariate used for the
    # second (scatter) panel: SSRT vs Beta.
    rl.plot.plot_deco_results_for_publication(all_deco_files,
                good_sjs_info, roi, analysis_info['deconvolution_interval'],
                output_filename = op.join(opd, roi + '_deco_%s_%s.pdf'%(fn_suffix, 'SSRT')),
                sj_covariates = sj_covariates,
                rl_test_FIR_amplitude_range = analysis_info['rl_test_FIR_amplitude_range'],
                rl_test_FIR_pe_range = analysis_info['rl_test_FIR_pe_range'],
                second_plot_covariate = 'SSRT')
    rl.plot.plot_deco_results_for_publication(all_deco_files,
                good_sjs_info, roi, analysis_info['deconvolution_interval'],
                output_filename = op.join(opd, roi + '_deco_%s_%s.pdf'%(fn_suffix, 'Beta')),
                sj_covariates = sj_covariates,
                rl_test_FIR_amplitude_range = analysis_info['rl_test_FIR_amplitude_range'],
                rl_test_FIR_pe_range = analysis_info['rl_test_FIR_pe_range'],
                second_plot_covariate = 'Beta')
if experiment == 'stop':
    # Candidate per-subject covariate sets for the stop-signal deconvolution
    # plots; one dict per model, keyed by trial outcome.
    sj_covariates_dicts = [
        {
        'correct': ['SSRT'],
        'succesful_stop': ['SSRT'],
        'Failed_stop': ['SSRT'],
        },
        {
        'correct': ['SSRT', 'Beta'],
        'succesful_stop': ['SSRT', 'Beta'],
        'Failed_stop': ['SSRT', 'Beta'],
        },
        {
        'correct': ['SSRT', 'Beta', 'ac_wsuccesful_stop'],
        'succesful_stop': ['SSRT', 'Beta', 'ac_wsuccesful_stop'],
        'Failed_stop': ['SSRT', 'Beta', 'ac_wsuccesful_stop'],
        }
    ]
    # a final plot
    sj_cov_nr = 1
    sj_covariates = sj_covariates_dicts[sj_cov_nr]
    roi = 'maxSTN25exc_flirt'
    fn_suffix = 'publication_%i'%1
    # Gather each subject's deconvolution results file, keeping only those
    # that actually exist on disk.
    all_deco_files = [os.path.join(os.path.split(opd)[0], ngn, 'roi', phase, roi + '_deco_stop.tsv') for ngn in new_good_names]
    all_deco_files = [af for af in all_deco_files if os.path.isfile(af)]
    stop.plot.plot_deco_results_for_publication(all_deco_files,
                good_sjs_info, roi, analysis_info['deconvolution_interval'],
                output_filename = op.join(opd, roi + '_deco_%s_%s.pdf'%(fn_suffix, 'SSRT')),
                sj_covariates = sj_covariates,
                stop_FIR_amplitude_range = analysis_info['stop_FIR_amplitude_range'],
                stop_FIR_pe_range = analysis_info['stop_FIR_pe_range'],
                second_plot_covariate = 'SSRT')
| [
"os.makedirs",
"os.path.join",
"os.path.split",
"os.path.isfile",
"numpy.array"
] | [((871, 887), 'os.makedirs', 'os.makedirs', (['opd'], {}), '(opd)\n', (882, 887), False, 'import os\n'), ((940, 986), 'os.path.join', 'os.path.join', (['raw_data_dir', '"""participants.tsv"""'], {}), "(raw_data_dir, 'participants.tsv')\n", (952, 986), False, 'import os\n'), ((1147, 1194), 'numpy.array', 'np.array', (["sjs_info['participant_id'][which_sjs]"], {}), "(sjs_info['participant_id'][which_sjs])\n", (1155, 1194), True, 'import numpy as np\n'), ((840, 865), 'os.path.join', 'os.path.join', (['opd', '"""surf"""'], {}), "(opd, 'surf')\n", (852, 865), False, 'import os\n'), ((1387, 1434), 'numpy.array', 'np.array', (["sjs_info['participant_id'][which_sjs]"], {}), "(sjs_info['participant_id'][which_sjs])\n", (1395, 1434), True, 'import numpy as np\n'), ((3327, 3345), 'os.path.isfile', 'os.path.isfile', (['af'], {}), '(af)\n', (3341, 3345), False, 'import os\n'), ((3528, 3587), 'os.path.join', 'op.join', (['opd', "(roi + '_deco_%s_%s.pdf' % (fn_suffix, 'SSRT'))"], {}), "(opd, roi + '_deco_%s_%s.pdf' % (fn_suffix, 'SSRT'))\n", (3535, 3587), True, 'import os.path as op\n'), ((4050, 4109), 'os.path.join', 'op.join', (['opd', "(roi + '_deco_%s_%s.pdf' % (fn_suffix, 'Beta'))"], {}), "(opd, roi + '_deco_%s_%s.pdf' % (fn_suffix, 'Beta'))\n", (4057, 4109), True, 'import os.path as op\n'), ((5256, 5274), 'os.path.isfile', 'os.path.isfile', (['af'], {}), '(af)\n', (5270, 5274), False, 'import os\n'), ((5460, 5519), 'os.path.join', 'op.join', (['opd', "(roi + '_deco_%s_%s.pdf' % (fn_suffix, 'SSRT'))"], {}), "(opd, roi + '_deco_%s_%s.pdf' % (fn_suffix, 'SSRT'))\n", (5467, 5519), True, 'import os.path as op\n'), ((3155, 3173), 'os.path.split', 'os.path.split', (['opd'], {}), '(opd)\n', (3168, 3173), False, 'import os\n'), ((5110, 5128), 'os.path.split', 'os.path.split', (['opd'], {}), '(opd)\n', (5123, 5128), False, 'import os\n')] |
import numpy as np
import tensorflow as tf
import Nn
from .policy import Policy
class TD3(Policy):
    """Twin-critic deterministic policy-gradient agent (TD3), TF1 static graph.

    Builds a deterministic actor plus two Q critics; the bootstrap target is
    the minimum of the two target critics (tf.minimum below), and the target
    networks track the online networks by Polyak averaging with coefficient
    ``ployak``. Continuous action spaces only (see the assert in __init__).
    """

    def __init__(self,
                 s_dim,
                 visual_sources,
                 visual_resolution,
                 a_dim_or_list,
                 action_type,
                 gamma=0.99,
                 max_episode=50000,
                 batch_size=128,
                 buffer_size=10000,
                 base_dir=None,
                 ployak=0.995,
                 lr=5.0e-4,
                 logger2file=False,
                 out_graph=False):
        # ployak: soft-update coefficient for the target networks.
        # lr: initial learning rate, decayed polynomially over max_episode.
        assert action_type == 'continuous', 'td3 only support continuous action space'
        super().__init__(
            s_dim=s_dim,
            visual_sources=visual_sources,
            visual_resolution=visual_resolution,
            a_dim_or_list=a_dim_or_list,
            action_type=action_type,
            gamma=gamma,
            max_episode=max_episode,
            base_dir=base_dir,
            policy_mode='OFF',
            batch_size=batch_size,
            buffer_size=buffer_size)
        self.ployak = ployak
        with self.graph.as_default():
            # Learning rate decays from lr to ~0 over the course of training.
            self.lr = tf.train.polynomial_decay(lr, self.episode, self.max_episode, 1e-10, power=1.0)
            # self.action_noise = Nn.NormalActionNoise(mu=np.zeros(self.a_counts), sigma=1 * np.ones(self.a_counts))
            # Ornstein-Uhlenbeck exploration noise; NOTE(review): the same
            # noise process is also applied to the target action below.
            self.action_noise = Nn.OrnsteinUhlenbeckActionNoise(mu=np.zeros(self.a_counts), sigma=0.2 * np.ones(self.a_counts))
            # Online actor; exploration action = mu + noise, clipped to [-1, 1].
            self.mu = Nn.actor_dpg('actor_net', self.pl_s, self.pl_visual_s, self.a_counts)
            self.action = tf.clip_by_value(self.mu + self.action_noise(), -1, 1)
            tf.identity(self.mu, 'action')
            # Target actor evaluated on the next state.
            self.target_mu = Nn.actor_dpg('actor_target_net', self.pl_s_, self.pl_visual_s_, self.a_counts)
            self.action_target = tf.clip_by_value(self.target_mu + self.action_noise(), -1, 1)
            # Twin critics; q1_actor reuses the 'q1_net' scope to score the
            # actor's own action for the policy loss.
            self.q1 = Nn.critic_q_one('q1_net', self.pl_s, self.pl_visual_s, self.pl_a)
            self.q1_actor = Nn.critic_q_one('q1_net', self.pl_s, self.pl_visual_s, self.mu)
            self.q1_target = Nn.critic_q_one('q1_target_net', self.pl_s_, self.pl_visual_s_, self.action_target)
            self.q2 = Nn.critic_q_one('q2_net', self.pl_s, self.pl_visual_s, self.pl_a)
            self.q2_target = Nn.critic_q_one('q2_target_net', self.pl_s_, self.pl_visual_s_, self.action_target)
            # Clipped double-Q bootstrap target: min of the two target critics.
            self.q_target = tf.minimum(self.q1_target, self.q2_target)
            self.dc_r = tf.stop_gradient(self.pl_r + self.gamma * self.q_target * (1 - self.pl_done))
            # Critic loss: mean squared TD error, averaged over both critics.
            self.q1_loss = tf.reduce_mean(tf.squared_difference(self.q1, self.dc_r))
            self.q2_loss = tf.reduce_mean(tf.squared_difference(self.q2, self.dc_r))
            self.critic_loss = 0.5 * (self.q1_loss + self.q2_loss)
            # Actor maximizes Q1 of its own action.
            self.actor_loss = -tf.reduce_mean(self.q1_actor)
            self.q1_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='q1_net')
            self.q1_target_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='q1_target_net')
            self.q2_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='q2_net')
            self.q2_target_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='q2_target_net')
            self.actor_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='actor_net')
            self.actor_target_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='actor_target_net')
            # Hard copy online weights into the targets at initialization.
            self.assign_init = self.update_target_net_weights(
                self.q1_target_vars + self.q2_target_vars + self.actor_target_vars,
                self.q1_vars + self.q2_vars + self.actor_vars
            )
            optimizer_critic = tf.train.AdamOptimizer(self.lr)
            optimizer_actor = tf.train.AdamOptimizer(self.lr)
            # self.train_q1 = optimizer.minimize(self.q1_loss, var_list=self.q1_vars)
            # self.train_q2 = optimizer.minimize(self.q2_loss, var_list=self.q2_vars)
            self.train_value = optimizer_critic.minimize(self.critic_loss, var_list=self.q1_vars + self.q2_vars)
            # Enforce order: critic step -> actor step -> soft target update.
            with tf.control_dependencies([self.train_value]):
                self.train_actor = optimizer_actor.minimize(self.actor_loss, var_list=self.actor_vars, global_step=self.global_step)
            with tf.control_dependencies([self.train_actor]):
                self.assign_target = self.update_target_net_weights(
                    self.q1_target_vars + self.q2_target_vars + self.actor_target_vars,
                    self.q1_vars + self.q2_vars + self.actor_vars,
                    self.ployak
                )
            # self.assign_target = self.update_target_net_weights(
            #     self.q1_target_vars+self.q2_target_vars+self.actor_target_vars,
            #     self.q1_vars+self.q2_vars+self.actor_vars,
            #     1-1/(self.episode+1)
            # )
            self.train_sequence = [self.train_value, self.train_actor, self.assign_target]
            tf.summary.scalar('LOSS/actor_loss', tf.reduce_mean(self.actor_loss))
            tf.summary.scalar('LOSS/critic_loss', tf.reduce_mean(self.critic_loss))
            tf.summary.scalar('LEARNING_RATE/lr', tf.reduce_mean(self.lr))
            self.summaries = tf.summary.merge_all()
            self.generate_recorder(
                logger2file=logger2file,
                graph=self.graph if out_graph else None
            )
            self.recorder.logger.info('''
            xxxxxxxxx xxxxxxx xxxxx
                xx x xx x xxx xx xx
                xx x xx x xx xx xx
                x x xx xxx
                x x xxx xxxx
                x x xx xxx
                x x xx xx xx
                x x xxx xx xxx
                xxxxx xxxxxxx xxxxx
    ''')

    def choose_action(self, s, visual_s):
        """Return exploration actions (actor output plus clipped noise)."""
        return self.sess.run(self.action, feed_dict={
            self.pl_visual_s: visual_s,
            self.pl_s: s
        })

    def choose_inference_action(self, s, visual_s):
        """Return deterministic actions (actor output, no exploration noise)."""
        return self.sess.run(self.mu, feed_dict={
            self.pl_visual_s: visual_s,
            self.pl_s: s
        })

    def store_data(self, s, visual_s, a, r, s_, visual_s_, done):
        """Push a transition batch into the off-policy replay buffer.

        Rewards and done flags are expanded to column vectors before storage.
        """
        self.off_store(s, visual_s, a, r[:, np.newaxis], s_, visual_s_, done[:, np.newaxis])

    def learn(self, **kwargs):
        """Run `kwargs['step']` training iterations on replay-buffer samples.

        Each iteration runs one extra critic-only update (train_value),
        followed by the full critic -> actor -> target-update sequence.
        """
        episode = kwargs['episode']
        for i in range(kwargs['step']):
            s, visual_s, a, r, s_, visual_s_, done = self.data.sample()
            # Extra critic-only update before the combined sequence.
            self.sess.run(self.train_value, feed_dict={
                self.pl_visual_s: visual_s,
                self.pl_s: s,
                self.pl_a: a,
                self.pl_r: r,
                self.pl_visual_s_: visual_s_,
                self.pl_s_: s_,
                self.pl_done: done,
                self.episode: episode
            })
            summaries, _ = self.sess.run([self.summaries, self.train_sequence], feed_dict={
                self.pl_visual_s: visual_s,
                self.pl_s: s,
                self.pl_a: a,
                self.pl_r: r,
                self.pl_visual_s_: visual_s_,
                self.pl_s_: s_,
                self.pl_done: done,
                self.episode: episode
            })
            self.recorder.writer.add_summary(summaries, self.sess.run(self.global_step))
| [
"Nn.actor_dpg",
"tensorflow.summary.merge_all",
"numpy.ones",
"tensorflow.squared_difference",
"tensorflow.get_collection",
"Nn.critic_q_one",
"tensorflow.stop_gradient",
"numpy.zeros",
"tensorflow.control_dependencies",
"tensorflow.reduce_mean",
"tensorflow.identity",
"tensorflow.train.AdamOp... | [((1169, 1248), 'tensorflow.train.polynomial_decay', 'tf.train.polynomial_decay', (['lr', 'self.episode', 'self.max_episode', '(1e-10)'], {'power': '(1.0)'}), '(lr, self.episode, self.max_episode, 1e-10, power=1.0)\n', (1194, 1248), True, 'import tensorflow as tf\n'), ((1517, 1586), 'Nn.actor_dpg', 'Nn.actor_dpg', (['"""actor_net"""', 'self.pl_s', 'self.pl_visual_s', 'self.a_counts'], {}), "('actor_net', self.pl_s, self.pl_visual_s, self.a_counts)\n", (1529, 1586), False, 'import Nn\n'), ((1680, 1710), 'tensorflow.identity', 'tf.identity', (['self.mu', '"""action"""'], {}), "(self.mu, 'action')\n", (1691, 1710), True, 'import tensorflow as tf\n'), ((1740, 1818), 'Nn.actor_dpg', 'Nn.actor_dpg', (['"""actor_target_net"""', 'self.pl_s_', 'self.pl_visual_s_', 'self.a_counts'], {}), "('actor_target_net', self.pl_s_, self.pl_visual_s_, self.a_counts)\n", (1752, 1818), False, 'import Nn\n'), ((1937, 2002), 'Nn.critic_q_one', 'Nn.critic_q_one', (['"""q1_net"""', 'self.pl_s', 'self.pl_visual_s', 'self.pl_a'], {}), "('q1_net', self.pl_s, self.pl_visual_s, self.pl_a)\n", (1952, 2002), False, 'import Nn\n'), ((2031, 2094), 'Nn.critic_q_one', 'Nn.critic_q_one', (['"""q1_net"""', 'self.pl_s', 'self.pl_visual_s', 'self.mu'], {}), "('q1_net', self.pl_s, self.pl_visual_s, self.mu)\n", (2046, 2094), False, 'import Nn\n'), ((2124, 2212), 'Nn.critic_q_one', 'Nn.critic_q_one', (['"""q1_target_net"""', 'self.pl_s_', 'self.pl_visual_s_', 'self.action_target'], {}), "('q1_target_net', self.pl_s_, self.pl_visual_s_, self.\n action_target)\n", (2139, 2212), False, 'import Nn\n'), ((2231, 2296), 'Nn.critic_q_one', 'Nn.critic_q_one', (['"""q2_net"""', 'self.pl_s', 'self.pl_visual_s', 'self.pl_a'], {}), "('q2_net', self.pl_s, self.pl_visual_s, self.pl_a)\n", (2246, 2296), False, 'import Nn\n'), ((2326, 2414), 'Nn.critic_q_one', 'Nn.critic_q_one', (['"""q2_target_net"""', 'self.pl_s_', 'self.pl_visual_s_', 'self.action_target'], {}), "('q2_target_net', self.pl_s_, 
self.pl_visual_s_, self.\n action_target)\n", (2341, 2414), False, 'import Nn\n'), ((2439, 2481), 'tensorflow.minimum', 'tf.minimum', (['self.q1_target', 'self.q2_target'], {}), '(self.q1_target, self.q2_target)\n', (2449, 2481), True, 'import tensorflow as tf\n'), ((2506, 2583), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['(self.pl_r + self.gamma * self.q_target * (1 - self.pl_done))'], {}), '(self.pl_r + self.gamma * self.q_target * (1 - self.pl_done))\n', (2522, 2583), True, 'import tensorflow as tf\n'), ((2911, 2978), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""q1_net"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='q1_net')\n", (2928, 2978), True, 'import tensorflow as tf\n'), ((3013, 3087), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""q1_target_net"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='q1_target_net')\n", (3030, 3087), True, 'import tensorflow as tf\n'), ((3115, 3182), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""q2_net"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='q2_net')\n", (3132, 3182), True, 'import tensorflow as tf\n'), ((3217, 3291), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""q2_target_net"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='q2_target_net')\n", (3234, 3291), True, 'import tensorflow as tf\n'), ((3322, 3392), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""actor_net"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='actor_net')\n", (3339, 3392), True, 'import tensorflow as tf\n'), ((3430, 3507), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""actor_target_net"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='actor_target_net')\n", (3447, 3507), True, 'import tensorflow as 
tf\n'), ((3763, 3794), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.lr'], {}), '(self.lr)\n', (3785, 3794), True, 'import tensorflow as tf\n'), ((3825, 3856), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.lr'], {}), '(self.lr)\n', (3847, 3856), True, 'import tensorflow as tf\n'), ((5364, 5386), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (5384, 5386), True, 'import tensorflow as tf\n'), ((2627, 2668), 'tensorflow.squared_difference', 'tf.squared_difference', (['self.q1', 'self.dc_r'], {}), '(self.q1, self.dc_r)\n', (2648, 2668), True, 'import tensorflow as tf\n'), ((2712, 2753), 'tensorflow.squared_difference', 'tf.squared_difference', (['self.q2', 'self.dc_r'], {}), '(self.q2, self.dc_r)\n', (2733, 2753), True, 'import tensorflow as tf\n'), ((2853, 2882), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.q1_actor'], {}), '(self.q1_actor)\n', (2867, 2882), True, 'import tensorflow as tf\n'), ((4159, 4202), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[self.train_value]'], {}), '([self.train_value])\n', (4182, 4202), True, 'import tensorflow as tf\n'), ((5143, 5174), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.actor_loss'], {}), '(self.actor_loss)\n', (5157, 5174), True, 'import tensorflow as tf\n'), ((5226, 5258), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.critic_loss'], {}), '(self.critic_loss)\n', (5240, 5258), True, 'import tensorflow as tf\n'), ((5310, 5333), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['self.lr'], {}), '(self.lr)\n', (5324, 5333), True, 'import tensorflow as tf\n'), ((1434, 1457), 'numpy.zeros', 'np.zeros', (['self.a_counts'], {}), '(self.a_counts)\n', (1442, 1457), True, 'import numpy as np\n'), ((4358, 4401), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['[self.train_actor]'], {}), '([self.train_actor])\n', (4381, 4401), True, 'import tensorflow as tf\n'), ((1471, 1493), 'numpy.ones', 'np.ones', 
(['self.a_counts'], {}), '(self.a_counts)\n', (1478, 1493), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from disarm_gears.util import trend_1st_order, trend_2nd_order, trend_3rd_order
X = np.arange(6).reshape(3, 2)
class TessellationTests(unittest.TestCase):
    """Unit tests for the polynomial trend-surface design-matrix helpers."""

    def _check_design(self, Z, n_cols, expected_cells):
        """Shared assertions: Z is an ndarray of shape (3, n_cols) matching
        the given (row, col) -> value spot checks."""
        self.assertIsInstance(Z, np.ndarray)
        self.assertEqual(Z.shape[0], 3)
        self.assertEqual(Z.shape[1], n_cols)
        for (row, col), value in expected_cells:
            self.assertEqual(Z[row, col], value)

    def test_trend_1st_order(self):
        self._check_design(trend_1st_order(X), 3, [((2, 2), 20)])

    def test_trend_2nd_order(self):
        self._check_design(trend_2nd_order(X), 5,
                           [((1, 0), 4), ((2, 1), 25), ((0, 2), 0),
                            ((1, 3), 3), ((2, 4), 20)])

    def test_trend_3rd_order(self):
        self._check_design(trend_3rd_order(X), 9,
                           [((1, 0), 8), ((2, 1), 125), ((1, 2), 4),
                            ((2, 3), 25), ((0, 4), 0), ((1, 5), 3),
                            ((1, 6), 12), ((1, 7), 18), ((2, 8), 20)])
| [
"disarm_gears.util.trend_2nd_order",
"disarm_gears.util.trend_3rd_order",
"disarm_gears.util.trend_1st_order",
"numpy.arange"
] | [((120, 132), 'numpy.arange', 'np.arange', (['(6)'], {}), '(6)\n', (129, 132), True, 'import numpy as np\n'), ((241, 259), 'disarm_gears.util.trend_1st_order', 'trend_1st_order', (['X'], {}), '(X)\n', (256, 259), False, 'from disarm_gears.util import trend_1st_order, trend_2nd_order, trend_3rd_order\n'), ((472, 490), 'disarm_gears.util.trend_2nd_order', 'trend_2nd_order', (['X'], {}), '(X)\n', (487, 490), False, 'from disarm_gears.util import trend_1st_order, trend_2nd_order, trend_3rd_order\n'), ((852, 870), 'disarm_gears.util.trend_3rd_order', 'trend_3rd_order', (['X'], {}), '(X)\n', (867, 870), False, 'from disarm_gears.util import trend_1st_order, trend_2nd_order, trend_3rd_order\n')] |
import os
import json
import argparse
import numpy as np
import pickle as pkl
from tqdm import tqdm
import innvestigate
from DNN_training import get_damd_cnn
def explain_behavior(analyzer, data, labels, dims_to_explain, filenames, save_path):
    """
    Creates an LRP explanation for each data instance and pickles one
    relevance matrix per sample under <save_path>/relevances_raw/<name>.pkl.
    @param analyzer: Object of innvestigate analyzer class
    @param data: List of lists representing sequences of glog calls as indices
    @param labels: Labels corresponding to data (only labels.shape[1] is used)
    @param dims_to_explain: List of classes for which explanation shall be generated
    @param filenames: Name of each sample that is used as identifier when saving results
    @param save_path: Location where to store results
    """
    no_behaviors = labels.shape[1]
    raw_dir = os.path.join(save_path, 'relevances_raw')
    if not os.path.isdir(raw_dir):
        os.makedirs(raw_dir)
    for d, dimensions, fname in tqdm(zip(data, dims_to_explain, filenames), total=len(data)):
        d = np.array(d).reshape(1, -1)
        no_glog_calls = d.shape[1]
        # BUG FIX: the original pickled a ragged Python list mixing per-token
        # relevance lists with bare np.nan scalars, but the consumer
        # (get_explanations_for_behavior) indexes the loaded object as a 2-D
        # ndarray (rel_vector.shape, rel_vector[i, :]), which fails on a list.
        # Store a NaN-prefilled float matrix instead: unexplained behaviors
        # stay all-NaN, which the consumer detects via np.isnan(...).all().
        rel = np.full((no_behaviors, no_glog_calls), np.nan)
        for i in range(no_behaviors):
            if i in dimensions:
                rel[i, :] = analyzer.analyze(d, neuron_selection=i).reshape((no_glog_calls,))
        # these can become really large in sum, therefore we save each separately
        filename = os.path.join(raw_dir, fname) + '.pkl'
        # Use a context manager so the file handle is not leaked.
        with open(filename, 'wb') as f:
            pkl.dump(rel, f)
def get_explanations_for_behavior(filenames, data, idx2call, idx2behave, save_path, index_range=10, top_n=10):
    """
    Summarizes raw explanations by using the 'top_n' tokens with highest relevance in each sample and saves the
    surrounding of the tokens where surrounding is given by 'index_range' functions calls before and after the token
    @param filenames: Name of each sample that is used as identifier when saving results
    @param data: List of lists representing sequences of glog calls as indices
    @param idx2call: Dictionary that maps an index in data to the corresponding function call
    @param idx2behave: Dictionary that maps an index in labels to the corresponding name
    @param save_path: Path where raw relevances as produced by 'explain_behavior' lie
    @param index_range: How many indices before and after the most relevant tokens shall be considered
    @param top_n: How many of the most relevant tokens shall be analyzed
    """
    text_dir = os.path.join(save_path, 'relevances_text')
    if not os.path.isdir(text_dir):
        os.makedirs(text_dir)
    for filename, d in tqdm(zip(filenames, data), total=len(data)):
        json_dict = {}
        with open(os.path.join(save_path, 'relevances_raw', filename + '.pkl'), 'rb') as f:
            rel_vector = pkl.load(f)
        for i in range(rel_vector.shape[0]):
            if np.isnan(rel_vector[i, :]).all():
                continue  # behavior i was not explained for this sample
            max_relevance = np.max(np.abs(rel_vector[i]))
            behavior_name = idx2behave[i]
            json_dict[behavior_name] = {'top-10-tokens': {}, 'surroundings': {}}
            # BUG FIX: the original rebound the *parameter* top_n to this index
            # array ("top_n = np.argsort(...)[:top_n]"), so for every behavior
            # after the first the slice received an ndarray instead of an int
            # and raised TypeError. Keep the indices in a separate local.
            top_indices = np.argsort(-rel_vector[i, :])[:top_n]
            for idx in top_indices:
                token_name = idx2call[d[idx]]
                token_precessor = idx2call[d[idx - 1]] if idx > 0 else ''
                token_decessor = idx2call[d[idx + 1]] if idx != len(d) - 1 else ''
                running_idx = idx
                fcall_name = 'not-found'
                # Walk backwards for the enclosing function call (glog
                # function-call tokens start with "gfn").
                # NOTE(review): index 0 is never inspected because the loop
                # stops once running_idx reaches 0 — presumably intentional,
                # but worth confirming.
                while fcall_name == 'not-found' and running_idx > 0:
                    preceeding_token_name = idx2call[d[running_idx]]
                    if preceeding_token_name[:3] == 'gfn':
                        fcall_name = preceeding_token_name
                    running_idx -= 1
                token_name = f'[{fcall_name}][{token_precessor}]{token_name}[{token_decessor}]'
                rel_value = rel_vector[i, idx]
                # Normalize by the sample's maximum absolute relevance.
                rel_value_normed = 1. / max_relevance * rel_value if max_relevance != 0 else 0
                json_dict[behavior_name]['top-10-tokens'][token_name] = rel_value_normed
                json_dict[behavior_name]['surroundings'][token_name] = []
                range_lower = idx - index_range if idx >= index_range else 0
                range_upper = idx + index_range if idx <= len(d) - index_range else len(d)
                for glog_idx in range(range_lower, range_upper):
                    token_name_surr = idx2call[d[glog_idx]]
                    rel_surr = rel_vector[i, glog_idx]
                    rel_surr_normed = 1. / max_relevance * rel_surr if rel_surr != 0 else 0
                    json_dict[behavior_name]['surroundings'][token_name].append([token_name_surr, rel_surr_normed])
        with open(os.path.join(save_path, 'relevances_text', filename + '.json'), 'w') as f:
            json.dump(json_dict, f, indent=4)
def gen_explanations_args(args):
    """Handle the `generate_explanations` sub-command.

    Loads data, labels and the trained model, optionally predicts which
    classes to explain and computes raw LRP relevances for them, and always
    renders the per-sample JSON summaries.
    """
    data = pkl.load(open(args.data_path, 'rb'))
    labels = np.load(args.label_path)
    filenames = open(args.filename_path).read().splitlines()
    calls = open(args.glog_call_path).read().splitlines()
    num_classes = labels.shape[1]
    num_tokens = len(calls)
    print('no tokens', num_tokens)
    # Two copies of the network: predictions use the trained nonlinearity,
    # while LRP must run on the pre-activation outputs (no final softmax).
    model_with_act = get_damd_cnn(num_tokens, num_classes, final_nonlinearity=args.nonlinearity)
    model_logits = get_damd_cnn(num_tokens, num_classes, final_nonlinearity=None)
    model_with_act.load_weights(args.model_path)
    model_logits.load_weights(args.model_path)
    if args.calculate_raw:
        print('Predicting samples ...')
        dims_to_explain = []
        for sample in tqdm(data):
            batch = np.array(sample).reshape(1, -1)
            pred = model_with_act.predict(batch)
            if args.nonlinearity == 'softmax':
                # Single-label model: explain only the argmax class.
                selected = [np.argmax(pred[0])]
            else:
                # Multi-label model: explain every class above threshold.
                selected = np.where(pred > 0.5)[1]
            dims_to_explain.append(selected)
    analyzer = innvestigate.create_analyzer('lrp.epsilon', model_logits,
                                            neuron_selection_mode='index', epsilon=1e-2)
    tag_names = open(args.tag_names).read().splitlines() if args.tag_names is not None else None
    idx_to_call = {pos + 1: name for pos, name in enumerate(calls)}
    if tag_names is not None:
        idx_to_tag = {pos: name for pos, name in enumerate(tag_names)}
    else:
        idx_to_tag = {pos: str(pos) for pos in range(num_classes)}
    if args.calculate_raw:
        explain_behavior(analyzer, data, labels, dims_to_explain, filenames, args.save_path)
    get_explanations_for_behavior(filenames, data, idx_to_call, idx_to_tag, args.save_path)
def average_explanations(args):
    """Handle the `average_explanations` sub-command.

    Aggregates the per-sample JSON reports in `args.data_dir` into one JSON
    file per behavior (written to `args.save_folder`) containing, for every
    token that appeared in a top-10 list: its occurrence count, its average
    relevance, and the fraction of that behavior's reports it appeared in.
    """
    save_folder = args.save_folder
    report_names = [f for f in os.listdir(args.data_dir) if f.endswith('json')]
    report_paths = [os.path.join(args.data_dir, f) for f in report_names]
    aggregated = {}
    print('Averaging {} reports'.format(len(report_paths)))
    for path in tqdm(report_paths):
        with open(path, 'r') as f:
            report = json.load(f)
        for behavior, details in report.items():
            stats = aggregated.setdefault(behavior, {'occurences': 0})
            stats['occurences'] += 1
            for token, relevance in details['top-10-tokens'].items():
                if token not in stats:
                    stats[token] = {'no_occurences': 0, 'avg_relevance': 0, 'relevances': []}
                entry = stats[token]
                entry['no_occurences'] += 1
                entry['relevances'].append(relevance)
                entry['avg_relevance'] = np.mean(entry['relevances'])
    # Relative frequency of each token among a behavior's top-10 lists.
    for behavior, stats in aggregated.items():
        total = stats['occurences']
        for token, entry in stats.items():
            if token == 'occurences':
                continue
            entry['percentage_in_top_10'] = entry['no_occurences'] / total
    # Write one JSON per behavior, dropping the raw relevance lists.
    for behavior, stats in aggregated.items():
        for token, entry in stats.items():
            if token == 'occurences':
                continue
            del entry['relevances']
        with open(os.path.join(save_folder, behavior + '.json'), 'w') as f:
            json.dump(stats, f, indent=4)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Generates explanations for Trained Model.')
subparsers = parser.add_subparsers(dest="command")
# generate explanations
expl_parser = subparsers.add_parser("generate_explanations",
help="Creates explanations and saves them in json file")
expl_parser.add_argument("model_path", type=str, help="Path to tensorflow model to explain (as .hdf5 file)")
expl_parser.add_argument("data_path", type=str, help="Path to data to explain (as .pkl file)")
expl_parser.add_argument("label_path", type=str, help="Path to labels belonging to data (as .npy file)")
expl_parser.add_argument("glog_call_path", type=str, help="Path to file containing all glog function calls")
expl_parser.add_argument("save_path", type=str, help="Where to save explanations and results")
expl_parser.add_argument("filename_path", type=str, help="File containing filenames for each data sample")
expl_parser.add_argument("--tag_names", type=str, help="Path to file containing names for tags")
expl_parser.add_argument("--nonlinearity", type=str, help="Final nonlinearity used for model", default='softmax')
expl_parser.add_argument("--calculate_raw", help="Whether to calculate raw explanations aswell. The raw"
"explanations can use a lot of disk space and are normally"
"not necessary", action='store_true')
# average_explanations
train_parser = subparsers.add_parser("average_explanations", help="Averages explanations in directory.")
train_parser.add_argument("data_dir", type=str, help="Path to folder containing jsons as generated by"
"'generate_explanations' call")
train_parser.add_argument("save_folder", type=str, help="Path to save destination of averaged analyses")
args = parser.parse_args()
if args.command == 'generate_explanations':
gen_explanations_args(args)
elif args.command == 'average_explanations':
average_explanations(args)
| [
"numpy.abs",
"numpy.mean",
"os.listdir",
"argparse.ArgumentParser",
"numpy.where",
"tqdm.tqdm",
"os.path.join",
"numpy.argmax",
"innvestigate.create_analyzer",
"numpy.argsort",
"numpy.array",
"numpy.isnan",
"DNN_training.get_damd_cnn",
"numpy.load",
"json.dump"
] | [((5258, 5282), 'numpy.load', 'np.load', (['args.label_path'], {}), '(args.label_path)\n', (5265, 5282), True, 'import numpy as np\n'), ((5517, 5589), 'DNN_training.get_damd_cnn', 'get_damd_cnn', (['no_tokens', 'no_labels'], {'final_nonlinearity': 'args.nonlinearity'}), '(no_tokens, no_labels, final_nonlinearity=args.nonlinearity)\n', (5529, 5589), False, 'from DNN_training import get_damd_cnn\n'), ((5613, 5672), 'DNN_training.get_damd_cnn', 'get_damd_cnn', (['no_tokens', 'no_labels'], {'final_nonlinearity': 'None'}), '(no_tokens, no_labels, final_nonlinearity=None)\n', (5625, 5672), False, 'from DNN_training import get_damd_cnn\n'), ((7229, 7244), 'tqdm.tqdm', 'tqdm', (['filepaths'], {}), '(filepaths)\n', (7233, 7244), False, 'from tqdm import tqdm\n'), ((9381, 9466), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generates explanations for Trained Model."""'}), "(description='Generates explanations for Trained Model.'\n )\n", (9404, 9466), False, 'import argparse\n'), ((5892, 5902), 'tqdm.tqdm', 'tqdm', (['data'], {}), '(data)\n', (5896, 5902), False, 'from tqdm import tqdm\n'), ((6233, 6343), 'innvestigate.create_analyzer', 'innvestigate.create_analyzer', (['"""lrp.epsilon"""', 'model_wo_softmax'], {'neuron_selection_mode': '"""index"""', 'epsilon': '(0.01)'}), "('lrp.epsilon', model_wo_softmax,\n neuron_selection_mode='index', epsilon=0.01)\n", (6261, 6343), False, 'import innvestigate\n'), ((7084, 7114), 'os.path.join', 'os.path.join', (['args.data_dir', 'f'], {}), '(args.data_dir, f)\n', (7096, 7114), False, 'import os\n'), ((784, 825), 'os.path.join', 'os.path.join', (['save_path', '"""relevances_raw"""'], {}), "(save_path, 'relevances_raw')\n", (796, 825), False, 'import os\n'), ((848, 889), 'os.path.join', 'os.path.join', (['save_path', '"""relevances_raw"""'], {}), "(save_path, 'relevances_raw')\n", (860, 889), False, 'import os\n'), ((1421, 1469), 'os.path.join', 'os.path.join', (['save_path', '"""relevances_raw"""', 
'fname'], {}), "(save_path, 'relevances_raw', fname)\n", (1433, 1469), False, 'import os\n'), ((2519, 2561), 'os.path.join', 'os.path.join', (['save_path', '"""relevances_text"""'], {}), "(save_path, 'relevances_text')\n", (2531, 2561), False, 'import os\n'), ((2584, 2626), 'os.path.join', 'os.path.join', (['save_path', '"""relevances_text"""'], {}), "(save_path, 'relevances_text')\n", (2596, 2626), False, 'import os\n'), ((7018, 7043), 'os.listdir', 'os.listdir', (['args.data_dir'], {}), '(args.data_dir)\n', (7028, 7043), False, 'import os\n'), ((9300, 9338), 'json.dump', 'json.dump', (['behavior_dict_', 'f'], {'indent': '(4)'}), '(behavior_dict_, f, indent=4)\n', (9309, 9338), False, 'import json\n'), ((997, 1008), 'numpy.array', 'np.array', (['d'], {}), '(d)\n', (1005, 1008), True, 'import numpy as np\n'), ((2754, 2814), 'os.path.join', 'os.path.join', (['save_path', '"""relevances_raw"""', "(filename + '.pkl')"], {}), "(save_path, 'relevances_raw', filename + '.pkl')\n", (2766, 2814), False, 'import os\n'), ((5128, 5161), 'json.dump', 'json.dump', (['json_dict', 'f'], {'indent': '(4)'}), '(json_dict, f, indent=4)\n', (5137, 5161), False, 'import json\n'), ((9232, 9277), 'os.path.join', 'os.path.join', (['save_folder', "(behavior + '.json')"], {}), "(save_folder, behavior + '.json')\n", (9244, 9277), False, 'import os\n'), ((2959, 2980), 'numpy.abs', 'np.abs', (['rel_vector[i]'], {}), '(rel_vector[i])\n', (2965, 2980), True, 'import numpy as np\n'), ((3098, 3127), 'numpy.argsort', 'np.argsort', (['(-rel_vector[i, :])'], {}), '(-rel_vector[i, :])\n', (3108, 3127), True, 'import numpy as np\n'), ((5039, 5101), 'os.path.join', 'os.path.join', (['save_path', '"""relevances_text"""', "(filename + '.json')"], {}), "(save_path, 'relevances_text', filename + '.json')\n", (5051, 5101), False, 'import os\n'), ((5924, 5940), 'numpy.array', 'np.array', (['sample'], {}), '(sample)\n', (5932, 5940), True, 'import numpy as np\n'), ((8436, 8491), 'numpy.mean', 'np.mean', 
(["behavior_dict[behavior][feature]['relevances']"], {}), "(behavior_dict[behavior][feature]['relevances'])\n", (8443, 8491), True, 'import numpy as np\n'), ((2885, 2911), 'numpy.isnan', 'np.isnan', (['rel_vector[i, :]'], {}), '(rel_vector[i, :])\n', (2893, 2911), True, 'import numpy as np\n'), ((6099, 6123), 'numpy.argmax', 'np.argmax', (['prediction[0]'], {}), '(prediction[0])\n', (6108, 6123), True, 'import numpy as np\n'), ((6183, 6209), 'numpy.where', 'np.where', (['(prediction > 0.5)'], {}), '(prediction > 0.5)\n', (6191, 6209), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.